diff -Nru aptly-0.9.6/debian/changelog aptly-0.9.7/debian/changelog --- aptly-0.9.6/debian/changelog 2016-02-10 17:53:29.000000000 +0000 +++ aptly-0.9.7/debian/changelog 2016-05-24 07:17:38.000000000 +0000 @@ -1,3 +1,10 @@ +aptly (0.9.7-1) unstable; urgency=medium + + * Imported new upstream version 0.9.7 + * Add new licenses and copyrights info + + -- Sebastien Delafond Tue, 24 May 2016 09:17:08 +0200 + aptly (0.9.6-1) unstable; urgency=medium * Import new upstream version 0.9.6 diff -Nru aptly-0.9.6/debian/copyright aptly-0.9.7/debian/copyright --- aptly-0.9.6/debian/copyright 2016-02-10 17:53:29.000000000 +0000 +++ aptly-0.9.7/debian/copyright 2016-05-24 07:17:38.000000000 +0000 @@ -55,6 +55,16 @@ License: Apache-2 See: /usr/share/common-licenses/Apache-2.0 +Files: src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/*, src/github.com/smira/aptly/_vendor/src/github.com/go-ini/*, +Copyright: 2014 Unknown +License: Apache-2 + See: /usr/share/common-licenses/Apache-2.0 + +Files: src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/* +Copyright: 2015 James Saryerwinnie +License: Apache-2 + See: /usr/share/common-licenses/Apache-2.0 + Files: src/github.com/smira/aptly/_vendor/src/github.com/golang/snappy/* Copyright: 2011 The Snappy-Go Authors License: BSD-3-clause @@ -63,6 +73,10 @@ Copyright: 2013-2014 Yasuhiro Matsumoto License: MIT +Files: src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth +Copyright: 2013 SmartyStreets +License: MIT + Files: src/github.com/smira/aptly/_vendor/src/github.com/smira/commander/* Copyright: 2012 The Go-Commander Authors License: BSD-3-clause @@ -91,21 +105,6 @@ Copyright: 2013 Maxim Khitrov License: BSD-3-clause -Files: src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/* -Copyright: 2011 Canonical Ltd -License: LGPL-3 - See: /usr/share/common-licenses/LGPL-3 - -Files: 
src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/* -Copyright: 2011 Memeo Inc -License: LGPL-3 - See: /usr/share/common-licenses/LGPL-3 - -Files: src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/* -Copyright: 2011 AppsAttic Ltd -License: LGPL-3 - See: /usr/share/common-licenses/LGPL-3 - Files: src/github.com/smira/aptly/_vendor/src/github.com/DisposaBoy/JsonConfigReader/* Copyright: 2012 The JsonConfigReader Authors License: MIT diff -Nru aptly-0.9.6/src/github.com/smira/aptly/api/api.go aptly-0.9.7/src/github.com/smira/aptly/api/api.go --- aptly-0.9.6/src/github.com/smira/aptly/api/api.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/api/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -27,6 +27,29 @@ RELEASEDB ) +// Flushes all collections which cache in-memory objects +func flushColections() { + // lock everything to eliminate in-progress calls + r := context.CollectionFactory().RemoteRepoCollection() + r.Lock() + defer r.Unlock() + + l := context.CollectionFactory().LocalRepoCollection() + l.Lock() + defer l.Unlock() + + s := context.CollectionFactory().SnapshotCollection() + s.Lock() + defer s.Unlock() + + p := context.CollectionFactory().PublishedRepoCollection() + p.Lock() + defer p.Unlock() + + // all collections locked, flush them + context.CollectionFactory().Flush() +} + // Periodically flushes CollectionFactory to free up memory used by // collections, flushing caches. If the two channels are provided, // they are used to acquire and release the database. 
@@ -38,40 +61,12 @@ for { <-ticker - func() { - // lock database if needed - if requests != nil { - requests <- ACQUIREDB - err := <-acks - if err != nil { - return - } - defer func() { - requests <- RELEASEDB - <-acks - }() - } - - // lock everything to eliminate in-progress calls - r := context.CollectionFactory().RemoteRepoCollection() - r.Lock() - defer r.Unlock() - - l := context.CollectionFactory().LocalRepoCollection() - l.Lock() - defer l.Unlock() - - s := context.CollectionFactory().SnapshotCollection() - s.Lock() - defer s.Unlock() - - p := context.CollectionFactory().PublishedRepoCollection() - p.Lock() - defer p.Unlock() - - // all collections locked, flush them - context.CollectionFactory().Flush() - }() + // if aptly API runs in -no-lock mode, + // caches are flushed when DB is closed anyway, no need + // to flush them here + if requests == nil { + flushColections() + } } } @@ -95,6 +90,7 @@ case RELEASEDB: clients-- if clients == 0 { + flushColections() acks <- context.CloseDatabase() } else { acks <- nil @@ -146,6 +142,7 @@ nil, context.DependencyOptions(), architecturesList) if err != nil { c.Fail(500, fmt.Errorf("unable to search: %s", err)) + return } } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/api/publish.go aptly-0.9.7/src/github.com/smira/aptly/api/publish.go --- aptly-0.9.6/src/github.com/smira/aptly/api/publish.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/api/publish.go 2016-05-24 07:05:22.000000000 +0000 @@ -97,6 +97,7 @@ Label string Origin string ForceOverwrite bool + SkipContents *bool Architectures []string Signing SigningOptions } @@ -183,6 +184,11 @@ published.Origin = b.Origin published.Label = b.Label + published.SkipContents = context.Config().SkipContentsPublishing + if b.SkipContents != nil { + published.SkipContents = *b.SkipContents + } + duplicate := collection.CheckDuplicate(published) if duplicate != nil { 
context.CollectionFactory().PublishedRepoCollection().LoadComplete(duplicate, context.CollectionFactory()) @@ -199,6 +205,7 @@ err = collection.Add(published) if err != nil { c.Fail(500, fmt.Errorf("unable to save to DB: %s", err)) + return } c.JSON(201, published) @@ -213,6 +220,7 @@ var b struct { ForceOverwrite bool Signing SigningOptions + SkipContents *bool Snapshots []struct { Component string `binding:"required"` Name string `binding:"required"` @@ -289,22 +297,30 @@ } } else { c.Fail(500, fmt.Errorf("unknown published repository type")) + return + } + + if b.SkipContents != nil { + published.SkipContents = *b.SkipContents } err = published.Publish(context.PackagePool(), context, context.CollectionFactory(), signer, nil, b.ForceOverwrite) if err != nil { c.Fail(500, fmt.Errorf("unable to update: %s", err)) + return } err = collection.Update(published) if err != nil { c.Fail(500, fmt.Errorf("unable to save to DB: %s", err)) + return } err = collection.CleanupPrefixComponentFiles(published.Prefix, updatedComponents, context.GetPublishedStorage(storage), context.CollectionFactory(), nil) if err != nil { c.Fail(500, fmt.Errorf("unable to update: %s", err)) + return } c.JSON(200, published) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/aptly/version.go aptly-0.9.7/src/github.com/smira/aptly/aptly/version.go --- aptly-0.9.6/src/github.com/smira/aptly/aptly/version.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/aptly/version.go 2016-05-24 07:05:22.000000000 +0000 @@ -1,7 +1,7 @@ package aptly // Version of aptly -const Version = "0.9.6" +const Version = "0.9.7" // Enable debugging features? 
const EnableDebug = false diff -Nru aptly-0.9.6/src/github.com/smira/aptly/AUTHORS aptly-0.9.7/src/github.com/smira/aptly/AUTHORS --- aptly-0.9.6/src/github.com/smira/aptly/AUTHORS 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/AUTHORS 2016-05-24 07:05:22.000000000 +0000 @@ -19,3 +19,6 @@ * Paul Krohn (https://github.com/paul-krohn) * Vincent Bernat (https://github.com/vincentbernat) * x539 (https://github.com/x539) +* Phil Frost (https://github.com/bitglue) +* Benoit Foucher (https://github.com/bentoi) +* Geoffrey Thomas (https://github.com/geofft) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/cmd/publish_snapshot.go aptly-0.9.7/src/github.com/smira/aptly/cmd/publish_snapshot.go --- aptly-0.9.6/src/github.com/smira/aptly/cmd/publish_snapshot.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/cmd/publish_snapshot.go 2016-05-24 07:05:22.000000000 +0000 @@ -119,6 +119,8 @@ published.Origin = context.Flags().Lookup("origin").Value.String() published.Label = context.Flags().Lookup("label").Value.String() + published.SkipContents = context.Config().SkipContentsPublishing + if context.Flags().IsSet("skip-contents") { published.SkipContents = context.Flags().Lookup("skip-contents").Value.Get().(bool) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/context/context.go aptly-0.9.7/src/github.com/smira/aptly/context/context.go --- aptly-0.9.6/src/github.com/smira/aptly/context/context.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/context/context.go 2016-05-24 07:05:22.000000000 +0000 @@ -321,9 +321,11 @@ } var err error - publishedStorage, err = s3.NewPublishedStorage(params.AccessKeyID, params.SecretAccessKey, + publishedStorage, err = s3.NewPublishedStorage( + params.AccessKeyID, params.SecretAccessKey, params.SessionToken, params.Region, params.Endpoint, params.Bucket, params.ACL, params.Prefix, params.StorageClass, - params.EncryptionMethod, params.PlusWorkaround, 
params.DisableMultiDel) + params.EncryptionMethod, params.PlusWorkaround, params.DisableMultiDel, + params.ForceSigV2, params.Debug) if err != nil { Fatal(err) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/database/leveldb.go aptly-0.9.7/src/github.com/smira/aptly/database/leveldb.go --- aptly-0.9.6/src/github.com/smira/aptly/database/leveldb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/database/leveldb.go 2016-05-24 07:05:22.000000000 +0000 @@ -61,7 +61,7 @@ // RecoverDB recovers LevelDB database from corruption func RecoverDB(path string) error { - stor, err := storage.OpenFile(path) + stor, err := storage.OpenFile(path, false) if err != nil { return err } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/changes.go aptly-0.9.7/src/github.com/smira/aptly/deb/changes.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/changes.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/changes.go 2016-05-24 07:05:22.000000000 +0000 @@ -256,6 +256,12 @@ return nil }) + + if err2 != nil { + reporter.Warning("Unable to process %s: %s", location, err2) + failedFiles = append(failedFiles, location) + continue + } } else if strings.HasSuffix(info.Name(), ".changes") { changesFiles = append(changesFiles, location) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/deb.go aptly-0.9.7/src/github.com/smira/aptly/deb/deb.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/deb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/deb.go 2016-05-24 07:05:22.000000000 +0000 @@ -127,7 +127,7 @@ case "data.tar.gz": ungzip, err := gzip.NewReader(library) if err != nil { - return nil, fmt.Errorf("unable to ungzip data.tar.gz from %s: %s", packageFile,err) + return nil, fmt.Errorf("unable to ungzip data.tar.gz from %s: %s", packageFile, err) } defer ungzip.Close() tarInput = ungzip diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/format.go 
aptly-0.9.7/src/github.com/smira/aptly/deb/format.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/format.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/format.go 2016-05-24 07:05:22.000000000 +0000 @@ -30,6 +30,7 @@ "MD5Sum", "SHA1", "SHA256", + "SHA512", } canonicalOrderBinary = []string{ @@ -59,6 +60,7 @@ "MD5sum", "SHA1", "SHA256", + "SHA512", "Description", } @@ -104,6 +106,8 @@ return true case "Checksums-Sha256": return true + case "Checksums-Sha512": + return true case "Package-List": return true case "MD5Sum": @@ -112,6 +116,8 @@ return isRelease case "SHA256": return isRelease + case "SHA512": + return isRelease } return false } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/import.go aptly-0.9.7/src/github.com/smira/aptly/deb/import.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/import.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/import.go 2016-05-24 07:05:22.000000000 +0000 @@ -34,6 +34,12 @@ return nil }) + + if err2 != nil { + reporter.Warning("Unable to process %s: %s", location, err2) + failedFiles = append(failedFiles, location) + continue + } } else { if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".udeb") || strings.HasSuffix(info.Name(), ".dsc") || strings.HasSuffix(info.Name(), ".ddeb") { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/list.go aptly-0.9.7/src/github.com/smira/aptly/deb/list.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/list.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/list.go 2016-05-24 07:05:22.000000000 +0000 @@ -36,6 +36,10 @@ packagesIndex []*Package // Map of packages for each virtual package (provides) providesIndex map[string][]*Package + // Package key generation function + keyFunc func(p *Package) string + // Allow duplicates? 
+ duplicatesAllowed bool } // PackageConflictError means that package can't be added to the list due to error @@ -49,9 +53,35 @@ _ PackageCatalog = &PackageList{} ) -// NewPackageList creates empty package list +func packageShortKey(p *Package) string { + return string(p.ShortKey("")) +} + +func packageFullKey(p *Package) string { + return string(p.Key("")) +} + +// NewPackageList creates empty package list without duplicate package func NewPackageList() *PackageList { - return &PackageList{packages: make(map[string]*Package, 1000)} + return NewPackageListWithDuplicates(false, 1000) +} + +func NewPackageListWithDuplicates(duplicates bool, capacity int) *PackageList { + if capacity == 0 { + capacity = 1000 + } + + result := &PackageList{ + packages: make(map[string]*Package, capacity), + duplicatesAllowed: duplicates, + keyFunc: packageShortKey, + } + + if duplicates { + result.keyFunc = packageFullKey + } + + return result } // NewPackageListFromRefList loads packages list from PackageRefList @@ -61,7 +91,7 @@ return NewPackageList(), nil } - result := &PackageList{packages: make(map[string]*Package, reflist.Len())} + result := NewPackageListWithDuplicates(false, reflist.Len()) if progress != nil { progress.InitBar(int64(reflist.Len()), false) @@ -91,7 +121,7 @@ // Add appends package to package list, additionally checking for uniqueness func (l *PackageList) Add(p *Package) error { - key := string(p.ShortKey("")) + key := l.keyFunc(p) existing, ok := l.packages[key] if ok { if !existing.Equals(p) { @@ -170,7 +200,7 @@ // Remove removes package from the list, and updates index when required func (l *PackageList) Remove(p *Package) { - delete(l.packages, string(p.ShortKey(""))) + delete(l.packages, l.keyFunc(p)) if l.indexed { for _, provides := range p.Provides { for i, pkg := range l.providesIndex[provides] { @@ -252,7 +282,7 @@ // VerifyDependencies looks for missing dependencies in package list. 
// -// Analysis would be peformed for each architecture, in specified sources +// Analysis would be performed for each architecture, in specified sources func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) { l.PrepareIndex() missing := make([]Dependency, 0, 128) @@ -365,7 +395,7 @@ // Scan searches package index using full scan func (l *PackageList) Scan(q PackageQuery) (result *PackageList) { - result = NewPackageList() + result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0) for _, pkg := range l.packages { if q.Matches(pkg) { result.Add(pkg) @@ -382,7 +412,7 @@ // SearchByKey looks up package by exact key reference func (l *PackageList) SearchByKey(arch, name, version string) (result *PackageList) { - result = NewPackageList() + result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0) pkg := l.packages["P"+arch+" "+name+" "+version] if pkg != nil { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/package_collection.go aptly-0.9.7/src/github.com/smira/aptly/deb/package_collection.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/package_collection.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/package_collection.go 2016-05-24 07:05:22.000000000 +0000 @@ -282,7 +282,7 @@ // Scan does full scan on all the packages func (collection *PackageCollection) Scan(q PackageQuery) (result *PackageList) { - result = NewPackageList() + result = NewPackageListWithDuplicates(true, 0) for _, key := range collection.db.KeysByPrefix([]byte("P")) { pkg, err := collection.ByKey(key) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/package_files.go aptly-0.9.7/src/github.com/smira/aptly/deb/package_files.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/package_files.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/package_files.go 2016-05-24 07:05:22.000000000 +0000 @@ -134,10 +134,16 @@ if err 
!= nil { return nil, err } + files, err = files.parseSumField(stanza["Checksums-Sha256"], func(sum *utils.ChecksumInfo, data string) { sum.SHA256 = data }) if err != nil { return nil, err } + + files, err = files.parseSumField(stanza["Checksums-Sha512"], func(sum *utils.ChecksumInfo, data string) { sum.SHA512 = data }) + if err != nil { + return nil, err + } return files, nil } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/package.go aptly-0.9.7/src/github.com/smira/aptly/deb/package.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/package.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/package.go 2016-05-24 07:05:22.000000000 +0000 @@ -76,6 +76,7 @@ MD5: strings.TrimSpace(md5), SHA1: strings.TrimSpace(input["SHA1"]), SHA256: strings.TrimSpace(input["SHA256"]), + SHA512: strings.TrimSpace(input["SHA512"]), }, }}) @@ -84,6 +85,7 @@ delete(input, "MD5Sum") delete(input, "SHA1") delete(input, "SHA256") + delete(input, "SHA512") delete(input, "Size") depends := &PackageDependencies{} @@ -405,15 +407,7 @@ return nil } - if p.contents == nil { - if p.collection == nil { - panic("contents == nil && collection == nil") - } - - p.contents = p.collection.loadContents(p, packagePool) - } - - return p.contents + return p.collection.loadContents(p, packagePool) } // CalculateContents looks up contents in package file @@ -458,7 +452,7 @@ } if p.IsSource { - md5, sha1, sha256 := make([]string, 0), make([]string, 0), make([]string, 0) + md5, sha1, sha256, sha512 := []string{}, []string{}, []string{}, []string{} for _, f := range p.Files() { if f.Checksums.MD5 != "" { @@ -470,11 +464,21 @@ if f.Checksums.SHA256 != "" { sha256 = append(sha256, fmt.Sprintf(" %s %d %s\n", f.Checksums.SHA256, f.Checksums.Size, f.Filename)) } + if f.Checksums.SHA512 != "" { + sha512 = append(sha512, fmt.Sprintf(" %s %d %s\n", f.Checksums.SHA512, f.Checksums.Size, f.Filename)) + } } result["Files"] = strings.Join(md5, "") - result["Checksums-Sha1"] = 
strings.Join(sha1, "") - result["Checksums-Sha256"] = strings.Join(sha256, "") + if len(sha1) > 0 { + result["Checksums-Sha1"] = strings.Join(sha1, "") + } + if len(sha256) > 0 { + result["Checksums-Sha256"] = strings.Join(sha256, "") + } + if len(sha512) > 0 { + result["Checksums-Sha512"] = strings.Join(sha512, "") + } } else { f := p.Files()[0] result["Filename"] = f.DownloadURL() @@ -487,6 +491,9 @@ if f.Checksums.SHA256 != "" { result["SHA256"] = f.Checksums.SHA256 } + if f.Checksums.SHA512 != "" { + result["SHA512"] = f.Checksums.SHA512 + } result["Size"] = fmt.Sprintf("%d", f.Checksums.Size) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/publish.go aptly-0.9.7/src/github.com/smira/aptly/deb/publish.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/publish.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/publish.go 2016-05-24 07:05:22.000000000 +0000 @@ -290,10 +290,11 @@ "SourceKind": p.SourceKind, "Sources": sources, "Storage": p.Storage, + "SkipContents": p.SkipContents, }) } -// String returns human-readable represenation of PublishedRepo +// String returns human-readable representation of PublishedRepo func (p *PublishedRepo) String() string { var sources = []string{} @@ -640,6 +641,9 @@ var bufWriter *bufio.Writer bufWriter, err = indexes.ReleaseIndex(component, arch, udeb).BufWriter() + if err != nil { + return fmt.Errorf("unable to get ReleaseIndex writer: %s", err) + } err = release.WriteTo(bufWriter, false, true) if err != nil { @@ -669,6 +673,7 @@ release["MD5Sum"] = "" release["SHA1"] = "" release["SHA256"] = "" + release["SHA512"] = "" release["Components"] = strings.Join(p.Components(), " ") @@ -676,6 +681,7 @@ release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path) release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path) release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path) + release["SHA512"] += fmt.Sprintf(" %s %8d %s\n", info.SHA512, 
info.Size, path) } releaseFile := indexes.ReleaseFile() diff -Nru aptly-0.9.6/src/github.com/smira/aptly/deb/remote.go aptly-0.9.7/src/github.com/smira/aptly/deb/remote.go --- aptly-0.9.6/src/github.com/smira/aptly/deb/remote.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/deb/remote.go 2016-05-24 07:05:22.000000000 +0000 @@ -146,7 +146,7 @@ return repo.Distribution == "" || (strings.HasPrefix(repo.Distribution, ".") && strings.HasSuffix(repo.Distribution, "/")) } -// NumPackages return number of packages retrived from remote repo +// NumPackages return number of packages retrieved from remote repo func (repo *RemoteRepo) NumPackages() int { if repo.packageRefs == nil { return 0 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/files/package_pool.go aptly-0.9.7/src/github.com/smira/aptly/files/package_pool.go --- aptly-0.9.6/src/github.com/smira/aptly/files/package_pool.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/files/package_pool.go 2016-05-24 07:05:22.000000000 +0000 @@ -151,7 +151,7 @@ } // create subdirs as necessary - err = os.MkdirAll(filepath.Dir(poolPath), 0755) + err = os.MkdirAll(filepath.Dir(poolPath), 0777) if err != nil { return err } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/files/public.go aptly-0.9.7/src/github.com/smira/aptly/files/public.go --- aptly-0.9.6/src/github.com/smira/aptly/files/public.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/files/public.go 2016-05-24 07:05:22.000000000 +0000 @@ -32,7 +32,7 @@ // MkDir creates directory recursively under public path func (storage *PublishedStorage) MkDir(path string) error { - return os.MkdirAll(filepath.Join(storage.rootPath, path), 0755) + return os.MkdirAll(filepath.Join(storage.rootPath, path), 0777) } // PutFile puts file into published storage at specified path @@ -87,7 +87,7 @@ baseName := filepath.Base(sourcePath) poolPath := filepath.Join(storage.rootPath, publishedDirectory) 
- err := os.MkdirAll(poolPath, 0755) + err := os.MkdirAll(poolPath, 0777) if err != nil { return err } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/Gomfile aptly-0.9.7/src/github.com/smira/aptly/Gomfile --- aptly-0.9.6/src/github.com/smira/aptly/Gomfile 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/Gomfile 2016-05-24 07:05:22.000000000 +0000 @@ -1,23 +1,26 @@ gom 'github.com/AlekSi/pointer', :commit => '5f6d527dae3d678b46fbb20331ddf44e2b841943' gom 'github.com/awalterschulze/gographviz', :commit => '20d1f693416d9be045340150094aa42035a41c9e' +gom 'github.com/aws/aws-sdk-go', :commit => 'a170e9cb76475a0da7c0326a13cc2b39e9244b3b' gom 'github.com/cheggaaa/pb', :commit => '2c1b74620cc58a81ac152ee2d322e28c806d81ed' gom 'github.com/DisposaBoy/JsonConfigReader', :commit => '33a99fdf1d5ee1f79b5077e9c06f955ad356d5f4' gom 'github.com/gin-gonic/gin', :commit => 'b1758d3bfa09e61ddbc1c9a627e936eec6a170de' +gom 'github.com/go-ini/ini', :commit => 'afbd495e5aaea13597b5e14fe514ddeaa4d76fc3' gom 'github.com/jlaffaye/ftp', :commit => 'fec71e62e457557fbe85cefc847a048d57815d76' +gom 'github.com/jmespath/go-jmespath', :commit => '0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74' gom 'github.com/julienschmidt/httprouter', :commit => '46807412fe50aaceb73bb57061c2230fd26a1640' gom 'github.com/mattn/go-shellwords', :commit => 'c7ca6f94add751566a61cf2199e1de78d4c3eee4' -gom 'github.com/mitchellh/goamz/s3', :commit => 'caaaea8b30ee15616494ee68abd5d8ebbbef05cf' gom 'github.com/mkrautz/goar', :commit => '282caa8bd9daba480b51f1d5a988714913b97aad' gom 'github.com/mxk/go-flowrate/flowrate', :commit => 'cca7078d478f8520f85629ad7c68962d31ed7682' gom 'github.com/ncw/swift', :commit => '384ef27c70645e285f8bb9d02276bf654d06027e' +gom 'github.com/smira/go-aws-auth', :commit => '0070896e9d7f4f9f2d558532b2d896ce2239992a' gom 'github.com/smira/go-xz', :commit => '0c531f070014e218b21f3cfca801cc992d52726d' gom 'github.com/smira/commander', :commit => 
'f408b00e68d5d6e21b9f18bd310978dafc604e47' gom 'github.com/smira/flag', :commit => '357ed3e599ffcbd4aeaa828e1d10da2df3ea5107' gom 'github.com/smira/go-ftp-protocol/protocol', :commit => '066b75c2b70dca7ae10b1b88b47534a3c31ccfaa' gom 'github.com/smira/go-uuid/uuid', :commit => 'ed3ca8a15a931b141440a7e98e4f716eec255f7d' -gom 'github.com/smira/lzma', :commit => '2a7c55cad4a2d02ab972a03357db5760833a49bc' +gom 'github.com/smira/lzma', :commit => '7f0af6269940baa2c938fabe73e0d7ba41205683' gom 'github.com/golang/snappy', :commit => '723cc1e459b8eea2dea4583200fd60757d40097a' -gom 'github.com/syndtr/goleveldb/leveldb', :commit => '1a9d62f03ea92815b46fcaab357cfd4df264b1a0' +gom 'github.com/syndtr/goleveldb/leveldb', :commit => '917f41c560270110ceb73c5b38be2a9127387071' gom 'github.com/ugorji/go/codec', :commit => '71c2886f5a673a35f909803f38ece5810165097b' gom 'github.com/vaughan0/go-ini', :commit => 'a98ad7ee00ec53921f08832bc06ecf7fd600e6a1' gom 'github.com/wsxiaoys/terminal/color', :commit => '5668e431776a7957528361f90ce828266c69ed08' diff -Nru aptly-0.9.6/src/github.com/smira/aptly/.goxc.json aptly-0.9.7/src/github.com/smira/aptly/.goxc.json --- aptly-0.9.6/src/github.com/smira/aptly/.goxc.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/.goxc.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,38 @@ +{ + "AppName": "aptly", + "ArtifactsDest": "xc-out/", + "TasksExclude": [ + "rmbin" + ], + "TasksAppend": [ + "bintray" + ], + "TaskSettings": { + "deb": { + "metadata": { + "maintainer": "Andrey Smirnov", + "maintainerEmail": "me@smira.ru", + "description": "Debian repository management tool" + }, + "metadata-deb": { + "License": "MIT", + "Homepage": "https://www.aptly.info/", + "Recommends": "bzip2, graphviz, xz-utils", + "Depends": "" + }, + "other-mapped-files": { + "/": "root/" + } + }, + "bintray": { + "repository": "aptly", + "subject": "smira", + "package": "aptly", + "downloadspage": "bintray.md" + } + }, + "Arch": "386 amd64", + 
"Os": "linux darwin freebsd", + "MainDirsExclude": "man,_vendor", + "ConfigVersion": "0.9" +} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/http/download.go aptly-0.9.7/src/github.com/smira/aptly/http/download.go --- aptly-0.9.6/src/github.com/smira/aptly/http/download.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/http/download.go 2016-05-24 07:05:22.000000000 +0000 @@ -167,7 +167,7 @@ return } - err = os.MkdirAll(filepath.Dir(task.destination), 0755) + err = os.MkdirAll(filepath.Dir(task.destination), 0777) if err != nil { task.result <- fmt.Errorf("%s: %s", task.url, err) return @@ -209,6 +209,8 @@ err = fmt.Errorf("%s: sha1 hash mismatch %#v != %#v", task.url, actual.SHA1, task.expected.SHA1) } else if task.expected.SHA256 != "" && actual.SHA256 != task.expected.SHA256 { err = fmt.Errorf("%s: sha256 hash mismatch %#v != %#v", task.url, actual.SHA256, task.expected.SHA256) + } else if task.expected.SHA512 != "" && actual.SHA512 != task.expected.SHA512 { + err = fmt.Errorf("%s: sha512 hash mismatch %#v != %#v", task.url, actual.SHA512, task.expected.SHA512) } if err != nil { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/Makefile aptly-0.9.7/src/github.com/smira/aptly/Makefile --- aptly-0.9.6/src/github.com/smira/aptly/Makefile 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/Makefile 2016-05-24 07:05:22.000000000 +0000 @@ -59,18 +59,6 @@ gnuplot mem.gp open mem.png -package: - rm -rf root/ - mkdir -p root/usr/bin/ root/usr/share/man/man1/ root/etc/bash_completion.d - cp $(BINPATH)/aptly root/usr/bin - cp man/aptly.1 root/usr/share/man/man1 - (cd root/etc/bash_completion.d && wget https://raw.github.com/aptly-dev/aptly-bash-completion/master/aptly) - gzip root/usr/share/man/man1/aptly.1 - fpm -s dir -t deb -n aptly -v $(VERSION) --url=http://www.aptly.info/ --license=MIT --vendor="Andrey Smirnov " \ - -f -m "Andrey Smirnov " --description="Debian repository 
management tool" --deb-recommends bzip2 \ - --deb-recommends graphviz --deb-recommends xz-utils -C root/ . - mv aptly_$(VERSION)_*.deb ~ - src-package: rm -rf aptly-$(VERSION) mkdir -p aptly-$(VERSION)/src/github.com/smira/aptly/ @@ -84,4 +72,12 @@ rm -rf aptly-$(VERSION) curl -T aptly-$(VERSION)-src.tar.bz2 -usmira:$(BINTRAY_KEY) https://api.bintray.com/content/smira/aptly/aptly/$(VERSION)/$(VERSION)/aptly-$(VERSION)-src.tar.bz2 +goxc: + rm -rf root/ + mkdir -p root/usr/share/man/man1/ root/etc/bash_completion.d + cp man/aptly.1 root/usr/share/man/man1 + (cd root/etc/bash_completion.d && wget https://raw.github.com/aptly-dev/aptly-bash-completion/master/aptly) + gzip root/usr/share/man/man1/aptly.1 + gom exec goxc -pv=$(VERSION) -max-processors=4 $(GOXC_OPTS) + .PHONY: coverage.out diff -Nru aptly-0.9.6/src/github.com/smira/aptly/man/aptly.1 aptly-0.9.7/src/github.com/smira/aptly/man/aptly.1 --- aptly-0.9.6/src/github.com/smira/aptly/man/aptly.1 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/man/aptly.1 2016-05-24 07:05:22.000000000 +0000 @@ -1,7 +1,7 @@ .\" generated with Ronn/v0.7.3 .\" http://github.com/rtomayko/ronn/tree/0.7.3 . -.TH "APTLY" "1" "January 2016" "" "" +.TH "APTLY" "1" "March 2016" "" "" . .SH "NAME" \fBaptly\fR \- Debian repository management tool @@ -48,6 +48,7 @@ "downloadSourcePackages": false, "ppaDistributorID": "ubuntu", "ppaCodename": "", + "skipContentsPublishing": false, "S3PublishEndpoints": { "test": { "region": "us\-east\-1", @@ -60,7 +61,9 @@ "storageClass": "", "encryptionMethod": "", "plusWorkaround": false, - "disableMultiDel": false + "disableMultiDel": false, + "forceSigV2": false, + "debug": false } }, "SwiftPublishEndpoints": { @@ -1818,5 +1821,8 @@ .IP "\[ci]" 4 x539 (https://github\.com/x539) . +.IP "\[ci]" 4 +Phil Frost (https://github\.com/bitglue) +. 
.IP "" 0 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/man/aptly.1.ronn.tmpl aptly-0.9.7/src/github.com/smira/aptly/man/aptly.1.ronn.tmpl --- aptly-0.9.6/src/github.com/smira/aptly/man/aptly.1.ronn.tmpl 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/man/aptly.1.ronn.tmpl 2016-05-24 07:05:22.000000000 +0000 @@ -40,6 +40,7 @@ "downloadSourcePackages": false, "ppaDistributorID": "ubuntu", "ppaCodename": "", + "skipContentsPublishing": false, "S3PublishEndpoints": { "test": { "region": "us-east-1", @@ -52,7 +53,9 @@ "storageClass": "", "encryptionMethod": "", "plusWorkaround": false, - "disableMultiDel": false + "disableMultiDel": false, + "forceSigV2": false, + "debug": false } }, "SwiftPublishEndpoints": { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/README.rst aptly-0.9.7/src/github.com/smira/aptly/README.rst --- aptly-0.9.6/src/github.com/smira/aptly/README.rst 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/README.rst 2016-05-24 07:05:22.000000000 +0000 @@ -48,7 +48,7 @@ And import key that is used to sign the release:: - $ apt-key adv --keyserver keys.gnupg.net --recv-keys E083A3782A194991 + $ apt-key adv --keyserver keys.gnupg.net --recv-keys 9E3E53F19C7DE460 After that you can install aptly as any other software package:: @@ -90,6 +90,7 @@ Docker: - `Docker container `_ with aptly inside by Mike Purvis +- `Docker container `_ with aptly and nginx by Bryan Hong With configuration management systems: @@ -104,3 +105,7 @@ CLI for aptly API: - `Ruby aptly CLI/library `_ by Zane Williamson + +Scala sbt: + +- `sbt aptly plugin `_ by Arup Malakar diff -Nru aptly-0.9.6/src/github.com/smira/aptly/s3/public.go aptly-0.9.7/src/github.com/smira/aptly/s3/public.go --- aptly-0.9.6/src/github.com/smira/aptly/s3/public.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/s3/public.go 2016-05-24 07:05:22.000000000 +0000 @@ -2,11 +2,15 @@ import ( "fmt" - 
"github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/s3" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" "github.com/smira/aptly/aptly" "github.com/smira/aptly/files" - "net/http" + "github.com/smira/go-aws-auth" "os" "path/filepath" "strings" @@ -15,8 +19,9 @@ // PublishedStorage abstract file system with published files (actually hosted on S3) type PublishedStorage struct { s3 *s3.S3 - bucket *s3.Bucket - acl s3.ACL + config *aws.Config + bucket string + acl string prefix string storageClass string encryptionMethod string @@ -31,8 +36,11 @@ ) // NewPublishedStorageRaw creates published storage from raw aws credentials -func NewPublishedStorageRaw(auth aws.Auth, region aws.Region, bucket, defaultACL, prefix, - storageClass, encryptionMethod string, plusWorkaround, disabledMultiDel bool) (*PublishedStorage, error) { +func NewPublishedStorageRaw( + bucket, defaultACL, prefix, storageClass, encryptionMethod string, + plusWorkaround, disabledMultiDel bool, + config *aws.Config, +) (*PublishedStorage, error) { if defaultACL == "" { defaultACL = "private" } @@ -41,9 +49,13 @@ storageClass = "" } + sess := session.New(config) + result := &PublishedStorage{ - s3: s3.New(auth, region), - acl: s3.ACL(defaultACL), + s3: s3.New(sess), + bucket: bucket, + config: config, + acl: defaultACL, prefix: prefix, storageClass: storageClass, encryptionMethod: encryptionMethod, @@ -51,48 +63,56 @@ disableMultiDel: disabledMultiDel, } - result.s3.HTTPClient = func() *http.Client { - return RetryingClient - } - result.bucket = result.s3.Bucket(bucket) - return result, nil } // NewPublishedStorage creates new instance of PublishedStorage with specified S3 access // keys, region and bucket name -func NewPublishedStorage(accessKey, secretKey, region, endpoint, bucket, defaultACL, 
prefix, - storageClass, encryptionMethod string, plusWorkaround, disableMultiDel bool) (*PublishedStorage, error) { - auth, err := aws.GetAuth(accessKey, secretKey) - if err != nil { - return nil, err +func NewPublishedStorage(accessKey, secretKey, sessionToken, region, endpoint, bucket, defaultACL, prefix, + storageClass, encryptionMethod string, plusWorkaround, disableMultiDel, forceSigV2, debug bool) (*PublishedStorage, error) { + + config := &aws.Config{ + Region: aws.String(region), } - var awsRegion aws.Region + if endpoint != "" { + config = config.WithEndpoint(endpoint).WithS3ForcePathStyle(true) + } - if endpoint == "" { - var ok bool + if accessKey != "" { + config.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken) + } - awsRegion, ok = aws.Regions[region] - if !ok { - return nil, fmt.Errorf("unknown region: %#v", region) - } - } else { - awsRegion = aws.Region{ - Name: region, - S3Endpoint: endpoint, - S3LocationConstraint: true, - S3LowercaseBucket: true, + if debug { + config = config.WithLogLevel(aws.LogDebug) + } + + result, err := NewPublishedStorageRaw(bucket, defaultACL, prefix, storageClass, + encryptionMethod, plusWorkaround, disableMultiDel, config) + + if err == nil && forceSigV2 { + creds := []awsauth.Credentials{} + + if accessKey != "" { + creds = append(creds, awsauth.Credentials{ + AccessKeyID: accessKey, + SecretAccessKey: secretKey, + }) } + + result.s3.Handlers.Sign.Clear() + result.s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + result.s3.Handlers.Sign.PushBack(func(req *request.Request) { + awsauth.SignS3(req.HTTPRequest, creds...) 
+ }) } - return NewPublishedStorageRaw(auth, awsRegion, bucket, defaultACL, prefix, storageClass, encryptionMethod, - plusWorkaround, disableMultiDel) + return result, err } // String func (storage *PublishedStorage) String() string { - return fmt.Sprintf("S3: %s:%s/%s", storage.s3.Region.Name, storage.bucket.Name, storage.prefix) + return fmt.Sprintf("S3: %s:%s/%s", *storage.config.Region, storage.bucket, storage.prefix) } // MkDir creates directory recursively under public path @@ -106,7 +126,6 @@ var ( source *os.File err error - fi os.FileInfo ) source, err = os.Open(sourceFilename) if err != nil { @@ -114,22 +133,20 @@ } defer source.Close() - fi, err = source.Stat() - if err != nil { - return err - } - - headers := map[string][]string{ - "Content-Type": {"binary/octet-stream"}, + params := &s3.PutObjectInput{ + Bucket: aws.String(storage.bucket), + Key: aws.String(filepath.Join(storage.prefix, path)), + Body: source, + ACL: aws.String(storage.acl), } if storage.storageClass != "" { - headers["x-amz-storage-class"] = []string{storage.storageClass} + params.StorageClass = aws.String(storage.storageClass) } if storage.encryptionMethod != "" { - headers["x-amz-server-side-encryption"] = []string{storage.encryptionMethod} + params.ServerSideEncryption = aws.String(storage.encryptionMethod) } - err = storage.bucket.PutReaderHeader(filepath.Join(storage.prefix, path), source, fi.Size(), headers, storage.acl) + _, err = storage.s3.PutObject(params) if err != nil { return fmt.Errorf("error uploading %s to %s: %s", sourceFilename, storage, err) } @@ -142,7 +159,11 @@ // Remove removes single file under public path func (storage *PublishedStorage) Remove(path string) error { - err := storage.bucket.Del(filepath.Join(storage.prefix, path)) + params := &s3.DeleteObjectInput{ + Bucket: aws.String(storage.bucket), + Key: aws.String(path), + } + _, err := storage.s3.DeleteObject(params) if err != nil { return fmt.Errorf("error deleting %s from %s: %s", path, storage, err) } 
@@ -165,7 +186,11 @@ if storage.disableMultiDel { for i := range filelist { - err = storage.bucket.Del(filepath.Join(storage.prefix, path, filelist[i])) + params := &s3.DeleteObjectInput{ + Bucket: aws.String(storage.bucket), + Key: aws.String(filepath.Join(storage.prefix, path, filelist[i])), + } + _, err := storage.s3.DeleteObject(params) if err != nil { return fmt.Errorf("error deleting path %s from %s: %s", filelist[i], storage, err) } @@ -180,13 +205,23 @@ } else { part = filelist[i*page : (i+1)*page] } - paths := make([]string, len(part)) + paths := make([]*s3.ObjectIdentifier, len(part)) for i := range part { - paths[i] = filepath.Join(storage.prefix, path, part[i]) + paths[i] = &s3.ObjectIdentifier{ + Key: aws.String(filepath.Join(storage.prefix, path, part[i])), + } } - err = storage.bucket.MultiDel(paths) + params := &s3.DeleteObjectsInput{ + Bucket: aws.String(storage.bucket), + Delete: &s3.Delete{ + Objects: paths, + Quiet: aws.Bool(true), + }, + } + + _, err := storage.s3.DeleteObjects(params) if err != nil { return fmt.Errorf("error deleting multiple paths from %s: %s", storage, err) } @@ -259,44 +294,38 @@ func (storage *PublishedStorage) internalFilelist(prefix string, hidePlusWorkaround bool) (paths []string, md5s []string, err error) { paths = make([]string, 0, 1024) md5s = make([]string, 0, 1024) - marker := "" prefix = filepath.Join(storage.prefix, prefix) if prefix != "" { prefix += "/" } - for { - contents, err := storage.bucket.List(prefix, "", marker, 1000) - if err != nil { - return nil, nil, fmt.Errorf("error listing under prefix %s in %s: %s", prefix, storage, err) - } - lastKey := "" + + params := &s3.ListObjectsInput{ + Bucket: aws.String(storage.bucket), + Prefix: aws.String(prefix), + MaxKeys: aws.Int64(1000), + } + + err = storage.s3.ListObjectsPages(params, func(contents *s3.ListObjectsOutput, lastPage bool) bool { for _, key := range contents.Contents { - lastKey = key.Key - if storage.plusWorkaround && hidePlusWorkaround && 
strings.Index(lastKey, " ") != -1 { + if storage.plusWorkaround && hidePlusWorkaround && strings.Index(*key.Key, " ") != -1 { // if we use plusWorkaround, we want to hide those duplicates /// from listing continue } if prefix == "" { - paths = append(paths, key.Key) + paths = append(paths, *key.Key) } else { - paths = append(paths, key.Key[len(prefix):]) + paths = append(paths, (*key.Key)[len(prefix):]) } - md5s = append(md5s, strings.Replace(key.ETag, "\"", "", -1)) - } - if contents.IsTruncated { - marker = contents.NextMarker - if marker == "" { - // From the s3 docs: If response does not include the - // NextMarker and it is truncated, you can use the value of the - // last Key in the response as the marker in the subsequent - // request to get the next set of object keys. - marker = lastKey - } - } else { - break + md5s = append(md5s, strings.Replace(*key.ETag, "\"", "", -1)) } + + return true + }) + + if err != nil { + return nil, nil, fmt.Errorf("error listing under prefix %s in %s: %s", prefix, storage, err) } return paths, md5s, nil @@ -304,7 +333,16 @@ // RenameFile renames (moves) file func (storage *PublishedStorage) RenameFile(oldName, newName string) error { - err := storage.bucket.Copy(filepath.Join(storage.prefix, oldName), filepath.Join(storage.prefix, newName), storage.acl) + source := fmt.Sprintf("/%s/%s", storage.bucket, filepath.Join(storage.prefix, oldName)) + + params := &s3.CopyObjectInput{ + Bucket: aws.String(storage.bucket), + CopySource: aws.String(source), + Key: aws.String(filepath.Join(storage.prefix, newName)), + ACL: aws.String(storage.acl), + } + + _, err := storage.s3.CopyObject(params) if err != nil { return fmt.Errorf("error copying %s -> %s in %s: %s", oldName, newName, storage, err) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/s3/public_test.go aptly-0.9.7/src/github.com/smira/aptly/s3/public_test.go --- aptly-0.9.6/src/github.com/smira/aptly/s3/public_test.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/s3/public_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -1,18 +1,21 @@ package s3 import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/s3/s3test" - "github.com/smira/aptly/files" + "bytes" "io/ioutil" "os" "path/filepath" . "gopkg.in/check.v1" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/smira/aptly/files" ) type PublishedStorageSuite struct { - srv *s3test.Server + srv *Server storage, prefixedStorage *PublishedStorage } @@ -20,18 +23,16 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) { var err error - s.srv, err = s3test.NewServer(&s3test.Config{}) + s.srv, err = NewServer(&Config{}) c.Assert(err, IsNil) c.Assert(s.srv, NotNil) - auth, _ := aws.GetAuth("aa", "bb") - s.storage, err = NewPublishedStorageRaw(auth, aws.Region{Name: "test-1", S3Endpoint: s.srv.URL(), S3LocationConstraint: true}, "test", "", "", "", "", false, true) + s.storage, err = NewPublishedStorage("aa", "bb", "", "test-1", s.srv.URL(), "test", "", "", "", "", false, true, false, false) c.Assert(err, IsNil) - - s.prefixedStorage, err = NewPublishedStorageRaw(auth, aws.Region{Name: "test-1", S3Endpoint: s.srv.URL(), S3LocationConstraint: true}, "test", "", "lala", "", "", false, true) + s.prefixedStorage, err = NewPublishedStorage("aa", "bb", "", "test-1", s.srv.URL(), "test", "", "lala", "", "", false, true, false, false) c.Assert(err, IsNil) - err = s.storage.s3.Bucket("test").PutBucket("private") + _, err = s.storage.s3.CreateBucket(&s3.CreateBucketInput{Bucket: aws.String("test")}) c.Assert(err, IsNil) } @@ -39,10 +40,37 @@ s.srv.Quit() } -func (s *PublishedStorageSuite) TestNewPublishedStorage(c *C) { - stor, err := NewPublishedStorage("aa", "bbb", "", "", "", "", "", "", "", false, false) - c.Check(stor, IsNil) - c.Check(err, ErrorMatches, "unknown region: .*") +func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte { + resp, err := 
s.storage.s3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(s.storage.bucket), + Key: aws.String(path), + }) + c.Assert(err, IsNil) + + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + c.Assert(err, IsNil) + + return body +} + +func (s *PublishedStorageSuite) AssertNoFile(c *C, path string) { + _, err := s.storage.s3.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(s.storage.bucket), + Key: aws.String(path), + }) + c.Assert(err, ErrorMatches, ".*\n.*status code: 404.*") +} + +func (s *PublishedStorageSuite) PutFile(c *C, path string, data []byte) { + _, err := s.storage.s3.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(s.storage.bucket), + Key: aws.String(path), + Body: bytes.NewReader(data), + ContentType: aws.String("binary/octet-stream"), + ACL: aws.String("private"), + }) + c.Assert(err, IsNil) } func (s *PublishedStorageSuite) TestPutFile(c *C) { @@ -53,16 +81,12 @@ err = s.storage.PutFile("a/b.txt", filepath.Join(dir, "a")) c.Check(err, IsNil) - data, err := s.storage.bucket.Get("a/b.txt") - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte("welcome to s3!")) + c.Check(s.GetFile(c, "a/b.txt"), DeepEquals, []byte("welcome to s3!")) err = s.prefixedStorage.PutFile("a/b.txt", filepath.Join(dir, "a")) c.Check(err, IsNil) - data, err = s.storage.bucket.Get("lala/a/b.txt") - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte("welcome to s3!")) + c.Check(s.GetFile(c, "lala/a/b.txt"), DeepEquals, []byte("welcome to s3!")) } func (s *PublishedStorageSuite) TestPutFilePlusWorkaround(c *C) { @@ -75,20 +99,15 @@ err = s.storage.PutFile("a/b+c.txt", filepath.Join(dir, "a")) c.Check(err, IsNil) - data, err := s.storage.bucket.Get("a/b+c.txt") - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte("welcome to s3!")) + c.Check(s.GetFile(c, "a/b+c.txt"), DeepEquals, []byte("welcome to s3!")) - data, err = s.storage.bucket.Get("a/b c.txt") - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte("welcome to s3!")) + c.Check(s.GetFile(c, "a/b 
c.txt"), DeepEquals, []byte("welcome to s3!")) } func (s *PublishedStorageSuite) TestFilelist(c *C) { paths := []string{"a", "b", "c", "testa", "test/a", "test/b", "lala/a", "lala/b", "lala/c"} for _, path := range paths { - err := s.storage.bucket.Put(path, []byte("test"), "binary/octet-stream", "private") - c.Check(err, IsNil) + s.PutFile(c, path, []byte("test")) } list, err := s.storage.Filelist("") @@ -114,8 +133,7 @@ paths := []string{"a", "b", "c", "testa", "test/a+1", "test/a 1", "lala/a+b", "lala/a b", "lala/c"} for _, path := range paths { - err := s.storage.bucket.Put(path, []byte("test"), "binary/octet-stream", "private") - c.Check(err, IsNil) + s.PutFile(c, path, []byte("test")) } list, err := s.storage.Filelist("") @@ -136,40 +154,30 @@ } func (s *PublishedStorageSuite) TestRemove(c *C) { - err := s.storage.bucket.Put("a/b", []byte("test"), "binary/octet-stream", "private") - c.Check(err, IsNil) + s.PutFile(c, "a/b", []byte("test")) - err = s.storage.Remove("a/b") + err := s.storage.Remove("a/b") c.Check(err, IsNil) - _, err = s.storage.bucket.Get("a/b") - c.Check(err, ErrorMatches, "The specified key does not exist.") + s.AssertNoFile(c, "a/b") } func (s *PublishedStorageSuite) TestRemovePlusWorkaround(c *C) { s.storage.plusWorkaround = true - err := s.storage.bucket.Put("a/b+c", []byte("test"), "binary/octet-stream", "private") - c.Check(err, IsNil) - - err = s.storage.bucket.Put("a/b", []byte("test"), "binary/octet-stream", "private") - c.Check(err, IsNil) + s.PutFile(c, "a/b+c", []byte("test")) + s.PutFile(c, "a/b", []byte("test")) - err = s.storage.Remove("a/b+c") + err := s.storage.Remove("a/b+c") c.Check(err, IsNil) - _, err = s.storage.bucket.Get("a/b+c") - c.Check(err, ErrorMatches, "The specified key does not exist.") - - _, err = s.storage.bucket.Get("a/b c") - c.Check(err, ErrorMatches, "The specified key does not exist.") + s.AssertNoFile(c, "a/b+c") + s.AssertNoFile(c, "a/b c") err = s.storage.Remove("a/b") c.Check(err, IsNil) - _, err = 
s.storage.bucket.Get("a/b") - c.Check(err, ErrorMatches, "The specified key does not exist.") - + s.AssertNoFile(c, "a/b") } func (s *PublishedStorageSuite) TestRemoveDirs(c *C) { @@ -177,8 +185,7 @@ paths := []string{"a", "b", "c", "testa", "test/a+1", "test/a 1", "lala/a+b", "lala/a b", "lala/c"} for _, path := range paths { - err := s.storage.bucket.Put(path, []byte("test"), "binary/octet-stream", "private") - c.Check(err, IsNil) + s.PutFile(c, path, []byte("test")) } err := s.storage.RemoveDirs("test", nil) @@ -192,8 +199,7 @@ func (s *PublishedStorageSuite) TestRemoveDirsPlusWorkaround(c *C) { paths := []string{"a", "b", "c", "testa", "test/a", "test/b", "lala/a", "lala/b", "lala/c"} for _, path := range paths { - err := s.storage.bucket.Put(path, []byte("test"), "binary/octet-stream", "private") - c.Check(err, IsNil) + s.PutFile(c, path, []byte("test")) } err := s.storage.RemoveDirs("test", nil) @@ -230,31 +236,23 @@ err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, "c1df1da7a1ce305a3b60af9d5733ac1d", false) c.Check(err, IsNil) - data, err := s.storage.bucket.Get("pool/main/m/mars-invaders/mars-invaders_1.03.deb") - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte("Contents")) + c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents")) // duplicate link from pool err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, "c1df1da7a1ce305a3b60af9d5733ac1d", false) c.Check(err, IsNil) - data, err = s.storage.bucket.Get("pool/main/m/mars-invaders/mars-invaders_1.03.deb") - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte("Contents")) + c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents")) // link from pool with conflict err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath2, "e9dfd31cc505d51fc26975250750deab", 
false) c.Check(err, ErrorMatches, ".*file already exists and is different.*") - data, err = s.storage.bucket.Get("pool/main/m/mars-invaders/mars-invaders_1.03.deb") - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte("Contents")) + c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents")) // link from pool with conflict and force err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath2, "e9dfd31cc505d51fc26975250750deab", true) c.Check(err, IsNil) - data, err = s.storage.bucket.Get("pool/main/m/mars-invaders/mars-invaders_1.03.deb") - c.Check(err, IsNil) - c.Check(data, DeepEquals, []byte("Spam")) + c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Spam")) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/s3/retry.go aptly-0.9.7/src/github.com/smira/aptly/s3/retry.go --- aptly-0.9.6/src/github.com/smira/aptly/s3/retry.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/s3/retry.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -package s3 - -// This was taken from github.com/mitchellh/goamz/amz/client.go: - -import ( - "math" - "net" - "net/http" - "time" -) - -type RetryableFunc func(*http.Request, *http.Response, error) bool -type WaitFunc func(try int) -type DeadlineFunc func() time.Time - -type ResilientTransport struct { - // Timeout is the maximum amount of time a dial will wait for - // a connect to complete. - // - // The default is no timeout. - // - // With or without a timeout, the operating system may impose - // its own earlier timeout. For instance, TCP timeouts are - // often around 3 minutes. - DialTimeout time.Duration - - // MaxTries, if non-zero, specifies the number of times we will retry on - // failure. Retries are only attempted for temporary network errors or known - // safe failures. 
- MaxTries int - ShouldRetry RetryableFunc - Wait WaitFunc - transport *http.Transport -} - -// Convenience method for creating an http client -func NewClient(rt *ResilientTransport) *http.Client { - rt.transport = &http.Transport{ - Dial: func(netw, addr string) (net.Conn, error) { - c, err := net.DialTimeout(netw, addr, rt.DialTimeout) - if err != nil { - return nil, err - } - return c, nil - }, - DisableKeepAlives: true, - Proxy: http.ProxyFromEnvironment, - } - // TODO: Would be nice is ResilientTransport allowed clients to initialize - // with http.Transport attributes. - return &http.Client{ - Transport: rt, - } -} - -var retryingTransport = &ResilientTransport{ - DialTimeout: 15 * time.Second, - MaxTries: 3, - ShouldRetry: awsRetry, - Wait: ExpBackoff, -} - -// Exported default client -var RetryingClient = NewClient(retryingTransport) - -func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.tries(req) -} - -// Retry a request a maximum of t.MaxTries times. -// We'll only retry if the proper criteria are met. -// If a wait function is specified, wait that amount of time -// In between requests. -func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { - for try := 0; try < t.MaxTries; try += 1 { - res, err = t.transport.RoundTrip(req) - - if !t.ShouldRetry(req, res, err) { - break - } - if try == (t.MaxTries - 1) { - break - } - if res != nil { - res.Body.Close() - } - if t.Wait != nil { - t.Wait(try) - } - } - - return -} - -func ExpBackoff(try int) { - time.Sleep(100 * time.Millisecond * - time.Duration(math.Exp2(float64(try)))) -} - -// Decide if we should retry a request. -// In general, the criteria for retrying a request is described here -// http://docs.aws.amazon.com/general/latest/gr/api-retries.html -func awsRetry(req *http.Request, res *http.Response, err error) bool { - retry := false - - // Retry if there's a temporary network error. 
- if neterr, ok := err.(net.Error); ok { - if neterr.Temporary() { - retry = true - } - } - - // Retry if we get a 5xx series error. - if res != nil { - if res.StatusCode >= 500 && res.StatusCode < 600 { - retry = true - } - } - - return retry -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/s3/server_test.go aptly-0.9.7/src/github.com/smira/aptly/s3/server_test.go --- aptly-0.9.6/src/github.com/smira/aptly/s3/server_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/s3/server_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,699 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const debug = true + +type s3Error struct { + statusCode int + XMLName struct{} `xml:"Error"` + Code string + Message string + BucketName string + RequestId string + HostId string +} + +type action struct { + srv *Server + w http.ResponseWriter + req *http.Request + reqId string +} + +// Config controls the internal behaviour of the Server. A nil config is the default +// and behaves as if all configurations assume their default behaviour. Once passed +// to NewServer, the configuration must not be modified. +type Config struct { + // Send409Conflict controls how the Server will respond to calls to PUT on a + // previously existing bucket. The default is false, and corresponds to the + // us-east-1 s3 enpoint. Setting this value to true emulates the behaviour of + // all other regions. + // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html + Send409Conflict bool +} + +func (c *Config) send409Conflict() bool { + if c != nil { + return c.Send409Conflict + } + return false +} + +// Server is a fake S3 server for testing purposes. +// All of the data for the server is kept in memory. 
+type Server struct { + url string + reqId int + listener net.Listener + mu sync.Mutex + buckets map[string]*bucket + config *Config +} + +type bucket struct { + name string + acl string + ctime time.Time + objects map[string]*object +} + +type object struct { + name string + mtime time.Time + meta http.Header // metadata to return with requests. + checksum []byte // also held as Content-MD5 in meta. + data []byte +} + +// A resource encapsulates the subject of an HTTP request. +// The resource referred to may or may not exist +// when the request is made. +type resource interface { + put(a *action) interface{} + get(a *action) interface{} + post(a *action) interface{} + delete(a *action) interface{} +} + +func NewServer(config *Config) (*Server, error) { + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, fmt.Errorf("cannot listen on localhost: %v", err) + } + srv := &Server{ + listener: l, + url: "http://" + l.Addr().String(), + buckets: make(map[string]*bucket), + config: config, + } + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + srv.serveHTTP(w, req) + })) + return srv, nil +} + +// Quit closes down the server. +func (srv *Server) Quit() { + srv.listener.Close() +} + +// URL returns a URL for the server. +func (srv *Server) URL() string { + return srv.url +} + +func fatalError(code int, codeStr string, errf string, a ...interface{}) { + panic(&s3Error{ + statusCode: code, + Code: codeStr, + Message: fmt.Sprintf(errf, a...), + }) +} + +// serveHTTP serves the S3 protocol. +func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) { + // ignore error from ParseForm as it's usually spurious. 
+ req.ParseForm() + + srv.mu.Lock() + defer srv.mu.Unlock() + + if debug { + log.Printf("s3test %q %q", req.Method, req.URL) + } + a := &action{ + srv: srv, + w: w, + req: req, + reqId: fmt.Sprintf("%09X", srv.reqId), + } + srv.reqId++ + + var r resource + defer func() { + switch err := recover().(type) { + case *s3Error: + switch r := r.(type) { + case objectResource: + err.BucketName = r.bucket.name + case bucketResource: + err.BucketName = r.name + } + err.RequestId = a.reqId + // TODO HostId + w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`) + w.WriteHeader(err.statusCode) + xmlMarshal(w, err) + case nil: + default: + panic(err) + } + }() + + r = srv.resourceForURL(req.URL) + + var resp interface{} + switch req.Method { + case "PUT": + resp = r.put(a) + case "GET", "HEAD": + resp = r.get(a) + case "DELETE": + resp = r.delete(a) + case "POST": + resp = r.post(a) + default: + fatalError(400, "MethodNotAllowed", "unknown http request method %q", req.Method) + } + if resp != nil && req.Method != "HEAD" { + xmlMarshal(w, resp) + } +} + +// xmlMarshal is the same as xml.Marshal except that +// it panics on error. The marshalling should not fail, +// but we want to know if it does. +func xmlMarshal(w io.Writer, x interface{}) { + if err := xml.NewEncoder(w).Encode(x); err != nil { + panic(fmt.Errorf("error marshalling %#v: %v", x, err)) + } +} + +// In a fully implemented test server, each of these would have +// its own resource type. 
+var unimplementedBucketResourceNames = map[string]bool{ + "acl": true, + "lifecycle": true, + "policy": true, + "location": true, + "logging": true, + "notification": true, + "versions": true, + "requestPayment": true, + "versioning": true, + "website": true, + "uploads": true, +} + +var unimplementedObjectResourceNames = map[string]bool{ + "uploadId": true, + "acl": true, + "torrent": true, + "uploads": true, +} + +var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?") + +// resourceForURL returns a resource object for the given URL. +func (srv *Server) resourceForURL(u *url.URL) (r resource) { + + if u.Path == "/" { + return serviceResource{ + buckets: srv.buckets, + } + } + + m := pathRegexp.FindStringSubmatch(u.Path) + if m == nil { + fatalError(404, "InvalidURI", "Couldn't parse the specified URI") + } + bucketName := m[2] + objectName := m[4] + if bucketName == "" { + return nullResource{} // root + } + b := bucketResource{ + name: bucketName, + bucket: srv.buckets[bucketName], + } + q := u.Query() + if objectName == "" { + for name := range q { + if unimplementedBucketResourceNames[name] { + return nullResource{} + } + } + return b + + } + if b.bucket == nil { + fatalError(404, "NoSuchBucket", "The specified bucket does not exist") + } + objr := objectResource{ + name: objectName, + version: q.Get("versionId"), + bucket: b.bucket, + } + for name := range q { + if unimplementedObjectResourceNames[name] { + return nullResource{} + } + } + if obj := objr.bucket.objects[objr.name]; obj != nil { + objr.object = obj + } + return objr +} + +// nullResource has error stubs for all resource methods. 
+type nullResource struct{} + +func notAllowed() interface{} { + fatalError(400, "MethodNotAllowed", "The specified method is not allowed against this resource") + return nil +} + +func (nullResource) put(a *action) interface{} { return notAllowed() } +func (nullResource) get(a *action) interface{} { return notAllowed() } +func (nullResource) post(a *action) interface{} { return notAllowed() } +func (nullResource) delete(a *action) interface{} { return notAllowed() } + +const timeFormat = "2006-01-02T15:04:05Z" + +type serviceResource struct { + buckets map[string]*bucket +} + +func (serviceResource) put(a *action) interface{} { return notAllowed() } +func (serviceResource) post(a *action) interface{} { return notAllowed() } +func (serviceResource) delete(a *action) interface{} { return notAllowed() } + +// GET on an s3 service lists the buckets. +// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html +func (r serviceResource) get(a *action) interface{} { + type respBucket struct { + Name string + } + + type response struct { + Buckets []respBucket `xml:">Bucket"` + } + + resp := response{} + + for _, bucketPtr := range r.buckets { + bkt := respBucket{ + Name: bucketPtr.name, + } + resp.Buckets = append(resp.Buckets, bkt) + } + + return &resp +} + +type bucketResource struct { + name string + bucket *bucket // non-nil if the bucket already exists. +} + +type Owner struct { + ID string + DisplayName string +} + +// The ListResp type holds the results of a List bucket operation. +type ListResp struct { + Name string + Prefix string + Delimiter string + Marker string + NextMarker string + MaxKeys int + // IsTruncated is true if the results have been truncated because + // there are more keys and prefixes than can fit in MaxKeys. + // N.B. 
this is the opposite sense to that documented (incorrectly) in + // http://goo.gl/YjQTc + IsTruncated bool + Contents []Key + CommonPrefixes []string `xml:">Prefix"` +} + +// The Key type represents an item stored in an S3 bucket. +type Key struct { + Key string + LastModified string + Size int64 + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string + StorageClass string + Owner Owner +} + +// GET on a bucket lists the objects in the bucket. +// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html +func (r bucketResource) get(a *action) interface{} { + if r.bucket == nil { + fatalError(404, "NoSuchBucket", "The specified bucket does not exist") + } + delimiter := a.req.Form.Get("delimiter") + marker := a.req.Form.Get("marker") + maxKeys := -1 + if s := a.req.Form.Get("max-keys"); s != "" { + i, err := strconv.Atoi(s) + if err != nil || i < 0 { + fatalError(400, "invalid value for max-keys: %q", s) + } + maxKeys = i + } + prefix := a.req.Form.Get("prefix") + a.w.Header().Set("Content-Type", "application/xml") + + if a.req.Method == "HEAD" { + return nil + } + + var objs orderedObjects + + // first get all matching objects and arrange them in alphabetical order. 
+ for name, obj := range r.bucket.objects { + if strings.HasPrefix(name, prefix) { + objs = append(objs, obj) + } + } + sort.Sort(objs) + + if maxKeys <= 0 { + maxKeys = 1000 + } + resp := &ListResp{ + Name: r.bucket.name, + Prefix: prefix, + Delimiter: delimiter, + Marker: marker, + MaxKeys: maxKeys, + } + + var prefixes []string + for _, obj := range objs { + if !strings.HasPrefix(obj.name, prefix) { + continue + } + name := obj.name + isPrefix := false + if delimiter != "" { + if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { + name = obj.name[:len(prefix)+i+len(delimiter)] + if prefixes != nil && prefixes[len(prefixes)-1] == name { + continue + } + isPrefix = true + } + } + if name <= marker { + continue + } + if len(resp.Contents)+len(prefixes) >= maxKeys { + resp.IsTruncated = true + break + } + if isPrefix { + prefixes = append(prefixes, name) + } else { + // Contents contains only keys not found in CommonPrefixes + resp.Contents = append(resp.Contents, obj.s3Key()) + } + } + resp.CommonPrefixes = prefixes + return resp +} + +// orderedObjects holds a slice of objects that can be sorted +// by name. +type orderedObjects []*object + +func (s orderedObjects) Len() int { + return len(s) +} +func (s orderedObjects) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s orderedObjects) Less(i, j int) bool { + return s[i].name < s[j].name +} + +func (obj *object) s3Key() Key { + return Key{ + Key: obj.name, + LastModified: obj.mtime.Format(timeFormat), + Size: int64(len(obj.data)), + ETag: fmt.Sprintf(`"%x"`, obj.checksum), + // TODO StorageClass + // TODO Owner + } +} + +// DELETE on a bucket deletes the bucket if it's not empty. 
+func (r bucketResource) delete(a *action) interface{} { + b := r.bucket + if b == nil { + fatalError(404, "NoSuchBucket", "The specified bucket does not exist") + } + if len(b.objects) > 0 { + fatalError(400, "BucketNotEmpty", "The bucket you tried to delete is not empty") + } + delete(a.srv.buckets, b.name) + return nil +} + +// PUT on a bucket creates the bucket. +// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html +func (r bucketResource) put(a *action) interface{} { + var created bool + if r.bucket == nil { + if !validBucketName(r.name) { + fatalError(400, "InvalidBucketName", "The specified bucket is not valid") + } + if loc := locationConstraint(a); loc == "" { + fatalError(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.") + } + // TODO validate acl + r.bucket = &bucket{ + name: r.name, + // TODO default acl + objects: make(map[string]*object), + } + a.srv.buckets[r.name] = r.bucket + created = true + } + if !created && a.srv.config.send409Conflict() { + fatalError(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.") + } + r.bucket.acl = a.req.Header.Get("x-amz-acl") + return nil +} + +func (bucketResource) post(a *action) interface{} { + fatalError(400, "Method", "bucket POST method not available") + return nil +} + +// validBucketName returns whether name is a valid bucket name. +// Here are the rules, from: +// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html +// +// Can contain lowercase letters, numbers, periods (.), underscores (_), +// and dashes (-). You can use uppercase letters for buckets only in the +// US Standard region. 
+// +// Must start with a number or letter +// +// Must be between 3 and 255 characters long +// +// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4) +// but the real S3 server does not seem to check that rule, so we will not +// check it either. +// +func validBucketName(name string) bool { + if len(name) < 3 || len(name) > 255 { + return false + } + r := name[0] + if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') { + return false + } + for _, r := range name { + switch { + case r >= '0' && r <= '9': + case r >= 'a' && r <= 'z': + case r == '_' || r == '-': + case r == '.': + default: + return false + } + } + return true +} + +var responseParams = map[string]bool{ + "content-type": true, + "content-language": true, + "expires": true, + "cache-control": true, + "content-disposition": true, + "content-encoding": true, +} + +type objectResource struct { + name string + version string + bucket *bucket // always non-nil. + object *object // may be nil. +} + +// GET on an object gets the contents of the object. +// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html +func (objr objectResource) get(a *action) interface{} { + obj := objr.object + if obj == nil { + fatalError(404, "NoSuchKey", "The specified key does not exist.") + } + h := a.w.Header() + // add metadata + for name, d := range obj.meta { + h[name] = d + } + // override header values in response to request parameters. + for name, vals := range a.req.Form { + if strings.HasPrefix(name, "response-") { + name = name[len("response-"):] + if !responseParams[name] { + continue + } + h.Set(name, vals[0]) + } + } + if r := a.req.Header.Get("Range"); r != "" { + fatalError(400, "NotImplemented", "range unimplemented") + } + // TODO Last-Modified-Since + // TODO If-Modified-Since + // TODO If-Unmodified-Since + // TODO If-Match + // TODO If-None-Match + // TODO Connection: close ?? 
+ // TODO x-amz-request-id + h.Set("Content-Length", fmt.Sprint(len(obj.data))) + h.Set("ETag", hex.EncodeToString(obj.checksum)) + h.Set("Last-Modified", obj.mtime.UTC().Format(http.TimeFormat)) + if a.req.Method == "HEAD" { + return nil + } + // TODO avoid holding the lock when writing data. + _, err := a.w.Write(obj.data) + if err != nil { + // we can't do much except just log the fact. + log.Printf("error writing data: %v", err) + } + return nil +} + +var metaHeaders = map[string]bool{ + "Content-MD5": true, + "x-amz-acl": true, + "Content-Type": true, + "Content-Encoding": true, + "Content-Disposition": true, +} + +// PUT on an object creates the object. +func (objr objectResource) put(a *action) interface{} { + // TODO Cache-Control header + // TODO Expires header + // TODO x-amz-server-side-encryption + // TODO x-amz-storage-class + + // TODO is this correct, or should we erase all previous metadata? + obj := objr.object + if obj == nil { + obj = &object{ + name: objr.name, + meta: make(http.Header), + } + } + + var expectHash []byte + if c := a.req.Header.Get("Content-MD5"); c != "" { + var err error + expectHash, err = hex.DecodeString(c) + if err != nil || len(expectHash) != md5.Size { + fatalError(400, "InvalidDigest", "The Content-MD5 you specified was invalid") + } + } + sum := md5.New() + // TODO avoid holding lock while reading data. 
+ data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) + if err != nil { + fatalError(400, "TODO", "read error") + } + gotHash := sum.Sum(nil) + if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { + fatalError(400, "BadDigest", "The Content-MD5 you specified did not match what we received") + } + if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { + fatalError(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") + } + + // PUT request has been successful - save data and metadata + for key, values := range a.req.Header { + key = http.CanonicalHeaderKey(key) + if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") { + obj.meta[key] = values + } + } + obj.data = data + obj.checksum = gotHash + obj.mtime = time.Now() + objr.bucket.objects[objr.name] = obj + return nil +} + +func (objr objectResource) delete(a *action) interface{} { + delete(objr.bucket.objects, objr.name) + return nil +} + +func (objr objectResource) post(a *action) interface{} { + fatalError(400, "MethodNotAllowed", "The specified method is not allowed against this resource") + return nil +} + +type CreateBucketConfiguration struct { + LocationConstraint string +} + +// locationConstraint parses the request body (if present). +// If there is no body, an empty string will be returned. 
+func locationConstraint(a *action) string { + var body bytes.Buffer + if _, err := io.Copy(&body, a.req.Body); err != nil { + fatalError(400, "InvalidRequest", err.Error()) + } + if body.Len() == 0 { + return "" + } + var loc CreateBucketConfiguration + if err := xml.NewDecoder(&body).Decode(&loc); err != nil { + fatalError(400, "InvalidRequest", err.Error()) + } + return loc.LocationConstraint +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/api_lib.py aptly-0.9.7/src/github.com/smira/aptly/system/api_lib.py --- aptly-0.9.6/src/github.com/smira/aptly/system/api_lib.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/api_lib.py 2016-05-24 07:05:22.000000000 +0000 @@ -27,7 +27,7 @@ if APITest.aptly_server is None: super(APITest, self).prepare() - APITest.aptly_server = self._start_process("aptly api serve -listen=%s" % (self.base_url),) + APITest.aptly_server = self._start_process("aptly api serve -no-lock -listen=%s" % (self.base_url),) time.sleep(1) if os.path.exists(os.path.join(os.environ["HOME"], ".aptly", "upload")): diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/s3_lib.py aptly-0.9.7/src/github.com/smira/aptly/system/s3_lib.py --- aptly-0.9.6/src/github.com/smira/aptly/system/s3_lib.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/s3_lib.py 2016-05-24 07:05:22.000000000 +0000 @@ -22,7 +22,7 @@ return super(S3Test, self).fixture_available() and s3_conn is not None def prepare(self): - self.bucket_name = "aptly-sys-test-" + str(uuid.uuid4()) + self.bucket_name = "aptly-sys-test-" + str(uuid.uuid1()) self.bucket = s3_conn.create_bucket(self.bucket_name) self.configOverride = {"S3PublishEndpoints": { "test1": { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/swift_lib.py aptly-0.9.7/src/github.com/smira/aptly/system/swift_lib.py --- aptly-0.9.6/src/github.com/smira/aptly/system/swift_lib.py 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/system/swift_lib.py 2016-05-24 07:05:22.000000000 +0000 @@ -37,7 +37,7 @@ return super(SwiftTest, self).fixture_available() and swift_conn is not None def prepare(self): - self.container_name = "aptly-sys-test-" + str(uuid.uuid4()) + self.container_name = "aptly-sys-test-" + str(uuid.uuid1()) swift_conn.put_container(self.container_name) self.configOverride = {"SwiftPublishEndpoints": { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t01_version/VersionTest_gold aptly-0.9.7/src/github.com/smira/aptly/system/t01_version/VersionTest_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t01_version/VersionTest_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t01_version/VersionTest_gold 2016-05-24 07:05:22.000000000 +0000 @@ -1 +1 @@ -aptly version: 0.9.6 +aptly version: 0.9.7 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t02_config/ConfigShowTest_gold aptly-0.9.7/src/github.com/smira/aptly/system/t02_config/ConfigShowTest_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t02_config/ConfigShowTest_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t02_config/ConfigShowTest_gold 2016-05-24 07:05:22.000000000 +0000 @@ -12,6 +12,7 @@ "downloadSourcePackages": false, "ppaDistributorID": "ubuntu", "ppaCodename": "", + "skipContentsPublishing": false, "S3PublishEndpoints": {}, "SwiftPublishEndpoints": {} } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t02_config/CreateConfigTest_gold aptly-0.9.7/src/github.com/smira/aptly/system/t02_config/CreateConfigTest_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t02_config/CreateConfigTest_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t02_config/CreateConfigTest_gold 2016-05-24 07:05:22.000000000 +0000 @@ -12,6 +12,7 @@ "downloadSourcePackages": false, "ppaDistributorID": "ubuntu", "ppaCodename": "", + "skipContentsPublishing": 
false, "S3PublishEndpoints": {}, "SwiftPublishEndpoints": {} } \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror11Test_gold aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror11Test_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror11Test_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror11Test_gold 2016-05-24 07:05:22.000000000 +0000 @@ -1,10 +1,12 @@ -Downloading http://mirror.yandex.ru/debian/dists/squeeze/InRelease... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/Release... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/Release.gpg... -gpgv: RSA key ID 473041FA -gpgv: Good signature from "Debian Archive Automatic Signing Key (6.0/squeeze) " -gpgv: RSA key ID B98321F9 -gpgv: Good signature from "Squeeze Stable Release Key " +Downloading http://mirror.yandex.ru/debian/dists/wheezy/InRelease... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/Release... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/Release.gpg... +gpgv: RSA key ID 46925553 +gpgv: Good signature from "Debian Archive Automatic Signing Key (7.0/wheezy) " +gpgv: RSA key ID 2B90D010 +gpgv: Good signature from "Debian Archive Automatic Signing Key (8/jessie) " +gpgv: RSA key ID 65FFB764 +gpgv: Good signature from "Wheezy Stable Release Key " -Mirror [mirror11]: http://mirror.yandex.ru/debian/ squeeze successfully added. +Mirror [mirror11]: http://mirror.yandex.ru/debian/ wheezy successfully added. You can run 'aptly mirror update mirror11' to download repository contents. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror11Test_mirror_show aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror11Test_mirror_show --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror11Test_mirror_show 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror11Test_mirror_show 2016-05-24 07:05:22.000000000 +0000 @@ -1,20 +1,20 @@ Name: mirror11 Archive Root URL: http://mirror.yandex.ru/debian/ -Distribution: squeeze +Distribution: wheezy Components: main, contrib, non-free -Architectures: amd64, armel, i386, ia64, kfreebsd-amd64, kfreebsd-i386, mips, mipsel, powerpc, s390, sparc +Architectures: amd64, armel, armhf, i386, ia64, kfreebsd-amd64, kfreebsd-i386, mips, mipsel, powerpc, s390, s390x, sparc Download Sources: no Download .udebs: no Last update: never Information from release file: -Architectures: amd64 armel i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 sparc -Codename: squeeze +Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc +Codename: wheezy Components: main contrib non-free -Date: Sat, 25 Apr 2015 11:01:14 UTC -Description: Debian 6.0.10 Released 19 July 2014 +Date: Sat, 05 Sep 2015 11:44:23 UTC +Description: Debian 7.9 Released 05 September 2015 Label: Debian Origin: Debian -Suite: oldoldstable -Version: 6.0.10 +Suite: oldstable +Version: 7.9 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror12Test_gold aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror12Test_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror12Test_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror12Test_gold 2016-05-24 07:05:22.000000000 +0000 @@ -1,9 +1,10 @@ -Downloading http://mirror.yandex.ru/debian/dists/squeeze/InRelease... 
-Downloading http://mirror.yandex.ru/debian/dists/squeeze/Release... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/Release.gpg... -gpgv: keyblock resource `${HOME}/.gnupg/aptlytest.gpg': file open error -gpgv: RSA key ID 473041FA -gpgv: Can't check signature: public key not found -gpgv: RSA key ID B98321F9 -gpgv: Can't check signature: public key not found +Downloading http://mirror.yandex.ru/debian/dists/squeeze-lts/InRelease... + +gpgv: RSA key ID 46925553 + +Downloading http://mirror.yandex.ru/debian/dists/squeeze-lts/Release... +Downloading http://mirror.yandex.ru/debian/dists/squeeze-lts/Release.gpg... + +gpgv: RSA key ID 46925553 + ERROR: unable to fetch mirror: verification of detached signature failed: exit status 2 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror9Test_gold aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror9Test_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror9Test_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror9Test_gold 2016-05-24 07:05:22.000000000 +0000 @@ -1,6 +1,8 @@ -Downloading http://mirror.yandex.ru/debian-backports/dists/squeeze-backports/InRelease... -gpgv: Signature made Fri Feb 7 06:56:50 2014 MSK using RSA key ID 46925553 +Downloading http://mirror.yandex.ru/debian/dists/wheezy-backports/InRelease... +gpgv: RSA key ID 46925553 gpgv: Good signature from "Debian Archive Automatic Signing Key (7.0/wheezy) " +gpgv: RSA key ID 2B90D010 +gpgv: Good signature from "Debian Archive Automatic Signing Key (8/jessie) " -Mirror [mirror9]: http://mirror.yandex.ru/debian-backports/ squeeze-backports successfully added. +Mirror [mirror9]: http://mirror.yandex.ru/debian/ wheezy-backports successfully added. You can run 'aptly mirror update mirror9' to download repository contents. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror9Test_mirror_show aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror9Test_mirror_show --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/CreateMirror9Test_mirror_show 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/CreateMirror9Test_mirror_show 2016-05-24 07:05:22.000000000 +0000 @@ -1,20 +1,21 @@ Name: mirror9 -Archive Root URL: http://mirror.yandex.ru/debian-backports/ -Distribution: squeeze-backports +Archive Root URL: http://mirror.yandex.ru/debian/ +Distribution: wheezy-backports Components: main, contrib, non-free -Architectures: amd64, armel, i386, ia64, kfreebsd-amd64, kfreebsd-i386, mips, mipsel, powerpc, s390, sparc +Architectures: amd64, armel, armhf, i386, ia64, kfreebsd-amd64, kfreebsd-i386, mips, mipsel, powerpc, s390, s390x, sparc Download Sources: no Download .udebs: no Last update: never Information from release file: -Architectures: amd64 armel i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 sparc +Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc ButAutomaticUpgrades: yes -Codename: squeeze-backports +Codename: wheezy-backports Components: main contrib non-free -Description: Backports for the Squeeze Distribution +Description: Backports for the Wheezy Distribution Label: Debian Backports NotAutomatic: yes Origin: Debian Backports -Suite: squeeze-backports +Suite: wheezy-backports +Version: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/create.py aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/create.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/create.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/create.py 2016-05-24 07:05:22.000000000 +0000 @@ -90,7 +90,7 @@ """ create mirror: repo with InRelease verification """ - 
runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror9 http://mirror.yandex.ru/debian-backports/ squeeze-backports" + runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror9 http://mirror.yandex.ru/debian/ wheezy-backports" fixtureGpg = True outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using|Warning: using insecure memory!\n', '', s) @@ -117,7 +117,7 @@ """ create mirror: repo with Release + Release.gpg verification """ - runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror11 http://mirror.yandex.ru/debian/ squeeze" + runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror11 http://mirror.yandex.ru/debian/ wheezy" fixtureGpg = True outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s) @@ -130,7 +130,7 @@ """ create mirror: repo with Release+Release.gpg verification, failure """ - runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror12 http://mirror.yandex.ru/debian/ squeeze" + runCmd = "aptly mirror create --keyring=aptlytest.gpg mirror12 http://mirror.yandex.ru/debian/ squeeze-lts" fixtureGpg = False gold_processor = BaseTest.expand_environ outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using|gpgv: keyblock resource .*$|gpgv: Can\'t check signature: .*$', '', s, flags=re.MULTILINE) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/ListMirror1Test_gold aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/ListMirror1Test_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/ListMirror1Test_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/ListMirror1Test_gold 2016-05-24 07:05:22.000000000 +0000 @@ -1,7 +1,7 @@ List of mirrors: * [mirror1]: http://mirror.yandex.ru/debian/ wheezy - * [mirror2]: http://mirror.yandex.ru/debian/ squeeze [src] - * [mirror3]: http://mirror.yandex.ru/debian/ squeeze + * [mirror2]: http://mirror.yandex.ru/debian/ wheezy [src] + * [mirror3]: http://mirror.yandex.ru/debian/ 
wheezy * [mirror4]: http://download.opensuse.org/repositories/Apache:/MirrorBrain/Debian_7.0/ ./ To get more information about mirror, run `aptly mirror show `. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/list.py aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/list.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/list.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/list.py 2016-05-24 07:05:22.000000000 +0000 @@ -7,8 +7,8 @@ """ fixtureCmds = [ "aptly mirror create --ignore-signatures mirror1 http://mirror.yandex.ru/debian/ wheezy", - "aptly mirror create -with-sources --ignore-signatures mirror2 http://mirror.yandex.ru/debian/ squeeze contrib", - "aptly -architectures=i386 mirror create --ignore-signatures mirror3 http://mirror.yandex.ru/debian/ squeeze non-free", + "aptly mirror create -with-sources --ignore-signatures mirror2 http://mirror.yandex.ru/debian/ wheezy contrib", + "aptly -architectures=i386 mirror create --ignore-signatures mirror3 http://mirror.yandex.ru/debian/ wheezy non-free", "aptly mirror create -ignore-signatures mirror4 http://download.opensuse.org/repositories/Apache:/MirrorBrain/Debian_7.0/ ./", ] runCmd = "aptly mirror list" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/UpdateMirror12Test_gold aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/UpdateMirror12Test_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/UpdateMirror12Test_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/UpdateMirror12Test_gold 2016-05-24 07:05:22.000000000 +0000 @@ -2,32 +2,35 @@ Applying filter... Building download queue... -Download queue: 10 items (0.76 MiB) +Download queue: 11 items (5.76 MiB) Downloading & parsing package files... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/InRelease... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/Release... 
-Downloading http://mirror.yandex.ru/debian/dists/squeeze/Release.gpg... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/main/binary-amd64/Packages.bz2... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/main/binary-i386/Packages.bz2... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/main/debian-installer/binary-amd64/Packages.bz2... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/main/debian-installer/binary-i386/Packages.bz2... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/non-free/binary-amd64/Packages.bz2... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/non-free/binary-i386/Packages.bz2... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/non-free/debian-installer/binary-amd64/Packages.bz2... -Downloading http://mirror.yandex.ru/debian/dists/squeeze/non-free/debian-installer/binary-i386/Packages.bz2... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_i386.udeb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/dmraid_1.0.0.rc16-4.1_amd64.deb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/dmraid_1.0.0.rc16-4.1_i386.deb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/libdmraid-dev_1.0.0.rc16-4.1_amd64.deb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/libdmraid-dev_1.0.0.rc16-4.1_i386.deb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/libdmraid1.0.0.rc16-udeb_1.0.0.rc16-4.1_amd64.udeb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/libdmraid1.0.0.rc16-udeb_1.0.0.rc16-4.1_i386.udeb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/libdmraid1.0.0.rc16_1.0.0.rc16-4.1_amd64.deb... -Downloading http://mirror.yandex.ru/debian/pool/main/d/dmraid/libdmraid1.0.0.rc16_1.0.0.rc16-4.1_i386.deb... 
-Mirror `squeeze` has been successfully updated. -Packages filtered: 45830 -> 10. -gpgv: Good signature from "Debian Archive Automatic Signing Key (6.0/squeeze) " -gpgv: Good signature from "Squeeze Stable Release Key " -gpgv: RSA key ID 473041FA -gpgv: RSA key ID B98321F9 \ No newline at end of file +Downloading http://mirror.yandex.ru/debian/dists/wheezy/InRelease... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/Release... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/Release.gpg... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/main/binary-amd64/Packages.bz2... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/main/binary-i386/Packages.bz2... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/main/debian-installer/binary-amd64/Packages.bz2... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/main/debian-installer/binary-i386/Packages.bz2... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/non-free/binary-amd64/Packages.bz2... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/non-free/binary-i386/Packages.bz2... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/non-free/debian-installer/binary-amd64/Packages.bz2... +Downloading http://mirror.yandex.ru/debian/dists/wheezy/non-free/debian-installer/binary-i386/Packages.bz2... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gnupg-curl_1.4.12-7+deb7u7_amd64.deb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gnupg-curl_1.4.12-7+deb7u7_i386.deb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gnupg-udeb_1.4.12-7+deb7u7_amd64.udeb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gnupg-udeb_1.4.12-7+deb7u7_i386.udeb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gnupg_1.4.12-7+deb7u7_amd64.deb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gnupg_1.4.12-7+deb7u7_i386.deb... 
+Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gpgv-udeb_1.4.12-7+deb7u7_amd64.udeb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gpgv-udeb_1.4.12-7+deb7u7_i386.udeb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gpgv-win32_1.4.12-7+deb7u7_all.deb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gpgv_1.4.12-7+deb7u7_amd64.deb... +Downloading http://mirror.yandex.ru/debian/pool/main/g/gnupg/gpgv_1.4.12-7+deb7u7_i386.deb... +Mirror `wheezy` has been successfully updated. +Packages filtered: 57430 -> 11. +gpgv: Good signature from "Debian Archive Automatic Signing Key (7.0/wheezy) " +gpgv: Good signature from "Debian Archive Automatic Signing Key (8/jessie) " +gpgv: Good signature from "Wheezy Stable Release Key " +gpgv: RSA key ID 2B90D010 +gpgv: RSA key ID 46925553 +gpgv: RSA key ID 65FFB764 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/update.py aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/update.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t04_mirror/update.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t04_mirror/update.py 2016-05-24 07:05:22.000000000 +0000 @@ -165,9 +165,9 @@ longTest = False fixtureGpg = True fixtureCmds = [ - "aptly -architectures=i386,amd64 mirror create -keyring=aptlytest.gpg -filter='$$Source (dmraid)' -with-udebs squeeze http://mirror.yandex.ru/debian/ squeeze main non-free", + "aptly -architectures=i386,amd64 mirror create -keyring=aptlytest.gpg -filter='$$Source (gnupg)' -with-udebs wheezy http://mirror.yandex.ru/debian/ wheezy main non-free", ] - runCmd = "aptly mirror update -keyring=aptlytest.gpg squeeze" + runCmd = "aptly mirror update -keyring=aptlytest.gpg wheezy" outputMatchPrepare = lambda _, s: re.sub(r'Signature made .* using', '', s) def output_processor(self, output): diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo12Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo12Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo12Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo12Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo15Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo15Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo15Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo15Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo17Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo17Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo17Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo17Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_binary aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_binary 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_binary 2016-05-24 07:05:22.000000000 +0000 @@ -20,6 +20,7 @@ Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 +SHA512: 
d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c Section: libdevel Size: 2738 Source: boost-defaults diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_sources aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_sources --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_sources 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo1Test_sources 2016-05-24 07:05:22.000000000 +0000 @@ -3,6 +3,7 @@ 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + 262cac59a2e81c7f110851ff9670c97ffc3d192d9937b880422a0907f26340d43e7de7e68b904a4fb10bedb02b65c3bd1f7bdd20ea8c4293e690e7a8e0e70ee5 893 pyspi-0.6.1-1.3.stripped.dsc 289d3aefa970876e9c43686ce2b02f478d7f3ed35a713928464a98d54ae4fca3 893 pyspi-0.6.1-1.3.stripped.dsc 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz @@ -19,6 +20,7 @@ d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz + 
fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc Architecture: any Architecture: any Binary: python-at-spi @@ -29,6 +31,8 @@ Checksums-Sha1: Checksums-Sha256: Checksums-Sha256: +Checksums-Sha512: +Checksums-Sha512: Directory: pool/main/p/pyspi Directory: pool/main/p/pyspi Files: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo27Test_udeb_binary aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo27Test_udeb_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishRepo27Test_udeb_binary 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishRepo27Test_udeb_binary 2016-05-24 07:05:22.000000000 +0000 @@ -1,20 +1,22 @@ -Package: dmraid-udeb -Version: 1.0.0.rc16-4.1 -Installed-Size: 36 -Priority: optional -Section: debian-installer -Maintainer: Giuseppe Iuculano -Architecture: i386 -Description: Device-Mapper Software RAID support tool (udeb) - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. + + . This is the minimal package (udeb) used by debian-installer + dmraid discovers, activates, deactivates and displays properties + of software RAID sets (eg, ATARAID) and contained DOS partitions. 
+Architecture: i386 +Depends: libc6-udeb (>= 2.11), libdmraid1.0.0.rc16-udeb (>= 1.0.0.rc16), dmsetup-udeb +Description: Device-Mapper Software RAID support tool (udeb) +Filename: pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_i386.udeb +Installed-Size: 36 MD5sum: 4d8bb4dafb0ef9059dac75846e162784 +Maintainer: Giuseppe Iuculano +Package: dmraid-udeb +Priority: optional SHA1: fd5c73e08d4c5381b1136c2ff170332d77526246 SHA256: fe4ff3351186f03039f8cd6f78e8e4f473a75b613f950caac06fa21dda2d59e8 -Filename: pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_i386.udeb +SHA512: d215bfffe485964a9a3db39788be713a8e10cd39fa1b2849e9e474eb1de2b01f69d3fff0997cc577cac7804da41123af2b4826baafb19e5e6ebdf5070f17a6e8 +Section: debian-installer Size: 11022 Source: dmraid -Depends: libc6-udeb (>= 2.11), libdmraid1.0.0.rc16-udeb (>= 1.0.0.rc16), dmsetup-udeb - +Version: 1.0.0.rc16-4.1 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot13Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot13Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot13Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot13Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot15Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot15Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot15Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot15Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot16Test_release 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot16Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot16Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot16Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_binary aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_binary 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_binary 2016-05-24 07:05:22.000000000 +0000 @@ -20,6 +20,7 @@ Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c Section: libdevel Size: 2738 Source: boost-defaults diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_sources aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_sources --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_sources 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot17Test_sources 2016-05-24 07:05:22.000000000 +0000 @@ -3,6 +3,7 @@ 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + 262cac59a2e81c7f110851ff9670c97ffc3d192d9937b880422a0907f26340d43e7de7e68b904a4fb10bedb02b65c3bd1f7bdd20ea8c4293e690e7a8e0e70ee5 893 pyspi-0.6.1-1.3.stripped.dsc 289d3aefa970876e9c43686ce2b02f478d7f3ed35a713928464a98d54ae4fca3 893 pyspi-0.6.1-1.3.stripped.dsc 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz @@ -19,6 +20,7 @@ d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz + fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc Architecture: any Architecture: any Binary: python-at-spi @@ -29,6 +31,8 @@ Checksums-Sha1: Checksums-Sha256: Checksums-Sha256: +Checksums-Sha512: +Checksums-Sha512: Directory: pool/main/p/pyspi Directory: pool/main/p/pyspi Files: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot1Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot1Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot1Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot1Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -2,10 +2,10 @@ Label: . 
maverick Suite: maverick Codename: maverick -Date: Fri, 31 Jan 2014 14:18:52 UTC Architectures: amd64 i386 Components: main Description: Generated by aptly MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot24Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot24Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot24Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot24Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot26Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot26Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot26Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot26Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot2Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot2Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot2Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot2Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_gold aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_gold 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_gold 2016-05-24 07:05:22.000000000 +0000 @@ -4,10 +4,10 @@ Signing file 'Release' with gpg, please enter your passphrase when prompted: Clearsigning file 'Release' with gpg, please enter your passphrase when prompted: -Snapshot squeeze has been successfully published. +Snapshot wheezy has been successfully published. Please setup your webserver to serve directory '${HOME}/.aptly/public' with autoindexing. Now you can add following line to apt sources: - deb http://your-server/ squeeze main + deb http://your-server/ wheezy main Don't forget to add your GPG key to apt with apt-key. You can also use `aptly serve` to publish your repositories over HTTP quickly. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_amd64 aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_amd64 --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_amd64 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_amd64 2016-05-24 07:05:22.000000000 +0000 @@ -1,87 +1,81 @@ -Package: dmraid -Priority: optional -Section: admin -Installed-Size: 112 -Maintainer: Giuseppe Iuculano -Architecture: amd64 -Version: 1.0.0.rc16-4.1 -Depends: libc6 (>= 2.3), libdmraid1.0.0.rc16 (>= 1.0.0.rc16), libselinux1 (>= 1.32), libsepol1 (>= 1.14), udev, dmsetup -Filename: pool/main/d/dmraid/dmraid_1.0.0.rc16-4.1_amd64.deb -Size: 38620 -MD5sum: 35da9bcdd12c7fb08eb7192f0a17ddf2 -SHA1: 6a89d3f9e3b80a172811bb7d74eac43f119a8b7c -SHA256: 125405c4b0a7364bf209c161f393d4d0152ba9d02a55a95d90a7637f7b373b8f -Description: Device-Mapper Software RAID support tool - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . 
- dmraid uses the Linux device-mapper to create devices with respective - mappings for the ATARAID sets discovered. - . - The following formats are supported: - Highpoint HPT37X/HPT45X - Intel Software RAID - LSI Logic MegaRAID - NVidia NForce RAID (nvraid) - Promise FastTrack - Silicon Image(tm) Medley(tm) - VIA Software RAID - . - Please read the documentation in /usr/share/doc/dmraid BEFORE attempting - any use of this software. Improper use can cause data loss! -Homepage: http://people.redhat.com/~heinzm/sw/dmraid/ -Tag: admin::filesystem, admin::kernel, hardware::storage, implemented-in::c, interface::commandline, role::program, scope::utility, use::scanning -Package: libdmraid-dev -Priority: optional -Section: libdevel -Installed-Size: 496 -Maintainer: Giuseppe Iuculano -Architecture: amd64 -Source: dmraid -Version: 1.0.0.rc16-4.1 -Depends: libdmraid1.0.0.rc16 (= 1.0.0.rc16-4.1) -Filename: pool/main/d/dmraid/libdmraid-dev_1.0.0.rc16-4.1_amd64.deb -Size: 152618 -MD5sum: bb209b5796592d786c28844b949216dc -SHA1: cd8baba807fa92a88a265a044d821df8b677b5cb -SHA256: 081a48ad5372a941c35d41733da89a52cbe2d8f49032c2a4ef03148e4049615f -Description: Device-Mapper Software RAID support tool - header files - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . - dmraid uses the Linux device-mapper to create devices with respective - mappings for the ATARAID sets discovered. - . - This package contains the header files needed to link programs against - dmraid. 
-Tag: admin::hardware, devel::lang:c, devel::library, hardware::storage, implemented-in::c, qa::low-popcon, role::devel-lib, use::driver -Homepage: http://people.redhat.com/~heinzm/sw/dmraid/ -Package: libdmraid1.0.0.rc16 -Priority: optional -Section: libs -Installed-Size: 244 -Maintainer: Giuseppe Iuculano -Architecture: amd64 -Source: dmraid -Version: 1.0.0.rc16-4.1 -Replaces: libdmraid1.0.0.rc15 (<< 1.0.0.rc16-1) -Depends: libc6 (>= 2.7), libdevmapper1.02.1 (>= 2:1.02.20) -Filename: pool/main/d/dmraid/libdmraid1.0.0.rc16_1.0.0.rc16-4.1_amd64.deb -Size: 108978 -MD5sum: a66d03bb1ddad78f879660ddedf86295 -SHA1: 6292936617c466e67a3148c66d0c27c068d055d3 -SHA256: 29f06bd3ae42e3380b356b69598be07724d178af35f2f1a64648c7f8ff85bef9 -Description: Device-Mapper Software RAID support tool - shared library - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . - dmraid uses the Linux device-mapper to create devices with respective - mappings for the ATARAID sets discovered. - . - This package contains the dmraid shared library, which implements - the back half of dmraid, including on-disk metadata formats. 
-Homepage: http://people.redhat.com/~heinzm/sw/dmraid/ -Tag: admin::hardware, admin::kernel, devel::lang:c, devel::library, hardware::storage, implemented-in::c, role::{devel-lib,kernel,shared-lib}, use::driver + + +Architecture: all +Architecture: amd64 +Architecture: amd64 +Architecture: amd64 +Depends: libbz2-1.0, libc6 (>= 2.4), libreadline6 (>= 6.0), libusb-0.1-4 (>= 2:0.1.12), zlib1g (>= 1:1.1.4), dpkg (>= 1.15.4) | install-info, gpgv +Depends: libbz2-1.0, libc6 (>= 2.4), zlib1g (>= 1:1.1.4) +Depends: libc6 (>= 2.4), libcurl3-gnutls (>= 7.16.2), libldap-2.4-2 (>= 2.4.7), gnupg +Description-Md5: 19709c7fc27595437225fd34d295b347 +Description-Md5: 3f8767984a5f4b323de309446d07435d +Description-Md5: 55306a4e1e1fd63e577767c1b9f5161c +Description-Md5: 8bbdb812806fb623e26b7b93f549c74b +Description: GNU privacy guard - a free PGP replacement +Description: GNU privacy guard - a free PGP replacement (cURL) +Description: GNU privacy guard - signature verification tool +Description: GNU privacy guard - signature verification tool (win32 build) +Filename: pool/main/g/gnupg/gnupg-curl_1.4.12-7+deb7u7_amd64.deb +Filename: pool/main/g/gnupg/gnupg_1.4.12-7+deb7u7_amd64.deb +Filename: pool/main/g/gnupg/gpgv-win32_1.4.12-7+deb7u7_all.deb +Filename: pool/main/g/gnupg/gpgv_1.4.12-7+deb7u7_amd64.deb +Homepage: http://www.gnupg.org +Homepage: http://www.gnupg.org +Homepage: http://www.gnupg.org +Homepage: http://www.gnupg.org +Installed-Size: 130 +Installed-Size: 1480 +Installed-Size: 438 +Installed-Size: 4962 +MD5sum: 17916456c6e84c434205bad15e98e902 +MD5sum: 56699ccfefc9bb6c39325d746363c018 +MD5sum: 5f15f3ac2f586b95ab21c3f83fd1bf35 +MD5sum: 91a07e1a42703f0ce59c4a1de60e961d +Maintainer: Debian GnuPG-Maintainers +Maintainer: Debian GnuPG-Maintainers +Maintainer: Debian GnuPG-Maintainers +Maintainer: Debian GnuPG-Maintainers +Multi-Arch: foreign +Multi-Arch: foreign +Multi-Arch: foreign +Package: gnupg +Package: gnupg-curl +Package: gpgv +Package: gpgv-win32 +Priority: extra 
+Priority: important +Priority: important +Priority: optional +Recommends: libldap-2.4-2 (>= 2.4.7), gnupg-curl +SHA1: 8dae53bc42d1f35054ce35124da8b92f6097f1c2 +SHA1: bc5c60462be7702988e083cf68c7f8edfcb962a5 +SHA1: c03f15e5ee0fba0b77a51e063db87708aee0e422 +SHA1: df8a0ef18df0fb86167128ac6c31d6709c2f9c6b +SHA256: 27760f636f6dbfe387dfbede1131fe7a0dd5fd3b0ab562213193ffa7cfcadfb5 +SHA256: 2920249908a8297f85006def6a55fb99abfcc8466cac2b9f28d01ce8315df065 +SHA256: 8361f45f51a7e70e3367e5b2df59fa8defc8648a76afa4159da3f249460f5b33 +SHA256: b626c3320c0ba2c41c5214bf8175c713f3713cc393e9361a977dc0202c197875 +Section: utils +Section: utils +Section: utils +Section: utils +Size: 1956126 +Size: 228244 +Size: 617064 +Size: 64308 +Source: gnupg +Source: gnupg +Source: gnupg +Suggests: gnupg +Suggests: gnupg-doc, xloadimage | imagemagick | eog, libpcsclite1 +Suggests: wine +Tag: implemented-in::c, interface::commandline, network::client, protocol::http, role::plugin, role::program, scope::utility, security::authentication, security::cryptography, suite::gnu, use::checking, works-with::file, works-with::text +Tag: implemented-in::c, interface::commandline, role::program, scope::utility, security::authentication, security::cryptography, security::privacy, suite::gnu, use::checking, works-with::file, works-with::text +Tag: implemented-in::c, interface::commandline, role::program, scope::utility, security::cryptography, suite::gnu, use::checking +Version: 1.4.12-7+deb7u7 +Version: 1.4.12-7+deb7u7 +Version: 1.4.12-7+deb7u7 +Version: 1.4.12-7+deb7u7 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_i386 aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_i386 --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_i386 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_i386 2016-05-24 07:05:22.000000000 +0000 @@ -1,87 +1,81 @@ -Package: dmraid -Priority: optional -Section: admin -Installed-Size: 176 -Maintainer: Giuseppe Iuculano -Architecture: i386 -Version: 1.0.0.rc16-4.1 -Depends: libc6 (>= 2.3), libdmraid1.0.0.rc16 (>= 1.0.0.rc16), libselinux1 (>= 1.32), libsepol1 (>= 1.14), udev, dmsetup -Filename: pool/main/d/dmraid/dmraid_1.0.0.rc16-4.1_i386.deb -Size: 37984 -MD5sum: f8aea4e9eaea341b112f02e9efe1678e -SHA1: bb96a258038c79bc04eef49d5875deed4c67dd16 -SHA256: 6a8294bef99040055009da41597869bfdb17ac89c3166e49c57340abe7f702ba -Description: Device-Mapper Software RAID support tool - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . - dmraid uses the Linux device-mapper to create devices with respective - mappings for the ATARAID sets discovered. - . - The following formats are supported: - Highpoint HPT37X/HPT45X - Intel Software RAID - LSI Logic MegaRAID - NVidia NForce RAID (nvraid) - Promise FastTrack - Silicon Image(tm) Medley(tm) - VIA Software RAID - . - Please read the documentation in /usr/share/doc/dmraid BEFORE attempting - any use of this software. Improper use can cause data loss! 
-Tag: admin::filesystem, admin::kernel, hardware::storage, implemented-in::c, interface::commandline, role::program, scope::utility, use::scanning -Homepage: http://people.redhat.com/~heinzm/sw/dmraid/ -Package: libdmraid-dev -Priority: optional -Section: libdevel -Installed-Size: 440 -Maintainer: Giuseppe Iuculano -Architecture: i386 -Source: dmraid -Version: 1.0.0.rc16-4.1 -Depends: libdmraid1.0.0.rc16 (= 1.0.0.rc16-4.1) -Filename: pool/main/d/dmraid/libdmraid-dev_1.0.0.rc16-4.1_i386.deb -Size: 145808 -MD5sum: 5395970df02ab5f1609cd7eccc15ead1 -SHA1: f27bd38eeb58a32ee7e58ac8a2950649bd4ef17b -SHA256: 2abe9142ce6aa341df57303b5bc847522779ea9109b0fe734e2ae4419872da71 -Description: Device-Mapper Software RAID support tool - header files - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . - dmraid uses the Linux device-mapper to create devices with respective - mappings for the ATARAID sets discovered. - . - This package contains the header files needed to link programs against - dmraid. 
-Tag: admin::hardware, devel::lang:c, devel::library, hardware::storage, implemented-in::c, qa::low-popcon, role::devel-lib, use::driver -Homepage: http://people.redhat.com/~heinzm/sw/dmraid/ -Package: libdmraid1.0.0.rc16 -Priority: optional -Section: libs -Installed-Size: 268 -Maintainer: Giuseppe Iuculano -Architecture: i386 -Source: dmraid -Version: 1.0.0.rc16-4.1 -Replaces: libdmraid1.0.0.rc15 (<< 1.0.0.rc16-1) -Depends: libc6 (>= 2.7), libdevmapper1.02.1 (>= 2:1.02.20) -Filename: pool/main/d/dmraid/libdmraid1.0.0.rc16_1.0.0.rc16-4.1_i386.deb -Size: 106088 -MD5sum: 9330ba2ffd2f22d695fdf692f8120159 -SHA1: 6b262419836e8cad4500043f5e9e6a1581074023 -SHA256: 2b2238679ac8ff4776a3a2caf533c551700d9f92a7d2af23d6457acf7de5d6c8 -Description: Device-Mapper Software RAID support tool - shared library - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . - dmraid uses the Linux device-mapper to create devices with respective - mappings for the ATARAID sets discovered. - . - This package contains the dmraid shared library, which implements - the back half of dmraid, including on-disk metadata formats. 
-Tag: admin::hardware, admin::kernel, devel::lang:c, devel::library, hardware::storage, implemented-in::c, role::{devel-lib,kernel,shared-lib}, use::driver -Homepage: http://people.redhat.com/~heinzm/sw/dmraid/ + + +Architecture: all +Architecture: i386 +Architecture: i386 +Architecture: i386 +Depends: libbz2-1.0, libc6 (>= 2.4), libreadline6 (>= 6.0), libusb-0.1-4 (>= 2:0.1.12), zlib1g (>= 1:1.1.4), dpkg (>= 1.15.4) | install-info, gpgv +Depends: libbz2-1.0, libc6 (>= 2.4), zlib1g (>= 1:1.1.4) +Depends: libc6 (>= 2.4), libcurl3-gnutls (>= 7.16.2), libldap-2.4-2 (>= 2.4.7), gnupg +Description-Md5: 19709c7fc27595437225fd34d295b347 +Description-Md5: 3f8767984a5f4b323de309446d07435d +Description-Md5: 55306a4e1e1fd63e577767c1b9f5161c +Description-Md5: 8bbdb812806fb623e26b7b93f549c74b +Description: GNU privacy guard - a free PGP replacement +Description: GNU privacy guard - a free PGP replacement (cURL) +Description: GNU privacy guard - signature verification tool +Description: GNU privacy guard - signature verification tool (win32 build) +Filename: pool/main/g/gnupg/gnupg-curl_1.4.12-7+deb7u7_i386.deb +Filename: pool/main/g/gnupg/gnupg_1.4.12-7+deb7u7_i386.deb +Filename: pool/main/g/gnupg/gpgv-win32_1.4.12-7+deb7u7_all.deb +Filename: pool/main/g/gnupg/gpgv_1.4.12-7+deb7u7_i386.deb +Homepage: http://www.gnupg.org +Homepage: http://www.gnupg.org +Homepage: http://www.gnupg.org +Homepage: http://www.gnupg.org +Installed-Size: 1480 +Installed-Size: 401 +Installed-Size: 4613 +Installed-Size: 89 +MD5sum: 5f15f3ac2f586b95ab21c3f83fd1bf35 +MD5sum: 7619869434ee598ba4b1e3de3a48b7a6 +MD5sum: b7456c472a331b1e905712328d25da27 +MD5sum: f892c96687ced1c2adfd8f00d9ca6f5a +Maintainer: Debian GnuPG-Maintainers +Maintainer: Debian GnuPG-Maintainers +Maintainer: Debian GnuPG-Maintainers +Maintainer: Debian GnuPG-Maintainers +Multi-Arch: foreign +Multi-Arch: foreign +Multi-Arch: foreign +Package: gnupg +Package: gnupg-curl +Package: gpgv +Package: gpgv-win32 +Priority: extra +Priority: 
important +Priority: important +Priority: optional +Recommends: libldap-2.4-2 (>= 2.4.7), gnupg-curl +SHA1: 31e0942b2511a2c73723271163efc0eb29d056ff +SHA1: 72c9a6c444d8a6e8ec57f2b778c631946def9d1a +SHA1: 9889f1717f36e00b6143aed530bd3d0c54c116fd +SHA1: df8a0ef18df0fb86167128ac6c31d6709c2f9c6b +SHA256: 27760f636f6dbfe387dfbede1131fe7a0dd5fd3b0ab562213193ffa7cfcadfb5 +SHA256: 6898801e3f3c97a30bef1ee50381479b69360a28807fb63fcce4abef4da1aec7 +SHA256: b852d7681ea328bd8b45140973624781e65d0363961d92bcc2ab0bbf1cc6ed52 +SHA256: cff40c87faea248c77de7d9fc50fcbc80631cd1bc8cec2b1033e0db452e08ea6 +Section: utils +Section: utils +Section: utils +Section: utils +Size: 1938694 +Size: 221686 +Size: 617064 +Size: 63192 +Source: gnupg +Source: gnupg +Source: gnupg +Suggests: gnupg +Suggests: gnupg-doc, xloadimage | imagemagick | eog, libpcsclite1 +Suggests: wine +Tag: implemented-in::c, interface::commandline, network::client, protocol::http, role::plugin, role::program, scope::utility, security::authentication, security::cryptography, suite::gnu, use::checking, works-with::file, works-with::text +Tag: implemented-in::c, interface::commandline, role::program, scope::utility, security::authentication, security::cryptography, security::privacy, suite::gnu, use::checking, works-with::file, works-with::text +Tag: implemented-in::c, interface::commandline, role::program, scope::utility, security::cryptography, suite::gnu, use::checking +Version: 1.4.12-7+deb7u7 +Version: 1.4.12-7+deb7u7 +Version: 1.4.12-7+deb7u7 +Version: 1.4.12-7+deb7u7 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_udeb_amd64 aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_udeb_amd64 --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_udeb_amd64 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_udeb_amd64 2016-05-24 07:05:22.000000000 +0000 @@ -1,40 +1,35 @@ -Package: dmraid-udeb -Version: 1.0.0.rc16-4.1 -Installed-Size: 32 -Priority: optional -Section: debian-installer -Maintainer: Giuseppe Iuculano -Architecture: amd64 -Description: Device-Mapper Software RAID support tool (udeb) - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . - This is the minimal package (udeb) used by debian-installer -MD5sum: 721685fde18001ad0c9ac172c3118983 -SHA1: 88e229b76cb5866c8868a491a6690b3fde2b33d5 -SHA256: efae69921b97494e40437712053b60a5105fa433f3cfbae3bb2991d341eb95a6 -Filename: pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb -Depends: libc6-udeb (>= 2.11), libdmraid1.0.0.rc16-udeb (>= 1.0.0.rc16), dmsetup-udeb -Source: dmraid -Size: 11806 -Package: libdmraid1.0.0.rc16-udeb -Version: 1.0.0.rc16-4.1 -Installed-Size: 0 -Priority: optional -Section: debian-installer -Maintainer: Giuseppe Iuculano -Architecture: amd64 -Description: Device-Mapper Software RAID support tool - shared library (udeb) - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . 
- This is the minimal package (udeb shared library) used by debian-installer -MD5sum: efae3ee2d1ccd78aaec7d452ecba4c6a -SHA1: 2ef8c01a0375c92f59fed32949b9469cc53d0b99 -SHA256: aabf098de9fcf2da0c0f66f2d9f1cb61f7e244dd2b009361e40cd29827749d44 -Size: 92372 -Filename: pool/main/d/dmraid/libdmraid1.0.0.rc16-udeb_1.0.0.rc16-4.1_amd64.udeb -Source: dmraid -Depends: libc6-udeb (>= 2.11), libdevmapper1.02.1-udeb (>= 2:1.02.48) + +Architecture: amd64 +Architecture: amd64 +Depends: libc6-udeb (>= 2.13), libusb-0.1-udeb, zlib1g-udeb (>= 1:1.2.3.3.dfsg-1) +Depends: libc6-udeb (>= 2.13), zlib1g-udeb (>= 1:1.2.3.3.dfsg-1) +Description-Md5: 0d5b74cda45e2a6526c6943d2cd0c362 +Description-Md5: c52656f4cc79fd4b089086b9173d2923 +Description: GNU privacy guard - a free PGP replacement +Description: minimal signature verification tool +Filename: pool/main/g/gnupg/gnupg-udeb_1.4.12-7+deb7u7_amd64.udeb +Filename: pool/main/g/gnupg/gpgv-udeb_1.4.12-7+deb7u7_amd64.udeb +Installed-Size: 301 +Installed-Size: 833 +MD5sum: 2fda838d1101cc202ddd087c8c98b635 +MD5sum: 6d90567115ee873d4ce6c87991cfaed0 +Maintainer: Debian GnuPG-Maintainers +Maintainer: Debian GnuPG-Maintainers +Package: gnupg-udeb +Package: gpgv-udeb +Priority: extra +Priority: extra +SHA1: 5d32171182e956f8277d44378b1623bbeae23110 +SHA1: dbe121bae44db6eb6108311f41997c4ede1178b2 +SHA256: 4abcb1191d8a3e58d88fb56084f9d784255ba68c767babc3c2819b7a1a689b78 +SHA256: dd7230f9d025c47e8c94e4101e2970e94aed50ec0c65801f9c7cd0a03d6723e1 +Section: debian-installer +Section: debian-installer +Size: 130734 +Size: 354018 +Source: gnupg +Source: gnupg +Version: 1.4.12-7+deb7u7 +Version: 1.4.12-7+deb7u7 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_udeb_i386 aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_udeb_i386 --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_udeb_i386 2016-02-10 
14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_packages_udeb_i386 2016-05-24 07:05:22.000000000 +0000 @@ -1,40 +1,35 @@ -Package: dmraid-udeb -Version: 1.0.0.rc16-4.1 -Installed-Size: 36 -Priority: optional -Section: debian-installer -Maintainer: Giuseppe Iuculano -Architecture: i386 -Description: Device-Mapper Software RAID support tool (udeb) - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . - This is the minimal package (udeb) used by debian-installer -MD5sum: 4d8bb4dafb0ef9059dac75846e162784 -SHA1: fd5c73e08d4c5381b1136c2ff170332d77526246 -SHA256: fe4ff3351186f03039f8cd6f78e8e4f473a75b613f950caac06fa21dda2d59e8 -Source: dmraid -Size: 11022 -Filename: pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_i386.udeb -Depends: libc6-udeb (>= 2.11), libdmraid1.0.0.rc16-udeb (>= 1.0.0.rc16), dmsetup-udeb -Package: libdmraid1.0.0.rc16-udeb -Version: 1.0.0.rc16-4.1 -Installed-Size: 212 -Priority: optional -Section: debian-installer -Maintainer: Giuseppe Iuculano -Architecture: i386 -Description: Device-Mapper Software RAID support tool - shared library (udeb) - dmraid discovers, activates, deactivates and displays properties - of software RAID sets (eg, ATARAID) and contained DOS partitions. - . 
- This is the minimal package (udeb shared library) used by debian-installer -MD5sum: aba78093c15c8bcd8e237f6a578c6c65 -SHA1: c5e95d443889775a48d6c48bf332a21a37ce63c6 -SHA256: 1c51dbf4cd1a5a683fd60e2b4f44dc6f8f574de3aea52354541a9a105f10f918 -Depends: libc6-udeb (>= 2.11), libdevmapper1.02.1-udeb (>= 2:1.02.48) -Source: dmraid -Filename: pool/main/d/dmraid/libdmraid1.0.0.rc16-udeb_1.0.0.rc16-4.1_i386.udeb -Size: 89490 + +Architecture: i386 +Architecture: i386 +Depends: libc6-udeb (>= 2.13), libusb-0.1-udeb, zlib1g-udeb (>= 1:1.2.3.3.dfsg-1) +Depends: libc6-udeb (>= 2.13), zlib1g-udeb (>= 1:1.2.3.3.dfsg-1) +Description-Md5: 0d5b74cda45e2a6526c6943d2cd0c362 +Description-Md5: c52656f4cc79fd4b089086b9173d2923 +Description: GNU privacy guard - a free PGP replacement +Description: minimal signature verification tool +Filename: pool/main/g/gnupg/gnupg-udeb_1.4.12-7+deb7u7_i386.udeb +Filename: pool/main/g/gnupg/gpgv-udeb_1.4.12-7+deb7u7_i386.udeb +Installed-Size: 275 +Installed-Size: 783 +MD5sum: 1abee98b231ab5b25dd7976ab61247cf +MD5sum: e12304db5e3c3401e64ad5967a5c9064 +Maintainer: Debian GnuPG-Maintainers +Maintainer: Debian GnuPG-Maintainers +Package: gnupg-udeb +Package: gpgv-udeb +Priority: extra +Priority: extra +SHA1: e64cb327e89ba41ba6aaeca7e9e69cf18479ed40 +SHA1: f6937084ae96b269131a08bb365619e704f91d21 +SHA256: 7d86005e0f2a7bdeff3204ccb0e50d6d06b07011621acb56ad322480bd11494c +SHA256: 96eae21eb31fa79d196dfbec63594f62c39753aad59d02d69bf9495ad486ec01 +Section: debian-installer +Section: debian-installer +Size: 125582 +Size: 343860 +Source: gnupg +Source: gnupg +Version: 1.4.12-7+deb7u7 +Version: 1.4.12-7+deb7u7 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_release 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -1,59 +1,11 @@ -Origin: . squeeze -Label: . squeeze -Suite: squeeze -Codename: squeeze -Date: Tue, 30 Sep 2014 15:35:22 UTC +Origin: . wheezy +Label: . wheezy +Suite: wheezy +Codename: wheezy Architectures: amd64 i386 Components: main Description: Generated by aptly MD5Sum: - a75ee7a5106ba4369de928e26b7afefd 803 main/debian-installer/binary-i386/Packages.bz2 - d82f063b0a674ee60d070fc960c33c92 677 main/debian-installer/binary-amd64/Packages.gz - 8b51fb682910e0d52caa31b61ef1192a 807 main/debian-installer/binary-amd64/Packages.bz2 - a77ec46f63b69e32fdf3a5aa484c1190 1592 main/binary-i386/Packages.bz2 - 9efff4ebb46b70b71215a8df4f71069d 88 main/binary-amd64/Release - d9d38d0cff22f7364cbabb4e8b536316 87 main/debian-installer/binary-i386/Release - 0eaacc9b677879735bcc958c2e24c699 1395 main/binary-i386/Packages.gz - e1c910470349056521dbc4d473a48637 677 main/debian-installer/binary-i386/Packages.gz - d9d38d0cff22f7364cbabb4e8b536316 87 main/binary-i386/Release - 1093e4c5170235ac5cc872f985088815 3669 main/binary-amd64/Packages - c4b9d1069fcb04fdad832a657ff02ef3 3663 main/binary-i386/Packages - b58a784bc0764d523fd9134b53c8dda0 1585 main/binary-amd64/Packages.bz2 - 9ac58b6597a8e0344d69c2550aca9720 1601 main/debian-installer/binary-i386/Packages - f940214380907f004b1e175a6c20bf07 1603 main/debian-installer/binary-amd64/Packages - 9efff4ebb46b70b71215a8df4f71069d 88 main/debian-installer/binary-amd64/Release - 703b425641f4e847a1f0a8a0c28fb128 1394 main/binary-amd64/Packages.gz SHA1: - a0c5944608dc219fad9d799b3fa6aae280d331c0 803 main/debian-installer/binary-i386/Packages.bz2 - 5faf018385934f65a6af0c4ab3af2fda62c63aff 677 main/debian-installer/binary-amd64/Packages.gz - 61c9b82f75a642839e6e32e5a734f890417b1160 807 main/debian-installer/binary-amd64/Packages.bz2 - e69414d40bb79bca8dc1b274ceb42fb04c3d02ee 1592 main/binary-i386/Packages.bz2 - 
7c25a15429615225e3eb90540ba783561fc09448 88 main/binary-amd64/Release - f07fcb0797d81341b6284ed86e5903dc57341a90 87 main/debian-installer/binary-i386/Release - a8657c2409859da9f91280a5da48f3b5276e2829 1395 main/binary-i386/Packages.gz - b8e5b5b41a6ded99006a94c0550cd2291ac19d7f 677 main/debian-installer/binary-i386/Packages.gz - f07fcb0797d81341b6284ed86e5903dc57341a90 87 main/binary-i386/Release - 0c86f7bd6ed2b52b0ab12ea08a76d14235b85d7c 3669 main/binary-amd64/Packages - 4227cdcd3260e10eee066182f22ec8eec4fc7f0a 3663 main/binary-i386/Packages - 8cec67723e4cee24f67ffa46a1f4ae7165fb31f0 1585 main/binary-amd64/Packages.bz2 - ae94f4b0b3396951399de65e04784ef7b0f95119 1601 main/debian-installer/binary-i386/Packages - 6f8e5137388e594b31bed56ca9e08f8e9f305ca4 1603 main/debian-installer/binary-amd64/Packages - 7c25a15429615225e3eb90540ba783561fc09448 88 main/debian-installer/binary-amd64/Release - 163a7a656c5e338d53bbc6cbe80263ca551dfa15 1394 main/binary-amd64/Packages.gz SHA256: - 4f8eeab36071b8791ce74099df89e01d46ab66f3c76dd9afe6c31fe48c30783d 803 main/debian-installer/binary-i386/Packages.bz2 - bf7b96d1c66abb7dc6037299ab4fe0119d42b66c8c01cfa0520e27d813c99e50 677 main/debian-installer/binary-amd64/Packages.gz - 3a30d9da1ed1108d3451c0c7fe60d99594a2cdf2459a8e505920ed69043bdc6c 807 main/debian-installer/binary-amd64/Packages.bz2 - 1d947dcc40ad2ace3b8226b68161948478a187eb9865d4b62c5068200e0ec058 1592 main/binary-i386/Packages.bz2 - e8378aced6fec291729f656e1d884225ec9c28ba67fc434ef2531223bc37033e 88 main/binary-amd64/Release - 62b9292134aefb30a75aff3e25c2c694d128d73a1d193f29a397789dd902a854 87 main/debian-installer/binary-i386/Release - e30a8b568654e69f1fe7744ace4ffb0d385a8e52502ffd9f84a8184130386a08 1395 main/binary-i386/Packages.gz - f6f2350eab308eb2f290b98f088e973e70ded5d1244688b71edfb201ac85e832 677 main/debian-installer/binary-i386/Packages.gz - 62b9292134aefb30a75aff3e25c2c694d128d73a1d193f29a397789dd902a854 87 main/binary-i386/Release - 
e2d936cb65a504e6bf13bb09c5a0c6e8943cdd7845d715d571b1fb58262a624f 3669 main/binary-amd64/Packages - 14ae70d15fa8263b55056ef36bac9208ee9e03847118788cc00b6d2a46b5fa10 3663 main/binary-i386/Packages - 0128db3912e0e2f92b2e3a277c28239d6e072323b35bc007dbf32bc696df413c 1585 main/binary-amd64/Packages.bz2 - c3f2708d36c503619f5b3f43b2c7da3f559b72f723c96d0ce9c664f92c6fcc14 1601 main/debian-installer/binary-i386/Packages - 1f90f76bc0df9a588940d14f3ee0ad7d26a86809537f2e5ff4d340e4a8a21f3d 1603 main/debian-installer/binary-amd64/Packages - e8378aced6fec291729f656e1d884225ec9c28ba67fc434ef2531223bc37033e 88 main/debian-installer/binary-amd64/Release - e179f48a91a8dc614a37e2fb21d8d82ff3937fd44e077ec0e2507b8382d896ab 1394 main/binary-amd64/Packages.gz +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_release_udeb_i386 aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_release_udeb_i386 --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_release_udeb_i386 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot35Test_release_udeb_i386 2016-05-24 07:05:22.000000000 +0000 @@ -1,5 +1,5 @@ -Origin: . squeeze -Label: . squeeze -Archive: squeeze +Origin: . wheezy +Label: . 
wheezy +Archive: wheezy Architecture: i386 Component: main diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot3Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot3Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot3Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot3Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot4Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot4Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot4Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSnapshot4Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSwitch1Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSwitch1Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSwitch1Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSwitch1Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSwitch8Test_binaryC aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSwitch8Test_binaryC --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSwitch8Test_binaryC 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSwitch8Test_binaryC 2016-05-24 07:05:22.000000000 +0000 @@ -20,6 +20,7 @@ Priority: optional SHA1: 
36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c Section: libdevel Size: 2738 Source: boost-defaults diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSwitch8Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSwitch8Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishSwitch8Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishSwitch8Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate1Test_binary aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate1Test_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate1Test_binary 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate1Test_binary 2016-05-24 07:05:22.000000000 +0000 @@ -20,6 +20,7 @@ Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c Section: libdevel Size: 2738 Source: boost-defaults diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate1Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate1Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate1Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate1Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -8,3 +8,4 @@ 
MD5Sum: SHA1: SHA256: +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate2Test_binary aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate2Test_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate2Test_binary 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate2Test_binary 2016-05-24 07:05:22.000000000 +0000 @@ -1,4 +1,5 @@ + (name, value) pairs from the user, via conventional methods such as . . @@ -19,7 +20,8 @@ Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c Section: libdevel Size: 2738 Source: boost-defaults -Version: 1.49.0.1 +Version: 1.49.0.1 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate2Test_sources aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate2Test_sources --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate2Test_sources 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate2Test_sources 2016-05-24 07:05:22.000000000 +0000 @@ -3,6 +3,7 @@ 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + 262cac59a2e81c7f110851ff9670c97ffc3d192d9937b880422a0907f26340d43e7de7e68b904a4fb10bedb02b65c3bd1f7bdd20ea8c4293e690e7a8e0e70ee5 893 pyspi-0.6.1-1.3.stripped.dsc 289d3aefa970876e9c43686ce2b02f478d7f3ed35a713928464a98d54ae4fca3 893 pyspi-0.6.1-1.3.stripped.dsc 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz @@ -19,6 +20,7 @@ 
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz + fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc Architecture: any Architecture: any Binary: python-at-spi @@ -29,6 +31,8 @@ Checksums-Sha1: Checksums-Sha256: Checksums-Sha256: +Checksums-Sha512: +Checksums-Sha512: Directory: pool/main/p/pyspi Directory: pool/main/p/pyspi Files: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate7Test_binary2 aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate7Test_binary2 --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate7Test_binary2 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate7Test_binary2 2016-05-24 07:05:22.000000000 +0000 @@ -20,6 +20,7 @@ Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c Section: libdevel Size: 2738 Source: boost-defaults diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate7Test_sources aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate7Test_sources --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/PublishUpdate7Test_sources 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/PublishUpdate7Test_sources 2016-05-24 07:05:22.000000000 +0000 @@ -3,6 +3,7 @@ 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + 
262cac59a2e81c7f110851ff9670c97ffc3d192d9937b880422a0907f26340d43e7de7e68b904a4fb10bedb02b65c3bd1f7bdd20ea8c4293e690e7a8e0e70ee5 893 pyspi-0.6.1-1.3.stripped.dsc 289d3aefa970876e9c43686ce2b02f478d7f3ed35a713928464a98d54ae4fca3 893 pyspi-0.6.1-1.3.stripped.dsc 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz @@ -19,6 +20,7 @@ d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz + fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc Architecture: any Architecture: any Binary: python-at-spi @@ -29,6 +31,8 @@ Checksums-Sha1: Checksums-Sha256: Checksums-Sha256: +Checksums-Sha512: +Checksums-Sha512: Directory: pool/main/p/pyspi Directory: pool/main/p/pyspi Files: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/repo.py aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/repo.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/repo.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/repo.py 2016-05-24 07:05:22.000000000 +0000 @@ -79,8 +79,10 @@ h = hashlib.md5() elif len(fileHash) == 40: h = hashlib.sha1() - else: + elif len(fileHash) == 64: h = hashlib.sha256() + else: + h = hashlib.sha512() h.update(self.read_file(os.path.join('public/dists/maverick', path))) @@ -471,8 +473,10 @@ h = hashlib.md5() elif len(fileHash) == 40: h = hashlib.sha1() - else: + elif len(fileHash) == 64: h = hashlib.sha256() + else: + h = hashlib.sha512() h.update(self.read_file(os.path.join('public/dists/maverick', path))) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_binary 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_binary 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_binary 2016-05-24 07:05:22.000000000 +0000 @@ -1,25 +1,27 @@ -Package: libboost-program-options-dev -Version: 1.49.0.1 -Installed-Size: 26 -Priority: optional -Section: libdevel -Maintainer: Debian Boost Team -Architecture: i386 -Description: program options library for C++ (default version) - This package forms part of the Boost C++ Libraries collection. - . - Library to let program developers obtain program options, that is + + (name, value) pairs from the user, via conventional methods such as - command line and config file. . - This package is a dependency package, which depends on Debian's default + . Boost version (currently 1.49). + Library to let program developers obtain program options, that is + This package forms part of the Boost C++ Libraries collection. + This package is a dependency package, which depends on Debian's default + command line and config file. 
+Architecture: i386 +Depends: libboost-program-options1.49-dev +Description: program options library for C++ (default version) +Filename: pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb +Homepage: http://www.boost.org/libs/program_options/ +Installed-Size: 26 MD5sum: 0035d7822b2f8f0ec4013f270fd650c2 +Maintainer: Debian Boost Team +Package: libboost-program-options-dev +Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 -Filename: pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c +Section: libdevel Size: 2738 -Homepage: http://www.boost.org/libs/program_options/ Source: boost-defaults -Depends: libboost-program-options1.49-dev - +Version: 1.49.0.1 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -2,34 +2,10 @@ Label: . 
maverick Suite: maverick Codename: maverick -Date: Wed, 1 Oct 2014 08:48:48 UTC Architectures: i386 Components: main Description: Generated by aptly MD5Sum: - b844530d1336e9a3c431f0d36cfc01b0 602 main/binary-i386/Packages.gz - 1d7829dac8923aafe499f313abfaadd7 652 main/binary-i386/Packages.bz2 - 307b6495eab59c221e2ff8962896631b 2300 main/source/Sources - 65dd7338cfac70762457b586629e87e4 839 main/source/Sources.gz - 5cc219da21fdb8a96b265bca1c4c0808 1009 main/source/Sources.bz2 - 60b30b7b0c62ae04bb3bc457abadaced 90 main/binary-i386/Release - 945211dc923a8d1b97835232648c0aa7 92 main/source/Release - d419bd11e2b7fe9669bccdf67a18ca17 984 main/binary-i386/Packages SHA1: - 1b314cedcf18a6d08d4aabbd8b9b5605ba293d04 602 main/binary-i386/Packages.gz - 5406a984c100b20fbebacdbac24ae3378885f73b 652 main/binary-i386/Packages.bz2 - e30d7bc51cd042ee987316967bf3043ab95c8ce9 2300 main/source/Sources - d60a7032080848eb48bcf68962698ba642dcc383 839 main/source/Sources.gz - fb194b90e0e0efd456a7346c4224294018b6677d 1009 main/source/Sources.bz2 - 2bfef2580deadf6863ee6f893e8b9a2c7522e1ed 90 main/binary-i386/Release - 8b98a2148d157bf87cc1955ef00ba1ba31275f94 92 main/source/Release - be80e1c588c6052f30865e44e3f1429f730d5bc8 984 main/binary-i386/Packages SHA256: - a079102fdc72e6228229aaa8e5e6ad59b582026419737e81e11a8af2addd125e 602 main/binary-i386/Packages.gz - 25d101a333e85d952afc74f684cef3716d69e3c33d8a4b1544faec683c1b5d96 652 main/binary-i386/Packages.bz2 - bcf1fcf1ca2d1bb5565da8b4c39052d906832ad4885c21682d605b830e55a506 2300 main/source/Sources - 3e6cf6dc079333cdf01905957c611702f4ee10f654c84895ac7bf166bbbbd3bc 839 main/source/Sources.gz - 47b9d37fa81d23d227dd26e85821dd4f74db8f17ddefbe6ca686f62ddfedd8ad 1009 main/source/Sources.bz2 - 1d91164164e6310a5e5fc93390995028956f657490a9ce7aa136dc94291828a8 90 main/binary-i386/Release - 2d75333511325affcefe66c6cfbaa6ab21e6aa0e85a6b4fa39a4191146b81460 92 main/source/Release - 59643cc2d105694d6876dc328290a1c949b4e91e62ee8db396abac83a7034f9f 984 
main/binary-i386/Packages +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_sources aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_sources --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_sources 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish1Test_sources 2016-05-24 07:05:22.000000000 +0000 @@ -1,48 +1,52 @@ -Package: pyspi -Version: 0.6.1-1.3 -Maintainer: Jose Carlos Garcia Sogo -Architecture: any -Binary: python-at-spi -Standards-Version: 3.7.3 -Format: 1.0 -Files: - 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz - b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc - def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz -Checksums-Sha1: - 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz - 56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc - 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz -Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk -Homepage: http://people.redhat.com/zcerza/dogtail -Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev -Directory: pool/main/p/pyspi -Checksums-Sha256: - 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz - d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc - 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz -Package: pyspi -Version: 0.6.1-1.4 -Maintainer: Jose Carlos Garcia Sogo -Architecture: any -Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk -Standards-Version: 3.7.3 -Homepage: http://people.redhat.com/zcerza/dogtail -Directory: pool/main/p/pyspi -Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev 
-Checksums-Sha256: + + 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + 262cac59a2e81c7f110851ff9670c97ffc3d192d9937b880422a0907f26340d43e7de7e68b904a4fb10bedb02b65c3bd1f7bdd20ea8c4293e690e7a8e0e70ee5 893 pyspi-0.6.1-1.3.stripped.dsc 289d3aefa970876e9c43686ce2b02f478d7f3ed35a713928464a98d54ae4fca3 893 pyspi-0.6.1-1.3.stripped.dsc 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz - 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz -Format: 1.0 -Checksums-Sha1: + 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz + 2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc 5005fbd1f30637edc1d380b30f45db9b79100d07 893 pyspi-0.6.1-1.3.stripped.dsc + 56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc + 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz + 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz + 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz + 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz + b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc + d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc + def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz + def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz + fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc +Architecture: any +Architecture: any +Binary: python-at-spi Binary: python-at-spi +Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, 
python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev +Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev +Checksums-Sha1: +Checksums-Sha1: +Checksums-Sha256: +Checksums-Sha256: +Checksums-Sha512: +Checksums-Sha512: +Directory: pool/main/p/pyspi +Directory: pool/main/p/pyspi Files: - 2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc - 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz - def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz - +Files: +Format: 1.0 +Format: 1.0 +Homepage: http://people.redhat.com/zcerza/dogtail +Homepage: http://people.redhat.com/zcerza/dogtail +Maintainer: Jose Carlos Garcia Sogo +Maintainer: Jose Carlos Garcia Sogo +Package: pyspi +Package: pyspi +Standards-Version: 3.7.3 +Standards-Version: 3.7.3 +Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk +Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk +Version: 0.6.1-1.3 +Version: 0.6.1-1.4 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish2Test_binary aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish2Test_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish2Test_binary 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish2Test_binary 2016-05-24 07:05:22.000000000 +0000 @@ -1,25 +1,27 @@ -Package: libboost-program-options-dev -Version: 1.49.0.1 -Installed-Size: 26 -Priority: optional -Section: libdevel -Maintainer: Debian Boost Team -Architecture: i386 -Description: program options library for C++ (default version) - This package forms part of the Boost C++ Libraries collection. - . - Library to let program developers obtain program options, that is + + (name, value) pairs from the user, via conventional methods such as - command line and config file. . - This package is a dependency package, which depends on Debian's default + . 
Boost version (currently 1.49). + Library to let program developers obtain program options, that is + This package forms part of the Boost C++ Libraries collection. + This package is a dependency package, which depends on Debian's default + command line and config file. +Architecture: i386 +Depends: libboost-program-options1.49-dev +Description: program options library for C++ (default version) +Filename: pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb +Homepage: http://www.boost.org/libs/program_options/ +Installed-Size: 26 MD5sum: 0035d7822b2f8f0ec4013f270fd650c2 +Maintainer: Debian Boost Team +Package: libboost-program-options-dev +Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 -Source: boost-defaults -Filename: pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb -Depends: libboost-program-options1.49-dev -Homepage: http://www.boost.org/libs/program_options/ +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c +Section: libdevel Size: 2738 - +Source: boost-defaults +Version: 1.49.0.1 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish2Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish2Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish2Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish2Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -2,34 +2,10 @@ Label: . 
maverick Suite: maverick Codename: maverick -Date: Wed, 1 Oct 2014 09:13:14 UTC Architectures: i386 Components: main Description: Generated by aptly MD5Sum: - d41d8cd98f00b204e9800998ecf8427e 0 main/source/Sources - f41c10a4b35cd3e1ec8abb9c2ab676ed 23 main/source/Sources.gz - 4059d198768f9f8dc9372dc1c54bc3c3 14 main/source/Sources.bz2 - 60b30b7b0c62ae04bb3bc457abadaced 90 main/binary-i386/Release - 945211dc923a8d1b97835232648c0aa7 92 main/source/Release - db76ccafa3c9e4c1dba620259df78f87 984 main/binary-i386/Packages - d666eb8b2fc8a0ef525d37aff33c7b2f 603 main/binary-i386/Packages.gz - ca2b3a9fc60f4a0a1091b9f0357b11eb 651 main/binary-i386/Packages.bz2 SHA1: - da39a3ee5e6b4b0d3255bfef95601890afd80709 0 main/source/Sources - 92c6cff562771f64540523a54baaa0b2afe54b3f 23 main/source/Sources.gz - 64a543afbb5f4bf728636bdcbbe7a2ed0804adc2 14 main/source/Sources.bz2 - 2bfef2580deadf6863ee6f893e8b9a2c7522e1ed 90 main/binary-i386/Release - 8b98a2148d157bf87cc1955ef00ba1ba31275f94 92 main/source/Release - 7dcfa6945771369da0a22c2f90f2300b5d238662 984 main/binary-i386/Packages - ba6efb87b17aa8d08476b3f181702e4d3199794e 603 main/binary-i386/Packages.gz - 0b36a014d1a5ccbf3d73de0035970737659e3c0f 651 main/binary-i386/Packages.bz2 SHA256: - e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 0 main/source/Sources - 1775fca35fb6a4d31c541746eaea63c5cb3c00280c8b5a351d4e944cdca7489d 23 main/source/Sources.gz - d3dda84eb03b9738d118eb2be78e246106900493c0ae07819ad60815134a8058 14 main/source/Sources.bz2 - 1d91164164e6310a5e5fc93390995028956f657490a9ce7aa136dc94291828a8 90 main/binary-i386/Release - 2d75333511325affcefe66c6cfbaa6ab21e6aa0e85a6b4fa39a4191146b81460 92 main/source/Release - 0e2e7586903004efb49dd419be8a98260dab502352c4b1bf6074f658220aef4e 984 main/binary-i386/Packages - e2bd1d551b4983253cc26004504ead7b6987e609db8cb7185ab3dde69d346acd 603 main/binary-i386/Packages.gz - 81bcd3d47fc3e9dbe1e201d7ec1b356dd2ae3bc5c171f76247243a64755c25d6 651 main/binary-i386/Packages.bz2 
+SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish3Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish3Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/S3Publish3Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/S3Publish3Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -2,34 +2,10 @@ Label: . maverick Suite: maverick Codename: maverick -Date: Wed, 1 Oct 2014 09:16:49 UTC Architectures: amd64 i386 Components: main Description: Generated by aptly MD5Sum: - 4717e26fc4a8703cd8886feb8ff9532d 91 main/binary-amd64/Release - 60b30b7b0c62ae04bb3bc457abadaced 90 main/binary-i386/Release - 2b810443a56c38746aba877b84fc74a1 1526 main/binary-amd64/Packages - 28bced4c89869001d9fe6b7c553dd1df 862 main/binary-amd64/Packages.gz - aaa2ee36bda75a9c66e31881ae128016 931 main/binary-amd64/Packages.bz2 - aac26f9e4705d03000094f76d475aea2 1524 main/binary-i386/Packages - 158aec0342fc4ca52178b4512c5ee1b5 862 main/binary-i386/Packages.gz - 34859d0bf49cb66045de43d01b1de311 939 main/binary-i386/Packages.bz2 SHA1: - 93c9982ebbb6a74a118d07e500b596097c8c4780 91 main/binary-amd64/Release - 2bfef2580deadf6863ee6f893e8b9a2c7522e1ed 90 main/binary-i386/Release - 876cafdad8672c4b0b66baec5b12213d2bcb4cf3 1526 main/binary-amd64/Packages - b3e2e9ad945a190e2ce4aeb36d1946d9ad04a075 862 main/binary-amd64/Packages.gz - bc8a7022261b79f5aeacdca551c51aeb7530b969 931 main/binary-amd64/Packages.bz2 - 7eca65cdb4a4a6bcb51747f2c8d4829f4457f22b 1524 main/binary-i386/Packages - e1f5ab02bdd1fcaa0ab93c5680919f612692992c 862 main/binary-i386/Packages.gz - 8a7f311f39316dcedc8a199421116ba92a941028 939 main/binary-i386/Packages.bz2 SHA256: - 73aa8d6aaf47a1bf3c546869ceb09a882a8c2d840f81878e552fe2d1260ac4e2 91 main/binary-amd64/Release - 1d91164164e6310a5e5fc93390995028956f657490a9ce7aa136dc94291828a8 90 main/binary-i386/Release - 
f47ca8ea0dc02b4423b1291b302e5594c0ac5c01da72c6f9de1ae17d3eddef2f 1526 main/binary-amd64/Packages - 0a939f23e1ed98ec3cf2033eb5665d4c40e7494d6331f453ac2043be3e234897 862 main/binary-amd64/Packages.gz - abdb8e2537c11272fc9f70ccbcbd2ee867ae797666d3bf11a51972fa2f4d0325 931 main/binary-amd64/Packages.bz2 - 7b1e711ab4647a3e200af742690ffee76bcf7244f597fda699495e29177b1c71 1524 main/binary-i386/Packages - 5723a156f299c657b2eebd1c17ff1a0ca3f50036fc9a1b6c7d9f985a1841c171 862 main/binary-i386/Packages.gz - 41f396a3b5c7f78d743971a1011706c6782c8abac3168ff862fa301255baa040 939 main/binary-i386/Packages.bz2 +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/snapshot.py aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/snapshot.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/snapshot.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/snapshot.py 2016-05-24 07:05:22.000000000 +0000 @@ -91,8 +91,10 @@ h = hashlib.md5() elif len(fileHash) == 40: h = hashlib.sha1() - else: + elif len(fileHash) == 64: h = hashlib.sha256() + else: + h = hashlib.sha512() h.update(self.read_file(os.path.join('public/dists/maverick', path))) @@ -731,8 +733,10 @@ h = hashlib.md5() elif len(fileHash) == 40: h = hashlib.sha1() - else: + elif len(fileHash) == 64: h = hashlib.sha256() + else: + h = hashlib.sha512() h.update(self.read_file(os.path.join('public/dists/maverick', path))) @@ -878,61 +882,61 @@ """ fixtureGpg = True fixtureCmds = [ - "aptly -architectures=i386,amd64 mirror create -keyring=aptlytest.gpg -filter='$$Source (dmraid)' -with-udebs squeeze http://mirror.yandex.ru/debian/ squeeze main non-free", - "aptly mirror update -keyring=aptlytest.gpg squeeze", - "aptly snapshot create squeeze from mirror squeeze", + "aptly -architectures=i386,amd64 mirror create -keyring=aptlytest.gpg -filter='$$Source (gnupg)' -with-udebs wheezy http://mirror.yandex.ru/debian/ wheezy main non-free", + "aptly mirror 
update -keyring=aptlytest.gpg wheezy", + "aptly snapshot create wheezy from mirror wheezy", ] - runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec squeeze" + runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec wheezy" gold_processor = BaseTest.expand_environ def check(self): super(PublishSnapshot35Test, self).check() - self.check_exists('public/dists/squeeze/InRelease') - self.check_exists('public/dists/squeeze/Release') - self.check_exists('public/dists/squeeze/Release.gpg') - - self.check_exists('public/dists/squeeze/main/binary-i386/Release') - self.check_exists('public/dists/squeeze/main/binary-i386/Packages') - self.check_exists('public/dists/squeeze/main/binary-i386/Packages.gz') - self.check_exists('public/dists/squeeze/main/binary-i386/Packages.bz2') - self.check_exists('public/dists/squeeze/main/Contents-i386.gz') - self.check_exists('public/dists/squeeze/main/debian-installer/binary-i386/Release') - self.check_exists('public/dists/squeeze/main/debian-installer/binary-i386/Packages') - self.check_exists('public/dists/squeeze/main/debian-installer/binary-i386/Packages.gz') - self.check_exists('public/dists/squeeze/main/debian-installer/binary-i386/Packages.bz2') - self.check_exists('public/dists/squeeze/main/Contents-udeb-i386.gz') - self.check_exists('public/dists/squeeze/main/binary-amd64/Release') - self.check_exists('public/dists/squeeze/main/binary-amd64/Packages') - self.check_exists('public/dists/squeeze/main/binary-amd64/Packages.gz') - self.check_exists('public/dists/squeeze/main/binary-amd64/Packages.bz2') - self.check_exists('public/dists/squeeze/main/Contents-amd64.gz') - self.check_exists('public/dists/squeeze/main/debian-installer/binary-amd64/Release') - self.check_exists('public/dists/squeeze/main/debian-installer/binary-amd64/Packages') - self.check_exists('public/dists/squeeze/main/debian-installer/binary-amd64/Packages.gz') - 
self.check_exists('public/dists/squeeze/main/debian-installer/binary-amd64/Packages.bz2') - self.check_exists('public/dists/squeeze/main/Contents-udeb-amd64.gz') - self.check_not_exists('public/dists/squeeze/main/source/Sources') - self.check_not_exists('public/dists/squeeze/main/source/Sources.gz') - self.check_not_exists('public/dists/squeeze/main/source/Sources.bz2') - - self.check_exists('public/pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb') - self.check_exists('public/pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_i386.udeb') - self.check_exists('public/pool/main/d/dmraid/dmraid_1.0.0.rc16-4.1_amd64.deb') - self.check_exists('public/pool/main/d/dmraid/dmraid_1.0.0.rc16-4.1_i386.deb') - - self.check_file_contents('public/dists/squeeze/main/binary-i386/Packages', 'packages_i386', match_prepare=sorted_processor) - self.check_file_contents('public/dists/squeeze/main/debian-installer/binary-i386/Packages', 'packages_udeb_i386', match_prepare=sorted_processor) - self.check_file_contents('public/dists/squeeze/main/binary-amd64/Packages', 'packages_amd64', match_prepare=sorted_processor) - self.check_file_contents('public/dists/squeeze/main/debian-installer/binary-amd64/Packages', 'packages_udeb_amd64', match_prepare=sorted_processor) + self.check_exists('public/dists/wheezy/InRelease') + self.check_exists('public/dists/wheezy/Release') + self.check_exists('public/dists/wheezy/Release.gpg') + + self.check_exists('public/dists/wheezy/main/binary-i386/Release') + self.check_exists('public/dists/wheezy/main/binary-i386/Packages') + self.check_exists('public/dists/wheezy/main/binary-i386/Packages.gz') + self.check_exists('public/dists/wheezy/main/binary-i386/Packages.bz2') + self.check_exists('public/dists/wheezy/main/Contents-i386.gz') + self.check_exists('public/dists/wheezy/main/debian-installer/binary-i386/Release') + self.check_exists('public/dists/wheezy/main/debian-installer/binary-i386/Packages') + 
self.check_exists('public/dists/wheezy/main/debian-installer/binary-i386/Packages.gz') + self.check_exists('public/dists/wheezy/main/debian-installer/binary-i386/Packages.bz2') + self.check_exists('public/dists/wheezy/main/Contents-udeb-i386.gz') + self.check_exists('public/dists/wheezy/main/binary-amd64/Release') + self.check_exists('public/dists/wheezy/main/binary-amd64/Packages') + self.check_exists('public/dists/wheezy/main/binary-amd64/Packages.gz') + self.check_exists('public/dists/wheezy/main/binary-amd64/Packages.bz2') + self.check_exists('public/dists/wheezy/main/Contents-amd64.gz') + self.check_exists('public/dists/wheezy/main/debian-installer/binary-amd64/Release') + self.check_exists('public/dists/wheezy/main/debian-installer/binary-amd64/Packages') + self.check_exists('public/dists/wheezy/main/debian-installer/binary-amd64/Packages.gz') + self.check_exists('public/dists/wheezy/main/debian-installer/binary-amd64/Packages.bz2') + self.check_exists('public/dists/wheezy/main/Contents-udeb-amd64.gz') + self.check_not_exists('public/dists/wheezy/main/source/Sources') + self.check_not_exists('public/dists/wheezy/main/source/Sources.gz') + self.check_not_exists('public/dists/wheezy/main/source/Sources.bz2') + + self.check_exists('public/pool/main/g/gnupg/gpgv-udeb_1.4.12-7+deb7u7_amd64.udeb') + self.check_exists('public/pool/main/g/gnupg/gpgv-udeb_1.4.12-7+deb7u7_i386.udeb') + self.check_exists('public/pool/main/g/gnupg/gpgv_1.4.12-7+deb7u7_amd64.deb') + self.check_exists('public/pool/main/g/gnupg/gpgv_1.4.12-7+deb7u7_i386.deb') + + self.check_file_contents('public/dists/wheezy/main/binary-i386/Packages', 'packages_i386', match_prepare=sorted_processor) + self.check_file_contents('public/dists/wheezy/main/debian-installer/binary-i386/Packages', 'packages_udeb_i386', match_prepare=sorted_processor) + self.check_file_contents('public/dists/wheezy/main/binary-amd64/Packages', 'packages_amd64', match_prepare=sorted_processor) + 
self.check_file_contents('public/dists/wheezy/main/debian-installer/binary-amd64/Packages', 'packages_udeb_amd64', match_prepare=sorted_processor) # verify contents except of sums - self.check_file_contents('public/dists/squeeze/Release', 'release', match_prepare=strip_processor) + self.check_file_contents('public/dists/wheezy/Release', 'release', match_prepare=strip_processor) - self.check_file_contents('public/dists/squeeze/main/debian-installer/binary-i386/Release', 'release_udeb_i386', match_prepare=strip_processor) + self.check_file_contents('public/dists/wheezy/main/debian-installer/binary-i386/Release', 'release_udeb_i386', match_prepare=strip_processor) # verify sums - release = self.read_file('public/dists/squeeze/Release').split("\n") + release = self.read_file('public/dists/wheezy/Release').split("\n") release = [l for l in release if l.startswith(" ")] pathsSeen = set() for l in release: @@ -941,7 +945,7 @@ fileSize = int(fileSize) - st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/squeeze/', path)) + st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/wheezy/', path)) if fileSize != st.st_size: raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size)) @@ -949,10 +953,12 @@ h = hashlib.md5() elif len(fileHash) == 40: h = hashlib.sha1() - else: + elif len(fileHash) == 64: h = hashlib.sha256() + else: + h = hashlib.sha512() - h.update(self.read_file(os.path.join('public/dists/squeeze', path))) + h.update(self.read_file(os.path.join('public/dists/wheezy', path))) if h.hexdigest() != fileHash: raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest())) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_binary aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_binary 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_binary 2016-05-24 07:05:22.000000000 +0000 @@ -1,25 +1,27 @@ -Package: libboost-program-options-dev -Version: 1.49.0.1 -Installed-Size: 26 -Priority: optional -Section: libdevel -Maintainer: Debian Boost Team -Architecture: i386 -Description: program options library for C++ (default version) - This package forms part of the Boost C++ Libraries collection. - . - Library to let program developers obtain program options, that is + + (name, value) pairs from the user, via conventional methods such as - command line and config file. . - This package is a dependency package, which depends on Debian's default + . Boost version (currently 1.49). + Library to let program developers obtain program options, that is + This package forms part of the Boost C++ Libraries collection. + This package is a dependency package, which depends on Debian's default + command line and config file. +Architecture: i386 +Depends: libboost-program-options1.49-dev +Description: program options library for C++ (default version) +Filename: pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb +Homepage: http://www.boost.org/libs/program_options/ +Installed-Size: 26 MD5sum: 0035d7822b2f8f0ec4013f270fd650c2 +Maintainer: Debian Boost Team +Package: libboost-program-options-dev +Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 -Filename: pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c +Section: libdevel Size: 2738 -Homepage: http://www.boost.org/libs/program_options/ Source: boost-defaults -Depends: libboost-program-options1.49-dev - +Version: 1.49.0.1 \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -2,34 +2,10 @@ Label: . maverick Suite: maverick Codename: maverick -Date: Sun, 22 Feb 2015 10:44:27 UTC Architectures: i386 Components: main Description: Generated by aptly MD5Sum: - c002a2d87044f69fe3f535cd126db3ba 92 main/source/Release - c03724183668d701ac6aa300d13af562 984 main/binary-i386/Packages - 585addbc83368aeee669c9c4894aa85d 601 main/binary-i386/Packages.gz - c6622332526ac6c6b0e3945ed4977d92 650 main/binary-i386/Packages.bz2 - 43403263280667501151421b9cd7fc68 2300 main/source/Sources - 8f77bf834a8c9a30f5647b153d73dc88 828 main/source/Sources.gz - 30682c2ffbfcb8141af41fc88236ffcf 1013 main/source/Sources.bz2 - 2dea0f34410be57d2e41713f14dd047b 90 main/binary-i386/Release SHA1: - a4d68060e2ba2b00073d2b8848224b30c81a2060 92 main/source/Release - 5baf9f7d7ece54603fe839a414cd652d22da6ba2 984 main/binary-i386/Packages - 312c9eb461c3bc08d26355f4e233ea761573fad4 601 main/binary-i386/Packages.gz - 7f5a42f1cccde76e61db4785cc7e2d568366527f 650 main/binary-i386/Packages.bz2 - 7faccfe9d57725a221f7bcf24cb9f6fd15cfbc46 2300 main/source/Sources - e779570785b5be8be647cecbe2f9aee9593e9f22 828 main/source/Sources.gz - 5cadb5837aae91aa666b76030fdd89db9142aeda 1013 main/source/Sources.bz2 - 689d7b3d67cb1cc3d4a0e730a2c2462aa3344fef 90 main/binary-i386/Release SHA256: - 8fd77dcde4aacfdaca30f1c74ec058ffdb79ec741ec0c04647c90d927f74ced8 92 main/source/Release - e16e3432eaf9a48f782c2afc92263819d2295169d276635282a163cb4b8da073 984 main/binary-i386/Packages - 2ced8276178f296b76d85f2a2bd640d912923d0110b98c86899be1eac3f98afc 601 main/binary-i386/Packages.gz - 
97fbdadc2dfc8d26c9f4ea11642e208a3abe5d6868ba49b5c855ccfb54311ae3 650 main/binary-i386/Packages.bz2 - bef12367ae59c9bd0e4dfd35ff1a5b202fa74d32bf842ff973096f5397912f3f 2300 main/source/Sources - 085e855df6237384010d4147b80f600b190045ee9018dcecd64fa9441218f306 828 main/source/Sources.gz - c0c27f19c08ff311efd4611990959866464bdcb6ef73fdd4904a93992301299c 1013 main/source/Sources.bz2 - faa23b79fc6811f4eb4839e2a98bb023c9d70b815b1f39b1e54b8c42a0afc74e 90 main/binary-i386/Release +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_sources aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_sources --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_sources 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish1Test_sources 2016-05-24 07:05:22.000000000 +0000 @@ -1,48 +1,52 @@ -Package: pyspi -Version: 0.6.1-1.3 -Maintainer: Jose Carlos Garcia Sogo -Architecture: any -Binary: python-at-spi -Standards-Version: 3.7.3 -Format: 1.0 -Files: - 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz - b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc - def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz -Checksums-Sha1: - 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz - 56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc - 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz -Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk -Homepage: http://people.redhat.com/zcerza/dogtail -Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev -Directory: pool/main/p/pyspi -Checksums-Sha256: - 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz - d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc - 
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz -Package: pyspi -Version: 0.6.1-1.4 -Maintainer: Jose Carlos Garcia Sogo -Architecture: any -Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk -Standards-Version: 3.7.3 -Homepage: http://people.redhat.com/zcerza/dogtail -Directory: pool/main/p/pyspi -Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev -Checksums-Sha256: + + 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + 262cac59a2e81c7f110851ff9670c97ffc3d192d9937b880422a0907f26340d43e7de7e68b904a4fb10bedb02b65c3bd1f7bdd20ea8c4293e690e7a8e0e70ee5 893 pyspi-0.6.1-1.3.stripped.dsc 289d3aefa970876e9c43686ce2b02f478d7f3ed35a713928464a98d54ae4fca3 893 pyspi-0.6.1-1.3.stripped.dsc 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz - 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz -Format: 1.0 -Checksums-Sha1: + 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz + 2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc 5005fbd1f30637edc1d380b30f45db9b79100d07 893 pyspi-0.6.1-1.3.stripped.dsc + 56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc + 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz + 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz + 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz + 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz + b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc + 
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc + def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz + def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz + fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc +Architecture: any +Architecture: any +Binary: python-at-spi Binary: python-at-spi +Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev +Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev +Checksums-Sha1: +Checksums-Sha1: +Checksums-Sha256: +Checksums-Sha256: +Checksums-Sha512: +Checksums-Sha512: +Directory: pool/main/p/pyspi +Directory: pool/main/p/pyspi Files: - 2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc - 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz - def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz - +Files: +Format: 1.0 +Format: 1.0 +Homepage: http://people.redhat.com/zcerza/dogtail +Homepage: http://people.redhat.com/zcerza/dogtail +Maintainer: Jose Carlos Garcia Sogo +Maintainer: Jose Carlos Garcia Sogo +Package: pyspi +Package: pyspi +Standards-Version: 3.7.3 +Standards-Version: 3.7.3 +Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk +Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk +Version: 0.6.1-1.3 +Version: 0.6.1-1.4 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish2Test_binary aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish2Test_binary --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish2Test_binary 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish2Test_binary 2016-05-24 07:05:22.000000000 +0000 @@ -1,25 +1,27 @@ -Package: 
libboost-program-options-dev -Version: 1.49.0.1 -Installed-Size: 26 -Priority: optional -Section: libdevel -Maintainer: Debian Boost Team -Architecture: i386 -Description: program options library for C++ (default version) - This package forms part of the Boost C++ Libraries collection. - . - Library to let program developers obtain program options, that is + + (name, value) pairs from the user, via conventional methods such as - command line and config file. . - This package is a dependency package, which depends on Debian's default + . Boost version (currently 1.49). + Library to let program developers obtain program options, that is + This package forms part of the Boost C++ Libraries collection. + This package is a dependency package, which depends on Debian's default + command line and config file. +Architecture: i386 +Depends: libboost-program-options1.49-dev +Description: program options library for C++ (default version) +Filename: pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb +Homepage: http://www.boost.org/libs/program_options/ +Installed-Size: 26 MD5sum: 0035d7822b2f8f0ec4013f270fd650c2 +Maintainer: Debian Boost Team +Package: libboost-program-options-dev +Priority: optional SHA1: 36895eb64cfe89c33c0a2f7ac2f0c6e0e889e04b SHA256: c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12 -Source: boost-defaults -Filename: pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb -Depends: libboost-program-options1.49-dev -Homepage: http://www.boost.org/libs/program_options/ +SHA512: d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c +Section: libdevel Size: 2738 - +Source: boost-defaults +Version: 1.49.0.1 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish2Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish2Test_release --- 
aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish2Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish2Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -2,34 +2,10 @@ Label: . maverick Suite: maverick Codename: maverick -Date: Sun, 22 Feb 2015 10:44:40 UTC Architectures: i386 Components: main Description: Generated by aptly MD5Sum: - 585addbc83368aeee669c9c4894aa85d 601 main/binary-i386/Packages.gz - c6622332526ac6c6b0e3945ed4977d92 650 main/binary-i386/Packages.bz2 - d41d8cd98f00b204e9800998ecf8427e 0 main/source/Sources - f41c10a4b35cd3e1ec8abb9c2ab676ed 23 main/source/Sources.gz - 4059d198768f9f8dc9372dc1c54bc3c3 14 main/source/Sources.bz2 - 2dea0f34410be57d2e41713f14dd047b 90 main/binary-i386/Release - c002a2d87044f69fe3f535cd126db3ba 92 main/source/Release - c03724183668d701ac6aa300d13af562 984 main/binary-i386/Packages SHA1: - 312c9eb461c3bc08d26355f4e233ea761573fad4 601 main/binary-i386/Packages.gz - 7f5a42f1cccde76e61db4785cc7e2d568366527f 650 main/binary-i386/Packages.bz2 - da39a3ee5e6b4b0d3255bfef95601890afd80709 0 main/source/Sources - 92c6cff562771f64540523a54baaa0b2afe54b3f 23 main/source/Sources.gz - 64a543afbb5f4bf728636bdcbbe7a2ed0804adc2 14 main/source/Sources.bz2 - 689d7b3d67cb1cc3d4a0e730a2c2462aa3344fef 90 main/binary-i386/Release - a4d68060e2ba2b00073d2b8848224b30c81a2060 92 main/source/Release - 5baf9f7d7ece54603fe839a414cd652d22da6ba2 984 main/binary-i386/Packages SHA256: - 2ced8276178f296b76d85f2a2bd640d912923d0110b98c86899be1eac3f98afc 601 main/binary-i386/Packages.gz - 97fbdadc2dfc8d26c9f4ea11642e208a3abe5d6868ba49b5c855ccfb54311ae3 650 main/binary-i386/Packages.bz2 - e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 0 main/source/Sources - 1775fca35fb6a4d31c541746eaea63c5cb3c00280c8b5a351d4e944cdca7489d 23 main/source/Sources.gz - d3dda84eb03b9738d118eb2be78e246106900493c0ae07819ad60815134a8058 14 main/source/Sources.bz2 - 
faa23b79fc6811f4eb4839e2a98bb023c9d70b815b1f39b1e54b8c42a0afc74e 90 main/binary-i386/Release - 8fd77dcde4aacfdaca30f1c74ec058ffdb79ec741ec0c04647c90d927f74ced8 92 main/source/Release - e16e3432eaf9a48f782c2afc92263819d2295169d276635282a163cb4b8da073 984 main/binary-i386/Packages +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish3Test_release aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish3Test_release --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/SwiftPublish3Test_release 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/SwiftPublish3Test_release 2016-05-24 07:05:22.000000000 +0000 @@ -2,34 +2,10 @@ Label: . maverick Suite: maverick Codename: maverick -Date: Sun, 22 Feb 2015 10:45:11 UTC Architectures: amd64 i386 Components: main Description: Generated by aptly MD5Sum: - ed4a96d0034fa903b943468d1a7cb73b 91 main/binary-amd64/Release - 2dea0f34410be57d2e41713f14dd047b 90 main/binary-i386/Release - 9fe269c89d1aac6bceeef8d9df4b3735 1526 main/binary-amd64/Packages - a6f1ebab83ff467727220c85cb3fb093 863 main/binary-amd64/Packages.gz - 583f5b63ccd8f833789357440f414eaf 930 main/binary-amd64/Packages.bz2 - 515b376e5200e6e5c3d26b2bb3466123 1524 main/binary-i386/Packages - 9b03fafb2b578c5e678a85ffd2950084 865 main/binary-i386/Packages.gz - 548ab0b40975dc0e28a0f97421294c1d 929 main/binary-i386/Packages.bz2 SHA1: - 95af074e15d6a50d8f5defdf5f8a0f464d62ce97 91 main/binary-amd64/Release - 689d7b3d67cb1cc3d4a0e730a2c2462aa3344fef 90 main/binary-i386/Release - f4d8cfac4c02f57be17a0888971a7af5d68fe58b 1526 main/binary-amd64/Packages - c0b190dc735a59e18db4a6db3cbe2f06615f0f6b 863 main/binary-amd64/Packages.gz - bdecced219abb1db32c0df390f19df5f8656f975 930 main/binary-amd64/Packages.bz2 - d96e51c75dc23ae516d10f27931d46a65ed136e2 1524 main/binary-i386/Packages - 8c6f4dc45c9870d5b73751ba336640220f70d4c8 865 main/binary-i386/Packages.gz - 
16d49629b2fcc5eb3557fe4a712953d255ef2042 929 main/binary-i386/Packages.bz2 SHA256: - 3d9f8a049c9f85b8755316b04240369f73e0b74a8e4e64d008b46116e47f656e 91 main/binary-amd64/Release - faa23b79fc6811f4eb4839e2a98bb023c9d70b815b1f39b1e54b8c42a0afc74e 90 main/binary-i386/Release - 7af97eb8a100b006cc2b49c9d12b4ed78e2ba89c05ff8c0059898c8eab9b1400 1526 main/binary-amd64/Packages - cc842568f69d941516414b5753e9c1f500bfe6a209ee2d0cece17554715eabdb 863 main/binary-amd64/Packages.gz - e1511b7ffc5f9bcc6e1fdcfb32e5a8902f339f28910776d771de8c0dcd10034e 930 main/binary-amd64/Packages.bz2 - 1c06ffbaae938cca6f1471c7074fbf1ae5da09033183a4e41d9d4737ddc19048 1524 main/binary-i386/Packages - e469b02604cec35f69912b375b948ff2190a8741cd74ca175d6baed7ba4ca280 865 main/binary-i386/Packages.gz - af51d9566c47b93ca2bbd2004db83f90e3598e76b3925781358781799a24c39b 929 main/binary-i386/Packages.bz2 +SHA512: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/switch.py aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/switch.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/switch.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/switch.py 2016-05-24 07:05:22.000000000 +0000 @@ -73,8 +73,10 @@ h = hashlib.md5() elif len(fileHash) == 40: h = hashlib.sha1() - else: + elif len(fileHash) == 64: h = hashlib.sha256() + else: + h = hashlib.sha512() h.update(self.read_file(os.path.join('public/dists/maverick', path))) @@ -331,8 +333,10 @@ h = hashlib.md5() elif len(fileHash) == 40: h = hashlib.sha1() - else: + elif len(fileHash) == 64: h = hashlib.sha256() + else: + h = hashlib.sha512() h.update(self.read_file(os.path.join('public/dists/maverick', path))) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/update.py aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/update.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t06_publish/update.py 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/system/t06_publish/update.py 2016-05-24 07:05:22.000000000 +0000 @@ -72,8 +72,10 @@ h = hashlib.md5() elif len(fileHash) == 40: h = hashlib.sha1() - else: + elif len(fileHash) == 64: h = hashlib.sha256() + else: + h = hashlib.sha512() h.update(self.read_file(os.path.join('public/dists/maverick', path))) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t10_task/RunTask1Test_gold aptly-0.9.7/src/github.com/smira/aptly/system/t10_task/RunTask1Test_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t10_task/RunTask1Test_gold 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t10_task/RunTask1Test_gold 2016-05-24 07:05:22.000000000 +0000 @@ -21,6 +21,6 @@ 4) [Running]: version Begin command output: ---------------------------- -aptly version: 0.9.6 +aptly version: 0.9.7 End command output: ------------------------------ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t11_package/ShowPackage7Test/pyspi_0.6.1-1.3.conflict.dsc aptly-0.9.7/src/github.com/smira/aptly/system/t11_package/ShowPackage7Test/pyspi_0.6.1-1.3.conflict.dsc --- aptly-0.9.6/src/github.com/smira/aptly/system/t11_package/ShowPackage7Test/pyspi_0.6.1-1.3.conflict.dsc 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t11_package/ShowPackage7Test/pyspi_0.6.1-1.3.conflict.dsc 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +Format: 1.0 +Source: pyspi +Binary: python-at-spi +Architecture: any +Version: 0.6.1-1.3 +Maintainer: Jose Carlos Garcia Sogo +Homepage: http://people.redhat.com/zcerza/dogtail +Standards-Version: 3.7.3 +Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk +Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev +Files: + d41d8cd98f00b204e9800998ecf8427e 0 pyspi_0.6.1.orig.tar.gz diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t11_package/ShowPackage7Test_gold 
aptly-0.9.7/src/github.com/smira/aptly/system/t11_package/ShowPackage7Test_gold --- aptly-0.9.6/src/github.com/smira/aptly/system/t11_package/ShowPackage7Test_gold 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t11_package/ShowPackage7Test_gold 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,51 @@ +Package: pyspi +Binary: python-at-spi +Version: 0.6.1-1.3 +Maintainer: Jose Carlos Garcia Sogo +Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev +Architecture: any +Standards-Version: 3.7.3 +Format: 1.0 +Files: + 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz + b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc + def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz +Homepage: http://people.redhat.com/zcerza/dogtail +Checksums-Sha256: + 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz + d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc + 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz +Checksums-Sha1: + 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz + 56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc + 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz +Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk +Checksums-Sha512: + fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc + +References to package: + local repo [a] + +Package: pyspi +Binary: python-at-spi +Version: 0.6.1-1.3 +Maintainer: Jose Carlos Garcia Sogo +Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev +Architecture: any +Standards-Version: 3.7.3 +Format: 1.0 +Files: + d95c4fb8bf5066968b524e04f35c6d34 458 
pyspi_0.6.1-1.3.conflict.dsc + d41d8cd98f00b204e9800998ecf8427e 0 pyspi_0.6.1.orig.tar.gz +Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk +Checksums-Sha512: + ec9b3ea45d9a14f341c947bfd4b4d70ee508f9ffe9374ff2eceaa5df45ee48e3103f67d0af57d62308fee62957dae2b60c4ff5649543ea6dbfef1bccf151b27e 458 pyspi_0.6.1-1.3.conflict.dsc +Checksums-Sha256: + 33dc6feab9ff1cf863b27f4d622985fe0114252d157a744dcc3d575bf7cfaad8 458 pyspi_0.6.1-1.3.conflict.dsc +Checksums-Sha1: + 4d94f5e09bc745af159ddf9ce7a13a84ac3434d0 458 pyspi_0.6.1-1.3.conflict.dsc +Homepage: http://people.redhat.com/zcerza/dogtail + +References to package: + local repo [b] + diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t11_package/show.py aptly-0.9.7/src/github.com/smira/aptly/system/t11_package/show.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t11_package/show.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t11_package/show.py 2016-05-24 07:05:22.000000000 +0000 @@ -60,3 +60,17 @@ ] outputMatchPrepare = lambda _, s: "\n".join(sorted(s.split("\n"))) runCmd = "aptly package show -with-references nginx-full_1.2.1-2.2+wheezy2_amd64" + + +class ShowPackage7Test(BaseTest): + """ + show package: with duplicates + """ + fixtureCmds = [ + "aptly repo create a", + "aptly repo create b", + "aptly repo add a ${files}", + "aptly repo add b ${testfiles}" + ] + outputMatchPrepare = lambda _, s: "\n".join(sorted(s.split("\n"))) + runCmd = "aptly package show -with-references \"pyspi (0.6.1-1.3)\"" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t12_api/packages.py aptly-0.9.7/src/github.com/smira/aptly/system/t12_api/packages.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t12_api/packages.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t12_api/packages.py 2016-05-24 07:05:22.000000000 +0000 @@ -27,6 +27,7 @@ 'Build-Depends': 'debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), 
python-all-dev, libx11-dev', 'Checksums-Sha1': ' 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz\n 56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc\n 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz\n', 'Checksums-Sha256': ' 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz\n d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc\n 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz\n', + 'Checksums-Sha512': ' fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc\n', 'Files': ' 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz\n b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc\n def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz\n', 'FilesHash': '3a8b37cbd9a3559e', 'Format': '1.0', diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t12_api/publish.py aptly-0.9.7/src/github.com/smira/aptly/system/t12_api/publish.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t12_api/publish.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t12_api/publish.py 2016-05-24 07:05:22.000000000 +0000 @@ -41,6 +41,7 @@ 'Label': '', 'Origin': '', 'Prefix': prefix, + 'SkipContents': False, 'SourceKind': 'local', 'Sources': [{'Component': 'main', 'Name': repo_name}], 'Storage': ''} @@ -60,7 +61,7 @@ # publishing under root, custom distribution, architectures distribution = self.random_name() - resp = self.post("/api/publish", + resp = self.post("/api/publish/:.", json={ "SourceKind": "local", "Sources": [{"Name": repo_name}], @@ -74,6 +75,7 @@ 'Label': '', 'Origin': '', 'Prefix': ".", + 'SkipContents': False, 'SourceKind': 'local', 'Sources': [{'Component': 'main', 'Name': repo_name}], 'Storage': ''} @@ -129,6 +131,7 @@ 'Label': '', 
'Origin': '', 'Prefix': prefix, + 'SkipContents': False, 'SourceKind': 'snapshot', 'Sources': [{'Component': 'main', 'Name': snapshot_name}], 'Storage': ''}) @@ -188,6 +191,7 @@ 'Label': '', 'Origin': '', 'Prefix': prefix, + 'SkipContents': False, 'SourceKind': 'local', 'Sources': [{'Component': 'main', 'Name': repo_name}], 'Storage': ''} @@ -238,6 +242,7 @@ 'Label': '', 'Origin': '', 'Prefix': prefix, + 'SkipContents': False, 'SourceKind': 'snapshot', 'Sources': [{'Component': 'main', 'Name': snapshot1_name}], 'Storage': ''} @@ -261,6 +266,7 @@ json={ "Snapshots": [{"Component": "main", "Name": snapshot2_name}], "Signing": DefaultSigningOptions, + "SkipContents": True, }) repo_expected = { 'Architectures': ['i386', 'source'], @@ -268,6 +274,7 @@ 'Label': '', 'Origin': '', 'Prefix': prefix, + 'SkipContents': True, 'SourceKind': 'snapshot', 'Sources': [{'Component': 'main', 'Name': snapshot2_name}], 'Storage': ''} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/system/t12_api/version.py aptly-0.9.7/src/github.com/smira/aptly/system/t12_api/version.py --- aptly-0.9.6/src/github.com/smira/aptly/system/t12_api/version.py 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/system/t12_api/version.py 2016-05-24 07:05:22.000000000 +0000 @@ -7,4 +7,4 @@ """ def check(self): - self.check_equal(self.get("/api/version").json(), {'Version': '0.9.6'}) + self.check_equal(self.get("/api/version").json(), {'Version': '0.9.7'}) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/utils/checksum.go aptly-0.9.7/src/github.com/smira/aptly/utils/checksum.go --- aptly-0.9.6/src/github.com/smira/aptly/utils/checksum.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/utils/checksum.go 2016-05-24 07:05:22.000000000 +0000 @@ -4,6 +4,7 @@ "crypto/md5" "crypto/sha1" "crypto/sha256" + "crypto/sha512" "fmt" "hash" "io" @@ -16,6 +17,7 @@ MD5 string SHA1 string SHA256 string + SHA512 string } // ChecksumsForFile generates size, MD5, SHA1 & 
SHA256 checksums for given file @@ -51,7 +53,7 @@ // NewChecksumWriter creates checksum calculator for given writer w func NewChecksumWriter() *ChecksumWriter { return &ChecksumWriter{ - hashes: []hash.Hash{md5.New(), sha1.New(), sha256.New()}, + hashes: []hash.Hash{md5.New(), sha1.New(), sha256.New(), sha512.New()}, } } @@ -71,6 +73,7 @@ c.sum.MD5 = fmt.Sprintf("%x", c.hashes[0].Sum(nil)) c.sum.SHA1 = fmt.Sprintf("%x", c.hashes[1].Sum(nil)) c.sum.SHA256 = fmt.Sprintf("%x", c.hashes[2].Sum(nil)) + c.sum.SHA512 = fmt.Sprintf("%x", c.hashes[3].Sum(nil)) return c.sum } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/utils/config.go aptly-0.9.7/src/github.com/smira/aptly/utils/config.go --- aptly-0.9.6/src/github.com/smira/aptly/utils/config.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/utils/config.go 2016-05-24 07:05:22.000000000 +0000 @@ -21,6 +21,7 @@ DownloadSourcePackages bool `json:"downloadSourcePackages"` PpaDistributorID string `json:"ppaDistributorID"` PpaCodename string `json:"ppaCodename"` + SkipContentsPublishing bool `json:"skipContentsPublishing"` S3PublishRoots map[string]S3PublishRoot `json:"S3PublishEndpoints"` SwiftPublishRoots map[string]SwiftPublishRoot `json:"SwiftPublishEndpoints"` } @@ -32,12 +33,15 @@ Endpoint string `json:"endpoint"` AccessKeyID string `json:"awsAccessKeyID"` SecretAccessKey string `json:"awsSecretAccessKey"` + SessionToken string `json:"awsSessionToken"` Prefix string `json:"prefix"` ACL string `json:"acl"` StorageClass string `json:"storageClass"` EncryptionMethod string `json:"encryptionMethod"` PlusWorkaround bool `json:"plusWorkaround"` DisableMultiDel bool `json:"disableMultiDel"` + ForceSigV2 bool `json:"forceSigV2"` + Debug bool `json:"debug"` } // SwiftPublishRoot describes single OpenStack Swift publishing entry point diff -Nru aptly-0.9.6/src/github.com/smira/aptly/utils/config_test.go aptly-0.9.7/src/github.com/smira/aptly/utils/config_test.go --- 
aptly-0.9.6/src/github.com/smira/aptly/utils/config_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/utils/config_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -62,6 +62,7 @@ " \"downloadSourcePackages\": false,\n"+ " \"ppaDistributorID\": \"\",\n"+ " \"ppaCodename\": \"\",\n"+ + " \"skipContentsPublishing\": false,\n"+ " \"S3PublishEndpoints\": {\n"+ " \"test\": {\n"+ " \"region\": \"us-east-1\",\n"+ @@ -69,12 +70,15 @@ " \"endpoint\": \"\",\n"+ " \"awsAccessKeyID\": \"\",\n"+ " \"awsSecretAccessKey\": \"\",\n"+ + " \"awsSessionToken\": \"\",\n"+ " \"prefix\": \"\",\n"+ " \"acl\": \"\",\n"+ " \"storageClass\": \"\",\n"+ " \"encryptionMethod\": \"\",\n"+ " \"plusWorkaround\": false,\n"+ - " \"disableMultiDel\": false\n"+ + " \"disableMultiDel\": false,\n"+ + " \"forceSigV2\": false,\n"+ + " \"debug\": false\n"+ " }\n"+ " },\n"+ " \"SwiftPublishEndpoints\": {\n"+ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/utils/gpg.go aptly-0.9.7/src/github.com/smira/aptly/utils/gpg.go --- aptly-0.9.6/src/github.com/smira/aptly/utils/gpg.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/utils/gpg.go 2016-05-24 07:05:22.000000000 +0000 @@ -143,7 +143,7 @@ func (g *GpgSigner) DetachedSign(source string, destination string) error { fmt.Printf("Signing file '%s' with gpg, please enter your passphrase when prompted:\n", filepath.Base(source)) - args := []string{"-o", destination, "--armor", "--yes"} + args := []string{"-o", destination, "--digest-algo", "SHA256", "--armor", "--yes"} args = append(args, g.gpgArgs()...) args = append(args, "--detach-sign", source) cmd := exec.Command("gpg", args...) 
@@ -156,7 +156,7 @@ // ClearSign clear-signs the file func (g *GpgSigner) ClearSign(source string, destination string) error { fmt.Printf("Clearsigning file '%s' with gpg, please enter your passphrase when prompted:\n", filepath.Base(source)) - args := []string{"-o", destination, "--yes"} + args := []string{"-o", destination, "--digest-algo", "SHA256", "--yes"} args = append(args, g.gpgArgs()...) args = append(args, "--clearsign", source) cmd := exec.Command("gpg", args...) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,127 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. 
+// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with code, message, +// and original errors. Calling Error() will only return the error that is at the end +// of the list. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + if e, ok := origErr.(Error); ok && e != nil { + return e + } + return newBaseError(code, message, origErr) +} + +// NewBatchError returns an baseError with an expectation of an array of errors +func NewBatchError(code, message string, errs []error) BatchError { + return newBaseErrors(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. 
+// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Printf("Error:", err.Error() +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,197 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. 
If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and err. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. +// +// origErr is the error object which will be nested under the new error to be returned. +func newBaseError(code, message string, origErr error) *baseError { + b := &baseError{ + code: code, + message: message, + } + + if origErr != nil { + b.errs = append(b.errs, origErr) + } + + return b +} + +// newBaseErrors returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. +// +// origErrs is the error objects which will be nested under the new errors to be returned. 
+func newBaseErrors(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no error +// was set. This only returns the first element in the list. If the full list is +// needed, use BatchError +func (b baseError) OrigErr() error { + if size := len(b.errs); size > 0 { + return b.errs[0] + } + + return nil +} + +// OrigErrs returns the original errors if one was set. An empty slice is returned if +// no error was set:w +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for request +// status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. 
Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. 
We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,100 @@ +package awsutil + +import ( + "io" + "reflect" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. 
+ if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,233 @@ +package awsutil_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "testing" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +func ExampleCopy() { + type Foo struct { + A int + B []*string + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + f1 := &Foo{A: 1, B: []*string{&str1, &str2}} + + // Do the copy + var f2 Foo + awsutil.Copy(&f2, f1) + + // Print the result + fmt.Println(awsutil.Prettify(f2)) + + // Output: + // { + // A: 1, + // B: ["hello","bye bye"] + // } +} + +func TestCopy(t *testing.T) { + type Foo struct { + A int + B []*string + C map[string]*int + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + int1 := 1 + int2 := 2 + f1 := &Foo{ + A: 1, + B: []*string{&str1, &str2}, + C: map[string]*int{ + "A": &int1, + "B": &int2, + }, + } + + // Do the copy + var f2 Foo + awsutil.Copy(&f2, f1) + + // Values are equal + assert.Equal(t, f2.A, f1.A) + assert.Equal(t, f2.B, f1.B) + assert.Equal(t, f2.C, f1.C) + + // But pointers are not! 
+ str3 := "nothello" + int3 := 57 + f2.A = 100 + f2.B[0] = &str3 + f2.C["B"] = &int3 + assert.NotEqual(t, f2.A, f1.A) + assert.NotEqual(t, f2.B, f1.B) + assert.NotEqual(t, f2.C, f1.C) +} + +func TestCopyNestedWithUnexported(t *testing.T) { + type Bar struct { + a int + B int + } + type Foo struct { + A string + B Bar + } + + f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}} + + var f2 Foo + awsutil.Copy(&f2, f1) + + // Values match + assert.Equal(t, f2.A, f1.A) + assert.NotEqual(t, f2.B, f1.B) + assert.NotEqual(t, f2.B.a, f1.B.a) + assert.Equal(t, f2.B.B, f2.B.B) +} + +func TestCopyIgnoreNilMembers(t *testing.T) { + type Foo struct { + A *string + B []string + C map[string]string + } + + f := &Foo{} + assert.Nil(t, f.A) + assert.Nil(t, f.B) + assert.Nil(t, f.C) + + var f2 Foo + awsutil.Copy(&f2, f) + assert.Nil(t, f2.A) + assert.Nil(t, f2.B) + assert.Nil(t, f2.C) + + fcopy := awsutil.CopyOf(f) + f3 := fcopy.(*Foo) + assert.Nil(t, f3.A) + assert.Nil(t, f3.B) + assert.Nil(t, f3.C) +} + +func TestCopyPrimitive(t *testing.T) { + str := "hello" + var s string + awsutil.Copy(&s, &str) + assert.Equal(t, "hello", s) +} + +func TestCopyNil(t *testing.T) { + var s string + awsutil.Copy(&s, nil) + assert.Equal(t, "", s) +} + +func TestCopyReader(t *testing.T) { + var buf io.Reader = bytes.NewReader([]byte("hello world")) + var r io.Reader + awsutil.Copy(&r, buf) + b, err := ioutil.ReadAll(r) + assert.NoError(t, err) + assert.Equal(t, []byte("hello world"), b) + + // empty bytes because this is not a deep copy + b, err = ioutil.ReadAll(buf) + assert.NoError(t, err) + assert.Equal(t, []byte(""), b) +} + +func TestCopyDifferentStructs(t *testing.T) { + type SrcFoo struct { + A int + B []*string + C map[string]*int + SrcUnique string + SameNameDiffType int + unexportedPtr *int + ExportedPtr *int + } + type DstFoo struct { + A int + B []*string + C map[string]*int + DstUnique int + SameNameDiffType string + unexportedPtr *int + ExportedPtr *int + } + + // Create the initial value + str1 
:= "hello" + str2 := "bye bye" + int1 := 1 + int2 := 2 + f1 := &SrcFoo{ + A: 1, + B: []*string{&str1, &str2}, + C: map[string]*int{ + "A": &int1, + "B": &int2, + }, + SrcUnique: "unique", + SameNameDiffType: 1, + unexportedPtr: &int1, + ExportedPtr: &int2, + } + + // Do the copy + var f2 DstFoo + awsutil.Copy(&f2, f1) + + // Values are equal + assert.Equal(t, f2.A, f1.A) + assert.Equal(t, f2.B, f1.B) + assert.Equal(t, f2.C, f1.C) + assert.Equal(t, "unique", f1.SrcUnique) + assert.Equal(t, 1, f1.SameNameDiffType) + assert.Equal(t, 0, f2.DstUnique) + assert.Equal(t, "", f2.SameNameDiffType) + assert.Equal(t, int1, *f1.unexportedPtr) + assert.Nil(t, f2.unexportedPtr) + assert.Equal(t, int2, *f1.ExportedPtr) + assert.Equal(t, int2, *f2.ExportedPtr) +} + +func ExampleCopyOf() { + type Foo struct { + A int + B []*string + } + + // Create the initial value + str1 := "hello" + str2 := "bye bye" + f1 := &Foo{A: 1, B: []*string{&str1, &str2}} + + // Do the copy + v := awsutil.CopyOf(f1) + var f2 *Foo = v.(*Foo) + + // Print the result + fmt.Println(awsutil.Prettify(f2)) + + // Output: + // { + // A: 1, + // B: ["hello","bye bye"] + // } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. 
+// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,29 @@ +package awsutil_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +func TestDeepEqual(t *testing.T) { + cases := []struct { + a, b interface{} + equal bool + }{ + {"a", "a", true}, + {"a", "b", false}, + {"a", aws.String(""), false}, + {"a", nil, false}, + {"a", aws.String("a"), true}, + {(*bool)(nil), (*bool)(nil), true}, + {(*bool)(nil), (*string)(nil), false}, + {nil, nil, true}, + } + + for i, c := range cases { + assert.Equal(t, c.equal, awsutil.DeepEqual(c.a, c.b), "%d, a:%v b:%v, %t", i, c.a, c.b, c.equal) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. +func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, 
f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a 
structure. +func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,142 @@ +package awsutil_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/stretchr/testify/assert" +) + +type Struct struct { + A []Struct + z []Struct + B *Struct + D *Struct + C string + E map[string]string +} + +var data = Struct{ + A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, + z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, + B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}}, + C: "initial", +} +var data2 = Struct{A: []Struct{ + {A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}}, + {A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}}, +}} + +func TestValueAtPathSuccess(t *testing.T) { + var testCases = []struct { + expect []interface{} + data interface{} + path string + }{ + {[]interface{}{"initial"}, data, "C"}, + {[]interface{}{"value1"}, data, "A[0].C"}, + {[]interface{}{"value2"}, data, "A[1].C"}, + {[]interface{}{"value3"}, data, "A[2].C"}, + {[]interface{}{"value3"}, data, "a[2].c"}, + {[]interface{}{"value3"}, data, "A[-1].C"}, + {[]interface{}{"value1", "value2", "value3"}, data, "A[].C"}, + {[]interface{}{"terminal"}, data, "B . B . 
C"}, + {[]interface{}{"initial"}, data, "A.D.X || C"}, + {[]interface{}{"initial"}, data, "A[0].B || C"}, + {[]interface{}{ + Struct{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}}, + Struct{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}}, + }, data2, "A"}, + } + for i, c := range testCases { + v, err := awsutil.ValuesAtPath(c.data, c.path) + assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + } +} + +func TestValueAtPathFailure(t *testing.T) { + var testCases = []struct { + expect []interface{} + errContains string + data interface{} + path string + }{ + {nil, "", data, "C.x"}, + {nil, "SyntaxError: Invalid token: tDot", data, ".x"}, + {nil, "", data, "X.Y.Z"}, + {nil, "", data, "A[100].C"}, + {nil, "", data, "A[3].C"}, + {nil, "", data, "B.B.C.Z"}, + {nil, "", data, "z[-1].C"}, + {nil, "", nil, "A.B.C"}, + {[]interface{}{}, "", Struct{}, "A"}, + {nil, "", data, "A[0].B.C"}, + {nil, "", data, "D"}, + } + + for i, c := range testCases { + v, err := awsutil.ValuesAtPath(c.data, c.path) + if c.errContains != "" { + assert.Contains(t, err.Error(), c.errContains, "case %d, expected error, %s", i, c.path) + continue + } else { + assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + } + assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + } +} + +func TestSetValueAtPathSuccess(t *testing.T) { + var s Struct + awsutil.SetValueAtPath(&s, "C", "test1") + awsutil.SetValueAtPath(&s, "B.B.C", "test2") + awsutil.SetValueAtPath(&s, "B.D.C", "test3") + assert.Equal(t, "test1", s.C) + assert.Equal(t, "test2", s.B.B.C) + assert.Equal(t, "test3", s.B.D.C) + + awsutil.SetValueAtPath(&s, "B.*.C", "test0") + assert.Equal(t, "test0", s.B.B.C) + assert.Equal(t, "test0", s.B.D.C) + + var s2 Struct + awsutil.SetValueAtPath(&s2, "b.b.c", "test0") + assert.Equal(t, "test0", s2.B.B.C) + awsutil.SetValueAtPath(&s2, "A", []Struct{{}}) + assert.Equal(t, []Struct{{}}, s2.A) 
+ + str := "foo" + + s3 := Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", str) + assert.Equal(t, "foo", s3.B.B.C) + + s3 = Struct{B: &Struct{B: &Struct{C: str}}} + awsutil.SetValueAtPath(&s3, "b.b.c", nil) + assert.Equal(t, "", s3.B.B.C) + + s3 = Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", nil) + assert.Equal(t, "", s3.B.B.C) + + s3 = Struct{} + awsutil.SetValueAtPath(&s3, "b.b.c", &str) + assert.Equal(t, "foo", s3.B.B.C) + + var s4 struct{ Name *string } + awsutil.SetValueAtPath(&s4, "Name", str) + assert.Equal(t, str, *s4.Name) + + s4 = struct{ Name *string }{} + awsutil.SetValueAtPath(&s4, "Name", nil) + assert.Equal(t, (*string)(nil), s4.Name) + + s4 = struct{ Name *string }{Name: &str} + awsutil.SetValueAtPath(&s4, "Name", nil) + assert.Equal(t, (*string)(nil), s4.Name) + + s4 = struct{ Name *string }{} + awsutil.SetValueAtPath(&s4, "Name", &str) + assert.Equal(t, str, *s4.Name) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,103 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("<buffer>") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + 
case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. 
+ r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,45 @@ +package client + +import ( + "math" + "math/rand" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. 
For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// service.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() uint { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry returns if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. 
+type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/config.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/config.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/config.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/config.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,311 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// UseServiceDefaultRetries instructs the config to use the service's own default +// number of retries. This will be the default action if Config.MaxRetries +// is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the {defaults.DefaultConfig} structure. +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to retreive + // credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to + // a chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. 
+ // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service specific + // configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for missing + // required fields and/or other semantic request input errors. 
+ DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will + // use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata + // client to create a new http.Client. This options is only meaningful if you're not + // already using a custom HTTP client with the SDK. Enabled by default. + // + // Must be set and provided to the session.New() in order to disable the EC2Metadata + // overriding the timeout for default credentials chain. + // + // Example: + // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder methods to +// set multiple configuration values inline without using pointers. +// +// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. 
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = &region + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. 
+func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
+func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/config_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/config_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/config_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/config_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,86 @@ +package aws + +import ( + "net/http" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +var testCredentials = credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") + +var copyTestConfig = Config{ + Credentials: testCredentials, + Endpoint: String("CopyTestEndpoint"), + Region: String("COPY_TEST_AWS_REGION"), + DisableSSL: Bool(true), + HTTPClient: http.DefaultClient, + LogLevel: LogLevel(LogDebug), + Logger: NewDefaultLogger(), + MaxRetries: Int(3), + DisableParamValidation: Bool(true), + DisableComputeChecksums: Bool(true), + S3ForcePathStyle: Bool(true), +} + +func TestCopy(t *testing.T) { + want := copyTestConfig + got := copyTestConfig.Copy() + if !reflect.DeepEqual(*got, want) { + t.Errorf("Copy() = %+v", got) + t.Errorf(" want %+v", want) + } + + got.Region = String("other") + if got.Region == want.Region { + t.Errorf("Expect setting copy values not not reflect in source") + } +} + +func TestCopyReturnsNewInstance(t *testing.T) { + want := copyTestConfig + got := copyTestConfig.Copy() + if got == &want { + t.Errorf("Copy() = %p; want different instance as source %p", got, &want) + } +} + +var mergeTestZeroValueConfig = Config{} + +var mergeTestConfig = Config{ + Credentials: testCredentials, + Endpoint: String("MergeTestEndpoint"), + Region: String("MERGE_TEST_AWS_REGION"), + DisableSSL: 
Bool(true), + HTTPClient: http.DefaultClient, + LogLevel: LogLevel(LogDebug), + Logger: NewDefaultLogger(), + MaxRetries: Int(10), + DisableParamValidation: Bool(true), + DisableComputeChecksums: Bool(true), + S3ForcePathStyle: Bool(true), +} + +var mergeTests = []struct { + cfg *Config + in *Config + want *Config +}{ + {&Config{}, nil, &Config{}}, + {&Config{}, &mergeTestZeroValueConfig, &Config{}}, + {&Config{}, &mergeTestConfig, &mergeTestConfig}, +} + +func TestMerge(t *testing.T) { + for i, tt := range mergeTests { + got := tt.cfg.Copy() + got.MergeIn(tt.in) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Config %d %+v", i, tt.cfg) + t.Errorf(" Merge(%+v)", tt.in) + t.Errorf(" got %+v", got) + t.Errorf(" want %+v", tt.want) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,357 @@ +package aws + +import "time" + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. 
+func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/convert_types_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,437 @@ +package aws + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var testCasesStringSlice = [][]string{ + {"a", "b", "c", "d", "e"}, + {"a", "b", "", "", "e"}, +} + +func TestStringSlice(t *testing.T) { + for idx, in := range 
testCasesStringSlice { + if in == nil { + continue + } + out := StringSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesStringValueSlice = [][]*string{ + {String("a"), String("b"), nil, String("c")}, +} + +func TestStringValueSlice(t *testing.T) { + for idx, in := range testCasesStringValueSlice { + if in == nil { + continue + } + out := StringValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := StringSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesStringMap = []map[string]string{ + {"a": "1", "b": "2", "c": "3"}, +} + +func TestStringMap(t *testing.T) { + for idx, in := range testCasesStringMap { + if in == nil { + continue + } + out := StringMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolSlice = [][]bool{ + {true, true, false, false}, +} + +func TestBoolSlice(t *testing.T) { + for idx, in := range testCasesBoolSlice { + if in == nil { + continue + } + out := BoolSlice(in) + assert.Len(t, out, 
len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolValueSlice = [][]*bool{} + +func TestBoolValueSlice(t *testing.T) { + for idx, in := range testCasesBoolValueSlice { + if in == nil { + continue + } + out := BoolValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := BoolSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesBoolMap = []map[string]bool{ + {"a": true, "b": false, "c": true}, +} + +func TestBoolMap(t *testing.T) { + for idx, in := range testCasesBoolMap { + if in == nil { + continue + } + out := BoolMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntSlice = [][]int{ + {1, 2, 3, 4}, +} + +func TestIntSlice(t *testing.T) { + for idx, in := range testCasesIntSlice { + if in == nil { + continue + } + out := IntSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueSlice(out) + assert.Len(t, out2, 
len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntValueSlice = [][]*int{} + +func TestIntValueSlice(t *testing.T) { + for idx, in := range testCasesIntValueSlice { + if in == nil { + continue + } + out := IntValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := IntSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesIntMap = []map[string]int{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestIntMap(t *testing.T) { + for idx, in := range testCasesIntMap { + if in == nil { + continue + } + out := IntMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesInt64Slice = [][]int64{ + {1, 2, 3, 4}, +} + +func TestInt64Slice(t *testing.T) { + for idx, in := range testCasesInt64Slice { + if in == nil { + continue + } + out := Int64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesInt64ValueSlice = [][]*int64{} + +func TestInt64ValueSlice(t *testing.T) { 
+ for idx, in := range testCasesInt64ValueSlice { + if in == nil { + continue + } + out := Int64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Int64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesInt64Map = []map[string]int64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestInt64Map(t *testing.T) { + for idx, in := range testCasesInt64Map { + if in == nil { + continue + } + out := Int64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64Slice = [][]float64{ + {1, 2, 3, 4}, +} + +func TestFloat64Slice(t *testing.T) { + for idx, in := range testCasesFloat64Slice { + if in == nil { + continue + } + out := Float64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64ValueSlice = [][]*float64{} + +func TestFloat64ValueSlice(t *testing.T) { + for idx, in := range testCasesFloat64ValueSlice { + if in == nil { + continue + } + out := Float64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len 
at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Float64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesFloat64Map = []map[string]float64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestFloat64Map(t *testing.T) { + for idx, in := range testCasesFloat64Map { + if in == nil { + continue + } + out := Float64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeSlice = [][]time.Time{ + {time.Now(), time.Now().AddDate(100, 0, 0)}, +} + +func TestTimeSlice(t *testing.T) { + for idx, in := range testCasesTimeSlice { + if in == nil { + continue + } + out := TimeSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeValueSlice = [][]*time.Time{} + +func TestTimeValueSlice(t *testing.T) { + for idx, in := range testCasesTimeValueSlice { + if in == nil { + continue + } + out := TimeValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + 
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := TimeSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesTimeMap = []map[string]time.Time{ + {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()}, +} + +func TestTimeMap(t *testing.T) { + for idx, in := range testCasesTimeMap { + if in == nil { + continue + } + out := TimeMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,139 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. 
If unable +// to determine request body length and no "Content-Length" was specified it will panic. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ := strconv.ParseInt(slength, 10, 64) + r.HTTPRequest.ContentLength = length + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. 
+ if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. 
+ if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,113 @@ +package corehandlers_test + +import ( + "fmt" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func TestValidateEndpointHandler(t *testing.T) { + os.Clearenv() + + svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2")) + svc.Handlers.Clear() + svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := req.Build() + + assert.NoError(t, err) +} + +func TestValidateEndpointHandlerErrorRegion(t *testing.T) { + os.Clearenv() + + svc := 
awstesting.NewClient() + svc.Handlers.Clear() + svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingRegion, err) +} + +type mockCredsProvider struct { + expired bool + retrieveCalled bool +} + +func (m *mockCredsProvider) Retrieve() (credentials.Value, error) { + m.retrieveCalled = true + return credentials.Value{ProviderName: "mockCredsProvider"}, nil +} + +func (m *mockCredsProvider) IsExpired() bool { + return m.expired +} + +func TestAfterRetryRefreshCreds(t *testing.T) { + os.Clearenv() + credProvider := &mockCredsProvider{} + + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewCredentials(credProvider), + MaxRetries: aws.Int(1), + }) + + svc.Handlers.Clear() + svc.Handlers.ValidateResponse.PushBack(func(r *request.Request) { + r.Error = awserr.New("UnknownError", "", nil) + r.HTTPResponse = &http.Response{StatusCode: 400} + }) + svc.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + r.Error = awserr.New("ExpiredTokenException", "", nil) + }) + svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + + assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired") + assert.False(t, credProvider.retrieveCalled) + + req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + req.Send() + + assert.True(t, svc.Config.Credentials.IsExpired()) + assert.False(t, credProvider.retrieveCalled) + + _, err := svc.Config.Credentials.Get() + assert.NoError(t, err) + assert.True(t, credProvider.retrieveCalled) +} + +type testSendHandlerTransport struct{} + +func (t *testSendHandlerTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return nil, fmt.Errorf("mock error") +} + +func TestSendHandlerError(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + HTTPClient: &http.Client{ + Transport: 
&testSendHandlerTransport{}, + }, + }) + svc.Handlers.Clear() + svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler) + r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + + r.Send() + + assert.Error(t, r.Error) + assert.NotNil(t, r.HTTPResponse) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,144 @@ +package corehandlers + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if r.ParamsFilled() { + v := validator{errors: []string{}} + v.validateAny(reflect.ValueOf(r.Params), "") + + if count := len(v.errors); count > 0 { + format := "%d validation errors:\n- %s" + msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) + r.Error = awserr.New("InvalidParameter", msg, nil) + } + } +}} + +// A validator validates values. Collects validations errors which occurs. +type validator struct { + errors []string +} + +// validateAny will validate any struct, slice or map type. All validations +// are also performed recursively for nested types. 
+func (v *validator) validateAny(value reflect.Value, path string) { + value = reflect.Indirect(value) + if !value.IsValid() { + return + } + + switch value.Kind() { + case reflect.Struct: + v.validateStruct(value, path) + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) + } + case reflect.Map: + for _, n := range value.MapKeys() { + v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + } + } +} + +// validateStruct will validate the struct value's fields. If the structure has +// nested types those types will be validated also. +func (v *validator) validateStruct(value reflect.Value, path string) { + prefix := "." + if path == "" { + prefix = "" + } + + for i := 0; i < value.Type().NumField(); i++ { + f := value.Type().Field(i) + if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { + continue + } + fvalue := value.FieldByName(f.Name) + + err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) + if err != nil { + v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) + continue + } + + v.validateAny(fvalue, path+prefix+f.Name) + } +} + +type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error + +func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { + for _, fn := range funcs { + if err := fn(f, fvalue); err != nil { + return err + } + } + return nil +} + +// Validates that a field has a valid value provided for required fields. 
+func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { + if f.Tag.Get("required") == "" { + return nil + } + + switch fvalue.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return fmt.Errorf("missing required parameter") + } + default: + if !fvalue.IsValid() { + return fmt.Errorf("missing required parameter") + } + } + return nil +} + +// Validates that if a value is provided for a field, that value must be at +// least a minimum length. +func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { + minStr := f.Tag.Get("min") + if minStr == "" { + return nil + } + min, _ := strconv.ParseInt(minStr, 10, 64) + + kind := fvalue.Kind() + if kind == reflect.Ptr { + if fvalue.IsNil() { + return nil + } + fvalue = fvalue.Elem() + } + + switch fvalue.Kind() { + case reflect.String: + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + case reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return nil + } + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + + // TODO min can also apply to number minimum value. 
+ + } + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,129 @@ +package corehandlers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/stretchr/testify/require" +) + +var testSvc = func() *client.Client { + s := &client.Client{ + Config: aws.Config{}, + ClientInfo: metadata.ClientInfo{ + ServiceName: "mock-service", + APIVersion: "2015-01-01", + }, + } + return s +}() + +type StructShape struct { + RequiredList []*ConditionalStructShape `required:"true"` + RequiredMap map[string]*ConditionalStructShape `required:"true"` + RequiredBool *bool `required:"true"` + OptionalStruct *ConditionalStructShape + + hiddenParameter *string + _ struct{} +} + +type ConditionalStructShape struct { + Name *string `required:"true"` + _ struct{} +} + +func TestNoErrors(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{}, + RequiredMap: map[string]*ConditionalStructShape{ + "key1": {Name: aws.String("Name")}, + "key2": {Name: aws.String("Name")}, + }, + RequiredBool: aws.Bool(true), + OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")}, + } + + req := testSvc.NewRequest(&request.Operation{}, input, nil) + 
corehandlers.ValidateParametersHandler.Fn(req) + require.NoError(t, req.Error) +} + +func TestMissingRequiredParameters(t *testing.T) { + input := &StructShape{} + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Error(t, req.Error) + assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) + assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message()) +} + +func TestNestedMissingRequiredParameters(t *testing.T) { + input := &StructShape{ + RequiredList: []*ConditionalStructShape{{}}, + RequiredMap: map[string]*ConditionalStructShape{ + "key1": {Name: aws.String("Name")}, + "key2": {}, + }, + RequiredBool: aws.Bool(true), + OptionalStruct: &ConditionalStructShape{}, + } + + req := testSvc.NewRequest(&request.Operation{}, input, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Error(t, req.Error) + assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) + assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message()) +} + +type testInput struct { + StringField string `min:"5"` + PtrStrField *string `min:"2"` + ListField []string `min:"3"` + MapField map[string]string `min:"4"` +} + +var testsFieldMin = []struct { + err awserr.Error + in testInput +}{ + { + err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 5: StringField", nil), + in: testInput{StringField: "abcd"}, + }, + { + err: awserr.New("InvalidParameter", "2 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField", nil), + in: testInput{StringField: "abcd", ListField: []string{"a", 
"b"}}, + }, + { + err: awserr.New("InvalidParameter", "3 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField\n- field too short, minimum length 4: MapField", nil), + in: testInput{StringField: "abcd", ListField: []string{"a", "b"}, MapField: map[string]string{"a": "a", "b": "b"}}, + }, + { + err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 2: PtrStrField", nil), + in: testInput{StringField: "abcde", PtrStrField: aws.String("v")}, + }, + { + err: nil, + in: testInput{StringField: "abcde", PtrStrField: aws.String("value"), + ListField: []string{"a", "b", "c"}, MapField: map[string]string{"a": "a", "b": "b", "c": "c", "d": "d"}}, + }, +} + +func TestValidateFieldMinParameter(t *testing.T) { + for i, c := range testsFieldMin { + req := testSvc.NewRequest(&request.Operation{}, &c.in, nil) + corehandlers.ValidateParametersHandler.Fn(req) + + require.Equal(t, c.err, req.Error, "%d case failed", i) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. 
For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// vai the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := NewChainCredentials( +// []Provider{ +// &EnvProvider{}, +// &EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(&aws.Config{Credentials: creds}) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. 
+func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,154 @@ +package credentials + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/stretchr/testify/assert" +) + +type secondStubProvider struct { + creds Value + expired bool + err error +} + +func (s *secondStubProvider) Retrieve() (Value, error) { + s.expired = false + s.creds.ProviderName = 
"secondStubProvider" + return s.creds, s.err +} +func (s *secondStubProvider) IsExpired() bool { + return s.expired +} + +func TestChainProviderWithNames(t *testing.T) { + p := &ChainProvider{ + Providers: []Provider{ + &stubProvider{err: awserr.New("FirstError", "first provider error", nil)}, + &stubProvider{err: awserr.New("SecondError", "second provider error", nil)}, + &secondStubProvider{ + creds: Value{ + AccessKeyID: "AKIF", + SecretAccessKey: "NOSECRET", + SessionToken: "", + }, + }, + &stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + }, + }, + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "secondStubProvider", creds.ProviderName, "Expect provider name to match") + + // Also check credentials + assert.Equal(t, "AKIF", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "NOSECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") + +} + +func TestChainProviderGet(t *testing.T) { + p := &ChainProvider{ + Providers: []Provider{ + &stubProvider{err: awserr.New("FirstError", "first provider error", nil)}, + &stubProvider{err: awserr.New("SecondError", "second provider error", nil)}, + &stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + }, + }, + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") +} + +func TestChainProviderIsExpired(t *testing.T) { + stubProvider := &stubProvider{expired: true} + p := &ChainProvider{ + Providers: []Provider{ + stubProvider, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired to be true before any 
Retrieve") + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.False(t, p.IsExpired(), "Expect not expired after retrieve") + + stubProvider.expired = true + assert.True(t, p.IsExpired(), "Expect return of expired provider") + + _, err = p.Retrieve() + assert.False(t, p.IsExpired(), "Expect not expired after retrieve") +} + +func TestChainProviderWithNoProvider(t *testing.T) { + p := &ChainProvider{ + Providers: []Provider{}, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + assert.Equal(t, + ErrNoValidProvidersFoundInChain, + err, + "Expect no providers error returned") +} + +func TestChainProviderWithNoValidProvider(t *testing.T) { + errs := []error{ + awserr.New("FirstError", "first provider error", nil), + awserr.New("SecondError", "second provider error", nil), + } + p := &ChainProvider{ + Providers: []Provider{ + &stubProvider{err: errs[0]}, + &stubProvider{err: errs[1]}, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + + assert.Equal(t, + ErrNoValidProvidersFoundInChain, + err, + "Expect no providers error returned") +} + +func TestChainProviderWithNoValidProviderWithVerboseEnabled(t *testing.T) { + errs := []error{ + awserr.New("FirstError", "first provider error", nil), + awserr.New("SecondError", "second provider error", nil), + } + p := &ChainProvider{ + VerboseErrors: true, + Providers: []Provider{ + &stubProvider{err: errs[0]}, + &stubProvider{err: errs[1]}, + }, + } + + assert.True(t, p.IsExpired(), "Expect expired with no providers") + _, err := p.Retrieve() + + assert.Equal(t, + awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs), + err, + "Expect no providers error returned") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. 
To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. 
+// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. 
+type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. 
+func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,73 @@ +package credentials + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/stretchr/testify/assert" +) + +type stubProvider struct { + creds Value + expired bool + err error +} + +func (s *stubProvider) Retrieve() (Value, error) { + s.expired = false + s.creds.ProviderName = "stubProvider" + return s.creds, s.err +} +func (s *stubProvider) IsExpired() bool { + return s.expired +} + +func TestCredentialsGet(t *testing.T) { + c := NewCredentials(&stubProvider{ + creds: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + expired: true, + }) + + creds, err := c.Get() + assert.Nil(t, err, "Expected no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect session token to be empty") +} + +func TestCredentialsGetWithError(t *testing.T) { + c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true}) + + _, err := c.Get() + assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error") +} + +func TestCredentialsExpire(t *testing.T) { + stub := &stubProvider{} + c := NewCredentials(stub) + + stub.expired = false + assert.True(t, c.IsExpired(), "Expected 
to start out expired") + c.Expire() + assert.True(t, c.IsExpired(), "Expected to be expired") + + c.forceRefresh = false + assert.False(t, c.IsExpired(), "Expected not to be expired") + + stub.expired = true + assert.True(t, c.IsExpired(), "Expected to be expired") +} + +func TestCredentialsGetWithProviderName(t *testing.T) { + stub := &stubProvider{} + + c := NewCredentials(stub) + + creds, err := c.Get() + assert.Nil(t, err, "Expected no error") + assert.Equal(t, creds.ProviderName, "stubProvider", "Expected provider name to match") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. 
+// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. 
+// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request, an error will be returned. +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credential from the EC2 service.
+// +// If the credentials cannot be found, or there is an error reading the response +// an error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 Role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,159 @@ +package ec2rolecreds_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" +) + +const credsRespTmpl = `{ + 
"Code": "Success", + "Type": "AWS-HMAC", + "AccessKeyId" : "accessKey", + "SecretAccessKey" : "secret", + "Token" : "token", + "Expiration" : "%s", + "LastUpdated" : "2009-11-23T0:00:00Z" +}` + +const credsFailRespTmpl = `{ + "Code": "ErrorCode", + "Message": "ErrorMsg", + "LastUpdated": "2009-11-23T0:00:00Z" +}` + +func initTestServer(expireOn string, failAssume bool) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/latest/meta-data/iam/security-credentials" { + fmt.Fprintln(w, "RoleName") + } else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" { + if failAssume { + fmt.Fprintf(w, credsFailRespTmpl) + } else { + fmt.Fprintf(w, credsRespTmpl, expireOn) + } + } else { + http.Error(w, "bad request", http.StatusBadRequest) + } + })) + + return server +} + +func TestEC2RoleProvider(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestEC2RoleProviderFailAssume(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", true) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + + creds, err := p.Retrieve() + assert.Error(t, err, "Expect error") + + e := err.(awserr.Error) + assert.Equal(t, "ErrorCode", e.Code()) + assert.Equal(t, "ErrorMsg", e.Message()) + assert.Nil(t, e.OrigErr()) + + assert.Equal(t, "", 
creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "", creds.SessionToken, "Expect session token to match") +} + +func TestEC2RoleProviderIsExpired(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.") + + p.CurrentTime = func() time.Time { + return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired.") +} + +func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + ExpiryWindow: time.Hour * 1, + } + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error, %v", err) + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.") + + p.CurrentTime = func() time.Time { + return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC) + } + + assert.True(t, p.IsExpired(), "Expect creds to be expired.") +} + +func BenchmarkEC3RoleProvider(b *testing.B) { + server := initTestServer("2014-12-16T01:51:37Z", false) + defer server.Close() + + p := &ec2rolecreds.EC2RoleProvider{ 
+ Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}), + } + _, err := p.Retrieve() + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := p.Retrieve(); err != nil { + b.Fatal(err) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,77 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. 
+// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,70 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "os" + "testing" +) + +func TestEnvProviderRetrieve(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestEnvProviderIsExpired(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + os.Setenv("AWS_SESSION_TOKEN", "token") + + e := EnvProvider{} + + assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.") + + _, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.") +} + +func TestEnvProviderNoAccessKeyID(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", 
creds, err) +} + +func TestEnvProviderNoSecretAccessKey(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY_ID", "access") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err) +} + +func TestEnvProviderAlternateNames(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_ACCESS_KEY", "access") + os.Setenv("AWS_SECRET_KEY", "secret") + + e := EnvProvider{} + creds, err := e.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key") + assert.Empty(t, creds.SessionToken, "Expected no token") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. 
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: 
SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,116 @@ +package credentials + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSharedCredentialsProvider(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + 
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderIsExpired(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + + assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve") + + _, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve") +} + +func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini") + p := SharedCredentialsProvider{} + creds, err := p.Retrieve() + + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILEAbsPath(t *testing.T) { + os.Clearenv() + wd, err := os.Getwd() + assert.NoError(t, err) + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", filepath.Join(wd, "example.ini")) + p := SharedCredentialsProvider{} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") +} + +func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) { + os.Clearenv() + os.Setenv("AWS_PROFILE", "no_token") + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + creds, err := p.Retrieve() + assert.Nil(t, err, 
"Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no token") +} + +func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no token") +} + +func TestSharedCredentialsProviderColonInCredFile(t *testing.T) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: "with_colon"} + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no token") +} + +func BenchmarkSharedCredentialsProvider(b *testing.B) { + os.Clearenv() + + p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} + _, err := p.Retrieve() + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := p.Retrieve() + if err != nil { + b.Fatal(err) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,48 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + s.Value.ProviderName = StaticProviderName + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool { + return false +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,34 @@ +package credentials + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestStaticProviderGet(t *testing.T) { + s := StaticProvider{ + Value: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + } + + creds, err := s.Retrieve() + assert.Nil(t, err, "Expect no error") + assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") + assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") + assert.Empty(t, creds.SessionToken, "Expect no session token") +} + +func TestStaticProviderIsExpired(t *testing.T) { + s := StaticProvider{ + Value: Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "", + }, + } + + assert.False(t, s.IsExpired(), "Expect static credentials to never expire") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,134 @@ +// Package stscreds are credential Providers to retrieve STS AWS credentials. +// +// STS provides multiple ways to retrieve credentials which can be used when making +// future AWS service API operation calls. +package stscreds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. This provider must be used explicitly, +// as it is not included in the credentials chain. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. 
This is beneficial so race conditions + with expiring credentials do not cause request to fail unexpectedly + due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique.
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + + roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + }) + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,56 @@ +package stscreds + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/stretchr/testify/assert" +) + +type stubSTS struct { +} + +func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { + expiry := time.Now().Add(60 * time.Minute) + return &sts.AssumeRoleOutput{ + Credentials: &sts.Credentials{ + // Just reflect the role arn to the provider. 
+ AccessKeyId: input.RoleArn, + SecretAccessKey: aws.String("assumedSecretAccessKey"), + SessionToken: aws.String("assumedSessionToken"), + Expiration: &expiry, + }, + }, nil +} + +func TestAssumeRoleProvider(t *testing.T) { + stub := &stubSTS{} + p := &AssumeRoleProvider{ + Client: stub, + RoleARN: "roleARN", + } + + creds, err := p.Retrieve() + assert.Nil(t, err, "Expect no error") + + assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN") + assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match") + assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match") +} + +func BenchmarkAssumeRoleProvider(b *testing.B) { + stub := &stubSTS{} + p := &AssumeRoleProvider{ + Client: stub, + RoleARN: "roleARN", + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := p.Retrieve(); err != nil { + b.Fatal(err) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. 
+package defaults + +import ( + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithSleepDelay(time.Sleep) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. 
+func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) + + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + }, + }}) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,43 @@ +package ec2metadata + +import ( + "path" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// GetMetadata uses the path provided to 
request +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,101 @@ +package ec2metadata_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "path" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" +) + +func initTestServer(path string, resp string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + if r.RequestURI != path { + http.Error(w, "not found", http.StatusNotFound) + return + } + + w.Write([]byte(resp)) + })) +} + +func TestEndpoint(t *testing.T) { + c := ec2metadata.New(session.New()) + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", "testpath"), + } + + req := c.NewRequest(op, nil, nil) + assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint) + assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String()) +} + +func TestGetMetadata(t *testing.T) { + server := initTestServer( + "/latest/meta-data/some/path", + "success", // real response includes suffix + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + resp, err := c.GetMetadata("some/path") + + assert.NoError(t, err) + assert.Equal(t, "success", resp) +} + +func TestGetRegion(t *testing.T) { + server := initTestServer( + "/latest/meta-data/placement/availability-zone", + "us-west-2a", // real response includes suffix + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + region, err := c.Region() + + assert.NoError(t, err) + assert.Equal(t, "us-west-2", region) +} + +func TestMetadataAvailable(t *testing.T) { + server := initTestServer( + "/latest/meta-data/instance-id", + "instance-id", + ) + defer server.Close() + c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) + + available := c.Available() + + assert.True(t, available) +} + +func TestMetadataNotAvailable(t *testing.T) { + c := ec2metadata.New(session.New()) + c.Handlers.Send.Clear() + c.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + r.Error = 
awserr.New("RequestError", "send request failed", nil) + r.Retryable = aws.Bool(true) // network errors are retryable + }) + + available := c.Available() + + assert.False(t, available) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,117 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +package ec2metadata + +import ( + "io/ioutil" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. +const ServiceName = "ec2metadata" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. 
Generally using just New with a session +is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to true. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + } + + data := r.Data.(*metadataOutput) + data.Content = string(b) +} + +func unmarshalError(r *request.Request) { + defer
r.HTTPResponse.Body.Close() + _, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + } + + // TODO extract the error... +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,79 @@ +package ec2metadata_test + +import ( + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/stretchr/testify/assert" +) + +func TestClientOverrideDefaultHTTPClientTimeout(t *testing.T) { + svc := ec2metadata.New(session.New()) + + assert.NotEqual(t, http.DefaultClient, svc.Config.HTTPClient) + assert.Equal(t, 5*time.Second, svc.Config.HTTPClient.Timeout) +} + +func TestClientNotOverrideDefaultHTTPClientTimeout(t *testing.T) { + origClient := *http.DefaultClient + http.DefaultClient.Transport = &http.Transport{} + defer func() { + http.DefaultClient = &origClient + }() + + svc := ec2metadata.New(session.New()) + + assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient) + + tr, ok := svc.Config.HTTPClient.Transport.(*http.Transport) + assert.True(t, ok) + assert.NotNil(t, tr) + assert.Nil(t, tr.Dial) +} + +func TestClientDisableOverrideDefaultHTTPClientTimeout(t *testing.T) { + svc := 
ec2metadata.New(session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))) + + assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient) +} + +func TestClientOverrideDefaultHTTPClientTimeoutRace(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("us-east-1a")) + })) + + cfg := aws.NewConfig().WithEndpoint(server.URL) + runEC2MetadataClients(t, cfg, 100) +} + +func TestClientOverrideDefaultHTTPClientTimeoutRaceWithTransport(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("us-east-1a")) + })) + + cfg := aws.NewConfig().WithEndpoint(server.URL).WithHTTPClient(&http.Client{ + Transport: http.DefaultTransport, + }) + + runEC2MetadataClients(t, cfg, 100) +} + +func runEC2MetadataClients(t *testing.T, cfg *aws.Config, atOnce int) { + var wg sync.WaitGroup + wg.Add(atOnce) + for i := 0; i < atOnce; i++ { + go func() { + svc := ec2metadata.New(session.New(), cfg) + _, err := svc.Region() + assert.NoError(t, err) + wg.Done() + }() + } + wg.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/errors.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/errors.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/errors.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/errors.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. 
+ // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/logger.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/logger.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/logger.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/logger.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,98 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. 
This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. 
+func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,140 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler +} + +// A NamedHandler is a struct that contains a name and function callback. 
+type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + var n HandlerList + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for _, f := range l.list { + f.Fn(r) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. 
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,47 @@ +package request_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func TestHandlerList(t *testing.T) { + s := "" + r := &request.Request{} + l := request.HandlerList{} + l.PushBack(func(r *request.Request) { + s += "a" + r.Data = s + }) + l.Run(r) + assert.Equal(t, "a", s) + assert.Equal(t, "a", r.Data) +} + +func TestMultipleHandlers(t *testing.T) { + r := &request.Request{} + l := request.HandlerList{} + l.PushBack(func(r *request.Request) { r.Data = nil }) + l.PushFront(func(r *request.Request) { r.Data = aws.Bool(true) }) + l.Run(r) + if r.Data != nil { + t.Error("Expected handler to execute") + } +} + +func TestNamedHandlers(t *testing.T) { + l := request.HandlerList{} + named := request.NamedHandler{Name: "Name", Fn: func(r *request.Request) {}} + named2 := 
request.NamedHandler{Name: "NotName", Fn: func(r *request.Request) {}} + l.PushBackNamed(named) + l.PushBackNamed(named) + l.PushBackNamed(named2) + l.PushBack(func(r *request.Request) {}) + assert.Equal(t, 4, l.Len()) + l.Remove(named) + assert.Equal(t, 2, l.Len()) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,294 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + + built bool +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator +} + +// Paginator keeps track of pagination configuration for an API operation. 
+type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + p := operation.HTTPPath + if p == "" { + p = "/" + } + + httpReq, _ := http.NewRequest(method, "", nil) + httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p) + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: nil, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. 
+func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.HTTPRequest.Body = ioutil.NopCloser(reader) + r.Body = reader +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. +func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.NotHoist = false + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +// PresignRequest behaves just like presign, but hoists all headers and signs them. +// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. 
+func (r *Request) Build() error { + if !r.built { + r.Error = nil + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + r.built = true + } + + return r.Error +} + +// Sign will sign the request retuning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +func (r *Request) Send() error { + for { + r.Sign() + if r.Error != nil { + return r.Error + } + + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // Re-seek the body back to the original point in for a retry so that + // send will send the body's contents again in the upcoming request. 
+ r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. 
+func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. 
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. 
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,455 @@ +package request_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/s3" +) + +// Use DynamoDB methods for simplicity +func TestPaginationQueryPage(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []map[string]*dynamodb.AttributeValue{}, []map[string]*dynamodb.AttributeValue{}, 0, false + + reqNum := 0 + resps := []*dynamodb.QueryOutput{ + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key1")}, + }, + }, + }, + { + LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key2")}, + }, + }, + }, + { + 
LastEvaluatedKey: map[string]*dynamodb.AttributeValue{}, + Count: aws.Int64(1), + Items: []map[string]*dynamodb.AttributeValue{ + { + "key": {S: aws.String("key3")}, + }, + }, + }, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.QueryInput) + if in == nil { + tokens = append(tokens, nil) + } else if len(in.ExclusiveStartKey) != 0 { + tokens = append(tokens, in.ExclusiveStartKey) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.QueryInput{ + Limit: aws.Int64(2), + TableName: aws.String("tablename"), + } + err := db.QueryPages(params, func(p *dynamodb.QueryOutput, last bool) bool { + numPages++ + for _, item := range p.Items { + pages = append(pages, item) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + assert.Nil(t, err) + + assert.Equal(t, + []map[string]*dynamodb.AttributeValue{ + {"key": {S: aws.String("key1")}}, + {"key": {S: aws.String("key2")}}, + }, tokens) + assert.Equal(t, + []map[string]*dynamodb.AttributeValue{ + {"key": {S: aws.String("key1")}}, + {"key": {S: aws.String("key2")}}, + {"key": {S: aws.String("key3")}}, + }, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, params.ExclusiveStartKey) +} + +// Use DynamoDB methods for simplicity +func TestPagination(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + 
{TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.ListTablesInput) + if in == nil { + tokens = append(tokens, "") + } else if in.ExclusiveStartTableName != nil { + tokens = append(tokens, *in.ExclusiveStartTableName) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { + numPages++ + for _, t := range p.TableNames { + pages = append(pages, *t) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + + assert.Equal(t, []string{"Table2", "Table4"}, tokens) + assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, err) + assert.Nil(t, params.ExclusiveStartTableName) +} + +// Use DynamoDB methods for simplicity +func TestPaginationEachPage(t *testing.T) { + db := dynamodb.New(unit.Session) + tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Build.PushBack(func(r *request.Request) { + in := r.Params.(*dynamodb.ListTablesInput) + if in == nil { + 
tokens = append(tokens, "") + } else if in.ExclusiveStartTableName != nil { + tokens = append(tokens, *in.ExclusiveStartTableName) + } + }) + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + req, _ := db.ListTablesRequest(params) + err := req.EachPage(func(p interface{}, last bool) bool { + numPages++ + for _, t := range p.(*dynamodb.ListTablesOutput).TableNames { + pages = append(pages, *t) + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + + return true + }) + + assert.Equal(t, []string{"Table2", "Table4"}, tokens) + assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) + assert.Equal(t, 3, numPages) + assert.True(t, gotToEnd) + assert.Nil(t, err) +} + +// Use DynamoDB methods for simplicity +func TestPaginationEarlyExit(t *testing.T) { + db := dynamodb.New(unit.Session) + numPages, gotToEnd := 0, false + + reqNum := 0 + resps := []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, + {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, + {TableNames: []*string{aws.String("Table5")}}, + } + + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { + numPages++ + if numPages == 2 { + return false + } + if last { + if gotToEnd { + assert.Fail(t, "last=true happened twice") + } + gotToEnd = true + } + return true + }) + + assert.Equal(t, 2, numPages) + assert.False(t, gotToEnd) + 
assert.Nil(t, err) +} + +func TestSkipPagination(t *testing.T) { + client := s3.New(unit.Session) + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = &s3.HeadBucketOutput{} + }) + + req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")}) + + numPages, gotToEnd := 0, false + req.EachPage(func(p interface{}, last bool) bool { + numPages++ + if last { + gotToEnd = true + } + return true + }) + assert.Equal(t, 1, numPages) + assert.True(t, gotToEnd) +} + +// Use S3 for simplicity +func TestPaginationTruncation(t *testing.T) { + client := s3.New(unit.Session) + + reqNum := 0 + resps := []*s3.ListObjectsOutput{ + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}}, + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}}, + {IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}}, + {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}}, + } + + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &s3.ListObjectsInput{Bucket: aws.String("bucket")} + + results := []string{} + err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { + results = append(results, *p.Contents[0].Key) + return true + }) + + assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results) + assert.Nil(t, err) + + // Try again without truncation token at all + reqNum = 0 + resps[1].IsTruncated = nil + resps[2].IsTruncated = aws.Bool(true) + results = []string{} + err = client.ListObjectsPages(params, func(p 
*s3.ListObjectsOutput, last bool) bool { + results = append(results, *p.Contents[0].Key) + return true + }) + + assert.Equal(t, []string{"Key1", "Key2"}, results) + assert.Nil(t, err) +} + +func TestPaginationNilToken(t *testing.T) { + client := route53.New(unit.Session) + + reqNum := 0 + resps := []*route53.ListResourceRecordSetsOutput{ + { + ResourceRecordSets: []*route53.ResourceRecordSet{ + {Name: aws.String("first.example.com.")}, + }, + IsTruncated: aws.Bool(true), + NextRecordName: aws.String("second.example.com."), + NextRecordType: aws.String("MX"), + NextRecordIdentifier: aws.String("second"), + MaxItems: aws.String("1"), + }, + { + ResourceRecordSets: []*route53.ResourceRecordSet{ + {Name: aws.String("second.example.com.")}, + }, + IsTruncated: aws.Bool(true), + NextRecordName: aws.String("third.example.com."), + NextRecordType: aws.String("MX"), + MaxItems: aws.String("1"), + }, + { + ResourceRecordSets: []*route53.ResourceRecordSet{ + {Name: aws.String("third.example.com.")}, + }, + IsTruncated: aws.Bool(false), + MaxItems: aws.String("1"), + }, + } + client.Handlers.Send.Clear() // mock sending + client.Handlers.Unmarshal.Clear() + client.Handlers.UnmarshalMeta.Clear() + client.Handlers.ValidateResponse.Clear() + + idents := []string{} + client.Handlers.Build.PushBack(func(r *request.Request) { + p := r.Params.(*route53.ListResourceRecordSetsInput) + idents = append(idents, aws.StringValue(p.StartRecordIdentifier)) + + }) + client.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = resps[reqNum] + reqNum++ + }) + + params := &route53.ListResourceRecordSetsInput{ + HostedZoneId: aws.String("id-zone"), + } + + results := []string{} + err := client.ListResourceRecordSetsPages(params, func(p *route53.ListResourceRecordSetsOutput, last bool) bool { + results = append(results, *p.ResourceRecordSets[0].Name) + return true + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"", "second", ""}, idents) + assert.Equal(t, 
[]string{"first.example.com.", "second.example.com.", "third.example.com."}, results) +} + +// Benchmarks +var benchResps = []*dynamodb.ListTablesOutput{ + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, + {TableNames: []*string{aws.String("TABLE")}}, +} + +var benchDb = func() *dynamodb.DynamoDB { + db := dynamodb.New(unit.Session) + db.Handlers.Send.Clear() // mock sending + db.Handlers.Unmarshal.Clear() + db.Handlers.UnmarshalMeta.Clear() + db.Handlers.ValidateResponse.Clear() + return db +} + +func BenchmarkCodegenIterator(b *testing.B) { + reqNum := 0 + db := benchDb() + 
db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = benchResps[reqNum] + reqNum++ + }) + + input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error { + page, _ := db.ListTablesRequest(input) + for ; page != nil; page = page.NextPage() { + page.Send() + out := page.Data.(*dynamodb.ListTablesOutput) + if result := fn(out, !page.HasNextPage()); page.Error != nil || !result { + return page.Error + } + } + return nil + } + + for i := 0; i < b.N; i++ { + reqNum = 0 + iter(func(p *dynamodb.ListTablesOutput, last bool) bool { + return true + }) + } +} + +func BenchmarkEachPageIterator(b *testing.B) { + reqNum := 0 + db := benchDb() + db.Handlers.Unmarshal.PushBack(func(r *request.Request) { + r.Data = benchResps[reqNum] + reqNum++ + }) + + input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} + for i := 0; i < b.N; i++ { + reqNum = 0 + req, _ := db.ListTablesRequest(input) + req.EachPage(func(p interface{}, last bool) bool { + return true + }) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/request_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,261 @@ +package request_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +type testData struct { + Data string 
+} + +func body(str string) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(str))) +} + +func unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.Data != nil { + json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data) + } + return +} + +func unmarshalError(req *request.Request) { + bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err) + return + } + req.Error = awserr.NewRequestFailure( + awserr.New(jsonErr.Code, jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + "", + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} + +// test that retries occur for 5xx status codes +func TestRequestRecoverRetry5xx(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + 
assert.Equal(t, "valid", out.Data) +} + +// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry` +func TestRequestRecoverRetry4xxRetryable(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)}, + {StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + assert.Equal(t, 2, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +// test that retries don't occur for 4xx status codes with a response type that can't be retried +func TestRequest4xxUnretryable(t *testing.T) { + s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)} + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 401, e.StatusCode()) + } else { + assert.Fail(t, "Expected error to be a service failure") + } + assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code()) + 
assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message()) + assert.Equal(t, 0, int(r.RetryCount)) +} + +func TestRequestExhaustRetries(t *testing.T) { + delays := []time.Duration{} + sleepDelay := func(delay time.Duration) { + delays = append(delays, delay) + } + + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, + } + + s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay)) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) + err := r.Send() + assert.NotNil(t, err) + if e, ok := err.(awserr.RequestFailure); ok { + assert.Equal(t, 500, e.StatusCode()) + } else { + assert.Fail(t, "Expected error to be a service failure") + } + assert.Equal(t, "UnknownError", err.(awserr.Error).Code()) + assert.Equal(t, "An error occurred.", err.(awserr.Error).Message()) + assert.Equal(t, 3, int(r.RetryCount)) + + expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}} + for i, v := range delays { + min := expectDelays[i].min * time.Millisecond + max := expectDelays[i].max * time.Millisecond + assert.True(t, min <= v && v <= max, + "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max) + } +} + +// test that the request is retried after the credentials are expired. 
+func TestRequestRecoverExpiredCreds(t *testing.T) { + reqNum := 0 + reqs := []http.Response{ + {StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)}, + {StatusCode: 200, Body: body(`{"data":"valid"}`)}, + } + + s := awstesting.NewClient(&aws.Config{MaxRetries: aws.Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")}) + s.Handlers.Validate.Clear() + s.Handlers.Unmarshal.PushBack(unmarshal) + s.Handlers.UnmarshalError.PushBack(unmarshalError) + + credExpiredBeforeRetry := false + credExpiredAfterRetry := false + + s.Handlers.AfterRetry.PushBack(func(r *request.Request) { + credExpiredAfterRetry = r.Config.Credentials.IsExpired() + }) + + s.Handlers.Sign.Clear() + s.Handlers.Sign.PushBack(func(r *request.Request) { + r.Config.Credentials.Get() + }) + s.Handlers.Send.Clear() // mock sending + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &reqs[reqNum] + reqNum++ + }) + out := &testData{} + r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out) + err := r.Send() + assert.Nil(t, err) + + assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check") + assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check") + assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery") + + assert.Equal(t, 1, int(r.RetryCount)) + assert.Equal(t, "valid", out.Data) +} + +func TestMakeAddtoUserAgentHandler(t *testing.T) { + fn := request.MakeAddToUserAgentHandler("name", "version", "extra1", "extra2") + r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}} + r.HTTPRequest.Header.Set("User-Agent", "foo/bar") + fn(r) + + assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent")) +} + +func TestMakeAddtoUserAgentFreeFormHandler(t *testing.T) { + fn := request.MakeAddToUserAgentFreeFormHandler("name/version (extra1; extra2)") + r := 
&request.Request{HTTPRequest: &http.Request{Header: http.Header{}}} + r.HTTPRequest.Header.Set("User-Agent", "foo/bar") + fn(r) + + assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent")) +} + +func TestRequestUserAgent(t *testing.T) { + s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")}) + // s.Handlers.Validate.Clear() + + req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{}) + req.HTTPRequest.Header.Set("User-Agent", "foo/bar") + assert.NoError(t, req.Build()) + + expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)", + aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + assert.Equal(t, expectUA, req.HTTPRequest.Header.Get("User-Agent")) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,82 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. 
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. 
+func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,120 @@ +// Package session provides a way to create service clients with shared configuration +// and handlers. +// +// Generally this package should be used instead of the `defaults` package. +// +// A session should be used to share configurations and request handlers between multiple +// service clients. When service clients need specific configuration aws.Config can be +// used to provide additional configuration directly to the service client. +package session + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the session concurrently. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided Configs +// on top of the SDK's default configurations. Once the session is created it +// can be mutated to modify Configs or Handlers. 
The session is safe to be read +// concurrently, but it should not be written to concurrently. +// +// Example: +// // Create a session with the default config and request handlers. +// sess := session.New() +// +// // Create a session with a custom region +// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) +// +// // Create a session, and add additional handlers for all service +// // clients created with the session to inherit. Adds logging handler. +// sess := session.New() +// sess.Handlers.Send.PushFront(func(r *request.Request) { +// // Log every request made and its payload +// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) +// }) +// +// // Create a S3 client instance from a session +// sess := session.New() +// svc := s3.New(sess) +func New(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the session's copied config. +// +// Example: +// // Create a copy of the current session, configured for the us-west-2 region. 
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2"}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +// +// Example: +// sess := session.New() +// s3.New(sess) +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + endpoint, signingRegion := endpoints.NormalizeEndpoint( + aws.StringValue(s.Config.Endpoint), serviceName, + aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: endpoint, + SigningRegion: signingRegion, + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/session/session_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/session/session_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/session/session_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/session/session_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,20 @@ +package session_test + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" +) + +func TestNewDefaultSession(t *testing.T) { + s := session.New(&aws.Config{Region: aws.String("region")}) + + assert.Equal(t, "region", *s.Config.Region) + assert.Equal(t, http.DefaultClient, s.Config.HTTPClient) + assert.NotNil(t, s.Config.Logger) + assert.Equal(t, aws.LogOff, *s.Config.LogLevel) +} diff 
-Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/types.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/types.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/types.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/types.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,88 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. 
+func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + expLen := pos + int64(len(p)) + if int64(len(b.buf)) < expLen { + newBuf := make([]byte, expLen) + copy(newBuf, b.buf) + b.buf = newBuf + } + copy(b.buf[pos:], p) + return len(p), nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf[:len(b.buf):len(b.buf)] +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/types_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/types_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/types_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/types_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,56 @@ +package aws + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriteAtBuffer(t *testing.T) { + b := &WriteAtBuffer{} + + n, err := b.WriteAt([]byte{1}, 0) + assert.NoError(t, err) + assert.Equal(t, 1, n) + + n, err = b.WriteAt([]byte{1, 1, 1}, 5) + assert.NoError(t, err) + assert.Equal(t, 3, n) + + n, err = b.WriteAt([]byte{2}, 1) + assert.NoError(t, err) + assert.Equal(t, 1, n) + 
+ n, err = b.WriteAt([]byte{3}, 2) + assert.NoError(t, err) + assert.Equal(t, 1, n) + + assert.Equal(t, []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes()) +} + +func BenchmarkWriteAtBuffer(b *testing.B) { + buf := &WriteAtBuffer{} + r := rand.New(rand.NewSource(1)) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + to := r.Intn(10) * 4096 + bs := make([]byte, to) + buf.WriteAt(bs, r.Int63n(10)*4096) + } +} + +func BenchmarkWriteAtBufferParallel(b *testing.B) { + buf := &WriteAtBuffer{} + r := rand.New(rand.NewSource(1)) + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + to := r.Intn(10) * 4096 + bs := make([]byte, to) + buf.WriteAt(bs, r.Int63n(10)*4096) + } + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/version.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/version.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/version.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/aws/version.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. 
+package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.1.0" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/gen/gen.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,198 @@ +package main + +import ( + "bytes" + "go/format" + "io" + "os" + "path/filepath" + "sort" + "strings" + "text/template" + + "github.com/aws/aws-sdk-go/private/model/api" +) + +type pkg struct { + oldAPI *api.API + newAPI *api.API + shapes map[string]*shapentry + operations map[string]*opentry +} + +type shapentry struct { + oldShape *api.Shape + newShape *api.Shape +} + +type opentry struct { + oldName string + newName string +} + +type packageRenames struct { + Shapes map[string]string + Operations map[string]string + Fields map[string]string +} + +var exportMap = map[string]*packageRenames{} + +func generateRenames(w io.Writer) error { + tmpl, err := template.New("renames").Parse(t) + if err != nil { + return err + } + + out := bytes.NewBuffer(nil) + if err = tmpl.Execute(out, exportMap); err != nil { + return err + } + + b, err := format.Source(bytes.TrimSpace(out.Bytes())) + if err != nil { + return err + } + + _, err = io.Copy(w, bytes.NewReader(b)) + return err +} + +const t = ` +package rename + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +var renamedPackages = map[string]*packageRenames{ + {{ range $key, $entry := . 
}}"{{ $key }}": &packageRenames{ + operations: map[string]string{ + {{ range $old, $new := $entry.Operations }}"{{ $old }}": "{{ $new }}", + {{ end }} + }, + shapes: map[string]string{ + {{ range $old, $new := $entry.Shapes }}"{{ $old }}": "{{ $new }}", + {{ end }} + }, + fields: map[string]string{ + {{ range $old, $new := $entry.Fields }}"{{ $old }}": "{{ $new }}", + {{ end }} + }, + }, + {{ end }} +} +` + +func (p *pkg) buildRenames() { + pkgName := "github.com/aws/aws-sdk-go/service/" + p.oldAPI.PackageName() + if exportMap[pkgName] == nil { + exportMap[pkgName] = &packageRenames{map[string]string{}, map[string]string{}, map[string]string{}} + } + ifacename := "github.com/aws/aws-sdk-go/service/" + p.oldAPI.PackageName() + "/" + + p.oldAPI.InterfacePackageName() + if exportMap[ifacename] == nil { + exportMap[ifacename] = &packageRenames{map[string]string{}, map[string]string{}, map[string]string{}} + } + + for _, entry := range p.operations { + if entry.oldName != entry.newName { + pkgNames := []string{pkgName, ifacename} + for _, p := range pkgNames { + exportMap[p].Operations[entry.oldName] = entry.newName + exportMap[p].Operations[entry.oldName+"Request"] = entry.newName + "Request" + exportMap[p].Operations[entry.oldName+"Pages"] = entry.newName + "Pages" + } + } + } + + for _, entry := range p.shapes { + if entry.oldShape.Type == "structure" { + if entry.oldShape.ShapeName != entry.newShape.ShapeName { + exportMap[pkgName].Shapes[entry.oldShape.ShapeName] = entry.newShape.ShapeName + } + + for _, n := range entry.oldShape.MemberNames() { + for _, m := range entry.newShape.MemberNames() { + if n != m && strings.ToLower(n) == strings.ToLower(m) { + exportMap[pkgName].Fields[n] = m + } + } + } + } + } +} + +func load(file string) *pkg { + p := &pkg{&api.API{}, &api.API{}, map[string]*shapentry{}, map[string]*opentry{}} + + p.oldAPI.Attach(file) + p.oldAPI.Setup() + + p.newAPI.Attach(file) + p.newAPI.Setup() + + for _, name := range p.oldAPI.OperationNames() { 
+ p.operations[strings.ToLower(name)] = &opentry{oldName: name} + } + + for _, name := range p.newAPI.OperationNames() { + p.operations[strings.ToLower(name)].newName = name + } + + for _, shape := range p.oldAPI.ShapeList() { + p.shapes[strings.ToLower(shape.ShapeName)] = &shapentry{oldShape: shape} + } + + for _, shape := range p.newAPI.ShapeList() { + if _, ok := p.shapes[strings.ToLower(shape.ShapeName)]; !ok { + panic("missing shape " + shape.ShapeName) + } + p.shapes[strings.ToLower(shape.ShapeName)].newShape = shape + } + + return p +} + +var excludeServices = map[string]struct{}{ + "simpledb": {}, + "importexport": {}, +} + +func main() { + files, _ := filepath.Glob("../../apis/*/*/api-2.json") + + sort.Strings(files) + + // Remove old API versions from list + m := map[string]bool{} + for i := range files { + idx := len(files) - 1 - i + parts := strings.Split(files[idx], string(filepath.Separator)) + svc := parts[len(parts)-3] // service name is 2nd-to-last component + + if m[svc] { + files[idx] = "" // wipe this one out if we already saw the service + } + m[svc] = true + } + + for i := range files { + file := files[i] + if file == "" { // empty file + continue + } + + if g := load(file); g != nil { + if _, ok := excludeServices[g.oldAPI.PackageName()]; !ok { + g.buildRenames() + } + } + } + + outfile, err := os.Create("rename/renames.go") + if err != nil { + panic(err) + } + if err := generateRenames(outfile); err != nil { + panic(err) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename14.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename14.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename14.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename14.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,116 @@ +// +build !go1.5 + +package rename + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "go/parser" + "go/token" + "io/ioutil" + + "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/types" +) + +var dryRun = flag.Bool("dryrun", false, "Dry run") +var verbose = flag.Bool("verbose", false, "Verbose") + +type packageRenames struct { + operations map[string]string + shapes map[string]string + fields map[string]string +} + +type renamer struct { + *loader.Program + files map[*token.File]bool +} + +// ParsePathsFromArgs parses arguments from command line and looks at import +// paths to rename objects. +func ParsePathsFromArgs() { + flag.Parse() + for _, dir := range flag.Args() { + var conf loader.Config + conf.ParserMode = parser.ParseComments + conf.ImportWithTests(dir) + prog, err := conf.Load() + if err != nil { + panic(err) + } + + r := renamer{prog, map[*token.File]bool{}} + r.parse() + if !*dryRun { + r.write() + } + } +} + +func (r *renamer) dryInfo() string { + if *dryRun { + return "[DRY-RUN]" + } + return "[!]" +} + +func (r *renamer) printf(msg string, args ...interface{}) { + if *verbose { + fmt.Printf(msg, args...) 
+ } +} + +func (r *renamer) parse() { + for _, pkg := range r.InitialPackages() { + r.parseUses(pkg) + } +} + +func (r *renamer) write() { + for _, pkg := range r.InitialPackages() { + for _, f := range pkg.Files { + tokenFile := r.Fset.File(f.Pos()) + if r.files[tokenFile] { + var buf bytes.Buffer + format.Node(&buf, r.Fset, f) + if err := ioutil.WriteFile(tokenFile.Name(), buf.Bytes(), 0644); err != nil { + panic(err) + } + } + } + } +} + +func (r *renamer) parseUses(pkg *loader.PackageInfo) { + for k, v := range pkg.Uses { + if v.Pkg() != nil { + pkgPath := v.Pkg().Path() + if renames, ok := renamedPackages[pkgPath]; ok { + name := k.Name + switch t := v.(type) { + case *types.Func: + if newName, ok := renames.operations[t.Name()]; ok && newName != name { + r.printf("%s Rename [OPERATION]: %q -> %q\n", r.dryInfo(), name, newName) + r.files[r.Fset.File(k.Pos())] = true + k.Name = newName + } + case *types.TypeName: + if newName, ok := renames.shapes[name]; ok && newName != name { + r.printf("%s Rename [SHAPE]: %q -> %q\n", r.dryInfo(), t.Name(), newName) + r.files[r.Fset.File(k.Pos())] = true + k.Name = newName + } + case *types.Var: + if newName, ok := renames.fields[name]; ok && newName != name { + r.printf("%s Rename [FIELD]: %q -> %q\n", r.dryInfo(), t.Name(), newName) + r.files[r.Fset.File(k.Pos())] = true + k.Name = newName + } + } + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/rename.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,116 @@ +// +build go1.5 + 
+package rename + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "go/parser" + "go/token" + "go/types" + "io/ioutil" + + "golang.org/x/tools/go/loader" +) + +var dryRun = flag.Bool("dryrun", false, "Dry run") +var verbose = flag.Bool("verbose", false, "Verbose") + +type packageRenames struct { + operations map[string]string + shapes map[string]string + fields map[string]string +} + +type renamer struct { + *loader.Program + files map[*token.File]bool +} + +// ParsePathsFromArgs parses arguments from command line and looks at import +// paths to rename objects. +func ParsePathsFromArgs() { + flag.Parse() + for _, dir := range flag.Args() { + var conf loader.Config + conf.ParserMode = parser.ParseComments + conf.ImportWithTests(dir) + prog, err := conf.Load() + if err != nil { + panic(err) + } + + r := renamer{prog, map[*token.File]bool{}} + r.parse() + if !*dryRun { + r.write() + } + } +} + +func (r *renamer) dryInfo() string { + if *dryRun { + return "[DRY-RUN]" + } + return "[!]" +} + +func (r *renamer) printf(msg string, args ...interface{}) { + if *verbose { + fmt.Printf(msg, args...) 
+ } +} + +func (r *renamer) parse() { + for _, pkg := range r.InitialPackages() { + r.parseUses(pkg) + } +} + +func (r *renamer) write() { + for _, pkg := range r.InitialPackages() { + for _, f := range pkg.Files { + tokenFile := r.Fset.File(f.Pos()) + if r.files[tokenFile] { + var buf bytes.Buffer + format.Node(&buf, r.Fset, f) + if err := ioutil.WriteFile(tokenFile.Name(), buf.Bytes(), 0644); err != nil { + panic(err) + } + } + } + } +} + +func (r *renamer) parseUses(pkg *loader.PackageInfo) { + for k, v := range pkg.Uses { + if v.Pkg() != nil { + pkgPath := v.Pkg().Path() + if renames, ok := renamedPackages[pkgPath]; ok { + name := k.Name + switch t := v.(type) { + case *types.Func: + if newName, ok := renames.operations[t.Name()]; ok && newName != name { + r.printf("%s Rename [OPERATION]: %q -> %q\n", r.dryInfo(), name, newName) + r.files[r.Fset.File(k.Pos())] = true + k.Name = newName + } + case *types.TypeName: + if newName, ok := renames.shapes[name]; ok && newName != name { + r.printf("%s Rename [SHAPE]: %q -> %q\n", r.dryInfo(), t.Name(), newName) + r.files[r.Fset.File(k.Pos())] = true + k.Name = newName + } + case *types.Var: + if newName, ok := renames.fields[name]; ok && newName != name { + r.printf("%s Rename [FIELD]: %q -> %q\n", r.dryInfo(), t.Name(), newName) + r.files[r.Fset.File(k.Pos())] = true + k.Name = newName + } + } + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename/renames.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2118 @@ +package 
rename + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +var renamedPackages = map[string]*packageRenames{ + "github.com/aws/aws-sdk-go/service/autoscaling": { + operations: map[string]string{}, + shapes: map[string]string{ + "EBS": "Ebs", + }, + fields: map[string]string{ + "ActivityID": "ActivityId", + "ActivityIDs": "ActivityIds", + "AssociatePublicIPAddress": "AssociatePublicIpAddress", + "ClassicLinkVPCID": "ClassicLinkVPCId", + "EBS": "Ebs", + "EBSOptimized": "EbsOptimized", + "IAMInstanceProfile": "IamInstanceProfile", + "IOPS": "Iops", + "ImageID": "ImageId", + "InstanceID": "InstanceId", + "InstanceIDs": "InstanceIds", + "KernelID": "KernelId", + "RAMDiskID": "RamdiskId", + "ResourceID": "ResourceId", + "SnapshotID": "SnapshotId", + }, + }, + "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudformation": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "EventID": "EventId", + "LogicalResourceID": "LogicalResourceId", + "PhysicalResourceID": "PhysicalResourceId", + "StackID": "StackId", + "URL": "Url", + "UniqueID": "UniqueId", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudfront": { + operations: map[string]string{}, + shapes: map[string]string{ + "KeyPairIDs": "KeyPairIds", + }, + fields: map[string]string{ + "AWSAccountNumber": "AwsAccountNumber", + "DistributionID": "DistributionId", + "IAMCertificateID": "IAMCertificateId", + "ID": "Id", + "KeyPairIDs": "KeyPairIds", + "S3CanonicalUserID": "S3CanonicalUserId", + "TargetOriginID": "TargetOriginId", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface": { + operations: 
map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudhsm": { + operations: map[string]string{ + "CreateHAPG": "CreateHapg", + "CreateHAPGPages": "CreateHapgPages", + "CreateHAPGRequest": "CreateHapgRequest", + "CreateHSM": "CreateHsm", + "CreateHSMPages": "CreateHsmPages", + "CreateHSMRequest": "CreateHsmRequest", + "DeleteHAPG": "DeleteHapg", + "DeleteHAPGPages": "DeleteHapgPages", + "DeleteHAPGRequest": "DeleteHapgRequest", + "DeleteHSM": "DeleteHsm", + "DeleteHSMPages": "DeleteHsmPages", + "DeleteHSMRequest": "DeleteHsmRequest", + "DescribeHAPG": "DescribeHapg", + "DescribeHAPGPages": "DescribeHapgPages", + "DescribeHAPGRequest": "DescribeHapgRequest", + "DescribeHSM": "DescribeHsm", + "DescribeHSMPages": "DescribeHsmPages", + "DescribeHSMRequest": "DescribeHsmRequest", + "ListHSMs": "ListHsms", + "ListHSMsPages": "ListHsmsPages", + "ListHSMsRequest": "ListHsmsRequest", + "ModifyHAPG": "ModifyHapg", + "ModifyHAPGPages": "ModifyHapgPages", + "ModifyHAPGRequest": "ModifyHapgRequest", + "ModifyHSM": "ModifyHsm", + "ModifyHSMPages": "ModifyHsmPages", + "ModifyHSMRequest": "ModifyHsmRequest", + }, + shapes: map[string]string{ + "CreateHAPGInput": "CreateHapgInput", + "CreateHAPGOutput": "CreateHapgOutput", + "CreateHSMInput": "CreateHsmInput", + "CreateHSMOutput": "CreateHsmOutput", + "DeleteHAPGInput": "DeleteHapgInput", + "DeleteHAPGOutput": "DeleteHapgOutput", + "DeleteHSMInput": "DeleteHsmInput", + "DeleteHSMOutput": "DeleteHsmOutput", + "DescribeHAPGInput": "DescribeHapgInput", + "DescribeHAPGOutput": "DescribeHapgOutput", + "DescribeHSMInput": "DescribeHsmInput", + "DescribeHSMOutput": "DescribeHsmOutput", + "ListHSMsInput": "ListHsmsInput", + "ListHSMsOutput": "ListHsmsOutput", + "ModifyHAPGInput": "ModifyHapgInput", + "ModifyHAPGOutput": "ModifyHapgOutput", + "ModifyHSMInput": "ModifyHsmInput", + "ModifyHSMOutput": "ModifyHsmOutput", + }, + fields: map[string]string{ + "ClientARN": 
"ClientArn", + "ENIID": "EniId", + "ENIIP": "EniIp", + "ExternalID": "ExternalId", + "HAPGARN": "HapgArn", + "HAPGList": "HapgList", + "HAPGSerial": "HapgSerial", + "HSMARN": "HsmArn", + "HSMList": "HsmList", + "HSMSerialNumber": "HsmSerialNumber", + "HSMType": "HsmType", + "HSMsLastActionFailed": "HsmsLastActionFailed", + "HSMsPendingDeletion": "HsmsPendingDeletion", + "HSMsPendingRegistration": "HsmsPendingRegistration", + "IAMRoleARN": "IamRoleArn", + "SSHKey": "SshKey", + "SSHKeyLastUpdated": "SshKeyLastUpdated", + "SSHPublicKey": "SshPublicKey", + "ServerCertURI": "ServerCertUri", + "SubnetID": "SubnetId", + "SyslogIP": "SyslogIp", + "VPCID": "VpcId", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface": { + operations: map[string]string{ + "CreateHAPG": "CreateHapg", + "CreateHAPGPages": "CreateHapgPages", + "CreateHAPGRequest": "CreateHapgRequest", + "CreateHSM": "CreateHsm", + "CreateHSMPages": "CreateHsmPages", + "CreateHSMRequest": "CreateHsmRequest", + "DeleteHAPG": "DeleteHapg", + "DeleteHAPGPages": "DeleteHapgPages", + "DeleteHAPGRequest": "DeleteHapgRequest", + "DeleteHSM": "DeleteHsm", + "DeleteHSMPages": "DeleteHsmPages", + "DeleteHSMRequest": "DeleteHsmRequest", + "DescribeHAPG": "DescribeHapg", + "DescribeHAPGPages": "DescribeHapgPages", + "DescribeHAPGRequest": "DescribeHapgRequest", + "DescribeHSM": "DescribeHsm", + "DescribeHSMPages": "DescribeHsmPages", + "DescribeHSMRequest": "DescribeHsmRequest", + "ListHSMs": "ListHsms", + "ListHSMsPages": "ListHsmsPages", + "ListHSMsRequest": "ListHsmsRequest", + "ModifyHAPG": "ModifyHapg", + "ModifyHAPGPages": "ModifyHapgPages", + "ModifyHAPGRequest": "ModifyHapgRequest", + "ModifyHSM": "ModifyHsm", + "ModifyHSMPages": "ModifyHsmPages", + "ModifyHSMRequest": "ModifyHsmRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudsearch": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: 
map[string]string{ + "DomainID": "DomainId", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudsearchdomain": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ID": "Id", + "RID": "Rid", + "TimeMS": "Timems", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudtrail": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "CloudWatchLogsLogGroupARN": "CloudWatchLogsLogGroupArn", + "CloudWatchLogsRoleARN": "CloudWatchLogsRoleArn", + "EventID": "EventId", + "SNSTopicName": "SnsTopicName", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudwatch": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AlarmARN": "AlarmArn", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cloudwatchlogs": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "DestinationARN": "DestinationArn", + "EventID": "EventId", + "RoleARN": "RoleArn", + "TargetARN": "TargetArn", + }, + }, + "github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/codecommit": { + 
operations: map[string]string{}, + shapes: map[string]string{ + "RepositoryNameIDPair": "RepositoryNameIdPair", + }, + fields: map[string]string{ + "ARN": "Arn", + "AccountID": "AccountId", + "CloneURLHTTP": "CloneUrlHttp", + "CloneURLSSH": "CloneUrlSsh", + "CommitID": "CommitId", + "RepositoryID": "RepositoryId", + }, + }, + "github.com/aws/aws-sdk-go/service/codecommit/codecommitiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/codedeploy": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ApplicationID": "ApplicationId", + "CommitID": "CommitId", + "DeploymentConfigID": "DeploymentConfigId", + "DeploymentGroupID": "DeploymentGroupId", + "DeploymentID": "DeploymentId", + "DeploymentIDs": "DeploymentIds", + "EC2TagFilters": "Ec2TagFilters", + "IAMUserARN": "IamUserArn", + "InstanceARN": "InstanceArn", + "InstanceID": "InstanceId", + "ServiceRoleARN": "ServiceRoleArn", + }, + }, + "github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/codepipeline": { + operations: map[string]string{}, + shapes: map[string]string{ + "ActionTypeID": "ActionTypeId", + }, + fields: map[string]string{ + "AccessKeyID": "AccessKeyId", + "AccountID": "AccountId", + "ActionTypeID": "ActionTypeId", + "ClientID": "ClientId", + "EntityURL": "EntityUrl", + "EntityURLTemplate": "EntityUrlTemplate", + "ExecutionURLTemplate": "ExecutionUrlTemplate", + "ExternalExecutionID": "ExternalExecutionId", + "ExternalExecutionURL": "ExternalExecutionUrl", + "ID": "Id", + "JobID": "JobId", + "PipelineExecutionID": "PipelineExecutionId", + "RevisionChangeID": "RevisionChangeId", + "RevisionID": "RevisionId", + "RevisionURL": "RevisionUrl", + "RevisionURLTemplate": "RevisionUrlTemplate", + "RoleARN": "RoleArn", + 
"ThirdPartyConfigurationURL": "ThirdPartyConfigurationUrl", + }, + }, + "github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cognitoidentity": { + operations: map[string]string{ + "GetID": "GetId", + "GetIDPages": "GetIdPages", + "GetIDRequest": "GetIdRequest", + "GetOpenIDToken": "GetOpenIdToken", + "GetOpenIDTokenForDeveloperIdentity": "GetOpenIdTokenForDeveloperIdentity", + "GetOpenIDTokenForDeveloperIdentityPages": "GetOpenIdTokenForDeveloperIdentityPages", + "GetOpenIDTokenForDeveloperIdentityRequest": "GetOpenIdTokenForDeveloperIdentityRequest", + "GetOpenIDTokenPages": "GetOpenIdTokenPages", + "GetOpenIDTokenRequest": "GetOpenIdTokenRequest", + }, + shapes: map[string]string{ + "GetIDInput": "GetIdInput", + "GetIDOutput": "GetIdOutput", + "GetOpenIDTokenForDeveloperIdentityInput": "GetOpenIdTokenForDeveloperIdentityInput", + "GetOpenIDTokenForDeveloperIdentityOutput": "GetOpenIdTokenForDeveloperIdentityOutput", + "GetOpenIDTokenInput": "GetOpenIdTokenInput", + "GetOpenIDTokenOutput": "GetOpenIdTokenOutput", + "UnprocessedIdentityID": "UnprocessedIdentityId", + }, + fields: map[string]string{ + "AccessKeyID": "AccessKeyId", + "AccountID": "AccountId", + "IdentityID": "IdentityId", + "IdentityIDsToDelete": "IdentityIdsToDelete", + "IdentityPoolID": "IdentityPoolId", + "OpenIDConnectProviderARNs": "OpenIdConnectProviderARNs", + "UnprocessedIdentityIDs": "UnprocessedIdentityIds", + }, + }, + "github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface": { + operations: map[string]string{ + "GetID": "GetId", + "GetIDPages": "GetIdPages", + "GetIDRequest": "GetIdRequest", + "GetOpenIDToken": "GetOpenIdToken", + "GetOpenIDTokenForDeveloperIdentity": "GetOpenIdTokenForDeveloperIdentity", + "GetOpenIDTokenForDeveloperIdentityPages": "GetOpenIdTokenForDeveloperIdentityPages", + 
"GetOpenIDTokenForDeveloperIdentityRequest": "GetOpenIdTokenForDeveloperIdentityRequest", + "GetOpenIDTokenPages": "GetOpenIdTokenPages", + "GetOpenIDTokenRequest": "GetOpenIdTokenRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/cognitosync": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ApplicationARNs": "ApplicationArns", + "DeviceID": "DeviceId", + "IdentityID": "IdentityId", + "IdentityPoolID": "IdentityPoolId", + "RoleARN": "RoleArn", + }, + }, + "github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/configservice": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AccountID": "AccountId", + "ConfigSnapshotID": "ConfigSnapshotId", + "ConfigurationStateID": "ConfigurationStateId", + "ResourceID": "ResourceId", + "SNSTopicARN": "SnsTopicARN", + }, + }, + "github.com/aws/aws-sdk-go/service/configservice/configserviceiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/datapipeline": { + operations: map[string]string{}, + shapes: map[string]string{ + "PipelineIDName": "PipelineIdName", + }, + fields: map[string]string{ + "AttemptID": "AttemptId", + "ErrorID": "ErrorId", + "ID": "Id", + "IDs": "Ids", + "ObjectID": "ObjectId", + "ObjectIDs": "ObjectIds", + "PipelineID": "PipelineId", + "PipelineIDList": "PipelineIdList", + "PipelineIDs": "PipelineIds", + "TaskID": "TaskId", + "TaskRunnerID": "TaskrunnerId", + "UniqueID": "UniqueId", + }, + }, + "github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + 
"github.com/aws/aws-sdk-go/service/devicefarm": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AWSAccountNumber": "AwsAccountNumber", + "AppARN": "AppArn", + "CPU": "Cpu", + "DevicePoolARN": "DevicePoolArn", + "ExtraDataPackageARN": "ExtraDataPackageArn", + "NetworkProfileARN": "NetworkProfileArn", + "ProjectARN": "ProjectArn", + "TestPackageARN": "TestPackageArn", + "URL": "Url", + }, + }, + "github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/directconnect": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ASN": "Asn", + "CIDR": "Cidr", + "ConnectionID": "ConnectionId", + "InterconnectID": "InterconnectId", + "VLAN": "Vlan", + "VirtualGatewayID": "VirtualGatewayId", + "VirtualInterfaceID": "VirtualInterfaceId", + }, + }, + "github.com/aws/aws-sdk-go/service/directconnect/directconnectiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/directoryservice": { + operations: map[string]string{ + "DisableSSO": "DisableSso", + "DisableSSOPages": "DisableSsoPages", + "DisableSSORequest": "DisableSsoRequest", + "EnableSSO": "EnableSso", + "EnableSSOPages": "EnableSsoPages", + "EnableSSORequest": "EnableSsoRequest", + }, + shapes: map[string]string{ + "DirectoryVPCSettings": "DirectoryVpcSettings", + "DirectoryVPCSettingsDescription": "DirectoryVpcSettingsDescription", + "DisableSSOInput": "DisableSsoInput", + "DisableSSOOutput": "DisableSsoOutput", + "EnableSSOInput": "EnableSsoInput", + "EnableSSOOutput": "EnableSsoOutput", + }, + fields: map[string]string{ + "AccessURL": "AccessUrl", + "ComputerID": "ComputerId", + "ConnectIPs": "ConnectIps", + "CustomerDNSIPs": "CustomerDnsIps", + "DNSIPAddrs": "DnsIpAddrs", 
+ "DirectoryID": "DirectoryId", + "DirectoryIDs": "DirectoryIds", + "SSOEnabled": "SsoEnabled", + "SecurityGroupID": "SecurityGroupId", + "SnapshotID": "SnapshotId", + "SnapshotIDs": "SnapshotIds", + "SubnetIDs": "SubnetIds", + "VPCID": "VpcId", + "VPCSettings": "VpcSettings", + }, + }, + "github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface": { + operations: map[string]string{ + "DisableSSO": "DisableSso", + "DisableSSOPages": "DisableSsoPages", + "DisableSSORequest": "DisableSsoRequest", + "EnableSSO": "EnableSso", + "EnableSSOPages": "EnableSsoPages", + "EnableSSORequest": "EnableSsoRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/dynamodb": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "IndexARN": "IndexArn", + "LatestStreamARN": "LatestStreamArn", + "TableARN": "TableArn", + }, + }, + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/dynamodbstreams": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AWSRegion": "AwsRegion", + "DynamoDB": "Dynamodb", + "ExclusiveStartShardID": "ExclusiveStartShardId", + "ExclusiveStartStreamARN": "ExclusiveStartStreamArn", + "LastEvaluatedShardID": "LastEvaluatedShardId", + "LastEvaluatedStreamARN": "LastEvaluatedStreamArn", + "ParentShardID": "ParentShardId", + "ShardID": "ShardId", + "StreamARN": "StreamArn", + }, + }, + "github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/ec2": { + operations: map[string]string{ + "AcceptVPCPeeringConnection": "AcceptVpcPeeringConnection", + "AcceptVPCPeeringConnectionPages": 
"AcceptVpcPeeringConnectionPages", + "AcceptVPCPeeringConnectionRequest": "AcceptVpcPeeringConnectionRequest", + "AssignPrivateIPAddresses": "AssignPrivateIpAddresses", + "AssignPrivateIPAddressesPages": "AssignPrivateIpAddressesPages", + "AssignPrivateIPAddressesRequest": "AssignPrivateIpAddressesRequest", + "AssociateDHCPOptions": "AssociateDhcpOptions", + "AssociateDHCPOptionsPages": "AssociateDhcpOptionsPages", + "AssociateDHCPOptionsRequest": "AssociateDhcpOptionsRequest", + "AttachClassicLinkVPC": "AttachClassicLinkVpc", + "AttachClassicLinkVPCPages": "AttachClassicLinkVpcPages", + "AttachClassicLinkVPCRequest": "AttachClassicLinkVpcRequest", + "AttachVPNGateway": "AttachVpnGateway", + "AttachVPNGatewayPages": "AttachVpnGatewayPages", + "AttachVPNGatewayRequest": "AttachVpnGatewayRequest", + "CreateDHCPOptions": "CreateDhcpOptions", + "CreateDHCPOptionsPages": "CreateDhcpOptionsPages", + "CreateDHCPOptionsRequest": "CreateDhcpOptionsRequest", + "CreateNetworkACL": "CreateNetworkAcl", + "CreateNetworkACLEntry": "CreateNetworkAclEntry", + "CreateNetworkACLEntryPages": "CreateNetworkAclEntryPages", + "CreateNetworkACLEntryRequest": "CreateNetworkAclEntryRequest", + "CreateNetworkACLPages": "CreateNetworkAclPages", + "CreateNetworkACLRequest": "CreateNetworkAclRequest", + "CreateVPC": "CreateVpc", + "CreateVPCEndpoint": "CreateVpcEndpoint", + "CreateVPCEndpointPages": "CreateVpcEndpointPages", + "CreateVPCEndpointRequest": "CreateVpcEndpointRequest", + "CreateVPCPages": "CreateVpcPages", + "CreateVPCPeeringConnection": "CreateVpcPeeringConnection", + "CreateVPCPeeringConnectionPages": "CreateVpcPeeringConnectionPages", + "CreateVPCPeeringConnectionRequest": "CreateVpcPeeringConnectionRequest", + "CreateVPCRequest": "CreateVpcRequest", + "CreateVPNConnection": "CreateVpnConnection", + "CreateVPNConnectionPages": "CreateVpnConnectionPages", + "CreateVPNConnectionRequest": "CreateVpnConnectionRequest", + "CreateVPNConnectionRoute": "CreateVpnConnectionRoute", + 
"CreateVPNConnectionRoutePages": "CreateVpnConnectionRoutePages", + "CreateVPNConnectionRouteRequest": "CreateVpnConnectionRouteRequest", + "CreateVPNGateway": "CreateVpnGateway", + "CreateVPNGatewayPages": "CreateVpnGatewayPages", + "CreateVPNGatewayRequest": "CreateVpnGatewayRequest", + "DeleteDHCPOptions": "DeleteDhcpOptions", + "DeleteDHCPOptionsPages": "DeleteDhcpOptionsPages", + "DeleteDHCPOptionsRequest": "DeleteDhcpOptionsRequest", + "DeleteNetworkACL": "DeleteNetworkAcl", + "DeleteNetworkACLEntry": "DeleteNetworkAclEntry", + "DeleteNetworkACLEntryPages": "DeleteNetworkAclEntryPages", + "DeleteNetworkACLEntryRequest": "DeleteNetworkAclEntryRequest", + "DeleteNetworkACLPages": "DeleteNetworkAclPages", + "DeleteNetworkACLRequest": "DeleteNetworkAclRequest", + "DeleteVPC": "DeleteVpc", + "DeleteVPCEndpoints": "DeleteVpcEndpoints", + "DeleteVPCEndpointsPages": "DeleteVpcEndpointsPages", + "DeleteVPCEndpointsRequest": "DeleteVpcEndpointsRequest", + "DeleteVPCPages": "DeleteVpcPages", + "DeleteVPCPeeringConnection": "DeleteVpcPeeringConnection", + "DeleteVPCPeeringConnectionPages": "DeleteVpcPeeringConnectionPages", + "DeleteVPCPeeringConnectionRequest": "DeleteVpcPeeringConnectionRequest", + "DeleteVPCRequest": "DeleteVpcRequest", + "DeleteVPNConnection": "DeleteVpnConnection", + "DeleteVPNConnectionPages": "DeleteVpnConnectionPages", + "DeleteVPNConnectionRequest": "DeleteVpnConnectionRequest", + "DeleteVPNConnectionRoute": "DeleteVpnConnectionRoute", + "DeleteVPNConnectionRoutePages": "DeleteVpnConnectionRoutePages", + "DeleteVPNConnectionRouteRequest": "DeleteVpnConnectionRouteRequest", + "DeleteVPNGateway": "DeleteVpnGateway", + "DeleteVPNGatewayPages": "DeleteVpnGatewayPages", + "DeleteVPNGatewayRequest": "DeleteVpnGatewayRequest", + "DescribeDHCPOptions": "DescribeDhcpOptions", + "DescribeDHCPOptionsPages": "DescribeDhcpOptionsPages", + "DescribeDHCPOptionsRequest": "DescribeDhcpOptionsRequest", + "DescribeNetworkACLs": "DescribeNetworkAcls", + 
"DescribeNetworkACLsPages": "DescribeNetworkAclsPages", + "DescribeNetworkACLsRequest": "DescribeNetworkAclsRequest", + "DescribeVPCAttribute": "DescribeVpcAttribute", + "DescribeVPCAttributePages": "DescribeVpcAttributePages", + "DescribeVPCAttributeRequest": "DescribeVpcAttributeRequest", + "DescribeVPCClassicLink": "DescribeVpcClassicLink", + "DescribeVPCClassicLinkPages": "DescribeVpcClassicLinkPages", + "DescribeVPCClassicLinkRequest": "DescribeVpcClassicLinkRequest", + "DescribeVPCEndpointServices": "DescribeVpcEndpointServices", + "DescribeVPCEndpointServicesPages": "DescribeVpcEndpointServicesPages", + "DescribeVPCEndpointServicesRequest": "DescribeVpcEndpointServicesRequest", + "DescribeVPCEndpoints": "DescribeVpcEndpoints", + "DescribeVPCEndpointsPages": "DescribeVpcEndpointsPages", + "DescribeVPCEndpointsRequest": "DescribeVpcEndpointsRequest", + "DescribeVPCPeeringConnections": "DescribeVpcPeeringConnections", + "DescribeVPCPeeringConnectionsPages": "DescribeVpcPeeringConnectionsPages", + "DescribeVPCPeeringConnectionsRequest": "DescribeVpcPeeringConnectionsRequest", + "DescribeVPCs": "DescribeVpcs", + "DescribeVPCsPages": "DescribeVpcsPages", + "DescribeVPCsRequest": "DescribeVpcsRequest", + "DescribeVPNConnections": "DescribeVpnConnections", + "DescribeVPNConnectionsPages": "DescribeVpnConnectionsPages", + "DescribeVPNConnectionsRequest": "DescribeVpnConnectionsRequest", + "DescribeVPNGateways": "DescribeVpnGateways", + "DescribeVPNGatewaysPages": "DescribeVpnGatewaysPages", + "DescribeVPNGatewaysRequest": "DescribeVpnGatewaysRequest", + "DetachClassicLinkVPC": "DetachClassicLinkVpc", + "DetachClassicLinkVPCPages": "DetachClassicLinkVpcPages", + "DetachClassicLinkVPCRequest": "DetachClassicLinkVpcRequest", + "DetachVPNGateway": "DetachVpnGateway", + "DetachVPNGatewayPages": "DetachVpnGatewayPages", + "DetachVPNGatewayRequest": "DetachVpnGatewayRequest", + "DisableVGWRoutePropagation": "DisableVgwRoutePropagation", + "DisableVGWRoutePropagationPages": 
"DisableVgwRoutePropagationPages", + "DisableVGWRoutePropagationRequest": "DisableVgwRoutePropagationRequest", + "DisableVPCClassicLink": "DisableVpcClassicLink", + "DisableVPCClassicLinkPages": "DisableVpcClassicLinkPages", + "DisableVPCClassicLinkRequest": "DisableVpcClassicLinkRequest", + "EnableVGWRoutePropagation": "EnableVgwRoutePropagation", + "EnableVGWRoutePropagationPages": "EnableVgwRoutePropagationPages", + "EnableVGWRoutePropagationRequest": "EnableVgwRoutePropagationRequest", + "EnableVPCClassicLink": "EnableVpcClassicLink", + "EnableVPCClassicLinkPages": "EnableVpcClassicLinkPages", + "EnableVPCClassicLinkRequest": "EnableVpcClassicLinkRequest", + "ModifyVPCAttribute": "ModifyVpcAttribute", + "ModifyVPCAttributePages": "ModifyVpcAttributePages", + "ModifyVPCAttributeRequest": "ModifyVpcAttributeRequest", + "ModifyVPCEndpoint": "ModifyVpcEndpoint", + "ModifyVPCEndpointPages": "ModifyVpcEndpointPages", + "ModifyVPCEndpointRequest": "ModifyVpcEndpointRequest", + "MoveAddressToVPC": "MoveAddressToVpc", + "MoveAddressToVPCPages": "MoveAddressToVpcPages", + "MoveAddressToVPCRequest": "MoveAddressToVpcRequest", + "RejectVPCPeeringConnection": "RejectVpcPeeringConnection", + "RejectVPCPeeringConnectionPages": "RejectVpcPeeringConnectionPages", + "RejectVPCPeeringConnectionRequest": "RejectVpcPeeringConnectionRequest", + "ReplaceNetworkACLAssociation": "ReplaceNetworkAclAssociation", + "ReplaceNetworkACLAssociationPages": "ReplaceNetworkAclAssociationPages", + "ReplaceNetworkACLAssociationRequest": "ReplaceNetworkAclAssociationRequest", + "ReplaceNetworkACLEntry": "ReplaceNetworkAclEntry", + "ReplaceNetworkACLEntryPages": "ReplaceNetworkAclEntryPages", + "ReplaceNetworkACLEntryRequest": "ReplaceNetworkAclEntryRequest", + "UnassignPrivateIPAddresses": "UnassignPrivateIpAddresses", + "UnassignPrivateIPAddressesPages": "UnassignPrivateIpAddressesPages", + "UnassignPrivateIPAddressesRequest": "UnassignPrivateIpAddressesRequest", + }, + shapes: map[string]string{ 
+ "AcceptVPCPeeringConnectionInput": "AcceptVpcPeeringConnectionInput", + "AcceptVPCPeeringConnectionOutput": "AcceptVpcPeeringConnectionOutput", + "AssignPrivateIPAddressesInput": "AssignPrivateIpAddressesInput", + "AssignPrivateIPAddressesOutput": "AssignPrivateIpAddressesOutput", + "AssociateDHCPOptionsInput": "AssociateDhcpOptionsInput", + "AssociateDHCPOptionsOutput": "AssociateDhcpOptionsOutput", + "AttachClassicLinkVPCInput": "AttachClassicLinkVpcInput", + "AttachClassicLinkVPCOutput": "AttachClassicLinkVpcOutput", + "AttachVPNGatewayInput": "AttachVpnGatewayInput", + "AttachVPNGatewayOutput": "AttachVpnGatewayOutput", + "CreateDHCPOptionsInput": "CreateDhcpOptionsInput", + "CreateDHCPOptionsOutput": "CreateDhcpOptionsOutput", + "CreateNetworkACLEntryInput": "CreateNetworkAclEntryInput", + "CreateNetworkACLEntryOutput": "CreateNetworkAclEntryOutput", + "CreateNetworkACLInput": "CreateNetworkAclInput", + "CreateNetworkACLOutput": "CreateNetworkAclOutput", + "CreateVPCEndpointInput": "CreateVpcEndpointInput", + "CreateVPCEndpointOutput": "CreateVpcEndpointOutput", + "CreateVPCInput": "CreateVpcInput", + "CreateVPCOutput": "CreateVpcOutput", + "CreateVPCPeeringConnectionInput": "CreateVpcPeeringConnectionInput", + "CreateVPCPeeringConnectionOutput": "CreateVpcPeeringConnectionOutput", + "CreateVPNConnectionInput": "CreateVpnConnectionInput", + "CreateVPNConnectionOutput": "CreateVpnConnectionOutput", + "CreateVPNConnectionRouteInput": "CreateVpnConnectionRouteInput", + "CreateVPNConnectionRouteOutput": "CreateVpnConnectionRouteOutput", + "CreateVPNGatewayInput": "CreateVpnGatewayInput", + "CreateVPNGatewayOutput": "CreateVpnGatewayOutput", + "DHCPConfiguration": "DhcpConfiguration", + "DHCPOptions": "DhcpOptions", + "DeleteDHCPOptionsInput": "DeleteDhcpOptionsInput", + "DeleteDHCPOptionsOutput": "DeleteDhcpOptionsOutput", + "DeleteNetworkACLEntryInput": "DeleteNetworkAclEntryInput", + "DeleteNetworkACLEntryOutput": "DeleteNetworkAclEntryOutput", + 
"DeleteNetworkACLInput": "DeleteNetworkAclInput", + "DeleteNetworkACLOutput": "DeleteNetworkAclOutput", + "DeleteVPCEndpointsInput": "DeleteVpcEndpointsInput", + "DeleteVPCEndpointsOutput": "DeleteVpcEndpointsOutput", + "DeleteVPCInput": "DeleteVpcInput", + "DeleteVPCOutput": "DeleteVpcOutput", + "DeleteVPCPeeringConnectionInput": "DeleteVpcPeeringConnectionInput", + "DeleteVPCPeeringConnectionOutput": "DeleteVpcPeeringConnectionOutput", + "DeleteVPNConnectionInput": "DeleteVpnConnectionInput", + "DeleteVPNConnectionOutput": "DeleteVpnConnectionOutput", + "DeleteVPNConnectionRouteInput": "DeleteVpnConnectionRouteInput", + "DeleteVPNConnectionRouteOutput": "DeleteVpnConnectionRouteOutput", + "DeleteVPNGatewayInput": "DeleteVpnGatewayInput", + "DeleteVPNGatewayOutput": "DeleteVpnGatewayOutput", + "DescribeDHCPOptionsInput": "DescribeDhcpOptionsInput", + "DescribeDHCPOptionsOutput": "DescribeDhcpOptionsOutput", + "DescribeNetworkACLsInput": "DescribeNetworkAclsInput", + "DescribeNetworkACLsOutput": "DescribeNetworkAclsOutput", + "DescribeVPCAttributeInput": "DescribeVpcAttributeInput", + "DescribeVPCAttributeOutput": "DescribeVpcAttributeOutput", + "DescribeVPCClassicLinkInput": "DescribeVpcClassicLinkInput", + "DescribeVPCClassicLinkOutput": "DescribeVpcClassicLinkOutput", + "DescribeVPCEndpointServicesInput": "DescribeVpcEndpointServicesInput", + "DescribeVPCEndpointServicesOutput": "DescribeVpcEndpointServicesOutput", + "DescribeVPCEndpointsInput": "DescribeVpcEndpointsInput", + "DescribeVPCEndpointsOutput": "DescribeVpcEndpointsOutput", + "DescribeVPCPeeringConnectionsInput": "DescribeVpcPeeringConnectionsInput", + "DescribeVPCPeeringConnectionsOutput": "DescribeVpcPeeringConnectionsOutput", + "DescribeVPCsInput": "DescribeVpcsInput", + "DescribeVPCsOutput": "DescribeVpcsOutput", + "DescribeVPNConnectionsInput": "DescribeVpnConnectionsInput", + "DescribeVPNConnectionsOutput": "DescribeVpnConnectionsOutput", + "DescribeVPNGatewaysInput": "DescribeVpnGatewaysInput", 
+ "DescribeVPNGatewaysOutput": "DescribeVpnGatewaysOutput", + "DetachClassicLinkVPCInput": "DetachClassicLinkVpcInput", + "DetachClassicLinkVPCOutput": "DetachClassicLinkVpcOutput", + "DetachVPNGatewayInput": "DetachVpnGatewayInput", + "DetachVPNGatewayOutput": "DetachVpnGatewayOutput", + "DisableVGWRoutePropagationInput": "DisableVgwRoutePropagationInput", + "DisableVGWRoutePropagationOutput": "DisableVgwRoutePropagationOutput", + "DisableVPCClassicLinkInput": "DisableVpcClassicLinkInput", + "DisableVPCClassicLinkOutput": "DisableVpcClassicLinkOutput", + "EBSBlockDevice": "EbsBlockDevice", + "EBSInstanceBlockDevice": "EbsInstanceBlockDevice", + "EBSInstanceBlockDeviceSpecification": "EbsInstanceBlockDeviceSpecification", + "EnableVGWRoutePropagationInput": "EnableVgwRoutePropagationInput", + "EnableVGWRoutePropagationOutput": "EnableVgwRoutePropagationOutput", + "EnableVPCClassicLinkInput": "EnableVpcClassicLinkInput", + "EnableVPCClassicLinkOutput": "EnableVpcClassicLinkOutput", + "IAMInstanceProfile": "IamInstanceProfile", + "IAMInstanceProfileSpecification": "IamInstanceProfileSpecification", + "ICMPTypeCode": "IcmpTypeCode", + "IPPermission": "IpPermission", + "IPRange": "IpRange", + "InstancePrivateIPAddress": "InstancePrivateIpAddress", + "ModifyVPCAttributeInput": "ModifyVpcAttributeInput", + "ModifyVPCAttributeOutput": "ModifyVpcAttributeOutput", + "ModifyVPCEndpointInput": "ModifyVpcEndpointInput", + "ModifyVPCEndpointOutput": "ModifyVpcEndpointOutput", + "MoveAddressToVPCInput": "MoveAddressToVpcInput", + "MoveAddressToVPCOutput": "MoveAddressToVpcOutput", + "NetworkACL": "NetworkAcl", + "NetworkACLAssociation": "NetworkAclAssociation", + "NetworkACLEntry": "NetworkAclEntry", + "NetworkInterfacePrivateIPAddress": "NetworkInterfacePrivateIpAddress", + "NewDHCPConfiguration": "NewDhcpConfiguration", + "PrefixListID": "PrefixListId", + "PrivateIPAddressSpecification": "PrivateIpAddressSpecification", + "PropagatingVGW": "PropagatingVgw", + 
"RejectVPCPeeringConnectionInput": "RejectVpcPeeringConnectionInput", + "RejectVPCPeeringConnectionOutput": "RejectVpcPeeringConnectionOutput", + "ReplaceNetworkACLAssociationInput": "ReplaceNetworkAclAssociationInput", + "ReplaceNetworkACLAssociationOutput": "ReplaceNetworkAclAssociationOutput", + "ReplaceNetworkACLEntryInput": "ReplaceNetworkAclEntryInput", + "ReplaceNetworkACLEntryOutput": "ReplaceNetworkAclEntryOutput", + "ReservedInstancesID": "ReservedInstancesId", + "UnassignPrivateIPAddressesInput": "UnassignPrivateIpAddressesInput", + "UnassignPrivateIPAddressesOutput": "UnassignPrivateIpAddressesOutput", + "UserIDGroupPair": "UserIdGroupPair", + "VGWTelemetry": "VgwTelemetry", + "VPC": "Vpc", + "VPCAttachment": "VpcAttachment", + "VPCClassicLink": "VpcClassicLink", + "VPCEndpoint": "VpcEndpoint", + "VPCPeeringConnection": "VpcPeeringConnection", + "VPCPeeringConnectionStateReason": "VpcPeeringConnectionStateReason", + "VPCPeeringConnectionVPCInfo": "VpcPeeringConnectionVpcInfo", + "VPNConnection": "VpnConnection", + "VPNConnectionOptions": "VpnConnectionOptions", + "VPNConnectionOptionsSpecification": "VpnConnectionOptionsSpecification", + "VPNGateway": "VpnGateway", + "VPNStaticRoute": "VpnStaticRoute", + }, + fields: map[string]string{ + "AMILaunchIndex": "AmiLaunchIndex", + "ARN": "Arn", + "AWSAccessKeyID": "AWSAccessKeyId", + "AccepterVPCInfo": "AccepterVpcInfo", + "AddRouteTableIDs": "AddRouteTableIds", + "AllocationID": "AllocationId", + "AllocationIDs": "AllocationIds", + "AssociatePublicIPAddress": "AssociatePublicIpAddress", + "AssociationID": "AssociationId", + "AttachmentID": "AttachmentId", + "AvailableIPAddressCount": "AvailableIpAddressCount", + "BGPASN": "BgpAsn", + "BundleID": "BundleId", + "BundleIDs": "BundleIds", + "CIDRBlock": "CidrBlock", + "CIDRIP": "CidrIp", + "CIDRs": "Cidrs", + "ConversionTaskID": "ConversionTaskId", + "ConversionTaskIDs": "ConversionTaskIds", + "CustomerGatewayID": "CustomerGatewayId", + "CustomerGatewayIDs": 
"CustomerGatewayIds", + "DHCPConfigurations": "DhcpConfigurations", + "DHCPOptions": "DhcpOptions", + "DHCPOptionsID": "DhcpOptionsId", + "DHCPOptionsIDs": "DhcpOptionsIds", + "DefaultForAZ": "DefaultForAz", + "DeliverLogsPermissionARN": "DeliverLogsPermissionArn", + "DestinationCIDRBlock": "DestinationCidrBlock", + "DestinationPrefixListID": "DestinationPrefixListId", + "DisableAPITermination": "DisableApiTermination", + "EBS": "Ebs", + "EBSOptimized": "EbsOptimized", + "EnableDNSHostnames": "EnableDnsHostnames", + "EnableDNSSupport": "EnableDnsSupport", + "EventID": "EventId", + "ExportTaskID": "ExportTaskId", + "ExportTaskIDs": "ExportTaskIds", + "FlowLogID": "FlowLogId", + "FlowLogIDs": "FlowLogIds", + "GatewayID": "GatewayId", + "GroupID": "GroupId", + "GroupIDs": "GroupIds", + "IAMFleetRole": "IamFleetRole", + "IAMInstanceProfile": "IamInstanceProfile", + "ICMPTypeCode": "IcmpTypeCode", + "ID": "Id", + "IOPS": "Iops", + "IPAddress": "IpAddress", + "IPOwnerID": "IpOwnerId", + "IPPermissions": "IpPermissions", + "IPPermissionsEgress": "IpPermissionsEgress", + "IPProtocol": "IpProtocol", + "IPRanges": "IpRanges", + "ImageID": "ImageId", + "ImageIDs": "ImageIds", + "ImportManifestURL": "ImportManifestUrl", + "ImportTaskID": "ImportTaskId", + "ImportTaskIDs": "ImportTaskIds", + "InstanceID": "InstanceId", + "InstanceIDs": "InstanceIds", + "InstanceOwnerID": "InstanceOwnerId", + "InternetGatewayID": "InternetGatewayId", + "InternetGatewayIDs": "InternetGatewayIds", + "KMSKeyID": "KmsKeyId", + "KernelID": "KernelId", + "MACAddress": "MacAddress", + "MapPublicIPOnLaunch": "MapPublicIpOnLaunch", + "NetworkACL": "NetworkAcl", + "NetworkACLAssociationID": "NetworkAclAssociationId", + "NetworkACLID": "NetworkAclId", + "NetworkACLIDs": "NetworkAclIds", + "NetworkACLs": "NetworkAcls", + "NetworkInterfaceID": "NetworkInterfaceId", + "NetworkInterfaceIDs": "NetworkInterfaceIds", + "NetworkInterfaceOwnerID": "NetworkInterfaceOwnerId", + "NewAssociationID": "NewAssociationId", 
+ "OutsideIPAddress": "OutsideIpAddress", + "OwnerID": "OwnerId", + "OwnerIDs": "OwnerIds", + "PeerOwnerID": "PeerOwnerId", + "PeerVPCID": "PeerVpcId", + "PrefixListID": "PrefixListId", + "PrefixListIDs": "PrefixListIds", + "PresignedURL": "PresignedUrl", + "PrivateDNSName": "PrivateDnsName", + "PrivateIPAddress": "PrivateIpAddress", + "PrivateIPAddresses": "PrivateIpAddresses", + "ProductCodeID": "ProductCodeId", + "PropagatingVGWs": "PropagatingVgws", + "PublicDNSName": "PublicDnsName", + "PublicIP": "PublicIp", + "PublicIPAddress": "PublicIpAddress", + "PublicIPs": "PublicIps", + "RAMDisk": "Ramdisk", + "RAMDiskID": "RamdiskId", + "RemoveRouteTableIDs": "RemoveRouteTableIds", + "RequesterID": "RequesterId", + "RequesterVPCInfo": "RequesterVpcInfo", + "ReservationID": "ReservationId", + "ReservedInstancesID": "ReservedInstancesId", + "ReservedInstancesIDs": "ReservedInstancesIds", + "ReservedInstancesListingID": "ReservedInstancesListingId", + "ReservedInstancesModificationID": "ReservedInstancesModificationId", + "ReservedInstancesModificationIDs": "ReservedInstancesModificationIds", + "ReservedInstancesOfferingID": "ReservedInstancesOfferingId", + "ReservedInstancesOfferingIDs": "ReservedInstancesOfferingIds", + "ResourceID": "ResourceId", + "ResourceIDs": "ResourceIds", + "RestorableByUserIDs": "RestorableByUserIds", + "RouteTableAssociationID": "RouteTableAssociationId", + "RouteTableID": "RouteTableId", + "RouteTableIDs": "RouteTableIds", + "SRIOVNetSupport": "SriovNetSupport", + "SecondaryPrivateIPAddressCount": "SecondaryPrivateIpAddressCount", + "SecurityGroupIDs": "SecurityGroupIds", + "SnapshotID": "SnapshotId", + "SnapshotIDs": "SnapshotIds", + "SourceImageID": "SourceImageId", + "SourceSecurityGroupOwnerID": "SourceSecurityGroupOwnerId", + "SourceSnapshotID": "SourceSnapshotId", + "SpotFleetRequestID": "SpotFleetRequestId", + "SpotFleetRequestIDs": "SpotFleetRequestIds", + "SpotInstanceRequestID": "SpotInstanceRequestId", + "SpotInstanceRequestIDs": 
"SpotInstanceRequestIds", + "SubnetID": "SubnetId", + "SubnetIDs": "SubnetIds", + "URL": "Url", + "UserID": "UserId", + "UserIDGroupPairs": "UserIdGroupPairs", + "UserIDs": "UserIds", + "VGWTelemetry": "VgwTelemetry", + "VPC": "Vpc", + "VPCAttachment": "VpcAttachment", + "VPCAttachments": "VpcAttachments", + "VPCEndpoint": "VpcEndpoint", + "VPCEndpointID": "VpcEndpointId", + "VPCEndpointIDs": "VpcEndpointIds", + "VPCEndpoints": "VpcEndpoints", + "VPCID": "VpcId", + "VPCIDs": "VpcIds", + "VPCPeeringConnection": "VpcPeeringConnection", + "VPCPeeringConnectionID": "VpcPeeringConnectionId", + "VPCPeeringConnectionIDs": "VpcPeeringConnectionIds", + "VPCPeeringConnections": "VpcPeeringConnections", + "VPCs": "Vpcs", + "VPNConnection": "VpnConnection", + "VPNConnectionID": "VpnConnectionId", + "VPNConnectionIDs": "VpnConnectionIds", + "VPNConnections": "VpnConnections", + "VPNGateway": "VpnGateway", + "VPNGatewayID": "VpnGatewayId", + "VPNGatewayIDs": "VpnGatewayIds", + "VPNGateways": "VpnGateways", + "VolumeID": "VolumeId", + "VolumeIDs": "VolumeIds", + }, + }, + "github.com/aws/aws-sdk-go/service/ec2/ec2iface": { + operations: map[string]string{ + "AcceptVPCPeeringConnection": "AcceptVpcPeeringConnection", + "AcceptVPCPeeringConnectionPages": "AcceptVpcPeeringConnectionPages", + "AcceptVPCPeeringConnectionRequest": "AcceptVpcPeeringConnectionRequest", + "AssignPrivateIPAddresses": "AssignPrivateIpAddresses", + "AssignPrivateIPAddressesPages": "AssignPrivateIpAddressesPages", + "AssignPrivateIPAddressesRequest": "AssignPrivateIpAddressesRequest", + "AssociateDHCPOptions": "AssociateDhcpOptions", + "AssociateDHCPOptionsPages": "AssociateDhcpOptionsPages", + "AssociateDHCPOptionsRequest": "AssociateDhcpOptionsRequest", + "AttachClassicLinkVPC": "AttachClassicLinkVpc", + "AttachClassicLinkVPCPages": "AttachClassicLinkVpcPages", + "AttachClassicLinkVPCRequest": "AttachClassicLinkVpcRequest", + "AttachVPNGateway": "AttachVpnGateway", + "AttachVPNGatewayPages": 
"AttachVpnGatewayPages", + "AttachVPNGatewayRequest": "AttachVpnGatewayRequest", + "CreateDHCPOptions": "CreateDhcpOptions", + "CreateDHCPOptionsPages": "CreateDhcpOptionsPages", + "CreateDHCPOptionsRequest": "CreateDhcpOptionsRequest", + "CreateNetworkACL": "CreateNetworkAcl", + "CreateNetworkACLEntry": "CreateNetworkAclEntry", + "CreateNetworkACLEntryPages": "CreateNetworkAclEntryPages", + "CreateNetworkACLEntryRequest": "CreateNetworkAclEntryRequest", + "CreateNetworkACLPages": "CreateNetworkAclPages", + "CreateNetworkACLRequest": "CreateNetworkAclRequest", + "CreateVPC": "CreateVpc", + "CreateVPCEndpoint": "CreateVpcEndpoint", + "CreateVPCEndpointPages": "CreateVpcEndpointPages", + "CreateVPCEndpointRequest": "CreateVpcEndpointRequest", + "CreateVPCPages": "CreateVpcPages", + "CreateVPCPeeringConnection": "CreateVpcPeeringConnection", + "CreateVPCPeeringConnectionPages": "CreateVpcPeeringConnectionPages", + "CreateVPCPeeringConnectionRequest": "CreateVpcPeeringConnectionRequest", + "CreateVPCRequest": "CreateVpcRequest", + "CreateVPNConnection": "CreateVpnConnection", + "CreateVPNConnectionPages": "CreateVpnConnectionPages", + "CreateVPNConnectionRequest": "CreateVpnConnectionRequest", + "CreateVPNConnectionRoute": "CreateVpnConnectionRoute", + "CreateVPNConnectionRoutePages": "CreateVpnConnectionRoutePages", + "CreateVPNConnectionRouteRequest": "CreateVpnConnectionRouteRequest", + "CreateVPNGateway": "CreateVpnGateway", + "CreateVPNGatewayPages": "CreateVpnGatewayPages", + "CreateVPNGatewayRequest": "CreateVpnGatewayRequest", + "DeleteDHCPOptions": "DeleteDhcpOptions", + "DeleteDHCPOptionsPages": "DeleteDhcpOptionsPages", + "DeleteDHCPOptionsRequest": "DeleteDhcpOptionsRequest", + "DeleteNetworkACL": "DeleteNetworkAcl", + "DeleteNetworkACLEntry": "DeleteNetworkAclEntry", + "DeleteNetworkACLEntryPages": "DeleteNetworkAclEntryPages", + "DeleteNetworkACLEntryRequest": "DeleteNetworkAclEntryRequest", + "DeleteNetworkACLPages": "DeleteNetworkAclPages", + 
"DeleteNetworkACLRequest": "DeleteNetworkAclRequest", + "DeleteVPC": "DeleteVpc", + "DeleteVPCEndpoints": "DeleteVpcEndpoints", + "DeleteVPCEndpointsPages": "DeleteVpcEndpointsPages", + "DeleteVPCEndpointsRequest": "DeleteVpcEndpointsRequest", + "DeleteVPCPages": "DeleteVpcPages", + "DeleteVPCPeeringConnection": "DeleteVpcPeeringConnection", + "DeleteVPCPeeringConnectionPages": "DeleteVpcPeeringConnectionPages", + "DeleteVPCPeeringConnectionRequest": "DeleteVpcPeeringConnectionRequest", + "DeleteVPCRequest": "DeleteVpcRequest", + "DeleteVPNConnection": "DeleteVpnConnection", + "DeleteVPNConnectionPages": "DeleteVpnConnectionPages", + "DeleteVPNConnectionRequest": "DeleteVpnConnectionRequest", + "DeleteVPNConnectionRoute": "DeleteVpnConnectionRoute", + "DeleteVPNConnectionRoutePages": "DeleteVpnConnectionRoutePages", + "DeleteVPNConnectionRouteRequest": "DeleteVpnConnectionRouteRequest", + "DeleteVPNGateway": "DeleteVpnGateway", + "DeleteVPNGatewayPages": "DeleteVpnGatewayPages", + "DeleteVPNGatewayRequest": "DeleteVpnGatewayRequest", + "DescribeDHCPOptions": "DescribeDhcpOptions", + "DescribeDHCPOptionsPages": "DescribeDhcpOptionsPages", + "DescribeDHCPOptionsRequest": "DescribeDhcpOptionsRequest", + "DescribeNetworkACLs": "DescribeNetworkAcls", + "DescribeNetworkACLsPages": "DescribeNetworkAclsPages", + "DescribeNetworkACLsRequest": "DescribeNetworkAclsRequest", + "DescribeVPCAttribute": "DescribeVpcAttribute", + "DescribeVPCAttributePages": "DescribeVpcAttributePages", + "DescribeVPCAttributeRequest": "DescribeVpcAttributeRequest", + "DescribeVPCClassicLink": "DescribeVpcClassicLink", + "DescribeVPCClassicLinkPages": "DescribeVpcClassicLinkPages", + "DescribeVPCClassicLinkRequest": "DescribeVpcClassicLinkRequest", + "DescribeVPCEndpointServices": "DescribeVpcEndpointServices", + "DescribeVPCEndpointServicesPages": "DescribeVpcEndpointServicesPages", + "DescribeVPCEndpointServicesRequest": "DescribeVpcEndpointServicesRequest", + "DescribeVPCEndpoints": 
"DescribeVpcEndpoints", + "DescribeVPCEndpointsPages": "DescribeVpcEndpointsPages", + "DescribeVPCEndpointsRequest": "DescribeVpcEndpointsRequest", + "DescribeVPCPeeringConnections": "DescribeVpcPeeringConnections", + "DescribeVPCPeeringConnectionsPages": "DescribeVpcPeeringConnectionsPages", + "DescribeVPCPeeringConnectionsRequest": "DescribeVpcPeeringConnectionsRequest", + "DescribeVPCs": "DescribeVpcs", + "DescribeVPCsPages": "DescribeVpcsPages", + "DescribeVPCsRequest": "DescribeVpcsRequest", + "DescribeVPNConnections": "DescribeVpnConnections", + "DescribeVPNConnectionsPages": "DescribeVpnConnectionsPages", + "DescribeVPNConnectionsRequest": "DescribeVpnConnectionsRequest", + "DescribeVPNGateways": "DescribeVpnGateways", + "DescribeVPNGatewaysPages": "DescribeVpnGatewaysPages", + "DescribeVPNGatewaysRequest": "DescribeVpnGatewaysRequest", + "DetachClassicLinkVPC": "DetachClassicLinkVpc", + "DetachClassicLinkVPCPages": "DetachClassicLinkVpcPages", + "DetachClassicLinkVPCRequest": "DetachClassicLinkVpcRequest", + "DetachVPNGateway": "DetachVpnGateway", + "DetachVPNGatewayPages": "DetachVpnGatewayPages", + "DetachVPNGatewayRequest": "DetachVpnGatewayRequest", + "DisableVGWRoutePropagation": "DisableVgwRoutePropagation", + "DisableVGWRoutePropagationPages": "DisableVgwRoutePropagationPages", + "DisableVGWRoutePropagationRequest": "DisableVgwRoutePropagationRequest", + "DisableVPCClassicLink": "DisableVpcClassicLink", + "DisableVPCClassicLinkPages": "DisableVpcClassicLinkPages", + "DisableVPCClassicLinkRequest": "DisableVpcClassicLinkRequest", + "EnableVGWRoutePropagation": "EnableVgwRoutePropagation", + "EnableVGWRoutePropagationPages": "EnableVgwRoutePropagationPages", + "EnableVGWRoutePropagationRequest": "EnableVgwRoutePropagationRequest", + "EnableVPCClassicLink": "EnableVpcClassicLink", + "EnableVPCClassicLinkPages": "EnableVpcClassicLinkPages", + "EnableVPCClassicLinkRequest": "EnableVpcClassicLinkRequest", + "ModifyVPCAttribute": "ModifyVpcAttribute", + 
"ModifyVPCAttributePages": "ModifyVpcAttributePages", + "ModifyVPCAttributeRequest": "ModifyVpcAttributeRequest", + "ModifyVPCEndpoint": "ModifyVpcEndpoint", + "ModifyVPCEndpointPages": "ModifyVpcEndpointPages", + "ModifyVPCEndpointRequest": "ModifyVpcEndpointRequest", + "MoveAddressToVPC": "MoveAddressToVpc", + "MoveAddressToVPCPages": "MoveAddressToVpcPages", + "MoveAddressToVPCRequest": "MoveAddressToVpcRequest", + "RejectVPCPeeringConnection": "RejectVpcPeeringConnection", + "RejectVPCPeeringConnectionPages": "RejectVpcPeeringConnectionPages", + "RejectVPCPeeringConnectionRequest": "RejectVpcPeeringConnectionRequest", + "ReplaceNetworkACLAssociation": "ReplaceNetworkAclAssociation", + "ReplaceNetworkACLAssociationPages": "ReplaceNetworkAclAssociationPages", + "ReplaceNetworkACLAssociationRequest": "ReplaceNetworkAclAssociationRequest", + "ReplaceNetworkACLEntry": "ReplaceNetworkAclEntry", + "ReplaceNetworkACLEntryPages": "ReplaceNetworkAclEntryPages", + "ReplaceNetworkACLEntryRequest": "ReplaceNetworkAclEntryRequest", + "UnassignPrivateIPAddresses": "UnassignPrivateIpAddresses", + "UnassignPrivateIPAddressesPages": "UnassignPrivateIpAddressesPages", + "UnassignPrivateIPAddressesRequest": "UnassignPrivateIpAddressesRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/ecs": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "CPU": "Cpu", + "ClusterARN": "ClusterArn", + "ClusterARNs": "ClusterArns", + "ContainerARN": "ContainerArn", + "ContainerInstanceARN": "ContainerInstanceArn", + "ContainerInstanceARNs": "ContainerInstanceArns", + "EC2InstanceID": "Ec2InstanceId", + "ID": "Id", + "RoleARN": "RoleArn", + "ServiceARN": "ServiceArn", + "ServiceARNs": "ServiceArns", + "TaskARN": "TaskArn", + "TaskARNs": "TaskArns", + "TaskDefinitionARN": "TaskDefinitionArn", + "TaskDefinitionARNs": "TaskDefinitionArns", + }, + }, + 
"github.com/aws/aws-sdk-go/service/ecs/ecsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/efs": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "FileSystemID": "FileSystemId", + "IPAddress": "IpAddress", + "MountTargetID": "MountTargetId", + "NetworkInterfaceID": "NetworkInterfaceId", + "OwnerID": "OwnerId", + "SubnetID": "SubnetId", + }, + }, + "github.com/aws/aws-sdk-go/service/efs/efsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/elasticache": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "CacheClusterID": "CacheClusterId", + "CacheNodeID": "CacheNodeId", + "CacheNodeIDsToReboot": "CacheNodeIdsToReboot", + "CacheNodeIDsToRemove": "CacheNodeIdsToRemove", + "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId", + "NodeGroupID": "NodeGroupId", + "NotificationTopicARN": "NotificationTopicArn", + "OwnerID": "OwnerId", + "PrimaryClusterID": "PrimaryClusterId", + "ReplicationGroupID": "ReplicationGroupId", + "ReservedCacheNodeID": "ReservedCacheNodeId", + "ReservedCacheNodesOfferingID": "ReservedCacheNodesOfferingId", + "SecurityGroupID": "SecurityGroupId", + "SecurityGroupIDs": "SecurityGroupIds", + "SnapshotARNs": "SnapshotArns", + "SnapshottingClusterID": "SnapshottingClusterId", + "SourceCacheNodeID": "SourceCacheNodeId", + "SubnetIDs": "SubnetIds", + "TopicARN": "TopicArn", + "VPCID": "VpcId", + }, + }, + "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/elasticbeanstalk": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "DestinationEnvironmentID": 
"DestinationEnvironmentId", + "EC2InstanceID": "Ec2InstanceId", + "EnvironmentID": "EnvironmentId", + "EnvironmentIDs": "EnvironmentIds", + "ID": "Id", + "OK": "Ok", + "RequestID": "RequestId", + "SourceEnvironmentID": "SourceEnvironmentId", + }, + }, + "github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/elastictranscoder": { + operations: map[string]string{}, + shapes: map[string]string{ + "HLSContentProtection": "HlsContentProtection", + "PlayReadyDRM": "PlayReadyDrm", + }, + fields: map[string]string{ + "ARN": "Arn", + "AWSKMSKeyARN": "AwsKmsKeyArn", + "HLSContentProtection": "HlsContentProtection", + "ID": "Id", + "KeyID": "KeyId", + "KeyMD5": "KeyMd5", + "LicenseAcquisitionURL": "LicenseAcquisitionUrl", + "PipelineID": "PipelineId", + "PlayReadyDRM": "PlayReadyDrm", + "PresetID": "PresetId", + "PresetWatermarkID": "PresetWatermarkId", + }, + }, + "github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/elb": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "InstanceID": "InstanceId", + "SSLCertificateID": "SSLCertificateId", + "VPCID": "VPCId", + }, + }, + "github.com/aws/aws-sdk-go/service/elb/elbiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/emr": { + operations: map[string]string{}, + shapes: map[string]string{ + "EC2InstanceAttributes": "Ec2InstanceAttributes", + "HadoopJARStepConfig": "HadoopJarStepConfig", + }, + fields: map[string]string{ + "AMIVersion": "AmiVersion", + "ClusterID": "ClusterId", + "EC2AvailabilityZone": "Ec2AvailabilityZone", + "EC2InstanceAttributes": 
"Ec2InstanceAttributes", + "EC2InstanceID": "Ec2InstanceId", + "EC2InstanceIDsToTerminate": "EC2InstanceIdsToTerminate", + "EC2KeyName": "Ec2KeyName", + "EC2SubnetID": "Ec2SubnetId", + "EMRManagedMasterSecurityGroup": "EmrManagedMasterSecurityGroup", + "EMRManagedSlaveSecurityGroup": "EmrManagedSlaveSecurityGroup", + "HadoopJARStep": "HadoopJarStep", + "IAMInstanceProfile": "IamInstanceProfile", + "ID": "Id", + "InstanceGroupID": "InstanceGroupId", + "InstanceGroupIDs": "InstanceGroupIds", + "JAR": "Jar", + "JobFlowID": "JobFlowId", + "JobFlowIDs": "JobFlowIds", + "LogURI": "LogUri", + "MasterInstanceID": "MasterInstanceId", + "MasterPublicDNSName": "MasterPublicDnsName", + "PrivateDNSName": "PrivateDnsName", + "PrivateIPAddress": "PrivateIpAddress", + "PublicDNSName": "PublicDnsName", + "PublicIPAddress": "PublicIpAddress", + "RequestedAMIVersion": "RequestedAmiVersion", + "ResourceID": "ResourceId", + "RunningAMIVersion": "RunningAmiVersion", + "StepID": "StepId", + "StepIDs": "StepIds", + }, + }, + "github.com/aws/aws-sdk-go/service/emr/emriface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/glacier": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AccountID": "AccountId", + "ArchiveID": "ArchiveId", + "JobID": "JobId", + "LockID": "LockId", + "MultipartUploadID": "MultipartUploadId", + "UploadID": "UploadId", + }, + }, + "github.com/aws/aws-sdk-go/service/glacier/glacieriface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/iam": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AccessKeyID": "AccessKeyId", + "CertificateID": "CertificateId", + "DefaultVersionID": "DefaultVersionId", + "GroupID": "GroupId", + "InstanceProfileID": "InstanceProfileId", + 
"OpenIDConnectProviderARN": "OpenIDConnectProviderArn", + "PolicyARN": "PolicyArn", + "PolicyID": "PolicyId", + "RoleID": "RoleId", + "SAMLProviderARN": "SAMLProviderArn", + "SSHPublicKeyID": "SSHPublicKeyId", + "ServerCertificateID": "ServerCertificateId", + "URL": "Url", + "UserID": "UserId", + "VersionID": "VersionId", + }, + }, + "github.com/aws/aws-sdk-go/service/iam/iamiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/kinesis": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AdjacentParentShardID": "AdjacentParentShardId", + "ExclusiveStartShardID": "ExclusiveStartShardId", + "ParentShardID": "ParentShardId", + "ShardID": "ShardId", + }, + }, + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/kms": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AWSAccountID": "AWSAccountId", + "AliasARN": "AliasArn", + "DestinationKeyID": "DestinationKeyId", + "GrantID": "GrantId", + "KeyARN": "KeyArn", + "KeyID": "KeyId", + "SourceKeyID": "SourceKeyId", + "TargetKeyID": "TargetKeyId", + }, + }, + "github.com/aws/aws-sdk-go/service/kms/kmsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/lambda": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "EventSourceARN": "EventSourceArn", + "FunctionARN": "FunctionArn", + "SourceARN": "SourceArn", + "StatementID": "StatementId", + }, + }, + "github.com/aws/aws-sdk-go/service/lambda/lambdaiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + 
"github.com/aws/aws-sdk-go/service/machinelearning": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "BatchPredictionDataSourceID": "BatchPredictionDataSourceId", + "BatchPredictionID": "BatchPredictionId", + "CreatedByIAMUser": "CreatedByIamUser", + "DataPipelineID": "DataPipelineId", + "DataSchemaURI": "DataSchemaUri", + "DataSourceID": "DataSourceId", + "EndpointURL": "EndpointUrl", + "EvaluationDataSourceID": "EvaluationDataSourceId", + "EvaluationID": "EvaluationId", + "LogURI": "LogUri", + "MLModelID": "MLModelId", + "OutputURI": "OutputUri", + "RecipeURI": "RecipeUri", + "SecurityGroupIDs": "SecurityGroupIds", + "SelectSQLQuery": "SelectSqlQuery", + "SubnetID": "SubnetId", + "TrainingDataSourceID": "TrainingDataSourceId", + }, + }, + "github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/mobileanalytics": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ID": "Id", + }, + }, + "github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/opsworks": { + operations: map[string]string{ + "AssociateElasticIP": "AssociateElasticIp", + "AssociateElasticIPPages": "AssociateElasticIpPages", + "AssociateElasticIPRequest": "AssociateElasticIpRequest", + "DeregisterElasticIP": "DeregisterElasticIp", + "DeregisterElasticIPPages": "DeregisterElasticIpPages", + "DeregisterElasticIPRequest": "DeregisterElasticIpRequest", + "DeregisterRDSDBInstance": "DeregisterRdsDbInstance", + "DeregisterRDSDBInstancePages": "DeregisterRdsDbInstancePages", + "DeregisterRDSDBInstanceRequest": "DeregisterRdsDbInstanceRequest", + "DescribeElasticIPs": "DescribeElasticIps", + 
"DescribeElasticIPsPages": "DescribeElasticIpsPages", + "DescribeElasticIPsRequest": "DescribeElasticIpsRequest", + "DescribeRAIDArrays": "DescribeRaidArrays", + "DescribeRAIDArraysPages": "DescribeRaidArraysPages", + "DescribeRAIDArraysRequest": "DescribeRaidArraysRequest", + "DescribeRDSDBInstances": "DescribeRdsDbInstances", + "DescribeRDSDBInstancesPages": "DescribeRdsDbInstancesPages", + "DescribeRDSDBInstancesRequest": "DescribeRdsDbInstancesRequest", + "DisassociateElasticIP": "DisassociateElasticIp", + "DisassociateElasticIPPages": "DisassociateElasticIpPages", + "DisassociateElasticIPRequest": "DisassociateElasticIpRequest", + "RegisterElasticIP": "RegisterElasticIp", + "RegisterElasticIPPages": "RegisterElasticIpPages", + "RegisterElasticIPRequest": "RegisterElasticIpRequest", + "RegisterRDSDBInstance": "RegisterRdsDbInstance", + "RegisterRDSDBInstancePages": "RegisterRdsDbInstancePages", + "RegisterRDSDBInstanceRequest": "RegisterRdsDbInstanceRequest", + "UpdateElasticIP": "UpdateElasticIp", + "UpdateElasticIPPages": "UpdateElasticIpPages", + "UpdateElasticIPRequest": "UpdateElasticIpRequest", + "UpdateRDSDBInstance": "UpdateRdsDbInstance", + "UpdateRDSDBInstancePages": "UpdateRdsDbInstancePages", + "UpdateRDSDBInstanceRequest": "UpdateRdsDbInstanceRequest", + }, + shapes: map[string]string{ + "AssociateElasticIPInput": "AssociateElasticIpInput", + "AssociateElasticIPOutput": "AssociateElasticIpOutput", + "DeregisterElasticIPInput": "DeregisterElasticIpInput", + "DeregisterElasticIPOutput": "DeregisterElasticIpOutput", + "DeregisterRDSDBInstanceInput": "DeregisterRdsDbInstanceInput", + "DeregisterRDSDBInstanceOutput": "DeregisterRdsDbInstanceOutput", + "DescribeElasticIPsInput": "DescribeElasticIpsInput", + "DescribeElasticIPsOutput": "DescribeElasticIpsOutput", + "DescribeRAIDArraysInput": "DescribeRaidArraysInput", + "DescribeRAIDArraysOutput": "DescribeRaidArraysOutput", + "DescribeRDSDBInstancesInput": "DescribeRdsDbInstancesInput", + 
"DescribeRDSDBInstancesOutput": "DescribeRdsDbInstancesOutput", + "DisassociateElasticIPInput": "DisassociateElasticIpInput", + "DisassociateElasticIPOutput": "DisassociateElasticIpOutput", + "EBSBlockDevice": "EbsBlockDevice", + "ElasticIP": "ElasticIp", + "RAIDArray": "RaidArray", + "RDSDBInstance": "RdsDbInstance", + "RegisterElasticIPInput": "RegisterElasticIpInput", + "RegisterElasticIPOutput": "RegisterElasticIpOutput", + "RegisterRDSDBInstanceInput": "RegisterRdsDbInstanceInput", + "RegisterRDSDBInstanceOutput": "RegisterRdsDbInstanceOutput", + "SSLConfiguration": "SslConfiguration", + "UpdateElasticIPInput": "UpdateElasticIpInput", + "UpdateElasticIPOutput": "UpdateElasticIpOutput", + "UpdateRDSDBInstanceInput": "UpdateRdsDbInstanceInput", + "UpdateRDSDBInstanceOutput": "UpdateRdsDbInstanceOutput", + }, + fields: map[string]string{ + "AMIID": "AmiId", + "ARN": "Arn", + "AgentInstallerURL": "AgentInstallerUrl", + "AllowSSH": "AllowSsh", + "AppID": "AppId", + "AppIDs": "AppIds", + "AutoAssignElasticIPs": "AutoAssignElasticIps", + "AutoAssignPublicIPs": "AutoAssignPublicIps", + "CPUThreshold": "CpuThreshold", + "CloneAppIDs": "CloneAppIds", + "CommandID": "CommandId", + "CommandIDs": "CommandIds", + "CustomInstanceProfileARN": "CustomInstanceProfileArn", + "CustomJSON": "CustomJson", + "CustomSecurityGroupIDs": "CustomSecurityGroupIds", + "DBInstanceIdentifier": "DbInstanceIdentifier", + "DBPassword": "DbPassword", + "DBUser": "DbUser", + "DNSName": "DnsName", + "DefaultInstanceProfileARN": "DefaultInstanceProfileArn", + "DefaultSSHKeyName": "DefaultSshKeyName", + "DefaultSubnetID": "DefaultSubnetId", + "DelayUntilELBConnectionsDrained": "DelayUntilElbConnectionsDrained", + "DeleteElasticIP": "DeleteElasticIp", + "DeploymentID": "DeploymentId", + "DeploymentIDs": "DeploymentIds", + "EBS": "Ebs", + "EBSOptimized": "EbsOptimized", + "EC2InstanceID": "Ec2InstanceId", + "EC2InstanceIDs": "Ec2InstanceIds", + "EC2VolumeID": "Ec2VolumeId", + "EcsClusterARN": 
"EcsClusterArn", + "EcsClusterARNs": "EcsClusterArns", + "EcsContainerInstanceARN": "EcsContainerInstanceArn", + "ElasticIP": "ElasticIp", + "ElasticIPs": "ElasticIps", + "EnableSSL": "EnableSsl", + "IAMUserARN": "IamUserArn", + "IAMUserARNs": "IamUserArns", + "IOPS": "Iops", + "IP": "Ip", + "IPs": "Ips", + "InstanceID": "InstanceId", + "InstanceIDs": "InstanceIds", + "InstanceProfileARN": "InstanceProfileArn", + "LastServiceErrorID": "LastServiceErrorId", + "LayerID": "LayerId", + "LayerIDs": "LayerIds", + "LogURL": "LogUrl", + "MissingOnRDS": "MissingOnRds", + "PrivateDNS": "PrivateDns", + "PrivateIP": "PrivateIp", + "PublicDNS": "PublicDns", + "PublicIP": "PublicIp", + "RAIDArrayID": "RaidArrayId", + "RAIDArrayIDs": "RaidArrayIds", + "RAIDArrays": "RaidArrays", + "RAIDLevel": "RaidLevel", + "RDSDBInstanceARN": "RdsDbInstanceArn", + "RDSDBInstanceARNs": "RdsDbInstanceArns", + "RDSDBInstances": "RdsDbInstances", + "RSAPublicKey": "RsaPublicKey", + "RSAPublicKeyFingerprint": "RsaPublicKeyFingerprint", + "RootDeviceVolumeID": "RootDeviceVolumeId", + "SSHHostDSAKeyFingerprint": "SshHostDsaKeyFingerprint", + "SSHHostRSAKeyFingerprint": "SshHostRsaKeyFingerprint", + "SSHKey": "SshKey", + "SSHKeyName": "SshKeyName", + "SSHPublicKey": "SshPublicKey", + "SSHUsername": "SshUsername", + "SSLConfiguration": "SslConfiguration", + "SecurityGroupIDs": "SecurityGroupIds", + "ServiceErrorID": "ServiceErrorId", + "ServiceErrorIDs": "ServiceErrorIds", + "ServiceRoleARN": "ServiceRoleArn", + "SnapshotID": "SnapshotId", + "SourceStackID": "SourceStackId", + "StackID": "StackId", + "StackIDs": "StackIds", + "SubnetID": "SubnetId", + "SubnetIDs": "SubnetIds", + "URL": "Url", + "UseEBSOptimizedInstances": "UseEbsOptimizedInstances", + "UseOpsWorksSecurityGroups": "UseOpsworksSecurityGroups", + "VPCID": "VpcId", + "VolumeID": "VolumeId", + "VolumeIDs": "VolumeIds", + }, + }, + "github.com/aws/aws-sdk-go/service/opsworks/opsworksiface": { + operations: map[string]string{ + 
"AssociateElasticIP": "AssociateElasticIp", + "AssociateElasticIPPages": "AssociateElasticIpPages", + "AssociateElasticIPRequest": "AssociateElasticIpRequest", + "DeregisterElasticIP": "DeregisterElasticIp", + "DeregisterElasticIPPages": "DeregisterElasticIpPages", + "DeregisterElasticIPRequest": "DeregisterElasticIpRequest", + "DeregisterRDSDBInstance": "DeregisterRdsDbInstance", + "DeregisterRDSDBInstancePages": "DeregisterRdsDbInstancePages", + "DeregisterRDSDBInstanceRequest": "DeregisterRdsDbInstanceRequest", + "DescribeElasticIPs": "DescribeElasticIps", + "DescribeElasticIPsPages": "DescribeElasticIpsPages", + "DescribeElasticIPsRequest": "DescribeElasticIpsRequest", + "DescribeRAIDArrays": "DescribeRaidArrays", + "DescribeRAIDArraysPages": "DescribeRaidArraysPages", + "DescribeRAIDArraysRequest": "DescribeRaidArraysRequest", + "DescribeRDSDBInstances": "DescribeRdsDbInstances", + "DescribeRDSDBInstancesPages": "DescribeRdsDbInstancesPages", + "DescribeRDSDBInstancesRequest": "DescribeRdsDbInstancesRequest", + "DisassociateElasticIP": "DisassociateElasticIp", + "DisassociateElasticIPPages": "DisassociateElasticIpPages", + "DisassociateElasticIPRequest": "DisassociateElasticIpRequest", + "RegisterElasticIP": "RegisterElasticIp", + "RegisterElasticIPPages": "RegisterElasticIpPages", + "RegisterElasticIPRequest": "RegisterElasticIpRequest", + "RegisterRDSDBInstance": "RegisterRdsDbInstance", + "RegisterRDSDBInstancePages": "RegisterRdsDbInstancePages", + "RegisterRDSDBInstanceRequest": "RegisterRdsDbInstanceRequest", + "UpdateElasticIP": "UpdateElasticIp", + "UpdateElasticIPPages": "UpdateElasticIpPages", + "UpdateElasticIPRequest": "UpdateElasticIpRequest", + "UpdateRDSDBInstance": "UpdateRdsDbInstance", + "UpdateRDSDBInstancePages": "UpdateRdsDbInstancePages", + "UpdateRDSDBInstanceRequest": "UpdateRdsDbInstanceRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/rds": { + operations: 
map[string]string{}, + shapes: map[string]string{ + "VPCSecurityGroupMembership": "VpcSecurityGroupMembership", + }, + fields: map[string]string{ + "AllowsVPCAndNonVPCInstanceMemberships": "AllowsVpcAndNonVpcInstanceMemberships", + "CustSubscriptionID": "CustSubscriptionId", + "CustomerAWSID": "CustomerAwsId", + "DBIResourceID": "DbiResourceId", + "DBInstancePort": "DbInstancePort", + "EC2SecurityGroupID": "EC2SecurityGroupId", + "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId", + "IOPS": "Iops", + "KMSKeyID": "KmsKeyId", + "OwnerID": "OwnerId", + "ReservedDBInstanceID": "ReservedDBInstanceId", + "ReservedDBInstancesOfferingID": "ReservedDBInstancesOfferingId", + "SNSTopicARN": "SnsTopicArn", + "SourceIDs": "SourceIds", + "SourceIDsList": "SourceIdsList", + "SubnetIDs": "SubnetIds", + "SupportsIOPS": "SupportsIops", + "TDECredentialARN": "TdeCredentialArn", + "TDECredentialPassword": "TdeCredentialPassword", + "VPC": "Vpc", + "VPCID": "VpcId", + "VPCSecurityGroupID": "VpcSecurityGroupId", + "VPCSecurityGroupIDs": "VpcSecurityGroupIds", + "VPCSecurityGroupMemberships": "VpcSecurityGroupMemberships", + "VPCSecurityGroups": "VpcSecurityGroups", + }, + }, + "github.com/aws/aws-sdk-go/service/rds/rdsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/redshift": { + operations: map[string]string{ + "CreateHSMClientCertificate": "CreateHsmClientCertificate", + "CreateHSMClientCertificatePages": "CreateHsmClientCertificatePages", + "CreateHSMClientCertificateRequest": "CreateHsmClientCertificateRequest", + "CreateHSMConfiguration": "CreateHsmConfiguration", + "CreateHSMConfigurationPages": "CreateHsmConfigurationPages", + "CreateHSMConfigurationRequest": "CreateHsmConfigurationRequest", + "DeleteHSMClientCertificate": "DeleteHsmClientCertificate", + "DeleteHSMClientCertificatePages": "DeleteHsmClientCertificatePages", + "DeleteHSMClientCertificateRequest": 
"DeleteHsmClientCertificateRequest", + "DeleteHSMConfiguration": "DeleteHsmConfiguration", + "DeleteHSMConfigurationPages": "DeleteHsmConfigurationPages", + "DeleteHSMConfigurationRequest": "DeleteHsmConfigurationRequest", + "DescribeHSMClientCertificates": "DescribeHsmClientCertificates", + "DescribeHSMClientCertificatesPages": "DescribeHsmClientCertificatesPages", + "DescribeHSMClientCertificatesRequest": "DescribeHsmClientCertificatesRequest", + "DescribeHSMConfigurations": "DescribeHsmConfigurations", + "DescribeHSMConfigurationsPages": "DescribeHsmConfigurationsPages", + "DescribeHSMConfigurationsRequest": "DescribeHsmConfigurationsRequest", + }, + shapes: map[string]string{ + "CreateHSMClientCertificateInput": "CreateHsmClientCertificateInput", + "CreateHSMClientCertificateOutput": "CreateHsmClientCertificateOutput", + "CreateHSMConfigurationInput": "CreateHsmConfigurationInput", + "CreateHSMConfigurationOutput": "CreateHsmConfigurationOutput", + "DeleteHSMClientCertificateInput": "DeleteHsmClientCertificateInput", + "DeleteHSMClientCertificateOutput": "DeleteHsmClientCertificateOutput", + "DeleteHSMConfigurationInput": "DeleteHsmConfigurationInput", + "DeleteHSMConfigurationOutput": "DeleteHsmConfigurationOutput", + "DescribeHSMClientCertificatesInput": "DescribeHsmClientCertificatesInput", + "DescribeHSMClientCertificatesOutput": "DescribeHsmClientCertificatesOutput", + "DescribeHSMConfigurationsInput": "DescribeHsmConfigurationsInput", + "DescribeHSMConfigurationsOutput": "DescribeHsmConfigurationsOutput", + "ElasticIPStatus": "ElasticIpStatus", + "HSMClientCertificate": "HsmClientCertificate", + "HSMConfiguration": "HsmConfiguration", + "HSMStatus": "HsmStatus", + "VPCSecurityGroupMembership": "VpcSecurityGroupMembership", + }, + fields: map[string]string{ + "AccountID": "AccountId", + "CustSubscriptionID": "CustSubscriptionId", + "CustomerAWSID": "CustomerAwsId", + "EC2SecurityGroupOwnerID": "EC2SecurityGroupOwnerId", + "ElasticIP": "ElasticIp", + 
"ElasticIPStatus": "ElasticIpStatus", + "EventID": "EventId", + "HSMClientCertificate": "HsmClientCertificate", + "HSMClientCertificateIdentifier": "HsmClientCertificateIdentifier", + "HSMClientCertificatePublicKey": "HsmClientCertificatePublicKey", + "HSMClientCertificates": "HsmClientCertificates", + "HSMConfiguration": "HsmConfiguration", + "HSMConfigurationIdentifier": "HsmConfigurationIdentifier", + "HSMConfigurations": "HsmConfigurations", + "HSMIPAddress": "HsmIpAddress", + "HSMPartitionName": "HsmPartitionName", + "HSMPartitionPassword": "HsmPartitionPassword", + "HSMServerPublicCertificate": "HsmServerPublicCertificate", + "HSMStatus": "HsmStatus", + "KMSKeyID": "KmsKeyId", + "ReservedNodeID": "ReservedNodeId", + "ReservedNodeOfferingID": "ReservedNodeOfferingId", + "SNSTopicARN": "SnsTopicArn", + "SourceIDs": "SourceIds", + "SourceIDsList": "SourceIdsList", + "SubnetIDs": "SubnetIds", + "VPCID": "VpcId", + "VPCSecurityGroupID": "VpcSecurityGroupId", + "VPCSecurityGroupIDs": "VpcSecurityGroupIds", + "VPCSecurityGroups": "VpcSecurityGroups", + }, + }, + "github.com/aws/aws-sdk-go/service/redshift/redshiftiface": { + operations: map[string]string{ + "CreateHSMClientCertificate": "CreateHsmClientCertificate", + "CreateHSMClientCertificatePages": "CreateHsmClientCertificatePages", + "CreateHSMClientCertificateRequest": "CreateHsmClientCertificateRequest", + "CreateHSMConfiguration": "CreateHsmConfiguration", + "CreateHSMConfigurationPages": "CreateHsmConfigurationPages", + "CreateHSMConfigurationRequest": "CreateHsmConfigurationRequest", + "DeleteHSMClientCertificate": "DeleteHsmClientCertificate", + "DeleteHSMClientCertificatePages": "DeleteHsmClientCertificatePages", + "DeleteHSMClientCertificateRequest": "DeleteHsmClientCertificateRequest", + "DeleteHSMConfiguration": "DeleteHsmConfiguration", + "DeleteHSMConfigurationPages": "DeleteHsmConfigurationPages", + "DeleteHSMConfigurationRequest": "DeleteHsmConfigurationRequest", + "DescribeHSMClientCertificates": 
"DescribeHsmClientCertificates", + "DescribeHSMClientCertificatesPages": "DescribeHsmClientCertificatesPages", + "DescribeHSMClientCertificatesRequest": "DescribeHsmClientCertificatesRequest", + "DescribeHSMConfigurations": "DescribeHsmConfigurations", + "DescribeHSMConfigurationsPages": "DescribeHsmConfigurationsPages", + "DescribeHSMConfigurationsRequest": "DescribeHsmConfigurationsRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/route53": { + operations: map[string]string{ + "GetCheckerIPRanges": "GetCheckerIpRanges", + "GetCheckerIPRangesPages": "GetCheckerIpRangesPages", + "GetCheckerIPRangesRequest": "GetCheckerIpRangesRequest", + }, + shapes: map[string]string{ + "GetCheckerIPRangesInput": "GetCheckerIpRangesInput", + "GetCheckerIPRangesOutput": "GetCheckerIpRangesOutput", + }, + fields: map[string]string{ + "CheckerIPRanges": "CheckerIpRanges", + "DelegationSetID": "DelegationSetId", + "HealthCheckID": "HealthCheckId", + "HostedZoneID": "HostedZoneId", + "ID": "Id", + "NextHostedZoneID": "NextHostedZoneId", + "ResourceID": "ResourceId", + "ResourceIDs": "ResourceIds", + "VPCID": "VPCId", + }, + }, + "github.com/aws/aws-sdk-go/service/route53/route53iface": { + operations: map[string]string{ + "GetCheckerIPRanges": "GetCheckerIpRanges", + "GetCheckerIPRangesPages": "GetCheckerIpRangesPages", + "GetCheckerIPRangesRequest": "GetCheckerIpRangesRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/route53domains": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "DNSSec": "DnsSec", + "GlueIPs": "GlueIps", + "IDNLangCode": "IdnLangCode", + "OperationID": "OperationId", + "RegistrarURL": "RegistrarUrl", + "RegistryDomainID": "RegistryDomainId", + }, + }, + "github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface": { + operations: map[string]string{}, + shapes: 
map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/s3": { + operations: map[string]string{ + "DeleteBucketCORS": "DeleteBucketCors", + "DeleteBucketCORSPages": "DeleteBucketCorsPages", + "DeleteBucketCORSRequest": "DeleteBucketCorsRequest", + "GetBucketACL": "GetBucketAcl", + "GetBucketACLPages": "GetBucketAclPages", + "GetBucketACLRequest": "GetBucketAclRequest", + "GetBucketCORS": "GetBucketCors", + "GetBucketCORSPages": "GetBucketCorsPages", + "GetBucketCORSRequest": "GetBucketCorsRequest", + "GetObjectACL": "GetObjectAcl", + "GetObjectACLPages": "GetObjectAclPages", + "GetObjectACLRequest": "GetObjectAclRequest", + "PutBucketACL": "PutBucketAcl", + "PutBucketACLPages": "PutBucketAclPages", + "PutBucketACLRequest": "PutBucketAclRequest", + "PutBucketCORS": "PutBucketCors", + "PutBucketCORSPages": "PutBucketCorsPages", + "PutBucketCORSRequest": "PutBucketCorsRequest", + "PutObjectACL": "PutObjectAcl", + "PutObjectACLPages": "PutObjectAclPages", + "PutObjectACLRequest": "PutObjectAclRequest", + }, + shapes: map[string]string{ + "DeleteBucketCORSInput": "DeleteBucketCorsInput", + "DeleteBucketCORSOutput": "DeleteBucketCorsOutput", + "GetBucketACLInput": "GetBucketAclInput", + "GetBucketACLOutput": "GetBucketAclOutput", + "GetBucketCORSInput": "GetBucketCorsInput", + "GetBucketCORSOutput": "GetBucketCorsOutput", + "GetObjectACLInput": "GetObjectAclInput", + "GetObjectACLOutput": "GetObjectAclOutput", + "PutBucketACLInput": "PutBucketAclInput", + "PutBucketACLOutput": "PutBucketAclOutput", + "PutBucketCORSInput": "PutBucketCorsInput", + "PutBucketCORSOutput": "PutBucketCorsOutput", + "PutObjectACLInput": "PutObjectAclInput", + "PutObjectACLOutput": "PutObjectAclOutput", + }, + fields: map[string]string{ + "CopySourceVersionID": "CopySourceVersionId", + "DeleteMarkerVersionID": "DeleteMarkerVersionId", + "HTTPErrorCodeReturnedEquals": "HttpErrorCodeReturnedEquals", + "HTTPRedirectCode": "HttpRedirectCode", + "ID": "Id", + 
"LambdaFunctionARN": "LambdaFunctionArn", + "NextUploadIDMarker": "NextUploadIdMarker", + "NextVersionIDMarker": "NextVersionIdMarker", + "QueueARN": "QueueArn", + "SSEKMSKeyID": "SSEKMSKeyId", + "TopicARN": "TopicArn", + "UploadID": "UploadId", + "UploadIDMarker": "UploadIdMarker", + "VersionID": "VersionId", + "VersionIDMarker": "VersionIdMarker", + }, + }, + "github.com/aws/aws-sdk-go/service/s3/s3manager": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "UploadID": "UploadId", + }, + }, + "github.com/aws/aws-sdk-go/service/s3/s3iface": { + operations: map[string]string{ + "DeleteBucketCORS": "DeleteBucketCors", + "DeleteBucketCORSPages": "DeleteBucketCorsPages", + "DeleteBucketCORSRequest": "DeleteBucketCorsRequest", + "GetBucketACL": "GetBucketAcl", + "GetBucketACLPages": "GetBucketAclPages", + "GetBucketACLRequest": "GetBucketAclRequest", + "GetBucketCORS": "GetBucketCors", + "GetBucketCORSPages": "GetBucketCorsPages", + "GetBucketCORSRequest": "GetBucketCorsRequest", + "GetObjectACL": "GetObjectAcl", + "GetObjectACLPages": "GetObjectAclPages", + "GetObjectACLRequest": "GetObjectAclRequest", + "PutBucketACL": "PutBucketAcl", + "PutBucketACLPages": "PutBucketAclPages", + "PutBucketACLRequest": "PutBucketAclRequest", + "PutBucketCORS": "PutBucketCors", + "PutBucketCORSPages": "PutBucketCorsPages", + "PutBucketCORSRequest": "PutBucketCorsRequest", + "PutObjectACL": "PutObjectAcl", + "PutObjectACLPages": "PutObjectAclPages", + "PutObjectACLRequest": "PutObjectAclRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/ses": { + operations: map[string]string{ + "GetIdentityDKIMAttributes": "GetIdentityDkimAttributes", + "GetIdentityDKIMAttributesPages": "GetIdentityDkimAttributesPages", + "GetIdentityDKIMAttributesRequest": "GetIdentityDkimAttributesRequest", + "SetIdentityDKIMEnabled": "SetIdentityDkimEnabled", + "SetIdentityDKIMEnabledPages": 
"SetIdentityDkimEnabledPages", + "SetIdentityDKIMEnabledRequest": "SetIdentityDkimEnabledRequest", + "VerifyDomainDKIM": "VerifyDomainDkim", + "VerifyDomainDKIMPages": "VerifyDomainDkimPages", + "VerifyDomainDKIMRequest": "VerifyDomainDkimRequest", + }, + shapes: map[string]string{ + "GetIdentityDKIMAttributesInput": "GetIdentityDkimAttributesInput", + "GetIdentityDKIMAttributesOutput": "GetIdentityDkimAttributesOutput", + "IdentityDKIMAttributes": "IdentityDkimAttributes", + "SetIdentityDKIMEnabledInput": "SetIdentityDkimEnabledInput", + "SetIdentityDKIMEnabledOutput": "SetIdentityDkimEnabledOutput", + "VerifyDomainDKIMInput": "VerifyDomainDkimInput", + "VerifyDomainDKIMOutput": "VerifyDomainDkimOutput", + }, + fields: map[string]string{ + "BCCAddresses": "BccAddresses", + "CCAddresses": "CcAddresses", + "DKIMAttributes": "DkimAttributes", + "DKIMEnabled": "DkimEnabled", + "DKIMTokens": "DkimTokens", + "DKIMVerificationStatus": "DkimVerificationStatus", + "FromARN": "FromArn", + "HTML": "Html", + "MessageID": "MessageId", + "ReturnPathARN": "ReturnPathArn", + "SNSTopic": "SnsTopic", + "SourceARN": "SourceArn", + }, + }, + "github.com/aws/aws-sdk-go/service/ses/sesiface": { + operations: map[string]string{ + "GetIdentityDKIMAttributes": "GetIdentityDkimAttributes", + "GetIdentityDKIMAttributesPages": "GetIdentityDkimAttributesPages", + "GetIdentityDKIMAttributesRequest": "GetIdentityDkimAttributesRequest", + "SetIdentityDKIMEnabled": "SetIdentityDkimEnabled", + "SetIdentityDKIMEnabledPages": "SetIdentityDkimEnabledPages", + "SetIdentityDKIMEnabledRequest": "SetIdentityDkimEnabledRequest", + "VerifyDomainDKIM": "VerifyDomainDkim", + "VerifyDomainDKIMPages": "VerifyDomainDkimPages", + "VerifyDomainDKIMRequest": "VerifyDomainDkimRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/sns": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AWSAccountID": 
"AWSAccountId", + "EndpointARN": "EndpointArn", + "MessageID": "MessageId", + "PlatformApplicationARN": "PlatformApplicationArn", + "SubscriptionARN": "SubscriptionArn", + "TargetARN": "TargetArn", + "TopicARN": "TopicArn", + }, + }, + "github.com/aws/aws-sdk-go/service/sns/snsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/sqs": { + operations: map[string]string{ + "GetQueueURL": "GetQueueUrl", + "GetQueueURLPages": "GetQueueUrlPages", + "GetQueueURLRequest": "GetQueueUrlRequest", + }, + shapes: map[string]string{ + "GetQueueURLInput": "GetQueueUrlInput", + "GetQueueURLOutput": "GetQueueUrlOutput", + }, + fields: map[string]string{ + "AWSAccountIDs": "AWSAccountIds", + "ID": "Id", + "MessageID": "MessageId", + "QueueOwnerAWSAccountID": "QueueOwnerAWSAccountId", + "QueueURL": "QueueUrl", + "QueueURLs": "QueueUrls", + }, + }, + "github.com/aws/aws-sdk-go/service/sqs/sqsiface": { + operations: map[string]string{ + "GetQueueURL": "GetQueueUrl", + "GetQueueURLPages": "GetQueueUrlPages", + "GetQueueURLRequest": "GetQueueUrlRequest", + }, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/ssm": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "InstanceID": "InstanceId", + "SHA1": "Sha1", + }, + }, + "github.com/aws/aws-sdk-go/service/ssm/ssmiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/storagegateway": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "DiskID": "DiskId", + "DiskIDs": "DiskIds", + "GatewayID": "GatewayId", + "IPV4Address": "Ipv4Address", + "IPV6Address": "Ipv6Address", + "MACAddress": "MacAddress", + "NetworkInterfaceID": "NetworkInterfaceId", + "SnapshotID": "SnapshotId", + "SourceSnapshotID": 
"SourceSnapshotId", + "VolumeDiskID": "VolumeDiskId", + "VolumeID": "VolumeId", + }, + }, + "github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/sts": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ARN": "Arn", + "AccessKeyID": "AccessKeyId", + "AssumedRoleID": "AssumedRoleId", + "ExternalID": "ExternalId", + "FederatedUserID": "FederatedUserId", + "PrincipalARN": "PrincipalArn", + "ProviderID": "ProviderId", + "RoleARN": "RoleArn", + }, + }, + "github.com/aws/aws-sdk-go/service/sts/stsiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/support": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "AttachmentID": "AttachmentId", + "AttachmentSetID": "AttachmentSetId", + "CCEmailAddresses": "CcEmailAddresses", + "CaseID": "CaseId", + "CaseIDList": "CaseIdList", + "CheckID": "CheckId", + "CheckIDs": "CheckIds", + "DisplayID": "DisplayId", + "ID": "Id", + "ResourceID": "ResourceId", + }, + }, + "github.com/aws/aws-sdk-go/service/support/supportiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/swf": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "ActivityID": "ActivityId", + "ContinuedExecutionRunID": "ContinuedExecutionRunId", + "DecisionTaskCompletedEventID": "DecisionTaskCompletedEventId", + "EventID": "EventId", + "ExternalInitiatedEventID": "ExternalInitiatedEventId", + "ID": "Id", + "InitiatedEventID": "InitiatedEventId", + "LatestCancelRequestedEventID": "LatestCancelRequestedEventId", + "NewExecutionRunID": "NewExecutionRunId", + "ParentInitiatedEventID": 
"ParentInitiatedEventId", + "PreviousStartedEventID": "PreviousStartedEventId", + "RunID": "RunId", + "ScheduledEventID": "ScheduledEventId", + "StartedEventID": "StartedEventId", + "TimerID": "TimerId", + "WorkflowID": "WorkflowId", + }, + }, + "github.com/aws/aws-sdk-go/service/swf/swfiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, + "github.com/aws/aws-sdk-go/service/workspaces": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{ + "BundleID": "BundleId", + "BundleIDs": "BundleIds", + "CustomSecurityGroupID": "CustomSecurityGroupId", + "DNSIPAddresses": "DnsIpAddresses", + "DefaultOU": "DefaultOu", + "DirectoryID": "DirectoryId", + "DirectoryIDs": "DirectoryIds", + "IAMRoleID": "IamRoleId", + "IPAddress": "IpAddress", + "SubnetID": "SubnetId", + "SubnetIDs": "SubnetIds", + "WorkspaceID": "WorkspaceId", + "WorkspaceIDs": "WorkspaceIds", + "WorkspaceSecurityGroupID": "WorkspaceSecurityGroupId", + }, + }, + "github.com/aws/aws-sdk-go/service/workspaces/workspacesiface": { + operations: map[string]string{}, + shapes: map[string]string{}, + fields: map[string]string{}, + }, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/renamer.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,43 @@ +package main + +//go:generate go run gen/gen.go + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/rename" +) + +var safeTag = 
"4e554f77f00d527b452c68a46f2e68595284121b" + +func main() { + gopath := os.Getenv("GOPATH") + if gopath == "" { + panic("GOPATH not set!") + } + gopath = strings.Split(gopath, ":")[0] + + // change directory to SDK + err := os.Chdir(filepath.Join(gopath, "src", "github.com", "aws", "aws-sdk-go")) + if err != nil { + panic("Cannot find SDK repository") + } + + // store orig HEAD + head, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output() + if err != nil { + panic("Cannot find SDK repository") + } + origHEAD := strings.Trim(string(head), " \r\n") + + // checkout to safe tag and run conversion + exec.Command("git", "checkout", safeTag).Run() + defer func() { + exec.Command("git", "checkout", origHEAD).Run() + }() + + rename.ParsePathsFromArgs() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/assert.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/assert.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/assert.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/assert.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,192 @@ +package awstesting + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/private/model/api" + "github.com/stretchr/testify/assert" +) + +// findMember searches the shape for the member with the matching key name. +func findMember(shape *api.Shape, key string) string { + for actualKey := range shape.MemberRefs { + if strings.ToLower(key) == strings.ToLower(actualKey) { + return actualKey + } + } + return "" +} + +// GenerateAssertions builds assertions for a shape based on its type. +// +// The shape's recursive values also will have assertions generated for them. 
+func GenerateAssertions(out interface{}, shape *api.Shape, prefix string) string { + switch t := out.(type) { + case map[string]interface{}: + keys := SortedKeys(t) + + code := "" + if shape.Type == "map" { + for _, k := range keys { + v := t[k] + s := shape.ValueRef.Shape + code += GenerateAssertions(v, s, prefix+"[\""+k+"\"]") + } + } else { + for _, k := range keys { + v := t[k] + m := findMember(shape, k) + s := shape.MemberRefs[m].Shape + code += GenerateAssertions(v, s, prefix+"."+m+"") + } + } + return code + case []interface{}: + code := "" + for i, v := range t { + s := shape.MemberRef.Shape + code += GenerateAssertions(v, s, prefix+"["+strconv.Itoa(i)+"]") + } + return code + default: + switch shape.Type { + case "timestamp": + return fmt.Sprintf("assert.Equal(t, time.Unix(%#v, 0).UTC().String(), %s.String())\n", out, prefix) + case "blob": + return fmt.Sprintf("assert.Equal(t, %#v, string(%s))\n", out, prefix) + case "integer", "long": + return fmt.Sprintf("assert.Equal(t, int64(%#v), *%s)\n", out, prefix) + default: + if !reflect.ValueOf(out).IsValid() { + return fmt.Sprintf("assert.Nil(t, %s)\n", prefix) + } + return fmt.Sprintf("assert.Equal(t, %#v, *%s)\n", out, prefix) + } + } +} + +// Match is a testing helper to test for testing error by comparing expected +// with a regular expression. +func Match(t *testing.T, regex, expected string) { + if !regexp.MustCompile(regex).Match([]byte(expected)) { + t.Errorf("%q\n\tdoes not match /%s/", expected, regex) + } +} + +// AssertURL verifies the expected URL is matches the actual. 
+func AssertURL(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool { + expectURL, err := url.Parse(expect) + if err != nil { + t.Errorf(errMsg("unable to parse expected URL", err, msgAndArgs)) + return false + } + actualURL, err := url.Parse(actual) + if err != nil { + t.Errorf(errMsg("unable to parse actual URL", err, msgAndArgs)) + return false + } + + assert.Equal(t, expectURL.Host, actualURL.Host, msgAndArgs...) + assert.Equal(t, expectURL.Scheme, actualURL.Scheme, msgAndArgs...) + assert.Equal(t, expectURL.Path, actualURL.Path, msgAndArgs...) + + return AssertQuery(t, expectURL.Query().Encode(), actualURL.Query().Encode(), msgAndArgs...) +} + +// AssertQuery verifies the expect HTTP query string matches the actual. +func AssertQuery(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool { + expectQ, err := url.ParseQuery(expect) + if err != nil { + t.Errorf(errMsg("unable to parse expected Query", err, msgAndArgs)) + return false + } + actualQ, err := url.ParseQuery(expect) + if err != nil { + t.Errorf(errMsg("unable to parse actual Query", err, msgAndArgs)) + return false + } + + // Make sure the keys are the same + if !assert.Equal(t, queryValueKeys(expectQ), queryValueKeys(actualQ), msgAndArgs...) { + return false + } + + for k, expectQVals := range expectQ { + sort.Strings(expectQVals) + actualQVals := actualQ[k] + sort.Strings(actualQVals) + assert.Equal(t, expectQVals, actualQVals, msgAndArgs...) + } + + return true +} + +// AssertJSON verifies that the expect json string matches the actual. 
+func AssertJSON(t *testing.T, expect, actual string, msgAndArgs ...interface{}) bool { + expectVal := map[string]interface{}{} + if err := json.Unmarshal([]byte(expect), &expectVal); err != nil { + t.Errorf(errMsg("unable to parse expected JSON", err, msgAndArgs...)) + return false + } + + actualVal := map[string]interface{}{} + if err := json.Unmarshal([]byte(actual), &actualVal); err != nil { + t.Errorf(errMsg("unable to parse actual JSON", err, msgAndArgs...)) + return false + } + + return assert.Equal(t, expectVal, actualVal, msgAndArgs...) +} + +// AssertXML verifies that the expect xml string matches the actual. +func AssertXML(t *testing.T, expect, actual string, container interface{}, msgAndArgs ...interface{}) bool { + expectVal := container + if err := xml.Unmarshal([]byte(expect), &expectVal); err != nil { + t.Errorf(errMsg("unable to parse expected XML", err, msgAndArgs...)) + } + + actualVal := container + if err := xml.Unmarshal([]byte(actual), &actualVal); err != nil { + t.Errorf(errMsg("unable to parse actual XML", err, msgAndArgs...)) + } + return assert.Equal(t, expectVal, actualVal, msgAndArgs...) +} + +func errMsg(baseMsg string, err error, msgAndArgs ...interface{}) string { + message := messageFromMsgAndArgs(msgAndArgs) + if message != "" { + message += ", " + } + return fmt.Sprintf("%s%s, %v", message, baseMsg, err) +} + +func messageFromMsgAndArgs(msgAndArgs []interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) 
+ } + return "" +} + +func queryValueKeys(v url.Values) []string { + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/assert_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/assert_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/assert_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/assert_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,64 @@ +package awstesting_test + +import ( + "encoding/xml" + "testing" + + "github.com/aws/aws-sdk-go/awstesting" +) + +func TestAssertJSON(t *testing.T) { + cases := []struct { + e, a string + asserts bool + }{ + { + e: `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, + a: `{"RecursiveStruct":{"RecursiveMap":{"bar":{"NoRecurse":"bar"},"foo":{"NoRecurse":"foo"}}}}`, + asserts: true, + }, + } + + for i, c := range cases { + mockT := &testing.T{} + if awstesting.AssertJSON(mockT, c.e, c.a) != c.asserts { + t.Error("Assert JSON result was not expected.", i) + } + } +} + +func TestAssertXML(t *testing.T) { + cases := []struct { + e, a string + asserts bool + container struct { + XMLName xml.Name `xml:"OperationRequest"` + NS string `xml:"xmlns,attr"` + RecursiveStruct struct { + RecursiveMap struct { + Entries []struct { + XMLName xml.Name `xml:"entries"` + Key string `xml:"key"` + Value struct { + XMLName xml.Name `xml:"value"` + NoRecurse string + } + } + } + } + } + }{ + { + e: `foofoobarbar`, + a: `barbarfoofoo`, + asserts: true, + }, + } + + for i, c := range cases { + // mockT := &testing.T{} + if awstesting.AssertXML(t, c.e, c.a, c.container) != c.asserts { + t.Error("Assert XML result was not expected.", i) + } + } +} diff 
-Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,20 @@ +package awstesting + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/defaults" +) + +// NewClient creates and initializes a generic service client for testing. +func NewClient(cfgs ...*aws.Config) *client.Client { + info := metadata.ClientInfo{ + Endpoint: "http://endpoint", + SigningName: "", + } + def := defaults.Get() + def.Config.MergeIn(cfgs...) + + return client.New(*def.Config, info, def.Handlers) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/integration_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,124 @@ +// +build integration + +// Package s3_test runs integration tests for S3 +package s3_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration" + 
"github.com/aws/aws-sdk-go/service/s3" +) + +var bucketName *string +var svc *s3.S3 + +func TestMain(m *testing.M) { + setup() + defer teardown() // only called if we panic + result := m.Run() + teardown() + os.Exit(result) +} + +// Create a bucket for testing +func setup() { + svc = s3.New(integration.Session) + bucketName = aws.String( + fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID())) + + for i := 0; i < 10; i++ { + _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName}) + if err == nil { + break + } + } + + for { + _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName}) + if err == nil { + break + } + time.Sleep(1 * time.Second) + } +} + +// Delete the bucket +func teardown() { + resp, _ := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName}) + for _, o := range resp.Contents { + svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key}) + } + svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName}) +} + +func TestWriteToObject(t *testing.T) { + _, err := svc.PutObject(&s3.PutObjectInput{ + Bucket: bucketName, + Key: aws.String("key name"), + Body: bytes.NewReader([]byte("hello world")), + }) + assert.NoError(t, err) + + resp, err := svc.GetObject(&s3.GetObjectInput{ + Bucket: bucketName, + Key: aws.String("key name"), + }) + assert.NoError(t, err) + + b, _ := ioutil.ReadAll(resp.Body) + assert.Equal(t, []byte("hello world"), b) +} + +func TestPresignedGetPut(t *testing.T) { + putreq, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: bucketName, + Key: aws.String("presigned-key"), + }) + var err error + + // Presign a PUT request + var puturl string + puturl, err = putreq.Presign(300 * time.Second) + assert.NoError(t, err) + + // PUT to the presigned URL with a body + var puthttpreq *http.Request + buf := bytes.NewReader([]byte("hello world")) + puthttpreq, err = http.NewRequest("PUT", puturl, buf) + assert.NoError(t, err) + + var putresp *http.Response + putresp, err = 
http.DefaultClient.Do(puthttpreq) + assert.NoError(t, err) + assert.Equal(t, 200, putresp.StatusCode) + + // Presign a GET on the same URL + getreq, _ := svc.GetObjectRequest(&s3.GetObjectInput{ + Bucket: bucketName, + Key: aws.String("presigned-key"), + }) + + var geturl string + geturl, err = getreq.Presign(300 * time.Second) + assert.NoError(t, err) + + // Get the body + var getresp *http.Response + getresp, err = http.Get(geturl) + assert.NoError(t, err) + + var b []byte + defer getresp.Body.Close() + b, err = ioutil.ReadAll(getresp.Body) + assert.Equal(t, "hello world", string(b)) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/integration_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,163 @@ +// +build integration + +// Package s3manager provides +package s3manager + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting/integration" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +var integBuf12MB = make([]byte, 1024*1024*12) +var integMD512MB = fmt.Sprintf("%x", md5.Sum(integBuf12MB)) +var bucketName *string + +func TestMain(m *testing.M) { + setup() + defer teardown() // only called if we panic + result := 
m.Run() + teardown() + os.Exit(result) +} + +func setup() { + // Create a bucket for testing + svc := s3.New(integration.Session) + bucketName = aws.String( + fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID())) + + for i := 0; i < 10; i++ { + _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName}) + if err == nil { + break + } + } + + for { + _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName}) + if err == nil { + break + } + time.Sleep(1 * time.Second) + } +} + +// Delete the bucket +func teardown() { + svc := s3.New(session.New()) + + objs, _ := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName}) + for _, o := range objs.Contents { + svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key}) + } + + uploads, _ := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: bucketName}) + for _, u := range uploads.Uploads { + svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: bucketName, + Key: u.Key, + UploadId: u.UploadId, + }) + } + + svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName}) +} + +type dlwriter struct { + buf []byte +} + +func newDLWriter(size int) *dlwriter { + return &dlwriter{buf: make([]byte, size)} +} + +func (d dlwriter) WriteAt(p []byte, pos int64) (n int, err error) { + if pos > int64(len(d.buf)) { + return 0, io.EOF + } + + written := 0 + for i, b := range p { + if i >= len(d.buf) { + break + } + d.buf[pos+int64(i)] = b + written++ + } + return written, nil +} + +func validate(t *testing.T, key string, md5value string) { + mgr := s3manager.NewDownloader(integration.Session) + params := &s3.GetObjectInput{Bucket: bucketName, Key: &key} + + w := newDLWriter(1024 * 1024 * 20) + n, err := mgr.Download(w, params) + assert.NoError(t, err) + assert.Equal(t, md5value, fmt.Sprintf("%x", md5.Sum(w.buf[0:n]))) +} + +func TestUploadConcurrently(t *testing.T) { + key := "12mb-1" + mgr := s3manager.NewUploader(integration.Session) + out, err := 
mgr.Upload(&s3manager.UploadInput{ + Bucket: bucketName, + Key: &key, + Body: bytes.NewReader(integBuf12MB), + }) + + assert.NoError(t, err) + assert.NotEqual(t, "", out.UploadID) + assert.Regexp(t, `^https?://.+/`+key+`$`, out.Location) + + validate(t, key, integMD512MB) +} + +func TestUploadFailCleanup(t *testing.T) { + svc := s3.New(session.New()) + + // Break checksum on 2nd part so it fails + part := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + if r.Operation.Name == "UploadPart" { + if part == 1 { + r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000") + } + part++ + } + }) + + key := "12mb-leave" + mgr := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) { + u.LeavePartsOnError = false + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: bucketName, + Key: &key, + Body: bytes.NewReader(integBuf12MB), + }) + assert.Error(t, err) + uploadID := "" + if merr, ok := err.(s3manager.MultiUploadFailure); ok { + uploadID = merr.UploadID() + } + assert.NotEmpty(t, uploadID) + + _, err = svc.ListParts(&s3.ListPartsInput{ + Bucket: bucketName, Key: &key, UploadId: &uploadID}) + assert.Error(t, err) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/s3manager/stub.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +package s3manager diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/customizations/s3/stub.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +package s3 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/integration.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/integration.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/integration.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/integration.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,42 @@ +// Package integration performs initialization and validation for integration +// tests. +package integration + +import ( + "crypto/rand" + "fmt" + "io" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" +) + +// Session is a shared session for all integration tests to use. 
+var Session = session.New() + +func init() { + logLevel := Session.Config.LogLevel + if os.Getenv("DEBUG") != "" { + logLevel = aws.LogLevel(aws.LogDebug) + } + if os.Getenv("DEBUG_SIGNING") != "" { + logLevel = aws.LogLevel(aws.LogDebugWithSigning) + } + if os.Getenv("DEBUG_BODY") != "" { + logLevel = aws.LogLevel(aws.LogDebugWithSigning | aws.LogDebugWithHTTPBody) + } + Session.Config.LogLevel = logLevel + + if aws.StringValue(Session.Config.Region) == "" { + panic("AWS_REGION must be configured to run integration tests") + } +} + +// UniqueID returns a unique UUID-like identifier for use in generating +// resources for integration tests. +func UniqueID() string { + uuid := make([]byte, 16) + io.ReadFull(rand.Reader, uuid) + return fmt.Sprintf("%x", uuid) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/acm.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +#language en +@acm @client +Feature: AWS Certificate Manager + + Scenario: Making a request + When I call the "ListCertificates" API + Then the request should be successful + + Scenario: Handling errors + When I attempt to call the "GetCertificate" API with: + | CertificateArn | arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message not be empty + diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/acm/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package acm provides gucumber integration tests support. +package acm + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/acm" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@acm", func() { + World["client"] = acm.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/apigateway.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@apigateway @client +Feature: Amazon API Gateway + + Scenario: Making a request + When I call the "GetAccountRequest" API + Then the request should be successful + + Scenario: Handing errors + When I attempt to call the "GetRestApi" API with: + | RestApiId | api123 | + Then I expect the response error code to be "NotFoundException" + And I expect the response error message to include: + """ + Invalid REST API identifier specified + """ diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/apigateway/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package apigateway provides gucumber integration tests support. +package apigateway + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/apigateway" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@apigateway", func() { + World["client"] = apigateway.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/autoscaling.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +# language: en +@autoscaling @client +Feature: Auto Scaling + + Scenario: Making a request + When I call the "DescribeScalingProcessTypes" API + Then the value at "Processes" should be a list + + Scenario: Handing errors + When I attempt to call the "CreateLaunchConfiguration" API with: + | LaunchConfigurationName | | + | ImageId | ami-12345678 | + | InstanceType | m1.small | + 
Then I expect the response error code to be "InvalidParameter" + And I expect the response error message to include: + """ + LaunchConfigurationName + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/autoscaling/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package autoscaling provides gucumber integration tests support. +package autoscaling + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/autoscaling" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@autoscaling", func() { + World["client"] = autoscaling.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package cloudformation provides gucumber integration tests support. +package cloudformation + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudformation" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudformation", func() { + World["client"] = cloudformation.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudformation/cloudformation.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +# language: en +@cloudformation @client +Feature: AWS CloudFormation + + Scenario: Making a request + When I call the "ListStacks" API + Then the value at "StackSummaries" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateStack" API with: + | StackName | fakestack | + | TemplateURL | http://s3.amazonaws.com/foo/bar | + Then I expect the response error code to be "ValidationError" + And I expect the response error message to include: + """ + TemplateURL must reference a valid S3 object to which you have access. 
+ """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package cloudfront provides gucumber integration tests support. +package cloudfront + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudfront" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudfront", func() { + World["client"] = cloudfront.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudfront/cloudfront.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +# language: en +@cloudfront @client +Feature: Amazon CloudFront + + Scenario: Making a basic request + When I call the "ListDistributions" API with: + | MaxItems | 1 | + Then the value at "DistributionList.Items" should be a list + + Scenario: Error handling + When I attempt to call the "GetDistribution" API with: + | Id | fake-id | + Then I expect the response error code 
to be "NoSuchDistribution" + And I expect the response error message to include: + """ + The specified distribution does not exist. + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package cloudhsm provides gucumber integration tests support. +package cloudhsm + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudhsm" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudhsm", func() { + World["client"] = cloudhsm.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudhsm/cloudhsm.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@cloudhsm @client +Feature: Amazon CloudHSM + + Scenario: Making a request + When I call the "ListHapgs" API + Then the value at "HapgList" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeHapg" API with: + | HapgArn | 
bogus-arn | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + Value 'bogus-arn' at 'hapgArn' failed to satisfy constraint + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package cloudsearch provides gucumber integration tests support. +package cloudsearch + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudsearch" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudsearch", func() { + World["client"] = cloudsearch.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudsearch/cloudsearch.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@cloudsearch @client +Feature: Amazon CloudSearch + + Scenario: Making a request + When I call the "DescribeDomains" API + Then the response should contain a "DomainStatusList" + + Scenario: Handling errors + When I attempt to call the "DescribeIndexFields" API with: + | DomainName | fakedomain | + Then I expect the response error code to be "ResourceNotFound" + And I expect the response error message to include: + """ + Domain not found: fakedomain + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package cloudtrail provides gucumber integration tests support. 
+package cloudtrail + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudtrail" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudtrail", func() { + World["client"] = cloudtrail.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudtrail/cloudtrail.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@cloudtrail @client +Feature: AWS CloudTrail + + Scenario: Making a request + When I call the "DescribeTrails" API + Then the response should contain a "trailList" + + Scenario: Handling errors + When I attempt to call the "DeleteTrail" API with: + | Name | faketrail | + Then I expect the response error code to be "TrailNotFoundException" + And I expect the response error message to include: + """ + Unknown trail + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ 
+//Package cloudwatch provides gucumber integration tests support. +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudwatch" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudwatch", func() { + World["client"] = cloudwatch.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatch/cloudwatch.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,19 @@ +# language: en +@cloudwatch @monitoring @client +Feature: Amazon CloudWatch + + Scenario: Making a request + When I call the "ListMetrics" API with: + | Namespace | AWS/EC2 | + Then the value at "Metrics" should be a list + + Scenario: Handling errors + When I attempt to call the "SetAlarmState" API with: + | AlarmName | abc | + | StateValue | mno | + | StateReason | xyz | + Then I expect the response error code to be "ValidationError" + And I expect the response error message to include: + """ + failed to satisfy constraint + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package cloudwatchlogs provides gucumber integration tests support. +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cloudwatchlogs", func() { + World["client"] = cloudwatchlogs.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cloudwatchlogs/cloudwatchlogs.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +# language: en +@cloudwatchlogs @logs +Feature: Amazon CloudWatch Logs + + Scenario: Making a request + When I call the "DescribeLogGroups" API + Then the value at "logGroups" should be a list + + Scenario: Handling errors + When I attempt to call the "GetLogEvents" API with: + | logGroupName | fakegroup | + | logStreamName | fakestream | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + The specified log group does not exist. 
+ """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package codecommit provides gucumber integration tests support. +package codecommit + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/codecommit" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@codecommit", func() { + World["client"] = codecommit.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codecommit/codecommit.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@codecommit @client +Feature: Amazon CodeCommit + + Scenario: Making a request + When I call the "ListRepositories" API + Then the value at "repositories" should be a list + + Scenario: Handling errors + When I attempt to call the "ListBranches" API with: + | repositoryName | fake-repo | + Then I expect the response error code to be 
"RepositoryDoesNotExistException" + And I expect the response error message to include: + """ + fake-repo does not exist + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package codedeploy provides gucumber integration tests support. +package codedeploy + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/codedeploy" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@codedeploy", func() { + World["client"] = codedeploy.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codedeploy/codedeploy.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@codedeploy @client +Feature: Amazon CodeDeploy + + Scenario: Making a request + When I call the "ListApplications" API + Then the value at "applications" should be a list + + Scenario: Handling errors + When I attempt to call the 
"GetDeployment" API with: + | deploymentId | d-USUAELQEX | + Then I expect the response error code to be "DeploymentDoesNotExistException" + And I expect the response error message to include: + """ + The deployment d-USUAELQEX could not be found + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package codepipeline provides gucumber integration tests support. +package codepipeline + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/codepipeline" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@codepipeline", func() { + World["client"] = codepipeline.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/codepipeline/codepipeline.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@codepipeline @client +Feature: Amazon CodePipeline + + Scenario: Making a request + When I call the "ListPipelines" API + Then the value at "pipelines" should be a list + + Scenario: Handling errors + When I attempt to call the "GetPipeline" API with: + | name | fake-pipeline | + Then I expect the response error code to be "PipelineNotFoundException" + And I expect the response error message to include: + """ + does not have a pipeline with name 'fake-pipeline' + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package cognitoidentity provides gucumber integration tests 
support. +package cognitoidentity + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cognitoidentity", func() { + World["client"] = cognitoidentity.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitoidentity/cognitoidentity.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,19 @@ +# language: en +@cognitoidentity @client +Feature: Amazon Cognito Idenity + + Scenario: Making a request + When I call the "ListIdentityPools" API with JSON: + """ + {"MaxResults": 10} + """ + Then the value at "IdentityPools" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeIdentityPool" API with: + | IdentityPoolId | us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + IdentityPool 'us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' not found + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package cognitosync provides gucumber integration tests support. +package cognitosync + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/cognitosync" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@cognitosync", func() { + World["client"] = cognitosync.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/cognitosync/cognitosync.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@cognitosync @client +Feature: Amazon Cognito Sync + + Scenario: Making a request + When I call the "ListIdentityPoolUsage" API + Then the value at "IdentityPoolUsages" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeIdentityPoolUsage" API with: + | IdentityPoolId | us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + IdentityPool 'us-east-1:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' not found + """ diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package configservice provides gucumber integration tests support. +package configservice + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/configservice" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@configservice", func() { + World["client"] = configservice.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/configservice/configservice.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +# language: en +@configservice @config @client +Feature: AWS Config + + Scenario: Making a request + When I call the "DescribeConfigurationRecorders" API + Then the value at "ConfigurationRecorders" should be a list + + Scenario: Handling errors + When I attempt to call the "GetResourceConfigHistory" API with: + | resourceType | fake-type 
| + | resourceId | fake-id | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + failed to satisfy constraint + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package datapipeline provides gucumber integration tests support. +package datapipeline + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/datapipeline" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@datapipeline", func() { + World["client"] = datapipeline.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/datapipeline/datapipeline.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@datapipeline @client +Feature: AWS Data Pipeline + + Scenario: Making a request + When I call the "ListPipelines" API + Then the response should contain a "pipelineIdList" + + Scenario: Handling errors + When I attempt to call the "GetPipelineDefinition" API with: + | pipelineId | fake-id | + Then I expect the response error code to be "PipelineNotFoundException" + And I expect the response error message to include: + """ + does not exist + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +//Package devicefarm provides gucumber integration tests support. 
+package devicefarm + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/devicefarm" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@devicefarm", func() { + // FIXME remove custom region + World["client"] = devicefarm.New(smoke.Session, + aws.NewConfig().WithRegion("us-west-2")) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/devicefarm/devicefarm.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@devicefarm @client +Feature: AWS Device Farm + + Scenario: Making a request + When I call the "ListDevices" API + Then the value at "devices" should be a list + + Scenario: Handling errors + When I attempt to call the "GetDevice" API with: + | arn | arn:aws:devicefarm:us-west-2::device:000000000000000000000000fake-arn | + Then I expect the response error code to be "NotFoundException" + And I expect the response error message to include: + """ + No device was found for arn + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package directconnect provides gucumber integration tests support. +package directconnect + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/directconnect" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@directconnect", func() { + World["client"] = directconnect.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directconnect/directconnect.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@directconnect @client +Feature: AWS Direct Connect + + Scenario: Making a request + When I call the "DescribeConnections" API + Then the value at "connections" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeConnections" API with: + | connectionId | fake-connection | + Then I expect the response error code to be "DirectConnectClientException" + And I expect the response error message to include: + """ + Connection ID fake-connection has an invalid format + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package directoryservice provides gucumber integration tests support. +package directoryservice + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/directoryservice" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@directoryservice", func() { + World["client"] = directoryservice.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/directoryservice/directoryservice.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +# language: en +@directoryservice @ds @client +Feature: AWS Directory Service + + I want to use AWS Directory Service + + Scenario: Making a request + When I call the "DescribeDirectories" API + Then the value at "DirectoryDescriptions" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateDirectory" API with: + | Name | | + | Password | | + | Size | | + Then I expect the response error 
code to be "ValidationException" + diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package dynamodb provides gucumber integration tests support. +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/dynamodb" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@dynamodb", func() { + World["client"] = dynamodb.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodb/dynamodb.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,19 @@ +# language: en +@dynamodb @client +Feature: Amazon DynamoDB + + Scenario: Making a request + When I call the "ListTables" API with JSON: + """ + {"Limit": 1} + """ + Then the value at "TableNames" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeTable" API with: + | TableName | fake-table | + Then I expect the response error code to be 
"ResourceNotFoundException" + And I expect the response error message to include: + """ + Requested resource not found: Table: fake-table not found + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package dynamodbstreams provides gucumber integration tests support. +package dynamodbstreams + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/dynamodbstreams" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@dynamodbstreams", func() { + World["client"] = dynamodbstreams.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/dynamodbstreams/dynamodbstreams.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@dynamodbstreams @client +Feature: Amazon DynamoDB Streams + + Scenario: Making a request + When I call the "ListStreams" API + Then the value at "Streams" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeStream" API with: + | StreamArn | fake-stream | + Then I expect the response error code to be "InvalidParameter" + And I expect the response error message to include: + """ + StreamArn + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package ec2 provides gucumber integration tests support. 
+package ec2 + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/ec2" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@ec2", func() { + World["client"] = ec2.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ec2/ec2.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +# language: en +@ec2 @client +Feature: Amazon Elastic Compute Cloud + + Scenario: Making a request + When I call the "DescribeRegions" API + Then the value at "Regions" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeInstances" API with JSON: + """ + {"InstanceIds": ["i-12345678"]} + """ + Then I expect the response error code to be "InvalidInstanceID.NotFound" + And I expect the response error message to include: + """ + The instance ID 'i-12345678' does not exist + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +//Package ecs provides gucumber integration 
tests support. +package ecs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/ecs" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@ecs", func() { + // FIXME remove custom region + World["client"] = ecs.New(smoke.Session, + aws.NewConfig().WithRegion("us-west-2")) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ecs/ecs.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +# language: en +@ecs @client +Feature: Amazon ECS + + I want to use Amazon ECS + + Scenario: Making a request + When I call the "ListClusters" API + Then the value at "clusterArns" should be a list + + Scenario: Handling errors + When I attempt to call the "StopTask" API with: + | task | xxxxxxxxxxx-xxxxxxxxxxxx-xxxxxxxxxxx | + Then the error code should be "ClusterNotFoundException" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +//Package efs provides gucumber integration 
tests support. +package efs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/efs" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@efs", func() { + // FIXME remove custom region + World["client"] = efs.New(smoke.Session, + aws.NewConfig().WithRegion("us-west-2")) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/efs/efs.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +# language: en +@efs @elasticfilesystem @client +Feature: Amazon Elastic File System + + I want to use Amazon Elastic File System + + Scenario: Making a request + When I call the "DescribeFileSystems" API + Then the value at "FileSystems" should be a list + + Scenario: Handling errors + When I attempt to call the "DeleteFileSystem" API with: + | FileSystemId | fake-id | + Then the error code should be "BadRequest" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/client.go 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1,14 @@ +//Package elasticache provides gucumber integration tests support. +package elasticache + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elasticache" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@elasticache", func() { + World["client"] = elasticache.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticache/elasticache.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@elasticache @client +Feature: ElastiCache + + Scenario: Making a request + When I call the "DescribeEvents" API + Then the value at "Events" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeCacheClusters" API with: + | CacheClusterId | fake_cluster | + Then I expect the response error code to be "InvalidParameterValue" + And I expect the response error message to include: + """ + The parameter CacheClusterIdentifier is not a valid identifier. 
+ """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package elasticbeanstalk provides gucumber integration tests support. +package elasticbeanstalk + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@elasticbeanstalk", func() { + World["client"] = elasticbeanstalk.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticbeanstalk/elasticbeanstalk.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@elasticbeanstalk @client +Feature: AWS Elastic Beanstalk + + Scenario: Making a request + When I call the "ListAvailableSolutionStacks" API + Then the value at "SolutionStacks" should be a list + + Scenario: Handling errors + When I attempt to call the 
"DescribeEnvironmentResources" API with: + | EnvironmentId | fake_environment | + Then I expect the response error code to be "InvalidParameterValue" + And I expect the response error message to include: + """ + No Environment found for EnvironmentId = 'fake_environment'. + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package elasticloadbalancing provides gucumber integration tests support. +package elasticloadbalancing + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elb" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@elasticloadbalancing", func() { + World["client"] = elb.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elasticloadbalancing/elasticloadbalancing.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +# language: en +@elasticloadbalancing @client +Feature: Elastic Load Balancing + + Scenario: Making a request + When I call the "DescribeLoadBalancers" API + Then the value at "LoadBalancerDescriptions" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeLoadBalancers" API with JSON: + """ + {"LoadBalancerNames": ["fake_load_balancer"]} + """ + Then I expect the response error code to be "ValidationError" + And I expect the response error message to include: + """ + LoadBalancer name cannot contain characters that are not letters, or digits or the dash. 
+ """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package elastictranscoder provides gucumber integration tests support. +package elastictranscoder + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elastictranscoder" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@elastictranscoder", func() { + World["client"] = elastictranscoder.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/elastictranscoder/elastictranscoder.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@elastictranscoder @client +Feature: Amazon Elastic Transcoder + + Scenario: Making a request + When I call the "ListPresets" API + Then the value at "Presets" should be a list + + Scenario: Handling errors + When I attempt to call the 
"ReadJob" API with: + | Id | fake_job | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + Value 'fake_job' at 'id' failed to satisfy constraint + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package emr provides gucumber integration tests support. +package emr + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/emr" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@emr", func() { + World["client"] = emr.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/emr/emr.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@emr @client @elasticmapreduce +Feature: Amazon EMR + + Scenario: Making a request + When I call the "ListClusters" API + Then the value at "Clusters" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeCluster" API with: + | ClusterId | fake_cluster | + Then I expect the response error code to be "InvalidRequestException" + And I expect the response error message to include: + """ + Cluster id 'fake_cluster' is not valid. + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package es provides gucumber integration tests support. +package es + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/elasticsearchservice" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@es", func() { + World["client"] = elasticsearchservice.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/es/es.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@es @elasticsearchservice +Feature: Amazon ElasticsearchService + + Scenario: Making a request + When I call the "ListDomainNames" API + Then the value at "DomainNames" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeElasticsearchDomain" API with: + | DomainName | not-a-domain | + Then the error code should be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Domain not found: not-a-domain + """ \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package glacier provides gucumber integration tests support. 
+package glacier + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/glacier" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@glacier", func() { + World["client"] = glacier.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/glacier/glacier.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@glacier @client +Feature: Amazon Glacier + + Scenario: Making a request + When I call the "ListVaults" API + Then the response should contain a "VaultList" + + Scenario: Handling errors + When I attempt to call the "ListVaults" API with: + | accountId | abcmnoxyz | + Then I expect the response error code to be "UnrecognizedClientException" + And I expect the response error message to include: + """ + No account found for the given parameters + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package iam provides gucumber 
integration tests support. +package iam + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/iam" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@iam", func() { + World["client"] = iam.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iam/iam.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@iam @client +Feature: AWS Identity and Access Management + + Scenario: Making a request + When I call the "ListUsers" API + Then the value at "Users" should be a list + + Scenario: Handling errors + When I attempt to call the "GetUser" API with: + | UserName | fake_user | + Then I expect the response error code to be "NoSuchEntity" + And I expect the response error message to include: + """ + The user with name fake_user cannot be found. 
+ """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,24 @@ +//Package iotdataplane provides gucumber integration tests support. +package iotdataplane + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go/service/iotdataplane" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@iotdataplane", func() { + svc := iot.New(smoke.Session) + result, err := svc.DescribeEndpoint(&iot.DescribeEndpointInput{}) + if err != nil { + World["error"] = err + return + } + + World["client"] = iotdataplane.New(smoke.Session, aws.NewConfig(). 
+ WithEndpoint(*result.EndpointAddress)) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/iotdataplane/iotdataplane.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +# language: en +@iotdataplane @client +Feature: AWS IoT Data Plane + + Scenario: Handling errors + When I attempt to call the "GetThingShadow" API with: + | ThingName | "fake_thing" | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Not Found + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package kinesis provides gucumber integration tests support. +package kinesis + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/kinesis" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@kinesis", func() { + World["client"] = kinesis.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kinesis/kinesis.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@kinesis @client +Feature: AWS Kinesis + + Scenario: Making a request + When I call the "ListStreams" API + Then the value at "StreamNames" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeStream" API with: + | StreamName | bogus-stream-name | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Stream bogus-stream-name under account + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package kms provides gucumber integration tests support. 
+package kms + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/kms" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@kms", func() { + World["client"] = kms.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/kms/kms.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,13 @@ +# language: en +@kms @client +Feature: Amazon Key Management Service + + Scenario: Making a request + When I call the "ListAliases" API + Then the value at "Aliases" should be a list + + Scenario: Handling errors + When I attempt to call the "GetKeyPolicy" API with: + | KeyId | fake-key | + | PolicyName | fake-policy | + Then I expect the response error code to be "NotFoundException" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package lambda provides gucumber integration tests support. 
+package lambda + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/lambda" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@lambda", func() { + World["client"] = lambda.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/lambda/lambda.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@lambda @client +Feature: Amazon Lambda + + Scenario: Making a request + When I call the "ListFunctions" API + Then the value at "Functions" should be a list + + Scenario: Handling errors + When I attempt to call the "Invoke" API with: + | FunctionName | bogus-function | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Function not found + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package machinelearning 
provides gucumber integration tests support. +package machinelearning + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/machinelearning" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@machinelearning", func() { + World["client"] = machinelearning.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/machinelearning/machinelearning.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +# language: en +@machinelearning @client +Feature: Amazon Machine Learning + + I want to use Amazon Machine Learning + + Scenario: Making a request + When I call the "DescribeMLModels" API + Then the value at "Results" should be a list + + Scenario: Error handling + When I attempt to call the "GetBatchPrediction" API with: + | BatchPredictionId | fake-id | + Then the error code should be "ResourceNotFoundException" + And the error message should contain: + """ + No BatchPrediction with id fake-id exists + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package opsworks provides gucumber integration tests support. +package opsworks + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/opsworks" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@opsworks", func() { + World["client"] = opsworks.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/opsworks/opsworks.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@opsworks @client +Feature: AWS OpsWorks + + Scenario: Making a request + When I call the "DescribeStacks" API + Then the value at "Stacks" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeLayers" API with: + | StackId | fake_stack | + Then I expect the response error code to be "ResourceNotFoundException" + And I expect the response error message to include: + """ + Unable to find stack with ID fake_stack + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go 
1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package rds provides gucumber integration tests support. +package rds + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/rds" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@rds", func() { + World["client"] = rds.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/rds/rds.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@rds @client +Feature: Amazon RDS + + Scenario: Making a request + When I call the "DescribeDBEngineVersions" API + Then the value at "DBEngineVersions" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeDBInstances" API with: + | DBInstanceIdentifier | fake-id | + Then I expect the response error code to be "DBInstanceNotFound" + And I expect the response error message to include: + """ + DBInstance fake-id not found. 
+ """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package redshift provides gucumber integration tests support. +package redshift + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/redshift" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@redshift", func() { + World["client"] = redshift.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/redshift/redshift.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@redshift @client +Feature: Amazon Redshift + + Scenario: Making a request + When I call the "DescribeClusterVersions" API + Then the value at "ClusterVersions" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeClusters" API with: + | ClusterIdentifier | fake-cluster | + Then I expect the response error code to be "ClusterNotFound" + And I expect the 
response error message to include: + """ + Cluster fake-cluster not found. + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package route53 provides gucumber integration tests support. +package route53 + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/route53" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@route53", func() { + World["client"] = route53.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53/route53.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@route53 @client +Feature: Amazon Route 53 + + Scenario: Making a request + When I call the "ListHostedZones" API + Then the value at "HostedZones" should be a list + + Scenario: Handling errors + When I attempt to call the "GetHostedZone" API with: + | Id | fake-zone | + Then I expect the response error code to be "NoSuchHostedZone" 
+ And I expect the response error message to include: + """ + No hosted zone found with ID: fake-zone + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package route53domains provides gucumber integration tests support. +package route53domains + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/route53domains" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@route53domains", func() { + World["client"] = route53domains.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/route53domains/route53domains.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@route53domains @client +Feature: Amazon Route53 Domains + + Scenario: Making a request + When I call the "ListDomains" API + Then the value at "Domains" should be a list + + Scenario: 
Handling errors + When I attempt to call the "GetDomainDetail" API with: + | DomainName | fake-domain-name | + Then I expect the response error code to be "InvalidInput" + And I expect the response error message to include: + """ + domain name must contain more than 1 label + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package ses provides gucumber integration tests support. +package ses + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/ses" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@ses", func() { + World["client"] = ses.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ses/ses.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@ses @email @client +Feature: Amazon Simple Email Service + + Scenario: Making a request + When I call the "ListIdentities" API + Then the value at "Identities" should be a list + + Scenario: Handling errors + When I attempt to call the "VerifyEmailIdentity" API with: + | EmailAddress | fake_email | + Then I expect the response error code to be "InvalidParameterValue" + And I expect the response error message to include: + """ + Invalid email address. + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,228 @@ +// Package smoke contains shared step definitions that are used across integration tests +package smoke + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "regexp" + "strconv" + "strings" + + . 
"github.com/lsegal/gucumber" + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/session" +) + +// Session is a shared session for all integration smoke tests to use. +var Session = session.New() + +func init() { + logLevel := Session.Config.LogLevel + if os.Getenv("DEBUG") != "" { + logLevel = aws.LogLevel(aws.LogDebug) + } + if os.Getenv("DEBUG_SIGNING") != "" { + logLevel = aws.LogLevel(aws.LogDebugWithSigning) + } + if os.Getenv("DEBUG_BODY") != "" { + logLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) + } + Session.Config.LogLevel = logLevel + + When(`^I call the "(.+?)" API$`, func(op string) { + call(op, nil, false) + }) + + When(`^I call the "(.+?)" API with:$`, func(op string, args [][]string) { + call(op, args, false) + }) + + Then(`^the value at "(.+?)" should be a list$`, func(member string) { + vals, _ := awsutil.ValuesAtPath(World["response"], member) + assert.NotNil(T, vals) + }) + + Then(`^the response should contain a "(.+?)"$`, func(member string) { + vals, _ := awsutil.ValuesAtPath(World["response"], member) + assert.NotEmpty(T, vals) + }) + + When(`^I attempt to call the "(.+?)" API with:$`, func(op string, args [][]string) { + call(op, args, true) + }) + + Then(`^I expect the response error code to be "(.+?)"$`, func(code string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + if ok { + assert.Equal(T, code, err.Code(), "Error: %v", err) + } + }) + + And(`^I expect the response error message to include:$`, func(data string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + if ok { + assert.Contains(T, err.Message(), data) + } + }) + + And(`^I expect the response error message to include one of:$`, func(table [][]string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + if ok { + found := 
false + for _, row := range table { + if strings.Contains(err.Message(), row[0]) { + found = true + break + } + } + + assert.True(T, found, fmt.Sprintf("no error messages matched: \"%s\"", err.Message())) + } + }) + + And(`^I expect the response error message not be empty$`, func() { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + assert.NotEmpty(T, err.Message()) + }) + + When(`^I call the "(.+?)" API with JSON:$`, func(s1 string, data string) { + callWithJSON(s1, data, false) + }) + + When(`^I attempt to call the "(.+?)" API with JSON:$`, func(s1 string, data string) { + callWithJSON(s1, data, true) + }) + + Then(`^the error code should be "(.+?)"$`, func(s1 string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + assert.Equal(T, s1, err.Code()) + }) + + And(`^the error message should contain:$`, func(data string) { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + assert.Contains(T, err.Error(), data) + }) + + Then(`^the request should fail$`, func() { + err, ok := World["error"].(awserr.Error) + assert.True(T, ok, "no error returned") + assert.Error(T, err) + }) + + Then(`^the request should be successful$`, func() { + err, ok := World["error"].(awserr.Error) + assert.False(T, ok, "error returned") + assert.NoError(T, err) + }) +} + +// findMethod finds the op operation on the v structure using a case-insensitive +// lookup. Returns nil if no method is found. +func findMethod(v reflect.Value, op string) *reflect.Value { + t := v.Type() + op = strings.ToLower(op) + for i := 0; i < t.NumMethod(); i++ { + name := t.Method(i).Name + if strings.ToLower(name) == op { + m := v.MethodByName(name) + return &m + } + } + return nil +} + +// call calls an operation on World["client"] by the name op using the args +// table of arguments to set. 
+func call(op string, args [][]string, allowError bool) { + v := reflect.ValueOf(World["client"]) + if m := findMethod(v, op); m != nil { + t := m.Type() + in := reflect.New(t.In(0).Elem()) + fillArgs(in, args) + + resps := m.Call([]reflect.Value{in}) + World["response"] = resps[0].Interface() + World["error"] = resps[1].Interface() + + if !allowError { + err, _ := World["error"].(error) + assert.NoError(T, err) + } + } else { + assert.Fail(T, "failed to find operation "+op) + } +} + +// reIsNum is a regular expression matching a numeric input (integer) +var reIsNum = regexp.MustCompile(`^\d+$`) + +// reIsArray is a regular expression matching a list +var reIsArray = regexp.MustCompile(`^\['.*?'\]$`) +var reArrayElem = regexp.MustCompile(`'(.+?)'`) + +// fillArgs fills arguments on the input structure using the args table of +// arguments. +func fillArgs(in reflect.Value, args [][]string) { + if args == nil { + return + } + + for _, row := range args { + path := row[0] + var val interface{} = row[1] + if reIsArray.MatchString(row[1]) { + quotedStrs := reArrayElem.FindAllString(row[1], -1) + strs := make([]*string, len(quotedStrs)) + for i, e := range quotedStrs { + str := e[1 : len(e)-1] + strs[i] = &str + } + val = strs + } else if reIsNum.MatchString(row[1]) { // handle integer values + num, err := strconv.ParseInt(row[1], 10, 64) + if err == nil { + val = num + } + } + awsutil.SetValueAtPath(in.Interface(), path, val) + } +} + +func callWithJSON(op, j string, allowError bool) { + v := reflect.ValueOf(World["client"]) + if m := findMethod(v, op); m != nil { + t := m.Type() + in := reflect.New(t.In(0).Elem()) + fillJSON(in, j) + + resps := m.Call([]reflect.Value{in}) + World["response"] = resps[0].Interface() + World["error"] = resps[1].Interface() + + if !allowError { + err, _ := World["error"].(error) + assert.NoError(T, err) + } + } else { + assert.Fail(T, "failed to find operation "+op) + } +} + +func fillJSON(in reflect.Value, j string) { + d := 
json.NewDecoder(strings.NewReader(j)) + if err := d.Decode(in.Interface()); err != nil { + panic(err) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package simpledb provides gucumber integration tests support. +package simpledb + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/simpledb" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@simpledb", func() { + World["client"] = simpledb.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/simpledb/simpledb.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,24 @@ +# language: en +@simpledb @sdb +Feature: Amazon SimpleDB + + I want to use Amazon SimpleDB + + Scenario: Making a request + When I call the "CreateDomain" API with: + | DomainName | sample-domain | + Then the request should be successful + And I call the "ListDomains" API + Then the value at "DomainNames" 
should be a list + And I call the "DeleteDomain" API with: + | DomainName | sample-domain | + Then the request should be successful + + Scenario: Handling errors + When I attempt to call the "CreateDomain" API with: + | DomainName | | + Then I expect the response error code to be "InvalidParameterValue" + And I expect the response error message to include: + """ + DomainName is invalid + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package sns provides gucumber integration tests support. +package sns + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/sns" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@sns", func() { + World["client"] = sns.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sns/sns.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +# language: en +@sns @client +Feature: Amazon Simple Notification Service + + Scenario: Making a request + When I call the "ListTopics" API + Then the value at "Topics" should be a list + + Scenario: Handling errors + When I attempt to call the "Publish" API with: + | Message | hello | + | TopicArn | fake_topic | + Then I expect the response error code to be "InvalidParameter" + And I expect the response error message to include: + """ + Invalid parameter: TopicArn Reason: fake_topic does not start with arn + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package sqs provides gucumber integration tests support. 
+package sqs + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/sqs" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@sqs", func() { + World["client"] = sqs.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sqs/sqs.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@sqs @client +Feature: Amazon Simple Queue Service + + Scenario: Making a request + When I call the "ListQueues" API + Then the value at "QueueUrls" should be a list + + Scenario: Handling errors + When I attempt to call the "GetQueueUrl" API with: + | QueueName | fake_queue | + Then I expect the response error code to be "AWS.SimpleQueueService.NonExistentQueue" + And I expect the response error message to include: + """ + The specified queue does not exist for this wsdl version. 
+ """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package ssm provides gucumber integration tests support. +package ssm + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/ssm" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@ssm", func() { + World["client"] = ssm.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/ssm/ssm.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@ssm @client +Feature: Amazon SSM + + Scenario: Making a request + When I call the "ListDocuments" API + Then the value at "DocumentIdentifiers" should be a list + + Scenario: Handling errors + When I attempt to call the "GetDocument" API with: + | Name | 'fake-name' | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + validation error detected + """ diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package storagegateway provides gucumber integration tests support. +package storagegateway + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/storagegateway" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@storagegateway", func() { + World["client"] = storagegateway.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/storagegateway/storagegateway.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +# language: en +@storagegateway @client +Feature: AWS Storage Gateway + + Scenario: Making a request + When I call the "ListGateways" API + Then the value at "Gateways" should be a list + + Scenario: Handling errors + When I attempt to call the "ListVolumes" API with: + | GatewayARN | fake_gateway | + Then I expect the 
response error code to be "InvalidParameter" + And I expect the response error message to include: + """ + GatewayARN + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package sts provides gucumber integration tests support. +package sts + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/sts" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@sts", func() { + World["client"] = sts.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/sts/sts.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +# language: en +@sts @client +Feature: AWS STS + + Scenario: Making a request + When I call the "GetSessionToken" API + Then the response should contain a "Credentials" + + Scenario: Handling errors + When I attempt to call the "GetFederationToken" API with: + | Name | temp | + | Policy | | + Then I expect the response error code to be "InvalidParameter" + And I expect the 
response error message to include: + """ + Policy + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package support provides gucumber integration tests support. +package support + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/support" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@support", func() { + World["client"] = support.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/support/support.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,22 @@ +# language: en +@support @client +Feature: AWS Support + + I want to use AWS Support + + Scenario: Making a request + When I call the "DescribeServices" API + Then the value at "services" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateCase" API with: + | subject | subject | + | communicationBody | communication | + | categoryCode | 
category | + | serviceCode | amazon-dynamodb | + | severityCode | low | + Then I expect the response error code to be "InvalidParameterValueException" + And the error message should contain: + """ + Invalid category code + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package swf provides gucumber integration tests support. +package swf + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/swf" + . 
"github.com/lsegal/gucumber" +) + +func init() { + Before("@swf", func() { + World["client"] = swf.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/swf/swf.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +# language: en +@swf @client +Feature: Amazon Simple Workflow Service + + Scenario: Making a request + When I call the "ListDomains" API with: + | registrationStatus | REGISTERED | + Then the value at "domainInfos" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeDomain" API with: + | name | fake_domain | + Then I expect the response error code to be "UnknownResourceFault" + And I expect the response error message to include: + """ + Unknown domain: fake_domain + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package waf provides gucumber integration tests support. 
+package waf + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/waf" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@waf", func() { + World["client"] = waf.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/waf/waf.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,20 @@ +# language: en +@waf +Feature: AWS WAF + + Scenario: Making a request + When I call the "ListRules" API with JSON: + """ + {"Limit":20} + """ + Then the value at "Rules" should be a list + + Scenario: Handling errors + When I attempt to call the "CreateSqlInjectionMatchSet" API with: + | Name | fake_name | + | ChangeToken | fake_token | + Then I expect the response error code to be "WAFStaleDataException" + And I expect the response error message to include: + """ + The input token is no longer current + """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/client.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +//Package 
workspaces provides gucumber integration tests support. +package workspaces + +import ( + "github.com/aws/aws-sdk-go/awstesting/integration/smoke" + "github.com/aws/aws-sdk-go/service/workspaces" + . "github.com/lsegal/gucumber" +) + +func init() { + Before("@workspaces", func() { + World["client"] = workspaces.New(smoke.Session) + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/integration/smoke/workspaces/workspaces.feature 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +# language: en +@workspaces @client +Feature: Amazon WorkSpaces + + I want to use Amazon WorkSpaces + + Scenario: Making a request + When I call the "DescribeWorkspaces" API + Then the value at "Workspaces" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeWorkspaces" API with: + | DirectoryId | fake-id | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + The Directory ID fake-id in the request is invalid. 
+ """ diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/param_filler.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/param_filler.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/param_filler.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/param_filler.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,132 @@ +package awstesting + +import ( + "fmt" + "reflect" + "strings" + + "github.com/aws/aws-sdk-go/private/model/api" + "github.com/aws/aws-sdk-go/private/util" +) + +// A paramFiller provides string formatting for a shape and its types. +type paramFiller struct { + prefixPackageName bool +} + +// typeName returns the type name of a shape. +func (f paramFiller) typeName(shape *api.Shape) string { + if f.prefixPackageName && shape.Type == "structure" { + return "*" + shape.API.PackageName() + "." + shape.GoTypeElem() + } + return shape.GoType() +} + +// ParamsStructFromJSON returns a JSON string representation of a structure. +func ParamsStructFromJSON(value interface{}, shape *api.Shape, prefixPackageName bool) string { + f := paramFiller{prefixPackageName: prefixPackageName} + return util.GoFmt(f.paramsStructAny(value, shape)) +} + +// paramsStructAny returns the string representation of any value. 
+func (f paramFiller) paramsStructAny(value interface{}, shape *api.Shape) string { + if value == nil { + return "" + } + + switch shape.Type { + case "structure": + if value != nil { + vmap := value.(map[string]interface{}) + return f.paramsStructStruct(vmap, shape) + } + case "list": + vlist := value.([]interface{}) + return f.paramsStructList(vlist, shape) + case "map": + vmap := value.(map[string]interface{}) + return f.paramsStructMap(vmap, shape) + case "string", "character": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.String(%#v)", v.Interface()) + } + case "blob": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() && shape.Streaming { + return fmt.Sprintf("aws.ReadSeekCloser(bytes.NewBufferString(%#v))", v.Interface()) + } else if v.IsValid() { + return fmt.Sprintf("[]byte(%#v)", v.Interface()) + } + case "boolean": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Bool(%#v)", v.Interface()) + } + case "integer", "long": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Int64(%v)", v.Interface()) + } + case "float", "double": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Float64(%v)", v.Interface()) + } + case "timestamp": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Time(time.Unix(%d, 0))", int(v.Float())) + } + default: + panic("Unhandled type " + shape.Type) + } + return "" +} + +// paramsStructStruct returns the string representation of a structure +func (f paramFiller) paramsStructStruct(value map[string]interface{}, shape *api.Shape) string { + out := "&" + f.typeName(shape)[1:] + "{\n" + for _, n := range shape.MemberNames() { + ref := shape.MemberRefs[n] + name := findParamMember(value, n) + + if val := f.paramsStructAny(value[name], ref.Shape); val != "" { + out += fmt.Sprintf("%s: %s,\n", n, val) + } + } + 
out += "}" + return out +} + +// paramsStructMap returns the string representation of a map of values +func (f paramFiller) paramsStructMap(value map[string]interface{}, shape *api.Shape) string { + out := f.typeName(shape) + "{\n" + keys := SortedKeys(value) + for _, k := range keys { + v := value[k] + out += fmt.Sprintf("%q: %s,\n", k, f.paramsStructAny(v, shape.ValueRef.Shape)) + } + out += "}" + return out +} + +// paramsStructList returns the string representation of slice of values +func (f paramFiller) paramsStructList(value []interface{}, shape *api.Shape) string { + out := f.typeName(shape) + "{\n" + for _, v := range value { + out += fmt.Sprintf("%s,\n", f.paramsStructAny(v, shape.MemberRef.Shape)) + } + out += "}" + return out +} + +// findParamMember searches a map for a key ignoring case. Returns the map key if found. +func findParamMember(value map[string]interface{}, key string) string { + for actualKey := range value { + if strings.ToLower(key) == strings.ToLower(actualKey) { + return actualKey + } + } + return "" +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/sort_keys.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/sort_keys.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/sort_keys.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/sort_keys.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +package awstesting + +import "sort" + +// SortedKeys returns a sorted slice of keys of a map. 
+func SortedKeys(m map[string]interface{}) []string { + i, sorted := 0, make([]string, len(m)) + for k := range m { + sorted[i] = k + i++ + } + sort.Strings(sorted) + return sorted +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/unit/unit.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/unit/unit.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/unit/unit.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/unit/unit.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,13 @@ +// Package unit performs initialization and validation for unit tests +package unit + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" +) + +// Session is a shared session for unit tests to use. +var Session = session.New(aws.NewConfig(). + WithCredentials(credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")). + WithRegion("mock-region")) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/util.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/util.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/util.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/awstesting/util.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +package awstesting + +// ZeroReader is a io.Reader which will always write zeros to the byte slice provided. +type ZeroReader struct{} + +// Read fills the provided byte slice with zeros returning the number of bytes written. 
+func (r *ZeroReader) Read(b []byte) (int, error) { + for i := 0; i < len(b); i++ { + b[i] = 0 + } + return len(b), nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/plugin.rb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,187 @@ +require 'yard' +require 'yard-go' + +module GoLinksHelper + def signature(obj, link = true, show_extras = true, full_attr_name = true) + case obj + when YARDGo::CodeObjects::FuncObject + if link && obj.has_tag?(:service_operation) + ret = signature_types(obj, !link) + args = obj.parameters.map {|m| m[0].split(/\s+/).last }.join(", ") + line = "#{obj.name}(#{args}) #{ret}" + return link ? linkify(obj, line) : line + end + end + + super(obj, link, show_extras, full_attr_name) + end + + def html_syntax_highlight(source, type = nil) + src = super(source, type || :go) + object.has_tag?(:service_operation) ? 
link_types(src) : src + end +end + +YARD::Templates::Helpers::HtmlHelper.send(:prepend, GoLinksHelper) +YARD::Templates::Engine.register_template_path(File.dirname(__FILE__) + '/templates') + +YARD::Parser::SourceParser.after_parse_list do + YARD::Registry.all(:struct).each do |obj| + if obj.file =~ /\/?service\/(.+?)\/(service|api)\.go$/ + obj.add_tag YARD::Tags::Tag.new(:service, $1) + obj.groups = ["Constructor Functions", "Service Operations", "Request Methods", "Pagination Methods"] + end + end + + YARD::Registry.all(:method).each do |obj| + if obj.file =~ /service\/.+?\/api\.go$/ && obj.scope == :instance + if obj.name.to_s =~ /Pages$/ + obj.group = "Pagination Methods" + opname = obj.name.to_s.sub(/Pages$/, '') + obj.docstring = <<-eof +#{obj.name} iterates over the pages of a {#{opname} #{opname}()} operation, calling the `fn` +function callback with the response data in each page. To stop iterating, return `false` from +the function callback. + +@note This operation can generate multiple requests to a service. +@example Iterating over at most 3 pages of a #{opname} operation + pageNum := 0 + err := client.#{obj.name}(params, func(page *#{obj.parent.parent.name}.#{obj.parameters[1][0].split("*").last}, lastPage bool) bool { + pageNum++ + fmt.Println(page) + return pageNum <= 3 + }) +@see #{opname} +eof + obj.add_tag YARD::Tags::Tag.new(:paginator, '') + elsif obj.name.to_s =~ /Request$/ + obj.group = "Request Methods" + obj.signature = obj.name.to_s + obj.parameters = [] + opname = obj.name.to_s.sub(/Request$/, '') + obj.docstring = <<-eof +#{obj.name} generates a {aws/request.Request} object representing the client request for +the {#{opname} #{opname}()} operation. The `output` return value can be used to capture +response data after {aws/request.Request.Send Request.Send()} is called. 
+ +Creating a request object using this method should be used when you want to inject +custom logic into the request lifecycle using a custom handler, or if you want to +access properties on the request object before or after sending the request. If +you just want the service response, call the {#{opname} service operation method} +directly instead. + +@note You must call the {aws/request.Request.Send Send()} method on the returned + request object in order to execute the request. +@example Sending a request using the #{obj.name}() method + req, resp := client.#{obj.name}(params) + err := req.Send() + + if err == nil { // resp is now filled + fmt.Println(resp) + } +eof + obj.add_tag YARD::Tags::Tag.new(:request_method, '') + else + obj.group = "Service Operations" + obj.add_tag YARD::Tags::Tag.new(:service_operation, '') + if ex = obj.tag(:example) + ex.name = "Calling the #{obj.name} operation" + end + end + end + end + + apply_docs +end + +def apply_docs + svc_pkg = YARD::Registry.at('service') + return if svc_pkg.nil? + + pkgs = svc_pkg.children.select {|t| t.type == :package } + pkgs.each do |pkg| + svc = pkg.children.find {|t| t.has_tag?(:service) } + ctor = P(svc, ".New") + svc_name = ctor.source[/ServiceName:\s*"(.+?)",/, 1] + api_ver = ctor.source[/APIVersion:\s*"(.+?)",/, 1] + log.progress "Parsing service documentation for #{svc_name} (#{api_ver})" + file = Dir.glob("models/apis/#{svc_name}/#{api_ver}/docs-2.json").sort.last + next if file.nil? + + next if svc.nil? 
+ exmeth = svc.children.find {|s| s.has_tag?(:service_operation) } + pkg.docstring += <<-eof + +@example Sending a request using the {#{svc.name}} client + client := #{pkg.name}.New(nil) + params := &#{pkg.name}.#{exmeth.parameters.first[0].split("*").last}{...} + resp, err := client.#{exmeth.name}(params) +@see #{svc.name} +@version #{api_ver} +eof + + ctor.docstring += <<-eof + +@example Constructing a client using default configuration + client := #{pkg.name}.New(nil) + +@example Constructing a client with custom configuration + config := aws.NewConfig().WithRegion("us-west-2") + client := #{pkg.name}.New(config) +eof + + json = JSON.parse(File.read(file)) + if svc + apply_doc(svc, json["service"]) + end + + json["operations"].each do |op, doc| + if doc && obj = svc.children.find {|t| t.name.to_s.downcase == op.downcase } + apply_doc(obj, doc) + end + end + + json["shapes"].each do |shape, data| + shape = shape_name(shape) + if obj = pkg.children.find {|t| t.name.to_s.downcase == shape.downcase } + apply_doc(obj, data["base"]) + end + + data["refs"].each do |refname, doc| + refshape, member = *refname.split("$") + refshape = shape_name(refshape) + if refobj = pkg.children.find {|t| t.name.to_s.downcase == refshape.downcase } + if m = refobj.children.find {|t| t.name.to_s.downcase == member.downcase } + apply_doc(m, doc || data["base"]) + end + end + end if data["refs"] + end + end +end + +def apply_doc(obj, doc) + tags = obj.docstring.tags || [] + obj.docstring = clean_docstring(doc) + tags.each {|t| obj.docstring.add_tag(t) } +end + +def shape_name(shape) + shape.sub(/Request$/, "Input").sub(/Response$/, "Output") +end + +def clean_docstring(docs) + return nil unless docs + docs = docs.gsub(//m, '') + docs = docs.gsub(/.+?<\/fullname?>/m, '') + docs = docs.gsub(/.+?<\/examples?>/m, '') + docs = docs.gsub(/\s*<\/note>/m, '') + docs = docs.gsub(/(.+?)<\/a>/, '\1') + docs = docs.gsub(/(.+?)<\/note>/m) do + text = $1.gsub(/<\/?p>/, '') + "
Note: #{text}
" + end + docs = docs.gsub(/\{(.+?)\}/, '`{\1}`') + docs = docs.gsub(/\s+/, ' ').strip + docs == '' ? nil : docs +end diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/layout/html/footer.erb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/layout/html/footer.erb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/layout/html/footer.erb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/layout/html/footer.erb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,31 @@ +
+ + + + + + + + + + + diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/client.erb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/client.erb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/client.erb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/client.erb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +

Client Structure collapse

+
    + <%= yieldall :item => @client %> +
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/item_summary.erb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/item_summary.erb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/item_summary.erb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/item_summary.erb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,28 @@ +<% if !@item.has_tag?(:paginator) %> +
  • + <%= signature(@item) %> + <% if object != @item.namespace %> + + <%= @item.namespace.type == :class ? 'inherited' : (@item.scope == :class ? 'extended' : 'included') %> + from <%= linkify @item, object.relative_path(@item.namespace) %> + + <% end %> + <% if @item.type == :enum %>enum<% end %> + <% if @item.type == :bare_struct || @item.type == :struct %>struct<% end %> + <% if @item.has_tag?(:service) %>client<% end %> + <% if @item.has_tag?(:service_operation) %>operation<% end %> + <% if @item.type == :interface %>interface<% end %> + <% if @item.has_tag?(:readonly) %>readonly<% end %> + <% if @item.has_tag?(:writeonly) %>writeonly<% end %> + <% if @item.visibility != :public %><%= @item.visibility %><% end %> + <% if @item.has_tag?(:abstract) %>interface<% end %> + <% if @item.has_tag?(:deprecated) %>deprecated<% end %> + <% if @item.has_tag?(:api) && @item.tag(:api).text == 'private' %>private<% end %> + + <% if @item.has_tag?(:deprecated) %> + Deprecated. <%= htmlify_line @item.tag(:deprecated).text %> + <% else %> + <%= htmlify_line docstring_summary(@item) %> + <% end %> +
  • +<% end %> diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/setup.rb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/setup.rb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/setup.rb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/module/html/setup.rb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,9 @@ +def init + super + sections.place(:client, [:item_summary]).before(:constant_summary) +end + +def client + @client = object.children.find {|c| c.has_tag?(:service) } + erb(:client) if @client +end diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/package/html/setup.rb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/package/html/setup.rb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/package/html/setup.rb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/package/html/setup.rb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,8 @@ +def type_summary + @items = object.children. + select {|c| c.type == :bare_struct || c.type == :struct || c.type == :enum }. + reject {|c| c.has_tag?(:service) }. 
+ sort_by {|c| c.name.to_s } + @name = "Type" + erb :list_summary +end diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/paginators.erb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/paginators.erb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/paginators.erb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/paginators.erb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +
    +

    Pagination Methods

    +

    <%= @items.map {|pkg| link_object(pkg, pkg.name) }.join(" ") %>

    +
    diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/request_methods.erb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/request_methods.erb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/request_methods.erb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/request_methods.erb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +
    +

    Request Methods

    +

    <%= @items.map {|pkg| link_object(pkg, pkg.name) }.join(" ") %>

    +
    diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/doc-src/plugin/templates/default/struct/html/setup.rb 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,20 @@ +def init + super + sections.place(:request_methods, :paginators).after(:method_summary) +end + +def groups(list, type = "Method") + super(list.reject {|o| o.has_tag?(:paginator) || o.has_tag?(:request_method) }, type) +end + +def paginators + @items = object.children.select {|o| o.has_tag?(:paginator) } + return if @items.size == 0 + erb(:paginators) +end + +def request_methods + @items = object.children.select {|o| o.has_tag?(:request_method) } + return if @items.size == 0 + erb(:request_methods) +end diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/example/listS3EncryptedObjects/main.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/example/listS3EncryptedObjects/main.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/example/listS3EncryptedObjects/main.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/example/listS3EncryptedObjects/main.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,223 @@ +package main + +import ( + "fmt" + "os" + "sort" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +func exit(msg ...interface{}) { + 
fmt.Fprintln(os.Stderr, msg...) + os.Exit(1) +} + +func main() { + accounts := []string{"default", "default2", "otherprofile"} + + // Spin off a worker for each account to retrieve that account's + bucketCh := make(chan *Bucket, 5) + var wg sync.WaitGroup + for _, acc := range accounts { + wg.Add(1) + go func(acc string) { + sess := session.New(&aws.Config{Credentials: credentials.NewSharedCredentials("", acc)}) + if err := getAccountBuckets(sess, bucketCh, acc); err != nil { + fmt.Fprintf(os.Stderr, "failed to get account %s's bucket info, %v\n", acc, err) + } + wg.Done() + }(acc) + } + // Spin off a goroutine which will wait until all account buckets have been collected and + // added to the bucketCh. Close the bucketCh so the for range below will exit once all + // bucket info is printed. + go func() { + wg.Wait() + close(bucketCh) + }() + + // Receive from the bucket channel printing the information for each bucket to the console + // when the bucketCh channel is drained. + buckets := []*Bucket{} + for b := range bucketCh { + buckets = append(buckets, b) + } + + sortBuckets(buckets) + for _, b := range buckets { + if b.Error != nil { + fmt.Printf("Bucket %s, owned by: %s, failed: %v\n", b.Name, b.Owner, b.Error) + continue + } + + encObjs := b.encryptedObjects() + fmt.Printf("Bucket: %s, owned by: %s, total objects: %d, failed objects: %d, encrypted objects: %d\n", + b.Name, b.Owner, len(b.Objects), len(b.ErrObjects), len(encObjs)) + if len(encObjs) > 0 { + for _, encObj := range encObjs { + fmt.Printf("\t%s %s:%s/%s\n", encObj.EncryptionType, b.Region, b.Name, encObj.Key) + } + } + } +} + +func sortBuckets(buckets []*Bucket) { + s := sortalbeBuckets(buckets) + sort.Sort(s) +} + +type sortalbeBuckets []*Bucket + +func (s sortalbeBuckets) Len() int { return len(s) } +func (s sortalbeBuckets) Swap(a, b int) { s[a], s[b] = s[b], s[a] } +func (s sortalbeBuckets) Less(a, b int) bool { + if s[a].Owner == s[b].Owner && s[a].Name < s[b].Name { + return true + } + + if 
s[a].Owner < s[b].Owner { + return true + } + + return false +} + +func getAccountBuckets(sess *session.Session, bucketCh chan<- *Bucket, owner string) error { + svc := s3.New(sess) + buckets, err := listBuckets(svc) + if err != nil { + return fmt.Errorf("failed to list buckets, %v", err) + } + for _, bucket := range buckets { + bucket.Owner = owner + if bucket.Error != nil { + continue + } + + bckSvc := s3.New(sess, &aws.Config{ + Region: aws.String(bucket.Region), + Credentials: svc.Config.Credentials, + }) + bucketDetails(bckSvc, bucket) + bucketCh <- bucket + } + + return nil +} + +func bucketDetails(svc *s3.S3, bucket *Bucket) { + objs, errObjs, err := listBucketObjects(svc, bucket.Name) + if err != nil { + bucket.Error = err + } else { + bucket.Objects = objs + bucket.ErrObjects = errObjs + } +} + +// A Object provides details of an S3 object +type Object struct { + Bucket string + Key string + Encrypted bool + EncryptionType string +} + +// An ErrObject provides details of the error occurred retrieving +// an object's status. 
+type ErrObject struct { + Bucket string + Key string + Error error +} + +// A Bucket provides details about a bucket and its objects +type Bucket struct { + Owner string + Name string + CreationDate time.Time + Region string + Objects []Object + Error error + ErrObjects []ErrObject +} + +func (b *Bucket) encryptedObjects() []Object { + encObjs := []Object{} + for _, obj := range b.Objects { + if obj.Encrypted { + encObjs = append(encObjs, obj) + } + } + return encObjs +} + +func listBuckets(svc *s3.S3) ([]*Bucket, error) { + res, err := svc.ListBuckets(&s3.ListBucketsInput{}) + if err != nil { + return nil, err + } + + buckets := make([]*Bucket, len(res.Buckets)) + for i, b := range res.Buckets { + buckets[i] = &Bucket{ + Name: *b.Name, + CreationDate: *b.CreationDate, + } + + locRes, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{ + Bucket: b.Name, + }) + if err != nil { + buckets[i].Error = err + continue + } + + if locRes.LocationConstraint == nil { + buckets[i].Region = "us-east-1" + } else { + buckets[i].Region = *locRes.LocationConstraint + } + } + + return buckets, nil +} + +func listBucketObjects(svc *s3.S3, bucket string) ([]Object, []ErrObject, error) { + listRes, err := svc.ListObjects(&s3.ListObjectsInput{ + Bucket: &bucket, + }) + if err != nil { + return nil, nil, err + } + + objs := make([]Object, 0, len(listRes.Contents)) + errObjs := []ErrObject{} + for _, listObj := range listRes.Contents { + objData, err := svc.HeadObject(&s3.HeadObjectInput{ + Bucket: &bucket, + Key: listObj.Key, + }) + + if err != nil { + errObjs = append(errObjs, ErrObject{Bucket: bucket, Key: *listObj.Key, Error: err}) + continue + } + + obj := Object{Bucket: bucket, Key: *listObj.Key} + if objData.ServerSideEncryption != nil { + obj.Encrypted = true + obj.EncryptionType = *objData.ServerSideEncryption + } + + objs = append(objs, obj) + } + + return objs, errObjs, nil +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/example/listS3EncryptedObjects/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/example/listS3EncryptedObjects/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/example/listS3EncryptedObjects/README.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/example/listS3EncryptedObjects/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,11 @@ +## Example + +listS3EncryptedObjects is an example using the AWS SDK for Go concurrently to list the encrypted objects in the S3 buckets owned by an account. + +## Usage + +The example's `accounts` string slice contains a list of the SharedCredentials profiles which will be used to look up the buckets owned by each profile. Each bucket's objects will be queried. + +``` +AWS_REGION=us-east-1 go run example/listS3EncryptedObjects/main.go +``` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/Gemfile aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/Gemfile --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/Gemfile 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/Gemfile 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,6 @@ +source 'https://rubygems.org' + +gem 'yard', git: 'git://github.com/lsegal/yard' +gem 'yard-go', git: 'git://github.com/lsegal/yard-go' +gem 'rdiscount' + diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.gitignore aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.gitignore --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.gitignore 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,10 @@ +dist +doc +.yardoc +Gemfile.lock +awstesting/integration/smoke/**/importmarker__.go +awstesting/integration/smoke/_test/ +/vendor/bin/ +/vendor/pkg/ +/vendor/src/ +/private/model/cli/gen-api/gen-api diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/Makefile aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/Makefile --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/Makefile 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,98 @@ +LINTIGNOREDOT='awstesting/integration.+should not use dot imports' +LINTIGNOREDOC='service/[^/]+/(api|service|waiters)\.go:.+(comment on exported|should have comment or be unexported)' +LINTIGNORECONST='service/[^/]+/(api|service|waiters)\.go:.+(type|struct field|const|func) ([^ ]+) should be ([^ ]+)' +LINTIGNORESTUTTER='service/[^/]+/(api|service)\.go:.+(and that stutters)' +LINTIGNOREINFLECT='service/[^/]+/(api|service)\.go:.+method .+ should be ' +LINTIGNOREDEPS='vendor/.+\.go' + +SDK_WITH_VENDOR_PKGS=$(shell go list ./... | grep -v "/vendor/src") +SDK_ONLY_PKGS=$(shell go list ./... 
| grep -v "/vendor/") + +all: get-deps generate unit + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " api_info to print a list of services and versions" + @echo " docs to build SDK documentation" + @echo " build to go build the SDK" + @echo " unit to run unit tests" + @echo " integration to run integration tests" + @echo " verify to verify tests" + @echo " lint to lint the SDK" + @echo " vet to vet the SDK" + @echo " generate to go generate and make services" + @echo " gen-test to generate protocol tests" + @echo " gen-services to generate services" + @echo " get-deps to go get the SDK dependencies" + @echo " get-deps-tests to get the SDK's test dependencies" + @echo " get-deps-verify to get the SDK's verification dependencies" + +generate: gen-test gen-endpoints gen-services + +gen-test: gen-protocol-test + +gen-services: + go generate ./service + +gen-protocol-test: + go generate ./private/protocol/... + +gen-endpoints: + go generate ./private/endpoints + +build: + @echo "go build SDK and vendor packages" + @go build $(SDK_WITH_VENDOR_PKGS) + +unit: get-deps-tests build verify + @echo "go test SDK and vendor packages" + @go test $(SDK_WITH_VENDOR_PKGS) + +unit-with-race-cover: get-deps-tests build verify + @echo "go test SDK and vendor packages" + @go test -v -race -cpu=1,2,4 -covermode=atomic $(SDK_WITH_VENDOR_PKGS) + +integration: get-deps-tests + go test -tags=integration ./awstesting/integration/customizations/... 
+ gucumber ./awstesting/integration/smoke + +verify: get-deps-verify lint vet + +lint: + @echo "go lint SDK and vendor packages" + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -E -v -e ${LINTIGNOREDOT} -e ${LINTIGNOREDOC} -e ${LINTIGNORECONST} -e ${LINTIGNORESTUTTER} -e ${LINTIGNOREINFLECT} -e ${LINTIGNOREDEPS}`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +vet: + go tool vet -all -shadow $(shell ls -d */ | grep -v vendor) + +get-deps: get-deps-tests get-deps-verify + @echo "go get SDK dependencies" + @go get -v $(SDK_ONLY_PKGS) + +get-deps-tests: + @echo "go get SDK testing dependencies" + go get github.com/lsegal/gucumber/cmd/gucumber + go get github.com/stretchr/testify + go get github.com/smartystreets/goconvey + +get-deps-verify: + @echo "go get SDK verification utilities" + go get github.com/golang/lint/golint + +bench: + @echo "go bench SDK packages" + @go test -run NONE -bench . -benchmem -tags 'bench' $(SDK_ONLY_PKGS) + +bench-protocol: + @echo "go bench SDK protocol marshallers" + @go test -run NONE -bench . -benchmem -tags 'bench' ./private/protocol/... 
+ +docs: + @echo "generate SDK docs" + rm -rf doc && bundle install && bundle exec yard + +api_info: + @go run private/model/cli/api-info/api-info.go diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,422 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-12-08", + "endpointPrefix":"acm", + "jsonVersion":"1.1", + "serviceAbbreviation":"ACM", + "serviceFullName":"AWS Certificate Manager", + "signatureVersion":"v4", + "targetPrefix":"CertificateManager", + "protocol":"json" + }, + "operations":{ + "DeleteCertificate":{ + "name":"DeleteCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCertificateRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"InvalidArnException", + "exception":true + } + ] + }, + "DescribeCertificate":{ + "name":"DescribeCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCertificateRequest"}, + "output":{"shape":"DescribeCertificateResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidArnException", + "exception":true + } + ] + }, + "GetCertificate":{ + "name":"GetCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCertificateRequest"}, + "output":{"shape":"GetCertificateResponse"}, + "errors":[ + { + 
"shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"RequestInProgressException", + "exception":true + }, + { + "shape":"InvalidArnException", + "exception":true + } + ] + }, + "ListCertificates":{ + "name":"ListCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCertificatesRequest"}, + "output":{"shape":"ListCertificatesResponse"} + }, + "RequestCertificate":{ + "name":"RequestCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestCertificateRequest"}, + "output":{"shape":"RequestCertificateResponse"}, + "errors":[ + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InvalidDomainValidationOptionsException", + "exception":true + } + ] + }, + "ResendValidationEmail":{ + "name":"ResendValidationEmail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResendValidationEmailRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidStateException", + "exception":true + }, + { + "shape":"InvalidArnException", + "exception":true + }, + { + "shape":"InvalidDomainValidationOptionsException", + "exception":true + } + ] + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "min":20, + "max":2048, + "pattern":"arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:[\\w+=/,.@-]*:[0-9]+:[\\w+=,.@-]+(/[\\w+=/,.@-]+)*" + }, + "CertificateBody":{ + "type":"string", + "min":1, + "max":524288, + "pattern":"-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?" 
+ }, + "CertificateChain":{ + "type":"string", + "min":1, + "max":2097152, + "pattern":"(-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}\\u000D?\\u000A)*-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?" + }, + "CertificateDetail":{ + "type":"structure", + "members":{ + "CertificateArn":{"shape":"Arn"}, + "DomainName":{"shape":"DomainNameString"}, + "SubjectAlternativeNames":{"shape":"DomainList"}, + "DomainValidationOptions":{"shape":"DomainValidationList"}, + "Serial":{"shape":"String"}, + "Subject":{"shape":"String"}, + "Issuer":{"shape":"String"}, + "CreatedAt":{"shape":"TStamp"}, + "IssuedAt":{"shape":"TStamp"}, + "Status":{"shape":"CertificateStatus"}, + "RevokedAt":{"shape":"TStamp"}, + "RevocationReason":{"shape":"RevocationReason"}, + "NotBefore":{"shape":"TStamp"}, + "NotAfter":{"shape":"TStamp"}, + "KeyAlgorithm":{"shape":"KeyAlgorithm"}, + "SignatureAlgorithm":{"shape":"String"}, + "InUseBy":{"shape":"InUseList"} + } + }, + "CertificateStatus":{ + "type":"string", + "enum":[ + "PENDING_VALIDATION", + "ISSUED", + "INACTIVE", + "EXPIRED", + "VALIDATION_TIMED_OUT", + "REVOKED", + "FAILED" + ] + }, + "CertificateStatuses":{ + "type":"list", + "member":{"shape":"CertificateStatus"} + }, + "CertificateSummary":{ + "type":"structure", + "members":{ + "CertificateArn":{"shape":"Arn"}, + "DomainName":{"shape":"DomainNameString"} + } + }, + "CertificateSummaryList":{ + "type":"list", + "member":{"shape":"CertificateSummary"} + }, + "DeleteCertificateRequest":{ + "type":"structure", + "required":["CertificateArn"], + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "DescribeCertificateRequest":{ + "type":"structure", + "required":["CertificateArn"], + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "DescribeCertificateResponse":{ + "type":"structure", 
+ "members":{ + "Certificate":{"shape":"CertificateDetail"} + } + }, + "DomainList":{ + "type":"list", + "member":{"shape":"DomainNameString"}, + "min":1, + "max":1000 + }, + "DomainNameString":{ + "type":"string", + "min":1, + "max":253, + "pattern":"^(\\*\\.)?(((?!-)[A-Za-z0-9-]{0,62}[A-Za-z0-9])\\.)+((?!-)[A-Za-z0-9-]{1,62}[A-Za-z0-9])$" + }, + "DomainValidation":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainNameString"}, + "ValidationEmails":{"shape":"ValidationEmailList"}, + "ValidationDomain":{"shape":"DomainNameString"} + } + }, + "DomainValidationList":{ + "type":"list", + "member":{"shape":"DomainValidation"}, + "min":1, + "max":1000 + }, + "DomainValidationOption":{ + "type":"structure", + "required":[ + "DomainName", + "ValidationDomain" + ], + "members":{ + "DomainName":{"shape":"DomainNameString"}, + "ValidationDomain":{"shape":"DomainNameString"} + } + }, + "DomainValidationOptionList":{ + "type":"list", + "member":{"shape":"DomainValidationOption"}, + "min":1, + "max":1000 + }, + "GetCertificateRequest":{ + "type":"structure", + "required":["CertificateArn"], + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "GetCertificateResponse":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"CertificateBody"}, + "CertificateChain":{"shape":"CertificateChain"} + } + }, + "IdempotencyToken":{ + "type":"string", + "min":1, + "max":32, + "pattern":"\\w+" + }, + "InUseList":{ + "type":"list", + "member":{"shape":"String"} + }, + "InvalidArnException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidDomainValidationOptionsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidStateException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "KeyAlgorithm":{ + "type":"string", + "enum":[ + "RSA_2048", + "EC_prime256v1" + ] 
+ }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ListCertificatesRequest":{ + "type":"structure", + "members":{ + "CertificateStatuses":{"shape":"CertificateStatuses"}, + "NextToken":{"shape":"NextToken"}, + "MaxItems":{"shape":"MaxItems"} + } + }, + "ListCertificatesResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "CertificateSummaryList":{"shape":"CertificateSummaryList"} + } + }, + "MaxItems":{ + "type":"integer", + "min":1, + "max":1000 + }, + "NextToken":{ + "type":"string", + "min":1, + "max":320, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]*" + }, + "RequestCertificateRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainNameString"}, + "SubjectAlternativeNames":{"shape":"DomainList"}, + "IdempotencyToken":{"shape":"IdempotencyToken"}, + "DomainValidationOptions":{"shape":"DomainValidationOptionList"} + } + }, + "RequestCertificateResponse":{ + "type":"structure", + "members":{ + "CertificateArn":{"shape":"Arn"} + } + }, + "RequestInProgressException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ResendValidationEmailRequest":{ + "type":"structure", + "required":[ + "CertificateArn", + "Domain", + "ValidationDomain" + ], + "members":{ + "CertificateArn":{"shape":"Arn"}, + "Domain":{"shape":"DomainNameString"}, + "ValidationDomain":{"shape":"DomainNameString"} + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "RevocationReason":{ + "type":"string", + "enum":[ + "UNSPECIFIED", + "KEY_COMPROMISE", + "CA_COMPROMISE", + "AFFILIATION_CHANGED", + "SUPERCEDED", + "CESSATION_OF_OPERATION", + "CERTIFICATE_HOLD", + 
"REMOVE_FROM_CRL", + "PRIVILEGE_WITHDRAWN", + "A_A_COMPROMISE" + ] + }, + "String":{"type":"string"}, + "TStamp":{"type":"timestamp"}, + "ValidationEmailList":{ + "type":"list", + "member":{"shape":"String"} + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,271 @@ +{ + "version": "2.0", + "operations": { + "DeleteCertificate": "

    Deletes an ACM Certificate and its associated private key. If this action succeeds, the certificate no longer appears in the list of ACM Certificates that can be displayed by calling the ListCertificates action or be retrieved by calling the GetCertificate action. The certificate will not be available for use by other AWS services.

    You cannot delete an ACM Certificate that is being used by another AWS service. To delete a certificate that is in use, the certificate association must first be removed. ", + "DescribeCertificate": "

    Returns a list of the fields contained in the specified ACM Certificate. For example, this action returns the certificate status, a flag that indicates whether the certificate is associated with any other AWS service, and the date at which the certificate request was created. The ACM Certificate is specified on input by its Amazon Resource Name (ARN).

    ", + "GetCertificate": "

    Retrieves an ACM Certificate and certificate chain for the certificate specified by an ARN. The chain is an ordered list of certificates that contains the root certificate, intermediate certificates of subordinate CAs, and the ACM Certificate. The certificate and certificate chain are base64 encoded. If you want to decode the certificate chain to see the individual certificate fields, you can use OpenSSL.

    Currently, ACM Certificates can be used only with Elastic Load Balancing and Amazon CloudFront. ", + "ListCertificates": "

    Retrieves a list of the ACM Certificate ARNs, and the domain name for each ARN, owned by the calling account. You can filter the list based on the CertificateStatuses parameter, and you can display up to MaxItems certificates at one time. If you have more than MaxItems certificates, use the NextToken marker from the response object in your next call to the ListCertificates action to retrieve the next set of certificate ARNs.

    ", + "RequestCertificate": "

    Requests an ACM Certificate for use with other AWS services. To request an ACM Certificate, you must specify the fully qualified domain name (FQDN) for your site. You can also specify additional FQDNs if users can reach your site by using other names. For each domain name you specify, email is sent to the domain owner to request approval to issue the certificate. After receiving approval from the domain owner, the ACM Certificate is issued. For more information, see the AWS Certificate Manager User Guide .

    ", + "ResendValidationEmail": "

    Resends the email that requests domain ownership validation. The domain owner or an authorized representative must approve the ACM Certificate before it can be issued. The certificate can be approved by clicking a link in the mail to navigate to the Amazon certificate approval website and then clicking I Approve. However, the validation email can be blocked by spam filters. Therefore, if you do not receive the original mail, you can request that the mail be resent within 72 hours of requesting the ACM Certificate. If more than 72 hours have elapsed since your original request or since your last attempt to resend validation mail, you must request a new certificate.

    " + }, + "service": "AWS Certificate Manager

    Welcome to the AWS Certificate Manager (ACM) CLI Command Reference. This guide provides descriptions, syntax, and usage examples for each ACM CLI command. You can use AWS Certificate Manager to request ACM Certificates for your AWS-based websites and applications. For general information about using ACM and for more information about using the console, see the AWS Certificate Manager User Guide. For more information about using the ACM API, see the AWS Certificate Manager API Reference.

    ", + "shapes": { + "Arn": { + "base": null, + "refs": { + "CertificateDetail$CertificateArn": "

    Amazon Resource Name (ARN) of the certificate. This is of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "CertificateSummary$CertificateArn": "

    Amazon Resource Name (ARN) of the certificate. This is of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "DeleteCertificateRequest$CertificateArn": "

    String that contains the ARN of the ACM Certificate to be deleted. This must be of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "DescribeCertificateRequest$CertificateArn": "

    String that contains an ACM Certificate ARN. The ARN must be of the form:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "GetCertificateRequest$CertificateArn": "

    String that contains a certificate ARN in the following format:

    arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

    For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

    ", + "RequestCertificateResponse$CertificateArn": "

    String that contains the ARN of the issued certificate. This must be of the form:

    arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012

    ", + "ResendValidationEmailRequest$CertificateArn": "

    String that contains the ARN of the requested certificate. The certificate ARN is generated and returned by RequestCertificate as soon as the request is made. By default, using this parameter causes email to be sent to all top-level domains you specified in the certificate request.

    The ARN must be of the form:

    arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012

    " + } + }, + "CertificateBody": { + "base": null, + "refs": { + "GetCertificateResponse$Certificate": "

    String that contains the ACM Certificate represented by the ARN specified at input.

    " + } + }, + "CertificateChain": { + "base": null, + "refs": { + "GetCertificateResponse$CertificateChain": "

    The certificate chain that contains the root certificate issued by the certificate authority (CA).

    " + } + }, + "CertificateDetail": { + "base": "

    This structure is returned in the response object of the DescribeCertificate action.

    ", + "refs": { + "DescribeCertificateResponse$Certificate": "

    Contains a CertificateDetail structure that lists the fields of an ACM Certificate.

    " + } + }, + "CertificateStatus": { + "base": null, + "refs": { + "CertificateDetail$Status": "

    A CertificateStatus enumeration value that can contain one of the following:

    • PENDING_VALIDATION
    • ISSUED
    • INACTIVE
    • EXPIRED
    • REVOKED
    • FAILED
    • VALIDATION_TIMED_OUT

    ", + "CertificateStatuses$member": null + } + }, + "CertificateStatuses": { + "base": null, + "refs": { + "ListCertificatesRequest$CertificateStatuses": "

    Identifies the statuses of the ACM Certificates for which you want to retrieve the ARNs. This can be one or more of the following values:

    • PENDING_VALIDATION
    • ISSUED
    • INACTIVE
    • EXPIRED
    • VALIDATION_TIMED_OUT
    • REVOKED
    • FAILED

    " + } + }, + "CertificateSummary": { + "base": "

    This structure is returned in the response object of ListCertificates action.

    ", + "refs": { + "CertificateSummaryList$member": null + } + }, + "CertificateSummaryList": { + "base": null, + "refs": { + "ListCertificatesResponse$CertificateSummaryList": "

    A list of the certificate ARNs.

    " + } + }, + "DeleteCertificateRequest": { + "base": null, + "refs": { + } + }, + "DescribeCertificateRequest": { + "base": null, + "refs": { + } + }, + "DescribeCertificateResponse": { + "base": null, + "refs": { + } + }, + "DomainList": { + "base": null, + "refs": { + "CertificateDetail$SubjectAlternativeNames": "

    One or more domain names (subject alternative names) included in the certificate request. After the certificate is issued, this list includes the domain names bound to the public key contained in the certificate. The subject alternative names include the canonical domain name (CN) of the certificate and additional domain names that can be used to connect to the website.

    ", + "RequestCertificateRequest$SubjectAlternativeNames": "

    Additional FQDNs to be included in the Subject Alternative Name extension of the ACM Certificate. For example, add the name www.example.net to a certificate for which the DomainName field is www.example.com if users can reach your site by using either name.

    " + } + }, + "DomainNameString": { + "base": null, + "refs": { + "CertificateDetail$DomainName": "

    Fully qualified domain name (FQDN), such as www.example.com or example.com, for the certificate.

    ", + "CertificateSummary$DomainName": "

    Fully qualified domain name (FQDN), such as www.example.com or example.com, for the certificate.

    ", + "DomainList$member": null, + "DomainValidation$DomainName": "

    Fully Qualified Domain Name (FQDN) of the form www.example.com or example.com

    ", + "DomainValidation$ValidationDomain": "

    The base validation domain that acts as the suffix of the email addresses that are used to send the emails.

    ", + "DomainValidationOption$DomainName": "

    Fully Qualified Domain Name (FQDN) of the certificate being requested.

    ", + "DomainValidationOption$ValidationDomain": "

    The domain to which validation email is sent. This is the base validation domain that will act as the suffix of the email addresses. This must be the same as the DomainName value or a superdomain of the DomainName value. For example, if you requested a certificate for site.subdomain.example.com and specify a ValidationDomain of subdomain.example.com, ACM sends email to the domain registrant, technical contact, and administrative contact in WHOIS for the base domain and the following five addresses:

    • admin@subdomain.example.com
    • administrator@subdomain.example.com
    • hostmaster@subdomain.example.com
    • postmaster@subdomain.example.com
    • webmaster@subdomain.example.com

    ", + "RequestCertificateRequest$DomainName": "

    Fully qualified domain name (FQDN), such as www.example.com, of the site you want to secure with an ACM Certificate. Use an asterisk (*) to create a wildcard certificate that protects several sites in the same domain. For example, *.example.com protects www.example.com, site.example.com, and images.example.com.

    ", + "ResendValidationEmailRequest$Domain": "

    The Fully Qualified Domain Name (FQDN) of the certificate that needs to be validated.

    ", + "ResendValidationEmailRequest$ValidationDomain": "

    The base validation domain that will act as the suffix of the email addresses that are used to send the emails. This must be the same as the Domain value or a superdomain of the Domain value. For example, if you requested a certificate for site.subdomain.example.com and specify a ValidationDomain of subdomain.example.com, ACM sends email to the domain registrant, technical contact, and administrative contact in WHOIS and the following five addresses:

    • admin@subdomain.example.com
    • administrator@subdomain.example.com
    • hostmaster@subdomain.example.com
    • postmaster@subdomain.example.com
    • webmaster@subdomain.example.com

    " + } + }, + "DomainValidation": { + "base": "

    Structure that contains the domain name, the base validation domain to which validation email is sent, and the email addresses used to validate the domain identity.

    ", + "refs": { + "DomainValidationList$member": null + } + }, + "DomainValidationList": { + "base": null, + "refs": { + "CertificateDetail$DomainValidationOptions": "

    References a DomainValidation structure that contains the domain name in the certificate and the email address that can be used for validation.

    " + } + }, + "DomainValidationOption": { + "base": "

    This structure is used in the request object of the RequestCertificate action.

    ", + "refs": { + "DomainValidationOptionList$member": null + } + }, + "DomainValidationOptionList": { + "base": null, + "refs": { + "RequestCertificateRequest$DomainValidationOptions": "

    The base validation domain that will act as the suffix of the email addresses that are used to send the emails. This must be the same as the Domain value or a superdomain of the Domain value. For example, if you requested a certificate for www.example.com and specify DomainValidationOptions of example.com, ACM sends email to the domain registrant, technical contact, and administrative contact in WHOIS and the following five addresses:

    • admin@example.com
    • administrator@example.com
    • hostmaster@example.com
    • postmaster@example.com
    • webmaster@example.com

    " + } + }, + "GetCertificateRequest": { + "base": null, + "refs": { + } + }, + "GetCertificateResponse": { + "base": null, + "refs": { + } + }, + "IdempotencyToken": { + "base": null, + "refs": { + "RequestCertificateRequest$IdempotencyToken": "

    Customer chosen string that can be used to distinguish between calls to RequestCertificate. Idempotency tokens time out after one hour. Therefore, if you call RequestCertificate multiple times with the same idempotency token within one hour, ACM recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, ACM recognizes that you are requesting multiple certificates.

    " + } + }, + "InUseList": { + "base": null, + "refs": { + "CertificateDetail$InUseBy": "

    List that identifies ARNs that are using the certificate. A single ACM Certificate can be used by multiple AWS resources.

    " + } + }, + "InvalidArnException": { + "base": "

    The requested Amazon Resource Name (ARN) does not refer to an existing resource.

    ", + "refs": { + } + }, + "InvalidDomainValidationOptionsException": { + "base": "

    One or more values in the DomainValidationOption structure is incorrect.

    ", + "refs": { + } + }, + "InvalidStateException": { + "base": "

    Processing has reached an invalid state. For example, this exception can occur if the specified domain is not using email validation, or the current certificate status does not permit the requested operation. See the exception message returned by ACM to determine which state is not valid.

    ", + "refs": { + } + }, + "KeyAlgorithm": { + "base": null, + "refs": { + "CertificateDetail$KeyAlgorithm": "

    Asymmetric algorithm used to generate the public and private key pair. Currently the only supported value is RSA_2048.

    " + } + }, + "LimitExceededException": { + "base": "

    An ACM limit has been exceeded. For example, you may have input more domains than are allowed or you've requested too many certificates for your account. See the exception message returned by ACM to determine which limit you have violated. For more information about ACM limits, see the Limits topic.

    ", + "refs": { + } + }, + "ListCertificatesRequest": { + "base": null, + "refs": { + } + }, + "ListCertificatesResponse": { + "base": null, + "refs": { + } + }, + "MaxItems": { + "base": null, + "refs": { + "ListCertificatesRequest$MaxItems": "

    Specify this parameter when paginating results to indicate the maximum number of ACM Certificates that you want to display for each response. If there are additional certificates beyond the maximum you specify, use the NextToken value in your next call to the ListCertificates action.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListCertificatesRequest$NextToken": "

    String that contains an opaque marker of the next ACM Certificate ARN to be displayed. Use this parameter when paginating results, and only in a subsequent request after you've received a response where the results have been truncated. Set it to an empty string the first time you call this action, and set it to the value of the NextToken element you receive in the response object for subsequent calls.

    ", + "ListCertificatesResponse$NextToken": "

    If the list has been truncated, this value is present and should be used for the NextToken input parameter on your next call to ListCertificates.

    " + } + }, + "RequestCertificateRequest": { + "base": null, + "refs": { + } + }, + "RequestCertificateResponse": { + "base": null, + "refs": { + } + }, + "RequestInProgressException": { + "base": "

    The certificate request is in process and the certificate in your account has not yet been issued.

    ", + "refs": { + } + }, + "ResendValidationEmailRequest": { + "base": null, + "refs": { + } + }, + "ResourceInUseException": { + "base": "

    The certificate is in use by another AWS service in the caller's account. Remove the association and try again.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The specified certificate cannot be found in the caller's account, or the caller's account cannot be found.

    ", + "refs": { + } + }, + "RevocationReason": { + "base": null, + "refs": { + "CertificateDetail$RevocationReason": "

    A RevocationReason enumeration value that indicates why the certificate was revoked. This value exists only if the certificate has been revoked. This can be one of the following values:

    • UNSPECIFIED
    • KEY_COMPROMISE
    • CA_COMPROMISE
    • AFFILIATION_CHANGED
    • SUPERCEDED
    • CESSATION_OF_OPERATION
    • CERTIFICATE_HOLD
    • REMOVE_FROM_CRL
    • PRIVILEGE_WITHDRAWN
    • A_A_COMPROMISE

    " + } + }, + "String": { + "base": null, + "refs": { + "CertificateDetail$Serial": "

    String that contains the serial number of the certificate.

    ", + "CertificateDetail$Subject": "

    The X.500 distinguished name of the entity associated with the public key contained in the certificate.

    ", + "CertificateDetail$Issuer": "

    The X.500 distinguished name of the CA that issued and signed the certificate.

    ", + "CertificateDetail$SignatureAlgorithm": "

    Algorithm used to generate a signature. Currently the only supported value is SHA256WITHRSA.

    ", + "InUseList$member": null, + "InvalidArnException$message": null, + "InvalidDomainValidationOptionsException$message": null, + "InvalidStateException$message": null, + "LimitExceededException$message": null, + "RequestInProgressException$message": null, + "ResourceInUseException$message": null, + "ResourceNotFoundException$message": null, + "ValidationEmailList$member": null + } + }, + "TStamp": { + "base": null, + "refs": { + "CertificateDetail$CreatedAt": "

    Time at which the certificate was requested.

    ", + "CertificateDetail$IssuedAt": "

    Time at which the certificate was issued.

    ", + "CertificateDetail$RevokedAt": "

    The time, if any, at which the certificate was revoked. This value exists only if the certificate has been revoked.

    ", + "CertificateDetail$NotBefore": "

    Time before which the certificate is not valid.

    ", + "CertificateDetail$NotAfter": "

    Time after which the certificate is not valid.

    " + } + }, + "ValidationEmailList": { + "base": null, + "refs": { + "DomainValidation$ValidationEmails": "

    A list of contact addresses for the domain registrant.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version":"1.0", + "examples":{ + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/acm/2015-12-08/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListCertificates": { + "limit_key": "MaxItems", + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "CertificateSummaryList" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/api-2.json 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1,3888 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-07-09", + "endpointPrefix":"apigateway", + "serviceFullName":"Amazon API Gateway", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "CreateApiKey":{ + "name":"CreateApiKey", + "http":{ + "method":"POST", + "requestUri":"/apikeys", + "responseCode":201 + }, + "input":{"shape":"CreateApiKeyRequest"}, + "output":{"shape":"ApiKey"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateBasePathMapping":{ + "name":"CreateBasePathMapping", + "http":{ + "method":"POST", + "requestUri":"/domainnames/{domain_name}/basepathmappings", + "responseCode":201 + }, + "input":{"shape":"CreateBasePathMappingRequest"}, + "output":{"shape":"BasePathMapping"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "CreateDeployment":{ + "name":"CreateDeployment", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/deployments", + "responseCode":201 + }, + "input":{"shape":"CreateDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + { + 
"shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "CreateDomainName":{ + "name":"CreateDomainName", + "http":{ + "method":"POST", + "requestUri":"/domainnames", + "responseCode":201 + }, + "input":{"shape":"CreateDomainNameRequest"}, + "output":{"shape":"DomainName"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "CreateModel":{ + "name":"CreateModel", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/models", + "responseCode":201 + }, + "input":{"shape":"CreateModelRequest"}, + "output":{"shape":"Model"}, + "errors":[ + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + 
"error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "CreateResource":{ + "name":"CreateResource", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/resources/{parent_id}", + "responseCode":201 + }, + "input":{"shape":"CreateResourceRequest"}, + "output":{"shape":"Resource"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "CreateRestApi":{ + "name":"CreateRestApi", + "http":{ + "method":"POST", + "requestUri":"/restapis", + "responseCode":201 + }, + "input":{"shape":"CreateRestApiRequest"}, + "output":{"shape":"RestApi"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "CreateStage":{ + "name":"CreateStage", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/stages", + "responseCode":201 + }, + "input":{"shape":"CreateStageRequest"}, + "output":{"shape":"Stage"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotFoundException", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteApiKey":{ + "name":"DeleteApiKey", + "http":{ + "method":"DELETE", + "requestUri":"/apikeys/{api_Key}", + "responseCode":202 + }, + "input":{"shape":"DeleteApiKeyRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteBasePathMapping":{ + "name":"DeleteBasePathMapping", + "http":{ + "method":"DELETE", + "requestUri":"/domainnames/{domain_name}/basepathmappings/{base_path}", + "responseCode":202 + }, + "input":{"shape":"DeleteBasePathMappingRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteClientCertificate":{ + "name":"DeleteClientCertificate", + "http":{ + "method":"DELETE", + "requestUri":"/clientcertificates/{clientcertificate_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteClientCertificateRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotFoundException", + 
"error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DeleteDeployment":{ + "name":"DeleteDeployment", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/deployments/{deployment_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteDeploymentRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteDomainName":{ + "name":"DeleteDomainName", + "http":{ + "method":"DELETE", + "requestUri":"/domainnames/{domain_name}", + "responseCode":202 + }, + "input":{"shape":"DeleteDomainNameRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteIntegration":{ + "name":"DeleteIntegration", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + "responseCode":204 + }, + "input":{"shape":"DeleteIntegrationRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteIntegrationResponse":{ + "name":"DeleteIntegrationResponse", + "http":{ + "method":"DELETE", + 
"requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + "responseCode":204 + }, + "input":{"shape":"DeleteIntegrationResponseRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteMethod":{ + "name":"DeleteMethod", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + "responseCode":204 + }, + "input":{"shape":"DeleteMethodRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteMethodResponse":{ + "name":"DeleteMethodResponse", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + "responseCode":204 + }, + "input":{"shape":"DeleteMethodResponseRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteModel":{ + "name":"DeleteModel", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/models/{model_name}", + "responseCode":202 + }, + "input":{"shape":"DeleteModelRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "DeleteResource":{ + "name":"DeleteResource", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteResourceRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteRestApi":{ + "name":"DeleteRestApi", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}", + "responseCode":202 + }, + "input":{"shape":"DeleteRestApiRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteStage":{ + "name":"DeleteStage", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}", + "responseCode":202 + }, + "input":{"shape":"DeleteStageRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + 
"FlushStageCache":{ + "name":"FlushStageCache", + "http":{ + "method":"DELETE", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}/cache/data", + "responseCode":202 + }, + "input":{"shape":"FlushStageCacheRequest"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GenerateClientCertificate":{ + "name":"GenerateClientCertificate", + "http":{ + "method":"POST", + "requestUri":"/clientcertificates", + "responseCode":201 + }, + "input":{"shape":"GenerateClientCertificateRequest"}, + "output":{"shape":"ClientCertificate"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetAccount":{ + "name":"GetAccount", + "http":{ + "method":"GET", + "requestUri":"/account" + }, + "input":{"shape":"GetAccountRequest"}, + "output":{"shape":"Account"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetApiKey":{ + "name":"GetApiKey", + "http":{ + "method":"GET", + "requestUri":"/apikeys/{api_Key}" + }, + "input":{"shape":"GetApiKeyRequest"}, + "output":{"shape":"ApiKey"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + 
"shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetApiKeys":{ + "name":"GetApiKeys", + "http":{ + "method":"GET", + "requestUri":"/apikeys" + }, + "input":{"shape":"GetApiKeysRequest"}, + "output":{"shape":"ApiKeys"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetBasePathMapping":{ + "name":"GetBasePathMapping", + "http":{ + "method":"GET", + "requestUri":"/domainnames/{domain_name}/basepathmappings/{base_path}" + }, + "input":{"shape":"GetBasePathMappingRequest"}, + "output":{"shape":"BasePathMapping"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetBasePathMappings":{ + "name":"GetBasePathMappings", + "http":{ + "method":"GET", + "requestUri":"/domainnames/{domain_name}/basepathmappings" + }, + "input":{"shape":"GetBasePathMappingsRequest"}, + "output":{"shape":"BasePathMappings"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetClientCertificate":{ + "name":"GetClientCertificate", + "http":{ + "method":"GET", + "requestUri":"/clientcertificates/{clientcertificate_id}" + }, + "input":{"shape":"GetClientCertificateRequest"}, + "output":{"shape":"ClientCertificate"}, + "errors":[ + { + 
"shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetClientCertificates":{ + "name":"GetClientCertificates", + "http":{ + "method":"GET", + "requestUri":"/clientcertificates" + }, + "input":{"shape":"GetClientCertificatesRequest"}, + "output":{"shape":"ClientCertificates"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetDeployment":{ + "name":"GetDeployment", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/deployments/{deployment_id}" + }, + "input":{"shape":"GetDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "GetDeployments":{ + "name":"GetDeployments", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/deployments" + }, + "input":{"shape":"GetDeploymentsRequest"}, + "output":{"shape":"Deployments"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "GetDomainName":{ + "name":"GetDomainName", + 
"http":{ + "method":"GET", + "requestUri":"/domainnames/{domain_name}" + }, + "input":{"shape":"GetDomainNameRequest"}, + "output":{"shape":"DomainName"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetDomainNames":{ + "name":"GetDomainNames", + "http":{ + "method":"GET", + "requestUri":"/domainnames" + }, + "input":{"shape":"GetDomainNamesRequest"}, + "output":{"shape":"DomainNames"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetIntegration":{ + "name":"GetIntegration", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration" + }, + "input":{"shape":"GetIntegrationRequest"}, + "output":{"shape":"Integration"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetIntegrationResponse":{ + "name":"GetIntegrationResponse", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}" + }, + "input":{"shape":"GetIntegrationResponseRequest"}, + "output":{"shape":"IntegrationResponse"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { 
+ "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetMethod":{ + "name":"GetMethod", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}" + }, + "input":{"shape":"GetMethodRequest"}, + "output":{"shape":"Method"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetMethodResponse":{ + "name":"GetMethodResponse", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}" + }, + "input":{"shape":"GetMethodResponseRequest"}, + "output":{"shape":"MethodResponse"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetModel":{ + "name":"GetModel", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/models/{model_name}" + }, + "input":{"shape":"GetModelRequest"}, + "output":{"shape":"Model"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetModelTemplate":{ + "name":"GetModelTemplate", + "http":{ + "method":"GET", + 
"requestUri":"/restapis/{restapi_id}/models/{model_name}/default_template" + }, + "input":{"shape":"GetModelTemplateRequest"}, + "output":{"shape":"Template"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetModels":{ + "name":"GetModels", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/models" + }, + "input":{"shape":"GetModelsRequest"}, + "output":{"shape":"Models"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetResource":{ + "name":"GetResource", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}" + }, + "input":{"shape":"GetResourceRequest"}, + "output":{"shape":"Resource"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetResources":{ + "name":"GetResources", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/resources" + }, + "input":{"shape":"GetResourcesRequest"}, + "output":{"shape":"Resources"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + 
}, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetRestApi":{ + "name":"GetRestApi", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}" + }, + "input":{"shape":"GetRestApiRequest"}, + "output":{"shape":"RestApi"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetRestApis":{ + "name":"GetRestApis", + "http":{ + "method":"GET", + "requestUri":"/restapis" + }, + "input":{"shape":"GetRestApisRequest"}, + "output":{"shape":"RestApis"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetSdk":{ + "name":"GetSdk", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}/sdks/{sdk_type}", + "responseCode":200 + }, + "input":{"shape":"GetSdkRequest"}, + "output":{"shape":"SdkResponse"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetStage":{ + "name":"GetStage", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}" + }, + "input":{"shape":"GetStageRequest"}, + "output":{"shape":"Stage"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetStages":{ + "name":"GetStages", + "http":{ + "method":"GET", + "requestUri":"/restapis/{restapi_id}/stages" + }, + "input":{"shape":"GetStagesRequest"}, + "output":{"shape":"Stages"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "PutIntegration":{ + "name":"PutIntegration", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + "responseCode":201 + }, + "input":{"shape":"PutIntegrationRequest"}, + "output":{"shape":"Integration"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "PutIntegrationResponse":{ + "name":"PutIntegrationResponse", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + "responseCode":201 + }, + "input":{"shape":"PutIntegrationResponseRequest"}, + "output":{"shape":"IntegrationResponse"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + 
"shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "PutMethod":{ + "name":"PutMethod", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + "responseCode":201 + }, + "input":{"shape":"PutMethodRequest"}, + "output":{"shape":"Method"}, + "errors":[ + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "PutMethodResponse":{ + "name":"PutMethodResponse", + "http":{ + "method":"PUT", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + "responseCode":201 + }, + "input":{"shape":"PutMethodResponseRequest"}, + "output":{"shape":"MethodResponse"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "TestInvokeMethod":{ + 
"name":"TestInvokeMethod", + "http":{ + "method":"POST", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}" + }, + "input":{"shape":"TestInvokeMethodRequest"}, + "output":{"shape":"TestInvokeMethodResponse"}, + "errors":[ + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateAccount":{ + "name":"UpdateAccount", + "http":{ + "method":"PATCH", + "requestUri":"/account" + }, + "input":{"shape":"UpdateAccountRequest"}, + "output":{"shape":"Account"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateApiKey":{ + "name":"UpdateApiKey", + "http":{ + "method":"PATCH", + "requestUri":"/apikeys/{api_Key}" + }, + "input":{"shape":"UpdateApiKeyRequest"}, + "output":{"shape":"ApiKey"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateBasePathMapping":{ + "name":"UpdateBasePathMapping", + "http":{ + "method":"PATCH", + 
"requestUri":"/domainnames/{domain_name}/basepathmappings/{base_path}" + }, + "input":{"shape":"UpdateBasePathMappingRequest"}, + "output":{"shape":"BasePathMapping"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateClientCertificate":{ + "name":"UpdateClientCertificate", + "http":{ + "method":"PATCH", + "requestUri":"/clientcertificates/{clientcertificate_id}" + }, + "input":{"shape":"UpdateClientCertificateRequest"}, + "output":{"shape":"ClientCertificate"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "UpdateDeployment":{ + "name":"UpdateDeployment", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/deployments/{deployment_id}" + }, + "input":{"shape":"UpdateDeploymentRequest"}, + "output":{"shape":"Deployment"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + 
"shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "UpdateDomainName":{ + "name":"UpdateDomainName", + "http":{ + "method":"PATCH", + "requestUri":"/domainnames/{domain_name}" + }, + "input":{"shape":"UpdateDomainNameRequest"}, + "output":{"shape":"DomainName"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateIntegration":{ + "name":"UpdateIntegration", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration" + }, + "input":{"shape":"UpdateIntegrationRequest"}, + "output":{"shape":"Integration"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateIntegrationResponse":{ + "name":"UpdateIntegrationResponse", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}" + }, + "input":{"shape":"UpdateIntegrationResponseRequest"}, + "output":{"shape":"IntegrationResponse"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateMethod":{ + "name":"UpdateMethod", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}" + }, + "input":{"shape":"UpdateMethodRequest"}, + "output":{"shape":"Method"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateMethodResponse":{ + "name":"UpdateMethodResponse", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + "responseCode":201 + }, + "input":{"shape":"UpdateMethodResponseRequest"}, + "output":{"shape":"MethodResponse"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateModel":{ + "name":"UpdateModel", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/models/{model_name}" + }, + "input":{"shape":"UpdateModelRequest"}, + "output":{"shape":"Model"}, + "errors":[ + { + "shape":"UnauthorizedException", + 
"error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateResource":{ + "name":"UpdateResource", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/resources/{resource_id}" + }, + "input":{"shape":"UpdateResourceRequest"}, + "output":{"shape":"Resource"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateRestApi":{ + "name":"UpdateRestApi", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}" + }, + "input":{"shape":"UpdateRestApiRequest"}, + "output":{"shape":"RestApi"}, + "errors":[ + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateStage":{ + "name":"UpdateStage", + "http":{ + "method":"PATCH", + "requestUri":"/restapis/{restapi_id}/stages/{stage_name}" + }, + "input":{"shape":"UpdateStageRequest"}, + "output":{"shape":"Stage"}, + "errors":[ + 
{ + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + } + }, + "shapes":{ + "Account":{ + "type":"structure", + "members":{ + "cloudwatchRoleArn":{"shape":"String"}, + "throttleSettings":{"shape":"ThrottleSettings"} + } + }, + "ApiKey":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "enabled":{"shape":"Boolean"}, + "stageKeys":{"shape":"ListOfString"}, + "createdDate":{"shape":"Timestamp"}, + "lastUpdatedDate":{"shape":"Timestamp"} + } + }, + "ApiKeys":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfApiKey", + "locationName":"item" + } + } + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "BasePathMapping":{ + "type":"structure", + "members":{ + "basePath":{"shape":"String"}, + "restApiId":{"shape":"String"}, + "stage":{"shape":"String"} + } + }, + "BasePathMappings":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfBasePathMapping", + "locationName":"item" + } + } + }, + "Blob":{"type":"blob"}, + "Boolean":{"type":"boolean"}, + "CacheClusterSize":{ + "type":"string", + "enum":[ + "0.5", + "1.6", + "6.1", + "13.5", + "28.4", + "58.2", + "118", + "237" + ] + }, + "CacheClusterStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "AVAILABLE", + "DELETE_IN_PROGRESS", + "NOT_AVAILABLE", + "FLUSH_IN_PROGRESS" + ] + }, + "ClientCertificate":{ + 
"type":"structure", + "members":{ + "clientCertificateId":{"shape":"String"}, + "description":{"shape":"String"}, + "pemEncodedCertificate":{"shape":"String"}, + "createdDate":{"shape":"Timestamp"}, + "expirationDate":{"shape":"Timestamp"} + } + }, + "ClientCertificates":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfClientCertificate", + "locationName":"item" + } + } + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateApiKeyRequest":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "enabled":{"shape":"Boolean"}, + "stageKeys":{"shape":"ListOfStageKeys"} + } + }, + "CreateBasePathMappingRequest":{ + "type":"structure", + "required":[ + "domainName", + "restApiId" + ], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "basePath":{"shape":"String"}, + "restApiId":{"shape":"String"}, + "stage":{"shape":"String"} + } + }, + "CreateDeploymentRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{"shape":"String"}, + "stageDescription":{"shape":"String"}, + "description":{"shape":"String"}, + "cacheClusterEnabled":{"shape":"NullableBoolean"}, + "cacheClusterSize":{"shape":"CacheClusterSize"}, + "variables":{"shape":"MapOfStringToString"} + } + }, + "CreateDomainNameRequest":{ + "type":"structure", + "members":{ + "domainName":{"shape":"String"}, + "certificateName":{"shape":"String"}, + "certificateBody":{"shape":"String"}, + "certificatePrivateKey":{"shape":"String"}, + "certificateChain":{"shape":"String"} + }, + "required":[ + "domainName", + "certificateName", + "certificateBody", + "certificatePrivateKey", + "certificateChain" + ] + }, + 
"CreateModelRequest":{ + "type":"structure", + "required":[ + "restApiId", + "name", + "contentType" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "schema":{"shape":"String"}, + "contentType":{"shape":"String"} + } + }, + "CreateResourceRequest":{ + "type":"structure", + "required":[ + "restApiId", + "parentId", + "pathPart" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "parentId":{ + "shape":"String", + "location":"uri", + "locationName":"parent_id" + }, + "pathPart":{"shape":"String"} + } + }, + "CreateRestApiRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "cloneFrom":{"shape":"String"} + } + }, + "CreateStageRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName", + "deploymentId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{"shape":"String"}, + "deploymentId":{"shape":"String"}, + "description":{"shape":"String"}, + "cacheClusterEnabled":{"shape":"Boolean"}, + "cacheClusterSize":{"shape":"CacheClusterSize"}, + "variables":{"shape":"MapOfStringToString"} + } + }, + "DeleteApiKeyRequest":{ + "type":"structure", + "required":["apiKey"], + "members":{ + "apiKey":{ + "shape":"String", + "location":"uri", + "locationName":"api_Key" + } + } + }, + "DeleteBasePathMappingRequest":{ + "type":"structure", + "required":[ + "domainName", + "basePath" + ], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "basePath":{ + "shape":"String", + "location":"uri", + "locationName":"base_path" + } + } + }, + "DeleteClientCertificateRequest":{ + "type":"structure", + "required":["clientCertificateId"], + "members":{ + "clientCertificateId":{ + 
"shape":"String", + "location":"uri", + "locationName":"clientcertificate_id" + } + } + }, + "DeleteDeploymentRequest":{ + "type":"structure", + "required":[ + "restApiId", + "deploymentId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "deploymentId":{ + "shape":"String", + "location":"uri", + "locationName":"deployment_id" + } + } + }, + "DeleteDomainNameRequest":{ + "type":"structure", + "required":["domainName"], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + } + } + }, + "DeleteIntegrationRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + } + } + }, + "DeleteIntegrationResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + } + } + }, + "DeleteMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + } + } + }, + 
"DeleteMethodResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + } + } + }, + "DeleteModelRequest":{ + "type":"structure", + "required":[ + "restApiId", + "modelName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "modelName":{ + "shape":"String", + "location":"uri", + "locationName":"model_name" + } + } + }, + "DeleteResourceRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + } + } + }, + "DeleteRestApiRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + } + } + }, + "DeleteStageRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + } + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "description":{"shape":"String"}, + "createdDate":{"shape":"Timestamp"}, + "apiSummary":{"shape":"PathToMapOfMethodSnapshot"} + } + }, + "Deployments":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfDeployment", + "locationName":"item" + 
} + } + }, + "DomainName":{ + "type":"structure", + "members":{ + "domainName":{"shape":"String"}, + "certificateName":{"shape":"String"}, + "certificateUploadDate":{"shape":"Timestamp"}, + "distributionDomainName":{"shape":"String"} + } + }, + "DomainNames":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfDomainName", + "locationName":"item" + } + } + }, + "Double":{"type":"double"}, + "FlushStageCacheRequest":{ + "type":"structure", + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + } + }, + "required":[ + "restApiId", + "stageName" + ] + }, + "GenerateClientCertificateRequest":{ + "type":"structure", + "members":{ + "description":{"shape":"String"} + } + }, + "GetAccountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetApiKeyRequest":{ + "type":"structure", + "required":["apiKey"], + "members":{ + "apiKey":{ + "shape":"String", + "location":"uri", + "locationName":"api_Key" + } + } + }, + "GetApiKeysRequest":{ + "type":"structure", + "members":{ + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetBasePathMappingRequest":{ + "type":"structure", + "required":[ + "domainName", + "basePath" + ], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "basePath":{ + "shape":"String", + "location":"uri", + "locationName":"base_path" + } + } + }, + "GetBasePathMappingsRequest":{ + "type":"structure", + "required":["domainName"], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + 
"shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetClientCertificateRequest":{ + "type":"structure", + "required":["clientCertificateId"], + "members":{ + "clientCertificateId":{ + "shape":"String", + "location":"uri", + "locationName":"clientcertificate_id" + } + } + }, + "GetClientCertificatesRequest":{ + "type":"structure", + "members":{ + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetDeploymentRequest":{ + "type":"structure", + "required":[ + "restApiId", + "deploymentId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "deploymentId":{ + "shape":"String", + "location":"uri", + "locationName":"deployment_id" + } + } + }, + "GetDeploymentsRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetDomainNameRequest":{ + "type":"structure", + "required":["domainName"], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + } + } + }, + "GetDomainNamesRequest":{ + "type":"structure", + "members":{ + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetIntegrationRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + 
"shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + } + } + }, + "GetIntegrationResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + } + } + }, + "GetMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + } + } + }, + "GetMethodResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + } + } + }, + "GetModelRequest":{ + "type":"structure", + "required":[ + "restApiId", + "modelName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "modelName":{ + "shape":"String", + "location":"uri", + "locationName":"model_name" + }, + "flatten":{ + "shape":"Boolean", + 
"location":"querystring", + "locationName":"flatten" + } + } + }, + "GetModelTemplateRequest":{ + "type":"structure", + "required":[ + "restApiId", + "modelName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "modelName":{ + "shape":"String", + "location":"uri", + "locationName":"model_name" + } + } + }, + "GetModelsRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetResourceRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + } + } + }, + "GetResourcesRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetRestApiRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + } + } + }, + "GetRestApisRequest":{ + "type":"structure", + "members":{ + "position":{ + "shape":"String", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "location":"querystring", + "locationName":"limit" + } + } + }, + "GetSdkRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName", + "sdkType" + ], + 
"members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + }, + "sdkType":{ + "shape":"String", + "location":"uri", + "locationName":"sdk_type" + }, + "parameters":{ + "shape":"MapOfStringToString", + "location":"querystring" + } + } + }, + "GetStageRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + } + } + }, + "GetStagesRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "deploymentId":{ + "shape":"String", + "location":"querystring", + "locationName":"deploymentId" + } + } + }, + "Integer":{"type":"integer"}, + "Integration":{ + "type":"structure", + "members":{ + "type":{"shape":"IntegrationType"}, + "httpMethod":{"shape":"String"}, + "uri":{"shape":"String"}, + "credentials":{"shape":"String"}, + "requestParameters":{"shape":"MapOfStringToString"}, + "requestTemplates":{"shape":"MapOfStringToString"}, + "cacheNamespace":{"shape":"String"}, + "cacheKeyParameters":{"shape":"ListOfString"}, + "integrationResponses":{"shape":"MapOfIntegrationResponse"} + } + }, + "IntegrationResponse":{ + "type":"structure", + "members":{ + "statusCode":{"shape":"StatusCode"}, + "selectionPattern":{"shape":"String"}, + "responseParameters":{"shape":"MapOfStringToString"}, + "responseTemplates":{"shape":"MapOfStringToString"} + } + }, + "IntegrationType":{ + "type":"string", + "enum":[ + "HTTP", + "AWS", + "MOCK" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "retryAfterSeconds":{ + "shape":"String", + "location":"header", + "locationName":"Retry-After" + }, + "message":{"shape":"String"} + 
}, + "error":{"httpStatusCode":429}, + "exception":true + }, + "ListOfApiKey":{ + "type":"list", + "member":{"shape":"ApiKey"} + }, + "ListOfBasePathMapping":{ + "type":"list", + "member":{"shape":"BasePathMapping"} + }, + "ListOfClientCertificate":{ + "type":"list", + "member":{"shape":"ClientCertificate"} + }, + "ListOfDeployment":{ + "type":"list", + "member":{"shape":"Deployment"} + }, + "ListOfDomainName":{ + "type":"list", + "member":{"shape":"DomainName"} + }, + "ListOfModel":{ + "type":"list", + "member":{"shape":"Model"} + }, + "ListOfPatchOperation":{ + "type":"list", + "member":{"shape":"PatchOperation"} + }, + "ListOfResource":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "ListOfRestApi":{ + "type":"list", + "member":{"shape":"RestApi"} + }, + "ListOfStage":{ + "type":"list", + "member":{"shape":"Stage"} + }, + "ListOfStageKeys":{ + "type":"list", + "member":{"shape":"StageKey"} + }, + "ListOfString":{ + "type":"list", + "member":{"shape":"String"} + }, + "Long":{"type":"long"}, + "MapOfHeaderValues":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "MapOfIntegrationResponse":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"IntegrationResponse"} + }, + "MapOfMethod":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Method"} + }, + "MapOfMethodResponse":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MethodResponse"} + }, + "MapOfMethodSettings":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MethodSetting"} + }, + "MapOfMethodSnapshot":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MethodSnapshot"} + }, + "MapOfStringToBoolean":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"NullableBoolean"} + }, + "MapOfStringToString":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Method":{ + "type":"structure", + "members":{ + "httpMethod":{"shape":"String"}, + 
"authorizationType":{"shape":"String"}, + "apiKeyRequired":{"shape":"NullableBoolean"}, + "requestParameters":{"shape":"MapOfStringToBoolean"}, + "requestModels":{"shape":"MapOfStringToString"}, + "methodResponses":{"shape":"MapOfMethodResponse"}, + "methodIntegration":{"shape":"Integration"} + } + }, + "MethodResponse":{ + "type":"structure", + "members":{ + "statusCode":{"shape":"StatusCode"}, + "responseParameters":{"shape":"MapOfStringToBoolean"}, + "responseModels":{"shape":"MapOfStringToString"} + } + }, + "MethodSetting":{ + "type":"structure", + "members":{ + "metricsEnabled":{"shape":"Boolean"}, + "loggingLevel":{"shape":"String"}, + "dataTraceEnabled":{"shape":"Boolean"}, + "throttlingBurstLimit":{"shape":"Integer"}, + "throttlingRateLimit":{"shape":"Double"}, + "cachingEnabled":{"shape":"Boolean"}, + "cacheTtlInSeconds":{"shape":"Integer"}, + "cacheDataEncrypted":{"shape":"Boolean"} + } + }, + "MethodSnapshot":{ + "type":"structure", + "members":{ + "authorizationType":{"shape":"String"}, + "apiKeyRequired":{"shape":"Boolean"} + } + }, + "Model":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "schema":{"shape":"String"}, + "contentType":{"shape":"String"} + } + }, + "Models":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfModel", + "locationName":"item" + } + } + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NullableBoolean":{"type":"boolean"}, + "NullableInteger":{"type":"integer"}, + "PatchOperation":{ + "type":"structure", + "members":{ + "op":{"shape":"op"}, + "path":{"shape":"String"}, + "value":{"shape":"String"}, + "from":{"shape":"String"} + } + }, + "PathToMapOfMethodSnapshot":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MapOfMethodSnapshot"} + }, + "PutIntegrationRequest":{ + 
"type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "type" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "type":{"shape":"IntegrationType"}, + "integrationHttpMethod":{ + "shape":"String", + "locationName":"httpMethod" + }, + "uri":{"shape":"String"}, + "credentials":{"shape":"String"}, + "requestParameters":{"shape":"MapOfStringToString"}, + "requestTemplates":{"shape":"MapOfStringToString"}, + "cacheNamespace":{"shape":"String"}, + "cacheKeyParameters":{"shape":"ListOfString"} + } + }, + "PutIntegrationResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + }, + "selectionPattern":{"shape":"String"}, + "responseParameters":{"shape":"MapOfStringToString"}, + "responseTemplates":{"shape":"MapOfStringToString"} + } + }, + "PutMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "authorizationType" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "authorizationType":{"shape":"String"}, + "apiKeyRequired":{"shape":"Boolean"}, + 
"requestParameters":{"shape":"MapOfStringToBoolean"}, + "requestModels":{"shape":"MapOfStringToString"} + } + }, + "PutMethodResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + }, + "responseParameters":{"shape":"MapOfStringToBoolean"}, + "responseModels":{"shape":"MapOfStringToString"} + } + }, + "Resource":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "parentId":{"shape":"String"}, + "pathPart":{"shape":"String"}, + "path":{"shape":"String"}, + "resourceMethods":{"shape":"MapOfMethod"} + } + }, + "Resources":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfResource", + "locationName":"item" + } + } + }, + "RestApi":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "createdDate":{"shape":"Timestamp"} + } + }, + "RestApis":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfRestApi", + "locationName":"item" + } + } + }, + "SdkResponse":{ + "type":"structure", + "members":{ + "contentType":{ + "shape":"String", + "location":"header", + "locationName":"Content-Type" + }, + "contentDisposition":{ + "shape":"String", + "location":"header", + "locationName":"Content-Disposition" + }, + "body":{"shape":"Blob"} + }, + "payload":"body" + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "retryAfterSeconds":{ + "shape":"String", + "location":"header", + "locationName":"Retry-After" + }, + 
"message":{"shape":"String"} + }, + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "Stage":{ + "type":"structure", + "members":{ + "deploymentId":{"shape":"String"}, + "clientCertificateId":{"shape":"String"}, + "stageName":{"shape":"String"}, + "description":{"shape":"String"}, + "cacheClusterEnabled":{"shape":"Boolean"}, + "cacheClusterSize":{"shape":"CacheClusterSize"}, + "cacheClusterStatus":{"shape":"CacheClusterStatus"}, + "methodSettings":{"shape":"MapOfMethodSettings"}, + "variables":{"shape":"MapOfStringToString"}, + "createdDate":{"shape":"Timestamp"}, + "lastUpdatedDate":{"shape":"Timestamp"} + } + }, + "StageKey":{ + "type":"structure", + "members":{ + "restApiId":{"shape":"String"}, + "stageName":{"shape":"String"} + } + }, + "Stages":{ + "type":"structure", + "members":{ + "item":{"shape":"ListOfStage"} + } + }, + "StatusCode":{ + "type":"string", + "pattern":"[1-5]\\d\\d" + }, + "String":{"type":"string"}, + "Template":{ + "type":"structure", + "members":{ + "value":{"shape":"String"} + } + }, + "TestInvokeMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "pathWithQueryString":{"shape":"String"}, + "body":{"shape":"String"}, + "headers":{"shape":"MapOfHeaderValues"}, + "clientCertificateId":{"shape":"String"}, + "stageVariables":{"shape":"MapOfStringToString"} + } + }, + "TestInvokeMethodResponse":{ + "type":"structure", + "members":{ + "status":{"shape":"Integer"}, + "body":{"shape":"String"}, + "headers":{"shape":"MapOfHeaderValues"}, + "log":{"shape":"String"}, + "latency":{"shape":"Long"} + } + }, + "ThrottleSettings":{ + "type":"structure", + "members":{ + 
"burstLimit":{"shape":"Integer"}, + "rateLimit":{"shape":"Double"} + } + }, + "Timestamp":{"type":"timestamp"}, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "retryAfterSeconds":{ + "shape":"String", + "location":"header", + "locationName":"Retry-After" + }, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":401}, + "exception":true + }, + "UpdateAccountRequest":{ + "type":"structure", + "members":{ + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateApiKeyRequest":{ + "type":"structure", + "required":["apiKey"], + "members":{ + "apiKey":{ + "shape":"String", + "location":"uri", + "locationName":"api_Key" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateBasePathMappingRequest":{ + "type":"structure", + "required":[ + "domainName", + "basePath" + ], + "members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "basePath":{ + "shape":"String", + "location":"uri", + "locationName":"base_path" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateClientCertificateRequest":{ + "type":"structure", + "required":["clientCertificateId"], + "members":{ + "clientCertificateId":{ + "shape":"String", + "location":"uri", + "locationName":"clientcertificate_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateDeploymentRequest":{ + "type":"structure", + "required":[ + "restApiId", + "deploymentId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "deploymentId":{ + "shape":"String", + "location":"uri", + "locationName":"deployment_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateDomainNameRequest":{ + "type":"structure", + "required":["domainName"], + 
"members":{ + "domainName":{ + "shape":"String", + "location":"uri", + "locationName":"domain_name" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateIntegrationRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateIntegrationResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateMethodRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateMethodResponseRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId", + "httpMethod", + "statusCode" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", 
+ "locationName":"resource_id" + }, + "httpMethod":{ + "shape":"String", + "location":"uri", + "locationName":"http_method" + }, + "statusCode":{ + "shape":"StatusCode", + "location":"uri", + "locationName":"status_code" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateModelRequest":{ + "type":"structure", + "required":[ + "restApiId", + "modelName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "modelName":{ + "shape":"String", + "location":"uri", + "locationName":"model_name" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateResourceRequest":{ + "type":"structure", + "required":[ + "restApiId", + "resourceId" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "resourceId":{ + "shape":"String", + "location":"uri", + "locationName":"resource_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateRestApiRequest":{ + "type":"structure", + "required":["restApiId"], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "UpdateStageRequest":{ + "type":"structure", + "required":[ + "restApiId", + "stageName" + ], + "members":{ + "restApiId":{ + "shape":"String", + "location":"uri", + "locationName":"restapi_id" + }, + "stageName":{ + "shape":"String", + "location":"uri", + "locationName":"stage_name" + }, + "patchOperations":{"shape":"ListOfPatchOperation"} + } + }, + "op":{ + "type":"string", + "enum":[ + "add", + "remove", + "replace", + "move", + "copy", + "test" + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/docs-2.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1139 @@ +{ + "version": "2.0", + "operations": { + "CreateApiKey": null, + "CreateBasePathMapping": "

    Creates a new BasePathMapping resource.

    ", + "CreateDeployment": "

    Creates a Deployment resource, which makes a specified RestApi callable over the internet.

    ", + "CreateDomainName": "

    Creates a new domain name.

    ", + "CreateModel": "

    Adds a new Model resource to an existing RestApi resource.

    ", + "CreateResource": "

    Creates a Resource resource.

    ", + "CreateRestApi": "

    Creates a new RestApi resource.

    ", + "CreateStage": "

    Creates a Stage resource.

    ", + "DeleteApiKey": "

    Deletes the ApiKey resource.

    ", + "DeleteBasePathMapping": "

    Deletes the BasePathMapping resource.

    ", + "DeleteClientCertificate": null, + "DeleteDeployment": "

    Deletes a Deployment resource. Deleting a deployment will only succeed if there are no Stage resources associated with it.

    ", + "DeleteDomainName": "

    Deletes the DomainName resource.

    ", + "DeleteIntegration": "

    Represents a delete integration.

    ", + "DeleteIntegrationResponse": "

    Represents a delete integration response.

    ", + "DeleteMethod": "

    Deletes an existing Method resource.

    ", + "DeleteMethodResponse": "

    Deletes an existing MethodResponse resource.

    ", + "DeleteModel": "

    Deletes a model.

    ", + "DeleteResource": "

    Deletes a Resource resource.

    ", + "DeleteRestApi": "

    Deletes the specified API.

    ", + "DeleteStage": "

    Deletes a Stage resource.

    ", + "FlushStageCache": "

    Flushes a stage's cache.

    ", + "GenerateClientCertificate": null, + "GetAccount": "

    Gets information about the current Account resource.

    ", + "GetApiKey": "

    Gets information about the current ApiKey resource.

    ", + "GetApiKeys": "

    Gets information about the current ApiKeys resource.

    ", + "GetBasePathMapping": "

    Describe a BasePathMapping resource.

    ", + "GetBasePathMappings": "

    Represents a collection of BasePathMapping resources.

    ", + "GetClientCertificate": null, + "GetClientCertificates": null, + "GetDeployment": "

    Gets information about a Deployment resource.

    ", + "GetDeployments": "

    Gets information about a Deployments collection.

    ", + "GetDomainName": "

    Represents a domain name that is contained in a simpler, more intuitive URL that can be called.

    ", + "GetDomainNames": "

    Represents a collection of DomainName resources.

    ", + "GetIntegration": "

    Represents a get integration.

    ", + "GetIntegrationResponse": "

    Represents a get integration response.

    ", + "GetMethod": "

    Describe an existing Method resource.

    ", + "GetMethodResponse": "

    Describes a MethodResponse resource.

    ", + "GetModel": "

    Describes an existing model defined for a RestApi resource.

    ", + "GetModelTemplate": "

    Generates a sample mapping template that can be used to transform a payload into the structure of a model.

    ", + "GetModels": "

    Describes existing Models defined for a RestApi resource.

    ", + "GetResource": "

    Lists information about a resource.

    ", + "GetResources": "

    Lists information about a collection of Resource resources.

    ", + "GetRestApi": "

    Lists the RestApi resource in the collection.

    ", + "GetRestApis": "

    Lists the RestApis resources for your collection.

    ", + "GetSdk": null, + "GetStage": "

    Gets information about a Stage resource.

    ", + "GetStages": "

    Gets information about one or more Stage resources.

    ", + "PutIntegration": "

    Represents a put integration.

    ", + "PutIntegrationResponse": "

    Represents a put integration.

    ", + "PutMethod": "

    Add a method to an existing Resource resource.

    ", + "PutMethodResponse": "

    Adds a MethodResponse to an existing Method resource.

    ", + "TestInvokeMethod": null, + "UpdateAccount": "

    Changes information about the current Account resource.

    ", + "UpdateApiKey": "

    Changes information about an ApiKey resource.

    ", + "UpdateBasePathMapping": "

    Changes information about the BasePathMapping resource.

    ", + "UpdateClientCertificate": null, + "UpdateDeployment": "

    Changes information about a Deployment resource.

    ", + "UpdateDomainName": "

    Changes information about the DomainName resource.

    ", + "UpdateIntegration": "

    Represents an update integration.

    ", + "UpdateIntegrationResponse": "

    Represents an update integration response.

    ", + "UpdateMethod": "

    Updates an existing Method resource.

    ", + "UpdateMethodResponse": "

    Updates an existing MethodResponse resource.

    ", + "UpdateModel": "

    Changes information about a model.

    ", + "UpdateResource": "

    Changes information about a Resource resource.

    ", + "UpdateRestApi": "

    Changes information about the specified API.

    ", + "UpdateStage": "

    Changes information about a Stage resource.

    " + }, + "service": "Amazon API Gateway

    Amazon API Gateway helps developers deliver robust, secure and scalable mobile and web application backends. Amazon API Gateway allows developers to securely connect mobile and web applications to APIs that run on AWS Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS.

    ", + "shapes": { + "Account": { + "base": "

    Represents an AWS account that is associated with Amazon API Gateway.

    ", + "refs": { + } + }, + "ApiKey": { + "base": "

    A resource that can be distributed to callers for executing Method resources that require an API key. API keys can be mapped to any Stage on any RestApi, which indicates that the callers with the API key can make requests to that stage.

    ", + "refs": { + "ListOfApiKey$member": null + } + }, + "ApiKeys": { + "base": "

    Represents a collection of ApiKey resources.

    ", + "refs": { + } + }, + "BadRequestException": { + "base": null, + "refs": { + } + }, + "BasePathMapping": { + "base": "

    Represents the base path that callers of the API must provide as part of the URL after the domain name.

    ", + "refs": { + "ListOfBasePathMapping$member": null + } + }, + "BasePathMappings": { + "base": "

    Represents a collection of BasePathMapping resources.

    ", + "refs": { + } + }, + "Blob": { + "base": null, + "refs": { + "SdkResponse$body": null + } + }, + "Boolean": { + "base": null, + "refs": { + "ApiKey$enabled": "

    Specifies whether the API Key can be used by callers.

    ", + "CreateApiKeyRequest$enabled": "

    Specifies whether the ApiKey can be used by callers.

    ", + "CreateStageRequest$cacheClusterEnabled": "

    Whether cache clustering is enabled for the stage.

    ", + "GetModelRequest$flatten": "

    Resolves all external model references and returns a flattened model schema.

    ", + "MethodSetting$metricsEnabled": "

    Specifies whether Amazon CloudWatch metrics are enabled for this method. The PATCH path for this setting is /{method_setting_key}/metrics/enabled, and the value is a Boolean.

    ", + "MethodSetting$dataTraceEnabled": "

    Specifies whether data trace logging is enabled for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/dataTrace, and the value is a Boolean.

    ", + "MethodSetting$cachingEnabled": "

    Specifies whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached. The PATCH path for this setting is /{method_setting_key}/caching/enabled, and the value is a Boolean.

    ", + "MethodSetting$cacheDataEncrypted": "

    Specifies whether the cached responses are encrypted. The PATCH path for this setting is /{method_setting_key}/caching/dataEncrypted, and the value is a Boolean.

    ", + "MethodSnapshot$apiKeyRequired": "

    Specifies whether the method requires a valid ApiKey.

    ", + "PutMethodRequest$apiKeyRequired": "

    Specifies whether the method requires a valid ApiKey.

    ", + "Stage$cacheClusterEnabled": "

    Specifies whether a cache cluster is enabled for the stage.

    " + } + }, + "CacheClusterSize": { + "base": "

    Returns the size of the CacheCluster.

    ", + "refs": { + "CreateDeploymentRequest$cacheClusterSize": "

    Specifies the cache cluster size for the Stage resource specified in the input, if a cache cluster is enabled.

    ", + "CreateStageRequest$cacheClusterSize": "

    The stage's cache cluster size.

    ", + "Stage$cacheClusterSize": "

    The size of the cache cluster for the stage, if enabled.

    " + } + }, + "CacheClusterStatus": { + "base": "

    Returns the status of the CacheCluster.

    ", + "refs": { + "Stage$cacheClusterStatus": "

    The status of the cache cluster for the stage, if enabled.

    " + } + }, + "ClientCertificate": { + "base": null, + "refs": { + "ListOfClientCertificate$member": null + } + }, + "ClientCertificates": { + "base": null, + "refs": { + } + }, + "ConflictException": { + "base": null, + "refs": { + } + }, + "CreateApiKeyRequest": { + "base": null, + "refs": { + } + }, + "CreateBasePathMappingRequest": { + "base": "

    Requests Amazon API Gateway to create a new BasePathMapping resource.

    ", + "refs": { + } + }, + "CreateDeploymentRequest": { + "base": "

    Requests Amazon API Gateway to create a Deployment resource.

    ", + "refs": { + } + }, + "CreateDomainNameRequest": { + "base": "

    A request to create a new domain name.

    ", + "refs": { + } + }, + "CreateModelRequest": { + "base": "

    Request to add a new Model to an existing RestApi resource.

    ", + "refs": { + } + }, + "CreateResourceRequest": { + "base": "

    Requests Amazon API Gateway to create a Resource resource.

    ", + "refs": { + } + }, + "CreateRestApiRequest": { + "base": "

    Request to add a new RestApi resource to your collection.

    ", + "refs": { + } + }, + "CreateStageRequest": { + "base": "

    Requests Amazon API Gateway to create a Stage resource.

    ", + "refs": { + } + }, + "DeleteApiKeyRequest": { + "base": "

    A request to delete the ApiKey resource.

    ", + "refs": { + } + }, + "DeleteBasePathMappingRequest": { + "base": "

    A request to delete the BasePathMapping resource.

    ", + "refs": { + } + }, + "DeleteClientCertificateRequest": { + "base": null, + "refs": { + } + }, + "DeleteDeploymentRequest": { + "base": "

    Requests Amazon API Gateway to delete a Deployment resource.

    ", + "refs": { + } + }, + "DeleteDomainNameRequest": { + "base": "

    A request to delete the DomainName resource.

    ", + "refs": { + } + }, + "DeleteIntegrationRequest": { + "base": "

    Represents a delete integration request.

    ", + "refs": { + } + }, + "DeleteIntegrationResponseRequest": { + "base": "

    Represents a delete integration response request.

    ", + "refs": { + } + }, + "DeleteMethodRequest": { + "base": "

    Request to delete an existing Method resource.

    ", + "refs": { + } + }, + "DeleteMethodResponseRequest": { + "base": "

    A request to delete an existing MethodResponse resource.

    ", + "refs": { + } + }, + "DeleteModelRequest": { + "base": "

    Request to delete an existing model in an existing RestApi resource.

    ", + "refs": { + } + }, + "DeleteResourceRequest": { + "base": "

    Request to delete a Resource.

    ", + "refs": { + } + }, + "DeleteRestApiRequest": { + "base": "

    Request to delete the specified API from your collection.

    ", + "refs": { + } + }, + "DeleteStageRequest": { + "base": "

    Requests Amazon API Gateway to delete a Stage resource.

    ", + "refs": { + } + }, + "Deployment": { + "base": "

    An immutable representation of a RestApi resource that can be called by users using Stages. A deployment must be associated with a Stage for it to be callable over the Internet.

    ", + "refs": { + "ListOfDeployment$member": null + } + }, + "Deployments": { + "base": "

    Represents a collection resource that contains zero or more references to your existing deployments, and links that guide you on ways to interact with your collection. The collection offers a paginated view of the contained deployments.

    ", + "refs": { + } + }, + "DomainName": { + "base": "

    Represents a domain name that is contained in a simpler, more intuitive URL that can be called.

    ", + "refs": { + "ListOfDomainName$member": null + } + }, + "DomainNames": { + "base": "

    Represents a collection of DomainName resources.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "MethodSetting$throttlingRateLimit": "

    Specifies the throttling rate limit. The PATCH path for this setting is /{method_setting_key}/throttling/rateLimit, and the value is a double.

    ", + "ThrottleSettings$rateLimit": "

    Returns the rateLimit when ThrottleSettings is called.

    " + } + }, + "FlushStageCacheRequest": { + "base": "

    Requests Amazon API Gateway to flush a stage's cache.

    ", + "refs": { + } + }, + "GenerateClientCertificateRequest": { + "base": null, + "refs": { + } + }, + "GetAccountRequest": { + "base": "

    Requests Amazon API Gateway to get information about the current Account resource.

    ", + "refs": { + } + }, + "GetApiKeyRequest": { + "base": "

    A request to get information about the current ApiKey resource.

    ", + "refs": { + } + }, + "GetApiKeysRequest": { + "base": "

    A request to get information about the current ApiKeys resource.

    ", + "refs": { + } + }, + "GetBasePathMappingRequest": { + "base": "

    Request to describe a BasePathMapping resource.

    ", + "refs": { + } + }, + "GetBasePathMappingsRequest": { + "base": "

    A request to get information about a collection of BasePathMapping resources.

    ", + "refs": { + } + }, + "GetClientCertificateRequest": { + "base": null, + "refs": { + } + }, + "GetClientCertificatesRequest": { + "base": null, + "refs": { + } + }, + "GetDeploymentRequest": { + "base": "

    Requests Amazon API Gateway to get information about a Deployment resource.

    ", + "refs": { + } + }, + "GetDeploymentsRequest": { + "base": "

    Requests Amazon API Gateway to get information about a Deployments collection.

    ", + "refs": { + } + }, + "GetDomainNameRequest": { + "base": "

    Request to get the name of a DomainName resource.

    ", + "refs": { + } + }, + "GetDomainNamesRequest": { + "base": "

    Request to describe a collection of DomainName resources.

    ", + "refs": { + } + }, + "GetIntegrationRequest": { + "base": "

    Represents a get integration request.

    ", + "refs": { + } + }, + "GetIntegrationResponseRequest": { + "base": "

    Represents a get integration response request.

    ", + "refs": { + } + }, + "GetMethodRequest": { + "base": "

    Request to describe an existing Method resource.

    ", + "refs": { + } + }, + "GetMethodResponseRequest": { + "base": "

    Request to describe a MethodResponse resource.

    ", + "refs": { + } + }, + "GetModelRequest": { + "base": "

    Request to list information about a model in an existing RestApi resource.

    ", + "refs": { + } + }, + "GetModelTemplateRequest": { + "base": "

    Request to generate a sample mapping template used to transform the payload.

    ", + "refs": { + } + }, + "GetModelsRequest": { + "base": "

    Request to list existing Models defined for a RestApi resource.

    ", + "refs": { + } + }, + "GetResourceRequest": { + "base": "

    Request to list information about a resource.

    ", + "refs": { + } + }, + "GetResourcesRequest": { + "base": "

    Request to list information about a collection of resources.

    ", + "refs": { + } + }, + "GetRestApiRequest": { + "base": "

    Request to list an existing RestApi defined for your collection.

    ", + "refs": { + } + }, + "GetRestApisRequest": { + "base": "

    Request to list existing RestApis defined for your collection.

    ", + "refs": { + } + }, + "GetSdkRequest": { + "base": null, + "refs": { + } + }, + "GetStageRequest": { + "base": "

    Requests Amazon API Gateway to get information about a Stage resource.

    ", + "refs": { + } + }, + "GetStagesRequest": { + "base": "

    Requests Amazon API Gateway to get information about one or more Stage resources.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "MethodSetting$throttlingBurstLimit": "

    Specifies the throttling burst limit. The PATCH path for this setting is /{method_setting_key}/throttling/burstLimit, and the value is an integer.

    ", + "MethodSetting$cacheTtlInSeconds": "

    Specifies the time to live (TTL) in seconds, for cached responses. The higher the TTL, the longer the response will be cached. The PATCH path for this setting is /{method_setting_key}/caching/ttlInSeconds, and the value is an integer.

    ", + "TestInvokeMethodResponse$status": "

    The HTTP status code.

    ", + "ThrottleSettings$burstLimit": "

    Returns the burstLimit when ThrottleSettings is called.

    " + } + }, + "Integration": { + "base": "

    Represents an HTTP, AWS, or Mock integration.

    ", + "refs": { + "Method$methodIntegration": "

    The method's integration.

    " + } + }, + "IntegrationResponse": { + "base": "

    Represents an integration response. The status code must map to an existing MethodResponse, and parameters and templates can be used to transform the backend response.

    ", + "refs": { + "MapOfIntegrationResponse$value": null + } + }, + "IntegrationType": { + "base": "

    The integration type. Possible values are HTTP, AWS, or Mock.

    ", + "refs": { + "Integration$type": "

    Specifies the integration's type.

    ", + "PutIntegrationRequest$type": "

    Specifies a put integration input's type.

    " + } + }, + "LimitExceededException": { + "base": null, + "refs": { + } + }, + "ListOfApiKey": { + "base": null, + "refs": { + "ApiKeys$items": "

    The current page of any ApiKey resources in the collection of ApiKey resources.

    " + } + }, + "ListOfBasePathMapping": { + "base": null, + "refs": { + "BasePathMappings$items": "

    The current page of any BasePathMapping resources in the collection of base path mapping resources.

    " + } + }, + "ListOfClientCertificate": { + "base": null, + "refs": { + "ClientCertificates$items": null + } + }, + "ListOfDeployment": { + "base": null, + "refs": { + "Deployments$items": "

    The current page of any Deployment resources in the collection of deployment resources.

    " + } + }, + "ListOfDomainName": { + "base": null, + "refs": { + "DomainNames$items": "

    The current page of any DomainName resources in the collection of DomainName resources.

    " + } + }, + "ListOfModel": { + "base": null, + "refs": { + "Models$items": "

    Gets the current Model resource in the collection.

    " + } + }, + "ListOfPatchOperation": { + "base": "A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.", + "refs": { + "UpdateAccountRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateApiKeyRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateBasePathMappingRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateClientCertificateRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateDeploymentRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateDomainNameRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateIntegrationRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateIntegrationResponseRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateMethodRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateMethodResponseRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateModelRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateResourceRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateRestApiRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    ", + "UpdateStageRequest$patchOperations": "

    A list of operations describing the updates to apply to the specified resource. The patches are applied in the order specified in the list.

    " + } + }, + "ListOfResource": { + "base": null, + "refs": { + "Resources$items": "

    Gets the current Resource resource in the collection.

    " + } + }, + "ListOfRestApi": { + "base": null, + "refs": { + "RestApis$items": "

    An array of links to the current page of RestApi resources.

    " + } + }, + "ListOfStage": { + "base": null, + "refs": { + "Stages$item": "

    An individual Stage resource.

    " + } + }, + "ListOfStageKeys": { + "base": null, + "refs": { + "CreateApiKeyRequest$stageKeys": "

    A list of StageKey resources that are associated with the ApiKey resource.

    " + } + }, + "ListOfString": { + "base": null, + "refs": { + "ApiKey$stageKeys": "

    A list of Stage resources that are associated with the ApiKey resource.

    ", + "Integration$cacheKeyParameters": "

    Specifies the integration's cache key parameters.

    ", + "PutIntegrationRequest$cacheKeyParameters": "

    Specifies a put integration input's cache key parameters.

    " + } + }, + "Long": { + "base": null, + "refs": { + "TestInvokeMethodResponse$latency": "

    The execution latency of the test invoke request.

    " + } + }, + "MapOfHeaderValues": { + "base": null, + "refs": { + "TestInvokeMethodRequest$headers": null, + "TestInvokeMethodResponse$headers": "

    The headers of the HTTP response.

    " + } + }, + "MapOfIntegrationResponse": { + "base": null, + "refs": { + "Integration$integrationResponses": "

    Specifies the integration's responses.

    " + } + }, + "MapOfMethod": { + "base": null, + "refs": { + "Resource$resourceMethods": "

    Map of methods for this resource, which is included only if requested using the embed option.

    " + } + }, + "MapOfMethodResponse": { + "base": null, + "refs": { + "Method$methodResponses": "

    Represents available responses that can be sent to the caller. Method responses are represented as a key/value map, with an HTTP status code as the key and a MethodResponse as the value. The status codes are available for the Integration responses to map to.

    " + } + }, + "MapOfMethodSettings": { + "base": null, + "refs": { + "Stage$methodSettings": "

    A map that defines the method settings for a Stage resource. Keys are defined as {resource_path}/{http_method} for an individual method override, or \\*/\\* for the settings applied to all methods in the stage.

    " + } + }, + "MapOfMethodSnapshot": { + "base": null, + "refs": { + "PathToMapOfMethodSnapshot$value": null + } + }, + "MapOfStringToBoolean": { + "base": null, + "refs": { + "Method$requestParameters": "

    Represents request parameters that can be accepted by Amazon API Gateway. Request parameters are represented as a key/value map, with a source as the key and a Boolean flag as the value. The Boolean flag is used to specify whether the parameter is required. A source must match the pattern method.request.{location}.{name}, where location is either querystring, path, or header. name is a valid, unique parameter name. Sources specified here are available to the integration for mapping to integration request parameters or templates.

    ", + "MethodResponse$responseParameters": "

    Represents response parameters that can be sent back to the caller by Amazon API Gateway. Response parameters are represented as a key/value map, with a destination as the key and a boolean flag as the value, which is used to specify whether the parameter is required. A destination must match the pattern method.response.header.{name}, where name is a valid, unique header name. Destinations specified here are available to the integration for mapping from integration response parameters.

    ", + "PutMethodRequest$requestParameters": "

    Represents request parameters that can be accepted by Amazon API Gateway. Request parameters are represented as a key/value map, with a source as the key and a Boolean flag as the value. The Boolean flag is used to specify whether the parameter is required. A source must match the pattern method.request.{location}.{name}, where location is either querystring, path, or header. name is a valid, unique parameter name.

    ", + "PutMethodResponseRequest$responseParameters": "

    Represents response parameters that can be sent back to the caller by Amazon API Gateway. Response parameters are represented as a key/value map, with a destination as the key and a Boolean flag as the value. The Boolean flag is used to specify whether the parameter is required. A destination must match the pattern method.response.header.{name}, where name is a valid, unique header name. Destinations specified here are available to the integration for mapping from integration response parameters.

    " + } + }, + "MapOfStringToString": { + "base": null, + "refs": { + "CreateDeploymentRequest$variables": "

    A map that defines the stage variables for the Stage resource that is associated with the new deployment. Variable names can have alphabetic characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+

    ", + "CreateStageRequest$variables": "

    A map that defines the stage variables for the new Stage resource. Variable names can have alphabetic characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+

    ", + "GetSdkRequest$parameters": null, + "Integration$requestParameters": "

    Represents requests parameters that are sent with the backend request. Request parameters are represented as a key/value map, with a destination as the key and a source as the value. A source must match an existing method request parameter, or a static value. Static values must be enclosed with single quotes, and be pre-encoded based on their destination in the request. The destination must match the pattern integration.request.{location}.{name}, where location is either querystring, path, or header. name must be a valid, unique parameter name.

    ", + "Integration$requestTemplates": "

    Specifies the integration's request templates.

    ", + "IntegrationResponse$responseParameters": "

    Represents response parameters that can be read from the backend response. Response parameters are represented as a key/value map, with a destination as the key and a source as the value. A destination must match an existing response parameter in the Method. The source can be a header from the backend response, or a static value. Static values are specified using enclosing single quotes, and backend response headers can be read using the pattern integration.response.header.{name}.

    ", + "IntegrationResponse$responseTemplates": "

    Specifies the templates used to transform the integration response body. Response templates are represented as a key/value map, with a content-type as the key and a template as the value.

    ", + "Method$requestModels": "

    Specifies the Model resources used for the request's content type. Request models are represented as a key/value map, with a content type as the key and a Model name as the value.

    ", + "MethodResponse$responseModels": "

    Specifies the Model resources used for the response's content-type. Response models are represented as a key/value map, with a content-type as the key and a Model name as the value.

    ", + "PutIntegrationRequest$requestParameters": "

    Represents request parameters that are sent with the backend request. Request parameters are represented as a key/value map, with a destination as the key and a source as the value. A source must match an existing method request parameter, or a static value. Static values must be enclosed with single quotes, and be pre-encoded based on their destination in the request. The destination must match the pattern integration.request.{location}.{name}, where location is either querystring, path, or header. name must be a valid, unique parameter name.

    ", + "PutIntegrationRequest$requestTemplates": "

    Specifies the templates used to transform the method request body. Request templates are represented as a key/value map, with a content-type as the key and a template as the value.

    ", + "PutIntegrationResponseRequest$responseParameters": "

    Represents response parameters that can be read from the backend response. Response parameters are represented as a key/value map, with a destination as the key and a source as the value. A destination must match an existing response parameter in the Method. The source can be a header from the backend response, or a static value. Static values are specified using enclosing single quotes, and backend response headers can be read using the pattern integration.response.header.{name}.

    ", + "PutIntegrationResponseRequest$responseTemplates": "

    Specifies a put integration response's templates.

    ", + "PutMethodRequest$requestModels": "

    Specifies the Model resources used for the request's content type. Request models are represented as a key/value map, with a content type as the key and a Model name as the value.

    ", + "PutMethodResponseRequest$responseModels": "

    Specifies the Model resources used for the response's content type. Response models are represented as a key/value map, with a content type as the key and a Model name as the value.

    ", + "Stage$variables": "

    A map that defines the stage variables for a Stage resource. Variable names can have alphabetic characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+

    ", + "TestInvokeMethodRequest$stageVariables": null + } + }, + "Method": { + "base": "

    Represents a method.

    ", + "refs": { + "MapOfMethod$value": null + } + }, + "MethodResponse": { + "base": "

    Represents a method response. Amazon API Gateway sends back the status code to the caller as the HTTP status code. Parameters and models can be used to transform the response from the method's integration.

    ", + "refs": { + "MapOfMethodResponse$value": null + } + }, + "MethodSetting": { + "base": "

    Specifies the method setting properties.

    ", + "refs": { + "MapOfMethodSettings$value": null + } + }, + "MethodSnapshot": { + "base": "

    Represents a summary of a Method resource, given a particular date and time.

    ", + "refs": { + "MapOfMethodSnapshot$value": null + } + }, + "Model": { + "base": "

    Represents the structure of a request or response payload for a method.

    ", + "refs": { + "ListOfModel$member": null + } + }, + "Models": { + "base": "

    Represents a collection of Model resources.

    ", + "refs": { + } + }, + "NotFoundException": { + "base": null, + "refs": { + } + }, + "NullableBoolean": { + "base": null, + "refs": { + "CreateDeploymentRequest$cacheClusterEnabled": "

    Enables a cache cluster for the Stage resource specified in the input.

    ", + "MapOfStringToBoolean$value": null, + "Method$apiKeyRequired": "

    Specifies whether the method requires a valid ApiKey.

    " + } + }, + "NullableInteger": { + "base": null, + "refs": { + "GetApiKeysRequest$limit": "

    The maximum number of ApiKeys to get information about.

    ", + "GetBasePathMappingsRequest$limit": "

    The maximum number of BasePathMapping resources in the collection to get information about. The default limit is 25. It should be an integer between 1 - 500.

    ", + "GetClientCertificatesRequest$limit": null, + "GetDeploymentsRequest$limit": "

    The maximum number of Deployment resources in the collection to get information about. The default limit is 25. It should be an integer between 1 - 500.

    ", + "GetDomainNamesRequest$limit": "

    The maximum number of DomainName resources in the collection to get information about. The default limit is 25. It should be an integer between 1 - 500.

    ", + "GetModelsRequest$limit": "

    The maximum number of models in the collection to get information about. The default limit is 25. It should be an integer between 1 - 500.

    ", + "GetResourcesRequest$limit": "

    The maximum number of Resource resources in the collection to get information about. The default limit is 25. It should be an integer between 1 - 500.

    ", + "GetRestApisRequest$limit": "

    The maximum number of RestApi resources in the collection to get information about. The default limit is 25. It should be an integer between 1 - 500.

    " + } + }, + "PatchOperation": { + "base": "A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.", + "refs": { + "ListOfPatchOperation$member": null + } + }, + "PathToMapOfMethodSnapshot": { + "base": null, + "refs": { + "Deployment$apiSummary": "

    Gets a summary of the RestApi at the date and time that the deployment resource was created.

    " + } + }, + "PutIntegrationRequest": { + "base": "

    Represents a put integration request.

    ", + "refs": { + } + }, + "PutIntegrationResponseRequest": { + "base": "

    Represents a put integration response request.

    ", + "refs": { + } + }, + "PutMethodRequest": { + "base": "

    Request to add a method to an existing Resource resource.

    ", + "refs": { + } + }, + "PutMethodResponseRequest": { + "base": "

    Request to add a MethodResponse to an existing Method resource.

    ", + "refs": { + } + }, + "Resource": { + "base": "

    Represents a resource.

    ", + "refs": { + "ListOfResource$member": null + } + }, + "Resources": { + "base": "

    Represents a collection of Resource resources.

    ", + "refs": { + } + }, + "RestApi": { + "base": "

    Represents a REST API.

    ", + "refs": { + "ListOfRestApi$member": null + } + }, + "RestApis": { + "base": "

    Contains references to your APIs and links that guide you in ways to interact with your collection. A collection offers a paginated view of your APIs.

    ", + "refs": { + } + }, + "SdkResponse": { + "base": null, + "refs": { + } + }, + "ServiceUnavailableException": { + "base": null, + "refs": { + } + }, + "Stage": { + "base": "

    Represents a unique identifier for a version of a deployed RestApi that is callable by users.

    ", + "refs": { + "ListOfStage$member": null + } + }, + "StageKey": { + "base": "

    A reference to a unique stage identified in the format {restApiId}/{stage}.

    ", + "refs": { + "ListOfStageKeys$member": null + } + }, + "Stages": { + "base": "

    A list of Stage resource that are associated with the ApiKey resource.

    ", + "refs": { + } + }, + "StatusCode": { + "base": "

    The status code.

    ", + "refs": { + "DeleteIntegrationResponseRequest$statusCode": "

    Specifies a delete integration response request's status code.

    ", + "DeleteMethodResponseRequest$statusCode": "

    The status code identifier for the MethodResponse resource.

    ", + "GetIntegrationResponseRequest$statusCode": "

    Specifies a get integration response request's status code.

    ", + "GetMethodResponseRequest$statusCode": "

    The status code identifier for the MethodResponse resource.

    ", + "IntegrationResponse$statusCode": "

    Specifies the status code that is used to map the integration response to an existing MethodResponse.

    ", + "MethodResponse$statusCode": "

    The method response's status code.

    ", + "PutIntegrationResponseRequest$statusCode": "

    Specifies the status code that is used to map the integration response to an existing MethodResponse.

    ", + "PutMethodResponseRequest$statusCode": "

    The method response's status code.

    ", + "UpdateIntegrationResponseRequest$statusCode": "

    Specifies an update integration response request's status code.

    ", + "UpdateMethodResponseRequest$statusCode": "

    The status code identifier for the MethodResponse resource.

    " + } + }, + "String": { + "base": null, + "refs": { + "Account$cloudwatchRoleArn": "

    Specifies the Amazon resource name (ARN) of an Amazon CloudWatch role for the current Account resource.

    ", + "ApiKey$id": "

    The identifier of the API Key.

    ", + "ApiKey$name": "

    The name of the API Key.

    ", + "ApiKey$description": "

    The description of the API Key.

    ", + "ApiKeys$position": null, + "BadRequestException$message": null, + "BasePathMapping$basePath": "

    The base path name that callers of the API must provide as part of the URL after the domain name.

    ", + "BasePathMapping$restApiId": "

    The name of the API.

    ", + "BasePathMapping$stage": "

    The name of the API's stage.

    ", + "BasePathMappings$position": null, + "ClientCertificate$clientCertificateId": null, + "ClientCertificate$description": null, + "ClientCertificate$pemEncodedCertificate": null, + "ClientCertificates$position": null, + "ConflictException$message": null, + "CreateApiKeyRequest$name": "

    The name of the ApiKey.

    ", + "CreateApiKeyRequest$description": "

    The description of the ApiKey.

    ", + "CreateBasePathMappingRequest$domainName": "

    The domain name of the BasePathMapping resource to create.

    ", + "CreateBasePathMappingRequest$basePath": "

    The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Leave this blank if you do not want callers to specify a base path name after the domain name.

    ", + "CreateBasePathMappingRequest$restApiId": "

    The name of the API that you want to apply this mapping to.

    ", + "CreateBasePathMappingRequest$stage": "

    The name of the API's stage that you want to use for this mapping. Leave this blank if you do not want callers to explicitly specify the stage name after any base path name.

    ", + "CreateDeploymentRequest$restApiId": "

    The RestApi resource identifier for the Deployment resource to create.

    ", + "CreateDeploymentRequest$stageName": "

    The name of the Stage resource for the Deployment resource to create.

    ", + "CreateDeploymentRequest$stageDescription": "

    The description of the Stage resource for the Deployment resource to create.

    ", + "CreateDeploymentRequest$description": "

    The description for the Deployment resource to create.

    ", + "CreateDomainNameRequest$domainName": "

    The name of the DomainName resource.

    ", + "CreateDomainNameRequest$certificateName": "

    The name of the certificate.

    ", + "CreateDomainNameRequest$certificateBody": "

    The body of the server certificate provided by your certificate authority.

    ", + "CreateDomainNameRequest$certificatePrivateKey": "

    Your certificate's private key.

    ", + "CreateDomainNameRequest$certificateChain": "

    The intermediate certificates and optionally the root certificate, one after the other without any blank lines. If you include the root certificate, your certificate chain must start with intermediate certificates and end with the root certificate. Use the intermediate certificates that were provided by your certificate authority. Do not include any intermediaries that are not in the chain of trust path.

    ", + "CreateModelRequest$restApiId": "

    The RestApi identifier under which the Model will be created.

    ", + "CreateModelRequest$name": "

    The name of the model.

    ", + "CreateModelRequest$description": "

    The description of the model.

    ", + "CreateModelRequest$schema": "

    The schema for the model. For application/json models, this should be JSON-schema draft v4 model.

    ", + "CreateModelRequest$contentType": "

    The content-type for the model.

    ", + "CreateResourceRequest$restApiId": "

    The identifier of the RestApi for the resource.

    ", + "CreateResourceRequest$parentId": "

    The parent resource's identifier.

    ", + "CreateResourceRequest$pathPart": "

    The last path segment for this resource.

    ", + "CreateRestApiRequest$name": "

    The name of the RestApi.

    ", + "CreateRestApiRequest$description": "

    The description of the RestApi.

    ", + "CreateRestApiRequest$cloneFrom": "

    The name of the RestApi that you want to clone from.

    ", + "CreateStageRequest$restApiId": "

    The identifier of the RestApi resource for the Stage resource to create.

    ", + "CreateStageRequest$stageName": "

    The name for the Stage resource.

    ", + "CreateStageRequest$deploymentId": "

    The identifier of the Deployment resource for the Stage resource.

    ", + "CreateStageRequest$description": "

    The description of the Stage resource.

    ", + "DeleteApiKeyRequest$apiKey": "

    The identifier of the ApiKey resource to be deleted.

    ", + "DeleteBasePathMappingRequest$domainName": "

    The domain name of the BasePathMapping resource to delete.

    ", + "DeleteBasePathMappingRequest$basePath": "

    The base path name of the BasePathMapping resource to delete.

    ", + "DeleteClientCertificateRequest$clientCertificateId": null, + "DeleteDeploymentRequest$restApiId": "

    The identifier of the RestApi resource for the Deployment resource to delete.

    ", + "DeleteDeploymentRequest$deploymentId": "

    The identifier of the Deployment resource to delete.

    ", + "DeleteDomainNameRequest$domainName": "

    The name of the DomainName resource to be deleted.

    ", + "DeleteIntegrationRequest$restApiId": "

    Specifies a delete integration request's API identifier.

    ", + "DeleteIntegrationRequest$resourceId": "

    Specifies a delete integration request's resource identifier.

    ", + "DeleteIntegrationRequest$httpMethod": "

    Specifies a delete integration request's HTTP method.

    ", + "DeleteIntegrationResponseRequest$restApiId": "

    Specifies a delete integration response request's API identifier.

    ", + "DeleteIntegrationResponseRequest$resourceId": "

    Specifies a delete integration response request's resource identifier.

    ", + "DeleteIntegrationResponseRequest$httpMethod": "

    Specifies a delete integration response request's HTTP method.

    ", + "DeleteMethodRequest$restApiId": "

    The RestApi identifier for the Method resource.

    ", + "DeleteMethodRequest$resourceId": "

    The Resource identifier for the Method resource.

    ", + "DeleteMethodRequest$httpMethod": "

    The HTTP verb that identifies the Method resource.

    ", + "DeleteMethodResponseRequest$restApiId": "

    The RestApi identifier for the MethodResponse resource.

    ", + "DeleteMethodResponseRequest$resourceId": "

    The Resource identifier for the MethodResponse resource.

    ", + "DeleteMethodResponseRequest$httpMethod": "

    The HTTP verb identifier for the parent Method resource.

    ", + "DeleteModelRequest$restApiId": "

    The RestApi under which the model will be deleted.

    ", + "DeleteModelRequest$modelName": "

    The name of the model to delete.

    ", + "DeleteResourceRequest$restApiId": "

    The RestApi identifier for the Resource resource.

    ", + "DeleteResourceRequest$resourceId": "

    The identifier of the Resource resource.

    ", + "DeleteRestApiRequest$restApiId": "

    The ID of the RestApi you want to delete.

    ", + "DeleteStageRequest$restApiId": "

    The identifier of the RestApi resource for the Stage resource to delete.

    ", + "DeleteStageRequest$stageName": "

    The name of the Stage resource to delete.

    ", + "Deployment$id": "

    The identifier for the deployment resource.

    ", + "Deployment$description": "

    The description for the deployment resource.

    ", + "Deployments$position": null, + "DomainName$domainName": "

    The name of the DomainName resource.

    ", + "DomainName$certificateName": "

    The name of the certificate.

    ", + "DomainName$distributionDomainName": "

    The domain name of the Amazon CloudFront distribution. For more information, see the Amazon CloudFront documentation.

    ", + "DomainNames$position": null, + "FlushStageCacheRequest$restApiId": "

    The API identifier of the stage to flush its cache.

    ", + "FlushStageCacheRequest$stageName": "

    The name of the stage to flush its cache.

    ", + "GenerateClientCertificateRequest$description": null, + "GetApiKeyRequest$apiKey": "

    The identifier of the ApiKey resource.

    ", + "GetApiKeysRequest$position": "

    The position of the current ApiKeys resource to get information about.

    ", + "GetBasePathMappingRequest$domainName": "

    The domain name of the BasePathMapping resource to be described.

    ", + "GetBasePathMappingRequest$basePath": "

    The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Leave this blank if you do not want callers to specify any base path name after the domain name.

    ", + "GetBasePathMappingsRequest$domainName": "

    The domain name of a BasePathMapping resource.

    ", + "GetBasePathMappingsRequest$position": "

    The position of the current BasePathMapping resource in the collection to get information about.

    ", + "GetClientCertificateRequest$clientCertificateId": null, + "GetClientCertificatesRequest$position": null, + "GetDeploymentRequest$restApiId": "

    The identifier of the RestApi resource for the Deployment resource to get information about.

    ", + "GetDeploymentRequest$deploymentId": "

    The identifier of the Deployment resource to get information about.

    ", + "GetDeploymentsRequest$restApiId": "

    The identifier of the RestApi resource for the collection of Deployment resources to get information about.

    ", + "GetDeploymentsRequest$position": "

    The position of the current Deployment resource in the collection to get information about.

    ", + "GetDomainNameRequest$domainName": "

    The name of the DomainName resource.

    ", + "GetDomainNamesRequest$position": "

    The position of the current domain names to get information about.

    ", + "GetIntegrationRequest$restApiId": "

    Specifies a get integration request's API identifier.

    ", + "GetIntegrationRequest$resourceId": "

    Specifies a get integration request's resource identifier

    ", + "GetIntegrationRequest$httpMethod": "

    Specifies a get integration request's HTTP method.

    ", + "GetIntegrationResponseRequest$restApiId": "

    Specifies a get integration response request's API identifier.

    ", + "GetIntegrationResponseRequest$resourceId": "

    Specifies a get integration response request's resource identifier.

    ", + "GetIntegrationResponseRequest$httpMethod": "

    Specifies a get integration response request's HTTP method.

    ", + "GetMethodRequest$restApiId": "

    The RestApi identifier for the Method resource.

    ", + "GetMethodRequest$resourceId": "

    The Resource identifier for the Method resource.

    ", + "GetMethodRequest$httpMethod": "

    Specifies the put method request's HTTP method type.

    ", + "GetMethodResponseRequest$restApiId": "

    The RestApi identifier for the MethodResponse resource.

    ", + "GetMethodResponseRequest$resourceId": "

    The Resource identifier for the MethodResponse resource.

    ", + "GetMethodResponseRequest$httpMethod": "

    The HTTP verb identifier for the parent Method resource.

    ", + "GetModelRequest$restApiId": "

    The RestApi identifier under which the Model exists.

    ", + "GetModelRequest$modelName": "

    The name of the model as an identifier.

    ", + "GetModelTemplateRequest$restApiId": "

    The ID of the RestApi under which the model exists.

    ", + "GetModelTemplateRequest$modelName": "

    The name of the model for which to generate a template.

    ", + "GetModelsRequest$restApiId": "

    The RestApi identifier.

    ", + "GetModelsRequest$position": "

    The position of the next set of results in the Models resource to get information about.

    ", + "GetResourceRequest$restApiId": "

    The RestApi identifier for the resource.

    ", + "GetResourceRequest$resourceId": "

    The identifier for the Resource resource.

    ", + "GetResourcesRequest$restApiId": "

    The RestApi identifier for the Resource.

    ", + "GetResourcesRequest$position": "

    The position of the next set of results in the current Resources resource to get information about.

    ", + "GetRestApiRequest$restApiId": "

    The identifier of the RestApi resource.

    ", + "GetRestApisRequest$position": "

    The position of the current RestApis resource in the collection to get information about.

    ", + "GetSdkRequest$restApiId": null, + "GetSdkRequest$stageName": null, + "GetSdkRequest$sdkType": null, + "GetStageRequest$restApiId": "

    The identifier of the RestApi resource for the Stage resource to get information about.

    ", + "GetStageRequest$stageName": "

    The name of the Stage resource to get information about.

    ", + "GetStagesRequest$restApiId": "

    The stages' API identifiers.

    ", + "GetStagesRequest$deploymentId": "

    The stages' deployment identifiers.

    ", + "Integration$httpMethod": "

    Specifies the integration's HTTP method type.

    ", + "Integration$uri": "

    Specifies the integration's Uniform Resource Identifier (URI). For HTTP integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification. For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{service}:{path|action}/{service_api}. Region and service are used to determine the right endpoint. For AWS services that use the Action= query string parameter, service_api should be a valid action for the desired service. For RESTful AWS service APIs, path is used to indicate that the remaining substring in the URI should be treated as the path to the resource, including the initial /.

    ", + "Integration$credentials": "

    Specifies the credentials required for the integration, if any. For AWS integrations, three options are available. To specify an IAM Role for Amazon API Gateway to assume, use the role's Amazon Resource Name (ARN). To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\\*:user/\\*. To use resource-based permissions on supported AWS services, specify null.

    ", + "Integration$cacheNamespace": "

    Specifies the integration's cache namespace.

    ", + "IntegrationResponse$selectionPattern": "

    Specifies the regular expression (regex) pattern used to choose an integration response based on the response from the backend. If the backend is an AWS Lambda function, the AWS Lambda function error header is matched. For all other HTTP and AWS backends, the HTTP status code is matched.

    ", + "LimitExceededException$retryAfterSeconds": null, + "LimitExceededException$message": null, + "ListOfString$member": null, + "MapOfHeaderValues$key": null, + "MapOfHeaderValues$value": null, + "MapOfIntegrationResponse$key": null, + "MapOfMethod$key": null, + "MapOfMethodResponse$key": null, + "MapOfMethodSettings$key": null, + "MapOfMethodSnapshot$key": null, + "MapOfStringToBoolean$key": null, + "MapOfStringToString$key": null, + "MapOfStringToString$value": null, + "Method$httpMethod": "

    The HTTP method.

    ", + "Method$authorizationType": "

    The method's authorization type.

    ", + "MethodSetting$loggingLevel": "

    Specifies the logging level for this method, which effects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, and the available levels are OFF, ERROR, and INFO.

    ", + "MethodSnapshot$authorizationType": "

    Specifies the type of authorization used for the method.

    ", + "Model$id": "

    The identifier for the model resource.

    ", + "Model$name": "

    The name of the model.

    ", + "Model$description": "

    The description of the model.

    ", + "Model$schema": "

    The schema for the model. For application/json models, this should be JSON-schema draft v4 model.

    ", + "Model$contentType": "

    The content-type for the model.

    ", + "Models$position": null, + "NotFoundException$message": null, + "PatchOperation$path": "

    Operation objects MUST have exactly one \"path\" member. That member's value is a string containing a `JSON-Pointer` value that references a location within the target document (the \"target location\") where the operation is performed.

    ", + "PatchOperation$value": "

    The actual value content.

    ", + "PatchOperation$from": "

    The \"move\" and \"copy\" operation object MUST contain a \"from\" member, which is a string containing a JSON Pointer value that references the location in the target document to move the value from.

    ", + "PathToMapOfMethodSnapshot$key": null, + "PutIntegrationRequest$restApiId": "

    Specifies a put integration request's API identifier.

    ", + "PutIntegrationRequest$resourceId": "

    Specifies a put integration request's resource ID.

    ", + "PutIntegrationRequest$httpMethod": "

    Specifies a put integration request's HTTP method.

    ", + "PutIntegrationRequest$integrationHttpMethod": "

    Specifies a put integration HTTP method.

    ", + "PutIntegrationRequest$uri": "

    Specifies a put integration input's Uniform Resource Identifier (URI).

    ", + "PutIntegrationRequest$credentials": "

    Specifies whether credentials are required for a put integration.

    ", + "PutIntegrationRequest$cacheNamespace": "

    Specifies a put integration input's cache namespace.

    ", + "PutIntegrationResponseRequest$restApiId": "

    Specifies a put integration response request's API identifier.

    ", + "PutIntegrationResponseRequest$resourceId": "

    Specifies a put integration response request's resource identifier.

    ", + "PutIntegrationResponseRequest$httpMethod": "

    Specifies a put integration response request's HTTP method.

    ", + "PutIntegrationResponseRequest$selectionPattern": "

    Specifies the selection pattern of a put integration response.

    ", + "PutMethodRequest$restApiId": "

    The RestApi identifier for the new Method resource.

    ", + "PutMethodRequest$resourceId": "

    The Resource identifier for the new Method resource.

    ", + "PutMethodRequest$httpMethod": "

    Specifies the put method request's HTTP method type.

    ", + "PutMethodRequest$authorizationType": "

    Specifies the type of authorization used for the method.

    ", + "PutMethodResponseRequest$restApiId": "

    The RestApi identifier for the Method resource.

    ", + "PutMethodResponseRequest$resourceId": "

    The Resource identifier for the Method resource.

    ", + "PutMethodResponseRequest$httpMethod": "

    The HTTP verb that identifies the Method resource.

    ", + "Resource$id": "

    The resource's identifier.

    ", + "Resource$parentId": "

    The parent resource's identifier.

    ", + "Resource$pathPart": "

    The last path segment for this resource.

    ", + "Resource$path": "

    The full path for this resource.

    ", + "Resources$position": null, + "RestApi$id": "

    The API's identifier. This identifier is unique across all of your APIs in Amazon API Gateway.

    ", + "RestApi$name": "

    The API's name.

    ", + "RestApi$description": "

    The API's description.

    ", + "RestApis$position": null, + "SdkResponse$contentType": null, + "SdkResponse$contentDisposition": null, + "ServiceUnavailableException$retryAfterSeconds": null, + "ServiceUnavailableException$message": null, + "Stage$deploymentId": "

    The identifier of the Deployment that the stage points to.

    ", + "Stage$clientCertificateId": null, + "Stage$stageName": "

    The name of the stage is the first path segment in the Uniform Resource Identifier (URI) of a call to Amazon API Gateway.

    ", + "Stage$description": "

    The stage's description.

    ", + "StageKey$restApiId": "

    A list of Stage resources that are associated with the ApiKey resource.

    ", + "StageKey$stageName": "

    The stage name in the RestApi that the stage key references.

    ", + "Template$value": "

    The Apache Velocity Template Language (VTL) template content used for the template resource.

    ", + "TestInvokeMethodRequest$restApiId": null, + "TestInvokeMethodRequest$resourceId": null, + "TestInvokeMethodRequest$httpMethod": null, + "TestInvokeMethodRequest$pathWithQueryString": null, + "TestInvokeMethodRequest$body": null, + "TestInvokeMethodRequest$clientCertificateId": null, + "TestInvokeMethodResponse$body": "

    The body of HTTP response.

    ", + "TestInvokeMethodResponse$log": "

    The Amazon API Gateway execution log for the test invoke request.

    ", + "TooManyRequestsException$retryAfterSeconds": null, + "TooManyRequestsException$message": null, + "UnauthorizedException$message": null, + "UpdateApiKeyRequest$apiKey": "

    The identifier of the ApiKey resource to be updated.

    ", + "UpdateBasePathMappingRequest$domainName": "

    The domain name of the BasePathMapping resource to change.

    ", + "UpdateBasePathMappingRequest$basePath": "

    The base path of the BasePathMapping resource to change.

    ", + "UpdateClientCertificateRequest$clientCertificateId": null, + "UpdateDeploymentRequest$restApiId": "

    The replacement identifier of the RestApi resource for the Deployment resource to change information about.

    ", + "UpdateDeploymentRequest$deploymentId": "

    The replacment identifier for the Deployment resource to change information about.

    ", + "UpdateDomainNameRequest$domainName": "

    The name of the DomainName resource to be changed.

    ", + "UpdateIntegrationRequest$restApiId": "

    Represents an update integration request's API identifier.

    ", + "UpdateIntegrationRequest$resourceId": "

    Represents an update integration request's resource identifier.

    ", + "UpdateIntegrationRequest$httpMethod": "

    Represents an update integration request's HTTP method.

    ", + "UpdateIntegrationResponseRequest$restApiId": "

    Specifies an update integration response request's API identifier.

    ", + "UpdateIntegrationResponseRequest$resourceId": "

    Specifies an update integration response request's resource identifier.

    ", + "UpdateIntegrationResponseRequest$httpMethod": "

    Specifies an update integration response request's HTTP method.

    ", + "UpdateMethodRequest$restApiId": "

    The RestApi identifier for the Method resource.

    ", + "UpdateMethodRequest$resourceId": "

    The Resource identifier for the Method resource.

    ", + "UpdateMethodRequest$httpMethod": "

    The HTTP verb that identifies the Method resource.

    ", + "UpdateMethodResponseRequest$restApiId": "

    The RestApi identifier for the MethodResponse resource.

    ", + "UpdateMethodResponseRequest$resourceId": "

    The Resource identifier for the MethodResponse resource.

    ", + "UpdateMethodResponseRequest$httpMethod": "

    The HTTP verb identifier for the parent Method resource.

    ", + "UpdateModelRequest$restApiId": "

    The RestApi identifier under which the model exists.

    ", + "UpdateModelRequest$modelName": "

    The name of the model to update.

    ", + "UpdateResourceRequest$restApiId": "

    The RestApi identifier for the Resource resource.

    ", + "UpdateResourceRequest$resourceId": "

    The identifier of the Resource resource.

    ", + "UpdateRestApiRequest$restApiId": "

    The ID of the RestApi you want to update.

    ", + "UpdateStageRequest$restApiId": "

    The identifier of the RestApi resource for the Stage resource to change information about.

    ", + "UpdateStageRequest$stageName": "

    The name of the Stage resource to change information about.

    " + } + }, + "Template": { + "base": "

    Represents a mapping template used to transform a payload.

    ", + "refs": { + } + }, + "TestInvokeMethodRequest": { + "base": null, + "refs": { + } + }, + "TestInvokeMethodResponse": { + "base": "

    Represents the response of the test invoke request in HTTP method.

    ", + "refs": { + } + }, + "ThrottleSettings": { + "base": "

    Returns the throttle settings.

    ", + "refs": { + "Account$throttleSettings": "

    Specifies the application programming interface (API) throttle settings for the current Account resource.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ApiKey$createdDate": "

    The date when the API Key was created, in ISO 8601 format.

    ", + "ApiKey$lastUpdatedDate": "

    When the API Key was last updated, in ISO 8601 format.

    ", + "ClientCertificate$createdDate": null, + "ClientCertificate$expirationDate": null, + "Deployment$createdDate": "

    The date and time that the deployment resource was created.

    ", + "DomainName$certificateUploadDate": "

    The date when the certificate was uploaded, in ISO 8601 format.

    ", + "RestApi$createdDate": "

    The date when the API was created, in ISO 8601 format.

    ", + "Stage$createdDate": "

    The date and time that the stage was created, in ISO 8601 format.

    ", + "Stage$lastUpdatedDate": "

    The date and time that information about the stage was last updated, in ISO 8601 format.

    " + } + }, + "TooManyRequestsException": { + "base": null, + "refs": { + } + }, + "UnauthorizedException": { + "base": null, + "refs": { + } + }, + "UpdateAccountRequest": { + "base": "

    Requests Amazon API Gateway to change information about the current Account resource.

    ", + "refs": { + } + }, + "UpdateApiKeyRequest": { + "base": "

    A request to change information about an ApiKey resource.

    ", + "refs": { + } + }, + "UpdateBasePathMappingRequest": { + "base": "

    A request to change information about the BasePathMapping resource.

    ", + "refs": { + } + }, + "UpdateClientCertificateRequest": { + "base": null, + "refs": { + } + }, + "UpdateDeploymentRequest": { + "base": "

    Requests Amazon API Gateway to change information about a Deployment resource.

    ", + "refs": { + } + }, + "UpdateDomainNameRequest": { + "base": "

    A request to change information about the DomainName resource.

    ", + "refs": { + } + }, + "UpdateIntegrationRequest": { + "base": "

    Represents an update integration request.

    ", + "refs": { + } + }, + "UpdateIntegrationResponseRequest": { + "base": "

    Represents an update integration response request.

    ", + "refs": { + } + }, + "UpdateMethodRequest": { + "base": "

    Request to update an existing Method resource.

    ", + "refs": { + } + }, + "UpdateMethodResponseRequest": { + "base": "

    A request to update an existing MethodResponse resource.

    ", + "refs": { + } + }, + "UpdateModelRequest": { + "base": "

    Request to update an existing model in an existing RestApi resource.

    ", + "refs": { + } + }, + "UpdateResourceRequest": { + "base": "

    Request to change information about a Resource resource.

    ", + "refs": { + } + }, + "UpdateRestApiRequest": { + "base": "

    Request to update an existing RestApi resource in your collection.

    ", + "refs": { + } + }, + "UpdateStageRequest": { + "base": "

    Requests Amazon API Gateway to change information about a Stage resource.

    ", + "refs": { + } + }, + "op": { + "base": null, + "refs": { + "PatchOperation$op": "

    A patch operation whose value indicates the operation to perform. Its value MUST be one of \"add\", \"remove\", \"replace\", \"move\", \"copy\", or \"test\"; other values are errors.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/apigateway/2015-07-09/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,52 @@ +{ + "pagination": { + "GetApiKeys": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetBasePathMappings": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetClientCertificates": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetDeployments": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetDomainNames": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetModels": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetResources": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + }, + "GetRestApis": { + "input_token": "position", + "output_token": "position", + "limit_key": "limit", + "result_key": "items" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/api-2.json 
--- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2032 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2011-01-01", + "endpointPrefix":"autoscaling", + "protocol":"query", + "serviceFullName":"Auto Scaling", + "signatureVersion":"v4", + "xmlNamespace":"http://autoscaling.amazonaws.com/doc/2011-01-01/" + }, + "operations":{ + "AttachInstances":{ + "name":"AttachInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachInstancesQuery"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "AttachLoadBalancers":{ + "name":"AttachLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachLoadBalancersType"}, + "output":{ + "shape":"AttachLoadBalancersResultType", + "resultWrapper":"AttachLoadBalancersResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "CompleteLifecycleAction":{ + "name":"CompleteLifecycleAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CompleteLifecycleActionType"}, + "output":{ + "shape":"CompleteLifecycleActionAnswer", + "resultWrapper":"CompleteLifecycleActionResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "CreateAutoScalingGroup":{ + "name":"CreateAutoScalingGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAutoScalingGroupType"}, + "errors":[ + {"shape":"AlreadyExistsFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "CreateLaunchConfiguration":{ + "name":"CreateLaunchConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLaunchConfigurationType"}, + "errors":[ + 
{"shape":"AlreadyExistsFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "CreateOrUpdateTags":{ + "name":"CreateOrUpdateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOrUpdateTagsType"}, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"AlreadyExistsFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteAutoScalingGroup":{ + "name":"DeleteAutoScalingGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAutoScalingGroupType"}, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceInUseFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteLaunchConfiguration":{ + "name":"DeleteLaunchConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LaunchConfigurationNameType"}, + "errors":[ + {"shape":"ResourceInUseFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteLifecycleHook":{ + "name":"DeleteLifecycleHook", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLifecycleHookType"}, + "output":{ + "shape":"DeleteLifecycleHookAnswer", + "resultWrapper":"DeleteLifecycleHookResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteNotificationConfiguration":{ + "name":"DeleteNotificationConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNotificationConfigurationType"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyType"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteScheduledAction":{ + "name":"DeleteScheduledAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteScheduledActionType"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DeleteTags":{ + 
"name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsType"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAccountLimits":{ + "name":"DescribeAccountLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeAccountLimitsAnswer", + "resultWrapper":"DescribeAccountLimitsResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAdjustmentTypes":{ + "name":"DescribeAdjustmentTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeAdjustmentTypesAnswer", + "resultWrapper":"DescribeAdjustmentTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAutoScalingGroups":{ + "name":"DescribeAutoScalingGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AutoScalingGroupNamesType"}, + "output":{ + "shape":"AutoScalingGroupsType", + "resultWrapper":"DescribeAutoScalingGroupsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAutoScalingInstances":{ + "name":"DescribeAutoScalingInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAutoScalingInstancesType"}, + "output":{ + "shape":"AutoScalingInstancesType", + "resultWrapper":"DescribeAutoScalingInstancesResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeAutoScalingNotificationTypes":{ + "name":"DescribeAutoScalingNotificationTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeAutoScalingNotificationTypesAnswer", + "resultWrapper":"DescribeAutoScalingNotificationTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeLaunchConfigurations":{ + "name":"DescribeLaunchConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"LaunchConfigurationNamesType"}, + "output":{ + "shape":"LaunchConfigurationsType", + "resultWrapper":"DescribeLaunchConfigurationsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeLifecycleHookTypes":{ + "name":"DescribeLifecycleHookTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeLifecycleHookTypesAnswer", + "resultWrapper":"DescribeLifecycleHookTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeLifecycleHooks":{ + "name":"DescribeLifecycleHooks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLifecycleHooksType"}, + "output":{ + "shape":"DescribeLifecycleHooksAnswer", + "resultWrapper":"DescribeLifecycleHooksResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeLoadBalancers":{ + "name":"DescribeLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBalancersRequest"}, + "output":{ + "shape":"DescribeLoadBalancersResponse", + "resultWrapper":"DescribeLoadBalancersResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeMetricCollectionTypes":{ + "name":"DescribeMetricCollectionTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeMetricCollectionTypesAnswer", + "resultWrapper":"DescribeMetricCollectionTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeNotificationConfigurations":{ + "name":"DescribeNotificationConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNotificationConfigurationsType"}, + "output":{ + "shape":"DescribeNotificationConfigurationsAnswer", + "resultWrapper":"DescribeNotificationConfigurationsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribePolicies":{ + 
"name":"DescribePolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePoliciesType"}, + "output":{ + "shape":"PoliciesType", + "resultWrapper":"DescribePoliciesResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeScalingActivities":{ + "name":"DescribeScalingActivities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingActivitiesType"}, + "output":{ + "shape":"ActivitiesType", + "resultWrapper":"DescribeScalingActivitiesResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeScalingProcessTypes":{ + "name":"DescribeScalingProcessTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"ProcessesType", + "resultWrapper":"DescribeScalingProcessTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeScheduledActions":{ + "name":"DescribeScheduledActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledActionsType"}, + "output":{ + "shape":"ScheduledActionsType", + "resultWrapper":"DescribeScheduledActionsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsType"}, + "output":{ + "shape":"TagsType", + "resultWrapper":"DescribeTagsResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ] + }, + "DescribeTerminationPolicyTypes":{ + "name":"DescribeTerminationPolicyTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"DescribeTerminationPolicyTypesAnswer", + "resultWrapper":"DescribeTerminationPolicyTypesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DetachInstances":{ + 
"name":"DetachInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachInstancesQuery"}, + "output":{ + "shape":"DetachInstancesAnswer", + "resultWrapper":"DetachInstancesResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DetachLoadBalancers":{ + "name":"DetachLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachLoadBalancersType"}, + "output":{ + "shape":"DetachLoadBalancersResultType", + "resultWrapper":"DetachLoadBalancersResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "DisableMetricsCollection":{ + "name":"DisableMetricsCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableMetricsCollectionQuery"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "EnableMetricsCollection":{ + "name":"EnableMetricsCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableMetricsCollectionQuery"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "EnterStandby":{ + "name":"EnterStandby", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnterStandbyQuery"}, + "output":{ + "shape":"EnterStandbyAnswer", + "resultWrapper":"EnterStandbyResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "ExecutePolicy":{ + "name":"ExecutePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecutePolicyType"}, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "ExitStandby":{ + "name":"ExitStandby", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExitStandbyQuery"}, + "output":{ + "shape":"ExitStandbyAnswer", + "resultWrapper":"ExitStandbyResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "PutLifecycleHook":{ + "name":"PutLifecycleHook", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"PutLifecycleHookType"}, + "output":{ + "shape":"PutLifecycleHookAnswer", + "resultWrapper":"PutLifecycleHookResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "PutNotificationConfiguration":{ + "name":"PutNotificationConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutNotificationConfigurationType"}, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "PutScalingPolicy":{ + "name":"PutScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutScalingPolicyType"}, + "output":{ + "shape":"PolicyARNType", + "resultWrapper":"PutScalingPolicyResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "PutScheduledUpdateGroupAction":{ + "name":"PutScheduledUpdateGroupAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutScheduledUpdateGroupActionType"}, + "errors":[ + {"shape":"AlreadyExistsFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "RecordLifecycleActionHeartbeat":{ + "name":"RecordLifecycleActionHeartbeat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RecordLifecycleActionHeartbeatType"}, + "output":{ + "shape":"RecordLifecycleActionHeartbeatAnswer", + "resultWrapper":"RecordLifecycleActionHeartbeatResult" + }, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "ResumeProcesses":{ + "name":"ResumeProcesses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScalingProcessQuery"}, + "errors":[ + {"shape":"ResourceInUseFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "SetDesiredCapacity":{ + "name":"SetDesiredCapacity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetDesiredCapacityType"}, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + 
{"shape":"ResourceContentionFault"} + ] + }, + "SetInstanceHealth":{ + "name":"SetInstanceHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetInstanceHealthQuery"}, + "errors":[ + {"shape":"ResourceContentionFault"} + ] + }, + "SetInstanceProtection":{ + "name":"SetInstanceProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetInstanceProtectionQuery"}, + "output":{ + "shape":"SetInstanceProtectionAnswer", + "resultWrapper":"SetInstanceProtectionResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "SuspendProcesses":{ + "name":"SuspendProcesses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScalingProcessQuery"}, + "errors":[ + {"shape":"ResourceInUseFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "TerminateInstanceInAutoScalingGroup":{ + "name":"TerminateInstanceInAutoScalingGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateInstanceInAutoScalingGroupType"}, + "output":{ + "shape":"ActivityType", + "resultWrapper":"TerminateInstanceInAutoScalingGroupResult" + }, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceContentionFault"} + ] + }, + "UpdateAutoScalingGroup":{ + "name":"UpdateAutoScalingGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAutoScalingGroupType"}, + "errors":[ + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceContentionFault"} + ] + } + }, + "shapes":{ + "Activities":{ + "type":"list", + "member":{"shape":"Activity"} + }, + "ActivitiesType":{ + "type":"structure", + "required":["Activities"], + "members":{ + "Activities":{"shape":"Activities"}, + "NextToken":{"shape":"XmlString"} + } + }, + "Activity":{ + "type":"structure", + "required":[ + "ActivityId", + "AutoScalingGroupName", + "Cause", + "StartTime", + "StatusCode" + ], + "members":{ + 
"ActivityId":{"shape":"XmlString"}, + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "Description":{"shape":"XmlString"}, + "Cause":{"shape":"XmlStringMaxLen1023"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "StatusCode":{"shape":"ScalingActivityStatusCode"}, + "StatusMessage":{"shape":"XmlStringMaxLen255"}, + "Progress":{"shape":"Progress"}, + "Details":{"shape":"XmlString"} + } + }, + "ActivityIds":{ + "type":"list", + "member":{"shape":"XmlString"} + }, + "ActivityType":{ + "type":"structure", + "members":{ + "Activity":{"shape":"Activity"} + } + }, + "AdjustmentType":{ + "type":"structure", + "members":{ + "AdjustmentType":{"shape":"XmlStringMaxLen255"} + } + }, + "AdjustmentTypes":{ + "type":"list", + "member":{"shape":"AdjustmentType"} + }, + "Alarm":{ + "type":"structure", + "members":{ + "AlarmName":{"shape":"XmlStringMaxLen255"}, + "AlarmARN":{"shape":"ResourceName"} + } + }, + "Alarms":{ + "type":"list", + "member":{"shape":"Alarm"} + }, + "AlreadyExistsFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AsciiStringMaxLen255":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z0-9\\-_\\/]+" + }, + "AssociatePublicIpAddress":{"type":"boolean"}, + "AttachInstancesQuery":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"} + } + }, + "AttachLoadBalancersResultType":{ + "type":"structure", + "members":{ + } + }, + "AttachLoadBalancersType":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LoadBalancerNames":{"shape":"LoadBalancerNames"} + } + }, + "AutoScalingGroup":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "MinSize", + "MaxSize", + "DesiredCapacity", + 
"DefaultCooldown", + "AvailabilityZones", + "HealthCheckType", + "CreatedTime" + ], + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "AutoScalingGroupARN":{"shape":"ResourceName"}, + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "DefaultCooldown":{"shape":"Cooldown"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "HealthCheckType":{"shape":"XmlStringMaxLen32"}, + "HealthCheckGracePeriod":{"shape":"HealthCheckGracePeriod"}, + "Instances":{"shape":"Instances"}, + "CreatedTime":{"shape":"TimestampType"}, + "SuspendedProcesses":{"shape":"SuspendedProcesses"}, + "PlacementGroup":{"shape":"XmlStringMaxLen255"}, + "VPCZoneIdentifier":{"shape":"XmlStringMaxLen255"}, + "EnabledMetrics":{"shape":"EnabledMetrics"}, + "Status":{"shape":"XmlStringMaxLen255"}, + "Tags":{"shape":"TagDescriptionList"}, + "TerminationPolicies":{"shape":"TerminationPolicies"}, + "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"} + } + }, + "AutoScalingGroupDesiredCapacity":{"type":"integer"}, + "AutoScalingGroupMaxSize":{"type":"integer"}, + "AutoScalingGroupMinSize":{"type":"integer"}, + "AutoScalingGroupNames":{ + "type":"list", + "member":{"shape":"ResourceName"} + }, + "AutoScalingGroupNamesType":{ + "type":"structure", + "members":{ + "AutoScalingGroupNames":{"shape":"AutoScalingGroupNames"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "AutoScalingGroups":{ + "type":"list", + "member":{"shape":"AutoScalingGroup"} + }, + "AutoScalingGroupsType":{ + "type":"structure", + "required":["AutoScalingGroups"], + "members":{ + "AutoScalingGroups":{"shape":"AutoScalingGroups"}, + "NextToken":{"shape":"XmlString"} + } + }, + "AutoScalingInstanceDetails":{ + "type":"structure", + "required":[ 
+ "InstanceId", + "AutoScalingGroupName", + "AvailabilityZone", + "LifecycleState", + "HealthStatus", + "LaunchConfigurationName", + "ProtectedFromScaleIn" + ], + "members":{ + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "AvailabilityZone":{"shape":"XmlStringMaxLen255"}, + "LifecycleState":{"shape":"XmlStringMaxLen32"}, + "HealthStatus":{"shape":"XmlStringMaxLen32"}, + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "ProtectedFromScaleIn":{"shape":"InstanceProtected"} + } + }, + "AutoScalingInstances":{ + "type":"list", + "member":{"shape":"AutoScalingInstanceDetails"} + }, + "AutoScalingInstancesType":{ + "type":"structure", + "members":{ + "AutoScalingInstances":{"shape":"AutoScalingInstances"}, + "NextToken":{"shape":"XmlString"} + } + }, + "AutoScalingNotificationTypes":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "AvailabilityZones":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"}, + "min":1 + }, + "BlockDeviceEbsDeleteOnTermination":{"type":"boolean"}, + "BlockDeviceEbsEncrypted":{"type":"boolean"}, + "BlockDeviceEbsIops":{ + "type":"integer", + "max":20000, + "min":100 + }, + "BlockDeviceEbsVolumeSize":{ + "type":"integer", + "max":16384, + "min":1 + }, + "BlockDeviceEbsVolumeType":{ + "type":"string", + "max":255, + "min":1 + }, + "BlockDeviceMapping":{ + "type":"structure", + "required":["DeviceName"], + "members":{ + "VirtualName":{"shape":"XmlStringMaxLen255"}, + "DeviceName":{"shape":"XmlStringMaxLen255"}, + "Ebs":{"shape":"Ebs"}, + "NoDevice":{"shape":"NoDevice"} + } + }, + "BlockDeviceMappings":{ + "type":"list", + "member":{"shape":"BlockDeviceMapping"} + }, + "ClassicLinkVPCSecurityGroups":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "CompleteLifecycleActionAnswer":{ + "type":"structure", + "members":{ + } + }, + "CompleteLifecycleActionType":{ + "type":"structure", + "required":[ + "LifecycleHookName", + 
"AutoScalingGroupName", + "LifecycleActionToken", + "LifecycleActionResult" + ], + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleActionToken":{"shape":"LifecycleActionToken"}, + "LifecycleActionResult":{"shape":"LifecycleActionResult"} + } + }, + "Cooldown":{"type":"integer"}, + "CreateAutoScalingGroupType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "MinSize", + "MaxSize" + ], + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "LaunchConfigurationName":{"shape":"ResourceName"}, + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "DefaultCooldown":{"shape":"Cooldown"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "HealthCheckType":{"shape":"XmlStringMaxLen32"}, + "HealthCheckGracePeriod":{"shape":"HealthCheckGracePeriod"}, + "PlacementGroup":{"shape":"XmlStringMaxLen255"}, + "VPCZoneIdentifier":{"shape":"XmlStringMaxLen255"}, + "TerminationPolicies":{"shape":"TerminationPolicies"}, + "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, + "Tags":{"shape":"Tags"} + } + }, + "CreateLaunchConfigurationType":{ + "type":"structure", + "required":["LaunchConfigurationName"], + "members":{ + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "ImageId":{"shape":"XmlStringMaxLen255"}, + "KeyName":{"shape":"XmlStringMaxLen255"}, + "SecurityGroups":{"shape":"SecurityGroups"}, + "ClassicLinkVPCId":{"shape":"XmlStringMaxLen255"}, + "ClassicLinkVPCSecurityGroups":{"shape":"ClassicLinkVPCSecurityGroups"}, + "UserData":{"shape":"XmlStringUserData"}, + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "InstanceType":{"shape":"XmlStringMaxLen255"}, + "KernelId":{"shape":"XmlStringMaxLen255"}, + 
"RamdiskId":{"shape":"XmlStringMaxLen255"}, + "BlockDeviceMappings":{"shape":"BlockDeviceMappings"}, + "InstanceMonitoring":{"shape":"InstanceMonitoring"}, + "SpotPrice":{"shape":"SpotPrice"}, + "IamInstanceProfile":{"shape":"XmlStringMaxLen1600"}, + "EbsOptimized":{"shape":"EbsOptimized"}, + "AssociatePublicIpAddress":{"shape":"AssociatePublicIpAddress"}, + "PlacementTenancy":{"shape":"XmlStringMaxLen64"} + } + }, + "CreateOrUpdateTagsType":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{"shape":"Tags"} + } + }, + "DeleteAutoScalingGroupType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ForceDelete":{"shape":"ForceDelete"} + } + }, + "DeleteLifecycleHookAnswer":{ + "type":"structure", + "members":{ + } + }, + "DeleteLifecycleHookType":{ + "type":"structure", + "required":[ + "LifecycleHookName", + "AutoScalingGroupName" + ], + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"} + } + }, + "DeleteNotificationConfigurationType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "TopicARN" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "TopicARN":{"shape":"ResourceName"} + } + }, + "DeletePolicyType":{ + "type":"structure", + "required":["PolicyName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "PolicyName":{"shape":"ResourceName"} + } + }, + "DeleteScheduledActionType":{ + "type":"structure", + "required":["ScheduledActionName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ScheduledActionName":{"shape":"ResourceName"} + } + }, + "DeleteTagsType":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{"shape":"Tags"} + } + }, + "DescribeAccountLimitsAnswer":{ + "type":"structure", + "members":{ + "MaxNumberOfAutoScalingGroups":{"shape":"MaxNumberOfAutoScalingGroups"}, + 
"MaxNumberOfLaunchConfigurations":{"shape":"MaxNumberOfLaunchConfigurations"}, + "NumberOfAutoScalingGroups":{"shape":"NumberOfAutoScalingGroups"}, + "NumberOfLaunchConfigurations":{"shape":"NumberOfLaunchConfigurations"} + } + }, + "DescribeAdjustmentTypesAnswer":{ + "type":"structure", + "members":{ + "AdjustmentTypes":{"shape":"AdjustmentTypes"} + } + }, + "DescribeAutoScalingInstancesType":{ + "type":"structure", + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeAutoScalingNotificationTypesAnswer":{ + "type":"structure", + "members":{ + "AutoScalingNotificationTypes":{"shape":"AutoScalingNotificationTypes"} + } + }, + "DescribeLifecycleHookTypesAnswer":{ + "type":"structure", + "members":{ + "LifecycleHookTypes":{"shape":"AutoScalingNotificationTypes"} + } + }, + "DescribeLifecycleHooksAnswer":{ + "type":"structure", + "members":{ + "LifecycleHooks":{"shape":"LifecycleHooks"} + } + }, + "DescribeLifecycleHooksType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleHookNames":{"shape":"LifecycleHookNames"} + } + }, + "DescribeLoadBalancersRequest":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribeLoadBalancersResponse":{ + "type":"structure", + "members":{ + "LoadBalancers":{"shape":"LoadBalancerStates"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeMetricCollectionTypesAnswer":{ + "type":"structure", + "members":{ + "Metrics":{"shape":"MetricCollectionTypes"}, + "Granularities":{"shape":"MetricGranularityTypes"} + } + }, + "DescribeNotificationConfigurationsAnswer":{ + "type":"structure", + "required":["NotificationConfigurations"], + "members":{ + 
"NotificationConfigurations":{"shape":"NotificationConfigurations"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeNotificationConfigurationsType":{ + "type":"structure", + "members":{ + "AutoScalingGroupNames":{"shape":"AutoScalingGroupNames"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribePoliciesType":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "PolicyNames":{"shape":"PolicyNames"}, + "PolicyTypes":{"shape":"PolicyTypes"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribeScalingActivitiesType":{ + "type":"structure", + "members":{ + "ActivityIds":{"shape":"ActivityIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeScheduledActionsType":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ScheduledActionNames":{"shape":"ScheduledActionNames"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribeTagsType":{ + "type":"structure", + "members":{ + "Filters":{"shape":"Filters"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "DescribeTerminationPolicyTypesAnswer":{ + "type":"structure", + "members":{ + "TerminationPolicyTypes":{"shape":"TerminationPolicies"} + } + }, + "DetachInstancesAnswer":{ + "type":"structure", + "members":{ + "Activities":{"shape":"Activities"} + } + }, + "DetachInstancesQuery":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "ShouldDecrementDesiredCapacity" + ], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ShouldDecrementDesiredCapacity":{"shape":"ShouldDecrementDesiredCapacity"} + } + }, + 
"DetachLoadBalancersResultType":{ + "type":"structure", + "members":{ + } + }, + "DetachLoadBalancersType":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LoadBalancerNames":{"shape":"LoadBalancerNames"} + } + }, + "DisableMetricsCollectionQuery":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "Metrics":{"shape":"Metrics"} + } + }, + "Ebs":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"XmlStringMaxLen255"}, + "VolumeSize":{"shape":"BlockDeviceEbsVolumeSize"}, + "VolumeType":{"shape":"BlockDeviceEbsVolumeType"}, + "DeleteOnTermination":{"shape":"BlockDeviceEbsDeleteOnTermination"}, + "Iops":{"shape":"BlockDeviceEbsIops"}, + "Encrypted":{"shape":"BlockDeviceEbsEncrypted"} + } + }, + "EbsOptimized":{"type":"boolean"}, + "EnableMetricsCollectionQuery":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "Granularity" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "Metrics":{"shape":"Metrics"}, + "Granularity":{"shape":"XmlStringMaxLen255"} + } + }, + "EnabledMetric":{ + "type":"structure", + "members":{ + "Metric":{"shape":"XmlStringMaxLen255"}, + "Granularity":{"shape":"XmlStringMaxLen255"} + } + }, + "EnabledMetrics":{ + "type":"list", + "member":{"shape":"EnabledMetric"} + }, + "EnterStandbyAnswer":{ + "type":"structure", + "members":{ + "Activities":{"shape":"Activities"} + } + }, + "EnterStandbyQuery":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "ShouldDecrementDesiredCapacity" + ], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ShouldDecrementDesiredCapacity":{"shape":"ShouldDecrementDesiredCapacity"} + } + }, + "EstimatedInstanceWarmup":{"type":"integer"}, + "ExecutePolicyType":{ + "type":"structure", + "required":["PolicyName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + 
"PolicyName":{"shape":"ResourceName"}, + "HonorCooldown":{"shape":"HonorCooldown"}, + "MetricValue":{"shape":"MetricScale"}, + "BreachThreshold":{"shape":"MetricScale"} + } + }, + "ExitStandbyAnswer":{ + "type":"structure", + "members":{ + "Activities":{"shape":"Activities"} + } + }, + "ExitStandbyQuery":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"} + } + }, + "Filter":{ + "type":"structure", + "members":{ + "Name":{"shape":"XmlString"}, + "Values":{"shape":"Values"} + } + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "ForceDelete":{"type":"boolean"}, + "GlobalTimeout":{"type":"integer"}, + "HealthCheckGracePeriod":{"type":"integer"}, + "HeartbeatTimeout":{"type":"integer"}, + "HonorCooldown":{"type":"boolean"}, + "Instance":{ + "type":"structure", + "required":[ + "InstanceId", + "AvailabilityZone", + "LifecycleState", + "HealthStatus", + "LaunchConfigurationName", + "ProtectedFromScaleIn" + ], + "members":{ + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "AvailabilityZone":{"shape":"XmlStringMaxLen255"}, + "LifecycleState":{"shape":"LifecycleState"}, + "HealthStatus":{"shape":"XmlStringMaxLen32"}, + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "ProtectedFromScaleIn":{"shape":"InstanceProtected"} + } + }, + "InstanceIds":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen19"} + }, + "InstanceMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"MonitoringEnabled"} + } + }, + "InstanceProtected":{"type":"boolean"}, + "Instances":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InvalidNextToken":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LaunchConfiguration":{ + "type":"structure", + "required":[ + 
"LaunchConfigurationName", + "ImageId", + "InstanceType", + "CreatedTime" + ], + "members":{ + "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, + "LaunchConfigurationARN":{"shape":"ResourceName"}, + "ImageId":{"shape":"XmlStringMaxLen255"}, + "KeyName":{"shape":"XmlStringMaxLen255"}, + "SecurityGroups":{"shape":"SecurityGroups"}, + "ClassicLinkVPCId":{"shape":"XmlStringMaxLen255"}, + "ClassicLinkVPCSecurityGroups":{"shape":"ClassicLinkVPCSecurityGroups"}, + "UserData":{"shape":"XmlStringUserData"}, + "InstanceType":{"shape":"XmlStringMaxLen255"}, + "KernelId":{"shape":"XmlStringMaxLen255"}, + "RamdiskId":{"shape":"XmlStringMaxLen255"}, + "BlockDeviceMappings":{"shape":"BlockDeviceMappings"}, + "InstanceMonitoring":{"shape":"InstanceMonitoring"}, + "SpotPrice":{"shape":"SpotPrice"}, + "IamInstanceProfile":{"shape":"XmlStringMaxLen1600"}, + "CreatedTime":{"shape":"TimestampType"}, + "EbsOptimized":{"shape":"EbsOptimized"}, + "AssociatePublicIpAddress":{"shape":"AssociatePublicIpAddress"}, + "PlacementTenancy":{"shape":"XmlStringMaxLen64"} + } + }, + "LaunchConfigurationNameType":{ + "type":"structure", + "required":["LaunchConfigurationName"], + "members":{ + "LaunchConfigurationName":{"shape":"ResourceName"} + } + }, + "LaunchConfigurationNames":{ + "type":"list", + "member":{"shape":"ResourceName"} + }, + "LaunchConfigurationNamesType":{ + "type":"structure", + "members":{ + "LaunchConfigurationNames":{"shape":"LaunchConfigurationNames"}, + "NextToken":{"shape":"XmlString"}, + "MaxRecords":{"shape":"MaxRecords"} + } + }, + "LaunchConfigurations":{ + "type":"list", + "member":{"shape":"LaunchConfiguration"} + }, + "LaunchConfigurationsType":{ + "type":"structure", + "required":["LaunchConfigurations"], + "members":{ + "LaunchConfigurations":{"shape":"LaunchConfigurations"}, + "NextToken":{"shape":"XmlString"} + } + }, + "LifecycleActionResult":{"type":"string"}, + "LifecycleActionToken":{ + "type":"string", + "max":36, + "min":36 + }, + "LifecycleHook":{ + 
"type":"structure", + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleTransition":{"shape":"LifecycleTransition"}, + "NotificationTargetARN":{"shape":"ResourceName"}, + "RoleARN":{"shape":"ResourceName"}, + "NotificationMetadata":{"shape":"XmlStringMaxLen1023"}, + "HeartbeatTimeout":{"shape":"HeartbeatTimeout"}, + "GlobalTimeout":{"shape":"GlobalTimeout"}, + "DefaultResult":{"shape":"LifecycleActionResult"} + } + }, + "LifecycleHookNames":{ + "type":"list", + "member":{"shape":"AsciiStringMaxLen255"} + }, + "LifecycleHooks":{ + "type":"list", + "member":{"shape":"LifecycleHook"} + }, + "LifecycleState":{ + "type":"string", + "enum":[ + "Pending", + "Pending:Wait", + "Pending:Proceed", + "Quarantined", + "InService", + "Terminating", + "Terminating:Wait", + "Terminating:Proceed", + "Terminated", + "Detaching", + "Detached", + "EnteringStandby", + "Standby" + ] + }, + "LifecycleTransition":{"type":"string"}, + "LimitExceededFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LoadBalancerNames":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "LoadBalancerState":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"XmlStringMaxLen255"}, + "State":{"shape":"XmlStringMaxLen255"} + } + }, + "LoadBalancerStates":{ + "type":"list", + "member":{"shape":"LoadBalancerState"} + }, + "MaxNumberOfAutoScalingGroups":{"type":"integer"}, + "MaxNumberOfLaunchConfigurations":{"type":"integer"}, + "MaxRecords":{"type":"integer"}, + "MetricCollectionType":{ + "type":"structure", + "members":{ + "Metric":{"shape":"XmlStringMaxLen255"} + } + }, + "MetricCollectionTypes":{ + "type":"list", + "member":{"shape":"MetricCollectionType"} + }, + "MetricGranularityType":{ + "type":"structure", + "members":{ + 
"Granularity":{"shape":"XmlStringMaxLen255"} + } + }, + "MetricGranularityTypes":{ + "type":"list", + "member":{"shape":"MetricGranularityType"} + }, + "MetricScale":{"type":"double"}, + "Metrics":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "MinAdjustmentMagnitude":{"type":"integer"}, + "MinAdjustmentStep":{ + "type":"integer", + "deprecated":true + }, + "MonitoringEnabled":{"type":"boolean"}, + "NoDevice":{"type":"boolean"}, + "NotificationConfiguration":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "TopicARN":{"shape":"ResourceName"}, + "NotificationType":{"shape":"XmlStringMaxLen255"} + } + }, + "NotificationConfigurations":{ + "type":"list", + "member":{"shape":"NotificationConfiguration"} + }, + "NumberOfAutoScalingGroups":{"type":"integer"}, + "NumberOfLaunchConfigurations":{"type":"integer"}, + "PoliciesType":{ + "type":"structure", + "members":{ + "ScalingPolicies":{"shape":"ScalingPolicies"}, + "NextToken":{"shape":"XmlString"} + } + }, + "PolicyARNType":{ + "type":"structure", + "members":{ + "PolicyARN":{"shape":"ResourceName"} + } + }, + "PolicyIncrement":{"type":"integer"}, + "PolicyNames":{ + "type":"list", + "member":{"shape":"ResourceName"} + }, + "PolicyTypes":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen64"} + }, + "ProcessNames":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "ProcessType":{ + "type":"structure", + "required":["ProcessName"], + "members":{ + "ProcessName":{"shape":"XmlStringMaxLen255"} + } + }, + "Processes":{ + "type":"list", + "member":{"shape":"ProcessType"} + }, + "ProcessesType":{ + "type":"structure", + "members":{ + "Processes":{"shape":"Processes"} + } + }, + "Progress":{"type":"integer"}, + "PropagateAtLaunch":{"type":"boolean"}, + "ProtectedFromScaleIn":{"type":"boolean"}, + "PutLifecycleHookAnswer":{ + "type":"structure", + "members":{ + } + }, + "PutLifecycleHookType":{ + "type":"structure", + "required":[ + 
"LifecycleHookName", + "AutoScalingGroupName" + ], + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleTransition":{"shape":"LifecycleTransition"}, + "RoleARN":{"shape":"ResourceName"}, + "NotificationTargetARN":{"shape":"ResourceName"}, + "NotificationMetadata":{"shape":"XmlStringMaxLen1023"}, + "HeartbeatTimeout":{"shape":"HeartbeatTimeout"}, + "DefaultResult":{"shape":"LifecycleActionResult"} + } + }, + "PutNotificationConfigurationType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "TopicARN", + "NotificationTypes" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "TopicARN":{"shape":"ResourceName"}, + "NotificationTypes":{"shape":"AutoScalingNotificationTypes"} + } + }, + "PutScalingPolicyType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "PolicyName", + "AdjustmentType" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "PolicyName":{"shape":"XmlStringMaxLen255"}, + "PolicyType":{"shape":"XmlStringMaxLen64"}, + "AdjustmentType":{"shape":"XmlStringMaxLen255"}, + "MinAdjustmentStep":{"shape":"MinAdjustmentStep"}, + "MinAdjustmentMagnitude":{"shape":"MinAdjustmentMagnitude"}, + "ScalingAdjustment":{"shape":"PolicyIncrement"}, + "Cooldown":{"shape":"Cooldown"}, + "MetricAggregationType":{"shape":"XmlStringMaxLen32"}, + "StepAdjustments":{"shape":"StepAdjustments"}, + "EstimatedInstanceWarmup":{"shape":"EstimatedInstanceWarmup"} + } + }, + "PutScheduledUpdateGroupActionType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "ScheduledActionName" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ScheduledActionName":{"shape":"XmlStringMaxLen255"}, + "Time":{"shape":"TimestampType"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "Recurrence":{"shape":"XmlStringMaxLen255"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + 
"MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"} + } + }, + "RecordLifecycleActionHeartbeatAnswer":{ + "type":"structure", + "members":{ + } + }, + "RecordLifecycleActionHeartbeatType":{ + "type":"structure", + "required":[ + "LifecycleHookName", + "AutoScalingGroupName", + "LifecycleActionToken" + ], + "members":{ + "LifecycleHookName":{"shape":"AsciiStringMaxLen255"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LifecycleActionToken":{"shape":"LifecycleActionToken"} + } + }, + "ResourceContentionFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"ResourceContention", + "httpStatusCode":500, + "senderFault":true + }, + "exception":true + }, + "ResourceInUseFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"ResourceInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ResourceName":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "ScalingActivityInProgressFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "error":{ + "code":"ScalingActivityInProgress", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ScalingActivityStatusCode":{ + "type":"string", + "enum":[ + "PendingSpotBidPlacement", + "WaitingForSpotInstanceRequestId", + "WaitingForSpotInstanceId", + "WaitingForInstanceId", + "PreInService", + "InProgress", + "WaitingForELBConnectionDraining", + "MidLifecycleAction", + "WaitingForInstanceWarmup", + "Successful", + "Failed", + "Cancelled" + ] + }, + "ScalingPolicies":{ + "type":"list", + "member":{"shape":"ScalingPolicy"} + }, + "ScalingPolicy":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + 
"PolicyName":{"shape":"XmlStringMaxLen255"}, + "PolicyARN":{"shape":"ResourceName"}, + "PolicyType":{"shape":"XmlStringMaxLen64"}, + "AdjustmentType":{"shape":"XmlStringMaxLen255"}, + "MinAdjustmentStep":{"shape":"MinAdjustmentStep"}, + "MinAdjustmentMagnitude":{"shape":"MinAdjustmentMagnitude"}, + "ScalingAdjustment":{"shape":"PolicyIncrement"}, + "Cooldown":{"shape":"Cooldown"}, + "StepAdjustments":{"shape":"StepAdjustments"}, + "MetricAggregationType":{"shape":"XmlStringMaxLen32"}, + "EstimatedInstanceWarmup":{"shape":"EstimatedInstanceWarmup"}, + "Alarms":{"shape":"Alarms"} + } + }, + "ScalingProcessQuery":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ScalingProcesses":{"shape":"ProcessNames"} + } + }, + "ScheduledActionNames":{ + "type":"list", + "member":{"shape":"ResourceName"} + }, + "ScheduledActionsType":{ + "type":"structure", + "members":{ + "ScheduledUpdateGroupActions":{"shape":"ScheduledUpdateGroupActions"}, + "NextToken":{"shape":"XmlString"} + } + }, + "ScheduledUpdateGroupAction":{ + "type":"structure", + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "ScheduledActionName":{"shape":"XmlStringMaxLen255"}, + "ScheduledActionARN":{"shape":"ResourceName"}, + "Time":{"shape":"TimestampType"}, + "StartTime":{"shape":"TimestampType"}, + "EndTime":{"shape":"TimestampType"}, + "Recurrence":{"shape":"XmlStringMaxLen255"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"} + } + }, + "ScheduledUpdateGroupActions":{ + "type":"list", + "member":{"shape":"ScheduledUpdateGroupAction"} + }, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"XmlString"} + }, + "SetDesiredCapacityType":{ + "type":"structure", + "required":[ + "AutoScalingGroupName", + "DesiredCapacity" + ], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + 
"DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "HonorCooldown":{"shape":"HonorCooldown"} + } + }, + "SetInstanceHealthQuery":{ + "type":"structure", + "required":[ + "InstanceId", + "HealthStatus" + ], + "members":{ + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "HealthStatus":{"shape":"XmlStringMaxLen32"}, + "ShouldRespectGracePeriod":{"shape":"ShouldRespectGracePeriod"} + } + }, + "SetInstanceProtectionAnswer":{ + "type":"structure", + "members":{ + } + }, + "SetInstanceProtectionQuery":{ + "type":"structure", + "required":[ + "InstanceIds", + "AutoScalingGroupName", + "ProtectedFromScaleIn" + ], + "members":{ + "InstanceIds":{"shape":"InstanceIds"}, + "AutoScalingGroupName":{"shape":"ResourceName"}, + "ProtectedFromScaleIn":{"shape":"ProtectedFromScaleIn"} + } + }, + "ShouldDecrementDesiredCapacity":{"type":"boolean"}, + "ShouldRespectGracePeriod":{"type":"boolean"}, + "SpotPrice":{ + "type":"string", + "max":255, + "min":1 + }, + "StepAdjustment":{ + "type":"structure", + "required":["ScalingAdjustment"], + "members":{ + "MetricIntervalLowerBound":{"shape":"MetricScale"}, + "MetricIntervalUpperBound":{"shape":"MetricScale"}, + "ScalingAdjustment":{"shape":"PolicyIncrement"} + } + }, + "StepAdjustments":{ + "type":"list", + "member":{"shape":"StepAdjustment"} + }, + "SuspendedProcess":{ + "type":"structure", + "members":{ + "ProcessName":{"shape":"XmlStringMaxLen255"}, + "SuspensionReason":{"shape":"XmlStringMaxLen255"} + } + }, + "SuspendedProcesses":{ + "type":"list", + "member":{"shape":"SuspendedProcess"} + }, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "ResourceId":{"shape":"XmlString"}, + "ResourceType":{"shape":"XmlString"}, + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"}, + "PropagateAtLaunch":{"shape":"PropagateAtLaunch"} + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "ResourceId":{"shape":"XmlString"}, + "ResourceType":{"shape":"XmlString"}, + "Key":{"shape":"TagKey"}, + 
"Value":{"shape":"TagValue"}, + "PropagateAtLaunch":{"shape":"PropagateAtLaunch"} + } + }, + "TagDescriptionList":{ + "type":"list", + "member":{"shape":"TagDescription"} + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagsType":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagDescriptionList"}, + "NextToken":{"shape":"XmlString"} + } + }, + "TerminateInstanceInAutoScalingGroupType":{ + "type":"structure", + "required":[ + "InstanceId", + "ShouldDecrementDesiredCapacity" + ], + "members":{ + "InstanceId":{"shape":"XmlStringMaxLen19"}, + "ShouldDecrementDesiredCapacity":{"shape":"ShouldDecrementDesiredCapacity"} + } + }, + "TerminationPolicies":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen1600"} + }, + "TimestampType":{"type":"timestamp"}, + "UpdateAutoScalingGroupType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"ResourceName"}, + "LaunchConfigurationName":{"shape":"ResourceName"}, + "MinSize":{"shape":"AutoScalingGroupMinSize"}, + "MaxSize":{"shape":"AutoScalingGroupMaxSize"}, + "DesiredCapacity":{"shape":"AutoScalingGroupDesiredCapacity"}, + "DefaultCooldown":{"shape":"Cooldown"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "HealthCheckType":{"shape":"XmlStringMaxLen32"}, + "HealthCheckGracePeriod":{"shape":"HealthCheckGracePeriod"}, + "PlacementGroup":{"shape":"XmlStringMaxLen255"}, + "VPCZoneIdentifier":{"shape":"XmlStringMaxLen255"}, + "TerminationPolicies":{"shape":"TerminationPolicies"}, + "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"} + } + }, + "Values":{ + "type":"list", + "member":{"shape":"XmlString"} + }, + "XmlString":{ + 
"type":"string", + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen1023":{ + "type":"string", + "max":1023, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen1600":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen19":{ + "type":"string", + "max":19, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen255":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen32":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMaxLen64":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringUserData":{ + "type":"string", + "max":21847, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1403 @@ +{ + "version": "2.0", + "service": "Auto Scaling

    Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined policies, schedules, and health checks. Use this service in conjunction with the Amazon CloudWatch and Elastic Load Balancing services.

    ", + "operations": { + "AttachInstances": "

    Attaches one or more EC2 instances to the specified Auto Scaling group.

    When you attach instances, Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

    For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "AttachLoadBalancers": "

    Attaches one or more load balancers to the specified Auto Scaling group.

    To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.

    For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "CompleteLifecycleAction": "

    Completes the lifecycle action for the associated token initiated under the given lifecycle hook with the specified result.

    This operation is a part of the basic sequence for adding a lifecycle hook to an Auto Scaling group:

    1. Create a notification target. A target can be either an Amazon SQS queue or an Amazon SNS topic.
    2. Create an IAM role. This role allows Auto Scaling to publish lifecycle notifications to the designated SQS queue or SNS topic.
    3. Create the lifecycle hook. You can create a hook that acts when instances launch or when instances terminate.
    4. If necessary, record the lifecycle action heartbeat to keep the instance in a pending state.
    5. Complete the lifecycle action.

    For more information, see Auto Scaling Pending State and Auto Scaling Terminating State in the Auto Scaling Developer Guide.

    ", + "CreateAutoScalingGroup": "

    Creates an Auto Scaling group with the specified name and attributes.

    If you exceed your maximum limit of Auto Scaling groups, which by default is 20 per region, the call fails. For information about viewing and updating this limit, see DescribeAccountLimits.

    For more information, see Auto Scaling Groups in the Auto Scaling Developer Guide.

    ", + "CreateLaunchConfiguration": "

    Creates a launch configuration.

    If you exceed your maximum limit of launch configurations, which by default is 100 per region, the call fails. For information about viewing and updating this limit, see DescribeAccountLimits.

    For more information, see Launch Configurations in the Auto Scaling Developer Guide.

    ", + "CreateOrUpdateTags": "

    Creates or updates tags for the specified Auto Scaling group.

    A tag is defined by its resource ID, resource type, key, value, and propagate flag. The value and the propagate flag are optional parameters. The only supported resource type is auto-scaling-group, and the resource ID must be the name of the group. The PropagateAtLaunch flag determines whether the tag is added to instances launched in the group. Valid values are true or false.

    When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.

    For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling Developer Guide.

    ", + "DeleteAutoScalingGroup": "

    Deletes the specified Auto Scaling group.

    If the group has instances or scaling activities in progress, you must specify the option to force the deletion in order for it to succeed.

    If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm that no longer has an associated action.

    To remove instances from the Auto Scaling group before deleting it, call DetachInstances with the list of instances and the option to decrement the desired capacity so that Auto Scaling does not launch replacement instances.

    To terminate all instances before deleting the Auto Scaling group, call UpdateAutoScalingGroup and set the minimum size and desired capacity of the Auto Scaling group to zero.

    ", + "DeleteLaunchConfiguration": "

    Deletes the specified launch configuration.

    The launch configuration must not be attached to an Auto Scaling group. When this call completes, the launch configuration is no longer available for use.

    ", + "DeleteLifecycleHook": "

    Deletes the specified lifecycle hook.

    If there are any outstanding lifecycle actions, they are completed first (ABANDON for launching instances, CONTINUE for terminating instances).

    ", + "DeleteNotificationConfiguration": "

    Deletes the specified notification.

    ", + "DeletePolicy": "

    Deletes the specified Auto Scaling policy.

    Deleting a policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

    ", + "DeleteScheduledAction": "

    Deletes the specified scheduled action.

    ", + "DeleteTags": "

    Deletes the specified tags.

    ", + "DescribeAccountLimits": "

    Describes the current Auto Scaling resource limits for your AWS account.

    For information about requesting an increase in these limits, see AWS Service Limits in the Amazon Web Services General Reference.

    ", + "DescribeAdjustmentTypes": "

    Describes the policy adjustment types for use with PutScalingPolicy.

    ", + "DescribeAutoScalingGroups": "

    Describes one or more Auto Scaling groups. If a list of names is not provided, the call describes all Auto Scaling groups.

    ", + "DescribeAutoScalingInstances": "

    Describes one or more Auto Scaling instances. If a list is not provided, the call describes all instances.

    ", + "DescribeAutoScalingNotificationTypes": "

    Describes the notification types that are supported by Auto Scaling.

    ", + "DescribeLaunchConfigurations": "

    Describes one or more launch configurations. If you omit the list of names, then the call describes all launch configurations.

    ", + "DescribeLifecycleHookTypes": "

    Describes the available types of lifecycle hooks.

    ", + "DescribeLifecycleHooks": "

    Describes the lifecycle hooks for the specified Auto Scaling group.

    ", + "DescribeLoadBalancers": "

    Describes the load balancers for the specified Auto Scaling group.

    ", + "DescribeMetricCollectionTypes": "

    Describes the available CloudWatch metrics for Auto Scaling.

    Note that the GroupStandbyInstances metric is not returned by default. You must explicitly request this metric when calling EnableMetricsCollection.

    ", + "DescribeNotificationConfigurations": "

    Describes the notification actions associated with the specified Auto Scaling group.

    ", + "DescribePolicies": "

    Describes the policies for the specified Auto Scaling group.

    ", + "DescribeScalingActivities": "

    Describes one or more scaling activities for the specified Auto Scaling group. If you omit the ActivityIds, the call returns all activities from the past six weeks. Activities are sorted by the start time. Activities still in progress appear first on the list.

    ", + "DescribeScalingProcessTypes": "

    Describes the scaling process types for use with ResumeProcesses and SuspendProcesses.

    ", + "DescribeScheduledActions": "

    Describes the actions scheduled for your Auto Scaling group that haven't run. To describe the actions that have already run, use DescribeScalingActivities.

    ", + "DescribeTags": "

    Describes the specified tags.

    You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.

    You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.

    ", + "DescribeTerminationPolicyTypes": "

    Describes the termination policies supported by Auto Scaling.

    ", + "DetachInstances": "

    Removes one or more instances from the specified Auto Scaling group.

    After the instances are detached, you can manage them independently from the rest of the Auto Scaling group.

    If you do not specify the option to decrement the desired capacity, Auto Scaling launches instances to replace the ones that are detached.

    For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "DetachLoadBalancers": "

    Removes one or more load balancers from the specified Auto Scaling group.

    When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using DescribeLoadBalancers. Note that the instances remain running.

    ", + "DisableMetricsCollection": "

    Disables monitoring of the specified metrics for the specified Auto Scaling group.

    ", + "EnableMetricsCollection": "

    Enables monitoring of the specified metrics for the specified Auto Scaling group.

    You can only enable metrics collection if InstanceMonitoring in the launch configuration for the group is set to True.

    ", + "EnterStandby": "

    Moves the specified instances into Standby mode.

    For more information, see Auto Scaling InService State in the Auto Scaling Developer Guide.

    ", + "ExecutePolicy": "

    Executes the specified policy.

    ", + "ExitStandby": "

    Moves the specified instances out of Standby mode.

    For more information, see Auto Scaling InService State in the Auto Scaling Developer Guide.

    ", + "PutLifecycleHook": "

    Creates or updates a lifecycle hook for the specified Auto Scaling Group.

    A lifecycle hook tells Auto Scaling that you want to perform an action on an instance that is not actively in service; for example, either when the instance launches or before the instance terminates.

    This operation is a part of the basic sequence for adding a lifecycle hook to an Auto Scaling group:

    1. Create a notification target. A target can be either an Amazon SQS queue or an Amazon SNS topic.
    2. Create an IAM role. This role allows Auto Scaling to publish lifecycle notifications to the designated SQS queue or SNS topic.
    3. Create the lifecycle hook. You can create a hook that acts when instances launch or when instances terminate.
    4. If necessary, record the lifecycle action heartbeat to keep the instance in a pending state.
    5. Complete the lifecycle action.

    For more information, see Auto Scaling Pending State and Auto Scaling Terminating State in the Auto Scaling Developer Guide.

    If you exceed your maximum limit of lifecycle hooks, which by default is 50 per region, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.

    ", + "PutNotificationConfiguration": "

    Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to this topic can have messages for events delivered to an endpoint such as a web server or email address.

    For more information see Getting Notifications When Your Auto Scaling Group Changes in the Auto Scaling Developer Guide.

    This configuration overwrites an existing configuration.

    ", + "PutScalingPolicy": "

    Creates or updates a policy for an Auto Scaling group. To update an existing policy, use the existing policy name and set the parameters you want to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.

    If you exceed your maximum limit of step adjustments, which by default is 20 per region, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.

    ", + "PutScheduledUpdateGroupAction": "

    Creates or updates a scheduled scaling action for an Auto Scaling group. When updating a scheduled scaling action, if you leave a parameter unspecified, the corresponding value remains unchanged in the affected Auto Scaling group.

    For more information, see Scheduled Scaling in the Auto Scaling Developer Guide.

    ", + "RecordLifecycleActionHeartbeat": "

    Records a heartbeat for the lifecycle action associated with a specific token. This extends the timeout by the length of time defined by the HeartbeatTimeout parameter of PutLifecycleHook.

    This operation is a part of the basic sequence for adding a lifecycle hook to an Auto Scaling group:

    1. Create a notification target. A target can be either an Amazon SQS queue or an Amazon SNS topic.
    2. Create an IAM role. This role allows Auto Scaling to publish lifecycle notifications to the designated SQS queue or SNS topic.
    3. Create the lifecycle hook. You can create a hook that acts when instances launch or when instances terminate.
    4. If necessary, record the lifecycle action heartbeat to keep the instance in a pending state.
    5. Complete the lifecycle action.

    For more information, see Auto Scaling Pending State and Auto Scaling Terminating State in the Auto Scaling Developer Guide.

    ", + "ResumeProcesses": "

    Resumes the specified suspended Auto Scaling processes for the specified Auto Scaling group. To resume specific processes, use the ScalingProcesses parameter. To resume all processes, omit the ScalingProcesses parameter. For more information, see Suspend and Resume Auto Scaling Processes in the Auto Scaling Developer Guide.

    ", + "SetDesiredCapacity": "

    Sets the size of the specified Auto Scaling group.

    For more information about desired capacity, see What Is Auto Scaling? in the Auto Scaling Developer Guide.

    ", + "SetInstanceHealth": "

    Sets the health status of the specified instance.

    For more information, see Health Checks in the Auto Scaling Developer Guide.

    ", + "SetInstanceProtection": "

    Updates the instance protection settings of the specified instances.

    For more information, see Instance Protection in the Auto Scaling Developer Guide.

    ", + "SuspendProcesses": "

    Suspends the specified Auto Scaling processes for the specified Auto Scaling group. To suspend specific processes, use the ScalingProcesses parameter. To suspend all processes, omit the ScalingProcesses parameter.

    Note that if you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly.

    To resume processes that have been suspended, use ResumeProcesses.

    For more information, see Suspend and Resume Auto Scaling Processes in the Auto Scaling Developer Guide.

    ", + "TerminateInstanceInAutoScalingGroup": "

    Terminates the specified instance and optionally adjusts the desired group size.

    This call simply makes a termination request. The instance is not terminated immediately.

    ", + "UpdateAutoScalingGroup": "

    Updates the configuration for the specified Auto Scaling group.

    To update an Auto Scaling group with a launch configuration with InstanceMonitoring set to False, you must first disable the collection of group metrics. Otherwise, you will get an error. If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection.

    The new settings are registered upon the completion of this call. Any launch configuration settings take effect on any triggers after this call returns. Scaling activities that are currently in progress aren't affected.

    Note the following:

    • If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is larger than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MinSize.

    • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MaxSize.

    • All other optional parameters are left unchanged if not specified.

    " + }, + "shapes": { + "Activities": { + "base": null, + "refs": { + "ActivitiesType$Activities": "

    The scaling activities.

    ", + "DetachInstancesAnswer$Activities": "

    The activities related to detaching the instances from the Auto Scaling group.

    ", + "EnterStandbyAnswer$Activities": "

    The activities related to moving instances into Standby mode.

    ", + "ExitStandbyAnswer$Activities": "

    The activities related to moving instances out of Standby mode.

    " + } + }, + "ActivitiesType": { + "base": null, + "refs": { + } + }, + "Activity": { + "base": "

    Describes scaling activity, which is a long-running process that represents a change to your Auto Scaling group, such as changing its size or replacing an instance.

    ", + "refs": { + "Activities$member": null, + "ActivityType$Activity": "

    A scaling activity.

    " + } + }, + "ActivityIds": { + "base": null, + "refs": { + "DescribeScalingActivitiesType$ActivityIds": "

    The activity IDs of the desired scaling activities. If this list is omitted, all activities are described. If the AutoScalingGroupName parameter is provided, the results are limited to that group. The list of requested activities cannot contain more than 50 items. If unknown activities are requested, they are ignored with no error.

    " + } + }, + "ActivityType": { + "base": null, + "refs": { + } + }, + "AdjustmentType": { + "base": "

    Describes a policy adjustment type.

    For more information, see Dynamic Scaling in the Auto Scaling Developer Guide.

    ", + "refs": { + "AdjustmentTypes$member": null + } + }, + "AdjustmentTypes": { + "base": null, + "refs": { + "DescribeAdjustmentTypesAnswer$AdjustmentTypes": "

    The policy adjustment types.

    " + } + }, + "Alarm": { + "base": "

    Describes an alarm.

    ", + "refs": { + "Alarms$member": null + } + }, + "Alarms": { + "base": null, + "refs": { + "ScalingPolicy$Alarms": "

    The CloudWatch alarms related to the policy.

    " + } + }, + "AlreadyExistsFault": { + "base": "

    You already have an Auto Scaling group or launch configuration with this name.

    ", + "refs": { + } + }, + "AsciiStringMaxLen255": { + "base": null, + "refs": { + "CompleteLifecycleActionType$LifecycleHookName": "

    The name of the lifecycle hook.

    ", + "DeleteLifecycleHookType$LifecycleHookName": "

    The name of the lifecycle hook.

    ", + "LifecycleHook$LifecycleHookName": "

    The name of the lifecycle hook.

    ", + "LifecycleHookNames$member": null, + "PutLifecycleHookType$LifecycleHookName": "

    The name of the lifecycle hook.

    ", + "RecordLifecycleActionHeartbeatType$LifecycleHookName": "

    The name of the lifecycle hook.

    " + } + }, + "AssociatePublicIpAddress": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$AssociatePublicIpAddress": "

    Used for groups that launch instances into a virtual private cloud (VPC). Specifies whether to assign a public IP address to each instance. For more information, see Auto Scaling and Amazon Virtual Private Cloud in the Auto Scaling Developer Guide.

    If you specify a value for this parameter, be sure to specify at least one subnet using the VPCZoneIdentifier parameter when you create your group.

    Default: If the instance is launched into a default subnet, the default is true. If the instance is launched into a nondefault subnet, the default is false. For more information, see Supported Platforms in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$AssociatePublicIpAddress": "

    [EC2-VPC] Indicates whether to assign a public IP address to each instance.

    " + } + }, + "AttachInstancesQuery": { + "base": null, + "refs": { + } + }, + "AttachLoadBalancersResultType": { + "base": null, + "refs": { + } + }, + "AttachLoadBalancersType": { + "base": null, + "refs": { + } + }, + "AutoScalingGroup": { + "base": "

    Describes an Auto Scaling group.

    ", + "refs": { + "AutoScalingGroups$member": null + } + }, + "AutoScalingGroupDesiredCapacity": { + "base": null, + "refs": { + "AutoScalingGroup$DesiredCapacity": "

    The desired size of the group.

    ", + "CreateAutoScalingGroupType$DesiredCapacity": "

    The number of EC2 instances that should be running in the group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

    ", + "PutScheduledUpdateGroupActionType$DesiredCapacity": "

    The number of EC2 instances that should be running in the group.

    ", + "ScheduledUpdateGroupAction$DesiredCapacity": "

    The number of instances you prefer to maintain in the group.

    ", + "SetDesiredCapacityType$DesiredCapacity": "

    The number of EC2 instances that should be running in the Auto Scaling group.

    ", + "UpdateAutoScalingGroupType$DesiredCapacity": "

    The number of EC2 instances that should be running in the Auto Scaling group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

    " + } + }, + "AutoScalingGroupMaxSize": { + "base": null, + "refs": { + "AutoScalingGroup$MaxSize": "

    The maximum size of the group.

    ", + "CreateAutoScalingGroupType$MaxSize": "

    The maximum size of the group.

    ", + "PutScheduledUpdateGroupActionType$MaxSize": "

    The maximum size for the Auto Scaling group.

    ", + "ScheduledUpdateGroupAction$MaxSize": "

    The maximum size of the group.

    ", + "UpdateAutoScalingGroupType$MaxSize": "

    The maximum size of the Auto Scaling group.

    " + } + }, + "AutoScalingGroupMinSize": { + "base": null, + "refs": { + "AutoScalingGroup$MinSize": "

    The minimum size of the group.

    ", + "CreateAutoScalingGroupType$MinSize": "

    The minimum size of the group.

    ", + "PutScheduledUpdateGroupActionType$MinSize": "

    The minimum size for the Auto Scaling group.

    ", + "ScheduledUpdateGroupAction$MinSize": "

    The minimum size of the group.

    ", + "UpdateAutoScalingGroupType$MinSize": "

    The minimum size of the Auto Scaling group.

    " + } + }, + "AutoScalingGroupNames": { + "base": null, + "refs": { + "AutoScalingGroupNamesType$AutoScalingGroupNames": "

    The group names.

    ", + "DescribeNotificationConfigurationsType$AutoScalingGroupNames": "

    The names of the groups.

    " + } + }, + "AutoScalingGroupNamesType": { + "base": null, + "refs": { + } + }, + "AutoScalingGroups": { + "base": null, + "refs": { + "AutoScalingGroupsType$AutoScalingGroups": "

    The groups.

    " + } + }, + "AutoScalingGroupsType": { + "base": null, + "refs": { + } + }, + "AutoScalingInstanceDetails": { + "base": "

    Describes an EC2 instance associated with an Auto Scaling group.

    ", + "refs": { + "AutoScalingInstances$member": null + } + }, + "AutoScalingInstances": { + "base": null, + "refs": { + "AutoScalingInstancesType$AutoScalingInstances": "

    The instances.

    " + } + }, + "AutoScalingInstancesType": { + "base": null, + "refs": { + } + }, + "AutoScalingNotificationTypes": { + "base": null, + "refs": { + "DescribeAutoScalingNotificationTypesAnswer$AutoScalingNotificationTypes": "

    One or more of the following notification types:

    • autoscaling:EC2_INSTANCE_LAUNCH

    • autoscaling:EC2_INSTANCE_LAUNCH_ERROR

    • autoscaling:EC2_INSTANCE_TERMINATE

    • autoscaling:EC2_INSTANCE_TERMINATE_ERROR

    • autoscaling:TEST_NOTIFICATION

    ", + "DescribeLifecycleHookTypesAnswer$LifecycleHookTypes": "

    One or more of the following notification types:

    • autoscaling:EC2_INSTANCE_LAUNCHING

    • autoscaling:EC2_INSTANCE_TERMINATING

    ", + "PutNotificationConfigurationType$NotificationTypes": "

    The type of event that will cause the notification to be sent. For details about notification types supported by Auto Scaling, see DescribeAutoScalingNotificationTypes.

    " + } + }, + "AvailabilityZones": { + "base": null, + "refs": { + "AutoScalingGroup$AvailabilityZones": "

    One or more Availability Zones for the group.

    ", + "CreateAutoScalingGroupType$AvailabilityZones": "

    One or more Availability Zones for the group. This parameter is optional if you specify subnets using the VPCZoneIdentifier parameter.

    ", + "UpdateAutoScalingGroupType$AvailabilityZones": "

    One or more Availability Zones for the group.

    " + } + }, + "BlockDeviceEbsDeleteOnTermination": { + "base": null, + "refs": { + "Ebs$DeleteOnTermination": "

    Indicates whether to delete the volume on instance termination.

    Default: true

    " + } + }, + "BlockDeviceEbsEncrypted": { + "base": null, + "refs": { + "Ebs$Encrypted": "

    Indicates whether the volume should be encrypted. Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or an unencrypted volume from an encrypted snapshot. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "BlockDeviceEbsIops": { + "base": null, + "refs": { + "Ebs$Iops": "

    For Provisioned IOPS (SSD) volumes only. The number of I/O operations per second (IOPS) to provision for the volume.

    Default: None

    " + } + }, + "BlockDeviceEbsVolumeSize": { + "base": null, + "refs": { + "Ebs$VolumeSize": "

    The volume size, in gigabytes.

    Valid values: If the volume type is io1, the minimum size of the volume is 10 GiB. If you specify SnapshotId and VolumeSize, VolumeSize must be equal to or larger than the size of the snapshot.

    Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the size of the snapshot.

    Required: Required when the volume type is io1.

    " + } + }, + "BlockDeviceEbsVolumeType": { + "base": null, + "refs": { + "Ebs$VolumeType": "

    The volume type.

    Valid values: standard | io1 | gp2

    Default: standard

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "BlockDeviceMappings$member": null + } + }, + "BlockDeviceMappings": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$BlockDeviceMappings": "

    One or more mappings that specify how block devices are exposed to the instance. For more information, see Block Device Mapping in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$BlockDeviceMappings": "

    A block device mapping, which specifies the block devices for the instance.

    " + } + }, + "ClassicLinkVPCSecurityGroups": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$ClassicLinkVPCSecurityGroups": "

    The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. This parameter is required if ClassicLinkVPCId is specified, and is not supported otherwise. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$ClassicLinkVPCSecurityGroups": "

    The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. This parameter is required if ClassicLinkVPCId is specified, and cannot be used otherwise. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "CompleteLifecycleActionAnswer": { + "base": null, + "refs": { + } + }, + "CompleteLifecycleActionType": { + "base": null, + "refs": { + } + }, + "Cooldown": { + "base": null, + "refs": { + "AutoScalingGroup$DefaultCooldown": "

    The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.

    ", + "CreateAutoScalingGroupType$DefaultCooldown": "

    The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.

    For more information, see Understanding Auto Scaling Cooldowns in the Auto Scaling Developer Guide.

    ", + "PutScalingPolicyType$Cooldown": "

    The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.

    This parameter is not supported unless the policy type is SimpleScaling.

    For more information, see Understanding Auto Scaling Cooldowns in the Auto Scaling Developer Guide.

    ", + "ScalingPolicy$Cooldown": "

    The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start.

    ", + "UpdateAutoScalingGroupType$DefaultCooldown": "

    The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default is 300.

    For more information, see Understanding Auto Scaling Cooldowns in the Auto Scaling Developer Guide.

    " + } + }, + "CreateAutoScalingGroupType": { + "base": null, + "refs": { + } + }, + "CreateLaunchConfigurationType": { + "base": null, + "refs": { + } + }, + "CreateOrUpdateTagsType": { + "base": null, + "refs": { + } + }, + "DeleteAutoScalingGroupType": { + "base": null, + "refs": { + } + }, + "DeleteLifecycleHookAnswer": { + "base": null, + "refs": { + } + }, + "DeleteLifecycleHookType": { + "base": null, + "refs": { + } + }, + "DeleteNotificationConfigurationType": { + "base": null, + "refs": { + } + }, + "DeletePolicyType": { + "base": "

    ", + "refs": { + } + }, + "DeleteScheduledActionType": { + "base": null, + "refs": { + } + }, + "DeleteTagsType": { + "base": null, + "refs": { + } + }, + "DescribeAccountLimitsAnswer": { + "base": null, + "refs": { + } + }, + "DescribeAdjustmentTypesAnswer": { + "base": null, + "refs": { + } + }, + "DescribeAutoScalingInstancesType": { + "base": null, + "refs": { + } + }, + "DescribeAutoScalingNotificationTypesAnswer": { + "base": null, + "refs": { + } + }, + "DescribeLifecycleHookTypesAnswer": { + "base": null, + "refs": { + } + }, + "DescribeLifecycleHooksAnswer": { + "base": null, + "refs": { + } + }, + "DescribeLifecycleHooksType": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancersRequest": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancersResponse": { + "base": null, + "refs": { + } + }, + "DescribeMetricCollectionTypesAnswer": { + "base": null, + "refs": { + } + }, + "DescribeNotificationConfigurationsAnswer": { + "base": null, + "refs": { + } + }, + "DescribeNotificationConfigurationsType": { + "base": null, + "refs": { + } + }, + "DescribePoliciesType": { + "base": null, + "refs": { + } + }, + "DescribeScalingActivitiesType": { + "base": null, + "refs": { + } + }, + "DescribeScheduledActionsType": { + "base": null, + "refs": { + } + }, + "DescribeTagsType": { + "base": null, + "refs": { + } + }, + "DescribeTerminationPolicyTypesAnswer": { + "base": null, + "refs": { + } + }, + "DetachInstancesAnswer": { + "base": null, + "refs": { + } + }, + "DetachInstancesQuery": { + "base": null, + "refs": { + } + }, + "DetachLoadBalancersResultType": { + "base": null, + "refs": { + } + }, + "DetachLoadBalancersType": { + "base": null, + "refs": { + } + }, + "DisableMetricsCollectionQuery": { + "base": null, + "refs": { + } + }, + "Ebs": { + "base": "

    Describes an Amazon EBS volume.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    The information about the Amazon EBS volume.

    " + } + }, + "EbsOptimized": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$EbsOptimized": "

    Indicates whether the instance is optimized for Amazon EBS I/O. By default, the instance is not optimized for EBS I/O. The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional usage charges apply. For more information, see Amazon EBS-Optimized Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$EbsOptimized": "

    Controls whether the instance is optimized for EBS I/O (true) or not (false).

    " + } + }, + "EnableMetricsCollectionQuery": { + "base": null, + "refs": { + } + }, + "EnabledMetric": { + "base": "

    Describes an enabled metric.

    ", + "refs": { + "EnabledMetrics$member": null + } + }, + "EnabledMetrics": { + "base": null, + "refs": { + "AutoScalingGroup$EnabledMetrics": "

    The metrics enabled for the group.

    " + } + }, + "EnterStandbyAnswer": { + "base": null, + "refs": { + } + }, + "EnterStandbyQuery": { + "base": null, + "refs": { + } + }, + "EstimatedInstanceWarmup": { + "base": null, + "refs": { + "PutScalingPolicyType$EstimatedInstanceWarmup": "

    The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. The default is to use the value specified for the default cooldown period for the group.

    This parameter is not supported if the policy type is SimpleScaling.

    ", + "ScalingPolicy$EstimatedInstanceWarmup": "

    The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics.

    " + } + }, + "ExecutePolicyType": { + "base": null, + "refs": { + } + }, + "ExitStandbyAnswer": { + "base": null, + "refs": { + } + }, + "ExitStandbyQuery": { + "base": null, + "refs": { + } + }, + "Filter": { + "base": "

    Describes a filter.

    ", + "refs": { + "Filters$member": null + } + }, + "Filters": { + "base": null, + "refs": { + "DescribeTagsType$Filters": "

    A filter used to scope the tags to return.

    " + } + }, + "ForceDelete": { + "base": null, + "refs": { + "DeleteAutoScalingGroupType$ForceDelete": "

    Specifies that the group will be deleted along with all instances associated with the group, without waiting for all instances to be terminated. This parameter also deletes any lifecycle actions associated with the group.

    " + } + }, + "GlobalTimeout": { + "base": null, + "refs": { + "LifecycleHook$GlobalTimeout": "

    The maximum time, in seconds, that an instance can remain in a Pending:Wait or Terminating:Wait state. The default is 172800 seconds (48 hours).

    " + } + }, + "HealthCheckGracePeriod": { + "base": null, + "refs": { + "AutoScalingGroup$HealthCheckGracePeriod": "

    The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service.

    ", + "CreateAutoScalingGroupType$HealthCheckGracePeriod": "

    The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default is 300.

    This parameter is required if you are adding an ELB health check.

    For more information, see Health Checks for Auto Scaling Instances in the Auto Scaling Developer Guide.

    ", + "UpdateAutoScalingGroupType$HealthCheckGracePeriod": "

    The amount of time, in seconds, that Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default is 300.

    For more information, see Health Checks For Auto Scaling Instances in the Auto Scaling Developer Guide.

    " + } + }, + "HeartbeatTimeout": { + "base": null, + "refs": { + "LifecycleHook$HeartbeatTimeout": "

    The maximum time, in seconds, that can elapse before the lifecycle hook times out. The default is 3600 seconds (1 hour). When the lifecycle hook times out, Auto Scaling performs the action defined in the DefaultResult parameter. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.

    ", + "PutLifecycleHookType$HeartbeatTimeout": "

    The amount of time, in seconds, that can elapse before the lifecycle hook times out. When the lifecycle hook times out, Auto Scaling performs the action defined in the DefaultResult parameter. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. The default is 3600 seconds (1 hour).

    " + } + }, + "HonorCooldown": { + "base": null, + "refs": { + "ExecutePolicyType$HonorCooldown": "

    If this parameter is true, Auto Scaling waits for the cooldown period to complete before executing the policy. Otherwise, Auto Scaling executes the policy without waiting for the cooldown period to complete.

    This parameter is not supported if the policy type is StepScaling.

    For more information, see Understanding Auto Scaling Cooldowns in the Auto Scaling Developer Guide.

    ", + "SetDesiredCapacityType$HonorCooldown": "

    By default, SetDesiredCapacity overrides any cooldown period associated with the Auto Scaling group. Specify True to make Auto Scaling wait for the cooldown period associated with the Auto Scaling group to complete before initiating a scaling activity to set your Auto Scaling group to its new capacity.

    " + } + }, + "Instance": { + "base": "

    Describes an EC2 instance.

    ", + "refs": { + "Instances$member": null + } + }, + "InstanceIds": { + "base": null, + "refs": { + "AttachInstancesQuery$InstanceIds": "

    One or more EC2 instance IDs.

    ", + "DescribeAutoScalingInstancesType$InstanceIds": "

    One or more Auto Scaling instances to describe, up to 50 instances. If you omit this parameter, all Auto Scaling instances are described. If you specify an ID that does not exist, it is ignored with no error.

    ", + "DetachInstancesQuery$InstanceIds": "

    One or more instance IDs.

    ", + "EnterStandbyQuery$InstanceIds": "

    One or more instances to move into Standby mode. You must specify at least one instance ID.

    ", + "ExitStandbyQuery$InstanceIds": "

    One or more instance IDs. You must specify at least one instance ID.

    ", + "SetInstanceProtectionQuery$InstanceIds": "

    One or more instance IDs.

    " + } + }, + "InstanceMonitoring": { + "base": "

    Describes whether instance monitoring is enabled.

    ", + "refs": { + "CreateLaunchConfigurationType$InstanceMonitoring": "

    Enables detailed monitoring if it is disabled. Detailed monitoring is enabled by default.

    When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, by specifying False, CloudWatch generates metrics every 5 minutes. For more information, see Monitor Your Auto Scaling Instances in the Auto Scaling Developer Guide.

    ", + "LaunchConfiguration$InstanceMonitoring": "

    Controls whether instances in this group are launched with detailed monitoring.

    " + } + }, + "InstanceProtected": { + "base": null, + "refs": { + "AutoScalingGroup$NewInstancesProtectedFromScaleIn": "

    Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

    ", + "AutoScalingInstanceDetails$ProtectedFromScaleIn": "

    Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

    ", + "CreateAutoScalingGroupType$NewInstancesProtectedFromScaleIn": "

    Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

    ", + "Instance$ProtectedFromScaleIn": "

    Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

    ", + "UpdateAutoScalingGroupType$NewInstancesProtectedFromScaleIn": "

    Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.

    " + } + }, + "Instances": { + "base": null, + "refs": { + "AutoScalingGroup$Instances": "

    The EC2 instances associated with the group.

    " + } + }, + "InvalidNextToken": { + "base": "

    The NextToken value is not valid.

    ", + "refs": { + } + }, + "LaunchConfiguration": { + "base": "

    Describes a launch configuration.

    ", + "refs": { + "LaunchConfigurations$member": null + } + }, + "LaunchConfigurationNameType": { + "base": null, + "refs": { + } + }, + "LaunchConfigurationNames": { + "base": null, + "refs": { + "LaunchConfigurationNamesType$LaunchConfigurationNames": "

    The launch configuration names.

    " + } + }, + "LaunchConfigurationNamesType": { + "base": null, + "refs": { + } + }, + "LaunchConfigurations": { + "base": null, + "refs": { + "LaunchConfigurationsType$LaunchConfigurations": "

    The launch configurations.

    " + } + }, + "LaunchConfigurationsType": { + "base": null, + "refs": { + } + }, + "LifecycleActionResult": { + "base": null, + "refs": { + "CompleteLifecycleActionType$LifecycleActionResult": "

    The action for the group to take. This parameter can be either CONTINUE or ABANDON.

    ", + "LifecycleHook$DefaultResult": "

    Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The valid values are CONTINUE and ABANDON. The default value is CONTINUE.

    ", + "PutLifecycleHookType$DefaultResult": "

    Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The value for this parameter can be either CONTINUE or ABANDON. The default value for this parameter is ABANDON.

    " + } + }, + "LifecycleActionToken": { + "base": null, + "refs": { + "CompleteLifecycleActionType$LifecycleActionToken": "

    A universally unique identifier (UUID) that identifies a specific lifecycle action associated with an instance. Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.

    ", + "RecordLifecycleActionHeartbeatType$LifecycleActionToken": "

    A token that uniquely identifies a specific lifecycle action associated with an instance. Auto Scaling sends this token to the notification target you specified when you created the lifecycle hook.

    " + } + }, + "LifecycleHook": { + "base": "

    Describes a lifecycle hook, which tells Auto Scaling that you want to perform an action when an instance launches or terminates. When you have a lifecycle hook in place, the Auto Scaling group will either:

    • Pause the instance after it launches, but before it is put into service
    • Pause the instance as it terminates, but before it is fully terminated

    For more information, see Auto Scaling Pending State and Auto Scaling Terminating State in the Auto Scaling Developer Guide.

    ", + "refs": { + "LifecycleHooks$member": null + } + }, + "LifecycleHookNames": { + "base": null, + "refs": { + "DescribeLifecycleHooksType$LifecycleHookNames": "

    The names of one or more lifecycle hooks.

    " + } + }, + "LifecycleHooks": { + "base": null, + "refs": { + "DescribeLifecycleHooksAnswer$LifecycleHooks": "

    The lifecycle hooks for the specified group.

    " + } + }, + "LifecycleState": { + "base": null, + "refs": { + "Instance$LifecycleState": "

    A description of the current lifecycle state. Note that the Quarantined state is not used.

    " + } + }, + "LifecycleTransition": { + "base": null, + "refs": { + "LifecycleHook$LifecycleTransition": "

    The state of the EC2 instance to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see DescribeLifecycleHookTypes.

    ", + "PutLifecycleHookType$LifecycleTransition": "

    The instance state to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see DescribeLifecycleHookTypes.

    This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

    " + } + }, + "LimitExceededFault": { + "base": "

    You have already reached a limit for your Auto Scaling resources (for example, groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits.

    ", + "refs": { + } + }, + "LoadBalancerNames": { + "base": null, + "refs": { + "AttachLoadBalancersType$LoadBalancerNames": "

    One or more load balancer names.

    ", + "AutoScalingGroup$LoadBalancerNames": "

    One or more load balancers associated with the group.

    ", + "CreateAutoScalingGroupType$LoadBalancerNames": "

    One or more load balancers.

    For more information, see Load Balance Your Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "DetachLoadBalancersType$LoadBalancerNames": "

    One or more load balancer names.

    " + } + }, + "LoadBalancerState": { + "base": "

    Describes the state of a load balancer.

    ", + "refs": { + "LoadBalancerStates$member": null + } + }, + "LoadBalancerStates": { + "base": null, + "refs": { + "DescribeLoadBalancersResponse$LoadBalancers": "

    The load balancers.

    " + } + }, + "MaxNumberOfAutoScalingGroups": { + "base": null, + "refs": { + "DescribeAccountLimitsAnswer$MaxNumberOfAutoScalingGroups": "

    The maximum number of groups allowed for your AWS account. The default limit is 20 per region.

    " + } + }, + "MaxNumberOfLaunchConfigurations": { + "base": null, + "refs": { + "DescribeAccountLimitsAnswer$MaxNumberOfLaunchConfigurations": "

    The maximum number of launch configurations allowed for your AWS account. The default limit is 100 per region.

    " + } + }, + "MaxRecords": { + "base": null, + "refs": { + "AutoScalingGroupNamesType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeAutoScalingInstancesType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeLoadBalancersRequest$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeNotificationConfigurationsType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribePoliciesType$MaxRecords": "

    The maximum number of items to be returned with each call.

    ", + "DescribeScalingActivitiesType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeScheduledActionsType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "DescribeTagsType$MaxRecords": "

    The maximum number of items to return with this call.

    ", + "LaunchConfigurationNamesType$MaxRecords": "

    The maximum number of items to return with this call. The default is 100.

    " + } + }, + "MetricCollectionType": { + "base": "

    Describes a metric.

    ", + "refs": { + "MetricCollectionTypes$member": null + } + }, + "MetricCollectionTypes": { + "base": null, + "refs": { + "DescribeMetricCollectionTypesAnswer$Metrics": "

    One or more metrics.

    " + } + }, + "MetricGranularityType": { + "base": "

    Describes a granularity of a metric.

    ", + "refs": { + "MetricGranularityTypes$member": null + } + }, + "MetricGranularityTypes": { + "base": null, + "refs": { + "DescribeMetricCollectionTypesAnswer$Granularities": "

    The granularities for the metrics.

    " + } + }, + "MetricScale": { + "base": null, + "refs": { + "ExecutePolicyType$MetricValue": "

    The metric value to compare to BreachThreshold. This enables you to execute a policy of type StepScaling and determine which step adjustment to use. For example, if the breach threshold is 50 and you want to use a step adjustment with a lower bound of 0 and an upper bound of 10, you can set the metric value to 59.

    If you specify a metric value that doesn't correspond to a step adjustment for the policy, the call returns an error.

    This parameter is required if the policy type is StepScaling and not supported otherwise.

    ", + "ExecutePolicyType$BreachThreshold": "

    The breach threshold for the alarm.

    This parameter is required if the policy type is StepScaling and not supported otherwise.

    ", + "StepAdjustment$MetricIntervalLowerBound": "

    The lower bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the lower bound is inclusive (the metric must be greater than or equal to the threshold plus the lower bound). Otherwise, it is exclusive (the metric must be greater than the threshold plus the lower bound). A null value indicates negative infinity.

    ", + "StepAdjustment$MetricIntervalUpperBound": "

    The upper bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the upper bound is exclusive (the metric must be less than the threshold plus the upper bound). Otherwise, it is inclusive (the metric must be less than or equal to the threshold plus the upper bound). A null value indicates positive infinity.

    The upper bound must be greater than the lower bound.

    " + } + }, + "Metrics": { + "base": null, + "refs": { + "DisableMetricsCollectionQuery$Metrics": "

    One or more of the following metrics. If you omit this parameter, all metrics are disabled.

    • GroupMinSize

    • GroupMaxSize

    • GroupDesiredCapacity

    • GroupInServiceInstances

    • GroupPendingInstances

    • GroupStandbyInstances

    • GroupTerminatingInstances

    • GroupTotalInstances

    ", + "EnableMetricsCollectionQuery$Metrics": "

    One or more of the following metrics. If you omit this parameter, all metrics are enabled.

    • GroupMinSize

    • GroupMaxSize

    • GroupDesiredCapacity

    • GroupInServiceInstances

    • GroupPendingInstances

    • GroupStandbyInstances

    • GroupTerminatingInstances

    • GroupTotalInstances

    Note that the GroupStandbyInstances metric is not enabled by default. You must explicitly request this metric.

    " + } + }, + "MinAdjustmentMagnitude": { + "base": null, + "refs": { + "PutScalingPolicyType$MinAdjustmentMagnitude": "

    The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Otherwise, the error is ValidationError.

    ", + "ScalingPolicy$MinAdjustmentMagnitude": "

    The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Otherwise, the error is ValidationError.

    " + } + }, + "MinAdjustmentStep": { + "base": null, + "refs": { + "PutScalingPolicyType$MinAdjustmentStep": "

    Available for backward compatibility. Use MinAdjustmentMagnitude instead.

    ", + "ScalingPolicy$MinAdjustmentStep": "

    Available for backward compatibility. Use MinAdjustmentMagnitude instead.

    " + } + }, + "MonitoringEnabled": { + "base": null, + "refs": { + "InstanceMonitoring$Enabled": "

    If True, instance monitoring is enabled.

    " + } + }, + "NoDevice": { + "base": null, + "refs": { + "BlockDeviceMapping$NoDevice": "

    Suppresses a device mapping.

    If this parameter is true for the root device, the instance might fail the EC2 health check. Auto Scaling launches a replacement instance if the instance fails the health check.

    " + } + }, + "NotificationConfiguration": { + "base": "

    Describes a notification.

    ", + "refs": { + "NotificationConfigurations$member": null + } + }, + "NotificationConfigurations": { + "base": null, + "refs": { + "DescribeNotificationConfigurationsAnswer$NotificationConfigurations": "

    The notification configurations.

    " + } + }, + "NumberOfAutoScalingGroups": { + "base": null, + "refs": { + "DescribeAccountLimitsAnswer$NumberOfAutoScalingGroups": "

    The current number of groups for your AWS account.

    " + } + }, + "NumberOfLaunchConfigurations": { + "base": null, + "refs": { + "DescribeAccountLimitsAnswer$NumberOfLaunchConfigurations": "

    The current number of launch configurations for your AWS account.

    " + } + }, + "PoliciesType": { + "base": null, + "refs": { + } + }, + "PolicyARNType": { + "base": null, + "refs": { + } + }, + "PolicyIncrement": { + "base": null, + "refs": { + "PutScalingPolicyType$ScalingAdjustment": "

    The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

    This parameter is required if the policy type is SimpleScaling and not supported otherwise.

    ", + "ScalingPolicy$ScalingAdjustment": "

    The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

    ", + "StepAdjustment$ScalingAdjustment": "

    The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

    " + } + }, + "PolicyNames": { + "base": null, + "refs": { + "DescribePoliciesType$PolicyNames": "

    One or more policy names or policy ARNs to be described. If you omit this list, all policy names are described. If a group name is provided, the results are limited to that group. This list is limited to 50 items. If you specify an unknown policy name, it is ignored with no error.

    " + } + }, + "PolicyTypes": { + "base": null, + "refs": { + "DescribePoliciesType$PolicyTypes": "

    One or more policy types. Valid values are SimpleScaling and StepScaling.

    " + } + }, + "ProcessNames": { + "base": null, + "refs": { + "ScalingProcessQuery$ScalingProcesses": "

    One or more of the following processes:

    • Launch

    • Terminate

    • HealthCheck

    • ReplaceUnhealthy

    • AZRebalance

    • AlarmNotification

    • ScheduledActions

    • AddToLoadBalancer

    " + } + }, + "ProcessType": { + "base": "

    Describes a process type.

    For more information, see Auto Scaling Processes in the Auto Scaling Developer Guide.

    ", + "refs": { + "Processes$member": null + } + }, + "Processes": { + "base": null, + "refs": { + "ProcessesType$Processes": "

    The names of the process types.

    " + } + }, + "ProcessesType": { + "base": null, + "refs": { + } + }, + "Progress": { + "base": null, + "refs": { + "Activity$Progress": "

    A value between 0 and 100 that indicates the progress of the activity.

    " + } + }, + "PropagateAtLaunch": { + "base": null, + "refs": { + "Tag$PropagateAtLaunch": "

    Determines whether the tag is added to new instances as they are launched in the group.

    ", + "TagDescription$PropagateAtLaunch": "

    Determines whether the tag is added to new instances as they are launched in the group.

    " + } + }, + "ProtectedFromScaleIn": { + "base": null, + "refs": { + "SetInstanceProtectionQuery$ProtectedFromScaleIn": "

    Indicates whether the instance is protected from termination by Auto Scaling when scaling in.

    " + } + }, + "PutLifecycleHookAnswer": { + "base": null, + "refs": { + } + }, + "PutLifecycleHookType": { + "base": null, + "refs": { + } + }, + "PutNotificationConfigurationType": { + "base": null, + "refs": { + } + }, + "PutScalingPolicyType": { + "base": null, + "refs": { + } + }, + "PutScheduledUpdateGroupActionType": { + "base": null, + "refs": { + } + }, + "RecordLifecycleActionHeartbeatAnswer": { + "base": null, + "refs": { + } + }, + "RecordLifecycleActionHeartbeatType": { + "base": null, + "refs": { + } + }, + "ResourceContentionFault": { + "base": "

    You already have a pending update to an Auto Scaling resource (for example, a group, instance, or load balancer).

    ", + "refs": { + } + }, + "ResourceInUseFault": { + "base": "

    The Auto Scaling group or launch configuration can't be deleted because it is in use.

    ", + "refs": { + } + }, + "ResourceName": { + "base": null, + "refs": { + "Alarm$AlarmARN": "

    The Amazon Resource Name (ARN) of the alarm.

    ", + "AttachInstancesQuery$AutoScalingGroupName": "

    The name of the group.

    ", + "AttachLoadBalancersType$AutoScalingGroupName": "

    The name of the group.

    ", + "AutoScalingGroup$AutoScalingGroupARN": "

    The Amazon Resource Name (ARN) of the group.

    ", + "AutoScalingGroupNames$member": null, + "CompleteLifecycleActionType$AutoScalingGroupName": "

    The name of the group for the lifecycle hook.

    ", + "CreateAutoScalingGroupType$LaunchConfigurationName": "

    The name of the launch configuration. Alternatively, use the InstanceId parameter to specify an EC2 instance instead of a launch configuration.

    ", + "DeleteAutoScalingGroupType$AutoScalingGroupName": "

    The name of the group to delete.

    ", + "DeleteLifecycleHookType$AutoScalingGroupName": "

    The name of the Auto Scaling group for the lifecycle hook.

    ", + "DeleteNotificationConfigurationType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "DeleteNotificationConfigurationType$TopicARN": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic.

    ", + "DeletePolicyType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "DeletePolicyType$PolicyName": "

    The name or Amazon Resource Name (ARN) of the policy.

    ", + "DeleteScheduledActionType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "DeleteScheduledActionType$ScheduledActionName": "

    The name of the action to delete.

    ", + "DescribeLifecycleHooksType$AutoScalingGroupName": "

    The name of the group.

    ", + "DescribeLoadBalancersRequest$AutoScalingGroupName": "

    The name of the group.

    ", + "DescribePoliciesType$AutoScalingGroupName": "

    The name of the group.

    ", + "DescribeScalingActivitiesType$AutoScalingGroupName": "

    The name of the group.

    ", + "DescribeScheduledActionsType$AutoScalingGroupName": "

    The name of the group.

    ", + "DetachInstancesQuery$AutoScalingGroupName": "

    The name of the group.

    ", + "DetachLoadBalancersType$AutoScalingGroupName": "

    The name of the group.

    ", + "DisableMetricsCollectionQuery$AutoScalingGroupName": "

    The name or Amazon Resource Name (ARN) of the group.

    ", + "EnableMetricsCollectionQuery$AutoScalingGroupName": "

    The name or ARN of the Auto Scaling group.

    ", + "EnterStandbyQuery$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "ExecutePolicyType$AutoScalingGroupName": "

    The name or Amazon Resource Name (ARN) of the Auto Scaling group.

    ", + "ExecutePolicyType$PolicyName": "

    The name or ARN of the policy.

    ", + "ExitStandbyQuery$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "LaunchConfiguration$LaunchConfigurationARN": "

    The Amazon Resource Name (ARN) of the launch configuration.

    ", + "LaunchConfigurationNameType$LaunchConfigurationName": "

    The name of the launch configuration.

    ", + "LaunchConfigurationNames$member": null, + "LifecycleHook$AutoScalingGroupName": "

    The name of the Auto Scaling group for the lifecycle hook.

    ", + "LifecycleHook$NotificationTargetARN": "

    The ARN of the notification target that Auto Scaling uses to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue or an SNS topic. The notification message sent to the target includes the following:

    • Lifecycle action token
    • User account ID
    • Name of the Auto Scaling group
    • Lifecycle hook name
    • EC2 instance ID
    • Lifecycle transition
    • Notification metadata
    ", + "LifecycleHook$RoleARN": "

    The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.

    ", + "NotificationConfiguration$AutoScalingGroupName": "

    The name of the group.

    ", + "NotificationConfiguration$TopicARN": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic.

    ", + "PolicyARNType$PolicyARN": "

    The Amazon Resource Name (ARN) of the policy.

    ", + "PolicyNames$member": null, + "PutLifecycleHookType$AutoScalingGroupName": "

    The name of the Auto Scaling group to which you want to assign the lifecycle hook.

    ", + "PutLifecycleHookType$RoleARN": "

    The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.

    This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

    ", + "PutLifecycleHookType$NotificationTargetARN": "

    The ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue or an SNS topic.

    This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

    The notification message sent to the target will include:

    • LifecycleActionToken. The Lifecycle action token.
    • AccountId. The user account ID.
    • AutoScalingGroupName. The name of the Auto Scaling group.
    • LifecycleHookName. The lifecycle hook name.
    • EC2InstanceId. The EC2 instance ID.
    • LifecycleTransition. The lifecycle transition.
    • NotificationMetadata. The notification metadata.

    This operation uses the JSON format when sending notifications to an Amazon SQS queue, and an email key/value pair format when sending notifications to an Amazon SNS topic.

    When you call this operation, a test message is sent to the notification target. This test message contains an additional key/value pair: Event:autoscaling:TEST_NOTIFICATION.

    ", + "PutNotificationConfigurationType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "PutNotificationConfigurationType$TopicARN": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic.

    ", + "PutScalingPolicyType$AutoScalingGroupName": "

    The name or ARN of the group.

    ", + "PutScheduledUpdateGroupActionType$AutoScalingGroupName": "

    The name or Amazon Resource Name (ARN) of the Auto Scaling group.

    ", + "RecordLifecycleActionHeartbeatType$AutoScalingGroupName": "

    The name of the Auto Scaling group for the hook.

    ", + "ScalingPolicy$PolicyARN": "

    The Amazon Resource Name (ARN) of the policy.

    ", + "ScalingProcessQuery$AutoScalingGroupName": "

    The name or Amazon Resource Name (ARN) of the Auto Scaling group.

    ", + "ScheduledActionNames$member": null, + "ScheduledUpdateGroupAction$ScheduledActionARN": "

    The Amazon Resource Name (ARN) of the scheduled action.

    ", + "SetDesiredCapacityType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "SetInstanceProtectionQuery$AutoScalingGroupName": "

    The name of the group.

    ", + "UpdateAutoScalingGroupType$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "UpdateAutoScalingGroupType$LaunchConfigurationName": "

    The name of the launch configuration.

    " + } + }, + "ScalingActivityInProgressFault": { + "base": "

    The Auto Scaling group can't be deleted because there are scaling activities in progress.

    ", + "refs": { + } + }, + "ScalingActivityStatusCode": { + "base": null, + "refs": { + "Activity$StatusCode": "

    The current status of the activity.

    " + } + }, + "ScalingPolicies": { + "base": null, + "refs": { + "PoliciesType$ScalingPolicies": "

    The scaling policies.

    " + } + }, + "ScalingPolicy": { + "base": "

    Describes a scaling policy.

    ", + "refs": { + "ScalingPolicies$member": null + } + }, + "ScalingProcessQuery": { + "base": null, + "refs": { + } + }, + "ScheduledActionNames": { + "base": null, + "refs": { + "DescribeScheduledActionsType$ScheduledActionNames": "

    Describes one or more scheduled actions. If you omit this list, the call describes all scheduled actions. If you specify an unknown scheduled action, it is ignored with no error.

    You can describe up to a maximum of 50 instances with a single call. If there are more items to return, the call returns a token. To get the next set of items, repeat the call with the returned token in the NextToken parameter.

    " + } + }, + "ScheduledActionsType": { + "base": null, + "refs": { + } + }, + "ScheduledUpdateGroupAction": { + "base": "

    Describes a scheduled update to an Auto Scaling group.

    ", + "refs": { + "ScheduledUpdateGroupActions$member": null + } + }, + "ScheduledUpdateGroupActions": { + "base": null, + "refs": { + "ScheduledActionsType$ScheduledUpdateGroupActions": "

    The scheduled actions.

    " + } + }, + "SecurityGroups": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$SecurityGroups": "

    One or more security groups with which to associate the instances.

    If your instances are launched in EC2-Classic, you can either specify security group names or the security group IDs. For more information about security groups for EC2-Classic, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide.

    If your instances are launched into a VPC, specify security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "LaunchConfiguration$SecurityGroups": "

    The security groups to associate with the instances.

    " + } + }, + "SetDesiredCapacityType": { + "base": null, + "refs": { + } + }, + "SetInstanceHealthQuery": { + "base": null, + "refs": { + } + }, + "SetInstanceProtectionAnswer": { + "base": null, + "refs": { + } + }, + "SetInstanceProtectionQuery": { + "base": null, + "refs": { + } + }, + "ShouldDecrementDesiredCapacity": { + "base": null, + "refs": { + "DetachInstancesQuery$ShouldDecrementDesiredCapacity": "

    If True, the Auto Scaling group decrements the desired capacity value by the number of instances detached.

    ", + "EnterStandbyQuery$ShouldDecrementDesiredCapacity": "

    Specifies whether the instances moved to Standby mode count as part of the Auto Scaling group's desired capacity. If set, the desired capacity for the Auto Scaling group decrements by the number of instances moved to Standby mode.

    ", + "TerminateInstanceInAutoScalingGroupType$ShouldDecrementDesiredCapacity": "

    If true, terminating the instance also decrements the size of the Auto Scaling group.

    " + } + }, + "ShouldRespectGracePeriod": { + "base": null, + "refs": { + "SetInstanceHealthQuery$ShouldRespectGracePeriod": "

    If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call will respect the grace period. Set this to False if you do not want the call to respect the grace period associated with the group.

    For more information, see the HealthCheckGracePeriod parameter description for CreateAutoScalingGroup.

    " + } + }, + "SpotPrice": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$SpotPrice": "

    The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launch Spot Instances in Your Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "LaunchConfiguration$SpotPrice": "

    The price to bid when launching Spot Instances.

    " + } + }, + "StepAdjustment": { + "base": "

    Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.

    For the following examples, suppose that you have an alarm with a breach threshold of 50:

    • If you want the adjustment to be triggered when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

    • If you want the adjustment to be triggered when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

    There are a few rules for the step adjustments for your step policy:

    • The ranges of your step adjustments can't overlap or have a gap.

    • At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

    • At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

    • The upper and lower bound can't be null in the same step adjustment.

    ", + "refs": { + "StepAdjustments$member": null + } + }, + "StepAdjustments": { + "base": null, + "refs": { + "PutScalingPolicyType$StepAdjustments": "

    A set of adjustments that enable you to scale based on the size of the alarm breach.

    This parameter is required if the policy type is StepScaling and not supported otherwise.

    ", + "ScalingPolicy$StepAdjustments": "

    A set of adjustments that enable you to scale based on the size of the alarm breach.

    " + } + }, + "SuspendedProcess": { + "base": "

    Describes an Auto Scaling process that has been suspended. For more information, see ProcessType.

    ", + "refs": { + "SuspendedProcesses$member": null + } + }, + "SuspendedProcesses": { + "base": null, + "refs": { + "AutoScalingGroup$SuspendedProcesses": "

    The suspended processes associated with the group.

    " + } + }, + "Tag": { + "base": "

    Describes a tag for an Auto Scaling group.

    ", + "refs": { + "Tags$member": null + } + }, + "TagDescription": { + "base": "

    Describes a tag for an Auto Scaling group.

    ", + "refs": { + "TagDescriptionList$member": null + } + }, + "TagDescriptionList": { + "base": null, + "refs": { + "AutoScalingGroup$Tags": "

    The tags for the group.

    ", + "TagsType$Tags": "

    The tags.

    " + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The tag key.

    ", + "TagDescription$Key": "

    The tag key.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The tag value.

    ", + "TagDescription$Value": "

    The tag value.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "CreateAutoScalingGroupType$Tags": "

    The tag to be created or updated. Each tag should be defined by its resource type, resource ID, key, value, and a propagate flag. Valid values: key=value, value=value, propagate=true or false. Value and propagate are optional parameters.

    For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling Developer Guide.

    ", + "CreateOrUpdateTagsType$Tags": "

    One or more tags.

    ", + "DeleteTagsType$Tags": "

    Each tag should be defined by its resource type, resource ID, key, value, and a propagate flag. Valid values are: Resource type = auto-scaling-group, Resource ID = AutoScalingGroupName, key=value, value=value, propagate=true or false.

    " + } + }, + "TagsType": { + "base": null, + "refs": { + } + }, + "TerminateInstanceInAutoScalingGroupType": { + "base": null, + "refs": { + } + }, + "TerminationPolicies": { + "base": null, + "refs": { + "AutoScalingGroup$TerminationPolicies": "

    The termination policies for the group.

    ", + "CreateAutoScalingGroupType$TerminationPolicies": "

    One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.

    For more information, see Choosing a Termination Policy for Your Auto Scaling Group in the Auto Scaling Developer Guide.

    ", + "DescribeTerminationPolicyTypesAnswer$TerminationPolicyTypes": "

    The termination policies supported by Auto Scaling (OldestInstance, OldestLaunchConfiguration, NewestInstance, ClosestToNextInstanceHour, and Default).

    ", + "UpdateAutoScalingGroupType$TerminationPolicies": "

    A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.

    For more information, see Choosing a Termination Policy for Your Auto Scaling Group in the Auto Scaling Developer Guide.

    " + } + }, + "TimestampType": { + "base": null, + "refs": { + "Activity$StartTime": "

    The start time of the activity.

    ", + "Activity$EndTime": "

    The end time of the activity.

    ", + "AutoScalingGroup$CreatedTime": "

    The date and time the group was created.

    ", + "DescribeScheduledActionsType$StartTime": "

    The earliest scheduled start time to return. If scheduled action names are provided, this parameter is ignored.

    ", + "DescribeScheduledActionsType$EndTime": "

    The latest scheduled start time to return. If scheduled action names are provided, this parameter is ignored.

    ", + "LaunchConfiguration$CreatedTime": "

    The creation date and time for the launch configuration.

    ", + "PutScheduledUpdateGroupActionType$Time": "

    This parameter is deprecated; use StartTime instead.

    The time for this action to start. If both Time and StartTime are specified, their values must be identical.

    ", + "PutScheduledUpdateGroupActionType$StartTime": "

    The time for this action to start, in \"YYYY-MM-DDThh:mm:ssZ\" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z).

    If you try to schedule your action in the past, Auto Scaling returns an error message.

    When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

    ", + "PutScheduledUpdateGroupActionType$EndTime": "

    The time for this action to end.

    ", + "ScheduledUpdateGroupAction$Time": "

    This parameter is deprecated; use StartTime instead.

    ", + "ScheduledUpdateGroupAction$StartTime": "

    The date and time that the action is scheduled to begin. This date and time can be up to one month in the future.

    When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.

    ", + "ScheduledUpdateGroupAction$EndTime": "

    The date and time that the action is scheduled to end. This date and time can be up to one month in the future.

    " + } + }, + "UpdateAutoScalingGroupType": { + "base": null, + "refs": { + } + }, + "Values": { + "base": null, + "refs": { + "Filter$Values": "

    The value of the filter.

    " + } + }, + "XmlString": { + "base": null, + "refs": { + "ActivitiesType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "Activity$ActivityId": "

    The ID of the activity.

    ", + "Activity$Description": "

    A friendly, more verbose description of the activity.

    ", + "Activity$Details": "

    The details about the activity.

    ", + "ActivityIds$member": null, + "AutoScalingGroupNamesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "AutoScalingGroupsType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "AutoScalingInstancesType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeAutoScalingInstancesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeLoadBalancersRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeLoadBalancersResponse$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeNotificationConfigurationsAnswer$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeNotificationConfigurationsType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribePoliciesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeScalingActivitiesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeScheduledActionsType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "DescribeTagsType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "Filter$Name": "

    The name of the filter. The valid values are: \"auto-scaling-group\", \"key\", \"value\", and \"propagate-at-launch\".

    ", + "LaunchConfigurationNamesType$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "LaunchConfigurationsType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "PoliciesType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "ScheduledActionsType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "SecurityGroups$member": null, + "Tag$ResourceId": "

    The name of the group.

    ", + "Tag$ResourceType": "

    The type of resource. The only supported value is auto-scaling-group.

    ", + "TagDescription$ResourceId": "

    The name of the group.

    ", + "TagDescription$ResourceType": "

    The type of resource. The only supported value is auto-scaling-group.

    ", + "TagsType$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "Values$member": null + } + }, + "XmlStringMaxLen1023": { + "base": null, + "refs": { + "Activity$Cause": "

    The reason the activity began.

    ", + "LifecycleHook$NotificationMetadata": "

    Additional information that you want to include any time Auto Scaling sends a message to the notification target.

    ", + "PutLifecycleHookType$NotificationMetadata": "

    Contains additional information that you want to include any time Auto Scaling sends a message to the notification target.

    " + } + }, + "XmlStringMaxLen1600": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$IamInstanceProfile": "

    The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

    EC2 instances launched with an IAM role will automatically have AWS security credentials available. You can use IAM roles with Auto Scaling to automatically enable applications running on your EC2 instances to securely access other AWS resources. For more information, see Launch Auto Scaling Instances with an IAM Role in the Auto Scaling Developer Guide.

    ", + "LaunchConfiguration$IamInstanceProfile": "

    The name or Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.

    ", + "TerminationPolicies$member": null + } + }, + "XmlStringMaxLen19": { + "base": null, + "refs": { + "AutoScalingInstanceDetails$InstanceId": "

    The ID of the instance.

    ", + "CreateAutoScalingGroupType$InstanceId": "

    The ID of the EC2 instance used to create a launch configuration for the group. Alternatively, use the LaunchConfigurationName parameter to specify a launch configuration instead of an EC2 instance.

    When you specify an ID of an instance, Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, with the exception of the block device mapping.

    For more information, see Create an Auto Scaling Group from an EC2 Instance in the Auto Scaling Developer Guide.

    ", + "CreateLaunchConfigurationType$InstanceId": "

    The ID of the EC2 instance to use to create the launch configuration.

    The new launch configuration derives attributes from the instance, with the exception of the block device mapping.

    To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.

    For more information, see Create a Launch Configuration Using an EC2 Instance in the Auto Scaling Developer Guide.

    ", + "Instance$InstanceId": "

    The ID of the instance.

    ", + "InstanceIds$member": null, + "SetInstanceHealthQuery$InstanceId": "

    The ID of the EC2 instance.

    ", + "TerminateInstanceInAutoScalingGroupType$InstanceId": "

    The ID of the EC2 instance.

    " + } + }, + "XmlStringMaxLen255": { + "base": null, + "refs": { + "Activity$AutoScalingGroupName": "

    The name of the Auto Scaling group.

    ", + "Activity$StatusMessage": "

    A friendly, more verbose description of the activity status.

    ", + "AdjustmentType$AdjustmentType": "

    The policy adjustment type. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

    ", + "Alarm$AlarmName": "

    The name of the alarm.

    ", + "AlreadyExistsFault$message": null, + "AutoScalingGroup$AutoScalingGroupName": "

    The name of the group.

    ", + "AutoScalingGroup$LaunchConfigurationName": "

    The name of the associated launch configuration.

    ", + "AutoScalingGroup$PlacementGroup": "

    The name of the placement group into which you'll launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

    ", + "AutoScalingGroup$VPCZoneIdentifier": "

    One or more subnet IDs, if applicable, separated by commas.

    If you specify VPCZoneIdentifier and AvailabilityZones, ensure that the Availability Zones of the subnets match the values for AvailabilityZones.

    ", + "AutoScalingGroup$Status": "

    The current state of the group when DeleteAutoScalingGroup is in progress.

    ", + "AutoScalingInstanceDetails$AutoScalingGroupName": "

    The name of the Auto Scaling group associated with the instance.

    ", + "AutoScalingInstanceDetails$AvailabilityZone": "

    The Availability Zone for the instance.

    ", + "AutoScalingInstanceDetails$LaunchConfigurationName": "

    The launch configuration associated with the instance.

    ", + "AutoScalingNotificationTypes$member": null, + "AvailabilityZones$member": null, + "BlockDeviceMapping$VirtualName": "

    The name of the virtual device (for example, ephemeral0).

    ", + "BlockDeviceMapping$DeviceName": "

    The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh).

    ", + "ClassicLinkVPCSecurityGroups$member": null, + "CreateAutoScalingGroupType$AutoScalingGroupName": "

    The name of the group. This name must be unique within the scope of your AWS account.

    ", + "CreateAutoScalingGroupType$PlacementGroup": "

    The name of the placement group into which you'll launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateAutoScalingGroupType$VPCZoneIdentifier": "

    A comma-separated list of subnet identifiers for your virtual private cloud (VPC).

    If you specify subnets and Availability Zones with this call, ensure that the subnets' Availability Zones match the Availability Zones specified.

    For more information, see Auto Scaling and Amazon Virtual Private Cloud in the Auto Scaling Developer Guide.

    ", + "CreateLaunchConfigurationType$LaunchConfigurationName": "

    The name of the launch configuration. This name must be unique within the scope of your AWS account.

    ", + "CreateLaunchConfigurationType$ImageId": "

    The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more information, see Finding an AMI in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateLaunchConfigurationType$KeyName": "

    The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateLaunchConfigurationType$ClassicLinkVPCId": "

    The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter is supported only if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateLaunchConfigurationType$InstanceType": "

    The instance type of the EC2 instance. For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateLaunchConfigurationType$KernelId": "

    The ID of the kernel associated with the AMI.

    ", + "CreateLaunchConfigurationType$RamdiskId": "

    The ID of the RAM disk associated with the AMI.

    ", + "Ebs$SnapshotId": "

    The ID of the snapshot.

    ", + "EnableMetricsCollectionQuery$Granularity": "

    The granularity to associate with the metrics to collect. The only valid value is 1Minute.

    ", + "EnabledMetric$Metric": "

    One of the following metrics:

    • GroupMinSize

    • GroupMaxSize

    • GroupDesiredCapacity

    • GroupInServiceInstances

    • GroupPendingInstances

    • GroupStandbyInstances

    • GroupTerminatingInstances

    • GroupTotalInstances

    ", + "EnabledMetric$Granularity": "

    The granularity of the metric. The only valid value is 1Minute.

    ", + "Instance$AvailabilityZone": "

    The Availability Zone in which the instance is running.

    ", + "Instance$LaunchConfigurationName": "

    The launch configuration associated with the instance.

    ", + "InvalidNextToken$message": null, + "LaunchConfiguration$LaunchConfigurationName": "

    The name of the launch configuration.

    ", + "LaunchConfiguration$ImageId": "

    The ID of the Amazon Machine Image (AMI).

    ", + "LaunchConfiguration$KeyName": "

    The name of the key pair.

    ", + "LaunchConfiguration$ClassicLinkVPCId": "

    The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. This parameter can only be used if you are launching EC2-Classic instances. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "LaunchConfiguration$InstanceType": "

    The instance type for the instances.

    ", + "LaunchConfiguration$KernelId": "

    The ID of the kernel associated with the AMI.

    ", + "LaunchConfiguration$RamdiskId": "

    The ID of the RAM disk associated with the AMI.

    ", + "LimitExceededFault$message": null, + "LoadBalancerNames$member": null, + "LoadBalancerState$LoadBalancerName": "

    The name of the load balancer.

    ", + "LoadBalancerState$State": "

    One of the following load balancer states:

    • Adding - The instances in the group are being registered with the load balancer.

    • Added - All instances in the group are registered with the load balancer.

    • InService - At least one instance in the group passed an ELB health check.

    • Removing - The instances are being deregistered from the load balancer. If connection draining is enabled, Elastic Load Balancing waits for in-flight requests to complete before deregistering the instances.

    ", + "MetricCollectionType$Metric": "

    One of the following metrics:

    • GroupMinSize

    • GroupMaxSize

    • GroupDesiredCapacity

    • GroupInServiceInstances

    • GroupPendingInstances

    • GroupStandbyInstances

    • GroupTerminatingInstances

    • GroupTotalInstances

    ", + "MetricGranularityType$Granularity": "

    The granularity. The only valid value is 1Minute.

    ", + "Metrics$member": null, + "NotificationConfiguration$NotificationType": "

    One of the following event notification types:

    • autoscaling:EC2_INSTANCE_LAUNCH

    • autoscaling:EC2_INSTANCE_LAUNCH_ERROR

    • autoscaling:EC2_INSTANCE_TERMINATE

    • autoscaling:EC2_INSTANCE_TERMINATE_ERROR

    • autoscaling:TEST_NOTIFICATION

    ", + "ProcessNames$member": null, + "ProcessType$ProcessName": "

    One of the following processes:

    • Launch

    • Terminate

    • AddToLoadBalancer

    • AlarmNotification

    • AZRebalance

    • HealthCheck

    • ReplaceUnhealthy

    • ScheduledActions

    ", + "PutScalingPolicyType$PolicyName": "

    The name of the policy.

    ", + "PutScalingPolicyType$AdjustmentType": "

    The adjustment type. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

    For more information, see Dynamic Scaling in the Auto Scaling Developer Guide.

    ", + "PutScheduledUpdateGroupActionType$ScheduledActionName": "

    The name of this scaling action.

    ", + "PutScheduledUpdateGroupActionType$Recurrence": "

    The time when recurring future actions will start. Start time is specified by the user following the Unix cron syntax format. For more information, see Cron in Wikipedia.

    When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.

    ", + "ResourceContentionFault$message": null, + "ResourceInUseFault$message": null, + "ScalingActivityInProgressFault$message": null, + "ScalingPolicy$AutoScalingGroupName": "

    The name of the Auto Scaling group associated with this scaling policy.

    ", + "ScalingPolicy$PolicyName": "

    The name of the scaling policy.

    ", + "ScalingPolicy$AdjustmentType": "

    The adjustment type, which specifies how ScalingAdjustment is interpreted. Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

    ", + "ScheduledUpdateGroupAction$AutoScalingGroupName": "

    The name of the group.

    ", + "ScheduledUpdateGroupAction$ScheduledActionName": "

    The name of the scheduled action.

    ", + "ScheduledUpdateGroupAction$Recurrence": "

    The recurring schedule for the action.

    ", + "SuspendedProcess$ProcessName": "

    The name of the suspended process.

    ", + "SuspendedProcess$SuspensionReason": "

    The reason that the process was suspended.

    ", + "UpdateAutoScalingGroupType$PlacementGroup": "

    The name of the placement group into which you'll launch your instances, if any. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

    ", + "UpdateAutoScalingGroupType$VPCZoneIdentifier": "

    The ID of the subnet, if you are launching into a VPC. You can specify several subnets in a comma-separated list.

    When you specify VPCZoneIdentifier with AvailabilityZones, ensure that the subnets' Availability Zones match the values you specify for AvailabilityZones.

    For more information, see Auto Scaling and Amazon Virtual Private Cloud in the Auto Scaling Developer Guide.

    " + } + }, + "XmlStringMaxLen32": { + "base": null, + "refs": { + "AutoScalingGroup$HealthCheckType": "

    The service to use for the health checks. The valid values are EC2 and ELB.

    ", + "AutoScalingInstanceDetails$LifecycleState": "

    The lifecycle state for the instance. For more information, see Auto Scaling Instance States in the Auto Scaling Developer Guide.

    ", + "AutoScalingInstanceDetails$HealthStatus": "

    The health status of this instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Auto Scaling should terminate and replace it.

    ", + "CreateAutoScalingGroupType$HealthCheckType": "

    The service to use for the health checks. The valid values are EC2 and ELB.

    By default, health checks use Amazon EC2 instance status checks to determine the health of an instance. For more information, see Health Checks in the Auto Scaling Developer Guide.

    ", + "Instance$HealthStatus": "

    The health status of the instance.

    ", + "PutScalingPolicyType$MetricAggregationType": "

    The aggregation type for the CloudWatch metrics. Valid values are Minimum, Maximum, and Average. If the aggregation type is null, the value is treated as Average.

    This parameter is not supported if the policy type is SimpleScaling.

    ", + "ScalingPolicy$MetricAggregationType": "

    The aggregation type for the CloudWatch metrics. Valid values are Minimum, Maximum, and Average.

    ", + "SetInstanceHealthQuery$HealthStatus": "

    The health status of the instance. Set to Healthy if you want the instance to remain in service. Set to Unhealthy if you want the instance to be out of service. Auto Scaling will terminate and replace the unhealthy instance.

    ", + "UpdateAutoScalingGroupType$HealthCheckType": "

    The service to use for the health checks. The valid values are EC2 and ELB.

    " + } + }, + "XmlStringMaxLen64": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$PlacementTenancy": "

    The tenancy of the instance. An instance with a tenancy of dedicated runs on single-tenant hardware and can only be launched into a VPC.

    You must set the value of this parameter to dedicated if want to launch Dedicated Instances into a shared tenancy VPC (VPC with instance placement tenancy attribute set to default).

    If you specify a value for this parameter, be sure to specify at least one subnet using the VPCZoneIdentifier parameter when you create your group.

    For more information, see Auto Scaling and Amazon Virtual Private Cloud in the Auto Scaling Developer Guide.

    Valid values: default | dedicated

    ", + "LaunchConfiguration$PlacementTenancy": "

    The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs in an isolated, single-tenant hardware and can only be launched into a VPC.

    ", + "PolicyTypes$member": null, + "PutScalingPolicyType$PolicyType": "

    The policy type. Valid values are SimpleScaling and StepScaling. If the policy type is null, the value is treated as SimpleScaling.

    ", + "ScalingPolicy$PolicyType": "

    The policy type. Valid values are SimpleScaling and StepScaling.

    " + } + }, + "XmlStringUserData": { + "base": null, + "refs": { + "CreateLaunchConfigurationType$UserData": "

    The user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

    At this time, launch configurations don't support compressed (zipped) user data files.

    ", + "LaunchConfiguration$UserData": "

    The user data available to the instances.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/autoscaling/2011-01-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,52 @@ +{ + "pagination": { + "DescribeAutoScalingGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "AutoScalingGroups" + }, + "DescribeAutoScalingInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "AutoScalingInstances" + }, + "DescribeLaunchConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "LaunchConfigurations" + }, + "DescribeNotificationConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": 
"NotificationConfigurations" + }, + "DescribePolicies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "ScalingPolicies" + }, + "DescribeScalingActivities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "Activities" + }, + "DescribeScheduledActions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "ScheduledUpdateGroupActions" + }, + "DescribeTags": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "Tags" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,923 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-05-15", + "endpointPrefix":"cloudformation", + "protocol":"query", + "serviceFullName":"AWS CloudFormation", + "signatureVersion":"v4", + "xmlNamespace":"http://cloudformation.amazonaws.com/doc/2010-05-15/" + }, + "operations":{ + "CancelUpdateStack":{ + "name":"CancelUpdateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelUpdateStackInput"} + }, + "ContinueUpdateRollback":{ + "name":"ContinueUpdateRollback", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ContinueUpdateRollbackInput"}, + "output":{ + "shape":"ContinueUpdateRollbackOutput", + "resultWrapper":"ContinueUpdateRollbackResult" + } + }, 
+ "CreateStack":{ + "name":"CreateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStackInput"}, + "output":{ + "shape":"CreateStackOutput", + "resultWrapper":"CreateStackResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"InsufficientCapabilitiesException"} + ] + }, + "DeleteStack":{ + "name":"DeleteStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStackInput"} + }, + "DescribeAccountLimits":{ + "name":"DescribeAccountLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountLimitsInput"}, + "output":{ + "shape":"DescribeAccountLimitsOutput", + "resultWrapper":"DescribeAccountLimitsResult" + } + }, + "DescribeStackEvents":{ + "name":"DescribeStackEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackEventsInput"}, + "output":{ + "shape":"DescribeStackEventsOutput", + "resultWrapper":"DescribeStackEventsResult" + } + }, + "DescribeStackResource":{ + "name":"DescribeStackResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackResourceInput"}, + "output":{ + "shape":"DescribeStackResourceOutput", + "resultWrapper":"DescribeStackResourceResult" + } + }, + "DescribeStackResources":{ + "name":"DescribeStackResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackResourcesInput"}, + "output":{ + "shape":"DescribeStackResourcesOutput", + "resultWrapper":"DescribeStackResourcesResult" + } + }, + "DescribeStacks":{ + "name":"DescribeStacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStacksInput"}, + "output":{ + "shape":"DescribeStacksOutput", + "resultWrapper":"DescribeStacksResult" + } + }, + "EstimateTemplateCost":{ + "name":"EstimateTemplateCost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"EstimateTemplateCostInput"}, + "output":{ + "shape":"EstimateTemplateCostOutput", + "resultWrapper":"EstimateTemplateCostResult" + } + }, + "GetStackPolicy":{ + "name":"GetStackPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetStackPolicyInput"}, + "output":{ + "shape":"GetStackPolicyOutput", + "resultWrapper":"GetStackPolicyResult" + } + }, + "GetTemplate":{ + "name":"GetTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTemplateInput"}, + "output":{ + "shape":"GetTemplateOutput", + "resultWrapper":"GetTemplateResult" + } + }, + "GetTemplateSummary":{ + "name":"GetTemplateSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTemplateSummaryInput"}, + "output":{ + "shape":"GetTemplateSummaryOutput", + "resultWrapper":"GetTemplateSummaryResult" + } + }, + "ListStackResources":{ + "name":"ListStackResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackResourcesInput"}, + "output":{ + "shape":"ListStackResourcesOutput", + "resultWrapper":"ListStackResourcesResult" + } + }, + "ListStacks":{ + "name":"ListStacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStacksInput"}, + "output":{ + "shape":"ListStacksOutput", + "resultWrapper":"ListStacksResult" + } + }, + "SetStackPolicy":{ + "name":"SetStackPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetStackPolicyInput"} + }, + "SignalResource":{ + "name":"SignalResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SignalResourceInput"} + }, + "UpdateStack":{ + "name":"UpdateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStackInput"}, + "output":{ + "shape":"UpdateStackOutput", + "resultWrapper":"UpdateStackResult" + }, + "errors":[ + {"shape":"InsufficientCapabilitiesException"} + ] + }, + "ValidateTemplate":{ + 
"name":"ValidateTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ValidateTemplateInput"}, + "output":{ + "shape":"ValidateTemplateOutput", + "resultWrapper":"ValidateTemplateResult" + } + } + }, + "shapes":{ + "AccountLimit":{ + "type":"structure", + "members":{ + "Name":{"shape":"LimitName"}, + "Value":{"shape":"LimitValue"} + } + }, + "AccountLimitList":{ + "type":"list", + "member":{"shape":"AccountLimit"} + }, + "AllowedValue":{"type":"string"}, + "AllowedValues":{ + "type":"list", + "member":{"shape":"AllowedValue"} + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AlreadyExistsException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CancelUpdateStackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"} + } + }, + "Capabilities":{ + "type":"list", + "member":{"shape":"Capability"} + }, + "CapabilitiesReason":{"type":"string"}, + "Capability":{ + "type":"string", + "enum":["CAPABILITY_IAM"] + }, + "ContinueUpdateRollbackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackNameOrId"} + } + }, + "ContinueUpdateRollbackOutput":{ + "type":"structure", + "members":{ + } + }, + "CreateStackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"}, + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "Parameters":{"shape":"Parameters"}, + "DisableRollback":{"shape":"DisableRollback"}, + "TimeoutInMinutes":{"shape":"TimeoutMinutes"}, + "NotificationARNs":{"shape":"NotificationARNs"}, + "Capabilities":{"shape":"Capabilities"}, + "ResourceTypes":{"shape":"ResourceTypes"}, + "OnFailure":{"shape":"OnFailure"}, + "StackPolicyBody":{"shape":"StackPolicyBody"}, + "StackPolicyURL":{"shape":"StackPolicyURL"}, + "Tags":{"shape":"Tags"} + } + }, + "CreateStackOutput":{ + 
"type":"structure", + "members":{ + "StackId":{"shape":"StackId"} + } + }, + "CreationTime":{"type":"timestamp"}, + "DeleteStackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"} + } + }, + "DeletionTime":{"type":"timestamp"}, + "DescribeAccountLimitsInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeAccountLimitsOutput":{ + "type":"structure", + "members":{ + "AccountLimits":{"shape":"AccountLimitList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStackEventsInput":{ + "type":"structure", + "members":{ + "StackName":{"shape":"StackName"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStackEventsOutput":{ + "type":"structure", + "members":{ + "StackEvents":{"shape":"StackEvents"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStackResourceInput":{ + "type":"structure", + "required":[ + "StackName", + "LogicalResourceId" + ], + "members":{ + "StackName":{"shape":"StackName"}, + "LogicalResourceId":{"shape":"LogicalResourceId"} + } + }, + "DescribeStackResourceOutput":{ + "type":"structure", + "members":{ + "StackResourceDetail":{"shape":"StackResourceDetail"} + } + }, + "DescribeStackResourcesInput":{ + "type":"structure", + "members":{ + "StackName":{"shape":"StackName"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"} + } + }, + "DescribeStackResourcesOutput":{ + "type":"structure", + "members":{ + "StackResources":{"shape":"StackResources"} + } + }, + "DescribeStacksInput":{ + "type":"structure", + "members":{ + "StackName":{"shape":"StackName"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeStacksOutput":{ + "type":"structure", + "members":{ + "Stacks":{"shape":"Stacks"}, + "NextToken":{"shape":"NextToken"} + } + }, + "Description":{"type":"string"}, + "DisableRollback":{"type":"boolean"}, + "EstimateTemplateCostInput":{ + "type":"structure", + "members":{ 
+ "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "Parameters":{"shape":"Parameters"} + } + }, + "EstimateTemplateCostOutput":{ + "type":"structure", + "members":{ + "Url":{"shape":"Url"} + } + }, + "EventId":{"type":"string"}, + "GetStackPolicyInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"} + } + }, + "GetStackPolicyOutput":{ + "type":"structure", + "members":{ + "StackPolicyBody":{"shape":"StackPolicyBody"} + } + }, + "GetTemplateInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"} + } + }, + "GetTemplateOutput":{ + "type":"structure", + "members":{ + "TemplateBody":{"shape":"TemplateBody"} + } + }, + "GetTemplateSummaryInput":{ + "type":"structure", + "members":{ + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "StackName":{"shape":"StackNameOrId"} + } + }, + "GetTemplateSummaryOutput":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParameterDeclarations"}, + "Description":{"shape":"Description"}, + "Capabilities":{"shape":"Capabilities"}, + "CapabilitiesReason":{"shape":"CapabilitiesReason"}, + "ResourceTypes":{"shape":"ResourceTypes"}, + "Version":{"shape":"Version"}, + "Metadata":{"shape":"Metadata"} + } + }, + "InsufficientCapabilitiesException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientCapabilitiesException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LastUpdatedTime":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LimitExceededException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitName":{"type":"string"}, + "LimitValue":{"type":"integer"}, + "ListStackResourcesInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"}, + 
"NextToken":{"shape":"NextToken"} + } + }, + "ListStackResourcesOutput":{ + "type":"structure", + "members":{ + "StackResourceSummaries":{"shape":"StackResourceSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListStacksInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "StackStatusFilter":{"shape":"StackStatusFilter"} + } + }, + "ListStacksOutput":{ + "type":"structure", + "members":{ + "StackSummaries":{"shape":"StackSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "LogicalResourceId":{"type":"string"}, + "Metadata":{"type":"string"}, + "NextToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "NoEcho":{"type":"boolean"}, + "NotificationARN":{"type":"string"}, + "NotificationARNs":{ + "type":"list", + "member":{"shape":"NotificationARN"}, + "max":5 + }, + "OnFailure":{ + "type":"string", + "enum":[ + "DO_NOTHING", + "ROLLBACK", + "DELETE" + ] + }, + "Output":{ + "type":"structure", + "members":{ + "OutputKey":{"shape":"OutputKey"}, + "OutputValue":{"shape":"OutputValue"}, + "Description":{"shape":"Description"} + } + }, + "OutputKey":{"type":"string"}, + "OutputValue":{"type":"string"}, + "Outputs":{ + "type":"list", + "member":{"shape":"Output"} + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterKey":{"shape":"ParameterKey"}, + "ParameterValue":{"shape":"ParameterValue"}, + "UsePreviousValue":{"shape":"UsePreviousValue"} + } + }, + "ParameterConstraints":{ + "type":"structure", + "members":{ + "AllowedValues":{"shape":"AllowedValues"} + } + }, + "ParameterDeclaration":{ + "type":"structure", + "members":{ + "ParameterKey":{"shape":"ParameterKey"}, + "DefaultValue":{"shape":"ParameterValue"}, + "ParameterType":{"shape":"ParameterType"}, + "NoEcho":{"shape":"NoEcho"}, + "Description":{"shape":"Description"}, + "ParameterConstraints":{"shape":"ParameterConstraints"} + } + }, + "ParameterDeclarations":{ + "type":"list", + "member":{"shape":"ParameterDeclaration"} + }, + 
"ParameterKey":{"type":"string"}, + "ParameterType":{"type":"string"}, + "ParameterValue":{"type":"string"}, + "Parameters":{ + "type":"list", + "member":{"shape":"Parameter"} + }, + "PhysicalResourceId":{"type":"string"}, + "ResourceProperties":{"type":"string"}, + "ResourceSignalStatus":{ + "type":"string", + "enum":[ + "SUCCESS", + "FAILURE" + ] + }, + "ResourceSignalUniqueId":{ + "type":"string", + "max":64, + "min":1 + }, + "ResourceStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "CREATE_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETE_COMPLETE", + "DELETE_SKIPPED", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED", + "UPDATE_COMPLETE" + ] + }, + "ResourceStatusReason":{"type":"string"}, + "ResourceType":{"type":"string"}, + "ResourceTypes":{ + "type":"list", + "member":{"shape":"ResourceType"} + }, + "SetStackPolicyInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"}, + "StackPolicyBody":{"shape":"StackPolicyBody"}, + "StackPolicyURL":{"shape":"StackPolicyURL"} + } + }, + "SignalResourceInput":{ + "type":"structure", + "required":[ + "StackName", + "LogicalResourceId", + "UniqueId", + "Status" + ], + "members":{ + "StackName":{"shape":"StackNameOrId"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "UniqueId":{"shape":"ResourceSignalUniqueId"}, + "Status":{"shape":"ResourceSignalStatus"} + } + }, + "Stack":{ + "type":"structure", + "required":[ + "StackName", + "CreationTime", + "StackStatus" + ], + "members":{ + "StackId":{"shape":"StackId"}, + "StackName":{"shape":"StackName"}, + "Description":{"shape":"Description"}, + "Parameters":{"shape":"Parameters"}, + "CreationTime":{"shape":"CreationTime"}, + "LastUpdatedTime":{"shape":"LastUpdatedTime"}, + "StackStatus":{"shape":"StackStatus"}, + "StackStatusReason":{"shape":"StackStatusReason"}, + "DisableRollback":{"shape":"DisableRollback"}, + "NotificationARNs":{"shape":"NotificationARNs"}, + 
"TimeoutInMinutes":{"shape":"TimeoutMinutes"}, + "Capabilities":{"shape":"Capabilities"}, + "Outputs":{"shape":"Outputs"}, + "Tags":{"shape":"Tags"} + } + }, + "StackEvent":{ + "type":"structure", + "required":[ + "StackId", + "EventId", + "StackName", + "Timestamp" + ], + "members":{ + "StackId":{"shape":"StackId"}, + "EventId":{"shape":"EventId"}, + "StackName":{"shape":"StackName"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "Timestamp":{"shape":"Timestamp"}, + "ResourceStatus":{"shape":"ResourceStatus"}, + "ResourceStatusReason":{"shape":"ResourceStatusReason"}, + "ResourceProperties":{"shape":"ResourceProperties"} + } + }, + "StackEvents":{ + "type":"list", + "member":{"shape":"StackEvent"} + }, + "StackId":{"type":"string"}, + "StackName":{"type":"string"}, + "StackNameOrId":{ + "type":"string", + "min":1, + "pattern":"([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)" + }, + "StackPolicyBody":{ + "type":"string", + "max":16384, + "min":1 + }, + "StackPolicyDuringUpdateBody":{ + "type":"string", + "max":16384, + "min":1 + }, + "StackPolicyDuringUpdateURL":{ + "type":"string", + "max":1350, + "min":1 + }, + "StackPolicyURL":{ + "type":"string", + "max":1350, + "min":1 + }, + "StackResource":{ + "type":"structure", + "required":[ + "LogicalResourceId", + "ResourceType", + "Timestamp", + "ResourceStatus" + ], + "members":{ + "StackName":{"shape":"StackName"}, + "StackId":{"shape":"StackId"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "Timestamp":{"shape":"Timestamp"}, + "ResourceStatus":{"shape":"ResourceStatus"}, + "ResourceStatusReason":{"shape":"ResourceStatusReason"}, + "Description":{"shape":"Description"} + } + }, + "StackResourceDetail":{ + "type":"structure", + "required":[ + "LogicalResourceId", + 
"ResourceType", + "LastUpdatedTimestamp", + "ResourceStatus" + ], + "members":{ + "StackName":{"shape":"StackName"}, + "StackId":{"shape":"StackId"}, + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "LastUpdatedTimestamp":{"shape":"Timestamp"}, + "ResourceStatus":{"shape":"ResourceStatus"}, + "ResourceStatusReason":{"shape":"ResourceStatusReason"}, + "Description":{"shape":"Description"}, + "Metadata":{"shape":"Metadata"} + } + }, + "StackResourceSummaries":{ + "type":"list", + "member":{"shape":"StackResourceSummary"} + }, + "StackResourceSummary":{ + "type":"structure", + "required":[ + "LogicalResourceId", + "ResourceType", + "LastUpdatedTimestamp", + "ResourceStatus" + ], + "members":{ + "LogicalResourceId":{"shape":"LogicalResourceId"}, + "PhysicalResourceId":{"shape":"PhysicalResourceId"}, + "ResourceType":{"shape":"ResourceType"}, + "LastUpdatedTimestamp":{"shape":"Timestamp"}, + "ResourceStatus":{"shape":"ResourceStatus"}, + "ResourceStatusReason":{"shape":"ResourceStatusReason"} + } + }, + "StackResources":{ + "type":"list", + "member":{"shape":"StackResource"} + }, + "StackStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "CREATE_COMPLETE", + "ROLLBACK_IN_PROGRESS", + "ROLLBACK_FAILED", + "ROLLBACK_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETE_COMPLETE", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_ROLLBACK_IN_PROGRESS", + "UPDATE_ROLLBACK_FAILED", + "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE" + ] + }, + "StackStatusFilter":{ + "type":"list", + "member":{"shape":"StackStatus"} + }, + "StackStatusReason":{"type":"string"}, + "StackSummaries":{ + "type":"list", + "member":{"shape":"StackSummary"} + }, + "StackSummary":{ + "type":"structure", + "required":[ + "StackName", + "CreationTime", + "StackStatus" + ], + 
"members":{ + "StackId":{"shape":"StackId"}, + "StackName":{"shape":"StackName"}, + "TemplateDescription":{"shape":"TemplateDescription"}, + "CreationTime":{"shape":"CreationTime"}, + "LastUpdatedTime":{"shape":"LastUpdatedTime"}, + "DeletionTime":{"shape":"DeletionTime"}, + "StackStatus":{"shape":"StackStatus"}, + "StackStatusReason":{"shape":"StackStatusReason"} + } + }, + "Stacks":{ + "type":"list", + "member":{"shape":"Stack"} + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{"type":"string"}, + "TagValue":{"type":"string"}, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TemplateBody":{ + "type":"string", + "min":1 + }, + "TemplateDescription":{"type":"string"}, + "TemplateParameter":{ + "type":"structure", + "members":{ + "ParameterKey":{"shape":"ParameterKey"}, + "DefaultValue":{"shape":"ParameterValue"}, + "NoEcho":{"shape":"NoEcho"}, + "Description":{"shape":"Description"} + } + }, + "TemplateParameters":{ + "type":"list", + "member":{"shape":"TemplateParameter"} + }, + "TemplateURL":{ + "type":"string", + "max":1024, + "min":1 + }, + "TimeoutMinutes":{ + "type":"integer", + "min":1 + }, + "Timestamp":{"type":"timestamp"}, + "UpdateStackInput":{ + "type":"structure", + "required":["StackName"], + "members":{ + "StackName":{"shape":"StackName"}, + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"}, + "UsePreviousTemplate":{"shape":"UsePreviousTemplate"}, + "StackPolicyDuringUpdateBody":{"shape":"StackPolicyDuringUpdateBody"}, + "StackPolicyDuringUpdateURL":{"shape":"StackPolicyDuringUpdateURL"}, + "Parameters":{"shape":"Parameters"}, + "Capabilities":{"shape":"Capabilities"}, + "ResourceTypes":{"shape":"ResourceTypes"}, + "StackPolicyBody":{"shape":"StackPolicyBody"}, + "StackPolicyURL":{"shape":"StackPolicyURL"}, + "NotificationARNs":{"shape":"NotificationARNs"} + } + }, + "UpdateStackOutput":{ + "type":"structure", + "members":{ + 
"StackId":{"shape":"StackId"} + } + }, + "Url":{"type":"string"}, + "UsePreviousTemplate":{"type":"boolean"}, + "UsePreviousValue":{"type":"boolean"}, + "ValidateTemplateInput":{ + "type":"structure", + "members":{ + "TemplateBody":{"shape":"TemplateBody"}, + "TemplateURL":{"shape":"TemplateURL"} + } + }, + "ValidateTemplateOutput":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"TemplateParameters"}, + "Description":{"shape":"Description"}, + "Capabilities":{"shape":"Capabilities"}, + "CapabilitiesReason":{"shape":"CapabilitiesReason"} + } + }, + "Version":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,783 @@ +{ + "version": "2.0", + "service": "AWS CloudFormation

    AWS CloudFormation enables you to create and manage AWS infrastructure deployments predictably and repeatedly. AWS CloudFormation helps you leverage AWS products such as Amazon EC2, EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable, highly scalable, cost effective applications without worrying about creating and configuring the underlying AWS infrastructure.

    With AWS CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. AWS CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

    For more information about this product, go to the CloudFormation Product Page.

    Amazon CloudFormation makes use of other AWS products. If you need additional technical information about a specific AWS product, you can find the product's technical documentation at http://docs.aws.amazon.com/documentation/.

    ", + "operations": { + "CancelUpdateStack": "

    Cancels an update on the specified stack. If the call completes successfully, the stack rolls back the update and reverts to the previous stack configuration.

    You can cancel only stacks that are in the UPDATE_IN_PROGRESS state.", + "ContinueUpdateRollback": "

    For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), return the stack to its original settings, and then try to update the stack again.

    A stack goes into the UPDATE_ROLLBACK_FAILED state when AWS CloudFormation cannot roll back all changes after a failed stack update. For example, you might have a stack that is rolling back to an old database instance that was deleted outside of AWS CloudFormation. Because AWS CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail.

    ", + "CreateStack": "

    Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack via the DescribeStacks API.

    ", + "DeleteStack": "

    Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks do not show up in the DescribeStacks API if the deletion has been completed successfully.

    ", + "DescribeAccountLimits": "

    Retrieves your account's AWS CloudFormation limits, such as the maximum number of stacks that you can create in your account.

    ", + "DescribeStackEvents": "

    Returns all stack related events for a specified stack. For more information about a stack's event history, go to Stacks in the AWS CloudFormation User Guide.

    You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID).", + "DescribeStackResource": "

    Returns a description of the specified resource in the specified stack.

    For deleted stacks, DescribeStackResource returns resource information for up to 90 days after the stack has been deleted.

    ", + "DescribeStackResources": "

    Returns AWS resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned.

    Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead.

    For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted.

    You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the AWS CloudFormation User Guide.

    A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request.", + "DescribeStacks": "

    Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created.

    ", + "EstimateTemplateCost": "

    Returns the estimated monthly cost of a template. The return value is an AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

    ", + "GetStackPolicy": "

    Returns the stack policy for a specified stack. If a stack doesn't have a policy, a null value is returned.

    ", + "GetTemplate": "

    Returns the template body for a specified stack. You can get the template for running or deleted stacks.

    For deleted stacks, GetTemplate returns the template for up to 90 days after the stack has been deleted.

    If the template does not exist, a ValidationError is returned. ", + "GetTemplateSummary": "

    Returns information about a new or existing template. The GetTemplateSummary action is useful for viewing parameter information, such as default parameter values and parameter types, before you create or update a stack.

    You can use the GetTemplateSummary action when you submit a template, or you can get template information for a running or deleted stack.

    For deleted stacks, GetTemplateSummary returns the template information for up to 90 days after the stack has been deleted. If the template does not exist, a ValidationError is returned.

    ", + "ListStackResources": "

    Returns descriptions of all resources of the specified stack.

    For deleted stacks, ListStackResources returns resource information for up to 90 days after the stack has been deleted.

    ", + "ListStacks": "

    Returns the summary information for stacks whose status matches the specified StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days after the stack is deleted. If no StackStatusFilter is specified, summary information for all stacks is returned (including existing stacks and stacks that have been deleted).

    ", + "SetStackPolicy": "

    Sets a stack policy for a specified stack.

    ", + "SignalResource": "

    Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

    ", + "UpdateStack": "

    Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack via the DescribeStacks action.

    To get a copy of the template for an existing stack, you can use the GetTemplate action.

    Tags that were associated with this stack during creation time will still be associated with the stack after an UpdateStack operation.

    For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.

    ", + "ValidateTemplate": "

    Validates a specified template.

    " + }, + "shapes": { + "AccountLimit": { + "base": "

    The AccountLimit data type.

    ", + "refs": { + "AccountLimitList$member": null + } + }, + "AccountLimitList": { + "base": null, + "refs": { + "DescribeAccountLimitsOutput$AccountLimits": "

    An account limit structure that contain a list of AWS CloudFormation account limits and their values.

    " + } + }, + "AllowedValue": { + "base": null, + "refs": { + "AllowedValues$member": null + } + }, + "AllowedValues": { + "base": null, + "refs": { + "ParameterConstraints$AllowedValues": "

    A list of values that are permitted for a parameter.

    " + } + }, + "AlreadyExistsException": { + "base": "

    Resource with the name requested already exists.

    ", + "refs": { + } + }, + "CancelUpdateStackInput": { + "base": "

    The input for the CancelUpdateStack action.

    ", + "refs": { + } + }, + "Capabilities": { + "base": null, + "refs": { + "CreateStackInput$Capabilities": "

    A list of capabilities that you must specify before AWS CloudFormation can create or update certain stacks. Some stack templates might include resources that can affect permissions in your AWS account. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.

    Currently, the only valid value is CAPABILITY_IAM, which is required for the following resources: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition. If your stack template contains these resources, we recommend that you review any permissions associated with them. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

    ", + "GetTemplateSummaryOutput$Capabilities": "

    The capabilities found within the template. Currently, AWS CloudFormation supports only the CAPABILITY_IAM capability. If your template contains IAM resources, you must specify the CAPABILITY_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

    ", + "Stack$Capabilities": "

    The capabilities allowed in the stack.

    ", + "UpdateStackInput$Capabilities": "

    A list of capabilities that you must specify before AWS CloudFormation can create or update certain stacks. Some stack templates might include resources that can affect permissions in your AWS account. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter. Currently, the only valid value is CAPABILITY_IAM, which is required for the following resources: AWS::IAM::AccessKey, AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, and AWS::IAM::UserToGroupAddition. If your stack template contains these resources, we recommend that you review any permissions associated with them. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

    ", + "ValidateTemplateOutput$Capabilities": "

    The capabilities found within the template. Currently, AWS CloudFormation supports only the CAPABILITY_IAM capability. If your template contains IAM resources, you must specify the CAPABILITY_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

    " + } + }, + "CapabilitiesReason": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$CapabilitiesReason": "

    The list of resources that generated the values in the Capabilities response element.

    ", + "ValidateTemplateOutput$CapabilitiesReason": "

    The list of resources that generated the values in the Capabilities response element.

    " + } + }, + "Capability": { + "base": null, + "refs": { + "Capabilities$member": null + } + }, + "ContinueUpdateRollbackInput": { + "base": "

    The input for the ContinueUpdateRollback action.

    ", + "refs": { + } + }, + "ContinueUpdateRollbackOutput": { + "base": "

    The output for a ContinueUpdateRollback action.

    ", + "refs": { + } + }, + "CreateStackInput": { + "base": "

    The input for CreateStack action.

    ", + "refs": { + } + }, + "CreateStackOutput": { + "base": "

    The output for a CreateStack action.

    ", + "refs": { + } + }, + "CreationTime": { + "base": null, + "refs": { + "Stack$CreationTime": "

    The time at which the stack was created.

    ", + "StackSummary$CreationTime": "

    The time the stack was created.

    " + } + }, + "DeleteStackInput": { + "base": "

    The input for DeleteStack action.

    ", + "refs": { + } + }, + "DeletionTime": { + "base": null, + "refs": { + "StackSummary$DeletionTime": "

    The time the stack was deleted.

    " + } + }, + "DescribeAccountLimitsInput": { + "base": "

    The input for the DescribeAccountLimits action.

    ", + "refs": { + } + }, + "DescribeAccountLimitsOutput": { + "base": "

    The output for the DescribeAccountLimits action.

    ", + "refs": { + } + }, + "DescribeStackEventsInput": { + "base": "

    The input for DescribeStackEvents action.

    ", + "refs": { + } + }, + "DescribeStackEventsOutput": { + "base": "

    The output for a DescribeStackEvents action.

    ", + "refs": { + } + }, + "DescribeStackResourceInput": { + "base": "

    The input for DescribeStackResource action.

    ", + "refs": { + } + }, + "DescribeStackResourceOutput": { + "base": "

    The output for a DescribeStackResource action.

    ", + "refs": { + } + }, + "DescribeStackResourcesInput": { + "base": "

    The input for DescribeStackResources action.

    ", + "refs": { + } + }, + "DescribeStackResourcesOutput": { + "base": "

    The output for a DescribeStackResources action.

    ", + "refs": { + } + }, + "DescribeStacksInput": { + "base": "

    The input for DescribeStacks action.

    ", + "refs": { + } + }, + "DescribeStacksOutput": { + "base": "

    The output for a DescribeStacks action.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$Description": "

    The value that is defined in the Description property of the template.

    ", + "Output$Description": "

    User defined description associated with the output.

    ", + "ParameterDeclaration$Description": "

    The description that is associate with the parameter.

    ", + "Stack$Description": "

    A user-defined description associated with the stack.

    ", + "StackResource$Description": "

    User defined description associated with the resource.

    ", + "StackResourceDetail$Description": "

    User defined description associated with the resource.

    ", + "TemplateParameter$Description": "

    User defined description associated with the parameter.

    ", + "ValidateTemplateOutput$Description": "

    The description found within the template.

    " + } + }, + "DisableRollback": { + "base": null, + "refs": { + "CreateStackInput$DisableRollback": "

    Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure, but not both.

    Default: false

    ", + "Stack$DisableRollback": "

    Boolean to enable or disable rollback on stack creation failures:

    • true: disable rollback
    • false: enable rollback

    " + } + }, + "EstimateTemplateCostInput": { + "base": null, + "refs": { + } + }, + "EstimateTemplateCostOutput": { + "base": "

    The output for a EstimateTemplateCost action.

    ", + "refs": { + } + }, + "EventId": { + "base": null, + "refs": { + "StackEvent$EventId": "

    The unique ID of this event.

    " + } + }, + "GetStackPolicyInput": { + "base": "

    The input for the GetStackPolicy action.

    ", + "refs": { + } + }, + "GetStackPolicyOutput": { + "base": "

    The output for the GetStackPolicy action.

    ", + "refs": { + } + }, + "GetTemplateInput": { + "base": "

    The input for a GetTemplate action.

    ", + "refs": { + } + }, + "GetTemplateOutput": { + "base": "

    The output for GetTemplate action.

    ", + "refs": { + } + }, + "GetTemplateSummaryInput": { + "base": "

    The input for the GetTemplateSummary action.

    ", + "refs": { + } + }, + "GetTemplateSummaryOutput": { + "base": "

    The output for the GetTemplateSummary action.

    ", + "refs": { + } + }, + "InsufficientCapabilitiesException": { + "base": "

    The template contains resources with capabilities that were not specified in the Capabilities parameter.

    ", + "refs": { + } + }, + "LastUpdatedTime": { + "base": null, + "refs": { + "Stack$LastUpdatedTime": "

    The time the stack was last updated. This field will only be returned if the stack has been updated at least once.

    ", + "StackSummary$LastUpdatedTime": "

    The time the stack was last updated. This field will only be returned if the stack has been updated at least once.

    " + } + }, + "LimitExceededException": { + "base": "

    Quota for the resource has already been reached.

    ", + "refs": { + } + }, + "LimitName": { + "base": null, + "refs": { + "AccountLimit$Name": "

    The name of the account limit. Currently, the only account limit is StackLimit.

    " + } + }, + "LimitValue": { + "base": null, + "refs": { + "AccountLimit$Value": "

    The value that is associated with the account limit name.

    " + } + }, + "ListStackResourcesInput": { + "base": "

    The input for the ListStackResource action.

    ", + "refs": { + } + }, + "ListStackResourcesOutput": { + "base": "

    The output for a ListStackResources action.

    ", + "refs": { + } + }, + "ListStacksInput": { + "base": "

    The input for ListStacks action.

    ", + "refs": { + } + }, + "ListStacksOutput": { + "base": "

    The output for ListStacks action.

    ", + "refs": { + } + }, + "LogicalResourceId": { + "base": null, + "refs": { + "DescribeStackResourceInput$LogicalResourceId": "

    The logical name of the resource as specified in the template.

    Default: There is no default value.

    ", + "DescribeStackResourcesInput$LogicalResourceId": "

    The logical name of the resource as specified in the template.

    Default: There is no default value.

    ", + "SignalResourceInput$LogicalResourceId": "

    The logical ID of the resource that you want to signal. The logical ID is the name of the resource that given in the template.

    ", + "StackEvent$LogicalResourceId": "

    The logical name of the resource specified in the template.

    ", + "StackResource$LogicalResourceId": "

    The logical name of the resource specified in the template.

    ", + "StackResourceDetail$LogicalResourceId": "

    The logical name of the resource specified in the template.

    ", + "StackResourceSummary$LogicalResourceId": "

    The logical name of the resource specified in the template.

    " + } + }, + "Metadata": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$Metadata": "

    The value that is defined for the Metadata property of the template.

    ", + "StackResourceDetail$Metadata": "

    The JSON format content of the Metadata attribute declared for the resource. For more information, see Metadata Attribute in the AWS CloudFormation User Guide.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeAccountLimitsInput$NextToken": "

    A string that identifies the next page of limits that you want to retrieve.

    ", + "DescribeAccountLimitsOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of limits. If no additional page exists, this value is null.

    ", + "DescribeStackEventsInput$NextToken": "

    A string that identifies the next page of events that you want to retrieve.

    ", + "DescribeStackEventsOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of events. If no additional page exists, this value is null.

    ", + "DescribeStacksInput$NextToken": "

    A string that identifies the next page of stacks that you want to retrieve.

    ", + "DescribeStacksOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.

    ", + "ListStackResourcesInput$NextToken": "

    A string that identifies the next page of stack resources that you want to retrieve.

    ", + "ListStackResourcesOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of stack resources. If no additional page exists, this value is null.

    ", + "ListStacksInput$NextToken": "

    A string that identifies the next page of stacks that you want to retrieve.

    ", + "ListStacksOutput$NextToken": "

    If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.

    " + } + }, + "NoEcho": { + "base": null, + "refs": { + "ParameterDeclaration$NoEcho": "

    Flag that indicates whether the parameter value is shown as plain text in logs and in the AWS Management Console.

    ", + "TemplateParameter$NoEcho": "

    Flag indicating whether the parameter should be displayed as plain text in logs and UIs.

    " + } + }, + "NotificationARN": { + "base": null, + "refs": { + "NotificationARNs$member": null + } + }, + "NotificationARNs": { + "base": null, + "refs": { + "CreateStackInput$NotificationARNs": "

    The Simple Notification Service (SNS) topic ARNs to publish stack related events. You can find your SNS topic ARNs using the SNS console or your Command Line Interface (CLI).

    ", + "Stack$NotificationARNs": "

    SNS topic ARNs to which stack related events are published.

    ", + "UpdateStackInput$NotificationARNs": "

    Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that AWS CloudFormation associates with the stack. Specify an empty list to remove all notification topics.

    " + } + }, + "OnFailure": { + "base": null, + "refs": { + "CreateStackInput$OnFailure": "

    Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback, but not both.

    Default: ROLLBACK

    " + } + }, + "Output": { + "base": "

    The Output data type.

    ", + "refs": { + "Outputs$member": null + } + }, + "OutputKey": { + "base": null, + "refs": { + "Output$OutputKey": "

    The key associated with the output.

    " + } + }, + "OutputValue": { + "base": null, + "refs": { + "Output$OutputValue": "

    The value associated with the output.

    " + } + }, + "Outputs": { + "base": null, + "refs": { + "Stack$Outputs": "

    A list of output structures.

    " + } + }, + "Parameter": { + "base": "

    The Parameter data type.

    ", + "refs": { + "Parameters$member": null + } + }, + "ParameterConstraints": { + "base": "

    A set of criteria that AWS CloudFormation uses to validate parameter values. Although other constraints might be defined in the stack template, AWS CloudFormation returns only the AllowedValues property.

    ", + "refs": { + "ParameterDeclaration$ParameterConstraints": "

    The criteria that AWS CloudFormation uses to validate parameter values.

    " + } + }, + "ParameterDeclaration": { + "base": "

    The ParameterDeclaration data type.

    ", + "refs": { + "ParameterDeclarations$member": null + } + }, + "ParameterDeclarations": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$Parameters": "

    A list of parameter declarations that describe various properties for each parameter.

    " + } + }, + "ParameterKey": { + "base": null, + "refs": { + "Parameter$ParameterKey": "

    The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.

    ", + "ParameterDeclaration$ParameterKey": "

    The name that is associated with the parameter.

    ", + "TemplateParameter$ParameterKey": "

    The name associated with the parameter.

    " + } + }, + "ParameterType": { + "base": null, + "refs": { + "ParameterDeclaration$ParameterType": "

    The type of parameter.

    " + } + }, + "ParameterValue": { + "base": null, + "refs": { + "Parameter$ParameterValue": "

    The value associated with the parameter.

    ", + "ParameterDeclaration$DefaultValue": "

    The default value of the parameter.

    ", + "TemplateParameter$DefaultValue": "

    The default value associated with the parameter.

    " + } + }, + "Parameters": { + "base": null, + "refs": { + "CreateStackInput$Parameters": "

    A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

    ", + "EstimateTemplateCostInput$Parameters": "

    A list of Parameter structures that specify input parameters.

    ", + "Stack$Parameters": "

    A list of Parameter structures.

    ", + "UpdateStackInput$Parameters": "

    A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

    " + } + }, + "PhysicalResourceId": { + "base": null, + "refs": { + "DescribeStackResourcesInput$PhysicalResourceId": "

    The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

    For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId corresponds to the InstanceId. You can pass the EC2 InstanceId to DescribeStackResources to find which stack the instance belongs to and what other resources are part of the stack.

    Required: Conditional. If you do not specify PhysicalResourceId, you must specify StackName.

    Default: There is no default value.

    ", + "StackEvent$PhysicalResourceId": "

    The name or unique identifier associated with the physical instance of the resource.

    ", + "StackResource$PhysicalResourceId": "

    The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

    ", + "StackResourceDetail$PhysicalResourceId": "

    The name or unique identifier that corresponds to a physical instance ID of a resource supported by AWS CloudFormation.

    ", + "StackResourceSummary$PhysicalResourceId": "

    The name or unique identifier that corresponds to a physical instance ID of the resource.

    " + } + }, + "ResourceProperties": { + "base": null, + "refs": { + "StackEvent$ResourceProperties": "

    BLOB of the properties used to create the resource.

    " + } + }, + "ResourceSignalStatus": { + "base": null, + "refs": { + "SignalResourceInput$Status": "

    The status of the signal, which is either success or failure. A failure signal causes AWS CloudFormation to immediately fail the stack creation or update.

    " + } + }, + "ResourceSignalUniqueId": { + "base": null, + "refs": { + "SignalResourceInput$UniqueId": "

    A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling groups, specify the instance ID that you are signaling as the unique ID. If you send multiple signals to a single resource (such as signaling a wait condition), each signal requires a different unique ID.

    " + } + }, + "ResourceStatus": { + "base": null, + "refs": { + "StackEvent$ResourceStatus": "

    Current status of the resource.

    ", + "StackResource$ResourceStatus": "

    Current status of the resource.

    ", + "StackResourceDetail$ResourceStatus": "

    Current status of the resource.

    ", + "StackResourceSummary$ResourceStatus": "

    Current status of the resource.

    " + } + }, + "ResourceStatusReason": { + "base": null, + "refs": { + "StackEvent$ResourceStatusReason": "

    Success/failure message associated with the resource.

    ", + "StackResource$ResourceStatusReason": "

    Success/failure message associated with the resource.

    ", + "StackResourceDetail$ResourceStatusReason": "

    Success/failure message associated with the resource.

    ", + "StackResourceSummary$ResourceStatusReason": "

    Success/failure message associated with the resource.

    " + } + }, + "ResourceType": { + "base": null, + "refs": { + "ResourceTypes$member": null, + "StackEvent$ResourceType": "

    Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

    ", + "StackResource$ResourceType": "

    Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

    ", + "StackResourceDetail$ResourceType": "

    Type of resource. ((For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

    ", + "StackResourceSummary$ResourceType": "

    Type of resource. (For more information, go to AWS Resource Types Reference in the AWS CloudFormation User Guide.)

    " + } + }, + "ResourceTypes": { + "base": null, + "refs": { + "CreateStackInput$ResourceTypes": "

    The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all AWS resource), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular AWS service), and AWS::service_name::resource_logical_ID (for a specific AWS resource).

    If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.

    ", + "GetTemplateSummaryOutput$ResourceTypes": "

    A list of all the template resource types that are defined in the template, such as AWS::EC2::Instance, AWS::Dynamo::Table, and Custom::MyCustomInstance.

    ", + "UpdateStackInput$ResourceTypes": "

    The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.

    If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.

    " + } + }, + "SetStackPolicyInput": { + "base": "

    The input for the SetStackPolicy action.

    ", + "refs": { + } + }, + "SignalResourceInput": { + "base": "

    The input for the SignalResource action.

    ", + "refs": { + } + }, + "Stack": { + "base": "

    The Stack data type.

    ", + "refs": { + "Stacks$member": null + } + }, + "StackEvent": { + "base": "

    The StackEvent data type.

    ", + "refs": { + "StackEvents$member": null + } + }, + "StackEvents": { + "base": null, + "refs": { + "DescribeStackEventsOutput$StackEvents": "

    A list of StackEvents structures.

    " + } + }, + "StackId": { + "base": null, + "refs": { + "CreateStackOutput$StackId": "

    Unique identifier of the stack.

    ", + "Stack$StackId": "

    Unique identifier of the stack.

    ", + "StackEvent$StackId": "

    The unique ID name of the instance of the stack.

    ", + "StackResource$StackId": "

    Unique identifier of the stack.

    ", + "StackResourceDetail$StackId": "

    Unique identifier of the stack.

    ", + "StackSummary$StackId": "

    Unique stack identifier.

    ", + "UpdateStackOutput$StackId": "

    Unique identifier of the stack.

    " + } + }, + "StackName": { + "base": null, + "refs": { + "CancelUpdateStackInput$StackName": "

    The name or the unique stack ID that is associated with the stack.

    ", + "CreateStackInput$StackName": "

    The name that is associated with the stack. The name must be unique in the region in which you are creating the stack.

    A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.", + "DeleteStackInput$StackName": "

    The name or the unique stack ID that is associated with the stack.

    ", + "DescribeStackEventsInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.
    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "DescribeStackResourceInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.
    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "DescribeStackResourcesInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.
    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    Required: Conditional. If you do not specify StackName, you must specify PhysicalResourceId.

    ", + "DescribeStacksInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.
    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "GetStackPolicyInput$StackName": "

    The name or unique stack ID that is associated with the stack whose policy you want to get.

    ", + "GetTemplateInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.
    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "ListStackResourcesInput$StackName": "

    The name or the unique stack ID that is associated with the stack, which are not always interchangeable:

    • Running stacks: You can specify either the stack's name or its unique stack ID.
    • Deleted stacks: You must specify the unique stack ID.

    Default: There is no default value.

    ", + "SetStackPolicyInput$StackName": "

    The name or unique stack ID that you want to associate a policy with.

    ", + "Stack$StackName": "

    The name associated with the stack.

    ", + "StackEvent$StackName": "

    The name associated with a stack.

    ", + "StackResource$StackName": "

    The name associated with the stack.

    ", + "StackResourceDetail$StackName": "

    The name associated with the stack.

    ", + "StackSummary$StackName": "

    The name associated with the stack.

    ", + "UpdateStackInput$StackName": "

    The name or unique stack ID of the stack to update.

    " + } + }, + "StackNameOrId": { + "base": null, + "refs": { + "ContinueUpdateRollbackInput$StackName": "

    The name or the unique ID of the stack that you want to continue rolling back.

    ", + "GetTemplateSummaryInput$StackName": "

    The name or the stack ID that is associated with the stack, which are not always interchangeable. For running stacks, you can specify either the stack's name or its unique stack ID. For deleted stack, you must specify the unique stack ID.

    Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

    ", + "SignalResourceInput$StackName": "

    The stack name or unique stack ID that includes the resource that you want to signal.

    " + } + }, + "StackPolicyBody": { + "base": null, + "refs": { + "CreateStackInput$StackPolicyBody": "

    Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    ", + "GetStackPolicyOutput$StackPolicyBody": "

    Structure containing the stack policy body. (For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide.)

    ", + "SetStackPolicyInput$StackPolicyBody": "

    Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    ", + "UpdateStackInput$StackPolicyBody": "

    Structure containing a new stack policy body. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you do not specify a stack policy, the current policy that is associated with the stack is unchanged.

    " + } + }, + "StackPolicyDuringUpdateBody": { + "base": null, + "refs": { + "UpdateStackInput$StackPolicyDuringUpdateBody": "

    Structure containing the temporary overriding stack policy body. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

    If you want to update protected resources, specify a temporary overriding stack policy during this update. If you do not specify a stack policy, the current policy that is associated with the stack will be used.

    " + } + }, + "StackPolicyDuringUpdateURL": { + "base": null, + "refs": { + "UpdateStackInput$StackPolicyDuringUpdateURL": "

    Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

    If you want to update protected resources, specify a temporary overriding stack policy during this update. If you do not specify a stack policy, the current policy that is associated with the stack will be used.

    " + } + }, + "StackPolicyURL": { + "base": null, + "refs": { + "CreateStackInput$StackPolicyURL": "

    Location of a file containing the stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    ", + "SetStackPolicyInput$StackPolicyURL": "

    Location of a file containing the stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    ", + "UpdateStackInput$StackPolicyURL": "

    Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you do not specify a stack policy, the current policy that is associated with the stack is unchanged.

    " + } + }, + "StackResource": { + "base": "

    The StackResource data type.

    ", + "refs": { + "StackResources$member": null + } + }, + "StackResourceDetail": { + "base": "

    Contains detailed information about the specified stack resource.

    ", + "refs": { + "DescribeStackResourceOutput$StackResourceDetail": "

    A StackResourceDetail structure containing the description of the specified resource in the specified stack.

    " + } + }, + "StackResourceSummaries": { + "base": null, + "refs": { + "ListStackResourcesOutput$StackResourceSummaries": "

    A list of StackResourceSummary structures.

    " + } + }, + "StackResourceSummary": { + "base": "

    Contains high-level information about the specified stack resource.

    ", + "refs": { + "StackResourceSummaries$member": null + } + }, + "StackResources": { + "base": null, + "refs": { + "DescribeStackResourcesOutput$StackResources": "

    A list of StackResource structures.

    " + } + }, + "StackStatus": { + "base": null, + "refs": { + "Stack$StackStatus": "

    Current status of the stack.

    ", + "StackStatusFilter$member": null, + "StackSummary$StackStatus": "

    The current status of the stack.

    " + } + }, + "StackStatusFilter": { + "base": null, + "refs": { + "ListStacksInput$StackStatusFilter": "

    Stack status to use as a filter. Specify one or more stack status codes to list only stacks with the specified status codes. For a complete list of stack status codes, see the StackStatus parameter of the Stack data type.

    " + } + }, + "StackStatusReason": { + "base": null, + "refs": { + "Stack$StackStatusReason": "

    Success/failure message associated with the stack status.

    ", + "StackSummary$StackStatusReason": "

    Success/Failure message associated with the stack status.

    " + } + }, + "StackSummaries": { + "base": null, + "refs": { + "ListStacksOutput$StackSummaries": "

    A list of StackSummary structures containing information about the specified stacks.

    " + } + }, + "StackSummary": { + "base": "

    The StackSummary Data Type

    ", + "refs": { + "StackSummaries$member": null + } + }, + "Stacks": { + "base": null, + "refs": { + "DescribeStacksOutput$Stacks": "

    A list of stack structures.

    " + } + }, + "Tag": { + "base": "

    The Tag type is used by CreateStack in the Tags parameter. It allows you to specify a key-value pair that can be used to store information related to cost allocation for an AWS CloudFormation stack.

    ", + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    Required. A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws:.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    Required. A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "CreateStackInput$Tags": "

    Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 10 tags can be specified.

    ", + "Stack$Tags": "

    A list of Tags that specify cost allocation information for the stack.

    " + } + }, + "TemplateBody": { + "base": null, + "refs": { + "CreateStackInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    ", + "EstimateTemplateCostInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

    Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used.

    ", + "GetTemplateOutput$TemplateBody": "

    Structure containing the template body. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

    ", + "GetTemplateSummaryInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

    ", + "UpdateStackInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the AWS CloudFormation User Guide.)

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    ", + "ValidateTemplateInput$TemplateBody": "

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " + } + }, + "TemplateDescription": { + "base": null, + "refs": { + "StackSummary$TemplateDescription": "

    The template description of the template used to create the stack.

    " + } + }, + "TemplateParameter": { + "base": "

    The TemplateParameter data type.

    ", + "refs": { + "TemplateParameters$member": null + } + }, + "TemplateParameters": { + "base": null, + "refs": { + "ValidateTemplateOutput$Parameters": "

    A list of TemplateParameter structures.

    " + } + }, + "TemplateURL": { + "base": null, + "refs": { + "CreateStackInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    ", + "EstimateTemplateCostInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    ", + "GetTemplateSummaryInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

    ", + "UpdateStackInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    ", + "ValidateTemplateInput$TemplateURL": "

    Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " + } + }, + "TimeoutMinutes": { + "base": null, + "refs": { + "CreateStackInput$TimeoutInMinutes": "

    The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false, the stack will be rolled back.

    ", + "Stack$TimeoutInMinutes": "

    The amount of time within which stack creation should complete.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "StackEvent$Timestamp": "

    Time the status was updated.

    ", + "StackResource$Timestamp": "

    Time the status was updated.

    ", + "StackResourceDetail$LastUpdatedTimestamp": "

    Time the status was updated.

    ", + "StackResourceSummary$LastUpdatedTimestamp": "

    Time the status was updated.

    " + } + }, + "UpdateStackInput": { + "base": "

    The input for UpdateStack action.

    ", + "refs": { + } + }, + "UpdateStackOutput": { + "base": "

    The output for a UpdateStack action.

    ", + "refs": { + } + }, + "Url": { + "base": null, + "refs": { + "EstimateTemplateCostOutput$Url": "

    An AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

    " + } + }, + "UsePreviousTemplate": { + "base": null, + "refs": { + "UpdateStackInput$UsePreviousTemplate": "

    Reuse the existing template that is associated with the stack that you are updating.

    " + } + }, + "UsePreviousValue": { + "base": null, + "refs": { + "Parameter$UsePreviousValue": "

    During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true, do not specify a parameter value.

    " + } + }, + "ValidateTemplateInput": { + "base": "

    The input for ValidateTemplate action.

    ", + "refs": { + } + }, + "ValidateTemplateOutput": { + "base": "

    The output for ValidateTemplate action.

    ", + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "GetTemplateSummaryOutput$Version": "

    The AWS template format version, which identifies the capabilities of the template.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,27 @@ +{ + "pagination": { + "DescribeStackEvents": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "StackEvents" + }, + "DescribeStackResources": { + "result_key": "StackResources" + }, + "DescribeStacks": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Stacks" + }, + "ListStackResources": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "StackResourceSummaries" + }, + "ListStacks": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "StackSummaries" + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,70 @@ +{ + "version": 2, + "waiters": { + "StackCreateComplete": { + "delay": 30, + "operation": "DescribeStacks", + "maxAttempts": 50, + "description": "Wait until stack status is CREATE_COMPLETE.", + "acceptors": [ + { + "expected": "CREATE_COMPLETE", + "matcher": "pathAll", + "state": "success", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "CREATE_FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + } + ] + }, + "StackDeleteComplete": { + "delay": 30, + "operation": "DescribeStacks", + "maxAttempts": 25, + "description": "Wait until stack status is DELETE_COMPLETE.", + "acceptors": [ + { + "expected": "DELETE_COMPLETE", + "matcher": "pathAll", + "state": "success", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "ValidationError", + "matcher": "error", + "state": "success" + }, + { + "expected": "DELETE_FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": "Stacks[].StackStatus" + } + ] + }, + "StackUpdateComplete": { + "delay": 30, + "operation": "DescribeStacks", + "maxAttempts": 5, + "description": "Wait until stack status is UPDATE_COMPLETE.", + "acceptors": [ + { + "expected": "UPDATE_COMPLETE", + "matcher": "pathAll", + "state": "success", + "argument": "Stacks[].StackStatus" + }, + { + "expected": "UPDATE_FAILED", + "matcher": "pathAny", + "state": "failure", + "argument": 
"Stacks[].StackStatus" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2651 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-17", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4", + "protocol":"rest-xml" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2015_04_17", + "http":{ + "method":"POST", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"CloudFrontOriginAccessIdentityAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCloudFrontOriginAccessIdentities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2015_04_17", + "http":{ + "method":"POST", + "requestUri":"/2015-04-17/distribution", + "responseCode":201 + }, + 
"input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"DistributionAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidOrigin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidViewerCertificate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidMinimumProtocolVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyDistributions", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidDefaultRootObject", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRelativePath", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidErrorCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidResponseCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequiredProtocol", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchOrigin", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyOrigins", + "error":{"httpStatusCode":400}, + 
"exception":true + }, + { + "shape":"TooManyCacheBehaviors", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCookieNamesInWhiteList", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidForwardCookies", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyHeadersInForwardedValues", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidHeadersForS3Origin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCertificates", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidLocationCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidGeoRestrictionParameter", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidProtocolSettings", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTTLOrder", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2015_04_17", + "http":{ + "method":"POST", + "requestUri":"/2015-04-17/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BatchTooLarge", + "error":{"httpStatusCode":413}, + "exception":true + }, + { + "shape":"TooManyInvalidationsInProgress", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2015_04_17", + "http":{ + "method":"POST", + "requestUri":"/2015-04-17/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"StreamingDistributionAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidOrigin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributions", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2015_04_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, 
+ { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"CloudFrontOriginAccessIdentityInUse", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2015_04_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-04-17/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"DistributionNotDisabled", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + } + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2015_04_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-04-17/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"StreamingDistributionNotDisabled", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + } + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + 
"name":"GetCloudFrontOriginAccessIdentity2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetDistribution":{ + "name":"GetDistribution2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + 
"GetInvalidation":{ + "name":"GetInvalidation2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + { + "shape":"NoSuchInvalidation", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + 
"exception":true + } + ] + }, + "ListDistributions":{ + "name":"ListDistributions2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "ListStreamingDistributions":{ + "name":"ListStreamingDistributions2015_04_17", + "http":{ + "method":"GET", + "requestUri":"/2015-04-17/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2015_04_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-04-17/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2015_04_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-04-17/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"TooManyDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidDefaultRootObject", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRelativePath", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidErrorCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidResponseCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidViewerCertificate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidMinimumProtocolVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequiredProtocol", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchOrigin", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyOrigins", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCacheBehaviors", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCookieNamesInWhiteList", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidForwardCookies", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyHeadersInForwardedValues", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidHeadersForS3Origin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCertificates", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidLocationCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidGeoRestrictionParameter", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTTLOrder", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2015_04_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-04-17/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + 
"output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + 
"type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + 
"CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + 
"CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"CloudFrontOriginAccessIdentityConfig" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"DistributionConfig" + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"InvalidationBatch" + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" 
+ }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"StreamingDistributionConfig" + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + 
"AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "DeleteDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + 
"DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + 
"type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + 
"GetDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + 
"IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + 
"CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + 
"InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + "OriginPath":{"shape":"string"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer" + ] + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + 
"type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + 
"DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + 
"exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"CloudFrontOriginAccessIdentityConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + 
"ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"DistributionConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-04-17/"}, + "locationName":"StreamingDistributionConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "IAMCertificateId":{"shape":"string"}, + "CloudFrontDefaultCertificate":{"shape":"boolean"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"} + } + }, + "ViewerProtocolPolicy":{ + 
"type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1141 @@ +{ + "version": "2.0", + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + 
"ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "service": null, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. 
If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. 
If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors.You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." 
+ } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." 
+ } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." + } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." + } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." 
+ } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." + } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx 
response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." 
+ } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior." + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete a origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." 
+ } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsResult$DistributionList": "The DistributionList type." + } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. 
CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." + } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the 
corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "To request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header. CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." 
+ } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains forward cookies option which doesn't match with the expectation for the whitelisted list of cookie names. Either list of cookie names has been specified when not allowed or list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you only want to support only clients that Support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. 
Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." + } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches." + } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." 
+ } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. 
You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." + } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." + } + }, + "MissingBody": { + "base": "This operation requires a body. 
Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files.You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). 
You must enclose each invalidation object with the Path element tags. If the path includes non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object." + } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. 
If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors." 
+ } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." + } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." 
+ } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." + } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create anymore cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create anymore custom ssl certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create anymore origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + 
"base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. 
If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. 
For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. 
If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. 
If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, prefix and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. If you do not want to include cookies when you create a distribution or if you want to disable include cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. 
If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. 
Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize. For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. 
The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. 
You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000.", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. 
You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. 
For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer. For a list of HTTP status codes that you can replace, see CloudFront Documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. 
For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). 
Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element. To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. 
For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. 
When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of distributions. The results include distributions in the list that occur after the marker. 
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions you want in the response body.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. 
If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. CloudFront appends the directory name to the value of DomainName.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. 
To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can created signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. 
If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$IAMCertificateId": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." + } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + 
"output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": "InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-04-17/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Distribution.Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Invalidation.Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + 
"maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "StreamingDistribution.Status" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2721 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-07-27", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4", + "protocol":"rest-xml" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2015_07_27", + "http":{ + "method":"POST", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"CloudFrontOriginAccessIdentityAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCloudFrontOriginAccessIdentities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + 
"exception":true + } + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2015_07_27", + "http":{ + "method":"POST", + "requestUri":"/2015-07-27/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"DistributionAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidOrigin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidViewerCertificate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidMinimumProtocolVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyDistributions", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidDefaultRootObject", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRelativePath", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidErrorCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidResponseCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequiredProtocol", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchOrigin", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyOrigins", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCacheBehaviors", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCookieNamesInWhiteList", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidForwardCookies", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyHeadersInForwardedValues", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidHeadersForS3Origin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCertificates", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidLocationCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidGeoRestrictionParameter", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidProtocolSettings", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTTLOrder", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidWebACLId", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2015_07_27", + "http":{ + "method":"POST", + "requestUri":"/2015-07-27/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { 
+ "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"BatchTooLarge", + "error":{"httpStatusCode":413}, + "exception":true + }, + { + "shape":"TooManyInvalidationsInProgress", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2015_07_27", + "http":{ + "method":"POST", + "requestUri":"/2015-07-27/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"StreamingDistributionAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidOrigin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributions", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2015_07_27", + 
"http":{ + "method":"DELETE", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"CloudFrontOriginAccessIdentityInUse", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2015_07_27", + "http":{ + "method":"DELETE", + "requestUri":"/2015-07-27/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"DistributionNotDisabled", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + } + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2015_07_27", + "http":{ + "method":"DELETE", + "requestUri":"/2015-07-27/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"StreamingDistributionNotDisabled", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + } + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetDistribution":{ + "name":"GetDistribution2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + 
"output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetInvalidation":{ + "name":"GetInvalidation2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + { + "shape":"NoSuchInvalidation", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2015_07_27", + "http":{ + "method":"GET", + 
"requestUri":"/2015-07-27/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListDistributions":{ + "name":"ListDistributions2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidWebACLId", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "ListStreamingDistributions":{ + "name":"ListStreamingDistributions2015_07_27", + "http":{ + "method":"GET", + "requestUri":"/2015-07-27/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + 
"output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2015_07_27", + "http":{ + "method":"PUT", + "requestUri":"/2015-07-27/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchCloudFrontOriginAccessIdentity", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2015_07_27", + "http":{ + "method":"PUT", + "requestUri":"/2015-07-27/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + 
"exception":true + }, + { + "shape":"NoSuchDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"TooManyDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidDefaultRootObject", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRelativePath", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidErrorCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidResponseCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidViewerCertificate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidMinimumProtocolVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequiredProtocol", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchOrigin", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyOrigins", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCacheBehaviors", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCookieNamesInWhiteList", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidForwardCookies", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyHeadersInForwardedValues", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"InvalidHeadersForS3Origin", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyCertificates", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidLocationCode", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidGeoRestrictionParameter", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidTTLOrder", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidWebACLId", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2015_07_27", + "http":{ + "method":"PUT", + "requestUri":"/2015-07-27/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + { + "shape":"AccessDenied", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"CNAMEAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IllegalUpdate", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidIfMatchVersion", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingBody", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoSuchStreamingDistribution", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PreconditionFailed", + "error":{"httpStatusCode":412}, + "exception":true + }, + { + "shape":"TooManyStreamingDistributionCNAMEs", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidArgument", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidOriginAccessIdentity", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyTrustedSigners", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TrustedSignerDoesNotExist", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InconsistentQuantities", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, 
+ "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + 
"Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"CloudFrontOriginAccessIdentityConfig" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"DistributionConfig" + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + 
"Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"InvalidationBatch" + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"StreamingDistributionConfig" + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + 
"shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "DeleteDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "required":["Id"] + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + 
"LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", 
+ "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + 
"locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + 
"required":["Id"] + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + }, + "required":["Id"] + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + 
"members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + 
"exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + 
"locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + }, + "WebACLId":{ + "shape":"string", + "location":"uri", + "locationName":"WebACLId" + } + } + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + "InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", 
+ "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + 
"Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + "OriginPath":{"shape":"string"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer" + ] + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + 
"DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + 
"Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"CloudFrontOriginAccessIdentityConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + 
"shape":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"DistributionConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-07-27/"}, + "locationName":"StreamingDistributionConfig" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "IAMCertificateId":{"shape":"string"}, + "CloudFrontDefaultCertificate":{"shape":"boolean"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"} + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1164 @@ +{ + "version": "2.0", + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListDistributionsByWebACLId": "List the distributions that are associated with a specified AWS WAF web ACL.", + "ListInvalidations": "List invalidation batches.", 
+ "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "service": null, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." 
+ } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." 
+ } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors.You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. 
There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." 
+ } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." + } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." 
+ } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. 
To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." + } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior." 
+ } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete a origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." + } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsByWebACLIdResult$DistributionList": "The DistributionList type.", + "ListDistributionsResult$DistributionList": "The DistributionList type." 
+ } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." 
+ } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "To request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache 
behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header. CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." + } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains forward cookies option which doesn't match with the expectation for the whitelisted list of cookie names. 
Either list of cookie names has been specified when not allowed or list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you only want to support only clients that Support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "InvalidWebACLId": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." 
+ } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches." + } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." 
+ } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsByWebACLIdRequest": { + "base": "The request to list distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsByWebACLIdResult": { + "base": "The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. 
You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." + } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." + } + }, + "MissingBody": { + "base": "This operation requires a body. 
Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files.You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). 
You must enclose each invalidation object with the Path element tags. If the path includes non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object." + } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. 
If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors." 
+ } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." + } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." 
+ } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." + } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create anymore cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create anymore custom ssl certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create anymore origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + 
"base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. 
If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. 
For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. 
If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. 
If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, prefix and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. If you do not want to include cookies when you create a distribution or if you want to disable include cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. 
If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. 
Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize. For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. 
The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. 
You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000.", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. 
You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. 
For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer. For a list of HTTP status codes that you can replace, see CloudFront Documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. 
For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). 
Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element. To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionConfig$WebACLId": "(Optional) If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "DistributionSummary$WebACLId": "The Web ACL Id (if any) associated with the distribution.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. 
For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "InvalidWebACLId$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create a distribution but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsByWebACLIdRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsByWebACLIdRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListDistributionsByWebACLIdRequest$WebACLId": "The Id of the AWS WAF web ACL for which you want to list the associated distributions. 
If you specify \"null\" for the Id, the request returns a list of the distributions that aren't associated with a web ACL.", + "ListDistributionsRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. 
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. 
CloudFront appends the directory name to the value of DomainName.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can created signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. 
For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. 
For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$IAMCertificateId": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." + } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." 
+ } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": "InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/waiters-2.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-07-27/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2150 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-09-17", + "endpointPrefix":"cloudfront", + 
"globalEndpoint":"cloudfront.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2015_09_17", + "http":{ + "method":"POST", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"CloudFrontOriginAccessIdentityAlreadyExists"}, + {"shape":"MissingBody"}, + {"shape":"TooManyCloudFrontOriginAccessIdentities"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2015_09_17", + "http":{ + "method":"POST", + "requestUri":"/2015-09-17/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"DistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"MissingBody"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"TooManyDistributions"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + 
{"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidProtocolSettings"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"} + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2015_09_17", + "http":{ + "method":"POST", + "requestUri":"/2015-09-17/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"MissingBody"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"BatchTooLarge"}, + {"shape":"TooManyInvalidationsInProgress"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2015_09_17", + "http":{ + "method":"POST", + "requestUri":"/2015-09-17/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"StreamingDistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"MissingBody"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"TooManyStreamingDistributions"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2015_09_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + 
{"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"CloudFrontOriginAccessIdentityInUse"} + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2015_09_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-09-17/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"DistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2015_09_17", + "http":{ + "method":"DELETE", + "requestUri":"/2015-09-17/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"StreamingDistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront/{Id}" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistribution":{ + "name":"GetDistribution2015_09_17", + "http":{ + "method":"GET", 
+ "requestUri":"/2015-09-17/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetInvalidation":{ + "name":"GetInvalidation2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + {"shape":"NoSuchInvalidation"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + "output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + 
"output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributions":{ + "name":"ListDistributions2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"InvalidWebACLId"} + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListStreamingDistributions":{ + "name":"ListStreamingDistributions2015_09_17", + "http":{ + "method":"GET", + "requestUri":"/2015-09-17/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2015_09_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-09-17/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + 
{"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2015_09_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-09-17/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"} + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2015_09_17", + "http":{ + "method":"PUT", + "requestUri":"/2015-09-17/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, 
+ {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InconsistentQuantities"} + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + 
"TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CertificateSource":{ + "type":"string", + "enum":[ + "cloudfront", + "iam" + ] + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + 
"IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + 
"shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "locationName":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + 
"required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + 
"required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + 
"Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + 
"shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, 
+ "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + 
"Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true 
+ }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + "CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + 
"Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + }, + "WebACLId":{ + "shape":"string", + "location":"uri", + "locationName":"WebACLId" + } + } + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + 
"InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + "OriginPath":{"shape":"string"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer" + ] + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + 
"type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + 
"DomainName", + "S3Origin", + "Aliases", + "TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + 
"exception":true + }, + "TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + 
"ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2015-09-17/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"string"}, + "CertificateSource":{"shape":"CertificateSource"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"}, + "IAMCertificateId":{ + "shape":"string", + 
"deprecated":true + }, + "CloudFrontDefaultCertificate":{ + "shape":"boolean", + "deprecated":true + } + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1173 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information 
about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListDistributionsByWebACLId": "List the distributions that are associated with a specified AWS WAF web ACL.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. 
The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. 
For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors.You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." 
+ } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CertificateSource": { + "base": null, + "refs": { + "ViewerCertificate$CertificateSource": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to \"cloudfront\". If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), set to \"iam\", and update the Certificate field with the IAM certificate identifier of the custom viewer certificate for this distribution." 
+ } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." + } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." 
+ } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." 
+ } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. 
To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." + } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior." 
+ } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete a origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." + } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsByWebACLIdResult$DistributionList": "The DistributionList type.", + "ListDistributionsResult$DistributionList": "The DistributionList type." 
+ } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." 
+ } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "To request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache 
behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header. CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." + } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains forward cookies option which doesn't match with the expectation for the whitelisted list of cookie names. 
Either list of cookie names has been specified when not allowed or list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you only want to support only clients that Support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "InvalidWebACLId": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." 
+ } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches." + } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." 
+ } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsByWebACLIdRequest": { + "base": "The request to list distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsByWebACLIdResult": { + "base": "The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. 
You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." + } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." + } + }, + "MissingBody": { + "base": "This operation requires a body. 
Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files.You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). 
You must enclose each invalidation object with the Path element tags. If the path includes non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object." + } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. 
If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors." 
+ } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." + } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." 
+ } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." + } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create anymore cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create anymore custom ssl certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create anymore origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + 
"base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. 
If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. 
For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. 
If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. 
The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DefaultCacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. 
As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, prefix and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. 
If you do not want to include cookies when you create a distribution or if you want to disable include cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "Note: this field is deprecated. Please use \"cloudfront\" as CertificateSource and omit specifying a Certificate. If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. 
For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize. 
For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. 
If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. 
The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000.", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years)." 
+ } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. 
For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer. For a list of HTTP status codes that you can replace, see CloudFront Documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. 
For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). 
Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element. To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionConfig$WebACLId": "(Optional) If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "DistributionSummary$WebACLId": "The Web ACL Id (if any) associated with the distribution.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. 
For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "InvalidWebACLId$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create a distribution but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsByWebACLIdRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsByWebACLIdRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListDistributionsByWebACLIdRequest$WebACLId": "The Id of the AWS WAF web ACL for which you want to list the associated distributions. 
If you specify \"null\" for the Id, the request returns a list of the distributions that aren't associated with a web ACL.", + "ListDistributionsRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. 
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. 
CloudFront appends the directory name to the value of DomainName.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can created signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. 
For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. 
For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$Certificate": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), set to the IAM certificate identifier of the custom viewer certificate for this distribution.", + "ViewerCertificate$IAMCertificateId": "Note: this field is deprecated. Please use \"iam\" as CertificateSource and specify the IAM certificate Id as the Certificate. If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." + } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." 
+ } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": 
"InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2015-09-17/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2216 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-01-13", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2016_01_13", + "http":{ + "method":"POST", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"CloudFrontOriginAccessIdentityAlreadyExists"}, + {"shape":"MissingBody"}, + {"shape":"TooManyCloudFrontOriginAccessIdentities"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2016_01_13", + "http":{ + "method":"POST", + "requestUri":"/2016-01-13/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"DistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + 
{"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"MissingBody"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"TooManyDistributions"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidProtocolSettings"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"} + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2016_01_13", + "http":{ + "method":"POST", + "requestUri":"/2016-01-13/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"MissingBody"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"BatchTooLarge"}, + {"shape":"TooManyInvalidationsInProgress"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2016_01_13", + "http":{ + "method":"POST", + "requestUri":"/2016-01-13/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"StreamingDistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + 
{"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"MissingBody"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"TooManyStreamingDistributions"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2016_01_13", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"CloudFrontOriginAccessIdentityInUse"} + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2016_01_13", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-13/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"DistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2016_01_13", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-13/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"StreamingDistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront/{Id}" + }, + 
"input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistribution":{ + "name":"GetDistribution2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetInvalidation":{ + "name":"GetInvalidation2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + {"shape":"NoSuchInvalidation"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + 
"output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributions":{ + "name":"ListDistributions2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"InvalidWebACLId"} + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + 
"ListStreamingDistributions":{ + "name":"ListStreamingDistributions2016_01_13", + "http":{ + "method":"GET", + "requestUri":"/2016-01-13/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2016_01_13", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-13/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2016_01_13", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-13/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + 
{"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"} + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2016_01_13", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-13/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InconsistentQuantities"} + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ 
+ "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CertificateSource":{ + "type":"string", + "enum":[ + "cloudfront", + "iam" + ] + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + 
"CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + 
"type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "locationName":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + 
"locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomHeaders":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginCustomHeadersList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"}, + "OriginSslProtocols":{"shape":"OriginSslProtocols"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + 
"ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + 
"exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + 
"CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + 
"payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionResult":{ + 
"type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + 
"CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + }, 
+ "WebACLId":{ + "shape":"string", + "location":"uri", + "locationName":"WebACLId" + } + } + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + "InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + 
"IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + "OriginPath":{"shape":"string"}, + "CustomHeaders":{"shape":"CustomHeaders"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginCustomHeader":{ + "type":"structure", + "required":[ + "HeaderName", + "HeaderValue" + ], + "members":{ + "HeaderName":{"shape":"string"}, + "HeaderValue":{"shape":"string"} + } + }, + "OriginCustomHeadersList":{ + "type":"list", + "member":{ + "shape":"OriginCustomHeader", + 
"locationName":"OriginCustomHeader" + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer", + "https-only" + ] + }, + "OriginSslProtocols":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SslProtocolsList"} + } + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "SslProtocol":{ + "type":"string", + "enum":[ + 
"SSLv3", + "TLSv1", + "TLSv1.1", + "TLSv1.2" + ] + }, + "SslProtocolsList":{ + "type":"list", + "member":{ + "shape":"SslProtocol", + "locationName":"SslProtocol" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + 
"TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + 
"TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOriginCustomHeaders":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + 
"UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-13/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"string"}, + 
"CertificateSource":{"shape":"CertificateSource"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"}, + "IAMCertificateId":{ + "shape":"string", + "deprecated":true + }, + "CloudFrontDefaultCertificate":{ + "shape":"boolean", + "deprecated":true + } + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1219 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a 
distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListDistributionsByWebACLId": "List the distributions that are associated with a specified AWS WAF web ACL.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. 
If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. 
- CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors.You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. 
To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CertificateSource": { + "base": null, + "refs": { + "ViewerCertificate$CertificateSource": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to \"cloudfront\". If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), you can use your own IAM or ACM certificate. To use an ACM certificate, set to \"acm\" and update the Certificate to the ACM certificate ARN. 
To use an IAM certificate, set to \"iam\" and update the Certificate to the IAM certificate identifier." + } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." + } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." 
+ } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." 
+ } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. 
To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomHeaders": { + "base": "A complex type that contains the list of Custom Headers for each origin.", + "refs": { + "Origin$CustomHeaders": "A complex type that contains information about the custom headers associated with this Origin." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." 
+ } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior." + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete a origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." 
+ } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsByWebACLIdResult$DistributionList": "The DistributionList type.", + "ListDistributionsResult$DistributionList": "The DistributionList type." + } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. 
For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." + } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an 
invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "To request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header. CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." 
+ } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains forward cookies option which doesn't match with the expectation for the whitelisted list of cookie names. Either list of cookie names has been specified when not allowed or list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you only want to support only clients that Support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. 
Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "InvalidWebACLId": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." + } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches." + } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." 
+ } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsByWebACLIdRequest": { + "base": "The request to list distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsByWebACLIdResult": { + "base": "The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. 
Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." + } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." 
+ } + }, + "MissingBody": { + "base": "This operation requires a body. Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files.You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginCustomHeader": { + "base": "A complex type that contains information related to a Header", + "refs": { + "OriginCustomHeadersList$member": null + } + }, + "OriginCustomHeadersList": { + "base": null, + "refs": { + "CustomHeaders$Items": "A complex type that contains the custom headers for this Origin." + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "OriginSslProtocols": { + "base": "A complex type that contains the list of SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.", + "refs": { + "CustomOriginConfig$OriginSslProtocols": "The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS." 
+ } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). You must enclose each invalidation object with the Path element tags. If the path includes non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object." 
+ } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. 
If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors." + } + }, + "SslProtocol": { + "base": null, + "refs": { + "SslProtocolsList$member": null + } + }, + "SslProtocolsList": { + "base": null, + "refs": { + "OriginSslProtocols$Items": "A complex type that contains one SslProtocol element for each SSL/TLS protocol that you want to allow CloudFront to use when establishing an HTTPS connection with this origin." + } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." 
+ } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." 
+ } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create any more cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create any more custom SSL certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOriginCustomHeaders": { + "base": null, + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create any more origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + "base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private 
content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. 
For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. 
If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. 
The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. 
As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DefaultCacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. 
As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, prefix and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. 
If you do not want to include cookies when you create a distribution or if you want to disable include cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "Note: this field is deprecated. Please use \"cloudfront\" as CertificateSource and omit specifying a Certificate. If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. 
For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize. 
For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomHeaders$Quantity": "The number of custom headers for this origin.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. 
If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "OriginSslProtocols$Quantity": "The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing an HTTPS connection with this origin.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. 
You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000.", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. 
You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. 
For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer. For a list of HTTP status codes that you can replace, see CloudFront Documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. 
For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). 
Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element. To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionConfig$WebACLId": "(Optional) If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "DistributionSummary$WebACLId": "The Web ACL Id (if any) associated with the distribution.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. 
For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "InvalidWebACLId$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create a distribution but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsByWebACLIdRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsByWebACLIdRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListDistributionsByWebACLIdRequest$WebACLId": "The Id of the AWS WAF web ACL for which you want to list the associated distributions. 
If you specify \"null\" for the Id, the request returns a list of the distributions that aren't associated with a web ACL.", + "ListDistributionsRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. 
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. 
CloudFront appends the directory name to the value of DomainName.", + "OriginCustomHeader$HeaderName": "The header's name.", + "OriginCustomHeader$HeaderValue": "The header's value.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can created signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. 
For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. 
For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOriginCustomHeaders$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$Certificate": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), you can use your own IAM or ACM certificate. For ACM, set to the ACM certificate ARN. For IAM, set to the IAM certificate identifier.", + "ViewerCertificate$IAMCertificateId": "Note: this field is deprecated. Please use \"iam\" as CertificateSource and specify the IAM certificate Id as the Certificate. If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." + } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." 
+ } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": 
"InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-13/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Status" + } + ] + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2217 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-01-28", + "endpointPrefix":"cloudfront", + "globalEndpoint":"cloudfront.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"CloudFront", + "serviceFullName":"Amazon CloudFront", + "signatureVersion":"v4" + }, + "operations":{ + "CreateCloudFrontOriginAccessIdentity":{ + "name":"CreateCloudFrontOriginAccessIdentity2016_01_28", + "http":{ + "method":"POST", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront", + "responseCode":201 + }, + "input":{"shape":"CreateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"CreateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"CloudFrontOriginAccessIdentityAlreadyExists"}, + {"shape":"MissingBody"}, + {"shape":"TooManyCloudFrontOriginAccessIdentities"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateDistribution":{ + "name":"CreateDistribution2016_01_28", + "http":{ + "method":"POST", + "requestUri":"/2016-01-28/distribution", + "responseCode":201 + }, + "input":{"shape":"CreateDistributionRequest"}, + "output":{"shape":"CreateDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"DistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + 
{"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"MissingBody"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"TooManyDistributions"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + {"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidProtocolSettings"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"} + ] + }, + "CreateInvalidation":{ + "name":"CreateInvalidation2016_01_28", + "http":{ + "method":"POST", + "requestUri":"/2016-01-28/distribution/{DistributionId}/invalidation", + "responseCode":201 + }, + "input":{"shape":"CreateInvalidationRequest"}, + "output":{"shape":"CreateInvalidationResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"MissingBody"}, + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"BatchTooLarge"}, + {"shape":"TooManyInvalidationsInProgress"}, + {"shape":"InconsistentQuantities"} + ] + }, + "CreateStreamingDistribution":{ + "name":"CreateStreamingDistribution2016_01_28", + "http":{ + "method":"POST", + "requestUri":"/2016-01-28/streaming-distribution", + "responseCode":201 + }, + "input":{"shape":"CreateStreamingDistributionRequest"}, + "output":{"shape":"CreateStreamingDistributionResult"}, + "errors":[ + {"shape":"CNAMEAlreadyExists"}, + {"shape":"StreamingDistributionAlreadyExists"}, + {"shape":"InvalidOrigin"}, + 
{"shape":"InvalidOriginAccessIdentity"}, + {"shape":"AccessDenied"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"MissingBody"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"TooManyStreamingDistributions"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "DeleteCloudFrontOriginAccessIdentity":{ + "name":"DeleteCloudFrontOriginAccessIdentity2016_01_28", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteCloudFrontOriginAccessIdentityRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"CloudFrontOriginAccessIdentityInUse"} + ] + }, + "DeleteDistribution":{ + "name":"DeleteDistribution2016_01_28", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-28/distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"DistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "DeleteStreamingDistribution":{ + "name":"DeleteStreamingDistribution2016_01_28", + "http":{ + "method":"DELETE", + "requestUri":"/2016-01-28/streaming-distribution/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteStreamingDistributionRequest"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"StreamingDistributionNotDisabled"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"} + ] + }, + "GetCloudFrontOriginAccessIdentity":{ + "name":"GetCloudFrontOriginAccessIdentity2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront/{Id}" + }, + 
"input":{"shape":"GetCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetCloudFrontOriginAccessIdentityConfig":{ + "name":"GetCloudFrontOriginAccessIdentityConfig2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"GetCloudFrontOriginAccessIdentityConfigRequest"}, + "output":{"shape":"GetCloudFrontOriginAccessIdentityConfigResult"}, + "errors":[ + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistribution":{ + "name":"GetDistribution2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution/{Id}" + }, + "input":{"shape":"GetDistributionRequest"}, + "output":{"shape":"GetDistributionResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetDistributionConfig":{ + "name":"GetDistributionConfig2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution/{Id}/config" + }, + "input":{"shape":"GetDistributionConfigRequest"}, + "output":{"shape":"GetDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetInvalidation":{ + "name":"GetInvalidation2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution/{DistributionId}/invalidation/{Id}" + }, + "input":{"shape":"GetInvalidationRequest"}, + "output":{"shape":"GetInvalidationResult"}, + "errors":[ + {"shape":"NoSuchInvalidation"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistribution":{ + "name":"GetStreamingDistribution2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/streaming-distribution/{Id}" + }, + "input":{"shape":"GetStreamingDistributionRequest"}, + 
"output":{"shape":"GetStreamingDistributionResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "GetStreamingDistributionConfig":{ + "name":"GetStreamingDistributionConfig2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/streaming-distribution/{Id}/config" + }, + "input":{"shape":"GetStreamingDistributionConfigRequest"}, + "output":{"shape":"GetStreamingDistributionConfigResult"}, + "errors":[ + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"AccessDenied"} + ] + }, + "ListCloudFrontOriginAccessIdentities":{ + "name":"ListCloudFrontOriginAccessIdentities2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront" + }, + "input":{"shape":"ListCloudFrontOriginAccessIdentitiesRequest"}, + "output":{"shape":"ListCloudFrontOriginAccessIdentitiesResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributions":{ + "name":"ListDistributions2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution" + }, + "input":{"shape":"ListDistributionsRequest"}, + "output":{"shape":"ListDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "ListDistributionsByWebACLId":{ + "name":"ListDistributionsByWebACLId2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distributionsByWebACLId/{WebACLId}" + }, + "input":{"shape":"ListDistributionsByWebACLIdRequest"}, + "output":{"shape":"ListDistributionsByWebACLIdResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"InvalidWebACLId"} + ] + }, + "ListInvalidations":{ + "name":"ListInvalidations2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/distribution/{DistributionId}/invalidation" + }, + "input":{"shape":"ListInvalidationsRequest"}, + "output":{"shape":"ListInvalidationsResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"NoSuchDistribution"}, + {"shape":"AccessDenied"} + ] + }, + 
"ListStreamingDistributions":{ + "name":"ListStreamingDistributions2016_01_28", + "http":{ + "method":"GET", + "requestUri":"/2016-01-28/streaming-distribution" + }, + "input":{"shape":"ListStreamingDistributionsRequest"}, + "output":{"shape":"ListStreamingDistributionsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ] + }, + "UpdateCloudFrontOriginAccessIdentity":{ + "name":"UpdateCloudFrontOriginAccessIdentity2016_01_28", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-28/origin-access-identity/cloudfront/{Id}/config" + }, + "input":{"shape":"UpdateCloudFrontOriginAccessIdentityRequest"}, + "output":{"shape":"UpdateCloudFrontOriginAccessIdentityResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchCloudFrontOriginAccessIdentity"}, + {"shape":"PreconditionFailed"}, + {"shape":"InvalidArgument"}, + {"shape":"InconsistentQuantities"} + ] + }, + "UpdateDistribution":{ + "name":"UpdateDistribution2016_01_28", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-28/distribution/{Id}/config" + }, + "input":{"shape":"UpdateDistributionRequest"}, + "output":{"shape":"UpdateDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyDistributionCNAMEs"}, + {"shape":"InvalidDefaultRootObject"}, + {"shape":"InvalidRelativePath"}, + {"shape":"InvalidErrorCode"}, + {"shape":"InvalidResponseCode"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InvalidViewerCertificate"}, + {"shape":"InvalidMinimumProtocolVersion"}, + {"shape":"InvalidRequiredProtocol"}, + {"shape":"NoSuchOrigin"}, + {"shape":"TooManyOrigins"}, + 
{"shape":"TooManyCacheBehaviors"}, + {"shape":"TooManyCookieNamesInWhiteList"}, + {"shape":"InvalidForwardCookies"}, + {"shape":"TooManyHeadersInForwardedValues"}, + {"shape":"InvalidHeadersForS3Origin"}, + {"shape":"InconsistentQuantities"}, + {"shape":"TooManyCertificates"}, + {"shape":"InvalidLocationCode"}, + {"shape":"InvalidGeoRestrictionParameter"}, + {"shape":"InvalidTTLOrder"}, + {"shape":"InvalidWebACLId"}, + {"shape":"TooManyOriginCustomHeaders"} + ] + }, + "UpdateStreamingDistribution":{ + "name":"UpdateStreamingDistribution2016_01_28", + "http":{ + "method":"PUT", + "requestUri":"/2016-01-28/streaming-distribution/{Id}/config" + }, + "input":{"shape":"UpdateStreamingDistributionRequest"}, + "output":{"shape":"UpdateStreamingDistributionResult"}, + "errors":[ + {"shape":"AccessDenied"}, + {"shape":"CNAMEAlreadyExists"}, + {"shape":"IllegalUpdate"}, + {"shape":"InvalidIfMatchVersion"}, + {"shape":"MissingBody"}, + {"shape":"NoSuchStreamingDistribution"}, + {"shape":"PreconditionFailed"}, + {"shape":"TooManyStreamingDistributionCNAMEs"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidOriginAccessIdentity"}, + {"shape":"TooManyTrustedSigners"}, + {"shape":"TrustedSignerDoesNotExist"}, + {"shape":"InconsistentQuantities"} + ] + } + }, + "shapes":{ + "AccessDenied":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "ActiveTrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SignerList"} + } + }, + "AliasList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"CNAME" + } + }, + "Aliases":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AliasList"} + } + }, + "AllowedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ 
+ "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"}, + "CachedMethods":{"shape":"CachedMethods"} + } + }, + "AwsAccountNumberList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"AwsAccountNumber" + } + }, + "BatchTooLarge":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "CNAMEAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CacheBehavior":{ + "type":"structure", + "required":[ + "PathPattern", + "TargetOriginId", + "ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "PathPattern":{"shape":"string"}, + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "CacheBehaviorList":{ + "type":"list", + "member":{ + "shape":"CacheBehavior", + "locationName":"CacheBehavior" + } + }, + "CacheBehaviors":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CacheBehaviorList"} + } + }, + "CachedMethods":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"MethodsList"} + } + }, + "CertificateSource":{ + "type":"string", + "enum":[ + "cloudfront", + "iam", + "acm" + ] + }, + "CloudFrontOriginAccessIdentity":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + 
"CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"} + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Comment" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentityInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CloudFrontOriginAccessIdentityList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CloudFrontOriginAccessIdentitySummaryList"} + } + }, + "CloudFrontOriginAccessIdentitySummary":{ + "type":"structure", + "required":[ + "Id", + "S3CanonicalUserId", + "Comment" + ], + "members":{ + "Id":{"shape":"string"}, + "S3CanonicalUserId":{"shape":"string"}, + "Comment":{"shape":"string"} + } + }, + "CloudFrontOriginAccessIdentitySummaryList":{ + "type":"list", + "member":{ + "shape":"CloudFrontOriginAccessIdentitySummary", + "locationName":"CloudFrontOriginAccessIdentitySummary" + } + }, + "CookieNameList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "CookieNames":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CookieNameList"} + } + }, + "CookiePreference":{ + "type":"structure", + "required":["Forward"], + "members":{ + "Forward":{"shape":"ItemSelection"}, + "WhitelistedNames":{"shape":"CookieNames"} + } + }, + "CreateCloudFrontOriginAccessIdentityRequest":{ + 
"type":"structure", + "required":["CloudFrontOriginAccessIdentityConfig"], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "CreateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "CreateDistributionRequest":{ + "type":"structure", + "required":["DistributionConfig"], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + } + }, + "payload":"DistributionConfig" + }, + "CreateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "CreateInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "InvalidationBatch" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "InvalidationBatch":{ + "shape":"InvalidationBatch", + "locationName":"InvalidationBatch", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + } + }, + "payload":"InvalidationBatch" + }, + "CreateInvalidationResult":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"string", + "location":"header", + 
"locationName":"Location" + }, + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "CreateStreamingDistributionRequest":{ + "type":"structure", + "required":["StreamingDistributionConfig"], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + } + }, + "payload":"StreamingDistributionConfig" + }, + "CreateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "Location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "CustomErrorResponse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"integer"}, + "ResponsePagePath":{"shape":"string"}, + "ResponseCode":{"shape":"string"}, + "ErrorCachingMinTTL":{"shape":"long"} + } + }, + "CustomErrorResponseList":{ + "type":"list", + "member":{ + "shape":"CustomErrorResponse", + "locationName":"CustomErrorResponse" + } + }, + "CustomErrorResponses":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"CustomErrorResponseList"} + } + }, + "CustomHeaders":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginCustomHeadersList"} + } + }, + "CustomOriginConfig":{ + "type":"structure", + "required":[ + "HTTPPort", + "HTTPSPort", + "OriginProtocolPolicy" + ], + "members":{ + "HTTPPort":{"shape":"integer"}, + "HTTPSPort":{"shape":"integer"}, + "OriginProtocolPolicy":{"shape":"OriginProtocolPolicy"}, + "OriginSslProtocols":{"shape":"OriginSslProtocols"} + } + }, + "DefaultCacheBehavior":{ + "type":"structure", + "required":[ + "TargetOriginId", + 
"ForwardedValues", + "TrustedSigners", + "ViewerProtocolPolicy", + "MinTTL" + ], + "members":{ + "TargetOriginId":{"shape":"string"}, + "ForwardedValues":{"shape":"ForwardedValues"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "ViewerProtocolPolicy":{"shape":"ViewerProtocolPolicy"}, + "MinTTL":{"shape":"long"}, + "AllowedMethods":{"shape":"AllowedMethods"}, + "SmoothStreaming":{"shape":"boolean"}, + "DefaultTTL":{"shape":"long"}, + "MaxTTL":{"shape":"long"}, + "Compress":{"shape":"boolean"} + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "DeleteStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + } + }, + "Distribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "InProgressInvalidationBatches", + "DomainName", + "ActiveTrustedSigners", + "DistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "InProgressInvalidationBatches":{"shape":"integer"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "DistributionConfig":{"shape":"DistributionConfig"} + } + }, + "DistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + 
"exception":true + }, + "DistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "Origins", + "DefaultCacheBehavior", + "Comment", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "DefaultRootObject":{"shape":"string"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + "CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"LoggingConfig"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"DistributionSummaryList"} + } + }, + "DistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "Aliases", + "Origins", + "DefaultCacheBehavior", + "CacheBehaviors", + "CustomErrorResponses", + "Comment", + "PriceClass", + "Enabled", + "ViewerCertificate", + "Restrictions", + "WebACLId" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "Aliases":{"shape":"Aliases"}, + "Origins":{"shape":"Origins"}, + "DefaultCacheBehavior":{"shape":"DefaultCacheBehavior"}, + "CacheBehaviors":{"shape":"CacheBehaviors"}, + 
"CustomErrorResponses":{"shape":"CustomErrorResponses"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"}, + "ViewerCertificate":{"shape":"ViewerCertificate"}, + "Restrictions":{"shape":"Restrictions"}, + "WebACLId":{"shape":"string"} + } + }, + "DistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"DistributionSummary", + "locationName":"DistributionSummary" + } + }, + "ForwardedValues":{ + "type":"structure", + "required":[ + "QueryString", + "Cookies" + ], + "members":{ + "QueryString":{"shape":"boolean"}, + "Cookies":{"shape":"CookiePreference"}, + "Headers":{"shape":"Headers"} + } + }, + "GeoRestriction":{ + "type":"structure", + "required":[ + "RestrictionType", + "Quantity" + ], + "members":{ + "RestrictionType":{"shape":"GeoRestrictionType"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"LocationList"} + } + }, + "GeoRestrictionType":{ + "type":"string", + "enum":[ + "blacklist", + "whitelist", + "none" + ] + }, + "GetCloudFrontOriginAccessIdentityConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityConfig":{"shape":"CloudFrontOriginAccessIdentityConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + "GetCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + 
"payload":"CloudFrontOriginAccessIdentity" + }, + "GetDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionConfigResult":{ + "type":"structure", + "members":{ + "DistributionConfig":{"shape":"DistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"DistributionConfig" + }, + "GetDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "GetInvalidationRequest":{ + "type":"structure", + "required":[ + "DistributionId", + "Id" + ], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetInvalidationResult":{ + "type":"structure", + "members":{ + "Invalidation":{"shape":"Invalidation"} + }, + "payload":"Invalidation" + }, + "GetStreamingDistributionConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionConfigResult":{ + "type":"structure", + "members":{ + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistributionConfig" + }, + "GetStreamingDistributionRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetStreamingDistributionResult":{ + 
"type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "HeaderList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Name" + } + }, + "Headers":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"HeaderList"} + } + }, + "IllegalUpdate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InconsistentQuantities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidDefaultRootObject":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidErrorCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidForwardCookies":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidGeoRestrictionParameter":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidHeadersForS3Origin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidIfMatchVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidLocationCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + 
"error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidMinimumProtocolVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidProtocolSettings":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRelativePath":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequiredProtocol":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidResponseCode":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTTLOrder":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidViewerCertificate":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidWebACLId":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Invalidation":{ + "type":"structure", + "required":[ + "Id", + "Status", + "CreateTime", + "InvalidationBatch" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "InvalidationBatch":{"shape":"InvalidationBatch"} + } + }, + "InvalidationBatch":{ + "type":"structure", + "required":[ + "Paths", + 
"CallerReference" + ], + "members":{ + "Paths":{"shape":"Paths"}, + "CallerReference":{"shape":"string"} + } + }, + "InvalidationList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"InvalidationSummaryList"} + } + }, + "InvalidationSummary":{ + "type":"structure", + "required":[ + "Id", + "CreateTime", + "Status" + ], + "members":{ + "Id":{"shape":"string"}, + "CreateTime":{"shape":"timestamp"}, + "Status":{"shape":"string"} + } + }, + "InvalidationSummaryList":{ + "type":"list", + "member":{ + "shape":"InvalidationSummary", + "locationName":"InvalidationSummary" + } + }, + "ItemSelection":{ + "type":"string", + "enum":[ + "none", + "whitelist", + "all" + ] + }, + "KeyPairIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyPairId" + } + }, + "KeyPairIds":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"KeyPairIdList"} + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCloudFrontOriginAccessIdentitiesResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentityList":{"shape":"CloudFrontOriginAccessIdentityList"} + }, + "payload":"CloudFrontOriginAccessIdentityList" + }, + "ListDistributionsByWebACLIdRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + }, 
+ "WebACLId":{ + "shape":"string", + "location":"uri", + "locationName":"WebACLId" + } + } + }, + "ListDistributionsByWebACLIdResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListDistributionsResult":{ + "type":"structure", + "members":{ + "DistributionList":{"shape":"DistributionList"} + }, + "payload":"DistributionList" + }, + "ListInvalidationsRequest":{ + "type":"structure", + "required":["DistributionId"], + "members":{ + "DistributionId":{ + "shape":"string", + "location":"uri", + "locationName":"DistributionId" + }, + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListInvalidationsResult":{ + "type":"structure", + "members":{ + "InvalidationList":{"shape":"InvalidationList"} + }, + "payload":"InvalidationList" + }, + "ListStreamingDistributionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListStreamingDistributionsResult":{ + "type":"structure", + "members":{ + "StreamingDistributionList":{"shape":"StreamingDistributionList"} + }, + "payload":"StreamingDistributionList" + }, + "LocationList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Location" + } + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "IncludeCookies", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + 
"IncludeCookies":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "OPTIONS", + "DELETE" + ] + }, + "MethodsList":{ + "type":"list", + "member":{ + "shape":"Method", + "locationName":"Method" + } + }, + "MinimumProtocolVersion":{ + "type":"string", + "enum":[ + "SSLv3", + "TLSv1" + ] + }, + "MissingBody":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NoSuchCloudFrontOriginAccessIdentity":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchInvalidation":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchOrigin":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchStreamingDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Origin":{ + "type":"structure", + "required":[ + "Id", + "DomainName" + ], + "members":{ + "Id":{"shape":"string"}, + "DomainName":{"shape":"string"}, + "OriginPath":{"shape":"string"}, + "CustomHeaders":{"shape":"CustomHeaders"}, + "S3OriginConfig":{"shape":"S3OriginConfig"}, + "CustomOriginConfig":{"shape":"CustomOriginConfig"} + } + }, + "OriginCustomHeader":{ + "type":"structure", + "required":[ + "HeaderName", + "HeaderValue" + ], + "members":{ + "HeaderName":{"shape":"string"}, + "HeaderValue":{"shape":"string"} + } + }, + "OriginCustomHeadersList":{ + "type":"list", + "member":{ + "shape":"OriginCustomHeader", + 
"locationName":"OriginCustomHeader" + } + }, + "OriginList":{ + "type":"list", + "member":{ + "shape":"Origin", + "locationName":"Origin" + }, + "min":1 + }, + "OriginProtocolPolicy":{ + "type":"string", + "enum":[ + "http-only", + "match-viewer", + "https-only" + ] + }, + "OriginSslProtocols":{ + "type":"structure", + "required":[ + "Quantity", + "Items" + ], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"SslProtocolsList"} + } + }, + "Origins":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"OriginList"} + } + }, + "PathList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"Path" + } + }, + "Paths":{ + "type":"structure", + "required":["Quantity"], + "members":{ + "Quantity":{"shape":"integer"}, + "Items":{"shape":"PathList"} + } + }, + "PreconditionFailed":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":412}, + "exception":true + }, + "PriceClass":{ + "type":"string", + "enum":[ + "PriceClass_100", + "PriceClass_200", + "PriceClass_All" + ] + }, + "Restrictions":{ + "type":"structure", + "required":["GeoRestriction"], + "members":{ + "GeoRestriction":{"shape":"GeoRestriction"} + } + }, + "S3Origin":{ + "type":"structure", + "required":[ + "DomainName", + "OriginAccessIdentity" + ], + "members":{ + "DomainName":{"shape":"string"}, + "OriginAccessIdentity":{"shape":"string"} + } + }, + "S3OriginConfig":{ + "type":"structure", + "required":["OriginAccessIdentity"], + "members":{ + "OriginAccessIdentity":{"shape":"string"} + } + }, + "SSLSupportMethod":{ + "type":"string", + "enum":[ + "sni-only", + "vip" + ] + }, + "Signer":{ + "type":"structure", + "members":{ + "AwsAccountNumber":{"shape":"string"}, + "KeyPairIds":{"shape":"KeyPairIds"} + } + }, + "SignerList":{ + "type":"list", + "member":{ + "shape":"Signer", + "locationName":"Signer" + } + }, + "SslProtocol":{ + "type":"string", + "enum":[ + 
"SSLv3", + "TLSv1", + "TLSv1.1", + "TLSv1.2" + ] + }, + "SslProtocolsList":{ + "type":"list", + "member":{ + "shape":"SslProtocol", + "locationName":"SslProtocol" + } + }, + "StreamingDistribution":{ + "type":"structure", + "required":[ + "Id", + "Status", + "DomainName", + "ActiveTrustedSigners", + "StreamingDistributionConfig" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "ActiveTrustedSigners":{"shape":"ActiveTrustedSigners"}, + "StreamingDistributionConfig":{"shape":"StreamingDistributionConfig"} + } + }, + "StreamingDistributionAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionConfig":{ + "type":"structure", + "required":[ + "CallerReference", + "S3Origin", + "Comment", + "TrustedSigners", + "Enabled" + ], + "members":{ + "CallerReference":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "Comment":{"shape":"string"}, + "Logging":{"shape":"StreamingLoggingConfig"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionList":{ + "type":"structure", + "required":[ + "Marker", + "MaxItems", + "IsTruncated", + "Quantity" + ], + "members":{ + "Marker":{"shape":"string"}, + "NextMarker":{"shape":"string"}, + "MaxItems":{"shape":"integer"}, + "IsTruncated":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"StreamingDistributionSummaryList"} + } + }, + "StreamingDistributionNotDisabled":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "StreamingDistributionSummary":{ + "type":"structure", + "required":[ + "Id", + "Status", + "LastModifiedTime", + "DomainName", + "S3Origin", + "Aliases", + 
"TrustedSigners", + "Comment", + "PriceClass", + "Enabled" + ], + "members":{ + "Id":{"shape":"string"}, + "Status":{"shape":"string"}, + "LastModifiedTime":{"shape":"timestamp"}, + "DomainName":{"shape":"string"}, + "S3Origin":{"shape":"S3Origin"}, + "Aliases":{"shape":"Aliases"}, + "TrustedSigners":{"shape":"TrustedSigners"}, + "Comment":{"shape":"string"}, + "PriceClass":{"shape":"PriceClass"}, + "Enabled":{"shape":"boolean"} + } + }, + "StreamingDistributionSummaryList":{ + "type":"list", + "member":{ + "shape":"StreamingDistributionSummary", + "locationName":"StreamingDistributionSummary" + } + }, + "StreamingLoggingConfig":{ + "type":"structure", + "required":[ + "Enabled", + "Bucket", + "Prefix" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Bucket":{"shape":"string"}, + "Prefix":{"shape":"string"} + } + }, + "TooManyCacheBehaviors":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCertificates":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCloudFrontOriginAccessIdentities":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyCookieNamesInWhiteList":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyHeadersInForwardedValues":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + 
"TooManyInvalidationsInProgress":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOriginCustomHeaders":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyOrigins":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributionCNAMEs":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyStreamingDistributions":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrustedSigners":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSignerDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedSigners":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{"shape":"boolean"}, + "Quantity":{"shape":"integer"}, + "Items":{"shape":"AwsAccountNumberList"} + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest":{ + "type":"structure", + "required":[ + "CloudFrontOriginAccessIdentityConfig", + "Id" + ], + "members":{ + "CloudFrontOriginAccessIdentityConfig":{ + "shape":"CloudFrontOriginAccessIdentityConfig", + "locationName":"CloudFrontOriginAccessIdentityConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"CloudFrontOriginAccessIdentityConfig" + }, + 
"UpdateCloudFrontOriginAccessIdentityResult":{ + "type":"structure", + "members":{ + "CloudFrontOriginAccessIdentity":{"shape":"CloudFrontOriginAccessIdentity"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"CloudFrontOriginAccessIdentity" + }, + "UpdateDistributionRequest":{ + "type":"structure", + "required":[ + "DistributionConfig", + "Id" + ], + "members":{ + "DistributionConfig":{ + "shape":"DistributionConfig", + "locationName":"DistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"DistributionConfig" + }, + "UpdateDistributionResult":{ + "type":"structure", + "members":{ + "Distribution":{"shape":"Distribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"Distribution" + }, + "UpdateStreamingDistributionRequest":{ + "type":"structure", + "required":[ + "StreamingDistributionConfig", + "Id" + ], + "members":{ + "StreamingDistributionConfig":{ + "shape":"StreamingDistributionConfig", + "locationName":"StreamingDistributionConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2016-01-28/"} + }, + "Id":{ + "shape":"string", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"StreamingDistributionConfig" + }, + "UpdateStreamingDistributionResult":{ + "type":"structure", + "members":{ + "StreamingDistribution":{"shape":"StreamingDistribution"}, + "ETag":{ + "shape":"string", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"StreamingDistribution" + }, + "ViewerCertificate":{ + "type":"structure", + "members":{ + "Certificate":{"shape":"string"}, + 
"CertificateSource":{"shape":"CertificateSource"}, + "SSLSupportMethod":{"shape":"SSLSupportMethod"}, + "MinimumProtocolVersion":{"shape":"MinimumProtocolVersion"}, + "IAMCertificateId":{ + "shape":"string", + "deprecated":true + }, + "CloudFrontDefaultCertificate":{ + "shape":"boolean", + "deprecated":true + } + } + }, + "ViewerProtocolPolicy":{ + "type":"string", + "enum":[ + "allow-all", + "https-only", + "redirect-to-https" + ] + }, + "boolean":{"type":"boolean"}, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1219 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "CreateCloudFrontOriginAccessIdentity": "Create a new origin access identity.", + "CreateDistribution": "Create a new distribution.", + "CreateInvalidation": "Create a new invalidation.", + "CreateStreamingDistribution": "Create a new streaming distribution.", + "DeleteCloudFrontOriginAccessIdentity": "Delete an origin access identity.", + "DeleteDistribution": "Delete a distribution.", + "DeleteStreamingDistribution": "Delete a streaming distribution.", + "GetCloudFrontOriginAccessIdentity": "Get the information about an origin access identity.", + "GetCloudFrontOriginAccessIdentityConfig": "Get the configuration information about an origin access identity.", + "GetDistribution": "Get the information about a 
distribution.", + "GetDistributionConfig": "Get the configuration information about a distribution.", + "GetInvalidation": "Get the information about an invalidation.", + "GetStreamingDistribution": "Get the information about a streaming distribution.", + "GetStreamingDistributionConfig": "Get the configuration information about a streaming distribution.", + "ListCloudFrontOriginAccessIdentities": "List origin access identities.", + "ListDistributions": "List distributions.", + "ListDistributionsByWebACLId": "List the distributions that are associated with a specified AWS WAF web ACL.", + "ListInvalidations": "List invalidation batches.", + "ListStreamingDistributions": "List streaming distributions.", + "UpdateCloudFrontOriginAccessIdentity": "Update an origin access identity.", + "UpdateDistribution": "Update a distribution.", + "UpdateStreamingDistribution": "Update a streaming distribution." + }, + "shapes": { + "AccessDenied": { + "base": "Access denied.", + "refs": { + } + }, + "ActiveTrustedSigners": { + "base": "A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for the default cache behavior or for any of the other cache behaviors for this distribution. These are accounts that you want to allow to create signed URLs for private content.", + "refs": { + "Distribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. 
If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.", + "StreamingDistribution$ActiveTrustedSigners": "CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs." + } + }, + "AliasList": { + "base": null, + "refs": { + "Aliases$Items": "Optional: A complex type that contains CNAME elements, if any, for this distribution. If Quantity is 0, you can omit Items." + } + }, + "Aliases": { + "base": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "refs": { + "DistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "DistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "StreamingDistributionConfig$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution.", + "StreamingDistributionSummary$Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this streaming distribution." + } + }, + "AllowedMethods": { + "base": "A complex type that controls which HTTP methods CloudFront processes and forwards to your Amazon S3 bucket or your custom origin. There are three choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards only GET, HEAD and OPTIONS requests. 
- CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or to your custom origin so users can't perform operations that you don't want them to. For example, you may not want users to have permission to delete objects from your origin.", + "refs": { + "CacheBehavior$AllowedMethods": null, + "DefaultCacheBehavior$AllowedMethods": null + } + }, + "AwsAccountNumberList": { + "base": null, + "refs": { + "TrustedSigners$Items": "Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "BatchTooLarge": { + "base": null, + "refs": { + } + }, + "CNAMEAlreadyExists": { + "base": null, + "refs": { + } + }, + "CacheBehavior": { + "base": "A complex type that describes how CloudFront processes requests. You can create up to 10 cache behaviors.You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin will never be used. If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error. To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element. 
To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.", + "refs": { + "CacheBehaviorList$member": null + } + }, + "CacheBehaviorList": { + "base": null, + "refs": { + "CacheBehaviors$Items": "Optional: A complex type that contains cache behaviors for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CacheBehaviors": { + "base": "A complex type that contains zero or more CacheBehavior elements.", + "refs": { + "DistributionConfig$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements.", + "DistributionSummary$CacheBehaviors": "A complex type that contains zero or more CacheBehavior elements." + } + }, + "CachedMethods": { + "base": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: - CloudFront caches responses to GET and HEAD requests. - CloudFront caches responses to GET, HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers and Origin headers for the responses to be cached correctly.", + "refs": { + "AllowedMethods$CachedMethods": null + } + }, + "CertificateSource": { + "base": null, + "refs": { + "ViewerCertificate$CertificateSource": "If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to \"cloudfront\". If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), you can use your own IAM or ACM certificate. To use an ACM certificate, set to \"acm\" and update the Certificate to the ACM certificate ARN. 
To use an IAM certificate, set to \"iam\" and update the Certificate to the IAM certificate identifier." + } + }, + "CloudFrontOriginAccessIdentity": { + "base": "CloudFront origin access identity.", + "refs": { + "CreateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "GetCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information.", + "UpdateCloudFrontOriginAccessIdentityResult$CloudFrontOriginAccessIdentity": "The origin access identity's information." + } + }, + "CloudFrontOriginAccessIdentityAlreadyExists": { + "base": "If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "refs": { + } + }, + "CloudFrontOriginAccessIdentityConfig": { + "base": "Origin access identity configuration.", + "refs": { + "CloudFrontOriginAccessIdentity$CloudFrontOriginAccessIdentityConfig": "The current configuration information for the identity.", + "CreateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "GetCloudFrontOriginAccessIdentityConfigResult$CloudFrontOriginAccessIdentityConfig": "The origin access identity's configuration information.", + "UpdateCloudFrontOriginAccessIdentityRequest$CloudFrontOriginAccessIdentityConfig": "The identity's configuration information." + } + }, + "CloudFrontOriginAccessIdentityInUse": { + "base": null, + "refs": { + } + }, + "CloudFrontOriginAccessIdentityList": { + "base": "The CloudFrontOriginAccessIdentityList type.", + "refs": { + "ListCloudFrontOriginAccessIdentitiesResult$CloudFrontOriginAccessIdentityList": "The CloudFrontOriginAccessIdentityList type." 
+ } + }, + "CloudFrontOriginAccessIdentitySummary": { + "base": "Summary of the information about a CloudFront origin access identity.", + "refs": { + "CloudFrontOriginAccessIdentitySummaryList$member": null + } + }, + "CloudFrontOriginAccessIdentitySummaryList": { + "base": null, + "refs": { + "CloudFrontOriginAccessIdentityList$Items": "A complex type that contains one CloudFrontOriginAccessIdentitySummary element for each origin access identity that was created by the current AWS account." + } + }, + "CookieNameList": { + "base": null, + "refs": { + "CookieNames$Items": "Optional: A complex type that contains whitelisted cookies for this cache behavior. If Quantity is 0, you can omit Items." + } + }, + "CookieNames": { + "base": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior.", + "refs": { + "CookiePreference$WhitelistedNames": "A complex type that specifies the whitelisted cookies, if any, that you want CloudFront to forward to your origin that is associated with this cache behavior." + } + }, + "CookiePreference": { + "base": "A complex type that specifies the cookie preferences associated with this cache behavior.", + "refs": { + "ForwardedValues$Cookies": "A complex type that specifies how CloudFront handles cookies." 
+ } + }, + "CreateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to create a new origin access identity.", + "refs": { + } + }, + "CreateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateDistributionRequest": { + "base": "The request to create a new distribution.", + "refs": { + } + }, + "CreateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateInvalidationRequest": { + "base": "The request to create an invalidation.", + "refs": { + } + }, + "CreateInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CreateStreamingDistributionRequest": { + "base": "The request to create a new streaming distribution.", + "refs": { + } + }, + "CreateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "CustomErrorResponse": { + "base": "A complex type that describes how you'd prefer CloudFront to respond to requests that result in either a 4xx or 5xx response. You can control whether a custom error page should be displayed, what the desired response code should be for this error page and how long should the error response be cached by CloudFront. If you don't want to specify any custom error responses, include only an empty CustomErrorResponses element. To delete all custom error responses in an existing distribution, update the distribution configuration and include only an empty CustomErrorResponses element. 
To add, change, or remove one or more custom error responses, update the distribution configuration and specify all of the custom error responses that you want to include in the updated distribution.", + "refs": { + "CustomErrorResponseList$member": null + } + }, + "CustomErrorResponseList": { + "base": null, + "refs": { + "CustomErrorResponses$Items": "Optional: A complex type that contains custom error responses for this distribution. If Quantity is 0, you can omit Items." + } + }, + "CustomErrorResponses": { + "base": "A complex type that contains zero or more CustomErrorResponse elements.", + "refs": { + "DistributionConfig$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponse elements.", + "DistributionSummary$CustomErrorResponses": "A complex type that contains zero or more CustomErrorResponses elements." + } + }, + "CustomHeaders": { + "base": "A complex type that contains the list of Custom Headers for each origin.", + "refs": { + "Origin$CustomHeaders": "A complex type that contains information about the custom headers associated with this Origin." + } + }, + "CustomOriginConfig": { + "base": "A customer origin.", + "refs": { + "Origin$CustomOriginConfig": "A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead." 
+ } + }, + "DefaultCacheBehavior": { + "base": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "refs": { + "DistributionConfig$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior.", + "DistributionSummary$DefaultCacheBehavior": "A complex type that describes the default cache behavior if you do not specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements.You must create exactly one default cache behavior." + } + }, + "DeleteCloudFrontOriginAccessIdentityRequest": { + "base": "The request to delete a origin access identity.", + "refs": { + } + }, + "DeleteDistributionRequest": { + "base": "The request to delete a distribution.", + "refs": { + } + }, + "DeleteStreamingDistributionRequest": { + "base": "The request to delete a streaming distribution.", + "refs": { + } + }, + "Distribution": { + "base": "A distribution.", + "refs": { + "CreateDistributionResult$Distribution": "The distribution's information.", + "GetDistributionResult$Distribution": "The distribution's information.", + "UpdateDistributionResult$Distribution": "The distribution's information." 
+ } + }, + "DistributionAlreadyExists": { + "base": "The caller reference you attempted to create the distribution with is associated with another distribution.", + "refs": { + } + }, + "DistributionConfig": { + "base": "A distribution Configuration.", + "refs": { + "CreateDistributionRequest$DistributionConfig": "The distribution's configuration information.", + "Distribution$DistributionConfig": "The current configuration information for the distribution.", + "GetDistributionConfigResult$DistributionConfig": "The distribution's configuration information.", + "UpdateDistributionRequest$DistributionConfig": "The distribution's configuration information." + } + }, + "DistributionList": { + "base": "A distribution list.", + "refs": { + "ListDistributionsByWebACLIdResult$DistributionList": "The DistributionList type.", + "ListDistributionsResult$DistributionList": "The DistributionList type." + } + }, + "DistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "DistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront distribution.", + "refs": { + "DistributionSummaryList$member": null + } + }, + "DistributionSummaryList": { + "base": null, + "refs": { + "DistributionList$Items": "A complex type that contains one DistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "ForwardedValues": { + "base": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "refs": { + "CacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers.", + "DefaultCacheBehavior$ForwardedValues": "A complex type that specifies how CloudFront handles query strings, cookies and headers." + } + }, + "GeoRestriction": { + "base": "A complex type that controls the countries in which your content is distributed. 
For more information about geo restriction, go to Customizing Error Responses in the Amazon CloudFront Developer Guide. CloudFront determines the location of your users using MaxMind GeoIP databases. For information about the accuracy of these databases, see How accurate are your GeoIP databases? on the MaxMind website.", + "refs": { + "Restrictions$GeoRestriction": null + } + }, + "GeoRestrictionType": { + "base": null, + "refs": { + "GeoRestriction$RestrictionType": "The method that you want to use to restrict distribution of your content by country: - none: No geo restriction is enabled, meaning access to content is not restricted by client geo location. - blacklist: The Location elements specify the countries in which you do not want CloudFront to distribute your content. - whitelist: The Location elements specify the countries in which you want CloudFront to distribute your content." + } + }, + "GetCloudFrontOriginAccessIdentityConfigRequest": { + "base": "The request to get an origin access identity's configuration.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityRequest": { + "base": "The request to get an origin access identity's information.", + "refs": { + } + }, + "GetCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionConfigRequest": { + "base": "The request to get a distribution configuration.", + "refs": { + } + }, + "GetDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetDistributionRequest": { + "base": "The request to get a distribution's information.", + "refs": { + } + }, + "GetDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetInvalidationRequest": { + "base": "The request to get an 
invalidation's information.", + "refs": { + } + }, + "GetInvalidationResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionConfigRequest": { + "base": "To request to get a streaming distribution configuration.", + "refs": { + } + }, + "GetStreamingDistributionConfigResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "GetStreamingDistributionRequest": { + "base": "The request to get a streaming distribution's information.", + "refs": { + } + }, + "GetStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "HeaderList": { + "base": null, + "refs": { + "Headers$Items": "Optional: A complex type that contains a Name element for each header that you want CloudFront to forward to the origin and to vary on for this cache behavior. If Quantity is 0, omit Items." + } + }, + "Headers": { + "base": "A complex type that specifies the headers that you want CloudFront to forward to the origin for this cache behavior. For the headers that you specify, CloudFront also caches separate versions of a given object based on the header values in viewer requests; this is known as varying on headers. For example, suppose viewer requests for logo.jpg contain a custom Product header that has a value of either Acme or Apex, and you configure CloudFront to vary on the Product header. CloudFront forwards the Product header to the origin and caches the response from the origin once for each header value.", + "refs": { + "ForwardedValues$Headers": "A complex type that specifies the Headers, if any, that you want CloudFront to vary upon for this cache behavior." 
+ } + }, + "IllegalUpdate": { + "base": "Origin and CallerReference cannot be updated.", + "refs": { + } + }, + "InconsistentQuantities": { + "base": "The value of Quantity and the size of Items do not match.", + "refs": { + } + }, + "InvalidArgument": { + "base": "The argument is invalid.", + "refs": { + } + }, + "InvalidDefaultRootObject": { + "base": "The default root object file name is too big or contains an invalid character.", + "refs": { + } + }, + "InvalidErrorCode": { + "base": null, + "refs": { + } + }, + "InvalidForwardCookies": { + "base": "Your request contains forward cookies option which doesn't match with the expectation for the whitelisted list of cookie names. Either list of cookie names has been specified when not allowed or list of cookie names is missing when expected.", + "refs": { + } + }, + "InvalidGeoRestrictionParameter": { + "base": null, + "refs": { + } + }, + "InvalidHeadersForS3Origin": { + "base": null, + "refs": { + } + }, + "InvalidIfMatchVersion": { + "base": "The If-Match version is missing or not valid for the distribution.", + "refs": { + } + }, + "InvalidLocationCode": { + "base": null, + "refs": { + } + }, + "InvalidMinimumProtocolVersion": { + "base": null, + "refs": { + } + }, + "InvalidOrigin": { + "base": "The Amazon S3 origin server specified does not refer to a valid Amazon S3 bucket.", + "refs": { + } + }, + "InvalidOriginAccessIdentity": { + "base": "The origin access identity is not valid or doesn't exist.", + "refs": { + } + }, + "InvalidProtocolSettings": { + "base": "You cannot specify SSLv3 as the minimum protocol version if you only want to support only clients that Support Server Name Indication (SNI).", + "refs": { + } + }, + "InvalidRelativePath": { + "base": "The relative path is too big, is not URL-encoded, or does not begin with a slash (/).", + "refs": { + } + }, + "InvalidRequiredProtocol": { + "base": "This operation requires the HTTPS protocol. 
Ensure that you specify the HTTPS protocol in your request, or omit the RequiredProtocols element from your distribution configuration.", + "refs": { + } + }, + "InvalidResponseCode": { + "base": null, + "refs": { + } + }, + "InvalidTTLOrder": { + "base": null, + "refs": { + } + }, + "InvalidViewerCertificate": { + "base": null, + "refs": { + } + }, + "InvalidWebACLId": { + "base": null, + "refs": { + } + }, + "Invalidation": { + "base": "An invalidation.", + "refs": { + "CreateInvalidationResult$Invalidation": "The invalidation's information.", + "GetInvalidationResult$Invalidation": "The invalidation's information." + } + }, + "InvalidationBatch": { + "base": "An invalidation batch.", + "refs": { + "CreateInvalidationRequest$InvalidationBatch": "The batch information for the invalidation.", + "Invalidation$InvalidationBatch": "The current invalidation information for the batch request." + } + }, + "InvalidationList": { + "base": "An invalidation list.", + "refs": { + "ListInvalidationsResult$InvalidationList": "Information about invalidation batches." + } + }, + "InvalidationSummary": { + "base": "Summary of an invalidation request.", + "refs": { + "InvalidationSummaryList$member": null + } + }, + "InvalidationSummaryList": { + "base": null, + "refs": { + "InvalidationList$Items": "A complex type that contains one InvalidationSummary element for each invalidation batch that was created by the current AWS account." + } + }, + "ItemSelection": { + "base": null, + "refs": { + "CookiePreference$Forward": "Use this element to specify whether you want CloudFront to forward cookies to the origin that is associated with this cache behavior. You can specify all, none or whitelist. If you choose All, CloudFront forwards all cookies regardless of how many your application uses." + } + }, + "KeyPairIdList": { + "base": null, + "refs": { + "KeyPairIds$Items": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." 
+ } + }, + "KeyPairIds": { + "base": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.", + "refs": { + "Signer$KeyPairIds": "A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber." + } + }, + "ListCloudFrontOriginAccessIdentitiesRequest": { + "base": "The request to list origin access identities.", + "refs": { + } + }, + "ListCloudFrontOriginAccessIdentitiesResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListDistributionsByWebACLIdRequest": { + "base": "The request to list distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsByWebACLIdResult": { + "base": "The response to a request to list the distributions that are associated with a specified AWS WAF web ACL.", + "refs": { + } + }, + "ListDistributionsRequest": { + "base": "The request to list your distributions.", + "refs": { + } + }, + "ListDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListInvalidationsRequest": { + "base": "The request to list invalidations.", + "refs": { + } + }, + "ListInvalidationsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ListStreamingDistributionsRequest": { + "base": "The request to list your streaming distributions.", + "refs": { + } + }, + "ListStreamingDistributionsResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "LocationList": { + "base": null, + "refs": { + "GeoRestriction$Items": "A complex type that contains a Location element for each country in which you want CloudFront either to distribute your content (whitelist) or not distribute your content (blacklist). The Location element is a two-letter, uppercase country code for a country that you want to include in your blacklist or whitelist. 
Include one Location element for each country. CloudFront and MaxMind both use ISO 3166 country codes. For the current list of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on the International Organization for Standardization website. You can also refer to the country list in the CloudFront console, which includes both country names and codes." + } + }, + "LoggingConfig": { + "base": "A complex type that controls whether access logs are written for the distribution.", + "refs": { + "DistributionConfig$Logging": "A complex type that controls whether access logs are written for the distribution." + } + }, + "Method": { + "base": null, + "refs": { + "MethodsList$member": null + } + }, + "MethodsList": { + "base": null, + "refs": { + "AllowedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin.", + "CachedMethods$Items": "A complex type that contains the HTTP methods that you want CloudFront to cache responses to." + } + }, + "MinimumProtocolVersion": { + "base": null, + "refs": { + "ViewerCertificate$MinimumProtocolVersion": "Specify the minimum version of the SSL protocol that you want CloudFront to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your objects only to browsers or devices that support at least the SSL version that you specify. The TLSv1 protocol is more secure, so we recommend that you specify SSLv3 only if your users are using browsers or devices that don't support TLSv1. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using dedicated IP (if you specify vip for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. If you're using a custom certificate (if you specify a value for IAMCertificateId) and if you're using SNI (if you specify sni-only for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion." 
+ } + }, + "MissingBody": { + "base": "This operation requires a body. Ensure that the body is present and the Content-Type header is set.", + "refs": { + } + }, + "NoSuchCloudFrontOriginAccessIdentity": { + "base": "The specified origin access identity does not exist.", + "refs": { + } + }, + "NoSuchDistribution": { + "base": "The specified distribution does not exist.", + "refs": { + } + }, + "NoSuchInvalidation": { + "base": "The specified invalidation does not exist.", + "refs": { + } + }, + "NoSuchOrigin": { + "base": "No origin exists with the specified Origin Id.", + "refs": { + } + }, + "NoSuchStreamingDistribution": { + "base": "The specified streaming distribution does not exist.", + "refs": { + } + }, + "Origin": { + "base": "A complex type that describes the Amazon S3 bucket or the HTTP server (for example, a web server) from which CloudFront gets your files.You must create at least one origin.", + "refs": { + "OriginList$member": null + } + }, + "OriginCustomHeader": { + "base": "A complex type that contains information related to a Header", + "refs": { + "OriginCustomHeadersList$member": null + } + }, + "OriginCustomHeadersList": { + "base": null, + "refs": { + "CustomHeaders$Items": "A complex type that contains the custom headers for this Origin." + } + }, + "OriginList": { + "base": null, + "refs": { + "Origins$Items": "A complex type that contains origins for this distribution." + } + }, + "OriginProtocolPolicy": { + "base": null, + "refs": { + "CustomOriginConfig$OriginProtocolPolicy": "The origin protocol policy to apply to your origin." + } + }, + "OriginSslProtocols": { + "base": "A complex type that contains the list of SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.", + "refs": { + "CustomOriginConfig$OriginSslProtocols": "The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS." 
+ } + }, + "Origins": { + "base": "A complex type that contains information about origins for this distribution.", + "refs": { + "DistributionConfig$Origins": "A complex type that contains information about origins for this distribution.", + "DistributionSummary$Origins": "A complex type that contains information about origins for this distribution." + } + }, + "PathList": { + "base": null, + "refs": { + "Paths$Items": "A complex type that contains a list of the objects that you want to invalidate." + } + }, + "Paths": { + "base": "A complex type that contains information about the objects that you want to invalidate.", + "refs": { + "InvalidationBatch$Paths": "The path of the object to invalidate. The path is relative to the distribution and must begin with a slash (/). You must enclose each invalidation object with the Path element tags. If the path includes non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not invalidate the old version of the updated object." 
+ } + }, + "PreconditionFailed": { + "base": "The precondition given in one or more of the request-header fields evaluated to false.", + "refs": { + } + }, + "PriceClass": { + "base": null, + "refs": { + "DistributionConfig$PriceClass": "A complex type that contains information about price class for this distribution.", + "DistributionSummary$PriceClass": null, + "StreamingDistributionConfig$PriceClass": "A complex type that contains information about price class for this streaming distribution.", + "StreamingDistributionSummary$PriceClass": null + } + }, + "Restrictions": { + "base": "A complex type that identifies ways in which you want to restrict distribution of your content.", + "refs": { + "DistributionConfig$Restrictions": null, + "DistributionSummary$Restrictions": null + } + }, + "S3Origin": { + "base": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "refs": { + "StreamingDistributionConfig$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution.", + "StreamingDistributionSummary$S3Origin": "A complex type that contains information about the Amazon S3 bucket from which you want CloudFront to get your media files for distribution." + } + }, + "S3OriginConfig": { + "base": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.", + "refs": { + "Origin$S3OriginConfig": "A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead." + } + }, + "SSLSupportMethod": { + "base": null, + "refs": { + "ViewerCertificate$SSLSupportMethod": "If you specify a value for IAMCertificateId, you must also specify how you want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. 
If you specify vip, CloudFront uses dedicated IP addresses for your content and can respond to HTTPS requests from any viewer. However, you must request permission to use this feature, and you incur additional monthly charges. If you specify sni-only, CloudFront can only respond to HTTPS requests from viewers that support Server Name Indication (SNI). All modern browsers support SNI, but some browsers still in use don't support SNI. Do not specify a value for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate." + } + }, + "Signer": { + "base": "A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.", + "refs": { + "SignerList$member": null + } + }, + "SignerList": { + "base": null, + "refs": { + "ActiveTrustedSigners$Items": "A complex type that contains one Signer complex type for each unique trusted signer that is specified in the TrustedSigners complex type, including trusted signers in the default cache behavior and in all of the other cache behaviors." + } + }, + "SslProtocol": { + "base": null, + "refs": { + "SslProtocolsList$member": null + } + }, + "SslProtocolsList": { + "base": null, + "refs": { + "OriginSslProtocols$Items": "A complex type that contains one SslProtocol element for each SSL/TLS protocol that you want to allow CloudFront to use when establishing an HTTPS connection with this origin." + } + }, + "StreamingDistribution": { + "base": "A streaming distribution.", + "refs": { + "CreateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "GetStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information.", + "UpdateStreamingDistributionResult$StreamingDistribution": "The streaming distribution's information." 
+ } + }, + "StreamingDistributionAlreadyExists": { + "base": null, + "refs": { + } + }, + "StreamingDistributionConfig": { + "base": "The configuration for the streaming distribution.", + "refs": { + "CreateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "GetStreamingDistributionConfigResult$StreamingDistributionConfig": "The streaming distribution's configuration information.", + "StreamingDistribution$StreamingDistributionConfig": "The current configuration information for the streaming distribution.", + "UpdateStreamingDistributionRequest$StreamingDistributionConfig": "The streaming distribution's configuration information." + } + }, + "StreamingDistributionList": { + "base": "A streaming distribution list.", + "refs": { + "ListStreamingDistributionsResult$StreamingDistributionList": "The StreamingDistributionList type." + } + }, + "StreamingDistributionNotDisabled": { + "base": null, + "refs": { + } + }, + "StreamingDistributionSummary": { + "base": "A summary of the information for an Amazon CloudFront streaming distribution.", + "refs": { + "StreamingDistributionSummaryList$member": null + } + }, + "StreamingDistributionSummaryList": { + "base": null, + "refs": { + "StreamingDistributionList$Items": "A complex type that contains one StreamingDistributionSummary element for each distribution that was created by the current AWS account." + } + }, + "StreamingLoggingConfig": { + "base": "A complex type that controls whether access logs are written for this streaming distribution.", + "refs": { + "StreamingDistributionConfig$Logging": "A complex type that controls whether access logs are written for the streaming distribution." 
+ } + }, + "TooManyCacheBehaviors": { + "base": "You cannot create anymore cache behaviors for the distribution.", + "refs": { + } + }, + "TooManyCertificates": { + "base": "You cannot create anymore custom ssl certificates.", + "refs": { + } + }, + "TooManyCloudFrontOriginAccessIdentities": { + "base": "Processing your request would cause you to exceed the maximum number of origin access identities allowed.", + "refs": { + } + }, + "TooManyCookieNamesInWhiteList": { + "base": "Your request contains more cookie names in the whitelist than are allowed per cache behavior.", + "refs": { + } + }, + "TooManyDistributionCNAMEs": { + "base": "Your request contains more CNAMEs than are allowed per distribution.", + "refs": { + } + }, + "TooManyDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of distributions allowed.", + "refs": { + } + }, + "TooManyHeadersInForwardedValues": { + "base": null, + "refs": { + } + }, + "TooManyInvalidationsInProgress": { + "base": "You have exceeded the maximum number of allowable InProgress invalidation batch requests, or invalidation objects.", + "refs": { + } + }, + "TooManyOriginCustomHeaders": { + "base": null, + "refs": { + } + }, + "TooManyOrigins": { + "base": "You cannot create anymore origins for the distribution.", + "refs": { + } + }, + "TooManyStreamingDistributionCNAMEs": { + "base": null, + "refs": { + } + }, + "TooManyStreamingDistributions": { + "base": "Processing your request would cause you to exceed the maximum number of streaming distributions allowed.", + "refs": { + } + }, + "TooManyTrustedSigners": { + "base": "Your request contains more trusted signers than are allowed per distribution.", + "refs": { + } + }, + "TrustedSignerDoesNotExist": { + "base": "One or more of your trusted signers do not exist.", + "refs": { + } + }, + "TrustedSigners": { + "base": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private 
content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "refs": { + "CacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "DefaultCacheBehavior$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. 
For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionConfig$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.", + "StreamingDistributionSummary$TrustedSigners": "A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content. If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, go to Using a Signed URL to Serve Private Content in the Amazon CloudFront Developer Guide. 
If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution." + } + }, + "UpdateCloudFrontOriginAccessIdentityRequest": { + "base": "The request to update an origin access identity.", + "refs": { + } + }, + "UpdateCloudFrontOriginAccessIdentityResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateDistributionRequest": { + "base": "The request to update a distribution.", + "refs": { + } + }, + "UpdateDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "UpdateStreamingDistributionRequest": { + "base": "The request to update a streaming distribution.", + "refs": { + } + }, + "UpdateStreamingDistributionResult": { + "base": "The returned result of the corresponding request.", + "refs": { + } + }, + "ViewerCertificate": { + "base": "A complex type that contains information about viewer certificates for this distribution.", + "refs": { + "DistributionConfig$ViewerCertificate": null, + "DistributionSummary$ViewerCertificate": null + } + }, + "ViewerProtocolPolicy": { + "base": null, + "refs": { + "CacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. 
The viewer then resubmits the request using the HTTPS URL.", + "DefaultCacheBehavior$ViewerProtocolPolicy": "Use this element to specify the protocol that users can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. If you want CloudFront to allow end users to use any available protocol, specify allow-all. If you want CloudFront to require HTTPS, specify https. If you want CloudFront to respond to an HTTP request with an HTTP status code of 301 (Moved Permanently) and the HTTPS URL, specify redirect-to-https. The viewer then resubmits the request using the HTTPS URL." + } + }, + "boolean": { + "base": null, + "refs": { + "ActiveTrustedSigners$Enabled": "Each active trusted signer.", + "CacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "CacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. 
As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "CloudFrontOriginAccessIdentityList$IsTruncated": "A flag that indicates whether more origin access identities remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more items in the list.", + "DefaultCacheBehavior$SmoothStreaming": "Indicates whether you want to distribute media files in Microsoft Smooth Streaming format using the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "DefaultCacheBehavior$Compress": "Whether you want CloudFront to automatically compress content for web requests that include Accept-Encoding: gzip in the request header. If so, specify true; if not, specify false. CloudFront compresses files larger than 1000 bytes and less than 1 megabyte for both Amazon S3 and custom origins. When a CloudFront edge location is unusually busy, some files might not be compressed. The value of the Content-Type header must be on the list of file types that CloudFront will compress. For the current list, see Serving Compressed Content in the Amazon CloudFront Developer Guide. If you configure CloudFront to compress content, CloudFront removes the ETag response header from the objects that it compresses. The ETag header indicates that the version in a CloudFront edge cache is identical to the version on the origin server, but after compression the two versions are no longer identical. 
As a result, for compressed objects, CloudFront can't use the ETag header to determine whether an expired object in the CloudFront edge cache is still the latest version.", + "DistributionConfig$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "DistributionList$IsTruncated": "A flag that indicates whether more distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "DistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "ForwardedValues$QueryString": "Indicates whether you want CloudFront to forward query strings to the origin that is associated with this cache behavior. If so, specify true; if not, specify false.", + "InvalidationList$IsTruncated": "A flag that indicates whether more invalidation batch requests remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more invalidation batches in the list.", + "LoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket, prefix and IncludeCookies, the values are automatically deleted.", + "LoggingConfig$IncludeCookies": "Specifies whether you want CloudFront to include cookies in access logs, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution. 
If you do not want to include cookies when you create a distribution or if you want to disable include cookies for an existing distribution, specify false for IncludeCookies.", + "StreamingDistributionConfig$Enabled": "Whether the streaming distribution is enabled to accept end user requests for content.", + "StreamingDistributionList$IsTruncated": "A flag that indicates whether more streaming distributions remain to be listed. If your results were truncated, you can make a follow-up pagination request using the Marker request parameter to retrieve more distributions in the list.", + "StreamingDistributionSummary$Enabled": "Whether the distribution is enabled to accept end user requests for content.", + "StreamingLoggingConfig$Enabled": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you do not want to enable logging when you create a streaming distribution or if you want to disable logging for an existing streaming distribution, specify false for Enabled, and specify empty Bucket and Prefix elements. If you specify false for Enabled but you specify values for Bucket and Prefix, the values are automatically deleted.", + "TrustedSigners$Enabled": "Specifies whether you want to require end users to use signed URLs to access the files specified by PathPattern and TargetOriginId.", + "ViewerCertificate$CloudFrontDefaultCertificate": "Note: this field is deprecated. Please use \"cloudfront\" as CertificateSource and omit specifying a Certificate. If you want viewers to use HTTPS to request your objects and you're using the CloudFront domain name of your distribution in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to true. Omit this value if you are setting an IAMCertificateId." + } + }, + "integer": { + "base": null, + "refs": { + "ActiveTrustedSigners$Quantity": "The number of unique trusted signers included in all cache behaviors. 
For example, if three cache behaviors all list the same three AWS accounts, the value of Quantity for ActiveTrustedSigners will be 3.", + "Aliases$Quantity": "The number of CNAMEs, if any, for this distribution.", + "AllowedMethods$Quantity": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests).", + "CacheBehaviors$Quantity": "The number of cache behaviors for this distribution.", + "CachedMethods$Quantity": "The number of HTTP methods for which you want CloudFront to cache responses. Valid values are 2 (for caching responses to GET and HEAD requests) and 3 (for caching responses to GET, HEAD, and OPTIONS requests).", + "CloudFrontOriginAccessIdentityList$MaxItems": "The value you provided for the MaxItems request parameter.", + "CloudFrontOriginAccessIdentityList$Quantity": "The number of CloudFront origin access identities that were created by the current AWS account.", + "CookieNames$Quantity": "The number of whitelisted cookies for this cache behavior.", + "CustomErrorResponse$ErrorCode": "The 4xx or 5xx HTTP status code that you want to customize. 
For a list of HTTP status codes that you can customize, see CloudFront documentation.", + "CustomErrorResponses$Quantity": "The number of custom error responses for this distribution.", + "CustomHeaders$Quantity": "The number of custom headers for this origin.", + "CustomOriginConfig$HTTPPort": "The HTTP port the custom origin listens on.", + "CustomOriginConfig$HTTPSPort": "The HTTPS port the custom origin listens on.", + "Distribution$InProgressInvalidationBatches": "The number of invalidation batches currently in progress.", + "DistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "DistributionList$Quantity": "The number of distributions that were created by the current AWS account.", + "GeoRestriction$Quantity": "When geo restriction is enabled, this is the number of countries in your whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, and you can omit Items.", + "Headers$Quantity": "The number of different headers that you want CloudFront to forward to the origin and to vary on for this cache behavior. The maximum number of headers that you can specify by name is 10. If you want CloudFront to forward all headers to the origin and vary on all of them, specify 1 for Quantity and * for Name. 
If you don't want CloudFront to forward any additional headers to the origin or to vary on any headers, specify 0 for Quantity and omit Items.", + "InvalidationList$MaxItems": "The value you provided for the MaxItems request parameter.", + "InvalidationList$Quantity": "The number of invalidation batches that were created by the current AWS account.", + "KeyPairIds$Quantity": "The number of active CloudFront key pairs for AwsAccountNumber.", + "OriginSslProtocols$Quantity": "The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing an HTTPS connection with this origin.", + "Origins$Quantity": "The number of origins for this distribution.", + "Paths$Quantity": "The number of objects that you want to invalidate.", + "StreamingDistributionList$MaxItems": "The value you provided for the MaxItems request parameter.", + "StreamingDistributionList$Quantity": "The number of streaming distributions that were created by the current AWS account.", + "TrustedSigners$Quantity": "The number of trusted signers for this cache behavior." + } + }, + "long": { + "base": null, + "refs": { + "CacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. 
You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "CustomErrorResponse$ErrorCachingMinTTL": "The minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. You can specify a value from 0 to 31,536,000.", + "DefaultCacheBehavior$MinTTL": "The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$DefaultTTL": "If you don't configure your origin to add a Cache-Control max-age directive or an Expires header, DefaultTTL is the default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 years).", + "DefaultCacheBehavior$MaxTTL": "The maximum amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. 
You can specify a value from 0 to 3,153,600,000 seconds (100 years)." + } + }, + "string": { + "base": null, + "refs": { + "AccessDenied$Message": null, + "AliasList$member": null, + "AwsAccountNumberList$member": null, + "BatchTooLarge$Message": null, + "CNAMEAlreadyExists$Message": null, + "CacheBehavior$PathPattern": "The pattern (for example, images/*.jpg) that specifies which requests you want this cache behavior to apply to. When CloudFront receives an end-user request, the requested path is compared with path patterns in the order in which cache behaviors are listed in the distribution. The path pattern for the default cache behavior is * and cannot be changed. If the request for an object does not match the path pattern for any cache behaviors, CloudFront applies the behavior in the default cache behavior.", + "CacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "CloudFrontOriginAccessIdentity$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentity$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentityAlreadyExists$Message": null, + "CloudFrontOriginAccessIdentityConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the CloudFrontOriginAccessIdentityConfig object), a new origin access identity is created. If the CallerReference is a value you already sent in a previous request to create an identity, and the content of the CloudFrontOriginAccessIdentityConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create an identity but the content of the CloudFrontOriginAccessIdentityConfig is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists error.", + "CloudFrontOriginAccessIdentityConfig$Comment": "Any comments you want to include about the origin access identity.", + "CloudFrontOriginAccessIdentityInUse$Message": null, + "CloudFrontOriginAccessIdentityList$Marker": "The value you provided for the Marker request parameter.", + "CloudFrontOriginAccessIdentityList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your origin access identities where they left off.", + "CloudFrontOriginAccessIdentitySummary$Id": "The ID for the origin access identity. For example: E74FTE3AJFJ256A.", + "CloudFrontOriginAccessIdentitySummary$S3CanonicalUserId": "The Amazon S3 canonical user ID for the origin access identity, which you use when giving the origin access identity read permission to an object in Amazon S3.", + "CloudFrontOriginAccessIdentitySummary$Comment": "The comment for this origin access identity, as originally specified when created.", + "CookieNameList$member": null, + "CreateCloudFrontOriginAccessIdentityResult$Location": "The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.", + "CreateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity created.", + "CreateDistributionResult$Location": "The fully qualified URI of the new distribution resource just created. 
For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.", + "CreateDistributionResult$ETag": "The current version of the distribution created.", + "CreateInvalidationRequest$DistributionId": "The distribution's id.", + "CreateInvalidationResult$Location": "The fully qualified URI of the distribution and invalidation batch request, including the Invalidation ID.", + "CreateStreamingDistributionResult$Location": "The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.", + "CreateStreamingDistributionResult$ETag": "The current version of the streaming distribution created.", + "CustomErrorResponse$ResponsePagePath": "The path of the custom error page (for example, /custom_404.html). The path is relative to the distribution and must begin with a slash (/). If the path includes any non-ASCII characters or unsafe characters as defined in RFC 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. Do not URL encode any other characters in the path, or CloudFront will not return the custom error page to the viewer.", + "CustomErrorResponse$ResponseCode": "The HTTP status code that you want CloudFront to return with the custom error page to the viewer. For a list of HTTP status codes that you can replace, see CloudFront Documentation.", + "DefaultCacheBehavior$TargetOriginId": "The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior.", + "DeleteCloudFrontOriginAccessIdentityRequest$Id": "The origin access identity's id.", + "DeleteCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received from a previous GET or PUT request. 
For example: E2QWRUHAPOMQZL.", + "DeleteDistributionRequest$Id": "The distribution id.", + "DeleteDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the distribution. For example: E2QWRUHAPOMQZL.", + "DeleteStreamingDistributionRequest$Id": "The distribution id.", + "DeleteStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL.", + "Distribution$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "Distribution$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "Distribution$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionAlreadyExists$Message": null, + "DistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the DistributionConfig object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create a distribution, and the content of the DistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a distribution but the content of the DistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "DistributionConfig$DefaultRootObject": "The object that you want CloudFront to return (for example, index.html) when an end user requests the root URL for your distribution (http://www.example.com) instead of an object in your distribution (http://www.example.com/index.html). 
Specifying a default root object avoids exposing the contents of your distribution. If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element. To replace the default root object, update the distribution configuration and specify the new object.", + "DistributionConfig$Comment": "Any comments you want to include about the distribution.", + "DistributionConfig$WebACLId": "(Optional) If you're using AWS WAF to filter CloudFront requests, the Id of the AWS WAF web ACL that is associated with the distribution.", + "DistributionList$Marker": "The value you provided for the Marker request parameter.", + "DistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your distributions where they left off.", + "DistributionNotDisabled$Message": null, + "DistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "DistributionSummary$Status": "This response element indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "DistributionSummary$DomainName": "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net.", + "DistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "DistributionSummary$WebACLId": "The Web ACL Id (if any) associated with the distribution.", + "GetCloudFrontOriginAccessIdentityConfigRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityConfigResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "GetCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "GetCloudFrontOriginAccessIdentityResult$ETag": "The current version of the origin access identity's information. For example: E2QWRUHAPOMQZL.", + "GetDistributionConfigRequest$Id": "The distribution's id.", + "GetDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetDistributionRequest$Id": "The distribution's id.", + "GetDistributionResult$ETag": "The current version of the distribution's information. For example: E2QWRUHAPOMQZL.", + "GetInvalidationRequest$DistributionId": "The distribution's id.", + "GetInvalidationRequest$Id": "The invalidation's id.", + "GetStreamingDistributionConfigRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionConfigResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "GetStreamingDistributionRequest$Id": "The streaming distribution's id.", + "GetStreamingDistributionResult$ETag": "The current version of the streaming distribution's information. 
For example: E2QWRUHAPOMQZL.", + "HeaderList$member": null, + "IllegalUpdate$Message": null, + "InconsistentQuantities$Message": null, + "InvalidArgument$Message": null, + "InvalidDefaultRootObject$Message": null, + "InvalidErrorCode$Message": null, + "InvalidForwardCookies$Message": null, + "InvalidGeoRestrictionParameter$Message": null, + "InvalidHeadersForS3Origin$Message": null, + "InvalidIfMatchVersion$Message": null, + "InvalidLocationCode$Message": null, + "InvalidMinimumProtocolVersion$Message": null, + "InvalidOrigin$Message": null, + "InvalidOriginAccessIdentity$Message": null, + "InvalidProtocolSettings$Message": null, + "InvalidRelativePath$Message": null, + "InvalidRequiredProtocol$Message": null, + "InvalidResponseCode$Message": null, + "InvalidTTLOrder$Message": null, + "InvalidViewerCertificate$Message": null, + "InvalidWebACLId$Message": null, + "Invalidation$Id": "The identifier for the invalidation request. For example: IDFDVBD632BHDS5.", + "Invalidation$Status": "The status of the invalidation request. When the invalidation batch is finished, the status is Completed.", + "InvalidationBatch$CallerReference": "A unique name that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the Path object), a new distribution is created. If the CallerReference is a value you already sent in a previous request to create an invalidation batch, and the content of each Path element is identical to the original request, the response includes the same information returned to the original request. 
If the CallerReference is a value you already sent in a previous request to create a distribution but the content of any Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists error.", + "InvalidationList$Marker": "The value you provided for the Marker request parameter.", + "InvalidationList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your invalidation batches where they left off.", + "InvalidationSummary$Id": "The unique ID for an invalidation request.", + "InvalidationSummary$Status": "The status of an invalidation request.", + "KeyPairIdList$member": null, + "ListCloudFrontOriginAccessIdentitiesRequest$Marker": "Use this when paginating results to indicate where to begin in your list of origin access identities. The results include identities in the list that occur after the marker. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last identity on that page).", + "ListCloudFrontOriginAccessIdentitiesRequest$MaxItems": "The maximum number of origin access identities you want in the response body.", + "ListDistributionsByWebACLIdRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsByWebACLIdRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListDistributionsByWebACLIdRequest$WebACLId": "The Id of the AWS WAF web ACL for which you want to list the associated distributions. 
If you specify \"null\" for the Id, the request returns a list of the distributions that aren't associated with a web ACL.", + "ListDistributionsRequest$Marker": "Use Marker and MaxItems to control pagination of results. If you have more than MaxItems distributions that satisfy the request, the response includes a NextMarker element. To get the next page of results, submit another request. For the value of Marker, specify the value of NextMarker from the last response. (For the first request, omit Marker.)", + "ListDistributionsRequest$MaxItems": "The maximum number of distributions that you want CloudFront to return in the response body. The maximum and default values are both 100.", + "ListInvalidationsRequest$DistributionId": "The distribution's id.", + "ListInvalidationsRequest$Marker": "Use this parameter when paginating results to indicate where to begin in your list of invalidation batches. Because the results are returned in decreasing order from most recent to oldest, the most recent results are on the first page, the second page will contain earlier results, and so on. To get the next page of results, set the Marker to the value of the NextMarker from the current page's response. This value is the same as the ID of the last invalidation batch on that page.", + "ListInvalidationsRequest$MaxItems": "The maximum number of invalidation batches you want in the response body.", + "ListStreamingDistributionsRequest$Marker": "Use this when paginating results to indicate where to begin in your list of streaming distributions. The results include distributions in the list that occur after the marker. 
To get the next page of results, set the Marker to the value of the NextMarker from the current page's response (which is also the ID of the last distribution on that page).", + "ListStreamingDistributionsRequest$MaxItems": "The maximum number of streaming distributions you want in the response body.", + "LocationList$member": null, + "LoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "LoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "MissingBody$Message": null, + "NoSuchCloudFrontOriginAccessIdentity$Message": null, + "NoSuchDistribution$Message": null, + "NoSuchInvalidation$Message": null, + "NoSuchOrigin$Message": null, + "NoSuchStreamingDistribution$Message": null, + "Origin$Id": "A unique identifier for the origin. The value of Id must be unique within the distribution. You use the value of Id when you create a cache behavior. The Id identifies the origin that CloudFront routes a request to when the request matches the path pattern for that cache behavior.", + "Origin$DomainName": "Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. Custom origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.", + "Origin$OriginPath": "An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. 
CloudFront appends the directory name to the value of DomainName.", + "OriginCustomHeader$HeaderName": "The header's name.", + "OriginCustomHeader$HeaderValue": "The header's value.", + "PathList$member": null, + "PreconditionFailed$Message": null, + "S3Origin$DomainName": "The DNS name of the S3 origin.", + "S3Origin$OriginAccessIdentity": "Your S3 origin's origin access identity.", + "S3OriginConfig$OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that end users can only access objects in an Amazon S3 bucket through CloudFront. If you want end users to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element. To replace the origin access identity, update the distribution configuration and specify the new origin access identity. Use the format origin-access-identity/cloudfront/Id where Id is the value that CloudFront returned in the Id element when you created the origin access identity.", + "Signer$AwsAccountNumber": "Specifies an AWS account that can create signed URLs. Values: self, which indicates that the AWS account that was used to create the distribution can created signed URLs, or an AWS account number. Omit the dashes in the account number.", + "StreamingDistribution$Id": "The identifier for the streaming distribution. For example: EGTXBD79H29TRA8.", + "StreamingDistribution$Status": "The current status of the streaming distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistribution$DomainName": "The domain name corresponding to the streaming distribution. 
For example: s5c39gqb8ow64r.cloudfront.net.", + "StreamingDistributionAlreadyExists$Message": null, + "StreamingDistributionConfig$CallerReference": "A unique number that ensures the request can't be replayed. If the CallerReference is new (no matter the content of the StreamingDistributionConfig object), a new streaming distribution is created. If the CallerReference is a value you already sent in a previous request to create a streaming distribution, and the content of the StreamingDistributionConfig is identical to the original request (ignoring white space), the response includes the same information returned to the original request. If the CallerReference is a value you already sent in a previous request to create a streaming distribution but the content of the StreamingDistributionConfig is different from the original request, CloudFront returns a DistributionAlreadyExists error.", + "StreamingDistributionConfig$Comment": "Any comments you want to include about the streaming distribution.", + "StreamingDistributionList$Marker": "The value you provided for the Marker request parameter.", + "StreamingDistributionList$NextMarker": "If IsTruncated is true, this element is present and contains the value you can use for the Marker request parameter to continue listing your streaming distributions where they left off.", + "StreamingDistributionNotDisabled$Message": null, + "StreamingDistributionSummary$Id": "The identifier for the distribution. For example: EDFDVBD632BHDS5.", + "StreamingDistributionSummary$Status": "Indicates the current status of the distribution. When the status is Deployed, the distribution's information is fully propagated throughout the Amazon CloudFront system.", + "StreamingDistributionSummary$DomainName": "The domain name corresponding to the distribution. 
For example: d604721fxaaqy9.cloudfront.net.", + "StreamingDistributionSummary$Comment": "The comment originally specified when this distribution was created.", + "StreamingLoggingConfig$Bucket": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com.", + "StreamingLoggingConfig$Prefix": "An optional string that you want CloudFront to prefix to the access log filenames for this streaming distribution, for example, myprefix/. If you want to enable logging, but you do not want to specify a prefix, you still must include an empty Prefix element in the Logging element.", + "TooManyCacheBehaviors$Message": null, + "TooManyCertificates$Message": null, + "TooManyCloudFrontOriginAccessIdentities$Message": null, + "TooManyCookieNamesInWhiteList$Message": null, + "TooManyDistributionCNAMEs$Message": null, + "TooManyDistributions$Message": null, + "TooManyHeadersInForwardedValues$Message": null, + "TooManyInvalidationsInProgress$Message": null, + "TooManyOriginCustomHeaders$Message": null, + "TooManyOrigins$Message": null, + "TooManyStreamingDistributionCNAMEs$Message": null, + "TooManyStreamingDistributions$Message": null, + "TooManyTrustedSigners$Message": null, + "TrustedSignerDoesNotExist$Message": null, + "UpdateCloudFrontOriginAccessIdentityRequest$Id": "The identity's id.", + "UpdateCloudFrontOriginAccessIdentityRequest$IfMatch": "The value of the ETag header you received when retrieving the identity's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateCloudFrontOriginAccessIdentityResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionRequest$Id": "The distribution's id.", + "UpdateDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateDistributionResult$ETag": "The current version of the configuration. 
For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionRequest$Id": "The streaming distribution's id.", + "UpdateStreamingDistributionRequest$IfMatch": "The value of the ETag header you received when retrieving the streaming distribution's configuration. For example: E2QWRUHAPOMQZL.", + "UpdateStreamingDistributionResult$ETag": "The current version of the configuration. For example: E2QWRUHAPOMQZL.", + "ViewerCertificate$Certificate": "If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), you can use your own IAM or ACM certificate. For ACM, set to the ACM certificate ARN. For IAM, set to the IAM certificate identifier.", + "ViewerCertificate$IAMCertificateId": "Note: this field is deprecated. Please use \"iam\" as CertificateSource and specify the IAM certificate Id as the Certificate. If you want viewers to use HTTPS to request your objects and you're using an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), specify the IAM certificate identifier of the custom viewer certificate for this distribution. Specify either this value or CloudFrontDefaultCertificate." + } + }, + "timestamp": { + "base": null, + "refs": { + "Distribution$LastModifiedTime": "The date and time the distribution was last modified.", + "DistributionSummary$LastModifiedTime": "The date and time the distribution was last modified.", + "Invalidation$CreateTime": "The date and time the invalidation request was first made.", + "InvalidationSummary$CreateTime": null, + "StreamingDistribution$LastModifiedTime": "The date and time the distribution was last modified.", + "StreamingDistributionSummary$LastModifiedTime": "The date and time the distribution was last modified." 
+ } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListCloudFrontOriginAccessIdentities": { + "input_token": "Marker", + "output_token": "CloudFrontOriginAccessIdentityList.NextMarker", + "limit_key": "MaxItems", + "more_results": "CloudFrontOriginAccessIdentityList.IsTruncated", + "result_key": "CloudFrontOriginAccessIdentityList.Items" + }, + "ListDistributions": { + "input_token": "Marker", + "output_token": "DistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "DistributionList.IsTruncated", + "result_key": "DistributionList.Items" + }, + "ListInvalidations": { + "input_token": "Marker", + "output_token": "InvalidationList.NextMarker", + "limit_key": "MaxItems", + "more_results": 
"InvalidationList.IsTruncated", + "result_key": "InvalidationList.Items" + }, + "ListStreamingDistributions": { + "input_token": "Marker", + "output_token": "StreamingDistributionList.NextMarker", + "limit_key": "MaxItems", + "more_results": "StreamingDistributionList.IsTruncated", + "result_key": "StreamingDistributionList.Items" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudfront/2016-01-28/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,47 @@ +{ + "version": 2, + "waiters": { + "DistributionDeployed": { + "delay": 60, + "operation": "GetDistribution", + "maxAttempts": 25, + "description": "Wait until a distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "Distribution.Status" + } + ] + }, + "InvalidationCompleted": { + "delay": 20, + "operation": "GetInvalidation", + "maxAttempts": 30, + "description": "Wait until an invalidation has completed.", + "acceptors": [ + { + "expected": "Completed", + "matcher": "path", + "state": "success", + "argument": "Invalidation.Status" + } + ] + }, + "StreamingDistributionDeployed": { + "delay": 60, + "operation": "GetStreamingDistribution", + "maxAttempts": 25, + "description": "Wait until a streaming distribution is deployed.", + "acceptors": [ + { + "expected": "Deployed", + "matcher": "path", + "state": "success", + "argument": "StreamingDistribution.Status" + } + ] + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,928 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-05-30", + "endpointPrefix":"cloudhsm", + "jsonVersion":"1.1", + "serviceAbbreviation":"CloudHSM", + "serviceFullName":"Amazon CloudHSM", + "signatureVersion":"v4", + "targetPrefix":"CloudHsmFrontendService", + "protocol":"json" + }, + "operations":{ + "CreateHapg":{ + "name":"CreateHapg", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHapgRequest"}, + "output":{"shape":"CreateHapgResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "CreateHsm":{ + "name":"CreateHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{ + "shape":"CreateHsmRequest", + "locationName":"CreateHsmRequest" + }, + "output":{"shape":"CreateHsmResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "CreateLunaClient":{ + "name":"CreateLunaClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLunaClientRequest"}, + "output":{"shape":"CreateLunaClientResponse"}, + "errors":[ + { + 
"shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeleteHapg":{ + "name":"DeleteHapg", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHapgRequest"}, + "output":{"shape":"DeleteHapgResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeleteHsm":{ + "name":"DeleteHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{ + "shape":"DeleteHsmRequest", + "locationName":"DeleteHsmRequest" + }, + "output":{"shape":"DeleteHsmResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeleteLunaClient":{ + "name":"DeleteLunaClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLunaClientRequest"}, + "output":{"shape":"DeleteLunaClientResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DescribeHapg":{ + "name":"DescribeHapg", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHapgRequest"}, + "output":{"shape":"DescribeHapgResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DescribeHsm":{ + "name":"DescribeHsm", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHsmRequest"}, + "output":{"shape":"DescribeHsmResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DescribeLunaClient":{ + "name":"DescribeLunaClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLunaClientRequest"}, + "output":{"shape":"DescribeLunaClientResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "GetConfig":{ + "name":"GetConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConfigRequest"}, + "output":{"shape":"GetConfigResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "ListAvailableZones":{ + "name":"ListAvailableZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAvailableZonesRequest"}, + "output":{"shape":"ListAvailableZonesResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "ListHapgs":{ + "name":"ListHapgs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListHapgsRequest"}, + "output":{"shape":"ListHapgsResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + 
{ + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "ListHsms":{ + "name":"ListHsms", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListHsmsRequest"}, + "output":{"shape":"ListHsmsResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "ListLunaClients":{ + "name":"ListLunaClients", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLunaClientsRequest"}, + "output":{"shape":"ListLunaClientsResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "ModifyHapg":{ + "name":"ModifyHapg", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyHapgRequest"}, + "output":{"shape":"ModifyHapgResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "ModifyHsm":{ + "name":"ModifyHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{ + "shape":"ModifyHsmRequest", + "locationName":"ModifyHsmRequest" + }, + "output":{"shape":"ModifyHsmResponse"}, + "errors":[ + { + "shape":"CloudHsmServiceException", + "exception":true + }, + { + "shape":"CloudHsmInternalException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "ModifyLunaClient":{ + "name":"ModifyLunaClient", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyLunaClientRequest"}, + "output":{"shape":"ModifyLunaClientResponse"}, + "errors":[ + { + 
"shape":"CloudHsmServiceException", + "exception":true + } + ] + } + }, + "shapes":{ + "AZ":{ + "type":"string", + "pattern":"[a-zA-Z0-9\\-]*" + }, + "AZList":{ + "type":"list", + "member":{"shape":"AZ"} + }, + "Boolean":{"type":"boolean"}, + "Certificate":{ + "type":"string", + "min":600, + "max":2400, + "pattern":"[\\w :+=./\\n-]*" + }, + "CertificateFingerprint":{ + "type":"string", + "pattern":"([0-9a-fA-F][0-9a-fA-F]:){15}[0-9a-fA-F][0-9a-fA-F]" + }, + "ClientArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:cloudhsm:[a-zA-Z0-9\\-]*:[0-9]{12}:client-[0-9a-f]{8}" + }, + "ClientLabel":{ + "type":"string", + "pattern":"[a-zA-Z0-9_.-]{2,64}" + }, + "ClientList":{ + "type":"list", + "member":{"shape":"ClientArn"} + }, + "ClientToken":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{1,64}" + }, + "ClientVersion":{ + "type":"string", + "enum":[ + "5.1", + "5.3" + ] + }, + "CloudHsmInternalException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "CloudHsmObjectState":{ + "type":"string", + "enum":[ + "READY", + "UPDATING", + "DEGRADED" + ] + }, + "CloudHsmServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "retryable":{"shape":"Boolean"} + }, + "exception":true + }, + "CreateHapgRequest":{ + "type":"structure", + "required":["Label"], + "members":{ + "Label":{"shape":"Label"} + } + }, + "CreateHapgResponse":{ + "type":"structure", + "members":{ + "HapgArn":{"shape":"HapgArn"} + } + }, + "CreateHsmRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "SshKey", + "IamRoleArn", + "SubscriptionType" + ], + "members":{ + "SubnetId":{ + "shape":"SubnetId", + "locationName":"SubnetId" + }, + "SshKey":{ + "shape":"SshKey", + "locationName":"SshKey" + }, + "EniIp":{ + "shape":"IpAddress", + "locationName":"EniIp" + }, + "IamRoleArn":{ + "shape":"IamRoleArn", + "locationName":"IamRoleArn" + }, + "ExternalId":{ + "shape":"ExternalId", + "locationName":"ExternalId" + }, + "SubscriptionType":{ 
+ "shape":"SubscriptionType", + "locationName":"SubscriptionType" + }, + "ClientToken":{ + "shape":"ClientToken", + "locationName":"ClientToken" + }, + "SyslogIp":{ + "shape":"IpAddress", + "locationName":"SyslogIp" + } + }, + "locationName":"CreateHsmRequest" + }, + "CreateHsmResponse":{ + "type":"structure", + "members":{ + "HsmArn":{"shape":"HsmArn"} + } + }, + "CreateLunaClientRequest":{ + "type":"structure", + "required":["Certificate"], + "members":{ + "Label":{"shape":"ClientLabel"}, + "Certificate":{"shape":"Certificate"} + } + }, + "CreateLunaClientResponse":{ + "type":"structure", + "members":{ + "ClientArn":{"shape":"ClientArn"} + } + }, + "DeleteHapgRequest":{ + "type":"structure", + "required":["HapgArn"], + "members":{ + "HapgArn":{"shape":"HapgArn"} + } + }, + "DeleteHapgResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"String"} + } + }, + "DeleteHsmRequest":{ + "type":"structure", + "required":["HsmArn"], + "members":{ + "HsmArn":{ + "shape":"HsmArn", + "locationName":"HsmArn" + } + }, + "locationName":"DeleteHsmRequest" + }, + "DeleteHsmResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"String"} + } + }, + "DeleteLunaClientRequest":{ + "type":"structure", + "required":["ClientArn"], + "members":{ + "ClientArn":{"shape":"ClientArn"} + } + }, + "DeleteLunaClientResponse":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"String"} + } + }, + "DescribeHapgRequest":{ + "type":"structure", + "required":["HapgArn"], + "members":{ + "HapgArn":{"shape":"HapgArn"} + } + }, + "DescribeHapgResponse":{ + "type":"structure", + "members":{ + "HapgArn":{"shape":"HapgArn"}, + "HapgSerial":{"shape":"String"}, + "HsmsLastActionFailed":{"shape":"HsmList"}, + "HsmsPendingDeletion":{"shape":"HsmList"}, + "HsmsPendingRegistration":{"shape":"HsmList"}, + "Label":{"shape":"Label"}, + "LastModifiedTimestamp":{"shape":"Timestamp"}, + 
"PartitionSerialList":{"shape":"PartitionSerialList"}, + "State":{"shape":"CloudHsmObjectState"} + } + }, + "DescribeHsmRequest":{ + "type":"structure", + "members":{ + "HsmArn":{"shape":"HsmArn"}, + "HsmSerialNumber":{"shape":"HsmSerialNumber"} + } + }, + "DescribeHsmResponse":{ + "type":"structure", + "members":{ + "HsmArn":{"shape":"HsmArn"}, + "Status":{"shape":"HsmStatus"}, + "StatusDetails":{"shape":"String"}, + "AvailabilityZone":{"shape":"AZ"}, + "EniId":{"shape":"EniId"}, + "EniIp":{"shape":"IpAddress"}, + "SubscriptionType":{"shape":"SubscriptionType"}, + "SubscriptionStartDate":{"shape":"Timestamp"}, + "SubscriptionEndDate":{"shape":"Timestamp"}, + "VpcId":{"shape":"VpcId"}, + "SubnetId":{"shape":"SubnetId"}, + "IamRoleArn":{"shape":"IamRoleArn"}, + "SerialNumber":{"shape":"HsmSerialNumber"}, + "VendorName":{"shape":"String"}, + "HsmType":{"shape":"String"}, + "SoftwareVersion":{"shape":"String"}, + "SshPublicKey":{"shape":"SshKey"}, + "SshKeyLastUpdated":{"shape":"Timestamp"}, + "ServerCertUri":{"shape":"String"}, + "ServerCertLastUpdated":{"shape":"Timestamp"}, + "Partitions":{"shape":"PartitionList"} + } + }, + "DescribeLunaClientRequest":{ + "type":"structure", + "members":{ + "ClientArn":{"shape":"ClientArn"}, + "CertificateFingerprint":{"shape":"CertificateFingerprint"} + } + }, + "DescribeLunaClientResponse":{ + "type":"structure", + "members":{ + "ClientArn":{"shape":"ClientArn"}, + "Certificate":{"shape":"Certificate"}, + "CertificateFingerprint":{"shape":"CertificateFingerprint"}, + "LastModifiedTimestamp":{"shape":"Timestamp"}, + "Label":{"shape":"Label"} + } + }, + "EniId":{ + "type":"string", + "pattern":"eni-[0-9a-f]{8}" + }, + "ExternalId":{ + "type":"string", + "pattern":"[\\w :+=./-]*" + }, + "GetConfigRequest":{ + "type":"structure", + "required":[ + "ClientArn", + "ClientVersion", + "HapgList" + ], + "members":{ + "ClientArn":{"shape":"ClientArn"}, + "ClientVersion":{"shape":"ClientVersion"}, + "HapgList":{"shape":"HapgList"} + } + }, 
+ "GetConfigResponse":{ + "type":"structure", + "members":{ + "ConfigType":{"shape":"String"}, + "ConfigFile":{"shape":"String"}, + "ConfigCred":{"shape":"String"} + } + }, + "HapgArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:cloudhsm:[a-zA-Z0-9\\-]*:[0-9]{12}:hapg-[0-9a-f]{8}" + }, + "HapgList":{ + "type":"list", + "member":{"shape":"HapgArn"} + }, + "HsmArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:cloudhsm:[a-zA-Z0-9\\-]*:[0-9]{12}:hsm-[0-9a-f]{8}" + }, + "HsmList":{ + "type":"list", + "member":{"shape":"HsmArn"} + }, + "HsmSerialNumber":{ + "type":"string", + "pattern":"\\d{1,16}" + }, + "HsmStatus":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "UPDATING", + "SUSPENDED", + "TERMINATING", + "TERMINATED", + "DEGRADED" + ] + }, + "IamRoleArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:iam::[0-9]{12}:role/[a-zA-Z0-9_\\+=,\\.\\-@]{1,64}" + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "IpAddress":{ + "type":"string", + "pattern":"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}" + }, + "Label":{ + "type":"string", + "pattern":"[a-zA-Z0-9_.-]{1,64}" + }, + "ListAvailableZonesRequest":{ + "type":"structure", + "members":{ + } + }, + "ListAvailableZonesResponse":{ + "type":"structure", + "members":{ + "AZList":{"shape":"AZList"} + } + }, + "ListHapgsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListHapgsResponse":{ + "type":"structure", + "required":["HapgList"], + "members":{ + "HapgList":{"shape":"HapgList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListHsmsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListHsmsResponse":{ + "type":"structure", + "members":{ + "HsmList":{"shape":"HsmList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "ListLunaClientsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"PaginationToken"} + } + }, + 
"ListLunaClientsResponse":{ + "type":"structure", + "required":["ClientList"], + "members":{ + "ClientList":{"shape":"ClientList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "ModifyHapgRequest":{ + "type":"structure", + "required":["HapgArn"], + "members":{ + "HapgArn":{"shape":"HapgArn"}, + "Label":{"shape":"Label"}, + "PartitionSerialList":{"shape":"PartitionSerialList"} + } + }, + "ModifyHapgResponse":{ + "type":"structure", + "members":{ + "HapgArn":{"shape":"HapgArn"} + } + }, + "ModifyHsmRequest":{ + "type":"structure", + "required":["HsmArn"], + "members":{ + "HsmArn":{ + "shape":"HsmArn", + "locationName":"HsmArn" + }, + "SubnetId":{ + "shape":"SubnetId", + "locationName":"SubnetId" + }, + "EniIp":{ + "shape":"IpAddress", + "locationName":"EniIp" + }, + "IamRoleArn":{ + "shape":"IamRoleArn", + "locationName":"IamRoleArn" + }, + "ExternalId":{ + "shape":"ExternalId", + "locationName":"ExternalId" + }, + "SyslogIp":{ + "shape":"IpAddress", + "locationName":"SyslogIp" + } + }, + "locationName":"ModifyHsmRequest" + }, + "ModifyHsmResponse":{ + "type":"structure", + "members":{ + "HsmArn":{"shape":"HsmArn"} + } + }, + "ModifyLunaClientRequest":{ + "type":"structure", + "required":[ + "ClientArn", + "Certificate" + ], + "members":{ + "ClientArn":{"shape":"ClientArn"}, + "Certificate":{"shape":"Certificate"} + } + }, + "ModifyLunaClientResponse":{ + "type":"structure", + "members":{ + "ClientArn":{"shape":"ClientArn"} + } + }, + "PaginationToken":{ + "type":"string", + "pattern":"[a-zA-Z0-9+/]*" + }, + "PartitionArn":{ + "type":"string", + "pattern":"arn:aws(-iso)?:cloudhsm:[a-zA-Z0-9\\-]*:[0-9]{12}:hsm-[0-9a-f]{8}/partition-[0-9]{6,12}" + }, + "PartitionList":{ + "type":"list", + "member":{"shape":"PartitionArn"} + }, + "PartitionSerial":{ + "type":"string", + "pattern":"\\d{9}" + }, + "PartitionSerialList":{ + "type":"list", + "member":{"shape":"PartitionSerial"} + }, + "SshKey":{ + "type":"string", + "pattern":"[a-zA-Z0-9+/= ._:\\\\@-]*" + }, + 
"String":{ + "type":"string", + "pattern":"[\\w :+=./\\\\-]*" + }, + "SubnetId":{ + "type":"string", + "pattern":"subnet-[0-9a-f]{8}" + }, + "SubscriptionType":{ + "type":"string", + "enum":["PRODUCTION"] + }, + "Timestamp":{ + "type":"string", + "pattern":"\\d*" + }, + "VpcId":{ + "type":"string", + "pattern":"vpc-[0-9a-f]{8}" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,473 @@ +{ + "version": "2.0", + "operations": { + "CreateHapg": "

    Creates a high-availability partition group. A high-availability partition group is a group of partitions that spans multiple physical HSMs.

    ", + "CreateHsm": "

    Creates an uninitialized HSM instance.

    There is an upfront fee charged for each HSM instance that you create with the CreateHsm operation. If you accidentally provision an HSM and want to request a refund, delete the instance using the DeleteHsm operation, go to the AWS Support Center, create a new case, and select Account and Billing Support.

    It can take up to 20 minutes to create and provision an HSM. You can monitor the status of the HSM with the DescribeHsm operation. The HSM is ready to be initialized when the status changes to RUNNING.

    ", + "CreateLunaClient": "

    Creates an HSM client.

    ", + "DeleteHapg": "

    Deletes a high-availability partition group.

    ", + "DeleteHsm": "

    Deletes an HSM. After completion, this operation cannot be undone and your key material cannot be recovered.

    ", + "DeleteLunaClient": "

    Deletes a client.

    ", + "DescribeHapg": "

    Retrieves information about a high-availability partition group.

    ", + "DescribeHsm": "

    Retrieves information about an HSM. You can identify the HSM by its ARN or its serial number.

    ", + "DescribeLunaClient": "

    Retrieves information about an HSM client.

    ", + "GetConfig": "

    Gets the configuration files necessary to connect to all high availability partition groups the client is associated with.

    ", + "ListAvailableZones": "

    Lists the Availability Zones that have available AWS CloudHSM capacity.

    ", + "ListHapgs": "

    Lists the high-availability partition groups for the account.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListHapgs to retrieve the next set of items.

    ", + "ListHsms": "

    Retrieves the identifiers of all of the HSMs provisioned for the current customer.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListHsms to retrieve the next set of items.

    ", + "ListLunaClients": "

    Lists all of the clients.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListLunaClients to retrieve the next set of items.

    ", + "ModifyHapg": "

    Modifies an existing high-availability partition group.

    ", + "ModifyHsm": "

    Modifies an HSM.

    This operation can result in the HSM being offline for up to 15 minutes while the AWS CloudHSM service is reconfigured. If you are modifying a production HSM, you should ensure that your AWS CloudHSM service is configured for high availability, and consider executing this operation during a maintenance window.

    ", + "ModifyLunaClient": "

    Modifies the certificate used by the client.

    This action can potentially start a workflow to install the new certificate on the client's HSMs.

    " + }, + "service": "AWS CloudHSM Service", + "shapes": { + "AZ": { + "base": null, + "refs": { + "AZList$member": null, + "DescribeHsmResponse$AvailabilityZone": "

    The Availability Zone that the HSM is in.

    " + } + }, + "AZList": { + "base": null, + "refs": { + "ListAvailableZonesResponse$AZList": "

    The list of Availability Zones that have available AWS CloudHSM capacity.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "CloudHsmServiceException$retryable": "

    Indicates if the action can be retried.

    " + } + }, + "Certificate": { + "base": null, + "refs": { + "CreateLunaClientRequest$Certificate": "

    The contents of a Base64-Encoded X.509 v3 certificate to be installed on the HSMs used by this client.

    ", + "DescribeLunaClientResponse$Certificate": "

    The certificate installed on the HSMs used by this client.

    ", + "ModifyLunaClientRequest$Certificate": "

    The new certificate for the client.

    " + } + }, + "CertificateFingerprint": { + "base": null, + "refs": { + "DescribeLunaClientRequest$CertificateFingerprint": "

    The certificate fingerprint.

    ", + "DescribeLunaClientResponse$CertificateFingerprint": "

    The certificate fingerprint.

    " + } + }, + "ClientArn": { + "base": null, + "refs": { + "ClientList$member": null, + "CreateLunaClientResponse$ClientArn": "

    The ARN of the client.

    ", + "DeleteLunaClientRequest$ClientArn": "

    The ARN of the client to delete.

    ", + "DescribeLunaClientRequest$ClientArn": "

    The ARN of the client.

    ", + "DescribeLunaClientResponse$ClientArn": "

    The ARN of the client.

    ", + "GetConfigRequest$ClientArn": "

    The ARN of the client.

    ", + "ModifyLunaClientRequest$ClientArn": "

    The ARN of the client.

    ", + "ModifyLunaClientResponse$ClientArn": "

    The ARN of the client.

    " + } + }, + "ClientLabel": { + "base": null, + "refs": { + "CreateLunaClientRequest$Label": "

    The label for the client.

    " + } + }, + "ClientList": { + "base": null, + "refs": { + "ListLunaClientsResponse$ClientList": "

    The list of clients.

    " + } + }, + "ClientToken": { + "base": null, + "refs": { + "CreateHsmRequest$ClientToken": "

    A user-defined token to ensure idempotence. Subsequent calls to this operation with the same token will be ignored.

    " + } + }, + "ClientVersion": { + "base": null, + "refs": { + "GetConfigRequest$ClientVersion": "

    The client version.

    " + } + }, + "CloudHsmInternalException": { + "base": "

    Indicates that an internal error occurred.

    ", + "refs": { + } + }, + "CloudHsmObjectState": { + "base": null, + "refs": { + "DescribeHapgResponse$State": "

    The state of the high-availability partition group.

    " + } + }, + "CloudHsmServiceException": { + "base": "

    Indicates that an exception occurred in the AWS CloudHSM service.

    ", + "refs": { + } + }, + "CreateHapgRequest": { + "base": "

    Contains the inputs for the CreateHapgRequest action.

    ", + "refs": { + } + }, + "CreateHapgResponse": { + "base": "

    Contains the output of the CreateHAPartitionGroup action.

    ", + "refs": { + } + }, + "CreateHsmRequest": { + "base": "

    Contains the inputs for the CreateHsm operation.

    ", + "refs": { + } + }, + "CreateHsmResponse": { + "base": "

    Contains the output of the CreateHsm operation.

    ", + "refs": { + } + }, + "CreateLunaClientRequest": { + "base": "

    Contains the inputs for the CreateLunaClient action.

    ", + "refs": { + } + }, + "CreateLunaClientResponse": { + "base": "

    Contains the output of the CreateLunaClient action.

    ", + "refs": { + } + }, + "DeleteHapgRequest": { + "base": "

    Contains the inputs for the DeleteHapg action.

    ", + "refs": { + } + }, + "DeleteHapgResponse": { + "base": "

    Contains the output of the DeleteHapg action.

    ", + "refs": { + } + }, + "DeleteHsmRequest": { + "base": "

    Contains the inputs for the DeleteHsm operation.

    ", + "refs": { + } + }, + "DeleteHsmResponse": { + "base": "

    Contains the output of the DeleteHsm operation.

    ", + "refs": { + } + }, + "DeleteLunaClientRequest": { + "base": null, + "refs": { + } + }, + "DeleteLunaClientResponse": { + "base": null, + "refs": { + } + }, + "DescribeHapgRequest": { + "base": "

    Contains the inputs for the DescribeHapg action.

    ", + "refs": { + } + }, + "DescribeHapgResponse": { + "base": "

    Contains the output of the DescribeHapg action.

    ", + "refs": { + } + }, + "DescribeHsmRequest": { + "base": "

    Contains the inputs for the DescribeHsm operation.

    ", + "refs": { + } + }, + "DescribeHsmResponse": { + "base": "

    Contains the output of the DescribeHsm operation.

    ", + "refs": { + } + }, + "DescribeLunaClientRequest": { + "base": null, + "refs": { + } + }, + "DescribeLunaClientResponse": { + "base": null, + "refs": { + } + }, + "EniId": { + "base": null, + "refs": { + "DescribeHsmResponse$EniId": "

    The identifier of the elastic network interface (ENI) attached to the HSM.

    " + } + }, + "ExternalId": { + "base": null, + "refs": { + "CreateHsmRequest$ExternalId": "

    The external ID from IamRoleArn, if present.

    ", + "ModifyHsmRequest$ExternalId": "

    The new external ID.

    " + } + }, + "GetConfigRequest": { + "base": null, + "refs": { + } + }, + "GetConfigResponse": { + "base": null, + "refs": { + } + }, + "HapgArn": { + "base": null, + "refs": { + "CreateHapgResponse$HapgArn": "

    The ARN of the high-availability partition group.

    ", + "DeleteHapgRequest$HapgArn": "

    The ARN of the high-availability partition group to delete.

    ", + "DescribeHapgRequest$HapgArn": "

    The ARN of the high-availability partition group to describe.

    ", + "DescribeHapgResponse$HapgArn": "

    The ARN of the high-availability partition group.

    ", + "HapgList$member": null, + "ModifyHapgRequest$HapgArn": "

    The ARN of the high-availability partition group to modify.

    ", + "ModifyHapgResponse$HapgArn": "

    The ARN of the high-availability partition group.

    " + } + }, + "HapgList": { + "base": null, + "refs": { + "GetConfigRequest$HapgList": "

    A list of ARNs that identify the high-availability partition groups that are associated with the client.

    ", + "ListHapgsResponse$HapgList": "

    The list of high-availability partition groups.

    " + } + }, + "HsmArn": { + "base": "

    An ARN that identifies an HSM.

    ", + "refs": { + "CreateHsmResponse$HsmArn": "

    The ARN of the HSM.

    ", + "DeleteHsmRequest$HsmArn": "

    The ARN of the HSM to delete.

    ", + "DescribeHsmRequest$HsmArn": "

    The ARN of the HSM. Either the HsmArn or the SerialNumber parameter must be specified.

    ", + "DescribeHsmResponse$HsmArn": "

    The ARN of the HSM.

    ", + "HsmList$member": null, + "ModifyHsmRequest$HsmArn": "

    The ARN of the HSM to modify.

    ", + "ModifyHsmResponse$HsmArn": "

    The ARN of the HSM.

    " + } + }, + "HsmList": { + "base": "

    Contains a list of ARNs that identify the HSMs.

    ", + "refs": { + "DescribeHapgResponse$HsmsLastActionFailed": null, + "DescribeHapgResponse$HsmsPendingDeletion": null, + "DescribeHapgResponse$HsmsPendingRegistration": null, + "ListHsmsResponse$HsmList": "

    The list of ARNs that identify the HSMs.

    " + } + }, + "HsmSerialNumber": { + "base": null, + "refs": { + "DescribeHsmRequest$HsmSerialNumber": "

    The serial number of the HSM. Either the HsmArn or the HsmSerialNumber parameter must be specified.

    ", + "DescribeHsmResponse$SerialNumber": "

    The serial number of the HSM.

    " + } + }, + "HsmStatus": { + "base": null, + "refs": { + "DescribeHsmResponse$Status": "

    The status of the HSM.

    " + } + }, + "IamRoleArn": { + "base": null, + "refs": { + "CreateHsmRequest$IamRoleArn": "

    The ARN of an IAM role to enable the AWS CloudHSM service to allocate an ENI on your behalf.

    ", + "DescribeHsmResponse$IamRoleArn": "

    The ARN of the IAM role assigned to the HSM.

    ", + "ModifyHsmRequest$IamRoleArn": "

    The new IAM role ARN.

    " + } + }, + "InvalidRequestException": { + "base": "

    Indicates that one or more of the request parameters are not valid.

    ", + "refs": { + } + }, + "IpAddress": { + "base": null, + "refs": { + "CreateHsmRequest$EniIp": "

    The IP address to assign to the HSM's ENI.

    If an IP address is not specified, an IP address will be randomly chosen from the CIDR range of the subnet.

    ", + "CreateHsmRequest$SyslogIp": "

    The IP address for the syslog monitoring server. The AWS CloudHSM service only supports one syslog monitoring server.

    ", + "DescribeHsmResponse$EniIp": "

    The IP address assigned to the HSM's ENI.

    ", + "ModifyHsmRequest$EniIp": "

    The new IP address for the elastic network interface (ENI) attached to the HSM.

    If the HSM is moved to a different subnet, and an IP address is not specified, an IP address will be randomly chosen from the CIDR range of the new subnet.

    ", + "ModifyHsmRequest$SyslogIp": "

    The new IP address for the syslog monitoring server. The AWS CloudHSM service only supports one syslog monitoring server.

    " + } + }, + "Label": { + "base": null, + "refs": { + "CreateHapgRequest$Label": "

    The label of the new high-availability partition group.

    ", + "DescribeHapgResponse$Label": "

    The label for the high-availability partition group.

    ", + "DescribeLunaClientResponse$Label": "

    The label of the client.

    ", + "ModifyHapgRequest$Label": "

    The new label for the high-availability partition group.

    " + } + }, + "ListAvailableZonesRequest": { + "base": "

    Contains the inputs for the ListAvailableZones action.

    ", + "refs": { + } + }, + "ListAvailableZonesResponse": { + "base": null, + "refs": { + } + }, + "ListHapgsRequest": { + "base": null, + "refs": { + } + }, + "ListHapgsResponse": { + "base": null, + "refs": { + } + }, + "ListHsmsRequest": { + "base": null, + "refs": { + } + }, + "ListHsmsResponse": { + "base": "

    Contains the output of the ListHsms operation.

    ", + "refs": { + } + }, + "ListLunaClientsRequest": { + "base": null, + "refs": { + } + }, + "ListLunaClientsResponse": { + "base": null, + "refs": { + } + }, + "ModifyHapgRequest": { + "base": null, + "refs": { + } + }, + "ModifyHapgResponse": { + "base": null, + "refs": { + } + }, + "ModifyHsmRequest": { + "base": "

    Contains the inputs for the ModifyHsm operation.

    ", + "refs": { + } + }, + "ModifyHsmResponse": { + "base": "

    Contains the output of the ModifyHsm operation.

    ", + "refs": { + } + }, + "ModifyLunaClientRequest": { + "base": null, + "refs": { + } + }, + "ModifyLunaClientResponse": { + "base": null, + "refs": { + } + }, + "PaginationToken": { + "base": null, + "refs": { + "ListHapgsRequest$NextToken": "

    The NextToken value from a previous call to ListHapgs. Pass null if this is the first call.

    ", + "ListHapgsResponse$NextToken": "

    If not null, more results are available. Pass this value to ListHapgs to retrieve the next set of items.

    ", + "ListHsmsRequest$NextToken": "

    The NextToken value from a previous call to ListHsms. Pass null if this is the first call.

    ", + "ListHsmsResponse$NextToken": "

    If not null, more results are available. Pass this value to ListHsms to retrieve the next set of items.

    ", + "ListLunaClientsRequest$NextToken": "

    The NextToken value from a previous call to ListLunaClients. Pass null if this is the first call.

    ", + "ListLunaClientsResponse$NextToken": "

    If not null, more results are available. Pass this to ListLunaClients to retrieve the next set of items.

    " + } + }, + "PartitionArn": { + "base": null, + "refs": { + "PartitionList$member": null + } + }, + "PartitionList": { + "base": null, + "refs": { + "DescribeHsmResponse$Partitions": "

    The list of partitions on the HSM.

    " + } + }, + "PartitionSerial": { + "base": null, + "refs": { + "PartitionSerialList$member": null + } + }, + "PartitionSerialList": { + "base": null, + "refs": { + "DescribeHapgResponse$PartitionSerialList": "

    The list of partition serial numbers that belong to the high-availability partition group.

    ", + "ModifyHapgRequest$PartitionSerialList": "

    The list of partition serial numbers to make members of the high-availability partition group.

    " + } + }, + "SshKey": { + "base": null, + "refs": { + "CreateHsmRequest$SshKey": "

    The SSH public key to install on the HSM.

    ", + "DescribeHsmResponse$SshPublicKey": "

    The public SSH key.

    " + } + }, + "String": { + "base": null, + "refs": { + "CloudHsmServiceException$message": "

    Additional information about the error.

    ", + "DeleteHapgResponse$Status": "

    The status of the action.

    ", + "DeleteHsmResponse$Status": "

    The status of the operation.

    ", + "DeleteLunaClientResponse$Status": "

    The status of the action.

    ", + "DescribeHapgResponse$HapgSerial": "

    The serial number of the high-availability partition group.

    ", + "DescribeHsmResponse$StatusDetails": "

    Contains additional information about the status of the HSM.

    ", + "DescribeHsmResponse$VendorName": "

    The name of the HSM vendor.

    ", + "DescribeHsmResponse$HsmType": "

    The HSM model type.

    ", + "DescribeHsmResponse$SoftwareVersion": "

    The HSM software version.

    ", + "DescribeHsmResponse$ServerCertUri": "

    The URI of the certificate server.

    ", + "GetConfigResponse$ConfigType": "

    The type of credentials.

    ", + "GetConfigResponse$ConfigFile": "

    The chrystoki.conf configuration file.

    ", + "GetConfigResponse$ConfigCred": "

    The certificate file containing the server.pem files of the HSMs.

    " + } + }, + "SubnetId": { + "base": null, + "refs": { + "CreateHsmRequest$SubnetId": "

    The identifier of the subnet in your VPC in which to place the HSM.

    ", + "DescribeHsmResponse$SubnetId": "

    The identifier of the subnet that the HSM is in.

    ", + "ModifyHsmRequest$SubnetId": "

    The new identifier of the subnet that the HSM is in. The new subnet must be in the same Availability Zone as the current subnet.

    " + } + }, + "SubscriptionType": { + "base": "

    Specifies the type of subscription for the HSM.

    • PRODUCTION - The HSM is being used in a production environment.
    • TRIAL - The HSM is being used in a product trial.
    ", + "refs": { + "CreateHsmRequest$SubscriptionType": null, + "DescribeHsmResponse$SubscriptionType": null + } + }, + "Timestamp": { + "base": null, + "refs": { + "DescribeHapgResponse$LastModifiedTimestamp": "

    The date and time the high-availability partition group was last modified.

    ", + "DescribeHsmResponse$SubscriptionStartDate": "

    The subscription start date.

    ", + "DescribeHsmResponse$SubscriptionEndDate": "

    The subscription end date.

    ", + "DescribeHsmResponse$SshKeyLastUpdated": "

    The date and time that the SSH key was last updated.

    ", + "DescribeHsmResponse$ServerCertLastUpdated": "

    The date and time that the server certificate was last updated.

    ", + "DescribeLunaClientResponse$LastModifiedTimestamp": "

    The date and time the client was last modified.

    " + } + }, + "VpcId": { + "base": null, + "refs": { + "DescribeHsmResponse$VpcId": "

    The identifier of the VPC that the HSM is in.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2001 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-01-01", + "endpointPrefix":"cloudsearch", + "serviceFullName":"Amazon CloudSearch", + "signatureVersion":"v4", + "xmlNamespace":"http://cloudsearch.amazonaws.com/doc/2013-01-01/", + "protocol":"query" + }, + "operations":{ + "BuildSuggesters":{ + "name":"BuildSuggesters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BuildSuggestersRequest"}, + "output":{ + "shape":"BuildSuggestersResponse", + "resultWrapper":"BuildSuggestersResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDomainRequest"}, + "output":{ + "shape":"CreateDomainResponse", + "resultWrapper":"CreateDomainResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + 
"code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DefineAnalysisScheme":{ + "name":"DefineAnalysisScheme", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DefineAnalysisSchemeRequest"}, + "output":{ + "shape":"DefineAnalysisSchemeResponse", + "resultWrapper":"DefineAnalysisSchemeResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DefineExpression":{ + "name":"DefineExpression", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DefineExpressionRequest"}, + "output":{ + "shape":"DefineExpressionResponse", + "resultWrapper":"DefineExpressionResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DefineIndexField":{ + 
"name":"DefineIndexField", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DefineIndexFieldRequest"}, + "output":{ + "shape":"DefineIndexFieldResponse", + "resultWrapper":"DefineIndexFieldResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DefineSuggester":{ + "name":"DefineSuggester", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DefineSuggesterRequest"}, + "output":{ + "shape":"DefineSuggesterResponse", + "resultWrapper":"DefineSuggesterResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteAnalysisScheme":{ + "name":"DeleteAnalysisScheme", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAnalysisSchemeRequest"}, + "output":{ + 
"shape":"DeleteAnalysisSchemeResponse", + "resultWrapper":"DeleteAnalysisSchemeResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDomainRequest"}, + "output":{ + "shape":"DeleteDomainResponse", + "resultWrapper":"DeleteDomainResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + } + ] + }, + "DeleteExpression":{ + "name":"DeleteExpression", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteExpressionRequest"}, + "output":{ + "shape":"DeleteExpressionResponse", + "resultWrapper":"DeleteExpressionResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteIndexField":{ + "name":"DeleteIndexField", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIndexFieldRequest"}, + "output":{ + 
"shape":"DeleteIndexFieldResponse", + "resultWrapper":"DeleteIndexFieldResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteSuggester":{ + "name":"DeleteSuggester", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSuggesterRequest"}, + "output":{ + "shape":"DeleteSuggesterResponse", + "resultWrapper":"DeleteSuggesterResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeAnalysisSchemes":{ + "name":"DescribeAnalysisSchemes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAnalysisSchemesRequest"}, + "output":{ + "shape":"DescribeAnalysisSchemesResponse", + "resultWrapper":"DescribeAnalysisSchemesResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + 
}, + "DescribeAvailabilityOptions":{ + "name":"DescribeAvailabilityOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAvailabilityOptionsRequest"}, + "output":{ + "shape":"DescribeAvailabilityOptionsResponse", + "resultWrapper":"DescribeAvailabilityOptionsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledOperationException", + "error":{ + "code":"DisabledAction", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeDomains":{ + "name":"DescribeDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDomainsRequest"}, + "output":{ + "shape":"DescribeDomainsResponse", + "resultWrapper":"DescribeDomainsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + } + ] + }, + "DescribeExpressions":{ + "name":"DescribeExpressions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExpressionsRequest"}, + "output":{ + "shape":"DescribeExpressionsResponse", + "resultWrapper":"DescribeExpressionsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + 
"httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeIndexFields":{ + "name":"DescribeIndexFields", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIndexFieldsRequest"}, + "output":{ + "shape":"DescribeIndexFieldsResponse", + "resultWrapper":"DescribeIndexFieldsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeScalingParameters":{ + "name":"DescribeScalingParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingParametersRequest"}, + "output":{ + "shape":"DescribeScalingParametersResponse", + "resultWrapper":"DescribeScalingParametersResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeServiceAccessPolicies":{ + "name":"DescribeServiceAccessPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServiceAccessPoliciesRequest"}, + "output":{ + "shape":"DescribeServiceAccessPoliciesResponse", + "resultWrapper":"DescribeServiceAccessPoliciesResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + 
"exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeSuggesters":{ + "name":"DescribeSuggesters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSuggestersRequest"}, + "output":{ + "shape":"DescribeSuggestersResponse", + "resultWrapper":"DescribeSuggestersResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "IndexDocuments":{ + "name":"IndexDocuments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IndexDocumentsRequest"}, + "output":{ + "shape":"IndexDocumentsResponse", + "resultWrapper":"IndexDocumentsResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListDomainNames":{ + "name":"ListDomainNames", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"ListDomainNamesResponse", + "resultWrapper":"ListDomainNamesResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + } + ] + }, + "UpdateAvailabilityOptions":{ + "name":"UpdateAvailabilityOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAvailabilityOptionsRequest"}, + "output":{ + "shape":"UpdateAvailabilityOptionsResponse", + "resultWrapper":"UpdateAvailabilityOptionsResult" + }, + 
"errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledOperationException", + "error":{ + "code":"DisabledAction", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateScalingParameters":{ + "name":"UpdateScalingParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateScalingParametersRequest"}, + "output":{ + "shape":"UpdateScalingParametersResponse", + "resultWrapper":"UpdateScalingParametersResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateServiceAccessPolicies":{ + "name":"UpdateServiceAccessPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateServiceAccessPoliciesRequest"}, + "output":{ + 
"shape":"UpdateServiceAccessPoliciesResponse", + "resultWrapper":"UpdateServiceAccessPoliciesResult" + }, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "APIVersion":{"type":"string"}, + "ARN":{"type":"string"}, + "AccessPoliciesStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"PolicyDocument"}, + "Status":{"shape":"OptionStatus"} + } + }, + "AlgorithmicStemming":{ + "type":"string", + "enum":[ + "none", + "minimal", + "light", + "full" + ] + }, + "AnalysisOptions":{ + "type":"structure", + "members":{ + "Synonyms":{"shape":"String"}, + "Stopwords":{"shape":"String"}, + "StemmingDictionary":{"shape":"String"}, + "JapaneseTokenizationDictionary":{"shape":"String"}, + "AlgorithmicStemming":{"shape":"AlgorithmicStemming"} + } + }, + "AnalysisScheme":{ + "type":"structure", + "required":[ + "AnalysisSchemeName", + "AnalysisSchemeLanguage" + ], + "members":{ + "AnalysisSchemeName":{"shape":"StandardName"}, + "AnalysisSchemeLanguage":{"shape":"AnalysisSchemeLanguage"}, + "AnalysisOptions":{"shape":"AnalysisOptions"} + } + }, + "AnalysisSchemeLanguage":{ + "type":"string", + "enum":[ + "ar", + "bg", + "ca", + "cs", + "da", + "de", + "el", + "en", + "es", + "eu", + "fa", + "fi", + "fr", + "ga", + "gl", + "he", + "hi", + "hu", + "hy", + "id", + "it", + "ja", + "ko", + "lv", + "mul", + "nl", 
+ "no", + "pt", + "ro", + "ru", + "sv", + "th", + "tr", + "zh-Hans", + "zh-Hant" + ] + }, + "AnalysisSchemeStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"AnalysisScheme"}, + "Status":{"shape":"OptionStatus"} + } + }, + "AnalysisSchemeStatusList":{ + "type":"list", + "member":{"shape":"AnalysisSchemeStatus"} + }, + "AvailabilityOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"MultiAZ"}, + "Status":{"shape":"OptionStatus"} + } + }, + "BaseException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "BuildSuggestersRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "BuildSuggestersResponse":{ + "type":"structure", + "members":{ + "FieldNames":{"shape":"FieldNameList"} + } + }, + "CreateDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "CreateDomainResponse":{ + "type":"structure", + "members":{ + "DomainStatus":{"shape":"DomainStatus"} + } + }, + "DateArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"} + } + }, + "DateOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + "DefineAnalysisSchemeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "AnalysisScheme" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + 
"AnalysisScheme":{"shape":"AnalysisScheme"} + } + }, + "DefineAnalysisSchemeResponse":{ + "type":"structure", + "required":["AnalysisScheme"], + "members":{ + "AnalysisScheme":{"shape":"AnalysisSchemeStatus"} + } + }, + "DefineExpressionRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Expression" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Expression":{"shape":"Expression"} + } + }, + "DefineExpressionResponse":{ + "type":"structure", + "required":["Expression"], + "members":{ + "Expression":{"shape":"ExpressionStatus"} + } + }, + "DefineIndexFieldRequest":{ + "type":"structure", + "required":[ + "DomainName", + "IndexField" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IndexField":{"shape":"IndexField"} + } + }, + "DefineIndexFieldResponse":{ + "type":"structure", + "required":["IndexField"], + "members":{ + "IndexField":{"shape":"IndexFieldStatus"} + } + }, + "DefineSuggesterRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Suggester" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Suggester":{"shape":"Suggester"} + } + }, + "DefineSuggesterResponse":{ + "type":"structure", + "required":["Suggester"], + "members":{ + "Suggester":{"shape":"SuggesterStatus"} + } + }, + "DeleteAnalysisSchemeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "AnalysisSchemeName" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AnalysisSchemeName":{"shape":"StandardName"} + } + }, + "DeleteAnalysisSchemeResponse":{ + "type":"structure", + "required":["AnalysisScheme"], + "members":{ + "AnalysisScheme":{"shape":"AnalysisSchemeStatus"} + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DeleteDomainResponse":{ + "type":"structure", + "members":{ + "DomainStatus":{"shape":"DomainStatus"} + } + }, + "DeleteExpressionRequest":{ + "type":"structure", + "required":[ + "DomainName", + 
"ExpressionName" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "ExpressionName":{"shape":"StandardName"} + } + }, + "DeleteExpressionResponse":{ + "type":"structure", + "required":["Expression"], + "members":{ + "Expression":{"shape":"ExpressionStatus"} + } + }, + "DeleteIndexFieldRequest":{ + "type":"structure", + "required":[ + "DomainName", + "IndexFieldName" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IndexFieldName":{"shape":"DynamicFieldName"} + } + }, + "DeleteIndexFieldResponse":{ + "type":"structure", + "required":["IndexField"], + "members":{ + "IndexField":{"shape":"IndexFieldStatus"} + } + }, + "DeleteSuggesterRequest":{ + "type":"structure", + "required":[ + "DomainName", + "SuggesterName" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "SuggesterName":{"shape":"StandardName"} + } + }, + "DeleteSuggesterResponse":{ + "type":"structure", + "required":["Suggester"], + "members":{ + "Suggester":{"shape":"SuggesterStatus"} + } + }, + "DescribeAnalysisSchemesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AnalysisSchemeNames":{"shape":"StandardNameList"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeAnalysisSchemesResponse":{ + "type":"structure", + "required":["AnalysisSchemes"], + "members":{ + "AnalysisSchemes":{"shape":"AnalysisSchemeStatusList"} + } + }, + "DescribeAvailabilityOptionsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeAvailabilityOptionsResponse":{ + "type":"structure", + "members":{ + "AvailabilityOptions":{"shape":"AvailabilityOptionsStatus"} + } + }, + "DescribeDomainsRequest":{ + "type":"structure", + "members":{ + "DomainNames":{"shape":"DomainNameList"} + } + }, + "DescribeDomainsResponse":{ + "type":"structure", + "required":["DomainStatusList"], + "members":{ + 
"DomainStatusList":{"shape":"DomainStatusList"} + } + }, + "DescribeExpressionsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "ExpressionNames":{"shape":"StandardNameList"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeExpressionsResponse":{ + "type":"structure", + "required":["Expressions"], + "members":{ + "Expressions":{"shape":"ExpressionStatusList"} + } + }, + "DescribeIndexFieldsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "FieldNames":{"shape":"DynamicFieldNameList"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeIndexFieldsResponse":{ + "type":"structure", + "required":["IndexFields"], + "members":{ + "IndexFields":{"shape":"IndexFieldStatusList"} + } + }, + "DescribeScalingParametersRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DescribeScalingParametersResponse":{ + "type":"structure", + "required":["ScalingParameters"], + "members":{ + "ScalingParameters":{"shape":"ScalingParametersStatus"} + } + }, + "DescribeServiceAccessPoliciesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeServiceAccessPoliciesResponse":{ + "type":"structure", + "required":["AccessPolicies"], + "members":{ + "AccessPolicies":{"shape":"AccessPoliciesStatus"} + } + }, + "DescribeSuggestersRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "SuggesterNames":{"shape":"StandardNameList"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeSuggestersResponse":{ + "type":"structure", + "required":["Suggesters"], + "members":{ + "Suggesters":{"shape":"SuggesterStatusList"} + } + }, + "DisabledOperationException":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"DisabledAction", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "DocumentSuggesterOptions":{ + "type":"structure", + "required":["SourceField"], + "members":{ + "SourceField":{"shape":"FieldName"}, + "FuzzyMatching":{"shape":"SuggesterFuzzyMatching"}, + "SortExpression":{"shape":"String"} + } + }, + "DomainId":{ + "type":"string", + "min":1, + "max":64 + }, + "DomainName":{ + "type":"string", + "min":3, + "max":28, + "pattern":"[a-z][a-z0-9\\-]+" + }, + "DomainNameList":{ + "type":"list", + "member":{"shape":"DomainName"} + }, + "DomainNameMap":{ + "type":"map", + "key":{"shape":"DomainName"}, + "value":{"shape":"APIVersion"} + }, + "DomainStatus":{ + "type":"structure", + "required":[ + "DomainId", + "DomainName", + "RequiresIndexDocuments" + ], + "members":{ + "DomainId":{"shape":"DomainId"}, + "DomainName":{"shape":"DomainName"}, + "ARN":{"shape":"ARN"}, + "Created":{"shape":"Boolean"}, + "Deleted":{"shape":"Boolean"}, + "DocService":{"shape":"ServiceEndpoint"}, + "SearchService":{"shape":"ServiceEndpoint"}, + "RequiresIndexDocuments":{"shape":"Boolean"}, + "Processing":{"shape":"Boolean"}, + "SearchInstanceType":{"shape":"SearchInstanceType"}, + "SearchPartitionCount":{"shape":"PartitionCount"}, + "SearchInstanceCount":{"shape":"InstanceCount"}, + "Limits":{"shape":"Limits"} + } + }, + "DomainStatusList":{ + "type":"list", + "member":{"shape":"DomainStatus"} + }, + "Double":{"type":"double"}, + "DoubleArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"Double"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"} + } + }, + "DoubleOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"Double"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + 
"SortEnabled":{"shape":"Boolean"} + } + }, + "DynamicFieldName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"([a-z][a-z0-9_]*\\*?|\\*[a-z0-9_]*)" + }, + "DynamicFieldNameList":{ + "type":"list", + "member":{"shape":"DynamicFieldName"} + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "Expression":{ + "type":"structure", + "required":[ + "ExpressionName", + "ExpressionValue" + ], + "members":{ + "ExpressionName":{"shape":"StandardName"}, + "ExpressionValue":{"shape":"ExpressionValue"} + } + }, + "ExpressionStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"Expression"}, + "Status":{"shape":"OptionStatus"} + } + }, + "ExpressionStatusList":{ + "type":"list", + "member":{"shape":"ExpressionStatus"} + }, + "ExpressionValue":{ + "type":"string", + "min":1, + "max":10240 + }, + "FieldName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-z][a-z0-9_]*" + }, + "FieldNameCommaList":{ + "type":"string", + "pattern":"\\s*[a-z*][a-z0-9_]*\\*?\\s*(,\\s*[a-z*][a-z0-9_]*\\*?\\s*)*" + }, + "FieldNameList":{ + "type":"list", + "member":{"shape":"FieldName"} + }, + "FieldValue":{ + "type":"string", + "min":0, + "max":1024 + }, + "IndexDocumentsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "IndexDocumentsResponse":{ + "type":"structure", + "members":{ + "FieldNames":{"shape":"FieldNameList"} + } + }, + "IndexField":{ + "type":"structure", + "required":[ + "IndexFieldName", + "IndexFieldType" + ], + "members":{ + "IndexFieldName":{"shape":"DynamicFieldName"}, + "IndexFieldType":{"shape":"IndexFieldType"}, + "IntOptions":{"shape":"IntOptions"}, + "DoubleOptions":{"shape":"DoubleOptions"}, + "LiteralOptions":{"shape":"LiteralOptions"}, + "TextOptions":{"shape":"TextOptions"}, + "DateOptions":{"shape":"DateOptions"}, + "LatLonOptions":{"shape":"LatLonOptions"}, + 
"IntArrayOptions":{"shape":"IntArrayOptions"}, + "DoubleArrayOptions":{"shape":"DoubleArrayOptions"}, + "LiteralArrayOptions":{"shape":"LiteralArrayOptions"}, + "TextArrayOptions":{"shape":"TextArrayOptions"}, + "DateArrayOptions":{"shape":"DateArrayOptions"} + } + }, + "IndexFieldStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"IndexField"}, + "Status":{"shape":"OptionStatus"} + } + }, + "IndexFieldStatusList":{ + "type":"list", + "member":{"shape":"IndexFieldStatus"} + }, + "IndexFieldType":{ + "type":"string", + "enum":[ + "int", + "double", + "literal", + "text", + "date", + "latlon", + "int-array", + "double-array", + "literal-array", + "text-array", + "date-array" + ] + }, + "InstanceCount":{ + "type":"integer", + "min":1 + }, + "IntArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"Long"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"} + } + }, + "IntOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"Long"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + "InternalException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InternalException", + "httpStatusCode":500 + }, + "exception":true + }, + "InvalidTypeException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidType", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "LatLonOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + 
"LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "Limits":{ + "type":"structure", + "required":[ + "MaximumReplicationCount", + "MaximumPartitionCount" + ], + "members":{ + "MaximumReplicationCount":{"shape":"MaximumReplicationCount"}, + "MaximumPartitionCount":{"shape":"MaximumPartitionCount"} + } + }, + "ListDomainNamesResponse":{ + "type":"structure", + "members":{ + "DomainNames":{"shape":"DomainNameMap"} + } + }, + "LiteralArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"} + } + }, + "LiteralOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceField":{"shape":"FieldName"}, + "FacetEnabled":{"shape":"Boolean"}, + "SearchEnabled":{"shape":"Boolean"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"} + } + }, + "Long":{"type":"long"}, + "MaximumPartitionCount":{ + "type":"integer", + "min":1 + }, + "MaximumReplicationCount":{ + "type":"integer", + "min":1 + }, + "MultiAZ":{"type":"boolean"}, + "OptionState":{ + "type":"string", + "enum":[ + "RequiresIndexDocuments", + "Processing", + "Active", + "FailedToValidate" + ] + }, + "OptionStatus":{ + "type":"structure", + "required":[ + "CreationDate", + "UpdateDate", + "State" + ], + "members":{ + "CreationDate":{"shape":"UpdateTimestamp"}, + "UpdateDate":{"shape":"UpdateTimestamp"}, + "UpdateVersion":{"shape":"UIntValue"}, + "State":{"shape":"OptionState"}, + "PendingDeletion":{"shape":"Boolean"} + } + }, + "PartitionCount":{ + "type":"integer", + "min":1 + }, + "PartitionInstanceType":{ + "type":"string", + "enum":[ + "search.m1.small", + "search.m1.large", + "search.m2.xlarge", + "search.m2.2xlarge", + 
"search.m3.medium", + "search.m3.large", + "search.m3.xlarge", + "search.m3.2xlarge" + ] + }, + "PolicyDocument":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ScalingParameters":{ + "type":"structure", + "members":{ + "DesiredInstanceType":{"shape":"PartitionInstanceType"}, + "DesiredReplicationCount":{"shape":"UIntValue"}, + "DesiredPartitionCount":{"shape":"UIntValue"} + } + }, + "ScalingParametersStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"ScalingParameters"}, + "Status":{"shape":"OptionStatus"} + } + }, + "SearchInstanceType":{"type":"string"}, + "ServiceEndpoint":{ + "type":"structure", + "members":{ + "Endpoint":{"shape":"ServiceUrl"} + } + }, + "ServiceUrl":{"type":"string"}, + "StandardName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-z][a-z0-9_]*" + }, + "StandardNameList":{ + "type":"list", + "member":{"shape":"StandardName"} + }, + "String":{"type":"string"}, + "Suggester":{ + "type":"structure", + "required":[ + "SuggesterName", + "DocumentSuggesterOptions" + ], + "members":{ + "SuggesterName":{"shape":"StandardName"}, + "DocumentSuggesterOptions":{"shape":"DocumentSuggesterOptions"} + } + }, + "SuggesterFuzzyMatching":{ + "type":"string", + "enum":[ + "none", + "low", + "high" + ] + }, + "SuggesterStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"Suggester"}, + "Status":{"shape":"OptionStatus"} + } + }, + "SuggesterStatusList":{ + "type":"list", + "member":{"shape":"SuggesterStatus"} + }, + "TextArrayOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceFields":{"shape":"FieldNameCommaList"}, + "ReturnEnabled":{"shape":"Boolean"}, + "HighlightEnabled":{"shape":"Boolean"}, + 
"AnalysisScheme":{"shape":"Word"} + } + }, + "TextOptions":{ + "type":"structure", + "members":{ + "DefaultValue":{"shape":"FieldValue"}, + "SourceField":{"shape":"FieldName"}, + "ReturnEnabled":{"shape":"Boolean"}, + "SortEnabled":{"shape":"Boolean"}, + "HighlightEnabled":{"shape":"Boolean"}, + "AnalysisScheme":{"shape":"Word"} + } + }, + "UIntValue":{ + "type":"integer", + "min":0 + }, + "UpdateAvailabilityOptionsRequest":{ + "type":"structure", + "required":[ + "DomainName", + "MultiAZ" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "MultiAZ":{"shape":"Boolean"} + } + }, + "UpdateAvailabilityOptionsResponse":{ + "type":"structure", + "members":{ + "AvailabilityOptions":{"shape":"AvailabilityOptionsStatus"} + } + }, + "UpdateScalingParametersRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ScalingParameters" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "ScalingParameters":{"shape":"ScalingParameters"} + } + }, + "UpdateScalingParametersResponse":{ + "type":"structure", + "required":["ScalingParameters"], + "members":{ + "ScalingParameters":{"shape":"ScalingParametersStatus"} + } + }, + "UpdateServiceAccessPoliciesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "AccessPolicies" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AccessPolicies":{"shape":"PolicyDocument"} + } + }, + "UpdateServiceAccessPoliciesResponse":{ + "type":"structure", + "required":["AccessPolicies"], + "members":{ + "AccessPolicies":{"shape":"AccessPoliciesStatus"} + } + }, + "UpdateTimestamp":{"type":"timestamp"}, + "Word":{ + "type":"string", + "pattern":"[\\S]+" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/docs-2.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,865 @@ +{ + "version": "2.0", + "operations": { + "BuildSuggesters": "

    Indexes the search suggestions. For more information, see Configuring Suggesters in the Amazon CloudSearch Developer Guide.

    ", + "CreateDomain": "

    Creates a new search domain. For more information, see Creating a Search Domain in the Amazon CloudSearch Developer Guide.

    ", + "DefineAnalysisScheme": "

    Configures an analysis scheme that can be applied to a text or text-array field to define language-specific text processing options. For more information, see Configuring Analysis Schemes in the Amazon CloudSearch Developer Guide.

    ", + "DefineExpression": "

    Configures an Expression for the search domain. Used to create new expressions and modify existing ones. If the expression exists, the new configuration replaces the old one. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.

    ", + "DefineIndexField": "

    Configures an IndexField for the search domain. Used to create new fields and modify existing ones. You must specify the name of the domain you are configuring and an index field configuration. The index field configuration specifies a unique name, the index field type, and the options you want to configure for the field. The options you can specify depend on the IndexFieldType. If the field exists, the new configuration replaces the old one. For more information, see Configuring Index Fields in the Amazon CloudSearch Developer Guide.

    ", + "DefineSuggester": "

    Configures a suggester for a domain. A suggester enables you to display possible matches before users finish typing their queries. When you configure a suggester, you must specify the name of the text field you want to search for possible matches and a unique name for the suggester. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide.

    ", + "DeleteAnalysisScheme": "

    Deletes an analysis scheme. For more information, see Configuring Analysis Schemes in the Amazon CloudSearch Developer Guide.

    ", + "DeleteDomain": "

    Permanently deletes a search domain and all of its data. Once a domain has been deleted, it cannot be recovered. For more information, see Deleting a Search Domain in the Amazon CloudSearch Developer Guide.

    ", + "DeleteExpression": "

    Removes an Expression from the search domain. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.

    ", + "DeleteIndexField": "

    Removes an IndexField from the search domain. For more information, see Configuring Index Fields in the Amazon CloudSearch Developer Guide.

    ", + "DeleteSuggester": "

    Deletes a suggester. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide.

    ", + "DescribeAnalysisSchemes": "

    Gets the analysis schemes configured for a domain. An analysis scheme defines language-specific text processing options for a text field. Can be limited to specific analysis schemes by name. By default, shows all analysis schemes and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Analysis Schemes in the Amazon CloudSearch Developer Guide.

    ", + "DescribeAvailabilityOptions": "

    Gets the availability options configured for a domain. By default, shows the configuration with any pending changes. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Availability Options in the Amazon CloudSearch Developer Guide.

    ", + "DescribeDomains": "

    Gets information about the search domains owned by this account. Can be limited to specific domains. Shows all domains by default. To get the number of searchable documents in a domain, use the console or submit a matchall request to your domain's search endpoint: q=matchall&amp;q.parser=structured&amp;size=0. For more information, see Getting Information about a Search Domain in the Amazon CloudSearch Developer Guide.

    ", + "DescribeExpressions": "

    Gets the expressions configured for the search domain. Can be limited to specific expressions by name. By default, shows all expressions and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.

    ", + "DescribeIndexFields": "

    Gets information about the index fields configured for the search domain. Can be limited to specific fields by name. By default, shows all fields and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Getting Domain Information in the Amazon CloudSearch Developer Guide.

    ", + "DescribeScalingParameters": "

    Gets the scaling parameters configured for a domain. A domain's scaling parameters specify the desired search instance type and replication count. For more information, see Configuring Scaling Options in the Amazon CloudSearch Developer Guide.

    ", + "DescribeServiceAccessPolicies": "

    Gets information about the access policies that control access to the domain's document and search endpoints. By default, shows the configuration with any pending changes. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Access for a Search Domain in the Amazon CloudSearch Developer Guide.

    ", + "DescribeSuggesters": "

    Gets the suggesters configured for a domain. A suggester enables you to display possible matches before users finish typing their queries. Can be limited to specific suggesters by name. By default, shows all suggesters and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide.

    ", + "IndexDocuments": "

    Tells the search domain to start indexing its documents using the latest indexing options. This operation must be invoked to activate options whose OptionStatus is RequiresIndexDocuments.

    ", + "ListDomainNames": "

    Lists all search domains owned by an account.

    ", + "UpdateAvailabilityOptions": "

    Configures the availability options for a domain. Enabling the Multi-AZ option expands an Amazon CloudSearch domain to an additional Availability Zone in the same Region to increase fault tolerance in the event of a service disruption. Changes to the Multi-AZ option can take about half an hour to become active. For more information, see Configuring Availability Options in the Amazon CloudSearch Developer Guide.

    ", + "UpdateScalingParameters": "

    Configures scaling parameters for a domain. A domain's scaling parameters specify the desired search instance type and replication count. Amazon CloudSearch will still automatically scale your domain based on the volume of data and traffic, but not below the desired instance type and replication count. If the Multi-AZ option is enabled, these values control the resources used per Availability Zone. For more information, see Configuring Scaling Options in the Amazon CloudSearch Developer Guide.

    ", + "UpdateServiceAccessPolicies": "

    Configures the access rules that control access to the domain's document and search endpoints. For more information, see Configuring Access for an Amazon CloudSearch Domain.

    " + }, + "service": "Amazon CloudSearch Configuration Service

    You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action.

    The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.

    ", + "shapes": { + "APIVersion": { + "base": "

    The Amazon CloudSearch API version for a domain: 2011-02-01 or 2013-01-01.

    ", + "refs": { + "DomainNameMap$value": null + } + }, + "ARN": { + "base": "

    The Amazon Resource Name (ARN) of the search domain. See Identifiers for IAM Entities in Using AWS Identity and Access Management for more information.

    ", + "refs": { + "DomainStatus$ARN": null + } + }, + "AccessPoliciesStatus": { + "base": "

    The configured access rules for the domain's document and search endpoints, and the current status of those rules.

    ", + "refs": { + "DescribeServiceAccessPoliciesResponse$AccessPolicies": "

    The access rules configured for the domain specified in the request.

    ", + "UpdateServiceAccessPoliciesResponse$AccessPolicies": "

    The access rules configured for the domain.

    " + } + }, + "AlgorithmicStemming": { + "base": null, + "refs": { + "AnalysisOptions$AlgorithmicStemming": "

    The level of algorithmic stemming to perform: none, minimal, light, or full. The available levels vary depending on the language. For more information, see Language Specific Text Processing Settings in the Amazon CloudSearch Developer Guide

    " + } + }, + "AnalysisOptions": { + "base": "

    Synonyms, stopwords, and stemming options for an analysis scheme. Includes tokenization dictionary for Japanese.

    ", + "refs": { + "AnalysisScheme$AnalysisOptions": null + } + }, + "AnalysisScheme": { + "base": "

    Configuration information for an analysis scheme. Each analysis scheme has a unique name and specifies the language of the text to be processed. The following options can be configured for an analysis scheme: Synonyms, Stopwords, StemmingDictionary, JapaneseTokenizationDictionary and AlgorithmicStemming.

    ", + "refs": { + "AnalysisSchemeStatus$Options": null, + "DefineAnalysisSchemeRequest$AnalysisScheme": null + } + }, + "AnalysisSchemeLanguage": { + "base": "

    An IETF RFC 4646 language code or mul for multiple languages.

    ", + "refs": { + "AnalysisScheme$AnalysisSchemeLanguage": null + } + }, + "AnalysisSchemeStatus": { + "base": "

    The status and configuration of an AnalysisScheme.

    ", + "refs": { + "AnalysisSchemeStatusList$member": null, + "DefineAnalysisSchemeResponse$AnalysisScheme": null, + "DeleteAnalysisSchemeResponse$AnalysisScheme": "

    The status of the analysis scheme being deleted.

    " + } + }, + "AnalysisSchemeStatusList": { + "base": "

    A list of the analysis schemes configured for a domain.

    ", + "refs": { + "DescribeAnalysisSchemesResponse$AnalysisSchemes": "

    The analysis scheme descriptions.

    " + } + }, + "AvailabilityOptionsStatus": { + "base": "

    The status and configuration of the domain's availability options.

    ", + "refs": { + "DescribeAvailabilityOptionsResponse$AvailabilityOptions": "

    The availability options configured for the domain. Indicates whether Multi-AZ is enabled for the domain.

    ", + "UpdateAvailabilityOptionsResponse$AvailabilityOptions": "

    The newly-configured availability options. Indicates whether Multi-AZ is enabled for the domain.

    " + } + }, + "BaseException": { + "base": "

    An error occurred while processing the request.

    ", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "DateArrayOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "DateArrayOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "DateArrayOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "DateOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "DateOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "DateOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "DateOptions$SortEnabled": "

    Whether the field can be used to sort the search results.

    ", + "DescribeAnalysisSchemesRequest$Deployed": "

    Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

    ", + "DescribeAvailabilityOptionsRequest$Deployed": "

    Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

    ", + "DescribeExpressionsRequest$Deployed": "

    Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

    ", + "DescribeIndexFieldsRequest$Deployed": "

    Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

    ", + "DescribeServiceAccessPoliciesRequest$Deployed": "

    Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

    ", + "DescribeSuggestersRequest$Deployed": "

    Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

    ", + "DomainStatus$Created": "

    True if the search domain is created. It can take several minutes to initialize a domain when CreateDomain is called. Newly created search domains are returned from DescribeDomains with a false value for Created until domain creation is complete.

    ", + "DomainStatus$Deleted": "

    True if the search domain has been deleted. The system must clean up resources dedicated to the search domain when DeleteDomain is called. Newly deleted search domains are returned from DescribeDomains with a true value for IsDeleted for several minutes until resource cleanup is complete.

    ", + "DomainStatus$RequiresIndexDocuments": "

    True if IndexDocuments needs to be called to activate the current domain configuration.

    ", + "DomainStatus$Processing": "

    True if processing is being done to activate the current domain configuration.

    ", + "DoubleArrayOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "DoubleArrayOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "DoubleArrayOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "DoubleOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "DoubleOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "DoubleOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "DoubleOptions$SortEnabled": "

    Whether the field can be used to sort the search results.

    ", + "IntArrayOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "IntArrayOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "IntArrayOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "IntOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "IntOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "IntOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "IntOptions$SortEnabled": "

    Whether the field can be used to sort the search results.

    ", + "LatLonOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "LatLonOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "LatLonOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "LatLonOptions$SortEnabled": "

    Whether the field can be used to sort the search results.

    ", + "LiteralArrayOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "LiteralArrayOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "LiteralArrayOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "LiteralOptions$FacetEnabled": "

    Whether facet information can be returned for the field.

    ", + "LiteralOptions$SearchEnabled": "

    Whether the contents of the field are searchable.

    ", + "LiteralOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "LiteralOptions$SortEnabled": "

    Whether the field can be used to sort the search results.

    ", + "OptionStatus$PendingDeletion": "

    Indicates that the option will be deleted once processing is complete.

    ", + "TextArrayOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "TextArrayOptions$HighlightEnabled": "

    Whether highlights can be returned for the field.

    ", + "TextOptions$ReturnEnabled": "

    Whether the contents of the field can be returned in the search results.

    ", + "TextOptions$SortEnabled": "

    Whether the field can be used to sort the search results.

    ", + "TextOptions$HighlightEnabled": "

    Whether highlights can be returned for the field.

    ", + "UpdateAvailabilityOptionsRequest$MultiAZ": "

    You expand an existing search domain to a second Availability Zone by setting the Multi-AZ option to true. Similarly, you can turn off the Multi-AZ option to downgrade the domain to a single Availability Zone by setting the Multi-AZ option to false.

    " + } + }, + "BuildSuggestersRequest": { + "base": "

    Container for the parameters to the BuildSuggester operation. Specifies the name of the domain you want to update.

    ", + "refs": { + } + }, + "BuildSuggestersResponse": { + "base": "

    The result of a BuildSuggester request. Contains a list of the fields used for suggestions.

    ", + "refs": { + } + }, + "CreateDomainRequest": { + "base": "

    Container for the parameters to the CreateDomain operation. Specifies a name for the new search domain.

    ", + "refs": { + } + }, + "CreateDomainResponse": { + "base": "

    The result of a CreateDomainRequest. Contains the status of a newly created domain.

    ", + "refs": { + } + }, + "DateArrayOptions": { + "base": "

    Options for a field that contains an array of dates. Present if IndexFieldType specifies the field is of type date-array. All options are enabled by default.

    ", + "refs": { + "IndexField$DateArrayOptions": null + } + }, + "DateOptions": { + "base": "

    Options for a date field. Dates and times are specified in UTC (Coordinated Universal Time) according to IETF RFC3339: yyyy-mm-ddT00:00:00Z. Present if IndexFieldType specifies the field is of type date. All options are enabled by default.

    ", + "refs": { + "IndexField$DateOptions": null + } + }, + "DefineAnalysisSchemeRequest": { + "base": "

    Container for the parameters to the DefineAnalysisScheme operation. Specifies the name of the domain you want to update and the analysis scheme configuration.

    ", + "refs": { + } + }, + "DefineAnalysisSchemeResponse": { + "base": "

    The result of a DefineAnalysisScheme request. Contains the status of the newly-configured analysis scheme.

    ", + "refs": { + } + }, + "DefineExpressionRequest": { + "base": "

    Container for the parameters to the DefineExpression operation. Specifies the name of the domain you want to update and the expression you want to configure.

    ", + "refs": { + } + }, + "DefineExpressionResponse": { + "base": "

    The result of a DefineExpression request. Contains the status of the newly-configured expression.

    ", + "refs": { + } + }, + "DefineIndexFieldRequest": { + "base": "

    Container for the parameters to the DefineIndexField operation. Specifies the name of the domain you want to update and the index field configuration.

    ", + "refs": { + } + }, + "DefineIndexFieldResponse": { + "base": "

    The result of a DefineIndexField request. Contains the status of the newly-configured index field.

    ", + "refs": { + } + }, + "DefineSuggesterRequest": { + "base": "

    Container for the parameters to the DefineSuggester operation. Specifies the name of the domain you want to update and the suggester configuration.

    ", + "refs": { + } + }, + "DefineSuggesterResponse": { + "base": "

    The result of a DefineSuggester request. Contains the status of the newly-configured suggester.

    ", + "refs": { + } + }, + "DeleteAnalysisSchemeRequest": { + "base": "

    Container for the parameters to the DeleteAnalysisScheme operation. Specifies the name of the domain you want to update and the analysis scheme you want to delete.

    ", + "refs": { + } + }, + "DeleteAnalysisSchemeResponse": { + "base": "

    The result of a DeleteAnalysisScheme request. Contains the status of the deleted analysis scheme.

    ", + "refs": { + } + }, + "DeleteDomainRequest": { + "base": "

    Container for the parameters to the DeleteDomain operation. Specifies the name of the domain you want to delete.

    ", + "refs": { + } + }, + "DeleteDomainResponse": { + "base": "

    The result of a DeleteDomain request. Contains the status of a newly deleted domain, or no status if the domain has already been completely deleted.

    ", + "refs": { + } + }, + "DeleteExpressionRequest": { + "base": "

    Container for the parameters to the DeleteExpression operation. Specifies the name of the domain you want to update and the name of the expression you want to delete.

    ", + "refs": { + } + }, + "DeleteExpressionResponse": { + "base": "

    The result of a DeleteExpression request. Specifies the expression being deleted.

    ", + "refs": { + } + }, + "DeleteIndexFieldRequest": { + "base": "

    Container for the parameters to the DeleteIndexField operation. Specifies the name of the domain you want to update and the name of the index field you want to delete.

    ", + "refs": { + } + }, + "DeleteIndexFieldResponse": { + "base": "

    The result of a DeleteIndexField request.

    ", + "refs": { + } + }, + "DeleteSuggesterRequest": { + "base": "

    Container for the parameters to the DeleteSuggester operation. Specifies the name of the domain you want to update and name of the suggester you want to delete.

    ", + "refs": { + } + }, + "DeleteSuggesterResponse": { + "base": "

    The result of a DeleteSuggester request. Contains the status of the deleted suggester.

    ", + "refs": { + } + }, + "DescribeAnalysisSchemesRequest": { + "base": "

    Container for the parameters to the DescribeAnalysisSchemes operation. Specifies the name of the domain you want to describe. To limit the response to particular analysis schemes, specify the names of the analysis schemes you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.

    ", + "refs": { + } + }, + "DescribeAnalysisSchemesResponse": { + "base": "

    The result of a DescribeAnalysisSchemes request. Contains the analysis schemes configured for the domain specified in the request.

    ", + "refs": { + } + }, + "DescribeAvailabilityOptionsRequest": { + "base": "

    Container for the parameters to the DescribeAvailabilityOptions operation. Specifies the name of the domain you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.

    ", + "refs": { + } + }, + "DescribeAvailabilityOptionsResponse": { + "base": "

    The result of a DescribeAvailabilityOptions request. Indicates whether or not the Multi-AZ option is enabled for the domain specified in the request.

    ", + "refs": { + } + }, + "DescribeDomainsRequest": { + "base": "

    Container for the parameters to the DescribeDomains operation. By default shows the status of all domains. To restrict the response to particular domains, specify the names of the domains you want to describe.

    ", + "refs": { + } + }, + "DescribeDomainsResponse": { + "base": "

    The result of a DescribeDomains request. Contains the status of the domains specified in the request or all domains owned by the account.

    ", + "refs": { + } + }, + "DescribeExpressionsRequest": { + "base": "

    Container for the parameters to the DescribeDomains operation. Specifies the name of the domain you want to describe. To restrict the response to particular expressions, specify the names of the expressions you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.

    ", + "refs": { + } + }, + "DescribeExpressionsResponse": { + "base": "

    The result of a DescribeExpressions request. Contains the expressions configured for the domain specified in the request.

    ", + "refs": { + } + }, + "DescribeIndexFieldsRequest": { + "base": "

    Container for the parameters to the DescribeIndexFields operation. Specifies the name of the domain you want to describe. To restrict the response to particular index fields, specify the names of the index fields you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.

    ", + "refs": { + } + }, + "DescribeIndexFieldsResponse": { + "base": "

    The result of a DescribeIndexFields request. Contains the index fields configured for the domain specified in the request.

    ", + "refs": { + } + }, + "DescribeScalingParametersRequest": { + "base": "

    Container for the parameters to the DescribeScalingParameters operation. Specifies the name of the domain you want to describe.

    ", + "refs": { + } + }, + "DescribeScalingParametersResponse": { + "base": "

    The result of a DescribeScalingParameters request. Contains the scaling parameters configured for the domain specified in the request.

    ", + "refs": { + } + }, + "DescribeServiceAccessPoliciesRequest": { + "base": "

    Container for the parameters to the DescribeServiceAccessPolicies operation. Specifies the name of the domain you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.

    ", + "refs": { + } + }, + "DescribeServiceAccessPoliciesResponse": { + "base": "

    The result of a DescribeServiceAccessPolicies request.

    ", + "refs": { + } + }, + "DescribeSuggestersRequest": { + "base": "

    Container for the parameters to the DescribeSuggester operation. Specifies the name of the domain you want to describe. To restrict the response to particular suggesters, specify the names of the suggesters you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.

    ", + "refs": { + } + }, + "DescribeSuggestersResponse": { + "base": "

    The result of a DescribeSuggesters request.

    ", + "refs": { + } + }, + "DisabledOperationException": { + "base": "

    The request was rejected because it attempted an operation which is not enabled.

    ", + "refs": { + } + }, + "DocumentSuggesterOptions": { + "base": "

    Options for a search suggester.

    ", + "refs": { + "Suggester$DocumentSuggesterOptions": null + } + }, + "DomainId": { + "base": "

    An internally generated unique identifier for a domain.

    ", + "refs": { + "DomainStatus$DomainId": null + } + }, + "DomainName": { + "base": "

    A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

    ", + "refs": { + "BuildSuggestersRequest$DomainName": null, + "CreateDomainRequest$DomainName": "

    A name for the domain you are creating. Allowed characters are a-z (lower-case letters), 0-9, and hyphen (-). Domain names must start with a letter or number and be at least 3 and no more than 28 characters long.

    ", + "DefineAnalysisSchemeRequest$DomainName": null, + "DefineExpressionRequest$DomainName": null, + "DefineIndexFieldRequest$DomainName": null, + "DefineSuggesterRequest$DomainName": null, + "DeleteAnalysisSchemeRequest$DomainName": null, + "DeleteDomainRequest$DomainName": "

    The name of the domain you want to permanently delete.

    ", + "DeleteExpressionRequest$DomainName": null, + "DeleteIndexFieldRequest$DomainName": null, + "DeleteSuggesterRequest$DomainName": null, + "DescribeAnalysisSchemesRequest$DomainName": "

    The name of the domain you want to describe.

    ", + "DescribeAvailabilityOptionsRequest$DomainName": "

    The name of the domain you want to describe.

    ", + "DescribeExpressionsRequest$DomainName": "

    The name of the domain you want to describe.

    ", + "DescribeIndexFieldsRequest$DomainName": "

    The name of the domain you want to describe.

    ", + "DescribeScalingParametersRequest$DomainName": null, + "DescribeServiceAccessPoliciesRequest$DomainName": "

    The name of the domain you want to describe.

    ", + "DescribeSuggestersRequest$DomainName": "

    The name of the domain you want to describe.

    ", + "DomainNameList$member": null, + "DomainNameMap$key": null, + "DomainStatus$DomainName": null, + "IndexDocumentsRequest$DomainName": null, + "UpdateAvailabilityOptionsRequest$DomainName": null, + "UpdateScalingParametersRequest$DomainName": null, + "UpdateServiceAccessPoliciesRequest$DomainName": null + } + }, + "DomainNameList": { + "base": "

    A list of domain names.

    ", + "refs": { + "DescribeDomainsRequest$DomainNames": "

    The names of the domains you want to include in the response.

    " + } + }, + "DomainNameMap": { + "base": "

    A collection of domain names.

    ", + "refs": { + "ListDomainNamesResponse$DomainNames": "

    The names of the search domains owned by an account.

    " + } + }, + "DomainStatus": { + "base": "

    The current status of the search domain.

    ", + "refs": { + "CreateDomainResponse$DomainStatus": null, + "DeleteDomainResponse$DomainStatus": null, + "DomainStatusList$member": null + } + }, + "DomainStatusList": { + "base": "

    A list that contains the status of each requested domain.

    ", + "refs": { + "DescribeDomainsResponse$DomainStatusList": null + } + }, + "Double": { + "base": null, + "refs": { + "DoubleArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "DoubleOptions$DefaultValue": "

    A value to use for the field if the field isn't specified for a document. This can be important if you are using the field in an expression and that field is not present in every document.

    " + } + }, + "DoubleArrayOptions": { + "base": "

    Options for a field that contains an array of double-precision 64-bit floating point values. Present if IndexFieldType specifies the field is of type double-array. All options are enabled by default.

    ", + "refs": { + "IndexField$DoubleArrayOptions": null + } + }, + "DoubleOptions": { + "base": "

    Options for a double-precision 64-bit floating point field. Present if IndexFieldType specifies the field is of type double. All options are enabled by default.

    ", + "refs": { + "IndexField$DoubleOptions": null + } + }, + "DynamicFieldName": { + "base": null, + "refs": { + "DeleteIndexFieldRequest$IndexFieldName": "

    The name of the index field your want to remove from the domain's indexing options.

    ", + "DynamicFieldNameList$member": null, + "IndexField$IndexFieldName": "

    A string that represents the name of an index field. CloudSearch supports regular index fields as well as dynamic fields. A dynamic field's name defines a pattern that begins or ends with a wildcard. Any document fields that don't map to a regular index field but do match a dynamic field's pattern are configured with the dynamic field's indexing options.

    Regular field names begin with a letter and can contain the following characters: a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin or end with a wildcard (*). The wildcard can also be the only character in a dynamic field name. Multiple wildcards, and wildcards embedded within a string are not supported.

    The name score is reserved and cannot be used as a field name. To reference a document's ID, you can use the name _id.

    " + } + }, + "DynamicFieldNameList": { + "base": null, + "refs": { + "DescribeIndexFieldsRequest$FieldNames": "

    A list of the index fields you want to describe. If not specified, information is returned for all configured index fields.

    " + } + }, + "ErrorCode": { + "base": "

    A machine-parsable string error or warning code.

    ", + "refs": { + "BaseException$Code": null + } + }, + "ErrorMessage": { + "base": "

    A human-readable string error or warning message.

    ", + "refs": { + "BaseException$Message": null + } + }, + "Expression": { + "base": "

    A named expression that can be evaluated at search time. Can be used to sort the search results, define other expressions, or return computed information in the search results.

    ", + "refs": { + "DefineExpressionRequest$Expression": null, + "ExpressionStatus$Options": "

    The expression that is evaluated for sorting while processing a search request.

    " + } + }, + "ExpressionStatus": { + "base": "

    The value of an Expression and its current status.

    ", + "refs": { + "DefineExpressionResponse$Expression": null, + "DeleteExpressionResponse$Expression": "

    The status of the expression being deleted.

    ", + "ExpressionStatusList$member": null + } + }, + "ExpressionStatusList": { + "base": "

    Contains the status of multiple expressions.

    ", + "refs": { + "DescribeExpressionsResponse$Expressions": "

    The expressions configured for the domain.

    " + } + }, + "ExpressionValue": { + "base": "

    The expression to evaluate for sorting while processing a search request. The Expression syntax is based on JavaScript expressions. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.

    ", + "refs": { + "Expression$ExpressionValue": null + } + }, + "FieldName": { + "base": "

    A string that represents the name of an index field. CloudSearch supports regular index fields as well as dynamic fields. A dynamic field's name defines a pattern that begins or ends with a wildcard. Any document fields that don't map to a regular index field but do match a dynamic field's pattern are configured with the dynamic field's indexing options.

    Regular field names begin with a letter and can contain the following characters: a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin or end with a wildcard (*). The wildcard can also be the only character in a dynamic field name. Multiple wildcards, and wildcards embedded within a string are not supported.

    The name score is reserved and cannot be used as a field name. To reference a document's ID, you can use the name _id.

    ", + "refs": { + "DateOptions$SourceField": null, + "DocumentSuggesterOptions$SourceField": "

    The name of the index field you want to use for suggestions.

    ", + "DoubleOptions$SourceField": "

    The name of the source field to map to the field.

    ", + "FieldNameList$member": null, + "IntOptions$SourceField": "

    The name of the source field to map to the field.

    ", + "LatLonOptions$SourceField": null, + "LiteralOptions$SourceField": null, + "TextOptions$SourceField": null + } + }, + "FieldNameCommaList": { + "base": null, + "refs": { + "DateArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    ", + "DoubleArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    ", + "IntArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    ", + "LiteralArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    ", + "TextArrayOptions$SourceFields": "

    A list of source fields to map to the field.

    " + } + }, + "FieldNameList": { + "base": "

    A list of field names.

    ", + "refs": { + "BuildSuggestersResponse$FieldNames": null, + "IndexDocumentsResponse$FieldNames": "

    The names of the fields that are currently being indexed.

    " + } + }, + "FieldValue": { + "base": "

    The value of a field attribute.

    ", + "refs": { + "DateArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "DateOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "LatLonOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "LiteralArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "LiteralOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "TextArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "TextOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document." + } + }, + "IndexDocumentsRequest": { + "base": "

    Container for the parameters to the IndexDocuments operation. Specifies the name of the domain you want to re-index.

    ", + "refs": { + } + }, + "IndexDocumentsResponse": { + "base": "

    The result of an IndexDocuments request. Contains the status of the indexing operation, including the fields being indexed.

    ", + "refs": { + } + }, + "IndexField": { + "base": "

    Configuration information for a field in the index, including its name, type, and options. The supported options depend on the IndexFieldType.

    ", + "refs": { + "DefineIndexFieldRequest$IndexField": "

    The index field and field options you want to configure.

    ", + "IndexFieldStatus$Options": null + } + }, + "IndexFieldStatus": { + "base": "

    The value of an IndexField and its current status.

    ", + "refs": { + "DefineIndexFieldResponse$IndexField": null, + "DeleteIndexFieldResponse$IndexField": "

    The status of the index field being deleted.

    ", + "IndexFieldStatusList$member": null + } + }, + "IndexFieldStatusList": { + "base": "

    Contains the status of multiple index fields.

    ", + "refs": { + "DescribeIndexFieldsResponse$IndexFields": "

    The index fields configured for the domain.

    " + } + }, + "IndexFieldType": { + "base": "

    The type of field. The valid options for a field depend on the field type. For more information about the supported field types, see Configuring Index Fields in the Amazon CloudSearch Developer Guide.

    ", + "refs": { + "IndexField$IndexFieldType": null + } + }, + "InstanceCount": { + "base": null, + "refs": { + "DomainStatus$SearchInstanceCount": "

    The number of search instances that are available to process search requests.

    " + } + }, + "IntArrayOptions": { + "base": "

    Options for a field that contains an array of 64-bit signed integers. Present if IndexFieldType specifies the field is of type int-array. All options are enabled by default.

    ", + "refs": { + "IndexField$IntArrayOptions": null + } + }, + "IntOptions": { + "base": "

    Options for a 64-bit signed integer field. Present if IndexFieldType specifies the field is of type int. All options are enabled by default.

    ", + "refs": { + "IndexField$IntOptions": null + } + }, + "InternalException": { + "base": "

    An internal error occurred while processing the request. If this problem persists, report an issue from the Service Health Dashboard.

    ", + "refs": { + } + }, + "InvalidTypeException": { + "base": "

    The request was rejected because it specified an invalid type definition.

    ", + "refs": { + } + }, + "LatLonOptions": { + "base": "

    Options for a latlon field. A latlon field contains a location stored as a latitude and longitude value pair. Present if IndexFieldType specifies the field is of type latlon. All options are enabled by default.

    ", + "refs": { + "IndexField$LatLonOptions": null + } + }, + "LimitExceededException": { + "base": "

    The request was rejected because a resource limit has already been met.

    ", + "refs": { + } + }, + "Limits": { + "base": null, + "refs": { + "DomainStatus$Limits": null + } + }, + "ListDomainNamesResponse": { + "base": "

    The result of a ListDomainNames request. Contains a list of the domains owned by an account.

    ", + "refs": { + } + }, + "LiteralArrayOptions": { + "base": "

    Options for a field that contains an array of literal strings. Present if IndexFieldType specifies the field is of type literal-array. All options are enabled by default.

    ", + "refs": { + "IndexField$LiteralArrayOptions": null + } + }, + "LiteralOptions": { + "base": "

    Options for literal field. Present if IndexFieldType specifies the field is of type literal. All options are enabled by default.

    ", + "refs": { + "IndexField$LiteralOptions": null + } + }, + "Long": { + "base": null, + "refs": { + "IntArrayOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document.", + "IntOptions$DefaultValue": "A value to use for the field if the field isn't specified for a document. This can be important if you are using the field in an expression and that field is not present in every document." + } + }, + "MaximumPartitionCount": { + "base": null, + "refs": { + "Limits$MaximumPartitionCount": null + } + }, + "MaximumReplicationCount": { + "base": null, + "refs": { + "Limits$MaximumReplicationCount": null + } + }, + "MultiAZ": { + "base": null, + "refs": { + "AvailabilityOptionsStatus$Options": "

    The availability options configured for the domain.

    " + } + }, + "OptionState": { + "base": "

    The state of processing a change to an option. One of:

    • RequiresIndexDocuments: The option's latest value will not be deployed until IndexDocuments has been called and indexing is complete.
    • Processing: The option's latest value is in the process of being activated.
    • Active: The option's latest value is fully deployed.
    • FailedToValidate: The option value is not compatible with the domain's data and cannot be used to index the data. You must either modify the option value or update or remove the incompatible documents.
    ", + "refs": { + "OptionStatus$State": "

    The state of processing a change to an option. Possible values:

    • RequiresIndexDocuments: the option's latest value will not be deployed until IndexDocuments has been called and indexing is complete.
    • Processing: the option's latest value is in the process of being activated.
    • Active: the option's latest value is completely deployed.
    • FailedToValidate: the option value is not compatible with the domain's data and cannot be used to index the data. You must either modify the option value or update or remove the incompatible documents.
    " + } + }, + "OptionStatus": { + "base": "

    The status of domain configuration option.

    ", + "refs": { + "AccessPoliciesStatus$Status": null, + "AnalysisSchemeStatus$Status": null, + "AvailabilityOptionsStatus$Status": null, + "ExpressionStatus$Status": null, + "IndexFieldStatus$Status": null, + "ScalingParametersStatus$Status": null, + "SuggesterStatus$Status": null + } + }, + "PartitionCount": { + "base": "

    The number of partitions used to hold the domain's index.

    ", + "refs": { + "DomainStatus$SearchPartitionCount": "

    The number of partitions across which the search index is spread.

    " + } + }, + "PartitionInstanceType": { + "base": "

    The instance type (such as search.m1.small) on which an index partition is hosted.

    ", + "refs": { + "ScalingParameters$DesiredInstanceType": "

    The instance type that you want to preconfigure for your domain. For example, search.m1.small.

    " + } + }, + "PolicyDocument": { + "base": "

    Access rules for a domain's document or search service endpoints. For more information, see Configuring Access for a Search Domain in the Amazon CloudSearch Developer Guide. The maximum size of a policy document is 100 KB.

    ", + "refs": { + "AccessPoliciesStatus$Options": null, + "UpdateServiceAccessPoliciesRequest$AccessPolicies": "

    The access rules you want to configure. These rules replace any existing rules.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    The request was rejected because it attempted to reference a resource that does not exist.

    ", + "refs": { + } + }, + "ScalingParameters": { + "base": "

    The desired instance type and desired number of replicas of each index partition.

    ", + "refs": { + "ScalingParametersStatus$Options": null, + "UpdateScalingParametersRequest$ScalingParameters": null + } + }, + "ScalingParametersStatus": { + "base": "

    The status and configuration of a search domain's scaling parameters.

    ", + "refs": { + "DescribeScalingParametersResponse$ScalingParameters": null, + "UpdateScalingParametersResponse$ScalingParameters": null + } + }, + "SearchInstanceType": { + "base": "

    The instance type (such as search.m1.small) that is being used to process search requests.

    ", + "refs": { + "DomainStatus$SearchInstanceType": "

    The instance type that is being used to process search requests.

    " + } + }, + "ServiceEndpoint": { + "base": "

    The endpoint to which service requests can be submitted.

    ", + "refs": { + "DomainStatus$DocService": "

    The service endpoint for updating documents in a search domain.

    ", + "DomainStatus$SearchService": "

    The service endpoint for requesting search results from a search domain.

    " + } + }, + "ServiceUrl": { + "base": "

    The endpoint to which service requests can be submitted. For example, search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.cloudsearch.amazonaws.com or doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.cloudsearch.amazonaws.com.

    ", + "refs": { + "ServiceEndpoint$Endpoint": null + } + }, + "StandardName": { + "base": "

    Names must begin with a letter and can contain the following characters: a-z (lowercase), 0-9, and _ (underscore).

    ", + "refs": { + "AnalysisScheme$AnalysisSchemeName": null, + "DeleteAnalysisSchemeRequest$AnalysisSchemeName": "

    The name of the analysis scheme you want to delete.

    ", + "DeleteExpressionRequest$ExpressionName": "

    The name of the Expression to delete.

    ", + "DeleteSuggesterRequest$SuggesterName": "

    Specifies the name of the suggester you want to delete.

    ", + "Expression$ExpressionName": null, + "StandardNameList$member": null, + "Suggester$SuggesterName": null + } + }, + "StandardNameList": { + "base": null, + "refs": { + "DescribeAnalysisSchemesRequest$AnalysisSchemeNames": "

    The analysis schemes you want to describe.

    ", + "DescribeExpressionsRequest$ExpressionNames": "

    Limits the DescribeExpressions response to the specified expressions. If not specified, all expressions are shown.

    ", + "DescribeSuggestersRequest$SuggesterNames": "

    The suggesters you want to describe.

    " + } + }, + "String": { + "base": null, + "refs": { + "AnalysisOptions$Synonyms": "

    A JSON object that defines synonym groups and aliases. A synonym group is an array of arrays, where each sub-array is a group of terms where each term in the group is considered a synonym of every other term in the group. The aliases value is an object that contains a collection of string:value pairs where the string specifies a term and the array of values specifies each of the aliases for that term. An alias is considered a synonym of the specified term, but the term is not considered a synonym of the alias. For more information about specifying synonyms, see Synonyms in the Amazon CloudSearch Developer Guide.

    ", + "AnalysisOptions$Stopwords": "

    A JSON array of terms to ignore during indexing and searching. For example, [\"a\", \"an\", \"the\", \"of\"]. The stopwords dictionary must explicitly list each word you want to ignore. Wildcards and regular expressions are not supported.

    ", + "AnalysisOptions$StemmingDictionary": "

    A JSON object that contains a collection of string:value pairs that each map a term to its stem. For example, {\"term1\": \"stem1\", \"term2\": \"stem2\", \"term3\": \"stem3\"}. The stemming dictionary is applied in addition to any algorithmic stemming. This enables you to override the results of the algorithmic stemming to correct specific cases of overstemming or understemming. The maximum size of a stemming dictionary is 500 KB.

    ", + "AnalysisOptions$JapaneseTokenizationDictionary": "

    A JSON array that contains a collection of terms, tokens, readings and part of speech for Japanese Tokenizaiton. The Japanese tokenization dictionary enables you to override the default tokenization for selected terms. This is only valid for Japanese language fields.

    ", + "DocumentSuggesterOptions$SortExpression": "

    An expression that computes a score for each suggestion to control how they are sorted. The scores are rounded to the nearest integer, with a floor of 0 and a ceiling of 2^31-1. A document's relevance score is not computed for suggestions, so sort expressions cannot reference the _score value. To sort suggestions using a numeric field or existing expression, simply specify the name of the field or expression. If no expression is configured for the suggester, the suggestions are sorted with the closest matches listed first.

    " + } + }, + "Suggester": { + "base": "

    Configuration information for a search suggester. Each suggester has a unique name and specifies the text field you want to use for suggestions. The following options can be configured for a suggester: FuzzyMatching, SortExpression.

    ", + "refs": { + "DefineSuggesterRequest$Suggester": null, + "SuggesterStatus$Options": null + } + }, + "SuggesterFuzzyMatching": { + "base": null, + "refs": { + "DocumentSuggesterOptions$FuzzyMatching": "

    The level of fuzziness allowed when suggesting matches for a string: none, low, or high. With none, the specified string is treated as an exact prefix. With low, suggestions must differ from the specified string by no more than one character. With high, suggestions can differ by up to two characters. The default is none.

    " + } + }, + "SuggesterStatus": { + "base": "

    The value of a Suggester and its current status.

    ", + "refs": { + "DefineSuggesterResponse$Suggester": null, + "DeleteSuggesterResponse$Suggester": "

    The status of the suggester being deleted.

    ", + "SuggesterStatusList$member": null + } + }, + "SuggesterStatusList": { + "base": "

    Contains the status of multiple suggesters.

    ", + "refs": { + "DescribeSuggestersResponse$Suggesters": "

    The suggesters configured for the domain specified in the request.

    " + } + }, + "TextArrayOptions": { + "base": "

    Options for a field that contains an array of text strings. Present if IndexFieldType specifies the field is of type text-array. A text-array field is always searchable. All options are enabled by default.

    ", + "refs": { + "IndexField$TextArrayOptions": null + } + }, + "TextOptions": { + "base": "

    Options for text field. Present if IndexFieldType specifies the field is of type text. A text field is always searchable. All options are enabled by default.

    ", + "refs": { + "IndexField$TextOptions": null + } + }, + "UIntValue": { + "base": null, + "refs": { + "OptionStatus$UpdateVersion": "

    A unique integer that indicates when this option was last updated.

    ", + "ScalingParameters$DesiredReplicationCount": "

    The number of replicas you want to preconfigure for each index partition.

    ", + "ScalingParameters$DesiredPartitionCount": "

    The number of partitions you want to preconfigure for your domain. Only valid when you select m2.2xlarge as the desired instance type.

    " + } + }, + "UpdateAvailabilityOptionsRequest": { + "base": "

    Container for the parameters to the UpdateAvailabilityOptions operation. Specifies the name of the domain you want to update and the Multi-AZ availability option.

    ", + "refs": { + } + }, + "UpdateAvailabilityOptionsResponse": { + "base": "

    The result of a UpdateAvailabilityOptions request. Contains the status of the domain's availability options.

    ", + "refs": { + } + }, + "UpdateScalingParametersRequest": { + "base": "

    Container for the parameters to the UpdateScalingParameters operation. Specifies the name of the domain you want to update and the scaling parameters you want to configure.

    ", + "refs": { + } + }, + "UpdateScalingParametersResponse": { + "base": "

    The result of a UpdateScalingParameters request. Contains the status of the newly-configured scaling parameters.

    ", + "refs": { + } + }, + "UpdateServiceAccessPoliciesRequest": { + "base": "

    Container for the parameters to the UpdateServiceAccessPolicies operation. Specifies the name of the domain you want to update and the access rules you want to configure.

    ", + "refs": { + } + }, + "UpdateServiceAccessPoliciesResponse": { + "base": "

    The result of an UpdateServiceAccessPolicies request. Contains the new access policies.

    ", + "refs": { + } + }, + "UpdateTimestamp": { + "base": null, + "refs": { + "OptionStatus$CreationDate": "

    A timestamp for when this option was created.

    ", + "OptionStatus$UpdateDate": "

    A timestamp for when this option was last updated.

    " + } + }, + "Word": { + "base": null, + "refs": { + "TextArrayOptions$AnalysisScheme": "

    The name of an analysis scheme for a text-array field.

    ", + "TextOptions$AnalysisScheme": "

    The name of an analysis scheme for a text field.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearch/2013-01-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,20 @@ +{ + "pagination": { + "DescribeAnalysisSchemes": { + "result_key": "AnalysisSchemes" + }, + "DescribeDomains": { + "result_key": "DomainStatusList" + }, + "DescribeExpressions": { + "result_key": "Expressions" + }, + "DescribeIndexFields": { + "result_key": "IndexFields" + }, + "DescribeSuggesters": { + "result_key": "Suggesters" + } + } +} + diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,356 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-01-01", + "endpointPrefix":"cloudsearchdomain", + "jsonVersion":"1.1", + "serviceFullName":"Amazon CloudSearch Domain", + "signatureVersion":"v4", + "signingName":"cloudsearch", + "protocol":"rest-json" + }, + "operations":{ + "Search":{ + "name":"Search", + "http":{ + "method":"GET", + 
"requestUri":"/2013-01-01/search?format=sdk&pretty=true" + }, + "input":{"shape":"SearchRequest"}, + "output":{"shape":"SearchResponse"}, + "errors":[ + { + "shape":"SearchException", + "exception":true + } + ] + }, + "Suggest":{ + "name":"Suggest", + "http":{ + "method":"GET", + "requestUri":"/2013-01-01/suggest?format=sdk&pretty=true" + }, + "input":{"shape":"SuggestRequest"}, + "output":{"shape":"SuggestResponse"}, + "errors":[ + { + "shape":"SearchException", + "exception":true + } + ] + }, + "UploadDocuments":{ + "name":"UploadDocuments", + "http":{ + "method":"POST", + "requestUri":"/2013-01-01/documents/batch?format=sdk" + }, + "input":{"shape":"UploadDocumentsRequest"}, + "output":{"shape":"UploadDocumentsResponse"}, + "errors":[ + { + "shape":"DocumentServiceException", + "exception":true + } + ] + } + }, + "shapes":{ + "Adds":{"type":"long"}, + "Blob":{ + "type":"blob", + "streaming":true + }, + "Bucket":{ + "type":"structure", + "members":{ + "value":{"shape":"String"}, + "count":{"shape":"Long"} + } + }, + "BucketInfo":{ + "type":"structure", + "members":{ + "buckets":{"shape":"BucketList"} + } + }, + "BucketList":{ + "type":"list", + "member":{"shape":"Bucket"} + }, + "ContentType":{ + "type":"string", + "enum":[ + "application/json", + "application/xml" + ] + }, + "Cursor":{"type":"string"}, + "Deletes":{"type":"long"}, + "DocumentServiceException":{ + "type":"structure", + "members":{ + "status":{"shape":"String"}, + "message":{"shape":"String"} + }, + "exception":true + }, + "DocumentServiceWarning":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + } + }, + "DocumentServiceWarnings":{ + "type":"list", + "member":{"shape":"DocumentServiceWarning"} + }, + "Expr":{"type":"string"}, + "Exprs":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Facet":{"type":"string"}, + "Facets":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"BucketInfo"} + }, + "FieldValue":{ + "type":"list", + 
"member":{"shape":"String"} + }, + "Fields":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"FieldValue"} + }, + "FilterQuery":{"type":"string"}, + "Highlight":{"type":"string"}, + "Highlights":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Hit":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "fields":{"shape":"Fields"}, + "exprs":{"shape":"Exprs"}, + "highlights":{"shape":"Highlights"} + } + }, + "HitList":{ + "type":"list", + "member":{"shape":"Hit"} + }, + "Hits":{ + "type":"structure", + "members":{ + "found":{"shape":"Long"}, + "start":{"shape":"Long"}, + "cursor":{"shape":"String"}, + "hit":{"shape":"HitList"} + } + }, + "Long":{"type":"long"}, + "Partial":{"type":"boolean"}, + "Query":{"type":"string"}, + "QueryOptions":{"type":"string"}, + "QueryParser":{ + "type":"string", + "enum":[ + "simple", + "structured", + "lucene", + "dismax" + ] + }, + "Return":{"type":"string"}, + "SearchException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "SearchRequest":{ + "type":"structure", + "required":["query"], + "members":{ + "cursor":{ + "shape":"Cursor", + "location":"querystring", + "locationName":"cursor" + }, + "expr":{ + "shape":"Expr", + "location":"querystring", + "locationName":"expr" + }, + "facet":{ + "shape":"Facet", + "location":"querystring", + "locationName":"facet" + }, + "filterQuery":{ + "shape":"FilterQuery", + "location":"querystring", + "locationName":"fq" + }, + "highlight":{ + "shape":"Highlight", + "location":"querystring", + "locationName":"highlight" + }, + "partial":{ + "shape":"Partial", + "location":"querystring", + "locationName":"partial" + }, + "query":{ + "shape":"Query", + "location":"querystring", + "locationName":"q" + }, + "queryOptions":{ + "shape":"QueryOptions", + "location":"querystring", + "locationName":"q.options" + }, + "queryParser":{ + "shape":"QueryParser", + "location":"querystring", + 
"locationName":"q.parser" + }, + "return":{ + "shape":"Return", + "location":"querystring", + "locationName":"return" + }, + "size":{ + "shape":"Size", + "location":"querystring", + "locationName":"size" + }, + "sort":{ + "shape":"Sort", + "location":"querystring", + "locationName":"sort" + }, + "start":{ + "shape":"Start", + "location":"querystring", + "locationName":"start" + } + } + }, + "SearchResponse":{ + "type":"structure", + "members":{ + "status":{"shape":"SearchStatus"}, + "hits":{"shape":"Hits"}, + "facets":{"shape":"Facets"} + } + }, + "SearchStatus":{ + "type":"structure", + "members":{ + "timems":{"shape":"Long"}, + "rid":{"shape":"String"} + } + }, + "Size":{"type":"long"}, + "Sort":{"type":"string"}, + "Start":{"type":"long"}, + "String":{"type":"string"}, + "SuggestModel":{ + "type":"structure", + "members":{ + "query":{"shape":"String"}, + "found":{"shape":"Long"}, + "suggestions":{"shape":"Suggestions"} + } + }, + "SuggestRequest":{ + "type":"structure", + "required":[ + "query", + "suggester" + ], + "members":{ + "query":{ + "shape":"Query", + "location":"querystring", + "locationName":"q" + }, + "suggester":{ + "shape":"Suggester", + "location":"querystring", + "locationName":"suggester" + }, + "size":{ + "shape":"SuggestionsSize", + "location":"querystring", + "locationName":"size" + } + } + }, + "SuggestResponse":{ + "type":"structure", + "members":{ + "status":{"shape":"SuggestStatus"}, + "suggest":{"shape":"SuggestModel"} + } + }, + "SuggestStatus":{ + "type":"structure", + "members":{ + "timems":{"shape":"Long"}, + "rid":{"shape":"String"} + } + }, + "Suggester":{"type":"string"}, + "SuggestionMatch":{ + "type":"structure", + "members":{ + "suggestion":{"shape":"String"}, + "score":{"shape":"Long"}, + "id":{"shape":"String"} + } + }, + "Suggestions":{ + "type":"list", + "member":{"shape":"SuggestionMatch"} + }, + "SuggestionsSize":{"type":"long"}, + "UploadDocumentsRequest":{ + "type":"structure", + "required":[ + "documents", + 
"contentType" + ], + "members":{ + "documents":{"shape":"Blob"}, + "contentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"documents" + }, + "UploadDocumentsResponse":{ + "type":"structure", + "members":{ + "status":{"shape":"String"}, + "adds":{"shape":"Adds"}, + "deletes":{"shape":"Deletes"}, + "warnings":{"shape":"DocumentServiceWarnings"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudsearchdomain/2013-01-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,311 @@ +{ + "version": "2.0", + "operations": { + "Search": "

    Retrieves a list of documents that match the specified search criteria. How you specify the search criteria depends on which query parser you use. Amazon CloudSearch supports four query parsers:

    • simple: search all text and text-array fields for the specified string. Search for phrases, individual terms, and prefixes.
    • structured: search specific fields, construct compound queries using Boolean operators, and use advanced features such as term boosting and proximity searching.
    • lucene: specify search criteria using the Apache Lucene query parser syntax.
    • dismax: specify search criteria using the simplified subset of the Apache Lucene query parser syntax defined by the DisMax query parser.

    For more information, see Searching Your Data in the Amazon CloudSearch Developer Guide.

    The endpoint for submitting Search requests is domain-specific. You submit search requests to a domain's search endpoint. To get the search endpoint for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. A domain's endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console.

    ", + "Suggest": "

    Retrieves autocomplete suggestions for a partial query string. You can use suggestions to display likely matches before users finish typing. In Amazon CloudSearch, suggestions are based on the contents of a particular text field. When you request suggestions, Amazon CloudSearch finds all of the documents whose values in the suggester field start with the specified query string. The beginning of the field must match the query string to be considered a match.

    For more information about configuring suggesters and retrieving suggestions, see Getting Suggestions in the Amazon CloudSearch Developer Guide.

    The endpoint for submitting Suggest requests is domain-specific. You submit suggest requests to a domain's search endpoint. To get the search endpoint for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. A domain's endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console.

    ", + "UploadDocuments": "

    Posts a batch of documents to a search domain for indexing. A document batch is a collection of add and delete operations that represent the documents you want to add, update, or delete from your domain. Batches can be described in either JSON or XML. Each item that you want Amazon CloudSearch to return as a search result (such as a product) is represented as a document. Every document has a unique ID and one or more fields that contain the data that you want to search and return in results. Individual documents cannot contain more than 1 MB of data. The entire batch cannot exceed 5 MB. To get the best possible upload performance, group add and delete operations in batches that are close to the 5 MB limit. Submitting a large volume of single-document batches can overload a domain's document service.

    The endpoint for submitting UploadDocuments requests is domain-specific. To get the document endpoint for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. A domain's endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console.

    For more information about formatting your data for Amazon CloudSearch, see Preparing Your Data in the Amazon CloudSearch Developer Guide. For more information about uploading data for indexing, see Uploading Data in the Amazon CloudSearch Developer Guide.

    " + }, + "service": "

    You use the AmazonCloudSearch2013 API to upload documents to a search domain and search those documents.

    The endpoints for submitting UploadDocuments, Search, and Suggest requests are domain-specific. To get the endpoints for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. The domain endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console. You submit suggest requests to the search endpoint.

    For more information, see the Amazon CloudSearch Developer Guide.

    ", + "shapes": { + "Adds": { + "base": null, + "refs": { + "UploadDocumentsResponse$adds": "

    The number of documents that were added to the search domain.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "UploadDocumentsRequest$documents": "

    A batch of documents formatted in JSON or XML.

    " + } + }, + "Bucket": { + "base": "

    A container for facet information.

    ", + "refs": { + "BucketList$member": null + } + }, + "BucketInfo": { + "base": "

    A container for the calculated facet values and counts.

    ", + "refs": { + "Facets$value": null + } + }, + "BucketList": { + "base": null, + "refs": { + "BucketInfo$buckets": "

    A list of the calculated facet values and counts.

    " + } + }, + "ContentType": { + "base": null, + "refs": { + "UploadDocumentsRequest$contentType": "

    The format of the batch you are uploading. Amazon CloudSearch supports two document batch formats:

    • application/json
    • application/xml
    " + } + }, + "Cursor": { + "base": null, + "refs": { + "SearchRequest$cursor": "

    Retrieves a cursor value you can use to page through large result sets. Use the size parameter to control the number of hits to include in each response. You can specify either the cursor or start parameter in a request; they are mutually exclusive. To get the first cursor, set the cursor value to initial. In subsequent requests, specify the cursor value returned in the hits section of the response.

    For more information, see Paginating Results in the Amazon CloudSearch Developer Guide.

    " + } + }, + "Deletes": { + "base": null, + "refs": { + "UploadDocumentsResponse$deletes": "

    The number of documents that were deleted from the search domain.

    " + } + }, + "DocumentServiceException": { + "base": "

    Information about any problems encountered while processing an upload request.

    ", + "refs": { + } + }, + "DocumentServiceWarning": { + "base": "

    A warning returned by the document service when an issue is discovered while processing an upload request.

    ", + "refs": { + "DocumentServiceWarnings$member": null + } + }, + "DocumentServiceWarnings": { + "base": null, + "refs": { + "UploadDocumentsResponse$warnings": "

    Any warnings returned by the document service about the documents being uploaded.

    " + } + }, + "Expr": { + "base": null, + "refs": { + "SearchRequest$expr": "

    Defines one or more numeric expressions that can be used to sort results or specify search or filter criteria. You can also specify expressions as return fields.

    You specify the expressions in JSON using the form {\"EXPRESSIONNAME\":\"EXPRESSION\"}. You can define and use multiple expressions in a search request. For example:

    {\"expression1\":\"_score*rating\", \"expression2\":\"(1/rank)*year\"}

    For information about the variables, operators, and functions you can use in expressions, see Writing Expressions in the Amazon CloudSearch Developer Guide.

    " + } + }, + "Exprs": { + "base": null, + "refs": { + "Hit$exprs": "

    The expressions returned from a document that matches the search request.

    " + } + }, + "Facet": { + "base": null, + "refs": { + "SearchRequest$facet": "

    Specifies one or more fields for which to get facet information, and options that control how the facet information is returned. Each specified field must be facet-enabled in the domain configuration. The fields and options are specified in JSON using the form {\"FIELD\":{\"OPTION\":VALUE,\"OPTION:\"STRING\"},\"FIELD\":{\"OPTION\":VALUE,\"OPTION\":\"STRING\"}}.

    You can specify the following faceting options:

    • buckets specifies an array of the facet values or ranges to count. Ranges are specified using the same syntax that you use to search for a range of values. For more information, see Searching for a Range of Values in the Amazon CloudSearch Developer Guide. Buckets are returned in the order they are specified in the request. The sort and size options are not valid if you specify buckets.

    • size specifies the maximum number of facets to include in the results. By default, Amazon CloudSearch returns counts for the top 10. The size parameter is only valid when you specify the sort option; it cannot be used in conjunction with buckets.

    • sort specifies how you want to sort the facets in the results: bucket or count. Specify bucket to sort alphabetically or numerically by facet value (in ascending order). Specify count to sort by the facet counts computed for each facet value (in descending order). To retrieve facet counts for particular values or ranges of values, use the buckets option instead of sort.

    If no facet options are specified, facet counts are computed for all field values, the facets are sorted by facet count, and the top 10 facets are returned in the results.

    To count particular buckets of values, use the buckets option. For example, the following request uses the buckets option to calculate and return facet counts by decade.

    {\"year\":{\"buckets\":[\"[1970,1979]\",\"[1980,1989]\",\"[1990,1999]\",\"[2000,2009]\",\"[2010,}\"]}}

    To sort facets by facet count, use the count option. For example, the following request sets the sort option to count to sort the facet values by facet count, with the facet values that have the most matching documents listed first. Setting the size option to 3 returns only the top three facet values.

    {\"year\":{\"sort\":\"count\",\"size\":3}}

    To sort the facets by value, use the bucket option. For example, the following request sets the sort option to bucket to sort the facet values numerically by year, with earliest year listed first.

    {\"year\":{\"sort\":\"bucket\"}}

    For more information, see Getting and Using Facet Information in the Amazon CloudSearch Developer Guide.

    " + } + }, + "Facets": { + "base": null, + "refs": { + "SearchResponse$facets": "

    The requested facet information.

    " + } + }, + "FieldValue": { + "base": null, + "refs": { + "Fields$value": null + } + }, + "Fields": { + "base": null, + "refs": { + "Hit$fields": "

    The fields returned from a document that matches the search request.

    " + } + }, + "FilterQuery": { + "base": null, + "refs": { + "SearchRequest$filterQuery": "

    Specifies a structured query that filters the results of a search without affecting how the results are scored and sorted. You use filterQuery in conjunction with the query parameter to filter the documents that match the constraints specified in the query parameter. Specifying a filter controls only which matching documents are included in the results, it has no effect on how they are scored and sorted. The filterQuery parameter supports the full structured query syntax.

    For more information about using filters, see Filtering Matching Documents in the Amazon CloudSearch Developer Guide.

    " + } + }, + "Highlight": { + "base": null, + "refs": { + "SearchRequest$highlight": "

    Retrieves highlights for matches in the specified text or text-array fields. Each specified field must be highlight enabled in the domain configuration. The fields and options are specified in JSON using the form {\"FIELD\":{\"OPTION\":VALUE,\"OPTION:\"STRING\"},\"FIELD\":{\"OPTION\":VALUE,\"OPTION\":\"STRING\"}}.

    You can specify the following highlight options:

    • format: specifies the format of the data in the text field: text or html. When data is returned as HTML, all non-alphanumeric characters are encoded. The default is html.
    • max_phrases: specifies the maximum number of occurrences of the search term(s) you want to highlight. By default, the first occurrence is highlighted.
    • pre_tag: specifies the string to prepend to an occurrence of a search term. The default for HTML highlights is &lt;em&gt;. The default for text highlights is *.
    • post_tag: specifies the string to append to an occurrence of a search term. The default for HTML highlights is &lt;/em&gt;. The default for text highlights is *.

    If no highlight options are specified for a field, the returned field text is treated as HTML and the first match is highlighted with emphasis tags: &lt;em&gt;search-term&lt;/em&gt;.

    For example, the following request retrieves highlights for the actors and title fields.

    { \"actors\": {}, \"title\": {\"format\": \"text\",\"max_phrases\": 2,\"pre_tag\": \"\",\"post_tag\": \"\"} }

    " + } + }, + "Highlights": { + "base": null, + "refs": { + "Hit$highlights": "

    The highlights returned from a document that matches the search request.

    " + } + }, + "Hit": { + "base": "

    Information about a document that matches the search request.

    ", + "refs": { + "HitList$member": null + } + }, + "HitList": { + "base": null, + "refs": { + "Hits$hit": "

    A document that matches the search request.

    " + } + }, + "Hits": { + "base": "

    The collection of documents that match the search request.

    ", + "refs": { + "SearchResponse$hits": "

    The documents that match the search criteria.

    " + } + }, + "Long": { + "base": null, + "refs": { + "Bucket$count": "

    The number of hits that contain the facet value in the specified facet field.

    ", + "Hits$found": "

    The total number of documents that match the search request.

    ", + "Hits$start": "

    The index of the first matching document.

    ", + "SearchStatus$timems": "

    How long it took to process the request, in milliseconds.

    ", + "SuggestModel$found": "

    The number of documents that were found to match the query string.

    ", + "SuggestStatus$timems": "

    How long it took to process the request, in milliseconds.

    ", + "SuggestionMatch$score": "

    The relevance score of a suggested match.

    " + } + }, + "Partial": { + "base": null, + "refs": { + "SearchRequest$partial": "

    Enables partial results to be returned if one or more index partitions are unavailable. When your search index is partitioned across multiple search instances, by default Amazon CloudSearch only returns results if every partition can be queried. This means that the failure of a single search instance can result in 5xx (internal server) errors. When you enable partial results, Amazon CloudSearch returns whatever results are available and includes the percentage of documents searched in the search results (percent-searched). This enables you to more gracefully degrade your users' search experience. For example, rather than displaying no results, you could display the partial results and a message indicating that the results might be incomplete due to a temporary system outage.

    " + } + }, + "Query": { + "base": null, + "refs": { + "SearchRequest$query": "

    Specifies the search criteria for the request. How you specify the search criteria depends on the query parser used for the request and the parser options specified in the queryOptions parameter. By default, the simple query parser is used to process requests. To use the structured, lucene, or dismax query parser, you must also specify the queryParser parameter.

    For more information about specifying search criteria, see Searching Your Data in the Amazon CloudSearch Developer Guide.

    ", + "SuggestRequest$query": "

    Specifies the string for which you want to get suggestions.

    " + } + }, + "QueryOptions": { + "base": null, + "refs": { + "SearchRequest$queryOptions": "

    Configures options for the query parser specified in the queryParser parameter. You specify the options in JSON using the following form {\"OPTION1\":\"VALUE1\",\"OPTION2\":VALUE2\"...\"OPTIONN\":\"VALUEN\"}.

    The options you can configure vary according to which parser you use:

    • defaultOperator: The default operator used to combine individual terms in the search string. For example: defaultOperator: 'or'. For the dismax parser, you specify a percentage that represents the percentage of terms in the search string (rounded down) that must match, rather than a default operator. A value of 0% is the equivalent to OR, and a value of 100% is equivalent to AND. The percentage must be specified as a value in the range 0-100 followed by the percent (%) symbol. For example, defaultOperator: 50%. Valid values: and, or, a percentage in the range 0%-100% (dismax). Default: and (simple, structured, lucene) or 100 (dismax). Valid for: simple, structured, lucene, and dismax.
    • fields: An array of the fields to search when no fields are specified in a search. If no fields are specified in a search and this option is not specified, all text and text-array fields are searched. You can specify a weight for each field to control the relative importance of each field when Amazon CloudSearch calculates relevance scores. To specify a field weight, append a caret (^) symbol and the weight to the field name. For example, to boost the importance of the title field over the description field you could specify: \"fields\":[\"title^5\",\"description\"]. Valid values: The name of any configured field and an optional numeric value greater than zero. Default: All text and text-array fields. Valid for: simple, structured, lucene, and dismax.
    • operators: An array of the operators or special characters you want to disable for the simple query parser. If you disable the and, or, or not operators, the corresponding operators (+, |, -) have no special meaning and are dropped from the search string. Similarly, disabling prefix disables the wildcard operator (*) and disabling phrase disables the ability to search for phrases by enclosing phrases in double quotes. Disabling precedence disables the ability to control order of precedence using parentheses. Disabling near disables the ability to use the ~ operator to perform a sloppy phrase search. Disabling the fuzzy operator disables the ability to use the ~ operator to perform a fuzzy search. escape disables the ability to use a backslash (\\) to escape special characters within the search string. Disabling whitespace is an advanced option that prevents the parser from tokenizing on whitespace, which can be useful for Vietnamese. (It prevents Vietnamese words from being split incorrectly.) For example, you could disable all operators other than the phrase operator to support just simple term and phrase queries: \"operators\":[\"and\",\"not\",\"or\", \"prefix\"]. Valid values: and, escape, fuzzy, near, not, or, phrase, precedence, prefix, whitespace. Default: All operators and special characters are enabled. Valid for: simple.
    • phraseFields: An array of the text or text-array fields you want to use for phrase searches. When the terms in the search string appear in close proximity within a field, the field scores higher. You can specify a weight for each field to boost that score. The phraseSlop option controls how much the matches can deviate from the search string and still be boosted. To specify a field weight, append a caret (^) symbol and the weight to the field name. For example, to boost phrase matches in the title field over the abstract field, you could specify: \"phraseFields\":[\"title^3\", \"plot\"] Valid values: The name of any text or text-array field and an optional numeric value greater than zero. Default: No fields. If you don't specify any fields with phraseFields, proximity scoring is disabled even if phraseSlop is specified. Valid for: dismax.
    • phraseSlop: An integer value that specifies how much matches can deviate from the search phrase and still be boosted according to the weights specified in the phraseFields option; for example, phraseSlop: 2. You must also specify phraseFields to enable proximity scoring. Valid values: positive integers. Default: 0. Valid for: dismax.
    • explicitPhraseSlop: An integer value that specifies how much a match can deviate from the search phrase when the phrase is enclosed in double quotes in the search string. (Phrases that exceed this proximity distance are not considered a match.) For example, to specify a slop of three for dismax phrase queries, you would specify \"explicitPhraseSlop\":3. Valid values: positive integers. Default: 0. Valid for: dismax.
    • tieBreaker: When a term in the search string is found in a document's field, a score is calculated for that field based on how common the word is in that field compared to other documents. If the term occurs in multiple fields within a document, by default only the highest scoring field contributes to the document's overall score. You can specify a tieBreaker value to enable the matches in lower-scoring fields to contribute to the document's score. That way, if two documents have the same max field score for a particular term, the score for the document that has matches in more fields will be higher. The formula for calculating the score with a tieBreaker is (max field score) + (tieBreaker) * (sum of the scores for the rest of the matching fields). Set tieBreaker to 0 to disregard all but the highest scoring field (pure max): \"tieBreaker\":0. Set to 1 to sum the scores from all fields (pure sum): \"tieBreaker\":1. Valid values: 0.0 to 1.0. Default: 0.0. Valid for: dismax.
    " + } + }, + "QueryParser": { + "base": null, + "refs": { + "SearchRequest$queryParser": "

    Specifies which query parser to use to process the request. If queryParser is not specified, Amazon CloudSearch uses the simple query parser.

    Amazon CloudSearch supports four query parsers:

    • simple: perform simple searches of text and text-array fields. By default, the simple query parser searches all text and text-array fields. You can specify which fields to search by with the queryOptions parameter. If you prefix a search term with a plus sign (+) documents must contain the term to be considered a match. (This is the default, unless you configure the default operator with the queryOptions parameter.) You can use the - (NOT), | (OR), and * (wildcard) operators to exclude particular terms, find results that match any of the specified terms, or search for a prefix. To search for a phrase rather than individual terms, enclose the phrase in double quotes. For more information, see Searching for Text in the Amazon CloudSearch Developer Guide.
    • structured: perform advanced searches by combining multiple expressions to define the search criteria. You can also search within particular fields, search for values and ranges of values, and use advanced options such as term boosting, matchall, and near. For more information, see Constructing Compound Queries in the Amazon CloudSearch Developer Guide.
    • lucene: search using the Apache Lucene query parser syntax. For more information, see Apache Lucene Query Parser Syntax.
    • dismax: search using the simplified subset of the Apache Lucene query parser syntax defined by the DisMax query parser. For more information, see DisMax Query Parser Syntax.
    " + } + }, + "Return": { + "base": null, + "refs": { + "SearchRequest$return": "

    Specifies the field and expression values to include in the response. Multiple fields or expressions are specified as a comma-separated list. By default, a search response includes all return enabled fields (_all_fields). To return only the document IDs for the matching documents, specify _no_fields. To retrieve the relevance score calculated for each document, specify _score.

    " + } + }, + "SearchException": { + "base": "

    Information about any problems encountered while processing a search request.

    ", + "refs": { + } + }, + "SearchRequest": { + "base": "

    Container for the parameters to the Search request.

    ", + "refs": { + } + }, + "SearchResponse": { + "base": "

    The result of a Search request. Contains the documents that match the specified search criteria and any requested fields, highlights, and facet information.

    ", + "refs": { + } + }, + "SearchStatus": { + "base": "

    Contains the resource id (rid) and the time it took to process the request (timems).

    ", + "refs": { + "SearchResponse$status": "

    The status information returned for the search request.

    " + } + }, + "Size": { + "base": null, + "refs": { + "SearchRequest$size": "

    Specifies the maximum number of search hits to include in the response.

    " + } + }, + "Sort": { + "base": null, + "refs": { + "SearchRequest$sort": "

    Specifies the fields or custom expressions to use to sort the search results. Multiple fields or expressions are specified as a comma-separated list. You must specify the sort direction (asc or desc) for each field; for example, year desc,title asc. To use a field to sort results, the field must be sort-enabled in the domain configuration. Array type fields cannot be used for sorting. If no sort parameter is specified, results are sorted by their default relevance scores in descending order: _score desc. You can also sort by document ID (_id asc) and version (_version desc).

    For more information, see Sorting Results in the Amazon CloudSearch Developer Guide.

    " + } + }, + "Start": { + "base": null, + "refs": { + "SearchRequest$start": "

    Specifies the offset of the first search hit you want to return. Note that the result set is zero-based; the first result is at index 0. You can specify either the start or cursor parameter in a request, they are mutually exclusive.

    For more information, see Paginating Results in the Amazon CloudSearch Developer Guide.

    " + } + }, + "String": { + "base": null, + "refs": { + "Bucket$value": "

    The facet value being counted.

    ", + "DocumentServiceException$status": "

    The return status of a document upload request, error or success.

    ", + "DocumentServiceException$message": "

    The description of the errors returned by the document service.

    ", + "DocumentServiceWarning$message": "

    The description for a warning returned by the document service.

    ", + "Exprs$key": null, + "Exprs$value": null, + "Facets$key": null, + "FieldValue$member": null, + "Fields$key": null, + "Highlights$key": null, + "Highlights$value": null, + "Hit$id": "

    The document ID of a document that matches the search request.

    ", + "Hits$cursor": "

    A cursor that can be used to retrieve the next set of matching documents when you want to page through a large result set.

    ", + "SearchException$message": "

    A description of the error returned by the search service.

    ", + "SearchStatus$rid": "

    The encrypted resource ID for the request.

    ", + "SuggestModel$query": "

    The query string specified in the suggest request.

    ", + "SuggestStatus$rid": "

    The encrypted resource ID for the request.

    ", + "SuggestionMatch$suggestion": "

    The string that matches the query string specified in the SuggestRequest.

    ", + "SuggestionMatch$id": "

    The document ID of the suggested document.

    ", + "UploadDocumentsResponse$status": "

    The status of an UploadDocumentsRequest.

    " + } + }, + "SuggestModel": { + "base": "

    Container for the suggestion information returned in a SuggestResponse.

    ", + "refs": { + "SuggestResponse$suggest": "

    Container for the matching search suggestion information.

    " + } + }, + "SuggestRequest": { + "base": "

    Container for the parameters to the Suggest request.

    ", + "refs": { + } + }, + "SuggestResponse": { + "base": "

    Contains the response to a Suggest request.

    ", + "refs": { + } + }, + "SuggestStatus": { + "base": "

    Contains the resource id (rid) and the time it took to process the request (timems).

    ", + "refs": { + "SuggestResponse$status": "

    The status of a SuggestRequest. Contains the resource ID (rid) and how long it took to process the request (timems).

    " + } + }, + "Suggester": { + "base": null, + "refs": { + "SuggestRequest$suggester": "

    Specifies the name of the suggester to use to find suggested matches.

    " + } + }, + "SuggestionMatch": { + "base": "

    An autocomplete suggestion that matches the query string specified in a SuggestRequest.

    ", + "refs": { + "Suggestions$member": null + } + }, + "Suggestions": { + "base": null, + "refs": { + "SuggestModel$suggestions": "

    The documents that match the query string.

    " + } + }, + "SuggestionsSize": { + "base": null, + "refs": { + "SuggestRequest$size": "

    Specifies the maximum number of suggestions to return.

    " + } + }, + "UploadDocumentsRequest": { + "base": "

    Container for the parameters to the UploadDocuments request.

    ", + "refs": { + } + }, + "UploadDocumentsResponse": { + "base": "

    Contains the response to an UploadDocuments request.

    ", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,768 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-11-01", + "endpointPrefix":"cloudtrail", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CloudTrail", + "serviceFullName":"AWS CloudTrail", + "signatureVersion":"v4", + "targetPrefix":"com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101" + }, + "operations":{ + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsRequest"}, + "output":{"shape":"AddTagsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"ResourceTypeNotSupportedException"}, + {"shape":"TagsLimitExceededException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidTagParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ] + }, + "CreateTrail":{ + "name":"CreateTrail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTrailRequest"}, + "output":{"shape":"CreateTrailResponse"}, + "errors":[ + {"shape":"MaximumNumberOfTrailsExceededException"}, + {"shape":"TrailAlreadyExistsException"}, + {"shape":"S3BucketDoesNotExistException"}, + {"shape":"InsufficientS3BucketPolicyException"}, + {"shape":"InsufficientSnsTopicPolicyException"}, + 
{"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"InvalidS3BucketNameException"}, + {"shape":"InvalidS3PrefixException"}, + {"shape":"InvalidSnsTopicNameException"}, + {"shape":"InvalidKmsKeyIdException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"TrailNotProvidedException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"KmsKeyNotFoundException"}, + {"shape":"KmsKeyDisabledException"}, + {"shape":"InvalidCloudWatchLogsLogGroupArnException"}, + {"shape":"InvalidCloudWatchLogsRoleArnException"}, + {"shape":"CloudWatchLogsDeliveryUnavailableException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ] + }, + "DeleteTrail":{ + "name":"DeleteTrail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTrailRequest"}, + "output":{"shape":"DeleteTrailResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidHomeRegionException"} + ] + }, + "DescribeTrails":{ + "name":"DescribeTrails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrailsRequest"}, + "output":{"shape":"DescribeTrailsResponse"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ] + }, + "GetTrailStatus":{ + "name":"GetTrailStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTrailStatusRequest"}, + "output":{"shape":"GetTrailStatusResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"} + ] + }, + "ListPublicKeys":{ + "name":"ListPublicKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPublicKeysRequest"}, + "output":{"shape":"ListPublicKeysResponse"}, + "errors":[ + {"shape":"InvalidTimeRangeException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"InvalidTokenException"} + 
] + }, + "ListTags":{ + "name":"ListTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsRequest"}, + "output":{"shape":"ListTagsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"ResourceTypeNotSupportedException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"InvalidTokenException"} + ] + }, + "LookupEvents":{ + "name":"LookupEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LookupEventsRequest"}, + "output":{"shape":"LookupEventsResponse"}, + "errors":[ + {"shape":"InvalidLookupAttributesException"}, + {"shape":"InvalidTimeRangeException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsRequest"}, + "output":{"shape":"RemoveTagsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"ResourceTypeNotSupportedException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidTagParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ] + }, + "StartLogging":{ + "name":"StartLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartLoggingRequest"}, + "output":{"shape":"StartLoggingResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidHomeRegionException"} + ] + }, + "StopLogging":{ + "name":"StopLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopLoggingRequest"}, + "output":{"shape":"StopLoggingResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + 
{"shape":"InvalidHomeRegionException"} + ] + }, + "UpdateTrail":{ + "name":"UpdateTrail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTrailRequest"}, + "output":{"shape":"UpdateTrailResponse"}, + "errors":[ + {"shape":"S3BucketDoesNotExistException"}, + {"shape":"InsufficientS3BucketPolicyException"}, + {"shape":"InsufficientSnsTopicPolicyException"}, + {"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidS3BucketNameException"}, + {"shape":"InvalidS3PrefixException"}, + {"shape":"InvalidSnsTopicNameException"}, + {"shape":"InvalidKmsKeyIdException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"TrailNotProvidedException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InvalidHomeRegionException"}, + {"shape":"KmsKeyNotFoundException"}, + {"shape":"KmsKeyDisabledException"}, + {"shape":"InvalidCloudWatchLogsLogGroupArnException"}, + {"shape":"InvalidCloudWatchLogsRoleArnException"}, + {"shape":"CloudWatchLogsDeliveryUnavailableException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} + ] + } + }, + "shapes":{ + "AddTagsRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{"shape":"String"}, + "TagsList":{"shape":"TagsList"} + } + }, + "AddTagsResponse":{ + "type":"structure", + "members":{ + } + }, + "Boolean":{"type":"boolean"}, + "ByteBuffer":{"type":"blob"}, + "CloudTrailARNInvalidException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CloudWatchLogsDeliveryUnavailableException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CreateTrailRequest":{ + "type":"structure", + "required":[ + "Name", + "S3BucketName" + ], + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + 
"IsMultiRegionTrail":{"shape":"Boolean"}, + "EnableLogFileValidation":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateTrailResponse":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "TrailARN":{"shape":"String"}, + "LogFileValidationEnabled":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + }, + "Date":{"type":"timestamp"}, + "DeleteTrailRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"} + } + }, + "DeleteTrailResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeTrailsRequest":{ + "type":"structure", + "members":{ + "trailNameList":{"shape":"TrailNameList"}, + "includeShadowTrails":{"shape":"Boolean"} + } + }, + "DescribeTrailsResponse":{ + "type":"structure", + "members":{ + "trailList":{"shape":"TrailList"} + } + }, + "Event":{ + "type":"structure", + "members":{ + "EventId":{"shape":"String"}, + "EventName":{"shape":"String"}, + "EventTime":{"shape":"Date"}, + "Username":{"shape":"String"}, + "Resources":{"shape":"ResourceList"}, + "CloudTrailEvent":{"shape":"String"} + } + }, + "EventsList":{ + "type":"list", + "member":{"shape":"Event"} + }, + "GetTrailStatusRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"} + } + }, + "GetTrailStatusResponse":{ + "type":"structure", + "members":{ + "IsLogging":{"shape":"Boolean"}, + "LatestDeliveryError":{"shape":"String"}, + "LatestNotificationError":{"shape":"String"}, + "LatestDeliveryTime":{"shape":"Date"}, + "LatestNotificationTime":{"shape":"Date"}, + 
"StartLoggingTime":{"shape":"Date"}, + "StopLoggingTime":{"shape":"Date"}, + "LatestCloudWatchLogsDeliveryError":{"shape":"String"}, + "LatestCloudWatchLogsDeliveryTime":{"shape":"Date"}, + "LatestDigestDeliveryTime":{"shape":"Date"}, + "LatestDigestDeliveryError":{"shape":"String"}, + "LatestDeliveryAttemptTime":{"shape":"String"}, + "LatestNotificationAttemptTime":{"shape":"String"}, + "LatestNotificationAttemptSucceeded":{"shape":"String"}, + "LatestDeliveryAttemptSucceeded":{"shape":"String"}, + "TimeLoggingStarted":{"shape":"String"}, + "TimeLoggingStopped":{"shape":"String"} + } + }, + "InsufficientEncryptionPolicyException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InsufficientS3BucketPolicyException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InsufficientSnsTopicPolicyException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidCloudWatchLogsLogGroupArnException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidCloudWatchLogsRoleArnException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidHomeRegionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidKmsKeyIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidLookupAttributesException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidMaxResultsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidParameterCombinationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidS3BucketNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidS3PrefixException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSnsTopicNameException":{ + "type":"structure", + 
"members":{ + }, + "exception":true + }, + "InvalidTagParameterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTimeRangeException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTrailNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "KmsKeyDisabledException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "KmsKeyNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListPublicKeysRequest":{ + "type":"structure", + "members":{ + "StartTime":{"shape":"Date"}, + "EndTime":{"shape":"Date"}, + "NextToken":{"shape":"String"} + } + }, + "ListPublicKeysResponse":{ + "type":"structure", + "members":{ + "PublicKeyList":{"shape":"PublicKeyList"}, + "NextToken":{"shape":"String"} + } + }, + "ListTagsRequest":{ + "type":"structure", + "required":["ResourceIdList"], + "members":{ + "ResourceIdList":{"shape":"ResourceIdList"}, + "NextToken":{"shape":"String"} + } + }, + "ListTagsResponse":{ + "type":"structure", + "members":{ + "ResourceTagList":{"shape":"ResourceTagList"}, + "NextToken":{"shape":"String"} + } + }, + "LookupAttribute":{ + "type":"structure", + "required":[ + "AttributeKey", + "AttributeValue" + ], + "members":{ + "AttributeKey":{"shape":"LookupAttributeKey"}, + "AttributeValue":{"shape":"String"} + } + }, + "LookupAttributeKey":{ + "type":"string", + "enum":[ + "EventId", + "EventName", + "Username", + "ResourceType", + "ResourceName" + ] + }, + "LookupAttributesList":{ + "type":"list", + "member":{"shape":"LookupAttribute"} + }, + "LookupEventsRequest":{ + "type":"structure", + "members":{ + "LookupAttributes":{"shape":"LookupAttributesList"}, + "StartTime":{"shape":"Date"}, + "EndTime":{"shape":"Date"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "LookupEventsResponse":{ + 
"type":"structure", + "members":{ + "Events":{"shape":"EventsList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaximumNumberOfTrailsExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NextToken":{"type":"string"}, + "OperationNotPermittedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PublicKey":{ + "type":"structure", + "members":{ + "Value":{"shape":"ByteBuffer"}, + "ValidityStartTime":{"shape":"Date"}, + "ValidityEndTime":{"shape":"Date"}, + "Fingerprint":{"shape":"String"} + } + }, + "PublicKeyList":{ + "type":"list", + "member":{"shape":"PublicKey"} + }, + "RemoveTagsRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{"shape":"String"}, + "TagsList":{"shape":"TagsList"} + } + }, + "RemoveTagsResponse":{ + "type":"structure", + "members":{ + } + }, + "Resource":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"String"}, + "ResourceName":{"shape":"String"} + } + }, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ResourceList":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ResourceTag":{ + "type":"structure", + "members":{ + "ResourceId":{"shape":"String"}, + "TagsList":{"shape":"TagsList"} + } + }, + "ResourceTagList":{ + "type":"list", + "member":{"shape":"ResourceTag"} + }, + "ResourceTypeNotSupportedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "S3BucketDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "StartLoggingRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"} + } + }, + "StartLoggingResponse":{ + "type":"structure", + "members":{ + } + }, + "StopLoggingRequest":{ + "type":"structure", + "required":["Name"], 
+ "members":{ + "Name":{"shape":"String"} + } + }, + "StopLoggingResponse":{ + "type":"structure", + "members":{ + } + }, + "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagsLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TagsList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "Trail":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "HomeRegion":{"shape":"String"}, + "TrailARN":{"shape":"String"}, + "LogFileValidationEnabled":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + }, + "TrailAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TrailList":{ + "type":"list", + "member":{"shape":"Trail"} + }, + "TrailNameList":{ + "type":"list", + "member":{"shape":"String"} + }, + "TrailNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TrailNotProvidedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UpdateTrailRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "EnableLogFileValidation":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + 
"KmsKeyId":{"shape":"String"} + } + }, + "UpdateTrailResponse":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "S3BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "SnsTopicName":{"shape":"String"}, + "IncludeGlobalServiceEvents":{"shape":"Boolean"}, + "IsMultiRegionTrail":{"shape":"Boolean"}, + "TrailARN":{"shape":"String"}, + "LogFileValidationEnabled":{"shape":"Boolean"}, + "CloudWatchLogsLogGroupArn":{"shape":"String"}, + "CloudWatchLogsRoleArn":{"shape":"String"}, + "KmsKeyId":{"shape":"String"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,540 @@ +{ + "version": "2.0", + "service": "AWS CloudTrail

    This is the CloudTrail API Reference. It provides descriptions of actions, data types, common parameters, and common errors for CloudTrail.

    CloudTrail is a web service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. The recorded information includes the identity of the user, the start time of the AWS API call, the source IP address, the request parameters, and the response elements returned by the service.

    As an alternative to using the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWSCloudTrail. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page.

    See the CloudTrail User Guide for information about the data that is included with each AWS API call listed in the log files.

    ", + "operations": { + "AddTags": "

    Adds one or more tags to a trail, up to a limit of 10. Tags must be unique per trail. Overwrites an existing tag's value when a new value is specified for an existing tag key. If you specify a key without a value, the tag will be created with the specified key and a value of null. You can tag a trail that applies to all regions only from the region in which the trail was created (that is, from its home region).

    ", + "CreateTrail": "

    Creates a trail that specifies the settings for delivery of log data to an Amazon S3 bucket. A maximum of five trails can exist in a region, irrespective of the region in which they were created.

    ", + "DeleteTrail": "

    Deletes a trail. This operation must be called from the region in which the trail was created. DeleteTrail cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

    ", + "DescribeTrails": "

    Retrieves settings for the trail associated with the current region for your account.

    ", + "GetTrailStatus": "

    Returns a JSON-formatted list of information about the specified trail. Fields include information on delivery errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. This operation returns trail status from a single region. To return trail status from all regions, you must call the operation on each region.

    ", + "ListPublicKeys": "

    Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.

    CloudTrail uses different private/public key pairs per region. Each digest file is signed with a private key unique to its region. Therefore, when you validate a digest file from a particular region, you must look in the same region for its corresponding public key.", + "ListTags": "

    Lists the tags for the specified trail or trails in the current region.

    Lists the tags for the trail in the current region.

    ", + "LookupEvents": "

    Looks up API activity events captured by CloudTrail that create, update, or delete resources in your account. Events for a region can be looked up for the times in which you had CloudTrail turned on in that region during the last seven days. Lookup supports five different attributes: time range (defined by a start time and end time), user name, event name, resource type, and resource name. All attributes are optional. The maximum number of attributes that can be specified in any one lookup request are time range and one other attribute. The default number of results returned is 10, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

    The rate of lookup requests is limited to one per second per account. If this limit is exceeded, a throttling error occurs. Events that occurred during the selected time range will not be available for lookup if CloudTrail logging was not enabled when the events occurred.", + "RemoveTags": "

    Removes the specified tags from a trail.

    ", + "StartLogging": "

    Starts the recording of AWS API calls and log file delivery for a trail. For a trail that is enabled in all regions, this operation must be called from the region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

    ", + "StopLogging": "

    Suspends the recording of AWS API calls and log file delivery for the specified trail. Under most circumstances, there is no need to use this action. You can update a trail without stopping it first. This action is the only way to stop recording. For a trail enabled in all regions, this operation must be called from the region in which the trail was created, or an InvalidHomeRegionException will occur. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail enabled in all regions.

    ", + "UpdateTrail": "

    Updates the settings that specify delivery of log files. Changes to a trail do not require stopping the CloudTrail service. Use this action to designate an existing bucket for log delivery. If the existing bucket has previously been a target for CloudTrail log files, an IAM policy exists for the bucket. UpdateTrail must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown.

    " + }, + "shapes": { + "AddTagsRequest": { + "base": "

    Specifies the tags to add to a trail.

    ", + "refs": { + } + }, + "AddTagsResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "CreateTrailRequest$IncludeGlobalServiceEvents": "

    Specifies whether the trail is publishing events from global services such as IAM to the log files.

    ", + "CreateTrailRequest$IsMultiRegionTrail": "

    Specifies whether the trail is created in the current region or in all regions. The default is false.

    ", + "CreateTrailRequest$EnableLogFileValidation": "

    Specifies whether log file integrity validation is enabled. The default is false.

    When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail.", + "CreateTrailResponse$IncludeGlobalServiceEvents": "

    Specifies whether the trail is publishing events from global services such as IAM to the log files.

    ", + "CreateTrailResponse$IsMultiRegionTrail": "

    Specifies whether the trail exists in one region or in all regions.

    ", + "CreateTrailResponse$LogFileValidationEnabled": "

    Specifies whether log file integrity validation is enabled.

    ", + "DescribeTrailsRequest$includeShadowTrails": "

    Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region. The default is true.

    ", + "GetTrailStatusResponse$IsLogging": "

    Whether the CloudTrail is currently logging AWS API calls.

    ", + "Trail$IncludeGlobalServiceEvents": "

    Set to True to include AWS API calls from AWS global services such as IAM. Otherwise, False.

    ", + "Trail$IsMultiRegionTrail": "

    Specifies whether the trail belongs only to one region or exists in all regions.

    ", + "Trail$LogFileValidationEnabled": "

    Specifies whether log file validation is enabled.

    ", + "UpdateTrailRequest$IncludeGlobalServiceEvents": "

    Specifies whether the trail is publishing events from global services such as IAM to the log files.

    ", + "UpdateTrailRequest$IsMultiRegionTrail": "

    Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region where it was created, and its shadow trails in other regions will be deleted.

    ", + "UpdateTrailRequest$EnableLogFileValidation": "

    Specifies whether log file validation is enabled. The default is false.

    When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail.", + "UpdateTrailResponse$IncludeGlobalServiceEvents": "

    Specifies whether the trail is publishing events from global services such as IAM to the log files.

    ", + "UpdateTrailResponse$IsMultiRegionTrail": "

    Specifies whether the trail exists in one region or in all regions.

    ", + "UpdateTrailResponse$LogFileValidationEnabled": "

    Specifies whether log file integrity validation is enabled.

    " + } + }, + "ByteBuffer": { + "base": null, + "refs": { + "PublicKey$Value": "

    The DER encoded public key value in PKCS#1 format.

    " + } + }, + "CloudTrailARNInvalidException": { + "base": "

    This exception is thrown when an operation is called with an invalid trail ARN. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "refs": { + } + }, + "CloudWatchLogsDeliveryUnavailableException": { + "base": "

    Cannot set a CloudWatch Logs delivery for this region.

    ", + "refs": { + } + }, + "CreateTrailRequest": { + "base": "

    Specifies the settings for each trail.

    ", + "refs": { + } + }, + "CreateTrailResponse": { + "base": "Returns the objects or data listed below if successful. Otherwise, returns an error.", + "refs": { + } + }, + "Date": { + "base": null, + "refs": { + "Event$EventTime": "

    The date and time of the event returned.

    ", + "GetTrailStatusResponse$LatestDeliveryTime": "

    Specifies the date and time that CloudTrail last delivered log files to an account's Amazon S3 bucket.

    ", + "GetTrailStatusResponse$LatestNotificationTime": "

    Specifies the date and time of the most recent Amazon SNS notification that CloudTrail has written a new log file to an account's Amazon S3 bucket.

    ", + "GetTrailStatusResponse$StartLoggingTime": "

    Specifies the most recent date and time when CloudTrail started recording API calls for an AWS account.

    ", + "GetTrailStatusResponse$StopLoggingTime": "

    Specifies the most recent date and time when CloudTrail stopped recording API calls for an AWS account.

    ", + "GetTrailStatusResponse$LatestCloudWatchLogsDeliveryTime": "

    Displays the most recent date and time when CloudTrail delivered logs to CloudWatch Logs.

    ", + "GetTrailStatusResponse$LatestDigestDeliveryTime": "

    Specifies the date and time that CloudTrail last delivered a digest file to an account's Amazon S3 bucket.

    ", + "ListPublicKeysRequest$StartTime": "

    Optionally specifies, in UTC, the start of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used, and the current public key is returned.

    ", + "ListPublicKeysRequest$EndTime": "

    Optionally specifies, in UTC, the end of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used.

    ", + "LookupEventsRequest$StartTime": "

    Specifies that only events that occur after or at the specified time are returned. If the specified start time is after the specified end time, an error is returned.

    ", + "LookupEventsRequest$EndTime": "

    Specifies that only events that occur before or at the specified time are returned. If the specified end time is before the specified start time, an error is returned.

    ", + "PublicKey$ValidityStartTime": "

    The starting time of validity of the public key.

    ", + "PublicKey$ValidityEndTime": "

    The ending time of validity of the public key.

    " + } + }, + "DeleteTrailRequest": { + "base": "The request that specifies the name of a trail to delete.", + "refs": { + } + }, + "DeleteTrailResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "DescribeTrailsRequest": { + "base": "

    Returns information about the trail.

    ", + "refs": { + } + }, + "DescribeTrailsResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "Event": { + "base": "

    Contains information about an event that was returned by a lookup request. The result includes a representation of a CloudTrail event.

    ", + "refs": { + "EventsList$member": null + } + }, + "EventsList": { + "base": null, + "refs": { + "LookupEventsResponse$Events": "

    A list of events returned based on the lookup attributes specified and the CloudTrail event. The events list is sorted by time. The most recent event is listed first.

    " + } + }, + "GetTrailStatusRequest": { + "base": "

    The name of a trail about which you want the current status.

    ", + "refs": { + } + }, + "GetTrailStatusResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "InsufficientEncryptionPolicyException": { + "base": "This exception is thrown when the policy on the S3 bucket or KMS key is not sufficient.", + "refs": { + } + }, + "InsufficientS3BucketPolicyException": { + "base": "

    This exception is thrown when the policy on the S3 bucket is not sufficient.

    ", + "refs": { + } + }, + "InsufficientSnsTopicPolicyException": { + "base": "

    This exception is thrown when the policy on the SNS topic is not sufficient.

    ", + "refs": { + } + }, + "InvalidCloudWatchLogsLogGroupArnException": { + "base": "

    This exception is thrown when the provided CloudWatch log group is not valid.

    ", + "refs": { + } + }, + "InvalidCloudWatchLogsRoleArnException": { + "base": "

    This exception is thrown when the provided role is not valid.

    ", + "refs": { + } + }, + "InvalidHomeRegionException": { + "base": "

    This exception is thrown when an operation is called on a trail from a region other than the region in which the trail was created.

    ", + "refs": { + } + }, + "InvalidKmsKeyIdException": { + "base": "This exception is thrown when the KMS key ARN is invalid.", + "refs": { + } + }, + "InvalidLookupAttributesException": { + "base": "

    Occurs when an invalid lookup attribute is specified.

    ", + "refs": { + } + }, + "InvalidMaxResultsException": { + "base": "

    This exception is thrown if the limit specified is invalid.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    Invalid token or token that was previously used in a request with different parameters. This exception is thrown if the token is invalid.

    ", + "refs": { + } + }, + "InvalidParameterCombinationException": { + "base": "

    This exception is thrown when the combination of parameters provided is not valid.

    ", + "refs": { + } + }, + "InvalidS3BucketNameException": { + "base": "

    This exception is thrown when the provided S3 bucket name is not valid.

    ", + "refs": { + } + }, + "InvalidS3PrefixException": { + "base": "

    This exception is thrown when the provided S3 prefix is not valid.

    ", + "refs": { + } + }, + "InvalidSnsTopicNameException": { + "base": "

    This exception is thrown when the provided SNS topic name is not valid.

    ", + "refs": { + } + }, + "InvalidTagParameterException": { + "base": "

    This exception is thrown when the key or value specified for the tag does not match the regular expression ^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$.

    ", + "refs": { + } + }, + "InvalidTimeRangeException": { + "base": "

    Occurs if the timestamp values are invalid. Either the start time occurs after the end time or the time range is outside the range of possible values.

    ", + "refs": { + } + }, + "InvalidTokenException": { + "base": "

    Reserved for future use.

    ", + "refs": { + } + }, + "InvalidTrailNameException": { + "base": "

    This exception is thrown when the provided trail name is not valid. Trail names must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)
    • Start with a letter or number, and end with a letter or number
    • Be between 3 and 128 characters
    • Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid.
    • Not be in IP address format (for example, 192.168.5.4)
    ", + "refs": { + } + }, + "KmsKeyDisabledException": { + "base": "

    This exception is thrown when the KMS key is disabled.

    ", + "refs": { + } + }, + "KmsKeyNotFoundException": { + "base": "This exception is thrown when the KMS key does not exist, or when the S3 bucket and the KMS key are not in the same region.", + "refs": { + } + }, + "ListPublicKeysRequest": { + "base": "

    Requests the public keys for a specified time range.

    ", + "refs": { + } + }, + "ListPublicKeysResponse": { + "base": "Returns the objects or data listed below if successful. Otherwise, returns an error.", + "refs": { + } + }, + "ListTagsRequest": { + "base": "

    Specifies a list of trail tags to return.

    ", + "refs": { + } + }, + "ListTagsResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "LookupAttribute": { + "base": "

    Specifies an attribute and value that filter the events returned.

    ", + "refs": { + "LookupAttributesList$member": null + } + }, + "LookupAttributeKey": { + "base": null, + "refs": { + "LookupAttribute$AttributeKey": "

    Specifies an attribute on which to filter the events returned.

    " + } + }, + "LookupAttributesList": { + "base": null, + "refs": { + "LookupEventsRequest$LookupAttributes": "

    Contains a list of lookup attributes. Currently the list can contain only one item.

    " + } + }, + "LookupEventsRequest": { + "base": "

    Contains a request for LookupEvents.

    ", + "refs": { + } + }, + "LookupEventsResponse": { + "base": "

    Contains a response to a LookupEvents action.

    ", + "refs": { + } + }, + "MaxResults": { + "base": null, + "refs": { + "LookupEventsRequest$MaxResults": "

    The number of events to return. Possible values are 1 through 50. The default is 10.

    " + } + }, + "MaximumNumberOfTrailsExceededException": { + "base": "

    This exception is thrown when the maximum number of trails is reached.

    ", + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "LookupEventsRequest$NextToken": "

    The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the original call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.

    ", + "LookupEventsResponse$NextToken": "

    The token to use to get the next page of results after a previous API call. If the token does not appear, there are no more results to return. The token must be passed in with the same parameters as the previous call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.

    " + } + }, + "OperationNotPermittedException": { + "base": "

    This exception is thrown when the requested operation is not permitted.

    ", + "refs": { + } + }, + "PublicKey": { + "base": "

    Contains information about a returned public key.

    ", + "refs": { + "PublicKeyList$member": null + } + }, + "PublicKeyList": { + "base": null, + "refs": { + "ListPublicKeysResponse$PublicKeyList": "

    Contains an array of PublicKey objects.

    The returned public keys may have validity time ranges that overlap." + } + }, + "RemoveTagsRequest": { + "base": "

    Specifies the tags to remove from a trail.

    ", + "refs": { + } + }, + "RemoveTagsResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "Resource": { + "base": "

    Specifies the type and name of a resource referenced by an event.

    ", + "refs": { + "ResourceList$member": null + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "ListTagsRequest$ResourceIdList": "

    Specifies a list of trail ARNs whose tags will be listed. The list has a limit of 20 ARNs. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    " + } + }, + "ResourceList": { + "base": "

    A list of resources referenced by the event returned.

    ", + "refs": { + "Event$Resources": "

    A list of resources referenced by the event returned.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    This exception is thrown when the specified resource is not found.

    ", + "refs": { + } + }, + "ResourceTag": { + "base": "

    A resource tag.

    ", + "refs": { + "ResourceTagList$member": null + } + }, + "ResourceTagList": { + "base": "

    A list of resource tags.

    ", + "refs": { + "ListTagsResponse$ResourceTagList": null + } + }, + "ResourceTypeNotSupportedException": { + "base": "

    This exception is thrown when the specified resource type is not supported by CloudTrail.

    ", + "refs": { + } + }, + "S3BucketDoesNotExistException": { + "base": "

    This exception is thrown when the specified S3 bucket does not exist.

    ", + "refs": { + } + }, + "StartLoggingRequest": { + "base": "

    The request to CloudTrail to start logging AWS API calls for an account.

    ", + "refs": { + } + }, + "StartLoggingResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "StopLoggingRequest": { + "base": "

    Passes the request to CloudTrail to stop logging AWS API calls for the specified account.

    ", + "refs": { + } + }, + "StopLoggingResponse": { + "base": "

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddTagsRequest$ResourceId": "

    Specifies the ARN of the trail to which one or more tags will be added. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "CreateTrailRequest$Name": "

    Specifies the name of the trail. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)
    • Start with a letter or number, and end with a letter or number
    • Be between 3 and 128 characters
    • Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid.
    • Not be in IP address format (for example, 192.168.5.4)
    ", + "CreateTrailRequest$S3BucketName": "

    Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

    ", + "CreateTrailRequest$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

    ", + "CreateTrailRequest$SnsTopicName": "

    Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.

    ", + "CreateTrailRequest$CloudWatchLogsLogGroupArn": "

    Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.

    ", + "CreateTrailRequest$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "CreateTrailRequest$KmsKeyId": "

    Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by \"alias/\", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.

    Examples:

    • alias/MyAliasName
    • arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
    • arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • 12345678-1234-1234-1234-123456789012
    ", + "CreateTrailResponse$Name": "

    Specifies the name of the trail.

    ", + "CreateTrailResponse$S3BucketName": "

    Specifies the name of the Amazon S3 bucket designated for publishing log files.

    ", + "CreateTrailResponse$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

    ", + "CreateTrailResponse$SnsTopicName": "

    Specifies the name of the Amazon SNS topic defined for notification of log file delivery.

    ", + "CreateTrailResponse$TrailARN": "

    Specifies the ARN of the trail that was created.

    ", + "CreateTrailResponse$CloudWatchLogsLogGroupArn": "

    Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.

    ", + "CreateTrailResponse$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "CreateTrailResponse$KmsKeyId": "

    Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:

    arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012", + "DeleteTrailRequest$Name": "

    Specifies the name or the CloudTrail ARN of the trail to be deleted. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "Event$EventId": "

    The CloudTrail ID of the event returned.

    ", + "Event$EventName": "

    The name of the event returned.

    ", + "Event$Username": "

    A user name or role name of the requester that called the API in the event returned.

    ", + "Event$CloudTrailEvent": "

    A JSON string that contains a representation of the event returned.

    ", + "GetTrailStatusRequest$Name": "

    Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another region), you must specify its ARN. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "GetTrailStatusResponse$LatestDeliveryError": "

    Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information see the topic Error Responses in the Amazon S3 API Reference.

    This error occurs only when there is a problem with the destination S3 bucket and will not occur for timeouts. To resolve the issue, create a new bucket and call UpdateTrail to specify the new bucket, or fix the existing objects so that CloudTrail can again write to the bucket. ", + "GetTrailStatusResponse$LatestNotificationError": "

    Displays any Amazon SNS error that CloudTrail encountered when attempting to send a notification. For more information about Amazon SNS errors, see the Amazon SNS Developer Guide.

    ", + "GetTrailStatusResponse$LatestCloudWatchLogsDeliveryError": "

    Displays any CloudWatch Logs error that CloudTrail encountered when attempting to deliver logs to CloudWatch Logs.

    ", + "GetTrailStatusResponse$LatestDigestDeliveryError": "

    Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information see the topic Error Responses in the Amazon S3 API Reference.

    This error occurs only when there is a problem with the destination S3 bucket and will not occur for timeouts. To resolve the issue, create a new bucket and call UpdateTrail to specify the new bucket, or fix the existing objects so that CloudTrail can again write to the bucket. ", + "GetTrailStatusResponse$LatestDeliveryAttemptTime": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$LatestNotificationAttemptTime": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$LatestNotificationAttemptSucceeded": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$LatestDeliveryAttemptSucceeded": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$TimeLoggingStarted": "

    This field is deprecated.

    ", + "GetTrailStatusResponse$TimeLoggingStopped": "

    This field is deprecated.

    ", + "ListPublicKeysRequest$NextToken": "

    Reserved for future use.

    ", + "ListPublicKeysResponse$NextToken": "

    Reserved for future use.

    ", + "ListTagsRequest$NextToken": "

    Reserved for future use.

    ", + "ListTagsResponse$NextToken": "

    Reserved for future use.

    ", + "LookupAttribute$AttributeValue": "

    Specifies a value for the specified AttributeKey.

    ", + "PublicKey$Fingerprint": "

    The fingerprint of the public key.

    ", + "RemoveTagsRequest$ResourceId": "

    Specifies the ARN of the trail from which tags should be removed. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "Resource$ResourceType": "

    The type of a resource referenced by the event returned. When the resource type cannot be determined, null is returned. Some examples of resource types are: Instance for EC2, Trail for CloudTrail, DBInstance for RDS, and AccessKey for IAM. For a list of resource types supported for event lookup, see Resource Types Supported for Event Lookup.

    ", + "Resource$ResourceName": "

    The name of the resource referenced by the event returned. These are user-created names whose values will depend on the environment. For example, the resource name might be \"auto-scaling-test-group\" for an Auto Scaling Group or \"i-1234567\" for an EC2 Instance.

    ", + "ResourceIdList$member": null, + "ResourceTag$ResourceId": "

    Specifies the ARN of the resource.

    ", + "StartLoggingRequest$Name": "

    Specifies the name or the CloudTrail ARN of the trail for which CloudTrail logs AWS API calls. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "StopLoggingRequest$Name": "

    Specifies the name or the CloudTrail ARN of the trail for which CloudTrail will stop logging AWS API calls. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "Tag$Key": "

    The key in a key-value pair. The key must be no longer than 128 Unicode characters. The key must be unique for the resource to which it applies.

    ", + "Tag$Value": "

    The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters.

    ", + "Trail$Name": "

    Name of the trail set by calling CreateTrail. The maximum length is 128 characters.

    ", + "Trail$S3BucketName": "

    Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket Naming Requirements.

    ", + "Trail$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

    ", + "Trail$SnsTopicName": "

    Name of the existing Amazon SNS topic that CloudTrail uses to notify the account owner when new CloudTrail log files have been delivered. The maximum length is 256 characters.

    ", + "Trail$HomeRegion": "

    The region in which the trail was created.

    ", + "Trail$TrailARN": "

    The Amazon Resource Name of the trail. The TrailARN format is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "Trail$CloudWatchLogsLogGroupArn": "

    Specifies an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered.

    ", + "Trail$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "Trail$KmsKeyId": "

    Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:

    arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012", + "TrailNameList$member": null, + "UpdateTrailRequest$Name": "

    Specifies the name of the trail or trail ARN. If Name is a trail name, the string must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)
    • Start with a letter or number, and end with a letter or number
    • Be between 3 and 128 characters
    • Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are invalid.
    • Not be in IP address format (for example, 192.168.5.4)

    If Name is a trail ARN, it must be in the format arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.

    ", + "UpdateTrailRequest$S3BucketName": "

    Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

    ", + "UpdateTrailRequest$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

    ", + "UpdateTrailRequest$SnsTopicName": "

    Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.

    ", + "UpdateTrailRequest$CloudWatchLogsLogGroupArn": "

    Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.

    ", + "UpdateTrailRequest$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "UpdateTrailRequest$KmsKeyId": "

    Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by \"alias/\", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.

    Examples:

    • alias/MyAliasName
    • arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
    • arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • 12345678-1234-1234-1234-123456789012
    ", + "UpdateTrailResponse$Name": "

    Specifies the name of the trail.

    ", + "UpdateTrailResponse$S3BucketName": "

    Specifies the name of the Amazon S3 bucket designated for publishing log files.

    ", + "UpdateTrailResponse$S3KeyPrefix": "

    Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

    ", + "UpdateTrailResponse$SnsTopicName": "

    Specifies the name of the Amazon SNS topic defined for notification of log file delivery.

    ", + "UpdateTrailResponse$TrailARN": "

    Specifies the ARN of the trail that was updated.

    ", + "UpdateTrailResponse$CloudWatchLogsLogGroupArn": "

    Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.

    ", + "UpdateTrailResponse$CloudWatchLogsRoleArn": "

    Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

    ", + "UpdateTrailResponse$KmsKeyId": "

    Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:

    arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012" + } + }, + "Tag": { + "base": "

    A custom key-value pair associated with a resource such as a CloudTrail trail.

    ", + "refs": { + "TagsList$member": null + } + }, + "TagsLimitExceededException": { + "base": "

    The number of tags per trail has exceeded the permitted amount. Currently, the limit is 10.

    ", + "refs": { + } + }, + "TagsList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsRequest$TagsList": "

    Contains a list of CloudTrail tags, up to a limit of 10.

    ", + "RemoveTagsRequest$TagsList": "

    Specifies a list of tags to be removed.

    ", + "ResourceTag$TagsList": null + } + }, + "Trail": { + "base": "

    The settings for a trail.

    ", + "refs": { + "TrailList$member": null + } + }, + "TrailAlreadyExistsException": { + "base": "

    This exception is thrown when the specified trail already exists.

    ", + "refs": { + } + }, + "TrailList": { + "base": null, + "refs": { + "DescribeTrailsResponse$trailList": "

    The list of trail objects.

    " + } + }, + "TrailNameList": { + "base": null, + "refs": { + "DescribeTrailsRequest$trailNameList": "

    Specifies a list of trail names, trail ARNs, or both, of the trails to describe. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. If an empty list is specified, information for the trail in the current region is returned.

    • If an empty list is specified and IncludeShadowTrails is false, then information for all trails in the current region is returned.
    • If an empty list is specified and IncludeShadowTrails is null or true, then information for all trails in the current region and any associated shadow trails in other regions is returned.
    If one or more trail names are specified, information is returned only if the names match the names of trails belonging only to the current region. To return information about a trail in another region, you must specify its trail ARN." + } + }, + "TrailNotFoundException": { + "base": "

    This exception is thrown when the trail with the given name is not found.

    ", + "refs": { + } + }, + "TrailNotProvidedException": { + "base": "

    This exception is deprecated.

    ", + "refs": { + } + }, + "UnsupportedOperationException": { + "base": "

    This exception is thrown when the requested operation is not supported.

    ", + "refs": { + } + }, + "UpdateTrailRequest": { + "base": "

    Specifies settings to update for the trail.

    ", + "refs": { + } + }, + "UpdateTrailResponse": { + "base": "Returns the objects or data listed below if successful. Otherwise, returns an error.", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cloudtrail/2013-11-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "pagination": { + "DescribeTrails": { + "result_key": "trailList" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,886 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-13", + "endpointPrefix":"codecommit", + "jsonVersion":"1.1", + "serviceAbbreviation":"CodeCommit", + "serviceFullName":"AWS CodeCommit", + "signatureVersion":"v4", + "targetPrefix":"CodeCommit_20150413", + "timestampFormat":"unixTimestamp", + "protocol":"json" + }, + "operations":{ + "BatchGetRepositories":{ + "name":"BatchGetRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetRepositoriesInput"}, + "output":{"shape":"BatchGetRepositoriesOutput"}, + "errors":[ + { + "shape":"RepositoryNamesRequiredException", + "exception":true + }, + { + "shape":"MaximumRepositoryNamesExceededException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + "shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + "shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + } + ] + }, + "CreateBranch":{ + "name":"CreateBranch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBranchInput"}, + "errors":[ + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"RepositoryDoesNotExistException", + "exception":true + }, + { + "shape":"BranchNameRequiredException", + "exception":true + }, + { + "shape":"BranchNameExistsException", + "exception":true + }, + { + "shape":"InvalidBranchNameException", + "exception":true + }, + { + "shape":"CommitIdRequiredException", + 
"exception":true + }, + { + "shape":"CommitDoesNotExistException", + "exception":true + }, + { + "shape":"InvalidCommitIdException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + "shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + "shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + } + ] + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRepositoryInput"}, + "output":{"shape":"CreateRepositoryOutput"}, + "errors":[ + { + "shape":"RepositoryNameExistsException", + "exception":true + }, + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"InvalidRepositoryDescriptionException", + "exception":true + }, + { + "shape":"RepositoryLimitExceededException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + "shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + "shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + } + ] + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryInput"}, + "output":{"shape":"DeleteRepositoryOutput"}, + "errors":[ + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + 
"shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + "shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + } + ] + }, + "GetBranch":{ + "name":"GetBranch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBranchInput"}, + "output":{"shape":"GetBranchOutput"}, + "errors":[ + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"RepositoryDoesNotExistException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"BranchNameRequiredException", + "exception":true + }, + { + "shape":"InvalidBranchNameException", + "exception":true + }, + { + "shape":"BranchDoesNotExistException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + "shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + "shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + } + ] + }, + "GetRepository":{ + "name":"GetRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryInput"}, + "output":{"shape":"GetRepositoryOutput"}, + "errors":[ + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"RepositoryDoesNotExistException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + "shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + 
"shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + } + ] + }, + "ListBranches":{ + "name":"ListBranches", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListBranchesInput"}, + "output":{"shape":"ListBranchesOutput"}, + "errors":[ + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"RepositoryDoesNotExistException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + "shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + "shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + }, + { + "shape":"InvalidContinuationTokenException", + "exception":true + } + ] + }, + "ListRepositories":{ + "name":"ListRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRepositoriesInput"}, + "output":{"shape":"ListRepositoriesOutput"}, + "errors":[ + { + "shape":"InvalidSortByException", + "exception":true + }, + { + "shape":"InvalidOrderException", + "exception":true + }, + { + "shape":"InvalidContinuationTokenException", + "exception":true + } + ] + }, + "UpdateDefaultBranch":{ + "name":"UpdateDefaultBranch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDefaultBranchInput"}, + "errors":[ + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"RepositoryDoesNotExistException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"BranchNameRequiredException", + "exception":true + }, + { + "shape":"InvalidBranchNameException", + "exception":true + }, + { + 
"shape":"BranchDoesNotExistException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + "shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + "shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + } + ] + }, + "UpdateRepositoryDescription":{ + "name":"UpdateRepositoryDescription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRepositoryDescriptionInput"}, + "errors":[ + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"RepositoryDoesNotExistException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + }, + { + "shape":"InvalidRepositoryDescriptionException", + "exception":true + }, + { + "shape":"EncryptionIntegrityChecksFailedException", + "exception":true, + "fault":true + }, + { + "shape":"EncryptionKeyAccessDeniedException", + "exception":true + }, + { + "shape":"EncryptionKeyDisabledException", + "exception":true + }, + { + "shape":"EncryptionKeyNotFoundException", + "exception":true + }, + { + "shape":"EncryptionKeyUnavailableException", + "exception":true + } + ] + }, + "UpdateRepositoryName":{ + "name":"UpdateRepositoryName", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRepositoryNameInput"}, + "errors":[ + { + "shape":"RepositoryDoesNotExistException", + "exception":true + }, + { + "shape":"RepositoryNameExistsException", + "exception":true + }, + { + "shape":"RepositoryNameRequiredException", + "exception":true + }, + { + "shape":"InvalidRepositoryNameException", + "exception":true + } + ] + } + }, + "shapes":{ + "AccountId":{"type":"string"}, + "Arn":{"type":"string"}, + "BatchGetRepositoriesInput":{ + "type":"structure", + "required":["repositoryNames"], + 
"members":{ + "repositoryNames":{"shape":"RepositoryNameList"} + } + }, + "BatchGetRepositoriesOutput":{ + "type":"structure", + "members":{ + "repositories":{"shape":"RepositoryMetadataList"}, + "repositoriesNotFound":{"shape":"RepositoryNotFoundList"} + } + }, + "BranchDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BranchInfo":{ + "type":"structure", + "members":{ + "branchName":{"shape":"BranchName"}, + "commitId":{"shape":"CommitId"} + } + }, + "BranchName":{ + "type":"string", + "min":1, + "max":100 + }, + "BranchNameExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BranchNameList":{ + "type":"list", + "member":{"shape":"BranchName"} + }, + "BranchNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CloneUrlHttp":{"type":"string"}, + "CloneUrlSsh":{"type":"string"}, + "CommitDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CommitId":{"type":"string"}, + "CommitIdRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CreateBranchInput":{ + "type":"structure", + "required":[ + "repositoryName", + "branchName", + "commitId" + ], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "branchName":{"shape":"BranchName"}, + "commitId":{"shape":"CommitId"} + } + }, + "CreateRepositoryInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "repositoryDescription":{"shape":"RepositoryDescription"} + } + }, + "CreateRepositoryOutput":{ + "type":"structure", + "members":{ + "repositoryMetadata":{"shape":"RepositoryMetadata"} + } + }, + "CreationDate":{"type":"timestamp"}, + "DeleteRepositoryInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"} + } + }, + "DeleteRepositoryOutput":{ + "type":"structure", + "members":{ + 
"repositoryId":{"shape":"RepositoryId"} + } + }, + "EncryptionIntegrityChecksFailedException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "EncryptionKeyAccessDeniedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "EncryptionKeyDisabledException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "EncryptionKeyNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "EncryptionKeyUnavailableException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "GetBranchInput":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "branchName":{"shape":"BranchName"} + } + }, + "GetBranchOutput":{ + "type":"structure", + "members":{ + "branch":{"shape":"BranchInfo"} + } + }, + "GetRepositoryInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"} + } + }, + "GetRepositoryOutput":{ + "type":"structure", + "members":{ + "repositoryMetadata":{"shape":"RepositoryMetadata"} + } + }, + "InvalidBranchNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidCommitIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidContinuationTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidOrderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryDescriptionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRepositoryNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSortByException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LastModifiedDate":{"type":"timestamp"}, + "ListBranchesInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + 
"nextToken":{"shape":"NextToken"} + } + }, + "ListBranchesOutput":{ + "type":"structure", + "members":{ + "branches":{"shape":"BranchNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListRepositoriesInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"}, + "sortBy":{"shape":"SortByEnum"}, + "order":{"shape":"OrderEnum"} + } + }, + "ListRepositoriesOutput":{ + "type":"structure", + "members":{ + "repositories":{"shape":"RepositoryNameIdPairList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "MaximumRepositoryNamesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NextToken":{"type":"string"}, + "OrderEnum":{ + "type":"string", + "enum":[ + "ascending", + "descending" + ] + }, + "RepositoryDescription":{ + "type":"string", + "max":1000 + }, + "RepositoryDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryId":{"type":"string"}, + "RepositoryLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryMetadata":{ + "type":"structure", + "members":{ + "accountId":{"shape":"AccountId"}, + "repositoryId":{"shape":"RepositoryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "repositoryDescription":{"shape":"RepositoryDescription"}, + "defaultBranch":{"shape":"BranchName"}, + "lastModifiedDate":{"shape":"LastModifiedDate"}, + "creationDate":{"shape":"CreationDate"}, + "cloneUrlHttp":{"shape":"CloneUrlHttp"}, + "cloneUrlSsh":{"shape":"CloneUrlSsh"}, + "Arn":{"shape":"Arn"} + } + }, + "RepositoryMetadataList":{ + "type":"list", + "member":{"shape":"RepositoryMetadata"} + }, + "RepositoryName":{ + "type":"string", + "min":1, + "max":100, + "pattern":"[\\\\w\\\\.-]+" + }, + "RepositoryNameExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryNameIdPair":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + 
"repositoryId":{"shape":"RepositoryId"} + } + }, + "RepositoryNameIdPairList":{ + "type":"list", + "member":{"shape":"RepositoryNameIdPair"} + }, + "RepositoryNameList":{ + "type":"list", + "member":{"shape":"RepositoryName"} + }, + "RepositoryNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryNamesRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RepositoryNotFoundList":{ + "type":"list", + "member":{"shape":"RepositoryName"} + }, + "SortByEnum":{ + "type":"string", + "enum":[ + "repositoryName", + "lastModifiedDate" + ] + }, + "UpdateDefaultBranchInput":{ + "type":"structure", + "required":[ + "repositoryName", + "defaultBranchName" + ], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "defaultBranchName":{"shape":"BranchName"} + } + }, + "UpdateRepositoryDescriptionInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "repositoryDescription":{"shape":"RepositoryDescription"} + } + }, + "UpdateRepositoryNameInput":{ + "type":"structure", + "required":[ + "oldName", + "newName" + ], + "members":{ + "oldName":{"shape":"RepositoryName"}, + "newName":{"shape":"RepositoryName"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codecommit/2015-04-13/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,384 @@ +{ + "version": "2.0", + "operations": { + "BatchGetRepositories": "

    Gets information about one or more repositories.

    The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

    ", + "CreateBranch": "

    Creates a new branch in a repository and points the branch to a commit.

    Calling the create branch operation does not set a repository's default branch. To do this, call the update default branch operation.", + "CreateRepository": "

    Creates a new, empty repository.

    ", + "DeleteRepository": "

    Deletes a repository. If a specified repository was already deleted, a null repository ID will be returned.

    Deleting a repository also deletes all associated objects and metadata. After a repository is deleted, all future push calls to the deleted repository will fail.", + "GetBranch": "

    Retrieves information about a repository branch, including its name and the last commit ID.

    ", + "GetRepository": "

    Gets information about a repository.

    The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

    ", + "ListBranches": "

    Gets information about one or more branches in a repository.

    ", + "ListRepositories": "

    Gets information about one or more repositories.

    ", + "UpdateDefaultBranch": "

    Sets or changes the default branch name for the specified repository.

    If you use this operation to change the default branch name to the current default branch name, a success message is returned even though the default branch did not change.", + "UpdateRepositoryDescription": "

    Sets or changes the comment or description for a repository.

    The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

    ", + "UpdateRepositoryName": "

    Renames a repository.

    " + }, + "service": "AWS CodeCommit

    This is the AWS CodeCommit API Reference. This reference provides descriptions of the AWS CodeCommit API.

    You can use the AWS CodeCommit API to work with the following objects:

    • Repositories
    • Branches
    • Commits

    For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

    ", + "shapes": { + "AccountId": { + "base": null, + "refs": { + "RepositoryMetadata$accountId": "

    The ID of the AWS account associated with the repository.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "RepositoryMetadata$Arn": "

    The Amazon Resource Name (ARN) of the repository.

    " + } + }, + "BatchGetRepositoriesInput": { + "base": "

    Represents the input of a batch get repositories operation.

    ", + "refs": { + } + }, + "BatchGetRepositoriesOutput": { + "base": "

    Represents the output of a batch get repositories operation.

    ", + "refs": { + } + }, + "BranchDoesNotExistException": { + "base": "

    The specified branch does not exist.

    ", + "refs": { + } + }, + "BranchInfo": { + "base": "

    Returns information about a branch.

    ", + "refs": { + "GetBranchOutput$branch": "

    The name of the branch.

    " + } + }, + "BranchName": { + "base": null, + "refs": { + "BranchInfo$branchName": "

    The name of the branch.

    ", + "BranchNameList$member": null, + "CreateBranchInput$branchName": "

    The name of the new branch to create.

    ", + "GetBranchInput$branchName": "

    The name of the branch for which you want to retrieve information.

    ", + "RepositoryMetadata$defaultBranch": "

    The repository's default branch name.

    ", + "UpdateDefaultBranchInput$defaultBranchName": "

    The name of the branch to set as the default.

    " + } + }, + "BranchNameExistsException": { + "base": "

    The specified branch name already exists.

    ", + "refs": { + } + }, + "BranchNameList": { + "base": null, + "refs": { + "ListBranchesOutput$branches": "

    The list of branch names.

    " + } + }, + "BranchNameRequiredException": { + "base": "

    A branch name is required but was not specified.

    ", + "refs": { + } + }, + "CloneUrlHttp": { + "base": null, + "refs": { + "RepositoryMetadata$cloneUrlHttp": "

    The URL to use for cloning the repository over HTTPS.

    " + } + }, + "CloneUrlSsh": { + "base": null, + "refs": { + "RepositoryMetadata$cloneUrlSsh": "

    The URL to use for cloning the repository over SSH.

    " + } + }, + "CommitDoesNotExistException": { + "base": "

    The specified commit does not exist or no commit was specified, and the specified repository has no default branch.

    ", + "refs": { + } + }, + "CommitId": { + "base": null, + "refs": { + "BranchInfo$commitId": "

    The ID of the last commit made to the branch.

    ", + "CreateBranchInput$commitId": "

    The ID of the commit to point the new branch to.

    If this commit ID is not specified, the new branch will point to the commit that is pointed to by the repository's default branch." + } + }, + "CommitIdRequiredException": { + "base": "

    A commit ID was not specified.

    ", + "refs": { + } + }, + "CreateBranchInput": { + "base": "

    Represents the input of a create branch operation.

    ", + "refs": { + } + }, + "CreateRepositoryInput": { + "base": "

    Represents the input of a create repository operation.

    ", + "refs": { + } + }, + "CreateRepositoryOutput": { + "base": "

    Represents the output of a create repository operation.

    ", + "refs": { + } + }, + "CreationDate": { + "base": null, + "refs": { + "RepositoryMetadata$creationDate": "

    The date and time the repository was created, in timestamp format.

    " + } + }, + "DeleteRepositoryInput": { + "base": "

    Represents the input of a delete repository operation.

    ", + "refs": { + } + }, + "DeleteRepositoryOutput": { + "base": "

    Represents the output of a delete repository operation.

    ", + "refs": { + } + }, + "EncryptionIntegrityChecksFailedException": { + "base": "

    An encryption integrity check failed.

    ", + "refs": { + } + }, + "EncryptionKeyAccessDeniedException": { + "base": "

    An encryption key could not be accessed.

    ", + "refs": { + } + }, + "EncryptionKeyDisabledException": { + "base": "

    The encryption key is disabled.

    ", + "refs": { + } + }, + "EncryptionKeyNotFoundException": { + "base": "

    No encryption key was found.

    ", + "refs": { + } + }, + "EncryptionKeyUnavailableException": { + "base": "

    The encryption key is not available.

    ", + "refs": { + } + }, + "GetBranchInput": { + "base": "

    Represents the input of a get branch operation.

    ", + "refs": { + } + }, + "GetBranchOutput": { + "base": "

    Represents the output of a get branch operation.

    ", + "refs": { + } + }, + "GetRepositoryInput": { + "base": "

    Represents the input of a get repository operation.

    ", + "refs": { + } + }, + "GetRepositoryOutput": { + "base": "

    Represents the output of a get repository operation.

    ", + "refs": { + } + }, + "InvalidBranchNameException": { + "base": "

    The specified branch name is not valid.

    ", + "refs": { + } + }, + "InvalidCommitIdException": { + "base": "

    The specified commit ID is not valid.

    ", + "refs": { + } + }, + "InvalidContinuationTokenException": { + "base": "

    The specified continuation token is not valid.

    ", + "refs": { + } + }, + "InvalidOrderException": { + "base": "

    The specified sort order is not valid.

    ", + "refs": { + } + }, + "InvalidRepositoryDescriptionException": { + "base": "

    The specified repository description is not valid.

    ", + "refs": { + } + }, + "InvalidRepositoryNameException": { + "base": "

    At least one specified repository name is not valid.

    This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.", + "refs": { + } + }, + "InvalidSortByException": { + "base": "

    The specified sort by value is not valid.

    ", + "refs": { + } + }, + "LastModifiedDate": { + "base": null, + "refs": { + "RepositoryMetadata$lastModifiedDate": "

    The date and time the repository was last modified, in timestamp format.

    " + } + }, + "ListBranchesInput": { + "base": "

    Represents the input of a list branches operation.

    ", + "refs": { + } + }, + "ListBranchesOutput": { + "base": "

    Represents the output of a list branches operation.

    ", + "refs": { + } + }, + "ListRepositoriesInput": { + "base": "

    Represents the input of a list repositories operation.

    ", + "refs": { + } + }, + "ListRepositoriesOutput": { + "base": "

    Represents the output of a list repositories operation.

    ", + "refs": { + } + }, + "MaximumRepositoryNamesExceededException": { + "base": "

    The maximum number of allowed repository names was exceeded. Currently, this number is 25.

    ", + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "ListBranchesInput$nextToken": "

    An enumeration token that allows the operation to batch the results.

    ", + "ListBranchesOutput$nextToken": "

    An enumeration token that returns the batch of the results.

    ", + "ListRepositoriesInput$nextToken": "

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

    ", + "ListRepositoriesOutput$nextToken": "

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

    " + } + }, + "OrderEnum": { + "base": null, + "refs": { + "ListRepositoriesInput$order": "

    The order in which to sort the results of a list repositories operation.

    " + } + }, + "RepositoryDescription": { + "base": null, + "refs": { + "CreateRepositoryInput$repositoryDescription": "

    A comment or description about the new repository.

    ", + "RepositoryMetadata$repositoryDescription": "

    A comment or description about the repository.

    ", + "UpdateRepositoryDescriptionInput$repositoryDescription": "

    The new comment or description for the specified repository.

    " + } + }, + "RepositoryDoesNotExistException": { + "base": "

    The specified repository does not exist.

    ", + "refs": { + } + }, + "RepositoryId": { + "base": null, + "refs": { + "DeleteRepositoryOutput$repositoryId": "

    The ID of the repository that was deleted.

    ", + "RepositoryMetadata$repositoryId": "

    The ID of the repository.

    ", + "RepositoryNameIdPair$repositoryId": "

    The ID associated with the repository name.

    " + } + }, + "RepositoryLimitExceededException": { + "base": "

    A repository resource limit was exceeded.

    ", + "refs": { + } + }, + "RepositoryMetadata": { + "base": "

    Information about a repository.

    ", + "refs": { + "CreateRepositoryOutput$repositoryMetadata": "

    Information about the newly created repository.

    ", + "GetRepositoryOutput$repositoryMetadata": "

    Information about the repository.

    ", + "RepositoryMetadataList$member": null + } + }, + "RepositoryMetadataList": { + "base": null, + "refs": { + "BatchGetRepositoriesOutput$repositories": "

    A list of repositories returned by the batch get repositories operation.

    " + } + }, + "RepositoryName": { + "base": "Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9), \".\", \"_\", and \"-\". Additionally, the suffix \".git\" is prohibited in a repository name.", + "refs": { + "CreateBranchInput$repositoryName": "

    The name of the repository in which you want to create the new branch.

    ", + "CreateRepositoryInput$repositoryName": "

    The name of the new repository to be created.

    The repository name must be unique across the calling AWS account. In addition, repository names are restricted to alphanumeric characters. The suffix \".git\" is prohibited.", + "DeleteRepositoryInput$repositoryName": "

    The name of the repository to delete.

    ", + "GetBranchInput$repositoryName": null, + "GetRepositoryInput$repositoryName": "

    The name of the repository to get information about.

    ", + "ListBranchesInput$repositoryName": "

    The name of the repository that contains the branches.

    ", + "RepositoryMetadata$repositoryName": "

    The repository's name.

    ", + "RepositoryNameIdPair$repositoryName": null, + "RepositoryNameList$member": null, + "RepositoryNotFoundList$member": null, + "UpdateDefaultBranchInput$repositoryName": "

    The name of the repository to set or change the default branch for.

    ", + "UpdateRepositoryDescriptionInput$repositoryName": "

    The name of the repository to set or change the comment or description for.

    ", + "UpdateRepositoryNameInput$oldName": null, + "UpdateRepositoryNameInput$newName": null + } + }, + "RepositoryNameExistsException": { + "base": "

    The specified repository name already exists.

    ", + "refs": { + } + }, + "RepositoryNameIdPair": { + "base": "

    Information about a repository name and ID.

    ", + "refs": { + "RepositoryNameIdPairList$member": null + } + }, + "RepositoryNameIdPairList": { + "base": null, + "refs": { + "ListRepositoriesOutput$repositories": "

    Lists the repositories called by the list repositories operation.

    " + } + }, + "RepositoryNameList": { + "base": null, + "refs": { + "BatchGetRepositoriesInput$repositoryNames": "

    The names of the repositories to get information about.

    " + } + }, + "RepositoryNameRequiredException": { + "base": "

    A repository name is required but was not specified.

    ", + "refs": { + } + }, + "RepositoryNamesRequiredException": { + "base": "

    A repository names object is required but was not specified.

    ", + "refs": { + } + }, + "RepositoryNotFoundList": { + "base": null, + "refs": { + "BatchGetRepositoriesOutput$repositoriesNotFound": "

    Returns a list of repository names for which information could not be found.

    " + } + }, + "SortByEnum": { + "base": null, + "refs": { + "ListRepositoriesInput$sortBy": "

    The criteria used to sort the results of a list repositories operation.

    " + } + }, + "UpdateDefaultBranchInput": { + "base": "

    Represents the input of an update default branch operation.

    ", + "refs": { + } + }, + "UpdateRepositoryDescriptionInput": { + "base": "

    Represents the input of an update repository description operation.

    ", + "refs": { + } + }, + "UpdateRepositoryNameInput": { + "base": "

    Represents the input of an update repository description operation.

    ", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1762 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-10-06", + "endpointPrefix":"codedeploy", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CodeDeploy", + "serviceFullName":"AWS CodeDeploy", + "signatureVersion":"v4", + "targetPrefix":"CodeDeploy_20141006", + "timestampFormat":"unixTimestamp" + }, + "operations":{ + "AddTagsToOnPremisesInstances":{ + "name":"AddTagsToOnPremisesInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToOnPremisesInstancesInput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"TagRequiredException"}, + {"shape":"InvalidTagException"}, + {"shape":"TagLimitExceededException"}, + {"shape":"InstanceLimitExceededException"}, + {"shape":"InstanceNotRegisteredException"} + ] + }, + "BatchGetApplications":{ + "name":"BatchGetApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetApplicationsInput"}, + "output":{"shape":"BatchGetApplicationsOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"} + ] + }, + "BatchGetDeployments":{ + "name":"BatchGetDeployments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetDeploymentsInput"}, + 
"output":{"shape":"BatchGetDeploymentsOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"InvalidDeploymentIdException"} + ] + }, + "BatchGetOnPremisesInstances":{ + "name":"BatchGetOnPremisesInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetOnPremisesInstancesInput"}, + "output":{"shape":"BatchGetOnPremisesInstancesOutput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"InvalidInstanceNameException"} + ] + }, + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationInput"}, + "output":{"shape":"CreateApplicationOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationAlreadyExistsException"}, + {"shape":"ApplicationLimitExceededException"} + ] + }, + "CreateDeployment":{ + "name":"CreateDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeploymentInput"}, + "output":{"shape":"CreateDeploymentOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupDoesNotExistException"}, + {"shape":"RevisionRequiredException"}, + {"shape":"InvalidRevisionException"}, + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigDoesNotExistException"}, + {"shape":"DescriptionTooLongException"}, + {"shape":"DeploymentLimitExceededException"} + ] + }, + "CreateDeploymentConfig":{ + "name":"CreateDeploymentConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeploymentConfigInput"}, + "output":{"shape":"CreateDeploymentConfigOutput"}, + "errors":[ + 
{"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigNameRequiredException"}, + {"shape":"DeploymentConfigAlreadyExistsException"}, + {"shape":"InvalidMinimumHealthyHostValueException"}, + {"shape":"DeploymentConfigLimitExceededException"} + ] + }, + "CreateDeploymentGroup":{ + "name":"CreateDeploymentGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeploymentGroupInput"}, + "output":{"shape":"CreateDeploymentGroupOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupAlreadyExistsException"}, + {"shape":"InvalidEC2TagException"}, + {"shape":"InvalidTagException"}, + {"shape":"InvalidAutoScalingGroupException"}, + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigDoesNotExistException"}, + {"shape":"RoleRequiredException"}, + {"shape":"InvalidRoleException"}, + {"shape":"DeploymentGroupLimitExceededException"} + ] + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApplicationInput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"} + ] + }, + "DeleteDeploymentConfig":{ + "name":"DeleteDeploymentConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeploymentConfigInput"}, + "errors":[ + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigNameRequiredException"}, + {"shape":"DeploymentConfigInUseException"}, + {"shape":"InvalidOperationException"} + ] + }, + "DeleteDeploymentGroup":{ + "name":"DeleteDeploymentGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeploymentGroupInput"}, + 
"output":{"shape":"DeleteDeploymentGroupOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"InvalidRoleException"} + ] + }, + "DeregisterOnPremisesInstance":{ + "name":"DeregisterOnPremisesInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterOnPremisesInstanceInput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"InvalidInstanceNameException"} + ] + }, + "GetApplication":{ + "name":"GetApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetApplicationInput"}, + "output":{"shape":"GetApplicationOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"} + ] + }, + "GetApplicationRevision":{ + "name":"GetApplicationRevision", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetApplicationRevisionInput"}, + "output":{"shape":"GetApplicationRevisionOutput"}, + "errors":[ + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"RevisionDoesNotExistException"}, + {"shape":"RevisionRequiredException"}, + {"shape":"InvalidRevisionException"} + ] + }, + "GetDeployment":{ + "name":"GetDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentInput"}, + "output":{"shape":"GetDeploymentOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"DeploymentDoesNotExistException"} + ] + }, + "GetDeploymentConfig":{ + "name":"GetDeploymentConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentConfigInput"}, + 
"output":{"shape":"GetDeploymentConfigOutput"}, + "errors":[ + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigNameRequiredException"}, + {"shape":"DeploymentConfigDoesNotExistException"} + ] + }, + "GetDeploymentGroup":{ + "name":"GetDeploymentGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentGroupInput"}, + "output":{"shape":"GetDeploymentGroupOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupDoesNotExistException"} + ] + }, + "GetDeploymentInstance":{ + "name":"GetDeploymentInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentInstanceInput"}, + "output":{"shape":"GetDeploymentInstanceOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"DeploymentDoesNotExistException"}, + {"shape":"InstanceIdRequiredException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"InstanceDoesNotExistException"} + ] + }, + "GetOnPremisesInstance":{ + "name":"GetOnPremisesInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOnPremisesInstanceInput"}, + "output":{"shape":"GetOnPremisesInstanceOutput"}, + "errors":[ + {"shape":"InstanceNameRequiredException"}, + {"shape":"InstanceNotRegisteredException"}, + {"shape":"InvalidInstanceNameException"} + ] + }, + "ListApplicationRevisions":{ + "name":"ListApplicationRevisions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApplicationRevisionsInput"}, + "output":{"shape":"ListApplicationRevisionsOutput"}, + "errors":[ + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + 
{"shape":"InvalidSortByException"}, + {"shape":"InvalidSortOrderException"}, + {"shape":"InvalidBucketNameFilterException"}, + {"shape":"InvalidKeyPrefixFilterException"}, + {"shape":"BucketNameFilterRequiredException"}, + {"shape":"InvalidDeployedStateFilterException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "ListApplications":{ + "name":"ListApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApplicationsInput"}, + "output":{"shape":"ListApplicationsOutput"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ] + }, + "ListDeploymentConfigs":{ + "name":"ListDeploymentConfigs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentConfigsInput"}, + "output":{"shape":"ListDeploymentConfigsOutput"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ] + }, + "ListDeploymentGroups":{ + "name":"ListDeploymentGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentGroupsInput"}, + "output":{"shape":"ListDeploymentGroupsOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "ListDeploymentInstances":{ + "name":"ListDeploymentInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentInstancesInput"}, + "output":{"shape":"ListDeploymentInstancesOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"DeploymentDoesNotExistException"}, + {"shape":"DeploymentNotStartedException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidDeploymentIdException"}, + {"shape":"InvalidInstanceStatusException"} + ] + }, + "ListDeployments":{ + "name":"ListDeployments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentsInput"}, + "output":{"shape":"ListDeploymentsOutput"}, + 
"errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupDoesNotExistException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"InvalidTimeRangeException"}, + {"shape":"InvalidDeploymentStatusException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "ListOnPremisesInstances":{ + "name":"ListOnPremisesInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOnPremisesInstancesInput"}, + "output":{"shape":"ListOnPremisesInstancesOutput"}, + "errors":[ + {"shape":"InvalidRegistrationStatusException"}, + {"shape":"InvalidTagFilterException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "RegisterApplicationRevision":{ + "name":"RegisterApplicationRevision", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterApplicationRevisionInput"}, + "errors":[ + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"DescriptionTooLongException"}, + {"shape":"RevisionRequiredException"}, + {"shape":"InvalidRevisionException"} + ] + }, + "RegisterOnPremisesInstance":{ + "name":"RegisterOnPremisesInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterOnPremisesInstanceInput"}, + "errors":[ + {"shape":"InstanceNameAlreadyRegisteredException"}, + {"shape":"IamUserArnAlreadyRegisteredException"}, + {"shape":"InstanceNameRequiredException"}, + {"shape":"IamUserArnRequiredException"}, + {"shape":"InvalidInstanceNameException"}, + {"shape":"InvalidIamUserArnException"} + ] + }, + "RemoveTagsFromOnPremisesInstances":{ + "name":"RemoveTagsFromOnPremisesInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromOnPremisesInstancesInput"}, + "errors":[ + 
{"shape":"InstanceNameRequiredException"}, + {"shape":"TagRequiredException"}, + {"shape":"InvalidTagException"}, + {"shape":"TagLimitExceededException"}, + {"shape":"InstanceLimitExceededException"}, + {"shape":"InstanceNotRegisteredException"} + ] + }, + "StopDeployment":{ + "name":"StopDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDeploymentInput"}, + "output":{"shape":"StopDeploymentOutput"}, + "errors":[ + {"shape":"DeploymentIdRequiredException"}, + {"shape":"DeploymentDoesNotExistException"}, + {"shape":"DeploymentAlreadyCompletedException"}, + {"shape":"InvalidDeploymentIdException"} + ] + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApplicationInput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationAlreadyExistsException"}, + {"shape":"ApplicationDoesNotExistException"} + ] + }, + "UpdateDeploymentGroup":{ + "name":"UpdateDeploymentGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDeploymentGroupInput"}, + "output":{"shape":"UpdateDeploymentGroupOutput"}, + "errors":[ + {"shape":"ApplicationNameRequiredException"}, + {"shape":"InvalidApplicationNameException"}, + {"shape":"ApplicationDoesNotExistException"}, + {"shape":"InvalidDeploymentGroupNameException"}, + {"shape":"DeploymentGroupAlreadyExistsException"}, + {"shape":"DeploymentGroupNameRequiredException"}, + {"shape":"DeploymentGroupDoesNotExistException"}, + {"shape":"InvalidEC2TagException"}, + {"shape":"InvalidTagException"}, + {"shape":"InvalidAutoScalingGroupException"}, + {"shape":"InvalidDeploymentConfigNameException"}, + {"shape":"DeploymentConfigDoesNotExistException"}, + {"shape":"InvalidRoleException"} + ] + } + }, + "shapes":{ + "AddTagsToOnPremisesInstancesInput":{ + "type":"structure", + "required":[ + "tags", + "instanceNames" + ], 
+ "members":{ + "tags":{"shape":"TagList"}, + "instanceNames":{"shape":"InstanceNameList"} + } + }, + "ApplicationAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApplicationDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApplicationId":{"type":"string"}, + "ApplicationInfo":{ + "type":"structure", + "members":{ + "applicationId":{"shape":"ApplicationId"}, + "applicationName":{"shape":"ApplicationName"}, + "createTime":{"shape":"Timestamp"}, + "linkedToGitHub":{"shape":"Boolean"} + } + }, + "ApplicationLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApplicationName":{ + "type":"string", + "max":100, + "min":1 + }, + "ApplicationNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApplicationRevisionSortBy":{ + "type":"string", + "enum":[ + "registerTime", + "firstUsedTime", + "lastUsedTime" + ] + }, + "ApplicationsInfoList":{ + "type":"list", + "member":{"shape":"ApplicationInfo"} + }, + "ApplicationsList":{ + "type":"list", + "member":{"shape":"ApplicationName"} + }, + "AutoScalingGroup":{ + "type":"structure", + "members":{ + "name":{"shape":"AutoScalingGroupName"}, + "hook":{"shape":"AutoScalingGroupHook"} + } + }, + "AutoScalingGroupHook":{"type":"string"}, + "AutoScalingGroupList":{ + "type":"list", + "member":{"shape":"AutoScalingGroup"} + }, + "AutoScalingGroupName":{"type":"string"}, + "AutoScalingGroupNameList":{ + "type":"list", + "member":{"shape":"AutoScalingGroupName"} + }, + "BatchGetApplicationsInput":{ + "type":"structure", + "members":{ + "applicationNames":{"shape":"ApplicationsList"} + } + }, + "BatchGetApplicationsOutput":{ + "type":"structure", + "members":{ + "applicationsInfo":{"shape":"ApplicationsInfoList"} + } + }, + "BatchGetDeploymentsInput":{ + "type":"structure", + "members":{ + "deploymentIds":{"shape":"DeploymentsList"} + } + }, + "BatchGetDeploymentsOutput":{ 
+ "type":"structure", + "members":{ + "deploymentsInfo":{"shape":"DeploymentsInfoList"} + } + }, + "BatchGetOnPremisesInstancesInput":{ + "type":"structure", + "members":{ + "instanceNames":{"shape":"InstanceNameList"} + } + }, + "BatchGetOnPremisesInstancesOutput":{ + "type":"structure", + "members":{ + "instanceInfos":{"shape":"InstanceInfoList"} + } + }, + "Boolean":{"type":"boolean"}, + "BucketNameFilterRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BundleType":{ + "type":"string", + "enum":[ + "tar", + "tgz", + "zip" + ] + }, + "CommitId":{"type":"string"}, + "CreateApplicationInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"} + } + }, + "CreateApplicationOutput":{ + "type":"structure", + "members":{ + "applicationId":{"shape":"ApplicationId"} + } + }, + "CreateDeploymentConfigInput":{ + "type":"structure", + "required":["deploymentConfigName"], + "members":{ + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "minimumHealthyHosts":{"shape":"MinimumHealthyHosts"} + } + }, + "CreateDeploymentConfigOutput":{ + "type":"structure", + "members":{ + "deploymentConfigId":{"shape":"DeploymentConfigId"} + } + }, + "CreateDeploymentGroupInput":{ + "type":"structure", + "required":[ + "applicationName", + "deploymentGroupName", + "serviceRoleArn" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "ec2TagFilters":{"shape":"EC2TagFilterList"}, + "onPremisesInstanceTagFilters":{"shape":"TagFilterList"}, + "autoScalingGroups":{"shape":"AutoScalingGroupNameList"}, + "serviceRoleArn":{"shape":"Role"} + } + }, + "CreateDeploymentGroupOutput":{ + "type":"structure", + "members":{ + "deploymentGroupId":{"shape":"DeploymentGroupId"} + } + }, + "CreateDeploymentInput":{ + "type":"structure", + "required":["applicationName"], 
+ "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + "revision":{"shape":"RevisionLocation"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "description":{"shape":"Description"}, + "ignoreApplicationStopFailures":{"shape":"Boolean"} + } + }, + "CreateDeploymentOutput":{ + "type":"structure", + "members":{ + "deploymentId":{"shape":"DeploymentId"} + } + }, + "DeleteApplicationInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"} + } + }, + "DeleteDeploymentConfigInput":{ + "type":"structure", + "required":["deploymentConfigName"], + "members":{ + "deploymentConfigName":{"shape":"DeploymentConfigName"} + } + }, + "DeleteDeploymentGroupInput":{ + "type":"structure", + "required":[ + "applicationName", + "deploymentGroupName" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"} + } + }, + "DeleteDeploymentGroupOutput":{ + "type":"structure", + "members":{ + "hooksNotCleanedUp":{"shape":"AutoScalingGroupList"} + } + }, + "DeploymentAlreadyCompletedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigId":{"type":"string"}, + "DeploymentConfigInUseException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigInfo":{ + "type":"structure", + "members":{ + "deploymentConfigId":{"shape":"DeploymentConfigId"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "minimumHealthyHosts":{"shape":"MinimumHealthyHosts"}, + "createTime":{"shape":"Timestamp"} + } + }, + "DeploymentConfigLimitExceededException":{ + "type":"structure", + "members":{ + }, + 
"exception":true + }, + "DeploymentConfigName":{ + "type":"string", + "max":100, + "min":1 + }, + "DeploymentConfigNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentConfigsList":{ + "type":"list", + "member":{"shape":"DeploymentConfigName"} + }, + "DeploymentCreator":{ + "type":"string", + "enum":[ + "user", + "autoscaling" + ] + }, + "DeploymentDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupId":{"type":"string"}, + "DeploymentGroupInfo":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupId":{"shape":"DeploymentGroupId"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "ec2TagFilters":{"shape":"EC2TagFilterList"}, + "onPremisesInstanceTagFilters":{"shape":"TagFilterList"}, + "autoScalingGroups":{"shape":"AutoScalingGroupList"}, + "serviceRoleArn":{"shape":"Role"}, + "targetRevision":{"shape":"RevisionLocation"} + } + }, + "DeploymentGroupLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupName":{ + "type":"string", + "max":100, + "min":1 + }, + "DeploymentGroupNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentGroupsList":{ + "type":"list", + "member":{"shape":"DeploymentGroupName"} + }, + "DeploymentId":{"type":"string"}, + "DeploymentIdRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentInfo":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + 
"deploymentConfigName":{"shape":"DeploymentConfigName"}, + "deploymentId":{"shape":"DeploymentId"}, + "revision":{"shape":"RevisionLocation"}, + "status":{"shape":"DeploymentStatus"}, + "errorInformation":{"shape":"ErrorInformation"}, + "createTime":{"shape":"Timestamp"}, + "startTime":{"shape":"Timestamp"}, + "completeTime":{"shape":"Timestamp"}, + "deploymentOverview":{"shape":"DeploymentOverview"}, + "description":{"shape":"Description"}, + "creator":{"shape":"DeploymentCreator"}, + "ignoreApplicationStopFailures":{"shape":"Boolean"} + } + }, + "DeploymentLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentNotStartedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeploymentOverview":{ + "type":"structure", + "members":{ + "Pending":{"shape":"InstanceCount"}, + "InProgress":{"shape":"InstanceCount"}, + "Succeeded":{"shape":"InstanceCount"}, + "Failed":{"shape":"InstanceCount"}, + "Skipped":{"shape":"InstanceCount"} + } + }, + "DeploymentStatus":{ + "type":"string", + "enum":[ + "Created", + "Queued", + "InProgress", + "Succeeded", + "Failed", + "Stopped" + ] + }, + "DeploymentStatusList":{ + "type":"list", + "member":{"shape":"DeploymentStatus"} + }, + "DeploymentsInfoList":{ + "type":"list", + "member":{"shape":"DeploymentInfo"} + }, + "DeploymentsList":{ + "type":"list", + "member":{"shape":"DeploymentId"} + }, + "DeregisterOnPremisesInstanceInput":{ + "type":"structure", + "required":["instanceName"], + "members":{ + "instanceName":{"shape":"InstanceName"} + } + }, + "Description":{"type":"string"}, + "DescriptionTooLongException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Diagnostics":{ + "type":"structure", + "members":{ + "errorCode":{"shape":"LifecycleErrorCode"}, + "scriptName":{"shape":"ScriptName"}, + "message":{"shape":"LifecycleMessage"}, + "logTail":{"shape":"LogTail"} + } + }, + "EC2TagFilter":{ + "type":"structure", + "members":{ + 
"Key":{"shape":"Key"}, + "Value":{"shape":"Value"}, + "Type":{"shape":"EC2TagFilterType"} + } + }, + "EC2TagFilterList":{ + "type":"list", + "member":{"shape":"EC2TagFilter"} + }, + "EC2TagFilterType":{ + "type":"string", + "enum":[ + "KEY_ONLY", + "VALUE_ONLY", + "KEY_AND_VALUE" + ] + }, + "ETag":{"type":"string"}, + "ErrorCode":{ + "type":"string", + "enum":[ + "DEPLOYMENT_GROUP_MISSING", + "APPLICATION_MISSING", + "REVISION_MISSING", + "IAM_ROLE_MISSING", + "IAM_ROLE_PERMISSIONS", + "NO_EC2_SUBSCRIPTION", + "OVER_MAX_INSTANCES", + "NO_INSTANCES", + "TIMEOUT", + "HEALTH_CONSTRAINTS_INVALID", + "HEALTH_CONSTRAINTS", + "INTERNAL_ERROR", + "THROTTLED" + ] + }, + "ErrorInformation":{ + "type":"structure", + "members":{ + "code":{"shape":"ErrorCode"}, + "message":{"shape":"ErrorMessage"} + } + }, + "ErrorMessage":{"type":"string"}, + "GenericRevisionInfo":{ + "type":"structure", + "members":{ + "description":{"shape":"Description"}, + "deploymentGroups":{"shape":"DeploymentGroupsList"}, + "firstUsedTime":{"shape":"Timestamp"}, + "lastUsedTime":{"shape":"Timestamp"}, + "registerTime":{"shape":"Timestamp"} + } + }, + "GetApplicationInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"} + } + }, + "GetApplicationOutput":{ + "type":"structure", + "members":{ + "application":{"shape":"ApplicationInfo"} + } + }, + "GetApplicationRevisionInput":{ + "type":"structure", + "required":[ + "applicationName", + "revision" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "revision":{"shape":"RevisionLocation"} + } + }, + "GetApplicationRevisionOutput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "revision":{"shape":"RevisionLocation"}, + "revisionInfo":{"shape":"GenericRevisionInfo"} + } + }, + "GetDeploymentConfigInput":{ + "type":"structure", + "required":["deploymentConfigName"], + "members":{ + 
"deploymentConfigName":{"shape":"DeploymentConfigName"} + } + }, + "GetDeploymentConfigOutput":{ + "type":"structure", + "members":{ + "deploymentConfigInfo":{"shape":"DeploymentConfigInfo"} + } + }, + "GetDeploymentGroupInput":{ + "type":"structure", + "required":[ + "applicationName", + "deploymentGroupName" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"} + } + }, + "GetDeploymentGroupOutput":{ + "type":"structure", + "members":{ + "deploymentGroupInfo":{"shape":"DeploymentGroupInfo"} + } + }, + "GetDeploymentInput":{ + "type":"structure", + "required":["deploymentId"], + "members":{ + "deploymentId":{"shape":"DeploymentId"} + } + }, + "GetDeploymentInstanceInput":{ + "type":"structure", + "required":[ + "deploymentId", + "instanceId" + ], + "members":{ + "deploymentId":{"shape":"DeploymentId"}, + "instanceId":{"shape":"InstanceId"} + } + }, + "GetDeploymentInstanceOutput":{ + "type":"structure", + "members":{ + "instanceSummary":{"shape":"InstanceSummary"} + } + }, + "GetDeploymentOutput":{ + "type":"structure", + "members":{ + "deploymentInfo":{"shape":"DeploymentInfo"} + } + }, + "GetOnPremisesInstanceInput":{ + "type":"structure", + "required":["instanceName"], + "members":{ + "instanceName":{"shape":"InstanceName"} + } + }, + "GetOnPremisesInstanceOutput":{ + "type":"structure", + "members":{ + "instanceInfo":{"shape":"InstanceInfo"} + } + }, + "GitHubLocation":{ + "type":"structure", + "members":{ + "repository":{"shape":"Repository"}, + "commitId":{"shape":"CommitId"} + } + }, + "IamUserArn":{"type":"string"}, + "IamUserArnAlreadyRegisteredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "IamUserArnRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceArn":{"type":"string"}, + "InstanceCount":{"type":"long"}, + "InstanceDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, 
+ "InstanceId":{"type":"string"}, + "InstanceIdRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceInfo":{ + "type":"structure", + "members":{ + "instanceName":{"shape":"InstanceName"}, + "iamUserArn":{"shape":"IamUserArn"}, + "instanceArn":{"shape":"InstanceArn"}, + "registerTime":{"shape":"Timestamp"}, + "deregisterTime":{"shape":"Timestamp"}, + "tags":{"shape":"TagList"} + } + }, + "InstanceInfoList":{ + "type":"list", + "member":{"shape":"InstanceInfo"} + }, + "InstanceLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceName":{"type":"string"}, + "InstanceNameAlreadyRegisteredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceNameList":{ + "type":"list", + "member":{"shape":"InstanceName"} + }, + "InstanceNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceNotRegisteredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InstanceStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Succeeded", + "Failed", + "Skipped", + "Unknown" + ] + }, + "InstanceStatusList":{ + "type":"list", + "member":{"shape":"InstanceStatus"} + }, + "InstanceSummary":{ + "type":"structure", + "members":{ + "deploymentId":{"shape":"DeploymentId"}, + "instanceId":{"shape":"InstanceId"}, + "status":{"shape":"InstanceStatus"}, + "lastUpdatedAt":{"shape":"Timestamp"}, + "lifecycleEvents":{"shape":"LifecycleEventList"} + } + }, + "InstancesList":{ + "type":"list", + "member":{"shape":"InstanceId"} + }, + "InvalidApplicationNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidAutoScalingGroupException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidBucketNameFilterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeployedStateFilterException":{ + 
"type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeploymentConfigNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeploymentGroupNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeploymentIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeploymentStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidEC2TagException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidIamUserArnException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInstanceNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInstanceStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidKeyPrefixFilterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidMinimumHealthyHostValueException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidOperationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRegistrationStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRevisionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRoleException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSortByException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSortOrderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTagException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTagFilterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTimeRangeException":{ + 
"type":"structure", + "members":{ + }, + "exception":true + }, + "Key":{"type":"string"}, + "LifecycleErrorCode":{ + "type":"string", + "enum":[ + "Success", + "ScriptMissing", + "ScriptNotExecutable", + "ScriptTimedOut", + "ScriptFailed", + "UnknownError" + ] + }, + "LifecycleEvent":{ + "type":"structure", + "members":{ + "lifecycleEventName":{"shape":"LifecycleEventName"}, + "diagnostics":{"shape":"Diagnostics"}, + "startTime":{"shape":"Timestamp"}, + "endTime":{"shape":"Timestamp"}, + "status":{"shape":"LifecycleEventStatus"} + } + }, + "LifecycleEventList":{ + "type":"list", + "member":{"shape":"LifecycleEvent"} + }, + "LifecycleEventName":{"type":"string"}, + "LifecycleEventStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Succeeded", + "Failed", + "Skipped", + "Unknown" + ] + }, + "LifecycleMessage":{"type":"string"}, + "ListApplicationRevisionsInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "sortBy":{"shape":"ApplicationRevisionSortBy"}, + "sortOrder":{"shape":"SortOrder"}, + "s3Bucket":{"shape":"S3Bucket"}, + "s3KeyPrefix":{"shape":"S3Key"}, + "deployed":{"shape":"ListStateFilterAction"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListApplicationRevisionsOutput":{ + "type":"structure", + "members":{ + "revisions":{"shape":"RevisionLocationList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListApplicationsInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"} + } + }, + "ListApplicationsOutput":{ + "type":"structure", + "members":{ + "applications":{"shape":"ApplicationsList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentConfigsInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentConfigsOutput":{ + "type":"structure", + "members":{ + "deploymentConfigsList":{"shape":"DeploymentConfigsList"}, + "nextToken":{"shape":"NextToken"} + } + }, + 
"ListDeploymentGroupsInput":{ + "type":"structure", + "required":["applicationName"], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentGroupsOutput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroups":{"shape":"DeploymentGroupsList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentInstancesInput":{ + "type":"structure", + "required":["deploymentId"], + "members":{ + "deploymentId":{"shape":"DeploymentId"}, + "nextToken":{"shape":"NextToken"}, + "instanceStatusFilter":{"shape":"InstanceStatusList"} + } + }, + "ListDeploymentInstancesOutput":{ + "type":"structure", + "members":{ + "instancesList":{"shape":"InstancesList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentsInput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "deploymentGroupName":{"shape":"DeploymentGroupName"}, + "includeOnlyStatuses":{"shape":"DeploymentStatusList"}, + "createTimeRange":{"shape":"TimeRange"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDeploymentsOutput":{ + "type":"structure", + "members":{ + "deployments":{"shape":"DeploymentsList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListOnPremisesInstancesInput":{ + "type":"structure", + "members":{ + "registrationStatus":{"shape":"RegistrationStatus"}, + "tagFilters":{"shape":"TagFilterList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListOnPremisesInstancesOutput":{ + "type":"structure", + "members":{ + "instanceNames":{"shape":"InstanceNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListStateFilterAction":{ + "type":"string", + "enum":[ + "include", + "exclude", + "ignore" + ] + }, + "LogTail":{"type":"string"}, + "Message":{"type":"string"}, + "MinimumHealthyHosts":{ + "type":"structure", + "members":{ + "value":{"shape":"MinimumHealthyHostsValue"}, + "type":{"shape":"MinimumHealthyHostsType"} + } + }, + 
"MinimumHealthyHostsType":{ + "type":"string", + "enum":[ + "HOST_COUNT", + "FLEET_PERCENT" + ] + }, + "MinimumHealthyHostsValue":{"type":"integer"}, + "NextToken":{"type":"string"}, + "RegisterApplicationRevisionInput":{ + "type":"structure", + "required":[ + "applicationName", + "revision" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "description":{"shape":"Description"}, + "revision":{"shape":"RevisionLocation"} + } + }, + "RegisterOnPremisesInstanceInput":{ + "type":"structure", + "required":[ + "instanceName", + "iamUserArn" + ], + "members":{ + "instanceName":{"shape":"InstanceName"}, + "iamUserArn":{"shape":"IamUserArn"} + } + }, + "RegistrationStatus":{ + "type":"string", + "enum":[ + "Registered", + "Deregistered" + ] + }, + "RemoveTagsFromOnPremisesInstancesInput":{ + "type":"structure", + "required":[ + "tags", + "instanceNames" + ], + "members":{ + "tags":{"shape":"TagList"}, + "instanceNames":{"shape":"InstanceNameList"} + } + }, + "Repository":{"type":"string"}, + "RevisionDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RevisionLocation":{ + "type":"structure", + "members":{ + "revisionType":{"shape":"RevisionLocationType"}, + "s3Location":{"shape":"S3Location"}, + "gitHubLocation":{"shape":"GitHubLocation"} + } + }, + "RevisionLocationList":{ + "type":"list", + "member":{"shape":"RevisionLocation"} + }, + "RevisionLocationType":{ + "type":"string", + "enum":[ + "S3", + "GitHub" + ] + }, + "RevisionRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Role":{"type":"string"}, + "RoleRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "S3Bucket":{"type":"string"}, + "S3Key":{"type":"string"}, + "S3Location":{ + "type":"structure", + "members":{ + "bucket":{"shape":"S3Bucket"}, + "key":{"shape":"S3Key"}, + "bundleType":{"shape":"BundleType"}, + "version":{"shape":"VersionId"}, + "eTag":{"shape":"ETag"} + } + }, + 
"ScriptName":{"type":"string"}, + "SortOrder":{ + "type":"string", + "enum":[ + "ascending", + "descending" + ] + }, + "StopDeploymentInput":{ + "type":"structure", + "required":["deploymentId"], + "members":{ + "deploymentId":{"shape":"DeploymentId"} + } + }, + "StopDeploymentOutput":{ + "type":"structure", + "members":{ + "status":{"shape":"StopStatus"}, + "statusMessage":{"shape":"Message"} + } + }, + "StopStatus":{ + "type":"string", + "enum":[ + "Pending", + "Succeeded" + ] + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "Value":{"shape":"Value"} + } + }, + "TagFilter":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "Value":{"shape":"Value"}, + "Type":{"shape":"TagFilterType"} + } + }, + "TagFilterList":{ + "type":"list", + "member":{"shape":"TagFilter"} + }, + "TagFilterType":{ + "type":"string", + "enum":[ + "KEY_ONLY", + "VALUE_ONLY", + "KEY_AND_VALUE" + ] + }, + "TagLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TimeRange":{ + "type":"structure", + "members":{ + "start":{"shape":"Timestamp"}, + "end":{"shape":"Timestamp"} + } + }, + "Timestamp":{"type":"timestamp"}, + "UpdateApplicationInput":{ + "type":"structure", + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "newApplicationName":{"shape":"ApplicationName"} + } + }, + "UpdateDeploymentGroupInput":{ + "type":"structure", + "required":[ + "applicationName", + "currentDeploymentGroupName" + ], + "members":{ + "applicationName":{"shape":"ApplicationName"}, + "currentDeploymentGroupName":{"shape":"DeploymentGroupName"}, + "newDeploymentGroupName":{"shape":"DeploymentGroupName"}, + "deploymentConfigName":{"shape":"DeploymentConfigName"}, + "ec2TagFilters":{"shape":"EC2TagFilterList"}, + "onPremisesInstanceTagFilters":{"shape":"TagFilterList"}, + 
"autoScalingGroups":{"shape":"AutoScalingGroupNameList"}, + "serviceRoleArn":{"shape":"Role"} + } + }, + "UpdateDeploymentGroupOutput":{ + "type":"structure", + "members":{ + "hooksNotCleanedUp":{"shape":"AutoScalingGroupList"} + } + }, + "Value":{"type":"string"}, + "VersionId":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1261 @@ +{ + "version": "2.0", + "service": "AWS CodeDeploy Overview

    This is the AWS CodeDeploy API Reference. This guide provides descriptions of the AWS CodeDeploy APIs. For additional information, see the AWS CodeDeploy User Guide.

    Using the APIs

    You can use the AWS CodeDeploy APIs to work with the following items:

    • Applications are unique identifiers that AWS CodeDeploy uses to ensure that the correct combinations of revisions, deployment configurations, and deployment groups are being referenced during deployments.

      You can use the AWS CodeDeploy APIs to create, delete, get, list, and update applications.

    • Deployment configurations are sets of deployment rules and deployment success and failure conditions that AWS CodeDeploy uses during deployments.

      You can use the AWS CodeDeploy APIs to create, delete, get, and list deployment configurations.

    • Deployment groups are groups of instances to which application revisions can be deployed.

      You can use the AWS CodeDeploy APIs to create, delete, get, list, and update deployment groups.

    • Instances represent Amazon EC2 instances to which application revisions are deployed. Instances are identified by their Amazon EC2 tags or Auto Scaling group names. Instances belong to deployment groups.

      You can use the AWS CodeDeploy APIs to get and list instances.

    • Deployments represent the process of deploying revisions to instances.

      You can use the AWS CodeDeploy APIs to create, get, list, and stop deployments.

    • Application revisions are archive files that are stored in Amazon S3 buckets or GitHub repositories. These revisions contain source content (such as source code, web pages, executable files, any deployment scripts, and similar) along with an Application Specification file (AppSpec file). (The AppSpec file is unique to AWS CodeDeploy; it defines a series of deployment actions that you want AWS CodeDeploy to execute.) An application revision is uniquely identified by its Amazon S3 object key and its ETag, version, or both (for application revisions that are stored in Amazon S3 buckets) or by its repository name and commit ID (for applications revisions that are stored in GitHub repositories). Application revisions are deployed through deployment groups.

      You can use the AWS CodeDeploy APIs to get, list, and register application revisions.

    ", + "operations": { + "AddTagsToOnPremisesInstances": "

    Adds tags to on-premises instances.

    ", + "BatchGetApplications": "

    Gets information about one or more applications.

    ", + "BatchGetDeployments": "

    Gets information about one or more deployments.

    ", + "BatchGetOnPremisesInstances": "

    Gets information about one or more on-premises instances.

    ", + "CreateApplication": "

    Creates a new application.

    ", + "CreateDeployment": "

    Deploys an application revision through the specified deployment group.

    ", + "CreateDeploymentConfig": "

    Creates a new deployment configuration.

    ", + "CreateDeploymentGroup": "

    Creates a new deployment group for application revisions to be deployed to.

    ", + "DeleteApplication": "

    Deletes an application.

    ", + "DeleteDeploymentConfig": "

    Deletes a deployment configuration.

    A deployment configuration cannot be deleted if it is currently in use. Also, predefined configurations cannot be deleted.", + "DeleteDeploymentGroup": "

    Deletes a deployment group.

    ", + "DeregisterOnPremisesInstance": "

    Deregisters an on-premises instance.

    ", + "GetApplication": "

    Gets information about an application.

    ", + "GetApplicationRevision": "

    Gets information about an application revision.

    ", + "GetDeployment": "

    Gets information about a deployment.

    ", + "GetDeploymentConfig": "

    Gets information about a deployment configuration.

    ", + "GetDeploymentGroup": "

    Gets information about a deployment group.

    ", + "GetDeploymentInstance": "

    Gets information about an instance as part of a deployment.

    ", + "GetOnPremisesInstance": "

    Gets information about an on-premises instance.

    ", + "ListApplicationRevisions": "

    Lists information about revisions for an application.

    ", + "ListApplications": "

    Lists the applications registered with the applicable IAM user or AWS account.

    ", + "ListDeploymentConfigs": "

    Lists the deployment configurations with the applicable IAM user or AWS account.

    ", + "ListDeploymentGroups": "

    Lists the deployment groups for an application registered with the applicable IAM user or AWS account.

    ", + "ListDeploymentInstances": "

    Lists the instances for a deployment associated with the applicable IAM user or AWS account.

    ", + "ListDeployments": "

    Lists the deployments within a deployment group for an application registered with the applicable IAM user or AWS account.

    ", + "ListOnPremisesInstances": "

    Gets a list of one or more on-premises instance names.

    Unless otherwise specified, both registered and deregistered on-premises instance names will be listed. To list only registered or deregistered on-premises instance names, use the registration status parameter.

    ", + "RegisterApplicationRevision": "

    Registers with AWS CodeDeploy a revision for the specified application.

    ", + "RegisterOnPremisesInstance": "

    Registers an on-premises instance.

    ", + "RemoveTagsFromOnPremisesInstances": "

    Removes one or more tags from one or more on-premises instances.

    ", + "StopDeployment": "

    Attempts to stop an ongoing deployment.

    ", + "UpdateApplication": "

    Changes an existing application's name.

    ", + "UpdateDeploymentGroup": "

    Changes information about an existing deployment group.

    " + }, + "shapes": { + "AddTagsToOnPremisesInstancesInput": { + "base": "

    Represents the input of an adds tags to on-premises instance operation.

    ", + "refs": { + } + }, + "ApplicationAlreadyExistsException": { + "base": "

    An application with the specified name already exists with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "ApplicationDoesNotExistException": { + "base": "

    The application does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "ApplicationId": { + "base": null, + "refs": { + "ApplicationInfo$applicationId": "

    The application ID.

    ", + "CreateApplicationOutput$applicationId": "

    A unique application ID.

    " + } + }, + "ApplicationInfo": { + "base": "

    Information about an application.

    ", + "refs": { + "ApplicationsInfoList$member": null, + "GetApplicationOutput$application": "

    Information about the application.

    " + } + }, + "ApplicationLimitExceededException": { + "base": "

    More applications were attempted to be created than were allowed.

    ", + "refs": { + } + }, + "ApplicationName": { + "base": null, + "refs": { + "ApplicationInfo$applicationName": "

    The application name.

    ", + "ApplicationsList$member": null, + "CreateApplicationInput$applicationName": "

    The name of the application. This name must be unique with the applicable IAM user or AWS account.

    ", + "CreateDeploymentGroupInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "CreateDeploymentInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "DeleteApplicationInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "DeleteDeploymentGroupInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "DeploymentGroupInfo$applicationName": "

    The application name.

    ", + "DeploymentInfo$applicationName": "

    The application name.

    ", + "GetApplicationInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "GetApplicationRevisionInput$applicationName": "

    The name of the application that corresponds to the revision.

    ", + "GetApplicationRevisionOutput$applicationName": "

    The name of the application that corresponds to the revision.

    ", + "GetDeploymentGroupInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "ListApplicationRevisionsInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "ListDeploymentGroupsInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "ListDeploymentGroupsOutput$applicationName": "

    The application name.

    ", + "ListDeploymentsInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "RegisterApplicationRevisionInput$applicationName": "

    The name of an existing AWS CodeDeploy application associated with the applicable IAM user or AWS account.

    ", + "UpdateApplicationInput$applicationName": "

    The current name of the application that you want to change.

    ", + "UpdateApplicationInput$newApplicationName": "

    The new name that you want to change the application to.

    ", + "UpdateDeploymentGroupInput$applicationName": "

    The application name corresponding to the deployment group to update.

    " + } + }, + "ApplicationNameRequiredException": { + "base": "

    The minimum number of required application names was not specified.

    ", + "refs": { + } + }, + "ApplicationRevisionSortBy": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$sortBy": "

    The column name to sort the list results by:

    • registerTime: Sort the list results by when the revisions were registered with AWS CodeDeploy.
    • firstUsedTime: Sort the list results by when the revisions were first used by in a deployment.
    • lastUsedTime: Sort the list results by when the revisions were last used in a deployment.

    If not specified or set to null, the results will be returned in an arbitrary order.

    " + } + }, + "ApplicationsInfoList": { + "base": null, + "refs": { + "BatchGetApplicationsOutput$applicationsInfo": "

    Information about the applications.

    " + } + }, + "ApplicationsList": { + "base": null, + "refs": { + "BatchGetApplicationsInput$applicationNames": "

    A list of application names, with multiple application names separated by spaces.

    ", + "ListApplicationsOutput$applications": "

    A list of application names.

    " + } + }, + "AutoScalingGroup": { + "base": "

    Information about an Auto Scaling group.

    ", + "refs": { + "AutoScalingGroupList$member": null + } + }, + "AutoScalingGroupHook": { + "base": null, + "refs": { + "AutoScalingGroup$hook": "

    An Auto Scaling lifecycle event hook name.

    " + } + }, + "AutoScalingGroupList": { + "base": null, + "refs": { + "DeleteDeploymentGroupOutput$hooksNotCleanedUp": "

    If the output contains no data, and the corresponding deployment group contained at least one Auto Scaling group, AWS CodeDeploy successfully removed all corresponding Auto Scaling lifecycle event hooks from the Amazon EC2 instances in the Auto Scaling. If the output does contain data, AWS CodeDeploy could not remove some Auto Scaling lifecycle event hooks from the Amazon EC2 instances in the Auto Scaling group.

    ", + "DeploymentGroupInfo$autoScalingGroups": "

    A list of associated Auto Scaling groups.

    ", + "UpdateDeploymentGroupOutput$hooksNotCleanedUp": "

    If the output contains no data, and the corresponding deployment group contained at least one Auto Scaling group, AWS CodeDeploy successfully removed all corresponding Auto Scaling lifecycle event hooks from the AWS account. If the output does contain data, AWS CodeDeploy could not remove some Auto Scaling lifecycle event hooks from the AWS account.

    " + } + }, + "AutoScalingGroupName": { + "base": null, + "refs": { + "AutoScalingGroup$name": "

    The Auto Scaling group name.

    ", + "AutoScalingGroupNameList$member": null + } + }, + "AutoScalingGroupNameList": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$autoScalingGroups": "

    A list of associated Auto Scaling groups.

    ", + "UpdateDeploymentGroupInput$autoScalingGroups": "

    The replacement list of Auto Scaling groups to be included in the deployment group, if you want to change them.

    " + } + }, + "BatchGetApplicationsInput": { + "base": "

    Represents the input of a batch get applications operation.

    ", + "refs": { + } + }, + "BatchGetApplicationsOutput": { + "base": "

    Represents the output of a batch get applications operation.

    ", + "refs": { + } + }, + "BatchGetDeploymentsInput": { + "base": "

    Represents the input of a batch get deployments operation.

    ", + "refs": { + } + }, + "BatchGetDeploymentsOutput": { + "base": "

    Represents the output of a batch get deployments operation.

    ", + "refs": { + } + }, + "BatchGetOnPremisesInstancesInput": { + "base": "

    Represents the input of a batch get on-premises instances operation.

    ", + "refs": { + } + }, + "BatchGetOnPremisesInstancesOutput": { + "base": "

    Represents the output of a batch get on-premises instances operation.

    ", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "ApplicationInfo$linkedToGitHub": "

    True if the user has authenticated with GitHub for the specified application; otherwise, false.

    ", + "CreateDeploymentInput$ignoreApplicationStopFailures": "

    If set to true, then if the deployment causes the ApplicationStop deployment lifecycle event to fail to a specific instance, the deployment will not be considered to have failed to that instance at that point and will continue on to the BeforeInstall deployment lifecycle event.

    If set to false or not specified, then if the deployment causes the ApplicationStop deployment lifecycle event to fail to a specific instance, the deployment will stop to that instance, and the deployment to that instance will be considered to have failed.

    ", + "DeploymentInfo$ignoreApplicationStopFailures": "

    If true, then if the deployment causes the ApplicationStop deployment lifecycle event to fail to a specific instance, the deployment will not be considered to have failed to that instance at that point and will continue on to the BeforeInstall deployment lifecycle event.

    If false or not specified, then if the deployment causes the ApplicationStop deployment lifecycle event to fail to a specific instance, the deployment will stop to that instance, and the deployment to that instance will be considered to have failed.

    " + } + }, + "BucketNameFilterRequiredException": { + "base": "

    A bucket name is required but was not provided.

    ", + "refs": { + } + }, + "BundleType": { + "base": null, + "refs": { + "S3Location$bundleType": "

    The file type of the application revision. Must be one of the following:

    • tar: A tar archive file.
    • tgz: A compressed tar archive file.
    • zip: A zip archive file.
    " + } + }, + "CommitId": { + "base": null, + "refs": { + "GitHubLocation$commitId": "

    The SHA1 commit ID of the GitHub commit that references the that represents the bundled artifacts for the application revision.

    " + } + }, + "CreateApplicationInput": { + "base": "

    Represents the input of a create application operation.

    ", + "refs": { + } + }, + "CreateApplicationOutput": { + "base": "

    Represents the output of a create application operation.

    ", + "refs": { + } + }, + "CreateDeploymentConfigInput": { + "base": "

    Represents the input of a create deployment configuration operation.

    ", + "refs": { + } + }, + "CreateDeploymentConfigOutput": { + "base": "

    Represents the output of a create deployment configuration operation.

    ", + "refs": { + } + }, + "CreateDeploymentGroupInput": { + "base": "

    Represents the input of a create deployment group operation.

    ", + "refs": { + } + }, + "CreateDeploymentGroupOutput": { + "base": "

    Represents the output of a create deployment group operation.

    ", + "refs": { + } + }, + "CreateDeploymentInput": { + "base": "

    Represents the input of a create deployment operation.

    ", + "refs": { + } + }, + "CreateDeploymentOutput": { + "base": "

    Represents the output of a create deployment operation.

    ", + "refs": { + } + }, + "DeleteApplicationInput": { + "base": "

    Represents the input of a delete application operation.

    ", + "refs": { + } + }, + "DeleteDeploymentConfigInput": { + "base": "

    Represents the input of a delete deployment configuration operation.

    ", + "refs": { + } + }, + "DeleteDeploymentGroupInput": { + "base": "

    Represents the input of a delete deployment group operation.

    ", + "refs": { + } + }, + "DeleteDeploymentGroupOutput": { + "base": "

    Represents the output of a delete deployment group operation.

    ", + "refs": { + } + }, + "DeploymentAlreadyCompletedException": { + "base": "

    The deployment is already completed.

    ", + "refs": { + } + }, + "DeploymentConfigAlreadyExistsException": { + "base": "

    A deployment configuration with the specified name already exists with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentConfigDoesNotExistException": { + "base": "

    The deployment configuration does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentConfigId": { + "base": null, + "refs": { + "CreateDeploymentConfigOutput$deploymentConfigId": "

    A unique deployment configuration ID.

    ", + "DeploymentConfigInfo$deploymentConfigId": "

    The deployment configuration ID.

    " + } + }, + "DeploymentConfigInUseException": { + "base": "

    The deployment configuration is still in use.

    ", + "refs": { + } + }, + "DeploymentConfigInfo": { + "base": "

    Information about a deployment configuration.

    ", + "refs": { + "GetDeploymentConfigOutput$deploymentConfigInfo": "

    Information about the deployment configuration.

    " + } + }, + "DeploymentConfigLimitExceededException": { + "base": "

    The deployment configurations limit was exceeded.

    ", + "refs": { + } + }, + "DeploymentConfigName": { + "base": null, + "refs": { + "CreateDeploymentConfigInput$deploymentConfigName": "

    The name of the deployment configuration to create.

    ", + "CreateDeploymentGroupInput$deploymentConfigName": "

    If specified, the deployment configuration name must be one of the predefined values, or it can be a custom deployment configuration:

    • CodeDeployDefault.AllAtOnce deploys an application revision to up to all of the instances at once. The overall deployment succeeds if the application revision deploys to at least one of the instances. The overall deployment fails after the application revision fails to deploy to all of the instances. For example, for 9 instances, deploy to up to all 9 instances at once. The overall deployment succeeds if any of the 9 instances is successfully deployed to, and it fails if all 9 instances fail to be deployed to.
    • CodeDeployDefault.HalfAtATime deploys to up to half of the instances at a time (with fractions rounded down). The overall deployment succeeds if the application revision deploys to at least half of the instances (with fractions rounded up); otherwise, the deployment fails. For example, for 9 instances, deploy to up to 4 instances at a time. The overall deployment succeeds if 5 or more instances are successfully deployed to; otherwise, the deployment fails. Note that the deployment may successfully deploy to some instances, even if the overall deployment fails.
    • CodeDeployDefault.OneAtATime deploys the application revision to only one of the instances at a time. The overall deployment succeeds if the application revision deploys to all of the instances. The overall deployment fails after the application revision first fails to deploy to any one instances. For example, for 9 instances, deploy to one instance at a time. The overall deployment succeeds if all 9 instances are successfully deployed to, and it fails if any of one of the 9 instances fail to be deployed to. Note that the deployment may successfully deploy to some instances, even if the overall deployment fails. This is the default deployment configuration if a configuration isn't specified for either the deployment or the deployment group.

    To create a custom deployment configuration, call the create deployment configuration operation.

    ", + "CreateDeploymentInput$deploymentConfigName": "

    The name of an existing deployment configuration associated with the applicable IAM user or AWS account.

    If not specified, the value configured in the deployment group will be used as the default. If the deployment group does not have a deployment configuration associated with it, then CodeDeployDefault.OneAtATime will be used by default.

    ", + "DeleteDeploymentConfigInput$deploymentConfigName": "

    The name of an existing deployment configuration associated with the applicable IAM user or AWS account.

    ", + "DeploymentConfigInfo$deploymentConfigName": "

    The deployment configuration name.

    ", + "DeploymentConfigsList$member": null, + "DeploymentGroupInfo$deploymentConfigName": "

    The deployment configuration name.

    ", + "DeploymentInfo$deploymentConfigName": "

    The deployment configuration name.

    ", + "GetDeploymentConfigInput$deploymentConfigName": "

    The name of an existing deployment configuration associated with the applicable IAM user or AWS account.

    ", + "UpdateDeploymentGroupInput$deploymentConfigName": "

    The replacement deployment configuration name to use, if you want to change it.

    " + } + }, + "DeploymentConfigNameRequiredException": { + "base": "

    The deployment configuration name was not specified.

    ", + "refs": { + } + }, + "DeploymentConfigsList": { + "base": null, + "refs": { + "ListDeploymentConfigsOutput$deploymentConfigsList": "

    A list of deployment configurations, including the built-in configurations such as CodeDeployDefault.OneAtATime.

    " + } + }, + "DeploymentCreator": { + "base": null, + "refs": { + "DeploymentInfo$creator": "

    How the deployment was created:

    • user: A user created the deployment.
    • autoscaling: Auto Scaling created the deployment.
    " + } + }, + "DeploymentDoesNotExistException": { + "base": "

    The deployment does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentGroupAlreadyExistsException": { + "base": "

    A deployment group with the specified name already exists with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentGroupDoesNotExistException": { + "base": "

    The named deployment group does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "DeploymentGroupId": { + "base": null, + "refs": { + "CreateDeploymentGroupOutput$deploymentGroupId": "

    A unique deployment group ID.

    ", + "DeploymentGroupInfo$deploymentGroupId": "

    The deployment group ID.

    " + } + }, + "DeploymentGroupInfo": { + "base": "

    Information about a deployment group.

    ", + "refs": { + "GetDeploymentGroupOutput$deploymentGroupInfo": "

    Information about the deployment group.

    " + } + }, + "DeploymentGroupLimitExceededException": { + "base": "

    The deployment groups limit was exceeded.

    ", + "refs": { + } + }, + "DeploymentGroupName": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$deploymentGroupName": "

    The name of an existing deployment group for the specified application.

    ", + "CreateDeploymentInput$deploymentGroupName": "

    The deployment group's name.

    ", + "DeleteDeploymentGroupInput$deploymentGroupName": "

    The name of an existing deployment group for the specified application.

    ", + "DeploymentGroupInfo$deploymentGroupName": "

    The deployment group name.

    ", + "DeploymentGroupsList$member": null, + "DeploymentInfo$deploymentGroupName": "

    The deployment group name.

    ", + "GetDeploymentGroupInput$deploymentGroupName": "

    The name of an existing deployment group for the specified application.

    ", + "ListDeploymentsInput$deploymentGroupName": "

    The name of an existing deployment group for the specified application.

    ", + "UpdateDeploymentGroupInput$currentDeploymentGroupName": "

    The current name of the existing deployment group.

    ", + "UpdateDeploymentGroupInput$newDeploymentGroupName": "

    The new name of the deployment group, if you want to change it.

    " + } + }, + "DeploymentGroupNameRequiredException": { + "base": "

    The deployment group name was not specified.

    ", + "refs": { + } + }, + "DeploymentGroupsList": { + "base": null, + "refs": { + "GenericRevisionInfo$deploymentGroups": "

    A list of deployment groups that use this revision.

    ", + "ListDeploymentGroupsOutput$deploymentGroups": "

    A list of corresponding deployment group names.

    " + } + }, + "DeploymentId": { + "base": null, + "refs": { + "CreateDeploymentOutput$deploymentId": "

    A unique deployment ID.

    ", + "DeploymentInfo$deploymentId": "

    The deployment ID.

    ", + "DeploymentsList$member": null, + "GetDeploymentInput$deploymentId": "

    An existing deployment ID associated with the applicable IAM user or AWS account.

    ", + "GetDeploymentInstanceInput$deploymentId": "

    The unique ID of a deployment.

    ", + "InstanceSummary$deploymentId": "

    The deployment ID.

    ", + "ListDeploymentInstancesInput$deploymentId": "

    The unique ID of a deployment.

    ", + "StopDeploymentInput$deploymentId": "

    The unique ID of a deployment.

    " + } + }, + "DeploymentIdRequiredException": { + "base": "

    At least one deployment ID must be specified.

    ", + "refs": { + } + }, + "DeploymentInfo": { + "base": "

    Information about a deployment.

    ", + "refs": { + "DeploymentsInfoList$member": null, + "GetDeploymentOutput$deploymentInfo": "

    Information about the deployment.

    " + } + }, + "DeploymentLimitExceededException": { + "base": "

    The number of allowed deployments was exceeded.

    ", + "refs": { + } + }, + "DeploymentNotStartedException": { + "base": "

    The specified deployment has not started.

    ", + "refs": { + } + }, + "DeploymentOverview": { + "base": "

    Information about the deployment status of the instances in the deployment.

    ", + "refs": { + "DeploymentInfo$deploymentOverview": "

    A summary of the deployment status of the instances in the deployment.

    " + } + }, + "DeploymentStatus": { + "base": null, + "refs": { + "DeploymentInfo$status": "

    The current state of the deployment as a whole.

    ", + "DeploymentStatusList$member": null + } + }, + "DeploymentStatusList": { + "base": null, + "refs": { + "ListDeploymentsInput$includeOnlyStatuses": "

    A subset of deployments to list, by status:

    • Created: Include in the resulting list created deployments.
    • Queued: Include in the resulting list queued deployments.
    • In Progress: Include in the resulting list in-progress deployments.
    • Succeeded: Include in the resulting list succeeded deployments.
    • Failed: Include in the resulting list failed deployments.
    • Aborted: Include in the resulting list aborted deployments.
    " + } + }, + "DeploymentsInfoList": { + "base": null, + "refs": { + "BatchGetDeploymentsOutput$deploymentsInfo": "

    Information about the deployments.

    " + } + }, + "DeploymentsList": { + "base": null, + "refs": { + "BatchGetDeploymentsInput$deploymentIds": "

    A list of deployment IDs, with multiple deployment IDs separated by spaces.

    ", + "ListDeploymentsOutput$deployments": "

    A list of deployment IDs.

    " + } + }, + "DeregisterOnPremisesInstanceInput": { + "base": "

    Represents the input of a deregister on-premises instance operation.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "CreateDeploymentInput$description": "

    A comment about the deployment.

    ", + "DeploymentInfo$description": "

    A comment about the deployment.

    ", + "GenericRevisionInfo$description": "

    A comment about the revision.

    ", + "RegisterApplicationRevisionInput$description": "

    A comment about the revision.

    " + } + }, + "DescriptionTooLongException": { + "base": "

    The description that was provided is too long.

    ", + "refs": { + } + }, + "Diagnostics": { + "base": "

    Diagnostic information about executable scripts that are part of a deployment.

    ", + "refs": { + "LifecycleEvent$diagnostics": "

    Diagnostic information about the deployment lifecycle event.

    " + } + }, + "EC2TagFilter": { + "base": "

    Information about a tag filter.

    ", + "refs": { + "EC2TagFilterList$member": null + } + }, + "EC2TagFilterList": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$ec2TagFilters": "

    The Amazon EC2 tags to filter on.

    ", + "DeploymentGroupInfo$ec2TagFilters": "

    The Amazon EC2 tags to filter on.

    ", + "UpdateDeploymentGroupInput$ec2TagFilters": "

    The replacement set of Amazon EC2 tags to filter on, if you want to change them.

    " + } + }, + "EC2TagFilterType": { + "base": null, + "refs": { + "EC2TagFilter$Type": "

    The tag filter type:

    • KEY_ONLY: Key only.
    • VALUE_ONLY: Value only.
    • KEY_AND_VALUE: Key and value.
    " + } + }, + "ETag": { + "base": null, + "refs": { + "S3Location$eTag": "

    The ETag of the Amazon S3 object that represents the bundled artifacts for the application revision.

    If the ETag is not specified as an input parameter, ETag validation of the object will be skipped.

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "ErrorInformation$code": "

    The error code:

    • APPLICATION_MISSING: The application was missing. Note that this error code will most likely be raised if the application is deleted after the deployment is created but before it starts.
    • DEPLOYMENT_GROUP_MISSING: The deployment group was missing. Note that this error code will most likely be raised if the deployment group is deleted after the deployment is created but before it starts.
    • HEALTH_CONSTRAINTS: The deployment failed on too many instances to be able to successfully deploy within the specified instance health constraints.
    • HEALTH_CONSTRAINTS_INVALID: The revision can never successfully deploy within the instance health constraints as specified.
    • IAM_ROLE_MISSING: The service role cannot be accessed.
    • IAM_ROLE_PERMISSIONS: The service role does not have the correct permissions.
    • INTERNAL_ERROR: There was an internal error.
    • NO_EC2_SUBSCRIPTION: The calling account is not subscribed to the Amazon EC2 service.
    • NO_INSTANCES: No instances were specified, or no instances can be found.
    • OVER_MAX_INSTANCES: The maximum number of instances was exceeded.
    • THROTTLED: The operation was throttled because the calling account exceeded the throttling limits of one or more AWS services.
    • TIMEOUT: The deployment has timed out.
    • REVISION_MISSING: The revision ID was missing. Note that this error code will most likely be raised if the revision is deleted after the deployment is created but before it starts.
    " + } + }, + "ErrorInformation": { + "base": "

    Information about a deployment error.

    ", + "refs": { + "DeploymentInfo$errorInformation": "

    Information about any error associated with this deployment.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ErrorInformation$message": "

    An accompanying error message.

    " + } + }, + "GenericRevisionInfo": { + "base": "

    Information about an application revision.

    ", + "refs": { + "GetApplicationRevisionOutput$revisionInfo": "

    General information about the revision.

    " + } + }, + "GetApplicationInput": { + "base": "

    Represents the input of a get application operation.

    ", + "refs": { + } + }, + "GetApplicationOutput": { + "base": "

    Represents the output of a get application operation.

    ", + "refs": { + } + }, + "GetApplicationRevisionInput": { + "base": "

    Represents the input of a get application revision operation.

    ", + "refs": { + } + }, + "GetApplicationRevisionOutput": { + "base": "

    Represents the output of a get application revision operation.

    ", + "refs": { + } + }, + "GetDeploymentConfigInput": { + "base": "

    Represents the input of a get deployment configuration operation.

    ", + "refs": { + } + }, + "GetDeploymentConfigOutput": { + "base": "

    Represents the output of a get deployment configuration operation.

    ", + "refs": { + } + }, + "GetDeploymentGroupInput": { + "base": "

    Represents the input of a get deployment group operation.

    ", + "refs": { + } + }, + "GetDeploymentGroupOutput": { + "base": "

    Represents the output of a get deployment group operation.

    ", + "refs": { + } + }, + "GetDeploymentInput": { + "base": "

    Represents the input of a get deployment operation.

    ", + "refs": { + } + }, + "GetDeploymentInstanceInput": { + "base": "

    Represents the input of a get deployment instance operation.

    ", + "refs": { + } + }, + "GetDeploymentInstanceOutput": { + "base": "

    Represents the output of a get deployment instance operation.

    ", + "refs": { + } + }, + "GetDeploymentOutput": { + "base": "

    Represents the output of a get deployment operation.

    ", + "refs": { + } + }, + "GetOnPremisesInstanceInput": { + "base": "

    Represents the input of a get on-premises instance operation.

    ", + "refs": { + } + }, + "GetOnPremisesInstanceOutput": { + "base": "

    Represents the output of a get on-premises instance operation.

    ", + "refs": { + } + }, + "GitHubLocation": { + "base": "

    Information about the location of application artifacts that are stored in GitHub.

    ", + "refs": { + "RevisionLocation$gitHubLocation": null + } + }, + "IamUserArn": { + "base": null, + "refs": { + "InstanceInfo$iamUserArn": "

    The IAM user ARN associated with the on-premises instance.

    ", + "RegisterOnPremisesInstanceInput$iamUserArn": "

    The ARN of the IAM user to associate with the on-premises instance.

    " + } + }, + "IamUserArnAlreadyRegisteredException": { + "base": "

    The specified IAM user ARN is already registered with an on-premises instance.

    ", + "refs": { + } + }, + "IamUserArnRequiredException": { + "base": "

    An IAM user ARN was not specified.

    ", + "refs": { + } + }, + "InstanceArn": { + "base": null, + "refs": { + "InstanceInfo$instanceArn": "

    The ARN of the on-premises instance.

    " + } + }, + "InstanceCount": { + "base": null, + "refs": { + "DeploymentOverview$Pending": "

    The number of instances that are pending in the deployment.

    ", + "DeploymentOverview$InProgress": "

    The number of instances that are in progress in the deployment.

    ", + "DeploymentOverview$Succeeded": "

    The number of instances that have succeeded in the deployment.

    ", + "DeploymentOverview$Failed": "

    The number of instances that have failed in the deployment.

    ", + "DeploymentOverview$Skipped": "

    The number of instances that have been skipped in the deployment.

    " + } + }, + "InstanceDoesNotExistException": { + "base": "

    The specified instance does not exist in the deployment group.

    ", + "refs": { + } + }, + "InstanceId": { + "base": null, + "refs": { + "GetDeploymentInstanceInput$instanceId": "

    The unique ID of an instance in the deployment's deployment group.

    ", + "InstanceSummary$instanceId": "

    The instance ID.

    ", + "InstancesList$member": null + } + }, + "InstanceIdRequiredException": { + "base": "

    The instance ID was not specified.

    ", + "refs": { + } + }, + "InstanceInfo": { + "base": "

    Information about an on-premises instance.

    ", + "refs": { + "GetOnPremisesInstanceOutput$instanceInfo": "

    Information about the on-premises instance.

    ", + "InstanceInfoList$member": null + } + }, + "InstanceInfoList": { + "base": null, + "refs": { + "BatchGetOnPremisesInstancesOutput$instanceInfos": "

    Information about the on-premises instances.

    " + } + }, + "InstanceLimitExceededException": { + "base": "

    The maximum number of allowed on-premises instances in a single call was exceeded.

    ", + "refs": { + } + }, + "InstanceName": { + "base": null, + "refs": { + "DeregisterOnPremisesInstanceInput$instanceName": "

    The name of the on-premises instance to deregister.

    ", + "GetOnPremisesInstanceInput$instanceName": "

    The name of the on-premises instance to get information about.

    ", + "InstanceInfo$instanceName": "

    The name of the on-premises instance.

    ", + "InstanceNameList$member": null, + "RegisterOnPremisesInstanceInput$instanceName": "

    The name of the on-premises instance to register.

    " + } + }, + "InstanceNameAlreadyRegisteredException": { + "base": "

    The specified on-premises instance name is already registered.

    ", + "refs": { + } + }, + "InstanceNameList": { + "base": null, + "refs": { + "AddTagsToOnPremisesInstancesInput$instanceNames": "

    The names of the on-premises instances to add tags to.

    ", + "BatchGetOnPremisesInstancesInput$instanceNames": "

    The names of the on-premises instances to get information about.

    ", + "ListOnPremisesInstancesOutput$instanceNames": "

    The list of matching on-premises instance names.

    ", + "RemoveTagsFromOnPremisesInstancesInput$instanceNames": "

    The names of the on-premises instances to remove tags from.

    " + } + }, + "InstanceNameRequiredException": { + "base": "

    An on-premises instance name was not specified.

    ", + "refs": { + } + }, + "InstanceNotRegisteredException": { + "base": "

    The specified on-premises instance is not registered.

    ", + "refs": { + } + }, + "InstanceStatus": { + "base": null, + "refs": { + "InstanceStatusList$member": null, + "InstanceSummary$status": "

    The deployment status for this instance:

    • Pending: The deployment is pending for this instance.
    • In Progress: The deployment is in progress for this instance.
    • Succeeded: The deployment has succeeded for this instance.
    • Failed: The deployment has failed for this instance.
    • Skipped: The deployment has been skipped for this instance.
    • Unknown: The deployment status is unknown for this instance.
    " + } + }, + "InstanceStatusList": { + "base": null, + "refs": { + "ListDeploymentInstancesInput$instanceStatusFilter": "

    A subset of instances to list, by status:

    • Pending: Include in the resulting list those instances with pending deployments.
    • InProgress: Include in the resulting list those instances with in-progress deployments.
    • Succeeded: Include in the resulting list those instances with succeeded deployments.
    • Failed: Include in the resulting list those instances with failed deployments.
    • Skipped: Include in the resulting list those instances with skipped deployments.
    • Unknown: Include in the resulting list those instances with deployments in an unknown state.
    " + } + }, + "InstanceSummary": { + "base": "

    Information about an instance in a deployment.

    ", + "refs": { + "GetDeploymentInstanceOutput$instanceSummary": "

    Information about the instance.

    " + } + }, + "InstancesList": { + "base": null, + "refs": { + "ListDeploymentInstancesOutput$instancesList": "

    A list of instance IDs.

    " + } + }, + "InvalidApplicationNameException": { + "base": "

    The application name was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidAutoScalingGroupException": { + "base": "

    The Auto Scaling group was specified in an invalid format or does not exist.

    ", + "refs": { + } + }, + "InvalidBucketNameFilterException": { + "base": "

    The bucket name either doesn't exist or was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeployedStateFilterException": { + "base": "

    The deployed state filter was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeploymentConfigNameException": { + "base": "

    The deployment configuration name was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeploymentGroupNameException": { + "base": "

    The deployment group name was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeploymentIdException": { + "base": "

    At least one of the deployment IDs was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidDeploymentStatusException": { + "base": "

    The specified deployment status doesn't exist or cannot be determined.

    ", + "refs": { + } + }, + "InvalidEC2TagException": { + "base": "

    The tag was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidIamUserArnException": { + "base": "

    The IAM user ARN was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidInstanceNameException": { + "base": "

    The specified on-premises instance name was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidInstanceStatusException": { + "base": "

    The specified instance status does not exist.

    ", + "refs": { + } + }, + "InvalidKeyPrefixFilterException": { + "base": "

    The specified key prefix filter was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidMinimumHealthyHostValueException": { + "base": "

    The minimum healthy instances value was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The next token was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidOperationException": { + "base": "

    An invalid operation was detected.

    ", + "refs": { + } + }, + "InvalidRegistrationStatusException": { + "base": "

    The registration status was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidRevisionException": { + "base": "

    The revision was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidRoleException": { + "base": "

    The service role ARN was specified in an invalid format. Or, if an Auto Scaling group was specified, the specified service role does not grant the appropriate permissions to Auto Scaling.

    ", + "refs": { + } + }, + "InvalidSortByException": { + "base": "

    The column name to sort by is either not present or was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidSortOrderException": { + "base": "

    The sort order was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidTagException": { + "base": "

    The specified tag was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidTagFilterException": { + "base": "

    The specified tag filter was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidTimeRangeException": { + "base": "

    The specified time range was specified in an invalid format.

    ", + "refs": { + } + }, + "Key": { + "base": null, + "refs": { + "EC2TagFilter$Key": "

    The tag filter key.

    ", + "Tag$Key": "

    The tag's key.

    ", + "TagFilter$Key": "

    The on-premises instance tag filter key.

    " + } + }, + "LifecycleErrorCode": { + "base": null, + "refs": { + "Diagnostics$errorCode": "

    The associated error code:

    • Success: The specified script ran.
    • ScriptMissing: The specified script was not found in the specified location.
    • ScriptNotExecutable: The specified script is not a recognized executable file type.
    • ScriptTimedOut: The specified script did not finish running in the specified time period.
    • ScriptFailed: The specified script failed to run as expected.
    • UnknownError: The specified script did not run for an unknown reason.
    " + } + }, + "LifecycleEvent": { + "base": "

    Information about a deployment lifecycle event.

    ", + "refs": { + "LifecycleEventList$member": null + } + }, + "LifecycleEventList": { + "base": null, + "refs": { + "InstanceSummary$lifecycleEvents": "

    A list of lifecycle events for this instance.

    " + } + }, + "LifecycleEventName": { + "base": null, + "refs": { + "LifecycleEvent$lifecycleEventName": "

    The deployment lifecycle event name, such as ApplicationStop, BeforeInstall, AfterInstall, ApplicationStart, or ValidateService.

    " + } + }, + "LifecycleEventStatus": { + "base": null, + "refs": { + "LifecycleEvent$status": "

    The deployment lifecycle event status:

    • Pending: The deployment lifecycle event is pending.
    • InProgress: The deployment lifecycle event is in progress.
    • Succeeded: The deployment lifecycle event has succeeded.
    • Failed: The deployment lifecycle event has failed.
    • Skipped: The deployment lifecycle event has been skipped.
    • Unknown: The deployment lifecycle event is unknown.
    " + } + }, + "LifecycleMessage": { + "base": null, + "refs": { + "Diagnostics$message": "

    The message associated with the error.

    " + } + }, + "ListApplicationRevisionsInput": { + "base": "

    Represents the input of a list application revisions operation.

    ", + "refs": { + } + }, + "ListApplicationRevisionsOutput": { + "base": "

    Represents the output of a list application revisions operation.

    ", + "refs": { + } + }, + "ListApplicationsInput": { + "base": "

    Represents the input of a list applications operation.

    ", + "refs": { + } + }, + "ListApplicationsOutput": { + "base": "

    Represents the output of a list applications operation.

    ", + "refs": { + } + }, + "ListDeploymentConfigsInput": { + "base": "

    Represents the input of a list deployment configurations operation.

    ", + "refs": { + } + }, + "ListDeploymentConfigsOutput": { + "base": "

    Represents the output of a list deployment configurations operation.

    ", + "refs": { + } + }, + "ListDeploymentGroupsInput": { + "base": "

    Represents the input of a list deployment groups operation.

    ", + "refs": { + } + }, + "ListDeploymentGroupsOutput": { + "base": "

    Represents the output of a list deployment groups operation.

    ", + "refs": { + } + }, + "ListDeploymentInstancesInput": { + "base": "

    Represents the input of a list deployment instances operation.

    ", + "refs": { + } + }, + "ListDeploymentInstancesOutput": { + "base": "

    Represents the output of a list deployment instances operation.

    ", + "refs": { + } + }, + "ListDeploymentsInput": { + "base": "

    Represents the input of a list deployments operation.

    ", + "refs": { + } + }, + "ListDeploymentsOutput": { + "base": "

    Represents the output of a list deployments operation.

    ", + "refs": { + } + }, + "ListOnPremisesInstancesInput": { + "base": "

    Represents the input of a list on-premises instances operation.

    ", + "refs": { + } + }, + "ListOnPremisesInstancesOutput": { + "base": "

    Represents the output of a list on-premises instances operation.

    ", + "refs": { + } + }, + "ListStateFilterAction": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$deployed": "

    Whether to list revisions based on whether the revision is the target revision of a deployment group:

    • include: List revisions that are target revisions of a deployment group.
    • exclude: Do not list revisions that are target revisions of a deployment group.
    • ignore: List all revisions, regardless of whether they are target revisions of a deployment group.
    " + } + }, + "LogTail": { + "base": null, + "refs": { + "Diagnostics$logTail": "

    The last portion of the associated diagnostic log.

    " + } + }, + "Message": { + "base": null, + "refs": { + "StopDeploymentOutput$statusMessage": "

    An accompanying status message.

    " + } + }, + "MinimumHealthyHosts": { + "base": "

    Information about minimum healthy instances.

    ", + "refs": { + "CreateDeploymentConfigInput$minimumHealthyHosts": "

    The minimum number of healthy instances that should be available at any time during the deployment. There are two parameters expected in the input: type and value.

    The type parameter takes either of the following values:

    • HOST_COUNT: The value parameter represents the minimum number of healthy instances, as an absolute value.
    • FLEET_PERCENT: The value parameter represents the minimum number of healthy instances, as a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, then at the start of the deployment AWS CodeDeploy converts the percentage to the equivalent number of instances and rounds fractional instances up.

    The value parameter takes an integer.

    For example, to set a minimum of 95% healthy instances, specify a type of FLEET_PERCENT and a value of 95.

    ", + "DeploymentConfigInfo$minimumHealthyHosts": "

    Information about the number or percentage of minimum healthy instances.

    " + } + }, + "MinimumHealthyHostsType": { + "base": null, + "refs": { + "MinimumHealthyHosts$type": "

    The minimum healthy instances type:

    • HOST_COUNT: The minimum number of healthy instances, as an absolute value.
    • FLEET_PERCENT: The minimum number of healthy instances, as a percentage of the total number of instances in the deployment.

    For example, for 9 instances, if a HOST_COUNT of 6 is specified, deploy to up to 3 instances at a time. The deployment succeeds if 6 or more instances are successfully deployed to; otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified, deploy to up to 5 instances at a time. The deployment succeeds if 4 or more instances are successfully deployed to; otherwise, the deployment fails.

    In a call to the get deployment configuration operation, CodeDeployDefault.OneAtATime will return a minimum healthy instances type of MOST_CONCURRENCY and a value of 1. This means a deployment to only one instance at a time. (You cannot set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.)" + } + }, + "MinimumHealthyHostsValue": { + "base": null, + "refs": { + "MinimumHealthyHosts$value": "

    The minimum healthy instances value.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$nextToken": "

    An identifier that was returned from the previous list application revisions call, which can be used to return the next set of applications in the list.

    ", + "ListApplicationRevisionsOutput$nextToken": "

    If the amount of information that is returned is significantly large, an identifier will also be returned, which can be used in a subsequent list application revisions call to return the next set of application revisions in the list.

    ", + "ListApplicationsInput$nextToken": "

    An identifier that was returned from the previous list applications call, which can be used to return the next set of applications in the list.

    ", + "ListApplicationsOutput$nextToken": "

    If the amount of information that is returned is significantly large, an identifier will also be returned, which can be used in a subsequent list applications call to return the next set of applications in the list.

    ", + "ListDeploymentConfigsInput$nextToken": "

    An identifier that was returned from the previous list deployment configurations call, which can be used to return the next set of deployment configurations in the list.

    ", + "ListDeploymentConfigsOutput$nextToken": "

    If the amount of information that is returned is significantly large, an identifier will also be returned, which can be used in a subsequent list deployment configurations call to return the next set of deployment configurations in the list.

    ", + "ListDeploymentGroupsInput$nextToken": "

    An identifier that was returned from the previous list deployment groups call, which can be used to return the next set of deployment groups in the list.

    ", + "ListDeploymentGroupsOutput$nextToken": "

    If the amount of information that is returned is significantly large, an identifier will also be returned, which can be used in a subsequent list deployment groups call to return the next set of deployment groups in the list.

    ", + "ListDeploymentInstancesInput$nextToken": "

    An identifier that was returned from the previous list deployment instances call, which can be used to return the next set of deployment instances in the list.

    ", + "ListDeploymentInstancesOutput$nextToken": "

    If the amount of information that is returned is significantly large, an identifier will also be returned, which can be used in a subsequent list deployment instances call to return the next set of deployment instances in the list.

    ", + "ListDeploymentsInput$nextToken": "

    An identifier that was returned from the previous list deployments call, which can be used to return the next set of deployments in the list.

    ", + "ListDeploymentsOutput$nextToken": "

    If the amount of information that is returned is significantly large, an identifier will also be returned, which can be used in a subsequent list deployments call to return the next set of deployments in the list.

    ", + "ListOnPremisesInstancesInput$nextToken": "

    An identifier that was returned from the previous list on-premises instances call, which can be used to return the next set of on-premises instances in the list.

    ", + "ListOnPremisesInstancesOutput$nextToken": "

    If the amount of information that is returned is significantly large, an identifier will also be returned, which can be used in a subsequent list on-premises instances call to return the next set of on-premises instances in the list.

    " + } + }, + "RegisterApplicationRevisionInput": { + "base": "

    Represents the input of a register application revision operation.

    ", + "refs": { + } + }, + "RegisterOnPremisesInstanceInput": { + "base": "

    Represents the input of register on-premises instance operation.

    ", + "refs": { + } + }, + "RegistrationStatus": { + "base": null, + "refs": { + "ListOnPremisesInstancesInput$registrationStatus": "

    The on-premises instances registration status:

    • Deregistered: Include in the resulting list deregistered on-premises instances.
    • Registered: Include in the resulting list registered on-premises instances.
    " + } + }, + "RemoveTagsFromOnPremisesInstancesInput": { + "base": "

    Represents the input of a remove tags from on-premises instances operation.

    ", + "refs": { + } + }, + "Repository": { + "base": null, + "refs": { + "GitHubLocation$repository": "

    The GitHub account and repository pair that stores a reference to the commit that represents the bundled artifacts for the application revision.

    Specified as account/repository.

    " + } + }, + "RevisionDoesNotExistException": { + "base": "

    The named revision does not exist with the applicable IAM user or AWS account.

    ", + "refs": { + } + }, + "RevisionLocation": { + "base": "

    Information about an application revision's location.

    ", + "refs": { + "CreateDeploymentInput$revision": "

    The type of revision to deploy, along with information about the revision's location.

    ", + "DeploymentGroupInfo$targetRevision": "

    Information about the deployment group's target revision, including the revision's type and its location.

    ", + "DeploymentInfo$revision": "

    Information about the location of application artifacts that are stored and the service to retrieve them from.

    ", + "GetApplicationRevisionInput$revision": "

    Information about the application revision to get, including the revision's type and its location.

    ", + "GetApplicationRevisionOutput$revision": "

    Additional information about the revision, including the revision's type and its location.

    ", + "RegisterApplicationRevisionInput$revision": "

    Information about the application revision to register, including the revision's type and its location.

    ", + "RevisionLocationList$member": null + } + }, + "RevisionLocationList": { + "base": null, + "refs": { + "ListApplicationRevisionsOutput$revisions": "

    A list of revision locations that contain the matching revisions.

    " + } + }, + "RevisionLocationType": { + "base": null, + "refs": { + "RevisionLocation$revisionType": "

    The application revision's type:

    • S3: An application revision stored in Amazon S3.
    • GitHub: An application revision stored in GitHub.
    " + } + }, + "RevisionRequiredException": { + "base": "

    The revision ID was not specified.

    ", + "refs": { + } + }, + "Role": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$serviceRoleArn": "

    A service role ARN that allows AWS CodeDeploy to act on the user's behalf when interacting with AWS services.

    ", + "DeploymentGroupInfo$serviceRoleArn": "

    A service role ARN.

    ", + "UpdateDeploymentGroupInput$serviceRoleArn": "

    A replacement service role's ARN, if you want to change it.

    " + } + }, + "RoleRequiredException": { + "base": "

    The role ID was not specified.

    ", + "refs": { + } + }, + "S3Bucket": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$s3Bucket": "

    A specific Amazon S3 bucket name to limit the search for revisions.

    If set to null, then all of the user's buckets will be searched.

    ", + "S3Location$bucket": "

    The name of the Amazon S3 bucket where the application revision is stored.

    " + } + }, + "S3Key": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$s3KeyPrefix": "

    A specific key prefix for the set of Amazon S3 objects to limit the search for revisions.

    ", + "S3Location$key": "

    The name of the Amazon S3 object that represents the bundled artifacts for the application revision.

    " + } + }, + "S3Location": { + "base": "

    Information about the location of application artifacts that are stored in Amazon S3.

    ", + "refs": { + "RevisionLocation$s3Location": null + } + }, + "ScriptName": { + "base": null, + "refs": { + "Diagnostics$scriptName": "

    The name of the script.

    " + } + }, + "SortOrder": { + "base": null, + "refs": { + "ListApplicationRevisionsInput$sortOrder": "

    The order to sort the list results by:

    • ascending: Sort the list of results in ascending order.
    • descending: Sort the list of results in descending order.

    If not specified, the results will be sorted in ascending order.

    If set to null, the results will be sorted in an arbitrary order.

    " + } + }, + "StopDeploymentInput": { + "base": "

    Represents the input of a stop deployment operation.

    ", + "refs": { + } + }, + "StopDeploymentOutput": { + "base": "

    Represents the output of a stop deployment operation.

    ", + "refs": { + } + }, + "StopStatus": { + "base": null, + "refs": { + "StopDeploymentOutput$status": "

    The status of the stop deployment operation:

    • Pending: The stop operation is pending.
    • Succeeded: The stop operation succeeded.
    " + } + }, + "Tag": { + "base": "

    Information about a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagFilter": { + "base": "

    Information about an on-premises instance tag filter.

    ", + "refs": { + "TagFilterList$member": null + } + }, + "TagFilterList": { + "base": null, + "refs": { + "CreateDeploymentGroupInput$onPremisesInstanceTagFilters": "

    The on-premises instance tags to filter on.

    ", + "DeploymentGroupInfo$onPremisesInstanceTagFilters": "

    The on-premises instance tags to filter on.

    ", + "ListOnPremisesInstancesInput$tagFilters": "

    The on-premises instance tags that will be used to restrict the corresponding on-premises instance names that are returned.

    ", + "UpdateDeploymentGroupInput$onPremisesInstanceTagFilters": "

    The replacement set of on-premises instance tags for filter on, if you want to change them.

    " + } + }, + "TagFilterType": { + "base": null, + "refs": { + "TagFilter$Type": "

    The on-premises instance tag filter type:

    • KEY_ONLY: Key only.
    • VALUE_ONLY: Value only.
    • KEY_AND_VALUE: Key and value.
    " + } + }, + "TagLimitExceededException": { + "base": "

    The maximum allowed number of tags was exceeded.

    ", + "refs": { + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsToOnPremisesInstancesInput$tags": "

    The tag key-value pairs to add to the on-premises instances.

    Keys and values are both required. Keys cannot be nulls or empty strings. Value-only tags are not allowed.

    ", + "InstanceInfo$tags": "

    The tags that are currently associated with the on-premises instance.

    ", + "RemoveTagsFromOnPremisesInstancesInput$tags": "

    The tag key-value pairs to remove from the on-premises instances.

    " + } + }, + "TagRequiredException": { + "base": "

    A tag was not specified.

    ", + "refs": { + } + }, + "TimeRange": { + "base": "

    Information about a time range.

    ", + "refs": { + "ListDeploymentsInput$createTimeRange": "

    A deployment creation start- and end-time range for returning a subset of the list of deployments.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ApplicationInfo$createTime": "

    The time that the application was created.

    ", + "DeploymentConfigInfo$createTime": "

    The time that the deployment configuration was created.

    ", + "DeploymentInfo$createTime": "

    A timestamp indicating when the deployment was created.

    ", + "DeploymentInfo$startTime": "

    A timestamp indicating when the deployment began deploying to the deployment group.

    Note that in some cases, the reported value of the start time may be later than the complete time. This is due to differences in the clock settings of various back-end servers that participate in the overall deployment process.

    ", + "DeploymentInfo$completeTime": "

    A timestamp indicating when the deployment was completed.

    ", + "GenericRevisionInfo$firstUsedTime": "

    When the revision was first used by AWS CodeDeploy.

    ", + "GenericRevisionInfo$lastUsedTime": "

    When the revision was last used by AWS CodeDeploy.

    ", + "GenericRevisionInfo$registerTime": "

    When the revision was registered with AWS CodeDeploy.

    ", + "InstanceInfo$registerTime": "

    The time that the on-premises instance was registered.

    ", + "InstanceInfo$deregisterTime": "

    If the on-premises instance was deregistered, the time that the on-premises instance was deregistered.

    ", + "InstanceSummary$lastUpdatedAt": "

    A timestamp indicating when the instance information was last updated.

    ", + "LifecycleEvent$startTime": "

    A timestamp indicating when the deployment lifecycle event started.

    ", + "LifecycleEvent$endTime": "

    A timestamp indicating when the deployment lifecycle event ended.

    ", + "TimeRange$start": "

    The time range's start time.

    Specify null to leave the time range's start time open-ended.", + "TimeRange$end": "

    The time range's end time.

    Specify null to leave the time range's end time open-ended." + } + }, + "UpdateApplicationInput": { + "base": "

    Represents the input of an update application operation.

    ", + "refs": { + } + }, + "UpdateDeploymentGroupInput": { + "base": "

    Represents the input of an update deployment group operation.

    ", + "refs": { + } + }, + "UpdateDeploymentGroupOutput": { + "base": "

    Represents the output of an update deployment group operation.

    ", + "refs": { + } + }, + "Value": { + "base": null, + "refs": { + "EC2TagFilter$Value": "

    The tag filter value.

    ", + "Tag$Value": "

    The tag's value.

    ", + "TagFilter$Value": "

    The on-premises instance tag filter value.

    " + } + }, + "VersionId": { + "base": null, + "refs": { + "S3Location$version": "

    A specific version of the Amazon S3 object that represents the bundled artifacts for the application revision.

    If the version is not specified, the system will use the most recent version by default.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codedeploy/2014-10-06/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,34 @@ +{ + "pagination": { + "ListApplicationRevisions": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "revisions" + }, + "ListApplications": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "applications" + }, + "ListDeploymentConfigs": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "deploymentConfigsList" + }, + "ListDeploymentGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "deploymentGroups" + }, + "ListDeploymentInstances": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "instancesList" + }, + "ListDeployments": { + 
"input_token": "nextToken", + "output_token": "nextToken", + "result_key": "deployments" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1465 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-07-09", + "endpointPrefix":"codepipeline", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CodePipeline", + "serviceFullName":"AWS CodePipeline", + "signatureVersion":"v4", + "targetPrefix":"CodePipeline_20150709" + }, + "operations":{ + "AcknowledgeJob":{ + "name":"AcknowledgeJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcknowledgeJobInput"}, + "output":{"shape":"AcknowledgeJobOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNonceException"}, + {"shape":"JobNotFoundException"} + ] + }, + "AcknowledgeThirdPartyJob":{ + "name":"AcknowledgeThirdPartyJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcknowledgeThirdPartyJobInput"}, + "output":{"shape":"AcknowledgeThirdPartyJobOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNonceException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidClientTokenException"} + ] + }, + "CreateCustomActionType":{ + "name":"CreateCustomActionType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCustomActionTypeInput"}, + "output":{"shape":"CreateCustomActionTypeOutput"}, + "errors":[ + 
{"shape":"ValidationException"}, + {"shape":"LimitExceededException"} + ] + }, + "CreatePipeline":{ + "name":"CreatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePipelineInput"}, + "output":{"shape":"CreatePipelineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNameInUseException"}, + {"shape":"InvalidStageDeclarationException"}, + {"shape":"InvalidActionDeclarationException"}, + {"shape":"InvalidBlockerDeclarationException"}, + {"shape":"InvalidStructureException"}, + {"shape":"LimitExceededException"} + ] + }, + "DeleteCustomActionType":{ + "name":"DeleteCustomActionType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomActionTypeInput"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "DeletePipeline":{ + "name":"DeletePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePipelineInput"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "DisableStageTransition":{ + "name":"DisableStageTransition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableStageTransitionInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"} + ] + }, + "EnableStageTransition":{ + "name":"EnableStageTransition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableStageTransitionInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"} + ] + }, + "GetJobDetails":{ + "name":"GetJobDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetJobDetailsInput"}, + "output":{"shape":"GetJobDetailsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"} + ] + }, + "GetPipeline":{ + "name":"GetPipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"GetPipelineInput"}, + "output":{"shape":"GetPipelineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"PipelineVersionNotFoundException"} + ] + }, + "GetPipelineState":{ + "name":"GetPipelineState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPipelineStateInput"}, + "output":{"shape":"GetPipelineStateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"} + ] + }, + "GetThirdPartyJobDetails":{ + "name":"GetThirdPartyJobDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetThirdPartyJobDetailsInput"}, + "output":{"shape":"GetThirdPartyJobDetailsOutput"}, + "errors":[ + {"shape":"JobNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidClientTokenException"}, + {"shape":"InvalidJobException"} + ] + }, + "ListActionTypes":{ + "name":"ListActionTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListActionTypesInput"}, + "output":{"shape":"ListActionTypesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "ListPipelines":{ + "name":"ListPipelines", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPipelinesInput"}, + "output":{"shape":"ListPipelinesOutput"}, + "errors":[ + {"shape":"InvalidNextTokenException"} + ] + }, + "PollForJobs":{ + "name":"PollForJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForJobsInput"}, + "output":{"shape":"PollForJobsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ActionTypeNotFoundException"} + ] + }, + "PollForThirdPartyJobs":{ + "name":"PollForThirdPartyJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForThirdPartyJobsInput"}, + "output":{"shape":"PollForThirdPartyJobsOutput"}, + "errors":[ + 
{"shape":"ActionTypeNotFoundException"}, + {"shape":"ValidationException"} + ] + }, + "PutActionRevision":{ + "name":"PutActionRevision", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutActionRevisionInput"}, + "output":{"shape":"PutActionRevisionOutput"}, + "errors":[ + {"shape":"PipelineNotFoundException"}, + {"shape":"StageNotFoundException"}, + {"shape":"ActionNotFoundException"}, + {"shape":"ValidationException"} + ] + }, + "PutJobFailureResult":{ + "name":"PutJobFailureResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutJobFailureResultInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidJobStateException"} + ] + }, + "PutJobSuccessResult":{ + "name":"PutJobSuccessResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutJobSuccessResultInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidJobStateException"} + ] + }, + "PutThirdPartyJobFailureResult":{ + "name":"PutThirdPartyJobFailureResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutThirdPartyJobFailureResultInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidJobStateException"}, + {"shape":"InvalidClientTokenException"} + ] + }, + "PutThirdPartyJobSuccessResult":{ + "name":"PutThirdPartyJobSuccessResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutThirdPartyJobSuccessResultInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InvalidJobStateException"}, + {"shape":"InvalidClientTokenException"} + ] + }, + "StartPipelineExecution":{ + "name":"StartPipelineExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartPipelineExecutionInput"}, + "output":{"shape":"StartPipelineExecutionOutput"}, + 
"errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"} + ] + }, + "UpdatePipeline":{ + "name":"UpdatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePipelineInput"}, + "output":{"shape":"UpdatePipelineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidStageDeclarationException"}, + {"shape":"InvalidActionDeclarationException"}, + {"shape":"InvalidBlockerDeclarationException"}, + {"shape":"InvalidStructureException"} + ] + } + }, + "shapes":{ + "AWSSessionCredentials":{ + "type":"structure", + "required":[ + "accessKeyId", + "secretAccessKey", + "sessionToken" + ], + "members":{ + "accessKeyId":{"shape":"AccessKeyId"}, + "secretAccessKey":{"shape":"SecretAccessKey"}, + "sessionToken":{"shape":"SessionToken"} + }, + "sensitive":true + }, + "AccessKeyId":{"type":"string"}, + "AccountId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "AcknowledgeJobInput":{ + "type":"structure", + "required":[ + "jobId", + "nonce" + ], + "members":{ + "jobId":{"shape":"JobId"}, + "nonce":{"shape":"Nonce"} + } + }, + "AcknowledgeJobOutput":{ + "type":"structure", + "members":{ + "status":{"shape":"JobStatus"} + } + }, + "AcknowledgeThirdPartyJobInput":{ + "type":"structure", + "required":[ + "jobId", + "nonce", + "clientToken" + ], + "members":{ + "jobId":{"shape":"ThirdPartyJobId"}, + "nonce":{"shape":"Nonce"}, + "clientToken":{"shape":"ClientToken"} + } + }, + "AcknowledgeThirdPartyJobOutput":{ + "type":"structure", + "members":{ + "status":{"shape":"JobStatus"} + } + }, + "ActionCategory":{ + "type":"string", + "enum":[ + "Source", + "Build", + "Deploy", + "Test", + "Invoke" + ] + }, + "ActionConfiguration":{ + "type":"structure", + "members":{ + "configuration":{"shape":"ActionConfigurationMap"} + } + }, + "ActionConfigurationKey":{ + "type":"string", + "max":50, + "min":1 + }, + "ActionConfigurationMap":{ + "type":"map", + "key":{"shape":"ActionConfigurationKey"}, + 
"value":{"shape":"ActionConfigurationValue"} + }, + "ActionConfigurationProperty":{ + "type":"structure", + "required":[ + "name", + "required", + "key", + "secret" + ], + "members":{ + "name":{"shape":"ActionConfigurationKey"}, + "required":{"shape":"Boolean"}, + "key":{"shape":"Boolean"}, + "secret":{"shape":"Boolean"}, + "queryable":{"shape":"Boolean"}, + "description":{"shape":"Description"}, + "type":{"shape":"ActionConfigurationPropertyType"} + } + }, + "ActionConfigurationPropertyList":{ + "type":"list", + "member":{"shape":"ActionConfigurationProperty"}, + "max":10 + }, + "ActionConfigurationPropertyType":{ + "type":"string", + "enum":[ + "String", + "Number", + "Boolean" + ] + }, + "ActionConfigurationQueryableValue":{ + "type":"string", + "max":20, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ActionConfigurationValue":{ + "type":"string", + "max":250, + "min":1 + }, + "ActionContext":{ + "type":"structure", + "members":{ + "name":{"shape":"ActionName"} + } + }, + "ActionDeclaration":{ + "type":"structure", + "required":[ + "name", + "actionTypeId" + ], + "members":{ + "name":{"shape":"ActionName"}, + "actionTypeId":{"shape":"ActionTypeId"}, + "runOrder":{"shape":"ActionRunOrder"}, + "configuration":{"shape":"ActionConfigurationMap"}, + "outputArtifacts":{"shape":"OutputArtifactList"}, + "inputArtifacts":{"shape":"InputArtifactList"}, + "roleArn":{"shape":"RoleArn"} + } + }, + "ActionExecution":{ + "type":"structure", + "members":{ + "status":{"shape":"ActionExecutionStatus"}, + "summary":{"shape":"ExecutionSummary"}, + "lastStatusChange":{"shape":"Timestamp"}, + "externalExecutionId":{"shape":"ExecutionId"}, + "externalExecutionUrl":{"shape":"Url"}, + "percentComplete":{"shape":"Percentage"}, + "errorDetails":{"shape":"ErrorDetails"} + } + }, + "ActionExecutionStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Succeeded", + "Failed" + ] + }, + "ActionName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9.@\\-_]+" + }, + 
"ActionNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ActionOwner":{ + "type":"string", + "enum":[ + "AWS", + "ThirdParty", + "Custom" + ] + }, + "ActionProvider":{ + "type":"string", + "max":25, + "min":1, + "pattern":"[0-9A-Za-z_-]+" + }, + "ActionRevision":{ + "type":"structure", + "required":[ + "revisionId", + "created" + ], + "members":{ + "revisionId":{"shape":"RevisionId"}, + "revisionChangeId":{"shape":"RevisionChangeId"}, + "created":{"shape":"Timestamp"} + } + }, + "ActionRunOrder":{ + "type":"integer", + "max":999, + "min":1 + }, + "ActionState":{ + "type":"structure", + "members":{ + "actionName":{"shape":"ActionName"}, + "currentRevision":{"shape":"ActionRevision"}, + "latestExecution":{"shape":"ActionExecution"}, + "entityUrl":{"shape":"Url"}, + "revisionUrl":{"shape":"Url"} + } + }, + "ActionStateList":{ + "type":"list", + "member":{"shape":"ActionState"} + }, + "ActionType":{ + "type":"structure", + "required":[ + "id", + "inputArtifactDetails", + "outputArtifactDetails" + ], + "members":{ + "id":{"shape":"ActionTypeId"}, + "settings":{"shape":"ActionTypeSettings"}, + "actionConfigurationProperties":{"shape":"ActionConfigurationPropertyList"}, + "inputArtifactDetails":{"shape":"ArtifactDetails"}, + "outputArtifactDetails":{"shape":"ArtifactDetails"} + } + }, + "ActionTypeId":{ + "type":"structure", + "required":[ + "category", + "owner", + "provider", + "version" + ], + "members":{ + "category":{"shape":"ActionCategory"}, + "owner":{"shape":"ActionOwner"}, + "provider":{"shape":"ActionProvider"}, + "version":{"shape":"Version"} + } + }, + "ActionTypeList":{ + "type":"list", + "member":{"shape":"ActionType"} + }, + "ActionTypeNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ActionTypeSettings":{ + "type":"structure", + "members":{ + "thirdPartyConfigurationUrl":{"shape":"Url"}, + "entityUrlTemplate":{"shape":"UrlTemplate"}, + 
"executionUrlTemplate":{"shape":"UrlTemplate"}, + "revisionUrlTemplate":{"shape":"UrlTemplate"} + } + }, + "Artifact":{ + "type":"structure", + "members":{ + "name":{"shape":"ArtifactName"}, + "revision":{"shape":"Revision"}, + "location":{"shape":"ArtifactLocation"} + } + }, + "ArtifactDetails":{ + "type":"structure", + "required":[ + "minimumCount", + "maximumCount" + ], + "members":{ + "minimumCount":{"shape":"MinimumArtifactCount"}, + "maximumCount":{"shape":"MaximumArtifactCount"} + } + }, + "ArtifactList":{ + "type":"list", + "member":{"shape":"Artifact"} + }, + "ArtifactLocation":{ + "type":"structure", + "members":{ + "type":{"shape":"ArtifactLocationType"}, + "s3Location":{"shape":"S3ArtifactLocation"} + } + }, + "ArtifactLocationType":{ + "type":"string", + "enum":["S3"] + }, + "ArtifactName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9_\\-]+" + }, + "ArtifactStore":{ + "type":"structure", + "required":[ + "type", + "location" + ], + "members":{ + "type":{"shape":"ArtifactStoreType"}, + "location":{"shape":"ArtifactStoreLocation"}, + "encryptionKey":{"shape":"EncryptionKey"} + } + }, + "ArtifactStoreLocation":{ + "type":"string", + "max":63, + "min":3, + "pattern":"[a-zA-Z0-9\\-\\.]+" + }, + "ArtifactStoreType":{ + "type":"string", + "enum":["S3"] + }, + "BlockerDeclaration":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{"shape":"BlockerName"}, + "type":{"shape":"BlockerType"} + } + }, + "BlockerName":{ + "type":"string", + "max":100, + "min":1 + }, + "BlockerType":{ + "type":"string", + "enum":["Schedule"] + }, + "Boolean":{"type":"boolean"}, + "ClientId":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "ClientToken":{"type":"string"}, + "Code":{"type":"string"}, + "ContinuationToken":{"type":"string"}, + "CreateCustomActionTypeInput":{ + "type":"structure", + "required":[ + "category", + "provider", + "version", + "inputArtifactDetails", 
+ "outputArtifactDetails" + ], + "members":{ + "category":{"shape":"ActionCategory"}, + "provider":{"shape":"ActionProvider"}, + "version":{"shape":"Version"}, + "settings":{"shape":"ActionTypeSettings"}, + "configurationProperties":{"shape":"ActionConfigurationPropertyList"}, + "inputArtifactDetails":{"shape":"ArtifactDetails"}, + "outputArtifactDetails":{"shape":"ArtifactDetails"} + } + }, + "CreateCustomActionTypeOutput":{ + "type":"structure", + "required":["actionType"], + "members":{ + "actionType":{"shape":"ActionType"} + } + }, + "CreatePipelineInput":{ + "type":"structure", + "required":["pipeline"], + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "CreatePipelineOutput":{ + "type":"structure", + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "CurrentRevision":{ + "type":"structure", + "required":[ + "revision", + "changeIdentifier" + ], + "members":{ + "revision":{"shape":"Revision"}, + "changeIdentifier":{"shape":"RevisionChangeIdentifier"} + } + }, + "DeleteCustomActionTypeInput":{ + "type":"structure", + "required":[ + "category", + "provider", + "version" + ], + "members":{ + "category":{"shape":"ActionCategory"}, + "provider":{"shape":"ActionProvider"}, + "version":{"shape":"Version"} + } + }, + "DeletePipelineInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"PipelineName"} + } + }, + "Description":{ + "type":"string", + "max":2048, + "min":1 + }, + "DisableStageTransitionInput":{ + "type":"structure", + "required":[ + "pipelineName", + "stageName", + "transitionType", + "reason" + ], + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stageName":{"shape":"StageName"}, + "transitionType":{"shape":"StageTransitionType"}, + "reason":{"shape":"DisabledReason"} + } + }, + "DisabledReason":{ + "type":"string", + "max":300, + "min":1, + "pattern":"[a-zA-Z0-9!@ \\(\\)\\.\\*\\?\\-]+" + }, + "EnableStageTransitionInput":{ + "type":"structure", + "required":[ + 
"pipelineName", + "stageName", + "transitionType" + ], + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stageName":{"shape":"StageName"}, + "transitionType":{"shape":"StageTransitionType"} + } + }, + "Enabled":{"type":"boolean"}, + "EncryptionKey":{ + "type":"structure", + "required":[ + "id", + "type" + ], + "members":{ + "id":{"shape":"EncryptionKeyId"}, + "type":{"shape":"EncryptionKeyType"} + } + }, + "EncryptionKeyId":{ + "type":"string", + "max":100, + "min":1 + }, + "EncryptionKeyType":{ + "type":"string", + "enum":["KMS"] + }, + "ErrorDetails":{ + "type":"structure", + "members":{ + "code":{"shape":"Code"}, + "message":{"shape":"Message"} + } + }, + "ExecutionDetails":{ + "type":"structure", + "members":{ + "summary":{"shape":"ExecutionSummary"}, + "externalExecutionId":{"shape":"ExecutionId"}, + "percentComplete":{"shape":"Percentage"} + } + }, + "ExecutionId":{"type":"string"}, + "ExecutionSummary":{"type":"string"}, + "FailureDetails":{ + "type":"structure", + "required":[ + "type", + "message" + ], + "members":{ + "type":{"shape":"FailureType"}, + "message":{"shape":"Message"}, + "externalExecutionId":{"shape":"ExecutionId"} + } + }, + "FailureType":{ + "type":"string", + "enum":[ + "JobFailed", + "ConfigurationError", + "PermissionError", + "RevisionOutOfSync", + "RevisionUnavailable", + "SystemUnavailable" + ] + }, + "GetJobDetailsInput":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{"shape":"JobId"} + } + }, + "GetJobDetailsOutput":{ + "type":"structure", + "members":{ + "jobDetails":{"shape":"JobDetails"} + } + }, + "GetPipelineInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"PipelineName"}, + "version":{"shape":"PipelineVersion"} + } + }, + "GetPipelineOutput":{ + "type":"structure", + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "GetPipelineStateInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"PipelineName"} + } + 
}, + "GetPipelineStateOutput":{ + "type":"structure", + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "pipelineVersion":{"shape":"PipelineVersion"}, + "stageStates":{"shape":"StageStateList"}, + "created":{"shape":"Timestamp"}, + "updated":{"shape":"Timestamp"} + } + }, + "GetThirdPartyJobDetailsInput":{ + "type":"structure", + "required":[ + "jobId", + "clientToken" + ], + "members":{ + "jobId":{"shape":"ThirdPartyJobId"}, + "clientToken":{"shape":"ClientToken"} + } + }, + "GetThirdPartyJobDetailsOutput":{ + "type":"structure", + "members":{ + "jobDetails":{"shape":"ThirdPartyJobDetails"} + } + }, + "InputArtifact":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"ArtifactName"} + } + }, + "InputArtifactList":{ + "type":"list", + "member":{"shape":"InputArtifact"} + }, + "InvalidActionDeclarationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidBlockerDeclarationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidClientTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidJobException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidJobStateException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNonceException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidStageDeclarationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidStructureException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Job":{ + "type":"structure", + "members":{ + "id":{"shape":"JobId"}, + "data":{"shape":"JobData"}, + "nonce":{"shape":"Nonce"}, + "accountId":{"shape":"AccountId"} + } + }, + "JobData":{ + "type":"structure", + "members":{ + "actionTypeId":{"shape":"ActionTypeId"}, + 
"actionConfiguration":{"shape":"ActionConfiguration"}, + "pipelineContext":{"shape":"PipelineContext"}, + "inputArtifacts":{"shape":"ArtifactList"}, + "outputArtifacts":{"shape":"ArtifactList"}, + "artifactCredentials":{"shape":"AWSSessionCredentials"}, + "continuationToken":{"shape":"ContinuationToken"}, + "encryptionKey":{"shape":"EncryptionKey"} + } + }, + "JobDetails":{ + "type":"structure", + "members":{ + "id":{"shape":"JobId"}, + "data":{"shape":"JobData"}, + "accountId":{"shape":"AccountId"} + } + }, + "JobId":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "JobList":{ + "type":"list", + "member":{"shape":"Job"} + }, + "JobNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "JobStatus":{ + "type":"string", + "enum":[ + "Created", + "Queued", + "Dispatched", + "InProgress", + "TimedOut", + "Succeeded", + "Failed" + ] + }, + "LastChangedAt":{"type":"timestamp"}, + "LastChangedBy":{"type":"string"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListActionTypesInput":{ + "type":"structure", + "members":{ + "actionOwnerFilter":{"shape":"ActionOwner"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListActionTypesOutput":{ + "type":"structure", + "required":["actionTypes"], + "members":{ + "actionTypes":{"shape":"ActionTypeList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListPipelinesInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"} + } + }, + "ListPipelinesOutput":{ + "type":"structure", + "members":{ + "pipelines":{"shape":"PipelineList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "MaxBatchSize":{ + "type":"integer", + "min":1 + }, + "MaximumArtifactCount":{ + "type":"integer", + "max":5, + "min":0 + }, + "Message":{"type":"string"}, + "MinimumArtifactCount":{ + "type":"integer", + "max":5, + "min":0 + }, + "NextToken":{"type":"string"}, + "Nonce":{"type":"string"}, + 
"OutputArtifact":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"ArtifactName"} + } + }, + "OutputArtifactList":{ + "type":"list", + "member":{"shape":"OutputArtifact"} + }, + "Percentage":{ + "type":"integer", + "max":100, + "min":0 + }, + "PipelineContext":{ + "type":"structure", + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stage":{"shape":"StageContext"}, + "action":{"shape":"ActionContext"} + } + }, + "PipelineDeclaration":{ + "type":"structure", + "required":[ + "name", + "roleArn", + "artifactStore", + "stages" + ], + "members":{ + "name":{"shape":"PipelineName"}, + "roleArn":{"shape":"RoleArn"}, + "artifactStore":{"shape":"ArtifactStore"}, + "stages":{"shape":"PipelineStageDeclarationList"}, + "version":{"shape":"PipelineVersion"} + } + }, + "PipelineExecutionId":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "PipelineList":{ + "type":"list", + "member":{"shape":"PipelineSummary"} + }, + "PipelineName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9.@\\-_]+" + }, + "PipelineNameInUseException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PipelineNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PipelineStageDeclarationList":{ + "type":"list", + "member":{"shape":"StageDeclaration"} + }, + "PipelineSummary":{ + "type":"structure", + "members":{ + "name":{"shape":"PipelineName"}, + "version":{"shape":"PipelineVersion"}, + "created":{"shape":"Timestamp"}, + "updated":{"shape":"Timestamp"} + } + }, + "PipelineVersion":{ + "type":"integer", + "min":1 + }, + "PipelineVersionNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PollForJobsInput":{ + "type":"structure", + "required":["actionTypeId"], + "members":{ + "actionTypeId":{"shape":"ActionTypeId"}, + "maxBatchSize":{"shape":"MaxBatchSize"}, + "queryParam":{"shape":"QueryParamMap"} + } + }, 
+ "PollForJobsOutput":{ + "type":"structure", + "members":{ + "jobs":{"shape":"JobList"} + } + }, + "PollForThirdPartyJobsInput":{ + "type":"structure", + "required":["actionTypeId"], + "members":{ + "actionTypeId":{"shape":"ActionTypeId"}, + "maxBatchSize":{"shape":"MaxBatchSize"} + } + }, + "PollForThirdPartyJobsOutput":{ + "type":"structure", + "members":{ + "jobs":{"shape":"ThirdPartyJobList"} + } + }, + "PutActionRevisionInput":{ + "type":"structure", + "required":[ + "pipelineName", + "stageName", + "actionName", + "actionRevision" + ], + "members":{ + "pipelineName":{"shape":"PipelineName"}, + "stageName":{"shape":"StageName"}, + "actionName":{"shape":"ActionName"}, + "actionRevision":{"shape":"ActionRevision"} + } + }, + "PutActionRevisionOutput":{ + "type":"structure", + "members":{ + "newRevision":{"shape":"Boolean"}, + "pipelineExecutionId":{"shape":"PipelineExecutionId"} + } + }, + "PutJobFailureResultInput":{ + "type":"structure", + "required":[ + "jobId", + "failureDetails" + ], + "members":{ + "jobId":{"shape":"JobId"}, + "failureDetails":{"shape":"FailureDetails"} + } + }, + "PutJobSuccessResultInput":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{"shape":"JobId"}, + "currentRevision":{"shape":"CurrentRevision"}, + "continuationToken":{"shape":"ContinuationToken"}, + "executionDetails":{"shape":"ExecutionDetails"} + } + }, + "PutThirdPartyJobFailureResultInput":{ + "type":"structure", + "required":[ + "jobId", + "clientToken", + "failureDetails" + ], + "members":{ + "jobId":{"shape":"ThirdPartyJobId"}, + "clientToken":{"shape":"ClientToken"}, + "failureDetails":{"shape":"FailureDetails"} + } + }, + "PutThirdPartyJobSuccessResultInput":{ + "type":"structure", + "required":[ + "jobId", + "clientToken" + ], + "members":{ + "jobId":{"shape":"ThirdPartyJobId"}, + "clientToken":{"shape":"ClientToken"}, + "currentRevision":{"shape":"CurrentRevision"}, + "continuationToken":{"shape":"ContinuationToken"}, + 
"executionDetails":{"shape":"ExecutionDetails"} + } + }, + "QueryParamMap":{ + "type":"map", + "key":{"shape":"ActionConfigurationKey"}, + "value":{"shape":"ActionConfigurationQueryableValue"}, + "max":1, + "min":0 + }, + "Revision":{"type":"string"}, + "RevisionChangeId":{"type":"string"}, + "RevisionChangeIdentifier":{"type":"string"}, + "RevisionId":{"type":"string"}, + "RoleArn":{ + "type":"string", + "pattern":"arn:[^:]+:iam::[0-9]{12}:role/.*" + }, + "S3ArtifactLocation":{ + "type":"structure", + "required":[ + "bucketName", + "objectKey" + ], + "members":{ + "bucketName":{"shape":"S3BucketName"}, + "objectKey":{"shape":"S3ObjectKey"} + } + }, + "S3BucketName":{"type":"string"}, + "S3ObjectKey":{"type":"string"}, + "SecretAccessKey":{"type":"string"}, + "SessionToken":{"type":"string"}, + "StageActionDeclarationList":{ + "type":"list", + "member":{"shape":"ActionDeclaration"} + }, + "StageBlockerDeclarationList":{ + "type":"list", + "member":{"shape":"BlockerDeclaration"} + }, + "StageContext":{ + "type":"structure", + "members":{ + "name":{"shape":"StageName"} + } + }, + "StageDeclaration":{ + "type":"structure", + "required":[ + "name", + "actions" + ], + "members":{ + "name":{"shape":"StageName"}, + "blockers":{"shape":"StageBlockerDeclarationList"}, + "actions":{"shape":"StageActionDeclarationList"} + } + }, + "StageName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9.@\\-_]+" + }, + "StageNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "StageState":{ + "type":"structure", + "members":{ + "stageName":{"shape":"StageName"}, + "inboundTransitionState":{"shape":"TransitionState"}, + "actionStates":{"shape":"ActionStateList"} + } + }, + "StageStateList":{ + "type":"list", + "member":{"shape":"StageState"} + }, + "StageTransitionType":{ + "type":"string", + "enum":[ + "Inbound", + "Outbound" + ] + }, + "StartPipelineExecutionInput":{ + "type":"structure", + "required":["name"], + "members":{ + 
"name":{"shape":"PipelineName"} + } + }, + "StartPipelineExecutionOutput":{ + "type":"structure", + "members":{ + "pipelineExecutionId":{"shape":"PipelineExecutionId"} + } + }, + "ThirdPartyJob":{ + "type":"structure", + "members":{ + "clientId":{"shape":"ClientId"}, + "jobId":{"shape":"JobId"} + } + }, + "ThirdPartyJobData":{ + "type":"structure", + "members":{ + "actionTypeId":{"shape":"ActionTypeId"}, + "actionConfiguration":{"shape":"ActionConfiguration"}, + "pipelineContext":{"shape":"PipelineContext"}, + "inputArtifacts":{"shape":"ArtifactList"}, + "outputArtifacts":{"shape":"ArtifactList"}, + "artifactCredentials":{"shape":"AWSSessionCredentials"}, + "continuationToken":{"shape":"ContinuationToken"}, + "encryptionKey":{"shape":"EncryptionKey"} + } + }, + "ThirdPartyJobDetails":{ + "type":"structure", + "members":{ + "id":{"shape":"ThirdPartyJobId"}, + "data":{"shape":"ThirdPartyJobData"}, + "nonce":{"shape":"Nonce"} + } + }, + "ThirdPartyJobId":{ + "type":"string", + "max":512, + "min":1 + }, + "ThirdPartyJobList":{ + "type":"list", + "member":{"shape":"ThirdPartyJob"} + }, + "Timestamp":{"type":"timestamp"}, + "TransitionState":{ + "type":"structure", + "members":{ + "enabled":{"shape":"Enabled"}, + "lastChangedBy":{"shape":"LastChangedBy"}, + "lastChangedAt":{"shape":"LastChangedAt"}, + "disabledReason":{"shape":"DisabledReason"} + } + }, + "UpdatePipelineInput":{ + "type":"structure", + "required":["pipeline"], + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "UpdatePipelineOutput":{ + "type":"structure", + "members":{ + "pipeline":{"shape":"PipelineDeclaration"} + } + }, + "Url":{ + "type":"string", + "max":2048, + "min":1 + }, + "UrlTemplate":{ + "type":"string", + "max":2048, + "min":1 + }, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Version":{ + "type":"string", + "max":9, + "min":1, + "pattern":"[0-9A-Za-z_-]+" + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1099 @@ +{ + "version": "2.0", + "service": "AWS CodePipeline Overview

    This is the AWS CodePipeline API Reference. This guide provides descriptions of the actions and data types for AWS CodePipeline. Some functionality for your pipeline is only configurable through the API. For additional information, see the AWS CodePipeline User Guide.

    You can use the AWS CodePipeline API to work with pipelines, stages, actions, gates, and transitions, as described below.

    Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of actions, gates, and stages.

    You can work with pipelines by calling:

    • CreatePipeline, which creates a uniquely-named pipeline.
    • DeletePipeline, which deletes the specified pipeline.
    • GetPipeline, which returns information about a pipeline structure.
    • GetPipelineState, which returns information about the current state of the stages and actions of a pipeline.
    • ListPipelines, which gets a summary of all of the pipelines associated with your account.
    • StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline.
    • UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline.

    Pipelines include stages, which are logical groupings of gates and actions. Each stage contains one or more actions that must complete before the next stage begins. A stage will result in success or failure. If a stage fails, then the pipeline stops at that stage and will remain stopped until either a new version of an artifact appears in the source location, or a user takes action to re-run the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, also refer to the AWS CodePipeline Pipeline Structure Reference.

    Pipeline stages include actions, which are categorized into categories such as source or build actions performed within a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState.

    Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

    You can work with transitions by calling:

    Using the API to integrate with AWS CodePipeline

    For third-party integrators or developers who want to create their own integrations with AWS CodePipeline, the expected sequence varies from the standard API user. In order to integrate with AWS CodePipeline, developers will need to work with the following items:

    • Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

      You can work with jobs by calling:

    • Third party jobs, which are instances of an action created by a partner action and integrated into AWS CodePipeline. Partner actions are created by members of the AWS Partner Network.

      You can work with third party jobs by calling:

    ", + "operations": { + "AcknowledgeJob": "

    Returns information about a specified job and whether that job has been received by the job worker. Only used for custom actions.

    ", + "AcknowledgeThirdPartyJob": "

    Confirms a job worker has received the specified job. Only used for partner actions.

    ", + "CreateCustomActionType": "

    Creates a new custom action that can be used in all pipelines associated with the AWS account. Only used for custom actions.

    ", + "CreatePipeline": "

    Creates a pipeline.

    ", + "DeleteCustomActionType": "

    Marks a custom action as deleted. PollForJobs for the custom action will fail after the action is marked for deletion. Only used for custom actions.

    You cannot recreate a custom action after it has been deleted unless you increase the version number of the action.

    ", + "DeletePipeline": "

    Deletes the specified pipeline.

    ", + "DisableStageTransition": "

    Prevents artifacts in a pipeline from transitioning to the next stage in the pipeline.

    ", + "EnableStageTransition": "

    Enables artifacts in a pipeline to transition to a stage in a pipeline.

    ", + "GetJobDetails": "

    Returns information about a job. Only used for custom actions.

    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.

    ", + "GetPipeline": "

    Returns the metadata, structure, stages, and actions of a pipeline. Can be used to return the entire structure of a pipeline in JSON format, which can then be modified and used to update the pipeline structure with UpdatePipeline.

    ", + "GetPipelineState": "

    Returns information about the state of a pipeline, including the stages, actions, and details about the last run of the pipeline.

    ", + "GetThirdPartyJobDetails": "

    Requests the details of a job for a third party action. Only used for partner actions.

    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.

    ", + "ListActionTypes": "

    Gets a summary of all AWS CodePipeline action types associated with your account.

    ", + "ListPipelines": "

    Gets a summary of all of the pipelines associated with your account.

    ", + "PollForJobs": "

    Returns information about any jobs for AWS CodePipeline to act upon.

    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts. Additionally, this API returns any secret values defined for the action.

    ", + "PollForThirdPartyJobs": "

    Determines whether there are any third party jobs for a job worker to act on. Only used for partner actions.

    When this API is called, AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store artifacts for the pipeline, if the action requires access to that Amazon S3 bucket for input or output artifacts.

    ", + "PutActionRevision": "

    Provides information to AWS CodePipeline about new revisions to a source.

    ", + "PutJobFailureResult": "

    Represents the failure of a job as returned to the pipeline by a job worker. Only used for custom actions.

    ", + "PutJobSuccessResult": "

    Represents the success of a job as returned to the pipeline by a job worker. Only used for custom actions.

    ", + "PutThirdPartyJobFailureResult": "

    Represents the failure of a third party job as returned to the pipeline by a job worker. Only used for partner actions.

    ", + "PutThirdPartyJobSuccessResult": "

    Represents the success of a third party job as returned to the pipeline by a job worker. Only used for partner actions.

    ", + "StartPipelineExecution": "

    Starts the specified pipeline. Specifically, it begins processing the latest commit to the source location specified as part of the pipeline.

    ", + "UpdatePipeline": "

    Updates a specified pipeline with edits or changes to its structure. Use a JSON file with the pipeline structure in conjunction with UpdatePipeline to provide the full structure of the pipeline. Updating the pipeline increases the version number of the pipeline by 1.

    " + }, + "shapes": { + "AWSSessionCredentials": { + "base": "

    Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the Amazon S3 bucket used to store artifact for the pipeline in AWS CodePipeline.

    ", + "refs": { + "JobData$artifactCredentials": null, + "ThirdPartyJobData$artifactCredentials": null + } + }, + "AccessKeyId": { + "base": null, + "refs": { + "AWSSessionCredentials$accessKeyId": "

    The access key for the session.

    " + } + }, + "AccountId": { + "base": null, + "refs": { + "Job$accountId": "

    The ID of the AWS account to use when performing the job.

    ", + "JobDetails$accountId": "

    The AWS account ID associated with the job.

    " + } + }, + "AcknowledgeJobInput": { + "base": "

    Represents the input of an acknowledge job action.

    ", + "refs": { + } + }, + "AcknowledgeJobOutput": { + "base": "

    Represents the output of an acknowledge job action.

    ", + "refs": { + } + }, + "AcknowledgeThirdPartyJobInput": { + "base": "

    Represents the input of an acknowledge third party job action.

    ", + "refs": { + } + }, + "AcknowledgeThirdPartyJobOutput": { + "base": "

    Represents the output of an acknowledge third party job action.

    ", + "refs": { + } + }, + "ActionCategory": { + "base": null, + "refs": { + "ActionTypeId$category": "

    A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.

    ", + "CreateCustomActionTypeInput$category": "

    The category of the custom action, such as a source action or a build action.

    ", + "DeleteCustomActionTypeInput$category": "

    The category of the custom action that you want to delete, such as source or deploy.

    " + } + }, + "ActionConfiguration": { + "base": "

    Represents information about an action configuration.

    ", + "refs": { + "JobData$actionConfiguration": null, + "ThirdPartyJobData$actionConfiguration": null + } + }, + "ActionConfigurationKey": { + "base": null, + "refs": { + "ActionConfigurationMap$key": null, + "ActionConfigurationProperty$name": "

    The name of the action configuration property.

    ", + "QueryParamMap$key": null + } + }, + "ActionConfigurationMap": { + "base": null, + "refs": { + "ActionConfiguration$configuration": "

    The configuration data for the action.

    ", + "ActionDeclaration$configuration": "

    The action declaration's configuration.

    " + } + }, + "ActionConfigurationProperty": { + "base": "

    Represents information about an action configuration property.

    ", + "refs": { + "ActionConfigurationPropertyList$member": null + } + }, + "ActionConfigurationPropertyList": { + "base": null, + "refs": { + "ActionType$actionConfigurationProperties": "

    The configuration properties for the action type.

    ", + "CreateCustomActionTypeInput$configurationProperties": "

    The configuration properties for the custom action.

    " + } + }, + "ActionConfigurationPropertyType": { + "base": null, + "refs": { + "ActionConfigurationProperty$type": "

    The type of the configuration property.

    " + } + }, + "ActionConfigurationQueryableValue": { + "base": null, + "refs": { + "QueryParamMap$value": null + } + }, + "ActionConfigurationValue": { + "base": null, + "refs": { + "ActionConfigurationMap$value": null + } + }, + "ActionContext": { + "base": "

    Represents the context of an action within the stage of a pipeline to a job worker.

    ", + "refs": { + "PipelineContext$action": null + } + }, + "ActionDeclaration": { + "base": "

    Represents information about an action declaration.

    ", + "refs": { + "StageActionDeclarationList$member": null + } + }, + "ActionExecution": { + "base": "

    Represents information about how an action runs.

    ", + "refs": { + "ActionState$latestExecution": null + } + }, + "ActionExecutionStatus": { + "base": null, + "refs": { + "ActionExecution$status": "

    The status of the action, or for a completed action, the last status of the action.

    " + } + }, + "ActionName": { + "base": null, + "refs": { + "ActionContext$name": "

    The name of the action within the context of a job.

    ", + "ActionDeclaration$name": "

    The action declaration's name.

    ", + "ActionState$actionName": "

    The name of the action.

    ", + "PutActionRevisionInput$actionName": "

    The name of the action that will process the revision.

    " + } + }, + "ActionNotFoundException": { + "base": "

    The specified action cannot be found.

    ", + "refs": { + } + }, + "ActionOwner": { + "base": null, + "refs": { + "ActionTypeId$owner": "

    The creator of the action being called.

    ", + "ListActionTypesInput$actionOwnerFilter": "

    Filters the list of action types to those created by a specified entity.

    " + } + }, + "ActionProvider": { + "base": null, + "refs": { + "ActionTypeId$provider": "

    The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy.

    ", + "CreateCustomActionTypeInput$provider": "

    The provider of the service used in the custom action, such as AWS CodeDeploy.

    ", + "DeleteCustomActionTypeInput$provider": "

    The provider of the service used in the custom action, such as AWS CodeDeploy.

    " + } + }, + "ActionRevision": { + "base": "

    Represents information about the version (or revision) of an action.

    ", + "refs": { + "ActionState$currentRevision": null, + "PutActionRevisionInput$actionRevision": null + } + }, + "ActionRunOrder": { + "base": null, + "refs": { + "ActionDeclaration$runOrder": "

    The order in which actions are run.

    " + } + }, + "ActionState": { + "base": "

    Represents information about the state of an action.

    ", + "refs": { + "ActionStateList$member": null + } + }, + "ActionStateList": { + "base": null, + "refs": { + "StageState$actionStates": "

    The state of the stage.

    " + } + }, + "ActionType": { + "base": "

    Returns information about the details of an action type.

    ", + "refs": { + "ActionTypeList$member": null, + "CreateCustomActionTypeOutput$actionType": null + } + }, + "ActionTypeId": { + "base": "

    Represents information about an action type.

    ", + "refs": { + "ActionDeclaration$actionTypeId": "

    The configuration information for the action type.

    ", + "ActionType$id": null, + "JobData$actionTypeId": null, + "PollForJobsInput$actionTypeId": null, + "PollForThirdPartyJobsInput$actionTypeId": null, + "ThirdPartyJobData$actionTypeId": null + } + }, + "ActionTypeList": { + "base": null, + "refs": { + "ListActionTypesOutput$actionTypes": "

    Provides details of the action types.

    " + } + }, + "ActionTypeNotFoundException": { + "base": "

    The specified action type cannot be found.

    ", + "refs": { + } + }, + "ActionTypeSettings": { + "base": "

    Returns information about the settings for an action type.

    ", + "refs": { + "ActionType$settings": "

    The settings for the action type.

    ", + "CreateCustomActionTypeInput$settings": null + } + }, + "Artifact": { + "base": "

    Represents information about an artifact that will be worked upon by actions in the pipeline.

    ", + "refs": { + "ArtifactList$member": null + } + }, + "ArtifactDetails": { + "base": "

    Returns information about the details of an artifact.

    ", + "refs": { + "ActionType$inputArtifactDetails": "

    The details of the input artifact for the action, such as its commit ID.

    ", + "ActionType$outputArtifactDetails": "

    The details of the output artifact of the action, such as its commit ID.

    ", + "CreateCustomActionTypeInput$inputArtifactDetails": null, + "CreateCustomActionTypeInput$outputArtifactDetails": null + } + }, + "ArtifactList": { + "base": null, + "refs": { + "JobData$inputArtifacts": "

    The artifact supplied to the job.

    ", + "JobData$outputArtifacts": "

    The output of the job.

    ", + "ThirdPartyJobData$inputArtifacts": "

    The name of the artifact that will be worked upon by the action, if any. This name might be system-generated, such as \"MyApp\", or might be defined by the user when the action is created. The input artifact name must match the name of an output artifact generated by an action in an earlier action or stage of the pipeline.

    ", + "ThirdPartyJobData$outputArtifacts": "

    The name of the artifact that will be the result of the action, if any. This name might be system-generated, such as \"MyBuiltApp\", or might be defined by the user when the action is created.

    " + } + }, + "ArtifactLocation": { + "base": "

    Represents information about the location of an artifact.

    ", + "refs": { + "Artifact$location": "

    The location of an artifact.

    " + } + }, + "ArtifactLocationType": { + "base": null, + "refs": { + "ArtifactLocation$type": "

    The type of artifact in the location.

    " + } + }, + "ArtifactName": { + "base": null, + "refs": { + "Artifact$name": "

    The artifact's name.

    ", + "InputArtifact$name": "

    The name of the artifact to be worked on, for example, \"My App\".

    The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.

    ", + "OutputArtifact$name": "

    The name of the output of an artifact, such as \"My App\".

    The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.

    Output artifact names must be unique within a pipeline.

    " + } + }, + "ArtifactStore": { + "base": "

    The Amazon S3 location where artifacts are stored for the pipeline. If this Amazon S3 bucket is created manually, it must meet the requirements for AWS CodePipeline. For more information, see the Concepts.

    ", + "refs": { + "PipelineDeclaration$artifactStore": null + } + }, + "ArtifactStoreLocation": { + "base": null, + "refs": { + "ArtifactStore$location": "

    The location for storing the artifacts for a pipeline, such as an S3 bucket or folder.

    " + } + }, + "ArtifactStoreType": { + "base": null, + "refs": { + "ArtifactStore$type": "

    The type of the artifact store, such as S3.

    " + } + }, + "BlockerDeclaration": { + "base": "

    Represents information about a gate declaration.

    ", + "refs": { + "StageBlockerDeclarationList$member": null + } + }, + "BlockerName": { + "base": null, + "refs": { + "BlockerDeclaration$name": "

    The name of the gate declaration.

    " + } + }, + "BlockerType": { + "base": null, + "refs": { + "BlockerDeclaration$type": "

    The type of the gate declaration.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ActionConfigurationProperty$required": "

    Whether the configuration property is a required value.

    ", + "ActionConfigurationProperty$key": "

    Whether the configuration property is a key.

    ", + "ActionConfigurationProperty$secret": "

    Whether the configuration property is secret. Secrets are hidden from all calls except for GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs.

    When updating a pipeline, passing * * * * * without changing any other values of the action will preserve the prior value of the secret.

    ", + "ActionConfigurationProperty$queryable": "

    Indicates that the property will be used in conjunction with PollForJobs. When creating a custom action, an action can have up to one queryable property. If it has one, that property must be both required and not secret.

    If you create a pipeline with a custom action type, and that custom action contains a queryable property, the value for that configuration property is subject to additional restrictions. The value must be less than or equal to twenty (20) characters. The value can contain only alphanumeric characters, underscores, and hyphens.

    ", + "PutActionRevisionOutput$newRevision": "

    The new revision number or ID for the revision after the action completes.

    " + } + }, + "ClientId": { + "base": null, + "refs": { + "ThirdPartyJob$clientId": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    " + } + }, + "ClientToken": { + "base": null, + "refs": { + "AcknowledgeThirdPartyJobInput$clientToken": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    ", + "GetThirdPartyJobDetailsInput$clientToken": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    ", + "PutThirdPartyJobFailureResultInput$clientToken": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    ", + "PutThirdPartyJobSuccessResultInput$clientToken": "

    The clientToken portion of the clientId and clientToken pair used to verify that the calling entity is allowed access to the job and its details.

    " + } + }, + "Code": { + "base": null, + "refs": { + "ErrorDetails$code": "

    The system ID or error number code of the error.

    " + } + }, + "ContinuationToken": { + "base": null, + "refs": { + "JobData$continuationToken": "

    A system-generated token, such as an AWS CodeDeploy deployment ID, that a job requires in order to continue the job asynchronously.

    ", + "PutJobSuccessResultInput$continuationToken": "

    A system-generated token, such as an AWS CodeDeploy deployment ID, that the successful job used to complete a job asynchronously.

    ", + "PutThirdPartyJobSuccessResultInput$continuationToken": "

    A system-generated token, such as an AWS CodeDeploy deployment ID, that a job uses in order to continue the job asynchronously.

    ", + "ThirdPartyJobData$continuationToken": "

    A system-generated token, such as an AWS CodeDeploy deployment ID, that a job requires in order to continue the job asynchronously.

    " + } + }, + "CreateCustomActionTypeInput": { + "base": "

    Represents the input of a create custom action operation.

    ", + "refs": { + } + }, + "CreateCustomActionTypeOutput": { + "base": "

    Represents the output of a create custom action operation.

    ", + "refs": { + } + }, + "CreatePipelineInput": { + "base": "

    Represents the input of a create pipeline action.

    ", + "refs": { + } + }, + "CreatePipelineOutput": { + "base": "

    Represents the output of a create pipeline action.

    ", + "refs": { + } + }, + "CurrentRevision": { + "base": "

    Represents information about a current revision.

    ", + "refs": { + "PutJobSuccessResultInput$currentRevision": "

    The ID of the current revision of the artifact successfully worked upon by the job.

    ", + "PutThirdPartyJobSuccessResultInput$currentRevision": null + } + }, + "DeleteCustomActionTypeInput": { + "base": "

    Represents the input of a delete custom action operation. The custom action will be marked as deleted.

    ", + "refs": { + } + }, + "DeletePipelineInput": { + "base": "

    Represents the input of a delete pipeline action.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ActionConfigurationProperty$description": "

    The description of the action configuration property that will be displayed to users.

    " + } + }, + "DisableStageTransitionInput": { + "base": "

    Represents the input of a disable stage transition input action.

    ", + "refs": { + } + }, + "DisabledReason": { + "base": null, + "refs": { + "DisableStageTransitionInput$reason": "

    The reason given to the user why a stage is disabled, such as waiting for manual approval or manual tests. This message is displayed in the pipeline console UI.

    ", + "TransitionState$disabledReason": "

    The user-specified reason why the transition between two stages of a pipeline was disabled.

    " + } + }, + "EnableStageTransitionInput": { + "base": "

    Represents the input of an enable stage transition action.

    ", + "refs": { + } + }, + "Enabled": { + "base": null, + "refs": { + "TransitionState$enabled": "

    Whether the transition between stages is enabled (true) or disabled (false).

    " + } + }, + "EncryptionKey": { + "base": "

    Represents information about the AWS Key Management Service (AWS KMS) key used to encrypt data in the artifact store.

    ", + "refs": { + "ArtifactStore$encryptionKey": "

    The AWS Key Management Service (AWS KMS) key used to encrypt the data in the artifact store. If this is undefined, the default key for Amazon S3 is used.

    ", + "JobData$encryptionKey": null, + "ThirdPartyJobData$encryptionKey": "

    The AWS Key Management Service (AWS KMS) key used to encrypt and decrypt data in the artifact store for the pipeline.

    " + } + }, + "EncryptionKeyId": { + "base": null, + "refs": { + "EncryptionKey$id": "

    The ID of the AWS KMS key.

    " + } + }, + "EncryptionKeyType": { + "base": null, + "refs": { + "EncryptionKey$type": "

    The type of AWS KMS key, such as a customer master key.

    " + } + }, + "ErrorDetails": { + "base": "

    Represents information about an error in AWS CodePipeline.

    ", + "refs": { + "ActionExecution$errorDetails": "

    The details of an error returned by a URL external to AWS.

    " + } + }, + "ExecutionDetails": { + "base": "

    The details of the actions taken and results produced on an artifact as it passes through stages in the pipeline.

    ", + "refs": { + "PutJobSuccessResultInput$executionDetails": "

    The execution details of the successful job, such as the actions taken by the job worker.

    ", + "PutThirdPartyJobSuccessResultInput$executionDetails": null + } + }, + "ExecutionId": { + "base": null, + "refs": { + "ActionExecution$externalExecutionId": "

    The external ID of the run of the action.

    ", + "ExecutionDetails$externalExecutionId": "

    The system-generated unique ID of this action used to identify this job worker in any external systems, such as AWS CodeDeploy.

    ", + "FailureDetails$externalExecutionId": "

    The external ID of the run of the action that failed.

    " + } + }, + "ExecutionSummary": { + "base": null, + "refs": { + "ActionExecution$summary": "

    A summary of the run of the action.

    ", + "ExecutionDetails$summary": "

    The summary of the current status of the actions.

    " + } + }, + "FailureDetails": { + "base": "

    Represents information about failure details.

    ", + "refs": { + "PutJobFailureResultInput$failureDetails": "

    The details about the failure of a job.

    ", + "PutThirdPartyJobFailureResultInput$failureDetails": null + } + }, + "FailureType": { + "base": null, + "refs": { + "FailureDetails$type": "

    The type of the failure.

    " + } + }, + "GetJobDetailsInput": { + "base": "

    Represents the input of a get job details action.

    ", + "refs": { + } + }, + "GetJobDetailsOutput": { + "base": "

    Represents the output of a get job details action.

    ", + "refs": { + } + }, + "GetPipelineInput": { + "base": "

    Represents the input of a get pipeline action.

    ", + "refs": { + } + }, + "GetPipelineOutput": { + "base": "

    Represents the output of a get pipeline action.

    ", + "refs": { + } + }, + "GetPipelineStateInput": { + "base": "

    Represents the input of a get pipeline state action.

    ", + "refs": { + } + }, + "GetPipelineStateOutput": { + "base": "

    Represents the output of a get pipeline state action.

    ", + "refs": { + } + }, + "GetThirdPartyJobDetailsInput": { + "base": "

    Represents the input of a get third party job details action.

    ", + "refs": { + } + }, + "GetThirdPartyJobDetailsOutput": { + "base": "

    Represents the output of a get third party job details action.

    ", + "refs": { + } + }, + "InputArtifact": { + "base": "

    Represents information about an artifact to be worked on, such as a test or build artifact.

    ", + "refs": { + "InputArtifactList$member": null + } + }, + "InputArtifactList": { + "base": null, + "refs": { + "ActionDeclaration$inputArtifacts": "

    The name or ID of the artifact consumed by the action, such as a test or build artifact.

    " + } + }, + "InvalidActionDeclarationException": { + "base": "

    The specified action declaration was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidBlockerDeclarationException": { + "base": "

    The specified gate declaration was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidClientTokenException": { + "base": "

    The client token was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidJobException": { + "base": "

    The specified job was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "InvalidJobStateException": { + "base": "

    The specified job state was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The next token was specified in an invalid format. Make sure that the next token you provided is the token returned by a previous call.

    ", + "refs": { + } + }, + "InvalidNonceException": { + "base": "

    The specified nonce was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidStageDeclarationException": { + "base": "

    The specified stage declaration was specified in an invalid format.

    ", + "refs": { + } + }, + "InvalidStructureException": { + "base": "

    The specified structure was specified in an invalid format.

    ", + "refs": { + } + }, + "Job": { + "base": "

    Represents information about a job.

    ", + "refs": { + "JobList$member": null + } + }, + "JobData": { + "base": "

    Represents additional information about a job required for a job worker to complete the job.

    ", + "refs": { + "Job$data": "

    Additional data about a job.

    ", + "JobDetails$data": null + } + }, + "JobDetails": { + "base": "

    Represents information about the details of a job.

    ", + "refs": { + "GetJobDetailsOutput$jobDetails": "

    The details of the job.

    If AWSSessionCredentials is used, a long-running job can call GetJobDetails again to obtain new credentials.

    " + } + }, + "JobId": { + "base": null, + "refs": { + "AcknowledgeJobInput$jobId": "

    The unique system-generated ID of the job for which you want to confirm receipt.

    ", + "GetJobDetailsInput$jobId": "

    The unique system-generated ID for the job.

    ", + "Job$id": "

    The unique system-generated ID of the job.

    ", + "JobDetails$id": "

    The unique system-generated ID of the job.

    ", + "PutJobFailureResultInput$jobId": "

    The unique system-generated ID of the job that failed. This is the same ID returned from PollForJobs.

    ", + "PutJobSuccessResultInput$jobId": "

    The unique system-generated ID of the job that succeeded. This is the same ID returned from PollForJobs.

    ", + "ThirdPartyJob$jobId": "

    The identifier used to identify the job in AWS CodePipeline.

    " + } + }, + "JobList": { + "base": null, + "refs": { + "PollForJobsOutput$jobs": "

    Information about the jobs to take action on.

    " + } + }, + "JobNotFoundException": { + "base": "

    The specified job was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "JobStatus": { + "base": null, + "refs": { + "AcknowledgeJobOutput$status": "

    Whether the job worker has received the specified job.

    ", + "AcknowledgeThirdPartyJobOutput$status": "

    The status information for the third party job, if any.

    " + } + }, + "LastChangedAt": { + "base": null, + "refs": { + "TransitionState$lastChangedAt": "

    The timestamp when the transition state was last changed.

    " + } + }, + "LastChangedBy": { + "base": null, + "refs": { + "TransitionState$lastChangedBy": "

    The ID of the user who last changed the transition state.

    " + } + }, + "LimitExceededException": { + "base": "

    The number of pipelines associated with the AWS account has exceeded the limit allowed for the account.

    ", + "refs": { + } + }, + "ListActionTypesInput": { + "base": "

    Represents the input of a list action types action.

    ", + "refs": { + } + }, + "ListActionTypesOutput": { + "base": "

    Represents the output of a list action types action.

    ", + "refs": { + } + }, + "ListPipelinesInput": { + "base": "

    Represents the input of a list pipelines action.

    ", + "refs": { + } + }, + "ListPipelinesOutput": { + "base": "

    Represents the output of a list pipelines action.

    ", + "refs": { + } + }, + "MaxBatchSize": { + "base": null, + "refs": { + "PollForJobsInput$maxBatchSize": "

    The maximum number of jobs to return in a poll for jobs call.

    ", + "PollForThirdPartyJobsInput$maxBatchSize": "

    The maximum number of jobs to return in a poll for jobs call.

    " + } + }, + "MaximumArtifactCount": { + "base": null, + "refs": { + "ArtifactDetails$maximumCount": "

    The maximum number of artifacts allowed for the action type.

    " + } + }, + "Message": { + "base": null, + "refs": { + "ErrorDetails$message": "

    The text of the error message.

    ", + "FailureDetails$message": "

    The message about the failure.

    " + } + }, + "MinimumArtifactCount": { + "base": null, + "refs": { + "ArtifactDetails$minimumCount": "

    The minimum number of artifacts allowed for the action type.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListActionTypesInput$nextToken": "

    An identifier that was returned from the previous list action types call, which can be used to return the next set of action types in the list.

    ", + "ListActionTypesOutput$nextToken": "

    If the amount of returned information is significantly large, an identifier is also returned which can be used in a subsequent list action types call to return the next set of action types in the list.

    ", + "ListPipelinesInput$nextToken": "

    An identifier that was returned from the previous list pipelines call, which can be used to return the next set of pipelines in the list.

    ", + "ListPipelinesOutput$nextToken": "

    If the amount of returned information is significantly large, an identifier is also returned which can be used in a subsequent list pipelines call to return the next set of pipelines in the list.

    " + } + }, + "Nonce": { + "base": null, + "refs": { + "AcknowledgeJobInput$nonce": "

    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. This number must be returned in the response.

    ", + "AcknowledgeThirdPartyJobInput$nonce": "

    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. This number must be returned in the response.

    ", + "Job$nonce": "

    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. This number must be returned in the response.

    ", + "ThirdPartyJobDetails$nonce": "

    A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. This number must be returned in the response.

    " + } + }, + "OutputArtifact": { + "base": "

    Represents information about the output of an action.

    ", + "refs": { + "OutputArtifactList$member": null + } + }, + "OutputArtifactList": { + "base": null, + "refs": { + "ActionDeclaration$outputArtifacts": "

    The name or ID of the result of the action declaration, such as a test or build artifact.

    " + } + }, + "Percentage": { + "base": null, + "refs": { + "ActionExecution$percentComplete": "

    A percentage of completeness of the action as it runs.

    ", + "ExecutionDetails$percentComplete": "

    The percentage of work completed on the action, represented on a scale of zero to one hundred percent.

    " + } + }, + "PipelineContext": { + "base": "

    Represents information about a pipeline to a job worker.

    ", + "refs": { + "JobData$pipelineContext": null, + "ThirdPartyJobData$pipelineContext": null + } + }, + "PipelineDeclaration": { + "base": "

    Represents the structure of actions and stages to be performed in the pipeline.

    ", + "refs": { + "CreatePipelineInput$pipeline": null, + "CreatePipelineOutput$pipeline": null, + "GetPipelineOutput$pipeline": null, + "UpdatePipelineInput$pipeline": "

    The name of the pipeline to be updated.

    ", + "UpdatePipelineOutput$pipeline": "

    The structure of the updated pipeline.

    " + } + }, + "PipelineExecutionId": { + "base": null, + "refs": { + "PutActionRevisionOutput$pipelineExecutionId": "

    The ID of the current workflow state of the pipeline.

    ", + "StartPipelineExecutionOutput$pipelineExecutionId": "

    The unique system-generated ID of the pipeline that was started.

    " + } + }, + "PipelineList": { + "base": null, + "refs": { + "ListPipelinesOutput$pipelines": "

    The list of pipelines.

    " + } + }, + "PipelineName": { + "base": null, + "refs": { + "DeletePipelineInput$name": "

    The name of the pipeline to be deleted.

    ", + "DisableStageTransitionInput$pipelineName": "

    The name of the pipeline in which you want to disable the flow of artifacts from one stage to another.

    ", + "EnableStageTransitionInput$pipelineName": "

    The name of the pipeline in which you want to enable the flow of artifacts from one stage to another.

    ", + "GetPipelineInput$name": "

    The name of the pipeline for which you want to get information. Pipeline names must be unique under an Amazon Web Services (AWS) user account.

    ", + "GetPipelineStateInput$name": "

    The name of the pipeline about which you want to get information.

    ", + "GetPipelineStateOutput$pipelineName": "

    The name of the pipeline for which you want to get the state.

    ", + "PipelineContext$pipelineName": "

    The name of the pipeline. This is a user-specified value. Pipeline names must be unique across all pipeline names under an Amazon Web Services account.

    ", + "PipelineDeclaration$name": "

    The name of the action to be performed.

    ", + "PipelineSummary$name": "

    The name of the pipeline.

    ", + "PutActionRevisionInput$pipelineName": "

    The name of the pipeline that will start processing the revision to the source.

    ", + "StartPipelineExecutionInput$name": "

    The name of the pipeline to start.

    " + } + }, + "PipelineNameInUseException": { + "base": "

    The specified pipeline name is already in use.

    ", + "refs": { + } + }, + "PipelineNotFoundException": { + "base": "

    The specified pipeline was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "PipelineStageDeclarationList": { + "base": null, + "refs": { + "PipelineDeclaration$stages": "

    The stage in which to perform the action.

    " + } + }, + "PipelineSummary": { + "base": "

    Returns a summary of a pipeline.

    ", + "refs": { + "PipelineList$member": null + } + }, + "PipelineVersion": { + "base": null, + "refs": { + "GetPipelineInput$version": "

    The version number of the pipeline. If you do not specify a version, defaults to the most current version.

    ", + "GetPipelineStateOutput$pipelineVersion": "

    The version number of the pipeline.

    A newly-created pipeline is always assigned a version number of 1.", + "PipelineDeclaration$version": "

    The version number of the pipeline. A new pipeline always has a version number of 1. This number is automatically incremented when a pipeline is updated.

    ", + "PipelineSummary$version": "

    The version number of the pipeline.

    " + } + }, + "PipelineVersionNotFoundException": { + "base": "

    The specified pipeline version was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "PollForJobsInput": { + "base": "

    Represents the input of a poll for jobs action.

    ", + "refs": { + } + }, + "PollForJobsOutput": { + "base": "

    Represents the output of a poll for jobs action.

    ", + "refs": { + } + }, + "PollForThirdPartyJobsInput": { + "base": "

    Represents the input of a poll for third party jobs action.

    ", + "refs": { + } + }, + "PollForThirdPartyJobsOutput": { + "base": "

    Represents the output of a poll for third party jobs action.

    ", + "refs": { + } + }, + "PutActionRevisionInput": { + "base": "

    Represents the input of a put action revision action.

    ", + "refs": { + } + }, + "PutActionRevisionOutput": { + "base": "

    Represents the output of a put action revision action.

    ", + "refs": { + } + }, + "PutJobFailureResultInput": { + "base": "

    Represents the input of a put job failure result action.

    ", + "refs": { + } + }, + "PutJobSuccessResultInput": { + "base": "

    Represents the input of a put job success result action.

    ", + "refs": { + } + }, + "PutThirdPartyJobFailureResultInput": { + "base": "

    Represents the input of a third party job failure result action.

    ", + "refs": { + } + }, + "PutThirdPartyJobSuccessResultInput": { + "base": "

    Represents the input of a put third party job success result action.

    ", + "refs": { + } + }, + "QueryParamMap": { + "base": null, + "refs": { + "PollForJobsInput$queryParam": "

    A map of property names and values. For an action type with no queryable properties, this value must be null or an empty map. For an action type with a queryable property, you must supply that property as a key in the map. Only jobs whose action configuration matches the mapped value will be returned.

    " + } + }, + "Revision": { + "base": null, + "refs": { + "Artifact$revision": "

    The artifact's revision ID. Depending on the type of object, this could be a commit ID (GitHub) or a revision ID (Amazon S3).

    ", + "CurrentRevision$revision": "

    The revision ID of the current version of an artifact.

    " + } + }, + "RevisionChangeId": { + "base": null, + "refs": { + "ActionRevision$revisionChangeId": "

    The unique identifier of the change that set the state to this revision, for example a deployment ID or timestamp.

    " + } + }, + "RevisionChangeIdentifier": { + "base": null, + "refs": { + "CurrentRevision$changeIdentifier": "

    The change identifier for the current revision.

    " + } + }, + "RevisionId": { + "base": null, + "refs": { + "ActionRevision$revisionId": "

    The system-generated unique ID that identifies the revision number of the action.

    " + } + }, + "RoleArn": { + "base": null, + "refs": { + "ActionDeclaration$roleArn": "

    The ARN of the IAM service role that will perform the declared action. This is assumed through the roleArn for the pipeline.

    ", + "PipelineDeclaration$roleArn": "

    The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform actions with no actionRoleArn, or to use to assume roles for actions with an actionRoleArn.

    " + } + }, + "S3ArtifactLocation": { + "base": "

    The location of the Amazon S3 bucket that contains a revision.

    ", + "refs": { + "ArtifactLocation$s3Location": "

    The Amazon S3 bucket that contains the artifact.

    " + } + }, + "S3BucketName": { + "base": null, + "refs": { + "S3ArtifactLocation$bucketName": "

    The name of the Amazon S3 bucket.

    " + } + }, + "S3ObjectKey": { + "base": null, + "refs": { + "S3ArtifactLocation$objectKey": "

    The key of the object in the Amazon S3 bucket, which uniquely identifies the object in the bucket.

    " + } + }, + "SecretAccessKey": { + "base": null, + "refs": { + "AWSSessionCredentials$secretAccessKey": "

    The secret access key for the session.

    " + } + }, + "SessionToken": { + "base": null, + "refs": { + "AWSSessionCredentials$sessionToken": "

    The token for the session.

    " + } + }, + "StageActionDeclarationList": { + "base": null, + "refs": { + "StageDeclaration$actions": "

    The actions included in a stage.

    " + } + }, + "StageBlockerDeclarationList": { + "base": null, + "refs": { + "StageDeclaration$blockers": "

    The gates included in a stage.

    " + } + }, + "StageContext": { + "base": "

    Represents information about a stage to a job worker.

    ", + "refs": { + "PipelineContext$stage": "

    The stage of the pipeline.

    " + } + }, + "StageDeclaration": { + "base": "

    Represents information about a stage and its definition.

    ", + "refs": { + "PipelineStageDeclarationList$member": null + } + }, + "StageName": { + "base": null, + "refs": { + "DisableStageTransitionInput$stageName": "

    The name of the stage where you want to disable the inbound or outbound transition of artifacts.

    ", + "EnableStageTransitionInput$stageName": "

    The name of the stage where you want to enable the transition of artifacts, either into the stage (inbound) or from that stage to the next stage (outbound).

    ", + "PutActionRevisionInput$stageName": "

    The name of the stage that contains the action that will act upon the revision.

    ", + "StageContext$name": "

    The name of the stage.

    ", + "StageDeclaration$name": "

    The name of the stage.

    ", + "StageState$stageName": "

    The name of the stage.

    " + } + }, + "StageNotFoundException": { + "base": "

    The specified stage was specified in an invalid format or cannot be found.

    ", + "refs": { + } + }, + "StageState": { + "base": "

    Represents information about the state of the stage.

    ", + "refs": { + "StageStateList$member": null + } + }, + "StageStateList": { + "base": null, + "refs": { + "GetPipelineStateOutput$stageStates": "

    A list of the pipeline stage output information, including stage name, state, most recent run details, whether the stage is disabled, and other data.

    " + } + }, + "StageTransitionType": { + "base": null, + "refs": { + "DisableStageTransitionInput$transitionType": "

    Specifies whether artifacts will be prevented from transitioning into the stage and being processed by the actions in that stage (inbound), or prevented from transitioning from the stage after they have been processed by the actions in that stage (outbound).

    ", + "EnableStageTransitionInput$transitionType": "

    Specifies whether artifacts will be allowed to enter the stage and be processed by the actions in that stage (inbound) or whether already-processed artifacts will be allowed to transition to the next stage (outbound).

    " + } + }, + "StartPipelineExecutionInput": { + "base": "

    Represents the input of a start pipeline execution action.

    ", + "refs": { + } + }, + "StartPipelineExecutionOutput": { + "base": "

    Represents the output of a start pipeline execution action.

    ", + "refs": { + } + }, + "ThirdPartyJob": { + "base": "

    A response to a PollForThirdPartyJobs request returned by AWS CodePipeline when there is a job to be worked upon by a partner action.

    ", + "refs": { + "ThirdPartyJobList$member": null + } + }, + "ThirdPartyJobData": { + "base": "

    Represents information about the job data for a partner action.

    ", + "refs": { + "ThirdPartyJobDetails$data": "

    The data to be returned by the third party job worker.

    " + } + }, + "ThirdPartyJobDetails": { + "base": "

    The details of a job sent in response to a GetThirdPartyJobDetails request.

    ", + "refs": { + "GetThirdPartyJobDetailsOutput$jobDetails": "

    The details of the job, including any protected values defined for the job.

    " + } + }, + "ThirdPartyJobId": { + "base": null, + "refs": { + "AcknowledgeThirdPartyJobInput$jobId": "

    The unique system-generated ID of the job.

    ", + "GetThirdPartyJobDetailsInput$jobId": "

    The unique system-generated ID used for identifying the job.

    ", + "PutThirdPartyJobFailureResultInput$jobId": "

    The ID of the job that failed. This is the same ID returned from PollForThirdPartyJobs.

    ", + "PutThirdPartyJobSuccessResultInput$jobId": "

    The ID of the job that successfully completed. This is the same ID returned from PollForThirdPartyJobs.

    ", + "ThirdPartyJobDetails$id": "

    The identifier used to identify the job details in AWS CodePipeline.

    " + } + }, + "ThirdPartyJobList": { + "base": null, + "refs": { + "PollForThirdPartyJobsOutput$jobs": "

    Information about the jobs to take action on.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ActionExecution$lastStatusChange": "

    The last status change of the action.

    ", + "ActionRevision$created": "

    The date and time when the most recent version of the action was created, in timestamp format.

    ", + "GetPipelineStateOutput$created": "

    The date and time the pipeline was created, in timestamp format.

    ", + "GetPipelineStateOutput$updated": "

    The date and time the pipeline was last updated, in timestamp format.

    ", + "PipelineSummary$created": "

    The date and time the pipeline was created, in timestamp format.

    ", + "PipelineSummary$updated": "

    The date and time of the last update to the pipeline, in timestamp format.

    " + } + }, + "TransitionState": { + "base": "

    Represents information about the state of transitions between one stage and another stage.

    ", + "refs": { + "StageState$inboundTransitionState": "

    The state of the inbound transition, which is either enabled or disabled.

    " + } + }, + "UpdatePipelineInput": { + "base": "

    Represents the input of an update pipeline action.

    ", + "refs": { + } + }, + "UpdatePipelineOutput": { + "base": "

    Represents the output of an update pipeline action.

    ", + "refs": { + } + }, + "Url": { + "base": null, + "refs": { + "ActionExecution$externalExecutionUrl": "

    The URL of a resource external to AWS that will be used when running the action, for example an external repository URL.

    ", + "ActionState$entityUrl": "

    A URL link for more information about the state of the action, such as a deployment group details page.

    ", + "ActionState$revisionUrl": "

    A URL link for more information about the revision, such as a commit details page.

    ", + "ActionTypeSettings$thirdPartyConfigurationUrl": "

    The URL of a sign-up page where users can sign up for an external service and perform initial configuration of the action provided by that service.

    " + } + }, + "UrlTemplate": { + "base": null, + "refs": { + "ActionTypeSettings$entityUrlTemplate": "

    The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for an AWS CodeDeploy deployment group. This link is provided as part of the action display within the pipeline.

    ", + "ActionTypeSettings$executionUrlTemplate": "

    The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system, such as console page for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS CodePipeline console and provides a link to the execution entity of the external action.

    ", + "ActionTypeSettings$revisionUrlTemplate": "

    The URL returned to the AWS CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action.

    " + } + }, + "ValidationException": { + "base": "

    The validation was specified in an invalid format.

    ", + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "ActionTypeId$version": "

    A string that identifies the action type.

    ", + "CreateCustomActionTypeInput$version": "

    The version number of the custom action.

    A newly-created custom action is always assigned a version number of 1. This is required.", + "DeleteCustomActionTypeInput$version": "

    The version of the custom action to delete.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1270 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-06-30", + "endpointPrefix":"cognito-identity", + "jsonVersion":"1.1", + "serviceFullName":"Amazon Cognito Identity", + "signatureVersion":"v4", + "targetPrefix":"AWSCognitoIdentityService", + "protocol":"json" + }, + "operations":{ + "CreateIdentityPool":{ + "name":"CreateIdentityPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIdentityPoolInput"}, + "output":{"shape":"IdentityPool"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + 
"exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteIdentities":{ + "name":"DeleteIdentities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentitiesInput"}, + "output":{"shape":"DeleteIdentitiesResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "DeleteIdentityPool":{ + "name":"DeleteIdentityPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentityPoolInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "DescribeIdentity":{ + "name":"DescribeIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIdentityInput"}, + "output":{"shape":"IdentityDescription"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + 
"shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "DescribeIdentityPool":{ + "name":"DescribeIdentityPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIdentityPoolInput"}, + "output":{"shape":"IdentityPool"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "GetCredentialsForIdentity":{ + "name":"GetCredentialsForIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCredentialsForIdentityInput"}, + "output":{"shape":"GetCredentialsForIdentityResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InvalidIdentityPoolConfigurationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + }, + { + "shape":"ExternalServiceException", + 
"error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetId":{ + "name":"GetId", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdInput"}, + "output":{"shape":"GetIdResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ExternalServiceException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetIdentityPoolRoles":{ + "name":"GetIdentityPoolRoles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityPoolRolesInput"}, + "output":{"shape":"GetIdentityPoolRolesResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "GetOpenIdToken":{ + "name":"GetOpenIdToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOpenIdTokenInput"}, + 
"output":{"shape":"GetOpenIdTokenResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + }, + { + "shape":"ExternalServiceException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetOpenIdTokenForDeveloperIdentity":{ + "name":"GetOpenIdTokenForDeveloperIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOpenIdTokenForDeveloperIdentityInput"}, + "output":{"shape":"GetOpenIdTokenForDeveloperIdentityResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + }, + { + "shape":"DeveloperUserAlreadyRegisteredException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListIdentities":{ + "name":"ListIdentities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentitiesInput"}, + "output":{"shape":"ListIdentitiesResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "ListIdentityPools":{ + "name":"ListIdentityPools", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentityPoolsInput"}, + "output":{"shape":"ListIdentityPoolsResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "LookupDeveloperIdentity":{ + "name":"LookupDeveloperIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LookupDeveloperIdentityInput"}, + "output":{"shape":"LookupDeveloperIdentityResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "MergeDeveloperIdentities":{ + "name":"MergeDeveloperIdentities", + "http":{ + "method":"POST", + "requestUri":"/" + }, 
+ "input":{"shape":"MergeDeveloperIdentitiesInput"}, + "output":{"shape":"MergeDeveloperIdentitiesResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "SetIdentityPoolRoles":{ + "name":"SetIdentityPoolRoles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityPoolRolesInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + }, + { + "shape":"ConcurrentModificationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UnlinkDeveloperIdentity":{ + "name":"UnlinkDeveloperIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnlinkDeveloperIdentityInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + 
"shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + } + ] + }, + "UnlinkIdentity":{ + "name":"UnlinkIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnlinkIdentityInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true + }, + { + "shape":"ExternalServiceException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateIdentityPool":{ + "name":"UpdateIdentityPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IdentityPool"}, + "output":{"shape":"IdentityPool"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalErrorException", + "exception":true, + "fault":true 
+ }, + { + "shape":"ConcurrentModificationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "ARNString":{ + "type":"string", + "min":20, + "max":2048 + }, + "AccessKeyString":{"type":"string"}, + "AccountId":{ + "type":"string", + "min":1, + "max":15, + "pattern":"\\d+" + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateIdentityPoolInput":{ + "type":"structure", + "required":[ + "IdentityPoolName", + "AllowUnauthenticatedIdentities" + ], + "members":{ + "IdentityPoolName":{"shape":"IdentityPoolName"}, + "AllowUnauthenticatedIdentities":{"shape":"IdentityPoolUnauthenticated"}, + "SupportedLoginProviders":{"shape":"IdentityProviders"}, + "DeveloperProviderName":{"shape":"DeveloperProviderName"}, + "OpenIdConnectProviderARNs":{"shape":"OIDCProviderList"} + } + }, + "Credentials":{ + "type":"structure", + "members":{ + "AccessKeyId":{"shape":"AccessKeyString"}, + "SecretKey":{"shape":"SecretKeyString"}, + "SessionToken":{"shape":"SessionTokenString"}, + "Expiration":{"shape":"DateType"} + } + }, + "DateType":{"type":"timestamp"}, + "DeleteIdentitiesInput":{ + "type":"structure", + "required":["IdentityIdsToDelete"], + "members":{ + "IdentityIdsToDelete":{"shape":"IdentityIdList"} + } + }, + "DeleteIdentitiesResponse":{ + "type":"structure", + "members":{ + "UnprocessedIdentityIds":{"shape":"UnprocessedIdentityIdList"} + } + }, + "DeleteIdentityPoolInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "DescribeIdentityInput":{ + "type":"structure", + "required":["IdentityId"], + "members":{ + "IdentityId":{"shape":"IdentityId"} + } + }, + "DescribeIdentityPoolInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + 
"DeveloperProviderName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[\\w._-]+" + }, + "DeveloperUserAlreadyRegisteredException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "DeveloperUserIdentifier":{ + "type":"string", + "min":1, + "max":1024, + "pattern":"[\\w.@_-]+" + }, + "DeveloperUserIdentifierList":{ + "type":"list", + "member":{"shape":"DeveloperUserIdentifier"} + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "AccessDenied", + "InternalServerError" + ] + }, + "ExternalServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "GetCredentialsForIdentityInput":{ + "type":"structure", + "required":["IdentityId"], + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsMap"} + } + }, + "GetCredentialsForIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Credentials":{"shape":"Credentials"} + } + }, + "GetIdInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "AccountId":{"shape":"AccountId"}, + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "Logins":{"shape":"LoginsMap"} + } + }, + "GetIdResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"} + } + }, + "GetIdentityPoolRolesInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "GetIdentityPoolRolesResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "Roles":{"shape":"RolesMap"} + } + }, + "GetOpenIdTokenForDeveloperIdentityInput":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "Logins" + ], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsMap"}, + 
"TokenDuration":{"shape":"TokenDuration"} + } + }, + "GetOpenIdTokenForDeveloperIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Token":{"shape":"OIDCToken"} + } + }, + "GetOpenIdTokenInput":{ + "type":"structure", + "required":["IdentityId"], + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsMap"} + } + }, + "GetOpenIdTokenResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Token":{"shape":"OIDCToken"} + } + }, + "HideDisabled":{"type":"boolean"}, + "IdentitiesList":{ + "type":"list", + "member":{"shape":"IdentityDescription"} + }, + "IdentityDescription":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsList"}, + "CreationDate":{"shape":"DateType"}, + "LastModifiedDate":{"shape":"DateType"} + } + }, + "IdentityId":{ + "type":"string", + "min":1, + "max":50, + "pattern":"[\\w-]+:[0-9a-f-]+" + }, + "IdentityIdList":{ + "type":"list", + "member":{"shape":"IdentityId"}, + "min":1, + "max":60 + }, + "IdentityPool":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityPoolName", + "AllowUnauthenticatedIdentities" + ], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "IdentityPoolName":{"shape":"IdentityPoolName"}, + "AllowUnauthenticatedIdentities":{"shape":"IdentityPoolUnauthenticated"}, + "SupportedLoginProviders":{"shape":"IdentityProviders"}, + "DeveloperProviderName":{"shape":"DeveloperProviderName"}, + "OpenIdConnectProviderARNs":{"shape":"OIDCProviderList"} + } + }, + "IdentityPoolId":{ + "type":"string", + "min":1, + "max":50, + "pattern":"[\\w-]+:[0-9a-f-]+" + }, + "IdentityPoolName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[\\w ]+" + }, + "IdentityPoolShortDescription":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "IdentityPoolName":{"shape":"IdentityPoolName"} + } + }, + 
"IdentityPoolUnauthenticated":{"type":"boolean"}, + "IdentityPoolsList":{ + "type":"list", + "member":{"shape":"IdentityPoolShortDescription"} + }, + "IdentityProviderId":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[\\w.;_-]+" + }, + "IdentityProviderName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[\\w._/-]+" + }, + "IdentityProviderToken":{ + "type":"string", + "min":1, + "max":2048, + "pattern":"[\\S]+" + }, + "IdentityProviders":{ + "type":"map", + "key":{"shape":"IdentityProviderName"}, + "value":{"shape":"IdentityProviderId"}, + "max":10 + }, + "InternalErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true, + "fault":true + }, + "InvalidIdentityPoolConfigurationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListIdentitiesInput":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "MaxResults" + ], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "MaxResults":{"shape":"QueryLimit"}, + "NextToken":{"shape":"PaginationKey"}, + "HideDisabled":{"shape":"HideDisabled"} + } + }, + "ListIdentitiesResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "Identities":{"shape":"IdentitiesList"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "ListIdentityPoolsInput":{ + "type":"structure", + "required":["MaxResults"], + "members":{ + "MaxResults":{"shape":"QueryLimit"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "ListIdentityPoolsResponse":{ + "type":"structure", + "members":{ + 
"IdentityPools":{"shape":"IdentityPoolsList"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "LoginsList":{ + "type":"list", + "member":{"shape":"IdentityProviderName"} + }, + "LoginsMap":{ + "type":"map", + "key":{"shape":"IdentityProviderName"}, + "value":{"shape":"IdentityProviderToken"}, + "max":10 + }, + "LookupDeveloperIdentityInput":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "IdentityId":{"shape":"IdentityId"}, + "DeveloperUserIdentifier":{"shape":"DeveloperUserIdentifier"}, + "MaxResults":{"shape":"QueryLimit"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "LookupDeveloperIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "DeveloperUserIdentifierList":{"shape":"DeveloperUserIdentifierList"}, + "NextToken":{"shape":"PaginationKey"} + } + }, + "MergeDeveloperIdentitiesInput":{ + "type":"structure", + "required":[ + "SourceUserIdentifier", + "DestinationUserIdentifier", + "DeveloperProviderName", + "IdentityPoolId" + ], + "members":{ + "SourceUserIdentifier":{"shape":"DeveloperUserIdentifier"}, + "DestinationUserIdentifier":{"shape":"DeveloperUserIdentifier"}, + "DeveloperProviderName":{"shape":"DeveloperProviderName"}, + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "MergeDeveloperIdentitiesResponse":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"} + } + }, + "NotAuthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "OIDCProviderList":{ + "type":"list", + "member":{"shape":"ARNString"} + }, + "OIDCToken":{"type":"string"}, + "PaginationKey":{ + "type":"string", + "min":1, + "pattern":"[\\S]+" + }, + "QueryLimit":{ + "type":"integer", + "min":1, + "max":60 + }, + "ResourceConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + 
"error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleType":{ + "type":"string", + "pattern":"(un)?authenticated" + }, + "RolesMap":{ + "type":"map", + "key":{"shape":"RoleType"}, + "value":{"shape":"ARNString"}, + "max":2 + }, + "SecretKeyString":{"type":"string"}, + "SessionTokenString":{"type":"string"}, + "SetIdentityPoolRolesInput":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "Roles" + ], + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "Roles":{"shape":"RolesMap"} + } + }, + "String":{"type":"string"}, + "TokenDuration":{ + "type":"long", + "min":1, + "max":86400 + }, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "UnlinkDeveloperIdentityInput":{ + "type":"structure", + "required":[ + "IdentityId", + "IdentityPoolId", + "DeveloperProviderName", + "DeveloperUserIdentifier" + ], + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "DeveloperProviderName":{"shape":"DeveloperProviderName"}, + "DeveloperUserIdentifier":{"shape":"DeveloperUserIdentifier"} + } + }, + "UnlinkIdentityInput":{ + "type":"structure", + "required":[ + "IdentityId", + "Logins", + "LoginsToRemove" + ], + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "Logins":{"shape":"LoginsMap"}, + "LoginsToRemove":{"shape":"LoginsList"} + } + }, + "UnprocessedIdentityId":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "ErrorCode":{"shape":"ErrorCode"} + } + }, + "UnprocessedIdentityIdList":{ + "type":"list", + "member":{"shape":"UnprocessedIdentityId"}, + "max":60 + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-identity/2014-06-30/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,512 @@ +{ + "version": "2.0", + "operations": { + "CreateIdentityPool": "

    Creates a new identity pool. The identity pool is a store of user identity information that is specific to your AWS account. The limit on identity pools is 60 per account. You must use AWS Developer credentials to call this API.

    ", + "DeleteIdentities": "

    Deletes identities from an identity pool. You can specify a list of 1-60 identities that you want to delete.

    You must use AWS Developer credentials to call this API.

    ", + "DeleteIdentityPool": "

    Deletes an identity pool. Once a pool is deleted, users will not be able to authenticate with the pool.

    You must use AWS Developer credentials to call this API.

    ", + "DescribeIdentity": "

    Returns metadata related to the given identity, including when the identity was created and any associated linked logins.

    You must use AWS Developer credentials to call this API.

    ", + "DescribeIdentityPool": "

    Gets details about a particular identity pool, including the pool name, ID description, creation date, and current number of users.

    You must use AWS Developer credentials to call this API.

    ", + "GetCredentialsForIdentity": "

    Returns credentials for the provided identity ID. Any provided logins will be validated against supported login providers. If the token is for cognito-identity.amazonaws.com, it will be passed through to AWS Security Token Service with the appropriate role for the token.

    This is a public API. You do not need any credentials to call this API.

    ", + "GetId": "

    Generates (or retrieves) a Cognito ID. Supplying multiple logins will create an implicit linked account.

    token+\";\"+tokenSecret.

    This is a public API. You do not need any credentials to call this API.

    ", + "GetIdentityPoolRoles": "

    Gets the roles for an identity pool.

    You must use AWS Developer credentials to call this API.

    ", + "GetOpenIdToken": "

    Gets an OpenID token, using a known Cognito ID. This known Cognito ID is returned by GetId. You can optionally add additional logins for the identity. Supplying multiple logins creates an implicit link.

    The OpenId token is valid for 15 minutes.

    This is a public API. You do not need any credentials to call this API.

    ", + "GetOpenIdTokenForDeveloperIdentity": "

    Registers (or retrieves) a Cognito IdentityId and an OpenID Connect token for a user authenticated by your backend authentication process. Supplying multiple logins will create an implicit linked account. You can only specify one developer provider as part of the Logins map, which is linked to the identity pool. The developer provider is the \"domain\" by which Cognito will refer to your users.

    You can use GetOpenIdTokenForDeveloperIdentity to create a new identity and to link new logins (that is, user credentials issued by a public provider or developer provider) to an existing identity. When you want to create a new identity, the IdentityId should be null. When you want to associate a new login with an existing authenticated/unauthenticated identity, you can do so by providing the existing IdentityId. This API will create the identity in the specified IdentityPoolId.

    You must use AWS Developer credentials to call this API.

    ", + "ListIdentities": "

    Lists the identities in a pool.

    You must use AWS Developer credentials to call this API.

    ", + "ListIdentityPools": "

    Lists all of the Cognito identity pools registered for your account.

    This is a public API. You do not need any credentials to call this API.

    ", + "LookupDeveloperIdentity": "

    Retrieves the IdentityID associated with a DeveloperUserIdentifier or the list of DeveloperUserIdentifiers associated with an IdentityId for an existing identity. Either IdentityID or DeveloperUserIdentifier must not be null. If you supply only one of these values, the other value will be searched in the database and returned as a part of the response. If you supply both, DeveloperUserIdentifier will be matched against IdentityID. If the values are verified against the database, the response returns both values and is the same as the request. Otherwise a ResourceConflictException is thrown.

    You must use AWS Developer credentials to call this API.

    ", + "MergeDeveloperIdentities": "

    Merges two users having different IdentityIds, existing in the same identity pool, and identified by the same developer provider. You can use this action to request that discrete users be merged and identified as a single user in the Cognito environment. Cognito associates the given source user (SourceUserIdentifier) with the IdentityId of the DestinationUserIdentifier. Only developer-authenticated users can be merged. If the users to be merged are associated with the same public provider, but as two different users, an exception will be thrown.

    You must use AWS Developer credentials to call this API.

    ", + "SetIdentityPoolRoles": "

    Sets the roles for an identity pool. These roles are used when making calls to GetCredentialsForIdentity action.

    You must use AWS Developer credentials to call this API.

    ", + "UnlinkDeveloperIdentity": "

    Unlinks a DeveloperUserIdentifier from an existing identity. Unlinked developer users will be considered new identities next time they are seen. If, for a given Cognito identity, you remove all federated identities as well as the developer user identifier, the Cognito identity becomes inaccessible.

    This is a public API. You do not need any credentials to call this API.

    ", + "UnlinkIdentity": "

    Unlinks a federated identity from an existing account. Unlinked logins will be considered new identities next time they are seen. Removing the last linked login will make this identity inaccessible.

    This is a public API. You do not need any credentials to call this API.

    ", + "UpdateIdentityPool": "

    Updates a user pool.

    You must use AWS Developer credentials to call this API.

    " + }, + "service": "Amazon Cognito

    Amazon Cognito is a web service that delivers scoped temporary credentials to mobile devices and other untrusted environments. Amazon Cognito uniquely identifies a device and supplies the user with a consistent identity over the lifetime of an application.

    Using Amazon Cognito, you can enable authentication with one or more third-party identity providers (Facebook, Google, or Login with Amazon), and you can also choose to support unauthenticated access from your app. Cognito delivers a unique identifier for each user and acts as an OpenID token provider trusted by AWS Security Token Service (STS) to access temporary, limited-privilege AWS credentials.

    To provide end-user credentials, first make an unsigned call to GetId. If the end user is authenticated with one of the supported identity providers, set the Logins map with the identity provider token. GetId returns a unique identifier for the user.

    Next, make an unsigned call to GetCredentialsForIdentity. This call expects the same Logins map as the GetId call, as well as the IdentityID originally returned by GetId. Assuming your identity pool has been configured via the SetIdentityPoolRoles operation, GetCredentialsForIdentity will return AWS credentials for your use. If your pool has not been configured with SetIdentityPoolRoles, or if you want to follow legacy flow, make an unsigned call to GetOpenIdToken, which returns the OpenID token necessary to call STS and retrieve AWS credentials. This call expects the same Logins map as the GetId call, as well as the IdentityID originally returned by GetId. The token returned by GetOpenIdToken can be passed to the STS operation AssumeRoleWithWebIdentity to retrieve AWS credentials.

    If you want to use Amazon Cognito in an Android, iOS, or Unity application, you will probably want to make API calls via the AWS Mobile SDK. To learn more, see the AWS Mobile SDK Developer Guide.

    ", + "shapes": { + "ARNString": { + "base": null, + "refs": { + "OIDCProviderList$member": null, + "RolesMap$value": null + } + }, + "AccessKeyString": { + "base": null, + "refs": { + "Credentials$AccessKeyId": "

    The Access Key portion of the credentials.

    " + } + }, + "AccountId": { + "base": null, + "refs": { + "GetIdInput$AccountId": "A standard AWS account ID (9+ digits)." + } + }, + "ConcurrentModificationException": { + "base": "

    Thrown if there are parallel requests to modify a resource.

    ", + "refs": { + } + }, + "CreateIdentityPoolInput": { + "base": "

    Input to the CreateIdentityPool action.

    ", + "refs": { + } + }, + "Credentials": { + "base": "

    Credentials for the provided identity ID.

    ", + "refs": { + "GetCredentialsForIdentityResponse$Credentials": "

    Credentials for the provided identity ID.

    " + } + }, + "DateType": { + "base": null, + "refs": { + "Credentials$Expiration": "

    The date at which these credentials will expire.

    ", + "IdentityDescription$CreationDate": "

    Date on which the identity was created.

    ", + "IdentityDescription$LastModifiedDate": "

    Date on which the identity was last modified.

    " + } + }, + "DeleteIdentitiesInput": { + "base": "

    Input to the DeleteIdentities action.

    ", + "refs": { + } + }, + "DeleteIdentitiesResponse": { + "base": "

    Returned in response to a successful DeleteIdentities operation.

    ", + "refs": { + } + }, + "DeleteIdentityPoolInput": { + "base": "

    Input to the DeleteIdentityPool action.

    ", + "refs": { + } + }, + "DescribeIdentityInput": { + "base": "

    Input to the DescribeIdentity action.

    ", + "refs": { + } + }, + "DescribeIdentityPoolInput": { + "base": "Input to the DescribeIdentityPool action.", + "refs": { + } + }, + "DeveloperProviderName": { + "base": null, + "refs": { + "CreateIdentityPoolInput$DeveloperProviderName": "

    The \"domain\" by which Cognito will refer to your users. This name acts as a placeholder that allows your backend and the Cognito service to communicate about the developer provider. For the DeveloperProviderName, you can use letters as well as period (.), underscore (_), and dash (-).

    Once you have set a developer provider name, you cannot change it. Please take care in setting this parameter.

    ", + "IdentityPool$DeveloperProviderName": "

    The \"domain\" by which Cognito will refer to your users.

    ", + "MergeDeveloperIdentitiesInput$DeveloperProviderName": "

    The \"domain\" by which Cognito will refer to your users. This is a (pseudo) domain name that you provide while creating an identity pool. This name acts as a placeholder that allows your backend and the Cognito service to communicate about the developer provider. For the DeveloperProviderName, you can use letters as well as period (.), underscore (_), and dash (-).

    ", + "UnlinkDeveloperIdentityInput$DeveloperProviderName": "

    The \"domain\" by which Cognito will refer to your users.

    " + } + }, + "DeveloperUserAlreadyRegisteredException": { + "base": "

    The provided developer user identifier is already registered with Cognito under a different identity ID.

    ", + "refs": { + } + }, + "DeveloperUserIdentifier": { + "base": null, + "refs": { + "DeveloperUserIdentifierList$member": null, + "LookupDeveloperIdentityInput$DeveloperUserIdentifier": "

    A unique ID used by your backend authentication process to identify a user. Typically, a developer identity provider would issue many developer user identifiers, in keeping with the number of users.

    ", + "MergeDeveloperIdentitiesInput$SourceUserIdentifier": "

    User identifier for the source user. The value should be a DeveloperUserIdentifier.

    ", + "MergeDeveloperIdentitiesInput$DestinationUserIdentifier": "

    User identifier for the destination user. The value should be a DeveloperUserIdentifier.

    ", + "UnlinkDeveloperIdentityInput$DeveloperUserIdentifier": "A unique ID used by your backend authentication process to identify a user." + } + }, + "DeveloperUserIdentifierList": { + "base": null, + "refs": { + "LookupDeveloperIdentityResponse$DeveloperUserIdentifierList": "

    This is the list of developer user identifiers associated with an identity ID. Cognito supports the association of multiple developer user identifiers with an identity ID.

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "UnprocessedIdentityId$ErrorCode": "

    The error code indicating the type of error that occurred.

    " + } + }, + "ExternalServiceException": { + "base": "

    An exception thrown when a dependent service such as Facebook or Twitter is not responding

    ", + "refs": { + } + }, + "GetCredentialsForIdentityInput": { + "base": "

    Input to the GetCredentialsForIdentity action.

    ", + "refs": { + } + }, + "GetCredentialsForIdentityResponse": { + "base": "

    Returned in response to a successful GetCredentialsForIdentity operation.

    ", + "refs": { + } + }, + "GetIdInput": { + "base": "Input to the GetId action.", + "refs": { + } + }, + "GetIdResponse": { + "base": "Returned in response to a GetId request.", + "refs": { + } + }, + "GetIdentityPoolRolesInput": { + "base": "

    Input to the GetIdentityPoolRoles action.

    ", + "refs": { + } + }, + "GetIdentityPoolRolesResponse": { + "base": "

    Returned in response to a successful GetIdentityPoolRoles operation.

    ", + "refs": { + } + }, + "GetOpenIdTokenForDeveloperIdentityInput": { + "base": "

    Input to the GetOpenIdTokenForDeveloperIdentity action.

    ", + "refs": { + } + }, + "GetOpenIdTokenForDeveloperIdentityResponse": { + "base": "

    Returned in response to a successful GetOpenIdTokenForDeveloperIdentity request.

    ", + "refs": { + } + }, + "GetOpenIdTokenInput": { + "base": "Input to the GetOpenIdToken action.", + "refs": { + } + }, + "GetOpenIdTokenResponse": { + "base": "Returned in response to a successful GetOpenIdToken request.", + "refs": { + } + }, + "HideDisabled": { + "base": null, + "refs": { + "ListIdentitiesInput$HideDisabled": "

    An optional boolean parameter that allows you to hide disabled identities. If omitted, the ListIdentities API will include disabled identities in the response.

    " + } + }, + "IdentitiesList": { + "base": null, + "refs": { + "ListIdentitiesResponse$Identities": "An object containing a set of identities and associated mappings." + } + }, + "IdentityDescription": { + "base": "A description of the identity.", + "refs": { + "IdentitiesList$member": null + } + }, + "IdentityId": { + "base": null, + "refs": { + "DescribeIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetCredentialsForIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetCredentialsForIdentityResponse$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetIdResponse$IdentityId": "A unique identifier in the format REGION:GUID.", + "GetOpenIdTokenForDeveloperIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetOpenIdTokenForDeveloperIdentityResponse$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "GetOpenIdTokenInput$IdentityId": "A unique identifier in the format REGION:GUID.", + "GetOpenIdTokenResponse$IdentityId": "A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input.", + "IdentityDescription$IdentityId": "A unique identifier in the format REGION:GUID.", + "IdentityIdList$member": null, + "LookupDeveloperIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "LookupDeveloperIdentityResponse$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "MergeDeveloperIdentitiesResponse$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "UnlinkDeveloperIdentityInput$IdentityId": "

    A unique identifier in the format REGION:GUID.

    ", + "UnlinkIdentityInput$IdentityId": "A unique identifier in the format REGION:GUID.", + "UnprocessedIdentityId$IdentityId": "

    A unique identifier in the format REGION:GUID.

    " + } + }, + "IdentityIdList": { + "base": null, + "refs": { + "DeleteIdentitiesInput$IdentityIdsToDelete": "

    A list of 1-60 identities that you want to delete.

    " + } + }, + "IdentityPool": { + "base": "An object representing a Cognito identity pool.", + "refs": { + } + }, + "IdentityPoolId": { + "base": null, + "refs": { + "DeleteIdentityPoolInput$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "DescribeIdentityPoolInput$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "GetIdInput$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "GetIdentityPoolRolesInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "GetIdentityPoolRolesResponse$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "GetOpenIdTokenForDeveloperIdentityInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "IdentityPool$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "IdentityPoolShortDescription$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "ListIdentitiesInput$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "ListIdentitiesResponse$IdentityPoolId": "An identity pool ID in the format REGION:GUID.", + "LookupDeveloperIdentityInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "MergeDeveloperIdentitiesInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "SetIdentityPoolRolesInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    ", + "UnlinkDeveloperIdentityInput$IdentityPoolId": "

    An identity pool ID in the format REGION:GUID.

    " + } + }, + "IdentityPoolName": { + "base": null, + "refs": { + "CreateIdentityPoolInput$IdentityPoolName": "

    A string that you provide.

    ", + "IdentityPool$IdentityPoolName": "

    A string that you provide.

    ", + "IdentityPoolShortDescription$IdentityPoolName": "A string that you provide." + } + }, + "IdentityPoolShortDescription": { + "base": "A description of the identity pool.", + "refs": { + "IdentityPoolsList$member": null + } + }, + "IdentityPoolUnauthenticated": { + "base": null, + "refs": { + "CreateIdentityPoolInput$AllowUnauthenticatedIdentities": "

    TRUE if the identity pool supports unauthenticated logins.

    ", + "IdentityPool$AllowUnauthenticatedIdentities": "TRUE if the identity pool supports unauthenticated logins." + } + }, + "IdentityPoolsList": { + "base": null, + "refs": { + "ListIdentityPoolsResponse$IdentityPools": "The identity pools returned by the ListIdentityPools action." + } + }, + "IdentityProviderId": { + "base": null, + "refs": { + "IdentityProviders$value": null + } + }, + "IdentityProviderName": { + "base": null, + "refs": { + "IdentityProviders$key": null, + "LoginsList$member": null, + "LoginsMap$key": null + } + }, + "IdentityProviderToken": { + "base": null, + "refs": { + "LoginsMap$value": null + } + }, + "IdentityProviders": { + "base": null, + "refs": { + "CreateIdentityPoolInput$SupportedLoginProviders": "

    Optional key:value pairs mapping provider names to provider app IDs.

    ", + "IdentityPool$SupportedLoginProviders": "

    Optional key:value pairs mapping provider names to provider app IDs.

    " + } + }, + "InternalErrorException": { + "base": "Thrown when the service encounters an error during processing the request.", + "refs": { + } + }, + "InvalidIdentityPoolConfigurationException": { + "base": "

    Thrown if the identity pool has no role associated for the given auth type (auth/unauth) or if the AssumeRole fails.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "Thrown for missing or bad input parameter(s).", + "refs": { + } + }, + "LimitExceededException": { + "base": "Thrown when the total number of user pools has exceeded a preset limit.", + "refs": { + } + }, + "ListIdentitiesInput": { + "base": "Input to the ListIdentities action.", + "refs": { + } + }, + "ListIdentitiesResponse": { + "base": "The response to a ListIdentities request.", + "refs": { + } + }, + "ListIdentityPoolsInput": { + "base": "Input to the ListIdentityPools action.", + "refs": { + } + }, + "ListIdentityPoolsResponse": { + "base": "The result of a successful ListIdentityPools action.", + "refs": { + } + }, + "LoginsList": { + "base": null, + "refs": { + "IdentityDescription$Logins": "A set of optional name-value pairs that map provider names to provider tokens.", + "UnlinkIdentityInput$LoginsToRemove": "Provider names to unlink from this identity." + } + }, + "LoginsMap": { + "base": null, + "refs": { + "GetCredentialsForIdentityInput$Logins": "

    A set of optional name-value pairs that map provider names to provider tokens.

    ", + "GetIdInput$Logins": "

    A set of optional name-value pairs that map provider names to provider tokens.

    The available provider names for Logins are as follows:

    • Facebook: graph.facebook.com
    • Google: accounts.google.com
    • Amazon: www.amazon.com
    • Twitter: www.twitter.com
    • Digits: www.digits.com

    ", + "GetOpenIdTokenForDeveloperIdentityInput$Logins": "

    A set of optional name-value pairs that map provider names to provider tokens. Each name-value pair represents a user from a public provider or developer provider. If the user is from a developer provider, the name-value pair will follow the syntax \"developer_provider_name\": \"developer_user_identifier\". The developer provider is the \"domain\" by which Cognito will refer to your users; you provided this domain while creating/updating the identity pool. The developer user identifier is an identifier from your backend that uniquely identifies a user. When you create an identity pool, you can specify the supported logins.

    ", + "GetOpenIdTokenInput$Logins": "A set of optional name-value pairs that map provider names to provider tokens. When using graph.facebook.com and www.amazon.com, supply the access_token returned from the provider's authflow. For accounts.google.com or any other OpenId Connect provider, always include the id_token.", + "UnlinkIdentityInput$Logins": "A set of optional name-value pairs that map provider names to provider tokens." + } + }, + "LookupDeveloperIdentityInput": { + "base": "

    Input to the LookupDeveloperIdentityInput action.

    ", + "refs": { + } + }, + "LookupDeveloperIdentityResponse": { + "base": "

    Returned in response to a successful LookupDeveloperIdentity action.

    ", + "refs": { + } + }, + "MergeDeveloperIdentitiesInput": { + "base": "

    Input to the MergeDeveloperIdentities action.

    ", + "refs": { + } + }, + "MergeDeveloperIdentitiesResponse": { + "base": "

    Returned in response to a successful MergeDeveloperIdentities action.

    ", + "refs": { + } + }, + "NotAuthorizedException": { + "base": "Thrown when a user is not authorized to access the requested resource.", + "refs": { + } + }, + "OIDCProviderList": { + "base": null, + "refs": { + "CreateIdentityPoolInput$OpenIdConnectProviderARNs": "

    A list of OpenID Connect provider ARNs.

    ", + "IdentityPool$OpenIdConnectProviderARNs": "

    A list of OpenID Connect provider ARNs.

    " + } + }, + "OIDCToken": { + "base": null, + "refs": { + "GetOpenIdTokenForDeveloperIdentityResponse$Token": "

    An OpenID token.

    ", + "GetOpenIdTokenResponse$Token": "An OpenID token, valid for 15 minutes." + } + }, + "PaginationKey": { + "base": null, + "refs": { + "ListIdentitiesInput$NextToken": "A pagination token.", + "ListIdentitiesResponse$NextToken": "A pagination token.", + "ListIdentityPoolsInput$NextToken": "A pagination token.", + "ListIdentityPoolsResponse$NextToken": "A pagination token.", + "LookupDeveloperIdentityInput$NextToken": "

    A pagination token. The first call you make will have NextToken set to null. After that the service will return NextToken values as needed. For example, let's say you make a request with MaxResults set to 10, and there are 20 matches in the database. The service will return a pagination token as a part of the response. This token can be used to call the API again and get results starting from the 11th match.

    ", + "LookupDeveloperIdentityResponse$NextToken": "

    A pagination token. The first call you make will have NextToken set to null. After that the service will return NextToken values as needed. For example, let's say you make a request with MaxResults set to 10, and there are 20 matches in the database. The service will return a pagination token as a part of the response. This token can be used to call the API again and get results starting from the 11th match.

    " + } + }, + "QueryLimit": { + "base": null, + "refs": { + "ListIdentitiesInput$MaxResults": "The maximum number of identities to return.", + "ListIdentityPoolsInput$MaxResults": "The maximum number of identities to return.", + "LookupDeveloperIdentityInput$MaxResults": "

    The maximum number of identities to return.

    " + } + }, + "ResourceConflictException": { + "base": "Thrown when a user tries to use a login which is already linked to another account.", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "Thrown when the requested resource (for example, a dataset or record) does not exist.", + "refs": { + } + }, + "RoleType": { + "base": null, + "refs": { + "RolesMap$key": null + } + }, + "RolesMap": { + "base": null, + "refs": { + "GetIdentityPoolRolesResponse$Roles": "

    The map of roles associated with this pool. Currently only authenticated and unauthenticated roles are supported.

    ", + "SetIdentityPoolRolesInput$Roles": "

    The map of roles associated with this pool. For a given role, the key will be either \"authenticated\" or \"unauthenticated\" and the value will be the Role ARN.

    " + } + }, + "SecretKeyString": { + "base": null, + "refs": { + "Credentials$SecretKey": "

    The Secret Access Key portion of the credentials

    " + } + }, + "SessionTokenString": { + "base": null, + "refs": { + "Credentials$SessionToken": "

    The Session Token portion of the credentials

    " + } + }, + "SetIdentityPoolRolesInput": { + "base": "

    Input to the SetIdentityPoolRoles action.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "ConcurrentModificationException$message": "

    The message returned by a ConcurrentModificationException.

    ", + "DeveloperUserAlreadyRegisteredException$message": "

    This developer user identifier is already registered with Cognito.

    ", + "ExternalServiceException$message": "

    The message returned by an ExternalServiceException

    ", + "InternalErrorException$message": "The message returned by an InternalErrorException.", + "InvalidIdentityPoolConfigurationException$message": "

    The message returned for an InvalidIdentityPoolConfigurationException

    ", + "InvalidParameterException$message": "The message returned by an InvalidParameterException.", + "LimitExceededException$message": "The message returned by a LimitExceededException.", + "NotAuthorizedException$message": "The message returned by a NotAuthorizedException", + "ResourceConflictException$message": "The message returned by a ResourceConflictException.", + "ResourceNotFoundException$message": "The message returned by a ResourceNotFoundException.", + "TooManyRequestsException$message": "Message returned by a TooManyRequestsException" + } + }, + "TokenDuration": { + "base": null, + "refs": { + "GetOpenIdTokenForDeveloperIdentityInput$TokenDuration": "

    The expiration time of the token, in seconds. You can specify a custom expiration time for the token so that you can cache it. If you don't provide an expiration time, the token is valid for 15 minutes. You can exchange the token with Amazon STS for temporary AWS credentials, which are valid for a maximum of one hour. The maximum token duration you can set is 24 hours. You should take care in setting the expiration time for a token, as there are significant security implications: an attacker could use a leaked token to access your AWS resources for the token's duration.

    " + } + }, + "TooManyRequestsException": { + "base": "Thrown when a request is throttled.", + "refs": { + } + }, + "UnlinkDeveloperIdentityInput": { + "base": "

    Input to the UnlinkDeveloperIdentity action.

    ", + "refs": { + } + }, + "UnlinkIdentityInput": { + "base": "Input to the UnlinkIdentity action.", + "refs": { + } + }, + "UnprocessedIdentityId": { + "base": "

    An array of UnprocessedIdentityId objects, each of which contains an ErrorCode and IdentityId.

    ", + "refs": { + "UnprocessedIdentityIdList$member": null + } + }, + "UnprocessedIdentityIdList": { + "base": null, + "refs": { + "DeleteIdentitiesResponse$UnprocessedIdentityIds": "

    An array of UnprocessedIdentityId objects, each of which contains an ErrorCode and IdentityId.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1874 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-06-30", + "endpointPrefix":"cognito-sync", + "jsonVersion":"1.1", + "serviceFullName":"Amazon Cognito Sync", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "BulkPublish":{ + "name":"BulkPublish", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/bulkpublish", + "responseCode":200 + }, + "input":{"shape":"BulkPublishRequest"}, + "output":{"shape":"BulkPublishResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"DuplicateRequestException", + "error":{ + "code":"DuplicateRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AlreadyStreamedException", + "error":{ + "code":"AlreadyStreamed", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"DELETE", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + "responseCode":200 + }, + "input":{"shape":"DeleteDatasetRequest"}, + "output":{"shape":"DeleteDatasetResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{ + "code":"ResourceConflict", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + "responseCode":200 + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + 
"error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeIdentityPoolUsage":{ + "name":"DescribeIdentityPoolUsage", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}", + "responseCode":200 + }, + "input":{"shape":"DescribeIdentityPoolUsageRequest"}, + "output":{"shape":"DescribeIdentityPoolUsageResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeIdentityUsage":{ + "name":"DescribeIdentityUsage", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}", + "responseCode":200 + }, + "input":{"shape":"DescribeIdentityUsageRequest"}, + "output":{"shape":"DescribeIdentityUsageResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetBulkPublishDetails":{ + "name":"GetBulkPublishDetails", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/getBulkPublishDetails", + "responseCode":200 + }, + "input":{"shape":"GetBulkPublishDetailsRequest"}, + "output":{"shape":"GetBulkPublishDetailsResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + } + ] + }, + "GetCognitoEvents":{ + "name":"GetCognitoEvents", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/events", + "responseCode":200 + }, + "input":{"shape":"GetCognitoEventsRequest"}, + "output":{"shape":"GetCognitoEventsResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetIdentityPoolConfiguration":{ + "name":"GetIdentityPoolConfiguration", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/configuration", + "responseCode":200 + }, + "input":{"shape":"GetIdentityPoolConfigurationRequest"}, + "output":{"shape":"GetIdentityPoolConfigurationResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListDatasets":{ + "name":"ListDatasets", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets", + "responseCode":200 + }, + "input":{"shape":"ListDatasetsRequest"}, + "output":{"shape":"ListDatasetsResponse"}, 
+ "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListIdentityPoolUsage":{ + "name":"ListIdentityPoolUsage", + "http":{ + "method":"GET", + "requestUri":"/identitypools", + "responseCode":200 + }, + "input":{"shape":"ListIdentityPoolUsageRequest"}, + "output":{"shape":"ListIdentityPoolUsageResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListRecords":{ + "name":"ListRecords", + "http":{ + "method":"GET", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/records", + "responseCode":200 + }, + "input":{"shape":"ListRecordsRequest"}, + "output":{"shape":"ListRecordsResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + } + ] + }, + "RegisterDevice":{ + "name":"RegisterDevice", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/identity/{IdentityId}/device", + "responseCode":200 + }, + "input":{"shape":"RegisterDeviceRequest"}, + "output":{"shape":"RegisterDeviceResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidConfigurationException", + "error":{ + "code":"InvalidConfiguration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetCognitoEvents":{ + "name":"SetCognitoEvents", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/events", + "responseCode":200 + }, + "input":{"shape":"SetCognitoEventsRequest"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + 
"code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetIdentityPoolConfiguration":{ + "name":"SetIdentityPoolConfiguration", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/configuration", + "responseCode":200 + }, + "input":{"shape":"SetIdentityPoolConfigurationRequest"}, + "output":{"shape":"SetIdentityPoolConfigurationResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "error":{ + "code":"ConcurrentModification", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } 
+ ] + }, + "SubscribeToDataset":{ + "name":"SubscribeToDataset", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/subscriptions/{DeviceId}", + "responseCode":200 + }, + "input":{"shape":"SubscribeToDatasetRequest"}, + "output":{"shape":"SubscribeToDatasetResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidConfigurationException", + "error":{ + "code":"InvalidConfiguration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "UnsubscribeFromDataset":{ + "name":"UnsubscribeFromDataset", + "http":{ + "method":"DELETE", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/subscriptions/{DeviceId}", + "responseCode":200 + }, + "input":{"shape":"UnsubscribeFromDatasetRequest"}, + "output":{"shape":"UnsubscribeFromDatasetResponse"}, + "errors":[ + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidConfigurationException", + "error":{ + "code":"InvalidConfiguration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateRecords":{ + "name":"UpdateRecords", + "http":{ + "method":"POST", + "requestUri":"/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + "responseCode":200 + }, + "input":{"shape":"UpdateRecordsRequest"}, + "output":{"shape":"UpdateRecordsResponse"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotAuthorizedException", + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{ + "code":"ResourceConflict", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidLambdaFunctionOutputException", + "error":{ + "code":"InvalidLambdaFunctionOutput", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LambdaThrottledException", + "error":{ + "code":"LambdaThrottled", + "httpStatusCode":429, 
+ "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "AlreadyStreamedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"AlreadyStreamed", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ApplicationArn":{ + "type":"string", + "pattern":"arn:aws:sns:[-0-9a-z]+:\\d+:app/[A-Z_]+/[a-zA-Z0-9_.-]+" + }, + "ApplicationArnList":{ + "type":"list", + "member":{"shape":"ApplicationArn"} + }, + "AssumeRoleArn":{ + "type":"string", + "min":20, + "max":2048, + "pattern":"arn:aws:iam::\\d+:role/.*" + }, + "Boolean":{"type":"boolean"}, + "BulkPublishRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "BulkPublishResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"} + } + }, + "BulkPublishStatus":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "FAILED", + "SUCCEEDED" + ] + }, + "ClientContext":{"type":"string"}, + "CognitoEventType":{"type":"string"}, + "CognitoStreams":{ + "type":"structure", + "members":{ + "StreamName":{"shape":"StreamName"}, + "RoleArn":{"shape":"AssumeRoleArn"}, + "StreamingStatus":{"shape":"StreamingStatus"} + } + }, + "ConcurrentModificationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "error":{ + "code":"ConcurrentModification", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Dataset":{ + "type":"structure", + "members":{ + 
"IdentityId":{"shape":"IdentityId"}, + "DatasetName":{"shape":"DatasetName"}, + "CreationDate":{"shape":"Date"}, + "LastModifiedDate":{"shape":"Date"}, + "LastModifiedBy":{"shape":"String"}, + "DataStorage":{"shape":"Long"}, + "NumRecords":{"shape":"Long"} + } + }, + "DatasetList":{ + "type":"list", + "member":{"shape":"Dataset"} + }, + "DatasetName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[a-zA-Z0-9_.:-]+" + }, + "Date":{"type":"timestamp"}, + "DeleteDatasetRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + } + } + }, + "DeleteDatasetResponse":{ + "type":"structure", + "members":{ + "Dataset":{"shape":"Dataset"} + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "members":{ + "Dataset":{"shape":"Dataset"} + } + }, + "DescribeIdentityPoolUsageRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "DescribeIdentityPoolUsageResponse":{ + "type":"structure", + "members":{ + "IdentityPoolUsage":{"shape":"IdentityPoolUsage"} + } + }, + "DescribeIdentityUsageRequest":{ + "type":"structure", + 
"required":[ + "IdentityPoolId", + "IdentityId" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + } + } + }, + "DescribeIdentityUsageResponse":{ + "type":"structure", + "members":{ + "IdentityUsage":{"shape":"IdentityUsage"} + } + }, + "DeviceId":{ + "type":"string", + "min":1, + "max":256 + }, + "DuplicateRequestException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"DuplicateRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Events":{ + "type":"map", + "key":{"shape":"CognitoEventType"}, + "value":{"shape":"LambdaFunctionArn"}, + "max":1 + }, + "ExceptionMessage":{"type":"string"}, + "GetBulkPublishDetailsRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "GetBulkPublishDetailsResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "BulkPublishStartTime":{"shape":"Date"}, + "BulkPublishCompleteTime":{"shape":"Date"}, + "BulkPublishStatus":{"shape":"BulkPublishStatus"}, + "FailureMessage":{"shape":"String"} + } + }, + "GetCognitoEventsRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + "GetCognitoEventsResponse":{ + "type":"structure", + "members":{ + "Events":{"shape":"Events"} + } + }, + "GetIdentityPoolConfigurationRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + } + } + }, + 
"GetIdentityPoolConfigurationResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "PushSync":{"shape":"PushSync"}, + "CognitoStreams":{"shape":"CognitoStreams"} + } + }, + "IdentityId":{ + "type":"string", + "min":1, + "max":50, + "pattern":"[\\w-]+:[0-9a-f-]+" + }, + "IdentityPoolId":{ + "type":"string", + "min":1, + "max":50, + "pattern":"[\\w-]+:[0-9a-f-]+" + }, + "IdentityPoolUsage":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "SyncSessionsCount":{"shape":"Long"}, + "DataStorage":{"shape":"Long"}, + "LastModifiedDate":{"shape":"Date"} + } + }, + "IdentityPoolUsageList":{ + "type":"list", + "member":{"shape":"IdentityPoolUsage"} + }, + "IdentityUsage":{ + "type":"structure", + "members":{ + "IdentityId":{"shape":"IdentityId"}, + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "LastModifiedDate":{"shape":"Date"}, + "DatasetCount":{"shape":"Integer"}, + "DataStorage":{"shape":"Long"} + } + }, + "Integer":{"type":"integer"}, + "IntegerString":{"type":"integer"}, + "InternalErrorException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + "InvalidConfigurationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"InvalidConfiguration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidLambdaFunctionOutputException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"InvalidLambdaFunctionOutput", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + 
"code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LambdaFunctionArn":{"type":"string"}, + "LambdaThrottledException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"LambdaThrottled", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ListDatasetsRequest":{ + "type":"structure", + "required":[ + "IdentityId", + "IdentityPoolId" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"IntegerString", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListDatasetsResponse":{ + "type":"structure", + "members":{ + "Datasets":{"shape":"DatasetList"}, + "Count":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "ListIdentityPoolUsageRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"IntegerString", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListIdentityPoolUsageResponse":{ + "type":"structure", + "members":{ + "IdentityPoolUsages":{"shape":"IdentityPoolUsageList"}, + "MaxResults":{"shape":"Integer"}, + "Count":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "ListRecordsRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName" + ], + 
"members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + }, + "LastSyncCount":{ + "shape":"Long", + "location":"querystring", + "locationName":"lastSyncCount" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"IntegerString", + "location":"querystring", + "locationName":"maxResults" + }, + "SyncSessionToken":{ + "shape":"SyncSessionToken", + "location":"querystring", + "locationName":"syncSessionToken" + } + } + }, + "ListRecordsResponse":{ + "type":"structure", + "members":{ + "Records":{"shape":"RecordList"}, + "NextToken":{"shape":"String"}, + "Count":{"shape":"Integer"}, + "DatasetSyncCount":{"shape":"Long"}, + "LastModifiedBy":{"shape":"String"}, + "MergedDatasetNames":{"shape":"MergedDatasetNameList"}, + "DatasetExists":{"shape":"Boolean"}, + "DatasetDeletedAfterRequestedSyncCount":{"shape":"Boolean"}, + "SyncSessionToken":{"shape":"String"} + } + }, + "Long":{"type":"long"}, + "MergedDatasetNameList":{ + "type":"list", + "member":{"shape":"String"} + }, + "NotAuthorizedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"NotAuthorizedError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Operation":{ + "type":"string", + "enum":[ + "replace", + "remove" + ] + }, + "Platform":{ + "type":"string", + "enum":[ + "APNS", + "APNS_SANDBOX", + "GCM", + "ADM" + ] + }, + "PushSync":{ + "type":"structure", + "members":{ + "ApplicationArns":{"shape":"ApplicationArnList"}, + "RoleArn":{"shape":"AssumeRoleArn"} + } + }, + "PushToken":{"type":"string"}, + "Record":{ + "type":"structure", + "members":{ + 
"Key":{"shape":"RecordKey"}, + "Value":{"shape":"RecordValue"}, + "SyncCount":{"shape":"Long"}, + "LastModifiedDate":{"shape":"Date"}, + "LastModifiedBy":{"shape":"String"}, + "DeviceLastModifiedDate":{"shape":"Date"} + } + }, + "RecordKey":{ + "type":"string", + "min":1, + "max":1024 + }, + "RecordList":{ + "type":"list", + "member":{"shape":"Record"} + }, + "RecordPatch":{ + "type":"structure", + "required":[ + "Op", + "Key", + "SyncCount" + ], + "members":{ + "Op":{"shape":"Operation"}, + "Key":{"shape":"RecordKey"}, + "Value":{"shape":"RecordValue"}, + "SyncCount":{"shape":"Long"}, + "DeviceLastModifiedDate":{"shape":"Date"} + } + }, + "RecordPatchList":{ + "type":"list", + "member":{"shape":"RecordPatch"} + }, + "RecordValue":{ + "type":"string", + "max":1048575 + }, + "RegisterDeviceRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "Platform", + "Token" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "Platform":{"shape":"Platform"}, + "Token":{"shape":"PushToken"} + } + }, + "RegisterDeviceResponse":{ + "type":"structure", + "members":{ + "DeviceId":{"shape":"DeviceId"} + } + }, + "ResourceConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"ResourceConflict", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SetCognitoEventsRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "Events" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + 
"location":"uri", + "locationName":"IdentityPoolId" + }, + "Events":{"shape":"Events"} + } + }, + "SetIdentityPoolConfigurationRequest":{ + "type":"structure", + "required":["IdentityPoolId"], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "PushSync":{"shape":"PushSync"}, + "CognitoStreams":{"shape":"CognitoStreams"} + } + }, + "SetIdentityPoolConfigurationResponse":{ + "type":"structure", + "members":{ + "IdentityPoolId":{"shape":"IdentityPoolId"}, + "PushSync":{"shape":"PushSync"}, + "CognitoStreams":{"shape":"CognitoStreams"} + } + }, + "StreamName":{ + "type":"string", + "min":1, + "max":128 + }, + "StreamingStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "String":{"type":"string"}, + "SubscribeToDatasetRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName", + "DeviceId" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + }, + "DeviceId":{ + "shape":"DeviceId", + "location":"uri", + "locationName":"DeviceId" + } + } + }, + "SubscribeToDatasetResponse":{ + "type":"structure", + "members":{ + } + }, + "SyncSessionToken":{"type":"string"}, + "TooManyRequestsException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "code":"TooManyRequests", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "UnsubscribeFromDatasetRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName", + "DeviceId" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + 
"IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + }, + "DeviceId":{ + "shape":"DeviceId", + "location":"uri", + "locationName":"DeviceId" + } + } + }, + "UnsubscribeFromDatasetResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateRecordsRequest":{ + "type":"structure", + "required":[ + "IdentityPoolId", + "IdentityId", + "DatasetName", + "SyncSessionToken" + ], + "members":{ + "IdentityPoolId":{ + "shape":"IdentityPoolId", + "location":"uri", + "locationName":"IdentityPoolId" + }, + "IdentityId":{ + "shape":"IdentityId", + "location":"uri", + "locationName":"IdentityId" + }, + "DatasetName":{ + "shape":"DatasetName", + "location":"uri", + "locationName":"DatasetName" + }, + "DeviceId":{"shape":"DeviceId"}, + "RecordPatches":{"shape":"RecordPatchList"}, + "SyncSessionToken":{"shape":"SyncSessionToken"}, + "ClientContext":{ + "shape":"ClientContext", + "location":"header", + "locationName":"x-amz-Client-Context" + } + } + }, + "UpdateRecordsResponse":{ + "type":"structure", + "members":{ + "Records":{"shape":"RecordList"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/cognito-sync/2014-06-30/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,588 @@ +{ + "version": "2.0", + "operations": { + "BulkPublish": "

    Initiates a bulk publish of all existing datasets for an Identity Pool to the configured stream. Customers are limited to one successful bulk publish per 24 hours. Bulk publish is an asynchronous request, customers can see the status of the request via the GetBulkPublishDetails operation.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "DeleteDataset": "

    Deletes the specific dataset. The dataset will be deleted permanently, and the action can't be undone. Datasets that this dataset was merged with will no longer report the merge. Any subsequent operation on this dataset will result in a ResourceNotFoundException.

    This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.

    ", + "DescribeDataset": "

    Gets meta data about a dataset by identity and dataset name. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.

    This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use Cognito Identity credentials to make this API call.

    ", + "DescribeIdentityPoolUsage": "

    Gets usage details (for example, data storage) about a particular identity pool.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "DescribeIdentityUsage": "

    Gets usage information for an identity, including number of datasets and data usage.

    This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.

    ", + "GetBulkPublishDetails": "

    Get the status of the last BulkPublish operation for an identity pool.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "GetCognitoEvents": "

    Gets the events and the corresponding Lambda functions associated with an identity pool.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "GetIdentityPoolConfiguration": "

    Gets the configuration settings of an identity pool.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "ListDatasets": "

    Lists datasets for an identity. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.

    ListDatasets can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use the Cognito Identity credentials to make this API call.

    ", + "ListIdentityPoolUsage": "

    Gets a list of identity pools registered with Cognito.

    ListIdentityPoolUsage can only be called with developer credentials. You cannot make this API call with the temporary user credentials provided by Cognito Identity.

    ", + "ListRecords": "

    Gets paginated records, optionally changed after a particular sync count for a dataset and identity. With Amazon Cognito Sync, each identity has access only to its own data. Thus, the credentials used to make this API call need to have access to the identity data.

    ListRecords can be called with temporary user credentials provided by Cognito Identity or with developer credentials. You should use Cognito Identity credentials to make this API call.

    ", + "RegisterDevice": "

    Registers a device to receive push sync notifications.

    This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.

    ", + "SetCognitoEvents": "

    Sets the AWS Lambda function for a given event type for an identity pool. This request only updates the key/value pair specified. Other key/values pairs are not updated. To remove a key value pair, pass a empty value for the particular key.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "SetIdentityPoolConfiguration": "

    Sets the necessary configuration for push sync.

    This API can only be called with developer credentials. You cannot call this API with the temporary user credentials provided by Cognito Identity.

    ", + "SubscribeToDataset": "

    Subscribes to receive notifications when a dataset is modified by another device.

    This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.

    ", + "UnsubscribeFromDataset": "

    Unsubscribes from receiving notifications when a dataset is modified by another device.

    This API can only be called with temporary credentials provided by Cognito Identity. You cannot call this API with developer credentials.

    ", + "UpdateRecords": "

    Posts updates to records and adds and deletes records for a dataset and user.

    The sync count in the record patch is your last known sync count for that record. The server will reject an UpdateRecords request with a ResourceConflictException if you try to patch a record with a new value but a stale sync count.

    For example, if the sync count on the server is 5 for a key called highScore and you try and submit a new highScore with sync count of 4, the request will be rejected. To obtain the current sync count for a record, call ListRecords. On a successful update of the record, the response returns the new sync count for that record. You should present that sync count the next time you try to update that same record. When the record does not exist, specify the sync count as 0.

    This API can be called with temporary user credentials provided by Cognito Identity or with developer credentials.

    " + }, + "service": "Amazon Cognito Sync

    Amazon Cognito Sync provides an AWS service and client library that enable cross-device syncing of application-related user data. High-level client libraries are available for both iOS and Android. You can use these libraries to persist data locally so that it's available even if the device is offline. Developer credentials don't need to be stored on the mobile device to access the service. You can use Amazon Cognito to obtain a normalized user ID and credentials. User data is persisted in a dataset that can store up to 1 MB of key-value pairs, and you can have up to 20 datasets per user identity.

    With Amazon Cognito Sync, the data stored for each identity is accessible only to credentials assigned to that identity. In order to use the Cognito Sync service, you need to make API calls using credentials retrieved with Amazon Cognito Identity service.

    If you want to use Cognito Sync in an Android or iOS application, you will probably want to make API calls via the AWS Mobile SDK. To learn more, see the Developer Guide for Android and the Developer Guide for iOS.

    ", + "shapes": { + "AlreadyStreamedException": { + "base": "An exception thrown when a bulk publish operation is requested less than 24 hours after a previous bulk publish operation completed successfully.", + "refs": { + } + }, + "ApplicationArn": { + "base": null, + "refs": { + "ApplicationArnList$member": null + } + }, + "ApplicationArnList": { + "base": null, + "refs": { + "PushSync$ApplicationArns": "

    List of SNS platform application ARNs that could be used by clients.

    " + } + }, + "AssumeRoleArn": { + "base": null, + "refs": { + "CognitoStreams$RoleArn": "The ARN of the role Amazon Cognito can assume in order to publish to the stream. This role must grant access to Amazon Cognito (cognito-sync) to invoke PutRecord on your Cognito stream.", + "PushSync$RoleArn": "

    A role configured to allow Cognito to call SNS on behalf of the developer.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ListRecordsResponse$DatasetExists": "Indicates whether the dataset exists.", + "ListRecordsResponse$DatasetDeletedAfterRequestedSyncCount": "A boolean value specifying whether to delete the dataset locally." + } + }, + "BulkPublishRequest": { + "base": "The input for the BulkPublish operation.", + "refs": { + } + }, + "BulkPublishResponse": { + "base": "The output for the BulkPublish operation.", + "refs": { + } + }, + "BulkPublishStatus": { + "base": null, + "refs": { + "GetBulkPublishDetailsResponse$BulkPublishStatus": "Status of the last bulk publish operation, valid values are:

    NOT_STARTED - No bulk publish has been requested for this identity pool

    IN_PROGRESS - Data is being published to the configured stream

    SUCCEEDED - All data for the identity pool has been published to the configured stream

    FAILED - Some portion of the data has failed to publish, check FailureMessage for the cause.

    " + } + }, + "ClientContext": { + "base": null, + "refs": { + "UpdateRecordsRequest$ClientContext": "Intended to supply a device ID that will populate the lastModifiedBy field referenced in other methods. The ClientContext field is not yet implemented." + } + }, + "CognitoEventType": { + "base": null, + "refs": { + "Events$key": null + } + }, + "CognitoStreams": { + "base": "Configuration options for configure Cognito streams.", + "refs": { + "GetIdentityPoolConfigurationResponse$CognitoStreams": "Options to apply to this identity pool for Amazon Cognito streams.", + "SetIdentityPoolConfigurationRequest$CognitoStreams": "Options to apply to this identity pool for Amazon Cognito streams.", + "SetIdentityPoolConfigurationResponse$CognitoStreams": "Options to apply to this identity pool for Amazon Cognito streams." + } + }, + "ConcurrentModificationException": { + "base": "

    Thrown if there are parallel requests to modify a resource.

    ", + "refs": { + } + }, + "Dataset": { + "base": "A collection of data for an identity pool. An identity pool can have multiple datasets. A dataset is per identity and can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.", + "refs": { + "DatasetList$member": null, + "DeleteDatasetResponse$Dataset": "A collection of data for an identity pool. An identity pool can have multiple datasets. A dataset is per identity and can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs.", + "DescribeDatasetResponse$Dataset": "Meta data for a collection of data for an identity. An identity can have multiple datasets. A dataset can be general or associated with a particular entity in an application (like a saved game). Datasets are automatically created if they don't exist. Data is synced by dataset, and a dataset can hold up to 1MB of key-value pairs." + } + }, + "DatasetList": { + "base": null, + "refs": { + "ListDatasetsResponse$Datasets": "A set of datasets." + } + }, + "DatasetName": { + "base": null, + "refs": { + "Dataset$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot).", + "DeleteDatasetRequest$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot).", + "DescribeDatasetRequest$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot).", + "ListRecordsRequest$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' 
(dot).", + "SubscribeToDatasetRequest$DatasetName": "

    The name of the dataset to subcribe to.

    ", + "UnsubscribeFromDatasetRequest$DatasetName": "

    The name of the dataset from which to unsubcribe.

    ", + "UpdateRecordsRequest$DatasetName": "A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' (dot)." + } + }, + "Date": { + "base": null, + "refs": { + "Dataset$CreationDate": "Date on which the dataset was created.", + "Dataset$LastModifiedDate": "Date when the dataset was last modified.", + "GetBulkPublishDetailsResponse$BulkPublishStartTime": "The date/time at which the last bulk publish was initiated.", + "GetBulkPublishDetailsResponse$BulkPublishCompleteTime": "If BulkPublishStatus is SUCCEEDED, the time the last bulk publish operation completed.", + "IdentityPoolUsage$LastModifiedDate": "Date on which the identity pool was last modified.", + "IdentityUsage$LastModifiedDate": "Date on which the identity was last modified.", + "Record$LastModifiedDate": "The date on which the record was last modified.", + "Record$DeviceLastModifiedDate": "The last modified date of the client device.", + "RecordPatch$DeviceLastModifiedDate": "The last modified date of the client device." 
+ } + }, + "DeleteDatasetRequest": { + "base": "A request to delete the specific dataset.", + "refs": { + } + }, + "DeleteDatasetResponse": { + "base": "Response to a successful DeleteDataset request.", + "refs": { + } + }, + "DescribeDatasetRequest": { + "base": "A request for meta data about a dataset (creation date, number of records, size) by owner and dataset name.", + "refs": { + } + }, + "DescribeDatasetResponse": { + "base": "Response to a successful DescribeDataset request.", + "refs": { + } + }, + "DescribeIdentityPoolUsageRequest": { + "base": "A request for usage information about the identity pool.", + "refs": { + } + }, + "DescribeIdentityPoolUsageResponse": { + "base": "Response to a successful DescribeIdentityPoolUsage request.", + "refs": { + } + }, + "DescribeIdentityUsageRequest": { + "base": "A request for information about the usage of an identity pool.", + "refs": { + } + }, + "DescribeIdentityUsageResponse": { + "base": "The response to a successful DescribeIdentityUsage request.", + "refs": { + } + }, + "DeviceId": { + "base": null, + "refs": { + "RegisterDeviceResponse$DeviceId": "

    The unique ID generated for this device by Cognito.

    ", + "SubscribeToDatasetRequest$DeviceId": "

    The unique ID generated for this device by Cognito.

    ", + "UnsubscribeFromDatasetRequest$DeviceId": "

    The unique ID generated for this device by Cognito.

    ", + "UpdateRecordsRequest$DeviceId": "

    The unique ID generated for this device by Cognito.

    " + } + }, + "DuplicateRequestException": { + "base": "An exception thrown when there is an IN_PROGRESS bulk publish operation for the given identity pool.", + "refs": { + } + }, + "Events": { + "base": null, + "refs": { + "GetCognitoEventsResponse$Events": "

    The Cognito Events returned from the GetCognitoEvents request

    ", + "SetCognitoEventsRequest$Events": "

    The events to configure

    " + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "AlreadyStreamedException$message": "The message associated with the AlreadyStreamedException exception.", + "DuplicateRequestException$message": "The message associated with the DuplicateRequestException exception.", + "InternalErrorException$message": "Message returned by InternalErrorException.", + "InvalidConfigurationException$message": "Message returned by InvalidConfigurationException.", + "InvalidLambdaFunctionOutputException$message": "

    A message returned when an InvalidLambdaFunctionOutputException occurs

    ", + "InvalidParameterException$message": "Message returned by InvalidParameterException.", + "LambdaThrottledException$message": "

    A message returned when an LambdaThrottledException is thrown

    ", + "LimitExceededException$message": "Message returned by LimitExceededException.", + "NotAuthorizedException$message": "The message returned by a NotAuthorizedException.", + "ResourceConflictException$message": "The message returned by a ResourceConflictException.", + "ResourceNotFoundException$message": "Message returned by a ResourceNotFoundException.", + "TooManyRequestsException$message": "Message returned by a TooManyRequestsException." + } + }, + "GetBulkPublishDetailsRequest": { + "base": "The input for the GetBulkPublishDetails operation.", + "refs": { + } + }, + "GetBulkPublishDetailsResponse": { + "base": "The output for the GetBulkPublishDetails operation.", + "refs": { + } + }, + "GetCognitoEventsRequest": { + "base": "

    A request for a list of the configured Cognito Events

    ", + "refs": { + } + }, + "GetCognitoEventsResponse": { + "base": "

    The response from the GetCognitoEvents request

    ", + "refs": { + } + }, + "GetIdentityPoolConfigurationRequest": { + "base": "

    The input for the GetIdentityPoolConfiguration operation.

    ", + "refs": { + } + }, + "GetIdentityPoolConfigurationResponse": { + "base": "

    The output for the GetIdentityPoolConfiguration operation.

    ", + "refs": { + } + }, + "IdentityId": { + "base": null, + "refs": { + "Dataset$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DeleteDatasetRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeDatasetRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeIdentityUsageRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "IdentityUsage$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "ListDatasetsRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "ListRecordsRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "RegisterDeviceRequest$IdentityId": "

    The unique ID for this identity.

    ", + "SubscribeToDatasetRequest$IdentityId": "

    Unique ID for this identity.

    ", + "UnsubscribeFromDatasetRequest$IdentityId": "

    Unique ID for this identity.

    ", + "UpdateRecordsRequest$IdentityId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region." + } + }, + "IdentityPoolId": { + "base": null, + "refs": { + "BulkPublishRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "BulkPublishResponse$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DeleteDatasetRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeDatasetRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeIdentityPoolUsageRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "DescribeIdentityUsageRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "GetBulkPublishDetailsRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "GetBulkPublishDetailsResponse$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "GetCognitoEventsRequest$IdentityPoolId": "

    The Cognito Identity Pool ID for the request

    ", + "GetIdentityPoolConfigurationRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. This is the ID of the pool for which to return a configuration.

    ", + "GetIdentityPoolConfigurationResponse$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito.

    ", + "IdentityPoolUsage$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "IdentityUsage$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "ListDatasetsRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "ListRecordsRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region.", + "RegisterDeviceRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. Here, the ID of the pool that the identity belongs to.

    ", + "SetCognitoEventsRequest$IdentityPoolId": "

    The Cognito Identity Pool to use when configuring Cognito Events

    ", + "SetIdentityPoolConfigurationRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. This is the ID of the pool to modify.

    ", + "SetIdentityPoolConfigurationResponse$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito.

    ", + "SubscribeToDatasetRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. The ID of the pool to which the identity belongs.

    ", + "UnsubscribeFromDatasetRequest$IdentityPoolId": "

    A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. The ID of the pool to which this identity belongs.

    ", + "UpdateRecordsRequest$IdentityPoolId": "A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon Cognito. GUID generation is unique within a region." + } + }, + "IdentityPoolUsage": { + "base": "Usage information for the identity pool.", + "refs": { + "DescribeIdentityPoolUsageResponse$IdentityPoolUsage": "Information about the usage of the identity pool.", + "IdentityPoolUsageList$member": null + } + }, + "IdentityPoolUsageList": { + "base": null, + "refs": { + "ListIdentityPoolUsageResponse$IdentityPoolUsages": "Usage information for the identity pools." + } + }, + "IdentityUsage": { + "base": "Usage information for the identity.", + "refs": { + "DescribeIdentityUsageResponse$IdentityUsage": "Usage information for the identity." + } + }, + "Integer": { + "base": null, + "refs": { + "IdentityUsage$DatasetCount": "Number of datasets for the identity.", + "ListDatasetsResponse$Count": "Number of datasets returned.", + "ListIdentityPoolUsageResponse$MaxResults": "The maximum number of results to be returned.", + "ListIdentityPoolUsageResponse$Count": "Total number of identities for the identity pool.", + "ListRecordsResponse$Count": "Total number of records." + } + }, + "IntegerString": { + "base": null, + "refs": { + "ListDatasetsRequest$MaxResults": "The maximum number of results to be returned.", + "ListIdentityPoolUsageRequest$MaxResults": "The maximum number of results to be returned.", + "ListRecordsRequest$MaxResults": "The maximum number of results to be returned." + } + }, + "InternalErrorException": { + "base": "Indicates an internal service error.", + "refs": { + } + }, + "InvalidConfigurationException": { + "base": null, + "refs": { + } + }, + "InvalidLambdaFunctionOutputException": { + "base": "

    The AWS Lambda function returned invalid output or an exception.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "Thrown when a request parameter does not comply with the associated constraints.", + "refs": { + } + }, + "LambdaFunctionArn": { + "base": null, + "refs": { + "Events$value": null + } + }, + "LambdaThrottledException": { + "base": "

    AWS Lambda throttled your account, please contact AWS Support

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "Thrown when the limit on the number of objects or operations has been exceeded.", + "refs": { + } + }, + "ListDatasetsRequest": { + "base": "Request for a list of datasets for an identity.", + "refs": { + } + }, + "ListDatasetsResponse": { + "base": "Returned for a successful ListDatasets request.", + "refs": { + } + }, + "ListIdentityPoolUsageRequest": { + "base": "A request for usage information on an identity pool.", + "refs": { + } + }, + "ListIdentityPoolUsageResponse": { + "base": "Returned for a successful ListIdentityPoolUsage request.", + "refs": { + } + }, + "ListRecordsRequest": { + "base": "A request for a list of records.", + "refs": { + } + }, + "ListRecordsResponse": { + "base": "Returned for a successful ListRecordsRequest.", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "Dataset$DataStorage": "Total size in bytes of the records in this dataset.", + "Dataset$NumRecords": "Number of records in this dataset.", + "IdentityPoolUsage$SyncSessionsCount": "Number of sync sessions for the identity pool.", + "IdentityPoolUsage$DataStorage": "Data storage information for the identity pool.", + "IdentityUsage$DataStorage": "Total data storage for this identity.", + "ListRecordsRequest$LastSyncCount": "The last server sync count for this record.", + "ListRecordsResponse$DatasetSyncCount": "Server sync count for this dataset.", + "Record$SyncCount": "The server sync count for this record.", + "RecordPatch$SyncCount": "Last known server sync count for this record. Set to 0 if unknown." + } + }, + "MergedDatasetNameList": { + "base": null, + "refs": { + "ListRecordsResponse$MergedDatasetNames": "Names of merged datasets." + } + }, + "NotAuthorizedException": { + "base": "Thrown when a user is not authorized to access the requested resource.", + "refs": { + } + }, + "Operation": { + "base": null, + "refs": { + "RecordPatch$Op": "An operation, either replace or remove." 
+ } + }, + "Platform": { + "base": null, + "refs": { + "RegisterDeviceRequest$Platform": "

    The SNS platform type (e.g. GCM, SDM, APNS, APNS_SANDBOX).

    " + } + }, + "PushSync": { + "base": "

    Configuration options to be applied to the identity pool.

    ", + "refs": { + "GetIdentityPoolConfigurationResponse$PushSync": "

    Options to apply to this identity pool for push synchronization.

    ", + "SetIdentityPoolConfigurationRequest$PushSync": "

    Options to apply to this identity pool for push synchronization.

    ", + "SetIdentityPoolConfigurationResponse$PushSync": "

    Options to apply to this identity pool for push synchronization.

    " + } + }, + "PushToken": { + "base": null, + "refs": { + "RegisterDeviceRequest$Token": "

    The push token.

    " + } + }, + "Record": { + "base": "The basic data structure of a dataset.", + "refs": { + "RecordList$member": null + } + }, + "RecordKey": { + "base": null, + "refs": { + "Record$Key": "The key for the record.", + "RecordPatch$Key": "The key associated with the record patch." + } + }, + "RecordList": { + "base": null, + "refs": { + "ListRecordsResponse$Records": "A list of all records.", + "UpdateRecordsResponse$Records": "A list of records that have been updated." + } + }, + "RecordPatch": { + "base": "An update operation for a record.", + "refs": { + "RecordPatchList$member": null + } + }, + "RecordPatchList": { + "base": null, + "refs": { + "UpdateRecordsRequest$RecordPatches": "A list of patch operations." + } + }, + "RecordValue": { + "base": null, + "refs": { + "Record$Value": "The value for the record.", + "RecordPatch$Value": "The value associated with the record patch." + } + }, + "RegisterDeviceRequest": { + "base": "

    A request to RegisterDevice.

    ", + "refs": { + } + }, + "RegisterDeviceResponse": { + "base": "

    Response to a RegisterDevice request.

    ", + "refs": { + } + }, + "ResourceConflictException": { + "base": "Thrown if an update can't be applied because the resource was changed by another call and this would result in a conflict.", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "Thrown if the resource doesn't exist.", + "refs": { + } + }, + "SetCognitoEventsRequest": { + "base": "

    A request to configure Cognito Events\"

    \"", + "refs": { + } + }, + "SetIdentityPoolConfigurationRequest": { + "base": "

    The input for the SetIdentityPoolConfiguration operation.

    ", + "refs": { + } + }, + "SetIdentityPoolConfigurationResponse": { + "base": "

    The output for the SetIdentityPoolConfiguration operation

    ", + "refs": { + } + }, + "StreamName": { + "base": null, + "refs": { + "CognitoStreams$StreamName": "The name of the Cognito stream to receive updates. This stream must be in the developers account and in the same region as the identity pool." + } + }, + "StreamingStatus": { + "base": null, + "refs": { + "CognitoStreams$StreamingStatus": "Status of the Cognito streams. Valid values are:

    ENABLED - Streaming of updates to identity pool is enabled.

    DISABLED - Streaming of updates to identity pool is disabled. Bulk publish will also fail if StreamingStatus is DISABLED.

    " + } + }, + "String": { + "base": null, + "refs": { + "ConcurrentModificationException$message": "

    The message returned by a ConcurrentModicationException.

    ", + "Dataset$LastModifiedBy": "The device that made the last change to this dataset.", + "GetBulkPublishDetailsResponse$FailureMessage": "If BulkPublishStatus is FAILED this field will contain the error message that caused the bulk publish to fail.", + "ListDatasetsRequest$NextToken": "A pagination token for obtaining the next page of results.", + "ListDatasetsResponse$NextToken": "A pagination token for obtaining the next page of results.", + "ListIdentityPoolUsageRequest$NextToken": "A pagination token for obtaining the next page of results.", + "ListIdentityPoolUsageResponse$NextToken": "A pagination token for obtaining the next page of results.", + "ListRecordsRequest$NextToken": "A pagination token for obtaining the next page of results.", + "ListRecordsResponse$NextToken": "A pagination token for obtaining the next page of results.", + "ListRecordsResponse$LastModifiedBy": "The user/device that made the last change to this record.", + "ListRecordsResponse$SyncSessionToken": "A token containing a session ID, identity ID, and expiration.", + "MergedDatasetNameList$member": null, + "Record$LastModifiedBy": "The user/device that made the last change to this record." + } + }, + "SubscribeToDatasetRequest": { + "base": "

    A request to SubscribeToDatasetRequest.

    ", + "refs": { + } + }, + "SubscribeToDatasetResponse": { + "base": "

    Response to a SubscribeToDataset request.

    ", + "refs": { + } + }, + "SyncSessionToken": { + "base": null, + "refs": { + "ListRecordsRequest$SyncSessionToken": "A token containing a session ID, identity ID, and expiration.", + "UpdateRecordsRequest$SyncSessionToken": "The SyncSessionToken returned by a previous call to ListRecords for this dataset and identity." + } + }, + "TooManyRequestsException": { + "base": "Thrown if the request is throttled.", + "refs": { + } + }, + "UnsubscribeFromDatasetRequest": { + "base": "

    A request to UnsubscribeFromDataset.

    ", + "refs": { + } + }, + "UnsubscribeFromDatasetResponse": { + "base": "

    Response to an UnsubscribeFromDataset request.

    ", + "refs": { + } + }, + "UpdateRecordsRequest": { + "base": "A request to post updates to records or add and delete records for a dataset and user.", + "refs": { + } + }, + "UpdateRecordsResponse": { + "base": "Returned for a successful UpdateRecordsRequest.", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1284 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-12", + "endpointPrefix":"config", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Config Service", + "serviceFullName":"AWS Config", + "signatureVersion":"v4", + "targetPrefix":"StarlingDoveService" + }, + "operations":{ + "DeleteConfigRule":{ + "name":"DeleteConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConfigRuleRequest"}, + "errors":[ + {"shape":"NoSuchConfigRuleException"}, + {"shape":"ResourceInUseException"} + ] + }, + "DeleteDeliveryChannel":{ + "name":"DeleteDeliveryChannel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeliveryChannelRequest"}, + "errors":[ + {"shape":"NoSuchDeliveryChannelException"}, + {"shape":"LastDeliveryChannelDeleteFailedException"} + ] + }, + "DeliverConfigSnapshot":{ + "name":"DeliverConfigSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeliverConfigSnapshotRequest"}, + "output":{"shape":"DeliverConfigSnapshotResponse"}, + "errors":[ + 
{"shape":"NoSuchDeliveryChannelException"}, + {"shape":"NoAvailableConfigurationRecorderException"}, + {"shape":"NoRunningConfigurationRecorderException"} + ] + }, + "DescribeComplianceByConfigRule":{ + "name":"DescribeComplianceByConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeComplianceByConfigRuleRequest"}, + "output":{"shape":"DescribeComplianceByConfigRuleResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"NoSuchConfigRuleException"} + ] + }, + "DescribeComplianceByResource":{ + "name":"DescribeComplianceByResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeComplianceByResourceRequest"}, + "output":{"shape":"DescribeComplianceByResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "DescribeConfigRuleEvaluationStatus":{ + "name":"DescribeConfigRuleEvaluationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigRuleEvaluationStatusRequest"}, + "output":{"shape":"DescribeConfigRuleEvaluationStatusResponse"}, + "errors":[ + {"shape":"NoSuchConfigRuleException"} + ] + }, + "DescribeConfigRules":{ + "name":"DescribeConfigRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigRulesRequest"}, + "output":{"shape":"DescribeConfigRulesResponse"}, + "errors":[ + {"shape":"NoSuchConfigRuleException"} + ] + }, + "DescribeConfigurationRecorderStatus":{ + "name":"DescribeConfigurationRecorderStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigurationRecorderStatusRequest"}, + "output":{"shape":"DescribeConfigurationRecorderStatusResponse"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"} + ] + }, + "DescribeConfigurationRecorders":{ + "name":"DescribeConfigurationRecorders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeConfigurationRecordersRequest"}, + "output":{"shape":"DescribeConfigurationRecordersResponse"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"} + ] + }, + "DescribeDeliveryChannelStatus":{ + "name":"DescribeDeliveryChannelStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeliveryChannelStatusRequest"}, + "output":{"shape":"DescribeDeliveryChannelStatusResponse"}, + "errors":[ + {"shape":"NoSuchDeliveryChannelException"} + ] + }, + "DescribeDeliveryChannels":{ + "name":"DescribeDeliveryChannels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeliveryChannelsRequest"}, + "output":{"shape":"DescribeDeliveryChannelsResponse"}, + "errors":[ + {"shape":"NoSuchDeliveryChannelException"} + ] + }, + "GetComplianceDetailsByConfigRule":{ + "name":"GetComplianceDetailsByConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetComplianceDetailsByConfigRuleRequest"}, + "output":{"shape":"GetComplianceDetailsByConfigRuleResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NoSuchConfigRuleException"} + ] + }, + "GetComplianceDetailsByResource":{ + "name":"GetComplianceDetailsByResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetComplianceDetailsByResourceRequest"}, + "output":{"shape":"GetComplianceDetailsByResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"} + ] + }, + "GetComplianceSummaryByConfigRule":{ + "name":"GetComplianceSummaryByConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"GetComplianceSummaryByConfigRuleResponse"} + }, + "GetComplianceSummaryByResourceType":{ + "name":"GetComplianceSummaryByResourceType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetComplianceSummaryByResourceTypeRequest"}, + 
"output":{"shape":"GetComplianceSummaryByResourceTypeResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"} + ] + }, + "GetResourceConfigHistory":{ + "name":"GetResourceConfigHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourceConfigHistoryRequest"}, + "output":{"shape":"GetResourceConfigHistoryResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidTimeRangeException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NoAvailableConfigurationRecorderException"}, + {"shape":"ResourceNotDiscoveredException"} + ] + }, + "ListDiscoveredResources":{ + "name":"ListDiscoveredResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDiscoveredResourcesRequest"}, + "output":{"shape":"ListDiscoveredResourcesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NoAvailableConfigurationRecorderException"} + ] + }, + "PutConfigRule":{ + "name":"PutConfigRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutConfigRuleRequest"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MaxNumberOfConfigRulesExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InsufficientPermissionsException"} + ] + }, + "PutConfigurationRecorder":{ + "name":"PutConfigurationRecorder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutConfigurationRecorderRequest"}, + "errors":[ + {"shape":"MaxNumberOfConfigurationRecordersExceededException"}, + {"shape":"InvalidConfigurationRecorderNameException"}, + {"shape":"InvalidRoleException"}, + {"shape":"InvalidRecordingGroupException"} + ] + }, + "PutDeliveryChannel":{ + "name":"PutDeliveryChannel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDeliveryChannelRequest"}, + "errors":[ + 
{"shape":"MaxNumberOfDeliveryChannelsExceededException"}, + {"shape":"NoAvailableConfigurationRecorderException"}, + {"shape":"InvalidDeliveryChannelNameException"}, + {"shape":"NoSuchBucketException"}, + {"shape":"InvalidS3KeyPrefixException"}, + {"shape":"InvalidSNSTopicARNException"}, + {"shape":"InsufficientDeliveryPolicyException"} + ] + }, + "PutEvaluations":{ + "name":"PutEvaluations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEvaluationsRequest"}, + "output":{"shape":"PutEvaluationsResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidResultTokenException"}, + {"shape":"NoSuchConfigRuleException"} + ] + }, + "StartConfigurationRecorder":{ + "name":"StartConfigurationRecorder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartConfigurationRecorderRequest"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"}, + {"shape":"NoAvailableDeliveryChannelException"} + ] + }, + "StopConfigurationRecorder":{ + "name":"StopConfigurationRecorder", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopConfigurationRecorderRequest"}, + "errors":[ + {"shape":"NoSuchConfigurationRecorderException"} + ] + } + }, + "shapes":{ + "ARN":{"type":"string"}, + "AccountId":{"type":"string"}, + "AllSupported":{"type":"boolean"}, + "AvailabilityZone":{"type":"string"}, + "AwsRegion":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "ChannelName":{ + "type":"string", + "max":256, + "min":1 + }, + "ChronologicalOrder":{ + "type":"string", + "enum":[ + "Reverse", + "Forward" + ] + }, + "Compliance":{ + "type":"structure", + "members":{ + "ComplianceType":{"shape":"ComplianceType"}, + "ComplianceContributorCount":{"shape":"ComplianceContributorCount"} + } + }, + "ComplianceByConfigRule":{ + "type":"structure", + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "Compliance":{"shape":"Compliance"} + } + }, + 
"ComplianceByConfigRules":{ + "type":"list", + "member":{"shape":"ComplianceByConfigRule"} + }, + "ComplianceByResource":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceId":{"shape":"StringWithCharLimit256"}, + "Compliance":{"shape":"Compliance"} + } + }, + "ComplianceByResources":{ + "type":"list", + "member":{"shape":"ComplianceByResource"} + }, + "ComplianceContributorCount":{ + "type":"structure", + "members":{ + "CappedCount":{"shape":"Integer"}, + "CapExceeded":{"shape":"Boolean"} + } + }, + "ComplianceResourceTypes":{ + "type":"list", + "member":{"shape":"StringWithCharLimit256"}, + "max":100, + "min":0 + }, + "ComplianceSummariesByResourceType":{ + "type":"list", + "member":{"shape":"ComplianceSummaryByResourceType"} + }, + "ComplianceSummary":{ + "type":"structure", + "members":{ + "CompliantResourceCount":{"shape":"ComplianceContributorCount"}, + "NonCompliantResourceCount":{"shape":"ComplianceContributorCount"}, + "ComplianceSummaryTimestamp":{"shape":"Date"} + } + }, + "ComplianceSummaryByResourceType":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ComplianceSummary":{"shape":"ComplianceSummary"} + } + }, + "ComplianceType":{ + "type":"string", + "enum":[ + "COMPLIANT", + "NON_COMPLIANT", + "NOT_APPLICABLE", + "INSUFFICIENT_DATA" + ] + }, + "ComplianceTypes":{ + "type":"list", + "member":{"shape":"ComplianceType"}, + "max":3, + "min":0 + }, + "ConfigExportDeliveryInfo":{ + "type":"structure", + "members":{ + "lastStatus":{"shape":"DeliveryStatus"}, + "lastErrorCode":{"shape":"String"}, + "lastErrorMessage":{"shape":"String"}, + "lastAttemptTime":{"shape":"Date"}, + "lastSuccessfulTime":{"shape":"Date"}, + "nextDeliveryTime":{"shape":"Date"} + } + }, + "ConfigRule":{ + "type":"structure", + "required":["Source"], + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "ConfigRuleArn":{"shape":"String"}, + "ConfigRuleId":{"shape":"String"}, + 
"Description":{"shape":"EmptiableStringWithCharLimit256"}, + "Scope":{"shape":"Scope"}, + "Source":{"shape":"Source"}, + "InputParameters":{"shape":"StringWithCharLimit256"}, + "MaximumExecutionFrequency":{"shape":"MaximumExecutionFrequency"}, + "ConfigRuleState":{"shape":"ConfigRuleState"} + } + }, + "ConfigRuleEvaluationStatus":{ + "type":"structure", + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "ConfigRuleArn":{"shape":"String"}, + "ConfigRuleId":{"shape":"String"}, + "LastSuccessfulInvocationTime":{"shape":"Date"}, + "LastFailedInvocationTime":{"shape":"Date"}, + "LastSuccessfulEvaluationTime":{"shape":"Date"}, + "LastFailedEvaluationTime":{"shape":"Date"}, + "FirstActivatedTime":{"shape":"Date"}, + "LastErrorCode":{"shape":"String"}, + "LastErrorMessage":{"shape":"String"}, + "FirstEvaluationStarted":{"shape":"Boolean"} + } + }, + "ConfigRuleEvaluationStatusList":{ + "type":"list", + "member":{"shape":"ConfigRuleEvaluationStatus"} + }, + "ConfigRuleNames":{ + "type":"list", + "member":{"shape":"StringWithCharLimit64"}, + "max":25, + "min":0 + }, + "ConfigRuleState":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "ConfigRules":{ + "type":"list", + "member":{"shape":"ConfigRule"} + }, + "ConfigSnapshotDeliveryProperties":{ + "type":"structure", + "members":{ + "deliveryFrequency":{"shape":"MaximumExecutionFrequency"} + } + }, + "ConfigStreamDeliveryInfo":{ + "type":"structure", + "members":{ + "lastStatus":{"shape":"DeliveryStatus"}, + "lastErrorCode":{"shape":"String"}, + "lastErrorMessage":{"shape":"String"}, + "lastStatusChangeTime":{"shape":"Date"} + } + }, + "Configuration":{"type":"string"}, + "ConfigurationItem":{ + "type":"structure", + "members":{ + "version":{"shape":"Version"}, + "accountId":{"shape":"AccountId"}, + "configurationItemCaptureTime":{"shape":"ConfigurationItemCaptureTime"}, + "configurationItemStatus":{"shape":"ConfigurationItemStatus"}, + 
"configurationStateId":{"shape":"ConfigurationStateId"}, + "configurationItemMD5Hash":{"shape":"ConfigurationItemMD5Hash"}, + "arn":{"shape":"ARN"}, + "resourceType":{"shape":"ResourceType"}, + "resourceId":{"shape":"ResourceId"}, + "resourceName":{"shape":"ResourceName"}, + "awsRegion":{"shape":"AwsRegion"}, + "availabilityZone":{"shape":"AvailabilityZone"}, + "resourceCreationTime":{"shape":"ResourceCreationTime"}, + "tags":{"shape":"Tags"}, + "relatedEvents":{"shape":"RelatedEventList"}, + "relationships":{"shape":"RelationshipList"}, + "configuration":{"shape":"Configuration"} + } + }, + "ConfigurationItemCaptureTime":{"type":"timestamp"}, + "ConfigurationItemList":{ + "type":"list", + "member":{"shape":"ConfigurationItem"} + }, + "ConfigurationItemMD5Hash":{"type":"string"}, + "ConfigurationItemStatus":{ + "type":"string", + "enum":[ + "Ok", + "Failed", + "Discovered", + "Deleted" + ] + }, + "ConfigurationRecorder":{ + "type":"structure", + "members":{ + "name":{"shape":"RecorderName"}, + "roleARN":{"shape":"String"}, + "recordingGroup":{"shape":"RecordingGroup"} + } + }, + "ConfigurationRecorderList":{ + "type":"list", + "member":{"shape":"ConfigurationRecorder"} + }, + "ConfigurationRecorderNameList":{ + "type":"list", + "member":{"shape":"RecorderName"} + }, + "ConfigurationRecorderStatus":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "lastStartTime":{"shape":"Date"}, + "lastStopTime":{"shape":"Date"}, + "recording":{"shape":"Boolean"}, + "lastStatus":{"shape":"RecorderStatus"}, + "lastErrorCode":{"shape":"String"}, + "lastErrorMessage":{"shape":"String"}, + "lastStatusChangeTime":{"shape":"Date"} + } + }, + "ConfigurationRecorderStatusList":{ + "type":"list", + "member":{"shape":"ConfigurationRecorderStatus"} + }, + "ConfigurationStateId":{"type":"string"}, + "Date":{"type":"timestamp"}, + "DeleteConfigRuleRequest":{ + "type":"structure", + "required":["ConfigRuleName"], + "members":{ + 
"ConfigRuleName":{"shape":"StringWithCharLimit64"} + } + }, + "DeleteDeliveryChannelRequest":{ + "type":"structure", + "required":["DeliveryChannelName"], + "members":{ + "DeliveryChannelName":{"shape":"ChannelName"} + } + }, + "DeliverConfigSnapshotRequest":{ + "type":"structure", + "required":["deliveryChannelName"], + "members":{ + "deliveryChannelName":{"shape":"ChannelName"} + } + }, + "DeliverConfigSnapshotResponse":{ + "type":"structure", + "members":{ + "configSnapshotId":{"shape":"String"} + } + }, + "DeliveryChannel":{ + "type":"structure", + "members":{ + "name":{"shape":"ChannelName"}, + "s3BucketName":{"shape":"String"}, + "s3KeyPrefix":{"shape":"String"}, + "snsTopicARN":{"shape":"String"}, + "configSnapshotDeliveryProperties":{"shape":"ConfigSnapshotDeliveryProperties"} + } + }, + "DeliveryChannelList":{ + "type":"list", + "member":{"shape":"DeliveryChannel"} + }, + "DeliveryChannelNameList":{ + "type":"list", + "member":{"shape":"ChannelName"} + }, + "DeliveryChannelStatus":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "configSnapshotDeliveryInfo":{"shape":"ConfigExportDeliveryInfo"}, + "configHistoryDeliveryInfo":{"shape":"ConfigExportDeliveryInfo"}, + "configStreamDeliveryInfo":{"shape":"ConfigStreamDeliveryInfo"} + } + }, + "DeliveryChannelStatusList":{ + "type":"list", + "member":{"shape":"DeliveryChannelStatus"} + }, + "DeliveryStatus":{ + "type":"string", + "enum":[ + "Success", + "Failure", + "Not_Applicable" + ] + }, + "DescribeComplianceByConfigRuleRequest":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConfigRuleNames"}, + "ComplianceTypes":{"shape":"ComplianceTypes"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeComplianceByConfigRuleResponse":{ + "type":"structure", + "members":{ + "ComplianceByConfigRules":{"shape":"ComplianceByConfigRules"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeComplianceByResourceRequest":{ + "type":"structure", + "members":{ + 
"ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceId":{"shape":"StringWithCharLimit256"}, + "ComplianceTypes":{"shape":"ComplianceTypes"}, + "Limit":{"shape":"Limit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeComplianceByResourceResponse":{ + "type":"structure", + "members":{ + "ComplianceByResources":{"shape":"ComplianceByResources"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConfigRuleEvaluationStatusRequest":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConfigRuleNames"} + } + }, + "DescribeConfigRuleEvaluationStatusResponse":{ + "type":"structure", + "members":{ + "ConfigRulesEvaluationStatus":{"shape":"ConfigRuleEvaluationStatusList"} + } + }, + "DescribeConfigRulesRequest":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConfigRuleNames"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeConfigRulesResponse":{ + "type":"structure", + "members":{ + "ConfigRules":{"shape":"ConfigRules"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeConfigurationRecorderStatusRequest":{ + "type":"structure", + "members":{ + "ConfigurationRecorderNames":{"shape":"ConfigurationRecorderNameList"} + } + }, + "DescribeConfigurationRecorderStatusResponse":{ + "type":"structure", + "members":{ + "ConfigurationRecordersStatus":{"shape":"ConfigurationRecorderStatusList"} + } + }, + "DescribeConfigurationRecordersRequest":{ + "type":"structure", + "members":{ + "ConfigurationRecorderNames":{"shape":"ConfigurationRecorderNameList"} + } + }, + "DescribeConfigurationRecordersResponse":{ + "type":"structure", + "members":{ + "ConfigurationRecorders":{"shape":"ConfigurationRecorderList"} + } + }, + "DescribeDeliveryChannelStatusRequest":{ + "type":"structure", + "members":{ + "DeliveryChannelNames":{"shape":"DeliveryChannelNameList"} + } + }, + "DescribeDeliveryChannelStatusResponse":{ + "type":"structure", + "members":{ + "DeliveryChannelsStatus":{"shape":"DeliveryChannelStatusList"} + } + }, + 
"DescribeDeliveryChannelsRequest":{ + "type":"structure", + "members":{ + "DeliveryChannelNames":{"shape":"DeliveryChannelNameList"} + } + }, + "DescribeDeliveryChannelsResponse":{ + "type":"structure", + "members":{ + "DeliveryChannels":{"shape":"DeliveryChannelList"} + } + }, + "EarlierTime":{"type":"timestamp"}, + "EmptiableStringWithCharLimit256":{ + "type":"string", + "max":256, + "min":0 + }, + "Evaluation":{ + "type":"structure", + "required":[ + "ComplianceResourceType", + "ComplianceResourceId", + "ComplianceType", + "OrderingTimestamp" + ], + "members":{ + "ComplianceResourceType":{"shape":"StringWithCharLimit256"}, + "ComplianceResourceId":{"shape":"StringWithCharLimit256"}, + "ComplianceType":{"shape":"ComplianceType"}, + "Annotation":{"shape":"StringWithCharLimit256"}, + "OrderingTimestamp":{"shape":"OrderingTimestamp"} + } + }, + "EvaluationResult":{ + "type":"structure", + "members":{ + "EvaluationResultIdentifier":{"shape":"EvaluationResultIdentifier"}, + "ComplianceType":{"shape":"ComplianceType"}, + "ResultRecordedTime":{"shape":"Date"}, + "ConfigRuleInvokedTime":{"shape":"Date"}, + "Annotation":{"shape":"StringWithCharLimit256"}, + "ResultToken":{"shape":"String"} + } + }, + "EvaluationResultIdentifier":{ + "type":"structure", + "members":{ + "EvaluationResultQualifier":{"shape":"EvaluationResultQualifier"}, + "OrderingTimestamp":{"shape":"Date"} + } + }, + "EvaluationResultQualifier":{ + "type":"structure", + "members":{ + "ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceId":{"shape":"StringWithCharLimit256"} + } + }, + "EvaluationResults":{ + "type":"list", + "member":{"shape":"EvaluationResult"} + }, + "Evaluations":{ + "type":"list", + "member":{"shape":"Evaluation"}, + "max":100, + "min":0 + }, + "EventSource":{ + "type":"string", + "enum":["aws.config"] + }, + "GetComplianceDetailsByConfigRuleRequest":{ + "type":"structure", + "required":["ConfigRuleName"], + "members":{ + 
"ConfigRuleName":{"shape":"StringWithCharLimit64"}, + "ComplianceTypes":{"shape":"ComplianceTypes"}, + "Limit":{"shape":"Limit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetComplianceDetailsByConfigRuleResponse":{ + "type":"structure", + "members":{ + "EvaluationResults":{"shape":"EvaluationResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetComplianceDetailsByResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId" + ], + "members":{ + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceId":{"shape":"StringWithCharLimit256"}, + "ComplianceTypes":{"shape":"ComplianceTypes"}, + "NextToken":{"shape":"String"} + } + }, + "GetComplianceDetailsByResourceResponse":{ + "type":"structure", + "members":{ + "EvaluationResults":{"shape":"EvaluationResults"}, + "NextToken":{"shape":"String"} + } + }, + "GetComplianceSummaryByConfigRuleResponse":{ + "type":"structure", + "members":{ + "ComplianceSummary":{"shape":"ComplianceSummary"} + } + }, + "GetComplianceSummaryByResourceTypeRequest":{ + "type":"structure", + "members":{ + "ResourceTypes":{"shape":"ResourceTypes"} + } + }, + "GetComplianceSummaryByResourceTypeResponse":{ + "type":"structure", + "members":{ + "ComplianceSummariesByResourceType":{"shape":"ComplianceSummariesByResourceType"} + } + }, + "GetResourceConfigHistoryRequest":{ + "type":"structure", + "required":[ + "resourceType", + "resourceId" + ], + "members":{ + "resourceType":{"shape":"ResourceType"}, + "resourceId":{"shape":"ResourceId"}, + "laterTime":{"shape":"LaterTime"}, + "earlierTime":{"shape":"EarlierTime"}, + "chronologicalOrder":{"shape":"ChronologicalOrder"}, + "limit":{"shape":"Limit"}, + "nextToken":{"shape":"NextToken"} + } + }, + "GetResourceConfigHistoryResponse":{ + "type":"structure", + "members":{ + "configurationItems":{"shape":"ConfigurationItemList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "IncludeGlobalResourceTypes":{"type":"boolean"}, + 
"InsufficientDeliveryPolicyException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InsufficientPermissionsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "InvalidConfigurationRecorderNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDeliveryChannelNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidLimitException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRecordingGroupException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidResultTokenException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidRoleException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidS3KeyPrefixException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSNSTopicARNException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidTimeRangeException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LastDeliveryChannelDeleteFailedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LaterTime":{"type":"timestamp"}, + "Limit":{ + "type":"integer", + "max":100, + "min":0 + }, + "ListDiscoveredResourcesRequest":{ + "type":"structure", + "required":["resourceType"], + "members":{ + "resourceType":{"shape":"ResourceType"}, + "resourceIds":{"shape":"ResourceIdList"}, + "resourceName":{"shape":"ResourceName"}, + "limit":{"shape":"Limit"}, + "includeDeletedResources":{"shape":"Boolean"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListDiscoveredResourcesResponse":{ + "type":"structure", + "members":{ + 
"resourceIdentifiers":{"shape":"ResourceIdentifierList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "MaxNumberOfConfigRulesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaxNumberOfConfigurationRecordersExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaxNumberOfDeliveryChannelsExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaximumExecutionFrequency":{ + "type":"string", + "enum":[ + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours" + ] + }, + "MessageType":{ + "type":"string", + "enum":[ + "ConfigurationItemChangeNotification", + "ConfigurationSnapshotDeliveryCompleted" + ] + }, + "Name":{"type":"string"}, + "NextToken":{"type":"string"}, + "NoAvailableConfigurationRecorderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoAvailableDeliveryChannelException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoRunningConfigurationRecorderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchBucketException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchConfigRuleException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchConfigurationRecorderException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchDeliveryChannelException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OrderingTimestamp":{"type":"timestamp"}, + "Owner":{ + "type":"string", + "enum":[ + "CUSTOM_LAMBDA", + "AWS" + ] + }, + "PutConfigRuleRequest":{ + "type":"structure", + "required":["ConfigRule"], + "members":{ + "ConfigRule":{"shape":"ConfigRule"} + } + }, + "PutConfigurationRecorderRequest":{ + "type":"structure", + "required":["ConfigurationRecorder"], + "members":{ + "ConfigurationRecorder":{"shape":"ConfigurationRecorder"} + } + }, + 
"PutDeliveryChannelRequest":{ + "type":"structure", + "required":["DeliveryChannel"], + "members":{ + "DeliveryChannel":{"shape":"DeliveryChannel"} + } + }, + "PutEvaluationsRequest":{ + "type":"structure", + "required":["ResultToken"], + "members":{ + "Evaluations":{"shape":"Evaluations"}, + "ResultToken":{"shape":"String"} + } + }, + "PutEvaluationsResponse":{ + "type":"structure", + "members":{ + "FailedEvaluations":{"shape":"Evaluations"} + } + }, + "RecorderName":{ + "type":"string", + "max":256, + "min":1 + }, + "RecorderStatus":{ + "type":"string", + "enum":[ + "Pending", + "Success", + "Failure" + ] + }, + "RecordingGroup":{ + "type":"structure", + "members":{ + "allSupported":{"shape":"AllSupported"}, + "includeGlobalResourceTypes":{"shape":"IncludeGlobalResourceTypes"}, + "resourceTypes":{"shape":"ResourceTypeList"} + } + }, + "RelatedEvent":{"type":"string"}, + "RelatedEventList":{ + "type":"list", + "member":{"shape":"RelatedEvent"} + }, + "Relationship":{ + "type":"structure", + "members":{ + "resourceType":{"shape":"ResourceType"}, + "resourceId":{"shape":"ResourceId"}, + "resourceName":{"shape":"ResourceName"}, + "relationshipName":{"shape":"RelationshipName"} + } + }, + "RelationshipList":{ + "type":"list", + "member":{"shape":"Relationship"} + }, + "RelationshipName":{"type":"string"}, + "ResourceCreationTime":{"type":"timestamp"}, + "ResourceDeletionTime":{"type":"timestamp"}, + "ResourceId":{"type":"string"}, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"ResourceId"} + }, + "ResourceIdentifier":{ + "type":"structure", + "members":{ + "resourceType":{"shape":"ResourceType"}, + "resourceId":{"shape":"ResourceId"}, + "resourceName":{"shape":"ResourceName"}, + "resourceDeletionTime":{"shape":"ResourceDeletionTime"} + } + }, + "ResourceIdentifierList":{ + "type":"list", + "member":{"shape":"ResourceIdentifier"} + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + 
"ResourceName":{"type":"string"}, + "ResourceNotDiscoveredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "AWS::EC2::CustomerGateway", + "AWS::EC2::EIP", + "AWS::EC2::Host", + "AWS::EC2::Instance", + "AWS::EC2::InternetGateway", + "AWS::EC2::NetworkAcl", + "AWS::EC2::NetworkInterface", + "AWS::EC2::RouteTable", + "AWS::EC2::SecurityGroup", + "AWS::EC2::Subnet", + "AWS::CloudTrail::Trail", + "AWS::EC2::Volume", + "AWS::EC2::VPC", + "AWS::EC2::VPNConnection", + "AWS::EC2::VPNGateway", + "AWS::IAM::Group", + "AWS::IAM::Policy", + "AWS::IAM::Role", + "AWS::IAM::User" + ] + }, + "ResourceTypeList":{ + "type":"list", + "member":{"shape":"ResourceType"} + }, + "ResourceTypes":{ + "type":"list", + "member":{"shape":"StringWithCharLimit256"}, + "max":100, + "min":0 + }, + "Scope":{ + "type":"structure", + "members":{ + "ComplianceResourceTypes":{"shape":"ComplianceResourceTypes"}, + "TagKey":{"shape":"StringWithCharLimit128"}, + "TagValue":{"shape":"StringWithCharLimit256"}, + "ComplianceResourceId":{"shape":"StringWithCharLimit256"} + } + }, + "Source":{ + "type":"structure", + "members":{ + "Owner":{"shape":"Owner"}, + "SourceIdentifier":{"shape":"StringWithCharLimit256"}, + "SourceDetails":{"shape":"SourceDetails"} + } + }, + "SourceDetail":{ + "type":"structure", + "members":{ + "EventSource":{"shape":"EventSource"}, + "MessageType":{"shape":"MessageType"} + } + }, + "SourceDetails":{ + "type":"list", + "member":{"shape":"SourceDetail"}, + "max":25, + "min":0 + }, + "StartConfigurationRecorderRequest":{ + "type":"structure", + "required":["ConfigurationRecorderName"], + "members":{ + "ConfigurationRecorderName":{"shape":"RecorderName"} + } + }, + "StopConfigurationRecorderRequest":{ + "type":"structure", + "required":["ConfigurationRecorderName"], + "members":{ + "ConfigurationRecorderName":{"shape":"RecorderName"} + } + }, + "String":{"type":"string"}, + "StringWithCharLimit128":{ + 
"type":"string", + "max":128, + "min":1 + }, + "StringWithCharLimit256":{ + "type":"string", + "max":256, + "min":1 + }, + "StringWithCharLimit64":{ + "type":"string", + "max":64, + "min":1 + }, + "Tags":{ + "type":"map", + "key":{"shape":"Name"}, + "value":{"shape":"Value"} + }, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Value":{"type":"string"}, + "Version":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1050 @@ +{ + "version": "2.0", + "service": "AWS Config

    AWS Config provides a way to keep track of the configurations of all the AWS resources associated with your AWS account. You can use AWS Config to get the current and historical configurations of each AWS resource and also to get information about the relationship between the resources. An AWS resource can be an Amazon Compute Cloud (Amazon EC2) instance, an Elastic Block Store (EBS) volume, an Elastic network Interface (ENI), or a security group. For a complete list of resources currently supported by AWS Config, see Supported AWS Resources.

    You can access and manage AWS Config through the AWS Management Console, the AWS Command Line Interface (AWS CLI), the AWS Config API, or the AWS SDKs for AWS Config

    This reference guide contains documentation for the AWS Config API and the AWS CLI commands that you can use to manage AWS Config.

    The AWS Config API uses the Signature Version 4 protocol for signing requests. For more information about how to sign a request with this protocol, see Signature Version 4 Signing Process.

    For detailed information about AWS Config features and their associated actions or commands, as well as how to work with AWS Management Console, see What Is AWS Config? in the AWS Config Developer Guide.

    ", + "operations": { + "DeleteConfigRule": "

    Deletes the specified AWS Config rule and all of its evaluation results.

    AWS Config sets the state of a rule to DELETING until the deletion is complete. You cannot update a rule while it is in this state. If you make a PutConfigRule request for the rule, you will receive a ResourceInUseException.

    You can check the state of a rule by using the DescribeConfigRules request.

    ", + "DeleteDeliveryChannel": "

    Deletes the specified delivery channel.

    The delivery channel cannot be deleted if it is the only delivery channel and the configuration recorder is still running. To delete the delivery channel, stop the running configuration recorder using the StopConfigurationRecorder action.

    ", + "DeliverConfigSnapshot": "

    Schedules delivery of a configuration snapshot to the Amazon S3 bucket in the specified delivery channel. After the delivery has started, AWS Config sends following notifications using an Amazon SNS topic that you have specified.

    • Notification of starting the delivery.
    • Notification of delivery completed, if the delivery was successfully completed.
    • Notification of delivery failure, if the delivery failed to complete.
    ", + "DescribeComplianceByConfigRule": "

    Indicates whether the specified AWS Config rules are compliant. If a rule is noncompliant, this action returns the number of AWS resources that do not comply with the rule.

    A rule is compliant if all of the evaluated resources comply with it, and it is noncompliant if any of these resources do not comply.

    If AWS Config has no current evaluation results for the rule, it returns InsufficientData. This result might indicate one of the following conditions:

    • AWS Config has never invoked an evaluation for the rule. To check whether it has, use the DescribeConfigRuleEvaluationStatus action to get the LastSuccessfulInvocationTime and LastFailedInvocationTime.
    • The rule's AWS Lambda function is failing to send evaluation results to AWS Config. Verify that the role that you assigned to your configuration recorder includes the config:PutEvaluations permission. If the rule is a customer managed rule, verify that the AWS Lambda execution role includes the config:PutEvaluations permission.
    • The rule's AWS Lambda function has returned NOT_APPLICABLE for all evaluation results. This can occur if the resources were deleted or removed from the rule's scope.

    ", + "DescribeComplianceByResource": "

    Indicates whether the specified AWS resources are compliant. If a resource is noncompliant, this action returns the number of AWS Config rules that the resource does not comply with.

    A resource is compliant if it complies with all the AWS Config rules that evaluate it. It is noncompliant if it does not comply with one or more of these rules.

    If AWS Config has no current evaluation results for the resource, it returns InsufficientData. This result might indicate one of the following conditions about the rules that evaluate the resource:

    • AWS Config has never invoked an evaluation for the rule. To check whether it has, use the DescribeConfigRuleEvaluationStatus action to get the LastSuccessfulInvocationTime and LastFailedInvocationTime.
    • The rule's AWS Lambda function is failing to send evaluation results to AWS Config. Verify that the role that you assigned to your configuration recorder includes the config:PutEvaluations permission. If the rule is a customer managed rule, verify that the AWS Lambda execution role includes the config:PutEvaluations permission.
    • The rule's AWS Lambda function has returned NOT_APPLICABLE for all evaluation results. This can occur if the resources were deleted or removed from the rule's scope.

    ", + "DescribeConfigRuleEvaluationStatus": "

    Returns status information for each of your AWS managed Config rules. The status includes information such as the last time AWS Config invoked the rule, the last time AWS Config failed to invoke the rule, and the related error for the last failure.

    ", + "DescribeConfigRules": "

    Returns details about your AWS Config rules.

    ", + "DescribeConfigurationRecorderStatus": "

    Returns the current status of the specified configuration recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorder associated with the account.

    Currently, you can specify only one configuration recorder per account.", + "DescribeConfigurationRecorders": "

    Returns the name of one or more specified configuration recorders. If the recorder name is not specified, this action returns the names of all the configuration recorders associated with the account.

    Currently, you can specify only one configuration recorder per account.

    ", + "DescribeDeliveryChannelStatus": "

    Returns the current status of the specified delivery channel. If a delivery channel is not specified, this action returns the current status of all delivery channels associated with the account.

    Currently, you can specify only one delivery channel per account.", + "DescribeDeliveryChannels": "

    Returns details about the specified delivery channel. If a delivery channel is not specified, this action returns the details of all delivery channels associated with the account.

    Currently, you can specify only one delivery channel per account.

    ", + "GetComplianceDetailsByConfigRule": "

    Returns the evaluation results for the specified AWS Config rule. The results indicate which AWS resources were evaluated by the rule, when each resource was last evaluated, and whether each resource complies with the rule.

    ", + "GetComplianceDetailsByResource": "

    Returns the evaluation results for the specified AWS resource. The results indicate which AWS Config rules were used to evaluate the resource, when each rule was last used, and whether the resource complies with each rule.

    ", + "GetComplianceSummaryByConfigRule": "

    Returns the number of AWS Config rules that are compliant and noncompliant, up to a maximum of 25 for each.

    ", + "GetComplianceSummaryByResourceType": "

    Returns the number of resources that are compliant and the number that are noncompliant. You can specify one or more resource types to get these numbers for each resource type. The maximum number returned is 100.

    ", + "GetResourceConfigHistory": "

    Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval.

    The response is paginated, and by default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string, and to get the next page of results, run the request again and enter this string for the nextToken parameter.

    Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.

    ", + "ListDiscoveredResources": "

    Accepts a resource type and returns a list of resource identifiers for the resources of that type. A resource identifier includes the resource type, ID, and (if available) the custom resource name. The results consist of resources that AWS Config has discovered, including those that AWS Config is not currently recording. You can narrow the results to include only resources that have specific resource IDs or a resource name.

    You can specify either resource IDs or a resource name but not both in the same request.

    The response is paginated, and by default AWS Config lists 100 resource identifiers on each page. You can customize this number with the limit parameter. The response includes a nextToken string, and to get the next page of results, run the request again and enter this string for the nextToken parameter.

    ", + "PutConfigRule": "

    Adds or updates an AWS Config rule for evaluating whether your AWS resources comply with your desired configurations.

    You can use this action for customer managed Config rules and AWS managed Config rules. A customer managed Config rule is a custom rule that you develop and maintain. An AWS managed Config rule is a customizable, predefined rule that is provided by AWS Config.

    If you are adding a new customer managed Config rule, you must first create the AWS Lambda function that the rule invokes to evaluate your resources. When you use the PutConfigRule action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. Specify the ARN for the SourceIdentifier key. This key is part of the Source object, which is part of the ConfigRule object.

    If you are adding a new AWS managed Config rule, specify the rule's identifier for the SourceIdentifier key. To reference AWS managed Config rule identifiers, see Using AWS Managed Config Rules.

    For any new rule that you add, specify the ConfigRuleName in the ConfigRule object. Do not specify the ConfigRuleArn or the ConfigRuleId. These values are generated by AWS Config for new rules.

    If you are updating a rule that you have added previously, specify the rule's ConfigRuleName, ConfigRuleId, or ConfigRuleArn in the ConfigRule data type that you use in this request.

    The maximum number of rules that AWS Config supports is 25.

    For more information about developing and using AWS Config rules, see Evaluating AWS Resource Configurations with AWS Config in the AWS Config Developer Guide.

    ", + "PutConfigurationRecorder": "

    Creates a new configuration recorder to record the selected resource configurations.

    You can use this action to change the role roleARN and/or the recordingGroup of an existing recorder. To change the role, call the action on the existing configuration recorder and specify a role.

    Currently, you can specify only one configuration recorder per account.

    If ConfigurationRecorder does not have the recordingGroup parameter specified, the default is to record all supported resource types.

    ", + "PutDeliveryChannel": "

    Creates a new delivery channel object to deliver the configuration information to an Amazon S3 bucket, and to an Amazon SNS topic.

    You can use this action to change the Amazon S3 bucket or an Amazon SNS topic of the existing delivery channel. To change the Amazon S3 bucket or an Amazon SNS topic, call this action and specify the changed values for the S3 bucket and the SNS topic. If you specify a different value for either the S3 bucket or the SNS topic, this action will keep the existing value for the parameter that is not changed.

    Currently, you can specify only one delivery channel per account.

    ", + "PutEvaluations": "

    Used by an AWS Lambda function to deliver evaluation results to AWS Config. This action is required in every AWS Lambda function that is invoked by an AWS Config rule.

    ", + "StartConfigurationRecorder": "

    Starts recording configurations of the AWS resources you have selected to record in your AWS account.

    You must have created at least one delivery channel to successfully start the configuration recorder.

    ", + "StopConfigurationRecorder": "

    Stops recording configurations of the AWS resources you have selected to record in your AWS account.

    " + }, + "shapes": { + "ARN": { + "base": null, + "refs": { + "ConfigurationItem$arn": "

    The Amazon Resource Name (ARN) of the resource.

    " + } + }, + "AccountId": { + "base": null, + "refs": { + "ConfigurationItem$accountId": "

    The 12 digit AWS account ID associated with the resource.

    " + } + }, + "AllSupported": { + "base": null, + "refs": { + "RecordingGroup$allSupported": "

    Specifies whether AWS Config records configuration changes for every supported type of regional resource.

    If you set this option to true, when AWS Config adds support for a new type of regional resource, it automatically starts recording resources of that type.

    If you set this option to true, you cannot enumerate a list of resourceTypes.

    " + } + }, + "AvailabilityZone": { + "base": null, + "refs": { + "ConfigurationItem$availabilityZone": "

    The Availability Zone associated with the resource.

    " + } + }, + "AwsRegion": { + "base": null, + "refs": { + "ConfigurationItem$awsRegion": "

    The region where the resource resides.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ComplianceContributorCount$CapExceeded": "

    Indicates whether the maximum count is reached.

    ", + "ConfigRuleEvaluationStatus$FirstEvaluationStarted": "

    Indicates whether AWS Config has evaluated your resources against the rule at least once.

    • true - AWS Config has evaluated your AWS resources against the rule at least once.
    • false - AWS Config has not once finished evaluating your AWS resources against the rule.
    ", + "ConfigurationRecorderStatus$recording": "

    Specifies whether the recorder is currently recording or not.

    ", + "ListDiscoveredResourcesRequest$includeDeletedResources": "

    Specifies whether AWS Config includes deleted resources in the results. By default, deleted resources are not included.

    " + } + }, + "ChannelName": { + "base": null, + "refs": { + "DeleteDeliveryChannelRequest$DeliveryChannelName": "

    The name of the delivery channel to delete.

    ", + "DeliverConfigSnapshotRequest$deliveryChannelName": "

    The name of the delivery channel through which the snapshot is delivered.

    ", + "DeliveryChannel$name": "

    The name of the delivery channel. By default, AWS Config automatically assigns the name "default" when creating the delivery channel. You cannot change the assigned name.

    ", + "DeliveryChannelNameList$member": null + } + }, + "ChronologicalOrder": { + "base": null, + "refs": { + "GetResourceConfigHistoryRequest$chronologicalOrder": "

    The chronological order for configuration items listed. By default the results are listed in reverse chronological order.

    " + } + }, + "Compliance": { + "base": "

    Indicates whether an AWS resource or AWS Config rule is compliant and provides the number of contributors that affect the compliance.

    ", + "refs": { + "ComplianceByConfigRule$Compliance": "

    Indicates whether the AWS Config rule is compliant.

    ", + "ComplianceByResource$Compliance": "

    Indicates whether the AWS resource complies with all of the AWS Config rules that evaluated it.

    " + } + }, + "ComplianceByConfigRule": { + "base": "

    Indicates whether an AWS Config rule is compliant. A rule is compliant if all of the resources that the rule evaluated comply with it, and it is noncompliant if any of these resources do not comply.

    ", + "refs": { + "ComplianceByConfigRules$member": null + } + }, + "ComplianceByConfigRules": { + "base": null, + "refs": { + "DescribeComplianceByConfigRuleResponse$ComplianceByConfigRules": "

    Indicates whether each of the specified AWS Config rules is compliant.

    " + } + }, + "ComplianceByResource": { + "base": "

    Indicates whether an AWS resource that is evaluated according to one or more AWS Config rules is compliant. A resource is compliant if it complies with all of the rules that evaluate it, and it is noncompliant if it does not comply with one or more of these rules.

    ", + "refs": { + "ComplianceByResources$member": null + } + }, + "ComplianceByResources": { + "base": null, + "refs": { + "DescribeComplianceByResourceResponse$ComplianceByResources": "

    Indicates whether the specified AWS resource complies with all of the AWS Config rules that evaluate it.

    " + } + }, + "ComplianceContributorCount": { + "base": "

    The number of AWS resources or AWS Config rules responsible for the current compliance of the item, up to a maximum number.

    ", + "refs": { + "Compliance$ComplianceContributorCount": "

    The number of AWS resources or AWS Config rules that cause a result of NON_COMPLIANT, up to a maximum of 25.

    ", + "ComplianceSummary$CompliantResourceCount": "

    The number of AWS Config rules or AWS resources that are compliant, up to a maximum of 25 for rules and 100 for resources.

    ", + "ComplianceSummary$NonCompliantResourceCount": "

    The number of AWS Config rules or AWS resources that are noncompliant, up to a maximum of 25 for rules and 100 for resources.

    " + } + }, + "ComplianceResourceTypes": { + "base": null, + "refs": { + "Scope$ComplianceResourceTypes": "

    The resource types of only those AWS resources that you want AWS Config to evaluate against the rule. You can specify only one type if you also specify resource IDs for ComplianceResourceId.

    " + } + }, + "ComplianceSummariesByResourceType": { + "base": null, + "refs": { + "GetComplianceSummaryByResourceTypeResponse$ComplianceSummariesByResourceType": "

    The number of resources that are compliant and the number that are noncompliant. If one or more resource types were provided with the request, the numbers are returned for each resource type. The maximum number returned is 100.

    " + } + }, + "ComplianceSummary": { + "base": "

    The number of AWS Config rules or AWS resources that are compliant and noncompliant, up to a maximum.

    ", + "refs": { + "ComplianceSummaryByResourceType$ComplianceSummary": "

    The number of AWS resources that are compliant or noncompliant, up to a maximum of 100 for each compliance.

    ", + "GetComplianceSummaryByConfigRuleResponse$ComplianceSummary": "

    The number of AWS Config rules that are compliant and the number that are noncompliant, up to a maximum of 25 for each.

    " + } + }, + "ComplianceSummaryByResourceType": { + "base": "

    The number of AWS resources of a specific type that are compliant or noncompliant, up to a maximum of 100 for each compliance.

    ", + "refs": { + "ComplianceSummariesByResourceType$member": null + } + }, + "ComplianceType": { + "base": null, + "refs": { + "Compliance$ComplianceType": "

    Indicates whether an AWS resource or AWS Config rule is compliant.

    A resource is compliant if it complies with all of the AWS Config rules that evaluate it, and it is noncompliant if it does not comply with one or more of these rules.

    A rule is compliant if all of the resources that the rule evaluates comply with it, and it is noncompliant if any of these resources do not comply.

    ", + "ComplianceTypes$member": null, + "Evaluation$ComplianceType": "

    Indicates whether the AWS resource complies with the AWS Config rule that it was evaluated against.

    ", + "EvaluationResult$ComplianceType": "

    Indicates whether the AWS resource complies with the AWS Config rule that evaluated it.

    " + } + }, + "ComplianceTypes": { + "base": null, + "refs": { + "DescribeComplianceByConfigRuleRequest$ComplianceTypes": "

    Filters the results by compliance. The valid values are Compliant and NonCompliant.

    ", + "DescribeComplianceByResourceRequest$ComplianceTypes": "

    Filters the results by compliance. The valid values are Compliant and NonCompliant.

    ", + "GetComplianceDetailsByConfigRuleRequest$ComplianceTypes": "

    Specify to filter the results by compliance. The valid values are Compliant, NonCompliant, and NotApplicable.

    ", + "GetComplianceDetailsByResourceRequest$ComplianceTypes": "

    Specify to filter the results by compliance. The valid values are Compliant, NonCompliant, and NotApplicable.

    " + } + }, + "ConfigExportDeliveryInfo": { + "base": "

    A list that contains the status of the delivery of either the snapshot or the configuration history to the specified Amazon S3 bucket.

    ", + "refs": { + "DeliveryChannelStatus$configSnapshotDeliveryInfo": "

    A list containing the status of the delivery of the snapshot to the specified Amazon S3 bucket.

    ", + "DeliveryChannelStatus$configHistoryDeliveryInfo": "

    A list that contains the status of the delivery of the configuration history to the specified Amazon S3 bucket.

    " + } + }, + "ConfigRule": { + "base": "

    An AWS Lambda function that evaluates configuration items to assess whether your AWS resources comply with your desired configurations. This function can run when AWS Config detects a configuration change or delivers a configuration snapshot. This function can evaluate any resource in the recording group. To define which of these are evaluated, specify a value for the Scope key.

    For more information about developing and using AWS Config rules, see Evaluating AWS Resource Configurations with AWS Config in the AWS Config Developer Guide.

    ", + "refs": { + "ConfigRules$member": null, + "PutConfigRuleRequest$ConfigRule": null + } + }, + "ConfigRuleEvaluationStatus": { + "base": "

    Status information for your AWS managed Config rules. The status includes information such as the last time the rule ran, the last time it failed, and the related error for the last failure.

    This action does not return status information about customer managed Config rules.

    ", + "refs": { + "ConfigRuleEvaluationStatusList$member": null + } + }, + "ConfigRuleEvaluationStatusList": { + "base": null, + "refs": { + "DescribeConfigRuleEvaluationStatusResponse$ConfigRulesEvaluationStatus": "

    Status information about your AWS managed Config rules.

    " + } + }, + "ConfigRuleNames": { + "base": null, + "refs": { + "DescribeComplianceByConfigRuleRequest$ConfigRuleNames": "

    Specify one or more AWS Config rule names to filter the results by rule.

    ", + "DescribeConfigRuleEvaluationStatusRequest$ConfigRuleNames": "

    The name of the AWS managed Config rules for which you want status information. If you do not specify any names, AWS Config returns status information for all AWS managed Config rules that you use.

    ", + "DescribeConfigRulesRequest$ConfigRuleNames": "

    The names of the AWS Config rules for which you want details. If you do not specify any names, AWS Config returns details for all your rules.

    " + } + }, + "ConfigRuleState": { + "base": null, + "refs": { + "ConfigRule$ConfigRuleState": "

    Indicates whether the AWS Config rule is active or currently being deleted by AWS Config.

    AWS Config sets the state of a rule to DELETING temporarily after you use the DeleteConfigRule request to delete the rule. After AWS Config finishes deleting a rule, the rule and all of its evaluations are erased and no longer available.

    You cannot add a rule to AWS Config that has the state set to DELETING. If you want to delete a rule, you must use the DeleteConfigRule request.

    " + } + }, + "ConfigRules": { + "base": null, + "refs": { + "DescribeConfigRulesResponse$ConfigRules": "

    The details about your AWS Config rules.

    " + } + }, + "ConfigSnapshotDeliveryProperties": { + "base": "

    Options for how AWS Config delivers configuration snapshots to the Amazon S3 bucket in your delivery channel.

    ", + "refs": { + "DeliveryChannel$configSnapshotDeliveryProperties": null + } + }, + "ConfigStreamDeliveryInfo": { + "base": "

    A list that contains the status of the delivery of the configuration stream notification to the Amazon SNS topic.

    ", + "refs": { + "DeliveryChannelStatus$configStreamDeliveryInfo": "

    A list containing the status of the delivery of the configuration stream notification to the specified Amazon SNS topic.

    " + } + }, + "Configuration": { + "base": null, + "refs": { + "ConfigurationItem$configuration": "

    The description of the resource configuration.

    " + } + }, + "ConfigurationItem": { + "base": "

    A list that contains detailed configurations of a specified resource.

    Currently, the list does not contain information about non-AWS components (for example, applications on your Amazon EC2 instances).

    ", + "refs": { + "ConfigurationItemList$member": null + } + }, + "ConfigurationItemCaptureTime": { + "base": null, + "refs": { + "ConfigurationItem$configurationItemCaptureTime": "

    The time when the configuration recording was initiated.

    " + } + }, + "ConfigurationItemList": { + "base": null, + "refs": { + "GetResourceConfigHistoryResponse$configurationItems": "

    A list that contains the configuration history of one or more resources.

    " + } + }, + "ConfigurationItemMD5Hash": { + "base": null, + "refs": { + "ConfigurationItem$configurationItemMD5Hash": "

    Unique MD5 hash that represents the configuration item's state.

    You can use MD5 hash to compare the states of two or more configuration items that are associated with the same resource.

    " + } + }, + "ConfigurationItemStatus": { + "base": null, + "refs": { + "ConfigurationItem$configurationItemStatus": "

    The configuration item status.

    " + } + }, + "ConfigurationRecorder": { + "base": "

    An object that represents the recording of configuration changes of an AWS resource.

    ", + "refs": { + "ConfigurationRecorderList$member": null, + "PutConfigurationRecorderRequest$ConfigurationRecorder": "

    The configuration recorder object that records each configuration change made to the resources.

    " + } + }, + "ConfigurationRecorderList": { + "base": null, + "refs": { + "DescribeConfigurationRecordersResponse$ConfigurationRecorders": "

    A list that contains the descriptions of the specified configuration recorders.

    " + } + }, + "ConfigurationRecorderNameList": { + "base": null, + "refs": { + "DescribeConfigurationRecorderStatusRequest$ConfigurationRecorderNames": "

    The name(s) of the configuration recorder. If the name is not specified, the action returns the current status of all the configuration recorders associated with the account.

    ", + "DescribeConfigurationRecordersRequest$ConfigurationRecorderNames": "

    A list of configuration recorder names.

    " + } + }, + "ConfigurationRecorderStatus": { + "base": "

    The current status of the configuration recorder.

    ", + "refs": { + "ConfigurationRecorderStatusList$member": null + } + }, + "ConfigurationRecorderStatusList": { + "base": null, + "refs": { + "DescribeConfigurationRecorderStatusResponse$ConfigurationRecordersStatus": "

    A list that contains status of the specified recorders.

    " + } + }, + "ConfigurationStateId": { + "base": null, + "refs": { + "ConfigurationItem$configurationStateId": "

    An identifier that indicates the ordering of the configuration items of a resource.

    " + } + }, + "Date": { + "base": null, + "refs": { + "ComplianceSummary$ComplianceSummaryTimestamp": "

    The time that AWS Config created the compliance summary.

    ", + "ConfigExportDeliveryInfo$lastAttemptTime": "

    The time of the last attempted delivery.

    ", + "ConfigExportDeliveryInfo$lastSuccessfulTime": "

    The time of the last successful delivery.

    ", + "ConfigExportDeliveryInfo$nextDeliveryTime": "

    The time that the next delivery occurs.

    ", + "ConfigRuleEvaluationStatus$LastSuccessfulInvocationTime": "

    The time that AWS Config last successfully invoked the AWS Config rule to evaluate your AWS resources.

    ", + "ConfigRuleEvaluationStatus$LastFailedInvocationTime": "

    The time that AWS Config last failed to invoke the AWS Config rule to evaluate your AWS resources.

    ", + "ConfigRuleEvaluationStatus$LastSuccessfulEvaluationTime": "

    The time that AWS Config last successfully evaluated your AWS resources against the rule.

    ", + "ConfigRuleEvaluationStatus$LastFailedEvaluationTime": "

    The time that AWS Config last failed to evaluate your AWS resources against the rule.

    ", + "ConfigRuleEvaluationStatus$FirstActivatedTime": "

    The time that you first activated the AWS Config rule.

    ", + "ConfigStreamDeliveryInfo$lastStatusChangeTime": "

    The time from the last status change.

    ", + "ConfigurationRecorderStatus$lastStartTime": "

    The time the recorder was last started.

    ", + "ConfigurationRecorderStatus$lastStopTime": "

    The time the recorder was last stopped.

    ", + "ConfigurationRecorderStatus$lastStatusChangeTime": "

    The time when the status was last changed.

    ", + "EvaluationResult$ResultRecordedTime": "

    The time when AWS Config recorded the evaluation result.

    ", + "EvaluationResult$ConfigRuleInvokedTime": "

    The time when the AWS Config rule evaluated the AWS resource.

    ", + "EvaluationResultIdentifier$OrderingTimestamp": "

    The time of the event that triggered the evaluation of your AWS resources. The time can indicate when AWS Config delivered a configuration item change notification, or it can indicate when AWS Config delivered the configuration snapshot, depending on which event triggered the evaluation.

    " + } + }, + "DeleteConfigRuleRequest": { + "base": null, + "refs": { + } + }, + "DeleteDeliveryChannelRequest": { + "base": "

    The input for the DeleteDeliveryChannel action. The action accepts the following data in JSON format.

    ", + "refs": { + } + }, + "DeliverConfigSnapshotRequest": { + "base": "

    The input for the DeliverConfigSnapshot action.

    ", + "refs": { + } + }, + "DeliverConfigSnapshotResponse": { + "base": "

    The output for the DeliverConfigSnapshot action in JSON format.

    ", + "refs": { + } + }, + "DeliveryChannel": { + "base": "

    A logical container used for storing the configuration changes of an AWS resource.

    ", + "refs": { + "DeliveryChannelList$member": null, + "PutDeliveryChannelRequest$DeliveryChannel": "

    The configuration delivery channel object that delivers the configuration information to an Amazon S3 bucket, and to an Amazon SNS topic.

    " + } + }, + "DeliveryChannelList": { + "base": null, + "refs": { + "DescribeDeliveryChannelsResponse$DeliveryChannels": "

    A list that contains the descriptions of the specified delivery channel.

    " + } + }, + "DeliveryChannelNameList": { + "base": null, + "refs": { + "DescribeDeliveryChannelStatusRequest$DeliveryChannelNames": "

    A list of delivery channel names.

    ", + "DescribeDeliveryChannelsRequest$DeliveryChannelNames": "

    A list of delivery channel names.

    " + } + }, + "DeliveryChannelStatus": { + "base": "

    The status of a specified delivery channel.

    Valid values: Success | Failure

    ", + "refs": { + "DeliveryChannelStatusList$member": null + } + }, + "DeliveryChannelStatusList": { + "base": null, + "refs": { + "DescribeDeliveryChannelStatusResponse$DeliveryChannelsStatus": "

    A list that contains the status of a specified delivery channel.

    " + } + }, + "DeliveryStatus": { + "base": null, + "refs": { + "ConfigExportDeliveryInfo$lastStatus": "

    Status of the last attempted delivery.

    ", + "ConfigStreamDeliveryInfo$lastStatus": "

    Status of the last attempted delivery.

    Note Providing an SNS topic on a DeliveryChannel for AWS Config is optional. If the SNS delivery is turned off, the last status will be Not_Applicable.

    " + } + }, + "DescribeComplianceByConfigRuleRequest": { + "base": null, + "refs": { + } + }, + "DescribeComplianceByConfigRuleResponse": { + "base": null, + "refs": { + } + }, + "DescribeComplianceByResourceRequest": { + "base": null, + "refs": { + } + }, + "DescribeComplianceByResourceResponse": { + "base": null, + "refs": { + } + }, + "DescribeConfigRuleEvaluationStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeConfigRuleEvaluationStatusResponse": { + "base": null, + "refs": { + } + }, + "DescribeConfigRulesRequest": { + "base": null, + "refs": { + } + }, + "DescribeConfigRulesResponse": { + "base": null, + "refs": { + } + }, + "DescribeConfigurationRecorderStatusRequest": { + "base": "

    The input for the DescribeConfigurationRecorderStatus action.

    ", + "refs": { + } + }, + "DescribeConfigurationRecorderStatusResponse": { + "base": "

    The output for the DescribeConfigurationRecorderStatus action in JSON format.

    ", + "refs": { + } + }, + "DescribeConfigurationRecordersRequest": { + "base": "

    The input for the DescribeConfigurationRecorders action.

    ", + "refs": { + } + }, + "DescribeConfigurationRecordersResponse": { + "base": "

    The output for the DescribeConfigurationRecorders action.

    ", + "refs": { + } + }, + "DescribeDeliveryChannelStatusRequest": { + "base": "

    The input for the DescribeDeliveryChannelStatus action.

    ", + "refs": { + } + }, + "DescribeDeliveryChannelStatusResponse": { + "base": "

    The output for the DescribeDeliveryChannelStatus action.

    ", + "refs": { + } + }, + "DescribeDeliveryChannelsRequest": { + "base": "

    The input for the DescribeDeliveryChannels action.

    ", + "refs": { + } + }, + "DescribeDeliveryChannelsResponse": { + "base": "

    The output for the DescribeDeliveryChannels action.

    ", + "refs": { + } + }, + "EarlierTime": { + "base": null, + "refs": { + "GetResourceConfigHistoryRequest$earlierTime": "

    The time stamp that indicates an earlier time. If not specified, the action returns paginated results that contain configuration items that start from when the first configuration item was recorded.

    " + } + }, + "EmptiableStringWithCharLimit256": { + "base": null, + "refs": { + "ConfigRule$Description": "

    The description that you provide for the AWS Config rule.

    " + } + }, + "Evaluation": { + "base": "

    Identifies an AWS resource and indicates whether it complies with the AWS Config rule that it was evaluated against.

    ", + "refs": { + "Evaluations$member": null + } + }, + "EvaluationResult": { + "base": "

    The details of an AWS Config evaluation. Provides the AWS resource that was evaluated, the compliance of the resource, related timestamps, and supplementary information.

    ", + "refs": { + "EvaluationResults$member": null + } + }, + "EvaluationResultIdentifier": { + "base": "

    Uniquely identifies an evaluation result.

    ", + "refs": { + "EvaluationResult$EvaluationResultIdentifier": "

    Uniquely identifies the evaluation result.

    " + } + }, + "EvaluationResultQualifier": { + "base": "

    Identifies an AWS Config rule that evaluated an AWS resource, and provides the type and ID of the resource that the rule evaluated.

    ", + "refs": { + "EvaluationResultIdentifier$EvaluationResultQualifier": "

    Identifies an AWS Config rule used to evaluate an AWS resource, and provides the type and ID of the evaluated resource.

    " + } + }, + "EvaluationResults": { + "base": null, + "refs": { + "GetComplianceDetailsByConfigRuleResponse$EvaluationResults": "

    Indicates whether the AWS resource complies with the specified AWS Config rule.

    ", + "GetComplianceDetailsByResourceResponse$EvaluationResults": "

    Indicates whether the specified AWS resource complies with each AWS Config rule.

    " + } + }, + "Evaluations": { + "base": null, + "refs": { + "PutEvaluationsRequest$Evaluations": "

    The assessments that the AWS Lambda function performs. Each evaluation identifies an AWS resource and indicates whether it complies with the AWS Config rule that invokes the AWS Lambda function.

    ", + "PutEvaluationsResponse$FailedEvaluations": "

    Requests that failed because of a client or server error.

    " + } + }, + "EventSource": { + "base": null, + "refs": { + "SourceDetail$EventSource": "

    The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources.

    " + } + }, + "GetComplianceDetailsByConfigRuleRequest": { + "base": null, + "refs": { + } + }, + "GetComplianceDetailsByConfigRuleResponse": { + "base": null, + "refs": { + } + }, + "GetComplianceDetailsByResourceRequest": { + "base": null, + "refs": { + } + }, + "GetComplianceDetailsByResourceResponse": { + "base": null, + "refs": { + } + }, + "GetComplianceSummaryByConfigRuleResponse": { + "base": null, + "refs": { + } + }, + "GetComplianceSummaryByResourceTypeRequest": { + "base": null, + "refs": { + } + }, + "GetComplianceSummaryByResourceTypeResponse": { + "base": null, + "refs": { + } + }, + "GetResourceConfigHistoryRequest": { + "base": "

    The input for the GetResourceConfigHistory action.

    ", + "refs": { + } + }, + "GetResourceConfigHistoryResponse": { + "base": "

    The output for the GetResourceConfigHistory action.

    ", + "refs": { + } + }, + "IncludeGlobalResourceTypes": { + "base": null, + "refs": { + "RecordingGroup$includeGlobalResourceTypes": "

    Specifies whether AWS Config includes all supported types of global resources with the resources that it records.

    Before you can set this option to true, you must set the allSupported option to true.

    If you set this option to true, when AWS Config adds support for a new type of global resource, it automatically starts recording resources of that type.

    " + } + }, + "InsufficientDeliveryPolicyException": { + "base": "

    Your Amazon S3 bucket policy does not permit AWS Config to write to it.

    ", + "refs": { + } + }, + "InsufficientPermissionsException": { + "base": "

    Indicates one of the following errors:

    • The rule cannot be created because the IAM role assigned to AWS Config lacks permissions to perform the config:Put* action.
    • The AWS Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.
    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "ComplianceContributorCount$CappedCount": "

    The number of AWS resources or AWS Config rules responsible for the current compliance of the item.

    " + } + }, + "InvalidConfigurationRecorderNameException": { + "base": "

    You have provided a configuration recorder name that is not valid.

    ", + "refs": { + } + }, + "InvalidDeliveryChannelNameException": { + "base": "

    The specified delivery channel name is not valid.

    ", + "refs": { + } + }, + "InvalidLimitException": { + "base": "

    The specified limit is outside the allowable range.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The specified next token is invalid. Specify the nextToken string that was returned in the previous response to get the next page of results.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    One or more of the specified parameters are invalid. Verify that your parameters are valid and try again.

    ", + "refs": { + } + }, + "InvalidRecordingGroupException": { + "base": "

    AWS Config throws an exception if the recording group does not contain a valid list of resource types. Invalid values could also be incorrectly formatted.

    ", + "refs": { + } + }, + "InvalidResultTokenException": { + "base": "

    The result token is invalid.

    ", + "refs": { + } + }, + "InvalidRoleException": { + "base": "

    You have provided a null or empty role ARN.

    ", + "refs": { + } + }, + "InvalidS3KeyPrefixException": { + "base": "

    The specified Amazon S3 key prefix is not valid.

    ", + "refs": { + } + }, + "InvalidSNSTopicARNException": { + "base": "

    The specified Amazon SNS topic does not exist.

    ", + "refs": { + } + }, + "InvalidTimeRangeException": { + "base": "

    The specified time range is not valid. The earlier time is not chronologically before the later time.

    ", + "refs": { + } + }, + "LastDeliveryChannelDeleteFailedException": { + "base": "

    You cannot delete the delivery channel you specified because the configuration recorder is running.

    ", + "refs": { + } + }, + "LaterTime": { + "base": null, + "refs": { + "GetResourceConfigHistoryRequest$laterTime": "

    The time stamp that indicates a later time. If not specified, current time is taken.

    " + } + }, + "Limit": { + "base": null, + "refs": { + "DescribeComplianceByResourceRequest$Limit": "

    The maximum number of evaluation results returned on each page. The default is 10. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

    ", + "GetComplianceDetailsByConfigRuleRequest$Limit": "

    The maximum number of evaluation results returned on each page. The default is 10. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

    ", + "GetResourceConfigHistoryRequest$limit": "

    The maximum number of configuration items returned on each page. The default is 10. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

    ", + "ListDiscoveredResourcesRequest$limit": "

    The maximum number of resource identifiers returned on each page. The default is 100. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

    " + } + }, + "ListDiscoveredResourcesRequest": { + "base": null, + "refs": { + } + }, + "ListDiscoveredResourcesResponse": { + "base": null, + "refs": { + } + }, + "MaxNumberOfConfigRulesExceededException": { + "base": "

    Failed to add the AWS Config rule because the account already contains the maximum number of 25 rules. Consider deleting any deactivated rules before adding new rules.

    ", + "refs": { + } + }, + "MaxNumberOfConfigurationRecordersExceededException": { + "base": "

    You have reached the limit on the number of recorders you can create.

    ", + "refs": { + } + }, + "MaxNumberOfDeliveryChannelsExceededException": { + "base": "

    You have reached the limit on the number of delivery channels you can create.

    ", + "refs": { + } + }, + "MaximumExecutionFrequency": { + "base": null, + "refs": { + "ConfigRule$MaximumExecutionFrequency": "

    The maximum frequency at which the AWS Config rule runs evaluations.

    If your rule is periodic, meaning it runs an evaluation when AWS Config delivers a configuration snapshot, then it cannot run evaluations more frequently than AWS Config delivers the snapshots. For periodic rules, set the value of the MaximumExecutionFrequency key to be equal to or greater than the value of the deliveryFrequency key, which is part of ConfigSnapshotDeliveryProperties. To update the frequency with which AWS Config delivers your snapshots, use the PutDeliveryChannel action.

    ", + "ConfigSnapshotDeliveryProperties$deliveryFrequency": "

    The frequency with which AWS Config regularly delivers configuration snapshots.

    " + } + }, + "MessageType": { + "base": null, + "refs": { + "SourceDetail$MessageType": "

    The type of SNS message that triggers AWS Config to run an evaluation. For evaluations that are initiated when AWS Config delivers a configuration item change notification, you must use ConfigurationItemChangeNotification. For evaluations that are initiated when AWS Config delivers a configuration snapshot, you must use ConfigurationSnapshotDeliveryCompleted.

    " + } + }, + "Name": { + "base": null, + "refs": { + "Tags$key": null + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeComplianceByResourceRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "DescribeComplianceByResourceResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "GetComplianceDetailsByConfigRuleRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "GetComplianceDetailsByConfigRuleResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "GetResourceConfigHistoryRequest$nextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "GetResourceConfigHistoryResponse$nextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "ListDiscoveredResourcesRequest$nextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "ListDiscoveredResourcesResponse$nextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    " + } + }, + "NoAvailableConfigurationRecorderException": { + "base": "

    There are no configuration recorders available to provide the role needed to describe your resources. Create a configuration recorder.

    ", + "refs": { + } + }, + "NoAvailableDeliveryChannelException": { + "base": "

    There is no delivery channel available to record configurations.

    ", + "refs": { + } + }, + "NoRunningConfigurationRecorderException": { + "base": "

    There is no configuration recorder running.

    ", + "refs": { + } + }, + "NoSuchBucketException": { + "base": "

    The specified Amazon S3 bucket does not exist.

    ", + "refs": { + } + }, + "NoSuchConfigRuleException": { + "base": "

    One or more AWS Config rules in the request are invalid. Verify that the rule names are correct and try again.

    ", + "refs": { + } + }, + "NoSuchConfigurationRecorderException": { + "base": "

    You have specified a configuration recorder that does not exist.

    ", + "refs": { + } + }, + "NoSuchDeliveryChannelException": { + "base": "

    You have specified a delivery channel that does not exist.

    ", + "refs": { + } + }, + "OrderingTimestamp": { + "base": null, + "refs": { + "Evaluation$OrderingTimestamp": "

    The time of the event in AWS Config that triggered the evaluation. For event-based evaluations, the time indicates when AWS Config created the configuration item that triggered the evaluation. For periodic evaluations, the time indicates when AWS Config delivered the configuration snapshot that triggered the evaluation.

    " + } + }, + "Owner": { + "base": null, + "refs": { + "Source$Owner": "

    Indicates whether AWS or the customer owns and manages the AWS Config rule.

    " + } + }, + "PutConfigRuleRequest": { + "base": null, + "refs": { + } + }, + "PutConfigurationRecorderRequest": { + "base": "

    The input for the PutConfigurationRecorder action.

    ", + "refs": { + } + }, + "PutDeliveryChannelRequest": { + "base": "

    The input for the PutDeliveryChannel action.

    ", + "refs": { + } + }, + "PutEvaluationsRequest": { + "base": null, + "refs": { + } + }, + "PutEvaluationsResponse": { + "base": null, + "refs": { + } + }, + "RecorderName": { + "base": null, + "refs": { + "ConfigurationRecorder$name": "

    The name of the recorder. By default, AWS Config automatically assigns the name "default" when creating the configuration recorder. You cannot change the assigned name.

    ", + "ConfigurationRecorderNameList$member": null, + "StartConfigurationRecorderRequest$ConfigurationRecorderName": "

    The name of the recorder object that records each configuration change made to the resources.

    ", + "StopConfigurationRecorderRequest$ConfigurationRecorderName": "

    The name of the recorder object that records each configuration change made to the resources.

    " + } + }, + "RecorderStatus": { + "base": null, + "refs": { + "ConfigurationRecorderStatus$lastStatus": "

    The last (previous) status of the recorder.

    " + } + }, + "RecordingGroup": { + "base": "

    Specifies the types of AWS resource for which AWS Config records configuration changes.

    In the recording group, you specify whether all supported types or specific types of resources are recorded.

    By default, AWS Config records configuration changes for all supported types of regional resources that AWS Config discovers in the region in which it is running. Regional resources are tied to a region and can be used only in that region. Examples of regional resources are EC2 instances and EBS volumes.

    You can also have AWS Config record configuration changes for supported types of global resources. Global resources are not tied to an individual region and can be used in all regions.

    The configuration details for any global resource are the same in all regions. If you customize AWS Config in multiple regions to record global resources, it will create multiple configuration items each time a global resource changes: one configuration item for each region. These configuration items will contain identical data. To prevent duplicate configuration items, you should consider customizing AWS Config in only one region to record global resources, unless you want the configuration items to be available in multiple regions.

    If you don't want AWS Config to record all resources, you can specify which types of resources it will record with the resourceTypes parameter.

    For a list of supported resource types, see Supported resource types.

    For more information, see Selecting Which Resources AWS Config Records.

    ", + "refs": { + "ConfigurationRecorder$recordingGroup": "

    Specifies the types of AWS resource for which AWS Config records configuration changes.

    " + } + }, + "RelatedEvent": { + "base": null, + "refs": { + "RelatedEventList$member": null + } + }, + "RelatedEventList": { + "base": null, + "refs": { + "ConfigurationItem$relatedEvents": "

    A list of CloudTrail event IDs.

    A populated field indicates that the current configuration was initiated by the events recorded in the CloudTrail log. For more information about CloudTrail, see What is AWS CloudTrail?.

    An empty field indicates that the current configuration was not initiated by any event.

    " + } + }, + "Relationship": { + "base": "

    The relationship of the related resource to the main resource.

    ", + "refs": { + "RelationshipList$member": null + } + }, + "RelationshipList": { + "base": null, + "refs": { + "ConfigurationItem$relationships": "

    A list of related AWS resources.

    " + } + }, + "RelationshipName": { + "base": null, + "refs": { + "Relationship$relationshipName": "

    The type of relationship with the related resource.

    " + } + }, + "ResourceCreationTime": { + "base": null, + "refs": { + "ConfigurationItem$resourceCreationTime": "

    The time stamp when the resource was created.

    " + } + }, + "ResourceDeletionTime": { + "base": null, + "refs": { + "ResourceIdentifier$resourceDeletionTime": "

    The time that the resource was deleted.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "ConfigurationItem$resourceId": "

    The ID of the resource (for example, sg-xxxxxx).

    ", + "GetResourceConfigHistoryRequest$resourceId": "

    The ID of the resource (for example, sg-xxxxxx).

    ", + "Relationship$resourceId": "

    The ID of the related resource (for example, sg-xxxxxx).

    ", + "ResourceIdList$member": null, + "ResourceIdentifier$resourceId": "

    The ID of the resource (for example, sg-xxxxxx).

    " + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "ListDiscoveredResourcesRequest$resourceIds": "

    The IDs of only those resources that you want AWS Config to list in the response. If you do not specify this parameter, AWS Config lists all resources of the specified type that it has discovered.

    " + } + }, + "ResourceIdentifier": { + "base": "

    The details that identify a resource that is discovered by AWS Config, including the resource type, ID, and (if available) the custom resource name.

    ", + "refs": { + "ResourceIdentifierList$member": null + } + }, + "ResourceIdentifierList": { + "base": null, + "refs": { + "ListDiscoveredResourcesResponse$resourceIdentifiers": "

    The details that identify a resource that is discovered by AWS Config, including the resource type, ID, and (if available) the custom resource name.

    " + } + }, + "ResourceInUseException": { + "base": "

    The rule is currently being deleted. Wait for a while and try again.

    ", + "refs": { + } + }, + "ResourceName": { + "base": null, + "refs": { + "ConfigurationItem$resourceName": "

    The custom name of the resource, if available.

    ", + "ListDiscoveredResourcesRequest$resourceName": "

    The custom name of only those resources that you want AWS Config to list in the response. If you do not specify this parameter, AWS Config lists all resources of the specified type that it has discovered.

    ", + "Relationship$resourceName": "

    The custom name of the related resource, if available.

    ", + "ResourceIdentifier$resourceName": "

    The custom name of the resource (if available).

    " + } + }, + "ResourceNotDiscoveredException": { + "base": "

    You have specified a resource that is either unknown or has not been discovered.

    ", + "refs": { + } + }, + "ResourceType": { + "base": null, + "refs": { + "ConfigurationItem$resourceType": "

    The type of AWS resource.

    ", + "GetResourceConfigHistoryRequest$resourceType": "

    The resource type.

    ", + "ListDiscoveredResourcesRequest$resourceType": "

    The type of resources that you want AWS Config to list in the response.

    ", + "Relationship$resourceType": "

    The resource type of the related resource.

    ", + "ResourceIdentifier$resourceType": "

    The type of resource.

    ", + "ResourceTypeList$member": null + } + }, + "ResourceTypeList": { + "base": null, + "refs": { + "RecordingGroup$resourceTypes": "

    A comma-separated list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail).

    Before you can set this option to true, you must set the allSupported option to false.

    If you set this option to true, when AWS Config adds support for a new type of resource, it will not record resources of that type unless you manually add that type to your recording group.

    For a list of valid resourceTypes values, see the resourceType Value column in Supported AWS Resource Types.

    " + } + }, + "ResourceTypes": { + "base": null, + "refs": { + "GetComplianceSummaryByResourceTypeRequest$ResourceTypes": "

    Specify one or more resource types to get the number of resources that are compliant and the number that are noncompliant for each resource type.

    For this request, you can specify an AWS resource type such as AWS::EC2::Instance, and you can specify that the resource type is an AWS account by specifying AWS::::Account.

    " + } + }, + "Scope": { + "base": "

    Defines which resources AWS Config evaluates against a rule. The scope can include one or more resource types, a combination of a tag key and value, or a combination of one resource type and one or more resource IDs. Specify a scope to constrain the resources to be evaluated. If you do not specify a scope, all resources in your recording group are evaluated against the rule.

    ", + "refs": { + "ConfigRule$Scope": "

    Defines which resources the AWS Config rule evaluates. The scope can include one or more resource types, a combination of a tag key and value, or a combination of one resource type and one or more resource IDs. Specify a scope to constrain the resources that are evaluated. If you do not specify a scope, the AWS Config Rule evaluates all resources in the recording group.

    " + } + }, + "Source": { + "base": "

    Provides the AWS Config rule owner (AWS or customer), the rule identifier, and the events that trigger the evaluation of your AWS resources.

    ", + "refs": { + "ConfigRule$Source": "

    Provides the rule owner (AWS or customer), the rule identifier, and the events that cause the function to evaluate your AWS resources.

    " + } + }, + "SourceDetail": { + "base": "

    Provides the source and type of the event that triggers AWS Config to evaluate your AWS resources against a rule.

    ", + "refs": { + "SourceDetails$member": null + } + }, + "SourceDetails": { + "base": null, + "refs": { + "Source$SourceDetails": "

    Provides the source and type of the event that causes AWS Config to evaluate your AWS resources.

    " + } + }, + "StartConfigurationRecorderRequest": { + "base": "

    The input for the StartConfigurationRecorder action.

    ", + "refs": { + } + }, + "StopConfigurationRecorderRequest": { + "base": "

    The input for the StopConfigurationRecorder action.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "ConfigExportDeliveryInfo$lastErrorCode": "

    The error code from the last attempted delivery.

    ", + "ConfigExportDeliveryInfo$lastErrorMessage": "

    The error message from the last attempted delivery.

    ", + "ConfigRule$ConfigRuleArn": "

    The Amazon Resource Name (ARN) of the AWS Config rule.

    ", + "ConfigRule$ConfigRuleId": "

    The ID of the AWS Config rule.

    ", + "ConfigRuleEvaluationStatus$ConfigRuleArn": "

    The Amazon Resource Name (ARN) of the AWS Config rule.

    ", + "ConfigRuleEvaluationStatus$ConfigRuleId": "

    The ID of the AWS Config rule.

    ", + "ConfigRuleEvaluationStatus$LastErrorCode": "

    The error code that AWS Config returned when the rule last failed.

    ", + "ConfigRuleEvaluationStatus$LastErrorMessage": "

    The error message that AWS Config returned when the rule last failed.

    ", + "ConfigStreamDeliveryInfo$lastErrorCode": "

    The error code from the last attempted delivery.

    ", + "ConfigStreamDeliveryInfo$lastErrorMessage": "

    The error message from the last attempted delivery.

    ", + "ConfigurationRecorder$roleARN": "

    Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account.

    ", + "ConfigurationRecorderStatus$name": "

    The name of the configuration recorder.

    ", + "ConfigurationRecorderStatus$lastErrorCode": "

    The error code indicating that the recording failed.

    ", + "ConfigurationRecorderStatus$lastErrorMessage": "

    The message indicating that the recording failed due to an error.

    ", + "DeliverConfigSnapshotResponse$configSnapshotId": "

    The ID of the snapshot that is being created.

    ", + "DeliveryChannel$s3BucketName": "

    The name of the Amazon S3 bucket used to store configuration history for the delivery channel.

    ", + "DeliveryChannel$s3KeyPrefix": "

    The prefix for the specified Amazon S3 bucket.

    ", + "DeliveryChannel$snsTopicARN": "

    The Amazon Resource Name (ARN) of the SNS topic that AWS Config delivers notifications to.

    ", + "DeliveryChannelStatus$name": "

    The name of the delivery channel.

    ", + "DescribeComplianceByConfigRuleRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "DescribeComplianceByConfigRuleResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "DescribeConfigRulesRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "DescribeConfigRulesResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "EvaluationResult$ResultToken": "

    An encrypted token that associates an evaluation with an AWS Config rule. The token identifies the rule, the AWS resource being evaluated, and the event that triggered the evaluation.

    ", + "GetComplianceDetailsByResourceRequest$NextToken": "

    The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

    ", + "GetComplianceDetailsByResourceResponse$NextToken": "

    The string that you use in a subsequent request to get the next page of results in a paginated response.

    ", + "PutEvaluationsRequest$ResultToken": "

    An encrypted token that associates an evaluation with an AWS Config rule. Identifies the rule and the event that triggered the evaluation.

    " + } + }, + "StringWithCharLimit128": { + "base": null, + "refs": { + "Scope$TagKey": "

    The tag key that is applied to only those AWS resources that you want AWS Config to evaluate against the rule.

    " + } + }, + "StringWithCharLimit256": { + "base": null, + "refs": { + "ComplianceByResource$ResourceType": "

    The type of the AWS resource that was evaluated.

    ", + "ComplianceByResource$ResourceId": "

    The ID of the AWS resource that was evaluated.

    ", + "ComplianceResourceTypes$member": null, + "ComplianceSummaryByResourceType$ResourceType": "

    The type of AWS resource.

    ", + "ConfigRule$InputParameters": "

    A string in JSON format that is passed to the AWS Config rule Lambda function.

    ", + "DescribeComplianceByResourceRequest$ResourceType": "

    The types of AWS resources for which you want compliance information; for example, AWS::EC2::Instance. For this action, you can specify that the resource type is an AWS account by specifying AWS::::Account.

    ", + "DescribeComplianceByResourceRequest$ResourceId": "

    The ID of the AWS resource for which you want compliance information. You can specify only one resource ID. If you specify a resource ID, you must also specify a type for ResourceType.

    ", + "Evaluation$ComplianceResourceType": "

    The type of AWS resource that was evaluated.

    ", + "Evaluation$ComplianceResourceId": "

    The ID of the AWS resource that was evaluated.

    ", + "Evaluation$Annotation": "

    Supplementary information about how the evaluation determined the compliance.

    ", + "EvaluationResult$Annotation": "

    Supplementary information about how the evaluation determined the compliance.

    ", + "EvaluationResultQualifier$ResourceType": "

    The type of AWS resource that was evaluated.

    ", + "EvaluationResultQualifier$ResourceId": "

    The ID of the evaluated AWS resource.

    ", + "GetComplianceDetailsByResourceRequest$ResourceType": "

    The type of the AWS resource for which you want compliance information.

    ", + "GetComplianceDetailsByResourceRequest$ResourceId": "

    The ID of the AWS resource for which you want compliance information.

    ", + "ResourceTypes$member": null, + "Scope$TagValue": "

    The tag value applied to only those AWS resources that you want AWS Config to evaluate against the rule. If you specify a value for TagValue, you must also specify a value for TagKey.

    ", + "Scope$ComplianceResourceId": "

    The IDs of only those AWS resources that you want AWS Config to evaluate against the rule. If you specify a resource ID, you must specify one resource type for ComplianceResourceTypes.

    ", + "Source$SourceIdentifier": "

    For AWS managed Config rules, a pre-defined identifier from a list. To reference the list, see Using AWS Managed Config Rules.

    For customer managed Config rules, the identifier is the Amazon Resource Name (ARN) of the rule's AWS Lambda function.

    " + } + }, + "StringWithCharLimit64": { + "base": null, + "refs": { + "ComplianceByConfigRule$ConfigRuleName": "

    The name of the AWS Config rule.

    ", + "ConfigRule$ConfigRuleName": "

    The name that you assign to the AWS Config rule. The name is required if you are adding a new rule.

    ", + "ConfigRuleEvaluationStatus$ConfigRuleName": "

    The name of the AWS Config rule.

    ", + "ConfigRuleNames$member": null, + "DeleteConfigRuleRequest$ConfigRuleName": "

    The name of the AWS Config rule that you want to delete.

    ", + "EvaluationResultQualifier$ConfigRuleName": "

    The name of the AWS Config rule that was used in the evaluation.

    ", + "GetComplianceDetailsByConfigRuleRequest$ConfigRuleName": "

    The name of the AWS Config rule for which you want compliance information.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "ConfigurationItem$tags": "

    A mapping of key value tags associated with the resource.

    " + } + }, + "ValidationException": { + "base": "

    The requested action is not valid.

    ", + "refs": { + } + }, + "Value": { + "base": null, + "refs": { + "Tags$value": null + } + }, + "Version": { + "base": null, + "refs": { + "ConfigurationItem$version": "

    The version number of the resource configuration.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "pagination": { + "GetResourceConfigHistory": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "configurationItems" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1167 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-10-29", + "endpointPrefix":"datapipeline", + "jsonVersion":"1.1", + "serviceFullName":"AWS Data Pipeline", + "signatureVersion":"v4", + "targetPrefix":"DataPipeline", + "protocol":"json" + }, + "operations":{ + "ActivatePipeline":{ + "name":"ActivatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ActivatePipelineInput"}, + "output":{"shape":"ActivatePipelineOutput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsInput"}, + "output":{"shape":"AddTagsOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "CreatePipeline":{ + "name":"CreatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePipelineInput"}, + "output":{"shape":"CreatePipelineOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeactivatePipeline":{ + "name":"DeactivatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeactivatePipelineInput"}, + "output":{"shape":"DeactivatePipelineOutput"}, + "errors":[ + { + 
"shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DeletePipeline":{ + "name":"DeletePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePipelineInput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "DescribeObjects":{ + "name":"DescribeObjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeObjectsInput"}, + "output":{"shape":"DescribeObjectsOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "DescribePipelines":{ + "name":"DescribePipelines", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePipelinesInput"}, + "output":{"shape":"DescribePipelinesOutput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "EvaluateExpression":{ + "name":"EvaluateExpression", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EvaluateExpressionInput"}, + "output":{"shape":"EvaluateExpressionOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"TaskNotFoundException", + "exception":true 
+ }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "GetPipelineDefinition":{ + "name":"GetPipelineDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPipelineDefinitionInput"}, + "output":{"shape":"GetPipelineDefinitionOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "ListPipelines":{ + "name":"ListPipelines", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPipelinesInput"}, + "output":{"shape":"ListPipelinesOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "PollForTask":{ + "name":"PollForTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForTaskInput"}, + "output":{"shape":"PollForTaskOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"TaskNotFoundException", + "exception":true + } + ] + }, + "PutPipelineDefinition":{ + "name":"PutPipelineDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutPipelineDefinitionInput"}, + "output":{"shape":"PutPipelineDefinitionOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + 
"QueryObjects":{ + "name":"QueryObjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryObjectsInput"}, + "output":{"shape":"QueryObjectsOutput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsInput"}, + "output":{"shape":"RemoveTagsOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "ReportTaskProgress":{ + "name":"ReportTaskProgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportTaskProgressInput"}, + "output":{"shape":"ReportTaskProgressOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"TaskNotFoundException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "ReportTaskRunnerHeartbeat":{ + "name":"ReportTaskRunnerHeartbeat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportTaskRunnerHeartbeatInput"}, + "output":{"shape":"ReportTaskRunnerHeartbeatOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "SetStatus":{ + "name":"SetStatus", + "http":{ + "method":"POST", + "requestUri":"/" 
+ }, + "input":{"shape":"SetStatusInput"}, + "errors":[ + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + }, + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + } + ] + }, + "SetTaskStatus":{ + "name":"SetTaskStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTaskStatusInput"}, + "output":{"shape":"SetTaskStatusOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"TaskNotFoundException", + "exception":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + }, + "ValidatePipelineDefinition":{ + "name":"ValidatePipelineDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ValidatePipelineDefinitionInput"}, + "output":{"shape":"ValidatePipelineDefinitionOutput"}, + "errors":[ + { + "shape":"InternalServiceError", + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "exception":true + }, + { + "shape":"PipelineNotFoundException", + "exception":true + }, + { + "shape":"PipelineDeletedException", + "exception":true + } + ] + } + }, + "shapes":{ + "ActivatePipelineInput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"}, + "parameterValues":{"shape":"ParameterValueList"}, + "startTimestamp":{"shape":"timestamp"} + } + }, + "ActivatePipelineOutput":{ + "type":"structure", + "members":{ + } + }, + "AddTagsInput":{ + "type":"structure", + "required":[ + "pipelineId", + "tags" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "tags":{"shape":"tagList"} + } + }, + "AddTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "CreatePipelineInput":{ + 
"type":"structure", + "required":[ + "name", + "uniqueId" + ], + "members":{ + "name":{"shape":"id"}, + "uniqueId":{"shape":"id"}, + "description":{"shape":"string"}, + "tags":{"shape":"tagList"} + } + }, + "CreatePipelineOutput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"} + } + }, + "DeactivatePipelineInput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"}, + "cancelActive":{"shape":"cancelActive"} + } + }, + "DeactivatePipelineOutput":{ + "type":"structure", + "members":{ + } + }, + "DeletePipelineInput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"} + } + }, + "DescribeObjectsInput":{ + "type":"structure", + "required":[ + "pipelineId", + "objectIds" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "objectIds":{"shape":"idList"}, + "evaluateExpressions":{"shape":"boolean"}, + "marker":{"shape":"string"} + } + }, + "DescribeObjectsOutput":{ + "type":"structure", + "required":["pipelineObjects"], + "members":{ + "pipelineObjects":{"shape":"PipelineObjectList"}, + "marker":{"shape":"string"}, + "hasMoreResults":{"shape":"boolean"} + } + }, + "DescribePipelinesInput":{ + "type":"structure", + "required":["pipelineIds"], + "members":{ + "pipelineIds":{"shape":"idList"} + } + }, + "DescribePipelinesOutput":{ + "type":"structure", + "required":["pipelineDescriptionList"], + "members":{ + "pipelineDescriptionList":{"shape":"PipelineDescriptionList"} + } + }, + "EvaluateExpressionInput":{ + "type":"structure", + "required":[ + "pipelineId", + "objectId", + "expression" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "objectId":{"shape":"id"}, + "expression":{"shape":"longString"} + } + }, + "EvaluateExpressionOutput":{ + "type":"structure", + "required":["evaluatedExpression"], + "members":{ + "evaluatedExpression":{"shape":"longString"} + } + }, + "Field":{ + "type":"structure", + "required":["key"], + "members":{ 
+ "key":{"shape":"fieldNameString"}, + "stringValue":{"shape":"fieldStringValue"}, + "refValue":{"shape":"fieldNameString"} + } + }, + "GetPipelineDefinitionInput":{ + "type":"structure", + "required":["pipelineId"], + "members":{ + "pipelineId":{"shape":"id"}, + "version":{"shape":"string"} + } + }, + "GetPipelineDefinitionOutput":{ + "type":"structure", + "members":{ + "pipelineObjects":{"shape":"PipelineObjectList"}, + "parameterObjects":{"shape":"ParameterObjectList"}, + "parameterValues":{"shape":"ParameterValueList"} + } + }, + "InstanceIdentity":{ + "type":"structure", + "members":{ + "document":{"shape":"string"}, + "signature":{"shape":"string"} + } + }, + "InternalServiceError":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "ListPipelinesInput":{ + "type":"structure", + "members":{ + "marker":{"shape":"string"} + } + }, + "ListPipelinesOutput":{ + "type":"structure", + "required":["pipelineIdList"], + "members":{ + "pipelineIdList":{"shape":"pipelineList"}, + "marker":{"shape":"string"}, + "hasMoreResults":{"shape":"boolean"} + } + }, + "Operator":{ + "type":"structure", + "members":{ + "type":{"shape":"OperatorType"}, + "values":{"shape":"stringList"} + } + }, + "OperatorType":{ + "type":"string", + "enum":[ + "EQ", + "REF_EQ", + "LE", + "GE", + "BETWEEN" + ] + }, + "ParameterAttribute":{ + "type":"structure", + "required":[ + "key", + "stringValue" + ], + "members":{ + "key":{"shape":"attributeNameString"}, + "stringValue":{"shape":"attributeValueString"} + } + }, + "ParameterAttributeList":{ + "type":"list", + "member":{"shape":"ParameterAttribute"} + }, + "ParameterObject":{ + "type":"structure", + "required":[ + "id", + "attributes" + ], + "members":{ + "id":{"shape":"fieldNameString"}, + "attributes":{"shape":"ParameterAttributeList"} + } + }, + 
"ParameterObjectList":{ + "type":"list", + "member":{"shape":"ParameterObject"} + }, + "ParameterValue":{ + "type":"structure", + "required":[ + "id", + "stringValue" + ], + "members":{ + "id":{"shape":"fieldNameString"}, + "stringValue":{"shape":"fieldStringValue"} + } + }, + "ParameterValueList":{ + "type":"list", + "member":{"shape":"ParameterValue"} + }, + "PipelineDeletedException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "PipelineDescription":{ + "type":"structure", + "required":[ + "pipelineId", + "name", + "fields" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "name":{"shape":"id"}, + "fields":{"shape":"fieldList"}, + "description":{"shape":"string"}, + "tags":{"shape":"tagList"} + } + }, + "PipelineDescriptionList":{ + "type":"list", + "member":{"shape":"PipelineDescription"} + }, + "PipelineIdName":{ + "type":"structure", + "members":{ + "id":{"shape":"id"}, + "name":{"shape":"id"} + } + }, + "PipelineNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "PipelineObject":{ + "type":"structure", + "required":[ + "id", + "name", + "fields" + ], + "members":{ + "id":{"shape":"id"}, + "name":{"shape":"id"}, + "fields":{"shape":"fieldList"} + } + }, + "PipelineObjectList":{ + "type":"list", + "member":{"shape":"PipelineObject"} + }, + "PipelineObjectMap":{ + "type":"map", + "key":{"shape":"id"}, + "value":{"shape":"PipelineObject"} + }, + "PollForTaskInput":{ + "type":"structure", + "required":["workerGroup"], + "members":{ + "workerGroup":{"shape":"string"}, + "hostname":{"shape":"id"}, + "instanceIdentity":{"shape":"InstanceIdentity"} + } + }, + "PollForTaskOutput":{ + "type":"structure", + "members":{ + "taskObject":{"shape":"TaskObject"} + } + }, + "PutPipelineDefinitionInput":{ + "type":"structure", + "required":[ + "pipelineId", + "pipelineObjects" + ], + "members":{ + "pipelineId":{"shape":"id"}, + 
"pipelineObjects":{"shape":"PipelineObjectList"}, + "parameterObjects":{"shape":"ParameterObjectList"}, + "parameterValues":{"shape":"ParameterValueList"} + } + }, + "PutPipelineDefinitionOutput":{ + "type":"structure", + "required":["errored"], + "members":{ + "validationErrors":{"shape":"ValidationErrors"}, + "validationWarnings":{"shape":"ValidationWarnings"}, + "errored":{"shape":"boolean"} + } + }, + "Query":{ + "type":"structure", + "members":{ + "selectors":{"shape":"SelectorList"} + } + }, + "QueryObjectsInput":{ + "type":"structure", + "required":[ + "pipelineId", + "sphere" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "query":{"shape":"Query"}, + "sphere":{"shape":"string"}, + "marker":{"shape":"string"}, + "limit":{"shape":"int"} + } + }, + "QueryObjectsOutput":{ + "type":"structure", + "members":{ + "ids":{"shape":"idList"}, + "marker":{"shape":"string"}, + "hasMoreResults":{"shape":"boolean"} + } + }, + "RemoveTagsInput":{ + "type":"structure", + "required":[ + "pipelineId", + "tagKeys" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "tagKeys":{"shape":"stringList"} + } + }, + "RemoveTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "ReportTaskProgressInput":{ + "type":"structure", + "required":["taskId"], + "members":{ + "taskId":{"shape":"taskId"}, + "fields":{"shape":"fieldList"} + } + }, + "ReportTaskProgressOutput":{ + "type":"structure", + "required":["canceled"], + "members":{ + "canceled":{"shape":"boolean"} + } + }, + "ReportTaskRunnerHeartbeatInput":{ + "type":"structure", + "required":["taskrunnerId"], + "members":{ + "taskrunnerId":{"shape":"id"}, + "workerGroup":{"shape":"string"}, + "hostname":{"shape":"id"} + } + }, + "ReportTaskRunnerHeartbeatOutput":{ + "type":"structure", + "required":["terminate"], + "members":{ + "terminate":{"shape":"boolean"} + } + }, + "Selector":{ + "type":"structure", + "members":{ + "fieldName":{"shape":"string"}, + "operator":{"shape":"Operator"} + } + }, + "SelectorList":{ + 
"type":"list", + "member":{"shape":"Selector"} + }, + "SetStatusInput":{ + "type":"structure", + "required":[ + "pipelineId", + "objectIds", + "status" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "objectIds":{"shape":"idList"}, + "status":{"shape":"string"} + } + }, + "SetTaskStatusInput":{ + "type":"structure", + "required":[ + "taskId", + "taskStatus" + ], + "members":{ + "taskId":{"shape":"taskId"}, + "taskStatus":{"shape":"TaskStatus"}, + "errorId":{"shape":"string"}, + "errorMessage":{"shape":"errorMessage"}, + "errorStackTrace":{"shape":"string"} + } + }, + "SetTaskStatusOutput":{ + "type":"structure", + "members":{ + } + }, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"tagKey"}, + "value":{"shape":"tagValue"} + } + }, + "TaskNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "TaskObject":{ + "type":"structure", + "members":{ + "taskId":{"shape":"taskId"}, + "pipelineId":{"shape":"id"}, + "attemptId":{"shape":"id"}, + "objects":{"shape":"PipelineObjectMap"} + } + }, + "TaskStatus":{ + "type":"string", + "enum":[ + "FINISHED", + "FAILED", + "FALSE" + ] + }, + "ValidatePipelineDefinitionInput":{ + "type":"structure", + "required":[ + "pipelineId", + "pipelineObjects" + ], + "members":{ + "pipelineId":{"shape":"id"}, + "pipelineObjects":{"shape":"PipelineObjectList"}, + "parameterObjects":{"shape":"ParameterObjectList"}, + "parameterValues":{"shape":"ParameterValueList"} + } + }, + "ValidatePipelineDefinitionOutput":{ + "type":"structure", + "required":["errored"], + "members":{ + "validationErrors":{"shape":"ValidationErrors"}, + "validationWarnings":{"shape":"ValidationWarnings"}, + "errored":{"shape":"boolean"} + } + }, + "ValidationError":{ + "type":"structure", + "members":{ + "id":{"shape":"id"}, + "errors":{"shape":"validationMessages"} + } + }, + "ValidationErrors":{ + "type":"list", + 
"member":{"shape":"ValidationError"} + }, + "ValidationWarning":{ + "type":"structure", + "members":{ + "id":{"shape":"id"}, + "warnings":{"shape":"validationMessages"} + } + }, + "ValidationWarnings":{ + "type":"list", + "member":{"shape":"ValidationWarning"} + }, + "attributeNameString":{ + "type":"string", + "min":1, + "max":256, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "attributeValueString":{ + "type":"string", + "min":0, + "max":10240, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "boolean":{"type":"boolean"}, + "cancelActive":{"type":"boolean"}, + "errorMessage":{"type":"string"}, + "fieldList":{ + "type":"list", + "member":{"shape":"Field"} + }, + "fieldNameString":{ + "type":"string", + "min":1, + "max":256, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "fieldStringValue":{ + "type":"string", + "min":0, + "max":10240, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "id":{ + "type":"string", + "min":1, + "max":1024, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "idList":{ + "type":"list", + "member":{"shape":"id"} + }, + "int":{"type":"integer"}, + "longString":{ + "type":"string", + "min":0, + "max":20971520, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "pipelineList":{ + "type":"list", + "member":{"shape":"PipelineIdName"} + }, + "string":{ + "type":"string", + "min":0, + "max":1024, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "stringList":{ + "type":"list", + "member":{"shape":"string"} + }, + "tagKey":{ + "type":"string", + "min":1, + "max":128 + }, + "tagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "min":0, + "max":10 + }, + "tagValue":{ + "type":"string", + "min":0, + "max":256 + }, + "taskId":{ + 
"type":"string", + "min":1, + "max":2048, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "timestamp":{"type":"timestamp"}, + "validationMessage":{ + "type":"string", + "min":0, + "max":10000, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "validationMessages":{ + "type":"list", + "member":{"shape":"validationMessage"} + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,607 @@ +{ + "version": "2.0", + "operations": { + "ActivatePipeline": "

    Validates the specified pipeline and starts processing pipeline tasks. If the pipeline does not pass validation, activation fails.

    If you need to pause the pipeline to investigate an issue with a component, such as a data source or script, call DeactivatePipeline.

    To activate a finished pipeline, modify the end date for the pipeline and then activate it.

    ", + "AddTags": "

    Adds or modifies tags for the specified pipeline.

    ", + "CreatePipeline": "

    Creates a new, empty pipeline. Use PutPipelineDefinition to populate the pipeline.

    ", + "DeactivatePipeline": "

    Deactivates the specified running pipeline. The pipeline is set to the DEACTIVATING state until the deactivation process completes.

    To resume a deactivated pipeline, use ActivatePipeline. By default, the pipeline resumes from the last completed execution. Optionally, you can specify the date and time to resume the pipeline.

    ", + "DeletePipeline": "

    Deletes a pipeline, its pipeline definition, and its run history. AWS Data Pipeline attempts to cancel instances associated with the pipeline that are currently being processed by task runners.

    Deleting a pipeline cannot be undone. You cannot query or restore a deleted pipeline. To temporarily pause a pipeline instead of deleting it, call SetStatus with the status set to PAUSE on individual components. Components that are paused by SetStatus can be resumed.

    ", + "DescribeObjects": "

    Gets the object definitions for a set of objects associated with the pipeline. Object definitions are composed of a set of fields that define the properties of the object.

    ", + "DescribePipelines": "

    Retrieves metadata about one or more pipelines. The information retrieved includes the name of the pipeline, the pipeline identifier, its current state, and the user account that owns the pipeline. Using account credentials, you can retrieve metadata about pipelines that you or your IAM users have created. If you are using an IAM user account, you can retrieve metadata about only those pipelines for which you have read permissions.

    To retrieve the full pipeline definition instead of metadata about the pipeline, call GetPipelineDefinition.

    ", + "EvaluateExpression": "

    Task runners call EvaluateExpression to evaluate a string in the context of the specified object. For example, a task runner can evaluate SQL queries stored in Amazon S3.

    ", + "GetPipelineDefinition": "

    Gets the definition of the specified pipeline. You can call GetPipelineDefinition to retrieve the pipeline definition that you provided using PutPipelineDefinition.

    ", + "ListPipelines": "

    Lists the pipeline identifiers for all active pipelines that you have permission to access.

    ", + "PollForTask": "

    Task runners call PollForTask to receive a task to perform from AWS Data Pipeline. The task runner specifies which tasks it can perform by setting a value for the workerGroup parameter. The task returned can come from any of the pipelines that match the workerGroup value passed in by the task runner and that was launched using the IAM user credentials specified by the task runner.

    If tasks are ready in the work queue, PollForTask returns a response immediately. If no tasks are available in the queue, PollForTask uses long-polling and holds on to a poll connection for up to 90 seconds, during which time the first newly scheduled task is handed to the task runner. To accommodate this, set the socket timeout in your task runner to 90 seconds. The task runner should not call PollForTask again on the same workerGroup until it receives a response, and this can take up to 90 seconds.

    ", + "PutPipelineDefinition": "

    Adds tasks, schedules, and preconditions to the specified pipeline. You can use PutPipelineDefinition to populate a new pipeline.

    PutPipelineDefinition also validates the configuration as it adds it to the pipeline. Changes to the pipeline are saved unless one of the following three validation errors exists in the pipeline.

    1. An object is missing a name or identifier field.
    2. A string or reference field is empty.
    3. The number of objects in the pipeline exceeds the maximum allowed objects.
    4. The pipeline is in a FINISHED state.

    Pipeline object definitions are passed to the PutPipelineDefinition action and returned by the GetPipelineDefinition action.

    ", + "QueryObjects": "

    Queries the specified pipeline for the names of objects that match the specified set of conditions.

    ", + "RemoveTags": "

    Removes existing tags from the specified pipeline.

    ", + "ReportTaskProgress": "

    Task runners call ReportTaskProgress when assigned a task to acknowledge that it has the task. If the web service does not receive this acknowledgement within 2 minutes, it assigns the task in a subsequent PollForTask call. After this initial acknowledgement, the task runner only needs to report progress every 15 minutes to maintain its ownership of the task. You can change this reporting time from 15 minutes by specifying a reportProgressTimeout field in your pipeline.

    If a task runner does not report its status after 5 minutes, AWS Data Pipeline assumes that the task runner is unable to process the task and reassigns the task in a subsequent response to PollForTask. Task runners should call ReportTaskProgress every 60 seconds.

    ", + "ReportTaskRunnerHeartbeat": "

    Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate that they are operational. If the AWS Data Pipeline Task Runner is launched on a resource managed by AWS Data Pipeline, the web service can use this call to detect when the task runner application has failed and restart a new instance.

    ", + "SetStatus": "

    Requests that the status of the specified physical or logical pipeline objects be updated in the specified pipeline. This update might not occur immediately, but is eventually consistent. The status that can be set depends on the type of object (for example, DataNode or Activity). You cannot perform this operation on FINISHED pipelines and attempting to do so returns InvalidRequestException.

    ", + "SetTaskStatus": "

    Task runners call SetTaskStatus to notify AWS Data Pipeline that a task is completed and provide information about the final status. A task runner makes this call regardless of whether the task was successful. A task runner does not need to call SetTaskStatus for tasks that are canceled by the web service during a call to ReportTaskProgress.

    ", + "ValidatePipelineDefinition": "

    Validates the specified pipeline definition to ensure that it is well formed and can be run without error.

    " + }, + "service": "

    AWS Data Pipeline configures and manages a data-driven workflow called a pipeline. AWS Data Pipeline handles the details of scheduling and ensuring that data dependencies are met so that your application can focus on processing the data.

    AWS Data Pipeline provides a JAR implementation of a task runner called AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides logic for common data management scenarios, such as performing database queries and running data analysis using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data Pipeline Task Runner as your task runner, or you can write your own task runner to provide custom data management.

    AWS Data Pipeline implements two main sets of functionality. Use the first set to create a pipeline and define data sources, schedules, dependencies, and the transforms to be performed on the data. Use the second set in your task runner application to receive the next task ready for processing. The logic for performing the task, such as querying the data, running data analysis, or converting the data from one format to another, is contained within the task runner. The task runner performs the task assigned to it by the web service, reporting progress to the web service as it does so. When the task is done, the task runner reports the final success or failure of the task to the web service.

    ", + "shapes": { + "ActivatePipelineInput": { + "base": "

    Contains the parameters for ActivatePipeline.

    ", + "refs": { + } + }, + "ActivatePipelineOutput": { + "base": "

    Contains the output of ActivatePipeline.

    ", + "refs": { + } + }, + "AddTagsInput": { + "base": "

    Contains the parameters for AddTags.

    ", + "refs": { + } + }, + "AddTagsOutput": { + "base": "

    Contains the output of AddTags.

    ", + "refs": { + } + }, + "CreatePipelineInput": { + "base": "

    Contains the parameters for CreatePipeline.

    ", + "refs": { + } + }, + "CreatePipelineOutput": { + "base": "

    Contains the output of CreatePipeline.

    ", + "refs": { + } + }, + "DeactivatePipelineInput": { + "base": "

    Contains the parameters for DeactivatePipeline.

    ", + "refs": { + } + }, + "DeactivatePipelineOutput": { + "base": "

    Contains the output of DeactivatePipeline.

    ", + "refs": { + } + }, + "DeletePipelineInput": { + "base": "

    Contains the parameters for DeletePipeline.

    ", + "refs": { + } + }, + "DescribeObjectsInput": { + "base": "

    Contains the parameters for DescribeObjects.

    ", + "refs": { + } + }, + "DescribeObjectsOutput": { + "base": "

    Contains the output of DescribeObjects.

    ", + "refs": { + } + }, + "DescribePipelinesInput": { + "base": "

    Contains the parameters for DescribePipelines.

    ", + "refs": { + } + }, + "DescribePipelinesOutput": { + "base": "

    Contains the output of DescribePipelines.

    ", + "refs": { + } + }, + "EvaluateExpressionInput": { + "base": "

    Contains the parameters for EvaluateExpression.

    ", + "refs": { + } + }, + "EvaluateExpressionOutput": { + "base": "

    Contains the output of EvaluateExpression.

    ", + "refs": { + } + }, + "Field": { + "base": "

    A key-value pair that describes a property of a pipeline object. The value is specified as either a string value (StringValue) or a reference to another object (RefValue) but not as both.

    ", + "refs": { + "fieldList$member": null + } + }, + "GetPipelineDefinitionInput": { + "base": "

    Contains the parameters for GetPipelineDefinition.

    ", + "refs": { + } + }, + "GetPipelineDefinitionOutput": { + "base": "

    Contains the output of GetPipelineDefinition.

    ", + "refs": { + } + }, + "InstanceIdentity": { + "base": "

    Identity information for the EC2 instance that is hosting the task runner. You can get this value by calling a metadata URI from the EC2 instance. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves that your task runner is running on an EC2 instance, and ensures the proper AWS Data Pipeline service charges are applied to your pipeline.

    ", + "refs": { + "PollForTaskInput$instanceIdentity": "

    Identity information for the EC2 instance that is hosting the task runner. You can get this value from the instance using http://169.254.169.254/latest/meta-data/instance-id. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves that your task runner is running on an EC2 instance, and ensures the proper AWS Data Pipeline service charges are applied to your pipeline.

    " + } + }, + "InternalServiceError": { + "base": "

    An internal service error occurred.

    ", + "refs": { + } + }, + "InvalidRequestException": { + "base": "

    The request was not valid. Verify that your request was properly formatted, that the signature was generated with the correct credentials, and that you haven't exceeded any of the service limits for your account.

    ", + "refs": { + } + }, + "ListPipelinesInput": { + "base": "

    Contains the parameters for ListPipelines.

    ", + "refs": { + } + }, + "ListPipelinesOutput": { + "base": "

    Contains the output of ListPipelines.

    ", + "refs": { + } + }, + "Operator": { + "base": "

    Contains a logical operation for comparing the value of a field with a specified value.

    ", + "refs": { + "Selector$operator": null + } + }, + "OperatorType": { + "base": null, + "refs": { + "Operator$type": "

    The logical operation to be performed: equal (EQ), equal reference (REF_EQ), less than or equal (LE), greater than or equal (GE), or between (BETWEEN). Equal reference (REF_EQ) can be used only with reference fields. The other comparison types can be used only with String fields. The comparison types you can use apply only to certain object fields, as detailed below.

    The comparison operators EQ and REF_EQ act on the following fields:

    • name
    • @sphere
    • parent
    • @componentParent
    • @instanceParent
    • @status
    • @scheduledStartTime
    • @scheduledEndTime
    • @actualStartTime
    • @actualEndTime

    The comparison operators GE, LE, and BETWEEN act on the following fields:

    • @scheduledStartTime
    • @scheduledEndTime
    • @actualStartTime
    • @actualEndTime

    Note that fields beginning with the at sign (@) are read-only and set by the web service. When you name fields, you should choose names containing only alpha-numeric values, as symbols may be reserved by AWS Data Pipeline. User-defined fields that you add to a pipeline should prefix their name with the string \"my\".

    " + } + }, + "ParameterAttribute": { + "base": "

    The attributes allowed or specified with a parameter object.

    ", + "refs": { + "ParameterAttributeList$member": null + } + }, + "ParameterAttributeList": { + "base": null, + "refs": { + "ParameterObject$attributes": "

    The attributes of the parameter object.

    " + } + }, + "ParameterObject": { + "base": "

    Contains information about a parameter object.

    ", + "refs": { + "ParameterObjectList$member": null + } + }, + "ParameterObjectList": { + "base": null, + "refs": { + "GetPipelineDefinitionOutput$parameterObjects": "

    The parameter objects used in the pipeline definition.

    ", + "PutPipelineDefinitionInput$parameterObjects": "

    The parameter objects used with the pipeline.

    ", + "ValidatePipelineDefinitionInput$parameterObjects": "

    The parameter objects used with the pipeline.

    " + } + }, + "ParameterValue": { + "base": "

    A value or list of parameter values.

    ", + "refs": { + "ParameterValueList$member": null + } + }, + "ParameterValueList": { + "base": null, + "refs": { + "ActivatePipelineInput$parameterValues": "

    A list of parameter values to pass to the pipeline at activation.

    ", + "GetPipelineDefinitionOutput$parameterValues": "

    The parameter values used in the pipeline definition.

    ", + "PutPipelineDefinitionInput$parameterValues": "

    The parameter values used with the pipeline.

    ", + "ValidatePipelineDefinitionInput$parameterValues": "

    The parameter values used with the pipeline.

    " + } + }, + "PipelineDeletedException": { + "base": "

    The specified pipeline has been deleted.

    ", + "refs": { + } + }, + "PipelineDescription": { + "base": "

    Contains pipeline metadata.

    ", + "refs": { + "PipelineDescriptionList$member": null + } + }, + "PipelineDescriptionList": { + "base": null, + "refs": { + "DescribePipelinesOutput$pipelineDescriptionList": "

    An array of descriptions for the specified pipelines.

    " + } + }, + "PipelineIdName": { + "base": "

    Contains the name and identifier of a pipeline.

    ", + "refs": { + "pipelineList$member": null + } + }, + "PipelineNotFoundException": { + "base": "

    The specified pipeline was not found. Verify that you used the correct user and account identifiers.

    ", + "refs": { + } + }, + "PipelineObject": { + "base": "

    Contains information about a pipeline object. This can be a logical, physical, or physical attempt pipeline object. The complete set of components of a pipeline defines the pipeline.

    ", + "refs": { + "PipelineObjectList$member": null, + "PipelineObjectMap$value": null + } + }, + "PipelineObjectList": { + "base": null, + "refs": { + "DescribeObjectsOutput$pipelineObjects": "

    An array of object definitions.

    ", + "GetPipelineDefinitionOutput$pipelineObjects": "

    The objects defined in the pipeline.

    ", + "PutPipelineDefinitionInput$pipelineObjects": "

    The objects that define the pipeline. These objects overwrite the existing pipeline definition.

    ", + "ValidatePipelineDefinitionInput$pipelineObjects": "

    The objects that define the pipeline changes to validate against the pipeline.

    " + } + }, + "PipelineObjectMap": { + "base": null, + "refs": { + "TaskObject$objects": "

    Connection information for the location where the task runner will publish the output of the task.

    " + } + }, + "PollForTaskInput": { + "base": "

    Contains the parameters for PollForTask.

    ", + "refs": { + } + }, + "PollForTaskOutput": { + "base": "

    Contains the output of PollForTask.

    ", + "refs": { + } + }, + "PutPipelineDefinitionInput": { + "base": "

    Contains the parameters for PutPipelineDefinition.

    ", + "refs": { + } + }, + "PutPipelineDefinitionOutput": { + "base": "

    Contains the output of PutPipelineDefinition.

    ", + "refs": { + } + }, + "Query": { + "base": "

    Defines the query to run against an object.

    ", + "refs": { + "QueryObjectsInput$query": "

    The query that defines the objects to be returned. The Query object can contain a maximum of ten selectors. The conditions in the query are limited to top-level String fields in the object. These filters can be applied to components, instances, and attempts.

    " + } + }, + "QueryObjectsInput": { + "base": "

    Contains the parameters for QueryObjects.

    ", + "refs": { + } + }, + "QueryObjectsOutput": { + "base": "

    Contains the output of QueryObjects.

    ", + "refs": { + } + }, + "RemoveTagsInput": { + "base": "

    Contains the parameters for RemoveTags.

    ", + "refs": { + } + }, + "RemoveTagsOutput": { + "base": "

    Contains the output of RemoveTags.

    ", + "refs": { + } + }, + "ReportTaskProgressInput": { + "base": "

    Contains the parameters for ReportTaskProgress.

    ", + "refs": { + } + }, + "ReportTaskProgressOutput": { + "base": "

    Contains the output of ReportTaskProgress.

    ", + "refs": { + } + }, + "ReportTaskRunnerHeartbeatInput": { + "base": "

    Contains the parameters for ReportTaskRunnerHeartbeat.

    ", + "refs": { + } + }, + "ReportTaskRunnerHeartbeatOutput": { + "base": "

    Contains the output of ReportTaskRunnerHeartbeat.

    ", + "refs": { + } + }, + "Selector": { + "base": "

    A comparison that is used to determine whether a query should return this object.

    ", + "refs": { + "SelectorList$member": null + } + }, + "SelectorList": { + "base": "

    The list of Selectors that define queries on individual fields.

    ", + "refs": { + "Query$selectors": "

    List of selectors that define the query. An object must satisfy all of the selectors to match the query.

    " + } + }, + "SetStatusInput": { + "base": "

    Contains the parameters for SetStatus.

    ", + "refs": { + } + }, + "SetTaskStatusInput": { + "base": "

    Contains the parameters for SetTaskStatus.

    ", + "refs": { + } + }, + "SetTaskStatusOutput": { + "base": "

    Contains the output of SetTaskStatus.

    ", + "refs": { + } + }, + "Tag": { + "base": "

    Tags are key/value pairs defined by a user and associated with a pipeline to control access. AWS Data Pipeline allows you to associate ten tags per pipeline. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    ", + "refs": { + "tagList$member": null + } + }, + "TaskNotFoundException": { + "base": "

    The specified task was not found.

    ", + "refs": { + } + }, + "TaskObject": { + "base": "

    Contains information about a pipeline task that is assigned to a task runner.

    ", + "refs": { + "PollForTaskOutput$taskObject": "

    The information needed to complete the task that is being assigned to the task runner. One of the fields returned in this object is taskId, which contains an identifier for the task being assigned. The calling task runner uses taskId in subsequent calls to ReportTaskProgress and SetTaskStatus.

    " + } + }, + "TaskStatus": { + "base": null, + "refs": { + "SetTaskStatusInput$taskStatus": "

    If FINISHED, the task successfully completed. If FAILED, the task ended unsuccessfully. Preconditions use false.

    " + } + }, + "ValidatePipelineDefinitionInput": { + "base": "

    Contains the parameters for ValidatePipelineDefinition.

    ", + "refs": { + } + }, + "ValidatePipelineDefinitionOutput": { + "base": "

    Contains the output of ValidatePipelineDefinition.

    ", + "refs": { + } + }, + "ValidationError": { + "base": "

    Defines a validation error. Validation errors prevent pipeline activation. The set of validation errors that can be returned are defined by AWS Data Pipeline.

    ", + "refs": { + "ValidationErrors$member": null + } + }, + "ValidationErrors": { + "base": null, + "refs": { + "PutPipelineDefinitionOutput$validationErrors": "

    The validation errors that are associated with the objects defined in pipelineObjects.

    ", + "ValidatePipelineDefinitionOutput$validationErrors": "

    Any validation errors that were found.

    " + } + }, + "ValidationWarning": { + "base": "

    Defines a validation warning. Validation warnings do not prevent pipeline activation. The set of validation warnings that can be returned are defined by AWS Data Pipeline.

    ", + "refs": { + "ValidationWarnings$member": null + } + }, + "ValidationWarnings": { + "base": null, + "refs": { + "PutPipelineDefinitionOutput$validationWarnings": "

    The validation warnings that are associated with the objects defined in pipelineObjects.

    ", + "ValidatePipelineDefinitionOutput$validationWarnings": "

    Any validation warnings that were found.

    " + } + }, + "attributeNameString": { + "base": null, + "refs": { + "ParameterAttribute$key": "

    The field identifier.

    " + } + }, + "attributeValueString": { + "base": null, + "refs": { + "ParameterAttribute$stringValue": "

    The field value, expressed as a String.

    " + } + }, + "boolean": { + "base": null, + "refs": { + "DescribeObjectsInput$evaluateExpressions": "

    Indicates whether any expressions in the object should be evaluated when the object descriptions are returned.

    ", + "DescribeObjectsOutput$hasMoreResults": "

    Indicates whether there are more results to return.

    ", + "ListPipelinesOutput$hasMoreResults": "

    Indicates whether there are more results that can be obtained by a subsequent call.

    ", + "PutPipelineDefinitionOutput$errored": "

    Indicates whether there were validation errors, and the pipeline definition is stored but cannot be activated until you correct the pipeline and call PutPipelineDefinition to commit the corrected pipeline.

    ", + "QueryObjectsOutput$hasMoreResults": "

    Indicates whether there are more results that can be obtained by a subsequent call.

    ", + "ReportTaskProgressOutput$canceled": "

    If true, the calling task runner should cancel processing of the task. The task runner does not need to call SetTaskStatus for canceled tasks.

    ", + "ReportTaskRunnerHeartbeatOutput$terminate": "

    Indicates whether the calling task runner should terminate.

    ", + "ValidatePipelineDefinitionOutput$errored": "

    Indicates whether there were validation errors.

    " + } + }, + "cancelActive": { + "base": null, + "refs": { + "DeactivatePipelineInput$cancelActive": "

    Indicates whether to cancel any running objects. The default is true, which sets the state of any running objects to CANCELED. If this value is false, the pipeline is deactivated after all running objects finish.

    " + } + }, + "errorMessage": { + "base": null, + "refs": { + "InternalServiceError$message": "

    Description of the error message.

    ", + "InvalidRequestException$message": "

    Description of the error message.

    ", + "PipelineDeletedException$message": "

    Description of the error message.

    ", + "PipelineNotFoundException$message": "

    Description of the error message.

    ", + "SetTaskStatusInput$errorMessage": "

    If an error occurred during the task, this value specifies a text description of the error. This value is set on the physical attempt object. It is used to display error information to the user. The web service does not parse this value.

    ", + "TaskNotFoundException$message": "

    Description of the error message.

    " + } + }, + "fieldList": { + "base": null, + "refs": { + "PipelineDescription$fields": "

    A list of read-only fields that contain metadata about the pipeline: @userId, @accountId, and @pipelineState.

    ", + "PipelineObject$fields": "

    Key-value pairs that define the properties of the object.

    ", + "ReportTaskProgressInput$fields": "

    Key-value pairs that define the properties of the ReportTaskProgressInput object.

    " + } + }, + "fieldNameString": { + "base": null, + "refs": { + "Field$key": "

    The field identifier.

    ", + "Field$refValue": "

    The field value, expressed as the identifier of another object.

    ", + "ParameterObject$id": "

    The ID of the parameter object.

    ", + "ParameterValue$id": "

    The ID of the parameter value.

    " + } + }, + "fieldStringValue": { + "base": null, + "refs": { + "Field$stringValue": "

    The field value, expressed as a String.

    ", + "ParameterValue$stringValue": "

    The field value, expressed as a String.

    " + } + }, + "id": { + "base": null, + "refs": { + "ActivatePipelineInput$pipelineId": "

    The ID of the pipeline.

    ", + "AddTagsInput$pipelineId": "

    The ID of the pipeline.

    ", + "CreatePipelineInput$name": "

    The name for the pipeline. You can use the same name for multiple pipelines associated with your AWS account, because AWS Data Pipeline assigns each pipeline a unique pipeline identifier.

    ", + "CreatePipelineInput$uniqueId": "

    A unique identifier. This identifier is not the same as the pipeline identifier assigned by AWS Data Pipeline. You are responsible for defining the format and ensuring the uniqueness of this identifier. You use this parameter to ensure idempotency during repeated calls to CreatePipeline. For example, if the first call to CreatePipeline does not succeed, you can pass in the same unique identifier and pipeline name combination on a subsequent call to CreatePipeline. CreatePipeline ensures that if a pipeline already exists with the same name and unique identifier, a new pipeline is not created. Instead, you'll receive the pipeline identifier from the previous attempt. The uniqueness of the name and unique identifier combination is scoped to the AWS account or IAM user credentials.

    ", + "CreatePipelineOutput$pipelineId": "

    The ID that AWS Data Pipeline assigns the newly created pipeline. For example, df-06372391ZG65EXAMPLE.

    ", + "DeactivatePipelineInput$pipelineId": "

    The ID of the pipeline.

    ", + "DeletePipelineInput$pipelineId": "

    The ID of the pipeline.

    ", + "DescribeObjectsInput$pipelineId": "

    The ID of the pipeline that contains the object definitions.

    ", + "EvaluateExpressionInput$pipelineId": "

    The ID of the pipeline.

    ", + "EvaluateExpressionInput$objectId": "

    The ID of the object.

    ", + "GetPipelineDefinitionInput$pipelineId": "

    The ID of the pipeline.

    ", + "PipelineDescription$pipelineId": "

    The pipeline identifier that was assigned by AWS Data Pipeline. This is a string of the form df-297EG78HU43EEXAMPLE.

    ", + "PipelineDescription$name": "

    The name of the pipeline.

    ", + "PipelineIdName$id": "

    The ID of the pipeline that was assigned by AWS Data Pipeline. This is a string of the form df-297EG78HU43EEXAMPLE.

    ", + "PipelineIdName$name": "

    The name of the pipeline.

    ", + "PipelineObject$id": "

    The ID of the object.

    ", + "PipelineObject$name": "

    The name of the object.

    ", + "PipelineObjectMap$key": null, + "PollForTaskInput$hostname": "

    The public DNS name of the calling task runner.

    ", + "PutPipelineDefinitionInput$pipelineId": "

    The ID of the pipeline.

    ", + "QueryObjectsInput$pipelineId": "

    The ID of the pipeline.

    ", + "RemoveTagsInput$pipelineId": "

    The ID of the pipeline.

    ", + "ReportTaskRunnerHeartbeatInput$taskrunnerId": "

    The ID of the task runner. This value should be unique across your AWS account. In the case of AWS Data Pipeline Task Runner launched on a resource managed by AWS Data Pipeline, the web service provides a unique identifier when it launches the application. If you have written a custom task runner, you should assign a unique identifier for the task runner.

    ", + "ReportTaskRunnerHeartbeatInput$hostname": "

    The public DNS name of the task runner.

    ", + "SetStatusInput$pipelineId": "

    The ID of the pipeline that contains the objects.

    ", + "TaskObject$pipelineId": "

    The ID of the pipeline that provided the task.

    ", + "TaskObject$attemptId": "

    The ID of the pipeline task attempt object. AWS Data Pipeline uses this value to track how many times a task is attempted.

    ", + "ValidatePipelineDefinitionInput$pipelineId": "

    The ID of the pipeline.

    ", + "ValidationError$id": "

    The identifier of the object that contains the validation error.

    ", + "ValidationWarning$id": "

    The identifier of the object that contains the validation warning.

    ", + "idList$member": null + } + }, + "idList": { + "base": null, + "refs": { + "DescribeObjectsInput$objectIds": "

    The IDs of the pipeline objects that contain the definitions to be described. You can pass as many as 25 identifiers in a single call to DescribeObjects.

    ", + "DescribePipelinesInput$pipelineIds": "

    The IDs of the pipelines to describe. You can pass as many as 25 identifiers in a single call. To obtain pipeline IDs, call ListPipelines.

    ", + "QueryObjectsOutput$ids": "

    The identifiers that match the query selectors.

    ", + "SetStatusInput$objectIds": "

    The IDs of the objects. The corresponding objects can be either physical or components, but not a mix of both types.

    " + } + }, + "int": { + "base": null, + "refs": { + "QueryObjectsInput$limit": "

    The maximum number of object names that QueryObjects will return in a single call. The default value is 100.

    " + } + }, + "longString": { + "base": null, + "refs": { + "EvaluateExpressionInput$expression": "

    The expression to evaluate.

    ", + "EvaluateExpressionOutput$evaluatedExpression": "

    The evaluated expression.

    " + } + }, + "pipelineList": { + "base": null, + "refs": { + "ListPipelinesOutput$pipelineIdList": "

    The pipeline identifiers. If you require additional information about the pipelines, you can use these identifiers to call DescribePipelines and GetPipelineDefinition.

    " + } + }, + "string": { + "base": null, + "refs": { + "CreatePipelineInput$description": "

    The description for the pipeline.

    ", + "DescribeObjectsInput$marker": "

    The starting point for the results to be returned. For the first call, this value should be empty. As long as there are more results, continue to call DescribeObjects with the marker value from the previous call to retrieve the next set of results.

    ", + "DescribeObjectsOutput$marker": "

    The starting point for the next page of results. To view the next page of results, call DescribeObjects again with this marker value. If the value is null, there are no more results.

    ", + "GetPipelineDefinitionInput$version": "

    The version of the pipeline definition to retrieve. Set this parameter to latest (default) to use the last definition saved to the pipeline or active to use the last definition that was activated.

    ", + "InstanceIdentity$document": "

    A description of an EC2 instance that is generated when the instance is launched and exposed to the instance via the instance metadata service in the form of a JSON representation of an object.

    ", + "InstanceIdentity$signature": "

    A signature which can be used to verify the accuracy and authenticity of the information provided in the instance identity document.

    ", + "ListPipelinesInput$marker": "

    The starting point for the results to be returned. For the first call, this value should be empty. As long as there are more results, continue to call ListPipelines with the marker value from the previous call to retrieve the next set of results.

    ", + "ListPipelinesOutput$marker": "

    The starting point for the next page of results. To view the next page of results, call ListPipelinesOutput again with this marker value. If the value is null, there are no more results.

    ", + "PipelineDescription$description": "

    Description of the pipeline.

    ", + "PollForTaskInput$workerGroup": "

    The type of task the task runner is configured to accept and process. The worker group is set as a field on objects in the pipeline when they are created. You can only specify a single value for workerGroup in the call to PollForTask. There are no wildcard values permitted in workerGroup; the string must be an exact, case-sensitive, match.

    ", + "QueryObjectsInput$sphere": "

    Indicates whether the query applies to components or instances. The possible values are: COMPONENT, INSTANCE, and ATTEMPT.

    ", + "QueryObjectsInput$marker": "

    The starting point for the results to be returned. For the first call, this value should be empty. As long as there are more results, continue to call QueryObjects with the marker value from the previous call to retrieve the next set of results.

    ", + "QueryObjectsOutput$marker": "

    The starting point for the next page of results. To view the next page of results, call QueryObjects again with this marker value. If the value is null, there are no more results.

    ", + "ReportTaskRunnerHeartbeatInput$workerGroup": "

    The type of task the task runner is configured to accept and process. The worker group is set as a field on objects in the pipeline when they are created. You can only specify a single value for workerGroup. There are no wildcard values permitted in workerGroup; the string must be an exact, case-sensitive, match.

    ", + "Selector$fieldName": "

    The name of the field that the operator will be applied to. The field name is the \"key\" portion of the field definition in the pipeline definition syntax that is used by the AWS Data Pipeline API. If the field is not set on the object, the condition fails.

    ", + "SetStatusInput$status": "

    The status to be set on all the objects specified in objectIds. For components, use PAUSE or RESUME. For instances, use TRY_CANCEL, RERUN, or MARK_FINISHED.

    ", + "SetTaskStatusInput$errorId": "

    If an error occurred during the task, this value specifies the error code. This value is set on the physical attempt object. It is used to display error information to the user. It should not start with string \"Service_\" which is reserved by the system.

    ", + "SetTaskStatusInput$errorStackTrace": "

    If an error occurred during the task, this value specifies the stack trace associated with the error. This value is set on the physical attempt object. It is used to display error information to the user. The web service does not parse this value.

    ", + "stringList$member": null + } + }, + "stringList": { + "base": null, + "refs": { + "Operator$values": "

    The value that the actual field value will be compared with.

    ", + "RemoveTagsInput$tagKeys": "

    The keys of the tags to remove.

    " + } + }, + "tagKey": { + "base": null, + "refs": { + "Tag$key": "

    The key name of a tag defined by a user. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    " + } + }, + "tagList": { + "base": null, + "refs": { + "AddTagsInput$tags": "

    The tags to add, as key/value pairs.

    ", + "CreatePipelineInput$tags": "

    A list of tags to associate with the pipeline at creation. Tags let you control access to pipelines. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    ", + "PipelineDescription$tags": "

    A list of tags to associated with a pipeline. Tags let you control access to pipelines. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    " + } + }, + "tagValue": { + "base": null, + "refs": { + "Tag$value": "

    The optional value portion of a tag defined by a user. For more information, see Controlling User Access to Pipelines in the AWS Data Pipeline Developer Guide.

    " + } + }, + "taskId": { + "base": null, + "refs": { + "ReportTaskProgressInput$taskId": "

    The ID of the task assigned to the task runner. This value is provided in the response for PollForTask.

    ", + "SetTaskStatusInput$taskId": "

    The ID of the task assigned to the task runner. This value is provided in the response for PollForTask.

    ", + "TaskObject$taskId": "

    An internal identifier for the task. This ID is passed to the SetTaskStatus and ReportTaskProgress actions.

    " + } + }, + "timestamp": { + "base": null, + "refs": { + "ActivatePipelineInput$startTimestamp": "

    The date and time to resume the pipeline. By default, the pipeline resumes from the last completed execution.

    " + } + }, + "validationMessage": { + "base": null, + "refs": { + "validationMessages$member": null + } + }, + "validationMessages": { + "base": null, + "refs": { + "ValidationError$errors": "

    A description of the validation error.

    ", + "ValidationWarning$warnings": "

    A description of the validation warning.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/datapipeline/2012-10-29/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,26 @@ +{ + "pagination": { + "ListPipelines": { + "input_token": "marker", + "output_token": "marker", + "more_results": "hasMoreResults", + "result_key": "pipelineIdList" + }, + "DescribeObjects": { + "input_token": "marker", + "output_token": "marker", + "more_results": "hasMoreResults", + "result_key": "pipelineObjects" + }, + "DescribePipelines": { + "result_key": "pipelineDescriptionList" + }, + "QueryObjects": { + "input_token": "marker", + "output_token": "marker", + "more_results": "hasMoreResults", + "limit_key": "limit", + "result_key": "ids" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1542 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-06-23", + "endpointPrefix":"devicefarm", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Device Farm", + 
"signatureVersion":"v4", + "targetPrefix":"DeviceFarm_20150623" + }, + "operations":{ + "CreateDevicePool":{ + "name":"CreateDevicePool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDevicePoolRequest"}, + "output":{"shape":"CreateDevicePoolResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "CreateUpload":{ + "name":"CreateUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUploadRequest"}, + "output":{"shape":"CreateUploadResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "DeleteDevicePool":{ + "name":"DeleteDevicePool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDevicePoolRequest"}, + "output":{"shape":"DeleteDevicePoolResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "DeleteRun":{ + "name":"DeleteRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRunRequest"}, + 
"output":{"shape":"DeleteRunResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "DeleteUpload":{ + "name":"DeleteUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUploadRequest"}, + "output":{"shape":"DeleteUploadResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetAccountSettings":{ + "name":"GetAccountSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccountSettingsRequest"}, + "output":{"shape":"GetAccountSettingsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetDevice":{ + "name":"GetDevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeviceRequest"}, + "output":{"shape":"GetDeviceResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetDevicePool":{ + "name":"GetDevicePool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDevicePoolRequest"}, + "output":{"shape":"GetDevicePoolResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetDevicePoolCompatibility":{ + "name":"GetDevicePoolCompatibility", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDevicePoolCompatibilityRequest"}, + "output":{"shape":"GetDevicePoolCompatibilityResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"ServiceAccountException"} + ] + }, + "GetJob":{ + "name":"GetJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetJobRequest"}, + "output":{"shape":"GetJobResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetProject":{ + "name":"GetProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetProjectRequest"}, + "output":{"shape":"GetProjectResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetRun":{ + "name":"GetRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRunRequest"}, + "output":{"shape":"GetRunResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetSuite":{ + "name":"GetSuite", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSuiteRequest"}, + "output":{"shape":"GetSuiteResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetTest":{ + "name":"GetTest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTestRequest"}, + "output":{"shape":"GetTestResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "GetUpload":{ + "name":"GetUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUploadRequest"}, + "output":{"shape":"GetUploadResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"ServiceAccountException"} + ] + }, + "ListArtifacts":{ + "name":"ListArtifacts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListArtifactsRequest"}, + "output":{"shape":"ListArtifactsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListDevicePools":{ + "name":"ListDevicePools", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDevicePoolsRequest"}, + "output":{"shape":"ListDevicePoolsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListDevices":{ + "name":"ListDevices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDevicesRequest"}, + "output":{"shape":"ListDevicesResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListJobsRequest"}, + "output":{"shape":"ListJobsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListRuns":{ + "name":"ListRuns", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRunsRequest"}, + "output":{"shape":"ListRunsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + 
{"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListSamples":{ + "name":"ListSamples", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSamplesRequest"}, + "output":{"shape":"ListSamplesResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListSuites":{ + "name":"ListSuites", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSuitesRequest"}, + "output":{"shape":"ListSuitesResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListTests":{ + "name":"ListTests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTestsRequest"}, + "output":{"shape":"ListTestsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListUniqueProblems":{ + "name":"ListUniqueProblems", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUniqueProblemsRequest"}, + "output":{"shape":"ListUniqueProblemsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ListUploads":{ + "name":"ListUploads", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUploadsRequest"}, + "output":{"shape":"ListUploadsResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "ScheduleRun":{ + "name":"ScheduleRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScheduleRunRequest"}, + 
"output":{"shape":"ScheduleRunResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"IdempotencyException"}, + {"shape":"ServiceAccountException"} + ] + }, + "UpdateDevicePool":{ + "name":"UpdateDevicePool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDevicePoolRequest"}, + "output":{"shape":"UpdateDevicePoolResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + }, + "UpdateProject":{ + "name":"UpdateProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateProjectRequest"}, + "output":{"shape":"UpdateProjectResult"}, + "errors":[ + {"shape":"ArgumentException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceAccountException"} + ] + } + }, + "shapes":{ + "AWSAccountNumber":{ + "type":"string", + "max":16, + "min":2 + }, + "AccountSettings":{ + "type":"structure", + "members":{ + "awsAccountNumber":{"shape":"AWSAccountNumber"}, + "unmeteredDevices":{"shape":"PurchasedDevicesMap"} + } + }, + "AmazonResourceName":{ + "type":"string", + "min":32 + }, + "AmazonResourceNames":{ + "type":"list", + "member":{"shape":"AmazonResourceName"} + }, + "ArgumentException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "Artifact":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"ArtifactType"}, + "extension":{"shape":"String"}, + "url":{"shape":"URL"} + } + }, + "ArtifactCategory":{ + "type":"string", + "enum":[ + "SCREENSHOT", + "FILE", + "LOG" + ] + }, + "ArtifactType":{ + "type":"string", + "enum":[ + "UNKNOWN", + "SCREENSHOT", + "DEVICE_LOG", + "MESSAGE_LOG", + "RESULT_LOG", + "SERVICE_LOG", + "WEBKIT_LOG", + "INSTRUMENTATION_OUTPUT", + 
"EXERCISER_MONKEY_OUTPUT", + "CALABASH_JSON_OUTPUT", + "CALABASH_PRETTY_OUTPUT", + "CALABASH_STANDARD_OUTPUT", + "CALABASH_JAVA_XML_OUTPUT", + "AUTOMATION_OUTPUT", + "APPIUM_SERVER_OUTPUT", + "APPIUM_JAVA_OUTPUT", + "APPIUM_JAVA_XML_OUTPUT", + "APPIUM_PYTHON_OUTPUT", + "APPIUM_PYTHON_XML_OUTPUT", + "EXPLORER_EVENT_LOG", + "EXPLORER_SUMMARY_LOG", + "APPLICATION_CRASH_REPORT" + ] + }, + "Artifacts":{ + "type":"list", + "member":{"shape":"Artifact"} + }, + "BillingMethod":{ + "type":"string", + "enum":[ + "METERED", + "UNMETERED" + ] + }, + "Boolean":{"type":"boolean"}, + "CPU":{ + "type":"structure", + "members":{ + "frequency":{"shape":"String"}, + "architecture":{"shape":"String"}, + "clock":{"shape":"Double"} + } + }, + "ContentType":{ + "type":"string", + "max":64, + "min":0 + }, + "Counters":{ + "type":"structure", + "members":{ + "total":{"shape":"Integer"}, + "passed":{"shape":"Integer"}, + "failed":{"shape":"Integer"}, + "warned":{"shape":"Integer"}, + "errored":{"shape":"Integer"}, + "stopped":{"shape":"Integer"}, + "skipped":{"shape":"Integer"} + } + }, + "CreateDevicePoolRequest":{ + "type":"structure", + "required":[ + "projectArn", + "name", + "rules" + ], + "members":{ + "projectArn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "description":{"shape":"Message"}, + "rules":{"shape":"Rules"} + } + }, + "CreateDevicePoolResult":{ + "type":"structure", + "members":{ + "devicePool":{"shape":"DevicePool"} + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"Name"} + } + }, + "CreateProjectResult":{ + "type":"structure", + "members":{ + "project":{"shape":"Project"} + } + }, + "CreateUploadRequest":{ + "type":"structure", + "required":[ + "projectArn", + "name", + "type" + ], + "members":{ + "projectArn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"UploadType"}, + "contentType":{"shape":"ContentType"} + } + }, + "CreateUploadResult":{ + 
"type":"structure", + "members":{ + "upload":{"shape":"Upload"} + } + }, + "DateTime":{"type":"timestamp"}, + "DeleteDevicePoolRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "DeleteDevicePoolResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "DeleteProjectResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteRunRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "DeleteRunResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteUploadRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "DeleteUploadResult":{ + "type":"structure", + "members":{ + } + }, + "Device":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "manufacturer":{"shape":"String"}, + "model":{"shape":"String"}, + "formFactor":{"shape":"DeviceFormFactor"}, + "platform":{"shape":"DevicePlatform"}, + "os":{"shape":"String"}, + "cpu":{"shape":"CPU"}, + "resolution":{"shape":"Resolution"}, + "heapSize":{"shape":"Long"}, + "memory":{"shape":"Long"}, + "image":{"shape":"String"}, + "carrier":{"shape":"String"}, + "radio":{"shape":"String"} + } + }, + "DeviceAttribute":{ + "type":"string", + "enum":[ + "ARN", + "PLATFORM", + "FORM_FACTOR", + "MANUFACTURER" + ] + }, + "DeviceFormFactor":{ + "type":"string", + "enum":[ + "PHONE", + "TABLET" + ] + }, + "DeviceMinutes":{ + "type":"structure", + "members":{ + "total":{"shape":"Double"}, + "metered":{"shape":"Double"}, + "unmetered":{"shape":"Double"} + } + }, + "DevicePlatform":{ + "type":"string", + "enum":[ + "ANDROID", + "IOS" + ] + }, + "DevicePool":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + 
"name":{"shape":"Name"}, + "description":{"shape":"Message"}, + "type":{"shape":"DevicePoolType"}, + "rules":{"shape":"Rules"} + } + }, + "DevicePoolCompatibilityResult":{ + "type":"structure", + "members":{ + "device":{"shape":"Device"}, + "compatible":{"shape":"Boolean"}, + "incompatibilityMessages":{"shape":"IncompatibilityMessages"} + } + }, + "DevicePoolCompatibilityResults":{ + "type":"list", + "member":{"shape":"DevicePoolCompatibilityResult"} + }, + "DevicePoolType":{ + "type":"string", + "enum":[ + "CURATED", + "PRIVATE" + ] + }, + "DevicePools":{ + "type":"list", + "member":{"shape":"DevicePool"} + }, + "Devices":{ + "type":"list", + "member":{"shape":"Device"} + }, + "Double":{"type":"double"}, + "ExecutionResult":{ + "type":"string", + "enum":[ + "PENDING", + "PASSED", + "WARNED", + "FAILED", + "SKIPPED", + "ERRORED", + "STOPPED" + ] + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "PENDING", + "PROCESSING", + "SCHEDULING", + "RUNNING", + "COMPLETED" + ] + }, + "Filter":{ + "type":"string", + "max":8192, + "min":0 + }, + "GetAccountSettingsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountSettingsResult":{ + "type":"structure", + "members":{ + "accountSettings":{"shape":"AccountSettings"} + } + }, + "GetDevicePoolCompatibilityRequest":{ + "type":"structure", + "required":["devicePoolArn"], + "members":{ + "devicePoolArn":{"shape":"AmazonResourceName"}, + "appArn":{"shape":"AmazonResourceName"}, + "testType":{"shape":"TestType"} + } + }, + "GetDevicePoolCompatibilityResult":{ + "type":"structure", + "members":{ + "compatibleDevices":{"shape":"DevicePoolCompatibilityResults"}, + "incompatibleDevices":{"shape":"DevicePoolCompatibilityResults"} + } + }, + "GetDevicePoolRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetDevicePoolResult":{ + "type":"structure", + "members":{ + "devicePool":{"shape":"DevicePool"} + } + }, + "GetDeviceRequest":{ + 
"type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetDeviceResult":{ + "type":"structure", + "members":{ + "device":{"shape":"Device"} + } + }, + "GetJobRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetJobResult":{ + "type":"structure", + "members":{ + "job":{"shape":"Job"} + } + }, + "GetProjectRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetProjectResult":{ + "type":"structure", + "members":{ + "project":{"shape":"Project"} + } + }, + "GetRunRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetRunResult":{ + "type":"structure", + "members":{ + "run":{"shape":"Run"} + } + }, + "GetSuiteRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetSuiteResult":{ + "type":"structure", + "members":{ + "suite":{"shape":"Suite"} + } + }, + "GetTestRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetTestResult":{ + "type":"structure", + "members":{ + "test":{"shape":"Test"} + } + }, + "GetUploadRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"} + } + }, + "GetUploadResult":{ + "type":"structure", + "members":{ + "upload":{"shape":"Upload"} + } + }, + "IdempotencyException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "IncompatibilityMessage":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"}, + "type":{"shape":"DeviceAttribute"} + } + }, + "IncompatibilityMessages":{ + "type":"list", + "member":{"shape":"IncompatibilityMessage"} + }, + "Integer":{"type":"integer"}, + "Job":{ + "type":"structure", + "members":{ + 
"arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"TestType"}, + "created":{"shape":"DateTime"}, + "status":{"shape":"ExecutionStatus"}, + "result":{"shape":"ExecutionResult"}, + "started":{"shape":"DateTime"}, + "stopped":{"shape":"DateTime"}, + "counters":{"shape":"Counters"}, + "message":{"shape":"Message"}, + "device":{"shape":"Device"}, + "deviceMinutes":{"shape":"DeviceMinutes"} + } + }, + "Jobs":{ + "type":"list", + "member":{"shape":"Job"} + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "ListArtifactsRequest":{ + "type":"structure", + "required":[ + "arn", + "type" + ], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "type":{"shape":"ArtifactCategory"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListArtifactsResult":{ + "type":"structure", + "members":{ + "artifacts":{"shape":"Artifacts"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListDevicePoolsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "type":{"shape":"DevicePoolType"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListDevicePoolsResult":{ + "type":"structure", + "members":{ + "devicePools":{"shape":"DevicePools"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListDevicesRequest":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListDevicesResult":{ + "type":"structure", + "members":{ + "devices":{"shape":"Devices"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListJobsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListJobsResult":{ + "type":"structure", + "members":{ + "jobs":{"shape":"Jobs"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListProjectsRequest":{ + 
"type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListProjectsResult":{ + "type":"structure", + "members":{ + "projects":{"shape":"Projects"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListRunsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListRunsResult":{ + "type":"structure", + "members":{ + "runs":{"shape":"Runs"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListSamplesRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListSamplesResult":{ + "type":"structure", + "members":{ + "samples":{"shape":"Samples"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListSuitesRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListSuitesResult":{ + "type":"structure", + "members":{ + "suites":{"shape":"Suites"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListTestsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListTestsResult":{ + "type":"structure", + "members":{ + "tests":{"shape":"Tests"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListUniqueProblemsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListUniqueProblemsResult":{ + "type":"structure", + "members":{ + "uniqueProblems":{"shape":"UniqueProblemsByExecutionResultMap"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListUploadsRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + 
"arn":{"shape":"AmazonResourceName"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListUploadsResult":{ + "type":"structure", + "members":{ + "uploads":{"shape":"Uploads"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "Location":{ + "type":"structure", + "required":[ + "latitude", + "longitude" + ], + "members":{ + "latitude":{"shape":"Double"}, + "longitude":{"shape":"Double"} + } + }, + "Long":{"type":"long"}, + "Message":{ + "type":"string", + "max":8192, + "min":0 + }, + "Metadata":{ + "type":"string", + "max":8192, + "min":0 + }, + "Name":{ + "type":"string", + "max":256, + "min":0 + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "min":4 + }, + "Problem":{ + "type":"structure", + "members":{ + "run":{"shape":"ProblemDetail"}, + "job":{"shape":"ProblemDetail"}, + "suite":{"shape":"ProblemDetail"}, + "test":{"shape":"ProblemDetail"}, + "device":{"shape":"Device"}, + "result":{"shape":"ExecutionResult"}, + "message":{"shape":"Message"} + } + }, + "ProblemDetail":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"} + } + }, + "Problems":{ + "type":"list", + "member":{"shape":"Problem"} + }, + "Project":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "created":{"shape":"DateTime"} + } + }, + "Projects":{ + "type":"list", + "member":{"shape":"Project"} + }, + "PurchasedDevicesMap":{ + "type":"map", + "key":{"shape":"DevicePlatform"}, + "value":{"shape":"Integer"} + }, + "Radios":{ + "type":"structure", + "members":{ + "wifi":{"shape":"Boolean"}, + "bluetooth":{"shape":"Boolean"}, + "nfc":{"shape":"Boolean"}, + "gps":{"shape":"Boolean"} + } + }, + "Resolution":{ + "type":"structure", + "members":{ + "width":{"shape":"Integer"}, + "height":{"shape":"Integer"} + } + }, + "Rule":{ + "type":"structure", + 
"members":{ + "attribute":{"shape":"DeviceAttribute"}, + "operator":{"shape":"RuleOperator"}, + "value":{"shape":"String"} + } + }, + "RuleOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "LESS_THAN", + "GREATER_THAN", + "IN", + "NOT_IN" + ] + }, + "Rules":{ + "type":"list", + "member":{"shape":"Rule"} + }, + "Run":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"TestType"}, + "platform":{"shape":"DevicePlatform"}, + "created":{"shape":"DateTime"}, + "status":{"shape":"ExecutionStatus"}, + "result":{"shape":"ExecutionResult"}, + "started":{"shape":"DateTime"}, + "stopped":{"shape":"DateTime"}, + "counters":{"shape":"Counters"}, + "message":{"shape":"Message"}, + "totalJobs":{"shape":"Integer"}, + "completedJobs":{"shape":"Integer"}, + "billingMethod":{"shape":"BillingMethod"}, + "deviceMinutes":{"shape":"DeviceMinutes"} + } + }, + "Runs":{ + "type":"list", + "member":{"shape":"Run"} + }, + "Sample":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "type":{"shape":"SampleType"}, + "url":{"shape":"URL"} + } + }, + "SampleType":{ + "type":"string", + "enum":[ + "CPU", + "MEMORY", + "THREADS", + "RX_RATE", + "TX_RATE", + "RX", + "TX", + "NATIVE_FRAMES", + "NATIVE_FPS", + "NATIVE_MIN_DRAWTIME", + "NATIVE_AVG_DRAWTIME", + "NATIVE_MAX_DRAWTIME", + "OPENGL_FRAMES", + "OPENGL_FPS", + "OPENGL_MIN_DRAWTIME", + "OPENGL_AVG_DRAWTIME", + "OPENGL_MAX_DRAWTIME" + ] + }, + "Samples":{ + "type":"list", + "member":{"shape":"Sample"} + }, + "ScheduleRunConfiguration":{ + "type":"structure", + "members":{ + "extraDataPackageArn":{"shape":"AmazonResourceName"}, + "networkProfileArn":{"shape":"AmazonResourceName"}, + "locale":{"shape":"String"}, + "location":{"shape":"Location"}, + "radios":{"shape":"Radios"}, + "auxiliaryApps":{"shape":"AmazonResourceNames"}, + "billingMethod":{"shape":"BillingMethod"} + } + }, + "ScheduleRunRequest":{ + "type":"structure", + "required":[ + 
"projectArn", + "devicePoolArn", + "test" + ], + "members":{ + "projectArn":{"shape":"AmazonResourceName"}, + "appArn":{"shape":"AmazonResourceName"}, + "devicePoolArn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "test":{"shape":"ScheduleRunTest"}, + "configuration":{"shape":"ScheduleRunConfiguration"} + } + }, + "ScheduleRunResult":{ + "type":"structure", + "members":{ + "run":{"shape":"Run"} + } + }, + "ScheduleRunTest":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{"shape":"TestType"}, + "testPackageArn":{"shape":"AmazonResourceName"}, + "filter":{"shape":"Filter"}, + "parameters":{"shape":"TestParameters"} + } + }, + "ServiceAccountException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, + "String":{"type":"string"}, + "Suite":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"TestType"}, + "created":{"shape":"DateTime"}, + "status":{"shape":"ExecutionStatus"}, + "result":{"shape":"ExecutionResult"}, + "started":{"shape":"DateTime"}, + "stopped":{"shape":"DateTime"}, + "counters":{"shape":"Counters"}, + "message":{"shape":"Message"}, + "deviceMinutes":{"shape":"DeviceMinutes"} + } + }, + "Suites":{ + "type":"list", + "member":{"shape":"Suite"} + }, + "Test":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "type":{"shape":"TestType"}, + "created":{"shape":"DateTime"}, + "status":{"shape":"ExecutionStatus"}, + "result":{"shape":"ExecutionResult"}, + "started":{"shape":"DateTime"}, + "stopped":{"shape":"DateTime"}, + "counters":{"shape":"Counters"}, + "message":{"shape":"Message"}, + "deviceMinutes":{"shape":"DeviceMinutes"} + } + }, + "TestParameters":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "TestType":{ + "type":"string", + "enum":[ + "BUILTIN_FUZZ", + "BUILTIN_EXPLORER", + "APPIUM_JAVA_JUNIT", + 
"APPIUM_JAVA_TESTNG", + "APPIUM_PYTHON", + "APPIUM_WEB_JAVA_JUNIT", + "APPIUM_WEB_JAVA_TESTNG", + "APPIUM_WEB_PYTHON", + "CALABASH", + "INSTRUMENTATION", + "UIAUTOMATION", + "UIAUTOMATOR", + "XCTEST" + ] + }, + "Tests":{ + "type":"list", + "member":{"shape":"Test"} + }, + "URL":{ + "type":"string", + "max":2048, + "min":0 + }, + "UniqueProblem":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"}, + "problems":{"shape":"Problems"} + } + }, + "UniqueProblems":{ + "type":"list", + "member":{"shape":"UniqueProblem"} + }, + "UniqueProblemsByExecutionResultMap":{ + "type":"map", + "key":{"shape":"ExecutionResult"}, + "value":{"shape":"UniqueProblems"} + }, + "UpdateDevicePoolRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "description":{"shape":"Message"}, + "rules":{"shape":"Rules"} + } + }, + "UpdateDevicePoolResult":{ + "type":"structure", + "members":{ + "devicePool":{"shape":"DevicePool"} + } + }, + "UpdateProjectRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"} + } + }, + "UpdateProjectResult":{ + "type":"structure", + "members":{ + "project":{"shape":"Project"} + } + }, + "Upload":{ + "type":"structure", + "members":{ + "arn":{"shape":"AmazonResourceName"}, + "name":{"shape":"Name"}, + "created":{"shape":"DateTime"}, + "type":{"shape":"UploadType"}, + "status":{"shape":"UploadStatus"}, + "url":{"shape":"URL"}, + "metadata":{"shape":"Metadata"}, + "contentType":{"shape":"ContentType"}, + "message":{"shape":"Message"} + } + }, + "UploadStatus":{ + "type":"string", + "enum":[ + "INITIALIZED", + "PROCESSING", + "SUCCEEDED", + "FAILED" + ] + }, + "UploadType":{ + "type":"string", + "enum":[ + "ANDROID_APP", + "IOS_APP", + "WEB_APP", + "EXTERNAL_DATA", + "APPIUM_JAVA_JUNIT_TEST_PACKAGE", + "APPIUM_JAVA_TESTNG_TEST_PACKAGE", + "APPIUM_PYTHON_TEST_PACKAGE", + 
"APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE", + "APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE", + "APPIUM_WEB_PYTHON_TEST_PACKAGE", + "CALABASH_TEST_PACKAGE", + "INSTRUMENTATION_TEST_PACKAGE", + "UIAUTOMATION_TEST_PACKAGE", + "UIAUTOMATOR_TEST_PACKAGE", + "XCTEST_TEST_PACKAGE" + ] + }, + "Uploads":{ + "type":"list", + "member":{"shape":"Upload"} + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1006 @@ +{ + "version": "2.0", + "service": "

    AWS Device Farm is a service that enables mobile app developers to test Android, iOS, and Fire OS apps on physical phones, tablets, and other devices in the cloud.

    ", + "operations": { + "CreateDevicePool": "

    Creates a device pool.

    ", + "CreateProject": "

    Creates a new project.

    ", + "CreateUpload": "

    Uploads an app or test scripts.

    ", + "DeleteDevicePool": "

    Deletes a device pool given the pool ARN. Does not allow deletion of curated pools owned by the system.

    ", + "DeleteProject": "

    Deletes an AWS Device Farm project, given the project ARN.

    Note Deleting this resource does not stop an in-progress run.

    ", + "DeleteRun": "

    Deletes the run, given the run ARN.

    Note Deleting this resource does not stop an in-progress run.

    ", + "DeleteUpload": "

    Deletes an upload given the upload ARN.

    ", + "GetAccountSettings": "

    Returns the number of unmetered iOS and/or unmetered Android devices that have been purchased by the account.

    ", + "GetDevice": "

    Gets information about a unique device type.

    ", + "GetDevicePool": "

    Gets information about a device pool.

    ", + "GetDevicePoolCompatibility": "

    Gets information about compatibility with a device pool.

    ", + "GetJob": "

    Gets information about a job.

    ", + "GetProject": "

    Gets information about a project.

    ", + "GetRun": "

    Gets information about a run.

    ", + "GetSuite": "

    Gets information about a suite.

    ", + "GetTest": "

    Gets information about a test.

    ", + "GetUpload": "

    Gets information about an upload.

    ", + "ListArtifacts": "

    Gets information about artifacts.

    ", + "ListDevicePools": "

    Gets information about device pools.

    ", + "ListDevices": "

    Gets information about unique device types.

    ", + "ListJobs": "

    Gets information about jobs.

    ", + "ListProjects": "

    Gets information about projects.

    ", + "ListRuns": "

    Gets information about runs.

    ", + "ListSamples": "

    Gets information about samples.

    ", + "ListSuites": "

    Gets information about suites.

    ", + "ListTests": "

    Gets information about tests.

    ", + "ListUniqueProblems": "

    Gets information about unique problems.

    ", + "ListUploads": "

    Gets information about uploads.

    ", + "ScheduleRun": "

    Schedules a run.

    ", + "UpdateDevicePool": "

    Modifies the name, description, and rules in a device pool given the attributes and the pool ARN. Rule updates are all-or-nothing, meaning they can only be updated as a whole (or not at all).

    ", + "UpdateProject": "

    Modifies the specified project name, given the project ARN and a new name.

    " + }, + "shapes": { + "AWSAccountNumber": { + "base": null, + "refs": { + "AccountSettings$awsAccountNumber": "

    The AWS account number specified in the AccountSettings container.

    " + } + }, + "AccountSettings": { + "base": "

    A container for account-level settings within AWS Device Farm.

    ", + "refs": { + "GetAccountSettingsResult$accountSettings": null + } + }, + "AmazonResourceName": { + "base": null, + "refs": { + "AmazonResourceNames$member": null, + "Artifact$arn": "

    The artifact's ARN.

    ", + "CreateDevicePoolRequest$projectArn": "

    The ARN of the project for the device pool.

    ", + "CreateUploadRequest$projectArn": "

    The ARN of the project for the upload.

    ", + "DeleteDevicePoolRequest$arn": "

    Represents the Amazon Resource Name (ARN) of the Device Farm device pool you wish to delete.

    ", + "DeleteProjectRequest$arn": "

    Represents the Amazon Resource Name (ARN) of the Device Farm project you wish to delete.

    ", + "DeleteRunRequest$arn": "

    The Amazon Resource Name (ARN) for the run you wish to delete.

    ", + "DeleteUploadRequest$arn": "

    Represents the Amazon Resource Name (ARN) of the Device Farm upload you wish to delete.

    ", + "Device$arn": "

    The device's ARN.

    ", + "DevicePool$arn": "

    The device pool's ARN.

    ", + "GetDevicePoolCompatibilityRequest$devicePoolArn": "

    The device pool's ARN.

    ", + "GetDevicePoolCompatibilityRequest$appArn": "

    The ARN of the app that is associated with the specified device pool.

    ", + "GetDevicePoolRequest$arn": "

    The device pool's ARN.

    ", + "GetDeviceRequest$arn": "

    The device type's ARN.

    ", + "GetJobRequest$arn": "

    The job's ARN.

    ", + "GetProjectRequest$arn": "

    The project's ARN.

    ", + "GetRunRequest$arn": "

    The run's ARN.

    ", + "GetSuiteRequest$arn": "

    The suite's ARN.

    ", + "GetTestRequest$arn": "

    The test's ARN.

    ", + "GetUploadRequest$arn": "

    The upload's ARN.

    ", + "Job$arn": "

    The job's ARN.

    ", + "ListArtifactsRequest$arn": "

    The Run, Job, Suite, or Test ARN.

    ", + "ListDevicePoolsRequest$arn": "

    The project ARN.

    ", + "ListDevicesRequest$arn": "

    The device types' ARNs.

    ", + "ListJobsRequest$arn": "

    The jobs' ARNs.

    ", + "ListProjectsRequest$arn": "

    The projects' ARNs.

    ", + "ListRunsRequest$arn": "

    The runs' ARNs.

    ", + "ListSamplesRequest$arn": "

    The samples' ARNs.

    ", + "ListSuitesRequest$arn": "

    The suites' ARNs.

    ", + "ListTestsRequest$arn": "

    The tests' ARNs.

    ", + "ListUniqueProblemsRequest$arn": "

    The unique problems' ARNs.

    ", + "ListUploadsRequest$arn": "

    The uploads' ARNs.

    ", + "ProblemDetail$arn": "

    The problem detail's ARN.

    ", + "Project$arn": "

    The project's ARN.

    ", + "Run$arn": "

    The run's ARN.

    ", + "Sample$arn": "

    The sample's ARN.

    ", + "ScheduleRunConfiguration$extraDataPackageArn": "

    The ARN of the extra data for the run. The extra data is a .zip file that AWS Device Farm will extract to external data for Android or the app's sandbox for iOS.

    ", + "ScheduleRunConfiguration$networkProfileArn": "

    Reserved for internal use.

    ", + "ScheduleRunRequest$projectArn": "

    The ARN of the project for the run to be scheduled.

    ", + "ScheduleRunRequest$appArn": "

    The ARN of the app to schedule a run.

    ", + "ScheduleRunRequest$devicePoolArn": "

    The ARN of the device pool for the run to be scheduled.

    ", + "ScheduleRunTest$testPackageArn": "

    The ARN of the uploaded test that will be run.

    ", + "Suite$arn": "

    The suite's ARN.

    ", + "Test$arn": "

    The test's ARN.

    ", + "UpdateDevicePoolRequest$arn": "

    The Amazon Resource Name (ARN) of the Device Farm device pool you wish to update.

    ", + "UpdateProjectRequest$arn": "

    The Amazon Resource Name (ARN) of the project whose name you wish to update.

    ", + "Upload$arn": "

    The upload's ARN.

    " + } + }, + "AmazonResourceNames": { + "base": null, + "refs": { + "ScheduleRunConfiguration$auxiliaryApps": "

    A list of auxiliary apps for the run.

    " + } + }, + "ArgumentException": { + "base": "

    An invalid argument was specified.

    ", + "refs": { + } + }, + "Artifact": { + "base": "

    Represents the output of a test. Examples of artifacts include logs and screenshots.

    ", + "refs": { + "Artifacts$member": null + } + }, + "ArtifactCategory": { + "base": null, + "refs": { + "ListArtifactsRequest$type": "

    The artifacts' type.

    Allowed values include:

    • FILE: The artifacts are files.
    • LOG: The artifacts are logs.
    • SCREENSHOT: The artifacts are screenshots.
    " + } + }, + "ArtifactType": { + "base": null, + "refs": { + "Artifact$type": "

    The artifact's type.

    Allowed values include the following:

    • APPIUM_JAVA_OUTPUT: The Appium Java output type.

    • APPIUM_JAVA_XML_OUTPUT: The Appium Java XML output type.

    • APPIUM_PYTHON_OUTPUT: The Appium Python output type.

    • APPIUM_PYTHON_XML_OUTPUT: The Appium Python XML output type.

    • APPIUM_SERVER_OUTPUT: The Appium server output type.

    • AUTOMATION_OUTPUT: The automation output type.

    • CALABASH_JSON_OUTPUT: The Calabash JSON output type.

    • CALABASH_JAVA_XML_OUTPUT: The Calabash Java XML output type.

    • CALABASH_PRETTY_OUTPUT: The Calabash pretty output type.

    • CALABASH_STANDARD_OUTPUT: The Calabash standard output type.

    • DEVICE_LOG: The device log type.

    • EXERCISER_MONKEY_OUTPUT: For Android, the artifact (log) generated by an Android fuzz test.

    • INSTRUMENTATION_OUTPUT: The instrumentation type.

    • MESSAGE_LOG: The message log type.

    • RESULT_LOG: The result log type.

    • SCREENSHOT: The screenshot type.

    • SERVICE_LOG: The service log type.

    • UNKNOWN: An unknown type.

    " + } + }, + "Artifacts": { + "base": null, + "refs": { + "ListArtifactsResult$artifacts": "

    Information about the artifacts.

    " + } + }, + "BillingMethod": { + "base": null, + "refs": { + "Run$billingMethod": "

    Specifies the billing method for a test run: metered or unmetered. If the parameter is not specified, the default value is unmetered.

    ", + "ScheduleRunConfiguration$billingMethod": "

    Specifies the billing method for a test run: metered or unmetered. If the parameter is not specified, the default value is unmetered.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "DevicePoolCompatibilityResult$compatible": "

    Whether the result was compatible with the device pool.

    ", + "Radios$wifi": "

    True if Wi-Fi is enabled at the beginning of the test; otherwise, false.

    ", + "Radios$bluetooth": "

    True if Bluetooth is enabled at the beginning of the test; otherwise, false.

    ", + "Radios$nfc": "

    True if NFC is enabled at the beginning of the test; otherwise, false.

    ", + "Radios$gps": "

    True if GPS is enabled at the beginning of the test; otherwise, false.

    " + } + }, + "CPU": { + "base": "

    Represents the amount of CPU that an app is using on a physical device.

    Note that this does not represent system-wide CPU usage.

    ", + "refs": { + "Device$cpu": "

    Information about the device's CPU.

    " + } + }, + "ContentType": { + "base": null, + "refs": { + "CreateUploadRequest$contentType": "

    The upload's content type (for example, \"application/octet-stream\").

    ", + "Upload$contentType": "

    The upload's content type (for example, \"application/octet-stream\").

    " + } + }, + "Counters": { + "base": "

    Represents entity counters.

    ", + "refs": { + "Job$counters": "

    The job's result counters.

    ", + "Run$counters": "

    The run's result counters.

    ", + "Suite$counters": "

    The suite's result counters.

    ", + "Test$counters": "

    The test's result counters.

    " + } + }, + "CreateDevicePoolRequest": { + "base": "

    Represents a request to the create device pool operation.

    ", + "refs": { + } + }, + "CreateDevicePoolResult": { + "base": "

    Represents the result of a create device pool request.

    ", + "refs": { + } + }, + "CreateProjectRequest": { + "base": "

    Represents a request to the create project operation.

    ", + "refs": { + } + }, + "CreateProjectResult": { + "base": "

    Represents the result of a create project request.

    ", + "refs": { + } + }, + "CreateUploadRequest": { + "base": "

    Represents a request to the create upload operation.

    ", + "refs": { + } + }, + "CreateUploadResult": { + "base": "

    Represents the result of a create upload request.

    ", + "refs": { + } + }, + "DateTime": { + "base": null, + "refs": { + "Job$created": "

    When the job was created.

    ", + "Job$started": "

    The job's start time.

    ", + "Job$stopped": "

    The job's stop time.

    ", + "Project$created": "

    When the project was created.

    ", + "Run$created": "

    When the run was created.

    ", + "Run$started": "

    The run's start time.

    ", + "Run$stopped": "

    The run's stop time.

    ", + "Suite$created": "

    When the suite was created.

    ", + "Suite$started": "

    The suite's start time.

    ", + "Suite$stopped": "

    The suite's stop time.

    ", + "Test$created": "

    When the test was created.

    ", + "Test$started": "

    The test's start time.

    ", + "Test$stopped": "

    The test's stop time.

    ", + "Upload$created": "

    When the upload was created.

    " + } + }, + "DeleteDevicePoolRequest": { + "base": "

    Represents a request to the delete device pool operation.

    ", + "refs": { + } + }, + "DeleteDevicePoolResult": { + "base": "

    Represents the result of a delete device pool request.

    ", + "refs": { + } + }, + "DeleteProjectRequest": { + "base": "

    Represents a request to the delete project operation.

    ", + "refs": { + } + }, + "DeleteProjectResult": { + "base": "

    Represents the result of a delete project request.

    ", + "refs": { + } + }, + "DeleteRunRequest": { + "base": "

    Represents a request to the delete run operation.

    ", + "refs": { + } + }, + "DeleteRunResult": { + "base": "

    Represents the result of a delete run request.

    ", + "refs": { + } + }, + "DeleteUploadRequest": { + "base": "

    Represents a request to the delete upload operation.

    ", + "refs": { + } + }, + "DeleteUploadResult": { + "base": "

    Represents the result of a delete upload request.

    ", + "refs": { + } + }, + "Device": { + "base": "

    Represents a device type that an app is tested against.

    ", + "refs": { + "DevicePoolCompatibilityResult$device": null, + "Devices$member": null, + "GetDeviceResult$device": null, + "Job$device": null, + "Problem$device": "

    Information about the associated device.

    " + } + }, + "DeviceAttribute": { + "base": null, + "refs": { + "IncompatibilityMessage$type": "

    The type of incompatibility.

    Allowed values include:

    • ARN: The ARN.

    • FORM_FACTOR: The form factor (for example, phone or tablet).

    • MANUFACTURER: The manufacturer.

    • PLATFORM: The platform (for example, Android or iOS).

    ", + "Rule$attribute": "

    The rule's attribute.

    Allowed values include:

    • ARN: The ARN.

    • FORM_FACTOR: The form factor (for example, phone or tablet).

    • MANUFACTURER: The manufacturer.

    • PLATFORM: The platform (for example, Android or iOS).

    " + } + }, + "DeviceFormFactor": { + "base": null, + "refs": { + "Device$formFactor": "

    The device's form factor.

    Allowed values include:

    • PHONE: The phone form factor.

    • TABLET: The tablet form factor.

    " + } + }, + "DeviceMinutes": { + "base": "

    Represents the total (metered or unmetered) minutes used by the resource to run tests. Contains the sum of minutes consumed by all children.

    ", + "refs": { + "Job$deviceMinutes": "

    Represents the total (metered or unmetered) minutes used by the job.

    ", + "Run$deviceMinutes": "

    Represents the total (metered or unmetered) minutes used by the test run.

    ", + "Suite$deviceMinutes": "

    Represents the total (metered or unmetered) minutes used by the test suite.

    ", + "Test$deviceMinutes": "

    Represents the total (metered or unmetered) minutes used by the test.

    " + } + }, + "DevicePlatform": { + "base": null, + "refs": { + "Device$platform": "

    The device's platform.

    Allowed values include:

    • ANDROID: The Android platform.

    • IOS: The iOS platform.

    ", + "PurchasedDevicesMap$key": null, + "Run$platform": "

    The run's platform.

    Allowed values include:

    • ANDROID: The Android platform.

    • IOS: The iOS platform.

    " + } + }, + "DevicePool": { + "base": "

    Represents a collection of device types.

    ", + "refs": { + "CreateDevicePoolResult$devicePool": "

    The newly created device pool.

    ", + "DevicePools$member": null, + "GetDevicePoolResult$devicePool": null, + "UpdateDevicePoolResult$devicePool": null + } + }, + "DevicePoolCompatibilityResult": { + "base": "

    Represents a device pool compatibility result.

    ", + "refs": { + "DevicePoolCompatibilityResults$member": null + } + }, + "DevicePoolCompatibilityResults": { + "base": null, + "refs": { + "GetDevicePoolCompatibilityResult$compatibleDevices": "

    Information about compatible devices.

    ", + "GetDevicePoolCompatibilityResult$incompatibleDevices": "

    Information about incompatible devices.

    " + } + }, + "DevicePoolType": { + "base": null, + "refs": { + "DevicePool$type": "

    The device pool's type.

    Allowed values include:

    • CURATED: A device pool that is created and managed by AWS Device Farm.

    • PRIVATE: A device pool that is created and managed by the device pool developer.

    ", + "ListDevicePoolsRequest$type": "

    The device pools' type.

    Allowed values include:

    • CURATED: A device pool that is created and managed by AWS Device Farm.

    • PRIVATE: A device pool that is created and managed by the device pool developer.

    " + } + }, + "DevicePools": { + "base": null, + "refs": { + "ListDevicePoolsResult$devicePools": "

    Information about the device pools.

    " + } + }, + "Devices": { + "base": null, + "refs": { + "ListDevicesResult$devices": "

    Information about the devices.

    " + } + }, + "Double": { + "base": null, + "refs": { + "CPU$clock": "

    The clock speed of the device's CPU, expressed in hertz (Hz). For example, a 1.2 GHz CPU is expressed as 1200000000.

    ", + "DeviceMinutes$total": "

    When specified, represents the total minutes used by the resource to run tests.

    ", + "DeviceMinutes$metered": "

    When specified, represents only the sum of metered minutes used by the resource to run tests.

    ", + "DeviceMinutes$unmetered": "

    When specified, represents only the sum of unmetered minutes used by the resource to run tests.

    ", + "Location$latitude": "

    The latitude.

    ", + "Location$longitude": "

    The longitude.

    " + } + }, + "ExecutionResult": { + "base": null, + "refs": { + "Job$result": "

    The job's result.

    Allowed values include:

    • ERRORED: An error condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • STOPPED: A stopped condition.

    • PASSED: A passing condition.

    • PENDING: A pending condition.

    • WARNED: A warning condition.

    ", + "Problem$result": "

    The problem's result.

    Allowed values include:

    • ERRORED: An error condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • STOPPED: A stopped condition.

    • PASSED: A passing condition.

    • PENDING: A pending condition.

    • WARNED: A warning condition.

    ", + "Run$result": "

    The run's result.

    Allowed values include:

    • ERRORED: An error condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • STOPPED: A stopped condition.

    • PASSED: A passing condition.

    • PENDING: A pending condition.

    • WARNED: A warning condition.

    ", + "Suite$result": "

    The suite's result.

    Allowed values include:

    • ERRORED: An error condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • STOPPED: A stopped condition.

    • PASSED: A passing condition.

    • PENDING: A pending condition.

    • WARNED: A warning condition.

    ", + "Test$result": "

    The test's result.

    Allowed values include:

    • ERRORED: An error condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • STOPPED: A stopped condition.

    • PASSED: A passing condition.

    • PENDING: A pending condition.

    • WARNED: A warning condition.

    ", + "UniqueProblemsByExecutionResultMap$key": null + } + }, + "ExecutionStatus": { + "base": null, + "refs": { + "Job$status": "

    The job's status.

    Allowed values include:

    • COMPLETED: A completed status.

    • PENDING: A pending status.

    • PROCESSING: A processing status.

    • RUNNING: A running status.

    • SCHEDULING: A scheduling status.

    ", + "Run$status": "

    The run's status.

    Allowed values include:

    • COMPLETED: A completed status.

    • PENDING: A pending status.

    • PROCESSING: A processing status.

    • RUNNING: A running status.

    • SCHEDULING: A scheduling status.

    ", + "Suite$status": "

    The suite's status.

    Allowed values include:

    • COMPLETED: A completed status.

    • PENDING: A pending status.

    • PROCESSING: A processing status.

    • RUNNING: A running status.

    • SCHEDULING: A scheduling status.

    ", + "Test$status": "

    The test's status.

    Allowed values include:

    • COMPLETED: A completed status.

    • PENDING: A pending status.

    • PROCESSING: A processing status.

    • RUNNING: A running status.

    • SCHEDULING: A scheduling status.

    " + } + }, + "Filter": { + "base": null, + "refs": { + "ScheduleRunTest$filter": "

    The test's filter.

    " + } + }, + "GetAccountSettingsRequest": { + "base": null, + "refs": { + } + }, + "GetAccountSettingsResult": { + "base": null, + "refs": { + } + }, + "GetDevicePoolCompatibilityRequest": { + "base": "

    Represents a request to the get device pool compatibility operation.

    ", + "refs": { + } + }, + "GetDevicePoolCompatibilityResult": { + "base": "

    Represents the result of a describe device pool compatibility request.

    ", + "refs": { + } + }, + "GetDevicePoolRequest": { + "base": "

    Represents a request to the get device pool operation.

    ", + "refs": { + } + }, + "GetDevicePoolResult": { + "base": "

    Represents the result of a get device pool request.

    ", + "refs": { + } + }, + "GetDeviceRequest": { + "base": "

    Represents a request to the get device operation.

    ", + "refs": { + } + }, + "GetDeviceResult": { + "base": "

    Represents the result of a get device request.

    ", + "refs": { + } + }, + "GetJobRequest": { + "base": "

    Represents a request to the get job operation.

    ", + "refs": { + } + }, + "GetJobResult": { + "base": "

    Represents the result of a get job request.

    ", + "refs": { + } + }, + "GetProjectRequest": { + "base": "

    Represents a request to the get project operation.

    ", + "refs": { + } + }, + "GetProjectResult": { + "base": "

    Represents the result of a get project request.

    ", + "refs": { + } + }, + "GetRunRequest": { + "base": "

    Represents a request to the get run operation.

    ", + "refs": { + } + }, + "GetRunResult": { + "base": "

    Represents the result of a get run request.

    ", + "refs": { + } + }, + "GetSuiteRequest": { + "base": "

    Represents a request to the get suite operation.

    ", + "refs": { + } + }, + "GetSuiteResult": { + "base": "

    Represents the result of a get suite request.

    ", + "refs": { + } + }, + "GetTestRequest": { + "base": "

    Represents a request to the get test operation.

    ", + "refs": { + } + }, + "GetTestResult": { + "base": "

    Represents the result of a get test request.

    ", + "refs": { + } + }, + "GetUploadRequest": { + "base": "

    Represents a request to the get upload operation.

    ", + "refs": { + } + }, + "GetUploadResult": { + "base": "

    Represents the result of a get upload request.

    ", + "refs": { + } + }, + "IdempotencyException": { + "base": "

    An entity with the same name already exists.

    ", + "refs": { + } + }, + "IncompatibilityMessage": { + "base": "

    Represents information about incompatibility.

    ", + "refs": { + "IncompatibilityMessages$member": null + } + }, + "IncompatibilityMessages": { + "base": null, + "refs": { + "DevicePoolCompatibilityResult$incompatibilityMessages": "

    Information about the compatibility.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "Counters$total": "

    The total number of entities.

    ", + "Counters$passed": "

    The number of passed entities.

    ", + "Counters$failed": "

    The number of failed entities.

    ", + "Counters$warned": "

    The number of warned entities.

    ", + "Counters$errored": "

    The number of errored entities.

    ", + "Counters$stopped": "

    The number of stopped entities.

    ", + "Counters$skipped": "

    The number of skipped entities.

    ", + "PurchasedDevicesMap$value": null, + "Resolution$width": "

    The screen resolution's width, expressed in pixels.

    ", + "Resolution$height": "

    The screen resolution's height, expressed in pixels.

    ", + "Run$totalJobs": "

    The total number of jobs for the run.

    ", + "Run$completedJobs": "

    The total number of completed jobs.

    " + } + }, + "Job": { + "base": "

    Represents a device.

    ", + "refs": { + "GetJobResult$job": null, + "Jobs$member": null + } + }, + "Jobs": { + "base": null, + "refs": { + "ListJobsResult$jobs": "

    Information about the jobs.

    " + } + }, + "LimitExceededException": { + "base": "

    A limit was exceeded.

    ", + "refs": { + } + }, + "ListArtifactsRequest": { + "base": "

    Represents a request to the list artifacts operation.

    ", + "refs": { + } + }, + "ListArtifactsResult": { + "base": "

    Represents the result of a list artifacts operation.

    ", + "refs": { + } + }, + "ListDevicePoolsRequest": { + "base": "

    Represents a request to the list device pools operation.

    ", + "refs": { + } + }, + "ListDevicePoolsResult": { + "base": "

    Represents the result of a list device pools request.

    ", + "refs": { + } + }, + "ListDevicesRequest": { + "base": "

    Represents a request to the list devices operation.

    ", + "refs": { + } + }, + "ListDevicesResult": { + "base": "

    Represents the result of a list devices operation.

    ", + "refs": { + } + }, + "ListJobsRequest": { + "base": "

    Represents a request to the list jobs operation.

    ", + "refs": { + } + }, + "ListJobsResult": { + "base": "

    Represents the result of a list jobs request.

    ", + "refs": { + } + }, + "ListProjectsRequest": { + "base": "

    Represents a request to the list projects operation.

    ", + "refs": { + } + }, + "ListProjectsResult": { + "base": "

    Represents the result of a list projects request.

    ", + "refs": { + } + }, + "ListRunsRequest": { + "base": "

    Represents a request to the list runs operation.

    ", + "refs": { + } + }, + "ListRunsResult": { + "base": "

    Represents the result of a list runs request.

    ", + "refs": { + } + }, + "ListSamplesRequest": { + "base": "

    Represents a request to the list samples operation.

    ", + "refs": { + } + }, + "ListSamplesResult": { + "base": "

    Represents the result of a list samples request.

    ", + "refs": { + } + }, + "ListSuitesRequest": { + "base": "

    Represents a request to the list suites operation.

    ", + "refs": { + } + }, + "ListSuitesResult": { + "base": "

    Represents the result of a list suites request.

    ", + "refs": { + } + }, + "ListTestsRequest": { + "base": "

    Represents a request to the list tests operation.

    ", + "refs": { + } + }, + "ListTestsResult": { + "base": "

    Represents the result of a list tests request.

    ", + "refs": { + } + }, + "ListUniqueProblemsRequest": { + "base": "

    Represents a request to the list unique problems operation.

    ", + "refs": { + } + }, + "ListUniqueProblemsResult": { + "base": "

    Represents the result of a list unique problems request.

    ", + "refs": { + } + }, + "ListUploadsRequest": { + "base": "

    Represents a request to the list uploads operation.

    ", + "refs": { + } + }, + "ListUploadsResult": { + "base": "

    Represents the result of a list uploads request.

    ", + "refs": { + } + }, + "Location": { + "base": "

    Represents a latitude and longitude pair, expressed in geographic coordinate system degrees (for example 47.6204, -122.3491).

    Elevation is currently not supported.

    ", + "refs": { + "ScheduleRunConfiguration$location": "

    Information about the location that is used for the run.

    " + } + }, + "Long": { + "base": null, + "refs": { + "Device$heapSize": "

    The device's heap size, expressed in bytes.

    ", + "Device$memory": "

    The device's total memory size, expressed in bytes.

    " + } + }, + "Message": { + "base": null, + "refs": { + "ArgumentException$message": "

    Any additional information about the exception.

    ", + "CreateDevicePoolRequest$description": "

    The device pool's description.

    ", + "DevicePool$description": "

    The device pool's description.

    ", + "IdempotencyException$message": "

    Any additional information about the exception.

    ", + "IncompatibilityMessage$message": "

    A message about the incompatibility.

    ", + "Job$message": "

    A message about the job's result.

    ", + "LimitExceededException$message": "

    Any additional information about the exception.

    ", + "NotFoundException$message": "

    Any additional information about the exception.

    ", + "Problem$message": "

    A message about the problem's result.

    ", + "Run$message": "

    A message about the run's result.

    ", + "ServiceAccountException$message": "

    Any additional information about the exception.

    ", + "Suite$message": "

    A message about the suite's result.

    ", + "Test$message": "

    A message about the test's result.

    ", + "UniqueProblem$message": "

    A message about the unique problems' result.

    ", + "UpdateDevicePoolRequest$description": "

    A description of the device pool you wish to update.

    ", + "Upload$message": "

    A message about the upload's result.

    " + } + }, + "Metadata": { + "base": null, + "refs": { + "Upload$metadata": "

    The upload's metadata. For example, for Android, this contains information that is parsed from the manifest and is displayed in the AWS Device Farm console after the associated app is uploaded.

    " + } + }, + "Name": { + "base": null, + "refs": { + "Artifact$name": "

    The artifact's name.

    ", + "CreateDevicePoolRequest$name": "

    The device pool's name.

    ", + "CreateProjectRequest$name": "

    The project's name.

    ", + "CreateUploadRequest$name": "

    The upload's file name.

    ", + "Device$name": "

    The device's display name.

    ", + "DevicePool$name": "

    The device pool's name.

    ", + "Job$name": "

    The job's name.

    ", + "ProblemDetail$name": "

    The problem detail's name.

    ", + "Project$name": "

    The project's name.

    ", + "Run$name": "

    The run's name.

    ", + "ScheduleRunRequest$name": "

    The name for the run to be scheduled.

    ", + "Suite$name": "

    The suite's name.

    ", + "Test$name": "

    The test's name.

    ", + "UpdateDevicePoolRequest$name": "

    A string representing the name of the device pool you wish to update.

    ", + "UpdateProjectRequest$name": "

    A string representing the new name of the project that you are updating.

    ", + "Upload$name": "

    The upload's file name.

    " + } + }, + "NotFoundException": { + "base": "

    The specified entity was not found.

    ", + "refs": { + } + }, + "PaginationToken": { + "base": null, + "refs": { + "ListArtifactsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListArtifactsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListDevicePoolsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListDevicePoolsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListDevicesRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListDevicesResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListJobsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListJobsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListProjectsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListProjectsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListRunsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListRunsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListSamplesRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListSamplesResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListSuitesRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListSuitesResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListTestsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListTestsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListUniqueProblemsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListUniqueProblemsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    ", + "ListUploadsRequest$nextToken": "

    An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

    ", + "ListUploadsResult$nextToken": "

    If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

    " + } + }, + "Problem": { + "base": "

    Represents a specific warning or failure.

    ", + "refs": { + "Problems$member": null + } + }, + "ProblemDetail": { + "base": "

    Information about a problem detail.

    ", + "refs": { + "Problem$run": "

    Information about the associated run.

    ", + "Problem$job": "

    Information about the associated job.

    ", + "Problem$suite": "

    Information about the associated suite.

    ", + "Problem$test": "

    Information about the associated test.

    " + } + }, + "Problems": { + "base": null, + "refs": { + "UniqueProblem$problems": "

    Information about the problems.

    " + } + }, + "Project": { + "base": "

    Represents an operating-system neutral workspace for running and managing tests.

    ", + "refs": { + "CreateProjectResult$project": "

    The newly created project.

    ", + "GetProjectResult$project": null, + "Projects$member": null, + "UpdateProjectResult$project": null + } + }, + "Projects": { + "base": null, + "refs": { + "ListProjectsResult$projects": "

    Information about the projects.

    " + } + }, + "PurchasedDevicesMap": { + "base": null, + "refs": { + "AccountSettings$unmeteredDevices": "

    Returns the unmetered devices you have purchased.

    " + } + }, + "Radios": { + "base": "

    Represents the set of radios and their states on a device. Examples of radios include Wi-Fi, GPS, Bluetooth, and NFC.

    ", + "refs": { + "ScheduleRunConfiguration$radios": "

    Information about the radio states for the run.

    " + } + }, + "Resolution": { + "base": "

    Represents the screen resolution of a device in height and width, expressed in pixels.

    ", + "refs": { + "Device$resolution": null + } + }, + "Rule": { + "base": "

    Represents a condition for a device pool.

    ", + "refs": { + "Rules$member": null + } + }, + "RuleOperator": { + "base": null, + "refs": { + "Rule$operator": "

    The rule's operator.

    • EQUALS: The equals operator.

    • GREATER_THAN: The greater-than operator.

    • IN: The in operator.

    • LESS_THAN: The less-than operator.

    • NOT_IN: The not-in operator.

    " + } + }, + "Rules": { + "base": null, + "refs": { + "CreateDevicePoolRequest$rules": "

    The device pool's rules.

    ", + "DevicePool$rules": "

    Information about the device pool's rules.

    ", + "UpdateDevicePoolRequest$rules": "

    Represents the rules you wish to modify for the device pool. Updating rules is optional; however, if you choose to update rules for your request, the update will replace the existing rules.

    " + } + }, + "Run": { + "base": "

    Represents an app on a set of devices with a specific test and configuration.

    ", + "refs": { + "GetRunResult$run": null, + "Runs$member": null, + "ScheduleRunResult$run": "

    Information about the scheduled run.

    " + } + }, + "Runs": { + "base": null, + "refs": { + "ListRunsResult$runs": "

    Information about the runs.

    " + } + }, + "Sample": { + "base": "

    Represents a sample of performance data.

    ", + "refs": { + "Samples$member": null + } + }, + "SampleType": { + "base": null, + "refs": { + "Sample$type": "

    The sample's type.

    Must be one of the following values:

    • CPU: A CPU sample type. This is expressed as the app processing CPU time (including child processes) as reported by process, as a percentage.

    • MEMORY: A memory usage sample type. This is expressed as the total proportional set size of an app process, in kilobytes.

    • NATIVE_AVG_DRAWTIME

    • NATIVE_FPS

    • NATIVE_FRAMES

    • NATIVE_MAX_DRAWTIME

    • NATIVE_MIN_DRAWTIME

    • OPENGL_AVG_DRAWTIME

    • OPENGL_FPS

    • OPENGL_FRAMES

    • OPENGL_MAX_DRAWTIME

    • OPENGL_MIN_DRAWTIME

    • RX

    • RX_RATE: The total number of bytes per second (TCP and UDP) that are received, by app process.

    • THREADS: A threads sample type. This is expressed as the total number of threads per app process.

    • TX

    • TX_RATE: The total number of bytes per second (TCP and UDP) that are sent, by app process.

    " + } + }, + "Samples": { + "base": null, + "refs": { + "ListSamplesResult$samples": "

    Information about the samples.

    " + } + }, + "ScheduleRunConfiguration": { + "base": "

    Represents the settings for a run. Includes things like location, radio states, auxiliary apps, and network profiles.

    ", + "refs": { + "ScheduleRunRequest$configuration": "

    Information about the settings for the run to be scheduled.

    " + } + }, + "ScheduleRunRequest": { + "base": "

    Represents a request to the schedule run operation.

    ", + "refs": { + } + }, + "ScheduleRunResult": { + "base": "

    Represents the result of a schedule run request.

    ", + "refs": { + } + }, + "ScheduleRunTest": { + "base": "

    Represents additional test settings.

    ", + "refs": { + "ScheduleRunRequest$test": "

    Information about the test for the run to be scheduled.

    " + } + }, + "ServiceAccountException": { + "base": "

    There was a problem with the service account.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "Artifact$extension": "

    The artifact's file extension.

    ", + "CPU$frequency": "

    The CPU's frequency.

    ", + "CPU$architecture": "

    The CPU's architecture, for example x86 or ARM.

    ", + "Device$manufacturer": "

    The device's manufacturer name.

    ", + "Device$model": "

    The device's model name.

    ", + "Device$os": "

    The device's operating system type.

    ", + "Device$image": "

    The device's image name.

    ", + "Device$carrier": "

    The device's carrier.

    ", + "Device$radio": "

    The device's radio.

    ", + "Rule$value": "

    The rule's value.

    ", + "ScheduleRunConfiguration$locale": "

    Information about the locale that is used for the run.

    ", + "TestParameters$key": null, + "TestParameters$value": null + } + }, + "Suite": { + "base": "

    Represents a collection of one or more tests.

    ", + "refs": { + "GetSuiteResult$suite": null, + "Suites$member": null + } + }, + "Suites": { + "base": null, + "refs": { + "ListSuitesResult$suites": "

    Information about the suites.

    " + } + }, + "Test": { + "base": "

    Represents a condition that is evaluated.

    ", + "refs": { + "GetTestResult$test": null, + "Tests$member": null + } + }, + "TestParameters": { + "base": null, + "refs": { + "ScheduleRunTest$parameters": "

    The test's parameters, such as test framework parameters and fixture settings.

    " + } + }, + "TestType": { + "base": null, + "refs": { + "GetDevicePoolCompatibilityRequest$testType": "

    The test type for the specified device pool.

    Allowed values include the following:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The XCode test type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    ", + "Job$type": "

    The job's type.

    Allowed values include the following:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The XCode test type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    ", + "Run$type": "

    The run's type.

    Must be one of the following values:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The XCode test type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    ", + "ScheduleRunTest$type": "

    The test's type.

    Must be one of the following values:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The XCode test type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    ", + "Suite$type": "

    The suite's type.

    Must be one of the following values:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The XCode test type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    ", + "Test$type": "

    The test's type.

    Must be one of the following values:

    • BUILTIN_FUZZ: The built-in fuzz type.

    • BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.

    • APPIUM_JAVA_JUNIT: The Appium Java JUnit type.

    • APPIUM_JAVA_TESTNG: The Appium Java TestNG type.

    • APPIUM_PYTHON: The Appium Python type.

    • CALABASH: The Calabash type.

    • INSTRUMENTATION: The Instrumentation type.

    • UIAUTOMATION: The uiautomation type.

    • UIAUTOMATOR: The uiautomator type.

    • XCTEST: The XCode test type.

    • APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.

    • APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.

    • APPIUM_WEB_PYTHON: The Appium Python type for Web apps.

    " + } + }, + "Tests": { + "base": null, + "refs": { + "ListTestsResult$tests": "

    Information about the tests.

    " + } + }, + "URL": { + "base": null, + "refs": { + "Artifact$url": "

    The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the artifact's file.

    ", + "Sample$url": "

    The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the sample's file.

    ", + "Upload$url": "

    The pre-signed Amazon S3 URL that was used to store a file through a corresponding PUT request.

    " + } + }, + "UniqueProblem": { + "base": "

    A collection of one or more problems, grouped by their result.

    ", + "refs": { + "UniqueProblems$member": null + } + }, + "UniqueProblems": { + "base": null, + "refs": { + "UniqueProblemsByExecutionResultMap$value": null + } + }, + "UniqueProblemsByExecutionResultMap": { + "base": null, + "refs": { + "ListUniqueProblemsResult$uniqueProblems": "

    Information about the unique problems.

    Allowed values include:

    • ERRORED: An error condition.

    • FAILED: A failed condition.

    • SKIPPED: A skipped condition.

    • STOPPED: A stopped condition.

    • PASSED: A passing condition.

    • PENDING: A pending condition.

    • WARNED: A warning condition.

    " + } + }, + "UpdateDevicePoolRequest": { + "base": "

    Represents a request to the update device pool operation.

    ", + "refs": { + } + }, + "UpdateDevicePoolResult": { + "base": "

    Represents the result of an update device pool request.

    ", + "refs": { + } + }, + "UpdateProjectRequest": { + "base": "

    Represents a request to the update project operation.

    ", + "refs": { + } + }, + "UpdateProjectResult": { + "base": "

    Represents the result of an update project request.

    ", + "refs": { + } + }, + "Upload": { + "base": "

    An app or a set of one or more tests to upload or that have been uploaded.

    ", + "refs": { + "CreateUploadResult$upload": "

    The newly created upload.

    ", + "GetUploadResult$upload": null, + "Uploads$member": null + } + }, + "UploadStatus": { + "base": null, + "refs": { + "Upload$status": "

    The upload's status.

    Must be one of the following values:

    • FAILED: A failed status.

    • INITIALIZED: An initialized status.

    • PROCESSING: A processing status.

    • SUCCEEDED: A succeeded status.

    " + } + }, + "UploadType": { + "base": null, + "refs": { + "CreateUploadRequest$type": "

    The upload's upload type.

    Must be one of the following values:

    • ANDROID_APP: An Android upload.

    • IOS_APP: An iOS upload.

    • EXTERNAL_DATA: An external data upload.

    • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

    • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

    • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

    • CALABASH_TEST_PACKAGE: A Calabash test package upload.

    • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

    • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

    • XCTEST_TEST_PACKAGE: An XCode test package upload.

    • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

    • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

    • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

    Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws an ArgumentException error.

    ", + "Upload$type": "

    The upload's type.

    Must be one of the following values:

    • ANDROID_APP: An Android upload.

    • IOS_APP: An iOS upload.

    • EXTERNAL_DATA: An external data upload.

    • APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

    • APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

    • APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

    • CALABASH_TEST_PACKAGE: A Calabash test package upload.

    • INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.

    • UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.

    • XCTEST_TEST_PACKAGE: An XCode test package upload.

    • APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.

    • APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.

    • APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.

    " + } + }, + "Uploads": { + "base": null, + "refs": { + "ListUploadsResult$uploads": "

    Information about the uploads.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/devicefarm/2015-06-23/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,59 @@ +{ + "pagination": { + "ListArtifacts": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "artifacts" + }, + "ListDevicePools": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "devicePools" + }, + "ListDevices": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "devices" + }, + "ListJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "jobs" + }, + "ListProjects": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "projects" + }, + "ListRuns": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "runs" + }, + 
"ListSamples": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "samples" + }, + "ListSuites": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "suites" + }, + "ListTests": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "tests" + }, + "ListUniqueProblems": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "uniqueProblems" + }, + "ListUploads": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "uploads" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,720 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-10-25", + "endpointPrefix":"directconnect", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Direct Connect", + "signatureVersion":"v4", + "targetPrefix":"OvertureService" + }, + "operations":{ + "AllocateConnectionOnInterconnect":{ + "name":"AllocateConnectionOnInterconnect", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateConnectionOnInterconnectRequest"}, + "output":{"shape":"Connection"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "AllocatePrivateVirtualInterface":{ + "name":"AllocatePrivateVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocatePrivateVirtualInterfaceRequest"}, + 
"output":{"shape":"VirtualInterface"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "AllocatePublicVirtualInterface":{ + "name":"AllocatePublicVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocatePublicVirtualInterfaceRequest"}, + "output":{"shape":"VirtualInterface"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "ConfirmConnection":{ + "name":"ConfirmConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmConnectionRequest"}, + "output":{"shape":"ConfirmConnectionResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "ConfirmPrivateVirtualInterface":{ + "name":"ConfirmPrivateVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmPrivateVirtualInterfaceRequest"}, + "output":{"shape":"ConfirmPrivateVirtualInterfaceResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "ConfirmPublicVirtualInterface":{ + "name":"ConfirmPublicVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmPublicVirtualInterfaceRequest"}, + "output":{"shape":"ConfirmPublicVirtualInterfaceResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "CreateConnection":{ + "name":"CreateConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConnectionRequest"}, + "output":{"shape":"Connection"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "CreateInterconnect":{ + "name":"CreateInterconnect", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInterconnectRequest"}, + 
"output":{"shape":"Interconnect"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "CreatePrivateVirtualInterface":{ + "name":"CreatePrivateVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePrivateVirtualInterfaceRequest"}, + "output":{"shape":"VirtualInterface"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "CreatePublicVirtualInterface":{ + "name":"CreatePublicVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePublicVirtualInterfaceRequest"}, + "output":{"shape":"VirtualInterface"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DeleteConnection":{ + "name":"DeleteConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConnectionRequest"}, + "output":{"shape":"Connection"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DeleteInterconnect":{ + "name":"DeleteInterconnect", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInterconnectRequest"}, + "output":{"shape":"DeleteInterconnectResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DeleteVirtualInterface":{ + "name":"DeleteVirtualInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVirtualInterfaceRequest"}, + "output":{"shape":"DeleteVirtualInterfaceResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeConnections":{ + "name":"DescribeConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConnectionsRequest"}, + "output":{"shape":"Connections"}, + "errors":[ + 
{"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeConnectionsOnInterconnect":{ + "name":"DescribeConnectionsOnInterconnect", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConnectionsOnInterconnectRequest"}, + "output":{"shape":"Connections"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeInterconnects":{ + "name":"DescribeInterconnects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInterconnectsRequest"}, + "output":{"shape":"Interconnects"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeLocations":{ + "name":"DescribeLocations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"Locations"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeVirtualGateways":{ + "name":"DescribeVirtualGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"VirtualGateways"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + }, + "DescribeVirtualInterfaces":{ + "name":"DescribeVirtualInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVirtualInterfacesRequest"}, + "output":{"shape":"VirtualInterfaces"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ] + } + }, + "shapes":{ + "ASN":{"type":"integer"}, + "AllocateConnectionOnInterconnectRequest":{ + "type":"structure", + "required":[ + "bandwidth", + "connectionName", + "ownerAccount", + "interconnectId", + "vlan" + ], + "members":{ + "bandwidth":{"shape":"Bandwidth"}, + "connectionName":{"shape":"ConnectionName"}, + "ownerAccount":{"shape":"OwnerAccount"}, + 
"interconnectId":{"shape":"InterconnectId"}, + "vlan":{"shape":"VLAN"} + } + }, + "AllocatePrivateVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "connectionId", + "ownerAccount", + "newPrivateVirtualInterfaceAllocation" + ], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "ownerAccount":{"shape":"OwnerAccount"}, + "newPrivateVirtualInterfaceAllocation":{"shape":"NewPrivateVirtualInterfaceAllocation"} + } + }, + "AllocatePublicVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "connectionId", + "ownerAccount", + "newPublicVirtualInterfaceAllocation" + ], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "ownerAccount":{"shape":"OwnerAccount"}, + "newPublicVirtualInterfaceAllocation":{"shape":"NewPublicVirtualInterfaceAllocation"} + } + }, + "AmazonAddress":{"type":"string"}, + "BGPAuthKey":{"type":"string"}, + "Bandwidth":{"type":"string"}, + "CIDR":{"type":"string"}, + "ConfirmConnectionRequest":{ + "type":"structure", + "required":["connectionId"], + "members":{ + "connectionId":{"shape":"ConnectionId"} + } + }, + "ConfirmConnectionResponse":{ + "type":"structure", + "members":{ + "connectionState":{"shape":"ConnectionState"} + } + }, + "ConfirmPrivateVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "virtualInterfaceId", + "virtualGatewayId" + ], + "members":{ + "virtualInterfaceId":{"shape":"VirtualInterfaceId"}, + "virtualGatewayId":{"shape":"VirtualGatewayId"} + } + }, + "ConfirmPrivateVirtualInterfaceResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceState":{"shape":"VirtualInterfaceState"} + } + }, + "ConfirmPublicVirtualInterfaceRequest":{ + "type":"structure", + "required":["virtualInterfaceId"], + "members":{ + "virtualInterfaceId":{"shape":"VirtualInterfaceId"} + } + }, + "ConfirmPublicVirtualInterfaceResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceState":{"shape":"VirtualInterfaceState"} + } + }, + "Connection":{ + "type":"structure", + "members":{ + 
"ownerAccount":{"shape":"OwnerAccount"}, + "connectionId":{"shape":"ConnectionId"}, + "connectionName":{"shape":"ConnectionName"}, + "connectionState":{"shape":"ConnectionState"}, + "region":{"shape":"Region"}, + "location":{"shape":"LocationCode"}, + "bandwidth":{"shape":"Bandwidth"}, + "vlan":{"shape":"VLAN"}, + "partnerName":{"shape":"PartnerName"} + } + }, + "ConnectionId":{"type":"string"}, + "ConnectionList":{ + "type":"list", + "member":{"shape":"Connection"} + }, + "ConnectionName":{"type":"string"}, + "ConnectionState":{ + "type":"string", + "enum":[ + "ordering", + "requested", + "pending", + "available", + "down", + "deleting", + "deleted", + "rejected" + ] + }, + "Connections":{ + "type":"structure", + "members":{ + "connections":{"shape":"ConnectionList"} + } + }, + "CreateConnectionRequest":{ + "type":"structure", + "required":[ + "location", + "bandwidth", + "connectionName" + ], + "members":{ + "location":{"shape":"LocationCode"}, + "bandwidth":{"shape":"Bandwidth"}, + "connectionName":{"shape":"ConnectionName"} + } + }, + "CreateInterconnectRequest":{ + "type":"structure", + "required":[ + "interconnectName", + "bandwidth", + "location" + ], + "members":{ + "interconnectName":{"shape":"InterconnectName"}, + "bandwidth":{"shape":"Bandwidth"}, + "location":{"shape":"LocationCode"} + } + }, + "CreatePrivateVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "connectionId", + "newPrivateVirtualInterface" + ], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "newPrivateVirtualInterface":{"shape":"NewPrivateVirtualInterface"} + } + }, + "CreatePublicVirtualInterfaceRequest":{ + "type":"structure", + "required":[ + "connectionId", + "newPublicVirtualInterface" + ], + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "newPublicVirtualInterface":{"shape":"NewPublicVirtualInterface"} + } + }, + "CustomerAddress":{"type":"string"}, + "DeleteConnectionRequest":{ + "type":"structure", + "required":["connectionId"], + "members":{ + 
"connectionId":{"shape":"ConnectionId"} + } + }, + "DeleteInterconnectRequest":{ + "type":"structure", + "required":["interconnectId"], + "members":{ + "interconnectId":{"shape":"InterconnectId"} + } + }, + "DeleteInterconnectResponse":{ + "type":"structure", + "members":{ + "interconnectState":{"shape":"InterconnectState"} + } + }, + "DeleteVirtualInterfaceRequest":{ + "type":"structure", + "required":["virtualInterfaceId"], + "members":{ + "virtualInterfaceId":{"shape":"VirtualInterfaceId"} + } + }, + "DeleteVirtualInterfaceResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceState":{"shape":"VirtualInterfaceState"} + } + }, + "DescribeConnectionsOnInterconnectRequest":{ + "type":"structure", + "required":["interconnectId"], + "members":{ + "interconnectId":{"shape":"InterconnectId"} + } + }, + "DescribeConnectionsRequest":{ + "type":"structure", + "members":{ + "connectionId":{"shape":"ConnectionId"} + } + }, + "DescribeInterconnectsRequest":{ + "type":"structure", + "members":{ + "interconnectId":{"shape":"InterconnectId"} + } + }, + "DescribeVirtualInterfacesRequest":{ + "type":"structure", + "members":{ + "connectionId":{"shape":"ConnectionId"}, + "virtualInterfaceId":{"shape":"VirtualInterfaceId"} + } + }, + "DirectConnectClientException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DirectConnectServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ErrorMessage":{"type":"string"}, + "Interconnect":{ + "type":"structure", + "members":{ + "interconnectId":{"shape":"InterconnectId"}, + "interconnectName":{"shape":"InterconnectName"}, + "interconnectState":{"shape":"InterconnectState"}, + "region":{"shape":"Region"}, + "location":{"shape":"LocationCode"}, + "bandwidth":{"shape":"Bandwidth"} + } + }, + "InterconnectId":{"type":"string"}, + "InterconnectList":{ + "type":"list", + "member":{"shape":"Interconnect"} + }, + 
"InterconnectName":{"type":"string"}, + "InterconnectState":{ + "type":"string", + "enum":[ + "requested", + "pending", + "available", + "down", + "deleting", + "deleted" + ] + }, + "Interconnects":{ + "type":"structure", + "members":{ + "interconnects":{"shape":"InterconnectList"} + } + }, + "Location":{ + "type":"structure", + "members":{ + "locationCode":{"shape":"LocationCode"}, + "locationName":{"shape":"LocationName"} + } + }, + "LocationCode":{"type":"string"}, + "LocationList":{ + "type":"list", + "member":{"shape":"Location"} + }, + "LocationName":{"type":"string"}, + "Locations":{ + "type":"structure", + "members":{ + "locations":{"shape":"LocationList"} + } + }, + "NewPrivateVirtualInterface":{ + "type":"structure", + "required":[ + "virtualInterfaceName", + "vlan", + "asn", + "virtualGatewayId" + ], + "members":{ + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"}, + "virtualGatewayId":{"shape":"VirtualGatewayId"} + } + }, + "NewPrivateVirtualInterfaceAllocation":{ + "type":"structure", + "required":[ + "virtualInterfaceName", + "vlan", + "asn" + ], + "members":{ + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"} + } + }, + "NewPublicVirtualInterface":{ + "type":"structure", + "required":[ + "virtualInterfaceName", + "vlan", + "asn", + "amazonAddress", + "customerAddress", + "routeFilterPrefixes" + ], + "members":{ + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"}, + 
"routeFilterPrefixes":{"shape":"RouteFilterPrefixList"} + } + }, + "NewPublicVirtualInterfaceAllocation":{ + "type":"structure", + "required":[ + "virtualInterfaceName", + "vlan", + "asn", + "amazonAddress", + "customerAddress", + "routeFilterPrefixes" + ], + "members":{ + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"}, + "routeFilterPrefixes":{"shape":"RouteFilterPrefixList"} + } + }, + "OwnerAccount":{"type":"string"}, + "PartnerName":{"type":"string"}, + "Region":{"type":"string"}, + "RouteFilterPrefix":{ + "type":"structure", + "members":{ + "cidr":{"shape":"CIDR"} + } + }, + "RouteFilterPrefixList":{ + "type":"list", + "member":{"shape":"RouteFilterPrefix"} + }, + "RouterConfig":{"type":"string"}, + "VLAN":{"type":"integer"}, + "VirtualGateway":{ + "type":"structure", + "members":{ + "virtualGatewayId":{"shape":"VirtualGatewayId"}, + "virtualGatewayState":{"shape":"VirtualGatewayState"} + } + }, + "VirtualGatewayId":{"type":"string"}, + "VirtualGatewayList":{ + "type":"list", + "member":{"shape":"VirtualGateway"} + }, + "VirtualGatewayState":{"type":"string"}, + "VirtualGateways":{ + "type":"structure", + "members":{ + "virtualGateways":{"shape":"VirtualGatewayList"} + } + }, + "VirtualInterface":{ + "type":"structure", + "members":{ + "ownerAccount":{"shape":"OwnerAccount"}, + "virtualInterfaceId":{"shape":"VirtualInterfaceId"}, + "location":{"shape":"LocationCode"}, + "connectionId":{"shape":"ConnectionId"}, + "virtualInterfaceType":{"shape":"VirtualInterfaceType"}, + "virtualInterfaceName":{"shape":"VirtualInterfaceName"}, + "vlan":{"shape":"VLAN"}, + "asn":{"shape":"ASN"}, + "authKey":{"shape":"BGPAuthKey"}, + "amazonAddress":{"shape":"AmazonAddress"}, + "customerAddress":{"shape":"CustomerAddress"}, + "virtualInterfaceState":{"shape":"VirtualInterfaceState"}, + 
"customerRouterConfig":{"shape":"RouterConfig"}, + "virtualGatewayId":{"shape":"VirtualGatewayId"}, + "routeFilterPrefixes":{"shape":"RouteFilterPrefixList"} + } + }, + "VirtualInterfaceId":{"type":"string"}, + "VirtualInterfaceList":{ + "type":"list", + "member":{"shape":"VirtualInterface"} + }, + "VirtualInterfaceName":{"type":"string"}, + "VirtualInterfaceState":{ + "type":"string", + "enum":[ + "confirming", + "verifying", + "pending", + "available", + "deleting", + "deleted", + "rejected" + ] + }, + "VirtualInterfaceType":{"type":"string"}, + "VirtualInterfaces":{ + "type":"structure", + "members":{ + "virtualInterfaces":{"shape":"VirtualInterfaceList"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,495 @@ +{ + "version": "2.0", + "service": "

    AWS Direct Connect makes it easy to establish a dedicated network connection from your premises to Amazon Web Services (AWS). Using AWS Direct Connect, you can establish private connectivity between AWS and your data center, office, or colocation environment, which in many cases can reduce your network costs, increase bandwidth throughput, and provide a more consistent network experience than Internet-based connections.

    The AWS Direct Connect API Reference provides descriptions, syntax, and usage examples for each of the actions and data types for AWS Direct Connect. Use the following links to get started using the AWS Direct Connect API Reference:

    • Actions: An alphabetical list of all AWS Direct Connect actions.
    • Data Types: An alphabetical list of all AWS Direct Connect data types.
    • Common Query Parameters: Parameters that all Query actions can use.
    • Common Errors: Client and server errors that all actions can return.
    ", + "operations": { + "AllocateConnectionOnInterconnect": "

    Creates a hosted connection on an interconnect.

    Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the given interconnect.

    ", + "AllocatePrivateVirtualInterface": "

    Provisions a private virtual interface to be owned by a different customer.

    The owner of a connection calls this function to provision a private virtual interface which will be owned by another AWS customer.

    Virtual interfaces created using this function must be confirmed by the virtual interface owner by calling ConfirmPrivateVirtualInterface. Until this step has been completed, the virtual interface will be in 'Confirming' state, and will not be available for handling traffic.

    ", + "AllocatePublicVirtualInterface": "

    Provisions a public virtual interface to be owned by a different customer.

    The owner of a connection calls this function to provision a public virtual interface which will be owned by another AWS customer.

    Virtual interfaces created using this function must be confirmed by the virtual interface owner by calling ConfirmPublicVirtualInterface. Until this step has been completed, the virtual interface will be in 'Confirming' state, and will not be available for handling traffic.

    ", + "ConfirmConnection": "

    Confirm the creation of a hosted connection on an interconnect.

    Upon creation, the hosted connection is initially in the 'Ordering' state, and will remain in this state until the owner calls ConfirmConnection to confirm creation of the hosted connection.

    ", + "ConfirmPrivateVirtualInterface": "

    Accept ownership of a private virtual interface created by another customer.

    After the virtual interface owner calls this function, the virtual interface will be created and attached to the given virtual private gateway, and will be available for handling traffic.

    ", + "ConfirmPublicVirtualInterface": "

    Accept ownership of a public virtual interface created by another customer.

    After the virtual interface owner calls this function, the specified virtual interface will be created and made available for handling traffic.

    ", + "CreateConnection": "

    Creates a new connection between the customer network and a specific AWS Direct Connect location.

    A connection links your internal network to an AWS Direct Connect location over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an AWS Direct Connect router. An AWS Direct Connect location provides access to Amazon Web Services in the region it is associated with. You can establish connections with AWS Direct Connect locations in multiple regions, but a connection in one region does not provide connectivity to other regions.

    ", + "CreateInterconnect": "

    Creates a new interconnect between a AWS Direct Connect partner's network and a specific AWS Direct Connect location.

    An interconnect is a connection which is capable of hosting other connections. The AWS Direct Connect partner can use an interconnect to provide sub-1Gbps AWS Direct Connect service to tier 2 customers who do not have their own connections. Like a standard connection, an interconnect links the AWS Direct Connect partner's network to an AWS Direct Connect location over a standard 1 Gbps or 10 Gbps Ethernet fiber-optic cable. One end is connected to the partner's router, the other to an AWS Direct Connect router.

    For each end customer, the AWS Direct Connect partner provisions a connection on their interconnect by calling AllocateConnectionOnInterconnect. The end customer can then connect to AWS resources by creating a virtual interface on their connection, using the VLAN assigned to them by the AWS Direct Connect partner.

    ", + "CreatePrivateVirtualInterface": "

    Creates a new private virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A private virtual interface supports sending traffic to a single virtual private cloud (VPC).

    ", + "CreatePublicVirtualInterface": "

    Creates a new public virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A public virtual interface supports sending traffic to public services of AWS such as Amazon Simple Storage Service (Amazon S3).

    ", + "DeleteConnection": "

    Deletes the connection.

    Deleting a connection only stops the AWS Direct Connect port hour and data transfer charges. You need to cancel separately with the providers any services or charges for cross-connects or network circuits that connect you to the AWS Direct Connect location.

    ", + "DeleteInterconnect": "

    Deletes the specified interconnect.

    ", + "DeleteVirtualInterface": "

    Deletes a virtual interface.

    ", + "DescribeConnections": "

    Displays all connections in this region.

    If a connection ID is provided, the call returns only that particular connection.

    ", + "DescribeConnectionsOnInterconnect": "

    Return a list of connections that have been provisioned on the given interconnect.

    ", + "DescribeInterconnects": "

    Returns a list of interconnects owned by the AWS account.

    If an interconnect ID is provided, it will only return this particular interconnect.

    ", + "DescribeLocations": "

    Returns the list of AWS Direct Connect locations in the current AWS region. These are the locations that may be selected when calling CreateConnection or CreateInterconnect.

    ", + "DescribeVirtualGateways": "

    Returns a list of virtual private gateways owned by the AWS account.

    You can create one or more AWS Direct Connect private virtual interfaces linking to a virtual private gateway. A virtual private gateway can be managed via Amazon Virtual Private Cloud (VPC) console or the EC2 CreateVpnGateway action.

    ", + "DescribeVirtualInterfaces": "

    Displays all virtual interfaces for an AWS account. Virtual interfaces deleted fewer than 15 minutes before DescribeVirtualInterfaces is called are also returned. If a connection ID is included then only virtual interfaces associated with this connection will be returned. If a virtual interface ID is included then only a single virtual interface will be returned.

    A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect location and the customer.

    If a connection ID is provided, only virtual interfaces provisioned on the specified connection will be returned. If a virtual interface ID is provided, only this particular virtual interface will be returned.

    " + }, + "shapes": { + "ASN": { + "base": "

    Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.

    Example: 65000

    ", + "refs": { + "NewPrivateVirtualInterface$asn": null, + "NewPrivateVirtualInterfaceAllocation$asn": null, + "NewPublicVirtualInterface$asn": null, + "NewPublicVirtualInterfaceAllocation$asn": null, + "VirtualInterface$asn": null + } + }, + "AllocateConnectionOnInterconnectRequest": { + "base": "

    Container for the parameters to the AllocateConnectionOnInterconnect operation.

    ", + "refs": { + } + }, + "AllocatePrivateVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the AllocatePrivateVirtualInterface operation.

    ", + "refs": { + } + }, + "AllocatePublicVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the AllocatePublicVirtualInterface operation.

    ", + "refs": { + } + }, + "AmazonAddress": { + "base": "

    IP address assigned to the Amazon interface.

    Example: 192.168.1.1/30

    ", + "refs": { + "NewPrivateVirtualInterface$amazonAddress": null, + "NewPrivateVirtualInterfaceAllocation$amazonAddress": null, + "NewPublicVirtualInterface$amazonAddress": null, + "NewPublicVirtualInterfaceAllocation$amazonAddress": null, + "VirtualInterface$amazonAddress": null + } + }, + "BGPAuthKey": { + "base": "

    Authentication key for BGP configuration.

    Example: asdf34example

    ", + "refs": { + "NewPrivateVirtualInterface$authKey": null, + "NewPrivateVirtualInterfaceAllocation$authKey": null, + "NewPublicVirtualInterface$authKey": null, + "NewPublicVirtualInterfaceAllocation$authKey": null, + "VirtualInterface$authKey": null + } + }, + "Bandwidth": { + "base": "

    Bandwidth of the connection.

    Example: 1Gbps

    Default: None

    ", + "refs": { + "AllocateConnectionOnInterconnectRequest$bandwidth": "

    Bandwidth of the connection.

    Example: \"500Mbps\"

    Default: None

    ", + "Connection$bandwidth": "

    Bandwidth of the connection.

    Example: 1Gbps (for regular connections), or 500Mbps (for hosted connections)

    Default: None

    ", + "CreateConnectionRequest$bandwidth": null, + "CreateInterconnectRequest$bandwidth": "

    The port bandwidth

    Example: 1Gbps

    Default: None

    Available values: 1Gbps,10Gbps

    ", + "Interconnect$bandwidth": null + } + }, + "CIDR": { + "base": null, + "refs": { + "RouteFilterPrefix$cidr": "

    CIDR notation for the advertised route. Multiple routes are separated by commas.

    Example: 10.10.10.0/24,10.10.11.0/24

    " + } + }, + "ConfirmConnectionRequest": { + "base": "

    Container for the parameters to the ConfirmConnection operation.

    ", + "refs": { + } + }, + "ConfirmConnectionResponse": { + "base": "

    The response received when ConfirmConnection is called.

    ", + "refs": { + } + }, + "ConfirmPrivateVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the ConfirmPrivateVirtualInterface operation.

    ", + "refs": { + } + }, + "ConfirmPrivateVirtualInterfaceResponse": { + "base": "

    The response received when ConfirmPrivateVirtualInterface is called.

    ", + "refs": { + } + }, + "ConfirmPublicVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the ConfirmPublicVirtualInterface operation.

    ", + "refs": { + } + }, + "ConfirmPublicVirtualInterfaceResponse": { + "base": "

    The response received when ConfirmPublicVirtualInterface is called.

    ", + "refs": { + } + }, + "Connection": { + "base": "

    A connection represents the physical network connection between the AWS Direct Connect location and the customer.

    ", + "refs": { + "ConnectionList$member": null + } + }, + "ConnectionId": { + "base": "

    ID of the connection.

    Example: dxcon-fg5678gh

    Default: None

    ", + "refs": { + "AllocatePrivateVirtualInterfaceRequest$connectionId": "

    The connection ID on which the private virtual interface is provisioned.

    Default: None

    ", + "AllocatePublicVirtualInterfaceRequest$connectionId": "

    The connection ID on which the public virtual interface is provisioned.

    Default: None

    ", + "ConfirmConnectionRequest$connectionId": null, + "Connection$connectionId": null, + "CreatePrivateVirtualInterfaceRequest$connectionId": null, + "CreatePublicVirtualInterfaceRequest$connectionId": null, + "DeleteConnectionRequest$connectionId": null, + "DescribeConnectionsRequest$connectionId": null, + "DescribeVirtualInterfacesRequest$connectionId": null, + "VirtualInterface$connectionId": null + } + }, + "ConnectionList": { + "base": "

    A list of connections.

    ", + "refs": { + "Connections$connections": "

    A list of connections.

    " + } + }, + "ConnectionName": { + "base": "

    The name of the connection.

    Example: \"My Connection to AWS\"

    Default: None

    ", + "refs": { + "AllocateConnectionOnInterconnectRequest$connectionName": "

    Name of the provisioned connection.

    Example: \"500M Connection to AWS\"

    Default: None

    ", + "Connection$connectionName": null, + "CreateConnectionRequest$connectionName": null + } + }, + "ConnectionState": { + "base": "State of the connection.
    • Ordering: The initial state of a hosted connection provisioned on an interconnect. The connection stays in the ordering state until the owner of the hosted connection confirms or declines the connection order.
    • Requested: The initial state of a standard connection. The connection stays in the requested state until the Letter of Authorization (LOA) is sent to the customer.
    • Pending: The connection has been approved, and is being initialized.
    • Available: The network link is up, and the connection is ready for use.
    • Down: The network link is down.
    • Deleting: The connection is in the process of being deleted.
    • Deleted: The connection has been deleted.
    • Rejected: A hosted connection in the 'Ordering' state will enter the 'Rejected' state if it is deleted by the end customer.
    ", + "refs": { + "ConfirmConnectionResponse$connectionState": null, + "Connection$connectionState": null + } + }, + "Connections": { + "base": "

    A structure containing a list of connections.

    ", + "refs": { + } + }, + "CreateConnectionRequest": { + "base": "

    Container for the parameters to the CreateConnection operation.

    ", + "refs": { + } + }, + "CreateInterconnectRequest": { + "base": "

    Container for the parameters to the CreateInterconnect operation.

    ", + "refs": { + } + }, + "CreatePrivateVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the CreatePrivateVirtualInterface operation.

    ", + "refs": { + } + }, + "CreatePublicVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the CreatePublicVirtualInterface operation.

    ", + "refs": { + } + }, + "CustomerAddress": { + "base": "

    IP address assigned to the customer interface.

    Example: 192.168.1.2/30

    ", + "refs": { + "NewPrivateVirtualInterface$customerAddress": null, + "NewPrivateVirtualInterfaceAllocation$customerAddress": null, + "NewPublicVirtualInterface$customerAddress": null, + "NewPublicVirtualInterfaceAllocation$customerAddress": null, + "VirtualInterface$customerAddress": null + } + }, + "DeleteConnectionRequest": { + "base": "

    Container for the parameters to the DeleteConnection operation.

    ", + "refs": { + } + }, + "DeleteInterconnectRequest": { + "base": "

    Container for the parameters to the DeleteInterconnect operation.

    ", + "refs": { + } + }, + "DeleteInterconnectResponse": { + "base": "

    The response received when DeleteInterconnect is called.

    ", + "refs": { + } + }, + "DeleteVirtualInterfaceRequest": { + "base": "

    Container for the parameters to the DeleteVirtualInterface operation.

    ", + "refs": { + } + }, + "DeleteVirtualInterfaceResponse": { + "base": "

    The response received when DeleteVirtualInterface is called.

    ", + "refs": { + } + }, + "DescribeConnectionsOnInterconnectRequest": { + "base": "

    Container for the parameters to the DescribeConnectionsOnInterconnect operation.

    ", + "refs": { + } + }, + "DescribeConnectionsRequest": { + "base": "

    Container for the parameters to the DescribeConnections operation.

    ", + "refs": { + } + }, + "DescribeInterconnectsRequest": { + "base": "

    Container for the parameters to the DescribeInterconnects operation.

    ", + "refs": { + } + }, + "DescribeVirtualInterfacesRequest": { + "base": "

    Container for the parameters to the DescribeVirtualInterfaces operation.

    ", + "refs": { + } + }, + "DirectConnectClientException": { + "base": "

    The API was called with invalid parameters. The error message will contain additional details about the cause.

    ", + "refs": { + } + }, + "DirectConnectServerException": { + "base": "

    A server-side error occurred during the API call. The error message will contain additional details about the cause.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "DirectConnectClientException$message": "

    This is an exception thrown when there is an issue with the input provided by the API call. For example, the name provided for a connection contains a pound sign (#). This can also occur when a valid value is provided, but is otherwise constrained. For example, the valid VLAN tag range is 1-4096 but each can only be used once per connection.

    ", + "DirectConnectServerException$message": "

    This is an exception thrown when there is a backend issue on the server side.

    " + } + }, + "Interconnect": { + "base": "

    An interconnect is a connection that can host other connections.

    Like a standard AWS Direct Connect connection, an interconnect represents the physical connection between an AWS Direct Connect partner's network and a specific Direct Connect location. An AWS Direct Connect partner who owns an interconnect can provision hosted connections on the interconnect for their end customers, thereby providing the end customers with connectivity to AWS services.

    The resources of the interconnect, including bandwidth and VLAN numbers, are shared by all of the hosted connections on the interconnect, and the owner of the interconnect determines how these resources are assigned.

    ", + "refs": { + "InterconnectList$member": null + } + }, + "InterconnectId": { + "base": "

    The ID of the interconnect.

    Example: dxcon-abc123

    ", + "refs": { + "AllocateConnectionOnInterconnectRequest$interconnectId": "

    ID of the interconnect on which the connection will be provisioned.

    Example: dxcon-456abc78

    Default: None

    ", + "DeleteInterconnectRequest$interconnectId": null, + "DescribeConnectionsOnInterconnectRequest$interconnectId": "

    ID of the interconnect on which a list of connection is provisioned.

    Example: dxcon-abc123

    Default: None

    ", + "DescribeInterconnectsRequest$interconnectId": null, + "Interconnect$interconnectId": null + } + }, + "InterconnectList": { + "base": "

    A list of interconnects.

    ", + "refs": { + "Interconnects$interconnects": "

    A list of interconnects.

    " + } + }, + "InterconnectName": { + "base": "

    The name of the interconnect.

    Example: \"1G Interconnect to AWS\"

    ", + "refs": { + "CreateInterconnectRequest$interconnectName": "

    The name of the interconnect.

    Example: \"1G Interconnect to AWS\"

    Default: None

    ", + "Interconnect$interconnectName": null + } + }, + "InterconnectState": { + "base": "State of the interconnect.
    • Requested: The initial state of an interconnect. The interconnect stays in the requested state until the Letter of Authorization (LOA) is sent to the customer.
    • Pending: The interconnect has been approved, and is being initialized.
    • Available: The network link is up, and the interconnect is ready for use.
    • Down: The network link is down.
    • Deleting: The interconnect is in the process of being deleted.
    • Deleted: The interconnect has been deleted.
    ", + "refs": { + "DeleteInterconnectResponse$interconnectState": null, + "Interconnect$interconnectState": null + } + }, + "Interconnects": { + "base": "

    A structure containing a list of interconnects.

    ", + "refs": { + } + }, + "Location": { + "base": "

    An AWS Direct Connect location where connections and interconnects can be requested.

    ", + "refs": { + "LocationList$member": null + } + }, + "LocationCode": { + "base": "

    Where the connection is located.

    Example: EqSV5

    Default: None

    ", + "refs": { + "Connection$location": null, + "CreateConnectionRequest$location": null, + "CreateInterconnectRequest$location": "

    Where the interconnect is located

    Example: EqSV5

    Default: None

    ", + "Interconnect$location": null, + "Location$locationCode": "

    The code used to indicate the AWS Direct Connect location.

    ", + "VirtualInterface$location": null + } + }, + "LocationList": { + "base": null, + "refs": { + "Locations$locations": "

    A list of colocation hubs where network providers have equipment. Most regions have multiple locations available.

    " + } + }, + "LocationName": { + "base": null, + "refs": { + "Location$locationName": "

    The name of the AWS Direct Connect location. The name includes the colocation partner name and the physical site of the lit building.

    " + } + }, + "Locations": { + "base": "

    A location is a network facility where AWS Direct Connect routers are available to be connected. Generally, these are colocation hubs where many network providers have equipment, and where cross connects can be delivered. Locations include a name and facility code, and must be provided when creating a connection.

    ", + "refs": { + } + }, + "NewPrivateVirtualInterface": { + "base": "

    A structure containing information about a new private virtual interface.

    ", + "refs": { + "CreatePrivateVirtualInterfaceRequest$newPrivateVirtualInterface": "

    Detailed information for the private virtual interface to be created.

    Default: None

    " + } + }, + "NewPrivateVirtualInterfaceAllocation": { + "base": "

    A structure containing information about a private virtual interface that will be provisioned on a connection.

    ", + "refs": { + "AllocatePrivateVirtualInterfaceRequest$newPrivateVirtualInterfaceAllocation": "

    Detailed information for the private virtual interface to be provisioned.

    Default: None

    " + } + }, + "NewPublicVirtualInterface": { + "base": "

    A structure containing information about a new public virtual interface.

    ", + "refs": { + "CreatePublicVirtualInterfaceRequest$newPublicVirtualInterface": "

    Detailed information for the public virtual interface to be created.

    Default: None

    " + } + }, + "NewPublicVirtualInterfaceAllocation": { + "base": "

    A structure containing information about a public virtual interface that will be provisioned on a connection.

    ", + "refs": { + "AllocatePublicVirtualInterfaceRequest$newPublicVirtualInterfaceAllocation": "

    Detailed information for the public virtual interface to be provisioned.

    Default: None

    " + } + }, + "OwnerAccount": { + "base": null, + "refs": { + "AllocateConnectionOnInterconnectRequest$ownerAccount": "

    Numeric account Id of the customer for whom the connection will be provisioned.

    Example: 123443215678

    Default: None

    ", + "AllocatePrivateVirtualInterfaceRequest$ownerAccount": "

    The AWS account that will own the new private virtual interface.

    Default: None

    ", + "AllocatePublicVirtualInterfaceRequest$ownerAccount": "

    The AWS account that will own the new public virtual interface.

    Default: None

    ", + "Connection$ownerAccount": null, + "VirtualInterface$ownerAccount": null + } + }, + "PartnerName": { + "base": null, + "refs": { + "Connection$partnerName": null + } + }, + "Region": { + "base": "

    The AWS region where the connection is located.

    Example: us-east-1

    Default: None

    ", + "refs": { + "Connection$region": null, + "Interconnect$region": null + } + }, + "RouteFilterPrefix": { + "base": "

    A route filter prefix that the customer can advertise through Border Gateway Protocol (BGP) over a public virtual interface.

    ", + "refs": { + "RouteFilterPrefixList$member": null + } + }, + "RouteFilterPrefixList": { + "base": "

    A list of routes to be advertised to the AWS network in this region (public virtual interface).

    ", + "refs": { + "NewPublicVirtualInterface$routeFilterPrefixes": null, + "NewPublicVirtualInterfaceAllocation$routeFilterPrefixes": null, + "VirtualInterface$routeFilterPrefixes": null + } + }, + "RouterConfig": { + "base": null, + "refs": { + "VirtualInterface$customerRouterConfig": "

    Information for generating the customer router configuration.

    " + } + }, + "VLAN": { + "base": "

    The VLAN ID.

    Example: 101

    ", + "refs": { + "AllocateConnectionOnInterconnectRequest$vlan": "

    The dedicated VLAN provisioned to the connection.

    Example: 101

    Default: None

    ", + "Connection$vlan": null, + "NewPrivateVirtualInterface$vlan": null, + "NewPrivateVirtualInterfaceAllocation$vlan": null, + "NewPublicVirtualInterface$vlan": null, + "NewPublicVirtualInterfaceAllocation$vlan": null, + "VirtualInterface$vlan": null + } + }, + "VirtualGateway": { + "base": "

    You can create one or more AWS Direct Connect private virtual interfaces linking to your virtual private gateway.

    Virtual private gateways can be managed using the Amazon Virtual Private Cloud (Amazon VPC) console or the Amazon EC2 CreateVpnGateway action.

    ", + "refs": { + "VirtualGatewayList$member": null + } + }, + "VirtualGatewayId": { + "base": "

    The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces.

    Example: vgw-123er56

    ", + "refs": { + "ConfirmPrivateVirtualInterfaceRequest$virtualGatewayId": "

    ID of the virtual private gateway that will be attached to the virtual interface.

    A virtual private gateway can be managed via the Amazon Virtual Private Cloud (VPC) console or the EC2 CreateVpnGateway action.

    Default: None

    ", + "NewPrivateVirtualInterface$virtualGatewayId": null, + "VirtualGateway$virtualGatewayId": null, + "VirtualInterface$virtualGatewayId": null + } + }, + "VirtualGatewayList": { + "base": "

    A list of virtual private gateways.

    ", + "refs": { + "VirtualGateways$virtualGateways": "

    A list of virtual private gateways.

    " + } + }, + "VirtualGatewayState": { + "base": "State of the virtual private gateway.
    • Pending: This is the initial state after calling CreateVpnGateway.
    • Available: Ready for use by a private virtual interface.
    • Deleting: This is the initial state after calling DeleteVpnGateway.
    • Deleted: In this state, a private virtual interface is unable to send traffic over this gateway.
    ", + "refs": { + "VirtualGateway$virtualGatewayState": null + } + }, + "VirtualGateways": { + "base": "

    A structure containing a list of virtual private gateways.

    ", + "refs": { + } + }, + "VirtualInterface": { + "base": "

    A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect location and the customer.

    ", + "refs": { + "VirtualInterfaceList$member": null + } + }, + "VirtualInterfaceId": { + "base": "

    ID of the virtual interface.

    Example: dxvif-123dfg56

    Default: None

    ", + "refs": { + "ConfirmPrivateVirtualInterfaceRequest$virtualInterfaceId": null, + "ConfirmPublicVirtualInterfaceRequest$virtualInterfaceId": null, + "DeleteVirtualInterfaceRequest$virtualInterfaceId": null, + "DescribeVirtualInterfacesRequest$virtualInterfaceId": null, + "VirtualInterface$virtualInterfaceId": null + } + }, + "VirtualInterfaceList": { + "base": "

    A list of virtual interfaces.

    ", + "refs": { + "VirtualInterfaces$virtualInterfaces": "

    A list of virtual interfaces.

    " + } + }, + "VirtualInterfaceName": { + "base": "

    The name of the virtual interface assigned by the customer.

    Example: \"My VPC\"

    ", + "refs": { + "NewPrivateVirtualInterface$virtualInterfaceName": null, + "NewPrivateVirtualInterfaceAllocation$virtualInterfaceName": null, + "NewPublicVirtualInterface$virtualInterfaceName": null, + "NewPublicVirtualInterfaceAllocation$virtualInterfaceName": null, + "VirtualInterface$virtualInterfaceName": null + } + }, + "VirtualInterfaceState": { + "base": "State of the virtual interface.
    • Confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
    • Verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
    • Pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
    • Available: A virtual interface that is able to forward traffic.
    • Down: A virtual interface that is BGP down.
    • Deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
    • Deleted: A virtual interface that cannot forward traffic.
    • Rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the 'Confirming' state is deleted by the virtual interface owner, the virtual interface will enter the 'Rejected' state.
    ", + "refs": { + "ConfirmPrivateVirtualInterfaceResponse$virtualInterfaceState": null, + "ConfirmPublicVirtualInterfaceResponse$virtualInterfaceState": null, + "DeleteVirtualInterfaceResponse$virtualInterfaceState": null, + "VirtualInterface$virtualInterfaceState": null + } + }, + "VirtualInterfaceType": { + "base": "

    The type of virtual interface.

    Example: private (Amazon VPC) or public (Amazon S3, Amazon DynamoDB, and so on.)

    ", + "refs": { + "VirtualInterface$virtualInterfaceType": null + } + }, + "VirtualInterfaces": { + "base": "

    A structure containing a list of virtual interfaces.

    ", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/directconnect/2012-10-25/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,22 @@ +{ + "pagination": { + "DescribeConnections": { + "result_key": "connections" + }, + "DescribeConnectionsOnInterconnect": { + "result_key": "connections" + }, + "DescribeInterconnects": { + "result_key": "interconnects" + }, + "DescribeLocations": { + "result_key": "locations" + }, + "DescribeVirtualGateways": { + "result_key": "virtualGateways" + }, + "DescribeVirtualInterfaces": { + "result_key": "virtualInterfaces" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1252 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-16", + "endpointPrefix":"ds", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Directory Service", + "serviceFullName":"AWS Directory Service", + "signatureVersion":"v4", + "targetPrefix":"DirectoryService_20150416" + }, + "operations":{ + "ConnectDirectory":{ + "name":"ConnectDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConnectDirectoryRequest"}, + "output":{"shape":"ConnectDirectoryResult"}, + "errors":[ + {"shape":"DirectoryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateAlias":{ + "name":"CreateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAliasRequest"}, + "output":{"shape":"CreateAliasResult"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateComputer":{ + "name":"CreateComputer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateComputerRequest"}, + "output":{"shape":"CreateComputerResult"}, + "errors":[ + {"shape":"AuthenticationFailedException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + 
{"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateDirectory":{ + "name":"CreateDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDirectoryRequest"}, + "output":{"shape":"CreateDirectoryResult"}, + "errors":[ + {"shape":"DirectoryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateMicrosoftAD":{ + "name":"CreateMicrosoftAD", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMicrosoftADRequest"}, + "output":{"shape":"CreateMicrosoftADResult"}, + "errors":[ + {"shape":"DirectoryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotRequest"}, + "output":{"shape":"CreateSnapshotResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"SnapshotLimitExceededException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "CreateTrust":{ + "name":"CreateTrust", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTrustRequest"}, + "output":{"shape":"CreateTrustResult"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, + "DeleteDirectory":{ + "name":"DeleteDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDirectoryRequest"}, + "output":{"shape":"DeleteDirectoryResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + 
{"shape":"ServiceException"} + ] + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotRequest"}, + "output":{"shape":"DeleteSnapshotResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DeleteTrust":{ + "name":"DeleteTrust", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTrustRequest"}, + "output":{"shape":"DeleteTrustResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, + "DescribeDirectories":{ + "name":"DescribeDirectories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDirectoriesRequest"}, + "output":{"shape":"DescribeDirectoriesResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsRequest"}, + "output":{"shape":"DescribeSnapshotsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DescribeTrusts":{ + "name":"DescribeTrusts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustsRequest"}, + "output":{"shape":"DescribeTrustsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + 
{"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + }, + "DisableRadius":{ + "name":"DisableRadius", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableRadiusRequest"}, + "output":{"shape":"DisableRadiusResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "DisableSso":{ + "name":"DisableSso", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableSsoRequest"}, + "output":{"shape":"DisableSsoResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InsufficientPermissionsException"}, + {"shape":"AuthenticationFailedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "EnableRadius":{ + "name":"EnableRadius", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableRadiusRequest"}, + "output":{"shape":"EnableRadiusResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "EnableSso":{ + "name":"EnableSso", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableSsoRequest"}, + "output":{"shape":"EnableSsoResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InsufficientPermissionsException"}, + {"shape":"AuthenticationFailedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "GetDirectoryLimits":{ + "name":"GetDirectoryLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDirectoryLimitsRequest"}, + "output":{"shape":"GetDirectoryLimitsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "GetSnapshotLimits":{ + "name":"GetSnapshotLimits", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSnapshotLimitsRequest"}, + "output":{"shape":"GetSnapshotLimitsResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "RestoreFromSnapshot":{ + "name":"RestoreFromSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreFromSnapshotRequest"}, + "output":{"shape":"RestoreFromSnapshotResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "UpdateRadius":{ + "name":"UpdateRadius", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRadiusRequest"}, + "output":{"shape":"UpdateRadiusResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ] + }, + "VerifyTrust":{ + "name":"VerifyTrust", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyTrustRequest"}, + "output":{"shape":"VerifyTrustResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ] + } + }, + "shapes":{ + "AccessUrl":{ + "type":"string", + "max":128, + "min":1 + }, + "AliasName":{ + "type":"string", + "max":62, + "min":1, + "pattern":"^(?!d-)([\\da-zA-Z]+)([-]*[\\da-zA-Z])*" + }, + "Attribute":{ + "type":"structure", + "members":{ + "Name":{"shape":"AttributeName"}, + "Value":{"shape":"AttributeValue"} + } + }, + "AttributeName":{ + "type":"string", + "min":1 + }, + "AttributeValue":{"type":"string"}, + "Attributes":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "AuthenticationFailedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + 
"RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "AvailabilityZone":{"type":"string"}, + "AvailabilityZones":{ + "type":"list", + "member":{"shape":"AvailabilityZone"} + }, + "ClientException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "CloudOnlyDirectoriesLimitReached":{"type":"boolean"}, + "Computer":{ + "type":"structure", + "members":{ + "ComputerId":{"shape":"SID"}, + "ComputerName":{"shape":"ComputerName"}, + "ComputerAttributes":{"shape":"Attributes"} + } + }, + "ComputerName":{ + "type":"string", + "max":15, + "min":1 + }, + "ComputerPassword":{ + "type":"string", + "max":64, + "min":8, + "pattern":"[\\u0020-\\u00FF]+", + "sensitive":true + }, + "ConnectDirectoryRequest":{ + "type":"structure", + "required":[ + "Name", + "Password", + "Size", + "ConnectSettings" + ], + "members":{ + "Name":{"shape":"DirectoryName"}, + "ShortName":{"shape":"DirectoryShortName"}, + "Password":{"shape":"ConnectPassword"}, + "Description":{"shape":"Description"}, + "Size":{"shape":"DirectorySize"}, + "ConnectSettings":{"shape":"DirectoryConnectSettings"} + } + }, + "ConnectDirectoryResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "ConnectPassword":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, + "ConnectedDirectoriesLimitReached":{"type":"boolean"}, + "CreateAliasRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "Alias" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Alias":{"shape":"AliasName"} + } + }, + "CreateAliasResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Alias":{"shape":"AliasName"} + } + }, + "CreateComputerRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "ComputerName", + "Password" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "ComputerName":{"shape":"ComputerName"}, + 
"Password":{"shape":"ComputerPassword"}, + "OrganizationalUnitDistinguishedName":{"shape":"OrganizationalUnitDN"}, + "ComputerAttributes":{"shape":"Attributes"} + } + }, + "CreateComputerResult":{ + "type":"structure", + "members":{ + "Computer":{"shape":"Computer"} + } + }, + "CreateDirectoryRequest":{ + "type":"structure", + "required":[ + "Name", + "Password", + "Size" + ], + "members":{ + "Name":{"shape":"DirectoryName"}, + "ShortName":{"shape":"DirectoryShortName"}, + "Password":{"shape":"Password"}, + "Description":{"shape":"Description"}, + "Size":{"shape":"DirectorySize"}, + "VpcSettings":{"shape":"DirectoryVpcSettings"} + } + }, + "CreateDirectoryResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "CreateMicrosoftADRequest":{ + "type":"structure", + "required":[ + "Name", + "Password", + "VpcSettings" + ], + "members":{ + "Name":{"shape":"DirectoryName"}, + "ShortName":{"shape":"DirectoryShortName"}, + "Password":{"shape":"Password"}, + "Description":{"shape":"Description"}, + "VpcSettings":{"shape":"DirectoryVpcSettings"} + } + }, + "CreateMicrosoftADResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "CreateSnapshotRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Name":{"shape":"SnapshotName"} + } + }, + "CreateSnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "CreateTrustRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RemoteDomainName", + "TrustPassword", + "TrustDirection" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RemoteDomainName":{"shape":"RemoteDomainName"}, + "TrustPassword":{"shape":"TrustPassword"}, + "TrustDirection":{"shape":"TrustDirection"}, + "TrustType":{"shape":"TrustType"} + } + }, + "CreateTrustResult":{ + "type":"structure", + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + 
"CreatedDateTime":{"type":"timestamp"}, + "DeleteDirectoryRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "DeleteDirectoryResult":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "DeleteSnapshotRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "DeleteSnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "DeleteTrustRequest":{ + "type":"structure", + "required":["TrustId"], + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + "DeleteTrustResult":{ + "type":"structure", + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + "DescribeDirectoriesRequest":{ + "type":"structure", + "members":{ + "DirectoryIds":{"shape":"DirectoryIds"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"Limit"} + } + }, + "DescribeDirectoriesResult":{ + "type":"structure", + "members":{ + "DirectoryDescriptions":{"shape":"DirectoryDescriptions"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeSnapshotsRequest":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "SnapshotIds":{"shape":"SnapshotIds"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"Limit"} + } + }, + "DescribeSnapshotsResult":{ + "type":"structure", + "members":{ + "Snapshots":{"shape":"Snapshots"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeTrustsRequest":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "TrustIds":{"shape":"TrustIds"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"Limit"} + } + }, + "DescribeTrustsResult":{ + "type":"structure", + "members":{ + "Trusts":{"shape":"Trusts"}, + "NextToken":{"shape":"NextToken"} + } + }, + "Description":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^([a-zA-Z0-9_])[\\\\a-zA-Z0-9_@#%*+=:?./!\\s-]*$" + }, + 
"DirectoryConnectSettings":{ + "type":"structure", + "required":[ + "VpcId", + "SubnetIds", + "CustomerDnsIps", + "CustomerUserName" + ], + "members":{ + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "CustomerDnsIps":{"shape":"DnsIpAddrs"}, + "CustomerUserName":{"shape":"UserName"} + } + }, + "DirectoryConnectSettingsDescription":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "CustomerUserName":{"shape":"UserName"}, + "SecurityGroupId":{"shape":"SecurityGroupId"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "ConnectIps":{"shape":"IpAddrs"} + } + }, + "DirectoryDescription":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Name":{"shape":"DirectoryName"}, + "ShortName":{"shape":"DirectoryShortName"}, + "Size":{"shape":"DirectorySize"}, + "Alias":{"shape":"AliasName"}, + "AccessUrl":{"shape":"AccessUrl"}, + "Description":{"shape":"Description"}, + "DnsIpAddrs":{"shape":"DnsIpAddrs"}, + "Stage":{"shape":"DirectoryStage"}, + "LaunchTime":{"shape":"LaunchTime"}, + "StageLastUpdatedDateTime":{"shape":"LastUpdatedDateTime"}, + "Type":{"shape":"DirectoryType"}, + "VpcSettings":{"shape":"DirectoryVpcSettingsDescription"}, + "ConnectSettings":{"shape":"DirectoryConnectSettingsDescription"}, + "RadiusSettings":{"shape":"RadiusSettings"}, + "RadiusStatus":{"shape":"RadiusStatus"}, + "StageReason":{"shape":"StageReason"}, + "SsoEnabled":{"shape":"SsoEnabled"} + } + }, + "DirectoryDescriptions":{ + "type":"list", + "member":{"shape":"DirectoryDescription"} + }, + "DirectoryId":{ + "type":"string", + "pattern":"^d-[0-9a-f]{10}$" + }, + "DirectoryIds":{ + "type":"list", + "member":{"shape":"DirectoryId"} + }, + "DirectoryLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "DirectoryLimits":{ + "type":"structure", + "members":{ + 
"CloudOnlyDirectoriesLimit":{"shape":"Limit"}, + "CloudOnlyDirectoriesCurrentCount":{"shape":"Limit"}, + "CloudOnlyDirectoriesLimitReached":{"shape":"CloudOnlyDirectoriesLimitReached"}, + "CloudOnlyMicrosoftADLimit":{"shape":"Limit"}, + "CloudOnlyMicrosoftADCurrentCount":{"shape":"Limit"}, + "CloudOnlyMicrosoftADLimitReached":{"shape":"CloudOnlyDirectoriesLimitReached"}, + "ConnectedDirectoriesLimit":{"shape":"Limit"}, + "ConnectedDirectoriesCurrentCount":{"shape":"Limit"}, + "ConnectedDirectoriesLimitReached":{"shape":"ConnectedDirectoriesLimitReached"} + } + }, + "DirectoryName":{ + "type":"string", + "pattern":"^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+$" + }, + "DirectoryShortName":{ + "type":"string", + "pattern":"^[^\\\\/:*?\\\"\\<\\>|.]+[^\\\\/:*?\\\"<>|]*$" + }, + "DirectorySize":{ + "type":"string", + "enum":[ + "Small", + "Large" + ] + }, + "DirectoryStage":{ + "type":"string", + "enum":[ + "Requested", + "Creating", + "Created", + "Active", + "Inoperable", + "Impaired", + "Restoring", + "RestoreFailed", + "Deleting", + "Deleted", + "Failed" + ] + }, + "DirectoryType":{ + "type":"string", + "enum":[ + "SimpleAD", + "ADConnector", + "MicrosoftAD" + ] + }, + "DirectoryUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "DirectoryVpcSettings":{ + "type":"structure", + "required":[ + "VpcId", + "SubnetIds" + ], + "members":{ + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"} + } + }, + "DirectoryVpcSettingsDescription":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "SecurityGroupId":{"shape":"SecurityGroupId"}, + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "DisableRadiusRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "DisableRadiusResult":{ + "type":"structure", + "members":{ + } + 
}, + "DisableSsoRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "Password":{"shape":"ConnectPassword"} + } + }, + "DisableSsoResult":{ + "type":"structure", + "members":{ + } + }, + "DnsIpAddrs":{ + "type":"list", + "member":{"shape":"IpAddr"} + }, + "EnableRadiusRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RadiusSettings" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RadiusSettings":{"shape":"RadiusSettings"} + } + }, + "EnableRadiusResult":{ + "type":"structure", + "members":{ + } + }, + "EnableSsoRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "Password":{"shape":"ConnectPassword"} + } + }, + "EnableSsoResult":{ + "type":"structure", + "members":{ + } + }, + "EntityAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "EntityDoesNotExistException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "ExceptionMessage":{"type":"string"}, + "GetDirectoryLimitsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetDirectoryLimitsResult":{ + "type":"structure", + "members":{ + "DirectoryLimits":{"shape":"DirectoryLimits"} + } + }, + "GetSnapshotLimitsRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "GetSnapshotLimitsResult":{ + "type":"structure", + "members":{ + "SnapshotLimits":{"shape":"SnapshotLimits"} + } + }, + "InsufficientPermissionsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "InvalidNextTokenException":{ 
+ "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "IpAddr":{ + "type":"string", + "pattern":"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" + }, + "IpAddrs":{ + "type":"list", + "member":{"shape":"IpAddr"} + }, + "LastUpdatedDateTime":{"type":"timestamp"}, + "LaunchTime":{"type":"timestamp"}, + "Limit":{ + "type":"integer", + "min":0 + }, + "ManualSnapshotsLimitReached":{"type":"boolean"}, + "NextToken":{"type":"string"}, + "OrganizationalUnitDN":{ + "type":"string", + "max":2000, + "min":1 + }, + "Password":{ + "type":"string", + "pattern":"(?=^.{8,64}$)((?=.*\\d)(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[^A-Za-z0-9\\s])(?=.*[a-z])|(?=.*[^A-Za-z0-9\\s])(?=.*[A-Z])(?=.*[a-z])|(?=.*\\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\\s]))^.*", + "sensitive":true + }, + "PortNumber":{ + "type":"integer", + "max":65535, + "min":1025 + }, + "RadiusAuthenticationProtocol":{ + "type":"string", + "enum":[ + "PAP", + "CHAP", + "MS-CHAPv1", + "MS-CHAPv2" + ] + }, + "RadiusDisplayLabel":{ + "type":"string", + "max":64, + "min":1 + }, + "RadiusRetries":{ + "type":"integer", + "max":10, + "min":0 + }, + "RadiusSettings":{ + "type":"structure", + "members":{ + "RadiusServers":{"shape":"Servers"}, + "RadiusPort":{"shape":"PortNumber"}, + "RadiusTimeout":{"shape":"RadiusTimeout"}, + "RadiusRetries":{"shape":"RadiusRetries"}, + "SharedSecret":{"shape":"RadiusSharedSecret"}, + "AuthenticationProtocol":{"shape":"RadiusAuthenticationProtocol"}, + "DisplayLabel":{"shape":"RadiusDisplayLabel"}, + "UseSameUsername":{"shape":"UseSameUsername"} + } + }, + "RadiusSharedSecret":{ + "type":"string", + "max":512, + "min":8, + "sensitive":true + }, + "RadiusStatus":{ + "type":"string", + "enum":[ + 
"Creating", + "Completed", + "Failed" + ] + }, + "RadiusTimeout":{ + "type":"integer", + "max":20, + "min":1 + }, + "RemoteDomainName":{ + "type":"string", + "pattern":"^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$" + }, + "RequestId":{ + "type":"string", + "pattern":"^([A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12})$" + }, + "RestoreFromSnapshotRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "RestoreFromSnapshotResult":{ + "type":"structure", + "members":{ + } + }, + "SID":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[&\\w+-.@]+" + }, + "SecurityGroupId":{ + "type":"string", + "pattern":"^(sg-[0-9a-f]{8})$" + }, + "Server":{ + "type":"string", + "max":256, + "min":1 + }, + "Servers":{ + "type":"list", + "member":{"shape":"Server"} + }, + "ServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true, + "fault":true + }, + "Snapshot":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "SnapshotId":{"shape":"SnapshotId"}, + "Type":{"shape":"SnapshotType"}, + "Name":{"shape":"SnapshotName"}, + "Status":{"shape":"SnapshotStatus"}, + "StartTime":{"shape":"StartTime"} + } + }, + "SnapshotId":{ + "type":"string", + "pattern":"^s-[0-9a-f]{10}$" + }, + "SnapshotIds":{ + "type":"list", + "member":{"shape":"SnapshotId"} + }, + "SnapshotLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "SnapshotLimits":{ + "type":"structure", + "members":{ + "ManualSnapshotsLimit":{"shape":"Limit"}, + "ManualSnapshotsCurrentCount":{"shape":"Limit"}, + "ManualSnapshotsLimitReached":{"shape":"ManualSnapshotsLimitReached"} + } + }, + "SnapshotName":{ + "type":"string", + "max":128, + "min":0, + 
"pattern":"^([a-zA-Z0-9_])[\\\\a-zA-Z0-9_@#%*+=:?./!\\s-]*$" + }, + "SnapshotStatus":{ + "type":"string", + "enum":[ + "Creating", + "Completed", + "Failed" + ] + }, + "SnapshotType":{ + "type":"string", + "enum":[ + "Auto", + "Manual" + ] + }, + "Snapshots":{ + "type":"list", + "member":{"shape":"Snapshot"} + }, + "SsoEnabled":{"type":"boolean"}, + "StageReason":{"type":"string"}, + "StartTime":{"type":"timestamp"}, + "StateLastUpdatedDateTime":{"type":"timestamp"}, + "SubnetId":{ + "type":"string", + "pattern":"^(subnet-[0-9a-f]{8})$" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"} + }, + "Trust":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "TrustId":{"shape":"TrustId"}, + "RemoteDomainName":{"shape":"RemoteDomainName"}, + "TrustType":{"shape":"TrustType"}, + "TrustDirection":{"shape":"TrustDirection"}, + "TrustState":{"shape":"TrustState"}, + "CreatedDateTime":{"shape":"CreatedDateTime"}, + "LastUpdatedDateTime":{"shape":"LastUpdatedDateTime"}, + "StateLastUpdatedDateTime":{"shape":"StateLastUpdatedDateTime"} + } + }, + "TrustDirection":{ + "type":"string", + "enum":[ + "One-Way: Outgoing", + "One-Way: Incoming", + "Two-Way" + ] + }, + "TrustId":{ + "type":"string", + "pattern":"^t-[0-9a-f]{10}$" + }, + "TrustIds":{ + "type":"list", + "member":{"shape":"TrustId"} + }, + "TrustPassword":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, + "TrustState":{ + "type":"string", + "enum":[ + "Creating", + "Created", + "Verifying", + "VerifyFailed", + "Verified", + "Deleting", + "Deleted", + "Failed" + ] + }, + "TrustType":{ + "type":"string", + "enum":["Forest"] + }, + "Trusts":{ + "type":"list", + "member":{"shape":"Trust"} + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "exception":true + }, + "UpdateRadiusRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + 
"RadiusSettings" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "RadiusSettings":{"shape":"RadiusSettings"} + } + }, + "UpdateRadiusResult":{ + "type":"structure", + "members":{ + } + }, + "UseSameUsername":{"type":"boolean"}, + "UserName":{ + "type":"string", + "min":1, + "pattern":"[a-zA-Z0-9._-]+" + }, + "VerifyTrustRequest":{ + "type":"structure", + "required":["TrustId"], + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + "VerifyTrustResult":{ + "type":"structure", + "members":{ + "TrustId":{"shape":"TrustId"} + } + }, + "VpcId":{ + "type":"string", + "pattern":"^(vpc-[0-9a-f]{8})$" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,899 @@ +{ + "version": "2.0", + "service": "AWS Directory Service

    This is the AWS Directory Service API Reference. This guide provides detailed information about AWS Directory Service operations, data types, parameters, and errors.

    ", + "operations": { + "ConnectDirectory": "

    Creates an AD Connector to connect to an on-premises directory.

    ", + "CreateAlias": "

    Creates an alias for a directory and assigns the alias to the directory. The alias is used to construct the access URL for the directory, such as http://alias.awsapps.com.

    After an alias has been created, it cannot be deleted or reused, so this operation should only be used when absolutely necessary.

    ", + "CreateComputer": "

    Creates a computer account in the specified directory, and joins the computer to the directory.

    ", + "CreateDirectory": "

    Creates a Simple AD directory.

    ", + "CreateMicrosoftAD": "Creates a Microsoft AD in the AWS cloud.", + "CreateSnapshot": "

    Creates a snapshot of a Simple AD directory.

    You cannot take snapshots of AD Connector directories.

    ", + "CreateTrust": "

    AWS Directory Service for Microsoft Active Directory allows you to configure trust relationships. For example, you can establish a trust between your Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active Directory. This would allow you to provide users and groups access to resources in either domain, with a single set of credentials.

    This action initiates the creation of the AWS side of a trust relationship between a Microsoft AD in the AWS cloud and an external domain.

    ", + "DeleteDirectory": "

    Deletes an AWS Directory Service directory.

    ", + "DeleteSnapshot": "

    Deletes a directory snapshot.

    ", + "DeleteTrust": "Deletes an existing trust relationship between your Microsoft AD in the AWS cloud and an external domain.", + "DescribeDirectories": "

    Obtains information about the directories that belong to this account.

    You can retrieve information about specific directories by passing the directory identifiers in the DirectoryIds parameter. Otherwise, all directories that belong to the current account are returned.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken member contains a token that you pass in the next call to DescribeDirectories to retrieve the next set of items.

    You can also specify a maximum number of return results with the Limit parameter.

    ", + "DescribeSnapshots": "

    Obtains information about the directory snapshots that belong to this account.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeSnapshots.NextToken member contains a token that you pass in the next call to DescribeSnapshots to retrieve the next set of items.

    You can also specify a maximum number of return results with the Limit parameter.

    ", + "DescribeTrusts": "

    Obtains information about the trust relationships for this account.

    If no input parameters are provided, such as DirectoryId or TrustIds, this request describes all the trust relationships belonging to the account.

    ", + "DisableRadius": "

    Disables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector directory.

    ", + "DisableSso": "

    Disables single-sign on for a directory.

    ", + "EnableRadius": "

    Enables multi-factor authentication (MFA) with the Remote Authentication Dial In User Service (RADIUS) server for an AD Connector directory.

    ", + "EnableSso": "

    Enables single-sign on for a directory.

    ", + "GetDirectoryLimits": "

    Obtains directory limit information for the current region.

    ", + "GetSnapshotLimits": "

    Obtains the manual snapshot limits for a directory.

    ", + "RestoreFromSnapshot": "

    Restores a directory using an existing directory snapshot.

    When you restore a directory from a snapshot, any changes made to the directory after the snapshot date are overwritten.

    This action returns as soon as the restore operation is initiated. You can monitor the progress of the restore operation by calling the DescribeDirectories operation with the directory identifier. When the DirectoryDescription.Stage value changes to Active, the restore operation is complete.

    ", + "UpdateRadius": "

    Updates the Remote Authentication Dial In User Service (RADIUS) server information for an AD Connector directory.

    ", + "VerifyTrust": "

    AWS Directory Service for Microsoft Active Directory allows you to configure and verify trust relationships.

    This action verifies a trust relationship between your Microsoft AD in the AWS cloud and an external domain.

    " + }, + "shapes": { + "AccessUrl": { + "base": null, + "refs": { + "DirectoryDescription$AccessUrl": "

    The access URL for the directory, such as http://alias.awsapps.com. If no alias has been created for the directory, alias is the directory identifier, such as d-XXXXXXXXXX.

    " + } + }, + "AliasName": { + "base": null, + "refs": { + "CreateAliasRequest$Alias": "

    The requested alias.

    The alias must be unique amongst all aliases in AWS. This operation throws an EntityAlreadyExistsException error if the alias already exists.

    ", + "CreateAliasResult$Alias": "

    The alias for the directory.

    ", + "DirectoryDescription$Alias": "

    The alias for the directory. If no alias has been created for the directory, the alias is the directory identifier, such as d-XXXXXXXXXX.

    " + } + }, + "Attribute": { + "base": "

    Represents a named directory attribute.

    ", + "refs": { + "Attributes$member": null + } + }, + "AttributeName": { + "base": null, + "refs": { + "Attribute$Name": "

    The name of the attribute.

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "Attribute$Value": "

    The value of the attribute.

    " + } + }, + "Attributes": { + "base": null, + "refs": { + "Computer$ComputerAttributes": "

    An array of Attribute objects containing the LDAP attributes that belong to the computer account.

    ", + "CreateComputerRequest$ComputerAttributes": "

    An array of Attribute objects that contain any LDAP attributes to apply to the computer account.

    " + } + }, + "AuthenticationFailedException": { + "base": "

    An authentication error occurred.

    ", + "refs": { + } + }, + "AvailabilityZone": { + "base": null, + "refs": { + "AvailabilityZones$member": null + } + }, + "AvailabilityZones": { + "base": null, + "refs": { + "DirectoryConnectSettingsDescription$AvailabilityZones": "

    A list of the Availability Zones that the directory is in.

    ", + "DirectoryVpcSettingsDescription$AvailabilityZones": "

    The list of Availability Zones that the directory is in.

    " + } + }, + "ClientException": { + "base": "

    A client exception has occurred.

    ", + "refs": { + } + }, + "CloudOnlyDirectoriesLimitReached": { + "base": null, + "refs": { + "DirectoryLimits$CloudOnlyDirectoriesLimitReached": "

    Indicates if the cloud directory limit has been reached.

    ", + "DirectoryLimits$CloudOnlyMicrosoftADLimitReached": "Indicates if the Microsoft AD directory limit has been reached." + } + }, + "Computer": { + "base": "

    Contains information about a computer account in a directory.

    ", + "refs": { + "CreateComputerResult$Computer": "

    A Computer object that represents the computer account.

    " + } + }, + "ComputerName": { + "base": null, + "refs": { + "Computer$ComputerName": "

    The computer name.

    ", + "CreateComputerRequest$ComputerName": "

    The name of the computer account.

    " + } + }, + "ComputerPassword": { + "base": null, + "refs": { + "CreateComputerRequest$Password": "

    A one-time password that is used to join the computer to the directory. You should generate a random, strong password to use for this parameter.

    " + } + }, + "ConnectDirectoryRequest": { + "base": "

    Contains the inputs for the ConnectDirectory operation.

    ", + "refs": { + } + }, + "ConnectDirectoryResult": { + "base": "

    Contains the results of the ConnectDirectory operation.

    ", + "refs": { + } + }, + "ConnectPassword": { + "base": null, + "refs": { + "ConnectDirectoryRequest$Password": "

    The password for the on-premises user account.

    ", + "DisableSsoRequest$Password": "

    The password of an alternate account to use to disable single-sign on. This is only used for AD Connector directories. For more information, see the UserName parameter.

    ", + "EnableSsoRequest$Password": "

    The password of an alternate account to use to enable single-sign on. This is only used for AD Connector directories. For more information, see the UserName parameter.

    " + } + }, + "ConnectedDirectoriesLimitReached": { + "base": null, + "refs": { + "DirectoryLimits$ConnectedDirectoriesLimitReached": "

    Indicates if the connected directory limit has been reached.

    " + } + }, + "CreateAliasRequest": { + "base": "

    Contains the inputs for the CreateAlias operation.

    ", + "refs": { + } + }, + "CreateAliasResult": { + "base": "

    Contains the results of the CreateAlias operation.

    ", + "refs": { + } + }, + "CreateComputerRequest": { + "base": "

    Contains the inputs for the CreateComputer operation.

    ", + "refs": { + } + }, + "CreateComputerResult": { + "base": "

    Contains the results for the CreateComputer operation.

    ", + "refs": { + } + }, + "CreateDirectoryRequest": { + "base": "

    Contains the inputs for the CreateDirectory operation.

    ", + "refs": { + } + }, + "CreateDirectoryResult": { + "base": "

    Contains the results of the CreateDirectory operation.

    ", + "refs": { + } + }, + "CreateMicrosoftADRequest": { + "base": "Creates a Microsoft AD in the AWS cloud.", + "refs": { + } + }, + "CreateMicrosoftADResult": { + "base": null, + "refs": { + } + }, + "CreateSnapshotRequest": { + "base": "

    Contains the inputs for the CreateSnapshot operation.

    ", + "refs": { + } + }, + "CreateSnapshotResult": { + "base": "

    Contains the results of the CreateSnapshot operation.

    ", + "refs": { + } + }, + "CreateTrustRequest": { + "base": "

    AWS Directory Service for Microsoft Active Directory allows you to configure trust relationships. For example, you can establish a trust between your Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active Directory. This would allow you to provide users and groups access to resources in either domain, with a single set of credentials.

    This action initiates the creation of the AWS side of a trust relationship between a Microsoft AD in the AWS cloud and an external domain.

    ", + "refs": { + } + }, + "CreateTrustResult": { + "base": null, + "refs": { + } + }, + "CreatedDateTime": { + "base": null, + "refs": { + "Trust$CreatedDateTime": "The date and time that the trust relationship was created." + } + }, + "DeleteDirectoryRequest": { + "base": "

    Contains the inputs for the DeleteDirectory operation.

    ", + "refs": { + } + }, + "DeleteDirectoryResult": { + "base": "

    Contains the results of the DeleteDirectory operation.

    ", + "refs": { + } + }, + "DeleteSnapshotRequest": { + "base": "

    Contains the inputs for the DeleteSnapshot operation.

    ", + "refs": { + } + }, + "DeleteSnapshotResult": { + "base": "

    Contains the results of the DeleteSnapshot operation.

    ", + "refs": { + } + }, + "DeleteTrustRequest": { + "base": "Deletes the local side of an existing trust relationship between the Microsoft AD in the AWS cloud and the external domain.", + "refs": { + } + }, + "DeleteTrustResult": { + "base": null, + "refs": { + } + }, + "DescribeDirectoriesRequest": { + "base": "

    Contains the inputs for the DescribeDirectories operation.

    ", + "refs": { + } + }, + "DescribeDirectoriesResult": { + "base": "

    Contains the results of the DescribeDirectories operation.

    ", + "refs": { + } + }, + "DescribeSnapshotsRequest": { + "base": "

    Contains the inputs for the DescribeSnapshots operation.

    ", + "refs": { + } + }, + "DescribeSnapshotsResult": { + "base": "

    Contains the results of the DescribeSnapshots operation.

    ", + "refs": { + } + }, + "DescribeTrustsRequest": { + "base": "Describes the trust relationships for a particular Microsoft AD in the AWS cloud. If no input parameters are are provided, such as directory ID or trust ID, this request describes all the trust relationships.", + "refs": { + } + }, + "DescribeTrustsResult": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ConnectDirectoryRequest$Description": "

    A textual description for the directory.

    ", + "CreateDirectoryRequest$Description": "

    A textual description for the directory.

    ", + "CreateMicrosoftADRequest$Description": "

    A textual description for the directory. This label will appear on the AWS console Directory Details page after the directory is created.

    ", + "DirectoryDescription$Description": "

    The textual description for the directory.

    " + } + }, + "DirectoryConnectSettings": { + "base": "

    Contains information for the ConnectDirectory operation when an AD Connector directory is being created.

    ", + "refs": { + "ConnectDirectoryRequest$ConnectSettings": "

    A DirectoryConnectSettings object that contains additional information for the operation.

    " + } + }, + "DirectoryConnectSettingsDescription": { + "base": "

    Contains information about an AD Connector directory.

    ", + "refs": { + "DirectoryDescription$ConnectSettings": "

    A DirectoryConnectSettingsDescription object that contains additional information about an AD Connector directory. This member is only present if the directory is an AD Connector directory.

    " + } + }, + "DirectoryDescription": { + "base": "

    Contains information about an AWS Directory Service directory.

    ", + "refs": { + "DirectoryDescriptions$member": null + } + }, + "DirectoryDescriptions": { + "base": "

    A list of directory descriptions.

    ", + "refs": { + "DescribeDirectoriesResult$DirectoryDescriptions": "

    The list of DirectoryDescription objects that were retrieved.

    It is possible that this list contains less than the number of items specified in the Limit member of the request. This occurs if there are less than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

    " + } + }, + "DirectoryId": { + "base": null, + "refs": { + "ConnectDirectoryResult$DirectoryId": "

    The identifier of the new directory.

    ", + "CreateAliasRequest$DirectoryId": "

    The identifier of the directory for which to create the alias.

    ", + "CreateAliasResult$DirectoryId": "

    The identifier of the directory.

    ", + "CreateComputerRequest$DirectoryId": "

    The identifier of the directory in which to create the computer account.

    ", + "CreateDirectoryResult$DirectoryId": "

    The identifier of the directory that was created.

    ", + "CreateMicrosoftADResult$DirectoryId": "The identifier of the directory that was created.", + "CreateSnapshotRequest$DirectoryId": "

    The identifier of the directory to take a snapshot of.

    ", + "CreateTrustRequest$DirectoryId": "The Directory ID of the Microsoft AD in the AWS cloud for which to establish the trust relationship.", + "DeleteDirectoryRequest$DirectoryId": "

    The identifier of the directory to delete.

    ", + "DeleteDirectoryResult$DirectoryId": "

    The directory identifier.

    ", + "DescribeSnapshotsRequest$DirectoryId": "

    The identifier of the directory for which to retrieve snapshot information.

    ", + "DescribeTrustsRequest$DirectoryId": "The Directory ID of the AWS directory that is a part of the requested trust relationship.", + "DirectoryDescription$DirectoryId": "

    The directory identifier.

    ", + "DirectoryIds$member": null, + "DisableRadiusRequest$DirectoryId": "

    The identifier of the directory for which to disable MFA.

    ", + "DisableSsoRequest$DirectoryId": "

    The identifier of the directory for which to disable single-sign on.

    ", + "EnableRadiusRequest$DirectoryId": "

    The identifier of the directory for which to enable MFA.

    ", + "EnableSsoRequest$DirectoryId": "

    The identifier of the directory for which to enable single-sign on.

    ", + "GetSnapshotLimitsRequest$DirectoryId": "

    Contains the identifier of the directory to obtain the limits for.

    ", + "Snapshot$DirectoryId": "

    The directory identifier.

    ", + "Trust$DirectoryId": "The Directory ID of the AWS directory involved in the trust relationship.", + "UpdateRadiusRequest$DirectoryId": "

    The identifier of the directory for which to update the RADIUS server information.

    " + } + }, + "DirectoryIds": { + "base": "

    A list of directory identifiers.

    ", + "refs": { + "DescribeDirectoriesRequest$DirectoryIds": "

    A list of identifiers of the directories for which to obtain the information. If this member is null, all directories that belong to the current account are returned.

    An empty list results in an InvalidParameterException being thrown.

    " + } + }, + "DirectoryLimitExceededException": { + "base": "

    The maximum number of directories in the region has been reached. You can use the GetDirectoryLimits operation to determine your directory limits in the region.

    ", + "refs": { + } + }, + "DirectoryLimits": { + "base": "

    Contains directory limit information for a region.

    ", + "refs": { + "GetDirectoryLimitsResult$DirectoryLimits": "

    A DirectoryLimits object that contains the directory limits for the current region.

    " + } + }, + "DirectoryName": { + "base": null, + "refs": { + "ConnectDirectoryRequest$Name": "

    The fully-qualified name of the on-premises directory, such as corp.example.com.

    ", + "CreateDirectoryRequest$Name": "

    The fully qualified name for the directory, such as corp.example.com.

    ", + "CreateMicrosoftADRequest$Name": "

    The fully qualified domain name for the directory, such as corp.example.com. This name will resolve inside your VPC only. It does not need to be publicly resolvable.

    ", + "DirectoryDescription$Name": "

    The fully-qualified name of the directory.

    " + } + }, + "DirectoryShortName": { + "base": null, + "refs": { + "ConnectDirectoryRequest$ShortName": "

    The NetBIOS name of the on-premises directory, such as CORP.

    ", + "CreateDirectoryRequest$ShortName": "

    The short name of the directory, such as CORP.

    ", + "CreateMicrosoftADRequest$ShortName": "

    The NetBIOS name for your domain. A short identifier for your domain, such as CORP. If you don't specify a NetBIOS name, it will default to the first part of your directory DNS. For example, CORP for the directory DNS corp.example.com.

    ", + "DirectoryDescription$ShortName": "

    The short name of the directory.

    " + } + }, + "DirectorySize": { + "base": null, + "refs": { + "ConnectDirectoryRequest$Size": "

    The size of the directory.

    ", + "CreateDirectoryRequest$Size": "

    The size of the directory.

    ", + "DirectoryDescription$Size": "

    The directory size.

    " + } + }, + "DirectoryStage": { + "base": null, + "refs": { + "DirectoryDescription$Stage": "

    The current stage of the directory.

    " + } + }, + "DirectoryType": { + "base": null, + "refs": { + "DirectoryDescription$Type": "

    The directory size.

    " + } + }, + "DirectoryUnavailableException": { + "base": "

    The specified directory is unavailable or could not be found.

    ", + "refs": { + } + }, + "DirectoryVpcSettings": { + "base": "

    Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation.

    ", + "refs": { + "CreateDirectoryRequest$VpcSettings": "

    A DirectoryVpcSettings object that contains additional information for the operation.

    ", + "CreateMicrosoftADRequest$VpcSettings": null + } + }, + "DirectoryVpcSettingsDescription": { + "base": "

    Contains information about the directory.

    ", + "refs": { + "DirectoryDescription$VpcSettings": "

    A DirectoryVpcSettingsDescription object that contains additional information about a directory. This member is only present if the directory is a Simple AD or Managed AD directory.

    " + } + }, + "DisableRadiusRequest": { + "base": "

    Contains the inputs for the DisableRadius operation.

    ", + "refs": { + } + }, + "DisableRadiusResult": { + "base": "

    Contains the results of the DisableRadius operation.

    ", + "refs": { + } + }, + "DisableSsoRequest": { + "base": "

    Contains the inputs for the DisableSso operation.

    ", + "refs": { + } + }, + "DisableSsoResult": { + "base": "

    Contains the results of the DisableSso operation.

    ", + "refs": { + } + }, + "DnsIpAddrs": { + "base": null, + "refs": { + "DirectoryConnectSettings$CustomerDnsIps": "

    A list of one or more IP addresses of DNS servers or domain controllers in the on-premises directory.

    ", + "DirectoryDescription$DnsIpAddrs": "

    The IP addresses of the DNS servers for the directory. For a Simple AD or Microsoft AD directory, these are the IP addresses of the Simple AD or Microsoft AD directory servers. For an AD Connector directory, these are the IP addresses of the DNS servers or domain controllers in the on-premises directory to which the AD Connector is connected.

    " + } + }, + "EnableRadiusRequest": { + "base": "

    Contains the inputs for the EnableRadius operation.

    ", + "refs": { + } + }, + "EnableRadiusResult": { + "base": "

    Contains the results of the EnableRadius operation.

    ", + "refs": { + } + }, + "EnableSsoRequest": { + "base": "

    Contains the inputs for the EnableSso operation.

    ", + "refs": { + } + }, + "EnableSsoResult": { + "base": "

    Contains the results of the EnableSso operation.

    ", + "refs": { + } + }, + "EntityAlreadyExistsException": { + "base": "

    The specified entity already exists.

    ", + "refs": { + } + }, + "EntityDoesNotExistException": { + "base": "

    The specified entity could not be found.

    ", + "refs": { + } + }, + "ExceptionMessage": { + "base": "

    The descriptive message for the exception.

    ", + "refs": { + "AuthenticationFailedException$Message": "

    The textual message for the exception.

    ", + "ClientException$Message": null, + "DirectoryLimitExceededException$Message": null, + "DirectoryUnavailableException$Message": null, + "EntityAlreadyExistsException$Message": null, + "EntityDoesNotExistException$Message": null, + "InsufficientPermissionsException$Message": null, + "InvalidNextTokenException$Message": null, + "InvalidParameterException$Message": null, + "ServiceException$Message": null, + "SnapshotLimitExceededException$Message": null, + "UnsupportedOperationException$Message": null + } + }, + "GetDirectoryLimitsRequest": { + "base": "

    Contains the inputs for the GetDirectoryLimits operation.

    ", + "refs": { + } + }, + "GetDirectoryLimitsResult": { + "base": "

    Contains the results of the GetDirectoryLimits operation.

    ", + "refs": { + } + }, + "GetSnapshotLimitsRequest": { + "base": "

    Contains the inputs for the GetSnapshotLimits operation.

    ", + "refs": { + } + }, + "GetSnapshotLimitsResult": { + "base": "

    Contains the results of the GetSnapshotLimits operation.

    ", + "refs": { + } + }, + "InsufficientPermissionsException": { + "base": "

    The account does not have sufficient permission to perform the operation.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The NextToken value is not valid.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    One or more parameters are not valid.

    ", + "refs": { + } + }, + "IpAddr": { + "base": null, + "refs": { + "DnsIpAddrs$member": null, + "IpAddrs$member": null + } + }, + "IpAddrs": { + "base": null, + "refs": { + "DirectoryConnectSettingsDescription$ConnectIps": "

    The IP addresses of the AD Connector servers.

    " + } + }, + "LastUpdatedDateTime": { + "base": null, + "refs": { + "DirectoryDescription$StageLastUpdatedDateTime": "

    The date and time that the stage was last updated.

    ", + "Trust$LastUpdatedDateTime": "The date and time that the trust relationship was last updated." + } + }, + "LaunchTime": { + "base": null, + "refs": { + "DirectoryDescription$LaunchTime": "

    Specifies when the directory was created.

    " + } + }, + "Limit": { + "base": null, + "refs": { + "DescribeDirectoriesRequest$Limit": "

    The maximum number of items to return. If this value is zero, the maximum number of items is specified by the limitations of the operation.

    ", + "DescribeSnapshotsRequest$Limit": "

    The maximum number of objects to return.

    ", + "DescribeTrustsRequest$Limit": "The maximum number of objects to return.", + "DirectoryLimits$CloudOnlyDirectoriesLimit": "

    The maximum number of cloud directories allowed in the region.

    ", + "DirectoryLimits$CloudOnlyDirectoriesCurrentCount": "

    The current number of cloud directories in the region.

    ", + "DirectoryLimits$CloudOnlyMicrosoftADLimit": "The maximum number of Microsoft AD directories allowed in the region.", + "DirectoryLimits$CloudOnlyMicrosoftADCurrentCount": "The current number of Microsoft AD directories in the region.", + "DirectoryLimits$ConnectedDirectoriesLimit": "

    The maximum number of connected directories allowed in the region.

    ", + "DirectoryLimits$ConnectedDirectoriesCurrentCount": "

    The current number of connected directories in the region.

    ", + "SnapshotLimits$ManualSnapshotsLimit": "

    The maximum number of manual snapshots allowed.

    ", + "SnapshotLimits$ManualSnapshotsCurrentCount": "

    The current number of manual snapshots of the directory.

    " + } + }, + "ManualSnapshotsLimitReached": { + "base": null, + "refs": { + "SnapshotLimits$ManualSnapshotsLimitReached": "

    Indicates if the manual snapshot limit has been reached.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeDirectoriesRequest$NextToken": "

    The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. Pass null if this is the first call.

    ", + "DescribeDirectoriesResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeDirectories to retrieve the next set of items.

    ", + "DescribeSnapshotsRequest$NextToken": "

    The DescribeSnapshotsResult.NextToken value from a previous call to DescribeSnapshots. Pass null if this is the first call.

    ", + "DescribeSnapshotsResult$NextToken": "

    If not null, more results are available. Pass this value in the NextToken member of a subsequent call to DescribeSnapshots.

    ", + "DescribeTrustsRequest$NextToken": "

    The DescribeTrustsResult.NextToken value from a previous call to DescribeTrusts. Pass null if this is the first call.

    ", + "DescribeTrustsResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeTrusts to retrieve the next set of items.

    " + } + }, + "OrganizationalUnitDN": { + "base": null, + "refs": { + "CreateComputerRequest$OrganizationalUnitDistinguishedName": "

    The fully-qualified distinguished name of the organizational unit to place the computer account in.

    " + } + }, + "Password": { + "base": null, + "refs": { + "CreateDirectoryRequest$Password": "

    The password for the directory administrator. The directory creation process creates a directory administrator account with the username Administrator and this password.

    ", + "CreateMicrosoftADRequest$Password": "

    The password for the default administrative user named Admin.

    " + } + }, + "PortNumber": { + "base": null, + "refs": { + "RadiusSettings$RadiusPort": "

    The port that your RADIUS server is using for communications. Your on-premises network must allow inbound traffic over this port from the AWS Directory Service servers.

    " + } + }, + "RadiusAuthenticationProtocol": { + "base": null, + "refs": { + "RadiusSettings$AuthenticationProtocol": "

    The protocol specified for your RADIUS endpoints.

    " + } + }, + "RadiusDisplayLabel": { + "base": null, + "refs": { + "RadiusSettings$DisplayLabel": "

    Not currently used.

    " + } + }, + "RadiusRetries": { + "base": null, + "refs": { + "RadiusSettings$RadiusRetries": "

    The maximum number of times that communication with the RADIUS server is attempted.

    " + } + }, + "RadiusSettings": { + "base": "

    Contains information about a Remote Authentication Dial In User Service (RADIUS) server.

    ", + "refs": { + "DirectoryDescription$RadiusSettings": "

    A RadiusSettings object that contains information about the RADIUS server configured for this directory.

    ", + "EnableRadiusRequest$RadiusSettings": "

    A RadiusSettings object that contains information about the RADIUS server.

    ", + "UpdateRadiusRequest$RadiusSettings": "

    A RadiusSettings object that contains information about the RADIUS server.

    " + } + }, + "RadiusSharedSecret": { + "base": null, + "refs": { + "RadiusSettings$SharedSecret": "

    The shared secret code that was specified when your RADIUS endpoints were created.

    " + } + }, + "RadiusStatus": { + "base": null, + "refs": { + "DirectoryDescription$RadiusStatus": "

    The status of the RADIUS MFA server connection.

    " + } + }, + "RadiusTimeout": { + "base": null, + "refs": { + "RadiusSettings$RadiusTimeout": "

    The amount of time, in seconds, to wait for the RADIUS server to respond.

    " + } + }, + "RemoteDomainName": { + "base": null, + "refs": { + "CreateTrustRequest$RemoteDomainName": "The Fully Qualified Domain Name (FQDN) of the external domain for which to create the trust relationship.", + "Trust$RemoteDomainName": "The Fully Qualified Domain Name (FQDN) of the external domain involved in the trust relationship." + } + }, + "RequestId": { + "base": "

    The AWS request identifier.

    ", + "refs": { + "AuthenticationFailedException$RequestId": "

    The identifier of the request that caused the exception.

    ", + "ClientException$RequestId": null, + "DirectoryLimitExceededException$RequestId": null, + "DirectoryUnavailableException$RequestId": null, + "EntityAlreadyExistsException$RequestId": null, + "EntityDoesNotExistException$RequestId": null, + "InsufficientPermissionsException$RequestId": null, + "InvalidNextTokenException$RequestId": null, + "InvalidParameterException$RequestId": null, + "ServiceException$RequestId": null, + "SnapshotLimitExceededException$RequestId": null, + "UnsupportedOperationException$RequestId": null + } + }, + "RestoreFromSnapshotRequest": { + "base": "

    An object representing the inputs for the RestoreFromSnapshot operation.

    ", + "refs": { + } + }, + "RestoreFromSnapshotResult": { + "base": "

    Contains the results of the RestoreFromSnapshot operation.

    ", + "refs": { + } + }, + "SID": { + "base": null, + "refs": { + "Computer$ComputerId": "

    The identifier of the computer.

    " + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "DirectoryConnectSettingsDescription$SecurityGroupId": "

    The security group identifier for the AD Connector directory.

    ", + "DirectoryVpcSettingsDescription$SecurityGroupId": "

    The security group identifier for the directory. If the directory was created before 8/1/2014, this is the identifier of the directory members security group that was created when the directory was created. If the directory was created after this date, this value is null.

    " + } + }, + "Server": { + "base": null, + "refs": { + "Servers$member": null + } + }, + "Servers": { + "base": null, + "refs": { + "RadiusSettings$RadiusServers": "

    An array of strings that contains the IP addresses of the RADIUS server endpoints, or the IP addresses of your RADIUS server load balancer.

    " + } + }, + "ServiceException": { + "base": "

    An exception has occurred in AWS Directory Service.

    ", + "refs": { + } + }, + "Snapshot": { + "base": "

    Describes a directory snapshot.

    ", + "refs": { + "Snapshots$member": null + } + }, + "SnapshotId": { + "base": null, + "refs": { + "CreateSnapshotResult$SnapshotId": "

    The identifier of the snapshot that was created.

    ", + "DeleteSnapshotRequest$SnapshotId": "

    The identifier of the directory snapshot to be deleted.

    ", + "DeleteSnapshotResult$SnapshotId": "

    The identifier of the directory snapshot that was deleted.

    ", + "RestoreFromSnapshotRequest$SnapshotId": "

    The identifier of the snapshot to restore from.

    ", + "Snapshot$SnapshotId": "

    The snapshot identifier.

    ", + "SnapshotIds$member": null + } + }, + "SnapshotIds": { + "base": "

    A list of directory snapshot identifiers.

    ", + "refs": { + "DescribeSnapshotsRequest$SnapshotIds": "

    A list of identifiers of the snapshots to obtain the information for. If this member is null or empty, all snapshots are returned using the Limit and NextToken members.

    " + } + }, + "SnapshotLimitExceededException": { + "base": "

    The maximum number of manual snapshots for the directory has been reached. You can use the GetSnapshotLimits operation to determine the snapshot limits for a directory.

    ", + "refs": { + } + }, + "SnapshotLimits": { + "base": "

    Contains manual snapshot limit information for a directory.

    ", + "refs": { + "GetSnapshotLimitsResult$SnapshotLimits": "

    A SnapshotLimits object that contains the manual snapshot limits for the specified directory.

    " + } + }, + "SnapshotName": { + "base": null, + "refs": { + "CreateSnapshotRequest$Name": "

    The descriptive name to apply to the snapshot.

    ", + "Snapshot$Name": "

    The descriptive name of the snapshot.

    " + } + }, + "SnapshotStatus": { + "base": null, + "refs": { + "Snapshot$Status": "

    The snapshot status.

    " + } + }, + "SnapshotType": { + "base": null, + "refs": { + "Snapshot$Type": "

    The snapshot type.

    " + } + }, + "Snapshots": { + "base": "

    A list of descriptions of directory snapshots.

    ", + "refs": { + "DescribeSnapshotsResult$Snapshots": "

    The list of Snapshot objects that were retrieved.

    It is possible that this list contains less than the number of items specified in the Limit member of the request. This occurs if there are less than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

    " + } + }, + "SsoEnabled": { + "base": null, + "refs": { + "DirectoryDescription$SsoEnabled": "

    Indicates if single-sign on is enabled for the directory. For more information, see EnableSso and DisableSso.

    " + } + }, + "StageReason": { + "base": null, + "refs": { + "DirectoryDescription$StageReason": "

    Additional information about the directory stage.

    " + } + }, + "StartTime": { + "base": null, + "refs": { + "Snapshot$StartTime": "

    The date and time that the snapshot was taken.

    " + } + }, + "StateLastUpdatedDateTime": { + "base": null, + "refs": { + "Trust$StateLastUpdatedDateTime": "The date and time that the TrustState was last updated." + } + }, + "SubnetId": { + "base": null, + "refs": { + "SubnetIds$member": null + } + }, + "SubnetIds": { + "base": null, + "refs": { + "DirectoryConnectSettings$SubnetIds": "

    A list of subnet identifiers in the VPC in which the AD Connector is created.

    ", + "DirectoryConnectSettingsDescription$SubnetIds": "

    A list of subnet identifiers in the VPC that the AD connector is in.

    ", + "DirectoryVpcSettings$SubnetIds": "

    The identifiers of the subnets for the directory servers. The two subnets must be in different Availability Zones. AWS Directory Service creates a directory server and a DNS server in each of these subnets.

    ", + "DirectoryVpcSettingsDescription$SubnetIds": "

    The identifiers of the subnets for the directory servers.

    " + } + }, + "Trust": { + "base": "Describes a trust relationship between an Microsoft AD in the AWS cloud and an external domain.", + "refs": { + "Trusts$member": null + } + }, + "TrustDirection": { + "base": null, + "refs": { + "CreateTrustRequest$TrustDirection": "The direction of the trust relationship.", + "Trust$TrustDirection": "The trust relationship direction." + } + }, + "TrustId": { + "base": null, + "refs": { + "CreateTrustResult$TrustId": "A unique identifier for the trust relationship that was created.", + "DeleteTrustRequest$TrustId": "The Trust ID of the trust relationship to be deleted.", + "DeleteTrustResult$TrustId": "The Trust ID of the trust relationship that was deleted.", + "Trust$TrustId": "The unique ID of the trust relationship.", + "TrustIds$member": null, + "VerifyTrustRequest$TrustId": "The unique Trust ID of the trust relationship to verify.", + "VerifyTrustResult$TrustId": "The unique Trust ID of the trust relationship that was verified." + } + }, + "TrustIds": { + "base": null, + "refs": { + "DescribeTrustsRequest$TrustIds": "

    A list of identifiers of the trust relationships for which to obtain the information. If this member is null, all trust relationships that belong to the current account are returned.

    An empty list results in an InvalidParameterException being thrown.

    " + } + }, + "TrustPassword": { + "base": null, + "refs": { + "CreateTrustRequest$TrustPassword": "The trust password. The must be the same password that was used when creating the trust relationship on the external domain." + } + }, + "TrustState": { + "base": null, + "refs": { + "Trust$TrustState": "The trust relationship state." + } + }, + "TrustType": { + "base": null, + "refs": { + "CreateTrustRequest$TrustType": "The trust relationship type.", + "Trust$TrustType": "The trust relationship type." + } + }, + "Trusts": { + "base": null, + "refs": { + "DescribeTrustsResult$Trusts": "

    The list of Trust objects that were retrieved.

    It is possible that this list contains less than the number of items specified in the Limit member of the request. This occurs if there are less than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded.

    " + } + }, + "UnsupportedOperationException": { + "base": "

    The operation is not supported.

    ", + "refs": { + } + }, + "UpdateRadiusRequest": { + "base": "

    Contains the inputs for the UpdateRadius operation.

    ", + "refs": { + } + }, + "UpdateRadiusResult": { + "base": "

    Contains the results of the UpdateRadius operation.

    ", + "refs": { + } + }, + "UseSameUsername": { + "base": null, + "refs": { + "RadiusSettings$UseSameUsername": "

    Not currently used.

    " + } + }, + "UserName": { + "base": null, + "refs": { + "DirectoryConnectSettings$CustomerUserName": "

    The username of an account in the on-premises directory that is used to connect to the directory. This account must have the following privileges:

    • Read users and groups
    • Create computer objects
    • Join computers to the domain
    ", + "DirectoryConnectSettingsDescription$CustomerUserName": "

    The username of the service account in the on-premises directory.

    ", + "DisableSsoRequest$UserName": "

    The username of an alternate account to use to disable single-sign on. This is only used for AD Connector directories. This account must have privileges to remove a service principal name.

    If the AD Connector service account does not have privileges to remove a service principal name, you can specify an alternate account with the UserName and Password parameters. These credentials are only used to disable single sign-on and are not stored by the service. The AD Connector service account is not changed.

    ", + "EnableSsoRequest$UserName": "

    The username of an alternate account to use to enable single-sign on. This is only used for AD Connector directories. This account must have privileges to add a service principal name.

    If the AD Connector service account does not have privileges to add a service principal name, you can specify an alternate account with the UserName and Password parameters. These credentials are only used to enable single sign-on and are not stored by the service. The AD Connector service account is not changed.

    " + } + }, + "VerifyTrustRequest": { + "base": "Initiates the verification of an existing trust relationship between a Microsoft AD in the AWS cloud and an external domain.", + "refs": { + } + }, + "VerifyTrustResult": { + "base": null, + "refs": { + } + }, + "VpcId": { + "base": null, + "refs": { + "DirectoryConnectSettings$VpcId": "

    The identifier of the VPC in which the AD Connector is created.

    ", + "DirectoryConnectSettingsDescription$VpcId": "

    The identifier of the VPC that the AD Connector is in.

    ", + "DirectoryVpcSettings$VpcId": "

    The identifier of the VPC in which to create the directory.

    ", + "DirectoryVpcSettingsDescription$VpcId": "

    The identifier of the VPC that the directory is in.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ds/2015-04-16/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1322 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-08-10", + "endpointPrefix":"dynamodb", + "jsonVersion":"1.0", + "serviceAbbreviation":"DynamoDB", + "serviceFullName":"Amazon DynamoDB", + "signatureVersion":"v4", + "targetPrefix":"DynamoDB_20120810", + "protocol":"json" + }, + "operations":{ + "BatchGetItem":{ + "name":"BatchGetItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetItemInput"}, + "output":{"shape":"BatchGetItemOutput"}, + "errors":[ + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "BatchWriteItem":{ + 
"name":"BatchWriteItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchWriteItemInput"}, + "output":{"shape":"BatchWriteItemOutput"}, + "errors":[ + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ItemCollectionSizeLimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "CreateTable":{ + "name":"CreateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTableInput"}, + "output":{"shape":"CreateTableOutput"}, + "errors":[ + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DeleteItem":{ + "name":"DeleteItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteItemInput"}, + "output":{"shape":"DeleteItemOutput"}, + "errors":[ + { + "shape":"ConditionalCheckFailedException", + "exception":true + }, + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ItemCollectionSizeLimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DeleteTable":{ + "name":"DeleteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTableInput"}, + "output":{"shape":"DeleteTableOutput"}, + "errors":[ + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeTable":{ + "name":"DescribeTable", + "http":{ 
+ "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTableInput"}, + "output":{"shape":"DescribeTableOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "GetItem":{ + "name":"GetItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetItemInput"}, + "output":{"shape":"GetItemOutput"}, + "errors":[ + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "ListTables":{ + "name":"ListTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTablesInput"}, + "output":{"shape":"ListTablesOutput"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "PutItem":{ + "name":"PutItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutItemInput"}, + "output":{"shape":"PutItemOutput"}, + "errors":[ + { + "shape":"ConditionalCheckFailedException", + "exception":true + }, + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ItemCollectionSizeLimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "Query":{ + "name":"Query", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QueryInput"}, + "output":{"shape":"QueryOutput"}, + "errors":[ + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "Scan":{ + "name":"Scan", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"ScanInput"}, + "output":{"shape":"ScanOutput"}, + "errors":[ + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "UpdateItem":{ + "name":"UpdateItem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateItemInput"}, + "output":{"shape":"UpdateItemOutput"}, + "errors":[ + { + "shape":"ConditionalCheckFailedException", + "exception":true + }, + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ItemCollectionSizeLimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "UpdateTable":{ + "name":"UpdateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTableInput"}, + "output":{"shape":"UpdateTableOutput"}, + "errors":[ + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "AttributeAction":{ + "type":"string", + "enum":[ + "ADD", + "PUT", + "DELETE" + ] + }, + "AttributeDefinition":{ + "type":"structure", + "required":[ + "AttributeName", + "AttributeType" + ], + "members":{ + "AttributeName":{"shape":"KeySchemaAttributeName"}, + "AttributeType":{"shape":"ScalarAttributeType"} + } + }, + "AttributeDefinitions":{ + "type":"list", + "member":{"shape":"AttributeDefinition"} + }, + "AttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "AttributeName":{ + "type":"string", + "max":65535 + }, + 
"AttributeNameList":{ + "type":"list", + "member":{"shape":"AttributeName"}, + "min":1 + }, + "AttributeUpdates":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValueUpdate"} + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "S":{"shape":"StringAttributeValue"}, + "N":{"shape":"NumberAttributeValue"}, + "B":{"shape":"BinaryAttributeValue"}, + "SS":{"shape":"StringSetAttributeValue"}, + "NS":{"shape":"NumberSetAttributeValue"}, + "BS":{"shape":"BinarySetAttributeValue"}, + "M":{"shape":"MapAttributeValue"}, + "L":{"shape":"ListAttributeValue"}, + "NULL":{"shape":"NullAttributeValue"}, + "BOOL":{"shape":"BooleanAttributeValue"} + } + }, + "AttributeValueList":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "AttributeValueUpdate":{ + "type":"structure", + "members":{ + "Value":{"shape":"AttributeValue"}, + "Action":{"shape":"AttributeAction"} + } + }, + "Backfilling":{"type":"boolean"}, + "BatchGetItemInput":{ + "type":"structure", + "required":["RequestItems"], + "members":{ + "RequestItems":{"shape":"BatchGetRequestMap"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"} + } + }, + "BatchGetItemOutput":{ + "type":"structure", + "members":{ + "Responses":{"shape":"BatchGetResponseMap"}, + "UnprocessedKeys":{"shape":"BatchGetRequestMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacityMultiple"} + } + }, + "BatchGetRequestMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"KeysAndAttributes"}, + "min":1, + "max":100 + }, + "BatchGetResponseMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"ItemList"} + }, + "BatchWriteItemInput":{ + "type":"structure", + "required":["RequestItems"], + "members":{ + "RequestItems":{"shape":"BatchWriteItemRequestMap"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ReturnItemCollectionMetrics":{"shape":"ReturnItemCollectionMetrics"} + } + }, + "BatchWriteItemOutput":{ + "type":"structure", + 
"members":{ + "UnprocessedItems":{"shape":"BatchWriteItemRequestMap"}, + "ItemCollectionMetrics":{"shape":"ItemCollectionMetricsPerTable"}, + "ConsumedCapacity":{"shape":"ConsumedCapacityMultiple"} + } + }, + "BatchWriteItemRequestMap":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"WriteRequests"}, + "min":1, + "max":25 + }, + "BinaryAttributeValue":{"type":"blob"}, + "BinarySetAttributeValue":{ + "type":"list", + "member":{"shape":"BinaryAttributeValue"} + }, + "BooleanAttributeValue":{"type":"boolean"}, + "BooleanObject":{"type":"boolean"}, + "Capacity":{ + "type":"structure", + "members":{ + "CapacityUnits":{"shape":"ConsumedCapacityUnits"} + } + }, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "EQ", + "NE", + "IN", + "LE", + "LT", + "GE", + "GT", + "BETWEEN", + "NOT_NULL", + "NULL", + "CONTAINS", + "NOT_CONTAINS", + "BEGINS_WITH" + ] + }, + "Condition":{ + "type":"structure", + "required":["ComparisonOperator"], + "members":{ + "AttributeValueList":{"shape":"AttributeValueList"}, + "ComparisonOperator":{"shape":"ComparisonOperator"} + } + }, + "ConditionExpression":{"type":"string"}, + "ConditionalCheckFailedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ConditionalOperator":{ + "type":"string", + "enum":[ + "AND", + "OR" + ] + }, + "ConsistentRead":{"type":"boolean"}, + "ConsumedCapacity":{ + "type":"structure", + "members":{ + "TableName":{"shape":"TableName"}, + "CapacityUnits":{"shape":"ConsumedCapacityUnits"}, + "Table":{"shape":"Capacity"}, + "LocalSecondaryIndexes":{"shape":"SecondaryIndexesCapacityMap"}, + "GlobalSecondaryIndexes":{"shape":"SecondaryIndexesCapacityMap"} + } + }, + "ConsumedCapacityMultiple":{ + "type":"list", + "member":{"shape":"ConsumedCapacity"} + }, + "ConsumedCapacityUnits":{"type":"double"}, + "CreateGlobalSecondaryIndexAction":{ + "type":"structure", + "required":[ + "IndexName", + "KeySchema", + "Projection", + 
"ProvisionedThroughput" + ], + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + } + }, + "CreateTableInput":{ + "type":"structure", + "required":[ + "AttributeDefinitions", + "TableName", + "KeySchema", + "ProvisionedThroughput" + ], + "members":{ + "AttributeDefinitions":{"shape":"AttributeDefinitions"}, + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "LocalSecondaryIndexes":{"shape":"LocalSecondaryIndexList"}, + "GlobalSecondaryIndexes":{"shape":"GlobalSecondaryIndexList"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "StreamSpecification":{"shape":"StreamSpecification"} + } + }, + "CreateTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "Date":{"type":"timestamp"}, + "DeleteGlobalSecondaryIndexAction":{ + "type":"structure", + "required":["IndexName"], + "members":{ + "IndexName":{"shape":"IndexName"} + } + }, + "DeleteItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Key" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ReturnValues":{"shape":"ReturnValue"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ReturnItemCollectionMetrics":{"shape":"ReturnItemCollectionMetrics"}, + "ConditionExpression":{"shape":"ConditionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + } + }, + "DeleteItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"}, + "ItemCollectionMetrics":{"shape":"ItemCollectionMetrics"} + } + }, + "DeleteRequest":{ + "type":"structure", + 
"required":["Key"], + "members":{ + "Key":{"shape":"Key"} + } + }, + "DeleteTableInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"} + } + }, + "DeleteTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "DescribeTableInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"} + } + }, + "DescribeTableOutput":{ + "type":"structure", + "members":{ + "Table":{"shape":"TableDescription"} + } + }, + "ErrorMessage":{"type":"string"}, + "ExpectedAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"ExpectedAttributeValue"} + }, + "ExpectedAttributeValue":{ + "type":"structure", + "members":{ + "Value":{"shape":"AttributeValue"}, + "Exists":{"shape":"BooleanObject"}, + "ComparisonOperator":{"shape":"ComparisonOperator"}, + "AttributeValueList":{"shape":"AttributeValueList"} + } + }, + "ExpressionAttributeNameMap":{ + "type":"map", + "key":{"shape":"ExpressionAttributeNameVariable"}, + "value":{"shape":"AttributeName"} + }, + "ExpressionAttributeNameVariable":{"type":"string"}, + "ExpressionAttributeValueMap":{ + "type":"map", + "key":{"shape":"ExpressionAttributeValueVariable"}, + "value":{"shape":"AttributeValue"} + }, + "ExpressionAttributeValueVariable":{"type":"string"}, + "FilterConditionMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"Condition"} + }, + "GetItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Key" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "ConsistentRead":{"shape":"ConsistentRead"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ProjectionExpression":{"shape":"ProjectionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"} + } + }, + "GetItemOutput":{ + "type":"structure", + 
"members":{ + "Item":{"shape":"AttributeMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"} + } + }, + "GlobalSecondaryIndex":{ + "type":"structure", + "required":[ + "IndexName", + "KeySchema", + "Projection", + "ProvisionedThroughput" + ], + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + } + }, + "GlobalSecondaryIndexDescription":{ + "type":"structure", + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"}, + "IndexStatus":{"shape":"IndexStatus"}, + "Backfilling":{"shape":"Backfilling"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughputDescription"}, + "IndexSizeBytes":{"shape":"Long"}, + "ItemCount":{"shape":"Long"}, + "IndexArn":{"shape":"String"} + } + }, + "GlobalSecondaryIndexDescriptionList":{ + "type":"list", + "member":{"shape":"GlobalSecondaryIndexDescription"} + }, + "GlobalSecondaryIndexList":{ + "type":"list", + "member":{"shape":"GlobalSecondaryIndex"} + }, + "GlobalSecondaryIndexUpdate":{ + "type":"structure", + "members":{ + "Update":{"shape":"UpdateGlobalSecondaryIndexAction"}, + "Create":{"shape":"CreateGlobalSecondaryIndexAction"}, + "Delete":{"shape":"DeleteGlobalSecondaryIndexAction"} + } + }, + "GlobalSecondaryIndexUpdateList":{ + "type":"list", + "member":{"shape":"GlobalSecondaryIndexUpdate"} + }, + "IndexName":{ + "type":"string", + "min":3, + "max":255, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "IndexStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE" + ] + }, + "Integer":{"type":"integer"}, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "ItemCollectionKeyAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + 
"ItemCollectionMetrics":{ + "type":"structure", + "members":{ + "ItemCollectionKey":{"shape":"ItemCollectionKeyAttributeMap"}, + "SizeEstimateRangeGB":{"shape":"ItemCollectionSizeEstimateRange"} + } + }, + "ItemCollectionMetricsMultiple":{ + "type":"list", + "member":{"shape":"ItemCollectionMetrics"} + }, + "ItemCollectionMetricsPerTable":{ + "type":"map", + "key":{"shape":"TableName"}, + "value":{"shape":"ItemCollectionMetricsMultiple"} + }, + "ItemCollectionSizeEstimateBound":{"type":"double"}, + "ItemCollectionSizeEstimateRange":{ + "type":"list", + "member":{"shape":"ItemCollectionSizeEstimateBound"} + }, + "ItemCollectionSizeLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ItemList":{ + "type":"list", + "member":{"shape":"AttributeMap"} + }, + "Key":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "KeyConditions":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"Condition"} + }, + "KeyExpression":{"type":"string"}, + "KeyList":{ + "type":"list", + "member":{"shape":"Key"}, + "min":1, + "max":100 + }, + "KeySchema":{ + "type":"list", + "member":{"shape":"KeySchemaElement"}, + "min":1, + "max":2 + }, + "KeySchemaAttributeName":{ + "type":"string", + "min":1, + "max":255 + }, + "KeySchemaElement":{ + "type":"structure", + "required":[ + "AttributeName", + "KeyType" + ], + "members":{ + "AttributeName":{"shape":"KeySchemaAttributeName"}, + "KeyType":{"shape":"KeyType"} + } + }, + "KeyType":{ + "type":"string", + "enum":[ + "HASH", + "RANGE" + ] + }, + "KeysAndAttributes":{ + "type":"structure", + "required":["Keys"], + "members":{ + "Keys":{"shape":"KeyList"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "ConsistentRead":{"shape":"ConsistentRead"}, + "ProjectionExpression":{"shape":"ProjectionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"} + } + }, + 
"LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListAttributeValue":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "ListTablesInput":{ + "type":"structure", + "members":{ + "ExclusiveStartTableName":{"shape":"TableName"}, + "Limit":{"shape":"ListTablesInputLimit"} + } + }, + "ListTablesInputLimit":{ + "type":"integer", + "min":1, + "max":100 + }, + "ListTablesOutput":{ + "type":"structure", + "members":{ + "TableNames":{"shape":"TableNameList"}, + "LastEvaluatedTableName":{"shape":"TableName"} + } + }, + "LocalSecondaryIndex":{ + "type":"structure", + "required":[ + "IndexName", + "KeySchema", + "Projection" + ], + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"} + } + }, + "LocalSecondaryIndexDescription":{ + "type":"structure", + "members":{ + "IndexName":{"shape":"IndexName"}, + "KeySchema":{"shape":"KeySchema"}, + "Projection":{"shape":"Projection"}, + "IndexSizeBytes":{"shape":"Long"}, + "ItemCount":{"shape":"Long"}, + "IndexArn":{"shape":"String"} + } + }, + "LocalSecondaryIndexDescriptionList":{ + "type":"list", + "member":{"shape":"LocalSecondaryIndexDescription"} + }, + "LocalSecondaryIndexList":{ + "type":"list", + "member":{"shape":"LocalSecondaryIndex"} + }, + "Long":{"type":"long"}, + "MapAttributeValue":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "NonKeyAttributeName":{ + "type":"string", + "min":1, + "max":255 + }, + "NonKeyAttributeNameList":{ + "type":"list", + "member":{"shape":"NonKeyAttributeName"}, + "min":1, + "max":20 + }, + "NullAttributeValue":{"type":"boolean"}, + "NumberAttributeValue":{"type":"string"}, + "NumberSetAttributeValue":{ + "type":"list", + "member":{"shape":"NumberAttributeValue"} + }, + "PositiveIntegerObject":{ + "type":"integer", + "min":1 + }, + "PositiveLongObject":{ + "type":"long", + "min":1 + 
}, + "Projection":{ + "type":"structure", + "members":{ + "ProjectionType":{"shape":"ProjectionType"}, + "NonKeyAttributes":{"shape":"NonKeyAttributeNameList"} + } + }, + "ProjectionExpression":{"type":"string"}, + "ProjectionType":{ + "type":"string", + "enum":[ + "ALL", + "KEYS_ONLY", + "INCLUDE" + ] + }, + "ProvisionedThroughput":{ + "type":"structure", + "required":[ + "ReadCapacityUnits", + "WriteCapacityUnits" + ], + "members":{ + "ReadCapacityUnits":{"shape":"PositiveLongObject"}, + "WriteCapacityUnits":{"shape":"PositiveLongObject"} + } + }, + "ProvisionedThroughputDescription":{ + "type":"structure", + "members":{ + "LastIncreaseDateTime":{"shape":"Date"}, + "LastDecreaseDateTime":{"shape":"Date"}, + "NumberOfDecreasesToday":{"shape":"PositiveLongObject"}, + "ReadCapacityUnits":{"shape":"PositiveLongObject"}, + "WriteCapacityUnits":{"shape":"PositiveLongObject"} + } + }, + "ProvisionedThroughputExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "PutItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Item" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Item":{"shape":"PutItemInputAttributeMap"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ReturnValues":{"shape":"ReturnValue"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ReturnItemCollectionMetrics":{"shape":"ReturnItemCollectionMetrics"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ConditionExpression":{"shape":"ConditionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + } + }, + "PutItemInputAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "PutItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"}, + 
"ItemCollectionMetrics":{"shape":"ItemCollectionMetrics"} + } + }, + "PutRequest":{ + "type":"structure", + "required":["Item"], + "members":{ + "Item":{"shape":"PutItemInputAttributeMap"} + } + }, + "QueryInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"}, + "IndexName":{"shape":"IndexName"}, + "Select":{"shape":"Select"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ConsistentRead":{"shape":"ConsistentRead"}, + "KeyConditions":{"shape":"KeyConditions"}, + "QueryFilter":{"shape":"FilterConditionMap"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ScanIndexForward":{"shape":"BooleanObject"}, + "ExclusiveStartKey":{"shape":"Key"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ProjectionExpression":{"shape":"ProjectionExpression"}, + "FilterExpression":{"shape":"ConditionExpression"}, + "KeyConditionExpression":{"shape":"KeyExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + } + }, + "QueryOutput":{ + "type":"structure", + "members":{ + "Items":{"shape":"ItemList"}, + "Count":{"shape":"Integer"}, + "ScannedCount":{"shape":"Integer"}, + "LastEvaluatedKey":{"shape":"Key"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"} + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ReturnConsumedCapacity":{ + "type":"string", + "enum":[ + "INDEXES", + "TOTAL", + "NONE" + ] + }, + "ReturnItemCollectionMetrics":{ + "type":"string", + "enum":[ + "SIZE", + "NONE" + ] + }, + "ReturnValue":{ + "type":"string", + "enum":[ + "NONE", + "ALL_OLD", + "UPDATED_OLD", + "ALL_NEW", + "UPDATED_NEW" + ] + }, + 
"ScalarAttributeType":{ + "type":"string", + "enum":[ + "S", + "N", + "B" + ] + }, + "ScanInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{"shape":"TableName"}, + "IndexName":{"shape":"IndexName"}, + "AttributesToGet":{"shape":"AttributeNameList"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "Select":{"shape":"Select"}, + "ScanFilter":{"shape":"FilterConditionMap"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ExclusiveStartKey":{"shape":"Key"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "TotalSegments":{"shape":"ScanTotalSegments"}, + "Segment":{"shape":"ScanSegment"}, + "ProjectionExpression":{"shape":"ProjectionExpression"}, + "FilterExpression":{"shape":"ConditionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"}, + "ConsistentRead":{"shape":"ConsistentRead"} + } + }, + "ScanOutput":{ + "type":"structure", + "members":{ + "Items":{"shape":"ItemList"}, + "Count":{"shape":"Integer"}, + "ScannedCount":{"shape":"Integer"}, + "LastEvaluatedKey":{"shape":"Key"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"} + } + }, + "ScanSegment":{ + "type":"integer", + "min":0, + "max":999999 + }, + "ScanTotalSegments":{ + "type":"integer", + "min":1, + "max":1000000 + }, + "SecondaryIndexesCapacityMap":{ + "type":"map", + "key":{"shape":"IndexName"}, + "value":{"shape":"Capacity"} + }, + "Select":{ + "type":"string", + "enum":[ + "ALL_ATTRIBUTES", + "ALL_PROJECTED_ATTRIBUTES", + "SPECIFIC_ATTRIBUTES", + "COUNT" + ] + }, + "StreamArn":{ + "type":"string", + "min":37, + "max":1024 + }, + "StreamEnabled":{"type":"boolean"}, + "StreamSpecification":{ + "type":"structure", + "members":{ + "StreamEnabled":{"shape":"StreamEnabled"}, + "StreamViewType":{"shape":"StreamViewType"} + } + }, + "StreamViewType":{ + "type":"string", + "enum":[ + "NEW_IMAGE", + "OLD_IMAGE", + "NEW_AND_OLD_IMAGES", + "KEYS_ONLY" + 
] + }, + "String":{"type":"string"}, + "StringAttributeValue":{"type":"string"}, + "StringSetAttributeValue":{ + "type":"list", + "member":{"shape":"StringAttributeValue"} + }, + "TableDescription":{ + "type":"structure", + "members":{ + "AttributeDefinitions":{"shape":"AttributeDefinitions"}, + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "TableStatus":{"shape":"TableStatus"}, + "CreationDateTime":{"shape":"Date"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughputDescription"}, + "TableSizeBytes":{"shape":"Long"}, + "ItemCount":{"shape":"Long"}, + "TableArn":{"shape":"String"}, + "LocalSecondaryIndexes":{"shape":"LocalSecondaryIndexDescriptionList"}, + "GlobalSecondaryIndexes":{"shape":"GlobalSecondaryIndexDescriptionList"}, + "StreamSpecification":{"shape":"StreamSpecification"}, + "LatestStreamLabel":{"shape":"String"}, + "LatestStreamArn":{"shape":"StreamArn"} + } + }, + "TableName":{ + "type":"string", + "min":3, + "max":255, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "TableNameList":{ + "type":"list", + "member":{"shape":"TableName"} + }, + "TableStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE" + ] + }, + "UpdateExpression":{"type":"string"}, + "UpdateGlobalSecondaryIndexAction":{ + "type":"structure", + "required":[ + "IndexName", + "ProvisionedThroughput" + ], + "members":{ + "IndexName":{"shape":"IndexName"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + } + }, + "UpdateItemInput":{ + "type":"structure", + "required":[ + "TableName", + "Key" + ], + "members":{ + "TableName":{"shape":"TableName"}, + "Key":{"shape":"Key"}, + "AttributeUpdates":{"shape":"AttributeUpdates"}, + "Expected":{"shape":"ExpectedAttributeMap"}, + "ConditionalOperator":{"shape":"ConditionalOperator"}, + "ReturnValues":{"shape":"ReturnValue"}, + "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, + "ReturnItemCollectionMetrics":{"shape":"ReturnItemCollectionMetrics"}, + 
"UpdateExpression":{"shape":"UpdateExpression"}, + "ConditionExpression":{"shape":"ConditionExpression"}, + "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + } + }, + "UpdateItemOutput":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeMap"}, + "ConsumedCapacity":{"shape":"ConsumedCapacity"}, + "ItemCollectionMetrics":{"shape":"ItemCollectionMetrics"} + } + }, + "UpdateTableInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "AttributeDefinitions":{"shape":"AttributeDefinitions"}, + "TableName":{"shape":"TableName"}, + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "GlobalSecondaryIndexUpdates":{"shape":"GlobalSecondaryIndexUpdateList"}, + "StreamSpecification":{"shape":"StreamSpecification"} + } + }, + "UpdateTableOutput":{ + "type":"structure", + "members":{ + "TableDescription":{"shape":"TableDescription"} + } + }, + "WriteRequest":{ + "type":"structure", + "members":{ + "PutRequest":{"shape":"PutRequest"}, + "DeleteRequest":{"shape":"DeleteRequest"} + } + }, + "WriteRequests":{ + "type":"list", + "member":{"shape":"WriteRequest"}, + "min":1, + "max":25 + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1013 @@ +{ + "version": "2.0", + "operations": { + "BatchGetItem": "

    The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

    A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

    If you request more than 100 items BatchGetItem will return a ValidationException with the message \"Too many items requested for the BatchGetItem call\".

    For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.

    If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

    If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

    For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

    By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

    In order to minimize response latency, BatchGetItem retrieves items in parallel.

    When designing your application, keep in mind that DynamoDB does not return attributes in any particular order. To help parse the response by item, include the primary key values for the items in your request in the AttributesToGet parameter.

    If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.

    ", + "BatchWriteItem": "

    The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

    BatchWriteItem cannot update items. To update items, use the UpdateItem API.

    The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

    Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.

    If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

    For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

    With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

    If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem provides an alternative where the API performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

    Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

    If one or more of the following is true, DynamoDB rejects the entire batch write operation:

    • One or more tables specified in the BatchWriteItem request does not exist.

    • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

    • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

    • There are more than 25 requests in the batch.

    • Any individual item in a batch exceeds 400 KB.

    • The total request size exceeds 16 MB.

    ", + "CreateTable": "

    The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with the same name if you create the tables in different regions.

    CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

    You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

    You can use the DescribeTable API to check the table status.

    ", + "DeleteItem": "

    Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.

    In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

    Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.

    Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.

    ", + "DeleteTable": "

    The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

    DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

    When you delete a table, any indexes on that table are also deleted.

    If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

    Use the DescribeTable API to check the status of the table.

    ", + "DescribeTable": "

    Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

    If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

    ", + "GetItem": "

    The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data.

    GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.

    ", + "ListTables": "

    Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.

    ", + "PutItem": "

    Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values.

    In addition to putting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

    When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

    You can request that PutItem return either a copy of the original item (before the update) or a copy of the updated item (after the update). For more information, see the ReturnValues description below.

    To prevent a new item from replacing an existing item, use a conditional put operation with ComparisonOperator set to NULL for the primary key attribute, or attributes.

    For more information about using this API, see Working with Items in the Amazon DynamoDB Developer Guide.

    ", + "Query": "

    A Query operation uses the primary key of a table or a secondary index to directly access items from that table or index.

    Use the KeyConditionExpression parameter to provide a specific hash key value. The Query operation will return all of the items from the table or index with that hash key value. You can optionally narrow the scope of the Query operation by specifying a range key value and a comparison operator in KeyConditionExpression. You can use the ScanIndexForward parameter to get results in forward or reverse order, by range key or by index key.

    Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

    If the total number of items meeting the query criteria exceeds the result set size limit of 1 MB, the query stops and results are returned to the user with the LastEvaluatedKey element to continue the query in a subsequent operation. Unlike a Scan operation, a Query operation never returns both an empty result set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if the results exceed 1 MB, or if you have used the Limit parameter.

    You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

    ", + "Scan": "

    The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a ScanFilter operation.

    If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

    By default, Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

    By default, Scan uses eventually consistent reads when accessing the data in the table or local secondary index. However, you can use strongly consistent reads instead by setting the ConsistentRead parameter to true.

    ", + "UpdateItem": "

    Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values). If conditions are specified and the item does not exist, then the operation fails and a new item is not created.

    You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.

    ", + "UpdateTable": "

    Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

    You can only perform one of the following operations at once:

    • Modify the provisioned throughput settings of the table.

    • Enable or disable Streams on the table.

    • Remove a global secondary index from the table.

    • Create a new global secondary index on the table. Once the index begins backfilling, you can use UpdateTable to perform other operations.

    UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

    " + }, + "service": "Amazon DynamoDB

    Overview

    This is the Amazon DynamoDB API Reference. This guide provides descriptions and samples of the low-level DynamoDB API. For information about DynamoDB application development, see the Amazon DynamoDB Developer Guide.

    Instead of making the requests to the low-level DynamoDB API directly from your application, we recommend that you use the AWS Software Development Kits (SDKs). The easy-to-use libraries in the AWS SDKs make it unnecessary to call the low-level DynamoDB API directly from your application. The libraries take care of request authentication, serialization, and connection management. For more information, see Using the AWS SDKs with DynamoDB in the Amazon DynamoDB Developer Guide.

    If you decide to code against the low-level DynamoDB API directly, you will need to write the necessary code to authenticate your requests. For more information on signing your requests, see Using the DynamoDB API in the Amazon DynamoDB Developer Guide.

    The following are short descriptions of each low-level API action, organized by function.

    Managing Tables

    • CreateTable - Creates a table with user-specified provisioned throughput settings. You must designate one attribute as the hash primary key for the table; you can optionally designate a second attribute as the range primary key. DynamoDB creates indexes on these key attributes for fast data access. Optionally, you can create one or more secondary indexes, which provide fast data access using non-key attributes.

    • DescribeTable - Returns metadata for a table, such as table size, status, and index information.

    • UpdateTable - Modifies the provisioned throughput settings for a table. Optionally, you can modify the provisioned throughput settings for global secondary indexes on the table.

    • ListTables - Returns a list of all tables associated with the current AWS account and endpoint.

    • DeleteTable - Deletes a table and all of its indexes.

    For conceptual information about managing tables, see Working with Tables in the Amazon DynamoDB Developer Guide.

    Reading Data

    • GetItem - Returns a set of attributes for the item that has a given primary key. By default, GetItem performs an eventually consistent read; however, applications can request a strongly consistent read instead.

    • BatchGetItem - Performs multiple GetItem requests for data items using their primary keys, from one table or multiple tables. The response from BatchGetItem has a size limit of 16 MB and returns a maximum of 100 items. Both eventually consistent and strongly consistent reads can be used.

    • Query - Returns one or more items from a table or a secondary index. You must provide a specific hash key value. You can narrow the scope of the query using comparison operators against a range key value, or on the index key. Query supports either eventual or strong consistency. A single response has a size limit of 1 MB.

    • Scan - Reads every item in a table; the result set is eventually consistent. You can limit the number of items returned by filtering the data attributes, using conditional expressions. Scan can be used to enable ad-hoc querying of a table against non-key attributes; however, since this is a full table scan without using an index, Scan should not be used for any application query use case that requires predictable performance.

    For conceptual information about reading data, see Working with Items and Query and Scan Operations in the Amazon DynamoDB Developer Guide.

    Modifying Data

    • PutItem - Creates a new item, or replaces an existing item with a new item (including all the attributes). By default, if an item in the table already exists with the same primary key, the new item completely replaces the existing item. You can use conditional operators to replace an item only if its attribute values match certain conditions, or to insert a new item only if that item doesn't already exist.

    • UpdateItem - Modifies the attributes of an existing item. You can also use conditional operators to perform an update only if the item's attribute values match certain conditions.

    • DeleteItem - Deletes an item in a table by primary key. You can use conditional operators to perform a delete an item only if the item's attribute values match certain conditions.

    • BatchWriteItem - Performs multiple PutItem and DeleteItem requests across multiple tables in a single request. A failure of any request(s) in the batch will not cause the entire BatchWriteItem operation to fail. Supports batches of up to 25 items to put or delete, with a maximum total request size of 16 MB.

    For conceptual information about modifying data, see Working with Items and Query and Scan Operations in the Amazon DynamoDB Developer Guide.

    ", + "shapes": { + "AttributeAction": { + "base": null, + "refs": { + "AttributeValueUpdate$Action": "

    Specifies how to perform the update. Valid values are PUT (default), DELETE, and ADD. The behavior depends on whether the specified primary key already exists in the table.

    If an item with the specified Key is found in the table:

    • PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.

    • DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type.

      If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c], then the final attribute value would be [b]. Specifying an empty set is an error.

    • ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:

      • If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.

        If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value.

        In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount, but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0, and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3.

      • If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2], and the ADD action specified [3], then the final attribute value would be [1,2,3]. An error occurs if an Add action is specified for a set attribute and the attribute type specified does not match the existing set type.

        Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets.

      This action is only valid for an existing attribute whose data type is number or is a set. Do not use ADD for any other data types.

    If no item with the specified Key is found:

    • PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute.

    • DELETE - Nothing happens; there is no attribute to delete.

    • ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified.

    " + } + }, + "AttributeDefinition": { + "base": "

    Represents an attribute for describing the key schema for the table and indexes.

    ", + "refs": { + "AttributeDefinitions$member": null + } + }, + "AttributeDefinitions": { + "base": null, + "refs": { + "CreateTableInput$AttributeDefinitions": "

    An array of attributes that describe the key schema for the table and indexes.

    ", + "TableDescription$AttributeDefinitions": "

    An array of AttributeDefinition objects. Each of these objects describes one attribute in the table and index key schema.

    Each AttributeDefinition object in this array is composed of:

    • AttributeName - The name of the attribute.

    • AttributeType - The data type for the attribute.

    ", + "UpdateTableInput$AttributeDefinitions": "

    An array of attributes that describe the key schema for the table and indexes. If you are adding a new global secondary index to the table, AttributeDefinitions must include the key element(s) of the new index.

    " + } + }, + "AttributeMap": { + "base": null, + "refs": { + "DeleteItemOutput$Attributes": "

    A map of attribute names to AttributeValue objects, representing the item as it appeared before the DeleteItem operation. This map appears in the response only if ReturnValues was specified as ALL_OLD in the request.

    ", + "GetItemOutput$Item": "

    A map of attribute names to AttributeValue objects, as specified by AttributesToGet.

    ", + "ItemList$member": null, + "PutItemOutput$Attributes": "

    The attribute values as they appeared before the PutItem operation, but only if ReturnValues is specified as ALL_OLD in the request. Each element consists of an attribute name and an attribute value.

    ", + "UpdateItemOutput$Attributes": "

    A map of attribute values as they appeared before the UpdateItem operation. This map only appears if ReturnValues was specified as something other than NONE in the request. Each element represents one attribute.

    " + } + }, + "AttributeName": { + "base": null, + "refs": { + "AttributeMap$key": null, + "AttributeNameList$member": null, + "AttributeUpdates$key": null, + "ExpectedAttributeMap$key": null, + "ExpressionAttributeNameMap$value": null, + "FilterConditionMap$key": null, + "ItemCollectionKeyAttributeMap$key": null, + "Key$key": null, + "KeyConditions$key": null, + "MapAttributeValue$key": null, + "PutItemInputAttributeMap$key": null + } + }, + "AttributeNameList": { + "base": null, + "refs": { + "GetItemInput$AttributesToGet": "

    This is a legacy parameter, for backward compatibility. New applications should use ProjectionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    This parameter allows you to retrieve attributes of type List or Map; however, it cannot retrieve individual elements within a List or a Map.

    The names of one or more attributes to retrieve. If no attribute names are provided, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    Note that AttributesToGet has no effect on provisioned throughput consumption. DynamoDB determines capacity units consumed based on item size, not on the amount of data that is returned to an application.

    ", + "KeysAndAttributes$AttributesToGet": "

    One or more attributes to retrieve from the table or index. If no attribute names are specified then all attributes will be returned. If any of the specified attributes are not found, they will not appear in the result.

    ", + "QueryInput$AttributesToGet": "

    This is a legacy parameter, for backward compatibility. New applications should use ProjectionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    This parameter allows you to retrieve attributes of type List or Map; however, it cannot retrieve individual elements within a List or a Map.

    The names of one or more attributes to retrieve. If no attribute names are provided, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    Note that AttributesToGet has no effect on provisioned throughput consumption. DynamoDB determines capacity units consumed based on item size, not on the amount of data that is returned to an application.

    You cannot use both AttributesToGet and Select together in a Query request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

    If you query a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency.

    If you query a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.

    ", + "ScanInput$AttributesToGet": "

    This is a legacy parameter, for backward compatibility. New applications should use ProjectionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    This parameter allows you to retrieve attributes of type List or Map; however, it cannot retrieve individual elements within a List or a Map.

    The names of one or more attributes to retrieve. If no attribute names are provided, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    Note that AttributesToGet has no effect on provisioned throughput consumption. DynamoDB determines capacity units consumed based on item size, not on the amount of data that is returned to an application.

    " + } + }, + "AttributeUpdates": { + "base": null, + "refs": { + "UpdateItemInput$AttributeUpdates": "

    This is a legacy parameter, for backward compatibility. New applications should use UpdateExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    This parameter can be used for modifying top-level attributes; however, it does not support individual list or map elements.

    The names of attributes to be modified, the action to perform on each, and the new value for each. If you are updating an attribute that is an index key attribute for any indexes on that table, the attribute type must match the index key type defined in the AttributesDefinition of the table description. You can use UpdateItem to update any nonkey attributes.

    Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.

    Each AttributeUpdates element consists of an attribute name to modify, along with the following:

    • Value - The new value, if applicable, for this attribute.

    • Action - A value that specifies how to perform the update. This action is only valid for an existing attribute whose data type is Number or is a set; do not use ADD for other data types.

      If an item with the specified primary key is found in the table, the following values perform the following actions:

      • PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.

      • DELETE - Removes the attribute and its value, if no value is specified for DELETE. The data type of the specified value must match the existing value's data type.

        If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c], then the final attribute value is [b]. Specifying an empty set is an error.

      • ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:

        • If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.

          If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value.

          Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount, but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0, and finally add 3 to it. The result will be a new itemcount attribute, with a value of 3.

        • If the existing data type is a set, and if Value is also a set, then Value is appended to the existing set. For example, if the attribute value is the set [1,2], and the ADD action specified [3], then the final attribute value is [1,2,3]. An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type.

          Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, Value must also be a set of strings.

      If no item with the specified key is found in the table, the following values perform the following actions:

      • PUT - Causes DynamoDB to create a new item with the specified primary key, and then adds the attribute.

      • DELETE - Nothing happens, because attributes cannot be deleted from a nonexistent item. The operation succeeds, but DynamoDB does not create a new item.

      • ADD - Causes DynamoDB to create an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are Number and Number Set.

    If you provide any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

    " + } + }, + "AttributeValue": { + "base": "

    Represents the data for an attribute. You can set one, and only one, of the elements.

    Each attribute in an item is a name-value pair. An attribute can be single-valued or multi-valued set. For example, a book item can have title and authors attributes. Each book has one title but can have many authors. The multi-valued attribute is a set; duplicate values are not allowed.

    ", + "refs": { + "AttributeMap$value": null, + "AttributeValueList$member": null, + "AttributeValueUpdate$Value": null, + "ExpectedAttributeValue$Value": null, + "ExpressionAttributeValueMap$value": null, + "ItemCollectionKeyAttributeMap$value": null, + "Key$value": null, + "ListAttributeValue$member": null, + "MapAttributeValue$value": null, + "PutItemInputAttributeMap$value": null + } + }, + "AttributeValueList": { + "base": null, + "refs": { + "Condition$AttributeValueList": "

    One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

    For type Number, value comparisons are numeric.

    String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

    For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    ", + "ExpectedAttributeValue$AttributeValueList": "

    One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

    For type Number, value comparisons are numeric.

    String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

    For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide.

    " + } + }, + "AttributeValueUpdate": { + "base": "

    For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each.

    You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes.

    Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.

    ", + "refs": { + "AttributeUpdates$value": null + } + }, + "Backfilling": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$Backfilling": "

    Indicates whether the index is currently backfilling. Backfilling is the process of reading items from the table and determining whether they can be added to the index. (Not all items will qualify: For example, a hash key attribute cannot have any duplicates.) If an item can be added to the index, DynamoDB will do so. After all items have been processed, the backfilling operation is complete and Backfilling is false.

    For indexes that were created during a CreateTable operation, the Backfilling attribute does not appear in the DescribeTable output.

    " + } + }, + "BatchGetItemInput": { + "base": "

    Represents the input of a BatchGetItem operation.

    ", + "refs": { + } + }, + "BatchGetItemOutput": { + "base": "

    Represents the output of a BatchGetItem operation.

    ", + "refs": { + } + }, + "BatchGetRequestMap": { + "base": null, + "refs": { + "BatchGetItemInput$RequestItems": "

    A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per BatchGetItem request.

    Each element in the map of items to retrieve consists of the following:

    • ConsistentRead - If true, a strongly consistent read is used; if false (the default), an eventually consistent read is used.

    • ExpressionAttributeNames - One or more substitution tokens for attribute names in the ProjectionExpression parameter. The following are some use cases for using ExpressionAttributeNames:

      • To access an attribute whose name conflicts with a DynamoDB reserved word.

      • To create a placeholder for repeating occurrences of an attribute name in an expression.

      • To prevent special characters in an attribute name from being misinterpreted in an expression.

      Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

      • Percentile

      The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

      • {\"#P\":\"Percentile\"}

      You could then use this substitution in an expression, as in this example:

      • #P = :val

      Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

      For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    • Keys - An array of primary key attribute values that define specific items in the table. For each primary key, you must provide all of the key attributes. For example, with a hash type primary key, you only need to provide the hash attribute. For a hash-and-range type primary key, you must provide both the hash attribute and the range attribute.

    • ProjectionExpression - A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

      If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

      For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    • AttributesToGet -

      This is a legacy parameter, for backward compatibility. New applications should use ProjectionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

      This parameter allows you to retrieve attributes of type List or Map; however, it cannot retrieve individual elements within a List or a Map.

      The names of one or more attributes to retrieve. If no attribute names are provided, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

      Note that AttributesToGet has no effect on provisioned throughput consumption. DynamoDB determines capacity units consumed based on item size, not on the amount of data that is returned to an application.

    ", + "BatchGetItemOutput$UnprocessedKeys": "

    A map of tables and their respective keys that were not processed with the current response. The UnprocessedKeys value is in the same form as RequestItems, so the value can be provided directly to a subsequent BatchGetItem operation. For more information, see RequestItems in the Request Parameters section.

    Each element consists of:

    • Keys - An array of primary key attribute values that define specific items in the table.

    • AttributesToGet - One or more attributes to be retrieved from the table or index. By default, all attributes are returned. If a requested attribute is not found, it does not appear in the result.

    • ConsistentRead - The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.

    If there are no unprocessed keys remaining, the response contains an empty UnprocessedKeys map.

    " + } + }, + "BatchGetResponseMap": { + "base": null, + "refs": { + "BatchGetItemOutput$Responses": "

    A map of table name to a list of items. Each object in Responses consists of a table name, along with a map of attribute data consisting of the data type and attribute value.

    " + } + }, + "BatchWriteItemInput": { + "base": "

    Represents the input of a BatchWriteItem operation.

    ", + "refs": { + } + }, + "BatchWriteItemOutput": { + "base": "

    Represents the output of a BatchWriteItem operation.

    ", + "refs": { + } + }, + "BatchWriteItemRequestMap": { + "base": null, + "refs": { + "BatchWriteItemInput$RequestItems": "

    A map of one or more table names and, for each table, a list of operations to be performed (DeleteRequest or PutRequest). Each element in the map consists of the following:

    • DeleteRequest - Perform a DeleteItem operation on the specified item. The item to be deleted is identified by a Key subelement:

  • Key - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value. For each primary key, you must provide all of the key attributes. For example, with a hash type primary key, you only need to provide the hash attribute. For a hash-and-range type primary key, you must provide both the hash attribute and the range attribute.

    • PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement:

      • Item - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values will be rejected with a ValidationException exception.

        If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

    ", + "BatchWriteItemOutput$UnprocessedItems": "

    A map of tables and requests against those tables that were not processed. The UnprocessedItems value is in the same form as RequestItems, so you can provide this value directly to a subsequent BatchGetItem operation. For more information, see RequestItems in the Request Parameters section.

    Each UnprocessedItems entry consists of a table name and, for that table, a list of operations to perform (DeleteRequest or PutRequest).

    • DeleteRequest - Perform a DeleteItem operation on the specified item. The item to be deleted is identified by a Key subelement:

      • Key - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value.

    • PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement:

      • Item - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values will be rejected with a ValidationException exception.

        If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

    If there are no unprocessed items remaining, the response contains an empty UnprocessedItems map.

    " + } + }, + "BinaryAttributeValue": { + "base": null, + "refs": { + "AttributeValue$B": "

    A Binary data type.

    ", + "BinarySetAttributeValue$member": null + } + }, + "BinarySetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BS": "

    A Binary Set data type.

    " + } + }, + "BooleanAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BOOL": "

    A Boolean data type.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "ExpectedAttributeValue$Exists": "

    Causes DynamoDB to evaluate the value before attempting a conditional operation:

    • If Exists is true, DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException.

    • If Exists is false, DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException.

    The default setting for Exists is true. If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true, because it is implied.

    DynamoDB returns a ValidationException if:

    • Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)

    • Exists is false but you also provide a Value. (You cannot expect an attribute to have a value, while also expecting it not to exist.)

    ", + "QueryInput$ScanIndexForward": "

    Specifies the order in which to return the query results - either ascending (true) or descending (false).

    Items with the same hash key are stored in sorted order by range key. If the range key data type is Number, the results are stored in numeric order. For type String, the results are returned in order of ASCII character code values. For type Binary, DynamoDB treats each byte of the binary data as unsigned.

    If ScanIndexForward is true, DynamoDB returns the results in order, by range key. This is the default behavior.

    If ScanIndexForward is false, DynamoDB sorts the results in descending order by range key, and then returns the results to the client.

    " + } + }, + "Capacity": { + "base": "

    Represents the amount of provisioned throughput capacity consumed on a table or an index.

    ", + "refs": { + "ConsumedCapacity$Table": "

    The amount of throughput consumed on the table affected by the operation.

    ", + "SecondaryIndexesCapacityMap$value": null + } + }, + "ComparisonOperator": { + "base": null, + "refs": { + "Condition$ComparisonOperator": "

    A comparator for evaluating attributes. For example, equals, greater than, less than, etc.

    The following comparison operators are available:

    EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

    The following are descriptions of each comparison operator.

    • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

      AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

    • NE : Not equal. NE is supported for all datatypes, including lists and maps.

      AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

    • LE : Less than or equal.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • LT : Less than.

      AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • GE : Greater than or equal.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • GT : Greater than.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

      This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

    • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

      This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

    • CONTAINS : Checks for a subsequence, or value in a set.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

      CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

    • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

      NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

    • BEGINS_WITH : Checks for a prefix.

      AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

    • IN : Checks for matching elements within two sets.

      AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

    • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

      AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    ", + "ExpectedAttributeValue$ComparisonOperator": "

    A comparator for evaluating attributes in the AttributeValueList. For example, equals, greater than, less than, etc.

    The following comparison operators are available:

    EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

    The following are descriptions of each comparison operator.

    • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

      AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

    • NE : Not equal. NE is supported for all datatypes, including lists and maps.

      AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

    • LE : Less than or equal.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • LT : Less than.

      AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • GE : Greater than or equal.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • GT : Greater than.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

    • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

      This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

    • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

      This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

    • CONTAINS : Checks for a subsequence, or value in a set.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

      CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

    • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

      AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

      NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

    • BEGINS_WITH : Checks for a prefix.

      AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

    • IN : Checks for matching elements within two sets.

      AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

    • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

      AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    " + } + }, + "Condition": { + "base": "

    Represents the selection criteria for a Query or Scan operation:

    • For a Query operation, Condition is used for specifying the KeyConditions to use when querying a table or an index. For KeyConditions, only the following comparison operators are supported:

      EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN

      Condition is also used in a QueryFilter, which evaluates the query results and returns only the desired values.

    • For a Scan operation, Condition is used in a ScanFilter, which evaluates the scan results and returns only the desired values.

    ", + "refs": { + "FilterConditionMap$value": null, + "KeyConditions$value": null + } + }, + "ConditionExpression": { + "base": null, + "refs": { + "DeleteItemInput$ConditionExpression": "

    A condition that must be satisfied in order for a conditional DeleteItem to succeed.

    An expression can contain any of the following:

    • Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size

      These function names are case-sensitive.

    • Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN

    • Logical operators: AND | OR | NOT

    For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

    ", + "PutItemInput$ConditionExpression": "

    A condition that must be satisfied in order for a conditional PutItem operation to succeed.

    An expression can contain any of the following:

    • Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size

      These function names are case-sensitive.

    • Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN

    • Logical operators: AND | OR | NOT

    For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

    ", + "QueryInput$FilterExpression": "

    A string that contains conditions that DynamoDB applies after the Query operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned.

    A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

    For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide.

    FilterExpression replaces the legacy QueryFilter and ConditionalOperator parameters.

    ", + "ScanInput$FilterExpression": "

    A string that contains conditions that DynamoDB applies after the Scan operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned.

    A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

    For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide.

    FilterExpression replaces the legacy ScanFilter and ConditionalOperator parameters.

    ", + "UpdateItemInput$ConditionExpression": "

    A condition that must be satisfied in order for a conditional update to succeed.

    An expression can contain any of the following:

    • Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size

      These function names are case-sensitive.

    • Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN

    • Logical operators: AND | OR | NOT

    For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

    " + } + }, + "ConditionalCheckFailedException": { + "base": "

    A condition specified in the operation could not be evaluated.

    ", + "refs": { + } + }, + "ConditionalOperator": { + "base": null, + "refs": { + "DeleteItemInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A logical operator to apply to the conditions in the Expected map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluate to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    ", + "PutItemInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A logical operator to apply to the conditions in the Expected map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluate to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    ", + "QueryInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A logical operator to apply to the conditions in a QueryFilter map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluate to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    ", + "ScanInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A logical operator to apply to the conditions in a ScanFilter map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluate to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    ", + "UpdateItemInput$ConditionalOperator": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A logical operator to apply to the conditions in the Expected map:

    • AND - If all of the conditions evaluate to true, then the entire map evaluates to true.

    • OR - If at least one of the conditions evaluate to true, then the entire map evaluates to true.

    If you omit ConditionalOperator, then AND is the default.

    The operation will succeed only if the entire map evaluates to true.

    This parameter does not support attributes of type List or Map.

    " + } + }, + "ConsistentRead": { + "base": null, + "refs": { + "GetItemInput$ConsistentRead": "

    Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.

    ", + "KeysAndAttributes$ConsistentRead": "

    The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.

    ", + "QueryInput$ConsistentRead": "

    Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.

    Strongly consistent reads are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to true, you will receive a ValidationException.

    ", + "ScanInput$ConsistentRead": "

    A Boolean value that determines the read consistency model during the scan:

    • If ConsistentRead is false, then Scan will use eventually consistent reads. The data returned from Scan might not contain the results of other recently completed write operations (PutItem, UpdateItem or DeleteItem). The Scan response might include some stale data.

    • If ConsistentRead is true, then Scan will use strongly consistent reads. All of the write operations that completed before the Scan began are guaranteed to be contained in the Scan response.

    The default setting for ConsistentRead is false, meaning that eventually consistent reads will be used.

    Strongly consistent reads are not supported on global secondary indexes. If you scan a global secondary index with ConsistentRead set to true, you will receive a ValidationException.

    " + } + }, + "ConsumedCapacity": { + "base": "

    The capacity units consumed by an operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the request asked for it. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide.

    ", + "refs": { + "ConsumedCapacityMultiple$member": null, + "DeleteItemOutput$ConsumedCapacity": null, + "GetItemOutput$ConsumedCapacity": null, + "PutItemOutput$ConsumedCapacity": null, + "QueryOutput$ConsumedCapacity": null, + "ScanOutput$ConsumedCapacity": null, + "UpdateItemOutput$ConsumedCapacity": null + } + }, + "ConsumedCapacityMultiple": { + "base": null, + "refs": { + "BatchGetItemOutput$ConsumedCapacity": "

    The read capacity units consumed by the operation.

    Each element consists of:

    • TableName - The table that consumed the provisioned throughput.

    • CapacityUnits - The total number of capacity units consumed.

    ", + "BatchWriteItemOutput$ConsumedCapacity": "

    The capacity units consumed by the operation.

    Each element consists of:

    • TableName - The table that consumed the provisioned throughput.

    • CapacityUnits - The total number of capacity units consumed.

    " + } + }, + "ConsumedCapacityUnits": { + "base": null, + "refs": { + "Capacity$CapacityUnits": "

    The total number of capacity units consumed on a table or an index.

    ", + "ConsumedCapacity$CapacityUnits": "

    The total number of capacity units consumed by the operation.

    " + } + }, + "CreateGlobalSecondaryIndexAction": { + "base": "

    Represents a new global secondary index to be added to an existing table.

    ", + "refs": { + "GlobalSecondaryIndexUpdate$Create": "

    The parameters required for creating a global secondary index on an existing table:

    • IndexName

    • KeySchema

    • AttributeDefinitions

    • Projection

    • ProvisionedThroughput

    " + } + }, + "CreateTableInput": { + "base": "

    Represents the input of a CreateTable operation.

    ", + "refs": { + } + }, + "CreateTableOutput": { + "base": "

    Represents the output of a CreateTable operation.

    ", + "refs": { + } + }, + "Date": { + "base": null, + "refs": { + "ProvisionedThroughputDescription$LastIncreaseDateTime": "

    The date and time of the last provisioned throughput increase for this table.

    ", + "ProvisionedThroughputDescription$LastDecreaseDateTime": "

    The date and time of the last provisioned throughput decrease for this table.

    ", + "TableDescription$CreationDateTime": "

    The date and time when the table was created, in UNIX epoch time format.

    " + } + }, + "DeleteGlobalSecondaryIndexAction": { + "base": "

    Represents a global secondary index to be deleted from an existing table.

    ", + "refs": { + "GlobalSecondaryIndexUpdate$Delete": "

    The name of an existing global secondary index to be removed.

    " + } + }, + "DeleteItemInput": { + "base": "

    Represents the input of a DeleteItem operation.

    ", + "refs": { + } + }, + "DeleteItemOutput": { + "base": "

    Represents the output of a DeleteItem operation.

    ", + "refs": { + } + }, + "DeleteRequest": { + "base": "

    Represents a request to perform a DeleteItem operation on an item.

    ", + "refs": { + "WriteRequest$DeleteRequest": "

    A request to perform a DeleteItem operation.

    " + } + }, + "DeleteTableInput": { + "base": "

    Represents the input of a DeleteTable operation.

    ", + "refs": { + } + }, + "DeleteTableOutput": { + "base": "

    Represents the output of a DeleteTable operation.

    ", + "refs": { + } + }, + "DescribeTableInput": { + "base": "

    Represents the input of a DescribeTable operation.

    ", + "refs": { + } + }, + "DescribeTableOutput": { + "base": "

    Represents the output of a DescribeTable operation.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConditionalCheckFailedException$message": "

    The conditional request failed.

    ", + "InternalServerError$message": "

    The server encountered an internal error trying to fulfill the request.

    ", + "ItemCollectionSizeLimitExceededException$message": "

    The total size of an item collection has exceeded the maximum limit of 10 gigabytes.

    ", + "LimitExceededException$message": "

    Too many operations for a given subscriber.

    ", + "ProvisionedThroughputExceededException$message": "

    You exceeded your maximum allowed provisioned throughput.

    ", + "ResourceInUseException$message": "

    The resource which is being attempted to be changed is in use.

    ", + "ResourceNotFoundException$message": "

    The resource which is being requested does not exist.

    " + } + }, + "ExpectedAttributeMap": { + "base": null, + "refs": { + "DeleteItemInput$Expected": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A map of attribute/condition pairs. Expected provides a conditional block for the DeleteItem operation.

    Each element of Expected consists of an attribute name, a comparison operator, and one or more values. DynamoDB compares the attribute with the value(s) you supplied, using the comparison operator. For each Expected element, the result of the evaluation is either true or false.

    If you specify more than one element in the Expected map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    If the Expected map evaluates to true, then the conditional operation succeeds; otherwise, it fails.

    Expected contains the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    • ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. When performing the comparison, DynamoDB uses strongly consistent reads.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      The following are descriptions of each comparison operator.

      • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NE : Not equal. NE is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LE : Less than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LT : Less than.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GE : Greater than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GT : Greater than.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

        This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

      • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

        This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

      • CONTAINS : Checks for a subsequence, or value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

        CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

        NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • BEGINS_WITH : Checks for a prefix.

        AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

      • IN : Checks for matching elements within two sets.

        AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

      • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

        AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    For backward compatibility with previous DynamoDB releases, the following parameters can be used instead of AttributeValueList and ComparisonOperator:

    • Value - A value for DynamoDB to compare with an attribute.

    • Exists - A Boolean value that causes DynamoDB to evaluate the value before attempting the conditional operation:

      • If Exists is true, DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the condition evaluates to true; otherwise the condition evaluates to false.

      • If Exists is false, DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false.

      Note that the default value for Exists is true.

    The Value and Exists parameters are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.

    This parameter does not support attributes of type List or Map.

    ", + "PutItemInput$Expected": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A map of attribute/condition pairs. Expected provides a conditional block for the PutItem operation.

    This parameter does not support attributes of type List or Map.

    Each element of Expected consists of an attribute name, a comparison operator, and one or more values. DynamoDB compares the attribute with the value(s) you supplied, using the comparison operator. For each Expected element, the result of the evaluation is either true or false.

    If you specify more than one element in the Expected map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    If the Expected map evaluates to true, then the conditional operation succeeds; otherwise, it fails.

    Expected contains the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    • ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. When performing the comparison, DynamoDB uses strongly consistent reads.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      The following are descriptions of each comparison operator.

      • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NE : Not equal. NE is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LE : Less than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LT : Less than.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GE : Greater than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GT : Greater than.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

        This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

      • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

        This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

      • CONTAINS : Checks for a subsequence, or value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

        CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

        NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • BEGINS_WITH : Checks for a prefix.

        AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

      • IN : Checks for matching elements within two sets.

        AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

      • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

        AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    For backward compatibility with previous DynamoDB releases, the following parameters can be used instead of AttributeValueList and ComparisonOperator:

    • Value - A value for DynamoDB to compare with an attribute.

    • Exists - A Boolean value that causes DynamoDB to evaluate the value before attempting the conditional operation:

      • If Exists is true, DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the condition evaluates to true; otherwise the condition evaluates to false.

      • If Exists is false, DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false.

      Note that the default value for Exists is true.

    The Value and Exists parameters are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.

    ", + "UpdateItemInput$Expected": "

    This is a legacy parameter, for backward compatibility. New applications should use ConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A map of attribute/condition pairs. Expected provides a conditional block for the UpdateItem operation.

    Each element of Expected consists of an attribute name, a comparison operator, and one or more values. DynamoDB compares the attribute with the value(s) you supplied, using the comparison operator. For each Expected element, the result of the evaluation is either true or false.

    If you specify more than one element in the Expected map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    If the Expected map evaluates to true, then the conditional operation succeeds; otherwise, it fails.

    Expected contains the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    • ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. When performing the comparison, DynamoDB uses strongly consistent reads.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      The following are descriptions of each comparison operator.

      • EQ : Equal. EQ is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NE : Not equal. NE is supported for all datatypes, including lists and maps.

        AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LE : Less than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LT : Less than.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GE : Greater than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GT : Greater than.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, including lists and maps.

        This operator tests for the existence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. This result is because the attribute \"a\" exists; its data type is not relevant to the NOT_NULL comparison operator.

      • NULL : The attribute does not exist. NULL is supported for all datatypes, including lists and maps.

        This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute \"a\" is null, and you evaluate it using NULL, the result is a Boolean false. This is because the attribute \"a\" exists; its data type is not relevant to the NULL comparison operator.

      • CONTAINS : Checks for a subsequence, or value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it finds an exact match with any member of the set.

        CONTAINS is supported for lists: When evaluating \"a CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set (\"SS\", \"NS\", or \"BS\"), then the operator evaluates to true if it does not find an exact match with any member of the set.

        NOT_CONTAINS is supported for lists: When evaluating \"a NOT CONTAINS b\", \"a\" can be a list; however, \"b\" cannot be a set, a map, or a list.

      • BEGINS_WITH : Checks for a prefix.

        AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

      • IN : Checks for matching elements within two sets.

        AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary (not a set type). These attributes are compared against an existing set type attribute of an item. If any elements of the input set are present in the item attribute, the expression evaluates to true.

      • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

        AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    For backward compatibility with previous DynamoDB releases, the following parameters can be used instead of AttributeValueList and ComparisonOperator:

    • Value - A value for DynamoDB to compare with an attribute.

    • Exists - A Boolean value that causes DynamoDB to evaluate the value before attempting the conditional operation:

      • If Exists is true, DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the condition evaluates to true; otherwise the condition evaluates to false.

      • If Exists is false, DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the condition evaluates to true. If the value is found, despite the assumption that it does not exist, the condition evaluates to false.

      Note that the default value for Exists is true.

    The Value and Exists parameters are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.

    This parameter does not support attributes of type List or Map.

    " + } + }, + "ExpectedAttributeValue": { + "base": "

    Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem, PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:

    • Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.

    • Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.

    Value and Exists are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.

    ", + "refs": { + "ExpectedAttributeMap$value": null + } + }, + "ExpressionAttributeNameMap": { + "base": null, + "refs": { + "DeleteItemInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "GetItemInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "KeysAndAttributes$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "PutItemInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "QueryInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "ScanInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ", + "UpdateItemInput$ExpressionAttributeNames": "

    One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

    • To access an attribute whose name conflicts with a DynamoDB reserved word.

    • To create a placeholder for repeating occurrences of an attribute name in an expression.

    • To prevent special characters in an attribute name from being misinterpreted in an expression.

    Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

    • Percentile

    The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

    • {\"#P\":\"Percentile\"}

    You could then use this substitution in an expression, as in this example:

    • #P = :val

    Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

    For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    " + } + }, + "ExpressionAttributeNameVariable": { + "base": null, + "refs": { + "ExpressionAttributeNameMap$key": null + } + }, + "ExpressionAttributeValueMap": { + "base": null, + "refs": { + "DeleteItemInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ", + "PutItemInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ", + "QueryInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ", + "ScanInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    ", + "UpdateItemInput$ExpressionAttributeValues": "

    One or more values that can be substituted in an expression.

    Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

    Available | Backordered | Discontinued

    You would first need to specify ExpressionAttributeValues as follows:

    { \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

    You could then use these values in an expression, such as this:

    ProductStatus IN (:avail, :back, :disc)

    For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

    " + } + }, + "ExpressionAttributeValueVariable": { + "base": null, + "refs": { + "ExpressionAttributeValueMap$key": null + } + }, + "FilterConditionMap": { + "base": null, + "refs": { + "QueryInput$QueryFilter": "

    This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A condition that evaluates the query results after the items are read and returns only the desired values.

    This parameter does not support attributes of type List or Map.

    A QueryFilter is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

    If you provide more than one condition in the QueryFilter map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    Note that QueryFilter does not allow key attributes. You cannot define a filter condition on a hash key or range key.

    Each QueryFilter element consists of an attribute name to compare, along with the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the operator specified in ComparisonOperator.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

      For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide.

    • ComparisonOperator - A comparator for evaluating attributes. For example, equals, greater than, less than, etc.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      For complete descriptions of all comparison operators, see the Condition data type.

    ", + "ScanInput$ScanFilter": "

    This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    A condition that evaluates the scan results and returns only the desired values.

    This parameter does not support attributes of type List or Map.

    If you specify more than one condition in the ScanFilter map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

    Each ScanFilter element consists of an attribute name to compare, along with the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the operator specified in ComparisonOperator.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

      For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide.

    • ComparisonOperator - A comparator for evaluating attributes. For example, equals, greater than, less than, etc.

      The following comparison operators are available:

      EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN

      For complete descriptions of all comparison operators, see Condition.

    " + } + }, + "GetItemInput": { + "base": "

    Represents the input of a GetItem operation.

    ", + "refs": { + } + }, + "GetItemOutput": { + "base": "

    Represents the output of a GetItem operation.

    ", + "refs": { + } + }, + "GlobalSecondaryIndex": { + "base": "

    Represents the properties of a global secondary index.

    ", + "refs": { + "GlobalSecondaryIndexList$member": null + } + }, + "GlobalSecondaryIndexDescription": { + "base": "

    Represents the properties of a global secondary index.

    ", + "refs": { + "GlobalSecondaryIndexDescriptionList$member": null + } + }, + "GlobalSecondaryIndexDescriptionList": { + "base": null, + "refs": { + "TableDescription$GlobalSecondaryIndexes": "

    The global secondary indexes, if any, on the table. Each index is scoped to a given hash key value. Each element is composed of:

    • Backfilling - If true, then the index is currently in the backfilling phase. Backfilling occurs only when a new global secondary index is added to the table; it is the process by which DynamoDB populates the new index with data from the table. (This attribute does not appear for indexes that were created during a CreateTable operation.)

    • IndexName - The name of the global secondary index.

    • IndexSizeBytes - The total size of the global secondary index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    • IndexStatus - The current status of the global secondary index:

      • CREATING - The index is being created.

      • UPDATING - The index is being updated.

      • DELETING - The index is being deleted.

      • ACTIVE - The index is ready for use.

    • ItemCount - The number of items in the global secondary index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    • KeySchema - Specifies the complete index key schema. The attribute names in the key schema must be between 1 and 255 characters (inclusive). The key schema must begin with the same hash key attribute as the table.

    • Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:

      • ProjectionType - One of the following:

        • KEYS_ONLY - Only the index and primary keys are projected into the index.

        • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes are in NonKeyAttributes.

        • ALL - All of the table attributes are projected into the index.

      • NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    • ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units, along with data about increases and decreases.

    If the table is in the DELETING state, no information about indexes will be returned.

    " + } + }, + "GlobalSecondaryIndexList": { + "base": null, + "refs": { + "CreateTableInput$GlobalSecondaryIndexes": "

    One or more global secondary indexes (the maximum is five) to be created on the table. Each global secondary index in the array includes the following:

    • IndexName - The name of the global secondary index. Must be unique only for this table.

    • KeySchema - Specifies the key schema for the global secondary index.

    • Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:

      • ProjectionType - One of the following:

        • KEYS_ONLY - Only the index and primary keys are projected into the index.

        • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes are in NonKeyAttributes.

        • ALL - All of the table attributes are projected into the index.

      • NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    • ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units.

    " + } + }, + "GlobalSecondaryIndexUpdate": { + "base": "

    Represents one of the following:

    • A new global secondary index to be added to an existing table.

    • New provisioned throughput parameters for an existing global secondary index.

    • An existing global secondary index to be removed from an existing table.

    ", + "refs": { + "GlobalSecondaryIndexUpdateList$member": null + } + }, + "GlobalSecondaryIndexUpdateList": { + "base": null, + "refs": { + "UpdateTableInput$GlobalSecondaryIndexUpdates": "

    An array of one or more global secondary indexes for the table. For each index in the array, you can request one action:

    • Create - add a new global secondary index to the table.

    • Update - modify the provisioned throughput settings of an existing global secondary index.

    • Delete - remove a global secondary index from the table.

    For more information, see Managing Global Secondary Indexes in the Amazon DynamoDB Developer Guide.

    " + } + }, + "IndexName": { + "base": null, + "refs": { + "CreateGlobalSecondaryIndexAction$IndexName": "

    The name of the global secondary index to be created.

    ", + "DeleteGlobalSecondaryIndexAction$IndexName": "

    The name of the global secondary index to be deleted.

    ", + "GlobalSecondaryIndex$IndexName": "

    The name of the global secondary index. The name must be unique among all other indexes on this table.

    ", + "GlobalSecondaryIndexDescription$IndexName": "

    The name of the global secondary index.

    ", + "LocalSecondaryIndex$IndexName": "

    The name of the local secondary index. The name must be unique among all other indexes on this table.

    ", + "LocalSecondaryIndexDescription$IndexName": "

    Represents the name of the local secondary index.

    ", + "QueryInput$IndexName": "

    The name of an index to query. This index can be any local secondary index or global secondary index on the table. Note that if you use the IndexName parameter, you must also provide TableName.

    ", + "ScanInput$IndexName": "

    The name of a secondary index to scan. This index can be any local secondary index or global secondary index. Note that if you use the IndexName parameter, you must also provide TableName.

    ", + "SecondaryIndexesCapacityMap$key": null, + "UpdateGlobalSecondaryIndexAction$IndexName": "

    The name of the global secondary index to be updated.

    " + } + }, + "IndexStatus": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$IndexStatus": "

    The current state of the global secondary index:

    • CREATING - The index is being created.

    • UPDATING - The index is being updated.

    • DELETING - The index is being deleted.

    • ACTIVE - The index is ready for use.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "QueryOutput$Count": "

    The number of items in the response.

    If you used a QueryFilter in the request, then Count is the number of items returned after the filter was applied, and ScannedCount is the number of matching items before the filter was applied.

    If you did not use a filter in the request, then Count and ScannedCount are the same.

    ", + "QueryOutput$ScannedCount": "

    The number of items evaluated, before any QueryFilter is applied. A high ScannedCount value with few, or no, Count results indicates an inefficient Query operation. For more information, see Count and ScannedCount in the Amazon DynamoDB Developer Guide.

    If you did not use a filter in the request, then ScannedCount is the same as Count.

    ", + "ScanOutput$Count": "

    The number of items in the response.

    If you set ScanFilter in the request, then Count is the number of items returned after the filter was applied, and ScannedCount is the number of matching items before the filter was applied.

    If you did not use a filter in the request, then Count is the same as ScannedCount.

    ", + "ScanOutput$ScannedCount": "

    The number of items evaluated, before any ScanFilter is applied. A high ScannedCount value with few, or no, Count results indicates an inefficient Scan operation. For more information, see Count and ScannedCount in the Amazon DynamoDB Developer Guide.

    If you did not use a filter in the request, then ScannedCount is the same as Count.

    " + } + }, + "InternalServerError": { + "base": "

    An error occurred on the server side.

    ", + "refs": { + } + }, + "ItemCollectionKeyAttributeMap": { + "base": null, + "refs": { + "ItemCollectionMetrics$ItemCollectionKey": "

    The hash key value of the item collection. This value is the same as the hash key of the item.

    " + } + }, + "ItemCollectionMetrics": { + "base": "

    Information about item collections, if any, that were affected by the operation. ItemCollectionMetrics is only returned if the request asked for it. If the table does not have any local secondary indexes, this information is not returned in the response.

    ", + "refs": { + "DeleteItemOutput$ItemCollectionMetrics": "

    Information about item collections, if any, that were affected by the operation. ItemCollectionMetrics is only returned if the request asked for it. If the table does not have any local secondary indexes, this information is not returned in the response.

    Each ItemCollectionMetrics element consists of:

    • ItemCollectionKey - The hash key value of the item collection. This is the same as the hash key of the item.

    • SizeEstimateRange - An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit.

      The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.

    ", + "ItemCollectionMetricsMultiple$member": null, + "PutItemOutput$ItemCollectionMetrics": "

    Information about item collections, if any, that were affected by the operation. ItemCollectionMetrics is only returned if the request asked for it. If the table does not have any local secondary indexes, this information is not returned in the response.

    Each ItemCollectionMetrics element consists of:

    • ItemCollectionKey - The hash key value of the item collection. This is the same as the hash key of the item.

    • SizeEstimateRange - An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit.

      The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.

    ", + "UpdateItemOutput$ItemCollectionMetrics": null + } + }, + "ItemCollectionMetricsMultiple": { + "base": null, + "refs": { + "ItemCollectionMetricsPerTable$value": null + } + }, + "ItemCollectionMetricsPerTable": { + "base": null, + "refs": { + "BatchWriteItemOutput$ItemCollectionMetrics": "

    A list of tables that were processed by BatchWriteItem and, for each table, information about any item collections that were affected by individual DeleteItem or PutItem operations.

    Each entry consists of the following subelements:

    • ItemCollectionKey - The hash key value of the item collection. This is the same as the hash key of the item.

    • SizeEstimateRange - An estimate of item collection size, expressed in GB. This is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on the table. Use this estimate to measure whether a local secondary index is approaching its size limit.

      The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.

    " + } + }, + "ItemCollectionSizeEstimateBound": { + "base": null, + "refs": { + "ItemCollectionSizeEstimateRange$member": null + } + }, + "ItemCollectionSizeEstimateRange": { + "base": null, + "refs": { + "ItemCollectionMetrics$SizeEstimateRangeGB": "

    An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit.

    The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.

    " + } + }, + "ItemCollectionSizeLimitExceededException": { + "base": "

    An item collection is too large. This exception is only returned for tables that have one or more local secondary indexes.

    ", + "refs": { + } + }, + "ItemList": { + "base": null, + "refs": { + "BatchGetResponseMap$value": null, + "QueryOutput$Items": "

    An array of item attributes that match the query criteria. Each element in this array consists of an attribute name and the value for that attribute.

    ", + "ScanOutput$Items": "

    An array of item attributes that match the scan criteria. Each element in this array consists of an attribute name and the value for that attribute.

    " + } + }, + "Key": { + "base": null, + "refs": { + "DeleteItemInput$Key": "

    A map of attribute names to AttributeValue objects, representing the primary key of the item to delete.

    For the primary key, you must provide all of the attributes. For example, with a hash type primary key, you only need to provide the hash attribute. For a hash-and-range type primary key, you must provide both the hash attribute and the range attribute.

    ", + "DeleteRequest$Key": "

    A map of attribute name to attribute values, representing the primary key of the item to delete. All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema.

    ", + "GetItemInput$Key": "

    A map of attribute names to AttributeValue objects, representing the primary key of the item to retrieve.

    For the primary key, you must provide all of the attributes. For example, with a hash type primary key, you only need to provide the hash attribute. For a hash-and-range type primary key, you must provide both the hash attribute and the range attribute.

    ", + "KeyList$member": null, + "QueryInput$ExclusiveStartKey": "

    The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation.

    The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed.

    ", + "QueryOutput$LastEvaluatedKey": "

    The primary key of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

    If LastEvaluatedKey is empty, then the \"last page\" of results has been processed and there is no more data to be retrieved.

    If LastEvaluatedKey is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedKey is empty.

    ", + "ScanInput$ExclusiveStartKey": "

    The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation.

    The data type for ExclusiveStartKey must be String, Number or Binary. No set data types are allowed.

    In a parallel scan, a Scan request that includes ExclusiveStartKey must specify the same segment whose previous Scan returned the corresponding value of LastEvaluatedKey.

    ", + "ScanOutput$LastEvaluatedKey": "

    The primary key of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

    If LastEvaluatedKey is empty, then the \"last page\" of results has been processed and there is no more data to be retrieved.

    If LastEvaluatedKey is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedKey is empty.

    ", + "UpdateItemInput$Key": "

    The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.

    For the primary key, you must provide all of the attributes. For example, with a hash type primary key, you only need to provide the hash attribute. For a hash-and-range type primary key, you must provide both the hash attribute and the range attribute.

    " + } + }, + "KeyConditions": { + "base": null, + "refs": { + "QueryInput$KeyConditions": "

    This is a legacy parameter, for backward compatibility. New applications should use KeyConditionExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

    The selection criteria for the query. For a query on a table, you can have conditions only on the table primary key attributes. You must provide the hash key attribute name and value as an EQ condition. You can optionally provide a second condition, referring to the range key attribute.

    If you don't provide a range key condition, all of the items that match the hash key will be retrieved. If a FilterExpression or QueryFilter is present, it will be applied after the items are retrieved.

    For a query on an index, you can have conditions only on the index key attributes. You must provide the index hash attribute name and value as an EQ condition. You can optionally provide a second condition, referring to the index key range attribute.

    Each KeyConditions element consists of an attribute name to compare, along with the following:

    • AttributeValueList - One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.

      For type Number, value comparisons are numeric.

      String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A, and a is greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

      For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

    • ComparisonOperator - A comparator for evaluating attributes, for example, equals, greater than, less than, and so on.

      For KeyConditions, only the following comparison operators are supported:

      EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN

      The following are descriptions of these comparison operators.

      • EQ : Equal.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one specified in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not equal {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LE : Less than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • LT : Less than.

        AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GE : Greater than or equal.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • GT : Greater than.

        AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not equal {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}.

      • BEGINS_WITH : Checks for a prefix.

        AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).

      • BETWEEN : Greater than or equal to the first value, and less than or equal to the second value.

        AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {\"S\":\"6\"} does not compare to {\"N\":\"6\"}. Also, {\"N\":\"6\"} does not compare to {\"NS\":[\"6\", \"2\", \"1\"]}

    For usage examples of AttributeValueList and ComparisonOperator, see Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.

    " + } + }, + "KeyExpression": { + "base": null, + "refs": { + "QueryInput$KeyConditionExpression": "

    The condition that specifies the key value(s) for items to be retrieved by the Query action.

    The condition must perform an equality test on a single hash key value. The condition can also perform one of several comparison tests on a single range key value. Query can use KeyConditionExpression to retrieve one item with a given hash and range key value, or several items that have the same hash key value but different range key values.

    The hash key equality test is required, and must be specified in the following format:

    hashAttributeName = :hashval

    If you also want to provide a range key condition, it must be combined using AND with the hash key condition. Following is an example, using the = comparison operator for the range key:

    hashAttributeName = :hashval AND rangeAttributeName = :rangeval

    Valid comparisons for the range key condition are as follows:

    • rangeAttributeName = :rangeval - true if the range key is equal to :rangeval.

    • rangeAttributeName < :rangeval - true if the range key is less than :rangeval.

    • rangeAttributeName <= :rangeval - true if the range key is less than or equal to :rangeval.

    • rangeAttributeName > :rangeval - true if the range key is greater than :rangeval.

    • rangeAttributeName >= :rangeval - true if the range key is greater than or equal to :rangeval.

    • rangeAttributeName BETWEEN :rangeval1 AND :rangeval2 - true if the range key is greater than or equal to :rangeval1, and less than or equal to :rangeval2.

    • begins_with (rangeAttributeName, :rangeval) - true if the range key begins with a particular operand. (You cannot use this function with a range key that is of type Number.) Note that the function name begins_with is case-sensitive.

    Use the ExpressionAttributeValues parameter to replace tokens such as :hashval and :rangeval with actual values at runtime.

    You can optionally use the ExpressionAttributeNames parameter to replace the names of the hash and range attributes with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word:

    • Size = :myval

    To work around this, define a placeholder (such as #S) to represent the attribute name Size. KeyConditionExpression then is as follows:

    • #S = :myval

    For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.

    For more information on ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

    KeyConditionExpression replaces the legacy KeyConditions parameter.

    " + } + }, + "KeyList": { + "base": null, + "refs": { + "KeysAndAttributes$Keys": "

    The primary key attribute values that define the items and the attributes associated with the items.

    " + } + }, + "KeySchema": { + "base": null, + "refs": { + "CreateGlobalSecondaryIndexAction$KeySchema": "

    The key schema for the global secondary index.

    ", + "CreateTableInput$KeySchema": "

    Specifies the attributes that make up the primary key for a table or an index. The attributes in KeySchema must also be defined in the AttributeDefinitions array. For more information, see Data Model in the Amazon DynamoDB Developer Guide.

    Each KeySchemaElement in the array is composed of:

    • AttributeName - The name of this key attribute.

    • KeyType - Determines whether the key attribute is HASH or RANGE.

    For a primary key that consists of a hash attribute, you must provide exactly one element with a KeyType of HASH.

    For a primary key that consists of hash and range attributes, you must provide exactly two elements, in this order: The first element must have a KeyType of HASH, and the second element must have a KeyType of RANGE.

    For more information, see Specifying the Primary Key in the Amazon DynamoDB Developer Guide.

    ", + "GlobalSecondaryIndex$KeySchema": "

    The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types (HASH or RANGE).

    ", + "GlobalSecondaryIndexDescription$KeySchema": "

    The complete key schema for the global secondary index, consisting of one or more pairs of attribute names and key types (HASH or RANGE).

    ", + "LocalSecondaryIndex$KeySchema": "

    The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types (HASH or RANGE).

    ", + "LocalSecondaryIndexDescription$KeySchema": "

    The complete index key schema, which consists of one or more pairs of attribute names and key types (HASH or RANGE).

    ", + "TableDescription$KeySchema": "

    The primary key structure for the table. Each KeySchemaElement consists of:

    • AttributeName - The name of the attribute.

    • KeyType - The key type for the attribute. Can be either HASH or RANGE.

    For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.

    " + } + }, + "KeySchemaAttributeName": { + "base": null, + "refs": { + "AttributeDefinition$AttributeName": "

    A name for the attribute.

    ", + "KeySchemaElement$AttributeName": "

    The name of a key attribute.

    " + } + }, + "KeySchemaElement": { + "base": "

    Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.

    A KeySchemaElement represents exactly one attribute of the primary key. For example, a hash type primary key would be represented by one KeySchemaElement. A hash-and-range type primary key would require one KeySchemaElement for the hash attribute, and another KeySchemaElement for the range attribute.

    ", + "refs": { + "KeySchema$member": null + } + }, + "KeyType": { + "base": null, + "refs": { + "KeySchemaElement$KeyType": "

    The role that this key attribute will assume: HASH or RANGE.

    " + } + }, + "KeysAndAttributes": { + "base": "

    Represents a set of primary keys and, for each key, the attributes to retrieve from the table.

    For each primary key, you must provide all of the key attributes. For example, with a hash type primary key, you only need to provide the hash attribute. For a hash-and-range type primary key, you must provide both the hash attribute and the range attribute.

    ", + "refs": { + "BatchGetRequestMap$value": null + } + }, + "LimitExceededException": { + "base": "

    The number of concurrent table requests (cumulative number of tables in the CREATING, DELETING or UPDATING state) exceeds the maximum allowed of 10.

    Also, for tables with secondary indexes, only one of those tables can be in the CREATING state at any point in time. Do not attempt to create more than one such table simultaneously.

    The total limit of tables in the ACTIVE state is 250.

    ", + "refs": { + } + }, + "ListAttributeValue": { + "base": null, + "refs": { + "AttributeValue$L": "

    A List of attribute values.

    " + } + }, + "ListTablesInput": { + "base": "

    Represents the input of a ListTables operation.

    ", + "refs": { + } + }, + "ListTablesInputLimit": { + "base": null, + "refs": { + "ListTablesInput$Limit": "

    A maximum number of table names to return. If this parameter is not specified, the limit is 100.

    " + } + }, + "ListTablesOutput": { + "base": "

    Represents the output of a ListTables operation.

    ", + "refs": { + } + }, + "LocalSecondaryIndex": { + "base": "

    Represents the properties of a local secondary index.

    ", + "refs": { + "LocalSecondaryIndexList$member": null + } + }, + "LocalSecondaryIndexDescription": { + "base": "

    Represents the properties of a local secondary index.

    ", + "refs": { + "LocalSecondaryIndexDescriptionList$member": null + } + }, + "LocalSecondaryIndexDescriptionList": { + "base": null, + "refs": { + "TableDescription$LocalSecondaryIndexes": "

    Represents one or more local secondary indexes on the table. Each index is scoped to a given hash key value. Tables with one or more local secondary indexes are subject to an item collection size limit, where the amount of data within a given item collection cannot exceed 10 GB. Each element is composed of:

    • IndexName - The name of the local secondary index.

    • KeySchema - Specifies the complete index key schema. The attribute names in the key schema must be between 1 and 255 characters (inclusive). The key schema must begin with the same hash key attribute as the table.

    • Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:

      • ProjectionType - One of the following:

        • KEYS_ONLY - Only the index and primary keys are projected into the index.

        • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes are in NonKeyAttributes.

        • ALL - All of the table attributes are projected into the index.

      • NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    • IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    • ItemCount - Represents the number of items in the index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    If the table is in the DELETING state, no information about indexes will be returned.

    " + } + }, + "LocalSecondaryIndexList": { + "base": null, + "refs": { + "CreateTableInput$LocalSecondaryIndexes": "

    One or more local secondary indexes (the maximum is five) to be created on the table. Each index is scoped to a given hash key value. There is a 10 GB size limit per hash key; otherwise, the size of a local secondary index is unconstrained.

    Each local secondary index in the array includes the following:

    • IndexName - The name of the local secondary index. Must be unique only for this table.

    • KeySchema - Specifies the key schema for the local secondary index. The key schema must begin with the same hash key attribute as the table.

    • Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of:

      • ProjectionType - One of the following:

        • KEYS_ONLY - Only the index and primary keys are projected into the index.

        • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes are in NonKeyAttributes.

        • ALL - All of the table attributes are projected into the index.

      • NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    " + } + }, + "Long": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$IndexSizeBytes": "

    The total size of the specified index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "GlobalSecondaryIndexDescription$ItemCount": "

    The number of items in the specified index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "LocalSecondaryIndexDescription$IndexSizeBytes": "

    The total size of the specified index, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "LocalSecondaryIndexDescription$ItemCount": "

    The number of items in the specified index. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "TableDescription$TableSizeBytes": "

    The total size of the specified table, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    ", + "TableDescription$ItemCount": "

    The number of items in the specified table. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.

    " + } + }, + "MapAttributeValue": { + "base": null, + "refs": { + "AttributeValue$M": "

    A Map of attribute values.

    " + } + }, + "NonKeyAttributeName": { + "base": null, + "refs": { + "NonKeyAttributeNameList$member": null + } + }, + "NonKeyAttributeNameList": { + "base": null, + "refs": { + "Projection$NonKeyAttributes": "

    Represents the non-key attribute names which will be projected into the index.

    For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.

    " + } + }, + "NullAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NULL": "

    A Null data type.

    " + } + }, + "NumberAttributeValue": { + "base": null, + "refs": { + "AttributeValue$N": "

    A Number data type.

    ", + "NumberSetAttributeValue$member": null + } + }, + "NumberSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NS": "

    A Number Set data type.

    " + } + }, + "PositiveIntegerObject": { + "base": null, + "refs": { + "QueryInput$Limit": "

    The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed data set size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide.

    ", + "ScanInput$Limit": "

    The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, and a key in LastEvaluatedKey to apply in a subsequent operation, so that you can pick up where you left off. Also, if the processed data set size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation. For more information, see Query and Scan in the Amazon DynamoDB Developer Guide.

    " + } + }, + "PositiveLongObject": { + "base": null, + "refs": { + "ProvisionedThroughput$ReadCapacityUnits": "

    The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

    ", + "ProvisionedThroughput$WriteCapacityUnits": "

    The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

    ", + "ProvisionedThroughputDescription$NumberOfDecreasesToday": "

    The number of provisioned throughput decreases for this table during this UTC calendar day. For current maximums on provisioned throughput decreases, see Limits in the Amazon DynamoDB Developer Guide.

    ", + "ProvisionedThroughputDescription$ReadCapacityUnits": "

    The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. Eventually consistent reads require less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits per second provides 100 eventually consistent ReadCapacityUnits per second.

    ", + "ProvisionedThroughputDescription$WriteCapacityUnits": "

    The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.

    " + } + }, + "Projection": { + "base": "

    Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.

    ", + "refs": { + "CreateGlobalSecondaryIndexAction$Projection": null, + "GlobalSecondaryIndex$Projection": null, + "GlobalSecondaryIndexDescription$Projection": null, + "LocalSecondaryIndex$Projection": null, + "LocalSecondaryIndexDescription$Projection": null + } + }, + "ProjectionExpression": { + "base": null, + "refs": { + "GetItemInput$ProjectionExpression": "

    A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ProjectionExpression replaces the legacy AttributesToGet parameter.

    ", + "KeysAndAttributes$ProjectionExpression": "

    A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the ProjectionExpression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ProjectionExpression replaces the legacy AttributesToGet parameter.

    ", + "QueryInput$ProjectionExpression": "

    A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ProjectionExpression replaces the legacy AttributesToGet parameter.

    ", + "ScanInput$ProjectionExpression": "

    A string that identifies one or more attributes to retrieve from the specified table or index. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.

    If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.

    For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

    ProjectionExpression replaces the legacy AttributesToGet parameter.

    " + } + }, + "ProjectionType": { + "base": null, + "refs": { + "Projection$ProjectionType": "

    The set of attributes that are projected into the index:

    • KEYS_ONLY - Only the index and primary keys are projected into the index.

    • INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes are in NonKeyAttributes.

    • ALL - All of the table attributes are projected into the index.

    " + } + }, + "ProvisionedThroughput": { + "base": "

    Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation.

    For current minimum and maximum provisioned throughput values, see Limits in the Amazon DynamoDB Developer Guide.

    ", + "refs": { + "CreateGlobalSecondaryIndexAction$ProvisionedThroughput": null, + "CreateTableInput$ProvisionedThroughput": null, + "GlobalSecondaryIndex$ProvisionedThroughput": null, + "UpdateGlobalSecondaryIndexAction$ProvisionedThroughput": null, + "UpdateTableInput$ProvisionedThroughput": null + } + }, + "ProvisionedThroughputDescription": { + "base": "

    Represents the provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases.

    ", + "refs": { + "GlobalSecondaryIndexDescription$ProvisionedThroughput": null, + "TableDescription$ProvisionedThroughput": "

    The provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases.

    " + } + }, + "ProvisionedThroughputExceededException": { + "base": "

    Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.

    ", + "refs": { + } + }, + "PutItemInput": { + "base": "

    Represents the input of a PutItem operation.

    ", + "refs": { + } + }, + "PutItemInputAttributeMap": { + "base": null, + "refs": { + "PutItemInput$Item": "

    A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.

    You must provide all of the attributes for the primary key. For example, with a hash type primary key, you only need to provide the hash attribute. For a hash-and-range type primary key, you must provide both the hash attribute and the range attribute.

    If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

    For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.

    Each element in the Item map is an AttributeValue object.

    ", + "PutRequest$Item": "

    A map of attribute name to attribute values, representing the primary key of an item to be processed by PutItem. All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema. If any attributes are present in the item which are part of an index key schema for the table, their types must match the index key schema.

    " + } + }, + "PutItemOutput": { + "base": "

    Represents the output of a PutItem operation.

    ", + "refs": { + } + }, + "PutRequest": { + "base": "

    Represents a request to perform a PutItem operation on an item.

    ", + "refs": { + "WriteRequest$PutRequest": "

    A request to perform a PutItem operation.

    " + } + }, + "QueryInput": { + "base": "

    Represents the input of a Query operation.

    ", + "refs": { + } + }, + "QueryOutput": { + "base": "

    Represents the output of a Query operation.

    ", + "refs": { + } + }, + "ResourceInUseException": { + "base": "

    The operation conflicts with the resource's availability. For example, you attempted to recreate an existing table, or tried to delete a table currently in the CREATING state.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The operation tried to access a nonexistent table or index. The resource might not be specified correctly, or its status might not be ACTIVE.

    ", + "refs": { + } + }, + "ReturnConsumedCapacity": { + "base": "

    Determines the level of detail about provisioned throughput consumption that is returned in the response:

    • INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed.

      Note that some operations, such as GetItem and BatchGetItem, do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).

    • TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.

    • NONE - No ConsumedCapacity details are included in the response.

    ", + "refs": { + "BatchGetItemInput$ReturnConsumedCapacity": null, + "BatchWriteItemInput$ReturnConsumedCapacity": null, + "DeleteItemInput$ReturnConsumedCapacity": null, + "GetItemInput$ReturnConsumedCapacity": null, + "PutItemInput$ReturnConsumedCapacity": null, + "QueryInput$ReturnConsumedCapacity": null, + "ScanInput$ReturnConsumedCapacity": null, + "UpdateItemInput$ReturnConsumedCapacity": null + } + }, + "ReturnItemCollectionMetrics": { + "base": null, + "refs": { + "BatchWriteItemInput$ReturnItemCollectionMetrics": "

    Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

    ", + "DeleteItemInput$ReturnItemCollectionMetrics": "

    Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

    ", + "PutItemInput$ReturnItemCollectionMetrics": "

    Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

    ", + "UpdateItemInput$ReturnItemCollectionMetrics": "

    Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

    " + } + }, + "ReturnValue": { + "base": null, + "refs": { + "DeleteItemInput$ReturnValues": "

    Use ReturnValues if you want to get the item attributes as they appeared before they were deleted. For DeleteItem, the valid values are:

    • NONE - If ReturnValues is not specified, or if its value is NONE, then nothing is returned. (This setting is the default for ReturnValues.)

    • ALL_OLD - The content of the old item is returned.

    ", + "PutItemInput$ReturnValues": "

    Use ReturnValues if you want to get the item attributes as they appeared before they were updated with the PutItem request. For PutItem, the valid values are:

    • NONE - If ReturnValues is not specified, or if its value is NONE, then nothing is returned. (This setting is the default for ReturnValues.)

    • ALL_OLD - If PutItem overwrote an attribute name-value pair, then the content of the old item is returned.

    Other \"Valid Values\" are not relevant to PutItem.

    ", + "UpdateItemInput$ReturnValues": "

    Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem, the valid values are:

    • NONE - If ReturnValues is not specified, or if its value is NONE, then nothing is returned. (This setting is the default for ReturnValues.)

    • ALL_OLD - If UpdateItem overwrote an attribute name-value pair, then the content of the old item is returned.

    • UPDATED_OLD - The old versions of only the updated attributes are returned.

    • ALL_NEW - All of the attributes of the new version of the item are returned.

    • UPDATED_NEW - The new versions of only the updated attributes are returned.

    " + } + }, + "ScalarAttributeType": { + "base": null, + "refs": { + "AttributeDefinition$AttributeType": "

    The data type for the attribute.

    " + } + }, + "ScanInput": { + "base": "

    Represents the input of a Scan operation.

    ", + "refs": { + } + }, + "ScanOutput": { + "base": "

    Represents the output of a Scan operation.

    ", + "refs": { + } + }, + "ScanSegment": { + "base": null, + "refs": { + "ScanInput$Segment": "

    For a parallel Scan request, Segment identifies an individual segment to be scanned by an application worker.

    Segment IDs are zero-based, so the first segment is always 0. For example, if you want to use four application threads to scan a table or an index, then the first thread specifies a Segment value of 0, the second thread specifies 1, and so on.

    The value of LastEvaluatedKey returned from a parallel Scan request must be used as ExclusiveStartKey with the same segment ID in a subsequent Scan operation.

    The value for Segment must be greater than or equal to 0, and less than the value provided for TotalSegments.

    If you provide Segment, you must also provide TotalSegments.

    " + } + }, + "ScanTotalSegments": { + "base": null, + "refs": { + "ScanInput$TotalSegments": "

    For a parallel Scan request, TotalSegments represents the total number of segments into which the Scan operation will be divided. The value of TotalSegments corresponds to the number of application workers that will perform the parallel scan. For example, if you want to use four application threads to scan a table or an index, specify a TotalSegments value of 4.

    The value for TotalSegments must be greater than or equal to 1, and less than or equal to 1000000. If you specify a TotalSegments value of 1, the Scan operation will be sequential rather than parallel.

    If you specify TotalSegments, you must also specify Segment.

    " + } + }, + "SecondaryIndexesCapacityMap": { + "base": null, + "refs": { + "ConsumedCapacity$LocalSecondaryIndexes": "

    The amount of throughput consumed on each local index affected by the operation.

    ", + "ConsumedCapacity$GlobalSecondaryIndexes": "

    The amount of throughput consumed on each global index affected by the operation.

    " + } + }, + "Select": { + "base": null, + "refs": { + "QueryInput$Select": "

    The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, the count of matching items, or in the case of an index, some or all of the attributes projected into the index.

    • ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or index. If you query a local secondary index, then for each matching item in the index DynamoDB will fetch the entire item from the parent table. If the index is configured to project all item attributes, then all of the data can be obtained from the local secondary index, and no fetching is required.

    • ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all attributes that have been projected into the index. If the index is configured to project all attributes, this return value is equivalent to specifying ALL_ATTRIBUTES.

    • COUNT - Returns the number of matching items, rather than the matching items themselves.

    • SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. This return value is equivalent to specifying AttributesToGet without specifying any value for Select.

      If you query a local secondary index and request only attributes that are projected into that index, the operation will read only the index and not the table. If any of the requested attributes are not projected into the local secondary index, DynamoDB will fetch each of these attributes from the parent table. This extra fetching incurs additional throughput cost and latency.

      If you query a global secondary index, you can only request attributes that are projected into the index. Global secondary index queries cannot fetch attributes from the parent table.

    If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

    If you use the ProjectionExpression parameter, then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error.

    ", + "ScanInput$Select": "

    The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, or the count of matching items.

    • ALL_ATTRIBUTES - Returns all of the item attributes.

    • COUNT - Returns the number of matching items, rather than the matching items themselves.

    • SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. This return value is equivalent to specifying AttributesToGet without specifying any value for Select.

    If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES. You cannot use both AttributesToGet and Select together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

    " + } + }, + "StreamArn": { + "base": null, + "refs": { + "TableDescription$LatestStreamArn": "

    The Amazon Resource Name (ARN) that uniquely identifies the latest stream for this table.

    " + } + }, + "StreamEnabled": { + "base": null, + "refs": { + "StreamSpecification$StreamEnabled": "

    Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.

    " + } + }, + "StreamSpecification": { + "base": "

    Represents the DynamoDB Streams configuration for a table in DynamoDB.

    ", + "refs": { + "CreateTableInput$StreamSpecification": "

    The settings for DynamoDB Streams on the table. These settings consist of:

    • StreamEnabled - Indicates whether Streams is to be enabled (true) or disabled (false).

    • StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are:

      • KEYS_ONLY - Only the key attributes of the modified item are written to the stream.

      • NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.

      • OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.

      • NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.

    ", + "TableDescription$StreamSpecification": "

    The current DynamoDB Streams configuration for the table.

    ", + "UpdateTableInput$StreamSpecification": "

    Represents the DynamoDB Streams configuration for the table.

    You will receive a ResourceInUseException if you attempt to enable a stream on a table that already has a stream, or if you attempt to disable a stream on a table which does not have a stream.

    " + } + }, + "StreamViewType": { + "base": null, + "refs": { + "StreamSpecification$StreamViewType": "

    The DynamoDB Streams settings for the table. These settings consist of:

    • StreamEnabled - Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.

    • StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are:

      • KEYS_ONLY - Only the key attributes of the modified item are written to the stream.

      • NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.

      • OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.

      • NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.

    " + } + }, + "String": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$IndexArn": "

    The Amazon Resource Name (ARN) that uniquely identifies the index.

    ", + "LocalSecondaryIndexDescription$IndexArn": "

    The Amazon Resource Name (ARN) that uniquely identifies the index.

    ", + "TableDescription$TableArn": "

    The Amazon Resource Name (ARN) that uniquely identifies the table.

    ", + "TableDescription$LatestStreamLabel": "

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the AWS customer ID.

    • the table name.

    • the StreamLabel.

    " + } + }, + "StringAttributeValue": { + "base": null, + "refs": { + "AttributeValue$S": "

    A String data type.

    ", + "StringSetAttributeValue$member": null + } + }, + "StringSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$SS": "

    A String Set data type.

    " + } + }, + "TableDescription": { + "base": "

    Represents the properties of a table.

    ", + "refs": { + "CreateTableOutput$TableDescription": null, + "DeleteTableOutput$TableDescription": null, + "DescribeTableOutput$Table": null, + "UpdateTableOutput$TableDescription": null + } + }, + "TableName": { + "base": null, + "refs": { + "BatchGetRequestMap$key": null, + "BatchGetResponseMap$key": null, + "BatchWriteItemRequestMap$key": null, + "ConsumedCapacity$TableName": "

    The name of the table that was affected by the operation.

    ", + "CreateTableInput$TableName": "

    The name of the table to create.

    ", + "DeleteItemInput$TableName": "

    The name of the table from which to delete the item.

    ", + "DeleteTableInput$TableName": "

    The name of the table to delete.

    ", + "DescribeTableInput$TableName": "

    The name of the table to describe.

    ", + "GetItemInput$TableName": "

    The name of the table containing the requested item.

    ", + "ItemCollectionMetricsPerTable$key": null, + "ListTablesInput$ExclusiveStartTableName": "

    The first table name that this operation will evaluate. Use the value that was returned for LastEvaluatedTableName in a previous operation, so that you can obtain the next page of results.

    ", + "ListTablesOutput$LastEvaluatedTableName": "

    The name of the last table in the current page of results. Use this value as the ExclusiveStartTableName in a new request to obtain the next page of results, until all the table names are returned.

    If you do not receive a LastEvaluatedTableName value in the response, this means that there are no more table names to be retrieved.

    ", + "PutItemInput$TableName": "

    The name of the table to contain the item.

    ", + "QueryInput$TableName": "

    The name of the table containing the requested items.

    ", + "ScanInput$TableName": "

    The name of the table containing the requested items; or, if you provide IndexName, the name of the table to which that index belongs.

    ", + "TableDescription$TableName": "

    The name of the table.

    ", + "TableNameList$member": null, + "UpdateItemInput$TableName": "

    The name of the table containing the item to update.

    ", + "UpdateTableInput$TableName": "

    The name of the table to be updated.

    " + } + }, + "TableNameList": { + "base": null, + "refs": { + "ListTablesOutput$TableNames": "

    The names of the tables associated with the current account at the current endpoint. The maximum size of this array is 100.

    If LastEvaluatedTableName also appears in the output, you can use this value as the ExclusiveStartTableName parameter in a subsequent ListTables request and obtain the next page of results.

    " + } + }, + "TableStatus": { + "base": null, + "refs": { + "TableDescription$TableStatus": "

    The current state of the table:

    • CREATING - The table is being created.

    • UPDATING - The table is being updated.

    • DELETING - The table is being deleted.

    • ACTIVE - The table is ready for use.

    " + } + }, + "UpdateExpression": { + "base": null, + "refs": { + "UpdateItemInput$UpdateExpression": "

    An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.

    The following action values are available for UpdateExpression.

    • SET - Adds one or more attributes and values to an item. If any of these attribute already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val

      SET supports the following functions:

      • if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.

      • list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.

      These function names are case-sensitive.

    • REMOVE - Removes one or more attributes from an item.

    • ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:

      • If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.

        If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value.

        Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount, but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0, and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3.

      • If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2], and the ADD action specified [3], then the final attribute value is [1,2,3]. An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type.

        Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.

      The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.

    • DELETE - Deletes an element from a set.

      If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c], then the final attribute value is [b]. Specifying an empty set is an error.

      The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.

    You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5

    For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide.

    UpdateExpression replaces the legacy AttributeUpdates parameter.

    " + } + }, + "UpdateGlobalSecondaryIndexAction": { + "base": "

    Represents the new provisioned throughput settings to be applied to a global secondary index.

    ", + "refs": { + "GlobalSecondaryIndexUpdate$Update": "

    The name of an existing global secondary index, along with new provisioned throughput settings to be applied to that index.

    " + } + }, + "UpdateItemInput": { + "base": "

    Represents the input of an UpdateItem operation.

    ", + "refs": { + } + }, + "UpdateItemOutput": { + "base": "

    Represents the output of an UpdateItem operation.

    ", + "refs": { + } + }, + "UpdateTableInput": { + "base": "

    Represents the input of an UpdateTable operation.

    ", + "refs": { + } + }, + "UpdateTableOutput": { + "base": "

    Represents the output of an UpdateTable operation.

    ", + "refs": { + } + }, + "WriteRequest": { + "base": "

    Represents an operation to perform - either DeleteItem or PutItem. You can only request one of these operations, not both, in a single WriteRequest. If you do need to perform both of these operations, you will need to provide two separate WriteRequest objects.

    ", + "refs": { + "WriteRequests$member": null + } + }, + "WriteRequests": { + "base": null, + "refs": { + "BatchWriteItemRequestMap$value": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,26 @@ +{ + "pagination": { + "BatchGetItem": { + "input_token": "RequestItems", + "output_token": "UnprocessedKeys" + }, + "ListTables": { + "input_token": "ExclusiveStartTableName", + "output_token": "LastEvaluatedTableName", + "limit_key": "Limit", + "result_key": "TableNames" + }, + "Query": { + "input_token": "ExclusiveStartKey", + "output_token": "LastEvaluatedKey", + "limit_key": "Limit", + "result_key": "Items" + }, + "Scan": { + "input_token": "ExclusiveStartKey", + "output_token": "LastEvaluatedKey", + "limit_key": "Limit", + "result_key": "Items" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/dynamodb/2012-08-10/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,35 @@ +{ + "version": 2, + "waiters": { + 
"TableExists": { + "delay": 20, + "operation": "DescribeTable", + "maxAttempts": 25, + "acceptors": [ + { + "expected": "ACTIVE", + "matcher": "path", + "state": "success", + "argument": "Table.TableStatus" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "retry" + } + ] + }, + "TableNotExists": { + "delay": 20, + "operation": "DescribeTable", + "maxAttempts": 25, + "acceptors": [ + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12049 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-15", + "endpointPrefix":"ec2", + "serviceAbbreviation":"Amazon EC2", + "serviceFullName":"Amazon Elastic Compute Cloud", + "signatureVersion":"v4", + "xmlNamespace":"http://ec2.amazonaws.com/doc/2015-04-15", + "protocol":"ec2" + }, + "operations":{ + "AcceptVpcPeeringConnection":{ + "name":"AcceptVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptVpcPeeringConnectionRequest"}, + "output":{"shape":"AcceptVpcPeeringConnectionResult"} + }, + "AllocateAddress":{ + "name":"AllocateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateAddressRequest"}, + "output":{"shape":"AllocateAddressResult"} + }, + "AssignPrivateIpAddresses":{ + "name":"AssignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"AssignPrivateIpAddressesRequest"} + }, + "AssociateAddress":{ + "name":"AssociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateAddressRequest"}, + "output":{"shape":"AssociateAddressResult"} + }, + "AssociateDhcpOptions":{ + "name":"AssociateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDhcpOptionsRequest"} + }, + "AssociateRouteTable":{ + "name":"AssociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateRouteTableRequest"}, + "output":{"shape":"AssociateRouteTableResult"} + }, + "AttachClassicLinkVpc":{ + "name":"AttachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachClassicLinkVpcRequest"}, + "output":{"shape":"AttachClassicLinkVpcResult"} + }, + "AttachInternetGateway":{ + "name":"AttachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachInternetGatewayRequest"} + }, + "AttachNetworkInterface":{ + "name":"AttachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachNetworkInterfaceRequest"}, + "output":{"shape":"AttachNetworkInterfaceResult"} + }, + "AttachVolume":{ + "name":"AttachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVolumeRequest"}, + "output":{ + "shape":"VolumeAttachment", + "locationName":"attachment" + } + }, + "AttachVpnGateway":{ + "name":"AttachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVpnGatewayRequest"}, + "output":{"shape":"AttachVpnGatewayResult"} + }, + "AuthorizeSecurityGroupEgress":{ + "name":"AuthorizeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupEgressRequest"} + }, + "AuthorizeSecurityGroupIngress":{ + "name":"AuthorizeSecurityGroupIngress", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupIngressRequest"} + }, + "BundleInstance":{ + "name":"BundleInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BundleInstanceRequest"}, + "output":{"shape":"BundleInstanceResult"} + }, + "CancelBundleTask":{ + "name":"CancelBundleTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelBundleTaskRequest"}, + "output":{"shape":"CancelBundleTaskResult"} + }, + "CancelConversionTask":{ + "name":"CancelConversionTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelConversionRequest"} + }, + "CancelExportTask":{ + "name":"CancelExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelExportTaskRequest"} + }, + "CancelImportTask":{ + "name":"CancelImportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelImportTaskRequest"}, + "output":{"shape":"CancelImportTaskResult"} + }, + "CancelReservedInstancesListing":{ + "name":"CancelReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelReservedInstancesListingRequest"}, + "output":{"shape":"CancelReservedInstancesListingResult"} + }, + "CancelSpotFleetRequests":{ + "name":"CancelSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotFleetRequestsRequest"}, + "output":{"shape":"CancelSpotFleetRequestsResponse"} + }, + "CancelSpotInstanceRequests":{ + "name":"CancelSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotInstanceRequestsRequest"}, + "output":{"shape":"CancelSpotInstanceRequestsResult"} + }, + "ConfirmProductInstance":{ + "name":"ConfirmProductInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmProductInstanceRequest"}, + "output":{"shape":"ConfirmProductInstanceResult"} + }, + "CopyImage":{ + 
"name":"CopyImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyImageRequest"}, + "output":{"shape":"CopyImageResult"} + }, + "CopySnapshot":{ + "name":"CopySnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopySnapshotRequest"}, + "output":{"shape":"CopySnapshotResult"} + }, + "CreateCustomerGateway":{ + "name":"CreateCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCustomerGatewayRequest"}, + "output":{"shape":"CreateCustomerGatewayResult"} + }, + "CreateDhcpOptions":{ + "name":"CreateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDhcpOptionsRequest"}, + "output":{"shape":"CreateDhcpOptionsResult"} + }, + "CreateFlowLogs":{ + "name":"CreateFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFlowLogsRequest"}, + "output":{"shape":"CreateFlowLogsResult"} + }, + "CreateImage":{ + "name":"CreateImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateImageRequest"}, + "output":{"shape":"CreateImageResult"} + }, + "CreateInstanceExportTask":{ + "name":"CreateInstanceExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceExportTaskRequest"}, + "output":{"shape":"CreateInstanceExportTaskResult"} + }, + "CreateInternetGateway":{ + "name":"CreateInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInternetGatewayRequest"}, + "output":{"shape":"CreateInternetGatewayResult"} + }, + "CreateKeyPair":{ + "name":"CreateKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateKeyPairRequest"}, + "output":{ + "shape":"KeyPair", + "locationName":"keyPair" + } + }, + "CreateNetworkAcl":{ + "name":"CreateNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclRequest"}, + 
"output":{"shape":"CreateNetworkAclResult"} + }, + "CreateNetworkAclEntry":{ + "name":"CreateNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclEntryRequest"} + }, + "CreateNetworkInterface":{ + "name":"CreateNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkInterfaceRequest"}, + "output":{"shape":"CreateNetworkInterfaceResult"} + }, + "CreatePlacementGroup":{ + "name":"CreatePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlacementGroupRequest"} + }, + "CreateReservedInstancesListing":{ + "name":"CreateReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReservedInstancesListingRequest"}, + "output":{"shape":"CreateReservedInstancesListingResult"} + }, + "CreateRoute":{ + "name":"CreateRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteRequest"}, + "output":{"shape":"CreateRouteResult"} + }, + "CreateRouteTable":{ + "name":"CreateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteTableRequest"}, + "output":{"shape":"CreateRouteTableResult"} + }, + "CreateSecurityGroup":{ + "name":"CreateSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSecurityGroupRequest"}, + "output":{"shape":"CreateSecurityGroupResult"} + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotRequest"}, + "output":{ + "shape":"Snapshot", + "locationName":"snapshot" + } + }, + "CreateSpotDatafeedSubscription":{ + "name":"CreateSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"CreateSpotDatafeedSubscriptionResult"} + }, + "CreateSubnet":{ + 
"name":"CreateSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSubnetRequest"}, + "output":{"shape":"CreateSubnetResult"} + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsRequest"} + }, + "CreateVolume":{ + "name":"CreateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVolumeRequest"}, + "output":{ + "shape":"Volume", + "locationName":"volume" + } + }, + "CreateVpc":{ + "name":"CreateVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcRequest"}, + "output":{"shape":"CreateVpcResult"} + }, + "CreateVpcEndpoint":{ + "name":"CreateVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcEndpointRequest"}, + "output":{"shape":"CreateVpcEndpointResult"} + }, + "CreateVpcPeeringConnection":{ + "name":"CreateVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcPeeringConnectionRequest"}, + "output":{"shape":"CreateVpcPeeringConnectionResult"} + }, + "CreateVpnConnection":{ + "name":"CreateVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRequest"}, + "output":{"shape":"CreateVpnConnectionResult"} + }, + "CreateVpnConnectionRoute":{ + "name":"CreateVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRouteRequest"} + }, + "CreateVpnGateway":{ + "name":"CreateVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnGatewayRequest"}, + "output":{"shape":"CreateVpnGatewayResult"} + }, + "DeleteCustomerGateway":{ + "name":"DeleteCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomerGatewayRequest"} + }, + "DeleteDhcpOptions":{ + "name":"DeleteDhcpOptions", + "http":{ + "method":"POST", 
+ "requestUri":"/" + }, + "input":{"shape":"DeleteDhcpOptionsRequest"} + }, + "DeleteFlowLogs":{ + "name":"DeleteFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFlowLogsRequest"}, + "output":{"shape":"DeleteFlowLogsResult"} + }, + "DeleteInternetGateway":{ + "name":"DeleteInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInternetGatewayRequest"} + }, + "DeleteKeyPair":{ + "name":"DeleteKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteKeyPairRequest"} + }, + "DeleteNetworkAcl":{ + "name":"DeleteNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclRequest"} + }, + "DeleteNetworkAclEntry":{ + "name":"DeleteNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclEntryRequest"} + }, + "DeleteNetworkInterface":{ + "name":"DeleteNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkInterfaceRequest"} + }, + "DeletePlacementGroup":{ + "name":"DeletePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePlacementGroupRequest"} + }, + "DeleteRoute":{ + "name":"DeleteRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteRequest"} + }, + "DeleteRouteTable":{ + "name":"DeleteRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteTableRequest"} + }, + "DeleteSecurityGroup":{ + "name":"DeleteSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSecurityGroupRequest"} + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotRequest"} + }, + "DeleteSpotDatafeedSubscription":{ + "name":"DeleteSpotDatafeedSubscription", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"DeleteSpotDatafeedSubscriptionRequest"} + }, + "DeleteSubnet":{ + "name":"DeleteSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSubnetRequest"} + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsRequest"} + }, + "DeleteVolume":{ + "name":"DeleteVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVolumeRequest"} + }, + "DeleteVpc":{ + "name":"DeleteVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcRequest"} + }, + "DeleteVpcEndpoints":{ + "name":"DeleteVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcEndpointsRequest"}, + "output":{"shape":"DeleteVpcEndpointsResult"} + }, + "DeleteVpcPeeringConnection":{ + "name":"DeleteVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcPeeringConnectionRequest"}, + "output":{"shape":"DeleteVpcPeeringConnectionResult"} + }, + "DeleteVpnConnection":{ + "name":"DeleteVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRequest"} + }, + "DeleteVpnConnectionRoute":{ + "name":"DeleteVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRouteRequest"} + }, + "DeleteVpnGateway":{ + "name":"DeleteVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnGatewayRequest"} + }, + "DeregisterImage":{ + "name":"DeregisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterImageRequest"} + }, + "DescribeAccountAttributes":{ + "name":"DescribeAccountAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAttributesRequest"}, + 
"output":{"shape":"DescribeAccountAttributesResult"} + }, + "DescribeAddresses":{ + "name":"DescribeAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAddressesRequest"}, + "output":{"shape":"DescribeAddressesResult"} + }, + "DescribeAvailabilityZones":{ + "name":"DescribeAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAvailabilityZonesRequest"}, + "output":{"shape":"DescribeAvailabilityZonesResult"} + }, + "DescribeBundleTasks":{ + "name":"DescribeBundleTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBundleTasksRequest"}, + "output":{"shape":"DescribeBundleTasksResult"} + }, + "DescribeClassicLinkInstances":{ + "name":"DescribeClassicLinkInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClassicLinkInstancesRequest"}, + "output":{"shape":"DescribeClassicLinkInstancesResult"} + }, + "DescribeConversionTasks":{ + "name":"DescribeConversionTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConversionTasksRequest"}, + "output":{"shape":"DescribeConversionTasksResult"} + }, + "DescribeCustomerGateways":{ + "name":"DescribeCustomerGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCustomerGatewaysRequest"}, + "output":{"shape":"DescribeCustomerGatewaysResult"} + }, + "DescribeDhcpOptions":{ + "name":"DescribeDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDhcpOptionsRequest"}, + "output":{"shape":"DescribeDhcpOptionsResult"} + }, + "DescribeExportTasks":{ + "name":"DescribeExportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportTasksRequest"}, + "output":{"shape":"DescribeExportTasksResult"} + }, + "DescribeFlowLogs":{ + "name":"DescribeFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeFlowLogsRequest"}, + "output":{"shape":"DescribeFlowLogsResult"} + }, + "DescribeImageAttribute":{ + "name":"DescribeImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageAttributeRequest"}, + "output":{ + "shape":"ImageAttribute", + "locationName":"imageAttribute" + } + }, + "DescribeImages":{ + "name":"DescribeImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImagesRequest"}, + "output":{"shape":"DescribeImagesResult"} + }, + "DescribeImportImageTasks":{ + "name":"DescribeImportImageTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportImageTasksRequest"}, + "output":{"shape":"DescribeImportImageTasksResult"} + }, + "DescribeImportSnapshotTasks":{ + "name":"DescribeImportSnapshotTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportSnapshotTasksRequest"}, + "output":{"shape":"DescribeImportSnapshotTasksResult"} + }, + "DescribeInstanceAttribute":{ + "name":"DescribeInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceAttributeRequest"}, + "output":{"shape":"InstanceAttribute"} + }, + "DescribeInstanceStatus":{ + "name":"DescribeInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceStatusRequest"}, + "output":{"shape":"DescribeInstanceStatusResult"} + }, + "DescribeInstances":{ + "name":"DescribeInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstancesRequest"}, + "output":{"shape":"DescribeInstancesResult"} + }, + "DescribeInternetGateways":{ + "name":"DescribeInternetGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInternetGatewaysRequest"}, + "output":{"shape":"DescribeInternetGatewaysResult"} + }, + "DescribeKeyPairs":{ + "name":"DescribeKeyPairs", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKeyPairsRequest"}, + "output":{"shape":"DescribeKeyPairsResult"} + }, + "DescribeMovingAddresses":{ + "name":"DescribeMovingAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMovingAddressesRequest"}, + "output":{"shape":"DescribeMovingAddressesResult"} + }, + "DescribeNetworkAcls":{ + "name":"DescribeNetworkAcls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkAclsRequest"}, + "output":{"shape":"DescribeNetworkAclsResult"} + }, + "DescribeNetworkInterfaceAttribute":{ + "name":"DescribeNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfaceAttributeRequest"}, + "output":{"shape":"DescribeNetworkInterfaceAttributeResult"} + }, + "DescribeNetworkInterfaces":{ + "name":"DescribeNetworkInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfacesRequest"}, + "output":{"shape":"DescribeNetworkInterfacesResult"} + }, + "DescribePlacementGroups":{ + "name":"DescribePlacementGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePlacementGroupsRequest"}, + "output":{"shape":"DescribePlacementGroupsResult"} + }, + "DescribePrefixLists":{ + "name":"DescribePrefixLists", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePrefixListsRequest"}, + "output":{"shape":"DescribePrefixListsResult"} + }, + "DescribeRegions":{ + "name":"DescribeRegions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRegionsRequest"}, + "output":{"shape":"DescribeRegionsResult"} + }, + "DescribeReservedInstances":{ + "name":"DescribeReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesRequest"}, + "output":{"shape":"DescribeReservedInstancesResult"} + }, + 
"DescribeReservedInstancesListings":{ + "name":"DescribeReservedInstancesListings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesListingsRequest"}, + "output":{"shape":"DescribeReservedInstancesListingsResult"} + }, + "DescribeReservedInstancesModifications":{ + "name":"DescribeReservedInstancesModifications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesModificationsRequest"}, + "output":{"shape":"DescribeReservedInstancesModificationsResult"} + }, + "DescribeReservedInstancesOfferings":{ + "name":"DescribeReservedInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesOfferingsRequest"}, + "output":{"shape":"DescribeReservedInstancesOfferingsResult"} + }, + "DescribeRouteTables":{ + "name":"DescribeRouteTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRouteTablesRequest"}, + "output":{"shape":"DescribeRouteTablesResult"} + }, + "DescribeSecurityGroups":{ + "name":"DescribeSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSecurityGroupsRequest"}, + "output":{"shape":"DescribeSecurityGroupsResult"} + }, + "DescribeSnapshotAttribute":{ + "name":"DescribeSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotAttributeRequest"}, + "output":{"shape":"DescribeSnapshotAttributeResult"} + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsRequest"}, + "output":{"shape":"DescribeSnapshotsResult"} + }, + "DescribeSpotDatafeedSubscription":{ + "name":"DescribeSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"DescribeSpotDatafeedSubscriptionResult"} + }, + 
"DescribeSpotFleetInstances":{ + "name":"DescribeSpotFleetInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetInstancesRequest"}, + "output":{"shape":"DescribeSpotFleetInstancesResponse"} + }, + "DescribeSpotFleetRequestHistory":{ + "name":"DescribeSpotFleetRequestHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestHistoryRequest"}, + "output":{"shape":"DescribeSpotFleetRequestHistoryResponse"} + }, + "DescribeSpotFleetRequests":{ + "name":"DescribeSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestsRequest"}, + "output":{"shape":"DescribeSpotFleetRequestsResponse"} + }, + "DescribeSpotInstanceRequests":{ + "name":"DescribeSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotInstanceRequestsRequest"}, + "output":{"shape":"DescribeSpotInstanceRequestsResult"} + }, + "DescribeSpotPriceHistory":{ + "name":"DescribeSpotPriceHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotPriceHistoryRequest"}, + "output":{"shape":"DescribeSpotPriceHistoryResult"} + }, + "DescribeSubnets":{ + "name":"DescribeSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSubnetsRequest"}, + "output":{"shape":"DescribeSubnetsResult"} + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResult"} + }, + "DescribeVolumeAttribute":{ + "name":"DescribeVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeAttributeRequest"}, + "output":{"shape":"DescribeVolumeAttributeResult"} + }, + "DescribeVolumeStatus":{ + "name":"DescribeVolumeStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeVolumeStatusRequest"}, + "output":{"shape":"DescribeVolumeStatusResult"} + }, + "DescribeVolumes":{ + "name":"DescribeVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumesRequest"}, + "output":{"shape":"DescribeVolumesResult"} + }, + "DescribeVpcAttribute":{ + "name":"DescribeVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcAttributeRequest"}, + "output":{"shape":"DescribeVpcAttributeResult"} + }, + "DescribeVpcClassicLink":{ + "name":"DescribeVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcClassicLinkRequest"}, + "output":{"shape":"DescribeVpcClassicLinkResult"} + }, + "DescribeVpcEndpointServices":{ + "name":"DescribeVpcEndpointServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointServicesRequest"}, + "output":{"shape":"DescribeVpcEndpointServicesResult"} + }, + "DescribeVpcEndpoints":{ + "name":"DescribeVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointsRequest"}, + "output":{"shape":"DescribeVpcEndpointsResult"} + }, + "DescribeVpcPeeringConnections":{ + "name":"DescribeVpcPeeringConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcPeeringConnectionsRequest"}, + "output":{"shape":"DescribeVpcPeeringConnectionsResult"} + }, + "DescribeVpcs":{ + "name":"DescribeVpcs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcsRequest"}, + "output":{"shape":"DescribeVpcsResult"} + }, + "DescribeVpnConnections":{ + "name":"DescribeVpnConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnConnectionsRequest"}, + "output":{"shape":"DescribeVpnConnectionsResult"} + }, + "DescribeVpnGateways":{ + "name":"DescribeVpnGateways", + "http":{ + "method":"POST", + "requestUri":"/" 
+ }, + "input":{"shape":"DescribeVpnGatewaysRequest"}, + "output":{"shape":"DescribeVpnGatewaysResult"} + }, + "DetachClassicLinkVpc":{ + "name":"DetachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachClassicLinkVpcRequest"}, + "output":{"shape":"DetachClassicLinkVpcResult"} + }, + "DetachInternetGateway":{ + "name":"DetachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachInternetGatewayRequest"} + }, + "DetachNetworkInterface":{ + "name":"DetachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachNetworkInterfaceRequest"} + }, + "DetachVolume":{ + "name":"DetachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVolumeRequest"}, + "output":{ + "shape":"VolumeAttachment", + "locationName":"attachment" + } + }, + "DetachVpnGateway":{ + "name":"DetachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVpnGatewayRequest"} + }, + "DisableVgwRoutePropagation":{ + "name":"DisableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVgwRoutePropagationRequest"} + }, + "DisableVpcClassicLink":{ + "name":"DisableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVpcClassicLinkRequest"}, + "output":{"shape":"DisableVpcClassicLinkResult"} + }, + "DisassociateAddress":{ + "name":"DisassociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateAddressRequest"} + }, + "DisassociateRouteTable":{ + "name":"DisassociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateRouteTableRequest"} + }, + "EnableVgwRoutePropagation":{ + "name":"EnableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVgwRoutePropagationRequest"} + }, + 
"EnableVolumeIO":{ + "name":"EnableVolumeIO", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVolumeIORequest"} + }, + "EnableVpcClassicLink":{ + "name":"EnableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVpcClassicLinkRequest"}, + "output":{"shape":"EnableVpcClassicLinkResult"} + }, + "GetConsoleOutput":{ + "name":"GetConsoleOutput", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConsoleOutputRequest"}, + "output":{"shape":"GetConsoleOutputResult"} + }, + "GetPasswordData":{ + "name":"GetPasswordData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPasswordDataRequest"}, + "output":{"shape":"GetPasswordDataResult"} + }, + "ImportImage":{ + "name":"ImportImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportImageRequest"}, + "output":{"shape":"ImportImageResult"} + }, + "ImportInstance":{ + "name":"ImportInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportInstanceRequest"}, + "output":{"shape":"ImportInstanceResult"} + }, + "ImportKeyPair":{ + "name":"ImportKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportKeyPairRequest"}, + "output":{"shape":"ImportKeyPairResult"} + }, + "ImportSnapshot":{ + "name":"ImportSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportSnapshotRequest"}, + "output":{"shape":"ImportSnapshotResult"} + }, + "ImportVolume":{ + "name":"ImportVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportVolumeRequest"}, + "output":{"shape":"ImportVolumeResult"} + }, + "ModifyImageAttribute":{ + "name":"ModifyImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyImageAttributeRequest"} + }, + "ModifyInstanceAttribute":{ + "name":"ModifyInstanceAttribute", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"ModifyInstanceAttributeRequest"} + }, + "ModifyNetworkInterfaceAttribute":{ + "name":"ModifyNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyNetworkInterfaceAttributeRequest"} + }, + "ModifyReservedInstances":{ + "name":"ModifyReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReservedInstancesRequest"}, + "output":{"shape":"ModifyReservedInstancesResult"} + }, + "ModifySnapshotAttribute":{ + "name":"ModifySnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySnapshotAttributeRequest"} + }, + "ModifySubnetAttribute":{ + "name":"ModifySubnetAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySubnetAttributeRequest"} + }, + "ModifyVolumeAttribute":{ + "name":"ModifyVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVolumeAttributeRequest"} + }, + "ModifyVpcAttribute":{ + "name":"ModifyVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcAttributeRequest"} + }, + "ModifyVpcEndpoint":{ + "name":"ModifyVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcEndpointRequest"}, + "output":{"shape":"ModifyVpcEndpointResult"} + }, + "MonitorInstances":{ + "name":"MonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MonitorInstancesRequest"}, + "output":{"shape":"MonitorInstancesResult"} + }, + "MoveAddressToVpc":{ + "name":"MoveAddressToVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MoveAddressToVpcRequest"}, + "output":{"shape":"MoveAddressToVpcResult"} + }, + "PurchaseReservedInstancesOffering":{ + "name":"PurchaseReservedInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"PurchaseReservedInstancesOfferingRequest"}, + "output":{"shape":"PurchaseReservedInstancesOfferingResult"} + }, + "RebootInstances":{ + "name":"RebootInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootInstancesRequest"} + }, + "RegisterImage":{ + "name":"RegisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterImageRequest"}, + "output":{"shape":"RegisterImageResult"} + }, + "RejectVpcPeeringConnection":{ + "name":"RejectVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectVpcPeeringConnectionRequest"}, + "output":{"shape":"RejectVpcPeeringConnectionResult"} + }, + "ReleaseAddress":{ + "name":"ReleaseAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReleaseAddressRequest"} + }, + "ReplaceNetworkAclAssociation":{ + "name":"ReplaceNetworkAclAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclAssociationRequest"}, + "output":{"shape":"ReplaceNetworkAclAssociationResult"} + }, + "ReplaceNetworkAclEntry":{ + "name":"ReplaceNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclEntryRequest"} + }, + "ReplaceRoute":{ + "name":"ReplaceRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteRequest"} + }, + "ReplaceRouteTableAssociation":{ + "name":"ReplaceRouteTableAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteTableAssociationRequest"}, + "output":{"shape":"ReplaceRouteTableAssociationResult"} + }, + "ReportInstanceStatus":{ + "name":"ReportInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportInstanceStatusRequest"} + }, + "RequestSpotFleet":{ + "name":"RequestSpotFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RequestSpotFleetRequest"}, + "output":{"shape":"RequestSpotFleetResponse"} + }, + "RequestSpotInstances":{ + "name":"RequestSpotInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotInstancesRequest"}, + "output":{"shape":"RequestSpotInstancesResult"} + }, + "ResetImageAttribute":{ + "name":"ResetImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetImageAttributeRequest"} + }, + "ResetInstanceAttribute":{ + "name":"ResetInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetInstanceAttributeRequest"} + }, + "ResetNetworkInterfaceAttribute":{ + "name":"ResetNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetNetworkInterfaceAttributeRequest"} + }, + "ResetSnapshotAttribute":{ + "name":"ResetSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetSnapshotAttributeRequest"} + }, + "RestoreAddressToClassic":{ + "name":"RestoreAddressToClassic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreAddressToClassicRequest"}, + "output":{"shape":"RestoreAddressToClassicResult"} + }, + "RevokeSecurityGroupEgress":{ + "name":"RevokeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupEgressRequest"} + }, + "RevokeSecurityGroupIngress":{ + "name":"RevokeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupIngressRequest"} + }, + "RunInstances":{ + "name":"RunInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunInstancesRequest"}, + "output":{ + "shape":"Reservation", + "locationName":"reservation" + } + }, + "StartInstances":{ + "name":"StartInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"StartInstancesRequest"}, + "output":{"shape":"StartInstancesResult"} + }, + "StopInstances":{ + "name":"StopInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInstancesRequest"}, + "output":{"shape":"StopInstancesResult"} + }, + "TerminateInstances":{ + "name":"TerminateInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateInstancesRequest"}, + "output":{"shape":"TerminateInstancesResult"} + }, + "UnassignPrivateIpAddresses":{ + "name":"UnassignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignPrivateIpAddressesRequest"} + }, + "UnmonitorInstances":{ + "name":"UnmonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnmonitorInstancesRequest"}, + "output":{"shape":"UnmonitorInstancesResult"} + } + }, + "shapes":{ + "AcceptVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "AcceptVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "AccountAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"String", + "locationName":"attributeName" + }, + "AttributeValues":{ + "shape":"AccountAttributeValueList", + "locationName":"attributeValueSet" + } + } + }, + "AccountAttributeList":{ + "type":"list", + "member":{ + "shape":"AccountAttribute", + "locationName":"item" + } + }, + "AccountAttributeName":{ + "type":"string", + "enum":[ + "supported-platforms", + "default-vpc" + ] + }, + "AccountAttributeNameStringList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeName", + "locationName":"attributeName" + } + }, + "AccountAttributeValue":{ + 
"type":"structure", + "members":{ + "AttributeValue":{ + "shape":"String", + "locationName":"attributeValue" + } + } + }, + "AccountAttributeValueList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeValue", + "locationName":"item" + } + }, + "ActiveInstance":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + } + } + }, + "ActiveInstanceSet":{ + "type":"list", + "member":{ + "shape":"ActiveInstance", + "locationName":"item" + } + }, + "Address":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "NetworkInterfaceOwnerId":{ + "shape":"String", + "locationName":"networkInterfaceOwnerId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "AddressList":{ + "type":"list", + "member":{ + "shape":"Address", + "locationName":"item" + } + }, + "AllocateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Domain":{"shape":"DomainType"} + } + }, + "AllocateAddressResult":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + } + } + }, + "AllocationIdList":{ + "type":"list", + "member":{ + "shape":"String", + 
"locationName":"AllocationId" + } + }, + "AllocationStrategy":{ + "type":"string", + "enum":[ + "lowestPrice", + "diversified" + ] + }, + "ArchitectureValues":{ + "type":"string", + "enum":[ + "i386", + "x86_64" + ] + }, + "AssignPrivateIpAddressesRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AllowReassignment":{ + "shape":"Boolean", + "locationName":"allowReassignment" + } + } + }, + "AssociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"}, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "AllowReassociation":{ + "shape":"Boolean", + "locationName":"allowReassociation" + } + } + }, + "AssociateAddressResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AssociateDhcpOptionsRequest":{ + "type":"structure", + "required":[ + "DhcpOptionsId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AssociateRouteTableRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + 
"AssociateRouteTableResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AttachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId", + "Groups" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"SecurityGroupId" + } + } + }, + "AttachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "AttachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "AttachNetworkInterfaceRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "InstanceId", + "DeviceIndex" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + } + } + }, + "AttachNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + } + } + }, + "AttachVolumeRequest":{ + "type":"structure", + "required":[ + "VolumeId", + "InstanceId", + "Device" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"} + } + }, + "AttachVpnGatewayRequest":{ + 
"type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AttachVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpcAttachment":{ + "shape":"VpcAttachment", + "locationName":"attachment" + } + } + }, + "AttachmentStatus":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "AttributeBooleanValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Boolean", + "locationName":"value" + } + } + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "AuthorizeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "AuthorizeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + 
"CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "ZoneName":{ + "shape":"String", + "locationName":"zoneName" + }, + "State":{ + "shape":"AvailabilityZoneState", + "locationName":"zoneState" + }, + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Messages":{ + "shape":"AvailabilityZoneMessageList", + "locationName":"messageSet" + } + } + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"item" + } + }, + "AvailabilityZoneMessage":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "AvailabilityZoneMessageList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZoneMessage", + "locationName":"item" + } + }, + "AvailabilityZoneState":{ + "type":"string", + "enum":["available"] + }, + "BatchState":{ + "type":"string", + "enum":[ + "submitted", + "active", + "cancelled", + "failed", + "cancelled_running", + "cancelled_terminating" + ] + }, + "BlockDeviceMapping":{ + "type":"structure", + "members":{ + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsBlockDevice", + "locationName":"ebs" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "BlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"item" + } + }, + "BlockDeviceMappingRequestList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"BlockDeviceMapping" + } + }, + "Boolean":{"type":"boolean"}, + "BundleIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"BundleId" + } + }, + "BundleInstanceRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Storage" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", 
+ "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "Storage":{"shape":"Storage"} + } + }, + "BundleInstanceResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "BundleTask":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "BundleId":{ + "shape":"String", + "locationName":"bundleId" + }, + "State":{ + "shape":"BundleTaskState", + "locationName":"state" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Storage":{ + "shape":"Storage", + "locationName":"storage" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "BundleTaskError":{ + "shape":"BundleTaskError", + "locationName":"error" + } + } + }, + "BundleTaskError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "BundleTaskList":{ + "type":"list", + "member":{ + "shape":"BundleTask", + "locationName":"item" + } + }, + "BundleTaskState":{ + "type":"string", + "enum":[ + "pending", + "waiting-for-shutdown", + "bundling", + "storing", + "cancelling", + "complete", + "failed" + ] + }, + "CancelBatchErrorCode":{ + "type":"string", + "enum":[ + "fleetRequestIdDoesNotExist", + "fleetRequestIdMalformed", + "fleetRequestNotInCancellableState", + "unexpectedError" + ] + }, + "CancelBundleTaskRequest":{ + "type":"structure", + "required":["BundleId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleId":{"shape":"String"} + } + }, + "CancelBundleTaskResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "CancelConversionRequest":{ + "type":"structure", + "required":["ConversionTaskId"], + 
"members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ReasonMessage":{ + "shape":"String", + "locationName":"reasonMessage" + } + } + }, + "CancelExportTaskRequest":{ + "type":"structure", + "required":["ExportTaskId"], + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + } + } + }, + "CancelImportTaskRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskId":{"shape":"String"}, + "CancelReason":{"shape":"String"} + } + }, + "CancelImportTaskResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "PreviousState":{ + "shape":"String", + "locationName":"previousState" + } + } + }, + "CancelReservedInstancesListingRequest":{ + "type":"structure", + "required":["ReservedInstancesListingId"], + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + } + } + }, + "CancelReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CancelSpotFleetRequestsError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"CancelBatchErrorCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "CancelSpotFleetRequestsErrorItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "Error" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "Error":{ + "shape":"CancelSpotFleetRequestsError", + "locationName":"error" + } + } + }, + "CancelSpotFleetRequestsErrorSet":{ + "type":"list", + "member":{ + 
"shape":"CancelSpotFleetRequestsErrorItem", + "locationName":"item" + } + }, + "CancelSpotFleetRequestsRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestIds", + "TerminateInstances" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "TerminateInstances":{ + "shape":"Boolean", + "locationName":"terminateInstances" + } + } + }, + "CancelSpotFleetRequestsResponse":{ + "type":"structure", + "members":{ + "UnsuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsErrorSet", + "locationName":"unsuccessfulFleetRequestSet" + }, + "SuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsSuccessSet", + "locationName":"successfulFleetRequestSet" + } + } + }, + "CancelSpotFleetRequestsSuccessItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "CurrentSpotFleetRequestState", + "PreviousSpotFleetRequestState" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "CurrentSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"currentSpotFleetRequestState" + }, + "PreviousSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"previousSpotFleetRequestState" + } + } + }, + "CancelSpotFleetRequestsSuccessSet":{ + "type":"list", + "member":{ + "shape":"CancelSpotFleetRequestsSuccessItem", + "locationName":"item" + } + }, + "CancelSpotInstanceRequestState":{ + "type":"string", + "enum":[ + "active", + "open", + "closed", + "cancelled", + "completed" + ] + }, + "CancelSpotInstanceRequestsRequest":{ + "type":"structure", + "required":["SpotInstanceRequestIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + } + } + }, + "CancelSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + 
"CancelledSpotInstanceRequests":{ + "shape":"CancelledSpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "CancelledSpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "State":{ + "shape":"CancelSpotInstanceRequestState", + "locationName":"state" + } + } + }, + "CancelledSpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"CancelledSpotInstanceRequest", + "locationName":"item" + } + }, + "ClassicLinkInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ClassicLinkInstanceList":{ + "type":"list", + "member":{ + "shape":"ClassicLinkInstance", + "locationName":"item" + } + }, + "ClientData":{ + "type":"structure", + "members":{ + "UploadStart":{"shape":"DateTime"}, + "UploadEnd":{"shape":"DateTime"}, + "UploadSize":{"shape":"Double"}, + "Comment":{"shape":"String"} + } + }, + "ConfirmProductInstanceRequest":{ + "type":"structure", + "required":[ + "ProductCode", + "InstanceId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ProductCode":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "ConfirmProductInstanceResult":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ContainerFormat":{ + "type":"string", + "enum":["ova"] + }, + "ConversionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "ConversionTask":{ + "type":"structure", + "required":[ + "ConversionTaskId", + "State" + ], + "members":{ + "ConversionTaskId":{ + 
"shape":"String", + "locationName":"conversionTaskId" + }, + "ExpirationTime":{ + "shape":"String", + "locationName":"expirationTime" + }, + "ImportInstance":{ + "shape":"ImportInstanceTaskDetails", + "locationName":"importInstance" + }, + "ImportVolume":{ + "shape":"ImportVolumeTaskDetails", + "locationName":"importVolume" + }, + "State":{ + "shape":"ConversionTaskState", + "locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ConversionTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "CopyImageRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceImageId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceImageId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CopyImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CopySnapshotRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceSnapshotId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceSnapshotId":{"shape":"String"}, + "Description":{"shape":"String"}, + "DestinationRegion":{ + "shape":"String", + "locationName":"destinationRegion" + }, + "PresignedUrl":{ + "shape":"String", + "locationName":"presignedUrl" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "CopySnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + } + } + }, + "CreateCustomerGatewayRequest":{ + "type":"structure", + 
"required":[ + "Type", + "PublicIp", + "BgpAsn" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "PublicIp":{ + "shape":"String", + "locationName":"IpAddress" + }, + "BgpAsn":{"shape":"Integer"} + } + }, + "CreateCustomerGatewayResult":{ + "type":"structure", + "members":{ + "CustomerGateway":{ + "shape":"CustomerGateway", + "locationName":"customerGateway" + } + } + }, + "CreateDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpConfigurations"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpConfigurations":{ + "shape":"NewDhcpConfigurationList", + "locationName":"dhcpConfiguration" + } + } + }, + "CreateDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptions", + "locationName":"dhcpOptions" + } + } + }, + "CreateFlowLogsRequest":{ + "type":"structure", + "required":[ + "ResourceIds", + "ResourceType", + "TrafficType", + "LogGroupName", + "DeliverLogsPermissionArn" + ], + "members":{ + "ResourceIds":{ + "shape":"ValueStringList", + "locationName":"ResourceId" + }, + "ResourceType":{"shape":"FlowLogsResourceType"}, + "TrafficType":{"shape":"TrafficType"}, + "LogGroupName":{"shape":"String"}, + "DeliverLogsPermissionArn":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CreateFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"flowLogIdSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "CreateImageRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + 
"Description":{ + "shape":"String", + "locationName":"description" + }, + "NoReboot":{ + "shape":"Boolean", + "locationName":"noReboot" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"blockDeviceMapping" + } + } + }, + "CreateImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CreateInstanceExportTaskRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "Description":{ + "shape":"String", + "locationName":"description" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + }, + "ExportToS3Task":{ + "shape":"ExportToS3TaskSpecification", + "locationName":"exportToS3" + } + } + }, + "CreateInstanceExportTaskResult":{ + "type":"structure", + "members":{ + "ExportTask":{ + "shape":"ExportTask", + "locationName":"exportTask" + } + } + }, + "CreateInternetGatewayRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateInternetGatewayResult":{ + "type":"structure", + "members":{ + "InternetGateway":{ + "shape":"InternetGateway", + "locationName":"internetGateway" + } + } + }, + "CreateKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "CreateNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + 
"shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "CreateNetworkAclRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateNetworkAclResult":{ + "type":"structure", + "members":{ + "NetworkAcl":{ + "shape":"NetworkAcl", + "locationName":"networkAcl" + } + } + }, + "CreateNetworkInterfaceRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "NetworkInterface":{ + "shape":"NetworkInterface", + "locationName":"networkInterface" + } + } + }, + "CreatePlacementGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Strategy" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + } + } + }, + "CreateReservedInstancesListingRequest":{ + 
"type":"structure", + "required":[ + "ReservedInstancesId", + "InstanceCount", + "PriceSchedules", + "ClientToken" + ], + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "PriceSchedules":{ + "shape":"PriceScheduleSpecificationList", + "locationName":"priceSchedules" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CreateRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "CreateRouteResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "CreateRouteTableRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateRouteTableResult":{ + "type":"structure", + "members":{ + "RouteTable":{ + "shape":"RouteTable", + "locationName":"routeTable" + } + } + }, + "CreateSecurityGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + 
"Description" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "Description":{ + "shape":"String", + "locationName":"GroupDescription" + }, + "VpcId":{"shape":"String"} + } + }, + "CreateSecurityGroupResult":{ + "type":"structure", + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "CreateSnapshotRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + } + } + }, + "CreateSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "CreateSubnetRequest":{ + "type":"structure", + "required":[ + "VpcId", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "CidrBlock":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateSubnetResult":{ + "type":"structure", + "members":{ + "Subnet":{ + "shape":"Subnet", + "locationName":"subnet" + } + } + }, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "Resources", + "Tags" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"ResourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"Tag" + } + } + }, + "CreateVolumePermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + 
"Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "CreateVolumePermissionList":{ + "type":"list", + "member":{ + "shape":"CreateVolumePermission", + "locationName":"item" + } + }, + "CreateVolumePermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"CreateVolumePermissionList"}, + "Remove":{"shape":"CreateVolumePermissionList"} + } + }, + "CreateVolumeRequest":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Size":{"shape":"Integer"}, + "SnapshotId":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VolumeType":{"shape":"VolumeType"}, + "Iops":{"shape":"Integer"}, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateVpcEndpointRequest":{ + "type":"structure", + "required":[ + "VpcId", + "ServiceName" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcId":{"shape":"String"}, + "ServiceName":{"shape":"String"}, + "PolicyDocument":{"shape":"String"}, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "ClientToken":{"shape":"String"} + } + }, + "CreateVpcEndpointResult":{ + "type":"structure", + "members":{ + "VpcEndpoint":{ + "shape":"VpcEndpoint", + "locationName":"vpcEndpoint" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PeerVpcId":{ + "shape":"String", + "locationName":"peerVpcId" + }, + "PeerOwnerId":{ + "shape":"String", + "locationName":"peerOwnerId" + } + } + }, + "CreateVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + 
"CreateVpcRequest":{ + "type":"structure", + "required":["CidrBlock"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CidrBlock":{"shape":"String"}, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + } + } + }, + "CreateVpcResult":{ + "type":"structure", + "members":{ + "Vpc":{ + "shape":"Vpc", + "locationName":"vpc" + } + } + }, + "CreateVpnConnectionRequest":{ + "type":"structure", + "required":[ + "Type", + "CustomerGatewayId", + "VpnGatewayId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"String"}, + "CustomerGatewayId":{"shape":"String"}, + "VpnGatewayId":{"shape":"String"}, + "Options":{ + "shape":"VpnConnectionOptionsSpecification", + "locationName":"options" + } + } + }, + "CreateVpnConnectionResult":{ + "type":"structure", + "members":{ + "VpnConnection":{ + "shape":"VpnConnection", + "locationName":"vpnConnection" + } + } + }, + "CreateVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "CreateVpnGatewayRequest":{ + "type":"structure", + "required":["Type"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpnGateway":{ + "shape":"VpnGateway", + "locationName":"vpnGateway" + } + } + }, + "CurrencyCodeValues":{ + "type":"string", + "enum":["USD"] + }, + "CustomerGateway":{ + "type":"structure", + "members":{ + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "Type":{ + "shape":"String", + "locationName":"type" + }, + "IpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + 
"BgpAsn":{ + "shape":"String", + "locationName":"bgpAsn" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "CustomerGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CustomerGatewayId" + } + }, + "CustomerGatewayList":{ + "type":"list", + "member":{ + "shape":"CustomerGateway", + "locationName":"item" + } + }, + "DatafeedSubscriptionState":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, + "DateTime":{"type":"timestamp"}, + "DeleteCustomerGatewayRequest":{ + "type":"structure", + "required":["CustomerGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayId":{"shape":"String"} + } + }, + "DeleteDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpOptionsId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"} + } + }, + "DeleteFlowLogsRequest":{ + "type":"structure", + "required":["FlowLogIds"], + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + } + } + }, + "DeleteFlowLogsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteInternetGatewayRequest":{ + "type":"structure", + "required":["InternetGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + } + } + }, + "DeleteKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "DeleteNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Egress" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + 
"locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + } + } + }, + "DeleteNetworkAclRequest":{ + "type":"structure", + "required":["NetworkAclId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "DeleteNetworkInterfaceRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + } + } + }, + "DeletePlacementGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "DeleteRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + } + } + }, + "DeleteRouteTableRequest":{ + "type":"structure", + "required":["RouteTableId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "DeleteSecurityGroupRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"} + } + }, + "DeleteSnapshotRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"} + } + }, + 
"DeleteSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DeleteSubnetRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{"shape":"String"} + } + }, + "DeleteTagsRequest":{ + "type":"structure", + "required":["Resources"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"resourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tag" + } + } + }, + "DeleteVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"} + } + }, + "DeleteVpcEndpointsRequest":{ + "type":"structure", + "required":["VpcEndpointIds"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + } + } + }, + "DeleteVpcEndpointsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "DeleteVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DeleteVpcRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRequest":{ + "type":"structure", + "required":["VpnConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + 
"locationName":"dryRun" + }, + "VpnConnectionId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "DeleteVpnGatewayRequest":{ + "type":"structure", + "required":["VpnGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"} + } + }, + "DeregisterImageRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"} + } + }, + "DescribeAccountAttributesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AttributeNames":{ + "shape":"AccountAttributeNameStringList", + "locationName":"attributeName" + } + } + }, + "DescribeAccountAttributesResult":{ + "type":"structure", + "members":{ + "AccountAttributes":{ + "shape":"AccountAttributeList", + "locationName":"accountAttributeSet" + } + } + }, + "DescribeAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"PublicIpStringList", + "locationName":"PublicIp" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AllocationIds":{ + "shape":"AllocationIdList", + "locationName":"AllocationId" + } + } + }, + "DescribeAddressesResult":{ + "type":"structure", + "members":{ + "Addresses":{ + "shape":"AddressList", + "locationName":"addressesSet" + } + } + }, + "DescribeAvailabilityZonesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ZoneNames":{ + "shape":"ZoneNameStringList", + "locationName":"ZoneName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + 
"DescribeAvailabilityZonesResult":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneList", + "locationName":"availabilityZoneInfo" + } + } + }, + "DescribeBundleTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleIds":{ + "shape":"BundleIdStringList", + "locationName":"BundleId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeBundleTasksResult":{ + "type":"structure", + "members":{ + "BundleTasks":{ + "shape":"BundleTaskList", + "locationName":"bundleInstanceTasksSet" + } + } + }, + "DescribeClassicLinkInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeClassicLinkInstancesResult":{ + "type":"structure", + "members":{ + "Instances":{ + "shape":"ClassicLinkInstanceList", + "locationName":"instancesSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeConversionTaskList":{ + "type":"list", + "member":{ + "shape":"ConversionTask", + "locationName":"item" + } + }, + "DescribeConversionTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "ConversionTaskIds":{ + "shape":"ConversionIdStringList", + "locationName":"conversionTaskId" + } + } + }, + "DescribeConversionTasksResult":{ + "type":"structure", + "members":{ + "ConversionTasks":{ + "shape":"DescribeConversionTaskList", + "locationName":"conversionTasks" + } + } + }, + "DescribeCustomerGatewaysRequest":{ 
+ "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayIds":{ + "shape":"CustomerGatewayIdStringList", + "locationName":"CustomerGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeCustomerGatewaysResult":{ + "type":"structure", + "members":{ + "CustomerGateways":{ + "shape":"CustomerGatewayList", + "locationName":"customerGatewaySet" + } + } + }, + "DescribeDhcpOptionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsIds":{ + "shape":"DhcpOptionsIdStringList", + "locationName":"DhcpOptionsId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptionsList", + "locationName":"dhcpOptionsSet" + } + } + }, + "DescribeExportTasksRequest":{ + "type":"structure", + "members":{ + "ExportTaskIds":{ + "shape":"ExportTaskIdStringList", + "locationName":"exportTaskId" + } + } + }, + "DescribeExportTasksResult":{ + "type":"structure", + "members":{ + "ExportTasks":{ + "shape":"ExportTaskList", + "locationName":"exportTaskSet" + } + } + }, + "DescribeFlowLogsRequest":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + }, + "Filter":{"shape":"FilterList"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogs":{ + "shape":"FlowLogSet", + "locationName":"flowLogSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + 
"Attribute":{"shape":"ImageAttributeName"} + } + }, + "DescribeImagesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageIds":{ + "shape":"ImageIdStringList", + "locationName":"ImageId" + }, + "Owners":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "ExecutableUsers":{ + "shape":"ExecutableByStringList", + "locationName":"ExecutableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeImagesResult":{ + "type":"structure", + "members":{ + "Images":{ + "shape":"ImageList", + "locationName":"imagesSet" + } + } + }, + "DescribeImportImageTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportImageTasksResult":{ + "type":"structure", + "members":{ + "ImportImageTasks":{ + "shape":"ImportImageTaskList", + "locationName":"importImageTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeImportSnapshotTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportSnapshotTasksResult":{ + "type":"structure", + "members":{ + "ImportSnapshotTasks":{ + "shape":"ImportSnapshotTaskList", + "locationName":"importSnapshotTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", 
+ "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "DescribeInstanceStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "IncludeAllInstances":{ + "shape":"Boolean", + "locationName":"includeAllInstances" + } + } + }, + "DescribeInstanceStatusResult":{ + "type":"structure", + "members":{ + "InstanceStatuses":{ + "shape":"InstanceStatusList", + "locationName":"instanceStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeInstancesResult":{ + "type":"structure", + "members":{ + "Reservations":{ + "shape":"ReservationList", + "locationName":"reservationSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInternetGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayIds":{ + "shape":"ValueStringList", + "locationName":"internetGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeInternetGatewaysResult":{ + "type":"structure", + "members":{ + "InternetGateways":{ + "shape":"InternetGatewayList", + "locationName":"internetGatewaySet" + } + } + }, + 
"DescribeKeyPairsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyNames":{ + "shape":"KeyNameStringList", + "locationName":"KeyName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeKeyPairsResult":{ + "type":"structure", + "members":{ + "KeyPairs":{ + "shape":"KeyPairList", + "locationName":"keySet" + } + } + }, + "DescribeMovingAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"ValueStringList", + "locationName":"publicIp" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeMovingAddressesResult":{ + "type":"structure", + "members":{ + "MovingAddressStatuses":{ + "shape":"MovingAddressStatusSet", + "locationName":"movingAddressStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeNetworkAclsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclIds":{ + "shape":"ValueStringList", + "locationName":"NetworkAclId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeNetworkAclsResult":{ + "type":"structure", + "members":{ + "NetworkAcls":{ + "shape":"NetworkAclList", + "locationName":"networkAclSet" + } + } + }, + "DescribeNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Attribute":{ + "shape":"NetworkInterfaceAttribute", + "locationName":"attribute" + } + } + }, + "DescribeNetworkInterfaceAttributeResult":{ + 
"type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + } + } + }, + "DescribeNetworkInterfacesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceIds":{ + "shape":"NetworkInterfaceIdList", + "locationName":"NetworkInterfaceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + } + } + }, + "DescribeNetworkInterfacesResult":{ + "type":"structure", + "members":{ + "NetworkInterfaces":{ + "shape":"NetworkInterfaceList", + "locationName":"networkInterfaceSet" + } + } + }, + "DescribePlacementGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"PlacementGroupStringList", + "locationName":"groupName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribePlacementGroupsResult":{ + "type":"structure", + "members":{ + "PlacementGroups":{ + "shape":"PlacementGroupList", + "locationName":"placementGroupSet" + } + } + }, + "DescribePrefixListsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "PrefixListIds":{ + "shape":"ValueStringList", + "locationName":"PrefixListId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribePrefixListsResult":{ + "type":"structure", + "members":{ + "PrefixLists":{ + "shape":"PrefixListSet", + "locationName":"prefixListSet" + }, + "NextToken":{ + "shape":"String", + 
"locationName":"nextToken" + } + } + }, + "DescribeRegionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RegionNames":{ + "shape":"RegionNameStringList", + "locationName":"RegionName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRegionsResult":{ + "type":"structure", + "members":{ + "Regions":{ + "shape":"RegionList", + "locationName":"regionInfo" + } + } + }, + "DescribeReservedInstancesListingsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filters" + } + } + }, + "DescribeReservedInstancesListingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "DescribeReservedInstancesModificationsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationIds":{ + "shape":"ReservedInstancesModificationIdStringList", + "locationName":"ReservedInstancesModificationId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeReservedInstancesModificationsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModifications":{ + "shape":"ReservedInstancesModificationList", + "locationName":"reservedInstancesModificationsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesOfferingsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingIds":{ + "shape":"ReservedInstancesOfferingIdStringList", + 
"locationName":"ReservedInstancesOfferingId" + }, + "InstanceType":{"shape":"InstanceType"}, + "AvailabilityZone":{"shape":"String"}, + "ProductDescription":{"shape":"RIProductDescription"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "IncludeMarketplace":{"shape":"Boolean"}, + "MinDuration":{"shape":"Long"}, + "MaxDuration":{"shape":"Long"}, + "MaxInstanceCount":{"shape":"Integer"} + } + }, + "DescribeReservedInstancesOfferingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferings":{ + "shape":"ReservedInstancesOfferingList", + "locationName":"reservedInstancesOfferingsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + } + } + }, + "DescribeReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstances":{ + "shape":"ReservedInstancesList", + "locationName":"reservedInstancesSet" + } + } + }, + "DescribeRouteTablesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRouteTablesResult":{ + "type":"structure", + "members":{ + 
"RouteTables":{ + "shape":"RouteTableList", + "locationName":"routeTableSet" + } + } + }, + "DescribeSecurityGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSecurityGroupsResult":{ + "type":"structure", + "members":{ + "SecurityGroups":{ + "shape":"SecurityGroupList", + "locationName":"securityGroupInfo" + } + } + }, + "DescribeSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "DescribeSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "CreateVolumePermissions":{ + "shape":"CreateVolumePermissionList", + "locationName":"createVolumePermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeSnapshotsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotIds":{ + "shape":"SnapshotIdStringList", + "locationName":"SnapshotId" + }, + "OwnerIds":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "RestorableByUserIds":{ + "shape":"RestorableByStringList", + "locationName":"RestorableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeSnapshotsResult":{ + "type":"structure", + "members":{ + "Snapshots":{ + "shape":"SnapshotList", + "locationName":"snapshotSet" + }, + "NextToken":{ + "shape":"String", + 
"locationName":"nextToken" + } + } + }, + "DescribeSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DescribeSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "DescribeSpotFleetInstancesRequest":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetInstancesResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "ActiveInstances" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "ActiveInstances":{ + "shape":"ActiveInstanceSet", + "locationName":"activeInstanceSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestHistoryRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestHistoryResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime", + "LastEvaluatedTime", + "HistoryRecords" + ], + "members":{ + 
"SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "LastEvaluatedTime":{ + "shape":"DateTime", + "locationName":"lastEvaluatedTime" + }, + "HistoryRecords":{ + "shape":"HistoryRecords", + "locationName":"historyRecordSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestsResponse":{ + "type":"structure", + "required":["SpotFleetRequestConfigs"], + "members":{ + "SpotFleetRequestConfigs":{ + "shape":"SpotFleetRequestConfigSet", + "locationName":"spotFleetRequestConfigSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotInstanceRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "DescribeSpotPriceHistoryRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "InstanceTypes":{ + "shape":"InstanceTypeList", + 
"locationName":"InstanceType" + }, + "ProductDescriptions":{ + "shape":"ProductDescriptionList", + "locationName":"ProductDescription" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotPriceHistoryResult":{ + "type":"structure", + "members":{ + "SpotPriceHistory":{ + "shape":"SpotPriceHistoryList", + "locationName":"spotPriceHistorySet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSubnetsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetIds":{ + "shape":"SubnetIdStringList", + "locationName":"SubnetId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSubnetsResult":{ + "type":"structure", + "members":{ + "Subnets":{ + "shape":"SubnetList", + "locationName":"subnetSet" + } + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeTagsResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagDescriptionList", + "locationName":"tagSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Attribute":{"shape":"VolumeAttributeName"} + } + }, + "DescribeVolumeAttributeResult":{ + "type":"structure", 
+ "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AutoEnableIO":{ + "shape":"AttributeBooleanValue", + "locationName":"autoEnableIO" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeVolumeStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeVolumeStatusResult":{ + "type":"structure", + "members":{ + "VolumeStatuses":{ + "shape":"VolumeStatusList", + "locationName":"volumeStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeVolumesResult":{ + "type":"structure", + "members":{ + "Volumes":{ + "shape":"VolumeList", + "locationName":"volumeSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcAttributeRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "Attribute":{"shape":"VpcAttributeName"} + } + }, + "DescribeVpcAttributeResult":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsSupport" + }, + 
"EnableDnsHostnames":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsHostnames" + } + } + }, + "DescribeVpcClassicLinkRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcClassicLinkIdList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcClassicLinkList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpcEndpointServicesRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointServicesResult":{ + "type":"structure", + "members":{ + "ServiceNames":{ + "shape":"ValueStringList", + "locationName":"serviceNameSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcEndpointsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointsResult":{ + "type":"structure", + "members":{ + "VpcEndpoints":{ + "shape":"VpcEndpointSet", + "locationName":"vpcEndpointSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcPeeringConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionIds":{ + "shape":"ValueStringList", + "locationName":"VpcPeeringConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcPeeringConnectionsResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnections":{ + 
"shape":"VpcPeeringConnectionList", + "locationName":"vpcPeeringConnectionSet" + } + } + }, + "DescribeVpcsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcIdStringList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcsResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpnConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionIds":{ + "shape":"VpnConnectionIdStringList", + "locationName":"VpnConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnConnectionsResult":{ + "type":"structure", + "members":{ + "VpnConnections":{ + "shape":"VpnConnectionList", + "locationName":"vpnConnectionSet" + } + } + }, + "DescribeVpnGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayIds":{ + "shape":"VpnGatewayIdStringList", + "locationName":"VpnGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnGatewaysResult":{ + "type":"structure", + "members":{ + "VpnGateways":{ + "shape":"VpnGatewayList", + "locationName":"vpnGatewaySet" + } + } + }, + "DetachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DetachInternetGatewayRequest":{ + "type":"structure", + "required":[ + 
"InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachNetworkInterfaceRequest":{ + "type":"structure", + "required":["AttachmentId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "DetachVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"}, + "Force":{"shape":"Boolean"} + } + }, + "DetachVpnGatewayRequest":{ + "type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "DeviceType":{ + "type":"string", + "enum":[ + "ebs", + "instance-store" + ] + }, + "DhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"DhcpConfigurationValueList", + "locationName":"valueSet" + } + } + }, + "DhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"DhcpConfiguration", + "locationName":"item" + } + }, + "DhcpOptions":{ + "type":"structure", + "members":{ + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "DhcpConfigurations":{ + "shape":"DhcpConfigurationList", + "locationName":"dhcpConfigurationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "DhcpOptionsIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DhcpOptionsId" + } + }, + 
"DhcpOptionsList":{ + "type":"list", + "member":{ + "shape":"DhcpOptions", + "locationName":"item" + } + }, + "DisableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "DisableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DisableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DisassociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AssociationId":{"shape":"String"} + } + }, + "DisassociateRouteTableRequest":{ + "type":"structure", + "required":["AssociationId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "DiskImage":{ + "type":"structure", + "members":{ + "Image":{"shape":"DiskImageDetail"}, + "Description":{"shape":"String"}, + "Volume":{"shape":"VolumeDetail"} + } + }, + "DiskImageDescription":{ + "type":"structure", + "required":[ + "Format", + "Size", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + }, + "Checksum":{ + "shape":"String", + "locationName":"checksum" + } + } + }, + "DiskImageDetail":{ + "type":"structure", + "required":[ + "Format", + "Bytes", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Bytes":{ + "shape":"Long", + "locationName":"bytes" 
+ }, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + } + } + }, + "DiskImageFormat":{ + "type":"string", + "enum":[ + "VMDK", + "RAW", + "VHD" + ] + }, + "DiskImageList":{ + "type":"list", + "member":{"shape":"DiskImage"} + }, + "DiskImageVolumeDescription":{ + "type":"structure", + "required":["Id"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "DomainType":{ + "type":"string", + "enum":[ + "vpc", + "standard" + ] + }, + "Double":{"type":"double"}, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + } + } + }, + "EbsInstanceBlockDevice":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EbsInstanceBlockDeviceSpecification":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EnableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "EnableVolumeIORequest":{ + "type":"structure", + "required":["VolumeId"], 
+ "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + } + } + }, + "EnableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "EnableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "EventCode":{ + "type":"string", + "enum":[ + "instance-reboot", + "system-reboot", + "system-maintenance", + "instance-retirement", + "instance-stop" + ] + }, + "EventInformation":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "EventSubType":{ + "shape":"String", + "locationName":"eventSubType" + }, + "EventDescription":{ + "shape":"String", + "locationName":"eventDescription" + } + } + }, + "EventType":{ + "type":"string", + "enum":[ + "instanceChange", + "fleetRequestChange", + "error" + ] + }, + "ExecutableByStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExecutableBy" + } + }, + "ExportEnvironment":{ + "type":"string", + "enum":[ + "citrix", + "vmware", + "microsoft" + ] + }, + "ExportTask":{ + "type":"structure", + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "State":{ + "shape":"ExportTaskState", + "locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceExportDetails":{ + "shape":"InstanceExportDetails", + "locationName":"instanceExport" + }, + "ExportToS3Task":{ + "shape":"ExportToS3Task", + "locationName":"exportToS3" + } + } + }, + "ExportTaskIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExportTaskId" + } + }, + 
"ExportTaskList":{ + "type":"list", + "member":{ + "shape":"ExportTask", + "locationName":"item" + } + }, + "ExportTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "ExportToS3Task":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "ExportToS3TaskSpecification":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Prefix":{ + "shape":"String", + "locationName":"s3Prefix" + } + } + }, + "Filter":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "Float":{"type":"float"}, + "FlowLog":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"DateTime", + "locationName":"creationTime" + }, + "FlowLogId":{ + "shape":"String", + "locationName":"flowLogId" + }, + "FlowLogStatus":{ + "shape":"String", + "locationName":"flowLogStatus" + }, + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "TrafficType":{ + "shape":"TrafficType", + "locationName":"trafficType" + }, + "LogGroupName":{ + "shape":"String", + "locationName":"logGroupName" + }, + "DeliverLogsStatus":{ + "shape":"String", + "locationName":"deliverLogsStatus" + }, + "DeliverLogsErrorMessage":{ + "shape":"String", + "locationName":"deliverLogsErrorMessage" + }, + 
"DeliverLogsPermissionArn":{ + "shape":"String", + "locationName":"deliverLogsPermissionArn" + } + } + }, + "FlowLogSet":{ + "type":"list", + "member":{ + "shape":"FlowLog", + "locationName":"item" + } + }, + "FlowLogsResourceType":{ + "type":"string", + "enum":[ + "VPC", + "Subnet", + "NetworkInterface" + ] + }, + "GatewayType":{ + "type":"string", + "enum":["ipsec.1"] + }, + "GetConsoleOutputRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetConsoleOutputResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "Output":{ + "shape":"String", + "locationName":"output" + } + } + }, + "GetPasswordDataRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetPasswordDataResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "PasswordData":{ + "shape":"String", + "locationName":"passwordData" + } + } + }, + "GroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"groupId" + } + }, + "GroupIdentifier":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "GroupIdentifierList":{ + "type":"list", + "member":{ + "shape":"GroupIdentifier", + "locationName":"item" + } + }, + "GroupNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"GroupName" + } + }, + "HistoryRecord":{ + "type":"structure", + "required":[ + "Timestamp", + "EventType", + 
"EventInformation" + ], + "members":{ + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "EventInformation":{ + "shape":"EventInformation", + "locationName":"eventInformation" + } + } + }, + "HistoryRecords":{ + "type":"list", + "member":{ + "shape":"HistoryRecord", + "locationName":"item" + } + }, + "HypervisorType":{ + "type":"string", + "enum":[ + "ovm", + "xen" + ] + }, + "IamInstanceProfile":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "IamInstanceProfileSpecification":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Name":{ + "shape":"String", + "locationName":"name" + } + } + }, + "IcmpTypeCode":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"Integer", + "locationName":"type" + }, + "Code":{ + "shape":"Integer", + "locationName":"code" + } + } + }, + "Image":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "ImageLocation":{ + "shape":"String", + "locationName":"imageLocation" + }, + "State":{ + "shape":"ImageState", + "locationName":"imageState" + }, + "OwnerId":{ + "shape":"String", + "locationName":"imageOwnerId" + }, + "CreationDate":{ + "shape":"String", + "locationName":"creationDate" + }, + "Public":{ + "shape":"Boolean", + "locationName":"isPublic" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "ImageType":{ + "shape":"ImageTypeValues", + "locationName":"imageType" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + 
"SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "ImageOwnerAlias":{ + "shape":"String", + "locationName":"imageOwnerAlias" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + } + } + }, + "ImageAttribute":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "LaunchPermissions":{ + "shape":"LaunchPermissionList", + "locationName":"launchPermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + } + } + }, + "ImageAttributeName":{ + "type":"string", + "enum":[ + "description", + "kernel", + "ramdisk", + "launchPermission", + "productCodes", + "blockDeviceMapping", + "sriovNetSupport" + ] + }, + "ImageDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + 
"UserBucket":{"shape":"UserBucket"}, + "DeviceName":{"shape":"String"}, + "SnapshotId":{"shape":"String"} + } + }, + "ImageDiskContainerList":{ + "type":"list", + "member":{ + "shape":"ImageDiskContainer", + "locationName":"item" + } + }, + "ImageIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImageId" + } + }, + "ImageList":{ + "type":"list", + "member":{ + "shape":"Image", + "locationName":"item" + } + }, + "ImageState":{ + "type":"string", + "enum":[ + "pending", + "available", + "invalid", + "deregistered", + "transient", + "failed", + "error" + ] + }, + "ImageTypeValues":{ + "type":"string", + "enum":[ + "machine", + "kernel", + "ramdisk" + ] + }, + "ImportImageRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainers":{ + "shape":"ImageDiskContainerList", + "locationName":"DiskContainer" + }, + "LicenseType":{"shape":"String"}, + "Hypervisor":{"shape":"String"}, + "Architecture":{"shape":"String"}, + "Platform":{"shape":"String"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportImageResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ 
+ "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTaskList":{ + "type":"list", + "member":{ + "shape":"ImportImageTask", + "locationName":"item" + } + }, + "ImportInstanceLaunchSpecification":{ + "type":"structure", + "members":{ + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "GroupNames":{ + "shape":"SecurityGroupStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"GroupId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "UserData":{ + "shape":"UserData", + "locationName":"userData" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "Monitoring":{ + "shape":"Boolean", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + 
"shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "ImportInstanceRequest":{ + "type":"structure", + "required":["Platform"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "LaunchSpecification":{ + "shape":"ImportInstanceLaunchSpecification", + "locationName":"launchSpecification" + }, + "DiskImages":{ + "shape":"DiskImageList", + "locationName":"diskImage" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + } + } + }, + "ImportInstanceResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportInstanceTaskDetails":{ + "type":"structure", + "required":["Volumes"], + "members":{ + "Volumes":{ + "shape":"ImportInstanceVolumeDetailSet", + "locationName":"volumes" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailItem":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume", + "Status" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailSet":{ + "type":"list", + "member":{ + "shape":"ImportInstanceVolumeDetailItem", + "locationName":"item" + 
} + }, + "ImportKeyPairRequest":{ + "type":"structure", + "required":[ + "KeyName", + "PublicKeyMaterial" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "PublicKeyMaterial":{ + "shape":"Blob", + "locationName":"publicKeyMaterial" + } + } + }, + "ImportKeyPairResult":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "ImportSnapshotRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainer":{"shape":"SnapshotDiskContainer"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportSnapshotResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTaskList":{ + "type":"list", + "member":{ + "shape":"ImportSnapshotTask", + "locationName":"item" + } + }, + "ImportTaskIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImportTaskId" + } + }, + "ImportVolumeRequest":{ + "type":"structure", + "required":[ + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AvailabilityZone":{ + "shape":"String", + 
"locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDetail", + "locationName":"image" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Volume":{ + "shape":"VolumeDetail", + "locationName":"volume" + } + } + }, + "ImportVolumeResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportVolumeTaskDetails":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + } + } + }, + "Instance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "State":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"dnsName" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"reason" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "AmiLaunchIndex":{ + "shape":"Integer", + "locationName":"amiLaunchIndex" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "LaunchTime":{ + "shape":"DateTime", + "locationName":"launchTime" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, 
+ "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PublicIpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "InstanceLifecycle":{ + "shape":"InstanceLifecycleType", + "locationName":"instanceLifecycle" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfile", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"String", + 
"locationName":"sriovNetSupport" + } + } + }, + "InstanceAttribute":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"AttributeValue", + "locationName":"userData" + }, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "RootDeviceName":{ + "shape":"AttributeValue", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + } + } + }, + "InstanceAttributeName":{ + "type":"string", + "enum":[ + "instanceType", + "kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + "productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport" + ] + }, + "InstanceBlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDevice", + "locationName":"ebs" + } + } + }, + "InstanceBlockDeviceMappingList":{ + "type":"list", + "member":{ + 
"shape":"InstanceBlockDeviceMapping", + "locationName":"item" + } + }, + "InstanceBlockDeviceMappingSpecification":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDeviceSpecification", + "locationName":"ebs" + }, + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "InstanceBlockDeviceMappingSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMappingSpecification", + "locationName":"item" + } + }, + "InstanceCount":{ + "type":"structure", + "members":{ + "State":{ + "shape":"ListingState", + "locationName":"state" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + } + } + }, + "InstanceCountList":{ + "type":"list", + "member":{ + "shape":"InstanceCount", + "locationName":"item" + } + }, + "InstanceExportDetails":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + } + } + }, + "InstanceIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"InstanceId" + } + }, + "InstanceLifecycleType":{ + "type":"string", + "enum":["spot"] + }, + "InstanceList":{ + "type":"list", + "member":{ + "shape":"Instance", + "locationName":"item" + } + }, + "InstanceMonitoring":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + } + } + }, + "InstanceMonitoringList":{ + "type":"list", + "member":{ + "shape":"InstanceMonitoring", + "locationName":"item" + } + }, + "InstanceNetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + 
"shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"InstanceNetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + }, + "PrivateIpAddresses":{ + "shape":"InstancePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + } + } + }, + "InstanceNetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + } + } + }, + "InstanceNetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "InstanceNetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterface", + "locationName":"item" + } + }, + 
"InstanceNetworkInterfaceSpecification":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddressesSet", + "queryName":"PrivateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AssociatePublicIpAddress":{ + "shape":"Boolean", + "locationName":"associatePublicIpAddress" + } + } + }, + "InstanceNetworkInterfaceSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterfaceSpecification", + "locationName":"item" + } + }, + "InstancePrivateIpAddress":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "InstancePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"InstancePrivateIpAddress", + "locationName":"item" + } + }, + "InstanceState":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"Integer", + "locationName":"code" + }, + "Name":{ + "shape":"InstanceStateName", + "locationName":"name" + } + } + }, + "InstanceStateChange":{ + "type":"structure", + "members":{ + 
"InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CurrentState":{ + "shape":"InstanceState", + "locationName":"currentState" + }, + "PreviousState":{ + "shape":"InstanceState", + "locationName":"previousState" + } + } + }, + "InstanceStateChangeList":{ + "type":"list", + "member":{ + "shape":"InstanceStateChange", + "locationName":"item" + } + }, + "InstanceStateName":{ + "type":"string", + "enum":[ + "pending", + "running", + "shutting-down", + "terminated", + "stopping", + "stopped" + ] + }, + "InstanceStatus":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Events":{ + "shape":"InstanceStatusEventList", + "locationName":"eventsSet" + }, + "InstanceState":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "SystemStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"systemStatus" + }, + "InstanceStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"instanceStatus" + } + } + }, + "InstanceStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"StatusName", + "locationName":"name" + }, + "Status":{ + "shape":"StatusType", + "locationName":"status" + }, + "ImpairedSince":{ + "shape":"DateTime", + "locationName":"impairedSince" + } + } + }, + "InstanceStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusDetails", + "locationName":"item" + } + }, + "InstanceStatusEvent":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"EventCode", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + "locationName":"notAfter" + } + } + }, + "InstanceStatusEventList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusEvent", + "locationName":"item" + } + }, + 
"InstanceStatusList":{ + "type":"list", + "member":{ + "shape":"InstanceStatus", + "locationName":"item" + } + }, + "InstanceStatusSummary":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SummaryStatus", + "locationName":"status" + }, + "Details":{ + "shape":"InstanceStatusDetailsList", + "locationName":"details" + } + } + }, + "InstanceType":{ + "type":"string", + "enum":[ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m3.medium", + "m3.large", + "m3.xlarge", + "m3.2xlarge", + "m4.large", + "m4.xlarge", + "m4.2xlarge", + "m4.4xlarge", + "m4.10xlarge", + "t2.micro", + "t2.small", + "t2.medium", + "t2.large", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "cr1.8xlarge", + "i2.xlarge", + "i2.2xlarge", + "i2.4xlarge", + "i2.8xlarge", + "hi1.4xlarge", + "hs1.8xlarge", + "c1.medium", + "c1.xlarge", + "c3.large", + "c3.xlarge", + "c3.2xlarge", + "c3.4xlarge", + "c3.8xlarge", + "c4.large", + "c4.xlarge", + "c4.2xlarge", + "c4.4xlarge", + "c4.8xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "g2.2xlarge", + "cg1.4xlarge", + "r3.large", + "r3.xlarge", + "r3.2xlarge", + "r3.4xlarge", + "r3.8xlarge", + "d2.xlarge", + "d2.2xlarge", + "d2.4xlarge", + "d2.8xlarge" + ] + }, + "InstanceTypeList":{ + "type":"list", + "member":{"shape":"InstanceType"} + }, + "Integer":{"type":"integer"}, + "InternetGateway":{ + "type":"structure", + "members":{ + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "Attachments":{ + "shape":"InternetGatewayAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "InternetGatewayAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "InternetGatewayAttachmentList":{ + "type":"list", + "member":{ + "shape":"InternetGatewayAttachment", + "locationName":"item" + } + }, + 
"InternetGatewayList":{ + "type":"list", + "member":{ + "shape":"InternetGateway", + "locationName":"item" + } + }, + "IpPermission":{ + "type":"structure", + "members":{ + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "UserIdGroupPairs":{ + "shape":"UserIdGroupPairList", + "locationName":"groups" + }, + "IpRanges":{ + "shape":"IpRangeList", + "locationName":"ipRanges" + }, + "PrefixListIds":{ + "shape":"PrefixListIdList", + "locationName":"prefixListIds" + } + } + }, + "IpPermissionList":{ + "type":"list", + "member":{ + "shape":"IpPermission", + "locationName":"item" + } + }, + "IpRange":{ + "type":"structure", + "members":{ + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + } + } + }, + "IpRangeList":{ + "type":"list", + "member":{ + "shape":"IpRange", + "locationName":"item" + } + }, + "KeyNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"KeyName" + } + }, + "KeyPair":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + }, + "KeyMaterial":{ + "shape":"String", + "locationName":"keyMaterial" + } + } + }, + "KeyPairInfo":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "KeyPairList":{ + "type":"list", + "member":{ + "shape":"KeyPairInfo", + "locationName":"item" + } + }, + "LaunchPermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "LaunchPermissionList":{ + "type":"list", + "member":{ + "shape":"LaunchPermission", + "locationName":"item" + } + }, + 
"LaunchPermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"LaunchPermissionList"}, + "Remove":{"shape":"LaunchPermissionList"} + } + }, + "LaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + } + } + }, + "LaunchSpecsList":{ + "type":"list", + "member":{ + "shape":"SpotFleetLaunchSpecification", + "locationName":"item" + }, + "min":1 + }, + "ListingState":{ + "type":"string", + "enum":[ + "available", + "sold", + "cancelled", + "pending" + ] + }, + "ListingStatus":{ + "type":"string", + "enum":[ + "active", + "pending", + "cancelled", + "closed" + ] + }, + "Long":{"type":"long"}, + "ModifyImageAttributeRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + 
"locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"String"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "UserGroups":{ + "shape":"UserGroupStringList", + "locationName":"UserGroup" + }, + "ProductCodes":{ + "shape":"ProductCodeStringList", + "locationName":"ProductCode" + }, + "Value":{"shape":"String"}, + "LaunchPermission":{"shape":"LaunchPermissionModifications"}, + "Description":{"shape":"AttributeValue"} + } + }, + "ModifyInstanceAttributeRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + }, + "Value":{ + "shape":"String", + "locationName":"value" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingSpecificationList", + "locationName":"blockDeviceMapping" + }, + "SourceDestCheck":{"shape":"AttributeBooleanValue"}, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "Kernel":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "Ramdisk":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"BlobAttributeValue", + "locationName":"userData" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + } + } + }, + "ModifyNetworkInterfaceAttributeRequest":{ + "type":"structure", + 
"required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachmentChanges", + "locationName":"attachment" + } + } + }, + "ModifyReservedInstancesRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesIds", + "TargetConfigurations" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "TargetConfigurations":{ + "shape":"ReservedInstancesConfigurationList", + "locationName":"ReservedInstancesConfigurationSetItemType" + } + } + }, + "ModifyReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + } + } + }, + "ModifySnapshotAttributeRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"UserGroup" + }, + "CreateVolumePermission":{"shape":"CreateVolumePermissionModifications"} + } + }, + "ModifySubnetAttributeRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + 
"MapPublicIpOnLaunch":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "AutoEnableIO":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcAttributeRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{"shape":"AttributeBooleanValue"}, + "EnableDnsHostnames":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcEndpointRequest":{ + "type":"structure", + "required":["VpcEndpointId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointId":{"shape":"String"}, + "ResetPolicy":{"shape":"Boolean"}, + "PolicyDocument":{"shape":"String"}, + "AddRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"AddRouteTableId" + }, + "RemoveRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RemoveRouteTableId" + } + } + }, + "ModifyVpcEndpointResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "MonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "MonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "Monitoring":{ + "type":"structure", + "members":{ + "State":{ + "shape":"MonitoringState", + "locationName":"state" + } + } + }, + "MonitoringState":{ + "type":"string", + "enum":[ + "disabled", + "disabling", + "enabled", + "pending" + ] + }, + "MoveAddressToVpcRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + 
"locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "MoveAddressToVpcResult":{ + "type":"structure", + "members":{ + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "Status":{ + "shape":"Status", + "locationName":"status" + } + } + }, + "MoveStatus":{ + "type":"string", + "enum":[ + "movingToVpc", + "restoringToClassic" + ] + }, + "MovingAddressStatus":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "MoveStatus":{ + "shape":"MoveStatus", + "locationName":"moveStatus" + } + } + }, + "MovingAddressStatusSet":{ + "type":"list", + "member":{ + "shape":"MovingAddressStatus", + "locationName":"item" + } + }, + "NetworkAcl":{ + "type":"structure", + "members":{ + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"default" + }, + "Entries":{ + "shape":"NetworkAclEntryList", + "locationName":"entrySet" + }, + "Associations":{ + "shape":"NetworkAclAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "NetworkAclAssociation":{ + "type":"structure", + "members":{ + "NetworkAclAssociationId":{ + "shape":"String", + "locationName":"networkAclAssociationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + } + } + }, + "NetworkAclAssociationList":{ + "type":"list", + "member":{ + "shape":"NetworkAclAssociation", + "locationName":"item" + } + }, + "NetworkAclEntry":{ + "type":"structure", + "members":{ + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + 
"shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"icmpTypeCode" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "NetworkAclEntryList":{ + "type":"list", + "member":{ + "shape":"NetworkAclEntry", + "locationName":"item" + } + }, + "NetworkAclList":{ + "type":"list", + "member":{ + "shape":"NetworkAcl", + "locationName":"item" + } + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "RequesterManaged":{ + "shape":"Boolean", + "locationName":"requesterManaged" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + }, + "TagSet":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PrivateIpAddresses":{ + "shape":"NetworkInterfacePrivateIpAddressList", + 
"locationName":"privateIpAddressesSet" + } + } + }, + "NetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "NetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttachmentChanges":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttribute":{ + "type":"string", + "enum":[ + "description", + "groupSet", + "sourceDestCheck", + "attachment" + ] + }, + "NetworkInterfaceIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "NetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"NetworkInterface", + "locationName":"item" + } + }, + "NetworkInterfacePrivateIpAddress":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + 
"shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "NetworkInterfacePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"NetworkInterfacePrivateIpAddress", + "locationName":"item" + } + }, + "NetworkInterfaceStatus":{ + "type":"string", + "enum":[ + "available", + "attaching", + "in-use", + "detaching" + ] + }, + "OfferingTypeValues":{ + "type":"string", + "enum":[ + "Heavy Utilization", + "Medium Utilization", + "Light Utilization", + "No Upfront", + "Partial Upfront", + "All Upfront" + ] + }, + "OperationType":{ + "type":"string", + "enum":[ + "add", + "remove" + ] + }, + "OwnerStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Owner" + } + }, + "PermissionGroup":{ + "type":"string", + "enum":["all"] + }, + "Placement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Tenancy":{ + "shape":"Tenancy", + "locationName":"tenancy" + } + } + }, + "PlacementGroup":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + }, + "State":{ + "shape":"PlacementGroupState", + "locationName":"state" + } + } + }, + "PlacementGroupList":{ + "type":"list", + "member":{ + "shape":"PlacementGroup", + "locationName":"item" + } + }, + "PlacementGroupState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "PlacementGroupStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PlacementStrategy":{ + "type":"string", + "enum":["cluster"] + }, + "PlatformValues":{ + "type":"string", + "enum":["Windows"] + }, + "PortRange":{ + "type":"structure", + "members":{ + "From":{ + "shape":"Integer", + "locationName":"from" + 
}, + "To":{ + "shape":"Integer", + "locationName":"to" + } + } + }, + "PrefixList":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + }, + "PrefixListName":{ + "shape":"String", + "locationName":"prefixListName" + }, + "Cidrs":{ + "shape":"ValueStringList", + "locationName":"cidrSet" + } + } + }, + "PrefixListId":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + } + } + }, + "PrefixListIdList":{ + "type":"list", + "member":{ + "shape":"PrefixListId", + "locationName":"item" + } + }, + "PrefixListSet":{ + "type":"list", + "member":{ + "shape":"PrefixList", + "locationName":"item" + } + }, + "PriceSchedule":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "Active":{ + "shape":"Boolean", + "locationName":"active" + } + } + }, + "PriceScheduleList":{ + "type":"list", + "member":{ + "shape":"PriceSchedule", + "locationName":"item" + } + }, + "PriceScheduleSpecification":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "PriceScheduleSpecificationList":{ + "type":"list", + "member":{ + "shape":"PriceScheduleSpecification", + "locationName":"item" + } + }, + "PricingDetail":{ + "type":"structure", + "members":{ + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "Count":{ + "shape":"Integer", + "locationName":"count" + } + } + }, + "PricingDetailsList":{ + "type":"list", + "member":{ + "shape":"PricingDetail", + "locationName":"item" + } + }, + "PrivateIpAddressSpecification":{ + "type":"structure", + "required":["PrivateIpAddress"], + 
"members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + } + } + }, + "PrivateIpAddressSpecificationList":{ + "type":"list", + "member":{ + "shape":"PrivateIpAddressSpecification", + "locationName":"item" + } + }, + "PrivateIpAddressStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PrivateIpAddress" + } + }, + "ProductCode":{ + "type":"structure", + "members":{ + "ProductCodeId":{ + "shape":"String", + "locationName":"productCode" + }, + "ProductCodeType":{ + "shape":"ProductCodeValues", + "locationName":"type" + } + } + }, + "ProductCodeList":{ + "type":"list", + "member":{ + "shape":"ProductCode", + "locationName":"item" + } + }, + "ProductCodeStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ProductCode" + } + }, + "ProductCodeValues":{ + "type":"string", + "enum":[ + "devpay", + "marketplace" + ] + }, + "ProductDescriptionList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PropagatingVgw":{ + "type":"structure", + "members":{ + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + } + } + }, + "PropagatingVgwList":{ + "type":"list", + "member":{ + "shape":"PropagatingVgw", + "locationName":"item" + } + }, + "PublicIpStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PublicIp" + } + }, + "PurchaseReservedInstancesOfferingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesOfferingId", + "InstanceCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingId":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"}, + "LimitPrice":{ + "shape":"ReservedInstanceLimitPrice", + "locationName":"limitPrice" + } + } + }, + "PurchaseReservedInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + 
"locationName":"reservedInstancesId" + } + } + }, + "RIProductDescription":{ + "type":"string", + "enum":[ + "Linux/UNIX", + "Linux/UNIX (Amazon VPC)", + "Windows", + "Windows (Amazon VPC)" + ] + }, + "ReasonCodesList":{ + "type":"list", + "member":{ + "shape":"ReportInstanceReasonCodes", + "locationName":"item" + } + }, + "RebootInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"RecurringChargeFrequency", + "locationName":"frequency" + }, + "Amount":{ + "shape":"Double", + "locationName":"amount" + } + } + }, + "RecurringChargeFrequency":{ + "type":"string", + "enum":["Hourly"] + }, + "RecurringChargesList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"item" + } + }, + "Region":{ + "type":"structure", + "members":{ + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Endpoint":{ + "shape":"String", + "locationName":"regionEndpoint" + } + } + }, + "RegionList":{ + "type":"list", + "member":{ + "shape":"Region", + "locationName":"item" + } + }, + "RegionNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"RegionName" + } + }, + "RegisterImageRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageLocation":{"shape":"String"}, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "RootDeviceName":{ + "shape":"String", 
+ "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"String", + "locationName":"virtualizationType" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + } + } + }, + "RegisterImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "RejectVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "RejectVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ReleaseAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"} + } + }, + "ReplaceNetworkAclAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "NetworkAclId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "ReplaceNetworkAclAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReplaceNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + 
"locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "ReplaceRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "ReplaceRouteTableAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "ReplaceRouteTableAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReportInstanceReasonCodes":{ + "type":"string", + "enum":[ + "instance-stuck-in-state", + "unresponsive", + "not-accepting-credentials", + "password-not-available", + "performance-network", + "performance-instance-store", + "performance-ebs-volume", + "performance-other", + "other" + ] + }, + 
"ReportInstanceStatusRequest":{ + "type":"structure", + "required":[ + "Instances", + "Status", + "ReasonCodes" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Instances":{ + "shape":"InstanceIdStringList", + "locationName":"instanceId" + }, + "Status":{ + "shape":"ReportStatusType", + "locationName":"status" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "ReasonCodes":{ + "shape":"ReasonCodesList", + "locationName":"reasonCode" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ReportStatusType":{ + "type":"string", + "enum":[ + "ok", + "impaired" + ] + }, + "RequestSpotFleetRequest":{ + "type":"structure", + "required":["SpotFleetRequestConfig"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + } + } + }, + "RequestSpotFleetResponse":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + } + } + }, + "RequestSpotInstancesRequest":{ + "type":"structure", + "required":["SpotPrice"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + 
"locationName":"availabilityZoneGroup" + }, + "LaunchSpecification":{"shape":"RequestSpotLaunchSpecification"} + } + }, + "RequestSpotInstancesResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "Reservation":{ + "type":"structure", + "members":{ + "ReservationId":{ + "shape":"String", + "locationName":"reservationId" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Instances":{ + "shape":"InstanceList", + "locationName":"instancesSet" + } + } + }, + "ReservationList":{ + "type":"list", + "member":{ + "shape":"Reservation", + "locationName":"item" + } + }, + "ReservedInstanceLimitPrice":{ + "type":"structure", + "members":{ + "Amount":{ + "shape":"Double", + "locationName":"amount" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "ReservedInstanceState":{ + "type":"string", + "enum":[ + "payment-pending", + "active", + "payment-failed", + "retired" + ] + }, + "ReservedInstances":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Start":{ + "shape":"DateTime", + "locationName":"start" + }, + "End":{ + "shape":"DateTime", + "locationName":"end" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "ProductDescription":{ + 
"shape":"RIProductDescription", + "locationName":"productDescription" + }, + "State":{ + "shape":"ReservedInstanceState", + "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + } + } + }, + "ReservedInstancesConfiguration":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + } + } + }, + "ReservedInstancesConfigurationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"item" + } + }, + "ReservedInstancesId":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "ReservedInstancesIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesId" + } + }, + "ReservedInstancesList":{ + "type":"list", + "member":{ + "shape":"ReservedInstances", + "locationName":"item" + } + }, + "ReservedInstancesListing":{ + "type":"structure", + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "Status":{ + "shape":"ListingStatus", + 
"locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceCounts":{ + "shape":"InstanceCountList", + "locationName":"instanceCounts" + }, + "PriceSchedules":{ + "shape":"PriceScheduleList", + "locationName":"priceSchedules" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesListingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesListing", + "locationName":"item" + } + }, + "ReservedInstancesModification":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + }, + "ReservedInstancesIds":{ + "shape":"ReservedIntancesIds", + "locationName":"reservedInstancesSet" + }, + "ModificationResults":{ + "shape":"ReservedInstancesModificationResultList", + "locationName":"modificationResultSet" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "EffectiveDate":{ + "shape":"DateTime", + "locationName":"effectiveDate" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesModificationIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesModificationId" + } + }, + "ReservedInstancesModificationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModification", + "locationName":"item" + } + }, + "ReservedInstancesModificationResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "TargetConfiguration":{ + "shape":"ReservedInstancesConfiguration", + 
"locationName":"targetConfiguration" + } + } + }, + "ReservedInstancesModificationResultList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModificationResult", + "locationName":"item" + } + }, + "ReservedInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferingId":{ + "shape":"String", + "locationName":"reservedInstancesOfferingId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + }, + "Marketplace":{ + "shape":"Boolean", + "locationName":"marketplace" + }, + "PricingDetails":{ + "shape":"PricingDetailsList", + "locationName":"pricingDetailsSet" + } + } + }, + "ReservedInstancesOfferingIdStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ReservedInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesOffering", + "locationName":"item" + } + }, + "ReservedIntancesIds":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesId", + "locationName":"item" + } + }, + "ResetImageAttributeName":{ + "type":"string", + "enum":["launchPermission"] + }, + "ResetImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + 
"locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ResetImageAttributeName"} + } + }, + "ResetInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "ResetNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SourceDestCheck":{ + "shape":"String", + "locationName":"sourceDestCheck" + } + } + }, + "ResetSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ResourceType":{ + "type":"string", + "enum":[ + "customer-gateway", + "dhcp-options", + "image", + "instance", + "internet-gateway", + "network-acl", + "network-interface", + "reserved-instances", + "route-table", + "snapshot", + "spot-instances-request", + "subnet", + "security-group", + "volume", + "vpc", + "vpn-connection", + "vpn-gateway" + ] + }, + "RestorableByStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "RestoreAddressToClassicRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RestoreAddressToClassicResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "locationName":"status" + }, 
+ "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RevokeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "RevokeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "Route":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "DestinationPrefixListId":{ + "shape":"String", + "locationName":"destinationPrefixListId" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + 
"locationName":"vpcPeeringConnectionId" + }, + "State":{ + "shape":"RouteState", + "locationName":"state" + }, + "Origin":{ + "shape":"RouteOrigin", + "locationName":"origin" + } + } + }, + "RouteList":{ + "type":"list", + "member":{ + "shape":"Route", + "locationName":"item" + } + }, + "RouteOrigin":{ + "type":"string", + "enum":[ + "CreateRouteTable", + "CreateRoute", + "EnableVgwRoutePropagation" + ] + }, + "RouteState":{ + "type":"string", + "enum":[ + "active", + "blackhole" + ] + }, + "RouteTable":{ + "type":"structure", + "members":{ + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Routes":{ + "shape":"RouteList", + "locationName":"routeSet" + }, + "Associations":{ + "shape":"RouteTableAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PropagatingVgws":{ + "shape":"PropagatingVgwList", + "locationName":"propagatingVgwSet" + } + } + }, + "RouteTableAssociation":{ + "type":"structure", + "members":{ + "RouteTableAssociationId":{ + "shape":"String", + "locationName":"routeTableAssociationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Main":{ + "shape":"Boolean", + "locationName":"main" + } + } + }, + "RouteTableAssociationList":{ + "type":"list", + "member":{ + "shape":"RouteTableAssociation", + "locationName":"item" + } + }, + "RouteTableList":{ + "type":"list", + "member":{ + "shape":"RouteTable", + "locationName":"item" + } + }, + "RuleAction":{ + "type":"string", + "enum":[ + "allow", + "deny" + ] + }, + "RunInstancesMonitoringEnabled":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "RunInstancesRequest":{ + "type":"structure", + "required":[ + "ImageId", + "MinCount", + "MaxCount" + ], + 
"members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "MinCount":{"shape":"Integer"}, + "MaxCount":{"shape":"Integer"}, + "KeyName":{"shape":"String"}, + "SecurityGroups":{ + "shape":"SecurityGroupStringList", + "locationName":"SecurityGroup" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "UserData":{"shape":"String"}, + "InstanceType":{"shape":"InstanceType"}, + "Placement":{"shape":"Placement"}, + "KernelId":{"shape":"String"}, + "RamdiskId":{"shape":"String"}, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "Monitoring":{"shape":"RunInstancesMonitoringEnabled"}, + "SubnetId":{"shape":"String"}, + "DisableApiTermination":{ + "shape":"Boolean", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + } + } + }, + "S3Storage":{ + "type":"structure", + "members":{ + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "AWSAccessKeyId":{"shape":"String"}, + "UploadPolicy":{ + "shape":"Blob", + "locationName":"uploadPolicy" + }, + "UploadPolicySignature":{ + "shape":"String", + "locationName":"uploadPolicySignature" + } + } + }, + "SecurityGroup":{ + 
"type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "Description":{ + "shape":"String", + "locationName":"groupDescription" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + }, + "IpPermissionsEgress":{ + "shape":"IpPermissionList", + "locationName":"ipPermissionsEgress" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SecurityGroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"SecurityGroup", + "locationName":"item" + } + }, + "SecurityGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroup" + } + }, + "ShutdownBehavior":{ + "type":"string", + "enum":[ + "stop", + "terminate" + ] + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "State":{ + "shape":"SnapshotState", + "locationName":"status" + }, + "StateMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + 
"shape":"String", + "locationName":"kmsKeyId" + }, + "DataEncryptionKeyId":{ + "shape":"String", + "locationName":"dataEncryptionKeyId" + } + } + }, + "SnapshotAttributeName":{ + "type":"string", + "enum":[ + "productCodes", + "createVolumePermission" + ] + }, + "SnapshotDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SnapshotDetailList":{ + "type":"list", + "member":{ + "shape":"SnapshotDetail", + "locationName":"item" + } + }, + "SnapshotDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"} + } + }, + "SnapshotIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SnapshotId" + } + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"item" + } + }, + "SnapshotState":{ + "type":"string", + "enum":[ + "pending", + "completed", + "error" + ] + }, + "SnapshotTaskDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + 
"locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SpotDatafeedSubscription":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "State":{ + "shape":"DatafeedSubscriptionState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + } + } + }, + "SpotFleetLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "Monitoring":{ + "shape":"SpotFleetMonitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + 
"shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "WeightedCapacity":{ + "shape":"Double", + "locationName":"weightedCapacity" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + } + } + }, + "SpotFleetMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "SpotFleetRequestConfig":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "SpotFleetRequestState", + "SpotFleetRequestConfig" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "SpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"spotFleetRequestState" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + } + } + }, + "SpotFleetRequestConfigData":{ + "type":"structure", + "required":[ + "SpotPrice", + "TargetCapacity", + "IamFleetRole", + "LaunchSpecifications" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "TargetCapacity":{ + "shape":"Integer", + "locationName":"targetCapacity" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "TerminateInstancesWithExpiration":{ + "shape":"Boolean", + "locationName":"terminateInstancesWithExpiration" + }, + "IamFleetRole":{ + "shape":"String", + "locationName":"iamFleetRole" + }, + "LaunchSpecifications":{ + "shape":"LaunchSpecsList", + "locationName":"launchSpecifications" + }, + "AllocationStrategy":{ + "shape":"AllocationStrategy", + "locationName":"allocationStrategy" + } + } + }, + "SpotFleetRequestConfigSet":{ + "type":"list", + "member":{ + "shape":"SpotFleetRequestConfig", + "locationName":"item" + 
} + }, + "SpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "State":{ + "shape":"SpotInstanceState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + }, + "Status":{ + "shape":"SpotInstanceStatus", + "locationName":"status" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "LaunchSpecification":{ + "shape":"LaunchSpecification", + "locationName":"launchSpecification" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "LaunchedAvailabilityZone":{ + "shape":"String", + "locationName":"launchedAvailabilityZone" + } + } + }, + "SpotInstanceRequestIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SpotInstanceRequestId" + } + }, + "SpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"SpotInstanceRequest", + "locationName":"item" + } + }, + "SpotInstanceState":{ + "type":"string", + "enum":[ + "open", + "active", + "closed", + "cancelled", + "failed" + ] + }, + "SpotInstanceStateFault":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceStatus":{ + "type":"structure", + 
"members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceType":{ + "type":"string", + "enum":[ + "one-time", + "persistent" + ] + }, + "SpotPlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "SpotPrice":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + } + } + }, + "SpotPriceHistoryList":{ + "type":"list", + "member":{ + "shape":"SpotPrice", + "locationName":"item" + } + }, + "StartInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "StartInstancesResult":{ + "type":"structure", + "members":{ + "StartingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "State":{ + "type":"string", + "enum":[ + "Pending", + "Available", + "Deleting", + "Deleted" + ] + }, + "StateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "Status":{ + "type":"string", + "enum":[ + "MoveInProgress", + "InVpc", + "InClassic" + ] 
+ }, + "StatusName":{ + "type":"string", + "enum":["reachability"] + }, + "StatusType":{ + "type":"string", + "enum":[ + "passed", + "failed", + "insufficient-data", + "initializing" + ] + }, + "StopInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "StopInstancesResult":{ + "type":"structure", + "members":{ + "StoppingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "Storage":{ + "type":"structure", + "members":{ + "S3":{"shape":"S3Storage"} + } + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "State":{ + "shape":"SubnetState", + "locationName":"state" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "AvailableIpAddressCount":{ + "shape":"Integer", + "locationName":"availableIpAddressCount" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "DefaultForAz":{ + "shape":"Boolean", + "locationName":"defaultForAz" + }, + "MapPublicIpOnLaunch":{ + "shape":"Boolean", + "locationName":"mapPublicIpOnLaunch" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SubnetIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetId" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"item" + } + }, + "SubnetState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "SummaryStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data", + "not-applicable", + "initializing" + ] + }, + "Tag":{ + 
"type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "ResourceType":{ + "shape":"ResourceType", + "locationName":"resourceType" + }, + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescriptionList":{ + "type":"list", + "member":{ + "shape":"TagDescription", + "locationName":"item" + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"item" + } + }, + "TelemetryStatus":{ + "type":"string", + "enum":[ + "UP", + "DOWN" + ] + }, + "Tenancy":{ + "type":"string", + "enum":[ + "default", + "dedicated" + ] + }, + "TerminateInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "TerminateInstancesResult":{ + "type":"structure", + "members":{ + "TerminatingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "TrafficType":{ + "type":"string", + "enum":[ + "ACCEPT", + "REJECT", + "ALL" + ] + }, + "UnassignPrivateIpAddressesRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "PrivateIpAddresses" + ], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + } + } + }, + "UnmonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + 
"UnmonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "UnsuccessfulItem":{ + "type":"structure", + "required":["Error"], + "members":{ + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "Error":{ + "shape":"UnsuccessfulItemError", + "locationName":"error" + } + } + }, + "UnsuccessfulItemError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "UnsuccessfulItemSet":{ + "type":"list", + "member":{ + "shape":"UnsuccessfulItem", + "locationName":"item" + } + }, + "UserBucket":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"String"}, + "S3Key":{"shape":"String"} + } + }, + "UserBucketDetails":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "UserData":{ + "type":"structure", + "members":{ + "Data":{ + "shape":"String", + "locationName":"data" + } + } + }, + "UserGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserGroup" + } + }, + "UserIdGroupPair":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "UserIdGroupPairList":{ + "type":"list", + "member":{ + "shape":"UserIdGroupPair", + "locationName":"item" + } + }, + "UserIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserId" + } + }, + "ValueStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "VgwTelemetry":{ + "type":"structure", + "members":{ + 
"OutsideIpAddress":{ + "shape":"String", + "locationName":"outsideIpAddress" + }, + "Status":{ + "shape":"TelemetryStatus", + "locationName":"status" + }, + "LastStatusChange":{ + "shape":"DateTime", + "locationName":"lastStatusChange" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "AcceptedRouteCount":{ + "shape":"Integer", + "locationName":"acceptedRouteCount" + } + } + }, + "VgwTelemetryList":{ + "type":"list", + "member":{ + "shape":"VgwTelemetry", + "locationName":"item" + } + }, + "VirtualizationType":{ + "type":"string", + "enum":[ + "hvm", + "paravirtual" + ] + }, + "Volume":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Size":{ + "shape":"Integer", + "locationName":"size" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"VolumeState", + "locationName":"status" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "Attachments":{ + "shape":"VolumeAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "VolumeAttachment":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Device":{ + "shape":"String", + "locationName":"device" + }, + "State":{ + "shape":"VolumeAttachmentState", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + 
"locationName":"deleteOnTermination" + } + } + }, + "VolumeAttachmentList":{ + "type":"list", + "member":{ + "shape":"VolumeAttachment", + "locationName":"item" + } + }, + "VolumeAttachmentState":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "VolumeAttributeName":{ + "type":"string", + "enum":[ + "autoEnableIO", + "productCodes" + ] + }, + "VolumeDetail":{ + "type":"structure", + "required":["Size"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + } + } + }, + "VolumeIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VolumeId" + } + }, + "VolumeList":{ + "type":"list", + "member":{ + "shape":"Volume", + "locationName":"item" + } + }, + "VolumeState":{ + "type":"string", + "enum":[ + "creating", + "available", + "in-use", + "deleting", + "deleted", + "error" + ] + }, + "VolumeStatusAction":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusActionsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusAction", + "locationName":"item" + } + }, + "VolumeStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"VolumeStatusName", + "locationName":"name" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "VolumeStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusDetails", + "locationName":"item" + } + }, + "VolumeStatusEvent":{ + "type":"structure", + "members":{ + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + 
"locationName":"notAfter" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusEventsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusEvent", + "locationName":"item" + } + }, + "VolumeStatusInfo":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"VolumeStatusInfoStatus", + "locationName":"status" + }, + "Details":{ + "shape":"VolumeStatusDetailsList", + "locationName":"details" + } + } + }, + "VolumeStatusInfoStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data" + ] + }, + "VolumeStatusItem":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VolumeStatus":{ + "shape":"VolumeStatusInfo", + "locationName":"volumeStatus" + }, + "Events":{ + "shape":"VolumeStatusEventsList", + "locationName":"eventsSet" + }, + "Actions":{ + "shape":"VolumeStatusActionsList", + "locationName":"actionsSet" + } + } + }, + "VolumeStatusList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusItem", + "locationName":"item" + } + }, + "VolumeStatusName":{ + "type":"string", + "enum":[ + "io-enabled", + "io-performance" + ] + }, + "VolumeType":{ + "type":"string", + "enum":[ + "standard", + "io1", + "gp2" + ] + }, + "Vpc":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"VpcState", + "locationName":"state" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"isDefault" + } + } + }, + "VpcAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + 
"locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "VpcAttachmentList":{ + "type":"list", + "member":{ + "shape":"VpcAttachment", + "locationName":"item" + } + }, + "VpcAttributeName":{ + "type":"string", + "enum":[ + "enableDnsSupport", + "enableDnsHostnames" + ] + }, + "VpcClassicLink":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ClassicLinkEnabled":{ + "shape":"Boolean", + "locationName":"classicLinkEnabled" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpcClassicLinkIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcClassicLinkList":{ + "type":"list", + "member":{ + "shape":"VpcClassicLink", + "locationName":"item" + } + }, + "VpcEndpoint":{ + "type":"structure", + "members":{ + "VpcEndpointId":{ + "shape":"String", + "locationName":"vpcEndpointId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ServiceName":{ + "shape":"String", + "locationName":"serviceName" + }, + "State":{ + "shape":"State", + "locationName":"state" + }, + "PolicyDocument":{ + "shape":"String", + "locationName":"policyDocument" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"routeTableIdSet" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "locationName":"creationTimestamp" + } + } + }, + "VpcEndpointSet":{ + "type":"list", + "member":{ + "shape":"VpcEndpoint", + "locationName":"item" + } + }, + "VpcIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcList":{ + "type":"list", + "member":{ + "shape":"Vpc", + "locationName":"item" + } + }, + "VpcPeeringConnection":{ + "type":"structure", + "members":{ + "AccepterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"accepterVpcInfo" + }, + "ExpirationTime":{ + "shape":"DateTime", + "locationName":"expirationTime" + }, + 
"RequesterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"requesterVpcInfo" + }, + "Status":{ + "shape":"VpcPeeringConnectionStateReason", + "locationName":"status" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "VpcPeeringConnectionList":{ + "type":"list", + "member":{ + "shape":"VpcPeeringConnection", + "locationName":"item" + } + }, + "VpcPeeringConnectionStateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"VpcPeeringConnectionStateReasonCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "VpcPeeringConnectionStateReasonCode":{ + "type":"string", + "enum":[ + "initiating-request", + "pending-acceptance", + "active", + "deleted", + "rejected", + "failed", + "expired", + "provisioning", + "deleting" + ] + }, + "VpcPeeringConnectionVpcInfo":{ + "type":"structure", + "members":{ + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "VpcState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "VpnConnection":{ + "type":"structure", + "members":{ + "VpnConnectionId":{ + "shape":"String", + "locationName":"vpnConnectionId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "CustomerGatewayConfiguration":{ + "shape":"String", + "locationName":"customerGatewayConfiguration" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VgwTelemetry":{ + "shape":"VgwTelemetryList", + "locationName":"vgwTelemetry" + }, + 
"Options":{ + "shape":"VpnConnectionOptions", + "locationName":"options" + }, + "Routes":{ + "shape":"VpnStaticRouteList", + "locationName":"routes" + } + } + }, + "VpnConnectionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnConnectionId" + } + }, + "VpnConnectionList":{ + "type":"list", + "member":{ + "shape":"VpnConnection", + "locationName":"item" + } + }, + "VpnConnectionOptions":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnConnectionOptionsSpecification":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnGateway":{ + "type":"structure", + "members":{ + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VpcAttachments":{ + "shape":"VpcAttachmentList", + "locationName":"attachments" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpnGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnGatewayId" + } + }, + "VpnGatewayList":{ + "type":"list", + "member":{ + "shape":"VpnGateway", + "locationName":"item" + } + }, + "VpnState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "VpnStaticRoute":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "Source":{ + "shape":"VpnStaticRouteSource", + "locationName":"source" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + } + } + }, + "VpnStaticRouteList":{ + "type":"list", + "member":{ + "shape":"VpnStaticRoute", + "locationName":"item" + } + }, + 
"VpnStaticRouteSource":{ + "type":"string", + "enum":["Static"] + }, + "ZoneNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ZoneName" + } + }, + "NewDhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"NewDhcpConfiguration", + "locationName":"item" + } + }, + "NewDhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "DhcpConfigurationValueList":{ + "type":"list", + "member":{ + "shape":"AttributeValue", + "locationName":"item" + } + }, + "Blob":{"type":"blob"}, + "BlobAttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Blob", + "locationName":"value" + } + } + }, + "RequestSpotLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"ValueStringList", + "locationName":"SecurityGroup" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"NetworkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + 
"locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + }, + "SecurityGroupIds":{ + "shape":"ValueStringList", + "locationName":"SecurityGroupId" + } + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5495 @@ +{ + "version": "2.0", + "operations": { + "AcceptVpcPeeringConnection": "

    Accept a VPC peering connection request. To accept a request, the VPC peering connection must be in the pending-acceptance state, and you must be the owner of the peer VPC. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests.

    ", + "AllocateAddress": "

    Acquires an Elastic IP address.

    An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "AssignPrivateIpAddresses": "

    Assigns one or more secondary private IP addresses to the specified network interface. You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    AssignPrivateIpAddresses is available only in EC2-VPC.

    ", + "AssociateAddress": "

    Associates an Elastic IP address with an instance or a network interface.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance.

    [VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "AssociateDhcpOptions": "

    Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

    After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

    For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "AssociateRouteTable": "

    Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "AttachClassicLinkVpc": "

    Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more of the VPC's security groups. You cannot link an EC2-Classic instance to more than one VPC at a time. You can only link an instance that's in the running state. An instance is automatically unlinked from a VPC when it's stopped - you can link it to the VPC again when you restart it.

    After you've linked an instance, you cannot change the VPC security groups that are associated with it. To change the security groups, you must first unlink the instance, and then link it again.

    Linking your instance to a VPC is sometimes referred to as attaching your instance.

    ", + "AttachInternetGateway": "

    Attaches an Internet gateway to a VPC, enabling connectivity between the Internet and the VPC. For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "AttachNetworkInterface": "

    Attaches a network interface to an instance.

    ", + "AttachVolume": "

    Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

    Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

    If a volume has an AWS Marketplace product code:

    • The volume can be attached only to a stopped instance.
    • AWS Marketplace product codes are copied from the volume to the instance.
    • You must be subscribed to the product.
    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For an overview of the AWS Marketplace, see Introducing AWS Marketplace.

    For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "AttachVpnGateway": "

    Attaches a virtual private gateway to a VPC. For more information, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "AuthorizeSecurityGroupEgress": "

    Adds one or more egress rules to a security group for use with a VPC. Specifically, this action permits instances to send traffic to one or more destination CIDR IP address ranges, or to one or more destination security groups for the same VPC.

    You can have up to 50 rules per security group (covering both ingress and egress rules).

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. This action doesn't apply to security groups for use in EC2-Classic. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    Each rule consists of the protocol (for example, TCP), plus either a CIDR range or a source group. For the TCP and UDP protocols, you must also specify the destination port or port range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 for the type or code to mean all types or all codes.

    Rule changes are propagated to affected instances as quickly as possible. However, a small delay might occur.

    ", + "AuthorizeSecurityGroupIngress": "

    Adds one or more ingress rules to a security group.

    EC2-Classic: You can have up to 100 rules per group.

    EC2-VPC: You can have up to 50 rules per group (covering both ingress and egress rules).

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    [EC2-Classic] This action gives one or more CIDR IP address ranges permission to access a security group in your account, or gives one or more security groups (called the source groups) permission to access a security group for your account. A source group can be for your own AWS account, or another.

    [EC2-VPC] This action gives one or more CIDR IP address ranges permission to access a security group in your VPC, or gives one or more other security groups (called the source groups) permission to access a security group for your VPC. The security groups must all be for the same VPC.

    ", + "BundleInstance": "

    Bundles an Amazon instance store-backed Windows instance.

    During bundling, only the root device volume (C:\\) is bundled. Data on other instance store volumes is not preserved.

    This action is not applicable for Linux/Unix instances or Windows instances that are backed by Amazon EBS.

    For more information, see Creating an Instance Store-Backed Windows AMI.

    ", + "CancelBundleTask": "

    Cancels a bundling operation for an instance store-backed Windows instance.

    ", + "CancelConversionTask": "

    Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

    For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelExportTask": "

    Cancels an active export task. The request removes all artifacts of the export, including any partially-created Amazon S3 objects. If the export task is complete or is in the process of transferring the final disk image, the command fails and returns an error.

    ", + "CancelImportTask": "

    Cancels an in-process import virtual machine or import snapshot task.

    ", + "CancelReservedInstancesListing": "

    Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelSpotFleetRequests": "

    Cancels the specified Spot fleet requests.

    ", + "CancelSpotInstanceRequests": "

    Cancels one or more Spot instance requests. Spot instances are instances that Amazon EC2 starts on your behalf when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    Canceling a Spot instance request does not terminate running Spot instances associated with the request.

    ", + "ConfirmProductInstance": "

    Determines whether a product code is associated with an instance. This action can only be used by the owner of the product code. It is useful when a product code owner needs to verify whether another user's instance is eligible for support.

    ", + "CopyImage": "

    Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request. AMIs that use encrypted EBS snapshots cannot be copied with this method.

    For more information, see Copying AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopySnapshot": "

    Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

    Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

    For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateCustomerGateway": "

    Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can't be behind a device performing network address translation (NAT).

    For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

    Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 region, and 9059, which is reserved in the eu-west-1 region.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

    ", + "CreateDhcpOptions": "

    Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

    • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas.
    • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, MyCompany.com). Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.
    • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.
    • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.
    • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

    Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an Internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateFlowLogs": "

    Creates one or more flow logs to capture IP traffic for a specific network interface, subnet, or VPC. Flow logs are delivered to a specified log group in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, a log stream is created in CloudWatch Logs for each network interface in the subnet or VPC. Log streams can include information about accepted and rejected traffic to a network interface. You can view the data in your log streams using Amazon CloudWatch Logs.

    In your request, you must also specify an IAM role that has permission to publish logs to CloudWatch Logs.

    ", + "CreateImage": "

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Creating Amazon EBS-Backed Linux AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInstanceExportTask": "

    Exports a running or stopped instance to an S3 bucket.

    For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting EC2 Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInternetGateway": "

    Creates an Internet gateway for use with a VPC. After creating the Internet gateway, you attach it to a VPC using AttachInternetGateway.

    For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "CreateKeyPair": "

    Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public key and displays the private key for you to save to a file. The private key is returned as an unencrypted PEM encoded PKCS#8 private key. If a key with the specified name already exists, Amazon EC2 returns an error.

    You can have up to five thousand key pairs per region.

    The key pair returned to you is available only in the region in which you create it. To create a key pair that is available in all regions, use ImportKeyPair.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateNetworkAcl": "

    Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkAclEntry": "

    Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. Each network ACL has a set of ingress rules and a separate set of egress rules.

    We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

    After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkInterface": "

    Creates a network interface in the specified subnet.

    For more information about network interfaces, see Elastic Network Interfaces in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreatePlacementGroup": "

    Creates a placement group that you launch cluster instances into. You must give the group a name that's unique within the scope of your account.

    For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListing": "

    Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Reserved Instance listing at a time. To get a list of your Reserved Instances, you can use the DescribeReservedInstances operation.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    To sell your Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Reserved Instances, and specify the upfront price to receive for them. Your Reserved Instance listings then become available for purchase. To view the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateRoute": "

    Creates a route in a route table within a VPC.

    You must specify one of the following targets: Internet gateway or virtual private gateway, NAT instance, VPC peering connection, or network interface.

    When determining how to route traffic, we use the route with the most specific match. For example, let's say the traffic is destined for 192.0.2.3, and the route table includes the following two routes:

    • 192.0.2.0/24 (goes to some target A)

    • 192.0.2.0/28 (goes to some target B)

    Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateRouteTable": "

    Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateSecurityGroup": "

    Creates a security group.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    EC2-Classic: You can have up to 500 security groups.

    EC2-VPC: You can create up to 100 security groups per VPC.

    When you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.

    You have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.

    You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.

    ", + "CreateSnapshot": "

    Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

    When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

    You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

    To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

    Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

    For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateSpotDatafeedSubscription": "

    Creates a data feed for Spot instances, enabling you to view Spot instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateSubnet": "

    Creates a subnet in an existing VPC.

    When you create each subnet, you provide the VPC ID and the CIDR block you want for the subnet. After you create a subnet, you can't change its CIDR block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming you want only a single subnet in the VPC), or a subset of the VPC's CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest subnet (and VPC) you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses).

    AWS reserves both the first four and the last IP address in each subnet's CIDR block. They're not available for use.

    If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

    If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateTags": "

    Adds or overwrites one or more tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 10 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateVolume": "

    Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

    You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

    You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For more information, see Creating or Restoring an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateVpc": "

    Creates a VPC with the specified CIDR block.

    The smallest VPC you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses). To help you decide how big to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    By default, each instance you launch in the VPC has the default DHCP options, which includes only a default DNS server that we provide (AmazonProvidedDNS). For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpcEndpoint": "

    Creates a VPC endpoint for a specified AWS service. An endpoint enables you to create a private connection between your VPC and another AWS service in your account. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

    Currently, only endpoints to Amazon S3 are supported.

    ", + "CreateVpcPeeringConnection": "

    Requests a VPC peering connection between two VPCs: a requester VPC that you own and a peer VPC with which to create the connection. The peer VPC can belong to another AWS account. The requester VPC and peer VPC cannot have overlapping CIDR blocks.

    The owner of the peer VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

    A CreateVpcPeeringConnection request between VPCs with overlapping CIDR blocks results in the VPC peering connection having a status of failed.

    ", + "CreateVpnConnection": "

    Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The only supported connection type is ipsec.1.

    The response includes information that you need to give to your network administrator to configure your customer gateway.

    We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

    If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnConnectionRoute": "

    Creates a static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnGateway": "

    Creates a virtual private gateway. A virtual private gateway is the endpoint on the VPC side of your VPN connection. You can create a virtual private gateway before creating the VPC itself.

    For more information about virtual private gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DeleteCustomerGateway": "

    Deletes the specified customer gateway. You must delete the VPN connection before you can delete the customer gateway.

    ", + "DeleteDhcpOptions": "

    Deletes the specified set of DHCP options. You must disassociate the set of DHCP options before you can delete it. You can disassociate the set of DHCP options by associating either a new set of options or the default set of options with the VPC.

    ", + "DeleteFlowLogs": "

    Deletes one or more flow logs.

    ", + "DeleteInternetGateway": "

    Deletes the specified Internet gateway. You must detach the Internet gateway from the VPC before you can delete it.

    ", + "DeleteKeyPair": "

    Deletes the specified key pair, by removing the public key from Amazon EC2.

    ", + "DeleteNetworkAcl": "

    Deletes the specified network ACL. You can't delete the ACL if it's associated with any subnets. You can't delete the default network ACL.

    ", + "DeleteNetworkAclEntry": "

    Deletes the specified ingress or egress entry (rule) from the specified network ACL.

    ", + "DeleteNetworkInterface": "

    Deletes the specified network interface. You must detach the network interface before you can delete it.

    ", + "DeletePlacementGroup": "

    Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteRoute": "

    Deletes the specified route from the specified route table.

    ", + "DeleteRouteTable": "

    Deletes the specified route table. You must disassociate the route table from any subnets before you can delete it. You can't delete the main route table.

    ", + "DeleteSecurityGroup": "

    Deletes a security group.

    If you attempt to delete a security group that is associated with an instance, or is referenced by another security group, the operation fails with InvalidGroup.InUse in EC2-Classic or DependencyViolation in EC2-VPC.

    ", + "DeleteSnapshot": "

    Deletes the specified snapshot.

    When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

    You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first de-register the AMI before you can delete the snapshot.

    For more information, see Deleting an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteSpotDatafeedSubscription": "

    Deletes the data feed for Spot instances.

    ", + "DeleteSubnet": "

    Deletes the specified subnet. You must terminate all running instances in the subnet before you can delete the subnet.

    ", + "DeleteTags": "

    Deletes the specified set of tags from the specified set of resources. This call is designed to follow a DescribeTags request.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVolume": "

    Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

    The volume may remain in the deleting state for several minutes.

    For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVpc": "

    Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on.

    ", + "DeleteVpcEndpoints": "

    Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint.

    ", + "DeleteVpcPeeringConnection": "

    Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the peer VPC can delete the VPC peering connection if it's in the active state. The owner of the requester VPC can delete a VPC peering connection in the pending-acceptance state.

    ", + "DeleteVpnConnection": "

    Deletes the specified VPN connection.

    If you're deleting the VPC and its associated components, we recommend that you detach the virtual private gateway from the VPC and delete the VPC before deleting the VPN connection. If you believe that the tunnel credentials for your VPN connection have been compromised, you can delete the VPN connection and create a new one that has new keys, without needing to delete the VPC or virtual private gateway. If you create a new VPN connection, you must reconfigure the customer gateway using the new configuration information returned with the new VPN connection ID.

    ", + "DeleteVpnConnectionRoute": "

    Deletes the specified static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    ", + "DeleteVpnGateway": "

    Deletes the specified virtual private gateway. We recommend that before you delete a virtual private gateway, you detach it from the VPC and delete the VPN connection. Note that you don't need to delete the virtual private gateway if you plan to delete and recreate the VPN connection between your VPC and your network.

    ", + "DeregisterImage": "

    Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances.

    This command does not delete the AMI.

    ", + "DescribeAccountAttributes": "

    Describes attributes of your AWS account. The following are the supported account attributes:

    • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

    • default-vpc: The ID of the default VPC for your account, or none.

    • max-instances: The maximum number of On-Demand instances that you can run.

    • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

    • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

    • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

    ", + "DescribeAddresses": "

    Describes one or more of your Elastic IP addresses.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeAvailabilityZones": "

    Describes one or more of the Availability Zones that are available to you. The results include zones only for the region you're currently using. If there is an event impacting an Availability Zone, you can use this request to view the state and any provided message for that Availability Zone.

    For more information, see Regions and Availability Zones in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeBundleTasks": "

    Describes one or more of your bundling tasks.

    Completed bundle tasks are listed for only a limited time. If your bundle task is no longer in the list, you can still register an AMI from it. Just use RegisterImage with the Amazon S3 bucket name and image manifest name you provided to the bundle task.

    ", + "DescribeClassicLinkInstances": "

    Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink; you cannot use this request to return information about other instances.

    ", + "DescribeConversionTasks": "

    Describes one or more of your conversion tasks. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeCustomerGateways": "

    Describes one or more of your VPN customer gateways.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeDhcpOptions": "

    Describes one or more of your DHCP options sets.

    For more information about DHCP options sets, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeExportTasks": "

    Describes one or more of your export tasks.

    ", + "DescribeFlowLogs": "

    Describes one or more flow logs. To view the information in your flow logs (the log streams for the network interfaces), you must use the CloudWatch Logs console or the CloudWatch Logs API.

    ", + "DescribeImageAttribute": "

    Describes the specified attribute of the specified AMI. You can specify only one attribute at a time.

    ", + "DescribeImages": "

    Describes one or more of the images (AMIs, AKIs, and ARIs) available to you. Images available to you include public images, private images that you own, and private images owned by other AWS accounts but for which you have explicit launch permissions.

    Deregistered images are included in the returned results for an unspecified interval after deregistration.

    ", + "DescribeImportImageTasks": "

    Displays details about import virtual machine or import snapshot tasks that are already created.

    ", + "DescribeImportSnapshotTasks": "

    Describes your import snapshot tasks.

    ", + "DescribeInstanceAttribute": "

    Describes the specified attribute of the specified instance. You can specify only one attribute at a time. Valid attribute values are: instanceType | kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | groupSet | ebsOptimized | sriovNetSupport

    ", + "DescribeInstanceStatus": "

    Describes the status of one or more instances.

    Instance status includes the following components:

    • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status Checks for Your Instances and Troubleshooting Instances with Failed Status Checks in the Amazon Elastic Compute Cloud User Guide.

    • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled Events for Your Instances in the Amazon Elastic Compute Cloud User Guide.

    • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeInstances": "

    Describes one or more of your instances.

    If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

    Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

    ", + "DescribeInternetGateways": "

    Describes one or more of your Internet gateways.

    ", + "DescribeKeyPairs": "

    Describes one or more of your key pairs.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeMovingAddresses": "

    Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, or that are being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

    ", + "DescribeNetworkAcls": "

    Describes one or more of your network ACLs.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeNetworkInterfaceAttribute": "

    Describes a network interface attribute. You can specify only one attribute at a time.

    ", + "DescribeNetworkInterfaces": "

    Describes one or more of your network interfaces.

    ", + "DescribePlacementGroups": "

    Describes one or more of your placement groups. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribePrefixLists": "

    Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a VPC endpoint.

    ", + "DescribeRegions": "

    Describes one or more regions that are currently available to you.

    For a list of the regions supported by Amazon EC2, see Regions and Endpoints.

    ", + "DescribeReservedInstances": "

    Describes one or more of the Reserved Instances that you purchased.

    For more information about Reserved Instances, see Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesListings": "

    Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

    As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesModifications": "

    Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesOfferings": "

    Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeRouteTables": "

    Describes one or more of your route tables.

    Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeSecurityGroups": "

    Describes one or more of your security groups.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeSnapshotAttribute": "

    Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSnapshots": "

    Describes one or more of the EBS snapshots available to you. Available snapshots include public snapshots available for any AWS account to launch, private snapshots that you own, and private snapshots owned by another AWS account but for which you've been given explicit create volume permissions.

    The create volume permissions fall into the following categories:

    • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.
    • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.
    • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

    The list of snapshots returned can be modified by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

    If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

    If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

    If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

    If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSpotDatafeedSubscription": "

    Describes the data feed for Spot instances. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSpotFleetInstances": "

    Describes the running instances for the specified Spot fleet.

    ", + "DescribeSpotFleetRequestHistory": "

    Describes the events for the specified Spot fleet request during the specified time.

    Spot fleet events are delayed by up to 30 seconds before they can be described. This ensures that you can query by the last evaluated time and not miss a recorded event.

    ", + "DescribeSpotFleetRequests": "

    Describes your Spot fleet requests.

    ", + "DescribeSpotInstanceRequests": "

    Describes the Spot instance requests that belong to your account. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    You can use DescribeSpotInstanceRequests to find a running Spot instance by examining the response. If the status of the Spot instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

    ", + "DescribeSpotPriceHistory": "

    Describes the Spot price history. The prices returned are listed in chronological order, from the oldest to the most recent, for up to the past 90 days. For more information, see Spot Instance Pricing History in the Amazon Elastic Compute Cloud User Guide.

    When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.

    ", + "DescribeSubnets": "

    Describes one or more of your subnets.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeTags": "

    Describes one or more of the tags for your EC2 resources.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeAttribute": "

    Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeStatus": "

    Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

    The DescribeVolumeStatus operation provides the following information about the specified volumes:

    Status: Reflects the current status of the volume. The possible values are ok, impaired, warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information on volume status, see Monitoring the Status of Your Volumes.

    Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

    Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

    Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

    ", + "DescribeVolumes": "

    Describes the specified EBS volumes.

    If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVpcAttribute": "

    Describes the specified attribute of the specified VPC. You can specify only one attribute at a time.

    ", + "DescribeVpcClassicLink": "

    Describes the ClassicLink status of one or more VPCs.

    ", + "DescribeVpcEndpointServices": "

    Describes all supported AWS services that can be specified when creating a VPC endpoint.

    ", + "DescribeVpcEndpoints": "

    Describes one or more of your VPC endpoints.

    ", + "DescribeVpcPeeringConnections": "

    Describes one or more of your VPC peering connections.

    ", + "DescribeVpcs": "

    Describes one or more of your VPCs.

    ", + "DescribeVpnConnections": "

    Describes one or more of your VPN connections.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeVpnGateways": "

    Describes one or more of your virtual private gateways.

    For more information about virtual private gateways, see Adding an IPsec Hardware VPN to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DetachClassicLinkVpc": "

    Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance has been unlinked, the VPC security groups are no longer associated with it. An instance is automatically unlinked from a VPC when it's stopped.

    ", + "DetachInternetGateway": "

    Detaches an Internet gateway from a VPC, disabling connectivity between the Internet and the VPC. The VPC must not contain any running instances with Elastic IP addresses.

    ", + "DetachNetworkInterface": "

    Detaches a network interface from an instance.

    ", + "DetachVolume": "

    Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so results in the volume being stuck in a busy state while detaching.

    If an Amazon EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

    When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

    For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DetachVpnGateway": "

    Detaches a virtual private gateway from a VPC. You do this if you're planning to turn off the VPC and not use it anymore. You can confirm a virtual private gateway has been completely detached from a VPC by describing the virtual private gateway (any attachments to the virtual private gateway are also described).

    You must wait for the attachment's state to switch to detached before you can delete the VPC or attach a different VPC to the virtual private gateway.

    ", + "DisableVgwRoutePropagation": "

    Disables a virtual private gateway (VGW) from propagating routes to a specified route table of a VPC.

    ", + "DisableVpcClassicLink": "

    Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has EC2-Classic instances linked to it.

    ", + "DisassociateAddress": "

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "DisassociateRouteTable": "

    Disassociates a subnet from a route table.

    After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "EnableVgwRoutePropagation": "

    Enables a virtual private gateway (VGW) to propagate routes to the specified route table of a VPC.

    ", + "EnableVolumeIO": "

    Enables I/O operations for a volume that had I/O operations disabled because the data on the volume was potentially inconsistent.

    ", + "EnableVpcClassicLink": "

    Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC's route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "GetConsoleOutput": "

    Gets the console output for the specified instance.

    Instances do not have a physical monitor through which you can view their console output. They also lack physical controls that allow you to power up, reboot, or shut them down. To allow these actions, we provide them through the Amazon EC2 API and command line interface.

    Instance console output is buffered and posted shortly after instance boot, reboot, and termination. Amazon EC2 preserves the most recent 64 KB output which is available for at least one hour after the most recent post.

    For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. This output is buffered because the instance produces it and then posts it to a store where the instance's owner can retrieve it.

    For Windows instances, the instance console output includes output from the EC2Config service.

    ", + "GetPasswordData": "

    Retrieves the encrypted administrator password for an instance running Windows.

    The Windows password is generated at boot if the EC2Config service plugin, Ec2SetPassword, is enabled. This usually only happens the first time an AMI is launched, and then Ec2SetPassword is automatically disabled. The password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

    The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

    Password generation and encryption takes a few moments. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.

    ", + "ImportImage": "

    Import single or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI).

    ", + "ImportInstance": "

    Creates an import instance task using metadata from the specified disk image. ImportInstance only supports single-volume VMs. To import multi-volume VMs, use ImportImage. After importing the image, you then upload it using the ec2-import-volume command in the EC2 command line tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportKeyPair": "

    Imports the public key from an RSA key pair that you created with a third-party tool. Compare this with CreateKeyPair, in which AWS creates the key pair and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, you create the key pair and give AWS just the public key. The private key is never transferred between you and AWS.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportSnapshot": "

    Imports a disk into an EBS snapshot.

    ", + "ImportVolume": "

    Creates an import volume task using metadata from the specified disk image. After importing the image, you then upload it using the ec2-import-volume command in the Amazon EC2 command-line interface (CLI) tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifyImageAttribute": "

    Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.

    AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.

    ", + "ModifyInstanceAttribute": "

    Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

    To modify some attributes, the instance must be stopped. For more information, see Modifying Attributes of a Stopped Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifyNetworkInterfaceAttribute": "

    Modifies the specified network interface attribute. You can specify only one attribute at a time.

    ", + "ModifyReservedInstances": "

    Modifies the Availability Zone, instance count, instance type, or network platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifySnapshotAttribute": "

    Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    Snapshots with AWS Marketplace product codes cannot be made public.

    ", + "ModifySubnetAttribute": "

    Modifies a subnet attribute.

    ", + "ModifyVolumeAttribute": "

    Modifies a volume attribute.

    By default, all I/O operations for the volume are suspended when the data on the volume is determined to be potentially inconsistent, to prevent undetectable, latent data corruption. The I/O access to the volume can be resumed by first enabling I/O access and then checking the data consistency on your volume.

    You can change the default behavior to resume I/O operations. We recommend that you change this only for boot volumes or for volumes that are stateless or disposable.

    ", + "ModifyVpcAttribute": "

    Modifies the specified attribute of the specified VPC.

    ", + "ModifyVpcEndpoint": "

    Modifies attributes of a specified VPC endpoint. You can modify the policy associated with the endpoint, and you can add and remove route tables associated with the endpoint.

    ", + "MonitorInstances": "

    Enables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "MoveAddressToVpc": "

    Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that's allocated for use in the EC2-VPC platform to the EC2-Classic platform.

    ", + "PurchaseReservedInstancesOffering": "

    Purchases a Reserved Instance for use with your account. With Amazon EC2 Reserved Instances, you obtain a capacity reservation for a certain instance configuration over a specified period of time and pay a lower hourly rate compared to On-Demand Instance pricing.

    Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

    For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "RebootInstances": "

    Requests a reboot of one or more instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

    If a Linux/Unix instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

    For more information about troubleshooting, see Getting Console Output and Rebooting Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "RegisterImage": "

    Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

    You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. For more information, see Launching an Instance from a Snapshot in the Amazon Elastic Compute Cloud User Guide.

    Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billingProduct code associated with an AMI to verify subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and subsequent instances launched from such an AMI will not be able to connect to package update infrastructure.

    Similarly, although you can create a Windows AMI from a snapshot, you can't successfully launch an instance from the AMI.

    To create Windows AMIs or to create AMIs for Linux operating systems that must retain AMI billing codes to work properly, see CreateImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

    You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

    ", + "RejectVpcPeeringConnection": "

    Rejects a VPC peering connection request. The VPC peering connection must be in the pending-acceptance state. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests. To delete an active VPC peering connection, or to delete a VPC peering connection request that you initiated, use DeleteVpcPeeringConnection.

    ", + "ReleaseAddress": "

    Releases the specified Elastic IP address.

    After releasing an Elastic IP address, it is released to the IP address pool and might be unavailable to you. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another AWS account.

    [EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

    [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you try to release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

    ", + "ReplaceNetworkAclAssociation": "

    Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceNetworkAclEntry": "

    Replaces an entry (rule) in a network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRoute": "

    Replaces an existing route within a route table in a VPC. You must provide only one of the following: Internet gateway or virtual private gateway, NAT instance, VPC peering connection, or network interface.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRouteTableAssociation": "

    Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

    ", + "ReportInstanceStatus": "

    Submits feedback about the status of an instance. The instance must be in the running state. If your experience with the instance differs from the instance status returned by DescribeInstanceStatus, use ReportInstanceStatus to report your experience with the instance. Amazon EC2 collects this information to improve the accuracy of status checks.

    Use of this action does not change the value returned by DescribeInstanceStatus.

    ", + "RequestSpotFleet": "

    Creates a Spot fleet request.

    You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

    By default, the Spot fleet requests Spot instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

    Alternatively, you can specify that the Spot fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot instances in your Spot fleet are in different Spot pools, you can improve the availability of your fleet.

    For more information, see Spot Fleet Requests in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstances": "

    Creates a Spot instance request. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot Instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    ", + "ResetImageAttribute": "

    Resets an attribute of an AMI to its default value.

    The productCodes attribute can't be reset.

    ", + "ResetInstanceAttribute": "

    Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the SourceDestCheck, the instance can be either running or stopped.

    The SourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "ResetNetworkInterfaceAttribute": "

    Resets a network interface attribute. You can specify only one attribute at a time.

    ", + "ResetSnapshotAttribute": "

    Resets permission settings for the specified snapshot.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "RestoreAddressToClassic": "

    Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

    ", + "RevokeSecurityGroupEgress": "

    Removes one or more egress rules from a security group for EC2-VPC. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be revoked.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RevokeSecurityGroupIngress": "

    Removes one or more ingress rules from a security group. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be removed.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RunInstances": "

    Launches the specified number of instances using an AMI for which you have permissions.

    When you launch an instance, it enters the pending state. After the instance is ready for you, it enters the running state. To check the state of your instance, call DescribeInstances.

    If you don't specify a security group when launching an instance, Amazon EC2 uses the default security group. For more information, see Security Groups in the Amazon Elastic Compute Cloud User Guide.

    [EC2-VPC only accounts] If you don't specify a subnet in the request, we choose a default subnet from your default VPC for you.

    [EC2-Classic accounts] If you're launching into EC2-Classic and you don't specify an Availability Zone, we choose one for you.

    Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    You can provide optional user data when launching an instance. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide.

    If any of the AMIs have a product code attached for which the user has not subscribed, RunInstances fails.

    T2 instance types can only be launched into a VPC. If you do not have a default VPC, or if you do not specify a subnet ID in the request, RunInstances fails.

    For more information about troubleshooting, see What To Do If An Instance Immediately Terminates, and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "StartInstances": "

    Starts an Amazon EBS-backed AMI that you've previously stopped.

    Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for hourly instance usage. However, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Performing this operation on an instance that uses an instance store as its root device returns an error.

    For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "StopInstances": "

    Stops an Amazon EBS-backed instance. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    You can't start or stop Spot Instances.

    Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for hourly instance usage. However, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time.

    Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Performing this operation on an instance that uses an instance store as its root device returns an error.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    For more information about troubleshooting, see Troubleshooting Stopping Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "TerminateInstances": "

    Shuts down one or more instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

    Terminated instances remain visible after termination (for approximately one hour).

    By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    For more information about troubleshooting, see Troubleshooting Terminating Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "UnassignPrivateIpAddresses": "

    Unassigns one or more secondary private IP addresses from a network interface.

    ", + "UnmonitorInstances": "

    Disables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    " + }, + "service": "Amazon Elastic Compute Cloud

    Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your need to invest in hardware up front, so you can develop and deploy applications faster.

    ", + "shapes": { + "AcceptVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "AcceptVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "AccountAttribute": { + "base": "

    Describes an account attribute.

    ", + "refs": { + "AccountAttributeList$member": null + } + }, + "AccountAttributeList": { + "base": null, + "refs": { + "DescribeAccountAttributesResult$AccountAttributes": "

    Information about one or more account attributes.

    " + } + }, + "AccountAttributeName": { + "base": null, + "refs": { + "AccountAttributeNameStringList$member": null + } + }, + "AccountAttributeNameStringList": { + "base": null, + "refs": { + "DescribeAccountAttributesRequest$AttributeNames": "

    One or more account attribute names.

    " + } + }, + "AccountAttributeValue": { + "base": "

    Describes a value of an account attribute.

    ", + "refs": { + "AccountAttributeValueList$member": null + } + }, + "AccountAttributeValueList": { + "base": null, + "refs": { + "AccountAttribute$AttributeValues": "

    One or more values for the account attribute.

    " + } + }, + "ActiveInstance": { + "base": "

    Describes a running instance in a Spot fleet.

    ", + "refs": { + "ActiveInstanceSet$member": null + } + }, + "ActiveInstanceSet": { + "base": null, + "refs": { + "DescribeSpotFleetInstancesResponse$ActiveInstances": "

    The running instances. Note that this list is refreshed periodically and might be out of date.

    " + } + }, + "Address": { + "base": "

    Describes an Elastic IP address.

    ", + "refs": { + "AddressList$member": null + } + }, + "AddressList": { + "base": null, + "refs": { + "DescribeAddressesResult$Addresses": "

    Information about one or more Elastic IP addresses.

    " + } + }, + "AllocateAddressRequest": { + "base": null, + "refs": { + } + }, + "AllocateAddressResult": { + "base": null, + "refs": { + } + }, + "AllocationIdList": { + "base": null, + "refs": { + "DescribeAddressesRequest$AllocationIds": "

    [EC2-VPC] One or more allocation IDs.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "AllocationStrategy": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$AllocationStrategy": "

    Determines how to allocate the target capacity across the Spot pools specified by the Spot fleet request. The default is lowestPrice.

    " + } + }, + "ArchitectureValues": { + "base": null, + "refs": { + "Image$Architecture": "

    The architecture of the image.

    ", + "ImportInstanceLaunchSpecification$Architecture": "

    The architecture of the instance.

    ", + "Instance$Architecture": "

    The architecture of the image.

    ", + "RegisterImageRequest$Architecture": "

    The architecture of the AMI.

    Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, the architecture specified in the manifest file.

    " + } + }, + "AssignPrivateIpAddressesRequest": { + "base": null, + "refs": { + } + }, + "AssociateAddressRequest": { + "base": null, + "refs": { + } + }, + "AssociateAddressResult": { + "base": null, + "refs": { + } + }, + "AssociateDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "AssociateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "AssociateRouteTableResult": { + "base": null, + "refs": { + } + }, + "AttachClassicLinkVpcRequest": { + "base": null, + "refs": { + } + }, + "AttachClassicLinkVpcResult": { + "base": null, + "refs": { + } + }, + "AttachInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "AttachNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "AttachNetworkInterfaceResult": { + "base": null, + "refs": { + } + }, + "AttachVolumeRequest": { + "base": null, + "refs": { + } + }, + "AttachVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "AttachVpnGatewayResult": { + "base": null, + "refs": { + } + }, + "AttachmentStatus": { + "base": null, + "refs": { + "EbsInstanceBlockDevice$Status": "

    The attachment state.

    ", + "InstanceNetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "InternetGatewayAttachment$State": "

    The current state of the attachment.

    ", + "NetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "VpcAttachment$State": "

    The current state of the attachment.

    " + } + }, + "AttributeBooleanValue": { + "base": "

    The value to use when a resource attribute accepts a Boolean value.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$SourceDestCheck": "

    Indicates whether source/destination checking is enabled.

    ", + "DescribeVolumeAttributeResult$AutoEnableIO": "

    The state of autoEnableIO attribute.

    ", + "DescribeVpcAttributeResult$EnableDnsSupport": "

    Indicates whether DNS resolution is enabled for the VPC. If this attribute is true, the Amazon DNS server resolves DNS hostnames for your instances to their corresponding IP addresses; otherwise, it does not.

    ", + "DescribeVpcAttributeResult$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If this attribute is true, instances in the VPC get DNS hostnames; otherwise, they do not.

    ", + "InstanceAttribute$DisableApiTermination": "

    If the value is true, you can't terminate the instance through the Amazon EC2 console, CLI, or API; otherwise, you can.

    ", + "InstanceAttribute$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O.

    ", + "InstanceAttribute$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$SourceDestCheck": "

    Specifies whether source/destination checking is enabled. A value of true means that checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$DisableApiTermination": "

    If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this paramater for Spot Instances.

    ", + "ModifyInstanceAttributeRequest$EbsOptimized": "

    Specifies whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "ModifySubnetAttributeRequest$MapPublicIpOnLaunch": "

    Specify true to indicate that instances launched into the specified subnet should be assigned public IP address.

    ", + "ModifyVolumeAttributeRequest$AutoEnableIO": "

    Indicates whether the volume should be auto-enabled for I/O operations.

    ", + "ModifyVpcAttributeRequest$EnableDnsSupport": "

    Indicates whether the DNS resolution is supported for the VPC. If enabled, queries to the Amazon provided DNS server at the 169.254.169.253 IP address, or the reserved IP address at the base of the VPC network range \"plus two\" will succeed. If disabled, the Amazon provided DNS service in the VPC that resolves public DNS hostnames to IP addresses is not enabled.

    ", + "ModifyVpcAttributeRequest$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If enabled, instances in the VPC get DNS hostnames; otherwise, they do not.

    You can only enable DNS hostnames if you also enable DNS support.

    " + } + }, + "AttributeValue": { + "base": "

    The value to use for a resource attribute.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Description": "

    The description of the network interface.

    ", + "ImageAttribute$KernelId": "

    The kernel ID.

    ", + "ImageAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "ImageAttribute$Description": "

    A description for the AMI.

    ", + "ImageAttribute$SriovNetSupport": null, + "InstanceAttribute$InstanceType": "

    The instance type.

    ", + "InstanceAttribute$KernelId": "

    The kernel ID.

    ", + "InstanceAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "InstanceAttribute$UserData": "

    The Base64-encoded MIME user data.

    ", + "InstanceAttribute$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "InstanceAttribute$RootDeviceName": "

    The name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "InstanceAttribute$SriovNetSupport": null, + "ModifyImageAttributeRequest$Description": "

    A description for the AMI.

    ", + "ModifyInstanceAttributeRequest$InstanceType": "

    Changes the instance type to the specified value. For more information, see Instance Types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

    ", + "ModifyInstanceAttributeRequest$Kernel": "

    Changes the instance's kernel to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$Ramdisk": "

    Changes the instance's RAM disk to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$InstanceInitiatedShutdownBehavior": "

    Specifies whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "ModifyInstanceAttributeRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking for the instance.

    There is no way to disable enhanced networking at this time.

    This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable.

    ", + "ModifyNetworkInterfaceAttributeRequest$Description": "

    A description for the network interface.

    ", + "DhcpConfigurationValueList$member": null + } + }, + "AuthorizeSecurityGroupEgressRequest": { + "base": null, + "refs": { + } + }, + "AuthorizeSecurityGroupIngressRequest": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Describes an Availability Zone.

    ", + "refs": { + "AvailabilityZoneList$member": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesResult$AvailabilityZones": "

    Information about one or more Availability Zones.

    " + } + }, + "AvailabilityZoneMessage": { + "base": "

    Describes a message about an Availability Zone.

    ", + "refs": { + "AvailabilityZoneMessageList$member": null + } + }, + "AvailabilityZoneMessageList": { + "base": null, + "refs": { + "AvailabilityZone$Messages": "

    Any messages about the Availability Zone.

    " + } + }, + "AvailabilityZoneState": { + "base": null, + "refs": { + "AvailabilityZone$State": "

    The state of the Availability Zone (available | impaired | unavailable).

    " + } + }, + "BatchState": { + "base": null, + "refs": { + "CancelSpotFleetRequestsSuccessItem$CurrentSpotFleetRequestState": "

    The current state of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$PreviousSpotFleetRequestState": "

    The previous state of the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestState": "

    The state of the Spot fleet request.

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "BlockDeviceMappingList$member": null, + "BlockDeviceMappingRequestList$member": null + } + }, + "BlockDeviceMappingList": { + "base": null, + "refs": { + "Image$BlockDeviceMappings": "

    Any block device mapping entries.

    ", + "ImageAttribute$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "LaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "SpotFleetLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "RequestSpotLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    " + } + }, + "BlockDeviceMappingRequestList": { + "base": null, + "refs": { + "CreateImageRequest$BlockDeviceMappings": "

    Information about one or more block device mappings.

    ", + "RegisterImageRequest$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "RunInstancesRequest$BlockDeviceMappings": "

    The block device mapping.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AllocateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssignPrivateIpAddressesRequest$AllowReassignment": "

    Indicates whether to allow an IP address that is already assigned to another network interface or instance to be reassigned to the specified network interface.

    ", + "AssociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateAddressRequest$AllowReassociation": "

    [EC2-VPC] Allows an Elastic IP address that is already associated with an instance or network interface to be re-associated with the specified instance or network interface. Otherwise, the operation fails.

    Default: false

    ", + "AssociateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "AttachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttributeBooleanValue$Value": "

    Valid values are true or false.

    ", + "AuthorizeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AuthorizeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "BundleInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelBundleTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelConversionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelImportTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$TerminateInstances": "

    Indicates whether to terminate instances for a Spot fleet request if it is canceled successfully.

    ", + "CancelSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ConfirmProductInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ConfirmProductInstanceResult$Return": "

    The return value of the request. Returns true if the specified product code is owned by the requester and associated with the specified instance.

    ", + "CopyImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopySnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopySnapshotRequest$Encrypted": "

    Specifies whether the destination snapshot should be encrypted. There is no way to create an unencrypted snapshot copy from an encrypted snapshot; however, you can encrypt a copy of an unencrypted snapshot with this flag. The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$NoReboot": "

    By default, this parameter is set to false, which means Amazon EC2 attempts to shut down the instance cleanly before image creation and then reboots the instance. When the parameter is set to true, Amazon EC2 doesn't shut down the instance before creating the image. When this option is used, file system integrity on the created image can't be guaranteed.

    ", + "CreateInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$Egress": "

    Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet).

    ", + "CreateNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreatePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "CreateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$Encrypted": "

    Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$Egress": "

    Indicates whether the rule is an egress rule.

    ", + "DeleteNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeletePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DeleteVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeregisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAccountAttributesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAvailabilityZonesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeBundleTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeClassicLinkInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeConversionTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeCustomerGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImagesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportImageTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportSnapshotTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$IncludeAllInstances": "

    When true, includes the health status for all instances. When false, includes the health status for running instances only.

    Default: false

    ", + "DescribeInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInternetGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeKeyPairsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeMovingAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkAclsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfacesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePlacementGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePrefixListsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRegionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$IncludeMarketplace": "

    Include Marketplace offerings in the response.

    ", + "DescribeReservedInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRouteTablesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSecurityGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotPriceHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSubnetsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointServicesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcPeeringConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DetachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$Force": "

    Specifies whether to force a detachment.

    ", + "DetachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachVolumeRequest$Force": "

    Forces detachment if the previous detachment attempt did not occur cleanly (for example, logging into an instance, unmounting the volume, and detaching normally). This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance won't have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures.

    ", + "DetachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DisassociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisassociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EbsBlockDevice$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "EbsBlockDevice$Encrypted": "

    Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption.

    ", + "EbsInstanceBlockDevice$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EbsInstanceBlockDeviceSpecification$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EnableVolumeIORequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "GetConsoleOutputRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "GetPasswordDataRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Image$Public": "

    Indicates whether the image has public launch permissions. The value is true if this image has public launch permissions or false if it has only implicit and explicit launch permissions.

    ", + "ImportImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportInstanceLaunchSpecification$Monitoring": "

    Indicates whether monitoring is enabled.

    ", + "ImportInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Instance$SourceDestCheck": "

    Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "Instance$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "InstanceNetworkInterface$SourceDestCheck": "

    Indicates whether to validate network traffic to or from this network interface.

    ", + "InstanceNetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "InstanceNetworkInterfaceSpecification$DeleteOnTermination": "

    If set to true, the interface is deleted when the instance is terminated. You can specify true only if creating a new network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$AssociatePublicIpAddress": "

    Indicates whether to assign a public IP address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

    ", + "InstancePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "LaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "ModifyImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifySnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$ResetPolicy": "

    Specify true to reset the policy document to the default policy. The default policy allows access to the service.

    ", + "ModifyVpcEndpointResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "MonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "MoveAddressToVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "NetworkAcl$IsDefault": "

    Indicates whether this is the default network ACL for the VPC.

    ", + "NetworkAclEntry$Egress": "

    Indicates whether the rule is an egress rule (applied to traffic leaving the subnet).

    ", + "NetworkInterface$RequesterManaged": "

    Indicates whether the network interface is being managed by AWS.

    ", + "NetworkInterface$SourceDestCheck": "

    Indicates whether traffic to or from the instance is validated.

    ", + "NetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfaceAttachmentChanges$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfacePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "PriceSchedule$Active": "

    The current price schedule, as determined by the term remaining for the Reserved Instance in the listing.

    A specific price schedule is always in effect, but only one price schedule can be active at any time. Take, for example, a Reserved Instance listing that has five months remaining in its term. When you specify price schedules for five months and two months, this means that schedule 1, covering the first three months of the remaining term, will be active during months 5, 4, and 3. Then schedule 2, covering the last two months of the term, will be active for months 2 and 1.

    ", + "PrivateIpAddressSpecification$Primary": "

    Indicates whether the private IP address is the primary private IP address. Only one IP address can be designated as primary.

    ", + "PurchaseReservedInstancesOfferingRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RebootInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RegisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "ReleaseAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$Egress": "

    Indicates whether to replace the egress rule.

    Default: If no value is specified, we replace the ingress rule.

    ", + "ReplaceRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceRouteTableAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReportInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotFleetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReservedInstancesOffering$Marketplace": "

    Indicates whether the offering is available through the Reserved Instance Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, this is true.

    ", + "ResetImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RestoreAddressToClassicRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RouteTableAssociation$Main": "

    Indicates whether this is the main route table.

    ", + "RunInstancesMonitoringEnabled$Enabled": "

    Indicates whether monitoring is enabled for the instance.

    ", + "RunInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RunInstancesRequest$DisableApiTermination": "

    If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this parameter to true and then later want to be able to terminate the instance, you must first change the value of the disableApiTermination attribute to false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance.

    Default: false

    ", + "RunInstancesRequest$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

    ", + "Snapshot$Encrypted": "

    Indicates whether the snapshot is encrypted.

    ", + "SpotFleetLaunchSpecification$EbsOptimized": "

    Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "SpotFleetMonitoring$Enabled": "

    Enables monitoring for the instance.

    Default: false

    ", + "SpotFleetRequestConfigData$TerminateInstancesWithExpiration": "

    Indicates whether running Spot instances should be terminated when the Spot fleet request expires.

    ", + "StartInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$Force": "

    Forces the instances to stop. The instances do not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances.

    Default: false

    ", + "Subnet$DefaultForAz": "

    Indicates whether this is the default subnet for the Availability Zone.

    ", + "Subnet$MapPublicIpOnLaunch": "

    Indicates whether instances launched in this subnet receive a public IP address.

    ", + "TerminateInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "UnmonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Volume$Encrypted": "

    Indicates whether the volume will be encrypted.

    ", + "VolumeAttachment$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "Vpc$IsDefault": "

    Indicates whether the VPC is the default VPC.

    ", + "VpcClassicLink$ClassicLinkEnabled": "

    Indicates whether the VPC is enabled for ClassicLink.

    ", + "VpnConnectionOptions$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    ", + "VpnConnectionOptionsSpecification$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    ", + "RequestSpotLaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    " + } + }, + "BundleIdStringList": { + "base": null, + "refs": { + "DescribeBundleTasksRequest$BundleIds": "

    One or more bundle task IDs.

    Default: Describes all your bundle tasks.

    " + } + }, + "BundleInstanceRequest": { + "base": null, + "refs": { + } + }, + "BundleInstanceResult": { + "base": null, + "refs": { + } + }, + "BundleTask": { + "base": "

    Describes a bundle task.

    ", + "refs": { + "BundleInstanceResult$BundleTask": "

    Information about the bundle task.

    ", + "BundleTaskList$member": null, + "CancelBundleTaskResult$BundleTask": "

    Information about the bundle task.

    " + } + }, + "BundleTaskError": { + "base": "

    Describes an error for BundleInstance.

    ", + "refs": { + "BundleTask$BundleTaskError": "

    If the task fails, a description of the error.

    " + } + }, + "BundleTaskList": { + "base": null, + "refs": { + "DescribeBundleTasksResult$BundleTasks": "

    Information about one or more bundle tasks.

    " + } + }, + "BundleTaskState": { + "base": null, + "refs": { + "BundleTask$State": "

    The state of the task.

    " + } + }, + "CancelBatchErrorCode": { + "base": null, + "refs": { + "CancelSpotFleetRequestsError$Code": "

    The error code.

    " + } + }, + "CancelBundleTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelBundleTaskResult": { + "base": null, + "refs": { + } + }, + "CancelConversionRequest": { + "base": null, + "refs": { + } + }, + "CancelExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelImportTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelImportTaskResult": { + "base": null, + "refs": { + } + }, + "CancelReservedInstancesListingRequest": { + "base": null, + "refs": { + } + }, + "CancelReservedInstancesListingResult": { + "base": null, + "refs": { + } + }, + "CancelSpotFleetRequestsError": { + "base": "

    Describes a Spot fleet error.

    ", + "refs": { + "CancelSpotFleetRequestsErrorItem$Error": "

    The error.

    " + } + }, + "CancelSpotFleetRequestsErrorItem": { + "base": "

    Describes a Spot fleet request that was not successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsErrorSet$member": null + } + }, + "CancelSpotFleetRequestsErrorSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$UnsuccessfulFleetRequests": "

    Information about the Spot fleet requests that are not successfully canceled.

    " + } + }, + "CancelSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsResponse": { + "base": "

    Contains the output of CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsSuccessItem": { + "base": "

    Describes a Spot fleet request that was successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsSuccessSet$member": null + } + }, + "CancelSpotFleetRequestsSuccessSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$SuccessfulFleetRequests": "

    Information about the Spot fleet requests that are successfully canceled.

    " + } + }, + "CancelSpotInstanceRequestState": { + "base": null, + "refs": { + "CancelledSpotInstanceRequest$State": "

    The state of the Spot instance request.

    " + } + }, + "CancelSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelSpotInstanceRequestsResult": { + "base": "

    Contains the output of CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelledSpotInstanceRequest": { + "base": "

    Describes a request to cancel a Spot instance.

    ", + "refs": { + "CancelledSpotInstanceRequestList$member": null + } + }, + "CancelledSpotInstanceRequestList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsResult$CancelledSpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "ClassicLinkInstance": { + "base": "

    Describes a linked EC2-Classic instance.

    ", + "refs": { + "ClassicLinkInstanceList$member": null + } + }, + "ClassicLinkInstanceList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesResult$Instances": "

    Information about one or more linked EC2-Classic instances.

    " + } + }, + "ClientData": { + "base": "

    Describes the client-specific data.

    ", + "refs": { + "ImportImageRequest$ClientData": "

    The client-specific data.

    ", + "ImportSnapshotRequest$ClientData": "

    The client-specific data.

    " + } + }, + "ConfirmProductInstanceRequest": { + "base": null, + "refs": { + } + }, + "ConfirmProductInstanceResult": { + "base": null, + "refs": { + } + }, + "ContainerFormat": { + "base": null, + "refs": { + "ExportToS3Task$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    ", + "ExportToS3TaskSpecification$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    " + } + }, + "ConversionIdStringList": { + "base": null, + "refs": { + "DescribeConversionTasksRequest$ConversionTaskIds": "

    One or more conversion task IDs.

    " + } + }, + "ConversionTask": { + "base": "

    Describes a conversion task.

    ", + "refs": { + "DescribeConversionTaskList$member": null, + "ImportInstanceResult$ConversionTask": "

    Information about the conversion task.

    ", + "ImportVolumeResult$ConversionTask": "

    Information about the conversion task.

    " + } + }, + "ConversionTaskState": { + "base": null, + "refs": { + "ConversionTask$State": "

    The state of the conversion task.

    " + } + }, + "CopyImageRequest": { + "base": null, + "refs": { + } + }, + "CopyImageResult": { + "base": null, + "refs": { + } + }, + "CopySnapshotRequest": { + "base": null, + "refs": { + } + }, + "CopySnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateCustomerGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateCustomerGatewayResult": { + "base": null, + "refs": { + } + }, + "CreateDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "CreateDhcpOptionsResult": { + "base": null, + "refs": { + } + }, + "CreateFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "CreateFlowLogsResult": { + "base": null, + "refs": { + } + }, + "CreateImageRequest": { + "base": null, + "refs": { + } + }, + "CreateImageResult": { + "base": null, + "refs": { + } + }, + "CreateInstanceExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CreateInstanceExportTaskResult": { + "base": null, + "refs": { + } + }, + "CreateInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateInternetGatewayResult": { + "base": null, + "refs": { + } + }, + "CreateKeyPairRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclResult": { + "base": null, + "refs": { + } + }, + "CreateNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkInterfaceResult": { + "base": null, + "refs": { + } + }, + "CreatePlacementGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateReservedInstancesListingRequest": { + "base": null, + "refs": { + } + }, + "CreateReservedInstancesListingResult": { + "base": null, + "refs": { + } + }, + "CreateRouteRequest": { + "base": null, + "refs": { + } + }, + "CreateRouteResult": { + "base": null, + "refs": { + } + }, + "CreateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "CreateRouteTableResult": { + "base": null, + 
"refs": { + } + }, + "CreateSecurityGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateSnapshotRequest": { + "base": null, + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSubnetRequest": { + "base": null, + "refs": { + } + }, + "CreateSubnetResult": { + "base": null, + "refs": { + } + }, + "CreateTagsRequest": { + "base": null, + "refs": { + } + }, + "CreateVolumePermission": { + "base": "

    Describes the user or group to be added or removed from the permissions for a volume.

    ", + "refs": { + "CreateVolumePermissionList$member": null + } + }, + "CreateVolumePermissionList": { + "base": null, + "refs": { + "CreateVolumePermissionModifications$Add": "

    Adds a specific AWS account ID or group to a volume's list of create volume permissions.

    ", + "CreateVolumePermissionModifications$Remove": "

    Removes a specific AWS account ID or group from a volume's list of create volume permissions.

    ", + "DescribeSnapshotAttributeResult$CreateVolumePermissions": "

    A list of permissions for creating volumes from the snapshot.

    " + } + }, + "CreateVolumePermissionModifications": { + "base": "

    Describes modifications to the permissions for a volume.

    ", + "refs": { + "ModifySnapshotAttributeRequest$CreateVolumePermission": "

    A JSON representation of the snapshot attribute modification.

    " + } + }, + "CreateVolumeRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcEndpointRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcEndpointResult": { + "base": null, + "refs": { + } + }, + "CreateVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "CreateVpcRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcResult": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionResult": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionRouteRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnGatewayResult": { + "base": null, + "refs": { + } + }, + "CurrencyCodeValues": { + "base": null, + "refs": { + "PriceSchedule$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "PriceScheduleSpecification$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "ReservedInstanceLimitPrice$CurrencyCode": "

    The currency in which the limitPrice amount is specified. At this time, the only supported currency is USD.

    ", + "ReservedInstances$CurrencyCode": "

    The currency of the Reserved Instance. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    ", + "ReservedInstancesOffering$CurrencyCode": "

    The currency of the Reserved Instance offering you are purchasing. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    " + } + }, + "CustomerGateway": { + "base": "

    Describes a customer gateway.

    ", + "refs": { + "CreateCustomerGatewayResult$CustomerGateway": "

    Information about the customer gateway.

    ", + "CustomerGatewayList$member": null + } + }, + "CustomerGatewayIdStringList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysRequest$CustomerGatewayIds": "

    One or more customer gateway IDs.

    Default: Describes all your customer gateways.

    " + } + }, + "CustomerGatewayList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysResult$CustomerGateways": "

    Information about one or more customer gateways.

    " + } + }, + "DatafeedSubscriptionState": { + "base": null, + "refs": { + "SpotDatafeedSubscription$State": "

    The state of the Spot instance data feed subscription.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "BundleTask$StartTime": "

    The time this task started.

    ", + "BundleTask$UpdateTime": "

    The time of the most recent update for the task.

    ", + "ClientData$UploadStart": "

    The time that the disk upload starts.

    ", + "ClientData$UploadEnd": "

    The time that the disk upload ends.

    ", + "DescribeSpotFleetRequestHistoryRequest$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$LastEvaluatedTime": "

    The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). All records up to this time were retrieved.

    If nextToken indicates that there are more results, this value is not present.

    ", + "DescribeSpotPriceHistoryRequest$StartTime": "

    The date and time, up to the past 90 days, from which to start retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotPriceHistoryRequest$EndTime": "

    The date and time, up to the current date, from which to stop retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "EbsInstanceBlockDevice$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "FlowLog$CreationTime": "

    The date and time the flow log was created.

    ", + "GetConsoleOutputResult$Timestamp": "

    The time the output was last updated.

    ", + "GetPasswordDataResult$Timestamp": "

    The time the data was last updated.

    ", + "HistoryRecord$Timestamp": "

    The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "Instance$LaunchTime": "

    The time the instance was launched.

    ", + "InstanceNetworkInterfaceAttachment$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "InstanceStatusDetails$ImpairedSince": "

    The time when a status check failed. For an instance that was launched and impaired, this is the time when the instance was launched.

    ", + "InstanceStatusEvent$NotBefore": "

    The earliest scheduled start time for the event.

    ", + "InstanceStatusEvent$NotAfter": "

    The latest scheduled end time for the event.

    ", + "NetworkInterfaceAttachment$AttachTime": "

    The timestamp indicating when the attachment initiated.

    ", + "ReportInstanceStatusRequest$StartTime": "

    The time at which the reported instance health state began.

    ", + "ReportInstanceStatusRequest$EndTime": "

    The time at which the reported instance health state ended.

    ", + "RequestSpotInstancesRequest$ValidFrom": "

    The start date of the request. If this is a one-time request, the request becomes active at this date and time and remains active until all instances launch, the request expires, or the request is canceled. If the request is persistent, the request becomes active at this date and time and remains active until it expires or is canceled.

    Default: The request is effective indefinitely.

    ", + "RequestSpotInstancesRequest$ValidUntil": "

    The end date of the request. If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date and time is reached.

    Default: The request is effective indefinitely.

    ", + "ReservedInstances$Start": "

    The date and time the Reserved Instance started.

    ", + "ReservedInstances$End": "

    The time when the Reserved Instance expires.

    ", + "ReservedInstancesListing$CreateDate": "

    The time the listing was created.

    ", + "ReservedInstancesListing$UpdateDate": "

    The last modified timestamp of the listing.

    ", + "ReservedInstancesModification$CreateDate": "

    The time when the modification request was created.

    ", + "ReservedInstancesModification$UpdateDate": "

    The time when the modification request was last updated.

    ", + "ReservedInstancesModification$EffectiveDate": "

    The time for the modification to become effective.

    ", + "Snapshot$StartTime": "

    The time stamp when the snapshot was initiated.

    ", + "SpotFleetRequestConfigData$ValidFrom": "

    The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately.

    ", + "SpotFleetRequestConfigData$ValidUntil": "

    The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request.

    ", + "SpotInstanceRequest$ValidFrom": "

    The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). If this is a one-time request, the request becomes active at this date and time and remains active until all instances launch, the request expires, or the request is canceled. If the request is persistent, the request becomes active at this date and time and remains active until it expires or is canceled.

    ", + "SpotInstanceRequest$ValidUntil": "

    The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached.

    ", + "SpotInstanceRequest$CreateTime": "

    The date and time when the Spot instance request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotInstanceStatus$UpdateTime": "

    The date and time of the most recent status update, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotPrice$Timestamp": "

    The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "VgwTelemetry$LastStatusChange": "

    The date and time of the last change in status.

    ", + "Volume$CreateTime": "

    The time stamp when volume creation was initiated.

    ", + "VolumeAttachment$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "VolumeStatusEvent$NotBefore": "

    The earliest start time of the event.

    ", + "VolumeStatusEvent$NotAfter": "

    The latest end time of the event.

    ", + "VpcEndpoint$CreationTimestamp": "

    The date and time the VPC endpoint was created.

    ", + "VpcPeeringConnection$ExpirationTime": "

    The time that an unaccepted VPC peering connection will expire.

    " + } + }, + "DeleteCustomerGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeleteDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "DeleteFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "DeleteFlowLogsResult": { + "base": null, + "refs": { + } + }, + "DeleteInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeleteKeyPairRequest": { + "base": null, + "refs": { + } + }, + "DeleteNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "DeleteNetworkAclRequest": { + "base": null, + "refs": { + } + }, + "DeleteNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "DeletePlacementGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteRouteRequest": { + "base": null, + "refs": { + } + }, + "DeleteRouteTableRequest": { + "base": null, + "refs": { + } + }, + "DeleteSecurityGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteSnapshotRequest": { + "base": null, + "refs": { + } + }, + "DeleteSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DeleteSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DeleteSubnetRequest": { + "base": null, + "refs": { + } + }, + "DeleteTagsRequest": { + "base": null, + "refs": { + } + }, + "DeleteVolumeRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcEndpointsRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcEndpointsResult": { + "base": null, + "refs": { + } + }, + "DeleteVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "DeleteVpcRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnConnectionRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnConnectionRouteRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeregisterImageRequest": { + "base": null, + "refs": { + } + }, + "DescribeAccountAttributesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAccountAttributesResult": { + "base": null, + "refs": { + } + }, + "DescribeAddressesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAddressesResult": { + "base": null, + "refs": { + } + }, + "DescribeAvailabilityZonesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAvailabilityZonesResult": { + "base": null, + "refs": { + } + }, + "DescribeBundleTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeBundleTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeClassicLinkInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeClassicLinkInstancesResult": { + "base": null, + "refs": { + } + }, + "DescribeConversionTaskList": { + "base": null, + "refs": { + "DescribeConversionTasksResult$ConversionTasks": "

    Information about the conversion tasks.

    " + } + }, + "DescribeConversionTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeConversionTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeCustomerGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeCustomerGatewaysResult": { + "base": null, + "refs": { + } + }, + "DescribeDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDhcpOptionsResult": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "DescribeFlowLogsResult": { + "base": null, + "refs": { + } + }, + "DescribeImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeImagesRequest": { + "base": null, + "refs": { + } + }, + "DescribeImagesResult": { + "base": null, + "refs": { + } + }, + "DescribeImportImageTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeImportImageTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeImportSnapshotTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeImportSnapshotTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstanceStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstanceStatusResult": { + "base": null, + "refs": { + } + }, + "DescribeInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstancesResult": { + "base": null, + "refs": { + } + }, + "DescribeInternetGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeInternetGatewaysResult": { + "base": null, + "refs": { + } + }, + "DescribeKeyPairsRequest": { + "base": null, + "refs": { + } + }, + "DescribeKeyPairsResult": { + "base": null, + "refs": { + } + }, + "DescribeMovingAddressesRequest": { + "base": null, + "refs": { + } + }, + 
"DescribeMovingAddressesResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkAclsRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkAclsResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfacesRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfacesResult": { + "base": null, + "refs": { + } + }, + "DescribePlacementGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribePlacementGroupsResult": { + "base": null, + "refs": { + } + }, + "DescribePrefixListsRequest": { + "base": null, + "refs": { + } + }, + "DescribePrefixListsResult": { + "base": null, + "refs": { + } + }, + "DescribeRegionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeRegionsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesListingsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesListingsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesModificationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesModificationsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesOfferingsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesOfferingsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesResult": { + "base": null, + "refs": { + } + }, + "DescribeRouteTablesRequest": { + "base": null, + "refs": { + } + }, + "DescribeRouteTablesResult": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupsResult": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + 
"DescribeSnapshotAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotsResult": { + "base": null, + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesResponse": { + "base": "

    Contains the output of DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsResult": { + "base": "

    Contains the output of DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryResult": { + "base": "

    Contains the output of DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeSubnetsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSubnetsResult": { + "base": null, + "refs": { + } + }, + "DescribeTagsRequest": { + "base": null, + "refs": { + } + }, + "DescribeTagsResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumeAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumeAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumeStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumeStatusResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumesRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumesResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointServicesRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointServicesResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcPeeringConnectionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcPeeringConnectionsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpnConnectionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpnConnectionsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpnGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpnGatewaysResult": { + "base": null, + "refs": { + } + }, + "DetachClassicLinkVpcRequest": { + "base": null, + "refs": { + } + }, + "DetachClassicLinkVpcResult": { + "base": 
null, + "refs": { + } + }, + "DetachInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "DetachNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "DetachVolumeRequest": { + "base": null, + "refs": { + } + }, + "DetachVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeviceType": { + "base": null, + "refs": { + "Image$RootDeviceType": "

    The type of root device used by the AMI. The AMI can use an EBS volume or an instance store volume.

    ", + "Instance$RootDeviceType": "

    The root device type used by the AMI. The AMI can use an EBS volume or an instance store volume.

    " + } + }, + "DhcpConfiguration": { + "base": "

    Describes a DHCP configuration option.

    ", + "refs": { + "DhcpConfigurationList$member": null + } + }, + "DhcpConfigurationList": { + "base": null, + "refs": { + "DhcpOptions$DhcpConfigurations": "

    One or more DHCP options in the set.

    " + } + }, + "DhcpOptions": { + "base": "

    Describes a set of DHCP options.

    ", + "refs": { + "CreateDhcpOptionsResult$DhcpOptions": "

    A set of DHCP options.

    ", + "DhcpOptionsList$member": null + } + }, + "DhcpOptionsIdStringList": { + "base": null, + "refs": { + "DescribeDhcpOptionsRequest$DhcpOptionsIds": "

    The IDs of one or more DHCP options sets.

    Default: Describes all your DHCP options sets.

    " + } + }, + "DhcpOptionsList": { + "base": null, + "refs": { + "DescribeDhcpOptionsResult$DhcpOptions": "

    Information about one or more DHCP options sets.

    " + } + }, + "DisableVgwRoutePropagationRequest": { + "base": null, + "refs": { + } + }, + "DisableVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "DisableVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "DisassociateAddressRequest": { + "base": null, + "refs": { + } + }, + "DisassociateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "DiskImage": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImageList$member": null + } + }, + "DiskImageDescription": { + "base": "

    Describes a disk image.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Image": "

    The image.

    ", + "ImportVolumeTaskDetails$Image": "

    The image.

    " + } + }, + "DiskImageDetail": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImage$Image": "

    Information about the disk image.

    ", + "ImportVolumeRequest$Image": "

    The disk image.

    " + } + }, + "DiskImageFormat": { + "base": null, + "refs": { + "DiskImageDescription$Format": "

    The disk image format.

    ", + "DiskImageDetail$Format": "

    The disk image format.

    ", + "ExportToS3Task$DiskImageFormat": "

    The format for the exported image.

    ", + "ExportToS3TaskSpecification$DiskImageFormat": "

    The format for the exported image.

    " + } + }, + "DiskImageList": { + "base": null, + "refs": { + "ImportInstanceRequest$DiskImages": "

    The disk image.

    " + } + }, + "DiskImageVolumeDescription": { + "base": "

    Describes a disk image volume.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Volume": "

    The volume.

    ", + "ImportVolumeTaskDetails$Volume": "

    The volume.

    " + } + }, + "DomainType": { + "base": null, + "refs": { + "Address$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    ", + "AllocateAddressRequest$Domain": "

    Set to vpc to allocate the address for use with instances in a VPC.

    Default: The address is for use with instances in EC2-Classic.

    ", + "AllocateAddressResult$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    " + } + }, + "Double": { + "base": null, + "refs": { + "ClientData$UploadSize": "

    The size of the uploaded disk image, in GiB.

    ", + "PriceSchedule$Price": "

    The fixed price for the term.

    ", + "PriceScheduleSpecification$Price": "

    The fixed price for the term.

    ", + "PricingDetail$Price": "

    The price per instance.

    ", + "RecurringCharge$Amount": "

    The amount of the recurring charge.

    ", + "ReservedInstanceLimitPrice$Amount": "

    Used for Reserved Instance Marketplace offerings. Specifies the limit price on the total order (instanceCount * price).

    ", + "SnapshotDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SnapshotTaskDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SpotFleetLaunchSpecification$WeightedCapacity": "

    The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms (instances or a performance characteristic such as vCPUs, memory, or I/O).

    If the target capacity divided by this value is not a whole number, we round the number of instances to the next whole number. If this value is not specified, the default is 1.

    " + } + }, + "EbsBlockDevice": { + "base": "

    Describes a block device for an EBS volume.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDevice": { + "base": "

    Describes a parameter used to set up an EBS volume in a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDeviceSpecification": { + "base": null, + "refs": { + "InstanceBlockDeviceMappingSpecification$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EnableVgwRoutePropagationRequest": { + "base": null, + "refs": { + } + }, + "EnableVolumeIORequest": { + "base": null, + "refs": { + } + }, + "EnableVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "EnableVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "EventCode": { + "base": null, + "refs": { + "InstanceStatusEvent$Code": "

    The event code.

    " + } + }, + "EventInformation": { + "base": "

    Describes a Spot fleet event.

    ", + "refs": { + "HistoryRecord$EventInformation": "

    Information about the event.

    " + } + }, + "EventType": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryRequest$EventType": "

    The type of events to describe. By default, all events are described.

    ", + "HistoryRecord$EventType": "

    The event type.

    • error - Indicates an error with the Spot fleet request.

    • fleetRequestChange - Indicates a change in the status or configuration of the Spot fleet request.

    • instanceChange - Indicates that an instance was launched or terminated.

    " + } + }, + "ExecutableByStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ExecutableUsers": "

    Scopes the images by users with explicit launch permissions. Specify an AWS account ID, self (the sender of the request), or all (public AMIs).

    " + } + }, + "ExportEnvironment": { + "base": null, + "refs": { + "CreateInstanceExportTaskRequest$TargetEnvironment": "

    The target virtualization environment.

    ", + "InstanceExportDetails$TargetEnvironment": "

    The target virtualization environment.

    " + } + }, + "ExportTask": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskResult$ExportTask": "

    Information about the instance export task.

    ", + "ExportTaskList$member": null + } + }, + "ExportTaskIdStringList": { + "base": null, + "refs": { + "DescribeExportTasksRequest$ExportTaskIds": "

    One or more export task IDs.

    " + } + }, + "ExportTaskList": { + "base": null, + "refs": { + "DescribeExportTasksResult$ExportTasks": "

    Information about the export tasks.

    " + } + }, + "ExportTaskState": { + "base": null, + "refs": { + "ExportTask$State": "

    The state of the export task.

    " + } + }, + "ExportToS3Task": { + "base": "

    Describes the format and location for an instance export task.

    ", + "refs": { + "ExportTask$ExportToS3Task": "

    Information about the export task.

    " + } + }, + "ExportToS3TaskSpecification": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskRequest$ExportToS3Task": "

    The format and location for an instance export task.

    " + } + }, + "Filter": { + "base": "

    A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria, such as tags, attributes, or IDs.

    ", + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeAddressesRequest$Filters": "

    One or more filters. Filter names and values are case-sensitive.

    • allocation-id - [EC2-VPC] The allocation ID for the address.

    • association-id - [EC2-VPC] The association ID for the address.

    • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

    • instance-id - The ID of the instance the address is associated with, if any.

    • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

    • network-interface-owner-id - The AWS account ID of the owner.

    • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

    • public-ip - The Elastic IP address.

    ", + "DescribeAvailabilityZonesRequest$Filters": "

    One or more filters.

    • message - Information about the Availability Zone.

    • region-name - The name of the region for the Availability Zone (for example, us-east-1).

    • state - The state of the Availability Zone (available | impaired | unavailable).

    • zone-name - The name of the Availability Zone (for example, us-east-1a).

    ", + "DescribeBundleTasksRequest$Filters": "

    One or more filters.

    • bundle-id - The ID of the bundle task.

    • error-code - If the task failed, the error code returned.

    • error-message - If the task failed, the error message returned.

    • instance-id - The ID of the instance.

    • progress - The level of task completion, as a percentage (for example, 20%).

    • s3-bucket - The Amazon S3 bucket to store the AMI.

    • s3-prefix - The beginning of the AMI name.

    • start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).

    • state - The state of the task (pending | waiting-for-shutdown | bundling | storing | cancelling | complete | failed).

    • update-time - The time of the most recent update for the task.

    ", + "DescribeClassicLinkInstancesRequest$Filters": "

    One or more filters.

    • group-id - The ID of a VPC security group that's associated with the instance.

    • instance-id - The ID of the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC that the instance is linked to.

    ", + "DescribeConversionTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeCustomerGatewaysRequest$Filters": "

    One or more filters.

    • bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    • customer-gateway-id - The ID of the customer gateway.

    • ip-address - The IP address of the customer gateway's Internet-routable external interface.

    • state - The state of the customer gateway (pending | available | deleting | deleted).

    • type - The type of customer gateway. Currently, the only supported type is ipsec.1.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeDhcpOptionsRequest$Filters": "

    One or more filters.

    • dhcp-options-id - The ID of a set of DHCP options.

    • key - The key for one of the options (for example, domain-name).

    • value - The value for one of the options.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeFlowLogsRequest$Filter": "

    One or more filters.

    • deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).

    • flow-log-id - The ID of the flow log.

    • log-group-name - The name of the log group.

    • resource-id - The ID of the VPC, subnet, or network interface.

    • traffic-type - The type of traffic (ACCEPT | REJECT | ALL)

    ", + "DescribeImagesRequest$Filters": "

    One or more filters.

    • architecture - The image architecture (i386 | x86_64).

    • block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh).

    • block-device-mapping.snapshot-id - The ID of the snapshot used for the EBS volume.

    • block-device-mapping.volume-size - The volume size of the EBS volume, in GiB.

    • block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | standard | io1).

    • description - The description of the image (provided during image creation).

    • hypervisor - The hypervisor type (ovm | xen).

    • image-id - The ID of the image.

    • image-type - The image type (machine | kernel | ramdisk).

    • is-public - A Boolean that indicates whether the image is public.

    • kernel-id - The kernel ID.

    • manifest-location - The location of the image manifest.

    • name - The name of the AMI (provided during image creation).

    • owner-alias - The AWS account alias (for example, amazon).

    • owner-id - The AWS account ID of the image owner.

    • platform - The platform. To only list Windows-based AMIs, use windows.

    • product-code - The product code.

    • product-code.type - The type of the product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • root-device-name - The name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • state - The state of the image (available | pending | failed).

    • state-reason-code - The reason code for the state change.

    • state-reason-message - The message for the state change.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • virtualization-type - The virtualization type (paravirtual | hvm).

    ", + "DescribeImportImageTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeImportSnapshotTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeInstanceStatusRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone of the instance.

    • event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop).

    • event.description - A description of the event.

    • event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data).

    • instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    • system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data).

    • system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    ", + "DescribeInstancesRequest$Filters": "

    One or more filters.

    • architecture - The instance architecture (i386 | x86_64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • client-token - The idempotency token you provided when you launched the instance.

    • dns-name - The public DNS name of the instance.

    • group-id - The ID of the security group for the instance. EC2-Classic only.

    • group-name - The name of the security group for the instance. EC2-Classic only.

    • hypervisor - The hypervisor type of the instance (ovm | xen).

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance (spot).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IP address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched.

    • monitoring-state - Indicates whether monitoring is enabled for the instance (disabled | enabled).

    • owner-id - The AWS account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • platform - The platform. Use windows if you have Windows instances; otherwise, leave blank.

    • private-dns-name - The private DNS name of the instance.

    • private-ip-address - The private IP address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you'll get one reservation ID. If you launch ten instances using the same launch request, you'll also get one reservation ID.

    • root-device-name - The name of the root device for the instance (for example, /dev/sda1 or /dev/xvda).

    • root-device-type - The type of root device that the instance uses (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot Instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource, where tag:key is the tag's key.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • tenancy - The tenancy of an instance (dedicated | default).

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.

    • network-interface.description - The description of the network interface.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • network-interface.network-interface.id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

    • network-interface.status - The status of the network interface (available | in-use).

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface.private-dns-name - The private DNS name of the network interface.

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.addresses.private-ip-address - The private IP address associated with the network interface.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.addresses.primary - Specifies whether the IP address of the network interface is the primary private IP address.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    ", + "DescribeInternetGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.

    • attachment.vpc-id - The ID of an attached VPC.

    • internet-gateway-id - The ID of the Internet gateway.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeKeyPairsRequest$Filters": "

    One or more filters.

    • fingerprint - The fingerprint of the key pair.

    • key-name - The name of the key pair.

    ", + "DescribeMovingAddressesRequest$Filters": "

    One or more filters.

    • moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic).

    ", + "DescribeNetworkAclsRequest$Filters": "

    One or more filters.

    • association.association-id - The ID of an association for the ACL.

    • association.network-acl-id - The ID of the network ACL involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • default - Indicates whether the ACL is the default network ACL for the VPC.

    • entry.cidr - The CIDR range specified in the entry.

    • entry.egress - Indicates whether the entry applies to egress traffic.

    • entry.icmp.code - The ICMP code specified in the entry, if any.

    • entry.icmp.type - The ICMP type specified in the entry, if any.

    • entry.port-range.from - The start of the port range specified in the entry.

    • entry.port-range.to - The end of the port range specified in the entry.

    • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

    • entry.rule-action - Allows or denies the matching traffic (allow | deny).

    • entry.rule-number - The number of an entry (in other words, rule) in the ACL's set of entries.

    • network-acl-id - The ID of the network ACL.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network ACL.

    ", + "DescribeNetworkInterfacesRequest$Filters": "

    One or more filters.

    • addresses.private-ip-address - The private IP addresses associated with the network interface.

    • addresses.primary - Whether the private IP address is the primary IP address associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • attachment.attach.time - The time that the network interface was attached to an instance.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • group-name - The name of a security group associated with the network interface.

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • owner-id - The AWS account ID of the network interface owner.

    • private-ip-address - The private IP address or addresses of the network interface.

    • private-dns-name - The private DNS name of the network interface.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

    • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform Network Address Translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network interface.

    ", + "DescribePlacementGroupsRequest$Filters": "

    One or more filters.

    • group-name - The name of the placement group.

    • state - The state of the placement group (pending | available | deleting | deleted).

    • strategy - The strategy of the placement group (cluster).

    ", + "DescribePrefixListsRequest$Filters": "

    One or more filters.

    • prefix-list-id: The ID of a prefix list.

    • prefix-list-name: The name of a prefix list.

    ", + "DescribeRegionsRequest$Filters": "

    One or more filters.

    • endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com).

    • region-name - The name of the region (for example, us-east-1).

    ", + "DescribeReservedInstancesListingsRequest$Filters": "

    One or more filters.

    • reserved-instances-id - The ID of the Reserved Instances.

    • reserved-instances-listing-id - The ID of the Reserved Instances listing.

    • status - The status of the Reserved Instance listing (pending | active | cancelled | closed).

    • status-message - The reason for the status.

    ", + "DescribeReservedInstancesModificationsRequest$Filters": "

    One or more filters.

    • client-token - The idempotency token for the modification request.

    • create-date - The time when the modification request was created.

    • effective-date - The time when the modification becomes effective.

    • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.

    • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.

    • modification-result.target-configuration.instance-count - The number of new Reserved Instances.

    • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.

    • modification-result.target-configuration.platform - The network platform of the new Reserved Instances (EC2-Classic | EC2-VPC).

    • reserved-instances-id - The ID of the Reserved Instances modified.

    • reserved-instances-modification-id - The ID of the modification request.

    • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).

    • status-message - The reason for the status.

    • update-date - The time when the modification request was last updated.

    ", + "DescribeReservedInstancesOfferingsRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type on which the Reserved Instance can be used.

    • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from AWS and Reserved Instance Marketplace are listed.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC))

    • reserved-instances-offering-id - The Reserved Instances offering ID.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeReservedInstancesRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    • end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type on which the Reserved Instance can be used.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC)).

    • reserved-instances-id - The ID of the Reserved Instance.

    • start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    • state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeRouteTablesRequest$Filters": "

    One or more filters.

    • association.route-table-association-id - The ID of an association for the route table.

    • association.route-table-id - The ID of the route table involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • association.main - Indicates whether the route table is the main route table for the VPC.

    • route-table-id - The ID of the route table.

    • route.destination-cidr-block - The CIDR range specified in a route in the table.

    • route.destination-prefix-list-id - The ID (prefix) of the AWS service specified in a route in the table.

    • route.gateway-id - The ID of a gateway specified in a route in the table.

    • route.instance-id - The ID of an instance specified in a route in the table.

    • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

    • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

    • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the route table.

    ", + "DescribeSecurityGroupsRequest$Filters": "

    One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

    • description - The description of the security group.

    • egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which the security group allows access.

    • group-id - The ID of the security group.

    • group-name - The name of the security group.

    • ip-permission.cidr - A CIDR range that has been granted permission.

    • ip-permission.from-port - The start of port range for the TCP and UDP protocols, or an ICMP type number.

    • ip-permission.group-id - The ID of a security group that has been granted permission.

    • ip-permission.group-name - The name of a security group that has been granted permission.

    • ip-permission.protocol - The IP protocol for the permission (tcp | udp | icmp or a protocol number).

    • ip-permission.to-port - The end of port range for the TCP and UDP protocols, or an ICMP code.

    • ip-permission.user-id - The ID of an AWS account that has been granted permission.

    • owner-id - The AWS account ID of the owner of the security group.

    • tag-key - The key of a tag assigned to the security group.

    • tag-value - The value of a tag assigned to the security group.

    • vpc-id - The ID of the VPC specified when the security group was created.

    ", + "DescribeSnapshotsRequest$Filters": "

    One or more filters.

    • description - A description of the snapshot.

    • owner-alias - The AWS account alias (for example, amazon) that owns the snapshot.

    • owner-id - The ID of the AWS account that owns the snapshot.

    • progress - The progress of the snapshot, as a percentage (for example, 80%).

    • snapshot-id - The snapshot ID.

    • start-time - The time stamp when the snapshot was initiated.

    • status - The status of the snapshot (pending | completed | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The ID of the volume the snapshot is for.

    • volume-size - The size of the volume, in GiB.

    ", + "DescribeSpotInstanceRequestsRequest$Filters": "

    One or more filters.

    • availability-zone-group - The Availability Zone group.

    • create-time - The time stamp when the Spot instance request was created.

    • fault-code - The fault code related to the request.

    • fault-message - The fault message related to the request.

    • instance-id - The ID of the instance that fulfilled the request.

    • launch-group - The Spot instance launch group.

    • launch.block-device-mapping.delete-on-termination - Indicates whether the Amazon EBS volume is deleted on instance termination.

    • launch.block-device-mapping.device-name - The device name for the Amazon EBS volume (for example, /dev/sdh).

    • launch.block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume.

    • launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, in GiB.

    • launch.block-device-mapping.volume-type - The type of the Amazon EBS volume (gp2 | standard | io1).

    • launch.group-id - The security group for the instance.

    • launch.image-id - The ID of the AMI.

    • launch.instance-type - The type of instance (for example, m1.small).

    • launch.kernel-id - The kernel ID.

    • launch.key-name - The name of the key pair the instance launched with.

    • launch.monitoring-enabled - Whether monitoring is enabled for the Spot instance.

    • launch.ramdisk-id - The RAM disk ID.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.device-index - The index of the device for the network interface attachment on the instance.

    • network-interface.subnet-id - The ID of the subnet for the instance.

    • network-interface.description - A description of the network interface.

    • network-interface.private-ip-address - The primary private IP address of the network interface.

    • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

    • network-interface.group-id - The ID of the security group associated with the network interface.

    • network-interface.group-name - The name of the security group associated with the network interface.

    • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

    • product-description - The product description associated with the instance (Linux/UNIX | Windows).

    • spot-instance-request-id - The Spot instance request ID.

    • spot-price - The maximum hourly price for any Spot instance launched to fulfill the request.

    • state - The state of the Spot instance request (open | active | closed | cancelled | failed). Spot bid status information can help you track your Amazon EC2 Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    • status-code - The short code describing the most recent evaluation of your Spot instance request.

    • status-message - The message explaining the status of the Spot instance request.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of Spot instance request (one-time | persistent).

    • launched-availability-zone - The Availability Zone in which the bid is launched.

    • valid-from - The start date of the request.

    • valid-until - The end date of the request.

    ", + "DescribeSpotPriceHistoryRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone for which prices should be returned.

    • instance-type - The type of instance (for example, m1.small).

    • product-description - The product description for the Spot price (Linux/UNIX | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).

    • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).

    • timestamp - The timestamp of the Spot price history, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater than or less than comparison is not supported.

    ", + "DescribeSubnetsRequest$Filters": "

    One or more filters.

    • availabilityZone - The Availability Zone for the subnet. You can also use availability-zone as the filter name.

    • available-ip-address-count - The number of IP addresses in the subnet that are available.

    • cidrBlock - The CIDR block of the subnet. The CIDR block you specify must exactly match the subnet's CIDR block for information to be returned for the subnet. You can also use cidr or cidr-block as the filter names.

    • defaultForAz - Indicates whether this is the default subnet for the Availability Zone. You can also use default-for-az as the filter name.

    • state - The state of the subnet (pending | available).

    • subnet-id - The ID of the subnet.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the subnet.

    ", + "DescribeTagsRequest$Filters": "

    One or more filters.

    • key - The tag key.

    • resource-id - The resource ID.

    • resource-type - The resource type (customer-gateway | dhcp-options | image | instance | internet-gateway | network-acl | network-interface | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpn-connection | vpn-gateway).

    • value - The tag value.

    ", + "DescribeVolumeStatusRequest$Filters": "

    One or more filters.

    • action.code - The action code for the event (for example, enable-volume-io).

    • action.description - A description of the action.

    • action.event-id - The event ID associated with the action.

    • availability-zone - The Availability Zone of the instance.

    • event.description - A description of the event.

    • event.event-id - The event ID.

    • event.event-type - The event type (for io-enabled: passed | failed; for io-performance: io-performance:degraded | io-performance:severely-degraded | io-performance:stalled).

    • event.not-after - The latest end time for the event.

    • event.not-before - The earliest start time for the event.

    • volume-status.details-name - The cause for volume-status.status (io-enabled | io-performance).

    • volume-status.details-status - The status of volume-status.details-name (for io-enabled: passed | failed; for io-performance: normal | degraded | severely-degraded | stalled).

    • volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data).

    ", + "DescribeVolumesRequest$Filters": "

    One or more filters.

    • attachment.attach-time - The time stamp when the attachment initiated.

    • attachment.delete-on-termination - Whether the volume is deleted on instance termination.

    • attachment.device - The device name that is exposed to the instance (for example, /dev/sda1).

    • attachment.instance-id - The ID of the instance the volume is attached to.

    • attachment.status - The attachment state (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone in which the volume was created.

    • create-time - The time stamp when the volume was created.

    • encrypted - The encryption status of the volume.

    • size - The size of the volume, in GiB.

    • snapshot-id - The snapshot from which the volume was created.

    • status - The status of the volume (creating | available | in-use | deleting | deleted | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The volume ID.

    • volume-type - The Amazon EBS volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.

    ", + "DescribeVpcClassicLinkRequest$Filters": "

    One or more filters.

    • is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true | false).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeVpcEndpointsRequest$Filters": "

    One or more filters.

    • service-name: The name of the AWS service.

    • vpc-id: The ID of the VPC in which the endpoint resides.

    • vpc-endpoint-id: The ID of the endpoint.

    • vpc-endpoint-state: The state of the endpoint. (pending | available | deleting | deleted)

    ", + "DescribeVpcPeeringConnectionsRequest$Filters": "

    One or more filters.

    • accepter-vpc-info.cidr-block - The CIDR block of the peer VPC.

    • accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer VPC.

    • accepter-vpc-info.vpc-id - The ID of the peer VPC.

    • expiration-time - The expiration date and time for the VPC peering connection.

    • requester-vpc-info.cidr-block - The CIDR block of the requester's VPC.

    • requester-vpc-info.owner-id - The AWS account ID of the owner of the requester VPC.

    • requester-vpc-info.vpc-id - The ID of the requester VPC.

    • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleted | rejected).

    • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-peering-connection-id - The ID of the VPC peering connection.

    ", + "DescribeVpcsRequest$Filters": "

    One or more filters.

    • cidr - The CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

    • dhcp-options-id - The ID of a set of DHCP options.

    • isDefault - Indicates whether the VPC is the default VPC.

    • state - The state of the VPC (pending | available).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC.

    ", + "DescribeVpnConnectionsRequest$Filters": "

    One or more filters.

    • customer-gateway-configuration - The configuration information for the customer gateway.

    • customer-gateway-id - The ID of a customer gateway associated with the VPN connection.

    • state - The state of the VPN connection (pending | available | deleting | deleted).

    • option.static-routes-only - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).

    • route.destination-cidr-block - The destination CIDR block. This corresponds to the subnet used in a customer data center.

    • bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP device.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of VPN connection. Currently the only supported type is ipsec.1.

    • vpn-connection-id - The ID of the VPN connection.

    • vpn-gateway-id - The ID of a virtual private gateway associated with the VPN connection.

    ", + "DescribeVpnGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (attaching | attached | detaching | detached).

    • attachment.vpc-id - The ID of an attached VPC.

    • availability-zone - The Availability Zone for the virtual private gateway.

    • state - The state of the virtual private gateway (pending | available | deleting | deleted).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of virtual private gateway. Currently the only supported type is ipsec.1.

    • vpn-gateway-id - The ID of the virtual private gateway.

    " + } + }, + "Float": { + "base": null, + "refs": { + "ReservedInstances$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstances$FixedPrice": "

    The purchase price of the Reserved Instance.

    ", + "ReservedInstancesOffering$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstancesOffering$FixedPrice": "

    The purchase price of the Reserved Instance.

    " + } + }, + "FlowLog": { + "base": "

    Describes a flow log.

    ", + "refs": { + "FlowLogSet$member": null + } + }, + "FlowLogSet": { + "base": null, + "refs": { + "DescribeFlowLogsResult$FlowLogs": "

    Information about the flow logs.

    " + } + }, + "FlowLogsResourceType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$ResourceType": "

    The type of resource on which to create the flow log.

    " + } + }, + "GatewayType": { + "base": null, + "refs": { + "CreateCustomerGatewayRequest$Type": "

    The type of VPN connection that this customer gateway supports (ipsec.1).

    ", + "CreateVpnGatewayRequest$Type": "

    The type of VPN connection this virtual private gateway supports.

    ", + "VpnConnection$Type": "

    The type of VPN connection.

    ", + "VpnGateway$Type": "

    The type of VPN connection the virtual private gateway supports.

    " + } + }, + "GetConsoleOutputRequest": { + "base": null, + "refs": { + } + }, + "GetConsoleOutputResult": { + "base": null, + "refs": { + } + }, + "GetPasswordDataRequest": { + "base": null, + "refs": { + } + }, + "GetPasswordDataResult": { + "base": null, + "refs": { + } + }, + "GroupIdStringList": { + "base": null, + "refs": { + "AttachClassicLinkVpcRequest$Groups": "

    The ID of one or more of the VPC's security groups. You cannot specify security groups from a different VPC.

    ", + "DescribeSecurityGroupsRequest$GroupIds": "

    One or more security group IDs. Required for security groups in a nondefault VPC.

    Default: Describes all your security groups.

    ", + "ModifyInstanceAttributeRequest$Groups": "

    [EC2-VPC] Changes the security groups of the instance. You must specify at least one security group, even if it's just the default security group for the VPC. You must specify the security group ID, not the security group name.

    " + } + }, + "GroupIdentifier": { + "base": "

    Describes a security group.

    ", + "refs": { + "GroupIdentifierList$member": null + } + }, + "GroupIdentifierList": { + "base": null, + "refs": { + "ClassicLinkInstance$Groups": "

    A list of security groups.

    ", + "DescribeNetworkInterfaceAttributeResult$Groups": "

    The security groups associated with the network interface.

    ", + "Instance$SecurityGroups": "

    One or more security groups for the instance.

    ", + "InstanceAttribute$Groups": "

    The security groups associated with the instance.

    ", + "InstanceNetworkInterface$Groups": "

    One or more security groups.

    ", + "LaunchSpecification$SecurityGroups": "

    One or more security groups. To request an instance in a nondefault VPC, you must specify the ID of the security group. To request an instance in EC2-Classic or a default VPC, you can specify the name or the ID of the security group.

    ", + "NetworkInterface$Groups": "

    Any security groups for the network interface.

    ", + "Reservation$Groups": "

    One or more security groups.

    ", + "SpotFleetLaunchSpecification$SecurityGroups": "

    One or more security groups. To request an instance in a nondefault VPC, you must specify the ID of the security group. To request an instance in EC2-Classic or a default VPC, you can specify the name or the ID of the security group.

    " + } + }, + "GroupNameStringList": { + "base": null, + "refs": { + "DescribeSecurityGroupsRequest$GroupNames": "

    [EC2-Classic and default VPC only] One or more security group names. You can specify either the security group name or the security group ID. For security groups in a nondefault VPC, use the group-name filter to describe security groups by name.

    Default: Describes all your security groups.

    ", + "ModifySnapshotAttributeRequest$GroupNames": "

    The group to modify for the snapshot.

    " + } + }, + "HistoryRecord": { + "base": "

    Describes an event in the history of the Spot fleet request.

    ", + "refs": { + "HistoryRecords$member": null + } + }, + "HistoryRecords": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryResponse$HistoryRecords": "

    Information about the events in the history of the Spot fleet request.

    " + } + }, + "HypervisorType": { + "base": null, + "refs": { + "Image$Hypervisor": "

    The hypervisor type of the image.

    ", + "Instance$Hypervisor": "

    The hypervisor type of the instance.

    " + } + }, + "IamInstanceProfile": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "Instance$IamInstanceProfile": "

    The IAM instance profile associated with the instance.

    " + } + }, + "IamInstanceProfileSpecification": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "LaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RunInstancesRequest$IamInstanceProfile": "

    The IAM instance profile.

    ", + "SpotFleetLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RequestSpotLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    " + } + }, + "IcmpTypeCode": { + "base": "

    Describes the ICMP type and code.

    ", + "refs": { + "CreateNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying ICMP for the protocol.

    ", + "NetworkAclEntry$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code.

    ", + "ReplaceNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for the protocol.

    " + } + }, + "Image": { + "base": "

    Describes an image.

    ", + "refs": { + "ImageList$member": null + } + }, + "ImageAttribute": { + "base": "

    Describes an image attribute.

    ", + "refs": { + } + }, + "ImageAttributeName": { + "base": null, + "refs": { + "DescribeImageAttributeRequest$Attribute": "

    The AMI attribute.

    Note: Depending on your account privileges, the blockDeviceMapping attribute may return a Client.AuthFailure error. If this happens, use DescribeImages to get information about the block device mapping for the AMI.

    " + } + }, + "ImageDiskContainer": { + "base": "

    Describes the disk container object for an import image task.

    ", + "refs": { + "ImageDiskContainerList$member": null + } + }, + "ImageDiskContainerList": { + "base": null, + "refs": { + "ImportImageRequest$DiskContainers": "

    Information about the disk containers.

    " + } + }, + "ImageIdStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ImageIds": "

    One or more image IDs.

    Default: Describes all images available to you.

    " + } + }, + "ImageList": { + "base": null, + "refs": { + "DescribeImagesResult$Images": "

    Information about one or more images.

    " + } + }, + "ImageState": { + "base": null, + "refs": { + "Image$State": "

    The current state of the AMI. If the state is available, the image is successfully registered and can be used to launch an instance.

    " + } + }, + "ImageTypeValues": { + "base": null, + "refs": { + "Image$ImageType": "

    The type of image.

    " + } + }, + "ImportImageRequest": { + "base": null, + "refs": { + } + }, + "ImportImageResult": { + "base": null, + "refs": { + } + }, + "ImportImageTask": { + "base": "

    Describes an import image task.

    ", + "refs": { + "ImportImageTaskList$member": null + } + }, + "ImportImageTaskList": { + "base": null, + "refs": { + "DescribeImportImageTasksResult$ImportImageTasks": "

    A list of zero or more import image tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportInstanceLaunchSpecification": { + "base": "

    Describes the launch specification for VM import.

    ", + "refs": { + "ImportInstanceRequest$LaunchSpecification": "

    The launch specification.

    " + } + }, + "ImportInstanceRequest": { + "base": null, + "refs": { + } + }, + "ImportInstanceResult": { + "base": null, + "refs": { + } + }, + "ImportInstanceTaskDetails": { + "base": "

    Describes an import instance task.

    ", + "refs": { + "ConversionTask$ImportInstance": "

    If the task is for importing an instance, this contains information about the import instance task.

    " + } + }, + "ImportInstanceVolumeDetailItem": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ImportInstanceVolumeDetailSet$member": null + } + }, + "ImportInstanceVolumeDetailSet": { + "base": null, + "refs": { + "ImportInstanceTaskDetails$Volumes": "

    One or more volumes.

    " + } + }, + "ImportKeyPairRequest": { + "base": null, + "refs": { + } + }, + "ImportKeyPairResult": { + "base": null, + "refs": { + } + }, + "ImportSnapshotRequest": { + "base": null, + "refs": { + } + }, + "ImportSnapshotResult": { + "base": null, + "refs": { + } + }, + "ImportSnapshotTask": { + "base": "

    Describes an import snapshot task.

    ", + "refs": { + "ImportSnapshotTaskList$member": null + } + }, + "ImportSnapshotTaskList": { + "base": null, + "refs": { + "DescribeImportSnapshotTasksResult$ImportSnapshotTasks": "

    A list of zero or more import snapshot tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportTaskIdList": { + "base": null, + "refs": { + "DescribeImportImageTasksRequest$ImportTaskIds": "

    A list of import image task IDs.

    ", + "DescribeImportSnapshotTasksRequest$ImportTaskIds": "

    A list of import snapshot task IDs.

    " + } + }, + "ImportVolumeRequest": { + "base": null, + "refs": { + } + }, + "ImportVolumeResult": { + "base": null, + "refs": { + } + }, + "ImportVolumeTaskDetails": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ConversionTask$ImportVolume": "

    If the task is for importing a volume, this contains information about the import volume task.

    " + } + }, + "Instance": { + "base": "

    Describes an instance.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceAttribute": { + "base": "

    Describes an instance attribute.

    ", + "refs": { + } + }, + "InstanceAttributeName": { + "base": null, + "refs": { + "DescribeInstanceAttributeRequest$Attribute": "

    The instance attribute.

    ", + "ModifyInstanceAttributeRequest$Attribute": "

    The name of the attribute.

    ", + "ResetInstanceAttributeRequest$Attribute": "

    The attribute to reset.

    " + } + }, + "InstanceBlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMappingList$member": null + } + }, + "InstanceBlockDeviceMappingList": { + "base": null, + "refs": { + "Instance$BlockDeviceMappings": "

    Any block device mapping entries for the instance.

    ", + "InstanceAttribute$BlockDeviceMappings": "

    The block device mapping of the instance.

    " + } + }, + "InstanceBlockDeviceMappingSpecification": { + "base": "

    Describes a block device mapping entry.

    ", + "refs": { + "InstanceBlockDeviceMappingSpecificationList$member": null + } + }, + "InstanceBlockDeviceMappingSpecificationList": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$BlockDeviceMappings": "

    Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

    To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the Block Device Mapping when Launching an Instance in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "InstanceCount": { + "base": "

    Describes a Reserved Instance listing state.

    ", + "refs": { + "InstanceCountList$member": null + } + }, + "InstanceCountList": { + "base": null, + "refs": { + "ReservedInstancesListing$InstanceCounts": "

    The number of instances in this state.

    " + } + }, + "InstanceExportDetails": { + "base": "

    Describes an instance to export.

    ", + "refs": { + "ExportTask$InstanceExportDetails": "

    Information about the instance to export.

    " + } + }, + "InstanceIdStringList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesRequest$InstanceIds": "

    One or more instance IDs. Must be instances linked to a VPC through ClassicLink.

    ", + "DescribeInstanceStatusRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    Constraints: Maximum 100 explicitly specified instance IDs.

    ", + "DescribeInstancesRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    ", + "MonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "RebootInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "ReportInstanceStatusRequest$Instances": "

    One or more instances.

    ", + "StartInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "StopInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "TerminateInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "UnmonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    " + } + }, + "InstanceLifecycleType": { + "base": null, + "refs": { + "Instance$InstanceLifecycle": "

    Indicates whether this is a Spot Instance.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "Reservation$Instances": "

    One or more instances.

    " + } + }, + "InstanceMonitoring": { + "base": "

    Describes the monitoring information of the instance.

    ", + "refs": { + "InstanceMonitoringList$member": null + } + }, + "InstanceMonitoringList": { + "base": null, + "refs": { + "MonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    ", + "UnmonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    " + } + }, + "InstanceNetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceList$member": null + } + }, + "InstanceNetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "InstanceNetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "InstancePrivateIpAddress$Association": "

    The association information for an Elastic IP address for the network interface.

    " + } + }, + "InstanceNetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "InstanceNetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "InstanceNetworkInterfaceList": { + "base": null, + "refs": { + "Instance$NetworkInterfaces": "

    [EC2-VPC] One or more network interfaces for the instance.

    " + } + }, + "InstanceNetworkInterfaceSpecification": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceSpecificationList$member": null + } + }, + "InstanceNetworkInterfaceSpecificationList": { + "base": null, + "refs": { + "LaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RunInstancesRequest$NetworkInterfaces": "

    One or more network interfaces.

    ", + "SpotFleetLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RequestSpotLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    " + } + }, + "InstancePrivateIpAddress": { + "base": "

    Describes a private IP address.

    ", + "refs": { + "InstancePrivateIpAddressList$member": null + } + }, + "InstancePrivateIpAddressList": { + "base": null, + "refs": { + "InstanceNetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "InstanceState": { + "base": "

    Describes the current state of the instance.

    ", + "refs": { + "Instance$State": "

    The current state of the instance.

    ", + "InstanceStateChange$CurrentState": "

    The current state of the instance.

    ", + "InstanceStateChange$PreviousState": "

    The previous state of the instance.

    ", + "InstanceStatus$InstanceState": "

    The intended state of the instance. DescribeInstanceStatus requires that an instance be in the running state.

    " + } + }, + "InstanceStateChange": { + "base": "

    Describes an instance state change.

    ", + "refs": { + "InstanceStateChangeList$member": null + } + }, + "InstanceStateChangeList": { + "base": null, + "refs": { + "StartInstancesResult$StartingInstances": "

    Information about one or more started instances.

    ", + "StopInstancesResult$StoppingInstances": "

    Information about one or more stopped instances.

    ", + "TerminateInstancesResult$TerminatingInstances": "

    Information about one or more terminated instances.

    " + } + }, + "InstanceStateName": { + "base": null, + "refs": { + "InstanceState$Name": "

    The current state of the instance.

    " + } + }, + "InstanceStatus": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatusList$member": null + } + }, + "InstanceStatusDetails": { + "base": "

    Describes the instance status.

    ", + "refs": { + "InstanceStatusDetailsList$member": null + } + }, + "InstanceStatusDetailsList": { + "base": null, + "refs": { + "InstanceStatusSummary$Details": "

    The system instance health or application instance health.

    " + } + }, + "InstanceStatusEvent": { + "base": "

    Describes a scheduled event for an instance.

    ", + "refs": { + "InstanceStatusEventList$member": null + } + }, + "InstanceStatusEventList": { + "base": null, + "refs": { + "InstanceStatus$Events": "

    Any scheduled events associated with the instance.

    " + } + }, + "InstanceStatusList": { + "base": null, + "refs": { + "DescribeInstanceStatusResult$InstanceStatuses": "

    One or more instance status descriptions.

    " + } + }, + "InstanceStatusSummary": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatus$SystemStatus": "

    Reports impaired functionality that stems from issues related to the systems that support an instance, such as hardware failures and network connectivity problems.

    ", + "InstanceStatus$InstanceStatus": "

    Reports impaired functionality that stems from issues internal to the instance, such as impaired reachability.

    " + } + }, + "InstanceType": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$InstanceType": "

    The instance type on which the Reserved Instance can be used. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportInstanceLaunchSpecification$InstanceType": "

    The instance type. For more information about the instance types that you can import, see Before You Get Started in the Amazon Elastic Compute Cloud User Guide.

    ", + "Instance$InstanceType": "

    The instance type.

    ", + "InstanceTypeList$member": null, + "LaunchSpecification$InstanceType": "

    The instance type.

    ", + "ReservedInstances$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$InstanceType": "

    The instance type for the modified Reserved Instances.

    ", + "ReservedInstancesOffering$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "RunInstancesRequest$InstanceType": "

    The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    Default: m1.small

    ", + "SpotFleetLaunchSpecification$InstanceType": "

    The instance type.

    ", + "SpotPrice$InstanceType": "

    The instance type.

    ", + "RequestSpotLaunchSpecification$InstanceType": "

    The instance type.

    " + } + }, + "InstanceTypeList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$InstanceTypes": "

    Filters the results by the specified instance types.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AssignPrivateIpAddressesRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary IP addresses to assign to the network interface. You can't specify this parameter when also specifying private IP addresses.

    ", + "AttachNetworkInterfaceRequest$DeviceIndex": "

    The index of the device for the network interface attachment.

    ", + "AuthorizeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "AuthorizeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "AuthorizeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "AuthorizeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "CreateCustomerGatewayRequest$BgpAsn": "

    For devices that support BGP, the customer gateway's BGP ASN.

    Default: 65000

    ", + "CreateNetworkAclEntryRequest$RuleNumber": "

    The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.

    Constraints: Positive integer from 1 to 32766

    ", + "CreateNetworkInterfaceRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses to assign to a network interface. When you specify a number of secondary IP addresses, Amazon EC2 selects these IP addresses within the subnet range. You can't specify this option and specify more than one private IP address using privateIpAddresses.

    The number of IP addresses you can assign to a network interface varies by instance type. For more information, see Private IP Addresses Per ENI Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListingRequest$InstanceCount": "

    The number of instances that are a part of a Reserved Instance account to be listed in the Reserved Instance Marketplace. This number should be less than or equal to the instance count associated with the Reserved Instance ID specified in this call.

    ", + "CreateVolumeRequest$Size": "

    The size of the volume, in GiBs.

    Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 for io1 volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "CreateVolumeRequest$Iops": "

    Only valid for Provisioned IOPS (SSD) volumes. The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 30 IOPS/GiB.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes

    ", + "DeleteNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to delete.

    ", + "DescribeClassicLinkInstancesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeFlowLogsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the flow log IDs parameter in the same request.

    ", + "DescribeImportImageTasksRequest$MaxResults": "

    The maximum number of results to return in a single request.

    ", + "DescribeImportSnapshotTasksRequest$MaxResults": "

    The maximum number of results to return in a single request.

    ", + "DescribeInstanceStatusRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    ", + "DescribeInstancesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    ", + "DescribeMovingAddressesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value outside of this range, an error is returned.

    Default: If no value is provided, the default is 1000.

    ", + "DescribePrefixListsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value specified is greater than 1000, we return only 1000 items.

    ", + "DescribeReservedInstancesOfferingsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. The maximum is 100.

    Default: 100

    ", + "DescribeReservedInstancesOfferingsRequest$MaxInstanceCount": "

    The maximum number of instances to filter when searching for offerings.

    Default: 20

    ", + "DescribeSnapshotsRequest$MaxResults": "

    The maximum number of snapshot results returned by DescribeSnapshots in paginated output. When this parameter is used, DescribeSnapshots only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeSnapshots request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeSnapshots returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request.

    ", + "DescribeSpotFleetInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestsRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotPriceHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeTagsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned.

    ", + "DescribeVolumeStatusRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumeStatus in paginated output. When this parameter is used, the request only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumeStatus returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVolumesRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumes in paginated output. When this parameter is used, DescribeVolumes only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeVolumes request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumes returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVpcEndpointServicesRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeVpcEndpointsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "EbsBlockDevice$VolumeSize": "

    The size of the volume, in GiB.

    Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 for io1 volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "EbsBlockDevice$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose (SSD) volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose (SSD) baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and 3 to 10000 for General Purpose (SSD) volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create standard or gp2 volumes.

    ", + "IcmpTypeCode$Type": "

    The ICMP type. A value of -1 means all types.

    ", + "IcmpTypeCode$Code": "

    The ICMP code. A value of -1 means all codes for the specified ICMP type.

    ", + "Instance$AmiLaunchIndex": "

    The AMI launch index, which can be used to find this instance in the launch group.

    ", + "InstanceCount$InstanceCount": "

    The number of listed Reserved Instances in the state specified by the state.

    ", + "InstanceNetworkInterfaceAttachment$DeviceIndex": "

    The index of the device on the instance for the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$DeviceIndex": "

    The index of the device on the instance for the network interface attachment. If you are specifying a network interface in a RunInstances request, you must provide the device index.

    ", + "InstanceNetworkInterfaceSpecification$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option.

    ", + "InstanceState$Code": "

    The low byte represents the state. The high byte is an opaque internal value and should be ignored.

    • 0 : pending

    • 16 : running

    • 32 : shutting-down

    • 48 : terminated

    • 64 : stopping

    • 80 : stopped

    ", + "IpPermission$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

    ", + "IpPermission$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code. A value of -1 indicates all ICMP codes for the specified ICMP type.

    ", + "NetworkAclEntry$RuleNumber": "

    The rule number for the entry. ACL entries are processed in ascending order by rule number.

    ", + "NetworkInterfaceAttachment$DeviceIndex": "

    The device index of the network interface attachment on the instance.

    ", + "PortRange$From": "

    The first port in the range.

    ", + "PortRange$To": "

    The last port in the range.

    ", + "PricingDetail$Count": "

    The number of instances available for the price.

    ", + "PurchaseReservedInstancesOfferingRequest$InstanceCount": "

    The number of Reserved Instances to purchase.

    ", + "ReplaceNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to replace.

    ", + "RequestSpotInstancesRequest$InstanceCount": "

    The maximum number of Spot instances to launch.

    Default: 1

    ", + "ReservedInstances$InstanceCount": "

    The number of Reserved Instances purchased.

    ", + "ReservedInstancesConfiguration$InstanceCount": "

    The number of modified Reserved Instances.

    ", + "RevokeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "RevokeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "RevokeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "RevokeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "RunInstancesRequest$MinCount": "

    The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

    ", + "RunInstancesRequest$MaxCount": "

    The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

    ", + "Snapshot$VolumeSize": "

    The size of the volume, in GiB.

    ", + "SpotFleetRequestConfigData$TargetCapacity": "

    The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O.

    ", + "Subnet$AvailableIpAddressCount": "

    The number of unused IP addresses in the subnet. Note that the IP addresses for any stopped instances are considered unavailable.

    ", + "VgwTelemetry$AcceptedRouteCount": "

    The number of accepted routes.

    ", + "Volume$Size": "

    The size of the volume, in GiBs.

    ", + "Volume$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose (SSD) volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose (SSD) baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and 3 to 10000 for General Purpose (SSD) volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create standard or gp2 volumes.

    " + } + }, + "InternetGateway": { + "base": "

    Describes an Internet gateway.

    ", + "refs": { + "CreateInternetGatewayResult$InternetGateway": "

    Information about the Internet gateway.

    ", + "InternetGatewayList$member": null + } + }, + "InternetGatewayAttachment": { + "base": "

    Describes the attachment of a VPC to an Internet gateway.

    ", + "refs": { + "InternetGatewayAttachmentList$member": null + } + }, + "InternetGatewayAttachmentList": { + "base": null, + "refs": { + "InternetGateway$Attachments": "

    Any VPCs attached to the Internet gateway.

    " + } + }, + "InternetGatewayList": { + "base": null, + "refs": { + "DescribeInternetGatewaysResult$InternetGateways": "

    Information about one or more Internet gateways.

    " + } + }, + "IpPermission": { + "base": "

    Describes a security group rule.

    ", + "refs": { + "IpPermissionList$member": null + } + }, + "IpPermissionList": { + "base": null, + "refs": { + "AuthorizeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "AuthorizeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. Can be used to specify multiple rules in a single command.

    ", + "RevokeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "RevokeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a source security group and a CIDR IP address range.

    ", + "SecurityGroup$IpPermissions": "

    One or more inbound rules associated with the security group.

    ", + "SecurityGroup$IpPermissionsEgress": "

    [EC2-VPC] One or more outbound rules associated with the security group.

    " + } + }, + "IpRange": { + "base": "

    Describes an IP range.

    ", + "refs": { + "IpRangeList$member": null + } + }, + "IpRangeList": { + "base": null, + "refs": { + "IpPermission$IpRanges": "

    One or more IP ranges.

    " + } + }, + "KeyNameStringList": { + "base": null, + "refs": { + "DescribeKeyPairsRequest$KeyNames": "

    One or more key pair names.

    Default: Describes all your key pairs.

    " + } + }, + "KeyPair": { + "base": "

    Describes a key pair.

    ", + "refs": { + } + }, + "KeyPairInfo": { + "base": "

    Describes a key pair.

    ", + "refs": { + "KeyPairList$member": null + } + }, + "KeyPairList": { + "base": null, + "refs": { + "DescribeKeyPairsResult$KeyPairs": "

    Information about one or more key pairs.

    " + } + }, + "LaunchPermission": { + "base": "

    Describes a launch permission.

    ", + "refs": { + "LaunchPermissionList$member": null + } + }, + "LaunchPermissionList": { + "base": null, + "refs": { + "ImageAttribute$LaunchPermissions": "

    One or more launch permissions.

    ", + "LaunchPermissionModifications$Add": "

    The AWS account ID to add to the list of launch permissions for the AMI.

    ", + "LaunchPermissionModifications$Remove": "

    The AWS account ID to remove from the list of launch permissions for the AMI.

    " + } + }, + "LaunchPermissionModifications": { + "base": "

    Describes a launch permission modification.

    ", + "refs": { + "ModifyImageAttributeRequest$LaunchPermission": "

    A launch permission modification.

    " + } + }, + "LaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "SpotInstanceRequest$LaunchSpecification": "

    Additional information for launching instances.

    " + } + }, + "LaunchSpecsList": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$LaunchSpecifications": "

    Information about the launch specifications for the Spot fleet request.

    " + } + }, + "ListingState": { + "base": null, + "refs": { + "InstanceCount$State": "

    The states of the listed Reserved Instances.

    " + } + }, + "ListingStatus": { + "base": null, + "refs": { + "ReservedInstancesListing$Status": "

    The status of the Reserved Instance listing.

    " + } + }, + "Long": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$MinDuration": "

    The minimum duration (in seconds) to filter when searching for offerings.

    Default: 2592000 (1 month)

    ", + "DescribeReservedInstancesOfferingsRequest$MaxDuration": "

    The maximum duration (in seconds) to filter when searching for offerings.

    Default: 94608000 (3 years)

    ", + "DiskImageDescription$Size": "

    The size of the disk image, in GiB.

    ", + "DiskImageDetail$Bytes": "

    The size of the disk image, in GiB.

    ", + "DiskImageVolumeDescription$Size": "

    The size of the volume, in GiB.

    ", + "ImportInstanceVolumeDetailItem$BytesConverted": "

    The number of bytes converted so far.

    ", + "ImportVolumeTaskDetails$BytesConverted": "

    The number of bytes converted so far.

    ", + "PriceSchedule$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "PriceScheduleSpecification$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "ReservedInstances$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "ReservedInstancesOffering$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "VolumeDetail$Size": "

    The size of the volume, in GiB.

    " + } + }, + "ModifyImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyReservedInstancesRequest": { + "base": null, + "refs": { + } + }, + "ModifyReservedInstancesResult": { + "base": null, + "refs": { + } + }, + "ModifySnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifySubnetAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVolumeAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcEndpointRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcEndpointResult": { + "base": null, + "refs": { + } + }, + "MonitorInstancesRequest": { + "base": null, + "refs": { + } + }, + "MonitorInstancesResult": { + "base": null, + "refs": { + } + }, + "Monitoring": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "Instance$Monitoring": "

    The monitoring information for the instance.

    ", + "InstanceMonitoring$Monitoring": "

    The monitoring information.

    " + } + }, + "MonitoringState": { + "base": null, + "refs": { + "Monitoring$State": "

    Indicates whether monitoring is enabled for the instance.

    " + } + }, + "MoveAddressToVpcRequest": { + "base": null, + "refs": { + } + }, + "MoveAddressToVpcResult": { + "base": null, + "refs": { + } + }, + "MoveStatus": { + "base": null, + "refs": { + "MovingAddressStatus$MoveStatus": "

    The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.

    " + } + }, + "MovingAddressStatus": { + "base": "

    Describes the status of a moving Elastic IP address.

    ", + "refs": { + "MovingAddressStatusSet$member": null + } + }, + "MovingAddressStatusSet": { + "base": null, + "refs": { + "DescribeMovingAddressesResult$MovingAddressStatuses": "

    The status for each Elastic IP address.

    " + } + }, + "NetworkAcl": { + "base": "

    Describes a network ACL.

    ", + "refs": { + "CreateNetworkAclResult$NetworkAcl": "

    Information about the network ACL.

    ", + "NetworkAclList$member": null + } + }, + "NetworkAclAssociation": { + "base": "

    Describes an association between a network ACL and a subnet.

    ", + "refs": { + "NetworkAclAssociationList$member": null + } + }, + "NetworkAclAssociationList": { + "base": null, + "refs": { + "NetworkAcl$Associations": "

    Any associations between the network ACL and one or more subnets.

    " + } + }, + "NetworkAclEntry": { + "base": "

    Describes an entry in a network ACL.

    ", + "refs": { + "NetworkAclEntryList$member": null + } + }, + "NetworkAclEntryList": { + "base": null, + "refs": { + "NetworkAcl$Entries": "

    One or more entries (rules) in the network ACL.

    " + } + }, + "NetworkAclList": { + "base": null, + "refs": { + "DescribeNetworkAclsResult$NetworkAcls": "

    Information about one or more network ACLs.

    " + } + }, + "NetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "CreateNetworkInterfaceResult$NetworkInterface": "

    Information about the network interface.

    ", + "NetworkInterfaceList$member": null + } + }, + "NetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "NetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "NetworkInterfacePrivateIpAddress$Association": "

    The association information for an Elastic IP address associated with the network interface.

    " + } + }, + "NetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Attachment": "

    The attachment (if any) of the network interface.

    ", + "NetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "NetworkInterfaceAttachmentChanges": { + "base": "

    Describes an attachment change.

    ", + "refs": { + "ModifyNetworkInterfaceAttributeRequest$Attachment": "

    Information about the interface attachment. If modifying the 'delete on termination' attribute, you must specify the ID of the interface attachment.

    " + } + }, + "NetworkInterfaceAttribute": { + "base": null, + "refs": { + "DescribeNetworkInterfaceAttributeRequest$Attribute": "

    The attribute of the network interface.

    " + } + }, + "NetworkInterfaceIdList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesRequest$NetworkInterfaceIds": "

    One or more network interface IDs.

    Default: Describes all your network interfaces.

    " + } + }, + "NetworkInterfaceList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesResult$NetworkInterfaces": "

    Information about one or more network interfaces.

    " + } + }, + "NetworkInterfacePrivateIpAddress": { + "base": "

    Describes the private IP address of a network interface.

    ", + "refs": { + "NetworkInterfacePrivateIpAddressList$member": null + } + }, + "NetworkInterfacePrivateIpAddressList": { + "base": null, + "refs": { + "NetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "NetworkInterfaceStatus": { + "base": null, + "refs": { + "InstanceNetworkInterface$Status": "

    The status of the network interface.

    ", + "NetworkInterface$Status": "

    The status of the network interface.

    " + } + }, + "OfferingTypeValues": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "DescribeReservedInstancesRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "ReservedInstances$OfferingType": "

    The Reserved Instance offering type.

    ", + "ReservedInstancesOffering$OfferingType": "

    The Reserved Instance offering type.

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$OperationType": "

    The operation type.

    ", + "ModifySnapshotAttributeRequest$OperationType": "

    The type of operation to perform to the attribute.

    " + } + }, + "OwnerStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$Owners": "

    Filters the images by the owner. Specify an AWS account ID, amazon (owner is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the sender of the request). Omitting this option returns all images for which you have launch permissions, regardless of ownership.

    ", + "DescribeSnapshotsRequest$OwnerIds": "

    Returns the snapshots owned by the specified owner. Multiple owners can be specified.

    " + } + }, + "PermissionGroup": { + "base": null, + "refs": { + "CreateVolumePermission$Group": "

    The specific group that is to be added or removed from a volume's list of create volume permissions.

    ", + "LaunchPermission$Group": "

    The name of the group.

    " + } + }, + "Placement": { + "base": "

    Describes the placement for the instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "Instance$Placement": "

    The location where the instance launched.

    ", + "RunInstancesRequest$Placement": "

    The placement for the instance.

    " + } + }, + "PlacementGroup": { + "base": "

    Describes a placement group.

    ", + "refs": { + "PlacementGroupList$member": null + } + }, + "PlacementGroupList": { + "base": null, + "refs": { + "DescribePlacementGroupsResult$PlacementGroups": "

    One or more placement groups.

    " + } + }, + "PlacementGroupState": { + "base": null, + "refs": { + "PlacementGroup$State": "

    The state of the placement group.

    " + } + }, + "PlacementGroupStringList": { + "base": null, + "refs": { + "DescribePlacementGroupsRequest$GroupNames": "

    One or more placement group names.

    Default: Describes all your placement groups, or only those otherwise specified.

    " + } + }, + "PlacementStrategy": { + "base": null, + "refs": { + "CreatePlacementGroupRequest$Strategy": "

    The placement strategy.

    ", + "PlacementGroup$Strategy": "

    The placement strategy.

    " + } + }, + "PlatformValues": { + "base": null, + "refs": { + "Image$Platform": "

    The value is Windows for Windows AMIs; otherwise blank.

    ", + "ImportInstanceRequest$Platform": "

    The instance operating system.

    ", + "ImportInstanceTaskDetails$Platform": "

    The instance operating system.

    ", + "Instance$Platform": "

    The value is Windows for Windows instances; otherwise blank.

    " + } + }, + "PortRange": { + "base": "

    Describes a range of ports.

    ", + "refs": { + "CreateNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "NetworkAclEntry$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "ReplaceNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to. Required if specifying 6 (TCP) or 17 (UDP) for the protocol.

    " + } + }, + "PrefixList": { + "base": "

    Describes prefixes for AWS services.

    ", + "refs": { + "PrefixListSet$member": null + } + }, + "PrefixListId": { + "base": "

    The ID of the prefix.

    ", + "refs": { + "PrefixListIdList$member": null + } + }, + "PrefixListIdList": { + "base": null, + "refs": { + "IpPermission$PrefixListIds": "

    (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress request, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

    " + } + }, + "PrefixListSet": { + "base": null, + "refs": { + "DescribePrefixListsResult$PrefixLists": "

    All available prefix lists.

    " + } + }, + "PriceSchedule": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleList$member": null + } + }, + "PriceScheduleList": { + "base": null, + "refs": { + "ReservedInstancesListing$PriceSchedules": "

    The price of the Reserved Instance listing.

    " + } + }, + "PriceScheduleSpecification": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleSpecificationList$member": null + } + }, + "PriceScheduleSpecificationList": { + "base": null, + "refs": { + "CreateReservedInstancesListingRequest$PriceSchedules": "

    A list specifying the price of the Reserved Instance for each month remaining in the Reserved Instance term.

    " + } + }, + "PricingDetail": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "PricingDetailsList$member": null + } + }, + "PricingDetailsList": { + "base": null, + "refs": { + "ReservedInstancesOffering$PricingDetails": "

    The pricing details of the Reserved Instance offering.

    " + } + }, + "PrivateIpAddressSpecification": { + "base": "

    Describes a secondary private IP address for a network interface.

    ", + "refs": { + "PrivateIpAddressSpecificationList$member": null + } + }, + "PrivateIpAddressSpecificationList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$PrivateIpAddresses": "

    One or more private IP addresses.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddresses": "

    One or more private IP addresses to assign to the network interface. Only one private IP address can be designated as primary.

    " + } + }, + "PrivateIpAddressStringList": { + "base": null, + "refs": { + "AssignPrivateIpAddressesRequest$PrivateIpAddresses": "

    One or more IP addresses to be assigned as a secondary private IP address to the network interface. You can't specify this parameter when also specifying a number of secondary IP addresses.

    If you don't specify an IP address, Amazon EC2 automatically selects an IP address within the subnet range.

    ", + "UnassignPrivateIpAddressesRequest$PrivateIpAddresses": "

    The secondary private IP addresses to unassign from the network interface. You can specify this option multiple times to unassign more than one IP address.

    " + } + }, + "ProductCode": { + "base": "

    Describes a product code.

    ", + "refs": { + "ProductCodeList$member": null + } + }, + "ProductCodeList": { + "base": null, + "refs": { + "DescribeSnapshotAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "DescribeVolumeAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "Image$ProductCodes": "

    Any product codes associated with the AMI.

    ", + "ImageAttribute$ProductCodes": "

    One or more product codes.

    ", + "Instance$ProductCodes": "

    The product codes attached to this instance.

    ", + "InstanceAttribute$ProductCodes": "

    A list of product codes.

    " + } + }, + "ProductCodeStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$ProductCodes": "

    One or more product codes. After you add a product code to an AMI, it can't be removed. This is only valid when modifying the productCodes attribute.

    " + } + }, + "ProductCodeValues": { + "base": null, + "refs": { + "ProductCode$ProductCodeType": "

    The type of product code.

    " + } + }, + "ProductDescriptionList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$ProductDescriptions": "

    Filters the results by the specified basic product descriptions.

    " + } + }, + "PropagatingVgw": { + "base": "

    Describes a virtual private gateway propagating route.

    ", + "refs": { + "PropagatingVgwList$member": null + } + }, + "PropagatingVgwList": { + "base": null, + "refs": { + "RouteTable$PropagatingVgws": "

    Any virtual private gateway (VGW) propagating routes.

    " + } + }, + "PublicIpStringList": { + "base": null, + "refs": { + "DescribeAddressesRequest$PublicIps": "

    [EC2-Classic] One or more Elastic IP addresses.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "PurchaseReservedInstancesOfferingRequest": { + "base": null, + "refs": { + } + }, + "PurchaseReservedInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "RIProductDescription": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ProductDescription": "

    The Reserved Instance product platform description. Instances that include (Amazon VPC) in the description are for use with Amazon VPC.

    ", + "ReservedInstances$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "ReservedInstancesOffering$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "SpotInstanceRequest$ProductDescription": "

    The product description associated with the Spot instance.

    ", + "SpotPrice$ProductDescription": "

    A general description of the AMI.

    " + } + }, + "ReasonCodesList": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$ReasonCodes": "

    One or more reason codes that describes the health state of your instance.

    • instance-stuck-in-state: My instance is stuck in a state.

    • unresponsive: My instance is unresponsive.

    • not-accepting-credentials: My instance is not accepting my credentials.

    • password-not-available: A password is not available for my instance.

    • performance-network: My instance is experiencing performance problems which I believe are network related.

    • performance-instance-store: My instance is experiencing performance problems which I believe are related to the instance stores.

    • performance-ebs-volume: My instance is experiencing performance problems which I believe are related to an EBS volume.

    • performance-other: My instance is experiencing performance problems.

    • other: [explain using the description parameter]

    " + } + }, + "RebootInstancesRequest": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    Describes a recurring charge.

    ", + "refs": { + "RecurringChargesList$member": null + } + }, + "RecurringChargeFrequency": { + "base": null, + "refs": { + "RecurringCharge$Frequency": "

    The frequency of the recurring charge.

    " + } + }, + "RecurringChargesList": { + "base": null, + "refs": { + "ReservedInstances$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    ", + "ReservedInstancesOffering$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    " + } + }, + "Region": { + "base": "

    Describes a region.

    ", + "refs": { + "RegionList$member": null + } + }, + "RegionList": { + "base": null, + "refs": { + "DescribeRegionsResult$Regions": "

    Information about one or more regions.

    " + } + }, + "RegionNameStringList": { + "base": null, + "refs": { + "DescribeRegionsRequest$RegionNames": "

    The names of one or more regions.

    " + } + }, + "RegisterImageRequest": { + "base": null, + "refs": { + } + }, + "RegisterImageResult": { + "base": null, + "refs": { + } + }, + "RejectVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "RejectVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "ReleaseAddressRequest": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclAssociationRequest": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclAssociationResult": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteTableAssociationRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteTableAssociationResult": { + "base": null, + "refs": { + } + }, + "ReportInstanceReasonCodes": { + "base": null, + "refs": { + "ReasonCodesList$member": null + } + }, + "ReportInstanceStatusRequest": { + "base": null, + "refs": { + } + }, + "ReportStatusType": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$Status": "

    The status of all instances listed.

    " + } + }, + "RequestSpotFleetRequest": { + "base": "

    Contains the parameters for RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotFleetResponse": { + "base": "

    Contains the output of RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotInstancesRequest": { + "base": "

    Contains the parameters for RequestSpotInstances.

    ", + "refs": { + } + }, + "RequestSpotInstancesResult": { + "base": "

    Contains the output of RequestSpotInstances.

    ", + "refs": { + } + }, + "Reservation": { + "base": "

    Describes a reservation.

    ", + "refs": { + "ReservationList$member": null + } + }, + "ReservationList": { + "base": null, + "refs": { + "DescribeInstancesResult$Reservations": "

    One or more reservations.

    " + } + }, + "ReservedInstanceLimitPrice": { + "base": "

    Describes the limit price of a Reserved Instance offering.

    ", + "refs": { + "PurchaseReservedInstancesOfferingRequest$LimitPrice": "

    Specified for Reserved Instance Marketplace offerings to limit the total order and ensure that the Reserved Instances are not purchased at unexpected prices.

    " + } + }, + "ReservedInstanceState": { + "base": null, + "refs": { + "ReservedInstances$State": "

    The state of the Reserved Instance purchase.

    " + } + }, + "ReservedInstances": { + "base": "

    Describes a Reserved Instance.

    ", + "refs": { + "ReservedInstancesList$member": null + } + }, + "ReservedInstancesConfiguration": { + "base": "

    Describes the configuration settings for the modified Reserved Instances.

    ", + "refs": { + "ReservedInstancesConfigurationList$member": null, + "ReservedInstancesModificationResult$TargetConfiguration": "

    The target Reserved Instances configurations supplied as part of the modification request.

    " + } + }, + "ReservedInstancesConfigurationList": { + "base": null, + "refs": { + "ModifyReservedInstancesRequest$TargetConfigurations": "

    The configuration settings for the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesId": { + "base": "

    Describes the ID of a Reserved Instance.

    ", + "refs": { + "ReservedIntancesIds$member": null + } + }, + "ReservedInstancesIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesRequest$ReservedInstancesIds": "

    One or more Reserved Instance IDs.

    Default: Describes all your Reserved Instances, or only those otherwise specified.

    ", + "ModifyReservedInstancesRequest$ReservedInstancesIds": "

    The IDs of the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesList": { + "base": null, + "refs": { + "DescribeReservedInstancesResult$ReservedInstances": "

    A list of Reserved Instances.

    " + } + }, + "ReservedInstancesListing": { + "base": "

    Describes a Reserved Instance listing.

    ", + "refs": { + "ReservedInstancesListingList$member": null + } + }, + "ReservedInstancesListingList": { + "base": null, + "refs": { + "CancelReservedInstancesListingResult$ReservedInstancesListings": "

    The Reserved Instance listing.

    ", + "CreateReservedInstancesListingResult$ReservedInstancesListings": "

    Information about the Reserved Instances listing.

    ", + "DescribeReservedInstancesListingsResult$ReservedInstancesListings": "

    Information about the Reserved Instance listing.

    " + } + }, + "ReservedInstancesModification": { + "base": "

    Describes a Reserved Instance modification.

    ", + "refs": { + "ReservedInstancesModificationList$member": null + } + }, + "ReservedInstancesModificationIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsRequest$ReservedInstancesModificationIds": "

    IDs for the submitted modification request.

    " + } + }, + "ReservedInstancesModificationList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsResult$ReservedInstancesModifications": "

    The Reserved Instance modification information.

    " + } + }, + "ReservedInstancesModificationResult": { + "base": null, + "refs": { + "ReservedInstancesModificationResultList$member": null + } + }, + "ReservedInstancesModificationResultList": { + "base": null, + "refs": { + "ReservedInstancesModification$ModificationResults": "

    Contains target configurations along with their corresponding new Reserved Instance IDs.

    " + } + }, + "ReservedInstancesOffering": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "ReservedInstancesOfferingList$member": null + } + }, + "ReservedInstancesOfferingIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ReservedInstancesOfferingIds": "

    One or more Reserved Instances offering IDs.

    " + } + }, + "ReservedInstancesOfferingList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsResult$ReservedInstancesOfferings": "

    A list of Reserved Instances offerings.

    " + } + }, + "ReservedIntancesIds": { + "base": null, + "refs": { + "ReservedInstancesModification$ReservedInstancesIds": "

    The IDs of one or more Reserved Instances.

    " + } + }, + "ResetImageAttributeName": { + "base": null, + "refs": { + "ResetImageAttributeRequest$Attribute": "

    The attribute to reset (currently you can only reset the launch permission attribute).

    " + } + }, + "ResetImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetSnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "CreateTagsRequest$Resources": "

    The IDs of one or more resources to tag. For example, ami-1a2b3c4d.

    ", + "DeleteTagsRequest$Resources": "

    The ID of the resource. For example, ami-1a2b3c4d. You can specify more than one resource ID.

    " + } + }, + "ResourceType": { + "base": null, + "refs": { + "TagDescription$ResourceType": "

    The resource type.

    " + } + }, + "RestorableByStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$RestorableByUserIds": "

    One or more AWS accounts IDs that can create volumes from the snapshot.

    " + } + }, + "RestoreAddressToClassicRequest": { + "base": null, + "refs": { + } + }, + "RestoreAddressToClassicResult": { + "base": null, + "refs": { + } + }, + "RevokeSecurityGroupEgressRequest": { + "base": null, + "refs": { + } + }, + "RevokeSecurityGroupIngressRequest": { + "base": null, + "refs": { + } + }, + "Route": { + "base": "

    Describes a route in a route table.

    ", + "refs": { + "RouteList$member": null + } + }, + "RouteList": { + "base": null, + "refs": { + "RouteTable$Routes": "

    The routes in the route table.

    " + } + }, + "RouteOrigin": { + "base": null, + "refs": { + "Route$Origin": "

    Describes how the route was created.

    • CreateRouteTable indicates that route was automatically created when the route table was created.
    • CreateRoute indicates that the route was manually added to the route table.
    • EnableVgwRoutePropagation indicates that the route was propagated by route propagation.
    " + } + }, + "RouteState": { + "base": null, + "refs": { + "Route$State": "

    The state of the route. The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, or the specified NAT instance has been terminated).

    " + } + }, + "RouteTable": { + "base": "

    Describes a route table.

    ", + "refs": { + "CreateRouteTableResult$RouteTable": "

    Information about the route table.

    ", + "RouteTableList$member": null + } + }, + "RouteTableAssociation": { + "base": "

    Describes an association between a route table and a subnet.

    ", + "refs": { + "RouteTableAssociationList$member": null + } + }, + "RouteTableAssociationList": { + "base": null, + "refs": { + "RouteTable$Associations": "

    The associations between the route table and one or more subnets.

    " + } + }, + "RouteTableList": { + "base": null, + "refs": { + "DescribeRouteTablesResult$RouteTables": "

    Information about one or more route tables.

    " + } + }, + "RuleAction": { + "base": null, + "refs": { + "CreateNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "NetworkAclEntry$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "ReplaceNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    " + } + }, + "RunInstancesMonitoringEnabled": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "LaunchSpecification$Monitoring": null, + "RunInstancesRequest$Monitoring": "

    The monitoring for the instance.

    ", + "RequestSpotLaunchSpecification$Monitoring": null + } + }, + "RunInstancesRequest": { + "base": null, + "refs": { + } + }, + "S3Storage": { + "base": "

    Describes the storage parameters for S3 and S3 buckets for an instance store-backed AMI.

    ", + "refs": { + "Storage$S3": "

    An Amazon S3 storage location.

    " + } + }, + "SecurityGroup": { + "base": "

    Describes a security group

    ", + "refs": { + "SecurityGroupList$member": null + } + }, + "SecurityGroupIdStringList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$Groups": "

    The IDs of one or more security groups.

    ", + "ImportInstanceLaunchSpecification$GroupIds": "

    One or more security group IDs.

    ", + "InstanceNetworkInterfaceSpecification$Groups": "

    The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$Groups": "

    Changes the security groups for the network interface. The new set of groups you specify replaces the current set. You must specify at least one group, even if it's just the default security group in the VPC. You must specify the ID of the security group, not the name.

    ", + "RunInstancesRequest$SecurityGroupIds": "

    One or more security group IDs. You can create a security group using CreateSecurityGroup.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "SecurityGroupList": { + "base": null, + "refs": { + "DescribeSecurityGroupsResult$SecurityGroups": "

    Information about one or more security groups.

    " + } + }, + "SecurityGroupStringList": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$GroupNames": "

    One or more security group names.

    ", + "RunInstancesRequest$SecurityGroups": "

    [EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "ShutdownBehavior": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "RunInstancesRequest$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    Default: stop

    " + } + }, + "Snapshot": { + "base": "

    Describes a snapshot.

    ", + "refs": { + "SnapshotList$member": null + } + }, + "SnapshotAttributeName": { + "base": null, + "refs": { + "DescribeSnapshotAttributeRequest$Attribute": "

    The snapshot attribute you would like to view.

    ", + "ModifySnapshotAttributeRequest$Attribute": "

    The snapshot attribute to modify.

    Only volume creation permissions may be modified at the customer level.

    ", + "ResetSnapshotAttributeRequest$Attribute": "

    The attribute to reset. Currently, only the attribute for permission to create volumes can be reset.

    " + } + }, + "SnapshotDetail": { + "base": "

    Describes the snapshot created from the imported disk.

    ", + "refs": { + "SnapshotDetailList$member": null + } + }, + "SnapshotDetailList": { + "base": null, + "refs": { + "ImportImageResult$SnapshotDetails": "

    Information about the snapshots.

    ", + "ImportImageTask$SnapshotDetails": "

    Information about the snapshots.

    " + } + }, + "SnapshotDiskContainer": { + "base": "

    The disk container object for the import snapshot request.

    ", + "refs": { + "ImportSnapshotRequest$DiskContainer": "

    Information about the disk container.

    " + } + }, + "SnapshotIdStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$SnapshotIds": "

    One or more snapshot IDs.

    Default: Describes snapshots for which you have launch permissions.

    " + } + }, + "SnapshotList": { + "base": null, + "refs": { + "DescribeSnapshotsResult$Snapshots": "

    Information about the snapshots.

    " + } + }, + "SnapshotState": { + "base": null, + "refs": { + "Snapshot$State": "

    The snapshot state.

    " + } + }, + "SnapshotTaskDetail": { + "base": "

    Details about the import snapshot task.

    ", + "refs": { + "ImportSnapshotResult$SnapshotTaskDetail": "

    Information about the import snapshot task.

    ", + "ImportSnapshotTask$SnapshotTaskDetail": "

    Describes an import snapshot task.

    " + } + }, + "SpotDatafeedSubscription": { + "base": "

    Describes the data feed for a Spot instance.

    ", + "refs": { + "CreateSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    ", + "DescribeSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    " + } + }, + "SpotFleetLaunchSpecification": { + "base": "

    Describes the launch specification for one or more Spot instances.

    ", + "refs": { + "LaunchSpecsList$member": null + } + }, + "SpotFleetMonitoring": { + "base": "

    Describes whether monitoring is enabled.

    ", + "refs": { + "SpotFleetLaunchSpecification$Monitoring": "

    Enable or disable monitoring for the instances.

    " + } + }, + "SpotFleetRequestConfig": { + "base": "

    Describes a Spot fleet request.

    ", + "refs": { + "SpotFleetRequestConfigSet$member": null + } + }, + "SpotFleetRequestConfigData": { + "base": "

    Describes the configuration of a Spot fleet request.

    ", + "refs": { + "RequestSpotFleetRequest$SpotFleetRequestConfig": "

    The configuration for the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestConfig": "

    Information about the configuration of the Spot fleet request.

    " + } + }, + "SpotFleetRequestConfigSet": { + "base": null, + "refs": { + "DescribeSpotFleetRequestsResponse$SpotFleetRequestConfigs": "

    Information about the configuration of your Spot fleet.

    " + } + }, + "SpotInstanceRequest": { + "base": "

    Describe a Spot instance request.

    ", + "refs": { + "SpotInstanceRequestList$member": null + } + }, + "SpotInstanceRequestIdList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    ", + "DescribeSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    " + } + }, + "SpotInstanceRequestList": { + "base": null, + "refs": { + "DescribeSpotInstanceRequestsResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    ", + "RequestSpotInstancesResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "SpotInstanceState": { + "base": null, + "refs": { + "SpotInstanceRequest$State": "

    The state of the Spot instance request. Spot bid status information can help you track your Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "SpotInstanceStateFault": { + "base": "

    Describes a Spot instance state change.

    ", + "refs": { + "SpotDatafeedSubscription$Fault": "

    The fault codes for the Spot instance request, if any.

    ", + "SpotInstanceRequest$Fault": "

    The fault codes for the Spot instance request, if any.

    " + } + }, + "SpotInstanceStatus": { + "base": "

    Describes the status of a Spot instance request.

    ", + "refs": { + "SpotInstanceRequest$Status": "

    The status code and status message describing the Spot instance request.

    " + } + }, + "SpotInstanceType": { + "base": null, + "refs": { + "RequestSpotInstancesRequest$Type": "

    The Spot instance request type.

    Default: one-time

    ", + "SpotInstanceRequest$Type": "

    The Spot instance request type.

    " + } + }, + "SpotPlacement": { + "base": "

    Describes Spot instance placement.

    ", + "refs": { + "LaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "SpotFleetLaunchSpecification$Placement": "

    The placement information.

    ", + "RequestSpotLaunchSpecification$Placement": "

    The placement information for the instance.

    " + } + }, + "SpotPrice": { + "base": "

    Describes the maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "refs": { + "SpotPriceHistoryList$member": null + } + }, + "SpotPriceHistoryList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryResult$SpotPriceHistory": "

    The historical Spot prices.

    " + } + }, + "StartInstancesRequest": { + "base": null, + "refs": { + } + }, + "StartInstancesResult": { + "base": null, + "refs": { + } + }, + "State": { + "base": null, + "refs": { + "VpcEndpoint$State": "

    The state of the VPC endpoint.

    " + } + }, + "StateReason": { + "base": "

    Describes a state change.

    ", + "refs": { + "Image$StateReason": "

    The reason for the state change.

    ", + "Instance$StateReason": "

    The reason for the most recent state transition.

    " + } + }, + "Status": { + "base": null, + "refs": { + "MoveAddressToVpcResult$Status": "

    The status of the move of the IP address.

    ", + "RestoreAddressToClassicResult$Status": "

    The move status for the IP address.

    " + } + }, + "StatusName": { + "base": null, + "refs": { + "InstanceStatusDetails$Name": "

    The type of instance status.

    " + } + }, + "StatusType": { + "base": null, + "refs": { + "InstanceStatusDetails$Status": "

    The status.

    " + } + }, + "StopInstancesRequest": { + "base": null, + "refs": { + } + }, + "StopInstancesResult": { + "base": null, + "refs": { + } + }, + "Storage": { + "base": "

    Describes the storage location for an instance store-backed AMI.

    ", + "refs": { + "BundleInstanceRequest$Storage": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "BundleTask$Storage": "

    The Amazon S3 storage locations.

    " + } + }, + "String": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "AccountAttribute$AttributeName": "

    The name of the account attribute.

    ", + "AccountAttributeValue$AttributeValue": "

    The value of the attribute.

    ", + "ActiveInstance$InstanceType": "

    The instance type.

    ", + "ActiveInstance$InstanceId": "

    The ID of the instance.

    ", + "ActiveInstance$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "Address$InstanceId": "

    The ID of the instance that the address is associated with (if any).

    ", + "Address$PublicIp": "

    The Elastic IP address.

    ", + "Address$AllocationId": "

    The ID representing the allocation of the address for use with EC2-VPC.

    ", + "Address$AssociationId": "

    The ID representing the association of the address with an instance in a VPC.

    ", + "Address$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Address$NetworkInterfaceOwnerId": "

    The ID of the AWS account that owns the network interface.

    ", + "Address$PrivateIpAddress": "

    The private IP address associated with the Elastic IP address.

    ", + "AllocateAddressResult$PublicIp": "

    The Elastic IP address.

    ", + "AllocateAddressResult$AllocationId": "

    [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.

    ", + "AllocationIdList$member": null, + "AssignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AssociateAddressRequest$InstanceId": "

    The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "AssociateAddressRequest$PublicIp": "

    The Elastic IP address. This is required for EC2-Classic.

    ", + "AssociateAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. This is required for EC2-VPC.

    ", + "AssociateAddressRequest$NetworkInterfaceId": "

    [EC2-VPC] The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

    ", + "AssociateAddressRequest$PrivateIpAddress": "

    [EC2-VPC] The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

    ", + "AssociateAddressResult$AssociationId": "

    [EC2-VPC] The ID that represents the association of the Elastic IP address with an instance.

    ", + "AssociateDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set, or default to associate no DHCP options with the VPC.

    ", + "AssociateDhcpOptionsRequest$VpcId": "

    The ID of the VPC.

    ", + "AssociateRouteTableRequest$SubnetId": "

    The ID of the subnet.

    ", + "AssociateRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "AssociateRouteTableResult$AssociationId": "

    The route table association ID (needed to disassociate the route table).

    ", + "AttachClassicLinkVpcRequest$InstanceId": "

    The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC.

    ", + "AttachClassicLinkVpcRequest$VpcId": "

    The ID of a ClassicLink-enabled VPC.

    ", + "AttachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "AttachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttachNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AttachNetworkInterfaceRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachNetworkInterfaceResult$AttachmentId": "

    The ID of the network interface attachment.

    ", + "AttachVolumeRequest$VolumeId": "

    The ID of the EBS volume. The volume and instance must be within the same Availability Zone.

    ", + "AttachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachVolumeRequest$Device": "

    The device name to expose to the instance (for example, /dev/sdh or xvdh).

    ", + "AttachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "AttachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttributeValue$Value": "

    Valid values are case-sensitive and vary by action.

    ", + "AuthorizeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "AuthorizeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. For EC2-VPC, the source security group must be in the same VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic, default VPC] The AWS account number for the source security group. For EC2-VPC, the source security group must be in the same VPC. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). (VPC only) Use -1 to specify all.

    ", + "AuthorizeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "AvailabilityZone$ZoneName": "

    The name of the Availability Zone.

    ", + "AvailabilityZone$RegionName": "

    The name of the region.

    ", + "AvailabilityZoneMessage$Message": "

    The message about the Availability Zone.

    ", + "BlockDeviceMapping$VirtualName": "

    The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1.The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

    Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

    ", + "BlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "BlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the block device mapping of the AMI.

    ", + "BundleIdStringList$member": null, + "BundleInstanceRequest$InstanceId": "

    The ID of the instance to bundle.

    Type: String

    Default: None

    Required: Yes

    ", + "BundleTask$InstanceId": "

    The ID of the instance associated with this bundle task.

    ", + "BundleTask$BundleId": "

    The ID of the bundle task.

    ", + "BundleTask$Progress": "

    The level of task completion, as a percent (for example, 20%).

    ", + "BundleTaskError$Code": "

    The error code.

    ", + "BundleTaskError$Message": "

    The error message.

    ", + "CancelBundleTaskRequest$BundleId": "

    The ID of the bundle task.

    ", + "CancelConversionRequest$ConversionTaskId": "

    The ID of the conversion task.

    ", + "CancelConversionRequest$ReasonMessage": "

    The reason for canceling the conversion task.

    ", + "CancelExportTaskRequest$ExportTaskId": "

    The ID of the export task. This is the ID returned by CreateInstanceExportTask.

    ", + "CancelImportTaskRequest$ImportTaskId": "

    The ID of the import image or import snapshot task to be canceled.

    ", + "CancelImportTaskRequest$CancelReason": "

    The reason for canceling the task.

    ", + "CancelImportTaskResult$ImportTaskId": "

    The ID of the task being canceled.

    ", + "CancelImportTaskResult$State": "

    The current state of the task being canceled.

    ", + "CancelImportTaskResult$PreviousState": "

    The current state of the task being canceled.

    ", + "CancelReservedInstancesListingRequest$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "CancelSpotFleetRequestsError$Message": "

    The description for the error code.

    ", + "CancelSpotFleetRequestsErrorItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelledSpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "ClassicLinkInstance$InstanceId": "

    The ID of the instance.

    ", + "ClassicLinkInstance$VpcId": "

    The ID of the VPC.

    ", + "ClientData$Comment": "

    A user-defined comment about the disk upload.

    ", + "ConfirmProductInstanceRequest$ProductCode": "

    The product code. This must be a product code that you own.

    ", + "ConfirmProductInstanceRequest$InstanceId": "

    The ID of the instance.

    ", + "ConfirmProductInstanceResult$OwnerId": "

    The AWS account ID of the instance owner. This is only present if the product code is attached to the instance.

    ", + "ConversionIdStringList$member": null, + "ConversionTask$ConversionTaskId": "

    The ID of the conversion task.

    ", + "ConversionTask$ExpirationTime": "

    The time when the task expires. If the upload isn't complete before the expiration time, we automatically cancel the task.

    ", + "ConversionTask$StatusMessage": "

    The status message related to the conversion task.

    ", + "CopyImageRequest$SourceRegion": "

    The name of the region that contains the AMI to copy.

    ", + "CopyImageRequest$SourceImageId": "

    The ID of the AMI to copy.

    ", + "CopyImageRequest$Name": "

    The name of the new AMI in the destination region.

    ", + "CopyImageRequest$Description": "

    A description for the new AMI in the destination region.

    ", + "CopyImageRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopyImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CopySnapshotRequest$SourceRegion": "

    The ID of the region that contains the snapshot to be copied.

    ", + "CopySnapshotRequest$SourceSnapshotId": "

    The ID of the EBS snapshot to copy.

    ", + "CopySnapshotRequest$Description": "

    A description for the EBS snapshot.

    ", + "CopySnapshotRequest$DestinationRegion": "

    The destination region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination region in a PresignedUrl parameter, where it is required.

    CopySnapshot sends the snapshot copy to the regional endpoint that you send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI, this is specified with the --region parameter or the default region in your AWS configuration file).

    ", + "CopySnapshotRequest$PresignedUrl": "

    The pre-signed URL that facilitates copying an encrypted snapshot. This parameter is only required when copying an encrypted snapshot with the Amazon EC2 Query API; it is available as an optional parameter in all other cases. The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

    ", + "CopySnapshotRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when creating the snapshot copy. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CopySnapshotResult$SnapshotId": "

    The ID of the new snapshot.

    ", + "CreateCustomerGatewayRequest$PublicIp": "

    The Internet-routable IP address for the customer gateway's outside interface. The address must be static.

    ", + "CreateFlowLogsRequest$LogGroupName": "

    The name of the CloudWatch log group.

    ", + "CreateFlowLogsRequest$DeliverLogsPermissionArn": "

    The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group.

    ", + "CreateFlowLogsRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    ", + "CreateFlowLogsResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateImageRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateImageRequest$Name": "

    A name for the new image.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores(_)

    ", + "CreateImageRequest$Description": "

    A description for the new image.

    ", + "CreateImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CreateInstanceExportTaskRequest$Description": "

    A description for the conversion task or the resource being exported. The maximum length is 255 bytes.

    ", + "CreateInstanceExportTaskRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateKeyPairRequest$KeyName": "

    A unique name for the key pair.

    Constraints: Up to 255 ASCII characters

    ", + "CreateNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "CreateNetworkAclEntryRequest$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "CreateNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24).

    ", + "CreateNetworkAclRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateNetworkInterfaceRequest$SubnetId": "

    The ID of the subnet to associate with the network interface.

    ", + "CreateNetworkInterfaceRequest$Description": "

    A description for the network interface.

    ", + "CreateNetworkInterfaceRequest$PrivateIpAddress": "

    The primary private IP address of the network interface. If you don't specify an IP address, Amazon EC2 selects one for you from the subnet range. If you specify an IP address, you cannot indicate any IP addresses specified in privateIpAddresses as primary (only one IP address can be designated as primary).

    ", + "CreatePlacementGroupRequest$GroupName": "

    A name for the placement group.

    Constraints: Up to 255 ASCII characters

    ", + "CreateReservedInstancesListingRequest$ReservedInstancesId": "

    The ID of the active Reserved Instance.

    ", + "CreateReservedInstancesListingRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "CreateRouteRequest$RouteTableId": "

    The ID of the route table for the route.

    ", + "CreateRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. Routing decisions are based on the most specific match.

    ", + "CreateRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway attached to your VPC.

    ", + "CreateRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "CreateRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "CreateRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "CreateRouteTableRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSecurityGroupRequest$GroupName": "

    The name of the security group.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

    ", + "CreateSecurityGroupRequest$Description": "

    A description for the security group. This is informational only.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*

    ", + "CreateSecurityGroupRequest$VpcId": "

    [EC2-VPC] The ID of the VPC. Required for EC2-VPC.

    ", + "CreateSecurityGroupResult$GroupId": "

    The ID of the security group.

    ", + "CreateSnapshotRequest$VolumeId": "

    The ID of the EBS volume.

    ", + "CreateSnapshotRequest$Description": "

    A description for the snapshot.

    ", + "CreateSpotDatafeedSubscriptionRequest$Bucket": "

    The Amazon S3 bucket in which to store the Spot instance data feed.

    ", + "CreateSpotDatafeedSubscriptionRequest$Prefix": "

    A prefix for the data feed file names.

    ", + "CreateSubnetRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSubnetRequest$CidrBlock": "

    The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24.

    ", + "CreateSubnetRequest$AvailabilityZone": "

    The Availability Zone for the subnet.

    Default: Amazon EC2 selects one for you (recommended).

    ", + "CreateVolumePermission$UserId": "

    The specific AWS account ID that is to be added or removed from a volume's list of create volume permissions.

    ", + "CreateVolumeRequest$SnapshotId": "

    The snapshot from which to create the volume.

    ", + "CreateVolumeRequest$AvailabilityZone": "

    The Availability Zone in which to create the volume. Use DescribeAvailabilityZones to list the Availability Zones that are currently available to you.

    ", + "CreateVolumeRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CreateVpcEndpointRequest$VpcId": "

    The ID of the VPC in which the endpoint will be used.

    ", + "CreateVpcEndpointRequest$ServiceName": "

    The AWS service name, in the form com.amazonaws.region.service. To get a list of available services, use the DescribeVpcEndpointServices request.

    ", + "CreateVpcEndpointRequest$PolicyDocument": "

    A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

    ", + "CreateVpcEndpointRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    ", + "CreateVpcEndpointResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateVpcPeeringConnectionRequest$VpcId": "

    The ID of the requester VPC.

    ", + "CreateVpcPeeringConnectionRequest$PeerVpcId": "

    The ID of the VPC with which you are creating the VPC peering connection.

    ", + "CreateVpcPeeringConnectionRequest$PeerOwnerId": "

    The AWS account ID of the owner of the peer VPC.

    Default: Your AWS account ID

    ", + "CreateVpcRequest$CidrBlock": "

    The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16.

    ", + "CreateVpnConnectionRequest$Type": "

    The type of VPN connection (ipsec.1).

    ", + "CreateVpnConnectionRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CreateVpnConnectionRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "CreateVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "CreateVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "CreateVpnGatewayRequest$AvailabilityZone": "

    The Availability Zone for the virtual private gateway.

    ", + "CustomerGateway$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CustomerGateway$State": "

    The current state of the customer gateway (pending | available | deleting | deleted).

    ", + "CustomerGateway$Type": "

    The type of VPN connection the customer gateway supports (ipsec.1).

    ", + "CustomerGateway$IpAddress": "

    The Internet-routable IP address of the customer gateway's outside interface.

    ", + "CustomerGateway$BgpAsn": "

    The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    ", + "CustomerGatewayIdStringList$member": null, + "DeleteCustomerGatewayRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "DeleteDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set.

    ", + "DeleteInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DeleteKeyPairRequest$KeyName": "

    The name of the key pair.

    ", + "DeleteNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkAclRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DeletePlacementGroupRequest$GroupName": "

    The name of the placement group.

    ", + "DeleteRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteRouteRequest$DestinationCidrBlock": "

    The CIDR range for the route. The value you specify must match the CIDR for the route exactly.

    ", + "DeleteRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteSecurityGroupRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group. You can specify either the security group name or the security group ID.

    ", + "DeleteSecurityGroupRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "DeleteSnapshotRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DeleteSubnetRequest$SubnetId": "

    The ID of the subnet.

    ", + "DeleteVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DeleteVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "DeleteVpcRequest$VpcId": "

    The ID of the VPC.

    ", + "DeleteVpnConnectionRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "DeleteVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DeregisterImageRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeClassicLinkInstancesRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeClassicLinkInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeFlowLogsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeFlowLogsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeImportImageTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportImageTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeImportSnapshotTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportSnapshotTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "DescribeInstanceStatusRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeInstanceStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstancesRequest$NextToken": "

    The token to request the next page of results.

    ", + "DescribeInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeMovingAddressesRequest$NextToken": "

    The token to use to retrieve the next page of results.

    ", + "DescribeMovingAddressesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribeNetworkInterfaceAttributeResult$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribePrefixListsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribePrefixListsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesId": "

    One or more Reserved Instance IDs.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesListingId": "

    One or more Reserved Instance Listing IDs.

    ", + "DescribeReservedInstancesModificationsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesModificationsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeReservedInstancesOfferingsRequest$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "DescribeReservedInstancesOfferingsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesOfferingsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeSnapshotAttributeRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotAttributeResult$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotsRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeSnapshots request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeSnapshotsResult$NextToken": "

    The NextToken value to include in a future DescribeSnapshots request. When the results of a DescribeSnapshots request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetInstancesRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetInstancesResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestHistoryRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestHistoryResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestsRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestsResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotPriceHistoryRequest$AvailabilityZone": "

    Filters the results by the specified Availability Zone.

    ", + "DescribeSpotPriceHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotPriceHistoryResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeTagsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeTagsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return..

    ", + "DescribeVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeAttributeResult$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeStatusRequest$NextToken": "

    The NextToken value to include in a future DescribeVolumeStatus request. When the results of the request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumeStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumesRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeVolumes request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeVolumesResult$NextToken": "

    The NextToken value to include in a future DescribeVolumes request. When the results of a DescribeVolumes request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcAttributeResult$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcEndpointServicesRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointServicesResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeVpcEndpointsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DetachClassicLinkVpcRequest$InstanceId": "

    The ID of the instance to unlink from the VPC.

    ", + "DetachClassicLinkVpcRequest$VpcId": "

    The ID of the VPC to which the instance is linked.

    ", + "DetachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DetachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DetachNetworkInterfaceRequest$AttachmentId": "

    The ID of the attachment.

    ", + "DetachVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DetachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "DetachVolumeRequest$Device": "

    The device name.

    ", + "DetachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DetachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DhcpConfiguration$Key": "

    The name of a DHCP option.

    ", + "DhcpOptions$DhcpOptionsId": "

    The ID of the set of DHCP options.

    ", + "DhcpOptionsIdStringList$member": null, + "DisableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "DisableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "DisableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "DisassociateAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "DisassociateAddressRequest$AssociationId": "

    [EC2-VPC] The association ID. Required for EC2-VPC.

    ", + "DisassociateRouteTableRequest$AssociationId": "

    The association ID representing the current association between the route table and subnet.

    ", + "DiskImage$Description": "

    A description of the disk image.

    ", + "DiskImageDescription$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    ", + "DiskImageDescription$Checksum": "

    The checksum computed for the disk image.

    ", + "DiskImageDetail$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3 and presented here as an Amazon S3 presigned URL. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    ", + "DiskImageVolumeDescription$Id": "

    The volume identifier.

    ", + "EbsBlockDevice$SnapshotId": "

    The ID of the snapshot.

    ", + "EbsInstanceBlockDevice$VolumeId": "

    The ID of the EBS volume.

    ", + "EbsInstanceBlockDeviceSpecification$VolumeId": "

    The ID of the EBS volume.

    ", + "EnableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "EnableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "EnableVolumeIORequest$VolumeId": "

    The ID of the volume.

    ", + "EnableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "EventInformation$InstanceId": "

    The ID of the instance. This information is available only for instanceChange events.

    ", + "EventInformation$EventSubType": "

    The event.

    The following are the error events.

    • iamFleetRoleInvalid - Spot fleet did not have the required permissions either to launch or terminate an instance.

    • spotFleetRequestConfigurationInvalid - The configuration is not valid. For more information, see the description.

    • spotInstanceCountLimitExceeded - You've reached the limit on the number of Spot instances that you can launch.

    The following are the fleetRequestChange events.

    • active - The Spot fleet has been validated and Amazon EC2 is attempting to maintain the target number of running Spot instances.

    • cancelled - The Spot fleet is canceled and has no running Spot instances. The Spot fleet will be deleted two days after its instances were terminated.

    • cancelled_running - The Spot fleet is canceled and will not launch additional Spot instances, but its existing Spot instances continue to run until they are interrupted or terminated.

    • cancelled_terminating - The Spot fleet is canceled and its Spot instances are terminating.

    • expired - The Spot fleet request has expired. A subsequent event indicates that the instances were terminated, if the request was created with TerminateInstancesWithExpiration set.

    • price_update - The bid price for a launch configuration was adjusted because it was too high. This change is permanent.

    • submitted - The Spot fleet request is being evaluated and Amazon EC2 is preparing to launch the target number of Spot instances.

    The following are the instanceChange events.

    • launched - A bid was fulfilled and a new instance was launched.

    • terminated - An instance was terminated by the user.

    ", + "EventInformation$EventDescription": "

    The description of the event.

    ", + "ExecutableByStringList$member": null, + "ExportTask$ExportTaskId": "

    The ID of the export task.

    ", + "ExportTask$Description": "

    A description of the resource being exported.

    ", + "ExportTask$StatusMessage": "

    The status message related to the export task.

    ", + "ExportTaskIdStringList$member": null, + "ExportToS3Task$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3Task$S3Key": "

    The encryption key for your S3 bucket.

    ", + "ExportToS3TaskSpecification$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3TaskSpecification$S3Prefix": "

    The image is written to a single object in the S3 bucket at the S3 key s3prefix + exportTaskId + '.' + diskImageFormat.

    ", + "Filter$Name": "

    The name of the filter. Filter names are case-sensitive.

    ", + "FlowLog$FlowLogId": "

    The flow log ID.

    ", + "FlowLog$FlowLogStatus": "

    The status of the flow log (ACTIVE).

    ", + "FlowLog$ResourceId": "

    The ID of the resource on which the flow log was created.

    ", + "FlowLog$LogGroupName": "

    The name of the flow log group.

    ", + "FlowLog$DeliverLogsStatus": "

    The status of the logs delivery (SUCCESS | FAILED).

    ", + "FlowLog$DeliverLogsErrorMessage": "

    Information about the error that occurred. Rate limited indicates that CloudWatch logs throttling has been applied for one or more network interfaces. Access error indicates that the IAM role associated with the flow log does not have sufficient permissions to publish to CloudWatch Logs. Unknown error indicates an internal error.

    ", + "FlowLog$DeliverLogsPermissionArn": "

    The ARN of the IAM role that posts logs to CloudWatch Logs.

    ", + "GetConsoleOutputRequest$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$Output": "

    The console output, Base64 encoded.

    ", + "GetPasswordDataRequest$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$PasswordData": "

    The password of the instance.

    ", + "GroupIdStringList$member": null, + "GroupIdentifier$GroupName": "

    The name of the security group.

    ", + "GroupIdentifier$GroupId": "

    The ID of the security group.

    ", + "GroupNameStringList$member": null, + "IamInstanceProfile$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfile$Id": "

    The ID of the instance profile.

    ", + "IamInstanceProfileSpecification$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfileSpecification$Name": "

    The name of the instance profile.

    ", + "Image$ImageId": "

    The ID of the AMI.

    ", + "Image$ImageLocation": "

    The location of the AMI.

    ", + "Image$OwnerId": "

    The AWS account ID of the image owner.

    ", + "Image$CreationDate": "

    The date and time the image was created.

    ", + "Image$KernelId": "

    The kernel associated with the image, if any. Only applicable for machine images.

    ", + "Image$RamdiskId": "

    The RAM disk associated with the image, if any. Only applicable for machine images.

    ", + "Image$SriovNetSupport": "

    Specifies whether enhanced networking is enabled.

    ", + "Image$ImageOwnerAlias": "

    The AWS account alias (for example, amazon, self) or the AWS account ID of the AMI owner.

    ", + "Image$Name": "

    The name of the AMI that was provided during image creation.

    ", + "Image$Description": "

    The description of the AMI that was provided during image creation.

    ", + "Image$RootDeviceName": "

    The device name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "ImageAttribute$ImageId": "

    The ID of the AMI.

    ", + "ImageDiskContainer$Description": "

    The description of the disk image.

    ", + "ImageDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "ImageDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. The URL can either be a https URL (https://..) or an Amazon S3 URL (s3://..)

    ", + "ImageDiskContainer$DeviceName": "

    The block device mapping for the disk.

    ", + "ImageDiskContainer$SnapshotId": "

    The ID of the EBS snapshot to be used for importing the snapshot.

    ", + "ImageIdStringList$member": null, + "ImportImageRequest$Description": "

    A description string for the import image task.

    ", + "ImportImageRequest$LicenseType": "

    The license type to be used for the Amazon Machine Image (AMI) after importing.

    Note: You may only use BYOL if you have existing licenses with rights to use these licenses in a third party cloud like AWS. For more information, see VM Import/Export Prerequisites in the Amazon Elastic Compute Cloud User Guide.

    Valid values: AWS | BYOL

    ", + "ImportImageRequest$Hypervisor": "

    The target hypervisor platform.

    Valid values: xen

    ", + "ImportImageRequest$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageRequest$Platform": "

    The operating system of the virtual machine.

    Valid values: Windows | Linux

    ", + "ImportImageRequest$ClientToken": "

    The token to enable idempotency for VM import requests.

    ", + "ImportImageRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportImageResult$ImportTaskId": "

    The task ID of the import image task.

    ", + "ImportImageResult$Architecture": "

    The architecture of the virtual machine.

    ", + "ImportImageResult$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageResult$Platform": "

    The operating system of the virtual machine.

    ", + "ImportImageResult$Hypervisor": "

    The target hypervisor of the import task.

    ", + "ImportImageResult$Description": "

    A description of the import task.

    ", + "ImportImageResult$ImageId": "

    The ID of the Amazon Machine Image (AMI) created by the import task.

    ", + "ImportImageResult$Progress": "

    The progress of the task.

    ", + "ImportImageResult$StatusMessage": "

    A detailed status message of the import task.

    ", + "ImportImageResult$Status": "

    A brief status of the task.

    ", + "ImportImageTask$ImportTaskId": "

    The ID of the import image task.

    ", + "ImportImageTask$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageTask$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageTask$Platform": "

    The description string for the import image task.

    ", + "ImportImageTask$Hypervisor": "

    The target hypervisor for the import task.

    Valid values: xen

    ", + "ImportImageTask$Description": "

    A description of the import task.

    ", + "ImportImageTask$ImageId": "

    The ID of the Amazon Machine Image (AMI) of the imported virtual machine.

    ", + "ImportImageTask$Progress": "

    The percentage of progress of the import image task.

    ", + "ImportImageTask$StatusMessage": "

    A descriptive status message for the import image task.

    ", + "ImportImageTask$Status": "

    A brief status for the import image task.

    ", + "ImportInstanceLaunchSpecification$AdditionalInfo": "

    Reserved.

    ", + "ImportInstanceLaunchSpecification$SubnetId": "

    [EC2-VPC] The ID of the subnet in which to launch the instance.

    ", + "ImportInstanceLaunchSpecification$PrivateIpAddress": "

    [EC2-VPC] An available IP address from the IP address range of the subnet.

    ", + "ImportInstanceRequest$Description": "

    A description for the instance being imported.

    ", + "ImportInstanceTaskDetails$InstanceId": "

    The ID of the instance.

    ", + "ImportInstanceTaskDetails$Description": "

    A description of the task.

    ", + "ImportInstanceVolumeDetailItem$AvailabilityZone": "

    The Availability Zone where the resulting instance will reside.

    ", + "ImportInstanceVolumeDetailItem$Status": "

    The status of the import of this particular disk image.

    ", + "ImportInstanceVolumeDetailItem$StatusMessage": "

    The status information or errors related to the disk image.

    ", + "ImportInstanceVolumeDetailItem$Description": "

    A description of the task.

    ", + "ImportKeyPairRequest$KeyName": "

    A unique name for the key pair.

    ", + "ImportKeyPairResult$KeyName": "

    The key pair name you provided.

    ", + "ImportKeyPairResult$KeyFingerprint": "

    The MD5 public key fingerprint as specified in section 4 of RFC 4716.

    ", + "ImportSnapshotRequest$Description": "

    The description string for the import snapshot task.

    ", + "ImportSnapshotRequest$ClientToken": "

    Token to enable idempotency for VM import requests.

    ", + "ImportSnapshotRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportSnapshotResult$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotResult$Description": "

    A description of the import snapshot task.

    ", + "ImportSnapshotTask$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotTask$Description": "

    A description of the import snapshot task.

    ", + "ImportTaskIdList$member": null, + "ImportVolumeRequest$AvailabilityZone": "

    The Availability Zone for the resulting EBS volume.

    ", + "ImportVolumeRequest$Description": "

    A description of the volume.

    ", + "ImportVolumeTaskDetails$AvailabilityZone": "

    The Availability Zone where the resulting volume will reside.

    ", + "ImportVolumeTaskDetails$Description": "

    The description you provided when starting the import volume task.

    ", + "Instance$InstanceId": "

    The ID of the instance.

    ", + "Instance$ImageId": "

    The ID of the AMI used to launch the instance.

    ", + "Instance$PrivateDnsName": "

    The private DNS name assigned to the instance. This DNS name can only be used inside the Amazon EC2 network. This name is not available until the instance enters the running state.

    ", + "Instance$PublicDnsName": "

    The public DNS name assigned to the instance. This name is not available until the instance enters the running state.

    ", + "Instance$StateTransitionReason": "

    The reason for the most recent state transition. This might be an empty string.

    ", + "Instance$KeyName": "

    The name of the key pair, if this instance was launched with an associated key pair.

    ", + "Instance$KernelId": "

    The kernel associated with this instance.

    ", + "Instance$RamdiskId": "

    The RAM disk associated with this instance.

    ", + "Instance$SubnetId": "

    The ID of the subnet in which the instance is running.

    ", + "Instance$VpcId": "

    The ID of the VPC in which the instance is running.

    ", + "Instance$PrivateIpAddress": "

    The private IP address assigned to the instance.

    ", + "Instance$PublicIpAddress": "

    The public IP address assigned to the instance.

    ", + "Instance$RootDeviceName": "

    The root device name (for example, /dev/sda1 or /dev/xvda).

    ", + "Instance$SpotInstanceRequestId": "

    The ID of the Spot Instance request.

    ", + "Instance$ClientToken": "

    The idempotency token you provided when you launched the instance.

    ", + "Instance$SriovNetSupport": "

    Specifies whether enhanced networking is enabled.

    ", + "InstanceAttribute$InstanceId": "

    The ID of the instance.

    ", + "InstanceBlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$VirtualName": "

    The virtual device name.

    ", + "InstanceBlockDeviceMappingSpecification$NoDevice": "

    suppress the specified device included in the block device mapping.

    ", + "InstanceExportDetails$InstanceId": "

    The ID of the resource being exported.

    ", + "InstanceIdStringList$member": null, + "InstanceMonitoring$InstanceId": "

    The ID of the instance.

    ", + "InstanceNetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "InstanceNetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "InstanceNetworkInterface$Description": "

    The description.

    ", + "InstanceNetworkInterface$OwnerId": "

    The ID of the AWS account that created the network interface.

    ", + "InstanceNetworkInterface$MacAddress": "

    The MAC address.

    ", + "InstanceNetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "InstanceNetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceNetworkInterfaceAssociation$PublicIp": "

    The public IP address or Elastic IP address bound to the network interface.

    ", + "InstanceNetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "InstanceNetworkInterfaceAssociation$IpOwnerId": "

    The ID of the owner of the Elastic IP address.

    ", + "InstanceNetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterfaceSpecification$SubnetId": "

    The ID of the subnet associated with the network string. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$Description": "

    The description of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddress": "

    The private IP address of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstancePrivateIpAddress$PrivateIpAddress": "

    The private IP address of the network interface.

    ", + "InstancePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceStateChange$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "InstanceStatusEvent$Description": "

    A description of the event.

    After a scheduled event is completed, it can still be described for up to a week. If the event has been completed, this description starts with the following text: [Completed].

    ", + "InternetGateway$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "InternetGatewayAttachment$VpcId": "

    The ID of the VPC.

    ", + "IpPermission$IpProtocol": "

    The protocol.

    When you call DescribeSecurityGroups, the protocol value returned is the number. Exception: For TCP, UDP, and ICMP, the value returned is the name (for example, tcp, udp, or icmp). For a list of protocol numbers, see Protocol Numbers. (VPC only) When you call AuthorizeSecurityGroupIngress, you can use -1 to specify all.

    ", + "IpRange$CidrIp": "

    The CIDR range. You can either specify a CIDR range or a source security group, not both.

    ", + "KeyNameStringList$member": null, + "KeyPair$KeyName": "

    The name of the key pair.

    ", + "KeyPair$KeyFingerprint": "

    The SHA-1 digest of the DER encoded private key.

    ", + "KeyPair$KeyMaterial": "

    An unencrypted PEM encoded RSA private key.

    ", + "KeyPairInfo$KeyName": "

    The name of the key pair.

    ", + "KeyPairInfo$KeyFingerprint": "

    If you used CreateKeyPair to create the key pair, this is the SHA-1 digest of the DER encoded private key. If you used ImportKeyPair to provide AWS the public key, this is the MD5 public key fingerprint as specified in section 4 of RFC4716.

    ", + "LaunchPermission$UserId": "

    The AWS account ID.

    ", + "LaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "LaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "LaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "LaunchSpecification$AddressingType": "

    Deprecated.

    ", + "LaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "LaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "LaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    ", + "ModifyImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ModifyImageAttributeRequest$Attribute": "

    The name of the attribute to modify.

    ", + "ModifyImageAttributeRequest$Value": "

    The value of the attribute being modified. This is only valid when modifying the description attribute.

    ", + "ModifyInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ModifyInstanceAttributeRequest$Value": "

    A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute.

    ", + "ModifyNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ModifyReservedInstancesRequest$ClientToken": "

    A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

    ", + "ModifyReservedInstancesResult$ReservedInstancesModificationId": "

    The ID for the modification.

    ", + "ModifySnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ModifySubnetAttributeRequest$SubnetId": "

    The ID of the subnet.

    ", + "ModifyVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "ModifyVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "ModifyVpcEndpointRequest$VpcEndpointId": "

    The ID of the endpoint.

    ", + "ModifyVpcEndpointRequest$PolicyDocument": "

    A policy document to attach to the endpoint. The policy must be in valid JSON format.

    ", + "MoveAddressToVpcRequest$PublicIp": "

    The Elastic IP address.

    ", + "MoveAddressToVpcResult$AllocationId": "

    The allocation ID for the Elastic IP address.

    ", + "MovingAddressStatus$PublicIp": "

    The Elastic IP address.

    ", + "NetworkAcl$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAcl$VpcId": "

    The ID of the VPC for the network ACL.

    ", + "NetworkAclAssociation$NetworkAclAssociationId": "

    The ID of the association between a network ACL and a subnet.

    ", + "NetworkAclAssociation$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAclAssociation$SubnetId": "

    The ID of the subnet.

    ", + "NetworkAclEntry$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "NetworkAclEntry$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "NetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "NetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "NetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "NetworkInterface$AvailabilityZone": "

    The Availability Zone.

    ", + "NetworkInterface$Description": "

    A description.

    ", + "NetworkInterface$OwnerId": "

    The AWS account ID of the owner of the network interface.

    ", + "NetworkInterface$RequesterId": "

    The ID of the entity that launched the instance on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "NetworkInterface$MacAddress": "

    The MAC address.

    ", + "NetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "NetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "NetworkInterfaceAssociation$PublicIp": "

    The address of the Elastic IP address bound to the network interface.

    ", + "NetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "NetworkInterfaceAssociation$IpOwnerId": "

    The ID of the Elastic IP address owner.

    ", + "NetworkInterfaceAssociation$AllocationId": "

    The allocation ID.

    ", + "NetworkInterfaceAssociation$AssociationId": "

    The association ID.

    ", + "NetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceAttachment$InstanceId": "

    The ID of the instance.

    ", + "NetworkInterfaceAttachment$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "NetworkInterfaceAttachmentChanges$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceIdList$member": null, + "NetworkInterfacePrivateIpAddress$PrivateIpAddress": "

    The private IP address.

    ", + "NetworkInterfacePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "OwnerStringList$member": null, + "Placement$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "Placement$GroupName": "

    The name of the placement group the instance is in (for cluster compute instances).

    ", + "PlacementGroup$GroupName": "

    The name of the placement group.

    ", + "PlacementGroupStringList$member": null, + "PrefixList$PrefixListId": "

    The ID of the prefix.

    ", + "PrefixList$PrefixListName": "

    The name of the prefix.

    ", + "PrefixListId$PrefixListId": "

    The ID of the prefix.

    ", + "PrivateIpAddressSpecification$PrivateIpAddress": "

    The private IP addresses.

    ", + "PrivateIpAddressStringList$member": null, + "ProductCode$ProductCodeId": "

    The product code.

    ", + "ProductCodeStringList$member": null, + "ProductDescriptionList$member": null, + "PropagatingVgw$GatewayId": "

    The ID of the virtual private gateway (VGW).

    ", + "PublicIpStringList$member": null, + "PurchaseReservedInstancesOfferingRequest$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering to purchase.

    ", + "PurchaseReservedInstancesOfferingResult$ReservedInstancesId": "

    The IDs of the purchased Reserved Instances.

    ", + "Region$RegionName": "

    The name of the region.

    ", + "Region$Endpoint": "

    The region service endpoint.

    ", + "RegionNameStringList$member": null, + "RegisterImageRequest$ImageLocation": "

    The full path to your AMI manifest in Amazon S3 storage.

    ", + "RegisterImageRequest$Name": "

    A name for your AMI.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores(_)

    ", + "RegisterImageRequest$Description": "

    A description for your AMI.

    ", + "RegisterImageRequest$KernelId": "

    The ID of the kernel.

    ", + "RegisterImageRequest$RamdiskId": "

    The ID of the RAM disk.

    ", + "RegisterImageRequest$RootDeviceName": "

    The name of the root device (for example, /dev/sda1, or /dev/xvda).

    ", + "RegisterImageRequest$VirtualizationType": "

    The type of virtualization.

    Default: paravirtual

    ", + "RegisterImageRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking for the AMI and any instances that you launch from the AMI.

    There is no way to disable enhanced networking at this time.

    This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

    ", + "RegisterImageResult$ImageId": "

    The ID of the newly registered AMI.

    ", + "RejectVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "ReleaseAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "ReleaseAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. Required for EC2-VPC.

    ", + "ReplaceNetworkAclAssociationRequest$AssociationId": "

    The ID of the current association between the original network ACL and the subnet.

    ", + "ReplaceNetworkAclAssociationRequest$NetworkAclId": "

    The ID of the new network ACL to associate with the subnet.

    ", + "ReplaceNetworkAclAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReplaceNetworkAclEntryRequest$NetworkAclId": "

    The ID of the ACL.

    ", + "ReplaceNetworkAclEntryRequest$Protocol": "

    The IP protocol. You can specify all or -1 to mean all protocols.

    ", + "ReplaceNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "ReplaceRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "ReplaceRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. The value you provide must match the CIDR of an existing route in the table.

    ", + "ReplaceRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway.

    ", + "ReplaceRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "ReplaceRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "ReplaceRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "ReplaceRouteTableAssociationRequest$AssociationId": "

    The association ID.

    ", + "ReplaceRouteTableAssociationRequest$RouteTableId": "

    The ID of the new route table to associate with the subnet.

    ", + "ReplaceRouteTableAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReportInstanceStatusRequest$Description": "

    Descriptive text about the health state of your instance.

    ", + "RequestSpotFleetResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "RequestSpotInstancesRequest$SpotPrice": "

    The maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "RequestSpotInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstancesRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    Default: Instances are launched and terminated individually

    ", + "RequestSpotInstancesRequest$AvailabilityZoneGroup": "

    The user-specified name for a logical grouping of bids.

    When you specify an Availability Zone group in a Spot Instance request, all Spot instances in the request are launched in the same Availability Zone. Instance proximity is maintained with this parameter, but the choice of Availability Zone is not. The group applies only to bids for Spot Instances of the same instance type. Any additional Spot instance requests that are specified with the same Availability Zone group name are launched in that same Availability Zone, as long as at least one instance from the group is still active.

    If there is no active instance running in the Availability Zone group that you specify for a new Spot instance request (all instances are terminated, the bid is expired, or the bid falls below current market), then Amazon EC2 launches the instance in any Availability Zone where the constraint can be met. Consequently, the subsequent set of Spot instances could be placed in a different zone from the original request, even if you specified the same Availability Zone group.

    Default: Instances are launched in any available Availability Zone.

    ", + "Reservation$ReservationId": "

    The ID of the reservation.

    ", + "Reservation$OwnerId": "

    The ID of the AWS account that owns the reservation.

    ", + "Reservation$RequesterId": "

    The ID of the requester that launched the instances on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "ReservedInstances$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstances$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$AvailabilityZone": "

    The Availability Zone for the modified Reserved Instances.

    ", + "ReservedInstancesConfiguration$Platform": "

    The network platform of the modified Reserved Instances, which is either EC2-Classic or EC2-VPC.

    ", + "ReservedInstancesId$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesIdStringList$member": null, + "ReservedInstancesListing$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "ReservedInstancesListing$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesListing$StatusMessage": "

    The reason for the current status of the Reserved Instance listing. The response can be blank.

    ", + "ReservedInstancesListing$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModification$ReservedInstancesModificationId": "

    A unique ID for the Reserved Instance modification.

    ", + "ReservedInstancesModification$Status": "

    The status of the Reserved Instances modification request.

    ", + "ReservedInstancesModification$StatusMessage": "

    The reason for the status.

    ", + "ReservedInstancesModification$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModificationIdStringList$member": null, + "ReservedInstancesModificationResult$ReservedInstancesId": "

    The ID for the Reserved Instances that were created as part of the modification request. This field is only available when the modification is fulfilled.

    ", + "ReservedInstancesOffering$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering.

    ", + "ReservedInstancesOffering$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesOfferingIdStringList$member": null, + "ResetImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ResetInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ResetNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ResetNetworkInterfaceAttributeRequest$SourceDestCheck": "

    The source/destination checking attribute. Resets the value to true.

    ", + "ResetSnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ResourceIdList$member": null, + "RestorableByStringList$member": null, + "RestoreAddressToClassicRequest$PublicIp": "

    The Elastic IP address.

    ", + "RestoreAddressToClassicResult$PublicIp": "

    The Elastic IP address.

    ", + "RevokeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "RevokeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "RevokeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "RevokeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a security group in a nondefault VPC.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. For EC2-VPC, the source security group must be in the same VPC.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic, default VPC] The AWS account ID of the source security group. For EC2-VPC, the source security group must be in the same VPC. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. To revoke a specific rule for an IP protocol and port range, use a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "RevokeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "Route$DestinationCidrBlock": "

    The CIDR block used for the destination match.

    ", + "Route$DestinationPrefixListId": "

    The prefix of the AWS service.

    ", + "Route$GatewayId": "

    The ID of a gateway attached to your VPC.

    ", + "Route$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "Route$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "Route$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Route$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "RouteTable$RouteTableId": "

    The ID of the route table.

    ", + "RouteTable$VpcId": "

    The ID of the VPC.

    ", + "RouteTableAssociation$RouteTableAssociationId": "

    The ID of the association between a route table and a subnet.

    ", + "RouteTableAssociation$RouteTableId": "

    The ID of the route table.

    ", + "RouteTableAssociation$SubnetId": "

    The ID of the subnet. A subnet ID is not returned for an implicit association.

    ", + "RunInstancesRequest$ImageId": "

    The ID of the AMI, which you can get by calling DescribeImages.

    ", + "RunInstancesRequest$KeyName": "

    The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair.

    If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.

    ", + "RunInstancesRequest$UserData": "

    The Base64-encoded MIME user data for the instances.

    ", + "RunInstancesRequest$KernelId": "

    The ID of the kernel.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$RamdiskId": "

    The ID of the RAM disk.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$SubnetId": "

    [EC2-VPC] The ID of the subnet to launch the instance into.

    ", + "RunInstancesRequest$PrivateIpAddress": "

    [EC2-VPC] The primary IP address. You must specify a value from the IP address range of the subnet.

    Only one private IP address can be designated as primary. Therefore, you can't specify this parameter if PrivateIpAddresses.n.Primary is set to true and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address.

    Default: We select an IP address from the IP address range of the subnet.

    ", + "RunInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

    Constraints: Maximum 64 ASCII characters

    ", + "RunInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "S3Storage$Bucket": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "S3Storage$Prefix": "

    The beginning of the file name of the AMI.

    ", + "S3Storage$AWSAccessKeyId": "

    The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance in Best Practices for Managing AWS Access Keys.

    ", + "S3Storage$UploadPolicySignature": "

    The signature of the Base64 encoded JSON document.

    ", + "SecurityGroup$OwnerId": "

    The AWS account ID of the owner of the security group.

    ", + "SecurityGroup$GroupName": "

    The name of the security group.

    ", + "SecurityGroup$GroupId": "

    The ID of the security group.

    ", + "SecurityGroup$Description": "

    A description of the security group.

    ", + "SecurityGroup$VpcId": "

    [EC2-VPC] The ID of the VPC for the security group.

    ", + "SecurityGroupIdStringList$member": null, + "SecurityGroupStringList$member": null, + "Snapshot$SnapshotId": "

    The ID of the snapshot. Each snapshot receives a unique identifier when it is created.

    ", + "Snapshot$VolumeId": "

    The ID of the volume that was used to create the snapshot.

    ", + "Snapshot$StateMessage": "

    Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "Snapshot$Progress": "

    The progress of the snapshot, as a percentage.

    ", + "Snapshot$OwnerId": "

    The AWS account ID of the EBS snapshot owner.

    ", + "Snapshot$Description": "

    The description for the snapshot.

    ", + "Snapshot$OwnerAlias": "

    The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.

    ", + "Snapshot$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the parent volume.

    ", + "Snapshot$DataEncryptionKeyId": "

    The data encryption key identifier for the snapshot. This value is a unique identifier that corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. Because data encryption keys are inherited by volumes created from snapshots, and vice versa, if snapshots share the same data encryption key identifier, then they belong to the same volume/snapshot lineage. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "SnapshotDetail$Description": "

    A description for the snapshot.

    ", + "SnapshotDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotDetail$Url": "

    The URL used to access the disk image.

    ", + "SnapshotDetail$DeviceName": "

    The block device mapping for the snapshot.

    ", + "SnapshotDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotDetail$Progress": "

    The percentage of progress for the task.

    ", + "SnapshotDetail$StatusMessage": "

    A detailed status message for the snapshot creation.

    ", + "SnapshotDetail$Status": "

    A brief status of the snapshot creation.

    ", + "SnapshotDiskContainer$Description": "

    The description of the disk image being imported.

    ", + "SnapshotDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "SnapshotDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. It can either be a https URL (https://..) or an Amazon S3 URL (s3://..).

    ", + "SnapshotIdStringList$member": null, + "SnapshotTaskDetail$Description": "

    The description of the snapshot.

    ", + "SnapshotTaskDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$Url": "

    The URL of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotTaskDetail$Progress": "

    The percentage of completion for the import snapshot task.

    ", + "SnapshotTaskDetail$StatusMessage": "

    A detailed status message for the import snapshot task.

    ", + "SnapshotTaskDetail$Status": "

    A brief status for the import snapshot task.

    ", + "SpotDatafeedSubscription$OwnerId": "

    The AWS account ID of the account.

    ", + "SpotDatafeedSubscription$Bucket": "

    The Amazon S3 bucket where the Spot instance data feed is located.

    ", + "SpotDatafeedSubscription$Prefix": "

    The prefix that is prepended to data feed files.

    ", + "SpotFleetLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "SpotFleetLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "SpotFleetLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "SpotFleetLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "SpotFleetLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "SpotFleetLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "SpotFleetLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instances.

    ", + "SpotFleetLaunchSpecification$SpotPrice": "

    The bid price per unit hour for the specified instance type. If this value is not specified, the default is the Spot bid price specified for the fleet. To determine the bid price per unit hour, divide the Spot bid price by the value of WeightedCapacity.

    ", + "SpotFleetRequestConfig$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "SpotFleetRequestConfigData$ClientToken": "

    A unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "SpotFleetRequestConfigData$SpotPrice": "

    The bid price per unit hour.

    ", + "SpotFleetRequestConfigData$IamFleetRole": "

    Grants the Spot fleet permission to terminate Spot instances on your behalf when you cancel its Spot fleet request using CancelSpotFleetRequests or when the Spot fleet request expires, if you set terminateInstancesWithExpiration.

    ", + "SpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "SpotInstanceRequest$SpotPrice": "

    The maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "SpotInstanceRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    ", + "SpotInstanceRequest$AvailabilityZoneGroup": "

    The Availability Zone group. If you specify the same Availability Zone group for all Spot instance requests, all Spot instances are launched in the same Availability Zone.

    ", + "SpotInstanceRequest$InstanceId": "

    The instance ID, if an instance has been launched to fulfill the Spot instance request.

    ", + "SpotInstanceRequest$LaunchedAvailabilityZone": "

    The Availability Zone in which the bid is launched.

    ", + "SpotInstanceRequestIdList$member": null, + "SpotInstanceStateFault$Code": "

    The reason code for the Spot instance state change.

    ", + "SpotInstanceStateFault$Message": "

    The message for the Spot instance state change.

    ", + "SpotInstanceStatus$Code": "

    The status code.

    ", + "SpotInstanceStatus$Message": "

    The description for the status code.

    ", + "SpotPlacement$AvailabilityZone": "

    The Availability Zone.

    ", + "SpotPlacement$GroupName": "

    The name of the placement group (for cluster instances).

    ", + "SpotPrice$SpotPrice": "

    The maximum price (bid) that you are willing to pay for a Spot instance.

    ", + "SpotPrice$AvailabilityZone": "

    The Availability Zone.

    ", + "StartInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "StateReason$Code": "

    The reason code for the state change.

    ", + "StateReason$Message": "

    The message for the state change.

    • Server.SpotInstanceTermination: A Spot Instance was terminated due to an increase in the market price.

    • Server.InternalError: An internal error occurred during instance launch, resulting in termination.

    • Server.InsufficientInstanceCapacity: There was insufficient instance capacity to satisfy the launch request.

    • Client.InternalError: A client error caused the instance to terminate on launch.

    • Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown -h command from the instance.

    • Client.UserInitiatedShutdown: The instance was shut down using the Amazon EC2 API.

    • Client.VolumeLimitExceeded: The volume limit was exceeded.

    • Client.InvalidSnapshot.NotFound: The specified snapshot was not found.

    ", + "Subnet$SubnetId": "

    The ID of the subnet.

    ", + "Subnet$VpcId": "

    The ID of the VPC the subnet is in.

    ", + "Subnet$CidrBlock": "

    The CIDR block assigned to the subnet.

    ", + "Subnet$AvailabilityZone": "

    The Availability Zone of the subnet.

    ", + "SubnetIdStringList$member": null, + "Tag$Key": "

    The key of the tag.

    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:

    ", + "Tag$Value": "

    The value of the tag.

    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.

    ", + "TagDescription$ResourceId": "

    The ID of the resource. For example, ami-1a2b3c4d.

    ", + "TagDescription$Key": "

    The tag key.

    ", + "TagDescription$Value": "

    The tag value.

    ", + "UnassignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "UnsuccessfulItem$ResourceId": "

    The ID of the resource.

    ", + "UnsuccessfulItemError$Code": "

    The error code.

    ", + "UnsuccessfulItemError$Message": "

    The error message accompanying the error code.

    ", + "UserBucket$S3Bucket": "

    The name of the S3 bucket where the disk image is located.

    ", + "UserBucket$S3Key": "

    The key for the disk image.

    ", + "UserBucketDetails$S3Bucket": "

    The S3 bucket from which the disk image was created.

    ", + "UserBucketDetails$S3Key": "

    The key from which the disk image was created.

    ", + "UserData$Data": "

    The Base64-encoded MIME user data for the instance.

    ", + "UserGroupStringList$member": null, + "UserIdGroupPair$UserId": "

    The ID of an AWS account. EC2-Classic only.

    ", + "UserIdGroupPair$GroupName": "

    The name of the security group. In a request, use this parameter for a security group in EC2-Classic or a default VPC only. For a security group in a nondefault VPC, use GroupId.

    ", + "UserIdGroupPair$GroupId": "

    The ID of the security group.

    ", + "UserIdStringList$member": null, + "ValueStringList$member": null, + "VgwTelemetry$OutsideIpAddress": "

    The Internet-routable IP address of the virtual private gateway's outside interface.

    ", + "VgwTelemetry$StatusMessage": "

    If an error occurs, a description of the error.

    ", + "Volume$VolumeId": "

    The ID of the volume.

    ", + "Volume$SnapshotId": "

    The snapshot from which the volume was created, if applicable.

    ", + "Volume$AvailabilityZone": "

    The Availability Zone for the volume.

    ", + "Volume$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.

    ", + "VolumeAttachment$VolumeId": "

    The ID of the volume.

    ", + "VolumeAttachment$InstanceId": "

    The ID of the instance.

    ", + "VolumeAttachment$Device": "

    The device name.

    ", + "VolumeIdStringList$member": null, + "VolumeStatusAction$Code": "

    The code identifying the operation, for example, enable-volume-io.

    ", + "VolumeStatusAction$Description": "

    A description of the operation.

    ", + "VolumeStatusAction$EventType": "

    The event type associated with this operation.

    ", + "VolumeStatusAction$EventId": "

    The ID of the event associated with this operation.

    ", + "VolumeStatusDetails$Status": "

    The intended status of the volume status.

    ", + "VolumeStatusEvent$EventType": "

    The type of this event.

    ", + "VolumeStatusEvent$Description": "

    A description of the event.

    ", + "VolumeStatusEvent$EventId": "

    The ID of this event.

    ", + "VolumeStatusItem$VolumeId": "

    The volume ID.

    ", + "VolumeStatusItem$AvailabilityZone": "

    The Availability Zone of the volume.

    ", + "Vpc$VpcId": "

    The ID of the VPC.

    ", + "Vpc$CidrBlock": "

    The CIDR block for the VPC.

    ", + "Vpc$DhcpOptionsId": "

    The ID of the set of DHCP options you've associated with the VPC (or default if the default options are associated with the VPC).

    ", + "VpcAttachment$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLink$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLinkIdList$member": null, + "VpcEndpoint$VpcEndpointId": "

    The ID of the VPC endpoint.

    ", + "VpcEndpoint$VpcId": "

    The ID of the VPC to which the endpoint is associated.

    ", + "VpcEndpoint$ServiceName": "

    The name of the AWS service to which the endpoint is associated.

    ", + "VpcEndpoint$PolicyDocument": "

    The policy document associated with the endpoint.

    ", + "VpcIdStringList$member": null, + "VpcPeeringConnection$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "VpcPeeringConnectionStateReason$Message": "

    A message that provides more information about the status, if applicable.

    ", + "VpcPeeringConnectionVpcInfo$CidrBlock": "

    The CIDR block for the VPC.

    ", + "VpcPeeringConnectionVpcInfo$OwnerId": "

    The AWS account ID of the VPC owner.

    ", + "VpcPeeringConnectionVpcInfo$VpcId": "

    The ID of the VPC.

    ", + "VpnConnection$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "VpnConnection$CustomerGatewayConfiguration": "

    The configuration information for the VPN connection's customer gateway (in the native XML format). This element is always present in the CreateVpnConnection response; however, it's present in the DescribeVpnConnections response only if the VPN connection is in the pending or available state.

    ", + "VpnConnection$CustomerGatewayId": "

    The ID of the customer gateway at your end of the VPN connection.

    ", + "VpnConnection$VpnGatewayId": "

    The ID of the virtual private gateway at the AWS side of the VPN connection.

    ", + "VpnConnectionIdStringList$member": null, + "VpnGateway$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "VpnGateway$AvailabilityZone": "

    The Availability Zone where the virtual private gateway was created.

    ", + "VpnGatewayIdStringList$member": null, + "VpnStaticRoute$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer data center.

    ", + "ZoneNameStringList$member": null, + "NewDhcpConfiguration$Key": null, + "RequestSpotLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "RequestSpotLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "RequestSpotLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "RequestSpotLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "RequestSpotLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "RequestSpotLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "RequestSpotLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    " + } + }, + "Subnet": { + "base": "

    Describes a subnet.

    ", + "refs": { + "CreateSubnetResult$Subnet": "

    Information about the subnet.

    ", + "SubnetList$member": null + } + }, + "SubnetIdStringList": { + "base": null, + "refs": { + "DescribeSubnetsRequest$SubnetIds": "

    One or more subnet IDs.

    Default: Describes all your subnets.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DescribeSubnetsResult$Subnets": "

    Information about one or more subnets.

    " + } + }, + "SubnetState": { + "base": null, + "refs": { + "Subnet$State": "

    The current state of the subnet.

    " + } + }, + "SummaryStatus": { + "base": null, + "refs": { + "InstanceStatusSummary$Status": "

    The status.

    " + } + }, + "Tag": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagDescription": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagDescriptionList$member": null + } + }, + "TagDescriptionList": { + "base": null, + "refs": { + "DescribeTagsResult$Tags": "

    A list of tags.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ClassicLinkInstance$Tags": "

    Any tags assigned to the instance.

    ", + "ConversionTask$Tags": "

    Any tags assigned to the task.

    ", + "CreateTagsRequest$Tags": "

    One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.

    ", + "CustomerGateway$Tags": "

    Any tags assigned to the customer gateway.

    ", + "DeleteTagsRequest$Tags": "

    One or more tags to delete. If you omit the value parameter, we delete the tag regardless of its value. If you specify this parameter with an empty string as the value, we delete the key only if its value is an empty string.

    ", + "DhcpOptions$Tags": "

    Any tags assigned to the DHCP options set.

    ", + "Image$Tags": "

    Any tags assigned to the image.

    ", + "Instance$Tags": "

    Any tags assigned to the instance.

    ", + "InternetGateway$Tags": "

    Any tags assigned to the Internet gateway.

    ", + "NetworkAcl$Tags": "

    Any tags assigned to the network ACL.

    ", + "NetworkInterface$TagSet": "

    Any tags assigned to the network interface.

    ", + "ReservedInstances$Tags": "

    Any tags assigned to the resource.

    ", + "ReservedInstancesListing$Tags": "

    Any tags assigned to the resource.

    ", + "RouteTable$Tags": "

    Any tags assigned to the route table.

    ", + "SecurityGroup$Tags": "

    Any tags assigned to the security group.

    ", + "Snapshot$Tags": "

    Any tags assigned to the snapshot.

    ", + "SpotInstanceRequest$Tags": "

    Any tags assigned to the resource.

    ", + "Subnet$Tags": "

    Any tags assigned to the subnet.

    ", + "Volume$Tags": "

    Any tags assigned to the volume.

    ", + "Vpc$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcClassicLink$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcPeeringConnection$Tags": "

    Any tags assigned to the resource.

    ", + "VpnConnection$Tags": "

    Any tags assigned to the VPN connection.

    ", + "VpnGateway$Tags": "

    Any tags assigned to the virtual private gateway.

    " + } + }, + "TelemetryStatus": { + "base": null, + "refs": { + "VgwTelemetry$Status": "

    The status of the VPN tunnel.

    " + } + }, + "Tenancy": { + "base": null, + "refs": { + "CreateVpcRequest$InstanceTenancy": "

    The supported tenancy options for instances launched into the VPC. A value of default means that instances can be launched with any tenancy; a value of dedicated means all instances launched into the VPC are launched as dedicated tenancy instances regardless of the tenancy assigned to the instance at launch. Dedicated tenancy instances run on single-tenant hardware.

    Default: default

    ", + "DescribeReservedInstancesOfferingsRequest$InstanceTenancy": "

    The tenancy of the Reserved Instance offering. A Reserved Instance with dedicated tenancy runs on single-tenant hardware and can only be launched within a VPC.

    Default: default

    ", + "Placement$Tenancy": "

    The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware.

    ", + "ReservedInstances$InstanceTenancy": "

    The tenancy of the reserved instance.

    ", + "ReservedInstancesOffering$InstanceTenancy": "

    The tenancy of the reserved instance.

    ", + "Vpc$InstanceTenancy": "

    The allowed tenancy of instances launched into the VPC.

    " + } + }, + "TerminateInstancesRequest": { + "base": null, + "refs": { + } + }, + "TerminateInstancesResult": { + "base": null, + "refs": { + } + }, + "TrafficType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$TrafficType": "

    The type of traffic to log.

    ", + "FlowLog$TrafficType": "

    The type of traffic captured for the flow log.

    " + } + }, + "UnassignPrivateIpAddressesRequest": { + "base": null, + "refs": { + } + }, + "UnmonitorInstancesRequest": { + "base": null, + "refs": { + } + }, + "UnmonitorInstancesResult": { + "base": null, + "refs": { + } + }, + "UnsuccessfulItem": { + "base": "

    Information about items that were not successfully processed in a batch call.

    ", + "refs": { + "UnsuccessfulItemSet$member": null + } + }, + "UnsuccessfulItemError": { + "base": "

    Information about the error that occurred. For more information about errors, see Error Codes.

    ", + "refs": { + "UnsuccessfulItem$Error": "

    Information about the error.

    " + } + }, + "UnsuccessfulItemSet": { + "base": null, + "refs": { + "CreateFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be created successfully.

    ", + "DeleteFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be deleted successfully.

    ", + "DeleteVpcEndpointsResult$Unsuccessful": "

    Information about the endpoints that were not successfully deleted.

    " + } + }, + "UserBucket": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "ImageDiskContainer$UserBucket": "

    The S3 bucket for the disk image.

    ", + "SnapshotDiskContainer$UserBucket": null + } + }, + "UserBucketDetails": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "SnapshotDetail$UserBucket": null, + "SnapshotTaskDetail$UserBucket": "

    The S3 bucket for the disk image.

    " + } + }, + "UserData": { + "base": "

    Describes the user data to be made available to an instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to be made available to the instance.

    " + } + }, + "UserGroupStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserGroups": "

    One or more user groups. This is only valid when modifying the launchPermission attribute.

    " + } + }, + "UserIdGroupPair": { + "base": "

    Describes a security group and AWS account ID pair.

    ", + "refs": { + "UserIdGroupPairList$member": null + } + }, + "UserIdGroupPairList": { + "base": null, + "refs": { + "IpPermission$UserIdGroupPairs": "

    One or more security group and AWS account ID pairs.

    " + } + }, + "UserIdStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserIds": "

    One or more AWS account IDs. This is only valid when modifying the launchPermission attribute.

    ", + "ModifySnapshotAttributeRequest$UserIds": "

    The account ID to modify for the snapshot.

    " + } + }, + "ValueStringList": { + "base": null, + "refs": { + "CancelSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "CreateFlowLogsRequest$ResourceIds": "

    One or more subnet, network interface, or VPC IDs.

    ", + "CreateFlowLogsResult$FlowLogIds": "

    The IDs of the flow logs.

    ", + "CreateVpcEndpointRequest$RouteTableIds": "

    One or more route table IDs.

    ", + "DeleteFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DeleteVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DescribeInternetGatewaysRequest$InternetGatewayIds": "

    One or more Internet gateway IDs.

    Default: Describes all your Internet gateways.

    ", + "DescribeMovingAddressesRequest$PublicIps": "

    One or more Elastic IP addresses.

    ", + "DescribeNetworkAclsRequest$NetworkAclIds": "

    One or more network ACL IDs.

    Default: Describes all your network ACLs.

    ", + "DescribePrefixListsRequest$PrefixListIds": "

    One or more prefix list IDs.

    ", + "DescribeRouteTablesRequest$RouteTableIds": "

    One or more route table IDs.

    Default: Describes all your route tables.

    ", + "DescribeSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "DescribeVpcEndpointServicesResult$ServiceNames": "

    A list of supported AWS services.

    ", + "DescribeVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeVpcPeeringConnectionsRequest$VpcPeeringConnectionIds": "

    One or more VPC peering connection IDs.

    Default: Describes all your VPC peering connections.

    ", + "Filter$Values": "

    One or more filter values. Filter values are case-sensitive.

    ", + "ModifyVpcEndpointRequest$AddRouteTableIds": "

    One or more route tables IDs to associate with the endpoint.

    ", + "ModifyVpcEndpointRequest$RemoveRouteTableIds": "

    One or more route table IDs to disassociate from the endpoint.

    ", + "PrefixList$Cidrs": "

    The IP address range of the AWS service.

    ", + "VpcEndpoint$RouteTableIds": "

    One or more route tables associated with the endpoint.

    ", + "NewDhcpConfiguration$Values": null, + "RequestSpotLaunchSpecification$SecurityGroups": null, + "RequestSpotLaunchSpecification$SecurityGroupIds": null + } + }, + "VgwTelemetry": { + "base": "

    Describes telemetry for a VPN tunnel.

    ", + "refs": { + "VgwTelemetryList$member": null + } + }, + "VgwTelemetryList": { + "base": null, + "refs": { + "VpnConnection$VgwTelemetry": "

    Information about the VPN tunnel.

    " + } + }, + "VirtualizationType": { + "base": null, + "refs": { + "Image$VirtualizationType": "

    The type of virtualization of the AMI.

    ", + "Instance$VirtualizationType": "

    The virtualization type of the instance.

    " + } + }, + "Volume": { + "base": "

    Describes a volume.

    ", + "refs": { + "VolumeList$member": null + } + }, + "VolumeAttachment": { + "base": "

    Describes volume attachment details.

    ", + "refs": { + "VolumeAttachmentList$member": null + } + }, + "VolumeAttachmentList": { + "base": null, + "refs": { + "Volume$Attachments": "

    Information about the volume attachments.

    " + } + }, + "VolumeAttachmentState": { + "base": null, + "refs": { + "VolumeAttachment$State": "

    The attachment state of the volume.

    " + } + }, + "VolumeAttributeName": { + "base": null, + "refs": { + "DescribeVolumeAttributeRequest$Attribute": "

    The instance attribute.

    " + } + }, + "VolumeDetail": { + "base": "

    Describes an EBS volume.

    ", + "refs": { + "DiskImage$Volume": "

    Information about the volume.

    ", + "ImportVolumeRequest$Volume": "

    The volume size.

    " + } + }, + "VolumeIdStringList": { + "base": null, + "refs": { + "DescribeVolumeStatusRequest$VolumeIds": "

    One or more volume IDs.

    Default: Describes all your volumes.

    ", + "DescribeVolumesRequest$VolumeIds": "

    One or more volume IDs.

    " + } + }, + "VolumeList": { + "base": null, + "refs": { + "DescribeVolumesResult$Volumes": "

    Information about the volumes.

    " + } + }, + "VolumeState": { + "base": null, + "refs": { + "Volume$State": "

    The volume state.

    " + } + }, + "VolumeStatusAction": { + "base": "

    Describes a volume status operation code.

    ", + "refs": { + "VolumeStatusActionsList$member": null + } + }, + "VolumeStatusActionsList": { + "base": null, + "refs": { + "VolumeStatusItem$Actions": "

    The details of the operation.

    " + } + }, + "VolumeStatusDetails": { + "base": "

    Describes a volume status.

    ", + "refs": { + "VolumeStatusDetailsList$member": null + } + }, + "VolumeStatusDetailsList": { + "base": null, + "refs": { + "VolumeStatusInfo$Details": "

    The details of the volume status.

    " + } + }, + "VolumeStatusEvent": { + "base": "

    Describes a volume status event.

    ", + "refs": { + "VolumeStatusEventsList$member": null + } + }, + "VolumeStatusEventsList": { + "base": null, + "refs": { + "VolumeStatusItem$Events": "

    A list of events associated with the volume.

    " + } + }, + "VolumeStatusInfo": { + "base": "

    Describes the status of a volume.

    ", + "refs": { + "VolumeStatusItem$VolumeStatus": "

    The volume status.

    " + } + }, + "VolumeStatusInfoStatus": { + "base": null, + "refs": { + "VolumeStatusInfo$Status": "

    The status of the volume.

    " + } + }, + "VolumeStatusItem": { + "base": "

    Describes the volume status.

    ", + "refs": { + "VolumeStatusList$member": null + } + }, + "VolumeStatusList": { + "base": null, + "refs": { + "DescribeVolumeStatusResult$VolumeStatuses": "

    A list of volumes.

    " + } + }, + "VolumeStatusName": { + "base": null, + "refs": { + "VolumeStatusDetails$Name": "

    The name of the volume status.

    " + } + }, + "VolumeType": { + "base": null, + "refs": { + "CreateVolumeRequest$VolumeType": "

    The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.

    Default: standard

    ", + "EbsBlockDevice$VolumeType": "

    The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes.

    Default: standard

    ", + "Volume$VolumeType": "

    The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.

    " + } + }, + "Vpc": { + "base": "

    Describes a VPC.

    ", + "refs": { + "CreateVpcResult$Vpc": "

    Information about the VPC.

    ", + "VpcList$member": null + } + }, + "VpcAttachment": { + "base": "

    Describes an attachment between a virtual private gateway and a VPC.

    ", + "refs": { + "AttachVpnGatewayResult$VpcAttachment": "

    Information about the attachment.

    ", + "VpcAttachmentList$member": null + } + }, + "VpcAttachmentList": { + "base": null, + "refs": { + "VpnGateway$VpcAttachments": "

    Any VPCs attached to the virtual private gateway.

    " + } + }, + "VpcAttributeName": { + "base": null, + "refs": { + "DescribeVpcAttributeRequest$Attribute": "

    The VPC attribute.

    " + } + }, + "VpcClassicLink": { + "base": "

    Describes whether a VPC is enabled for ClassicLink.

    ", + "refs": { + "VpcClassicLinkList$member": null + } + }, + "VpcClassicLinkIdList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkRequest$VpcIds": "

    One or more VPCs for which you want to describe the ClassicLink status.

    " + } + }, + "VpcClassicLinkList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkResult$Vpcs": "

    The ClassicLink status of one or more VPCs.

    " + } + }, + "VpcEndpoint": { + "base": "

    Describes a VPC endpoint.

    ", + "refs": { + "CreateVpcEndpointResult$VpcEndpoint": "

    Information about the endpoint.

    ", + "VpcEndpointSet$member": null + } + }, + "VpcEndpointSet": { + "base": null, + "refs": { + "DescribeVpcEndpointsResult$VpcEndpoints": "

    Information about the endpoints.

    " + } + }, + "VpcIdStringList": { + "base": null, + "refs": { + "DescribeVpcsRequest$VpcIds": "

    One or more VPC IDs.

    Default: Describes all your VPCs.

    " + } + }, + "VpcList": { + "base": null, + "refs": { + "DescribeVpcsResult$Vpcs": "

    Information about one or more VPCs.

    " + } + }, + "VpcPeeringConnection": { + "base": "

    Describes a VPC peering connection.

    ", + "refs": { + "AcceptVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "CreateVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "VpcPeeringConnectionList$member": null + } + }, + "VpcPeeringConnectionList": { + "base": null, + "refs": { + "DescribeVpcPeeringConnectionsResult$VpcPeeringConnections": "

    Information about the VPC peering connections.

    " + } + }, + "VpcPeeringConnectionStateReason": { + "base": "

    Describes the status of a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$Status": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionStateReasonCode": { + "base": null, + "refs": { + "VpcPeeringConnectionStateReason$Code": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionVpcInfo": { + "base": "

    Describes a VPC in a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$AccepterVpcInfo": "

    The information of the peer VPC.

    ", + "VpcPeeringConnection$RequesterVpcInfo": "

    The information of the requester VPC.

    " + } + }, + "VpcState": { + "base": null, + "refs": { + "Vpc$State": "

    The current state of the VPC.

    " + } + }, + "VpnConnection": { + "base": "

    Describes a VPN connection.

    ", + "refs": { + "CreateVpnConnectionResult$VpnConnection": "

    Information about the VPN connection.

    ", + "VpnConnectionList$member": null + } + }, + "VpnConnectionIdStringList": { + "base": null, + "refs": { + "DescribeVpnConnectionsRequest$VpnConnectionIds": "

    One or more VPN connection IDs.

    Default: Describes your VPN connections.

    " + } + }, + "VpnConnectionList": { + "base": null, + "refs": { + "DescribeVpnConnectionsResult$VpnConnections": "

    Information about one or more VPN connections.

    " + } + }, + "VpnConnectionOptions": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "VpnConnection$Options": "

    The VPN connection options.

    " + } + }, + "VpnConnectionOptionsSpecification": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "CreateVpnConnectionRequest$Options": "

    Indicates whether the VPN connection requires static routes. If you are creating a VPN connection for a device that does not support BGP, you must specify true.

    Default: false

    " + } + }, + "VpnGateway": { + "base": "

    Describes a virtual private gateway.

    ", + "refs": { + "CreateVpnGatewayResult$VpnGateway": "

    Information about the virtual private gateway.

    ", + "VpnGatewayList$member": null + } + }, + "VpnGatewayIdStringList": { + "base": null, + "refs": { + "DescribeVpnGatewaysRequest$VpnGatewayIds": "

    One or more virtual private gateway IDs.

    Default: Describes all your virtual private gateways.

    " + } + }, + "VpnGatewayList": { + "base": null, + "refs": { + "DescribeVpnGatewaysResult$VpnGateways": "

    Information about one or more virtual private gateways.

    " + } + }, + "VpnState": { + "base": null, + "refs": { + "VpnConnection$State": "

    The current state of the VPN connection.

    ", + "VpnGateway$State": "

    The current state of the virtual private gateway.

    ", + "VpnStaticRoute$State": "

    The current state of the static route.

    " + } + }, + "VpnStaticRoute": { + "base": "

    Describes a static route for a VPN connection.

    ", + "refs": { + "VpnStaticRouteList$member": null + } + }, + "VpnStaticRouteList": { + "base": null, + "refs": { + "VpnConnection$Routes": "

    The static routes associated with the VPN connection.

    " + } + }, + "VpnStaticRouteSource": { + "base": null, + "refs": { + "VpnStaticRoute$Source": "

    Indicates how the routes were provided.

    " + } + }, + "ZoneNameStringList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesRequest$ZoneNames": "

    The names of one or more Availability Zones.

    " + } + }, + "NewDhcpConfigurationList": { + "base": null, + "refs": { + "CreateDhcpOptionsRequest$DhcpConfigurations": "

    A DHCP configuration option.

    " + } + }, + "NewDhcpConfiguration": { + "base": null, + "refs": { + "NewDhcpConfigurationList$member": null + } + }, + "DhcpConfigurationValueList": { + "base": null, + "refs": { + "DhcpConfiguration$Values": "

    One or more values for the DHCP option.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "ImportKeyPairRequest$PublicKeyMaterial": "

    The public key. You must base64 encode the public key material before sending it to AWS.

    ", + "S3Storage$UploadPolicy": "

    A Base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission to upload items into Amazon S3 on your behalf.

    ", + "BlobAttributeValue$Value": null + } + }, + "BlobAttributeValue": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$UserData": "

    Changes the instance's user data to the specified value.

    " + } + }, + "RequestSpotLaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "RequestSpotInstancesRequest$LaunchSpecification": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,125 @@ +{ + "pagination": { + "DescribeAccountAttributes": { + "result_key": "AccountAttributes" + }, + "DescribeAddresses": { + "result_key": "Addresses" + }, + "DescribeAvailabilityZones": { + "result_key": "AvailabilityZones" + }, + "DescribeBundleTasks": { + "result_key": "BundleTasks" + }, + "DescribeConversionTasks": { + "result_key": "ConversionTasks" + }, + "DescribeCustomerGateways": { + "result_key": "CustomerGateways" + }, + "DescribeDhcpOptions": { + "result_key": "DhcpOptions" + }, + "DescribeExportTasks": { + "result_key": "ExportTasks" + }, + "DescribeImages": { + "result_key": "Images" + }, + "DescribeInstanceStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InstanceStatuses" + }, + "DescribeInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Reservations" + }, + "DescribeInternetGateways": { + "result_key": "InternetGateways" + }, + "DescribeKeyPairs": { + "result_key": "KeyPairs" + }, + "DescribeNetworkAcls": { + "result_key": "NetworkAcls" + }, + "DescribeNetworkInterfaces": { + "result_key": "NetworkInterfaces" + }, + "DescribePlacementGroups": { + "result_key": "PlacementGroups" + }, + "DescribeRegions": { + "result_key": "Regions" + }, + 
"DescribeReservedInstances": { + "result_key": "ReservedInstances" + }, + "DescribeReservedInstancesListings": { + "result_key": "ReservedInstancesListings" + }, + "DescribeReservedInstancesOfferings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ReservedInstancesOfferings" + }, + "DescribeReservedInstancesModifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "ReservedInstancesModifications" + }, + "DescribeRouteTables": { + "result_key": "RouteTables" + }, + "DescribeSecurityGroups": { + "result_key": "SecurityGroups" + }, + "DescribeSnapshots": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Snapshots" + }, + "DescribeSpotInstanceRequests": { + "result_key": "SpotInstanceRequests" + }, + "DescribeSpotPriceHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SpotPriceHistory" + }, + "DescribeSubnets": { + "result_key": "Subnets" + }, + "DescribeTags": { + "result_key": "Tags" + }, + "DescribeVolumeStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "VolumeStatuses" + }, + "DescribeVolumes": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Volumes" + }, + "DescribeVpcs": { + "result_key": "Vpcs" + }, + "DescribeVpnConnections": { + "result_key": "VpnConnections" + }, + "DescribeVpnGateways": { + "result_key": "VpnGateways" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-04-15/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,494 @@ +{ + "version": 2, + "waiters": { + "InstanceExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeInstances", + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInstanceIDNotFound", + "state": "retry" + } + ] + }, + "BundleTaskComplete": { + "delay": 15, + "operation": "DescribeBundleTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "complete", + "matcher": "pathAll", + "state": "success", + "argument": "BundleTasks[].State" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "BundleTasks[].State" + } + ] + }, + "ConversionTaskCancelled": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskCompleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelled", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelling", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskDeleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "CustomerGatewayAvailable": { + "delay": 15, + "operation": "DescribeCustomerGateways", + "maxAttempts": 40, + "acceptors": [ + { + 
"expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + } + ] + }, + "ExportTaskCancelled": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ExportTaskCompleted": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ImageAvailable": { + "operation": "DescribeImages", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Images[].State", + "expected": "available" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Images[].State", + "expected": "failed" + } + ] + }, + "InstanceRunning": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "running", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "shutting-down", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "InstanceStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": 
"success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].InstanceStatus.Status", + "expected": "ok" + } + ] + }, + "InstanceStopped": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "stopped", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "InstanceTerminated": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "terminated", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "KeyPairExists": { + "operation": "DescribeKeyPairs", + "delay": 5, + "maxAttempts": 6, + "acceptors": [ + { + "expected": true, + "matcher": "pathAll", + "state": "success", + "argument": "length(KeyPairs[].KeyName) > `0`" + }, + { + "expected": "InvalidKeyPairNotFound", + "matcher": "error", + "state": "retry" + } + ] + }, + "NetworkInterfaceAvailable": { + "operation": "DescribeNetworkInterfaces", + "delay": 20, + "maxAttempts": 10, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Status" + }, + { + "expected": "InvalidNetworkInterfaceIDNotFound", + "matcher": "error", + "state": "failure" + } + ] + }, + "PasswordDataAvailable": { + "operation": "GetPasswordData", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + 
"state": "success", + "matcher": "path", + "argument": "length(PasswordData) > `0`", + "expected": true + } + ] + }, + "SnapshotCompleted": { + "delay": 15, + "operation": "DescribeSnapshots", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "Snapshots[].State" + } + ] + }, + "SpotInstanceRequestFulfilled": { + "operation": "DescribeSpotInstanceRequests", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "fulfilled" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "schedule-expired" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "canceled-before-fulfillment" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "bad-parameters" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "system-error" + } + ] + }, + "SubnetAvailable": { + "delay": 15, + "operation": "DescribeSubnets", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Subnets[].State" + } + ] + }, + "SystemStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].SystemStatus.Status", + "expected": "ok" + } + ] + }, + "VolumeAvailable": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": 
"Volumes[].State" + } + ] + }, + "VolumeDeleted": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "matcher": "error", + "expected": "InvalidVolumeNotFound", + "state": "success" + } + ] + }, + "VolumeInUse": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "in-use", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VpcAvailable": { + "delay": 15, + "operation": "DescribeVpcs", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Vpcs[].State" + } + ] + }, + "VpnConnectionAvailable": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + }, + "VpnConnectionDeleted": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/api-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,13391 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-10-01", + "endpointPrefix":"ec2", + "protocol":"ec2", + "serviceAbbreviation":"Amazon EC2", + "serviceFullName":"Amazon Elastic Compute Cloud", + "signatureVersion":"v4", + "xmlNamespace":"http://ec2.amazonaws.com/doc/2015-10-01" + }, + "operations":{ + "AcceptVpcPeeringConnection":{ + "name":"AcceptVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptVpcPeeringConnectionRequest"}, + "output":{"shape":"AcceptVpcPeeringConnectionResult"} + }, + "AllocateAddress":{ + "name":"AllocateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateAddressRequest"}, + "output":{"shape":"AllocateAddressResult"} + }, + "AllocateHosts":{ + "name":"AllocateHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AllocateHostsRequest"}, + "output":{"shape":"AllocateHostsResult"} + }, + "AssignPrivateIpAddresses":{ + "name":"AssignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssignPrivateIpAddressesRequest"} + }, + "AssociateAddress":{ + "name":"AssociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateAddressRequest"}, + "output":{"shape":"AssociateAddressResult"} + }, + "AssociateDhcpOptions":{ + "name":"AssociateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDhcpOptionsRequest"} + }, + "AssociateRouteTable":{ + 
"name":"AssociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateRouteTableRequest"}, + "output":{"shape":"AssociateRouteTableResult"} + }, + "AttachClassicLinkVpc":{ + "name":"AttachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachClassicLinkVpcRequest"}, + "output":{"shape":"AttachClassicLinkVpcResult"} + }, + "AttachInternetGateway":{ + "name":"AttachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachInternetGatewayRequest"} + }, + "AttachNetworkInterface":{ + "name":"AttachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachNetworkInterfaceRequest"}, + "output":{"shape":"AttachNetworkInterfaceResult"} + }, + "AttachVolume":{ + "name":"AttachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVolumeRequest"}, + "output":{"shape":"VolumeAttachment"} + }, + "AttachVpnGateway":{ + "name":"AttachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachVpnGatewayRequest"}, + "output":{"shape":"AttachVpnGatewayResult"} + }, + "AuthorizeSecurityGroupEgress":{ + "name":"AuthorizeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupEgressRequest"} + }, + "AuthorizeSecurityGroupIngress":{ + "name":"AuthorizeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSecurityGroupIngressRequest"} + }, + "BundleInstance":{ + "name":"BundleInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BundleInstanceRequest"}, + "output":{"shape":"BundleInstanceResult"} + }, + "CancelBundleTask":{ + "name":"CancelBundleTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelBundleTaskRequest"}, + "output":{"shape":"CancelBundleTaskResult"} + }, + 
"CancelConversionTask":{ + "name":"CancelConversionTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelConversionRequest"} + }, + "CancelExportTask":{ + "name":"CancelExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelExportTaskRequest"} + }, + "CancelImportTask":{ + "name":"CancelImportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelImportTaskRequest"}, + "output":{"shape":"CancelImportTaskResult"} + }, + "CancelReservedInstancesListing":{ + "name":"CancelReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelReservedInstancesListingRequest"}, + "output":{"shape":"CancelReservedInstancesListingResult"} + }, + "CancelSpotFleetRequests":{ + "name":"CancelSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotFleetRequestsRequest"}, + "output":{"shape":"CancelSpotFleetRequestsResponse"} + }, + "CancelSpotInstanceRequests":{ + "name":"CancelSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelSpotInstanceRequestsRequest"}, + "output":{"shape":"CancelSpotInstanceRequestsResult"} + }, + "ConfirmProductInstance":{ + "name":"ConfirmProductInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmProductInstanceRequest"}, + "output":{"shape":"ConfirmProductInstanceResult"} + }, + "CopyImage":{ + "name":"CopyImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyImageRequest"}, + "output":{"shape":"CopyImageResult"} + }, + "CopySnapshot":{ + "name":"CopySnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopySnapshotRequest"}, + "output":{"shape":"CopySnapshotResult"} + }, + "CreateCustomerGateway":{ + "name":"CreateCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CreateCustomerGatewayRequest"}, + "output":{"shape":"CreateCustomerGatewayResult"} + }, + "CreateDhcpOptions":{ + "name":"CreateDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDhcpOptionsRequest"}, + "output":{"shape":"CreateDhcpOptionsResult"} + }, + "CreateFlowLogs":{ + "name":"CreateFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFlowLogsRequest"}, + "output":{"shape":"CreateFlowLogsResult"} + }, + "CreateImage":{ + "name":"CreateImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateImageRequest"}, + "output":{"shape":"CreateImageResult"} + }, + "CreateInstanceExportTask":{ + "name":"CreateInstanceExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceExportTaskRequest"}, + "output":{"shape":"CreateInstanceExportTaskResult"} + }, + "CreateInternetGateway":{ + "name":"CreateInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInternetGatewayRequest"}, + "output":{"shape":"CreateInternetGatewayResult"} + }, + "CreateKeyPair":{ + "name":"CreateKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateKeyPairRequest"}, + "output":{"shape":"KeyPair"} + }, + "CreateNatGateway":{ + "name":"CreateNatGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNatGatewayRequest"}, + "output":{"shape":"CreateNatGatewayResult"} + }, + "CreateNetworkAcl":{ + "name":"CreateNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclRequest"}, + "output":{"shape":"CreateNetworkAclResult"} + }, + "CreateNetworkAclEntry":{ + "name":"CreateNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkAclEntryRequest"} + }, + "CreateNetworkInterface":{ + "name":"CreateNetworkInterface", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateNetworkInterfaceRequest"}, + "output":{"shape":"CreateNetworkInterfaceResult"} + }, + "CreatePlacementGroup":{ + "name":"CreatePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlacementGroupRequest"} + }, + "CreateReservedInstancesListing":{ + "name":"CreateReservedInstancesListing", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReservedInstancesListingRequest"}, + "output":{"shape":"CreateReservedInstancesListingResult"} + }, + "CreateRoute":{ + "name":"CreateRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteRequest"}, + "output":{"shape":"CreateRouteResult"} + }, + "CreateRouteTable":{ + "name":"CreateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRouteTableRequest"}, + "output":{"shape":"CreateRouteTableResult"} + }, + "CreateSecurityGroup":{ + "name":"CreateSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSecurityGroupRequest"}, + "output":{"shape":"CreateSecurityGroupResult"} + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotRequest"}, + "output":{"shape":"Snapshot"} + }, + "CreateSpotDatafeedSubscription":{ + "name":"CreateSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"CreateSpotDatafeedSubscriptionResult"} + }, + "CreateSubnet":{ + "name":"CreateSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSubnetRequest"}, + "output":{"shape":"CreateSubnetResult"} + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsRequest"} + }, + "CreateVolume":{ + "name":"CreateVolume", 
+ "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVolumeRequest"}, + "output":{"shape":"Volume"} + }, + "CreateVpc":{ + "name":"CreateVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcRequest"}, + "output":{"shape":"CreateVpcResult"} + }, + "CreateVpcEndpoint":{ + "name":"CreateVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcEndpointRequest"}, + "output":{"shape":"CreateVpcEndpointResult"} + }, + "CreateVpcPeeringConnection":{ + "name":"CreateVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcPeeringConnectionRequest"}, + "output":{"shape":"CreateVpcPeeringConnectionResult"} + }, + "CreateVpnConnection":{ + "name":"CreateVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRequest"}, + "output":{"shape":"CreateVpnConnectionResult"} + }, + "CreateVpnConnectionRoute":{ + "name":"CreateVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnConnectionRouteRequest"} + }, + "CreateVpnGateway":{ + "name":"CreateVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpnGatewayRequest"}, + "output":{"shape":"CreateVpnGatewayResult"} + }, + "DeleteCustomerGateway":{ + "name":"DeleteCustomerGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomerGatewayRequest"} + }, + "DeleteDhcpOptions":{ + "name":"DeleteDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDhcpOptionsRequest"} + }, + "DeleteFlowLogs":{ + "name":"DeleteFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFlowLogsRequest"}, + "output":{"shape":"DeleteFlowLogsResult"} + }, + "DeleteInternetGateway":{ + "name":"DeleteInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" 
+ }, + "input":{"shape":"DeleteInternetGatewayRequest"} + }, + "DeleteKeyPair":{ + "name":"DeleteKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteKeyPairRequest"} + }, + "DeleteNatGateway":{ + "name":"DeleteNatGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNatGatewayRequest"}, + "output":{"shape":"DeleteNatGatewayResult"} + }, + "DeleteNetworkAcl":{ + "name":"DeleteNetworkAcl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclRequest"} + }, + "DeleteNetworkAclEntry":{ + "name":"DeleteNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkAclEntryRequest"} + }, + "DeleteNetworkInterface":{ + "name":"DeleteNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkInterfaceRequest"} + }, + "DeletePlacementGroup":{ + "name":"DeletePlacementGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePlacementGroupRequest"} + }, + "DeleteRoute":{ + "name":"DeleteRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteRequest"} + }, + "DeleteRouteTable":{ + "name":"DeleteRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRouteTableRequest"} + }, + "DeleteSecurityGroup":{ + "name":"DeleteSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSecurityGroupRequest"} + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotRequest"} + }, + "DeleteSpotDatafeedSubscription":{ + "name":"DeleteSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSpotDatafeedSubscriptionRequest"} + }, + "DeleteSubnet":{ + "name":"DeleteSubnet", + "http":{ + "method":"POST", + "requestUri":"/" + }, 
+ "input":{"shape":"DeleteSubnetRequest"} + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsRequest"} + }, + "DeleteVolume":{ + "name":"DeleteVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVolumeRequest"} + }, + "DeleteVpc":{ + "name":"DeleteVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcRequest"} + }, + "DeleteVpcEndpoints":{ + "name":"DeleteVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcEndpointsRequest"}, + "output":{"shape":"DeleteVpcEndpointsResult"} + }, + "DeleteVpcPeeringConnection":{ + "name":"DeleteVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcPeeringConnectionRequest"}, + "output":{"shape":"DeleteVpcPeeringConnectionResult"} + }, + "DeleteVpnConnection":{ + "name":"DeleteVpnConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRequest"} + }, + "DeleteVpnConnectionRoute":{ + "name":"DeleteVpnConnectionRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnConnectionRouteRequest"} + }, + "DeleteVpnGateway":{ + "name":"DeleteVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpnGatewayRequest"} + }, + "DeregisterImage":{ + "name":"DeregisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterImageRequest"} + }, + "DescribeAccountAttributes":{ + "name":"DescribeAccountAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAttributesRequest"}, + "output":{"shape":"DescribeAccountAttributesResult"} + }, + "DescribeAddresses":{ + "name":"DescribeAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAddressesRequest"}, + 
"output":{"shape":"DescribeAddressesResult"} + }, + "DescribeAvailabilityZones":{ + "name":"DescribeAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAvailabilityZonesRequest"}, + "output":{"shape":"DescribeAvailabilityZonesResult"} + }, + "DescribeBundleTasks":{ + "name":"DescribeBundleTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBundleTasksRequest"}, + "output":{"shape":"DescribeBundleTasksResult"} + }, + "DescribeClassicLinkInstances":{ + "name":"DescribeClassicLinkInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClassicLinkInstancesRequest"}, + "output":{"shape":"DescribeClassicLinkInstancesResult"} + }, + "DescribeConversionTasks":{ + "name":"DescribeConversionTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConversionTasksRequest"}, + "output":{"shape":"DescribeConversionTasksResult"} + }, + "DescribeCustomerGateways":{ + "name":"DescribeCustomerGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCustomerGatewaysRequest"}, + "output":{"shape":"DescribeCustomerGatewaysResult"} + }, + "DescribeDhcpOptions":{ + "name":"DescribeDhcpOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDhcpOptionsRequest"}, + "output":{"shape":"DescribeDhcpOptionsResult"} + }, + "DescribeExportTasks":{ + "name":"DescribeExportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportTasksRequest"}, + "output":{"shape":"DescribeExportTasksResult"} + }, + "DescribeFlowLogs":{ + "name":"DescribeFlowLogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFlowLogsRequest"}, + "output":{"shape":"DescribeFlowLogsResult"} + }, + "DescribeHosts":{ + "name":"DescribeHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeHostsRequest"}, + "output":{"shape":"DescribeHostsResult"} + }, + "DescribeIdFormat":{ + "name":"DescribeIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIdFormatRequest"}, + "output":{"shape":"DescribeIdFormatResult"} + }, + "DescribeImageAttribute":{ + "name":"DescribeImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageAttributeRequest"}, + "output":{"shape":"ImageAttribute"} + }, + "DescribeImages":{ + "name":"DescribeImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImagesRequest"}, + "output":{"shape":"DescribeImagesResult"} + }, + "DescribeImportImageTasks":{ + "name":"DescribeImportImageTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportImageTasksRequest"}, + "output":{"shape":"DescribeImportImageTasksResult"} + }, + "DescribeImportSnapshotTasks":{ + "name":"DescribeImportSnapshotTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportSnapshotTasksRequest"}, + "output":{"shape":"DescribeImportSnapshotTasksResult"} + }, + "DescribeInstanceAttribute":{ + "name":"DescribeInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceAttributeRequest"}, + "output":{"shape":"InstanceAttribute"} + }, + "DescribeInstanceStatus":{ + "name":"DescribeInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceStatusRequest"}, + "output":{"shape":"DescribeInstanceStatusResult"} + }, + "DescribeInstances":{ + "name":"DescribeInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstancesRequest"}, + "output":{"shape":"DescribeInstancesResult"} + }, + "DescribeInternetGateways":{ + "name":"DescribeInternetGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeInternetGatewaysRequest"}, + "output":{"shape":"DescribeInternetGatewaysResult"} + }, + "DescribeKeyPairs":{ + "name":"DescribeKeyPairs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKeyPairsRequest"}, + "output":{"shape":"DescribeKeyPairsResult"} + }, + "DescribeMovingAddresses":{ + "name":"DescribeMovingAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMovingAddressesRequest"}, + "output":{"shape":"DescribeMovingAddressesResult"} + }, + "DescribeNatGateways":{ + "name":"DescribeNatGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNatGatewaysRequest"}, + "output":{"shape":"DescribeNatGatewaysResult"} + }, + "DescribeNetworkAcls":{ + "name":"DescribeNetworkAcls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkAclsRequest"}, + "output":{"shape":"DescribeNetworkAclsResult"} + }, + "DescribeNetworkInterfaceAttribute":{ + "name":"DescribeNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfaceAttributeRequest"}, + "output":{"shape":"DescribeNetworkInterfaceAttributeResult"} + }, + "DescribeNetworkInterfaces":{ + "name":"DescribeNetworkInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeNetworkInterfacesRequest"}, + "output":{"shape":"DescribeNetworkInterfacesResult"} + }, + "DescribePlacementGroups":{ + "name":"DescribePlacementGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePlacementGroupsRequest"}, + "output":{"shape":"DescribePlacementGroupsResult"} + }, + "DescribePrefixLists":{ + "name":"DescribePrefixLists", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePrefixListsRequest"}, + "output":{"shape":"DescribePrefixListsResult"} + }, + "DescribeRegions":{ + "name":"DescribeRegions", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRegionsRequest"}, + "output":{"shape":"DescribeRegionsResult"} + }, + "DescribeReservedInstances":{ + "name":"DescribeReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesRequest"}, + "output":{"shape":"DescribeReservedInstancesResult"} + }, + "DescribeReservedInstancesListings":{ + "name":"DescribeReservedInstancesListings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesListingsRequest"}, + "output":{"shape":"DescribeReservedInstancesListingsResult"} + }, + "DescribeReservedInstancesModifications":{ + "name":"DescribeReservedInstancesModifications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesModificationsRequest"}, + "output":{"shape":"DescribeReservedInstancesModificationsResult"} + }, + "DescribeReservedInstancesOfferings":{ + "name":"DescribeReservedInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedInstancesOfferingsRequest"}, + "output":{"shape":"DescribeReservedInstancesOfferingsResult"} + }, + "DescribeRouteTables":{ + "name":"DescribeRouteTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRouteTablesRequest"}, + "output":{"shape":"DescribeRouteTablesResult"} + }, + "DescribeScheduledInstanceAvailability":{ + "name":"DescribeScheduledInstanceAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledInstanceAvailabilityRequest"}, + "output":{"shape":"DescribeScheduledInstanceAvailabilityResult"} + }, + "DescribeScheduledInstances":{ + "name":"DescribeScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledInstancesRequest"}, + "output":{"shape":"DescribeScheduledInstancesResult"} + }, + 
"DescribeSecurityGroups":{ + "name":"DescribeSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSecurityGroupsRequest"}, + "output":{"shape":"DescribeSecurityGroupsResult"} + }, + "DescribeSnapshotAttribute":{ + "name":"DescribeSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotAttributeRequest"}, + "output":{"shape":"DescribeSnapshotAttributeResult"} + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsRequest"}, + "output":{"shape":"DescribeSnapshotsResult"} + }, + "DescribeSpotDatafeedSubscription":{ + "name":"DescribeSpotDatafeedSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotDatafeedSubscriptionRequest"}, + "output":{"shape":"DescribeSpotDatafeedSubscriptionResult"} + }, + "DescribeSpotFleetInstances":{ + "name":"DescribeSpotFleetInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetInstancesRequest"}, + "output":{"shape":"DescribeSpotFleetInstancesResponse"} + }, + "DescribeSpotFleetRequestHistory":{ + "name":"DescribeSpotFleetRequestHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestHistoryRequest"}, + "output":{"shape":"DescribeSpotFleetRequestHistoryResponse"} + }, + "DescribeSpotFleetRequests":{ + "name":"DescribeSpotFleetRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotFleetRequestsRequest"}, + "output":{"shape":"DescribeSpotFleetRequestsResponse"} + }, + "DescribeSpotInstanceRequests":{ + "name":"DescribeSpotInstanceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotInstanceRequestsRequest"}, + "output":{"shape":"DescribeSpotInstanceRequestsResult"} + }, + "DescribeSpotPriceHistory":{ + 
"name":"DescribeSpotPriceHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpotPriceHistoryRequest"}, + "output":{"shape":"DescribeSpotPriceHistoryResult"} + }, + "DescribeSubnets":{ + "name":"DescribeSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSubnetsRequest"}, + "output":{"shape":"DescribeSubnetsResult"} + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResult"} + }, + "DescribeVolumeAttribute":{ + "name":"DescribeVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeAttributeRequest"}, + "output":{"shape":"DescribeVolumeAttributeResult"} + }, + "DescribeVolumeStatus":{ + "name":"DescribeVolumeStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumeStatusRequest"}, + "output":{"shape":"DescribeVolumeStatusResult"} + }, + "DescribeVolumes":{ + "name":"DescribeVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumesRequest"}, + "output":{"shape":"DescribeVolumesResult"} + }, + "DescribeVpcAttribute":{ + "name":"DescribeVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcAttributeRequest"}, + "output":{"shape":"DescribeVpcAttributeResult"} + }, + "DescribeVpcClassicLink":{ + "name":"DescribeVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcClassicLinkRequest"}, + "output":{"shape":"DescribeVpcClassicLinkResult"} + }, + "DescribeVpcClassicLinkDnsSupport":{ + "name":"DescribeVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"DescribeVpcClassicLinkDnsSupportResult"} + }, + "DescribeVpcEndpointServices":{ + 
"name":"DescribeVpcEndpointServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointServicesRequest"}, + "output":{"shape":"DescribeVpcEndpointServicesResult"} + }, + "DescribeVpcEndpoints":{ + "name":"DescribeVpcEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointsRequest"}, + "output":{"shape":"DescribeVpcEndpointsResult"} + }, + "DescribeVpcPeeringConnections":{ + "name":"DescribeVpcPeeringConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcPeeringConnectionsRequest"}, + "output":{"shape":"DescribeVpcPeeringConnectionsResult"} + }, + "DescribeVpcs":{ + "name":"DescribeVpcs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcsRequest"}, + "output":{"shape":"DescribeVpcsResult"} + }, + "DescribeVpnConnections":{ + "name":"DescribeVpnConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnConnectionsRequest"}, + "output":{"shape":"DescribeVpnConnectionsResult"} + }, + "DescribeVpnGateways":{ + "name":"DescribeVpnGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpnGatewaysRequest"}, + "output":{"shape":"DescribeVpnGatewaysResult"} + }, + "DetachClassicLinkVpc":{ + "name":"DetachClassicLinkVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachClassicLinkVpcRequest"}, + "output":{"shape":"DetachClassicLinkVpcResult"} + }, + "DetachInternetGateway":{ + "name":"DetachInternetGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachInternetGatewayRequest"} + }, + "DetachNetworkInterface":{ + "name":"DetachNetworkInterface", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachNetworkInterfaceRequest"} + }, + "DetachVolume":{ + "name":"DetachVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DetachVolumeRequest"}, + "output":{"shape":"VolumeAttachment"} + }, + "DetachVpnGateway":{ + "name":"DetachVpnGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachVpnGatewayRequest"} + }, + "DisableVgwRoutePropagation":{ + "name":"DisableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVgwRoutePropagationRequest"} + }, + "DisableVpcClassicLink":{ + "name":"DisableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVpcClassicLinkRequest"}, + "output":{"shape":"DisableVpcClassicLinkResult"} + }, + "DisableVpcClassicLinkDnsSupport":{ + "name":"DisableVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"DisableVpcClassicLinkDnsSupportResult"} + }, + "DisassociateAddress":{ + "name":"DisassociateAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateAddressRequest"} + }, + "DisassociateRouteTable":{ + "name":"DisassociateRouteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateRouteTableRequest"} + }, + "EnableVgwRoutePropagation":{ + "name":"EnableVgwRoutePropagation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVgwRoutePropagationRequest"} + }, + "EnableVolumeIO":{ + "name":"EnableVolumeIO", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVolumeIORequest"} + }, + "EnableVpcClassicLink":{ + "name":"EnableVpcClassicLink", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableVpcClassicLinkRequest"}, + "output":{"shape":"EnableVpcClassicLinkResult"} + }, + "EnableVpcClassicLinkDnsSupport":{ + "name":"EnableVpcClassicLinkDnsSupport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"EnableVpcClassicLinkDnsSupportRequest"}, + "output":{"shape":"EnableVpcClassicLinkDnsSupportResult"} + }, + "GetConsoleOutput":{ + "name":"GetConsoleOutput", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConsoleOutputRequest"}, + "output":{"shape":"GetConsoleOutputResult"} + }, + "GetPasswordData":{ + "name":"GetPasswordData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPasswordDataRequest"}, + "output":{"shape":"GetPasswordDataResult"} + }, + "ImportImage":{ + "name":"ImportImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportImageRequest"}, + "output":{"shape":"ImportImageResult"} + }, + "ImportInstance":{ + "name":"ImportInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportInstanceRequest"}, + "output":{"shape":"ImportInstanceResult"} + }, + "ImportKeyPair":{ + "name":"ImportKeyPair", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportKeyPairRequest"}, + "output":{"shape":"ImportKeyPairResult"} + }, + "ImportSnapshot":{ + "name":"ImportSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportSnapshotRequest"}, + "output":{"shape":"ImportSnapshotResult"} + }, + "ImportVolume":{ + "name":"ImportVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportVolumeRequest"}, + "output":{"shape":"ImportVolumeResult"} + }, + "ModifyHosts":{ + "name":"ModifyHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyHostsRequest"}, + "output":{"shape":"ModifyHostsResult"} + }, + "ModifyIdFormat":{ + "name":"ModifyIdFormat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyIdFormatRequest"} + }, + "ModifyImageAttribute":{ + "name":"ModifyImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyImageAttributeRequest"} + }, + 
"ModifyInstanceAttribute":{ + "name":"ModifyInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceAttributeRequest"} + }, + "ModifyInstancePlacement":{ + "name":"ModifyInstancePlacement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstancePlacementRequest"}, + "output":{"shape":"ModifyInstancePlacementResult"} + }, + "ModifyNetworkInterfaceAttribute":{ + "name":"ModifyNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyNetworkInterfaceAttributeRequest"} + }, + "ModifyReservedInstances":{ + "name":"ModifyReservedInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReservedInstancesRequest"}, + "output":{"shape":"ModifyReservedInstancesResult"} + }, + "ModifySnapshotAttribute":{ + "name":"ModifySnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySnapshotAttributeRequest"} + }, + "ModifySpotFleetRequest":{ + "name":"ModifySpotFleetRequest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySpotFleetRequestRequest"}, + "output":{"shape":"ModifySpotFleetRequestResponse"} + }, + "ModifySubnetAttribute":{ + "name":"ModifySubnetAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySubnetAttributeRequest"} + }, + "ModifyVolumeAttribute":{ + "name":"ModifyVolumeAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVolumeAttributeRequest"} + }, + "ModifyVpcAttribute":{ + "name":"ModifyVpcAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcAttributeRequest"} + }, + "ModifyVpcEndpoint":{ + "name":"ModifyVpcEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyVpcEndpointRequest"}, + "output":{"shape":"ModifyVpcEndpointResult"} + }, + "MonitorInstances":{ + 
"name":"MonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MonitorInstancesRequest"}, + "output":{"shape":"MonitorInstancesResult"} + }, + "MoveAddressToVpc":{ + "name":"MoveAddressToVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MoveAddressToVpcRequest"}, + "output":{"shape":"MoveAddressToVpcResult"} + }, + "PurchaseReservedInstancesOffering":{ + "name":"PurchaseReservedInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedInstancesOfferingRequest"}, + "output":{"shape":"PurchaseReservedInstancesOfferingResult"} + }, + "PurchaseScheduledInstances":{ + "name":"PurchaseScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseScheduledInstancesRequest"}, + "output":{"shape":"PurchaseScheduledInstancesResult"} + }, + "RebootInstances":{ + "name":"RebootInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootInstancesRequest"} + }, + "RegisterImage":{ + "name":"RegisterImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterImageRequest"}, + "output":{"shape":"RegisterImageResult"} + }, + "RejectVpcPeeringConnection":{ + "name":"RejectVpcPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectVpcPeeringConnectionRequest"}, + "output":{"shape":"RejectVpcPeeringConnectionResult"} + }, + "ReleaseAddress":{ + "name":"ReleaseAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReleaseAddressRequest"} + }, + "ReleaseHosts":{ + "name":"ReleaseHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReleaseHostsRequest"}, + "output":{"shape":"ReleaseHostsResult"} + }, + "ReplaceNetworkAclAssociation":{ + "name":"ReplaceNetworkAclAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ReplaceNetworkAclAssociationRequest"}, + "output":{"shape":"ReplaceNetworkAclAssociationResult"} + }, + "ReplaceNetworkAclEntry":{ + "name":"ReplaceNetworkAclEntry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceNetworkAclEntryRequest"} + }, + "ReplaceRoute":{ + "name":"ReplaceRoute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteRequest"} + }, + "ReplaceRouteTableAssociation":{ + "name":"ReplaceRouteTableAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplaceRouteTableAssociationRequest"}, + "output":{"shape":"ReplaceRouteTableAssociationResult"} + }, + "ReportInstanceStatus":{ + "name":"ReportInstanceStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReportInstanceStatusRequest"} + }, + "RequestSpotFleet":{ + "name":"RequestSpotFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotFleetRequest"}, + "output":{"shape":"RequestSpotFleetResponse"} + }, + "RequestSpotInstances":{ + "name":"RequestSpotInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestSpotInstancesRequest"}, + "output":{"shape":"RequestSpotInstancesResult"} + }, + "ResetImageAttribute":{ + "name":"ResetImageAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetImageAttributeRequest"} + }, + "ResetInstanceAttribute":{ + "name":"ResetInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetInstanceAttributeRequest"} + }, + "ResetNetworkInterfaceAttribute":{ + "name":"ResetNetworkInterfaceAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetNetworkInterfaceAttributeRequest"} + }, + "ResetSnapshotAttribute":{ + "name":"ResetSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetSnapshotAttributeRequest"} + 
}, + "RestoreAddressToClassic":{ + "name":"RestoreAddressToClassic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreAddressToClassicRequest"}, + "output":{"shape":"RestoreAddressToClassicResult"} + }, + "RevokeSecurityGroupEgress":{ + "name":"RevokeSecurityGroupEgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupEgressRequest"} + }, + "RevokeSecurityGroupIngress":{ + "name":"RevokeSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSecurityGroupIngressRequest"} + }, + "RunInstances":{ + "name":"RunInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunInstancesRequest"}, + "output":{"shape":"Reservation"} + }, + "RunScheduledInstances":{ + "name":"RunScheduledInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunScheduledInstancesRequest"}, + "output":{"shape":"RunScheduledInstancesResult"} + }, + "StartInstances":{ + "name":"StartInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInstancesRequest"}, + "output":{"shape":"StartInstancesResult"} + }, + "StopInstances":{ + "name":"StopInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInstancesRequest"}, + "output":{"shape":"StopInstancesResult"} + }, + "TerminateInstances":{ + "name":"TerminateInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateInstancesRequest"}, + "output":{"shape":"TerminateInstancesResult"} + }, + "UnassignPrivateIpAddresses":{ + "name":"UnassignPrivateIpAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignPrivateIpAddressesRequest"} + }, + "UnmonitorInstances":{ + "name":"UnmonitorInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnmonitorInstancesRequest"}, + 
"output":{"shape":"UnmonitorInstancesResult"} + } + }, + "shapes":{ + "AcceptVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "AcceptVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "AccountAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"String", + "locationName":"attributeName" + }, + "AttributeValues":{ + "shape":"AccountAttributeValueList", + "locationName":"attributeValueSet" + } + } + }, + "AccountAttributeList":{ + "type":"list", + "member":{ + "shape":"AccountAttribute", + "locationName":"item" + } + }, + "AccountAttributeName":{ + "type":"string", + "enum":[ + "supported-platforms", + "default-vpc" + ] + }, + "AccountAttributeNameStringList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeName", + "locationName":"attributeName" + } + }, + "AccountAttributeValue":{ + "type":"structure", + "members":{ + "AttributeValue":{ + "shape":"String", + "locationName":"attributeValue" + } + } + }, + "AccountAttributeValueList":{ + "type":"list", + "member":{ + "shape":"AccountAttributeValue", + "locationName":"item" + } + }, + "ActiveInstance":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + } + } + }, + "ActiveInstanceSet":{ + "type":"list", + "member":{ + "shape":"ActiveInstance", + "locationName":"item" + } + }, + "Address":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "PublicIp":{ + "shape":"String", + 
"locationName":"publicIp" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "NetworkInterfaceOwnerId":{ + "shape":"String", + "locationName":"networkInterfaceOwnerId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "AddressList":{ + "type":"list", + "member":{ + "shape":"Address", + "locationName":"item" + } + }, + "Affinity":{ + "type":"string", + "enum":[ + "default", + "host" + ] + }, + "AllocateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Domain":{"shape":"DomainType"} + } + }, + "AllocateAddressResult":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "Domain":{ + "shape":"DomainType", + "locationName":"domain" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + } + } + }, + "AllocateHostsRequest":{ + "type":"structure", + "required":[ + "InstanceType", + "Quantity", + "AvailabilityZone" + ], + "members":{ + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Quantity":{ + "shape":"Integer", + "locationName":"quantity" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + } + } + }, + "AllocateHostsResult":{ + "type":"structure", + "members":{ + "HostIds":{ + "shape":"ResponseHostIdList", + "locationName":"hostIdSet" + } + } + }, + "AllocationIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AllocationId" + } + }, + "AllocationState":{ + 
"type":"string", + "enum":[ + "available", + "under-assessment", + "permanent-failure", + "released", + "released-permanent-failure" + ] + }, + "AllocationStrategy":{ + "type":"string", + "enum":[ + "lowestPrice", + "diversified" + ] + }, + "ArchitectureValues":{ + "type":"string", + "enum":[ + "i386", + "x86_64" + ] + }, + "AssignPrivateIpAddressesRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AllowReassignment":{ + "shape":"Boolean", + "locationName":"allowReassignment" + } + } + }, + "AssociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"}, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "AllowReassociation":{ + "shape":"Boolean", + "locationName":"allowReassociation" + } + } + }, + "AssociateAddressResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AssociateDhcpOptionsRequest":{ + "type":"structure", + "required":[ + "DhcpOptionsId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AssociateRouteTableRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{ + "shape":"String", + 
"locationName":"subnetId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "AssociateRouteTableResult":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "AttachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId", + "Groups" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"SecurityGroupId" + } + } + }, + "AttachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "AttachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "AttachNetworkInterfaceRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "InstanceId", + "DeviceIndex" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + } + } + }, + "AttachNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + } + } + }, + "AttachVolumeRequest":{ + "type":"structure", + "required":[ + "VolumeId", + "InstanceId", + "Device" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + 
"VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"} + } + }, + "AttachVpnGatewayRequest":{ + "type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "AttachVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpcAttachment":{ + "shape":"VpcAttachment", + "locationName":"attachment" + } + } + }, + "AttachmentStatus":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "AttributeBooleanValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Boolean", + "locationName":"value" + } + } + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "AuthorizeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "AuthorizeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + 
"SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "AutoPlacement":{ + "type":"string", + "enum":[ + "on", + "off" + ] + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "ZoneName":{ + "shape":"String", + "locationName":"zoneName" + }, + "State":{ + "shape":"AvailabilityZoneState", + "locationName":"zoneState" + }, + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Messages":{ + "shape":"AvailabilityZoneMessageList", + "locationName":"messageSet" + } + } + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"item" + } + }, + "AvailabilityZoneMessage":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "AvailabilityZoneMessageList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZoneMessage", + "locationName":"item" + } + }, + "AvailabilityZoneState":{ + "type":"string", + "enum":[ + "available", + "information", + "impaired", + "unavailable" + ] + }, + "AvailableCapacity":{ + "type":"structure", + "members":{ + "AvailableInstanceCapacity":{ + "shape":"AvailableInstanceCapacityList", + "locationName":"availableInstanceCapacity" + }, + "AvailableVCpus":{ + "shape":"Integer", + "locationName":"availableVCpus" + } + } + }, + "AvailableInstanceCapacityList":{ + "type":"list", + "member":{ + "shape":"InstanceCapacity", + "locationName":"item" + } + }, + "BatchState":{ + "type":"string", + "enum":[ + "submitted", + "active", + "cancelled", + "failed", + "cancelled_running", + "cancelled_terminating", + "modifying" + ] + }, + "Blob":{"type":"blob"}, + "BlobAttributeValue":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"Blob", + "locationName":"value" + } + } + }, + "BlockDeviceMapping":{ + "type":"structure", + 
"members":{ + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsBlockDevice", + "locationName":"ebs" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "BlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"item" + } + }, + "BlockDeviceMappingRequestList":{ + "type":"list", + "member":{ + "shape":"BlockDeviceMapping", + "locationName":"BlockDeviceMapping" + } + }, + "Boolean":{"type":"boolean"}, + "BundleIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"BundleId" + } + }, + "BundleInstanceRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Storage" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"}, + "Storage":{"shape":"Storage"} + } + }, + "BundleInstanceResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "BundleTask":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "BundleId":{ + "shape":"String", + "locationName":"bundleId" + }, + "State":{ + "shape":"BundleTaskState", + "locationName":"state" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Storage":{ + "shape":"Storage", + "locationName":"storage" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "BundleTaskError":{ + "shape":"BundleTaskError", + "locationName":"error" + } + } + }, + "BundleTaskError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "BundleTaskList":{ + "type":"list", + "member":{ + 
"shape":"BundleTask", + "locationName":"item" + } + }, + "BundleTaskState":{ + "type":"string", + "enum":[ + "pending", + "waiting-for-shutdown", + "bundling", + "storing", + "cancelling", + "complete", + "failed" + ] + }, + "CancelBatchErrorCode":{ + "type":"string", + "enum":[ + "fleetRequestIdDoesNotExist", + "fleetRequestIdMalformed", + "fleetRequestNotInCancellableState", + "unexpectedError" + ] + }, + "CancelBundleTaskRequest":{ + "type":"structure", + "required":["BundleId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleId":{"shape":"String"} + } + }, + "CancelBundleTaskResult":{ + "type":"structure", + "members":{ + "BundleTask":{ + "shape":"BundleTask", + "locationName":"bundleInstanceTask" + } + } + }, + "CancelConversionRequest":{ + "type":"structure", + "required":["ConversionTaskId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ReasonMessage":{ + "shape":"String", + "locationName":"reasonMessage" + } + } + }, + "CancelExportTaskRequest":{ + "type":"structure", + "required":["ExportTaskId"], + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + } + } + }, + "CancelImportTaskRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskId":{"shape":"String"}, + "CancelReason":{"shape":"String"} + } + }, + "CancelImportTaskResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "PreviousState":{ + "shape":"String", + "locationName":"previousState" + } + } + }, + "CancelReservedInstancesListingRequest":{ + "type":"structure", + "required":["ReservedInstancesListingId"], + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + } + } + }, + 
"CancelReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CancelSpotFleetRequestsError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"CancelBatchErrorCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "CancelSpotFleetRequestsErrorItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "Error" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "Error":{ + "shape":"CancelSpotFleetRequestsError", + "locationName":"error" + } + } + }, + "CancelSpotFleetRequestsErrorSet":{ + "type":"list", + "member":{ + "shape":"CancelSpotFleetRequestsErrorItem", + "locationName":"item" + } + }, + "CancelSpotFleetRequestsRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestIds", + "TerminateInstances" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "TerminateInstances":{ + "shape":"Boolean", + "locationName":"terminateInstances" + } + } + }, + "CancelSpotFleetRequestsResponse":{ + "type":"structure", + "members":{ + "UnsuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsErrorSet", + "locationName":"unsuccessfulFleetRequestSet" + }, + "SuccessfulFleetRequests":{ + "shape":"CancelSpotFleetRequestsSuccessSet", + "locationName":"successfulFleetRequestSet" + } + } + }, + "CancelSpotFleetRequestsSuccessItem":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "CurrentSpotFleetRequestState", + "PreviousSpotFleetRequestState" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "CurrentSpotFleetRequestState":{ + 
"shape":"BatchState", + "locationName":"currentSpotFleetRequestState" + }, + "PreviousSpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"previousSpotFleetRequestState" + } + } + }, + "CancelSpotFleetRequestsSuccessSet":{ + "type":"list", + "member":{ + "shape":"CancelSpotFleetRequestsSuccessItem", + "locationName":"item" + } + }, + "CancelSpotInstanceRequestState":{ + "type":"string", + "enum":[ + "active", + "open", + "closed", + "cancelled", + "completed" + ] + }, + "CancelSpotInstanceRequestsRequest":{ + "type":"structure", + "required":["SpotInstanceRequestIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + } + } + }, + "CancelSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + "CancelledSpotInstanceRequests":{ + "shape":"CancelledSpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "CancelledSpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "State":{ + "shape":"CancelSpotInstanceRequestState", + "locationName":"state" + } + } + }, + "CancelledSpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"CancelledSpotInstanceRequest", + "locationName":"item" + } + }, + "ClassicLinkDnsSupport":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ClassicLinkDnsSupported":{ + "shape":"Boolean", + "locationName":"classicLinkDnsSupported" + } + } + }, + "ClassicLinkDnsSupportList":{ + "type":"list", + "member":{ + "shape":"ClassicLinkDnsSupport", + "locationName":"item" + } + }, + "ClassicLinkInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Groups":{ + 
"shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ClassicLinkInstanceList":{ + "type":"list", + "member":{ + "shape":"ClassicLinkInstance", + "locationName":"item" + } + }, + "ClientData":{ + "type":"structure", + "members":{ + "UploadStart":{"shape":"DateTime"}, + "UploadEnd":{"shape":"DateTime"}, + "UploadSize":{"shape":"Double"}, + "Comment":{"shape":"String"} + } + }, + "ConfirmProductInstanceRequest":{ + "type":"structure", + "required":[ + "ProductCode", + "InstanceId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ProductCode":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "ConfirmProductInstanceResult":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ContainerFormat":{ + "type":"string", + "enum":["ova"] + }, + "ConversionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "ConversionTask":{ + "type":"structure", + "required":[ + "ConversionTaskId", + "State" + ], + "members":{ + "ConversionTaskId":{ + "shape":"String", + "locationName":"conversionTaskId" + }, + "ExpirationTime":{ + "shape":"String", + "locationName":"expirationTime" + }, + "ImportInstance":{ + "shape":"ImportInstanceTaskDetails", + "locationName":"importInstance" + }, + "ImportVolume":{ + "shape":"ImportVolumeTaskDetails", + "locationName":"importVolume" + }, + "State":{ + "shape":"ConversionTaskState", + "locationName":"state" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "ConversionTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "CopyImageRequest":{ + "type":"structure", + "required":[ + 
"SourceRegion", + "SourceImageId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceImageId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "ClientToken":{"shape":"String"}, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "CopyImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CopySnapshotRequest":{ + "type":"structure", + "required":[ + "SourceRegion", + "SourceSnapshotId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SourceRegion":{"shape":"String"}, + "SourceSnapshotId":{"shape":"String"}, + "Description":{"shape":"String"}, + "DestinationRegion":{ + "shape":"String", + "locationName":"destinationRegion" + }, + "PresignedUrl":{ + "shape":"String", + "locationName":"presignedUrl" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "CopySnapshotResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + } + } + }, + "CreateCustomerGatewayRequest":{ + "type":"structure", + "required":[ + "Type", + "PublicIp", + "BgpAsn" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "PublicIp":{ + "shape":"String", + "locationName":"IpAddress" + }, + "BgpAsn":{"shape":"Integer"} + } + }, + "CreateCustomerGatewayResult":{ + "type":"structure", + "members":{ + "CustomerGateway":{ + "shape":"CustomerGateway", + "locationName":"customerGateway" + } + } + }, + "CreateDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpConfigurations"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" 
+ }, + "DhcpConfigurations":{ + "shape":"NewDhcpConfigurationList", + "locationName":"dhcpConfiguration" + } + } + }, + "CreateDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptions", + "locationName":"dhcpOptions" + } + } + }, + "CreateFlowLogsRequest":{ + "type":"structure", + "required":[ + "ResourceIds", + "ResourceType", + "TrafficType", + "LogGroupName", + "DeliverLogsPermissionArn" + ], + "members":{ + "ResourceIds":{ + "shape":"ValueStringList", + "locationName":"ResourceId" + }, + "ResourceType":{"shape":"FlowLogsResourceType"}, + "TrafficType":{"shape":"TrafficType"}, + "LogGroupName":{"shape":"String"}, + "DeliverLogsPermissionArn":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CreateFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"flowLogIdSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "CreateImageRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NoReboot":{ + "shape":"Boolean", + "locationName":"noReboot" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"blockDeviceMapping" + } + } + }, + "CreateImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "CreateInstanceExportTaskRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "Description":{ + "shape":"String", + "locationName":"description" + }, + "InstanceId":{ + "shape":"String", + 
"locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + }, + "ExportToS3Task":{ + "shape":"ExportToS3TaskSpecification", + "locationName":"exportToS3" + } + } + }, + "CreateInstanceExportTaskResult":{ + "type":"structure", + "members":{ + "ExportTask":{ + "shape":"ExportTask", + "locationName":"exportTask" + } + } + }, + "CreateInternetGatewayRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateInternetGatewayResult":{ + "type":"structure", + "members":{ + "InternetGateway":{ + "shape":"InternetGateway", + "locationName":"internetGateway" + } + } + }, + "CreateKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "CreateNatGatewayRequest":{ + "type":"structure", + "required":[ + "SubnetId", + "AllocationId" + ], + "members":{ + "SubnetId":{"shape":"String"}, + "AllocationId":{"shape":"String"}, + "ClientToken":{"shape":"String"} + } + }, + "CreateNatGatewayResult":{ + "type":"structure", + "members":{ + "NatGateway":{ + "shape":"NatGateway", + "locationName":"natGateway" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + 
"locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "CreateNetworkAclRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateNetworkAclResult":{ + "type":"structure", + "members":{ + "NetworkAcl":{ + "shape":"NetworkAcl", + "locationName":"networkAcl" + } + } + }, + "CreateNetworkInterfaceRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "CreateNetworkInterfaceResult":{ + "type":"structure", + "members":{ + "NetworkInterface":{ + "shape":"NetworkInterface", + "locationName":"networkInterface" + } + } + }, + "CreatePlacementGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Strategy" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + } + } + }, + "CreateReservedInstancesListingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesId", + "InstanceCount", + "PriceSchedules", + "ClientToken" + ], + "members":{ + "ReservedInstancesId":{ + 
"shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "PriceSchedules":{ + "shape":"PriceScheduleSpecificationList", + "locationName":"priceSchedules" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateReservedInstancesListingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "CreateRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "CreateRouteResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "CreateRouteTableRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "CreateRouteTableResult":{ + "type":"structure", + "members":{ + "RouteTable":{ + "shape":"RouteTable", + "locationName":"routeTable" + } + } + }, + "CreateSecurityGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "Description" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, 
+ "GroupName":{"shape":"String"}, + "Description":{ + "shape":"String", + "locationName":"GroupDescription" + }, + "VpcId":{"shape":"String"} + } + }, + "CreateSecurityGroupResult":{ + "type":"structure", + "members":{ + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "CreateSnapshotRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + } + } + }, + "CreateSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "CreateSubnetRequest":{ + "type":"structure", + "required":[ + "VpcId", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "CidrBlock":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateSubnetResult":{ + "type":"structure", + "members":{ + "Subnet":{ + "shape":"Subnet", + "locationName":"subnet" + } + } + }, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "Resources", + "Tags" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"ResourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"Tag" + } + } + }, + "CreateVolumePermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + 
"CreateVolumePermissionList":{ + "type":"list", + "member":{ + "shape":"CreateVolumePermission", + "locationName":"item" + } + }, + "CreateVolumePermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"CreateVolumePermissionList"}, + "Remove":{"shape":"CreateVolumePermissionList"} + } + }, + "CreateVolumeRequest":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Size":{"shape":"Integer"}, + "SnapshotId":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VolumeType":{"shape":"VolumeType"}, + "Iops":{"shape":"Integer"}, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateVpcEndpointRequest":{ + "type":"structure", + "required":[ + "VpcId", + "ServiceName" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcId":{"shape":"String"}, + "ServiceName":{"shape":"String"}, + "PolicyDocument":{"shape":"String"}, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "ClientToken":{"shape":"String"} + } + }, + "CreateVpcEndpointResult":{ + "type":"structure", + "members":{ + "VpcEndpoint":{ + "shape":"VpcEndpoint", + "locationName":"vpcEndpoint" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "CreateVpcPeeringConnectionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PeerVpcId":{ + "shape":"String", + "locationName":"peerVpcId" + }, + "PeerOwnerId":{ + "shape":"String", + "locationName":"peerOwnerId" + } + } + }, + "CreateVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnection":{ + "shape":"VpcPeeringConnection", + "locationName":"vpcPeeringConnection" + } + } + }, + "CreateVpcRequest":{ + "type":"structure", + "required":["CidrBlock"], + 
"members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CidrBlock":{"shape":"String"}, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + } + } + }, + "CreateVpcResult":{ + "type":"structure", + "members":{ + "Vpc":{ + "shape":"Vpc", + "locationName":"vpc" + } + } + }, + "CreateVpnConnectionRequest":{ + "type":"structure", + "required":[ + "Type", + "CustomerGatewayId", + "VpnGatewayId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"String"}, + "CustomerGatewayId":{"shape":"String"}, + "VpnGatewayId":{"shape":"String"}, + "Options":{ + "shape":"VpnConnectionOptionsSpecification", + "locationName":"options" + } + } + }, + "CreateVpnConnectionResult":{ + "type":"structure", + "members":{ + "VpnConnection":{ + "shape":"VpnConnection", + "locationName":"vpnConnection" + } + } + }, + "CreateVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "CreateVpnGatewayRequest":{ + "type":"structure", + "required":["Type"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Type":{"shape":"GatewayType"}, + "AvailabilityZone":{"shape":"String"} + } + }, + "CreateVpnGatewayResult":{ + "type":"structure", + "members":{ + "VpnGateway":{ + "shape":"VpnGateway", + "locationName":"vpnGateway" + } + } + }, + "CurrencyCodeValues":{ + "type":"string", + "enum":["USD"] + }, + "CustomerGateway":{ + "type":"structure", + "members":{ + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "State":{ + "shape":"String", + "locationName":"state" + }, + "Type":{ + "shape":"String", + "locationName":"type" + }, + "IpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "BgpAsn":{ + "shape":"String", + "locationName":"bgpAsn" + }, + "Tags":{ + 
"shape":"TagList", + "locationName":"tagSet" + } + } + }, + "CustomerGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CustomerGatewayId" + } + }, + "CustomerGatewayList":{ + "type":"list", + "member":{ + "shape":"CustomerGateway", + "locationName":"item" + } + }, + "DatafeedSubscriptionState":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, + "DateTime":{"type":"timestamp"}, + "DeleteCustomerGatewayRequest":{ + "type":"structure", + "required":["CustomerGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayId":{"shape":"String"} + } + }, + "DeleteDhcpOptionsRequest":{ + "type":"structure", + "required":["DhcpOptionsId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsId":{"shape":"String"} + } + }, + "DeleteFlowLogsRequest":{ + "type":"structure", + "required":["FlowLogIds"], + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + } + } + }, + "DeleteFlowLogsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteInternetGatewayRequest":{ + "type":"structure", + "required":["InternetGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + } + } + }, + "DeleteKeyPairRequest":{ + "type":"structure", + "required":["KeyName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{"shape":"String"} + } + }, + "DeleteNatGatewayRequest":{ + "type":"structure", + "required":["NatGatewayId"], + "members":{ + "NatGatewayId":{"shape":"String"} + } + }, + "DeleteNatGatewayResult":{ + "type":"structure", + "members":{ + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "DeleteNetworkAclEntryRequest":{ + 
"type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Egress" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + } + } + }, + "DeleteNetworkAclRequest":{ + "type":"structure", + "required":["NetworkAclId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + "DeleteNetworkInterfaceRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + } + } + }, + "DeletePlacementGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "DeleteRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + } + } + }, + "DeleteRouteTableRequest":{ + "type":"structure", + "required":["RouteTableId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "DeleteSecurityGroupRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"} + } + }, + "DeleteSnapshotRequest":{ + 
"type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"} + } + }, + "DeleteSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DeleteSubnetRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetId":{"shape":"String"} + } + }, + "DeleteTagsRequest":{ + "type":"structure", + "required":["Resources"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Resources":{ + "shape":"ResourceIdList", + "locationName":"resourceId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tag" + } + } + }, + "DeleteVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"} + } + }, + "DeleteVpcEndpointsRequest":{ + "type":"structure", + "required":["VpcEndpointIds"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + } + } + }, + "DeleteVpcEndpointsResult":{ + "type":"structure", + "members":{ + "Unsuccessful":{ + "shape":"UnsuccessfulItemSet", + "locationName":"unsuccessful" + } + } + }, + "DeleteVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "DeleteVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DeleteVpcRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + 
"VpcId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRequest":{ + "type":"structure", + "required":["VpnConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionId":{"shape":"String"} + } + }, + "DeleteVpnConnectionRouteRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "DestinationCidrBlock" + ], + "members":{ + "VpnConnectionId":{"shape":"String"}, + "DestinationCidrBlock":{"shape":"String"} + } + }, + "DeleteVpnGatewayRequest":{ + "type":"structure", + "required":["VpnGatewayId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"} + } + }, + "DeregisterImageRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"} + } + }, + "DescribeAccountAttributesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AttributeNames":{ + "shape":"AccountAttributeNameStringList", + "locationName":"attributeName" + } + } + }, + "DescribeAccountAttributesResult":{ + "type":"structure", + "members":{ + "AccountAttributes":{ + "shape":"AccountAttributeList", + "locationName":"accountAttributeSet" + } + } + }, + "DescribeAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"PublicIpStringList", + "locationName":"PublicIp" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AllocationIds":{ + "shape":"AllocationIdList", + "locationName":"AllocationId" + } + } + }, + "DescribeAddressesResult":{ + "type":"structure", + "members":{ + "Addresses":{ + "shape":"AddressList", + "locationName":"addressesSet" + } + } + }, + "DescribeAvailabilityZonesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + 
"ZoneNames":{ + "shape":"ZoneNameStringList", + "locationName":"ZoneName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeAvailabilityZonesResult":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneList", + "locationName":"availabilityZoneInfo" + } + } + }, + "DescribeBundleTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "BundleIds":{ + "shape":"BundleIdStringList", + "locationName":"BundleId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeBundleTasksResult":{ + "type":"structure", + "members":{ + "BundleTasks":{ + "shape":"BundleTaskList", + "locationName":"bundleInstanceTasksSet" + } + } + }, + "DescribeClassicLinkInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeClassicLinkInstancesResult":{ + "type":"structure", + "members":{ + "Instances":{ + "shape":"ClassicLinkInstanceList", + "locationName":"instancesSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeConversionTaskList":{ + "type":"list", + "member":{ + "shape":"ConversionTask", + "locationName":"item" + } + }, + "DescribeConversionTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "ConversionTaskIds":{ + "shape":"ConversionIdStringList", + "locationName":"conversionTaskId" + } + } + }, + "DescribeConversionTasksResult":{ + "type":"structure", + 
"members":{ + "ConversionTasks":{ + "shape":"DescribeConversionTaskList", + "locationName":"conversionTasks" + } + } + }, + "DescribeCustomerGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "CustomerGatewayIds":{ + "shape":"CustomerGatewayIdStringList", + "locationName":"CustomerGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeCustomerGatewaysResult":{ + "type":"structure", + "members":{ + "CustomerGateways":{ + "shape":"CustomerGatewayList", + "locationName":"customerGatewaySet" + } + } + }, + "DescribeDhcpOptionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "DhcpOptionsIds":{ + "shape":"DhcpOptionsIdStringList", + "locationName":"DhcpOptionsId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeDhcpOptionsResult":{ + "type":"structure", + "members":{ + "DhcpOptions":{ + "shape":"DhcpOptionsList", + "locationName":"dhcpOptionsSet" + } + } + }, + "DescribeExportTasksRequest":{ + "type":"structure", + "members":{ + "ExportTaskIds":{ + "shape":"ExportTaskIdStringList", + "locationName":"exportTaskId" + } + } + }, + "DescribeExportTasksResult":{ + "type":"structure", + "members":{ + "ExportTasks":{ + "shape":"ExportTaskList", + "locationName":"exportTaskSet" + } + } + }, + "DescribeFlowLogsRequest":{ + "type":"structure", + "members":{ + "FlowLogIds":{ + "shape":"ValueStringList", + "locationName":"FlowLogId" + }, + "Filter":{"shape":"FilterList"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeFlowLogsResult":{ + "type":"structure", + "members":{ + "FlowLogs":{ + "shape":"FlowLogSet", + "locationName":"flowLogSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeHostsRequest":{ + "type":"structure", + "members":{ + "HostIds":{ + 
"shape":"RequestHostIdList", + "locationName":"hostId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "Filter":{ + "shape":"FilterList", + "locationName":"filter" + } + } + }, + "DescribeHostsResult":{ + "type":"structure", + "members":{ + "Hosts":{ + "shape":"HostList", + "locationName":"hostSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeIdFormatRequest":{ + "type":"structure", + "members":{ + "Resource":{"shape":"String"} + } + }, + "DescribeIdFormatResult":{ + "type":"structure", + "members":{ + "Statuses":{ + "shape":"IdFormatList", + "locationName":"statusSet" + } + } + }, + "DescribeImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ImageAttributeName"} + } + }, + "DescribeImagesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageIds":{ + "shape":"ImageIdStringList", + "locationName":"ImageId" + }, + "Owners":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "ExecutableUsers":{ + "shape":"ExecutableByStringList", + "locationName":"ExecutableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeImagesResult":{ + "type":"structure", + "members":{ + "Images":{ + "shape":"ImageList", + "locationName":"imagesSet" + } + } + }, + "DescribeImportImageTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportImageTasksResult":{ + "type":"structure", + "members":{ + 
"ImportImageTasks":{ + "shape":"ImportImageTaskList", + "locationName":"importImageTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeImportSnapshotTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ImportTaskIds":{ + "shape":"ImportTaskIdList", + "locationName":"ImportTaskId" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeImportSnapshotTasksResult":{ + "type":"structure", + "members":{ + "ImportSnapshotTasks":{ + "shape":"ImportSnapshotTaskList", + "locationName":"importSnapshotTaskSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "DescribeInstanceStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "IncludeAllInstances":{ + "shape":"Boolean", + "locationName":"includeAllInstances" + } + } + }, + "DescribeInstanceStatusResult":{ + "type":"structure", + "members":{ + "InstanceStatuses":{ + "shape":"InstanceStatusList", + "locationName":"instanceStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + 
"locationName":"InstanceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeInstancesResult":{ + "type":"structure", + "members":{ + "Reservations":{ + "shape":"ReservationList", + "locationName":"reservationSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeInternetGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayIds":{ + "shape":"ValueStringList", + "locationName":"internetGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeInternetGatewaysResult":{ + "type":"structure", + "members":{ + "InternetGateways":{ + "shape":"InternetGatewayList", + "locationName":"internetGatewaySet" + } + } + }, + "DescribeKeyPairsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyNames":{ + "shape":"KeyNameStringList", + "locationName":"KeyName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeKeyPairsResult":{ + "type":"structure", + "members":{ + "KeyPairs":{ + "shape":"KeyPairList", + "locationName":"keySet" + } + } + }, + "DescribeMovingAddressesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIps":{ + "shape":"ValueStringList", + "locationName":"publicIp" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeMovingAddressesResult":{ + "type":"structure", + "members":{ + "MovingAddressStatuses":{ + "shape":"MovingAddressStatusSet", + 
"locationName":"movingAddressStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeNatGatewaysRequest":{ + "type":"structure", + "members":{ + "NatGatewayIds":{ + "shape":"ValueStringList", + "locationName":"NatGatewayId" + }, + "Filter":{"shape":"FilterList"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeNatGatewaysResult":{ + "type":"structure", + "members":{ + "NatGateways":{ + "shape":"NatGatewayList", + "locationName":"natGatewaySet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeNetworkAclsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclIds":{ + "shape":"ValueStringList", + "locationName":"NetworkAclId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeNetworkAclsResult":{ + "type":"structure", + "members":{ + "NetworkAcls":{ + "shape":"NetworkAclList", + "locationName":"networkAclSet" + } + } + }, + "DescribeNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Attribute":{ + "shape":"NetworkInterfaceAttribute", + "locationName":"attribute" + } + } + }, + "DescribeNetworkInterfaceAttributeResult":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + } + } + }, + 
"DescribeNetworkInterfacesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceIds":{ + "shape":"NetworkInterfaceIdList", + "locationName":"NetworkInterfaceId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filter" + } + } + }, + "DescribeNetworkInterfacesResult":{ + "type":"structure", + "members":{ + "NetworkInterfaces":{ + "shape":"NetworkInterfaceList", + "locationName":"networkInterfaceSet" + } + } + }, + "DescribePlacementGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"PlacementGroupStringList", + "locationName":"groupName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribePlacementGroupsResult":{ + "type":"structure", + "members":{ + "PlacementGroups":{ + "shape":"PlacementGroupList", + "locationName":"placementGroupSet" + } + } + }, + "DescribePrefixListsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "PrefixListIds":{ + "shape":"ValueStringList", + "locationName":"PrefixListId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribePrefixListsResult":{ + "type":"structure", + "members":{ + "PrefixLists":{ + "shape":"PrefixListSet", + "locationName":"prefixListSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeRegionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RegionNames":{ + "shape":"RegionNameStringList", + "locationName":"RegionName" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRegionsResult":{ + "type":"structure", + "members":{ + "Regions":{ + "shape":"RegionList", + "locationName":"regionInfo" + } + } + }, + 
"DescribeReservedInstancesListingsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"filters" + } + } + }, + "DescribeReservedInstancesListingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesListings":{ + "shape":"ReservedInstancesListingList", + "locationName":"reservedInstancesListingsSet" + } + } + }, + "DescribeReservedInstancesModificationsRequest":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationIds":{ + "shape":"ReservedInstancesModificationIdStringList", + "locationName":"ReservedInstancesModificationId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeReservedInstancesModificationsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModifications":{ + "shape":"ReservedInstancesModificationList", + "locationName":"reservedInstancesModificationsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesOfferingsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingIds":{ + "shape":"ReservedInstancesOfferingIdStringList", + "locationName":"ReservedInstancesOfferingId" + }, + "InstanceType":{"shape":"InstanceType"}, + "AvailabilityZone":{"shape":"String"}, + "ProductDescription":{"shape":"RIProductDescription"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, 
+ "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "IncludeMarketplace":{"shape":"Boolean"}, + "MinDuration":{"shape":"Long"}, + "MaxDuration":{"shape":"Long"}, + "MaxInstanceCount":{"shape":"Integer"} + } + }, + "DescribeReservedInstancesOfferingsResult":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferings":{ + "shape":"ReservedInstancesOfferingList", + "locationName":"reservedInstancesOfferingsSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeReservedInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + } + } + }, + "DescribeReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstances":{ + "shape":"ReservedInstancesList", + "locationName":"reservedInstancesSet" + } + } + }, + "DescribeRouteTablesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RouteTableId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeRouteTablesResult":{ + "type":"structure", + "members":{ + "RouteTables":{ + "shape":"RouteTableList", + "locationName":"routeTableSet" + } + } + }, + "DescribeScheduledInstanceAvailabilityRequest":{ + "type":"structure", + "required":[ + "Recurrence", + "FirstSlotStartTimeRange" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "Recurrence":{"shape":"ScheduledInstanceRecurrenceRequest"}, + "FirstSlotStartTimeRange":{"shape":"SlotDateTimeRangeRequest"}, + "MinSlotDurationInHours":{"shape":"Integer"}, + "MaxSlotDurationInHours":{"shape":"Integer"}, + 
"NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeScheduledInstanceAvailabilityResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "ScheduledInstanceAvailabilitySet":{ + "shape":"ScheduledInstanceAvailabilitySet", + "locationName":"scheduledInstanceAvailabilitySet" + } + } + }, + "DescribeScheduledInstancesRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "ScheduledInstanceIds":{ + "shape":"ScheduledInstanceIdRequestSet", + "locationName":"ScheduledInstanceId" + }, + "SlotStartTimeRange":{"shape":"SlotStartTimeRangeRequest"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"}, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeScheduledInstancesResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "ScheduledInstanceSet":{ + "shape":"ScheduledInstanceSet", + "locationName":"scheduledInstanceSet" + } + } + }, + "DescribeSecurityGroupsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSecurityGroupsResult":{ + "type":"structure", + "members":{ + "SecurityGroups":{ + "shape":"SecurityGroupList", + "locationName":"securityGroupInfo" + } + } + }, + "DescribeSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + 
"DescribeSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "CreateVolumePermissions":{ + "shape":"CreateVolumePermissionList", + "locationName":"createVolumePermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeSnapshotsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotIds":{ + "shape":"SnapshotIdStringList", + "locationName":"SnapshotId" + }, + "OwnerIds":{ + "shape":"OwnerStringList", + "locationName":"Owner" + }, + "RestorableByUserIds":{ + "shape":"RestorableByStringList", + "locationName":"RestorableBy" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeSnapshotsResult":{ + "type":"structure", + "members":{ + "Snapshots":{ + "shape":"SnapshotList", + "locationName":"snapshotSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotDatafeedSubscriptionRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + } + } + }, + "DescribeSpotDatafeedSubscriptionResult":{ + "type":"structure", + "members":{ + "SpotDatafeedSubscription":{ + "shape":"SpotDatafeedSubscription", + "locationName":"spotDatafeedSubscription" + } + } + }, + "DescribeSpotFleetInstancesRequest":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetInstancesResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + 
"ActiveInstances" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "ActiveInstances":{ + "shape":"ActiveInstanceSet", + "locationName":"activeInstanceSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestHistoryRequest":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestHistoryResponse":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "StartTime", + "LastEvaluatedTime", + "HistoryRecords" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "LastEvaluatedTime":{ + "shape":"DateTime", + "locationName":"lastEvaluatedTime" + }, + "HistoryRecords":{ + "shape":"HistoryRecords", + "locationName":"historyRecordSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotFleetRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestIds":{ + "shape":"ValueStringList", + "locationName":"spotFleetRequestId" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeSpotFleetRequestsResponse":{ + "type":"structure", + "required":["SpotFleetRequestConfigs"], + "members":{ + 
"SpotFleetRequestConfigs":{ + "shape":"SpotFleetRequestConfigSet", + "locationName":"spotFleetRequestConfigSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotInstanceRequestsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotInstanceRequestIds":{ + "shape":"SpotInstanceRequestIdList", + "locationName":"SpotInstanceRequestId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSpotInstanceRequestsResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "DescribeSpotPriceHistoryRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "InstanceTypes":{ + "shape":"InstanceTypeList", + "locationName":"InstanceType" + }, + "ProductDescriptions":{ + "shape":"ProductDescriptionList", + "locationName":"ProductDescription" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSpotPriceHistoryResult":{ + "type":"structure", + "members":{ + "SpotPriceHistory":{ + "shape":"SpotPriceHistoryList", + "locationName":"spotPriceHistorySet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeSubnetsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SubnetIds":{ + "shape":"SubnetIdStringList", + "locationName":"SubnetId" + }, + "Filters":{ + 
"shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeSubnetsResult":{ + "type":"structure", + "members":{ + "Subnets":{ + "shape":"SubnetList", + "locationName":"subnetSet" + } + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeTagsResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagDescriptionList", + "locationName":"tagSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "Attribute":{"shape":"VolumeAttributeName"} + } + }, + "DescribeVolumeAttributeResult":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AutoEnableIO":{ + "shape":"AttributeBooleanValue", + "locationName":"autoEnableIO" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + } + } + }, + "DescribeVolumeStatusRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeVolumeStatusResult":{ + "type":"structure", + "members":{ + "VolumeStatuses":{ + "shape":"VolumeStatusList", + "locationName":"volumeStatusSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVolumesRequest":{ + "type":"structure", + 
"members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeIds":{ + "shape":"VolumeIdStringList", + "locationName":"VolumeId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"Integer", + "locationName":"maxResults" + } + } + }, + "DescribeVolumesResult":{ + "type":"structure", + "members":{ + "Volumes":{ + "shape":"VolumeList", + "locationName":"volumeSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcAttributeRequest":{ + "type":"structure", + "required":[ + "VpcId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{"shape":"String"}, + "Attribute":{"shape":"VpcAttributeName"} + } + }, + "DescribeVpcAttributeResult":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsSupport" + }, + "EnableDnsHostnames":{ + "shape":"AttributeBooleanValue", + "locationName":"enableDnsHostnames" + } + } + }, + "DescribeVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + "VpcIds":{"shape":"VpcClassicLinkIdList"}, + "MaxResults":{ + "shape":"MaxResults", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "locationName":"nextToken" + } + } + }, + "DescribeVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"ClassicLinkDnsSupportList", + "locationName":"vpcs" + }, + "NextToken":{ + "shape":"NextToken", + "locationName":"nextToken" + } + } + }, + "DescribeVpcClassicLinkRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcClassicLinkIdList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + 
} + } + }, + "DescribeVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcClassicLinkList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpcEndpointServicesRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointServicesResult":{ + "type":"structure", + "members":{ + "ServiceNames":{ + "shape":"ValueStringList", + "locationName":"serviceNameSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcEndpointsRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointIds":{ + "shape":"ValueStringList", + "locationName":"VpcEndpointId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeVpcEndpointsResult":{ + "type":"structure", + "members":{ + "VpcEndpoints":{ + "shape":"VpcEndpointSet", + "locationName":"vpcEndpointSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, + "DescribeVpcPeeringConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionIds":{ + "shape":"ValueStringList", + "locationName":"VpcPeeringConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcPeeringConnectionsResult":{ + "type":"structure", + "members":{ + "VpcPeeringConnections":{ + "shape":"VpcPeeringConnectionList", + "locationName":"vpcPeeringConnectionSet" + } + } + }, + "DescribeVpcsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcIds":{ + "shape":"VpcIdStringList", + "locationName":"VpcId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpcsResult":{ + 
"type":"structure", + "members":{ + "Vpcs":{ + "shape":"VpcList", + "locationName":"vpcSet" + } + } + }, + "DescribeVpnConnectionsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnConnectionIds":{ + "shape":"VpnConnectionIdStringList", + "locationName":"VpnConnectionId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnConnectionsResult":{ + "type":"structure", + "members":{ + "VpnConnections":{ + "shape":"VpnConnectionList", + "locationName":"vpnConnectionSet" + } + } + }, + "DescribeVpnGatewaysRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayIds":{ + "shape":"VpnGatewayIdStringList", + "locationName":"VpnGatewayId" + }, + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + } + } + }, + "DescribeVpnGatewaysResult":{ + "type":"structure", + "members":{ + "VpnGateways":{ + "shape":"VpnGatewayList", + "locationName":"vpnGatewaySet" + } + } + }, + "DetachClassicLinkVpcRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachClassicLinkVpcResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DetachInternetGatewayRequest":{ + "type":"structure", + "required":[ + "InternetGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DetachNetworkInterfaceRequest":{ + "type":"structure", + "required":["AttachmentId"], + "members":{ + "DryRun":{ + 
"shape":"Boolean", + "locationName":"dryRun" + }, + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "DetachVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Device":{"shape":"String"}, + "Force":{"shape":"Boolean"} + } + }, + "DetachVpnGatewayRequest":{ + "type":"structure", + "required":[ + "VpnGatewayId", + "VpcId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpnGatewayId":{"shape":"String"}, + "VpcId":{"shape":"String"} + } + }, + "DeviceType":{ + "type":"string", + "enum":[ + "ebs", + "instance-store" + ] + }, + "DhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"DhcpConfigurationValueList", + "locationName":"valueSet" + } + } + }, + "DhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"DhcpConfiguration", + "locationName":"item" + } + }, + "DhcpConfigurationValueList":{ + "type":"list", + "member":{ + "shape":"AttributeValue", + "locationName":"item" + } + }, + "DhcpOptions":{ + "type":"structure", + "members":{ + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "DhcpConfigurations":{ + "shape":"DhcpConfigurationList", + "locationName":"dhcpConfigurationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "DhcpOptionsIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DhcpOptionsId" + } + }, + "DhcpOptionsList":{ + "type":"list", + "member":{ + "shape":"DhcpOptions", + "locationName":"item" + } + }, + "DisableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + 
"GatewayId":{"shape":"String"} + } + }, + "DisableVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"String"} + } + }, + "DisableVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DisableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "DisableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "DisassociateAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AssociationId":{"shape":"String"} + } + }, + "DisassociateRouteTableRequest":{ + "type":"structure", + "required":["AssociationId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "DiskImage":{ + "type":"structure", + "members":{ + "Image":{"shape":"DiskImageDetail"}, + "Description":{"shape":"String"}, + "Volume":{"shape":"VolumeDetail"} + } + }, + "DiskImageDescription":{ + "type":"structure", + "required":[ + "Format", + "Size", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + }, + "Checksum":{ + "shape":"String", + "locationName":"checksum" + } + } + }, + "DiskImageDetail":{ + "type":"structure", + "required":[ + "Format", + "Bytes", + "ImportManifestUrl" + ], + "members":{ + "Format":{ + "shape":"DiskImageFormat", + "locationName":"format" + }, + "Bytes":{ + "shape":"Long", + "locationName":"bytes" + 
}, + "ImportManifestUrl":{ + "shape":"String", + "locationName":"importManifestUrl" + } + } + }, + "DiskImageFormat":{ + "type":"string", + "enum":[ + "VMDK", + "RAW", + "VHD" + ] + }, + "DiskImageList":{ + "type":"list", + "member":{"shape":"DiskImage"} + }, + "DiskImageVolumeDescription":{ + "type":"structure", + "required":["Id"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "DomainType":{ + "type":"string", + "enum":[ + "vpc", + "standard" + ] + }, + "Double":{"type":"double"}, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "VolumeType":{ + "shape":"VolumeType", + "locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + } + } + }, + "EbsInstanceBlockDevice":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EbsInstanceBlockDeviceSpecification":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "EnableVgwRoutePropagationRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "GatewayId" + ], + "members":{ + "RouteTableId":{"shape":"String"}, + "GatewayId":{"shape":"String"} + } + }, + "EnableVolumeIORequest":{ + "type":"structure", + "required":["VolumeId"], + 
"members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + } + } + }, + "EnableVpcClassicLinkDnsSupportRequest":{ + "type":"structure", + "members":{ + "VpcId":{"shape":"String"} + } + }, + "EnableVpcClassicLinkDnsSupportResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "EnableVpcClassicLinkRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "EnableVpcClassicLinkResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "EventCode":{ + "type":"string", + "enum":[ + "instance-reboot", + "system-reboot", + "system-maintenance", + "instance-retirement", + "instance-stop" + ] + }, + "EventInformation":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "EventSubType":{ + "shape":"String", + "locationName":"eventSubType" + }, + "EventDescription":{ + "shape":"String", + "locationName":"eventDescription" + } + } + }, + "EventType":{ + "type":"string", + "enum":[ + "instanceChange", + "fleetRequestChange", + "error" + ] + }, + "ExcessCapacityTerminationPolicy":{ + "type":"string", + "enum":[ + "noTermination", + "default" + ] + }, + "ExecutableByStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExecutableBy" + } + }, + "ExportEnvironment":{ + "type":"string", + "enum":[ + "citrix", + "vmware", + "microsoft" + ] + }, + "ExportTask":{ + "type":"structure", + "members":{ + "ExportTaskId":{ + "shape":"String", + "locationName":"exportTaskId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "State":{ + "shape":"ExportTaskState", + "locationName":"state" + }, + "StatusMessage":{ + 
"shape":"String", + "locationName":"statusMessage" + }, + "InstanceExportDetails":{ + "shape":"InstanceExportDetails", + "locationName":"instanceExport" + }, + "ExportToS3Task":{ + "shape":"ExportToS3Task", + "locationName":"exportToS3" + } + } + }, + "ExportTaskIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ExportTaskId" + } + }, + "ExportTaskList":{ + "type":"list", + "member":{ + "shape":"ExportTask", + "locationName":"item" + } + }, + "ExportTaskState":{ + "type":"string", + "enum":[ + "active", + "cancelling", + "cancelled", + "completed" + ] + }, + "ExportToS3Task":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "ExportToS3TaskSpecification":{ + "type":"structure", + "members":{ + "DiskImageFormat":{ + "shape":"DiskImageFormat", + "locationName":"diskImageFormat" + }, + "ContainerFormat":{ + "shape":"ContainerFormat", + "locationName":"containerFormat" + }, + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Prefix":{ + "shape":"String", + "locationName":"s3Prefix" + } + } + }, + "Filter":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "Float":{"type":"float"}, + "FlowLog":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"DateTime", + "locationName":"creationTime" + }, + "FlowLogId":{ + "shape":"String", + "locationName":"flowLogId" + }, + "FlowLogStatus":{ + "shape":"String", + "locationName":"flowLogStatus" + }, + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" 
+ }, + "TrafficType":{ + "shape":"TrafficType", + "locationName":"trafficType" + }, + "LogGroupName":{ + "shape":"String", + "locationName":"logGroupName" + }, + "DeliverLogsStatus":{ + "shape":"String", + "locationName":"deliverLogsStatus" + }, + "DeliverLogsErrorMessage":{ + "shape":"String", + "locationName":"deliverLogsErrorMessage" + }, + "DeliverLogsPermissionArn":{ + "shape":"String", + "locationName":"deliverLogsPermissionArn" + } + } + }, + "FlowLogSet":{ + "type":"list", + "member":{ + "shape":"FlowLog", + "locationName":"item" + } + }, + "FlowLogsResourceType":{ + "type":"string", + "enum":[ + "VPC", + "Subnet", + "NetworkInterface" + ] + }, + "GatewayType":{ + "type":"string", + "enum":["ipsec.1"] + }, + "GetConsoleOutputRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetConsoleOutputResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "Output":{ + "shape":"String", + "locationName":"output" + } + } + }, + "GetPasswordDataRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{"shape":"String"} + } + }, + "GetPasswordDataResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "PasswordData":{ + "shape":"String", + "locationName":"passwordData" + } + } + }, + "GroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"groupId" + } + }, + "GroupIdentifier":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + 
"locationName":"groupId" + } + } + }, + "GroupIdentifierList":{ + "type":"list", + "member":{ + "shape":"GroupIdentifier", + "locationName":"item" + } + }, + "GroupNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"GroupName" + } + }, + "HistoryRecord":{ + "type":"structure", + "required":[ + "Timestamp", + "EventType", + "EventInformation" + ], + "members":{ + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "EventType":{ + "shape":"EventType", + "locationName":"eventType" + }, + "EventInformation":{ + "shape":"EventInformation", + "locationName":"eventInformation" + } + } + }, + "HistoryRecords":{ + "type":"list", + "member":{ + "shape":"HistoryRecord", + "locationName":"item" + } + }, + "Host":{ + "type":"structure", + "members":{ + "HostId":{ + "shape":"String", + "locationName":"hostId" + }, + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + }, + "HostReservationId":{ + "shape":"String", + "locationName":"hostReservationId" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "HostProperties":{ + "shape":"HostProperties", + "locationName":"hostProperties" + }, + "State":{ + "shape":"AllocationState", + "locationName":"state" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Instances":{ + "shape":"HostInstanceList", + "locationName":"instances" + }, + "AvailableCapacity":{ + "shape":"AvailableCapacity", + "locationName":"availableCapacity" + } + } + }, + "HostInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + } + } + }, + "HostInstanceList":{ + "type":"list", + "member":{ + "shape":"HostInstance", + "locationName":"item" + } + }, + "HostList":{ + "type":"list", + "member":{ + "shape":"Host", + "locationName":"item" + } + }, + "HostProperties":{ + 
"type":"structure", + "members":{ + "Sockets":{ + "shape":"Integer", + "locationName":"sockets" + }, + "Cores":{ + "shape":"Integer", + "locationName":"cores" + }, + "TotalVCpus":{ + "shape":"Integer", + "locationName":"totalVCpus" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + } + } + }, + "HostTenancy":{ + "type":"string", + "enum":[ + "dedicated", + "host" + ] + }, + "HypervisorType":{ + "type":"string", + "enum":[ + "ovm", + "xen" + ] + }, + "IamInstanceProfile":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Id":{ + "shape":"String", + "locationName":"id" + } + } + }, + "IamInstanceProfileSpecification":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "locationName":"arn" + }, + "Name":{ + "shape":"String", + "locationName":"name" + } + } + }, + "IcmpTypeCode":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"Integer", + "locationName":"type" + }, + "Code":{ + "shape":"Integer", + "locationName":"code" + } + } + }, + "IdFormat":{ + "type":"structure", + "members":{ + "Resource":{ + "shape":"String", + "locationName":"resource" + }, + "UseLongIds":{ + "shape":"Boolean", + "locationName":"useLongIds" + }, + "Deadline":{ + "shape":"DateTime", + "locationName":"deadline" + } + } + }, + "IdFormatList":{ + "type":"list", + "member":{ + "shape":"IdFormat", + "locationName":"item" + } + }, + "Image":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "ImageLocation":{ + "shape":"String", + "locationName":"imageLocation" + }, + "State":{ + "shape":"ImageState", + "locationName":"imageState" + }, + "OwnerId":{ + "shape":"String", + "locationName":"imageOwnerId" + }, + "CreationDate":{ + "shape":"String", + "locationName":"creationDate" + }, + "Public":{ + "shape":"Boolean", + "locationName":"isPublic" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + 
"Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "ImageType":{ + "shape":"ImageTypeValues", + "locationName":"imageType" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "ImageOwnerAlias":{ + "shape":"String", + "locationName":"imageOwnerAlias" + }, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "Hypervisor":{ + "shape":"HypervisorType", + "locationName":"hypervisor" + } + } + }, + "ImageAttribute":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "LaunchPermissions":{ + "shape":"LaunchPermissionList", + "locationName":"launchPermission" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + 
"locationName":"blockDeviceMapping" + } + } + }, + "ImageAttributeName":{ + "type":"string", + "enum":[ + "description", + "kernel", + "ramdisk", + "launchPermission", + "productCodes", + "blockDeviceMapping", + "sriovNetSupport" + ] + }, + "ImageDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"}, + "DeviceName":{"shape":"String"}, + "SnapshotId":{"shape":"String"} + } + }, + "ImageDiskContainerList":{ + "type":"list", + "member":{ + "shape":"ImageDiskContainer", + "locationName":"item" + } + }, + "ImageIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImageId" + } + }, + "ImageList":{ + "type":"list", + "member":{ + "shape":"Image", + "locationName":"item" + } + }, + "ImageState":{ + "type":"string", + "enum":[ + "pending", + "available", + "invalid", + "deregistered", + "transient", + "failed", + "error" + ] + }, + "ImageTypeValues":{ + "type":"string", + "enum":[ + "machine", + "kernel", + "ramdisk" + ] + }, + "ImportImageRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainers":{ + "shape":"ImageDiskContainerList", + "locationName":"DiskContainer" + }, + "LicenseType":{"shape":"String"}, + "Hypervisor":{"shape":"String"}, + "Architecture":{"shape":"String"}, + "Platform":{"shape":"String"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportImageResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + 
}, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "Architecture":{ + "shape":"String", + "locationName":"architecture" + }, + "LicenseType":{ + "shape":"String", + "locationName":"licenseType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "Hypervisor":{ + "shape":"String", + "locationName":"hypervisor" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "SnapshotDetails":{ + "shape":"SnapshotDetailList", + "locationName":"snapshotDetailSet" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "ImportImageTaskList":{ + "type":"list", + "member":{ + "shape":"ImportImageTask", + "locationName":"item" + } + }, + "ImportInstanceLaunchSpecification":{ + "type":"structure", + "members":{ + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "GroupNames":{ + "shape":"SecurityGroupStringList", + "locationName":"GroupName" + }, + "GroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"GroupId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "UserData":{ + "shape":"UserData", + "locationName":"userData" + }, + "InstanceType":{ + "shape":"InstanceType", + 
"locationName":"instanceType" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "Monitoring":{ + "shape":"Boolean", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + } + } + }, + "ImportInstanceRequest":{ + "type":"structure", + "required":["Platform"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "LaunchSpecification":{ + "shape":"ImportInstanceLaunchSpecification", + "locationName":"launchSpecification" + }, + "DiskImages":{ + "shape":"DiskImageList", + "locationName":"diskImage" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + } + } + }, + "ImportInstanceResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportInstanceTaskDetails":{ + "type":"structure", + "required":["Volumes"], + "members":{ + "Volumes":{ + "shape":"ImportInstanceVolumeDetailSet", + "locationName":"volumes" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailItem":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume", + "Status" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + 
"shape":"DiskImageVolumeDescription", + "locationName":"volume" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportInstanceVolumeDetailSet":{ + "type":"list", + "member":{ + "shape":"ImportInstanceVolumeDetailItem", + "locationName":"item" + } + }, + "ImportKeyPairRequest":{ + "type":"structure", + "required":[ + "KeyName", + "PublicKeyMaterial" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "PublicKeyMaterial":{ + "shape":"Blob", + "locationName":"publicKeyMaterial" + } + } + }, + "ImportKeyPairResult":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "ImportSnapshotRequest":{ + "type":"structure", + "members":{ + "DryRun":{"shape":"Boolean"}, + "Description":{"shape":"String"}, + "DiskContainer":{"shape":"SnapshotDiskContainer"}, + "ClientData":{"shape":"ClientData"}, + "ClientToken":{"shape":"String"}, + "RoleName":{"shape":"String"} + } + }, + "ImportSnapshotResult":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTask":{ + "type":"structure", + "members":{ + "ImportTaskId":{ + "shape":"String", + "locationName":"importTaskId" + }, + "SnapshotTaskDetail":{ + "shape":"SnapshotTaskDetail", + "locationName":"snapshotTaskDetail" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ImportSnapshotTaskList":{ + "type":"list", + "member":{ + 
"shape":"ImportSnapshotTask", + "locationName":"item" + } + }, + "ImportTaskIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ImportTaskId" + } + }, + "ImportVolumeRequest":{ + "type":"structure", + "required":[ + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Image":{ + "shape":"DiskImageDetail", + "locationName":"image" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Volume":{ + "shape":"VolumeDetail", + "locationName":"volume" + } + } + }, + "ImportVolumeResult":{ + "type":"structure", + "members":{ + "ConversionTask":{ + "shape":"ConversionTask", + "locationName":"conversionTask" + } + } + }, + "ImportVolumeTaskDetails":{ + "type":"structure", + "required":[ + "BytesConverted", + "AvailabilityZone", + "Image", + "Volume" + ], + "members":{ + "BytesConverted":{ + "shape":"Long", + "locationName":"bytesConverted" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Image":{ + "shape":"DiskImageDescription", + "locationName":"image" + }, + "Volume":{ + "shape":"DiskImageVolumeDescription", + "locationName":"volume" + } + } + }, + "Instance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "State":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"dnsName" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"reason" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "AmiLaunchIndex":{ + "shape":"Integer", + 
"locationName":"amiLaunchIndex" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "LaunchTime":{ + "shape":"DateTime", + "locationName":"launchTime" + }, + "Placement":{ + "shape":"Placement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "Platform":{ + "shape":"PlatformValues", + "locationName":"platform" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PublicIpAddress":{ + "shape":"String", + "locationName":"ipAddress" + }, + "StateReason":{ + "shape":"StateReason", + "locationName":"stateReason" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "RootDeviceType":{ + "shape":"DeviceType", + "locationName":"rootDeviceType" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"VirtualizationType", + "locationName":"virtualizationType" + }, + "InstanceLifecycle":{ + "shape":"InstanceLifecycleType", + "locationName":"instanceLifecycle" + }, + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Hypervisor":{ + 
"shape":"HypervisorType", + "locationName":"hypervisor" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfile", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + } + } + }, + "InstanceAttribute":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "KernelId":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "RamdiskId":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"AttributeValue", + "locationName":"userData" + }, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "RootDeviceName":{ + "shape":"AttributeValue", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "locationName":"productCodes" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + } + } + }, + "InstanceAttributeName":{ + "type":"string", + "enum":[ + "instanceType", + "kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + 
"productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport" + ] + }, + "InstanceBlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDevice", + "locationName":"ebs" + } + } + }, + "InstanceBlockDeviceMappingList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMapping", + "locationName":"item" + } + }, + "InstanceBlockDeviceMappingSpecification":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "Ebs":{ + "shape":"EbsInstanceBlockDeviceSpecification", + "locationName":"ebs" + }, + "VirtualName":{ + "shape":"String", + "locationName":"virtualName" + }, + "NoDevice":{ + "shape":"String", + "locationName":"noDevice" + } + } + }, + "InstanceBlockDeviceMappingSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceBlockDeviceMappingSpecification", + "locationName":"item" + } + }, + "InstanceCapacity":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "AvailableCapacity":{ + "shape":"Integer", + "locationName":"availableCapacity" + }, + "TotalCapacity":{ + "shape":"Integer", + "locationName":"totalCapacity" + } + } + }, + "InstanceCount":{ + "type":"structure", + "members":{ + "State":{ + "shape":"ListingState", + "locationName":"state" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + } + } + }, + "InstanceCountList":{ + "type":"list", + "member":{ + "shape":"InstanceCount", + "locationName":"item" + } + }, + "InstanceExportDetails":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "TargetEnvironment":{ + "shape":"ExportEnvironment", + "locationName":"targetEnvironment" + } + } + }, + "InstanceIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + 
"InstanceIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"InstanceId" + } + }, + "InstanceLifecycleType":{ + "type":"string", + "enum":["spot"] + }, + "InstanceList":{ + "type":"list", + "member":{ + "shape":"Instance", + "locationName":"item" + } + }, + "InstanceMonitoring":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Monitoring":{ + "shape":"Monitoring", + "locationName":"monitoring" + } + } + }, + "InstanceMonitoringList":{ + "type":"list", + "member":{ + "shape":"InstanceMonitoring", + "locationName":"item" + } + }, + "InstanceNetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"InstanceNetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + }, + "PrivateIpAddresses":{ + "shape":"InstancePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + } + } + }, + "InstanceNetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + 
}, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + } + } + }, + "InstanceNetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "InstanceNetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterface", + "locationName":"item" + } + }, + "InstanceNetworkInterfaceSpecification":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressSpecificationList", + "locationName":"privateIpAddressesSet", + "queryName":"PrivateIpAddresses" + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"Integer", + "locationName":"secondaryPrivateIpAddressCount" + }, + "AssociatePublicIpAddress":{ + "shape":"Boolean", + "locationName":"associatePublicIpAddress" + } + } + }, + "InstanceNetworkInterfaceSpecificationList":{ + "type":"list", + "member":{ + "shape":"InstanceNetworkInterfaceSpecification", + "locationName":"item" + } + }, + "InstancePrivateIpAddress":{ + 
"type":"structure", + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"InstanceNetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "InstancePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"InstancePrivateIpAddress", + "locationName":"item" + } + }, + "InstanceState":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"Integer", + "locationName":"code" + }, + "Name":{ + "shape":"InstanceStateName", + "locationName":"name" + } + } + }, + "InstanceStateChange":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CurrentState":{ + "shape":"InstanceState", + "locationName":"currentState" + }, + "PreviousState":{ + "shape":"InstanceState", + "locationName":"previousState" + } + } + }, + "InstanceStateChangeList":{ + "type":"list", + "member":{ + "shape":"InstanceStateChange", + "locationName":"item" + } + }, + "InstanceStateName":{ + "type":"string", + "enum":[ + "pending", + "running", + "shutting-down", + "terminated", + "stopping", + "stopped" + ] + }, + "InstanceStatus":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Events":{ + "shape":"InstanceStatusEventList", + "locationName":"eventsSet" + }, + "InstanceState":{ + "shape":"InstanceState", + "locationName":"instanceState" + }, + "SystemStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"systemStatus" + }, + "InstanceStatus":{ + "shape":"InstanceStatusSummary", + "locationName":"instanceStatus" + } + } + }, + "InstanceStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"StatusName", + "locationName":"name" + }, + 
"Status":{ + "shape":"StatusType", + "locationName":"status" + }, + "ImpairedSince":{ + "shape":"DateTime", + "locationName":"impairedSince" + } + } + }, + "InstanceStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusDetails", + "locationName":"item" + } + }, + "InstanceStatusEvent":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"EventCode", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + "locationName":"notAfter" + } + } + }, + "InstanceStatusEventList":{ + "type":"list", + "member":{ + "shape":"InstanceStatusEvent", + "locationName":"item" + } + }, + "InstanceStatusList":{ + "type":"list", + "member":{ + "shape":"InstanceStatus", + "locationName":"item" + } + }, + "InstanceStatusSummary":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SummaryStatus", + "locationName":"status" + }, + "Details":{ + "shape":"InstanceStatusDetailsList", + "locationName":"details" + } + } + }, + "InstanceType":{ + "type":"string", + "enum":[ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m3.medium", + "m3.large", + "m3.xlarge", + "m3.2xlarge", + "m4.large", + "m4.xlarge", + "m4.2xlarge", + "m4.4xlarge", + "m4.10xlarge", + "t2.nano", + "t2.micro", + "t2.small", + "t2.medium", + "t2.large", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "cr1.8xlarge", + "i2.xlarge", + "i2.2xlarge", + "i2.4xlarge", + "i2.8xlarge", + "hi1.4xlarge", + "hs1.8xlarge", + "c1.medium", + "c1.xlarge", + "c3.large", + "c3.xlarge", + "c3.2xlarge", + "c3.4xlarge", + "c3.8xlarge", + "c4.large", + "c4.xlarge", + "c4.2xlarge", + "c4.4xlarge", + "c4.8xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "g2.2xlarge", + "cg1.4xlarge", + "r3.large", + "r3.xlarge", + "r3.2xlarge", + "r3.4xlarge", + "r3.8xlarge", + "d2.xlarge", + "d2.2xlarge", + "d2.4xlarge", + "d2.8xlarge" + ] + }, + 
"InstanceTypeList":{ + "type":"list", + "member":{"shape":"InstanceType"} + }, + "Integer":{"type":"integer"}, + "InternetGateway":{ + "type":"structure", + "members":{ + "InternetGatewayId":{ + "shape":"String", + "locationName":"internetGatewayId" + }, + "Attachments":{ + "shape":"InternetGatewayAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "InternetGatewayAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "InternetGatewayAttachmentList":{ + "type":"list", + "member":{ + "shape":"InternetGatewayAttachment", + "locationName":"item" + } + }, + "InternetGatewayList":{ + "type":"list", + "member":{ + "shape":"InternetGateway", + "locationName":"item" + } + }, + "IpPermission":{ + "type":"structure", + "members":{ + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "UserIdGroupPairs":{ + "shape":"UserIdGroupPairList", + "locationName":"groups" + }, + "IpRanges":{ + "shape":"IpRangeList", + "locationName":"ipRanges" + }, + "PrefixListIds":{ + "shape":"PrefixListIdList", + "locationName":"prefixListIds" + } + } + }, + "IpPermissionList":{ + "type":"list", + "member":{ + "shape":"IpPermission", + "locationName":"item" + } + }, + "IpRange":{ + "type":"structure", + "members":{ + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + } + } + }, + "IpRangeList":{ + "type":"list", + "member":{ + "shape":"IpRange", + "locationName":"item" + } + }, + "KeyNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"KeyName" + } + }, + "KeyPair":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + 
"shape":"String", + "locationName":"keyFingerprint" + }, + "KeyMaterial":{ + "shape":"String", + "locationName":"keyMaterial" + } + } + }, + "KeyPairInfo":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "KeyFingerprint":{ + "shape":"String", + "locationName":"keyFingerprint" + } + } + }, + "KeyPairList":{ + "type":"list", + "member":{ + "shape":"KeyPairInfo", + "locationName":"item" + } + }, + "LaunchPermission":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "Group":{ + "shape":"PermissionGroup", + "locationName":"group" + } + } + }, + "LaunchPermissionList":{ + "type":"list", + "member":{ + "shape":"LaunchPermission", + "locationName":"item" + } + }, + "LaunchPermissionModifications":{ + "type":"structure", + "members":{ + "Add":{"shape":"LaunchPermissionList"}, + "Remove":{"shape":"LaunchPermissionList"} + } + }, + "LaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + 
"shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + } + } + }, + "LaunchSpecsList":{ + "type":"list", + "member":{ + "shape":"SpotFleetLaunchSpecification", + "locationName":"item" + }, + "min":1 + }, + "ListingState":{ + "type":"string", + "enum":[ + "available", + "sold", + "cancelled", + "pending" + ] + }, + "ListingStatus":{ + "type":"string", + "enum":[ + "active", + "pending", + "cancelled", + "closed" + ] + }, + "Long":{"type":"long"}, + "MaxResults":{ + "type":"integer", + "max":255, + "min":5 + }, + "ModifyHostsRequest":{ + "type":"structure", + "required":[ + "HostIds", + "AutoPlacement" + ], + "members":{ + "HostIds":{ + "shape":"RequestHostIdList", + "locationName":"hostId" + }, + "AutoPlacement":{ + "shape":"AutoPlacement", + "locationName":"autoPlacement" + } + } + }, + "ModifyHostsResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"ResponseHostIdList", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemList", + "locationName":"unsuccessful" + } + } + }, + "ModifyIdFormatRequest":{ + "type":"structure", + "required":[ + "Resource", + "UseLongIds" + ], + "members":{ + "Resource":{"shape":"String"}, + "UseLongIds":{"shape":"Boolean"} + } + }, + "ModifyImageAttributeRequest":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"String"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "UserGroups":{ + "shape":"UserGroupStringList", + "locationName":"UserGroup" + }, + "ProductCodes":{ + "shape":"ProductCodeStringList", + "locationName":"ProductCode" + }, + "Value":{"shape":"String"}, + 
"LaunchPermission":{"shape":"LaunchPermissionModifications"}, + "Description":{"shape":"AttributeValue"} + } + }, + "ModifyInstanceAttributeRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + }, + "Value":{ + "shape":"String", + "locationName":"value" + }, + "BlockDeviceMappings":{ + "shape":"InstanceBlockDeviceMappingSpecificationList", + "locationName":"blockDeviceMapping" + }, + "SourceDestCheck":{"shape":"AttributeBooleanValue"}, + "DisableApiTermination":{ + "shape":"AttributeBooleanValue", + "locationName":"disableApiTermination" + }, + "InstanceType":{ + "shape":"AttributeValue", + "locationName":"instanceType" + }, + "Kernel":{ + "shape":"AttributeValue", + "locationName":"kernel" + }, + "Ramdisk":{ + "shape":"AttributeValue", + "locationName":"ramdisk" + }, + "UserData":{ + "shape":"BlobAttributeValue", + "locationName":"userData" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"AttributeValue", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "Groups":{ + "shape":"GroupIdStringList", + "locationName":"GroupId" + }, + "EbsOptimized":{ + "shape":"AttributeBooleanValue", + "locationName":"ebsOptimized" + }, + "SriovNetSupport":{ + "shape":"AttributeValue", + "locationName":"sriovNetSupport" + } + } + }, + "ModifyInstancePlacementRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Tenancy":{ + "shape":"HostTenancy", + "locationName":"tenancy" + }, + "Affinity":{ + "shape":"Affinity", + "locationName":"affinity" + }, + "HostId":{ + "shape":"String", + "locationName":"hostId" + } + } + }, + "ModifyInstancePlacementResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + 
"locationName":"return" + } + } + }, + "ModifyNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "Description":{ + "shape":"AttributeValue", + "locationName":"description" + }, + "SourceDestCheck":{ + "shape":"AttributeBooleanValue", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachmentChanges", + "locationName":"attachment" + } + } + }, + "ModifyReservedInstancesRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesIds", + "TargetConfigurations" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "ReservedInstancesIds":{ + "shape":"ReservedInstancesIdStringList", + "locationName":"ReservedInstancesId" + }, + "TargetConfigurations":{ + "shape":"ReservedInstancesConfigurationList", + "locationName":"ReservedInstancesConfigurationSetItemType" + } + } + }, + "ModifyReservedInstancesResult":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + } + } + }, + "ModifySnapshotAttributeRequest":{ + "type":"structure", + "required":["SnapshotId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"}, + "OperationType":{"shape":"OperationType"}, + "UserIds":{ + "shape":"UserIdStringList", + "locationName":"UserId" + }, + "GroupNames":{ + "shape":"GroupNameStringList", + "locationName":"UserGroup" + }, + "CreateVolumePermission":{"shape":"CreateVolumePermissionModifications"} + } + }, + "ModifySpotFleetRequestRequest":{ + "type":"structure", + "required":["SpotFleetRequestId"], + 
"members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "TargetCapacity":{ + "shape":"Integer", + "locationName":"targetCapacity" + }, + "ExcessCapacityTerminationPolicy":{ + "shape":"ExcessCapacityTerminationPolicy", + "locationName":"excessCapacityTerminationPolicy" + } + } + }, + "ModifySpotFleetRequestResponse":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ModifySubnetAttributeRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "MapPublicIpOnLaunch":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVolumeAttributeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VolumeId":{"shape":"String"}, + "AutoEnableIO":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcAttributeRequest":{ + "type":"structure", + "required":["VpcId"], + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "EnableDnsSupport":{"shape":"AttributeBooleanValue"}, + "EnableDnsHostnames":{"shape":"AttributeBooleanValue"} + } + }, + "ModifyVpcEndpointRequest":{ + "type":"structure", + "required":["VpcEndpointId"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "VpcEndpointId":{"shape":"String"}, + "ResetPolicy":{"shape":"Boolean"}, + "PolicyDocument":{"shape":"String"}, + "AddRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"AddRouteTableId" + }, + "RemoveRouteTableIds":{ + "shape":"ValueStringList", + "locationName":"RemoveRouteTableId" + } + } + }, + "ModifyVpcEndpointResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "MonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ 
+ "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "MonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "Monitoring":{ + "type":"structure", + "members":{ + "State":{ + "shape":"MonitoringState", + "locationName":"state" + } + } + }, + "MonitoringState":{ + "type":"string", + "enum":[ + "disabled", + "disabling", + "enabled", + "pending" + ] + }, + "MoveAddressToVpcRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "MoveAddressToVpcResult":{ + "type":"structure", + "members":{ + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "Status":{ + "shape":"Status", + "locationName":"status" + } + } + }, + "MoveStatus":{ + "type":"string", + "enum":[ + "movingToVpc", + "restoringToClassic" + ] + }, + "MovingAddressStatus":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "MoveStatus":{ + "shape":"MoveStatus", + "locationName":"moveStatus" + } + } + }, + "MovingAddressStatusSet":{ + "type":"list", + "member":{ + "shape":"MovingAddressStatus", + "locationName":"item" + } + }, + "NatGateway":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "DeleteTime":{ + "shape":"DateTime", + "locationName":"deleteTime" + }, + "NatGatewayAddresses":{ + "shape":"NatGatewayAddressList", + "locationName":"natGatewayAddressSet" + }, + "State":{ + "shape":"NatGatewayState", + "locationName":"state" + }, + "FailureCode":{ + "shape":"String", 
+ "locationName":"failureCode" + }, + "FailureMessage":{ + "shape":"String", + "locationName":"failureMessage" + } + } + }, + "NatGatewayAddress":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "PrivateIp":{ + "shape":"String", + "locationName":"privateIp" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + } + } + }, + "NatGatewayAddressList":{ + "type":"list", + "member":{ + "shape":"NatGatewayAddress", + "locationName":"item" + } + }, + "NatGatewayList":{ + "type":"list", + "member":{ + "shape":"NatGateway", + "locationName":"item" + } + }, + "NatGatewayState":{ + "type":"string", + "enum":[ + "pending", + "failed", + "available", + "deleting", + "deleted" + ] + }, + "NetworkAcl":{ + "type":"structure", + "members":{ + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"default" + }, + "Entries":{ + "shape":"NetworkAclEntryList", + "locationName":"entrySet" + }, + "Associations":{ + "shape":"NetworkAclAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "NetworkAclAssociation":{ + "type":"structure", + "members":{ + "NetworkAclAssociationId":{ + "shape":"String", + "locationName":"networkAclAssociationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + } + } + }, + "NetworkAclAssociationList":{ + "type":"list", + "member":{ + "shape":"NetworkAclAssociation", + "locationName":"item" + } + }, + "NetworkAclEntry":{ + "type":"structure", + "members":{ + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + 
}, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"icmpTypeCode" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "NetworkAclEntryList":{ + "type":"list", + "member":{ + "shape":"NetworkAclEntry", + "locationName":"item" + } + }, + "NetworkAclList":{ + "type":"list", + "member":{ + "shape":"NetworkAcl", + "locationName":"item" + } + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "RequesterManaged":{ + "shape":"Boolean", + "locationName":"requesterManaged" + }, + "Status":{ + "shape":"NetworkInterfaceStatus", + "locationName":"status" + }, + "MacAddress":{ + "shape":"String", + "locationName":"macAddress" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "SourceDestCheck":{ + "shape":"Boolean", + "locationName":"sourceDestCheck" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Attachment":{ + "shape":"NetworkInterfaceAttachment", + "locationName":"attachment" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + }, + "TagSet":{ + "shape":"TagList", + "locationName":"tagSet" + }, + 
"PrivateIpAddresses":{ + "shape":"NetworkInterfacePrivateIpAddressList", + "locationName":"privateIpAddressesSet" + }, + "InterfaceType":{ + "shape":"NetworkInterfaceType", + "locationName":"interfaceType" + } + } + }, + "NetworkInterfaceAssociation":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + }, + "PublicDnsName":{ + "shape":"String", + "locationName":"publicDnsName" + }, + "IpOwnerId":{ + "shape":"String", + "locationName":"ipOwnerId" + }, + "AllocationId":{ + "shape":"String", + "locationName":"allocationId" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + } + } + }, + "NetworkInterfaceAttachment":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "DeviceIndex":{ + "shape":"Integer", + "locationName":"deviceIndex" + }, + "Status":{ + "shape":"AttachmentStatus", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttachmentChanges":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"String", + "locationName":"attachmentId" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "NetworkInterfaceAttribute":{ + "type":"string", + "enum":[ + "description", + "groupSet", + "sourceDestCheck", + "attachment" + ] + }, + "NetworkInterfaceIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "NetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"NetworkInterface", + "locationName":"item" + } + }, + "NetworkInterfacePrivateIpAddress":{ + "type":"structure", + "members":{ + 
"PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "PrivateDnsName":{ + "shape":"String", + "locationName":"privateDnsName" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + }, + "Association":{ + "shape":"NetworkInterfaceAssociation", + "locationName":"association" + } + } + }, + "NetworkInterfacePrivateIpAddressList":{ + "type":"list", + "member":{ + "shape":"NetworkInterfacePrivateIpAddress", + "locationName":"item" + } + }, + "NetworkInterfaceStatus":{ + "type":"string", + "enum":[ + "available", + "attaching", + "in-use", + "detaching" + ] + }, + "NetworkInterfaceType":{ + "type":"string", + "enum":[ + "interface", + "natGateway" + ] + }, + "NewDhcpConfiguration":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Values":{ + "shape":"ValueStringList", + "locationName":"Value" + } + } + }, + "NewDhcpConfigurationList":{ + "type":"list", + "member":{ + "shape":"NewDhcpConfiguration", + "locationName":"item" + } + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "OccurrenceDayRequestSet":{ + "type":"list", + "member":{ + "shape":"Integer", + "locationName":"OccurenceDay" + } + }, + "OccurrenceDaySet":{ + "type":"list", + "member":{ + "shape":"Integer", + "locationName":"item" + } + }, + "OfferingTypeValues":{ + "type":"string", + "enum":[ + "Heavy Utilization", + "Medium Utilization", + "Light Utilization", + "No Upfront", + "Partial Upfront", + "All Upfront" + ] + }, + "OperationType":{ + "type":"string", + "enum":[ + "add", + "remove" + ] + }, + "OwnerStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Owner" + } + }, + "PermissionGroup":{ + "type":"string", + "enum":["all"] + }, + "Placement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Tenancy":{ + 
"shape":"Tenancy", + "locationName":"tenancy" + }, + "HostId":{ + "shape":"String", + "locationName":"hostId" + }, + "Affinity":{ + "shape":"String", + "locationName":"affinity" + } + } + }, + "PlacementGroup":{ + "type":"structure", + "members":{ + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "Strategy":{ + "shape":"PlacementStrategy", + "locationName":"strategy" + }, + "State":{ + "shape":"PlacementGroupState", + "locationName":"state" + } + } + }, + "PlacementGroupList":{ + "type":"list", + "member":{ + "shape":"PlacementGroup", + "locationName":"item" + } + }, + "PlacementGroupState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "PlacementGroupStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PlacementStrategy":{ + "type":"string", + "enum":["cluster"] + }, + "PlatformValues":{ + "type":"string", + "enum":["Windows"] + }, + "PortRange":{ + "type":"structure", + "members":{ + "From":{ + "shape":"Integer", + "locationName":"from" + }, + "To":{ + "shape":"Integer", + "locationName":"to" + } + } + }, + "PrefixList":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + }, + "PrefixListName":{ + "shape":"String", + "locationName":"prefixListName" + }, + "Cidrs":{ + "shape":"ValueStringList", + "locationName":"cidrSet" + } + } + }, + "PrefixListId":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"String", + "locationName":"prefixListId" + } + } + }, + "PrefixListIdList":{ + "type":"list", + "member":{ + "shape":"PrefixListId", + "locationName":"item" + } + }, + "PrefixListSet":{ + "type":"list", + "member":{ + "shape":"PrefixList", + "locationName":"item" + } + }, + "PriceSchedule":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + 
"locationName":"currencyCode" + }, + "Active":{ + "shape":"Boolean", + "locationName":"active" + } + } + }, + "PriceScheduleList":{ + "type":"list", + "member":{ + "shape":"PriceSchedule", + "locationName":"item" + } + }, + "PriceScheduleSpecification":{ + "type":"structure", + "members":{ + "Term":{ + "shape":"Long", + "locationName":"term" + }, + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "PriceScheduleSpecificationList":{ + "type":"list", + "member":{ + "shape":"PriceScheduleSpecification", + "locationName":"item" + } + }, + "PricingDetail":{ + "type":"structure", + "members":{ + "Price":{ + "shape":"Double", + "locationName":"price" + }, + "Count":{ + "shape":"Integer", + "locationName":"count" + } + } + }, + "PricingDetailsList":{ + "type":"list", + "member":{ + "shape":"PricingDetail", + "locationName":"item" + } + }, + "PrivateIpAddressConfigSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesPrivateIpAddressConfig", + "locationName":"PrivateIpAddressConfigSet" + } + }, + "PrivateIpAddressSpecification":{ + "type":"structure", + "required":["PrivateIpAddress"], + "members":{ + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "Primary":{ + "shape":"Boolean", + "locationName":"primary" + } + } + }, + "PrivateIpAddressSpecificationList":{ + "type":"list", + "member":{ + "shape":"PrivateIpAddressSpecification", + "locationName":"item" + } + }, + "PrivateIpAddressStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PrivateIpAddress" + } + }, + "ProductCode":{ + "type":"structure", + "members":{ + "ProductCodeId":{ + "shape":"String", + "locationName":"productCode" + }, + "ProductCodeType":{ + "shape":"ProductCodeValues", + "locationName":"type" + } + } + }, + "ProductCodeList":{ + "type":"list", + "member":{ + "shape":"ProductCode", + "locationName":"item" + } + }, + 
"ProductCodeStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ProductCode" + } + }, + "ProductCodeValues":{ + "type":"string", + "enum":[ + "devpay", + "marketplace" + ] + }, + "ProductDescriptionList":{ + "type":"list", + "member":{"shape":"String"} + }, + "PropagatingVgw":{ + "type":"structure", + "members":{ + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + } + } + }, + "PropagatingVgwList":{ + "type":"list", + "member":{ + "shape":"PropagatingVgw", + "locationName":"item" + } + }, + "PublicIpStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PublicIp" + } + }, + "PurchaseRequest":{ + "type":"structure", + "members":{ + "PurchaseToken":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"} + } + }, + "PurchaseRequestSet":{ + "type":"list", + "member":{ + "shape":"PurchaseRequest", + "locationName":"PurchaseRequest" + } + }, + "PurchaseReservedInstancesOfferingRequest":{ + "type":"structure", + "required":[ + "ReservedInstancesOfferingId", + "InstanceCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ReservedInstancesOfferingId":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"}, + "LimitPrice":{ + "shape":"ReservedInstanceLimitPrice", + "locationName":"limitPrice" + } + } + }, + "PurchaseReservedInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "PurchaseScheduledInstancesRequest":{ + "type":"structure", + "required":["PurchaseRequests"], + "members":{ + "DryRun":{"shape":"Boolean"}, + "ClientToken":{"shape":"String"}, + "PurchaseRequests":{ + "shape":"PurchaseRequestSet", + "locationName":"PurchaseRequest" + } + } + }, + "PurchaseScheduledInstancesResult":{ + "type":"structure", + "members":{ + "ScheduledInstanceSet":{ + "shape":"PurchasedScheduledInstanceSet", + "locationName":"scheduledInstanceSet" + } + } + }, + 
"PurchasedScheduledInstanceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstance", + "locationName":"item" + } + }, + "RIProductDescription":{ + "type":"string", + "enum":[ + "Linux/UNIX", + "Linux/UNIX (Amazon VPC)", + "Windows", + "Windows (Amazon VPC)" + ] + }, + "ReasonCodesList":{ + "type":"list", + "member":{ + "shape":"ReportInstanceReasonCodes", + "locationName":"item" + } + }, + "RebootInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"RecurringChargeFrequency", + "locationName":"frequency" + }, + "Amount":{ + "shape":"Double", + "locationName":"amount" + } + } + }, + "RecurringChargeFrequency":{ + "type":"string", + "enum":["Hourly"] + }, + "RecurringChargesList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"item" + } + }, + "Region":{ + "type":"structure", + "members":{ + "RegionName":{ + "shape":"String", + "locationName":"regionName" + }, + "Endpoint":{ + "shape":"String", + "locationName":"regionEndpoint" + } + } + }, + "RegionList":{ + "type":"list", + "member":{ + "shape":"Region", + "locationName":"item" + } + }, + "RegionNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"RegionName" + } + }, + "RegisterImageRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageLocation":{"shape":"String"}, + "Name":{ + "shape":"String", + "locationName":"name" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Architecture":{ + "shape":"ArchitectureValues", + "locationName":"architecture" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + 
"shape":"String", + "locationName":"ramdiskId" + }, + "RootDeviceName":{ + "shape":"String", + "locationName":"rootDeviceName" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "VirtualizationType":{ + "shape":"String", + "locationName":"virtualizationType" + }, + "SriovNetSupport":{ + "shape":"String", + "locationName":"sriovNetSupport" + } + } + }, + "RegisterImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + } + } + }, + "RejectVpcPeeringConnectionRequest":{ + "type":"structure", + "required":["VpcPeeringConnectionId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "RejectVpcPeeringConnectionResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + } + } + }, + "ReleaseAddressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{"shape":"String"}, + "AllocationId":{"shape":"String"} + } + }, + "ReleaseHostsRequest":{ + "type":"structure", + "required":["HostIds"], + "members":{ + "HostIds":{ + "shape":"RequestHostIdList", + "locationName":"hostId" + } + } + }, + "ReleaseHostsResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"ResponseHostIdList", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"UnsuccessfulItemList", + "locationName":"unsuccessful" + } + } + }, + "ReplaceNetworkAclAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "NetworkAclId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + "shape":"String", + "locationName":"associationId" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + } + } + }, + 
"ReplaceNetworkAclAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReplaceNetworkAclEntryRequest":{ + "type":"structure", + "required":[ + "NetworkAclId", + "RuleNumber", + "Protocol", + "RuleAction", + "Egress", + "CidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkAclId":{ + "shape":"String", + "locationName":"networkAclId" + }, + "RuleNumber":{ + "shape":"Integer", + "locationName":"ruleNumber" + }, + "Protocol":{ + "shape":"String", + "locationName":"protocol" + }, + "RuleAction":{ + "shape":"RuleAction", + "locationName":"ruleAction" + }, + "Egress":{ + "shape":"Boolean", + "locationName":"egress" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "IcmpTypeCode":{ + "shape":"IcmpTypeCode", + "locationName":"Icmp" + }, + "PortRange":{ + "shape":"PortRange", + "locationName":"portRange" + } + } + }, + "ReplaceRouteRequest":{ + "type":"structure", + "required":[ + "RouteTableId", + "DestinationCidrBlock" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + } + } + }, + "ReplaceRouteTableAssociationRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "RouteTableId" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "AssociationId":{ + 
"shape":"String", + "locationName":"associationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + } + } + }, + "ReplaceRouteTableAssociationResult":{ + "type":"structure", + "members":{ + "NewAssociationId":{ + "shape":"String", + "locationName":"newAssociationId" + } + } + }, + "ReportInstanceReasonCodes":{ + "type":"string", + "enum":[ + "instance-stuck-in-state", + "unresponsive", + "not-accepting-credentials", + "password-not-available", + "performance-network", + "performance-instance-store", + "performance-ebs-volume", + "performance-other", + "other" + ] + }, + "ReportInstanceStatusRequest":{ + "type":"structure", + "required":[ + "Instances", + "Status", + "ReasonCodes" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "Instances":{ + "shape":"InstanceIdStringList", + "locationName":"instanceId" + }, + "Status":{ + "shape":"ReportStatusType", + "locationName":"status" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "EndTime":{ + "shape":"DateTime", + "locationName":"endTime" + }, + "ReasonCodes":{ + "shape":"ReasonCodesList", + "locationName":"reasonCode" + }, + "Description":{ + "shape":"String", + "locationName":"description" + } + } + }, + "ReportStatusType":{ + "type":"string", + "enum":[ + "ok", + "impaired" + ] + }, + "RequestHostIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "RequestSpotFleetRequest":{ + "type":"structure", + "required":["SpotFleetRequestConfig"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + } + } + }, + "RequestSpotFleetResponse":{ + "type":"structure", + "required":["SpotFleetRequestId"], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + } + } + }, + "RequestSpotInstancesRequest":{ + 
"type":"structure", + "required":["SpotPrice"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "BlockDurationMinutes":{ + "shape":"Integer", + "locationName":"blockDurationMinutes" + }, + "LaunchSpecification":{"shape":"RequestSpotLaunchSpecification"} + } + }, + "RequestSpotInstancesResult":{ + "type":"structure", + "members":{ + "SpotInstanceRequests":{ + "shape":"SpotInstanceRequestList", + "locationName":"spotInstanceRequestSet" + } + } + }, + "RequestSpotLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"ValueStringList", + "locationName":"SecurityGroup" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + 
}, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"NetworkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "locationName":"monitoring" + }, + "SecurityGroupIds":{ + "shape":"ValueStringList", + "locationName":"SecurityGroupId" + } + } + }, + "Reservation":{ + "type":"structure", + "members":{ + "ReservationId":{ + "shape":"String", + "locationName":"reservationId" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "RequesterId":{ + "shape":"String", + "locationName":"requesterId" + }, + "Groups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "Instances":{ + "shape":"InstanceList", + "locationName":"instancesSet" + } + } + }, + "ReservationList":{ + "type":"list", + "member":{ + "shape":"Reservation", + "locationName":"item" + } + }, + "ReservedInstanceLimitPrice":{ + "type":"structure", + "members":{ + "Amount":{ + "shape":"Double", + "locationName":"amount" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + } + } + }, + "ReservedInstanceState":{ + "type":"string", + "enum":[ + "payment-pending", + "active", + "payment-failed", + "retired" + ] + }, + "ReservedInstances":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Start":{ + "shape":"DateTime", + "locationName":"start" + }, + "End":{ + "shape":"DateTime", + "locationName":"end" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + 
"FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "State":{ + "shape":"ReservedInstanceState", + "locationName":"state" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + } + } + }, + "ReservedInstancesConfiguration":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + } + } + }, + "ReservedInstancesConfigurationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"item" + } + }, + "ReservedInstancesId":{ + "type":"structure", + "members":{ + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + } + } + }, + "ReservedInstancesIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesId" + } + }, + "ReservedInstancesList":{ + "type":"list", + "member":{ + "shape":"ReservedInstances", + "locationName":"item" + } + }, + "ReservedInstancesListing":{ + "type":"structure", + "members":{ + "ReservedInstancesListingId":{ + "shape":"String", + "locationName":"reservedInstancesListingId" + }, + "ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "CreateDate":{ + 
"shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "Status":{ + "shape":"ListingStatus", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "InstanceCounts":{ + "shape":"InstanceCountList", + "locationName":"instanceCounts" + }, + "PriceSchedules":{ + "shape":"PriceScheduleList", + "locationName":"priceSchedules" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesListingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesListing", + "locationName":"item" + } + }, + "ReservedInstancesModification":{ + "type":"structure", + "members":{ + "ReservedInstancesModificationId":{ + "shape":"String", + "locationName":"reservedInstancesModificationId" + }, + "ReservedInstancesIds":{ + "shape":"ReservedIntancesIds", + "locationName":"reservedInstancesSet" + }, + "ModificationResults":{ + "shape":"ReservedInstancesModificationResultList", + "locationName":"modificationResultSet" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + }, + "UpdateDate":{ + "shape":"DateTime", + "locationName":"updateDate" + }, + "EffectiveDate":{ + "shape":"DateTime", + "locationName":"effectiveDate" + }, + "Status":{ + "shape":"String", + "locationName":"status" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + } + } + }, + "ReservedInstancesModificationIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReservedInstancesModificationId" + } + }, + "ReservedInstancesModificationList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModification", + "locationName":"item" + } + }, + "ReservedInstancesModificationResult":{ + "type":"structure", + "members":{ + 
"ReservedInstancesId":{ + "shape":"String", + "locationName":"reservedInstancesId" + }, + "TargetConfiguration":{ + "shape":"ReservedInstancesConfiguration", + "locationName":"targetConfiguration" + } + } + }, + "ReservedInstancesModificationResultList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesModificationResult", + "locationName":"item" + } + }, + "ReservedInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedInstancesOfferingId":{ + "shape":"String", + "locationName":"reservedInstancesOfferingId" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Duration":{ + "shape":"Long", + "locationName":"duration" + }, + "UsagePrice":{ + "shape":"Float", + "locationName":"usagePrice" + }, + "FixedPrice":{ + "shape":"Float", + "locationName":"fixedPrice" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "CurrencyCode":{ + "shape":"CurrencyCodeValues", + "locationName":"currencyCode" + }, + "OfferingType":{ + "shape":"OfferingTypeValues", + "locationName":"offeringType" + }, + "RecurringCharges":{ + "shape":"RecurringChargesList", + "locationName":"recurringCharges" + }, + "Marketplace":{ + "shape":"Boolean", + "locationName":"marketplace" + }, + "PricingDetails":{ + "shape":"PricingDetailsList", + "locationName":"pricingDetailsSet" + } + } + }, + "ReservedInstancesOfferingIdStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ReservedInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesOffering", + "locationName":"item" + } + }, + "ReservedIntancesIds":{ + "type":"list", + "member":{ + "shape":"ReservedInstancesId", + "locationName":"item" + } + }, + "ResetImageAttributeName":{ + "type":"string", + "enum":["launchPermission"] + }, + 
"ResetImageAttributeRequest":{ + "type":"structure", + "required":[ + "ImageId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "Attribute":{"shape":"ResetImageAttributeName"} + } + }, + "ResetInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Attribute":{ + "shape":"InstanceAttributeName", + "locationName":"attribute" + } + } + }, + "ResetNetworkInterfaceAttributeRequest":{ + "type":"structure", + "required":["NetworkInterfaceId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "SourceDestCheck":{ + "shape":"String", + "locationName":"sourceDestCheck" + } + } + }, + "ResetSnapshotAttributeRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "Attribute" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "SnapshotId":{"shape":"String"}, + "Attribute":{"shape":"SnapshotAttributeName"} + } + }, + "ResourceIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ResourceType":{ + "type":"string", + "enum":[ + "customer-gateway", + "dhcp-options", + "image", + "instance", + "internet-gateway", + "network-acl", + "network-interface", + "reserved-instances", + "route-table", + "snapshot", + "spot-instances-request", + "subnet", + "security-group", + "volume", + "vpc", + "vpn-connection", + "vpn-gateway" + ] + }, + "ResponseHostIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "RestorableByStringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "RestoreAddressToClassicRequest":{ + "type":"structure", + "required":["PublicIp"], + "members":{ + "DryRun":{ + 
"shape":"Boolean", + "locationName":"dryRun" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RestoreAddressToClassicResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "locationName":"status" + }, + "PublicIp":{ + "shape":"String", + "locationName":"publicIp" + } + } + }, + "RevokeSecurityGroupEgressRequest":{ + "type":"structure", + "required":["GroupId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + "SourceSecurityGroupName":{ + "shape":"String", + "locationName":"sourceSecurityGroupName" + }, + "SourceSecurityGroupOwnerId":{ + "shape":"String", + "locationName":"sourceSecurityGroupOwnerId" + }, + "IpProtocol":{ + "shape":"String", + "locationName":"ipProtocol" + }, + "FromPort":{ + "shape":"Integer", + "locationName":"fromPort" + }, + "ToPort":{ + "shape":"Integer", + "locationName":"toPort" + }, + "CidrIp":{ + "shape":"String", + "locationName":"cidrIp" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + } + } + }, + "RevokeSecurityGroupIngressRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "GroupName":{"shape":"String"}, + "GroupId":{"shape":"String"}, + "SourceSecurityGroupName":{"shape":"String"}, + "SourceSecurityGroupOwnerId":{"shape":"String"}, + "IpProtocol":{"shape":"String"}, + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"}, + "CidrIp":{"shape":"String"}, + "IpPermissions":{"shape":"IpPermissionList"} + } + }, + "Route":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "DestinationPrefixListId":{ + "shape":"String", + "locationName":"destinationPrefixListId" + }, + "GatewayId":{ + "shape":"String", + "locationName":"gatewayId" + }, + "InstanceId":{ + "shape":"String", + 
"locationName":"instanceId" + }, + "InstanceOwnerId":{ + "shape":"String", + "locationName":"instanceOwnerId" + }, + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + }, + "NatGatewayId":{ + "shape":"String", + "locationName":"natGatewayId" + }, + "State":{ + "shape":"RouteState", + "locationName":"state" + }, + "Origin":{ + "shape":"RouteOrigin", + "locationName":"origin" + } + } + }, + "RouteList":{ + "type":"list", + "member":{ + "shape":"Route", + "locationName":"item" + } + }, + "RouteOrigin":{ + "type":"string", + "enum":[ + "CreateRouteTable", + "CreateRoute", + "EnableVgwRoutePropagation" + ] + }, + "RouteState":{ + "type":"string", + "enum":[ + "active", + "blackhole" + ] + }, + "RouteTable":{ + "type":"structure", + "members":{ + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Routes":{ + "shape":"RouteList", + "locationName":"routeSet" + }, + "Associations":{ + "shape":"RouteTableAssociationList", + "locationName":"associationSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "PropagatingVgws":{ + "shape":"PropagatingVgwList", + "locationName":"propagatingVgwSet" + } + } + }, + "RouteTableAssociation":{ + "type":"structure", + "members":{ + "RouteTableAssociationId":{ + "shape":"String", + "locationName":"routeTableAssociationId" + }, + "RouteTableId":{ + "shape":"String", + "locationName":"routeTableId" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "Main":{ + "shape":"Boolean", + "locationName":"main" + } + } + }, + "RouteTableAssociationList":{ + "type":"list", + "member":{ + "shape":"RouteTableAssociation", + "locationName":"item" + } + }, + "RouteTableList":{ + "type":"list", + "member":{ + "shape":"RouteTable", + "locationName":"item" + } + }, + "RuleAction":{ + "type":"string", + 
"enum":[ + "allow", + "deny" + ] + }, + "RunInstancesMonitoringEnabled":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "RunInstancesRequest":{ + "type":"structure", + "required":[ + "ImageId", + "MinCount", + "MaxCount" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "ImageId":{"shape":"String"}, + "MinCount":{"shape":"Integer"}, + "MaxCount":{"shape":"Integer"}, + "KeyName":{"shape":"String"}, + "SecurityGroups":{ + "shape":"SecurityGroupStringList", + "locationName":"SecurityGroup" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdStringList", + "locationName":"SecurityGroupId" + }, + "UserData":{"shape":"String"}, + "InstanceType":{"shape":"InstanceType"}, + "Placement":{"shape":"Placement"}, + "KernelId":{"shape":"String"}, + "RamdiskId":{"shape":"String"}, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingRequestList", + "locationName":"BlockDeviceMapping" + }, + "Monitoring":{"shape":"RunInstancesMonitoringEnabled"}, + "SubnetId":{"shape":"String"}, + "DisableApiTermination":{ + "shape":"Boolean", + "locationName":"disableApiTermination" + }, + "InstanceInitiatedShutdownBehavior":{ + "shape":"ShutdownBehavior", + "locationName":"instanceInitiatedShutdownBehavior" + }, + "PrivateIpAddress":{ + "shape":"String", + "locationName":"privateIpAddress" + }, + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "NetworkInterfaces":{ + "shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterface" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + } + } + }, + "RunScheduledInstancesRequest":{ + "type":"structure", + "required":[ + "ScheduledInstanceId", + 
"LaunchSpecification" + ], + "members":{ + "DryRun":{"shape":"Boolean"}, + "ClientToken":{"shape":"String"}, + "InstanceCount":{"shape":"Integer"}, + "ScheduledInstanceId":{"shape":"String"}, + "LaunchSpecification":{"shape":"ScheduledInstancesLaunchSpecification"} + } + }, + "RunScheduledInstancesResult":{ + "type":"structure", + "members":{ + "InstanceIdSet":{ + "shape":"InstanceIdSet", + "locationName":"instanceIdSet" + } + } + }, + "S3Storage":{ + "type":"structure", + "members":{ + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "AWSAccessKeyId":{"shape":"String"}, + "UploadPolicy":{ + "shape":"Blob", + "locationName":"uploadPolicy" + }, + "UploadPolicySignature":{ + "shape":"String", + "locationName":"uploadPolicySignature" + } + } + }, + "ScheduledInstance":{ + "type":"structure", + "members":{ + "ScheduledInstanceId":{ + "shape":"String", + "locationName":"scheduledInstanceId" + }, + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "NetworkPlatform":{ + "shape":"String", + "locationName":"networkPlatform" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "SlotDurationInHours":{ + "shape":"Integer", + "locationName":"slotDurationInHours" + }, + "Recurrence":{ + "shape":"ScheduledInstanceRecurrence", + "locationName":"recurrence" + }, + "PreviousSlotEndTime":{ + "shape":"DateTime", + "locationName":"previousSlotEndTime" + }, + "NextSlotStartTime":{ + "shape":"DateTime", + "locationName":"nextSlotStartTime" + }, + "HourlyPrice":{ + "shape":"String", + "locationName":"hourlyPrice" + }, + "TotalScheduledInstanceHours":{ + "shape":"Integer", + "locationName":"totalScheduledInstanceHours" + }, + "InstanceCount":{ + "shape":"Integer", + "locationName":"instanceCount" + }, + "TermStartDate":{ + "shape":"DateTime", + "locationName":"termStartDate" + }, + 
"TermEndDate":{ + "shape":"DateTime", + "locationName":"termEndDate" + }, + "CreateDate":{ + "shape":"DateTime", + "locationName":"createDate" + } + } + }, + "ScheduledInstanceAvailability":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"String", + "locationName":"instanceType" + }, + "Platform":{ + "shape":"String", + "locationName":"platform" + }, + "NetworkPlatform":{ + "shape":"String", + "locationName":"networkPlatform" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "PurchaseToken":{ + "shape":"String", + "locationName":"purchaseToken" + }, + "SlotDurationInHours":{ + "shape":"Integer", + "locationName":"slotDurationInHours" + }, + "Recurrence":{ + "shape":"ScheduledInstanceRecurrence", + "locationName":"recurrence" + }, + "FirstSlotStartTime":{ + "shape":"DateTime", + "locationName":"firstSlotStartTime" + }, + "HourlyPrice":{ + "shape":"String", + "locationName":"hourlyPrice" + }, + "TotalScheduledInstanceHours":{ + "shape":"Integer", + "locationName":"totalScheduledInstanceHours" + }, + "AvailableInstanceCount":{ + "shape":"Integer", + "locationName":"availableInstanceCount" + }, + "MinTermDurationInDays":{ + "shape":"Integer", + "locationName":"minTermDurationInDays" + }, + "MaxTermDurationInDays":{ + "shape":"Integer", + "locationName":"maxTermDurationInDays" + } + } + }, + "ScheduledInstanceAvailabilitySet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstanceAvailability", + "locationName":"item" + } + }, + "ScheduledInstanceIdRequestSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ScheduledInstanceId" + } + }, + "ScheduledInstanceRecurrence":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"String", + "locationName":"frequency" + }, + "Interval":{ + "shape":"Integer", + "locationName":"interval" + }, + "OccurrenceDaySet":{ + "shape":"OccurrenceDaySet", + "locationName":"occurrenceDaySet" + }, + "OccurrenceRelativeToEnd":{ + "shape":"Boolean", 
+ "locationName":"occurrenceRelativeToEnd" + }, + "OccurrenceUnit":{ + "shape":"String", + "locationName":"occurrenceUnit" + } + } + }, + "ScheduledInstanceRecurrenceRequest":{ + "type":"structure", + "members":{ + "Frequency":{"shape":"String"}, + "Interval":{"shape":"Integer"}, + "OccurrenceDays":{ + "shape":"OccurrenceDayRequestSet", + "locationName":"OccurrenceDay" + }, + "OccurrenceRelativeToEnd":{"shape":"Boolean"}, + "OccurrenceUnit":{"shape":"String"} + } + }, + "ScheduledInstanceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstance", + "locationName":"item" + } + }, + "ScheduledInstancesBlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{"shape":"String"}, + "NoDevice":{"shape":"String"}, + "VirtualName":{"shape":"String"}, + "Ebs":{"shape":"ScheduledInstancesEbs"} + } + }, + "ScheduledInstancesBlockDeviceMappingSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesBlockDeviceMapping", + "locationName":"BlockDeviceMapping" + } + }, + "ScheduledInstancesEbs":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"String"}, + "VolumeSize":{"shape":"Integer"}, + "DeleteOnTermination":{"shape":"Boolean"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"}, + "Encrypted":{"shape":"Boolean"} + } + }, + "ScheduledInstancesIamInstanceProfile":{ + "type":"structure", + "members":{ + "Arn":{"shape":"String"}, + "Name":{"shape":"String"} + } + }, + "ScheduledInstancesLaunchSpecification":{ + "type":"structure", + "required":["ImageId"], + "members":{ + "ImageId":{"shape":"String"}, + "KeyName":{"shape":"String"}, + "SecurityGroupIds":{ + "shape":"ScheduledInstancesSecurityGroupIdSet", + "locationName":"SecurityGroupId" + }, + "UserData":{"shape":"String"}, + "Placement":{"shape":"ScheduledInstancesPlacement"}, + "KernelId":{"shape":"String"}, + "InstanceType":{"shape":"String"}, + "RamdiskId":{"shape":"String"}, + "BlockDeviceMappings":{ + "shape":"ScheduledInstancesBlockDeviceMappingSet", + 
"locationName":"BlockDeviceMapping" + }, + "Monitoring":{"shape":"ScheduledInstancesMonitoring"}, + "SubnetId":{"shape":"String"}, + "NetworkInterfaces":{ + "shape":"ScheduledInstancesNetworkInterfaceSet", + "locationName":"NetworkInterface" + }, + "IamInstanceProfile":{"shape":"ScheduledInstancesIamInstanceProfile"}, + "EbsOptimized":{"shape":"Boolean"} + } + }, + "ScheduledInstancesMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"Boolean"} + } + }, + "ScheduledInstancesNetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{"shape":"String"}, + "DeviceIndex":{"shape":"Integer"}, + "SubnetId":{"shape":"String"}, + "Description":{"shape":"String"}, + "PrivateIpAddress":{"shape":"String"}, + "PrivateIpAddressConfigs":{ + "shape":"PrivateIpAddressConfigSet", + "locationName":"PrivateIpAddressConfig" + }, + "SecondaryPrivateIpAddressCount":{"shape":"Integer"}, + "AssociatePublicIpAddress":{"shape":"Boolean"}, + "Groups":{ + "shape":"ScheduledInstancesSecurityGroupIdSet", + "locationName":"Group" + }, + "DeleteOnTermination":{"shape":"Boolean"} + } + }, + "ScheduledInstancesNetworkInterfaceSet":{ + "type":"list", + "member":{ + "shape":"ScheduledInstancesNetworkInterface", + "locationName":"NetworkInterface" + } + }, + "ScheduledInstancesPlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{"shape":"String"}, + "GroupName":{"shape":"String"} + } + }, + "ScheduledInstancesPrivateIpAddressConfig":{ + "type":"structure", + "members":{ + "PrivateIpAddress":{"shape":"String"}, + "Primary":{"shape":"Boolean"} + } + }, + "ScheduledInstancesSecurityGroupIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + }, + 
"Description":{ + "shape":"String", + "locationName":"groupDescription" + }, + "IpPermissions":{ + "shape":"IpPermissionList", + "locationName":"ipPermissions" + }, + "IpPermissionsEgress":{ + "shape":"IpPermissionList", + "locationName":"ipPermissionsEgress" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SecurityGroupIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"SecurityGroup", + "locationName":"item" + } + }, + "SecurityGroupStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroup" + } + }, + "ShutdownBehavior":{ + "type":"string", + "enum":[ + "stop", + "terminate" + ] + }, + "SlotDateTimeRangeRequest":{ + "type":"structure", + "required":[ + "EarliestTime", + "LatestTime" + ], + "members":{ + "EarliestTime":{"shape":"DateTime"}, + "LatestTime":{"shape":"DateTime"} + } + }, + "SlotStartTimeRangeRequest":{ + "type":"structure", + "members":{ + "EarliestTime":{"shape":"DateTime"}, + "LatestTime":{"shape":"DateTime"} + } + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "State":{ + "shape":"SnapshotState", + "locationName":"status" + }, + "StateMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "StartTime":{ + "shape":"DateTime", + "locationName":"startTime" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "VolumeSize":{ + "shape":"Integer", + "locationName":"volumeSize" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "Tags":{ + "shape":"TagList", + 
"locationName":"tagSet" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + }, + "DataEncryptionKeyId":{ + "shape":"String", + "locationName":"dataEncryptionKeyId" + } + } + }, + "SnapshotAttributeName":{ + "type":"string", + "enum":[ + "productCodes", + "createVolumePermission" + ] + }, + "SnapshotDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SnapshotDetailList":{ + "type":"list", + "member":{ + "shape":"SnapshotDetail", + "locationName":"item" + } + }, + "SnapshotDiskContainer":{ + "type":"structure", + "members":{ + "Description":{"shape":"String"}, + "Format":{"shape":"String"}, + "Url":{"shape":"String"}, + "UserBucket":{"shape":"UserBucket"} + } + }, + "SnapshotIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SnapshotId" + } + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"item" + } + }, + "SnapshotState":{ + "type":"string", + "enum":[ + "pending", + "completed", + "error" + ] + }, + "SnapshotTaskDetail":{ + "type":"structure", + "members":{ + "DiskImageSize":{ + "shape":"Double", + "locationName":"diskImageSize" + }, + "Description":{ + "shape":"String", + 
"locationName":"description" + }, + "Format":{ + "shape":"String", + "locationName":"format" + }, + "Url":{ + "shape":"String", + "locationName":"url" + }, + "UserBucket":{ + "shape":"UserBucketDetails", + "locationName":"userBucket" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "Progress":{ + "shape":"String", + "locationName":"progress" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "SpotDatafeedSubscription":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "Bucket":{ + "shape":"String", + "locationName":"bucket" + }, + "Prefix":{ + "shape":"String", + "locationName":"prefix" + }, + "State":{ + "shape":"DatafeedSubscriptionState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + } + } + }, + "SpotFleetLaunchSpecification":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"String", + "locationName":"imageId" + }, + "KeyName":{ + "shape":"String", + "locationName":"keyName" + }, + "SecurityGroups":{ + "shape":"GroupIdentifierList", + "locationName":"groupSet" + }, + "UserData":{ + "shape":"String", + "locationName":"userData" + }, + "AddressingType":{ + "shape":"String", + "locationName":"addressingType" + }, + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "Placement":{ + "shape":"SpotPlacement", + "locationName":"placement" + }, + "KernelId":{ + "shape":"String", + "locationName":"kernelId" + }, + "RamdiskId":{ + "shape":"String", + "locationName":"ramdiskId" + }, + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappingList", + "locationName":"blockDeviceMapping" + }, + "Monitoring":{ + "shape":"SpotFleetMonitoring", + "locationName":"monitoring" + }, + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "NetworkInterfaces":{ + 
"shape":"InstanceNetworkInterfaceSpecificationList", + "locationName":"networkInterfaceSet" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "locationName":"iamInstanceProfile" + }, + "EbsOptimized":{ + "shape":"Boolean", + "locationName":"ebsOptimized" + }, + "WeightedCapacity":{ + "shape":"Double", + "locationName":"weightedCapacity" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + } + } + }, + "SpotFleetMonitoring":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "locationName":"enabled" + } + } + }, + "SpotFleetRequestConfig":{ + "type":"structure", + "required":[ + "SpotFleetRequestId", + "SpotFleetRequestState", + "SpotFleetRequestConfig", + "CreateTime" + ], + "members":{ + "SpotFleetRequestId":{ + "shape":"String", + "locationName":"spotFleetRequestId" + }, + "SpotFleetRequestState":{ + "shape":"BatchState", + "locationName":"spotFleetRequestState" + }, + "SpotFleetRequestConfig":{ + "shape":"SpotFleetRequestConfigData", + "locationName":"spotFleetRequestConfig" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + } + } + }, + "SpotFleetRequestConfigData":{ + "type":"structure", + "required":[ + "SpotPrice", + "TargetCapacity", + "IamFleetRole", + "LaunchSpecifications" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "locationName":"clientToken" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "TargetCapacity":{ + "shape":"Integer", + "locationName":"targetCapacity" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "TerminateInstancesWithExpiration":{ + "shape":"Boolean", + "locationName":"terminateInstancesWithExpiration" + }, + "IamFleetRole":{ + "shape":"String", + "locationName":"iamFleetRole" + }, + "LaunchSpecifications":{ + "shape":"LaunchSpecsList", + "locationName":"launchSpecifications" + }, + 
"ExcessCapacityTerminationPolicy":{ + "shape":"ExcessCapacityTerminationPolicy", + "locationName":"excessCapacityTerminationPolicy" + }, + "AllocationStrategy":{ + "shape":"AllocationStrategy", + "locationName":"allocationStrategy" + } + } + }, + "SpotFleetRequestConfigSet":{ + "type":"list", + "member":{ + "shape":"SpotFleetRequestConfig", + "locationName":"item" + } + }, + "SpotInstanceRequest":{ + "type":"structure", + "members":{ + "SpotInstanceRequestId":{ + "shape":"String", + "locationName":"spotInstanceRequestId" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Type":{ + "shape":"SpotInstanceType", + "locationName":"type" + }, + "State":{ + "shape":"SpotInstanceState", + "locationName":"state" + }, + "Fault":{ + "shape":"SpotInstanceStateFault", + "locationName":"fault" + }, + "Status":{ + "shape":"SpotInstanceStatus", + "locationName":"status" + }, + "ValidFrom":{ + "shape":"DateTime", + "locationName":"validFrom" + }, + "ValidUntil":{ + "shape":"DateTime", + "locationName":"validUntil" + }, + "LaunchGroup":{ + "shape":"String", + "locationName":"launchGroup" + }, + "AvailabilityZoneGroup":{ + "shape":"String", + "locationName":"availabilityZoneGroup" + }, + "LaunchSpecification":{ + "shape":"LaunchSpecification", + "locationName":"launchSpecification" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "BlockDurationMinutes":{ + "shape":"Integer", + "locationName":"blockDurationMinutes" + }, + "ActualBlockHourlyPrice":{ + "shape":"String", + "locationName":"actualBlockHourlyPrice" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "LaunchedAvailabilityZone":{ + "shape":"String", + "locationName":"launchedAvailabilityZone" + } + } + }, + "SpotInstanceRequestIdList":{ + "type":"list", + "member":{ + 
"shape":"String", + "locationName":"SpotInstanceRequestId" + } + }, + "SpotInstanceRequestList":{ + "type":"list", + "member":{ + "shape":"SpotInstanceRequest", + "locationName":"item" + } + }, + "SpotInstanceState":{ + "type":"string", + "enum":[ + "open", + "active", + "closed", + "cancelled", + "failed" + ] + }, + "SpotInstanceStateFault":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceStatus":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "UpdateTime":{ + "shape":"DateTime", + "locationName":"updateTime" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "SpotInstanceType":{ + "type":"string", + "enum":[ + "one-time", + "persistent" + ] + }, + "SpotPlacement":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + } + } + }, + "SpotPrice":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"InstanceType", + "locationName":"instanceType" + }, + "ProductDescription":{ + "shape":"RIProductDescription", + "locationName":"productDescription" + }, + "SpotPrice":{ + "shape":"String", + "locationName":"spotPrice" + }, + "Timestamp":{ + "shape":"DateTime", + "locationName":"timestamp" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + } + } + }, + "SpotPriceHistoryList":{ + "type":"list", + "member":{ + "shape":"SpotPrice", + "locationName":"item" + } + }, + "StartInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "AdditionalInfo":{ + "shape":"String", + "locationName":"additionalInfo" + }, + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + 
} + } + }, + "StartInstancesResult":{ + "type":"structure", + "members":{ + "StartingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "State":{ + "type":"string", + "enum":[ + "Pending", + "Available", + "Deleting", + "Deleted" + ] + }, + "StateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "Status":{ + "type":"string", + "enum":[ + "MoveInProgress", + "InVpc", + "InClassic" + ] + }, + "StatusName":{ + "type":"string", + "enum":["reachability"] + }, + "StatusType":{ + "type":"string", + "enum":[ + "passed", + "failed", + "insufficient-data", + "initializing" + ] + }, + "StopInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + }, + "Force":{ + "shape":"Boolean", + "locationName":"force" + } + } + }, + "StopInstancesResult":{ + "type":"structure", + "members":{ + "StoppingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "Storage":{ + "type":"structure", + "members":{ + "S3":{"shape":"S3Storage"} + } + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"String", + "locationName":"subnetId" + }, + "State":{ + "shape":"SubnetState", + "locationName":"state" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "AvailableIpAddressCount":{ + "shape":"Integer", + "locationName":"availableIpAddressCount" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "DefaultForAz":{ + "shape":"Boolean", + "locationName":"defaultForAz" + }, + "MapPublicIpOnLaunch":{ + "shape":"Boolean", + 
"locationName":"mapPublicIpOnLaunch" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "SubnetIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetId" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"item" + } + }, + "SubnetState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + "SummaryStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data", + "not-applicable", + "initializing" + ] + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + }, + "ResourceType":{ + "shape":"ResourceType", + "locationName":"resourceType" + }, + "Key":{ + "shape":"String", + "locationName":"key" + }, + "Value":{ + "shape":"String", + "locationName":"value" + } + } + }, + "TagDescriptionList":{ + "type":"list", + "member":{ + "shape":"TagDescription", + "locationName":"item" + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"item" + } + }, + "TelemetryStatus":{ + "type":"string", + "enum":[ + "UP", + "DOWN" + ] + }, + "Tenancy":{ + "type":"string", + "enum":[ + "default", + "dedicated", + "host" + ] + }, + "TerminateInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "TerminateInstancesResult":{ + "type":"structure", + "members":{ + "TerminatingInstances":{ + "shape":"InstanceStateChangeList", + "locationName":"instancesSet" + } + } + }, + "TrafficType":{ + "type":"string", + "enum":[ + "ACCEPT", + "REJECT", + "ALL" + ] + }, + 
"UnassignPrivateIpAddressesRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "PrivateIpAddresses" + ], + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "locationName":"networkInterfaceId" + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddressStringList", + "locationName":"privateIpAddress" + } + } + }, + "UnmonitorInstancesRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "locationName":"dryRun" + }, + "InstanceIds":{ + "shape":"InstanceIdStringList", + "locationName":"InstanceId" + } + } + }, + "UnmonitorInstancesResult":{ + "type":"structure", + "members":{ + "InstanceMonitorings":{ + "shape":"InstanceMonitoringList", + "locationName":"instancesSet" + } + } + }, + "UnsuccessfulItem":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"UnsuccessfulItemError", + "locationName":"error" + }, + "ResourceId":{ + "shape":"String", + "locationName":"resourceId" + } + } + }, + "UnsuccessfulItemError":{ + "type":"structure", + "required":[ + "Code", + "Message" + ], + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "UnsuccessfulItemList":{ + "type":"list", + "member":{ + "shape":"UnsuccessfulItem", + "locationName":"item" + } + }, + "UnsuccessfulItemSet":{ + "type":"list", + "member":{ + "shape":"UnsuccessfulItem", + "locationName":"item" + } + }, + "UserBucket":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"String"}, + "S3Key":{"shape":"String"} + } + }, + "UserBucketDetails":{ + "type":"structure", + "members":{ + "S3Bucket":{ + "shape":"String", + "locationName":"s3Bucket" + }, + "S3Key":{ + "shape":"String", + "locationName":"s3Key" + } + } + }, + "UserData":{ + "type":"structure", + "members":{ + "Data":{ + "shape":"String", + "locationName":"data" + } + } + }, + "UserGroupStringList":{ + "type":"list", + "member":{ + 
"shape":"String", + "locationName":"UserGroup" + } + }, + "UserIdGroupPair":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"String", + "locationName":"userId" + }, + "GroupName":{ + "shape":"String", + "locationName":"groupName" + }, + "GroupId":{ + "shape":"String", + "locationName":"groupId" + } + } + }, + "UserIdGroupPairList":{ + "type":"list", + "member":{ + "shape":"UserIdGroupPair", + "locationName":"item" + } + }, + "UserIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"UserId" + } + }, + "ValueStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "VgwTelemetry":{ + "type":"structure", + "members":{ + "OutsideIpAddress":{ + "shape":"String", + "locationName":"outsideIpAddress" + }, + "Status":{ + "shape":"TelemetryStatus", + "locationName":"status" + }, + "LastStatusChange":{ + "shape":"DateTime", + "locationName":"lastStatusChange" + }, + "StatusMessage":{ + "shape":"String", + "locationName":"statusMessage" + }, + "AcceptedRouteCount":{ + "shape":"Integer", + "locationName":"acceptedRouteCount" + } + } + }, + "VgwTelemetryList":{ + "type":"list", + "member":{ + "shape":"VgwTelemetry", + "locationName":"item" + } + }, + "VirtualizationType":{ + "type":"string", + "enum":[ + "hvm", + "paravirtual" + ] + }, + "Volume":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "Size":{ + "shape":"Integer", + "locationName":"size" + }, + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"VolumeState", + "locationName":"status" + }, + "CreateTime":{ + "shape":"DateTime", + "locationName":"createTime" + }, + "Attachments":{ + "shape":"VolumeAttachmentList", + "locationName":"attachmentSet" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VolumeType":{ + "shape":"VolumeType", + 
"locationName":"volumeType" + }, + "Iops":{ + "shape":"Integer", + "locationName":"iops" + }, + "Encrypted":{ + "shape":"Boolean", + "locationName":"encrypted" + }, + "KmsKeyId":{ + "shape":"String", + "locationName":"kmsKeyId" + } + } + }, + "VolumeAttachment":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "Device":{ + "shape":"String", + "locationName":"device" + }, + "State":{ + "shape":"VolumeAttachmentState", + "locationName":"status" + }, + "AttachTime":{ + "shape":"DateTime", + "locationName":"attachTime" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "locationName":"deleteOnTermination" + } + } + }, + "VolumeAttachmentList":{ + "type":"list", + "member":{ + "shape":"VolumeAttachment", + "locationName":"item" + } + }, + "VolumeAttachmentState":{ + "type":"string", + "enum":[ + "attaching", + "attached", + "detaching", + "detached" + ] + }, + "VolumeAttributeName":{ + "type":"string", + "enum":[ + "autoEnableIO", + "productCodes" + ] + }, + "VolumeDetail":{ + "type":"structure", + "required":["Size"], + "members":{ + "Size":{ + "shape":"Long", + "locationName":"size" + } + } + }, + "VolumeIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VolumeId" + } + }, + "VolumeList":{ + "type":"list", + "member":{ + "shape":"Volume", + "locationName":"item" + } + }, + "VolumeState":{ + "type":"string", + "enum":[ + "creating", + "available", + "in-use", + "deleting", + "deleted", + "error" + ] + }, + "VolumeStatusAction":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusActionsList":{ + "type":"list", + "member":{ + 
"shape":"VolumeStatusAction", + "locationName":"item" + } + }, + "VolumeStatusDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"VolumeStatusName", + "locationName":"name" + }, + "Status":{ + "shape":"String", + "locationName":"status" + } + } + }, + "VolumeStatusDetailsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusDetails", + "locationName":"item" + } + }, + "VolumeStatusEvent":{ + "type":"structure", + "members":{ + "EventType":{ + "shape":"String", + "locationName":"eventType" + }, + "Description":{ + "shape":"String", + "locationName":"description" + }, + "NotBefore":{ + "shape":"DateTime", + "locationName":"notBefore" + }, + "NotAfter":{ + "shape":"DateTime", + "locationName":"notAfter" + }, + "EventId":{ + "shape":"String", + "locationName":"eventId" + } + } + }, + "VolumeStatusEventsList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusEvent", + "locationName":"item" + } + }, + "VolumeStatusInfo":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"VolumeStatusInfoStatus", + "locationName":"status" + }, + "Details":{ + "shape":"VolumeStatusDetailsList", + "locationName":"details" + } + } + }, + "VolumeStatusInfoStatus":{ + "type":"string", + "enum":[ + "ok", + "impaired", + "insufficient-data" + ] + }, + "VolumeStatusItem":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"String", + "locationName":"volumeId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VolumeStatus":{ + "shape":"VolumeStatusInfo", + "locationName":"volumeStatus" + }, + "Events":{ + "shape":"VolumeStatusEventsList", + "locationName":"eventsSet" + }, + "Actions":{ + "shape":"VolumeStatusActionsList", + "locationName":"actionsSet" + } + } + }, + "VolumeStatusList":{ + "type":"list", + "member":{ + "shape":"VolumeStatusItem", + "locationName":"item" + } + }, + "VolumeStatusName":{ + "type":"string", + "enum":[ + "io-enabled", + "io-performance" + ] + }, + "VolumeType":{ + 
"type":"string", + "enum":[ + "standard", + "io1", + "gp2" + ] + }, + "Vpc":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"VpcState", + "locationName":"state" + }, + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "DhcpOptionsId":{ + "shape":"String", + "locationName":"dhcpOptionsId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "InstanceTenancy":{ + "shape":"Tenancy", + "locationName":"instanceTenancy" + }, + "IsDefault":{ + "shape":"Boolean", + "locationName":"isDefault" + } + } + }, + "VpcAttachment":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "State":{ + "shape":"AttachmentStatus", + "locationName":"state" + } + } + }, + "VpcAttachmentList":{ + "type":"list", + "member":{ + "shape":"VpcAttachment", + "locationName":"item" + } + }, + "VpcAttributeName":{ + "type":"string", + "enum":[ + "enableDnsSupport", + "enableDnsHostnames" + ] + }, + "VpcClassicLink":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ClassicLinkEnabled":{ + "shape":"Boolean", + "locationName":"classicLinkEnabled" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpcClassicLinkIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcClassicLinkList":{ + "type":"list", + "member":{ + "shape":"VpcClassicLink", + "locationName":"item" + } + }, + "VpcEndpoint":{ + "type":"structure", + "members":{ + "VpcEndpointId":{ + "shape":"String", + "locationName":"vpcEndpointId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + }, + "ServiceName":{ + "shape":"String", + "locationName":"serviceName" + }, + "State":{ + "shape":"State", + "locationName":"state" + }, + "PolicyDocument":{ + "shape":"String", + "locationName":"policyDocument" + }, + "RouteTableIds":{ + "shape":"ValueStringList", + 
"locationName":"routeTableIdSet" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "locationName":"creationTimestamp" + } + } + }, + "VpcEndpointSet":{ + "type":"list", + "member":{ + "shape":"VpcEndpoint", + "locationName":"item" + } + }, + "VpcIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcId" + } + }, + "VpcList":{ + "type":"list", + "member":{ + "shape":"Vpc", + "locationName":"item" + } + }, + "VpcPeeringConnection":{ + "type":"structure", + "members":{ + "AccepterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"accepterVpcInfo" + }, + "ExpirationTime":{ + "shape":"DateTime", + "locationName":"expirationTime" + }, + "RequesterVpcInfo":{ + "shape":"VpcPeeringConnectionVpcInfo", + "locationName":"requesterVpcInfo" + }, + "Status":{ + "shape":"VpcPeeringConnectionStateReason", + "locationName":"status" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VpcPeeringConnectionId":{ + "shape":"String", + "locationName":"vpcPeeringConnectionId" + } + } + }, + "VpcPeeringConnectionList":{ + "type":"list", + "member":{ + "shape":"VpcPeeringConnection", + "locationName":"item" + } + }, + "VpcPeeringConnectionStateReason":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"VpcPeeringConnectionStateReasonCode", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "VpcPeeringConnectionStateReasonCode":{ + "type":"string", + "enum":[ + "initiating-request", + "pending-acceptance", + "active", + "deleted", + "rejected", + "failed", + "expired", + "provisioning", + "deleting" + ] + }, + "VpcPeeringConnectionVpcInfo":{ + "type":"structure", + "members":{ + "CidrBlock":{ + "shape":"String", + "locationName":"cidrBlock" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "VpcId":{ + "shape":"String", + "locationName":"vpcId" + } + } + }, + "VpcState":{ + "type":"string", + "enum":[ + "pending", + "available" + ] + }, + 
"VpnConnection":{ + "type":"structure", + "members":{ + "VpnConnectionId":{ + "shape":"String", + "locationName":"vpnConnectionId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "CustomerGatewayConfiguration":{ + "shape":"String", + "locationName":"customerGatewayConfiguration" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "CustomerGatewayId":{ + "shape":"String", + "locationName":"customerGatewayId" + }, + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + }, + "VgwTelemetry":{ + "shape":"VgwTelemetryList", + "locationName":"vgwTelemetry" + }, + "Options":{ + "shape":"VpnConnectionOptions", + "locationName":"options" + }, + "Routes":{ + "shape":"VpnStaticRouteList", + "locationName":"routes" + } + } + }, + "VpnConnectionIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpnConnectionId" + } + }, + "VpnConnectionList":{ + "type":"list", + "member":{ + "shape":"VpnConnection", + "locationName":"item" + } + }, + "VpnConnectionOptions":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnConnectionOptionsSpecification":{ + "type":"structure", + "members":{ + "StaticRoutesOnly":{ + "shape":"Boolean", + "locationName":"staticRoutesOnly" + } + } + }, + "VpnGateway":{ + "type":"structure", + "members":{ + "VpnGatewayId":{ + "shape":"String", + "locationName":"vpnGatewayId" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + }, + "Type":{ + "shape":"GatewayType", + "locationName":"type" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "VpcAttachments":{ + "shape":"VpcAttachmentList", + "locationName":"attachments" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" + } + } + }, + "VpnGatewayIdStringList":{ + "type":"list", + "member":{ + "shape":"String", + 
"locationName":"VpnGatewayId" + } + }, + "VpnGatewayList":{ + "type":"list", + "member":{ + "shape":"VpnGateway", + "locationName":"item" + } + }, + "VpnState":{ + "type":"string", + "enum":[ + "pending", + "available", + "deleting", + "deleted" + ] + }, + "VpnStaticRoute":{ + "type":"structure", + "members":{ + "DestinationCidrBlock":{ + "shape":"String", + "locationName":"destinationCidrBlock" + }, + "Source":{ + "shape":"VpnStaticRouteSource", + "locationName":"source" + }, + "State":{ + "shape":"VpnState", + "locationName":"state" + } + } + }, + "VpnStaticRouteList":{ + "type":"list", + "member":{ + "shape":"VpnStaticRoute", + "locationName":"item" + } + }, + "VpnStaticRouteSource":{ + "type":"string", + "enum":["Static"] + }, + "ZoneNameStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ZoneName" + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,6203 @@ +{ + "version": "2.0", + "service": "Amazon Elastic Compute Cloud

    Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your need to invest in hardware up front, so you can develop and deploy applications faster.

    ", + "operations": { + "AcceptVpcPeeringConnection": "

    Accept a VPC peering connection request. To accept a request, the VPC peering connection must be in the pending-acceptance state, and you must be the owner of the peer VPC. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests.

    ", + "AllocateAddress": "

    Acquires an Elastic IP address.

    An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "AllocateHosts": "

    Allocates a Dedicated host to your account. At minimum you need to specify the instance size type, Availability Zone, and quantity of hosts you want to allocate.

    ", + "AssignPrivateIpAddresses": "

    Assigns one or more secondary private IP addresses to the specified network interface. You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    AssignPrivateIpAddresses is available only in EC2-VPC.

    ", + "AssociateAddress": "

    Associates an Elastic IP address with an instance or a network interface.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance.

    [VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "AssociateDhcpOptions": "

    Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

    After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

    For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "AssociateRouteTable": "

    Associates a subnet with a route table. The subnet and route table must be in the same VPC. This association causes traffic originating from the subnet to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table from the subnet later. A route table can be associated with multiple subnets.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "AttachClassicLinkVpc": "

    Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more of the VPC's security groups. You cannot link an EC2-Classic instance to more than one VPC at a time. You can only link an instance that's in the running state. An instance is automatically unlinked from a VPC when it's stopped - you can link it to the VPC again when you restart it.

    After you've linked an instance, you cannot change the VPC security groups that are associated with it. To change the security groups, you must first unlink the instance, and then link it again.

    Linking your instance to a VPC is sometimes referred to as attaching your instance.

    ", + "AttachInternetGateway": "

    Attaches an Internet gateway to a VPC, enabling connectivity between the Internet and the VPC. For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "AttachNetworkInterface": "

    Attaches a network interface to an instance.

    ", + "AttachVolume": "

    Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

    Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

    If a volume has an AWS Marketplace product code:

    • The volume can be attached only to a stopped instance.
    • AWS Marketplace product codes are copied from the volume to the instance.
    • You must be subscribed to the product.
    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For an overview of the AWS Marketplace, see Introducing AWS Marketplace.

    For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "AttachVpnGateway": "

    Attaches a virtual private gateway to a VPC. For more information, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "AuthorizeSecurityGroupEgress": "

    [EC2-VPC only] Adds one or more egress rules to a security group for use with a VPC. Specifically, this action permits instances to send traffic to one or more destination CIDR IP address ranges, or to one or more destination security groups for the same VPC. This action doesn't apply to security groups for use in EC2-Classic. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    You can have up to 50 rules per security group (covering both ingress and egress rules).

    Each rule consists of the protocol (for example, TCP), plus either a CIDR range or a source group. For the TCP and UDP protocols, you must also specify the destination port or port range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 for the type or code to mean all types or all codes.

    Rule changes are propagated to affected instances as quickly as possible. However, a small delay might occur.

    ", + "AuthorizeSecurityGroupIngress": "

    Adds one or more ingress rules to a security group.

    EC2-Classic: You can have up to 100 rules per group.

    EC2-VPC: You can have up to 50 rules per group (covering both ingress and egress rules).

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    [EC2-Classic] This action gives one or more CIDR IP address ranges permission to access a security group in your account, or gives one or more security groups (called the source groups) permission to access a security group for your account. A source group can be for your own AWS account, or another.

    [EC2-VPC] This action gives one or more CIDR IP address ranges permission to access a security group in your VPC, or gives one or more other security groups (called the source groups) permission to access a security group for your VPC. The security groups must all be for the same VPC.

    ", + "BundleInstance": "

    Bundles an Amazon instance store-backed Windows instance.

    During bundling, only the root device volume (C:\\) is bundled. Data on other instance store volumes is not preserved.

    This action is not applicable for Linux/Unix instances or Windows instances that are backed by Amazon EBS.

    For more information, see Creating an Instance Store-Backed Windows AMI.

    ", + "CancelBundleTask": "

    Cancels a bundling operation for an instance store-backed Windows instance.

    ", + "CancelConversionTask": "

    Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.

    For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelExportTask": "

    Cancels an active export task. The request removes all artifacts of the export, including any partially-created Amazon S3 objects. If the export task is complete or is in the process of transferring the final disk image, the command fails and returns an error.

    ", + "CancelImportTask": "

    Cancels an in-process import virtual machine or import snapshot task.

    ", + "CancelReservedInstancesListing": "

    Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CancelSpotFleetRequests": "

    Cancels the specified Spot fleet requests.

    After you cancel a Spot fleet request, the Spot fleet launches no new Spot instances. You must specify whether the Spot fleet should also terminate its Spot instances. If you terminate the instances, the Spot fleet request enters the cancelled_terminating state. Otherwise, the Spot fleet request enters the cancelled_running state and the instances continue to run until they are interrupted or you terminate them manually.

    ", + "CancelSpotInstanceRequests": "

    Cancels one or more Spot instance requests. Spot instances are instances that Amazon EC2 starts on your behalf when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    Canceling a Spot instance request does not terminate running Spot instances associated with the request.

    ", + "ConfirmProductInstance": "

    Determines whether a product code is associated with an instance. This action can only be used by the owner of the product code. It is useful when a product code owner needs to verify whether another user's instance is eligible for support.

    ", + "CopyImage": "

    Initiates the copy of an AMI from the specified source region to the current region. You specify the destination region by using its endpoint when making the request.

    For more information, see Copying AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopySnapshot": "

    Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

    Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

    For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateCustomerGateway": "

    Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

    For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

    Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 region, and 9059, which is reserved in the eu-west-1 region.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

    ", + "CreateDhcpOptions": "

    Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

    • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas.
    • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, MyCompany.com). Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.
    • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.
    • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.
    • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

    Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an Internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateFlowLogs": "

    Creates one or more flow logs to capture IP traffic for a specific network interface, subnet, or VPC. Flow logs are delivered to a specified log group in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, a log stream is created in CloudWatch Logs for each network interface in the subnet or VPC. Log streams can include information about accepted and rejected traffic to a network interface. You can view the data in your log streams using Amazon CloudWatch Logs.

    In your request, you must also specify an IAM role that has permission to publish logs to CloudWatch Logs.

    ", + "CreateImage": "

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Creating Amazon EBS-Backed Linux AMIs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInstanceExportTask": "

    Exports a running or stopped instance to an S3 bucket.

    For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting EC2 Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateInternetGateway": "

    Creates an Internet gateway for use with a VPC. After creating the Internet gateway, you attach it to a VPC using AttachInternetGateway.

    For more information about your VPC and Internet gateway, see the Amazon Virtual Private Cloud User Guide.

    ", + "CreateKeyPair": "

    Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores the public key and displays the private key for you to save to a file. The private key is returned as an unencrypted PEM encoded PKCS#8 private key. If a key with the specified name already exists, Amazon EC2 returns an error.

    You can have up to five thousand key pairs per region.

    The key pair returned to you is available only in the region in which you create it. To create a key pair that is available in all regions, use ImportKeyPair.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateNatGateway": "

    Creates a NAT gateway in the specified subnet. A NAT gateway can be used to enable instances in a private subnet to connect to the Internet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkAcl": "

    Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkAclEntry": "

    Creates an entry (a rule) in a network ACL with the specified rule number. Each network ACL has a set of numbered ingress rules and a separate set of numbered egress rules. When determining whether a packet should be allowed in or out of a subnet associated with the ACL, we process the entries in the ACL according to the rule numbers, in ascending order. Each network ACL has a set of ingress rules and a separate set of egress rules.

    We recommend that you leave room between the rule numbers (for example, 100, 110, 120, ...), and not number them one right after the other (for example, 101, 102, 103, ...). This makes it easier to add a rule between existing ones without having to renumber the rules.

    After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateNetworkInterface": "

    Creates a network interface in the specified subnet.

    For more information about network interfaces, see Elastic Network Interfaces in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreatePlacementGroup": "

    Creates a placement group that you launch cluster instances into. You must give the group a name that's unique within the scope of your account.

    For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListing": "

    Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved Instance Marketplace. You can submit one Reserved Instance listing at a time. To get a list of your Reserved Instances, you can use the DescribeReservedInstances operation.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    To sell your Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Reserved Instances, and specify the upfront price to receive for them. Your Reserved Instance listings then become available for purchase. To view the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings operation.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateRoute": "

    Creates a route in a route table within a VPC.

    You must specify one of the following targets: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, or network interface.

    When determining how to route traffic, we use the route with the most specific match. For example, let's say the traffic is destined for 192.0.2.3, and the route table includes the following two routes:

    • 192.0.2.0/24 (goes to some target A)

    • 192.0.2.0/28 (goes to some target B)

    Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the traffic.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateRouteTable": "

    Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateSecurityGroup": "

    Creates a security group.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    EC2-Classic: You can have up to 500 security groups.

    EC2-VPC: You can create up to 500 security groups per VPC.

    When you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.

    You have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.

    You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.

    ", + "CreateSnapshot": "

    Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

    When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

    You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

    To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

    Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

    For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateSpotDatafeedSubscription": "

    Creates a data feed for Spot instances, enabling you to view Spot instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateSubnet": "

    Creates a subnet in an existing VPC.

    When you create each subnet, you provide the VPC ID and the CIDR block you want for the subnet. After you create a subnet, you can't change its CIDR block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming you want only a single subnet in the VPC), or a subset of the VPC's CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest subnet (and VPC) you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses).

    AWS reserves both the first four and the last IP address in each subnet's CIDR block. They're not available for use.

    If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

    If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateTags": "

    Adds or overwrites one or more tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 10 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateVolume": "

    Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

    You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

    You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    For more information, see Creating or Restoring an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateVpc": "

    Creates a VPC with the specified CIDR block.

    The smallest VPC you can create uses a /28 netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP addresses). To help you decide how big to make your VPC, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    By default, each instance you launch in the VPC has the default DHCP options, which includes only a default DNS server that we provide (AmazonProvidedDNS). For more information about DHCP options, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpcEndpoint": "

    Creates a VPC endpoint for a specified AWS service. An endpoint enables you to create a private connection between your VPC and another AWS service in your account. You can specify an endpoint policy to attach to the endpoint that will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

    Currently, only endpoints to Amazon S3 are supported.

    ", + "CreateVpcPeeringConnection": "

    Requests a VPC peering connection between two VPCs: a requester VPC that you own and a peer VPC with which to create the connection. The peer VPC can belong to another AWS account. The requester VPC and peer VPC cannot have overlapping CIDR blocks.

    The owner of the peer VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

    A CreateVpcPeeringConnection request between VPCs with overlapping CIDR blocks results in the VPC peering connection having a status of failed.

    ", + "CreateVpnConnection": "

    Creates a VPN connection between an existing virtual private gateway and a VPN customer gateway. The only supported connection type is ipsec.1.

    The response includes information that you need to give to your network administrator to configure your customer gateway.

    We strongly recommend that you use HTTPS when calling this operation because the response contains sensitive cryptographic information for configuring your customer gateway.

    If you decide to shut down your VPN connection for any reason and later create a new VPN connection, you must reconfigure your customer gateway with the new information returned from this call.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnConnectionRoute": "

    Creates a static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "CreateVpnGateway": "

    Creates a virtual private gateway. A virtual private gateway is the endpoint on the VPC side of your VPN connection. You can create a virtual private gateway before creating the VPC itself.

    For more information about virtual private gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DeleteCustomerGateway": "

    Deletes the specified customer gateway. You must delete the VPN connection before you can delete the customer gateway.

    ", + "DeleteDhcpOptions": "

    Deletes the specified set of DHCP options. You must disassociate the set of DHCP options before you can delete it. You can disassociate the set of DHCP options by associating either a new set of options or the default set of options with the VPC.

    ", + "DeleteFlowLogs": "

    Deletes one or more flow logs.

    ", + "DeleteInternetGateway": "

    Deletes the specified Internet gateway. You must detach the Internet gateway from the VPC before you can delete it.

    ", + "DeleteKeyPair": "

    Deletes the specified key pair, by removing the public key from Amazon EC2.

    ", + "DeleteNatGateway": "

    Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its Elastic IP address, but does not release the address from your account. Deleting a NAT gateway does not delete any NAT gateway routes in your route tables.

    ", + "DeleteNetworkAcl": "

    Deletes the specified network ACL. You can't delete the ACL if it's associated with any subnets. You can't delete the default network ACL.

    ", + "DeleteNetworkAclEntry": "

    Deletes the specified ingress or egress entry (rule) from the specified network ACL.

    ", + "DeleteNetworkInterface": "

    Deletes the specified network interface. You must detach the network interface before you can delete it.

    ", + "DeletePlacementGroup": "

    Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteRoute": "

    Deletes the specified route from the specified route table.

    ", + "DeleteRouteTable": "

    Deletes the specified route table. You must disassociate the route table from any subnets before you can delete it. You can't delete the main route table.

    ", + "DeleteSecurityGroup": "

    Deletes a security group.

    If you attempt to delete a security group that is associated with an instance, or is referenced by another security group, the operation fails with InvalidGroup.InUse in EC2-Classic or DependencyViolation in EC2-VPC.

    ", + "DeleteSnapshot": "

    Deletes the specified snapshot.

    When you make periodic snapshots of a volume, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the volume.

    You cannot delete a snapshot of the root device of an EBS volume used by a registered AMI. You must first de-register the AMI before you can delete the snapshot.

    For more information, see Deleting an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteSpotDatafeedSubscription": "

    Deletes the data feed for Spot instances.

    ", + "DeleteSubnet": "

    Deletes the specified subnet. You must terminate all running instances in the subnet before you can delete the subnet.

    ", + "DeleteTags": "

    Deletes the specified set of tags from the specified set of resources. This call is designed to follow a DescribeTags request.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVolume": "

    Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

    The volume may remain in the deleting state for several minutes.

    For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DeleteVpc": "

    Deletes the specified VPC. You must detach or delete all gateways and resources that are associated with the VPC before you can delete it. For example, you must terminate all instances running in the VPC, delete all security groups associated with the VPC (except the default one), delete all route tables associated with the VPC (except the default one), and so on.

    ", + "DeleteVpcEndpoints": "

    Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint.

    ", + "DeleteVpcPeeringConnection": "

    Deletes a VPC peering connection. Either the owner of the requester VPC or the owner of the peer VPC can delete the VPC peering connection if it's in the active state. The owner of the requester VPC can delete a VPC peering connection in the pending-acceptance state.

    ", + "DeleteVpnConnection": "

    Deletes the specified VPN connection.

    If you're deleting the VPC and its associated components, we recommend that you detach the virtual private gateway from the VPC and delete the VPC before deleting the VPN connection. If you believe that the tunnel credentials for your VPN connection have been compromised, you can delete the VPN connection and create a new one that has new keys, without needing to delete the VPC or virtual private gateway. If you create a new VPN connection, you must reconfigure the customer gateway using the new configuration information returned with the new VPN connection ID.

    ", + "DeleteVpnConnectionRoute": "

    Deletes the specified static route associated with a VPN connection between an existing virtual private gateway and a VPN customer gateway. The static route allows traffic to be routed from the virtual private gateway to the VPN customer gateway.

    ", + "DeleteVpnGateway": "

    Deletes the specified virtual private gateway. We recommend that before you delete a virtual private gateway, you detach it from the VPC and delete the VPN connection. Note that you don't need to delete the virtual private gateway if you plan to delete and recreate the VPN connection between your VPC and your network.

    ", + "DeregisterImage": "

    Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances.

    This command does not delete the AMI.

    ", + "DescribeAccountAttributes": "

    Describes attributes of your AWS account. The following are the supported account attributes:

    • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

    • default-vpc: The ID of the default VPC for your account, or none.

    • max-instances: The maximum number of On-Demand instances that you can run.

    • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

    • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

    • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

    ", + "DescribeAddresses": "

    Describes one or more of your Elastic IP addresses.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeAvailabilityZones": "

    Describes one or more of the Availability Zones that are available to you. The results include zones only for the region you're currently using. If there is an event impacting an Availability Zone, you can use this request to view the state and any provided message for that Availability Zone.

    For more information, see Regions and Availability Zones in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeBundleTasks": "

    Describes one or more of your bundling tasks.

    Completed bundle tasks are listed for only a limited time. If your bundle task is no longer in the list, you can still register an AMI from it. Just use RegisterImage with the Amazon S3 bucket name and image manifest name you provided to the bundle task.

    ", + "DescribeClassicLinkInstances": "

    Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink; you cannot use this request to return information about other instances.

    ", + "DescribeConversionTasks": "

    Describes one or more of your conversion tasks. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeCustomerGateways": "

    Describes one or more of your VPN customer gateways.

    For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeDhcpOptions": "

    Describes one or more of your DHCP options sets.

    For more information about DHCP options sets, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeExportTasks": "

    Describes one or more of your export tasks.

    ", + "DescribeFlowLogs": "

    Describes one or more flow logs. To view the information in your flow logs (the log streams for the network interfaces), you must use the CloudWatch Logs console or the CloudWatch Logs API.

    ", + "DescribeHosts": "

    Describes one or more of your Dedicated hosts.

    The results describe only the Dedicated hosts in the region you're currently using. All listed instances consume capacity on your Dedicated host. Dedicated hosts that have recently been released will be listed with the state released.

    ", + "DescribeIdFormat": "

    Describes the ID format settings for your resources on a per-region basis, for example, to view which resource types are enabled for longer IDs. This request only returns information about resource types whose ID formats can be modified; it does not return information about other resource types.

    The following resource types support longer IDs: instance | reservation.

    These settings apply to the IAM user who makes the request; they do not apply to the entire AWS account. By default, an IAM user defaults to the same settings as the root user, unless they explicitly override the settings by running the ModifyIdFormat command. Resources created with longer IDs are visible to all IAM users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

    ", + "DescribeImageAttribute": "

    Describes the specified attribute of the specified AMI. You can specify only one attribute at a time.

    ", + "DescribeImages": "

    Describes one or more of the images (AMIs, AKIs, and ARIs) available to you. Images available to you include public images, private images that you own, and private images owned by other AWS accounts but for which you have explicit launch permissions.

    Deregistered images are included in the returned results for an unspecified interval after deregistration.

    ", + "DescribeImportImageTasks": "

    Displays details about an import virtual machine or import snapshot tasks that are already created.

    ", + "DescribeImportSnapshotTasks": "

    Describes your import snapshot tasks.

    ", + "DescribeInstanceAttribute": "

    Describes the specified attribute of the specified instance. You can specify only one attribute at a time. Valid attribute values are: instanceType | kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | groupSet | ebsOptimized | sriovNetSupport

    ", + "DescribeInstanceStatus": "

    Describes the status of one or more instances.

    Instance status includes the following components:

    • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status Checks for Your Instances and Troubleshooting Instances with Failed Status Checks in the Amazon Elastic Compute Cloud User Guide.

    • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled Events for Your Instances in the Amazon Elastic Compute Cloud User Guide.

    • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeInstances": "

    Describes one or more of your instances.

    If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

    Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

    ", + "DescribeInternetGateways": "

    Describes one or more of your Internet gateways.

    ", + "DescribeKeyPairs": "

    Describes one or more of your key pairs.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeMovingAddresses": "

    Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, or that are being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

    ", + "DescribeNatGateways": "

    Describes one or more of your NAT gateways.

    ", + "DescribeNetworkAcls": "

    Describes one or more of your network ACLs.

    For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeNetworkInterfaceAttribute": "

    Describes a network interface attribute. You can specify only one attribute at a time.

    ", + "DescribeNetworkInterfaces": "

    Describes one or more of your network interfaces.

    ", + "DescribePlacementGroups": "

    Describes one or more of your placement groups. For more information about placement groups and cluster instances, see Cluster Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribePrefixLists": "

    Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a VPC endpoint.

    ", + "DescribeRegions": "

    Describes one or more regions that are currently available to you.

    For a list of the regions supported by Amazon EC2, see Regions and Endpoints.

    ", + "DescribeReservedInstances": "

    Describes one or more of the Reserved Instances that you purchased.

    For more information about Reserved Instances, see Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesListings": "

    Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

    The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

    As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

    As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesModifications": "

    Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeReservedInstancesOfferings": "

    Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

    If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

    For more information, see Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeRouteTables": "

    Describes one or more of your route tables.

    Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeScheduledInstanceAvailability": "

    Finds available schedules that meet the specified criteria.

    You can search for an available schedule no more than 3 months in advance. You must meet the minimum required duration of 1,200 hours per year. For example, the minimum daily schedule is 4 hours, the minimum weekly schedule is 24 hours, and the minimum monthly schedule is 100 hours.

    After you find a schedule that meets your needs, call PurchaseScheduledInstances to purchase Scheduled Instances with that schedule.

    ", + "DescribeScheduledInstances": "

    Describes one or more of your Scheduled Instances.

    ", + "DescribeSecurityGroups": "

    Describes one or more of your security groups.

    A security group is for use with instances either in the EC2-Classic platform or in a specific VPC. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeSnapshotAttribute": "

    Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSnapshots": "

    Describes one or more of the EBS snapshots available to you. Available snapshots include public snapshots available for any AWS account to launch, private snapshots that you own, and private snapshots owned by another AWS account but for which you've been given explicit create volume permissions.

    The create volume permissions fall into the following categories:

    • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.
    • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.
    • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

    The list of snapshots returned can be modified by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

    If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

    If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

    If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

    If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

    For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSpotDatafeedSubscription": "

    Describes the data feed for Spot instances. For more information, see Spot Instance Data Feed in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeSpotFleetInstances": "

    Describes the running instances for the specified Spot fleet.

    ", + "DescribeSpotFleetRequestHistory": "

    Describes the events for the specified Spot fleet request during the specified time.

    Spot fleet events are delayed by up to 30 seconds before they can be described. This ensures that you can query by the last evaluated time and not miss a recorded event.

    ", + "DescribeSpotFleetRequests": "

    Describes your Spot fleet requests.

    ", + "DescribeSpotInstanceRequests": "

    Describes the Spot instance requests that belong to your account. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    You can use DescribeSpotInstanceRequests to find a running Spot instance by examining the response. If the status of the Spot instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

    ", + "DescribeSpotPriceHistory": "

    Describes the Spot price history. The prices returned are listed in chronological order, from the oldest to the most recent, for up to the past 90 days. For more information, see Spot Instance Pricing History in the Amazon Elastic Compute Cloud User Guide.

    When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.

    ", + "DescribeSubnets": "

    Describes one or more of your subnets.

    For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeTags": "

    Describes one or more of the tags for your EC2 resources.

    For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeAttribute": "

    Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVolumeStatus": "

    Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

    The DescribeVolumeStatus operation provides the following information about the specified volumes:

    Status: Reflects the current status of the volume. The possible values are ok, impaired, warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information on volume status, see Monitoring the Status of Your Volumes.

    Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

    Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

    Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

    ", + "DescribeVolumes": "

    Describes the specified EBS volumes.

    If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

    For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVpcAttribute": "

    Describes the specified attribute of the specified VPC. You can specify only one attribute at a time.

    ", + "DescribeVpcClassicLink": "

    Describes the ClassicLink status of one or more VPCs.

    ", + "DescribeVpcClassicLinkDnsSupport": "

    Describes the ClassicLink DNS support status of one or more VPCs. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "DescribeVpcEndpointServices": "

    Describes all supported AWS services that can be specified when creating a VPC endpoint.

    ", + "DescribeVpcEndpoints": "

    Describes one or more of your VPC endpoints.

    ", + "DescribeVpcPeeringConnections": "

    Describes one or more of your VPC peering connections.

    ", + "DescribeVpcs": "

    Describes one or more of your VPCs.

    ", + "DescribeVpnConnections": "

    Describes one or more of your VPN connections.

    For more information about VPN connections, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DescribeVpnGateways": "

    Describes one or more of your virtual private gateways.

    For more information about virtual private gateways, see Adding an IPsec Hardware VPN to Your VPC in the Amazon Virtual Private Cloud User Guide.

    ", + "DetachClassicLinkVpc": "

    Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance has been unlinked, the VPC security groups are no longer associated with it. An instance is automatically unlinked from a VPC when it's stopped.

    ", + "DetachInternetGateway": "

    Detaches an Internet gateway from a VPC, disabling connectivity between the Internet and the VPC. The VPC must not contain any running instances with Elastic IP addresses.

    ", + "DetachNetworkInterface": "

    Detaches a network interface from an instance.

    ", + "DetachVolume": "

    Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so results in the volume being stuck in a busy state while detaching.

    If an Amazon EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

    When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

    For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

    ", + "DetachVpnGateway": "

    Detaches a virtual private gateway from a VPC. You do this if you're planning to turn off the VPC and not use it anymore. You can confirm a virtual private gateway has been completely detached from a VPC by describing the virtual private gateway (any attachments to the virtual private gateway are also described).

    You must wait for the attachment's state to switch to detached before you can delete the VPC or attach a different VPC to the virtual private gateway.

    ", + "DisableVgwRoutePropagation": "

    Disables a virtual private gateway (VGW) from propagating routes to a specified route table of a VPC.

    ", + "DisableVpcClassicLink": "

    Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has EC2-Classic instances linked to it.

    ", + "DisableVpcClassicLinkDnsSupport": "

    Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "DisassociateAddress": "

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    ", + "DisassociateRouteTable": "

    Disassociates a subnet from a route table.

    After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "EnableVgwRoutePropagation": "

    Enables a virtual private gateway (VGW) to propagate routes to the specified route table of a VPC.

    ", + "EnableVolumeIO": "

    Enables I/O operations for a volume that had I/O operations disabled because the data on the volume was potentially inconsistent.

    ", + "EnableVpcClassicLink": "

    Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable your VPC for ClassicLink if any of your VPC's route tables have existing routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "EnableVpcClassicLinkDnsSupport": "

    Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information about ClassicLink, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

    ", + "GetConsoleOutput": "

    Gets the console output for the specified instance.

    Instances do not have a physical monitor through which you can view their console output. They also lack physical controls that allow you to power up, reboot, or shut them down. To allow these actions, we provide them through the Amazon EC2 API and command line interface.

    Instance console output is buffered and posted shortly after instance boot, reboot, and termination. Amazon EC2 preserves the most recent 64 KB output which is available for at least one hour after the most recent post.

    For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. This output is buffered because the instance produces it and then posts it to a store where the instance's owner can retrieve it.

    For Windows instances, the instance console output includes output from the EC2Config service.

    ", + "GetPasswordData": "

    Retrieves the encrypted administrator password for an instance running Windows.

    The Windows password is generated at boot if the EC2Config service plugin, Ec2SetPassword, is enabled. This usually only happens the first time an AMI is launched, and then Ec2SetPassword is automatically disabled. The password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling.

    The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file.

    Password generation and encryption takes a few moments. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password.

    ", + "ImportImage": "

    Import single or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI).

    ", + "ImportInstance": "

    Creates an import instance task using metadata from the specified disk image. ImportInstance only supports single-volume VMs. To import multi-volume VMs, use ImportImage. After importing the image, you then upload it using the ec2-import-volume command in the EC2 command line tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportKeyPair": "

    Imports the public key from an RSA key pair that you created with a third-party tool. Compare this with CreateKeyPair, in which AWS creates the key pair and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, you create the key pair and give AWS just the public key. The private key is never transferred between you and AWS.

    For more information about key pairs, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportSnapshot": "

    Imports a disk into an EBS snapshot.

    ", + "ImportVolume": "

    Creates an import volume task using metadata from the specified disk image. After importing the image, you then upload it using the ec2-import-volume command in the Amazon EC2 command-line interface (CLI) tools. For more information, see Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifyHosts": "

    Modify the auto-placement setting of a Dedicated host. When auto-placement is enabled, AWS will place instances that you launch with a tenancy of host, but without targeting a specific host ID, onto any available Dedicated host in your account which has auto-placement enabled. When auto-placement is disabled, you need to provide a host ID if you want the instance to launch onto a specific host. If no host ID is provided, the instance will be launched onto a suitable host which has auto-placement enabled.

    ", + "ModifyIdFormat": "

    Modifies the ID format for the specified resource on a per-region basis. You can specify that resources should receive longer IDs (17-character IDs) when they are created. The following resource types support longer IDs: instance | reservation.

    This setting applies to the IAM user who makes the request; it does not apply to the entire AWS account. By default, an IAM user defaults to the same settings as the root user, unless they explicitly override the settings by running this request. Resources created with longer IDs are visible to all IAM users, regardless of these settings and provided that they have permission to use the relevant Describe command for the resource type.

    ", + "ModifyImageAttribute": "

    Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.

    AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.

    ", + "ModifyInstanceAttribute": "

    Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

    To modify some attributes, the instance must be stopped. For more information, see Modifying Attributes of a Stopped Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifyInstancePlacement": "

    Set the instance affinity value for a specific stopped instance and modify the instance tenancy setting.

    Instance affinity is disabled by default. When instance affinity is host and it is not associated with a specific Dedicated host, the next time it is launched it will automatically be associated with the host it lands on. This relationship will persist if the instance is stopped/started, or rebooted.

    You can modify the host ID associated with a stopped instance. If a stopped instance has a new host ID association, the instance will target that host when restarted.

    You can modify the tenancy of a stopped instance with a tenancy of host or dedicated.

    Affinity, hostID, and tenancy are not required parameters, but at least one of them must be specified in the request. Affinity and tenancy can be modified in the same request, but tenancy can only be modified on instances that are stopped.

    ", + "ModifyNetworkInterfaceAttribute": "

    Modifies the specified network interface attribute. You can specify only one attribute at a time.

    ", + "ModifyReservedInstances": "

    Modifies the Availability Zone, instance count, instance type, or network platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type.

    For more information, see Modifying Reserved Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "ModifySnapshotAttribute": "

    Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    Snapshots with AWS Marketplace product codes cannot be made public.

    ", + "ModifySpotFleetRequest": "

    Modifies the specified Spot fleet request.

    While the Spot fleet request is being modified, it is in the modifying state.

    To scale up your Spot fleet, increase its target capacity. The Spot fleet launches the additional Spot instances according to the allocation strategy for the Spot fleet request. If the allocation strategy is lowestPrice, the Spot fleet launches instances using the Spot pool with the lowest price. If the allocation strategy is diversified, the Spot fleet distributes the instances across the Spot pools.

    To scale down your Spot fleet, decrease its target capacity. First, the Spot fleet cancels any open bids that exceed the new target capacity. You can request that the Spot fleet terminate Spot instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowestPrice, the Spot fleet terminates the instances with the highest price per unit. If the allocation strategy is diversified, the Spot fleet terminates instances across the Spot pools. Alternatively, you can request that the Spot fleet keep the fleet at its current size, but not replace any Spot instances that are interrupted or that you terminate manually.

    ", + "ModifySubnetAttribute": "

    Modifies a subnet attribute.

    ", + "ModifyVolumeAttribute": "

    Modifies a volume attribute.

    By default, all I/O operations for the volume are suspended when the data on the volume is determined to be potentially inconsistent, to prevent undetectable, latent data corruption. The I/O access to the volume can be resumed by first enabling I/O access and then checking the data consistency on your volume.

    You can change the default behavior to resume I/O operations. We recommend that you change this only for boot volumes or for volumes that are stateless or disposable.

    ", + "ModifyVpcAttribute": "

    Modifies the specified attribute of the specified VPC.

    ", + "ModifyVpcEndpoint": "

    Modifies attributes of a specified VPC endpoint. You can modify the policy associated with the endpoint, and you can add and remove route tables associated with the endpoint.

    ", + "MonitorInstances": "

    Enables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    ", + "MoveAddressToVpc": "

    Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account for more than 24 hours, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that's allocated for use in the EC2-VPC platform to the EC2-Classic platform. You cannot migrate an Elastic IP address that's associated with a reverse DNS record. Contact AWS account and billing support to remove the reverse DNS record.

    ", + "PurchaseReservedInstancesOffering": "

    Purchases a Reserved Instance for use with your account. With Reserved Instances, you obtain a capacity reservation for a certain instance configuration over a specified period of time and pay a lower hourly rate compared to On-Demand instance pricing.

    Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings that match your specifications. After you've purchased a Reserved Instance, you can check for your new Reserved Instance with DescribeReservedInstances.

    For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon Elastic Compute Cloud User Guide.

    ", + "PurchaseScheduledInstances": "

    Purchases one or more Scheduled Instances with the specified schedule.

    Scheduled Instances enable you to purchase Amazon EC2 compute capacity by the hour for a one-year term. Before you can purchase a Scheduled Instance, you must call DescribeScheduledInstanceAvailability to check for available schedules and obtain a purchase token.

    ", + "RebootInstances": "

    Requests a reboot of one or more instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

    If a Linux/Unix instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

    For more information about troubleshooting, see Getting Console Output and Rebooting Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "RegisterImage": "

    Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

    You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. For more information, see Launching an Instance from a Snapshot in the Amazon Elastic Compute Cloud User Guide.

    Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billingProduct code associated with an AMI to verify subscription status for package updates. Creating an AMI from an EBS snapshot does not maintain this billing code, and subsequent instances launched from such an AMI will not be able to connect to package update infrastructure.

    Similarly, although you can create a Windows AMI from a snapshot, you can't successfully launch an instance from the AMI.

    To create Windows AMIs or to create AMIs for Linux operating systems that must retain AMI billing codes to work properly, see CreateImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

    You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

    ", + "RejectVpcPeeringConnection": "

    Rejects a VPC peering connection request. The VPC peering connection must be in the pending-acceptance state. Use the DescribeVpcPeeringConnections request to view your outstanding VPC peering connection requests. To delete an active VPC peering connection, or to delete a VPC peering connection request that you initiated, use DeleteVpcPeeringConnection.

    ", + "ReleaseAddress": "

    Releases the specified Elastic IP address.

    After releasing an Elastic IP address, it is released to the IP address pool and might be unavailable to you. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another AWS account.

    [EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

    [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you try to release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

    ", + "ReleaseHosts": "

    When you no longer want to use a Dedicated host it can be released. On-Demand billing is stopped and the host goes into released state. The host ID of Dedicated hosts that have been released can no longer be specified in another request, e.g., ModifyHosts. You must stop or terminate all instances on a host before it can be released.

    When Dedicated hosts are released, it may take some time for them to stop counting toward your limit and you may receive capacity errors when trying to allocate new Dedicated hosts. Try waiting a few minutes, and then try again.

    Released hosts will still appear in a DescribeHosts response.

    ", + "ReplaceNetworkAclAssociation": "

    Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceNetworkAclEntry": "

    Replaces an entry (rule) in a network ACL. For more information about network ACLs, see Network ACLs in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRoute": "

    Replaces an existing route within a route table in a VPC. You must provide only one of the following: Internet gateway or virtual private gateway, NAT instance, NAT gateway, VPC peering connection, or network interface.

    For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    ", + "ReplaceRouteTableAssociation": "

    Changes the route table associated with a given subnet in a VPC. After the operation completes, the subnet uses the routes in the new route table it's associated with. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

    You can also use ReplaceRouteTableAssociation to change which table is the main route table in the VPC. You just specify the main route table's association ID and the route table to be the new main route table.

    ", + "ReportInstanceStatus": "

    Submits feedback about the status of an instance. The instance must be in the running state. If your experience with the instance differs from the instance status returned by DescribeInstanceStatus, use ReportInstanceStatus to report your experience with the instance. Amazon EC2 collects this information to improve the accuracy of status checks.

    Use of this action does not change the value returned by DescribeInstanceStatus.

    ", + "RequestSpotFleet": "

    Creates a Spot fleet request.

    You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

    By default, the Spot fleet requests Spot instances in the Spot pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

    Alternatively, you can specify that the Spot fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot instances in your Spot fleet are in different Spot pools, you can improve the availability of your fleet.

    For more information, see Spot Fleet Requests in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstances": "

    Creates a Spot instance request. Spot instances are instances that Amazon EC2 launches when the bid price that you specify exceeds the current Spot price. Amazon EC2 periodically sets the Spot price based on available Spot Instance capacity and current Spot instance requests. For more information, see Spot Instance Requests in the Amazon Elastic Compute Cloud User Guide.

    ", + "ResetImageAttribute": "

    Resets an attribute of an AMI to its default value.

    The productCodes attribute can't be reset.

    ", + "ResetInstanceAttribute": "

    Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the SourceDestCheck, the instance can be either running or stopped.

    The SourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "ResetNetworkInterfaceAttribute": "

    Resets a network interface attribute. You can specify only one attribute at a time.

    ", + "ResetSnapshotAttribute": "

    Resets permission settings for the specified snapshot.

    For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

    ", + "RestoreAddressToClassic": "

    Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface. You cannot restore an Elastic IP address that's associated with a reverse DNS record. Contact AWS account and billing support to remove the reverse DNS record.

    ", + "RevokeSecurityGroupEgress": "

    [EC2-VPC only] Removes one or more egress rules from a security group for EC2-VPC. This action doesn't apply to security groups for use in EC2-Classic. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be revoked.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RevokeSecurityGroupIngress": "

    Removes one or more ingress rules from a security group. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be removed.

    Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

    Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

    ", + "RunInstances": "

    Launches the specified number of instances using an AMI for which you have permissions.

    When you launch an instance, it enters the pending state. After the instance is ready for you, it enters the running state. To check the state of your instance, call DescribeInstances.

    If you don't specify a security group when launching an instance, Amazon EC2 uses the default security group. For more information, see Security Groups in the Amazon Elastic Compute Cloud User Guide.

    [EC2-VPC only accounts] If you don't specify a subnet in the request, we choose a default subnet from your default VPC for you.

    [EC2-Classic accounts] If you're launching into EC2-Classic and you don't specify an Availability Zone, we choose one for you.

    Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

    You can provide optional user data when launching an instance. For more information, see Instance Metadata in the Amazon Elastic Compute Cloud User Guide.

    If any of the AMIs have a product code attached for which the user has not subscribed, RunInstances fails.

    T2 instance types can only be launched into a VPC. If you do not have a default VPC, or if you do not specify a subnet ID in the request, RunInstances fails.

    For more information about troubleshooting, see What To Do If An Instance Immediately Terminates, and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunScheduledInstances": "

    Launches the specified Scheduled Instances.

    Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

    You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes.

    ", + "StartInstances": "

    Starts an Amazon EBS-backed AMI that you've previously stopped.

    Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for hourly instance usage. However, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Performing this operation on an instance that uses an instance store as its root device returns an error.

    For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

    ", + "StopInstances": "

    Stops an Amazon EBS-backed instance. Each time you transition an instance from stopped to started, Amazon EC2 charges a full instance hour, even if transitions happen multiple times within a single hour.

    You can't start or stop Spot instances.

    Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for hourly instance usage. However, your root partition Amazon EBS volume remains, continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time.

    Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

    Performing this operation on an instance that uses an instance store as its root device returns an error.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    For more information about troubleshooting, see Troubleshooting Stopping Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "TerminateInstances": "

    Shuts down one or more instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

    Terminated instances remain visible after termination (for approximately one hour).

    By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

    For more information about troubleshooting, see Troubleshooting Terminating Your Instance in the Amazon Elastic Compute Cloud User Guide.

    ", + "UnassignPrivateIpAddresses": "

    Unassigns one or more secondary private IP addresses from a network interface.

    ", + "UnmonitorInstances": "

    Disables monitoring for a running instance. For more information about monitoring instances, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

    " + }, + "shapes": { + "AcceptVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "AcceptVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "AccountAttribute": { + "base": "

    Describes an account attribute.

    ", + "refs": { + "AccountAttributeList$member": null + } + }, + "AccountAttributeList": { + "base": null, + "refs": { + "DescribeAccountAttributesResult$AccountAttributes": "

    Information about one or more account attributes.

    " + } + }, + "AccountAttributeName": { + "base": null, + "refs": { + "AccountAttributeNameStringList$member": null + } + }, + "AccountAttributeNameStringList": { + "base": null, + "refs": { + "DescribeAccountAttributesRequest$AttributeNames": "

    One or more account attribute names.

    " + } + }, + "AccountAttributeValue": { + "base": "

    Describes a value of an account attribute.

    ", + "refs": { + "AccountAttributeValueList$member": null + } + }, + "AccountAttributeValueList": { + "base": null, + "refs": { + "AccountAttribute$AttributeValues": "

    One or more values for the account attribute.

    " + } + }, + "ActiveInstance": { + "base": "

    Describes a running instance in a Spot fleet.

    ", + "refs": { + "ActiveInstanceSet$member": null + } + }, + "ActiveInstanceSet": { + "base": null, + "refs": { + "DescribeSpotFleetInstancesResponse$ActiveInstances": "

    The running instances. Note that this list is refreshed periodically and might be out of date.

    " + } + }, + "Address": { + "base": "

    Describes an Elastic IP address.

    ", + "refs": { + "AddressList$member": null + } + }, + "AddressList": { + "base": null, + "refs": { + "DescribeAddressesResult$Addresses": "

    Information about one or more Elastic IP addresses.

    " + } + }, + "Affinity": { + "base": null, + "refs": { + "ModifyInstancePlacementRequest$Affinity": "

    The new affinity setting for the instance.

    " + } + }, + "AllocateAddressRequest": { + "base": null, + "refs": { + } + }, + "AllocateAddressResult": { + "base": null, + "refs": { + } + }, + "AllocateHostsRequest": { + "base": null, + "refs": { + } + }, + "AllocateHostsResult": { + "base": null, + "refs": { + } + }, + "AllocationIdList": { + "base": null, + "refs": { + "DescribeAddressesRequest$AllocationIds": "

    [EC2-VPC] One or more allocation IDs.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "AllocationState": { + "base": null, + "refs": { + "Host$State": "

    The Dedicated host's state.

    " + } + }, + "AllocationStrategy": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$AllocationStrategy": "

    Indicates how to allocate the target capacity across the Spot pools specified by the Spot fleet request. The default is lowestPrice.

    " + } + }, + "ArchitectureValues": { + "base": null, + "refs": { + "Image$Architecture": "

    The architecture of the image.

    ", + "ImportInstanceLaunchSpecification$Architecture": "

    The architecture of the instance.

    ", + "Instance$Architecture": "

    The architecture of the image.

    ", + "RegisterImageRequest$Architecture": "

    The architecture of the AMI.

    Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, the architecture specified in the manifest file.

    " + } + }, + "AssignPrivateIpAddressesRequest": { + "base": null, + "refs": { + } + }, + "AssociateAddressRequest": { + "base": null, + "refs": { + } + }, + "AssociateAddressResult": { + "base": null, + "refs": { + } + }, + "AssociateDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "AssociateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "AssociateRouteTableResult": { + "base": null, + "refs": { + } + }, + "AttachClassicLinkVpcRequest": { + "base": null, + "refs": { + } + }, + "AttachClassicLinkVpcResult": { + "base": null, + "refs": { + } + }, + "AttachInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "AttachNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "AttachNetworkInterfaceResult": { + "base": null, + "refs": { + } + }, + "AttachVolumeRequest": { + "base": null, + "refs": { + } + }, + "AttachVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "AttachVpnGatewayResult": { + "base": null, + "refs": { + } + }, + "AttachmentStatus": { + "base": null, + "refs": { + "EbsInstanceBlockDevice$Status": "

    The attachment state.

    ", + "InstanceNetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "InternetGatewayAttachment$State": "

    The current state of the attachment.

    ", + "NetworkInterfaceAttachment$Status": "

    The attachment state.

    ", + "VpcAttachment$State": "

    The current state of the attachment.

    " + } + }, + "AttributeBooleanValue": { + "base": "

    The value to use when a resource attribute accepts a Boolean value.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$SourceDestCheck": "

    Indicates whether source/destination checking is enabled.

    ", + "DescribeVolumeAttributeResult$AutoEnableIO": "

    The state of autoEnableIO attribute.

    ", + "DescribeVpcAttributeResult$EnableDnsSupport": "

    Indicates whether DNS resolution is enabled for the VPC. If this attribute is true, the Amazon DNS server resolves DNS hostnames for your instances to their corresponding IP addresses; otherwise, it does not.

    ", + "DescribeVpcAttributeResult$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If this attribute is true, instances in the VPC get DNS hostnames; otherwise, they do not.

    ", + "InstanceAttribute$DisableApiTermination": "

    If the value is true, you can't terminate the instance through the Amazon EC2 console, CLI, or API; otherwise, you can.

    ", + "InstanceAttribute$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O.

    ", + "InstanceAttribute$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$SourceDestCheck": "

    Specifies whether source/destination checking is enabled. A value of true means that checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT.

    ", + "ModifyInstanceAttributeRequest$DisableApiTermination": "

    If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot Instances.

    ", + "ModifyInstanceAttributeRequest$EbsOptimized": "

    Specifies whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$SourceDestCheck": "

    Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "ModifySubnetAttributeRequest$MapPublicIpOnLaunch": "

    Specify true to indicate that instances launched into the specified subnet should be assigned a public IP address.

    ", + "ModifyVolumeAttributeRequest$AutoEnableIO": "

    Indicates whether the volume should be auto-enabled for I/O operations.

    ", + "ModifyVpcAttributeRequest$EnableDnsSupport": "

    Indicates whether the DNS resolution is supported for the VPC. If enabled, queries to the Amazon provided DNS server at the 169.254.169.253 IP address, or the reserved IP address at the base of the VPC network range \"plus two\" will succeed. If disabled, the Amazon provided DNS service in the VPC that resolves public DNS hostnames to IP addresses is not enabled.

    You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute.

    ", + "ModifyVpcAttributeRequest$EnableDnsHostnames": "

    Indicates whether the instances launched in the VPC get DNS hostnames. If enabled, instances in the VPC get DNS hostnames; otherwise, they do not.

    You cannot modify the DNS resolution and DNS hostnames attributes in the same request. Use separate requests for each attribute. You can only enable DNS hostnames if you've enabled DNS support.

    " + } + }, + "AttributeValue": { + "base": "

    The value to use for a resource attribute.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Description": "

    The description of the network interface.

    ", + "DhcpConfigurationValueList$member": null, + "ImageAttribute$KernelId": "

    The kernel ID.

    ", + "ImageAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "ImageAttribute$Description": "

    A description for the AMI.

    ", + "ImageAttribute$SriovNetSupport": null, + "InstanceAttribute$InstanceType": "

    The instance type.

    ", + "InstanceAttribute$KernelId": "

    The kernel ID.

    ", + "InstanceAttribute$RamdiskId": "

    The RAM disk ID.

    ", + "InstanceAttribute$UserData": "

    The Base64-encoded MIME user data.

    ", + "InstanceAttribute$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "InstanceAttribute$RootDeviceName": "

    The name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "InstanceAttribute$SriovNetSupport": null, + "ModifyImageAttributeRequest$Description": "

    A description for the AMI.

    ", + "ModifyInstanceAttributeRequest$InstanceType": "

    Changes the instance type to the specified value. For more information, see Instance Types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

    ", + "ModifyInstanceAttributeRequest$Kernel": "

    Changes the instance's kernel to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$Ramdisk": "

    Changes the instance's RAM disk to the specified value. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB.

    ", + "ModifyInstanceAttributeRequest$InstanceInitiatedShutdownBehavior": "

    Specifies whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "ModifyInstanceAttributeRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking for the instance.

    There is no way to disable enhanced networking at this time.

    This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable.

    ", + "ModifyNetworkInterfaceAttributeRequest$Description": "

    A description for the network interface.

    " + } + }, + "AuthorizeSecurityGroupEgressRequest": { + "base": null, + "refs": { + } + }, + "AuthorizeSecurityGroupIngressRequest": { + "base": null, + "refs": { + } + }, + "AutoPlacement": { + "base": null, + "refs": { + "AllocateHostsRequest$AutoPlacement": "

    This is enabled by default. This property allows instances to be automatically placed onto available Dedicated hosts, when you are launching instances without specifying a host ID.

    Default: Enabled

    ", + "Host$AutoPlacement": "

    Whether auto-placement is on or off.

    ", + "ModifyHostsRequest$AutoPlacement": "

    Specify whether to enable or disable auto-placement.

    " + } + }, + "AvailabilityZone": { + "base": "

    Describes an Availability Zone.

    ", + "refs": { + "AvailabilityZoneList$member": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesResult$AvailabilityZones": "

    Information about one or more Availability Zones.

    " + } + }, + "AvailabilityZoneMessage": { + "base": "

    Describes a message about an Availability Zone.

    ", + "refs": { + "AvailabilityZoneMessageList$member": null + } + }, + "AvailabilityZoneMessageList": { + "base": null, + "refs": { + "AvailabilityZone$Messages": "

    Any messages about the Availability Zone.

    " + } + }, + "AvailabilityZoneState": { + "base": null, + "refs": { + "AvailabilityZone$State": "

    The state of the Availability Zone.

    " + } + }, + "AvailableCapacity": { + "base": "

    The capacity information for instances launched onto the Dedicated host.

    ", + "refs": { + "Host$AvailableCapacity": "

    The number of new instances that can be launched onto the Dedicated host.

    " + } + }, + "AvailableInstanceCapacityList": { + "base": null, + "refs": { + "AvailableCapacity$AvailableInstanceCapacity": "

    The total number of instances that the Dedicated host supports.

    " + } + }, + "BatchState": { + "base": null, + "refs": { + "CancelSpotFleetRequestsSuccessItem$CurrentSpotFleetRequestState": "

    The current state of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$PreviousSpotFleetRequestState": "

    The previous state of the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestState": "

    The state of the Spot fleet request.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "BlobAttributeValue$Value": null, + "ImportKeyPairRequest$PublicKeyMaterial": "

    The public key. You must base64 encode the public key material before sending it to AWS.

    ", + "S3Storage$UploadPolicy": "

    A Base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission to upload items into Amazon S3 on your behalf.

    " + } + }, + "BlobAttributeValue": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$UserData": "

    Changes the instance's user data to the specified value.

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "BlockDeviceMappingList$member": null, + "BlockDeviceMappingRequestList$member": null + } + }, + "BlockDeviceMappingList": { + "base": null, + "refs": { + "Image$BlockDeviceMappings": "

    Any block device mapping entries.

    ", + "ImageAttribute$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "LaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "RequestSpotLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "SpotFleetLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    " + } + }, + "BlockDeviceMappingRequestList": { + "base": null, + "refs": { + "CreateImageRequest$BlockDeviceMappings": "

    Information about one or more block device mappings.

    ", + "RegisterImageRequest$BlockDeviceMappings": "

    One or more block device mapping entries.

    ", + "RunInstancesRequest$BlockDeviceMappings": "

    The block device mapping.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AllocateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssignPrivateIpAddressesRequest$AllowReassignment": "

    Indicates whether to allow an IP address that is already assigned to another network interface or instance to be reassigned to the specified network interface.

    ", + "AssociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateAddressRequest$AllowReassociation": "

    [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic IP address that is already associated with an instance or network interface to be reassociated with the specified instance or network interface. Otherwise, the operation fails. In a VPC in an EC2-VPC-only account, reassociation is automatic, therefore you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

    ", + "AssociateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AssociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "AttachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AttributeBooleanValue$Value": "

    Valid values are true or false.

    ", + "AuthorizeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "AuthorizeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "BundleInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelBundleTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelConversionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelImportTaskRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CancelSpotFleetRequestsRequest$TerminateInstances": "

    Indicates whether to terminate instances for a Spot fleet request if it is canceled successfully.

    ", + "CancelSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ClassicLinkDnsSupport$ClassicLinkDnsSupported": "

    Indicates whether ClassicLink DNS support is enabled for the VPC.

    ", + "ConfirmProductInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ConfirmProductInstanceResult$Return": "

    The return value of the request. Returns true if the specified product code is owned by the requester and associated with the specified instance.

    ", + "CopyImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopyImageRequest$Encrypted": "

    Specifies whether the destination snapshots of the copied image should be encrypted. The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopySnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CopySnapshotRequest$Encrypted": "

    Specifies whether the destination snapshot should be encrypted. There is no way to create an unencrypted snapshot copy from an encrypted snapshot; however, you can encrypt a copy of an unencrypted snapshot with this flag. The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateImageRequest$NoReboot": "

    By default, this parameter is set to false, which means Amazon EC2 attempts to shut down the instance cleanly before image creation and then reboots the instance. When the parameter is set to true, Amazon EC2 doesn't shut down the instance before creating the image. When this option is used, file system integrity on the created image can't be guaranteed.

    ", + "CreateInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkAclEntryRequest$Egress": "

    Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet).

    ", + "CreateNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreatePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateRouteResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "CreateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVolumeRequest$Encrypted": "

    Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are automatically encrypted. There is no way to create an encrypted volume from an unencrypted snapshot or vice versa. If your AMI uses encrypted volumes, you can only launch it on supported instance types. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "CreateVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteCustomerGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkAclEntryRequest$Egress": "

    Indicates whether the rule is an egress rule.

    ", + "DeleteNetworkAclRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeletePlacementGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSecurityGroupRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteSubnetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DeleteVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeleteVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DeregisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAccountAttributesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeAvailabilityZonesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeBundleTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeClassicLinkInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeConversionTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeCustomerGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeDhcpOptionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImagesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportImageTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeImportSnapshotTasksRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInstanceStatusRequest$IncludeAllInstances": "

    When true, includes the health status for all instances. When false, includes the health status for running instances only.

    Default: false

    ", + "DescribeInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeInternetGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeKeyPairsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeMovingAddressesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkAclsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeNetworkInterfacesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePlacementGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribePrefixListsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRegionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeReservedInstancesOfferingsRequest$IncludeMarketplace": "

    Include Reserved Instance Marketplace offerings in the response.

    ", + "DescribeReservedInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeRouteTablesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeScheduledInstanceAvailabilityRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSecurityGroupsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSnapshotsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotDatafeedSubscriptionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotFleetRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotInstanceRequestsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSpotPriceHistoryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeSubnetsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeTagsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumeStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVolumesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointServicesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcEndpointsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcPeeringConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpcsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnConnectionsRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DescribeVpnGatewaysRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachClassicLinkVpcResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DetachInternetGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachNetworkInterfaceRequest$Force": "

    Specifies whether to force a detachment.

    ", + "DetachVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DetachVolumeRequest$Force": "

    Forces detachment if the previous detachment attempt did not occur cleanly (for example, logging into an instance, unmounting the volume, and detaching normally). This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance won't have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures.

    ", + "DetachVpnGatewayRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkDnsSupportResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DisableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "DisassociateAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "DisassociateRouteTableRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EbsBlockDevice$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "EbsBlockDevice$Encrypted": "

    Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption.

    ", + "EbsInstanceBlockDevice$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EbsInstanceBlockDeviceSpecification$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "EnableVolumeIORequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkDnsSupportResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "EnableVpcClassicLinkRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "EnableVpcClassicLinkResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "GetConsoleOutputRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "GetPasswordDataRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "IdFormat$UseLongIds": "

    Indicates whether longer IDs (17-character IDs) are enabled for the resource.

    ", + "Image$Public": "

    Indicates whether the image has public launch permissions. The value is true if this image has public launch permissions or false if it has only implicit and explicit launch permissions.

    ", + "ImportImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportInstanceLaunchSpecification$Monitoring": "

    Indicates whether monitoring is enabled.

    ", + "ImportInstanceRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportKeyPairRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportSnapshotRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ImportVolumeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Instance$SourceDestCheck": "

    Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the instance to perform NAT. For more information, see NAT Instances in the Amazon Virtual Private Cloud User Guide.

    ", + "Instance$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    ", + "InstanceNetworkInterface$SourceDestCheck": "

    Indicates whether to validate network traffic to or from this network interface.

    ", + "InstanceNetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "InstanceNetworkInterfaceSpecification$DeleteOnTermination": "

    If set to true, the interface is deleted when the instance is terminated. You can specify true only if creating a new network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$AssociatePublicIpAddress": "

    Indicates whether to assign a public IP address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

    ", + "InstancePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "LaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "ModifyIdFormatRequest$UseLongIds": "

    Indicate whether the resource should use longer IDs (17-character IDs).

    ", + "ModifyImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyInstancePlacementResult$Return": "

    Is true if the request succeeds, and an error otherwise.

    ", + "ModifyNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifySnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifySpotFleetRequestResponse$Return": "

    Is true if the request succeeds, and an error otherwise.

    ", + "ModifyVolumeAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ModifyVpcEndpointRequest$ResetPolicy": "

    Specify true to reset the policy document to the default policy. The default policy allows access to the service.

    ", + "ModifyVpcEndpointResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "MonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "MoveAddressToVpcRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "NetworkAcl$IsDefault": "

    Indicates whether this is the default network ACL for the VPC.

    ", + "NetworkAclEntry$Egress": "

    Indicates whether the rule is an egress rule (applied to traffic leaving the subnet).

    ", + "NetworkInterface$RequesterManaged": "

    Indicates whether the network interface is being managed by AWS.

    ", + "NetworkInterface$SourceDestCheck": "

    Indicates whether traffic to or from the instance is validated.

    ", + "NetworkInterfaceAttachment$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfaceAttachmentChanges$DeleteOnTermination": "

    Indicates whether the network interface is deleted when the instance is terminated.

    ", + "NetworkInterfacePrivateIpAddress$Primary": "

    Indicates whether this IP address is the primary private IP address of the network interface.

    ", + "PriceSchedule$Active": "

    The current price schedule, as determined by the term remaining for the Reserved Instance in the listing.

    A specific price schedule is always in effect, but only one price schedule can be active at any time. Take, for example, a Reserved Instance listing that has five months remaining in its term. When you specify price schedules for five months and two months, this means that schedule 1, covering the first three months of the remaining term, will be active during months 5, 4, and 3. Then schedule 2, covering the last two months of the term, will be active for months 2 and 1.

    ", + "PrivateIpAddressSpecification$Primary": "

    Indicates whether the private IP address is the primary private IP address. Only one IP address can be designated as primary.

    ", + "PurchaseReservedInstancesOfferingRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "PurchaseScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RebootInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RegisterImageRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RejectVpcPeeringConnectionResult$Return": "

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "ReleaseAddressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceNetworkAclEntryRequest$Egress": "

    Indicates whether to replace the egress rule.

    Default: If no value is specified, we replace the ingress rule.

    ", + "ReplaceRouteRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReplaceRouteTableAssociationRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ReportInstanceStatusRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotFleetRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RequestSpotLaunchSpecification$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "ReservedInstancesOffering$Marketplace": "

    Indicates whether the offering is available through the Reserved Instance Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, this is true.

    ", + "ResetImageAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetInstanceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetNetworkInterfaceAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ResetSnapshotAttributeRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RestoreAddressToClassicRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupEgressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RevokeSecurityGroupIngressRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RouteTableAssociation$Main": "

    Indicates whether this is the main route table.

    ", + "RunInstancesMonitoringEnabled$Enabled": "

    Indicates whether monitoring is enabled for the instance.

    ", + "RunInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "RunInstancesRequest$DisableApiTermination": "

    If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this parameter to true and then later want to be able to terminate the instance, you must first change the value of the disableApiTermination attribute to false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance.

    Default: false

    ", + "RunInstancesRequest$EbsOptimized": "

    Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

    ", + "RunScheduledInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "ScheduledInstanceRecurrence$OccurrenceRelativeToEnd": "

    Indicates whether the occurrence is relative to the end of the specified week or month.

    ", + "ScheduledInstanceRecurrenceRequest$OccurrenceRelativeToEnd": "

    Indicates whether the occurrence is relative to the end of the specified week or month. You can't specify this value with a daily schedule.

    ", + "ScheduledInstancesEbs$DeleteOnTermination": "

    Indicates whether the volume is deleted on instance termination.

    ", + "ScheduledInstancesEbs$Encrypted": "

    Indicates whether the volume is encrypted. You can attach encrypted volumes only to instances that support them.

    ", + "ScheduledInstancesLaunchSpecification$EbsOptimized": "

    Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.

    Default: false

    ", + "ScheduledInstancesMonitoring$Enabled": "

    Indicates whether monitoring is enabled.

    ", + "ScheduledInstancesNetworkInterface$AssociatePublicIpAddress": "

    Indicates whether to assign a public IP address to instances launched in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

    ", + "ScheduledInstancesNetworkInterface$DeleteOnTermination": "

    Indicates whether to delete the interface when the instance is terminated.

    ", + "ScheduledInstancesPrivateIpAddressConfig$Primary": "

    Indicates whether this is a primary IP address. Otherwise, this is a secondary IP address.

    ", + "Snapshot$Encrypted": "

    Indicates whether the snapshot is encrypted.

    ", + "SpotFleetLaunchSpecification$EbsOptimized": "

    Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

    Default: false

    ", + "SpotFleetMonitoring$Enabled": "

    Enables monitoring for the instance.

    Default: false

    ", + "SpotFleetRequestConfigData$TerminateInstancesWithExpiration": "

    Indicates whether running Spot instances should be terminated when the Spot fleet request expires.

    ", + "StartInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "StopInstancesRequest$Force": "

    Forces the instances to stop. The instances do not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances.

    Default: false

    ", + "Subnet$DefaultForAz": "

    Indicates whether this is the default subnet for the Availability Zone.

    ", + "Subnet$MapPublicIpOnLaunch": "

    Indicates whether instances launched in this subnet receive a public IP address.

    ", + "TerminateInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "UnmonitorInstancesRequest$DryRun": "

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", + "Volume$Encrypted": "

    Indicates whether the volume will be encrypted.

    ", + "VolumeAttachment$DeleteOnTermination": "

    Indicates whether the EBS volume is deleted on instance termination.

    ", + "Vpc$IsDefault": "

    Indicates whether the VPC is the default VPC.

    ", + "VpcClassicLink$ClassicLinkEnabled": "

    Indicates whether the VPC is enabled for ClassicLink.

    ", + "VpnConnectionOptions$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    ", + "VpnConnectionOptionsSpecification$StaticRoutesOnly": "

    Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.

    " + } + }, + "BundleIdStringList": { + "base": null, + "refs": { + "DescribeBundleTasksRequest$BundleIds": "

    One or more bundle task IDs.

    Default: Describes all your bundle tasks.

    " + } + }, + "BundleInstanceRequest": { + "base": null, + "refs": { + } + }, + "BundleInstanceResult": { + "base": null, + "refs": { + } + }, + "BundleTask": { + "base": "

    Describes a bundle task.

    ", + "refs": { + "BundleInstanceResult$BundleTask": "

    Information about the bundle task.

    ", + "BundleTaskList$member": null, + "CancelBundleTaskResult$BundleTask": "

    Information about the bundle task.

    " + } + }, + "BundleTaskError": { + "base": "

    Describes an error for BundleInstance.

    ", + "refs": { + "BundleTask$BundleTaskError": "

    If the task fails, a description of the error.

    " + } + }, + "BundleTaskList": { + "base": null, + "refs": { + "DescribeBundleTasksResult$BundleTasks": "

    Information about one or more bundle tasks.

    " + } + }, + "BundleTaskState": { + "base": null, + "refs": { + "BundleTask$State": "

    The state of the task.

    " + } + }, + "CancelBatchErrorCode": { + "base": null, + "refs": { + "CancelSpotFleetRequestsError$Code": "

    The error code.

    " + } + }, + "CancelBundleTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelBundleTaskResult": { + "base": null, + "refs": { + } + }, + "CancelConversionRequest": { + "base": null, + "refs": { + } + }, + "CancelExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelImportTaskRequest": { + "base": null, + "refs": { + } + }, + "CancelImportTaskResult": { + "base": null, + "refs": { + } + }, + "CancelReservedInstancesListingRequest": { + "base": null, + "refs": { + } + }, + "CancelReservedInstancesListingResult": { + "base": null, + "refs": { + } + }, + "CancelSpotFleetRequestsError": { + "base": "

    Describes a Spot fleet error.

    ", + "refs": { + "CancelSpotFleetRequestsErrorItem$Error": "

    The error.

    " + } + }, + "CancelSpotFleetRequestsErrorItem": { + "base": "

    Describes a Spot fleet request that was not successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsErrorSet$member": null + } + }, + "CancelSpotFleetRequestsErrorSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$UnsuccessfulFleetRequests": "

    Information about the Spot fleet requests that are not successfully canceled.

    " + } + }, + "CancelSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsResponse": { + "base": "

    Contains the output of CancelSpotFleetRequests.

    ", + "refs": { + } + }, + "CancelSpotFleetRequestsSuccessItem": { + "base": "

    Describes a Spot fleet request that was successfully canceled.

    ", + "refs": { + "CancelSpotFleetRequestsSuccessSet$member": null + } + }, + "CancelSpotFleetRequestsSuccessSet": { + "base": null, + "refs": { + "CancelSpotFleetRequestsResponse$SuccessfulFleetRequests": "

    Information about the Spot fleet requests that are successfully canceled.

    " + } + }, + "CancelSpotInstanceRequestState": { + "base": null, + "refs": { + "CancelledSpotInstanceRequest$State": "

    The state of the Spot instance request.

    " + } + }, + "CancelSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelSpotInstanceRequestsResult": { + "base": "

    Contains the output of CancelSpotInstanceRequests.

    ", + "refs": { + } + }, + "CancelledSpotInstanceRequest": { + "base": "

    Describes a request to cancel a Spot instance.

    ", + "refs": { + "CancelledSpotInstanceRequestList$member": null + } + }, + "CancelledSpotInstanceRequestList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsResult$CancelledSpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "ClassicLinkDnsSupport": { + "base": "

    Describes the ClassicLink DNS support status of a VPC.

    ", + "refs": { + "ClassicLinkDnsSupportList$member": null + } + }, + "ClassicLinkDnsSupportList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkDnsSupportResult$Vpcs": "

    Information about the ClassicLink DNS support status of the VPCs.

    " + } + }, + "ClassicLinkInstance": { + "base": "

    Describes a linked EC2-Classic instance.

    ", + "refs": { + "ClassicLinkInstanceList$member": null + } + }, + "ClassicLinkInstanceList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesResult$Instances": "

    Information about one or more linked EC2-Classic instances.

    " + } + }, + "ClientData": { + "base": "

    Describes the client-specific data.

    ", + "refs": { + "ImportImageRequest$ClientData": "

    The client-specific data.

    ", + "ImportSnapshotRequest$ClientData": "

    The client-specific data.

    " + } + }, + "ConfirmProductInstanceRequest": { + "base": null, + "refs": { + } + }, + "ConfirmProductInstanceResult": { + "base": null, + "refs": { + } + }, + "ContainerFormat": { + "base": null, + "refs": { + "ExportToS3Task$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    ", + "ExportToS3TaskSpecification$ContainerFormat": "

    The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is exported.

    " + } + }, + "ConversionIdStringList": { + "base": null, + "refs": { + "DescribeConversionTasksRequest$ConversionTaskIds": "

    One or more conversion task IDs.

    " + } + }, + "ConversionTask": { + "base": "

    Describes a conversion task.

    ", + "refs": { + "DescribeConversionTaskList$member": null, + "ImportInstanceResult$ConversionTask": "

    Information about the conversion task.

    ", + "ImportVolumeResult$ConversionTask": "

    Information about the conversion task.

    " + } + }, + "ConversionTaskState": { + "base": null, + "refs": { + "ConversionTask$State": "

    The state of the conversion task.

    " + } + }, + "CopyImageRequest": { + "base": null, + "refs": { + } + }, + "CopyImageResult": { + "base": null, + "refs": { + } + }, + "CopySnapshotRequest": { + "base": null, + "refs": { + } + }, + "CopySnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateCustomerGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateCustomerGatewayResult": { + "base": null, + "refs": { + } + }, + "CreateDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "CreateDhcpOptionsResult": { + "base": null, + "refs": { + } + }, + "CreateFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "CreateFlowLogsResult": { + "base": null, + "refs": { + } + }, + "CreateImageRequest": { + "base": null, + "refs": { + } + }, + "CreateImageResult": { + "base": null, + "refs": { + } + }, + "CreateInstanceExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CreateInstanceExportTaskResult": { + "base": null, + "refs": { + } + }, + "CreateInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateInternetGatewayResult": { + "base": null, + "refs": { + } + }, + "CreateKeyPairRequest": { + "base": null, + "refs": { + } + }, + "CreateNatGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateNatGatewayResult": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkAclResult": { + "base": null, + "refs": { + } + }, + "CreateNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "CreateNetworkInterfaceResult": { + "base": null, + "refs": { + } + }, + "CreatePlacementGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateReservedInstancesListingRequest": { + "base": null, + "refs": { + } + }, + "CreateReservedInstancesListingResult": { + "base": null, + "refs": { + } + }, + "CreateRouteRequest": { + "base": null, + "refs": { + } + }, + "CreateRouteResult": { + "base": null, + 
"refs": { + } + }, + "CreateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "CreateRouteTableResult": { + "base": null, + "refs": { + } + }, + "CreateSecurityGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateSnapshotRequest": { + "base": null, + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of CreateSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "CreateSubnetRequest": { + "base": null, + "refs": { + } + }, + "CreateSubnetResult": { + "base": null, + "refs": { + } + }, + "CreateTagsRequest": { + "base": null, + "refs": { + } + }, + "CreateVolumePermission": { + "base": "

    Describes the user or group to be added or removed from the permissions for a volume.

    ", + "refs": { + "CreateVolumePermissionList$member": null + } + }, + "CreateVolumePermissionList": { + "base": null, + "refs": { + "CreateVolumePermissionModifications$Add": "

    Adds a specific AWS account ID or group to a volume's list of create volume permissions.

    ", + "CreateVolumePermissionModifications$Remove": "

    Removes a specific AWS account ID or group from a volume's list of create volume permissions.

    ", + "DescribeSnapshotAttributeResult$CreateVolumePermissions": "

    A list of permissions for creating volumes from the snapshot.

    " + } + }, + "CreateVolumePermissionModifications": { + "base": "

    Describes modifications to the permissions for a volume.

    ", + "refs": { + "ModifySnapshotAttributeRequest$CreateVolumePermission": "

    A JSON representation of the snapshot attribute modification.

    " + } + }, + "CreateVolumeRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcEndpointRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcEndpointResult": { + "base": null, + "refs": { + } + }, + "CreateVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "CreateVpcRequest": { + "base": null, + "refs": { + } + }, + "CreateVpcResult": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionResult": { + "base": null, + "refs": { + } + }, + "CreateVpnConnectionRouteRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "CreateVpnGatewayResult": { + "base": null, + "refs": { + } + }, + "CurrencyCodeValues": { + "base": null, + "refs": { + "PriceSchedule$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "PriceScheduleSpecification$CurrencyCode": "

    The currency for transacting the Reserved Instance resale. At this time, the only supported currency is USD.

    ", + "ReservedInstanceLimitPrice$CurrencyCode": "

    The currency in which the limitPrice amount is specified. At this time, the only supported currency is USD.

    ", + "ReservedInstances$CurrencyCode": "

    The currency of the Reserved Instance. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    ", + "ReservedInstancesOffering$CurrencyCode": "

    The currency of the Reserved Instance offering you are purchasing. It's specified using ISO 4217 standard currency codes. At this time, the only supported currency is USD.

    " + } + }, + "CustomerGateway": { + "base": "

    Describes a customer gateway.

    ", + "refs": { + "CreateCustomerGatewayResult$CustomerGateway": "

    Information about the customer gateway.

    ", + "CustomerGatewayList$member": null + } + }, + "CustomerGatewayIdStringList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysRequest$CustomerGatewayIds": "

    One or more customer gateway IDs.

    Default: Describes all your customer gateways.

    " + } + }, + "CustomerGatewayList": { + "base": null, + "refs": { + "DescribeCustomerGatewaysResult$CustomerGateways": "

    Information about one or more customer gateways.

    " + } + }, + "DatafeedSubscriptionState": { + "base": null, + "refs": { + "SpotDatafeedSubscription$State": "

    The state of the Spot instance data feed subscription.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "BundleTask$StartTime": "

    The time this task started.

    ", + "BundleTask$UpdateTime": "

    The time of the most recent update for the task.

    ", + "ClientData$UploadStart": "

    The time that the disk upload starts.

    ", + "ClientData$UploadEnd": "

    The time that the disk upload ends.

    ", + "DescribeSpotFleetRequestHistoryRequest$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$StartTime": "

    The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotFleetRequestHistoryResponse$LastEvaluatedTime": "

    The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). All records up to this time were retrieved.

    If nextToken indicates that there are more results, this value is not present.

    ", + "DescribeSpotPriceHistoryRequest$StartTime": "

    The date and time, up to the past 90 days, from which to start retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "DescribeSpotPriceHistoryRequest$EndTime": "

    The date and time, up to the current date, from which to stop retrieving the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "EbsInstanceBlockDevice$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "FlowLog$CreationTime": "

    The date and time the flow log was created.

    ", + "GetConsoleOutputResult$Timestamp": "

    The time the output was last updated.

    ", + "GetPasswordDataResult$Timestamp": "

    The time the data was last updated.

    ", + "HistoryRecord$Timestamp": "

    The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "IdFormat$Deadline": "

    The date in UTC at which you are permanently switched over to using longer IDs.

    ", + "Instance$LaunchTime": "

    The time the instance was launched.

    ", + "InstanceNetworkInterfaceAttachment$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "InstanceStatusDetails$ImpairedSince": "

    The time when a status check failed. For an instance that was launched and impaired, this is the time when the instance was launched.

    ", + "InstanceStatusEvent$NotBefore": "

    The earliest scheduled start time for the event.

    ", + "InstanceStatusEvent$NotAfter": "

    The latest scheduled end time for the event.

    ", + "NatGateway$CreateTime": "

    The date and time the NAT gateway was created.

    ", + "NatGateway$DeleteTime": "

    The date and time the NAT gateway was deleted, if applicable.

    ", + "NetworkInterfaceAttachment$AttachTime": "

    The timestamp indicating when the attachment initiated.

    ", + "ReportInstanceStatusRequest$StartTime": "

    The time at which the reported instance health state began.

    ", + "ReportInstanceStatusRequest$EndTime": "

    The time at which the reported instance health state ended.

    ", + "RequestSpotInstancesRequest$ValidFrom": "

    The start date of the request. If this is a one-time request, the request becomes active at this date and time and remains active until all instances launch, the request expires, or the request is canceled. If the request is persistent, the request becomes active at this date and time and remains active until it expires or is canceled.

    Default: The request is effective indefinitely.

    ", + "RequestSpotInstancesRequest$ValidUntil": "

    The end date of the request. If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date and time is reached.

    Default: The request is effective indefinitely.

    ", + "ReservedInstances$Start": "

    The date and time the Reserved Instance started.

    ", + "ReservedInstances$End": "

    The time when the Reserved Instance expires.

    ", + "ReservedInstancesListing$CreateDate": "

    The time the listing was created.

    ", + "ReservedInstancesListing$UpdateDate": "

    The last modified timestamp of the listing.

    ", + "ReservedInstancesModification$CreateDate": "

    The time when the modification request was created.

    ", + "ReservedInstancesModification$UpdateDate": "

    The time when the modification request was last updated.

    ", + "ReservedInstancesModification$EffectiveDate": "

    The time for the modification to become effective.

    ", + "ScheduledInstance$PreviousSlotEndTime": "

    The time that the previous schedule ended or will end.

    ", + "ScheduledInstance$NextSlotStartTime": "

    The time for the next schedule to start.

    ", + "ScheduledInstance$TermStartDate": "

    The start date for the Scheduled Instance.

    ", + "ScheduledInstance$TermEndDate": "

    The end date for the Scheduled Instance.

    ", + "ScheduledInstance$CreateDate": "

    The date when the Scheduled Instance was purchased.

    ", + "ScheduledInstanceAvailability$FirstSlotStartTime": "

    The time period for the first schedule to start.

    ", + "SlotDateTimeRangeRequest$EarliestTime": "

    The earliest date and time, in UTC, for the Scheduled Instance to start.

    ", + "SlotDateTimeRangeRequest$LatestTime": "

    The latest date and time, in UTC, for the Scheduled Instance to start. This value must be later than or equal to the earliest date and at most three months in the future.

    ", + "SlotStartTimeRangeRequest$EarliestTime": "

    The earliest date and time, in UTC, for the Scheduled Instance to start.

    ", + "SlotStartTimeRangeRequest$LatestTime": "

    The latest date and time, in UTC, for the Scheduled Instance to start.

    ", + "Snapshot$StartTime": "

    The time stamp when the snapshot was initiated.

    ", + "SpotFleetRequestConfig$CreateTime": "

    The creation date and time of the request.

    ", + "SpotFleetRequestConfigData$ValidFrom": "

    The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately.

    ", + "SpotFleetRequestConfigData$ValidUntil": "

    The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request.

    ", + "SpotInstanceRequest$ValidFrom": "

    The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). The request becomes active at this date and time.

    ", + "SpotInstanceRequest$ValidUntil": "

    The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). If this is a one-time request, it remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached.

    ", + "SpotInstanceRequest$CreateTime": "

    The date and time when the Spot instance request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotInstanceStatus$UpdateTime": "

    The date and time of the most recent status update, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "SpotPrice$Timestamp": "

    The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

    ", + "VgwTelemetry$LastStatusChange": "

    The date and time of the last change in status.

    ", + "Volume$CreateTime": "

    The time stamp when volume creation was initiated.

    ", + "VolumeAttachment$AttachTime": "

    The time stamp when the attachment initiated.

    ", + "VolumeStatusEvent$NotBefore": "

    The earliest start time of the event.

    ", + "VolumeStatusEvent$NotAfter": "

    The latest end time of the event.

    ", + "VpcEndpoint$CreationTimestamp": "

    The date and time the VPC endpoint was created.

    ", + "VpcPeeringConnection$ExpirationTime": "

    The time that an unaccepted VPC peering connection will expire.

    " + } + }, + "DeleteCustomerGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeleteDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "DeleteFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "DeleteFlowLogsResult": { + "base": null, + "refs": { + } + }, + "DeleteInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeleteKeyPairRequest": { + "base": null, + "refs": { + } + }, + "DeleteNatGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeleteNatGatewayResult": { + "base": null, + "refs": { + } + }, + "DeleteNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "DeleteNetworkAclRequest": { + "base": null, + "refs": { + } + }, + "DeleteNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "DeletePlacementGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteRouteRequest": { + "base": null, + "refs": { + } + }, + "DeleteRouteTableRequest": { + "base": null, + "refs": { + } + }, + "DeleteSecurityGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteSnapshotRequest": { + "base": null, + "refs": { + } + }, + "DeleteSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DeleteSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DeleteSubnetRequest": { + "base": null, + "refs": { + } + }, + "DeleteTagsRequest": { + "base": null, + "refs": { + } + }, + "DeleteVolumeRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcEndpointsRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcEndpointsResult": { + "base": null, + "refs": { + } + }, + "DeleteVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "DeleteVpcRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnConnectionRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnConnectionRouteRequest": { + "base": null, + "refs": { + } + }, + "DeleteVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeregisterImageRequest": { + "base": null, + "refs": { + } + }, + "DescribeAccountAttributesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAccountAttributesResult": { + "base": null, + "refs": { + } + }, + "DescribeAddressesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAddressesResult": { + "base": null, + "refs": { + } + }, + "DescribeAvailabilityZonesRequest": { + "base": null, + "refs": { + } + }, + "DescribeAvailabilityZonesResult": { + "base": null, + "refs": { + } + }, + "DescribeBundleTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeBundleTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeClassicLinkInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeClassicLinkInstancesResult": { + "base": null, + "refs": { + } + }, + "DescribeConversionTaskList": { + "base": null, + "refs": { + "DescribeConversionTasksResult$ConversionTasks": "

    Information about the conversion tasks.

    " + } + }, + "DescribeConversionTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeConversionTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeCustomerGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeCustomerGatewaysResult": { + "base": null, + "refs": { + } + }, + "DescribeDhcpOptionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDhcpOptionsResult": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeFlowLogsRequest": { + "base": null, + "refs": { + } + }, + "DescribeFlowLogsResult": { + "base": null, + "refs": { + } + }, + "DescribeHostsRequest": { + "base": null, + "refs": { + } + }, + "DescribeHostsResult": { + "base": null, + "refs": { + } + }, + "DescribeIdFormatRequest": { + "base": null, + "refs": { + } + }, + "DescribeIdFormatResult": { + "base": null, + "refs": { + } + }, + "DescribeImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeImagesRequest": { + "base": null, + "refs": { + } + }, + "DescribeImagesResult": { + "base": null, + "refs": { + } + }, + "DescribeImportImageTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeImportImageTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeImportSnapshotTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeImportSnapshotTasksResult": { + "base": null, + "refs": { + } + }, + "DescribeInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstanceStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstanceStatusResult": { + "base": null, + "refs": { + } + }, + "DescribeInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstancesResult": { + "base": null, + "refs": { + } + }, + "DescribeInternetGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeInternetGatewaysResult": { + "base": null, 
+ "refs": { + } + }, + "DescribeKeyPairsRequest": { + "base": null, + "refs": { + } + }, + "DescribeKeyPairsResult": { + "base": null, + "refs": { + } + }, + "DescribeMovingAddressesRequest": { + "base": null, + "refs": { + } + }, + "DescribeMovingAddressesResult": { + "base": null, + "refs": { + } + }, + "DescribeNatGatewaysRequest": { + "base": null, + "refs": { + } + }, + "DescribeNatGatewaysResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkAclsRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkAclsResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfaceAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfacesRequest": { + "base": null, + "refs": { + } + }, + "DescribeNetworkInterfacesResult": { + "base": null, + "refs": { + } + }, + "DescribePlacementGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribePlacementGroupsResult": { + "base": null, + "refs": { + } + }, + "DescribePrefixListsRequest": { + "base": null, + "refs": { + } + }, + "DescribePrefixListsResult": { + "base": null, + "refs": { + } + }, + "DescribeRegionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeRegionsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesListingsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesListingsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesModificationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesModificationsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesOfferingsRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesOfferingsResult": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeReservedInstancesResult": { + "base": null, + "refs": { + } 
+ }, + "DescribeRouteTablesRequest": { + "base": null, + "refs": { + } + }, + "DescribeRouteTablesResult": { + "base": null, + "refs": { + } + }, + "DescribeScheduledInstanceAvailabilityRequest": { + "base": "

    Contains the parameters for DescribeScheduledInstanceAvailability.

    ", + "refs": { + } + }, + "DescribeScheduledInstanceAvailabilityResult": { + "base": "

    Contains the output of DescribeScheduledInstanceAvailability.

    ", + "refs": { + } + }, + "DescribeScheduledInstancesRequest": { + "base": "

    Contains the parameters for DescribeScheduledInstances.

    ", + "refs": { + } + }, + "DescribeScheduledInstancesResult": { + "base": "

    Contains the output of DescribeScheduledInstances.

    ", + "refs": { + } + }, + "DescribeSecurityGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSecurityGroupsResult": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotsResult": { + "base": null, + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionRequest": { + "base": "

    Contains the parameters for DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotDatafeedSubscriptionResult": { + "base": "

    Contains the output of DescribeSpotDatafeedSubscription.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetInstancesResponse": { + "base": "

    Contains the output of DescribeSpotFleetInstances.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestHistoryResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequestHistory.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotFleetRequestsResponse": { + "base": "

    Contains the output of DescribeSpotFleetRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsRequest": { + "base": "

    Contains the parameters for DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotInstanceRequestsResult": { + "base": "

    Contains the output of DescribeSpotInstanceRequests.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryRequest": { + "base": "

    Contains the parameters for DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeSpotPriceHistoryResult": { + "base": "

    Contains the output of DescribeSpotPriceHistory.

    ", + "refs": { + } + }, + "DescribeSubnetsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSubnetsResult": { + "base": null, + "refs": { + } + }, + "DescribeTagsRequest": { + "base": null, + "refs": { + } + }, + "DescribeTagsResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumeAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumeAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumeStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumeStatusResult": { + "base": null, + "refs": { + } + }, + "DescribeVolumesRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumesResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcAttributeRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcAttributeResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcClassicLinkDnsSupportRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcClassicLinkDnsSupportResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointServicesRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointServicesResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcEndpointsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcPeeringConnectionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcPeeringConnectionsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpcsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpcsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpnConnectionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeVpnConnectionsResult": { + "base": null, + "refs": { + } + }, + "DescribeVpnGatewaysRequest": { + "base": null, + "refs": { + } + }, + 
"DescribeVpnGatewaysResult": { + "base": null, + "refs": { + } + }, + "DetachClassicLinkVpcRequest": { + "base": null, + "refs": { + } + }, + "DetachClassicLinkVpcResult": { + "base": null, + "refs": { + } + }, + "DetachInternetGatewayRequest": { + "base": null, + "refs": { + } + }, + "DetachNetworkInterfaceRequest": { + "base": null, + "refs": { + } + }, + "DetachVolumeRequest": { + "base": null, + "refs": { + } + }, + "DetachVpnGatewayRequest": { + "base": null, + "refs": { + } + }, + "DeviceType": { + "base": null, + "refs": { + "Image$RootDeviceType": "

    The type of root device used by the AMI. The AMI can use an EBS volume or an instance store volume.

    ", + "Instance$RootDeviceType": "

    The root device type used by the AMI. The AMI can use an EBS volume or an instance store volume.

    " + } + }, + "DhcpConfiguration": { + "base": "

    Describes a DHCP configuration option.

    ", + "refs": { + "DhcpConfigurationList$member": null + } + }, + "DhcpConfigurationList": { + "base": null, + "refs": { + "DhcpOptions$DhcpConfigurations": "

    One or more DHCP options in the set.

    " + } + }, + "DhcpConfigurationValueList": { + "base": null, + "refs": { + "DhcpConfiguration$Values": "

    One or more values for the DHCP option.

    " + } + }, + "DhcpOptions": { + "base": "

    Describes a set of DHCP options.

    ", + "refs": { + "CreateDhcpOptionsResult$DhcpOptions": "

    A set of DHCP options.

    ", + "DhcpOptionsList$member": null + } + }, + "DhcpOptionsIdStringList": { + "base": null, + "refs": { + "DescribeDhcpOptionsRequest$DhcpOptionsIds": "

    The IDs of one or more DHCP options sets.

    Default: Describes all your DHCP options sets.

    " + } + }, + "DhcpOptionsList": { + "base": null, + "refs": { + "DescribeDhcpOptionsResult$DhcpOptions": "

    Information about one or more DHCP options sets.

    " + } + }, + "DisableVgwRoutePropagationRequest": { + "base": null, + "refs": { + } + }, + "DisableVpcClassicLinkDnsSupportRequest": { + "base": null, + "refs": { + } + }, + "DisableVpcClassicLinkDnsSupportResult": { + "base": null, + "refs": { + } + }, + "DisableVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "DisableVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "DisassociateAddressRequest": { + "base": null, + "refs": { + } + }, + "DisassociateRouteTableRequest": { + "base": null, + "refs": { + } + }, + "DiskImage": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImageList$member": null + } + }, + "DiskImageDescription": { + "base": "

    Describes a disk image.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Image": "

    The image.

    ", + "ImportVolumeTaskDetails$Image": "

    The image.

    " + } + }, + "DiskImageDetail": { + "base": "

    Describes a disk image.

    ", + "refs": { + "DiskImage$Image": "

    Information about the disk image.

    ", + "ImportVolumeRequest$Image": "

    The disk image.

    " + } + }, + "DiskImageFormat": { + "base": null, + "refs": { + "DiskImageDescription$Format": "

    The disk image format.

    ", + "DiskImageDetail$Format": "

    The disk image format.

    ", + "ExportToS3Task$DiskImageFormat": "

    The format for the exported image.

    ", + "ExportToS3TaskSpecification$DiskImageFormat": "

    The format for the exported image.

    " + } + }, + "DiskImageList": { + "base": null, + "refs": { + "ImportInstanceRequest$DiskImages": "

    The disk image.

    " + } + }, + "DiskImageVolumeDescription": { + "base": "

    Describes a disk image volume.

    ", + "refs": { + "ImportInstanceVolumeDetailItem$Volume": "

    The volume.

    ", + "ImportVolumeTaskDetails$Volume": "

    The volume.

    " + } + }, + "DomainType": { + "base": null, + "refs": { + "Address$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    ", + "AllocateAddressRequest$Domain": "

    Set to vpc to allocate the address for use with instances in a VPC.

    Default: The address is for use with instances in EC2-Classic.

    ", + "AllocateAddressResult$Domain": "

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    " + } + }, + "Double": { + "base": null, + "refs": { + "ClientData$UploadSize": "

    The size of the uploaded disk image, in GiB.

    ", + "PriceSchedule$Price": "

    The fixed price for the term.

    ", + "PriceScheduleSpecification$Price": "

    The fixed price for the term.

    ", + "PricingDetail$Price": "

    The price per instance.

    ", + "RecurringCharge$Amount": "

    The amount of the recurring charge.

    ", + "ReservedInstanceLimitPrice$Amount": "

    Used for Reserved Instance Marketplace offerings. Specifies the limit price on the total order (instanceCount * price).

    ", + "SnapshotDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SnapshotTaskDetail$DiskImageSize": "

    The size of the disk in the snapshot, in GiB.

    ", + "SpotFleetLaunchSpecification$WeightedCapacity": "

    The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms (instances or a performance characteristic such as vCPUs, memory, or I/O).

    If the target capacity divided by this value is not a whole number, we round the number of instances to the next whole number. If this value is not specified, the default is 1.

    " + } + }, + "EbsBlockDevice": { + "base": "

    Describes a block device for an EBS volume.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDevice": { + "base": "

    Describes a parameter used to set up an EBS volume in a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMapping$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EbsInstanceBlockDeviceSpecification": { + "base": null, + "refs": { + "InstanceBlockDeviceMappingSpecification$Ebs": "

    Parameters used to automatically set up EBS volumes when the instance is launched.

    " + } + }, + "EnableVgwRoutePropagationRequest": { + "base": null, + "refs": { + } + }, + "EnableVolumeIORequest": { + "base": null, + "refs": { + } + }, + "EnableVpcClassicLinkDnsSupportRequest": { + "base": null, + "refs": { + } + }, + "EnableVpcClassicLinkDnsSupportResult": { + "base": null, + "refs": { + } + }, + "EnableVpcClassicLinkRequest": { + "base": null, + "refs": { + } + }, + "EnableVpcClassicLinkResult": { + "base": null, + "refs": { + } + }, + "EventCode": { + "base": null, + "refs": { + "InstanceStatusEvent$Code": "

    The event code.

    " + } + }, + "EventInformation": { + "base": "

    Describes a Spot fleet event.

    ", + "refs": { + "HistoryRecord$EventInformation": "

    Information about the event.

    " + } + }, + "EventType": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryRequest$EventType": "

    The type of events to describe. By default, all events are described.

    ", + "HistoryRecord$EventType": "

    The event type.

    • error - Indicates an error with the Spot fleet request.

    • fleetRequestChange - Indicates a change in the status or configuration of the Spot fleet request.

    • instanceChange - Indicates that an instance was launched or terminated.

    " + } + }, + "ExcessCapacityTerminationPolicy": { + "base": null, + "refs": { + "ModifySpotFleetRequestRequest$ExcessCapacityTerminationPolicy": "

    Indicates whether running Spot instances should be terminated if the target capacity of the Spot fleet request is decreased below the current size of the Spot fleet.

    ", + "SpotFleetRequestConfigData$ExcessCapacityTerminationPolicy": "

    Indicates whether running Spot instances should be terminated if the target capacity of the Spot fleet request is decreased below the current size of the Spot fleet.

    " + } + }, + "ExecutableByStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ExecutableUsers": "

    Scopes the images by users with explicit launch permissions. Specify an AWS account ID, self (the sender of the request), or all (public AMIs).

    " + } + }, + "ExportEnvironment": { + "base": null, + "refs": { + "CreateInstanceExportTaskRequest$TargetEnvironment": "

    The target virtualization environment.

    ", + "InstanceExportDetails$TargetEnvironment": "

    The target virtualization environment.

    " + } + }, + "ExportTask": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskResult$ExportTask": "

    Information about the instance export task.

    ", + "ExportTaskList$member": null + } + }, + "ExportTaskIdStringList": { + "base": null, + "refs": { + "DescribeExportTasksRequest$ExportTaskIds": "

    One or more export task IDs.

    " + } + }, + "ExportTaskList": { + "base": null, + "refs": { + "DescribeExportTasksResult$ExportTasks": "

    Information about the export tasks.

    " + } + }, + "ExportTaskState": { + "base": null, + "refs": { + "ExportTask$State": "

    The state of the export task.

    " + } + }, + "ExportToS3Task": { + "base": "

    Describes the format and location for an instance export task.

    ", + "refs": { + "ExportTask$ExportToS3Task": "

    Information about the export task.

    " + } + }, + "ExportToS3TaskSpecification": { + "base": "

    Describes an instance export task.

    ", + "refs": { + "CreateInstanceExportTaskRequest$ExportToS3Task": "

    The format and location for an instance export task.

    " + } + }, + "Filter": { + "base": "

    A filter name and value pair that is used to return a more specific list of results. Filters can be used to match a set of resources by various criteria, such as tags, attributes, or IDs.

    ", + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeAddressesRequest$Filters": "

    One or more filters. Filter names and values are case-sensitive.

    • allocation-id - [EC2-VPC] The allocation ID for the address.

    • association-id - [EC2-VPC] The association ID for the address.

    • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

    • instance-id - The ID of the instance the address is associated with, if any.

    • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

    • network-interface-owner-id - The AWS account ID of the owner.

    • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

    • public-ip - The Elastic IP address.

    ", + "DescribeAvailabilityZonesRequest$Filters": "

    One or more filters.

    • message - Information about the Availability Zone.

    • region-name - The name of the region for the Availability Zone (for example, us-east-1).

    • state - The state of the Availability Zone (available | information | impaired | unavailable).

    • zone-name - The name of the Availability Zone (for example, us-east-1a).

    ", + "DescribeBundleTasksRequest$Filters": "

    One or more filters.

    • bundle-id - The ID of the bundle task.

    • error-code - If the task failed, the error code returned.

    • error-message - If the task failed, the error message returned.

    • instance-id - The ID of the instance.

    • progress - The level of task completion, as a percentage (for example, 20%).

    • s3-bucket - The Amazon S3 bucket to store the AMI.

    • s3-prefix - The beginning of the AMI name.

    • start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).

    • state - The state of the task (pending | waiting-for-shutdown | bundling | storing | cancelling | complete | failed).

    • update-time - The time of the most recent update for the task.

    ", + "DescribeClassicLinkInstancesRequest$Filters": "

    One or more filters.

    • group-id - The ID of a VPC security group that's associated with the instance.

    • instance-id - The ID of the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC that the instance is linked to.

    ", + "DescribeConversionTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeCustomerGatewaysRequest$Filters": "

    One or more filters.

    • bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    • customer-gateway-id - The ID of the customer gateway.

    • ip-address - The IP address of the customer gateway's Internet-routable external interface.

    • state - The state of the customer gateway (pending | available | deleting | deleted).

    • type - The type of customer gateway. Currently, the only supported type is ipsec.1.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeDhcpOptionsRequest$Filters": "

    One or more filters.

    • dhcp-options-id - The ID of a set of DHCP options.

    • key - The key for one of the options (for example, domain-name).

    • value - The value for one of the options.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeFlowLogsRequest$Filter": "

    One or more filters.

    • deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).

    • flow-log-id - The ID of the flow log.

    • log-group-name - The name of the log group.

    • resource-id - The ID of the VPC, subnet, or network interface.

    • traffic-type - The type of traffic (ACCEPT | REJECT | ALL)

    ", + "DescribeHostsRequest$Filter": "

    One or more filters.

    • instance-type - The instance type size that the Dedicated host is configured to support.

    • auto-placement - Whether auto-placement is enabled or disabled (on | off).

    • host-reservation-id - The ID of the reservation associated with this host.

    • client-token - The idempotency token you provided when you launched the instance.

    • state - The allocation state of the Dedicated host (available | under-assessment | permanent-failure | released | released-permanent-failure).

    • availability-zone - The Availability Zone of the host.

    ", + "DescribeImagesRequest$Filters": "

    One or more filters.

    • architecture - The image architecture (i386 | x86_64).

    • block-device-mapping.delete-on-termination - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh).

    • block-device-mapping.snapshot-id - The ID of the snapshot used for the EBS volume.

    • block-device-mapping.volume-size - The volume size of the EBS volume, in GiB.

    • block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | standard | io1).

    • description - The description of the image (provided during image creation).

    • hypervisor - The hypervisor type (ovm | xen).

    • image-id - The ID of the image.

    • image-type - The image type (machine | kernel | ramdisk).

    • is-public - A Boolean that indicates whether the image is public.

    • kernel-id - The kernel ID.

    • manifest-location - The location of the image manifest.

    • name - The name of the AMI (provided during image creation).

    • owner-alias - The AWS account alias (for example, amazon).

    • owner-id - The AWS account ID of the image owner.

    • platform - The platform. To only list Windows-based AMIs, use windows.

    • product-code - The product code.

    • product-code.type - The type of the product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • root-device-name - The name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • state - The state of the image (available | pending | failed).

    • state-reason-code - The reason code for the state change.

    • state-reason-message - The message for the state change.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • virtualization-type - The virtualization type (paravirtual | hvm).

    ", + "DescribeImportImageTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeImportSnapshotTasksRequest$Filters": "

    One or more filters.

    ", + "DescribeInstanceStatusRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone of the instance.

    • event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop).

    • event.description - A description of the event.

    • event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).

    • instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data).

    • instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    • system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data).

    • system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).

    ", + "DescribeInstancesRequest$Filters": "

    One or more filters.

    • affinity - The affinity setting for an instance running on a Dedicated host (default | host).

    • architecture - The instance architecture (i386 | x86_64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name for the EBS volume (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • client-token - The idempotency token you provided when you launched the instance.

    • dns-name - The public DNS name of the instance.

    • group-id - The ID of the security group for the instance. EC2-Classic only.

    • group-name - The name of the security group for the instance. EC2-Classic only.

    • host-Id - The ID of the Dedicated host on which the instance is running, if applicable.

    • hypervisor - The hypervisor type of the instance (ovm | xen).

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance (spot).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is an opaque internal value and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IP address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched.

    • monitoring-state - Indicates whether monitoring is enabled for the instance (disabled | enabled).

    • owner-id - The AWS account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • platform - The platform. Use windows if you have Windows instances; otherwise, leave blank.

    • private-dns-name - The private DNS name of the instance.

    • private-ip-address - The private IP address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you'll get one reservation ID. If you launch ten instances using the same launch request, you'll also get one reservation ID.

    • root-device-name - The name of the root device for the instance (for example, /dev/sda1 or /dev/xvda).

    • root-device-type - The type of root device that the instance uses (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:key=value - The key/value combination of a tag assigned to the resource, where tag:key is the tag's key.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • tenancy - The tenancy of an instance (dedicated | default | host).

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.

    • network-interface.description - The description of the network interface.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by AWS.

    • network-interface.status - The status of the network interface (available | in-use).

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface.private-dns-name - The private DNS name of the network interface.

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.addresses.private-ip-address - The private IP address associated with the network interface.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.addresses.primary - Specifies whether the IP address of the network interface is the primary private IP address.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    ", + "DescribeInternetGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.

    • attachment.vpc-id - The ID of an attached VPC.

    • internet-gateway-id - The ID of the Internet gateway.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeKeyPairsRequest$Filters": "

    One or more filters.

    • fingerprint - The fingerprint of the key pair.

    • key-name - The name of the key pair.

    ", + "DescribeMovingAddressesRequest$Filters": "

    One or more filters.

    • moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic).

    ", + "DescribeNatGatewaysRequest$Filter": "

    One or more filters.

    • nat-gateway-id - The ID of the NAT gateway.

    • state - The state of the NAT gateway (pending | failed | available | deleting | deleted).

    • subnet-id - The ID of the subnet in which the NAT gateway resides.

    • vpc-id - The ID of the VPC in which the NAT gateway resides.

    ", + "DescribeNetworkAclsRequest$Filters": "

    One or more filters.

    • association.association-id - The ID of an association ID for the ACL.

    • association.network-acl-id - The ID of the network ACL involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • default - Indicates whether the ACL is the default network ACL for the VPC.

    • entry.cidr - The CIDR range specified in the entry.

    • entry.egress - Indicates whether the entry applies to egress traffic.

    • entry.icmp.code - The ICMP code specified in the entry, if any.

    • entry.icmp.type - The ICMP type specified in the entry, if any.

    • entry.port-range.from - The start of the port range specified in the entry.

    • entry.port-range.to - The end of the port range specified in the entry.

    • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).

    • entry.rule-action - Allows or denies the matching traffic (allow | deny).

    • entry.rule-number - The number of an entry (in other words, rule) in the ACL's set of entries.

    • network-acl-id - The ID of the network ACL.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network ACL.

    ", + "DescribeNetworkInterfacesRequest$Filters": "

    One or more filters.

    • addresses.private-ip-address - The private IP addresses associated with the network interface.

    • addresses.primary - Whether the private IP address is the primary IP address associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IP address.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address for your network interface.

    • association.ip-owner-id - The owner of the Elastic IP address associated with the network interface.

    • association.public-ip - The address of the Elastic IP address bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.attach.time - The time that the network interface was attached to an instance.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.nat-gateway-id - The ID of the NAT gateway to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • group-name - The name of a security group associated with the network interface.

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • owner-id - The AWS account ID of the network interface owner.

    • private-ip-address - The private IP address or addresses of the network interface.

    • private-dns-name - The private DNS name of the network interface.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

    • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

    • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the network interface.

    ", + "DescribePlacementGroupsRequest$Filters": "

    One or more filters.

    • group-name - The name of the placement group.

    • state - The state of the placement group (pending | available | deleting | deleted).

    • strategy - The strategy of the placement group (cluster).

    ", + "DescribePrefixListsRequest$Filters": "

    One or more filters.

    • prefix-list-id: The ID of a prefix list.

    • prefix-list-name: The name of a prefix list.

    ", + "DescribeRegionsRequest$Filters": "

    One or more filters.

    • endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com).

    • region-name - The name of the region (for example, us-east-1).

    ", + "DescribeReservedInstancesListingsRequest$Filters": "

    One or more filters.

    • reserved-instances-id - The ID of the Reserved Instances.

    • reserved-instances-listing-id - The ID of the Reserved Instances listing.

    • status - The status of the Reserved Instance listing (pending | active | cancelled | closed).

    • status-message - The reason for the status.

    ", + "DescribeReservedInstancesModificationsRequest$Filters": "

    One or more filters.

    • client-token - The idempotency token for the modification request.

    • create-date - The time when the modification request was created.

    • effective-date - The time when the modification becomes effective.

    • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.

    • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.

    • modification-result.target-configuration.instance-count - The number of new Reserved Instances.

    • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.

    • modification-result.target-configuration.platform - The network platform of the new Reserved Instances (EC2-Classic | EC2-VPC).

    • reserved-instances-id - The ID of the Reserved Instances modified.

    • reserved-instances-modification-id - The ID of the modification request.

    • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).

    • status-message - The reason for the status.

    • update-date - The time when the modification request was last updated.

    ", + "DescribeReservedInstancesOfferingsRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from both AWS and the Reserved Instance Marketplace are listed.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC))

    • reserved-instances-offering-id - The Reserved Instances offering ID.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeReservedInstancesRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    • end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • product-description - The Reserved Instance product platform description. Instances that include (Amazon VPC) in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC)).

    • reserved-instances-id - The ID of the Reserved Instance.

    • start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    • state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "DescribeRouteTablesRequest$Filters": "

    One or more filters.

    • association.route-table-association-id - The ID of an association ID for the route table.

    • association.route-table-id - The ID of the route table involved in the association.

    • association.subnet-id - The ID of the subnet involved in the association.

    • association.main - Indicates whether the route table is the main route table for the VPC (true | false).

    • route-table-id - The ID of the route table.

    • route.destination-cidr-block - The CIDR range specified in a route in the table.

    • route.destination-prefix-list-id - The ID (prefix) of the AWS service specified in a route in the table.

    • route.gateway-id - The ID of a gateway specified in a route in the table.

    • route.instance-id - The ID of an instance specified in a route in the table.

    • route.nat-gateway-id - The ID of a NAT gateway.

    • route.origin - Describes how the route was created. CreateRouteTable indicates that the route was automatically created when the route table was created; CreateRoute indicates that the route was manually added to the route table; EnableVgwRoutePropagation indicates that the route was propagated by route propagation.

    • route.state - The state of a route in the route table (active | blackhole). The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, the specified NAT instance has been terminated, and so on).

    • route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route in the table.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the route table.

    ", + "DescribeScheduledInstanceAvailabilityRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone (for example, us-west-2a).

    • instance-type - The instance type (for example, c4.large).

    • network-platform - The network platform (EC2-Classic or EC2-VPC).

    • platform - The platform (Linux/UNIX or Windows).

    ", + "DescribeScheduledInstancesRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone (for example, us-west-2a).

    • instance-type - The instance type (for example, c4.large).

    • network-platform - The network platform (EC2-Classic or EC2-VPC).

    • platform - The platform (Linux/UNIX or Windows).

    ", + "DescribeSecurityGroupsRequest$Filters": "

    One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

    • description - The description of the security group.

    • egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which the security group allows access.

    • group-id - The ID of the security group.

    • group-name - The name of the security group.

    • ip-permission.cidr - A CIDR range that has been granted permission.

    • ip-permission.from-port - The start of port range for the TCP and UDP protocols, or an ICMP type number.

    • ip-permission.group-id - The ID of a security group that has been granted permission.

    • ip-permission.group-name - The name of a security group that has been granted permission.

    • ip-permission.protocol - The IP protocol for the permission (tcp | udp | icmp or a protocol number).

    • ip-permission.to-port - The end of port range for the TCP and UDP protocols, or an ICMP code.

    • ip-permission.user-id - The ID of an AWS account that has been granted permission.

    • owner-id - The AWS account ID of the owner of the security group.

    • tag-key - The key of a tag assigned to the security group.

    • tag-value - The value of a tag assigned to the security group.

    • vpc-id - The ID of the VPC specified when the security group was created.

    ", + "DescribeSnapshotsRequest$Filters": "

    One or more filters.

    • description - A description of the snapshot.

    • owner-alias - The AWS account alias (for example, amazon) that owns the snapshot.

    • owner-id - The ID of the AWS account that owns the snapshot.

    • progress - The progress of the snapshot, as a percentage (for example, 80%).

    • snapshot-id - The snapshot ID.

    • start-time - The time stamp when the snapshot was initiated.

    • status - The status of the snapshot (pending | completed | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The ID of the volume the snapshot is for.

    • volume-size - The size of the volume, in GiB.

    ", + "DescribeSpotInstanceRequestsRequest$Filters": "

    One or more filters.

    • availability-zone-group - The Availability Zone group.

    • create-time - The time stamp when the Spot instance request was created.

    • fault-code - The fault code related to the request.

    • fault-message - The fault message related to the request.

    • instance-id - The ID of the instance that fulfilled the request.

    • launch-group - The Spot instance launch group.

    • launch.block-device-mapping.delete-on-termination - Indicates whether the Amazon EBS volume is deleted on instance termination.

    • launch.block-device-mapping.device-name - The device name for the Amazon EBS volume (for example, /dev/sdh).

    • launch.block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume.

    • launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, in GiB.

    • launch.block-device-mapping.volume-type - The type of the Amazon EBS volume (gp2 | standard | io1).

    • launch.group-id - The security group for the instance.

    • launch.image-id - The ID of the AMI.

    • launch.instance-type - The type of instance (for example, m3.medium).

    • launch.kernel-id - The kernel ID.

    • launch.key-name - The name of the key pair the instance launched with.

    • launch.monitoring-enabled - Whether monitoring is enabled for the Spot instance.

    • launch.ramdisk-id - The RAM disk ID.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.device-index - The index of the device for the network interface attachment on the instance.

    • network-interface.subnet-id - The ID of the subnet for the instance.

    • network-interface.description - A description of the network interface.

    • network-interface.private-ip-address - The primary private IP address of the network interface.

    • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

    • network-interface.group-id - The ID of the security group associated with the network interface.

    • network-interface.group-name - The name of the security group associated with the network interface.

    • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

    • product-description - The product description associated with the instance (Linux/UNIX | Windows).

    • spot-instance-request-id - The Spot instance request ID.

    • spot-price - The maximum hourly price for any Spot instance launched to fulfill the request.

    • state - The state of the Spot instance request (open | active | closed | cancelled | failed). Spot bid status information can help you track your Amazon EC2 Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    • status-code - The short code describing the most recent evaluation of your Spot instance request.

    • status-message - The message explaining the status of the Spot instance request.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of Spot instance request (one-time | persistent).

    • launched-availability-zone - The Availability Zone in which the bid is launched.

    • valid-from - The start date of the request.

    • valid-until - The end date of the request.

    ", + "DescribeSpotPriceHistoryRequest$Filters": "

    One or more filters.

    • availability-zone - The Availability Zone for which prices should be returned.

    • instance-type - The type of instance (for example, m3.medium).

    • product-description - The product description for the Spot price (Linux/UNIX | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).

    • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).

    • timestamp - The timestamp of the Spot price history, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater than or less than comparison is not supported.

    ", + "DescribeSubnetsRequest$Filters": "

    One or more filters.

    • availabilityZone - The Availability Zone for the subnet. You can also use availability-zone as the filter name.

    • available-ip-address-count - The number of IP addresses in the subnet that are available.

    • cidrBlock - The CIDR block of the subnet. The CIDR block you specify must exactly match the subnet's CIDR block for information to be returned for the subnet. You can also use cidr or cidr-block as the filter names.

    • defaultForAz - Indicates whether this is the default subnet for the Availability Zone. You can also use default-for-az as the filter name.

    • state - The state of the subnet (pending | available).

    • subnet-id - The ID of the subnet.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC for the subnet.

    ", + "DescribeTagsRequest$Filters": "

    One or more filters.

    • key - The tag key.

    • resource-id - The resource ID.

    • resource-type - The resource type (customer-gateway | dhcp-options | image | instance | internet-gateway | network-acl | network-interface | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpn-connection | vpn-gateway).

    • value - The tag value.

    ", + "DescribeVolumeStatusRequest$Filters": "

    One or more filters.

    • action.code - The action code for the event (for example, enable-volume-io).

    • action.description - A description of the action.

    • action.event-id - The event ID associated with the action.

    • availability-zone - The Availability Zone of the instance.

    • event.description - A description of the event.

    • event.event-id - The event ID.

    • event.event-type - The event type (for io-enabled: passed | failed; for io-performance: io-performance:degraded | io-performance:severely-degraded | io-performance:stalled).

    • event.not-after - The latest end time for the event.

    • event.not-before - The earliest start time for the event.

    • volume-status.details-name - The cause for volume-status.status (io-enabled | io-performance).

    • volume-status.details-status - The status of volume-status.details-name (for io-enabled: passed | failed; for io-performance: normal | degraded | severely-degraded | stalled).

    • volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data).

    ", + "DescribeVolumesRequest$Filters": "

    One or more filters.

    • attachment.attach-time - The time stamp when the attachment initiated.

    • attachment.delete-on-termination - Whether the volume is deleted on instance termination.

    • attachment.device - The device name that is exposed to the instance (for example, /dev/sda1).

    • attachment.instance-id - The ID of the instance the volume is attached to.

    • attachment.status - The attachment state (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone in which the volume was created.

    • create-time - The time stamp when the volume was created.

    • encrypted - The encryption status of the volume.

    • size - The size of the volume, in GiB.

    • snapshot-id - The snapshot from which the volume was created.

    • status - The status of the volume (creating | available | in-use | deleting | deleted | error).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • volume-id - The volume ID.

    • volume-type - The Amazon EBS volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.

    ", + "DescribeVpcClassicLinkRequest$Filters": "

    One or more filters.

    • is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true | false).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    ", + "DescribeVpcEndpointsRequest$Filters": "

    One or more filters.

    • service-name: The name of the AWS service.

    • vpc-id: The ID of the VPC in which the endpoint resides.

    • vpc-endpoint-id: The ID of the endpoint.

    • vpc-endpoint-state: The state of the endpoint. (pending | available | deleting | deleted)

    ", + "DescribeVpcPeeringConnectionsRequest$Filters": "

    One or more filters.

    • accepter-vpc-info.cidr-block - The CIDR block of the peer VPC.

    • accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer VPC.

    • accepter-vpc-info.vpc-id - The ID of the peer VPC.

    • expiration-time - The expiration date and time for the VPC peering connection.

    • requester-vpc-info.cidr-block - The CIDR block of the requester's VPC.

    • requester-vpc-info.owner-id - The AWS account ID of the owner of the requester VPC.

    • requester-vpc-info.vpc-id - The ID of the requester VPC.

    • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleted | rejected).

    • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-peering-connection-id - The ID of the VPC peering connection.

    ", + "DescribeVpcsRequest$Filters": "

    One or more filters.

    • cidr - The CIDR block of the VPC. The CIDR block you specify must exactly match the VPC's CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, /28).

    • dhcp-options-id - The ID of a set of DHCP options.

    • isDefault - Indicates whether the VPC is the default VPC.

    • state - The state of the VPC (pending | available).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • vpc-id - The ID of the VPC.

    ", + "DescribeVpnConnectionsRequest$Filters": "

    One or more filters.

    • customer-gateway-configuration - The configuration information for the customer gateway.

    • customer-gateway-id - The ID of a customer gateway associated with the VPN connection.

    • state - The state of the VPN connection (pending | available | deleting | deleted).

    • option.static-routes-only - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP).

    • route.destination-cidr-block - The destination CIDR block. This corresponds to the subnet used in a customer data center.

    • bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP device.

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of VPN connection. Currently the only supported type is ipsec.1.

    • vpn-connection-id - The ID of the VPN connection.

    • vpn-gateway-id - The ID of a virtual private gateway associated with the VPN connection.

    ", + "DescribeVpnGatewaysRequest$Filters": "

    One or more filters.

    • attachment.state - The current state of the attachment between the gateway and the VPC (attaching | attached | detaching | detached).

    • attachment.vpc-id - The ID of an attached VPC.

    • availability-zone - The Availability Zone for the virtual private gateway (if applicable).

    • state - The state of the virtual private gateway (pending | available | deleting | deleted).

    • tag:key=value - The key/value combination of a tag assigned to the resource.

    • tag-key - The key of a tag assigned to the resource. This filter is independent of the tag-value filter. For example, if you use both the filter \"tag-key=Purpose\" and the filter \"tag-value=X\", you get any resources assigned both the tag key Purpose (regardless of what the tag's value is), and the tag value X (regardless of what the tag's key is). If you want to list only resources where Purpose is X, see the tag:key=value filter.

    • tag-value - The value of a tag assigned to the resource. This filter is independent of the tag-key filter.

    • type - The type of virtual private gateway. Currently the only supported type is ipsec.1.

    • vpn-gateway-id - The ID of the virtual private gateway.

    " + } + }, + "Float": { + "base": null, + "refs": { + "ReservedInstances$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstances$FixedPrice": "

    The purchase price of the Reserved Instance.

    ", + "ReservedInstancesOffering$UsagePrice": "

    The usage price of the Reserved Instance, per hour.

    ", + "ReservedInstancesOffering$FixedPrice": "

    The purchase price of the Reserved Instance.

    " + } + }, + "FlowLog": { + "base": "

    Describes a flow log.

    ", + "refs": { + "FlowLogSet$member": null + } + }, + "FlowLogSet": { + "base": null, + "refs": { + "DescribeFlowLogsResult$FlowLogs": "

    Information about the flow logs.

    " + } + }, + "FlowLogsResourceType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$ResourceType": "

    The type of resource on which to create the flow log.

    " + } + }, + "GatewayType": { + "base": null, + "refs": { + "CreateCustomerGatewayRequest$Type": "

    The type of VPN connection that this customer gateway supports (ipsec.1).

    ", + "CreateVpnGatewayRequest$Type": "

    The type of VPN connection this virtual private gateway supports.

    ", + "VpnConnection$Type": "

    The type of VPN connection.

    ", + "VpnGateway$Type": "

    The type of VPN connection the virtual private gateway supports.

    " + } + }, + "GetConsoleOutputRequest": { + "base": null, + "refs": { + } + }, + "GetConsoleOutputResult": { + "base": null, + "refs": { + } + }, + "GetPasswordDataRequest": { + "base": null, + "refs": { + } + }, + "GetPasswordDataResult": { + "base": null, + "refs": { + } + }, + "GroupIdStringList": { + "base": null, + "refs": { + "AttachClassicLinkVpcRequest$Groups": "

    The ID of one or more of the VPC's security groups. You cannot specify security groups from a different VPC.

    ", + "DescribeSecurityGroupsRequest$GroupIds": "

    One or more security group IDs. Required for security groups in a nondefault VPC.

    Default: Describes all your security groups.

    ", + "ModifyInstanceAttributeRequest$Groups": "

    [EC2-VPC] Changes the security groups of the instance. You must specify at least one security group, even if it's just the default security group for the VPC. You must specify the security group ID, not the security group name.

    " + } + }, + "GroupIdentifier": { + "base": "

    Describes a security group.

    ", + "refs": { + "GroupIdentifierList$member": null + } + }, + "GroupIdentifierList": { + "base": null, + "refs": { + "ClassicLinkInstance$Groups": "

    A list of security groups.

    ", + "DescribeNetworkInterfaceAttributeResult$Groups": "

    The security groups associated with the network interface.

    ", + "Instance$SecurityGroups": "

    One or more security groups for the instance.

    ", + "InstanceAttribute$Groups": "

    The security groups associated with the instance.

    ", + "InstanceNetworkInterface$Groups": "

    One or more security groups.

    ", + "LaunchSpecification$SecurityGroups": "

    One or more security groups. When requesting instances in a VPC, you must specify the IDs of the security groups. When requesting instances in EC2-Classic, you can specify the names or the IDs of the security groups.

    ", + "NetworkInterface$Groups": "

    Any security groups for the network interface.

    ", + "Reservation$Groups": "

    One or more security groups.

    ", + "SpotFleetLaunchSpecification$SecurityGroups": "

    One or more security groups. When requesting instances in a VPC, you must specify the IDs of the security groups. When requesting instances in EC2-Classic, you can specify the names or the IDs of the security groups.

    " + } + }, + "GroupNameStringList": { + "base": null, + "refs": { + "DescribeSecurityGroupsRequest$GroupNames": "

    [EC2-Classic and default VPC only] One or more security group names. You can specify either the security group name or the security group ID. For security groups in a nondefault VPC, use the group-name filter to describe security groups by name.

    Default: Describes all your security groups.

    ", + "ModifySnapshotAttributeRequest$GroupNames": "

    The group to modify for the snapshot.

    " + } + }, + "HistoryRecord": { + "base": "

    Describes an event in the history of the Spot fleet request.

    ", + "refs": { + "HistoryRecords$member": null + } + }, + "HistoryRecords": { + "base": null, + "refs": { + "DescribeSpotFleetRequestHistoryResponse$HistoryRecords": "

    Information about the events in the history of the Spot fleet request.

    " + } + }, + "Host": { + "base": "

    Describes the properties of the Dedicated host.

    ", + "refs": { + "HostList$member": null + } + }, + "HostInstance": { + "base": null, + "refs": { + "HostInstanceList$member": null + } + }, + "HostInstanceList": { + "base": null, + "refs": { + "Host$Instances": "

    The IDs and instance type that are currently running on the Dedicated host.

    " + } + }, + "HostList": { + "base": null, + "refs": { + "DescribeHostsResult$Hosts": "

    Information about the Dedicated hosts.

    " + } + }, + "HostProperties": { + "base": null, + "refs": { + "Host$HostProperties": "

    The hardware specifications of the Dedicated host.

    " + } + }, + "HostTenancy": { + "base": null, + "refs": { + "ModifyInstancePlacementRequest$Tenancy": "

    The tenancy of the instance that you are modifying.

    " + } + }, + "HypervisorType": { + "base": null, + "refs": { + "Image$Hypervisor": "

    The hypervisor type of the image.

    ", + "Instance$Hypervisor": "

    The hypervisor type of the instance.

    " + } + }, + "IamInstanceProfile": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "Instance$IamInstanceProfile": "

    The IAM instance profile associated with the instance, if applicable.

    " + } + }, + "IamInstanceProfileSpecification": { + "base": "

    Describes an IAM instance profile.

    ", + "refs": { + "LaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RequestSpotLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    ", + "RunInstancesRequest$IamInstanceProfile": "

    The IAM instance profile.

    ", + "SpotFleetLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    " + } + }, + "IcmpTypeCode": { + "base": "

    Describes the ICMP type and code.

    ", + "refs": { + "CreateNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying ICMP for the protocol.

    ", + "NetworkAclEntry$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code.

    ", + "ReplaceNetworkAclEntryRequest$IcmpTypeCode": "

    ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for the protocol.

    " + } + }, + "IdFormat": { + "base": "

    Describes the ID format for a resource.

    ", + "refs": { + "IdFormatList$member": null + } + }, + "IdFormatList": { + "base": null, + "refs": { + "DescribeIdFormatResult$Statuses": "

    Information about the ID format for the resource.

    " + } + }, + "Image": { + "base": "

    Describes an image.

    ", + "refs": { + "ImageList$member": null + } + }, + "ImageAttribute": { + "base": "

    Describes an image attribute.

    ", + "refs": { + } + }, + "ImageAttributeName": { + "base": null, + "refs": { + "DescribeImageAttributeRequest$Attribute": "

    The AMI attribute.

    Note: Depending on your account privileges, the blockDeviceMapping attribute may return a Client.AuthFailure error. If this happens, use DescribeImages to get information about the block device mapping for the AMI.

    " + } + }, + "ImageDiskContainer": { + "base": "

    Describes the disk container object for an import image task.

    ", + "refs": { + "ImageDiskContainerList$member": null + } + }, + "ImageDiskContainerList": { + "base": null, + "refs": { + "ImportImageRequest$DiskContainers": "

    Information about the disk containers.

    " + } + }, + "ImageIdStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$ImageIds": "

    One or more image IDs.

    Default: Describes all images available to you.

    " + } + }, + "ImageList": { + "base": null, + "refs": { + "DescribeImagesResult$Images": "

    Information about one or more images.

    " + } + }, + "ImageState": { + "base": null, + "refs": { + "Image$State": "

    The current state of the AMI. If the state is available, the image is successfully registered and can be used to launch an instance.

    " + } + }, + "ImageTypeValues": { + "base": null, + "refs": { + "Image$ImageType": "

    The type of image.

    " + } + }, + "ImportImageRequest": { + "base": null, + "refs": { + } + }, + "ImportImageResult": { + "base": null, + "refs": { + } + }, + "ImportImageTask": { + "base": "

    Describes an import image task.

    ", + "refs": { + "ImportImageTaskList$member": null + } + }, + "ImportImageTaskList": { + "base": null, + "refs": { + "DescribeImportImageTasksResult$ImportImageTasks": "

    A list of zero or more import image tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportInstanceLaunchSpecification": { + "base": "

    Describes the launch specification for VM import.

    ", + "refs": { + "ImportInstanceRequest$LaunchSpecification": "

    The launch specification.

    " + } + }, + "ImportInstanceRequest": { + "base": null, + "refs": { + } + }, + "ImportInstanceResult": { + "base": null, + "refs": { + } + }, + "ImportInstanceTaskDetails": { + "base": "

    Describes an import instance task.

    ", + "refs": { + "ConversionTask$ImportInstance": "

    If the task is for importing an instance, this contains information about the import instance task.

    " + } + }, + "ImportInstanceVolumeDetailItem": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ImportInstanceVolumeDetailSet$member": null + } + }, + "ImportInstanceVolumeDetailSet": { + "base": null, + "refs": { + "ImportInstanceTaskDetails$Volumes": "

    One or more volumes.

    " + } + }, + "ImportKeyPairRequest": { + "base": null, + "refs": { + } + }, + "ImportKeyPairResult": { + "base": null, + "refs": { + } + }, + "ImportSnapshotRequest": { + "base": null, + "refs": { + } + }, + "ImportSnapshotResult": { + "base": null, + "refs": { + } + }, + "ImportSnapshotTask": { + "base": "

    Describes an import snapshot task.

    ", + "refs": { + "ImportSnapshotTaskList$member": null + } + }, + "ImportSnapshotTaskList": { + "base": null, + "refs": { + "DescribeImportSnapshotTasksResult$ImportSnapshotTasks": "

    A list of zero or more import snapshot tasks that are currently active or were completed or canceled in the previous 7 days.

    " + } + }, + "ImportTaskIdList": { + "base": null, + "refs": { + "DescribeImportImageTasksRequest$ImportTaskIds": "

    A list of import image task IDs.

    ", + "DescribeImportSnapshotTasksRequest$ImportTaskIds": "

    A list of import snapshot task IDs.

    " + } + }, + "ImportVolumeRequest": { + "base": null, + "refs": { + } + }, + "ImportVolumeResult": { + "base": null, + "refs": { + } + }, + "ImportVolumeTaskDetails": { + "base": "

    Describes an import volume task.

    ", + "refs": { + "ConversionTask$ImportVolume": "

    If the task is for importing a volume, this contains information about the import volume task.

    " + } + }, + "Instance": { + "base": "

    Describes an instance.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceAttribute": { + "base": "

    Describes an instance attribute.

    ", + "refs": { + } + }, + "InstanceAttributeName": { + "base": null, + "refs": { + "DescribeInstanceAttributeRequest$Attribute": "

    The instance attribute.

    ", + "ModifyInstanceAttributeRequest$Attribute": "

    The name of the attribute.

    ", + "ResetInstanceAttributeRequest$Attribute": "

    The attribute to reset.

    " + } + }, + "InstanceBlockDeviceMapping": { + "base": "

    Describes a block device mapping.

    ", + "refs": { + "InstanceBlockDeviceMappingList$member": null + } + }, + "InstanceBlockDeviceMappingList": { + "base": null, + "refs": { + "Instance$BlockDeviceMappings": "

    Any block device mapping entries for the instance.

    ", + "InstanceAttribute$BlockDeviceMappings": "

    The block device mapping of the instance.

    " + } + }, + "InstanceBlockDeviceMappingSpecification": { + "base": "

    Describes a block device mapping entry.

    ", + "refs": { + "InstanceBlockDeviceMappingSpecificationList$member": null + } + }, + "InstanceBlockDeviceMappingSpecificationList": { + "base": null, + "refs": { + "ModifyInstanceAttributeRequest$BlockDeviceMappings": "

    Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

    To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the Block Device Mapping when Launching an Instance in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "InstanceCapacity": { + "base": "

    Information about the instance type that the Dedicated host supports.

    ", + "refs": { + "AvailableInstanceCapacityList$member": null + } + }, + "InstanceCount": { + "base": "

    Describes a Reserved Instance listing state.

    ", + "refs": { + "InstanceCountList$member": null + } + }, + "InstanceCountList": { + "base": null, + "refs": { + "ReservedInstancesListing$InstanceCounts": "

    The number of instances in this state.

    " + } + }, + "InstanceExportDetails": { + "base": "

    Describes an instance to export.

    ", + "refs": { + "ExportTask$InstanceExportDetails": "

    Information about the instance to export.

    " + } + }, + "InstanceIdSet": { + "base": null, + "refs": { + "RunScheduledInstancesResult$InstanceIdSet": "

    The IDs of the newly launched instances.

    " + } + }, + "InstanceIdStringList": { + "base": null, + "refs": { + "DescribeClassicLinkInstancesRequest$InstanceIds": "

    One or more instance IDs. Must be instances linked to a VPC through ClassicLink.

    ", + "DescribeInstanceStatusRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    Constraints: Maximum 100 explicitly specified instance IDs.

    ", + "DescribeInstancesRequest$InstanceIds": "

    One or more instance IDs.

    Default: Describes all your instances.

    ", + "MonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "RebootInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "ReportInstanceStatusRequest$Instances": "

    One or more instances.

    ", + "StartInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "StopInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "TerminateInstancesRequest$InstanceIds": "

    One or more instance IDs.

    ", + "UnmonitorInstancesRequest$InstanceIds": "

    One or more instance IDs.

    " + } + }, + "InstanceLifecycleType": { + "base": null, + "refs": { + "Instance$InstanceLifecycle": "

    Indicates whether this is a Spot instance.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "Reservation$Instances": "

    One or more instances.

    " + } + }, + "InstanceMonitoring": { + "base": "

    Describes the monitoring information of the instance.

    ", + "refs": { + "InstanceMonitoringList$member": null + } + }, + "InstanceMonitoringList": { + "base": null, + "refs": { + "MonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    ", + "UnmonitorInstancesResult$InstanceMonitorings": "

    Monitoring information for one or more instances.

    " + } + }, + "InstanceNetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceList$member": null + } + }, + "InstanceNetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "InstanceNetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "InstancePrivateIpAddress$Association": "

    The association information for an Elastic IP address for the network interface.

    " + } + }, + "InstanceNetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "InstanceNetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "InstanceNetworkInterfaceList": { + "base": null, + "refs": { + "Instance$NetworkInterfaces": "

    [EC2-VPC] One or more network interfaces for the instance.

    " + } + }, + "InstanceNetworkInterfaceSpecification": { + "base": "

    Describes a network interface.

    ", + "refs": { + "InstanceNetworkInterfaceSpecificationList$member": null + } + }, + "InstanceNetworkInterfaceSpecificationList": { + "base": null, + "refs": { + "LaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RequestSpotLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    ", + "RunInstancesRequest$NetworkInterfaces": "

    One or more network interfaces.

    ", + "SpotFleetLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    " + } + }, + "InstancePrivateIpAddress": { + "base": "

    Describes a private IP address.

    ", + "refs": { + "InstancePrivateIpAddressList$member": null + } + }, + "InstancePrivateIpAddressList": { + "base": null, + "refs": { + "InstanceNetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "InstanceState": { + "base": "

    Describes the current state of the instance.

    ", + "refs": { + "Instance$State": "

    The current state of the instance.

    ", + "InstanceStateChange$CurrentState": "

    The current state of the instance.

    ", + "InstanceStateChange$PreviousState": "

    The previous state of the instance.

    ", + "InstanceStatus$InstanceState": "

    The intended state of the instance. DescribeInstanceStatus requires that an instance be in the running state.

    " + } + }, + "InstanceStateChange": { + "base": "

    Describes an instance state change.

    ", + "refs": { + "InstanceStateChangeList$member": null + } + }, + "InstanceStateChangeList": { + "base": null, + "refs": { + "StartInstancesResult$StartingInstances": "

    Information about one or more started instances.

    ", + "StopInstancesResult$StoppingInstances": "

    Information about one or more stopped instances.

    ", + "TerminateInstancesResult$TerminatingInstances": "

    Information about one or more terminated instances.

    " + } + }, + "InstanceStateName": { + "base": null, + "refs": { + "InstanceState$Name": "

    The current state of the instance.

    " + } + }, + "InstanceStatus": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatusList$member": null + } + }, + "InstanceStatusDetails": { + "base": "

    Describes the instance status.

    ", + "refs": { + "InstanceStatusDetailsList$member": null + } + }, + "InstanceStatusDetailsList": { + "base": null, + "refs": { + "InstanceStatusSummary$Details": "

    The system instance health or application instance health.

    " + } + }, + "InstanceStatusEvent": { + "base": "

    Describes a scheduled event for an instance.

    ", + "refs": { + "InstanceStatusEventList$member": null + } + }, + "InstanceStatusEventList": { + "base": null, + "refs": { + "InstanceStatus$Events": "

    Any scheduled events associated with the instance.

    " + } + }, + "InstanceStatusList": { + "base": null, + "refs": { + "DescribeInstanceStatusResult$InstanceStatuses": "

    One or more instance status descriptions.

    " + } + }, + "InstanceStatusSummary": { + "base": "

    Describes the status of an instance.

    ", + "refs": { + "InstanceStatus$SystemStatus": "

    Reports impaired functionality that stems from issues related to the systems that support an instance, such as hardware failures and network connectivity problems.

    ", + "InstanceStatus$InstanceStatus": "

    Reports impaired functionality that stems from issues internal to the instance, such as impaired reachability.

    " + } + }, + "InstanceType": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$InstanceType": "

    The instance type that the reservation will cover (for example, m1.small). For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    ", + "ImportInstanceLaunchSpecification$InstanceType": "

    The instance type. For more information about the instance types that you can import, see Before You Get Started in the Amazon Elastic Compute Cloud User Guide.

    ", + "Instance$InstanceType": "

    The instance type.

    ", + "InstanceTypeList$member": null, + "LaunchSpecification$InstanceType": "

    The instance type.

    ", + "RequestSpotLaunchSpecification$InstanceType": "

    The instance type.

    ", + "ReservedInstances$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$InstanceType": "

    The instance type for the modified Reserved Instances.

    ", + "ReservedInstancesOffering$InstanceType": "

    The instance type on which the Reserved Instance can be used.

    ", + "RunInstancesRequest$InstanceType": "

    The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

    Default: m1.small

    ", + "SpotFleetLaunchSpecification$InstanceType": "

    The instance type.

    ", + "SpotPrice$InstanceType": "

    The instance type.

    " + } + }, + "InstanceTypeList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$InstanceTypes": "

    Filters the results by the specified instance types.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AllocateHostsRequest$Quantity": "

    The number of Dedicated hosts you want to allocate to your account with these parameters.

    ", + "AssignPrivateIpAddressesRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary IP addresses to assign to the network interface. You can't specify this parameter when also specifying private IP addresses.

    ", + "AttachNetworkInterfaceRequest$DeviceIndex": "

    The index of the device for the network interface attachment.

    ", + "AuthorizeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP type number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "AuthorizeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "AvailableCapacity$AvailableVCpus": "

    The number of vCPUs available on the Dedicated host.

    ", + "CreateCustomerGatewayRequest$BgpAsn": "

    For devices that support BGP, the customer gateway's BGP ASN.

    Default: 65000

    ", + "CreateNetworkAclEntryRequest$RuleNumber": "

    The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.

    Constraints: Positive integer from 1 to 32766

    ", + "CreateNetworkInterfaceRequest$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses to assign to a network interface. When you specify a number of secondary IP addresses, Amazon EC2 selects these IP addresses within the subnet range. You can't specify this option and specify more than one private IP address using privateIpAddresses.

    The number of IP addresses you can assign to a network interface varies by instance type. For more information, see Private IP Addresses Per ENI Per Instance Type in the Amazon Elastic Compute Cloud User Guide.

    ", + "CreateReservedInstancesListingRequest$InstanceCount": "

    The number of instances that are a part of a Reserved Instance account to be listed in the Reserved Instance Marketplace. This number should be less than or equal to the instance count associated with the Reserved Instance ID specified in this call.

    ", + "CreateVolumeRequest$Size": "

    The size of the volume, in GiBs.

    Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 for io1 volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "CreateVolumeRequest$Iops": "

    Only valid for Provisioned IOPS (SSD) volumes. The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 30 IOPS/GiB.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes

    ", + "DeleteNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to delete.

    ", + "DescribeClassicLinkInstancesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeFlowLogsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the flow log IDs parameter in the same request.

    ", + "DescribeHostsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500; if maxResults is given a larger value than 500, you will receive an error. You cannot specify this parameter and the host IDs parameter in the same request.

    ", + "DescribeImportImageTasksRequest$MaxResults": "

    The maximum number of results to return in a single request.

    ", + "DescribeImportSnapshotTasksRequest$MaxResults": "

    The maximum number of results to return in a single request.

    ", + "DescribeInstanceStatusRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    ", + "DescribeInstancesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. You cannot specify this parameter and the instance IDs parameter in the same request.

    ", + "DescribeMovingAddressesRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value outside of this range, an error is returned.

    Default: If no value is provided, the default is 1000.

    ", + "DescribeNatGatewaysRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value specified is greater than 1000, we return only 1000 items.

    ", + "DescribePrefixListsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value specified is greater than 1000, we return only 1000 items.

    ", + "DescribeReservedInstancesOfferingsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. The maximum is 100.

    Default: 100

    ", + "DescribeReservedInstancesOfferingsRequest$MaxInstanceCount": "

    The maximum number of instances to filter when searching for offerings.

    Default: 20

    ", + "DescribeScheduledInstanceAvailabilityRequest$MinSlotDurationInHours": "

    The minimum available duration, in hours. The minimum required duration is 1,200 hours per year. For example, the minimum daily schedule is 4 hours, the minimum weekly schedule is 24 hours, and the minimum monthly schedule is 100 hours.

    ", + "DescribeScheduledInstanceAvailabilityRequest$MaxSlotDurationInHours": "

    The maximum available duration, in hours. This value must be greater than MinSlotDurationInHours and less than 1,720.

    ", + "DescribeScheduledInstanceAvailabilityRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeScheduledInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSnapshotsRequest$MaxResults": "

    The maximum number of snapshot results returned by DescribeSnapshots in paginated output. When this parameter is used, DescribeSnapshots only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeSnapshots request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeSnapshots returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request.

    ", + "DescribeSpotFleetInstancesRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotFleetRequestsRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeSpotPriceHistoryRequest$MaxResults": "

    The maximum number of results to return in a single call. Specify a value between 1 and 1000. The default value is 1000. To retrieve the remaining results, make another call with the returned NextToken value.

    ", + "DescribeTagsRequest$MaxResults": "

    The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned.

    ", + "DescribeVolumeStatusRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumeStatus in paginated output. When this parameter is used, the request only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumeStatus returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVolumesRequest$MaxResults": "

    The maximum number of volume results returned by DescribeVolumes in paginated output. When this parameter is used, DescribeVolumes only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeVolumes request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumes returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

    ", + "DescribeVpcEndpointServicesRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "DescribeVpcEndpointsRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    Constraint: If the value is greater than 1000, we return only 1000 items.

    ", + "EbsBlockDevice$VolumeSize": "

    The size of the volume, in GiB.

    Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 for io1 volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "EbsBlockDevice$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose (SSD) volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose (SSD) baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and 3 to 10000 for General Purpose (SSD) volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create standard or gp2 volumes.

    ", + "HostProperties$Sockets": "

    The number of sockets on the Dedicated host.

    ", + "HostProperties$Cores": "

    The number of cores on the Dedicated host.

    ", + "HostProperties$TotalVCpus": "

    The number of vCPUs on the Dedicated host.

    ", + "IcmpTypeCode$Type": "

    The ICMP type. A value of -1 means all types.

    ", + "IcmpTypeCode$Code": "

    The ICMP code. A value of -1 means all codes for the specified ICMP type.

    ", + "Instance$AmiLaunchIndex": "

    The AMI launch index, which can be used to find this instance in the launch group.

    ", + "InstanceCapacity$AvailableCapacity": "

    The number of instances that can still be launched onto the Dedicated host.

    ", + "InstanceCapacity$TotalCapacity": "

    The total number of instances that can be launched onto the Dedicated host.

    ", + "InstanceCount$InstanceCount": "

    The number of listed Reserved Instances in the state specified by the state.

    ", + "InstanceNetworkInterfaceAttachment$DeviceIndex": "

    The index of the device on the instance for the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$DeviceIndex": "

    The index of the device on the instance for the network interface attachment. If you are specifying a network interface in a RunInstances request, you must provide the device index.

    ", + "InstanceNetworkInterfaceSpecification$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option.

    ", + "InstanceState$Code": "

    The low byte represents the state. The high byte is an opaque internal value and should be ignored.

    • 0 : pending

    • 16 : running

    • 32 : shutting-down

    • 48 : terminated

    • 64 : stopping

    • 80 : stopped

    ", + "IpPermission$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

    ", + "IpPermission$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code. A value of -1 indicates all ICMP codes for the specified ICMP type.

    ", + "ModifySpotFleetRequestRequest$TargetCapacity": "

    The size of the fleet.

    ", + "NetworkAclEntry$RuleNumber": "

    The rule number for the entry. ACL entries are processed in ascending order by rule number.

    ", + "NetworkInterfaceAttachment$DeviceIndex": "

    The device index of the network interface attachment on the instance.

    ", + "OccurrenceDayRequestSet$member": null, + "OccurrenceDaySet$member": null, + "PortRange$From": "

    The first port in the range.

    ", + "PortRange$To": "

    The last port in the range.

    ", + "PricingDetail$Count": "

    The number of reservations available for the price.

    ", + "PurchaseRequest$InstanceCount": "

    The number of instances.

    ", + "PurchaseReservedInstancesOfferingRequest$InstanceCount": "

    The number of Reserved Instances to purchase.

    ", + "ReplaceNetworkAclEntryRequest$RuleNumber": "

    The rule number of the entry to replace.

    ", + "RequestSpotInstancesRequest$InstanceCount": "

    The maximum number of Spot instances to launch.

    Default: 1

    ", + "RequestSpotInstancesRequest$BlockDurationMinutes": "

    The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

    The duration period starts as soon as your Spot instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.

    Note that you can't specify an Availability Zone group or a launch group if you specify a duration.

    ", + "ReservedInstances$InstanceCount": "

    The number of reservations purchased.

    ", + "ReservedInstancesConfiguration$InstanceCount": "

    The number of modified Reserved Instances.

    ", + "RevokeSecurityGroupEgressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP type number. We recommend that you specify the port range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$FromPort": "

    The start of port range for the TCP and UDP protocols, or an ICMP type number. For the ICMP type number, use -1 to specify all ICMP types.

    ", + "RevokeSecurityGroupIngressRequest$ToPort": "

    The end of port range for the TCP and UDP protocols, or an ICMP code number. For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.

    ", + "RunInstancesRequest$MinCount": "

    The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

    ", + "RunInstancesRequest$MaxCount": "

    The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount.

    Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.

    ", + "RunScheduledInstancesRequest$InstanceCount": "

    The number of instances.

    Default: 1

    ", + "ScheduledInstance$SlotDurationInHours": "

    The number of hours in the schedule.

    ", + "ScheduledInstance$TotalScheduledInstanceHours": "

    The total number of hours for a single instance for the entire term.

    ", + "ScheduledInstance$InstanceCount": "

    The number of instances.

    ", + "ScheduledInstanceAvailability$SlotDurationInHours": "

    The number of hours in the schedule.

    ", + "ScheduledInstanceAvailability$TotalScheduledInstanceHours": "

    The total number of hours for a single instance for the entire term.

    ", + "ScheduledInstanceAvailability$AvailableInstanceCount": "

    The number of available instances.

    ", + "ScheduledInstanceAvailability$MinTermDurationInDays": "

    The minimum term. The only possible value is 365 days.

    ", + "ScheduledInstanceAvailability$MaxTermDurationInDays": "

    The maximum term. The only possible value is 365 days.

    ", + "ScheduledInstanceRecurrence$Interval": "

    The interval quantity. The interval unit depends on the value of frequency. For example, every 2 weeks or every 2 months.

    ", + "ScheduledInstanceRecurrenceRequest$Interval": "

    The interval quantity. The interval unit depends on the value of Frequency. For example, every 2 weeks or every 2 months.

    ", + "ScheduledInstancesEbs$VolumeSize": "

    The size of the volume, in GiB.

    Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

    ", + "ScheduledInstancesEbs$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose (SSD) volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose (SSD) baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and 3 to 10000 for General Purpose (SSD) volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create standard or gp2 volumes.

    ", + "ScheduledInstancesNetworkInterface$DeviceIndex": "

    The index of the device for the network interface attachment.

    ", + "ScheduledInstancesNetworkInterface$SecondaryPrivateIpAddressCount": "

    The number of secondary private IP addresses.

    ", + "Snapshot$VolumeSize": "

    The size of the volume, in GiB.

    ", + "SpotFleetRequestConfigData$TargetCapacity": "

    The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O.

    ", + "SpotInstanceRequest$BlockDurationMinutes": "

    The duration for the Spot instance, in minutes.

    ", + "Subnet$AvailableIpAddressCount": "

    The number of unused IP addresses in the subnet. Note that the IP addresses for any stopped instances are considered unavailable.

    ", + "VgwTelemetry$AcceptedRouteCount": "

    The number of accepted routes.

    ", + "Volume$Size": "

    The size of the volume, in GiBs.

    ", + "Volume$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose (SSD) volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose (SSD) baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

    Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and 3 to 10000 for General Purpose (SSD) volumes.

    Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create standard or gp2 volumes.

    " + } + }, + "InternetGateway": { + "base": "

    Describes an Internet gateway.

    ", + "refs": { + "CreateInternetGatewayResult$InternetGateway": "

    Information about the Internet gateway.

    ", + "InternetGatewayList$member": null + } + }, + "InternetGatewayAttachment": { + "base": "

    Describes the attachment of a VPC to an Internet gateway.

    ", + "refs": { + "InternetGatewayAttachmentList$member": null + } + }, + "InternetGatewayAttachmentList": { + "base": null, + "refs": { + "InternetGateway$Attachments": "

    Any VPCs attached to the Internet gateway.

    " + } + }, + "InternetGatewayList": { + "base": null, + "refs": { + "DescribeInternetGatewaysResult$InternetGateways": "

    Information about one or more Internet gateways.

    " + } + }, + "IpPermission": { + "base": "

    Describes a security group rule.

    ", + "refs": { + "IpPermissionList$member": null + } + }, + "IpPermissionList": { + "base": null, + "refs": { + "AuthorizeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "AuthorizeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. Can be used to specify multiple rules in a single command.

    ", + "RevokeSecurityGroupEgressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a destination security group and a CIDR IP address range.

    ", + "RevokeSecurityGroupIngressRequest$IpPermissions": "

    A set of IP permissions. You can't specify a source security group and a CIDR IP address range.

    ", + "SecurityGroup$IpPermissions": "

    One or more inbound rules associated with the security group.

    ", + "SecurityGroup$IpPermissionsEgress": "

    [EC2-VPC] One or more outbound rules associated with the security group.

    " + } + }, + "IpRange": { + "base": "

    Describes an IP range.

    ", + "refs": { + "IpRangeList$member": null + } + }, + "IpRangeList": { + "base": null, + "refs": { + "IpPermission$IpRanges": "

    One or more IP ranges.

    " + } + }, + "KeyNameStringList": { + "base": null, + "refs": { + "DescribeKeyPairsRequest$KeyNames": "

    One or more key pair names.

    Default: Describes all your key pairs.

    " + } + }, + "KeyPair": { + "base": "

    Describes a key pair.

    ", + "refs": { + } + }, + "KeyPairInfo": { + "base": "

    Describes a key pair.

    ", + "refs": { + "KeyPairList$member": null + } + }, + "KeyPairList": { + "base": null, + "refs": { + "DescribeKeyPairsResult$KeyPairs": "

    Information about one or more key pairs.

    " + } + }, + "LaunchPermission": { + "base": "

    Describes a launch permission.

    ", + "refs": { + "LaunchPermissionList$member": null + } + }, + "LaunchPermissionList": { + "base": null, + "refs": { + "ImageAttribute$LaunchPermissions": "

    One or more launch permissions.

    ", + "LaunchPermissionModifications$Add": "

    The AWS account ID to add to the list of launch permissions for the AMI.

    ", + "LaunchPermissionModifications$Remove": "

    The AWS account ID to remove from the list of launch permissions for the AMI.

    " + } + }, + "LaunchPermissionModifications": { + "base": "

    Describes a launch permission modification.

    ", + "refs": { + "ModifyImageAttributeRequest$LaunchPermission": "

    A launch permission modification.

    " + } + }, + "LaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "SpotInstanceRequest$LaunchSpecification": "

    Additional information for launching instances.

    " + } + }, + "LaunchSpecsList": { + "base": null, + "refs": { + "SpotFleetRequestConfigData$LaunchSpecifications": "

    Information about the launch specifications for the Spot fleet request.

    " + } + }, + "ListingState": { + "base": null, + "refs": { + "InstanceCount$State": "

    The states of the listed Reserved Instances.

    " + } + }, + "ListingStatus": { + "base": null, + "refs": { + "ReservedInstancesListing$Status": "

    The status of the Reserved Instance listing.

    " + } + }, + "Long": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$MinDuration": "

    The minimum duration (in seconds) to filter when searching for offerings.

    Default: 2592000 (1 month)

    ", + "DescribeReservedInstancesOfferingsRequest$MaxDuration": "

    The maximum duration (in seconds) to filter when searching for offerings.

    Default: 94608000 (3 years)

    ", + "DiskImageDescription$Size": "

    The size of the disk image, in GiB.

    ", + "DiskImageDetail$Bytes": "

    The size of the disk image, in GiB.

    ", + "DiskImageVolumeDescription$Size": "

    The size of the volume, in GiB.

    ", + "ImportInstanceVolumeDetailItem$BytesConverted": "

    The number of bytes converted so far.

    ", + "ImportVolumeTaskDetails$BytesConverted": "

    The number of bytes converted so far.

    ", + "PriceSchedule$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "PriceScheduleSpecification$Term": "

    The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

    ", + "ReservedInstances$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "ReservedInstancesOffering$Duration": "

    The duration of the Reserved Instance, in seconds.

    ", + "VolumeDetail$Size": "

    The size of the volume, in GiB.

    " + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeVpcClassicLinkDnsSupportRequest$MaxResults": "

    The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

    " + } + }, + "ModifyHostsRequest": { + "base": null, + "refs": { + } + }, + "ModifyHostsResult": { + "base": null, + "refs": { + } + }, + "ModifyIdFormatRequest": { + "base": null, + "refs": { + } + }, + "ModifyImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyInstancePlacementRequest": { + "base": null, + "refs": { + } + }, + "ModifyInstancePlacementResult": { + "base": null, + "refs": { + } + }, + "ModifyNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyReservedInstancesRequest": { + "base": null, + "refs": { + } + }, + "ModifyReservedInstancesResult": { + "base": null, + "refs": { + } + }, + "ModifySnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifySpotFleetRequestRequest": { + "base": "

    Contains the parameters for ModifySpotFleetRequest.

    ", + "refs": { + } + }, + "ModifySpotFleetRequestResponse": { + "base": "

    Contains the output of ModifySpotFleetRequest.

    ", + "refs": { + } + }, + "ModifySubnetAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVolumeAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcAttributeRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcEndpointRequest": { + "base": null, + "refs": { + } + }, + "ModifyVpcEndpointResult": { + "base": null, + "refs": { + } + }, + "MonitorInstancesRequest": { + "base": null, + "refs": { + } + }, + "MonitorInstancesResult": { + "base": null, + "refs": { + } + }, + "Monitoring": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "Instance$Monitoring": "

    The monitoring information for the instance.

    ", + "InstanceMonitoring$Monitoring": "

    The monitoring information.

    " + } + }, + "MonitoringState": { + "base": null, + "refs": { + "Monitoring$State": "

    Indicates whether monitoring is enabled for the instance.

    " + } + }, + "MoveAddressToVpcRequest": { + "base": null, + "refs": { + } + }, + "MoveAddressToVpcResult": { + "base": null, + "refs": { + } + }, + "MoveStatus": { + "base": null, + "refs": { + "MovingAddressStatus$MoveStatus": "

    The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.

    " + } + }, + "MovingAddressStatus": { + "base": "

    Describes the status of a moving Elastic IP address.

    ", + "refs": { + "MovingAddressStatusSet$member": null + } + }, + "MovingAddressStatusSet": { + "base": null, + "refs": { + "DescribeMovingAddressesResult$MovingAddressStatuses": "

    The status for each Elastic IP address.

    " + } + }, + "NatGateway": { + "base": "

    Describes a NAT gateway.

    ", + "refs": { + "CreateNatGatewayResult$NatGateway": "

    Information about the NAT gateway.

    ", + "NatGatewayList$member": null + } + }, + "NatGatewayAddress": { + "base": "

    Describes the IP addresses and network interface associated with a NAT gateway.

    ", + "refs": { + "NatGatewayAddressList$member": null + } + }, + "NatGatewayAddressList": { + "base": null, + "refs": { + "NatGateway$NatGatewayAddresses": "

    Information about the IP addresses and network interface associated with the NAT gateway.

    " + } + }, + "NatGatewayList": { + "base": null, + "refs": { + "DescribeNatGatewaysResult$NatGateways": "

    Information about the NAT gateways.

    " + } + }, + "NatGatewayState": { + "base": null, + "refs": { + "NatGateway$State": "

    The state of the NAT gateway.

    " + } + }, + "NetworkAcl": { + "base": "

    Describes a network ACL.

    ", + "refs": { + "CreateNetworkAclResult$NetworkAcl": "

    Information about the network ACL.

    ", + "NetworkAclList$member": null + } + }, + "NetworkAclAssociation": { + "base": "

    Describes an association between a network ACL and a subnet.

    ", + "refs": { + "NetworkAclAssociationList$member": null + } + }, + "NetworkAclAssociationList": { + "base": null, + "refs": { + "NetworkAcl$Associations": "

    Any associations between the network ACL and one or more subnets

    " + } + }, + "NetworkAclEntry": { + "base": "

    Describes an entry in a network ACL.

    ", + "refs": { + "NetworkAclEntryList$member": null + } + }, + "NetworkAclEntryList": { + "base": null, + "refs": { + "NetworkAcl$Entries": "

    One or more entries (rules) in the network ACL.

    " + } + }, + "NetworkAclList": { + "base": null, + "refs": { + "DescribeNetworkAclsResult$NetworkAcls": "

    Information about one or more network ACLs.

    " + } + }, + "NetworkInterface": { + "base": "

    Describes a network interface.

    ", + "refs": { + "CreateNetworkInterfaceResult$NetworkInterface": "

    Information about the network interface.

    ", + "NetworkInterfaceList$member": null + } + }, + "NetworkInterfaceAssociation": { + "base": "

    Describes association information for an Elastic IP address.

    ", + "refs": { + "NetworkInterface$Association": "

    The association information for an Elastic IP associated with the network interface.

    ", + "NetworkInterfacePrivateIpAddress$Association": "

    The association information for an Elastic IP address associated with the network interface.

    " + } + }, + "NetworkInterfaceAttachment": { + "base": "

    Describes a network interface attachment.

    ", + "refs": { + "DescribeNetworkInterfaceAttributeResult$Attachment": "

    The attachment (if any) of the network interface.

    ", + "NetworkInterface$Attachment": "

    The network interface attachment.

    " + } + }, + "NetworkInterfaceAttachmentChanges": { + "base": "

    Describes an attachment change.

    ", + "refs": { + "ModifyNetworkInterfaceAttributeRequest$Attachment": "

    Information about the interface attachment. If modifying the 'delete on termination' attribute, you must specify the ID of the interface attachment.

    " + } + }, + "NetworkInterfaceAttribute": { + "base": null, + "refs": { + "DescribeNetworkInterfaceAttributeRequest$Attribute": "

    The attribute of the network interface.

    " + } + }, + "NetworkInterfaceIdList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesRequest$NetworkInterfaceIds": "

    One or more network interface IDs.

    Default: Describes all your network interfaces.

    " + } + }, + "NetworkInterfaceList": { + "base": null, + "refs": { + "DescribeNetworkInterfacesResult$NetworkInterfaces": "

    Information about one or more network interfaces.

    " + } + }, + "NetworkInterfacePrivateIpAddress": { + "base": "

    Describes the private IP address of a network interface.

    ", + "refs": { + "NetworkInterfacePrivateIpAddressList$member": null + } + }, + "NetworkInterfacePrivateIpAddressList": { + "base": null, + "refs": { + "NetworkInterface$PrivateIpAddresses": "

    The private IP addresses associated with the network interface.

    " + } + }, + "NetworkInterfaceStatus": { + "base": null, + "refs": { + "InstanceNetworkInterface$Status": "

    The status of the network interface.

    ", + "NetworkInterface$Status": "

    The status of the network interface.

    " + } + }, + "NetworkInterfaceType": { + "base": null, + "refs": { + "NetworkInterface$InterfaceType": "

    The type of interface.

    " + } + }, + "NewDhcpConfiguration": { + "base": null, + "refs": { + "NewDhcpConfigurationList$member": null + } + }, + "NewDhcpConfigurationList": { + "base": null, + "refs": { + "CreateDhcpOptionsRequest$DhcpConfigurations": "

    A DHCP configuration option.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeVpcClassicLinkDnsSupportRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcClassicLinkDnsSupportResult$NextToken": "

    The token to use when requesting the next set of items.

    " + } + }, + "OccurrenceDayRequestSet": { + "base": null, + "refs": { + "ScheduledInstanceRecurrenceRequest$OccurrenceDays": "

    The days. For a monthly schedule, this is one or more days of the month (1-31). For a weekly schedule, this is one or more days of the week (1-7, where 1 is Sunday). You can't specify this value with a daily schedule. If the occurrence is relative to the end of the month, you can specify only a single day.

    " + } + }, + "OccurrenceDaySet": { + "base": null, + "refs": { + "ScheduledInstanceRecurrence$OccurrenceDaySet": "

    The days. For a monthly schedule, this is one or more days of the month (1-31). For a weekly schedule, this is one or more days of the week (1-7, where 1 is Sunday).

    " + } + }, + "OfferingTypeValues": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "DescribeReservedInstancesRequest$OfferingType": "

    The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the Medium Utilization Reserved Instance offering type.

    ", + "ReservedInstances$OfferingType": "

    The Reserved Instance offering type.

    ", + "ReservedInstancesOffering$OfferingType": "

    The Reserved Instance offering type.

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$OperationType": "

    The operation type.

    ", + "ModifySnapshotAttributeRequest$OperationType": "

    The type of operation to perform to the attribute.

    " + } + }, + "OwnerStringList": { + "base": null, + "refs": { + "DescribeImagesRequest$Owners": "

    Filters the images by the owner. Specify an AWS account ID, amazon (owner is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the sender of the request). Omitting this option returns all images for which you have launch permissions, regardless of ownership.

    ", + "DescribeSnapshotsRequest$OwnerIds": "

    Returns the snapshots owned by the specified owner. Multiple owners can be specified.

    " + } + }, + "PermissionGroup": { + "base": null, + "refs": { + "CreateVolumePermission$Group": "

    The specific group that is to be added or removed from a volume's list of create volume permissions.

    ", + "LaunchPermission$Group": "

    The name of the group.

    " + } + }, + "Placement": { + "base": "

    Describes the placement for the instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "Instance$Placement": "

    The location where the instance launched, if applicable.

    ", + "RunInstancesRequest$Placement": "

    The placement for the instance.

    " + } + }, + "PlacementGroup": { + "base": "

    Describes a placement group.

    ", + "refs": { + "PlacementGroupList$member": null + } + }, + "PlacementGroupList": { + "base": null, + "refs": { + "DescribePlacementGroupsResult$PlacementGroups": "

    One or more placement groups.

    " + } + }, + "PlacementGroupState": { + "base": null, + "refs": { + "PlacementGroup$State": "

    The state of the placement group.

    " + } + }, + "PlacementGroupStringList": { + "base": null, + "refs": { + "DescribePlacementGroupsRequest$GroupNames": "

    One or more placement group names.

    Default: Describes all your placement groups, or only those otherwise specified.

    " + } + }, + "PlacementStrategy": { + "base": null, + "refs": { + "CreatePlacementGroupRequest$Strategy": "

    The placement strategy.

    ", + "PlacementGroup$Strategy": "

    The placement strategy.

    " + } + }, + "PlatformValues": { + "base": null, + "refs": { + "Image$Platform": "

    The value is Windows for Windows AMIs; otherwise blank.

    ", + "ImportInstanceRequest$Platform": "

    The instance operating system.

    ", + "ImportInstanceTaskDetails$Platform": "

    The instance operating system.

    ", + "Instance$Platform": "

    The value is Windows for Windows instances; otherwise blank.

    " + } + }, + "PortRange": { + "base": "

    Describes a range of ports.

    ", + "refs": { + "CreateNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "NetworkAclEntry$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to.

    ", + "ReplaceNetworkAclEntryRequest$PortRange": "

    TCP or UDP protocols: The range of ports the rule applies to. Required if specifying 6 (TCP) or 17 (UDP) for the protocol.

    " + } + }, + "PrefixList": { + "base": "

    Describes prefixes for AWS services.

    ", + "refs": { + "PrefixListSet$member": null + } + }, + "PrefixListId": { + "base": "

    The ID of the prefix.

    ", + "refs": { + "PrefixListIdList$member": null + } + }, + "PrefixListIdList": { + "base": null, + "refs": { + "IpPermission$PrefixListIds": "

    (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress request, this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.

    " + } + }, + "PrefixListSet": { + "base": null, + "refs": { + "DescribePrefixListsResult$PrefixLists": "

    All available prefix lists.

    " + } + }, + "PriceSchedule": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleList$member": null + } + }, + "PriceScheduleList": { + "base": null, + "refs": { + "ReservedInstancesListing$PriceSchedules": "

    The price of the Reserved Instance listing.

    " + } + }, + "PriceScheduleSpecification": { + "base": "

    Describes the price for a Reserved Instance.

    ", + "refs": { + "PriceScheduleSpecificationList$member": null + } + }, + "PriceScheduleSpecificationList": { + "base": null, + "refs": { + "CreateReservedInstancesListingRequest$PriceSchedules": "

    A list specifying the price of the Reserved Instance for each month remaining in the Reserved Instance term.

    " + } + }, + "PricingDetail": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "PricingDetailsList$member": null + } + }, + "PricingDetailsList": { + "base": null, + "refs": { + "ReservedInstancesOffering$PricingDetails": "

    The pricing details of the Reserved Instance offering.

    " + } + }, + "PrivateIpAddressConfigSet": { + "base": null, + "refs": { + "ScheduledInstancesNetworkInterface$PrivateIpAddressConfigs": "

    The private IP addresses.

    " + } + }, + "PrivateIpAddressSpecification": { + "base": "

    Describes a secondary private IP address for a network interface.

    ", + "refs": { + "PrivateIpAddressSpecificationList$member": null + } + }, + "PrivateIpAddressSpecificationList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$PrivateIpAddresses": "

    One or more private IP addresses.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddresses": "

    One or more private IP addresses to assign to the network interface. Only one private IP address can be designated as primary.

    " + } + }, + "PrivateIpAddressStringList": { + "base": null, + "refs": { + "AssignPrivateIpAddressesRequest$PrivateIpAddresses": "

    One or more IP addresses to be assigned as a secondary private IP address to the network interface. You can't specify this parameter when also specifying a number of secondary IP addresses.

    If you don't specify an IP address, Amazon EC2 automatically selects an IP address within the subnet range.

    ", + "UnassignPrivateIpAddressesRequest$PrivateIpAddresses": "

    The secondary private IP addresses to unassign from the network interface. You can specify this option multiple times to unassign more than one IP address.

    " + } + }, + "ProductCode": { + "base": "

    Describes a product code.

    ", + "refs": { + "ProductCodeList$member": null + } + }, + "ProductCodeList": { + "base": null, + "refs": { + "DescribeSnapshotAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "DescribeVolumeAttributeResult$ProductCodes": "

    A list of product codes.

    ", + "Image$ProductCodes": "

    Any product codes associated with the AMI.

    ", + "ImageAttribute$ProductCodes": "

    One or more product codes.

    ", + "Instance$ProductCodes": "

    The product codes attached to this instance, if applicable.

    ", + "InstanceAttribute$ProductCodes": "

    A list of product codes.

    " + } + }, + "ProductCodeStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$ProductCodes": "

    One or more product codes. After you add a product code to an AMI, it can't be removed. This is only valid when modifying the productCodes attribute.

    " + } + }, + "ProductCodeValues": { + "base": null, + "refs": { + "ProductCode$ProductCodeType": "

    The type of product code.

    " + } + }, + "ProductDescriptionList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryRequest$ProductDescriptions": "

    Filters the results by the specified basic product descriptions.

    " + } + }, + "PropagatingVgw": { + "base": "

    Describes a virtual private gateway propagating route.

    ", + "refs": { + "PropagatingVgwList$member": null + } + }, + "PropagatingVgwList": { + "base": null, + "refs": { + "RouteTable$PropagatingVgws": "

    Any virtual private gateway (VGW) propagating routes.

    " + } + }, + "PublicIpStringList": { + "base": null, + "refs": { + "DescribeAddressesRequest$PublicIps": "

    [EC2-Classic] One or more Elastic IP addresses.

    Default: Describes all your Elastic IP addresses.

    " + } + }, + "PurchaseRequest": { + "base": "

    Describes a request to purchase Scheduled Instances.

    ", + "refs": { + "PurchaseRequestSet$member": null + } + }, + "PurchaseRequestSet": { + "base": null, + "refs": { + "PurchaseScheduledInstancesRequest$PurchaseRequests": "

    One or more purchase requests.

    " + } + }, + "PurchaseReservedInstancesOfferingRequest": { + "base": null, + "refs": { + } + }, + "PurchaseReservedInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "PurchaseScheduledInstancesRequest": { + "base": "

    Contains the parameters for PurchaseScheduledInstances.

    ", + "refs": { + } + }, + "PurchaseScheduledInstancesResult": { + "base": "

    Contains the output of PurchaseScheduledInstances.

    ", + "refs": { + } + }, + "PurchasedScheduledInstanceSet": { + "base": null, + "refs": { + "PurchaseScheduledInstancesResult$ScheduledInstanceSet": "

    Information about the Scheduled Instances.

    " + } + }, + "RIProductDescription": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ProductDescription": "

    The Reserved Instance product platform description. Instances that include (Amazon VPC) in the description are for use with Amazon VPC.

    ", + "ReservedInstances$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "ReservedInstancesOffering$ProductDescription": "

    The Reserved Instance product platform description.

    ", + "SpotInstanceRequest$ProductDescription": "

    The product description associated with the Spot instance.

    ", + "SpotPrice$ProductDescription": "

    A general description of the AMI.

    " + } + }, + "ReasonCodesList": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$ReasonCodes": "

    One or more reason codes that describes the health state of your instance.

    • instance-stuck-in-state: My instance is stuck in a state.

    • unresponsive: My instance is unresponsive.

    • not-accepting-credentials: My instance is not accepting my credentials.

    • password-not-available: A password is not available for my instance.

    • performance-network: My instance is experiencing performance problems which I believe are network related.

    • performance-instance-store: My instance is experiencing performance problems which I believe are related to the instance stores.

    • performance-ebs-volume: My instance is experiencing performance problems which I believe are related to an EBS volume.

    • performance-other: My instance is experiencing performance problems.

    • other: [explain using the description parameter]

    " + } + }, + "RebootInstancesRequest": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    Describes a recurring charge.

    ", + "refs": { + "RecurringChargesList$member": null + } + }, + "RecurringChargeFrequency": { + "base": null, + "refs": { + "RecurringCharge$Frequency": "

    The frequency of the recurring charge.

    " + } + }, + "RecurringChargesList": { + "base": null, + "refs": { + "ReservedInstances$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    ", + "ReservedInstancesOffering$RecurringCharges": "

    The recurring charge tag assigned to the resource.

    " + } + }, + "Region": { + "base": "

    Describes a region.

    ", + "refs": { + "RegionList$member": null + } + }, + "RegionList": { + "base": null, + "refs": { + "DescribeRegionsResult$Regions": "

    Information about one or more regions.

    " + } + }, + "RegionNameStringList": { + "base": null, + "refs": { + "DescribeRegionsRequest$RegionNames": "

    The names of one or more regions.

    " + } + }, + "RegisterImageRequest": { + "base": null, + "refs": { + } + }, + "RegisterImageResult": { + "base": null, + "refs": { + } + }, + "RejectVpcPeeringConnectionRequest": { + "base": null, + "refs": { + } + }, + "RejectVpcPeeringConnectionResult": { + "base": null, + "refs": { + } + }, + "ReleaseAddressRequest": { + "base": null, + "refs": { + } + }, + "ReleaseHostsRequest": { + "base": null, + "refs": { + } + }, + "ReleaseHostsResult": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclAssociationRequest": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclAssociationResult": { + "base": null, + "refs": { + } + }, + "ReplaceNetworkAclEntryRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteTableAssociationRequest": { + "base": null, + "refs": { + } + }, + "ReplaceRouteTableAssociationResult": { + "base": null, + "refs": { + } + }, + "ReportInstanceReasonCodes": { + "base": null, + "refs": { + "ReasonCodesList$member": null + } + }, + "ReportInstanceStatusRequest": { + "base": null, + "refs": { + } + }, + "ReportStatusType": { + "base": null, + "refs": { + "ReportInstanceStatusRequest$Status": "

    The status of all instances listed.

    " + } + }, + "RequestHostIdList": { + "base": null, + "refs": { + "DescribeHostsRequest$HostIds": "

    The IDs of the Dedicated hosts. The IDs are used for targeted instance launches.

    ", + "ModifyHostsRequest$HostIds": "

    The host IDs of the Dedicated hosts you want to modify.

    ", + "ReleaseHostsRequest$HostIds": "

    The IDs of the Dedicated hosts you want to release.

    " + } + }, + "RequestSpotFleetRequest": { + "base": "

    Contains the parameters for RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotFleetResponse": { + "base": "

    Contains the output of RequestSpotFleet.

    ", + "refs": { + } + }, + "RequestSpotInstancesRequest": { + "base": "

    Contains the parameters for RequestSpotInstances.

    ", + "refs": { + } + }, + "RequestSpotInstancesResult": { + "base": "

    Contains the output of RequestSpotInstances.

    ", + "refs": { + } + }, + "RequestSpotLaunchSpecification": { + "base": "

    Describes the launch specification for an instance.

    ", + "refs": { + "RequestSpotInstancesRequest$LaunchSpecification": null + } + }, + "Reservation": { + "base": "

    Describes a reservation.

    ", + "refs": { + "ReservationList$member": null + } + }, + "ReservationList": { + "base": null, + "refs": { + "DescribeInstancesResult$Reservations": "

    Zero or more reservations.

    " + } + }, + "ReservedInstanceLimitPrice": { + "base": "

    Describes the limit price of a Reserved Instance offering.

    ", + "refs": { + "PurchaseReservedInstancesOfferingRequest$LimitPrice": "

    Specified for Reserved Instance Marketplace offerings to limit the total order and ensure that the Reserved Instances are not purchased at unexpected prices.

    " + } + }, + "ReservedInstanceState": { + "base": null, + "refs": { + "ReservedInstances$State": "

    The state of the Reserved Instance purchase.

    " + } + }, + "ReservedInstances": { + "base": "

    Describes a Reserved Instance.

    ", + "refs": { + "ReservedInstancesList$member": null + } + }, + "ReservedInstancesConfiguration": { + "base": "

    Describes the configuration settings for the modified Reserved Instances.

    ", + "refs": { + "ReservedInstancesConfigurationList$member": null, + "ReservedInstancesModificationResult$TargetConfiguration": "

    The target Reserved Instances configurations supplied as part of the modification request.

    " + } + }, + "ReservedInstancesConfigurationList": { + "base": null, + "refs": { + "ModifyReservedInstancesRequest$TargetConfigurations": "

    The configuration settings for the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesId": { + "base": "

    Describes the ID of a Reserved Instance.

    ", + "refs": { + "ReservedIntancesIds$member": null + } + }, + "ReservedInstancesIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesRequest$ReservedInstancesIds": "

    One or more Reserved Instance IDs.

    Default: Describes all your Reserved Instances, or only those otherwise specified.

    ", + "ModifyReservedInstancesRequest$ReservedInstancesIds": "

    The IDs of the Reserved Instances to modify.

    " + } + }, + "ReservedInstancesList": { + "base": null, + "refs": { + "DescribeReservedInstancesResult$ReservedInstances": "

    A list of Reserved Instances.

    " + } + }, + "ReservedInstancesListing": { + "base": "

    Describes a Reserved Instance listing.

    ", + "refs": { + "ReservedInstancesListingList$member": null + } + }, + "ReservedInstancesListingList": { + "base": null, + "refs": { + "CancelReservedInstancesListingResult$ReservedInstancesListings": "

    The Reserved Instance listing.

    ", + "CreateReservedInstancesListingResult$ReservedInstancesListings": "

    Information about the Reserved Instance listing.

    ", + "DescribeReservedInstancesListingsResult$ReservedInstancesListings": "

    Information about the Reserved Instance listing.

    " + } + }, + "ReservedInstancesModification": { + "base": "

    Describes a Reserved Instance modification.

    ", + "refs": { + "ReservedInstancesModificationList$member": null + } + }, + "ReservedInstancesModificationIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsRequest$ReservedInstancesModificationIds": "

    IDs for the submitted modification request.

    " + } + }, + "ReservedInstancesModificationList": { + "base": null, + "refs": { + "DescribeReservedInstancesModificationsResult$ReservedInstancesModifications": "

    The Reserved Instance modification information.

    " + } + }, + "ReservedInstancesModificationResult": { + "base": null, + "refs": { + "ReservedInstancesModificationResultList$member": null + } + }, + "ReservedInstancesModificationResultList": { + "base": null, + "refs": { + "ReservedInstancesModification$ModificationResults": "

    Contains target configurations along with their corresponding new Reserved Instance IDs.

    " + } + }, + "ReservedInstancesOffering": { + "base": "

    Describes a Reserved Instance offering.

    ", + "refs": { + "ReservedInstancesOfferingList$member": null + } + }, + "ReservedInstancesOfferingIdStringList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsRequest$ReservedInstancesOfferingIds": "

    One or more Reserved Instances offering IDs.

    " + } + }, + "ReservedInstancesOfferingList": { + "base": null, + "refs": { + "DescribeReservedInstancesOfferingsResult$ReservedInstancesOfferings": "

    A list of Reserved Instances offerings.

    " + } + }, + "ReservedIntancesIds": { + "base": null, + "refs": { + "ReservedInstancesModification$ReservedInstancesIds": "

    The IDs of one or more Reserved Instances.

    " + } + }, + "ResetImageAttributeName": { + "base": null, + "refs": { + "ResetImageAttributeRequest$Attribute": "

    The attribute to reset (currently you can only reset the launch permission attribute).

    " + } + }, + "ResetImageAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetInstanceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetNetworkInterfaceAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResetSnapshotAttributeRequest": { + "base": null, + "refs": { + } + }, + "ResourceIdList": { + "base": null, + "refs": { + "CreateTagsRequest$Resources": "

    The IDs of one or more resources to tag. For example, ami-1a2b3c4d.

    ", + "DeleteTagsRequest$Resources": "

    The ID of the resource. For example, ami-1a2b3c4d. You can specify more than one resource ID.

    " + } + }, + "ResourceType": { + "base": null, + "refs": { + "TagDescription$ResourceType": "

    The resource type.

    " + } + }, + "ResponseHostIdList": { + "base": null, + "refs": { + "AllocateHostsResult$HostIds": "

    The ID of the allocated Dedicated host. This is used when you want to launch an instance onto a specific host.

    ", + "ModifyHostsResult$Successful": "

    The IDs of the Dedicated hosts that were successfully modified.

    ", + "ReleaseHostsResult$Successful": "

    The IDs of the Dedicated hosts that were successfully released.

    " + } + }, + "RestorableByStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$RestorableByUserIds": "

    One or more AWS accounts IDs that can create volumes from the snapshot.

    " + } + }, + "RestoreAddressToClassicRequest": { + "base": null, + "refs": { + } + }, + "RestoreAddressToClassicResult": { + "base": null, + "refs": { + } + }, + "RevokeSecurityGroupEgressRequest": { + "base": null, + "refs": { + } + }, + "RevokeSecurityGroupIngressRequest": { + "base": null, + "refs": { + } + }, + "Route": { + "base": "

    Describes a route in a route table.

    ", + "refs": { + "RouteList$member": null + } + }, + "RouteList": { + "base": null, + "refs": { + "RouteTable$Routes": "

    The routes in the route table.

    " + } + }, + "RouteOrigin": { + "base": null, + "refs": { + "Route$Origin": "

    Describes how the route was created.

    • CreateRouteTable indicates that route was automatically created when the route table was created.
    • CreateRoute indicates that the route was manually added to the route table.
    • EnableVgwRoutePropagation indicates that the route was propagated by route propagation.
    " + } + }, + "RouteState": { + "base": null, + "refs": { + "Route$State": "

    The state of the route. The blackhole state indicates that the route's target isn't available (for example, the specified gateway isn't attached to the VPC, or the specified NAT instance has been terminated).

    " + } + }, + "RouteTable": { + "base": "

    Describes a route table.

    ", + "refs": { + "CreateRouteTableResult$RouteTable": "

    Information about the route table.

    ", + "RouteTableList$member": null + } + }, + "RouteTableAssociation": { + "base": "

    Describes an association between a route table and a subnet.

    ", + "refs": { + "RouteTableAssociationList$member": null + } + }, + "RouteTableAssociationList": { + "base": null, + "refs": { + "RouteTable$Associations": "

    The associations between the route table and one or more subnets.

    " + } + }, + "RouteTableList": { + "base": null, + "refs": { + "DescribeRouteTablesResult$RouteTables": "

    Information about one or more route tables.

    " + } + }, + "RuleAction": { + "base": null, + "refs": { + "CreateNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "NetworkAclEntry$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    ", + "ReplaceNetworkAclEntryRequest$RuleAction": "

    Indicates whether to allow or deny the traffic that matches the rule.

    " + } + }, + "RunInstancesMonitoringEnabled": { + "base": "

    Describes the monitoring for the instance.

    ", + "refs": { + "LaunchSpecification$Monitoring": null, + "RequestSpotLaunchSpecification$Monitoring": null, + "RunInstancesRequest$Monitoring": "

    The monitoring for the instance.

    " + } + }, + "RunInstancesRequest": { + "base": null, + "refs": { + } + }, + "RunScheduledInstancesRequest": { + "base": "

    Contains the parameters for RunScheduledInstances.

    ", + "refs": { + } + }, + "RunScheduledInstancesResult": { + "base": "

    Contains the output of RunScheduledInstances.

    ", + "refs": { + } + }, + "S3Storage": { + "base": "

    Describes the storage parameters for S3 and S3 buckets for an instance store-backed AMI.

    ", + "refs": { + "Storage$S3": "

    An Amazon S3 storage location.

    " + } + }, + "ScheduledInstance": { + "base": "

    Describes a Scheduled Instance.

    ", + "refs": { + "PurchasedScheduledInstanceSet$member": null, + "ScheduledInstanceSet$member": null + } + }, + "ScheduledInstanceAvailability": { + "base": "

    Describes a schedule that is available for your Scheduled Instances.

    ", + "refs": { + "ScheduledInstanceAvailabilitySet$member": null + } + }, + "ScheduledInstanceAvailabilitySet": { + "base": null, + "refs": { + "DescribeScheduledInstanceAvailabilityResult$ScheduledInstanceAvailabilitySet": "

    Information about the available Scheduled Instances.

    " + } + }, + "ScheduledInstanceIdRequestSet": { + "base": null, + "refs": { + "DescribeScheduledInstancesRequest$ScheduledInstanceIds": "

    One or more Scheduled Instance IDs.

    " + } + }, + "ScheduledInstanceRecurrence": { + "base": "

    Describes the recurring schedule for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstance$Recurrence": "

    The schedule recurrence.

    ", + "ScheduledInstanceAvailability$Recurrence": "

    The schedule recurrence.

    " + } + }, + "ScheduledInstanceRecurrenceRequest": { + "base": "

    Describes the recurring schedule for a Scheduled Instance.

    ", + "refs": { + "DescribeScheduledInstanceAvailabilityRequest$Recurrence": "

    The schedule recurrence.

    " + } + }, + "ScheduledInstanceSet": { + "base": null, + "refs": { + "DescribeScheduledInstancesResult$ScheduledInstanceSet": "

    Information about the Scheduled Instances.

    " + } + }, + "ScheduledInstancesBlockDeviceMapping": { + "base": "

    Describes a block device mapping for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesBlockDeviceMappingSet$member": null + } + }, + "ScheduledInstancesBlockDeviceMappingSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$BlockDeviceMappings": "

    One or more block device mapping entries.

    " + } + }, + "ScheduledInstancesEbs": { + "base": "

    Describes an EBS volume for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesBlockDeviceMapping$Ebs": "

    Parameters used to set up EBS volumes automatically when the instance is launched.

    " + } + }, + "ScheduledInstancesIamInstanceProfile": { + "base": "

    Describes an IAM instance profile for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$IamInstanceProfile": "

    The IAM instance profile.

    " + } + }, + "ScheduledInstancesLaunchSpecification": { + "base": "

    Describes the launch specification for a Scheduled Instance.

    ", + "refs": { + "RunScheduledInstancesRequest$LaunchSpecification": "

    The launch specification.

    " + } + }, + "ScheduledInstancesMonitoring": { + "base": "

    Describes whether monitoring is enabled for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$Monitoring": "

    Enable or disable monitoring for the instances.

    " + } + }, + "ScheduledInstancesNetworkInterface": { + "base": "

    Describes a network interface for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesNetworkInterfaceSet$member": null + } + }, + "ScheduledInstancesNetworkInterfaceSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$NetworkInterfaces": "

    One or more network interfaces.

    " + } + }, + "ScheduledInstancesPlacement": { + "base": "

    Describes the placement for a Scheduled Instance.

    ", + "refs": { + "ScheduledInstancesLaunchSpecification$Placement": "

    The placement information.

    " + } + }, + "ScheduledInstancesPrivateIpAddressConfig": { + "base": "

    Describes a private IP address for a Scheduled Instance.

    ", + "refs": { + "PrivateIpAddressConfigSet$member": null + } + }, + "ScheduledInstancesSecurityGroupIdSet": { + "base": null, + "refs": { + "ScheduledInstancesLaunchSpecification$SecurityGroupIds": "

    The IDs of one or more security groups.

    ", + "ScheduledInstancesNetworkInterface$Groups": "

    The IDs of one or more security groups.

    " + } + }, + "SecurityGroup": { + "base": "

    Describes a security group

    ", + "refs": { + "SecurityGroupList$member": null + } + }, + "SecurityGroupIdStringList": { + "base": null, + "refs": { + "CreateNetworkInterfaceRequest$Groups": "

    The IDs of one or more security groups.

    ", + "ImportInstanceLaunchSpecification$GroupIds": "

    One or more security group IDs.

    ", + "InstanceNetworkInterfaceSpecification$Groups": "

    The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance.

    ", + "ModifyNetworkInterfaceAttributeRequest$Groups": "

    Changes the security groups for the network interface. The new set of groups you specify replaces the current set. You must specify at least one group, even if it's just the default security group in the VPC. You must specify the ID of the security group, not the name.

    ", + "RunInstancesRequest$SecurityGroupIds": "

    One or more security group IDs. You can create a security group using CreateSecurityGroup.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "SecurityGroupList": { + "base": null, + "refs": { + "DescribeSecurityGroupsResult$SecurityGroups": "

    Information about one or more security groups.

    " + } + }, + "SecurityGroupStringList": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$GroupNames": "

    One or more security group names.

    ", + "RunInstancesRequest$SecurityGroups": "

    [EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.

    Default: Amazon EC2 uses the default security group.

    " + } + }, + "ShutdownBehavior": { + "base": null, + "refs": { + "ImportInstanceLaunchSpecification$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    ", + "RunInstancesRequest$InstanceInitiatedShutdownBehavior": "

    Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

    Default: stop

    " + } + }, + "SlotDateTimeRangeRequest": { + "base": "

    Describes the time period for a Scheduled Instance to start its first schedule. The time period must span less than one day.

    ", + "refs": { + "DescribeScheduledInstanceAvailabilityRequest$FirstSlotStartTimeRange": "

    The time period for the first schedule to start.

    " + } + }, + "SlotStartTimeRangeRequest": { + "base": "

    Describes the time period for a Scheduled Instance to start its first schedule.

    ", + "refs": { + "DescribeScheduledInstancesRequest$SlotStartTimeRange": "

    The time period for the first schedule to start.

    " + } + }, + "Snapshot": { + "base": "

    Describes a snapshot.

    ", + "refs": { + "SnapshotList$member": null + } + }, + "SnapshotAttributeName": { + "base": null, + "refs": { + "DescribeSnapshotAttributeRequest$Attribute": "

    The snapshot attribute you would like to view.

    ", + "ModifySnapshotAttributeRequest$Attribute": "

    The snapshot attribute to modify.

    Only volume creation permissions may be modified at the customer level.

    ", + "ResetSnapshotAttributeRequest$Attribute": "

    The attribute to reset. Currently, only the attribute for permission to create volumes can be reset.

    " + } + }, + "SnapshotDetail": { + "base": "

    Describes the snapshot created from the imported disk.

    ", + "refs": { + "SnapshotDetailList$member": null + } + }, + "SnapshotDetailList": { + "base": null, + "refs": { + "ImportImageResult$SnapshotDetails": "

    Information about the snapshots.

    ", + "ImportImageTask$SnapshotDetails": "

    Information about the snapshots.

    " + } + }, + "SnapshotDiskContainer": { + "base": "

    The disk container object for the import snapshot request.

    ", + "refs": { + "ImportSnapshotRequest$DiskContainer": "

    Information about the disk container.

    " + } + }, + "SnapshotIdStringList": { + "base": null, + "refs": { + "DescribeSnapshotsRequest$SnapshotIds": "

    One or more snapshot IDs.

    Default: Describes snapshots for which you have launch permissions.

    " + } + }, + "SnapshotList": { + "base": null, + "refs": { + "DescribeSnapshotsResult$Snapshots": "

    Information about the snapshots.

    " + } + }, + "SnapshotState": { + "base": null, + "refs": { + "Snapshot$State": "

    The snapshot state.

    " + } + }, + "SnapshotTaskDetail": { + "base": "

    Details about the import snapshot task.

    ", + "refs": { + "ImportSnapshotResult$SnapshotTaskDetail": "

    Information about the import snapshot task.

    ", + "ImportSnapshotTask$SnapshotTaskDetail": "

    Describes an import snapshot task.

    " + } + }, + "SpotDatafeedSubscription": { + "base": "

    Describes the data feed for a Spot instance.

    ", + "refs": { + "CreateSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    ", + "DescribeSpotDatafeedSubscriptionResult$SpotDatafeedSubscription": "

    The Spot instance data feed subscription.

    " + } + }, + "SpotFleetLaunchSpecification": { + "base": "

    Describes the launch specification for one or more Spot instances.

    ", + "refs": { + "LaunchSpecsList$member": null + } + }, + "SpotFleetMonitoring": { + "base": "

    Describes whether monitoring is enabled.

    ", + "refs": { + "SpotFleetLaunchSpecification$Monitoring": "

    Enable or disable monitoring for the instances.

    " + } + }, + "SpotFleetRequestConfig": { + "base": "

    Describes a Spot fleet request.

    ", + "refs": { + "SpotFleetRequestConfigSet$member": null + } + }, + "SpotFleetRequestConfigData": { + "base": "

    Describes the configuration of a Spot fleet request.

    ", + "refs": { + "RequestSpotFleetRequest$SpotFleetRequestConfig": "

    The configuration for the Spot fleet request.

    ", + "SpotFleetRequestConfig$SpotFleetRequestConfig": "

    Information about the configuration of the Spot fleet request.

    " + } + }, + "SpotFleetRequestConfigSet": { + "base": null, + "refs": { + "DescribeSpotFleetRequestsResponse$SpotFleetRequestConfigs": "

    Information about the configuration of your Spot fleet.

    " + } + }, + "SpotInstanceRequest": { + "base": "

    Describes a Spot instance request.

    ", + "refs": { + "SpotInstanceRequestList$member": null + } + }, + "SpotInstanceRequestIdList": { + "base": null, + "refs": { + "CancelSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    ", + "DescribeSpotInstanceRequestsRequest$SpotInstanceRequestIds": "

    One or more Spot instance request IDs.

    " + } + }, + "SpotInstanceRequestList": { + "base": null, + "refs": { + "DescribeSpotInstanceRequestsResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    ", + "RequestSpotInstancesResult$SpotInstanceRequests": "

    One or more Spot instance requests.

    " + } + }, + "SpotInstanceState": { + "base": null, + "refs": { + "SpotInstanceRequest$State": "

    The state of the Spot instance request. Spot bid status information can help you track your Spot instance requests. For more information, see Spot Bid Status in the Amazon Elastic Compute Cloud User Guide.

    " + } + }, + "SpotInstanceStateFault": { + "base": "

    Describes a Spot instance state change.

    ", + "refs": { + "SpotDatafeedSubscription$Fault": "

    The fault codes for the Spot instance request, if any.

    ", + "SpotInstanceRequest$Fault": "

    The fault codes for the Spot instance request, if any.

    " + } + }, + "SpotInstanceStatus": { + "base": "

    Describes the status of a Spot instance request.

    ", + "refs": { + "SpotInstanceRequest$Status": "

    The status code and status message describing the Spot instance request.

    " + } + }, + "SpotInstanceType": { + "base": null, + "refs": { + "RequestSpotInstancesRequest$Type": "

    The Spot instance request type.

    Default: one-time

    ", + "SpotInstanceRequest$Type": "

    The Spot instance request type.

    " + } + }, + "SpotPlacement": { + "base": "

    Describes Spot instance placement.

    ", + "refs": { + "LaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "RequestSpotLaunchSpecification$Placement": "

    The placement information for the instance.

    ", + "SpotFleetLaunchSpecification$Placement": "

    The placement information.

    " + } + }, + "SpotPrice": { + "base": "

    Describes the maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "refs": { + "SpotPriceHistoryList$member": null + } + }, + "SpotPriceHistoryList": { + "base": null, + "refs": { + "DescribeSpotPriceHistoryResult$SpotPriceHistory": "

    The historical Spot prices.

    " + } + }, + "StartInstancesRequest": { + "base": null, + "refs": { + } + }, + "StartInstancesResult": { + "base": null, + "refs": { + } + }, + "State": { + "base": null, + "refs": { + "VpcEndpoint$State": "

    The state of the VPC endpoint.

    " + } + }, + "StateReason": { + "base": "

    Describes a state change.

    ", + "refs": { + "Image$StateReason": "

    The reason for the state change.

    ", + "Instance$StateReason": "

    The reason for the most recent state transition.

    " + } + }, + "Status": { + "base": null, + "refs": { + "MoveAddressToVpcResult$Status": "

    The status of the move of the IP address.

    ", + "RestoreAddressToClassicResult$Status": "

    The move status for the IP address.

    " + } + }, + "StatusName": { + "base": null, + "refs": { + "InstanceStatusDetails$Name": "

    The type of instance status.

    " + } + }, + "StatusType": { + "base": null, + "refs": { + "InstanceStatusDetails$Status": "

    The status.

    " + } + }, + "StopInstancesRequest": { + "base": null, + "refs": { + } + }, + "StopInstancesResult": { + "base": null, + "refs": { + } + }, + "Storage": { + "base": "

    Describes the storage location for an instance store-backed AMI.

    ", + "refs": { + "BundleInstanceRequest$Storage": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "BundleTask$Storage": "

    The Amazon S3 storage locations.

    " + } + }, + "String": { + "base": null, + "refs": { + "AcceptVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "AccountAttribute$AttributeName": "

    The name of the account attribute.

    ", + "AccountAttributeValue$AttributeValue": "

    The value of the attribute.

    ", + "ActiveInstance$InstanceType": "

    The instance type.

    ", + "ActiveInstance$InstanceId": "

    The ID of the instance.

    ", + "ActiveInstance$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "Address$InstanceId": "

    The ID of the instance that the address is associated with (if any).

    ", + "Address$PublicIp": "

    The Elastic IP address.

    ", + "Address$AllocationId": "

    The ID representing the allocation of the address for use with EC2-VPC.

    ", + "Address$AssociationId": "

    The ID representing the association of the address with an instance in a VPC.

    ", + "Address$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Address$NetworkInterfaceOwnerId": "

    The ID of the AWS account that owns the network interface.

    ", + "Address$PrivateIpAddress": "

    The private IP address associated with the Elastic IP address.

    ", + "AllocateAddressResult$PublicIp": "

    The Elastic IP address.

    ", + "AllocateAddressResult$AllocationId": "

    [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.

    ", + "AllocateHostsRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "AllocateHostsRequest$InstanceType": "

    Specify the instance type that you want your Dedicated hosts to be configured for. When you specify the instance type, that is the only instance type that you can launch onto that host.

    ", + "AllocateHostsRequest$AvailabilityZone": "

    The Availability Zone for the Dedicated hosts.

    ", + "AllocationIdList$member": null, + "AssignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AssociateAddressRequest$InstanceId": "

    The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "AssociateAddressRequest$PublicIp": "

    The Elastic IP address. This is required for EC2-Classic.

    ", + "AssociateAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. This is required for EC2-VPC.

    ", + "AssociateAddressRequest$NetworkInterfaceId": "

    [EC2-VPC] The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

    ", + "AssociateAddressRequest$PrivateIpAddress": "

    [EC2-VPC] The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

    ", + "AssociateAddressResult$AssociationId": "

    [EC2-VPC] The ID that represents the association of the Elastic IP address with an instance.

    ", + "AssociateDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set, or default to associate no DHCP options with the VPC.

    ", + "AssociateDhcpOptionsRequest$VpcId": "

    The ID of the VPC.

    ", + "AssociateRouteTableRequest$SubnetId": "

    The ID of the subnet.

    ", + "AssociateRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "AssociateRouteTableResult$AssociationId": "

    The route table association ID (needed to disassociate the route table).

    ", + "AttachClassicLinkVpcRequest$InstanceId": "

    The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC.

    ", + "AttachClassicLinkVpcRequest$VpcId": "

    The ID of a ClassicLink-enabled VPC.

    ", + "AttachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "AttachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttachNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "AttachNetworkInterfaceRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachNetworkInterfaceResult$AttachmentId": "

    The ID of the network interface attachment.

    ", + "AttachVolumeRequest$VolumeId": "

    The ID of the EBS volume. The volume and instance must be within the same Availability Zone.

    ", + "AttachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "AttachVolumeRequest$Device": "

    The device name to expose to the instance (for example, /dev/sdh or xvdh).

    ", + "AttachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "AttachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "AttributeValue$Value": "

    Valid values are case-sensitive and vary by action.

    ", + "AuthorizeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To authorize outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name or number. We recommend that you specify the protocol in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. We recommend that you specify the CIDR range in a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "AuthorizeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. For EC2-VPC, the source security group must be in the same VPC.

    ", + "AuthorizeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic, default VPC] The AWS account number for the source security group. For EC2-VPC, the source security group must be in the same VPC. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific IP protocol and port range, use a set of IP permissions instead.

    ", + "AuthorizeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). (VPC only) Use -1 to specify all.

    ", + "AuthorizeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "AvailabilityZone$ZoneName": "

    The name of the Availability Zone.

    ", + "AvailabilityZone$RegionName": "

    The name of the region.

    ", + "AvailabilityZoneMessage$Message": "

    The message about the Availability Zone.

    ", + "BlockDeviceMapping$VirtualName": "

    The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1.The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

    Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

    ", + "BlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "BlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the block device mapping of the AMI.

    ", + "BundleIdStringList$member": null, + "BundleInstanceRequest$InstanceId": "

    The ID of the instance to bundle.

    Type: String

    Default: None

    Required: Yes

    ", + "BundleTask$InstanceId": "

    The ID of the instance associated with this bundle task.

    ", + "BundleTask$BundleId": "

    The ID of the bundle task.

    ", + "BundleTask$Progress": "

    The level of task completion, as a percent (for example, 20%).

    ", + "BundleTaskError$Code": "

    The error code.

    ", + "BundleTaskError$Message": "

    The error message.

    ", + "CancelBundleTaskRequest$BundleId": "

    The ID of the bundle task.

    ", + "CancelConversionRequest$ConversionTaskId": "

    The ID of the conversion task.

    ", + "CancelConversionRequest$ReasonMessage": "

    The reason for canceling the conversion task.

    ", + "CancelExportTaskRequest$ExportTaskId": "

    The ID of the export task. This is the ID returned by CreateInstanceExportTask.

    ", + "CancelImportTaskRequest$ImportTaskId": "

    The ID of the import image or import snapshot task to be canceled.

    ", + "CancelImportTaskRequest$CancelReason": "

    The reason for canceling the task.

    ", + "CancelImportTaskResult$ImportTaskId": "

    The ID of the task being canceled.

    ", + "CancelImportTaskResult$State": "

    The current state of the task being canceled.

    ", + "CancelImportTaskResult$PreviousState": "

    The current state of the task being canceled.

    ", + "CancelReservedInstancesListingRequest$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "CancelSpotFleetRequestsError$Message": "

    The description for the error code.

    ", + "CancelSpotFleetRequestsErrorItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelSpotFleetRequestsSuccessItem$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "CancelledSpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "ClassicLinkDnsSupport$VpcId": "

    The ID of the VPC.

    ", + "ClassicLinkInstance$InstanceId": "

    The ID of the instance.

    ", + "ClassicLinkInstance$VpcId": "

    The ID of the VPC.

    ", + "ClientData$Comment": "

    A user-defined comment about the disk upload.

    ", + "ConfirmProductInstanceRequest$ProductCode": "

    The product code. This must be a product code that you own.

    ", + "ConfirmProductInstanceRequest$InstanceId": "

    The ID of the instance.

    ", + "ConfirmProductInstanceResult$OwnerId": "

    The AWS account ID of the instance owner. This is only present if the product code is attached to the instance.

    ", + "ConversionIdStringList$member": null, + "ConversionTask$ConversionTaskId": "

    The ID of the conversion task.

    ", + "ConversionTask$ExpirationTime": "

    The time when the task expires. If the upload isn't complete before the expiration time, we automatically cancel the task.

    ", + "ConversionTask$StatusMessage": "

    The status message related to the conversion task.

    ", + "CopyImageRequest$SourceRegion": "

    The name of the region that contains the AMI to copy.

    ", + "CopyImageRequest$SourceImageId": "

    The ID of the AMI to copy.

    ", + "CopyImageRequest$Name": "

    The name of the new AMI in the destination region.

    ", + "CopyImageRequest$Description": "

    A description for the new AMI in the destination region.

    ", + "CopyImageRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "CopyImageRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when encrypting the snapshots of an image during a copy operation. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CopyImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CopySnapshotRequest$SourceRegion": "

    The ID of the region that contains the snapshot to be copied.

    ", + "CopySnapshotRequest$SourceSnapshotId": "

    The ID of the EBS snapshot to copy.

    ", + "CopySnapshotRequest$Description": "

    A description for the EBS snapshot.

    ", + "CopySnapshotRequest$DestinationRegion": "

    The destination region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination region in a PresignedUrl parameter, where it is required.

    CopySnapshot sends the snapshot copy to the regional endpoint that you send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI, this is specified with the --region parameter or the default region in your AWS configuration file).

    ", + "CopySnapshotRequest$PresignedUrl": "

    The pre-signed URL that facilitates copying an encrypted snapshot. This parameter is only required when copying an encrypted snapshot with the Amazon EC2 Query API; it is available as an optional parameter in all other cases. The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

    ", + "CopySnapshotRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when creating the snapshot copy. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CopySnapshotResult$SnapshotId": "

    The ID of the new snapshot.

    ", + "CreateCustomerGatewayRequest$PublicIp": "

    The Internet-routable IP address for the customer gateway's outside interface. The address must be static.

    ", + "CreateFlowLogsRequest$LogGroupName": "

    The name of the CloudWatch log group.

    ", + "CreateFlowLogsRequest$DeliverLogsPermissionArn": "

    The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group.

    ", + "CreateFlowLogsRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    ", + "CreateFlowLogsResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateImageRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateImageRequest$Name": "

    A name for the new image.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores(_)

    ", + "CreateImageRequest$Description": "

    A description for the new image.

    ", + "CreateImageResult$ImageId": "

    The ID of the new AMI.

    ", + "CreateInstanceExportTaskRequest$Description": "

    A description for the conversion task or the resource being exported. The maximum length is 255 bytes.

    ", + "CreateInstanceExportTaskRequest$InstanceId": "

    The ID of the instance.

    ", + "CreateKeyPairRequest$KeyName": "

    A unique name for the key pair.

    Constraints: Up to 255 ASCII characters

    ", + "CreateNatGatewayRequest$SubnetId": "

    The subnet in which to create the NAT gateway.

    ", + "CreateNatGatewayRequest$AllocationId": "

    The allocation ID of an Elastic IP address to associate with the NAT gateway. If the Elastic IP address is associated with another resource, you must first disassociate it.

    ", + "CreateNatGatewayRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    Constraint: Maximum 64 ASCII characters.

    ", + "CreateNatGatewayResult$ClientToken": "

    Unique, case-sensitive identifier to ensure the idempotency of the request. Only returned if a client token was provided in the request.

    ", + "CreateNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "CreateNetworkAclEntryRequest$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "CreateNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24).

    ", + "CreateNetworkAclRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateNetworkInterfaceRequest$SubnetId": "

    The ID of the subnet to associate with the network interface.

    ", + "CreateNetworkInterfaceRequest$Description": "

    A description for the network interface.

    ", + "CreateNetworkInterfaceRequest$PrivateIpAddress": "

    The primary private IP address of the network interface. If you don't specify an IP address, Amazon EC2 selects one for you from the subnet range. If you specify an IP address, you cannot indicate any IP addresses specified in privateIpAddresses as primary (only one IP address can be designated as primary).

    ", + "CreatePlacementGroupRequest$GroupName": "

    A name for the placement group.

    Constraints: Up to 255 ASCII characters

    ", + "CreateReservedInstancesListingRequest$ReservedInstancesId": "

    The ID of the active Reserved Instance.

    ", + "CreateReservedInstancesListingRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "CreateRouteRequest$RouteTableId": "

    The ID of the route table for the route.

    ", + "CreateRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. Routing decisions are based on the most specific match.

    ", + "CreateRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway attached to your VPC.

    ", + "CreateRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached.

    ", + "CreateRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "CreateRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "CreateRouteRequest$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "CreateRouteTableRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSecurityGroupRequest$GroupName": "

    The name of the security group.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*

    ", + "CreateSecurityGroupRequest$Description": "

    A description for the security group. This is informational only.

    Constraints: Up to 255 characters in length

    Constraints for EC2-Classic: ASCII characters

    Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*

    ", + "CreateSecurityGroupRequest$VpcId": "

    [EC2-VPC] The ID of the VPC. Required for EC2-VPC.

    ", + "CreateSecurityGroupResult$GroupId": "

    The ID of the security group.

    ", + "CreateSnapshotRequest$VolumeId": "

    The ID of the EBS volume.

    ", + "CreateSnapshotRequest$Description": "

    A description for the snapshot.

    ", + "CreateSpotDatafeedSubscriptionRequest$Bucket": "

    The Amazon S3 bucket in which to store the Spot instance data feed.

    ", + "CreateSpotDatafeedSubscriptionRequest$Prefix": "

    A prefix for the data feed file names.

    ", + "CreateSubnetRequest$VpcId": "

    The ID of the VPC.

    ", + "CreateSubnetRequest$CidrBlock": "

    The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24.

    ", + "CreateSubnetRequest$AvailabilityZone": "

    The Availability Zone for the subnet.

    Default: AWS selects one for you. If you create more than one subnet in your VPC, we may not necessarily select a different zone for each subnet.

    ", + "CreateVolumePermission$UserId": "

    The specific AWS account ID that is to be added or removed from a volume's list of create volume permissions.

    ", + "CreateVolumeRequest$SnapshotId": "

    The snapshot from which to create the volume.

    ", + "CreateVolumeRequest$AvailabilityZone": "

    The Availability Zone in which to create the volume. Use DescribeAvailabilityZones to list the Availability Zones that are currently available to you.

    ", + "CreateVolumeRequest$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. If a KmsKeyId is specified, the Encrypted flag must also be set.

    ", + "CreateVpcEndpointRequest$VpcId": "

    The ID of the VPC in which the endpoint will be used.

    ", + "CreateVpcEndpointRequest$ServiceName": "

    The AWS service name, in the form com.amazonaws.region.service. To get a list of available services, use the DescribeVpcEndpointServices request.

    ", + "CreateVpcEndpointRequest$PolicyDocument": "

    A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

    ", + "CreateVpcEndpointRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

    ", + "CreateVpcEndpointResult$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

    ", + "CreateVpcPeeringConnectionRequest$VpcId": "

    The ID of the requester VPC.

    ", + "CreateVpcPeeringConnectionRequest$PeerVpcId": "

    The ID of the VPC with which you are creating the VPC peering connection.

    ", + "CreateVpcPeeringConnectionRequest$PeerOwnerId": "

    The AWS account ID of the owner of the peer VPC.

    Default: Your AWS account ID

    ", + "CreateVpcRequest$CidrBlock": "

    The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16.

    ", + "CreateVpnConnectionRequest$Type": "

    The type of VPN connection (ipsec.1).

    ", + "CreateVpnConnectionRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CreateVpnConnectionRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "CreateVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "CreateVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "CreateVpnGatewayRequest$AvailabilityZone": "

    The Availability Zone for the virtual private gateway.

    ", + "CustomerGateway$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "CustomerGateway$State": "

    The current state of the customer gateway (pending | available | deleting | deleted).

    ", + "CustomerGateway$Type": "

    The type of VPN connection the customer gateway supports (ipsec.1).

    ", + "CustomerGateway$IpAddress": "

    The Internet-routable IP address of the customer gateway's outside interface.

    ", + "CustomerGateway$BgpAsn": "

    The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).

    ", + "CustomerGatewayIdStringList$member": null, + "DeleteCustomerGatewayRequest$CustomerGatewayId": "

    The ID of the customer gateway.

    ", + "DeleteDhcpOptionsRequest$DhcpOptionsId": "

    The ID of the DHCP options set.

    ", + "DeleteInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DeleteKeyPairRequest$KeyName": "

    The name of the key pair.

    ", + "DeleteNatGatewayRequest$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "DeleteNatGatewayResult$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "DeleteNetworkAclEntryRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkAclRequest$NetworkAclId": "

    The ID of the network ACL.

    ", + "DeleteNetworkInterfaceRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DeletePlacementGroupRequest$GroupName": "

    The name of the placement group.

    ", + "DeleteRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteRouteRequest$DestinationCidrBlock": "

    The CIDR range for the route. The value you specify must match the CIDR for the route exactly.

    ", + "DeleteRouteTableRequest$RouteTableId": "

    The ID of the route table.

    ", + "DeleteSecurityGroupRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group. You can specify either the security group name or the security group ID.

    ", + "DeleteSecurityGroupRequest$GroupId": "

    The ID of the security group. Required for a nondefault VPC.

    ", + "DeleteSnapshotRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DeleteSubnetRequest$SubnetId": "

    The ID of the subnet.

    ", + "DeleteVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DeleteVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "DeleteVpcRequest$VpcId": "

    The ID of the VPC.

    ", + "DeleteVpnConnectionRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "DeleteVpnConnectionRouteRequest$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer network.

    ", + "DeleteVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DeregisterImageRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeClassicLinkInstancesRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeClassicLinkInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeFlowLogsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeFlowLogsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeHostsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeHostsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeIdFormatRequest$Resource": "

    The type of resource.

    ", + "DescribeImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "DescribeImportImageTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportImageTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeImportSnapshotTasksRequest$NextToken": "

    A token that indicates the next page of results.

    ", + "DescribeImportSnapshotTasksResult$NextToken": "

    The token to use to get the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "DescribeInstanceStatusRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeInstanceStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeInstancesRequest$NextToken": "

    The token to request the next page of results.

    ", + "DescribeInstancesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeMovingAddressesRequest$NextToken": "

    The token to use to retrieve the next page of results.

    ", + "DescribeMovingAddressesResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeNatGatewaysRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeNatGatewaysResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribeNetworkInterfaceAttributeResult$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "DescribePrefixListsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribePrefixListsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesId": "

    One or more Reserved Instance IDs.

    ", + "DescribeReservedInstancesListingsRequest$ReservedInstancesListingId": "

    One or more Reserved Instance listing IDs.

    ", + "DescribeReservedInstancesModificationsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesModificationsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeReservedInstancesOfferingsRequest$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "DescribeReservedInstancesOfferingsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeReservedInstancesOfferingsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeScheduledInstanceAvailabilityRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeScheduledInstanceAvailabilityResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeScheduledInstancesRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeScheduledInstancesResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSnapshotAttributeRequest$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotAttributeResult$SnapshotId": "

    The ID of the EBS snapshot.

    ", + "DescribeSnapshotsRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeSnapshots request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeSnapshotsResult$NextToken": "

    The NextToken value to include in a future DescribeSnapshots request. When the results of a DescribeSnapshots request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetInstancesRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetInstancesResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetInstancesResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestHistoryRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestHistoryResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "DescribeSpotFleetRequestHistoryResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotFleetRequestsRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotFleetRequestsResponse$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeSpotPriceHistoryRequest$AvailabilityZone": "

    Filters the results by the specified Availability Zone.

    ", + "DescribeSpotPriceHistoryRequest$NextToken": "

    The token for the next set of results.

    ", + "DescribeSpotPriceHistoryResult$NextToken": "

    The token required to retrieve the next set of results. This value is null when there are no more results to return.

    ", + "DescribeTagsRequest$NextToken": "

    The token to retrieve the next page of results.

    ", + "DescribeTagsResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return..

    ", + "DescribeVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeAttributeResult$VolumeId": "

    The ID of the volume.

    ", + "DescribeVolumeStatusRequest$NextToken": "

    The NextToken value to include in a future DescribeVolumeStatus request. When the results of the request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumeStatusResult$NextToken": "

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVolumesRequest$NextToken": "

    The NextToken value returned from a previous paginated DescribeVolumes request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

    ", + "DescribeVolumesResult$NextToken": "

    The NextToken value to include in a future DescribeVolumes request. When the results of a DescribeVolumes request exceed MaxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "DescribeVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcAttributeResult$VpcId": "

    The ID of the VPC.

    ", + "DescribeVpcEndpointServicesRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointServicesResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DescribeVpcEndpointsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a prior call.)

    ", + "DescribeVpcEndpointsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "DetachClassicLinkVpcRequest$InstanceId": "

    The ID of the instance to unlink from the VPC.

    ", + "DetachClassicLinkVpcRequest$VpcId": "

    The ID of the VPC to which the instance is linked.

    ", + "DetachInternetGatewayRequest$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "DetachInternetGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DetachNetworkInterfaceRequest$AttachmentId": "

    The ID of the attachment.

    ", + "DetachVolumeRequest$VolumeId": "

    The ID of the volume.

    ", + "DetachVolumeRequest$InstanceId": "

    The ID of the instance.

    ", + "DetachVolumeRequest$Device": "

    The device name.

    ", + "DetachVpnGatewayRequest$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "DetachVpnGatewayRequest$VpcId": "

    The ID of the VPC.

    ", + "DhcpConfiguration$Key": "

    The name of a DHCP option.

    ", + "DhcpOptions$DhcpOptionsId": "

    The ID of the set of DHCP options.

    ", + "DhcpOptionsIdStringList$member": null, + "DisableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "DisableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "DisableVpcClassicLinkDnsSupportRequest$VpcId": "

    The ID of the VPC.

    ", + "DisableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "DisassociateAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "DisassociateAddressRequest$AssociationId": "

    [EC2-VPC] The association ID. Required for EC2-VPC.

    ", + "DisassociateRouteTableRequest$AssociationId": "

    The association ID representing the current association between the route table and subnet.

    ", + "DiskImage$Description": "

    A description of the disk image.

    ", + "DiskImageDescription$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    ", + "DiskImageDescription$Checksum": "

    The checksum computed for the disk image.

    ", + "DiskImageDetail$ImportManifestUrl": "

    A presigned URL for the import manifest stored in Amazon S3 and presented here as an Amazon S3 presigned URL. For information about creating a presigned URL for an Amazon S3 object, read the \"Query String Request Authentication Alternative\" section of the Authenticating REST Requests topic in the Amazon Simple Storage Service Developer Guide.

    ", + "DiskImageVolumeDescription$Id": "

    The volume identifier.

    ", + "EbsBlockDevice$SnapshotId": "

    The ID of the snapshot.

    ", + "EbsInstanceBlockDevice$VolumeId": "

    The ID of the EBS volume.

    ", + "EbsInstanceBlockDeviceSpecification$VolumeId": "

    The ID of the EBS volume.

    ", + "EnableVgwRoutePropagationRequest$RouteTableId": "

    The ID of the route table.

    ", + "EnableVgwRoutePropagationRequest$GatewayId": "

    The ID of the virtual private gateway.

    ", + "EnableVolumeIORequest$VolumeId": "

    The ID of the volume.

    ", + "EnableVpcClassicLinkDnsSupportRequest$VpcId": "

    The ID of the VPC.

    ", + "EnableVpcClassicLinkRequest$VpcId": "

    The ID of the VPC.

    ", + "EventInformation$InstanceId": "

    The ID of the instance. This information is available only for instanceChange events.

    ", + "EventInformation$EventSubType": "

    The event.

    The following are the error events.

    • iamFleetRoleInvalid - The Spot fleet did not have the required permissions either to launch or terminate an instance.

    • launchSpecTemporarilyBlacklisted - The configuration is not valid and several attempts to launch instances have failed. For more information, see the description of the event.

    • spotFleetRequestConfigurationInvalid - The configuration is not valid. For more information, see the description of the event.

    • spotInstanceCountLimitExceeded - You've reached the limit on the number of Spot instances that you can launch.

    The following are the fleetRequestChange events.

    • active - The Spot fleet has been validated and Amazon EC2 is attempting to maintain the target number of running Spot instances.

    • cancelled - The Spot fleet is canceled and has no running Spot instances. The Spot fleet will be deleted two days after its instances were terminated.

    • cancelled_running - The Spot fleet is canceled and will not launch additional Spot instances, but its existing Spot instances continue to run until they are interrupted or terminated.

    • cancelled_terminating - The Spot fleet is canceled and its Spot instances are terminating.

    • expired - The Spot fleet request has expired. A subsequent event indicates that the instances were terminated, if the request was created with TerminateInstancesWithExpiration set.

    • modify_in_progress - A request to modify the Spot fleet request was accepted and is in progress.

    • modify_successful - The Spot fleet request was modified.

    • price_update - The bid price for a launch configuration was adjusted because it was too high. This change is permanent.

    • submitted - The Spot fleet request is being evaluated and Amazon EC2 is preparing to launch the target number of Spot instances.

    The following are the instanceChange events.

    • launched - A bid was fulfilled and a new instance was launched.

    • terminated - An instance was terminated by the user.

    ", + "EventInformation$EventDescription": "

    The description of the event.

    ", + "ExecutableByStringList$member": null, + "ExportTask$ExportTaskId": "

    The ID of the export task.

    ", + "ExportTask$Description": "

    A description of the resource being exported.

    ", + "ExportTask$StatusMessage": "

    The status message related to the export task.

    ", + "ExportTaskIdStringList$member": null, + "ExportToS3Task$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3Task$S3Key": "

    The encryption key for your S3 bucket.

    ", + "ExportToS3TaskSpecification$S3Bucket": "

    The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

    ", + "ExportToS3TaskSpecification$S3Prefix": "

    The image is written to a single object in the S3 bucket at the S3 key s3prefix + exportTaskId + '.' + diskImageFormat.

    ", + "Filter$Name": "

    The name of the filter. Filter names are case-sensitive.

    ", + "FlowLog$FlowLogId": "

    The flow log ID.

    ", + "FlowLog$FlowLogStatus": "

    The status of the flow log (ACTIVE).

    ", + "FlowLog$ResourceId": "

    The ID of the resource on which the flow log was created.

    ", + "FlowLog$LogGroupName": "

    The name of the flow log group.

    ", + "FlowLog$DeliverLogsStatus": "

    The status of the logs delivery (SUCCESS | FAILED).

    ", + "FlowLog$DeliverLogsErrorMessage": "

    Information about the error that occurred. Rate limited indicates that CloudWatch logs throttling has been applied for one or more network interfaces, or that you've reached the limit on the number of CloudWatch Logs log groups that you can create. Access error indicates that the IAM role associated with the flow log does not have sufficient permissions to publish to CloudWatch Logs. Unknown error indicates an internal error.

    ", + "FlowLog$DeliverLogsPermissionArn": "

    The ARN of the IAM role that posts logs to CloudWatch Logs.

    ", + "GetConsoleOutputRequest$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$InstanceId": "

    The ID of the instance.

    ", + "GetConsoleOutputResult$Output": "

    The console output, Base64 encoded. If using a command line tool, the tools decode the output for you.

    ", + "GetPasswordDataRequest$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$InstanceId": "

    The ID of the Windows instance.

    ", + "GetPasswordDataResult$PasswordData": "

    The password of the instance.

    ", + "GroupIdStringList$member": null, + "GroupIdentifier$GroupName": "

    The name of the security group.

    ", + "GroupIdentifier$GroupId": "

    The ID of the security group.

    ", + "GroupNameStringList$member": null, + "Host$HostId": "

    The ID of the Dedicated host.

    ", + "Host$HostReservationId": "

    The reservation ID of the Dedicated host. This returns a null response if the Dedicated host doesn't have an associated reservation.

    ", + "Host$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "Host$AvailabilityZone": "

    The Availability Zone of the Dedicated host.

    ", + "HostInstance$InstanceId": "

    the IDs of instances that are running on the Dedicated host.

    ", + "HostInstance$InstanceType": "

    The instance type size (e.g., m3.medium) of the running instance.

    ", + "HostProperties$InstanceType": "

    The instance type size that the Dedicated host supports (e.g., m3.medium).

    ", + "IamInstanceProfile$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfile$Id": "

    The ID of the instance profile.

    ", + "IamInstanceProfileSpecification$Arn": "

    The Amazon Resource Name (ARN) of the instance profile.

    ", + "IamInstanceProfileSpecification$Name": "

    The name of the instance profile.

    ", + "IdFormat$Resource": "

    The type of resource.

    ", + "Image$ImageId": "

    The ID of the AMI.

    ", + "Image$ImageLocation": "

    The location of the AMI.

    ", + "Image$OwnerId": "

    The AWS account ID of the image owner.

    ", + "Image$CreationDate": "

    The date and time the image was created.

    ", + "Image$KernelId": "

    The kernel associated with the image, if any. Only applicable for machine images.

    ", + "Image$RamdiskId": "

    The RAM disk associated with the image, if any. Only applicable for machine images.

    ", + "Image$SriovNetSupport": "

    Specifies whether enhanced networking is enabled.

    ", + "Image$ImageOwnerAlias": "

    The AWS account alias (for example, amazon, self) or the AWS account ID of the AMI owner.

    ", + "Image$Name": "

    The name of the AMI that was provided during image creation.

    ", + "Image$Description": "

    The description of the AMI that was provided during image creation.

    ", + "Image$RootDeviceName": "

    The device name of the root device (for example, /dev/sda1 or /dev/xvda).

    ", + "ImageAttribute$ImageId": "

    The ID of the AMI.

    ", + "ImageDiskContainer$Description": "

    The description of the disk image.

    ", + "ImageDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "ImageDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. The URL can either be a https URL (https://..) or an Amazon S3 URL (s3://..)

    ", + "ImageDiskContainer$DeviceName": "

    The block device mapping for the disk.

    ", + "ImageDiskContainer$SnapshotId": "

    The ID of the EBS snapshot to be used for importing the snapshot.

    ", + "ImageIdStringList$member": null, + "ImportImageRequest$Description": "

    A description string for the import image task.

    ", + "ImportImageRequest$LicenseType": "

    The license type to be used for the Amazon Machine Image (AMI) after importing.

    Note: You may only use BYOL if you have existing licenses with rights to use these licenses in a third party cloud like AWS. For more information, see VM Import/Export Prerequisites in the Amazon Elastic Compute Cloud User Guide.

    Valid values: AWS | BYOL

    ", + "ImportImageRequest$Hypervisor": "

    The target hypervisor platform.

    Valid values: xen

    ", + "ImportImageRequest$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageRequest$Platform": "

    The operating system of the virtual machine.

    Valid values: Windows | Linux

    ", + "ImportImageRequest$ClientToken": "

    The token to enable idempotency for VM import requests.

    ", + "ImportImageRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportImageResult$ImportTaskId": "

    The task ID of the import image task.

    ", + "ImportImageResult$Architecture": "

    The architecture of the virtual machine.

    ", + "ImportImageResult$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageResult$Platform": "

    The operating system of the virtual machine.

    ", + "ImportImageResult$Hypervisor": "

    The target hypervisor of the import task.

    ", + "ImportImageResult$Description": "

    A description of the import task.

    ", + "ImportImageResult$ImageId": "

    The ID of the Amazon Machine Image (AMI) created by the import task.

    ", + "ImportImageResult$Progress": "

    The progress of the task.

    ", + "ImportImageResult$StatusMessage": "

    A detailed status message of the import task.

    ", + "ImportImageResult$Status": "

    A brief status of the task.

    ", + "ImportImageTask$ImportTaskId": "

    The ID of the import image task.

    ", + "ImportImageTask$Architecture": "

    The architecture of the virtual machine.

    Valid values: i386 | x86_64

    ", + "ImportImageTask$LicenseType": "

    The license type of the virtual machine.

    ", + "ImportImageTask$Platform": "

    The description string for the import image task.

    ", + "ImportImageTask$Hypervisor": "

    The target hypervisor for the import task.

    Valid values: xen

    ", + "ImportImageTask$Description": "

    A description of the import task.

    ", + "ImportImageTask$ImageId": "

    The ID of the Amazon Machine Image (AMI) of the imported virtual machine.

    ", + "ImportImageTask$Progress": "

    The percentage of progress of the import image task.

    ", + "ImportImageTask$StatusMessage": "

    A descriptive status message for the import image task.

    ", + "ImportImageTask$Status": "

    A brief status for the import image task.

    ", + "ImportInstanceLaunchSpecification$AdditionalInfo": "

    Reserved.

    ", + "ImportInstanceLaunchSpecification$SubnetId": "

    [EC2-VPC] The ID of the subnet in which to launch the instance.

    ", + "ImportInstanceLaunchSpecification$PrivateIpAddress": "

    [EC2-VPC] An available IP address from the IP address range of the subnet.

    ", + "ImportInstanceRequest$Description": "

    A description for the instance being imported.

    ", + "ImportInstanceTaskDetails$InstanceId": "

    The ID of the instance.

    ", + "ImportInstanceTaskDetails$Description": "

    A description of the task.

    ", + "ImportInstanceVolumeDetailItem$AvailabilityZone": "

    The Availability Zone where the resulting instance will reside.

    ", + "ImportInstanceVolumeDetailItem$Status": "

    The status of the import of this particular disk image.

    ", + "ImportInstanceVolumeDetailItem$StatusMessage": "

    The status information or errors related to the disk image.

    ", + "ImportInstanceVolumeDetailItem$Description": "

    A description of the task.

    ", + "ImportKeyPairRequest$KeyName": "

    A unique name for the key pair.

    ", + "ImportKeyPairResult$KeyName": "

    The key pair name you provided.

    ", + "ImportKeyPairResult$KeyFingerprint": "

    The MD5 public key fingerprint as specified in section 4 of RFC 4716.

    ", + "ImportSnapshotRequest$Description": "

    The description string for the import snapshot task.

    ", + "ImportSnapshotRequest$ClientToken": "

    Token to enable idempotency for VM import requests.

    ", + "ImportSnapshotRequest$RoleName": "

    The name of the role to use when not using the default role, 'vmimport'.

    ", + "ImportSnapshotResult$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotResult$Description": "

    A description of the import snapshot task.

    ", + "ImportSnapshotTask$ImportTaskId": "

    The ID of the import snapshot task.

    ", + "ImportSnapshotTask$Description": "

    A description of the import snapshot task.

    ", + "ImportTaskIdList$member": null, + "ImportVolumeRequest$AvailabilityZone": "

    The Availability Zone for the resulting EBS volume.

    ", + "ImportVolumeRequest$Description": "

    A description of the volume.

    ", + "ImportVolumeTaskDetails$AvailabilityZone": "

    The Availability Zone where the resulting volume will reside.

    ", + "ImportVolumeTaskDetails$Description": "

    The description you provided when starting the import volume task.

    ", + "Instance$InstanceId": "

    The ID of the instance.

    ", + "Instance$ImageId": "

    The ID of the AMI used to launch the instance.

    ", + "Instance$PrivateDnsName": "

    The private DNS name assigned to the instance. This DNS name can only be used inside the Amazon EC2 network. This name is not available until the instance enters the running state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC.

    ", + "Instance$PublicDnsName": "

    The public DNS name assigned to the instance. This name is not available until the instance enters the running state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC.

    ", + "Instance$StateTransitionReason": "

    The reason for the most recent state transition. This might be an empty string.

    ", + "Instance$KeyName": "

    The name of the key pair, if this instance was launched with an associated key pair.

    ", + "Instance$KernelId": "

    The kernel associated with this instance, if applicable.

    ", + "Instance$RamdiskId": "

    The RAM disk associated with this instance, if applicable.

    ", + "Instance$SubnetId": "

    [EC2-VPC] The ID of the subnet in which the instance is running.

    ", + "Instance$VpcId": "

    [EC2-VPC] The ID of the VPC in which the instance is running.

    ", + "Instance$PrivateIpAddress": "

    The private IP address assigned to the instance.

    ", + "Instance$PublicIpAddress": "

    The public IP address assigned to the instance, if applicable.

    ", + "Instance$RootDeviceName": "

    The root device name (for example, /dev/sda1 or /dev/xvda).

    ", + "Instance$SpotInstanceRequestId": "

    If the request is a Spot instance request, the ID of the request.

    ", + "Instance$ClientToken": "

    The idempotency token you provided when you launched the instance, if applicable.

    ", + "Instance$SriovNetSupport": "

    Specifies whether enhanced networking is enabled.

    ", + "InstanceAttribute$InstanceId": "

    The ID of the instance.

    ", + "InstanceBlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "InstanceBlockDeviceMappingSpecification$VirtualName": "

    The virtual device name.

    ", + "InstanceBlockDeviceMappingSpecification$NoDevice": "

    suppress the specified device included in the block device mapping.

    ", + "InstanceCapacity$InstanceType": "

    The instance type size supported by the Dedicated host.

    ", + "InstanceExportDetails$InstanceId": "

    The ID of the resource being exported.

    ", + "InstanceIdSet$member": null, + "InstanceIdStringList$member": null, + "InstanceMonitoring$InstanceId": "

    The ID of the instance.

    ", + "InstanceNetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "InstanceNetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "InstanceNetworkInterface$Description": "

    The description.

    ", + "InstanceNetworkInterface$OwnerId": "

    The ID of the AWS account that created the network interface.

    ", + "InstanceNetworkInterface$MacAddress": "

    The MAC address.

    ", + "InstanceNetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "InstanceNetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceNetworkInterfaceAssociation$PublicIp": "

    The public IP address or Elastic IP address bound to the network interface.

    ", + "InstanceNetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "InstanceNetworkInterfaceAssociation$IpOwnerId": "

    The ID of the owner of the Elastic IP address.

    ", + "InstanceNetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "InstanceNetworkInterfaceSpecification$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "InstanceNetworkInterfaceSpecification$SubnetId": "

    The ID of the subnet associated with the network string. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$Description": "

    The description of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstanceNetworkInterfaceSpecification$PrivateIpAddress": "

    The private IP address of the network interface. Applies only if creating a network interface when launching an instance.

    ", + "InstancePrivateIpAddress$PrivateIpAddress": "

    The private IP address of the network interface.

    ", + "InstancePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "InstanceStateChange$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$InstanceId": "

    The ID of the instance.

    ", + "InstanceStatus$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "InstanceStatusEvent$Description": "

    A description of the event.

    After a scheduled event is completed, it can still be described for up to a week. If the event has been completed, this description starts with the following text: [Completed].

    ", + "InternetGateway$InternetGatewayId": "

    The ID of the Internet gateway.

    ", + "InternetGatewayAttachment$VpcId": "

    The ID of the VPC.

    ", + "IpPermission$IpProtocol": "

    The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers).

    [EC2-VPC only] When you authorize or revoke security group rules, you can use -1 to specify all.

    ", + "IpRange$CidrIp": "

    The CIDR range. You can either specify a CIDR range or a source security group, not both.

    ", + "KeyNameStringList$member": null, + "KeyPair$KeyName": "

    The name of the key pair.

    ", + "KeyPair$KeyFingerprint": "

    The SHA-1 digest of the DER encoded private key.

    ", + "KeyPair$KeyMaterial": "

    An unencrypted PEM encoded RSA private key.

    ", + "KeyPairInfo$KeyName": "

    The name of the key pair.

    ", + "KeyPairInfo$KeyFingerprint": "

    If you used CreateKeyPair to create the key pair, this is the SHA-1 digest of the DER encoded private key. If you used ImportKeyPair to provide AWS the public key, this is the MD5 public key fingerprint as specified in section 4 of RFC4716.

    ", + "LaunchPermission$UserId": "

    The AWS account ID.

    ", + "LaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "LaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "LaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "LaunchSpecification$AddressingType": "

    Deprecated.

    ", + "LaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "LaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "LaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    ", + "ModifyIdFormatRequest$Resource": "

    The type of resource.

    ", + "ModifyImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ModifyImageAttributeRequest$Attribute": "

    The name of the attribute to modify.

    ", + "ModifyImageAttributeRequest$Value": "

    The value of the attribute being modified. This is only valid when modifying the description attribute.

    ", + "ModifyInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ModifyInstanceAttributeRequest$Value": "

    A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute.

    ", + "ModifyInstancePlacementRequest$InstanceId": "

    The ID of the instance that you are modifying.

    ", + "ModifyInstancePlacementRequest$HostId": "

    The ID of the Dedicated host that the instance will have affinity with.

    ", + "ModifyNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ModifyReservedInstancesRequest$ClientToken": "

    A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

    ", + "ModifyReservedInstancesResult$ReservedInstancesModificationId": "

    The ID for the modification.

    ", + "ModifySnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ModifySpotFleetRequestRequest$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "ModifySubnetAttributeRequest$SubnetId": "

    The ID of the subnet.

    ", + "ModifyVolumeAttributeRequest$VolumeId": "

    The ID of the volume.

    ", + "ModifyVpcAttributeRequest$VpcId": "

    The ID of the VPC.

    ", + "ModifyVpcEndpointRequest$VpcEndpointId": "

    The ID of the endpoint.

    ", + "ModifyVpcEndpointRequest$PolicyDocument": "

    A policy document to attach to the endpoint. The policy must be in valid JSON format.

    ", + "MoveAddressToVpcRequest$PublicIp": "

    The Elastic IP address.

    ", + "MoveAddressToVpcResult$AllocationId": "

    The allocation ID for the Elastic IP address.

    ", + "MovingAddressStatus$PublicIp": "

    The Elastic IP address.

    ", + "NatGateway$VpcId": "

    The ID of the VPC in which the NAT gateway is located.

    ", + "NatGateway$SubnetId": "

    The ID of the subnet in which the NAT gateway is located.

    ", + "NatGateway$NatGatewayId": "

    The ID of the NAT gateway.

    ", + "NatGateway$FailureCode": "

    If the NAT gateway could not be created, specifies the error code for the failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound | Resource.AlreadyAssociated | InternalError)

    ", + "NatGateway$FailureMessage": "

    If the NAT gateway could not be created, specifies the error message for the failure, that corresponds to the error code.

    • For InsufficientFreeAddressesInSubnet: Subnet has insufficient free addresses to create this NAT gateway
    • For Gateway.NotAttached: Network vpc-xxxxxxxx has no Internet gateway attached
    • For InvalidAllocationID.NotFound: Elastic IP address eipalloc-xxxxxxxx could not be associated with this NAT gateway
    • For Resource.AlreadyAssociated: Elastic IP address eipalloc-xxxxxxxx is already associated
    • For InternalError: Network interface eni-xxxxxxxx, created and used internally by this NAT gateway is in an invalid state. Please try again.
    ", + "NatGatewayAddress$PublicIp": "

    The Elastic IP address associated with the NAT gateway.

    ", + "NatGatewayAddress$AllocationId": "

    The allocation ID of the Elastic IP address that's associated with the NAT gateway.

    ", + "NatGatewayAddress$PrivateIp": "

    The private IP address associated with the Elastic IP address.

    ", + "NatGatewayAddress$NetworkInterfaceId": "

    The ID of the network interface associated with the NAT gateway.

    ", + "NetworkAcl$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAcl$VpcId": "

    The ID of the VPC for the network ACL.

    ", + "NetworkAclAssociation$NetworkAclAssociationId": "

    The ID of the association between a network ACL and a subnet.

    ", + "NetworkAclAssociation$NetworkAclId": "

    The ID of the network ACL.

    ", + "NetworkAclAssociation$SubnetId": "

    The ID of the subnet.

    ", + "NetworkAclEntry$Protocol": "

    The protocol. A value of -1 means all protocols.

    ", + "NetworkAclEntry$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "NetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "NetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "NetworkInterface$VpcId": "

    The ID of the VPC.

    ", + "NetworkInterface$AvailabilityZone": "

    The Availability Zone.

    ", + "NetworkInterface$Description": "

    A description.

    ", + "NetworkInterface$OwnerId": "

    The AWS account ID of the owner of the network interface.

    ", + "NetworkInterface$RequesterId": "

    The ID of the entity that launched the instance on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "NetworkInterface$MacAddress": "

    The MAC address.

    ", + "NetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "NetworkInterface$PrivateDnsName": "

    The private DNS name.

    ", + "NetworkInterfaceAssociation$PublicIp": "

    The address of the Elastic IP address bound to the network interface.

    ", + "NetworkInterfaceAssociation$PublicDnsName": "

    The public DNS name.

    ", + "NetworkInterfaceAssociation$IpOwnerId": "

    The ID of the Elastic IP address owner.

    ", + "NetworkInterfaceAssociation$AllocationId": "

    The allocation ID.

    ", + "NetworkInterfaceAssociation$AssociationId": "

    The association ID.

    ", + "NetworkInterfaceAttachment$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceAttachment$InstanceId": "

    The ID of the instance.

    ", + "NetworkInterfaceAttachment$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "NetworkInterfaceAttachmentChanges$AttachmentId": "

    The ID of the network interface attachment.

    ", + "NetworkInterfaceIdList$member": null, + "NetworkInterfacePrivateIpAddress$PrivateIpAddress": "

    The private IP address.

    ", + "NetworkInterfacePrivateIpAddress$PrivateDnsName": "

    The private DNS name.

    ", + "NewDhcpConfiguration$Key": null, + "OwnerStringList$member": null, + "Placement$AvailabilityZone": "

    The Availability Zone of the instance.

    ", + "Placement$GroupName": "

    The name of the placement group the instance is in (for cluster compute instances).

    ", + "Placement$HostId": "

    The ID of the Dedicated host on which the instance resides. This parameter is not supported for the ImportInstance command.

    ", + "Placement$Affinity": "

    The affinity setting for the instance on the Dedicated host. This parameter is not supported for the ImportInstance command.

    ", + "PlacementGroup$GroupName": "

    The name of the placement group.

    ", + "PlacementGroupStringList$member": null, + "PrefixList$PrefixListId": "

    The ID of the prefix.

    ", + "PrefixList$PrefixListName": "

    The name of the prefix.

    ", + "PrefixListId$PrefixListId": "

    The ID of the prefix.

    ", + "PrivateIpAddressSpecification$PrivateIpAddress": "

    The private IP addresses.

    ", + "PrivateIpAddressStringList$member": null, + "ProductCode$ProductCodeId": "

    The product code.

    ", + "ProductCodeStringList$member": null, + "ProductDescriptionList$member": null, + "PropagatingVgw$GatewayId": "

    The ID of the virtual private gateway (VGW).

    ", + "PublicIpStringList$member": null, + "PurchaseRequest$PurchaseToken": "

    The purchase token.

    ", + "PurchaseReservedInstancesOfferingRequest$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering to purchase.

    ", + "PurchaseReservedInstancesOfferingResult$ReservedInstancesId": "

    The IDs of the purchased Reserved Instances.

    ", + "PurchaseScheduledInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

    ", + "Region$RegionName": "

    The name of the region.

    ", + "Region$Endpoint": "

    The region service endpoint.

    ", + "RegionNameStringList$member": null, + "RegisterImageRequest$ImageLocation": "

    The full path to your AMI manifest in Amazon S3 storage.

    ", + "RegisterImageRequest$Name": "

    A name for your AMI.

    Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), at-signs (@), or underscores(_)

    ", + "RegisterImageRequest$Description": "

    A description for your AMI.

    ", + "RegisterImageRequest$KernelId": "

    The ID of the kernel.

    ", + "RegisterImageRequest$RamdiskId": "

    The ID of the RAM disk.

    ", + "RegisterImageRequest$RootDeviceName": "

    The name of the root device (for example, /dev/sda1, or /dev/xvda).

    ", + "RegisterImageRequest$VirtualizationType": "

    The type of virtualization.

    Default: paravirtual

    ", + "RegisterImageRequest$SriovNetSupport": "

    Set to simple to enable enhanced networking for the AMI and any instances that you launch from the AMI.

    There is no way to disable enhanced networking at this time.

    This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

    ", + "RegisterImageResult$ImageId": "

    The ID of the newly registered AMI.

    ", + "RejectVpcPeeringConnectionRequest$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "ReleaseAddressRequest$PublicIp": "

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    ", + "ReleaseAddressRequest$AllocationId": "

    [EC2-VPC] The allocation ID. Required for EC2-VPC.

    ", + "ReplaceNetworkAclAssociationRequest$AssociationId": "

    The ID of the current association between the original network ACL and the subnet.

    ", + "ReplaceNetworkAclAssociationRequest$NetworkAclId": "

    The ID of the new network ACL to associate with the subnet.

    ", + "ReplaceNetworkAclAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReplaceNetworkAclEntryRequest$NetworkAclId": "

    The ID of the ACL.

    ", + "ReplaceNetworkAclEntryRequest$Protocol": "

    The IP protocol. You can specify all or -1 to mean all protocols.

    ", + "ReplaceNetworkAclEntryRequest$CidrBlock": "

    The network range to allow or deny, in CIDR notation.

    ", + "ReplaceRouteRequest$RouteTableId": "

    The ID of the route table.

    ", + "ReplaceRouteRequest$DestinationCidrBlock": "

    The CIDR address block used for the destination match. The value you provide must match the CIDR of an existing route in the table.

    ", + "ReplaceRouteRequest$GatewayId": "

    The ID of an Internet gateway or virtual private gateway.

    ", + "ReplaceRouteRequest$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "ReplaceRouteRequest$NetworkInterfaceId": "

    The ID of a network interface.

    ", + "ReplaceRouteRequest$VpcPeeringConnectionId": "

    The ID of a VPC peering connection.

    ", + "ReplaceRouteRequest$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "ReplaceRouteTableAssociationRequest$AssociationId": "

    The association ID.

    ", + "ReplaceRouteTableAssociationRequest$RouteTableId": "

    The ID of the new route table to associate with the subnet.

    ", + "ReplaceRouteTableAssociationResult$NewAssociationId": "

    The ID of the new association.

    ", + "ReportInstanceStatusRequest$Description": "

    Descriptive text about the health state of your instance.

    ", + "RequestHostIdList$member": null, + "RequestSpotFleetResponse$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "RequestSpotInstancesRequest$SpotPrice": "

    The maximum hourly price (bid) for any Spot instance launched to fulfill the request.

    ", + "RequestSpotInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

    ", + "RequestSpotInstancesRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    Default: Instances are launched and terminated individually

    ", + "RequestSpotInstancesRequest$AvailabilityZoneGroup": "

    The user-specified name for a logical grouping of bids.

    When you specify an Availability Zone group in a Spot Instance request, all Spot instances in the request are launched in the same Availability Zone. Instance proximity is maintained with this parameter, but the choice of Availability Zone is not. The group applies only to bids for Spot Instances of the same instance type. Any additional Spot instance requests that are specified with the same Availability Zone group name are launched in that same Availability Zone, as long as at least one instance from the group is still active.

    If there is no active instance running in the Availability Zone group that you specify for a new Spot instance request (all instances are terminated, the bid is expired, or the bid falls below current market), then Amazon EC2 launches the instance in any Availability Zone where the constraint can be met. Consequently, the subsequent set of Spot instances could be placed in a different zone from the original request, even if you specified the same Availability Zone group.

    Default: Instances are launched in any available Availability Zone.

    ", + "RequestSpotLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "RequestSpotLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "RequestSpotLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "RequestSpotLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "RequestSpotLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "RequestSpotLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "RequestSpotLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instance.

    ", + "Reservation$ReservationId": "

    The ID of the reservation.

    ", + "Reservation$OwnerId": "

    The ID of the AWS account that owns the reservation.

    ", + "Reservation$RequesterId": "

    The ID of the requester that launched the instances on your behalf (for example, AWS Management Console or Auto Scaling).

    ", + "ReservedInstances$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstances$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesConfiguration$AvailabilityZone": "

    The Availability Zone for the modified Reserved Instances.

    ", + "ReservedInstancesConfiguration$Platform": "

    The network platform of the modified Reserved Instances, which is either EC2-Classic or EC2-VPC.

    ", + "ReservedInstancesId$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesIdStringList$member": null, + "ReservedInstancesListing$ReservedInstancesListingId": "

    The ID of the Reserved Instance listing.

    ", + "ReservedInstancesListing$ReservedInstancesId": "

    The ID of the Reserved Instance.

    ", + "ReservedInstancesListing$StatusMessage": "

    The reason for the current status of the Reserved Instance listing. The response can be blank.

    ", + "ReservedInstancesListing$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModification$ReservedInstancesModificationId": "

    A unique ID for the Reserved Instance modification.

    ", + "ReservedInstancesModification$Status": "

    The status of the Reserved Instances modification request.

    ", + "ReservedInstancesModification$StatusMessage": "

    The reason for the status.

    ", + "ReservedInstancesModification$ClientToken": "

    A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. For more information, see Ensuring Idempotency.

    ", + "ReservedInstancesModificationIdStringList$member": null, + "ReservedInstancesModificationResult$ReservedInstancesId": "

    The ID for the Reserved Instances that were created as part of the modification request. This field is only available when the modification is fulfilled.

    ", + "ReservedInstancesOffering$ReservedInstancesOfferingId": "

    The ID of the Reserved Instance offering.

    ", + "ReservedInstancesOffering$AvailabilityZone": "

    The Availability Zone in which the Reserved Instance can be used.

    ", + "ReservedInstancesOfferingIdStringList$member": null, + "ResetImageAttributeRequest$ImageId": "

    The ID of the AMI.

    ", + "ResetInstanceAttributeRequest$InstanceId": "

    The ID of the instance.

    ", + "ResetNetworkInterfaceAttributeRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ResetNetworkInterfaceAttributeRequest$SourceDestCheck": "

    The source/destination checking attribute. Resets the value to true.

    ", + "ResetSnapshotAttributeRequest$SnapshotId": "

    The ID of the snapshot.

    ", + "ResourceIdList$member": null, + "ResponseHostIdList$member": null, + "RestorableByStringList$member": null, + "RestoreAddressToClassicRequest$PublicIp": "

    The Elastic IP address.

    ", + "RestoreAddressToClassicResult$PublicIp": "

    The Elastic IP address.

    ", + "RevokeSecurityGroupEgressRequest$GroupId": "

    The ID of the security group.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupName": "

    The name of a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$SourceSecurityGroupOwnerId": "

    The AWS account number for a destination security group. To revoke outbound access to a destination security group, we recommend that you use a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$IpProtocol": "

    The IP protocol name or number. We recommend that you specify the protocol in a set of IP permissions instead.

    ", + "RevokeSecurityGroupEgressRequest$CidrIp": "

    The CIDR IP address range. We recommend that you specify the CIDR range in a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$GroupName": "

    [EC2-Classic, default VPC] The name of the security group.

    ", + "RevokeSecurityGroupIngressRequest$GroupId": "

    The ID of the security group. Required for a security group in a nondefault VPC.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupName": "

    [EC2-Classic, default VPC] The name of the source security group. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the start of the port range, the IP protocol, and the end of the port range. For EC2-VPC, the source security group must be in the same VPC.

    ", + "RevokeSecurityGroupIngressRequest$SourceSecurityGroupOwnerId": "

    [EC2-Classic, default VPC] The AWS account ID of the source security group. For EC2-VPC, the source security group must be in the same VPC. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. To revoke a specific rule for an IP protocol and port range, use a set of IP permissions instead.

    ", + "RevokeSecurityGroupIngressRequest$IpProtocol": "

    The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all.

    ", + "RevokeSecurityGroupIngressRequest$CidrIp": "

    The CIDR IP address range. You can't specify this parameter when specifying a source security group.

    ", + "Route$DestinationCidrBlock": "

    The CIDR block used for the destination match.

    ", + "Route$DestinationPrefixListId": "

    The prefix of the AWS service.

    ", + "Route$GatewayId": "

    The ID of a gateway attached to your VPC.

    ", + "Route$InstanceId": "

    The ID of a NAT instance in your VPC.

    ", + "Route$InstanceOwnerId": "

    The AWS account ID of the owner of the instance.

    ", + "Route$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "Route$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "Route$NatGatewayId": "

    The ID of a NAT gateway.

    ", + "RouteTable$RouteTableId": "

    The ID of the route table.

    ", + "RouteTable$VpcId": "

    The ID of the VPC.

    ", + "RouteTableAssociation$RouteTableAssociationId": "

    The ID of the association between a route table and a subnet.

    ", + "RouteTableAssociation$RouteTableId": "

    The ID of the route table.

    ", + "RouteTableAssociation$SubnetId": "

    The ID of the subnet. A subnet ID is not returned for an implicit association.

    ", + "RunInstancesRequest$ImageId": "

    The ID of the AMI, which you can get by calling DescribeImages.

    ", + "RunInstancesRequest$KeyName": "

    The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair.

    If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.

    ", + "RunInstancesRequest$UserData": "

    Data to configure the instance, or a script to run during instance launch. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). For API calls, the text must be base64-encoded. Command line tools perform encoding for you.

    ", + "RunInstancesRequest$KernelId": "

    The ID of the kernel.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$RamdiskId": "

    The ID of the RAM disk.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide.

    ", + "RunInstancesRequest$SubnetId": "

    [EC2-VPC] The ID of the subnet to launch the instance into.

    ", + "RunInstancesRequest$PrivateIpAddress": "

    [EC2-VPC] The primary IP address. You must specify a value from the IP address range of the subnet.

    Only one private IP address can be designated as primary. Therefore, you can't specify this parameter if PrivateIpAddresses.n.Primary is set to true and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address.

    Default: We select an IP address from the IP address range of the subnet.

    ", + "RunInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

    Constraints: Maximum 64 ASCII characters

    ", + "RunInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "RunScheduledInstancesRequest$ClientToken": "

    Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

    ", + "RunScheduledInstancesRequest$ScheduledInstanceId": "

    The Scheduled Instance ID.

    ", + "S3Storage$Bucket": "

    The bucket in which to store the AMI. You can specify a bucket that you already own or a new bucket that Amazon EC2 creates on your behalf. If you specify a bucket that belongs to someone else, Amazon EC2 returns an error.

    ", + "S3Storage$Prefix": "

    The beginning of the file name of the AMI.

    ", + "S3Storage$AWSAccessKeyId": "

    The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance in Best Practices for Managing AWS Access Keys.

    ", + "S3Storage$UploadPolicySignature": "

    The signature of the Base64 encoded JSON document.

    ", + "ScheduledInstance$ScheduledInstanceId": "

    The Scheduled Instance ID.

    ", + "ScheduledInstance$InstanceType": "

    The instance type.

    ", + "ScheduledInstance$Platform": "

    The platform (Linux/UNIX or Windows).

    ", + "ScheduledInstance$NetworkPlatform": "

    The network platform (EC2-Classic or EC2-VPC).

    ", + "ScheduledInstance$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstance$HourlyPrice": "

    The hourly price for a single instance.

    ", + "ScheduledInstanceAvailability$InstanceType": "

    The instance type. You can specify one of the C3, C4, M4, or R3 instance types.

    ", + "ScheduledInstanceAvailability$Platform": "

    The platform (Linux/UNIX or Windows).

    ", + "ScheduledInstanceAvailability$NetworkPlatform": "

    The network platform (EC2-Classic or EC2-VPC).

    ", + "ScheduledInstanceAvailability$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstanceAvailability$PurchaseToken": "

    The purchase token. This token expires in two hours.

    ", + "ScheduledInstanceAvailability$HourlyPrice": "

    The hourly price for a single instance.

    ", + "ScheduledInstanceIdRequestSet$member": null, + "ScheduledInstanceRecurrence$Frequency": "

    The frequency (Daily, Weekly, or Monthly).

    ", + "ScheduledInstanceRecurrence$OccurrenceUnit": "

    The unit for occurrenceDaySet (DayOfWeek or DayOfMonth).

    ", + "ScheduledInstanceRecurrenceRequest$Frequency": "

    The frequency (Daily, Weekly, or Monthly).

    ", + "ScheduledInstanceRecurrenceRequest$OccurrenceUnit": "

    The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required for a monthly schedule. You can't specify DayOfWeek with a weekly schedule. You can't specify this value with a daily schedule.

    ", + "ScheduledInstancesBlockDeviceMapping$DeviceName": "

    The device name exposed to the instance (for example, /dev/sdh or xvdh).

    ", + "ScheduledInstancesBlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the block device mapping of the AMI.

    ", + "ScheduledInstancesBlockDeviceMapping$VirtualName": "

    The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with two available instance store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

    Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

    ", + "ScheduledInstancesEbs$SnapshotId": "

    The ID of the snapshot.

    ", + "ScheduledInstancesEbs$VolumeType": "

    The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes.

    Default: standard

    ", + "ScheduledInstancesIamInstanceProfile$Arn": "

    The Amazon Resource Name (ARN).

    ", + "ScheduledInstancesIamInstanceProfile$Name": "

    The name.

    ", + "ScheduledInstancesLaunchSpecification$ImageId": "

    The ID of the Amazon Machine Image (AMI).

    ", + "ScheduledInstancesLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "ScheduledInstancesLaunchSpecification$UserData": "

    The base64-encoded MIME user data.

    ", + "ScheduledInstancesLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "ScheduledInstancesLaunchSpecification$InstanceType": "

    The instance type.

    ", + "ScheduledInstancesLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "ScheduledInstancesLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instances.

    ", + "ScheduledInstancesNetworkInterface$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "ScheduledInstancesNetworkInterface$SubnetId": "

    The ID of the subnet.

    ", + "ScheduledInstancesNetworkInterface$Description": "

    The description.

    ", + "ScheduledInstancesNetworkInterface$PrivateIpAddress": "

    The IP address of the network interface within the subnet.

    ", + "ScheduledInstancesPlacement$AvailabilityZone": "

    The Availability Zone.

    ", + "ScheduledInstancesPlacement$GroupName": "

    The name of the placement group.

    ", + "ScheduledInstancesPrivateIpAddressConfig$PrivateIpAddress": "

    The IP address.

    ", + "ScheduledInstancesSecurityGroupIdSet$member": null, + "SecurityGroup$OwnerId": "

    The AWS account ID of the owner of the security group.

    ", + "SecurityGroup$GroupName": "

    The name of the security group.

    ", + "SecurityGroup$GroupId": "

    The ID of the security group.

    ", + "SecurityGroup$Description": "

    A description of the security group.

    ", + "SecurityGroup$VpcId": "

    [EC2-VPC] The ID of the VPC for the security group.

    ", + "SecurityGroupIdStringList$member": null, + "SecurityGroupStringList$member": null, + "Snapshot$SnapshotId": "

    The ID of the snapshot. Each snapshot receives a unique identifier when it is created.

    ", + "Snapshot$VolumeId": "

    The ID of the volume that was used to create the snapshot.

    ", + "Snapshot$StateMessage": "

    Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "Snapshot$Progress": "

    The progress of the snapshot, as a percentage.

    ", + "Snapshot$OwnerId": "

    The AWS account ID of the EBS snapshot owner.

    ", + "Snapshot$Description": "

    The description for the snapshot.

    ", + "Snapshot$OwnerAlias": "

    The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.

    ", + "Snapshot$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the parent volume.

    ", + "Snapshot$DataEncryptionKeyId": "

    The data encryption key identifier for the snapshot. This value is a unique identifier that corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. Because data encryption keys are inherited by volumes created from snapshots, and vice versa, if snapshots share the same data encryption key identifier, then they belong to the same volume/snapshot lineage. This parameter is only returned by the DescribeSnapshots API operation.

    ", + "SnapshotDetail$Description": "

    A description for the snapshot.

    ", + "SnapshotDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotDetail$Url": "

    The URL used to access the disk image.

    ", + "SnapshotDetail$DeviceName": "

    The block device mapping for the snapshot.

    ", + "SnapshotDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotDetail$Progress": "

    The percentage of progress for the task.

    ", + "SnapshotDetail$StatusMessage": "

    A detailed status message for the snapshot creation.

    ", + "SnapshotDetail$Status": "

    A brief status of the snapshot creation.

    ", + "SnapshotDiskContainer$Description": "

    The description of the disk image being imported.

    ", + "SnapshotDiskContainer$Format": "

    The format of the disk image being imported.

    Valid values: RAW | VHD | VMDK | OVA

    ", + "SnapshotDiskContainer$Url": "

    The URL to the Amazon S3-based disk image being imported. It can either be a https URL (https://..) or an Amazon S3 URL (s3://..).

    ", + "SnapshotIdStringList$member": null, + "SnapshotTaskDetail$Description": "

    The description of the snapshot.

    ", + "SnapshotTaskDetail$Format": "

    The format of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$Url": "

    The URL of the disk image from which the snapshot is created.

    ", + "SnapshotTaskDetail$SnapshotId": "

    The snapshot ID of the disk being imported.

    ", + "SnapshotTaskDetail$Progress": "

    The percentage of completion for the import snapshot task.

    ", + "SnapshotTaskDetail$StatusMessage": "

    A detailed status message for the import snapshot task.

    ", + "SnapshotTaskDetail$Status": "

    A brief status for the import snapshot task.

    ", + "SpotDatafeedSubscription$OwnerId": "

    The AWS account ID of the account.

    ", + "SpotDatafeedSubscription$Bucket": "

    The Amazon S3 bucket where the Spot instance data feed is located.

    ", + "SpotDatafeedSubscription$Prefix": "

    The prefix that is prepended to data feed files.

    ", + "SpotFleetLaunchSpecification$ImageId": "

    The ID of the AMI.

    ", + "SpotFleetLaunchSpecification$KeyName": "

    The name of the key pair.

    ", + "SpotFleetLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to make available to the instances.

    ", + "SpotFleetLaunchSpecification$AddressingType": "

    Deprecated.

    ", + "SpotFleetLaunchSpecification$KernelId": "

    The ID of the kernel.

    ", + "SpotFleetLaunchSpecification$RamdiskId": "

    The ID of the RAM disk.

    ", + "SpotFleetLaunchSpecification$SubnetId": "

    The ID of the subnet in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-a61dafcf, subnet-65ea5f08\".

    ", + "SpotFleetLaunchSpecification$SpotPrice": "

    The bid price per unit hour for the specified instance type. If this value is not specified, the default is the Spot bid price specified for the fleet. To determine the bid price per unit hour, divide the Spot bid price by the value of WeightedCapacity.

    ", + "SpotFleetRequestConfig$SpotFleetRequestId": "

    The ID of the Spot fleet request.

    ", + "SpotFleetRequestConfigData$ClientToken": "

    A unique, case-sensitive identifier you provide to ensure idempotency of your listings. This helps avoid duplicate listings. For more information, see Ensuring Idempotency.

    ", + "SpotFleetRequestConfigData$SpotPrice": "

    The bid price per unit hour.

    ", + "SpotFleetRequestConfigData$IamFleetRole": "

    Grants the Spot fleet permission to terminate Spot instances on your behalf when you cancel its Spot fleet request using CancelSpotFleetRequests or when the Spot fleet request expires, if you set terminateInstancesWithExpiration.

    ", + "SpotInstanceRequest$SpotInstanceRequestId": "

    The ID of the Spot instance request.

    ", + "SpotInstanceRequest$SpotPrice": "

    The maximum hourly price (bid) for the Spot instance launched to fulfill the request.

    ", + "SpotInstanceRequest$LaunchGroup": "

    The instance launch group. Launch groups are Spot instances that launch together and terminate together.

    ", + "SpotInstanceRequest$AvailabilityZoneGroup": "

    The Availability Zone group. If you specify the same Availability Zone group for all Spot instance requests, all Spot instances are launched in the same Availability Zone.

    ", + "SpotInstanceRequest$InstanceId": "

    The instance ID, if an instance has been launched to fulfill the Spot instance request.

    ", + "SpotInstanceRequest$ActualBlockHourlyPrice": "

    If you specified a duration and your Spot instance request was fulfilled, this is the fixed hourly price in effect for the Spot instance while it runs.

    ", + "SpotInstanceRequest$LaunchedAvailabilityZone": "

    The Availability Zone in which the bid is launched.

    ", + "SpotInstanceRequestIdList$member": null, + "SpotInstanceStateFault$Code": "

    The reason code for the Spot instance state change.

    ", + "SpotInstanceStateFault$Message": "

    The message for the Spot instance state change.

    ", + "SpotInstanceStatus$Code": "

    The status code. For a list of status codes, see Spot Bid Status Codes in the Amazon Elastic Compute Cloud User Guide.

    ", + "SpotInstanceStatus$Message": "

    The description for the status code.

    ", + "SpotPlacement$AvailabilityZone": "

    The Availability Zones. To specify multiple Availability Zones, separate them using commas; for example, \"us-west-2a, us-west-2b\".

    ", + "SpotPlacement$GroupName": "

    The name of the placement group (for cluster instances).

    ", + "SpotPrice$SpotPrice": "

    The maximum price (bid) that you are willing to pay for a Spot instance.

    ", + "SpotPrice$AvailabilityZone": "

    The Availability Zone.

    ", + "StartInstancesRequest$AdditionalInfo": "

    Reserved.

    ", + "StateReason$Code": "

    The reason code for the state change.

    ", + "StateReason$Message": "

    The message for the state change.

    • Server.SpotInstanceTermination: A Spot instance was terminated due to an increase in the market price.

    • Server.InternalError: An internal error occurred during instance launch, resulting in termination.

    • Server.InsufficientInstanceCapacity: There was insufficient instance capacity to satisfy the launch request.

    • Client.InternalError: A client error caused the instance to terminate on launch.

    • Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown -h command from the instance.

    • Client.UserInitiatedShutdown: The instance was shut down using the Amazon EC2 API.

    • Client.VolumeLimitExceeded: The limit on the number of EBS volumes or total storage was exceeded. Decrease usage or request an increase in your limits.

    • Client.InvalidSnapshot.NotFound: The specified snapshot was not found.

    ", + "Subnet$SubnetId": "

    The ID of the subnet.

    ", + "Subnet$VpcId": "

    The ID of the VPC the subnet is in.

    ", + "Subnet$CidrBlock": "

    The CIDR block assigned to the subnet.

    ", + "Subnet$AvailabilityZone": "

    The Availability Zone of the subnet.

    ", + "SubnetIdStringList$member": null, + "Tag$Key": "

    The key of the tag.

    Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:

    ", + "Tag$Value": "

    The value of the tag.

    Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.

    ", + "TagDescription$ResourceId": "

    The ID of the resource. For example, ami-1a2b3c4d.

    ", + "TagDescription$Key": "

    The tag key.

    ", + "TagDescription$Value": "

    The tag value.

    ", + "UnassignPrivateIpAddressesRequest$NetworkInterfaceId": "

    The ID of the network interface.

    ", + "UnsuccessfulItem$ResourceId": "

    The ID of the resource.

    ", + "UnsuccessfulItemError$Code": "

    The error code.

    ", + "UnsuccessfulItemError$Message": "

    The error message accompanying the error code.

    ", + "UserBucket$S3Bucket": "

    The name of the S3 bucket where the disk image is located.

    ", + "UserBucket$S3Key": "

    The key for the disk image.

    ", + "UserBucketDetails$S3Bucket": "

    The S3 bucket from which the disk image was created.

    ", + "UserBucketDetails$S3Key": "

    The key from which the disk image was created.

    ", + "UserData$Data": "

    The Base64-encoded MIME user data for the instance.

    ", + "UserGroupStringList$member": null, + "UserIdGroupPair$UserId": "

    The ID of an AWS account. EC2-Classic only.

    ", + "UserIdGroupPair$GroupName": "

    The name of the security group. In a request, use this parameter for a security group in EC2-Classic or a default VPC only. For a security group in a nondefault VPC, use GroupId.

    ", + "UserIdGroupPair$GroupId": "

    The ID of the security group.

    ", + "UserIdStringList$member": null, + "ValueStringList$member": null, + "VgwTelemetry$OutsideIpAddress": "

    The Internet-routable IP address of the virtual private gateway's outside interface.

    ", + "VgwTelemetry$StatusMessage": "

    If an error occurs, a description of the error.

    ", + "Volume$VolumeId": "

    The ID of the volume.

    ", + "Volume$SnapshotId": "

    The snapshot from which the volume was created, if applicable.

    ", + "Volume$AvailabilityZone": "

    The Availability Zone for the volume.

    ", + "Volume$KmsKeyId": "

    The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.

    ", + "VolumeAttachment$VolumeId": "

    The ID of the volume.

    ", + "VolumeAttachment$InstanceId": "

    The ID of the instance.

    ", + "VolumeAttachment$Device": "

    The device name.

    ", + "VolumeIdStringList$member": null, + "VolumeStatusAction$Code": "

    The code identifying the operation, for example, enable-volume-io.

    ", + "VolumeStatusAction$Description": "

    A description of the operation.

    ", + "VolumeStatusAction$EventType": "

    The event type associated with this operation.

    ", + "VolumeStatusAction$EventId": "

    The ID of the event associated with this operation.

    ", + "VolumeStatusDetails$Status": "

    The intended status of the volume status.

    ", + "VolumeStatusEvent$EventType": "

    The type of this event.

    ", + "VolumeStatusEvent$Description": "

    A description of the event.

    ", + "VolumeStatusEvent$EventId": "

    The ID of this event.

    ", + "VolumeStatusItem$VolumeId": "

    The volume ID.

    ", + "VolumeStatusItem$AvailabilityZone": "

    The Availability Zone of the volume.

    ", + "Vpc$VpcId": "

    The ID of the VPC.

    ", + "Vpc$CidrBlock": "

    The CIDR block for the VPC.

    ", + "Vpc$DhcpOptionsId": "

    The ID of the set of DHCP options you've associated with the VPC (or default if the default options are associated with the VPC).

    ", + "VpcAttachment$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLink$VpcId": "

    The ID of the VPC.

    ", + "VpcClassicLinkIdList$member": null, + "VpcEndpoint$VpcEndpointId": "

    The ID of the VPC endpoint.

    ", + "VpcEndpoint$VpcId": "

    The ID of the VPC to which the endpoint is associated.

    ", + "VpcEndpoint$ServiceName": "

    The name of the AWS service to which the endpoint is associated.

    ", + "VpcEndpoint$PolicyDocument": "

    The policy document associated with the endpoint.

    ", + "VpcIdStringList$member": null, + "VpcPeeringConnection$VpcPeeringConnectionId": "

    The ID of the VPC peering connection.

    ", + "VpcPeeringConnectionStateReason$Message": "

    A message that provides more information about the status, if applicable.

    ", + "VpcPeeringConnectionVpcInfo$CidrBlock": "

    The CIDR block for the VPC.

    ", + "VpcPeeringConnectionVpcInfo$OwnerId": "

    The AWS account ID of the VPC owner.

    ", + "VpcPeeringConnectionVpcInfo$VpcId": "

    The ID of the VPC.

    ", + "VpnConnection$VpnConnectionId": "

    The ID of the VPN connection.

    ", + "VpnConnection$CustomerGatewayConfiguration": "

    The configuration information for the VPN connection's customer gateway (in the native XML format). This element is always present in the CreateVpnConnection response; however, it's present in the DescribeVpnConnections response only if the VPN connection is in the pending or available state.

    ", + "VpnConnection$CustomerGatewayId": "

    The ID of the customer gateway at your end of the VPN connection.

    ", + "VpnConnection$VpnGatewayId": "

    The ID of the virtual private gateway at the AWS side of the VPN connection.

    ", + "VpnConnectionIdStringList$member": null, + "VpnGateway$VpnGatewayId": "

    The ID of the virtual private gateway.

    ", + "VpnGateway$AvailabilityZone": "

    The Availability Zone where the virtual private gateway was created, if applicable. This field may be empty or not returned.

    ", + "VpnGatewayIdStringList$member": null, + "VpnStaticRoute$DestinationCidrBlock": "

    The CIDR block associated with the local subnet of the customer data center.

    ", + "ZoneNameStringList$member": null + } + }, + "Subnet": { + "base": "

    Describes a subnet.

    ", + "refs": { + "CreateSubnetResult$Subnet": "

    Information about the subnet.

    ", + "SubnetList$member": null + } + }, + "SubnetIdStringList": { + "base": null, + "refs": { + "DescribeSubnetsRequest$SubnetIds": "

    One or more subnet IDs.

    Default: Describes all your subnets.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DescribeSubnetsResult$Subnets": "

    Information about one or more subnets.

    " + } + }, + "SubnetState": { + "base": null, + "refs": { + "Subnet$State": "

    The current state of the subnet.

    " + } + }, + "SummaryStatus": { + "base": null, + "refs": { + "InstanceStatusSummary$Status": "

    The status.

    " + } + }, + "Tag": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagDescription": { + "base": "

    Describes a tag.

    ", + "refs": { + "TagDescriptionList$member": null + } + }, + "TagDescriptionList": { + "base": null, + "refs": { + "DescribeTagsResult$Tags": "

    A list of tags.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ClassicLinkInstance$Tags": "

    Any tags assigned to the instance.

    ", + "ConversionTask$Tags": "

    Any tags assigned to the task.

    ", + "CreateTagsRequest$Tags": "

    One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.

    ", + "CustomerGateway$Tags": "

    Any tags assigned to the customer gateway.

    ", + "DeleteTagsRequest$Tags": "

    One or more tags to delete. If you omit the value parameter, we delete the tag regardless of its value. If you specify this parameter with an empty string as the value, we delete the key only if its value is an empty string.

    ", + "DhcpOptions$Tags": "

    Any tags assigned to the DHCP options set.

    ", + "Image$Tags": "

    Any tags assigned to the image.

    ", + "Instance$Tags": "

    Any tags assigned to the instance.

    ", + "InternetGateway$Tags": "

    Any tags assigned to the Internet gateway.

    ", + "NetworkAcl$Tags": "

    Any tags assigned to the network ACL.

    ", + "NetworkInterface$TagSet": "

    Any tags assigned to the network interface.

    ", + "ReservedInstances$Tags": "

    Any tags assigned to the resource.

    ", + "ReservedInstancesListing$Tags": "

    Any tags assigned to the resource.

    ", + "RouteTable$Tags": "

    Any tags assigned to the route table.

    ", + "SecurityGroup$Tags": "

    Any tags assigned to the security group.

    ", + "Snapshot$Tags": "

    Any tags assigned to the snapshot.

    ", + "SpotInstanceRequest$Tags": "

    Any tags assigned to the resource.

    ", + "Subnet$Tags": "

    Any tags assigned to the subnet.

    ", + "Volume$Tags": "

    Any tags assigned to the volume.

    ", + "Vpc$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcClassicLink$Tags": "

    Any tags assigned to the VPC.

    ", + "VpcPeeringConnection$Tags": "

    Any tags assigned to the resource.

    ", + "VpnConnection$Tags": "

    Any tags assigned to the VPN connection.

    ", + "VpnGateway$Tags": "

    Any tags assigned to the virtual private gateway.

    " + } + }, + "TelemetryStatus": { + "base": null, + "refs": { + "VgwTelemetry$Status": "

    The status of the VPN tunnel.

    " + } + }, + "Tenancy": { + "base": null, + "refs": { + "CreateVpcRequest$InstanceTenancy": "

    The supported tenancy options for instances launched into the VPC. A value of default means that instances can be launched with any tenancy; a value of dedicated means all instances launched into the VPC are launched as dedicated tenancy instances regardless of the tenancy assigned to the instance at launch. Dedicated tenancy instances run on single-tenant hardware.

    Important: The host value cannot be used with this parameter. Use the default or dedicated values only.

    Default: default

    ", + "DescribeReservedInstancesOfferingsRequest$InstanceTenancy": "

    The tenancy of the instances covered by the reservation. A Reserved Instance with a tenancy of dedicated is applied to instances that run in a VPC on single-tenant hardware (i.e., Dedicated Instances).

    Default: default

    ", + "Placement$Tenancy": "

    The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the ImportInstance command.

    ", + "ReservedInstances$InstanceTenancy": "

    The tenancy of the instance.

    ", + "ReservedInstancesOffering$InstanceTenancy": "

    The tenancy of the instance.

    ", + "Vpc$InstanceTenancy": "

    The allowed tenancy of instances launched into the VPC.

    " + } + }, + "TerminateInstancesRequest": { + "base": null, + "refs": { + } + }, + "TerminateInstancesResult": { + "base": null, + "refs": { + } + }, + "TrafficType": { + "base": null, + "refs": { + "CreateFlowLogsRequest$TrafficType": "

    The type of traffic to log.

    ", + "FlowLog$TrafficType": "

    The type of traffic captured for the flow log.

    " + } + }, + "UnassignPrivateIpAddressesRequest": { + "base": null, + "refs": { + } + }, + "UnmonitorInstancesRequest": { + "base": null, + "refs": { + } + }, + "UnmonitorInstancesResult": { + "base": null, + "refs": { + } + }, + "UnsuccessfulItem": { + "base": "

    Information about items that were not successfully processed in a batch call.

    ", + "refs": { + "UnsuccessfulItemList$member": null, + "UnsuccessfulItemSet$member": null + } + }, + "UnsuccessfulItemError": { + "base": "

    Information about the error that occurred. For more information about errors, see Error Codes.

    ", + "refs": { + "UnsuccessfulItem$Error": "

    Information about the error.

    " + } + }, + "UnsuccessfulItemList": { + "base": null, + "refs": { + "ModifyHostsResult$Unsuccessful": "

    The IDs of the Dedicated hosts that could not be modified. Check whether the setting you requested can be used.

    ", + "ReleaseHostsResult$Unsuccessful": "

    The IDs of the Dedicated hosts that could not be released, including an error message.

    " + } + }, + "UnsuccessfulItemSet": { + "base": null, + "refs": { + "CreateFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be created successfully.

    ", + "DeleteFlowLogsResult$Unsuccessful": "

    Information about the flow logs that could not be deleted successfully.

    ", + "DeleteVpcEndpointsResult$Unsuccessful": "

    Information about the endpoints that were not successfully deleted.

    " + } + }, + "UserBucket": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "ImageDiskContainer$UserBucket": "

    The S3 bucket for the disk image.

    ", + "SnapshotDiskContainer$UserBucket": null + } + }, + "UserBucketDetails": { + "base": "

    Describes the S3 bucket for the disk image.

    ", + "refs": { + "SnapshotDetail$UserBucket": null, + "SnapshotTaskDetail$UserBucket": "

    The S3 bucket for the disk image.

    " + } + }, + "UserData": { + "base": "

    Describes the user data to be made available to an instance.

    ", + "refs": { + "ImportInstanceLaunchSpecification$UserData": "

    The Base64-encoded MIME user data to be made available to the instance.

    " + } + }, + "UserGroupStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserGroups": "

    One or more user groups. This is only valid when modifying the launchPermission attribute.

    " + } + }, + "UserIdGroupPair": { + "base": "

    Describes a security group and AWS account ID pair.

    ", + "refs": { + "UserIdGroupPairList$member": null + } + }, + "UserIdGroupPairList": { + "base": null, + "refs": { + "IpPermission$UserIdGroupPairs": "

    One or more security group and AWS account ID pairs.

    " + } + }, + "UserIdStringList": { + "base": null, + "refs": { + "ModifyImageAttributeRequest$UserIds": "

    One or more AWS account IDs. This is only valid when modifying the launchPermission attribute.

    ", + "ModifySnapshotAttributeRequest$UserIds": "

    The account ID to modify for the snapshot.

    " + } + }, + "ValueStringList": { + "base": null, + "refs": { + "CancelSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "CreateFlowLogsRequest$ResourceIds": "

    One or more subnet, network interface, or VPC IDs.

    ", + "CreateFlowLogsResult$FlowLogIds": "

    The IDs of the flow logs.

    ", + "CreateVpcEndpointRequest$RouteTableIds": "

    One or more route table IDs.

    ", + "DeleteFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DeleteVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeFlowLogsRequest$FlowLogIds": "

    One or more flow log IDs.

    ", + "DescribeInternetGatewaysRequest$InternetGatewayIds": "

    One or more Internet gateway IDs.

    Default: Describes all your Internet gateways.

    ", + "DescribeMovingAddressesRequest$PublicIps": "

    One or more Elastic IP addresses.

    ", + "DescribeNatGatewaysRequest$NatGatewayIds": "

    One or more NAT gateway IDs.

    ", + "DescribeNetworkAclsRequest$NetworkAclIds": "

    One or more network ACL IDs.

    Default: Describes all your network ACLs.

    ", + "DescribePrefixListsRequest$PrefixListIds": "

    One or more prefix list IDs.

    ", + "DescribeRouteTablesRequest$RouteTableIds": "

    One or more route table IDs.

    Default: Describes all your route tables.

    ", + "DescribeSpotFleetRequestsRequest$SpotFleetRequestIds": "

    The IDs of the Spot fleet requests.

    ", + "DescribeVpcEndpointServicesResult$ServiceNames": "

    A list of supported AWS services.

    ", + "DescribeVpcEndpointsRequest$VpcEndpointIds": "

    One or more endpoint IDs.

    ", + "DescribeVpcPeeringConnectionsRequest$VpcPeeringConnectionIds": "

    One or more VPC peering connection IDs.

    Default: Describes all your VPC peering connections.

    ", + "Filter$Values": "

    One or more filter values. Filter values are case-sensitive.

    ", + "ModifyVpcEndpointRequest$AddRouteTableIds": "

    One or more route tables IDs to associate with the endpoint.

    ", + "ModifyVpcEndpointRequest$RemoveRouteTableIds": "

    One or more route table IDs to disassociate from the endpoint.

    ", + "NewDhcpConfiguration$Values": null, + "PrefixList$Cidrs": "

    The IP address range of the AWS service.

    ", + "RequestSpotLaunchSpecification$SecurityGroups": null, + "RequestSpotLaunchSpecification$SecurityGroupIds": null, + "VpcEndpoint$RouteTableIds": "

    One or more route tables associated with the endpoint.

    " + } + }, + "VgwTelemetry": { + "base": "

    Describes telemetry for a VPN tunnel.

    ", + "refs": { + "VgwTelemetryList$member": null + } + }, + "VgwTelemetryList": { + "base": null, + "refs": { + "VpnConnection$VgwTelemetry": "

    Information about the VPN tunnel.

    " + } + }, + "VirtualizationType": { + "base": null, + "refs": { + "Image$VirtualizationType": "

    The type of virtualization of the AMI.

    ", + "Instance$VirtualizationType": "

    The virtualization type of the instance.

    " + } + }, + "Volume": { + "base": "

    Describes a volume.

    ", + "refs": { + "VolumeList$member": null + } + }, + "VolumeAttachment": { + "base": "

    Describes volume attachment details.

    ", + "refs": { + "VolumeAttachmentList$member": null + } + }, + "VolumeAttachmentList": { + "base": null, + "refs": { + "Volume$Attachments": "

    Information about the volume attachments.

    " + } + }, + "VolumeAttachmentState": { + "base": null, + "refs": { + "VolumeAttachment$State": "

    The attachment state of the volume.

    " + } + }, + "VolumeAttributeName": { + "base": null, + "refs": { + "DescribeVolumeAttributeRequest$Attribute": "

    The instance attribute.

    " + } + }, + "VolumeDetail": { + "base": "

    Describes an EBS volume.

    ", + "refs": { + "DiskImage$Volume": "

    Information about the volume.

    ", + "ImportVolumeRequest$Volume": "

    The volume size.

    " + } + }, + "VolumeIdStringList": { + "base": null, + "refs": { + "DescribeVolumeStatusRequest$VolumeIds": "

    One or more volume IDs.

    Default: Describes all your volumes.

    ", + "DescribeVolumesRequest$VolumeIds": "

    One or more volume IDs.

    " + } + }, + "VolumeList": { + "base": null, + "refs": { + "DescribeVolumesResult$Volumes": "

    Information about the volumes.

    " + } + }, + "VolumeState": { + "base": null, + "refs": { + "Volume$State": "

    The volume state.

    " + } + }, + "VolumeStatusAction": { + "base": "

    Describes a volume status operation code.

    ", + "refs": { + "VolumeStatusActionsList$member": null + } + }, + "VolumeStatusActionsList": { + "base": null, + "refs": { + "VolumeStatusItem$Actions": "

    The details of the operation.

    " + } + }, + "VolumeStatusDetails": { + "base": "

    Describes a volume status.

    ", + "refs": { + "VolumeStatusDetailsList$member": null + } + }, + "VolumeStatusDetailsList": { + "base": null, + "refs": { + "VolumeStatusInfo$Details": "

    The details of the volume status.

    " + } + }, + "VolumeStatusEvent": { + "base": "

    Describes a volume status event.

    ", + "refs": { + "VolumeStatusEventsList$member": null + } + }, + "VolumeStatusEventsList": { + "base": null, + "refs": { + "VolumeStatusItem$Events": "

    A list of events associated with the volume.

    " + } + }, + "VolumeStatusInfo": { + "base": "

    Describes the status of a volume.

    ", + "refs": { + "VolumeStatusItem$VolumeStatus": "

    The volume status.

    " + } + }, + "VolumeStatusInfoStatus": { + "base": null, + "refs": { + "VolumeStatusInfo$Status": "

    The status of the volume.

    " + } + }, + "VolumeStatusItem": { + "base": "

    Describes the volume status.

    ", + "refs": { + "VolumeStatusList$member": null + } + }, + "VolumeStatusList": { + "base": null, + "refs": { + "DescribeVolumeStatusResult$VolumeStatuses": "

    A list of volumes.

    " + } + }, + "VolumeStatusName": { + "base": null, + "refs": { + "VolumeStatusDetails$Name": "

    The name of the volume status.

    " + } + }, + "VolumeType": { + "base": null, + "refs": { + "CreateVolumeRequest$VolumeType": "

    The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.

    Default: standard

    ", + "EbsBlockDevice$VolumeType": "

    The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes.

    Default: standard

    ", + "Volume$VolumeType": "

    The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes.

    " + } + }, + "Vpc": { + "base": "

    Describes a VPC.

    ", + "refs": { + "CreateVpcResult$Vpc": "

    Information about the VPC.

    ", + "VpcList$member": null + } + }, + "VpcAttachment": { + "base": "

    Describes an attachment between a virtual private gateway and a VPC.

    ", + "refs": { + "AttachVpnGatewayResult$VpcAttachment": "

    Information about the attachment.

    ", + "VpcAttachmentList$member": null + } + }, + "VpcAttachmentList": { + "base": null, + "refs": { + "VpnGateway$VpcAttachments": "

    Any VPCs attached to the virtual private gateway.

    " + } + }, + "VpcAttributeName": { + "base": null, + "refs": { + "DescribeVpcAttributeRequest$Attribute": "

    The VPC attribute.

    " + } + }, + "VpcClassicLink": { + "base": "

    Describes whether a VPC is enabled for ClassicLink.

    ", + "refs": { + "VpcClassicLinkList$member": null + } + }, + "VpcClassicLinkIdList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkDnsSupportRequest$VpcIds": "

    One or more VPC IDs.

    ", + "DescribeVpcClassicLinkRequest$VpcIds": "

    One or more VPCs for which you want to describe the ClassicLink status.

    " + } + }, + "VpcClassicLinkList": { + "base": null, + "refs": { + "DescribeVpcClassicLinkResult$Vpcs": "

    The ClassicLink status of one or more VPCs.

    " + } + }, + "VpcEndpoint": { + "base": "

    Describes a VPC endpoint.

    ", + "refs": { + "CreateVpcEndpointResult$VpcEndpoint": "

    Information about the endpoint.

    ", + "VpcEndpointSet$member": null + } + }, + "VpcEndpointSet": { + "base": null, + "refs": { + "DescribeVpcEndpointsResult$VpcEndpoints": "

    Information about the endpoints.

    " + } + }, + "VpcIdStringList": { + "base": null, + "refs": { + "DescribeVpcsRequest$VpcIds": "

    One or more VPC IDs.

    Default: Describes all your VPCs.

    " + } + }, + "VpcList": { + "base": null, + "refs": { + "DescribeVpcsResult$Vpcs": "

    Information about one or more VPCs.

    " + } + }, + "VpcPeeringConnection": { + "base": "

    Describes a VPC peering connection.

    ", + "refs": { + "AcceptVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "CreateVpcPeeringConnectionResult$VpcPeeringConnection": "

    Information about the VPC peering connection.

    ", + "VpcPeeringConnectionList$member": null + } + }, + "VpcPeeringConnectionList": { + "base": null, + "refs": { + "DescribeVpcPeeringConnectionsResult$VpcPeeringConnections": "

    Information about the VPC peering connections.

    " + } + }, + "VpcPeeringConnectionStateReason": { + "base": "

    Describes the status of a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$Status": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionStateReasonCode": { + "base": null, + "refs": { + "VpcPeeringConnectionStateReason$Code": "

    The status of the VPC peering connection.

    " + } + }, + "VpcPeeringConnectionVpcInfo": { + "base": "

    Describes a VPC in a VPC peering connection.

    ", + "refs": { + "VpcPeeringConnection$AccepterVpcInfo": "

    The information of the peer VPC.

    ", + "VpcPeeringConnection$RequesterVpcInfo": "

    The information of the requester VPC.

    " + } + }, + "VpcState": { + "base": null, + "refs": { + "Vpc$State": "

    The current state of the VPC.

    " + } + }, + "VpnConnection": { + "base": "

    Describes a VPN connection.

    ", + "refs": { + "CreateVpnConnectionResult$VpnConnection": "

    Information about the VPN connection.

    ", + "VpnConnectionList$member": null + } + }, + "VpnConnectionIdStringList": { + "base": null, + "refs": { + "DescribeVpnConnectionsRequest$VpnConnectionIds": "

    One or more VPN connection IDs.

    Default: Describes your VPN connections.

    " + } + }, + "VpnConnectionList": { + "base": null, + "refs": { + "DescribeVpnConnectionsResult$VpnConnections": "

    Information about one or more VPN connections.

    " + } + }, + "VpnConnectionOptions": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "VpnConnection$Options": "

    The VPN connection options.

    " + } + }, + "VpnConnectionOptionsSpecification": { + "base": "

    Describes VPN connection options.

    ", + "refs": { + "CreateVpnConnectionRequest$Options": "

    Indicates whether the VPN connection requires static routes. If you are creating a VPN connection for a device that does not support BGP, you must specify true.

    Default: false

    " + } + }, + "VpnGateway": { + "base": "

    Describes a virtual private gateway.

    ", + "refs": { + "CreateVpnGatewayResult$VpnGateway": "

    Information about the virtual private gateway.

    ", + "VpnGatewayList$member": null + } + }, + "VpnGatewayIdStringList": { + "base": null, + "refs": { + "DescribeVpnGatewaysRequest$VpnGatewayIds": "

    One or more virtual private gateway IDs.

    Default: Describes all your virtual private gateways.

    " + } + }, + "VpnGatewayList": { + "base": null, + "refs": { + "DescribeVpnGatewaysResult$VpnGateways": "

    Information about one or more virtual private gateways.

    " + } + }, + "VpnState": { + "base": null, + "refs": { + "VpnConnection$State": "

    The current state of the VPN connection.

    ", + "VpnGateway$State": "

    The current state of the virtual private gateway.

    ", + "VpnStaticRoute$State": "

    The current state of the static route.

    " + } + }, + "VpnStaticRoute": { + "base": "

    Describes a static route for a VPN connection.

    ", + "refs": { + "VpnStaticRouteList$member": null + } + }, + "VpnStaticRouteList": { + "base": null, + "refs": { + "VpnConnection$Routes": "

    The static routes associated with the VPN connection.

    " + } + }, + "VpnStaticRouteSource": { + "base": null, + "refs": { + "VpnStaticRoute$Source": "

    Indicates how the routes were provided.

    " + } + }, + "ZoneNameStringList": { + "base": null, + "refs": { + "DescribeAvailabilityZonesRequest$ZoneNames": "

    The names of one or more Availability Zones.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,129 @@ +{ + "pagination": { + "DescribeAccountAttributes": { + "result_key": "AccountAttributes" + }, + "DescribeAddresses": { + "result_key": "Addresses" + }, + "DescribeAvailabilityZones": { + "result_key": "AvailabilityZones" + }, + "DescribeBundleTasks": { + "result_key": "BundleTasks" + }, + "DescribeConversionTasks": { + "result_key": "ConversionTasks" + }, + "DescribeCustomerGateways": { + "result_key": "CustomerGateways" + }, + "DescribeDhcpOptions": { + "result_key": "DhcpOptions" + }, + "DescribeExportTasks": { + "result_key": "ExportTasks" + }, + "DescribeImages": { + "result_key": "Images" + }, + "DescribeInstanceStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": 
"InstanceStatuses" + }, + "DescribeInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Reservations" + }, + "DescribeInternetGateways": { + "result_key": "InternetGateways" + }, + "DescribeKeyPairs": { + "result_key": "KeyPairs" + }, + "DescribeNetworkAcls": { + "result_key": "NetworkAcls" + }, + "DescribeNetworkInterfaces": { + "result_key": "NetworkInterfaces" + }, + "DescribePlacementGroups": { + "result_key": "PlacementGroups" + }, + "DescribeRegions": { + "result_key": "Regions" + }, + "DescribeReservedInstances": { + "result_key": "ReservedInstances" + }, + "DescribeReservedInstancesListings": { + "result_key": "ReservedInstancesListings" + }, + "DescribeReservedInstancesOfferings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ReservedInstancesOfferings" + }, + "DescribeReservedInstancesModifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "ReservedInstancesModifications" + }, + "DescribeRouteTables": { + "result_key": "RouteTables" + }, + "DescribeSecurityGroups": { + "result_key": "SecurityGroups" + }, + "DescribeSnapshots": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Snapshots" + }, + "DescribeSpotInstanceRequests": { + "result_key": "SpotInstanceRequests" + }, + "DescribeSpotPriceHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SpotPriceHistory" + }, + "DescribeSubnets": { + "result_key": "Subnets" + }, + "DescribeTags": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Tags" + }, + "DescribeVolumeStatus": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "VolumeStatuses" + }, + "DescribeVolumes": { + "input_token": "NextToken", + 
"output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Volumes" + }, + "DescribeVpcs": { + "result_key": "Vpcs" + }, + "DescribeVpnConnections": { + "result_key": "VpnConnections" + }, + "DescribeVpnGateways": { + "result_key": "VpnGateways" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ec2/2015-10-01/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,494 @@ +{ + "version": 2, + "waiters": { + "InstanceExists": { + "delay": 5, + "maxAttempts": 40, + "operation": "DescribeInstances", + "acceptors": [ + { + "matcher": "status", + "expected": 200, + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInstanceIDNotFound", + "state": "retry" + } + ] + }, + "BundleTaskComplete": { + "delay": 15, + "operation": "DescribeBundleTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "complete", + "matcher": "pathAll", + "state": "success", + "argument": "BundleTasks[].State" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "BundleTasks[].State" + } + ] + }, + "ConversionTaskCancelled": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskCompleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + 
"argument": "ConversionTasks[].State" + }, + { + "expected": "cancelled", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + }, + { + "expected": "cancelling", + "matcher": "pathAny", + "state": "failure", + "argument": "ConversionTasks[].State" + } + ] + }, + "ConversionTaskDeleted": { + "delay": 15, + "operation": "DescribeConversionTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "ConversionTasks[].State" + } + ] + }, + "CustomerGatewayAvailable": { + "delay": 15, + "operation": "DescribeCustomerGateways", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "CustomerGateways[].State" + } + ] + }, + "ExportTaskCancelled": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "cancelled", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ExportTaskCompleted": { + "delay": 15, + "operation": "DescribeExportTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "ExportTasks[].State" + } + ] + }, + "ImageAvailable": { + "operation": "DescribeImages", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Images[].State", + "expected": "available" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Images[].State", + "expected": "failed" + } + ] + }, + "InstanceRunning": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + 
"expected": "running", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "shutting-down", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "InstanceStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].InstanceStatus.Status", + "expected": "ok" + } + ] + }, + "InstanceStopped": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "stopped", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "InstanceTerminated": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "terminated", + "matcher": "pathAll", + "state": "success", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Reservations[].Instances[].State.Name" + } + ] + }, + "KeyPairExists": { + "operation": "DescribeKeyPairs", + "delay": 5, + "maxAttempts": 6, + "acceptors": [ + { + "expected": true, + 
"matcher": "pathAll", + "state": "success", + "argument": "length(KeyPairs[].KeyName) > `0`" + }, + { + "expected": "InvalidKeyPairNotFound", + "matcher": "error", + "state": "retry" + } + ] + }, + "NetworkInterfaceAvailable": { + "operation": "DescribeNetworkInterfaces", + "delay": 20, + "maxAttempts": 10, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "NetworkInterfaces[].Status" + }, + { + "expected": "InvalidNetworkInterfaceIDNotFound", + "matcher": "error", + "state": "failure" + } + ] + }, + "PasswordDataAvailable": { + "operation": "GetPasswordData", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "length(PasswordData) > `0`", + "expected": true + } + ] + }, + "SnapshotCompleted": { + "delay": 15, + "operation": "DescribeSnapshots", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "completed", + "matcher": "pathAll", + "state": "success", + "argument": "Snapshots[].State" + } + ] + }, + "SpotInstanceRequestFulfilled": { + "operation": "DescribeSpotInstanceRequests", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "fulfilled" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "schedule-expired" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "canceled-before-fulfillment" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "bad-parameters" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "SpotInstanceRequests[].Status.Code", + "expected": "system-error" + } + ] + }, + "SubnetAvailable": { + "delay": 15, + "operation": "DescribeSubnets", + "maxAttempts": 40, + "acceptors": [ + { + 
"expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Subnets[].State" + } + ] + }, + "SystemStatusOk": { + "operation": "DescribeInstanceStatus", + "maxAttempts": 40, + "delay": 15, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "InstanceStatuses[].SystemStatus.Status", + "expected": "ok" + } + ] + }, + "VolumeAvailable": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VolumeDeleted": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "matcher": "error", + "expected": "InvalidVolumeNotFound", + "state": "success" + } + ] + }, + "VolumeInUse": { + "delay": 15, + "operation": "DescribeVolumes", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "in-use", + "matcher": "pathAll", + "state": "success", + "argument": "Volumes[].State" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Volumes[].State" + } + ] + }, + "VpcAvailable": { + "delay": 15, + "operation": "DescribeVpcs", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Vpcs[].State" + } + ] + }, + "VpnConnectionAvailable": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + }, + { + "expected": "deleted", + "matcher": 
"pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + }, + "VpnConnectionDeleted": { + "delay": 15, + "operation": "DescribeVpnConnections", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "VpnConnections[].State" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "VpnConnections[].State" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,848 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-09-21", + "endpointPrefix":"ecr", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon ECR", + "serviceFullName":"Amazon EC2 Container Registry", + "signatureVersion":"v4", + "targetPrefix":"AmazonEC2ContainerRegistry_V20150921" + }, + "operations":{ + "BatchCheckLayerAvailability":{ + "name":"BatchCheckLayerAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchCheckLayerAvailabilityRequest"}, + "output":{"shape":"BatchCheckLayerAvailabilityResponse"}, + "errors":[ + {"shape":"RepositoryNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServerException"} + ] + }, + "BatchDeleteImage":{ + "name":"BatchDeleteImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteImageRequest"}, + "output":{"shape":"BatchDeleteImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + 
{"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "BatchGetImage":{ + "name":"BatchGetImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetImageRequest"}, + "output":{"shape":"BatchGetImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "CompleteLayerUpload":{ + "name":"CompleteLayerUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CompleteLayerUploadRequest"}, + "output":{"shape":"CompleteLayerUploadResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"UploadNotFoundException"}, + {"shape":"InvalidLayerException"}, + {"shape":"LayerPartTooSmallException"}, + {"shape":"LayerAlreadyExistsException"}, + {"shape":"EmptyUploadException"} + ] + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRepositoryRequest"}, + "output":{"shape":"CreateRepositoryResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryAlreadyExistsException"}, + {"shape":"LimitExceededException"} + ] + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryRequest"}, + "output":{"shape":"DeleteRepositoryResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryNotEmptyException"} + ] + }, + "DeleteRepositoryPolicy":{ + "name":"DeleteRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryPolicyRequest"}, + "output":{"shape":"DeleteRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + 
{"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryPolicyNotFoundException"} + ] + }, + "DescribeRepositories":{ + "name":"DescribeRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRepositoriesRequest"}, + "output":{"shape":"DescribeRepositoriesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "GetAuthorizationToken":{ + "name":"GetAuthorizationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAuthorizationTokenRequest"}, + "output":{"shape":"GetAuthorizationTokenResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"} + ] + }, + "GetDownloadUrlForLayer":{ + "name":"GetDownloadUrlForLayer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDownloadUrlForLayerRequest"}, + "output":{"shape":"GetDownloadUrlForLayerResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"LayersNotFoundException"}, + {"shape":"LayerInaccessibleException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "GetRepositoryPolicy":{ + "name":"GetRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryPolicyRequest"}, + "output":{"shape":"GetRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryPolicyNotFoundException"} + ] + }, + "InitiateLayerUpload":{ + "name":"InitiateLayerUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InitiateLayerUploadRequest"}, + "output":{"shape":"InitiateLayerUploadResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, 
+ "ListImages":{ + "name":"ListImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListImagesRequest"}, + "output":{"shape":"ListImagesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "PutImage":{ + "name":"PutImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutImageRequest"}, + "output":{"shape":"PutImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ImageAlreadyExistsException"}, + {"shape":"LayersNotFoundException"}, + {"shape":"LimitExceededException"} + ] + }, + "SetRepositoryPolicy":{ + "name":"SetRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetRepositoryPolicyRequest"}, + "output":{"shape":"SetRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ] + }, + "UploadLayerPart":{ + "name":"UploadLayerPart", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadLayerPartRequest"}, + "output":{"shape":"UploadLayerPartResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidLayerPartException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"UploadNotFoundException"}, + {"shape":"LimitExceededException"} + ] + } + }, + "shapes":{ + "Arn":{"type":"string"}, + "AuthorizationData":{ + "type":"structure", + "members":{ + "authorizationToken":{"shape":"Base64"}, + "expiresAt":{"shape":"ExpirationTimestamp"}, + "proxyEndpoint":{"shape":"ProxyEndpoint"} + } + }, + "AuthorizationDataList":{ + "type":"list", + "member":{"shape":"AuthorizationData"} + }, + "Base64":{ + "type":"string", + "pattern":"^\\S+$" + }, + "BatchCheckLayerAvailabilityRequest":{ + 
"type":"structure", + "required":[ + "repositoryName", + "layerDigests" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "layerDigests":{"shape":"BatchedOperationLayerDigestList"} + } + }, + "BatchCheckLayerAvailabilityResponse":{ + "type":"structure", + "members":{ + "layers":{"shape":"LayerList"}, + "failures":{"shape":"LayerFailureList"} + } + }, + "BatchDeleteImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageIds" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "imageIds":{"shape":"ImageIdentifierList"} + } + }, + "BatchDeleteImageResponse":{ + "type":"structure", + "members":{ + "imageIds":{"shape":"ImageIdentifierList"}, + "failures":{"shape":"ImageFailureList"} + } + }, + "BatchGetImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageIds" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "imageIds":{"shape":"ImageIdentifierList"} + } + }, + "BatchGetImageResponse":{ + "type":"structure", + "members":{ + "images":{"shape":"ImageList"}, + "failures":{"shape":"ImageFailureList"} + } + }, + "BatchedOperationLayerDigest":{ + "type":"string", + "max":1000, + "min":0 + }, + "BatchedOperationLayerDigestList":{ + "type":"list", + "member":{"shape":"BatchedOperationLayerDigest"}, + "max":100, + "min":1 + }, + "CompleteLayerUploadRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "uploadId", + "layerDigests" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + "layerDigests":{"shape":"LayerDigestList"} + } + }, + "CompleteLayerUploadResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + 
"layerDigest":{"shape":"LayerDigest"} + } + }, + "CreateRepositoryRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"} + } + }, + "CreateRepositoryResponse":{ + "type":"structure", + "members":{ + "repository":{"shape":"Repository"} + } + }, + "DeleteRepositoryPolicyRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, + "DeleteRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "policyText":{"shape":"RepositoryPolicyText"} + } + }, + "DeleteRepositoryRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "force":{"shape":"ForceFlag"} + } + }, + "DeleteRepositoryResponse":{ + "type":"structure", + "members":{ + "repository":{"shape":"Repository"} + } + }, + "DescribeRepositoriesRequest":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryNames":{"shape":"RepositoryNameList"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "DescribeRepositoriesResponse":{ + "type":"structure", + "members":{ + "repositories":{"shape":"RepositoryList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "EmptyUploadException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ExceptionMessage":{"type":"string"}, + "ExpirationTimestamp":{"type":"timestamp"}, + "ForceFlag":{"type":"boolean"}, + "GetAuthorizationTokenRegistryIdList":{ + "type":"list", + "member":{"shape":"RegistryId"}, + "max":10, + "min":1 + }, + "GetAuthorizationTokenRequest":{ + "type":"structure", + "members":{ + "registryIds":{"shape":"GetAuthorizationTokenRegistryIdList"} + } + }, + 
"GetAuthorizationTokenResponse":{ + "type":"structure", + "members":{ + "authorizationData":{"shape":"AuthorizationDataList"} + } + }, + "GetDownloadUrlForLayerRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "layerDigest" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "layerDigest":{"shape":"LayerDigest"} + } + }, + "GetDownloadUrlForLayerResponse":{ + "type":"structure", + "members":{ + "downloadUrl":{"shape":"Url"}, + "layerDigest":{"shape":"LayerDigest"} + } + }, + "GetRepositoryPolicyRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, + "GetRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "policyText":{"shape":"RepositoryPolicyText"} + } + }, + "Image":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "imageId":{"shape":"ImageIdentifier"}, + "imageManifest":{"shape":"ImageManifest"} + } + }, + "ImageAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ImageDigest":{"type":"string"}, + "ImageFailure":{ + "type":"structure", + "members":{ + "imageId":{"shape":"ImageIdentifier"}, + "failureCode":{"shape":"ImageFailureCode"}, + "failureReason":{"shape":"ImageFailureReason"} + } + }, + "ImageFailureCode":{ + "type":"string", + "enum":[ + "InvalidImageDigest", + "InvalidImageTag", + "ImageTagDoesNotMatchDigest", + "ImageNotFound", + "MissingDigestAndTag" + ] + }, + "ImageFailureList":{ + "type":"list", + "member":{"shape":"ImageFailure"} + }, + "ImageFailureReason":{"type":"string"}, + "ImageIdentifier":{ + "type":"structure", + "members":{ + "imageDigest":{"shape":"ImageDigest"}, + 
"imageTag":{"shape":"ImageTag"} + } + }, + "ImageIdentifierList":{ + "type":"list", + "member":{"shape":"ImageIdentifier"}, + "max":100, + "min":1 + }, + "ImageList":{ + "type":"list", + "member":{"shape":"Image"} + }, + "ImageManifest":{"type":"string"}, + "ImageTag":{"type":"string"}, + "InitiateLayerUploadRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, + "InitiateLayerUploadResponse":{ + "type":"structure", + "members":{ + "uploadId":{"shape":"UploadId"}, + "partSize":{"shape":"PartSize"} + } + }, + "InvalidLayerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "InvalidLayerPartException":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + "lastValidByteReceived":{"shape":"PartSize"}, + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "Layer":{ + "type":"structure", + "members":{ + "layerDigest":{"shape":"LayerDigest"}, + "layerAvailability":{"shape":"LayerAvailability"}, + "layerSize":{"shape":"LayerSizeInBytes"} + } + }, + "LayerAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "LayerAvailability":{ + "type":"string", + "enum":[ + "AVAILABLE", + "UNAVAILABLE" + ] + }, + "LayerDigest":{ + "type":"string", + "pattern":"[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+" + }, + "LayerDigestList":{ + "type":"list", + "member":{"shape":"LayerDigest"}, + "max":100, + "min":1 + }, + "LayerFailure":{ + "type":"structure", + "members":{ + "layerDigest":{"shape":"BatchedOperationLayerDigest"}, + "failureCode":{"shape":"LayerFailureCode"}, + 
"failureReason":{"shape":"LayerFailureReason"} + } + }, + "LayerFailureCode":{ + "type":"string", + "enum":[ + "InvalidLayerDigest", + "MissingLayerDigest" + ] + }, + "LayerFailureList":{ + "type":"list", + "member":{"shape":"LayerFailure"} + }, + "LayerFailureReason":{"type":"string"}, + "LayerInaccessibleException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "LayerList":{ + "type":"list", + "member":{"shape":"Layer"} + }, + "LayerPartBlob":{"type":"blob"}, + "LayerPartTooSmallException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "LayerSizeInBytes":{"type":"long"}, + "LayersNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ListImagesRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListImagesResponse":{ + "type":"structure", + "members":{ + "imageIds":{"shape":"ImageIdentifierList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "NextToken":{"type":"string"}, + "PartSize":{ + "type":"long", + "min":0 + }, + "ProxyEndpoint":{"type":"string"}, + "PutImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageManifest" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "imageManifest":{"shape":"ImageManifest"} + } + }, + "PutImageResponse":{ + "type":"structure", + "members":{ + "image":{"shape":"Image"} + } + }, + "RegistryId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + 
"Repository":{ + "type":"structure", + "members":{ + "repositoryArn":{"shape":"Arn"}, + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, + "RepositoryAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "RepositoryList":{ + "type":"list", + "member":{"shape":"Repository"} + }, + "RepositoryName":{ + "type":"string", + "max":256, + "min":2, + "pattern":"(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*" + }, + "RepositoryNameList":{ + "type":"list", + "member":{"shape":"RepositoryName"}, + "max":100, + "min":1 + }, + "RepositoryNotEmptyException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "RepositoryNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "RepositoryPolicyNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "RepositoryPolicyText":{ + "type":"string", + "max":10240, + "min":0 + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true, + "fault":true + }, + "SetRepositoryPolicyRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "policyText" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "policyText":{"shape":"RepositoryPolicyText"}, + "force":{"shape":"ForceFlag"} + } + }, + "SetRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "policyText":{"shape":"RepositoryPolicyText"} + } + }, + "UploadId":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "UploadLayerPartRequest":{ + "type":"structure", + 
"required":[ + "repositoryName", + "uploadId", + "partFirstByte", + "partLastByte", + "layerPartBlob" + ], + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + "partFirstByte":{"shape":"PartSize"}, + "partLastByte":{"shape":"PartSize"}, + "layerPartBlob":{"shape":"LayerPartBlob"} + } + }, + "UploadLayerPartResponse":{ + "type":"structure", + "members":{ + "registryId":{"shape":"RegistryId"}, + "repositoryName":{"shape":"RepositoryName"}, + "uploadId":{"shape":"UploadId"}, + "lastByteReceived":{"shape":"PartSize"} + } + }, + "UploadNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "Url":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,619 @@ +{ + "version": "2.0", + "service": "

    Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry service. Customers can use the familiar Docker CLI to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon ECR supports private Docker repositories with resource-based permissions using AWS IAM so that specific users or Amazon EC2 instances can access repositories and images. Developers can use the Docker CLI to author and manage images.

    ", + "operations": { + "BatchCheckLayerAvailability": "

    Check the availability of multiple image layers in a specified registry and repository.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "BatchDeleteImage": "

    Deletes a list of specified images within a specified repository. Images are specified with either imageTag or imageDigest.

    ", + "BatchGetImage": "

    Gets detailed information for specified images within a specified repository. Images are specified with either imageTag or imageDigest.

    ", + "CompleteLayerUpload": "

    Inform Amazon ECR that the image layer upload for a specified registry, repository name, and upload ID, has completed. You can optionally provide a sha256 digest of the image layer for data validation purposes.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "CreateRepository": "

    Creates an image repository.

    ", + "DeleteRepository": "

    Deletes an existing image repository. If a repository contains images, you must use the force option to delete it.

    ", + "DeleteRepositoryPolicy": "

    Deletes the repository policy from a specified repository.

    ", + "DescribeRepositories": "

    Describes image repositories in a registry.

    ", + "GetAuthorizationToken": "

    Retrieves a token that is valid for a specified registry for 12 hours. This command allows you to use the docker CLI to push and pull images with Amazon ECR. If you do not specify a registry, the default registry is assumed.

    The authorizationToken returned for each registry specified is a base64 encoded string that can be decoded and used in a docker login command to authenticate to a registry. The AWS CLI offers an aws ecr get-login command that simplifies the login process.

    ", + "GetDownloadUrlForLayer": "

    Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can only get URLs for image layers that are referenced in an image.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "GetRepositoryPolicy": "

    Retrieves the repository policy for a specified repository.

    ", + "InitiateLayerUpload": "

    Notify Amazon ECR that you intend to upload an image layer.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "ListImages": "

    Lists all the image IDs for a given repository.

    ", + "PutImage": "

    Creates or updates the image manifest associated with an image.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    ", + "SetRepositoryPolicy": "

    Applies a repository policy on a specified repository to control access permissions.

    ", + "UploadLayerPart": "

    Uploads an image layer part to Amazon ECR.

    This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers. Use the docker CLI to pull, tag, and push images.

    " + }, + "shapes": { + "Arn": { + "base": null, + "refs": { + "Repository$repositoryArn": "

    The Amazon Resource Name (ARN) that identifies the repository. The ARN contains the arn:aws:ecr namespace, followed by the region of the repository, the AWS account ID of the repository owner, the repository namespace, and then the repository name. For example, arn:aws:ecr:region:012345678910:repository/test.

    " + } + }, + "AuthorizationData": { + "base": "

    An object representing authorization data for an Amazon ECR registry.

    ", + "refs": { + "AuthorizationDataList$member": null + } + }, + "AuthorizationDataList": { + "base": null, + "refs": { + "GetAuthorizationTokenResponse$authorizationData": "

    A list of authorization token data objects that correspond to the registryIds values in the request.

    " + } + }, + "Base64": { + "base": null, + "refs": { + "AuthorizationData$authorizationToken": "

    A base64-encoded string that contains authorization data for the specified Amazon ECR registry. When the string is decoded, it is presented in the format user:password for private registry authentication using docker login.

    " + } + }, + "BatchCheckLayerAvailabilityRequest": { + "base": null, + "refs": { + } + }, + "BatchCheckLayerAvailabilityResponse": { + "base": null, + "refs": { + } + }, + "BatchDeleteImageRequest": { + "base": "

    Deletes specified images within a specified repository. Images are specified with either the imageTag or imageDigest.

    ", + "refs": { + } + }, + "BatchDeleteImageResponse": { + "base": null, + "refs": { + } + }, + "BatchGetImageRequest": { + "base": null, + "refs": { + } + }, + "BatchGetImageResponse": { + "base": null, + "refs": { + } + }, + "BatchedOperationLayerDigest": { + "base": null, + "refs": { + "BatchedOperationLayerDigestList$member": null, + "LayerFailure$layerDigest": "

    The layer digest associated with the failure.

    " + } + }, + "BatchedOperationLayerDigestList": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityRequest$layerDigests": "

    The digests of the image layers to check.

    " + } + }, + "CompleteLayerUploadRequest": { + "base": null, + "refs": { + } + }, + "CompleteLayerUploadResponse": { + "base": null, + "refs": { + } + }, + "CreateRepositoryRequest": { + "base": null, + "refs": { + } + }, + "CreateRepositoryResponse": { + "base": null, + "refs": { + } + }, + "DeleteRepositoryPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteRepositoryPolicyResponse": { + "base": null, + "refs": { + } + }, + "DeleteRepositoryRequest": { + "base": null, + "refs": { + } + }, + "DeleteRepositoryResponse": { + "base": null, + "refs": { + } + }, + "DescribeRepositoriesRequest": { + "base": null, + "refs": { + } + }, + "DescribeRepositoriesResponse": { + "base": null, + "refs": { + } + }, + "EmptyUploadException": { + "base": "

    The specified layer upload does not contain any layer parts.

    ", + "refs": { + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "EmptyUploadException$message": "

    The error message associated with the exception.

    ", + "ImageAlreadyExistsException$message": "

    The error message associated with the exception.

    ", + "InvalidLayerException$message": "

    The error message associated with the exception.

    ", + "InvalidLayerPartException$message": "

    The error message associated with the exception.

    ", + "InvalidParameterException$message": "

    The error message associated with the exception.

    ", + "LayerAlreadyExistsException$message": "

    The error message associated with the exception.

    ", + "LayerInaccessibleException$message": "

    The error message associated with the exception.

    ", + "LayerPartTooSmallException$message": "

    The error message associated with the exception.

    ", + "LayersNotFoundException$message": "

    The error message associated with the exception.

    ", + "LimitExceededException$message": "

    The error message associated with the exception.

    ", + "RepositoryAlreadyExistsException$message": "

    The error message associated with the exception.

    ", + "RepositoryNotEmptyException$message": "

    The error message associated with the exception.

    ", + "RepositoryNotFoundException$message": "

    The error message associated with the exception.

    ", + "RepositoryPolicyNotFoundException$message": "

    The error message associated with the exception.

    ", + "ServerException$message": "

    The error message associated with the exception.

    ", + "UploadNotFoundException$message": "

    The error message associated with the exception.

    " + } + }, + "ExpirationTimestamp": { + "base": null, + "refs": { + "AuthorizationData$expiresAt": "

    The Unix time in seconds and milliseconds when the authorization token expires. Authorization tokens are valid for 12 hours.

    " + } + }, + "ForceFlag": { + "base": null, + "refs": { + "DeleteRepositoryRequest$force": "

    Force the deletion of the repository if it contains images.

    ", + "SetRepositoryPolicyRequest$force": "

    If the policy you are attempting to set on a repository policy would prevent you from setting another policy in the future, you must force the SetRepositoryPolicy operation. This is intended to prevent accidental repository lock outs.

    " + } + }, + "GetAuthorizationTokenRegistryIdList": { + "base": null, + "refs": { + "GetAuthorizationTokenRequest$registryIds": "

    A list of AWS account IDs that are associated with the registries for which to get authorization tokens. If you do not specify a registry, the default registry is assumed.

    " + } + }, + "GetAuthorizationTokenRequest": { + "base": null, + "refs": { + } + }, + "GetAuthorizationTokenResponse": { + "base": null, + "refs": { + } + }, + "GetDownloadUrlForLayerRequest": { + "base": null, + "refs": { + } + }, + "GetDownloadUrlForLayerResponse": { + "base": null, + "refs": { + } + }, + "GetRepositoryPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetRepositoryPolicyResponse": { + "base": null, + "refs": { + } + }, + "Image": { + "base": "

    Object representing an image.

    ", + "refs": { + "ImageList$member": null, + "PutImageResponse$image": "

    Details of the image uploaded.

    " + } + }, + "ImageAlreadyExistsException": { + "base": "

    The specified image has already been pushed, and there are no changes to the manifest or image tag since the last push.

    ", + "refs": { + } + }, + "ImageDigest": { + "base": null, + "refs": { + "ImageIdentifier$imageDigest": "

    The sha256 digest of the image manifest.

    " + } + }, + "ImageFailure": { + "base": null, + "refs": { + "ImageFailureList$member": null + } + }, + "ImageFailureCode": { + "base": null, + "refs": { + "ImageFailure$failureCode": "

    The code associated with the failure.

    " + } + }, + "ImageFailureList": { + "base": null, + "refs": { + "BatchDeleteImageResponse$failures": "

    Any failures associated with the call.

    ", + "BatchGetImageResponse$failures": "

    Any failures associated with the call.

    " + } + }, + "ImageFailureReason": { + "base": null, + "refs": { + "ImageFailure$failureReason": "

    The reason for the failure.

    " + } + }, + "ImageIdentifier": { + "base": null, + "refs": { + "Image$imageId": "

    An object containing the image tag and image digest associated with an image.

    ", + "ImageFailure$imageId": "

    The image ID associated with the failure.

    ", + "ImageIdentifierList$member": null + } + }, + "ImageIdentifierList": { + "base": null, + "refs": { + "BatchDeleteImageRequest$imageIds": "

    A list of image ID references that correspond to images to delete. The format of the imageIds reference is imageTag=tag or imageDigest=digest.

    ", + "BatchDeleteImageResponse$imageIds": "

    The image IDs of the deleted images.

    ", + "BatchGetImageRequest$imageIds": "

    A list of image ID references that correspond to images to describe. The format of the imageIds reference is imageTag=tag or imageDigest=digest.

    ", + "ListImagesResponse$imageIds": "

    The list of image IDs for the requested repository.

    " + } + }, + "ImageList": { + "base": null, + "refs": { + "BatchGetImageResponse$images": "

    A list of image objects corresponding to the image references in the request.

    " + } + }, + "ImageManifest": { + "base": null, + "refs": { + "Image$imageManifest": "

    The image manifest associated with the image.

    ", + "PutImageRequest$imageManifest": "

    The image manifest corresponding to the image to be uploaded.

    " + } + }, + "ImageTag": { + "base": null, + "refs": { + "ImageIdentifier$imageTag": "

    The tag used for the image.

    " + } + }, + "InitiateLayerUploadRequest": { + "base": null, + "refs": { + } + }, + "InitiateLayerUploadResponse": { + "base": null, + "refs": { + } + }, + "InvalidLayerException": { + "base": "

    The layer digest calculation performed by Amazon ECR upon receipt of the image layer does not match the digest specified.

    ", + "refs": { + } + }, + "InvalidLayerPartException": { + "base": "

    The layer part size is not valid, or the first byte specified is not consecutive to the last byte of a previous layer part upload.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    The specified parameter is invalid. Review the available parameters for the API request.

    ", + "refs": { + } + }, + "Layer": { + "base": null, + "refs": { + "LayerList$member": null + } + }, + "LayerAlreadyExistsException": { + "base": "

    The image layer already exists in the associated repository.

    ", + "refs": { + } + }, + "LayerAvailability": { + "base": null, + "refs": { + "Layer$layerAvailability": "

    The availability status of the image layer. Valid values are AVAILABLE and UNAVAILABLE.

    " + } + }, + "LayerDigest": { + "base": null, + "refs": { + "CompleteLayerUploadResponse$layerDigest": "

    The sha256 digest of the image layer.

    ", + "GetDownloadUrlForLayerRequest$layerDigest": "

    The digest of the image layer to download.

    ", + "GetDownloadUrlForLayerResponse$layerDigest": "

    The digest of the image layer to download.

    ", + "Layer$layerDigest": "

    The sha256 digest of the image layer.

    ", + "LayerDigestList$member": null + } + }, + "LayerDigestList": { + "base": null, + "refs": { + "CompleteLayerUploadRequest$layerDigests": "

    The sha256 digest of the image layer.

    " + } + }, + "LayerFailure": { + "base": null, + "refs": { + "LayerFailureList$member": null + } + }, + "LayerFailureCode": { + "base": null, + "refs": { + "LayerFailure$failureCode": "

    The failure code associated with the failure.

    " + } + }, + "LayerFailureList": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityResponse$failures": "

    Any failures associated with the call.

    " + } + }, + "LayerFailureReason": { + "base": null, + "refs": { + "LayerFailure$failureReason": "

    The reason for the failure.

    " + } + }, + "LayerInaccessibleException": { + "base": "

    The specified layer is not available because it is not associated with an image. Unassociated image layers may be cleaned up at any time.

    ", + "refs": { + } + }, + "LayerList": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityResponse$layers": "

    A list of image layer objects corresponding to the image layer references in the request.

    " + } + }, + "LayerPartBlob": { + "base": null, + "refs": { + "UploadLayerPartRequest$layerPartBlob": "

    The base64-encoded layer part payload.

    " + } + }, + "LayerPartTooSmallException": { + "base": "

    Layer parts must be at least 5 MiB in size.

    ", + "refs": { + } + }, + "LayerSizeInBytes": { + "base": null, + "refs": { + "Layer$layerSize": "

    The size, in bytes, of the image layer.

    " + } + }, + "LayersNotFoundException": { + "base": "

    The specified layers could not be found, or the specified layer is not valid for this repository.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon EC2 Container Registry User Guide.

    ", + "refs": { + } + }, + "ListImagesRequest": { + "base": null, + "refs": { + } + }, + "ListImagesResponse": { + "base": null, + "refs": { + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeRepositoriesRequest$maxResults": "

    The maximum number of repository results returned by DescribeRepositories in paginated output. When this parameter is used, DescribeRepositories only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRepositories request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then DescribeRepositories returns up to 100 results and a nextToken value, if applicable.

    ", + "ListImagesRequest$maxResults": "

    The maximum number of image results returned by ListImages in paginated output. When this parameter is used, ListImages only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListImages request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListImages returns up to 100 results and a nextToken value, if applicable.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeRepositoriesRequest$nextToken": "

    The nextToken value returned from a previous paginated DescribeRepositories request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "DescribeRepositoriesResponse$nextToken": "

    The nextToken value to include in a future DescribeRepositories request. When the results of a DescribeRepositories request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListImagesRequest$nextToken": "

    The nextToken value returned from a previous paginated ListImages request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "ListImagesResponse$nextToken": "

    The nextToken value to include in a future ListImages request. When the results of a ListImages request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    " + } + }, + "PartSize": { + "base": null, + "refs": { + "InitiateLayerUploadResponse$partSize": "

    The size, in bytes, that Amazon ECR expects future layer part uploads to be.

    ", + "InvalidLayerPartException$lastValidByteReceived": "

    The last valid byte received from the layer part upload that is associated with the exception.

    ", + "UploadLayerPartRequest$partFirstByte": "

    The integer value of the first byte of the layer part.

    ", + "UploadLayerPartRequest$partLastByte": "

    The integer value of the last byte of the layer part.

    ", + "UploadLayerPartResponse$lastByteReceived": "

    The integer value of the last byte received in the request.

    " + } + }, + "ProxyEndpoint": { + "base": null, + "refs": { + "AuthorizationData$proxyEndpoint": "

    The registry URL to use for this authorization token in a docker login command. The Amazon ECR registry URL format is https://aws_account_id.dkr.ecr.region.amazonaws.com. For example, https://012345678910.dkr.ecr.us-east-1.amazonaws.com.

    " + } + }, + "PutImageRequest": { + "base": null, + "refs": { + } + }, + "PutImageResponse": { + "base": null, + "refs": { + } + }, + "RegistryId": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityRequest$registryId": "

    The AWS account ID associated with the registry that contains the image layers to check. If you do not specify a registry, the default registry is assumed.

    ", + "BatchDeleteImageRequest$registryId": "

    The AWS account ID associated with the registry that contains the image to delete. If you do not specify a registry, the default registry is assumed.

    ", + "BatchGetImageRequest$registryId": "

    The AWS account ID associated with the registry that contains the images to describe. If you do not specify a registry, the default registry is assumed.

    ", + "CompleteLayerUploadRequest$registryId": "

    The AWS account ID associated with the registry to which to upload layers. If you do not specify a registry, the default registry is assumed.

    ", + "CompleteLayerUploadResponse$registryId": "

    The registry ID associated with the request.

    ", + "DeleteRepositoryPolicyRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository policy to delete. If you do not specify a registry, the default registry is assumed.

    ", + "DeleteRepositoryPolicyResponse$registryId": "

    The registry ID associated with the request.

    ", + "DeleteRepositoryRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository to delete. If you do not specify a registry, the default registry is assumed.

    ", + "DescribeRepositoriesRequest$registryId": "

    The AWS account ID associated with the registry that contains the repositories to be described. If you do not specify a registry, the default registry is assumed.

    ", + "GetAuthorizationTokenRegistryIdList$member": null, + "GetDownloadUrlForLayerRequest$registryId": "

    The AWS account ID associated with the registry that contains the image layer to download. If you do not specify a registry, the default registry is assumed.

    ", + "GetRepositoryPolicyRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository. If you do not specify a registry, the default registry is assumed.

    ", + "GetRepositoryPolicyResponse$registryId": "

    The registry ID associated with the request.

    ", + "Image$registryId": "

    The AWS account ID associated with the registry containing the image.

    ", + "InitiateLayerUploadRequest$registryId": "

    The AWS account ID associated with the registry that you intend to upload layers to. If you do not specify a registry, the default registry is assumed.

    ", + "InvalidLayerPartException$registryId": "

    The registry ID associated with the exception.

    ", + "ListImagesRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository to list images in. If you do not specify a registry, the default registry is assumed.

    ", + "PutImageRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository in which to put the image. If you do not specify a registry, the default registry is assumed.

    ", + "Repository$registryId": "

    The AWS account ID associated with the registry that contains the repository.

    ", + "SetRepositoryPolicyRequest$registryId": "

    The AWS account ID associated with the registry that contains the repository. If you do not specify a registry, the default registry is assumed.

    ", + "SetRepositoryPolicyResponse$registryId": "

    The registry ID associated with the request.

    ", + "UploadLayerPartRequest$registryId": "

    The AWS account ID associated with the registry that you are uploading layer parts to. If you do not specify a registry, the default registry is assumed.

    ", + "UploadLayerPartResponse$registryId": "

    The registry ID associated with the request.

    " + } + }, + "Repository": { + "base": "

    Object representing a repository.

    ", + "refs": { + "CreateRepositoryResponse$repository": null, + "DeleteRepositoryResponse$repository": null, + "RepositoryList$member": null + } + }, + "RepositoryAlreadyExistsException": { + "base": "

    The specified repository already exists in the specified registry.

    ", + "refs": { + } + }, + "RepositoryList": { + "base": null, + "refs": { + "DescribeRepositoriesResponse$repositories": "

    A list of repository objects corresponding to valid repositories.

    " + } + }, + "RepositoryName": { + "base": null, + "refs": { + "BatchCheckLayerAvailabilityRequest$repositoryName": "

    The name of the repository that is associated with the image layers to check.

    ", + "BatchDeleteImageRequest$repositoryName": "

    The repository that contains the image to delete.

    ", + "BatchGetImageRequest$repositoryName": "

    The repository that contains the images to describe.

    ", + "CompleteLayerUploadRequest$repositoryName": "

    The name of the repository to associate with the image layer.

    ", + "CompleteLayerUploadResponse$repositoryName": "

    The repository name associated with the request.

    ", + "CreateRepositoryRequest$repositoryName": "

    The name to use for the repository. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app).

    ", + "DeleteRepositoryPolicyRequest$repositoryName": "

    The name of the repository that is associated with the repository policy to delete.

    ", + "DeleteRepositoryPolicyResponse$repositoryName": "

    The repository name associated with the request.

    ", + "DeleteRepositoryRequest$repositoryName": "

    The name of the repository to delete.

    ", + "GetDownloadUrlForLayerRequest$repositoryName": "

    The name of the repository that is associated with the image layer to download.

    ", + "GetRepositoryPolicyRequest$repositoryName": "

    The name of the repository whose policy you want to retrieve.

    ", + "GetRepositoryPolicyResponse$repositoryName": "

    The repository name associated with the request.

    ", + "Image$repositoryName": "

    The name of the repository associated with the image.

    ", + "InitiateLayerUploadRequest$repositoryName": "

    The name of the repository that you intend to upload layers to.

    ", + "InvalidLayerPartException$repositoryName": "

    The repository name associated with the exception.

    ", + "ListImagesRequest$repositoryName": "

    The repository whose image IDs are to be listed.

    ", + "PutImageRequest$repositoryName": "

    The name of the repository in which to put the image.

    ", + "Repository$repositoryName": "

    The name of the repository.

    ", + "RepositoryNameList$member": null, + "SetRepositoryPolicyRequest$repositoryName": "

    The name of the repository to receive the policy.

    ", + "SetRepositoryPolicyResponse$repositoryName": "

    The repository name associated with the request.

    ", + "UploadLayerPartRequest$repositoryName": "

    The name of the repository that you are uploading layer parts to.

    ", + "UploadLayerPartResponse$repositoryName": "

    The repository name associated with the request.

    " + } + }, + "RepositoryNameList": { + "base": null, + "refs": { + "DescribeRepositoriesRequest$repositoryNames": "

    A list of repositories to describe. If this parameter is omitted, then all repositories in a registry are described.

    " + } + }, + "RepositoryNotEmptyException": { + "base": "

    The specified repository contains images. To delete a repository that contains images, you must force the deletion with the force parameter.

    ", + "refs": { + } + }, + "RepositoryNotFoundException": { + "base": "

    The specified repository could not be found. Check the spelling of the specified repository and ensure that you are performing operations on the correct registry.

    ", + "refs": { + } + }, + "RepositoryPolicyNotFoundException": { + "base": "

    The specified repository and registry combination does not have an associated repository policy.

    ", + "refs": { + } + }, + "RepositoryPolicyText": { + "base": null, + "refs": { + "DeleteRepositoryPolicyResponse$policyText": "

    The JSON repository policy that was deleted from the repository.

    ", + "GetRepositoryPolicyResponse$policyText": "

    The JSON repository policy text associated with the repository.

    ", + "SetRepositoryPolicyRequest$policyText": "

    The JSON repository policy text to apply to the repository.

    ", + "SetRepositoryPolicyResponse$policyText": "

    The JSON repository policy text applied to the repository.

    " + } + }, + "ServerException": { + "base": "

    These errors are usually caused by a server-side issue.

    ", + "refs": { + } + }, + "SetRepositoryPolicyRequest": { + "base": null, + "refs": { + } + }, + "SetRepositoryPolicyResponse": { + "base": null, + "refs": { + } + }, + "UploadId": { + "base": null, + "refs": { + "CompleteLayerUploadRequest$uploadId": "

    The upload ID from a previous InitiateLayerUpload operation to associate with the image layer.

    ", + "CompleteLayerUploadResponse$uploadId": "

    The upload ID associated with the layer.

    ", + "InitiateLayerUploadResponse$uploadId": "

    The upload ID for the layer upload. This parameter is passed to further UploadLayerPart and CompleteLayerUpload operations.

    ", + "InvalidLayerPartException$uploadId": "

    The upload ID associated with the exception.

    ", + "UploadLayerPartRequest$uploadId": "

    The upload ID from a previous InitiateLayerUpload operation to associate with the layer part upload.

    ", + "UploadLayerPartResponse$uploadId": "

    The upload ID associated with the request.

    " + } + }, + "UploadLayerPartRequest": { + "base": null, + "refs": { + } + }, + "UploadLayerPartResponse": { + "base": null, + "refs": { + } + }, + "UploadNotFoundException": { + "base": "

    The upload could not be found, or the specified upload id is not valid for this repository.

    ", + "refs": { + } + }, + "Url": { + "base": null, + "refs": { + "GetDownloadUrlForLayerResponse$downloadUrl": "

    The pre-signed Amazon S3 download URL for the requested layer.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecr/2015-09-21/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1382 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-13", + "endpointPrefix":"ecs", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon ECS", + "serviceFullName":"Amazon EC2 Container Service", + "signatureVersion":"v4", + "targetPrefix":"AmazonEC2ContainerServiceV20141113" + }, + "operations":{ + "CreateCluster":{ + "name":"CreateCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterRequest"}, + "output":{"shape":"CreateClusterResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "CreateService":{ + "name":"CreateService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CreateServiceRequest"}, + "output":{"shape":"CreateServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DeleteCluster":{ + "name":"DeleteCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterRequest"}, + "output":{"shape":"DeleteClusterResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ClusterContainsContainerInstancesException"}, + {"shape":"ClusterContainsServicesException"} + ] + }, + "DeleteService":{ + "name":"DeleteService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteServiceRequest"}, + "output":{"shape":"DeleteServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"} + ] + }, + "DeregisterContainerInstance":{ + "name":"DeregisterContainerInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterContainerInstanceRequest"}, + "output":{"shape":"DeregisterContainerInstanceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DeregisterTaskDefinition":{ + "name":"DeregisterTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterTaskDefinitionRequest"}, + "output":{"shape":"DeregisterTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeClusters":{ + "name":"DescribeClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeClustersRequest"}, + "output":{"shape":"DescribeClustersResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeContainerInstances":{ + "name":"DescribeContainerInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeContainerInstancesRequest"}, + "output":{"shape":"DescribeContainerInstancesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DescribeServices":{ + "name":"DescribeServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServicesRequest"}, + "output":{"shape":"DescribeServicesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DescribeTaskDefinition":{ + "name":"DescribeTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTaskDefinitionRequest"}, + "output":{"shape":"DescribeTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeTasks":{ + "name":"DescribeTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTasksRequest"}, + "output":{"shape":"DescribeTasksResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DiscoverPollEndpoint":{ + "name":"DiscoverPollEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DiscoverPollEndpointRequest"}, + "output":{"shape":"DiscoverPollEndpointResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + 
"ListClusters":{ + "name":"ListClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListClustersRequest"}, + "output":{"shape":"ListClustersResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListContainerInstances":{ + "name":"ListContainerInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListContainerInstancesRequest"}, + "output":{"shape":"ListContainerInstancesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "ListServices":{ + "name":"ListServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServicesRequest"}, + "output":{"shape":"ListServicesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "ListTaskDefinitionFamilies":{ + "name":"ListTaskDefinitionFamilies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTaskDefinitionFamiliesRequest"}, + "output":{"shape":"ListTaskDefinitionFamiliesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListTaskDefinitions":{ + "name":"ListTaskDefinitions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTaskDefinitionsRequest"}, + "output":{"shape":"ListTaskDefinitionsResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListTasks":{ + "name":"ListTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTasksRequest"}, + "output":{"shape":"ListTasksResponse"}, + "errors":[ + {"shape":"ServerException"}, + 
{"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"} + ] + }, + "RegisterContainerInstance":{ + "name":"RegisterContainerInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterContainerInstanceRequest"}, + "output":{"shape":"RegisterContainerInstanceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + "RegisterTaskDefinition":{ + "name":"RegisterTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterTaskDefinitionRequest"}, + "output":{"shape":"RegisterTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "RunTask":{ + "name":"RunTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunTaskRequest"}, + "output":{"shape":"RunTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "StartTask":{ + "name":"StartTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartTaskRequest"}, + "output":{"shape":"StartTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "StopTask":{ + "name":"StopTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopTaskRequest"}, + "output":{"shape":"StopTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "SubmitContainerStateChange":{ + "name":"SubmitContainerStateChange", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SubmitContainerStateChangeRequest"}, + "output":{"shape":"SubmitContainerStateChangeResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + "SubmitTaskStateChange":{ + "name":"SubmitTaskStateChange", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubmitTaskStateChangeRequest"}, + "output":{"shape":"SubmitTaskStateChangeResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + "UpdateContainerAgent":{ + "name":"UpdateContainerAgent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateContainerAgentRequest"}, + "output":{"shape":"UpdateContainerAgentResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"UpdateInProgressException"}, + {"shape":"NoUpdateAvailableException"}, + {"shape":"MissingVersionException"} + ] + }, + "UpdateService":{ + "name":"UpdateService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateServiceRequest"}, + "output":{"shape":"UpdateServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"}, + {"shape":"ServiceNotActiveException"} + ] + } + }, + "shapes":{ + "AgentUpdateStatus":{ + "type":"string", + "enum":[ + "PENDING", + "STAGING", + "STAGED", + "UPDATING", + "UPDATED", + "FAILED" + ] + }, + "Attribute":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"String"}, + "value":{"shape":"String"} + } + }, + "Attributes":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "Boolean":{"type":"boolean"}, + "BoxedBoolean":{ + "type":"boolean", + "box":true + }, + "BoxedInteger":{ + "type":"integer", + "box":true + }, + "ClientException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "Cluster":{ + "type":"structure", + "members":{ + "clusterArn":{"shape":"String"}, + "clusterName":{"shape":"String"}, + "status":{"shape":"String"}, + "registeredContainerInstancesCount":{"shape":"Integer"}, + "runningTasksCount":{"shape":"Integer"}, + "pendingTasksCount":{"shape":"Integer"}, + "activeServicesCount":{"shape":"Integer"} + } + }, + "ClusterContainsContainerInstancesException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ClusterContainsServicesException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ClusterNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Clusters":{ + "type":"list", + "member":{"shape":"Cluster"} + }, + "Container":{ + "type":"structure", + "members":{ + "containerArn":{"shape":"String"}, + "taskArn":{"shape":"String"}, + "name":{"shape":"String"}, + "lastStatus":{"shape":"String"}, + "exitCode":{"shape":"BoxedInteger"}, + "reason":{"shape":"String"}, + "networkBindings":{"shape":"NetworkBindings"} + } + }, + "ContainerDefinition":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "image":{"shape":"String"}, + "cpu":{"shape":"Integer"}, + "memory":{"shape":"Integer"}, + "links":{"shape":"StringList"}, + "portMappings":{"shape":"PortMappingList"}, + "essential":{"shape":"BoxedBoolean"}, + "entryPoint":{"shape":"StringList"}, + "command":{"shape":"StringList"}, + "environment":{"shape":"EnvironmentVariables"}, + "mountPoints":{"shape":"MountPointList"}, + "volumesFrom":{"shape":"VolumeFromList"}, + "hostname":{"shape":"String"}, + "user":{"shape":"String"}, + "workingDirectory":{"shape":"String"}, + "disableNetworking":{"shape":"BoxedBoolean"}, + "privileged":{"shape":"BoxedBoolean"}, + "readonlyRootFilesystem":{"shape":"BoxedBoolean"}, + "dnsServers":{"shape":"StringList"}, + "dnsSearchDomains":{"shape":"StringList"}, + 
"extraHosts":{"shape":"HostEntryList"}, + "dockerSecurityOptions":{"shape":"StringList"}, + "dockerLabels":{"shape":"DockerLabelsMap"}, + "ulimits":{"shape":"UlimitList"}, + "logConfiguration":{"shape":"LogConfiguration"} + } + }, + "ContainerDefinitions":{ + "type":"list", + "member":{"shape":"ContainerDefinition"} + }, + "ContainerInstance":{ + "type":"structure", + "members":{ + "containerInstanceArn":{"shape":"String"}, + "ec2InstanceId":{"shape":"String"}, + "versionInfo":{"shape":"VersionInfo"}, + "remainingResources":{"shape":"Resources"}, + "registeredResources":{"shape":"Resources"}, + "status":{"shape":"String"}, + "agentConnected":{"shape":"Boolean"}, + "runningTasksCount":{"shape":"Integer"}, + "pendingTasksCount":{"shape":"Integer"}, + "agentUpdateStatus":{"shape":"AgentUpdateStatus"}, + "attributes":{"shape":"Attributes"} + } + }, + "ContainerInstances":{ + "type":"list", + "member":{"shape":"ContainerInstance"} + }, + "ContainerOverride":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "command":{"shape":"StringList"}, + "environment":{"shape":"EnvironmentVariables"} + } + }, + "ContainerOverrides":{ + "type":"list", + "member":{"shape":"ContainerOverride"} + }, + "Containers":{ + "type":"list", + "member":{"shape":"Container"} + }, + "CreateClusterRequest":{ + "type":"structure", + "members":{ + "clusterName":{"shape":"String"} + } + }, + "CreateClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{"shape":"Cluster"} + } + }, + "CreateServiceRequest":{ + "type":"structure", + "required":[ + "serviceName", + "taskDefinition", + "desiredCount" + ], + "members":{ + "cluster":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "loadBalancers":{"shape":"LoadBalancers"}, + "desiredCount":{"shape":"BoxedInteger"}, + "clientToken":{"shape":"String"}, + "role":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"} + } + }, + "CreateServiceResponse":{ + 
"type":"structure", + "members":{ + "service":{"shape":"Service"} + } + }, + "DeleteClusterRequest":{ + "type":"structure", + "required":["cluster"], + "members":{ + "cluster":{"shape":"String"} + } + }, + "DeleteClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{"shape":"Cluster"} + } + }, + "DeleteServiceRequest":{ + "type":"structure", + "required":["service"], + "members":{ + "cluster":{"shape":"String"}, + "service":{"shape":"String"} + } + }, + "DeleteServiceResponse":{ + "type":"structure", + "members":{ + "service":{"shape":"Service"} + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "status":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "desiredCount":{"shape":"Integer"}, + "pendingCount":{"shape":"Integer"}, + "runningCount":{"shape":"Integer"}, + "createdAt":{"shape":"Timestamp"}, + "updatedAt":{"shape":"Timestamp"} + } + }, + "DeploymentConfiguration":{ + "type":"structure", + "members":{ + "maximumPercent":{"shape":"BoxedInteger"}, + "minimumHealthyPercent":{"shape":"BoxedInteger"} + } + }, + "Deployments":{ + "type":"list", + "member":{"shape":"Deployment"} + }, + "DeregisterContainerInstanceRequest":{ + "type":"structure", + "required":["containerInstance"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "force":{"shape":"BoxedBoolean"} + } + }, + "DeregisterContainerInstanceResponse":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"ContainerInstance"} + } + }, + "DeregisterTaskDefinitionRequest":{ + "type":"structure", + "required":["taskDefinition"], + "members":{ + "taskDefinition":{"shape":"String"} + } + }, + "DeregisterTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "DescribeClustersRequest":{ + "type":"structure", + "members":{ + "clusters":{"shape":"StringList"} + } + }, + "DescribeClustersResponse":{ + "type":"structure", + "members":{ + 
"clusters":{"shape":"Clusters"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeContainerInstancesRequest":{ + "type":"structure", + "required":["containerInstances"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstances":{"shape":"StringList"} + } + }, + "DescribeContainerInstancesResponse":{ + "type":"structure", + "members":{ + "containerInstances":{"shape":"ContainerInstances"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeServicesRequest":{ + "type":"structure", + "required":["services"], + "members":{ + "cluster":{"shape":"String"}, + "services":{"shape":"StringList"} + } + }, + "DescribeServicesResponse":{ + "type":"structure", + "members":{ + "services":{"shape":"Services"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeTaskDefinitionRequest":{ + "type":"structure", + "required":["taskDefinition"], + "members":{ + "taskDefinition":{"shape":"String"} + } + }, + "DescribeTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "DescribeTasksRequest":{ + "type":"structure", + "required":["tasks"], + "members":{ + "cluster":{"shape":"String"}, + "tasks":{"shape":"StringList"} + } + }, + "DescribeTasksResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + "failures":{"shape":"Failures"} + } + }, + "DesiredStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "PENDING", + "STOPPED" + ] + }, + "DiscoverPollEndpointRequest":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"String"}, + "cluster":{"shape":"String"} + } + }, + "DiscoverPollEndpointResponse":{ + "type":"structure", + "members":{ + "endpoint":{"shape":"String"}, + "telemetryEndpoint":{"shape":"String"} + } + }, + "DockerLabelsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Double":{"type":"double"}, + "EnvironmentVariables":{ + "type":"list", + "member":{"shape":"KeyValuePair"} + }, + "Failure":{ + "type":"structure", + 
"members":{ + "arn":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "Failures":{ + "type":"list", + "member":{"shape":"Failure"} + }, + "HostEntry":{ + "type":"structure", + "required":[ + "hostname", + "ipAddress" + ], + "members":{ + "hostname":{"shape":"String"}, + "ipAddress":{"shape":"String"} + } + }, + "HostEntryList":{ + "type":"list", + "member":{"shape":"HostEntry"} + }, + "HostVolumeProperties":{ + "type":"structure", + "members":{ + "sourcePath":{"shape":"String"} + } + }, + "Integer":{"type":"integer"}, + "InvalidParameterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "KeyValuePair":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "value":{"shape":"String"} + } + }, + "ListClustersRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListClustersResponse":{ + "type":"structure", + "members":{ + "clusterArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListContainerInstancesRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListContainerInstancesResponse":{ + "type":"structure", + "members":{ + "containerInstanceArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListServicesRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListServicesResponse":{ + "type":"structure", + "members":{ + "serviceArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTaskDefinitionFamiliesRequest":{ + "type":"structure", + "members":{ + "familyPrefix":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListTaskDefinitionFamiliesResponse":{ + "type":"structure", + "members":{ + 
"families":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTaskDefinitionsRequest":{ + "type":"structure", + "members":{ + "familyPrefix":{"shape":"String"}, + "status":{"shape":"TaskDefinitionStatus"}, + "sort":{"shape":"SortOrder"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListTaskDefinitionsResponse":{ + "type":"structure", + "members":{ + "taskDefinitionArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTasksRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "family":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"}, + "startedBy":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "desiredStatus":{"shape":"DesiredStatus"} + } + }, + "ListTasksResponse":{ + "type":"structure", + "members":{ + "taskArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "LoadBalancer":{ + "type":"structure", + "members":{ + "loadBalancerName":{"shape":"String"}, + "containerName":{"shape":"String"}, + "containerPort":{"shape":"BoxedInteger"} + } + }, + "LoadBalancers":{ + "type":"list", + "member":{"shape":"LoadBalancer"} + }, + "LogConfiguration":{ + "type":"structure", + "required":["logDriver"], + "members":{ + "logDriver":{"shape":"LogDriver"}, + "options":{"shape":"LogConfigurationOptionsMap"} + } + }, + "LogConfigurationOptionsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "LogDriver":{ + "type":"string", + "enum":[ + "json-file", + "syslog", + "journald", + "gelf", + "fluentd" + ] + }, + "Long":{"type":"long"}, + "MissingVersionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MountPoint":{ + "type":"structure", + "members":{ + "sourceVolume":{"shape":"String"}, + "containerPath":{"shape":"String"}, + "readOnly":{"shape":"BoxedBoolean"} + } + }, + "MountPointList":{ + 
"type":"list", + "member":{"shape":"MountPoint"} + }, + "NetworkBinding":{ + "type":"structure", + "members":{ + "bindIP":{"shape":"String"}, + "containerPort":{"shape":"BoxedInteger"}, + "hostPort":{"shape":"BoxedInteger"}, + "protocol":{"shape":"TransportProtocol"} + } + }, + "NetworkBindings":{ + "type":"list", + "member":{"shape":"NetworkBinding"} + }, + "NoUpdateAvailableException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PortMapping":{ + "type":"structure", + "members":{ + "containerPort":{"shape":"Integer"}, + "hostPort":{"shape":"Integer"}, + "protocol":{"shape":"TransportProtocol"} + } + }, + "PortMappingList":{ + "type":"list", + "member":{"shape":"PortMapping"} + }, + "RegisterContainerInstanceRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "instanceIdentityDocument":{"shape":"String"}, + "instanceIdentityDocumentSignature":{"shape":"String"}, + "totalResources":{"shape":"Resources"}, + "versionInfo":{"shape":"VersionInfo"}, + "containerInstanceArn":{"shape":"String"}, + "attributes":{"shape":"Attributes"} + } + }, + "RegisterContainerInstanceResponse":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"ContainerInstance"} + } + }, + "RegisterTaskDefinitionRequest":{ + "type":"structure", + "required":[ + "family", + "containerDefinitions" + ], + "members":{ + "family":{"shape":"String"}, + "containerDefinitions":{"shape":"ContainerDefinitions"}, + "volumes":{"shape":"VolumeList"} + } + }, + "RegisterTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "RequiresAttributes":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "Resource":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "type":{"shape":"String"}, + "doubleValue":{"shape":"Double"}, + "longValue":{"shape":"Long"}, + "integerValue":{"shape":"Integer"}, + "stringSetValue":{"shape":"StringList"} + } + }, + "Resources":{ + 
"type":"list", + "member":{"shape":"Resource"} + }, + "RunTaskRequest":{ + "type":"structure", + "required":["taskDefinition"], + "members":{ + "cluster":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + "count":{"shape":"BoxedInteger"}, + "startedBy":{"shape":"String"} + } + }, + "RunTaskResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + "failures":{"shape":"Failures"} + } + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true, + "fault":true + }, + "Service":{ + "type":"structure", + "members":{ + "serviceArn":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "loadBalancers":{"shape":"LoadBalancers"}, + "status":{"shape":"String"}, + "desiredCount":{"shape":"Integer"}, + "runningCount":{"shape":"Integer"}, + "pendingCount":{"shape":"Integer"}, + "taskDefinition":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"}, + "deployments":{"shape":"Deployments"}, + "roleArn":{"shape":"String"}, + "events":{"shape":"ServiceEvents"} + } + }, + "ServiceEvent":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "createdAt":{"shape":"Timestamp"}, + "message":{"shape":"String"} + } + }, + "ServiceEvents":{ + "type":"list", + "member":{"shape":"ServiceEvent"} + }, + "ServiceNotActiveException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ServiceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Services":{ + "type":"list", + "member":{"shape":"Service"} + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASC", + "DESC" + ] + }, + "StartTaskRequest":{ + "type":"structure", + "required":[ + "taskDefinition", + "containerInstances" + ], + "members":{ + "cluster":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + 
"containerInstances":{"shape":"StringList"}, + "startedBy":{"shape":"String"} + } + }, + "StartTaskResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + "failures":{"shape":"Failures"} + } + }, + "StopTaskRequest":{ + "type":"structure", + "required":["task"], + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "StopTaskResponse":{ + "type":"structure", + "members":{ + "task":{"shape":"Task"} + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "SubmitContainerStateChangeRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "containerName":{"shape":"String"}, + "status":{"shape":"String"}, + "exitCode":{"shape":"BoxedInteger"}, + "reason":{"shape":"String"}, + "networkBindings":{"shape":"NetworkBindings"} + } + }, + "SubmitContainerStateChangeResponse":{ + "type":"structure", + "members":{ + "acknowledgment":{"shape":"String"} + } + }, + "SubmitTaskStateChangeRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "status":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "SubmitTaskStateChangeResponse":{ + "type":"structure", + "members":{ + "acknowledgment":{"shape":"String"} + } + }, + "Task":{ + "type":"structure", + "members":{ + "taskArn":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "taskDefinitionArn":{"shape":"String"}, + "containerInstanceArn":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + "lastStatus":{"shape":"String"}, + "desiredStatus":{"shape":"String"}, + "containers":{"shape":"Containers"}, + "startedBy":{"shape":"String"}, + "stoppedReason":{"shape":"String"}, + "createdAt":{"shape":"Timestamp"}, + "startedAt":{"shape":"Timestamp"}, + "stoppedAt":{"shape":"Timestamp"} + } + }, + "TaskDefinition":{ + "type":"structure", + "members":{ + 
"taskDefinitionArn":{"shape":"String"}, + "containerDefinitions":{"shape":"ContainerDefinitions"}, + "family":{"shape":"String"}, + "revision":{"shape":"Integer"}, + "volumes":{"shape":"VolumeList"}, + "status":{"shape":"TaskDefinitionStatus"}, + "requiresAttributes":{"shape":"RequiresAttributes"} + } + }, + "TaskDefinitionStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE" + ] + }, + "TaskOverride":{ + "type":"structure", + "members":{ + "containerOverrides":{"shape":"ContainerOverrides"} + } + }, + "Tasks":{ + "type":"list", + "member":{"shape":"Task"} + }, + "Timestamp":{"type":"timestamp"}, + "TransportProtocol":{ + "type":"string", + "enum":[ + "tcp", + "udp" + ] + }, + "Ulimit":{ + "type":"structure", + "required":[ + "name", + "softLimit", + "hardLimit" + ], + "members":{ + "name":{"shape":"UlimitName"}, + "softLimit":{"shape":"Integer"}, + "hardLimit":{"shape":"Integer"} + } + }, + "UlimitList":{ + "type":"list", + "member":{"shape":"Ulimit"} + }, + "UlimitName":{ + "type":"string", + "enum":[ + "core", + "cpu", + "data", + "fsize", + "locks", + "memlock", + "msgqueue", + "nice", + "nofile", + "nproc", + "rss", + "rtprio", + "rttime", + "sigpending", + "stack" + ] + }, + "UpdateContainerAgentRequest":{ + "type":"structure", + "required":["containerInstance"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"} + } + }, + "UpdateContainerAgentResponse":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"ContainerInstance"} + } + }, + "UpdateInProgressException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UpdateServiceRequest":{ + "type":"structure", + "required":["service"], + "members":{ + "cluster":{"shape":"String"}, + "service":{"shape":"String"}, + "desiredCount":{"shape":"BoxedInteger"}, + "taskDefinition":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"} + } + }, + "UpdateServiceResponse":{ + "type":"structure", + "members":{ + 
"service":{"shape":"Service"} + } + }, + "VersionInfo":{ + "type":"structure", + "members":{ + "agentVersion":{"shape":"String"}, + "agentHash":{"shape":"String"}, + "dockerVersion":{"shape":"String"} + } + }, + "Volume":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "host":{"shape":"HostVolumeProperties"} + } + }, + "VolumeFrom":{ + "type":"structure", + "members":{ + "sourceContainer":{"shape":"String"}, + "readOnly":{"shape":"BoxedBoolean"} + } + }, + "VolumeFromList":{ + "type":"list", + "member":{"shape":"VolumeFrom"} + }, + "VolumeList":{ + "type":"list", + "member":{"shape":"Volume"} + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,995 @@ +{ + "version": "2.0", + "service": "

    Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster of EC2 instances. Amazon ECS lets you launch and stop container-enabled applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features like security groups, Amazon EBS volumes, and IAM roles.

    You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon EC2 Container Service eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

    ", + "operations": { + "CreateCluster": "

    Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action.

    ", + "CreateService": "

    Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount, Amazon ECS spawns another instantiation of the task in the specified cluster. To update an existing service, see UpdateService.

    You can optionally specify a deployment configuration for your service. During a deployment (which is triggered by changing the task definition of a service with an UpdateService operation), the service scheduler uses the minimumHealthyPercent and maximumPercent parameters to determine the deployment strategy.

    If the minimumHealthyPercent is below 100%, the scheduler can ignore the desiredCount temporarily during a deployment. For example, if your service has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state; tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance it is hosted on is reported as healthy by the load balancer. The default value for minimumHealthyPercent is 50% in the console and 100% for the AWS CLI, the AWS SDKs, and the APIs.

    The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if your service has a desiredCount of four tasks, a maximumPercent value of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximumPercent is 200%.

    When the service scheduler launches new tasks, it attempts to balance them across the Availability Zones in your cluster with the following logic:

    • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

    ", + "DeleteCluster": "

    Deletes the specified cluster. You must deregister all container instances from this cluster before you may delete it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.

    ", + "DeleteService": "

    Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

    When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in ListServices API operations. After the tasks have stopped, then the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices API operations; however, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices API operations on those services will return a ServiceNotFoundException error.

    ", + "DeregisterContainerInstance": "

    Deregisters an Amazon ECS container instance from the specified cluster. This instance is no longer available to run tasks.

    If you intend to use the container instance for some other purpose after deregistration, you should stop all of the tasks running on the container instance before deregistration to avoid any orphaned tasks from consuming resources.

    Deregistering a container instance removes the instance from a cluster, but it does not terminate the EC2 instance; if you are finished using the instance, be sure to terminate it in the Amazon EC2 console to stop billing.

    When you terminate a container instance, it is automatically deregistered from your cluster.

    ", + "DeregisterTaskDefinition": "

    Deregisters the specified task definition by family and revision. Upon deregistration, the task definition is marked as INACTIVE. Existing tasks and services that reference an INACTIVE task definition continue to run without disruption. Existing services that reference an INACTIVE task definition can still scale up or down by modifying the service's desired count.

    You cannot use an INACTIVE task definition to run new tasks or create new services, and you cannot update an existing service to reference an INACTIVE task definition (although there may be up to a 10 minute window following deregistration where these restrictions have not yet taken effect).

    ", + "DescribeClusters": "

    Describes one or more of your clusters.

    ", + "DescribeContainerInstances": "

    Describes Amazon EC2 Container Service container instances. Returns metadata about registered and remaining resources on each container instance requested.

    ", + "DescribeServices": "

    Describes the specified services running in your cluster.

    ", + "DescribeTaskDefinition": "

    Describes a task definition. You can specify a family and revision to find information about a specific task definition, or you can simply specify the family to find the latest ACTIVE revision in that family.

    You can only describe INACTIVE task definitions while an active task or service references them.

    ", + "DescribeTasks": "

    Describes a specified task or tasks.

    ", + "DiscoverPollEndpoint": "

    This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent.

    Returns an endpoint for the Amazon EC2 Container Service agent to poll for updates.

    ", + "ListClusters": "

    Returns a list of existing clusters.

    ", + "ListContainerInstances": "

    Returns a list of container instances in a specified cluster.

    ", + "ListServices": "

    Lists the services that are running in a specified cluster.

    ", + "ListTaskDefinitionFamilies": "

    Returns a list of task definition families that are registered to your account (which may include task definition families that no longer have any ACTIVE task definitions). You can filter the results with the familyPrefix parameter.

    ", + "ListTaskDefinitions": "

    Returns a list of task definitions that are registered to your account. You can filter the results by family name with the familyPrefix parameter or by status with the status parameter.

    ", + "ListTasks": "

    Returns a list of tasks for a specified cluster. You can filter the results by family name, by a particular container instance, or by the desired status of the task with the family, containerInstance, and desiredStatus parameters.

    ", + "RegisterContainerInstance": "

    This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent.

    Registers an EC2 instance into the specified cluster. This instance becomes available to place containers on.

    ", + "RegisterTaskDefinition": "

    Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon EC2 Container Service Developer Guide.

    ", + "RunTask": "

    Start a task using random placement and the default Amazon ECS scheduler. To use your own scheduler or place a task on a specific container instance, use StartTask instead.

    The count parameter is limited to 10 tasks per call.

    ", + "StartTask": "

    Starts a new task from the specified task definition on the specified container instance or instances. To use the default Amazon ECS scheduler to place your task, use RunTask instead.

    The list of container instances to start tasks on is limited to 10.

    ", + "StopTask": "

    Stops a running task.

    When StopTask is called on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

    ", + "SubmitContainerStateChange": "

    This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent.

    Sent to acknowledge that a container changed states.

    ", + "SubmitTaskStateChange": "

    This action is only used by the Amazon EC2 Container Service agent, and it is not intended for use outside of the agent.

    Sent to acknowledge that a task changed states.

    ", + "UpdateContainerAgent": "

    Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent does not interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.

    UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux with the ecs-init service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually Updating the Amazon ECS Container Agent in the Amazon EC2 Container Service Developer Guide.

    ", + "UpdateService": "

    Modifies the desired count, deployment configuration, or task definition used in a service.

    You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

    You can use UpdateService to modify your task definition and deploy a new version of your service.

    You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

    If the minimumHealthyPercent is below 100%, the scheduler can ignore the desiredCount temporarily during a deployment. For example, if your service has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state; tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance it is hosted on is reported as healthy by the load balancer.

    The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if your service has a desiredCount of four tasks, a maximumPercent value of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

    When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

    When the service scheduler launches new tasks, it attempts to balance them across the Availability Zones in your cluster with the following logic:

    • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

    " + }, + "shapes": { + "AgentUpdateStatus": { + "base": null, + "refs": { + "ContainerInstance$agentUpdateStatus": "

    The status of the most recent agent update. If an update has never been requested, this value is NULL.

    " + } + }, + "Attribute": { + "base": "

    The attributes applicable to a container instance when it is registered.

    ", + "refs": { + "Attributes$member": null, + "RequiresAttributes$member": null + } + }, + "Attributes": { + "base": null, + "refs": { + "ContainerInstance$attributes": "

    The attributes set for the container instance by the Amazon ECS container agent at instance registration.

    ", + "RegisterContainerInstanceRequest$attributes": "

    The container instance attributes that this container instance supports.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "ContainerInstance$agentConnected": "

    This parameter returns true if the agent is actually connected to Amazon ECS. Registered instances with an agent that may be unhealthy or stopped return false, and instances without a connected agent cannot accept placement requests.

    " + } + }, + "BoxedBoolean": { + "base": null, + "refs": { + "ContainerDefinition$essential": "

    If the essential parameter of a container is marked as true, the failure of that container stops the task. If the essential parameter of a container is marked as false, then its failure does not affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

    All tasks must have at least one essential container.

    ", + "ContainerDefinition$disableNetworking": "

    When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

    ", + "ContainerDefinition$privileged": "

    When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

    ", + "ContainerDefinition$readonlyRootFilesystem": "

    When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

    ", + "DeregisterContainerInstanceRequest$force": "

    Forces the deregistration of the container instance. If you have tasks running on the container instance when you deregister it with the force option, these tasks remain running and they continue to pass Elastic Load Balancing load balancer health checks until you terminate the instance or the tasks stop through some other means, but they are orphaned (no longer monitored or accounted for by Amazon ECS). If an orphaned task on your container instance is part of an Amazon ECS service, then the service scheduler starts another copy of that task, on a different container instance if possible.

    ", + "MountPoint$readOnly": "

    If this value is true, the container has read-only access to the volume. If this value is false, then the container can write to the volume. The default value is false.

    ", + "VolumeFrom$readOnly": "

    If this value is true, the container has read-only access to the volume. If this value is false, then the container can write to the volume. The default value is false.

    " + } + }, + "BoxedInteger": { + "base": null, + "refs": { + "Container$exitCode": "

    The exit code returned from the container.

    ", + "CreateServiceRequest$desiredCount": "

    The number of instantiations of the specified task definition to place and keep running on your cluster.

    ", + "DeploymentConfiguration$maximumPercent": "

    The upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. The maximum number of tasks during a deployment is the desiredCount multiplied by the maximumPercent/100, rounded down to the nearest integer value.

    ", + "DeploymentConfiguration$minimumHealthyPercent": "

    The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. The minimum healthy tasks during a deployment is the desiredCount multiplied by the minimumHealthyPercent/100, rounded up to the nearest integer value.

    ", + "ListClustersRequest$maxResults": "

    The maximum number of cluster results returned by ListClusters in paginated output. When this parameter is used, ListClusters only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListClusters request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListClusters returns up to 100 results and a nextToken value if applicable.

    ", + "ListContainerInstancesRequest$maxResults": "

    The maximum number of container instance results returned by ListContainerInstances in paginated output. When this parameter is used, ListContainerInstances only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListContainerInstances request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListContainerInstances returns up to 100 results and a nextToken value if applicable.

    ", + "ListServicesRequest$maxResults": "

    The maximum number of container instance results returned by ListServices in paginated output. When this parameter is used, ListServices only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListServices request with the returned nextToken value. This value can be between 1 and 10. If this parameter is not used, then ListServices returns up to 10 results and a nextToken value if applicable.

    ", + "ListTaskDefinitionFamiliesRequest$maxResults": "

    The maximum number of task definition family results returned by ListTaskDefinitionFamilies in paginated output. When this parameter is used, ListTaskDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTaskDefinitionFamilies request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitionFamilies returns up to 100 results and a nextToken value if applicable.

    ", + "ListTaskDefinitionsRequest$maxResults": "

    The maximum number of task definition results returned by ListTaskDefinitions in paginated output. When this parameter is used, ListTaskDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTaskDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitions returns up to 100 results and a nextToken value if applicable.

    ", + "ListTasksRequest$maxResults": "

    The maximum number of task results returned by ListTasks in paginated output. When this parameter is used, ListTasks only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTasks request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTasks returns up to 100 results and a nextToken value if applicable.

    ", + "LoadBalancer$containerPort": "

    The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.

    ", + "NetworkBinding$containerPort": "

    The port number on the container that is be used with the network binding.

    ", + "NetworkBinding$hostPort": "

    The port number on the host that is used with the network binding.

    ", + "RunTaskRequest$count": "

    The number of instantiations of the specified task to place on your cluster.

    The count parameter is limited to 10 tasks per call.

    ", + "SubmitContainerStateChangeRequest$exitCode": "

    The exit code returned for the state change request.

    ", + "UpdateServiceRequest$desiredCount": "

    The number of instantiations of the task to place and keep running in your service.

    " + } + }, + "ClientException": { + "base": "

    These errors are usually caused by a client action, such as using an action or resource on behalf of a user that doesn't have permission to use the action or resource, or specifying an identifier that is not valid.

    ", + "refs": { + } + }, + "Cluster": { + "base": "

    A regional grouping of one or more container instances on which you can run task requests. Each account receives a default cluster the first time you use the Amazon ECS service, but you may also create other clusters. Clusters may contain more than one instance type simultaneously.

    ", + "refs": { + "Clusters$member": null, + "CreateClusterResponse$cluster": "

    The full description of your new cluster.

    ", + "DeleteClusterResponse$cluster": "

    The full description of the deleted cluster.

    " + } + }, + "ClusterContainsContainerInstancesException": { + "base": "

    You cannot delete a cluster that has registered container instances. You must first deregister the container instances before you can delete the cluster. For more information, see DeregisterContainerInstance.

    ", + "refs": { + } + }, + "ClusterContainsServicesException": { + "base": "

    You cannot delete a cluster that contains services. You must first update the service to reduce its desired task count to 0 and then delete the service. For more information, see UpdateService and DeleteService.

    ", + "refs": { + } + }, + "ClusterNotFoundException": { + "base": "

    The specified cluster could not be found. You can view your available clusters with ListClusters. Amazon ECS clusters are region-specific.

    ", + "refs": { + } + }, + "Clusters": { + "base": null, + "refs": { + "DescribeClustersResponse$clusters": "

    The list of clusters.

    " + } + }, + "Container": { + "base": "

    A Docker container that is part of a task.

    ", + "refs": { + "Containers$member": null + } + }, + "ContainerDefinition": { + "base": "

    Container definitions are used in task definitions to describe the different containers that are launched as part of a task.

    ", + "refs": { + "ContainerDefinitions$member": null + } + }, + "ContainerDefinitions": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$containerDefinitions": "

    A list of container definitions in JSON format that describe the different containers that make up your task.

    ", + "TaskDefinition$containerDefinitions": "

    A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon EC2 Container Service Developer Guide.

    " + } + }, + "ContainerInstance": { + "base": "

    An EC2 instance that is running the Amazon ECS agent and has been registered with a cluster.

    ", + "refs": { + "ContainerInstances$member": null, + "DeregisterContainerInstanceResponse$containerInstance": null, + "RegisterContainerInstanceResponse$containerInstance": null, + "UpdateContainerAgentResponse$containerInstance": null + } + }, + "ContainerInstances": { + "base": null, + "refs": { + "DescribeContainerInstancesResponse$containerInstances": "

    The list of container instances.

    " + } + }, + "ContainerOverride": { + "base": "

    The overrides that should be sent to a container.

    ", + "refs": { + "ContainerOverrides$member": null + } + }, + "ContainerOverrides": { + "base": null, + "refs": { + "TaskOverride$containerOverrides": "

    One or more container overrides sent to a task.

    " + } + }, + "Containers": { + "base": null, + "refs": { + "Task$containers": "

    The containers associated with the task.

    " + } + }, + "CreateClusterRequest": { + "base": null, + "refs": { + } + }, + "CreateClusterResponse": { + "base": null, + "refs": { + } + }, + "CreateServiceRequest": { + "base": null, + "refs": { + } + }, + "CreateServiceResponse": { + "base": null, + "refs": { + } + }, + "DeleteClusterRequest": { + "base": null, + "refs": { + } + }, + "DeleteClusterResponse": { + "base": null, + "refs": { + } + }, + "DeleteServiceRequest": { + "base": null, + "refs": { + } + }, + "DeleteServiceResponse": { + "base": null, + "refs": { + } + }, + "Deployment": { + "base": "

    The details of an Amazon ECS service deployment.

    ", + "refs": { + "Deployments$member": null + } + }, + "DeploymentConfiguration": { + "base": "

    Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

    ", + "refs": { + "CreateServiceRequest$deploymentConfiguration": "

    Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

    ", + "Service$deploymentConfiguration": "

    Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

    ", + "UpdateServiceRequest$deploymentConfiguration": "

    Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

    " + } + }, + "Deployments": { + "base": null, + "refs": { + "Service$deployments": "

    The current state of deployments for the service.

    " + } + }, + "DeregisterContainerInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeregisterContainerInstanceResponse": { + "base": null, + "refs": { + } + }, + "DeregisterTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "DeregisterTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "DescribeClustersRequest": { + "base": null, + "refs": { + } + }, + "DescribeClustersResponse": { + "base": null, + "refs": { + } + }, + "DescribeContainerInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeContainerInstancesResponse": { + "base": null, + "refs": { + } + }, + "DescribeServicesRequest": { + "base": null, + "refs": { + } + }, + "DescribeServicesResponse": { + "base": null, + "refs": { + } + }, + "DescribeTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "DescribeTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "DescribeTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeTasksResponse": { + "base": null, + "refs": { + } + }, + "DesiredStatus": { + "base": null, + "refs": { + "ListTasksRequest$desiredStatus": "

    The task status with which to filter the ListTasks results. Specifying a desiredStatus of STOPPED limits the results to tasks that are in the STOPPED status, which can be useful for debugging tasks that are not starting properly or have died or finished. The default status filter is RUNNING.

    " + } + }, + "DiscoverPollEndpointRequest": { + "base": null, + "refs": { + } + }, + "DiscoverPollEndpointResponse": { + "base": null, + "refs": { + } + }, + "DockerLabelsMap": { + "base": null, + "refs": { + "ContainerDefinition$dockerLabels": "

    A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    " + } + }, + "Double": { + "base": null, + "refs": { + "Resource$doubleValue": "

    When the doubleValue type is set, the value of the resource must be a double precision floating-point type.

    " + } + }, + "EnvironmentVariables": { + "base": null, + "refs": { + "ContainerDefinition$environment": "

    The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

    We do not recommend using plain text environment variables for sensitive information, such as credential data.

    ", + "ContainerOverride$environment": "

    The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition.

    " + } + }, + "Failure": { + "base": "

    A failed resource.

    ", + "refs": { + "Failures$member": null + } + }, + "Failures": { + "base": null, + "refs": { + "DescribeClustersResponse$failures": "

    Any failures associated with the call.

    ", + "DescribeContainerInstancesResponse$failures": "

    Any failures associated with the call.

    ", + "DescribeServicesResponse$failures": "

    Any failures associated with the call.

    ", + "DescribeTasksResponse$failures": "

    Any failures associated with the call.

    ", + "RunTaskResponse$failures": "

    Any failures associated with the call.

    ", + "StartTaskResponse$failures": "

    Any failures associated with the call.

    " + } + }, + "HostEntry": { + "base": "

    Hostnames and IP address entries that are added to the /etc/hosts file of a container via the extraHosts parameter of its ContainerDefinition.

    ", + "refs": { + "HostEntryList$member": null + } + }, + "HostEntryList": { + "base": null, + "refs": { + "ContainerDefinition$extraHosts": "

    A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

    " + } + }, + "HostVolumeProperties": { + "base": "

    Details on a container instance host volume.

    ", + "refs": { + "Volume$host": "

    The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed to persist after the containers associated with it stop running.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "Cluster$registeredContainerInstancesCount": "

    The number of container instances registered into the cluster.

    ", + "Cluster$runningTasksCount": "

    The number of tasks in the cluster that are in the RUNNING state.

    ", + "Cluster$pendingTasksCount": "

    The number of tasks in the cluster that are in the PENDING state.

    ", + "Cluster$activeServicesCount": "

    The number of services that are running on the cluster in an ACTIVE state. You can view these services with ListServices.

    ", + "ContainerDefinition$cpu": "

    The number of cpu units reserved for the container. A container instance has 1,024 cpu units for every CPU core. This parameter specifies the minimum amount of CPU to reserve for a container, and containers share unallocated CPU units with other containers on the instance with the same ratio as their allocated amount. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

    You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

    For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

    The Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2; however, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

    • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to 2 CPU shares.
    • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.
    ", + "ContainerDefinition$memory": "

    The number of MiB of memory to reserve for the container. You must specify a non-zero integer for this parameter; the Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers. If your container attempts to exceed the memory allocated here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

    ", + "ContainerInstance$runningTasksCount": "

    The number of tasks on the container instance that are in the RUNNING status.

    ", + "ContainerInstance$pendingTasksCount": "

    The number of tasks on the container instance that are in the PENDING status.

    ", + "Deployment$desiredCount": "

    The most recent desired count of tasks that was specified for the service to deploy or maintain.

    ", + "Deployment$pendingCount": "

    The number of tasks in the deployment that are in the PENDING status.

    ", + "Deployment$runningCount": "

    The number of tasks in the deployment that are in the RUNNING status.

    ", + "PortMapping$containerPort": "

    The port number on the container that is bound to the user-specified or automatically assigned host port. If you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range (for more information, see hostPort).

    ", + "PortMapping$hostPort": "

    The port number on the container instance to reserve for your container. You can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

    The default ephemeral port range is 49153 to 65535, and this range is used for Docker versions prior to 1.6.0. For Docker version 1.6.0 and later, the Docker daemon tries to read the ephemeral port range from /proc/sys/net/ipv4/ip_local_port_range; if this kernel parameter is unavailable, the default ephemeral port range is used. You should not attempt to specify a host port in the ephemeral port range, because these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

    The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent port 51678. Any host port that was previously specified in a running task is also reserved while the task is running (after a task stops, the host port is released). The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output, and a container instance may have up to 50 reserved ports at a time, including the default reserved ports (automatically assigned ports do not count toward this limit).

    ", + "Resource$integerValue": "

    When the integerValue type is set, the value of the resource must be an integer.

    ", + "Service$desiredCount": "

    The desired number of instantiations of the task definition to keep running on the service. This value is specified when the service is created with CreateService, and it can be modified with UpdateService.

    ", + "Service$runningCount": "

    The number of tasks in the cluster that are in the RUNNING state.

    ", + "Service$pendingCount": "

    The number of tasks in the cluster that are in the PENDING state.

    ", + "TaskDefinition$revision": "

    The revision of the task in a particular family. The revision is a version number of a task definition in a family. When you register a task definition for the first time, the revision is 1; each time you register a new revision of a task definition in the same family, the revision value always increases by one (even if you have deregistered previous revisions in this family).

    ", + "Ulimit$softLimit": "

    The soft limit for the ulimit type.

    ", + "Ulimit$hardLimit": "

    The hard limit for the ulimit type.

    " + } + }, + "InvalidParameterException": { + "base": "

    The specified parameter is invalid. Review the available parameters for the API request.

    ", + "refs": { + } + }, + "KeyValuePair": { + "base": "

    A key and value pair object.

    ", + "refs": { + "EnvironmentVariables$member": null + } + }, + "ListClustersRequest": { + "base": null, + "refs": { + } + }, + "ListClustersResponse": { + "base": null, + "refs": { + } + }, + "ListContainerInstancesRequest": { + "base": null, + "refs": { + } + }, + "ListContainerInstancesResponse": { + "base": null, + "refs": { + } + }, + "ListServicesRequest": { + "base": null, + "refs": { + } + }, + "ListServicesResponse": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionFamiliesRequest": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionFamiliesResponse": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionsRequest": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionsResponse": { + "base": null, + "refs": { + } + }, + "ListTasksRequest": { + "base": null, + "refs": { + } + }, + "ListTasksResponse": { + "base": null, + "refs": { + } + }, + "LoadBalancer": { + "base": "

    Details on a load balancer that is used with a service.

    ", + "refs": { + "LoadBalancers$member": null + } + }, + "LoadBalancers": { + "base": null, + "refs": { + "CreateServiceRequest$loadBalancers": "

    A list of load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

    ", + "Service$loadBalancers": "

    A list of load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

    " + } + }, + "LogConfiguration": { + "base": "

    Log configuration options to send to a custom log driver for the container.

    ", + "refs": { + "ContainerDefinition$logConfiguration": "

    The log configuration specification for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. Valid log drivers are displayed in the LogConfiguration data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon EC2 Container Service Developer Guide.

    " + } + }, + "LogConfigurationOptionsMap": { + "base": null, + "refs": { + "LogConfiguration$options": "

    The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    " + } + }, + "LogDriver": { + "base": null, + "refs": { + "LogConfiguration$logDriver": "

    The log driver to use for the container. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    " + } + }, + "Long": { + "base": null, + "refs": { + "Resource$longValue": "

    When the longValue type is set, the value of the resource must be an extended precision integer (long) type.

    " + } + }, + "MissingVersionException": { + "base": "

    Amazon ECS is unable to determine the current version of the Amazon ECS container agent on the container instance and does not have enough information to proceed with an update. This could be because the agent running on the container instance is an older or custom version that does not use our version information.

    ", + "refs": { + } + }, + "MountPoint": { + "base": "

    Details on a volume mount point that is used in a container definition.

    ", + "refs": { + "MountPointList$member": null + } + }, + "MountPointList": { + "base": null, + "refs": { + "ContainerDefinition$mountPoints": "

    The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

    " + } + }, + "NetworkBinding": { + "base": "

    Details on the network bindings between a container and its host container instance. After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

    ", + "refs": { + "NetworkBindings$member": null + } + }, + "NetworkBindings": { + "base": null, + "refs": { + "Container$networkBindings": "

    The network bindings associated with the container.

    ", + "SubmitContainerStateChangeRequest$networkBindings": "

    The network bindings of the container.

    " + } + }, + "NoUpdateAvailableException": { + "base": "

    There is no update available for this Amazon ECS container agent. This could be because the agent is already running the latest version, or it is so old that there is no update path to the current version.

    ", + "refs": { + } + }, + "PortMapping": { + "base": "

    Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition. After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

    ", + "refs": { + "PortMappingList$member": null + } + }, + "PortMappingList": { + "base": null, + "refs": { + "ContainerDefinition$portMappings": "

    The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic. This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run.

    After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description of a selected task in the Amazon ECS console, or the networkBindings section DescribeTasks responses.

    " + } + }, + "RegisterContainerInstanceRequest": { + "base": null, + "refs": { + } + }, + "RegisterContainerInstanceResponse": { + "base": null, + "refs": { + } + }, + "RegisterTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "RegisterTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "RequiresAttributes": { + "base": null, + "refs": { + "TaskDefinition$requiresAttributes": "

    The container instance attributes required by your task.

    " + } + }, + "Resource": { + "base": "

    Describes the resources available for a container instance.

    ", + "refs": { + "Resources$member": null + } + }, + "Resources": { + "base": null, + "refs": { + "ContainerInstance$remainingResources": "

    The remaining resources of the container instance that are available for new tasks.

    ", + "ContainerInstance$registeredResources": "

    The registered resources on the container instance that are in use by current tasks.

    ", + "RegisterContainerInstanceRequest$totalResources": "

    The resources available on the instance.

    " + } + }, + "RunTaskRequest": { + "base": null, + "refs": { + } + }, + "RunTaskResponse": { + "base": null, + "refs": { + } + }, + "ServerException": { + "base": "

    These errors are usually caused by a server issue.

    ", + "refs": { + } + }, + "Service": { + "base": "

    Details on a service within a cluster.

    ", + "refs": { + "CreateServiceResponse$service": "

    The full description of your service following the create call.

    ", + "DeleteServiceResponse$service": "

    The full description of the deleted service.

    ", + "Services$member": null, + "UpdateServiceResponse$service": "

    The full description of your service following the update call.

    " + } + }, + "ServiceEvent": { + "base": "

    Details on an event associated with a service.

    ", + "refs": { + "ServiceEvents$member": null + } + }, + "ServiceEvents": { + "base": null, + "refs": { + "Service$events": "

    The event stream for your service. A maximum of 100 of the latest events are displayed.

    " + } + }, + "ServiceNotActiveException": { + "base": "

    The specified service is not active. You cannot update a service that is not active. If you have previously deleted a service, you can re-create it with CreateService.

    ", + "refs": { + } + }, + "ServiceNotFoundException": { + "base": "

    The specified service could not be found. You can view your available services with ListServices. Amazon ECS services are cluster-specific and region-specific.

    ", + "refs": { + } + }, + "Services": { + "base": null, + "refs": { + "DescribeServicesResponse$services": "

    The list of services described.

    " + } + }, + "SortOrder": { + "base": null, + "refs": { + "ListTaskDefinitionsRequest$sort": "

    The order in which to sort the results. Valid values are ASC and DESC. By default (ASC), task definitions are listed lexicographically by family name and in ascending numerical order by revision so that the newest task definitions in a family are listed last. Setting this parameter to DESC reverses the sort order on family name and revision so that the newest task definitions in a family are listed first.

    " + } + }, + "StartTaskRequest": { + "base": null, + "refs": { + } + }, + "StartTaskResponse": { + "base": null, + "refs": { + } + }, + "StopTaskRequest": { + "base": null, + "refs": { + } + }, + "StopTaskResponse": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "Attribute$name": "

    The name of the container instance attribute.

    ", + "Attribute$value": "

    The value of the container instance attribute (at this time, the value here is Null, but this could change in future revisions for expandability).

    ", + "ClientException$message": null, + "Cluster$clusterArn": "

    The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs namespace, followed by the region of the cluster, the AWS account ID of the cluster owner, the cluster namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test.

    ", + "Cluster$clusterName": "

    A user-generated string that you use to identify your cluster.

    ", + "Cluster$status": "

    The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE indicates that you can register container instances with the cluster and the associated instances can accept tasks.

    ", + "Container$containerArn": "

    The Amazon Resource Name (ARN) of the container.

    ", + "Container$taskArn": "

    The Amazon Resource Name (ARN) of the task.

    ", + "Container$name": "

    The name of the container.

    ", + "Container$lastStatus": "

    The last known status of the container.

    ", + "Container$reason": "

    A short (255 max characters) human-readable string to provide additional detail about a running or stopped container.

    ", + "ContainerDefinition$name": "

    The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

    ", + "ContainerDefinition$image": "

    The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

    • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).
    • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).
    • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).
    ", + "ContainerDefinition$hostname": "

    The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

    ", + "ContainerDefinition$user": "

    The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

    ", + "ContainerDefinition$workingDirectory": "

    The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

    ", + "ContainerInstance$containerInstanceArn": "

    The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

    ", + "ContainerInstance$ec2InstanceId": "

    The EC2 instance ID of the container instance.

    ", + "ContainerInstance$status": "

    The status of the container instance. The valid values are ACTIVE or INACTIVE. ACTIVE indicates that the container instance can accept tasks.

    ", + "ContainerOverride$name": "

    The name of the container that receives the override.

    ", + "CreateClusterRequest$clusterName": "

    The name of your cluster. If you do not specify a name for your cluster, you create a cluster named default. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

    ", + "CreateServiceRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not specify a cluster, the default cluster is assumed.

    ", + "CreateServiceRequest$serviceName": "

    The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a region or across multiple regions.

    ", + "CreateServiceRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.

    ", + "CreateServiceRequest$clientToken": "

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.

    ", + "CreateServiceRequest$role": "

    The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service.

    ", + "DeleteClusterRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster to delete.

    ", + "DeleteServiceRequest$cluster": "

    The name of the cluster that hosts the service to delete. If you do not specify a cluster, the default cluster is assumed.

    ", + "DeleteServiceRequest$service": "

    The name of the service to delete.

    ", + "Deployment$id": "

    The ID of the deployment.

    ", + "Deployment$status": "

    The status of the deployment. Valid values are PRIMARY (for the most recent deployment), ACTIVE (for previous deployments that still have tasks running, but are being replaced with the PRIMARY deployment), and INACTIVE (for deployments that have been completely replaced).

    ", + "Deployment$taskDefinition": "

    The most recent task definition that was specified for the service to use.

    ", + "DeregisterContainerInstanceRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to deregister. If you do not specify a cluster, the default cluster is assumed.

    ", + "DeregisterContainerInstanceRequest$containerInstance": "

    The container instance ID or full Amazon Resource Name (ARN) of the container instance to deregister. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

    ", + "DeregisterTaskDefinitionRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to deregister. You must specify a revision.

    ", + "DescribeContainerInstancesRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to describe. If you do not specify a cluster, the default cluster is assumed.

    ", + "DescribeServicesRequest$cluster": "

    The name of the cluster that hosts the service to describe. If you do not specify a cluster, the default cluster is assumed.

    ", + "DescribeTaskDefinitionRequest$taskDefinition": "

    The family for the latest ACTIVE revision, family and revision (family:revision) for a specific revision in the family, or full Amazon Resource Name (ARN) of the task definition to describe.

    ", + "DescribeTasksRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to describe. If you do not specify a cluster, the default cluster is assumed.

    ", + "DiscoverPollEndpointRequest$containerInstance": "

    The container instance ID or full Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

    ", + "DiscoverPollEndpointRequest$cluster": "

    The cluster that the container instance belongs to.

    ", + "DiscoverPollEndpointResponse$endpoint": "

    The endpoint for the Amazon ECS agent to poll.

    ", + "DiscoverPollEndpointResponse$telemetryEndpoint": "

    The telemetry endpoint for the Amazon ECS agent.

    ", + "DockerLabelsMap$key": null, + "DockerLabelsMap$value": null, + "Failure$arn": "

    The Amazon Resource Name (ARN) of the failed resource.

    ", + "Failure$reason": "

    The reason for the failure.

    ", + "HostEntry$hostname": "

    The hostname to use in the /etc/hosts entry.

    ", + "HostEntry$ipAddress": "

    The IP address to use in the /etc/hosts entry.

    ", + "HostVolumeProperties$sourcePath": "

    The path on the host container instance that is presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If the host parameter contains a sourcePath file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

    ", + "KeyValuePair$name": "

    The name of the key value pair. For environment variables, this is the name of the environment variable.

    ", + "KeyValuePair$value": "

    The value of the key value pair. For environment variables, this is the value of the environment variable.

    ", + "ListClustersRequest$nextToken": "

    The nextToken value returned from a previous paginated ListClusters request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "ListClustersResponse$nextToken": "

    The nextToken value to include in a future ListClusters request. When the results of a ListClusters request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListContainerInstancesRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list. If you do not specify a cluster, the default cluster is assumed.

    ", + "ListContainerInstancesRequest$nextToken": "

    The nextToken value returned from a previous paginated ListContainerInstances request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "ListContainerInstancesResponse$nextToken": "

    The nextToken value to include in a future ListContainerInstances request. When the results of a ListContainerInstances request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListServicesRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the services to list. If you do not specify a cluster, the default cluster is assumed.

    ", + "ListServicesRequest$nextToken": "

    The nextToken value returned from a previous paginated ListServices request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "ListServicesResponse$nextToken": "

    The nextToken value to include in a future ListServices request. When the results of a ListServices request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListTaskDefinitionFamiliesRequest$familyPrefix": "

    The familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies. If you specify a familyPrefix, only task definition family names that begin with the familyPrefix string are returned.

    ", + "ListTaskDefinitionFamiliesRequest$nextToken": "

    The nextToken value returned from a previous paginated ListTaskDefinitionFamilies request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "ListTaskDefinitionFamiliesResponse$nextToken": "

    The nextToken value to include in a future ListTaskDefinitionFamilies request. When the results of a ListTaskDefinitionFamilies request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListTaskDefinitionsRequest$familyPrefix": "

    The full family name with which to filter the ListTaskDefinitions results. Specifying a familyPrefix limits the listed task definitions to task definition revisions that belong to that family.

    ", + "ListTaskDefinitionsRequest$nextToken": "

    The nextToken value returned from a previous paginated ListTaskDefinitions request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "ListTaskDefinitionsResponse$nextToken": "

    The nextToken value to include in a future ListTaskDefinitions request. When the results of a ListTaskDefinitions request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "ListTasksRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the tasks to list. If you do not specify a cluster, the default cluster is assumed.

    ", + "ListTasksRequest$containerInstance": "

    The container instance ID or full Amazon Resource Name (ARN) of the container instance with which to filter the ListTasks results. Specifying a containerInstance limits the results to tasks that belong to that container instance.

    ", + "ListTasksRequest$family": "

    The name of the family with which to filter the ListTasks results. Specifying a family limits the results to tasks that belong to that family.

    ", + "ListTasksRequest$nextToken": "

    The nextToken value returned from a previous paginated ListTasks request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

    ", + "ListTasksRequest$startedBy": "

    The startedBy value with which to filter the task results. Specifying a startedBy value limits the results to tasks that were started with that value.

    ", + "ListTasksRequest$serviceName": "

    The name of the service with which to filter the ListTasks results. Specifying a serviceName limits the results to tasks that belong to that service.

    ", + "ListTasksResponse$nextToken": "

    The nextToken value to include in a future ListTasks request. When the results of a ListTasks request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "LoadBalancer$loadBalancerName": "

    The name of the load balancer.

    ", + "LoadBalancer$containerName": "

    The name of the container (as it appears in a container definition) to associate with the load balancer.

    ", + "LogConfigurationOptionsMap$key": null, + "LogConfigurationOptionsMap$value": null, + "MountPoint$sourceVolume": "

    The name of the volume to mount.

    ", + "MountPoint$containerPath": "

    The path on the container to mount the host volume at.

    ", + "NetworkBinding$bindIP": "

    The IP address that the container is bound to on the container instance.

    ", + "RegisterContainerInstanceRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster with which to register your container instance. If you do not specify a cluster, the default cluster is assumed.

    ", + "RegisterContainerInstanceRequest$instanceIdentityDocument": "

    The instance identity document for the EC2 instance to register. This document can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/document/

    ", + "RegisterContainerInstanceRequest$instanceIdentityDocumentSignature": "

    The instance identity document signature for the EC2 instance to register. This signature can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/signature/

    ", + "RegisterContainerInstanceRequest$containerInstanceArn": "

    The Amazon Resource Name (ARN) of the container instance (if it was previously registered).

    ", + "RegisterTaskDefinitionRequest$family": "

    You must specify a family for a task definition, which allows you to track multiple versions of the same task definition. The family is used as a name for your task definition. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

    ", + "Resource$name": "

    The name of the resource, such as CPU, MEMORY, PORTS, or a user-defined resource.

    ", + "Resource$type": "

    The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET.

    ", + "RunTaskRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task. If you do not specify a cluster, the default cluster is assumed.

    ", + "RunTaskRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to run. If a revision is not specified, the latest ACTIVE revision is used.

    ", + "RunTaskRequest$startedBy": "

    An optional tag specified when a task is started. For example if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value.

    If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

    ", + "ServerException$message": null, + "Service$serviceArn": "

    The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.

    ", + "Service$serviceName": "

    The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a region or across multiple regions.

    ", + "Service$clusterArn": "

    The Amazon Resource Name (ARN) of the cluster that hosts the service.

    ", + "Service$status": "

    The status of the service. The valid values are ACTIVE, DRAINING, or INACTIVE.

    ", + "Service$taskDefinition": "

    The task definition to use for tasks in the service. This value is specified when the service is created with CreateService, and it can be modified with UpdateService.

    ", + "Service$roleArn": "

    The Amazon Resource Name (ARN) of the IAM role associated with the service that allows the Amazon ECS container agent to register container instances with a load balancer.

    ", + "ServiceEvent$id": "

    The ID string of the event.

    ", + "ServiceEvent$message": "

    The event message.

    ", + "StartTaskRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster on which to start your task. If you do not specify a cluster, the default cluster is assumed.

    ", + "StartTaskRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to start. If a revision is not specified, the latest ACTIVE revision is used.

    ", + "StartTaskRequest$startedBy": "

    An optional tag specified when a task is started. For example if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value.

    If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

    ", + "StopTaskRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to stop. If you do not specify a cluster, the default cluster is assumed.

    ", + "StopTaskRequest$task": "

    The task ID or full Amazon Resource Name (ARN) entry of the task to stop.

    ", + "StopTaskRequest$reason": "

    An optional message specified when a task is stopped. For example, if you are using a custom scheduler, you can use this parameter to specify the reason for stopping the task here, and the message will appear in subsequent DescribeTasks API operations on this task. Up to 255 characters are allowed in this message.

    ", + "StringList$member": null, + "SubmitContainerStateChangeRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container.

    ", + "SubmitContainerStateChangeRequest$task": "

    The task ID or full Amazon Resource Name (ARN) of the task that hosts the container.

    ", + "SubmitContainerStateChangeRequest$containerName": "

    The name of the container.

    ", + "SubmitContainerStateChangeRequest$status": "

    The status of the state change request.

    ", + "SubmitContainerStateChangeRequest$reason": "

    The reason for the state change request.

    ", + "SubmitContainerStateChangeResponse$acknowledgment": "

    Acknowledgement of the state change.

    ", + "SubmitTaskStateChangeRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task.

    ", + "SubmitTaskStateChangeRequest$task": "

    The task ID or full Amazon Resource Name (ARN) of the task in the state change request.

    ", + "SubmitTaskStateChangeRequest$status": "

    The status of the state change request.

    ", + "SubmitTaskStateChangeRequest$reason": "

    The reason for the state change request.

    ", + "SubmitTaskStateChangeResponse$acknowledgment": "

    Acknowledgement of the state change.

    ", + "Task$taskArn": "

    The Amazon Resource Name (ARN) of the task.

    ", + "Task$clusterArn": "

    The Amazon Resource Name (ARN) of the cluster that hosts the task.

    ", + "Task$taskDefinitionArn": "

    The Amazon Resource Name (ARN) of the task definition that creates the task.

    ", + "Task$containerInstanceArn": "

    The Amazon Resource Name (ARN) of the container instances that host the task.

    ", + "Task$lastStatus": "

    The last known status of the task.

    ", + "Task$desiredStatus": "

    The desired status of the task.

    ", + "Task$startedBy": "

    The tag specified when a task is started. If the task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

    ", + "Task$stoppedReason": "

    The reason the task was stopped.

    ", + "TaskDefinition$taskDefinitionArn": "

    The full Amazon Resource Name (ARN) of the task definition.

    ", + "TaskDefinition$family": "

    The family of your task definition, used as the definition name.

    ", + "UpdateContainerAgentRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is running on. If you do not specify a cluster, the default cluster is assumed.

    ", + "UpdateContainerAgentRequest$containerInstance": "

    The container instance ID or full Amazon Resource Name (ARN) entries for the container instance on which you would like to update the Amazon ECS container agent.

    ", + "UpdateServiceRequest$cluster": "

    The short name or full Amazon Resource Name (ARN) of the cluster that your service is running on. If you do not specify a cluster, the default cluster is assumed.

    ", + "UpdateServiceRequest$service": "

    The name of the service to update.

    ", + "UpdateServiceRequest$taskDefinition": "

    The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used. If you modify the task definition with UpdateService, Amazon ECS spawns a task with the new version of the task definition and then stops an old task after the new version is running.

    ", + "VersionInfo$agentVersion": "

    The version number of the Amazon ECS container agent.

    ", + "VersionInfo$agentHash": "

    The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub repository.

    ", + "VersionInfo$dockerVersion": "

    The Docker version running on the container instance.

    ", + "Volume$name": "

    The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.

    ", + "VolumeFrom$sourceContainer": "

    The name of the container to mount volumes from.

    " + } + }, + "StringList": { + "base": null, + "refs": { + "ContainerDefinition$links": "

    The link parameter allows containers to communicate with each other without the need for port mappings, using the name parameter and optionally, an alias for the link. This construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed for each name and alias. For more information on linking Docker containers, see https://docs.docker.com/userguide/dockerlinks/. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run.

    Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

    ", + "ContainerDefinition$entryPoint": "

    Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

    The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/reference/builder/#entrypoint.

    ", + "ContainerDefinition$command": "

    The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/reference/builder/#cmd.

    ", + "ContainerDefinition$dnsServers": "

    A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

    ", + "ContainerDefinition$dnsSearchDomains": "

    A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

    ", + "ContainerDefinition$dockerSecurityOptions": "

    A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

    The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon EC2 Container Service Developer Guide.

    ", + "ContainerOverride$command": "

    The command to send to the container that overrides the default command from the Docker image or the task definition.

    ", + "DescribeClustersRequest$clusters": "

    A space-separated list of cluster names or full cluster Amazon Resource Name (ARN) entries. If you do not specify a cluster, the default cluster is assumed.

    ", + "DescribeContainerInstancesRequest$containerInstances": "

    A space-separated list of container instance IDs or full Amazon Resource Name (ARN) entries.

    ", + "DescribeServicesRequest$services": "

    A list of services to describe.

    ", + "DescribeTasksRequest$tasks": "

    A space-separated list of task IDs or full Amazon Resource Name (ARN) entries.

    ", + "ListClustersResponse$clusterArns": "

    The list of full Amazon Resource Name (ARN) entries for each cluster associated with your account.

    ", + "ListContainerInstancesResponse$containerInstanceArns": "

    The list of container instances with full Amazon Resource Name (ARN) entries for each container instance associated with the specified cluster.

    ", + "ListServicesResponse$serviceArns": "

    The list of full Amazon Resource Name (ARN) entries for each service associated with the specified cluster.

    ", + "ListTaskDefinitionFamiliesResponse$families": "

    The list of task definition family names that match the ListTaskDefinitionFamilies request.

    ", + "ListTaskDefinitionsResponse$taskDefinitionArns": "

    The list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefinitions request.

    ", + "ListTasksResponse$taskArns": "

    The list of task Amazon Resource Name (ARN) entries for the ListTasks request.

    ", + "Resource$stringSetValue": "

    When the stringSetValue type is set, the value of the resource must be a string type.

    ", + "StartTaskRequest$containerInstances": "

    The container instance IDs or full Amazon Resource Name (ARN) entries for the container instances on which you would like to place your task.

    The list of container instances to start tasks on is limited to 10.

    " + } + }, + "SubmitContainerStateChangeRequest": { + "base": null, + "refs": { + } + }, + "SubmitContainerStateChangeResponse": { + "base": null, + "refs": { + } + }, + "SubmitTaskStateChangeRequest": { + "base": null, + "refs": { + } + }, + "SubmitTaskStateChangeResponse": { + "base": null, + "refs": { + } + }, + "Task": { + "base": "

    Details on a task in a cluster.

    ", + "refs": { + "StopTaskResponse$task": null, + "Tasks$member": null + } + }, + "TaskDefinition": { + "base": "

    Details of a task definition.

    ", + "refs": { + "DeregisterTaskDefinitionResponse$taskDefinition": "

    The full description of the deregistered task.

    ", + "DescribeTaskDefinitionResponse$taskDefinition": "

    The full task definition description.

    ", + "RegisterTaskDefinitionResponse$taskDefinition": "

    The full description of the registered task definition.

    " + } + }, + "TaskDefinitionStatus": { + "base": null, + "refs": { + "ListTaskDefinitionsRequest$status": "

    The task definition status with which to filter the ListTaskDefinitions results. By default, only ACTIVE task definitions are listed. By setting this parameter to INACTIVE, you can view task definitions that are INACTIVE as long as an active task or service still references them. If you paginate the resulting output, be sure to keep the status value constant in each subsequent request.

    ", + "TaskDefinition$status": "

    The status of the task definition.

    " + } + }, + "TaskOverride": { + "base": "

    The overrides associated with a task.

    ", + "refs": { + "RunTaskRequest$overrides": "

    A list of container overrides in JSON format that specify the name of a container in the specified task definition and the overrides it should receive. You can override the default command for a container (that is specified in the task definition or Docker image) with a command override. You can also override existing environment variables (that are specified in the task definition or Docker image) on a container or add new environment variables to it with an environment override.

    A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting characters of the override structure.

    ", + "StartTaskRequest$overrides": "

    A list of container overrides in JSON format that specify the name of a container in the specified task definition and the overrides it should receive. You can override the default command for a container (that is specified in the task definition or Docker image) with a command override. You can also override existing environment variables (that are specified in the task definition or Docker image) on a container or add new environment variables to it with an environment override.

    A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting characters of the override structure.

    ", + "Task$overrides": "

    One or more container overrides.

    " + } + }, + "Tasks": { + "base": null, + "refs": { + "DescribeTasksResponse$tasks": "

    The list of tasks.

    ", + "RunTaskResponse$tasks": "

    A full description of the tasks that were run. Each task that was successfully placed on your cluster are described here.

    ", + "StartTaskResponse$tasks": "

    A full description of the tasks that were started. Each task that was successfully placed on your container instances are described here.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "Deployment$createdAt": "

    The Unix time in seconds and milliseconds when the service was created.

    ", + "Deployment$updatedAt": "

    The Unix time in seconds and milliseconds when the service was last updated.

    ", + "ServiceEvent$createdAt": "

    The Unix time in seconds and milliseconds when the event was triggered.

    ", + "Task$createdAt": "

    The Unix time in seconds and milliseconds when the task was created (the task entered the PENDING state).

    ", + "Task$startedAt": "

    The Unix time in seconds and milliseconds when the task was started (the task transitioned from the PENDING state to the RUNNING state).

    ", + "Task$stoppedAt": "

    The Unix time in seconds and milliseconds when the task was stopped (the task transitioned from the RUNNING state to the STOPPED state).

    " + } + }, + "TransportProtocol": { + "base": null, + "refs": { + "NetworkBinding$protocol": "

    The protocol used for the network binding.

    ", + "PortMapping$protocol": "

    The protocol used for the port mapping. Valid values are tcp and udp. The default is tcp.

    " + } + }, + "Ulimit": { + "base": "

    The ulimit settings to pass to the container.

    ", + "refs": { + "UlimitList$member": null + } + }, + "UlimitList": { + "base": null, + "refs": { + "ContainerDefinition$ulimits": "

    A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

    " + } + }, + "UlimitName": { + "base": null, + "refs": { + "Ulimit$name": "

    The type of the ulimit.

    " + } + }, + "UpdateContainerAgentRequest": { + "base": null, + "refs": { + } + }, + "UpdateContainerAgentResponse": { + "base": null, + "refs": { + } + }, + "UpdateInProgressException": { + "base": "

    There is already a current Amazon ECS container agent update in progress on the specified container instance. If the container agent becomes disconnected while it is in a transitional stage, such as PENDING or STAGING, the update process can get stuck in that state. However, when the agent reconnects, it resumes where it stopped previously.

    ", + "refs": { + } + }, + "UpdateServiceRequest": { + "base": null, + "refs": { + } + }, + "UpdateServiceResponse": { + "base": null, + "refs": { + } + }, + "VersionInfo": { + "base": "

    The Docker and Amazon ECS container agent version information about a container instance.

    ", + "refs": { + "ContainerInstance$versionInfo": "

    The version information for the Amazon ECS container agent and Docker daemon running on the container instance.

    ", + "RegisterContainerInstanceRequest$versionInfo": "

    The version information for the Amazon ECS container agent and Docker daemon running on the container instance.

    " + } + }, + "Volume": { + "base": "

    A data volume used in a task definition.

    ", + "refs": { + "VolumeList$member": null + } + }, + "VolumeFrom": { + "base": "

    Details on a data volume from another container.

    ", + "refs": { + "VolumeFromList$member": null + } + }, + "VolumeFromList": { + "base": null, + "refs": { + "ContainerDefinition$volumesFrom": "

    Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

    " + } + }, + "VolumeList": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$volumes": "

    A list of volume definitions in JSON format that containers in your task may use.

    ", + "TaskDefinition$volumes": "

    The list of volumes in a task. For more information about volume definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon EC2 Container Service Developer Guide.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListClusters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "clusterArns" + }, + "ListContainerInstances": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "containerInstanceArns" + }, + "ListTaskDefinitions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "taskDefinitionArns" + }, + "ListTaskDefinitionFamilies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "families" + }, + "ListTasks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": 
"maxResults", + "result_key": "taskArns" + }, + "ListServices": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "serviceArns" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,93 @@ +{ + "version": 2, + "waiters": { + "TasksRunning": { + "delay": 6, + "operation": "DescribeTasks", + "maxAttempts": 100, + "acceptors": [ + { + "expected": "STOPPED", + "matcher": "pathAny", + "state": "failure", + "argument": "tasks[].lastStatus" + }, + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": "failures[].reason" + }, + { + "expected": "RUNNING", + "matcher": "pathAll", + "state": "success", + "argument": "tasks[].lastStatus" + } + ] + }, + "TasksStopped": { + "delay": 6, + "operation": "DescribeTasks", + "maxAttempts": 100, + "acceptors": [ + { + "expected": "STOPPED", + "matcher": "pathAll", + "state": "success", + "argument": "tasks[].lastStatus" + } + ] + }, + "ServicesStable": { + "delay": 15, + "operation": "DescribeServices", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": "failures[].reason" + }, + { + "expected": "DRAINING", + "matcher": "pathAny", + "state": "failure", + "argument": "services[].status" + }, + { + "expected": "INACTIVE", + "matcher": "pathAny", + "state": "failure", + "argument": "services[].status" + }, + { + "expected": true, + "matcher": "path", + 
"state": "success", + "argument": "services | [@[?length(deployments)!=`1`], @[?desiredCount!=runningCount]][] | length(@) == `0`" + } + ] + }, + "ServicesInactive": { + "delay": 15, + "operation": "DescribeServices", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": "failures[].reason" + }, + { + "expected": "INACTIVE", + "matcher": "pathAny", + "state": "success", + "argument": "services[].status" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3807 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-02-02", + "endpointPrefix":"elasticache", + "serviceFullName":"Amazon ElastiCache", + "signatureVersion":"v4", + "xmlNamespace":"http://elasticache.amazonaws.com/doc/2015-02-02/", + "protocol":"query" + }, + "operations":{ + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"AddTagsToResourceResult" + }, + "errors":[ + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotNotFoundFault", + "error":{ + "code":"SnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"TagQuotaPerResourceExceeded", + "error":{ + "code":"TagQuotaPerResourceExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidARNFault", + "error":{ + "code":"InvalidARN", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "AuthorizeCacheSecurityGroupIngress":{ + "name":"AuthorizeCacheSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeCacheSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeCacheSecurityGroupIngressResult", + "wrapper":true, + "resultWrapper":"AuthorizeCacheSecurityGroupIngressResult" + }, + "errors":[ + { + "shape":"CacheSecurityGroupNotFoundFault", + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheSecurityGroupStateFault", + "error":{ + "code":"InvalidCacheSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationAlreadyExistsFault", + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CopySnapshot":{ + "name":"CopySnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopySnapshotMessage"}, + "output":{ + "shape":"CopySnapshotResult", + "wrapper":true, + "resultWrapper":"CopySnapshotResult" + }, + "errors":[ + { + "shape":"SnapshotAlreadyExistsFault", + "error":{ + "code":"SnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + 
}, + { + "shape":"SnapshotNotFoundFault", + "error":{ + "code":"SnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotQuotaExceededFault", + "error":{ + "code":"SnapshotQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSnapshotStateFault", + "error":{ + "code":"InvalidSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateCacheCluster":{ + "name":"CreateCacheCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCacheClusterMessage"}, + "output":{ + "shape":"CreateCacheClusterResult", + "wrapper":true, + "resultWrapper":"CreateCacheClusterResult" + }, + "errors":[ + { + "shape":"ReplicationGroupNotFoundFault", + "error":{ + "code":"ReplicationGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidReplicationGroupStateFault", + "error":{ + "code":"InvalidReplicationGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheClusterAlreadyExistsFault", + "error":{ + "code":"CacheClusterAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InsufficientCacheClusterCapacityFault", + "error":{ + "code":"InsufficientCacheClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSecurityGroupNotFoundFault", + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + 
}, + "exception":true + }, + { + "shape":"CacheSubnetGroupNotFoundFault", + "error":{ + "code":"CacheSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterQuotaForCustomerExceededFault", + "error":{ + "code":"ClusterQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NodeQuotaForClusterExceededFault", + "error":{ + "code":"NodeQuotaForClusterExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NodeQuotaForCustomerExceededFault", + "error":{ + "code":"NodeQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidVPCNetworkStateFault", + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagQuotaPerResourceExceeded", + "error":{ + "code":"TagQuotaPerResourceExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateCacheParameterGroup":{ + "name":"CreateCacheParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCacheParameterGroupMessage"}, + "output":{ + "shape":"CreateCacheParameterGroupResult", + "wrapper":true, + "resultWrapper":"CreateCacheParameterGroupResult" + }, + "errors":[ + { + "shape":"CacheParameterGroupQuotaExceededFault", + 
"error":{ + "code":"CacheParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheParameterGroupAlreadyExistsFault", + "error":{ + "code":"CacheParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheParameterGroupStateFault", + "error":{ + "code":"InvalidCacheParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateCacheSecurityGroup":{ + "name":"CreateCacheSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCacheSecurityGroupMessage"}, + "output":{ + "shape":"CreateCacheSecurityGroupResult", + "wrapper":true, + "resultWrapper":"CreateCacheSecurityGroupResult" + }, + "errors":[ + { + "shape":"CacheSecurityGroupAlreadyExistsFault", + "error":{ + "code":"CacheSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSecurityGroupQuotaExceededFault", + "error":{ + "code":"QuotaExceeded.CacheSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateCacheSubnetGroup":{ + "name":"CreateCacheSubnetGroup", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"CreateCacheSubnetGroupMessage"}, + "output":{ + "shape":"CreateCacheSubnetGroupResult", + "wrapper":true, + "resultWrapper":"CreateCacheSubnetGroupResult" + }, + "errors":[ + { + "shape":"CacheSubnetGroupAlreadyExistsFault", + "error":{ + "code":"CacheSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSubnetGroupQuotaExceededFault", + "error":{ + "code":"CacheSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSubnetQuotaExceededFault", + "error":{ + "code":"CacheSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnet", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateReplicationGroup":{ + "name":"CreateReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReplicationGroupMessage"}, + "output":{ + "shape":"CreateReplicationGroupResult", + "wrapper":true, + "resultWrapper":"CreateReplicationGroupResult" + }, + "errors":[ + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheClusterStateFault", + "error":{ + "code":"InvalidCacheClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ReplicationGroupAlreadyExistsFault", + "error":{ + "code":"ReplicationGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InsufficientCacheClusterCapacityFault", + "error":{ + "code":"InsufficientCacheClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSecurityGroupNotFoundFault", + "error":{ + 
"code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSubnetGroupNotFoundFault", + "error":{ + "code":"CacheSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterQuotaForCustomerExceededFault", + "error":{ + "code":"ClusterQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NodeQuotaForClusterExceededFault", + "error":{ + "code":"NodeQuotaForClusterExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NodeQuotaForCustomerExceededFault", + "error":{ + "code":"NodeQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidVPCNetworkStateFault", + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagQuotaPerResourceExceeded", + "error":{ + "code":"TagQuotaPerResourceExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotMessage"}, + "output":{ + "shape":"CreateSnapshotResult", + "wrapper":true, + "resultWrapper":"CreateSnapshotResult" + }, + "errors":[ + { + 
"shape":"SnapshotAlreadyExistsFault", + "error":{ + "code":"SnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheClusterStateFault", + "error":{ + "code":"InvalidCacheClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotQuotaExceededFault", + "error":{ + "code":"SnapshotQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotFeatureNotSupportedFault", + "error":{ + "code":"SnapshotFeatureNotSupportedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteCacheCluster":{ + "name":"DeleteCacheCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCacheClusterMessage"}, + "output":{ + "shape":"DeleteCacheClusterResult", + "wrapper":true, + "resultWrapper":"DeleteCacheClusterResult" + }, + "errors":[ + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheClusterStateFault", + "error":{ + "code":"InvalidCacheClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotAlreadyExistsFault", + "error":{ + "code":"SnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"SnapshotFeatureNotSupportedFault", + "error":{ + "code":"SnapshotFeatureNotSupportedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotQuotaExceededFault", + "error":{ + "code":"SnapshotQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteCacheParameterGroup":{ + "name":"DeleteCacheParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCacheParameterGroupMessage"}, + "errors":[ + { + "shape":"InvalidCacheParameterGroupStateFault", + "error":{ + "code":"InvalidCacheParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteCacheSecurityGroup":{ + "name":"DeleteCacheSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCacheSecurityGroupMessage"}, + "errors":[ + { + "shape":"InvalidCacheSecurityGroupStateFault", + "error":{ + "code":"InvalidCacheSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"CacheSecurityGroupNotFoundFault", + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteCacheSubnetGroup":{ + "name":"DeleteCacheSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCacheSubnetGroupMessage"}, + "errors":[ + { + "shape":"CacheSubnetGroupInUse", + "error":{ + "code":"CacheSubnetGroupInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSubnetGroupNotFoundFault", + "error":{ + "code":"CacheSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteReplicationGroup":{ + "name":"DeleteReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReplicationGroupMessage"}, + "output":{ + "shape":"DeleteReplicationGroupResult", + "wrapper":true, + "resultWrapper":"DeleteReplicationGroupResult" + }, + "errors":[ + { + "shape":"ReplicationGroupNotFoundFault", + "error":{ + "code":"ReplicationGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidReplicationGroupStateFault", + "error":{ + "code":"InvalidReplicationGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotAlreadyExistsFault", + "error":{ + "code":"SnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotFeatureNotSupportedFault", + "error":{ + "code":"SnapshotFeatureNotSupportedFault", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotQuotaExceededFault", + "error":{ + "code":"SnapshotQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteSnapshot":{ + "name":"DeleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotMessage"}, + "output":{ + "shape":"DeleteSnapshotResult", + "wrapper":true, + "resultWrapper":"DeleteSnapshotResult" + }, + "errors":[ + { + "shape":"SnapshotNotFoundFault", + "error":{ + "code":"SnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSnapshotStateFault", + "error":{ + "code":"InvalidSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeCacheClusters":{ + "name":"DescribeCacheClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheClustersMessage"}, + "output":{ + "shape":"CacheClusterMessage", + "resultWrapper":"DescribeCacheClustersResult" + }, + "errors":[ + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeCacheEngineVersions":{ + "name":"DescribeCacheEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheEngineVersionsMessage"}, + "output":{ + "shape":"CacheEngineVersionMessage", + "resultWrapper":"DescribeCacheEngineVersionsResult" + } + }, + "DescribeCacheParameterGroups":{ + "name":"DescribeCacheParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheParameterGroupsMessage"}, + "output":{ + "shape":"CacheParameterGroupsMessage", + "resultWrapper":"DescribeCacheParameterGroupsResult" + }, + "errors":[ + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeCacheParameters":{ + "name":"DescribeCacheParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheParametersMessage"}, + "output":{ + "shape":"CacheParameterGroupDetails", + "resultWrapper":"DescribeCacheParametersResult" + }, + "errors":[ + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeCacheSecurityGroups":{ + "name":"DescribeCacheSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheSecurityGroupsMessage"}, + "output":{ + "shape":"CacheSecurityGroupMessage", + "resultWrapper":"DescribeCacheSecurityGroupsResult" + }, + "errors":[ + { + "shape":"CacheSecurityGroupNotFoundFault", + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeCacheSubnetGroups":{ + "name":"DescribeCacheSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheSubnetGroupsMessage"}, + "output":{ + "shape":"CacheSubnetGroupMessage", + "resultWrapper":"DescribeCacheSubnetGroupsResult" + }, + "errors":[ + { + "shape":"CacheSubnetGroupNotFoundFault", + "error":{ + "code":"CacheSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "wrapper":true, + 
"resultWrapper":"DescribeEngineDefaultParametersResult" + }, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + }, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeReplicationGroups":{ + "name":"DescribeReplicationGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplicationGroupsMessage"}, + "output":{ + "shape":"ReplicationGroupMessage", + "resultWrapper":"DescribeReplicationGroupsResult" + }, + "errors":[ + { + "shape":"ReplicationGroupNotFoundFault", + "error":{ + "code":"ReplicationGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeReservedCacheNodes":{ + "name":"DescribeReservedCacheNodes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeReservedCacheNodesMessage"}, + "output":{ + "shape":"ReservedCacheNodeMessage", + "resultWrapper":"DescribeReservedCacheNodesResult" + }, + "errors":[ + { + "shape":"ReservedCacheNodeNotFoundFault", + "error":{ + "code":"ReservedCacheNodeNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeReservedCacheNodesOfferings":{ + "name":"DescribeReservedCacheNodesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedCacheNodesOfferingsMessage"}, + "output":{ + "shape":"ReservedCacheNodesOfferingMessage", + "resultWrapper":"DescribeReservedCacheNodesOfferingsResult" + }, + "errors":[ + { + "shape":"ReservedCacheNodesOfferingNotFoundFault", + "error":{ + "code":"ReservedCacheNodesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeSnapshots":{ + "name":"DescribeSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotsMessage"}, + "output":{ + "shape":"DescribeSnapshotsListMessage", + "resultWrapper":"DescribeSnapshotsResult" + }, + "errors":[ + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + 
"senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotNotFoundFault", + "error":{ + "code":"SnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotNotFoundFault", + "error":{ + "code":"SnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidARNFault", + "error":{ + "code":"InvalidARN", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyCacheCluster":{ + "name":"ModifyCacheCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCacheClusterMessage"}, + "output":{ + "shape":"ModifyCacheClusterResult", + "wrapper":true, + "resultWrapper":"ModifyCacheClusterResult" + }, + "errors":[ + { + "shape":"InvalidCacheClusterStateFault", + "error":{ + "code":"InvalidCacheClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheSecurityGroupStateFault", + "error":{ + "code":"InvalidCacheSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InsufficientCacheClusterCapacityFault", 
+ "error":{ + "code":"InsufficientCacheClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NodeQuotaForClusterExceededFault", + "error":{ + "code":"NodeQuotaForClusterExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NodeQuotaForCustomerExceededFault", + "error":{ + "code":"NodeQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSecurityGroupNotFoundFault", + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidVPCNetworkStateFault", + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyCacheParameterGroup":{ + "name":"ModifyCacheParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCacheParameterGroupMessage"}, + "output":{ + "shape":"CacheParameterGroupNameMessage", + "resultWrapper":"ModifyCacheParameterGroupResult" + }, + "errors":[ + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + 
"exception":true + }, + { + "shape":"InvalidCacheParameterGroupStateFault", + "error":{ + "code":"InvalidCacheParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyCacheSubnetGroup":{ + "name":"ModifyCacheSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCacheSubnetGroupMessage"}, + "output":{ + "shape":"ModifyCacheSubnetGroupResult", + "wrapper":true, + "resultWrapper":"ModifyCacheSubnetGroupResult" + }, + "errors":[ + { + "shape":"CacheSubnetGroupNotFoundFault", + "error":{ + "code":"CacheSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSubnetQuotaExceededFault", + "error":{ + "code":"CacheSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubnetInUse", + "error":{ + "code":"SubnetInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnet", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyReplicationGroup":{ + "name":"ModifyReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyReplicationGroupMessage"}, + "output":{ + "shape":"ModifyReplicationGroupResult", + "wrapper":true, + "resultWrapper":"ModifyReplicationGroupResult" + }, + "errors":[ + { + "shape":"ReplicationGroupNotFoundFault", + "error":{ + "code":"ReplicationGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, 
+ { + "shape":"InvalidReplicationGroupStateFault", + "error":{ + "code":"InvalidReplicationGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheClusterStateFault", + "error":{ + "code":"InvalidCacheClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheSecurityGroupStateFault", + "error":{ + "code":"InvalidCacheSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InsufficientCacheClusterCapacityFault", + "error":{ + "code":"InsufficientCacheClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NodeQuotaForClusterExceededFault", + "error":{ + "code":"NodeQuotaForClusterExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NodeQuotaForCustomerExceededFault", + "error":{ + "code":"NodeQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheSecurityGroupNotFoundFault", + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidVPCNetworkStateFault", + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + 
"code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "PurchaseReservedCacheNodesOffering":{ + "name":"PurchaseReservedCacheNodesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedCacheNodesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedCacheNodesOfferingResult", + "wrapper":true, + "resultWrapper":"PurchaseReservedCacheNodesOfferingResult" + }, + "errors":[ + { + "shape":"ReservedCacheNodesOfferingNotFoundFault", + "error":{ + "code":"ReservedCacheNodesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ReservedCacheNodeAlreadyExistsFault", + "error":{ + "code":"ReservedCacheNodeAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ReservedCacheNodeQuotaExceededFault", + "error":{ + "code":"ReservedCacheNodeQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "RebootCacheCluster":{ + "name":"RebootCacheCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootCacheClusterMessage"}, + "output":{ + "shape":"RebootCacheClusterResult", + "wrapper":true, + "resultWrapper":"RebootCacheClusterResult" + }, + "errors":[ + { + "shape":"InvalidCacheClusterStateFault", + "error":{ + "code":"InvalidCacheClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, 
+ "exception":true + } + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"RemoveTagsFromResourceResult" + }, + "errors":[ + { + "shape":"CacheClusterNotFoundFault", + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotNotFoundFault", + "error":{ + "code":"SnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidARNFault", + "error":{ + "code":"InvalidARN", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagNotFoundFault", + "error":{ + "code":"TagNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "ResetCacheParameterGroup":{ + "name":"ResetCacheParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetCacheParameterGroupMessage"}, + "output":{ + "shape":"CacheParameterGroupNameMessage", + "resultWrapper":"ResetCacheParameterGroupResult" + }, + "errors":[ + { + "shape":"InvalidCacheParameterGroupStateFault", + "error":{ + "code":"InvalidCacheParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CacheParameterGroupNotFoundFault", + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "RevokeCacheSecurityGroupIngress":{ + 
"name":"RevokeCacheSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeCacheSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeCacheSecurityGroupIngressResult", + "wrapper":true, + "resultWrapper":"RevokeCacheSecurityGroupIngressResult" + }, + "errors":[ + { + "shape":"CacheSecurityGroupNotFoundFault", + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationNotFoundFault", + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCacheSecurityGroupStateFault", + "error":{ + "code":"InvalidCacheSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "AZMode":{ + "type":"string", + "enum":[ + "single-az", + "cross-az" + ] + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizeCacheSecurityGroupIngressMessage":{ + "type":"structure", + "required":[ + "CacheSecurityGroupName", + 
"EC2SecurityGroupName", + "EC2SecurityGroupOwnerId" + ], + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AutomaticFailoverStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled", + "enabling", + "disabling" + ] + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + }, + "wrapper":true + }, + "AvailabilityZonesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AvailabilityZone" + } + }, + "AwsQueryErrorMessage":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CacheCluster":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + "ConfigurationEndpoint":{"shape":"Endpoint"}, + "ClientDownloadLandingPage":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheClusterStatus":{"shape":"String"}, + "NumCacheNodes":{"shape":"IntegerOptional"}, + "PreferredAvailabilityZone":{"shape":"String"}, + "CacheClusterCreateTime":{"shape":"TStamp"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "NotificationConfiguration":{"shape":"NotificationConfiguration"}, + "CacheSecurityGroups":{"shape":"CacheSecurityGroupMembershipList"}, + "CacheParameterGroup":{"shape":"CacheParameterGroupStatus"}, + "CacheSubnetGroupName":{"shape":"String"}, + "CacheNodes":{"shape":"CacheNodeList"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "SecurityGroups":{"shape":"SecurityGroupMembershipList"}, + "ReplicationGroupId":{"shape":"String"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"} + }, + "wrapper":true + }, + "CacheClusterAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheClusterAlreadyExists", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheClusterList":{ + "type":"list", + "member":{ + "shape":"CacheCluster", + "locationName":"CacheCluster" + } + }, + "CacheClusterMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheClusters":{"shape":"CacheClusterList"} + } + }, + "CacheClusterNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CacheEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheParameterGroupFamily":{"shape":"String"}, + "CacheEngineDescription":{"shape":"String"}, + "CacheEngineVersionDescription":{"shape":"String"} + } + }, + "CacheEngineVersionList":{ + "type":"list", + "member":{ + "shape":"CacheEngineVersion", + "locationName":"CacheEngineVersion" + } + }, + "CacheEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheEngineVersions":{"shape":"CacheEngineVersionList"} + } + }, + "CacheNode":{ + "type":"structure", + "members":{ + "CacheNodeId":{"shape":"String"}, + "CacheNodeStatus":{"shape":"String"}, + "CacheNodeCreateTime":{"shape":"TStamp"}, + "Endpoint":{"shape":"Endpoint"}, + "ParameterGroupStatus":{"shape":"String"}, + "SourceCacheNodeId":{"shape":"String"}, + "CustomerAvailabilityZone":{"shape":"String"} + } + }, + "CacheNodeIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CacheNodeId" + } + }, + "CacheNodeList":{ + "type":"list", + "member":{ + "shape":"CacheNode", + "locationName":"CacheNode" + } + }, + "CacheNodeTypeSpecificParameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + 
"MinimumEngineVersion":{"shape":"String"}, + "CacheNodeTypeSpecificValues":{"shape":"CacheNodeTypeSpecificValueList"} + } + }, + "CacheNodeTypeSpecificParametersList":{ + "type":"list", + "member":{ + "shape":"CacheNodeTypeSpecificParameter", + "locationName":"CacheNodeTypeSpecificParameter" + } + }, + "CacheNodeTypeSpecificValue":{ + "type":"structure", + "members":{ + "CacheNodeType":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "CacheNodeTypeSpecificValueList":{ + "type":"list", + "member":{ + "shape":"CacheNodeTypeSpecificValue", + "locationName":"CacheNodeTypeSpecificValue" + } + }, + "CacheParameterGroup":{ + "type":"structure", + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "CacheParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "CacheParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheParameterGroupDetails":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"}, + "CacheNodeTypeSpecificParameters":{"shape":"CacheNodeTypeSpecificParametersList"} + } + }, + "CacheParameterGroupList":{ + "type":"list", + "member":{ + "shape":"CacheParameterGroup", + "locationName":"CacheParameterGroup" + } + }, + "CacheParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "CacheParameterGroupName":{"shape":"String"} + } + }, + "CacheParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CacheParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
"CacheParameterGroupStatus":{ + "type":"structure", + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"}, + "CacheNodeIdsToReboot":{"shape":"CacheNodeIdsList"} + } + }, + "CacheParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheParameterGroups":{"shape":"CacheParameterGroupList"} + } + }, + "CacheSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "CacheSecurityGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"} + }, + "wrapper":true + }, + "CacheSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSecurityGroupMembership":{ + "type":"structure", + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "CacheSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"CacheSecurityGroupMembership", + "locationName":"CacheSecurityGroup" + } + }, + "CacheSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheSecurityGroups":{"shape":"CacheSecurityGroups"} + } + }, + "CacheSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"CacheSecurityGroupName" + } + }, + "CacheSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CacheSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.CacheSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSecurityGroups":{ + "type":"list", + "member":{ + "shape":"CacheSecurityGroup", + 
"locationName":"CacheSecurityGroup" + } + }, + "CacheSubnetGroup":{ + "type":"structure", + "members":{ + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "CacheSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSubnetGroupInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetGroupInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "CacheSubnetGroups":{"shape":"CacheSubnetGroups"} + } + }, + "CacheSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CacheSubnetGroups":{ + "type":"list", + "member":{ + "shape":"CacheSubnetGroup", + "locationName":"CacheSubnetGroup" + } + }, + "CacheSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CacheSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ClusterId" + } + }, + "ClusterQuotaForCustomerExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CopySnapshotMessage":{ + "type":"structure", + "required":[ + 
"SourceSnapshotName", + "TargetSnapshotName" + ], + "members":{ + "SourceSnapshotName":{"shape":"String"}, + "TargetSnapshotName":{"shape":"String"} + } + }, + "CreateCacheClusterMessage":{ + "type":"structure", + "required":["CacheClusterId"], + "members":{ + "CacheClusterId":{"shape":"String"}, + "ReplicationGroupId":{"shape":"String"}, + "AZMode":{"shape":"AZMode"}, + "PreferredAvailabilityZone":{"shape":"String"}, + "PreferredAvailabilityZones":{"shape":"PreferredAvailabilityZoneList"}, + "NumCacheNodes":{"shape":"IntegerOptional"}, + "CacheNodeType":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheParameterGroupName":{"shape":"String"}, + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSecurityGroupNames":{"shape":"CacheSecurityGroupNameList"}, + "SecurityGroupIds":{"shape":"SecurityGroupIdsList"}, + "Tags":{"shape":"TagList"}, + "SnapshotArns":{"shape":"SnapshotArnsList"}, + "SnapshotName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "NotificationTopicArn":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"} + } + }, + "CreateCacheParameterGroupMessage":{ + "type":"structure", + "required":[ + "CacheParameterGroupName", + "CacheParameterGroupFamily", + "Description" + ], + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "CacheParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateCacheSecurityGroupMessage":{ + "type":"structure", + "required":[ + "CacheSecurityGroupName", + "Description" + ], + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateCacheSubnetGroupMessage":{ + "type":"structure", + "required":[ + "CacheSubnetGroupName", + "CacheSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + 
"CacheSubnetGroupName":{"shape":"String"}, + "CacheSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "CreateReplicationGroupMessage":{ + "type":"structure", + "required":[ + "ReplicationGroupId", + "ReplicationGroupDescription" + ], + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "ReplicationGroupDescription":{"shape":"String"}, + "PrimaryClusterId":{"shape":"String"}, + "AutomaticFailoverEnabled":{"shape":"BooleanOptional"}, + "NumCacheClusters":{"shape":"IntegerOptional"}, + "PreferredCacheClusterAZs":{"shape":"AvailabilityZonesList"}, + "CacheNodeType":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheParameterGroupName":{"shape":"String"}, + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSecurityGroupNames":{"shape":"CacheSecurityGroupNameList"}, + "SecurityGroupIds":{"shape":"SecurityGroupIdsList"}, + "Tags":{"shape":"TagList"}, + "SnapshotArns":{"shape":"SnapshotArnsList"}, + "SnapshotName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "NotificationTopicArn":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"} + } + }, + "CreateSnapshotMessage":{ + "type":"structure", + "required":[ + "CacheClusterId", + "SnapshotName" + ], + "members":{ + "CacheClusterId":{"shape":"String"}, + "SnapshotName":{"shape":"String"} + } + }, + "DeleteCacheClusterMessage":{ + "type":"structure", + "required":["CacheClusterId"], + "members":{ + "CacheClusterId":{"shape":"String"}, + "FinalSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteCacheParameterGroupMessage":{ + "type":"structure", + "required":["CacheParameterGroupName"], + "members":{ + "CacheParameterGroupName":{"shape":"String"} + } + }, + "DeleteCacheSecurityGroupMessage":{ + "type":"structure", + 
"required":["CacheSecurityGroupName"], + "members":{ + "CacheSecurityGroupName":{"shape":"String"} + } + }, + "DeleteCacheSubnetGroupMessage":{ + "type":"structure", + "required":["CacheSubnetGroupName"], + "members":{ + "CacheSubnetGroupName":{"shape":"String"} + } + }, + "DeleteReplicationGroupMessage":{ + "type":"structure", + "required":["ReplicationGroupId"], + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "RetainPrimaryCluster":{"shape":"BooleanOptional"}, + "FinalSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteSnapshotMessage":{ + "type":"structure", + "required":["SnapshotName"], + "members":{ + "SnapshotName":{"shape":"String"} + } + }, + "DescribeCacheClustersMessage":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "ShowCacheNodeInfo":{"shape":"BooleanOptional"} + } + }, + "DescribeCacheEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "CacheParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"} + } + }, + "DescribeCacheParameterGroupsMessage":{ + "type":"structure", + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeCacheParametersMessage":{ + "type":"structure", + "required":["CacheParameterGroupName"], + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeCacheSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeCacheSubnetGroupsMessage":{ + "type":"structure", + 
"members":{ + "CacheSubnetGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["CacheParameterGroupFamily"], + "members":{ + "CacheParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReplicationGroupsMessage":{ + "type":"structure", + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedCacheNodesMessage":{ + "type":"structure", + "members":{ + "ReservedCacheNodeId":{"shape":"String"}, + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedCacheNodesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeSnapshotsListMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Snapshots":{"shape":"SnapshotList"} + } + }, + "DescribeSnapshotsMessage":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + 
"SnapshotName":{"shape":"String"}, + "SnapshotSource":{"shape":"String"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"} + } + }, + "Double":{"type":"double"}, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "CacheParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"}, + "CacheNodeTypeSpecificParameters":{"shape":"CacheNodeTypeSpecificParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "Date":{"shape":"TStamp"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "InsufficientCacheClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientCacheClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidARNFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidARN", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidCacheClusterStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidCacheClusterState", + "httpStatusCode":400, + "senderFault":true + }, + 
"exception":true + }, + "InvalidCacheParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidCacheParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidCacheSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidCacheSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterCombinationException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidReplicationGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidReplicationGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"} + } + }, + "ModifyCacheClusterMessage":{ + "type":"structure", + 
"required":["CacheClusterId"], + "members":{ + "CacheClusterId":{"shape":"String"}, + "NumCacheNodes":{"shape":"IntegerOptional"}, + "CacheNodeIdsToRemove":{"shape":"CacheNodeIdsList"}, + "AZMode":{"shape":"AZMode"}, + "NewAvailabilityZones":{"shape":"PreferredAvailabilityZoneList"}, + "CacheSecurityGroupNames":{"shape":"CacheSecurityGroupNameList"}, + "SecurityGroupIds":{"shape":"SecurityGroupIdsList"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "NotificationTopicArn":{"shape":"String"}, + "CacheParameterGroupName":{"shape":"String"}, + "NotificationTopicStatus":{"shape":"String"}, + "ApplyImmediately":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"} + } + }, + "ModifyCacheParameterGroupMessage":{ + "type":"structure", + "required":[ + "CacheParameterGroupName", + "ParameterNameValues" + ], + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "ParameterNameValues":{"shape":"ParameterNameValueList"} + } + }, + "ModifyCacheSubnetGroupMessage":{ + "type":"structure", + "required":["CacheSubnetGroupName"], + "members":{ + "CacheSubnetGroupName":{"shape":"String"}, + "CacheSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyReplicationGroupMessage":{ + "type":"structure", + "required":["ReplicationGroupId"], + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "ReplicationGroupDescription":{"shape":"String"}, + "PrimaryClusterId":{"shape":"String"}, + "SnapshottingClusterId":{"shape":"String"}, + "AutomaticFailoverEnabled":{"shape":"BooleanOptional"}, + "CacheSecurityGroupNames":{"shape":"CacheSecurityGroupNameList"}, + "SecurityGroupIds":{"shape":"SecurityGroupIdsList"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "NotificationTopicArn":{"shape":"String"}, + "CacheParameterGroupName":{"shape":"String"}, + 
"NotificationTopicStatus":{"shape":"String"}, + "ApplyImmediately":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"} + } + }, + "NodeGroup":{ + "type":"structure", + "members":{ + "NodeGroupId":{"shape":"String"}, + "Status":{"shape":"String"}, + "PrimaryEndpoint":{"shape":"Endpoint"}, + "NodeGroupMembers":{"shape":"NodeGroupMemberList"} + } + }, + "NodeGroupList":{ + "type":"list", + "member":{ + "shape":"NodeGroup", + "locationName":"NodeGroup" + } + }, + "NodeGroupMember":{ + "type":"structure", + "members":{ + "CacheClusterId":{"shape":"String"}, + "CacheNodeId":{"shape":"String"}, + "ReadEndpoint":{"shape":"Endpoint"}, + "PreferredAvailabilityZone":{"shape":"String"}, + "CurrentRole":{"shape":"String"} + } + }, + "NodeGroupMemberList":{ + "type":"list", + "member":{ + "shape":"NodeGroupMember", + "locationName":"NodeGroupMember" + } + }, + "NodeQuotaForClusterExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NodeQuotaForClusterExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NodeQuotaForCustomerExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NodeQuotaForCustomerExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NodeSnapshot":{ + "type":"structure", + "members":{ + "CacheNodeId":{"shape":"String"}, + "CacheSize":{"shape":"String"}, + "CacheNodeCreateTime":{"shape":"TStamp"}, + "SnapshotCreateTime":{"shape":"TStamp"} + }, + "wrapper":true + }, + "NodeSnapshotList":{ + "type":"list", + "member":{ + "shape":"NodeSnapshot", + "locationName":"NodeSnapshot" + } + }, + "NotificationConfiguration":{ + "type":"structure", + "members":{ + "TopicArn":{"shape":"String"}, + "TopicStatus":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + 
"ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"} + } + }, + "ParameterNameValue":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"} + } + }, + "ParameterNameValueList":{ + "type":"list", + "member":{ + "shape":"ParameterNameValue", + "locationName":"ParameterNameValue" + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingAutomaticFailoverStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "NumCacheNodes":{"shape":"IntegerOptional"}, + "CacheNodeIdsToRemove":{"shape":"CacheNodeIdsList"}, + "EngineVersion":{"shape":"String"} + } + }, + "PreferredAvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"PreferredAvailabilityZone" + } + }, + "PurchaseReservedCacheNodesOfferingMessage":{ + "type":"structure", + "required":["ReservedCacheNodesOfferingId"], + "members":{ + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "ReservedCacheNodeId":{"shape":"String"}, + "CacheNodeCount":{"shape":"IntegerOptional"} + } + }, + "RebootCacheClusterMessage":{ + "type":"structure", + "required":[ + "CacheClusterId", + "CacheNodeIdsToReboot" + ], + "members":{ + "CacheClusterId":{"shape":"String"}, + "CacheNodeIdsToReboot":{"shape":"CacheNodeIdsList"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + 
"RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReplicationGroup":{ + "type":"structure", + "members":{ + "ReplicationGroupId":{"shape":"String"}, + "Description":{"shape":"String"}, + "Status":{"shape":"String"}, + "PendingModifiedValues":{"shape":"ReplicationGroupPendingModifiedValues"}, + "MemberClusters":{"shape":"ClusterIdList"}, + "NodeGroups":{"shape":"NodeGroupList"}, + "SnapshottingClusterId":{"shape":"String"}, + "AutomaticFailover":{"shape":"AutomaticFailoverStatus"} + }, + "wrapper":true + }, + "ReplicationGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReplicationGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReplicationGroupList":{ + "type":"list", + "member":{ + "shape":"ReplicationGroup", + "locationName":"ReplicationGroup" + } + }, + "ReplicationGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReplicationGroups":{"shape":"ReplicationGroupList"} + } + }, + "ReplicationGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReplicationGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReplicationGroupPendingModifiedValues":{ + "type":"structure", + "members":{ + "PrimaryClusterId":{"shape":"String"}, + "AutomaticFailoverStatus":{"shape":"PendingAutomaticFailoverStatus"} + } + }, + "ReservedCacheNode":{ + "type":"structure", + "members":{ + "ReservedCacheNodeId":{"shape":"String"}, + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CacheNodeCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + 
"OfferingType":{"shape":"String"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedCacheNodeAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedCacheNodeAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedCacheNodeList":{ + "type":"list", + "member":{ + "shape":"ReservedCacheNode", + "locationName":"ReservedCacheNode" + } + }, + "ReservedCacheNodeMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedCacheNodes":{"shape":"ReservedCacheNodeList"} + } + }, + "ReservedCacheNodeNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedCacheNodeNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedCacheNodeQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedCacheNodeQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedCacheNodesOffering":{ + "type":"structure", + "members":{ + "ReservedCacheNodesOfferingId":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedCacheNodesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedCacheNodesOffering", + "locationName":"ReservedCacheNodesOffering" + } + }, + "ReservedCacheNodesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedCacheNodesOfferings":{"shape":"ReservedCacheNodesOfferingList"} + } + }, + "ReservedCacheNodesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedCacheNodesOfferingNotFound", + 
"httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetCacheParameterGroupMessage":{ + "type":"structure", + "required":[ + "CacheParameterGroupName", + "ParameterNameValues" + ], + "members":{ + "CacheParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "ParameterNameValues":{"shape":"ParameterNameValueList"} + } + }, + "RevokeCacheSecurityGroupIngressMessage":{ + "type":"structure", + "required":[ + "CacheSecurityGroupName", + "EC2SecurityGroupName", + "EC2SecurityGroupOwnerId" + ], + "members":{ + "CacheSecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "SecurityGroupIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SecurityGroupId" + } + }, + "SecurityGroupMembership":{ + "type":"structure", + "members":{ + "SecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "SecurityGroupMembershipList":{ + "type":"list", + "member":{"shape":"SecurityGroupMembership"} + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotName":{"shape":"String"}, + "CacheClusterId":{"shape":"String"}, + "SnapshotStatus":{"shape":"String"}, + "SnapshotSource":{"shape":"String"}, + "CacheNodeType":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "NumCacheNodes":{"shape":"IntegerOptional"}, + "PreferredAvailabilityZone":{"shape":"String"}, + "CacheClusterCreateTime":{"shape":"TStamp"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "TopicArn":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "CacheParameterGroupName":{"shape":"String"}, + "CacheSubnetGroupName":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "SnapshotRetentionLimit":{"shape":"IntegerOptional"}, + "SnapshotWindow":{"shape":"String"}, + "NodeSnapshots":{"shape":"NodeSnapshotList"} + }, + "wrapper":true + }, + 
"SnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotArnsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SnapshotArn" + } + }, + "SnapshotFeatureNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotFeatureNotSupportedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"Snapshot" + } + }, + "SnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "cache-cluster", + "cache-parameter-group", + "cache-security-group", + "cache-subnet-group" + ] + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"} + } + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + 
"TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "TagNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TagNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "TagQuotaPerResourceExceeded":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TagQuotaPerResourceExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeCacheSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "CacheSecurityGroup":{"shape":"CacheSecurityGroup"} + } + }, + "CopySnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "CreateCacheClusterResult":{ + "type":"structure", + "members":{ + "CacheCluster":{"shape":"CacheCluster"} + } + }, + "CreateCacheParameterGroupResult":{ + "type":"structure", + "members":{ + "CacheParameterGroup":{"shape":"CacheParameterGroup"} + } + }, + "CreateCacheSecurityGroupResult":{ + "type":"structure", + "members":{ + "CacheSecurityGroup":{"shape":"CacheSecurityGroup"} + } + }, + "CreateCacheSubnetGroupResult":{ + "type":"structure", + "members":{ + "CacheSubnetGroup":{"shape":"CacheSubnetGroup"} + } + }, + "CreateReplicationGroupResult":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, + "CreateSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "DeleteCacheClusterResult":{ + "type":"structure", + "members":{ + "CacheCluster":{"shape":"CacheCluster"} + } + }, + "DeleteReplicationGroupResult":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, + "DeleteSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "ModifyCacheClusterResult":{ 
+ "type":"structure", + "members":{ + "CacheCluster":{"shape":"CacheCluster"} + } + }, + "ModifyCacheSubnetGroupResult":{ + "type":"structure", + "members":{ + "CacheSubnetGroup":{"shape":"CacheSubnetGroup"} + } + }, + "ModifyReplicationGroupResult":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, + "PurchaseReservedCacheNodesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedCacheNode":{"shape":"ReservedCacheNode"} + } + }, + "RebootCacheClusterResult":{ + "type":"structure", + "members":{ + "CacheCluster":{"shape":"CacheCluster"} + } + }, + "RevokeCacheSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "CacheSecurityGroup":{"shape":"CacheSecurityGroup"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1390 @@ +{ + "version": "2.0", + "operations": { + "AddTagsToResource": "

    The AddTagsToResource action adds up to 10 cost allocation tags to the named resource. A cost allocation tag is a key-value pair where the key and value are case-sensitive. Cost allocation tags can be used to categorize and track your AWS costs.

    When you apply tags to your ElastiCache resources, AWS generates a cost allocation report as a comma-separated value (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories (such as cost centers, application names, or owners) to organize your costs across multiple services. For more information, see Using Cost Allocation Tags in Amazon ElastiCache.

    ", + "AuthorizeCacheSecurityGroupIngress": "

    The AuthorizeCacheSecurityGroupIngress action allows network ingress to a cache security group. Applications using ElastiCache must be running on Amazon EC2, and Amazon EC2 security groups are used as the authorization mechanism.

    You cannot authorize ingress from an Amazon EC2 security group in one region to an ElastiCache cluster in another region. ", + "CopySnapshot": "

    The CopySnapshot action makes a copy of an existing snapshot.

    ", + "CreateCacheCluster": "

    The CreateCacheCluster action creates a cache cluster. All nodes in the cache cluster run the same protocol-compliant cache engine software, either Memcached or Redis.

    ", + "CreateCacheParameterGroup": "

    The CreateCacheParameterGroup action creates a new cache parameter group. A cache parameter group is a collection of parameters that you apply to all of the nodes in a cache cluster.

    ", + "CreateCacheSecurityGroup": "

    The CreateCacheSecurityGroup action creates a new cache security group. Use a cache security group to control access to one or more cache clusters.

    Cache security groups are only used when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (VPC). If you are creating a cache cluster inside of a VPC, use a cache subnet group instead. For more information, see CreateCacheSubnetGroup.

    ", + "CreateCacheSubnetGroup": "

    The CreateCacheSubnetGroup action creates a new cache subnet group.

    Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (VPC).

    ", + "CreateReplicationGroup": "

    The CreateReplicationGroup action creates a replication group. A replication group is a collection of cache clusters, where one of the cache clusters is a read/write primary and the others are read-only replicas. Writes to the primary are automatically propagated to the replicas.

    When you create a replication group, you must specify an existing cache cluster that is in the primary role. When the replication group has been successfully created, you can add one or more read replica replicas to it, up to a total of five read replicas.

    Note: This action is valid only for Redis.

    ", + "CreateSnapshot": "

    The CreateSnapshot action creates a copy of an entire cache cluster at a specific moment in time.

    ", + "DeleteCacheCluster": "

    The DeleteCacheCluster action deletes a previously provisioned cache cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cache cluster itself. When you receive a successful response from this action, Amazon ElastiCache immediately begins deleting the cache cluster; you cannot cancel or revert this action.

    This API cannot be used to delete a cache cluster that is the last read replica of a replication group that has Multi-AZ mode enabled.

    ", + "DeleteCacheParameterGroup": "

    The DeleteCacheParameterGroup action deletes the specified cache parameter group. You cannot delete a cache parameter group if it is associated with any cache clusters.

    ", + "DeleteCacheSecurityGroup": "

    The DeleteCacheSecurityGroup action deletes a cache security group.

    You cannot delete a cache security group if it is associated with any cache clusters.", + "DeleteCacheSubnetGroup": "

    The DeleteCacheSubnetGroup action deletes a cache subnet group.

    You cannot delete a cache subnet group if it is associated with any cache clusters.", + "DeleteReplicationGroup": "

    The DeleteReplicationGroup action deletes an existing replication group. By default, this action deletes the entire replication group, including the primary cluster and all of the read replicas. You can optionally delete only the read replicas, while retaining the primary cluster.

    When you receive a successful response from this action, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this action.

    ", + "DeleteSnapshot": "

    The DeleteSnapshot action deletes an existing snapshot. When you receive a successful response from this action, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this action.

    ", + "DescribeCacheClusters": "

    The DescribeCacheClusters action returns information about all provisioned cache clusters if no cache cluster identifier is specified, or about a specific cache cluster if a cache cluster identifier is supplied.

    By default, abbreviated information about the cache clusters(s) will be returned. You can use the optional ShowDetails flag to retrieve detailed information about the cache nodes associated with the cache clusters. These details include the DNS address and port for the cache node endpoint.

    If the cluster is in the CREATING state, only cluster level information will be displayed until all of the nodes are successfully provisioned.

    If the cluster is in the DELETING state, only cluster level information will be displayed.

    If cache nodes are currently being added to the cache cluster, node endpoint information and creation time for the additional nodes will not be displayed until they are completely provisioned. When the cache cluster state is available, the cluster is ready for use.

    If cache nodes are currently being removed from the cache cluster, no endpoint information for the removed nodes is displayed.

    ", + "DescribeCacheEngineVersions": "

    The DescribeCacheEngineVersions action returns a list of the available cache engines and their versions.

    ", + "DescribeCacheParameterGroups": "

    The DescribeCacheParameterGroups action returns a list of cache parameter group descriptions. If a cache parameter group name is specified, the list will contain only the descriptions for that group.

    ", + "DescribeCacheParameters": "

    The DescribeCacheParameters action returns the detailed parameter list for a particular cache parameter group.

    ", + "DescribeCacheSecurityGroups": "

    The DescribeCacheSecurityGroups action returns a list of cache security group descriptions. If a cache security group name is specified, the list will contain only the description of that group.

    ", + "DescribeCacheSubnetGroups": "

    The DescribeCacheSubnetGroups action returns a list of cache subnet group descriptions. If a subnet group name is specified, the list will contain only the description of that group.

    ", + "DescribeEngineDefaultParameters": "

    The DescribeEngineDefaultParameters action returns the default engine and system parameter information for the specified cache engine.

    ", + "DescribeEvents": "

    The DescribeEvents action returns events related to cache clusters, cache security groups, and cache parameter groups. You can obtain events specific to a particular cache cluster, cache security group, or cache parameter group by providing the name as a parameter.

    By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days' worth of events if necessary.

    ", + "DescribeReplicationGroups": "

    The DescribeReplicationGroups action returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups.

    ", + "DescribeReservedCacheNodes": "

    The DescribeReservedCacheNodes action returns information about reserved cache nodes for this account, or about a specified reserved cache node.

    ", + "DescribeReservedCacheNodesOfferings": "

    The DescribeReservedCacheNodesOfferings action lists available reserved cache node offerings.

    ", + "DescribeSnapshots": "

    The DescribeSnapshots action returns information about cache cluster snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster.

    ", + "ListTagsForResource": "

    The ListTagsForResource action lists all cost allocation tags currently on the named resource. A cost allocation tag is a key-value pair where the key is case-sensitive and the value is optional. Cost allocation tags can be used to categorize and track your AWS costs.

    You can have a maximum of 10 cost allocation tags on an ElastiCache resource. For more information, see Using Cost Allocation Tags in Amazon ElastiCache.

    ", + "ModifyCacheCluster": "

    The ModifyCacheCluster action modifies the settings for a cache cluster. You can use this action to change one or more cluster configuration parameters by specifying the parameters and the new values.

    ", + "ModifyCacheParameterGroup": "

    The ModifyCacheParameterGroup action modifies the parameters of a cache parameter group. You can modify up to 20 parameters in a single request by submitting a list parameter name and value pairs.

    ", + "ModifyCacheSubnetGroup": "

    The ModifyCacheSubnetGroup action modifies an existing cache subnet group.

    ", + "ModifyReplicationGroup": "

    The ModifyReplicationGroup action modifies the settings for a replication group.

    ", + "PurchaseReservedCacheNodesOffering": "

    The PurchaseReservedCacheNodesOffering action allows you to purchase a reserved cache node offering.

    ", + "RebootCacheCluster": "

    The RebootCacheCluster action reboots some, or all, of the cache nodes within a provisioned cache cluster. This API will apply any modified cache parameter groups to the cache cluster. The reboot action takes place as soon as possible, and results in a momentary outage to the cache cluster. During the reboot, the cache cluster status is set to REBOOTING.

    The reboot causes the contents of the cache (for each cache node being rebooted) to be lost.

    When the reboot is complete, a cache cluster event is created.

    ", + "RemoveTagsFromResource": "

    The RemoveTagsFromResource action removes the tags identified by the TagKeys list from the named resource.

    ", + "ResetCacheParameterGroup": "

    The ResetCacheParameterGroup action modifies the parameters of a cache parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire cache parameter group, specify the ResetAllParameters and CacheParameterGroupName parameters.

    ", + "RevokeCacheSecurityGroupIngress": "

    The RevokeCacheSecurityGroupIngress action revokes ingress from a cache security group. Use this action to disallow access from an Amazon EC2 security group that had been previously authorized.

    " + }, + "service": "Amazon ElastiCache

    Amazon ElastiCache is a web service that makes it easier to set up, operate, and scale a distributed cache in the cloud.

    With ElastiCache, customers gain all of the benefits of a high-performance, in-memory cache with far less of the administrative burden of launching and managing a distributed cache. The service makes setup, scaling, and cluster failure handling much simpler than in a self-managed cache deployment.

    In addition, through integration with Amazon CloudWatch, customers get enhanced visibility into the key performance statistics associated with their cache and can receive alarms if a part of their cache runs hot.

    ", + "shapes": { + "AZMode": { + "base": null, + "refs": { + "CreateCacheClusterMessage$AZMode": "

    Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.

    This parameter is only supported for Memcached cache clusters.

    If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode.

    ", + "ModifyCacheClusterMessage$AZMode": "

    Specifies whether the new nodes in this Memcached cache cluster are all created in a single Availability Zone or created across multiple Availability Zones.

    Valid values: single-az | cross-az.

    This option is only supported for Memcached cache clusters.

    You cannot specify single-az if the Memcached cache cluster already has cache nodes in different Availability Zones. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone.

    Only newly created nodes will be located in different Availability Zones. For instructions on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

    " + } + }, + "AddTagsToResourceMessage": { + "base": "

    Represents the input of an AddTagsToResource action.

    ", + "refs": { + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified Amazon EC2 security group is already authorized for the specified cache security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    The specified Amazon EC2 security group is not authorized for the specified cache security group.

    ", + "refs": { + } + }, + "AuthorizeCacheSecurityGroupIngressMessage": { + "base": "

    Represents the input of an AuthorizeCacheSecurityGroupIngress action.

    ", + "refs": { + } + }, + "AutomaticFailoverStatus": { + "base": null, + "refs": { + "ReplicationGroup$AutomaticFailover": "

    Indicates the status of Multi-AZ for this replication group.

    ElastiCache Multi-AZ replication groups are not supported on:

    • Redis versions earlier than 2.8.6.
    • T1 and T2 cache node types.
    " + } + }, + "AvailabilityZone": { + "base": "

    Describes an Availability Zone in which the cache cluster is launched.

    ", + "refs": { + "Subnet$SubnetAvailabilityZone": "

    The Availability Zone associated with the subnet.

    " + } + }, + "AvailabilityZonesList": { + "base": null, + "refs": { + "CreateReplicationGroupMessage$PreferredCacheClusterAZs": "

    A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.

    If you are creating your replication group in an Amazon VPC (recommended), you can only locate cache clusters in availability zones associated with the subnets in the selected subnet group.

    The number of availability zones listed must equal the value of NumCacheClusters.

    Default: system chosen availability zones.

    Example: One Redis cache cluster in each of three availability zones. PreferredAvailabilityZones.member.1=us-west-2a PreferredAvailabilityZones.member.2=us-west-2c PreferredAvailabilityZones.member.3=us-west-2c

    " + } + }, + "AwsQueryErrorMessage": { + "base": null, + "refs": { + "InvalidParameterCombinationException$message": "

    Two or more parameters that must not be used together were used together.

    ", + "InvalidParameterValueException$message": "

    A parameter value is invalid.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "CacheCluster$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    ", + "CacheNodeTypeSpecificParameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "DescribeCacheEngineVersionsMessage$DefaultOnly": "

    If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned.

    ", + "ModifyCacheClusterMessage$ApplyImmediately": "

    If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the cache cluster.

    If false, then changes to the cache cluster are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.

    If you perform a ModifyCacheCluster before a pending modification is applied, the pending modification is replaced by the newer modification.

    Valid values: true | false

    Default: false

    ", + "ModifyReplicationGroupMessage$ApplyImmediately": "

    If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group.

    If false, then changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.

    Valid values: true | false

    Default: false

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ResetCacheParameterGroupMessage$ResetAllParameters": "

    If true, all parameters in the cache parameter group will be reset to default values. If false, no such action occurs.

    Valid values: true | false

    ", + "Snapshot$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateCacheClusterMessage$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    ", + "CreateReplicationGroupMessage$AutomaticFailoverEnabled": "

    Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails.

    If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group.

    Default: false

    ElastiCache Multi-AZ replication groups are not supported on:

    • Redis versions earlier than 2.8.6.
    • T1 and T2 cache node types.
    ", + "CreateReplicationGroupMessage$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    ", + "DeleteReplicationGroupMessage$RetainPrimaryCluster": "

    If set to true, all of the read replicas will be deleted, but the primary node will be retained.

    ", + "DescribeCacheClustersMessage$ShowCacheNodeInfo": "

    An optional flag that can be included in the DescribeCacheCluster request to retrieve information about the individual cache nodes.

    ", + "ModifyCacheClusterMessage$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    ", + "ModifyReplicationGroupMessage$AutomaticFailoverEnabled": "

    Whether a read replica will be automatically promoted to read/write primary if the existing primary encounters a failure.

    Valid values: true | false

    ElastiCache Multi-AZ replication groups are not supported on:

    • Redis versions earlier than 2.8.6.
    • T1 and T2 cache node types.
    ", + "ModifyReplicationGroupMessage$AutoMinorVersionUpgrade": "

    This parameter is currently disabled.

    " + } + }, + "CacheCluster": { + "base": "

    Contains all of the attributes of a specific cache cluster.

    ", + "refs": { + "CacheClusterList$member": null, + "CreateCacheClusterResult$CacheCluster": null, + "DeleteCacheClusterResult$CacheCluster": null, + "ModifyCacheClusterResult$CacheCluster": null, + "RebootCacheClusterResult$CacheCluster": null + } + }, + "CacheClusterAlreadyExistsFault": { + "base": "

    You already have a cache cluster with the given identifier.

    ", + "refs": { + } + }, + "CacheClusterList": { + "base": null, + "refs": { + "CacheClusterMessage$CacheClusters": "

    A list of cache clusters. Each item in the list contains detailed information about one cache cluster.

    " + } + }, + "CacheClusterMessage": { + "base": "

    Represents the output of a DescribeCacheClusters action.

    ", + "refs": { + } + }, + "CacheClusterNotFoundFault": { + "base": "

    The requested cache cluster ID does not refer to an existing cache cluster.

    ", + "refs": { + } + }, + "CacheEngineVersion": { + "base": "

    Provides all of the details about a particular cache engine version.

    ", + "refs": { + "CacheEngineVersionList$member": null + } + }, + "CacheEngineVersionList": { + "base": null, + "refs": { + "CacheEngineVersionMessage$CacheEngineVersions": "

    A list of cache engine version details. Each element in the list contains detailed information about one cache engine version.

    " + } + }, + "CacheEngineVersionMessage": { + "base": "

    Represents the output of a DescribeCacheEngineVersions action.

    ", + "refs": { + } + }, + "CacheNode": { + "base": "

    Represents an individual cache node within a cache cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "refs": { + "CacheNodeList$member": null + } + }, + "CacheNodeIdsList": { + "base": null, + "refs": { + "CacheParameterGroupStatus$CacheNodeIdsToReboot": "

    A list of the cache node IDs which need to be rebooted for parameter changes to be applied. A node ID is a numeric identifier (0001, 0002, etc.).

    ", + "ModifyCacheClusterMessage$CacheNodeIdsToRemove": "

    A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less than the existing number of cache nodes. The number of cache node IDs supplied in this parameter must match the difference between the existing number of cache nodes in the cluster or pending cache nodes, whichever is greater, and the value of NumCacheNodes in the request.

    For example: If you have 3 active cache nodes, 7 pending cache nodes, and the number of cache nodes in this ModifyCacheCluster call is 5, you must list 2 (7 - 5) cache node IDs to remove.

    ", + "PendingModifiedValues$CacheNodeIdsToRemove": "

    A list of cache node IDs that are being removed (or will be removed) from the cache cluster. A node ID is a numeric identifier (0001, 0002, etc.).

    ", + "RebootCacheClusterMessage$CacheNodeIdsToReboot": "

    A list of cache node IDs to reboot. A node ID is a numeric identifier (0001, 0002, etc.). To reboot an entire cache cluster, specify all of the cache node IDs.

    " + } + }, + "CacheNodeList": { + "base": null, + "refs": { + "CacheCluster$CacheNodes": "

    A list of cache nodes that are members of the cache cluster.

    " + } + }, + "CacheNodeTypeSpecificParameter": { + "base": "

    A parameter that has a different value for each cache node type it is applied to. For example, in a Redis cache cluster, a cache.m1.large cache node type would have a larger maxmemory value than a cache.m1.small type.

    ", + "refs": { + "CacheNodeTypeSpecificParametersList$member": null + } + }, + "CacheNodeTypeSpecificParametersList": { + "base": null, + "refs": { + "CacheParameterGroupDetails$CacheNodeTypeSpecificParameters": "

    A list of parameters specific to a particular cache node type. Each element in the list contains detailed information about one parameter.

    ", + "EngineDefaults$CacheNodeTypeSpecificParameters": "

    A list of parameters specific to a particular cache node type. Each element in the list contains detailed information about one parameter.

    " + } + }, + "CacheNodeTypeSpecificValue": { + "base": "

    A value that applies only to a certain cache node type.

    ", + "refs": { + "CacheNodeTypeSpecificValueList$member": null + } + }, + "CacheNodeTypeSpecificValueList": { + "base": null, + "refs": { + "CacheNodeTypeSpecificParameter$CacheNodeTypeSpecificValues": "

    A list of cache node types and their corresponding values for this parameter.

    " + } + }, + "CacheParameterGroup": { + "base": "

    Represents the output of a CreateCacheParameterGroup action.

    ", + "refs": { + "CacheParameterGroupList$member": null, + "CreateCacheParameterGroupResult$CacheParameterGroup": null + } + }, + "CacheParameterGroupAlreadyExistsFault": { + "base": "

    A cache parameter group with the requested name already exists.

    ", + "refs": { + } + }, + "CacheParameterGroupDetails": { + "base": "

    Represents the output of a DescribeCacheParameters action.

    ", + "refs": { + } + }, + "CacheParameterGroupList": { + "base": null, + "refs": { + "CacheParameterGroupsMessage$CacheParameterGroups": "

    A list of cache parameter groups. Each element in the list contains detailed information about one cache parameter group.

    " + } + }, + "CacheParameterGroupNameMessage": { + "base": "

    Represents the output of one of the following actions:

    • ModifyCacheParameterGroup
    • ResetCacheParameterGroup
    ", + "refs": { + } + }, + "CacheParameterGroupNotFoundFault": { + "base": "

    The requested cache parameter group name does not refer to an existing cache parameter group.

    ", + "refs": { + } + }, + "CacheParameterGroupQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the maximum number of cache security groups.

    ", + "refs": { + } + }, + "CacheParameterGroupStatus": { + "base": "

    The status of the cache parameter group.

    ", + "refs": { + "CacheCluster$CacheParameterGroup": null + } + }, + "CacheParameterGroupsMessage": { + "base": "

    Represents the output of a DescribeCacheParameterGroups action.

    ", + "refs": { + } + }, + "CacheSecurityGroup": { + "base": "

    Represents the output of one of the following actions:

    • AuthorizeCacheSecurityGroupIngress
    • CreateCacheSecurityGroup
    • RevokeCacheSecurityGroupIngress
    ", + "refs": { + "CacheSecurityGroups$member": null, + "AuthorizeCacheSecurityGroupIngressResult$CacheSecurityGroup": null, + "CreateCacheSecurityGroupResult$CacheSecurityGroup": null, + "RevokeCacheSecurityGroupIngressResult$CacheSecurityGroup": null + } + }, + "CacheSecurityGroupAlreadyExistsFault": { + "base": "

    A cache security group with the specified name already exists.

    ", + "refs": { + } + }, + "CacheSecurityGroupMembership": { + "base": "

    Represents a cache cluster's status within a particular cache security group.

    ", + "refs": { + "CacheSecurityGroupMembershipList$member": null + } + }, + "CacheSecurityGroupMembershipList": { + "base": null, + "refs": { + "CacheCluster$CacheSecurityGroups": "

    A list of cache security group elements, composed of name and status sub-elements.

    " + } + }, + "CacheSecurityGroupMessage": { + "base": "

    Represents the output of a DescribeCacheSecurityGroups action.

    ", + "refs": { + } + }, + "CacheSecurityGroupNameList": { + "base": null, + "refs": { + "CreateCacheClusterMessage$CacheSecurityGroupNames": "

    A list of security group names to associate with this cache cluster.

    Use this parameter only when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (VPC).

    ", + "CreateReplicationGroupMessage$CacheSecurityGroupNames": "

    A list of cache security group names to associate with this replication group.

    ", + "ModifyCacheClusterMessage$CacheSecurityGroupNames": "

    A list of cache security group names to authorize on this cache cluster. This change is asynchronously applied as soon as possible.

    This parameter can be used only with clusters that are created outside of an Amazon Virtual Private Cloud (VPC).

    Constraints: Must contain no more than 255 alphanumeric characters. Must not be \"Default\".

    ", + "ModifyReplicationGroupMessage$CacheSecurityGroupNames": "

    A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible.

    This parameter can be used only with a replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (VPC).

    Constraints: Must contain no more than 255 alphanumeric characters. Must not be \"Default\".

    " + } + }, + "CacheSecurityGroupNotFoundFault": { + "base": "

    The requested cache security group name does not refer to an existing cache security group.

    ", + "refs": { + } + }, + "CacheSecurityGroupQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache security groups.

    ", + "refs": { + } + }, + "CacheSecurityGroups": { + "base": null, + "refs": { + "CacheSecurityGroupMessage$CacheSecurityGroups": "

    A list of cache security groups. Each element in the list contains detailed information about one group.

    " + } + }, + "CacheSubnetGroup": { + "base": "

    Represents the output of one of the following actions:

    • CreateCacheSubnetGroup
    • ModifyCacheSubnetGroup
    ", + "refs": { + "CacheSubnetGroups$member": null, + "CreateCacheSubnetGroupResult$CacheSubnetGroup": null, + "ModifyCacheSubnetGroupResult$CacheSubnetGroup": null + } + }, + "CacheSubnetGroupAlreadyExistsFault": { + "base": "

    The requested cache subnet group name is already in use by an existing cache subnet group.

    ", + "refs": { + } + }, + "CacheSubnetGroupInUse": { + "base": "

    The requested cache subnet group is currently in use.

    ", + "refs": { + } + }, + "CacheSubnetGroupMessage": { + "base": "

    Represents the output of a DescribeCacheSubnetGroups action.

    ", + "refs": { + } + }, + "CacheSubnetGroupNotFoundFault": { + "base": "

    The requested cache subnet group name does not refer to an existing cache subnet group.

    ", + "refs": { + } + }, + "CacheSubnetGroupQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache subnet groups.

    ", + "refs": { + } + }, + "CacheSubnetGroups": { + "base": null, + "refs": { + "CacheSubnetGroupMessage$CacheSubnetGroups": "

    A list of cache subnet groups. Each element in the list contains detailed information about one group.

    " + } + }, + "CacheSubnetQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of subnets in a cache subnet group.

    ", + "refs": { + } + }, + "ClusterIdList": { + "base": null, + "refs": { + "ReplicationGroup$MemberClusters": "

    The names of all the cache clusters that are part of this replication group.

    " + } + }, + "ClusterQuotaForCustomerExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache clusters per customer.

    ", + "refs": { + } + }, + "CopySnapshotMessage": { + "base": "

    Represents the input of a CopySnapshotMessage action.

    ", + "refs": { + } + }, + "CreateCacheClusterMessage": { + "base": "

    Represents the input of a CreateCacheCluster action.

    ", + "refs": { + } + }, + "CreateCacheParameterGroupMessage": { + "base": "

    Represents the input of a CreateCacheParameterGroup action.

    ", + "refs": { + } + }, + "CreateCacheSecurityGroupMessage": { + "base": "

    Represents the input of a CreateCacheSecurityGroup action.

    ", + "refs": { + } + }, + "CreateCacheSubnetGroupMessage": { + "base": "

    Represents the input of a CreateCacheSubnetGroup action.

    ", + "refs": { + } + }, + "CreateReplicationGroupMessage": { + "base": "

    Represents the input of a CreateReplicationGroup action.

    ", + "refs": { + } + }, + "CreateSnapshotMessage": { + "base": "

    Represents the input of a CreateSnapshot action.

    ", + "refs": { + } + }, + "DeleteCacheClusterMessage": { + "base": "

    Represents the input of a DeleteCacheCluster action.

    ", + "refs": { + } + }, + "DeleteCacheParameterGroupMessage": { + "base": "

    Represents the input of a DeleteCacheParameterGroup action.

    ", + "refs": { + } + }, + "DeleteCacheSecurityGroupMessage": { + "base": "

    Represents the input of a DeleteCacheSecurityGroup action.

    ", + "refs": { + } + }, + "DeleteCacheSubnetGroupMessage": { + "base": "

    Represents the input of a DeleteCacheSubnetGroup action.

    ", + "refs": { + } + }, + "DeleteReplicationGroupMessage": { + "base": "

    Represents the input of a DeleteReplicationGroup action.

    ", + "refs": { + } + }, + "DeleteSnapshotMessage": { + "base": "

    Represents the input of a DeleteSnapshot action.

    ", + "refs": { + } + }, + "DescribeCacheClustersMessage": { + "base": "

    Represents the input of a DescribeCacheClusters action.

    ", + "refs": { + } + }, + "DescribeCacheEngineVersionsMessage": { + "base": "

    Represents the input of a DescribeCacheEngineVersions action.

    ", + "refs": { + } + }, + "DescribeCacheParameterGroupsMessage": { + "base": "

    Represents the input of a DescribeCacheParameterGroups action.

    ", + "refs": { + } + }, + "DescribeCacheParametersMessage": { + "base": "

    Represents the input of a DescribeCacheParameters action.

    ", + "refs": { + } + }, + "DescribeCacheSecurityGroupsMessage": { + "base": "

    Represents the input of a DescribeCacheSecurityGroups action.

    ", + "refs": { + } + }, + "DescribeCacheSubnetGroupsMessage": { + "base": "

    Represents the input of a DescribeCacheSubnetGroups action.

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    Represents the input of a DescribeEngineDefaultParameters action.

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    Represents the input of a DescribeEvents action.

    ", + "refs": { + } + }, + "DescribeReplicationGroupsMessage": { + "base": "

    Represents the input of a DescribeReplicationGroups action.

    ", + "refs": { + } + }, + "DescribeReservedCacheNodesMessage": { + "base": "

    Represents the input of a DescribeReservedCacheNodes action.

    ", + "refs": { + } + }, + "DescribeReservedCacheNodesOfferingsMessage": { + "base": "

    Represents the input of a DescribeReservedCacheNodesOfferings action.

    ", + "refs": { + } + }, + "DescribeSnapshotsListMessage": { + "base": "

    Represents the output of a DescribeSnapshots action.

    ", + "refs": { + } + }, + "DescribeSnapshotsMessage": { + "base": "

    Represents the input of a DescribeSnapshotsMessage action.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The monetary amount of the recurring charge.

    ", + "ReservedCacheNode$FixedPrice": "

    The fixed price charged for this reserved cache node.

    ", + "ReservedCacheNode$UsagePrice": "

    The hourly price charged for this reserved cache node.

    ", + "ReservedCacheNodesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedCacheNodesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "EC2SecurityGroup": { + "base": "

    Provides ownership and status information for an Amazon EC2 security group.

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "CacheSecurityGroup$EC2SecurityGroups": "

    A list of Amazon EC2 security groups that are associated with this cache security group.

    " + } + }, + "Endpoint": { + "base": "

    Represents the information required for client programs to connect to a cache node.

    ", + "refs": { + "CacheCluster$ConfigurationEndpoint": null, + "CacheNode$Endpoint": "

    The hostname for connecting to this cache node.

    ", + "NodeGroup$PrimaryEndpoint": null, + "NodeGroupMember$ReadEndpoint": null + } + }, + "EngineDefaults": { + "base": "

    Represents the output of a DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    Represents a single occurrence of something interesting within the system. Some examples of events are creating a cache cluster, adding or removing a cache node, or rebooting a node.

    ", + "refs": { + "EventList$member": null + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of events. Each element in the list contains detailed information about one event.

    " + } + }, + "EventsMessage": { + "base": "

    Represents the output of a DescribeEvents action.

    ", + "refs": { + } + }, + "InsufficientCacheClusterCapacityFault": { + "base": "

    The requested cache node type is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "Endpoint$Port": "

    The port number that the cache engine is listening on.

    ", + "ReservedCacheNode$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedCacheNode$CacheNodeCount": "

    The number of cache nodes that have been reserved.

    ", + "ReservedCacheNodesOffering$Duration": "

    The duration of the offering, in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CacheCluster$NumCacheNodes": "

    The number of cache nodes in the cache cluster.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    ", + "CacheCluster$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    Important
    If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.

    ", + "CreateCacheClusterMessage$NumCacheNodes": "

    The initial number of cache nodes that the cache cluster will have.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    If you need more than 20 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.

    ", + "CreateCacheClusterMessage$Port": "

    The port number on which each of the cache nodes will accept connections.

    ", + "CreateCacheClusterMessage$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    Note: This parameter is only valid if the Engine parameter is redis.

    Default: 0 (i.e., automatic backups are disabled for this cache cluster).

    ", + "CreateReplicationGroupMessage$NumCacheClusters": "

    The number of cache clusters this replication group will initially have.

    If Multi-AZ is enabled, the value of this parameter must be at least 2.

    The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas). If you need to exceed this limit, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request.

    ", + "CreateReplicationGroupMessage$Port": "

    The port number on which each member of the replication group will accept connections.

    ", + "CreateReplicationGroupMessage$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    Note: This parameter is only valid if the Engine parameter is redis.

    Default: 0 (i.e., automatic backups are disabled for this cache cluster).

    ", + "DescribeCacheClustersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeCacheSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes' worth of events to retrieve.

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeReplicationGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeReservedCacheNodesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeReservedCacheNodesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20; maximum 100.

    ", + "DescribeSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

    Default: 50

    Constraints: minimum 20; maximum 50.

    ", + "ModifyCacheClusterMessage$NumCacheNodes": "

    The number of cache nodes that the cache cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), then more nodes will be added. If the value is less than the number of existing cache nodes, then nodes will be removed. If the value is equal to the number of current cache nodes, then any pending add or remove requests are canceled.

    If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    Note:
    Adding or removing Memcached cache nodes can be applied immediately or as a pending action. See ApplyImmediately.
    A pending action to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending actions to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending action to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending action to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending action to add nodes. The customer can modify the previous pending action to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending actions to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cache cluster.

    ", + "ModifyCacheClusterMessage$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    Important
    If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.

    ", + "ModifyReplicationGroupMessage$SnapshotRetentionLimit": "

    The number of days for which ElastiCache will retain automatic node group snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted.

    Important
    If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.

    ", + "PendingModifiedValues$NumCacheNodes": "

    The new number of cache nodes for the cache cluster.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    ", + "PurchaseReservedCacheNodesOfferingMessage$CacheNodeCount": "

    The number of cache node instances to reserve.

    Default: 1

    ", + "Snapshot$NumCacheNodes": "

    The number of cache nodes in the source cache cluster.

    For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.

    ", + "Snapshot$Port": "

    The port number used by each cache node in the source cache cluster.

    ", + "Snapshot$SnapshotRetentionLimit": "

    For an automatic snapshot, the number of days for which ElastiCache will retain the snapshot before deleting it.

    For manual snapshots, this field reflects the SnapshotRetentionLimit for the source cache cluster when the snapshot was created. This field is otherwise ignored: Manual snapshots do not expire, and can only be deleted using the DeleteSnapshot action.

    Important
    If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.

    " + } + }, + "InvalidARNFault": { + "base": "

    The requested Amazon Resource Name (ARN) does not refer to an existing resource.

    ", + "refs": { + } + }, + "InvalidCacheClusterStateFault": { + "base": "

    The requested cache cluster is not in the available state.

    ", + "refs": { + } + }, + "InvalidCacheParameterGroupStateFault": { + "base": "

    The current state of the cache parameter group does not allow the requested action to occur.

    ", + "refs": { + } + }, + "InvalidCacheSecurityGroupStateFault": { + "base": "

    The current state of the cache security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidParameterCombinationException": { + "base": "

    Two or more incompatible parameters were specified.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    The value for a parameter is invalid.

    ", + "refs": { + } + }, + "InvalidReplicationGroupStateFault": { + "base": "

    The requested replication group is not in the available state.

    ", + "refs": { + } + }, + "InvalidSnapshotStateFault": { + "base": "

    The current state of the snapshot does not allow the requested action to occur.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    An invalid subnet identifier was specified.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The VPC network is in an invalid state.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    A list of TagKeys identifying the tags you want removed from the named resource. For example, TagKeys.member.1=Region removes the cost allocation tag with the key name Region from the resource named by the ResourceName parameter.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    The input parameters for the ListTagsForResource action.

    ", + "refs": { + } + }, + "ModifyCacheClusterMessage": { + "base": "

    Represents the input of a ModifyCacheCluster action.

    ", + "refs": { + } + }, + "ModifyCacheParameterGroupMessage": { + "base": "

    Represents the input of a ModifyCacheParameterGroup action.

    ", + "refs": { + } + }, + "ModifyCacheSubnetGroupMessage": { + "base": "

    Represents the input of a ModifyCacheSubnetGroup action.

    ", + "refs": { + } + }, + "ModifyReplicationGroupMessage": { + "base": "

    Represents the input of a ModifyReplicationGroups action.

    ", + "refs": { + } + }, + "NodeGroup": { + "base": "

    Represents a collection of cache nodes in a replication group.

    ", + "refs": { + "NodeGroupList$member": null + } + }, + "NodeGroupList": { + "base": null, + "refs": { + "ReplicationGroup$NodeGroups": "

    A single element list with information about the nodes in the replication group.

    " + } + }, + "NodeGroupMember": { + "base": "

    Represents a single node within a node group.

    ", + "refs": { + "NodeGroupMemberList$member": null + } + }, + "NodeGroupMemberList": { + "base": null, + "refs": { + "NodeGroup$NodeGroupMembers": "

    A list containing information about individual nodes within the node group.

    " + } + }, + "NodeQuotaForClusterExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache nodes in a single cache cluster.

    ", + "refs": { + } + }, + "NodeQuotaForCustomerExceededFault": { + "base": "

    The request cannot be processed because it would exceed the allowed number of cache nodes per customer.

    ", + "refs": { + } + }, + "NodeSnapshot": { + "base": "

    Represents an individual cache node in a snapshot of a cache cluster.

    ", + "refs": { + "NodeSnapshotList$member": null + } + }, + "NodeSnapshotList": { + "base": null, + "refs": { + "Snapshot$NodeSnapshots": "

    A list of the cache nodes in the source cache cluster.

    " + } + }, + "NotificationConfiguration": { + "base": "

    Describes a notification topic and its status. Notification topics are used for publishing ElastiCache events to subscribers using Amazon Simple Notification Service (SNS).

    ", + "refs": { + "CacheCluster$NotificationConfiguration": null + } + }, + "Parameter": { + "base": "

    Describes an individual setting that controls some aspect of ElastiCache behavior.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParameterNameValue": { + "base": "

    Describes a name-value pair that is used to update the value of a parameter.

    ", + "refs": { + "ParameterNameValueList$member": null + } + }, + "ParameterNameValueList": { + "base": null, + "refs": { + "ModifyCacheParameterGroupMessage$ParameterNameValues": "

    An array of parameter names and values for the parameter update. You must supply at least one parameter name and value; subsequent arguments are optional. A maximum of 20 parameters may be modified per request.

    ", + "ResetCacheParameterGroupMessage$ParameterNameValues": "

    An array of parameter names to be reset. If you are not resetting the entire cache parameter group, you must specify at least one parameter name.

    " + } + }, + "ParametersList": { + "base": null, + "refs": { + "CacheParameterGroupDetails$Parameters": "

    A list of Parameter instances.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    " + } + }, + "PendingAutomaticFailoverStatus": { + "base": null, + "refs": { + "ReplicationGroupPendingModifiedValues$AutomaticFailoverStatus": "

    Indicates the status of Multi-AZ for this replication group.

    ElastiCache Multi-AZ replication groups are not supported on:

    • Redis versions earlier than 2.8.6.
    • T1 and T2 cache node types.
    " + } + }, + "PendingModifiedValues": { + "base": "

    A group of settings that will be applied to the cache cluster in the future, or that are currently being applied.

    ", + "refs": { + "CacheCluster$PendingModifiedValues": null + } + }, + "PreferredAvailabilityZoneList": { + "base": null, + "refs": { + "CreateCacheClusterMessage$PreferredAvailabilityZones": "

    A list of the Availability Zones in which cache nodes will be created. The order of the zones in the list is not important.

    This option is only supported on Memcached.

    If you are creating your cache cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.

    The number of Availability Zones listed must equal the value of NumCacheNodes.

    If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list.

    Default: System chosen Availability Zones.

    Example: One Memcached node in each of three different Availability Zones: PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2b&PreferredAvailabilityZones.member.3=us-west-2c

    Example: All three Memcached nodes in one Availability Zone: PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2a&PreferredAvailabilityZones.member.3=us-west-2a

    ", + "ModifyCacheClusterMessage$NewAvailabilityZones": "

    The list of Availability Zones where the new Memcached cache nodes will be created.

    This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.

    This option is only supported on Memcached clusters.

    Scenarios:

    • Scenario 1: You have 3 active nodes and wish to add 2 nodes.
      Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes.
    • Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node.
      Specify NumCacheNodes=6 ((3 + 2) + 1)
    • and optionally specify an Availability Zone for the new node.
    • Scenario 3: You want to cancel all pending actions.
      Specify NumCacheNodes=3 to cancel all pending actions.

    The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes.

    If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.

    Impact of new add/remove requests upon pending requests

    Scenarios Pending action New Request Results
    Scenario-1 Delete Delete The new delete, pending or immediate, replaces the pending delete.
    Scenario-2 Delete Create The new create, pending or immediate, replaces the pending delete.
    Scenario-3 Create Delete The new delete, pending or immediate, replaces the pending create.
    Scenario-4 Create Create The new create is added to the pending create.
    Important:
    If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending.

    Example: NewAvailabilityZones.member.1=us-west-2a&NewAvailabilityZones.member.2=us-west-2b&NewAvailabilityZones.member.3=us-west-2c

    " + } + }, + "PurchaseReservedCacheNodesOfferingMessage": { + "base": "

    Represents the input of a PurchaseReservedCacheNodesOffering action.

    ", + "refs": { + } + }, + "RebootCacheClusterMessage": { + "base": "

    Represents the input of a RebootCacheCluster action.

    ", + "refs": { + } + }, + "RecurringCharge": { + "base": "

    Contains the specific price and frequency of a recurring charges for a reserved cache node, or for a reserved cache node offering.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedCacheNode$RecurringCharges": "

    The recurring price charged to run this reserved cache node.

    ", + "ReservedCacheNodesOffering$RecurringCharges": "

    The recurring price charged to run this reserved cache node.

    " + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    Represents the input of a RemoveTagsFromResource action.

    ", + "refs": { + } + }, + "ReplicationGroup": { + "base": "

    Contains all of the attributes of a specific replication group.

    ", + "refs": { + "ReplicationGroupList$member": null, + "CreateReplicationGroupResult$ReplicationGroup": null, + "DeleteReplicationGroupResult$ReplicationGroup": null, + "ModifyReplicationGroupResult$ReplicationGroup": null + } + }, + "ReplicationGroupAlreadyExistsFault": { + "base": "

    The specified replication group already exists.

    ", + "refs": { + } + }, + "ReplicationGroupList": { + "base": null, + "refs": { + "ReplicationGroupMessage$ReplicationGroups": "

    A list of replication groups. Each item in the list contains detailed information about one replication group.

    " + } + }, + "ReplicationGroupMessage": { + "base": "

    Represents the output of a DescribeReplicationGroups action.

    ", + "refs": { + } + }, + "ReplicationGroupNotFoundFault": { + "base": "

    The specified replication group does not exist.

    ", + "refs": { + } + }, + "ReplicationGroupPendingModifiedValues": { + "base": "

    The settings to be applied to the replication group, either immediately or during the next maintenance window.

    ", + "refs": { + "ReplicationGroup$PendingModifiedValues": "

    A group of settings to be applied to the replication group, either immediately or during the next maintenance window.

    " + } + }, + "ReservedCacheNode": { + "base": "

    Represents the output of a PurchaseReservedCacheNodesOffering action.

    ", + "refs": { + "ReservedCacheNodeList$member": null, + "PurchaseReservedCacheNodesOfferingResult$ReservedCacheNode": null + } + }, + "ReservedCacheNodeAlreadyExistsFault": { + "base": "

    You already have a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedCacheNodeList": { + "base": null, + "refs": { + "ReservedCacheNodeMessage$ReservedCacheNodes": "

    A list of reserved cache nodes. Each element in the list contains detailed information about one node.

    " + } + }, + "ReservedCacheNodeMessage": { + "base": "

    Represents the output of a DescribeReservedCacheNodes action.

    ", + "refs": { + } + }, + "ReservedCacheNodeNotFoundFault": { + "base": "

    The requested reserved cache node was not found.

    ", + "refs": { + } + }, + "ReservedCacheNodeQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the user's cache node quota.

    ", + "refs": { + } + }, + "ReservedCacheNodesOffering": { + "base": "

    Describes all of the attributes of a reserved cache node offering.

    ", + "refs": { + "ReservedCacheNodesOfferingList$member": null + } + }, + "ReservedCacheNodesOfferingList": { + "base": null, + "refs": { + "ReservedCacheNodesOfferingMessage$ReservedCacheNodesOfferings": "

    A list of reserved cache node offerings. Each element in the list contains detailed information about one offering.

    " + } + }, + "ReservedCacheNodesOfferingMessage": { + "base": "

    Represents the output of a DescribeReservedCacheNodesOfferings action.

    ", + "refs": { + } + }, + "ReservedCacheNodesOfferingNotFoundFault": { + "base": "

    The requested cache node offering does not exist.

    ", + "refs": { + } + }, + "ResetCacheParameterGroupMessage": { + "base": "

    Represents the input of a ResetCacheParameterGroup action.

    ", + "refs": { + } + }, + "RevokeCacheSecurityGroupIngressMessage": { + "base": "

    Represents the input of a RevokeCacheSecurityGroupIngress action.

    ", + "refs": { + } + }, + "SecurityGroupIdsList": { + "base": null, + "refs": { + "CreateCacheClusterMessage$SecurityGroupIds": "

    One or more VPC security groups associated with the cache cluster.

    Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (VPC).

    ", + "CreateReplicationGroupMessage$SecurityGroupIds": "

    One or more Amazon VPC security groups associated with this replication group.

    Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (VPC).

    ", + "ModifyCacheClusterMessage$SecurityGroupIds": "

    Specifies the VPC Security Groups associated with the cache cluster.

    This parameter can be used only with clusters that are created in an Amazon Virtual Private Cloud (VPC).

    ", + "ModifyReplicationGroupMessage$SecurityGroupIds": "

    Specifies the VPC Security Groups associated with the cache clusters in the replication group.

    This parameter can be used only with replication groups containing cache clusters running in an Amazon Virtual Private Cloud (VPC).

    " + } + }, + "SecurityGroupMembership": { + "base": "

    Represents a single cache security group and its status.

    ", + "refs": { + "SecurityGroupMembershipList$member": null + } + }, + "SecurityGroupMembershipList": { + "base": null, + "refs": { + "CacheCluster$SecurityGroups": "

    A list of VPC Security Groups associated with the cache cluster.

    " + } + }, + "Snapshot": { + "base": "

    Represents a copy of an entire cache cluster as of the time when the snapshot was taken.

    ", + "refs": { + "SnapshotList$member": null, + "CopySnapshotResult$Snapshot": null, + "CreateSnapshotResult$Snapshot": null, + "DeleteSnapshotResult$Snapshot": null + } + }, + "SnapshotAlreadyExistsFault": { + "base": "

    You already have a snapshot with the given name.

    ", + "refs": { + } + }, + "SnapshotArnsList": { + "base": null, + "refs": { + "CreateCacheClusterMessage$SnapshotArns": "

    A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file will be used to populate the node group. The Amazon S3 object name in the ARN cannot contain any commas.

    Note: This parameter is only valid if the Engine parameter is redis.

    Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb

    ", + "CreateReplicationGroupMessage$SnapshotArns": "

    A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file will be used to populate the node group. The Amazon S3 object name in the ARN cannot contain any commas.

    Note: This parameter is only valid if the Engine parameter is redis.

    Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb

    " + } + }, + "SnapshotFeatureNotSupportedFault": { + "base": "

    You attempted one of the following actions:

    • Creating a snapshot of a Redis cache cluster running on a t1.micro cache node.

    • Creating a snapshot of a cache cluster that is running Memcached rather than Redis.

    Neither of these are supported by ElastiCache.

    ", + "refs": { + } + }, + "SnapshotList": { + "base": null, + "refs": { + "DescribeSnapshotsListMessage$Snapshots": "

    A list of snapshots. Each item in the list contains detailed information about one snapshot.

    " + } + }, + "SnapshotNotFoundFault": { + "base": "

    The requested snapshot name does not refer to an existing snapshot.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    The request cannot be processed because it would exceed the maximum number of snapshots.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    Valid values are: cache-cluster | cache-parameter-group | cache-security-group | cache-subnet-group

    ", + "Event$SourceType": "

    Specifies the origin of this event - a cache cluster, a parameter group, a security group, etc.

    " + } + }, + "String": { + "base": null, + "refs": { + "AddTagsToResourceMessage$ResourceName": "

    The name of the resource to which the tags are to be added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster.

    ", + "AuthorizeCacheSecurityGroupIngressMessage$CacheSecurityGroupName": "

    The cache security group which will allow network ingress.

    ", + "AuthorizeCacheSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The Amazon EC2 security group to be authorized for ingress to the cache security group.

    ", + "AuthorizeCacheSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS account number of the Amazon EC2 security group owner. Note that this is not the same thing as an AWS access key ID - you must provide a valid AWS account number for this parameter.

    ", + "AvailabilityZone$Name": "

    The name of the Availability Zone.

    ", + "AvailabilityZonesList$member": null, + "CacheCluster$CacheClusterId": "

    The user-supplied identifier of the cache cluster. This identifier is a unique key that identifies a cache cluster.

    ", + "CacheCluster$ClientDownloadLandingPage": "

    The URL of the web page where you can download the latest ElastiCache client library.

    ", + "CacheCluster$CacheNodeType": "

    The name of the compute and memory capacity node type for the cache cluster.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "CacheCluster$Engine": "

    The name of the cache engine (memcached or redis) to be used for this cache cluster.

    ", + "CacheCluster$EngineVersion": "

    The version of the cache engine version that is used in this cache cluster.

    ", + "CacheCluster$CacheClusterStatus": "

    The current state of this cache cluster, one of the following values: available, creating, deleted, deleting, incompatible-network, modifying, rebooting cache cluster nodes, restore-failed, or snapshotting.

    ", + "CacheCluster$PreferredAvailabilityZone": "

    The name of the Availability Zone in which the cache cluster is located or \"Multiple\" if the cache nodes are located in different Availability Zones.

    ", + "CacheCluster$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun
    • mon
    • tue
    • wed
    • thu
    • fri
    • sat

    Example: sun:05:00-sun:09:00

    ", + "CacheCluster$CacheSubnetGroupName": "

    The name of the cache subnet group associated with the cache cluster.

    ", + "CacheCluster$ReplicationGroupId": "

    The replication group to which this cache cluster belongs. If this field is empty, the cache cluster is not associated with any replication group.

    ", + "CacheCluster$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster.

    Example: 05:00-09:00

    ", + "CacheClusterMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheEngineVersion$Engine": "

    The name of the cache engine.

    ", + "CacheEngineVersion$EngineVersion": "

    The version number of the cache engine.

    ", + "CacheEngineVersion$CacheParameterGroupFamily": "

    The name of the cache parameter group family associated with this cache engine.

    ", + "CacheEngineVersion$CacheEngineDescription": "

    The description of the cache engine.

    ", + "CacheEngineVersion$CacheEngineVersionDescription": "

    The description of the cache engine version.

    ", + "CacheEngineVersionMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheNode$CacheNodeId": "

    The cache node identifier. A node ID is a numeric identifier (0001, 0002, etc.). The combination of cluster ID and node ID uniquely identifies every cache node used in a customer's AWS account.

    ", + "CacheNode$CacheNodeStatus": "

    The current state of this cache node.

    ", + "CacheNode$ParameterGroupStatus": "

    The status of the parameter group applied to this cache node.

    ", + "CacheNode$SourceCacheNodeId": "

    The ID of the primary node to which this read replica node is synchronized. If this field is empty, then this node is not associated with a primary cache cluster.

    ", + "CacheNode$CustomerAvailabilityZone": "

    The Availability Zone where this node was created and now resides.

    ", + "CacheNodeIdsList$member": null, + "CacheNodeTypeSpecificParameter$ParameterName": "

    The name of the parameter.

    ", + "CacheNodeTypeSpecificParameter$Description": "

    A description of the parameter.

    ", + "CacheNodeTypeSpecificParameter$Source": "

    The source of the parameter value.

    ", + "CacheNodeTypeSpecificParameter$DataType": "

    The valid data type for the parameter.

    ", + "CacheNodeTypeSpecificParameter$AllowedValues": "

    The valid range of values for the parameter.

    ", + "CacheNodeTypeSpecificParameter$MinimumEngineVersion": "

    The earliest cache engine version to which the parameter can apply.

    ", + "CacheNodeTypeSpecificValue$CacheNodeType": "

    The cache node type for which this value applies.

    ", + "CacheNodeTypeSpecificValue$Value": "

    The value for the cache node type.

    ", + "CacheParameterGroup$CacheParameterGroupName": "

    The name of the cache parameter group.

    ", + "CacheParameterGroup$CacheParameterGroupFamily": "

    The name of the cache parameter group family that this cache parameter group is compatible with.

    ", + "CacheParameterGroup$Description": "

    The description for this cache parameter group.

    ", + "CacheParameterGroupDetails$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheParameterGroupNameMessage$CacheParameterGroupName": "

    The name of the cache parameter group.

    ", + "CacheParameterGroupStatus$CacheParameterGroupName": "

    The name of the cache parameter group.

    ", + "CacheParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "CacheParameterGroupsMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheSecurityGroup$OwnerId": "

    The AWS account ID of the cache security group owner.

    ", + "CacheSecurityGroup$CacheSecurityGroupName": "

    The name of the cache security group.

    ", + "CacheSecurityGroup$Description": "

    The description of the cache security group.

    ", + "CacheSecurityGroupMembership$CacheSecurityGroupName": "

    The name of the cache security group.

    ", + "CacheSecurityGroupMembership$Status": "

    The membership status in the cache security group. The status changes when a cache security group is modified, or when the cache security groups assigned to a cache cluster are modified.

    ", + "CacheSecurityGroupMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "CacheSecurityGroupNameList$member": null, + "CacheSubnetGroup$CacheSubnetGroupName": "

    The name of the cache subnet group.

    ", + "CacheSubnetGroup$CacheSubnetGroupDescription": "

    The description of the cache subnet group.

    ", + "CacheSubnetGroup$VpcId": "

    The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet group.

    ", + "CacheSubnetGroupMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "ClusterIdList$member": null, + "CopySnapshotMessage$SourceSnapshotName": "

    The name of an existing snapshot from which to copy.

    ", + "CopySnapshotMessage$TargetSnapshotName": "

    A name for the copied snapshot.

    ", + "CreateCacheClusterMessage$CacheClusterId": "

    The node group identifier. This parameter is stored as a lowercase string.

    Constraints:

    • A name must contain from 1 to 20 alphanumeric characters or hyphens.
    • The first character must be a letter.
    • A name cannot end with a hyphen or contain two consecutive hyphens.
    ", + "CreateCacheClusterMessage$ReplicationGroupId": "

    The ID of the replication group to which this cache cluster should belong. If this parameter is specified, the cache cluster will be added to the specified replication group as a read replica; otherwise, the cache cluster will be a standalone primary that is not part of any replication group.

    If the specified replication group is Multi-AZ enabled and the availability zone is not specified, the cache cluster will be created in availability zones that provide the best spread of read replicas across availability zones.

    Note: This parameter is only valid if the Engine parameter is redis.

    ", + "CreateCacheClusterMessage$PreferredAvailabilityZone": "

    The EC2 Availability Zone in which the cache cluster will be created.

    All nodes belonging to this Memcached cache cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones.

    Default: System chosen Availability Zone.

    ", + "CreateCacheClusterMessage$CacheNodeType": "

    The compute and memory capacity of the nodes in the node group.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "CreateCacheClusterMessage$Engine": "

    The name of the cache engine to be used for this cache cluster.

    Valid values for this parameter are:

    memcached | redis

    ", + "CreateCacheClusterMessage$EngineVersion": "

    The version number of the cache engine to be used for this cache cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions action.

    ", + "CreateCacheClusterMessage$CacheParameterGroupName": "

    The name of the parameter group to associate with this cache cluster. If this argument is omitted, the default parameter group for the specified engine is used.

    ", + "CreateCacheClusterMessage$CacheSubnetGroupName": "

    The name of the subnet group to be used for the cache cluster.

    Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (VPC).

    ", + "CreateCacheClusterMessage$SnapshotName": "

    The name of a snapshot from which to restore data into the new node group. The snapshot status changes to restoring while the new node group is being created.

    Note: This parameter is only valid if the Engine parameter is redis.

    ", + "CreateCacheClusterMessage$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun
    • mon
    • tue
    • wed
    • thu
    • fri
    • sat

    Example: sun:05:00-sun:09:00

    ", + "CreateCacheClusterMessage$NotificationTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications will be sent.

    The Amazon SNS topic owner must be the same as the cache cluster owner.", + "CreateCacheClusterMessage$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your node group.

    Example: 05:00-09:00

    If you do not specify this parameter, then ElastiCache will automatically choose an appropriate time range.

    Note: This parameter is only valid if the Engine parameter is redis.

    ", + "CreateCacheParameterGroupMessage$CacheParameterGroupName": "

    A user-specified name for the cache parameter group.

    ", + "CreateCacheParameterGroupMessage$CacheParameterGroupFamily": "

    The name of the cache parameter group family the cache parameter group can be used with.

    Valid values are: memcached1.4 | redis2.6 | redis2.8

    ", + "CreateCacheParameterGroupMessage$Description": "

    A user-specified description for the cache parameter group.

    ", + "CreateCacheSecurityGroupMessage$CacheSecurityGroupName": "

    A name for the cache security group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters. Cannot be the word \"Default\".

    Example: mysecuritygroup

    ", + "CreateCacheSecurityGroupMessage$Description": "

    A description for the cache security group.

    ", + "CreateCacheSubnetGroupMessage$CacheSubnetGroupName": "

    A name for the cache subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens.

    Example: mysubnetgroup

    ", + "CreateCacheSubnetGroupMessage$CacheSubnetGroupDescription": "

    A description for the cache subnet group.

    ", + "CreateReplicationGroupMessage$ReplicationGroupId": "

    The replication group identifier. This parameter is stored as a lowercase string.

    Constraints:

    • A name must contain from 1 to 20 alphanumeric characters or hyphens.
    • The first character must be a letter.
    • A name cannot end with a hyphen or contain two consecutive hyphens.
    ", + "CreateReplicationGroupMessage$ReplicationGroupDescription": "

    A user-created description for the replication group.

    ", + "CreateReplicationGroupMessage$PrimaryClusterId": "

    The identifier of the cache cluster that will serve as the primary for this replication group. This cache cluster must already exist and have a status of available.

    This parameter is not required if NumCacheClusters is specified.

    ", + "CreateReplicationGroupMessage$CacheNodeType": "

    The compute and memory capacity of the nodes in the node group.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "CreateReplicationGroupMessage$Engine": "

    The name of the cache engine to be used for the cache clusters in this replication group.

    Default: redis

    ", + "CreateReplicationGroupMessage$EngineVersion": "

    The version number of the cache engine to be used for the cache clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions action.

    ", + "CreateReplicationGroupMessage$CacheParameterGroupName": "

    The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.

    ", + "CreateReplicationGroupMessage$CacheSubnetGroupName": "

    The name of the cache subnet group to be used for the replication group.

    ", + "CreateReplicationGroupMessage$SnapshotName": "

    The name of a snapshot from which to restore data into the new node group. The snapshot status changes to restoring while the new node group is being created.

    Note: This parameter is only valid if the Engine parameter is redis.

    ", + "CreateReplicationGroupMessage$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun
    • mon
    • tue
    • wed
    • thu
    • fri
    • sat

    Example: sun:05:00-sun:09:00

    ", + "CreateReplicationGroupMessage$NotificationTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications will be sent.

    The Amazon SNS topic owner must be the same as the cache cluster owner.", + "CreateReplicationGroupMessage$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your node group.

    Example: 05:00-09:00

    If you do not specify this parameter, then ElastiCache will automatically choose an appropriate time range.

    Note: This parameter is only valid if the Engine parameter is redis.

    ", + "CreateSnapshotMessage$CacheClusterId": "

    The identifier of an existing cache cluster. The snapshot will be created from this cache cluster.

    ", + "CreateSnapshotMessage$SnapshotName": "

    A name for the snapshot being created.

    ", + "DeleteCacheClusterMessage$CacheClusterId": "

    The cache cluster identifier for the cluster to be deleted. This parameter is not case sensitive.

    ", + "DeleteCacheClusterMessage$FinalSnapshotIdentifier": "

    The user-supplied name of a final cache cluster snapshot. This is the unique name that identifies the snapshot. ElastiCache creates the snapshot, and then deletes the cache cluster immediately afterward.

    ", + "DeleteCacheParameterGroupMessage$CacheParameterGroupName": "

    The name of the cache parameter group to delete.

    The specified cache security group must not be associated with any cache clusters.", + "DeleteCacheSecurityGroupMessage$CacheSecurityGroupName": "

    The name of the cache security group to delete.

    You cannot delete the default security group.", + "DeleteCacheSubnetGroupMessage$CacheSubnetGroupName": "

    The name of the cache subnet group to delete.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens.

    ", + "DeleteReplicationGroupMessage$ReplicationGroupId": "

    The identifier for the cluster to be deleted. This parameter is not case sensitive.

    ", + "DeleteReplicationGroupMessage$FinalSnapshotIdentifier": "

    The name of a final node group snapshot. ElastiCache creates the snapshot from the primary node in the cluster, rather than one of the replicas; this is to ensure that it captures the freshest data. After the final snapshot is taken, the cluster is immediately deleted.

    ", + "DeleteSnapshotMessage$SnapshotName": "

    The name of the snapshot to be deleted.

    ", + "DescribeCacheClustersMessage$CacheClusterId": "

    The user-supplied cluster identifier. If this parameter is specified, only information about that specific cache cluster is returned. This parameter isn't case sensitive.

    ", + "DescribeCacheClustersMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheEngineVersionsMessage$Engine": "

    The cache engine to return. Valid values: memcached | redis

    ", + "DescribeCacheEngineVersionsMessage$EngineVersion": "

    The cache engine version to return.

    Example: 1.4.14

    ", + "DescribeCacheEngineVersionsMessage$CacheParameterGroupFamily": "

    The name of a specific cache parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeCacheEngineVersionsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheParameterGroupsMessage$CacheParameterGroupName": "

    The name of a specific cache parameter group to return details for.

    ", + "DescribeCacheParameterGroupsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheParametersMessage$CacheParameterGroupName": "

    The name of a specific cache parameter group to return details for.

    ", + "DescribeCacheParametersMessage$Source": "

    The parameter types to return.

    Valid values: user | system | engine-default

    ", + "DescribeCacheParametersMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheSecurityGroupsMessage$CacheSecurityGroupName": "

    The name of the cache security group to return details for.

    ", + "DescribeCacheSecurityGroupsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeCacheSubnetGroupsMessage$CacheSubnetGroupName": "

    The name of the cache subnet group to return details for.

    ", + "DescribeCacheSubnetGroupsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$CacheParameterGroupFamily": "

    The name of the cache parameter group family. Valid values are: memcached1.4 | redis2.6 | redis2.8

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    ", + "DescribeEventsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReplicationGroupsMessage$ReplicationGroupId": "

    The identifier for the replication group to be described. This parameter is not case sensitive.

    If you do not specify this parameter, information about all replication groups is returned.

    ", + "DescribeReplicationGroupsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedCacheNodesMessage$ReservedCacheNodeId": "

    The reserved cache node identifier filter value. Use this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedCacheNodesMessage$ReservedCacheNodesOfferingId": "

    The offering identifier filter value. Use this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedCacheNodesMessage$CacheNodeType": "

    The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "DescribeReservedCacheNodesMessage$Duration": "

    The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedCacheNodesMessage$ProductDescription": "

    The product description filter value. Use this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedCacheNodesMessage$OfferingType": "

    The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

    Valid values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"

    ", + "DescribeReservedCacheNodesMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedCacheNodesOfferingsMessage$ReservedCacheNodesOfferingId": "

    The offering identifier filter value. Use this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedCacheNodesOfferingsMessage$CacheNodeType": "

    The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "DescribeReservedCacheNodesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedCacheNodesOfferingsMessage$ProductDescription": "

    The product description filter value. Use this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedCacheNodesOfferingsMessage$OfferingType": "

    The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"

    ", + "DescribeReservedCacheNodesOfferingsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeSnapshotsListMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeSnapshotsMessage$CacheClusterId": "

    A user-supplied cluster identifier. If this parameter is specified, only snapshots associated with that specific cache cluster will be described.

    ", + "DescribeSnapshotsMessage$SnapshotName": "

    A user-supplied name of the snapshot. If this parameter is specified, only this snapshot will be described.

    ", + "DescribeSnapshotsMessage$SnapshotSource": "

    If set to system, the output shows snapshots that were automatically created by ElastiCache. If set to user, the output shows snapshots that were manually created. If omitted, the output shows both automatically and manually created snapshots.

    ", + "DescribeSnapshotsMessage$Marker": "

    An optional marker returned from a prior request. Use this marker for pagination of results from this action. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EC2SecurityGroup$Status": "

    The status of the Amazon EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    The name of the Amazon EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    The AWS account ID of the Amazon EC2 security group owner.

    ", + "Endpoint$Address": "

    The DNS hostname of the cache node.

    ", + "EngineDefaults$CacheParameterGroupFamily": "

    Specifies the name of the cache parameter group family to which the engine default parameters apply.

    ", + "EngineDefaults$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "Event$SourceIdentifier": "

    The identifier for the source of the event. For example, if the event occurred at the cache cluster level, the identifier would be the name of the cache cluster.

    ", + "Event$Message": "

    The text of the event.

    ", + "EventsMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The name of the resource for which you want the list of tags, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster.

    ", + "ModifyCacheClusterMessage$CacheClusterId": "

    The cache cluster identifier. This value is stored as a lowercase string.

    ", + "ModifyCacheClusterMessage$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun
    • mon
    • tue
    • wed
    • thu
    • fri
    • sat

    Example: sun:05:00-sun:09:00

    ", + "ModifyCacheClusterMessage$NotificationTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications will be sent.

    The Amazon SNS topic owner must be the same as the cache cluster owner. ", + "ModifyCacheClusterMessage$CacheParameterGroupName": "

    The name of the cache parameter group to apply to this cache cluster. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.

    ", + "ModifyCacheClusterMessage$NotificationTopicStatus": "

    The status of the Amazon SNS notification topic. Notifications are sent only if the status is active.

    Valid values: active | inactive

    ", + "ModifyCacheClusterMessage$EngineVersion": "

    The upgraded version of the cache engine to be run on the cache nodes.

    ", + "ModifyCacheClusterMessage$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster.

    ", + "ModifyCacheParameterGroupMessage$CacheParameterGroupName": "

    The name of the cache parameter group to modify.

    ", + "ModifyCacheSubnetGroupMessage$CacheSubnetGroupName": "

    The name for the cache subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens.

    Example: mysubnetgroup

    ", + "ModifyCacheSubnetGroupMessage$CacheSubnetGroupDescription": "

    A description for the cache subnet group.

    ", + "ModifyReplicationGroupMessage$ReplicationGroupId": "

    The identifier of the replication group to modify.

    ", + "ModifyReplicationGroupMessage$ReplicationGroupDescription": "

    A description for the replication group. Maximum length is 255 characters.

    ", + "ModifyReplicationGroupMessage$PrimaryClusterId": "

    If this parameter is specified, ElastiCache will promote each of the cache clusters in the specified replication group to the primary role. The nodes of all other cache clusters in the replication group will be read replicas.

    ", + "ModifyReplicationGroupMessage$SnapshottingClusterId": "

    The cache cluster ID that will be used as the daily snapshot source for the replication group.

    ", + "ModifyReplicationGroupMessage$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun
    • mon
    • tue
    • wed
    • thu
    • fri
    • sat

    Example: sun:05:00-sun:09:00

    ", + "ModifyReplicationGroupMessage$NotificationTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications will be sent.

    The Amazon SNS topic owner must be the same as the replication group owner. ", + "ModifyReplicationGroupMessage$CacheParameterGroupName": "

    The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.

    ", + "ModifyReplicationGroupMessage$NotificationTopicStatus": "

    The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active.

    Valid values: active | inactive

    ", + "ModifyReplicationGroupMessage$EngineVersion": "

    The upgraded version of the cache engine to be run on the cache clusters in the replication group.

    ", + "ModifyReplicationGroupMessage$SnapshotWindow": "

    The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of the node group specified by SnapshottingClusterId.

    Example: 05:00-09:00

    If you do not specify this parameter, then ElastiCache will automatically choose an appropriate time range.

    ", + "NodeGroup$NodeGroupId": "

    The identifier for the node group. A replication group contains only one node group; therefore, the node group ID is 0001.

    ", + "NodeGroup$Status": "

    The current state of this replication group - creating, available, etc.

    ", + "NodeGroupMember$CacheClusterId": "

    The ID of the cache cluster to which the node belongs.

    ", + "NodeGroupMember$CacheNodeId": "

    The ID of the node within its cache cluster. A node ID is a numeric identifier (0001, 0002, etc.).

    ", + "NodeGroupMember$PreferredAvailabilityZone": "

    The name of the Availability Zone in which the node is located.

    ", + "NodeGroupMember$CurrentRole": "

    The role that is currently assigned to the node - primary or replica.

    ", + "NodeSnapshot$CacheNodeId": "

    The cache node identifier for the node in the source cache cluster.

    ", + "NodeSnapshot$CacheSize": "

    The size of the cache on the source cache node.

    ", + "NotificationConfiguration$TopicArn": "

    The Amazon Resource Name (ARN) that identifies the topic.

    ", + "NotificationConfiguration$TopicStatus": "

    The current state of the topic.

    ", + "Parameter$ParameterName": "

    The name of the parameter.

    ", + "Parameter$ParameterValue": "

    The value of the parameter.

    ", + "Parameter$Description": "

    A description of the parameter.

    ", + "Parameter$Source": "

    The source of the parameter.

    ", + "Parameter$DataType": "

    The valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    The valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest cache engine version to which the parameter can apply.

    ", + "ParameterNameValue$ParameterName": "

    The name of the parameter.

    ", + "ParameterNameValue$ParameterValue": "

    The value of the parameter.

    ", + "PendingModifiedValues$EngineVersion": "

    The new cache engine version that the cache cluster will run.

    ", + "PreferredAvailabilityZoneList$member": null, + "PurchaseReservedCacheNodesOfferingMessage$ReservedCacheNodesOfferingId": "

    The ID of the reserved cache node offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedCacheNodesOfferingMessage$ReservedCacheNodeId": "

    A customer-specified identifier to track this reservation.

    Example: myreservationID

    ", + "RebootCacheClusterMessage$CacheClusterId": "

    The cache cluster identifier. This parameter is stored as a lowercase string.

    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The name of the ElastiCache resource from which you want the listed tags removed, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster.

    ", + "ReplicationGroup$ReplicationGroupId": "

    The identifier for the replication group.

    ", + "ReplicationGroup$Description": "

    The description of the replication group.

    ", + "ReplicationGroup$Status": "

    The current state of this replication group - creating, available, etc.

    ", + "ReplicationGroup$SnapshottingClusterId": "

    The cache cluster ID that is used as the daily snapshot source for the replication group.

    ", + "ReplicationGroupMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "ReplicationGroupPendingModifiedValues$PrimaryClusterId": "

    The primary cluster ID which will be applied immediately (if --apply-immediately was specified), or during the next maintenance window.

    ", + "ReservedCacheNode$ReservedCacheNodeId": "

    The unique identifier for the reservation.

    ", + "ReservedCacheNode$ReservedCacheNodesOfferingId": "

    The offering identifier.

    ", + "ReservedCacheNode$CacheNodeType": "

    The cache node type for the reserved cache nodes.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "ReservedCacheNode$ProductDescription": "

    The description of the reserved cache node.

    ", + "ReservedCacheNode$OfferingType": "

    The offering type of this reserved cache node.

    ", + "ReservedCacheNode$State": "

    The state of the reserved cache node.

    ", + "ReservedCacheNodeMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "ReservedCacheNodesOffering$ReservedCacheNodesOfferingId": "

    A unique identifier for the reserved cache node offering.

    ", + "ReservedCacheNodesOffering$CacheNodeType": "

    The cache node type for the reserved cache node.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "ReservedCacheNodesOffering$ProductDescription": "

    The cache engine used by the offering.

    ", + "ReservedCacheNodesOffering$OfferingType": "

    The offering type.

    ", + "ReservedCacheNodesOfferingMessage$Marker": "

    Provides an identifier to allow retrieval of paginated results.

    ", + "ResetCacheParameterGroupMessage$CacheParameterGroupName": "

    The name of the cache parameter group to reset.

    ", + "RevokeCacheSecurityGroupIngressMessage$CacheSecurityGroupName": "

    The name of the cache security group to revoke ingress from.

    ", + "RevokeCacheSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the Amazon EC2 security group to revoke access from.

    ", + "RevokeCacheSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS account number of the Amazon EC2 security group owner. Note that this is not the same thing as an AWS access key ID - you must provide a valid AWS account number for this parameter.

    ", + "SecurityGroupIdsList$member": null, + "SecurityGroupMembership$SecurityGroupId": "

    The identifier of the cache security group.

    ", + "SecurityGroupMembership$Status": "

    The status of the cache security group membership. The status changes whenever a cache security group is modified, or when the cache security groups assigned to a cache cluster are modified.

    ", + "Snapshot$SnapshotName": "

    The name of a snapshot. For an automatic snapshot, the name is system-generated; for a manual snapshot, this is the user-provided name.

    ", + "Snapshot$CacheClusterId": "

    The user-supplied identifier of the source cache cluster.

    ", + "Snapshot$SnapshotStatus": "

    The status of the snapshot. Valid values: creating | available | restoring | copying | deleting.

    ", + "Snapshot$SnapshotSource": "

    Indicates whether the snapshot is from an automatic backup (automated) or was created manually (manual).

    ", + "Snapshot$CacheNodeType": "

    The name of the compute and memory capacity node type for the source cache cluster.

    Valid node types are as follows:

    • General purpose:
      • Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
      • Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
    • Compute optimized: cache.c1.xlarge
    • Memory optimized
      • Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
      • Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

    Notes:

    • All t2 instances are created in an Amazon Virtual Private Cloud (VPC).
    • Redis backup/restore is not supported for t2 instances.
    • Redis Append-only files (AOF) functionality is not supported for t1 or t2 instances.

    For a complete listing of cache node types and specifications, see Amazon ElastiCache Product Features and Details and Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis.

    ", + "Snapshot$Engine": "

    The name of the cache engine (memcached or redis) used by the source cache cluster.

    ", + "Snapshot$EngineVersion": "

    The version of the cache engine version that is used by the source cache cluster.

    ", + "Snapshot$PreferredAvailabilityZone": "

    The name of the Availability Zone in which the source cache cluster is located.

    ", + "Snapshot$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

    • sun
    • mon
    • tue
    • wed
    • thu
    • fri
    • sat

    Example: sun:05:00-sun:09:00

    ", + "Snapshot$TopicArn": "

    The Amazon Resource Name (ARN) for the topic used by the source cache cluster for publishing notifications.

    ", + "Snapshot$CacheParameterGroupName": "

    The cache parameter group that is associated with the source cache cluster.

    ", + "Snapshot$CacheSubnetGroupName": "

    The name of the cache subnet group associated with the source cache cluster.

    ", + "Snapshot$VpcId": "

    The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet group for the source cache cluster.

    ", + "Snapshot$SnapshotWindow": "

    The daily time range during which ElastiCache takes daily snapshots of the source cache cluster.

    ", + "SnapshotArnsList$member": null, + "Subnet$SubnetIdentifier": "

    The unique identifier for the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    The key for the tag.

    ", + "Tag$Value": "

    The tag's value. May not be null.

    " + } + }, + "Subnet": { + "base": "

    Represents the subnet associated with a cache cluster. This parameter refers to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with ElastiCache.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateCacheSubnetGroupMessage$SubnetIds": "

    A list of VPC subnet IDs for the cache subnet group.

    ", + "ModifyCacheSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the cache subnet group.

    " + } + }, + "SubnetInUse": { + "base": "

    The requested subnet is being used by another cache subnet group.

    ", + "refs": { + } + }, + "SubnetList": { + "base": null, + "refs": { + "CacheSubnetGroup$Subnets": "

    A list of subnets associated with the cache subnet group.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "CacheCluster$CacheClusterCreateTime": "

    The date and time when the cache cluster was created.

    ", + "CacheNode$CacheNodeCreateTime": "

    The date and time when the cache node was created.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format.

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format.

    ", + "Event$Date": "

    The date and time when the event occurred.

    ", + "NodeSnapshot$CacheNodeCreateTime": "

    The date and time when the cache node was created in the source cache cluster.

    ", + "NodeSnapshot$SnapshotCreateTime": "

    The date and time when the source node's metadata and cache data set was obtained for the snapshot.

    ", + "ReservedCacheNode$StartTime": "

    The time the reservation started.

    ", + "Snapshot$CacheClusterCreateTime": "

    The date and time when the source cache cluster was created.

    " + } + }, + "Tag": { + "base": "

    A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsToResourceMessage$Tags": "

    A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.

    ", + "CreateCacheClusterMessage$Tags": "

    A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.

    ", + "CreateReplicationGroupMessage$Tags": "

    A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.

    ", + "TagListMessage$TagList": "

    A list of cost allocation tags as key-value pairs.

    " + } + }, + "TagListMessage": { + "base": "

    Represents the output from the AddTagsToResource, ListTagsOnResource, and RemoveTagsFromResource actions.

    ", + "refs": { + } + }, + "TagNotFoundFault": { + "base": "

    The requested tag was not found on this resource.

    ", + "refs": { + } + }, + "TagQuotaPerResourceExceeded": { + "base": "

    The request cannot be processed because it would cause the resource to have more than the allowed number of tags. The maximum number of tags permitted on a resource is 10.

    ", + "refs": { + } + }, + "AuthorizeCacheSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "CopySnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateCacheClusterResult": { + "base": null, + "refs": { + } + }, + "CreateCacheParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateCacheSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateCacheSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateReplicationGroupResult": { + "base": null, + "refs": { + } + }, + "CreateSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteCacheClusterResult": { + "base": null, + "refs": { + } + }, + "DeleteReplicationGroupResult": { + "base": null, + "refs": { + } + }, + "DeleteSnapshotResult": { + "base": null, + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "ModifyCacheClusterResult": { + "base": null, + "refs": { + } + }, + "ModifyCacheSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyReplicationGroupResult": { + "base": null, + "refs": { + } + }, + "PurchaseReservedCacheNodesOfferingResult": { + "base": null, + "refs": { + } + }, + "RebootCacheClusterResult": { + "base": null, + "refs": { + } + }, + "RevokeCacheSecurityGroupIngressResult": { + "base": null, + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,76 @@ +{ + "pagination": 
{ + "DescribeCacheClusters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheClusters" + }, + "DescribeCacheEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheEngineVersions" + }, + "DescribeCacheParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheParameterGroups" + }, + "DescribeCacheParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeCacheSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheSecurityGroups" + }, + "DescribeCacheSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "CacheSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeReservedCacheNodes": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedCacheNodes" + }, + "DescribeReservedCacheNodesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedCacheNodesOfferings" + }, + "DescribeReplicationGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReplicationGroups" + }, + "DescribeSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Snapshots" + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticache/2015-02-02/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,143 @@ +{ + "version": 2, + "waiters": { + "CacheClusterAvailable": { + "delay": 15, + "operation": "DescribeCacheClusters", + "maxAttempts": 40, + "description": "Wait until ElastiCache cluster is available.", + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "incompatible-network", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "restore-failed", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + } + ] + }, + "CacheClusterDeleted": { + "delay": 15, + "operation": "DescribeCacheClusters", + "maxAttempts": 40, + "description": "Wait until ElastiCache cluster is deleted.", + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "CacheClusterNotFound", + "matcher": "error", + "state": "success" + }, + { + "expected": "available", + "matcher": "pathAny", + "state": "failure", + 
"argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "incompatible-network", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "restore-failed", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + }, + { + "expected": "snapshotting", + "matcher": "pathAny", + "state": "failure", + "argument": "CacheClusters[].CacheClusterStatus" + } + ] + }, + "ReplicationGroupAvailable": { + "delay": 15, + "operation": "DescribeReplicationGroups", + "maxAttempts": 40, + "description": "Wait until ElastiCache replication group is available.", + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "ReplicationGroups[].Status" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "ReplicationGroups[].Status" + } + ] + }, + "ReplicationGroupDeleted": { + "delay": 15, + "operation": "DescribeReplicationGroups", + "maxAttempts": 40, + "description": "Wait until ElastiCache replication group is deleted.", + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "ReplicationGroups[].Status" + }, + { + "expected": "available", + "matcher": "pathAny", + "state": "failure", + "argument": "ReplicationGroups[].Status" + }, + { + "expected": "ReplicationGroupNotFoundFault", + "matcher": "error", + "state": "success" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/api-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1689 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-12-01", + "endpointPrefix":"elasticbeanstalk", + "protocol":"query", + "serviceAbbreviation":"Elastic Beanstalk", + "serviceFullName":"AWS Elastic Beanstalk", + "signatureVersion":"v4", + "xmlNamespace":"http://elasticbeanstalk.amazonaws.com/docs/2010-12-01/" + }, + "operations":{ + "AbortEnvironmentUpdate":{ + "name":"AbortEnvironmentUpdate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AbortEnvironmentUpdateMessage"}, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "CheckDNSAvailability":{ + "name":"CheckDNSAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckDNSAvailabilityMessage"}, + "output":{ + "shape":"CheckDNSAvailabilityResultMessage", + "resultWrapper":"CheckDNSAvailabilityResult" + } + }, + "ComposeEnvironments":{ + "name":"ComposeEnvironments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ComposeEnvironmentsMessage"}, + "output":{ + "shape":"EnvironmentDescriptionsMessage", + "resultWrapper":"ComposeEnvironmentsResult" + }, + "errors":[ + {"shape":"TooManyEnvironmentsException"}, + {"shape":"InsufficientPrivilegesException"} + ] + }, + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationMessage"}, + "output":{ + "shape":"ApplicationDescriptionMessage", + "resultWrapper":"CreateApplicationResult" + }, + "errors":[ 
+ {"shape":"TooManyApplicationsException"} + ] + }, + "CreateApplicationVersion":{ + "name":"CreateApplicationVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationVersionMessage"}, + "output":{ + "shape":"ApplicationVersionDescriptionMessage", + "resultWrapper":"CreateApplicationVersionResult" + }, + "errors":[ + {"shape":"TooManyApplicationsException"}, + {"shape":"TooManyApplicationVersionsException"}, + {"shape":"InsufficientPrivilegesException"}, + {"shape":"S3LocationNotInServiceRegionException"} + ] + }, + "CreateConfigurationTemplate":{ + "name":"CreateConfigurationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConfigurationTemplateMessage"}, + "output":{ + "shape":"ConfigurationSettingsDescription", + "resultWrapper":"CreateConfigurationTemplateResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"}, + {"shape":"TooManyConfigurationTemplatesException"} + ] + }, + "CreateEnvironment":{ + "name":"CreateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEnvironmentMessage"}, + "output":{ + "shape":"EnvironmentDescription", + "resultWrapper":"CreateEnvironmentResult" + }, + "errors":[ + {"shape":"TooManyEnvironmentsException"}, + {"shape":"InsufficientPrivilegesException"} + ] + }, + "CreateStorageLocation":{ + "name":"CreateStorageLocation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"CreateStorageLocationResultMessage", + "resultWrapper":"CreateStorageLocationResult" + }, + "errors":[ + {"shape":"TooManyBucketsException"}, + {"shape":"S3SubscriptionRequiredException"}, + {"shape":"InsufficientPrivilegesException"} + ] + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApplicationMessage"}, + "errors":[ + {"shape":"OperationInProgressException"} + ] + }, + "DeleteApplicationVersion":{ + 
"name":"DeleteApplicationVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApplicationVersionMessage"}, + "errors":[ + {"shape":"SourceBundleDeletionException"}, + {"shape":"InsufficientPrivilegesException"}, + {"shape":"OperationInProgressException"}, + {"shape":"S3LocationNotInServiceRegionException"} + ] + }, + "DeleteConfigurationTemplate":{ + "name":"DeleteConfigurationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConfigurationTemplateMessage"}, + "errors":[ + {"shape":"OperationInProgressException"} + ] + }, + "DeleteEnvironmentConfiguration":{ + "name":"DeleteEnvironmentConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEnvironmentConfigurationMessage"} + }, + "DescribeApplicationVersions":{ + "name":"DescribeApplicationVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicationVersionsMessage"}, + "output":{ + "shape":"ApplicationVersionDescriptionsMessage", + "resultWrapper":"DescribeApplicationVersionsResult" + } + }, + "DescribeApplications":{ + "name":"DescribeApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicationsMessage"}, + "output":{ + "shape":"ApplicationDescriptionsMessage", + "resultWrapper":"DescribeApplicationsResult" + } + }, + "DescribeConfigurationOptions":{ + "name":"DescribeConfigurationOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigurationOptionsMessage"}, + "output":{ + "shape":"ConfigurationOptionsDescription", + "resultWrapper":"DescribeConfigurationOptionsResult" + } + }, + "DescribeConfigurationSettings":{ + "name":"DescribeConfigurationSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConfigurationSettingsMessage"}, + "output":{ + "shape":"ConfigurationSettingsDescriptions", + 
"resultWrapper":"DescribeConfigurationSettingsResult" + } + }, + "DescribeEnvironmentHealth":{ + "name":"DescribeEnvironmentHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEnvironmentHealthRequest"}, + "output":{ + "shape":"DescribeEnvironmentHealthResult", + "resultWrapper":"DescribeEnvironmentHealthResult" + }, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ElasticBeanstalkServiceException"} + ] + }, + "DescribeEnvironmentResources":{ + "name":"DescribeEnvironmentResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEnvironmentResourcesMessage"}, + "output":{ + "shape":"EnvironmentResourceDescriptionsMessage", + "resultWrapper":"DescribeEnvironmentResourcesResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "DescribeEnvironments":{ + "name":"DescribeEnvironments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEnvironmentsMessage"}, + "output":{ + "shape":"EnvironmentDescriptionsMessage", + "resultWrapper":"DescribeEnvironmentsResult" + } + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventDescriptionsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeInstancesHealth":{ + "name":"DescribeInstancesHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstancesHealthRequest"}, + "output":{ + "shape":"DescribeInstancesHealthResult", + "resultWrapper":"DescribeInstancesHealthResult" + }, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ElasticBeanstalkServiceException"} + ] + }, + "ListAvailableSolutionStacks":{ + "name":"ListAvailableSolutionStacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"ListAvailableSolutionStacksResultMessage", + 
"resultWrapper":"ListAvailableSolutionStacksResult" + } + }, + "RebuildEnvironment":{ + "name":"RebuildEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebuildEnvironmentMessage"}, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "RequestEnvironmentInfo":{ + "name":"RequestEnvironmentInfo", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestEnvironmentInfoMessage"} + }, + "RestartAppServer":{ + "name":"RestartAppServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestartAppServerMessage"} + }, + "RetrieveEnvironmentInfo":{ + "name":"RetrieveEnvironmentInfo", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetrieveEnvironmentInfoMessage"}, + "output":{ + "shape":"RetrieveEnvironmentInfoResultMessage", + "resultWrapper":"RetrieveEnvironmentInfoResult" + } + }, + "SwapEnvironmentCNAMEs":{ + "name":"SwapEnvironmentCNAMEs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SwapEnvironmentCNAMEsMessage"} + }, + "TerminateEnvironment":{ + "name":"TerminateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateEnvironmentMessage"}, + "output":{ + "shape":"EnvironmentDescription", + "resultWrapper":"TerminateEnvironmentResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApplicationMessage"}, + "output":{ + "shape":"ApplicationDescriptionMessage", + "resultWrapper":"UpdateApplicationResult" + } + }, + "UpdateApplicationVersion":{ + "name":"UpdateApplicationVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApplicationVersionMessage"}, + "output":{ + "shape":"ApplicationVersionDescriptionMessage", + "resultWrapper":"UpdateApplicationVersionResult" + } + }, + 
"UpdateConfigurationTemplate":{ + "name":"UpdateConfigurationTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateConfigurationTemplateMessage"}, + "output":{ + "shape":"ConfigurationSettingsDescription", + "resultWrapper":"UpdateConfigurationTemplateResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "UpdateEnvironment":{ + "name":"UpdateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateEnvironmentMessage"}, + "output":{ + "shape":"EnvironmentDescription", + "resultWrapper":"UpdateEnvironmentResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + }, + "ValidateConfigurationSettings":{ + "name":"ValidateConfigurationSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ValidateConfigurationSettingsMessage"}, + "output":{ + "shape":"ConfigurationSettingsValidationMessages", + "resultWrapper":"ValidateConfigurationSettingsResult" + }, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ] + } + }, + "shapes":{ + "AbortEnvironmentUpdateMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "AbortableOperationInProgress":{"type":"boolean"}, + "ApplicationDescription":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Description":{"shape":"Description"}, + "DateCreated":{"shape":"CreationDate"}, + "DateUpdated":{"shape":"UpdateDate"}, + "Versions":{"shape":"VersionLabelsList"}, + "ConfigurationTemplates":{"shape":"ConfigurationTemplateNamesList"} + } + }, + "ApplicationDescriptionList":{ + "type":"list", + "member":{"shape":"ApplicationDescription"} + }, + "ApplicationDescriptionMessage":{ + "type":"structure", + "members":{ + "Application":{"shape":"ApplicationDescription"} + } + }, + "ApplicationDescriptionsMessage":{ + "type":"structure", + "members":{ + 
"Applications":{"shape":"ApplicationDescriptionList"} + } + }, + "ApplicationMetrics":{ + "type":"structure", + "members":{ + "Duration":{"shape":"NullableInteger"}, + "RequestCount":{"shape":"RequestCount"}, + "StatusCodes":{"shape":"StatusCodes"}, + "Latency":{"shape":"Latency"} + } + }, + "ApplicationName":{ + "type":"string", + "max":100, + "min":1 + }, + "ApplicationNamesList":{ + "type":"list", + "member":{"shape":"ApplicationName"} + }, + "ApplicationVersionDescription":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Description":{"shape":"Description"}, + "VersionLabel":{"shape":"VersionLabel"}, + "SourceBundle":{"shape":"S3Location"}, + "DateCreated":{"shape":"CreationDate"}, + "DateUpdated":{"shape":"UpdateDate"}, + "Status":{"shape":"ApplicationVersionStatus"} + } + }, + "ApplicationVersionDescriptionList":{ + "type":"list", + "member":{"shape":"ApplicationVersionDescription"} + }, + "ApplicationVersionDescriptionMessage":{ + "type":"structure", + "members":{ + "ApplicationVersion":{"shape":"ApplicationVersionDescription"} + } + }, + "ApplicationVersionDescriptionsMessage":{ + "type":"structure", + "members":{ + "ApplicationVersions":{"shape":"ApplicationVersionDescriptionList"} + } + }, + "ApplicationVersionProccess":{"type":"boolean"}, + "ApplicationVersionStatus":{ + "type":"string", + "enum":[ + "Processed", + "Unprocessed", + "Failed", + "Processing" + ] + }, + "AutoCreateApplication":{"type":"boolean"}, + "AutoScalingGroup":{ + "type":"structure", + "members":{ + "Name":{"shape":"ResourceId"} + } + }, + "AutoScalingGroupList":{ + "type":"list", + "member":{"shape":"AutoScalingGroup"} + }, + "AvailableSolutionStackDetailsList":{ + "type":"list", + "member":{"shape":"SolutionStackDescription"} + }, + "AvailableSolutionStackNamesList":{ + "type":"list", + "member":{"shape":"SolutionStackName"} + }, + "CPUUtilization":{ + "type":"structure", + "members":{ + "User":{"shape":"NullableDouble"}, + 
"Nice":{"shape":"NullableDouble"}, + "System":{"shape":"NullableDouble"}, + "Idle":{"shape":"NullableDouble"}, + "IOWait":{"shape":"NullableDouble"}, + "IRQ":{"shape":"NullableDouble"}, + "SoftIRQ":{"shape":"NullableDouble"} + } + }, + "Cause":{ + "type":"string", + "max":255, + "min":1 + }, + "Causes":{ + "type":"list", + "member":{"shape":"Cause"} + }, + "CheckDNSAvailabilityMessage":{ + "type":"structure", + "required":["CNAMEPrefix"], + "members":{ + "CNAMEPrefix":{"shape":"DNSCnamePrefix"} + } + }, + "CheckDNSAvailabilityResultMessage":{ + "type":"structure", + "members":{ + "Available":{"shape":"CnameAvailability"}, + "FullyQualifiedCNAME":{"shape":"DNSCname"} + } + }, + "CnameAvailability":{"type":"boolean"}, + "ComposeEnvironmentsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "GroupName":{"shape":"GroupName"}, + "VersionLabels":{"shape":"VersionLabels"} + } + }, + "ConfigurationDeploymentStatus":{ + "type":"string", + "enum":[ + "deployed", + "pending", + "failed" + ] + }, + "ConfigurationOptionDefaultValue":{"type":"string"}, + "ConfigurationOptionDescription":{ + "type":"structure", + "members":{ + "Namespace":{"shape":"OptionNamespace"}, + "Name":{"shape":"ConfigurationOptionName"}, + "DefaultValue":{"shape":"ConfigurationOptionDefaultValue"}, + "ChangeSeverity":{"shape":"ConfigurationOptionSeverity"}, + "UserDefined":{"shape":"UserDefinedOption"}, + "ValueType":{"shape":"ConfigurationOptionValueType"}, + "ValueOptions":{"shape":"ConfigurationOptionPossibleValues"}, + "MinValue":{"shape":"OptionRestrictionMinValue"}, + "MaxValue":{"shape":"OptionRestrictionMaxValue"}, + "MaxLength":{"shape":"OptionRestrictionMaxLength"}, + "Regex":{"shape":"OptionRestrictionRegex"} + } + }, + "ConfigurationOptionDescriptionsList":{ + "type":"list", + "member":{"shape":"ConfigurationOptionDescription"} + }, + "ConfigurationOptionName":{"type":"string"}, + "ConfigurationOptionPossibleValue":{"type":"string"}, + 
"ConfigurationOptionPossibleValues":{ + "type":"list", + "member":{"shape":"ConfigurationOptionPossibleValue"} + }, + "ConfigurationOptionSetting":{ + "type":"structure", + "members":{ + "ResourceName":{"shape":"ResourceName"}, + "Namespace":{"shape":"OptionNamespace"}, + "OptionName":{"shape":"ConfigurationOptionName"}, + "Value":{"shape":"ConfigurationOptionValue"} + } + }, + "ConfigurationOptionSettingsList":{ + "type":"list", + "member":{"shape":"ConfigurationOptionSetting"} + }, + "ConfigurationOptionSeverity":{"type":"string"}, + "ConfigurationOptionValue":{"type":"string"}, + "ConfigurationOptionValueType":{ + "type":"string", + "enum":[ + "Scalar", + "List" + ] + }, + "ConfigurationOptionsDescription":{ + "type":"structure", + "members":{ + "SolutionStackName":{"shape":"SolutionStackName"}, + "Options":{"shape":"ConfigurationOptionDescriptionsList"} + } + }, + "ConfigurationSettingsDescription":{ + "type":"structure", + "members":{ + "SolutionStackName":{"shape":"SolutionStackName"}, + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "Description":{"shape":"Description"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "DeploymentStatus":{"shape":"ConfigurationDeploymentStatus"}, + "DateCreated":{"shape":"CreationDate"}, + "DateUpdated":{"shape":"UpdateDate"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"} + } + }, + "ConfigurationSettingsDescriptionList":{ + "type":"list", + "member":{"shape":"ConfigurationSettingsDescription"} + }, + "ConfigurationSettingsDescriptions":{ + "type":"structure", + "members":{ + "ConfigurationSettings":{"shape":"ConfigurationSettingsDescriptionList"} + } + }, + "ConfigurationSettingsValidationMessages":{ + "type":"structure", + "members":{ + "Messages":{"shape":"ValidationMessagesList"} + } + }, + "ConfigurationTemplateName":{ + "type":"string", + "max":100, + "min":1 + }, + "ConfigurationTemplateNamesList":{ + "type":"list", + 
"member":{"shape":"ConfigurationTemplateName"} + }, + "CreateApplicationMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Description":{"shape":"Description"} + } + }, + "CreateApplicationVersionMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "VersionLabel" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "Description":{"shape":"Description"}, + "SourceBundle":{"shape":"S3Location"}, + "AutoCreateApplication":{"shape":"AutoCreateApplication"}, + "Process":{"shape":"ApplicationVersionProccess"} + } + }, + "CreateConfigurationTemplateMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "TemplateName" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "SourceConfiguration":{"shape":"SourceConfiguration"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "Description":{"shape":"Description"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"} + } + }, + "CreateEnvironmentMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "GroupName":{"shape":"GroupName"}, + "Description":{"shape":"Description"}, + "CNAMEPrefix":{"shape":"DNSCnamePrefix"}, + "Tier":{"shape":"EnvironmentTier"}, + "Tags":{"shape":"Tags"}, + "VersionLabel":{"shape":"VersionLabel"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"}, + "OptionsToRemove":{"shape":"OptionsSpecifierList"} + } + }, + "CreateStorageLocationResultMessage":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"S3Bucket"} + } + }, + 
"CreationDate":{"type":"timestamp"}, + "DNSCname":{ + "type":"string", + "max":255, + "min":1 + }, + "DNSCnamePrefix":{ + "type":"string", + "max":63, + "min":4 + }, + "DeleteApplicationMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TerminateEnvByForce":{"shape":"TerminateEnvForce"} + } + }, + "DeleteApplicationVersionMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "VersionLabel" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "DeleteSourceBundle":{"shape":"DeleteSourceBundle"} + } + }, + "DeleteConfigurationTemplateMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "TemplateName" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"} + } + }, + "DeleteEnvironmentConfigurationMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "EnvironmentName" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "DeleteSourceBundle":{"type":"boolean"}, + "DescribeApplicationVersionsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabels":{"shape":"VersionLabelsList"} + } + }, + "DescribeApplicationsMessage":{ + "type":"structure", + "members":{ + "ApplicationNames":{"shape":"ApplicationNamesList"} + } + }, + "DescribeConfigurationOptionsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "Options":{"shape":"OptionsSpecifierList"} + } + }, + "DescribeConfigurationSettingsMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + 
"ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "DescribeEnvironmentHealthRequest":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "AttributeNames":{"shape":"EnvironmentHealthAttributes"} + } + }, + "DescribeEnvironmentHealthResult":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "HealthStatus":{"shape":"String"}, + "Status":{"shape":"EnvironmentHealth"}, + "Color":{"shape":"String"}, + "Causes":{"shape":"Causes"}, + "ApplicationMetrics":{"shape":"ApplicationMetrics"}, + "InstancesHealth":{"shape":"InstanceHealthSummary"}, + "RefreshedAt":{"shape":"RefreshedAt"} + } + }, + "DescribeEnvironmentResourcesMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "DescribeEnvironmentsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "EnvironmentIds":{"shape":"EnvironmentIdList"}, + "EnvironmentNames":{"shape":"EnvironmentNamesList"}, + "IncludeDeleted":{"shape":"IncludeDeleted"}, + "IncludedDeletedBackTo":{"shape":"IncludeDeletedBackTo"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "RequestId":{"shape":"RequestId"}, + "Severity":{"shape":"EventSeverity"}, + "StartTime":{"shape":"TimeFilterStart"}, + "EndTime":{"shape":"TimeFilterEnd"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"Token"} + } + }, + "DescribeInstancesHealthRequest":{ + "type":"structure", + "members":{ 
+ "EnvironmentName":{"shape":"EnvironmentName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "AttributeNames":{"shape":"InstancesHealthAttributes"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeInstancesHealthResult":{ + "type":"structure", + "members":{ + "InstanceHealthList":{"shape":"InstanceHealthList"}, + "RefreshedAt":{"shape":"RefreshedAt"}, + "NextToken":{"shape":"NextToken"} + } + }, + "Description":{ + "type":"string", + "max":200 + }, + "Ec2InstanceId":{"type":"string"}, + "ElasticBeanstalkServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "EndpointURL":{"type":"string"}, + "EnvironmentDescription":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "Description":{"shape":"Description"}, + "EndpointURL":{"shape":"EndpointURL"}, + "CNAME":{"shape":"DNSCname"}, + "DateCreated":{"shape":"CreationDate"}, + "DateUpdated":{"shape":"UpdateDate"}, + "Status":{"shape":"EnvironmentStatus"}, + "AbortableOperationInProgress":{"shape":"AbortableOperationInProgress"}, + "Health":{"shape":"EnvironmentHealth"}, + "HealthStatus":{"shape":"EnvironmentHealthStatus"}, + "Resources":{"shape":"EnvironmentResourcesDescription"}, + "Tier":{"shape":"EnvironmentTier"}, + "EnvironmentLinks":{"shape":"EnvironmentLinks"} + } + }, + "EnvironmentDescriptionsList":{ + "type":"list", + "member":{"shape":"EnvironmentDescription"} + }, + "EnvironmentDescriptionsMessage":{ + "type":"structure", + "members":{ + "Environments":{"shape":"EnvironmentDescriptionsList"} + } + }, + "EnvironmentHealth":{ + "type":"string", + "enum":[ + "Green", + "Yellow", + "Red", + "Grey" + ] + }, + "EnvironmentHealthAttribute":{ + 
"type":"string", + "enum":[ + "Status", + "Color", + "Causes", + "ApplicationMetrics", + "InstancesHealth", + "All", + "HealthStatus", + "RefreshedAt" + ] + }, + "EnvironmentHealthAttributes":{ + "type":"list", + "member":{"shape":"EnvironmentHealthAttribute"} + }, + "EnvironmentHealthStatus":{ + "type":"string", + "enum":[ + "NoData", + "Unknown", + "Pending", + "Ok", + "Info", + "Warning", + "Degraded", + "Severe" + ] + }, + "EnvironmentId":{"type":"string"}, + "EnvironmentIdList":{ + "type":"list", + "member":{"shape":"EnvironmentId"} + }, + "EnvironmentInfoDescription":{ + "type":"structure", + "members":{ + "InfoType":{"shape":"EnvironmentInfoType"}, + "Ec2InstanceId":{"shape":"Ec2InstanceId"}, + "SampleTimestamp":{"shape":"SampleTimestamp"}, + "Message":{"shape":"Message"} + } + }, + "EnvironmentInfoDescriptionList":{ + "type":"list", + "member":{"shape":"EnvironmentInfoDescription"} + }, + "EnvironmentInfoType":{ + "type":"string", + "enum":[ + "tail", + "bundle" + ] + }, + "EnvironmentLink":{ + "type":"structure", + "members":{ + "LinkName":{"shape":"String"}, + "EnvironmentName":{"shape":"String"} + } + }, + "EnvironmentLinks":{ + "type":"list", + "member":{"shape":"EnvironmentLink"} + }, + "EnvironmentName":{ + "type":"string", + "max":23, + "min":4 + }, + "EnvironmentNamesList":{ + "type":"list", + "member":{"shape":"EnvironmentName"} + }, + "EnvironmentResourceDescription":{ + "type":"structure", + "members":{ + "EnvironmentName":{"shape":"EnvironmentName"}, + "AutoScalingGroups":{"shape":"AutoScalingGroupList"}, + "Instances":{"shape":"InstanceList"}, + "LaunchConfigurations":{"shape":"LaunchConfigurationList"}, + "LoadBalancers":{"shape":"LoadBalancerList"}, + "Triggers":{"shape":"TriggerList"}, + "Queues":{"shape":"QueueList"} + } + }, + "EnvironmentResourceDescriptionsMessage":{ + "type":"structure", + "members":{ + "EnvironmentResources":{"shape":"EnvironmentResourceDescription"} + } + }, + "EnvironmentResourcesDescription":{ + "type":"structure", 
+ "members":{ + "LoadBalancer":{"shape":"LoadBalancerDescription"} + } + }, + "EnvironmentStatus":{ + "type":"string", + "enum":[ + "Launching", + "Updating", + "Ready", + "Terminating", + "Terminated" + ] + }, + "EnvironmentTier":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Type":{"shape":"String"}, + "Version":{"shape":"String"} + } + }, + "EventDate":{"type":"timestamp"}, + "EventDescription":{ + "type":"structure", + "members":{ + "EventDate":{"shape":"EventDate"}, + "Message":{"shape":"EventMessage"}, + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "RequestId":{"shape":"RequestId"}, + "Severity":{"shape":"EventSeverity"} + } + }, + "EventDescriptionList":{ + "type":"list", + "member":{"shape":"EventDescription"} + }, + "EventDescriptionsMessage":{ + "type":"structure", + "members":{ + "Events":{"shape":"EventDescriptionList"}, + "NextToken":{"shape":"Token"} + } + }, + "EventMessage":{"type":"string"}, + "EventSeverity":{ + "type":"string", + "enum":[ + "TRACE", + "DEBUG", + "INFO", + "WARN", + "ERROR", + "FATAL" + ] + }, + "ExceptionMessage":{"type":"string"}, + "FileTypeExtension":{ + "type":"string", + "max":100, + "min":1 + }, + "ForceTerminate":{"type":"boolean"}, + "GroupName":{ + "type":"string", + "max":19, + "min":1 + }, + "IncludeDeleted":{"type":"boolean"}, + "IncludeDeletedBackTo":{"type":"timestamp"}, + "Instance":{ + "type":"structure", + "members":{ + "Id":{"shape":"ResourceId"} + } + }, + "InstanceHealthList":{ + "type":"list", + "member":{"shape":"SingleInstanceHealth"} + }, + "InstanceHealthSummary":{ + "type":"structure", + "members":{ + "NoData":{"shape":"NullableInteger"}, + "Unknown":{"shape":"NullableInteger"}, + "Pending":{"shape":"NullableInteger"}, + "Ok":{"shape":"NullableInteger"}, + "Info":{"shape":"NullableInteger"}, + "Warning":{"shape":"NullableInteger"}, 
+ "Degraded":{"shape":"NullableInteger"}, + "Severe":{"shape":"NullableInteger"} + } + }, + "InstanceId":{ + "type":"string", + "max":255, + "min":1 + }, + "InstanceList":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InstancesHealthAttribute":{ + "type":"string", + "enum":[ + "HealthStatus", + "Color", + "Causes", + "ApplicationMetrics", + "RefreshedAt", + "LaunchedAt", + "System", + "All" + ] + }, + "InstancesHealthAttributes":{ + "type":"list", + "member":{"shape":"InstancesHealthAttribute"} + }, + "InsufficientPrivilegesException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientPrivilegesException", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "InvalidRequestException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRequestException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Latency":{ + "type":"structure", + "members":{ + "P999":{"shape":"NullableDouble"}, + "P99":{"shape":"NullableDouble"}, + "P95":{"shape":"NullableDouble"}, + "P90":{"shape":"NullableDouble"}, + "P85":{"shape":"NullableDouble"}, + "P75":{"shape":"NullableDouble"}, + "P50":{"shape":"NullableDouble"}, + "P10":{"shape":"NullableDouble"} + } + }, + "LaunchConfiguration":{ + "type":"structure", + "members":{ + "Name":{"shape":"ResourceId"} + } + }, + "LaunchConfigurationList":{ + "type":"list", + "member":{"shape":"LaunchConfiguration"} + }, + "LaunchedAt":{"type":"timestamp"}, + "ListAvailableSolutionStacksResultMessage":{ + "type":"structure", + "members":{ + "SolutionStacks":{"shape":"AvailableSolutionStackNamesList"}, + "SolutionStackDetails":{"shape":"AvailableSolutionStackDetailsList"} + } + }, + "Listener":{ + "type":"structure", + "members":{ + "Protocol":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "LoadAverage":{ + "type":"list", + "member":{"shape":"LoadAverageValue"} + }, + 
"LoadAverageValue":{"type":"double"}, + "LoadBalancer":{ + "type":"structure", + "members":{ + "Name":{"shape":"ResourceId"} + } + }, + "LoadBalancerDescription":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"String"}, + "Domain":{"shape":"String"}, + "Listeners":{"shape":"LoadBalancerListenersDescription"} + } + }, + "LoadBalancerList":{ + "type":"list", + "member":{"shape":"LoadBalancer"} + }, + "LoadBalancerListenersDescription":{ + "type":"list", + "member":{"shape":"Listener"} + }, + "MaxRecords":{ + "type":"integer", + "max":1000, + "min":1 + }, + "Message":{"type":"string"}, + "NextToken":{ + "type":"string", + "max":100, + "min":1 + }, + "NullableDouble":{"type":"double"}, + "NullableInteger":{"type":"integer"}, + "OperationInProgressException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OperationInProgressFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionNamespace":{"type":"string"}, + "OptionRestrictionMaxLength":{"type":"integer"}, + "OptionRestrictionMaxValue":{"type":"integer"}, + "OptionRestrictionMinValue":{"type":"integer"}, + "OptionRestrictionRegex":{ + "type":"structure", + "members":{ + "Pattern":{"shape":"RegexPattern"}, + "Label":{"shape":"RegexLabel"} + } + }, + "OptionSpecification":{ + "type":"structure", + "members":{ + "ResourceName":{"shape":"ResourceName"}, + "Namespace":{"shape":"OptionNamespace"}, + "OptionName":{"shape":"ConfigurationOptionName"} + } + }, + "OptionsSpecifierList":{ + "type":"list", + "member":{"shape":"OptionSpecification"} + }, + "Queue":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "URL":{"shape":"String"} + } + }, + "QueueList":{ + "type":"list", + "member":{"shape":"Queue"} + }, + "RebuildEnvironmentMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "RefreshedAt":{"type":"timestamp"}, + 
"RegexLabel":{"type":"string"}, + "RegexPattern":{"type":"string"}, + "RequestCount":{"type":"integer"}, + "RequestEnvironmentInfoMessage":{ + "type":"structure", + "required":["InfoType"], + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "InfoType":{"shape":"EnvironmentInfoType"} + } + }, + "RequestId":{"type":"string"}, + "ResourceId":{"type":"string"}, + "ResourceName":{ + "type":"string", + "max":256, + "min":1 + }, + "RestartAppServerMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"} + } + }, + "RetrieveEnvironmentInfoMessage":{ + "type":"structure", + "required":["InfoType"], + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "InfoType":{"shape":"EnvironmentInfoType"} + } + }, + "RetrieveEnvironmentInfoResultMessage":{ + "type":"structure", + "members":{ + "EnvironmentInfo":{"shape":"EnvironmentInfoDescriptionList"} + } + }, + "S3Bucket":{ + "type":"string", + "max":255 + }, + "S3Key":{ + "type":"string", + "max":1024 + }, + "S3Location":{ + "type":"structure", + "members":{ + "S3Bucket":{"shape":"S3Bucket"}, + "S3Key":{"shape":"S3Key"} + } + }, + "S3LocationNotInServiceRegionException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"S3LocationNotInServiceRegionException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "S3SubscriptionRequiredException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"S3SubscriptionRequiredException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SampleTimestamp":{"type":"timestamp"}, + "SingleInstanceHealth":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "HealthStatus":{"shape":"String"}, + "Color":{"shape":"String"}, + "Causes":{"shape":"Causes"}, + "LaunchedAt":{"shape":"LaunchedAt"}, + 
"ApplicationMetrics":{"shape":"ApplicationMetrics"}, + "System":{"shape":"SystemStatus"} + } + }, + "SolutionStackDescription":{ + "type":"structure", + "members":{ + "SolutionStackName":{"shape":"SolutionStackName"}, + "PermittedFileTypes":{"shape":"SolutionStackFileTypeList"} + } + }, + "SolutionStackFileTypeList":{ + "type":"list", + "member":{"shape":"FileTypeExtension"} + }, + "SolutionStackName":{ + "type":"string", + "max":100 + }, + "SourceBundleDeletionException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceBundleDeletionFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceConfiguration":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"} + } + }, + "StatusCodes":{ + "type":"structure", + "members":{ + "Status2xx":{"shape":"NullableInteger"}, + "Status3xx":{"shape":"NullableInteger"}, + "Status4xx":{"shape":"NullableInteger"}, + "Status5xx":{"shape":"NullableInteger"} + } + }, + "String":{"type":"string"}, + "SwapEnvironmentCNAMEsMessage":{ + "type":"structure", + "members":{ + "SourceEnvironmentId":{"shape":"EnvironmentId"}, + "SourceEnvironmentName":{"shape":"EnvironmentName"}, + "DestinationEnvironmentId":{"shape":"EnvironmentId"}, + "DestinationEnvironmentName":{"shape":"EnvironmentName"} + } + }, + "SystemStatus":{ + "type":"structure", + "members":{ + "CPUUtilization":{"shape":"CPUUtilization"}, + "LoadAverage":{"shape":"LoadAverage"} + } + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TerminateEnvForce":{"type":"boolean"}, + "TerminateEnvironmentMessage":{ + "type":"structure", + "members":{ + "EnvironmentId":{"shape":"EnvironmentId"}, + 
"EnvironmentName":{"shape":"EnvironmentName"}, + "TerminateResources":{"shape":"TerminateEnvironmentResources"}, + "ForceTerminate":{"shape":"ForceTerminate"} + } + }, + "TerminateEnvironmentResources":{"type":"boolean"}, + "TimeFilterEnd":{"type":"timestamp"}, + "TimeFilterStart":{"type":"timestamp"}, + "Token":{"type":"string"}, + "TooManyApplicationVersionsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TooManyApplicationsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyApplicationsException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyBucketsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyBucketsException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyConfigurationTemplatesException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyConfigurationTemplatesException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyEnvironmentsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyEnvironmentsException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Trigger":{ + "type":"structure", + "members":{ + "Name":{"shape":"ResourceId"} + } + }, + "TriggerList":{ + "type":"list", + "member":{"shape":"Trigger"} + }, + "UpdateApplicationMessage":{ + "type":"structure", + "required":["ApplicationName"], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "Description":{"shape":"Description"} + } + }, + "UpdateApplicationVersionMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "VersionLabel" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "VersionLabel":{"shape":"VersionLabel"}, + "Description":{"shape":"Description"} + } + }, + "UpdateConfigurationTemplateMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + 
"TemplateName" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "Description":{"shape":"Description"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"}, + "OptionsToRemove":{"shape":"OptionsSpecifierList"} + } + }, + "UpdateDate":{"type":"timestamp"}, + "UpdateEnvironmentMessage":{ + "type":"structure", + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "EnvironmentId":{"shape":"EnvironmentId"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "GroupName":{"shape":"GroupName"}, + "Description":{"shape":"Description"}, + "Tier":{"shape":"EnvironmentTier"}, + "VersionLabel":{"shape":"VersionLabel"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "SolutionStackName":{"shape":"SolutionStackName"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"}, + "OptionsToRemove":{"shape":"OptionsSpecifierList"} + } + }, + "UserDefinedOption":{"type":"boolean"}, + "ValidateConfigurationSettingsMessage":{ + "type":"structure", + "required":[ + "ApplicationName", + "OptionSettings" + ], + "members":{ + "ApplicationName":{"shape":"ApplicationName"}, + "TemplateName":{"shape":"ConfigurationTemplateName"}, + "EnvironmentName":{"shape":"EnvironmentName"}, + "OptionSettings":{"shape":"ConfigurationOptionSettingsList"} + } + }, + "ValidationMessage":{ + "type":"structure", + "members":{ + "Message":{"shape":"ValidationMessageString"}, + "Severity":{"shape":"ValidationSeverity"}, + "Namespace":{"shape":"OptionNamespace"}, + "OptionName":{"shape":"ConfigurationOptionName"} + } + }, + "ValidationMessageString":{"type":"string"}, + "ValidationMessagesList":{ + "type":"list", + "member":{"shape":"ValidationMessage"} + }, + "ValidationSeverity":{ + "type":"string", + "enum":[ + "error", + "warning" + ] + }, + "VersionLabel":{ + "type":"string", + "max":100, + "min":1 + }, + "VersionLabels":{ + "type":"list", + "member":{"shape":"VersionLabel"} + }, + 
"VersionLabelsList":{ + "type":"list", + "member":{"shape":"VersionLabel"} + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1391 @@ +{ + "version": "2.0", + "service": "AWS Elastic Beanstalk

    This is the AWS Elastic Beanstalk API Reference. This guide provides detailed information about AWS Elastic Beanstalk actions, data types, parameters, and errors.

    AWS Elastic Beanstalk is a tool that makes it easy for you to create, deploy, and manage scalable, fault-tolerant applications running on Amazon Web Services cloud resources.

    For more information about this product, go to the AWS Elastic Beanstalk details page. The location of the latest AWS Elastic Beanstalk WSDL is http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that enable you to access the API, go to Tools for Amazon Web Services.

    Endpoints

    For a list of region-specific endpoints that AWS Elastic Beanstalk supports, go to Regions and Endpoints in the Amazon Web Services Glossary.

    ", + "operations": { + "AbortEnvironmentUpdate": "

    Cancels in-progress environment configuration update or application version deployment.

    ", + "CheckDNSAvailability": "

    Checks if the specified CNAME is available.

    ", + "ComposeEnvironments": "

    Create or update a group of environments that each run a separate component of a single application. Takes a list of version labels that specify application source bundles for each of the environments to create or update. The name of each environment and other required information must be included in the source bundles in an environment manifest named env.yaml. See Compose Environments for details.

    ", + "CreateApplication": "

    Creates an application that has one configuration template named default and no application versions.

    ", + "CreateApplicationVersion": "

    Creates an application version for the specified application.

    Once you create an application version with a specified Amazon S3 bucket and key location, you cannot change that Amazon S3 location. If you change the Amazon S3 location, you receive an exception when you attempt to launch an environment from the application version. ", + "CreateConfigurationTemplate": "

    Creates a configuration template. Templates are associated with a specific application and are used to deploy different versions of the application with the same configuration settings.

    Related Topics

    ", + "CreateEnvironment": "

    Launches an environment for the specified application using the specified configuration.

    ", + "CreateStorageLocation": "

    Creates the Amazon S3 storage location for the account.

    This location is used to store user log files.

    ", + "DeleteApplication": "

    Deletes the specified application along with all associated versions and configurations. The application versions will not be deleted from your Amazon S3 bucket.

    You cannot delete an application that has a running environment. ", + "DeleteApplicationVersion": "

    Deletes the specified version from the specified application.

    You cannot delete an application version that is associated with a running environment.", + "DeleteConfigurationTemplate": "

    Deletes the specified configuration template.

    When you launch an environment using a configuration template, the environment gets a copy of the template. You can delete or modify the environment's copy of the template without affecting the running environment.", + "DeleteEnvironmentConfiguration": "

    Deletes the draft configuration associated with the running environment.

    Updating a running environment with any configuration changes creates a draft configuration set. You can get the draft configuration using DescribeConfigurationSettings while the update is in progress or if the update fails. The DeploymentStatus for the draft configuration indicates whether the deployment is in process or has failed. The draft configuration remains in existence until it is deleted with this action.

    ", + "DescribeApplicationVersions": "

    Retrieve a list of application versions stored in your AWS Elastic Beanstalk storage bucket.

    ", + "DescribeApplications": "

    Returns the descriptions of existing applications.

    ", + "DescribeConfigurationOptions": "

    Describes the configuration options that are used in a particular configuration template or environment, or that a specified solution stack defines. The description includes the values the options, their default values, and an indication of the required action on a running environment if an option value is changed.

    ", + "DescribeConfigurationSettings": "

    Returns a description of the settings for the specified configuration set, that is, either a configuration template or the configuration set associated with a running environment.

    When describing the settings for the configuration set associated with a running environment, it is possible to receive two sets of setting descriptions. One is the deployed configuration set, and the other is a draft configuration of an environment that is either in the process of deployment or that failed to deploy.

    Related Topics

    ", + "DescribeEnvironmentHealth": "

    Returns information about the overall health of the specified environment. The DescribeEnvironmentHealth operation is only available with AWS Elastic Beanstalk Enhanced Health.

    ", + "DescribeEnvironmentResources": "

    Returns AWS resources for this environment.

    ", + "DescribeEnvironments": "

    Returns descriptions for existing environments.

    ", + "DescribeEvents": "

    Returns list of event descriptions matching criteria up to the last 6 weeks.

    This action returns the most recent 1,000 events from the specified NextToken. ", + "DescribeInstancesHealth": "

    Returns more detailed information about the health of the specified instances (for example, CPU utilization, load average, and causes). The DescribeInstancesHealth operation is only available with AWS Elastic Beanstalk Enhanced Health.

    ", + "ListAvailableSolutionStacks": "

    Returns a list of the available solution stack names.

    ", + "RebuildEnvironment": "

    Deletes and recreates all of the AWS resources (for example: the Auto Scaling group, load balancer, etc.) for a specified environment and forces a restart.

    ", + "RequestEnvironmentInfo": "

    Initiates a request to compile the specified type of information of the deployed environment.

    Setting the InfoType to tail compiles the last lines from the application server log files of every Amazon EC2 instance in your environment.

    Setting the InfoType to bundle compresses the application server log files for every Amazon EC2 instance into a .zip file. Legacy and .NET containers do not support bundle logs.

    Use RetrieveEnvironmentInfo to obtain the set of logs.

    Related Topics

    ", + "RestartAppServer": "

    Causes the environment to restart the application container server running on each Amazon EC2 instance.

    ", + "RetrieveEnvironmentInfo": "

    Retrieves the compiled information from a RequestEnvironmentInfo request.

    Related Topics

    ", + "SwapEnvironmentCNAMEs": "

    Swaps the CNAMEs of two environments.

    ", + "TerminateEnvironment": "

    Terminates the specified environment.

    ", + "UpdateApplication": "

    Updates the specified application to have the specified properties.

    If a property (for example, description) is not provided, the value remains unchanged. To clear these properties, specify an empty string. ", + "UpdateApplicationVersion": "

    Updates the specified application version to have the specified properties.

    If a property (for example, description) is not provided, the value remains unchanged. To clear properties, specify an empty string. ", + "UpdateConfigurationTemplate": "

    Updates the specified configuration template to have the specified properties or configuration option values.

    If a property (for example, ApplicationName) is not provided, its value remains unchanged. To clear such properties, specify an empty string.

    Related Topics

    ", + "UpdateEnvironment": "

    Updates the environment description, deploys a new application version, updates the configuration settings to an entirely new configuration template, or updates select configuration option values in the running environment.

    Attempting to update both the release and configuration is not allowed and AWS Elastic Beanstalk returns an InvalidParameterCombination error.

    When updating the configuration settings to a new template or individual settings, a draft configuration is created and DescribeConfigurationSettings for this environment returns two setting descriptions with different DeploymentStatus values.

    ", + "ValidateConfigurationSettings": "

    Takes a set of configuration settings and either a configuration template or environment, and determines whether those values are valid.

    This action returns a list of messages indicating any errors or warnings associated with the selection of option values.

    " + }, + "shapes": { + "AbortEnvironmentUpdateMessage": { + "base": "

    ", + "refs": { + } + }, + "AbortableOperationInProgress": { + "base": null, + "refs": { + "EnvironmentDescription$AbortableOperationInProgress": "

    Indicates if there is an in-progress environment configuration update or application version deployment that you can cancel.

    true: There is an update in progress.

    false: There are no updates currently in progress.

    " + } + }, + "ApplicationDescription": { + "base": "

    Describes the properties of an application.

    ", + "refs": { + "ApplicationDescriptionList$member": null, + "ApplicationDescriptionMessage$Application": "

    The ApplicationDescription of the application.

    " + } + }, + "ApplicationDescriptionList": { + "base": null, + "refs": { + "ApplicationDescriptionsMessage$Applications": "

    This parameter contains a list of ApplicationDescription.

    " + } + }, + "ApplicationDescriptionMessage": { + "base": "

    Result message containing a single description of an application.

    ", + "refs": { + } + }, + "ApplicationDescriptionsMessage": { + "base": "

    Result message containing a list of application descriptions.

    ", + "refs": { + } + }, + "ApplicationMetrics": { + "base": "

    Represents the application metrics for a specified environment.

    ", + "refs": { + "DescribeEnvironmentHealthResult$ApplicationMetrics": null, + "SingleInstanceHealth$ApplicationMetrics": null + } + }, + "ApplicationName": { + "base": null, + "refs": { + "ApplicationDescription$ApplicationName": "

    The name of the application.

    ", + "ApplicationNamesList$member": null, + "ApplicationVersionDescription$ApplicationName": "

    The name of the application associated with this release.

    ", + "ComposeEnvironmentsMessage$ApplicationName": "

    The name of the application to which the specified source bundles belong.

    ", + "ConfigurationSettingsDescription$ApplicationName": "

    The name of the application associated with this configuration set.

    ", + "CreateApplicationMessage$ApplicationName": "

    The name of the application.

    Constraint: This name must be unique within your account. If the specified name already exists, the action returns an InvalidParameterValue error.

    ", + "CreateApplicationVersionMessage$ApplicationName": "

    The name of the application. If no application is found with this name, and AutoCreateApplication is false, returns an InvalidParameterValue error.

    ", + "CreateConfigurationTemplateMessage$ApplicationName": "

    The name of the application to associate with this configuration template. If no application is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    ", + "CreateEnvironmentMessage$ApplicationName": "

    The name of the application that contains the version to be deployed.

    If no application is found with this name, CreateEnvironment returns an InvalidParameterValue error.

    ", + "DeleteApplicationMessage$ApplicationName": "

    The name of the application to delete.

    ", + "DeleteApplicationVersionMessage$ApplicationName": "

    The name of the application to delete releases from.

    ", + "DeleteConfigurationTemplateMessage$ApplicationName": "

    The name of the application to delete the configuration template from.

    ", + "DeleteEnvironmentConfigurationMessage$ApplicationName": "

    The name of the application the environment is associated with.

    ", + "DescribeApplicationVersionsMessage$ApplicationName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to only include ones that are associated with the specified application.

    ", + "DescribeConfigurationOptionsMessage$ApplicationName": "

    The name of the application associated with the configuration template or environment. Only needed if you want to describe the configuration options associated with either the configuration template or environment.

    ", + "DescribeConfigurationSettingsMessage$ApplicationName": "

    The application for the environment or configuration template.

    ", + "DescribeEnvironmentsMessage$ApplicationName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that are associated with this application.

    ", + "DescribeEventsMessage$ApplicationName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those associated with this application.

    ", + "EnvironmentDescription$ApplicationName": "

    The name of the application associated with this environment.

    ", + "EventDescription$ApplicationName": "

    The application associated with the event.

    ", + "SourceConfiguration$ApplicationName": "

    The name of the application associated with the configuration.

    ", + "UpdateApplicationMessage$ApplicationName": "

    The name of the application to update. If no such application is found, UpdateApplication returns an InvalidParameterValue error.

    ", + "UpdateApplicationVersionMessage$ApplicationName": "

    The name of the application associated with this version.

    If no application is found with this name, UpdateApplication returns an InvalidParameterValue error.

    ", + "UpdateConfigurationTemplateMessage$ApplicationName": "

    The name of the application associated with the configuration template to update.

    If no application is found with this name, UpdateConfigurationTemplate returns an InvalidParameterValue error.

    ", + "UpdateEnvironmentMessage$ApplicationName": "

    The name of the application with which the environment is associated.

    ", + "ValidateConfigurationSettingsMessage$ApplicationName": "

    The name of the application that the configuration template or environment belongs to.

    " + } + }, + "ApplicationNamesList": { + "base": null, + "refs": { + "DescribeApplicationsMessage$ApplicationNames": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to only include those with the specified names.

    " + } + }, + "ApplicationVersionDescription": { + "base": "

    Describes the properties of an application version.

    ", + "refs": { + "ApplicationVersionDescriptionList$member": null, + "ApplicationVersionDescriptionMessage$ApplicationVersion": "

    The ApplicationVersionDescription of the application version.

    " + } + }, + "ApplicationVersionDescriptionList": { + "base": null, + "refs": { + "ApplicationVersionDescriptionsMessage$ApplicationVersions": "

    List of ApplicationVersionDescription objects sorted by order of creation.

    " + } + }, + "ApplicationVersionDescriptionMessage": { + "base": "

    Result message wrapping a single description of an application version.

    ", + "refs": { + } + }, + "ApplicationVersionDescriptionsMessage": { + "base": "

    Result message wrapping a list of application version descriptions.

    ", + "refs": { + } + }, + "ApplicationVersionProccess": { + "base": null, + "refs": { + "CreateApplicationVersionMessage$Process": "

    Preprocesses and validates the environment manifest and configuration files in the source bundle. Validating configuration files can identify issues prior to deploying the application version to an environment.

    " + } + }, + "ApplicationVersionStatus": { + "base": null, + "refs": { + "ApplicationVersionDescription$Status": "

    The processing status of the application version.

    " + } + }, + "AutoCreateApplication": { + "base": null, + "refs": { + "CreateApplicationVersionMessage$AutoCreateApplication": "

    Determines how the system behaves if the specified application for this version does not already exist:

    • true : Automatically creates the specified application for this release if it does not already exist.
    • false : Throws an InvalidParameterValue if the specified application for this release does not already exist.

    Default: false

    Valid Values: true | false

    " + } + }, + "AutoScalingGroup": { + "base": "

    Describes an Auto Scaling launch configuration.

    ", + "refs": { + "AutoScalingGroupList$member": null + } + }, + "AutoScalingGroupList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$AutoScalingGroups": "

    The AutoScalingGroups used by this environment.

    " + } + }, + "AvailableSolutionStackDetailsList": { + "base": null, + "refs": { + "ListAvailableSolutionStacksResultMessage$SolutionStackDetails": "

    A list of available solution stacks and their SolutionStackDescription.

    " + } + }, + "AvailableSolutionStackNamesList": { + "base": null, + "refs": { + "ListAvailableSolutionStacksResultMessage$SolutionStacks": "

    A list of available solution stacks.

    " + } + }, + "CPUUtilization": { + "base": "

    Represents CPU utilization information from the specified instance that belongs to the AWS Elastic Beanstalk environment. Use the instanceId property to specify the application instance for which you'd like to return data.

    ", + "refs": { + "SystemStatus$CPUUtilization": null + } + }, + "Cause": { + "base": null, + "refs": { + "Causes$member": null + } + }, + "Causes": { + "base": null, + "refs": { + "DescribeEnvironmentHealthResult$Causes": "

    Returns potential causes for the reported status.

    ", + "SingleInstanceHealth$Causes": "

    Represents the causes, which provide more information about the current health status.

    " + } + }, + "CheckDNSAvailabilityMessage": { + "base": "

    Results message indicating whether a CNAME is available.

    ", + "refs": { + } + }, + "CheckDNSAvailabilityResultMessage": { + "base": "

    Indicates if the specified CNAME is available.

    ", + "refs": { + } + }, + "CnameAvailability": { + "base": null, + "refs": { + "CheckDNSAvailabilityResultMessage$Available": "

    Indicates if the specified CNAME is available:

    • true : The CNAME is available.
    • false : The CNAME is not available.
    " + } + }, + "ComposeEnvironmentsMessage": { + "base": "

    ", + "refs": { + } + }, + "ConfigurationDeploymentStatus": { + "base": null, + "refs": { + "ConfigurationSettingsDescription$DeploymentStatus": "

    If this configuration set is associated with an environment, the DeploymentStatus parameter indicates the deployment status of this configuration set:

    • null: This configuration is not associated with a running environment.
    • pending: This is a draft configuration that is not deployed to the associated environment but is in the process of deploying.
    • deployed: This is the configuration that is currently deployed to the associated running environment.
    • failed: This is a draft configuration that failed to successfully deploy.
    " + } + }, + "ConfigurationOptionDefaultValue": { + "base": null, + "refs": { + "ConfigurationOptionDescription$DefaultValue": "

    The default value for this configuration option.

    " + } + }, + "ConfigurationOptionDescription": { + "base": "

    Describes the possible values for a configuration option.

    ", + "refs": { + "ConfigurationOptionDescriptionsList$member": null + } + }, + "ConfigurationOptionDescriptionsList": { + "base": null, + "refs": { + "ConfigurationOptionsDescription$Options": "

    A list of ConfigurationOptionDescription.

    " + } + }, + "ConfigurationOptionName": { + "base": null, + "refs": { + "ConfigurationOptionDescription$Name": "

    The name of the configuration option.

    ", + "ConfigurationOptionSetting$OptionName": "

    The name of the configuration option.

    ", + "OptionSpecification$OptionName": "

    The name of the configuration option.

    ", + "ValidationMessage$OptionName": "

    " + } + }, + "ConfigurationOptionPossibleValue": { + "base": null, + "refs": { + "ConfigurationOptionPossibleValues$member": null + } + }, + "ConfigurationOptionPossibleValues": { + "base": null, + "refs": { + "ConfigurationOptionDescription$ValueOptions": "

    If specified, values for the configuration option are selected from this list.

    " + } + }, + "ConfigurationOptionSetting": { + "base": "

    A specification identifying an individual configuration option along with its current value. For a list of possible option values, go to Option Values in the AWS Elastic Beanstalk Developer Guide.

    ", + "refs": { + "ConfigurationOptionSettingsList$member": null + } + }, + "ConfigurationOptionSettingsList": { + "base": null, + "refs": { + "ConfigurationSettingsDescription$OptionSettings": "

    A list of the configuration options and their values in this configuration set.

    ", + "CreateConfigurationTemplateMessage$OptionSettings": "

    If specified, AWS Elastic Beanstalk sets the specified configuration option to the requested value. The new value overrides the value obtained from the solution stack or the source configuration template.

    ", + "CreateEnvironmentMessage$OptionSettings": "

    If specified, AWS Elastic Beanstalk sets the specified configuration options to the requested value in the configuration set for the new environment. These override the values obtained from the solution stack or the configuration template.

    ", + "UpdateConfigurationTemplateMessage$OptionSettings": "

    A list of configuration option settings to update with the new specified option value.

    ", + "UpdateEnvironmentMessage$OptionSettings": "

    If specified, AWS Elastic Beanstalk updates the configuration set associated with the running environment and sets the specified configuration options to the requested value.

    ", + "ValidateConfigurationSettingsMessage$OptionSettings": "

    A list of the options and desired values to evaluate.

    " + } + }, + "ConfigurationOptionSeverity": { + "base": null, + "refs": { + "ConfigurationOptionDescription$ChangeSeverity": "

    An indication of which action is required if the value for this configuration option changes:

    • NoInterruption : There is no interruption to the environment or application availability.
    • RestartEnvironment : The environment is entirely restarted, all AWS resources are deleted and recreated, and the environment is unavailable during the process.
    • RestartApplicationServer : The environment is available the entire time. However, a short application outage occurs when the application servers on the running Amazon EC2 instances are restarted.
    " + } + }, + "ConfigurationOptionValue": { + "base": null, + "refs": { + "ConfigurationOptionSetting$Value": "

    The current value for the configuration option.

    " + } + }, + "ConfigurationOptionValueType": { + "base": null, + "refs": { + "ConfigurationOptionDescription$ValueType": "

    An indication of which type of values this option has and whether it is allowable to select one or more than one of the possible values:

    • Scalar : Values for this option are a single selection from the possible values, or an unformatted string, or numeric value governed by the MIN/MAX/Regex constraints.
    • List : Values for this option are multiple selections from the possible values.
    • Boolean : Values for this option are either true or false .
    • Json : Values for this option are a JSON representation of a ConfigDocument.
    " + } + }, + "ConfigurationOptionsDescription": { + "base": "

    Describes the settings for a specified configuration set.

    ", + "refs": { + } + }, + "ConfigurationSettingsDescription": { + "base": "

    Describes the settings for a configuration set.

    ", + "refs": { + "ConfigurationSettingsDescriptionList$member": null + } + }, + "ConfigurationSettingsDescriptionList": { + "base": null, + "refs": { + "ConfigurationSettingsDescriptions$ConfigurationSettings": "

    A list of ConfigurationSettingsDescription.

    " + } + }, + "ConfigurationSettingsDescriptions": { + "base": "

    The results from a request to change the configuration settings of an environment.

    ", + "refs": { + } + }, + "ConfigurationSettingsValidationMessages": { + "base": "

    Provides a list of validation messages.

    ", + "refs": { + } + }, + "ConfigurationTemplateName": { + "base": null, + "refs": { + "ConfigurationSettingsDescription$TemplateName": "

    If not null, the name of the configuration template for this configuration set.

    ", + "ConfigurationTemplateNamesList$member": null, + "CreateConfigurationTemplateMessage$TemplateName": "

    The name of the configuration template.

    Constraint: This name must be unique per application.

    Default: If a configuration template already exists with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    ", + "CreateEnvironmentMessage$TemplateName": "

    The name of the configuration template to use in deployment. If no configuration template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    Condition: You must specify either this parameter or a SolutionStackName, but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DeleteConfigurationTemplateMessage$TemplateName": "

    The name of the configuration template to delete.

    ", + "DescribeConfigurationOptionsMessage$TemplateName": "

    The name of the configuration template whose configuration options you want to describe.

    ", + "DescribeConfigurationSettingsMessage$TemplateName": "

    The name of the configuration template to describe.

    Conditional: You must specify either this parameter or an EnvironmentName, but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeEventsMessage$TemplateName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that are associated with this environment configuration.

    ", + "EnvironmentDescription$TemplateName": "

    The name of the configuration template used to originally launch this environment.

    ", + "EventDescription$TemplateName": "

    The name of the configuration associated with this event.

    ", + "SourceConfiguration$TemplateName": "

    The name of the configuration template.

    ", + "UpdateConfigurationTemplateMessage$TemplateName": "

    The name of the configuration template to update.

    If no configuration template is found with this name, UpdateConfigurationTemplate returns an InvalidParameterValue error.

    ", + "UpdateEnvironmentMessage$TemplateName": "

    If this parameter is specified, AWS Elastic Beanstalk deploys this configuration template to the environment. If no such configuration template is found, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    ", + "ValidateConfigurationSettingsMessage$TemplateName": "

    The name of the configuration template to validate the settings against.

    Condition: You cannot specify both this and an environment name.

    " + } + }, + "ConfigurationTemplateNamesList": { + "base": null, + "refs": { + "ApplicationDescription$ConfigurationTemplates": "

    The names of the configuration templates associated with this application.

    " + } + }, + "CreateApplicationMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateApplicationVersionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateConfigurationTemplateMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEnvironmentMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateStorageLocationResultMessage": { + "base": "

    Results of a CreateStorageLocationResult call.

    ", + "refs": { + } + }, + "CreationDate": { + "base": null, + "refs": { + "ApplicationDescription$DateCreated": "

    The date when the application was created.

    ", + "ApplicationVersionDescription$DateCreated": "

    The creation date of the application version.

    ", + "ConfigurationSettingsDescription$DateCreated": "

    The date (in UTC time) when this configuration set was created.

    ", + "EnvironmentDescription$DateCreated": "

    The creation date for this environment.

    " + } + }, + "DNSCname": { + "base": null, + "refs": { + "CheckDNSAvailabilityResultMessage$FullyQualifiedCNAME": "

    The fully qualified CNAME to reserve when CreateEnvironment is called with the provided prefix.

    ", + "EnvironmentDescription$CNAME": "

    The URL to the CNAME for this environment.

    " + } + }, + "DNSCnamePrefix": { + "base": null, + "refs": { + "CheckDNSAvailabilityMessage$CNAMEPrefix": "

    The prefix used when this CNAME is reserved.

    ", + "CreateEnvironmentMessage$CNAMEPrefix": "

    If specified, the environment attempts to use this value as the prefix for the CNAME. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.

    " + } + }, + "DeleteApplicationMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteApplicationVersionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteConfigurationTemplateMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEnvironmentConfigurationMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteSourceBundle": { + "base": null, + "refs": { + "DeleteApplicationVersionMessage$DeleteSourceBundle": "

    Indicates whether to delete the associated source bundle from Amazon S3:

    • true: An attempt is made to delete the associated Amazon S3 source bundle specified at time of creation.
    • false: No action is taken on the Amazon S3 source bundle specified at time of creation.

    Valid Values: true | false

    " + } + }, + "DescribeApplicationVersionsMessage": { + "base": "

    Result message containing a list of configuration descriptions.

    ", + "refs": { + } + }, + "DescribeApplicationsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeConfigurationOptionsMessage": { + "base": "

    Result message containing a list of application version descriptions.

    ", + "refs": { + } + }, + "DescribeConfigurationSettingsMessage": { + "base": "

    Result message containing all of the configuration settings for a specified solution stack or configuration template.

    ", + "refs": { + } + }, + "DescribeEnvironmentHealthRequest": { + "base": "

    See the example below to learn how to create a request body.

    ", + "refs": { + } + }, + "DescribeEnvironmentHealthResult": { + "base": "

    See the example below for a sample response.

    ", + "refs": { + } + }, + "DescribeEnvironmentResourcesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEnvironmentsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeInstancesHealthRequest": { + "base": "

    See the example below to learn how to create a request body.

    ", + "refs": { + } + }, + "DescribeInstancesHealthResult": { + "base": "

    See the example below for a sample response.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ApplicationDescription$Description": "

    User-defined description of the application.

    ", + "ApplicationVersionDescription$Description": "

    The description of this application version.

    ", + "ConfigurationSettingsDescription$Description": "

    Describes this configuration set.

    ", + "CreateApplicationMessage$Description": "

    Describes the application.

    ", + "CreateApplicationVersionMessage$Description": "

    Describes this version.

    ", + "CreateConfigurationTemplateMessage$Description": "

    Describes this configuration.

    ", + "CreateEnvironmentMessage$Description": "

    Describes this environment.

    ", + "EnvironmentDescription$Description": "

    Describes this environment.

    ", + "UpdateApplicationMessage$Description": "

    A new description for the application.

    Default: If not specified, AWS Elastic Beanstalk does not update the description.

    ", + "UpdateApplicationVersionMessage$Description": "

    A new description for this release.

    ", + "UpdateConfigurationTemplateMessage$Description": "

    A new description for the configuration.

    ", + "UpdateEnvironmentMessage$Description": "

    If this parameter is specified, AWS Elastic Beanstalk updates the description of this environment.

    " + } + }, + "Ec2InstanceId": { + "base": null, + "refs": { + "EnvironmentInfoDescription$Ec2InstanceId": "

    The Amazon EC2 Instance ID for this information.

    " + } + }, + "ElasticBeanstalkServiceException": { + "base": "

    A generic service exception has occurred.

    ", + "refs": { + } + }, + "EndpointURL": { + "base": null, + "refs": { + "EnvironmentDescription$EndpointURL": "

    For load-balanced, autoscaling environments, the URL to the LoadBalancer. For single-instance environments, the IP address of the instance.

    " + } + }, + "EnvironmentDescription": { + "base": "

    Describes the properties of an environment.

    ", + "refs": { + "EnvironmentDescriptionsList$member": null + } + }, + "EnvironmentDescriptionsList": { + "base": null, + "refs": { + "EnvironmentDescriptionsMessage$Environments": "

    Returns an EnvironmentDescription list.

    " + } + }, + "EnvironmentDescriptionsMessage": { + "base": "

    Result message containing a list of environment descriptions.

    ", + "refs": { + } + }, + "EnvironmentHealth": { + "base": null, + "refs": { + "DescribeEnvironmentHealthResult$Status": "

    Returns the health status value of the environment. For more information, see Health Colors and Statuses.

    ", + "EnvironmentDescription$Health": "

    Describes the health status of the environment. AWS Elastic Beanstalk indicates the failure levels for a running environment:

    • Red: Indicates the environment is not responsive. Occurs when three or more consecutive failures occur for an environment.
    • Yellow: Indicates that something is wrong. Occurs when two consecutive failures occur for an environment.
    • Green: Indicates the environment is healthy and fully functional.
    • Grey: Default health for a new environment. The environment is not fully launched and health checks have not started or health checks are suspended during an UpdateEnvironment or RestartEnvironment request.

    Default: Grey

    " + } + }, + "EnvironmentHealthAttribute": { + "base": null, + "refs": { + "EnvironmentHealthAttributes$member": null + } + }, + "EnvironmentHealthAttributes": { + "base": null, + "refs": { + "DescribeEnvironmentHealthRequest$AttributeNames": "

    Specifies the response elements you wish to receive. If no attribute names are specified, AWS Elastic Beanstalk only returns the name of the environment.

    " + } + }, + "EnvironmentHealthStatus": { + "base": null, + "refs": { + "EnvironmentDescription$HealthStatus": "

    Returns the health status of the application running in your environment. For more information, see Health Colors and Statuses.

    " + } + }, + "EnvironmentId": { + "base": null, + "refs": { + "AbortEnvironmentUpdateMessage$EnvironmentId": "

    This specifies the ID of the environment with the in-progress update that you want to cancel.

    ", + "CreateConfigurationTemplateMessage$EnvironmentId": "

    The ID of the environment used with this configuration template.

    ", + "DescribeEnvironmentHealthRequest$EnvironmentId": "

    Specifies the AWS Elastic Beanstalk environment ID.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "DescribeEnvironmentResourcesMessage$EnvironmentId": "

    The ID of the environment to retrieve AWS resource usage data.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "DescribeEventsMessage$EnvironmentId": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this environment.

    ", + "DescribeInstancesHealthRequest$EnvironmentId": "

    Specifies the AWS Elastic Beanstalk environment ID.

    ", + "EnvironmentDescription$EnvironmentId": "

    The ID of this environment.

    ", + "EnvironmentIdList$member": null, + "RebuildEnvironmentMessage$EnvironmentId": "

    The ID of the environment to rebuild.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "RequestEnvironmentInfoMessage$EnvironmentId": "

    The ID of the environment of the requested data.

    If no such environment is found, RequestEnvironmentInfo returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "RestartAppServerMessage$EnvironmentId": "

    The ID of the environment to restart the server for.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "RetrieveEnvironmentInfoMessage$EnvironmentId": "

    The ID of the data's environment.

    If no such environment is found, returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "SwapEnvironmentCNAMEsMessage$SourceEnvironmentId": "

    The ID of the source environment.

    Condition: You must specify at least the SourceEnvironmentID or the SourceEnvironmentName. You may also specify both. If you specify the SourceEnvironmentId, you must specify the DestinationEnvironmentId.

    ", + "SwapEnvironmentCNAMEsMessage$DestinationEnvironmentId": "

    The ID of the destination environment.

    Condition: You must specify at least the DestinationEnvironmentID or the DestinationEnvironmentName. You may also specify both. You must specify the SourceEnvironmentId with the DestinationEnvironmentId.

    ", + "TerminateEnvironmentMessage$EnvironmentId": "

    The ID of the environment to terminate.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "UpdateEnvironmentMessage$EnvironmentId": "

    The ID of the environment to update.

    If no environment with this ID exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentName, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    " + } + }, + "EnvironmentIdList": { + "base": null, + "refs": { + "DescribeEnvironmentsMessage$EnvironmentIds": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that have the specified IDs.

    " + } + }, + "EnvironmentInfoDescription": { + "base": "

    The information retrieved from the Amazon EC2 instances.

    ", + "refs": { + "EnvironmentInfoDescriptionList$member": null + } + }, + "EnvironmentInfoDescriptionList": { + "base": null, + "refs": { + "RetrieveEnvironmentInfoResultMessage$EnvironmentInfo": "

    The EnvironmentInfoDescription of the environment.

    " + } + }, + "EnvironmentInfoType": { + "base": null, + "refs": { + "EnvironmentInfoDescription$InfoType": "

    The type of information retrieved.

    ", + "RequestEnvironmentInfoMessage$InfoType": "

    The type of information to request.

    ", + "RetrieveEnvironmentInfoMessage$InfoType": "

    The type of information to retrieve.

    " + } + }, + "EnvironmentLink": { + "base": "

    A link to another environment, defined in the environment's manifest. Links provide connection information in system properties that can be used to connect to another environment in the same group. See Environment Manifest (env.yaml) for details.

    ", + "refs": { + "EnvironmentLinks$member": null + } + }, + "EnvironmentLinks": { + "base": null, + "refs": { + "EnvironmentDescription$EnvironmentLinks": "

    A list of links to other environments in the same group.

    " + } + }, + "EnvironmentName": { + "base": null, + "refs": { + "AbortEnvironmentUpdateMessage$EnvironmentName": "

    This specifies the name of the environment with the in-progress update that you want to cancel.

    ", + "ConfigurationSettingsDescription$EnvironmentName": "

    If not null, the name of the environment for this configuration set.

    ", + "CreateEnvironmentMessage$EnvironmentName": "

    A unique name for the deployment environment. Used in the application URL.

    Constraint: Must be from 4 to 23 characters in length. The name can contain only letters, numbers, and hyphens. It cannot start or end with a hyphen. This name must be unique in your account. If the specified name already exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    Default: If the CNAME parameter is not specified, the environment name becomes part of the CNAME, and therefore part of the visible URL for your application.

    ", + "DeleteEnvironmentConfigurationMessage$EnvironmentName": "

    The name of the environment to delete the draft configuration from.

    ", + "DescribeConfigurationOptionsMessage$EnvironmentName": "

    The name of the environment whose configuration options you want to describe.

    ", + "DescribeConfigurationSettingsMessage$EnvironmentName": "

    The name of the environment to describe.

    Condition: You must specify either this or a TemplateName, but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination error. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "DescribeEnvironmentHealthRequest$EnvironmentName": "

    Specifies the AWS Elastic Beanstalk environment name.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "DescribeEnvironmentHealthResult$EnvironmentName": "

    The AWS Elastic Beanstalk environment name.

    ", + "DescribeEnvironmentResourcesMessage$EnvironmentName": "

    The name of the environment to retrieve AWS resource usage data.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "DescribeEventsMessage$EnvironmentName": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this environment.

    ", + "DescribeInstancesHealthRequest$EnvironmentName": "

    Specifies the AWS Elastic Beanstalk environment name.

    ", + "EnvironmentDescription$EnvironmentName": "

    The name of this environment.

    ", + "EnvironmentNamesList$member": null, + "EnvironmentResourceDescription$EnvironmentName": "

    The name of the environment.

    ", + "EventDescription$EnvironmentName": "

    The name of the environment associated with this event.

    ", + "RebuildEnvironmentMessage$EnvironmentName": "

    The name of the environment to rebuild.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "RequestEnvironmentInfoMessage$EnvironmentName": "

    The name of the environment of the requested data.

    If no such environment is found, RequestEnvironmentInfo returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "RestartAppServerMessage$EnvironmentName": "

    The name of the environment to restart the server for.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "RetrieveEnvironmentInfoMessage$EnvironmentName": "

    The name of the data's environment.

    If no such environment is found, returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "SwapEnvironmentCNAMEsMessage$SourceEnvironmentName": "

    The name of the source environment.

    Condition: You must specify at least the SourceEnvironmentID or the SourceEnvironmentName. You may also specify both. If you specify the SourceEnvironmentName, you must specify the DestinationEnvironmentName.

    ", + "SwapEnvironmentCNAMEsMessage$DestinationEnvironmentName": "

    The name of the destination environment.

    Condition: You must specify at least the DestinationEnvironmentID or the DestinationEnvironmentName. You may also specify both. You must specify the SourceEnvironmentName with the DestinationEnvironmentName.

    ", + "TerminateEnvironmentMessage$EnvironmentName": "

    The name of the environment to terminate.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "UpdateEnvironmentMessage$EnvironmentName": "

    The name of the environment to update. If no environment with this name exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    Condition: You must specify either this or an EnvironmentId, or both. If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter error.

    ", + "ValidateConfigurationSettingsMessage$EnvironmentName": "

    The name of the environment to validate the settings against.

    Condition: You cannot specify both this and a configuration template name.

    " + } + }, + "EnvironmentNamesList": { + "base": null, + "refs": { + "DescribeEnvironmentsMessage$EnvironmentNames": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that have the specified names.

    " + } + }, + "EnvironmentResourceDescription": { + "base": "

    Describes the AWS resources in use by this environment. This data is live.

    ", + "refs": { + "EnvironmentResourceDescriptionsMessage$EnvironmentResources": "

    A list of EnvironmentResourceDescription.

    " + } + }, + "EnvironmentResourceDescriptionsMessage": { + "base": "

    Result message containing a list of environment resource descriptions.

    ", + "refs": { + } + }, + "EnvironmentResourcesDescription": { + "base": "

    Describes the AWS resources in use by this environment. This data is not live data.

    ", + "refs": { + "EnvironmentDescription$Resources": "

    The description of the AWS resources used by this environment.

    " + } + }, + "EnvironmentStatus": { + "base": null, + "refs": { + "EnvironmentDescription$Status": "

    The current operational status of the environment:

    • Launching: Environment is in the process of initial deployment.
    • Updating: Environment is in the process of updating its configuration settings or application version.
    • Ready: Environment is available to have an action performed on it, such as update or terminate.
    • Terminating: Environment is in the shut-down process.
    • Terminated: Environment is not running.
    " + } + }, + "EnvironmentTier": { + "base": "

    Describes the properties of an environment tier

    ", + "refs": { + "CreateEnvironmentMessage$Tier": "

    This specifies the tier to use for creating this environment.

    ", + "EnvironmentDescription$Tier": "

    Describes the current tier of this environment.

    ", + "UpdateEnvironmentMessage$Tier": "

    This specifies the tier to use to update the environment.

    Condition: At this time, if you change the tier version, name, or type, AWS Elastic Beanstalk returns InvalidParameterValue error.

    " + } + }, + "EventDate": { + "base": null, + "refs": { + "EventDescription$EventDate": "

    The date when the event occurred.

    " + } + }, + "EventDescription": { + "base": "

    Describes an event.

    ", + "refs": { + "EventDescriptionList$member": null + } + }, + "EventDescriptionList": { + "base": null, + "refs": { + "EventDescriptionsMessage$Events": "

    A list of EventDescription.

    " + } + }, + "EventDescriptionsMessage": { + "base": "

    Result message wrapping a list of event descriptions.

    ", + "refs": { + } + }, + "EventMessage": { + "base": null, + "refs": { + "EventDescription$Message": "

    The event message.

    " + } + }, + "EventSeverity": { + "base": null, + "refs": { + "DescribeEventsMessage$Severity": "

    If specified, limits the events returned from this call to include only those with the specified severity or higher.

    ", + "EventDescription$Severity": "

    The severity level of this event.

    " + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "ElasticBeanstalkServiceException$message": "

    The exception error message.

    " + } + }, + "FileTypeExtension": { + "base": null, + "refs": { + "SolutionStackFileTypeList$member": null + } + }, + "ForceTerminate": { + "base": null, + "refs": { + "TerminateEnvironmentMessage$ForceTerminate": "

    Terminates the target environment even if another environment in the same group is dependent on it.

    " + } + }, + "GroupName": { + "base": null, + "refs": { + "ComposeEnvironmentsMessage$GroupName": "

    The name of the group to which the target environments belong. Specify a group name only if the environment name defined in each target environment's manifest ends with a + (plus) character. See Environment Manifest (env.yaml) for details.

    ", + "CreateEnvironmentMessage$GroupName": "

    The name of the group to which the target environment belongs. Specify a group name only if the environment's name is specified in an environment manifest and not with the environment name parameter. See Environment Manifest (env.yaml) for details.

    ", + "UpdateEnvironmentMessage$GroupName": "

    The name of the group to which the target environment belongs. Specify a group name only if the environment's name is specified in an environment manifest and not with the environment name or environment ID parameters. See Environment Manifest (env.yaml) for details.

    " + } + }, + "IncludeDeleted": { + "base": null, + "refs": { + "DescribeEnvironmentsMessage$IncludeDeleted": "

    Indicates whether to include deleted environments:

    true: Environments that have been deleted after IncludedDeletedBackTo are displayed.

    false: Do not include deleted environments.

    " + } + }, + "IncludeDeletedBackTo": { + "base": null, + "refs": { + "DescribeEnvironmentsMessage$IncludedDeletedBackTo": "

    If specified when IncludeDeleted is set to true, then environments deleted after this date are displayed.

    " + } + }, + "Instance": { + "base": "

    The description of an Amazon EC2 instance.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceHealthList": { + "base": null, + "refs": { + "DescribeInstancesHealthResult$InstanceHealthList": "

    Contains the response body with information about the health of the instance.

    " + } + }, + "InstanceHealthSummary": { + "base": "

    Represents summary information about the health of an instance. For more information, see Health Colors and Statuses.

    ", + "refs": { + "DescribeEnvironmentHealthResult$InstancesHealth": null + } + }, + "InstanceId": { + "base": null, + "refs": { + "SingleInstanceHealth$InstanceId": "

    The ID of the Amazon EC2 instance.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$Instances": "

    The Amazon EC2 instances used by this environment.

    " + } + }, + "InstancesHealthAttribute": { + "base": null, + "refs": { + "InstancesHealthAttributes$member": null + } + }, + "InstancesHealthAttributes": { + "base": null, + "refs": { + "DescribeInstancesHealthRequest$AttributeNames": "

    Specifies the response elements you wish to receive. If no attribute names are specified, AWS Elastic Beanstalk only returns a list of instances.

    " + } + }, + "InsufficientPrivilegesException": { + "base": "

    The specified account does not have sufficient privileges for one or more AWS services.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "Listener$Port": "

    The port that is used by the Listener.

    " + } + }, + "InvalidRequestException": { + "base": "

    One or more input parameters is not valid. Please correct the input parameters and try the operation again.

    ", + "refs": { + } + }, + "Latency": { + "base": "

    Represents the average latency for the slowest X percent of requests over the last 10 seconds.

    ", + "refs": { + "ApplicationMetrics$Latency": "

    Represents the average latency for the slowest X percent of requests over the last 10 seconds. Latencies are in seconds with one millisecond resolution.

    " + } + }, + "LaunchConfiguration": { + "base": "

    Describes an Auto Scaling launch configuration.

    ", + "refs": { + "LaunchConfigurationList$member": null + } + }, + "LaunchConfigurationList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$LaunchConfigurations": "

    The Auto Scaling launch configurations in use by this environment.

    " + } + }, + "LaunchedAt": { + "base": null, + "refs": { + "SingleInstanceHealth$LaunchedAt": "

    The time at which the EC2 instance was launched.

    " + } + }, + "ListAvailableSolutionStacksResultMessage": { + "base": "

    A list of available AWS Elastic Beanstalk solution stacks.

    ", + "refs": { + } + }, + "Listener": { + "base": "

    Describes the properties of a Listener for the LoadBalancer.

    ", + "refs": { + "LoadBalancerListenersDescription$member": null + } + }, + "LoadAverage": { + "base": null, + "refs": { + "SystemStatus$LoadAverage": "

    Load average in the last 1-minute and 5-minute periods. For more information, see Operating System Metrics.

    " + } + }, + "LoadAverageValue": { + "base": null, + "refs": { + "LoadAverage$member": null + } + }, + "LoadBalancer": { + "base": "

    Describes a LoadBalancer.

    ", + "refs": { + "LoadBalancerList$member": null + } + }, + "LoadBalancerDescription": { + "base": "

    Describes the details of a LoadBalancer.

    ", + "refs": { + "EnvironmentResourcesDescription$LoadBalancer": "

    Describes the LoadBalancer.

    " + } + }, + "LoadBalancerList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$LoadBalancers": "

    The LoadBalancers in use by this environment.

    " + } + }, + "LoadBalancerListenersDescription": { + "base": null, + "refs": { + "LoadBalancerDescription$Listeners": "

    A list of Listeners used by the LoadBalancer.

    " + } + }, + "MaxRecords": { + "base": null, + "refs": { + "DescribeEventsMessage$MaxRecords": "

    Specifies the maximum number of events that can be returned, beginning with the most recent event.

    " + } + }, + "Message": { + "base": null, + "refs": { + "EnvironmentInfoDescription$Message": "

    The retrieved information.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeInstancesHealthRequest$NextToken": "

    Specifies the next token of the request.

    ", + "DescribeInstancesHealthResult$NextToken": "

    The next token.

    " + } + }, + "NullableDouble": { + "base": null, + "refs": { + "CPUUtilization$User": "

    Percentage of time that the CPU has spent in the User state over the last 10 seconds.

    ", + "CPUUtilization$Nice": "

    Percentage of time that the CPU has spent in the Nice state over the last 10 seconds.

    ", + "CPUUtilization$System": "

    Percentage of time that the CPU has spent in the System state over the last 10 seconds.

    ", + "CPUUtilization$Idle": "

    Percentage of time that the CPU has spent in the Idle state over the last 10 seconds.

    ", + "CPUUtilization$IOWait": "

    Percentage of time that the CPU has spent in the I/O Wait state over the last 10 seconds.

    ", + "CPUUtilization$IRQ": "

    Percentage of time that the CPU has spent in the IRQ state over the last 10 seconds.

    ", + "CPUUtilization$SoftIRQ": "

    Percentage of time that the CPU has spent in the SoftIRQ state over the last 10 seconds.

    ", + "Latency$P999": "

    The average latency for the slowest 0.1 percent of requests over the last 10 seconds.

    ", + "Latency$P99": "

    The average latency for the slowest 1 percent of requests over the last 10 seconds.

    ", + "Latency$P95": "

    The average latency for the slowest 5 percent of requests over the last 10 seconds.

    ", + "Latency$P90": "

    The average latency for the slowest 10 percent of requests over the last 10 seconds.

    ", + "Latency$P85": "

    The average latency for the slowest 15 percent of requests over the last 10 seconds.

    ", + "Latency$P75": "

    The average latency for the slowest 25 percent of requests over the last 10 seconds.

    ", + "Latency$P50": "

    The average latency for the slowest 50 percent of requests over the last 10 seconds.

    ", + "Latency$P10": "

    The average latency for the slowest 90 percent of requests over the last 10 seconds.

    " + } + }, + "NullableInteger": { + "base": null, + "refs": { + "ApplicationMetrics$Duration": "

    The amount of time that the metrics cover (usually 10 seconds). For example, you might have 5 requests (request_count) within the most recent time slice of 10 seconds (duration).

    ", + "InstanceHealthSummary$NoData": "

    Grey. AWS Elastic Beanstalk and the health agent are reporting no data on an instance.

    ", + "InstanceHealthSummary$Unknown": "

    Grey. AWS Elastic Beanstalk and the health agent are reporting an insufficient amount of data on an instance.

    ", + "InstanceHealthSummary$Pending": "

    Grey. An operation is in progress on an instance within the command timeout.

    ", + "InstanceHealthSummary$Ok": "

    Green. An instance is passing health checks and the health agent is not reporting any problems.

    ", + "InstanceHealthSummary$Info": "

    Green. An operation is in progress on an instance.

    ", + "InstanceHealthSummary$Warning": "

    Yellow. The health agent is reporting a moderate number of request failures or other issues for an instance or environment.

    ", + "InstanceHealthSummary$Degraded": "

    Red. The health agent is reporting a high number of request failures or other issues for an instance or environment.

    ", + "InstanceHealthSummary$Severe": "

    Red. The health agent is reporting a very high number of request failures or other issues for an instance or environment.

    ", + "StatusCodes$Status2xx": "

    The percentage of requests over the last 10 seconds that resulted in a 2xx (200, 201, etc.) status code.

    ", + "StatusCodes$Status3xx": "

    The percentage of requests over the last 10 seconds that resulted in a 3xx (300, 301, etc.) status code.

    ", + "StatusCodes$Status4xx": "

    The percentage of requests over the last 10 seconds that resulted in a 4xx (400, 401, etc.) status code.

    ", + "StatusCodes$Status5xx": "

    The percentage of requests over the last 10 seconds that resulted in a 5xx (500, 501, etc.) status code.

    " + } + }, + "OperationInProgressException": { + "base": "

    Unable to perform the specified operation because another operation that affects an element in this activity is already in progress.

    ", + "refs": { + } + }, + "OptionNamespace": { + "base": null, + "refs": { + "ConfigurationOptionDescription$Namespace": "

    A unique namespace identifying the option's associated AWS resource.

    ", + "ConfigurationOptionSetting$Namespace": "

    A unique namespace identifying the option's associated AWS resource.

    ", + "OptionSpecification$Namespace": "

    A unique namespace identifying the option's associated AWS resource.

    ", + "ValidationMessage$Namespace": "

    " + } + }, + "OptionRestrictionMaxLength": { + "base": null, + "refs": { + "ConfigurationOptionDescription$MaxLength": "

    If specified, the configuration option must be a string value no longer than this value.

    " + } + }, + "OptionRestrictionMaxValue": { + "base": null, + "refs": { + "ConfigurationOptionDescription$MaxValue": "

    If specified, the configuration option must be a numeric value less than this value.

    " + } + }, + "OptionRestrictionMinValue": { + "base": null, + "refs": { + "ConfigurationOptionDescription$MinValue": "

    If specified, the configuration option must be a numeric value greater than this value.

    " + } + }, + "OptionRestrictionRegex": { + "base": "

    A regular expression representing a restriction on a string configuration option value.

    ", + "refs": { + "ConfigurationOptionDescription$Regex": "

    If specified, the configuration option must be a string value that satisfies this regular expression.

    " + } + }, + "OptionSpecification": { + "base": "

    A specification identifying an individual configuration option.

    ", + "refs": { + "OptionsSpecifierList$member": null + } + }, + "OptionsSpecifierList": { + "base": null, + "refs": { + "CreateEnvironmentMessage$OptionsToRemove": "

    A list of custom user-defined configuration options to remove from the configuration set for this new environment.

    ", + "DescribeConfigurationOptionsMessage$Options": "

    If specified, restricts the descriptions to only the specified options.

    ", + "UpdateConfigurationTemplateMessage$OptionsToRemove": "

    A list of configuration options to remove from the configuration set.

    Constraint: You can remove only UserDefined configuration options.

    ", + "UpdateEnvironmentMessage$OptionsToRemove": "

    A list of custom user-defined configuration options to remove from the configuration set for this environment.

    " + } + }, + "Queue": { + "base": "

    Describes a queue.

    ", + "refs": { + "QueueList$member": null + } + }, + "QueueList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$Queues": "

    The queues used by this environment.

    " + } + }, + "RebuildEnvironmentMessage": { + "base": "

    ", + "refs": { + } + }, + "RefreshedAt": { + "base": null, + "refs": { + "DescribeEnvironmentHealthResult$RefreshedAt": "

    The date and time the information was last refreshed.

    ", + "DescribeInstancesHealthResult$RefreshedAt": "

    The date and time the information was last refreshed.

    " + } + }, + "RegexLabel": { + "base": null, + "refs": { + "OptionRestrictionRegex$Label": "

    A unique name representing this regular expression.

    " + } + }, + "RegexPattern": { + "base": null, + "refs": { + "OptionRestrictionRegex$Pattern": "

    The regular expression pattern that a string configuration option value with this restriction must match.

    " + } + }, + "RequestCount": { + "base": null, + "refs": { + "ApplicationMetrics$RequestCount": "

    Average number of requests handled by the web server per second over the last 10 seconds.

    " + } + }, + "RequestEnvironmentInfoMessage": { + "base": "

    ", + "refs": { + } + }, + "RequestId": { + "base": null, + "refs": { + "DescribeEventsMessage$RequestId": "

    If specified, AWS Elastic Beanstalk restricts the described events to include only those associated with this request ID.

    ", + "EventDescription$RequestId": "

    The web service request ID for the activity of this event.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "AutoScalingGroup$Name": "

    The name of the AutoScalingGroup .

    ", + "Instance$Id": "

    The ID of the Amazon EC2 instance.

    ", + "LaunchConfiguration$Name": "

    The name of the launch configuration.

    ", + "LoadBalancer$Name": "

    The name of the LoadBalancer.

    ", + "Trigger$Name": "

    The name of the trigger.

    " + } + }, + "ResourceName": { + "base": null, + "refs": { + "ConfigurationOptionSetting$ResourceName": "

    A unique resource name for a time-based scaling configuration option.

    ", + "OptionSpecification$ResourceName": "

    A unique resource name for a time-based scaling configuration option.

    " + } + }, + "RestartAppServerMessage": { + "base": "

    ", + "refs": { + } + }, + "RetrieveEnvironmentInfoMessage": { + "base": "

    ", + "refs": { + } + }, + "RetrieveEnvironmentInfoResultMessage": { + "base": "

    Result message containing a description of the requested environment info.

    ", + "refs": { + } + }, + "S3Bucket": { + "base": null, + "refs": { + "CreateStorageLocationResultMessage$S3Bucket": "

    The name of the Amazon S3 bucket created.

    ", + "S3Location$S3Bucket": "

    The Amazon S3 bucket where the data is located.

    " + } + }, + "S3Key": { + "base": null, + "refs": { + "S3Location$S3Key": "

    The Amazon S3 key where the data is located.

    " + } + }, + "S3Location": { + "base": "

    A specification of a location in Amazon S3.

    ", + "refs": { + "ApplicationVersionDescription$SourceBundle": "

    The location where the source bundle is located for this version.

    ", + "CreateApplicationVersionMessage$SourceBundle": "

    The Amazon S3 bucket and key that identify the location of the source bundle for this version.

    If data found at the Amazon S3 location exceeds the maximum allowed source bundle size, AWS Elastic Beanstalk returns an InvalidParameterValue error. The maximum size allowed is 512 MB.

    Default: If not specified, AWS Elastic Beanstalk uses a sample application. If only partially specified (for example, a bucket is provided but not the key) or if no data is found at the Amazon S3 location, AWS Elastic Beanstalk returns an InvalidParameterCombination error.

    " + } + }, + "S3LocationNotInServiceRegionException": { + "base": "

    The specified S3 bucket does not belong to the S3 region in which the service is running.

    ", + "refs": { + } + }, + "S3SubscriptionRequiredException": { + "base": "

    The specified account does not have a subscription to Amazon S3.

    ", + "refs": { + } + }, + "SampleTimestamp": { + "base": null, + "refs": { + "EnvironmentInfoDescription$SampleTimestamp": "

    The time stamp when this information was retrieved.

    " + } + }, + "SingleInstanceHealth": { + "base": "

    Represents health information from the specified instance that belongs to the AWS Elastic Beanstalk environment. Use the InstanceId property to specify the application instance for which you'd like to return data.

    ", + "refs": { + "InstanceHealthList$member": null + } + }, + "SolutionStackDescription": { + "base": "

    Describes the solution stack.

    ", + "refs": { + "AvailableSolutionStackDetailsList$member": null + } + }, + "SolutionStackFileTypeList": { + "base": null, + "refs": { + "SolutionStackDescription$PermittedFileTypes": "

    The permitted file types allowed for a solution stack.

    " + } + }, + "SolutionStackName": { + "base": null, + "refs": { + "AvailableSolutionStackNamesList$member": null, + "ConfigurationOptionsDescription$SolutionStackName": "

    The name of the solution stack these configuration options belong to.

    ", + "ConfigurationSettingsDescription$SolutionStackName": "

    The name of the solution stack this configuration set uses.

    ", + "CreateConfigurationTemplateMessage$SolutionStackName": "

    The name of the solution stack used by this configuration. The solution stack specifies the operating system, architecture, and application server for a configuration template. It determines the set of configuration options as well as the possible and default values.

    Use ListAvailableSolutionStacks to obtain a list of available solution stacks.

    A solution stack name or a source configuration parameter must be specified, otherwise AWS Elastic Beanstalk returns an InvalidParameterValue error.

    If a solution stack name is not specified and the source configuration parameter is specified, AWS Elastic Beanstalk uses the same solution stack as the source configuration template.

    ", + "CreateEnvironmentMessage$SolutionStackName": "

    This is an alternative to specifying a template name. If specified, AWS Elastic Beanstalk sets the configuration values to the default values associated with the specified solution stack.

    Condition: You must specify either this or a TemplateName, but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter error.

    ", + "DescribeConfigurationOptionsMessage$SolutionStackName": "

    The name of the solution stack whose configuration options you want to describe.

    ", + "EnvironmentDescription$SolutionStackName": "

    The name of the SolutionStack deployed with this environment.

    ", + "SolutionStackDescription$SolutionStackName": "

    The name of the solution stack.

    ", + "UpdateEnvironmentMessage$SolutionStackName": "

    This specifies the platform version that the environment will run after the environment is updated.

    " + } + }, + "SourceBundleDeletionException": { + "base": "

    Unable to delete the Amazon S3 source bundle associated with the application version. The application version was deleted successfully.

    ", + "refs": { + } + }, + "SourceConfiguration": { + "base": "

    A specification for an environment configuration

    ", + "refs": { + "CreateConfigurationTemplateMessage$SourceConfiguration": "

    If specified, AWS Elastic Beanstalk uses the configuration values from the specified configuration template to create a new configuration.

    Values specified in the OptionSettings parameter of this call overrides any values obtained from the SourceConfiguration.

    If no configuration template is found, returns an InvalidParameterValue error.

    Constraint: If both the solution stack name parameter and the source configuration parameters are specified, the solution stack of the source configuration template must match the specified solution stack name or else AWS Elastic Beanstalk returns an InvalidParameterCombination error.

    " + } + }, + "StatusCodes": { + "base": "

    Represents the percentage of requests over the last 10 seconds that resulted in each type of status code response. For more information, see Status Code Definitions.

    ", + "refs": { + "ApplicationMetrics$StatusCodes": "

    Represents the percentage of requests over the last 10 seconds that resulted in each type of status code response.

    " + } + }, + "String": { + "base": null, + "refs": { + "DescribeEnvironmentHealthResult$HealthStatus": "

    Contains the response body with information about the health of the environment.

    ", + "DescribeEnvironmentHealthResult$Color": "

    Returns the color indicator that tells you information about the health of the environment. For more information, see Health Colors and Statuses.

    ", + "EnvironmentLink$LinkName": "

    The name of the link.

    ", + "EnvironmentLink$EnvironmentName": "

    The name of the linked environment (the dependency).

    ", + "EnvironmentTier$Name": "

    The name of this environment tier.

    ", + "EnvironmentTier$Type": "

    The type of this environment tier.

    ", + "EnvironmentTier$Version": "

    The version of this environment tier.

    ", + "Listener$Protocol": "

    The protocol that is used by the Listener.

    ", + "LoadBalancerDescription$LoadBalancerName": "

    The name of the LoadBalancer.

    ", + "LoadBalancerDescription$Domain": "

    The domain name of the LoadBalancer.

    ", + "Queue$Name": "

    The name of the queue.

    ", + "Queue$URL": "

    The URL of the queue.

    ", + "SingleInstanceHealth$HealthStatus": "

    Returns the health status of the specified instance. For more information, see Health Colors and Statuses.

    ", + "SingleInstanceHealth$Color": "

    Represents the color indicator that gives you information about the health of the EC2 instance. For more information, see Health Colors and Statuses.

    " + } + }, + "SwapEnvironmentCNAMEsMessage": { + "base": "

    Swaps the CNAMEs of two environments.

    ", + "refs": { + } + }, + "SystemStatus": { + "base": "

    Represents CPU utilization and load average information for applications running in the specified environment.

    ", + "refs": { + "SingleInstanceHealth$System": null + } + }, + "Tag": { + "base": "

    Describes a tag applied to a resource in an environment.

    ", + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key of the tag.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of the tag.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "CreateEnvironmentMessage$Tags": "

    This specifies the tags applied to resources in the environment.

    " + } + }, + "TerminateEnvForce": { + "base": null, + "refs": { + "DeleteApplicationMessage$TerminateEnvByForce": "

    When set to true, running environments will be terminated before deleting the application.

    " + } + }, + "TerminateEnvironmentMessage": { + "base": "

    ", + "refs": { + } + }, + "TerminateEnvironmentResources": { + "base": null, + "refs": { + "TerminateEnvironmentMessage$TerminateResources": "

    Indicates whether the associated AWS resources should shut down when the environment is terminated:

    • true: The specified environment as well as the associated AWS resources, such as Auto Scaling group and LoadBalancer, are terminated.
    • false: AWS Elastic Beanstalk resource management is removed from the environment, but the AWS resources continue to operate.

    For more information, see the AWS Elastic Beanstalk User Guide.

    Default: true

    Valid Values: true | false

    " + } + }, + "TimeFilterEnd": { + "base": null, + "refs": { + "DescribeEventsMessage$EndTime": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that occur up to, but not including, the EndTime.

    " + } + }, + "TimeFilterStart": { + "base": null, + "refs": { + "DescribeEventsMessage$StartTime": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that occur on or after this time.

    " + } + }, + "Token": { + "base": null, + "refs": { + "DescribeEventsMessage$NextToken": "

    Pagination token. If specified, the events return the next batch of results.

    ", + "EventDescriptionsMessage$NextToken": "

    If returned, this indicates that there are more results to obtain. Use this token in the next DescribeEvents call to get the next batch of events.

    " + } + }, + "TooManyApplicationVersionsException": { + "base": "

    The specified account has reached its limit of application versions.

    ", + "refs": { + } + }, + "TooManyApplicationsException": { + "base": "

    The specified account has reached its limit of applications.

    ", + "refs": { + } + }, + "TooManyBucketsException": { + "base": "

    The specified account has reached its limit of Amazon S3 buckets.

    ", + "refs": { + } + }, + "TooManyConfigurationTemplatesException": { + "base": "

    The specified account has reached its limit of configuration templates.

    ", + "refs": { + } + }, + "TooManyEnvironmentsException": { + "base": "

    The specified account has reached its limit of environments.

    ", + "refs": { + } + }, + "Trigger": { + "base": "

    Describes a trigger.

    ", + "refs": { + "TriggerList$member": null + } + }, + "TriggerList": { + "base": null, + "refs": { + "EnvironmentResourceDescription$Triggers": "

    The AutoScaling triggers in use by this environment.

    " + } + }, + "UpdateApplicationMessage": { + "base": "

    ", + "refs": { + } + }, + "UpdateApplicationVersionMessage": { + "base": "

    ", + "refs": { + } + }, + "UpdateConfigurationTemplateMessage": { + "base": "

    The result message containing the options for the specified solution stack.

    ", + "refs": { + } + }, + "UpdateDate": { + "base": null, + "refs": { + "ApplicationDescription$DateUpdated": "

    The date when the application was last modified.

    ", + "ApplicationVersionDescription$DateUpdated": "

    The last modified date of the application version.

    ", + "ConfigurationSettingsDescription$DateUpdated": "

    The date (in UTC time) when this configuration set was last modified.

    ", + "EnvironmentDescription$DateUpdated": "

    The last modified date for this environment.

    " + } + }, + "UpdateEnvironmentMessage": { + "base": "

    ", + "refs": { + } + }, + "UserDefinedOption": { + "base": null, + "refs": { + "ConfigurationOptionDescription$UserDefined": "

    An indication of whether the user defined this configuration option:

    • true : This configuration option was defined by the user. It is a valid choice for specifying if this as an Option to Remove when updating configuration settings.

    • false : This configuration was not defined by the user.

    Constraint: You can remove only UserDefined options from a configuration.

    Valid Values: true | false

    " + } + }, + "ValidateConfigurationSettingsMessage": { + "base": "

    A list of validation messages for a specified configuration template.

    ", + "refs": { + } + }, + "ValidationMessage": { + "base": "

    An error or warning for a desired configuration option value.

    ", + "refs": { + "ValidationMessagesList$member": null + } + }, + "ValidationMessageString": { + "base": null, + "refs": { + "ValidationMessage$Message": "

    A message describing the error or warning.

    " + } + }, + "ValidationMessagesList": { + "base": null, + "refs": { + "ConfigurationSettingsValidationMessages$Messages": "

    A list of ValidationMessage.

    " + } + }, + "ValidationSeverity": { + "base": null, + "refs": { + "ValidationMessage$Severity": "

    An indication of the severity of this message:

    • error: This message indicates that this is not a valid setting for an option.
    • warning: This message is providing information you should take into account.
    " + } + }, + "VersionLabel": { + "base": null, + "refs": { + "ApplicationVersionDescription$VersionLabel": "

    A label uniquely identifying the version for the associated application.

    ", + "CreateApplicationVersionMessage$VersionLabel": "

    A label identifying this version.

    Constraint: Must be unique per application. If an application version already exists with this label for the specified application, AWS Elastic Beanstalk returns an InvalidParameterValue error.

    ", + "CreateEnvironmentMessage$VersionLabel": "

    The name of the application version to deploy.

    If the specified application has no associated application versions, AWS Elastic Beanstalk UpdateEnvironment returns an InvalidParameterValue error.

    Default: If not specified, AWS Elastic Beanstalk attempts to launch the sample application in the container.

    ", + "DeleteApplicationVersionMessage$VersionLabel": "

    The label of the version to delete.

    ", + "DescribeEnvironmentsMessage$VersionLabel": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that are associated with this application version.

    ", + "DescribeEventsMessage$VersionLabel": "

    If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this application version.

    ", + "EnvironmentDescription$VersionLabel": "

    The application version deployed in this environment.

    ", + "EventDescription$VersionLabel": "

    The release label for the application version associated with this event.

    ", + "UpdateApplicationVersionMessage$VersionLabel": "

    The name of the version to update.

    If no application version is found with this label, UpdateApplication returns an InvalidParameterValue error.

    ", + "UpdateEnvironmentMessage$VersionLabel": "

    If this parameter is specified, AWS Elastic Beanstalk deploys the named application version to the environment. If no such application version is found, returns an InvalidParameterValue error.

    ", + "VersionLabels$member": null, + "VersionLabelsList$member": null + } + }, + "VersionLabels": { + "base": null, + "refs": { + "ComposeEnvironmentsMessage$VersionLabels": "

    A list of version labels, specifying one or more application source bundles that belong to the target application. Each source bundle must include an environment manifest that specifies the name of the environment and the name of the solution stack to use, and optionally can specify environment links to create.

    " + } + }, + "VersionLabelsList": { + "base": null, + "refs": { + "ApplicationDescription$Versions": "

    The names of the versions for this application.

    ", + "DescribeApplicationVersionsMessage$VersionLabels": "

    If specified, restricts the returned descriptions to only include ones that have the specified version labels.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticbeanstalk/2010-12-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,25 @@ +{ + "pagination": { + "DescribeApplicationVersions": { + "result_key": "ApplicationVersions" + }, + "DescribeApplications": { + "result_key": "Applications" + }, + "DescribeConfigurationOptions": { + "result_key": "Options" + }, + "DescribeEnvironments": { + "result_key": "Environments" + }, + "DescribeEvents": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "ListAvailableSolutionStacks": { + "result_key": "SolutionStacks" + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,903 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-02-01", + "endpointPrefix":"elasticfilesystem", + "serviceAbbreviation":"EFS", + "serviceFullName":"Amazon Elastic File System", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "CreateFileSystem":{ + "name":"CreateFileSystem", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/file-systems", + "responseCode":201 + }, + "input":{"shape":"CreateFileSystemRequest"}, + "output":{"shape":"FileSystemDescription"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"FileSystemAlreadyExists", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"FileSystemLimitExceeded", + "error":{"httpStatusCode":403}, + "exception":true + } + ] + }, + "CreateMountTarget":{ + "name":"CreateMountTarget", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/mount-targets", + "responseCode":200 + }, + "input":{"shape":"CreateMountTargetRequest"}, + "output":{"shape":"MountTargetDescription"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"FileSystemNotFound", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"IncorrectFileSystemLifeCycleState", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"MountTargetConflict", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"SubnetNotFound", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"NoFreeAddressesInSubnet", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"IpAddressInUse", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"NetworkInterfaceLimitExceeded", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"SecurityGroupLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"SecurityGroupNotFound", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedAvailabilityZone", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/create-tags/{FileSystemId}", + "responseCode":204 + }, + "input":{"shape":"CreateTagsRequest"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"FileSystemNotFound", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DeleteFileSystem":{ + "name":"DeleteFileSystem", + "http":{ + "method":"DELETE", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}", + "responseCode":204 + }, + "input":{"shape":"DeleteFileSystemRequest"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"FileSystemNotFound", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"FileSystemInUse", + 
"error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "DeleteMountTarget":{ + "name":"DeleteMountTarget", + "http":{ + "method":"DELETE", + "requestUri":"/2015-02-01/mount-targets/{MountTargetId}", + "responseCode":204 + }, + "input":{"shape":"DeleteMountTargetRequest"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"DependencyTimeout", + "error":{"httpStatusCode":504}, + "exception":true + }, + { + "shape":"MountTargetNotFound", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/2015-02-01/delete-tags/{FileSystemId}", + "responseCode":204 + }, + "input":{"shape":"DeleteTagsRequest"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"FileSystemNotFound", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DescribeFileSystems":{ + "name":"DescribeFileSystems", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/file-systems", + "responseCode":200 + }, + "input":{"shape":"DescribeFileSystemsRequest"}, + "output":{"shape":"DescribeFileSystemsResponse"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"FileSystemNotFound", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DescribeMountTargetSecurityGroups":{ + "name":"DescribeMountTargetSecurityGroups", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/mount-targets/{MountTargetId}/security-groups", + "responseCode":200 + }, + "input":{"shape":"DescribeMountTargetSecurityGroupsRequest"}, 
+ "output":{"shape":"DescribeMountTargetSecurityGroupsResponse"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"MountTargetNotFound", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"IncorrectMountTargetState", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "DescribeMountTargets":{ + "name":"DescribeMountTargets", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/mount-targets", + "responseCode":200 + }, + "input":{"shape":"DescribeMountTargetsRequest"}, + "output":{"shape":"DescribeMountTargetsResponse"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"FileSystemNotFound", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"MountTargetNotFound", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/tags/{FileSystemId}/", + "responseCode":200 + }, + "input":{"shape":"DescribeTagsRequest"}, + "output":{"shape":"DescribeTagsResponse"}, + "errors":[ + { + "shape":"BadRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"FileSystemNotFound", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "ModifyMountTargetSecurityGroups":{ + "name":"ModifyMountTargetSecurityGroups", + "http":{ + "method":"PUT", + "requestUri":"/2015-02-01/mount-targets/{MountTargetId}/security-groups", + "responseCode":204 + }, + "input":{"shape":"ModifyMountTargetSecurityGroupsRequest"}, + "errors":[ + { + "shape":"BadRequest", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"MountTargetNotFound", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"IncorrectMountTargetState", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"SecurityGroupLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"SecurityGroupNotFound", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "AwsAccountId":{"type":"string"}, + "BadRequest":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateFileSystemRequest":{ + "type":"structure", + "required":["CreationToken"], + "members":{ + "CreationToken":{"shape":"CreationToken"} + } + }, + "CreateMountTargetRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "SubnetId" + ], + "members":{ + "FileSystemId":{"shape":"FileSystemId"}, + "SubnetId":{"shape":"SubnetId"}, + "IpAddress":{"shape":"IpAddress"}, + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "CreateTagsRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "Tags" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "location":"uri", + "locationName":"FileSystemId" + }, + "Tags":{"shape":"Tags"} + } + }, + "CreationToken":{ + "type":"string", + "min":1, + "max":64 + }, + "DeleteFileSystemRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "location":"uri", + "locationName":"FileSystemId" + } + } + }, + "DeleteMountTargetRequest":{ + "type":"structure", + "required":["MountTargetId"], + "members":{ + "MountTargetId":{ + "shape":"MountTargetId", + "location":"uri", + "locationName":"MountTargetId" + } + } + }, + 
"DeleteTagsRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "TagKeys" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "location":"uri", + "locationName":"FileSystemId" + }, + "TagKeys":{"shape":"TagKeys"} + } + }, + "DependencyTimeout":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":504}, + "exception":true + }, + "DescribeFileSystemsRequest":{ + "type":"structure", + "members":{ + "MaxItems":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"MaxItems" + }, + "Marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"Marker" + }, + "CreationToken":{ + "shape":"CreationToken", + "location":"querystring", + "locationName":"CreationToken" + }, + "FileSystemId":{ + "shape":"FileSystemId", + "location":"querystring", + "locationName":"FileSystemId" + } + } + }, + "DescribeFileSystemsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"Marker"}, + "FileSystems":{"shape":"FileSystemDescriptions"}, + "NextMarker":{"shape":"Marker"} + } + }, + "DescribeMountTargetSecurityGroupsRequest":{ + "type":"structure", + "required":["MountTargetId"], + "members":{ + "MountTargetId":{ + "shape":"MountTargetId", + "location":"uri", + "locationName":"MountTargetId" + } + } + }, + "DescribeMountTargetSecurityGroupsResponse":{ + "type":"structure", + "required":["SecurityGroups"], + "members":{ + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "DescribeMountTargetsRequest":{ + "type":"structure", + "members":{ + "MaxItems":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"MaxItems" + }, + "Marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"Marker" + }, + "FileSystemId":{ + "shape":"FileSystemId", + "location":"querystring", + "locationName":"FileSystemId" + }, + "MountTargetId":{ + "shape":"MountTargetId", + 
"location":"querystring", + "locationName":"MountTargetId" + } + } + }, + "DescribeMountTargetsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"Marker"}, + "MountTargets":{"shape":"MountTargetDescriptions"}, + "NextMarker":{"shape":"Marker"} + } + }, + "DescribeTagsRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "MaxItems":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"MaxItems" + }, + "Marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"Marker" + }, + "FileSystemId":{ + "shape":"FileSystemId", + "location":"uri", + "locationName":"FileSystemId" + } + } + }, + "DescribeTagsResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Marker":{"shape":"Marker"}, + "Tags":{"shape":"Tags"}, + "NextMarker":{"shape":"Marker"} + } + }, + "ErrorCode":{ + "type":"string", + "min":1 + }, + "ErrorMessage":{"type":"string"}, + "FileSystemAlreadyExists":{ + "type":"structure", + "required":[ + "ErrorCode", + "FileSystemId" + ], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"}, + "FileSystemId":{"shape":"FileSystemId"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "FileSystemDescription":{ + "type":"structure", + "required":[ + "OwnerId", + "CreationToken", + "FileSystemId", + "CreationTime", + "LifeCycleState", + "NumberOfMountTargets", + "SizeInBytes" + ], + "members":{ + "OwnerId":{"shape":"AwsAccountId"}, + "CreationToken":{"shape":"CreationToken"}, + "FileSystemId":{"shape":"FileSystemId"}, + "CreationTime":{"shape":"Timestamp"}, + "LifeCycleState":{"shape":"LifeCycleState"}, + "Name":{"shape":"TagValue"}, + "NumberOfMountTargets":{"shape":"MountTargetCount"}, + "SizeInBytes":{"shape":"FileSystemSize"} + } + }, + "FileSystemDescriptions":{ + "type":"list", + "member":{"shape":"FileSystemDescription"} + }, + "FileSystemId":{"type":"string"}, + "FileSystemInUse":{ + "type":"structure", + 
"required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "FileSystemLimitExceeded":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "FileSystemNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "FileSystemSize":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{"shape":"FileSystemSizeValue"}, + "Timestamp":{"shape":"Timestamp"} + } + }, + "FileSystemSizeValue":{ + "type":"long", + "min":0 + }, + "IncorrectFileSystemLifeCycleState":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "IncorrectMountTargetState":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "InternalServerError":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "IpAddress":{"type":"string"}, + "IpAddressInUse":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "LifeCycleState":{ + "type":"string", + "enum":[ + "creating", + "available", + "deleting", + "deleted" + ] + }, + "Marker":{"type":"string"}, + "MaxItems":{ + "type":"integer", + 
"min":1 + }, + "ModifyMountTargetSecurityGroupsRequest":{ + "type":"structure", + "required":["MountTargetId"], + "members":{ + "MountTargetId":{ + "shape":"MountTargetId", + "location":"uri", + "locationName":"MountTargetId" + }, + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "MountTargetConflict":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "MountTargetCount":{ + "type":"integer", + "min":0 + }, + "MountTargetDescription":{ + "type":"structure", + "required":[ + "MountTargetId", + "FileSystemId", + "SubnetId", + "LifeCycleState" + ], + "members":{ + "OwnerId":{"shape":"AwsAccountId"}, + "MountTargetId":{"shape":"MountTargetId"}, + "FileSystemId":{"shape":"FileSystemId"}, + "SubnetId":{"shape":"SubnetId"}, + "LifeCycleState":{"shape":"LifeCycleState"}, + "IpAddress":{"shape":"IpAddress"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"} + } + }, + "MountTargetDescriptions":{ + "type":"list", + "member":{"shape":"MountTargetDescription"} + }, + "MountTargetId":{"type":"string"}, + "MountTargetNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NetworkInterfaceId":{"type":"string"}, + "NetworkInterfaceLimitExceeded":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "NoFreeAddressesInSubnet":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "SecurityGroup":{"type":"string"}, + "SecurityGroupLimitExceeded":{ + "type":"structure", + 
"required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "SecurityGroupNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"SecurityGroup"}, + "max":5 + }, + "SubnetId":{"type":"string"}, + "SubnetNotFound":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "min":1, + "max":128 + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "Timestamp":{"type":"timestamp"}, + "UnsupportedAvailabilityZone":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticfilesystem/2015-02-01/docs-2.json 
2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,415 @@ +{ + "version": "2.0", + "operations": { + "CreateFileSystem": "

    Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's AWS account with the specified creation token, this operation does the following:

    • Creates a new, empty file system. The file system will have an Amazon EFS assigned ID, and an initial lifecycle state \"creating\".
    • Returns with the description of the created file system.

    Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

    For basic use cases, you can use a randomly generated UUID for the creation token.

    The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

    The CreateFileSystem call returns while the file system's lifecycle state is still \"creating\". You can check the file system creation status by calling the DescribeFileSystems API, which among other things returns the file system state.

    After the file system is fully created, Amazon EFS sets its lifecycle state to \"available\", at which point you can create one or more mount targets for the file system (CreateMountTarget) in your VPC. You mount your Amazon EFS file system on an EC2 instances in your VPC via the mount target. For more information, see Amazon EFS: How it Works

    This operation requires permission for the elasticfilesystem:CreateFileSystem action.

    ", + "CreateMountTarget": "

    Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target.

    You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. For more information, see Amazon EFS: How it Works.

    In the request, you also specify a file system ID for which you are creating the mount target and the file system's lifecycle state must be \"available\" (see DescribeFileSystems).

    In the request, you also provide a subnet ID, which serves several purposes:

    • It determines the VPC in which Amazon EFS creates the mount target.
    • It determines the Availability Zone in which Amazon EFS creates the mount target.
    • It determines the IP address range from which Amazon EFS selects the IP address of the mount target if you don't specify an IP address in the request.

    After creating the mount target, Amazon EFS returns a response that includes, a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system via the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

    Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the request to add another mount target must meet the following requirements:

    • The subnet specified in the request must belong to the same VPC as the subnets of the existing mount targets.

    • The subnet specified in the request must not be in the same Availability Zone as any of the subnets of the existing mount targets.

    If the request satisfies the requirements, Amazon EFS does the following:

    • Creates a new mount target in the specified subnet.
    • Also creates a new network interface in the subnet as follows:
      • If the request provides an IpAddress, Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon EFS assigns a free address in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call does when a request does not specify a primary private IP address).
      • If the request provides SecurityGroups, this network interface is associated with those security groups. Otherwise, it belongs to the default security group for the subnet's VPC.
      • Assigns the description \"Mount target fsmt-id for file system fs-id\" where fsmt-id is the mount target ID, and fs-id is the FileSystemId.
      • Sets the requesterManaged property of the network interface to \"true\", and the requesterId value to \"EFS\".

      Each Amazon EFS mount target has one corresponding requestor-managed EC2 network interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId field in the mount target's description to the network interface ID, and the IpAddress field to its address. If network interface creation fails, the entire CreateMountTarget operation fails.

    The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still \"creating\". You can check the mount target creation status by calling the DescribeFileSystems API, which among other things returns the mount target state.

    We recommend you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, go to Amazon EFS product detail page. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario; if the Availability Zone in which your mount target is created goes down, then you won't be able to access your file system through that mount target.

    This operation requires permission for the following action on the file system:

    • elasticfilesystem:CreateMountTarget

    This operation also requires permission for the following Amazon EC2 actions:

    • ec2:DescribeSubnets
    • ec2:DescribeNetworkInterfaces
    • ec2:CreateNetworkInterface
    ", + "CreateTags": "

    Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. If you add the \"Name\" tag to your file system, Amazon EFS returns it in the response to the DescribeFileSystems API.

    This operation requires permission for the elasticfilesystem:CreateTags action.

    ", + "DeleteFileSystem": "

    Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you will not be able to access any contents of the deleted file system.

    You cannot delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. For more information, see DescribeMountTargets and DeleteMountTarget.

    The DeleteFileSystem call returns while the file system state is still \"deleting\". You can check the file system deletion status by calling the DescribeFileSystems API, which returns a list of file systems in your account. If you pass file system ID or creation token for the deleted file system, the DescribeFileSystems will return a 404 \"FileSystemNotFound\" error.

    This operation requires permission for the elasticfilesystem:DeleteFileSystem action.

    ", + "DeleteMountTarget": "

    Deletes the specified mount target.

    This operation forcibly breaks any mounts of the file system via the mount target being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC using another mount target.

    This operation requires permission for the following action on the file system:

    • elasticfilesystem:DeleteMountTarget
    The DeleteMountTarget call returns while the mount target state is still \"deleting\". You can check the mount target deletion by calling the DescribeMountTargets API, which returns a list of mount target descriptions for the given file system.

    The operation also requires permission for the following Amazon EC2 action on the mount target's network interface:

    • ec2:DeleteNetworkInterface
    ", + "DeleteTags": "

    Deletes the specified tags from a file system. If the DeleteTags request includes a tag key that does not exist, Amazon EFS ignores it; it is not an error. For more information about tags and related restrictions, go to Tag Restrictions in the AWS Billing and Cost Management User Guide.

    This operation requires permission for the elasticfilesystem:DeleteTags action.

    ", + "DescribeFileSystems": "

    Returns the description of a specific Amazon EFS file system if either the file system CreationToken or the FileSystemId is provided; otherwise, returns descriptions of all file systems owned by the caller's AWS account in the AWS region of the endpoint that you're calling.

    When retrieving all file system descriptions, you can optionally specify the MaxItems parameter to limit the number of descriptions in a response. If more file system descriptions remain, Amazon EFS returns a NextMarker, an opaque token, in the response. In this case, you should send a subsequent request with the Marker request parameter set to the value of NextMarker.

    So to retrieve a list of your file system descriptions, the expected usage of this API is an iterative process of first calling DescribeFileSystems without the Marker and then continuing to call it with the Marker parameter set to the value of the NextMarker from the previous response until the response has no NextMarker.

    Note that the implementation may return fewer than MaxItems file system descriptions while still including a NextMarker value.

    The order of file systems returned in the response of one DescribeFileSystems call, and the order of file systems returned across the responses of a multi-call iteration, is unspecified.

    This operation requires permission for the elasticfilesystem:DescribeFileSystems action.

    ", + "DescribeMountTargetSecurityGroups": "

    Returns the security groups currently in effect for a mount target. This operation requires that the network interface of the mount target has been created and the life cycle state of the mount target is not \"deleted\".

    This operation requires permissions for the following actions:

    • elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount target's file system.
    • ec2:DescribeNetworkInterfaceAttribute action on the mount target's network interface.
    ", + "DescribeMountTargets": "

    Returns the descriptions of all the current mount targets, or a specific mount target, for a file system. When requesting all of the current mount targets, the order of mount targets returned in the response is unspecified.

    This operation requires permission for the elasticfilesystem:DescribeMountTargets action, on either the file system id that you specify in FileSystemId, or on the file system of the mount target that you specify in MountTargetId.

    ", + "DescribeTags": "

    Returns the tags associated with a file system. The order of tags returned in the response of one DescribeTags call, and the order of tags returned across the responses of a multi-call iteration (when using pagination), is unspecified.

    This operation requires permission for the elasticfilesystem:DescribeTags action.

    ", + "ModifyMountTargetSecurityGroups": "

    Modifies the set of security groups in effect for a mount target.

    When you create a mount target, Amazon EFS also creates a new network interface (see CreateMountTarget). This operation replaces the security groups in effect for the network interface associated with a mount target, with the SecurityGroups provided in the request. This operation requires that the network interface of the mount target has been created and the life cycle state of the mount target is not \"deleted\".

    The operation requires permissions for the following actions:

    • elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount target's file system.
    • ec2:ModifyNetworkInterfaceAttribute action on the mount target's network interface.
    " + }, + "service": "Amazon Elastic File System", + "shapes": { + "AwsAccountId": { + "base": null, + "refs": { + "FileSystemDescription$OwnerId": "

    The AWS account that created the file system. If the file system was created by an IAM user, the parent account to which the user belongs is the owner.

    ", + "MountTargetDescription$OwnerId": "

    The AWS account ID that owns the resource.

    " + } + }, + "BadRequest": { + "base": "

    Returned if the request is malformed or contains an error such as an invalid parameter value or a missing required parameter.

    ", + "refs": { + } + }, + "CreateFileSystemRequest": { + "base": null, + "refs": { + } + }, + "CreateMountTargetRequest": { + "base": null, + "refs": { + } + }, + "CreateTagsRequest": { + "base": null, + "refs": { + } + }, + "CreationToken": { + "base": null, + "refs": { + "CreateFileSystemRequest$CreationToken": "

    String of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent creation.

    ", + "DescribeFileSystemsRequest$CreationToken": "

    Optional string. Restricts the list to the file system with this creation token (you specify a creation token at the time of creating an Amazon EFS file system).

    ", + "FileSystemDescription$CreationToken": "

    Opaque string specified in the request.

    " + } + }, + "DeleteFileSystemRequest": { + "base": null, + "refs": { + } + }, + "DeleteMountTargetRequest": { + "base": null, + "refs": { + } + }, + "DeleteTagsRequest": { + "base": null, + "refs": { + } + }, + "DependencyTimeout": { + "base": "

    The service timed out trying to fulfill the request, and the client should try the call again.

    ", + "refs": { + } + }, + "DescribeFileSystemsRequest": { + "base": null, + "refs": { + } + }, + "DescribeFileSystemsResponse": { + "base": null, + "refs": { + } + }, + "DescribeMountTargetSecurityGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeMountTargetSecurityGroupsResponse": { + "base": null, + "refs": { + } + }, + "DescribeMountTargetsRequest": { + "base": null, + "refs": { + } + }, + "DescribeMountTargetsResponse": { + "base": null, + "refs": { + } + }, + "DescribeTagsRequest": { + "base": null, + "refs": { + } + }, + "DescribeTagsResponse": { + "base": null, + "refs": { + } + }, + "ErrorCode": { + "base": null, + "refs": { + "BadRequest$ErrorCode": null, + "DependencyTimeout$ErrorCode": null, + "FileSystemAlreadyExists$ErrorCode": null, + "FileSystemInUse$ErrorCode": null, + "FileSystemLimitExceeded$ErrorCode": null, + "FileSystemNotFound$ErrorCode": null, + "IncorrectFileSystemLifeCycleState$ErrorCode": null, + "IncorrectMountTargetState$ErrorCode": null, + "InternalServerError$ErrorCode": null, + "IpAddressInUse$ErrorCode": null, + "MountTargetConflict$ErrorCode": null, + "MountTargetNotFound$ErrorCode": null, + "NetworkInterfaceLimitExceeded$ErrorCode": null, + "NoFreeAddressesInSubnet$ErrorCode": null, + "SecurityGroupLimitExceeded$ErrorCode": null, + "SecurityGroupNotFound$ErrorCode": null, + "SubnetNotFound$ErrorCode": null, + "UnsupportedAvailabilityZone$ErrorCode": null + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "BadRequest$Message": null, + "DependencyTimeout$Message": null, + "FileSystemAlreadyExists$Message": null, + "FileSystemInUse$Message": null, + "FileSystemLimitExceeded$Message": null, + "FileSystemNotFound$Message": null, + "IncorrectFileSystemLifeCycleState$Message": null, + "IncorrectMountTargetState$Message": null, + "InternalServerError$Message": null, + "IpAddressInUse$Message": null, + "MountTargetConflict$Message": null, + "MountTargetNotFound$Message": null, + 
"NetworkInterfaceLimitExceeded$Message": null, + "NoFreeAddressesInSubnet$Message": null, + "SecurityGroupLimitExceeded$Message": null, + "SecurityGroupNotFound$Message": null, + "SubnetNotFound$Message": null, + "UnsupportedAvailabilityZone$Message": null + } + }, + "FileSystemAlreadyExists": { + "base": "

    Returned if the file system you are trying to create already exists, with the creation token you provided.

    ", + "refs": { + } + }, + "FileSystemDescription": { + "base": "

    This object provides description of a file system.

    ", + "refs": { + "FileSystemDescriptions$member": null + } + }, + "FileSystemDescriptions": { + "base": null, + "refs": { + "DescribeFileSystemsResponse$FileSystems": "

    An array of file system descriptions.

    " + } + }, + "FileSystemId": { + "base": null, + "refs": { + "CreateMountTargetRequest$FileSystemId": "

    The ID of the file system for which to create the mount target.

    ", + "CreateTagsRequest$FileSystemId": "

    String. The ID of the file system whose tags you want to modify. This operation modifies only the tags and not the file system.

    ", + "DeleteFileSystemRequest$FileSystemId": "

    The ID of the file system you want to delete.

    ", + "DeleteTagsRequest$FileSystemId": "

    String. The ID of the file system whose tags you want to delete.

    ", + "DescribeFileSystemsRequest$FileSystemId": "

    Optional string. File system ID whose description you want to retrieve.

    ", + "DescribeMountTargetsRequest$FileSystemId": "

    Optional. String. The ID of the file system whose mount targets you want to list. It must be included in your request if MountTargetId is not included.

    ", + "DescribeTagsRequest$FileSystemId": "

    The ID of the file system whose tag set you want to retrieve.

    ", + "FileSystemAlreadyExists$FileSystemId": null, + "FileSystemDescription$FileSystemId": "

    The file system ID assigned by Amazon EFS.

    ", + "MountTargetDescription$FileSystemId": "

    The ID of the file system for which the mount target is intended.

    " + } + }, + "FileSystemInUse": { + "base": "

    Returned if a file system has mount targets.

    ", + "refs": { + } + }, + "FileSystemLimitExceeded": { + "base": "

    Returned if the AWS account has already created maximum number of file systems allowed per account.

    ", + "refs": { + } + }, + "FileSystemNotFound": { + "base": "

    Returned if the specified FileSystemId does not exist in the requester's AWS account.

    ", + "refs": { + } + }, + "FileSystemSize": { + "base": "

    This object provides the latest known metered size, in bytes, of data stored in the file system, in its Value field, and the time at which that size was determined in its Timestamp field. Note that the value does not represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, the value will represent the actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not necessarily the exact size the file system was at any instant in time.

    ", + "refs": { + "FileSystemDescription$SizeInBytes": "

    This object provides the latest known metered size of data stored in the file system, in bytes, in its Value field, and the time at which that size was determined in its Timestamp field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z. Note that the value does not represent the size of a consistent snapshot of the file system, but it is eventually consistent when there are no writes to the file system. That is, the value will represent actual size only if the file system is not modified for a period longer than a couple of hours. Otherwise, the value is not the exact size the file system was at any instant in time.

    " + } + }, + "FileSystemSizeValue": { + "base": null, + "refs": { + "FileSystemSize$Value": "

    The latest known metered size, in bytes, of data stored in the file system.

    " + } + }, + "IncorrectFileSystemLifeCycleState": { + "base": "

    Returned if the file system's life cycle state is not \"created\".

    ", + "refs": { + } + }, + "IncorrectMountTargetState": { + "base": "

    Returned if the mount target is not in the correct state for the operation.

    ", + "refs": { + } + }, + "InternalServerError": { + "base": "

    Returned if an error occurred on the server side.

    ", + "refs": { + } + }, + "IpAddress": { + "base": null, + "refs": { + "CreateMountTargetRequest$IpAddress": "

    A valid IPv4 address within the address range of the specified subnet.

    ", + "MountTargetDescription$IpAddress": "

    The address at which the file system may be mounted via the mount target.

    " + } + }, + "IpAddressInUse": { + "base": "

    Returned if the request specified an IpAddress that is already in use in the subnet.

    ", + "refs": { + } + }, + "LifeCycleState": { + "base": null, + "refs": { + "FileSystemDescription$LifeCycleState": "

    A predefined string value that indicates the lifecycle phase of the file system.

    ", + "MountTargetDescription$LifeCycleState": "

    The lifecycle state the mount target is in.

    " + } + }, + "Marker": { + "base": null, + "refs": { + "DescribeFileSystemsRequest$Marker": "

    Optional string. Opaque pagination token returned from a previous DescribeFileSystems operation. If present, specifies to continue the list from where the returning call had left off.

    ", + "DescribeFileSystemsResponse$Marker": "

    A string, present if provided by caller in the request.

    ", + "DescribeFileSystemsResponse$NextMarker": "

    A string, present if there are more file systems than returned in the response. You can use the NextMarker in the subsequent request to fetch the descriptions.

    ", + "DescribeMountTargetsRequest$Marker": "

    Optional. String. Opaque pagination token returned from a previous DescribeMountTargets operation. If present, it specifies to continue the list from where the previous returning call left off.

    ", + "DescribeMountTargetsResponse$Marker": "

    If the request included the Marker, the response returns that value in this field.

    ", + "DescribeMountTargetsResponse$NextMarker": "

    If a value is present, there are more mount targets to return. In a subsequent request, you can provide Marker in your request with this value to retrieve the next set of mount targets.

    ", + "DescribeTagsRequest$Marker": "

    Optional. String. Opaque pagination token returned from a previous DescribeTags operation. If present, it specifies to continue the list from where the previous call left off.

    ", + "DescribeTagsResponse$Marker": "

    If the request included a Marker, the response returns that value in this field.

    ", + "DescribeTagsResponse$NextMarker": "

    If a value is present, there are more tags to return. In a subsequent request, you can provide the value of NextMarker as the value of the Marker parameter in your next request to retrieve the next set of tags.

    " + } + }, + "MaxItems": { + "base": null, + "refs": { + "DescribeFileSystemsRequest$MaxItems": "

    Optional integer. Specifies the maximum number of file systems to return in the response. This parameter value must be greater than 0. The number of items Amazon EFS returns will be the minimum of the MaxItems parameter specified in the request and the service's internal maximum number of items per page.

    ", + "DescribeMountTargetsRequest$MaxItems": "

    Optional. Maximum number of mount targets to return in the response. It must be an integer with a value greater than zero.

    ", + "DescribeTagsRequest$MaxItems": "

    Optional. Maximum number of file system tags to return in the response. It must be an integer with a value greater than zero.

    " + } + }, + "ModifyMountTargetSecurityGroupsRequest": { + "base": null, + "refs": { + } + }, + "MountTargetConflict": { + "base": "

    Returned if the mount target would violate one of the specified restrictions based on the file system's existing mount targets.

    ", + "refs": { + } + }, + "MountTargetCount": { + "base": null, + "refs": { + "FileSystemDescription$NumberOfMountTargets": "

    The current number of mount targets (see CreateMountTarget) the file system has.

    " + } + }, + "MountTargetDescription": { + "base": "

    This object provides description of a mount target.

    ", + "refs": { + "MountTargetDescriptions$member": null + } + }, + "MountTargetDescriptions": { + "base": null, + "refs": { + "DescribeMountTargetsResponse$MountTargets": "

    Returns the file system's mount targets as an array of MountTargetDescription objects.

    " + } + }, + "MountTargetId": { + "base": null, + "refs": { + "DeleteMountTargetRequest$MountTargetId": "

    String. The ID of the mount target to delete.

    ", + "DescribeMountTargetSecurityGroupsRequest$MountTargetId": "

    The ID of the mount target whose security groups you want to retrieve.

    ", + "DescribeMountTargetsRequest$MountTargetId": "

    Optional. String. The ID of the mount target that you want to have described. It must be included in your request if FileSystemId is not included.

    ", + "ModifyMountTargetSecurityGroupsRequest$MountTargetId": "

    The ID of the mount target whose security groups you want to modify.

    ", + "MountTargetDescription$MountTargetId": "

    The system-assigned mount target ID.

    " + } + }, + "MountTargetNotFound": { + "base": "

    Returned if there is no mount target with the specified ID found in the caller's account.

    ", + "refs": { + } + }, + "NetworkInterfaceId": { + "base": null, + "refs": { + "MountTargetDescription$NetworkInterfaceId": "

    The ID of the network interface that Amazon EFS created when it created the mount target.

    " + } + }, + "NetworkInterfaceLimitExceeded": { + "base": "

    The calling account has reached the ENI limit for the specific AWS region. Client should try to delete some ENIs or get its account limit raised. For more information, go to Amazon VPC Limits in the Amazon Virtual Private Cloud User Guide (see the Network interfaces per VPC entry in the table).

    ", + "refs": { + } + }, + "NoFreeAddressesInSubnet": { + "base": "

    Returned if IpAddress was not specified in the request and there are no free IP addresses in the subnet.

    ", + "refs": { + } + }, + "SecurityGroup": { + "base": null, + "refs": { + "SecurityGroups$member": null + } + }, + "SecurityGroupLimitExceeded": { + "base": "

    Returned if the size of SecurityGroups specified in the request is greater than five.

    ", + "refs": { + } + }, + "SecurityGroupNotFound": { + "base": "

    Returned if one of the specified security groups does not exist in the subnet's VPC.

    ", + "refs": { + } + }, + "SecurityGroups": { + "base": null, + "refs": { + "CreateMountTargetRequest$SecurityGroups": "

    Up to 5 VPC security group IDs, of the form \"sg-xxxxxxxx\". These must be for the same VPC as subnet specified.

    ", + "DescribeMountTargetSecurityGroupsResponse$SecurityGroups": "

    An array of security groups.

    ", + "ModifyMountTargetSecurityGroupsRequest$SecurityGroups": "

    An array of up to five VPC security group IDs.

    " + } + }, + "SubnetId": { + "base": null, + "refs": { + "CreateMountTargetRequest$SubnetId": "

    The ID of the subnet to add the mount target in.

    ", + "MountTargetDescription$SubnetId": "

    The ID of the subnet that the mount target is in.

    " + } + }, + "SubnetNotFound": { + "base": "

    Returned if there is no subnet with ID SubnetId provided in the request.

    ", + "refs": { + } + }, + "Tag": { + "base": "

    A tag is a pair of key and value. The allowed characters in keys and values are letters, whitespace, and numbers, representable in UTF-8, and the characters '+', '-', '=', '.', '_', ':', and '/'.

    ", + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    Tag key, a string. The key must not start with \"aws:\".

    ", + "TagKeys$member": null + } + }, + "TagKeys": { + "base": null, + "refs": { + "DeleteTagsRequest$TagKeys": "

    A list of tag keys to delete.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "FileSystemDescription$Name": "

    You can add tags to a file system (see CreateTags) including a \"Name\" tag. If the file system has a \"Name\" tag, Amazon EFS returns the value in this field.

    ", + "Tag$Value": "

    Value of the tag key.

    " + } + }, + "Tags": { + "base": null, + "refs": { + "CreateTagsRequest$Tags": "

    An array of Tag objects to add. Each Tag object is a key-value pair.

    ", + "DescribeTagsResponse$Tags": "

    Returns tags associated with the file system as an array of Tag objects.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "FileSystemDescription$CreationTime": "

    The time at which the file system was created, in seconds, since 1970-01-01T00:00:00Z.

    ", + "FileSystemSize$Timestamp": "

    The time at which the size of data, returned in the Value field, was determined. The value is the integer number of seconds since 1970-01-01T00:00:00Z.

    " + } + }, + "UnsupportedAvailabilityZone": { + "base": null, + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2145 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-06-01", + "endpointPrefix":"elasticloadbalancing", + "serviceFullName":"Elastic Load Balancing", + "signatureVersion":"v4", + "xmlNamespace":"http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/", + "protocol":"query" + }, + "operations":{ + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsInput"}, + "output":{ + "shape":"AddTagsOutput", + "resultWrapper":"AddTagsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyTagsException", + "error":{ + "code":"TooManyTags", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicateTagKeysException", + "error":{ + "code":"DuplicateTagKeys", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ApplySecurityGroupsToLoadBalancer":{ + "name":"ApplySecurityGroupsToLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ApplySecurityGroupsToLoadBalancerInput"}, + "output":{ + "shape":"ApplySecurityGroupsToLoadBalancerOutput", + 
"resultWrapper":"ApplySecurityGroupsToLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSecurityGroupException", + "error":{ + "code":"InvalidSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "AttachLoadBalancerToSubnets":{ + "name":"AttachLoadBalancerToSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachLoadBalancerToSubnetsInput"}, + "output":{ + "shape":"AttachLoadBalancerToSubnetsOutput", + "resultWrapper":"AttachLoadBalancerToSubnetsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubnetNotFoundException", + "error":{ + "code":"SubnetNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnetException", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ConfigureHealthCheck":{ + "name":"ConfigureHealthCheck", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfigureHealthCheckInput"}, + "output":{ + "shape":"ConfigureHealthCheckOutput", + "resultWrapper":"ConfigureHealthCheckResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + 
"exception":true + } + ] + }, + "CreateAppCookieStickinessPolicy":{ + "name":"CreateAppCookieStickinessPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAppCookieStickinessPolicyInput"}, + "output":{ + "shape":"CreateAppCookieStickinessPolicyOutput", + "resultWrapper":"CreateAppCookieStickinessPolicyResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicatePolicyNameException", + "error":{ + "code":"DuplicatePolicyName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyPoliciesException", + "error":{ + "code":"TooManyPolicies", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateLBCookieStickinessPolicy":{ + "name":"CreateLBCookieStickinessPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLBCookieStickinessPolicyInput"}, + "output":{ + "shape":"CreateLBCookieStickinessPolicyOutput", + "resultWrapper":"CreateLBCookieStickinessPolicyResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicatePolicyNameException", + "error":{ + "code":"DuplicatePolicyName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyPoliciesException", + "error":{ + "code":"TooManyPolicies", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + 
"senderFault":true + }, + "exception":true + } + ] + }, + "CreateLoadBalancer":{ + "name":"CreateLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccessPointInput"}, + "output":{ + "shape":"CreateAccessPointOutput", + "resultWrapper":"CreateLoadBalancerResult" + }, + "errors":[ + { + "shape":"DuplicateAccessPointNameException", + "error":{ + "code":"DuplicateLoadBalancerName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyAccessPointsException", + "error":{ + "code":"TooManyLoadBalancers", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CertificateNotFoundException", + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubnetNotFoundException", + "error":{ + "code":"SubnetNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnetException", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSecurityGroupException", + "error":{ + "code":"InvalidSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSchemeException", + "error":{ + "code":"InvalidScheme", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyTagsException", + "error":{ + "code":"TooManyTags", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicateTagKeysException", + "error":{ + "code":"DuplicateTagKeys", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateLoadBalancerListeners":{ + 
"name":"CreateLoadBalancerListeners", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLoadBalancerListenerInput"}, + "output":{ + "shape":"CreateLoadBalancerListenerOutput", + "resultWrapper":"CreateLoadBalancerListenersResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicateListenerException", + "error":{ + "code":"DuplicateListener", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CertificateNotFoundException", + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateLoadBalancerPolicy":{ + "name":"CreateLoadBalancerPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLoadBalancerPolicyInput"}, + "output":{ + "shape":"CreateLoadBalancerPolicyOutput", + "resultWrapper":"CreateLoadBalancerPolicyResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PolicyTypeNotFoundException", + "error":{ + "code":"PolicyTypeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DuplicatePolicyNameException", + "error":{ + "code":"DuplicatePolicyName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyPoliciesException", + "error":{ + "code":"TooManyPolicies", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + 
"code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteLoadBalancer":{ + "name":"DeleteLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccessPointInput"}, + "output":{ + "shape":"DeleteAccessPointOutput", + "resultWrapper":"DeleteLoadBalancerResult" + } + }, + "DeleteLoadBalancerListeners":{ + "name":"DeleteLoadBalancerListeners", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoadBalancerListenerInput"}, + "output":{ + "shape":"DeleteLoadBalancerListenerOutput", + "resultWrapper":"DeleteLoadBalancerListenersResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteLoadBalancerPolicy":{ + "name":"DeleteLoadBalancerPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoadBalancerPolicyInput"}, + "output":{ + "shape":"DeleteLoadBalancerPolicyOutput", + "resultWrapper":"DeleteLoadBalancerPolicyResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeregisterInstancesFromLoadBalancer":{ + "name":"DeregisterInstancesFromLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterEndPointsInput"}, + "output":{ + "shape":"DeregisterEndPointsOutput", + "resultWrapper":"DeregisterInstancesFromLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + 
"exception":true + }, + { + "shape":"InvalidEndPointException", + "error":{ + "code":"InvalidInstance", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeInstanceHealth":{ + "name":"DescribeInstanceHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndPointStateInput"}, + "output":{ + "shape":"DescribeEndPointStateOutput", + "resultWrapper":"DescribeInstanceHealthResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidEndPointException", + "error":{ + "code":"InvalidInstance", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeLoadBalancerAttributes":{ + "name":"DescribeLoadBalancerAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBalancerAttributesInput"}, + "output":{ + "shape":"DescribeLoadBalancerAttributesOutput", + "resultWrapper":"DescribeLoadBalancerAttributesResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LoadBalancerAttributeNotFoundException", + "error":{ + "code":"LoadBalancerAttributeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeLoadBalancerPolicies":{ + "name":"DescribeLoadBalancerPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBalancerPoliciesInput"}, + "output":{ + "shape":"DescribeLoadBalancerPoliciesOutput", + "resultWrapper":"DescribeLoadBalancerPoliciesResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"PolicyNotFoundException", + "error":{ + "code":"PolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeLoadBalancerPolicyTypes":{ + "name":"DescribeLoadBalancerPolicyTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBalancerPolicyTypesInput"}, + "output":{ + "shape":"DescribeLoadBalancerPolicyTypesOutput", + "resultWrapper":"DescribeLoadBalancerPolicyTypesResult" + }, + "errors":[ + { + "shape":"PolicyTypeNotFoundException", + "error":{ + "code":"PolicyTypeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeLoadBalancers":{ + "name":"DescribeLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccessPointsInput"}, + "output":{ + "shape":"DescribeAccessPointsOutput", + "resultWrapper":"DescribeLoadBalancersResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsInput"}, + "output":{ + "shape":"DescribeTagsOutput", + "resultWrapper":"DescribeTagsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DetachLoadBalancerFromSubnets":{ + "name":"DetachLoadBalancerFromSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachLoadBalancerFromSubnetsInput"}, + "output":{ + "shape":"DetachLoadBalancerFromSubnetsOutput", + "resultWrapper":"DetachLoadBalancerFromSubnetsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, 
+ "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DisableAvailabilityZonesForLoadBalancer":{ + "name":"DisableAvailabilityZonesForLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveAvailabilityZonesInput"}, + "output":{ + "shape":"RemoveAvailabilityZonesOutput", + "resultWrapper":"DisableAvailabilityZonesForLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "EnableAvailabilityZonesForLoadBalancer":{ + "name":"EnableAvailabilityZonesForLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddAvailabilityZonesInput"}, + "output":{ + "shape":"AddAvailabilityZonesOutput", + "resultWrapper":"EnableAvailabilityZonesForLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyLoadBalancerAttributes":{ + "name":"ModifyLoadBalancerAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyLoadBalancerAttributesInput"}, + "output":{ + "shape":"ModifyLoadBalancerAttributesOutput", + "resultWrapper":"ModifyLoadBalancerAttributesResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LoadBalancerAttributeNotFoundException", + "error":{ + 
"code":"LoadBalancerAttributeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "RegisterInstancesWithLoadBalancer":{ + "name":"RegisterInstancesWithLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterEndPointsInput"}, + "output":{ + "shape":"RegisterEndPointsOutput", + "resultWrapper":"RegisterInstancesWithLoadBalancerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidEndPointException", + "error":{ + "code":"InvalidInstance", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsInput"}, + "output":{ + "shape":"RemoveTagsOutput", + "resultWrapper":"RemoveTagsResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetLoadBalancerListenerSSLCertificate":{ + "name":"SetLoadBalancerListenerSSLCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLoadBalancerListenerSSLCertificateInput"}, + "output":{ + "shape":"SetLoadBalancerListenerSSLCertificateOutput", + "resultWrapper":"SetLoadBalancerListenerSSLCertificateResult" + }, + "errors":[ + { + "shape":"CertificateNotFoundException", + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + { + "shape":"ListenerNotFoundException", + "error":{ + "code":"ListenerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetLoadBalancerPoliciesForBackendServer":{ + "name":"SetLoadBalancerPoliciesForBackendServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLoadBalancerPoliciesForBackendServerInput"}, + "output":{ + "shape":"SetLoadBalancerPoliciesForBackendServerOutput", + "resultWrapper":"SetLoadBalancerPoliciesForBackendServerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PolicyNotFoundException", + "error":{ + "code":"PolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetLoadBalancerPoliciesOfListener":{ + "name":"SetLoadBalancerPoliciesOfListener", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLoadBalancerPoliciesOfListenerInput"}, + "output":{ + "shape":"SetLoadBalancerPoliciesOfListenerOutput", + "resultWrapper":"SetLoadBalancerPoliciesOfListenerResult" + }, + "errors":[ + { + "shape":"AccessPointNotFoundException", + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PolicyNotFoundException", + "error":{ + "code":"PolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ListenerNotFoundException", + "error":{ + 
"code":"ListenerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidConfigurationRequestException", + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "AccessLog":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{"shape":"AccessLogEnabled"}, + "S3BucketName":{"shape":"S3BucketName"}, + "EmitInterval":{"shape":"AccessLogInterval"}, + "S3BucketPrefix":{"shape":"AccessLogPrefix"} + } + }, + "AccessLogEnabled":{"type":"boolean"}, + "AccessLogInterval":{"type":"integer"}, + "AccessLogPrefix":{"type":"string"}, + "AccessPointName":{"type":"string"}, + "AccessPointNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LoadBalancerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AccessPointPort":{"type":"integer"}, + "AddAvailabilityZonesInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "AvailabilityZones" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "AddAvailabilityZonesOutput":{ + "type":"structure", + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "AddTagsInput":{ + "type":"structure", + "required":[ + "LoadBalancerNames", + "Tags" + ], + "members":{ + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "Tags":{"shape":"TagList"} + } + }, + "AddTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "AdditionalAttribute":{ + "type":"structure", + "members":{ + "Key":{"shape":"StringVal"}, + "Value":{"shape":"StringVal"} + } + }, + "AdditionalAttributes":{ + "type":"list", + "member":{"shape":"AdditionalAttribute"} + }, + "AppCookieStickinessPolicies":{ + "type":"list", + "member":{"shape":"AppCookieStickinessPolicy"} + }, + "AppCookieStickinessPolicy":{ + "type":"structure", + 
"members":{ + "PolicyName":{"shape":"PolicyName"}, + "CookieName":{"shape":"CookieName"} + } + }, + "ApplySecurityGroupsToLoadBalancerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "SecurityGroups" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "ApplySecurityGroupsToLoadBalancerOutput":{ + "type":"structure", + "members":{ + "SecurityGroups":{"shape":"SecurityGroups"} + } + }, + "AttachLoadBalancerToSubnetsInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Subnets" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Subnets":{"shape":"Subnets"} + } + }, + "AttachLoadBalancerToSubnetsOutput":{ + "type":"structure", + "members":{ + "Subnets":{"shape":"Subnets"} + } + }, + "AttributeName":{"type":"string"}, + "AttributeType":{"type":"string"}, + "AttributeValue":{"type":"string"}, + "AvailabilityZone":{"type":"string"}, + "AvailabilityZones":{ + "type":"list", + "member":{"shape":"AvailabilityZone"} + }, + "BackendServerDescription":{ + "type":"structure", + "members":{ + "InstancePort":{"shape":"InstancePort"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "BackendServerDescriptions":{ + "type":"list", + "member":{"shape":"BackendServerDescription"} + }, + "Cardinality":{"type":"string"}, + "CertificateNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ConfigureHealthCheckInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "HealthCheck" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "HealthCheck":{"shape":"HealthCheck"} + } + }, + "ConfigureHealthCheckOutput":{ + "type":"structure", + "members":{ + "HealthCheck":{"shape":"HealthCheck"} + } + }, + "ConnectionDraining":{ + "type":"structure", + "required":["Enabled"], + "members":{ + 
"Enabled":{"shape":"ConnectionDrainingEnabled"}, + "Timeout":{"shape":"ConnectionDrainingTimeout"} + } + }, + "ConnectionDrainingEnabled":{"type":"boolean"}, + "ConnectionDrainingTimeout":{"type":"integer"}, + "ConnectionSettings":{ + "type":"structure", + "required":["IdleTimeout"], + "members":{ + "IdleTimeout":{"shape":"IdleTimeout"} + } + }, + "CookieExpirationPeriod":{"type":"long"}, + "CookieName":{"type":"string"}, + "CreateAccessPointInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Listeners" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Listeners":{"shape":"Listeners"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "Subnets":{"shape":"Subnets"}, + "SecurityGroups":{"shape":"SecurityGroups"}, + "Scheme":{"shape":"LoadBalancerScheme"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateAccessPointOutput":{ + "type":"structure", + "members":{ + "DNSName":{"shape":"DNSName"} + } + }, + "CreateAppCookieStickinessPolicyInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "PolicyName", + "CookieName" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyName":{"shape":"PolicyName"}, + "CookieName":{"shape":"CookieName"} + } + }, + "CreateAppCookieStickinessPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "CreateLBCookieStickinessPolicyInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "PolicyName" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyName":{"shape":"PolicyName"}, + "CookieExpirationPeriod":{"shape":"CookieExpirationPeriod"} + } + }, + "CreateLBCookieStickinessPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "CreateLoadBalancerListenerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Listeners" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Listeners":{"shape":"Listeners"} + } + }, + "CreateLoadBalancerListenerOutput":{ + 
"type":"structure", + "members":{ + } + }, + "CreateLoadBalancerPolicyInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "PolicyName", + "PolicyTypeName" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyName":{"shape":"PolicyName"}, + "PolicyTypeName":{"shape":"PolicyTypeName"}, + "PolicyAttributes":{"shape":"PolicyAttributes"} + } + }, + "CreateLoadBalancerPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "CreatedTime":{"type":"timestamp"}, + "CrossZoneLoadBalancing":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{"shape":"CrossZoneLoadBalancingEnabled"} + } + }, + "CrossZoneLoadBalancingEnabled":{"type":"boolean"}, + "DNSName":{"type":"string"}, + "DefaultValue":{"type":"string"}, + "DeleteAccessPointInput":{ + "type":"structure", + "required":["LoadBalancerName"], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"} + } + }, + "DeleteAccessPointOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteLoadBalancerListenerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "LoadBalancerPorts" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerPorts":{"shape":"Ports"} + } + }, + "DeleteLoadBalancerListenerOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteLoadBalancerPolicyInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "PolicyName" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyName":{"shape":"PolicyName"} + } + }, + "DeleteLoadBalancerPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "DeregisterEndPointsInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Instances" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Instances":{"shape":"Instances"} + } + }, + "DeregisterEndPointsOutput":{ + "type":"structure", + "members":{ + "Instances":{"shape":"Instances"} + } + }, + 
"DescribeAccessPointsInput":{ + "type":"structure", + "members":{ + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "Marker":{"shape":"Marker"}, + "PageSize":{"shape":"PageSize"} + } + }, + "DescribeAccessPointsOutput":{ + "type":"structure", + "members":{ + "LoadBalancerDescriptions":{"shape":"LoadBalancerDescriptions"}, + "NextMarker":{"shape":"Marker"} + } + }, + "DescribeEndPointStateInput":{ + "type":"structure", + "required":["LoadBalancerName"], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Instances":{"shape":"Instances"} + } + }, + "DescribeEndPointStateOutput":{ + "type":"structure", + "members":{ + "InstanceStates":{"shape":"InstanceStates"} + } + }, + "DescribeLoadBalancerAttributesInput":{ + "type":"structure", + "required":["LoadBalancerName"], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"} + } + }, + "DescribeLoadBalancerAttributesOutput":{ + "type":"structure", + "members":{ + "LoadBalancerAttributes":{"shape":"LoadBalancerAttributes"} + } + }, + "DescribeLoadBalancerPoliciesInput":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "DescribeLoadBalancerPoliciesOutput":{ + "type":"structure", + "members":{ + "PolicyDescriptions":{"shape":"PolicyDescriptions"} + } + }, + "DescribeLoadBalancerPolicyTypesInput":{ + "type":"structure", + "members":{ + "PolicyTypeNames":{"shape":"PolicyTypeNames"} + } + }, + "DescribeLoadBalancerPolicyTypesOutput":{ + "type":"structure", + "members":{ + "PolicyTypeDescriptions":{"shape":"PolicyTypeDescriptions"} + } + }, + "DescribeTagsInput":{ + "type":"structure", + "required":["LoadBalancerNames"], + "members":{ + "LoadBalancerNames":{"shape":"LoadBalancerNamesMax20"} + } + }, + "DescribeTagsOutput":{ + "type":"structure", + "members":{ + "TagDescriptions":{"shape":"TagDescriptions"} + } + }, + "Description":{"type":"string"}, + "DetachLoadBalancerFromSubnetsInput":{ + "type":"structure", + 
"required":[ + "LoadBalancerName", + "Subnets" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Subnets":{"shape":"Subnets"} + } + }, + "DetachLoadBalancerFromSubnetsOutput":{ + "type":"structure", + "members":{ + "Subnets":{"shape":"Subnets"} + } + }, + "DuplicateAccessPointNameException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DuplicateLoadBalancerName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DuplicateListenerException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DuplicateListener", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DuplicatePolicyNameException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DuplicatePolicyName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DuplicateTagKeysException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DuplicateTagKeys", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EndPointPort":{"type":"integer"}, + "HealthCheck":{ + "type":"structure", + "required":[ + "Target", + "Interval", + "Timeout", + "UnhealthyThreshold", + "HealthyThreshold" + ], + "members":{ + "Target":{"shape":"HealthCheckTarget"}, + "Interval":{"shape":"HealthCheckInterval"}, + "Timeout":{"shape":"HealthCheckTimeout"}, + "UnhealthyThreshold":{"shape":"UnhealthyThreshold"}, + "HealthyThreshold":{"shape":"HealthyThreshold"} + } + }, + "HealthCheckInterval":{ + "type":"integer", + "min":1, + "max":300 + }, + "HealthCheckTarget":{"type":"string"}, + "HealthCheckTimeout":{ + "type":"integer", + "min":1, + "max":300 + }, + "HealthyThreshold":{ + "type":"integer", + "min":2, + "max":10 + }, + "IdleTimeout":{ + "type":"integer", + "min":1, + "max":3600 + }, + "Instance":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"InstanceId"} + } + }, + "InstanceId":{"type":"string"}, + "InstancePort":{ + "type":"integer", + 
"min":1, + "max":65535 + }, + "InstanceState":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "State":{"shape":"State"}, + "ReasonCode":{"shape":"ReasonCode"}, + "Description":{"shape":"Description"} + } + }, + "InstanceStates":{ + "type":"list", + "member":{"shape":"InstanceState"} + }, + "Instances":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InvalidConfigurationRequestException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidConfigurationRequest", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "InvalidEndPointException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidInstance", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSchemeException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidScheme", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSecurityGroupException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnetException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LBCookieStickinessPolicies":{ + "type":"list", + "member":{"shape":"LBCookieStickinessPolicy"} + }, + "LBCookieStickinessPolicy":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"PolicyName"}, + "CookieExpirationPeriod":{"shape":"CookieExpirationPeriod"} + } + }, + "Listener":{ + "type":"structure", + "required":[ + "Protocol", + "LoadBalancerPort", + "InstancePort" + ], + "members":{ + "Protocol":{"shape":"Protocol"}, + "LoadBalancerPort":{"shape":"AccessPointPort"}, + "InstanceProtocol":{"shape":"Protocol"}, + "InstancePort":{"shape":"InstancePort"}, + "SSLCertificateId":{"shape":"SSLCertificateId"} + } + }, + 
"ListenerDescription":{ + "type":"structure", + "members":{ + "Listener":{"shape":"Listener"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "ListenerDescriptions":{ + "type":"list", + "member":{"shape":"ListenerDescription"} + }, + "ListenerNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ListenerNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Listeners":{ + "type":"list", + "member":{"shape":"Listener"} + }, + "LoadBalancerAttributeNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LoadBalancerAttributeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LoadBalancerAttributes":{ + "type":"structure", + "members":{ + "CrossZoneLoadBalancing":{"shape":"CrossZoneLoadBalancing"}, + "AccessLog":{"shape":"AccessLog"}, + "ConnectionDraining":{"shape":"ConnectionDraining"}, + "ConnectionSettings":{"shape":"ConnectionSettings"}, + "AdditionalAttributes":{"shape":"AdditionalAttributes"} + } + }, + "LoadBalancerDescription":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "DNSName":{"shape":"DNSName"}, + "CanonicalHostedZoneName":{"shape":"DNSName"}, + "CanonicalHostedZoneNameID":{"shape":"DNSName"}, + "ListenerDescriptions":{"shape":"ListenerDescriptions"}, + "Policies":{"shape":"Policies"}, + "BackendServerDescriptions":{"shape":"BackendServerDescriptions"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "Subnets":{"shape":"Subnets"}, + "VPCId":{"shape":"VPCId"}, + "Instances":{"shape":"Instances"}, + "HealthCheck":{"shape":"HealthCheck"}, + "SourceSecurityGroup":{"shape":"SourceSecurityGroup"}, + "SecurityGroups":{"shape":"SecurityGroups"}, + "CreatedTime":{"shape":"CreatedTime"}, + "Scheme":{"shape":"LoadBalancerScheme"} + } + }, + "LoadBalancerDescriptions":{ + "type":"list", + "member":{"shape":"LoadBalancerDescription"} + }, + "LoadBalancerNames":{ + "type":"list", + 
"member":{"shape":"AccessPointName"} + }, + "LoadBalancerNamesMax20":{ + "type":"list", + "member":{"shape":"AccessPointName"}, + "min":1, + "max":20 + }, + "LoadBalancerScheme":{"type":"string"}, + "Marker":{"type":"string"}, + "ModifyLoadBalancerAttributesInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "LoadBalancerAttributes" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerAttributes":{"shape":"LoadBalancerAttributes"} + } + }, + "ModifyLoadBalancerAttributesOutput":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerAttributes":{"shape":"LoadBalancerAttributes"} + } + }, + "PageSize":{ + "type":"integer", + "min":1, + "max":400 + }, + "Policies":{ + "type":"structure", + "members":{ + "AppCookieStickinessPolicies":{"shape":"AppCookieStickinessPolicies"}, + "LBCookieStickinessPolicies":{"shape":"LBCookieStickinessPolicies"}, + "OtherPolicies":{"shape":"PolicyNames"} + } + }, + "PolicyAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"AttributeName"}, + "AttributeValue":{"shape":"AttributeValue"} + } + }, + "PolicyAttributeDescription":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"AttributeName"}, + "AttributeValue":{"shape":"AttributeValue"} + } + }, + "PolicyAttributeDescriptions":{ + "type":"list", + "member":{"shape":"PolicyAttributeDescription"} + }, + "PolicyAttributeTypeDescription":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"AttributeName"}, + "AttributeType":{"shape":"AttributeType"}, + "Description":{"shape":"Description"}, + "DefaultValue":{"shape":"DefaultValue"}, + "Cardinality":{"shape":"Cardinality"} + } + }, + "PolicyAttributeTypeDescriptions":{ + "type":"list", + "member":{"shape":"PolicyAttributeTypeDescription"} + }, + "PolicyAttributes":{ + "type":"list", + "member":{"shape":"PolicyAttribute"} + }, + "PolicyDescription":{ + "type":"structure", + "members":{ + 
"PolicyName":{"shape":"PolicyName"}, + "PolicyTypeName":{"shape":"PolicyTypeName"}, + "PolicyAttributeDescriptions":{"shape":"PolicyAttributeDescriptions"} + } + }, + "PolicyDescriptions":{ + "type":"list", + "member":{"shape":"PolicyDescription"} + }, + "PolicyName":{"type":"string"}, + "PolicyNames":{ + "type":"list", + "member":{"shape":"PolicyName"} + }, + "PolicyNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PolicyTypeDescription":{ + "type":"structure", + "members":{ + "PolicyTypeName":{"shape":"PolicyTypeName"}, + "Description":{"shape":"Description"}, + "PolicyAttributeTypeDescriptions":{"shape":"PolicyAttributeTypeDescriptions"} + } + }, + "PolicyTypeDescriptions":{ + "type":"list", + "member":{"shape":"PolicyTypeDescription"} + }, + "PolicyTypeName":{"type":"string"}, + "PolicyTypeNames":{ + "type":"list", + "member":{"shape":"PolicyTypeName"} + }, + "PolicyTypeNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PolicyTypeNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Ports":{ + "type":"list", + "member":{"shape":"AccessPointPort"} + }, + "Protocol":{"type":"string"}, + "ReasonCode":{"type":"string"}, + "RegisterEndPointsInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "Instances" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Instances":{"shape":"Instances"} + } + }, + "RegisterEndPointsOutput":{ + "type":"structure", + "members":{ + "Instances":{"shape":"Instances"} + } + }, + "RemoveAvailabilityZonesInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "AvailabilityZones" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "RemoveAvailabilityZonesOutput":{ + "type":"structure", + "members":{ + 
"AvailabilityZones":{"shape":"AvailabilityZones"} + } + }, + "RemoveTagsInput":{ + "type":"structure", + "required":[ + "LoadBalancerNames", + "Tags" + ], + "members":{ + "LoadBalancerNames":{"shape":"LoadBalancerNames"}, + "Tags":{"shape":"TagKeyList"} + } + }, + "RemoveTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "S3BucketName":{"type":"string"}, + "SSLCertificateId":{"type":"string"}, + "SecurityGroupId":{"type":"string"}, + "SecurityGroupName":{"type":"string"}, + "SecurityGroupOwnerAlias":{"type":"string"}, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"SecurityGroupId"} + }, + "SetLoadBalancerListenerSSLCertificateInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "LoadBalancerPort", + "SSLCertificateId" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerPort":{"shape":"AccessPointPort"}, + "SSLCertificateId":{"shape":"SSLCertificateId"} + } + }, + "SetLoadBalancerListenerSSLCertificateOutput":{ + "type":"structure", + "members":{ + } + }, + "SetLoadBalancerPoliciesForBackendServerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "InstancePort", + "PolicyNames" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "InstancePort":{"shape":"EndPointPort"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "SetLoadBalancerPoliciesForBackendServerOutput":{ + "type":"structure", + "members":{ + } + }, + "SetLoadBalancerPoliciesOfListenerInput":{ + "type":"structure", + "required":[ + "LoadBalancerName", + "LoadBalancerPort", + "PolicyNames" + ], + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "LoadBalancerPort":{"shape":"AccessPointPort"}, + "PolicyNames":{"shape":"PolicyNames"} + } + }, + "SetLoadBalancerPoliciesOfListenerOutput":{ + "type":"structure", + "members":{ + } + }, + "SourceSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerAlias":{"shape":"SecurityGroupOwnerAlias"}, + 
"GroupName":{"shape":"SecurityGroupName"} + } + }, + "State":{"type":"string"}, + "StringVal":{"type":"string"}, + "SubnetId":{"type":"string"}, + "SubnetNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Subnets":{ + "type":"list", + "member":{"shape":"SubnetId"} + }, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagDescription":{ + "type":"structure", + "members":{ + "LoadBalancerName":{"shape":"AccessPointName"}, + "Tags":{"shape":"TagList"} + } + }, + "TagDescriptions":{ + "type":"list", + "member":{"shape":"TagDescription"} + }, + "TagKey":{ + "type":"string", + "min":1, + "max":128, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKeyOnly"}, + "min":1 + }, + "TagKeyOnly":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"} + } + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "min":1 + }, + "TagValue":{ + "type":"string", + "min":0, + "max":256, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TooManyAccessPointsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyLoadBalancers", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyPoliciesException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyPolicies", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyTags", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnhealthyThreshold":{ + "type":"integer", + "min":2, + "max":10 + }, + "VPCId":{"type":"string"} + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1078 @@ +{ + "version": "2.0", + "operations": { + "AddTags": "

    Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags.

    Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, AddTags updates its value.

    For more information, see Tag Your Load Balancer in the Elastic Load Balancing Developer Guide.

    ", + "ApplySecurityGroupsToLoadBalancer": "

    Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups.

    For more information, see Security Groups for Load Balancers in a VPC in the Elastic Load Balancing Developer Guide.

    ", + "AttachLoadBalancerToSubnets": "

    Adds one or more subnets to the set of configured subnets for the specified load balancer.

    The load balancer evenly distributes requests across all registered subnets. For more information, see Add or Remove Subnets for Your Load Balancer in a VPC in the Elastic Load Balancing Developer Guide.

    ", + "ConfigureHealthCheck": "

    Specifies the health check settings to use when evaluating the health state of your back-end instances.

    For more information, see Configure Health Checks in the Elastic Load Balancing Developer Guide.

    ", + "CreateAppCookieStickinessPolicy": "

    Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can be associated only with HTTP/HTTPS listeners.

    This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie, AWSELB, follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie.

    If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued.

    For more information, see Application-Controlled Session Stickiness in the Elastic Load Balancing Developer Guide.

    ", + "CreateLBCookieStickinessPolicy": "

    Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners.

    When a load balancer implements this policy, the load balancer uses a special cookie to track the back-end server instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. If not, the load balancer sends the request to a server that is chosen based on the existing load-balancing algorithm.

    A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration.

    For more information, see Duration-Based Session Stickiness in the Elastic Load Balancing Developer Guide.

    ", + "CreateLoadBalancer": "

    Creates a load balancer.

    If the call completes successfully, a new load balancer is created with a unique Domain Name Service (DNS) name. The load balancer receives incoming traffic and routes it to the registered instances. For more information, see How Elastic Load Balancing Works in the Elastic Load Balancing Developer Guide.

    You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Elastic Load Balancing Limits in the Elastic Load Balancing Developer Guide.

    ", + "CreateLoadBalancerListeners": "

    Creates one or more listeners for the specified load balancer. If a listener with the specified port does not already exist, it is created; otherwise, the properties of the new listener must match the properties of the existing listener.

    For more information, see Add a Listener to Your Load Balancer in the Elastic Load Balancing Developer Guide.

    ", + "CreateLoadBalancerPolicy": "

    Creates a policy with the specified attributes for the specified load balancer.

    Policies are settings that are saved for your load balancer and that can be applied to the front-end listener or the back-end application server, depending on the policy type.

    ", + "DeleteLoadBalancer": "

    Deletes the specified load balancer.

    If you are attempting to recreate a load balancer, you must reconfigure all settings. The DNS name associated with a deleted load balancer are no longer usable. The name and associated DNS record of the deleted load balancer no longer exist and traffic sent to any of its IP addresses is no longer delivered to back-end instances.

    If the load balancer does not exist or has already been deleted, the call to DeleteLoadBalancer still succeeds.

    ", + "DeleteLoadBalancerListeners": "

    Deletes the specified listeners from the specified load balancer.

    ", + "DeleteLoadBalancerPolicy": "

    Deletes the specified policy from the specified load balancer. This policy must not be enabled for any listeners.

    ", + "DeregisterInstancesFromLoadBalancer": "

    Deregisters the specified instances from the specified load balancer. After the instance is deregistered, it no longer receives traffic from the load balancer.

    You can use DescribeLoadBalancers to verify that the instance is deregistered from the load balancer.

    For more information, see Deregister and Register Amazon EC2 Instances in the Elastic Load Balancing Developer Guide.

    ", + "DescribeInstanceHealth": "

    Describes the state of the specified instances with respect to the specified load balancer. If no instances are specified, the call describes the state of all instances that are currently registered with the load balancer. If instances are specified, their state is returned even if they are no longer registered with the load balancer. The state of terminated instances is not returned.

    ", + "DescribeLoadBalancerAttributes": "

    Describes the attributes for the specified load balancer.

    ", + "DescribeLoadBalancerPolicies": "

    Describes the specified policies.

    If you specify a load balancer name, the action returns the descriptions of all policies created for the load balancer. If you specify a policy name associated with your load balancer, the action returns the description of that policy. If you don't specify a load balancer name, the action returns descriptions of the specified sample policies, or descriptions of all sample policies. The names of the sample policies have the ELBSample- prefix.

    ", + "DescribeLoadBalancerPolicyTypes": "

    Describes the specified load balancer policy types.

    You can use these policy types with CreateLoadBalancerPolicy to create policy configurations for a load balancer.

    ", + "DescribeLoadBalancers": "

    Describes the specified the load balancers. If no load balancers are specified, the call describes all of your load balancers.

    ", + "DescribeTags": "

    Describes the tags associated with the specified load balancers.

    ", + "DetachLoadBalancerFromSubnets": "

    Removes the specified subnets from the set of configured subnets for the load balancer.

    After a subnet is removed, all EC2 instances registered with the load balancer in the removed subnet go into the OutOfService state. Then, the load balancer balances the traffic among the remaining routable subnets.

    ", + "DisableAvailabilityZonesForLoadBalancer": "

    Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer.

    There must be at least one Availability Zone registered with a load balancer at all times. After an Availability Zone is removed, all instances registered with the load balancer that are in the removed Availability Zone go into the OutOfService state. Then, the load balancer attempts to equally balance the traffic among its remaining Availability Zones.

    For more information, see Disable an Availability Zone from a Load-Balanced Application in the Elastic Load Balancing Developer Guide.

    ", + "EnableAvailabilityZonesForLoadBalancer": "

    Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer.

    The load balancer evenly distributes requests across all its registered Availability Zones that contain instances.

    For more information, see Add Availability Zone in the Elastic Load Balancing Developer Guide.

    ", + "ModifyLoadBalancerAttributes": "

    Modifies the attributes of the specified load balancer.

    You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, and CrossZoneLoadBalancing by either enabling or disabling them. Or, you can modify the load balancer attribute ConnectionSettings by specifying an idle connection timeout value for your load balancer.

    For more information, see the following in the Elastic Load Balancing Developer Guide:

    ", + "RegisterInstancesWithLoadBalancer": "

    Adds the specified instances to the specified load balancer.

    The instance must be a running instance in the same network as the load balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that VPC and then register the linked EC2-Classic instances with the load balancer in the VPC.

    Note that RegisterInstanceWithLoadBalancer completes when the request has been registered. Instance registration takes a little time to complete. To check the state of the registered instances, use DescribeLoadBalancers or DescribeInstanceHealth.

    After the instance is registered, it starts receiving traffic and requests from the load balancer. Any instance that is not in one of the Availability Zones registered for the load balancer is moved to the OutOfService state. If an Availability Zone is added to the load balancer later, any instances registered with the load balancer move to the InService state.

    If you stop an instance registered with a load balancer and then start it, the IP addresses associated with the instance changes. Elastic Load Balancing cannot recognize the new IP address, which prevents it from routing traffic to the instances. We recommend that you use the following sequence: stop the instance, deregister the instance, start the instance, and then register the instance. To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer.

    For more information, see Deregister and Register EC2 Instances in the Elastic Load Balancing Developer Guide.

    ", + "RemoveTags": "

    Removes one or more tags from the specified load balancer.

    ", + "SetLoadBalancerListenerSSLCertificate": "

    Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same load balancer and port.

    For more information about updating your SSL certificate, see Updating an SSL Certificate for a Load Balancer in the Elastic Load Balancing Developer Guide.

    ", + "SetLoadBalancerPoliciesForBackendServer": "

    Replaces the set of policies associated with the specified port on which the back-end server is listening with a new set of policies. At this time, only the back-end server authentication policy type can be applied to the back-end ports; this policy type is composed of multiple public key policies.

    Each time you use SetLoadBalancerPoliciesForBackendServer to enable the policies, use the PolicyNames parameter to list the policies that you want to enable.

    You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify that the policy is associated with the back-end server.

    ", + "SetLoadBalancerPoliciesOfListener": "

    Associates, updates, or disables a policy with a listener for the specified load balancer. You can associate multiple policies with a listener.

    " + }, + "service": "Elastic Load Balancing

    Elastic Load Balancing distributes incoming traffic across your EC2 instances.

    For information about the features of Elastic Load Balancing, see What Is Elastic Load Balancing? in the Elastic Load Balancing Developer Guide.

    For information about the AWS regions supported by Elastic Load Balancing, see Regions and Endpoints - Elastic Load Balancing in the Amazon Web Services General Reference.

    All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds with a 200 OK response code.

    ", + "shapes": { + "AccessLog": { + "base": "

    Information about the AccessLog attribute.

    ", + "refs": { + "LoadBalancerAttributes$AccessLog": "

    If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify.

    For more information, see Enable Access Logs in the Elastic Load Balancing Developer Guide.

    " + } + }, + "AccessLogEnabled": { + "base": null, + "refs": { + "AccessLog$Enabled": "

    Specifies whether access log is enabled for the load balancer.

    " + } + }, + "AccessLogInterval": { + "base": null, + "refs": { + "AccessLog$EmitInterval": "

    The interval for publishing the access logs. You can specify an interval of either 5 minutes or 60 minutes.

    Default: 60 minutes

    " + } + }, + "AccessLogPrefix": { + "base": null, + "refs": { + "AccessLog$S3BucketPrefix": "

    The logical hierarchy you created for your Amazon S3 bucket, for example my-bucket-prefix/prod. If the prefix is not provided, the log is placed at the root level of the bucket.

    " + } + }, + "AccessPointName": { + "base": null, + "refs": { + "AddAvailabilityZonesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "ApplySecurityGroupsToLoadBalancerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "AttachLoadBalancerToSubnetsInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "ConfigureHealthCheckInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "CreateAccessPointInput$LoadBalancerName": "

    The name of the load balancer.

    This name must be unique within your set of load balancers for the region, must have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen.

    ", + "CreateAppCookieStickinessPolicyInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "CreateLBCookieStickinessPolicyInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "CreateLoadBalancerListenerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "CreateLoadBalancerPolicyInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DeleteAccessPointInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DeleteLoadBalancerListenerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DeleteLoadBalancerPolicyInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DeregisterEndPointsInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DescribeEndPointStateInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DescribeLoadBalancerAttributesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DescribeLoadBalancerPoliciesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "DetachLoadBalancerFromSubnetsInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "LoadBalancerDescription$LoadBalancerName": "

    The name of the load balancer.

    ", + "LoadBalancerNames$member": null, + "LoadBalancerNamesMax20$member": null, + "ModifyLoadBalancerAttributesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "ModifyLoadBalancerAttributesOutput$LoadBalancerName": "

    The name of the load balancer.

    ", + "RegisterEndPointsInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "RemoveAvailabilityZonesInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "SetLoadBalancerListenerSSLCertificateInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "SetLoadBalancerPoliciesForBackendServerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "SetLoadBalancerPoliciesOfListenerInput$LoadBalancerName": "

    The name of the load balancer.

    ", + "TagDescription$LoadBalancerName": "

    The name of the load balancer.

    " + } + }, + "AccessPointNotFoundException": { + "base": "

    The specified load balancer does not exist.

    ", + "refs": { + } + }, + "AccessPointPort": { + "base": null, + "refs": { + "Listener$LoadBalancerPort": "

    The port on which the load balancer is listening. On EC2-VPC, you can specify any port from the range 1-65535. On EC2-Classic, you can specify any port from the following list: 25, 80, 443, 465, 587, 1024-65535.

    ", + "Ports$member": null, + "SetLoadBalancerListenerSSLCertificateInput$LoadBalancerPort": "

    The port that uses the specified SSL certificate.

    ", + "SetLoadBalancerPoliciesOfListenerInput$LoadBalancerPort": "

    The external port of the load balancer for the policy.

    " + } + }, + "AddAvailabilityZonesInput": { + "base": null, + "refs": { + } + }, + "AddAvailabilityZonesOutput": { + "base": null, + "refs": { + } + }, + "AddTagsInput": { + "base": null, + "refs": { + } + }, + "AddTagsOutput": { + "base": null, + "refs": { + } + }, + "AdditionalAttribute": { + "base": "

    This data type is reserved.

    ", + "refs": { + "AdditionalAttributes$member": null + } + }, + "AdditionalAttributes": { + "base": null, + "refs": { + "LoadBalancerAttributes$AdditionalAttributes": "

    This parameter is reserved.

    " + } + }, + "AppCookieStickinessPolicies": { + "base": null, + "refs": { + "Policies$AppCookieStickinessPolicies": "

    The stickiness policies created using CreateAppCookieStickinessPolicy.

    " + } + }, + "AppCookieStickinessPolicy": { + "base": "

    Information about a policy for application-controlled session stickiness.

    ", + "refs": { + "AppCookieStickinessPolicies$member": null + } + }, + "ApplySecurityGroupsToLoadBalancerInput": { + "base": null, + "refs": { + } + }, + "ApplySecurityGroupsToLoadBalancerOutput": { + "base": null, + "refs": { + } + }, + "AttachLoadBalancerToSubnetsInput": { + "base": null, + "refs": { + } + }, + "AttachLoadBalancerToSubnetsOutput": { + "base": null, + "refs": { + } + }, + "AttributeName": { + "base": null, + "refs": { + "PolicyAttribute$AttributeName": "

    The name of the attribute.

    ", + "PolicyAttributeDescription$AttributeName": "

    The name of the attribute.

    ", + "PolicyAttributeTypeDescription$AttributeName": "

    The name of the attribute.

    " + } + }, + "AttributeType": { + "base": null, + "refs": { + "PolicyAttributeTypeDescription$AttributeType": "

    The type of the attribute. For example, Boolean or Integer.

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "PolicyAttribute$AttributeValue": "

    The value of the attribute.

    ", + "PolicyAttributeDescription$AttributeValue": "

    The value of the attribute.

    " + } + }, + "AvailabilityZone": { + "base": null, + "refs": { + "AvailabilityZones$member": null + } + }, + "AvailabilityZones": { + "base": null, + "refs": { + "AddAvailabilityZonesInput$AvailabilityZones": "

    The Availability Zones. These must be in the same region as the load balancer.

    ", + "AddAvailabilityZonesOutput$AvailabilityZones": "

    The updated list of Availability Zones for the load balancer.

    ", + "CreateAccessPointInput$AvailabilityZones": "

    One or more Availability Zones from the same region as the load balancer. Traffic is equally distributed across all specified Availability Zones.

    You must specify at least one Availability Zone.

    You can add more Availability Zones after you create the load balancer using EnableAvailabilityZonesForLoadBalancer.

    ", + "LoadBalancerDescription$AvailabilityZones": "

    The Availability Zones for the load balancer.

    ", + "RemoveAvailabilityZonesInput$AvailabilityZones": "

    The Availability Zones.

    ", + "RemoveAvailabilityZonesOutput$AvailabilityZones": "

    The remaining Availability Zones for the load balancer.

    " + } + }, + "BackendServerDescription": { + "base": "

    Information about the configuration of a back-end server.

    ", + "refs": { + "BackendServerDescriptions$member": null + } + }, + "BackendServerDescriptions": { + "base": null, + "refs": { + "LoadBalancerDescription$BackendServerDescriptions": "

    Information about the back-end servers.

    " + } + }, + "Cardinality": { + "base": null, + "refs": { + "PolicyAttributeTypeDescription$Cardinality": "

    The cardinality of the attribute.

    Valid values:

    • ONE(1) : Single value required
    • ZERO_OR_ONE(0..1) : Up to one value can be supplied
    • ZERO_OR_MORE(0..*) : Optional. Multiple values are allowed
    • ONE_OR_MORE(1..*) : Required. Multiple values are allowed
    " + } + }, + "CertificateNotFoundException": { + "base": "

    The specified SSL ID does not refer to a valid SSL certificate in AWS Identity and Access Management (IAM).

    ", + "refs": { + } + }, + "ConfigureHealthCheckInput": { + "base": null, + "refs": { + } + }, + "ConfigureHealthCheckOutput": { + "base": null, + "refs": { + } + }, + "ConnectionDraining": { + "base": "

    Information about the ConnectionDraining attribute.

    ", + "refs": { + "LoadBalancerAttributes$ConnectionDraining": "

    If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy back-end instance.

    For more information, see Enable Connection Draining in the Elastic Load Balancing Developer Guide.

    " + } + }, + "ConnectionDrainingEnabled": { + "base": null, + "refs": { + "ConnectionDraining$Enabled": "

    Specifies whether connection draining is enabled for the load balancer.

    " + } + }, + "ConnectionDrainingTimeout": { + "base": null, + "refs": { + "ConnectionDraining$Timeout": "

    The maximum time, in seconds, to keep the existing connections open before deregistering the instances.

    " + } + }, + "ConnectionSettings": { + "base": "

    Information about the ConnectionSettings attribute.

    ", + "refs": { + "LoadBalancerAttributes$ConnectionSettings": "

    If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration.

    By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Elastic Load Balancing Developer Guide.

    " + } + }, + "CookieExpirationPeriod": { + "base": null, + "refs": { + "CreateLBCookieStickinessPolicyInput$CookieExpirationPeriod": "

    The time period, in seconds, after which the cookie should be considered stale. If you do not specify this parameter, the sticky session lasts for the duration of the browser session.

    ", + "LBCookieStickinessPolicy$CookieExpirationPeriod": "

    The time period, in seconds, after which the cookie should be considered stale. If this parameter is not specified, the stickiness session lasts for the duration of the browser session.

    " + } + }, + "CookieName": { + "base": null, + "refs": { + "AppCookieStickinessPolicy$CookieName": "

    The name of the application cookie used for stickiness.

    ", + "CreateAppCookieStickinessPolicyInput$CookieName": "

    The name of the application cookie used for stickiness.

    " + } + }, + "CreateAccessPointInput": { + "base": null, + "refs": { + } + }, + "CreateAccessPointOutput": { + "base": null, + "refs": { + } + }, + "CreateAppCookieStickinessPolicyInput": { + "base": null, + "refs": { + } + }, + "CreateAppCookieStickinessPolicyOutput": { + "base": null, + "refs": { + } + }, + "CreateLBCookieStickinessPolicyInput": { + "base": null, + "refs": { + } + }, + "CreateLBCookieStickinessPolicyOutput": { + "base": null, + "refs": { + } + }, + "CreateLoadBalancerListenerInput": { + "base": null, + "refs": { + } + }, + "CreateLoadBalancerListenerOutput": { + "base": null, + "refs": { + } + }, + "CreateLoadBalancerPolicyInput": { + "base": null, + "refs": { + } + }, + "CreateLoadBalancerPolicyOutput": { + "base": null, + "refs": { + } + }, + "CreatedTime": { + "base": null, + "refs": { + "LoadBalancerDescription$CreatedTime": "

    The date and time the load balancer was created.

    " + } + }, + "CrossZoneLoadBalancing": { + "base": "

    Information about the CrossZoneLoadBalancing attribute.

    ", + "refs": { + "LoadBalancerAttributes$CrossZoneLoadBalancing": "

    If enabled, the load balancer routes the request traffic evenly across all back-end instances regardless of the Availability Zones.

    For more information, see Enable Cross-Zone Load Balancing in the Elastic Load Balancing Developer Guide.

    " + } + }, + "CrossZoneLoadBalancingEnabled": { + "base": null, + "refs": { + "CrossZoneLoadBalancing$Enabled": "

    Specifies whether cross-zone load balancing is enabled for the load balancer.

    " + } + }, + "DNSName": { + "base": null, + "refs": { + "CreateAccessPointOutput$DNSName": "

    The DNS name of the load balancer.

    ", + "LoadBalancerDescription$DNSName": "

    The external DNS name of the load balancer.

    ", + "LoadBalancerDescription$CanonicalHostedZoneName": "

    The Amazon Route 53 hosted zone associated with the load balancer.

    For more information, see Using Domain Names With Elastic Load Balancing in the Elastic Load Balancing Developer Guide.

    ", + "LoadBalancerDescription$CanonicalHostedZoneNameID": "

    The ID of the Amazon Route 53 hosted zone name associated with the load balancer.

    " + } + }, + "DefaultValue": { + "base": null, + "refs": { + "PolicyAttributeTypeDescription$DefaultValue": "

    The default value of the attribute, if applicable.

    " + } + }, + "DeleteAccessPointInput": { + "base": null, + "refs": { + } + }, + "DeleteAccessPointOutput": { + "base": null, + "refs": { + } + }, + "DeleteLoadBalancerListenerInput": { + "base": null, + "refs": { + } + }, + "DeleteLoadBalancerListenerOutput": { + "base": null, + "refs": { + } + }, + "DeleteLoadBalancerPolicyInput": { + "base": "=", + "refs": { + } + }, + "DeleteLoadBalancerPolicyOutput": { + "base": null, + "refs": { + } + }, + "DeregisterEndPointsInput": { + "base": null, + "refs": { + } + }, + "DeregisterEndPointsOutput": { + "base": null, + "refs": { + } + }, + "DescribeAccessPointsInput": { + "base": null, + "refs": { + } + }, + "DescribeAccessPointsOutput": { + "base": null, + "refs": { + } + }, + "DescribeEndPointStateInput": { + "base": null, + "refs": { + } + }, + "DescribeEndPointStateOutput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerAttributesInput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerAttributesOutput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerPoliciesInput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerPoliciesOutput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerPolicyTypesInput": { + "base": null, + "refs": { + } + }, + "DescribeLoadBalancerPolicyTypesOutput": { + "base": null, + "refs": { + } + }, + "DescribeTagsInput": { + "base": null, + "refs": { + } + }, + "DescribeTagsOutput": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "InstanceState$Description": "

    A description of the instance state. This string can contain one or more of the following messages.

    • N/A

    • A transient error occurred. Please try again later.

    • Instance has failed at least the UnhealthyThreshold number of health checks consecutively.

    • Instance has not passed the configured HealthyThreshold number of health checks consecutively.

    • Instance registration is still in progress.

    • Instance is in the EC2 Availability Zone for which LoadBalancer is not configured to route traffic to.

    • Instance is not currently registered with the LoadBalancer.

    • Instance deregistration currently in progress.

    • Disable Availability Zone is currently in progress.

    • Instance is in pending state.

    • Instance is in stopped state.

    • Instance is in terminated state.

    ", + "PolicyAttributeTypeDescription$Description": "

    A description of the attribute.

    ", + "PolicyTypeDescription$Description": "

    A description of the policy type.

    " + } + }, + "DetachLoadBalancerFromSubnetsInput": { + "base": null, + "refs": { + } + }, + "DetachLoadBalancerFromSubnetsOutput": { + "base": null, + "refs": { + } + }, + "DuplicateAccessPointNameException": { + "base": "

    The specified load balancer name already exists for this account.

    ", + "refs": { + } + }, + "DuplicateListenerException": { + "base": "

    A listener already exists for the specified LoadBalancerName and LoadBalancerPort, but with a different InstancePort, Protocol, or SSLCertificateId.

    ", + "refs": { + } + }, + "DuplicatePolicyNameException": { + "base": "

    A policy with the specified name already exists for this load balancer.

    ", + "refs": { + } + }, + "DuplicateTagKeysException": { + "base": "

    A tag key was specified more than once.

    ", + "refs": { + } + }, + "EndPointPort": { + "base": null, + "refs": { + "SetLoadBalancerPoliciesForBackendServerInput$InstancePort": "

    The port number associated with the back-end server.

    " + } + }, + "HealthCheck": { + "base": "

    Information about a health check.

    ", + "refs": { + "ConfigureHealthCheckInput$HealthCheck": "

    The configuration information for the new health check.

    ", + "ConfigureHealthCheckOutput$HealthCheck": "

    The updated health check.

    ", + "LoadBalancerDescription$HealthCheck": "

    Information about the health checks conducted on the load balancer.

    " + } + }, + "HealthCheckInterval": { + "base": null, + "refs": { + "HealthCheck$Interval": "

    The approximate interval, in seconds, between health checks of an individual instance.

    " + } + }, + "HealthCheckTarget": { + "base": null, + "refs": { + "HealthCheck$Target": "

    The instance being checked. The protocol is either TCP, HTTP, HTTPS, or SSL. The range of valid ports is one (1) through 65535.

    TCP is the default, specified as a TCP: port pair, for example \"TCP:5000\". In this case, a health check simply attempts to open a TCP connection to the instance on the specified port. Failure to connect within the configured timeout is considered unhealthy.

    SSL is also specified as SSL: port pair, for example, SSL:5000.

    For HTTP/HTTPS, you must include a ping path in the string. HTTP is specified as a HTTP:port/PathToPing grouping, for example \"HTTP:80/weather/us/wa/seattle\". In this case, a HTTP GET request is issued to the instance on the given port and path. Any answer other than \"200 OK\" within the timeout period is considered unhealthy.

    The total length of the HTTP ping target must be 1024 16-bit Unicode characters or less.

    " + } + }, + "HealthCheckTimeout": { + "base": null, + "refs": { + "HealthCheck$Timeout": "

    The amount of time, in seconds, during which no response means a failed health check.

    This value must be less than the Interval value.

    " + } + }, + "HealthyThreshold": { + "base": null, + "refs": { + "HealthCheck$HealthyThreshold": "

    The number of consecutive health checks successes required before moving the instance to the Healthy state.

    " + } + }, + "IdleTimeout": { + "base": null, + "refs": { + "ConnectionSettings$IdleTimeout": "

    The time, in seconds, that the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer.

    " + } + }, + "Instance": { + "base": "

    The ID of a back-end instance.

    ", + "refs": { + "Instances$member": null + } + }, + "InstanceId": { + "base": null, + "refs": { + "Instance$InstanceId": "

    The ID of the instance.

    ", + "InstanceState$InstanceId": "

    The ID of the instance.

    " + } + }, + "InstancePort": { + "base": null, + "refs": { + "BackendServerDescription$InstancePort": "

    The port on which the back-end server is listening.

    ", + "Listener$InstancePort": "

    The port on which the instance is listening.

    " + } + }, + "InstanceState": { + "base": "

    Information about the state of a back-end instance.

    ", + "refs": { + "InstanceStates$member": null + } + }, + "InstanceStates": { + "base": null, + "refs": { + "DescribeEndPointStateOutput$InstanceStates": "

    Information about the health of the instances.

    " + } + }, + "Instances": { + "base": null, + "refs": { + "DeregisterEndPointsInput$Instances": "

    The IDs of the instances.

    ", + "DeregisterEndPointsOutput$Instances": "

    The remaining instances registered with the load balancer.

    ", + "DescribeEndPointStateInput$Instances": "

    The IDs of the instances.

    ", + "LoadBalancerDescription$Instances": "

    The IDs of the instances for the load balancer.

    ", + "RegisterEndPointsInput$Instances": "

    The IDs of the instances.

    ", + "RegisterEndPointsOutput$Instances": "

    The updated list of instances for the load balancer.

    " + } + }, + "InvalidConfigurationRequestException": { + "base": "

    The requested configuration change is not valid.

    ", + "refs": { + } + }, + "InvalidEndPointException": { + "base": "

    The specified endpoint is not valid.

    ", + "refs": { + } + }, + "InvalidSchemeException": { + "base": "

    The specified value for the scheme is not valid. You can only specify a scheme for load balancers in a VPC.

    ", + "refs": { + } + }, + "InvalidSecurityGroupException": { + "base": "

    One or more of the specified security groups do not exist.

    ", + "refs": { + } + }, + "InvalidSubnetException": { + "base": "

    The specified VPC has no associated Internet gateway.

    ", + "refs": { + } + }, + "LBCookieStickinessPolicies": { + "base": null, + "refs": { + "Policies$LBCookieStickinessPolicies": "

    The stickiness policies created using CreateLBCookieStickinessPolicy.

    " + } + }, + "LBCookieStickinessPolicy": { + "base": "

    Information about a policy for duration-based session stickiness.

    ", + "refs": { + "LBCookieStickinessPolicies$member": null + } + }, + "Listener": { + "base": "

    Information about a listener.

    For information about the protocols and the ports supported by Elastic Load Balancing, see Listener Configurations for Elastic Load Balancing in the Elastic Load Balancing Developer Guide.

    ", + "refs": { + "ListenerDescription$Listener": null, + "Listeners$member": null + } + }, + "ListenerDescription": { + "base": "

    The policies enabled for a listener.

    ", + "refs": { + "ListenerDescriptions$member": null + } + }, + "ListenerDescriptions": { + "base": null, + "refs": { + "LoadBalancerDescription$ListenerDescriptions": "

    The listeners for the load balancer.

    " + } + }, + "ListenerNotFoundException": { + "base": "

    The load balancer does not have a listener configured at the specified port.

    ", + "refs": { + } + }, + "Listeners": { + "base": null, + "refs": { + "CreateAccessPointInput$Listeners": "

    The listeners.

    For more information, see Listeners for Your Load Balancer in the Elastic Load Balancing Developer Guide.

    ", + "CreateLoadBalancerListenerInput$Listeners": "

    The listeners.

    " + } + }, + "LoadBalancerAttributeNotFoundException": { + "base": "

    The specified load balancer attribute does not exist.

    ", + "refs": { + } + }, + "LoadBalancerAttributes": { + "base": "

    The attributes for a load balancer.

    ", + "refs": { + "DescribeLoadBalancerAttributesOutput$LoadBalancerAttributes": "

    Information about the load balancer attributes.

    ", + "ModifyLoadBalancerAttributesInput$LoadBalancerAttributes": "

    The attributes of the load balancer.

    ", + "ModifyLoadBalancerAttributesOutput$LoadBalancerAttributes": null + } + }, + "LoadBalancerDescription": { + "base": "

    Information about a load balancer.

    ", + "refs": { + "LoadBalancerDescriptions$member": null + } + }, + "LoadBalancerDescriptions": { + "base": null, + "refs": { + "DescribeAccessPointsOutput$LoadBalancerDescriptions": "

    Information about the load balancers.

    " + } + }, + "LoadBalancerNames": { + "base": null, + "refs": { + "AddTagsInput$LoadBalancerNames": "

    The name of the load balancer. You can specify one load balancer only.

    ", + "DescribeAccessPointsInput$LoadBalancerNames": "

    The names of the load balancers.

    ", + "RemoveTagsInput$LoadBalancerNames": "

    The name of the load balancer. You can specify a maximum of one load balancer name.

    " + } + }, + "LoadBalancerNamesMax20": { + "base": null, + "refs": { + "DescribeTagsInput$LoadBalancerNames": "

    The names of the load balancers.

    " + } + }, + "LoadBalancerScheme": { + "base": null, + "refs": { + "CreateAccessPointInput$Scheme": "

    The type of a load balancer. Valid only for load balancers in a VPC.

    By default, Elastic Load Balancing creates an Internet-facing load balancer with a publicly resolvable DNS name, which resolves to public IP addresses. For more information about Internet-facing and Internal load balancers, see Internet-facing and Internal Load Balancers in the Elastic Load Balancing Developer Guide.

    Specify internal to create an internal load balancer with a DNS name that resolves to private IP addresses.

    ", + "LoadBalancerDescription$Scheme": "

    The type of load balancer. Valid only for load balancers in a VPC.

    If Scheme is internet-facing, the load balancer has a public DNS name that resolves to a public IP address.

    If Scheme is internal, the load balancer has a public DNS name that resolves to a private IP address.

    " + } + }, + "Marker": { + "base": null, + "refs": { + "DescribeAccessPointsInput$Marker": "

    The marker for the next set of results. (You received this marker from a previous call.)

    ", + "DescribeAccessPointsOutput$NextMarker": "

    The marker to use when requesting the next set of results. If there are no additional results, the string is empty.

    " + } + }, + "ModifyLoadBalancerAttributesInput": { + "base": null, + "refs": { + } + }, + "ModifyLoadBalancerAttributesOutput": { + "base": null, + "refs": { + } + }, + "PageSize": { + "base": null, + "refs": { + "DescribeAccessPointsInput$PageSize": "

    The maximum number of results to return with this call (a number from 1 to 400). The default is 400.

    " + } + }, + "Policies": { + "base": "

    The policies for a load balancer.

    ", + "refs": { + "LoadBalancerDescription$Policies": "

    The policies defined for the load balancer.

    " + } + }, + "PolicyAttribute": { + "base": "

    Information about a policy attribute.

    ", + "refs": { + "PolicyAttributes$member": null + } + }, + "PolicyAttributeDescription": { + "base": "

    Information about a policy attribute.

    ", + "refs": { + "PolicyAttributeDescriptions$member": null + } + }, + "PolicyAttributeDescriptions": { + "base": null, + "refs": { + "PolicyDescription$PolicyAttributeDescriptions": "

    The policy attributes.

    " + } + }, + "PolicyAttributeTypeDescription": { + "base": "

    Information about a policy attribute type.

    ", + "refs": { + "PolicyAttributeTypeDescriptions$member": null + } + }, + "PolicyAttributeTypeDescriptions": { + "base": null, + "refs": { + "PolicyTypeDescription$PolicyAttributeTypeDescriptions": "

    The description of the policy attributes associated with the policies defined by Elastic Load Balancing.

    " + } + }, + "PolicyAttributes": { + "base": null, + "refs": { + "CreateLoadBalancerPolicyInput$PolicyAttributes": "

    The attributes for the policy.

    " + } + }, + "PolicyDescription": { + "base": "

    Information about a policy.

    ", + "refs": { + "PolicyDescriptions$member": null + } + }, + "PolicyDescriptions": { + "base": null, + "refs": { + "DescribeLoadBalancerPoliciesOutput$PolicyDescriptions": "

    Information about the policies.

    " + } + }, + "PolicyName": { + "base": null, + "refs": { + "AppCookieStickinessPolicy$PolicyName": "

    The mnemonic name for the policy being created. The name must be unique within a set of policies for this load balancer.

    ", + "CreateAppCookieStickinessPolicyInput$PolicyName": "

    The name of the policy being created. Policy names must consist of alphanumeric characters and dashes (-). This name must be unique within the set of policies for this load balancer.

    ", + "CreateLBCookieStickinessPolicyInput$PolicyName": "

    The name of the policy being created. Policy names must consist of alphanumeric characters and dashes (-). This name must be unique within the set of policies for this load balancer.

    ", + "CreateLoadBalancerPolicyInput$PolicyName": "

    The name of the load balancer policy to be created. This name must be unique within the set of policies for this load balancer.

    ", + "DeleteLoadBalancerPolicyInput$PolicyName": "

    The name of the policy.

    ", + "LBCookieStickinessPolicy$PolicyName": "

    The name for the policy being created. The name must be unique within the set of policies for this load balancer.

    ", + "PolicyDescription$PolicyName": "

    The name of the policy.

    ", + "PolicyNames$member": null + } + }, + "PolicyNames": { + "base": null, + "refs": { + "BackendServerDescription$PolicyNames": "

    The names of the policies enabled for the back-end server.

    ", + "DescribeLoadBalancerPoliciesInput$PolicyNames": "

    The names of the policies.

    ", + "ListenerDescription$PolicyNames": "

    The policies. If there are no policies enabled, the list is empty.

    ", + "Policies$OtherPolicies": "

    The policies other than the stickiness policies.

    ", + "SetLoadBalancerPoliciesForBackendServerInput$PolicyNames": "

    The names of the policies. If the list is empty, then all current polices are removed from the back-end server.

    ", + "SetLoadBalancerPoliciesOfListenerInput$PolicyNames": "

    The names of the policies. If the list is empty, the current policy is removed from the listener.

    " + } + }, + "PolicyNotFoundException": { + "base": "

    One or more of the specified policies do not exist.

    ", + "refs": { + } + }, + "PolicyTypeDescription": { + "base": "

    Information about a policy type.

    ", + "refs": { + "PolicyTypeDescriptions$member": null + } + }, + "PolicyTypeDescriptions": { + "base": null, + "refs": { + "DescribeLoadBalancerPolicyTypesOutput$PolicyTypeDescriptions": "

    Information about the policy types.

    " + } + }, + "PolicyTypeName": { + "base": null, + "refs": { + "CreateLoadBalancerPolicyInput$PolicyTypeName": "

    The name of the base policy type. To get the list of policy types, use DescribeLoadBalancerPolicyTypes.

    ", + "PolicyDescription$PolicyTypeName": "

    The name of the policy type.

    ", + "PolicyTypeDescription$PolicyTypeName": "

    The name of the policy type.

    ", + "PolicyTypeNames$member": null + } + }, + "PolicyTypeNames": { + "base": null, + "refs": { + "DescribeLoadBalancerPolicyTypesInput$PolicyTypeNames": "

    The names of the policy types. If no names are specified, describes all policy types defined by Elastic Load Balancing.

    " + } + }, + "PolicyTypeNotFoundException": { + "base": "

    One or more of the specified policy types do not exist.

    ", + "refs": { + } + }, + "Ports": { + "base": null, + "refs": { + "DeleteLoadBalancerListenerInput$LoadBalancerPorts": "

    The client port numbers of the listeners.

    " + } + }, + "Protocol": { + "base": null, + "refs": { + "Listener$Protocol": "

    The load balancer transport protocol to use for routing: HTTP, HTTPS, TCP, or SSL.

    ", + "Listener$InstanceProtocol": "

    The protocol to use for routing traffic to back-end instances: HTTP, HTTPS, TCP, or SSL.

    If the front-end protocol is HTTP, HTTPS, TCP, or SSL, InstanceProtocol must be the same protocol.

    If there is another listener with the same InstancePort whose InstanceProtocol is secure, (HTTPS or SSL), the listener's InstanceProtocol must also be secure.

    If there is another listener with the same InstancePort whose InstanceProtocol is HTTP or TCP, the listener's InstanceProtocol must be HTTP or TCP.

    " + } + }, + "ReasonCode": { + "base": null, + "refs": { + "InstanceState$ReasonCode": "

    Information about the cause of OutOfService instances. Specifically, whether the cause is Elastic Load Balancing or the instance.

    Valid values: ELB | Instance | N/A

    " + } + }, + "RegisterEndPointsInput": { + "base": null, + "refs": { + } + }, + "RegisterEndPointsOutput": { + "base": null, + "refs": { + } + }, + "RemoveAvailabilityZonesInput": { + "base": null, + "refs": { + } + }, + "RemoveAvailabilityZonesOutput": { + "base": null, + "refs": { + } + }, + "RemoveTagsInput": { + "base": null, + "refs": { + } + }, + "RemoveTagsOutput": { + "base": null, + "refs": { + } + }, + "S3BucketName": { + "base": null, + "refs": { + "AccessLog$S3BucketName": "

    The name of the Amazon S3 bucket where the access logs are stored.

    " + } + }, + "SSLCertificateId": { + "base": null, + "refs": { + "Listener$SSLCertificateId": "

    The Amazon Resource Name (ARN) of the server certificate.

    ", + "SetLoadBalancerListenerSSLCertificateInput$SSLCertificateId": "

    The Amazon Resource Name (ARN) of the SSL certificate.

    " + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "SecurityGroups$member": null + } + }, + "SecurityGroupName": { + "base": null, + "refs": { + "SourceSecurityGroup$GroupName": "

    The name of the security group.

    " + } + }, + "SecurityGroupOwnerAlias": { + "base": null, + "refs": { + "SourceSecurityGroup$OwnerAlias": "

    The owner of the security group.

    " + } + }, + "SecurityGroups": { + "base": null, + "refs": { + "ApplySecurityGroupsToLoadBalancerInput$SecurityGroups": "

    The IDs of the security groups to associate with the load balancer. Note that you cannot specify the name of the security group.

    ", + "ApplySecurityGroupsToLoadBalancerOutput$SecurityGroups": "

    The IDs of the security groups associated with the load balancer.

    ", + "CreateAccessPointInput$SecurityGroups": "

    The IDs of the security groups to assign to the load balancer.

    ", + "LoadBalancerDescription$SecurityGroups": "

    The security groups for the load balancer. Valid only for load balancers in a VPC.

    " + } + }, + "SetLoadBalancerListenerSSLCertificateInput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerListenerSSLCertificateOutput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerPoliciesForBackendServerInput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerPoliciesForBackendServerOutput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerPoliciesOfListenerInput": { + "base": null, + "refs": { + } + }, + "SetLoadBalancerPoliciesOfListenerOutput": { + "base": null, + "refs": { + } + }, + "SourceSecurityGroup": { + "base": "

    Information about a source security group.

    ", + "refs": { + "LoadBalancerDescription$SourceSecurityGroup": "

    The security group that you can use as part of your inbound rules for your load balancer's back-end application instances. To only allow traffic from load balancers, add a security group rule to your back end instance that specifies this source security group as the inbound source.

    " + } + }, + "State": { + "base": null, + "refs": { + "InstanceState$State": "

    The current state of the instance.

    Valid values: InService | OutOfService | Unknown

    " + } + }, + "StringVal": { + "base": null, + "refs": { + "AdditionalAttribute$Key": "

    This parameter is reserved.

    ", + "AdditionalAttribute$Value": "

    This parameter is reserved.

    " + } + }, + "SubnetId": { + "base": null, + "refs": { + "Subnets$member": null + } + }, + "SubnetNotFoundException": { + "base": "

    One or more of the specified subnets do not exist.

    ", + "refs": { + } + }, + "Subnets": { + "base": null, + "refs": { + "AttachLoadBalancerToSubnetsInput$Subnets": "

    The IDs of the subnets to add for the load balancer. You can add only one subnet per Availability Zone.

    ", + "AttachLoadBalancerToSubnetsOutput$Subnets": "

    The IDs of the subnets attached to the load balancer.

    ", + "CreateAccessPointInput$Subnets": "

    The IDs of the subnets in your VPC to attach to the load balancer. Specify one subnet per Availability Zone specified in AvailabilityZones.

    ", + "DetachLoadBalancerFromSubnetsInput$Subnets": "

    The IDs of the subnets.

    ", + "DetachLoadBalancerFromSubnetsOutput$Subnets": "

    The IDs of the remaining subnets for the load balancer.

    ", + "LoadBalancerDescription$Subnets": "

    The IDs of the subnets for the load balancer.

    " + } + }, + "Tag": { + "base": "

    Information about a tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagDescription": { + "base": "

    The tags associated with a load balancer.

    ", + "refs": { + "TagDescriptions$member": null + } + }, + "TagDescriptions": { + "base": null, + "refs": { + "DescribeTagsOutput$TagDescriptions": "

    Information about the tags.

    " + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key of the tag.

    ", + "TagKeyOnly$Key": "

    The name of the key.

    " + } + }, + "TagKeyList": { + "base": null, + "refs": { + "RemoveTagsInput$Tags": "

    The list of tag keys to remove.

    " + } + }, + "TagKeyOnly": { + "base": "

    The key of a tag.

    ", + "refs": { + "TagKeyList$member": null + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsInput$Tags": "

    The tags.

    ", + "CreateAccessPointInput$Tags": "

    A list of tags to assign to the load balancer.

    For more information about tagging your load balancer, see Tagging in the Elastic Load Balancing Developer Guide.

    ", + "TagDescription$Tags": "

    The tags.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of the tag.

    " + } + }, + "TooManyAccessPointsException": { + "base": "

    The quota for the number of load balancers has been reached.

    ", + "refs": { + } + }, + "TooManyPoliciesException": { + "base": "

    The quota for the number of policies for this load balancer has been reached.

    ", + "refs": { + } + }, + "TooManyTagsException": { + "base": "

    The quota for the number of tags that can be assigned to a load balancer has been reached.

    ", + "refs": { + } + }, + "UnhealthyThreshold": { + "base": null, + "refs": { + "HealthCheck$UnhealthyThreshold": "

    The number of consecutive health check failures required before moving the instance to the Unhealthy state.

    " + } + }, + "VPCId": { + "base": null, + "refs": { + "LoadBalancerDescription$VPCId": "

    The ID of the VPC for the load balancer.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "pagination": { + "DescribeInstanceHealth": { + "result_key": "InstanceStates" + }, + "DescribeLoadBalancerPolicies": { + "result_key": "PolicyDescriptions" + }, + "DescribeLoadBalancerPolicyTypes": { + "result_key": "PolicyTypeDescriptions" + }, + "DescribeLoadBalancers": { + "input_token": "Marker", + "output_token": "NextMarker", + "result_key": "LoadBalancerDescriptions" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticloadbalancing/2012-06-01/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,31 @@ +{ + "version": 2, + "waiters": { + "InstanceInService": { + "delay": 15, + "operation": "DescribeInstanceHealth", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "InService", + "matcher": "pathAll", + "state": "success", + "argument": "InstanceStates[].State" + 
} + ] + }, + "AnyInstanceInService": { + "delay": 15, + "operation": "DescribeInstanceHealth", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "InService", + "matcher": "pathAny", + "state": "success", + "argument": "InstanceStates[].State" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1256 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2009-03-31", + "endpointPrefix":"elasticmapreduce", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon EMR", + "serviceFullName":"Amazon Elastic MapReduce", + "signatureVersion":"v4", + "targetPrefix":"ElasticMapReduce", + "timestampFormat":"unixTimestamp" + }, + "operations":{ + "AddInstanceGroups":{ + "name":"AddInstanceGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddInstanceGroupsInput"}, + "output":{"shape":"AddInstanceGroupsOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "AddJobFlowSteps":{ + "name":"AddJobFlowSteps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddJobFlowStepsInput"}, + "output":{"shape":"AddJobFlowStepsOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsInput"}, + "output":{"shape":"AddTagsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + 
{"shape":"InvalidRequestException"} + ] + }, + "DescribeCluster":{ + "name":"DescribeCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterInput"}, + "output":{"shape":"DescribeClusterOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "DescribeJobFlows":{ + "name":"DescribeJobFlows", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeJobFlowsInput"}, + "output":{"shape":"DescribeJobFlowsOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ], + "deprecated":true + }, + "DescribeStep":{ + "name":"DescribeStep", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStepInput"}, + "output":{"shape":"DescribeStepOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListBootstrapActions":{ + "name":"ListBootstrapActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListBootstrapActionsInput"}, + "output":{"shape":"ListBootstrapActionsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListClusters":{ + "name":"ListClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListClustersInput"}, + "output":{"shape":"ListClustersOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListInstanceGroups":{ + "name":"ListInstanceGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstanceGroupsInput"}, + "output":{"shape":"ListInstanceGroupsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListInstances":{ + "name":"ListInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstancesInput"}, + "output":{"shape":"ListInstancesOutput"}, + "errors":[ + 
{"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ListSteps":{ + "name":"ListSteps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStepsInput"}, + "output":{"shape":"ListStepsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "ModifyInstanceGroups":{ + "name":"ModifyInstanceGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceGroupsInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsInput"}, + "output":{"shape":"RemoveTagsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, + "RunJobFlow":{ + "name":"RunJobFlow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunJobFlowInput"}, + "output":{"shape":"RunJobFlowOutput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "SetTerminationProtection":{ + "name":"SetTerminationProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTerminationProtectionInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "SetVisibleToAllUsers":{ + "name":"SetVisibleToAllUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetVisibleToAllUsersInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, + "TerminateJobFlows":{ + "name":"TerminateJobFlows", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateJobFlowsInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + } + }, + "shapes":{ + "ActionOnFailure":{ + "type":"string", + "enum":[ + "TERMINATE_JOB_FLOW", + "TERMINATE_CLUSTER", + "CANCEL_AND_WAIT", + "CONTINUE" + ] + }, + "AddInstanceGroupsInput":{ + "type":"structure", + "required":[ + "InstanceGroups", + "JobFlowId" + ], 
+ "members":{ + "InstanceGroups":{"shape":"InstanceGroupConfigList"}, + "JobFlowId":{"shape":"XmlStringMaxLen256"} + } + }, + "AddInstanceGroupsOutput":{ + "type":"structure", + "members":{ + "JobFlowId":{"shape":"XmlStringMaxLen256"}, + "InstanceGroupIds":{"shape":"InstanceGroupIdsList"} + } + }, + "AddJobFlowStepsInput":{ + "type":"structure", + "required":[ + "JobFlowId", + "Steps" + ], + "members":{ + "JobFlowId":{"shape":"XmlStringMaxLen256"}, + "Steps":{"shape":"StepConfigList"} + } + }, + "AddJobFlowStepsOutput":{ + "type":"structure", + "members":{ + "StepIds":{"shape":"StepIdsList"} + } + }, + "AddTagsInput":{ + "type":"structure", + "required":[ + "ResourceId", + "Tags" + ], + "members":{ + "ResourceId":{"shape":"ResourceId"}, + "Tags":{"shape":"TagList"} + } + }, + "AddTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "Application":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Version":{"shape":"String"}, + "Args":{"shape":"StringList"}, + "AdditionalInfo":{"shape":"StringMap"} + } + }, + "ApplicationList":{ + "type":"list", + "member":{"shape":"Application"} + }, + "Boolean":{"type":"boolean"}, + "BootstrapActionConfig":{ + "type":"structure", + "required":[ + "Name", + "ScriptBootstrapAction" + ], + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "ScriptBootstrapAction":{"shape":"ScriptBootstrapActionConfig"} + } + }, + "BootstrapActionConfigList":{ + "type":"list", + "member":{"shape":"BootstrapActionConfig"} + }, + "BootstrapActionDetail":{ + "type":"structure", + "members":{ + "BootstrapActionConfig":{"shape":"BootstrapActionConfig"} + } + }, + "BootstrapActionDetailList":{ + "type":"list", + "member":{"shape":"BootstrapActionDetail"} + }, + "Cluster":{ + "type":"structure", + "members":{ + "Id":{"shape":"ClusterId"}, + "Name":{"shape":"String"}, + "Status":{"shape":"ClusterStatus"}, + "Ec2InstanceAttributes":{"shape":"Ec2InstanceAttributes"}, + "LogUri":{"shape":"String"}, + 
"RequestedAmiVersion":{"shape":"String"}, + "RunningAmiVersion":{"shape":"String"}, + "ReleaseLabel":{"shape":"String"}, + "AutoTerminate":{"shape":"Boolean"}, + "TerminationProtected":{"shape":"Boolean"}, + "VisibleToAllUsers":{"shape":"Boolean"}, + "Applications":{"shape":"ApplicationList"}, + "Tags":{"shape":"TagList"}, + "ServiceRole":{"shape":"String"}, + "NormalizedInstanceHours":{"shape":"Integer"}, + "MasterPublicDnsName":{"shape":"String"}, + "Configurations":{"shape":"ConfigurationList"} + } + }, + "ClusterId":{"type":"string"}, + "ClusterState":{ + "type":"string", + "enum":[ + "STARTING", + "BOOTSTRAPPING", + "RUNNING", + "WAITING", + "TERMINATING", + "TERMINATED", + "TERMINATED_WITH_ERRORS" + ] + }, + "ClusterStateChangeReason":{ + "type":"structure", + "members":{ + "Code":{"shape":"ClusterStateChangeReasonCode"}, + "Message":{"shape":"String"} + } + }, + "ClusterStateChangeReasonCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "VALIDATION_ERROR", + "INSTANCE_FAILURE", + "BOOTSTRAP_FAILURE", + "USER_REQUEST", + "STEP_FAILURE", + "ALL_STEPS_COMPLETED" + ] + }, + "ClusterStateList":{ + "type":"list", + "member":{"shape":"ClusterState"} + }, + "ClusterStatus":{ + "type":"structure", + "members":{ + "State":{"shape":"ClusterState"}, + "StateChangeReason":{"shape":"ClusterStateChangeReason"}, + "Timeline":{"shape":"ClusterTimeline"} + } + }, + "ClusterSummary":{ + "type":"structure", + "members":{ + "Id":{"shape":"ClusterId"}, + "Name":{"shape":"String"}, + "Status":{"shape":"ClusterStatus"}, + "NormalizedInstanceHours":{"shape":"Integer"} + } + }, + "ClusterSummaryList":{ + "type":"list", + "member":{"shape":"ClusterSummary"} + }, + "ClusterTimeline":{ + "type":"structure", + "members":{ + "CreationDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "Command":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ScriptPath":{"shape":"String"}, + 
"Args":{"shape":"StringList"} + } + }, + "CommandList":{ + "type":"list", + "member":{"shape":"Command"} + }, + "Configuration":{ + "type":"structure", + "members":{ + "Classification":{"shape":"String"}, + "Configurations":{"shape":"ConfigurationList"}, + "Properties":{"shape":"StringMap"} + } + }, + "ConfigurationList":{ + "type":"list", + "member":{"shape":"Configuration"} + }, + "Date":{"type":"timestamp"}, + "DescribeClusterInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"} + } + }, + "DescribeClusterOutput":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "DescribeJobFlowsInput":{ + "type":"structure", + "members":{ + "CreatedAfter":{"shape":"Date"}, + "CreatedBefore":{"shape":"Date"}, + "JobFlowIds":{"shape":"XmlStringList"}, + "JobFlowStates":{"shape":"JobFlowExecutionStateList"} + } + }, + "DescribeJobFlowsOutput":{ + "type":"structure", + "members":{ + "JobFlows":{"shape":"JobFlowDetailList"} + } + }, + "DescribeStepInput":{ + "type":"structure", + "required":[ + "ClusterId", + "StepId" + ], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "StepId":{"shape":"StepId"} + } + }, + "DescribeStepOutput":{ + "type":"structure", + "members":{ + "Step":{"shape":"Step"} + } + }, + "EC2InstanceIdsToTerminateList":{ + "type":"list", + "member":{"shape":"InstanceId"} + }, + "Ec2InstanceAttributes":{ + "type":"structure", + "members":{ + "Ec2KeyName":{"shape":"String"}, + "Ec2SubnetId":{"shape":"String"}, + "Ec2AvailabilityZone":{"shape":"String"}, + "IamInstanceProfile":{"shape":"String"}, + "EmrManagedMasterSecurityGroup":{"shape":"String"}, + "EmrManagedSlaveSecurityGroup":{"shape":"String"}, + "ServiceAccessSecurityGroup":{"shape":"String"}, + "AdditionalMasterSecurityGroups":{"shape":"StringList"}, + "AdditionalSlaveSecurityGroups":{"shape":"StringList"} + } + }, + "ErrorCode":{ + "type":"string", + "max":256, + "min":1 + }, + "ErrorMessage":{"type":"string"}, + 
"HadoopJarStepConfig":{ + "type":"structure", + "required":["Jar"], + "members":{ + "Properties":{"shape":"KeyValueList"}, + "Jar":{"shape":"XmlString"}, + "MainClass":{"shape":"XmlString"}, + "Args":{"shape":"XmlStringList"} + } + }, + "HadoopStepConfig":{ + "type":"structure", + "members":{ + "Jar":{"shape":"String"}, + "Properties":{"shape":"StringMap"}, + "MainClass":{"shape":"String"}, + "Args":{"shape":"StringList"} + } + }, + "Instance":{ + "type":"structure", + "members":{ + "Id":{"shape":"InstanceId"}, + "Ec2InstanceId":{"shape":"InstanceId"}, + "PublicDnsName":{"shape":"String"}, + "PublicIpAddress":{"shape":"String"}, + "PrivateDnsName":{"shape":"String"}, + "PrivateIpAddress":{"shape":"String"}, + "Status":{"shape":"InstanceStatus"} + } + }, + "InstanceGroup":{ + "type":"structure", + "members":{ + "Id":{"shape":"InstanceGroupId"}, + "Name":{"shape":"String"}, + "Market":{"shape":"MarketType"}, + "InstanceGroupType":{"shape":"InstanceGroupType"}, + "BidPrice":{"shape":"String"}, + "InstanceType":{"shape":"InstanceType"}, + "RequestedInstanceCount":{"shape":"Integer"}, + "RunningInstanceCount":{"shape":"Integer"}, + "Status":{"shape":"InstanceGroupStatus"}, + "Configurations":{"shape":"ConfigurationList"} + } + }, + "InstanceGroupConfig":{ + "type":"structure", + "required":[ + "InstanceRole", + "InstanceType", + "InstanceCount" + ], + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "Market":{"shape":"MarketType"}, + "InstanceRole":{"shape":"InstanceRoleType"}, + "BidPrice":{"shape":"XmlStringMaxLen256"}, + "InstanceType":{"shape":"InstanceType"}, + "InstanceCount":{"shape":"Integer"}, + "Configurations":{"shape":"ConfigurationList"} + } + }, + "InstanceGroupConfigList":{ + "type":"list", + "member":{"shape":"InstanceGroupConfig"} + }, + "InstanceGroupDetail":{ + "type":"structure", + "required":[ + "Market", + "InstanceRole", + "InstanceType", + "InstanceRequestCount", + "InstanceRunningCount", + "State", + "CreationDateTime" + ], + "members":{ + 
"InstanceGroupId":{"shape":"XmlStringMaxLen256"}, + "Name":{"shape":"XmlStringMaxLen256"}, + "Market":{"shape":"MarketType"}, + "InstanceRole":{"shape":"InstanceRoleType"}, + "BidPrice":{"shape":"XmlStringMaxLen256"}, + "InstanceType":{"shape":"InstanceType"}, + "InstanceRequestCount":{"shape":"Integer"}, + "InstanceRunningCount":{"shape":"Integer"}, + "State":{"shape":"InstanceGroupState"}, + "LastStateChangeReason":{"shape":"XmlString"}, + "CreationDateTime":{"shape":"Date"}, + "StartDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "InstanceGroupDetailList":{ + "type":"list", + "member":{"shape":"InstanceGroupDetail"} + }, + "InstanceGroupId":{"type":"string"}, + "InstanceGroupIdsList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen256"} + }, + "InstanceGroupList":{ + "type":"list", + "member":{"shape":"InstanceGroup"} + }, + "InstanceGroupModifyConfig":{ + "type":"structure", + "required":["InstanceGroupId"], + "members":{ + "InstanceGroupId":{"shape":"XmlStringMaxLen256"}, + "InstanceCount":{"shape":"Integer"}, + "EC2InstanceIdsToTerminate":{"shape":"EC2InstanceIdsToTerminateList"} + } + }, + "InstanceGroupModifyConfigList":{ + "type":"list", + "member":{"shape":"InstanceGroupModifyConfig"} + }, + "InstanceGroupState":{ + "type":"string", + "enum":[ + "PROVISIONING", + "BOOTSTRAPPING", + "RUNNING", + "RESIZING", + "SUSPENDED", + "TERMINATING", + "TERMINATED", + "ARRESTED", + "SHUTTING_DOWN", + "ENDED" + ] + }, + "InstanceGroupStateChangeReason":{ + "type":"structure", + "members":{ + "Code":{"shape":"InstanceGroupStateChangeReasonCode"}, + "Message":{"shape":"String"} + } + }, + "InstanceGroupStateChangeReasonCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "VALIDATION_ERROR", + "INSTANCE_FAILURE", + "CLUSTER_TERMINATED" + ] + }, + "InstanceGroupStatus":{ + "type":"structure", + "members":{ + "State":{"shape":"InstanceGroupState"}, + 
"StateChangeReason":{"shape":"InstanceGroupStateChangeReason"}, + "Timeline":{"shape":"InstanceGroupTimeline"} + } + }, + "InstanceGroupTimeline":{ + "type":"structure", + "members":{ + "CreationDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "InstanceGroupType":{ + "type":"string", + "enum":[ + "MASTER", + "CORE", + "TASK" + ] + }, + "InstanceGroupTypeList":{ + "type":"list", + "member":{"shape":"InstanceGroupType"} + }, + "InstanceId":{"type":"string"}, + "InstanceList":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InstanceRoleType":{ + "type":"string", + "enum":[ + "MASTER", + "CORE", + "TASK" + ] + }, + "InstanceState":{ + "type":"string", + "enum":[ + "AWAITING_FULFILLMENT", + "PROVISIONING", + "BOOTSTRAPPING", + "RUNNING", + "TERMINATED" + ] + }, + "InstanceStateChangeReason":{ + "type":"structure", + "members":{ + "Code":{"shape":"InstanceStateChangeReasonCode"}, + "Message":{"shape":"String"} + } + }, + "InstanceStateChangeReasonCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "VALIDATION_ERROR", + "INSTANCE_FAILURE", + "BOOTSTRAP_FAILURE", + "CLUSTER_TERMINATED" + ] + }, + "InstanceStatus":{ + "type":"structure", + "members":{ + "State":{"shape":"InstanceState"}, + "StateChangeReason":{"shape":"InstanceStateChangeReason"}, + "Timeline":{"shape":"InstanceTimeline"} + } + }, + "InstanceTimeline":{ + "type":"structure", + "members":{ + "CreationDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "InstanceType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "Integer":{"type":"integer"}, + "InternalServerError":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + 
"InvalidRequestException":{ + "type":"structure", + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "JobFlowDetail":{ + "type":"structure", + "required":[ + "JobFlowId", + "Name", + "ExecutionStatusDetail", + "Instances" + ], + "members":{ + "JobFlowId":{"shape":"XmlStringMaxLen256"}, + "Name":{"shape":"XmlStringMaxLen256"}, + "LogUri":{"shape":"XmlString"}, + "AmiVersion":{"shape":"XmlStringMaxLen256"}, + "ExecutionStatusDetail":{"shape":"JobFlowExecutionStatusDetail"}, + "Instances":{"shape":"JobFlowInstancesDetail"}, + "Steps":{"shape":"StepDetailList"}, + "BootstrapActions":{"shape":"BootstrapActionDetailList"}, + "SupportedProducts":{"shape":"SupportedProductsList"}, + "VisibleToAllUsers":{"shape":"Boolean"}, + "JobFlowRole":{"shape":"XmlString"}, + "ServiceRole":{"shape":"XmlString"} + } + }, + "JobFlowDetailList":{ + "type":"list", + "member":{"shape":"JobFlowDetail"} + }, + "JobFlowExecutionState":{ + "type":"string", + "enum":[ + "STARTING", + "BOOTSTRAPPING", + "RUNNING", + "WAITING", + "SHUTTING_DOWN", + "TERMINATED", + "COMPLETED", + "FAILED" + ] + }, + "JobFlowExecutionStateList":{ + "type":"list", + "member":{"shape":"JobFlowExecutionState"} + }, + "JobFlowExecutionStatusDetail":{ + "type":"structure", + "required":[ + "State", + "CreationDateTime" + ], + "members":{ + "State":{"shape":"JobFlowExecutionState"}, + "CreationDateTime":{"shape":"Date"}, + "StartDateTime":{"shape":"Date"}, + "ReadyDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"}, + "LastStateChangeReason":{"shape":"XmlString"} + } + }, + "JobFlowInstancesConfig":{ + "type":"structure", + "members":{ + "MasterInstanceType":{"shape":"InstanceType"}, + "SlaveInstanceType":{"shape":"InstanceType"}, + "InstanceCount":{"shape":"Integer"}, + "InstanceGroups":{"shape":"InstanceGroupConfigList"}, + "Ec2KeyName":{"shape":"XmlStringMaxLen256"}, + "Placement":{"shape":"PlacementType"}, + 
"KeepJobFlowAliveWhenNoSteps":{"shape":"Boolean"}, + "TerminationProtected":{"shape":"Boolean"}, + "HadoopVersion":{"shape":"XmlStringMaxLen256"}, + "Ec2SubnetId":{"shape":"XmlStringMaxLen256"}, + "EmrManagedMasterSecurityGroup":{"shape":"XmlStringMaxLen256"}, + "EmrManagedSlaveSecurityGroup":{"shape":"XmlStringMaxLen256"}, + "ServiceAccessSecurityGroup":{"shape":"XmlStringMaxLen256"}, + "AdditionalMasterSecurityGroups":{"shape":"SecurityGroupsList"}, + "AdditionalSlaveSecurityGroups":{"shape":"SecurityGroupsList"} + } + }, + "JobFlowInstancesDetail":{ + "type":"structure", + "required":[ + "MasterInstanceType", + "SlaveInstanceType", + "InstanceCount" + ], + "members":{ + "MasterInstanceType":{"shape":"InstanceType"}, + "MasterPublicDnsName":{"shape":"XmlString"}, + "MasterInstanceId":{"shape":"XmlString"}, + "SlaveInstanceType":{"shape":"InstanceType"}, + "InstanceCount":{"shape":"Integer"}, + "InstanceGroups":{"shape":"InstanceGroupDetailList"}, + "NormalizedInstanceHours":{"shape":"Integer"}, + "Ec2KeyName":{"shape":"XmlStringMaxLen256"}, + "Ec2SubnetId":{"shape":"XmlStringMaxLen256"}, + "Placement":{"shape":"PlacementType"}, + "KeepJobFlowAliveWhenNoSteps":{"shape":"Boolean"}, + "TerminationProtected":{"shape":"Boolean"}, + "HadoopVersion":{"shape":"XmlStringMaxLen256"} + } + }, + "KeyValue":{ + "type":"structure", + "members":{ + "Key":{"shape":"XmlString"}, + "Value":{"shape":"XmlString"} + } + }, + "KeyValueList":{ + "type":"list", + "member":{"shape":"KeyValue"} + }, + "ListBootstrapActionsInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "Marker":{"shape":"Marker"} + } + }, + "ListBootstrapActionsOutput":{ + "type":"structure", + "members":{ + "BootstrapActions":{"shape":"CommandList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListClustersInput":{ + "type":"structure", + "members":{ + "CreatedAfter":{"shape":"Date"}, + "CreatedBefore":{"shape":"Date"}, + 
"ClusterStates":{"shape":"ClusterStateList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListClustersOutput":{ + "type":"structure", + "members":{ + "Clusters":{"shape":"ClusterSummaryList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListInstanceGroupsInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "Marker":{"shape":"Marker"} + } + }, + "ListInstanceGroupsOutput":{ + "type":"structure", + "members":{ + "InstanceGroups":{"shape":"InstanceGroupList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListInstancesInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "InstanceGroupId":{"shape":"InstanceGroupId"}, + "InstanceGroupTypes":{"shape":"InstanceGroupTypeList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListInstancesOutput":{ + "type":"structure", + "members":{ + "Instances":{"shape":"InstanceList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListStepsInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{"shape":"ClusterId"}, + "StepStates":{"shape":"StepStateList"}, + "StepIds":{"shape":"XmlStringList"}, + "Marker":{"shape":"Marker"} + } + }, + "ListStepsOutput":{ + "type":"structure", + "members":{ + "Steps":{"shape":"StepSummaryList"}, + "Marker":{"shape":"Marker"} + } + }, + "Marker":{"type":"string"}, + "MarketType":{ + "type":"string", + "enum":[ + "ON_DEMAND", + "SPOT" + ] + }, + "ModifyInstanceGroupsInput":{ + "type":"structure", + "members":{ + "InstanceGroups":{"shape":"InstanceGroupModifyConfigList"} + } + }, + "NewSupportedProductsList":{ + "type":"list", + "member":{"shape":"SupportedProductConfig"} + }, + "PlacementType":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "AvailabilityZone":{"shape":"XmlString"} + } + }, + "RemoveTagsInput":{ + "type":"structure", + "required":[ + "ResourceId", + "TagKeys" + ], + "members":{ + "ResourceId":{"shape":"ResourceId"}, + 
"TagKeys":{"shape":"StringList"} + } + }, + "RemoveTagsOutput":{ + "type":"structure", + "members":{ + } + }, + "ResourceId":{"type":"string"}, + "RunJobFlowInput":{ + "type":"structure", + "required":[ + "Name", + "Instances" + ], + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "LogUri":{"shape":"XmlString"}, + "AdditionalInfo":{"shape":"XmlString"}, + "AmiVersion":{"shape":"XmlStringMaxLen256"}, + "ReleaseLabel":{"shape":"XmlStringMaxLen256"}, + "Instances":{"shape":"JobFlowInstancesConfig"}, + "Steps":{"shape":"StepConfigList"}, + "BootstrapActions":{"shape":"BootstrapActionConfigList"}, + "SupportedProducts":{"shape":"SupportedProductsList"}, + "NewSupportedProducts":{"shape":"NewSupportedProductsList"}, + "Applications":{"shape":"ApplicationList"}, + "Configurations":{"shape":"ConfigurationList"}, + "VisibleToAllUsers":{"shape":"Boolean"}, + "JobFlowRole":{"shape":"XmlString"}, + "ServiceRole":{"shape":"XmlString"}, + "Tags":{"shape":"TagList"} + } + }, + "RunJobFlowOutput":{ + "type":"structure", + "members":{ + "JobFlowId":{"shape":"XmlStringMaxLen256"} + } + }, + "ScriptBootstrapActionConfig":{ + "type":"structure", + "required":["Path"], + "members":{ + "Path":{"shape":"XmlString"}, + "Args":{"shape":"XmlStringList"} + } + }, + "SecurityGroupsList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen256"} + }, + "SetTerminationProtectionInput":{ + "type":"structure", + "required":[ + "JobFlowIds", + "TerminationProtected" + ], + "members":{ + "JobFlowIds":{"shape":"XmlStringList"}, + "TerminationProtected":{"shape":"Boolean"} + } + }, + "SetVisibleToAllUsersInput":{ + "type":"structure", + "required":[ + "JobFlowIds", + "VisibleToAllUsers" + ], + "members":{ + "JobFlowIds":{"shape":"XmlStringList"}, + "VisibleToAllUsers":{"shape":"Boolean"} + } + }, + "Step":{ + "type":"structure", + "members":{ + "Id":{"shape":"StepId"}, + "Name":{"shape":"String"}, + "Config":{"shape":"HadoopStepConfig"}, + "ActionOnFailure":{"shape":"ActionOnFailure"}, + 
"Status":{"shape":"StepStatus"} + } + }, + "StepConfig":{ + "type":"structure", + "required":[ + "Name", + "HadoopJarStep" + ], + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "ActionOnFailure":{"shape":"ActionOnFailure"}, + "HadoopJarStep":{"shape":"HadoopJarStepConfig"} + } + }, + "StepConfigList":{ + "type":"list", + "member":{"shape":"StepConfig"} + }, + "StepDetail":{ + "type":"structure", + "required":[ + "StepConfig", + "ExecutionStatusDetail" + ], + "members":{ + "StepConfig":{"shape":"StepConfig"}, + "ExecutionStatusDetail":{"shape":"StepExecutionStatusDetail"} + } + }, + "StepDetailList":{ + "type":"list", + "member":{"shape":"StepDetail"} + }, + "StepExecutionState":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "CONTINUE", + "COMPLETED", + "CANCELLED", + "FAILED", + "INTERRUPTED" + ] + }, + "StepExecutionStatusDetail":{ + "type":"structure", + "required":[ + "State", + "CreationDateTime" + ], + "members":{ + "State":{"shape":"StepExecutionState"}, + "CreationDateTime":{"shape":"Date"}, + "StartDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"}, + "LastStateChangeReason":{"shape":"XmlString"} + } + }, + "StepId":{"type":"string"}, + "StepIdsList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen256"} + }, + "StepState":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "COMPLETED", + "CANCELLED", + "FAILED", + "INTERRUPTED" + ] + }, + "StepStateChangeReason":{ + "type":"structure", + "members":{ + "Code":{"shape":"StepStateChangeReasonCode"}, + "Message":{"shape":"String"} + } + }, + "StepStateChangeReasonCode":{ + "type":"string", + "enum":["NONE"] + }, + "StepStateList":{ + "type":"list", + "member":{"shape":"StepState"} + }, + "StepStatus":{ + "type":"structure", + "members":{ + "State":{"shape":"StepState"}, + "StateChangeReason":{"shape":"StepStateChangeReason"}, + "Timeline":{"shape":"StepTimeline"} + } + }, + "StepSummary":{ + "type":"structure", + "members":{ + "Id":{"shape":"StepId"}, + 
"Name":{"shape":"String"}, + "Config":{"shape":"HadoopStepConfig"}, + "ActionOnFailure":{"shape":"ActionOnFailure"}, + "Status":{"shape":"StepStatus"} + } + }, + "StepSummaryList":{ + "type":"list", + "member":{"shape":"StepSummary"} + }, + "StepTimeline":{ + "type":"structure", + "members":{ + "CreationDateTime":{"shape":"Date"}, + "StartDateTime":{"shape":"Date"}, + "EndDateTime":{"shape":"Date"} + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "StringMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "SupportedProductConfig":{ + "type":"structure", + "members":{ + "Name":{"shape":"XmlStringMaxLen256"}, + "Args":{"shape":"XmlStringList"} + } + }, + "SupportedProductsList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen256"} + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TerminateJobFlowsInput":{ + "type":"structure", + "required":["JobFlowIds"], + "members":{ + "JobFlowIds":{"shape":"XmlStringList"} + } + }, + "XmlString":{ + "type":"string", + "max":10280, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringList":{ + "type":"list", + "member":{"shape":"XmlString"} + }, + "XmlStringMaxLen256":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,993 @@ +{ + "version": "2.0", + "service": "

    Amazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS products to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehousing.

    ", + "operations": { + "AddInstanceGroups": "

    AddInstanceGroups adds an instance group to a running cluster.

    ", + "AddJobFlowSteps": "

    AddJobFlowSteps adds new steps to a running job flow. A maximum of 256 steps are allowed in each job flow.

    If your job flow is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, go to Add More than 256 Steps to a Job Flow in the Amazon Elastic MapReduce Developer's Guide.

    A step specifies the location of a JAR file stored either on the master node of the job flow or in Amazon S3. Each step is performed by the main function of the main class of the JAR file. The main class can be specified either in the manifest of the JAR or by using the MainFunction parameter of the step.

    Elastic MapReduce executes each step in the order listed. For a step to be considered complete, the main function must exit with a zero exit code and all Hadoop jobs started while the step was running must have completed and run successfully.

    You can only add steps to a job flow that is in one of the following states: STARTING, BOOTSTRAPPING, RUNNING, or WAITING.

    ", + "AddTags": "

    Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tagging Amazon EMR Resources.

    ", + "DescribeCluster": "

    Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on. For information about the cluster steps, see ListSteps.

    ", + "DescribeJobFlows": "

    This API is deprecated and will eventually be removed. We recommend you use ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.

    DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.

    Regardless of supplied parameters, only job flows created within the last two months are returned.

    If no parameters are supplied, then job flows matching either of the following criteria are returned:

    • Job flows created and completed in the last two weeks
    • Job flows created within the last two months that are in one of the following states: RUNNING, WAITING, SHUTTING_DOWN, STARTING

    Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.

    ", + "DescribeStep": "

    Provides more detail about the cluster step.

    ", + "ListBootstrapActions": "

    Provides information about the bootstrap actions associated with a cluster.

    ", + "ListClusters": "

    Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.

    ", + "ListInstanceGroups": "

    Provides all available details about the instance groups in a cluster.

    ", + "ListInstances": "

    Provides information about the cluster instances that Amazon EMR provisions on behalf of a user when it creates the cluster. For example, this operation indicates when the EC2 instances reach the Ready state, when instances become available to Amazon EMR to use for jobs, and the IP addresses for cluster instances, etc.

    ", + "ListSteps": "

    Provides a list of steps for the cluster.

    ", + "ModifyInstanceGroups": "

    ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.

    ", + "RemoveTags": "

    Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tagging Amazon EMR Resources.

    The following example removes the stack tag with value Prod from a cluster:

    ", + "RunJobFlow": "

    RunJobFlow creates and starts running a new job flow. The job flow will run the steps specified. Once the job flow completes, the cluster is stopped and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the job flow will transition to the WAITING state rather than shutting down once the steps have completed.

    For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the job flow and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.

    A maximum of 256 steps are allowed in each job flow.

    If your job flow is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, go to Add More than 256 Steps to a Job Flow in the Amazon Elastic MapReduce Developer's Guide.

    For long running job flows, we recommend that you periodically store your results.

    ", + "SetTerminationProtection": "

    SetTerminationProtection locks a job flow so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a job flow is analogous to calling the Amazon EC2 DisableAPITermination API on all of the EC2 instances in a cluster.

    SetTerminationProtection is used to prevent accidental termination of a job flow and to ensure that in the event of an error, the instances will persist so you can recover any data stored in their ephemeral instance storage.

    To terminate a job flow that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

    For more information, go to Protecting a Job Flow from Termination in the Amazon Elastic MapReduce Developer's Guide.

    ", + "SetVisibleToAllUsers": "

    Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified job flows. This action works on running job flows. You can also set the visibility of a job flow when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the job flow or the AWS account that owns the job flow.

    ", + "TerminateJobFlows": "

    TerminateJobFlows shuts a list of job flows down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the job flow is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the job flow was created.

    The maximum number of JobFlows allowed is 10. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the job flow, it may take up to 5-20 minutes for the job flow to completely terminate and release allocated resources, such as Amazon EC2 instances.

    " + }, + "shapes": { + "ActionOnFailure": { + "base": null, + "refs": { + "Step$ActionOnFailure": "

    This specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.

    ", + "StepConfig$ActionOnFailure": "

    The action to take if the job flow step fails.

    ", + "StepSummary$ActionOnFailure": "

    This specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.

    " + } + }, + "AddInstanceGroupsInput": { + "base": "

    Input to an AddInstanceGroups call.

    ", + "refs": { + } + }, + "AddInstanceGroupsOutput": { + "base": "

    Output from an AddInstanceGroups call.

    ", + "refs": { + } + }, + "AddJobFlowStepsInput": { + "base": "

    The input argument to the AddJobFlowSteps operation.

    ", + "refs": { + } + }, + "AddJobFlowStepsOutput": { + "base": "

    The output for the AddJobFlowSteps operation.

    ", + "refs": { + } + }, + "AddTagsInput": { + "base": "

    This input identifies a cluster and a list of tags to attach.

    ", + "refs": { + } + }, + "AddTagsOutput": { + "base": "

    This output indicates the result of adding tags to a resource.

    ", + "refs": { + } + }, + "Application": { + "base": "

    An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:

    • \"mapr-m3\" - launch the job flow using MapR M3 Edition.
    • \"mapr-m5\" - launch the job flow using MapR M5 Edition.
    • \"mapr\" with the user arguments specifying \"--edition,m3\" or \"--edition,m5\" - launch the job flow using MapR M3 or M5 Edition, respectively.

    In Amazon EMR releases 4.0 and greater, the only accepted parameter is the application name. To pass arguments to applications, you supply a configuration for each application.

    ", + "refs": { + "ApplicationList$member": null + } + }, + "ApplicationList": { + "base": null, + "refs": { + "Cluster$Applications": "

    The applications installed on this cluster.

    ", + "RunJobFlowInput$Applications": "

    Amazon EMR releases 4.x or later.

    A list of applications for the cluster. Valid values are: \"Hadoop\", \"Hive\", \"Mahout\", \"Pig\", and \"Spark.\" They are case insensitive.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "Cluster$AutoTerminate": "

    Specifies whether the cluster should terminate after completing all steps.

    ", + "Cluster$TerminationProtected": "

    Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.

    ", + "Cluster$VisibleToAllUsers": "

    Indicates whether the job flow is visible to all IAM users of the AWS account associated with the job flow. If this value is set to true, all IAM users of that AWS account can view and manage the job flow if they have the proper policy permissions set. If this value is false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

    ", + "JobFlowDetail$VisibleToAllUsers": "

    Specifies whether the job flow is visible to all IAM users of the AWS account associated with the job flow. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the job flow. If it is set to false, only the IAM user that created the job flow can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

    ", + "JobFlowInstancesConfig$KeepJobFlowAliveWhenNoSteps": "

    Specifies whether the job flow should be kept alive after completing all steps.

    ", + "JobFlowInstancesConfig$TerminationProtected": "

    Specifies whether to lock the job flow to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job flow error.

    ", + "JobFlowInstancesDetail$KeepJobFlowAliveWhenNoSteps": "

    Specifies whether the job flow should terminate after completing all steps.

    ", + "JobFlowInstancesDetail$TerminationProtected": "

    Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job flow error.

    ", + "RunJobFlowInput$VisibleToAllUsers": "

    Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the job flow. If it is set to false, only the IAM user that created the job flow can view and manage it.

    ", + "SetTerminationProtectionInput$TerminationProtected": "

    A Boolean that indicates whether to protect the job flow and prevent the Amazon EC2 instances in the cluster from shutting down due to API calls, user intervention, or job-flow error.

    ", + "SetVisibleToAllUsersInput$VisibleToAllUsers": "

    Whether the specified job flows are visible to all IAM users of the AWS account associated with the job flow. If this value is set to True, all IAM users of that AWS account can view and, if they have the proper IAM policy permissions set, manage the job flows. If it is set to False, only the IAM user that created a job flow can view and manage it.

    " + } + }, + "BootstrapActionConfig": { + "base": "

    Configuration of a bootstrap action.

    ", + "refs": { + "BootstrapActionConfigList$member": null, + "BootstrapActionDetail$BootstrapActionConfig": "

    A description of the bootstrap action.

    " + } + }, + "BootstrapActionConfigList": { + "base": null, + "refs": { + "RunJobFlowInput$BootstrapActions": "

    A list of bootstrap actions that will be run before Hadoop is started on the cluster nodes.

    " + } + }, + "BootstrapActionDetail": { + "base": "

    Reports the configuration of a bootstrap action in a job flow.

    ", + "refs": { + "BootstrapActionDetailList$member": null + } + }, + "BootstrapActionDetailList": { + "base": null, + "refs": { + "JobFlowDetail$BootstrapActions": "

    A list of the bootstrap actions run by the job flow.

    " + } + }, + "Cluster": { + "base": "

    The detailed description of the cluster.

    ", + "refs": { + "DescribeClusterOutput$Cluster": "

    This output contains the details for the requested cluster.

    " + } + }, + "ClusterId": { + "base": null, + "refs": { + "Cluster$Id": "

    The unique identifier for the cluster.

    ", + "ClusterSummary$Id": "

    The unique identifier for the cluster.

    ", + "DescribeClusterInput$ClusterId": "

    The identifier of the cluster to describe.

    ", + "DescribeStepInput$ClusterId": "

    The identifier of the cluster with steps to describe.

    ", + "ListBootstrapActionsInput$ClusterId": "

    The cluster identifier for the bootstrap actions to list .

    ", + "ListInstanceGroupsInput$ClusterId": "

    The identifier of the cluster for which to list the instance groups.

    ", + "ListInstancesInput$ClusterId": "

    The identifier of the cluster for which to list the instances.

    ", + "ListStepsInput$ClusterId": "

    The identifier of the cluster for which to list the steps.

    " + } + }, + "ClusterState": { + "base": null, + "refs": { + "ClusterStateList$member": null, + "ClusterStatus$State": "

    The current state of the cluster.

    " + } + }, + "ClusterStateChangeReason": { + "base": "

    The reason that the cluster changed to its current state.

    ", + "refs": { + "ClusterStatus$StateChangeReason": "

    The reason for the cluster status change.

    " + } + }, + "ClusterStateChangeReasonCode": { + "base": null, + "refs": { + "ClusterStateChangeReason$Code": "

    The programmatic code for the state change reason.

    " + } + }, + "ClusterStateList": { + "base": null, + "refs": { + "ListClustersInput$ClusterStates": "

    The cluster state filters to apply when listing clusters.

    " + } + }, + "ClusterStatus": { + "base": "

    The detailed status of the cluster.

    ", + "refs": { + "Cluster$Status": "

    The current status details about the cluster.

    ", + "ClusterSummary$Status": "

    The details about the current status of the cluster.

    " + } + }, + "ClusterSummary": { + "base": "

    The summary description of the cluster.

    ", + "refs": { + "ClusterSummaryList$member": null + } + }, + "ClusterSummaryList": { + "base": null, + "refs": { + "ListClustersOutput$Clusters": "

    The list of clusters for the account based on the given filters.

    " + } + }, + "ClusterTimeline": { + "base": "

    Represents the timeline of the cluster's lifecycle.

    ", + "refs": { + "ClusterStatus$Timeline": "

    A timeline that represents the status of a cluster over the lifetime of the cluster.

    " + } + }, + "Command": { + "base": "

    An entity describing an executable that runs on a cluster.

    ", + "refs": { + "CommandList$member": null + } + }, + "CommandList": { + "base": null, + "refs": { + "ListBootstrapActionsOutput$BootstrapActions": "

    The bootstrap actions associated with the cluster .

    " + } + }, + "Configuration": { + "base": "

    Amazon EMR releases 4.x or later.

    Specifies a hardware and software configuration of the EMR cluster. This includes configurations for applications and software bundled with Amazon EMR. The Configuration object is a JSON object which is defined by a classification and a set of properties. Configurations can be nested, so a configuration may have its own Configuration objects listed.

    ", + "refs": { + "ConfigurationList$member": null + } + }, + "ConfigurationList": { + "base": null, + "refs": { + "Cluster$Configurations": "

    Amazon EMR releases 4.x or later.

    The list of Configurations supplied to the EMR cluster.

    ", + "Configuration$Configurations": "

    A list of configurations you apply to this configuration object.

    ", + "InstanceGroup$Configurations": "

    Amazon EMR releases 4.x or later.

    The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

    ", + "InstanceGroupConfig$Configurations": "

    Amazon EMR releases 4.x or later.

    The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

    ", + "RunJobFlowInput$Configurations": "

    Amazon EMR releases 4.x or later.

    The list of configurations supplied for the EMR cluster you are creating.

    " + } + }, + "Date": { + "base": null, + "refs": { + "ClusterTimeline$CreationDateTime": "

    The creation date and time of the cluster.

    ", + "ClusterTimeline$ReadyDateTime": "

    The date and time when the cluster was ready to execute steps.

    ", + "ClusterTimeline$EndDateTime": "

    The date and time when the cluster was terminated.

    ", + "DescribeJobFlowsInput$CreatedAfter": "

    Return only job flows created after this date and time.

    ", + "DescribeJobFlowsInput$CreatedBefore": "

    Return only job flows created before this date and time.

    ", + "InstanceGroupDetail$CreationDateTime": "

    The date/time the instance group was created.

    ", + "InstanceGroupDetail$StartDateTime": "

    The date/time the instance group was started.

    ", + "InstanceGroupDetail$ReadyDateTime": "

    The date/time the instance group was available to the cluster.

    ", + "InstanceGroupDetail$EndDateTime": "

    The date/time the instance group was terminated.

    ", + "InstanceGroupTimeline$CreationDateTime": "

    The creation date and time of the instance group.

    ", + "InstanceGroupTimeline$ReadyDateTime": "

    The date and time when the instance group became ready to perform tasks.

    ", + "InstanceGroupTimeline$EndDateTime": "

    The date and time when the instance group terminated.

    ", + "InstanceTimeline$CreationDateTime": "

    The creation date and time of the instance.

    ", + "InstanceTimeline$ReadyDateTime": "

    The date and time when the instance was ready to perform tasks.

    ", + "InstanceTimeline$EndDateTime": "

    The date and time when the instance was terminated.

    ", + "JobFlowExecutionStatusDetail$CreationDateTime": "

    The creation date and time of the job flow.

    ", + "JobFlowExecutionStatusDetail$StartDateTime": "

    The start date and time of the job flow.

    ", + "JobFlowExecutionStatusDetail$ReadyDateTime": "

    The date and time when the job flow was ready to start running bootstrap actions.

    ", + "JobFlowExecutionStatusDetail$EndDateTime": "

    The completion date and time of the job flow.

    ", + "ListClustersInput$CreatedAfter": "

    The creation date and time beginning value filter for listing clusters .

    ", + "ListClustersInput$CreatedBefore": "

    The creation date and time end value filter for listing clusters .

    ", + "StepExecutionStatusDetail$CreationDateTime": "

    The creation date and time of the step.

    ", + "StepExecutionStatusDetail$StartDateTime": "

    The start date and time of the step.

    ", + "StepExecutionStatusDetail$EndDateTime": "

    The completion date and time of the step.

    ", + "StepTimeline$CreationDateTime": "

    The date and time when the cluster step was created.

    ", + "StepTimeline$StartDateTime": "

    The date and time when the cluster step execution started.

    ", + "StepTimeline$EndDateTime": "

    The date and time when the cluster step execution completed or failed.

    " + } + }, + "DescribeClusterInput": { + "base": "

    This input determines which cluster to describe.

    ", + "refs": { + } + }, + "DescribeClusterOutput": { + "base": "

    This output contains the description of the cluster.

    ", + "refs": { + } + }, + "DescribeJobFlowsInput": { + "base": "

    The input for the DescribeJobFlows operation.

    ", + "refs": { + } + }, + "DescribeJobFlowsOutput": { + "base": "

    The output for the DescribeJobFlows operation.

    ", + "refs": { + } + }, + "DescribeStepInput": { + "base": "

    This input determines which step to describe.

    ", + "refs": { + } + }, + "DescribeStepOutput": { + "base": "

    This output contains the description of the cluster step.

    ", + "refs": { + } + }, + "EC2InstanceIdsToTerminateList": { + "base": null, + "refs": { + "InstanceGroupModifyConfig$EC2InstanceIdsToTerminate": "

    The EC2 InstanceIds to terminate. For advanced users only. Once you terminate the instances, the instance group will not return to its original requested size.

    " + } + }, + "Ec2InstanceAttributes": { + "base": "

    Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

    ", + "refs": { + "Cluster$Ec2InstanceAttributes": null + } + }, + "ErrorCode": { + "base": null, + "refs": { + "InvalidRequestException$ErrorCode": "

    The error code associated with the exception.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "InternalServerException$Message": "

    The message associated with the exception.

    ", + "InvalidRequestException$Message": "

    The message associated with the exception.

    " + } + }, + "HadoopJarStepConfig": { + "base": "

    A job flow step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.

    ", + "refs": { + "StepConfig$HadoopJarStep": "

    The JAR file used for the job flow step.

    " + } + }, + "HadoopStepConfig": { + "base": "

    A cluster step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.

    ", + "refs": { + "Step$Config": "

    The Hadoop job configuration of the cluster step.

    ", + "StepSummary$Config": "

    The Hadoop job configuration of the cluster step.

    " + } + }, + "Instance": { + "base": "

    Represents an EC2 instance provisioned as part of cluster.

    ", + "refs": { + "InstanceList$member": null + } + }, + "InstanceGroup": { + "base": "

    This entity represents an instance group, which is a group of instances that have common purpose. For example, CORE instance group is used for HDFS.

    ", + "refs": { + "InstanceGroupList$member": null + } + }, + "InstanceGroupConfig": { + "base": "

    Configuration defining a new instance group.

    ", + "refs": { + "InstanceGroupConfigList$member": null + } + }, + "InstanceGroupConfigList": { + "base": null, + "refs": { + "AddInstanceGroupsInput$InstanceGroups": "

    Instance Groups to add.

    ", + "JobFlowInstancesConfig$InstanceGroups": "

    Configuration for the job flow's instance groups.

    " + } + }, + "InstanceGroupDetail": { + "base": "

    Detailed information about an instance group.

    ", + "refs": { + "InstanceGroupDetailList$member": null + } + }, + "InstanceGroupDetailList": { + "base": null, + "refs": { + "JobFlowInstancesDetail$InstanceGroups": "

    Details about the job flow's instance groups.

    " + } + }, + "InstanceGroupId": { + "base": null, + "refs": { + "InstanceGroup$Id": "

    The identifier of the instance group.

    ", + "ListInstancesInput$InstanceGroupId": "

    The identifier of the instance group for which to list the instances.

    " + } + }, + "InstanceGroupIdsList": { + "base": null, + "refs": { + "AddInstanceGroupsOutput$InstanceGroupIds": "

    Instance group IDs of the newly created instance groups.

    " + } + }, + "InstanceGroupList": { + "base": null, + "refs": { + "ListInstanceGroupsOutput$InstanceGroups": "

    The list of instance groups for the cluster and given filters.

    " + } + }, + "InstanceGroupModifyConfig": { + "base": "

    Modify an instance group size.

    ", + "refs": { + "InstanceGroupModifyConfigList$member": null + } + }, + "InstanceGroupModifyConfigList": { + "base": null, + "refs": { + "ModifyInstanceGroupsInput$InstanceGroups": "

    Instance groups to change.

    " + } + }, + "InstanceGroupState": { + "base": null, + "refs": { + "InstanceGroupDetail$State": "

    State of instance group. The following values are deprecated: STARTING, TERMINATED, and FAILED.

    ", + "InstanceGroupStatus$State": "

    The current state of the instance group.

    " + } + }, + "InstanceGroupStateChangeReason": { + "base": "

    The status change reason details for the instance group.

    ", + "refs": { + "InstanceGroupStatus$StateChangeReason": "

    The status change reason details for the instance group.

    " + } + }, + "InstanceGroupStateChangeReasonCode": { + "base": null, + "refs": { + "InstanceGroupStateChangeReason$Code": "

    The programmable code for the state change reason.

    " + } + }, + "InstanceGroupStatus": { + "base": "

    The details of the instance group status.

    ", + "refs": { + "InstanceGroup$Status": "

    The current status of the instance group.

    " + } + }, + "InstanceGroupTimeline": { + "base": "

    The timeline of the instance group lifecycle.

    ", + "refs": { + "InstanceGroupStatus$Timeline": "

    The timeline of the instance group status over time.

    " + } + }, + "InstanceGroupType": { + "base": null, + "refs": { + "InstanceGroup$InstanceGroupType": "

    The type of the instance group. Valid values are MASTER, CORE or TASK.

    ", + "InstanceGroupTypeList$member": null + } + }, + "InstanceGroupTypeList": { + "base": null, + "refs": { + "ListInstancesInput$InstanceGroupTypes": "

    The type of instance group for which to list the instances.

    " + } + }, + "InstanceId": { + "base": null, + "refs": { + "EC2InstanceIdsToTerminateList$member": null, + "Instance$Id": "

    The unique identifier for the instance in Amazon EMR.

    ", + "Instance$Ec2InstanceId": "

    The unique identifier of the instance in Amazon EC2.

    " + } + }, + "InstanceList": { + "base": null, + "refs": { + "ListInstancesOutput$Instances": "

    The list of instances for the cluster and given filters.

    " + } + }, + "InstanceRoleType": { + "base": null, + "refs": { + "InstanceGroupConfig$InstanceRole": "

    The role of the instance group in the cluster.

    ", + "InstanceGroupDetail$InstanceRole": "

    Instance group role in the cluster

    " + } + }, + "InstanceState": { + "base": null, + "refs": { + "InstanceStatus$State": "

    The current state of the instance.

    " + } + }, + "InstanceStateChangeReason": { + "base": "

    The details of the status change reason for the instance.

    ", + "refs": { + "InstanceStatus$StateChangeReason": "

    The details of the status change reason for the instance.

    " + } + }, + "InstanceStateChangeReasonCode": { + "base": null, + "refs": { + "InstanceStateChangeReason$Code": "

    The programmable code for the state change reason.

    " + } + }, + "InstanceStatus": { + "base": "

    The instance status details.

    ", + "refs": { + "Instance$Status": "

    The current status of the instance.

    " + } + }, + "InstanceTimeline": { + "base": "

    The timeline of the instance lifecycle.

    ", + "refs": { + "InstanceStatus$Timeline": "

    The timeline of the instance status over time.

    " + } + }, + "InstanceType": { + "base": null, + "refs": { + "InstanceGroup$InstanceType": "

    The EC2 instance type for all instances in the instance group.

    ", + "InstanceGroupConfig$InstanceType": "

    The Amazon EC2 instance type for all instances in the instance group.

    ", + "InstanceGroupDetail$InstanceType": "

    Amazon EC2 Instance type.

    ", + "JobFlowInstancesConfig$MasterInstanceType": "

    The EC2 instance type of the master node.

    ", + "JobFlowInstancesConfig$SlaveInstanceType": "

    The EC2 instance type of the slave nodes.

    ", + "JobFlowInstancesDetail$MasterInstanceType": "

    The Amazon EC2 master node instance type.

    ", + "JobFlowInstancesDetail$SlaveInstanceType": "

    The Amazon EC2 slave node instance type.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "Cluster$NormalizedInstanceHours": "

    An approximation of the cost of the job flow, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

    ", + "ClusterSummary$NormalizedInstanceHours": "

    An approximation of the cost of the job flow, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

    ", + "InstanceGroup$RequestedInstanceCount": "

    The target number of instances for the instance group.

    ", + "InstanceGroup$RunningInstanceCount": "

    The number of instances currently running in this instance group.

    ", + "InstanceGroupConfig$InstanceCount": "

    Target number of instances for the instance group.

    ", + "InstanceGroupDetail$InstanceRequestCount": "

    Target number of instances to run in the instance group.

    ", + "InstanceGroupDetail$InstanceRunningCount": "

    Actual count of running instances.

    ", + "InstanceGroupModifyConfig$InstanceCount": "

    Target size for the instance group.

    ", + "JobFlowInstancesConfig$InstanceCount": "

    The number of Amazon EC2 instances used to execute the job flow.

    ", + "JobFlowInstancesDetail$InstanceCount": "

    The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and slave node. If the value is greater than 1, one instance is the master node and all others are slave nodes.

    ", + "JobFlowInstancesDetail$NormalizedInstanceHours": "

    An approximation of the cost of the job flow, represented in m1.small/hours. This value is incremented once for every hour an m1.small runs. Larger instances are weighted more, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

    " + } + }, + "InternalServerError": { + "base": "

    Indicates that an error occurred while processing the request and that the request was not completed.

    ", + "refs": { + } + }, + "InternalServerException": { + "base": "

    This exception occurs when there is an internal failure in the EMR service.

    ", + "refs": { + } + }, + "InvalidRequestException": { + "base": "

    This exception occurs when there is something wrong with user input.

    ", + "refs": { + } + }, + "JobFlowDetail": { + "base": "

    A description of a job flow.

    ", + "refs": { + "JobFlowDetailList$member": null + } + }, + "JobFlowDetailList": { + "base": null, + "refs": { + "DescribeJobFlowsOutput$JobFlows": "

    A list of job flows matching the parameters supplied.

    " + } + }, + "JobFlowExecutionState": { + "base": "

    The type of instance.

    A small instance

    A large instance

    ", + "refs": { + "JobFlowExecutionStateList$member": null, + "JobFlowExecutionStatusDetail$State": "

    The state of the job flow.

    " + } + }, + "JobFlowExecutionStateList": { + "base": null, + "refs": { + "DescribeJobFlowsInput$JobFlowStates": "

    Return only job flows whose state is contained in this list.

    " + } + }, + "JobFlowExecutionStatusDetail": { + "base": "

    Describes the status of the job flow.

    ", + "refs": { + "JobFlowDetail$ExecutionStatusDetail": "

    Describes the execution status of the job flow.

    " + } + }, + "JobFlowInstancesConfig": { + "base": "

    A description of the Amazon EC2 instance running the job flow. A valid JobFlowInstancesConfig must contain at least InstanceGroups, which is the recommended configuration. However, a valid alternative is to have MasterInstanceType, SlaveInstanceType, and InstanceCount (all three must be present).

    ", + "refs": { + "RunJobFlowInput$Instances": "

    A specification of the number and type of Amazon EC2 instances on which to run the job flow.

    " + } + }, + "JobFlowInstancesDetail": { + "base": "

    Specify the type of Amazon EC2 instances to run the job flow on.

    ", + "refs": { + "JobFlowDetail$Instances": "

    Describes the Amazon EC2 instances of the job flow.

    " + } + }, + "KeyValue": { + "base": "

    A key value pair.

    ", + "refs": { + "KeyValueList$member": null + } + }, + "KeyValueList": { + "base": null, + "refs": { + "HadoopJarStepConfig$Properties": "

    A list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.

    " + } + }, + "ListBootstrapActionsInput": { + "base": "

    This input determines which bootstrap actions to retrieve.

    ", + "refs": { + } + }, + "ListBootstrapActionsOutput": { + "base": "

    This output contains the bootstrap actions detail.

    ", + "refs": { + } + }, + "ListClustersInput": { + "base": "

    This input determines how the ListClusters action filters the list of clusters that it returns.

    ", + "refs": { + } + }, + "ListClustersOutput": { + "base": "

    This contains a ClusterSummaryList with the cluster details; for example, the cluster IDs, names, and status.

    ", + "refs": { + } + }, + "ListInstanceGroupsInput": { + "base": "

    This input determines which instance groups to retrieve.

    ", + "refs": { + } + }, + "ListInstanceGroupsOutput": { + "base": "

    This output contains the list of instance groups for the cluster and given filters.

    ", + "refs": { + } + }, + "ListInstancesInput": { + "base": "

    This input determines which instances to list.

    ", + "refs": { + } + }, + "ListInstancesOutput": { + "base": "

    This output contains the list of instances.

    ", + "refs": { + } + }, + "ListStepsInput": { + "base": "

    This input determines which steps to list.

    ", + "refs": { + } + }, + "ListStepsOutput": { + "base": "

    This output contains the list of steps.

    ", + "refs": { + } + }, + "Marker": { + "base": null, + "refs": { + "ListBootstrapActionsInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListBootstrapActionsOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListClustersInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListClustersOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListInstanceGroupsInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListInstanceGroupsOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListInstancesInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListInstancesOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListStepsInput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    ", + "ListStepsOutput$Marker": "

    The pagination token that indicates the next set of results to retrieve.

    " + } + }, + "MarketType": { + "base": null, + "refs": { + "InstanceGroup$Market": "

    The marketplace to provision instances for this group. Valid values are ON_DEMAND or SPOT.

    ", + "InstanceGroupConfig$Market": "

    Market type of the Amazon EC2 instances used to create a cluster node.

    ", + "InstanceGroupDetail$Market": "

    Market type of the Amazon EC2 instances used to create a cluster node.

    " + } + }, + "ModifyInstanceGroupsInput": { + "base": "

    Change the size of some instance groups.

    ", + "refs": { + } + }, + "NewSupportedProductsList": { + "base": null, + "refs": { + "RunJobFlowInput$NewSupportedProducts": "

    For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.

    A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:

    • \"mapr-m3\" - launch the cluster using MapR M3 Edition.
    • \"mapr-m5\" - launch the cluster using MapR M5 Edition.
    • \"mapr\" with the user arguments specifying \"--edition,m3\" or \"--edition,m5\" - launch the job flow using MapR M3 or M5 Edition respectively.
    • \"mapr-m7\" - launch the cluster using MapR M7 Edition.
    • \"hunk\" - launch the cluster with the Hunk Big Data Analytics Platform.
    • \"hue\"- launch the cluster with Hue installed.
    • \"spark\" - launch the cluster with Apache Spark installed.
    • \"ganglia\" - launch the cluster with the Ganglia Monitoring System installed.
    " + } + }, + "PlacementType": { + "base": "

    The Amazon EC2 location for the job flow.

    ", + "refs": { + "JobFlowInstancesConfig$Placement": "

    The Availability Zone the job flow will run in.

    ", + "JobFlowInstancesDetail$Placement": "

    The Amazon EC2 Availability Zone for the job flow.

    " + } + }, + "RemoveTagsInput": { + "base": "

    This input identifies a cluster and a list of tags to remove.

    ", + "refs": { + } + }, + "RemoveTagsOutput": { + "base": "

    This output indicates the result of removing tags from a resource.

    ", + "refs": { + } + }, + "ResourceId": { + "base": null, + "refs": { + "AddTagsInput$ResourceId": "

    The Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.

    ", + "RemoveTagsInput$ResourceId": "

    The Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.

    " + } + }, + "RunJobFlowInput": { + "base": "

    Input to the RunJobFlow operation.

    ", + "refs": { + } + }, + "RunJobFlowOutput": { + "base": "

    The result of the RunJobFlow operation.

    ", + "refs": { + } + }, + "ScriptBootstrapActionConfig": { + "base": "

    Configuration of the script to run during a bootstrap action.

    ", + "refs": { + "BootstrapActionConfig$ScriptBootstrapAction": "

    The script run by the bootstrap action.

    " + } + }, + "SecurityGroupsList": { + "base": null, + "refs": { + "JobFlowInstancesConfig$AdditionalMasterSecurityGroups": "

    A list of additional Amazon EC2 security group IDs for the master node.

    ", + "JobFlowInstancesConfig$AdditionalSlaveSecurityGroups": "

    A list of additional Amazon EC2 security group IDs for the slave nodes.

    " + } + }, + "SetTerminationProtectionInput": { + "base": "

    The input argument to the TerminationProtection operation.

    ", + "refs": { + } + }, + "SetVisibleToAllUsersInput": { + "base": "

    The input to the SetVisibleToAllUsers action.

    ", + "refs": { + } + }, + "Step": { + "base": "

    This represents a step in a cluster.

    ", + "refs": { + "DescribeStepOutput$Step": "

    The step details for the requested step identifier.

    " + } + }, + "StepConfig": { + "base": "

    Specification of a job flow step.

    ", + "refs": { + "StepConfigList$member": null, + "StepDetail$StepConfig": "

    The step configuration.

    " + } + }, + "StepConfigList": { + "base": null, + "refs": { + "AddJobFlowStepsInput$Steps": "

    A list of StepConfig to be executed by the job flow.

    ", + "RunJobFlowInput$Steps": "

    A list of steps to be executed by the job flow.

    " + } + }, + "StepDetail": { + "base": "

    Combines the execution state and configuration of a step.

    ", + "refs": { + "StepDetailList$member": null + } + }, + "StepDetailList": { + "base": null, + "refs": { + "JobFlowDetail$Steps": "

    A list of steps run by the job flow.

    " + } + }, + "StepExecutionState": { + "base": null, + "refs": { + "StepExecutionStatusDetail$State": "

    The state of the job flow step.

    " + } + }, + "StepExecutionStatusDetail": { + "base": "

    The execution state of a step.

    ", + "refs": { + "StepDetail$ExecutionStatusDetail": "

    The description of the step status.

    " + } + }, + "StepId": { + "base": null, + "refs": { + "DescribeStepInput$StepId": "

    The identifier of the step to describe.

    ", + "Step$Id": "

    The identifier of the cluster step.

    ", + "StepSummary$Id": "

    The identifier of the cluster step.

    " + } + }, + "StepIdsList": { + "base": null, + "refs": { + "AddJobFlowStepsOutput$StepIds": "

    The identifiers of the list of steps added to the job flow.

    " + } + }, + "StepState": { + "base": null, + "refs": { + "StepStateList$member": null, + "StepStatus$State": "

    The execution state of the cluster step.

    " + } + }, + "StepStateChangeReason": { + "base": "

    The details of the step state change reason.

    ", + "refs": { + "StepStatus$StateChangeReason": "

    The reason for the step execution status change.

    " + } + }, + "StepStateChangeReasonCode": { + "base": null, + "refs": { + "StepStateChangeReason$Code": "

    The programmable code for the state change reason. Note: Currently, the service provides no code for the state change.

    " + } + }, + "StepStateList": { + "base": null, + "refs": { + "ListStepsInput$StepStates": "

    The filter to limit the step list based on certain states.

    " + } + }, + "StepStatus": { + "base": "

    The execution status details of the cluster step.

    ", + "refs": { + "Step$Status": "

    The current execution status details of the cluster step.

    ", + "StepSummary$Status": "

    The current execution status details of the cluster step.

    " + } + }, + "StepSummary": { + "base": "

    The summary of the cluster step.

    ", + "refs": { + "StepSummaryList$member": null + } + }, + "StepSummaryList": { + "base": null, + "refs": { + "ListStepsOutput$Steps": "

    The filtered list of steps for the cluster.

    " + } + }, + "StepTimeline": { + "base": "

    The timeline of the cluster step lifecycle.

    ", + "refs": { + "StepStatus$Timeline": "

    The timeline of the cluster step status over time.

    " + } + }, + "String": { + "base": null, + "refs": { + "Application$Name": "

    The name of the application.

    ", + "Application$Version": "

    The version of the application.

    ", + "Cluster$Name": "

    The name of the cluster.

    ", + "Cluster$LogUri": "

    The path to the Amazon S3 location where logs for this cluster are stored.

    ", + "Cluster$RequestedAmiVersion": "

    The AMI version requested for this cluster.

    ", + "Cluster$RunningAmiVersion": "

    The AMI version running on this cluster.

    ", + "Cluster$ReleaseLabel": "

    The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use amiVersion instead of ReleaseLabel.

    ", + "Cluster$ServiceRole": "

    The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

    ", + "Cluster$MasterPublicDnsName": "

    The public DNS name of the master EC2 instance.

    ", + "ClusterStateChangeReason$Message": "

    The descriptive message for the state change reason.

    ", + "ClusterSummary$Name": "

    The name of the cluster.

    ", + "Command$Name": "

    The name of the command.

    ", + "Command$ScriptPath": "

    The Amazon S3 location of the command script.

    ", + "Configuration$Classification": "

    The classification of a configuration. For more information see, Amazon EMR Configurations.

    ", + "Ec2InstanceAttributes$Ec2KeyName": "

    The name of the Amazon EC2 key pair to use when connecting with SSH into the master node as a user named \"hadoop\".

    ", + "Ec2InstanceAttributes$Ec2SubnetId": "

    To launch the job flow in Amazon VPC, set this parameter to the identifier of the Amazon VPC subnet where you want the job flow to launch. If you do not specify this value, the job flow is launched in the normal AWS cloud, outside of a VPC.

    Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance type for nodes of a job flow launched in a VPC.

    ", + "Ec2InstanceAttributes$Ec2AvailabilityZone": "

    The Availability Zone in which the cluster will run.

    ", + "Ec2InstanceAttributes$IamInstanceProfile": "

    The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

    ", + "Ec2InstanceAttributes$EmrManagedMasterSecurityGroup": "

    The identifier of the Amazon EC2 security group for the master node.

    ", + "Ec2InstanceAttributes$EmrManagedSlaveSecurityGroup": "

    The identifier of the Amazon EC2 security group for the slave nodes.

    ", + "Ec2InstanceAttributes$ServiceAccessSecurityGroup": "

    The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.

    ", + "HadoopStepConfig$Jar": "

    The path to the JAR file that runs during the step.

    ", + "HadoopStepConfig$MainClass": "

    The name of the main class in the specified Java file. If not specified, the JAR file should specify a main class in its manifest file.

    ", + "Instance$PublicDnsName": "

    The public DNS name of the instance.

    ", + "Instance$PublicIpAddress": "

    The public IP address of the instance.

    ", + "Instance$PrivateDnsName": "

    The private DNS name of the instance.

    ", + "Instance$PrivateIpAddress": "

    The private IP address of the instance.

    ", + "InstanceGroup$Name": "

    The name of the instance group.

    ", + "InstanceGroup$BidPrice": "

    The bid price for each EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD.

    ", + "InstanceGroupStateChangeReason$Message": "

    The status change reason description.

    ", + "InstanceStateChangeReason$Message": "

    The status change reason description.

    ", + "Step$Name": "

    The name of the cluster step.

    ", + "StepStateChangeReason$Message": "

    The descriptive message for the state change reason.

    ", + "StepSummary$Name": "

    The name of the cluster step.

    ", + "StringList$member": null, + "StringMap$key": null, + "StringMap$value": null, + "Tag$Key": "

    A user-defined key, which is the minimum required information for a valid tag. For more information, see Tagging Amazon EMR Resources.

    ", + "Tag$Value": "

    A user-defined value, which is optional in a tag. For more information, see Tagging Amazon EMR Resources.

    " + } + }, + "StringList": { + "base": null, + "refs": { + "Application$Args": "

    Arguments for Amazon EMR to pass to the application.

    ", + "Command$Args": "

    Arguments for Amazon EMR to pass to the command for execution.

    ", + "Ec2InstanceAttributes$AdditionalMasterSecurityGroups": "

    A list of additional Amazon EC2 security group IDs for the master node.

    ", + "Ec2InstanceAttributes$AdditionalSlaveSecurityGroups": "

    A list of additional Amazon EC2 security group IDs for the slave nodes.

    ", + "HadoopStepConfig$Args": "

    The list of command line arguments to pass to the JAR file's main function for execution.

    ", + "RemoveTagsInput$TagKeys": "

    A list of tag keys to remove from a resource.

    " + } + }, + "StringMap": { + "base": null, + "refs": { + "Application$AdditionalInfo": "

    This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.

    ", + "Configuration$Properties": "

    A set of properties supplied to the Configuration object.

    ", + "HadoopStepConfig$Properties": "

    The list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.

    " + } + }, + "SupportedProductConfig": { + "base": "

    The list of supported product configurations which allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.

    ", + "refs": { + "NewSupportedProductsList$member": null + } + }, + "SupportedProductsList": { + "base": null, + "refs": { + "JobFlowDetail$SupportedProducts": "

    A list of strings set by third party software when the job flow is launched. If you are not using third party software to manage the job flow this value is empty.

    ", + "RunJobFlowInput$SupportedProducts": "

    For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.

    A list of strings that indicates third-party software to use with the job flow. For more information, go to Use Third Party Applications with Amazon EMR. Currently supported values are:

    • \"mapr-m3\" - launch the job flow using MapR M3 Edition.
    • \"mapr-m5\" - launch the job flow using MapR M5 Edition.
    " + } + }, + "Tag": { + "base": "

    A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tagging Amazon EMR Resources.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": null, + "refs": { + "AddTagsInput$Tags": "

    A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

    ", + "Cluster$Tags": "

    A list of tags associated with a cluster.

    ", + "RunJobFlowInput$Tags": "

    A list of tags to associate with a cluster and propagate to Amazon EC2 instances.

    " + } + }, + "TerminateJobFlowsInput": { + "base": "

    Input to the TerminateJobFlows operation.

    ", + "refs": { + } + }, + "XmlString": { + "base": null, + "refs": { + "HadoopJarStepConfig$Jar": "

    A path to a JAR file run during the step.

    ", + "HadoopJarStepConfig$MainClass": "

    The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.

    ", + "InstanceGroupDetail$LastStateChangeReason": "

    Details regarding the state of the instance group.

    ", + "JobFlowDetail$LogUri": "

    The location in Amazon S3 where log files for the job are stored.

    ", + "JobFlowDetail$JobFlowRole": "

    The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

    ", + "JobFlowDetail$ServiceRole": "

    The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

    ", + "JobFlowExecutionStatusDetail$LastStateChangeReason": "

    Description of the job flow last changed state.

    ", + "JobFlowInstancesDetail$MasterPublicDnsName": "

    The DNS name of the master node.

    ", + "JobFlowInstancesDetail$MasterInstanceId": "

    The Amazon EC2 instance identifier of the master node.

    ", + "KeyValue$Key": "

    The unique identifier of a key value pair.

    ", + "KeyValue$Value": "

    The value part of the identified key.

    ", + "PlacementType$AvailabilityZone": "

    The Amazon EC2 Availability Zone for the job flow.

    ", + "RunJobFlowInput$LogUri": "

    The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.

    ", + "RunJobFlowInput$AdditionalInfo": "

    A JSON string for selecting additional features.

    ", + "RunJobFlowInput$JobFlowRole": "

    Also called instance profile and EC2 role. An IAM role for an EMR cluster. The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole. In order to use the default role, you must have already created it using the CLI or console.

    ", + "RunJobFlowInput$ServiceRole": "

    The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

    ", + "ScriptBootstrapActionConfig$Path": "

    Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system.

    ", + "StepExecutionStatusDetail$LastStateChangeReason": "

    A description of the step's current state.

    ", + "XmlStringList$member": null + } + }, + "XmlStringList": { + "base": null, + "refs": { + "DescribeJobFlowsInput$JobFlowIds": "

    Return only job flows whose job flow ID is contained in this list.

    ", + "HadoopJarStepConfig$Args": "

    A list of command line arguments passed to the JAR file's main function when executed.

    ", + "ListStepsInput$StepIds": "

    The filter to limit the step list based on the identifier of the steps.

    ", + "ScriptBootstrapActionConfig$Args": "

    A list of command line arguments to pass to the bootstrap action script.

    ", + "SetTerminationProtectionInput$JobFlowIds": "

    A list of strings that uniquely identify the job flows to protect. This identifier is returned by RunJobFlow and can also be obtained from DescribeJobFlows .

    ", + "SetVisibleToAllUsersInput$JobFlowIds": "

    Identifiers of the job flows to receive the new visibility setting.

    ", + "SupportedProductConfig$Args": "

    The list of user-supplied arguments.

    ", + "TerminateJobFlowsInput$JobFlowIds": "

    A list of job flows to be shutdown.

    " + } + }, + "XmlStringMaxLen256": { + "base": null, + "refs": { + "AddInstanceGroupsInput$JobFlowId": "

    Job flow in which to add the instance groups.

    ", + "AddInstanceGroupsOutput$JobFlowId": "

    The job flow ID in which the instance groups are added.

    ", + "AddJobFlowStepsInput$JobFlowId": "

    A string that uniquely identifies the job flow. This identifier is returned by RunJobFlow and can also be obtained from ListClusters.

    ", + "BootstrapActionConfig$Name": "

    The name of the bootstrap action.

    ", + "InstanceGroupConfig$Name": "

    Friendly name given to the instance group.

    ", + "InstanceGroupConfig$BidPrice": "

    Bid price for each Amazon EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD.

    ", + "InstanceGroupDetail$InstanceGroupId": "

    Unique identifier for the instance group.

    ", + "InstanceGroupDetail$Name": "

    Friendly name for the instance group.

    ", + "InstanceGroupDetail$BidPrice": "

    Bid price for EC2 Instances when launching nodes as Spot Instances, expressed in USD.

    ", + "InstanceGroupIdsList$member": null, + "InstanceGroupModifyConfig$InstanceGroupId": "

    Unique ID of the instance group to expand or shrink.

    ", + "JobFlowDetail$JobFlowId": "

    The job flow identifier.

    ", + "JobFlowDetail$Name": "

    The name of the job flow.

    ", + "JobFlowDetail$AmiVersion": "

    The version of the AMI used to initialize Amazon EC2 instances in the job flow. For a list of AMI versions currently supported by Amazon ElasticMapReduce, go to AMI Versions Supported in Elastic MapReduce in the Amazon Elastic MapReduce Developer Guide.

    ", + "JobFlowInstancesConfig$Ec2KeyName": "

    The name of the Amazon EC2 key pair that can be used to ssh to the master node as the user called \"hadoop.\"

    ", + "JobFlowInstancesConfig$HadoopVersion": "

    The Hadoop version for the job flow. Valid inputs are \"0.18\" (deprecated), \"0.20\" (deprecated), \"0.20.205\" (deprecated), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

    ", + "JobFlowInstancesConfig$Ec2SubnetId": "

    To launch the job flow in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the job flow to launch. If you do not specify this value, the job flow is launched in the normal Amazon Web Services cloud, outside of an Amazon VPC.

    Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance type for nodes of a job flow launched in an Amazon VPC.

    ", + "JobFlowInstancesConfig$EmrManagedMasterSecurityGroup": "

    The identifier of the Amazon EC2 security group for the master node.

    ", + "JobFlowInstancesConfig$EmrManagedSlaveSecurityGroup": "

    The identifier of the Amazon EC2 security group for the slave nodes.

    ", + "JobFlowInstancesConfig$ServiceAccessSecurityGroup": "

    The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.

    ", + "JobFlowInstancesDetail$Ec2KeyName": "

    The name of an Amazon EC2 key pair that can be used to ssh to the master node of job flow.

    ", + "JobFlowInstancesDetail$Ec2SubnetId": "

    For job flows launched within Amazon Virtual Private Cloud, this value specifies the identifier of the subnet where the job flow was launched.

    ", + "JobFlowInstancesDetail$HadoopVersion": "

    The Hadoop version for the job flow.

    ", + "RunJobFlowInput$Name": "

    The name of the job flow.

    ", + "RunJobFlowInput$AmiVersion": "

    For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use ReleaseLabel.

    The version of the Amazon Machine Image (AMI) to use when launching Amazon EC2 instances in the job flow. The following values are valid:

    • The version number of the AMI to use, for example, \"2.0.\"

    If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 and 0.20) you can use the JobFlowInstancesConfig HadoopVersion parameter to modify the version of Hadoop from the defaults shown above.

    For details about the AMI versions currently supported by Amazon Elastic MapReduce, go to AMI Versions Supported in Elastic MapReduce in the Amazon Elastic MapReduce Developer's Guide.

    ", + "RunJobFlowInput$ReleaseLabel": "

    Amazon EMR releases 4.x or later.

    The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use amiVersion instead instead of ReleaseLabel.

    ", + "RunJobFlowOutput$JobFlowId": "

    An unique identifier for the job flow.

    ", + "SecurityGroupsList$member": null, + "StepConfig$Name": "

    The name of the job flow step.

    ", + "StepIdsList$member": null, + "SupportedProductConfig$Name": "

    The name of the product configuration.

    ", + "SupportedProductsList$member": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "pagination": { + "DescribeJobFlows": { + "result_key": "JobFlows" + }, + "ListBootstrapActions": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "BootstrapActions" + }, + "ListClusters": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "Clusters" + }, + "ListInstanceGroups": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "InstanceGroups" + }, + "ListInstances": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "Instances" + }, + "ListSteps": { + "input_token": "Marker", + "output_token": "Marker", 
+ "result_key": "Steps" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elasticmapreduce/2009-03-31/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,67 @@ +{ + "version": 2, + "waiters": { + "ClusterRunning": { + "delay": 30, + "operation": "DescribeCluster", + "maxAttempts": 60, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "RUNNING" + }, + { + "state": "success", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "WAITING" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "TERMINATING" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "TERMINATED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Cluster.Status.State", + "expected": "TERMINATED_WITH_ERRORS" + } + ] + }, + "StepComplete": { + "delay": 30, + "operation": "DescribeStep", + "maxAttempts": 60, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "COMPLETED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "FAILED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Step.Status.State", + "expected": "CANCELLED" + } + ] + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1807 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-09-25", + "endpointPrefix":"elastictranscoder", + "serviceFullName":"Amazon Elastic Transcoder", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "CancelJob":{ + "name":"CancelJob", + "http":{ + "method":"DELETE", + "requestUri":"/2012-09-25/jobs/{Id}", + "responseCode":202 + }, + "input":{"shape":"CancelJobRequest"}, + "output":{"shape":"CancelJobResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "CreateJob":{ + "name":"CreateJob", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/jobs", + "responseCode":201 + }, + "input":{"shape":"CreateJobRequest"}, + "output":{"shape":"CreateJobResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "CreatePipeline":{ + "name":"CreatePipeline", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/pipelines", + "responseCode":201 + }, + "input":{"shape":"CreatePipelineRequest"}, + "output":{"shape":"CreatePipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "CreatePreset":{ + "name":"CreatePreset", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/presets", + "responseCode":201 + }, + "input":{"shape":"CreatePresetRequest"}, + "output":{"shape":"CreatePresetResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InternalServiceException", + 
"exception":true, + "fault":true + } + ] + }, + "DeletePipeline":{ + "name":"DeletePipeline", + "http":{ + "method":"DELETE", + "requestUri":"/2012-09-25/pipelines/{Id}", + "responseCode":202 + }, + "input":{"shape":"DeletePipelineRequest"}, + "output":{"shape":"DeletePipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "DeletePreset":{ + "name":"DeletePreset", + "http":{ + "method":"DELETE", + "requestUri":"/2012-09-25/presets/{Id}", + "responseCode":202 + }, + "input":{"shape":"DeletePresetRequest"}, + "output":{"shape":"DeletePresetResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListJobsByPipeline":{ + "name":"ListJobsByPipeline", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/jobsByPipeline/{PipelineId}" + }, + "input":{"shape":"ListJobsByPipelineRequest"}, + "output":{"shape":"ListJobsByPipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListJobsByStatus":{ + "name":"ListJobsByStatus", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/jobsByStatus/{Status}" + }, + "input":{"shape":"ListJobsByStatusRequest"}, + "output":{"shape":"ListJobsByStatusResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListPipelines":{ + "name":"ListPipelines", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/pipelines" + }, + "input":{"shape":"ListPipelinesRequest"}, + "output":{"shape":"ListPipelinesResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ListPresets":{ + "name":"ListPresets", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/presets" + }, + "input":{"shape":"ListPresetsRequest"}, + "output":{"shape":"ListPresetsResponse"}, + "errors":[ + { + "shape":"ValidationException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ReadJob":{ + "name":"ReadJob", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/jobs/{Id}" + }, + "input":{"shape":"ReadJobRequest"}, + "output":{"shape":"ReadJobResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ReadPipeline":{ + "name":"ReadPipeline", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/pipelines/{Id}" + }, + "input":{"shape":"ReadPipelineRequest"}, + "output":{"shape":"ReadPipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "ReadPreset":{ + "name":"ReadPreset", + "http":{ + "method":"GET", + "requestUri":"/2012-09-25/presets/{Id}" + }, + "input":{"shape":"ReadPresetRequest"}, + "output":{"shape":"ReadPresetResponse"}, + "errors":[ + { + "shape":"ValidationException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "TestRole":{ + "name":"TestRole", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/roleTests", + "responseCode":200 + }, + "input":{"shape":"TestRoleRequest"}, + "output":{"shape":"TestRoleResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "UpdatePipeline":{ + "name":"UpdatePipeline", + "http":{ + "method":"PUT", + "requestUri":"/2012-09-25/pipelines/{Id}", + "responseCode":200 + }, + "input":{"shape":"UpdatePipelineRequest"}, + "output":{"shape":"UpdatePipelineResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + 
"UpdatePipelineNotifications":{ + "name":"UpdatePipelineNotifications", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/pipelines/{Id}/notifications" + }, + "input":{"shape":"UpdatePipelineNotificationsRequest"}, + "output":{"shape":"UpdatePipelineNotificationsResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + }, + "UpdatePipelineStatus":{ + "name":"UpdatePipelineStatus", + "http":{ + "method":"POST", + "requestUri":"/2012-09-25/pipelines/{Id}/status" + }, + "input":{"shape":"UpdatePipelineStatusRequest"}, + "output":{"shape":"UpdatePipelineStatusResponse"}, + "errors":[ + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"IncompatibleVersionException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceInUseException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"AccessDeniedException", + "error":{"httpStatusCode":403}, + "exception":true + }, + { + "shape":"InternalServiceException", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "AccessControl":{ + "type":"string", + "pattern":"(^FullControl$)|(^Read$)|(^ReadAcp$)|(^WriteAcp$)" + }, + "AccessControls":{ + "type":"list", + "member":{"shape":"AccessControl"}, + "max":30 + }, + "AccessDeniedException":{ + "type":"structure", + 
"members":{ + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "Artwork":{ + "type":"structure", + "members":{ + "InputKey":{"shape":"WatermarkKey"}, + "MaxWidth":{"shape":"DigitsOrAuto"}, + "MaxHeight":{"shape":"DigitsOrAuto"}, + "SizingPolicy":{"shape":"SizingPolicy"}, + "PaddingPolicy":{"shape":"PaddingPolicy"}, + "AlbumArtFormat":{"shape":"JpgOrPng"}, + "Encryption":{"shape":"Encryption"} + } + }, + "Artworks":{ + "type":"list", + "member":{"shape":"Artwork"} + }, + "Ascending":{ + "type":"string", + "pattern":"(^true$)|(^false$)" + }, + "AspectRatio":{ + "type":"string", + "pattern":"(^auto$)|(^1:1$)|(^4:3$)|(^3:2$)|(^16:9$)" + }, + "AudioBitDepth":{ + "type":"string", + "pattern":"(^16$)|(^24$)" + }, + "AudioBitOrder":{ + "type":"string", + "pattern":"(^LittleEndian$)" + }, + "AudioBitRate":{ + "type":"string", + "pattern":"^\\d{1,3}$" + }, + "AudioChannels":{ + "type":"string", + "pattern":"(^auto$)|(^0$)|(^1$)|(^2$)" + }, + "AudioCodec":{ + "type":"string", + "pattern":"(^AAC$)|(^vorbis$)|(^mp3$)|(^mp2$)|(^pcm$)|(^flac$)" + }, + "AudioCodecOptions":{ + "type":"structure", + "members":{ + "Profile":{"shape":"AudioCodecProfile"}, + "BitDepth":{"shape":"AudioBitDepth"}, + "BitOrder":{"shape":"AudioBitOrder"}, + "Signed":{"shape":"AudioSigned"} + } + }, + "AudioCodecProfile":{ + "type":"string", + "pattern":"(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)" + }, + "AudioPackingMode":{ + "type":"string", + "pattern":"(^SingleTrack$)|(^OneChannelPerTrack$)|(^OneChannelPerTrackWithMosTo8Tracks$)" + }, + "AudioParameters":{ + "type":"structure", + "members":{ + "Codec":{"shape":"AudioCodec"}, + "SampleRate":{"shape":"AudioSampleRate"}, + "BitRate":{"shape":"AudioBitRate"}, + "Channels":{"shape":"AudioChannels"}, + "AudioPackingMode":{"shape":"AudioPackingMode"}, + "CodecOptions":{"shape":"AudioCodecOptions"} + } + }, + "AudioSampleRate":{ + "type":"string", + "pattern":"(^auto$)|(^22050$)|(^32000$)|(^44100$)|(^48000$)|(^96000$)|(^192000$)" + }, + 
"AudioSigned":{ + "type":"string", + "pattern":"(^Signed$)" + }, + "Base64EncodedString":{ + "type":"string", + "pattern":"^$|(^(?:[A-Za-z0-9\\+/]{4})*(?:[A-Za-z0-9\\+/]{2}==|[A-Za-z0-9\\+/]{3}=)?$)" + }, + "BucketName":{ + "type":"string", + "pattern":"^(\\w|\\.|-){1,255}$" + }, + "CancelJobRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "CancelJobResponse":{ + "type":"structure", + "members":{ + } + }, + "CaptionFormat":{ + "type":"structure", + "members":{ + "Format":{"shape":"CaptionFormatFormat"}, + "Pattern":{"shape":"CaptionFormatPattern"}, + "Encryption":{"shape":"Encryption"} + } + }, + "CaptionFormatFormat":{ + "type":"string", + "pattern":"(^mov-text$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)" + }, + "CaptionFormatPattern":{ + "type":"string", + "pattern":"(^$)|(^.*\\{language\\}.*$)" + }, + "CaptionFormats":{ + "type":"list", + "member":{"shape":"CaptionFormat"}, + "max":4 + }, + "CaptionMergePolicy":{ + "type":"string", + "pattern":"(^MergeOverride$)|(^MergeRetain$)|(^Override$)" + }, + "CaptionSource":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "Language":{"shape":"Key"}, + "TimeOffset":{"shape":"TimeOffset"}, + "Label":{"shape":"Name"}, + "Encryption":{"shape":"Encryption"} + } + }, + "CaptionSources":{ + "type":"list", + "member":{"shape":"CaptionSource"}, + "max":20 + }, + "Captions":{ + "type":"structure", + "members":{ + "MergePolicy":{"shape":"CaptionMergePolicy"}, + "CaptionSources":{"shape":"CaptionSources"}, + "CaptionFormats":{"shape":"CaptionFormats"} + } + }, + "Clip":{ + "type":"structure", + "members":{ + "TimeSpan":{"shape":"TimeSpan"} + } + }, + "CodecOption":{ + "type":"string", + "min":1, + "max":255 + }, + "CodecOptions":{ + "type":"map", + "key":{"shape":"CodecOption"}, + "value":{"shape":"CodecOption"}, + "max":30 + }, + "Composition":{ + "type":"list", + "member":{"shape":"Clip"} + }, + "CreateJobOutput":{ + 
"type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "ThumbnailPattern":{"shape":"ThumbnailPattern"}, + "ThumbnailEncryption":{"shape":"Encryption"}, + "Rotate":{"shape":"Rotate"}, + "PresetId":{"shape":"Id"}, + "SegmentDuration":{"shape":"FloatString"}, + "Watermarks":{"shape":"JobWatermarks"}, + "AlbumArt":{"shape":"JobAlbumArt"}, + "Composition":{"shape":"Composition"}, + "Captions":{"shape":"Captions"}, + "Encryption":{"shape":"Encryption"} + } + }, + "CreateJobOutputs":{ + "type":"list", + "member":{"shape":"CreateJobOutput"}, + "max":30 + }, + "CreateJobPlaylist":{ + "type":"structure", + "members":{ + "Name":{"shape":"Filename"}, + "Format":{"shape":"PlaylistFormat"}, + "OutputKeys":{"shape":"OutputKeys"}, + "HlsContentProtection":{"shape":"HlsContentProtection"}, + "PlayReadyDrm":{"shape":"PlayReadyDrm"} + } + }, + "CreateJobPlaylists":{ + "type":"list", + "member":{"shape":"CreateJobPlaylist"}, + "max":30 + }, + "CreateJobRequest":{ + "type":"structure", + "required":[ + "PipelineId", + "Input" + ], + "members":{ + "PipelineId":{"shape":"Id"}, + "Input":{"shape":"JobInput"}, + "Output":{"shape":"CreateJobOutput"}, + "Outputs":{"shape":"CreateJobOutputs"}, + "OutputKeyPrefix":{"shape":"Key"}, + "Playlists":{"shape":"CreateJobPlaylists"}, + "UserMetadata":{"shape":"UserMetadata"} + } + }, + "CreateJobResponse":{ + "type":"structure", + "members":{ + "Job":{"shape":"Job"} + } + }, + "CreatePipelineRequest":{ + "type":"structure", + "required":[ + "Name", + "InputBucket", + "Role" + ], + "members":{ + "Name":{"shape":"Name"}, + "InputBucket":{"shape":"BucketName"}, + "OutputBucket":{"shape":"BucketName"}, + "Role":{"shape":"Role"}, + "AwsKmsKeyArn":{"shape":"KeyArn"}, + "Notifications":{"shape":"Notifications"}, + "ContentConfig":{"shape":"PipelineOutputConfig"}, + "ThumbnailConfig":{"shape":"PipelineOutputConfig"} + } + }, + "CreatePipelineResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"}, + 
"Warnings":{"shape":"Warnings"} + } + }, + "CreatePresetRequest":{ + "type":"structure", + "required":[ + "Name", + "Container" + ], + "members":{ + "Name":{"shape":"Name"}, + "Description":{"shape":"Description"}, + "Container":{"shape":"PresetContainer"}, + "Video":{"shape":"VideoParameters"}, + "Audio":{"shape":"AudioParameters"}, + "Thumbnails":{"shape":"Thumbnails"} + } + }, + "CreatePresetResponse":{ + "type":"structure", + "members":{ + "Preset":{"shape":"Preset"}, + "Warning":{"shape":"String"} + } + }, + "DeletePipelineRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeletePipelineResponse":{ + "type":"structure", + "members":{ + } + }, + "DeletePresetRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeletePresetResponse":{ + "type":"structure", + "members":{ + } + }, + "Description":{ + "type":"string", + "min":0, + "max":255 + }, + "DetectedProperties":{ + "type":"structure", + "members":{ + "Width":{"shape":"NullableInteger"}, + "Height":{"shape":"NullableInteger"}, + "FrameRate":{"shape":"FloatString"}, + "FileSize":{"shape":"NullableLong"}, + "DurationMillis":{"shape":"NullableLong"} + } + }, + "Digits":{ + "type":"string", + "pattern":"^\\d{1,5}$" + }, + "DigitsOrAuto":{ + "type":"string", + "pattern":"(^auto$)|(^\\d{2,4}$)" + }, + "Encryption":{ + "type":"structure", + "members":{ + "Mode":{"shape":"EncryptionMode"}, + "Key":{"shape":"Base64EncodedString"}, + "KeyMd5":{"shape":"Base64EncodedString"}, + "InitializationVector":{"shape":"ZeroTo255String"} + } + }, + "EncryptionMode":{ + "type":"string", + "pattern":"(^s3$)|(^s3-aws-kms$)|(^aes-cbc-pkcs7$)|(^aes-ctr$)|(^aes-gcm$)" + }, + "ExceptionMessages":{ + "type":"list", + "member":{"shape":"String"} + }, + "Filename":{ + "type":"string", + "min":1, + "max":255 + }, + "FixedGOP":{ + "type":"string", 
+ "pattern":"(^true$)|(^false$)" + }, + "FloatString":{ + "type":"string", + "pattern":"^\\d{1,5}(\\.\\d{0,5})?$" + }, + "FrameRate":{ + "type":"string", + "pattern":"(^auto$)|(^10$)|(^15$)|(^23.97$)|(^24$)|(^25$)|(^29.97$)|(^30$)|(^50$)|(^60$)" + }, + "Grantee":{ + "type":"string", + "min":1, + "max":255 + }, + "GranteeType":{ + "type":"string", + "pattern":"(^Canonical$)|(^Email$)|(^Group$)" + }, + "HlsContentProtection":{ + "type":"structure", + "members":{ + "Method":{"shape":"HlsContentProtectionMethod"}, + "Key":{"shape":"Base64EncodedString"}, + "KeyMd5":{"shape":"Base64EncodedString"}, + "InitializationVector":{"shape":"ZeroTo255String"}, + "LicenseAcquisitionUrl":{"shape":"ZeroTo512String"}, + "KeyStoragePolicy":{"shape":"KeyStoragePolicy"} + } + }, + "HlsContentProtectionMethod":{ + "type":"string", + "pattern":"(^aes-128$)" + }, + "HorizontalAlign":{ + "type":"string", + "pattern":"(^Left$)|(^Right$)|(^Center$)" + }, + "Id":{ + "type":"string", + "pattern":"^\\d{13}-\\w{6}$" + }, + "IncompatibleVersionException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Interlaced":{ + "type":"string", + "pattern":"(^auto$)|(^true$)|(^false$)" + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "Job":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "Arn":{"shape":"String"}, + "PipelineId":{"shape":"Id"}, + "Input":{"shape":"JobInput"}, + "Output":{"shape":"JobOutput"}, + "Outputs":{"shape":"JobOutputs"}, + "OutputKeyPrefix":{"shape":"Key"}, + "Playlists":{"shape":"Playlists"}, + "Status":{"shape":"JobStatus"}, + "UserMetadata":{"shape":"UserMetadata"}, + "Timing":{"shape":"Timing"} + } + }, + "JobAlbumArt":{ + "type":"structure", + "members":{ + "MergePolicy":{"shape":"MergePolicy"}, + "Artwork":{"shape":"Artworks"} + } + }, + "JobContainer":{ + "type":"string", + 
"pattern":"(^auto$)|(^3gp$)|(^asf$)|(^avi$)|(^divx$)|(^flv$)|(^mkv$)|(^mov$)|(^mp4$)|(^mpeg$)|(^mpeg-ps$)|(^mpeg-ts$)|(^mxf$)|(^ogg$)|(^ts$)|(^vob$)|(^wav$)|(^webm$)|(^mp3$)|(^m4a$)|(^aac$)" + }, + "JobInput":{ + "type":"structure", + "members":{ + "Key":{"shape":"Key"}, + "FrameRate":{"shape":"FrameRate"}, + "Resolution":{"shape":"Resolution"}, + "AspectRatio":{"shape":"AspectRatio"}, + "Interlaced":{"shape":"Interlaced"}, + "Container":{"shape":"JobContainer"}, + "Encryption":{"shape":"Encryption"}, + "DetectedProperties":{"shape":"DetectedProperties"} + } + }, + "JobOutput":{ + "type":"structure", + "members":{ + "Id":{"shape":"String"}, + "Key":{"shape":"Key"}, + "ThumbnailPattern":{"shape":"ThumbnailPattern"}, + "ThumbnailEncryption":{"shape":"Encryption"}, + "Rotate":{"shape":"Rotate"}, + "PresetId":{"shape":"Id"}, + "SegmentDuration":{"shape":"FloatString"}, + "Status":{"shape":"JobStatus"}, + "StatusDetail":{"shape":"Description"}, + "Duration":{"shape":"NullableLong"}, + "Width":{"shape":"NullableInteger"}, + "Height":{"shape":"NullableInteger"}, + "FrameRate":{"shape":"FloatString"}, + "FileSize":{"shape":"NullableLong"}, + "DurationMillis":{"shape":"NullableLong"}, + "Watermarks":{"shape":"JobWatermarks"}, + "AlbumArt":{"shape":"JobAlbumArt"}, + "Composition":{"shape":"Composition"}, + "Captions":{"shape":"Captions"}, + "Encryption":{"shape":"Encryption"}, + "AppliedColorSpaceConversion":{"shape":"String"} + } + }, + "JobOutputs":{ + "type":"list", + "member":{"shape":"JobOutput"} + }, + "JobStatus":{ + "type":"string", + "pattern":"(^Submitted$)|(^Progressing$)|(^Complete$)|(^Canceled$)|(^Error$)" + }, + "JobWatermark":{ + "type":"structure", + "members":{ + "PresetWatermarkId":{"shape":"PresetWatermarkId"}, + "InputKey":{"shape":"WatermarkKey"}, + "Encryption":{"shape":"Encryption"} + } + }, + "JobWatermarks":{ + "type":"list", + "member":{"shape":"JobWatermark"} + }, + "Jobs":{ + "type":"list", + "member":{"shape":"Job"} + }, + "JpgOrPng":{ + 
"type":"string", + "pattern":"(^jpg$)|(^png$)" + }, + "Key":{ + "type":"string", + "min":1, + "max":255 + }, + "KeyArn":{ + "type":"string", + "min":0, + "max":255 + }, + "KeyIdGuid":{ + "type":"string", + "pattern":"(^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}$)|(^[0-9A-Fa-f]{32}$)" + }, + "KeyStoragePolicy":{ + "type":"string", + "pattern":"(^NoStore$)|(^WithVariantPlaylists$)" + }, + "KeyframesMaxDist":{ + "type":"string", + "pattern":"^\\d{1,6}$" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "ListJobsByPipelineRequest":{ + "type":"structure", + "required":["PipelineId"], + "members":{ + "PipelineId":{ + "shape":"Id", + "location":"uri", + "locationName":"PipelineId" + }, + "Ascending":{ + "shape":"Ascending", + "location":"querystring", + "locationName":"Ascending" + }, + "PageToken":{ + "shape":"Id", + "location":"querystring", + "locationName":"PageToken" + } + } + }, + "ListJobsByPipelineResponse":{ + "type":"structure", + "members":{ + "Jobs":{"shape":"Jobs"}, + "NextPageToken":{"shape":"Id"} + } + }, + "ListJobsByStatusRequest":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"JobStatus", + "location":"uri", + "locationName":"Status" + }, + "Ascending":{ + "shape":"Ascending", + "location":"querystring", + "locationName":"Ascending" + }, + "PageToken":{ + "shape":"Id", + "location":"querystring", + "locationName":"PageToken" + } + } + }, + "ListJobsByStatusResponse":{ + "type":"structure", + "members":{ + "Jobs":{"shape":"Jobs"}, + "NextPageToken":{"shape":"Id"} + } + }, + "ListPipelinesRequest":{ + "type":"structure", + "members":{ + "Ascending":{ + "shape":"Ascending", + "location":"querystring", + "locationName":"Ascending" + }, + "PageToken":{ + "shape":"Id", + "location":"querystring", + "locationName":"PageToken" + } + } + }, + "ListPipelinesResponse":{ + "type":"structure", + "members":{ + 
"Pipelines":{"shape":"Pipelines"}, + "NextPageToken":{"shape":"Id"} + } + }, + "ListPresetsRequest":{ + "type":"structure", + "members":{ + "Ascending":{ + "shape":"Ascending", + "location":"querystring", + "locationName":"Ascending" + }, + "PageToken":{ + "shape":"Id", + "location":"querystring", + "locationName":"PageToken" + } + } + }, + "ListPresetsResponse":{ + "type":"structure", + "members":{ + "Presets":{"shape":"Presets"}, + "NextPageToken":{"shape":"Id"} + } + }, + "MaxFrameRate":{ + "type":"string", + "pattern":"(^10$)|(^15$)|(^23.97$)|(^24$)|(^25$)|(^29.97$)|(^30$)|(^50$)|(^60$)" + }, + "MergePolicy":{ + "type":"string", + "pattern":"(^Replace$)|(^Prepend$)|(^Append$)|(^Fallback$)" + }, + "Name":{ + "type":"string", + "min":1, + "max":40 + }, + "NonEmptyBase64EncodedString":{ + "type":"string", + "pattern":"(^(?:[A-Za-z0-9\\+/]{4})*(?:[A-Za-z0-9\\+/]{2}==|[A-Za-z0-9\\+/]{3}=)?$)" + }, + "Notifications":{ + "type":"structure", + "members":{ + "Progressing":{"shape":"SnsTopic"}, + "Completed":{"shape":"SnsTopic"}, + "Warning":{"shape":"SnsTopic"}, + "Error":{"shape":"SnsTopic"} + } + }, + "NullableInteger":{"type":"integer"}, + "NullableLong":{"type":"long"}, + "OneTo512String":{ + "type":"string", + "min":1, + "max":512 + }, + "Opacity":{ + "type":"string", + "pattern":"^\\d{1,3}(\\.\\d{0,20})?$" + }, + "OutputKeys":{ + "type":"list", + "member":{"shape":"Key"}, + "max":30 + }, + "PaddingPolicy":{ + "type":"string", + "pattern":"(^Pad$)|(^NoPad$)" + }, + "Permission":{ + "type":"structure", + "members":{ + "GranteeType":{"shape":"GranteeType"}, + "Grantee":{"shape":"Grantee"}, + "Access":{"shape":"AccessControls"} + } + }, + "Permissions":{ + "type":"list", + "member":{"shape":"Permission"}, + "max":30 + }, + "Pipeline":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "Arn":{"shape":"String"}, + "Name":{"shape":"Name"}, + "Status":{"shape":"PipelineStatus"}, + "InputBucket":{"shape":"BucketName"}, + "OutputBucket":{"shape":"BucketName"}, + 
"Role":{"shape":"Role"}, + "AwsKmsKeyArn":{"shape":"KeyArn"}, + "Notifications":{"shape":"Notifications"}, + "ContentConfig":{"shape":"PipelineOutputConfig"}, + "ThumbnailConfig":{"shape":"PipelineOutputConfig"} + } + }, + "PipelineOutputConfig":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"BucketName"}, + "StorageClass":{"shape":"StorageClass"}, + "Permissions":{"shape":"Permissions"} + } + }, + "PipelineStatus":{ + "type":"string", + "pattern":"(^Active$)|(^Paused$)" + }, + "Pipelines":{ + "type":"list", + "member":{"shape":"Pipeline"} + }, + "PixelsOrPercent":{ + "type":"string", + "pattern":"(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)" + }, + "PlayReadyDrm":{ + "type":"structure", + "members":{ + "Format":{"shape":"PlayReadyDrmFormatString"}, + "Key":{"shape":"NonEmptyBase64EncodedString"}, + "KeyMd5":{"shape":"NonEmptyBase64EncodedString"}, + "KeyId":{"shape":"KeyIdGuid"}, + "InitializationVector":{"shape":"ZeroTo255String"}, + "LicenseAcquisitionUrl":{"shape":"OneTo512String"} + } + }, + "PlayReadyDrmFormatString":{ + "type":"string", + "pattern":"(^microsoft$)|(^discretix-3.0$)" + }, + "Playlist":{ + "type":"structure", + "members":{ + "Name":{"shape":"Filename"}, + "Format":{"shape":"PlaylistFormat"}, + "OutputKeys":{"shape":"OutputKeys"}, + "HlsContentProtection":{"shape":"HlsContentProtection"}, + "PlayReadyDrm":{"shape":"PlayReadyDrm"}, + "Status":{"shape":"JobStatus"}, + "StatusDetail":{"shape":"Description"} + } + }, + "PlaylistFormat":{ + "type":"string", + "pattern":"(^HLSv3$)|(^HLSv4$)|(^Smooth$)" + }, + "Playlists":{ + "type":"list", + "member":{"shape":"Playlist"} + }, + "Preset":{ + "type":"structure", + "members":{ + "Id":{"shape":"Id"}, + "Arn":{"shape":"String"}, + "Name":{"shape":"Name"}, + "Description":{"shape":"Description"}, + "Container":{"shape":"PresetContainer"}, + "Audio":{"shape":"AudioParameters"}, + "Video":{"shape":"VideoParameters"}, + "Thumbnails":{"shape":"Thumbnails"}, + "Type":{"shape":"PresetType"} + } + }, + 
"PresetContainer":{ + "type":"string", + "pattern":"(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^flac$)|(^oga$)|(^ogg$)|(^fmp4$)|(^mpg$)|(^flv$)|(^gif$)|(^mxf$)" + }, + "PresetType":{ + "type":"string", + "pattern":"(^System$)|(^Custom$)" + }, + "PresetWatermark":{ + "type":"structure", + "members":{ + "Id":{"shape":"PresetWatermarkId"}, + "MaxWidth":{"shape":"PixelsOrPercent"}, + "MaxHeight":{"shape":"PixelsOrPercent"}, + "SizingPolicy":{"shape":"WatermarkSizingPolicy"}, + "HorizontalAlign":{"shape":"HorizontalAlign"}, + "HorizontalOffset":{"shape":"PixelsOrPercent"}, + "VerticalAlign":{"shape":"VerticalAlign"}, + "VerticalOffset":{"shape":"PixelsOrPercent"}, + "Opacity":{"shape":"Opacity"}, + "Target":{"shape":"Target"} + } + }, + "PresetWatermarkId":{ + "type":"string", + "min":1, + "max":40 + }, + "PresetWatermarks":{ + "type":"list", + "member":{"shape":"PresetWatermark"} + }, + "Presets":{ + "type":"list", + "member":{"shape":"Preset"} + }, + "ReadJobRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "ReadJobResponse":{ + "type":"structure", + "members":{ + "Job":{"shape":"Job"} + } + }, + "ReadPipelineRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "ReadPipelineResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"}, + "Warnings":{"shape":"Warnings"} + } + }, + "ReadPresetRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + } + } + }, + "ReadPresetResponse":{ + "type":"structure", + "members":{ + "Preset":{"shape":"Preset"} + } + }, + "Resolution":{ + "type":"string", + "pattern":"(^auto$)|(^\\d{1,5}x\\d{1,5}$)" + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + 
"ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Role":{ + "type":"string", + "pattern":"^arn:aws:iam::\\w{12}:role/.+$" + }, + "Rotate":{ + "type":"string", + "pattern":"(^auto$)|(^0$)|(^90$)|(^180$)|(^270$)" + }, + "SizingPolicy":{ + "type":"string", + "pattern":"(^Fit$)|(^Fill$)|(^Stretch$)|(^Keep$)|(^ShrinkToFit$)|(^ShrinkToFill$)" + }, + "SnsTopic":{ + "type":"string", + "pattern":"(^$)|(^arn:aws:sns:.*:\\w{12}:.+$)" + }, + "SnsTopics":{ + "type":"list", + "member":{"shape":"SnsTopic"}, + "max":30 + }, + "StorageClass":{ + "type":"string", + "pattern":"(^ReducedRedundancy$)|(^Standard$)" + }, + "String":{"type":"string"}, + "Success":{ + "type":"string", + "pattern":"(^true$)|(^false$)" + }, + "Target":{ + "type":"string", + "pattern":"(^Content$)|(^Frame$)" + }, + "TestRoleRequest":{ + "type":"structure", + "required":[ + "Role", + "InputBucket", + "OutputBucket", + "Topics" + ], + "members":{ + "Role":{"shape":"Role"}, + "InputBucket":{"shape":"BucketName"}, + "OutputBucket":{"shape":"BucketName"}, + "Topics":{"shape":"SnsTopics"} + } + }, + "TestRoleResponse":{ + "type":"structure", + "members":{ + "Success":{"shape":"Success"}, + "Messages":{"shape":"ExceptionMessages"} + } + }, + "ThumbnailPattern":{ + "type":"string", + "pattern":"(^$)|(^.*\\{count\\}.*$)" + }, + "ThumbnailResolution":{ + "type":"string", + "pattern":"^\\d{1,5}x\\d{1,5}$" + }, + "Thumbnails":{ + "type":"structure", + "members":{ + "Format":{"shape":"JpgOrPng"}, + "Interval":{"shape":"Digits"}, + "Resolution":{"shape":"ThumbnailResolution"}, + "AspectRatio":{"shape":"AspectRatio"}, + "MaxWidth":{"shape":"DigitsOrAuto"}, + "MaxHeight":{"shape":"DigitsOrAuto"}, + "SizingPolicy":{"shape":"SizingPolicy"}, + "PaddingPolicy":{"shape":"PaddingPolicy"} + } + }, + "Time":{ + "type":"string", + "pattern":"(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)" + }, + 
"TimeOffset":{ + "type":"string", + "pattern":"(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)" + }, + "TimeSpan":{ + "type":"structure", + "members":{ + "StartTime":{"shape":"Time"}, + "Duration":{"shape":"Time"} + } + }, + "Timing":{ + "type":"structure", + "members":{ + "SubmitTimeMillis":{"shape":"NullableLong"}, + "StartTimeMillis":{"shape":"NullableLong"}, + "FinishTimeMillis":{"shape":"NullableLong"} + } + }, + "UpdatePipelineNotificationsRequest":{ + "type":"structure", + "required":[ + "Id", + "Notifications" + ], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + }, + "Notifications":{"shape":"Notifications"} + } + }, + "UpdatePipelineNotificationsResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"} + } + }, + "UpdatePipelineRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + }, + "Name":{"shape":"Name"}, + "InputBucket":{"shape":"BucketName"}, + "Role":{"shape":"Role"}, + "AwsKmsKeyArn":{"shape":"KeyArn"}, + "Notifications":{"shape":"Notifications"}, + "ContentConfig":{"shape":"PipelineOutputConfig"}, + "ThumbnailConfig":{"shape":"PipelineOutputConfig"} + } + }, + "UpdatePipelineResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"}, + "Warnings":{"shape":"Warnings"} + } + }, + "UpdatePipelineStatusRequest":{ + "type":"structure", + "required":[ + "Id", + "Status" + ], + "members":{ + "Id":{ + "shape":"Id", + "location":"uri", + "locationName":"Id" + }, + "Status":{"shape":"PipelineStatus"} + } + }, + "UpdatePipelineStatusResponse":{ + "type":"structure", + "members":{ + "Pipeline":{"shape":"Pipeline"} + } + }, + "UserMetadata":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, 
+ "VerticalAlign":{ + "type":"string", + "pattern":"(^Top$)|(^Bottom$)|(^Center$)" + }, + "VideoBitRate":{ + "type":"string", + "pattern":"(^\\d{2,5}$)|(^auto$)" + }, + "VideoCodec":{ + "type":"string", + "pattern":"(^H\\.264$)|(^vp8$)|(^mpeg2$)|(^gif$)" + }, + "VideoParameters":{ + "type":"structure", + "members":{ + "Codec":{"shape":"VideoCodec"}, + "CodecOptions":{"shape":"CodecOptions"}, + "KeyframesMaxDist":{"shape":"KeyframesMaxDist"}, + "FixedGOP":{"shape":"FixedGOP"}, + "BitRate":{"shape":"VideoBitRate"}, + "FrameRate":{"shape":"FrameRate"}, + "MaxFrameRate":{"shape":"MaxFrameRate"}, + "Resolution":{"shape":"Resolution"}, + "AspectRatio":{"shape":"AspectRatio"}, + "MaxWidth":{"shape":"DigitsOrAuto"}, + "MaxHeight":{"shape":"DigitsOrAuto"}, + "DisplayAspectRatio":{"shape":"AspectRatio"}, + "SizingPolicy":{"shape":"SizingPolicy"}, + "PaddingPolicy":{"shape":"PaddingPolicy"}, + "Watermarks":{"shape":"PresetWatermarks"} + } + }, + "Warning":{ + "type":"structure", + "members":{ + "Code":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "Warnings":{ + "type":"list", + "member":{"shape":"Warning"} + }, + "WatermarkKey":{ + "type":"string", + "min":1, + "max":255, + "pattern":"(^.{1,}.jpg$)|(^.{1,}.jpeg$)|(^.{1,}.png$)" + }, + "WatermarkSizingPolicy":{ + "type":"string", + "pattern":"(^Fit$)|(^Stretch$)|(^ShrinkToFit$)" + }, + "ZeroTo255String":{ + "type":"string", + "min":0, + "max":255 + }, + "ZeroTo512String":{ + "type":"string", + "min":0, + "max":512 + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1152 @@ +{ + "version": "2.0", + "operations": { + "CancelJob": "

    The CancelJob operation cancels an unfinished job.

    You can only cancel a job that has a status of Submitted. To prevent a pipeline from starting to process a job while you're getting the job identifier, use UpdatePipelineStatus to temporarily pause the pipeline.", + "CreateJob": "

    When you create a job, Elastic Transcoder returns JSON data that includes the values that you specified plus information about the job that is created.

    If you have specified more than one output for your jobs (for example, one output for the Kindle Fire and another output for the Apple iPhone 4s), you currently must use the Elastic Transcoder API to list the jobs (as opposed to the AWS Console).

    ", + "CreatePipeline": "

    The CreatePipeline operation creates a pipeline with settings that you specify.

    ", + "CreatePreset": "

    The CreatePreset operation creates a preset with settings that you specify.

    Elastic Transcoder checks the CreatePreset settings to ensure that they meet Elastic Transcoder requirements and to determine whether they comply with H.264 standards. If your settings are not valid for Elastic Transcoder, Elastic Transcoder returns an HTTP 400 response (ValidationException) and does not create the preset. If the settings are valid for Elastic Transcoder but aren't strictly compliant with the H.264 standard, Elastic Transcoder creates the preset and returns a warning message in the response. This helps you determine whether your settings comply with the H.264 standard while giving you greater flexibility with respect to the video that Elastic Transcoder produces.

    Elastic Transcoder uses the H.264 video-compression format. For more information, see the International Telecommunication Union publication Recommendation ITU-T H.264: Advanced video coding for generic audiovisual services.

    ", + "DeletePipeline": "

    The DeletePipeline operation removes a pipeline.

    You can only delete a pipeline that has never been used or that is not currently in use (doesn't contain any active jobs). If the pipeline is currently in use, DeletePipeline returns an error.

    ", + "DeletePreset": "

    The DeletePreset operation removes a preset that you've added in an AWS region.

    You can't delete the default presets that are included with Elastic Transcoder.

    ", + "ListJobsByPipeline": "

    The ListJobsByPipeline operation gets a list of the jobs currently in a pipeline.

    Elastic Transcoder returns all of the jobs currently in the specified pipeline. The response body contains one element for each job that satisfies the search criteria.

    ", + "ListJobsByStatus": "

    The ListJobsByStatus operation gets a list of jobs that have a specified status. The response body contains one element for each job that satisfies the search criteria.

    ", + "ListPipelines": "

    The ListPipelines operation gets a list of the pipelines associated with the current AWS account.

    ", + "ListPresets": "

    The ListPresets operation gets a list of the default presets included with Elastic Transcoder and the presets that you've added in an AWS region.

    ", + "ReadJob": "

    The ReadJob operation returns detailed information about a job.

    ", + "ReadPipeline": "

    The ReadPipeline operation gets detailed information about a pipeline.

    ", + "ReadPreset": "

    The ReadPreset operation gets detailed information about a preset.

    ", + "TestRole": "

    The TestRole operation tests the IAM role used to create the pipeline.

    The TestRole action lets you determine whether the IAM role you are using has sufficient permissions to let Elastic Transcoder perform tasks associated with the transcoding process. The action attempts to assume the specified IAM role, checks read access to the input and output buckets, and tries to send a test notification to Amazon SNS topics that you specify.

    ", + "UpdatePipeline": "

    Use the UpdatePipeline operation to update settings for a pipeline. When you change pipeline settings, your changes take effect immediately. Jobs that you have already submitted and that Elastic Transcoder has not started to process are affected in addition to jobs that you submit after you change settings.

    ", + "UpdatePipelineNotifications": "

    With the UpdatePipelineNotifications operation, you can update Amazon Simple Notification Service (Amazon SNS) notifications for a pipeline.

    When you update notifications for a pipeline, Elastic Transcoder returns the values that you specified in the request.

    ", + "UpdatePipelineStatus": "

    The UpdatePipelineStatus operation pauses or reactivates a pipeline, so that the pipeline stops or restarts the processing of jobs.

    Changing the pipeline status is useful if you want to cancel one or more jobs. You can't cancel jobs after Elastic Transcoder has started processing them; if you pause the pipeline to which you submitted the jobs, you have more time to get the job IDs for the jobs that you want to cancel, and to send a CancelJob request.

    " + }, + "service": "AWS Elastic Transcoder Service

    The AWS Elastic Transcoder Service.

    ", + "shapes": { + "AccessControl": { + "base": null, + "refs": { + "AccessControls$member": null + } + }, + "AccessControls": { + "base": null, + "refs": { + "Permission$Access": "

    The permission that you want to give to the AWS user that is listed in Grantee. Valid values include:

    • READ: The grantee can read the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • READ_ACP: The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.

    " + } + }, + "AccessDeniedException": { + "base": "

    General authentication failure. The request was not signed correctly.

    ", + "refs": { + } + }, + "Artwork": { + "base": "

    The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20.

    To remove artwork or leave the artwork empty, you can either set Artwork to null, or set the Merge Policy to \"Replace\" and use an empty Artwork array.

    To pass through existing artwork unchanged, set the Merge Policy to \"Prepend\", \"Append\", or \"Fallback\", and use an empty Artwork array.

    ", + "refs": { + "Artworks$member": null + } + }, + "Artworks": { + "base": null, + "refs": { + "JobAlbumArt$Artwork": "

    The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20. Valid formats are .jpg and .png

    " + } + }, + "Ascending": { + "base": null, + "refs": { + "ListJobsByPipelineRequest$Ascending": "

    To list jobs in chronological order by the date and time that they were submitted, enter true. To list jobs in reverse chronological order, enter false.

    ", + "ListJobsByStatusRequest$Ascending": "

    To list jobs in chronological order by the date and time that they were submitted, enter true. To list jobs in reverse chronological order, enter false.

    ", + "ListPipelinesRequest$Ascending": "

    To list pipelines in chronological order by the date and time that they were created, enter true. To list pipelines in reverse chronological order, enter false.

    ", + "ListPresetsRequest$Ascending": "

    To list presets in chronological order by the date and time that they were created, enter true. To list presets in reverse chronological order, enter false.

    " + } + }, + "AspectRatio": { + "base": null, + "refs": { + "JobInput$AspectRatio": "

    The aspect ratio of the input file. If you want Elastic Transcoder to automatically detect the aspect ratio of the input file, specify auto. If you want to specify the aspect ratio for the output file, enter one of the following values:

    1:1, 4:3, 3:2, 16:9

    If you specify a value other than auto, Elastic Transcoder disables automatic detection of the aspect ratio.

    ", + "Thumbnails$AspectRatio": "

    To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

    The aspect ratio of thumbnails. Valid values include:

    auto, 1:1, 4:3, 3:2, 16:9

    If you specify auto, Elastic Transcoder tries to preserve the aspect ratio of the video in the output file.

    ", + "VideoParameters$AspectRatio": "

    To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

    The display aspect ratio of the video in the output file. Valid values include:

    auto, 1:1, 4:3, 3:2, 16:9

    If you specify auto, Elastic Transcoder tries to preserve the aspect ratio of the input file.

    If you specify an aspect ratio for the output file that differs from aspect ratio of the input file, Elastic Transcoder adds pillarboxing (black bars on the sides) or letterboxing (black bars on the top and bottom) to maintain the aspect ratio of the active region of the video.

    ", + "VideoParameters$DisplayAspectRatio": "

    The value that Elastic Transcoder adds to the metadata in the output file.

    " + } + }, + "AudioBitDepth": { + "base": null, + "refs": { + "AudioCodecOptions$BitDepth": "

    You can only choose an audio bit depth when you specify flac or pcm for the value of Audio:Codec.

    The bit depth of a sample is how many bits of information are included in the audio samples. The higher the bit depth, the better the audio, but the larger the file.

    Valid values are 16 and 24.

    The most common bit depth is 24.

    " + } + }, + "AudioBitOrder": { + "base": null, + "refs": { + "AudioCodecOptions$BitOrder": "

    You can only choose an audio bit order when you specify pcm for the value of Audio:Codec.

    The order the bits of a PCM sample are stored in.

    The supported value is LittleEndian.

    " + } + }, + "AudioBitRate": { + "base": null, + "refs": { + "AudioParameters$BitRate": "

    The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive.

    " + } + }, + "AudioChannels": { + "base": null, + "refs": { + "AudioParameters$Channels": "

    The number of audio channels in the output file. The following values are valid:

    auto, 0, 1, 2

    One channel carries the information played by a single speaker. For example, a stereo track with two channels sends one channel to the left speaker, and the other channel to the right speaker. The output channels are organized into tracks. If you want Elastic Transcoder to automatically detect the number of audio channels in the input file and use that value for the output file, select auto.

    The output of a specific channel value and inputs are as follows:

    • auto channel specified, with any input: Pass through up to eight input channels.
    • 0 channels specified, with any input: Audio omitted from the output.
    • 1 channel specified, with at least one input channel: Mono sound.
    • 2 channels specified, with any input: Two identical mono channels or stereo. For more information about tracks, see Audio:AudioPackingMode.

    For more information about how Elastic Transcoder organizes channels and tracks, see Audio:AudioPackingMode.

    " + } + }, + "AudioCodec": { + "base": null, + "refs": { + "AudioParameters$Codec": "

    The audio codec for the output file. Valid values include aac, flac, mp2, mp3, pcm, and vorbis.

    " + } + }, + "AudioCodecOptions": { + "base": "

    Options associated with your audio codec.

    ", + "refs": { + "AudioParameters$CodecOptions": "

    If you specified AAC for Audio:Codec, this is the AAC compression profile to use. Valid values include:

    auto, AAC-LC, HE-AAC, HE-AACv2

    If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

    " + } + }, + "AudioCodecProfile": { + "base": null, + "refs": { + "AudioCodecOptions$Profile": "

    You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

    Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

    • auto: If you specify auto, Elastic Transcoder will select the profile based on the bit rate selected for the output file.
    • AAC-LC: The most common AAC profile. Use for bit rates larger than 64 kbps.
    • HE-AAC: Not supported on some older players and devices. Use for bit rates between 40 and 80 kbps.
    • HE-AACv2: Not supported on some players and devices. Use for bit rates less than 48 kbps.

    All outputs in a Smooth playlist must have the same value for Profile.

    If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated your presets to use AAC-LC. You can change the value as required.

    " + } + }, + "AudioPackingMode": { + "base": null, + "refs": { + "AudioParameters$AudioPackingMode": "

    The method of organizing audio channels and tracks. Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack.

    The following values are valid:

    SingleTrack, OneChannelPerTrack, and OneChannelPerTrackWithMosTo8Tracks

    When you specify SingleTrack, Elastic Transcoder creates a single track for your output. The track can have up to eight channels. Use SingleTrack for all non-mxf containers.

    The outputs of SingleTrack for a specific channel value and inputs are as follows:

    • 0 channels with any input: Audio omitted from the output
    • 1, 2, or auto channels with no audio input: Audio omitted from the output
    • 1 channel with any input with audio: One track with one channel, downmixed if necessary
    • 2 channels with one track with one channel: One track with two identical channels
    • 2 or auto channels with two tracks with one channel each: One track with two channels
    • 2 or auto channels with one track with two channels: One track with two channels
    • 2 channels with one track with multiple channels: One track with two channels
    • auto channels with one track with one channel: One track with one channel
    • auto channels with one track with multiple channels: One track with multiple channels

    When you specify OneChannelPerTrack, Elastic Transcoder creates a new track for every channel in your output. Your output can have up to eight single-channel tracks.

    The outputs of OneChannelPerTrack for a specific channel value and inputs are as follows:

    • 0 channels with any input: Audio omitted from the output
    • 1, 2, or auto channels with no audio input: Audio omitted from the output
    • 1 channel with any input with audio: One track with one channel, downmixed if necessary
    • 2 channels with one track with one channel: Two tracks with one identical channel each
    • 2 or auto channels with two tracks with one channel each: Two tracks with one channel each
    • 2 or auto channels with one track with two channels: Two tracks with one channel each
    • 2 channels with one track with multiple channels: Two tracks with one channel each
    • auto channels with one track with one channel: One track with one channel
    • auto channels with one track with multiple channels: Up to eight tracks with one channel each

    When you specify OneChannelPerTrackWithMosTo8Tracks, Elastic Transcoder creates eight single-channel tracks for your output. All tracks that do not contain audio data from an input channel are MOS, or Mit Out Sound, tracks.

    The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel value and inputs are as follows:

    • 0 channels with any input: Audio omitted from the output
    • 1, 2, or auto channels with no audio input: Audio omitted from the output
    • 1 channel with any input with audio: One track with one channel, downmixed if necessary, plus six MOS tracks
    • 2 channels with one track with one channel: Two tracks with one identical channel each, plus six MOS tracks
    • 2 or auto channels with two tracks with one channel each: Two tracks with one channel each, plus six MOS tracks
    • 2 or auto channels with one track with two channels: Two tracks with one channel each, plus six MOS tracks
    • 2 channels with one track with multiple channels: Two tracks with one channel each, plus six MOS tracks
    • auto channels with one track with one channel: One track with one channel, plus seven MOS tracks
    • auto channels with one track with multiple channels: Up to eight tracks with one channel each, plus MOS tracks until there are eight tracks in all
    " + } + }, + "AudioParameters": { + "base": "

    Parameters required for transcoding audio.

    ", + "refs": { + "CreatePresetRequest$Audio": "

    A section of the request body that specifies the audio parameters.

    ", + "Preset$Audio": "

    A section of the response body that provides information about the audio preset values.

    " + } + }, + "AudioSampleRate": { + "base": null, + "refs": { + "AudioParameters$SampleRate": "

    The sample rate of the audio stream in the output file, in Hertz. Valid values include:

    auto, 22050, 32000, 44100, 48000, 96000

    If you specify auto, Elastic Transcoder automatically detects the sample rate.

    " + } + }, + "AudioSigned": { + "base": null, + "refs": { + "AudioCodecOptions$Signed": "

    You can only choose whether an audio sample is signed when you specify pcm for the value of Audio:Codec.

    Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned).

    The supported value is Signed.

    " + } + }, + "Base64EncodedString": { + "base": null, + "refs": { + "Encryption$Key": "

    The data encryption key that you want Elastic Transcoder to use to encrypt your output file, or that was used to encrypt your input file. The key must be base64-encoded and it must be one of the following bit lengths before being base64-encoded:

    128, 192, or 256.

    The key must also be encrypted by using the Amazon Key Management Service.

    ", + "Encryption$KeyMd5": "

    The MD5 digest of the key that you used to encrypt your input file, or that you want Elastic Transcoder to use to encrypt your output file. Elastic Transcoder uses the key digest as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.

    ", + "HlsContentProtection$Key": "

    If you want Elastic Transcoder to generate a key for you, leave this field blank.

    If you choose to supply your own key, you must encrypt the key by using AWS KMS. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded:

    128, 192, or 256.

    ", + "HlsContentProtection$KeyMd5": "

    If Elastic Transcoder is generating your key for you, you must leave this field blank.

    The MD5 digest of the key that you want Elastic Transcoder to use to encrypt your output file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

    " + } + }, + "BucketName": { + "base": null, + "refs": { + "CreatePipelineRequest$InputBucket": "

    The Amazon S3 bucket in which you saved the media files that you want to transcode.

    ", + "CreatePipelineRequest$OutputBucket": "

    The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. (Use this, or use ContentConfig:Bucket plus ThumbnailConfig:Bucket.)

    Specify this value when all of the following are true:

    • You want to save transcoded files, thumbnails (if any), and playlists (if any) together in one bucket.
    • You do not want to specify the users or groups who have access to the transcoded files, thumbnails, and playlists.
    • You do not want to specify the permissions that Elastic Transcoder grants to the files. When Elastic Transcoder saves files in OutputBucket, it grants full control over the files only to the AWS account that owns the role that is specified by Role.
    • You want to associate the transcoded files and thumbnails with the Amazon S3 Standard storage class.

    If you want to save transcoded files and playlists in one bucket and thumbnails in another bucket, specify which users can access the transcoded files or the permissions the users have, or change the Amazon S3 storage class, omit OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.

    ", + "Pipeline$InputBucket": "

    The Amazon S3 bucket from which Elastic Transcoder gets media files for transcoding and the graphics files, if any, that you want to use for watermarks.

    ", + "Pipeline$OutputBucket": "

    The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files, thumbnails, and playlists. Either you specify this value, or you specify both ContentConfig and ThumbnailConfig.

    ", + "PipelineOutputConfig$Bucket": "

    The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. Specify this value when all of the following are true:

    • You want to save transcoded files, thumbnails (if any), and playlists (if any) together in one bucket.
    • You do not want to specify the users or groups who have access to the transcoded files, thumbnails, and playlists.
    • You do not want to specify the permissions that Elastic Transcoder grants to the files.
    • You want to associate the transcoded files and thumbnails with the Amazon S3 Standard storage class.
    If you want to save transcoded files and playlists in one bucket and thumbnails in another bucket, specify which users can access the transcoded files or the permissions the users have, or change the Amazon S3 storage class, omit OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.

    ", + "TestRoleRequest$InputBucket": "

    The Amazon S3 bucket that contains media files to be transcoded. The action attempts to read from this bucket.

    ", + "TestRoleRequest$OutputBucket": "

    The Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to read from this bucket.

    ", + "UpdatePipelineRequest$InputBucket": "

    The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks.

    " + } + }, + "CancelJobRequest": { + "base": "

    The CancelJobRequest structure.

    ", + "refs": { + } + }, + "CancelJobResponse": { + "base": "

    The response body contains a JSON object. If the job is successfully canceled, the value of Success is true.

    ", + "refs": { + } + }, + "CaptionFormat": { + "base": "

    The file format of the output captions. If you leave this value blank, Elastic Transcoder returns an error.

    ", + "refs": { + "CaptionFormats$member": null + } + }, + "CaptionFormatFormat": { + "base": null, + "refs": { + "CaptionFormat$Format": "

    The format you specify determines whether Elastic Transcoder generates an embedded or sidecar caption for this output.

    • Valid Embedded Caption Formats:

      • for FLAC: None

      • For MP3: None

      • For MP4: mov-text

      • For MPEG-TS: None

      • For ogg: None

      • For webm: None

    • Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first div element only), scc, srt, and webvtt. If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

      • For FMP4: dfxp

      • Non-FMP4 outputs: All sidecar types

      fmp4 captions have an extension of .ismt

    " + } + }, + "CaptionFormatPattern": { + "base": null, + "refs": { + "CaptionFormat$Pattern": "

    The prefix for caption filenames, in the form description-{language}, where:

    • description is a description of the video.
    • {language} is a literal value that Elastic Transcoder replaces with the two- or three-letter code for the language of the caption in the output file names.

    If you don't include {language} in the file name pattern, Elastic Transcoder automatically appends \"{language}\" to the value that you specify for the description. In addition, Elastic Transcoder automatically appends the count to the end of the segment files.

    For example, suppose you're transcoding into srt format. When you enter \"Sydney-{language}-sunrise\", and the language of the captions is English (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

    " + } + }, + "CaptionFormats": { + "base": null, + "refs": { + "Captions$CaptionFormats": "

    The array of file formats for the output captions. If you leave this value blank, Elastic Transcoder returns an error.

    " + } + }, + "CaptionMergePolicy": { + "base": null, + "refs": { + "Captions$MergePolicy": "

    A policy that determines how Elastic Transcoder handles the existence of multiple captions.

    • MergeOverride: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the embedded captions for that language.

    • MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the sidecar captions for that language. If CaptionSources is empty, Elastic Transcoder omits all sidecar captions from the output files.

    • Override: Elastic Transcoder transcodes only the sidecar captions that you specify in CaptionSources.

    MergePolicy cannot be null.

    " + } + }, + "CaptionSource": { + "base": "

    A source file for the input sidecar captions used during the transcoding process.

    ", + "refs": { + "CaptionSources$member": null + } + }, + "CaptionSources": { + "base": null, + "refs": { + "Captions$CaptionSources": "

    Source files for the input sidecar captions used during the transcoding process. To omit all sidecar captions, leave CaptionSources blank.

    " + } + }, + "Captions": { + "base": "

    The captions to be created, if any.

    ", + "refs": { + "CreateJobOutput$Captions": "

    You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

    • Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file.

      Valid input values include: CEA-608 (EIA-608, first non-empty channel only), CEA-708 (EIA-708, first non-empty channel only), and mov-text

      Valid outputs include: mov-text

      Elastic Transcoder supports a maximum of one embedded format per output.

    • Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file.

      Valid input values include: dfxp (first div element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and webvtt

      Valid outputs include: dfxp (first div element only), scc, srt, and webvtt.

    If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

    Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process.

    To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a null CaptionSources array.

    For more information on embedded files, see the Subtitles Wikipedia page.

    For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

    ", + "JobOutput$Captions": "

    You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

    • Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file.

      Valid input values include: CEA-608 (EIA-608, first non-empty channel only), CEA-708 (EIA-708, first non-empty channel only), and mov-text

      Valid outputs include: mov-text

      Elastic Transcoder supports a maximum of one embedded format per output.

    • Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file.

      Valid input values include: dfxp (first div element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and webvtt

      Valid outputs include: dfxp (first div element only), scc, srt, and webvtt.

    If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

    Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process.

    To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a null CaptionSources array.

    For more information on embedded files, see the Subtitles Wikipedia page.

    For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

    " + } + }, + "Clip": { + "base": "

    Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

    ", + "refs": { + "Composition$member": null + } + }, + "CodecOption": { + "base": null, + "refs": { + "CodecOptions$key": null, + "CodecOptions$value": null + } + }, + "CodecOptions": { + "base": null, + "refs": { + "VideoParameters$CodecOptions": "

    Profile (H.264/VP8 Only)

    The H.264 profile that you want to use for the output file. Elastic Transcoder supports the following profiles:

    • baseline: The profile most commonly used for videoconferencing and for mobile applications.
    • main: The profile used for standard-definition digital TV broadcasts.
    • high: The profile used for high-definition digital TV broadcasts and for Blu-ray discs.

    Level (H.264 Only)

    The H.264 level that you want to use for the output file. Elastic Transcoder supports the following levels:

    1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1

    MaxReferenceFrames (H.264 Only)

    Applicable only when the value of Video:Codec is H.264. The maximum number of previously decoded frames to use as a reference for decoding future frames. Valid values are integers 0 through 16, but we recommend that you not use a value greater than the following:

    Min(Floor(Maximum decoded picture buffer in macroblocks * 256 / (Width in pixels * Height in pixels)), 16)

    where Width in pixels and Height in pixels represent either MaxWidth and MaxHeight, or Resolution. Maximum decoded picture buffer in macroblocks depends on the value of the Level object. See the list below. (A macroblock is a block of pixels measuring 16x16.)

    • 1 - 396
    • 1b - 396
    • 1.1 - 900
    • 1.2 - 2376
    • 1.3 - 2376
    • 2 - 2376
    • 2.1 - 4752
    • 2.2 - 8100
    • 3 - 8100
    • 3.1 - 18000
    • 3.2 - 20480
    • 4 - 32768
    • 4.1 - 32768

    MaxBitRate (Optional, H.264/MPEG2/VP8 only)

    The maximum number of bits per second in a video buffer; the size of the buffer is specified by BufferSize. Specify a value between 16 and 62,500. You can reduce the bandwidth required to stream a video by reducing the maximum bit rate, but this also reduces the quality of the video.

    BufferSize (Optional, H.264/MPEG2/VP8 only)

    The maximum number of bits in any x seconds of the output video. This window is commonly 10 seconds, the standard segment duration when you're using FMP4 or MPEG-TS for the container type of the output video. Specify an integer greater than 0. If you specify MaxBitRate and omit BufferSize, Elastic Transcoder sets BufferSize to 10 times the value of MaxBitRate.

    InterlacedMode (Optional, H.264/MPEG2 Only)

    The interlace mode for the output video.

    Interlaced video is used to double the perceived frame rate for a video by interlacing two fields (one field on every other line, the other field on the other lines) so that the human eye registers multiple pictures per frame. Interlacing reduces the bandwidth required for transmitting a video, but can result in blurred images and flickering.

    Valid values include Progressive (no interlacing, top to bottom), TopFirst (top field first), BottomFirst (bottom field first), and Auto.

    If InterlaceMode is not specified, Elastic Transcoder uses Progressive for the output. If Auto is specified, Elastic Transcoder interlaces the output.

    ColorSpaceConversionMode (Optional, H.264/MPEG2 Only)

    The color space conversion Elastic Transcoder applies to the output video. Color spaces are the algorithms used by the computer to store information about how to render color. Bt.601 is the standard for standard definition video, while Bt.709 is the standard for high definition video.

    Valid values include None, Bt709toBt601, Bt601toBt709, and Auto.

    If you chose Auto for ColorSpaceConversionMode and your output is interlaced, your frame rate is one of 23.97, 24, 25, 29.97, 50, or 60, your SegmentDuration is null, and you are using one of the resolution changes from the list below, Elastic Transcoder applies the following color space conversions:

    • Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709
    • Standard to HD, 720x576 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709
    • HD to Standard, 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601
    • HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601
    Elastic Transcoder may change the behavior of the ColorspaceConversionMode Auto mode in the future. All outputs in a playlist must use the same ColorSpaceConversionMode.

    If you do not specify a ColorSpaceConversionMode, Elastic Transcoder does not change the color space of a file. If you are unsure what ColorSpaceConversionMode was applied to your output file, you can check the AppliedColorSpaceConversion parameter included in your job response. If your job does not have an AppliedColorSpaceConversion in its response, no ColorSpaceConversionMode was applied.

    ChromaSubsampling

    The sampling pattern for the chroma (color) channels of the output video. Valid values include yuv420p and yuv422p.

    yuv420p samples the chroma information of every other horizontal and every other vertical line, yuv422p samples the color information of every horizontal line and every other vertical line.

    LoopCount (Gif Only)

    The number of times you want the output gif to loop. Valid values include Infinite and integers between 0 and 100, inclusive.

    " + } + }, + "Composition": { + "base": null, + "refs": { + "CreateJobOutput$Composition": "

    You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

    ", + "JobOutput$Composition": "

    You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

    " + } + }, + "CreateJobOutput": { + "base": "

    The CreateJobOutput structure.

    ", + "refs": { + "CreateJobOutputs$member": null, + "CreateJobRequest$Output": null + } + }, + "CreateJobOutputs": { + "base": null, + "refs": { + "CreateJobRequest$Outputs": "

    A section of the request body that provides information about the transcoded (target) files. We recommend that you use the Outputs syntax instead of the Output syntax.

    " + } + }, + "CreateJobPlaylist": { + "base": "

    Information about the master playlist.

    ", + "refs": { + "CreateJobPlaylists$member": null + } + }, + "CreateJobPlaylists": { + "base": null, + "refs": { + "CreateJobRequest$Playlists": "

    If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create.

    The maximum number of master playlists in a job is 30.

    " + } + }, + "CreateJobRequest": { + "base": "

    The CreateJobRequest structure.

    ", + "refs": { + } + }, + "CreateJobResponse": { + "base": "

    The CreateJobResponse structure.

    ", + "refs": { + } + }, + "CreatePipelineRequest": { + "base": "

    The CreatePipelineRequest structure.

    ", + "refs": { + } + }, + "CreatePipelineResponse": { + "base": "

    When you create a pipeline, Elastic Transcoder returns the values that you specified in the request.

    ", + "refs": { + } + }, + "CreatePresetRequest": { + "base": "

    The CreatePresetRequest structure.

    ", + "refs": { + } + }, + "CreatePresetResponse": { + "base": "

    The CreatePresetResponse structure.

    ", + "refs": { + } + }, + "DeletePipelineRequest": { + "base": "

    The DeletePipelineRequest structure.

    ", + "refs": { + } + }, + "DeletePipelineResponse": { + "base": "

    The DeletePipelineResponse structure.

    ", + "refs": { + } + }, + "DeletePresetRequest": { + "base": "

    The DeletePresetRequest structure.

    ", + "refs": { + } + }, + "DeletePresetResponse": { + "base": "

    The DeletePresetResponse structure.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "CreatePresetRequest$Description": "

    A description of the preset.

    ", + "JobOutput$StatusDetail": "

    Information that further explains Status.

    ", + "Playlist$StatusDetail": "

    Information that further explains the status.

    ", + "Preset$Description": "

    A description of the preset.

    " + } + }, + "DetectedProperties": { + "base": "

    The detected properties of the input file. Elastic Transcoder identifies these values from the input file.

    ", + "refs": { + "JobInput$DetectedProperties": "

    The detected properties of the input file.

    " + } + }, + "Digits": { + "base": null, + "refs": { + "Thumbnails$Interval": "

    The approximate number of seconds between thumbnails. Specify an integer value.

    " + } + }, + "DigitsOrAuto": { + "base": null, + "refs": { + "Artwork$MaxWidth": "

    The maximum width of the output album art in pixels. If you specify auto, Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive.

    ", + "Artwork$MaxHeight": "

    The maximum height of the output album art in pixels. If you specify auto, Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive.

    ", + "Thumbnails$MaxWidth": "

    The maximum width of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096.

    ", + "Thumbnails$MaxHeight": "

    The maximum height of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072.

    ", + "VideoParameters$MaxWidth": "

    The maximum width of the output video in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 128 and 4096.

    ", + "VideoParameters$MaxHeight": "

    The maximum height of the output video in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 96 and 3072.

    " + } + }, + "Encryption": { + "base": "

    The encryption settings, if any, that are used for decrypting your input files or encrypting your output files. If your input file is encrypted, you must specify the mode that Elastic Transcoder will use to decrypt your file, otherwise you must specify the mode you want Elastic Transcoder to use to encrypt your output files.

    ", + "refs": { + "Artwork$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your artwork.

    ", + "CaptionFormat$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your caption formats.

    ", + "CaptionSource$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your caption sources.

    ", + "CreateJobOutput$ThumbnailEncryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.

    ", + "CreateJobOutput$Encryption": "

    You can specify encryption settings for any output files that you want to use for a transcoding job. This includes the output file and any watermarks, thumbnails, album art, or captions that you want to use. You must specify encryption settings for each file individually.

    ", + "JobInput$Encryption": "

    The encryption settings, if any, that are used for decrypting your input files. If your input file is encrypted, you must specify the mode that Elastic Transcoder will use to decrypt your file.

    ", + "JobOutput$ThumbnailEncryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.

    ", + "JobOutput$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your output files. If you choose to use encryption, you must specify a mode to use. If you choose not to use encryption, Elastic Transcoder will write an unencrypted file to your Amazon S3 bucket.

    ", + "JobWatermark$Encryption": "

    The encryption settings, if any, that you want Elastic Transcoder to apply to your watermarks.

    " + } + }, + "EncryptionMode": { + "base": null, + "refs": { + "Encryption$Mode": "

    The specific server-side encryption mode that you want Elastic Transcoder to use when decrypting your input files or encrypting your output files. Elastic Transcoder supports the following options:

    • S3: Amazon S3 creates and manages the keys used for encrypting your files.

    • S3-AWS-KMS: Amazon S3 calls the Amazon Key Management Service, which creates and manages the keys that are used for encrypting your files. If you specify S3-AWS-KMS and you don't want to use the default key, you must add the AWS-KMS key that you want to use to your pipeline.

    • AES-CBC-PKCS7: A padded cipher-block mode of operation originally used for HLS files.

    • AES-CTR: AES Counter Mode.

    • AES-GCM: AES Galois Counter Mode, a mode of operation that is an authenticated encryption format, meaning that a file, key, or initialization vector that has been tampered with will fail the decryption process.

    For all three AES options, you must provide the following settings, which must be base64-encoded:

    • Key

    • Key MD5

    • Initialization Vector

    For the AES modes, your private encryption keys and your unencrypted data are never stored by AWS; therefore, it is important that you safely manage your encryption keys. If you lose them, you won't be able to unencrypt your data.

    " + } + }, + "ExceptionMessages": { + "base": null, + "refs": { + "TestRoleResponse$Messages": "

    If the Success element contains false, this value is an array of one or more error messages that were generated during the test process.

    " + } + }, + "Filename": { + "base": null, + "refs": { + "CreateJobPlaylist$Name": "

    The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.

    Note: Elastic Transcoder automatically appends the relevant file extension to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc for Smooth playlists). If you include a file extension in Name, the file name will have two extensions.

    ", + "Playlist$Name": "

    The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.

    Note: Elastic Transcoder automatically appends the relevant file extension to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc for Smooth playlists). If you include a file extension in Name, the file name will have two extensions.

    " + } + }, + "FixedGOP": { + "base": null, + "refs": { + "VideoParameters$FixedGOP": "

    Applicable only when the value of Video:Codec is one of H.264, MPEG2, or VP8.

    Whether to use a fixed value for FixedGOP. Valid values are true and false:

    • true: Elastic Transcoder uses the value of KeyframesMaxDist for the distance between key frames (the number of frames in a group of pictures, or GOP).
    • false: The distance between key frames can vary.

    FixedGOP must be set to true for fmp4 containers.

    " + } + }, + "FloatString": { + "base": null, + "refs": { + "CreateJobOutput$SegmentDuration": "

    (Outputs in Fragmented MP4 or MPEG-TS format only.If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration, though individual segments might be shorter or longer.

    The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration, the duration of the last segment is the remainder of total length/SegmentDuration.

    Elastic Transcoder creates an output-specific playlist for each output HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.

    ", + "DetectedProperties$FrameRate": "

    The detected frame rate of the input file, in frames per second.

    ", + "JobOutput$SegmentDuration": "

    (Outputs in Fragmented MP4 or MPEG-TS format only.If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration, though individual segments might be shorter or longer.

    The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration, the duration of the last segment is the remainder of total length/SegmentDuration.

    Elastic Transcoder creates an output-specific playlist for each output HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.

    ", + "JobOutput$FrameRate": "

    Frame rate of the output file, in frames per second.

    " + } + }, + "FrameRate": { + "base": null, + "refs": { + "JobInput$FrameRate": "

    The frame rate of the input file. If you want Elastic Transcoder to automatically detect the frame rate of the input file, specify auto. If you want to specify the frame rate for the input file, enter one of the following values:

    10, 15, 23.97, 24, 25, 29.97, 30, 60

    If you specify a value other than auto, Elastic Transcoder disables automatic detection of the frame rate.

    ", + "VideoParameters$FrameRate": "

    The frames per second for the video stream in the output file. Valid values include:

    auto, 10, 15, 23.97, 24, 25, 29.97, 30, 60

    If you specify auto, Elastic Transcoder uses the detected frame rate of the input source. If you specify a frame rate, we recommend that you perform the following calculation:

    Frame rate = maximum recommended decoding speed in luma samples/second / (width in pixels * height in pixels)

    where:

    • width in pixels and height in pixels represent the Resolution of the output video.
    • maximum recommended decoding speed in Luma samples/second is less than or equal to the maximum value listed in the following table, based on the value that you specified for Level.

    The maximum recommended decoding speed in Luma samples/second for each level is described in the following list (Level - Decoding speed):

    • 1 - 380160
    • 1b - 380160
    • 1.1 - 76800
    • 1.2 - 1536000
    • 1.3 - 3041280
    • 2 - 3041280
    • 2.1 - 5068800
    • 2.2 - 5184000
    • 3 - 10368000
    • 3.1 - 27648000
    • 3.2 - 55296000
    • 4 - 62914560
    • 4.1 - 62914560
    " + } + }, + "Grantee": { + "base": null, + "refs": { + "Permission$Grantee": "

    The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.

    " + } + }, + "GranteeType": { + "base": null, + "refs": { + "Permission$GranteeType": "

    The type of value that appears in the Grantee object:

    • Canonical: Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. A canonical user ID is not the same as an AWS account number.
    • Email: The registered email address of an AWS account.
    • Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.

    " + } + }, + "HlsContentProtection": { + "base": "

    The HLS content protection settings, if any, that you want Elastic Transcoder to apply to your output files.

    ", + "refs": { + "CreateJobPlaylist$HlsContentProtection": "

    The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    ", + "Playlist$HlsContentProtection": "

    The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    " + } + }, + "HlsContentProtectionMethod": { + "base": null, + "refs": { + "HlsContentProtection$Method": "

    The content protection method for your output. The only valid value is: aes-128.

    This value will be written into the method attribute of the EXT-X-KEY metadata tag in the output playlist.

    " + } + }, + "HorizontalAlign": { + "base": null, + "refs": { + "PresetWatermark$HorizontalAlign": "

    The horizontal position of the watermark unless you specify a non-zero value for HorizontalOffset:

    • Left: The left edge of the watermark is aligned with the left border of the video.
    • Right: The right edge of the watermark is aligned with the right border of the video.
    • Center: The watermark is centered between the left and right borders.

    " + } + }, + "Id": { + "base": null, + "refs": { + "CancelJobRequest$Id": "

    The identifier of the job that you want to cancel.

    To get a list of the jobs (including their jobId) that have a status of Submitted, use the ListJobsByStatus API action.

    ", + "CreateJobOutput$PresetId": "

    The Id of the preset to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding.

    ", + "CreateJobRequest$PipelineId": "

    The Id of the pipeline that you want Elastic Transcoder to use for transcoding. The pipeline determines several settings, including the Amazon S3 bucket from which Elastic Transcoder gets the files to transcode and the bucket into which Elastic Transcoder puts the transcoded files.

    ", + "DeletePipelineRequest$Id": "

    The identifier of the pipeline that you want to delete.

    ", + "DeletePresetRequest$Id": "

    The identifier of the preset for which you want to get detailed information.

    ", + "Job$Id": "

    The identifier that Elastic Transcoder assigned to the job. You use this value to get settings for the job or to delete the job.

    ", + "Job$PipelineId": "

    The Id of the pipeline that you want Elastic Transcoder to use for transcoding. The pipeline determines several settings, including the Amazon S3 bucket from which Elastic Transcoder gets the files to transcode and the bucket into which Elastic Transcoder puts the transcoded files.

    ", + "JobOutput$PresetId": "

    The value of the Id object for the preset that you want to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding. To use a preset that you created, specify the preset ID that Elastic Transcoder returned in the response when you created the preset. You can also use the Elastic Transcoder system presets, which you can get with ListPresets.

    ", + "ListJobsByPipelineRequest$PipelineId": "

    The ID of the pipeline for which you want to get job information.

    ", + "ListJobsByPipelineRequest$PageToken": "

    When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

    ", + "ListJobsByPipelineResponse$NextPageToken": "

    A value that you use to access the second and subsequent pages of results, if any. When the jobs in the specified pipeline fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

    ", + "ListJobsByStatusRequest$PageToken": "

    When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

    ", + "ListJobsByStatusResponse$NextPageToken": "

    A value that you use to access the second and subsequent pages of results, if any. When the jobs in the specified pipeline fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

    ", + "ListPipelinesRequest$PageToken": "

    When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

    ", + "ListPipelinesResponse$NextPageToken": "

    A value that you use to access the second and subsequent pages of results, if any. When the pipelines fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

    ", + "ListPresetsRequest$PageToken": "

    When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

    ", + "ListPresetsResponse$NextPageToken": "

    A value that you use to access the second and subsequent pages of results, if any. When the presets fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

    ", + "Pipeline$Id": "

    The identifier for the pipeline. You use this value to identify the pipeline in which you want to perform a variety of operations, such as creating a job or a preset.

    ", + "Preset$Id": "

    Identifier for the new preset. You use this value to get settings for the preset or to delete it.

    ", + "ReadJobRequest$Id": "

    The identifier of the job for which you want to get detailed information.

    ", + "ReadPipelineRequest$Id": "

    The identifier of the pipeline to read.

    ", + "ReadPresetRequest$Id": "

    The identifier of the preset for which you want to get detailed information.

    ", + "UpdatePipelineNotificationsRequest$Id": "

    The identifier of the pipeline for which you want to change notification settings.

    ", + "UpdatePipelineRequest$Id": "

    The ID of the pipeline that you want to update.

    ", + "UpdatePipelineStatusRequest$Id": "

    The identifier of the pipeline to update.

    " + } + }, + "IncompatibleVersionException": { + "base": null, + "refs": { + } + }, + "Interlaced": { + "base": null, + "refs": { + "JobInput$Interlaced": "

    Whether the input file is interlaced. If you want Elastic Transcoder to automatically detect whether the input file is interlaced, specify auto. If you want to specify whether the input file is interlaced, enter one of the following values:

    true, false

    If you specify a value other than auto, Elastic Transcoder disables automatic detection of interlacing.

    " + } + }, + "InternalServiceException": { + "base": "

    Elastic Transcoder encountered an unexpected exception while trying to fulfill the request.

    ", + "refs": { + } + }, + "Job": { + "base": "

    A section of the response body that provides information about the job that is created.

    ", + "refs": { + "CreateJobResponse$Job": "

    A section of the response body that provides information about the job that is created.

    ", + "Jobs$member": null, + "ReadJobResponse$Job": "

    A section of the response body that provides information about the job.

    " + } + }, + "JobAlbumArt": { + "base": "

    The .jpg or .png file associated with an audio file.

    ", + "refs": { + "CreateJobOutput$AlbumArt": "

    Information about the album art that you want Elastic Transcoder to add to the file during transcoding. You can specify up to twenty album artworks for each output. Settings for each artwork must be defined in the job for the current output.

    ", + "JobOutput$AlbumArt": "

    The album art to be associated with the output file, if any.

    " + } + }, + "JobContainer": { + "base": null, + "refs": { + "JobInput$Container": "

    The container type for the input file. If you want Elastic Transcoder to automatically detect the container type of the input file, specify auto. If you want to specify the container type for the input file, enter one of the following values:

    3gp, aac, asf, avi, divx, flv, m4a, mkv, mov, mp3, mp4, mpeg, mpeg-ps, mpeg-ts, mxf, ogg, vob, wav, webm

    " + } + }, + "JobInput": { + "base": "

    Information about the file that you're transcoding.

    ", + "refs": { + "CreateJobRequest$Input": "

    A section of the request body that provides information about the file that is being transcoded.

    ", + "Job$Input": "

    A section of the request or response body that provides information about the file that is being transcoded.

    " + } + }, + "JobOutput": { + "base": "

    Outputs recommended instead. If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object.

    ", + "refs": { + "Job$Output": "

    If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object.

    Outputs recommended instead. A section of the request or response body that provides information about the transcoded (target) file.

    ", + "JobOutputs$member": null + } + }, + "JobOutputs": { + "base": null, + "refs": { + "Job$Outputs": "

    Information about the output files. We recommend that you use the Outputs syntax for all jobs, even when you want Elastic Transcoder to transcode a file into only one format. Do not use both the Outputs and Output syntaxes in the same request. You can create a maximum of 30 outputs per job.

    If you specify more than one output for a job, Elastic Transcoder creates the files for each output in the order in which you specify them in the job.

    " + } + }, + "JobStatus": { + "base": null, + "refs": { + "Job$Status": "

    The status of the job: Submitted, Progressing, Complete, Canceled, or Error.

    ", + "JobOutput$Status": "

    The status of one output in a job. If you specified only one output for the job, Outputs:Status is always the same as Job:Status. If you specified more than one output:

    • Job:Status and Outputs:Status for all of the outputs is Submitted until Elastic Transcoder starts to process the first output.
    • When Elastic Transcoder starts to process the first output, Outputs:Status for that output and Job:Status both change to Progressing. For each output, the value of Outputs:Status remains Submitted until Elastic Transcoder starts to process the output.
    • Job:Status remains Progressing until all of the outputs reach a terminal status, either Complete or Error.
    • When all of the outputs reach a terminal status, Job:Status changes to Complete only if Outputs:Status for all of the outputs is Complete. If Outputs:Status for one or more outputs is Error, the terminal status for Job:Status is also Error.
    The value of Status is one of the following: Submitted, Progressing, Complete, Canceled, or Error.

    ", + "ListJobsByStatusRequest$Status": "

    To get information about all of the jobs associated with the current AWS account that have a given status, specify the following status: Submitted, Progressing, Complete, Canceled, or Error.

    ", + "Playlist$Status": "

    The status of the job with which the playlist is associated.

    " + } + }, + "JobWatermark": { + "base": "

    Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

    ", + "refs": { + "JobWatermarks$member": null + } + }, + "JobWatermarks": { + "base": null, + "refs": { + "CreateJobOutput$Watermarks": "

    Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset for the current output.

    ", + "JobOutput$Watermarks": "

    Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset that you specify in Preset for the current output.

    Watermarks are added to the output video in the sequence in which you list them in the job output—the first watermark in the list is added to the output video first, the second watermark in the list is added next, and so on. As a result, if the settings in a preset cause Elastic Transcoder to place all watermarks in the same location, the second watermark that you add will cover the first one, the third one will cover the second, and the fourth one will cover the third.

    " + } + }, + "Jobs": { + "base": null, + "refs": { + "ListJobsByPipelineResponse$Jobs": "

    An array of Job objects that are in the specified pipeline.

    ", + "ListJobsByStatusResponse$Jobs": "

    An array of Job objects that have the specified status.

    " + } + }, + "JpgOrPng": { + "base": null, + "refs": { + "Artwork$AlbumArtFormat": "

    The format of album art, if any. Valid formats are .jpg and .png.

    ", + "Thumbnails$Format": "

    The format of thumbnails, if any. Valid values are jpg and png.

    You specify whether you want Elastic Transcoder to create thumbnails when you create a job.

    " + } + }, + "Key": { + "base": null, + "refs": { + "CaptionSource$Key": "

    The name of the sidecar caption file that you want Elastic Transcoder to include in the output file.

    ", + "CaptionSource$Language": "

    A string that specifies the language of the caption. Specify this as one of:

    • 2-character ISO 639-1 code

    • 3-character ISO 639-2 code

    For more information on ISO language codes and language names, see the List of ISO 639-1 codes.

    ", + "CreateJobOutput$Key": "

    The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID. If a file with the specified name already exists in the output bucket, the job fails.

    ", + "CreateJobRequest$OutputKeyPrefix": "

    The value, if any, that you want Elastic Transcoder to prepend to the names of all files that this job creates, including output files, thumbnails, and playlists.

    ", + "Job$OutputKeyPrefix": "

    The value, if any, that you want Elastic Transcoder to prepend to the names of all files that this job creates, including output files, thumbnails, and playlists. We recommend that you add a / or some other delimiter to the end of the OutputKeyPrefix.

    ", + "JobInput$Key": "

    The name of the file to transcode. Elsewhere in the body of the JSON block is the ID of the pipeline to use for processing the job. The InputBucket object in that pipeline tells Elastic Transcoder which Amazon S3 bucket to get the file from.

    If the file name includes a prefix, such as cooking/lasagna.mpg, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

    ", + "JobOutput$Key": "

    The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID.

    ", + "OutputKeys$member": null + } + }, + "KeyArn": { + "base": null, + "refs": { + "CreatePipelineRequest$AwsKmsKeyArn": "

    The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

    If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM.

    ", + "Pipeline$AwsKmsKeyArn": "

    The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

    If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM.

    ", + "UpdatePipelineRequest$AwsKmsKeyArn": "

    The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

    If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM.

    " + } + }, + "KeyIdGuid": { + "base": null, + "refs": { + "PlayReadyDrm$KeyId": "

    The ID for your DRM key, so that your DRM license provider knows which key to provide.

    The key ID must be provided in big endian, and Elastic Transcoder will convert it to little endian before inserting it into the PlayReady DRM headers. If you are unsure whether your license server provides your key ID in big or little endian, check with your DRM provider.

    " + } + }, + "KeyStoragePolicy": { + "base": null, + "refs": { + "HlsContentProtection$KeyStoragePolicy": "

    Specify whether you want Elastic Transcoder to write your HLS license key to an Amazon S3 bucket. If you choose WithVariantPlaylists, LicenseAcquisitionUrl must be left blank and Elastic Transcoder writes your data key into the same bucket as the associated playlist.

    " + } + }, + "KeyframesMaxDist": { + "base": null, + "refs": { + "VideoParameters$KeyframesMaxDist": "

    Applicable only when the value of Video:Codec is one of H.264, MPEG2, or VP8.

    The maximum number of frames between key frames. Key frames are fully encoded frames; the frames between key frames are encoded based, in part, on the content of the key frames. The value is an integer formatted as a string; valid values are between 1 (every frame is a key frame) and 100000, inclusive. A higher value results in higher compression but may also discernibly decrease video quality.

    For Smooth outputs, the FrameRate must have a constant ratio to the KeyframesMaxDist. This allows Smooth playlists to switch between different quality levels while the file is being played.

    For example, an input file can have a FrameRate of 30 with a KeyframesMaxDist of 90. The output file then needs to have a ratio of 1:3. Valid outputs would have FrameRate of 30, 25, and 10, and KeyframesMaxDist of 90, 75, and 30, respectively.

    Alternately, this can be achieved by setting FrameRate to auto and having the same values for MaxFrameRate and KeyframesMaxDist.

    " + } + }, + "LimitExceededException": { + "base": "

    Too many operations for a given AWS account. For example, the number of pipelines exceeds the maximum allowed.

    ", + "refs": { + } + }, + "ListJobsByPipelineRequest": { + "base": "

    The ListJobsByPipelineRequest structure.

    ", + "refs": { + } + }, + "ListJobsByPipelineResponse": { + "base": "

    The ListJobsByPipelineResponse structure.

    ", + "refs": { + } + }, + "ListJobsByStatusRequest": { + "base": "

    The ListJobsByStatusRequest structure.

    ", + "refs": { + } + }, + "ListJobsByStatusResponse": { + "base": "

    The ListJobsByStatusResponse structure.

    ", + "refs": { + } + }, + "ListPipelinesRequest": { + "base": "

    The ListPipelineRequest structure.

    ", + "refs": { + } + }, + "ListPipelinesResponse": { + "base": "

    A list of the pipelines associated with the current AWS account.

    ", + "refs": { + } + }, + "ListPresetsRequest": { + "base": "

    The ListPresetsRequest structure.

    ", + "refs": { + } + }, + "ListPresetsResponse": { + "base": "

    The ListPresetsResponse structure.

    ", + "refs": { + } + }, + "MaxFrameRate": { + "base": null, + "refs": { + "VideoParameters$MaxFrameRate": "

    If you specify auto for FrameRate, Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video. Specify the maximum frame rate that you want Elastic Transcoder to use when the frame rate of the input video is greater than the desired maximum frame rate of the output video. Valid values include: 10, 15, 23.97, 24, 25, 29.97, 30, 60.

    " + } + }, + "MergePolicy": { + "base": null, + "refs": { + "JobAlbumArt$MergePolicy": "

    A policy that determines how Elastic Transcoder will handle the existence of multiple album artwork files.

    • Replace: The specified album art will replace any existing album art.
    • Prepend: The specified album art will be placed in front of any existing album art.
    • Append: The specified album art will be placed after any existing album art.
    • Fallback: If the original input file contains artwork, Elastic Transcoder will use that artwork for the output. If the original input does not contain artwork, Elastic Transcoder will use the specified album art file.

    " + } + }, + "Name": { + "base": null, + "refs": { + "CaptionSource$Label": "

    The label of the caption shown in the player when choosing a language. We recommend that you put the caption language name here, in the language of the captions.

    ", + "CreatePipelineRequest$Name": "

    The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

    Constraints: Maximum 40 characters.

    ", + "CreatePresetRequest$Name": "

    The name of the preset. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

    ", + "Pipeline$Name": "

    The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

    Constraints: Maximum 40 characters

    ", + "Preset$Name": "

    The name of the preset.

    ", + "UpdatePipelineRequest$Name": "

    The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

    Constraints: Maximum 40 characters

    " + } + }, + "NonEmptyBase64EncodedString": { + "base": null, + "refs": { + "PlayReadyDrm$Key": "

    The DRM key for your file, provided by your DRM license provider. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded:

    128, 192, or 256.

    The key must also be encrypted by using AWS KMS.

    ", + "PlayReadyDrm$KeyMd5": "

    The MD5 digest of the key used for DRM on your file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

    " + } + }, + "Notifications": { + "base": "

    The Amazon Simple Notification Service (Amazon SNS) topic or topics to notify in order to report job status.

    To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.", + "refs": { + "CreatePipelineRequest$Notifications": "

    The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

    To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.
    • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic. For more information, see Create a Topic in the Amazon Simple Notification Service Developer Guide.
    • Completed: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.
    • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.
    • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline. This is the ARN that Amazon SNS returned when you created the topic.
    ", + "Pipeline$Notifications": "

    The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

    To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.
    • Progressing (optional): The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.
    • Completed (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.
    • Warning (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.
    • Error (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.
    ", + "UpdatePipelineNotificationsRequest$Notifications": "

    The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

    To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.
    • Progressing: The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process jobs that are added to this pipeline. This is the ARN that Amazon SNS returned when you created the topic.
    • Completed: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job. This is the ARN that Amazon SNS returned when you created the topic.
    • Warning: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition. This is the ARN that Amazon SNS returned when you created the topic.
    • Error: The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition. This is the ARN that Amazon SNS returned when you created the topic.
    ", + "UpdatePipelineRequest$Notifications": null + } + }, + "NullableInteger": { + "base": null, + "refs": { + "DetectedProperties$Width": "

    The detected width of the input file, in pixels.

    ", + "DetectedProperties$Height": "

    The detected height of the input file, in pixels.

    ", + "JobOutput$Width": "

    Specifies the width of the output file in pixels.

    ", + "JobOutput$Height": "

    Height of the output file, in pixels.

    " + } + }, + "NullableLong": { + "base": null, + "refs": { + "DetectedProperties$FileSize": "

    The detected file size of the input file, in bytes.

    ", + "DetectedProperties$DurationMillis": "

    The detected duration of the input file, in milliseconds.

    ", + "JobOutput$Duration": "

    Duration of the output file, in seconds.

    ", + "JobOutput$FileSize": "

    File size of the output file, in bytes.

    ", + "JobOutput$DurationMillis": "

    Duration of the output file, in milliseconds.

    ", + "Timing$SubmitTimeMillis": "

    The time the job was submitted to Elastic Transcoder, in epoch milliseconds.

    ", + "Timing$StartTimeMillis": "

    The time the job began transcoding, in epoch milliseconds.

    ", + "Timing$FinishTimeMillis": "

    The time the job finished transcoding, in epoch milliseconds.

    " + } + }, + "OneTo512String": { + "base": null, + "refs": { + "PlayReadyDrm$LicenseAcquisitionUrl": "

    The location of the license key required to play DRM content. The URL must be an absolute path, and is referenced by the PlayReady header. The PlayReady header is referenced in the protection header of the client manifest for Smooth Streaming outputs, and in the EXT-X-DXDRM and EXT-XDXDRMINFO metadata tags for HLS playlist outputs. An example URL looks like this: https://www.example.com/exampleKey/

    " + } + }, + "Opacity": { + "base": null, + "refs": { + "PresetWatermark$Opacity": "

    A percentage that indicates how much you want a watermark to obscure the video in the location where it appears. Valid values are 0 (the watermark is invisible) to 100 (the watermark completely obscures the video in the specified location). The datatype of Opacity is float.

    Elastic Transcoder supports transparent .png graphics. If you use a transparent .png, the transparent portion of the video appears as if you had specified a value of 0 for Opacity. The .jpg file format doesn't support transparency.

    " + } + }, + "OutputKeys": { + "base": null, + "refs": { + "CreateJobPlaylist$OutputKeys": "

    For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object.

    • If your output is not HLS or does not have a segment duration set, the name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key:

      OutputKeyPrefixOutputs:Key

    • If your output is HLSv3 and has a segment duration set, or is not included in a playlist, Elastic Transcoder creates an output playlist file with a file extension of .m3u8, and a series of .ts files that include a five-digit sequential counter beginning with 00000:

      OutputKeyPrefixOutputs:Key.m3u8

      OutputKeyPrefixOutputs:Key00000.ts

    • If your output is HLSv4, has a segment duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates an output playlist file with a file extension of _v4.m3u8. If the output is video, Elastic Transcoder also creates an output file with an extension of _iframe.m3u8:

      OutputKeyPrefixOutputs:Key_v4.m3u8

      OutputKeyPrefixOutputs:Key_iframe.m3u8

      OutputKeyPrefixOutputs:Key.ts

    Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions.

    If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.

    ", + "Playlist$OutputKeys": "

    For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object.

    • If your output is not HLS or does not have a segment duration set, the name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key:

      OutputKeyPrefixOutputs:Key

    • If your output is HLSv3 and has a segment duration set, or is not included in a playlist, Elastic Transcoder creates an output playlist file with a file extension of .m3u8, and a series of .ts files that include a five-digit sequential counter beginning with 00000:

      OutputKeyPrefixOutputs:Key.m3u8

      OutputKeyPrefixOutputs:Key00000.ts

    • If your output is HLSv4, has a segment duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates an output playlist file with a file extension of _v4.m3u8. If the output is video, Elastic Transcoder also creates an output file with an extension of _iframe.m3u8:

      OutputKeyPrefixOutputs:Key_v4.m3u8

      OutputKeyPrefixOutputs:Key_iframe.m3u8

      OutputKeyPrefixOutputs:Key.ts

    Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions.

    If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.

    " + } + }, + "PaddingPolicy": { + "base": null, + "refs": { + "Artwork$PaddingPolicy": "

    When you set PaddingPolicy to Pad, Elastic Transcoder may add white bars to the top and bottom and/or left and right sides of the output album art to make the total size of the output art match the values that you specified for MaxWidth and MaxHeight.

    ", + "Thumbnails$PaddingPolicy": "

    When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings.

    ", + "VideoParameters$PaddingPolicy": "

    When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of the output video to make the total size of the output video match the values that you specified for MaxWidth and MaxHeight.

    " + } + }, + "Permission": { + "base": "

    The Permission structure.

    ", + "refs": { + "Permissions$member": null + } + }, + "Permissions": { + "base": null, + "refs": { + "PipelineOutputConfig$Permissions": "

    Optional. The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.

    If you include Permissions, Elastic Transcoder grants only the permissions that you specify. It does not grant full permissions to the owner of the role specified by Role. If you want that user to have full control, you must explicitly grant full control to the user.

    If you omit Permissions, Elastic Transcoder grants full control over the transcoded files and playlists to the owner of the role specified by Role, and grants no other permissions to any other user or group.

    " + } + }, + "Pipeline": { + "base": "

    The pipeline (queue) that is used to manage jobs.

    ", + "refs": { + "CreatePipelineResponse$Pipeline": "

    A section of the response body that provides information about the pipeline that is created.

    ", + "Pipelines$member": null, + "ReadPipelineResponse$Pipeline": "

    A section of the response body that provides information about the pipeline.

    ", + "UpdatePipelineNotificationsResponse$Pipeline": "

    A section of the response body that provides information about the pipeline.

    ", + "UpdatePipelineResponse$Pipeline": null, + "UpdatePipelineStatusResponse$Pipeline": "

    A section of the response body that provides information about the pipeline.

    " + } + }, + "PipelineOutputConfig": { + "base": "

    The PipelineOutputConfig structure.

    ", + "refs": { + "CreatePipelineRequest$ContentConfig": "

    The optional ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists: which bucket to use, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

    If you specify values for ContentConfig, you must also specify values for ThumbnailConfig.

    If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
    • Permissions (Optional): The Permissions object specifies which users you want to have access to transcoded files and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
    • Grantee Type: Specify the type of value that appears in the Grantee object:
      • Canonical: The value in the Grantee object is either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. For more information about canonical user IDs, see Access Control List (ACL) Overview in the Amazon Simple Storage Service Developer Guide. For more information about using CloudFront origin access identities to require that users use CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content. A canonical user ID is not the same as an AWS account number.
      • Email: The value in the Grantee object is the registered email address of an AWS account.
      • Group: The value in the Grantee object is one of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
    • Grantee: The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.
    • Access: The permission that you want to give to the AWS user that you specified in Grantee. Permissions are granted on the files that Elastic Transcoder adds to the bucket, including playlists and video files. Valid values include:
      • READ: The grantee can read the objects and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.
    ", + "CreatePipelineRequest$ThumbnailConfig": "

    The ThumbnailConfig object specifies several values, including the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

    If you specify values for ContentConfig, you must also specify values for ThumbnailConfig even if you don't want to create thumbnails.

    If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
    • Permissions (Optional): The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to thumbnail files, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
    • GranteeType: Specify the type of value that appears in the Grantee object:
      • Canonical: The value in the Grantee object is either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. A canonical user ID is not the same as an AWS account number.
      • Email: The value in the Grantee object is the registered email address of an AWS account.
      • Group: The value in the Grantee object is one of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
    • Grantee: The AWS user or group that you want to have access to thumbnail files. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.
    • Access: The permission that you want to give to the AWS user that you specified in Grantee. Permissions are granted on the thumbnail files that Elastic Transcoder adds to the bucket. Valid values include:
      • READ: The grantee can read the thumbnails and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • READ_ACP: The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
      • WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
      • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
    ", + "Pipeline$ContentConfig": "

    Information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. Either you specify both ContentConfig and ThumbnailConfig, or you specify OutputBucket.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
    • Permissions: A list of the users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access that you want them to have.
      • GranteeType: The type of value that appears in the Grantee object:
        • Canonical: Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution.
        • Email: The registered email address of an AWS account.
        • Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
      • Grantee: The AWS user or group that you want to have access to transcoded files and playlists.
      • Access: The permission that you want to give to the AWS user that is listed in Grantee. Valid values include:
        • READ: The grantee can read the objects and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
        • READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket.
        • WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
        • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.
    ", + "Pipeline$ThumbnailConfig": "

    Information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. Either you specify both ContentConfig and ThumbnailConfig, or you specify OutputBucket.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
    • Permissions: A list of the users and/or predefined Amazon S3 groups you want to have access to thumbnail files, and the type of access that you want them to have.
      • GranteeType: The type of value that appears in the Grantee object:
        • Canonical: Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. A canonical user ID is not the same as an AWS account number.
        • Email: The registered email address of an AWS account.
        • Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
      • Grantee: The AWS user or group that you want to have access to thumbnail files.
      • Access: The permission that you want to give to the AWS user that is listed in Grantee. Valid values include:
        • READ: The grantee can read the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
        • READ_ACP: The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
        • WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
        • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
    ", + "UpdatePipelineRequest$ContentConfig": "

    The optional ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists: which bucket to use, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

    If you specify values for ContentConfig, you must also specify values for ThumbnailConfig.

    If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
    • Permissions (Optional): The Permissions object specifies which users you want to have access to transcoded files and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
    • Grantee Type: Specify the type of value that appears in the Grantee object:
      • Canonical: The value in the Grantee object is either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. For more information about canonical user IDs, see Access Control List (ACL) Overview in the Amazon Simple Storage Service Developer Guide. For more information about using CloudFront origin access identities to require that users use CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content. A canonical user ID is not the same as an AWS account number.
      • Email: The value in the Grantee object is the registered email address of an AWS account.
      • Group: The value in the Grantee object is one of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
    • Grantee: The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.
    • Access: The permission that you want to give to the AWS user that you specified in Grantee. Permissions are granted on the files that Elastic Transcoder adds to the bucket, including playlists and video files. Valid values include:
      • READ: The grantee can read the objects and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • READ_ACP: The grantee can read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.
    ", + "UpdatePipelineRequest$ThumbnailConfig": "

    The ThumbnailConfig object specifies several values, including the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

    If you specify values for ContentConfig, you must also specify values for ThumbnailConfig even if you don't want to create thumbnails.

    If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

    • Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
    • Permissions (Optional): The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to thumbnail files, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.
    • GranteeType: Specify the type of value that appears in the Grantee object:
      • Canonical: The value in the Grantee object is either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. A canonical user ID is not the same as an AWS account number.
      • Email: The value in the Grantee object is the registered email address of an AWS account.
      • Group: The value in the Grantee object is one of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery.
    • Grantee: The AWS user or group that you want to have access to thumbnail files. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.
    • Access: The permission that you want to give to the AWS user that you specified in Grantee. Permissions are granted on the thumbnail files that Elastic Transcoder adds to the bucket. Valid values include:
      • READ: The grantee can read the thumbnails and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket.
      • READ_ACP: The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
      • WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
      • FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
    • StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
    " + } + }, + "PipelineStatus": { + "base": null, + "refs": { + "Pipeline$Status": "

    The current status of the pipeline:

    • Active: The pipeline is processing jobs.
    • Paused: The pipeline is not currently processing jobs.
    ", + "UpdatePipelineStatusRequest$Status": "

    The desired status of the pipeline:

    • Active: The pipeline is processing jobs.
    • Paused: The pipeline is not currently processing jobs.
    " + } + }, + "Pipelines": { + "base": null, + "refs": { + "ListPipelinesResponse$Pipelines": "

    An array of Pipeline objects.

    " + } + }, + "PixelsOrPercent": { + "base": null, + "refs": { + "PresetWatermark$MaxWidth": "

    The maximum width of the watermark in one of the following formats:

    • number of pixels (px): The minimum value is 16 pixels, and the maximum value is the value of MaxWidth.
    • integer percentage (%): The range of valid values is 0 to 100. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the calculation.
    • If you specify the value in pixels, it must be less than or equal to the value of MaxWidth.

    ", + "PresetWatermark$MaxHeight": "

    The maximum height of the watermark in one of the following formats:

    • number of pixels (px): The minimum value is 16 pixels, and the maximum value is the value of MaxHeight.
    • integer percentage (%): The range of valid values is 0 to 100. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the calculation.
    If you specify the value in pixels, it must be less than or equal to the value of MaxHeight.

    ", + "PresetWatermark$HorizontalOffset": "

    The amount by which you want the horizontal position of the watermark to be offset from the position specified by HorizontalAlign:

    • number of pixels (px): The minimum value is 0 pixels, and the maximum value is the value of MaxWidth.
    • integer percentage (%): The range of valid values is 0 to 100.
    For example, if you specify Left for HorizontalAlign and 5px for HorizontalOffset, the left side of the watermark appears 5 pixels from the left border of the output video.

    HorizontalOffset is only valid when the value of HorizontalAlign is Left or Right. If you specify an offset that causes the watermark to extend beyond the left or right border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped.

    Use the value of Target to specify whether you want to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.

    ", + "PresetWatermark$VerticalOffset": "VerticalOffset

    The amount by which you want the vertical position of the watermark to be offset from the position specified by VerticalAlign:

    • number of pixels (px): The minimum value is 0 pixels, and the maximum value is the value of MaxHeight.
    • integer percentage (%): The range of valid values is 0 to 100.
    For example, if you specify Top for VerticalAlign and 5px for VerticalOffset, the top of the watermark appears 5 pixels from the top border of the output video.

    VerticalOffset is only valid when the value of VerticalAlign is Top or Bottom.

    If you specify an offset that causes the watermark to extend beyond the top or bottom border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped.

    Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.

    " + } + }, + "PlayReadyDrm": { + "base": "

    The PlayReady DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    PlayReady DRM encrypts your media files using AES-CTR encryption.

    If you use DRM for an HLSv3 playlist, your outputs must have a master playlist.

    ", + "refs": { + "CreateJobPlaylist$PlayReadyDrm": "

    The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    ", + "Playlist$PlayReadyDrm": "

    The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    " + } + }, + "PlayReadyDrmFormatString": { + "base": null, + "refs": { + "PlayReadyDrm$Format": "

    The type of DRM, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

    " + } + }, + "Playlist": { + "base": "

    Use Only for Fragmented MP4 or MPEG-TS Outputs. If you specify a preset for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create. We recommend that you create only one master playlist per output format. The maximum number of master playlists in a job is 30.

    ", + "refs": { + "Playlists$member": null + } + }, + "PlaylistFormat": { + "base": null, + "refs": { + "CreateJobPlaylist$Format": "

    The format of the output playlist. Valid formats include HLSv3, HLSv4, and Smooth.

    ", + "Playlist$Format": "

    The format of the output playlist. Valid formats include HLSv3, HLSv4, and Smooth.

    " + } + }, + "Playlists": { + "base": null, + "refs": { + "Job$Playlists": "

    Outputs in Fragmented MP4 or MPEG-TS format only. If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create.

    The maximum number of master playlists in a job is 30.

    " + } + }, + "Preset": { + "base": "

    Presets are templates that contain most of the settings for transcoding media files from one format to another. Elastic Transcoder includes some default presets for common formats, for example, several iPod and iPhone versions. You can also create your own presets for formats that aren't included among the default presets. You specify which preset you want to use when you create a job.

    ", + "refs": { + "CreatePresetResponse$Preset": "

    A section of the response body that provides information about the preset that is created.

    ", + "Presets$member": null, + "ReadPresetResponse$Preset": "

    A section of the response body that provides information about the preset.

    " + } + }, + "PresetContainer": { + "base": null, + "refs": { + "CreatePresetRequest$Container": "

    The container type for the output file. Valid values include flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm.

    ", + "Preset$Container": "

    The container type for the output file. Valid values include flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm.

    " + } + }, + "PresetType": { + "base": null, + "refs": { + "Preset$Type": "

    Whether the preset is a default preset provided by Elastic Transcoder (System) or a preset that you have defined (Custom).

    " + } + }, + "PresetWatermark": { + "base": "

    Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video.

    Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

    When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.

    ", + "refs": { + "PresetWatermarks$member": null + } + }, + "PresetWatermarkId": { + "base": null, + "refs": { + "JobWatermark$PresetWatermarkId": "

    The ID of the watermark settings that Elastic Transcoder uses to add watermarks to the video during transcoding. The settings are in the preset specified by Preset for the current output. In that preset, the value of Watermarks Id tells Elastic Transcoder which settings to use.

    ", + "PresetWatermark$Id": "A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long." + } + }, + "PresetWatermarks": { + "base": null, + "refs": { + "VideoParameters$Watermarks": "

    Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video.

    Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

    When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.

    " + } + }, + "Presets": { + "base": null, + "refs": { + "ListPresetsResponse$Presets": "

    An array of Preset objects.

    " + } + }, + "ReadJobRequest": { + "base": "

    The ReadJobRequest structure.

    ", + "refs": { + } + }, + "ReadJobResponse": { + "base": "

    The ReadJobResponse structure.

    ", + "refs": { + } + }, + "ReadPipelineRequest": { + "base": "

    The ReadPipelineRequest structure.

    ", + "refs": { + } + }, + "ReadPipelineResponse": { + "base": "

    The ReadPipelineResponse structure.

    ", + "refs": { + } + }, + "ReadPresetRequest": { + "base": "

    The ReadPresetRequest structure.

    ", + "refs": { + } + }, + "ReadPresetResponse": { + "base": "

    The ReadPresetResponse structure.

    ", + "refs": { + } + }, + "Resolution": { + "base": null, + "refs": { + "JobInput$Resolution": "

    This value must be auto, which causes Elastic Transcoder to automatically detect the resolution of the input file.

    ", + "VideoParameters$Resolution": "

    To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

    The width and height of the video in the output file, in pixels. Valid values are auto and width x height:

    • auto: Elastic Transcoder attempts to preserve the width and height of the input file, subject to the following rules.
    • width x height: The width and height of the output video in pixels.

    Note the following about specifying the width and height:

    • The width must be an even integer between 128 and 4096, inclusive.
    • The height must be an even integer between 96 and 3072, inclusive.
    • If you specify a resolution that is less than the resolution of the input file, Elastic Transcoder rescales the output file to the lower resolution.
    • If you specify a resolution that is greater than the resolution of the input file, Elastic Transcoder rescales the output to the higher resolution.
    • We recommend that you specify a resolution for which the product of width and height is less than or equal to the applicable value in the following list (List - Max width x height value):
      • 1 - 25344
      • 1b - 25344
      • 1.1 - 101376
      • 1.2 - 101376
      • 1.3 - 101376
      • 2 - 101376
      • 2.1 - 202752
      • 2.2 - 404720
      • 3 - 404720
      • 3.1 - 921600
      • 3.2 - 1310720
      • 4 - 2097152
      • 4.1 - 2097152
    " + } + }, + "ResourceInUseException": { + "base": "

    The resource you are attempting to change is in use. For example, you are attempting to delete a pipeline that is currently in use.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The requested resource does not exist or is not available. For example, the pipeline to which you're trying to add a job doesn't exist or is still being created.

    ", + "refs": { + } + }, + "Role": { + "base": null, + "refs": { + "CreatePipelineRequest$Role": "

    The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the pipeline.

    ", + "Pipeline$Role": "

    The IAM Amazon Resource Name (ARN) for the role that Elastic Transcoder uses to transcode jobs for this pipeline.

    ", + "TestRoleRequest$Role": "

    The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to test.

    ", + "UpdatePipelineRequest$Role": "

    The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline.

    " + } + }, + "Rotate": { + "base": null, + "refs": { + "CreateJobOutput$Rotate": "

    The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values: auto, 0, 90, 180, 270. The value auto generally works only if the file that you're transcoding contains rotation metadata.

    ", + "JobOutput$Rotate": "

    The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values:

    auto, 0, 90, 180, 270

    The value auto generally works only if the file that you're transcoding contains rotation metadata.

    " + } + }, + "SizingPolicy": { + "base": null, + "refs": { + "Artwork$SizingPolicy": "

    Specify one of the following values to control scaling of the output album art:

    • Fit: Elastic Transcoder scales the output art so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value.
    • Fill: Elastic Transcoder scales the output art so it matches the value that you specified in either MaxWidth or MaxHeight and matches or exceeds the other value. Elastic Transcoder centers the output art and then crops it in the dimension (if any) that exceeds the maximum value.
    • Stretch: Elastic Transcoder stretches the output art to match the values that you specified for MaxWidth and MaxHeight. If the relative proportions of the input art and the output art are different, the output art will be distorted.
    • Keep: Elastic Transcoder does not scale the output art. If either dimension of the input art exceeds the values that you specified for MaxWidth and MaxHeight, Elastic Transcoder crops the output art.
    • ShrinkToFit: Elastic Transcoder scales the output art down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the art up.
    • ShrinkToFill Elastic Transcoder scales the output art down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale the art up.

    ", + "Thumbnails$SizingPolicy": "

    Specify one of the following values to control scaling of thumbnails:

    • Fit: Elastic Transcoder scales thumbnails so they match the value that you specified in thumbnail MaxWidth or MaxHeight settings without exceeding the other value.
    • Fill: Elastic Transcoder scales thumbnails so they match the value that you specified in thumbnail MaxWidth or MaxHeight settings and matches or exceeds the other value. Elastic Transcoder centers the image in thumbnails and then crops in the dimension (if any) that exceeds the maximum value.
    • Stretch: Elastic Transcoder stretches thumbnails to match the values that you specified for thumbnail MaxWidth and MaxHeight settings. If the relative proportions of the input video and thumbnails are different, the thumbnails will be distorted.
    • Keep: Elastic Transcoder does not scale thumbnails. If either dimension of the input video exceeds the values that you specified for thumbnail MaxWidth and MaxHeight settings, Elastic Transcoder crops the thumbnails.
    • ShrinkToFit: Elastic Transcoder scales thumbnails down so that their dimensions match the values that you specified for at least one of thumbnail MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale thumbnails up.
    • ShrinkToFill: Elastic Transcoder scales thumbnails down so that their dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale thumbnails up.

    ", + "VideoParameters$SizingPolicy": "

    Specify one of the following values to control scaling of the output video:

    • Fit: Elastic Transcoder scales the output video so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value.
    • Fill: Elastic Transcoder scales the output video so it matches the value that you specified in either MaxWidth or MaxHeight and matches or exceeds the other value. Elastic Transcoder centers the output video and then crops it in the dimension (if any) that exceeds the maximum value.
    • Stretch: Elastic Transcoder stretches the output video to match the values that you specified for MaxWidth and MaxHeight. If the relative proportions of the input video and the output video are different, the output video will be distorted.
    • Keep: Elastic Transcoder does not scale the output video. If either dimension of the input video exceeds the values that you specified for MaxWidth and MaxHeight, Elastic Transcoder crops the output video.
    • ShrinkToFit: Elastic Transcoder scales the output video down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the video up.
    • ShrinkToFill: Elastic Transcoder scales the output video down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale the video up.

    " + } + }, + "SnsTopic": { + "base": null, + "refs": { + "Notifications$Progressing": "

    The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.

    ", + "Notifications$Completed": "

    The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.

    ", + "Notifications$Warning": "

    The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.

    ", + "Notifications$Error": "

    The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.

    ", + "SnsTopics$member": null + } + }, + "SnsTopics": { + "base": null, + "refs": { + "TestRoleRequest$Topics": "

    The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test notification to.

    " + } + }, + "StorageClass": { + "base": null, + "refs": { + "PipelineOutputConfig$StorageClass": "

    The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.

    " + } + }, + "String": { + "base": null, + "refs": { + "CreatePresetResponse$Warning": "

    If the preset settings don't comply with the standards for the video codec but Elastic Transcoder created the preset, this message explains the reason the preset settings don't meet the standard. Elastic Transcoder created the preset because the settings might produce acceptable output.

    ", + "ExceptionMessages$member": null, + "Job$Arn": "

    The Amazon Resource Name (ARN) for the job.

    ", + "JobOutput$Id": "

    A sequential counter, starting with 1, that identifies an output among the outputs from the current job. In the Output syntax, this value is always 1.

    ", + "JobOutput$AppliedColorSpaceConversion": "

    If Elastic Transcoder used a preset with a ColorSpaceConversionMode to transcode the output file, the AppliedColorSpaceConversion parameter shows the conversion used. If no ColorSpaceConversionMode was defined in the preset, this parameter will not be included in the job response.

    ", + "Pipeline$Arn": "

    The Amazon Resource Name (ARN) for the pipeline.

    ", + "Preset$Arn": "

    The Amazon Resource Name (ARN) for the preset.

    ", + "UserMetadata$key": null, + "UserMetadata$value": null, + "Warning$Code": "

    The code of the cross-regional warning.

    ", + "Warning$Message": "

    The message explaining what resources are in a different region from the pipeline.

    Note: AWS KMS keys must be in the same region as the pipeline.

    " + } + }, + "Success": { + "base": null, + "refs": { + "TestRoleResponse$Success": "

    If the operation is successful, this value is true; otherwise, the value is false.

    " + } + }, + "Target": { + "base": null, + "refs": { + "PresetWatermark$Target": "

    A value that determines how Elastic Transcoder interprets values that you specified for HorizontalOffset, VerticalOffset, MaxWidth, and MaxHeight:

    • Content: HorizontalOffset and VerticalOffset values are calculated based on the borders of the video excluding black bars added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight, if specified as a percentage, are calculated based on the borders of the video excluding black bars added by Elastic Transcoder, if any.
    • Frame: HorizontalOffset and VerticalOffset values are calculated based on the borders of the video including black bars added by Elastic Transcoder, if any.
    • In addition, MaxWidth and MaxHeight, if specified as a percentage, are calculated based on the borders of the video including black bars added by Elastic Transcoder, if any.

    " + } + }, + "TestRoleRequest": { + "base": "

    The TestRoleRequest structure.

    ", + "refs": { + } + }, + "TestRoleResponse": { + "base": "

    The TestRoleResponse structure.

    ", + "refs": { + } + }, + "ThumbnailPattern": { + "base": null, + "refs": { + "CreateJobOutput$ThumbnailPattern": "

    Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files.

    If you don't want Elastic Transcoder to create thumbnails, specify \"\".

    If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence:

    • {count} (Required): If you want to create thumbnails, you must include {count} in the ThumbnailPattern object. Wherever you specify {count}, Elastic Transcoder adds a five-digit sequence number (beginning with 00001) to thumbnail file names. The number indicates where a given thumbnail appears in the sequence of thumbnails for a transcoded file.

      If you specify a literal value and/or {resolution} but you omit {count}, Elastic Transcoder returns a validation error and does not create the job.
    • Literal values (Optional): You can specify literal values anywhere in the ThumbnailPattern object. For example, you can include them as a file name prefix or as a delimiter between {resolution} and {count}.

    • {resolution} (Optional): If you want Elastic Transcoder to include the resolution in the file name, include {resolution} in the ThumbnailPattern object.

    When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput. Elastic Transcoder also appends the applicable file name extension.

    ", + "JobOutput$ThumbnailPattern": "

    Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files.

    If you don't want Elastic Transcoder to create thumbnails, specify \"\".

    If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence:

    • {count} (Required): If you want to create thumbnails, you must include {count} in the ThumbnailPattern object. Wherever you specify {count}, Elastic Transcoder adds a five-digit sequence number (beginning with 00001) to thumbnail file names. The number indicates where a given thumbnail appears in the sequence of thumbnails for a transcoded file.

      If you specify a literal value and/or {resolution} but you omit {count}, Elastic Transcoder returns a validation error and does not create the job.
    • Literal values (Optional): You can specify literal values anywhere in the ThumbnailPattern object. For example, you can include them as a file name prefix or as a delimiter between {resolution} and {count}.

    • {resolution} (Optional): If you want Elastic Transcoder to include the resolution in the file name, include {resolution} in the ThumbnailPattern object.

    When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput. Elastic Transcoder also appends the applicable file name extension.

    " + } + }, + "ThumbnailResolution": { + "base": null, + "refs": { + "Thumbnails$Resolution": "

    To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

    The width and height of thumbnail files in pixels. Specify a value in the format width x height where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object.

    " + } + }, + "Thumbnails": { + "base": "

    Thumbnails for videos.

    ", + "refs": { + "CreatePresetRequest$Thumbnails": "

    A section of the request body that specifies the thumbnail parameters, if any.

    ", + "Preset$Thumbnails": "

    A section of the response body that provides information about the thumbnail preset values, if any.

    " + } + }, + "Time": { + "base": null, + "refs": { + "TimeSpan$StartTime": "

    The place in the input file where you want a clip to start. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder starts at the beginning of the input file.

    ", + "TimeSpan$Duration": "

    The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder creates an output file from StartTime to the end of the file.

    If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes the file and returns a warning message.

    " + } + }, + "TimeOffset": { + "base": null, + "refs": { + "CaptionSource$TimeOffset": "

    For clip generation or captions that do not start at the same time as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode before including captions.

    Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

    " + } + }, + "TimeSpan": { + "base": "

    Settings that determine when a clip begins and how long it lasts.

    ", + "refs": { + "Clip$TimeSpan": "

    Settings that determine when a clip begins and how long it lasts.

    " + } + }, + "Timing": { + "base": "

    Details about the timing of a job.

    ", + "refs": { + "Job$Timing": "

    Details about the timing of a job.

    " + } + }, + "UpdatePipelineNotificationsRequest": { + "base": "

    The UpdatePipelineNotificationsRequest structure.

    ", + "refs": { + } + }, + "UpdatePipelineNotificationsResponse": { + "base": "

    The UpdatePipelineNotificationsResponse structure.

    ", + "refs": { + } + }, + "UpdatePipelineRequest": { + "base": "

    The UpdatePipelineRequest structure.

    ", + "refs": { + } + }, + "UpdatePipelineResponse": { + "base": "

    When you update a pipeline, Elastic Transcoder returns the values that you specified in the request.

    ", + "refs": { + } + }, + "UpdatePipelineStatusRequest": { + "base": "

    The UpdatePipelineStatusRequest structure.

    ", + "refs": { + } + }, + "UpdatePipelineStatusResponse": { + "base": "When you update status for a pipeline, Elastic Transcoder returns the values that you specified in the request.", + "refs": { + } + }, + "UserMetadata": { + "base": null, + "refs": { + "CreateJobRequest$UserMetadata": "

    User-defined metadata that you want to associate with an Elastic Transcoder job. You specify metadata in key/value pairs, and you can add up to 10 key/value pairs per job. Elastic Transcoder does not guarantee that key/value pairs will be returned in the same order in which you specify them.

    ", + "Job$UserMetadata": "

    User-defined metadata that you want to associate with an Elastic Transcoder job. You specify metadata in key/value pairs, and you can add up to 10 key/value pairs per job. Elastic Transcoder does not guarantee that key/value pairs will be returned in the same order in which you specify them.

    Metadata keys and values must use characters from the following list:

    • 0-9

    • A-Z and a-z

    • Space

    • The following symbols: _.:/=+-%@

    " + } + }, + "ValidationException": { + "base": "

    One or more required parameter values were not provided in the request.

    ", + "refs": { + } + }, + "VerticalAlign": { + "base": null, + "refs": { + "PresetWatermark$VerticalAlign": "

    The vertical position of the watermark unless you specify a non-zero value for VerticalOffset:

    • Top: The top edge of the watermark is aligned with the top border of the video.
    • Bottom: The bottom edge of the watermark is aligned with the bottom border of the video.
    • Center: The watermark is centered between the top and bottom borders.

    " + } + }, + "VideoBitRate": { + "base": null, + "refs": { + "VideoParameters$BitRate": "

    The bit rate of the video stream in the output file, in kilobits/second. Valid values depend on the values of Level and Profile. If you specify auto, Elastic Transcoder uses the detected bit rate of the input source. If you specify a value other than auto, we recommend that you specify a value less than or equal to the maximum H.264-compliant value listed for your level and profile:

    Level - Maximum video bit rate in kilobits/second (baseline and main Profile) : maximum video bit rate in kilobits/second (high Profile)

    • 1 - 64 : 80
    • 1b - 128 : 160
    • 1.1 - 192 : 240
    • 1.2 - 384 : 480
    • 1.3 - 768 : 960
    • 2 - 2000 : 2500
    • 3 - 10000 : 12500
    • 3.1 - 14000 : 17500
    • 3.2 - 20000 : 25000
    • 4 - 20000 : 25000
    • 4.1 - 50000 : 62500
    " + } + }, + "VideoCodec": { + "base": null, + "refs": { + "VideoParameters$Codec": "

    The video codec for the output file. Valid values include gif, H.264, mpeg2, and vp8. You can only specify vp8 when the container type is webm, gif when the container type is gif, and mpeg2 when the container type is mpg.

    " + } + }, + "VideoParameters": { + "base": "

    The VideoParameters structure.

    ", + "refs": { + "CreatePresetRequest$Video": "

    A section of the request body that specifies the video parameters.

    ", + "Preset$Video": "

    A section of the response body that provides information about the video preset values.

    " + } + }, + "Warning": { + "base": "

    Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

    Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

    ", + "refs": { + "Warnings$member": null + } + }, + "Warnings": { + "base": null, + "refs": { + "CreatePipelineResponse$Warnings": "

    Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

    Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

    ", + "ReadPipelineResponse$Warnings": "

    Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

    Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

    ", + "UpdatePipelineResponse$Warnings": "

    Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

    Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

    " + } + }, + "WatermarkKey": { + "base": null, + "refs": { + "Artwork$InputKey": "

    The name of the file to be used as album art. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by PipelineId; the InputBucket object in that pipeline identifies the bucket.

    If the file name includes a prefix, for example, cooking/pie.jpg, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

    ", + "JobWatermark$InputKey": "

    The name of the .png or .jpg file that you want to use for the watermark. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by Pipeline; the Input Bucket object in that pipeline identifies the bucket.

    If the file name includes a prefix, for example, logos/128x64.png, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

    " + } + }, + "WatermarkSizingPolicy": { + "base": null, + "refs": { + "PresetWatermark$SizingPolicy": "

    A value that controls scaling of the watermark:

    • Fit: Elastic Transcoder scales the watermark so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value.
    • Stretch: Elastic Transcoder stretches the watermark to match the values that you specified for MaxWidth and MaxHeight. If the relative proportions of the watermark and the values of MaxWidth and MaxHeight are different, the watermark will be distorted.
    • ShrinkToFit: Elastic Transcoder scales the watermark down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the watermark up.

    " + } + }, + "ZeroTo255String": { + "base": null, + "refs": { + "Encryption$InitializationVector": "

    The series of random bits created by a random bit generator, unique for every encryption operation, that you used to encrypt your input files or that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.

    ", + "HlsContentProtection$InitializationVector": "

    If Elastic Transcoder is generating your key for you, you must leave this field blank.

    The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

    ", + "PlayReadyDrm$InitializationVector": "

    The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your files. The initialization vector must be base64-encoded, and it must be exactly 8 bytes long before being base64-encoded. If no initialization vector is provided, Elastic Transcoder generates one for you.

    " + } + }, + "ZeroTo512String": { + "base": null, + "refs": { + "HlsContentProtection$LicenseAcquisitionUrl": "

    The location of the license key required to decrypt your HLS playlist. The URL must be an absolute path, and is referenced in the URI attribute of the EXT-X-KEY metadata tag in the playlist file.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,24 @@ +{ + "pagination": { + "ListJobsByPipeline": { + "input_token": "PageToken", + "output_token": "NextPageToken", + "result_key": "Jobs" + }, + "ListJobsByStatus": { + "input_token": "PageToken", + "output_token": "NextPageToken", + "result_key": "Jobs" + }, + "ListPipelines": { + "input_token": "PageToken", + "output_token": "NextPageToken", + "result_key": "Pipelines" + }, + "ListPresets": { + "input_token": "PageToken", + "output_token": "NextPageToken", + "result_key": "Presets" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/elastictranscoder/2012-09-25/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,30 @@ +{ + "version": 2, + "waiters": { + "JobComplete": { + "delay": 30, + "operation": "ReadJob", + "maxAttempts": 120, + "acceptors": [ + { + "expected": "Complete", + "matcher": "path", + 
"state": "success", + "argument": "Job.Status" + }, + { + "expected": "Canceled", + "matcher": "path", + "state": "failure", + "argument": "Job.Status" + }, + { + "expected": "Error", + "matcher": "path", + "state": "failure", + "argument": "Job.Status" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1922 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-12-01", + "endpointPrefix":"email", + "serviceAbbreviation":"Amazon SES", + "serviceFullName":"Amazon Simple Email Service", + "signatureVersion":"v4", + "signingName":"ses", + "xmlNamespace":"http://ses.amazonaws.com/doc/2010-12-01/", + "protocol":"query" + }, + "operations":{ + "CloneReceiptRuleSet":{ + "name":"CloneReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CloneReceiptRuleSetRequest"}, + "output":{ + "shape":"CloneReceiptRuleSetResponse", + "resultWrapper":"CloneReceiptRuleSetResult" + }, + "errors":[ + { + "shape":"RuleSetDoesNotExistException", + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AlreadyExistsException", + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateReceiptFilter":{ + 
"name":"CreateReceiptFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReceiptFilterRequest"}, + "output":{ + "shape":"CreateReceiptFilterResponse", + "resultWrapper":"CreateReceiptFilterResult" + }, + "errors":[ + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AlreadyExistsException", + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateReceiptRule":{ + "name":"CreateReceiptRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReceiptRuleRequest"}, + "output":{ + "shape":"CreateReceiptRuleResponse", + "resultWrapper":"CreateReceiptRuleResult" + }, + "errors":[ + { + "shape":"InvalidSnsTopicException", + "error":{ + "code":"InvalidSnsTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidS3ConfigurationException", + "error":{ + "code":"InvalidS3Configuration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidLambdaFunctionException", + "error":{ + "code":"InvalidLambdaFunction", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AlreadyExistsException", + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RuleDoesNotExistException", + "error":{ + "code":"RuleDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RuleSetDoesNotExistException", + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateReceiptRuleSet":{ + 
"name":"CreateReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateReceiptRuleSetRequest"}, + "output":{ + "shape":"CreateReceiptRuleSetResponse", + "resultWrapper":"CreateReceiptRuleSetResult" + }, + "errors":[ + { + "shape":"AlreadyExistsException", + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteIdentity":{ + "name":"DeleteIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentityRequest"}, + "output":{ + "shape":"DeleteIdentityResponse", + "resultWrapper":"DeleteIdentityResult" + } + }, + "DeleteIdentityPolicy":{ + "name":"DeleteIdentityPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentityPolicyRequest"}, + "output":{ + "shape":"DeleteIdentityPolicyResponse", + "resultWrapper":"DeleteIdentityPolicyResult" + } + }, + "DeleteReceiptFilter":{ + "name":"DeleteReceiptFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReceiptFilterRequest"}, + "output":{ + "shape":"DeleteReceiptFilterResponse", + "resultWrapper":"DeleteReceiptFilterResult" + } + }, + "DeleteReceiptRule":{ + "name":"DeleteReceiptRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReceiptRuleRequest"}, + "output":{ + "shape":"DeleteReceiptRuleResponse", + "resultWrapper":"DeleteReceiptRuleResult" + }, + "errors":[ + { + "shape":"RuleSetDoesNotExistException", + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteReceiptRuleSet":{ + "name":"DeleteReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReceiptRuleSetRequest"}, + "output":{ + 
"shape":"DeleteReceiptRuleSetResponse", + "resultWrapper":"DeleteReceiptRuleSetResult" + }, + "errors":[ + { + "shape":"CannotDeleteException", + "error":{ + "code":"CannotDelete", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteVerifiedEmailAddress":{ + "name":"DeleteVerifiedEmailAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVerifiedEmailAddressRequest"} + }, + "DescribeActiveReceiptRuleSet":{ + "name":"DescribeActiveReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeActiveReceiptRuleSetRequest"}, + "output":{ + "shape":"DescribeActiveReceiptRuleSetResponse", + "resultWrapper":"DescribeActiveReceiptRuleSetResult" + } + }, + "DescribeReceiptRule":{ + "name":"DescribeReceiptRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReceiptRuleRequest"}, + "output":{ + "shape":"DescribeReceiptRuleResponse", + "resultWrapper":"DescribeReceiptRuleResult" + }, + "errors":[ + { + "shape":"RuleDoesNotExistException", + "error":{ + "code":"RuleDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RuleSetDoesNotExistException", + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeReceiptRuleSet":{ + "name":"DescribeReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReceiptRuleSetRequest"}, + "output":{ + "shape":"DescribeReceiptRuleSetResponse", + "resultWrapper":"DescribeReceiptRuleSetResult" + }, + "errors":[ + { + "shape":"RuleSetDoesNotExistException", + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetIdentityDkimAttributes":{ + "name":"GetIdentityDkimAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"GetIdentityDkimAttributesRequest"}, + "output":{ + "shape":"GetIdentityDkimAttributesResponse", + "resultWrapper":"GetIdentityDkimAttributesResult" + } + }, + "GetIdentityNotificationAttributes":{ + "name":"GetIdentityNotificationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityNotificationAttributesRequest"}, + "output":{ + "shape":"GetIdentityNotificationAttributesResponse", + "resultWrapper":"GetIdentityNotificationAttributesResult" + } + }, + "GetIdentityPolicies":{ + "name":"GetIdentityPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityPoliciesRequest"}, + "output":{ + "shape":"GetIdentityPoliciesResponse", + "resultWrapper":"GetIdentityPoliciesResult" + } + }, + "GetIdentityVerificationAttributes":{ + "name":"GetIdentityVerificationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityVerificationAttributesRequest"}, + "output":{ + "shape":"GetIdentityVerificationAttributesResponse", + "resultWrapper":"GetIdentityVerificationAttributesResult" + } + }, + "GetSendQuota":{ + "name":"GetSendQuota", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetSendQuotaResponse", + "resultWrapper":"GetSendQuotaResult" + } + }, + "GetSendStatistics":{ + "name":"GetSendStatistics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetSendStatisticsResponse", + "resultWrapper":"GetSendStatisticsResult" + } + }, + "ListIdentities":{ + "name":"ListIdentities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentitiesRequest"}, + "output":{ + "shape":"ListIdentitiesResponse", + "resultWrapper":"ListIdentitiesResult" + } + }, + "ListIdentityPolicies":{ + "name":"ListIdentityPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentityPoliciesRequest"}, + "output":{ + "shape":"ListIdentityPoliciesResponse", 
+ "resultWrapper":"ListIdentityPoliciesResult" + } + }, + "ListReceiptFilters":{ + "name":"ListReceiptFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReceiptFiltersRequest"}, + "output":{ + "shape":"ListReceiptFiltersResponse", + "resultWrapper":"ListReceiptFiltersResult" + } + }, + "ListReceiptRuleSets":{ + "name":"ListReceiptRuleSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReceiptRuleSetsRequest"}, + "output":{ + "shape":"ListReceiptRuleSetsResponse", + "resultWrapper":"ListReceiptRuleSetsResult" + } + }, + "ListVerifiedEmailAddresses":{ + "name":"ListVerifiedEmailAddresses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"ListVerifiedEmailAddressesResponse", + "resultWrapper":"ListVerifiedEmailAddressesResult" + } + }, + "PutIdentityPolicy":{ + "name":"PutIdentityPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutIdentityPolicyRequest"}, + "output":{ + "shape":"PutIdentityPolicyResponse", + "resultWrapper":"PutIdentityPolicyResult" + }, + "errors":[ + { + "shape":"InvalidPolicyException", + "error":{ + "code":"InvalidPolicy", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ReorderReceiptRuleSet":{ + "name":"ReorderReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReorderReceiptRuleSetRequest"}, + "output":{ + "shape":"ReorderReceiptRuleSetResponse", + "resultWrapper":"ReorderReceiptRuleSetResult" + }, + "errors":[ + { + "shape":"RuleSetDoesNotExistException", + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RuleDoesNotExistException", + "error":{ + "code":"RuleDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SendBounce":{ + "name":"SendBounce", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SendBounceRequest"}, + "output":{ + "shape":"SendBounceResponse", + "resultWrapper":"SendBounceResult" + }, + "errors":[ + { + "shape":"MessageRejected", + "error":{ + "code":"MessageRejected", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SendEmail":{ + "name":"SendEmail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendEmailRequest"}, + "output":{ + "shape":"SendEmailResponse", + "resultWrapper":"SendEmailResult" + }, + "errors":[ + { + "shape":"MessageRejected", + "error":{ + "code":"MessageRejected", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SendRawEmail":{ + "name":"SendRawEmail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendRawEmailRequest"}, + "output":{ + "shape":"SendRawEmailResponse", + "resultWrapper":"SendRawEmailResult" + }, + "errors":[ + { + "shape":"MessageRejected", + "error":{ + "code":"MessageRejected", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetActiveReceiptRuleSet":{ + "name":"SetActiveReceiptRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetActiveReceiptRuleSetRequest"}, + "output":{ + "shape":"SetActiveReceiptRuleSetResponse", + "resultWrapper":"SetActiveReceiptRuleSetResult" + }, + "errors":[ + { + "shape":"RuleSetDoesNotExistException", + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetIdentityDkimEnabled":{ + "name":"SetIdentityDkimEnabled", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityDkimEnabledRequest"}, + "output":{ + "shape":"SetIdentityDkimEnabledResponse", + "resultWrapper":"SetIdentityDkimEnabledResult" + } + }, + "SetIdentityFeedbackForwardingEnabled":{ + "name":"SetIdentityFeedbackForwardingEnabled", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SetIdentityFeedbackForwardingEnabledRequest"}, + "output":{ + "shape":"SetIdentityFeedbackForwardingEnabledResponse", + "resultWrapper":"SetIdentityFeedbackForwardingEnabledResult" + } + }, + "SetIdentityNotificationTopic":{ + "name":"SetIdentityNotificationTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetIdentityNotificationTopicRequest"}, + "output":{ + "shape":"SetIdentityNotificationTopicResponse", + "resultWrapper":"SetIdentityNotificationTopicResult" + } + }, + "SetReceiptRulePosition":{ + "name":"SetReceiptRulePosition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetReceiptRulePositionRequest"}, + "output":{ + "shape":"SetReceiptRulePositionResponse", + "resultWrapper":"SetReceiptRulePositionResult" + }, + "errors":[ + { + "shape":"RuleSetDoesNotExistException", + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RuleDoesNotExistException", + "error":{ + "code":"RuleDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateReceiptRule":{ + "name":"UpdateReceiptRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateReceiptRuleRequest"}, + "output":{ + "shape":"UpdateReceiptRuleResponse", + "resultWrapper":"UpdateReceiptRuleResult" + }, + "errors":[ + { + "shape":"InvalidSnsTopicException", + "error":{ + "code":"InvalidSnsTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidS3ConfigurationException", + "error":{ + "code":"InvalidS3Configuration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidLambdaFunctionException", + "error":{ + "code":"InvalidLambdaFunction", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RuleSetDoesNotExistException", + "error":{ + 
"code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RuleDoesNotExistException", + "error":{ + "code":"RuleDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "VerifyDomainDkim":{ + "name":"VerifyDomainDkim", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyDomainDkimRequest"}, + "output":{ + "shape":"VerifyDomainDkimResponse", + "resultWrapper":"VerifyDomainDkimResult" + } + }, + "VerifyDomainIdentity":{ + "name":"VerifyDomainIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyDomainIdentityRequest"}, + "output":{ + "shape":"VerifyDomainIdentityResponse", + "resultWrapper":"VerifyDomainIdentityResult" + } + }, + "VerifyEmailAddress":{ + "name":"VerifyEmailAddress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyEmailAddressRequest"} + }, + "VerifyEmailIdentity":{ + "name":"VerifyEmailIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"VerifyEmailIdentityRequest"}, + "output":{ + "shape":"VerifyEmailIdentityResponse", + "resultWrapper":"VerifyEmailIdentityResult" + } + } + }, + "shapes":{ + "AddHeaderAction":{ + "type":"structure", + "required":[ + "HeaderName", + "HeaderValue" + ], + "members":{ + "HeaderName":{"shape":"HeaderName"}, + "HeaderValue":{"shape":"HeaderValue"} + } + }, + "Address":{"type":"string"}, + "AddressList":{ + "type":"list", + "member":{"shape":"Address"} + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleOrRuleSetName"} + }, + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AmazonResourceName":{"type":"string"}, + 
"ArrivalDate":{"type":"timestamp"}, + "Body":{ + "type":"structure", + "members":{ + "Text":{"shape":"Content"}, + "Html":{"shape":"Content"} + } + }, + "BounceAction":{ + "type":"structure", + "required":[ + "SmtpReplyCode", + "Message", + "Sender" + ], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"}, + "SmtpReplyCode":{"shape":"BounceSmtpReplyCode"}, + "StatusCode":{"shape":"BounceStatusCode"}, + "Message":{"shape":"BounceMessage"}, + "Sender":{"shape":"Address"} + } + }, + "BounceMessage":{"type":"string"}, + "BounceSmtpReplyCode":{"type":"string"}, + "BounceStatusCode":{"type":"string"}, + "BounceType":{ + "type":"string", + "enum":[ + "DoesNotExist", + "MessageTooLarge", + "ExceededQuota", + "ContentRejected", + "Undefined", + "TemporaryFailure" + ] + }, + "BouncedRecipientInfo":{ + "type":"structure", + "required":["Recipient"], + "members":{ + "Recipient":{"shape":"Address"}, + "RecipientArn":{"shape":"AmazonResourceName"}, + "BounceType":{"shape":"BounceType"}, + "RecipientDsnFields":{"shape":"RecipientDsnFields"} + } + }, + "BouncedRecipientInfoList":{ + "type":"list", + "member":{"shape":"BouncedRecipientInfo"} + }, + "CannotDeleteException":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleOrRuleSetName"} + }, + "error":{ + "code":"CannotDelete", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Charset":{"type":"string"}, + "Cidr":{"type":"string"}, + "CloneReceiptRuleSetRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "OriginalRuleSetName" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "OriginalRuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "CloneReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "Content":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{"shape":"MessageData"}, + "Charset":{"shape":"Charset"} + } + }, + "Counter":{"type":"long"}, + "CreateReceiptFilterRequest":{ + "type":"structure", + 
"required":["Filter"], + "members":{ + "Filter":{"shape":"ReceiptFilter"} + } + }, + "CreateReceiptFilterResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateReceiptRuleRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "Rule" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "After":{"shape":"ReceiptRuleName"}, + "Rule":{"shape":"ReceiptRule"} + } + }, + "CreateReceiptRuleResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateReceiptRuleSetRequest":{ + "type":"structure", + "required":["RuleSetName"], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "CreateReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteIdentityPolicyRequest":{ + "type":"structure", + "required":[ + "Identity", + "PolicyName" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "PolicyName":{"shape":"PolicyName"} + } + }, + "DeleteIdentityPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteIdentityRequest":{ + "type":"structure", + "required":["Identity"], + "members":{ + "Identity":{"shape":"Identity"} + } + }, + "DeleteIdentityResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteReceiptFilterRequest":{ + "type":"structure", + "required":["FilterName"], + "members":{ + "FilterName":{"shape":"ReceiptFilterName"} + } + }, + "DeleteReceiptFilterResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteReceiptRuleRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "RuleName" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "RuleName":{"shape":"ReceiptRuleName"} + } + }, + "DeleteReceiptRuleResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteReceiptRuleSetRequest":{ + "type":"structure", + "required":["RuleSetName"], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "DeleteReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + 
"DeleteVerifiedEmailAddressRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{"shape":"Address"} + } + }, + "DescribeActiveReceiptRuleSetRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeActiveReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + "Metadata":{"shape":"ReceiptRuleSetMetadata"}, + "Rules":{"shape":"ReceiptRulesList"} + } + }, + "DescribeReceiptRuleRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "RuleName" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "RuleName":{"shape":"ReceiptRuleName"} + } + }, + "DescribeReceiptRuleResponse":{ + "type":"structure", + "members":{ + "Rule":{"shape":"ReceiptRule"} + } + }, + "DescribeReceiptRuleSetRequest":{ + "type":"structure", + "required":["RuleSetName"], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "DescribeReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + "Metadata":{"shape":"ReceiptRuleSetMetadata"}, + "Rules":{"shape":"ReceiptRulesList"} + } + }, + "Destination":{ + "type":"structure", + "members":{ + "ToAddresses":{"shape":"AddressList"}, + "CcAddresses":{"shape":"AddressList"}, + "BccAddresses":{"shape":"AddressList"} + } + }, + "DiagnosticCode":{"type":"string"}, + "DkimAttributes":{ + "type":"map", + "key":{"shape":"Identity"}, + "value":{"shape":"IdentityDkimAttributes"} + }, + "Domain":{"type":"string"}, + "DsnAction":{ + "type":"string", + "enum":[ + "failed", + "delayed", + "delivered", + "relayed", + "expanded" + ] + }, + "DsnStatus":{"type":"string"}, + "Enabled":{"type":"boolean"}, + "Explanation":{"type":"string"}, + "ExtensionField":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"ExtensionFieldName"}, + "Value":{"shape":"ExtensionFieldValue"} + } + }, + "ExtensionFieldList":{ + "type":"list", + "member":{"shape":"ExtensionField"} + }, + "ExtensionFieldName":{"type":"string"}, + 
"ExtensionFieldValue":{"type":"string"}, + "GetIdentityDkimAttributesRequest":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"} + } + }, + "GetIdentityDkimAttributesResponse":{ + "type":"structure", + "required":["DkimAttributes"], + "members":{ + "DkimAttributes":{"shape":"DkimAttributes"} + } + }, + "GetIdentityNotificationAttributesRequest":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"} + } + }, + "GetIdentityNotificationAttributesResponse":{ + "type":"structure", + "required":["NotificationAttributes"], + "members":{ + "NotificationAttributes":{"shape":"NotificationAttributes"} + } + }, + "GetIdentityPoliciesRequest":{ + "type":"structure", + "required":[ + "Identity", + "PolicyNames" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "PolicyNames":{"shape":"PolicyNameList"} + } + }, + "GetIdentityPoliciesResponse":{ + "type":"structure", + "required":["Policies"], + "members":{ + "Policies":{"shape":"PolicyMap"} + } + }, + "GetIdentityVerificationAttributesRequest":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"} + } + }, + "GetIdentityVerificationAttributesResponse":{ + "type":"structure", + "required":["VerificationAttributes"], + "members":{ + "VerificationAttributes":{"shape":"VerificationAttributes"} + } + }, + "GetSendQuotaResponse":{ + "type":"structure", + "members":{ + "Max24HourSend":{"shape":"Max24HourSend"}, + "MaxSendRate":{"shape":"MaxSendRate"}, + "SentLast24Hours":{"shape":"SentLast24Hours"} + } + }, + "GetSendStatisticsResponse":{ + "type":"structure", + "members":{ + "SendDataPoints":{"shape":"SendDataPointList"} + } + }, + "HeaderName":{"type":"string"}, + "HeaderValue":{"type":"string"}, + "Identity":{"type":"string"}, + "IdentityDkimAttributes":{ + "type":"structure", + "required":[ + "DkimEnabled", + "DkimVerificationStatus" + ], + "members":{ + 
"DkimEnabled":{"shape":"Enabled"}, + "DkimVerificationStatus":{"shape":"VerificationStatus"}, + "DkimTokens":{"shape":"VerificationTokenList"} + } + }, + "IdentityList":{ + "type":"list", + "member":{"shape":"Identity"} + }, + "IdentityNotificationAttributes":{ + "type":"structure", + "required":[ + "BounceTopic", + "ComplaintTopic", + "DeliveryTopic", + "ForwardingEnabled" + ], + "members":{ + "BounceTopic":{"shape":"NotificationTopic"}, + "ComplaintTopic":{"shape":"NotificationTopic"}, + "DeliveryTopic":{"shape":"NotificationTopic"}, + "ForwardingEnabled":{"shape":"Enabled"} + } + }, + "IdentityType":{ + "type":"string", + "enum":[ + "EmailAddress", + "Domain" + ] + }, + "IdentityVerificationAttributes":{ + "type":"structure", + "required":["VerificationStatus"], + "members":{ + "VerificationStatus":{"shape":"VerificationStatus"}, + "VerificationToken":{"shape":"VerificationToken"} + } + }, + "InvalidLambdaFunctionException":{ + "type":"structure", + "members":{ + "FunctionArn":{"shape":"AmazonResourceName"} + }, + "error":{ + "code":"InvalidLambdaFunction", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidPolicyException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidPolicy", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidS3ConfigurationException":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"S3BucketName"} + }, + "error":{ + "code":"InvalidS3Configuration", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSnsTopicException":{ + "type":"structure", + "members":{ + "Topic":{"shape":"AmazonResourceName"} + }, + "error":{ + "code":"InvalidSnsTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvocationType":{ + "type":"string", + "enum":[ + "Event", + "RequestResponse" + ] + }, + "LambdaAction":{ + "type":"structure", + "required":["FunctionArn"], + "members":{ + 
"TopicArn":{"shape":"AmazonResourceName"}, + "FunctionArn":{"shape":"AmazonResourceName"}, + "InvocationType":{"shape":"InvocationType"} + } + }, + "LastAttemptDate":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ListIdentitiesRequest":{ + "type":"structure", + "members":{ + "IdentityType":{"shape":"IdentityType"}, + "NextToken":{"shape":"NextToken"}, + "MaxItems":{"shape":"MaxItems"} + } + }, + "ListIdentitiesResponse":{ + "type":"structure", + "required":["Identities"], + "members":{ + "Identities":{"shape":"IdentityList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListIdentityPoliciesRequest":{ + "type":"structure", + "required":["Identity"], + "members":{ + "Identity":{"shape":"Identity"} + } + }, + "ListIdentityPoliciesResponse":{ + "type":"structure", + "required":["PolicyNames"], + "members":{ + "PolicyNames":{"shape":"PolicyNameList"} + } + }, + "ListReceiptFiltersRequest":{ + "type":"structure", + "members":{ + } + }, + "ListReceiptFiltersResponse":{ + "type":"structure", + "members":{ + "Filters":{"shape":"ReceiptFilterList"} + } + }, + "ListReceiptRuleSetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"} + } + }, + "ListReceiptRuleSetsResponse":{ + "type":"structure", + "members":{ + "RuleSets":{"shape":"ReceiptRuleSetsLists"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListVerifiedEmailAddressesResponse":{ + "type":"structure", + "members":{ + "VerifiedEmailAddresses":{"shape":"AddressList"} + } + }, + "Max24HourSend":{"type":"double"}, + "MaxItems":{"type":"integer"}, + "MaxSendRate":{"type":"double"}, + "Message":{ + "type":"structure", + "required":[ + "Subject", + "Body" + ], + "members":{ + "Subject":{"shape":"Content"}, + "Body":{"shape":"Body"} + } + }, + "MessageData":{"type":"string"}, + "MessageDsn":{ + "type":"structure", + 
"required":["ReportingMta"], + "members":{ + "ReportingMta":{"shape":"ReportingMta"}, + "ArrivalDate":{"shape":"ArrivalDate"}, + "ExtensionFields":{"shape":"ExtensionFieldList"} + } + }, + "MessageId":{"type":"string"}, + "MessageRejected":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"MessageRejected", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NextToken":{"type":"string"}, + "NotificationAttributes":{ + "type":"map", + "key":{"shape":"Identity"}, + "value":{"shape":"IdentityNotificationAttributes"} + }, + "NotificationTopic":{"type":"string"}, + "NotificationType":{ + "type":"string", + "enum":[ + "Bounce", + "Complaint", + "Delivery" + ] + }, + "Policy":{ + "type":"string", + "min":1 + }, + "PolicyMap":{ + "type":"map", + "key":{"shape":"PolicyName"}, + "value":{"shape":"Policy"} + }, + "PolicyName":{ + "type":"string", + "min":1, + "max":64 + }, + "PolicyNameList":{ + "type":"list", + "member":{"shape":"PolicyName"} + }, + "PutIdentityPolicyRequest":{ + "type":"structure", + "required":[ + "Identity", + "PolicyName", + "Policy" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "PolicyName":{"shape":"PolicyName"}, + "Policy":{"shape":"Policy"} + } + }, + "PutIdentityPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "RawMessage":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{"shape":"RawMessageData"} + } + }, + "RawMessageData":{"type":"blob"}, + "ReceiptAction":{ + "type":"structure", + "members":{ + "S3Action":{"shape":"S3Action"}, + "BounceAction":{"shape":"BounceAction"}, + "WorkmailAction":{"shape":"WorkmailAction"}, + "LambdaAction":{"shape":"LambdaAction"}, + "StopAction":{"shape":"StopAction"}, + "AddHeaderAction":{"shape":"AddHeaderAction"}, + "SNSAction":{"shape":"SNSAction"} + } + }, + "ReceiptActionsList":{ + "type":"list", + "member":{"shape":"ReceiptAction"} + }, + "ReceiptFilter":{ + "type":"structure", + "required":[ + "Name", + "IpFilter" + ], + 
"members":{ + "Name":{"shape":"ReceiptFilterName"}, + "IpFilter":{"shape":"ReceiptIpFilter"} + } + }, + "ReceiptFilterList":{ + "type":"list", + "member":{"shape":"ReceiptFilter"} + }, + "ReceiptFilterName":{"type":"string"}, + "ReceiptFilterPolicy":{ + "type":"string", + "enum":[ + "Block", + "Allow" + ] + }, + "ReceiptIpFilter":{ + "type":"structure", + "required":[ + "Policy", + "Cidr" + ], + "members":{ + "Policy":{"shape":"ReceiptFilterPolicy"}, + "Cidr":{"shape":"Cidr"} + } + }, + "ReceiptRule":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"ReceiptRuleName"}, + "Enabled":{"shape":"Enabled"}, + "TlsPolicy":{"shape":"TlsPolicy"}, + "Recipients":{"shape":"RecipientsList"}, + "Actions":{"shape":"ReceiptActionsList"}, + "ScanEnabled":{"shape":"Enabled"} + } + }, + "ReceiptRuleName":{"type":"string"}, + "ReceiptRuleNamesList":{ + "type":"list", + "member":{"shape":"ReceiptRuleName"} + }, + "ReceiptRuleSetMetadata":{ + "type":"structure", + "members":{ + "Name":{"shape":"ReceiptRuleSetName"}, + "CreatedTimestamp":{"shape":"Timestamp"} + } + }, + "ReceiptRuleSetName":{"type":"string"}, + "ReceiptRuleSetsLists":{ + "type":"list", + "member":{"shape":"ReceiptRuleSetMetadata"} + }, + "ReceiptRulesList":{ + "type":"list", + "member":{"shape":"ReceiptRule"} + }, + "Recipient":{"type":"string"}, + "RecipientDsnFields":{ + "type":"structure", + "required":[ + "Action", + "Status" + ], + "members":{ + "FinalRecipient":{"shape":"Address"}, + "Action":{"shape":"DsnAction"}, + "RemoteMta":{"shape":"RemoteMta"}, + "Status":{"shape":"DsnStatus"}, + "DiagnosticCode":{"shape":"DiagnosticCode"}, + "LastAttemptDate":{"shape":"LastAttemptDate"}, + "ExtensionFields":{"shape":"ExtensionFieldList"} + } + }, + "RecipientsList":{ + "type":"list", + "member":{"shape":"Recipient"} + }, + "RemoteMta":{"type":"string"}, + "ReorderReceiptRuleSetRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "RuleNames" + ], + "members":{ + 
"RuleSetName":{"shape":"ReceiptRuleSetName"}, + "RuleNames":{"shape":"ReceiptRuleNamesList"} + } + }, + "ReorderReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "ReportingMta":{"type":"string"}, + "RuleDoesNotExistException":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleOrRuleSetName"} + }, + "error":{ + "code":"RuleDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "RuleOrRuleSetName":{"type":"string"}, + "RuleSetDoesNotExistException":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleOrRuleSetName"} + }, + "error":{ + "code":"RuleSetDoesNotExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "S3Action":{ + "type":"structure", + "required":["BucketName"], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"}, + "BucketName":{"shape":"S3BucketName"}, + "ObjectKeyPrefix":{"shape":"S3KeyPrefix"}, + "KmsKeyArn":{"shape":"AmazonResourceName"} + } + }, + "S3BucketName":{"type":"string"}, + "S3KeyPrefix":{"type":"string"}, + "SNSAction":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"} + } + }, + "SendBounceRequest":{ + "type":"structure", + "required":[ + "OriginalMessageId", + "BounceSender", + "BouncedRecipientInfoList" + ], + "members":{ + "OriginalMessageId":{"shape":"MessageId"}, + "BounceSender":{"shape":"Address"}, + "Explanation":{"shape":"Explanation"}, + "MessageDsn":{"shape":"MessageDsn"}, + "BouncedRecipientInfoList":{"shape":"BouncedRecipientInfoList"}, + "BounceSenderArn":{"shape":"AmazonResourceName"} + } + }, + "SendBounceResponse":{ + "type":"structure", + "members":{ + "MessageId":{"shape":"MessageId"} + } + }, + "SendDataPoint":{ + "type":"structure", + "members":{ + "Timestamp":{"shape":"Timestamp"}, + "DeliveryAttempts":{"shape":"Counter"}, + "Bounces":{"shape":"Counter"}, + "Complaints":{"shape":"Counter"}, + "Rejects":{"shape":"Counter"} + } + }, + 
"SendDataPointList":{ + "type":"list", + "member":{"shape":"SendDataPoint"} + }, + "SendEmailRequest":{ + "type":"structure", + "required":[ + "Source", + "Destination", + "Message" + ], + "members":{ + "Source":{"shape":"Address"}, + "Destination":{"shape":"Destination"}, + "Message":{"shape":"Message"}, + "ReplyToAddresses":{"shape":"AddressList"}, + "ReturnPath":{"shape":"Address"}, + "SourceArn":{"shape":"AmazonResourceName"}, + "ReturnPathArn":{"shape":"AmazonResourceName"} + } + }, + "SendEmailResponse":{ + "type":"structure", + "required":["MessageId"], + "members":{ + "MessageId":{"shape":"MessageId"} + } + }, + "SendRawEmailRequest":{ + "type":"structure", + "required":["RawMessage"], + "members":{ + "Source":{"shape":"Address"}, + "Destinations":{"shape":"AddressList"}, + "RawMessage":{"shape":"RawMessage"}, + "FromArn":{"shape":"AmazonResourceName"}, + "SourceArn":{"shape":"AmazonResourceName"}, + "ReturnPathArn":{"shape":"AmazonResourceName"} + } + }, + "SendRawEmailResponse":{ + "type":"structure", + "required":["MessageId"], + "members":{ + "MessageId":{"shape":"MessageId"} + } + }, + "SentLast24Hours":{"type":"double"}, + "SetActiveReceiptRuleSetRequest":{ + "type":"structure", + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"} + } + }, + "SetActiveReceiptRuleSetResponse":{ + "type":"structure", + "members":{ + } + }, + "SetIdentityDkimEnabledRequest":{ + "type":"structure", + "required":[ + "Identity", + "DkimEnabled" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "DkimEnabled":{"shape":"Enabled"} + } + }, + "SetIdentityDkimEnabledResponse":{ + "type":"structure", + "members":{ + } + }, + "SetIdentityFeedbackForwardingEnabledRequest":{ + "type":"structure", + "required":[ + "Identity", + "ForwardingEnabled" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "ForwardingEnabled":{"shape":"Enabled"} + } + }, + "SetIdentityFeedbackForwardingEnabledResponse":{ + "type":"structure", + "members":{ + } + }, + 
"SetIdentityNotificationTopicRequest":{ + "type":"structure", + "required":[ + "Identity", + "NotificationType" + ], + "members":{ + "Identity":{"shape":"Identity"}, + "NotificationType":{"shape":"NotificationType"}, + "SnsTopic":{"shape":"NotificationTopic"} + } + }, + "SetIdentityNotificationTopicResponse":{ + "type":"structure", + "members":{ + } + }, + "SetReceiptRulePositionRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "RuleName" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "RuleName":{"shape":"ReceiptRuleName"}, + "After":{"shape":"ReceiptRuleName"} + } + }, + "SetReceiptRulePositionResponse":{ + "type":"structure", + "members":{ + } + }, + "StopAction":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{"shape":"StopScope"}, + "TopicArn":{"shape":"AmazonResourceName"} + } + }, + "StopScope":{ + "type":"string", + "enum":["RuleSet"] + }, + "Timestamp":{"type":"timestamp"}, + "TlsPolicy":{ + "type":"string", + "enum":[ + "Require", + "Optional" + ] + }, + "UpdateReceiptRuleRequest":{ + "type":"structure", + "required":[ + "RuleSetName", + "Rule" + ], + "members":{ + "RuleSetName":{"shape":"ReceiptRuleSetName"}, + "Rule":{"shape":"ReceiptRule"} + } + }, + "UpdateReceiptRuleResponse":{ + "type":"structure", + "members":{ + } + }, + "VerificationAttributes":{ + "type":"map", + "key":{"shape":"Identity"}, + "value":{"shape":"IdentityVerificationAttributes"} + }, + "VerificationStatus":{ + "type":"string", + "enum":[ + "Pending", + "Success", + "Failed", + "TemporaryFailure", + "NotStarted" + ] + }, + "VerificationToken":{"type":"string"}, + "VerificationTokenList":{ + "type":"list", + "member":{"shape":"VerificationToken"} + }, + "VerifyDomainDkimRequest":{ + "type":"structure", + "required":["Domain"], + "members":{ + "Domain":{"shape":"Domain"} + } + }, + "VerifyDomainDkimResponse":{ + "type":"structure", + "required":["DkimTokens"], + "members":{ + "DkimTokens":{"shape":"VerificationTokenList"} + 
} + }, + "VerifyDomainIdentityRequest":{ + "type":"structure", + "required":["Domain"], + "members":{ + "Domain":{"shape":"Domain"} + } + }, + "VerifyDomainIdentityResponse":{ + "type":"structure", + "required":["VerificationToken"], + "members":{ + "VerificationToken":{"shape":"VerificationToken"} + } + }, + "VerifyEmailAddressRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{"shape":"Address"} + } + }, + "VerifyEmailIdentityRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{"shape":"Address"} + } + }, + "VerifyEmailIdentityResponse":{ + "type":"structure", + "members":{ + } + }, + "WorkmailAction":{ + "type":"structure", + "required":["OrganizationArn"], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"}, + "OrganizationArn":{"shape":"AmazonResourceName"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1127 @@ +{ + "version": "2.0", + "operations": { + "CloneReceiptRuleSet": "

    Creates a receipt rule set by cloning an existing one. All receipt rules and configurations are copied to the new receipt rule set and are completely independent of the source rule set.

    For information about setting up rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "CreateReceiptFilter": "

    Creates a new IP address filter.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "CreateReceiptRule": "

    Creates a receipt rule.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "CreateReceiptRuleSet": "

    Creates an empty receipt rule set.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteIdentity": "

    Deletes the specified identity (email address or domain) from the list of verified identities.

    This action is throttled at one request per second.

    ", + "DeleteIdentityPolicy": "

    Deletes the specified sending authorization policy for the given identity (email address or domain). This API returns successfully even if a policy with the specified name does not exist.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteReceiptFilter": "

    Deletes the specified IP address filter.

    For information about managing IP address filters, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteReceiptRule": "

    Deletes the specified receipt rule.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteReceiptRuleSet": "

    Deletes the specified receipt rule set and all of the receipt rules it contains.

    The currently active rule set cannot be deleted.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DeleteVerifiedEmailAddress": "

    Deletes the specified email address from the list of verified addresses.

    The DeleteVerifiedEmailAddress action is deprecated as of the May 15, 2012 release of Domain Verification. The DeleteIdentity action is now preferred.

    This action is throttled at one request per second.

    ", + "DescribeActiveReceiptRuleSet": "

    Returns the metadata and receipt rules for the receipt rule set that is currently active.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DescribeReceiptRule": "

    Returns the details of the specified receipt rule.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "DescribeReceiptRuleSet": "

    Returns the details of the specified receipt rule set.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "GetIdentityDkimAttributes": "

    Returns the current status of Easy DKIM signing for an entity. For domain name identities, this action also returns the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES has successfully verified that these tokens have been published.

    This action takes a list of identities as input and returns the following information for each:

    • Whether Easy DKIM signing is enabled or disabled.
    • A set of DKIM tokens that represent the identity. If the identity is an email address, the tokens represent the domain of that address.
    • Whether Amazon SES has successfully verified the DKIM tokens published in the domain's DNS. This information is only returned for domain name identities, not for email addresses.

    This action is throttled at one request per second and can only get DKIM attributes for up to 100 identities at a time.

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.

    ", + "GetIdentityNotificationAttributes": "

    Given a list of verified identities (email addresses and/or domains), returns a structure describing identity notification attributes.

    This action is throttled at one request per second and can only get notification attributes for up to 100 identities at a time.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    ", + "GetIdentityPolicies": "

    Returns the requested sending authorization policies for the given identity (email address or domain). The policies are returned as a map of policy names to policy contents. You can retrieve a maximum of 20 policies at a time.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "GetIdentityVerificationAttributes": "

    Given a list of identities (email addresses and/or domains), returns the verification status and (for domain identities) the verification token for each identity.

    This action is throttled at one request per second and can only get verification attributes for up to 100 identities at a time.

    ", + "GetSendQuota": "

    Returns the user's current sending limits.

    This action is throttled at one request per second.

    ", + "GetSendStatistics": "

    Returns the user's sending statistics. The result is a list of data points, representing the last two weeks of sending activity.

    Each data point in the list contains statistics for a 15-minute interval.

    This action is throttled at one request per second.

    ", + "ListIdentities": "

    Returns a list containing all of the identities (email addresses and domains) for a specific AWS Account, regardless of verification status.

    This action is throttled at one request per second.

    ", + "ListIdentityPolicies": "

    Returns a list of sending authorization policies that are attached to the given identity (email address or domain). This API returns only a list. If you want the actual policy content, you can use GetIdentityPolicies.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "ListReceiptFilters": "

    Lists the IP address filters associated with your account.

    For information about managing IP address filters, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "ListReceiptRuleSets": "

    Lists the receipt rule sets that exist under your AWS account. If there are additional receipt rule sets to be retrieved, you will receive a NextToken that you can provide to the next call to ListReceiptRuleSets to retrieve the additional entries.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "ListVerifiedEmailAddresses": "

    Returns a list containing all of the email addresses that have been verified.

    The ListVerifiedEmailAddresses action is deprecated as of the May 15, 2012 release of Domain Verification. The ListIdentities action is now preferred.

    This action is throttled at one request per second.

    ", + "PutIdentityPolicy": "

    Adds or updates a sending authorization policy for the specified identity (email address or domain).

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "ReorderReceiptRuleSet": "

    Reorders the receipt rules within a receipt rule set.

    All of the rules in the rule set must be represented in this request. That is, this API will return an error if the reorder request doesn’t explicitly position all of the rules.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "SendBounce": "

    Generates and sends a bounce message to the sender of an email you received through Amazon SES. You can only use this API on an email up to 24 hours after you receive it.

    You cannot use this API to send generic bounces for mail that was not received by Amazon SES.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "SendEmail": "

    Composes an email message based on input data, and then immediately queues the message for sending.

    There are several important points to know about SendEmail:

    • You can only send email from verified email addresses and domains; otherwise, you will get an \"Email address not verified\" error. If your account is still in the Amazon SES sandbox, you must also verify every recipient email address except for the recipients provided by the Amazon SES mailbox simulator. For more information, go to the Amazon SES Developer Guide.
    • The total size of the message cannot exceed 10 MB. This includes any attachments that are part of the message.
    • Amazon SES has a limit on the total number of recipients per message. The combined number of To:, CC: and BCC: email addresses cannot exceed 50. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call Amazon SES repeatedly to send the message to each group.
    • For every message that you send, the total number of recipients (To:, CC: and BCC:) is counted against your sending quota - the maximum number of emails you can send in a 24-hour period. For information about your sending quota, go to the Amazon SES Developer Guide.
    ", + "SendRawEmail": "

    Sends an email message, with header and content specified by the client. The SendRawEmail action is useful for sending multipart MIME emails. The raw text of the message must comply with Internet email standards; otherwise, the message cannot be sent.

    There are several important points to know about SendRawEmail:

    • You can only send email from verified email addresses and domains; otherwise, you will get an \"Email address not verified\" error. If your account is still in the Amazon SES sandbox, you must also verify every recipient email address except for the recipients provided by the Amazon SES mailbox simulator. For more information, go to the Amazon SES Developer Guide.
    • The total size of the message cannot exceed 10 MB. This includes any attachments that are part of the message.
    • Amazon SES has a limit on the total number of recipients per message. The combined number of To:, CC: and BCC: email addresses cannot exceed 50. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call Amazon SES repeatedly to send the message to each group.
    • The To:, CC:, and BCC: headers in the raw message can contain a group list. Note that each recipient in a group list counts towards the 50-recipient limit.
    • For every message that you send, the total number of recipients (To:, CC: and BCC:) is counted against your sending quota - the maximum number of emails you can send in a 24-hour period. For information about your sending quota, go to the Amazon SES Developer Guide.
    • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's \"Source,\" \"From,\" and \"Return-Path\" parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:
      • X-SES-SOURCE-ARN
      • X-SES-FROM-ARN
      • X-SES-RETURN-PATH-ARN
      Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email. For the most common sending authorization use case, we recommend that you specify the SourceIdentityArn and do not specify either the FromIdentityArn or ReturnPathIdentityArn. (The same note applies to the corresponding X-headers.) If you only specify the SourceIdentityArn, Amazon SES will simply set the \"From\" address and the \"Return Path\" address to the identity specified in SourceIdentityArn. For more information about sending authorization, see the Amazon SES Developer Guide.
    ", + "SetActiveReceiptRuleSet": "

    Sets the specified receipt rule set as the active receipt rule set.

    To disable your email-receiving through Amazon SES completely, you can call this API with RuleSetName set to null.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "SetIdentityDkimEnabled": "

    Enables or disables Easy DKIM signing of email sent from an identity:

    • If Easy DKIM signing is enabled for a domain name identity (e.g., example.com), then Amazon SES will DKIM-sign all email sent by addresses under that domain name (e.g., user@example.com).
    • If Easy DKIM signing is enabled for an email address, then Amazon SES will DKIM-sign all email sent by that email address.

    For email addresses (e.g., user@example.com), you can only enable Easy DKIM signing if the corresponding domain (e.g., example.com) has been set up for Easy DKIM using the AWS Console or the VerifyDomainDkim action.

    This action is throttled at one request per second.

    For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.

    ", + "SetIdentityFeedbackForwardingEnabled": "

    Given an identity (email address or domain), enables or disables whether Amazon SES forwards bounce and complaint notifications as email. Feedback forwarding can only be disabled when Amazon Simple Notification Service (Amazon SNS) topics are specified for both bounces and complaints.

    Feedback forwarding does not apply to delivery notifications. Delivery notifications are only available through Amazon SNS.

    This action is throttled at one request per second.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    ", + "SetIdentityNotificationTopic": "

    Given an identity (email address or domain), sets the Amazon Simple Notification Service (Amazon SNS) topic to which Amazon SES will publish bounce, complaint, and/or delivery notifications for emails sent with that identity as the Source.

    Unless feedback forwarding is enabled, you must specify Amazon SNS topics for bounce and complaint notifications. For more information, see SetIdentityFeedbackForwardingEnabled.

    This action is throttled at one request per second.

    For more information about feedback notification, see the Amazon SES Developer Guide.

    ", + "SetReceiptRulePosition": "

    Sets the position of the specified receipt rule in the receipt rule set.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "UpdateReceiptRule": "

    Updates a receipt rule.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    This action is throttled at one request per second.

    ", + "VerifyDomainDkim": "

    Returns a set of DKIM tokens for a domain. DKIM tokens are character strings that represent your domain's identity. Using these tokens, you will need to create DNS CNAME records that point to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually detect that you have updated your DNS records; this detection process may take up to 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign email originating from that domain.

    This action is throttled at one request per second.

    To enable or disable Easy DKIM signing for a domain, use the SetIdentityDkimEnabled action.

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.

    ", + "VerifyDomainIdentity": "

    Verifies a domain.

    This action is throttled at one request per second.

    ", + "VerifyEmailAddress": "

    Verifies an email address. This action causes a confirmation email message to be sent to the specified address.

    The VerifyEmailAddress action is deprecated as of the May 15, 2012 release of Domain Verification. The VerifyEmailIdentity action is now preferred.

    This action is throttled at one request per second.

    ", + "VerifyEmailIdentity": "

    Verifies an email address. This action causes a confirmation email message to be sent to the specified address.

    This action is throttled at one request per second.

    " + }, + "service": "Amazon Simple Email Service

    This is the API Reference for Amazon Simple Email Service (Amazon SES). This documentation is intended to be used in conjunction with the Amazon SES Developer Guide.

    For a list of Amazon SES endpoints to use in service requests, see Regions and Amazon SES in the Amazon SES Developer Guide. ", + "shapes": { + "AddHeaderAction": { + "base": "

    When included in a receipt rule, this action adds a header to the received email.

    For information about adding a header using a receipt rule, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$AddHeaderAction": "

    Adds a header to the received email.

    " + } + }, + "Address": { + "base": null, + "refs": { + "AddressList$member": null, + "BounceAction$Sender": "

    The email address of the sender of the bounced email. This is the address from which the bounce message will be sent.

    ", + "BouncedRecipientInfo$Recipient": "

    The email address of the recipient of the bounced email.

    ", + "DeleteVerifiedEmailAddressRequest$EmailAddress": "

    An email address to be removed from the list of verified addresses.

    ", + "RecipientDsnFields$FinalRecipient": "

    The email address to which the message was ultimately delivered. This corresponds to the Final-Recipient in the DSN. If not specified, FinalRecipient will be set to the Recipient specified in the BouncedRecipientInfo structure. Either FinalRecipient or the recipient in BouncedRecipientInfo must be a recipient of the original bounced message.

    Do not prepend the FinalRecipient email address with rfc 822;, as described in RFC 3798.", + "SendBounceRequest$BounceSender": "

    The address to use in the \"From\" header of the bounce message. This must be an identity that you have verified with Amazon SES.

    ", + "SendEmailRequest$Source": "

    The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide.

    If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide.

    In all cases, the email address must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047.

    ", + "SendEmailRequest$ReturnPath": "

    The email address to which bounces and complaints are to be forwarded when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.

    ", + "SendRawEmailRequest$Source": "

    The identity's email address. If you do not provide a value for this parameter, you must specify a \"From\" address in the raw text of the message. (You can also specify both.)

    By default, the string must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047.

    If you specify the Source parameter and have feedback forwarding enabled, then bounces and complaints will be sent to this email address. This takes precedence over any Return-Path header that you might include in the raw text of the message. ", + "VerifyEmailAddressRequest$EmailAddress": "

    The email address to be verified.

    ", + "VerifyEmailIdentityRequest$EmailAddress": "

    The email address to be verified.

    " + } + }, + "AddressList": { + "base": null, + "refs": { + "Destination$ToAddresses": "

    The To: field(s) of the message.

    ", + "Destination$CcAddresses": "

    The CC: field(s) of the message.

    ", + "Destination$BccAddresses": "

    The BCC: field(s) of the message.

    ", + "ListVerifiedEmailAddressesResponse$VerifiedEmailAddresses": "

    A list of email addresses that have been verified.

    ", + "SendEmailRequest$ReplyToAddresses": "

    The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.

    ", + "SendRawEmailRequest$Destinations": "

    A list of destinations for the message, consisting of To:, CC:, and BCC: addresses.

    " + } + }, + "AlreadyExistsException": { + "base": "

    Indicates that a resource could not be created due to a naming conflict.

    ", + "refs": { + } + }, + "AmazonResourceName": { + "base": null, + "refs": { + "BounceAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "BouncedRecipientInfo$RecipientArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to receive email for the recipient of the bounced email. For more information about sending authorization, see the Amazon SES Developer Guide.

    ", + "InvalidLambdaFunctionException$FunctionArn": null, + "InvalidSnsTopicException$Topic": null, + "LambdaAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "LambdaAction$FunctionArn": "

    The Amazon Resource Name (ARN) of the AWS Lambda function. An example of an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. For more information about AWS Lambda, see the AWS Lambda Developer Guide.

    ", + "S3Action$TopicArn": "

    The ARN of the Amazon SNS topic to notify when the message is saved to the Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "S3Action$KmsKeyArn": "

    The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key you created in AWS KMS as follows:

    • To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) region, the ARN of the default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.
    • To use a custom master key you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the Amazon SES Developer Guide.

    For more information about key policies, see the AWS KMS Developer Guide. If you do not specify a master key, Amazon SES will not encrypt your emails.

    Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the AWS Java SDK and AWS Ruby SDK only. For more information about client-side encryption using AWS KMS master keys, see the Amazon S3 Developer Guide. ", + "SNSAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "SendBounceRequest$BounceSenderArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the address in the \"From\" header of the bounce. For more information about sending authorization, see the Amazon SES Developer Guide.

    ", + "SendEmailRequest$SourceArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    ", + "SendEmailRequest$ReturnPathArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    ", + "SendRawEmailRequest$FromArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to specify a particular \"From\" address in the header of the raw email.

    Instead of using this parameter, you can use the X-header X-SES-FROM-ARN in the raw message of the email. If you use both the FromArn parameter and the corresponding X-header, Amazon SES uses the value of the FromArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide. ", + "SendRawEmailRequest$SourceArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN in the raw message of the email. If you use both the SourceArn parameter and the corresponding X-header, Amazon SES uses the value of the SourceArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide. ", + "SendRawEmailRequest$ReturnPathArn": "

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN in the raw message of the email. If you use both the ReturnPathArn parameter and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide. ", + "StopAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "WorkmailAction$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "WorkmailAction$OrganizationArn": "

    The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7. For information about Amazon WorkMail organizations, see the Amazon WorkMail Administrator Guide.

    " + } + }, + "ArrivalDate": { + "base": null, + "refs": { + "MessageDsn$ArrivalDate": "

    When the message was received by the reporting mail transfer agent (MTA), in RFC 822 date-time format.

    " + } + }, + "Body": { + "base": "

    Represents the body of the message. You can specify text, HTML, or both. If you use both, then the message should display correctly in the widest variety of email clients.

    ", + "refs": { + "Message$Body": "

    The message body.

    " + } + }, + "BounceAction": { + "base": "

    When included in a receipt rule, this action rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For information about sending a bounce message in response to a received email, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$BounceAction": "

    Rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    " + } + }, + "BounceMessage": { + "base": null, + "refs": { + "BounceAction$Message": "

    Human-readable text to include in the bounce message.

    " + } + }, + "BounceSmtpReplyCode": { + "base": null, + "refs": { + "BounceAction$SmtpReplyCode": "

    The SMTP reply code, as defined by RFC 5321.

    " + } + }, + "BounceStatusCode": { + "base": null, + "refs": { + "BounceAction$StatusCode": "

    The SMTP enhanced status code, as defined by RFC 3463.

    " + } + }, + "BounceType": { + "base": null, + "refs": { + "BouncedRecipientInfo$BounceType": "

    The reason for the bounce. You must provide either this parameter or RecipientDsnFields.

    " + } + }, + "BouncedRecipientInfo": { + "base": "

    Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + "BouncedRecipientInfoList$member": null + } + }, + "BouncedRecipientInfoList": { + "base": null, + "refs": { + "SendBounceRequest$BouncedRecipientInfoList": "

    A list of recipients of the bounced message, including the information required to create the Delivery Status Notifications (DSNs) for the recipients. You must specify at least one BouncedRecipientInfo in the list.

    " + } + }, + "CannotDeleteException": { + "base": "

    Indicates that the delete operation could not be completed.

    ", + "refs": { + } + }, + "Charset": { + "base": null, + "refs": { + "Content$Charset": "

    The character set of the content.

    " + } + }, + "Cidr": { + "base": null, + "refs": { + "ReceiptIpFilter$Cidr": "

    A single IP address or a range of IP addresses that you want to block or allow, specified in Classless Inter-Domain Routing (CIDR) notation. An example of a single email address is 10.0.0.1. An example of a range of IP addresses is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317.

    " + } + }, + "CloneReceiptRuleSetRequest": { + "base": null, + "refs": { + } + }, + "CloneReceiptRuleSetResponse": { + "base": null, + "refs": { + } + }, + "Content": { + "base": "

    Represents textual data, plus an optional character set specification.

    By default, the text must be 7-bit ASCII, due to the constraints of the SMTP protocol. If the text must contain any other characters, then you must also specify a character set. Examples include UTF-8, ISO-8859-1, and Shift_JIS.

    ", + "refs": { + "Body$Text": "

    The content of the message, in text format. Use this for text-based email clients, or clients on high-latency networks (such as mobile devices).

    ", + "Body$Html": "

    The content of the message, in HTML format. Use this for email clients that can process HTML. You can include clickable links, formatted text, and much more in an HTML message.

    ", + "Message$Subject": "

    The subject of the message: A short summary of the content, which will appear in the recipient's inbox.

    " + } + }, + "Counter": { + "base": null, + "refs": { + "SendDataPoint$DeliveryAttempts": "

    Number of emails that have been enqueued for sending.

    ", + "SendDataPoint$Bounces": "

    Number of emails that have bounced.

    ", + "SendDataPoint$Complaints": "

    Number of unwanted emails that were rejected by recipients.

    ", + "SendDataPoint$Rejects": "

    Number of emails rejected by Amazon SES.

    " + } + }, + "CreateReceiptFilterRequest": { + "base": null, + "refs": { + } + }, + "CreateReceiptFilterResponse": { + "base": null, + "refs": { + } + }, + "CreateReceiptRuleRequest": { + "base": null, + "refs": { + } + }, + "CreateReceiptRuleResponse": { + "base": null, + "refs": { + } + }, + "CreateReceiptRuleSetRequest": { + "base": null, + "refs": { + } + }, + "CreateReceiptRuleSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteIdentityPolicyRequest": { + "base": "

    Represents a request instructing the service to delete an authorization policy applying to an identity.

    This request succeeds regardless of whether the specified policy exists.

    ", + "refs": { + } + }, + "DeleteIdentityPolicyResponse": { + "base": "

    An empty element. Receiving this element indicates that the request completed successfully.

    ", + "refs": { + } + }, + "DeleteIdentityRequest": { + "base": "

    Represents a request instructing the service to delete an identity from the list of identities for the AWS Account.

    ", + "refs": { + } + }, + "DeleteIdentityResponse": { + "base": "

    An empty element. Receiving this element indicates that the request completed successfully.

    ", + "refs": { + } + }, + "DeleteReceiptFilterRequest": { + "base": null, + "refs": { + } + }, + "DeleteReceiptFilterResponse": { + "base": null, + "refs": { + } + }, + "DeleteReceiptRuleRequest": { + "base": null, + "refs": { + } + }, + "DeleteReceiptRuleResponse": { + "base": null, + "refs": { + } + }, + "DeleteReceiptRuleSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteReceiptRuleSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteVerifiedEmailAddressRequest": { + "base": "

    Represents a request instructing the service to delete an address from the list of verified email addresses.

    ", + "refs": { + } + }, + "DescribeActiveReceiptRuleSetRequest": { + "base": null, + "refs": { + } + }, + "DescribeActiveReceiptRuleSetResponse": { + "base": null, + "refs": { + } + }, + "DescribeReceiptRuleRequest": { + "base": null, + "refs": { + } + }, + "DescribeReceiptRuleResponse": { + "base": null, + "refs": { + } + }, + "DescribeReceiptRuleSetRequest": { + "base": null, + "refs": { + } + }, + "DescribeReceiptRuleSetResponse": { + "base": null, + "refs": { + } + }, + "Destination": { + "base": "

    Represents the destination of the message, consisting of To:, CC:, and BCC: fields.

    By default, the string must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047.

    ", + "refs": { + "SendEmailRequest$Destination": "

    The destination for this email, composed of To:, CC:, and BCC: fields.

    " + } + }, + "DiagnosticCode": { + "base": null, + "refs": { + "RecipientDsnFields$DiagnosticCode": "

    An extended explanation of what went wrong; this is usually an SMTP response. See RFC 3463 for the correct formatting of this parameter.

    " + } + }, + "DkimAttributes": { + "base": null, + "refs": { + "GetIdentityDkimAttributesResponse$DkimAttributes": "

    The DKIM attributes for an email address or a domain.

    " + } + }, + "Domain": { + "base": null, + "refs": { + "VerifyDomainDkimRequest$Domain": "

    The name of the domain to be verified for Easy DKIM signing.

    ", + "VerifyDomainIdentityRequest$Domain": "

    The domain to be verified.

    " + } + }, + "DsnAction": { + "base": null, + "refs": { + "RecipientDsnFields$Action": "

    The action performed by the reporting mail transfer agent (MTA) as a result of its attempt to deliver the message to the recipient address. This is required by RFC 3464.

    " + } + }, + "DsnStatus": { + "base": null, + "refs": { + "RecipientDsnFields$Status": "

    The status code that indicates what went wrong. This is required by RFC 3464.

    " + } + }, + "Enabled": { + "base": null, + "refs": { + "IdentityDkimAttributes$DkimEnabled": "

    True if DKIM signing is enabled for email sent from the identity; false otherwise.

    ", + "IdentityNotificationAttributes$ForwardingEnabled": "

    Describes whether Amazon SES will forward bounce and complaint notifications as email. true indicates that Amazon SES will forward bounce and complaint notifications as email, while false indicates that bounce and complaint notifications will be published only to the specified bounce and complaint Amazon SNS topics.

    ", + "ReceiptRule$Enabled": "

    If true, the receipt rule is active. The default value is true.

    ", + "ReceiptRule$ScanEnabled": "

    If true, then messages to which this receipt rule applies are scanned for spam and viruses. The default value is true.

    ", + "SetIdentityDkimEnabledRequest$DkimEnabled": "

    Sets whether DKIM signing is enabled for an identity. Set to true to enable DKIM signing for this identity; false to disable it.

    ", + "SetIdentityFeedbackForwardingEnabledRequest$ForwardingEnabled": "

    Sets whether Amazon SES will forward bounce and complaint notifications as email. true specifies that Amazon SES will forward bounce and complaint notifications as email, in addition to any Amazon SNS topic publishing otherwise specified. false specifies that Amazon SES will publish bounce and complaint notifications only through Amazon SNS. This value can only be set to false when Amazon SNS topics are set for both Bounce and Complaint notification types.

    " + } + }, + "Explanation": { + "base": null, + "refs": { + "SendBounceRequest$Explanation": "

    Human-readable text for the bounce message to explain the failure. If not specified, the text will be auto-generated based on the bounced recipient information.

    " + } + }, + "ExtensionField": { + "base": "

    Additional X-headers to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + "ExtensionFieldList$member": null + } + }, + "ExtensionFieldList": { + "base": null, + "refs": { + "MessageDsn$ExtensionFields": "

    Additional X-headers to include in the DSN.

    ", + "RecipientDsnFields$ExtensionFields": "

    Additional X-headers to include in the DSN.

    " + } + }, + "ExtensionFieldName": { + "base": null, + "refs": { + "ExtensionField$Name": "

    The name of the header to add. Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.

    " + } + }, + "ExtensionFieldValue": { + "base": null, + "refs": { + "ExtensionField$Value": "

    The value of the header to add. Must be less than 2048 characters, and must not contain newline characters (\"\\r\" or \"\\n\").

    " + } + }, + "GetIdentityDkimAttributesRequest": { + "base": "

    Given a list of verified identities, describes their DKIM attributes. The DKIM attributes of an email address identity includes whether DKIM signing is individually enabled or disabled for that address. The DKIM attributes of a domain name identity includes whether DKIM signing is enabled, as well as the DNS records (tokens) that must remain published in the domain name's DNS.

    ", + "refs": { + } + }, + "GetIdentityDkimAttributesResponse": { + "base": "

    Represents a list of all the DKIM attributes for the specified identity.

    ", + "refs": { + } + }, + "GetIdentityNotificationAttributesRequest": { + "base": null, + "refs": { + } + }, + "GetIdentityNotificationAttributesResponse": { + "base": "

    Describes whether an identity has Amazon Simple Notification Service (Amazon SNS) topics set for bounce, complaint, and/or delivery notifications, and specifies whether feedback forwarding is enabled for bounce and complaint notifications.

    ", + "refs": { + } + }, + "GetIdentityPoliciesRequest": { + "base": "

    Represents a request instructing the service to retrieve the text of a list of authorization policies applying to an identity.

    ", + "refs": { + } + }, + "GetIdentityPoliciesResponse": { + "base": "

    Represents a map of policy names to policies returned from a successful GetIdentityPolicies request.

    ", + "refs": { + } + }, + "GetIdentityVerificationAttributesRequest": { + "base": "

    Represents a request instructing the service to provide the verification attributes for a list of identities.

    ", + "refs": { + } + }, + "GetIdentityVerificationAttributesResponse": { + "base": "

    Represents the verification attributes for a list of identities.

    ", + "refs": { + } + }, + "GetSendQuotaResponse": { + "base": "

    Represents the user's current activity limits returned from a successful GetSendQuota request.

    ", + "refs": { + } + }, + "GetSendStatisticsResponse": { + "base": "

    Represents a list of SendDataPoint items returned from a successful GetSendStatistics request. This list contains aggregated data from the previous two weeks of sending activity.

    ", + "refs": { + } + }, + "HeaderName": { + "base": null, + "refs": { + "AddHeaderAction$HeaderName": "

    The name of the header to add. Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.

    " + } + }, + "HeaderValue": { + "base": null, + "refs": { + "AddHeaderAction$HeaderValue": "

    Must be less than 2048 characters, and must not contain newline characters (\"\\r\" or \"\\n\").

    " + } + }, + "Identity": { + "base": null, + "refs": { + "DeleteIdentityPolicyRequest$Identity": "

    The identity that is associated with the policy that you want to delete. You can specify the identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    ", + "DeleteIdentityRequest$Identity": "

    The identity to be removed from the list of identities for the AWS Account.

    ", + "DkimAttributes$key": null, + "GetIdentityPoliciesRequest$Identity": "

    The identity for which the policies will be retrieved. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    ", + "IdentityList$member": null, + "ListIdentityPoliciesRequest$Identity": "

    The identity that is associated with the policy for which the policies will be listed. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    ", + "NotificationAttributes$key": null, + "PutIdentityPolicyRequest$Identity": "

    The identity to which the policy will apply. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    ", + "SetIdentityDkimEnabledRequest$Identity": "

    The identity for which DKIM signing should be enabled or disabled.

    ", + "SetIdentityFeedbackForwardingEnabledRequest$Identity": "

    The identity for which to set bounce and complaint notification forwarding. Examples: user@example.com, example.com.

    ", + "SetIdentityNotificationTopicRequest$Identity": "

    The identity for which the Amazon SNS topic will be set. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    ", + "VerificationAttributes$key": null + } + }, + "IdentityDkimAttributes": { + "base": "

    Represents the DKIM attributes of a verified email address or a domain.

    ", + "refs": { + "DkimAttributes$value": null + } + }, + "IdentityList": { + "base": null, + "refs": { + "GetIdentityDkimAttributesRequest$Identities": "

    A list of one or more verified identities - email addresses, domains, or both.

    ", + "GetIdentityNotificationAttributesRequest$Identities": "

    A list of one or more identities. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    ", + "GetIdentityVerificationAttributesRequest$Identities": "

    A list of identities.

    ", + "ListIdentitiesResponse$Identities": "

    A list of identities.

    " + } + }, + "IdentityNotificationAttributes": { + "base": "

    Represents the notification attributes of an identity, including whether an identity has Amazon Simple Notification Service (Amazon SNS) topics set for bounce, complaint, and/or delivery notifications, and whether feedback forwarding is enabled for bounce and complaint notifications.

    ", + "refs": { + "NotificationAttributes$value": null + } + }, + "IdentityType": { + "base": null, + "refs": { + "ListIdentitiesRequest$IdentityType": "

    The type of the identities to list. Possible values are \"EmailAddress\" and \"Domain\". If this parameter is omitted, then all identities will be listed.

    " + } + }, + "IdentityVerificationAttributes": { + "base": "

    Represents the verification attributes of a single identity.

    ", + "refs": { + "VerificationAttributes$value": null + } + }, + "InvalidLambdaFunctionException": { + "base": "

    Indicates that the provided AWS Lambda function is invalid, or that Amazon SES could not execute the provided function, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "InvalidPolicyException": { + "base": "

    Indicates that the provided policy is invalid. Check the error stack for more information about what caused the error.

    ", + "refs": { + } + }, + "InvalidS3ConfigurationException": { + "base": "

    Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is invalid, or that Amazon SES could not publish to the bucket, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "InvalidSnsTopicException": { + "base": "

    Indicates that the provided Amazon SNS topic is invalid, or that Amazon SES could not publish to the topic, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "InvocationType": { + "base": null, + "refs": { + "LambdaAction$InvocationType": "

    The invocation type of the AWS Lambda function. An invocation type of RequestResponse means that the execution of the function will immediately result in a response, and a value of Event means that the function will be invoked asynchronously. The default value is Event. For information about AWS Lambda invocation types, see the AWS Lambda Developer Guide.

    There is a 30-second timeout on RequestResponse invocations. You should use Event invocation in most cases. Use RequestResponse only when you want to make a mail flow decision, such as whether to stop the receipt rule or the receipt rule set." + } + }, + "LambdaAction": { + "base": "

    When included in a receipt rule, this action calls an AWS Lambda function and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    To enable Amazon SES to call your AWS Lambda function or to publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about giving permissions, see the Amazon SES Developer Guide.

    For information about using AWS Lambda actions in receipt rules, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$LambdaAction": "

    Calls an AWS Lambda function, and optionally, publishes a notification to Amazon SNS.

    " + } + }, + "LastAttemptDate": { + "base": null, + "refs": { + "RecipientDsnFields$LastAttemptDate": "

    The time the final delivery attempt was made, in RFC 822 date-time format.

    " + } + }, + "LimitExceededException": { + "base": "

    Indicates that a resource could not be created due to service limits. For a list of Amazon SES limits, see the Amazon SES Developer Guide.

    ", + "refs": { + } + }, + "ListIdentitiesRequest": { + "base": "

    Represents a request instructing the service to list all identities for the AWS Account.

    ", + "refs": { + } + }, + "ListIdentitiesResponse": { + "base": "

    Represents a list of all verified identities for the AWS Account.

    ", + "refs": { + } + }, + "ListIdentityPoliciesRequest": { + "base": "

    Represents a request instructing the service to list all authorization policies, by name, applying to an identity.

    ", + "refs": { + } + }, + "ListIdentityPoliciesResponse": { + "base": "

    Represents a list of policy names returned from a successful ListIdentityPolicies request.

    ", + "refs": { + } + }, + "ListReceiptFiltersRequest": { + "base": null, + "refs": { + } + }, + "ListReceiptFiltersResponse": { + "base": null, + "refs": { + } + }, + "ListReceiptRuleSetsRequest": { + "base": null, + "refs": { + } + }, + "ListReceiptRuleSetsResponse": { + "base": null, + "refs": { + } + }, + "ListVerifiedEmailAddressesResponse": { + "base": "

    Represents a list of all the email addresses verified for the current user.

    ", + "refs": { + } + }, + "Max24HourSend": { + "base": null, + "refs": { + "GetSendQuotaResponse$Max24HourSend": "

    The maximum number of emails the user is allowed to send in a 24-hour interval. A value of -1 signifies an unlimited quota.

    " + } + }, + "MaxItems": { + "base": null, + "refs": { + "ListIdentitiesRequest$MaxItems": "

    The maximum number of identities per page. Possible values are 1-1000 inclusive.

    " + } + }, + "MaxSendRate": { + "base": null, + "refs": { + "GetSendQuotaResponse$MaxSendRate": "

    The maximum number of emails that Amazon SES can accept from the user's account per second.

    The rate at which Amazon SES accepts the user's messages might be less than the maximum send rate." + } + }, + "Message": { + "base": "

    Represents the message to be sent, composed of a subject and a body.

    ", + "refs": { + "SendEmailRequest$Message": "

    The message to be sent.

    " + } + }, + "MessageData": { + "base": null, + "refs": { + "Content$Data": "

    The textual data of the content.

    " + } + }, + "MessageDsn": { + "base": "

    Message-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + "SendBounceRequest$MessageDsn": "

    Message-related DSN fields. If not specified, Amazon SES will choose the values.

    " + } + }, + "MessageId": { + "base": null, + "refs": { + "SendBounceRequest$OriginalMessageId": "

    The message ID of the message to be bounced.

    ", + "SendBounceResponse$MessageId": "

    The message ID of the bounce message.

    ", + "SendEmailResponse$MessageId": "

    The unique message identifier returned from the SendEmail action.

    ", + "SendRawEmailResponse$MessageId": "

    The unique message identifier returned from the SendRawEmail action.

    " + } + }, + "MessageRejected": { + "base": "

    Indicates that the action failed, and the message could not be sent. Check the error stack for more information about what caused the error.

    ", + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "ListIdentitiesRequest$NextToken": "

    The token to use for pagination.

    ", + "ListIdentitiesResponse$NextToken": "

    The token used for pagination.

    ", + "ListReceiptRuleSetsRequest$NextToken": "

    A token returned from a previous call to ListReceiptRuleSets to indicate the position in the receipt rule set list.

    ", + "ListReceiptRuleSetsResponse$NextToken": "

    A token indicating that there are additional receipt rule sets available to be listed. Pass this token to successive calls of ListReceiptRuleSets to retrieve up to 100 receipt rule sets at a time.

    " + } + }, + "NotificationAttributes": { + "base": null, + "refs": { + "GetIdentityNotificationAttributesResponse$NotificationAttributes": "

    A map of Identity to IdentityNotificationAttributes.

    " + } + }, + "NotificationTopic": { + "base": null, + "refs": { + "IdentityNotificationAttributes$BounceTopic": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish bounce notifications.

    ", + "IdentityNotificationAttributes$ComplaintTopic": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish complaint notifications.

    ", + "IdentityNotificationAttributes$DeliveryTopic": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish delivery notifications.

    ", + "SetIdentityNotificationTopicRequest$SnsTopic": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic. If the parameter is omitted from the request or a null value is passed, SnsTopic is cleared and publishing is disabled.

    " + } + }, + "NotificationType": { + "base": null, + "refs": { + "SetIdentityNotificationTopicRequest$NotificationType": "

    The type of notifications that will be published to the specified Amazon SNS topic.

    " + } + }, + "Policy": { + "base": "JSON representation of a valid policy.", + "refs": { + "PolicyMap$value": null, + "PutIdentityPolicyRequest$Policy": "

    The text of the policy in JSON format. The policy cannot exceed 4 KB.

    For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide.

    " + } + }, + "PolicyMap": { + "base": null, + "refs": { + "GetIdentityPoliciesResponse$Policies": "

    A map of policy names to policies.

    " + } + }, + "PolicyName": { + "base": "Name of the policy.", + "refs": { + "DeleteIdentityPolicyRequest$PolicyName": "

    The name of the policy to be deleted.

    ", + "PolicyMap$key": null, + "PolicyNameList$member": null, + "PutIdentityPolicyRequest$PolicyName": "

    The name of the policy.

    The policy name cannot exceed 64 characters and can only include alphanumeric characters, dashes, and underscores.

    " + } + }, + "PolicyNameList": { + "base": null, + "refs": { + "GetIdentityPoliciesRequest$PolicyNames": "

    A list of the names of policies to be retrieved. You can retrieve a maximum of 20 policies at a time. If you do not know the names of the policies that are attached to the identity, you can use ListIdentityPolicies.

    ", + "ListIdentityPoliciesResponse$PolicyNames": "

    A list of names of policies that apply to the specified identity.

    " + } + }, + "PutIdentityPolicyRequest": { + "base": "

    Represents a request instructing the service to apply an authorization policy to an identity.

    ", + "refs": { + } + }, + "PutIdentityPolicyResponse": { + "base": "

    An empty element. Receiving this element indicates that the request completed successfully.

    ", + "refs": { + } + }, + "RawMessage": { + "base": "

    Represents the raw data of the message.

    ", + "refs": { + "SendRawEmailRequest$RawMessage": "

    The raw text of the message. The client is responsible for ensuring the following:

    • Message must contain a header and a body, separated by a blank line.
    • All required header fields must be present.
    • Each part of a multipart MIME message must be formatted properly.
    • MIME content types must be among those supported by Amazon SES. For more information, go to the Amazon SES Developer Guide.
    • Content must be base64-encoded, if MIME requires it.

    " + } + }, + "RawMessageData": { + "base": null, + "refs": { + "RawMessage$Data": "

    The raw data of the message. The client must ensure that the message format complies with Internet email standards regarding email header fields, MIME types, MIME encoding, and base64 encoding (if necessary).

    The To:, CC:, and BCC: headers in the raw message can contain a group list.

    If you are using SendRawEmail with sending authorization, you can include X-headers in the raw message to specify the \"Source,\" \"From,\" and \"Return-Path\" addresses. For more information, see the documentation for SendRawEmail.

    Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email.

    For more information, go to the Amazon SES Developer Guide.

    " + } + }, + "ReceiptAction": { + "base": "

    An action that Amazon SES can take when it receives an email on behalf of one or more email addresses or domains that you own. An instance of this data type can represent only one action.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptActionsList$member": null + } + }, + "ReceiptActionsList": { + "base": null, + "refs": { + "ReceiptRule$Actions": "

    An ordered list of actions to perform on messages that match at least one of the recipient email addresses or domains specified in the receipt rule.

    " + } + }, + "ReceiptFilter": { + "base": "

    A receipt IP address filter enables you to specify whether to accept or reject mail originating from an IP address or range of IP addresses.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    ", + "refs": { + "CreateReceiptFilterRequest$Filter": "

    A data structure that describes the IP address filter to create, which consists of a name, an IP address range, and whether to allow or block mail from it.

    ", + "ReceiptFilterList$member": null + } + }, + "ReceiptFilterList": { + "base": null, + "refs": { + "ListReceiptFiltersResponse$Filters": "

    A list of IP address filter data structures, which each consist of a name, an IP address range, and whether to allow or block mail from it.

    " + } + }, + "ReceiptFilterName": { + "base": null, + "refs": { + "DeleteReceiptFilterRequest$FilterName": "

    The name of the IP address filter to delete.

    ", + "ReceiptFilter$Name": "

    The name of the IP address filter. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).
    • Start and end with a letter or number.
    • Contain less than 64 characters.
    " + } + }, + "ReceiptFilterPolicy": { + "base": null, + "refs": { + "ReceiptIpFilter$Policy": "

    Indicates whether to block or allow incoming mail from the specified IP addresses.

    " + } + }, + "ReceiptIpFilter": { + "base": "

    A receipt IP address filter enables you to specify whether to accept or reject mail originating from an IP address or range of IP addresses.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptFilter$IpFilter": "

    A structure that provides the IP addresses to block or allow, and whether to block or allow incoming mail from them.

    " + } + }, + "ReceiptRule": { + "base": "

    Receipt rules enable you to specify which actions Amazon SES should take when it receives mail on behalf of one or more email addresses or domains that you own.

    Each receipt rule defines a set of email addresses or domains to which it applies. If the email addresses or domains match at least one recipient address of the message, Amazon SES executes all of the receipt rule's actions on the message.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    ", + "refs": { + "CreateReceiptRuleRequest$Rule": "

    A data structure that contains the specified rule's name, actions, recipients, domains, enabled status, scan status, and TLS policy.

    ", + "DescribeReceiptRuleResponse$Rule": "

    A data structure that contains the specified receipt rule's name, actions, recipients, domains, enabled status, scan status, and Transport Layer Security (TLS) policy.

    ", + "ReceiptRulesList$member": null, + "UpdateReceiptRuleRequest$Rule": "

    A data structure that contains the updated receipt rule information.

    " + } + }, + "ReceiptRuleName": { + "base": null, + "refs": { + "CreateReceiptRuleRequest$After": "

    The name of an existing rule after which the new rule will be placed. If this parameter is null, the new rule will be inserted at the beginning of the rule list.

    ", + "DeleteReceiptRuleRequest$RuleName": "

    The name of the receipt rule to delete.

    ", + "DescribeReceiptRuleRequest$RuleName": "

    The name of the receipt rule.

    ", + "ReceiptRule$Name": "

    The name of the receipt rule. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).
    • Start and end with a letter or number.
    • Contain less than 64 characters.
    ", + "ReceiptRuleNamesList$member": null, + "SetReceiptRulePositionRequest$RuleName": "

    The name of the receipt rule to reposition.

    ", + "SetReceiptRulePositionRequest$After": "

    The name of the receipt rule after which to place the specified receipt rule.

    " + } + }, + "ReceiptRuleNamesList": { + "base": null, + "refs": { + "ReorderReceiptRuleSetRequest$RuleNames": "

    A list of the specified receipt rule set's receipt rules in the order that you want to put them.

    " + } + }, + "ReceiptRuleSetMetadata": { + "base": "

    Information about a receipt rule set.

    A receipt rule set is a collection of rules that specify what Amazon SES should do with mail it receives on behalf of your account's verified domains.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    ", + "refs": { + "DescribeActiveReceiptRuleSetResponse$Metadata": "

    The metadata for the currently active receipt rule set. The metadata consists of the rule set name and a timestamp of when the rule set was created.

    ", + "DescribeReceiptRuleSetResponse$Metadata": "

    The metadata for the receipt rule set, which consists of the rule set name and the timestamp of when the rule set was created.

    ", + "ReceiptRuleSetsLists$member": null + } + }, + "ReceiptRuleSetName": { + "base": null, + "refs": { + "CloneReceiptRuleSetRequest$RuleSetName": "

    The name of the rule set to create. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).
    • Start and end with a letter or number.
    • Contain less than 64 characters.
    ", + "CloneReceiptRuleSetRequest$OriginalRuleSetName": "

    The name of the rule set to clone.

    ", + "CreateReceiptRuleRequest$RuleSetName": "

    The name of the rule set to which to add the rule.

    ", + "CreateReceiptRuleSetRequest$RuleSetName": "

    The name of the rule set to create. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).
    • Start and end with a letter or number.
    • Contain less than 64 characters.
    ", + "DeleteReceiptRuleRequest$RuleSetName": "

    The name of the receipt rule set that contains the receipt rule to delete.

    ", + "DeleteReceiptRuleSetRequest$RuleSetName": "

    The name of the receipt rule set to delete.

    ", + "DescribeReceiptRuleRequest$RuleSetName": "

    The name of the receipt rule set to which the receipt rule belongs.

    ", + "DescribeReceiptRuleSetRequest$RuleSetName": "

    The name of the receipt rule set to describe.

    ", + "ReceiptRuleSetMetadata$Name": "

    The name of the receipt rule set. The name must:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-).
    • Start and end with a letter or number.
    • Contain less than 64 characters.
    ", + "ReorderReceiptRuleSetRequest$RuleSetName": "

    The name of the receipt rule set to reorder.

    ", + "SetActiveReceiptRuleSetRequest$RuleSetName": "

    The name of the receipt rule set to make active. Setting this value to null disables all email receiving.

    ", + "SetReceiptRulePositionRequest$RuleSetName": "

    The name of the receipt rule set that contains the receipt rule to reposition.

    ", + "UpdateReceiptRuleRequest$RuleSetName": "

    The name of the receipt rule set to which the receipt rule belongs.

    " + } + }, + "ReceiptRuleSetsLists": { + "base": null, + "refs": { + "ListReceiptRuleSetsResponse$RuleSets": "

    The metadata for the currently active receipt rule set. The metadata consists of the rule set name and the timestamp of when the rule set was created.

    " + } + }, + "ReceiptRulesList": { + "base": null, + "refs": { + "DescribeActiveReceiptRuleSetResponse$Rules": "

    The receipt rules that belong to the active rule set.

    ", + "DescribeReceiptRuleSetResponse$Rules": "

    A list of the receipt rules that belong to the specified receipt rule set.

    " + } + }, + "Recipient": { + "base": null, + "refs": { + "RecipientsList$member": null + } + }, + "RecipientDsnFields": { + "base": "

    Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    ", + "refs": { + "BouncedRecipientInfo$RecipientDsnFields": "

    Recipient-related DSN fields, most of which would normally be filled in automatically when provided with a BounceType. You must provide either this parameter or BounceType.

    " + } + }, + "RecipientsList": { + "base": null, + "refs": { + "ReceiptRule$Recipients": "

    The recipient domains and email addresses to which the receipt rule applies. If this field is not specified, this rule will match all recipients under all verified domains.

    " + } + }, + "RemoteMta": { + "base": null, + "refs": { + "RecipientDsnFields$RemoteMta": "

    The MTA to which the remote MTA attempted to deliver the message, formatted as specified in RFC 3464 (mta-name-type; mta-name). This parameter typically applies only to propagating synchronous bounces.

    " + } + }, + "ReorderReceiptRuleSetRequest": { + "base": null, + "refs": { + } + }, + "ReorderReceiptRuleSetResponse": { + "base": null, + "refs": { + } + }, + "ReportingMta": { + "base": null, + "refs": { + "MessageDsn$ReportingMta": "

    The reporting MTA that attempted to deliver the message, formatted as specified in RFC 3464 (mta-name-type; mta-name). The default value is dns; inbound-smtp.[region].amazonaws.com.

    " + } + }, + "RuleDoesNotExistException": { + "base": "

    Indicates that the provided receipt rule does not exist.

    ", + "refs": { + } + }, + "RuleOrRuleSetName": { + "base": null, + "refs": { + "AlreadyExistsException$Name": null, + "CannotDeleteException$Name": null, + "RuleDoesNotExistException$Name": null, + "RuleSetDoesNotExistException$Name": null + } + }, + "RuleSetDoesNotExistException": { + "base": "

    Indicates that the provided receipt rule set does not exist.

    ", + "refs": { + } + }, + "S3Action": { + "base": "

    When included in a receipt rule, this action saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    To enable Amazon SES to write emails to your Amazon S3 bucket, use an AWS KMS key to encrypt your emails, or publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about giving permissions, see the Amazon SES Developer Guide.

    When you save your emails to an Amazon S3 bucket, the maximum email size (including headers) is 30 MB. Emails larger than that will bounce.

    For information about specifying Amazon S3 actions in receipt rules, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$S3Action": "

    Saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon SNS.

    " + } + }, + "S3BucketName": { + "base": null, + "refs": { + "InvalidS3ConfigurationException$Bucket": null, + "S3Action$BucketName": "

    The name of the Amazon S3 bucket to which to save the received email.

    " + } + }, + "S3KeyPrefix": { + "base": null, + "refs": { + "S3Action$ObjectKeyPrefix": "

    The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory name that enables you to store similar data under the same directory in a bucket.

    " + } + }, + "SNSAction": { + "base": "

    When included in a receipt rule, this action publishes a notification to Amazon Simple Notification Service (Amazon SNS). This action includes a complete copy of the email content in the Amazon SNS notifications. Amazon SNS notifications for all other actions simply provide information about the email. They do not include the email content itself.

    If you own the Amazon SNS topic, you don't need to do anything to give Amazon SES permission to publish emails to it. However, if you don't own the Amazon SNS topic, you need to attach a policy to the topic to give Amazon SES permissions to access it. For information about giving permissions, see the Amazon SES Developer Guide.

    You can only publish emails that are 150 KB or less (including the header) to Amazon SNS. Larger emails will bounce. If you anticipate emails larger than 150 KB, use the S3 action instead.

    For information about using a receipt rule to publish an Amazon SNS notification, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$SNSAction": "

    Publishes the email content within a notification to Amazon SNS.

    " + } + }, + "SendBounceRequest": { + "base": "

    Request object for sending a simple/complex bounce. It contains all of the information needed to generate a basic DSN or a fully-customized DSN.

    ", + "refs": { + } + }, + "SendBounceResponse": { + "base": null, + "refs": { + } + }, + "SendDataPoint": { + "base": "

    Represents sending statistics data. Each SendDataPoint contains statistics for a 15-minute period of sending activity.

    ", + "refs": { + "SendDataPointList$member": null + } + }, + "SendDataPointList": { + "base": null, + "refs": { + "GetSendStatisticsResponse$SendDataPoints": "

    A list of data points, each of which represents 15 minutes of activity.

    " + } + }, + "SendEmailRequest": { + "base": "

    Represents a request instructing the service to send a single email message.

    This datatype can be used in application code to compose a message consisting of source, destination, message, reply-to, and return-path parts. This object can then be sent using the SendEmail action.

    ", + "refs": { + } + }, + "SendEmailResponse": { + "base": "

    Represents a unique message ID returned from a successful SendEmail request.

    ", + "refs": { + } + }, + "SendRawEmailRequest": { + "base": "

    Represents a request instructing the service to send a raw email message.

    This datatype can be used in application code to compose a message consisting of source, destination, and raw message text. This object can then be sent using the SendRawEmail action.

    ", + "refs": { + } + }, + "SendRawEmailResponse": { + "base": "

    Represents a unique message ID returned from a successful SendRawEmail request.

    ", + "refs": { + } + }, + "SentLast24Hours": { + "base": null, + "refs": { + "GetSendQuotaResponse$SentLast24Hours": "

    The number of emails sent during the previous 24 hours.

    " + } + }, + "SetActiveReceiptRuleSetRequest": { + "base": null, + "refs": { + } + }, + "SetActiveReceiptRuleSetResponse": { + "base": null, + "refs": { + } + }, + "SetIdentityDkimEnabledRequest": { + "base": "

    Represents a request instructing the service to enable or disable DKIM signing for an identity.

    ", + "refs": { + } + }, + "SetIdentityDkimEnabledResponse": { + "base": "

    An empty element. Receiving this element indicates that the request completed successfully.

    ", + "refs": { + } + }, + "SetIdentityFeedbackForwardingEnabledRequest": { + "base": null, + "refs": { + } + }, + "SetIdentityFeedbackForwardingEnabledResponse": { + "base": "

    An empty element. Receiving this element indicates that the request completed successfully.

    ", + "refs": { + } + }, + "SetIdentityNotificationTopicRequest": { + "base": "

    Represents a request to set or clear an identity's notification topic.

    ", + "refs": { + } + }, + "SetIdentityNotificationTopicResponse": { + "base": "

    An empty element. Receiving this element indicates that the request completed successfully.

    ", + "refs": { + } + }, + "SetReceiptRulePositionRequest": { + "base": null, + "refs": { + } + }, + "SetReceiptRulePositionResponse": { + "base": null, + "refs": { + } + }, + "StopAction": { + "base": "

    When included in a receipt rule, this action terminates the evaluation of the receipt rule set and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For information about setting a stop action in a receipt rule, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$StopAction": "

    Terminates the evaluation of the receipt rule set and optionally publishes a notification to Amazon SNS.

    " + } + }, + "StopScope": { + "base": null, + "refs": { + "StopAction$Scope": "

    The scope to which the Stop action applies. That is, what is being stopped.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ReceiptRuleSetMetadata$CreatedTimestamp": "

    The date and time the receipt rule set was created.

    ", + "SendDataPoint$Timestamp": "

    Time of the data point.

    " + } + }, + "TlsPolicy": { + "base": null, + "refs": { + "ReceiptRule$TlsPolicy": "

    Specifies whether Amazon SES should require that incoming email is delivered over a connection encrypted with Transport Layer Security (TLS). If this parameter is set to Require, Amazon SES will bounce emails that are not received over TLS. The default is Optional.

    " + } + }, + "UpdateReceiptRuleRequest": { + "base": null, + "refs": { + } + }, + "UpdateReceiptRuleResponse": { + "base": null, + "refs": { + } + }, + "VerificationAttributes": { + "base": null, + "refs": { + "GetIdentityVerificationAttributesResponse$VerificationAttributes": "

    A map of Identities to IdentityVerificationAttributes objects.

    " + } + }, + "VerificationStatus": { + "base": null, + "refs": { + "IdentityDkimAttributes$DkimVerificationStatus": "

    Describes whether Amazon SES has successfully verified the DKIM DNS records (tokens) published in the domain name's DNS. (This only applies to domain identities, not email address identities.)

    ", + "IdentityVerificationAttributes$VerificationStatus": "

    The verification status of the identity: \"Pending\", \"Success\", \"Failed\", or \"TemporaryFailure\".

    " + } + }, + "VerificationToken": { + "base": null, + "refs": { + "IdentityVerificationAttributes$VerificationToken": "

    The verification token for a domain identity. Null for email address identities.

    ", + "VerificationTokenList$member": null, + "VerifyDomainIdentityResponse$VerificationToken": "

    A TXT record that must be placed in the DNS settings for the domain, in order to complete domain verification.

    " + } + }, + "VerificationTokenList": { + "base": null, + "refs": { + "IdentityDkimAttributes$DkimTokens": "

    A set of character strings that represent the domain's identity. Using these tokens, you will need to create DNS CNAME records that point to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually detect that you have updated your DNS records; this detection process may take up to 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign email originating from that domain. (This only applies to domain identities, not email address identities.)

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.

    ", + "VerifyDomainDkimResponse$DkimTokens": "

    A set of character strings that represent the domain's identity. If the identity is an email address, the tokens represent the domain of that address.

    Using these tokens, you will need to create DNS CNAME records that point to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually detect that you have updated your DNS records; this detection process may take up to 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign emails originating from that domain.

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.

    " + } + }, + "VerifyDomainDkimRequest": { + "base": "

    Represents a request instructing the service to begin DKIM verification for a domain.

    ", + "refs": { + } + }, + "VerifyDomainDkimResponse": { + "base": "

    Represents the DNS records that must be published in the domain name's DNS to complete DKIM setup.

    ", + "refs": { + } + }, + "VerifyDomainIdentityRequest": { + "base": "

    Represents a request instructing the service to begin domain verification.

    ", + "refs": { + } + }, + "VerifyDomainIdentityResponse": { + "base": "

    Represents a token used for domain ownership verification.

    ", + "refs": { + } + }, + "VerifyEmailAddressRequest": { + "base": "

    Represents a request instructing the service to begin email address verification.

    ", + "refs": { + } + }, + "VerifyEmailIdentityRequest": { + "base": "

    Represents a request instructing the service to begin email address verification.

    ", + "refs": { + } + }, + "VerifyEmailIdentityResponse": { + "base": "

    An empty element. Receiving this element indicates that the request completed successfully.

    ", + "refs": { + } + }, + "WorkmailAction": { + "base": "

    When included in a receipt rule, this action calls Amazon WorkMail and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS). You will typically not use this action directly because Amazon WorkMail adds the rule automatically during its setup procedure.

    For information using a receipt rule to call Amazon WorkMail, see the Amazon SES Developer Guide.

    ", + "refs": { + "ReceiptAction$WorkmailAction": "

    Calls Amazon WorkMail and, optionally, publishes a notification to Amazon SNS.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "pagination": { + "ListIdentities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxItems", + "result_key": "Identities" + }, + "ListVerifiedEmailAddresses": { + "result_key": "VerifiedEmailAddresses" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/email/2010-12-01/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "version": 2, + "waiters": { + "IdentityExists": { + "delay": 3, + "operation": "GetIdentityVerificationAttributes", + "maxAttempts": 20, + "acceptors": [ + { + "expected": "Success", + "matcher": "pathAll", + "state": "success", + "argument": "VerificationAttributes.*.VerificationStatus" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/api-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,764 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-01-01", + "endpointPrefix":"es", + "serviceFullName":"Amazon Elasticsearch Service", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "AddTags":{ + "name":"AddTags", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/tags" + }, + "input":{"shape":"AddTagsRequest"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateElasticsearchDomain":{ + "name":"CreateElasticsearchDomain", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/domain" + }, + "input":{"shape":"CreateElasticsearchDomainRequest"}, + "output":{"shape":"CreateElasticsearchDomainResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"DisabledOperationException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + 
"shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteElasticsearchDomain":{ + "name":"DeleteElasticsearchDomain", + "http":{ + "method":"DELETE", + "requestUri":"/2015-01-01/es/domain/{DomainName}" + }, + "input":{"shape":"DeleteElasticsearchDomainRequest"}, + "output":{"shape":"DeleteElasticsearchDomainResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DescribeElasticsearchDomain":{ + "name":"DescribeElasticsearchDomain", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/es/domain/{DomainName}" + }, + "input":{"shape":"DescribeElasticsearchDomainRequest"}, + "output":{"shape":"DescribeElasticsearchDomainResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DescribeElasticsearchDomainConfig":{ + "name":"DescribeElasticsearchDomainConfig", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/es/domain/{DomainName}/config" + }, + "input":{"shape":"DescribeElasticsearchDomainConfigRequest"}, + "output":{"shape":"DescribeElasticsearchDomainConfigResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + 
"error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DescribeElasticsearchDomains":{ + "name":"DescribeElasticsearchDomains", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/domain-info" + }, + "input":{"shape":"DescribeElasticsearchDomainsRequest"}, + "output":{"shape":"DescribeElasticsearchDomainsResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListDomainNames":{ + "name":"ListDomainNames", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/domain" + }, + "output":{"shape":"ListDomainNamesResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListTags":{ + "name":"ListTags", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/tags/" + }, + "input":{"shape":"ListTagsRequest"}, + "output":{"shape":"ListTagsResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RemoveTags":{ + "name":"RemoveTags", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/tags-removal" + }, + "input":{"shape":"RemoveTagsRequest"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateElasticsearchDomainConfig":{ + "name":"UpdateElasticsearchDomainConfig", + 
"http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/domain/{DomainName}/config" + }, + "input":{"shape":"UpdateElasticsearchDomainConfigRequest"}, + "output":{"shape":"UpdateElasticsearchDomainConfigResponse"}, + "errors":[ + { + "shape":"BaseException", + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidTypeException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ValidationException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "ARN":{"type":"string"}, + "AccessPoliciesStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"PolicyDocument"}, + "Status":{"shape":"OptionStatus"} + } + }, + "AddTagsRequest":{ + "type":"structure", + "required":[ + "ARN", + "TagList" + ], + "members":{ + "ARN":{"shape":"ARN"}, + "TagList":{"shape":"TagList"} + } + }, + "AdvancedOptions":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "AdvancedOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"AdvancedOptions"}, + "Status":{"shape":"OptionStatus"} + } + }, + "BaseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "CreateElasticsearchDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "ElasticsearchClusterConfig":{"shape":"ElasticsearchClusterConfig"}, + "EBSOptions":{"shape":"EBSOptions"}, + "AccessPolicies":{"shape":"PolicyDocument"}, + "SnapshotOptions":{"shape":"SnapshotOptions"}, + 
"AdvancedOptions":{"shape":"AdvancedOptions"} + } + }, + "CreateElasticsearchDomainResponse":{ + "type":"structure", + "members":{ + "DomainStatus":{"shape":"ElasticsearchDomainStatus"} + } + }, + "DeleteElasticsearchDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteElasticsearchDomainResponse":{ + "type":"structure", + "members":{ + "DomainStatus":{"shape":"ElasticsearchDomainStatus"} + } + }, + "DescribeElasticsearchDomainConfigRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DescribeElasticsearchDomainConfigResponse":{ + "type":"structure", + "required":["DomainConfig"], + "members":{ + "DomainConfig":{"shape":"ElasticsearchDomainConfig"} + } + }, + "DescribeElasticsearchDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DescribeElasticsearchDomainResponse":{ + "type":"structure", + "required":["DomainStatus"], + "members":{ + "DomainStatus":{"shape":"ElasticsearchDomainStatus"} + } + }, + "DescribeElasticsearchDomainsRequest":{ + "type":"structure", + "required":["DomainNames"], + "members":{ + "DomainNames":{"shape":"DomainNameList"} + } + }, + "DescribeElasticsearchDomainsResponse":{ + "type":"structure", + "required":["DomainStatusList"], + "members":{ + "DomainStatusList":{"shape":"ElasticsearchDomainStatusList"} + } + }, + "DisabledOperationException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DomainId":{ + "type":"string", + "min":1, + "max":64 + }, + "DomainInfo":{ + "type":"structure", + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DomainInfoList":{ + "type":"list", + 
"member":{"shape":"DomainInfo"} + }, + "DomainName":{ + "type":"string", + "min":3, + "max":28, + "pattern":"[a-z][a-z0-9\\-]+" + }, + "DomainNameList":{ + "type":"list", + "member":{"shape":"DomainName"} + }, + "EBSOptions":{ + "type":"structure", + "members":{ + "EBSEnabled":{"shape":"Boolean"}, + "VolumeType":{"shape":"VolumeType"}, + "VolumeSize":{"shape":"IntegerClass"}, + "Iops":{"shape":"IntegerClass"} + } + }, + "EBSOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"EBSOptions"}, + "Status":{"shape":"OptionStatus"} + } + }, + "ESPartitionInstanceType":{ + "type":"string", + "enum":[ + "m3.medium.elasticsearch", + "m3.large.elasticsearch", + "m3.xlarge.elasticsearch", + "m3.2xlarge.elasticsearch", + "t2.micro.elasticsearch", + "t2.small.elasticsearch", + "t2.medium.elasticsearch", + "r3.large.elasticsearch", + "r3.xlarge.elasticsearch", + "r3.2xlarge.elasticsearch", + "r3.4xlarge.elasticsearch", + "r3.8xlarge.elasticsearch", + "i2.xlarge.elasticsearch", + "i2.2xlarge.elasticsearch" + ] + }, + "ElasticsearchClusterConfig":{ + "type":"structure", + "members":{ + "InstanceType":{"shape":"ESPartitionInstanceType"}, + "InstanceCount":{"shape":"IntegerClass"}, + "DedicatedMasterEnabled":{"shape":"Boolean"}, + "ZoneAwarenessEnabled":{"shape":"Boolean"}, + "DedicatedMasterType":{"shape":"ESPartitionInstanceType"}, + "DedicatedMasterCount":{"shape":"IntegerClass"} + } + }, + "ElasticsearchClusterConfigStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"ElasticsearchClusterConfig"}, + "Status":{"shape":"OptionStatus"} + } + }, + "ElasticsearchDomainConfig":{ + "type":"structure", + "members":{ + "ElasticsearchClusterConfig":{"shape":"ElasticsearchClusterConfigStatus"}, + "EBSOptions":{"shape":"EBSOptionsStatus"}, + "AccessPolicies":{"shape":"AccessPoliciesStatus"}, + "SnapshotOptions":{"shape":"SnapshotOptionsStatus"}, + 
"AdvancedOptions":{"shape":"AdvancedOptionsStatus"} + } + }, + "ElasticsearchDomainStatus":{ + "type":"structure", + "required":[ + "DomainId", + "DomainName", + "ARN", + "ElasticsearchClusterConfig" + ], + "members":{ + "DomainId":{"shape":"DomainId"}, + "DomainName":{"shape":"DomainName"}, + "ARN":{"shape":"ARN"}, + "Created":{"shape":"Boolean"}, + "Deleted":{"shape":"Boolean"}, + "Endpoint":{"shape":"ServiceUrl"}, + "Processing":{"shape":"Boolean"}, + "ElasticsearchClusterConfig":{"shape":"ElasticsearchClusterConfig"}, + "EBSOptions":{"shape":"EBSOptions"}, + "AccessPolicies":{"shape":"PolicyDocument"}, + "SnapshotOptions":{"shape":"SnapshotOptions"}, + "AdvancedOptions":{"shape":"AdvancedOptions"} + } + }, + "ElasticsearchDomainStatusList":{ + "type":"list", + "member":{"shape":"ElasticsearchDomainStatus"} + }, + "ErrorMessage":{"type":"string"}, + "IntegerClass":{"type":"integer"}, + "InternalException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvalidTypeException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ListDomainNamesResponse":{ + "type":"structure", + "members":{ + "DomainNames":{"shape":"DomainInfoList"} + } + }, + "ListTagsRequest":{ + "type":"structure", + "required":["ARN"], + "members":{ + "ARN":{ + "shape":"ARN", + "location":"querystring", + "locationName":"arn" + } + } + }, + "ListTagsResponse":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "OptionState":{ + "type":"string", + "enum":[ + "RequiresIndexDocuments", + "Processing", + "Active" + ] + }, + "OptionStatus":{ + "type":"structure", + "required":[ + "CreationDate", + "UpdateDate", + "State" + ], + "members":{ + "CreationDate":{"shape":"UpdateTimestamp"}, + "UpdateDate":{"shape":"UpdateTimestamp"}, + 
"UpdateVersion":{"shape":"UIntValue"}, + "State":{"shape":"OptionState"}, + "PendingDeletion":{"shape":"Boolean"} + } + }, + "PolicyDocument":{"type":"string"}, + "RemoveTagsRequest":{ + "type":"structure", + "required":[ + "ARN", + "TagKeys" + ], + "members":{ + "ARN":{"shape":"ARN"}, + "TagKeys":{"shape":"StringList"} + } + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ServiceUrl":{"type":"string"}, + "SnapshotOptions":{ + "type":"structure", + "members":{ + "AutomatedSnapshotStartHour":{"shape":"IntegerClass"} + } + }, + "SnapshotOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"SnapshotOptions"}, + "Status":{"shape":"OptionStatus"} + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "min":1, + "max":128 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{ + "type":"string", + "min":0, + "max":256 + }, + "UIntValue":{ + "type":"integer", + "min":0 + }, + "UpdateElasticsearchDomainConfigRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + }, + "ElasticsearchClusterConfig":{"shape":"ElasticsearchClusterConfig"}, + "EBSOptions":{"shape":"EBSOptions"}, + "SnapshotOptions":{"shape":"SnapshotOptions"}, + "AdvancedOptions":{"shape":"AdvancedOptions"}, + "AccessPolicies":{"shape":"PolicyDocument"} + } + }, + "UpdateElasticsearchDomainConfigResponse":{ + "type":"structure", + 
"required":["DomainConfig"], + "members":{ + "DomainConfig":{"shape":"ElasticsearchDomainConfig"} + } + }, + "UpdateTimestamp":{"type":"timestamp"}, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "VolumeType":{ + "type":"string", + "enum":[ + "standard", + "gp2", + "io1" + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/es/2015-01-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,401 @@ +{ + "version": "2.0", + "operations": { + "AddTags": "

    Attaches tags to an existing Elasticsearch domain. Tags are a set of case-sensitive key value pairs. An Elasticsearch domain may have up to 10 tags. See Tagging Amazon Elasticsearch Service Domains for more information.

    ", + "CreateElasticsearchDomain": "

    Creates a new Elasticsearch domain. For more information, see Creating Elasticsearch Domains in the Amazon Elasticsearch Service Developer Guide.

    ", + "DeleteElasticsearchDomain": "

    Permanently deletes the specified Elasticsearch domain and all of its data. Once a domain is deleted, it cannot be recovered.

    ", + "DescribeElasticsearchDomain": "

    Returns domain configuration information about the specified Elasticsearch domain, including the domain ID, domain endpoint, and domain ARN.

    ", + "DescribeElasticsearchDomainConfig": "

    Provides cluster configuration information about the specified Elasticsearch domain, such as the state, creation date, update version, and update date for cluster options.

    ", + "DescribeElasticsearchDomains": "

    Returns domain configuration information about the specified Elasticsearch domains, including the domain ID, domain endpoint, and domain ARN.

    ", + "ListDomainNames": "

    Returns the name of all Elasticsearch domains owned by the current user's account.

    ", + "ListTags": "

    Returns all tags for the given Elasticsearch domain.

    ", + "RemoveTags": "

    Removes the specified set of tags from the specified Elasticsearch domain.

    ", + "UpdateElasticsearchDomainConfig": "

    Modifies the cluster configuration of the specified Elasticsearch domain, such as setting the instance type and the number of instances.

    " + }, + "service": "Amazon Elasticsearch Configuration Service

    Use the Amazon Elasticsearch configuration API to create, configure, and manage Elasticsearch domains.

    The endpoint for configuration service requests is region-specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.

    ", + "shapes": { + "ARN": { + "base": "

    The Amazon Resource Name (ARN) of the Elasticsearch domain. See Identifiers for IAM Entities in Using AWS Identity and Access Management for more information.

    ", + "refs": { + "AddTagsRequest$ARN": "

    Specify the ARN for which you want to add the tags.

    ", + "ElasticsearchDomainStatus$ARN": "

    The Amazon resource name (ARN) of an Elasticsearch domain. See Identifiers for IAM Entities in Using AWS Identity and Access Management for more information.

    ", + "ListTagsRequest$ARN": "

    Specify the ARN for the Elasticsearch domain to which the tags are attached that you want to view.

    ", + "RemoveTagsRequest$ARN": "

    Specifies the ARN for the Elasticsearch domain from which you want to delete the specified tags.

    " + } + }, + "AccessPoliciesStatus": { + "base": "

    The configured access rules for the domain's document and search endpoints, and the current status of those rules.

    ", + "refs": { + "ElasticsearchDomainConfig$AccessPolicies": "

    IAM access policy as a JSON-formatted string.

    " + } + }, + "AddTagsRequest": { + "base": "

    Container for the parameters to the AddTags operation. Specify the tags that you want to attach to the Elasticsearch domain.

    ", + "refs": { + } + }, + "AdvancedOptions": { + "base": "

    Exposes select native Elasticsearch configuration values from elasticsearch.yml. Currently, the following advanced options are available:

    • Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.
    • Option to specify the percentage of heap space that is allocated to field data. By default, this setting is unbounded.

    For more information, see Configuring Advanced Options.

    ", + "refs": { + "AdvancedOptionsStatus$Options": "

    Specifies the status of advanced options for the specified Elasticsearch domain.

    ", + "CreateElasticsearchDomainRequest$AdvancedOptions": "

    Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.

    ", + "ElasticsearchDomainStatus$AdvancedOptions": "

    Specifies the status of the AdvancedOptions

    ", + "UpdateElasticsearchDomainConfigRequest$AdvancedOptions": "

    Modifies the advanced option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.

    " + } + }, + "AdvancedOptionsStatus": { + "base": "

    Status of the advanced options for the specified Elasticsearch domain. Currently, the following advanced options are available:

    • Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Configuration Advanced Options for more information.
    • Option to specify the percentage of heap space that is allocated to field data. By default, this setting is unbounded.

    For more information, see Configuring Advanced Options.

    ", + "refs": { + "ElasticsearchDomainConfig$AdvancedOptions": "

    Specifies the AdvancedOptions for the domain. See Configuring Advanced Options for more information.

    " + } + }, + "BaseException": { + "base": "

    An error occurred while processing the request.

    ", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "EBSOptions$EBSEnabled": "

    Specifies whether EBS-based storage is enabled.

    ", + "ElasticsearchClusterConfig$DedicatedMasterEnabled": "

    A boolean value to indicate whether a dedicated master node is enabled. See About Dedicated Master Nodes for more information.

    ", + "ElasticsearchClusterConfig$ZoneAwarenessEnabled": "

    A boolean value to indicate whether zone awareness is enabled. See About Zone Awareness for more information.

    ", + "ElasticsearchDomainStatus$Created": "

    The domain creation status. True if the creation of an Elasticsearch domain is complete. False if domain creation is still in progress.

    ", + "ElasticsearchDomainStatus$Deleted": "

    The domain deletion status. True if a delete request has been received for the domain but resource cleanup is still in progress. False if the domain has not been deleted. Once domain deletion is complete, the status of the domain is no longer returned.

    ", + "ElasticsearchDomainStatus$Processing": "

    The status of the Elasticsearch domain configuration. True if Amazon Elasticsearch Service is processing configuration changes. False if the configuration is active.

    ", + "OptionStatus$PendingDeletion": "

    Indicates whether the Elasticsearch domain is being deleted.

    " + } + }, + "CreateElasticsearchDomainRequest": { + "base": null, + "refs": { + } + }, + "CreateElasticsearchDomainResponse": { + "base": "

    The result of a CreateElasticsearchDomain operation. Contains the status of the newly created Elasticsearch domain.

    ", + "refs": { + } + }, + "DeleteElasticsearchDomainRequest": { + "base": "

    Container for the parameters to the DeleteElasticsearchDomain operation. Specifies the name of the Elasticsearch domain that you want to delete.

    ", + "refs": { + } + }, + "DeleteElasticsearchDomainResponse": { + "base": "

    The result of a DeleteElasticsearchDomain request. Contains the status of the pending deletion, or no status if the domain and all of its resources have been deleted.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainConfigRequest": { + "base": "

    Container for the parameters to the DescribeElasticsearchDomainConfig operation. Specifies the domain name for which you want configuration information.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainConfigResponse": { + "base": "

    The result of a DescribeElasticsearchDomainConfig request. Contains the configuration information of the requested domain.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainRequest": { + "base": "

    Container for the parameters to the DescribeElasticsearchDomain operation.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainResponse": { + "base": "

    The result of a DescribeElasticsearchDomain request. Contains the status of the domain specified in the request.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainsRequest": { + "base": "

    Container for the parameters to the DescribeElasticsearchDomains operation. By default, the API returns the status of all Elasticsearch domains.

    ", + "refs": { + } + }, + "DescribeElasticsearchDomainsResponse": { + "base": "

    The result of a DescribeElasticsearchDomains request. Contains the status of the specified domains or all domains owned by the account.

    ", + "refs": { + } + }, + "DisabledOperationException": { + "base": "

    An error occurred because the client attempted to access an unsupported operation. Gives http status code of 409.

    ", + "refs": { + } + }, + "DomainId": { + "base": "

    Unique identifier for an Elasticsearch domain.

    ", + "refs": { + "ElasticsearchDomainStatus$DomainId": "

    The unique identifier for the specified Elasticsearch domain.

    " + } + }, + "DomainInfo": { + "base": null, + "refs": { + "DomainInfoList$member": null + } + }, + "DomainInfoList": { + "base": "

    Contains the list of Elasticsearch domain information.

    ", + "refs": { + "ListDomainNamesResponse$DomainNames": "

    List of Elasticsearch domain names.

    " + } + }, + "DomainName": { + "base": "

    The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

    ", + "refs": { + "CreateElasticsearchDomainRequest$DomainName": "

    The name of the Elasticsearch domain that you are creating. Domain names are unique across the domains owned by an account within an AWS region. Domain names must start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

    ", + "DeleteElasticsearchDomainRequest$DomainName": "

    The name of the Elasticsearch domain that you want to permanently delete.

    ", + "DescribeElasticsearchDomainConfigRequest$DomainName": "

    The Elasticsearch domain that you want to get information about.

    ", + "DescribeElasticsearchDomainRequest$DomainName": "

    The name of the Elasticsearch domain for which you want information.

    ", + "DomainInfo$DomainName": "

    Specifies the DomainName.

    ", + "DomainNameList$member": null, + "ElasticsearchDomainStatus$DomainName": "

    The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

    ", + "UpdateElasticsearchDomainConfigRequest$DomainName": "

    The name of the Elasticsearch domain that you are updating.

    " + } + }, + "DomainNameList": { + "base": "

    A list of Elasticsearch domain names.

    ", + "refs": { + "DescribeElasticsearchDomainsRequest$DomainNames": "

    The Elasticsearch domains for which you want information.

    " + } + }, + "EBSOptions": { + "base": "

    Options to enable, disable, and specify the properties of EBS storage volumes. For more information, see Configuring EBS-based Storage.

    ", + "refs": { + "CreateElasticsearchDomainRequest$EBSOptions": "

    Options to enable, disable and specify the type and size of EBS storage volumes.

    ", + "EBSOptionsStatus$Options": "

    Specifies the EBS options for the specified Elasticsearch domain.

    ", + "ElasticsearchDomainStatus$EBSOptions": "

    The EBSOptions for the specified domain. See Configuring EBS-based Storage for more information.

    ", + "UpdateElasticsearchDomainConfigRequest$EBSOptions": "

    Specify the type and size of the EBS volume that you want to use.

    " + } + }, + "EBSOptionsStatus": { + "base": "

    Status of the EBS options for the specified Elasticsearch domain.

    ", + "refs": { + "ElasticsearchDomainConfig$EBSOptions": "

    Specifies the EBSOptions for the Elasticsearch domain.

    " + } + }, + "ESPartitionInstanceType": { + "base": null, + "refs": { + "ElasticsearchClusterConfig$InstanceType": "

    The instance type for an Elasticsearch cluster.

    ", + "ElasticsearchClusterConfig$DedicatedMasterType": "

    The instance type for a dedicated master node.

    " + } + }, + "ElasticsearchClusterConfig": { + "base": "

    Specifies the configuration for the domain cluster, such as the type and number of instances.

    ", + "refs": { + "CreateElasticsearchDomainRequest$ElasticsearchClusterConfig": "

    Configuration options for an Elasticsearch domain. Specifies the instance type and number of instances in the domain cluster.

    ", + "ElasticsearchClusterConfigStatus$Options": "

    Specifies the cluster configuration for the specified Elasticsearch domain.

    ", + "ElasticsearchDomainStatus$ElasticsearchClusterConfig": "

    The type and number of instances in the domain cluster.

    ", + "UpdateElasticsearchDomainConfigRequest$ElasticsearchClusterConfig": "

    The type and number of instances to instantiate for the domain cluster.

    " + } + }, + "ElasticsearchClusterConfigStatus": { + "base": "

    Specifies the configuration status for the specified Elasticsearch domain.

    ", + "refs": { + "ElasticsearchDomainConfig$ElasticsearchClusterConfig": "

    Specifies the ElasticsearchClusterConfig for the Elasticsearch domain.

    " + } + }, + "ElasticsearchDomainConfig": { + "base": "

    The configuration of an Elasticsearch domain.

    ", + "refs": { + "DescribeElasticsearchDomainConfigResponse$DomainConfig": "

    The configuration information of the domain requested in the DescribeElasticsearchDomainConfig request.

    ", + "UpdateElasticsearchDomainConfigResponse$DomainConfig": "

    The status of the updated Elasticsearch domain.

    " + } + }, + "ElasticsearchDomainStatus": { + "base": "

    The current status of an Elasticsearch domain.

    ", + "refs": { + "CreateElasticsearchDomainResponse$DomainStatus": "

    The status of the newly created Elasticsearch domain.

    ", + "DeleteElasticsearchDomainResponse$DomainStatus": "

    The status of the Elasticsearch domain being deleted.

    ", + "DescribeElasticsearchDomainResponse$DomainStatus": "

    The current status of the Elasticsearch domain.

    ", + "ElasticsearchDomainStatusList$member": null + } + }, + "ElasticsearchDomainStatusList": { + "base": "

    A list that contains the status of each requested Elasticsearch domain.

    ", + "refs": { + "DescribeElasticsearchDomainsResponse$DomainStatusList": "

    The status of the domains requested in the DescribeElasticsearchDomains request.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "BaseException$message": "

    A description of the error.

    " + } + }, + "IntegerClass": { + "base": null, + "refs": { + "EBSOptions$VolumeSize": "

    Integer to specify the size of an EBS volume.

    ", + "EBSOptions$Iops": "

    Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).

    ", + "ElasticsearchClusterConfig$InstanceCount": "

    The number of instances in the specified domain cluster.

    ", + "ElasticsearchClusterConfig$DedicatedMasterCount": "

    Total number of dedicated master nodes, active and on standby, for the cluster.

    ", + "SnapshotOptions$AutomatedSnapshotStartHour": "

    Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is 0 hours.

    " + } + }, + "InternalException": { + "base": "

    The request processing has failed because of an unknown error, exception or failure (the failure is internal to the service) . Gives http status code of 500.

    ", + "refs": { + } + }, + "InvalidTypeException": { + "base": "

    An exception for trying to create or access sub-resource that is either invalid or not supported. Gives http status code of 409.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    An exception for trying to create more than allowed resources or sub-resources. Gives http status code of 409.

    ", + "refs": { + } + }, + "ListDomainNamesResponse": { + "base": "

    The result of a ListDomainNames operation. Contains the names of all Elasticsearch domains owned by this account.

    ", + "refs": { + } + }, + "ListTagsRequest": { + "base": "

    Container for the parameters to the ListTags operation. Specify the ARN of the Elasticsearch domain to which the tags that you want to view are attached.

    ", + "refs": { + } + }, + "ListTagsResponse": { + "base": "

    The result of a ListTags operation. Contains tags for all requested Elasticsearch domains.

    ", + "refs": { + } + }, + "OptionState": { + "base": "

    The state of a requested change. One of the following:

    • Processing: The request change is still in-process.
    • Active: The request change is processed and deployed to the Elasticsearch domain.
    ", + "refs": { + "OptionStatus$State": "

    Provides the OptionState for the Elasticsearch domain.

    " + } + }, + "OptionStatus": { + "base": "

    Provides the current status of the entity.

    ", + "refs": { + "AccessPoliciesStatus$Status": "

    The status of the access policy for the Elasticsearch domain. See OptionStatus for the status information that's included.

    ", + "AdvancedOptionsStatus$Status": "

    Specifies the status of OptionStatus for advanced options for the specified Elasticsearch domain.

    ", + "EBSOptionsStatus$Status": "

    Specifies the status of the EBS options for the specified Elasticsearch domain.

    ", + "ElasticsearchClusterConfigStatus$Status": "

    Specifies the status of the configuration for the specified Elasticsearch domain.

    ", + "SnapshotOptionsStatus$Status": "

    Specifies the status of a daily automated snapshot.

    " + } + }, + "PolicyDocument": { + "base": "

    Access policy rules for an Elasticsearch domain service endpoints. For more information, see Configuring Access Policies in the Amazon Elasticsearch Service Developer Guide. The maximum size of a policy document is 100 KB.

    ", + "refs": { + "AccessPoliciesStatus$Options": "

    The access policy configured for the Elasticsearch domain. Access policies may be resource-based, IP-based, or IAM-based. See Configuring Access Policies for more information.

    ", + "CreateElasticsearchDomainRequest$AccessPolicies": "

    IAM access policy as a JSON-formatted string.

    ", + "ElasticsearchDomainStatus$AccessPolicies": "

    IAM access policy as a JSON-formatted string.

    ", + "UpdateElasticsearchDomainConfigRequest$AccessPolicies": "

    IAM access policy as a JSON-formatted string.

    " + } + }, + "RemoveTagsRequest": { + "base": "

    Container for the parameters to the RemoveTags operation. Specify the ARN for the Elasticsearch domain from which you want to remove the specified TagKey.

    ", + "refs": { + } + }, + "ResourceAlreadyExistsException": { + "base": "

    An exception for creating a resource that already exists. Gives http status code of 409.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    An exception for accessing or deleting a resource that does not exist. Gives http status code of 409.

    ", + "refs": { + } + }, + "ServiceUrl": { + "base": "

    The endpoint to which service requests are submitted. For example, search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com or doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.es.amazonaws.com.

    ", + "refs": { + "ElasticsearchDomainStatus$Endpoint": "

    The Elasticsearch domain endpoint that you use to submit index and search requests.

    " + } + }, + "SnapshotOptions": { + "base": "

    Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is 0 hours.

    ", + "refs": { + "CreateElasticsearchDomainRequest$SnapshotOptions": "

    Option to set time, in UTC format, of the daily automated snapshot. Default value is 0 hours.

    ", + "ElasticsearchDomainStatus$SnapshotOptions": "

    Specifies the status of the SnapshotOptions

    ", + "SnapshotOptionsStatus$Options": "

    Specifies the daily snapshot options specified for the Elasticsearch domain.

    ", + "UpdateElasticsearchDomainConfigRequest$SnapshotOptions": "

    Option to set the time, in UTC format, for the daily automated snapshot. Default value is 0 hours.

    " + } + }, + "SnapshotOptionsStatus": { + "base": "

    Status of a daily automated snapshot.

    ", + "refs": { + "ElasticsearchDomainConfig$SnapshotOptions": "

    Specifies the SnapshotOptions for the Elasticsearch domain.

    " + } + }, + "String": { + "base": null, + "refs": { + "AdvancedOptions$key": null, + "AdvancedOptions$value": null, + "StringList$member": null + } + }, + "StringList": { + "base": null, + "refs": { + "RemoveTagsRequest$TagKeys": "

    Specifies the TagKey list which you want to remove from the Elasticsearch domain.

    " + } + }, + "Tag": { + "base": "

    Specifies a key value pair for a resource tag.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": "

    A string of length from 1 to 128 characters that specifies the key for a Tag. Tag keys must be unique for the Elasticsearch domain to which they are attached.

    ", + "refs": { + "Tag$Key": "

    Specifies the TagKey, the name of the tag. Tag keys must be unique for the Elasticsearch domain to which they are attached.

    " + } + }, + "TagList": { + "base": "

    A list of Tag

    ", + "refs": { + "AddTagsRequest$TagList": "

    List of Tag that need to be added for the Elasticsearch domain.

    ", + "ListTagsResponse$TagList": "

    List of Tag for the requested Elasticsearch domain.

    " + } + }, + "TagValue": { + "base": "

    A string of length from 0 to 256 characters that specifies the value for a Tag. Tag values can be null and do not have to be unique in a tag set.

    ", + "refs": { + "Tag$Value": "

    Specifies the TagValue, the value assigned to the corresponding tag key. Tag values can be null and do not have to be unique in a tag set. For example, you can have a key value pair in a tag set of project : Trinity and cost-center : Trinity

    " + } + }, + "UIntValue": { + "base": null, + "refs": { + "OptionStatus$UpdateVersion": "

    Specifies the latest version for the entity.

    " + } + }, + "UpdateElasticsearchDomainConfigRequest": { + "base": "

    Container for the parameters to the UpdateElasticsearchDomain operation. Specifies the type and number of instances in the domain cluster.

    ", + "refs": { + } + }, + "UpdateElasticsearchDomainConfigResponse": { + "base": "

    The result of an UpdateElasticsearchDomain request. Contains the status of the Elasticsearch domain being updated.

    ", + "refs": { + } + }, + "UpdateTimestamp": { + "base": null, + "refs": { + "OptionStatus$CreationDate": "

    Timestamp which tells the creation date for the entity.

    ", + "OptionStatus$UpdateDate": "

    Timestamp which tells the last updated time for the entity.

    " + } + }, + "ValidationException": { + "base": "

    An exception for missing / invalid input fields. Gives http status code of 400.

    ", + "refs": { + } + }, + "VolumeType": { + "base": "

    The type of EBS volume, standard, gp2, or io1. See Configuring EBS-based Storage for more information.

    ", + "refs": { + "EBSOptions$VolumeType": "

    Specifies the volume type for EBS-based storage.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,643 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-02-03", + "endpointPrefix":"events", + "jsonVersion":"1.1", + "serviceFullName":"Amazon CloudWatch Events", + "signatureVersion":"v4", + "targetPrefix":"AWSEvents", + "protocol":"json" + }, + "operations":{ + "DeleteRule":{ + "name":"DeleteRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleRequest"}, + "errors":[ + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "DescribeRule":{ + "name":"DescribeRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRuleRequest"}, + "output":{"shape":"DescribeRuleResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "DisableRule":{ + "name":"DisableRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableRuleRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "EnableRule":{ + "name":"EnableRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"EnableRuleRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListRuleNamesByTarget":{ + "name":"ListRuleNamesByTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleNamesByTargetRequest"}, + "output":{"shape":"ListRuleNamesByTargetResponse"}, + "errors":[ + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListRules":{ + "name":"ListRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRulesRequest"}, + "output":{"shape":"ListRulesResponse"}, + "errors":[ + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "ListTargetsByRule":{ + "name":"ListTargetsByRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTargetsByRuleRequest"}, + "output":{"shape":"ListTargetsByRuleResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutEvents":{ + "name":"PutEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEventsRequest"}, + "output":{"shape":"PutEventsResponse"}, + "errors":[ + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutRule":{ + "name":"PutRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRuleRequest"}, + "output":{"shape":"PutRuleResponse"}, + "errors":[ + { + "shape":"InvalidEventPatternException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "PutTargets":{ + 
"name":"PutTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutTargetsRequest"}, + "output":{"shape":"PutTargetsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "RemoveTargets":{ + "name":"RemoveTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTargetsRequest"}, + "output":{"shape":"RemoveTargetsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + }, + "TestEventPattern":{ + "name":"TestEventPattern", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestEventPatternRequest"}, + "output":{"shape":"TestEventPatternResponse"}, + "errors":[ + { + "shape":"InvalidEventPatternException", + "exception":true + }, + { + "shape":"InternalException", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "Boolean":{"type":"boolean"}, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DeleteRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "DescribeRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "DescribeRuleResponse":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleName"}, + "Arn":{"shape":"RuleArn"}, + "EventPattern":{"shape":"EventPattern"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + 
"DisableRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "EnableRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"} + } + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "EventId":{"type":"string"}, + "EventPattern":{ + "type":"string", + "max":2048 + }, + "EventResource":{"type":"string"}, + "EventResourceList":{ + "type":"list", + "member":{"shape":"EventResource"} + }, + "EventTime":{"type":"timestamp"}, + "Integer":{"type":"integer"}, + "InternalException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "InvalidEventPatternException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LimitMax100":{ + "type":"integer", + "min":1, + "max":100 + }, + "ListRuleNamesByTargetRequest":{ + "type":"structure", + "required":["TargetArn"], + "members":{ + "TargetArn":{"shape":"TargetArn"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListRuleNamesByTargetResponse":{ + "type":"structure", + "members":{ + "RuleNames":{"shape":"RuleNameList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListRulesRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{"shape":"RuleName"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListRulesResponse":{ + "type":"structure", + "members":{ + "Rules":{"shape":"RuleResponseList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTargetsByRuleRequest":{ + "type":"structure", + "required":["Rule"], + "members":{ + "Rule":{"shape":"RuleName"}, + "NextToken":{"shape":"NextToken"}, + "Limit":{"shape":"LimitMax100"} + } + }, + "ListTargetsByRuleResponse":{ + "type":"structure", + "members":{ + "Targets":{"shape":"TargetList"}, + "NextToken":{"shape":"NextToken"} + } + }, + 
"NextToken":{ + "type":"string", + "min":1, + "max":2048 + }, + "PutEventsRequest":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{"shape":"PutEventsRequestEntryList"} + } + }, + "PutEventsRequestEntry":{ + "type":"structure", + "members":{ + "Time":{"shape":"EventTime"}, + "Source":{"shape":"String"}, + "Resources":{"shape":"EventResourceList"}, + "DetailType":{"shape":"String"}, + "Detail":{"shape":"String"} + } + }, + "PutEventsRequestEntryList":{ + "type":"list", + "member":{"shape":"PutEventsRequestEntry"}, + "min":1, + "max":10 + }, + "PutEventsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "Entries":{"shape":"PutEventsResultEntryList"} + } + }, + "PutEventsResultEntry":{ + "type":"structure", + "members":{ + "EventId":{"shape":"EventId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "PutEventsResultEntryList":{ + "type":"list", + "member":{"shape":"PutEventsResultEntry"} + }, + "PutRuleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"RuleName"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "EventPattern":{"shape":"EventPattern"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + "PutRuleResponse":{ + "type":"structure", + "members":{ + "RuleArn":{"shape":"RuleArn"} + } + }, + "PutTargetsRequest":{ + "type":"structure", + "required":[ + "Rule", + "Targets" + ], + "members":{ + "Rule":{"shape":"RuleName"}, + "Targets":{"shape":"TargetList"} + } + }, + "PutTargetsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "FailedEntries":{"shape":"PutTargetsResultEntryList"} + } + }, + "PutTargetsResultEntry":{ + "type":"structure", + "members":{ + "TargetId":{"shape":"TargetId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + 
"PutTargetsResultEntryList":{ + "type":"list", + "member":{"shape":"PutTargetsResultEntry"} + }, + "RemoveTargetsRequest":{ + "type":"structure", + "required":[ + "Rule", + "Ids" + ], + "members":{ + "Rule":{"shape":"RuleName"}, + "Ids":{"shape":"TargetIdList"} + } + }, + "RemoveTargetsResponse":{ + "type":"structure", + "members":{ + "FailedEntryCount":{"shape":"Integer"}, + "FailedEntries":{"shape":"RemoveTargetsResultEntryList"} + } + }, + "RemoveTargetsResultEntry":{ + "type":"structure", + "members":{ + "TargetId":{"shape":"TargetId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "RemoveTargetsResultEntryList":{ + "type":"list", + "member":{"shape":"RemoveTargetsResultEntry"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RoleArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "Rule":{ + "type":"structure", + "members":{ + "Name":{"shape":"RuleName"}, + "Arn":{"shape":"RuleArn"}, + "EventPattern":{"shape":"EventPattern"}, + "State":{"shape":"RuleState"}, + "Description":{"shape":"RuleDescription"}, + "ScheduleExpression":{"shape":"ScheduleExpression"}, + "RoleArn":{"shape":"RoleArn"} + } + }, + "RuleArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "RuleDescription":{ + "type":"string", + "max":512 + }, + "RuleName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "RuleNameList":{ + "type":"list", + "member":{"shape":"RuleName"} + }, + "RuleResponseList":{ + "type":"list", + "member":{"shape":"Rule"} + }, + "RuleState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "ScheduleExpression":{ + "type":"string", + "max":256 + }, + "String":{"type":"string"}, + "Target":{ + "type":"structure", + "required":[ + "Id", + "Arn" + ], + "members":{ + "Id":{"shape":"TargetId"}, + "Arn":{"shape":"TargetArn"}, + "Input":{"shape":"TargetInput"}, + "InputPath":{"shape":"TargetInputPath"} + } + }, + 
"TargetArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "TargetId":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "TargetIdList":{ + "type":"list", + "member":{"shape":"TargetId"}, + "min":1, + "max":100 + }, + "TargetInput":{ + "type":"string", + "max":8192 + }, + "TargetInputPath":{ + "type":"string", + "max":256 + }, + "TargetList":{ + "type":"list", + "member":{"shape":"Target"} + }, + "TestEventPatternRequest":{ + "type":"structure", + "required":[ + "EventPattern", + "Event" + ], + "members":{ + "EventPattern":{"shape":"EventPattern"}, + "Event":{"shape":"String"} + } + }, + "TestEventPatternResponse":{ + "type":"structure", + "members":{ + "Result":{"shape":"Boolean"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,411 @@ +{ + "version": "2.0", + "operations": { + "DeleteRule": "

    Deletes a rule. You must remove all targets from a rule using RemoveTargets before you can delete the rule.

    Note: When you make a change with this action, incoming events might still continue to match to the deleted rule. Please allow a short period of time for changes to take effect.

    ", + "DescribeRule": "

    Describes the details of the specified rule.

    ", + "DisableRule": "

    Disables a rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.

    Note: When you make a change with this action, incoming events might still continue to match to the disabled rule. Please allow a short period of time for changes to take effect.

    ", + "EnableRule": "

    Enables a rule. If the rule does not exist, the operation fails.

    Note: When you make a change with this action, incoming events might not immediately start matching to a newly enabled rule. Please allow a short period of time for changes to take effect.

    ", + "ListRuleNamesByTarget": "

    Lists the names of the rules that the given target is put to. Using this action, you can find out which of the rules in Amazon CloudWatch Events can invoke a specific target in your account. If you have more rules in your account than the given limit, the results will be paginated. In that case, use the next token returned in the response and repeat the ListRuleNamesByTarget action until the NextToken in the response is returned as null.

    ", + "ListRules": "

    Lists the Amazon CloudWatch Events rules in your account. You can either list all the rules or you can provide a prefix to match to the rule names. If you have more rules in your account than the given limit, the results will be paginated. In that case, use the next token returned in the response and repeat the ListRules action until the NextToken in the response is returned as null.

    ", + "ListTargetsByRule": "

    Lists the targets assigned to the rule.

    ", + "PutEvents": "

    Sends custom events to Amazon CloudWatch Events so that they can be matched to rules.

    ", + "PutRule": "

    Creates or updates a rule. Rules are enabled by default, or based on value of the State parameter. You can disable a rule using DisableRule.

    Note: When you make a change with this action, incoming events might not immediately start matching to new or updated rules. Please allow a short period of time for changes to take effect.

    A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule will trigger on matching events as well as on a schedule.

    Note: Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

    ", + "PutTargets": "

    Adds target(s) to a rule. Updates the target(s) if they are already associated with the rule. In other words, if there is already a target with the given target ID, then the target associated with that ID is updated.

    Note: When you make a change with this action, when the associated rule triggers, new or updated targets might not be immediately invoked. Please allow a short period of time for changes to take effect.

    ", + "RemoveTargets": "

    Removes target(s) from a rule so that when the rule is triggered, those targets will no longer be invoked.

    Note: When you make a change with this action, when the associated rule triggers, removed targets might still continue to be invoked. Please allow a short period of time for changes to take effect.

    ", + "TestEventPattern": "

    Tests whether an event pattern matches the provided event.

    Note: Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.

    " + }, + "service": "

    Amazon CloudWatch Events helps you to respond to state changes in your AWS resources. When your resources change state they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a pre-determined schedule. For example, you can configure rules to:

    • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state.
    • Direct specific API records from CloudTrail to an Amazon Kinesis stream for detailed analysis of potential security or availability risks.
    • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

    For more information about Amazon CloudWatch Events features, see the Amazon CloudWatch Developer Guide.

    ", + "shapes": { + "Boolean": { + "base": null, + "refs": { + "TestEventPatternResponse$Result": "

    Indicates whether the event matches the event pattern.

    " + } + }, + "ConcurrentModificationException": { + "base": "

    This exception occurs if there is concurrent modification on rule or target.

    ", + "refs": { + } + }, + "DeleteRuleRequest": { + "base": "

    Container for the parameters to the DeleteRule operation.

    ", + "refs": { + } + }, + "DescribeRuleRequest": { + "base": "

    Container for the parameters to the DescribeRule operation.

    ", + "refs": { + } + }, + "DescribeRuleResponse": { + "base": "

    The result of the DescribeRule operation.

    ", + "refs": { + } + }, + "DisableRuleRequest": { + "base": "

    Container for the parameters to the DisableRule operation.

    ", + "refs": { + } + }, + "EnableRuleRequest": { + "base": "

    Container for the parameters to the EnableRule operation.

    ", + "refs": { + } + }, + "ErrorCode": { + "base": null, + "refs": { + "PutEventsResultEntry$ErrorCode": "

    The error code representing why the event submission failed on this entry.

    ", + "PutTargetsResultEntry$ErrorCode": "

    The error code representing why the target submission failed on this entry.

    ", + "RemoveTargetsResultEntry$ErrorCode": "

    The error code representing why the target removal failed on this entry.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "PutEventsResultEntry$ErrorMessage": "

    The error message explaining why the event submission failed on this entry.

    ", + "PutTargetsResultEntry$ErrorMessage": "

    The error message explaining why the target submission failed on this entry.

    ", + "RemoveTargetsResultEntry$ErrorMessage": "

    The error message explaining why the target removal failed on this entry.

    " + } + }, + "EventId": { + "base": null, + "refs": { + "PutEventsResultEntry$EventId": "

    The ID of the event submitted to Amazon CloudWatch Events.

    " + } + }, + "EventPattern": { + "base": null, + "refs": { + "DescribeRuleResponse$EventPattern": "

    The event pattern.

    ", + "PutRuleRequest$EventPattern": "

    The event pattern.

    ", + "Rule$EventPattern": "

    The event pattern of the rule.

    ", + "TestEventPatternRequest$EventPattern": "

    The event pattern you want to test.

    " + } + }, + "EventResource": { + "base": null, + "refs": { + "EventResourceList$member": null + } + }, + "EventResourceList": { + "base": null, + "refs": { + "PutEventsRequestEntry$Resources": "

    AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

    " + } + }, + "EventTime": { + "base": null, + "refs": { + "PutEventsRequestEntry$Time": "

    Timestamp of event, per RFC3339. If no timestamp is provided, the timestamp of the PutEvents call will be used.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "PutEventsResponse$FailedEntryCount": "

    The number of failed entries.

    ", + "PutTargetsResponse$FailedEntryCount": "

    The number of failed entries.

    ", + "RemoveTargetsResponse$FailedEntryCount": "

    The number of failed entries.

    " + } + }, + "InternalException": { + "base": "

    This exception occurs due to unexpected causes.

    ", + "refs": { + } + }, + "InvalidEventPatternException": { + "base": "

    The event pattern is invalid.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    This exception occurs if you try to create more rules or add more targets to a rule than allowed by default.

    ", + "refs": { + } + }, + "LimitMax100": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$Limit": "

    The maximum number of results to return.

    ", + "ListRulesRequest$Limit": "

    The maximum number of results to return.

    ", + "ListTargetsByRuleRequest$Limit": "

    The maximum number of results to return.

    " + } + }, + "ListRuleNamesByTargetRequest": { + "base": "

    Container for the parameters to the ListRuleNamesByTarget operation.

    ", + "refs": { + } + }, + "ListRuleNamesByTargetResponse": { + "base": "

    The result of the ListRuleNamesByTarget operation.

    ", + "refs": { + } + }, + "ListRulesRequest": { + "base": "

    Container for the parameters to the ListRules operation.

    ", + "refs": { + } + }, + "ListRulesResponse": { + "base": "

    The result of the ListRules operation.

    ", + "refs": { + } + }, + "ListTargetsByRuleRequest": { + "base": "

    Container for the parameters to the ListTargetsByRule operation.

    ", + "refs": { + } + }, + "ListTargetsByRuleResponse": { + "base": "

    The result of the ListTargetsByRule operation.

    ", + "refs": { + } + }, + "NextToken": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListRuleNamesByTargetResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    ", + "ListRulesRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListRulesResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    ", + "ListTargetsByRuleRequest$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListTargetsByRuleResponse$NextToken": "

    Indicates that there are additional results to retrieve.

    " + } + }, + "PutEventsRequest": { + "base": "

    Container for the parameters to the PutEvents operation.

    ", + "refs": { + } + }, + "PutEventsRequestEntry": { + "base": "

    Contains information about the event to be used in the PutEvents action.

    ", + "refs": { + "PutEventsRequestEntryList$member": null + } + }, + "PutEventsRequestEntryList": { + "base": null, + "refs": { + "PutEventsRequest$Entries": "

    The entry that defines an event in your system. You can specify several parameters for the entry such as the source and type of the event, resources associated with the event, and so on.

    " + } + }, + "PutEventsResponse": { + "base": "

    The result of the PutEvents operation.

    ", + "refs": { + } + }, + "PutEventsResultEntry": { + "base": "

    A PutEventsResult contains a list of PutEventsResultEntry.

    ", + "refs": { + "PutEventsResultEntryList$member": null + } + }, + "PutEventsResultEntryList": { + "base": null, + "refs": { + "PutEventsResponse$Entries": "

    A list of successfully and unsuccessfully ingested events results. If the ingestion was successful, the entry will have the event ID in it. If not, then the ErrorCode and ErrorMessage can be used to identify the problem with the entry.

    " + } + }, + "PutRuleRequest": { + "base": "

    Container for the parameters to the PutRule operation.

    ", + "refs": { + } + }, + "PutRuleResponse": { + "base": "

    The result of the PutRule operation.

    ", + "refs": { + } + }, + "PutTargetsRequest": { + "base": "

    Container for the parameters to the PutTargets operation.

    ", + "refs": { + } + }, + "PutTargetsResponse": { + "base": "

    The result of the PutTargets operation.

    ", + "refs": { + } + }, + "PutTargetsResultEntry": { + "base": "

    A PutTargetsResult contains a list of PutTargetsResultEntry.

    ", + "refs": { + "PutTargetsResultEntryList$member": null + } + }, + "PutTargetsResultEntryList": { + "base": null, + "refs": { + "PutTargetsResponse$FailedEntries": "

    An array of failed target entries.

    " + } + }, + "RemoveTargetsRequest": { + "base": "

    Container for the parameters to the RemoveTargets operation.

    ", + "refs": { + } + }, + "RemoveTargetsResponse": { + "base": "

    The result of the RemoveTargets operation.

    ", + "refs": { + } + }, + "RemoveTargetsResultEntry": { + "base": "

    The ID of the target requested to be removed from the rule by Amazon CloudWatch Events.

    ", + "refs": { + "RemoveTargetsResultEntryList$member": null + } + }, + "RemoveTargetsResultEntryList": { + "base": null, + "refs": { + "RemoveTargetsResponse$FailedEntries": "

    An array of failed target entries.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    The rule does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "DescribeRuleResponse$RoleArn": "

    The Amazon Resource Name (ARN) of the IAM role associated with the rule.

    ", + "PutRuleRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the IAM role associated with the rule.

    ", + "Rule$RoleArn": "

    The Amazon Resource Name (ARN) associated with the role that is used for target invocation.

    " + } + }, + "Rule": { + "base": "

    Contains information about a rule in Amazon CloudWatch Events. A ListRulesResult contains a list of Rules.

    ", + "refs": { + "RuleResponseList$member": null + } + }, + "RuleArn": { + "base": null, + "refs": { + "DescribeRuleResponse$Arn": "

    The Amazon Resource Name (ARN) associated with the rule.

    ", + "PutRuleResponse$RuleArn": "

    The Amazon Resource Name (ARN) that identifies the rule.

    ", + "Rule$Arn": "

    The Amazon Resource Name (ARN) of the rule.

    " + } + }, + "RuleDescription": { + "base": null, + "refs": { + "DescribeRuleResponse$Description": "

    The rule's description.

    ", + "PutRuleRequest$Description": "

    A description of the rule.

    ", + "Rule$Description": "

    The description of the rule.

    " + } + }, + "RuleName": { + "base": null, + "refs": { + "DeleteRuleRequest$Name": "

    The name of the rule to be deleted.

    ", + "DescribeRuleRequest$Name": "

    The name of the rule you want to describe details for.

    ", + "DescribeRuleResponse$Name": "

    The rule's name.

    ", + "DisableRuleRequest$Name": "

    The name of the rule you want to disable.

    ", + "EnableRuleRequest$Name": "

    The name of the rule that you want to enable.

    ", + "ListRulesRequest$NamePrefix": "

    The prefix matching the rule name.

    ", + "ListTargetsByRuleRequest$Rule": "

    The name of the rule whose targets you want to list.

    ", + "PutRuleRequest$Name": "

    The name of the rule that you are creating or updating.

    ", + "PutTargetsRequest$Rule": "

    The name of the rule you want to add targets to.

    ", + "RemoveTargetsRequest$Rule": "

    The name of the rule you want to remove targets from.

    ", + "Rule$Name": "

    The rule's name.

    ", + "RuleNameList$member": null + } + }, + "RuleNameList": { + "base": null, + "refs": { + "ListRuleNamesByTargetResponse$RuleNames": "

    List of rules names that can invoke the given target.

    " + } + }, + "RuleResponseList": { + "base": null, + "refs": { + "ListRulesResponse$Rules": "

    List of rules matching the specified criteria.

    " + } + }, + "RuleState": { + "base": null, + "refs": { + "DescribeRuleResponse$State": "

    Specifies whether the rule is enabled or disabled.

    ", + "PutRuleRequest$State": "

    Indicates whether the rule is enabled or disabled.

    ", + "Rule$State": "

    The rule's state.

    " + } + }, + "ScheduleExpression": { + "base": null, + "refs": { + "DescribeRuleResponse$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

    ", + "PutRuleRequest$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

    ", + "Rule$ScheduleExpression": "

    The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\".

    " + } + }, + "String": { + "base": null, + "refs": { + "PutEventsRequestEntry$Source": "

    The source of the event.

    ", + "PutEventsRequestEntry$DetailType": "

    Free-form string used to decide what fields to expect in the event detail.

    ", + "PutEventsRequestEntry$Detail": "

    In the JSON sense, an object containing fields, which may also contain nested sub-objects. No constraints are imposed on its contents.

    ", + "TestEventPatternRequest$Event": "

    The event in the JSON format to test against the event pattern.

    " + } + }, + "Target": { + "base": "

    Targets are the resources that can be invoked when a rule is triggered. For example, AWS Lambda functions, Amazon Kinesis streams, and built-in targets.

    Input and InputPath are mutually-exclusive and optional parameters of a target. When a rule is triggered due to a matched event, if for a target:

    • Neither Input nor InputPath is specified, then the entire event is passed to the target in JSON form.
    • InputPath is specified in the form of JSONPath (e.g. $.detail), then only the part of the event specified in the path is passed to the target (e.g. only the detail part of the event is passed).
    • Input is specified in the form of a valid JSON, then the matched event is overridden with this constant.
    ", + "refs": { + "TargetList$member": null + } + }, + "TargetArn": { + "base": null, + "refs": { + "ListRuleNamesByTargetRequest$TargetArn": "

    The Amazon Resource Name (ARN) of the target resource that you want to list the rules for.

    ", + "Target$Arn": "

    The Amazon Resource Name (ARN) associated with the target.

    " + } + }, + "TargetId": { + "base": null, + "refs": { + "PutTargetsResultEntry$TargetId": "

    The ID of the target submitted to Amazon CloudWatch Events.

    ", + "RemoveTargetsResultEntry$TargetId": "

    The ID of the target requested to be removed by Amazon CloudWatch Events.

    ", + "Target$Id": "

    The unique target assignment ID.

    ", + "TargetIdList$member": null + } + }, + "TargetIdList": { + "base": null, + "refs": { + "RemoveTargetsRequest$Ids": "

    The list of target IDs to remove from the rule.

    " + } + }, + "TargetInput": { + "base": null, + "refs": { + "Target$Input": "

    Valid JSON text passed to the target. For more information about JSON text, see The JavaScript Object Notation (JSON) Data Interchange Format.

    " + } + }, + "TargetInputPath": { + "base": null, + "refs": { + "Target$InputPath": "

    The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. For more information about JSON paths, see JSONPath.

    " + } + }, + "TargetList": { + "base": null, + "refs": { + "ListTargetsByRuleResponse$Targets": "

    Lists the targets assigned to the rule.

    ", + "PutTargetsRequest$Targets": "

    List of targets you want to update or add to the rule.

    " + } + }, + "TestEventPatternRequest": { + "base": "

    Container for the parameters to the TestEventPattern operation.

    ", + "refs": { + } + }, + "TestEventPatternResponse": { + "base": "

    The result of the TestEventPattern operation.

    ", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/events/2014-02-03/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version":"1.0", + "examples":{ + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/firehose/2015-08-04/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,627 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-08-04", + "endpointPrefix":"firehose", + "jsonVersion":"1.1", + "serviceAbbreviation":"Firehose", + "serviceFullName":"Amazon Kinesis Firehose", + "signatureVersion":"v4", + "targetPrefix":"Firehose_20150804", + "protocol":"json" + }, + "operations":{ + "CreateDeliveryStream":{ + "name":"CreateDeliveryStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeliveryStreamInput"}, + "output":{"shape":"CreateDeliveryStreamOutput"}, + "errors":[ + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"ResourceInUseException", + "exception":true + } + ] + 
}, + "DeleteDeliveryStream":{ + "name":"DeleteDeliveryStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeliveryStreamInput"}, + "output":{"shape":"DeleteDeliveryStreamOutput"}, + "errors":[ + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + } + ] + }, + "DescribeDeliveryStream":{ + "name":"DescribeDeliveryStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeliveryStreamInput"}, + "output":{"shape":"DescribeDeliveryStreamOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + } + ] + }, + "ListDeliveryStreams":{ + "name":"ListDeliveryStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeliveryStreamsInput"}, + "output":{"shape":"ListDeliveryStreamsOutput"} + }, + "PutRecord":{ + "name":"PutRecord", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRecordInput"}, + "output":{"shape":"PutRecordOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "PutRecordBatch":{ + "name":"PutRecordBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRecordBatchInput"}, + "output":{"shape":"PutRecordBatchOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "UpdateDestination":{ + "name":"UpdateDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDestinationInput"}, + 
"output":{"shape":"UpdateDestinationOutput"}, + "errors":[ + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ConcurrentModificationException", + "exception":true + } + ] + } + }, + "shapes":{ + "AWSKMSKeyARN":{ + "type":"string", + "min":1, + "max":512, + "pattern":"arn:.*" + }, + "BooleanObject":{"type":"boolean"}, + "BucketARN":{ + "type":"string", + "min":1, + "max":2048, + "pattern":"arn:.*" + }, + "BufferingHints":{ + "type":"structure", + "members":{ + "SizeInMBs":{"shape":"SizeInMBs"}, + "IntervalInSeconds":{"shape":"IntervalInSeconds"} + } + }, + "ClusterJDBCURL":{ + "type":"string", + "min":1, + "pattern":"jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?Creates a delivery stream.

    CreateDeliveryStream is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

    The name of a delivery stream identifies it. You can't have two delivery streams with the same name in the same region. Two delivery streams in different AWS accounts or different regions in the same AWS account can have the same name.

    By default, you can create up to 5 delivery streams per region.

    A delivery stream can only be configured with a single destination, Amazon S3 or Amazon Redshift. For correct CreateDeliveryStream request syntax, specify only one destination configuration parameter: either RedshiftDestinationConfiguration or S3DestinationConfiguration

    As part of S3DestinationConfiguration, optional values BufferingHints, EncryptionConfiguration, and CompressionFormat can be provided. By default, if no BufferingHints value is provided, Amazon Kinesis Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. Note that BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly; for example, record boundaries are such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

    A few notes about RedshiftDestinationConfiguration:

    • An Amazon Redshift destination requires an S3 bucket as intermediate location, as Amazon Kinesis Firehose first delivers data to S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter element.
    • The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.
    • We strongly recommend that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.

    Amazon Kinesis Firehose assumes the IAM role that is configured as part of destinations. The IAM role should allow the Amazon Kinesis Firehose principal to assume the role, and the role should have permissions that allows the service to deliver the data. For more information, see Amazon S3 Bucket Access in the Amazon Kinesis Firehose Developer Guide.

    ", + "DeleteDeliveryStream": "

    Deletes a delivery stream and its data.

    You can delete a delivery stream only if it is in ACTIVE or DELETING state, and not in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

    To check the state of a delivery stream, use DescribeDeliveryStream.

    While the delivery stream is DELETING state, the service may continue to accept the records, but the service doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.

    ", + "DescribeDeliveryStream": "

    Describes the specified delivery stream and gets the status. For example, after your delivery stream is created, call DescribeDeliveryStream to see if the delivery stream is ACTIVE and therefore ready for data to be sent to it.

    ", + "ListDeliveryStreams": "

    Lists your delivery streams.

    The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by specifying the name of the last delivery stream returned in the call in the ExclusiveStartDeliveryStreamName parameter of a subsequent call.

    ", + "PutRecord": "

    Writes a single data record into an Amazon Kinesis Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

    By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Firehose Limits.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a log file, geographic location data, web site clickstream data, etc.

    Amazon Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application(s) to parse individual data items when reading the data from the destination.

    Amazon Kinesis Firehose does not maintain data record ordering. If the destination data needs to be re-ordered by the consumer application, the producer should include some form of sequence number in each data record.

    The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

    If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Data records sent to Amazon Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    ", + "PutRecordBatch": "

    Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

    Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before 64-bit encoding), up to a limit of 4 MB for the entire request. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Firehose Limits.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a log file, geographic location data, web site clickstream data, and so on.

    Amazon Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application(s) to parse individual data items when reading the data from the destination.

    The PutRecordBatch response includes a count of any failed records, FailedPutCount, and an array of responses, RequestResponses. The FailedPutCount value is a count of records that failed. Each entry in the RequestResponses array gives additional information of the processed record. Each entry in RequestResponses directly correlates with a record in the request array using the same ordering, from the top to the bottom of the request and response. RequestResponses always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Amazon Kinesis Firehose attempts to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

    A successfully processed record includes a RecordId value, which is a unique value identified for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage provides more detailed information about the error.

    If FailedPutCount is greater than 0 (zero), retry the request. A retry of the entire batch of records is possible; however, we strongly recommend that you inspect the entire response and resend only those records that failed processing. This minimizes duplicate records and also reduces the total bytes sent (and corresponding charges).

    If the PutRecordBatch operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Data records sent to Amazon Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    ", + "UpdateDestination": "

    Updates the specified destination of the specified delivery stream.

    This operation can be used to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a given destination (for example, to change the bucket name of the Amazon S3 destination). The update may not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are normally effective within a few minutes.

    If the destination type is the same, Amazon Kinesis Firehose merges the configuration parameters specified in the UpdateDestination request with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the update request, then the existing configuration parameters are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified then the existing EncryptionConfiguration is maintained on the destination.

    If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Amazon Kinesis Firehose does not merge any parameters. In this case, all parameters must be specified.

    Amazon Kinesis Firehose uses the CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field in every request and the service only updates the configuration if the existing configuration matches the VersionId. After the update is applied successfully, the VersionId is updated, which can be retrieved with the DescribeDeliveryStream operation. The new VersionId should be used to set CurrentDeliveryStreamVersionId in the next UpdateDestination operation.

    " + }, + "service": "Amazon Kinesis Firehose API Reference

    Amazon Kinesis Firehose is a fully-managed service that delivers real-time streaming data to destinations such as Amazon S3 and Amazon Redshift.

    ", + "shapes": { + "AWSKMSKeyARN": { + "base": null, + "refs": { + "KMSEncryptionConfig$AWSKMSKeyARN": "

    The ARN of the encryption key. Must belong to the same region as the destination Amazon S3 bucket.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "DeliveryStreamDescription$HasMoreDestinations": "

    Indicates whether there are more destinations available to list.

    ", + "ListDeliveryStreamsOutput$HasMoreDeliveryStreams": "

    Indicates whether there are more delivery streams available to list.

    " + } + }, + "BucketARN": { + "base": null, + "refs": { + "S3DestinationConfiguration$BucketARN": "

    The ARN of the S3 bucket.

    ", + "S3DestinationDescription$BucketARN": "

    The ARN of the S3 bucket.

    ", + "S3DestinationUpdate$BucketARN": "

    The ARN of the S3 bucket.

    " + } + }, + "BufferingHints": { + "base": "

    Describes the buffering to perform before delivering data to the destination.

    ", + "refs": { + "S3DestinationConfiguration$BufferingHints": "

    The buffering option. If no value is specified, BufferingHints object default values are used.

    ", + "S3DestinationDescription$BufferingHints": "

    The buffering option. If no value is specified, BufferingHints object default values are used.

    ", + "S3DestinationUpdate$BufferingHints": "

    The buffering option. If no value is specified, BufferingHints object default values are used.

    " + } + }, + "ClusterJDBCURL": { + "base": null, + "refs": { + "RedshiftDestinationConfiguration$ClusterJDBCURL": "

    The database connection string.

    ", + "RedshiftDestinationDescription$ClusterJDBCURL": "

    The database connection string.

    ", + "RedshiftDestinationUpdate$ClusterJDBCURL": "

    The database connection string.

    " + } + }, + "CompressionFormat": { + "base": null, + "refs": { + "S3DestinationConfiguration$CompressionFormat": "

    The compression format. If no value is specified, the default is UNCOMPRESSED.

    The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.

    ", + "S3DestinationDescription$CompressionFormat": "

    The compression format. If no value is specified, the default is NOCOMPRESSION.

    ", + "S3DestinationUpdate$CompressionFormat": "

    The compression format. If no value is specified, the default is NOCOMPRESSION.

    The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.

    " + } + }, + "ConcurrentModificationException": { + "base": "

    Another modification has already happened. Fetch VersionId again and use it to update the destination.

    ", + "refs": { + } + }, + "CopyCommand": { + "base": "

    Describes a COPY command for Amazon Redshift.

    ", + "refs": { + "RedshiftDestinationConfiguration$CopyCommand": "

    The COPY command.

    ", + "RedshiftDestinationDescription$CopyCommand": "

    The COPY command.

    ", + "RedshiftDestinationUpdate$CopyCommand": "

    The COPY command.

    " + } + }, + "CopyOptions": { + "base": null, + "refs": { + "CopyCommand$CopyOptions": "

    Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Amazon Kinesis Firehose are as follows.

    delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

    delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

    delimiter '|' escape - the delimiter should be escaped.

    fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

    JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

    For more examples, see Amazon Redshift COPY command examples.

    " + } + }, + "CreateDeliveryStreamInput": { + "base": "

    Contains the parameters for CreateDeliveryStream.

    ", + "refs": { + } + }, + "CreateDeliveryStreamOutput": { + "base": "

    Contains the output of CreateDeliveryStream.

    ", + "refs": { + } + }, + "Data": { + "base": null, + "refs": { + "Record$Data": "

    The data blob, which is base64-encoded when the blob is serialized. The maximum size of the data blob, before base64-encoding, is 1,000 KB.

    " + } + }, + "DataTableColumns": { + "base": null, + "refs": { + "CopyCommand$DataTableColumns": "

    A comma-separated list of column names.

    " + } + }, + "DataTableName": { + "base": null, + "refs": { + "CopyCommand$DataTableName": "

    The name of the target table. The table must already exist in the database.

    " + } + }, + "DeleteDeliveryStreamInput": { + "base": "

    Contains the parameters for DeleteDeliveryStream.

    ", + "refs": { + } + }, + "DeleteDeliveryStreamOutput": { + "base": "

    Contains the output of DeleteDeliveryStream.

    ", + "refs": { + } + }, + "DeliveryStreamARN": { + "base": null, + "refs": { + "CreateDeliveryStreamOutput$DeliveryStreamARN": "

    The ARN of the delivery stream.

    ", + "DeliveryStreamDescription$DeliveryStreamARN": "

    The Amazon Resource Name (ARN) of the delivery stream.

    " + } + }, + "DeliveryStreamDescription": { + "base": "

    Contains information about a delivery stream.

    ", + "refs": { + "DescribeDeliveryStreamOutput$DeliveryStreamDescription": "

    Information about the delivery stream.

    " + } + }, + "DeliveryStreamName": { + "base": null, + "refs": { + "CreateDeliveryStreamInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "DeleteDeliveryStreamInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "DeliveryStreamDescription$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "DeliveryStreamNameList$member": null, + "DescribeDeliveryStreamInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "ListDeliveryStreamsInput$ExclusiveStartDeliveryStreamName": "

    The name of the delivery stream to start the list with.

    ", + "PutRecordBatchInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "PutRecordInput$DeliveryStreamName": "

    The name of the delivery stream.

    ", + "UpdateDestinationInput$DeliveryStreamName": "

    The name of the delivery stream.

    " + } + }, + "DeliveryStreamNameList": { + "base": null, + "refs": { + "ListDeliveryStreamsOutput$DeliveryStreamNames": "

    The names of the delivery streams.

    " + } + }, + "DeliveryStreamStatus": { + "base": null, + "refs": { + "DeliveryStreamDescription$DeliveryStreamStatus": "

    The status of the delivery stream.

    " + } + }, + "DeliveryStreamVersionId": { + "base": null, + "refs": { + "DeliveryStreamDescription$VersionId": "

    Used when calling the UpdateDestination operation. Each time the destination is updated for the delivery stream, the VersionId is changed, and the current VersionId is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the delivery stream.

    ", + "UpdateDestinationInput$CurrentDeliveryStreamVersionId": "

    Obtain this value from the VersionId result of the DeliveryStreamDescription operation. This value is required, and helps the service to perform conditional operations. For example, if there is an interleaving update and this value is null, then the update destination fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.

    " + } + }, + "DescribeDeliveryStreamInput": { + "base": "

    Contains the parameters for DescribeDeliveryStream.

    ", + "refs": { + } + }, + "DescribeDeliveryStreamInputLimit": { + "base": null, + "refs": { + "DescribeDeliveryStreamInput$Limit": "

    The limit on the number of destinations to return. Currently, you can have one destination per delivery stream.

    " + } + }, + "DescribeDeliveryStreamOutput": { + "base": "

    Contains the output of DescribeDeliveryStream.

    ", + "refs": { + } + }, + "DestinationDescription": { + "base": "

    Describes the destination for a delivery stream.

    ", + "refs": { + "DestinationDescriptionList$member": null + } + }, + "DestinationDescriptionList": { + "base": null, + "refs": { + "DeliveryStreamDescription$Destinations": "

    The destinations.

    " + } + }, + "DestinationId": { + "base": null, + "refs": { + "DescribeDeliveryStreamInput$ExclusiveStartDestinationId": "

    Specifies the destination ID to start returning the destination information. Currently Amazon Kinesis Firehose supports one destination per delivery stream.

    ", + "DestinationDescription$DestinationId": "

    The ID of the destination.

    ", + "UpdateDestinationInput$DestinationId": "

    The ID of the destination.

    " + } + }, + "EncryptionConfiguration": { + "base": "

    Describes the encryption for a destination in Amazon S3.

    ", + "refs": { + "S3DestinationConfiguration$EncryptionConfiguration": "

    The encryption configuration. If no value is specified, the default is no encryption.

    ", + "S3DestinationDescription$EncryptionConfiguration": "

    The encryption configuration. If no value is specified, the default is no encryption.

    ", + "S3DestinationUpdate$EncryptionConfiguration": "

    The encryption configuration. If no value is specified, the default is no encryption.

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "PutRecordBatchResponseEntry$ErrorCode": "

    The error code for an individual record result.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConcurrentModificationException$message": "

    A message that provides information about the error.

    ", + "InvalidArgumentException$message": "

    A message that provides information about the error.

    ", + "LimitExceededException$message": "

    A message that provides information about the error.

    ", + "PutRecordBatchResponseEntry$ErrorMessage": "

    The error message for an individual record result.

    ", + "ResourceInUseException$message": "

    A message that provides information about the error.

    ", + "ResourceNotFoundException$message": "

    A message that provides information about the error.

    ", + "ServiceUnavailableException$message": "

    A message that provides information about the error.

    " + } + }, + "IntervalInSeconds": { + "base": null, + "refs": { + "BufferingHints$IntervalInSeconds": "

    Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

    " + } + }, + "InvalidArgumentException": { + "base": "

    The specified input parameter has a value that is not valid.

    ", + "refs": { + } + }, + "KMSEncryptionConfig": { + "base": "

    Describes an encryption key for a destination in Amazon S3.

    ", + "refs": { + "EncryptionConfiguration$KMSEncryptionConfig": "

    The encryption key.

    " + } + }, + "LimitExceededException": { + "base": "

    You have already reached the limit for a requested resource.

    ", + "refs": { + } + }, + "ListDeliveryStreamsInput": { + "base": "

    Contains the parameters for ListDeliveryStreams.

    ", + "refs": { + } + }, + "ListDeliveryStreamsInputLimit": { + "base": null, + "refs": { + "ListDeliveryStreamsInput$Limit": "

    The maximum number of delivery streams to list.

    " + } + }, + "ListDeliveryStreamsOutput": { + "base": "

    Contains the output of ListDeliveryStreams.

    ", + "refs": { + } + }, + "NoEncryptionConfig": { + "base": null, + "refs": { + "EncryptionConfiguration$NoEncryptionConfig": "

    Specifically override existing encryption information to ensure no encryption is used.

    " + } + }, + "NonNegativeIntegerObject": { + "base": null, + "refs": { + "PutRecordBatchOutput$FailedPutCount": "

    The number of unsuccessfully written records.

    " + } + }, + "Password": { + "base": null, + "refs": { + "RedshiftDestinationConfiguration$Password": "

    The user password.

    ", + "RedshiftDestinationUpdate$Password": "

    The user password.

    " + } + }, + "Prefix": { + "base": null, + "refs": { + "S3DestinationConfiguration$Prefix": "

    The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the guide-fh-dev.

    ", + "S3DestinationDescription$Prefix": "

    The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the guide-fh-dev.

    ", + "S3DestinationUpdate$Prefix": "

    The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the guide-fh-dev.

    " + } + }, + "PutRecordBatchInput": { + "base": "

    Contains the parameters for PutRecordBatch.

    ", + "refs": { + } + }, + "PutRecordBatchOutput": { + "base": "

    Contains the output of PutRecordBatch.

    ", + "refs": { + } + }, + "PutRecordBatchRequestEntryList": { + "base": null, + "refs": { + "PutRecordBatchInput$Records": "

    One or more records.

    " + } + }, + "PutRecordBatchResponseEntry": { + "base": "

    Contains the result for an individual record from a PutRecordBatch request. If the record is successfully added to your delivery stream, it receives a record ID. If the record fails to be added to your delivery stream, the result includes an error code and an error message.

    ", + "refs": { + "PutRecordBatchResponseEntryList$member": null + } + }, + "PutRecordBatchResponseEntryList": { + "base": null, + "refs": { + "PutRecordBatchOutput$RequestResponses": "

    The results for the individual records. The index of each element matches the same index in which records were sent.

    " + } + }, + "PutRecordInput": { + "base": "

    Contains the parameters for PutRecord.

    ", + "refs": { + } + }, + "PutRecordOutput": { + "base": "

    Contains the output of PutRecord.

    ", + "refs": { + } + }, + "PutResponseRecordId": { + "base": null, + "refs": { + "PutRecordBatchResponseEntry$RecordId": "

    The ID of the record.

    ", + "PutRecordOutput$RecordId": "

    The ID of the record.

    " + } + }, + "Record": { + "base": "

    The unit of data in a delivery stream.

    ", + "refs": { + "PutRecordBatchRequestEntryList$member": null, + "PutRecordInput$Record": "

    The record.

    " + } + }, + "RedshiftDestinationConfiguration": { + "base": "

    Describes the configuration of a destination in Amazon Redshift.

    ", + "refs": { + "CreateDeliveryStreamInput$RedshiftDestinationConfiguration": "

    The destination in Amazon Redshift. This value cannot be specified if Amazon S3 is the desired destination (see restrictions listed above).

    " + } + }, + "RedshiftDestinationDescription": { + "base": "

    Describes a destination in Amazon Redshift.

    ", + "refs": { + "DestinationDescription$RedshiftDestinationDescription": "

    The destination in Amazon Redshift.

    " + } + }, + "RedshiftDestinationUpdate": { + "base": "

    Describes an update for a destination in Amazon Redshift.

    ", + "refs": { + "UpdateDestinationInput$RedshiftDestinationUpdate": null + } + }, + "ResourceInUseException": { + "base": "

    The resource is already in use and not available for this operation.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The specified resource could not be found.

    ", + "refs": { + } + }, + "RoleARN": { + "base": null, + "refs": { + "RedshiftDestinationConfiguration$RoleARN": "

    The ARN of the AWS credentials.

    ", + "RedshiftDestinationDescription$RoleARN": "

    The ARN of the AWS credentials.

    ", + "RedshiftDestinationUpdate$RoleARN": "

    The ARN of the AWS credentials.

    ", + "S3DestinationConfiguration$RoleARN": "

    The ARN of the AWS credentials.

    ", + "S3DestinationDescription$RoleARN": "

    The ARN of the AWS credentials.

    ", + "S3DestinationUpdate$RoleARN": "

    The ARN of the AWS credentials.

    " + } + }, + "S3DestinationConfiguration": { + "base": "

    Describes the configuration of a destination in Amazon S3.

    ", + "refs": { + "CreateDeliveryStreamInput$S3DestinationConfiguration": "

    The destination in Amazon S3. This value must be specified if RedshiftDestinationConfiguration is specified (see restrictions listed above).

    ", + "RedshiftDestinationConfiguration$S3Configuration": "

    The S3 configuration for the intermediate location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream.

    The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    " + } + }, + "S3DestinationDescription": { + "base": "

    Describes a destination in Amazon S3.

    ", + "refs": { + "DestinationDescription$S3DestinationDescription": "

    The Amazon S3 destination.

    ", + "RedshiftDestinationDescription$S3DestinationDescription": "

    The Amazon S3 destination.

    " + } + }, + "S3DestinationUpdate": { + "base": "

    Describes an update for a destination in Amazon S3.

    ", + "refs": { + "RedshiftDestinationUpdate$S3Update": "

    The Amazon S3 destination.

    The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    ", + "UpdateDestinationInput$S3DestinationUpdate": null + } + }, + "ServiceUnavailableException": { + "base": "

    The service is unavailable, back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Kinesis Firehose Limits.

    ", + "refs": { + } + }, + "SizeInMBs": { + "base": null, + "refs": { + "BufferingHints$SizeInMBs": "

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "DeliveryStreamDescription$CreateTimestamp": "

    The date and time that the delivery stream was created.

    ", + "DeliveryStreamDescription$LastUpdateTimestamp": "

    The date and time that the delivery stream was last updated.

    " + } + }, + "UpdateDestinationInput": { + "base": "

    Contains the parameters for UpdateDestination.

    ", + "refs": { + } + }, + "UpdateDestinationOutput": { + "base": "

    Contains the output of UpdateDestination.

    ", + "refs": { + } + }, + "Username": { + "base": null, + "refs": { + "RedshiftDestinationConfiguration$Username": "

    The name of the user.

    ", + "RedshiftDestinationDescription$Username": "

    The name of the user.

    ", + "RedshiftDestinationUpdate$Username": "

    The name of the user.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2144 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-06-01", + "checksumFormat":"sha256", + "endpointPrefix":"glacier", + "serviceFullName":"Amazon Glacier", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "AbortMultipartUpload":{ + "name":"AbortMultipartUpload", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + "responseCode":204 + }, + "input":{"shape":"AbortMultipartUploadInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "AbortVaultLock":{ + "name":"AbortVaultLock", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/lock-policy", + "responseCode":204 + }, + "input":{"shape":"AbortVaultLockInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "AddTagsToVault":{ + "name":"AddTagsToVault", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/tags?operation=add", + "responseCode":204 + }, + "input":{"shape":"AddTagsToVaultInput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CompleteMultipartUpload":{ + "name":"CompleteMultipartUpload", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + "responseCode":201 + }, + "input":{"shape":"CompleteMultipartUploadInput"}, + "output":{"shape":"ArchiveCreationOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CompleteVaultLock":{ + "name":"CompleteVaultLock", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/lock-policy/{lockId}", + "responseCode":204 + }, + "input":{"shape":"CompleteVaultLockInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateVault":{ + "name":"CreateVault", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/vaults/{vaultName}", + "responseCode":201 + }, + "input":{"shape":"CreateVaultInput"}, + "output":{"shape":"CreateVaultOutput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteArchive":{ + "name":"DeleteArchive", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/archives/{archiveId}", + "responseCode":204 + }, + "input":{"shape":"DeleteArchiveInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteVault":{ + "name":"DeleteVault", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}", + "responseCode":204 + }, + "input":{"shape":"DeleteVaultInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, 
+ { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteVaultAccessPolicy":{ + "name":"DeleteVaultAccessPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/access-policy", + "responseCode":204 + }, + "input":{"shape":"DeleteVaultAccessPolicyInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteVaultNotifications":{ + "name":"DeleteVaultNotifications", + "http":{ + "method":"DELETE", + "requestUri":"/{accountId}/vaults/{vaultName}/notification-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteVaultNotificationsInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeJob":{ + "name":"DescribeJob", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/jobs/{jobId}" + }, + "input":{"shape":"DescribeJobInput"}, + "output":{"shape":"GlacierJobDescription"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + 
"error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeVault":{ + "name":"DescribeVault", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}" + }, + "input":{"shape":"DescribeVaultInput"}, + "output":{"shape":"DescribeVaultOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetDataRetrievalPolicy":{ + "name":"GetDataRetrievalPolicy", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/policies/data-retrieval" + }, + "input":{"shape":"GetDataRetrievalPolicyInput"}, + "output":{"shape":"GetDataRetrievalPolicyOutput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetJobOutput":{ + "name":"GetJobOutput", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/jobs/{jobId}/output" + }, + "input":{"shape":"GetJobOutputInput"}, + "output":{"shape":"GetJobOutputOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + 
"shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetVaultAccessPolicy":{ + "name":"GetVaultAccessPolicy", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/access-policy" + }, + "input":{"shape":"GetVaultAccessPolicyInput"}, + "output":{"shape":"GetVaultAccessPolicyOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetVaultLock":{ + "name":"GetVaultLock", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/lock-policy" + }, + "input":{"shape":"GetVaultLockInput"}, + "output":{"shape":"GetVaultLockOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "GetVaultNotifications":{ + "name":"GetVaultNotifications", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/notification-configuration" + }, + "input":{"shape":"GetVaultNotificationsInput"}, + "output":{"shape":"GetVaultNotificationsOutput"}, + "errors":[ + { + 
"shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "InitiateJob":{ + "name":"InitiateJob", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/jobs", + "responseCode":202 + }, + "input":{"shape":"InitiateJobInput"}, + "output":{"shape":"InitiateJobOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"PolicyEnforcedException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "InitiateMultipartUpload":{ + "name":"InitiateMultipartUpload", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads", + "responseCode":201 + }, + "input":{"shape":"InitiateMultipartUploadInput"}, + "output":{"shape":"InitiateMultipartUploadOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "InitiateVaultLock":{ + "name":"InitiateVaultLock", + "http":{ + 
"method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/lock-policy", + "responseCode":201 + }, + "input":{"shape":"InitiateVaultLockInput"}, + "output":{"shape":"InitiateVaultLockOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/jobs" + }, + "input":{"shape":"ListJobsInput"}, + "output":{"shape":"ListJobsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListMultipartUploads":{ + "name":"ListMultipartUploads", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads" + }, + "input":{"shape":"ListMultipartUploadsInput"}, + "output":{"shape":"ListMultipartUploadsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListParts":{ + 
"name":"ListParts", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}" + }, + "input":{"shape":"ListPartsInput"}, + "output":{"shape":"ListPartsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListTagsForVault":{ + "name":"ListTagsForVault", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults/{vaultName}/tags" + }, + "input":{"shape":"ListTagsForVaultInput"}, + "output":{"shape":"ListTagsForVaultOutput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListVaults":{ + "name":"ListVaults", + "http":{ + "method":"GET", + "requestUri":"/{accountId}/vaults" + }, + "input":{"shape":"ListVaultsInput"}, + "output":{"shape":"ListVaultsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RemoveTagsFromVault":{ + 
"name":"RemoveTagsFromVault", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/tags?operation=remove", + "responseCode":204 + }, + "input":{"shape":"RemoveTagsFromVaultInput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "SetDataRetrievalPolicy":{ + "name":"SetDataRetrievalPolicy", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/policies/data-retrieval", + "responseCode":204 + }, + "input":{"shape":"SetDataRetrievalPolicyInput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "SetVaultAccessPolicy":{ + "name":"SetVaultAccessPolicy", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/vaults/{vaultName}/access-policy", + "responseCode":204 + }, + "input":{"shape":"SetVaultAccessPolicyInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "SetVaultNotifications":{ + "name":"SetVaultNotifications", + "http":{ + "method":"PUT", + 
"requestUri":"/{accountId}/vaults/{vaultName}/notification-configuration", + "responseCode":204 + }, + "input":{"shape":"SetVaultNotificationsInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UploadArchive":{ + "name":"UploadArchive", + "http":{ + "method":"POST", + "requestUri":"/{accountId}/vaults/{vaultName}/archives", + "responseCode":201 + }, + "input":{"shape":"UploadArchiveInput"}, + "output":{"shape":"ArchiveCreationOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"RequestTimeoutException", + "error":{"httpStatusCode":408}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UploadMultipartPart":{ + "name":"UploadMultipartPart", + "http":{ + "method":"PUT", + "requestUri":"/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + "responseCode":204 + }, + "input":{"shape":"UploadMultipartPartInput"}, + "output":{"shape":"UploadMultipartPartOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"MissingParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"RequestTimeoutException", + "error":{"httpStatusCode":408}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + } + }, + "shapes":{ + "AbortMultipartUploadInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "uploadId":{ + "shape":"string", + "location":"uri", + "locationName":"uploadId" + } + }, + "required":[ + "accountId", + "vaultName", + "uploadId" + ] + }, + "AbortVaultLockInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ActionCode":{ + "type":"string", + "enum":[ + "ArchiveRetrieval", + "InventoryRetrieval" + ] + }, + "AddTagsToVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "Tags":{"shape":"TagMap"} + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ArchiveCreationOutput":{ + "type":"structure", + "members":{ + "location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + }, + "archiveId":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-id" + } + } + }, + "CompleteMultipartUploadInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "uploadId":{ + 
"shape":"string", + "location":"uri", + "locationName":"uploadId" + }, + "archiveSize":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-size" + }, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + } + }, + "required":[ + "accountId", + "vaultName", + "uploadId" + ] + }, + "CompleteVaultLockInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "lockId":{ + "shape":"string", + "location":"uri", + "locationName":"lockId" + } + }, + "required":[ + "accountId", + "vaultName", + "lockId" + ] + }, + "CreateVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "CreateVaultOutput":{ + "type":"structure", + "members":{ + "location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + } + } + }, + "DataRetrievalPolicy":{ + "type":"structure", + "members":{ + "Rules":{"shape":"DataRetrievalRulesList"} + } + }, + "DataRetrievalRule":{ + "type":"structure", + "members":{ + "Strategy":{"shape":"string"}, + "BytesPerHour":{"shape":"NullableLong"} + } + }, + "DataRetrievalRulesList":{ + "type":"list", + "member":{"shape":"DataRetrievalRule"} + }, + "DateTime":{"type":"string"}, + "DeleteArchiveInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "archiveId":{ + "shape":"string", + "location":"uri", + "locationName":"archiveId" + } + }, + "required":[ + "accountId", + "vaultName", + "archiveId" + ] + }, + 
"DeleteVaultAccessPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "DeleteVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "DeleteVaultNotificationsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "DescribeJobInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "jobId":{ + "shape":"string", + "location":"uri", + "locationName":"jobId" + } + }, + "required":[ + "accountId", + "vaultName", + "jobId" + ] + }, + "DescribeVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "DescribeVaultOutput":{ + "type":"structure", + "members":{ + "VaultARN":{"shape":"string"}, + "VaultName":{"shape":"string"}, + "CreationDate":{"shape":"string"}, + "LastInventoryDate":{"shape":"string"}, + "NumberOfArchives":{"shape":"long"}, + "SizeInBytes":{"shape":"long"} + } + }, + "GetDataRetrievalPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + 
"location":"uri", + "locationName":"accountId" + } + }, + "required":["accountId"] + }, + "GetDataRetrievalPolicyOutput":{ + "type":"structure", + "members":{ + "Policy":{"shape":"DataRetrievalPolicy"} + } + }, + "GetJobOutputInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "jobId":{ + "shape":"string", + "location":"uri", + "locationName":"jobId" + }, + "range":{ + "shape":"string", + "location":"header", + "locationName":"Range" + } + }, + "required":[ + "accountId", + "vaultName", + "jobId" + ] + }, + "GetJobOutputOutput":{ + "type":"structure", + "members":{ + "body":{"shape":"Stream"}, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + }, + "status":{ + "shape":"httpstatus", + "location":"statusCode" + }, + "contentRange":{ + "shape":"string", + "location":"header", + "locationName":"Content-Range" + }, + "acceptRanges":{ + "shape":"string", + "location":"header", + "locationName":"Accept-Ranges" + }, + "contentType":{ + "shape":"string", + "location":"header", + "locationName":"Content-Type" + }, + "archiveDescription":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-description" + } + }, + "payload":"body" + }, + "GetVaultAccessPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "GetVaultAccessPolicyOutput":{ + "type":"structure", + "members":{ + "policy":{"shape":"VaultAccessPolicy"} + }, + "payload":"policy" + }, + "GetVaultLockInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + 
"vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "GetVaultLockOutput":{ + "type":"structure", + "members":{ + "Policy":{"shape":"string"}, + "State":{"shape":"string"}, + "ExpirationDate":{"shape":"string"}, + "CreationDate":{"shape":"string"} + } + }, + "GetVaultNotificationsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "GetVaultNotificationsOutput":{ + "type":"structure", + "members":{ + "vaultNotificationConfig":{"shape":"VaultNotificationConfig"} + }, + "payload":"vaultNotificationConfig" + }, + "GlacierJobDescription":{ + "type":"structure", + "members":{ + "JobId":{"shape":"string"}, + "JobDescription":{"shape":"string"}, + "Action":{"shape":"ActionCode"}, + "ArchiveId":{"shape":"string"}, + "VaultARN":{"shape":"string"}, + "CreationDate":{"shape":"string"}, + "Completed":{"shape":"boolean"}, + "StatusCode":{"shape":"StatusCode"}, + "StatusMessage":{"shape":"string"}, + "ArchiveSizeInBytes":{"shape":"Size"}, + "InventorySizeInBytes":{"shape":"Size"}, + "SNSTopic":{"shape":"string"}, + "CompletionDate":{"shape":"string"}, + "SHA256TreeHash":{"shape":"string"}, + "ArchiveSHA256TreeHash":{"shape":"string"}, + "RetrievalByteRange":{"shape":"string"}, + "InventoryRetrievalParameters":{"shape":"InventoryRetrievalJobDescription"} + } + }, + "InitiateJobInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "jobParameters":{"shape":"JobParameters"} + }, + "required":[ + "accountId", + "vaultName" + ], + "payload":"jobParameters" + }, + "InitiateJobOutput":{ + 
"type":"structure", + "members":{ + "location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "jobId":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-job-id" + } + } + }, + "InitiateMultipartUploadInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "archiveDescription":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-description" + }, + "partSize":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-part-size" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "InitiateMultipartUploadOutput":{ + "type":"structure", + "members":{ + "location":{ + "shape":"string", + "location":"header", + "locationName":"Location" + }, + "uploadId":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-multipart-upload-id" + } + } + }, + "InitiateVaultLockInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "policy":{"shape":"VaultLockPolicy"} + }, + "required":[ + "accountId", + "vaultName" + ], + "payload":"policy" + }, + "InitiateVaultLockOutput":{ + "type":"structure", + "members":{ + "lockId":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-lock-id" + } + } + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InventoryRetrievalJobDescription":{ + "type":"structure", + "members":{ + "Format":{"shape":"string"}, + "StartDate":{"shape":"DateTime"}, + "EndDate":{"shape":"DateTime"}, + "Limit":{"shape":"string"}, + 
"Marker":{"shape":"string"} + } + }, + "InventoryRetrievalJobInput":{ + "type":"structure", + "members":{ + "StartDate":{"shape":"string"}, + "EndDate":{"shape":"string"}, + "Limit":{"shape":"string"}, + "Marker":{"shape":"string"} + } + }, + "JobList":{ + "type":"list", + "member":{"shape":"GlacierJobDescription"} + }, + "JobParameters":{ + "type":"structure", + "members":{ + "Format":{"shape":"string"}, + "Type":{"shape":"string"}, + "ArchiveId":{"shape":"string"}, + "Description":{"shape":"string"}, + "SNSTopic":{"shape":"string"}, + "RetrievalByteRange":{"shape":"string"}, + "InventoryRetrievalParameters":{"shape":"InventoryRetrievalJobInput"} + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListJobsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "limit":{ + "shape":"string", + "location":"querystring", + "locationName":"limit" + }, + "marker":{ + "shape":"string", + "location":"querystring", + "locationName":"marker" + }, + "statuscode":{ + "shape":"string", + "location":"querystring", + "locationName":"statuscode" + }, + "completed":{ + "shape":"string", + "location":"querystring", + "locationName":"completed" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ListJobsOutput":{ + "type":"structure", + "members":{ + "JobList":{"shape":"JobList"}, + "Marker":{"shape":"string"} + } + }, + "ListMultipartUploadsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "marker":{ + "shape":"string", + "location":"querystring", + 
"locationName":"marker" + }, + "limit":{ + "shape":"string", + "location":"querystring", + "locationName":"limit" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ListMultipartUploadsOutput":{ + "type":"structure", + "members":{ + "UploadsList":{"shape":"UploadsList"}, + "Marker":{"shape":"string"} + } + }, + "ListPartsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "uploadId":{ + "shape":"string", + "location":"uri", + "locationName":"uploadId" + }, + "marker":{ + "shape":"string", + "location":"querystring", + "locationName":"marker" + }, + "limit":{ + "shape":"string", + "location":"querystring", + "locationName":"limit" + } + }, + "required":[ + "accountId", + "vaultName", + "uploadId" + ] + }, + "ListPartsOutput":{ + "type":"structure", + "members":{ + "MultipartUploadId":{"shape":"string"}, + "VaultARN":{"shape":"string"}, + "ArchiveDescription":{"shape":"string"}, + "PartSizeInBytes":{"shape":"long"}, + "CreationDate":{"shape":"string"}, + "Parts":{"shape":"PartList"}, + "Marker":{"shape":"string"} + } + }, + "ListTagsForVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + } + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "ListTagsForVaultOutput":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagMap"} + } + }, + "ListVaultsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "marker":{ + "shape":"string", + "location":"querystring", + "locationName":"marker" + }, + "limit":{ + "shape":"string", + "location":"querystring", + "locationName":"limit" + } + }, + "required":["accountId"] + }, + 
"ListVaultsOutput":{ + "type":"structure", + "members":{ + "VaultList":{"shape":"VaultList"}, + "Marker":{"shape":"string"} + } + }, + "MissingParameterValueException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "NotificationEventList":{ + "type":"list", + "member":{"shape":"string"} + }, + "NullableLong":{"type":"long"}, + "PartList":{ + "type":"list", + "member":{"shape":"PartListElement"} + }, + "PartListElement":{ + "type":"structure", + "members":{ + "RangeInBytes":{"shape":"string"}, + "SHA256TreeHash":{"shape":"string"} + } + }, + "PolicyEnforcedException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "RemoveTagsFromVaultInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "TagKeys":{"shape":"TagKeyList"} + }, + "required":[ + "accountId", + "vaultName" + ] + }, + "RequestTimeoutException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":408}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "SetDataRetrievalPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + 
"shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "Policy":{"shape":"DataRetrievalPolicy"} + }, + "required":["accountId"] + }, + "SetVaultAccessPolicyInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "policy":{"shape":"VaultAccessPolicy"} + }, + "required":[ + "accountId", + "vaultName" + ], + "payload":"policy" + }, + "SetVaultNotificationsInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "vaultNotificationConfig":{"shape":"VaultNotificationConfig"} + }, + "required":[ + "accountId", + "vaultName" + ], + "payload":"vaultNotificationConfig" + }, + "Size":{"type":"long"}, + "StatusCode":{ + "type":"string", + "enum":[ + "InProgress", + "Succeeded", + "Failed" + ] + }, + "Stream":{ + "type":"blob", + "streaming":true + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"string"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "TagValue":{"type":"string"}, + "UploadArchiveInput":{ + "type":"structure", + "members":{ + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "archiveDescription":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-archive-description" + }, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + }, + "body":{"shape":"Stream"} + }, + "required":[ + "vaultName", + "accountId" + ], + "payload":"body" + }, + "UploadListElement":{ + "type":"structure", + "members":{ + 
"MultipartUploadId":{"shape":"string"}, + "VaultARN":{"shape":"string"}, + "ArchiveDescription":{"shape":"string"}, + "PartSizeInBytes":{"shape":"long"}, + "CreationDate":{"shape":"string"} + } + }, + "UploadMultipartPartInput":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"string", + "location":"uri", + "locationName":"accountId" + }, + "vaultName":{ + "shape":"string", + "location":"uri", + "locationName":"vaultName" + }, + "uploadId":{ + "shape":"string", + "location":"uri", + "locationName":"uploadId" + }, + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + }, + "range":{ + "shape":"string", + "location":"header", + "locationName":"Content-Range" + }, + "body":{"shape":"Stream"} + }, + "required":[ + "accountId", + "vaultName", + "uploadId" + ], + "payload":"body" + }, + "UploadMultipartPartOutput":{ + "type":"structure", + "members":{ + "checksum":{ + "shape":"string", + "location":"header", + "locationName":"x-amz-sha256-tree-hash" + } + } + }, + "UploadsList":{ + "type":"list", + "member":{"shape":"UploadListElement"} + }, + "VaultAccessPolicy":{ + "type":"structure", + "members":{ + "Policy":{"shape":"string"} + } + }, + "VaultList":{ + "type":"list", + "member":{"shape":"DescribeVaultOutput"} + }, + "VaultLockPolicy":{ + "type":"structure", + "members":{ + "Policy":{"shape":"string"} + } + }, + "VaultNotificationConfig":{ + "type":"structure", + "members":{ + "SNSTopic":{"shape":"string"}, + "Events":{"shape":"NotificationEventList"} + } + }, + "boolean":{"type":"boolean"}, + "httpstatus":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/docs-2.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,685 @@ +{ + "version": "2.0", + "operations": { + "AbortMultipartUpload": "

    This operation aborts a multipart upload identified by the upload ID.

    After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.

    This operation is idempotent.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Working with Archives in Amazon Glacier and Abort Multipart Upload in the Amazon Glacier Developer Guide.

    ", + "AbortVaultLock": "

    This operation aborts the vault locking process if the vault lock is not in the Locked state. If the vault lock is in the Locked state when this operation is requested, the operation returns an AccessDeniedException error. Aborting the vault locking process removes the vault lock policy from the specified vault.

    A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can get the state of a vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

    This operation is idempotent. You can successfully invoke this operation multiple times, if the vault lock is in the InProgress state or if there is no policy associated with the vault.

    ", + "AddTagsToVault": "

    This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags. If your request would cause the tag limit for the vault to be exceeded, the operation throws the LimitExceededException error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see Tagging Amazon Glacier Resources.

    ", + "CompleteMultipartUpload": "

    You call this operation to inform Amazon Glacier that all the archive parts have been uploaded and that Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

    In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see Computing Checksums. On the server side, Amazon Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Amazon Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.

    Additionally, Amazon Glacier also checks for any missing content ranges when assembling the archive; if missing content ranges are found, Amazon Glacier returns an error and the operation fails.

    Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Uploading Large Archives in Parts (Multipart Upload) and Complete Multipart Upload in the Amazon Glacier Developer Guide.

    ", + "CompleteVaultLock": "

    This operation completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked state, which causes the vault lock policy to become unchangeable. A vault lock is put into the InProgress state by calling InitiateVaultLock. You can obtain the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

    This operation is idempotent. This request is always successful if the vault lock is in the Locked state and the provided lock ID matches the lock ID originally used to lock the vault.

    If an invalid lock ID is passed in the request when the vault lock is in the Locked state, the operation returns an AccessDeniedException error. If an invalid lock ID is passed in the request when the vault lock is in the InProgress state, the operation throws an InvalidParameter error.

    ", + "CreateVault": "

    This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon Glacier.

    You must use the following guidelines when naming a vault.

    • Names can be between 1 and 255 characters long.

    • Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period).

    This operation is idempotent.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Creating a Vault in Amazon Glacier and Create Vault in the Amazon Glacier Developer Guide.

    ", + "DeleteArchive": "

    This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:

    • If the archive retrieval job is actively preparing the data for download when Amazon Glacier receives the delete archive request, the archival retrieval operation might fail.
    • If the archive retrieval job has successfully prepared the archive for download when Amazon Glacier receives the delete archive request, you will be able to download the output.

    This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Deleting an Archive in Amazon Glacier and Delete Archive in the Amazon Glacier Developer Guide.

    ", + "DeleteVault": "

    This operation deletes a vault. Amazon Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use Initiate a Job (POST jobs) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using Delete Archive (DELETE archive).

    This operation is idempotent.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Deleting a Vault in Amazon Glacier and Delete Vault in the Amazon Glacier Developer Guide.

    ", + "DeleteVaultAccessPolicy": "

    This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.

    This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

    ", + "DeleteVaultNotifications": "

    This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Configuring Vault Notifications in Amazon Glacier and Delete Vault Notification Configuration in the Amazon Glacier Developer Guide.

    ", + "DescribeJob": "

    This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier completes the job. For more information about initiating a job, see InitiateJob.

    This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Amazon Glacier can notify the topic after it completes the job.

    A job ID will not expire for at least 24 hours after Amazon Glacier completes the job.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For information about the underlying REST API, go to Working with Archives in Amazon Glacier in the Amazon Glacier Developer Guide.

    ", + "DescribeVault": "

    This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon Glacier generates vault inventories approximately daily. For more information, see Downloading a Vault Inventory in Amazon Glacier.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Retrieving Vault Metadata in Amazon Glacier and Describe Vault in the Amazon Glacier Developer Guide.

    ", + "GetDataRetrievalPolicy": "

    This operation returns the current data retrieval policy for the account and region specified in the GET request. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

    ", + "GetJobOutput": "

    This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.

    A job ID will not expire for at least 24 hours after Amazon Glacier completes the job. That is, you can download the job output within the 24 hours period after Amazon Glacier completes the job.

    If the job output is large, then you can use the Range request header to retrieve a portion of the output. This allows you to download the entire output in smaller chunks of bytes. For example, suppose you have 1 GB of job output you want to download and you decide to download 128 MB chunks of data at a time, which is a total of eight Get Job Output requests. You use the following process to download the job output:

    1. Download a 128 MB chunk of output by specifying the appropriate byte range using the Range header.

    2. Along with the data, the response includes a SHA256 tree hash of the payload. You compute the checksum of the payload on the client and compare it with the checksum you received in the response to ensure you received all the expected data.

    3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each time specifying the appropriate byte range.

    4. After downloading all the parts of the job output, you have a list of eight checksum values. Compute the tree hash of these values to find the checksum of the entire output. Using the DescribeJob API, obtain job information of the job that provided you the output. The response includes the checksum of the entire archive stored in Amazon Glacier. You compare this value with the checksum you computed to ensure you have downloaded the entire archive content with no errors.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and the underlying REST API, go to Downloading a Vault Inventory, Downloading an Archive, and Get Job Output

    ", + "GetVaultAccessPolicy": "

    This operation retrieves the access-policy subresource set on the vault; for more information on setting this subresource, see Set Vault Access Policy (PUT access-policy). If there is no access policy set on the vault, the operation returns a 404 Not found error. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

    ", + "GetVaultLock": "

    This operation retrieves the following attributes from the lock-policy subresource set on the specified vault:

    • The vault lock policy set on the vault.

    • The state of the vault lock, which is either InProgress or Locked.

    • When the lock ID expires. The lock ID is used to complete the vault locking process.

    • When the vault lock was initiated and put into the InProgress state.

    A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can abort the vault locking process by calling AbortVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

    If there is no vault lock policy set on the vault, the operation returns a 404 Not found error. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

    ", + "GetVaultNotifications": "

    This operation retrieves the notification-configuration subresource of the specified vault.

    For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a 404 Not Found error. For more information about vault notifications, see Configuring Vault Notifications in Amazon Glacier.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Configuring Vault Notifications in Amazon Glacier and Get Vault Notification Configuration in the Amazon Glacier Developer Guide.

    ", + "InitiateJob": "

    This operation initiates a job of the specified type. In this release, you can initiate a job to retrieve either an archive or a vault inventory (a list of archives in a vault).

    Retrieving data from Amazon Glacier is a two-step process:

    1. Initiate a retrieval job.

      A data retrieval policy can cause your initiate retrieval job request to fail with a PolicyEnforcedException exception. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies. For more information about the PolicyEnforcedException exception, see Error Responses.

    2. After the job completes, download the bytes.

    The retrieval request is executed asynchronously. When you initiate a retrieval job, Amazon Glacier creates a job and returns a job ID in the response. When Amazon Glacier completes the job, you can get the job output (archive or inventory data). For information about getting job output, see GetJobOutput operation.

    The job must complete before you can get its output. To determine when a job is complete, you have the following options:

    • Use Amazon SNS Notification You can specify an Amazon Simple Notification Service (Amazon SNS) topic to which Amazon Glacier can post a notification after the job is completed. You can specify an SNS topic per job request. The notification is sent only after Amazon Glacier completes the job. In addition to specifying an SNS topic per job request, you can configure vault notifications for a vault so that job notifications are always sent. For more information, see SetVaultNotifications.

    • Get job details You can make a DescribeJob request to obtain job status information while a job is in progress. However, it is more efficient to use an Amazon SNS notification to determine when a job is complete.

    The information you get via notification is the same as what you get by calling DescribeJob.

    If for a specific event, you add both the notification configuration on the vault and also specify an SNS topic in your initiate job request, Amazon Glacier sends both notifications. For more information, see SetVaultNotifications.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    About the Vault Inventory

    Amazon Glacier prepares an inventory for each vault periodically, every 24 hours. When you initiate a job for a vault inventory, Amazon Glacier returns the last inventory for the vault. The inventory data you get might be up to a day or two days old. Also, the initiate inventory job might take some time to complete before you can download the vault inventory. So you do not want to retrieve a vault inventory for each vault operation. However, in some scenarios, you might find the vault inventory useful. For example, when you upload an archive, you can provide an archive description but not an archive name. Amazon Glacier provides you a unique archive ID, an opaque string of characters. So, you might maintain your own database that maps archive names to their corresponding Amazon Glacier assigned archive IDs. You might find the vault inventory useful in the event you need to reconcile information in your database with the actual vault inventory.

    Range Inventory Retrieval

    You can limit the number of inventory items retrieved by filtering on the archive creation date or by setting a limit.

    Filtering by Archive Creation Date

    You can retrieve inventory items for archives created between StartDate and EndDate by specifying values for these parameters in the InitiateJob request. Archives created on or after the StartDate and before the EndDate will be returned. If you only provide the StartDate without the EndDate, you will retrieve the inventory for all archives created on or after the StartDate. If you only provide the EndDate without the StartDate, you will get back the inventory for all archives created before the EndDate.

    Limiting Inventory Items per Retrieval

    You can limit the number of inventory items returned by setting the Limit parameter in the InitiateJob request. The inventory job output will contain inventory items up to the specified Limit. If there are more inventory items available, the result is paginated. After a job is complete you can use the DescribeJob operation to get a marker that you use in a subsequent InitiateJob request. The marker will indicate the starting point to retrieve the next set of inventory items. You can page through your entire inventory by repeatedly making InitiateJob requests with the marker from the previous DescribeJob output, until you get a marker from DescribeJob that returns null, indicating that there are no more inventory items available.

    You can use the Limit parameter together with the date range parameters.

    About Ranged Archive Retrieval

    You can initiate an archive retrieval for the whole archive or a range of the archive. In the case of ranged archive retrieval, you specify a byte range to return or the whole archive. The range specified must be megabyte (MB) aligned, that is the range start value must be divisible by 1 MB and range end value plus 1 must be divisible by 1 MB or equal the end of the archive. If the ranged archive retrieval is not megabyte aligned, this operation returns a 400 response. Furthermore, to ensure you get checksum values for data you download using Get Job Output API, the range must be tree hash aligned.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and the underlying REST API, go to Initiate a Job and Downloading a Vault Inventory

    ", + "InitiateMultipartUpload": "

    This operation initiates a multipart upload. Amazon Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).

    When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2-for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.

    Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.

    You don't need to know the size of the archive when you start a multipart upload because Amazon Glacier does not require you to specify the overall archive size.

    After you complete the multipart upload, Amazon Glacier removes the multipart upload resource referenced by the ID. Amazon Glacier also removes the multipart upload resource if you cancel the multipart upload or it may be removed if there is no activity for a period of 24 hours.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Uploading Large Archives in Parts (Multipart Upload) and Initiate Multipart Upload in the Amazon Glacier Developer Guide.

    ", + "InitiateVaultLock": "

    This operation initiates the vault locking process by doing the following:

    • Installing a vault lock policy on the specified vault.

    • Setting the lock state of vault lock to InProgress.

    • Returning a lock ID, which is used to complete the vault locking process.

    You can set one vault lock policy for each vault and this policy can be up to 20 KB in size. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

    You must complete the vault locking process within 24 hours after the vault lock enters the InProgress state. After the 24 hour window ends, the lock ID expires, the vault automatically exits the InProgress state, and the vault lock policy is removed from the vault. You call CompleteVaultLock to complete the vault locking process by setting the state of the vault lock to Locked.

    After a vault lock is in the Locked state, you cannot initiate a new vault lock for the vault.

    You can abort the vault locking process by calling AbortVaultLock. You can get the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

    If this operation is called when the vault lock is in the InProgress state, the operation returns an AccessDeniedException error. When the vault lock is in the InProgress state you must call AbortVaultLock before you can initiate a new vault lock policy.

    ", + "ListJobs": "

    This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished.

    Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.

    To retrieve an archive or retrieve a vault inventory from Amazon Glacier, you first initiate a job, and after the job completes, you download the data. For an archive retrieval, the output is the archive data, and for an inventory retrieval, it is the inventory list. The List Job operation returns a list of these jobs sorted by job initiation time.

    This List Jobs operation supports pagination. By default, this operation returns up to 1,000 jobs in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of jobs that begins at a specific job, set the marker request parameter to the value you obtained from a previous List Jobs request. You can also limit the number of jobs returned in the response by specifying the limit parameter in the request.

    Additionally, you can filter the jobs list returned by specifying an optional statuscode (InProgress, Succeeded, or Failed) and completed (true, false) parameter. The statuscode allows you to specify that only jobs that match a specified status are returned. The completed parameter allows you to specify that only jobs in a specific completion state are returned.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For the underlying REST API, go to List Jobs

    ", + "ListMultipartUploads": "

    This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.

    The List Multipart Uploads operation supports pagination. By default, this operation returns up to 1,000 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.

    Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and the underlying REST API, go to Working with Archives in Amazon Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.

    ", + "ListParts": "

    This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload). List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.

    The List Parts operation supports pagination. By default, this operation returns up to 1,000 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and the underlying REST API, go to Working with Archives in Amazon Glacier and List Parts in the Amazon Glacier Developer Guide.

    ", + "ListTagsForVault": "

    This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see Tagging Amazon Glacier Resources.

    ", + "ListVaults": "

    This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.

    By default, this operation returns up to 1,000 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Retrieving Vault Metadata in Amazon Glacier and List Vaults in the Amazon Glacier Developer Guide.

    ", + "RemoveTagsFromVault": "

    This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see Tagging Amazon Glacier Resources. This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.

    ", + "SetDataRetrievalPolicy": "

    This operation sets and then enacts a data retrieval policy in the region specified in the PUT request. You can set one policy per region for an AWS account. The policy is enacted within a few minutes of a successful PUT operation.

    The set policy operation does not affect retrieval jobs that were in progress before the policy was enacted. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

    ", + "SetVaultAccessPolicy": "

    This operation configures an access policy for a vault and will overwrite an existing policy. To configure a vault access policy, send a PUT request to the access-policy subresource of the vault. An access policy is specific to a vault and is also called a vault subresource. You can set one access policy per vault and the policy can be up to 20 KB in size. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

    ", + "SetVaultNotifications": "

    This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications.

    To configure vault notifications, send a PUT request to the notification-configuration subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon Glacier to send notifications to the topic.

    Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events:

    • ArchiveRetrievalCompleted This event occurs when a job that was initiated for an archive retrieval is completed (InitiateJob). The status of the completed job can be \"Succeeded\" or \"Failed\". The notification sent to the SNS topic is the same output as returned from DescribeJob.
    • InventoryRetrievalCompleted This event occurs when a job that was initiated for an inventory retrieval is completed (InitiateJob). The status of the completed job can be \"Succeeded\" or \"Failed\". The notification sent to the SNS topic is the same output as returned from DescribeJob.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Configuring Vault Notifications in Amazon Glacier and Set Vault Notification Configuration in the Amazon Glacier Developer Guide.

    ", + "UploadArchive": "

    This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon Glacier returns the archive ID in the x-amz-archive-id header of the response.

    You must use the archive ID to access your data in Amazon Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

    You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see Computing Checksums.

    You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.

    Archives are immutable. After you upload an archive, you cannot edit the archive or its description.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Uploading an Archive in Amazon Glacier and Upload Archive in the Amazon Glacier Developer Guide.

    ", + "UploadMultipartPart": "

    This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.

    Amazon Glacier rejects your upload part request if any of the following conditions is true:

    • SHA256 tree hash does not match. To ensure that part data is not corrupted in transmission, you compute a SHA256 tree hash of the part and include it in your request. Upon receiving the part data, Amazon Glacier also computes a SHA256 tree hash. If these hash values don't match, the operation fails. For information about computing a SHA256 tree hash, see Computing Checksums.

    • Part size does not match. The size of each part except the last must match the size specified in the corresponding InitiateMultipartUpload request. The size of the last part must be the same size as, or smaller than, the specified size.

      If you upload a part whose size is smaller than the part size you specified in your initiate multipart upload request and that part is not the last part, then the upload part request will succeed. However, the subsequent Complete Multipart Upload request will fail.

    • Range does not align. The byte range value in the request does not align with the part size specified in the corresponding initiate request. For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. However, if you set a range value of 2 MB to 6 MB, the range does not align with the part size and the upload will fail.

    This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.

    An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

    For conceptual information and underlying REST API, go to Uploading Large Archives in Parts (Multipart Upload) and Upload Part in the Amazon Glacier Developer Guide.

    " + }, + "service": "

    Amazon Glacier is a storage solution for \"cold data.\"

    Amazon Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Amazon Glacier, customers can store their data cost effectively for months, years, or decades. Amazon Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations.

    Amazon Glacier is a great storage choice when low storage cost is paramount, your data is rarely retrieved, and retrieval latency of several hours is acceptable. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, go to Amazon Simple Storage Service (Amazon S3).

    You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Amazon Glacier.

    If you are a first-time user of Amazon Glacier, we recommend that you begin by reading the following sections in the Amazon Glacier Developer Guide:

    • What is Amazon Glacier - This section of the Developer Guide describes the underlying data model, the operations it supports, and the AWS SDKs that you can use to interact with the service.

    • Getting Started with Amazon Glacier - The Getting Started section walks you through the process of creating a vault, uploading archives, creating jobs to download archives, retrieving the job output, and deleting archives.

    ", + "shapes": { + "AbortMultipartUploadInput": { + "base": "

    Provides options to abort a multipart upload identified by the upload ID.

    For information about the underlying REST API, go to Abort Multipart Upload. For conceptual information, go to Working with Archives in Amazon Glacier.

    ", + "refs": { + } + }, + "AbortVaultLockInput": { + "base": "

    The input values for AbortVaultLock.

    ", + "refs": { + } + }, + "ActionCode": { + "base": null, + "refs": { + "GlacierJobDescription$Action": "

    The job type. It is either ArchiveRetrieval or InventoryRetrieval.

    " + } + }, + "AddTagsToVaultInput": { + "base": "

    The input values for AddTagsToVault.

    ", + "refs": { + } + }, + "ArchiveCreationOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    For information about the underlying REST API, go to Upload Archive. For conceptual information, go to Working with Archives in Amazon Glacier.

    ", + "refs": { + } + }, + "CompleteMultipartUploadInput": { + "base": "

    Provides options to complete a multipart upload operation. This informs Amazon Glacier that all the archive parts have been uploaded and Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource.

    ", + "refs": { + } + }, + "CompleteVaultLockInput": { + "base": "

    The input values for CompleteVaultLock.

    ", + "refs": { + } + }, + "CreateVaultInput": { + "base": "

    Provides options to create a vault.

    ", + "refs": { + } + }, + "CreateVaultOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "DataRetrievalPolicy": { + "base": "

    Data retrieval policy.

    ", + "refs": { + "GetDataRetrievalPolicyOutput$Policy": "

    Contains the returned data retrieval policy in JSON format.

    ", + "SetDataRetrievalPolicyInput$Policy": "

    The data retrieval policy in JSON format.

    " + } + }, + "DataRetrievalRule": { + "base": "

    Data retrieval policy rule.

    ", + "refs": { + "DataRetrievalRulesList$member": null + } + }, + "DataRetrievalRulesList": { + "base": null, + "refs": { + "DataRetrievalPolicy$Rules": "

    The policy rule. Although this is a list type, currently there must be only one rule, which contains a Strategy field and optionally a BytesPerHour field.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "InventoryRetrievalJobDescription$StartDate": "

    The start of the date range in UTC for vault inventory retrieval that includes archives created on or after this date. A string representation of ISO 8601 date format, for example, 2013-03-20T17:03:43Z.

    ", + "InventoryRetrievalJobDescription$EndDate": "

    The end of the date range in UTC for vault inventory retrieval that includes archives created before this date. A string representation of ISO 8601 date format, for example, 2013-03-20T17:03:43Z.

    " + } + }, + "DeleteArchiveInput": { + "base": "

    Provides options for deleting an archive from an Amazon Glacier vault.

    ", + "refs": { + } + }, + "DeleteVaultAccessPolicyInput": { + "base": "

    DeleteVaultAccessPolicy input.

    ", + "refs": { + } + }, + "DeleteVaultInput": { + "base": "

    Provides options for deleting a vault from Amazon Glacier.

    ", + "refs": { + } + }, + "DeleteVaultNotificationsInput": { + "base": "

    Provides options for deleting a vault notification configuration from an Amazon Glacier vault.

    ", + "refs": { + } + }, + "DescribeJobInput": { + "base": "

    Provides options for retrieving a job description.

    ", + "refs": { + } + }, + "DescribeVaultInput": { + "base": "

    Provides options for retrieving metadata for a specific vault in Amazon Glacier.

    ", + "refs": { + } + }, + "DescribeVaultOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + "VaultList$member": null + } + }, + "GetDataRetrievalPolicyInput": { + "base": "

    Input for GetDataRetrievalPolicy.

    ", + "refs": { + } + }, + "GetDataRetrievalPolicyOutput": { + "base": "

    Contains the Amazon Glacier response to the GetDataRetrievalPolicy request.

    ", + "refs": { + } + }, + "GetJobOutputInput": { + "base": "

    Provides options for downloading output of an Amazon Glacier job.

    ", + "refs": { + } + }, + "GetJobOutputOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "GetVaultAccessPolicyInput": { + "base": "

    Input for GetVaultAccessPolicy.

    ", + "refs": { + } + }, + "GetVaultAccessPolicyOutput": { + "base": "

    Output for GetVaultAccessPolicy.

    ", + "refs": { + } + }, + "GetVaultLockInput": { + "base": "

    The input values for GetVaultLock.

    ", + "refs": { + } + }, + "GetVaultLockOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "GetVaultNotificationsInput": { + "base": "

    Provides options for retrieving the notification configuration set on an Amazon Glacier vault.

    ", + "refs": { + } + }, + "GetVaultNotificationsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "GlacierJobDescription": { + "base": "

    Describes an Amazon Glacier job.

    ", + "refs": { + "JobList$member": null + } + }, + "InitiateJobInput": { + "base": "

    Provides options for initiating an Amazon Glacier job.

    ", + "refs": { + } + }, + "InitiateJobOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "InitiateMultipartUploadInput": { + "base": "

    Provides options for initiating a multipart upload to an Amazon Glacier vault.

    ", + "refs": { + } + }, + "InitiateMultipartUploadOutput": { + "base": "

    The Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "InitiateVaultLockInput": { + "base": "

    The input values for InitiateVaultLock.

    ", + "refs": { + } + }, + "InitiateVaultLockOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    Returned if a parameter of the request is incorrectly specified.

    ", + "refs": { + } + }, + "InventoryRetrievalJobDescription": { + "base": "

    Describes the options for a range inventory retrieval job.

    ", + "refs": { + "GlacierJobDescription$InventoryRetrievalParameters": "

    Parameters used for range inventory retrieval.

    " + } + }, + "InventoryRetrievalJobInput": { + "base": "

    Provides options for specifying a range inventory retrieval job.

    ", + "refs": { + "JobParameters$InventoryRetrievalParameters": "

    Input parameters used for range inventory retrieval.

    " + } + }, + "JobList": { + "base": null, + "refs": { + "ListJobsOutput$JobList": "

    A list of job objects. Each job object contains metadata describing the job.

    " + } + }, + "JobParameters": { + "base": "

    Provides options for defining a job.

    ", + "refs": { + "InitiateJobInput$jobParameters": "

    Provides options for specifying job information.

    " + } + }, + "LimitExceededException": { + "base": "

    Returned if the request results in a vault or account limit being exceeded.

    ", + "refs": { + } + }, + "ListJobsInput": { + "base": "

    Provides options for retrieving a job list for an Amazon Glacier vault.

    ", + "refs": { + } + }, + "ListJobsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "ListMultipartUploadsInput": { + "base": "

    Provides options for retrieving list of in-progress multipart uploads for an Amazon Glacier vault.

    ", + "refs": { + } + }, + "ListMultipartUploadsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "ListPartsInput": { + "base": "

    Provides options for retrieving a list of parts of an archive that have been uploaded in a specific multipart upload.

    ", + "refs": { + } + }, + "ListPartsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "ListTagsForVaultInput": { + "base": "

    The input value for ListTagsForVaultInput.

    ", + "refs": { + } + }, + "ListTagsForVaultOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "ListVaultsInput": { + "base": "

    Provides options to retrieve the vault list owned by the calling user's account. The list provides metadata information for each vault.

    ", + "refs": { + } + }, + "ListVaultsOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "MissingParameterValueException": { + "base": "

    Returned if a required header or parameter is missing from the request.

    ", + "refs": { + } + }, + "NotificationEventList": { + "base": null, + "refs": { + "VaultNotificationConfig$Events": "

    A list of one or more events for which Amazon Glacier will send a notification to the specified Amazon SNS topic.

    " + } + }, + "NullableLong": { + "base": null, + "refs": { + "DataRetrievalRule$BytesPerHour": "

    The maximum number of bytes that can be retrieved in an hour.

    This field is required only if the value of the Strategy field is BytesPerHour. Your PUT operation will be rejected if the Strategy field is not set to BytesPerHour and you set this field.

    " + } + }, + "PartList": { + "base": null, + "refs": { + "ListPartsOutput$Parts": "

    A list of the part sizes of the multipart upload.

    " + } + }, + "PartListElement": { + "base": "

    A list of the part sizes of the multipart upload.

    ", + "refs": { + "PartList$member": null + } + }, + "PolicyEnforcedException": { + "base": "

    Returned if a retrieval job would exceed the current data policy's retrieval rate limit. For more information about data retrieval policies,

    ", + "refs": { + } + }, + "RemoveTagsFromVaultInput": { + "base": "

    The input value for RemoveTagsFromVaultInput.

    ", + "refs": { + } + }, + "RequestTimeoutException": { + "base": "

    Returned if, when uploading an archive, Amazon Glacier times out while receiving the upload.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    Returned if the specified resource, such as a vault, upload ID, or job ID, does not exist.

    ", + "refs": { + } + }, + "ServiceUnavailableException": { + "base": "

    Returned if the service cannot complete the request.

    ", + "refs": { + } + }, + "SetDataRetrievalPolicyInput": { + "base": "

    SetDataRetrievalPolicy input.

    ", + "refs": { + } + }, + "SetVaultAccessPolicyInput": { + "base": "

    SetVaultAccessPolicy input.

    ", + "refs": { + } + }, + "SetVaultNotificationsInput": { + "base": "

    Provides options to configure notifications that will be sent when specific events happen to a vault.

    ", + "refs": { + } + }, + "Size": { + "base": null, + "refs": { + "GlacierJobDescription$ArchiveSizeInBytes": "

    For an ArchiveRetrieval job, this is the size in bytes of the archive being requested for download. For the InventoryRetrieval job, the value is null.

    ", + "GlacierJobDescription$InventorySizeInBytes": "

    For an InventoryRetrieval job, this is the size in bytes of the inventory requested for download. For the ArchiveRetrieval job, the value is null.

    " + } + }, + "StatusCode": { + "base": null, + "refs": { + "GlacierJobDescription$StatusCode": "

    The status code can be InProgress, Succeeded, or Failed, and indicates the status of the job.

    " + } + }, + "Stream": { + "base": null, + "refs": { + "GetJobOutputOutput$body": "

    The job data, either archive data or inventory data.

    ", + "UploadArchiveInput$body": "

    The data to upload.

    ", + "UploadMultipartPartInput$body": "

    The data to upload.

    " + } + }, + "TagKey": { + "base": null, + "refs": { + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "RemoveTagsFromVaultInput$TagKeys": "

    A list of tag keys. Each corresponding tag is removed from the vault.

    " + } + }, + "TagMap": { + "base": null, + "refs": { + "AddTagsToVaultInput$Tags": "

    The tags to add to the vault. Each tag is composed of a key and a value. The value can be an empty string.

    ", + "ListTagsForVaultOutput$Tags": "

    The tags attached to the vault. Each tag is composed of a key and a value.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, + "UploadArchiveInput": { + "base": "

    Provides options to add an archive to a vault.

    ", + "refs": { + } + }, + "UploadListElement": { + "base": "

    A list of in-progress multipart uploads for a vault.

    ", + "refs": { + "UploadsList$member": null + } + }, + "UploadMultipartPartInput": { + "base": "

    Provides options to upload a part of an archive in a multipart upload operation.

    ", + "refs": { + } + }, + "UploadMultipartPartOutput": { + "base": "

    Contains the Amazon Glacier response to your request.

    ", + "refs": { + } + }, + "UploadsList": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$UploadsList": "

    A list of in-progress multipart uploads.

    " + } + }, + "VaultAccessPolicy": { + "base": "

    Contains the vault access policy.

    ", + "refs": { + "GetVaultAccessPolicyOutput$policy": "

    Contains the returned vault access policy as a JSON string.

    ", + "SetVaultAccessPolicyInput$policy": "

    The vault access policy as a JSON string.

    " + } + }, + "VaultList": { + "base": null, + "refs": { + "ListVaultsOutput$VaultList": "

    List of vaults.

    " + } + }, + "VaultLockPolicy": { + "base": "

    Contains the vault lock policy.

    ", + "refs": { + "InitiateVaultLockInput$policy": "

    The vault lock policy as a JSON string, which uses \"\\\" as an escape character.

    " + } + }, + "VaultNotificationConfig": { + "base": "

    Represents a vault's notification configuration.

    ", + "refs": { + "GetVaultNotificationsOutput$vaultNotificationConfig": "

    Returns the notification configuration set on the vault.

    ", + "SetVaultNotificationsInput$vaultNotificationConfig": "

    Provides options for specifying notification configuration.

    " + } + }, + "boolean": { + "base": null, + "refs": { + "GlacierJobDescription$Completed": "

    The job status. When a job is completed, you get the job's output.

    " + } + }, + "httpstatus": { + "base": null, + "refs": { + "GetJobOutputOutput$status": "

    The HTTP response code for a job output request. The value depends on whether a range was specified in the request.

    " + } + }, + "long": { + "base": null, + "refs": { + "DescribeVaultOutput$NumberOfArchives": "

    The number of archives in the vault as of the last inventory date. This field will return null if an inventory has not yet run on the vault, for example, if you just created the vault.

    ", + "DescribeVaultOutput$SizeInBytes": "

    Total size, in bytes, of the archives in the vault as of the last inventory date. This field will return null if an inventory has not yet run on the vault, for example, if you just created the vault.

    ", + "ListPartsOutput$PartSizeInBytes": "

    The part size in bytes.

    ", + "UploadListElement$PartSizeInBytes": "

    The part size, in bytes, specified in the Initiate Multipart Upload request. This is the size of all the parts in the upload except the last part, which may be smaller than this size.

    " + } + }, + "string": { + "base": null, + "refs": { + "AbortMultipartUploadInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "AbortMultipartUploadInput$vaultName": "

    The name of the vault.

    ", + "AbortMultipartUploadInput$uploadId": "

    The upload ID of the multipart upload to delete.

    ", + "AbortVaultLockInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "AbortVaultLockInput$vaultName": "

    The name of the vault.

    ", + "AddTagsToVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "AddTagsToVaultInput$vaultName": "

    The name of the vault.

    ", + "ArchiveCreationOutput$location": "

    The relative URI path of the newly added archive resource.

    ", + "ArchiveCreationOutput$checksum": "

    The checksum of the archive computed by Amazon Glacier.

    ", + "ArchiveCreationOutput$archiveId": "

    The ID of the archive. This value is also included as part of the location.

    ", + "CompleteMultipartUploadInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "CompleteMultipartUploadInput$vaultName": "

    The name of the vault.

    ", + "CompleteMultipartUploadInput$uploadId": "

    The upload ID of the multipart upload.

    ", + "CompleteMultipartUploadInput$archiveSize": "

    The total size, in bytes, of the entire archive. This value should be the sum of all the sizes of the individual parts that you uploaded.

    ", + "CompleteMultipartUploadInput$checksum": "

    The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 tree hash of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon Glacier, Amazon Glacier returns an error and the request fails.

    ", + "CompleteVaultLockInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "CompleteVaultLockInput$vaultName": "

    The name of the vault.

    ", + "CompleteVaultLockInput$lockId": "

    The lockId value is the lock ID obtained from a InitiateVaultLock request.

    ", + "CreateVaultInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "CreateVaultInput$vaultName": "

    The name of the vault.

    ", + "CreateVaultOutput$location": "

    The URI of the vault that was created.

    ", + "DataRetrievalRule$Strategy": "

    The type of data retrieval policy to set.

    Valid values: BytesPerHour|FreeTier|None

    ", + "DeleteArchiveInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "DeleteArchiveInput$vaultName": "

    The name of the vault.

    ", + "DeleteArchiveInput$archiveId": "

    The ID of the archive to delete.

    ", + "DeleteVaultAccessPolicyInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "DeleteVaultAccessPolicyInput$vaultName": "

    The name of the vault.

    ", + "DeleteVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "DeleteVaultInput$vaultName": "

    The name of the vault.

    ", + "DeleteVaultNotificationsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "DeleteVaultNotificationsInput$vaultName": "

    The name of the vault.

    ", + "DescribeJobInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "DescribeJobInput$vaultName": "

    The name of the vault.

    ", + "DescribeJobInput$jobId": "

    The ID of the job to describe.

    ", + "DescribeVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "DescribeVaultInput$vaultName": "

    The name of the vault.

    ", + "DescribeVaultOutput$VaultARN": "

    The Amazon Resource Name (ARN) of the vault.

    ", + "DescribeVaultOutput$VaultName": "

    The name of the vault.

    ", + "DescribeVaultOutput$CreationDate": "

    The UTC date when the vault was created. A string representation of ISO 8601 date format, for example, \"2012-03-20T17:03:43.221Z\".

    ", + "DescribeVaultOutput$LastInventoryDate": "

    The UTC date when Amazon Glacier completed the last vault inventory. A string representation of ISO 8601 date format, for example, \"2012-03-20T17:03:43.221Z\".

    ", + "GetDataRetrievalPolicyInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "GetJobOutputInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "GetJobOutputInput$vaultName": "

    The name of the vault.

    ", + "GetJobOutputInput$jobId": "

    The job ID whose data is downloaded.

    ", + "GetJobOutputInput$range": "

    The range of bytes to retrieve from the output. For example, if you want to download the first 1,048,576 bytes, specify \"Range: bytes=0-1048575\". By default, this operation downloads the entire output.

    ", + "GetJobOutputOutput$checksum": "

    The checksum of the data in the response. This header is returned only when retrieving the output for an archive retrieval job. Furthermore, this header appears only under the following conditions:

    • You get the entire range of the archive.
    • You request a range to return of the archive that starts and ends on a multiple of 1 MB. For example, if you have an 3.1 MB archive and you specify a range to return that starts at 1 MB and ends at 2 MB, then the x-amz-sha256-tree-hash is returned as a response header.
    • You request a range of the archive to return that starts on a multiple of 1 MB and goes to the end of the archive. For example, if you have a 3.1 MB archive and you specify a range that starts at 2 MB and ends at 3.1 MB (the end of the archive), then the x-amz-sha256-tree-hash is returned as a response header.

    ", + "GetJobOutputOutput$contentRange": "

    The range of bytes returned by Amazon Glacier. If only partial output is downloaded, the response provides the range of bytes Amazon Glacier returned. For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB.

    ", + "GetJobOutputOutput$acceptRanges": "

    Indicates the range units accepted. For more information, go to RFC2616.

    ", + "GetJobOutputOutput$contentType": "

    The Content-Type depends on whether the job output is an archive or a vault inventory. For archive data, the Content-Type is application/octet-stream. For vault inventory, if you requested CSV format when you initiated the job, the Content-Type is text/csv. Otherwise, by default, vault inventory is returned as JSON, and the Content-Type is application/json.

    ", + "GetJobOutputOutput$archiveDescription": "

    The description of an archive.

    ", + "GetVaultAccessPolicyInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "GetVaultAccessPolicyInput$vaultName": "

    The name of the vault.

    ", + "GetVaultLockInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "GetVaultLockInput$vaultName": "

    The name of the vault.

    ", + "GetVaultLockOutput$Policy": "

    The vault lock policy as a JSON string, which uses \"\\\" as an escape character.

    ", + "GetVaultLockOutput$State": "

    The state of the vault lock. InProgress or Locked.

    ", + "GetVaultLockOutput$ExpirationDate": "

    The UTC date and time at which the lock ID expires. This value can be null if the vault lock is in a Locked state.

    ", + "GetVaultLockOutput$CreationDate": "

    The UTC date and time at which the vault lock was put into the InProgress state.

    ", + "GetVaultNotificationsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "GetVaultNotificationsInput$vaultName": "

    The name of the vault.

    ", + "GlacierJobDescription$JobId": "

    An opaque string that identifies an Amazon Glacier job.

    ", + "GlacierJobDescription$JobDescription": "

    The job description you provided when you initiated the job.

    ", + "GlacierJobDescription$ArchiveId": "

    For an ArchiveRetrieval job, this is the archive ID requested for download. Otherwise, this field is null.

    ", + "GlacierJobDescription$VaultARN": "

    The Amazon Resource Name (ARN) of the vault from which the archive retrieval was requested.

    ", + "GlacierJobDescription$CreationDate": "

    The UTC date when the job was created. A string representation of ISO 8601 date format, for example, \"2012-03-20T17:03:43.221Z\".

    ", + "GlacierJobDescription$StatusMessage": "

    A friendly message that describes the job status.

    ", + "GlacierJobDescription$SNSTopic": "

    An Amazon Simple Notification Service (Amazon SNS) topic that receives notification.

    ", + "GlacierJobDescription$CompletionDate": "

    The UTC time that the archive retrieval request completed. While the job is in progress, the value will be null.

    ", + "GlacierJobDescription$SHA256TreeHash": "

    For an ArchiveRetrieval job, it is the checksum of the archive. Otherwise, the value is null.

    The SHA256 tree hash value for the requested range of an archive. If the Initiate a Job request for an archive specified a tree-hash aligned range, then this field returns a value.

    For the specific case when the whole archive is retrieved, this value is the same as the ArchiveSHA256TreeHash value.

    This field is null in the following situations:

    • Archive retrieval jobs that specify a range that is not tree-hash aligned.

    • Archival jobs that specify a range that is equal to the whole archive and the job status is InProgress.

    • Inventory jobs.

    ", + "GlacierJobDescription$ArchiveSHA256TreeHash": "

    The SHA256 tree hash of the entire archive for an archive retrieval. For inventory retrieval jobs, this field is null.

    ", + "GlacierJobDescription$RetrievalByteRange": "

    The retrieved byte range for archive retrieval jobs in the form \"StartByteValue-EndByteValue\" If no range was specified in the archive retrieval, then the whole archive is retrieved and StartByteValue equals 0 and EndByteValue equals the size of the archive minus 1. For inventory retrieval jobs this field is null.

    ", + "InitiateJobInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "InitiateJobInput$vaultName": "

    The name of the vault.

    ", + "InitiateJobOutput$location": "

    The relative URI path of the job.

    ", + "InitiateJobOutput$jobId": "

    The ID of the job.

    ", + "InitiateMultipartUploadInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "InitiateMultipartUploadInput$vaultName": "

    The name of the vault.

    ", + "InitiateMultipartUploadInput$archiveDescription": "

    The archive description that you are uploading in parts.

    The part size must be a megabyte (1024 KB) multiplied by a power of 2, for example 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB (4096 MB).

    ", + "InitiateMultipartUploadInput$partSize": "

    The size of each part except the last, in bytes. The last part can be smaller than this part size.

    ", + "InitiateMultipartUploadOutput$location": "

    The relative URI path of the multipart upload ID Amazon Glacier created.

    ", + "InitiateMultipartUploadOutput$uploadId": "

    The ID of the multipart upload. This value is also included as part of the location.

    ", + "InitiateVaultLockInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "InitiateVaultLockInput$vaultName": "

    The name of the vault.

    ", + "InitiateVaultLockOutput$lockId": "

    The lock ID, which is used to complete the vault locking process.

    ", + "InvalidParameterValueException$type": "

    Client

    ", + "InvalidParameterValueException$code": "

    400 Bad Request

    ", + "InvalidParameterValueException$message": null, + "InventoryRetrievalJobDescription$Format": "

    The output format for the vault inventory list, which is set by the InitiateJob request when initiating a job to retrieve a vault inventory. Valid values are \"CSV\" and \"JSON\".

    ", + "InventoryRetrievalJobDescription$Limit": "

    Specifies the maximum number of inventory items returned per vault inventory retrieval request. This limit is set when initiating the job with the a InitiateJob request.

    ", + "InventoryRetrievalJobDescription$Marker": "

    An opaque string that represents where to continue pagination of the vault inventory retrieval results. You use the marker in a new InitiateJob request to obtain additional inventory items. If there are no more inventory items, this value is null. For more information, see Range Inventory Retrieval.

    ", + "InventoryRetrievalJobInput$StartDate": "

    The start of the date range in UTC for vault inventory retrieval that includes archives created on or after this date. A string representation of ISO 8601 date format, for example, 2013-03-20T17:03:43Z.

    ", + "InventoryRetrievalJobInput$EndDate": "

    The end of the date range in UTC for vault inventory retrieval that includes archives created before this date. A string representation of ISO 8601 date format, for example, 2013-03-20T17:03:43Z.

    ", + "InventoryRetrievalJobInput$Limit": "

    Specifies the maximum number of inventory items returned per vault inventory retrieval request. Valid values are greater than or equal to 1.

    ", + "InventoryRetrievalJobInput$Marker": "

    An opaque string that represents where to continue pagination of the vault inventory retrieval results. You use the marker in a new InitiateJob request to obtain additional inventory items. If there are no more inventory items, this value is null.

    ", + "JobParameters$Format": "

    When initiating a job to retrieve a vault inventory, you can optionally add this parameter to your request to specify the output format. If you are initiating an inventory job and do not specify a Format field, JSON is the default format. Valid values are \"CSV\" and \"JSON\".

    ", + "JobParameters$Type": "

    The job type. You can initiate a job to retrieve an archive or get an inventory of a vault. Valid values are \"archive-retrieval\" and \"inventory-retrieval\".

    ", + "JobParameters$ArchiveId": "

    The ID of the archive that you want to retrieve. This field is required only if Type is set to archive-retrieval. An error occurs if you specify this request parameter for an inventory retrieval job request.

    ", + "JobParameters$Description": "

    The optional description for the job. The description must be less than or equal to 1,024 bytes. The allowable characters are 7-bit ASCII without control codes-specifically, ASCII values 32-126 decimal or 0x20-0x7E hexadecimal.

    ", + "JobParameters$SNSTopic": "

    The Amazon SNS topic ARN to which Amazon Glacier sends a notification when the job is completed and the output is ready for you to download. The specified topic publishes the notification to its subscribers. The SNS topic must exist.

    ", + "JobParameters$RetrievalByteRange": "

    The byte range to retrieve for an archive retrieval. in the form \"StartByteValue-EndByteValue\" If not specified, the whole archive is retrieved. If specified, the byte range must be megabyte (1024*1024) aligned which means that StartByteValue must be divisible by 1 MB and EndByteValue plus 1 must be divisible by 1 MB or be the end of the archive specified as the archive byte size value minus 1. If RetrievalByteRange is not megabyte aligned, this operation returns a 400 response.

    An error occurs if you specify this field for an inventory retrieval job request.

    ", + "LimitExceededException$type": "

    Client

    ", + "LimitExceededException$code": "

    400 Bad Request

    ", + "LimitExceededException$message": null, + "ListJobsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "ListJobsInput$vaultName": "

    The name of the vault.

    ", + "ListJobsInput$limit": "

    Specifies that the response be limited to the specified number of items or fewer. If not specified, the List Jobs operation returns up to 1,000 jobs.

    ", + "ListJobsInput$marker": "

    An opaque string used for pagination. This value specifies the job at which the listing of jobs should begin. Get the marker value from a previous List Jobs response. You need only include the marker if you are continuing the pagination of results started in a previous List Jobs request.

    ", + "ListJobsInput$statuscode": "

    Specifies the type of job status to return. You can specify the following values: \"InProgress\", \"Succeeded\", or \"Failed\".

    ", + "ListJobsInput$completed": "

    Specifies the state of the jobs to return. You can specify true or false.

    ", + "ListJobsOutput$Marker": "

    An opaque string that represents where to continue pagination of the results. You use this value in a new List Jobs request to obtain more jobs in the list. If there are no more jobs, this value is null.

    ", + "ListMultipartUploadsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "ListMultipartUploadsInput$vaultName": "

    The name of the vault.

    ", + "ListMultipartUploadsInput$marker": "

    An opaque string used for pagination. This value specifies the upload at which the listing of uploads should begin. Get the marker value from a previous List Uploads response. You need only include the marker if you are continuing the pagination of results started in a previous List Uploads request.

    ", + "ListMultipartUploadsInput$limit": "

    Specifies the maximum number of uploads returned in the response body. If this value is not specified, the List Uploads operation returns up to 1,000 uploads.

    ", + "ListMultipartUploadsOutput$Marker": "

    An opaque string that represents where to continue pagination of the results. You use the marker in a new List Multipart Uploads request to obtain more uploads in the list. If there are no more uploads, this value is null.

    ", + "ListPartsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "ListPartsInput$vaultName": "

    The name of the vault.

    ", + "ListPartsInput$uploadId": "

    The upload ID of the multipart upload.

    ", + "ListPartsInput$marker": "

    An opaque string used for pagination. This value specifies the part at which the listing of parts should begin. Get the marker value from the response of a previous List Parts response. You need only include the marker if you are continuing the pagination of results started in a previous List Parts request.

    ", + "ListPartsInput$limit": "

    Specifies the maximum number of parts returned in the response body. If this value is not specified, the List Parts operation returns up to 1,000 uploads.

    ", + "ListPartsOutput$MultipartUploadId": "

    The ID of the upload to which the parts are associated.

    ", + "ListPartsOutput$VaultARN": "

    The Amazon Resource Name (ARN) of the vault to which the multipart upload was initiated.

    ", + "ListPartsOutput$ArchiveDescription": "

    The description of the archive that was specified in the Initiate Multipart Upload request.

    ", + "ListPartsOutput$CreationDate": "

    The UTC time at which the multipart upload was initiated.

    ", + "ListPartsOutput$Marker": "

    An opaque string that represents where to continue pagination of the results. You use the marker in a new List Parts request to obtain more jobs in the list. If there are no more parts, this value is null.

    ", + "ListTagsForVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "ListTagsForVaultInput$vaultName": "

    The name of the vault.

    ", + "ListVaultsInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "ListVaultsInput$marker": "

    A string used for pagination. The marker specifies the vault ARN after which the listing of vaults should begin.

    ", + "ListVaultsInput$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the List Vaults operation returns up to 1,000 items.

    ", + "ListVaultsOutput$Marker": "

    The vault ARN at which to continue pagination of the results. You use the marker in another List Vaults request to obtain more vaults in the list.

    ", + "MissingParameterValueException$type": "

    Client.

    ", + "MissingParameterValueException$code": "

    400 Bad Request

    ", + "MissingParameterValueException$message": null, + "NotificationEventList$member": null, + "PartListElement$RangeInBytes": "

    The byte range of a part, inclusive of the upper value of the range.

    ", + "PartListElement$SHA256TreeHash": "

    The SHA256 tree hash value that Amazon Glacier calculated for the part. This field is never null.

    ", + "PolicyEnforcedException$type": "

    Client

    ", + "PolicyEnforcedException$code": "

    PolicyEnforcedException

    ", + "PolicyEnforcedException$message": "

    InitiateJob request denied by current data retrieval policy.

    ", + "RemoveTagsFromVaultInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "RemoveTagsFromVaultInput$vaultName": "

    The name of the vault.

    ", + "RequestTimeoutException$type": "

    Client

    ", + "RequestTimeoutException$code": "

    408 Request Timeout

    ", + "RequestTimeoutException$message": null, + "ResourceNotFoundException$type": "

    Client

    ", + "ResourceNotFoundException$code": "

    404 Not Found

    ", + "ResourceNotFoundException$message": null, + "ServiceUnavailableException$type": "

    Server

    ", + "ServiceUnavailableException$code": "

    500 Internal Server Error

    ", + "ServiceUnavailableException$message": null, + "SetDataRetrievalPolicyInput$accountId": "

    The AccountId value is the AWS account ID. This value must match the AWS account ID associated with the credentials used to sign the request. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you specify your account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "SetVaultAccessPolicyInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "SetVaultAccessPolicyInput$vaultName": "

    The name of the vault.

    ", + "SetVaultNotificationsInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "SetVaultNotificationsInput$vaultName": "

    The name of the vault.

    ", + "TagKeyList$member": null, + "UploadArchiveInput$vaultName": "

    The name of the vault.

    ", + "UploadArchiveInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "UploadArchiveInput$archiveDescription": "

    The optional description of the archive you are uploading.

    ", + "UploadArchiveInput$checksum": "

    The SHA256 tree hash of the data being uploaded.

    ", + "UploadListElement$MultipartUploadId": "

    The ID of a multipart upload.

    ", + "UploadListElement$VaultARN": "

    The Amazon Resource Name (ARN) of the vault that contains the archive.

    ", + "UploadListElement$ArchiveDescription": "

    The description of the archive that was specified in the Initiate Multipart Upload request.

    ", + "UploadListElement$CreationDate": "

    The UTC time at which the multipart upload was initiated.

    ", + "UploadMultipartPartInput$accountId": "

    The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single apos-apos (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens (apos-apos) in the ID.

    ", + "UploadMultipartPartInput$vaultName": "

    The name of the vault.

    ", + "UploadMultipartPartInput$uploadId": "

    The upload ID of the multipart upload.

    ", + "UploadMultipartPartInput$checksum": "

    The SHA256 tree hash of the data being uploaded.

    ", + "UploadMultipartPartInput$range": "

    Identifies the range of bytes in the assembled archive that will be uploaded in this part. Amazon Glacier uses this information to assemble the archive in the proper sequence. The format of this header follows RFC 2616. An example header is Content-Range:bytes 0-4194303/*.

    ", + "UploadMultipartPartOutput$checksum": "

    The SHA256 tree hash that Amazon Glacier computed for the uploaded part.

    ", + "VaultAccessPolicy$Policy": "

    The vault access policy.

    ", + "VaultLockPolicy$Policy": "

    The vault lock policy.

    ", + "VaultNotificationConfig$SNSTopic": "

    The Amazon Simple Notification Service (Amazon SNS) topic Amazon Resource Name (ARN).

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListJobs": { + "input_token": "marker", + "output_token": "Marker", + "limit_key": "limit", + "result_key": "JobList" + }, + "ListMultipartUploads": { + "input_token": "marker", + "output_token": "Marker", + "limit_key": "limit", + "result_key": "UploadsList" + }, + "ListParts": { + "input_token": "marker", + "output_token": "Marker", + "limit_key": "limit", + "result_key": "Parts" + }, + "ListVaults": { + "input_token": "marker", + "output_token": "Marker", + "limit_key": "limit", + "result_key": "VaultList" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/glacier/2012-06-01/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,39 @@ +{ + "version": 2, + "waiters": { + "VaultExists": { + "operation": "DescribeVault", + "delay": 3, + "maxAttempts": 15, + "acceptors": [ + { + "state": "success", + "matcher": "status", + "expected": 200 + }, + { + 
"state": "retry", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "VaultNotExists": { + "operation": "DescribeVault", + "delay": 3, + "maxAttempts": 15, + "acceptors": [ + { + "state": "retry", + "matcher": "status", + "expected": 200 + }, + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4511 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-05-08", + "endpointPrefix":"iam", + "globalEndpoint":"iam.amazonaws.com", + "protocol":"query", + "serviceAbbreviation":"IAM", + "serviceFullName":"AWS Identity and Access Management", + "signatureVersion":"v4", + "xmlNamespace":"https://iam.amazonaws.com/doc/2010-05-08/" + }, + "operations":{ + "AddClientIDToOpenIDConnectProvider":{ + "name":"AddClientIDToOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddClientIDToOpenIDConnectProviderRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "AddRoleToInstanceProfile":{ + "name":"AddRoleToInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddRoleToInstanceProfileRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"ServiceFailureException"} + ] + }, + "AddUserToGroup":{ + "name":"AddUserToGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddUserToGroupRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "AttachGroupPolicy":{ + "name":"AttachGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachGroupPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "AttachRolePolicy":{ + "name":"AttachRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachRolePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "AttachUserPolicy":{ + "name":"AttachUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachUserPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ChangePassword":{ + "name":"ChangePassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangePasswordRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidUserTypeException"}, + {"shape":"LimitExceededException"}, + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"PasswordPolicyViolationException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateAccessKey":{ + "name":"CreateAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccessKeyRequest"}, + "output":{ + "shape":"CreateAccessKeyResponse", + "resultWrapper":"CreateAccessKeyResult" + }, + "errors":[ + 
{"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateAccountAlias":{ + "name":"CreateAccountAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAccountAliasRequest"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateGroup":{ + "name":"CreateGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGroupRequest"}, + "output":{ + "shape":"CreateGroupResponse", + "resultWrapper":"CreateGroupResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateInstanceProfile":{ + "name":"CreateInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceProfileRequest"}, + "output":{ + "shape":"CreateInstanceProfileResponse", + "resultWrapper":"CreateInstanceProfileResult" + }, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateLoginProfile":{ + "name":"CreateLoginProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLoginProfileRequest"}, + "output":{ + "shape":"CreateLoginProfileResponse", + "resultWrapper":"CreateLoginProfileResult" + }, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"PasswordPolicyViolationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateOpenIDConnectProvider":{ + "name":"CreateOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOpenIDConnectProviderRequest"}, + "output":{ + "shape":"CreateOpenIDConnectProviderResponse", + 
"resultWrapper":"CreateOpenIDConnectProviderResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreatePolicy":{ + "name":"CreatePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyRequest"}, + "output":{ + "shape":"CreatePolicyResponse", + "resultWrapper":"CreatePolicyResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreatePolicyVersion":{ + "name":"CreatePolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyVersionRequest"}, + "output":{ + "shape":"CreatePolicyVersionResponse", + "resultWrapper":"CreatePolicyVersionResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateRole":{ + "name":"CreateRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRoleRequest"}, + "output":{ + "shape":"CreateRoleResponse", + "resultWrapper":"CreateRoleResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateSAMLProvider":{ + "name":"CreateSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSAMLProviderRequest"}, + "output":{ + "shape":"CreateSAMLProviderResponse", + "resultWrapper":"CreateSAMLProviderResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"ServiceFailureException"} + ] + }, + "CreateUser":{ + "name":"CreateUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserRequest"}, + "output":{ + "shape":"CreateUserResponse", + "resultWrapper":"CreateUserResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateVirtualMFADevice":{ + "name":"CreateVirtualMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVirtualMFADeviceRequest"}, + "output":{ + "shape":"CreateVirtualMFADeviceResponse", + "resultWrapper":"CreateVirtualMFADeviceResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeactivateMFADevice":{ + "name":"DeactivateMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeactivateMFADeviceRequest"}, + "errors":[ + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteAccessKey":{ + "name":"DeleteAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccessKeyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteAccountAlias":{ + "name":"DeleteAccountAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccountAliasRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteAccountPasswordPolicy":{ + "name":"DeleteAccountPasswordPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + 
{"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteGroup":{ + "name":"DeleteGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGroupRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteGroupPolicy":{ + "name":"DeleteGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGroupPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteInstanceProfile":{ + "name":"DeleteInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceProfileRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteLoginProfile":{ + "name":"DeleteLoginProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLoginProfileRequest"}, + "errors":[ + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteOpenIDConnectProvider":{ + "name":"DeleteOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOpenIDConnectProviderRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + 
{"shape":"DeleteConflictException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeletePolicyVersion":{ + "name":"DeletePolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyVersionRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"DeleteConflictException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteRole":{ + "name":"DeleteRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRoleRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteRolePolicy":{ + "name":"DeleteRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRolePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteSAMLProvider":{ + "name":"DeleteSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSAMLProviderRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteSSHPublicKey":{ + "name":"DeleteSSHPublicKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSSHPublicKeyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"} + ] + }, + "DeleteServerCertificate":{ + "name":"DeleteServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteServerCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteSigningCertificate":{ + 
"name":"DeleteSigningCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSigningCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteUser":{ + "name":"DeleteUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteUserPolicy":{ + "name":"DeleteUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteVirtualMFADevice":{ + "name":"DeleteVirtualMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVirtualMFADeviceRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"DeleteConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DetachGroupPolicy":{ + "name":"DetachGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachGroupPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DetachRolePolicy":{ + "name":"DetachRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachRolePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DetachUserPolicy":{ + "name":"DetachUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DetachUserPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "EnableMFADevice":{ + "name":"EnableMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableMFADeviceRequest"}, + "errors":[ + {"shape":"EntityAlreadyExistsException"}, + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"InvalidAuthenticationCodeException"}, + {"shape":"LimitExceededException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GenerateCredentialReport":{ + "name":"GenerateCredentialReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GenerateCredentialReportResponse", + "resultWrapper":"GenerateCredentialReportResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetAccessKeyLastUsed":{ + "name":"GetAccessKeyLastUsed", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccessKeyLastUsedRequest"}, + "output":{ + "shape":"GetAccessKeyLastUsedResponse", + "resultWrapper":"GetAccessKeyLastUsedResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"} + ] + }, + "GetAccountAuthorizationDetails":{ + "name":"GetAccountAuthorizationDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccountAuthorizationDetailsRequest"}, + "output":{ + "shape":"GetAccountAuthorizationDetailsResponse", + "resultWrapper":"GetAccountAuthorizationDetailsResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "GetAccountPasswordPolicy":{ + "name":"GetAccountPasswordPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetAccountPasswordPolicyResponse", + "resultWrapper":"GetAccountPasswordPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + 
{"shape":"ServiceFailureException"} + ] + }, + "GetAccountSummary":{ + "name":"GetAccountSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetAccountSummaryResponse", + "resultWrapper":"GetAccountSummaryResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "GetContextKeysForCustomPolicy":{ + "name":"GetContextKeysForCustomPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContextKeysForCustomPolicyRequest"}, + "output":{ + "shape":"GetContextKeysForPolicyResponse", + "resultWrapper":"GetContextKeysForCustomPolicyResult" + }, + "errors":[ + {"shape":"InvalidInputException"} + ] + }, + "GetContextKeysForPrincipalPolicy":{ + "name":"GetContextKeysForPrincipalPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContextKeysForPrincipalPolicyRequest"}, + "output":{ + "shape":"GetContextKeysForPolicyResponse", + "resultWrapper":"GetContextKeysForPrincipalPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"} + ] + }, + "GetCredentialReport":{ + "name":"GetCredentialReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{ + "shape":"GetCredentialReportResponse", + "resultWrapper":"GetCredentialReportResult" + }, + "errors":[ + {"shape":"CredentialReportNotPresentException"}, + {"shape":"CredentialReportExpiredException"}, + {"shape":"CredentialReportNotReadyException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetGroup":{ + "name":"GetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetGroupRequest"}, + "output":{ + "shape":"GetGroupResponse", + "resultWrapper":"GetGroupResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetGroupPolicy":{ + "name":"GetGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetGroupPolicyRequest"}, + "output":{ + 
"shape":"GetGroupPolicyResponse", + "resultWrapper":"GetGroupPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetInstanceProfile":{ + "name":"GetInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetInstanceProfileRequest"}, + "output":{ + "shape":"GetInstanceProfileResponse", + "resultWrapper":"GetInstanceProfileResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetLoginProfile":{ + "name":"GetLoginProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLoginProfileRequest"}, + "output":{ + "shape":"GetLoginProfileResponse", + "resultWrapper":"GetLoginProfileResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetOpenIDConnectProvider":{ + "name":"GetOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOpenIDConnectProviderRequest"}, + "output":{ + "shape":"GetOpenIDConnectProviderResponse", + "resultWrapper":"GetOpenIDConnectProviderResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{ + "shape":"GetPolicyResponse", + "resultWrapper":"GetPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetPolicyVersion":{ + "name":"GetPolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyVersionRequest"}, + "output":{ + "shape":"GetPolicyVersionResponse", + "resultWrapper":"GetPolicyVersionResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + 
{"shape":"ServiceFailureException"} + ] + }, + "GetRole":{ + "name":"GetRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRoleRequest"}, + "output":{ + "shape":"GetRoleResponse", + "resultWrapper":"GetRoleResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetRolePolicy":{ + "name":"GetRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRolePolicyRequest"}, + "output":{ + "shape":"GetRolePolicyResponse", + "resultWrapper":"GetRolePolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetSAMLProvider":{ + "name":"GetSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSAMLProviderRequest"}, + "output":{ + "shape":"GetSAMLProviderResponse", + "resultWrapper":"GetSAMLProviderResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetSSHPublicKey":{ + "name":"GetSSHPublicKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSSHPublicKeyRequest"}, + "output":{ + "shape":"GetSSHPublicKeyResponse", + "resultWrapper":"GetSSHPublicKeyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"UnrecognizedPublicKeyEncodingException"} + ] + }, + "GetServerCertificate":{ + "name":"GetServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetServerCertificateRequest"}, + "output":{ + "shape":"GetServerCertificateResponse", + "resultWrapper":"GetServerCertificateResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetUser":{ + "name":"GetUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUserRequest"}, + "output":{ + "shape":"GetUserResponse", + "resultWrapper":"GetUserResult" + }, 
+ "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "GetUserPolicy":{ + "name":"GetUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUserPolicyRequest"}, + "output":{ + "shape":"GetUserPolicyResponse", + "resultWrapper":"GetUserPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListAccessKeys":{ + "name":"ListAccessKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccessKeysRequest"}, + "output":{ + "shape":"ListAccessKeysResponse", + "resultWrapper":"ListAccessKeysResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListAccountAliases":{ + "name":"ListAccountAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAccountAliasesRequest"}, + "output":{ + "shape":"ListAccountAliasesResponse", + "resultWrapper":"ListAccountAliasesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListAttachedGroupPolicies":{ + "name":"ListAttachedGroupPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedGroupPoliciesRequest"}, + "output":{ + "shape":"ListAttachedGroupPoliciesResponse", + "resultWrapper":"ListAttachedGroupPoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListAttachedRolePolicies":{ + "name":"ListAttachedRolePolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedRolePoliciesRequest"}, + "output":{ + "shape":"ListAttachedRolePoliciesResponse", + "resultWrapper":"ListAttachedRolePoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListAttachedUserPolicies":{ + 
"name":"ListAttachedUserPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedUserPoliciesRequest"}, + "output":{ + "shape":"ListAttachedUserPoliciesResponse", + "resultWrapper":"ListAttachedUserPoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListEntitiesForPolicy":{ + "name":"ListEntitiesForPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEntitiesForPolicyRequest"}, + "output":{ + "shape":"ListEntitiesForPolicyResponse", + "resultWrapper":"ListEntitiesForPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListGroupPolicies":{ + "name":"ListGroupPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGroupPoliciesRequest"}, + "output":{ + "shape":"ListGroupPoliciesResponse", + "resultWrapper":"ListGroupPoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListGroups":{ + "name":"ListGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGroupsRequest"}, + "output":{ + "shape":"ListGroupsResponse", + "resultWrapper":"ListGroupsResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListGroupsForUser":{ + "name":"ListGroupsForUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGroupsForUserRequest"}, + "output":{ + "shape":"ListGroupsForUserResponse", + "resultWrapper":"ListGroupsForUserResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListInstanceProfiles":{ + "name":"ListInstanceProfiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstanceProfilesRequest"}, + "output":{ + 
"shape":"ListInstanceProfilesResponse", + "resultWrapper":"ListInstanceProfilesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListInstanceProfilesForRole":{ + "name":"ListInstanceProfilesForRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstanceProfilesForRoleRequest"}, + "output":{ + "shape":"ListInstanceProfilesForRoleResponse", + "resultWrapper":"ListInstanceProfilesForRoleResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListMFADevices":{ + "name":"ListMFADevices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMFADevicesRequest"}, + "output":{ + "shape":"ListMFADevicesResponse", + "resultWrapper":"ListMFADevicesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListOpenIDConnectProviders":{ + "name":"ListOpenIDConnectProviders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOpenIDConnectProvidersRequest"}, + "output":{ + "shape":"ListOpenIDConnectProvidersResponse", + "resultWrapper":"ListOpenIDConnectProvidersResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListPolicies":{ + "name":"ListPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPoliciesRequest"}, + "output":{ + "shape":"ListPoliciesResponse", + "resultWrapper":"ListPoliciesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListPolicyVersions":{ + "name":"ListPolicyVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPolicyVersionsRequest"}, + "output":{ + "shape":"ListPolicyVersionsResponse", + "resultWrapper":"ListPolicyVersionsResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListRolePolicies":{ + "name":"ListRolePolicies", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRolePoliciesRequest"}, + "output":{ + "shape":"ListRolePoliciesResponse", + "resultWrapper":"ListRolePoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListRoles":{ + "name":"ListRoles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRolesRequest"}, + "output":{ + "shape":"ListRolesResponse", + "resultWrapper":"ListRolesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListSAMLProviders":{ + "name":"ListSAMLProviders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSAMLProvidersRequest"}, + "output":{ + "shape":"ListSAMLProvidersResponse", + "resultWrapper":"ListSAMLProvidersResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListSSHPublicKeys":{ + "name":"ListSSHPublicKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSSHPublicKeysRequest"}, + "output":{ + "shape":"ListSSHPublicKeysResponse", + "resultWrapper":"ListSSHPublicKeysResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"} + ] + }, + "ListServerCertificates":{ + "name":"ListServerCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServerCertificatesRequest"}, + "output":{ + "shape":"ListServerCertificatesResponse", + "resultWrapper":"ListServerCertificatesResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListSigningCertificates":{ + "name":"ListSigningCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSigningCertificatesRequest"}, + "output":{ + "shape":"ListSigningCertificatesResponse", + "resultWrapper":"ListSigningCertificatesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListUserPolicies":{ + "name":"ListUserPolicies", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUserPoliciesRequest"}, + "output":{ + "shape":"ListUserPoliciesResponse", + "resultWrapper":"ListUserPoliciesResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{ + "shape":"ListUsersResponse", + "resultWrapper":"ListUsersResult" + }, + "errors":[ + {"shape":"ServiceFailureException"} + ] + }, + "ListVirtualMFADevices":{ + "name":"ListVirtualMFADevices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVirtualMFADevicesRequest"}, + "output":{ + "shape":"ListVirtualMFADevicesResponse", + "resultWrapper":"ListVirtualMFADevicesResult" + } + }, + "PutGroupPolicy":{ + "name":"PutGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutGroupPolicyRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "PutRolePolicy":{ + "name":"PutRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRolePolicyRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "PutUserPolicy":{ + "name":"PutUserPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutUserPolicyRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "RemoveClientIDFromOpenIDConnectProvider":{ + "name":"RemoveClientIDFromOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RemoveClientIDFromOpenIDConnectProviderRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "RemoveRoleFromInstanceProfile":{ + "name":"RemoveRoleFromInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveRoleFromInstanceProfileRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "RemoveUserFromGroup":{ + "name":"RemoveUserFromGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveUserFromGroupRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ResyncMFADevice":{ + "name":"ResyncMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResyncMFADeviceRequest"}, + "errors":[ + {"shape":"InvalidAuthenticationCodeException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "SetDefaultPolicyVersion":{ + "name":"SetDefaultPolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetDefaultPolicyVersionRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "SimulateCustomPolicy":{ + "name":"SimulateCustomPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SimulateCustomPolicyRequest"}, + "output":{ + "shape":"SimulatePolicyResponse", + "resultWrapper":"SimulateCustomPolicyResult" + }, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"PolicyEvaluationException"} + ] + }, + "SimulatePrincipalPolicy":{ + "name":"SimulatePrincipalPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"SimulatePrincipalPolicyRequest"}, + "output":{ + "shape":"SimulatePolicyResponse", + "resultWrapper":"SimulatePrincipalPolicyResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"PolicyEvaluationException"} + ] + }, + "UpdateAccessKey":{ + "name":"UpdateAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAccessKeyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateAccountPasswordPolicy":{ + "name":"UpdateAccountPasswordPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAccountPasswordPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateAssumeRolePolicy":{ + "name":"UpdateAssumeRolePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAssumeRolePolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateGroup":{ + "name":"UpdateGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGroupRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateLoginProfile":{ + "name":"UpdateLoginProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLoginProfileRequest"}, + "errors":[ + {"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"PasswordPolicyViolationException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"ServiceFailureException"} + ] + }, + "UpdateOpenIDConnectProviderThumbprint":{ + "name":"UpdateOpenIDConnectProviderThumbprint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateOpenIDConnectProviderThumbprintRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateSAMLProvider":{ + "name":"UpdateSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSAMLProviderRequest"}, + "output":{ + "shape":"UpdateSAMLProviderResponse", + "resultWrapper":"UpdateSAMLProviderResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateSSHPublicKey":{ + "name":"UpdateSSHPublicKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSSHPublicKeyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"} + ] + }, + "UpdateServerCertificate":{ + "name":"UpdateServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateServerCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateSigningCertificate":{ + "name":"UpdateSigningCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSigningCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateUser":{ + "name":"UpdateUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUserRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + 
{"shape":"EntityTemporarilyUnmodifiableException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UploadSSHPublicKey":{ + "name":"UploadSSHPublicKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadSSHPublicKeyRequest"}, + "output":{ + "shape":"UploadSSHPublicKeyResponse", + "resultWrapper":"UploadSSHPublicKeyResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidPublicKeyException"}, + {"shape":"DuplicateSSHPublicKeyException"}, + {"shape":"UnrecognizedPublicKeyEncodingException"} + ] + }, + "UploadServerCertificate":{ + "name":"UploadServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadServerCertificateRequest"}, + "output":{ + "shape":"UploadServerCertificateResponse", + "resultWrapper":"UploadServerCertificateResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"MalformedCertificateException"}, + {"shape":"KeyPairMismatchException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UploadSigningCertificate":{ + "name":"UploadSigningCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadSigningCertificateRequest"}, + "output":{ + "shape":"UploadSigningCertificateResponse", + "resultWrapper":"UploadSigningCertificateResult" + }, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"EntityAlreadyExistsException"}, + {"shape":"MalformedCertificateException"}, + {"shape":"InvalidCertificateException"}, + {"shape":"DuplicateCertificateException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ] + } + }, + "shapes":{ + "AccessKey":{ + "type":"structure", + "required":[ + "UserName", + "AccessKeyId", + "Status", + "SecretAccessKey" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "AccessKeyId":{"shape":"accessKeyIdType"}, + "Status":{"shape":"statusType"}, + 
"SecretAccessKey":{"shape":"accessKeySecretType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "AccessKeyLastUsed":{ + "type":"structure", + "required":[ + "LastUsedDate", + "ServiceName", + "Region" + ], + "members":{ + "LastUsedDate":{"shape":"dateType"}, + "ServiceName":{"shape":"stringType"}, + "Region":{"shape":"stringType"} + } + }, + "AccessKeyMetadata":{ + "type":"structure", + "members":{ + "UserName":{"shape":"userNameType"}, + "AccessKeyId":{"shape":"accessKeyIdType"}, + "Status":{"shape":"statusType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "ActionNameListType":{ + "type":"list", + "member":{"shape":"ActionNameType"} + }, + "ActionNameType":{ + "type":"string", + "max":128, + "min":3 + }, + "AddClientIDToOpenIDConnectProviderRequest":{ + "type":"structure", + "required":[ + "OpenIDConnectProviderArn", + "ClientID" + ], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"}, + "ClientID":{"shape":"clientIDType"} + } + }, + "AddRoleToInstanceProfileRequest":{ + "type":"structure", + "required":[ + "InstanceProfileName", + "RoleName" + ], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"}, + "RoleName":{"shape":"roleNameType"} + } + }, + "AddUserToGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "UserName" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "UserName":{"shape":"existingUserNameType"} + } + }, + "AttachGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyArn" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "AttachRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyArn" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "AttachUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyArn" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "PolicyArn":{"shape":"arnType"} + } 
+ }, + "AttachedPolicy":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "BootstrapDatum":{ + "type":"blob", + "sensitive":true + }, + "ChangePasswordRequest":{ + "type":"structure", + "required":[ + "OldPassword", + "NewPassword" + ], + "members":{ + "OldPassword":{"shape":"passwordType"}, + "NewPassword":{"shape":"passwordType"} + } + }, + "ColumnNumber":{"type":"integer"}, + "ContextEntry":{ + "type":"structure", + "members":{ + "ContextKeyName":{"shape":"ContextKeyNameType"}, + "ContextKeyValues":{"shape":"ContextKeyValueListType"}, + "ContextKeyType":{"shape":"ContextKeyTypeEnum"} + } + }, + "ContextEntryListType":{ + "type":"list", + "member":{"shape":"ContextEntry"} + }, + "ContextKeyNameType":{ + "type":"string", + "max":256, + "min":5 + }, + "ContextKeyNamesResultListType":{ + "type":"list", + "member":{"shape":"ContextKeyNameType"} + }, + "ContextKeyTypeEnum":{ + "type":"string", + "enum":[ + "string", + "stringList", + "numeric", + "numericList", + "boolean", + "booleanList", + "ip", + "ipList", + "binary", + "binaryList", + "date", + "dateList" + ] + }, + "ContextKeyValueListType":{ + "type":"list", + "member":{"shape":"ContextKeyValueType"} + }, + "ContextKeyValueType":{"type":"string"}, + "CreateAccessKeyRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"} + } + }, + "CreateAccessKeyResponse":{ + "type":"structure", + "required":["AccessKey"], + "members":{ + "AccessKey":{"shape":"AccessKey"} + } + }, + "CreateAccountAliasRequest":{ + "type":"structure", + "required":["AccountAlias"], + "members":{ + "AccountAlias":{"shape":"accountAliasType"} + } + }, + "CreateGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "Path":{"shape":"pathType"}, + "GroupName":{"shape":"groupNameType"} + } + }, + "CreateGroupResponse":{ + "type":"structure", + "required":["Group"], + "members":{ + "Group":{"shape":"Group"} + } + 
}, + "CreateInstanceProfileRequest":{ + "type":"structure", + "required":["InstanceProfileName"], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"}, + "Path":{"shape":"pathType"} + } + }, + "CreateInstanceProfileResponse":{ + "type":"structure", + "required":["InstanceProfile"], + "members":{ + "InstanceProfile":{"shape":"InstanceProfile"} + } + }, + "CreateLoginProfileRequest":{ + "type":"structure", + "required":[ + "UserName", + "Password" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "Password":{"shape":"passwordType"}, + "PasswordResetRequired":{"shape":"booleanType"} + } + }, + "CreateLoginProfileResponse":{ + "type":"structure", + "required":["LoginProfile"], + "members":{ + "LoginProfile":{"shape":"LoginProfile"} + } + }, + "CreateOpenIDConnectProviderRequest":{ + "type":"structure", + "required":[ + "Url", + "ThumbprintList" + ], + "members":{ + "Url":{"shape":"OpenIDConnectProviderUrlType"}, + "ClientIDList":{"shape":"clientIDListType"}, + "ThumbprintList":{"shape":"thumbprintListType"} + } + }, + "CreateOpenIDConnectProviderResponse":{ + "type":"structure", + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"} + } + }, + "CreatePolicyRequest":{ + "type":"structure", + "required":[ + "PolicyName", + "PolicyDocument" + ], + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "Path":{"shape":"policyPathType"}, + "PolicyDocument":{"shape":"policyDocumentType"}, + "Description":{"shape":"policyDescriptionType"} + } + }, + "CreatePolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{"shape":"Policy"} + } + }, + "CreatePolicyVersionRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "PolicyDocument" + ], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "PolicyDocument":{"shape":"policyDocumentType"}, + "SetAsDefault":{"shape":"booleanType"} + } + }, + "CreatePolicyVersionResponse":{ + "type":"structure", + "members":{ + "PolicyVersion":{"shape":"PolicyVersion"} + } + }, + 
"CreateRoleRequest":{ + "type":"structure", + "required":[ + "RoleName", + "AssumeRolePolicyDocument" + ], + "members":{ + "Path":{"shape":"pathType"}, + "RoleName":{"shape":"roleNameType"}, + "AssumeRolePolicyDocument":{"shape":"policyDocumentType"} + } + }, + "CreateRoleResponse":{ + "type":"structure", + "required":["Role"], + "members":{ + "Role":{"shape":"Role"} + } + }, + "CreateSAMLProviderRequest":{ + "type":"structure", + "required":[ + "SAMLMetadataDocument", + "Name" + ], + "members":{ + "SAMLMetadataDocument":{"shape":"SAMLMetadataDocumentType"}, + "Name":{"shape":"SAMLProviderNameType"} + } + }, + "CreateSAMLProviderResponse":{ + "type":"structure", + "members":{ + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "CreateUserRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "Path":{"shape":"pathType"}, + "UserName":{"shape":"userNameType"} + } + }, + "CreateUserResponse":{ + "type":"structure", + "members":{ + "User":{"shape":"User"} + } + }, + "CreateVirtualMFADeviceRequest":{ + "type":"structure", + "required":["VirtualMFADeviceName"], + "members":{ + "Path":{"shape":"pathType"}, + "VirtualMFADeviceName":{"shape":"virtualMFADeviceName"} + } + }, + "CreateVirtualMFADeviceResponse":{ + "type":"structure", + "required":["VirtualMFADevice"], + "members":{ + "VirtualMFADevice":{"shape":"VirtualMFADevice"} + } + }, + "CredentialReportExpiredException":{ + "type":"structure", + "members":{ + "message":{"shape":"credentialReportExpiredExceptionMessage"} + }, + "error":{ + "code":"ReportExpired", + "httpStatusCode":410, + "senderFault":true + }, + "exception":true + }, + "CredentialReportNotPresentException":{ + "type":"structure", + "members":{ + "message":{"shape":"credentialReportNotPresentExceptionMessage"} + }, + "error":{ + "code":"ReportNotPresent", + "httpStatusCode":410, + "senderFault":true + }, + "exception":true + }, + "CredentialReportNotReadyException":{ + "type":"structure", + "members":{ + 
"message":{"shape":"credentialReportNotReadyExceptionMessage"} + }, + "error":{ + "code":"ReportInProgress", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DeactivateMFADeviceRequest":{ + "type":"structure", + "required":[ + "UserName", + "SerialNumber" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "SerialNumber":{"shape":"serialNumberType"} + } + }, + "DeleteAccessKeyRequest":{ + "type":"structure", + "required":["AccessKeyId"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "AccessKeyId":{"shape":"accessKeyIdType"} + } + }, + "DeleteAccountAliasRequest":{ + "type":"structure", + "required":["AccountAlias"], + "members":{ + "AccountAlias":{"shape":"accountAliasType"} + } + }, + "DeleteConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"deleteConflictMessage"} + }, + "error":{ + "code":"DeleteConflict", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "DeleteGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyName" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "DeleteGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"} + } + }, + "DeleteInstanceProfileRequest":{ + "type":"structure", + "required":["InstanceProfileName"], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"} + } + }, + "DeleteLoginProfileRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"userNameType"} + } + }, + "DeleteOpenIDConnectProviderRequest":{ + "type":"structure", + "required":["OpenIDConnectProviderArn"], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"} + } + }, + "DeletePolicyRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{"shape":"arnType"} + } + }, + "DeletePolicyVersionRequest":{ + 
"type":"structure", + "required":[ + "PolicyArn", + "VersionId" + ], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "VersionId":{"shape":"policyVersionIdType"} + } + }, + "DeleteRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyName" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "DeleteRoleRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"} + } + }, + "DeleteSAMLProviderRequest":{ + "type":"structure", + "required":["SAMLProviderArn"], + "members":{ + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "DeleteSSHPublicKeyRequest":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"} + } + }, + "DeleteServerCertificateRequest":{ + "type":"structure", + "required":["ServerCertificateName"], + "members":{ + "ServerCertificateName":{"shape":"serverCertificateNameType"} + } + }, + "DeleteSigningCertificateRequest":{ + "type":"structure", + "required":["CertificateId"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "CertificateId":{"shape":"certificateIdType"} + } + }, + "DeleteUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyName" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "DeleteUserRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"existingUserNameType"} + } + }, + "DeleteVirtualMFADeviceRequest":{ + "type":"structure", + "required":["SerialNumber"], + "members":{ + "SerialNumber":{"shape":"serialNumberType"} + } + }, + "DetachGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyArn" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + 
"DetachRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyArn" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "DetachUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyArn" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "PolicyArn":{"shape":"arnType"} + } + }, + "DuplicateCertificateException":{ + "type":"structure", + "members":{ + "message":{"shape":"duplicateCertificateMessage"} + }, + "error":{ + "code":"DuplicateCertificate", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "DuplicateSSHPublicKeyException":{ + "type":"structure", + "members":{ + "message":{"shape":"duplicateSSHPublicKeyMessage"} + }, + "error":{ + "code":"DuplicateSSHPublicKey", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EnableMFADeviceRequest":{ + "type":"structure", + "required":[ + "UserName", + "SerialNumber", + "AuthenticationCode1", + "AuthenticationCode2" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "AuthenticationCode1":{"shape":"authenticationCodeType"}, + "AuthenticationCode2":{"shape":"authenticationCodeType"} + } + }, + "EntityAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"entityAlreadyExistsMessage"} + }, + "error":{ + "code":"EntityAlreadyExists", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "EntityTemporarilyUnmodifiableException":{ + "type":"structure", + "members":{ + "message":{"shape":"entityTemporarilyUnmodifiableMessage"} + }, + "error":{ + "code":"EntityTemporarilyUnmodifiable", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "EntityType":{ + "type":"string", + "enum":[ + "User", + "Role", + "Group", + "LocalManagedPolicy", + "AWSManagedPolicy" + ] + }, + "EvalDecisionDetailsType":{ + "type":"map", + 
"key":{"shape":"EvalDecisionSourceType"}, + "value":{"shape":"PolicyEvaluationDecisionType"} + }, + "EvalDecisionSourceType":{ + "type":"string", + "max":256, + "min":3 + }, + "EvaluationResult":{ + "type":"structure", + "required":[ + "EvalActionName", + "EvalDecision" + ], + "members":{ + "EvalActionName":{"shape":"ActionNameType"}, + "EvalResourceName":{"shape":"ResourceNameType"}, + "EvalDecision":{"shape":"PolicyEvaluationDecisionType"}, + "MatchedStatements":{"shape":"StatementListType"}, + "MissingContextValues":{"shape":"ContextKeyNamesResultListType"}, + "EvalDecisionDetails":{"shape":"EvalDecisionDetailsType"}, + "ResourceSpecificResults":{"shape":"ResourceSpecificResultListType"} + } + }, + "EvaluationResultsListType":{ + "type":"list", + "member":{"shape":"EvaluationResult"} + }, + "GenerateCredentialReportResponse":{ + "type":"structure", + "members":{ + "State":{"shape":"ReportStateType"}, + "Description":{"shape":"ReportStateDescriptionType"} + } + }, + "GetAccessKeyLastUsedRequest":{ + "type":"structure", + "required":["AccessKeyId"], + "members":{ + "AccessKeyId":{"shape":"accessKeyIdType"} + } + }, + "GetAccessKeyLastUsedResponse":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "AccessKeyLastUsed":{"shape":"AccessKeyLastUsed"} + } + }, + "GetAccountAuthorizationDetailsRequest":{ + "type":"structure", + "members":{ + "Filter":{"shape":"entityListType"}, + "MaxItems":{"shape":"maxItemsType"}, + "Marker":{"shape":"markerType"} + } + }, + "GetAccountAuthorizationDetailsResponse":{ + "type":"structure", + "members":{ + "UserDetailList":{"shape":"userDetailListType"}, + "GroupDetailList":{"shape":"groupDetailListType"}, + "RoleDetailList":{"shape":"roleDetailListType"}, + "Policies":{"shape":"ManagedPolicyDetailListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "GetAccountPasswordPolicyResponse":{ + "type":"structure", + "required":["PasswordPolicy"], + "members":{ + 
"PasswordPolicy":{"shape":"PasswordPolicy"} + } + }, + "GetAccountSummaryResponse":{ + "type":"structure", + "members":{ + "SummaryMap":{"shape":"summaryMapType"} + } + }, + "GetContextKeysForCustomPolicyRequest":{ + "type":"structure", + "required":["PolicyInputList"], + "members":{ + "PolicyInputList":{"shape":"SimulationPolicyListType"} + } + }, + "GetContextKeysForPolicyResponse":{ + "type":"structure", + "members":{ + "ContextKeyNames":{"shape":"ContextKeyNamesResultListType"} + } + }, + "GetContextKeysForPrincipalPolicyRequest":{ + "type":"structure", + "required":["PolicySourceArn"], + "members":{ + "PolicySourceArn":{"shape":"arnType"}, + "PolicyInputList":{"shape":"SimulationPolicyListType"} + } + }, + "GetCredentialReportResponse":{ + "type":"structure", + "members":{ + "Content":{"shape":"ReportContentType"}, + "ReportFormat":{"shape":"ReportFormatType"}, + "GeneratedTime":{"shape":"dateType"} + } + }, + "GetGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyName" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "GetGroupPolicyResponse":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "GetGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "GetGroupResponse":{ + "type":"structure", + "required":[ + "Group", + "Users" + ], + "members":{ + "Group":{"shape":"Group"}, + "Users":{"shape":"userListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "GetInstanceProfileRequest":{ + "type":"structure", + "required":["InstanceProfileName"], + "members":{ + 
"InstanceProfileName":{"shape":"instanceProfileNameType"} + } + }, + "GetInstanceProfileResponse":{ + "type":"structure", + "required":["InstanceProfile"], + "members":{ + "InstanceProfile":{"shape":"InstanceProfile"} + } + }, + "GetLoginProfileRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"userNameType"} + } + }, + "GetLoginProfileResponse":{ + "type":"structure", + "required":["LoginProfile"], + "members":{ + "LoginProfile":{"shape":"LoginProfile"} + } + }, + "GetOpenIDConnectProviderRequest":{ + "type":"structure", + "required":["OpenIDConnectProviderArn"], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"} + } + }, + "GetOpenIDConnectProviderResponse":{ + "type":"structure", + "members":{ + "Url":{"shape":"OpenIDConnectProviderUrlType"}, + "ClientIDList":{"shape":"clientIDListType"}, + "ThumbprintList":{"shape":"thumbprintListType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "GetPolicyRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{"shape":"arnType"} + } + }, + "GetPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{"shape":"Policy"} + } + }, + "GetPolicyVersionRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "VersionId" + ], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "VersionId":{"shape":"policyVersionIdType"} + } + }, + "GetPolicyVersionResponse":{ + "type":"structure", + "members":{ + "PolicyVersion":{"shape":"PolicyVersion"} + } + }, + "GetRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyName" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "GetRolePolicyResponse":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + 
"GetRoleRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"} + } + }, + "GetRoleResponse":{ + "type":"structure", + "required":["Role"], + "members":{ + "Role":{"shape":"Role"} + } + }, + "GetSAMLProviderRequest":{ + "type":"structure", + "required":["SAMLProviderArn"], + "members":{ + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "GetSAMLProviderResponse":{ + "type":"structure", + "members":{ + "SAMLMetadataDocument":{"shape":"SAMLMetadataDocumentType"}, + "CreateDate":{"shape":"dateType"}, + "ValidUntil":{"shape":"dateType"} + } + }, + "GetSSHPublicKeyRequest":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId", + "Encoding" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"}, + "Encoding":{"shape":"encodingType"} + } + }, + "GetSSHPublicKeyResponse":{ + "type":"structure", + "members":{ + "SSHPublicKey":{"shape":"SSHPublicKey"} + } + }, + "GetServerCertificateRequest":{ + "type":"structure", + "required":["ServerCertificateName"], + "members":{ + "ServerCertificateName":{"shape":"serverCertificateNameType"} + } + }, + "GetServerCertificateResponse":{ + "type":"structure", + "required":["ServerCertificate"], + "members":{ + "ServerCertificate":{"shape":"ServerCertificate"} + } + }, + "GetUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyName" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "PolicyName":{"shape":"policyNameType"} + } + }, + "GetUserPolicyResponse":{ + "type":"structure", + "required":[ + "UserName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "GetUserRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"} + } + }, + "GetUserResponse":{ + "type":"structure", + 
"required":["User"], + "members":{ + "User":{"shape":"User"} + } + }, + "Group":{ + "type":"structure", + "required":[ + "Path", + "GroupName", + "GroupId", + "Arn", + "CreateDate" + ], + "members":{ + "Path":{"shape":"pathType"}, + "GroupName":{"shape":"groupNameType"}, + "GroupId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "GroupDetail":{ + "type":"structure", + "members":{ + "Path":{"shape":"pathType"}, + "GroupName":{"shape":"groupNameType"}, + "GroupId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "GroupPolicyList":{"shape":"policyDetailListType"}, + "AttachedManagedPolicies":{"shape":"attachedPoliciesListType"} + } + }, + "InstanceProfile":{ + "type":"structure", + "required":[ + "Path", + "InstanceProfileName", + "InstanceProfileId", + "Arn", + "CreateDate", + "Roles" + ], + "members":{ + "Path":{"shape":"pathType"}, + "InstanceProfileName":{"shape":"instanceProfileNameType"}, + "InstanceProfileId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "Roles":{"shape":"roleListType"} + } + }, + "InvalidAuthenticationCodeException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidAuthenticationCodeMessage"} + }, + "error":{ + "code":"InvalidAuthenticationCode", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "InvalidCertificateException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidCertificateMessage"} + }, + "error":{ + "code":"InvalidCertificate", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidInputMessage"} + }, + "error":{ + "code":"InvalidInput", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidPublicKeyException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidPublicKeyMessage"} + }, + 
"error":{ + "code":"InvalidPublicKey", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidUserTypeException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidUserTypeMessage"} + }, + "error":{ + "code":"InvalidUserType", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyPairMismatchException":{ + "type":"structure", + "members":{ + "message":{"shape":"keyPairMismatchMessage"} + }, + "error":{ + "code":"KeyPairMismatch", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"limitExceededMessage"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "LineNumber":{"type":"integer"}, + "ListAccessKeysRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAccessKeysResponse":{ + "type":"structure", + "required":["AccessKeyMetadata"], + "members":{ + "AccessKeyMetadata":{"shape":"accessKeyMetadataListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListAccountAliasesRequest":{ + "type":"structure", + "members":{ + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAccountAliasesResponse":{ + "type":"structure", + "required":["AccountAliases"], + "members":{ + "AccountAliases":{"shape":"accountAliasListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListAttachedGroupPoliciesRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PathPrefix":{"shape":"policyPathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAttachedGroupPoliciesResponse":{ + "type":"structure", 
+ "members":{ + "AttachedPolicies":{"shape":"attachedPoliciesListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListAttachedRolePoliciesRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PathPrefix":{"shape":"policyPathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAttachedRolePoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicies":{"shape":"attachedPoliciesListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListAttachedUserPoliciesRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"userNameType"}, + "PathPrefix":{"shape":"policyPathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListAttachedUserPoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicies":{"shape":"attachedPoliciesListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListEntitiesForPolicyRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "EntityFilter":{"shape":"EntityType"}, + "PathPrefix":{"shape":"pathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListEntitiesForPolicyResponse":{ + "type":"structure", + "members":{ + "PolicyGroups":{"shape":"PolicyGroupListType"}, + "PolicyUsers":{"shape":"PolicyUserListType"}, + "PolicyRoles":{"shape":"PolicyRoleListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListGroupPoliciesRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListGroupPoliciesResponse":{ + "type":"structure", + 
"required":["PolicyNames"], + "members":{ + "PolicyNames":{"shape":"policyNameListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListGroupsForUserRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListGroupsForUserResponse":{ + "type":"structure", + "required":["Groups"], + "members":{ + "Groups":{"shape":"groupListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListGroupsRequest":{ + "type":"structure", + "members":{ + "PathPrefix":{"shape":"pathPrefixType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListGroupsResponse":{ + "type":"structure", + "required":["Groups"], + "members":{ + "Groups":{"shape":"groupListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListInstanceProfilesForRoleRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListInstanceProfilesForRoleResponse":{ + "type":"structure", + "required":["InstanceProfiles"], + "members":{ + "InstanceProfiles":{"shape":"instanceProfileListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListInstanceProfilesRequest":{ + "type":"structure", + "members":{ + "PathPrefix":{"shape":"pathPrefixType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListInstanceProfilesResponse":{ + "type":"structure", + "required":["InstanceProfiles"], + "members":{ + "InstanceProfiles":{"shape":"instanceProfileListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListMFADevicesRequest":{ + "type":"structure", + "members":{ + 
"UserName":{"shape":"existingUserNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListMFADevicesResponse":{ + "type":"structure", + "required":["MFADevices"], + "members":{ + "MFADevices":{"shape":"mfaDeviceListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListOpenIDConnectProvidersRequest":{ + "type":"structure", + "members":{ + } + }, + "ListOpenIDConnectProvidersResponse":{ + "type":"structure", + "members":{ + "OpenIDConnectProviderList":{"shape":"OpenIDConnectProviderListType"} + } + }, + "ListPoliciesRequest":{ + "type":"structure", + "members":{ + "Scope":{"shape":"policyScopeType"}, + "OnlyAttached":{"shape":"booleanType"}, + "PathPrefix":{"shape":"policyPathType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListPoliciesResponse":{ + "type":"structure", + "members":{ + "Policies":{"shape":"policyListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListPolicyVersionsRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListPolicyVersionsResponse":{ + "type":"structure", + "members":{ + "Versions":{"shape":"policyDocumentVersionListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListRolePoliciesRequest":{ + "type":"structure", + "required":["RoleName"], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListRolePoliciesResponse":{ + "type":"structure", + "required":["PolicyNames"], + "members":{ + "PolicyNames":{"shape":"policyNameListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListRolesRequest":{ + "type":"structure", + "members":{ + 
"PathPrefix":{"shape":"pathPrefixType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListRolesResponse":{ + "type":"structure", + "required":["Roles"], + "members":{ + "Roles":{"shape":"roleListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListSAMLProvidersRequest":{ + "type":"structure", + "members":{ + } + }, + "ListSAMLProvidersResponse":{ + "type":"structure", + "members":{ + "SAMLProviderList":{"shape":"SAMLProviderListType"} + } + }, + "ListSSHPublicKeysRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"userNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListSSHPublicKeysResponse":{ + "type":"structure", + "members":{ + "SSHPublicKeys":{"shape":"SSHPublicKeyListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListServerCertificatesRequest":{ + "type":"structure", + "members":{ + "PathPrefix":{"shape":"pathPrefixType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListServerCertificatesResponse":{ + "type":"structure", + "required":["ServerCertificateMetadataList"], + "members":{ + "ServerCertificateMetadataList":{"shape":"serverCertificateMetadataListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListSigningCertificatesRequest":{ + "type":"structure", + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListSigningCertificatesResponse":{ + "type":"structure", + "required":["Certificates"], + "members":{ + "Certificates":{"shape":"certificateListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListUserPoliciesRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + 
"Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListUserPoliciesResponse":{ + "type":"structure", + "required":["PolicyNames"], + "members":{ + "PolicyNames":{"shape":"policyNameListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListUsersRequest":{ + "type":"structure", + "members":{ + "PathPrefix":{"shape":"pathPrefixType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListUsersResponse":{ + "type":"structure", + "required":["Users"], + "members":{ + "Users":{"shape":"userListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "ListVirtualMFADevicesRequest":{ + "type":"structure", + "members":{ + "AssignmentStatus":{"shape":"assignmentStatusType"}, + "Marker":{"shape":"markerType"}, + "MaxItems":{"shape":"maxItemsType"} + } + }, + "ListVirtualMFADevicesResponse":{ + "type":"structure", + "required":["VirtualMFADevices"], + "members":{ + "VirtualMFADevices":{"shape":"virtualMFADeviceListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "LoginProfile":{ + "type":"structure", + "required":[ + "UserName", + "CreateDate" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "CreateDate":{"shape":"dateType"}, + "PasswordResetRequired":{"shape":"booleanType"} + } + }, + "MFADevice":{ + "type":"structure", + "required":[ + "UserName", + "SerialNumber", + "EnableDate" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "EnableDate":{"shape":"dateType"} + } + }, + "MalformedCertificateException":{ + "type":"structure", + "members":{ + "message":{"shape":"malformedCertificateMessage"} + }, + "error":{ + "code":"MalformedCertificate", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "MalformedPolicyDocumentException":{ + "type":"structure", + "members":{ + 
"message":{"shape":"malformedPolicyDocumentMessage"} + }, + "error":{ + "code":"MalformedPolicyDocument", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ManagedPolicyDetail":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "PolicyId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "Path":{"shape":"policyPathType"}, + "DefaultVersionId":{"shape":"policyVersionIdType"}, + "AttachmentCount":{"shape":"attachmentCountType"}, + "IsAttachable":{"shape":"booleanType"}, + "Description":{"shape":"policyDescriptionType"}, + "CreateDate":{"shape":"dateType"}, + "UpdateDate":{"shape":"dateType"}, + "PolicyVersionList":{"shape":"policyDocumentVersionListType"} + } + }, + "ManagedPolicyDetailListType":{ + "type":"list", + "member":{"shape":"ManagedPolicyDetail"} + }, + "NoSuchEntityException":{ + "type":"structure", + "members":{ + "message":{"shape":"noSuchEntityMessage"} + }, + "error":{ + "code":"NoSuchEntity", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OpenIDConnectProviderListEntry":{ + "type":"structure", + "members":{ + "Arn":{"shape":"arnType"} + } + }, + "OpenIDConnectProviderListType":{ + "type":"list", + "member":{"shape":"OpenIDConnectProviderListEntry"} + }, + "OpenIDConnectProviderUrlType":{ + "type":"string", + "max":255, + "min":1 + }, + "PasswordPolicy":{ + "type":"structure", + "members":{ + "MinimumPasswordLength":{"shape":"minimumPasswordLengthType"}, + "RequireSymbols":{"shape":"booleanType"}, + "RequireNumbers":{"shape":"booleanType"}, + "RequireUppercaseCharacters":{"shape":"booleanType"}, + "RequireLowercaseCharacters":{"shape":"booleanType"}, + "AllowUsersToChangePassword":{"shape":"booleanType"}, + "ExpirePasswords":{"shape":"booleanType"}, + "MaxPasswordAge":{"shape":"maxPasswordAgeType"}, + "PasswordReusePrevention":{"shape":"passwordReusePreventionType"}, + "HardExpiry":{"shape":"booleanObjectType"} + } + }, + "PasswordPolicyViolationException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"passwordPolicyViolationMessage"} + }, + "error":{ + "code":"PasswordPolicyViolation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Policy":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "PolicyId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "Path":{"shape":"policyPathType"}, + "DefaultVersionId":{"shape":"policyVersionIdType"}, + "AttachmentCount":{"shape":"attachmentCountType"}, + "IsAttachable":{"shape":"booleanType"}, + "Description":{"shape":"policyDescriptionType"}, + "CreateDate":{"shape":"dateType"}, + "UpdateDate":{"shape":"dateType"} + } + }, + "PolicyDetail":{ + "type":"structure", + "members":{ + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "PolicyEvaluationDecisionType":{ + "type":"string", + "enum":[ + "allowed", + "explicitDeny", + "implicitDeny" + ] + }, + "PolicyEvaluationException":{ + "type":"structure", + "members":{ + "message":{"shape":"policyEvaluationErrorMessage"} + }, + "error":{ + "code":"PolicyEvaluation", + "httpStatusCode":500 + }, + "exception":true + }, + "PolicyGroup":{ + "type":"structure", + "members":{ + "GroupName":{"shape":"groupNameType"} + } + }, + "PolicyGroupListType":{ + "type":"list", + "member":{"shape":"PolicyGroup"} + }, + "PolicyIdentifierType":{"type":"string"}, + "PolicyRole":{ + "type":"structure", + "members":{ + "RoleName":{"shape":"roleNameType"} + } + }, + "PolicyRoleListType":{ + "type":"list", + "member":{"shape":"PolicyRole"} + }, + "PolicySourceType":{ + "type":"string", + "enum":[ + "user", + "group", + "role", + "aws-managed", + "user-managed", + "resource", + "none" + ] + }, + "PolicyUser":{ + "type":"structure", + "members":{ + "UserName":{"shape":"userNameType"} + } + }, + "PolicyUserListType":{ + "type":"list", + "member":{"shape":"PolicyUser"} + }, + "PolicyVersion":{ + "type":"structure", + "members":{ + 
"Document":{"shape":"policyDocumentType"}, + "VersionId":{"shape":"policyVersionIdType"}, + "IsDefaultVersion":{"shape":"booleanType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "Position":{ + "type":"structure", + "members":{ + "Line":{"shape":"LineNumber"}, + "Column":{"shape":"ColumnNumber"} + } + }, + "PutGroupPolicyRequest":{ + "type":"structure", + "required":[ + "GroupName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "PutRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "PutUserPolicyRequest":{ + "type":"structure", + "required":[ + "UserName", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "PolicyName":{"shape":"policyNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "RemoveClientIDFromOpenIDConnectProviderRequest":{ + "type":"structure", + "required":[ + "OpenIDConnectProviderArn", + "ClientID" + ], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"}, + "ClientID":{"shape":"clientIDType"} + } + }, + "RemoveRoleFromInstanceProfileRequest":{ + "type":"structure", + "required":[ + "InstanceProfileName", + "RoleName" + ], + "members":{ + "InstanceProfileName":{"shape":"instanceProfileNameType"}, + "RoleName":{"shape":"roleNameType"} + } + }, + "RemoveUserFromGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "UserName" + ], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "UserName":{"shape":"existingUserNameType"} + } + }, + "ReportContentType":{"type":"blob"}, + "ReportFormatType":{ + "type":"string", + "enum":["text/csv"] + }, + 
"ReportStateDescriptionType":{"type":"string"}, + "ReportStateType":{ + "type":"string", + "enum":[ + "STARTED", + "INPROGRESS", + "COMPLETE" + ] + }, + "ResourceHandlingOptionType":{ + "type":"string", + "max":64, + "min":1 + }, + "ResourceNameListType":{ + "type":"list", + "member":{"shape":"ResourceNameType"} + }, + "ResourceNameType":{ + "type":"string", + "max":2048, + "min":1 + }, + "ResourceSpecificResult":{ + "type":"structure", + "required":[ + "EvalResourceName", + "EvalResourceDecision" + ], + "members":{ + "EvalResourceName":{"shape":"ResourceNameType"}, + "EvalResourceDecision":{"shape":"PolicyEvaluationDecisionType"}, + "MatchedStatements":{"shape":"StatementListType"}, + "MissingContextValues":{"shape":"ContextKeyNamesResultListType"}, + "EvalDecisionDetails":{"shape":"EvalDecisionDetailsType"} + } + }, + "ResourceSpecificResultListType":{ + "type":"list", + "member":{"shape":"ResourceSpecificResult"} + }, + "ResyncMFADeviceRequest":{ + "type":"structure", + "required":[ + "UserName", + "SerialNumber", + "AuthenticationCode1", + "AuthenticationCode2" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "AuthenticationCode1":{"shape":"authenticationCodeType"}, + "AuthenticationCode2":{"shape":"authenticationCodeType"} + } + }, + "Role":{ + "type":"structure", + "required":[ + "Path", + "RoleName", + "RoleId", + "Arn", + "CreateDate" + ], + "members":{ + "Path":{"shape":"pathType"}, + "RoleName":{"shape":"roleNameType"}, + "RoleId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "AssumeRolePolicyDocument":{"shape":"policyDocumentType"} + } + }, + "RoleDetail":{ + "type":"structure", + "members":{ + "Path":{"shape":"pathType"}, + "RoleName":{"shape":"roleNameType"}, + "RoleId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "AssumeRolePolicyDocument":{"shape":"policyDocumentType"}, + 
"InstanceProfileList":{"shape":"instanceProfileListType"}, + "RolePolicyList":{"shape":"policyDetailListType"}, + "AttachedManagedPolicies":{"shape":"attachedPoliciesListType"} + } + }, + "SAMLMetadataDocumentType":{ + "type":"string", + "max":10000000, + "min":1000 + }, + "SAMLProviderListEntry":{ + "type":"structure", + "members":{ + "Arn":{"shape":"arnType"}, + "ValidUntil":{"shape":"dateType"}, + "CreateDate":{"shape":"dateType"} + } + }, + "SAMLProviderListType":{ + "type":"list", + "member":{"shape":"SAMLProviderListEntry"} + }, + "SAMLProviderNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w._-]+" + }, + "SSHPublicKey":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId", + "Fingerprint", + "SSHPublicKeyBody", + "Status" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"}, + "Fingerprint":{"shape":"publicKeyFingerprintType"}, + "SSHPublicKeyBody":{"shape":"publicKeyMaterialType"}, + "Status":{"shape":"statusType"}, + "UploadDate":{"shape":"dateType"} + } + }, + "SSHPublicKeyListType":{ + "type":"list", + "member":{"shape":"SSHPublicKeyMetadata"} + }, + "SSHPublicKeyMetadata":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId", + "Status", + "UploadDate" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"}, + "Status":{"shape":"statusType"}, + "UploadDate":{"shape":"dateType"} + } + }, + "ServerCertificate":{ + "type":"structure", + "required":[ + "ServerCertificateMetadata", + "CertificateBody" + ], + "members":{ + "ServerCertificateMetadata":{"shape":"ServerCertificateMetadata"}, + "CertificateBody":{"shape":"certificateBodyType"}, + "CertificateChain":{"shape":"certificateChainType"} + } + }, + "ServerCertificateMetadata":{ + "type":"structure", + "required":[ + "Path", + "ServerCertificateName", + "ServerCertificateId", + "Arn" + ], + "members":{ + "Path":{"shape":"pathType"}, + 
"ServerCertificateName":{"shape":"serverCertificateNameType"}, + "ServerCertificateId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "UploadDate":{"shape":"dateType"}, + "Expiration":{"shape":"dateType"} + } + }, + "ServiceFailureException":{ + "type":"structure", + "members":{ + "message":{"shape":"serviceFailureExceptionMessage"} + }, + "error":{ + "code":"ServiceFailure", + "httpStatusCode":500 + }, + "exception":true + }, + "SetDefaultPolicyVersionRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "VersionId" + ], + "members":{ + "PolicyArn":{"shape":"arnType"}, + "VersionId":{"shape":"policyVersionIdType"} + } + }, + "SigningCertificate":{ + "type":"structure", + "required":[ + "UserName", + "CertificateId", + "CertificateBody", + "Status" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "CertificateId":{"shape":"certificateIdType"}, + "CertificateBody":{"shape":"certificateBodyType"}, + "Status":{"shape":"statusType"}, + "UploadDate":{"shape":"dateType"} + } + }, + "SimulateCustomPolicyRequest":{ + "type":"structure", + "required":[ + "PolicyInputList", + "ActionNames" + ], + "members":{ + "PolicyInputList":{"shape":"SimulationPolicyListType"}, + "ActionNames":{"shape":"ActionNameListType"}, + "ResourceArns":{"shape":"ResourceNameListType"}, + "ResourcePolicy":{"shape":"policyDocumentType"}, + "ResourceOwner":{"shape":"ResourceNameType"}, + "CallerArn":{"shape":"ResourceNameType"}, + "ContextEntries":{"shape":"ContextEntryListType"}, + "ResourceHandlingOption":{"shape":"ResourceHandlingOptionType"}, + "MaxItems":{"shape":"maxItemsType"}, + "Marker":{"shape":"markerType"} + } + }, + "SimulatePolicyResponse":{ + "type":"structure", + "members":{ + "EvaluationResults":{"shape":"EvaluationResultsListType"}, + "IsTruncated":{"shape":"booleanType"}, + "Marker":{"shape":"markerType"} + } + }, + "SimulatePrincipalPolicyRequest":{ + "type":"structure", + "required":[ + "PolicySourceArn", + "ActionNames" + ], + "members":{ + 
"PolicySourceArn":{"shape":"arnType"}, + "PolicyInputList":{"shape":"SimulationPolicyListType"}, + "ActionNames":{"shape":"ActionNameListType"}, + "ResourceArns":{"shape":"ResourceNameListType"}, + "ResourcePolicy":{"shape":"policyDocumentType"}, + "ResourceOwner":{"shape":"ResourceNameType"}, + "CallerArn":{"shape":"ResourceNameType"}, + "ContextEntries":{"shape":"ContextEntryListType"}, + "ResourceHandlingOption":{"shape":"ResourceHandlingOptionType"}, + "MaxItems":{"shape":"maxItemsType"}, + "Marker":{"shape":"markerType"} + } + }, + "SimulationPolicyListType":{ + "type":"list", + "member":{"shape":"policyDocumentType"} + }, + "Statement":{ + "type":"structure", + "members":{ + "SourcePolicyId":{"shape":"PolicyIdentifierType"}, + "SourcePolicyType":{"shape":"PolicySourceType"}, + "StartPosition":{"shape":"Position"}, + "EndPosition":{"shape":"Position"} + } + }, + "StatementListType":{ + "type":"list", + "member":{"shape":"Statement"} + }, + "UnrecognizedPublicKeyEncodingException":{ + "type":"structure", + "members":{ + "message":{"shape":"unrecognizedPublicKeyEncodingMessage"} + }, + "error":{ + "code":"UnrecognizedPublicKeyEncoding", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UpdateAccessKeyRequest":{ + "type":"structure", + "required":[ + "AccessKeyId", + "Status" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "AccessKeyId":{"shape":"accessKeyIdType"}, + "Status":{"shape":"statusType"} + } + }, + "UpdateAccountPasswordPolicyRequest":{ + "type":"structure", + "members":{ + "MinimumPasswordLength":{"shape":"minimumPasswordLengthType"}, + "RequireSymbols":{"shape":"booleanType"}, + "RequireNumbers":{"shape":"booleanType"}, + "RequireUppercaseCharacters":{"shape":"booleanType"}, + "RequireLowercaseCharacters":{"shape":"booleanType"}, + "AllowUsersToChangePassword":{"shape":"booleanType"}, + "MaxPasswordAge":{"shape":"maxPasswordAgeType"}, + "PasswordReusePrevention":{"shape":"passwordReusePreventionType"}, + 
"HardExpiry":{"shape":"booleanObjectType"} + } + }, + "UpdateAssumeRolePolicyRequest":{ + "type":"structure", + "required":[ + "RoleName", + "PolicyDocument" + ], + "members":{ + "RoleName":{"shape":"roleNameType"}, + "PolicyDocument":{"shape":"policyDocumentType"} + } + }, + "UpdateGroupRequest":{ + "type":"structure", + "required":["GroupName"], + "members":{ + "GroupName":{"shape":"groupNameType"}, + "NewPath":{"shape":"pathType"}, + "NewGroupName":{"shape":"groupNameType"} + } + }, + "UpdateLoginProfileRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"userNameType"}, + "Password":{"shape":"passwordType"}, + "PasswordResetRequired":{"shape":"booleanObjectType"} + } + }, + "UpdateOpenIDConnectProviderThumbprintRequest":{ + "type":"structure", + "required":[ + "OpenIDConnectProviderArn", + "ThumbprintList" + ], + "members":{ + "OpenIDConnectProviderArn":{"shape":"arnType"}, + "ThumbprintList":{"shape":"thumbprintListType"} + } + }, + "UpdateSAMLProviderRequest":{ + "type":"structure", + "required":[ + "SAMLMetadataDocument", + "SAMLProviderArn" + ], + "members":{ + "SAMLMetadataDocument":{"shape":"SAMLMetadataDocumentType"}, + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "UpdateSAMLProviderResponse":{ + "type":"structure", + "members":{ + "SAMLProviderArn":{"shape":"arnType"} + } + }, + "UpdateSSHPublicKeyRequest":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyId", + "Status" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyId":{"shape":"publicKeyIdType"}, + "Status":{"shape":"statusType"} + } + }, + "UpdateServerCertificateRequest":{ + "type":"structure", + "required":["ServerCertificateName"], + "members":{ + "ServerCertificateName":{"shape":"serverCertificateNameType"}, + "NewPath":{"shape":"pathType"}, + "NewServerCertificateName":{"shape":"serverCertificateNameType"} + } + }, + "UpdateSigningCertificateRequest":{ + "type":"structure", + "required":[ + 
"CertificateId", + "Status" + ], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "CertificateId":{"shape":"certificateIdType"}, + "Status":{"shape":"statusType"} + } + }, + "UpdateUserRequest":{ + "type":"structure", + "required":["UserName"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "NewPath":{"shape":"pathType"}, + "NewUserName":{"shape":"userNameType"} + } + }, + "UploadSSHPublicKeyRequest":{ + "type":"structure", + "required":[ + "UserName", + "SSHPublicKeyBody" + ], + "members":{ + "UserName":{"shape":"userNameType"}, + "SSHPublicKeyBody":{"shape":"publicKeyMaterialType"} + } + }, + "UploadSSHPublicKeyResponse":{ + "type":"structure", + "members":{ + "SSHPublicKey":{"shape":"SSHPublicKey"} + } + }, + "UploadServerCertificateRequest":{ + "type":"structure", + "required":[ + "ServerCertificateName", + "CertificateBody", + "PrivateKey" + ], + "members":{ + "Path":{"shape":"pathType"}, + "ServerCertificateName":{"shape":"serverCertificateNameType"}, + "CertificateBody":{"shape":"certificateBodyType"}, + "PrivateKey":{"shape":"privateKeyType"}, + "CertificateChain":{"shape":"certificateChainType"} + } + }, + "UploadServerCertificateResponse":{ + "type":"structure", + "members":{ + "ServerCertificateMetadata":{"shape":"ServerCertificateMetadata"} + } + }, + "UploadSigningCertificateRequest":{ + "type":"structure", + "required":["CertificateBody"], + "members":{ + "UserName":{"shape":"existingUserNameType"}, + "CertificateBody":{"shape":"certificateBodyType"} + } + }, + "UploadSigningCertificateResponse":{ + "type":"structure", + "required":["Certificate"], + "members":{ + "Certificate":{"shape":"SigningCertificate"} + } + }, + "User":{ + "type":"structure", + "required":[ + "Path", + "UserName", + "UserId", + "Arn", + "CreateDate" + ], + "members":{ + "Path":{"shape":"pathType"}, + "UserName":{"shape":"userNameType"}, + "UserId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + 
"PasswordLastUsed":{"shape":"dateType"} + } + }, + "UserDetail":{ + "type":"structure", + "members":{ + "Path":{"shape":"pathType"}, + "UserName":{"shape":"userNameType"}, + "UserId":{"shape":"idType"}, + "Arn":{"shape":"arnType"}, + "CreateDate":{"shape":"dateType"}, + "UserPolicyList":{"shape":"policyDetailListType"}, + "GroupList":{"shape":"groupNameListType"}, + "AttachedManagedPolicies":{"shape":"attachedPoliciesListType"} + } + }, + "VirtualMFADevice":{ + "type":"structure", + "required":["SerialNumber"], + "members":{ + "SerialNumber":{"shape":"serialNumberType"}, + "Base32StringSeed":{"shape":"BootstrapDatum"}, + "QRCodePNG":{"shape":"BootstrapDatum"}, + "User":{"shape":"User"}, + "EnableDate":{"shape":"dateType"} + } + }, + "accessKeyIdType":{ + "type":"string", + "max":32, + "min":16, + "pattern":"[\\w]+" + }, + "accessKeyMetadataListType":{ + "type":"list", + "member":{"shape":"AccessKeyMetadata"} + }, + "accessKeySecretType":{ + "type":"string", + "sensitive":true + }, + "accountAliasListType":{ + "type":"list", + "member":{"shape":"accountAliasType"} + }, + "accountAliasType":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$" + }, + "arnType":{ + "type":"string", + "max":2048, + "min":20 + }, + "assignmentStatusType":{ + "type":"string", + "enum":[ + "Assigned", + "Unassigned", + "Any" + ] + }, + "attachedPoliciesListType":{ + "type":"list", + "member":{"shape":"AttachedPolicy"} + }, + "attachmentCountType":{"type":"integer"}, + "authenticationCodeType":{ + "type":"string", + "max":6, + "min":6, + "pattern":"[\\d]+" + }, + "booleanObjectType":{ + "type":"boolean", + "box":true + }, + "booleanType":{"type":"boolean"}, + "certificateBodyType":{ + "type":"string", + "max":16384, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "certificateChainType":{ + "type":"string", + "max":2097152, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "certificateIdType":{ + 
"type":"string", + "max":128, + "min":24, + "pattern":"[\\w]+" + }, + "certificateListType":{ + "type":"list", + "member":{"shape":"SigningCertificate"} + }, + "clientIDListType":{ + "type":"list", + "member":{"shape":"clientIDType"} + }, + "clientIDType":{ + "type":"string", + "max":255, + "min":1 + }, + "credentialReportExpiredExceptionMessage":{"type":"string"}, + "credentialReportNotPresentExceptionMessage":{"type":"string"}, + "credentialReportNotReadyExceptionMessage":{"type":"string"}, + "dateType":{"type":"timestamp"}, + "deleteConflictMessage":{"type":"string"}, + "duplicateCertificateMessage":{"type":"string"}, + "duplicateSSHPublicKeyMessage":{"type":"string"}, + "encodingType":{ + "type":"string", + "enum":[ + "SSH", + "PEM" + ] + }, + "entityAlreadyExistsMessage":{"type":"string"}, + "entityListType":{ + "type":"list", + "member":{"shape":"EntityType"} + }, + "entityTemporarilyUnmodifiableMessage":{"type":"string"}, + "existingUserNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "groupDetailListType":{ + "type":"list", + "member":{"shape":"GroupDetail"} + }, + "groupListType":{ + "type":"list", + "member":{"shape":"Group"} + }, + "groupNameListType":{ + "type":"list", + "member":{"shape":"groupNameType"} + }, + "groupNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "idType":{ + "type":"string", + "max":32, + "min":16, + "pattern":"[\\w]+" + }, + "instanceProfileListType":{ + "type":"list", + "member":{"shape":"InstanceProfile"} + }, + "instanceProfileNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "invalidAuthenticationCodeMessage":{"type":"string"}, + "invalidCertificateMessage":{"type":"string"}, + "invalidInputMessage":{"type":"string"}, + "invalidPublicKeyMessage":{"type":"string"}, + "invalidUserTypeMessage":{"type":"string"}, + "keyPairMismatchMessage":{"type":"string"}, + "limitExceededMessage":{"type":"string"}, + 
"malformedCertificateMessage":{"type":"string"}, + "malformedPolicyDocumentMessage":{"type":"string"}, + "markerType":{ + "type":"string", + "max":320, + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "maxItemsType":{ + "type":"integer", + "max":1000, + "min":1 + }, + "maxPasswordAgeType":{ + "type":"integer", + "box":true, + "max":1095, + "min":1 + }, + "mfaDeviceListType":{ + "type":"list", + "member":{"shape":"MFADevice"} + }, + "minimumPasswordLengthType":{ + "type":"integer", + "max":128, + "min":6 + }, + "noSuchEntityMessage":{"type":"string"}, + "passwordPolicyViolationMessage":{"type":"string"}, + "passwordReusePreventionType":{ + "type":"integer", + "box":true, + "max":24, + "min":1 + }, + "passwordType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+", + "sensitive":true + }, + "pathPrefixType":{ + "type":"string", + "max":512, + "min":1, + "pattern":"\\u002F[\\u0021-\\u007F]*" + }, + "pathType":{ + "type":"string", + "max":512, + "min":1, + "pattern":"(\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F)" + }, + "policyDescriptionType":{ + "type":"string", + "max":1000 + }, + "policyDetailListType":{ + "type":"list", + "member":{"shape":"PolicyDetail"} + }, + "policyDocumentType":{ + "type":"string", + "max":131072, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "policyDocumentVersionListType":{ + "type":"list", + "member":{"shape":"PolicyVersion"} + }, + "policyEvaluationErrorMessage":{"type":"string"}, + "policyListType":{ + "type":"list", + "member":{"shape":"Policy"} + }, + "policyNameListType":{ + "type":"list", + "member":{"shape":"policyNameType"} + }, + "policyNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "policyPathType":{ + "type":"string", + "pattern":"((/[A-Za-z0-9\\.,\\+@=_-]+)*)/" + }, + "policyScopeType":{ + "type":"string", + "enum":[ + "All", + "AWS", + "Local" + ] + }, + "policyVersionIdType":{ + "type":"string", + 
"pattern":"v[1-9][0-9]*(\\.[A-Za-z0-9-]*)?" + }, + "privateKeyType":{ + "type":"string", + "max":16384, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+", + "sensitive":true + }, + "publicKeyFingerprintType":{ + "type":"string", + "max":48, + "min":48, + "pattern":"[:\\w]+" + }, + "publicKeyIdType":{ + "type":"string", + "max":128, + "min":20, + "pattern":"[\\w]+" + }, + "publicKeyMaterialType":{ + "type":"string", + "max":16384, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "roleDetailListType":{ + "type":"list", + "member":{"shape":"RoleDetail"} + }, + "roleListType":{ + "type":"list", + "member":{"shape":"Role"} + }, + "roleNameType":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "serialNumberType":{ + "type":"string", + "max":256, + "min":9, + "pattern":"[\\w+=/:,.@-]+" + }, + "serverCertificateMetadataListType":{ + "type":"list", + "member":{"shape":"ServerCertificateMetadata"} + }, + "serverCertificateNameType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "serviceFailureExceptionMessage":{"type":"string"}, + "statusType":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, + "stringType":{"type":"string"}, + "summaryKeyType":{ + "type":"string", + "enum":[ + "Users", + "UsersQuota", + "Groups", + "GroupsQuota", + "ServerCertificates", + "ServerCertificatesQuota", + "UserPolicySizeQuota", + "GroupPolicySizeQuota", + "GroupsPerUserQuota", + "SigningCertificatesPerUserQuota", + "AccessKeysPerUserQuota", + "MFADevices", + "MFADevicesInUse", + "AccountMFAEnabled", + "AccountAccessKeysPresent", + "AccountSigningCertificatesPresent", + "AttachedPoliciesPerGroupQuota", + "AttachedPoliciesPerRoleQuota", + "AttachedPoliciesPerUserQuota", + "Policies", + "PoliciesQuota", + "PolicySizeQuota", + "PolicyVersionsInUse", + "PolicyVersionsInUseQuota", + "VersionsPerPolicyQuota" + ] + }, + "summaryMapType":{ + "type":"map", + "key":{"shape":"summaryKeyType"}, 
+ "value":{"shape":"summaryValueType"} + }, + "summaryValueType":{"type":"integer"}, + "thumbprintListType":{ + "type":"list", + "member":{"shape":"thumbprintType"} + }, + "thumbprintType":{ + "type":"string", + "max":40, + "min":40 + }, + "unrecognizedPublicKeyEncodingMessage":{"type":"string"}, + "userDetailListType":{ + "type":"list", + "member":{"shape":"UserDetail"} + }, + "userListType":{ + "type":"list", + "member":{"shape":"User"} + }, + "userNameType":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\w+=,.@-]+" + }, + "virtualMFADeviceListType":{ + "type":"list", + "member":{"shape":"VirtualMFADevice"} + }, + "virtualMFADeviceName":{ + "type":"string", + "min":1, + "pattern":"[\\w+=,.@-]+" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2520 @@ +{ + "version": "2.0", + "service": "AWS Identity and Access Management

    AWS Identity and Access Management (IAM) is a web service that you can use to manage users and user permissions under your AWS account. This guide provides descriptions of IAM actions that you can call programmatically. For general information about IAM, see AWS Identity and Access Management (IAM). For the user guide for IAM, see Using IAM.

    AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to IAM and AWS. For example, the SDKs take care of tasks such as cryptographically signing requests (see below), managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page.

    We recommend that you use the AWS SDKs to make programmatic API calls to IAM. However, you can also use the IAM Query API to make direct calls to the IAM web service. To learn more about the IAM Query API, see Making Query Requests in the Using IAM guide. IAM supports GET and POST requests for all actions. That is, the API does not require you to use GET for some actions and POST for others. However, GET requests are subject to the size limitation of a URL. Therefore, for operations that require larger sizes, use a POST request.

    Signing Requests

    Requests must be signed using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account access key ID and secret access key for everyday work with IAM. You can use the access key ID and secret access key for an IAM user or you can use the AWS Security Token Service to generate temporary security credentials and use those to sign requests.

    To sign requests, we recommend that you use Signature Version 4. If you have an existing application that uses Signature Version 2, you do not have to update it to use Signature Version 4. However, some operations now require Signature Version 4. The documentation for operations that require version 4 indicate this requirement.

    Additional Resources

    For more information, see the following:

    • AWS Security Credentials. This topic provides general information about the types of credentials used for accessing AWS.
    • IAM Best Practices. This topic presents a list of suggestions for using the IAM service to help secure your AWS resources.
    • Signing AWS API Requests. This set of topics walks you through the process of signing a request using an access key ID and secret access key.
    ", + "operations": { + "AddClientIDToOpenIDConnectProvider": "

    Adds a new client ID (also known as audience) to the list of client IDs already registered for the specified IAM OpenID Connect provider.

    This action is idempotent; it does not fail or return an error if you add an existing client ID to the provider.

    ", + "AddRoleToInstanceProfile": "

    Adds the specified role to the specified instance profile. For more information about roles, go to Working with Roles. For more information about instance profiles, go to About Instance Profiles.

    ", + "AddUserToGroup": "

    Adds the specified user to the specified group.

    ", + "AttachGroupPolicy": "

    Attaches the specified managed policy to the specified group.

    You use this API to attach a managed policy to a group. To embed an inline policy in a group, use PutGroupPolicy.

    For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "AttachRolePolicy": "

    Attaches the specified managed policy to the specified role.

    When you attach a managed policy to a role, the managed policy is used as the role's access (permissions) policy. You cannot use a managed policy as the role's trust policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy.

    Use this API to attach a managed policy to a role. To embed an inline policy in a role, use PutRolePolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "AttachUserPolicy": "

    Attaches the specified managed policy to the specified user.

    You use this API to attach a managed policy to a user. To embed an inline policy in a user, use PutUserPolicy.

    For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "ChangePassword": "

    Changes the password of the IAM user who is calling this action. The root account password is not affected by this action.

    To change the password for a different user, see UpdateLoginProfile. For more information about modifying passwords, see Managing Passwords in the IAM User Guide.

    ", + "CreateAccessKey": "

    Creates a new AWS secret access key and corresponding AWS access key ID for the specified user. The default status for new keys is Active.

    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    For information about limits on the number of keys you can create, see Limitations on IAM Entities in the IAM User Guide.

    To ensure the security of your AWS account, the secret access key is accessible only during key and user creation. You must save the key (for example, in a text file) if you want to be able to access it again. If a secret key is lost, you can delete the access keys for the associated user and then create new keys. ", + "CreateAccountAlias": "

    Creates an alias for your AWS account. For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

    ", + "CreateGroup": "

    Creates a new group.

    For information about the number of groups you can create, see Limitations on IAM Entities in the IAM User Guide.

    ", + "CreateInstanceProfile": "

    Creates a new instance profile. For information about instance profiles, go to About Instance Profiles.

    For information about the number of instance profiles you can create, see Limitations on IAM Entities in the IAM User Guide.

    ", + "CreateLoginProfile": "

    Creates a password for the specified user, giving the user the ability to access AWS services through the AWS Management Console. For more information about managing passwords, see Managing Passwords in the Using IAM guide.

    ", + "CreateOpenIDConnectProvider": "

    Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

    The OIDC provider that you create with this operation can be used as a principal in a role's trust policy to establish a trust relationship between AWS and the OIDC provider.

    When you create the IAM OIDC provider, you specify the URL of the OIDC identity provider (IdP) to trust, a list of client IDs (also known as audiences) that identify the application or applications that are allowed to authenticate using the OIDC provider, and a list of thumbprints of the server certificate(s) that the IdP uses. You get all of this information from the OIDC IdP that you want to use for access to AWS.

    Because trust for the OIDC provider is ultimately derived from the IAM provider that this action creates, it is a best practice to limit access to the CreateOpenIDConnectProvider action to highly-privileged users. ", + "CreatePolicy": "

    Creates a new managed policy for your AWS account.

    This operation creates a policy version with a version identifier of v1 and sets v1 as the policy's default version. For more information about policy versions, see Versioning for Managed Policies in the IAM User Guide.

    For more information about managed policies in general, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "CreatePolicyVersion": "

    Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.

    Optionally, you can set the new version as the policy's default version. The default version is the operative version; that is, the version that is in effect for the IAM users, groups, and roles that the policy is attached to.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    ", + "CreateRole": "

    Creates a new role for your AWS account. For more information about roles, go to Working with Roles. For information about limitations on role names and the number of roles you can create, go to Limitations on IAM Entities in the IAM User Guide.

    ", + "CreateSAMLProvider": "

    Creates an IAM entity to describe an identity provider (IdP) that supports SAML 2.0.

    The SAML provider that you create with this operation can be used as a principal in a role's trust policy to establish a trust relationship between AWS and a SAML identity provider. You can create an IAM role that supports Web-based single sign-on (SSO) to the AWS Management Console or one that supports API access to AWS.

    When you create the SAML provider, you upload a SAML metadata document that you get from your IdP and that includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP.

    This operation requires Signature Version 4.

    For more information, see Enabling SAML 2.0 Federated Users to Access the AWS Management Console and About SAML 2.0-based Federation in the IAM User Guide.

    ", + "CreateUser": "

    Creates a new user for your AWS account.

    For information about limitations on the number of users you can create, see Limitations on IAM Entities in the IAM User Guide.

    ", + "CreateVirtualMFADevice": "

    Creates a new virtual MFA device for the AWS account. After creating the virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the Using IAM guide.

    For information about limits on the number of MFA devices you can create, see Limitations on Entities in the Using IAM guide.

    The seed information contained in the QR code and the Base32 string should be treated like any other secret access information, such as your AWS access keys or your passwords. After you provision your virtual device, you should ensure that the information is destroyed following secure procedures. ", + "DeactivateMFADevice": "

    Deactivates the specified MFA device and removes it from association with the user name for which it was originally enabled.

    For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the Using IAM guide.

    ", + "DeleteAccessKey": "

    Deletes the access key associated with the specified user.

    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    ", + "DeleteAccountAlias": "

    Deletes the specified AWS account alias. For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

    ", + "DeleteAccountPasswordPolicy": "

    Deletes the password policy for the AWS account.

    ", + "DeleteGroup": "

    Deletes the specified group. The group must not contain any users or have any attached policies.

    ", + "DeleteGroupPolicy": "

    Deletes the specified inline policy that is embedded in the specified group.

    A group can also have managed policies attached to it. To detach a managed policy from a group, use DetachGroupPolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DeleteInstanceProfile": "

    Deletes the specified instance profile. The instance profile must not have an associated role.

    Make sure you do not have any Amazon EC2 instances running with the instance profile you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

    For more information about instance profiles, go to About Instance Profiles.

    ", + "DeleteLoginProfile": "

    Deletes the password for the specified user, which terminates the user's ability to access AWS services through the AWS Management Console.

    Deleting a user's password does not prevent a user from accessing IAM through the command line interface or the API. To prevent all user access you must also either make the access key inactive or delete it. For more information about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey. ", + "DeleteOpenIDConnectProvider": "

    Deletes an IAM OpenID Connect identity provider.

    Deleting an OIDC provider does not update any roles that reference the provider as a principal in their trust policies. Any attempt to assume a role that references a provider that has been deleted will fail.

    This action is idempotent; it does not fail or return an error if you call the action for a provider that was already deleted.

    ", + "DeletePolicy": "

    Deletes the specified managed policy.

    Before you can delete a managed policy, you must detach the policy from all users, groups, and roles that it is attached to, and you must delete all of the policy's versions. The following steps describe the process for deleting a managed policy:

    1. Detach the policy from all users, groups, and roles that the policy is attached to, using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy APIs. To list all the users, groups, and roles that a policy is attached to, use ListEntitiesForPolicy.
    2. Delete all versions of the policy using DeletePolicyVersion. To list the policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion to delete the version that is marked as the default version. You delete the policy's default version in the next step of the process.
    3. Delete the policy (this automatically deletes the policy's default version) using this API.

    For information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DeletePolicyVersion": "

    Deletes the specified version of the specified managed policy.

    You cannot delete the default version of a policy using this API. To delete the default version of a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

    For information about versions for managed policies, refer to Versioning for Managed Policies in the IAM User Guide.

    ", + "DeleteRole": "

    Deletes the specified role. The role must not have any policies attached. For more information about roles, go to Working with Roles.

    Make sure you do not have any Amazon EC2 instances running with the role you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance. ", + "DeleteRolePolicy": "

    Deletes the specified inline policy that is embedded in the specified role.

    A role can also have managed policies attached to it. To detach a managed policy from a role, use DetachRolePolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DeleteSAMLProvider": "

    Deletes a SAML provider.

    Deleting the provider does not update any roles that reference the SAML provider as a principal in their trust policies. Any attempt to assume a role that references a SAML provider that has been deleted will fail.

    This operation requires Signature Version 4. ", + "DeleteSSHPublicKey": "

    Deletes the specified SSH public key.

    The SSH public key deleted by this action is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    ", + "DeleteServerCertificate": "

    Deletes the specified server certificate.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    If you are using a server certificate with Elastic Load Balancing, deleting the certificate could have implications for your application. If Elastic Load Balancing doesn't detect the deletion of bound certificates, it may continue to use the certificates. This could cause Elastic Load Balancing to stop accepting traffic. We recommend that you remove the reference to the certificate from Elastic Load Balancing before using this command to delete the certificate. For more information, go to DeleteLoadBalancerListeners in the Elastic Load Balancing API Reference. ", + "DeleteSigningCertificate": "

    Deletes the specified signing certificate associated with the specified user.

    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    ", + "DeleteUser": "

    Deletes the specified user. The user must not belong to any groups, have any keys or signing certificates, or have any attached policies.

    ", + "DeleteUserPolicy": "

    Deletes the specified inline policy that is embedded in the specified user.

    A user can also have managed policies attached to it. To detach a managed policy from a user, use DetachUserPolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DeleteVirtualMFADevice": "

    Deletes a virtual MFA device.

    You must deactivate a user's virtual MFA device before you can delete it. For information about deactivating MFA devices, see DeactivateMFADevice. ", + "DetachGroupPolicy": "

    Removes the specified managed policy from the specified group.

    A group can also have inline policies embedded with it. To delete an inline policy, use the DeleteGroupPolicy API. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DetachRolePolicy": "

    Removes the specified managed policy from the specified role.

    A role can also have inline policies embedded with it. To delete an inline policy, use the DeleteRolePolicy API. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "DetachUserPolicy": "

    Removes the specified managed policy from the specified user.

    A user can also have inline policies embedded with it. To delete an inline policy, use the DeleteUserPolicy API. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "EnableMFADevice": "

    Enables the specified MFA device and associates it with the specified user name. When enabled, the MFA device is required for every subsequent login by the user name associated with the device.

    ", + "GenerateCredentialReport": "

    Generates a credential report for the AWS account. For more information about the credential report, see Getting Credential Reports in the IAM User Guide.

    ", + "GetAccessKeyLastUsed": "

    Retrieves information about when the specified access key was last used. The information includes the date and time of last use, along with the AWS service and region that were specified in the last request made with that key.

    ", + "GetAccountAuthorizationDetails": "

    Retrieves information about all IAM users, groups, roles, and policies in your account, including their relationships to one another. Use this API to obtain a snapshot of the configuration of IAM permissions (users, groups, roles, and policies) in your account.

    You can optionally filter the results using the Filter parameter. You can paginate the results using the MaxItems and Marker parameters.

    ", + "GetAccountPasswordPolicy": "

    Retrieves the password policy for the AWS account. For more information about using a password policy, go to Managing an IAM Password Policy.

    ", + "GetAccountSummary": "

    Retrieves information about IAM entity usage and IAM quotas in the AWS account.

    For information about limitations on IAM entities, see Limitations on IAM Entities in the IAM User Guide.

    ", + "GetContextKeysForCustomPolicy": "

    Gets a list of all of the context keys referenced in Condition elements in the input policies. The policies are supplied as a list of one or more strings. To get the context keys from policies associated with an IAM user, group, or role, use GetContextKeysForPrincipalPolicy.

    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request, and can be evaluated by using the Condition element of an IAM policy. Use GetContextKeysForCustomPolicy to understand what key names and values you must supply when you call SimulateCustomPolicy. Note that all parameters are shown in unencoded form here for clarity, but must be URL encoded to be included as a part of a real HTML request.

    ", + "GetContextKeysForPrincipalPolicy": "

    Gets a list of all of the context keys referenced in Condition elements in all of the IAM policies attached to the specified IAM entity. The entity can be an IAM user, group, or role. If you specify a user, then the request also includes all of the policies attached to groups that the user is a member of.

    You can optionally include a list of one or more additional policies, specified as strings. If you want to include only a list of policies by string, use GetContextKeysForCustomPolicy instead.

    Note: This API discloses information about the permissions granted to other users. If you do not want users to see other user's permissions, then consider allowing them to use GetContextKeysForCustomPolicy instead.

    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request, and can be evaluated by using the Condition element of an IAM policy. Use GetContextKeysForPrincipalPolicy to understand what key names and values you must supply when you call SimulatePrincipalPolicy.

    ", + "GetCredentialReport": "

    Retrieves a credential report for the AWS account. For more information about the credential report, see Getting Credential Reports in the IAM User Guide.

    ", + "GetGroup": "

    Returns a list of users that are in the specified group. You can paginate the results using the MaxItems and Marker parameters.

    ", + "GetGroupPolicy": "

    Retrieves the specified inline policy document that is embedded in the specified group.

    A group can also have managed policies attached to it. To retrieve a managed policy document that is attached to a group, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

    For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "GetInstanceProfile": "

    Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, go to About Instance Profiles. For more information about ARNs, go to ARNs.

    ", + "GetLoginProfile": "

    Retrieves the user name and password-creation date for the specified user. If the user has not been assigned a password, the action returns a 404 (NoSuchEntity) error.

    ", + "GetOpenIDConnectProvider": "

    Returns information about the specified OpenID Connect provider.

    ", + "GetPolicy": "

    Retrieves information about the specified managed policy, including the policy's default version and the total number of users, groups, and roles that the policy is attached to. For a list of the specific users, groups, and roles that the policy is attached to, use the ListEntitiesForPolicy API. This API returns metadata about the policy. To retrieve the policy document for a specific version of the policy, use GetPolicyVersion.

    This API retrieves information about managed policies. To retrieve information about an inline policy that is embedded with a user, group, or role, use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API.

    For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "GetPolicyVersion": "

    Retrieves information about the specified version of the specified managed policy, including the policy document.

    To list the available versions for a policy, use ListPolicyVersions.

    This API retrieves information about managed policies. To retrieve information about an inline policy that is embedded in a user, group, or role, use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API.

    For more information about the types of policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "GetRole": "

    Retrieves information about the specified role, including the role's path, GUID, ARN, and the policy granting permission to assume the role. For more information about ARNs, go to ARNs. For more information about roles, go to Working with Roles.

    ", + "GetRolePolicy": "

    Retrieves the specified inline policy document that is embedded with the specified role.

    A role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

    For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    For more information about roles, go to Using Roles to Delegate Permissions and Federate Identities.

    ", + "GetSAMLProvider": "

    Returns the SAML provider metadocument that was uploaded when the provider was created or updated.

    This operation requires Signature Version 4. ", + "GetSSHPublicKey": "

    Retrieves the specified SSH public key, including metadata about the key.

    The SSH public key retrieved by this action is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    ", + "GetServerCertificate": "

    Retrieves information about the specified server certificate.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    ", + "GetUser": "

    Retrieves information about the specified user, including the user's creation date, path, unique ID, and ARN.

    If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID used to sign the request.

    ", + "GetUserPolicy": "

    Retrieves the specified inline policy document that is embedded in the specified user.

    A user can also have managed policies attached to it. To retrieve a managed policy document that is attached to a user, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

    For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "ListAccessKeys": "

    Returns information about the access key IDs associated with the specified user. If there are none, the action returns an empty list.

    Although each user is limited to a small number of keys, you can still paginate the results using the MaxItems and Marker parameters.

    If the UserName field is not specified, the UserName is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    To ensure the security of your AWS account, the secret access key is accessible only during key and user creation. ", + "ListAccountAliases": "

    Lists the account alias associated with the account (Note: you can have only one). For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

    ", + "ListAttachedGroupPolicies": "

    Lists all managed policies that are attached to the specified group.

    A group can also have inline policies embedded with it. To list the inline policies for a group, use the ListGroupPolicies API. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the action returns an empty list.

    ", + "ListAttachedRolePolicies": "

    Lists all managed policies that are attached to the specified role.

    A role can also have inline policies embedded with it. To list the inline policies for a role, use the ListRolePolicies API. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified role (or none that match the specified path prefix), the action returns an empty list.

    ", + "ListAttachedUserPolicies": "

    Lists all managed policies that are attached to the specified user.

    A user can also have inline policies embedded with it. To list the inline policies for a user, use the ListUserPolicies API. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the action returns an empty list.

    ", + "ListEntitiesForPolicy": "

    Lists all users, groups, and roles that the specified managed policy is attached to.

    You can use the optional EntityFilter parameter to limit the results to a particular type of entity (users, groups, or roles). For example, to list only the roles that are attached to the specified policy, set EntityFilter to Role.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListGroupPolicies": "

    Lists the names of the inline policies that are embedded in the specified group.

    A group can also have managed policies attached to it. To list the managed policies that are attached to a group, use ListAttachedGroupPolicies. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified group, the action returns an empty list.

    ", + "ListGroups": "

    Lists the groups that have the specified path prefix.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListGroupsForUser": "

    Lists the groups the specified user belongs to.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListInstanceProfiles": "

    Lists the instance profiles that have the specified path prefix. If there are none, the action returns an empty list. For more information about instance profiles, go to About Instance Profiles.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListInstanceProfilesForRole": "

    Lists the instance profiles that have the specified associated role. If there are none, the action returns an empty list. For more information about instance profiles, go to About Instance Profiles.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListMFADevices": "

    Lists the MFA devices. If the request includes the user name, then this action lists all the MFA devices associated with the specified user name. If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListOpenIDConnectProviders": "

    Lists information about the OpenID Connect providers in the AWS account.

    ", + "ListPolicies": "

    Lists all the managed policies that are available to your account, including your own customer managed policies and all AWS managed policies.

    You can filter the list of policies that is returned using the optional OnlyAttached, Scope, and PathPrefix parameters. For example, to list only the customer managed policies in your AWS account, set Scope to Local. To list only AWS managed policies, set Scope to AWS.

    You can paginate the results using the MaxItems and Marker parameters.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "ListPolicyVersions": "

    Lists information about the versions of the specified managed policy, including the version that is set as the policy's default version.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "ListRolePolicies": "

    Lists the names of the inline policies that are embedded in the specified role.

    A role can also have managed policies attached to it. To list the managed policies that are attached to a role, use ListAttachedRolePolicies. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified role, the action returns an empty list.

    ", + "ListRoles": "

    Lists the roles that have the specified path prefix. If there are none, the action returns an empty list. For more information about roles, go to Working with Roles.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListSAMLProviders": "

    Lists the SAML providers in the account.

    This operation requires Signature Version 4. ", + "ListSSHPublicKeys": "

    Returns information about the SSH public keys associated with the specified IAM user. If there are none, the action returns an empty list.

    The SSH public keys returned by this action are used only for authenticating the IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    Although each user is limited to a small number of keys, you can still paginate the results using the MaxItems and Marker parameters.

    ", + "ListServerCertificates": "

    Lists the server certificates that have the specified path prefix. If none exist, the action returns an empty list.

    You can paginate the results using the MaxItems and Marker parameters.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    ", + "ListSigningCertificates": "

    Returns information about the signing certificates associated with the specified user. If there are none, the action returns an empty list.

    Although each user is limited to a small number of signing certificates, you can still paginate the results using the MaxItems and Marker parameters.

    If the UserName field is not specified, the user name is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    ", + "ListUserPolicies": "

    Lists the names of the inline policies embedded in the specified user.

    A user can also have managed policies attached to it. To list the managed policies that are attached to a user, use ListAttachedUserPolicies. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified user, the action returns an empty list.

    ", + "ListUsers": "

    Lists the IAM users that have the specified path prefix. If no path prefix is specified, the action returns all users in the AWS account. If there are none, the action returns an empty list.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "ListVirtualMFADevices": "

    Lists the virtual MFA devices under the AWS account by assignment status. If you do not specify an assignment status, the action returns a list of all virtual MFA devices. Assignment status can be Assigned, Unassigned, or Any.

    You can paginate the results using the MaxItems and Marker parameters.

    ", + "PutGroupPolicy": "

    Adds (or updates) an inline policy document that is embedded in the specified group.

    A user can also have managed policies attached to it. To attach a managed policy to a group, use AttachGroupPolicy. To create a new managed policy, use CreatePolicy. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    For information about limits on the number of inline policies that you can embed in a group, see Limitations on IAM Entities in the IAM User Guide.

    Because policy documents can be large, you should use POST rather than GET when calling PutGroupPolicy. For general information about using the Query API with IAM, go to Making Query Requests in the Using IAM guide. ", + "PutRolePolicy": "

    Adds (or updates) an inline policy document that is embedded in the specified role.

    When you embed an inline policy in a role, the inline policy is used as the role's access (permissions) policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy. For more information about roles, go to Using Roles to Delegate Permissions and Federate Identities.

    A role can also have a managed policy attached to it. To attach a managed policy to a role, use AttachRolePolicy. To create a new managed policy, use CreatePolicy. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    For information about limits on the number of inline policies that you can embed with a role, see Limitations on IAM Entities in the IAM User Guide.

    Because policy documents can be large, you should use POST rather than GET when calling PutRolePolicy. For general information about using the Query API with IAM, go to Making Query Requests in the Using IAM guide. ", + "PutUserPolicy": "

    Adds (or updates) an inline policy document that is embedded in the specified user.

    A user can also have a managed policy attached to it. To attach a managed policy to a user, use AttachUserPolicy. To create a new managed policy, use CreatePolicy. For information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    For information about limits on the number of inline policies that you can embed in a user, see Limitations on IAM Entities in the IAM User Guide.

    Because policy documents can be large, you should use POST rather than GET when calling PutUserPolicy. For general information about using the Query API with IAM, go to Making Query Requests in the Using IAM guide. ", + "RemoveClientIDFromOpenIDConnectProvider": "

    Removes the specified client ID (also known as audience) from the list of client IDs registered for the specified IAM OpenID Connect provider.

    This action is idempotent; it does not fail or return an error if you try to remove a client ID that was removed previously.

    ", + "RemoveRoleFromInstanceProfile": "

    Removes the specified role from the specified instance profile.

    Make sure you do not have any Amazon EC2 instances running with the role you are about to remove from the instance profile. Removing a role from an instance profile that is associated with a running instance will break any applications running on the instance.

    For more information about roles, go to Working with Roles. For more information about instance profiles, go to About Instance Profiles.

    ", + "RemoveUserFromGroup": "

    Removes the specified user from the specified group.

    ", + "ResyncMFADevice": "

    Synchronizes the specified MFA device with AWS servers.

    For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the Using IAM guide.

    ", + "SetDefaultPolicyVersion": "

    Sets the specified version of the specified policy as the policy's default (operative) version.

    This action affects all users, groups, and roles that the policy is attached to. To list the users, groups, and roles that the policy is attached to, use the ListEntitiesForPolicy API.

    For information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

    ", + "SimulateCustomPolicy": "

    Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API actions and AWS resources to determine the policies' effective permissions. The policies are provided as strings.

    The simulation does not perform the API actions; it only checks the authorization to determine if the simulated policies allow or deny the actions.

    If you want to simulate existing policies attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.

    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.

    If the output is long, you can use MaxItems and Marker parameters to paginate the results.

    ", + "SimulatePrincipalPolicy": "

    Simulate how a set of IAM policies attached to an IAM entity works with a list of API actions and AWS resources to determine the policies' effective permissions. The entity can be an IAM user, group, or role. If you specify a user, then the simulation also includes all of the policies that are attached to groups that the user belongs to .

    You can optionally include a list of one or more additional policies specified as strings to include in the simulation. If you want to simulate only policies specified as strings, use SimulateCustomPolicy instead.

    You can also optionally include one resource-based policy to be evaluated with each of the resources included in the simulation.

    The simulation does not perform the API actions, it only checks the authorization to determine if the simulated policies allow or deny the actions.

    Note: This API discloses information about the permissions granted to other users. If you do not want users to see other user's permissions, then consider allowing them to use SimulateCustomPolicy instead.

    Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy.

    If the output is long, you can use the MaxItems and Marker parameters to paginate the results.

    ", + "UpdateAccessKey": "

    Changes the status of the specified access key from Active to Inactive, or vice versa. This action can be used to disable a user's key as part of a key rotation work flow.

    If the UserName field is not specified, the UserName is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    For information about rotating keys, see Managing Keys and Certificates in the IAM User Guide.

    ", + "UpdateAccountPasswordPolicy": "

    Updates the password policy settings for the AWS account.

    This action does not support partial updates. No parameters are required, but if you do not specify a parameter, that parameter's value reverts to its default value. See the Request Parameters section for each parameter's default value.

    For more information about using a password policy, see Managing an IAM Password Policy in the IAM User Guide.

    ", + "UpdateAssumeRolePolicy": "

    Updates the policy that grants an entity permission to assume a role. For more information about roles, go to Using Roles to Delegate Permissions and Federate Identities.

    ", + "UpdateGroup": "

    Updates the name and/or the path of the specified group.

    You should understand the implications of changing a group's path or name. For more information, see Renaming Users and Groups in the IAM User Guide. To change a group name the requester must have appropriate permissions on both the source object and the target object. For example, to change Managers to MGRs, the entity making the request must have permission on Managers and MGRs, or must have permission on all (*). For more information about permissions, see Permissions and Policies. ", + "UpdateLoginProfile": "

    Changes the password for the specified user.

    Users can change their own passwords by calling ChangePassword. For more information about modifying passwords, see Managing Passwords in the IAM User Guide.

    ", + "UpdateOpenIDConnectProviderThumbprint": "

    Replaces the existing list of server certificate thumbprints with a new list.

    The list that you pass with this action completely replaces the existing list of thumbprints. (The lists are not merged.)

    Typically, you need to update a thumbprint only when the identity provider's certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal will fail until the certificate thumbprint is updated.

    Because trust for the OpenID Connect provider is ultimately derived from the provider's certificate and is validated by the thumbprint, it is a best practice to limit access to the UpdateOpenIDConnectProviderThumbprint action to highly-privileged users. ", + "UpdateSAMLProvider": "

    Updates the metadata document for an existing SAML provider.

    This operation requires Signature Version 4. ", + "UpdateSSHPublicKey": "

    Sets the status of the specified SSH public key to active or inactive. SSH public keys that are inactive cannot be used for authentication. This action can be used to disable a user's SSH public key as part of a key rotation work flow.

    The SSH public key affected by this action is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    ", + "UpdateServerCertificate": "

    Updates the name and/or the path of the specified server certificate.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    You should understand the implications of changing a server certificate's path or name. For more information, see Renaming a Server Certificate in the IAM User Guide. To change a server certificate name the requester must have appropriate permissions on both the source object and the target object. For example, to change the name from ProductionCert to ProdCert, the entity making the request must have permission on ProductionCert and ProdCert, or must have permission on all (*). For more information about permissions, see Access Management in the IAM User Guide. ", + "UpdateSigningCertificate": "

    Changes the status of the specified signing certificate from active to disabled, or vice versa. This action can be used to disable a user's signing certificate as part of a certificate rotation work flow.

    If the UserName field is not specified, the UserName is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    ", + "UpdateUser": "

    Updates the name and/or the path of the specified user.

    You should understand the implications of changing a user's path or name. For more information, see Renaming Users and Groups in the IAM User Guide. To change a user name the requester must have appropriate permissions on both the source object and the target object. For example, to change Bob to Robert, the entity making the request must have permission on Bob and Robert, or must have permission on all (*). For more information about permissions, see Permissions and Policies. ", + "UploadSSHPublicKey": "

    Uploads an SSH public key and associates it with the specified IAM user.

    The SSH public key uploaded by this action can be used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

    ", + "UploadServerCertificate": "

    Uploads a server certificate entity for the AWS account. The server certificate entity includes a public key certificate, a private key, and an optional certificate chain, which should all be PEM-encoded.

    For more information about working with server certificates, including a list of AWS services that can use the server certificates that you manage with IAM, go to Working with Server Certificates in the IAM User Guide.

    For information about the number of server certificates you can upload, see Limitations on IAM Entities and Objects in the IAM User Guide.

    Because the body of the public key certificate, private key, and the certificate chain can be large, you should use POST rather than GET when calling UploadServerCertificate. For information about setting up signatures and authorization through the API, go to Signing AWS API Requests in the AWS General Reference. For general information about using the Query API with IAM, go to Calling the API by Making HTTP Query Requests in the IAM User Guide. ", + "UploadSigningCertificate": "

    Uploads an X.509 signing certificate and associates it with the specified user. Some AWS services use X.509 signing certificates to validate requests that are signed with a corresponding private key. When you upload the certificate, its default status is Active.

    If the UserName field is not specified, the user name is determined implicitly based on the AWS access key ID used to sign the request. Because this action works for access keys under the AWS account, you can use this action to manage root credentials even if the AWS account has no associated users.

    Because the body of an X.509 certificate can be large, you should use POST rather than GET when calling UploadSigningCertificate. For information about setting up signatures and authorization through the API, go to Signing AWS API Requests in the AWS General Reference. For general information about using the Query API with IAM, go to Making Query Requests in the Using IAM guide. " + }, + "shapes": { + "AccessKey": { + "base": "

    Contains information about an AWS access key.

    This data type is used as a response element in the CreateAccessKey and ListAccessKeys actions.

    The SecretAccessKey value is returned only in response to CreateAccessKey. You can get a secret access key only when you first create an access key; you cannot recover the secret access key later. If you lose a secret access key, you must create a new access key. ", + "refs": { + "CreateAccessKeyResponse$AccessKey": "

    Information about the access key.

    " + } + }, + "AccessKeyLastUsed": { + "base": "

    Contains information about the last time an AWS access key was used.

    This data type is used as a response element in the GetAccessKeyLastUsed action.

    ", + "refs": { + "GetAccessKeyLastUsedResponse$AccessKeyLastUsed": "

    Contains information about the last time the access key was used.

    " + } + }, + "AccessKeyMetadata": { + "base": "

    Contains information about an AWS access key, without its secret key.

    This data type is used as a response element in the ListAccessKeys action.

    ", + "refs": { + "accessKeyMetadataListType$member": null + } + }, + "ActionNameListType": { + "base": null, + "refs": { + "SimulateCustomPolicyRequest$ActionNames": "

    A list of names of API actions to evaluate in the simulation. Each action is evaluated against each resource. Each action must include the service identifier, such as iam:CreateUser.

    ", + "SimulatePrincipalPolicyRequest$ActionNames": "

    A list of names of API actions to evaluate in the simulation. Each action is evaluated for each resource. Each action must include the service identifier, such as iam:CreateUser.

    " + } + }, + "ActionNameType": { + "base": null, + "refs": { + "ActionNameListType$member": null, + "EvaluationResult$EvalActionName": "

    The name of the API action tested on the indicated resource.

    " + } + }, + "AddClientIDToOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "AddRoleToInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "AddUserToGroupRequest": { + "base": null, + "refs": { + } + }, + "AttachGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "AttachRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "AttachUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "AttachedPolicy": { + "base": "

    Contains information about an attached policy.

    An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails actions.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "attachedPoliciesListType$member": null + } + }, + "BootstrapDatum": { + "base": null, + "refs": { + "VirtualMFADevice$Base32StringSeed": "

    The Base32 seed defined as specified in RFC3548. The Base32StringSeed is Base64-encoded.

    ", + "VirtualMFADevice$QRCodePNG": "

    A QR code PNG image that encodes otpauth://totp/$virtualMFADeviceName@$AccountName?secret=$Base32String where $virtualMFADeviceName is one of the create call arguments, AccountName is the user name if set (otherwise, the account ID), and Base32String is the seed in Base32 format. The Base32String value is Base64-encoded.

    " + } + }, + "ChangePasswordRequest": { + "base": null, + "refs": { + } + }, + "ColumnNumber": { + "base": null, + "refs": { + "Position$Column": "

    The column in the line containing the specified position in the document.

    " + } + }, + "ContextEntry": { + "base": "

    Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

    This data type is used as an input parameter to SimulatePolicy.

    ", + "refs": { + "ContextEntryListType$member": null + } + }, + "ContextEntryListType": { + "base": null, + "refs": { + "SimulateCustomPolicyRequest$ContextEntries": "

    A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated by a Condition element in one of the simulated IAM permission policies, the corresponding value is supplied.

    ", + "SimulatePrincipalPolicyRequest$ContextEntries": "

    A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated by a Condition element in one of the simulated policies, the corresponding value is supplied.

    " + } + }, + "ContextKeyNameType": { + "base": null, + "refs": { + "ContextEntry$ContextKeyName": "

    The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId.

    ", + "ContextKeyNamesResultListType$member": null + } + }, + "ContextKeyNamesResultListType": { + "base": null, + "refs": { + "EvaluationResult$MissingContextValues": "

    A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

    If the response includes any keys in this list, then the reported results might be untrustworthy because the simulation could not completely evaluate all of the conditions specified in the policies that would occur in a real world request.

    ", + "GetContextKeysForPolicyResponse$ContextKeyNames": "

    The list of context keys that are used in the Condition elements of the input policies.

    ", + "ResourceSpecificResult$MissingContextValues": "

    A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

    " + } + }, + "ContextKeyTypeEnum": { + "base": null, + "refs": { + "ContextEntry$ContextKeyType": "

    The data type of the value (or values) specified in the ContextKeyValues parameter.

    " + } + }, + "ContextKeyValueListType": { + "base": null, + "refs": { + "ContextEntry$ContextKeyValues": "

    The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy.

    " + } + }, + "ContextKeyValueType": { + "base": null, + "refs": { + "ContextKeyValueListType$member": null + } + }, + "CreateAccessKeyRequest": { + "base": null, + "refs": { + } + }, + "CreateAccessKeyResponse": { + "base": "

    Contains the response to a successful CreateAccessKey request.

    ", + "refs": { + } + }, + "CreateAccountAliasRequest": { + "base": null, + "refs": { + } + }, + "CreateGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateGroupResponse": { + "base": "

    Contains the response to a successful CreateGroup request.

    ", + "refs": { + } + }, + "CreateInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "CreateInstanceProfileResponse": { + "base": "

    Contains the response to a successful CreateInstanceProfile request.

    ", + "refs": { + } + }, + "CreateLoginProfileRequest": { + "base": null, + "refs": { + } + }, + "CreateLoginProfileResponse": { + "base": "

    Contains the response to a successful CreateLoginProfile request.

    ", + "refs": { + } + }, + "CreateOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "CreateOpenIDConnectProviderResponse": { + "base": "

    Contains the response to a successful CreateOpenIDConnectProvider request.

    ", + "refs": { + } + }, + "CreatePolicyRequest": { + "base": null, + "refs": { + } + }, + "CreatePolicyResponse": { + "base": "

    Contains the response to a successful CreatePolicy request.

    ", + "refs": { + } + }, + "CreatePolicyVersionRequest": { + "base": null, + "refs": { + } + }, + "CreatePolicyVersionResponse": { + "base": "

    Contains the response to a successful CreatePolicyVersion request.

    ", + "refs": { + } + }, + "CreateRoleRequest": { + "base": null, + "refs": { + } + }, + "CreateRoleResponse": { + "base": "

    Contains the response to a successful CreateRole request.

    ", + "refs": { + } + }, + "CreateSAMLProviderRequest": { + "base": null, + "refs": { + } + }, + "CreateSAMLProviderResponse": { + "base": "

    Contains the response to a successful CreateSAMLProvider request.

    ", + "refs": { + } + }, + "CreateUserRequest": { + "base": null, + "refs": { + } + }, + "CreateUserResponse": { + "base": "

    Contains the response to a successful CreateUser request.

    ", + "refs": { + } + }, + "CreateVirtualMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "CreateVirtualMFADeviceResponse": { + "base": "

    Contains the response to a successful CreateVirtualMFADevice request.

    ", + "refs": { + } + }, + "CredentialReportExpiredException": { + "base": "

    The request was rejected because the most recent credential report has expired. To generate a new credential report, use GenerateCredentialReport. For more information about credential report expiration, see Getting Credential Reports in the IAM User Guide.

    ", + "refs": { + } + }, + "CredentialReportNotPresentException": { + "base": "

    The request was rejected because the credential report does not exist. To generate a credential report, use GenerateCredentialReport.

    ", + "refs": { + } + }, + "CredentialReportNotReadyException": { + "base": "

    The request was rejected because the credential report is still being generated.

    ", + "refs": { + } + }, + "DeactivateMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "DeleteAccessKeyRequest": { + "base": null, + "refs": { + } + }, + "DeleteAccountAliasRequest": { + "base": null, + "refs": { + } + }, + "DeleteConflictException": { + "base": "

    The request was rejected because it attempted to delete a resource that has attached subordinate entities. The error message describes these entities.

    ", + "refs": { + } + }, + "DeleteGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "DeleteLoginProfileRequest": { + "base": null, + "refs": { + } + }, + "DeleteOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "DeletePolicyRequest": { + "base": null, + "refs": { + } + }, + "DeletePolicyVersionRequest": { + "base": null, + "refs": { + } + }, + "DeleteRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteRoleRequest": { + "base": null, + "refs": { + } + }, + "DeleteSAMLProviderRequest": { + "base": null, + "refs": { + } + }, + "DeleteSSHPublicKeyRequest": { + "base": null, + "refs": { + } + }, + "DeleteServerCertificateRequest": { + "base": null, + "refs": { + } + }, + "DeleteSigningCertificateRequest": { + "base": null, + "refs": { + } + }, + "DeleteUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteUserRequest": { + "base": null, + "refs": { + } + }, + "DeleteVirtualMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "DetachGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "DetachRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "DetachUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "DuplicateCertificateException": { + "base": "

    The request was rejected because the same certificate is associated with an IAM user in the account.

    ", + "refs": { + } + }, + "DuplicateSSHPublicKeyException": { + "base": "

    The request was rejected because the SSH public key is already associated with the specified IAM user.

    ", + "refs": { + } + }, + "EnableMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "EntityAlreadyExistsException": { + "base": "

    The request was rejected because it attempted to create a resource that already exists.

    ", + "refs": { + } + }, + "EntityTemporarilyUnmodifiableException": { + "base": "

    The request was rejected because it referenced an entity that is temporarily unmodifiable, such as a user name that was deleted and then recreated. The error indicates that the request is likely to succeed if you try again after waiting several minutes. The error message describes the entity.

    ", + "refs": { + } + }, + "EntityType": { + "base": null, + "refs": { + "ListEntitiesForPolicyRequest$EntityFilter": "

    The entity type to use for filtering the results.

    For example, when EntityFilter is Role, only the roles that are attached to the specified policy are returned. This parameter is optional. If it is not included, all attached entities (users, groups, and roles) are returned.

    ", + "entityListType$member": null + } + }, + "EvalDecisionDetailsType": { + "base": null, + "refs": { + "EvaluationResult$EvalDecisionDetails": "

    Additional details about the results of the evaluation decision. When there are both IAM policies and resource policies, this parameter explains how each set of policies contributes to the final evaluation decision. When simulating cross-account access to a resource, both the resource-based policy and the caller's IAM policy must grant access. See How IAM Roles Differ from Resource-based Policies

    ", + "ResourceSpecificResult$EvalDecisionDetails": "

    Additional details about the results of the evaluation decision. When there are both IAM policies and resource policies, this parameter explains how each set of policies contributes to the final evaluation decision. When simulating cross-account access to a resource, both the resource-based policy and the caller's IAM policy must grant access.

    " + } + }, + "EvalDecisionSourceType": { + "base": null, + "refs": { + "EvalDecisionDetailsType$key": null + } + }, + "EvaluationResult": { + "base": "

    Contains the results of a simulation.

    This data type is used by the return parameter of SimulatePolicy.

    ", + "refs": { + "EvaluationResultsListType$member": null + } + }, + "EvaluationResultsListType": { + "base": null, + "refs": { + "SimulatePolicyResponse$EvaluationResults": "

    The results of the simulation.

    " + } + }, + "GenerateCredentialReportResponse": { + "base": "

    Contains the response to a successful GenerateCredentialReport request.

    ", + "refs": { + } + }, + "GetAccessKeyLastUsedRequest": { + "base": null, + "refs": { + } + }, + "GetAccessKeyLastUsedResponse": { + "base": "

    Contains the response to a successful GetAccessKeyLastUsed request. It is also returned as a member of the AccessKeyMetaData structure returned by the ListAccessKeys action.

    ", + "refs": { + } + }, + "GetAccountAuthorizationDetailsRequest": { + "base": null, + "refs": { + } + }, + "GetAccountAuthorizationDetailsResponse": { + "base": "

    Contains the response to a successful GetAccountAuthorizationDetails request.

    ", + "refs": { + } + }, + "GetAccountPasswordPolicyResponse": { + "base": "

    Contains the response to a successful GetAccountPasswordPolicy request.

    ", + "refs": { + } + }, + "GetAccountSummaryResponse": { + "base": "

    Contains the response to a successful GetAccountSummary request.

    ", + "refs": { + } + }, + "GetContextKeysForCustomPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetContextKeysForPolicyResponse": { + "base": "

    Contains the response to a successful GetContextKeysForPrincipalPolicy or GetContextKeysForCustomPolicy request.

    ", + "refs": { + } + }, + "GetContextKeysForPrincipalPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetCredentialReportResponse": { + "base": "

    Contains the response to a successful GetCredentialReport request.

    ", + "refs": { + } + }, + "GetGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetGroupPolicyResponse": { + "base": "

    Contains the response to a successful GetGroupPolicy request.

    ", + "refs": { + } + }, + "GetGroupRequest": { + "base": null, + "refs": { + } + }, + "GetGroupResponse": { + "base": "

    Contains the response to a successful GetGroup request.

    ", + "refs": { + } + }, + "GetInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "GetInstanceProfileResponse": { + "base": "

    Contains the response to a successful GetInstanceProfile request.

    ", + "refs": { + } + }, + "GetLoginProfileRequest": { + "base": null, + "refs": { + } + }, + "GetLoginProfileResponse": { + "base": "

    Contains the response to a successful GetLoginProfile request.

    ", + "refs": { + } + }, + "GetOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "GetOpenIDConnectProviderResponse": { + "base": "

    Contains the response to a successful GetOpenIDConnectProvider request.

    ", + "refs": { + } + }, + "GetPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetPolicyResponse": { + "base": "

    Contains the response to a successful GetPolicy request.

    ", + "refs": { + } + }, + "GetPolicyVersionRequest": { + "base": null, + "refs": { + } + }, + "GetPolicyVersionResponse": { + "base": "

    Contains the response to a successful GetPolicyVersion request.

    ", + "refs": { + } + }, + "GetRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "GetRolePolicyResponse": { + "base": "

    Contains the response to a successful GetRolePolicy request.

    ", + "refs": { + } + }, + "GetRoleRequest": { + "base": null, + "refs": { + } + }, + "GetRoleResponse": { + "base": "

    Contains the response to a successful GetRole request.

    ", + "refs": { + } + }, + "GetSAMLProviderRequest": { + "base": null, + "refs": { + } + }, + "GetSAMLProviderResponse": { + "base": "

    Contains the response to a successful GetSAMLProvider request.

    ", + "refs": { + } + }, + "GetSSHPublicKeyRequest": { + "base": null, + "refs": { + } + }, + "GetSSHPublicKeyResponse": { + "base": "

    Contains the response to a successful GetSSHPublicKey request.

    ", + "refs": { + } + }, + "GetServerCertificateRequest": { + "base": null, + "refs": { + } + }, + "GetServerCertificateResponse": { + "base": "

    Contains the response to a successful GetServerCertificate request.

    ", + "refs": { + } + }, + "GetUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetUserPolicyResponse": { + "base": "

    Contains the response to a successful GetUserPolicy request.

    ", + "refs": { + } + }, + "GetUserRequest": { + "base": null, + "refs": { + } + }, + "GetUserResponse": { + "base": "

    Contains the response to a successful GetUser request.

    ", + "refs": { + } + }, + "Group": { + "base": "

    Contains information about an IAM group entity.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "CreateGroupResponse$Group": "

    Information about the group.

    ", + "GetGroupResponse$Group": "

    Information about the group.

    ", + "groupListType$member": null + } + }, + "GroupDetail": { + "base": "

    Contains information about an IAM group, including all of the group's policies.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    ", + "refs": { + "groupDetailListType$member": null + } + }, + "InstanceProfile": { + "base": "

    Contains information about an instance profile.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "CreateInstanceProfileResponse$InstanceProfile": "

    Information about the instance profile.

    ", + "GetInstanceProfileResponse$InstanceProfile": "

    Information about the instance profile.

    ", + "instanceProfileListType$member": null + } + }, + "InvalidAuthenticationCodeException": { + "base": "

    The request was rejected because the authentication code was not recognized. The error message describes the specific error.

    ", + "refs": { + } + }, + "InvalidCertificateException": { + "base": "

    The request was rejected because the certificate is invalid.

    ", + "refs": { + } + }, + "InvalidInputException": { + "base": "

    The request was rejected because an invalid or out-of-range value was supplied for an input parameter.

    ", + "refs": { + } + }, + "InvalidPublicKeyException": { + "base": "

    The request was rejected because the public key is malformed or otherwise invalid.

    ", + "refs": { + } + }, + "InvalidUserTypeException": { + "base": "

    The request was rejected because the type of user for the transaction was incorrect.

    ", + "refs": { + } + }, + "KeyPairMismatchException": { + "base": "

    The request was rejected because the public key certificate and the private key do not match.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    The request was rejected because it attempted to create resources beyond the current AWS account limits. The error message describes the limit exceeded.

    ", + "refs": { + } + }, + "LineNumber": { + "base": null, + "refs": { + "Position$Line": "

    The line containing the specified position in the document.

    " + } + }, + "ListAccessKeysRequest": { + "base": null, + "refs": { + } + }, + "ListAccessKeysResponse": { + "base": "

    Contains the response to a successful ListAccessKeys request.

    ", + "refs": { + } + }, + "ListAccountAliasesRequest": { + "base": null, + "refs": { + } + }, + "ListAccountAliasesResponse": { + "base": "

    Contains the response to a successful ListAccountAliases request.

    ", + "refs": { + } + }, + "ListAttachedGroupPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedGroupPoliciesResponse": { + "base": "

    Contains the response to a successful ListAttachedGroupPolicies request.

    ", + "refs": { + } + }, + "ListAttachedRolePoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedRolePoliciesResponse": { + "base": "

    Contains the response to a successful ListAttachedRolePolicies request.

    ", + "refs": { + } + }, + "ListAttachedUserPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedUserPoliciesResponse": { + "base": "

    Contains the response to a successful ListAttachedUserPolicies request.

    ", + "refs": { + } + }, + "ListEntitiesForPolicyRequest": { + "base": null, + "refs": { + } + }, + "ListEntitiesForPolicyResponse": { + "base": "

    Contains the response to a successful ListEntitiesForPolicy request.

    ", + "refs": { + } + }, + "ListGroupPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListGroupPoliciesResponse": { + "base": "

    Contains the response to a successful ListGroupPolicies request.

    ", + "refs": { + } + }, + "ListGroupsForUserRequest": { + "base": null, + "refs": { + } + }, + "ListGroupsForUserResponse": { + "base": "

    Contains the response to a successful ListGroupsForUser request.

    ", + "refs": { + } + }, + "ListGroupsRequest": { + "base": null, + "refs": { + } + }, + "ListGroupsResponse": { + "base": "

    Contains the response to a successful ListGroups request.

    ", + "refs": { + } + }, + "ListInstanceProfilesForRoleRequest": { + "base": null, + "refs": { + } + }, + "ListInstanceProfilesForRoleResponse": { + "base": "

    Contains the response to a successful ListInstanceProfilesForRole request.

    ", + "refs": { + } + }, + "ListInstanceProfilesRequest": { + "base": null, + "refs": { + } + }, + "ListInstanceProfilesResponse": { + "base": "

    Contains the response to a successful ListInstanceProfiles request.

    ", + "refs": { + } + }, + "ListMFADevicesRequest": { + "base": null, + "refs": { + } + }, + "ListMFADevicesResponse": { + "base": "

    Contains the response to a successful ListMFADevices request.

    ", + "refs": { + } + }, + "ListOpenIDConnectProvidersRequest": { + "base": null, + "refs": { + } + }, + "ListOpenIDConnectProvidersResponse": { + "base": "

    Contains the response to a successful ListOpenIDConnectProviders request.

    ", + "refs": { + } + }, + "ListPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListPoliciesResponse": { + "base": "

    Contains the response to a successful ListPolicies request.

    ", + "refs": { + } + }, + "ListPolicyVersionsRequest": { + "base": null, + "refs": { + } + }, + "ListPolicyVersionsResponse": { + "base": "

    Contains the response to a successful ListPolicyVersions request.

    ", + "refs": { + } + }, + "ListRolePoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListRolePoliciesResponse": { + "base": "

    Contains the response to a successful ListRolePolicies request.

    ", + "refs": { + } + }, + "ListRolesRequest": { + "base": null, + "refs": { + } + }, + "ListRolesResponse": { + "base": "

    Contains the response to a successful ListRoles request.

    ", + "refs": { + } + }, + "ListSAMLProvidersRequest": { + "base": null, + "refs": { + } + }, + "ListSAMLProvidersResponse": { + "base": "

    Contains the response to a successful ListSAMLProviders request.

    ", + "refs": { + } + }, + "ListSSHPublicKeysRequest": { + "base": null, + "refs": { + } + }, + "ListSSHPublicKeysResponse": { + "base": "

    Contains the response to a successful ListSSHPublicKeys request.

    ", + "refs": { + } + }, + "ListServerCertificatesRequest": { + "base": null, + "refs": { + } + }, + "ListServerCertificatesResponse": { + "base": "

    Contains the response to a successful ListServerCertificates request.

    ", + "refs": { + } + }, + "ListSigningCertificatesRequest": { + "base": null, + "refs": { + } + }, + "ListSigningCertificatesResponse": { + "base": "

    Contains the response to a successful ListSigningCertificates request.

    ", + "refs": { + } + }, + "ListUserPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListUserPoliciesResponse": { + "base": "

    Contains the response to a successful ListUserPolicies request.

    ", + "refs": { + } + }, + "ListUsersRequest": { + "base": null, + "refs": { + } + }, + "ListUsersResponse": { + "base": "

    Contains the response to a successful ListUsers request.

    ", + "refs": { + } + }, + "ListVirtualMFADevicesRequest": { + "base": null, + "refs": { + } + }, + "ListVirtualMFADevicesResponse": { + "base": "

    Contains the response to a successful ListVirtualMFADevices request.

    ", + "refs": { + } + }, + "LoginProfile": { + "base": "

    Contains the user name and password create date for a user.

    This data type is used as a response element in the CreateLoginProfile and GetLoginProfile actions.

    ", + "refs": { + "CreateLoginProfileResponse$LoginProfile": "

    The user name and password create date.

    ", + "GetLoginProfileResponse$LoginProfile": "

    The user name and password create date for the user.

    " + } + }, + "MFADevice": { + "base": "

    Contains information about an MFA device.

    This data type is used as a response element in the ListMFADevices action.

    ", + "refs": { + "mfaDeviceListType$member": null + } + }, + "MalformedCertificateException": { + "base": "

    The request was rejected because the certificate was malformed or expired. The error message describes the specific error.

    ", + "refs": { + } + }, + "MalformedPolicyDocumentException": { + "base": "

    The request was rejected because the policy document was malformed. The error message describes the specific error.

    ", + "refs": { + } + }, + "ManagedPolicyDetail": { + "base": "

    Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    For more information about managed policies, see Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "ManagedPolicyDetailListType$member": null + } + }, + "ManagedPolicyDetailListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsResponse$Policies": "

    A list containing information about managed policies.

    " + } + }, + "NoSuchEntityException": { + "base": "

    The request was rejected because it referenced an entity that does not exist. The error message describes the entity.

    ", + "refs": { + } + }, + "OpenIDConnectProviderListEntry": { + "base": "

    Contains the Amazon Resource Name (ARN) for an IAM OpenID Connect provider.

    ", + "refs": { + "OpenIDConnectProviderListType$member": null + } + }, + "OpenIDConnectProviderListType": { + "base": "

    Contains a list of IAM OpenID Connect providers.

    ", + "refs": { + "ListOpenIDConnectProvidersResponse$OpenIDConnectProviderList": "

    The list of IAM OpenID Connect providers in the AWS account.

    " + } + }, + "OpenIDConnectProviderUrlType": { + "base": "

    Contains a URL that specifies the endpoint for an OpenID Connect provider.

    ", + "refs": { + "CreateOpenIDConnectProviderRequest$Url": "

    The URL of the identity provider. The URL must begin with \"https://\" and should correspond to the iss claim in the provider's OpenID Connect ID tokens. Per the OIDC standard, path components are allowed but query parameters are not. Typically the URL consists of only a host name, like \"https://server.example.org\" or \"https://example.com\".

    You cannot register the same provider multiple times in a single AWS account. If you try to submit a URL that has already been used for an OpenID Connect provider in the AWS account, you will get an error.

    ", + "GetOpenIDConnectProviderResponse$Url": "

    The URL that the IAM OpenID Connect provider is associated with. For more information, see CreateOpenIDConnectProvider.

    " + } + }, + "PasswordPolicy": { + "base": "

    Contains information about the account password policy.

    This data type is used as a response element in the GetAccountPasswordPolicy action.

    ", + "refs": { + "GetAccountPasswordPolicyResponse$PasswordPolicy": null + } + }, + "PasswordPolicyViolationException": { + "base": "

    The request was rejected because the provided password did not meet the requirements imposed by the account password policy.

    ", + "refs": { + } + }, + "Policy": { + "base": "

    Contains information about a managed policy.

    This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies actions.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "CreatePolicyResponse$Policy": "

    Information about the policy.

    ", + "GetPolicyResponse$Policy": "

    Information about the policy.

    ", + "policyListType$member": null + } + }, + "PolicyDetail": { + "base": "

    Contains information about an IAM policy, including the policy document.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    ", + "refs": { + "policyDetailListType$member": null + } + }, + "PolicyEvaluationDecisionType": { + "base": null, + "refs": { + "EvalDecisionDetailsType$value": null, + "EvaluationResult$EvalDecision": "

    The result of the simulation.

    ", + "ResourceSpecificResult$EvalResourceDecision": "

    The result of the simulation of the simulated API action on the resource specified in EvalResourceName.

    " + } + }, + "PolicyEvaluationException": { + "base": "

    The request failed because a provided policy could not be successfully evaluated. An additional detail message indicates the source of the failure.

    ", + "refs": { + } + }, + "PolicyGroup": { + "base": "

    Contains information about a group that a managed policy is attached to.

    This data type is used as a response element in the ListEntitiesForPolicy action.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "PolicyGroupListType$member": null + } + }, + "PolicyGroupListType": { + "base": null, + "refs": { + "ListEntitiesForPolicyResponse$PolicyGroups": "

    A list of groups that the policy is attached to.

    " + } + }, + "PolicyIdentifierType": { + "base": null, + "refs": { + "Statement$SourcePolicyId": "

    The identifier of the policy that was provided as an input.

    " + } + }, + "PolicyRole": { + "base": "

    Contains information about a role that a managed policy is attached to.

    This data type is used as a response element in the ListEntitiesForPolicy action.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "PolicyRoleListType$member": null + } + }, + "PolicyRoleListType": { + "base": null, + "refs": { + "ListEntitiesForPolicyResponse$PolicyRoles": "

    A list of roles that the policy is attached to.

    " + } + }, + "PolicySourceType": { + "base": null, + "refs": { + "Statement$SourcePolicyType": "

    The type of the policy.

    " + } + }, + "PolicyUser": { + "base": "

    Contains information about a user that a managed policy is attached to.

    This data type is used as a response element in the ListEntitiesForPolicy action.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "PolicyUserListType$member": null + } + }, + "PolicyUserListType": { + "base": null, + "refs": { + "ListEntitiesForPolicyResponse$PolicyUsers": "

    A list of users that the policy is attached to.

    " + } + }, + "PolicyVersion": { + "base": "

    Contains information about a version of a managed policy.

    This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails actions.

    For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

    ", + "refs": { + "CreatePolicyVersionResponse$PolicyVersion": "

    Information about the policy version.

    ", + "GetPolicyVersionResponse$PolicyVersion": "

    Information about the policy version.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    ", + "policyDocumentVersionListType$member": null + } + }, + "Position": { + "base": "

    Contains the row and column of a location of a Statement element in a policy document.

    This data type is used as a member of the Statement type.

    ", + "refs": { + "Statement$StartPosition": "

    The row and column of the beginning of the Statement in an IAM policy.

    ", + "Statement$EndPosition": "

    The row and column of the end of a Statement in an IAM policy.

    " + } + }, + "PutGroupPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "PutUserPolicyRequest": { + "base": null, + "refs": { + } + }, + "RemoveClientIDFromOpenIDConnectProviderRequest": { + "base": null, + "refs": { + } + }, + "RemoveRoleFromInstanceProfileRequest": { + "base": null, + "refs": { + } + }, + "RemoveUserFromGroupRequest": { + "base": null, + "refs": { + } + }, + "ReportContentType": { + "base": null, + "refs": { + "GetCredentialReportResponse$Content": "

    Contains the credential report. The report is Base64-encoded.

    " + } + }, + "ReportFormatType": { + "base": null, + "refs": { + "GetCredentialReportResponse$ReportFormat": "

    The format (MIME type) of the credential report.

    " + } + }, + "ReportStateDescriptionType": { + "base": null, + "refs": { + "GenerateCredentialReportResponse$Description": "

    Information about the credential report.

    " + } + }, + "ReportStateType": { + "base": null, + "refs": { + "GenerateCredentialReportResponse$State": "

    Information about the state of the credential report.

    " + } + }, + "ResourceHandlingOptionType": { + "base": null, + "refs": { + "SimulateCustomPolicyRequest$ResourceHandlingOption": "

    Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.

    Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide.

    • EC2-Classic-InstanceStore

      instance, image, security-group

    • EC2-Classic-EBS

      instance, image, security-group, volume

    • EC2-VPC-InstanceStore

      instance, image, security-group, network-interface

    • EC2-VPC-InstanceStore-Subnet

      instance, image, security-group, network-interface, subnet

    • EC2-VPC-EBS

      instance, image, security-group, network-interface, volume

    • EC2-VPC-EBS-Subnet

      instance, image, security-group, network-interface, subnet, volume

    ", + "SimulatePrincipalPolicyRequest$ResourceHandlingOption": "

    Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.

    Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide.

    • EC2-Classic-InstanceStore

      instance, image, security-group

    • EC2-Classic-EBS

      instance, image, security-group, volume

    • EC2-VPC-InstanceStore

      instance, image, security-group, network-interface

    • EC2-VPC-InstanceStore-Subnet

      instance, image, security-group, network-interface, subnet

    • EC2-VPC-EBS

      instance, image, security-group, network-interface, volume

    • EC2-VPC-EBS-Subnet

      instance, image, security-group, network-interface, subnet, volume

    " + } + }, + "ResourceNameListType": { + "base": null, + "refs": { + "SimulateCustomPolicyRequest$ResourceArns": "

    A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.

    The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.

    If you include a ResourcePolicy, then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.

    ", + "SimulatePrincipalPolicyRequest$ResourceArns": "

    A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.

    The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.

    " + } + }, + "ResourceNameType": { + "base": null, + "refs": { + "EvaluationResult$EvalResourceName": "

    The ARN of the resource that the indicated API action was tested on.

    ", + "ResourceNameListType$member": null, + "ResourceSpecificResult$EvalResourceName": "

    The name of the simulated resource, in Amazon Resource Name (ARN) format.

    ", + "SimulateCustomPolicyRequest$ResourceOwner": "

    An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn. This parameter is required only if you specify a resource-based policy and the account that owns the resource is different from the account that owns the simulated calling user CallerArn.

    ", + "SimulateCustomPolicyRequest$CallerArn": "

    The ARN of the user that you want to use as the simulated caller of the APIs. CallerArn is required if you include a ResourcePolicy so that the policy's Principal element has a value to use in evaluating the policy.

    You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.

    ", + "SimulatePrincipalPolicyRequest$ResourceOwner": "

    An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn. This parameter is required only if you specify a resource-based policy and the account that owns the resource is different from the account that owns the simulated calling user CallerArn.

    ", + "SimulatePrincipalPolicyRequest$CallerArn": "

    The ARN of the user that you want to specify as the simulated caller of the APIs. If you do not specify a CallerArn, it defaults to the ARN of the user that you specify in PolicySourceArn, if you specified a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result is that you simulate calling the APIs as Bob, as if Bob had David's policies.

    You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.

    CallerArn is required if you include a ResourcePolicy and the PolicySourceArn is not the ARN for an IAM user. This is required so that the resource-based policy's Principal element has a value to use in evaluating the policy.

    " + } + }, + "ResourceSpecificResult": { + "base": "

    Contains the result of the simulation of a single API action call on a single resource.

    This data type is used by a member of the EvaluationResult data type.

    ", + "refs": { + "ResourceSpecificResultListType$member": null + } + }, + "ResourceSpecificResultListType": { + "base": null, + "refs": { + "EvaluationResult$ResourceSpecificResults": "

    The individual results of the simulation of the API action specified in EvalActionName on each resource.

    " + } + }, + "ResyncMFADeviceRequest": { + "base": null, + "refs": { + } + }, + "Role": { + "base": "

    Contains information about an IAM role.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "CreateRoleResponse$Role": "

    Information about the role.

    ", + "GetRoleResponse$Role": "

    Information about the role.

    ", + "roleListType$member": null + } + }, + "RoleDetail": { + "base": "

    Contains information about an IAM role, including all of the role's policies.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    ", + "refs": { + "roleDetailListType$member": null + } + }, + "SAMLMetadataDocumentType": { + "base": null, + "refs": { + "CreateSAMLProviderRequest$SAMLMetadataDocument": "

    An XML document generated by an identity provider (IdP) that supports SAML 2.0. The document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP.

    For more information, see About SAML 2.0-based Federation in the IAM User Guide.

    ", + "GetSAMLProviderResponse$SAMLMetadataDocument": "

    The XML metadata document that includes information about an identity provider.

    ", + "UpdateSAMLProviderRequest$SAMLMetadataDocument": "

    An XML document generated by an identity provider (IdP) that supports SAML 2.0. The document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP.

    " + } + }, + "SAMLProviderListEntry": { + "base": "

    Contains the list of SAML providers for this account.

    ", + "refs": { + "SAMLProviderListType$member": null + } + }, + "SAMLProviderListType": { + "base": null, + "refs": { + "ListSAMLProvidersResponse$SAMLProviderList": "

    The list of SAML providers for this account.

    " + } + }, + "SAMLProviderNameType": { + "base": null, + "refs": { + "CreateSAMLProviderRequest$Name": "

    The name of the provider to create.

    " + } + }, + "SSHPublicKey": { + "base": "

    Contains information about an SSH public key.

    This data type is used as a response element in the GetSSHPublicKey and UploadSSHPublicKey actions.

    ", + "refs": { + "GetSSHPublicKeyResponse$SSHPublicKey": "

    Information about the SSH public key.

    ", + "UploadSSHPublicKeyResponse$SSHPublicKey": "

    Contains information about the SSH public key.

    " + } + }, + "SSHPublicKeyListType": { + "base": null, + "refs": { + "ListSSHPublicKeysResponse$SSHPublicKeys": "

    A list of SSH public keys.

    " + } + }, + "SSHPublicKeyMetadata": { + "base": "

    Contains information about an SSH public key, without the key's body or fingerprint.

    This data type is used as a response element in the ListSSHPublicKeys action.

    ", + "refs": { + "SSHPublicKeyListType$member": null + } + }, + "ServerCertificate": { + "base": "

    Contains information about a server certificate.

    This data type is used as a response element in the GetServerCertificate action.

    ", + "refs": { + "GetServerCertificateResponse$ServerCertificate": "

    Information about the server certificate.

    " + } + }, + "ServerCertificateMetadata": { + "base": "

    Contains information about a server certificate without its certificate body, certificate chain, and private key.

    This data type is used as a response element in the UploadServerCertificate and ListServerCertificates actions.

    ", + "refs": { + "ServerCertificate$ServerCertificateMetadata": "

    The meta information of the server certificate, such as its name, path, ID, and ARN.

    ", + "UploadServerCertificateResponse$ServerCertificateMetadata": "

    The meta information of the uploaded server certificate without its certificate body, certificate chain, and private key.

    ", + "serverCertificateMetadataListType$member": null + } + }, + "ServiceFailureException": { + "base": "

    The request processing has failed because of an unknown error, exception or failure.

    ", + "refs": { + } + }, + "SetDefaultPolicyVersionRequest": { + "base": null, + "refs": { + } + }, + "SigningCertificate": { + "base": "

    Contains information about an X.509 signing certificate.

    This data type is used as a response element in the UploadSigningCertificate and ListSigningCertificates actions.

    ", + "refs": { + "UploadSigningCertificateResponse$Certificate": "

    Information about the certificate.

    ", + "certificateListType$member": null + } + }, + "SimulateCustomPolicyRequest": { + "base": null, + "refs": { + } + }, + "SimulatePolicyResponse": { + "base": "

    Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy request.

    ", + "refs": { + } + }, + "SimulatePrincipalPolicyRequest": { + "base": null, + "refs": { + } + }, + "SimulationPolicyListType": { + "base": null, + "refs": { + "GetContextKeysForCustomPolicyRequest$PolicyInputList": "

    A list of policies for which you want a list of context keys used in Condition elements. Each document is specified as a string containing the complete, valid JSON text of an IAM policy.

    ", + "GetContextKeysForPrincipalPolicyRequest$PolicyInputList": "

    An optional list of additional policies for which you want a list of context keys used in Condition elements.

    ", + "SimulateCustomPolicyRequest$PolicyInputList": "

    A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be \"scope-down\" policies, such as you could include in a call to GetFederationToken or one of the AssumeRole APIs to restrict what a user can do while using the temporary credentials.

    ", + "SimulatePrincipalPolicyRequest$PolicyInputList": "

    An optional list of additional policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy.

    " + } + }, + "Statement": { + "base": "

    Contains a reference to a Statement element in a policy document that determines the result of the simulation.

    This data type is used by the MatchedStatements member of the EvaluationResult type.

    ", + "refs": { + "StatementListType$member": null + } + }, + "StatementListType": { + "base": null, + "refs": { + "EvaluationResult$MatchedStatements": "

    A list of the statements in the input policies that determine the result for this scenario. Remember that even if multiple statements allow the action on the resource, if only one statement denies that action, then the explicit deny overrides any allow, and the deny statement is the only entry included in the result.

    ", + "ResourceSpecificResult$MatchedStatements": "

    A list of the statements in the input policies that determine the result for this part of the simulation. Remember that even if multiple statements allow the action on the resource, if any statement denies that action, then the explicit deny overrides any allow, and the deny statement is the only entry included in the result.

    " + } + }, + "UnrecognizedPublicKeyEncodingException": { + "base": "

    The request was rejected because the public key encoding format is unsupported or unrecognized.

    ", + "refs": { + } + }, + "UpdateAccessKeyRequest": { + "base": null, + "refs": { + } + }, + "UpdateAccountPasswordPolicyRequest": { + "base": null, + "refs": { + } + }, + "UpdateAssumeRolePolicyRequest": { + "base": null, + "refs": { + } + }, + "UpdateGroupRequest": { + "base": null, + "refs": { + } + }, + "UpdateLoginProfileRequest": { + "base": null, + "refs": { + } + }, + "UpdateOpenIDConnectProviderThumbprintRequest": { + "base": null, + "refs": { + } + }, + "UpdateSAMLProviderRequest": { + "base": null, + "refs": { + } + }, + "UpdateSAMLProviderResponse": { + "base": "

    Contains the response to a successful UpdateSAMLProvider request.

    ", + "refs": { + } + }, + "UpdateSSHPublicKeyRequest": { + "base": null, + "refs": { + } + }, + "UpdateServerCertificateRequest": { + "base": null, + "refs": { + } + }, + "UpdateSigningCertificateRequest": { + "base": null, + "refs": { + } + }, + "UpdateUserRequest": { + "base": null, + "refs": { + } + }, + "UploadSSHPublicKeyRequest": { + "base": null, + "refs": { + } + }, + "UploadSSHPublicKeyResponse": { + "base": "

    Contains the response to a successful UploadSSHPublicKey request.

    ", + "refs": { + } + }, + "UploadServerCertificateRequest": { + "base": null, + "refs": { + } + }, + "UploadServerCertificateResponse": { + "base": "

    Contains the response to a successful UploadServerCertificate request.

    ", + "refs": { + } + }, + "UploadSigningCertificateRequest": { + "base": null, + "refs": { + } + }, + "UploadSigningCertificateResponse": { + "base": "

    Contains the response to a successful UploadSigningCertificate request.

    ", + "refs": { + } + }, + "User": { + "base": "

    Contains information about an IAM user entity.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "CreateUserResponse$User": "

    Information about the user.

    ", + "GetUserResponse$User": "

    Information about the user.

    ", + "VirtualMFADevice$User": null, + "userListType$member": null + } + }, + "UserDetail": { + "base": "

    Contains information about an IAM user, including all the user's policies and all the IAM groups the user is in.

    This data type is used as a response element in the GetAccountAuthorizationDetails action.

    ", + "refs": { + "userDetailListType$member": null + } + }, + "VirtualMFADevice": { + "base": "

    Contains information about a virtual MFA device.

    ", + "refs": { + "CreateVirtualMFADeviceResponse$VirtualMFADevice": "

    A newly created virtual MFA device.

    ", + "virtualMFADeviceListType$member": null + } + }, + "accessKeyIdType": { + "base": null, + "refs": { + "AccessKey$AccessKeyId": "

    The ID for this access key.

    ", + "AccessKeyMetadata$AccessKeyId": "

    The ID for this access key.

    ", + "DeleteAccessKeyRequest$AccessKeyId": "

    The access key ID for the access key ID and secret access key you want to delete.

    ", + "GetAccessKeyLastUsedRequest$AccessKeyId": "

    The identifier of an access key.

    ", + "UpdateAccessKeyRequest$AccessKeyId": "

    The access key ID of the secret access key you want to update.

    " + } + }, + "accessKeyMetadataListType": { + "base": "

    Contains a list of access key metadata.

    This data type is used as a response element in the ListAccessKeys action.

    ", + "refs": { + "ListAccessKeysResponse$AccessKeyMetadata": "

    A list of access key metadata.

    " + } + }, + "accessKeySecretType": { + "base": null, + "refs": { + "AccessKey$SecretAccessKey": "

    The secret key used to sign requests.

    " + } + }, + "accountAliasListType": { + "base": null, + "refs": { + "ListAccountAliasesResponse$AccountAliases": "

    A list of aliases associated with the account. AWS supports only one alias per account.

    " + } + }, + "accountAliasType": { + "base": null, + "refs": { + "CreateAccountAliasRequest$AccountAlias": "

    The account alias to create.

    ", + "DeleteAccountAliasRequest$AccountAlias": "

    The name of the account alias to delete.

    ", + "accountAliasListType$member": null + } + }, + "arnType": { + "base": "

    The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.

    For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

    ", + "refs": { + "AddClientIDToOpenIDConnectProviderRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider to add the client ID to. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders action.

    ", + "AttachGroupPolicyRequest$PolicyArn": null, + "AttachRolePolicyRequest$PolicyArn": null, + "AttachUserPolicyRequest$PolicyArn": null, + "AttachedPolicy$PolicyArn": null, + "CreateOpenIDConnectProviderResponse$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OpenID Connect provider that was created. For more information, see OpenIDConnectProviderListEntry.

    ", + "CreatePolicyVersionRequest$PolicyArn": null, + "CreateSAMLProviderResponse$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider.

    ", + "DeleteOpenIDConnectProviderRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OpenID Connect provider to delete. You can get a list of OpenID Connect provider ARNs by using the ListOpenIDConnectProviders action.

    ", + "DeletePolicyRequest$PolicyArn": null, + "DeletePolicyVersionRequest$PolicyArn": null, + "DeleteSAMLProviderRequest$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider to delete.

    ", + "DetachGroupPolicyRequest$PolicyArn": null, + "DetachRolePolicyRequest$PolicyArn": null, + "DetachUserPolicyRequest$PolicyArn": null, + "GetContextKeysForPrincipalPolicyRequest$PolicySourceArn": "

    The ARN of a user, group, or role whose policies contain the context keys that you want listed. If you specify a user, the list includes context keys that are found in all policies attached to the user as well as to all groups that the user is a member of. If you pick a group or a role, then it includes only those context keys that are found in policies attached to that entity. Note that all parameters are shown in unencoded form here for clarity, but must be URL encoded to be included as a part of a real HTML request.

    ", + "GetOpenIDConnectProviderRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider to get information for. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders action.

    ", + "GetPolicyRequest$PolicyArn": null, + "GetPolicyVersionRequest$PolicyArn": null, + "GetSAMLProviderRequest$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider to get information about.

    ", + "Group$Arn": "

    The Amazon Resource Name (ARN) specifying the group. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

    ", + "GroupDetail$Arn": null, + "InstanceProfile$Arn": "

    The Amazon Resource Name (ARN) specifying the instance profile. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

    ", + "ListEntitiesForPolicyRequest$PolicyArn": null, + "ListPolicyVersionsRequest$PolicyArn": null, + "ManagedPolicyDetail$Arn": null, + "OpenIDConnectProviderListEntry$Arn": null, + "Policy$Arn": null, + "RemoveClientIDFromOpenIDConnectProviderRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider to remove the client ID from. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders action.

    ", + "Role$Arn": "

    The Amazon Resource Name (ARN) specifying the role. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

    ", + "RoleDetail$Arn": null, + "SAMLProviderListEntry$Arn": "

    The Amazon Resource Name (ARN) of the SAML provider.

    ", + "ServerCertificateMetadata$Arn": "

    The Amazon Resource Name (ARN) specifying the server certificate. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

    ", + "SetDefaultPolicyVersionRequest$PolicyArn": null, + "SimulatePrincipalPolicyRequest$PolicySourceArn": "

    The Amazon Resource Name (ARN) of a user, group, or role whose policies you want to include in the simulation. If you specify a user, group, or role, the simulation includes all policies that are associated with that entity. If you specify a user, the simulation also includes all policies that are attached to any groups the user belongs to.

    ", + "UpdateOpenIDConnectProviderThumbprintRequest$OpenIDConnectProviderArn": "

    The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider to update the thumbprint for. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders action.

    ", + "UpdateSAMLProviderRequest$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider to update.

    ", + "UpdateSAMLProviderResponse$SAMLProviderArn": "

    The Amazon Resource Name (ARN) of the SAML provider that was updated.

    ", + "User$Arn": "

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the Using IAM guide.

    ", + "UserDetail$Arn": null + } + }, + "assignmentStatusType": { + "base": null, + "refs": { + "ListVirtualMFADevicesRequest$AssignmentStatus": "

    The status (unassigned or assigned) of the devices to list. If you do not specify an AssignmentStatus, the action defaults to Any which lists both assigned and unassigned virtual MFA devices.

    " + } + }, + "attachedPoliciesListType": { + "base": null, + "refs": { + "GroupDetail$AttachedManagedPolicies": "

    A list of the managed policies attached to the group.

    ", + "ListAttachedGroupPoliciesResponse$AttachedPolicies": "

    A list of the attached policies.

    ", + "ListAttachedRolePoliciesResponse$AttachedPolicies": "

    A list of the attached policies.

    ", + "ListAttachedUserPoliciesResponse$AttachedPolicies": "

    A list of the attached policies.

    ", + "RoleDetail$AttachedManagedPolicies": "

    A list of managed policies attached to the role. These policies are the role's access (permissions) policies.

    ", + "UserDetail$AttachedManagedPolicies": "

    A list of the managed policies attached to the user.

    " + } + }, + "attachmentCountType": { + "base": null, + "refs": { + "ManagedPolicyDetail$AttachmentCount": "

    The number of principal entities (users, groups, and roles) that the policy is attached to.

    ", + "Policy$AttachmentCount": "

    The number of entities (users, groups, and roles) that the policy is attached to.

    " + } + }, + "authenticationCodeType": { + "base": null, + "refs": { + "EnableMFADeviceRequest$AuthenticationCode1": "

    An authentication code emitted by the device.

    ", + "EnableMFADeviceRequest$AuthenticationCode2": "

    A subsequent authentication code emitted by the device.

    ", + "ResyncMFADeviceRequest$AuthenticationCode1": "

    An authentication code emitted by the device.

    ", + "ResyncMFADeviceRequest$AuthenticationCode2": "

    A subsequent authentication code emitted by the device.

    " + } + }, + "booleanObjectType": { + "base": null, + "refs": { + "PasswordPolicy$HardExpiry": "

    Specifies whether IAM users are prevented from setting a new password after their password has expired.

    ", + "UpdateAccountPasswordPolicyRequest$HardExpiry": "

    Prevents IAM users from setting a new password after their password has expired.

    Default value: false

    ", + "UpdateLoginProfileRequest$PasswordResetRequired": "

    Require the specified user to set a new password on next sign-in.

    " + } + }, + "booleanType": { + "base": null, + "refs": { + "CreateLoginProfileRequest$PasswordResetRequired": "

    Specifies whether the user is required to set a new password on next sign-in.

    ", + "CreatePolicyVersionRequest$SetAsDefault": "

    Specifies whether to set this version as the policy's default version.

    When this parameter is true, the new policy version becomes the operative version; that is, the version that is in effect for the IAM users, groups, and roles that the policy is attached to.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    ", + "GetAccountAuthorizationDetailsResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "GetGroupResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAccessKeysResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAccountAliasesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAttachedGroupPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAttachedRolePoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListAttachedUserPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListEntitiesForPolicyResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListGroupPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListGroupsForUserResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListGroupsResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListInstanceProfilesForRoleResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListInstanceProfilesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListMFADevicesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListPoliciesRequest$OnlyAttached": "

    A flag to filter the results to only the attached policies.

    When OnlyAttached is true, the returned list contains only the policies that are attached to a user, group, or role. When OnlyAttached is false, or when the parameter is not included, all policies are returned.

    ", + "ListPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListPolicyVersionsResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListRolePoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListRolesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListSSHPublicKeysResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListServerCertificatesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListSigningCertificatesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListUserPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListUsersResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "ListVirtualMFADevicesResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "LoginProfile$PasswordResetRequired": "

    Specifies whether the user is required to set a new password on next sign-in.

    ", + "ManagedPolicyDetail$IsAttachable": "

    Specifies whether the policy can be attached to an IAM user, group, or role.

    ", + "PasswordPolicy$RequireSymbols": "

    Specifies whether to require symbols for IAM user passwords.

    ", + "PasswordPolicy$RequireNumbers": "

    Specifies whether to require numbers for IAM user passwords.

    ", + "PasswordPolicy$RequireUppercaseCharacters": "

    Specifies whether to require uppercase characters for IAM user passwords.

    ", + "PasswordPolicy$RequireLowercaseCharacters": "

    Specifies whether to require lowercase characters for IAM user passwords.

    ", + "PasswordPolicy$AllowUsersToChangePassword": "

    Specifies whether IAM users are allowed to change their own password.

    ", + "PasswordPolicy$ExpirePasswords": "

    Indicates whether passwords in the account expire. Returns true if MaxPasswordAge contains a value greater than 0. Returns false if MaxPasswordAge is 0 or not present.

    ", + "Policy$IsAttachable": "

    Specifies whether the policy can be attached to an IAM user, group, or role.

    ", + "PolicyVersion$IsDefaultVersion": "

    Specifies whether the policy version is set as the policy's default version.

    ", + "SimulatePolicyResponse$IsTruncated": "

    A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all of your results.

    ", + "UpdateAccountPasswordPolicyRequest$RequireSymbols": "

    Specifies whether IAM user passwords must contain at least one of the following non-alphanumeric characters:

    ! @ # $ % ^ &amp; * ( ) _ + - = [ ] { } | '

    Default value: false

    ", + "UpdateAccountPasswordPolicyRequest$RequireNumbers": "

    Specifies whether IAM user passwords must contain at least one numeric character (0 to 9).

    Default value: false

    ", + "UpdateAccountPasswordPolicyRequest$RequireUppercaseCharacters": "

    Specifies whether IAM user passwords must contain at least one uppercase character from the ISO basic Latin alphabet (A to Z).

    Default value: false

    ", + "UpdateAccountPasswordPolicyRequest$RequireLowercaseCharacters": "

    Specifies whether IAM user passwords must contain at least one lowercase character from the ISO basic Latin alphabet (a to z).

    Default value: false

    ", + "UpdateAccountPasswordPolicyRequest$AllowUsersToChangePassword": "

    Allows all IAM users in your account to use the AWS Management Console to change their own passwords. For more information, see Letting IAM Users Change Their Own Passwords in the IAM User Guide.

    Default value: false

    " + } + }, + "certificateBodyType": { + "base": null, + "refs": { + "ServerCertificate$CertificateBody": "

    The contents of the public key certificate.

    ", + "SigningCertificate$CertificateBody": "

    The contents of the signing certificate.

    ", + "UploadServerCertificateRequest$CertificateBody": "

    The contents of the public key certificate in PEM-encoded format.

    ", + "UploadSigningCertificateRequest$CertificateBody": "

    The contents of the signing certificate.

    " + } + }, + "certificateChainType": { + "base": null, + "refs": { + "ServerCertificate$CertificateChain": "

    The contents of the public key certificate chain.

    ", + "UploadServerCertificateRequest$CertificateChain": "

    The contents of the certificate chain. This is typically a concatenation of the PEM-encoded public key certificates of the chain.

    " + } + }, + "certificateIdType": { + "base": null, + "refs": { + "DeleteSigningCertificateRequest$CertificateId": "

    The ID of the signing certificate to delete.

    ", + "SigningCertificate$CertificateId": "

    The ID for the signing certificate.

    ", + "UpdateSigningCertificateRequest$CertificateId": "

    The ID of the signing certificate you want to update.

    " + } + }, + "certificateListType": { + "base": "

    Contains a list of signing certificates.

    This data type is used as a response element in the ListSigningCertificates action.

    ", + "refs": { + "ListSigningCertificatesResponse$Certificates": "

    A list of the user's signing certificate information.

    " + } + }, + "clientIDListType": { + "base": null, + "refs": { + "CreateOpenIDConnectProviderRequest$ClientIDList": "

    A list of client IDs (also known as audiences). When a mobile or web app registers with an OpenID Connect provider, they establish a value that identifies the application. (This is the value that's sent as the client_id parameter on OAuth requests.)

    You can register multiple client IDs with the same provider. For example, you might have multiple applications that use the same OIDC provider. You cannot register more than 100 client IDs with a single IAM OIDC provider.

    There is no defined format for a client ID. The CreateOpenIDConnectProviderRequest action accepts client IDs up to 255 characters long.

    ", + "GetOpenIDConnectProviderResponse$ClientIDList": "

    A list of client IDs (also known as audiences) that are associated with the specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider.

    " + } + }, + "clientIDType": { + "base": null, + "refs": { + "AddClientIDToOpenIDConnectProviderRequest$ClientID": "

    The client ID (also known as audience) to add to the IAM OpenID Connect provider.

    ", + "RemoveClientIDFromOpenIDConnectProviderRequest$ClientID": "

    The client ID (also known as audience) to remove from the IAM OpenID Connect provider. For more information about client IDs, see CreateOpenIDConnectProvider.

    ", + "clientIDListType$member": null + } + }, + "credentialReportExpiredExceptionMessage": { + "base": null, + "refs": { + "CredentialReportExpiredException$message": null + } + }, + "credentialReportNotPresentExceptionMessage": { + "base": null, + "refs": { + "CredentialReportNotPresentException$message": null + } + }, + "credentialReportNotReadyExceptionMessage": { + "base": null, + "refs": { + "CredentialReportNotReadyException$message": null + } + }, + "dateType": { + "base": null, + "refs": { + "AccessKey$CreateDate": "

    The date when the access key was created.

    ", + "AccessKeyLastUsed$LastUsedDate": "

    The date and time, in ISO 8601 date-time format, when the access key was most recently used. This field is null when:

    • The user does not have an access key.

    • An access key exists but has never been used, at least not since IAM started tracking this information on April 22nd, 2015.

    • There is no sign-in data associated with the user

    ", + "AccessKeyMetadata$CreateDate": "

    The date when the access key was created.

    ", + "GetCredentialReportResponse$GeneratedTime": "

    The date and time when the credential report was created, in ISO 8601 date-time format.

    ", + "GetOpenIDConnectProviderResponse$CreateDate": "

    The date and time when the IAM OpenID Connect provider entity was created in the AWS account.

    ", + "GetSAMLProviderResponse$CreateDate": "

    The date and time when the SAML provider was created.

    ", + "GetSAMLProviderResponse$ValidUntil": "

    The expiration date and time for the SAML provider.

    ", + "Group$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the group was created.

    ", + "GroupDetail$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the group was created.

    ", + "InstanceProfile$CreateDate": "

    The date when the instance profile was created.

    ", + "LoginProfile$CreateDate": "

    The date when the password for the user was created.

    ", + "MFADevice$EnableDate": "

    The date when the MFA device was enabled for the user.

    ", + "ManagedPolicyDetail$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the policy was created.

    ", + "ManagedPolicyDetail$UpdateDate": "

    The date and time, in ISO 8601 date-time format, when the policy was last updated.

    When a policy has only one version, this field contains the date and time when the policy was created. When a policy has more than one version, this field contains the date and time when the most recent policy version was created.

    ", + "Policy$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the policy was created.

    ", + "Policy$UpdateDate": "

    The date and time, in ISO 8601 date-time format, when the policy was last updated.

    When a policy has only one version, this field contains the date and time when the policy was created. When a policy has more than one version, this field contains the date and time when the most recent policy version was created.

    ", + "PolicyVersion$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the policy version was created.

    ", + "Role$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the role was created.

    ", + "RoleDetail$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the role was created.

    ", + "SAMLProviderListEntry$ValidUntil": "

    The expiration date and time for the SAML provider.

    ", + "SAMLProviderListEntry$CreateDate": "

    The date and time when the SAML provider was created.

    ", + "SSHPublicKey$UploadDate": "

    The date and time, in ISO 8601 date-time format, when the SSH public key was uploaded.

    ", + "SSHPublicKeyMetadata$UploadDate": "

    The date and time, in ISO 8601 date-time format, when the SSH public key was uploaded.

    ", + "ServerCertificateMetadata$UploadDate": "

    The date when the server certificate was uploaded.

    ", + "ServerCertificateMetadata$Expiration": "

    The date on which the certificate is set to expire.

    ", + "SigningCertificate$UploadDate": "

    The date when the signing certificate was uploaded.

    ", + "User$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the user was created.

    ", + "User$PasswordLastUsed": "

    The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the Using IAM guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. This field is null (not present) when:

    • The user does not have a password

    • The password exists but has never been used (at least not since IAM started tracking this information on October 20th, 2014)

    • There is no sign-in data associated with the user

    This value is returned only in the GetUser and ListUsers actions.

    ", + "UserDetail$CreateDate": "

    The date and time, in ISO 8601 date-time format, when the user was created.

    ", + "VirtualMFADevice$EnableDate": "

    The date and time on which the virtual MFA device was enabled.

    " + } + }, + "deleteConflictMessage": { + "base": null, + "refs": { + "DeleteConflictException$message": null + } + }, + "duplicateCertificateMessage": { + "base": null, + "refs": { + "DuplicateCertificateException$message": null + } + }, + "duplicateSSHPublicKeyMessage": { + "base": null, + "refs": { + "DuplicateSSHPublicKeyException$message": null + } + }, + "encodingType": { + "base": null, + "refs": { + "GetSSHPublicKeyRequest$Encoding": "

    Specifies the public key encoding format to use in the response. To retrieve the public key in ssh-rsa format, use SSH. To retrieve the public key in PEM format, use PEM.

    " + } + }, + "entityAlreadyExistsMessage": { + "base": null, + "refs": { + "EntityAlreadyExistsException$message": null + } + }, + "entityListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsRequest$Filter": "

    A list of entity types (user, group, role, local managed policy, or AWS managed policy) for filtering the results.

    " + } + }, + "entityTemporarilyUnmodifiableMessage": { + "base": null, + "refs": { + "EntityTemporarilyUnmodifiableException$message": null + } + }, + "existingUserNameType": { + "base": null, + "refs": { + "AddUserToGroupRequest$UserName": "

    The name of the user to add.

    ", + "CreateAccessKeyRequest$UserName": "

    The user name that the new key will belong to.

    ", + "DeactivateMFADeviceRequest$UserName": "

    The name of the user whose MFA device you want to deactivate.

    ", + "DeleteAccessKeyRequest$UserName": "

    The name of the user whose key you want to delete.

    ", + "DeleteSigningCertificateRequest$UserName": "

    The name of the user the signing certificate belongs to.

    ", + "DeleteUserPolicyRequest$UserName": "

    The name (friendly name, not ARN) identifying the user that the policy is embedded in.

    ", + "DeleteUserRequest$UserName": "

    The name of the user to delete.

    ", + "EnableMFADeviceRequest$UserName": "

    The name of the user for whom you want to enable the MFA device.

    ", + "GetAccessKeyLastUsedResponse$UserName": "

    The name of the AWS IAM user that owns this access key.

    ", + "GetUserPolicyRequest$UserName": "

    The name of the user who the policy is associated with.

    ", + "GetUserPolicyResponse$UserName": "

    The user the policy is associated with.

    ", + "GetUserRequest$UserName": "

    The name of the user to get information about.

    This parameter is optional. If it is not included, it defaults to the user making the request.

    ", + "ListAccessKeysRequest$UserName": "

    The name of the user.

    ", + "ListGroupsForUserRequest$UserName": "

    The name of the user to list groups for.

    ", + "ListMFADevicesRequest$UserName": "

    The name of the user whose MFA devices you want to list.

    ", + "ListSigningCertificatesRequest$UserName": "

    The name of the user.

    ", + "ListUserPoliciesRequest$UserName": "

    The name of the user to list policies for.

    ", + "PutUserPolicyRequest$UserName": "

    The name of the user to associate the policy with.

    ", + "RemoveUserFromGroupRequest$UserName": "

    The name of the user to remove.

    ", + "ResyncMFADeviceRequest$UserName": "

    The name of the user whose MFA device you want to resynchronize.

    ", + "UpdateAccessKeyRequest$UserName": "

    The name of the user whose key you want to update.

    ", + "UpdateSigningCertificateRequest$UserName": "

    The name of the user the signing certificate belongs to.

    ", + "UpdateUserRequest$UserName": "

    Name of the user to update. If you're changing the name of the user, this is the original user name.

    ", + "UploadSigningCertificateRequest$UserName": "

    The name of the user the signing certificate is for.

    " + } + }, + "groupDetailListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsResponse$GroupDetailList": "

    A list containing information about IAM groups.

    " + } + }, + "groupListType": { + "base": "

    Contains a list of IAM groups.

    This data type is used as a response element in the ListGroups action.

    ", + "refs": { + "ListGroupsForUserResponse$Groups": "

    A list of groups.

    ", + "ListGroupsResponse$Groups": "

    A list of groups.

    " + } + }, + "groupNameListType": { + "base": null, + "refs": { + "UserDetail$GroupList": "

    A list of IAM groups that the user is in.

    " + } + }, + "groupNameType": { + "base": null, + "refs": { + "AddUserToGroupRequest$GroupName": "

    The name of the group to update.

    ", + "AttachGroupPolicyRequest$GroupName": "

    The name (friendly name, not ARN) of the group to attach the policy to.

    ", + "CreateGroupRequest$GroupName": "

    The name of the group to create. Do not include the path in this value.

    ", + "DeleteGroupPolicyRequest$GroupName": "

    The name (friendly name, not ARN) identifying the group that the policy is embedded in.

    ", + "DeleteGroupRequest$GroupName": "

    The name of the group to delete.

    ", + "DetachGroupPolicyRequest$GroupName": "

    The name (friendly name, not ARN) of the group to detach the policy from.

    ", + "GetGroupPolicyRequest$GroupName": "

    The name of the group the policy is associated with.

    ", + "GetGroupPolicyResponse$GroupName": "

    The group the policy is associated with.

    ", + "GetGroupRequest$GroupName": "

    The name of the group.

    ", + "Group$GroupName": "

    The friendly name that identifies the group.

    ", + "GroupDetail$GroupName": "

    The friendly name that identifies the group.

    ", + "ListAttachedGroupPoliciesRequest$GroupName": "

    The name (friendly name, not ARN) of the group to list attached policies for.

    ", + "ListGroupPoliciesRequest$GroupName": "

    The name of the group to list policies for.

    ", + "PolicyGroup$GroupName": "

    The name (friendly name, not ARN) identifying the group.

    ", + "PutGroupPolicyRequest$GroupName": "

    The name of the group to associate the policy with.

    ", + "RemoveUserFromGroupRequest$GroupName": "

    The name of the group to update.

    ", + "UpdateGroupRequest$GroupName": "

    Name of the group to update. If you're changing the name of the group, this is the original name.

    ", + "UpdateGroupRequest$NewGroupName": "

    New name for the group. Only include this if changing the group's name.

    ", + "groupNameListType$member": null + } + }, + "idType": { + "base": null, + "refs": { + "Group$GroupId": "

    The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "GroupDetail$GroupId": "

    The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "InstanceProfile$InstanceProfileId": "

    The stable and unique string identifying the instance profile. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "ManagedPolicyDetail$PolicyId": "

    The stable and unique string identifying the policy.

    For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "Policy$PolicyId": "

    The stable and unique string identifying the policy.

    For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "Role$RoleId": "

    The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "RoleDetail$RoleId": "

    The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "ServerCertificateMetadata$ServerCertificateId": "

    The stable and unique string identifying the server certificate. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "User$UserId": "

    The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    ", + "UserDetail$UserId": "

    The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the Using IAM guide.

    " + } + }, + "instanceProfileListType": { + "base": "

    Contains a list of instance profiles.

    ", + "refs": { + "ListInstanceProfilesForRoleResponse$InstanceProfiles": "

    A list of instance profiles.

    ", + "ListInstanceProfilesResponse$InstanceProfiles": "

    A list of instance profiles.

    ", + "RoleDetail$InstanceProfileList": null + } + }, + "instanceProfileNameType": { + "base": null, + "refs": { + "AddRoleToInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to update.

    ", + "CreateInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to create.

    ", + "DeleteInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to delete.

    ", + "GetInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to get information about.

    ", + "InstanceProfile$InstanceProfileName": "

    The name identifying the instance profile.

    ", + "RemoveRoleFromInstanceProfileRequest$InstanceProfileName": "

    The name of the instance profile to update.

    " + } + }, + "invalidAuthenticationCodeMessage": { + "base": null, + "refs": { + "InvalidAuthenticationCodeException$message": null + } + }, + "invalidCertificateMessage": { + "base": null, + "refs": { + "InvalidCertificateException$message": null + } + }, + "invalidInputMessage": { + "base": null, + "refs": { + "InvalidInputException$message": null + } + }, + "invalidPublicKeyMessage": { + "base": null, + "refs": { + "InvalidPublicKeyException$message": null + } + }, + "invalidUserTypeMessage": { + "base": null, + "refs": { + "InvalidUserTypeException$message": null + } + }, + "keyPairMismatchMessage": { + "base": null, + "refs": { + "KeyPairMismatchException$message": null + } + }, + "limitExceededMessage": { + "base": null, + "refs": { + "LimitExceededException$message": null + } + }, + "malformedCertificateMessage": { + "base": null, + "refs": { + "MalformedCertificateException$message": null + } + }, + "malformedPolicyDocumentMessage": { + "base": null, + "refs": { + "MalformedPolicyDocumentException$message": null + } + }, + "markerType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "GetAccountAuthorizationDetailsResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "GetGroupRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "GetGroupResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAccessKeysRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAccessKeysResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAccountAliasesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAccountAliasesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAttachedGroupPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAttachedGroupPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAttachedRolePoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAttachedRolePoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListAttachedUserPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListAttachedUserPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListEntitiesForPolicyRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListEntitiesForPolicyResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListGroupPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListGroupPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListGroupsForUserRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListGroupsForUserResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListGroupsRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListGroupsResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListInstanceProfilesForRoleRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListInstanceProfilesForRoleResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListInstanceProfilesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListInstanceProfilesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListMFADevicesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListMFADevicesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListPolicyVersionsRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListPolicyVersionsResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListRolePoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListRolePoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListRolesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListRolesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListSSHPublicKeysRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListSSHPublicKeysResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListServerCertificatesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListServerCertificatesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListSigningCertificatesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListSigningCertificatesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListUserPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListUserPoliciesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListUsersRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListUsersResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListVirtualMFADevicesRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "ListVirtualMFADevicesResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "SimulateCustomPolicyRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    ", + "SimulatePolicyResponse$Marker": "

    When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "SimulatePrincipalPolicyRequest$Marker": "

    Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

    " + } + }, + "maxItemsType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "GetGroupRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAccessKeysRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAccountAliasesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAttachedGroupPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAttachedRolePoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListAttachedUserPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListEntitiesForPolicyRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListGroupPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListGroupsForUserRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListGroupsRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListInstanceProfilesForRoleRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListInstanceProfilesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListMFADevicesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListPolicyVersionsRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListRolePoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListRolesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListSSHPublicKeysRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListServerCertificatesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListSigningCertificatesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListUserPoliciesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListUsersRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "ListVirtualMFADevicesRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "SimulateCustomPolicyRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    ", + "SimulatePrincipalPolicyRequest$MaxItems": "

    Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

    This parameter is optional. If you do not include it, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.

    " + } + }, + "maxPasswordAgeType": { + "base": null, + "refs": { + "PasswordPolicy$MaxPasswordAge": "

    The number of days that an IAM user password is valid.

    ", + "UpdateAccountPasswordPolicyRequest$MaxPasswordAge": "

    The number of days that an IAM user password is valid. The default value of 0 means IAM user passwords never expire.

    Default value: 0

    " + } + }, + "mfaDeviceListType": { + "base": "

    Contains a list of MFA devices.

    This data type is used as a response element in the ListMFADevices and ListVirtualMFADevices actions.

    ", + "refs": { + "ListMFADevicesResponse$MFADevices": "

    A list of MFA devices.

    " + } + }, + "minimumPasswordLengthType": { + "base": null, + "refs": { + "PasswordPolicy$MinimumPasswordLength": "

    Minimum length to require for IAM user passwords.

    ", + "UpdateAccountPasswordPolicyRequest$MinimumPasswordLength": "

    The minimum number of characters allowed in an IAM user password.

    Default value: 6

    " + } + }, + "noSuchEntityMessage": { + "base": null, + "refs": { + "NoSuchEntityException$message": null + } + }, + "passwordPolicyViolationMessage": { + "base": null, + "refs": { + "PasswordPolicyViolationException$message": null + } + }, + "passwordReusePreventionType": { + "base": null, + "refs": { + "PasswordPolicy$PasswordReusePrevention": "

    Specifies the number of previous passwords that IAM users are prevented from reusing.

    ", + "UpdateAccountPasswordPolicyRequest$PasswordReusePrevention": "

    Specifies the number of previous passwords that IAM users are prevented from reusing. The default value of 0 means IAM users are not prevented from reusing previous passwords.

    Default value: 0

    " + } + }, + "passwordType": { + "base": null, + "refs": { + "ChangePasswordRequest$OldPassword": "

    The IAM user's current password.

    ", + "ChangePasswordRequest$NewPassword": "

    The new password. The new password must conform to the AWS account's password policy, if one exists.

    ", + "CreateLoginProfileRequest$Password": "

    The new password for the user.

    ", + "UpdateLoginProfileRequest$Password": "

    The new password for the specified user.

    " + } + }, + "pathPrefixType": { + "base": null, + "refs": { + "ListGroupsRequest$PathPrefix": "

    The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ gets all groups whose path starts with /division_abc/subdivision_xyz/.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all groups.

    ", + "ListInstanceProfilesRequest$PathPrefix": "

    The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ gets all instance profiles whose path starts with /application_abc/component_xyz/.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all instance profiles.

    ", + "ListRolesRequest$PathPrefix": "

    The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ gets all roles whose path starts with /application_abc/component_xyz/.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all roles.

    ", + "ListServerCertificatesRequest$PathPrefix": "

    The path prefix for filtering the results. For example: /company/servercerts would get all server certificates for which the path starts with /company/servercerts.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all server certificates.

    ", + "ListUsersRequest$PathPrefix": "

    The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, which would get all user names whose path starts with /division_abc/subdivision_xyz/.

    This parameter is optional. If it is not included, it defaults to a slash (/), listing all user names.

    " + } + }, + "pathType": { + "base": null, + "refs": { + "CreateGroupRequest$Path": "

    The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    ", + "CreateInstanceProfileRequest$Path": "

    The path to the instance profile. For more information about paths, see IAM Identifiers in the Using IAM guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    ", + "CreateRoleRequest$Path": "

    The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    ", + "CreateUserRequest$Path": "

    The path for the user name. For more information about paths, see IAM Identifiers in the Using IAM guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    ", + "CreateVirtualMFADeviceRequest$Path": "

    The path for the virtual MFA device. For more information about paths, see IAM Identifiers in the Using IAM guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    ", + "Group$Path": "

    The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "GroupDetail$Path": "

    The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "InstanceProfile$Path": "

    The path to the instance profile. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "ListEntitiesForPolicyRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all entities.

    ", + "Role$Path": "

    The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "RoleDetail$Path": "

    The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "ServerCertificateMetadata$Path": "

    The path to the server certificate. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "UpdateGroupRequest$NewPath": "

    New path for the group. Only include this if changing the group's path.

    ", + "UpdateServerCertificateRequest$NewPath": "

    The new path for the server certificate. Include this only if you are updating the server certificate's path.

    ", + "UpdateUserRequest$NewPath": "

    New path for the user. Include this parameter only if you're changing the user's path.

    ", + "UploadServerCertificateRequest$Path": "

    The path for the server certificate. For more information about paths, see IAM Identifiers in the Using IAM guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    If you are uploading a server certificate specifically for use with Amazon CloudFront distributions, you must specify a path using the --path option. The path must begin with /cloudfront and must include a trailing slash (for example, /cloudfront/test/). ", + "User$Path": "

    The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "UserDetail$Path": "

    The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.

    " + } + }, + "policyDescriptionType": { + "base": null, + "refs": { + "CreatePolicyRequest$Description": "

    A friendly description of the policy.

    Typically used to store information about the permissions defined in the policy. For example, \"Grants access to production DynamoDB tables.\"

    The policy description is immutable. After a value is assigned, it cannot be changed.

    ", + "ManagedPolicyDetail$Description": "

    A friendly description of the policy.

    ", + "Policy$Description": "

    A friendly description of the policy.

    This element is included in the response to the GetPolicy operation. It is not included in the response to the ListPolicies operation.

    " + } + }, + "policyDetailListType": { + "base": null, + "refs": { + "GroupDetail$GroupPolicyList": "

    A list of the inline policies embedded in the group.

    ", + "RoleDetail$RolePolicyList": "

    A list of inline policies embedded in the role. These policies are the role's access (permissions) policies.

    ", + "UserDetail$UserPolicyList": "

    A list of the inline policies embedded in the user.

    " + } + }, + "policyDocumentType": { + "base": null, + "refs": { + "CreatePolicyRequest$PolicyDocument": "

    The policy document.

    ", + "CreatePolicyVersionRequest$PolicyDocument": "

    The policy document.

    ", + "CreateRoleRequest$AssumeRolePolicyDocument": "

    The trust relationship policy document that grants an entity permission to assume the role.

    ", + "GetGroupPolicyResponse$PolicyDocument": "

    The policy document.

    ", + "GetRolePolicyResponse$PolicyDocument": "

    The policy document.

    ", + "GetUserPolicyResponse$PolicyDocument": "

    The policy document.

    ", + "PolicyDetail$PolicyDocument": "

    The policy document.

    ", + "PolicyVersion$Document": "

    The policy document.

    The policy document is returned in the response to the GetPolicyVersion and GetAccountAuthorizationDetails operations. It is not returned in the response to the CreatePolicyVersion or ListPolicyVersions operations.

    ", + "PutGroupPolicyRequest$PolicyDocument": "

    The policy document.

    ", + "PutRolePolicyRequest$PolicyDocument": "

    The policy document.

    ", + "PutUserPolicyRequest$PolicyDocument": "

    The policy document.

    ", + "Role$AssumeRolePolicyDocument": "

    The policy that grants an entity permission to assume the role.

    ", + "RoleDetail$AssumeRolePolicyDocument": "

    The trust policy that grants permission to assume the role.

    ", + "SimulateCustomPolicyRequest$ResourcePolicy": "

    A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.

    ", + "SimulatePrincipalPolicyRequest$ResourcePolicy": "

    A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.

    ", + "SimulationPolicyListType$member": null, + "UpdateAssumeRolePolicyRequest$PolicyDocument": "

    The policy that grants an entity permission to assume the role.

    " + } + }, + "policyDocumentVersionListType": { + "base": null, + "refs": { + "ListPolicyVersionsResponse$Versions": "

    A list of policy versions.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    ", + "ManagedPolicyDetail$PolicyVersionList": "

    A list containing information about the versions of the policy.

    " + } + }, + "policyEvaluationErrorMessage": { + "base": null, + "refs": { + "PolicyEvaluationException$message": null + } + }, + "policyListType": { + "base": null, + "refs": { + "ListPoliciesResponse$Policies": "

    A list of policies.

    " + } + }, + "policyNameListType": { + "base": "

    Contains a list of policy names.

    This data type is used as a response element in the ListPolicies action.

    ", + "refs": { + "ListGroupPoliciesResponse$PolicyNames": "

    A list of policy names.

    ", + "ListRolePoliciesResponse$PolicyNames": "

    A list of policy names.

    ", + "ListUserPoliciesResponse$PolicyNames": "

    A list of policy names.

    " + } + }, + "policyNameType": { + "base": null, + "refs": { + "AttachedPolicy$PolicyName": "

    The friendly name of the attached policy.

    ", + "CreatePolicyRequest$PolicyName": "

    The name of the policy document.

    ", + "DeleteGroupPolicyRequest$PolicyName": "

    The name identifying the policy document to delete.

    ", + "DeleteRolePolicyRequest$PolicyName": "

    The name identifying the policy document to delete.

    ", + "DeleteUserPolicyRequest$PolicyName": "

    The name identifying the policy document to delete.

    ", + "GetGroupPolicyRequest$PolicyName": "

    The name of the policy document to get.

    ", + "GetGroupPolicyResponse$PolicyName": "

    The name of the policy.

    ", + "GetRolePolicyRequest$PolicyName": "

    The name of the policy document to get.

    ", + "GetRolePolicyResponse$PolicyName": "

    The name of the policy.

    ", + "GetUserPolicyRequest$PolicyName": "

    The name of the policy document to get.

    ", + "GetUserPolicyResponse$PolicyName": "

    The name of the policy.

    ", + "ManagedPolicyDetail$PolicyName": "

    The friendly name (not ARN) identifying the policy.

    ", + "Policy$PolicyName": "

    The friendly name (not ARN) identifying the policy.

    ", + "PolicyDetail$PolicyName": "

    The name of the policy.

    ", + "PutGroupPolicyRequest$PolicyName": "

    The name of the policy document.

    ", + "PutRolePolicyRequest$PolicyName": "

    The name of the policy document.

    ", + "PutUserPolicyRequest$PolicyName": "

    The name of the policy document.

    ", + "policyNameListType$member": null + } + }, + "policyPathType": { + "base": null, + "refs": { + "CreatePolicyRequest$Path": "

    The path for the policy.

    For more information about paths, see IAM Identifiers in the IAM User Guide.

    This parameter is optional. If it is not included, it defaults to a slash (/).

    ", + "ListAttachedGroupPoliciesRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

    ", + "ListAttachedRolePoliciesRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

    ", + "ListAttachedUserPoliciesRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

    ", + "ListPoliciesRequest$PathPrefix": "

    The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

    ", + "ManagedPolicyDetail$Path": "

    The path to the policy.

    For more information about paths, see IAM Identifiers in the Using IAM guide.

    ", + "Policy$Path": "

    The path to the policy.

    For more information about paths, see IAM Identifiers in the Using IAM guide.

    " + } + }, + "policyScopeType": { + "base": null, + "refs": { + "ListPoliciesRequest$Scope": "

    The scope to use for filtering the results.

    To list only AWS managed policies, set Scope to AWS. To list only the customer managed policies in your AWS account, set Scope to Local.

    This parameter is optional. If it is not included, or if it is set to All, all policies are returned.

    " + } + }, + "policyVersionIdType": { + "base": null, + "refs": { + "DeletePolicyVersionRequest$VersionId": "

    The policy version to delete.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    ", + "GetPolicyVersionRequest$VersionId": "

    Identifies the policy version to retrieve.

    ", + "ManagedPolicyDetail$DefaultVersionId": "

    The identifier for the version of the policy that is set as the default (operative) version.

    For more information about policy versions, see Versioning for Managed Policies in the Using IAM guide.

    ", + "Policy$DefaultVersionId": "

    The identifier for the version of the policy that is set as the default version.

    ", + "PolicyVersion$VersionId": "

    The identifier for the policy version.

    Policy version identifiers always begin with v (always lowercase). When a policy is created, the first policy version is v1.

    ", + "SetDefaultPolicyVersionRequest$VersionId": "

    The version of the policy to set as the default (operative) version.

    For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

    " + } + }, + "privateKeyType": { + "base": null, + "refs": { + "UploadServerCertificateRequest$PrivateKey": "

    The contents of the private key in PEM-encoded format.

    " + } + }, + "publicKeyFingerprintType": { + "base": null, + "refs": { + "SSHPublicKey$Fingerprint": "

    The MD5 message digest of the SSH public key.

    " + } + }, + "publicKeyIdType": { + "base": null, + "refs": { + "DeleteSSHPublicKeyRequest$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    ", + "GetSSHPublicKeyRequest$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    ", + "SSHPublicKey$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    ", + "SSHPublicKeyMetadata$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    ", + "UpdateSSHPublicKeyRequest$SSHPublicKeyId": "

    The unique identifier for the SSH public key.

    " + } + }, + "publicKeyMaterialType": { + "base": null, + "refs": { + "SSHPublicKey$SSHPublicKeyBody": "

    The SSH public key.

    ", + "UploadSSHPublicKeyRequest$SSHPublicKeyBody": "

    The SSH public key. The public key must be encoded in ssh-rsa format or PEM format.

    " + } + }, + "roleDetailListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsResponse$RoleDetailList": "

    A list containing information about IAM roles.

    " + } + }, + "roleListType": { + "base": "

    Contains a list of IAM roles.

    This data type is used as a response element in the ListRoles action.

    ", + "refs": { + "InstanceProfile$Roles": "

    The role associated with the instance profile.

    ", + "ListRolesResponse$Roles": "

    A list of roles.

    " + } + }, + "roleNameType": { + "base": null, + "refs": { + "AddRoleToInstanceProfileRequest$RoleName": "

    The name of the role to add.

    ", + "AttachRolePolicyRequest$RoleName": "

    The name (friendly name, not ARN) of the role to attach the policy to.

    ", + "CreateRoleRequest$RoleName": "

    The name of the role to create.

    ", + "DeleteRolePolicyRequest$RoleName": "

    The name (friendly name, not ARN) identifying the role that the policy is embedded in.

    ", + "DeleteRoleRequest$RoleName": "

    The name of the role to delete.

    ", + "DetachRolePolicyRequest$RoleName": "

    The name (friendly name, not ARN) of the role to detach the policy from.

    ", + "GetRolePolicyRequest$RoleName": "

    The name of the role associated with the policy.

    ", + "GetRolePolicyResponse$RoleName": "

    The role the policy is associated with.

    ", + "GetRoleRequest$RoleName": "

    The name of the role to get information about.

    ", + "ListAttachedRolePoliciesRequest$RoleName": "

    The name (friendly name, not ARN) of the role to list attached policies for.

    ", + "ListInstanceProfilesForRoleRequest$RoleName": "

    The name of the role to list instance profiles for.

    ", + "ListRolePoliciesRequest$RoleName": "

    The name of the role to list policies for.

    ", + "PolicyRole$RoleName": "

    The name (friendly name, not ARN) identifying the role.

    ", + "PutRolePolicyRequest$RoleName": "

    The name of the role to associate the policy with.

    ", + "RemoveRoleFromInstanceProfileRequest$RoleName": "

    The name of the role to remove.

    ", + "Role$RoleName": "

    The friendly name that identifies the role.

    ", + "RoleDetail$RoleName": "

    The friendly name that identifies the role.

    ", + "UpdateAssumeRolePolicyRequest$RoleName": "

    The name of the role to update.

    " + } + }, + "serialNumberType": { + "base": null, + "refs": { + "DeactivateMFADeviceRequest$SerialNumber": "

    The serial number that uniquely identifies the MFA device. For virtual MFA devices, the serial number is the device ARN.

    ", + "DeleteVirtualMFADeviceRequest$SerialNumber": "

    The serial number that uniquely identifies the MFA device. For virtual MFA devices, the serial number is the same as the ARN.

    ", + "EnableMFADeviceRequest$SerialNumber": "

    The serial number that uniquely identifies the MFA device. For virtual MFA devices, the serial number is the device ARN.

    ", + "MFADevice$SerialNumber": "

    The serial number that uniquely identifies the MFA device. For virtual MFA devices, the serial number is the device ARN.

    ", + "ResyncMFADeviceRequest$SerialNumber": "

    Serial number that uniquely identifies the MFA device.

    ", + "VirtualMFADevice$SerialNumber": "

    The serial number associated with VirtualMFADevice.

    " + } + }, + "serverCertificateMetadataListType": { + "base": null, + "refs": { + "ListServerCertificatesResponse$ServerCertificateMetadataList": "

    A list of server certificates.

    " + } + }, + "serverCertificateNameType": { + "base": null, + "refs": { + "DeleteServerCertificateRequest$ServerCertificateName": "

    The name of the server certificate you want to delete.

    ", + "GetServerCertificateRequest$ServerCertificateName": "

    The name of the server certificate you want to retrieve information about.

    ", + "ServerCertificateMetadata$ServerCertificateName": "

    The name that identifies the server certificate.

    ", + "UpdateServerCertificateRequest$ServerCertificateName": "

    The name of the server certificate that you want to update.

    ", + "UpdateServerCertificateRequest$NewServerCertificateName": "

    The new name for the server certificate. Include this only if you are updating the server certificate's name. The name of the certificate cannot contain any spaces.

    ", + "UploadServerCertificateRequest$ServerCertificateName": "

    The name for the server certificate. Do not include the path in this value. The name of the certificate cannot contain any spaces.

    " + } + }, + "serviceFailureExceptionMessage": { + "base": null, + "refs": { + "ServiceFailureException$message": null + } + }, + "statusType": { + "base": null, + "refs": { + "AccessKey$Status": "

    The status of the access key. Active means the key is valid for API calls, while Inactive means it is not.

    ", + "AccessKeyMetadata$Status": "

    The status of the access key. Active means the key is valid for API calls; Inactive means it is not.

    ", + "SSHPublicKey$Status": "

    The status of the SSH public key. Active means the key can be used for authentication with an AWS CodeCommit repository. Inactive means the key cannot be used.

    ", + "SSHPublicKeyMetadata$Status": "

    The status of the SSH public key. Active means the key can be used for authentication with an AWS CodeCommit repository. Inactive means the key cannot be used.

    ", + "SigningCertificate$Status": "

    The status of the signing certificate. Active means the key is valid for API calls, while Inactive means it is not.

    ", + "UpdateAccessKeyRequest$Status": "

    The status you want to assign to the secret access key. Active means the key can be used for API calls to AWS, while Inactive means the key cannot be used.

    ", + "UpdateSSHPublicKeyRequest$Status": "

    The status to assign to the SSH public key. Active means the key can be used for authentication with an AWS CodeCommit repository. Inactive means the key cannot be used.

    ", + "UpdateSigningCertificateRequest$Status": "

    The status you want to assign to the certificate. Active means the certificate can be used for API calls to AWS, while Inactive means the certificate cannot be used.

    " + } + }, + "stringType": { + "base": null, + "refs": { + "AccessKeyLastUsed$ServiceName": "

    The name of the AWS service with which this access key was most recently used. This field is null when:

    • The user does not have an access key.

    • An access key exists but has never been used, at least not since IAM started tracking this information on April 22nd, 2015.

    • There is no sign-in data associated with the user

    ", + "AccessKeyLastUsed$Region": "

    The AWS region where this access key was most recently used. This field is null when:

    • The user does not have an access key.

    • An access key exists but has never been used, at least not since IAM started tracking this information on April 22nd, 2015.

    • There is no sign-in data associated with the user

    For more information about AWS regions, see Regions and Endpoints in the Amazon Web Services General Reference.

    " + } + }, + "summaryKeyType": { + "base": null, + "refs": { + "summaryMapType$key": null + } + }, + "summaryMapType": { + "base": null, + "refs": { + "GetAccountSummaryResponse$SummaryMap": "

    A set of key value pairs containing information about IAM entity usage and IAM quotas.

    SummaryMap contains the following keys:

    • AccessKeysPerUserQuota

      The maximum number of active access keys allowed for each IAM user.

    • AccountAccessKeysPresent

      This value is 1 if the AWS account (root) has an access key, otherwise it is 0.

    • AccountMFAEnabled

      This value is 1 if the AWS account (root) has an MFA device assigned, otherwise it is 0.

    • AccountSigningCertificatesPresent

      This value is 1 if the AWS account (root) has a signing certificate, otherwise it is 0.

    • AssumeRolePolicySizeQuota

      The maximum allowed size for assume role policy documents (trust policies), in non-whitespace characters.

    • AttachedPoliciesPerGroupQuota

      The maximum number of managed policies that can be attached to an IAM group.

    • AttachedPoliciesPerRoleQuota

      The maximum number of managed policies that can be attached to an IAM role.

    • AttachedPoliciesPerUserQuota

      The maximum number of managed policies that can be attached to an IAM user.

    • GroupPolicySizeQuota

      The maximum allowed size for the aggregate of all inline policies embedded in an IAM group, in non-whitespace characters.

    • Groups

      The number of IAM groups in the AWS account.

    • GroupsPerUserQuota

      The maximum number of IAM groups each IAM user can belong to.

    • GroupsQuota

      The maximum number of IAM groups allowed in the AWS account.

    • InstanceProfiles

      The number of instance profiles in the AWS account.

    • InstanceProfilesQuota

      The maximum number of instance profiles allowed in the AWS account.

    • MFADevices

      The number of MFA devices in the AWS account, including those assigned and unassigned.

    • MFADevicesInUse

      The number of MFA devices that have been assigned to an IAM user or to the AWS account (root).

    • Policies

      The number of customer managed policies in the AWS account.

    • PoliciesQuota

      The maximum number of customer managed policies allowed in the AWS account.

    • PolicySizeQuota

      The maximum allowed size of a customer managed policy, in non-whitespace characters.

    • PolicyVersionsInUse

      The number of managed policies that are attached to IAM users, groups, or roles in the AWS account.

    • PolicyVersionsInUseQuota

      The maximum number of managed policies that can be attached to IAM users, groups, or roles in the AWS account.

    • Providers

      The number of identity providers in the AWS account.

    • RolePolicySizeQuota

      The maximum allowed size for the aggregate of all inline policies (access policies, not the trust policy) embedded in an IAM role, in non-whitespace characters.

    • Roles

      The number of IAM roles in the AWS account.

    • RolesQuota

      The maximum number of IAM roles allowed in the AWS account.

    • ServerCertificates

      The number of server certificates in the AWS account.

    • ServerCertificatesQuota

      The maximum number of server certificates allowed in the AWS account.

    • SigningCertificatesPerUserQuota

      The maximum number of X.509 signing certificates allowed for each IAM user.

    • UserPolicySizeQuota

      The maximum allowed size for the aggregate of all inline policies embedded in an IAM user, in non-whitespace characters.

    • Users

      The number of IAM users in the AWS account.

    • UsersQuota

      The maximum number of IAM users allowed in the AWS account.

    • VersionsPerPolicyQuota

      The maximum number of policy versions allowed for each managed policy.

    " + } + }, + "summaryValueType": { + "base": null, + "refs": { + "summaryMapType$value": null + } + }, + "thumbprintListType": { + "base": "

    Contains a list of thumbprints of identity provider server certificates.

    ", + "refs": { + "CreateOpenIDConnectProviderRequest$ThumbprintList": "

    A list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s). Typically this list includes only one entry. However, IAM lets you have up to five thumbprints for an OIDC provider. This lets you maintain multiple thumbprints if the identity provider is rotating certificates.

    The server certificate thumbprint is the hex-encoded SHA-1 hash value of the X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. It is always a 40-character string.

    You must provide at least one thumbprint when creating an IAM OIDC provider. For example, if the OIDC provider is server.example.com and the provider stores its keys at \"https://keys.server.example.com/openid-connect\", the thumbprint string would be the hex-encoded SHA-1 hash value of the certificate used by https://keys.server.example.com.

    For more information about obtaining the OIDC provider's thumbprint, see Obtaining the Thumbprint for an OpenID Connect Provider in the IAM User Guide.

    ", + "GetOpenIDConnectProviderResponse$ThumbprintList": "

    A list of certificate thumbprints that are associated with the specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider.

    ", + "UpdateOpenIDConnectProviderThumbprintRequest$ThumbprintList": "

    A list of certificate thumbprints that are associated with the specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider.

    " + } + }, + "thumbprintType": { + "base": "

    Contains a thumbprint for an identity provider's server certificate.

    The identity provider's server certificate thumbprint is the hex-encoded SHA-1 hash value of the self-signed X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. It is always a 40-character string.

    ", + "refs": { + "thumbprintListType$member": null + } + }, + "unrecognizedPublicKeyEncodingMessage": { + "base": null, + "refs": { + "UnrecognizedPublicKeyEncodingException$message": null + } + }, + "userDetailListType": { + "base": null, + "refs": { + "GetAccountAuthorizationDetailsResponse$UserDetailList": "

    A list containing information about IAM users.

    " + } + }, + "userListType": { + "base": "

    Contains a list of users.

    This data type is used as a response element in the GetGroup and ListUsers actions.

    ", + "refs": { + "GetGroupResponse$Users": "

    A list of users in the group.

    ", + "ListUsersResponse$Users": "

    A list of users.

    " + } + }, + "userNameType": { + "base": null, + "refs": { + "AccessKey$UserName": "

    The name of the IAM user that the access key is associated with.

    ", + "AccessKeyMetadata$UserName": "

    The name of the IAM user that the key is associated with.

    ", + "AttachUserPolicyRequest$UserName": "

    The name (friendly name, not ARN) of the user to attach the policy to.

    ", + "CreateLoginProfileRequest$UserName": "

    The name of the user to create a password for.

    ", + "CreateUserRequest$UserName": "

    The name of the user to create.

    ", + "DeleteLoginProfileRequest$UserName": "

    The name of the user whose password you want to delete.

    ", + "DeleteSSHPublicKeyRequest$UserName": "

    The name of the IAM user associated with the SSH public key.

    ", + "DetachUserPolicyRequest$UserName": "

    The name (friendly name, not ARN) of the user to detach the policy from.

    ", + "GetLoginProfileRequest$UserName": "

    The name of the user whose login profile you want to retrieve.

    ", + "GetSSHPublicKeyRequest$UserName": "

    The name of the IAM user associated with the SSH public key.

    ", + "ListAttachedUserPoliciesRequest$UserName": "

    The name (friendly name, not ARN) of the user to list attached policies for.

    ", + "ListSSHPublicKeysRequest$UserName": "

    The name of the IAM user to list SSH public keys for. If none is specified, the UserName field is determined implicitly based on the AWS access key used to sign the request.

    ", + "LoginProfile$UserName": "

    The name of the user, which can be used for signing in to the AWS Management Console.

    ", + "MFADevice$UserName": "

    The user with whom the MFA device is associated.

    ", + "PolicyUser$UserName": "

    The name (friendly name, not ARN) identifying the user.

    ", + "SSHPublicKey$UserName": "

    The name of the IAM user associated with the SSH public key.

    ", + "SSHPublicKeyMetadata$UserName": "

    The name of the IAM user associated with the SSH public key.

    ", + "SigningCertificate$UserName": "

    The name of the user the signing certificate is associated with.

    ", + "UpdateLoginProfileRequest$UserName": "

    The name of the user whose password you want to update.

    ", + "UpdateSSHPublicKeyRequest$UserName": "

    The name of the IAM user associated with the SSH public key.

    ", + "UpdateUserRequest$NewUserName": "

    New name for the user. Include this parameter only if you're changing the user's name.

    ", + "UploadSSHPublicKeyRequest$UserName": "

    The name of the IAM user to associate the SSH public key with.

    ", + "User$UserName": "

    The friendly name identifying the user.

    ", + "UserDetail$UserName": "

    The friendly name identifying the user.

    " + } + }, + "virtualMFADeviceListType": { + "base": null, + "refs": { + "ListVirtualMFADevicesResponse$VirtualMFADevices": "

    The list of virtual MFA devices in the current account that match the AssignmentStatus value that was passed in the request.

    " + } + }, + "virtualMFADeviceName": { + "base": null, + "refs": { + "CreateVirtualMFADeviceRequest$VirtualMFADeviceName": "

    The name of the virtual MFA device. Use with path to uniquely identify a virtual MFA device.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,164 @@ +{ + "pagination": { + "GetGroup": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "Users" + }, + "ListAccessKeys": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "AccessKeyMetadata" + }, + "ListAccountAliases": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "AccountAliases" + }, + "ListAttachedGroupPolicies": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "AttachedPolicies" + }, + 
"ListAttachedRolePolicies": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "AttachedPolicies" + }, + "ListAttachedUserPolicies": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "AttachedPolicies" + }, + "ListEntitiesForPolicy": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": [ + "PolicyGroups", + "PolicyUsers", + "PolicyRoles" + ] + }, + "ListGroupPolicies": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "PolicyNames" + }, + "ListGroups": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "Groups" + }, + "ListGroupsForUser": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "Groups" + }, + "ListInstanceProfiles": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "InstanceProfiles" + }, + "ListInstanceProfilesForRole": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "InstanceProfiles" + }, + "ListMFADevices": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "MFADevices" + }, + "ListPolicies": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "Policies" + }, + "ListRolePolicies": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "PolicyNames" + }, + 
"ListRoles": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "Roles" + }, + "ListSAMLProviders": { + "result_key": "SAMLProviderList" + }, + "ListServerCertificates": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "ServerCertificateMetadataList" + }, + "ListSigningCertificates": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "Certificates" + }, + "ListUserPolicies": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "PolicyNames" + }, + "ListUsers": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "Users" + }, + "ListVirtualMFADevices": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "VirtualMFADevices" + }, + "GetAccountAuthorizationDetails": { + "input_token": "Marker", + "output_token": "Marker", + "more_results": "IsTruncated", + "limit_key": "MaxItems" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iam/2010-05-08/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,39 @@ +{ + "version": 2, + "waiters": { + "InstanceProfileExists": { + "delay": 1, + "operation": "GetInstanceProfile", + 
"maxAttempts": 40, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "state": "retry", + "matcher": "status", + "expected": 404 + } + ] + }, + "UserExists": { + "delay": 1, + "operation": "GetUser", + "maxAttempts": 20, + "acceptors": [ + { + "state": "success", + "matcher": "status", + "expected": 200 + }, + { + "state": "retry", + "matcher": "error", + "expected": "NoSuchEntity" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,656 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2010-06-01", + "endpointPrefix":"importexport", + "globalEndpoint":"importexport.amazonaws.com", + "serviceFullName":"AWS Import/Export", + "signatureVersion":"v2", + "xmlNamespace":"http://importexport.amazonaws.com/doc/2010-06-01/", + "protocol":"query" + }, + "operations":{ + "CancelJob":{ + "name":"CancelJob", + "http":{ + "method":"POST", + "requestUri":"/?Operation=CancelJob" + }, + "input":{"shape":"CancelJobInput"}, + "output":{ + "shape":"CancelJobOutput", + "resultWrapper":"CancelJobResult" + }, + "errors":[ + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"ExpiredJobIdException", + "exception":true + }, + { + "shape":"CanceledJobIdException", + "exception":true + }, + { + "shape":"UnableToCancelJobIdException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", 
+ "exception":true + } + ] + }, + "CreateJob":{ + "name":"CreateJob", + "http":{ + "method":"POST", + "requestUri":"/?Operation=CreateJob" + }, + "input":{"shape":"CreateJobInput"}, + "output":{ + "shape":"CreateJobOutput", + "resultWrapper":"CreateJobResult" + }, + "errors":[ + { + "shape":"MissingParameterException", + "exception":true + }, + { + "shape":"InvalidParameterException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidAddressException", + "exception":true + }, + { + "shape":"InvalidManifestFieldException", + "exception":true + }, + { + "shape":"MissingManifestFieldException", + "exception":true + }, + { + "shape":"NoSuchBucketException", + "exception":true + }, + { + "shape":"MissingCustomsException", + "exception":true + }, + { + "shape":"InvalidCustomsException", + "exception":true + }, + { + "shape":"InvalidFileSystemException", + "exception":true + }, + { + "shape":"MultipleRegionsException", + "exception":true + }, + { + "shape":"BucketPermissionException", + "exception":true + }, + { + "shape":"MalformedManifestException", + "exception":true + }, + { + "shape":"CreateJobQuotaExceededException", + "exception":true + }, + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + }, + "GetShippingLabel":{ + "name":"GetShippingLabel", + "http":{ + "method":"POST", + "requestUri":"/?Operation=GetShippingLabel" + }, + "input":{"shape":"GetShippingLabelInput"}, + "output":{ + "shape":"GetShippingLabelOutput", + "resultWrapper":"GetShippingLabelResult" + }, + "errors":[ + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"ExpiredJobIdException", + "exception":true + }, + { + "shape":"CanceledJobIdException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidAddressException", + "exception":true + }, + { + 
"shape":"InvalidVersionException", + "exception":true + }, + { + "shape":"InvalidParameterException", + "exception":true + } + ] + }, + "GetStatus":{ + "name":"GetStatus", + "http":{ + "method":"POST", + "requestUri":"/?Operation=GetStatus" + }, + "input":{"shape":"GetStatusInput"}, + "output":{ + "shape":"GetStatusOutput", + "resultWrapper":"GetStatusResult" + }, + "errors":[ + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"ExpiredJobIdException", + "exception":true + }, + { + "shape":"CanceledJobIdException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"POST", + "requestUri":"/?Operation=ListJobs" + }, + "input":{"shape":"ListJobsInput"}, + "output":{ + "shape":"ListJobsOutput", + "resultWrapper":"ListJobsResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + }, + "UpdateJob":{ + "name":"UpdateJob", + "http":{ + "method":"POST", + "requestUri":"/?Operation=UpdateJob" + }, + "input":{"shape":"UpdateJobInput"}, + "output":{ + "shape":"UpdateJobOutput", + "resultWrapper":"UpdateJobResult" + }, + "errors":[ + { + "shape":"MissingParameterException", + "exception":true + }, + { + "shape":"InvalidParameterException", + "exception":true + }, + { + "shape":"InvalidAccessKeyIdException", + "exception":true + }, + { + "shape":"InvalidAddressException", + "exception":true + }, + { + "shape":"InvalidManifestFieldException", + "exception":true + }, + { + "shape":"InvalidJobIdException", + "exception":true + }, + { + "shape":"MissingManifestFieldException", + "exception":true + }, + { + "shape":"NoSuchBucketException", + "exception":true + }, + { + "shape":"ExpiredJobIdException", + 
"exception":true + }, + { + "shape":"CanceledJobIdException", + "exception":true + }, + { + "shape":"MissingCustomsException", + "exception":true + }, + { + "shape":"InvalidCustomsException", + "exception":true + }, + { + "shape":"InvalidFileSystemException", + "exception":true + }, + { + "shape":"MultipleRegionsException", + "exception":true + }, + { + "shape":"BucketPermissionException", + "exception":true + }, + { + "shape":"MalformedManifestException", + "exception":true + }, + { + "shape":"UnableToUpdateJobIdException", + "exception":true + }, + { + "shape":"InvalidVersionException", + "exception":true + } + ] + } + }, + "shapes":{ + "APIVersion":{"type":"string"}, + "Artifact":{ + "type":"structure", + "members":{ + "Description":{"shape":"Description"}, + "URL":{"shape":"URL"} + } + }, + "ArtifactList":{ + "type":"list", + "member":{"shape":"Artifact"} + }, + "BucketPermissionException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "CancelJobInput":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"JobId"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "CancelJobOutput":{ + "type":"structure", + "members":{ + "Success":{"shape":"Success"} + } + }, + "CanceledJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Carrier":{"type":"string"}, + "CreateJobInput":{ + "type":"structure", + "required":[ + "JobType", + "Manifest", + "ValidateOnly" + ], + "members":{ + "JobType":{"shape":"JobType"}, + "Manifest":{"shape":"Manifest"}, + "ManifestAddendum":{"shape":"ManifestAddendum"}, + "ValidateOnly":{"shape":"ValidateOnly"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "CreateJobOutput":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobType":{"shape":"JobType"}, + "Signature":{"shape":"Signature"}, + "SignatureFileContents":{"shape":"SignatureFileContents"}, + 
"WarningMessage":{"shape":"WarningMessage"}, + "ArtifactList":{"shape":"ArtifactList"} + } + }, + "CreateJobQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "CreationDate":{"type":"timestamp"}, + "CurrentManifest":{"type":"string"}, + "Description":{"type":"string"}, + "ErrorCount":{"type":"integer"}, + "ErrorMessage":{"type":"string"}, + "ExpiredJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "GenericString":{"type":"string"}, + "GetShippingLabelInput":{ + "type":"structure", + "required":["jobIds"], + "members":{ + "jobIds":{"shape":"JobIdList"}, + "name":{"shape":"GenericString"}, + "company":{"shape":"GenericString"}, + "phoneNumber":{"shape":"GenericString"}, + "country":{"shape":"GenericString"}, + "stateOrProvince":{"shape":"GenericString"}, + "city":{"shape":"GenericString"}, + "postalCode":{"shape":"GenericString"}, + "street1":{"shape":"GenericString"}, + "street2":{"shape":"GenericString"}, + "street3":{"shape":"GenericString"}, + "APIVersion":{"shape":"GenericString"} + } + }, + "GetShippingLabelOutput":{ + "type":"structure", + "members":{ + "ShippingLabelURL":{"shape":"GenericString"}, + "Warning":{"shape":"GenericString"} + } + }, + "GetStatusInput":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"JobId"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "GetStatusOutput":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobType":{"shape":"JobType"}, + "LocationCode":{"shape":"LocationCode"}, + "LocationMessage":{"shape":"LocationMessage"}, + "ProgressCode":{"shape":"ProgressCode"}, + "ProgressMessage":{"shape":"ProgressMessage"}, + "Carrier":{"shape":"Carrier"}, + "TrackingNumber":{"shape":"TrackingNumber"}, + "LogBucket":{"shape":"LogBucket"}, + "LogKey":{"shape":"LogKey"}, + "ErrorCount":{"shape":"ErrorCount"}, + 
"Signature":{"shape":"Signature"}, + "SignatureFileContents":{"shape":"Signature"}, + "CurrentManifest":{"shape":"CurrentManifest"}, + "CreationDate":{"shape":"CreationDate"}, + "ArtifactList":{"shape":"ArtifactList"} + } + }, + "InvalidAccessKeyIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidAddressException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidCustomsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidFileSystemException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidManifestFieldException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidVersionException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "IsCanceled":{"type":"boolean"}, + "IsTruncated":{"type":"boolean"}, + "Job":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "CreationDate":{"shape":"CreationDate"}, + "IsCanceled":{"shape":"IsCanceled"}, + "JobType":{"shape":"JobType"} + } + }, + "JobId":{"type":"string"}, + "JobIdList":{ + "type":"list", + "member":{"shape":"GenericString"} + }, + "JobType":{ + "type":"string", + "enum":[ + "Import", + "Export" + ] + }, + "JobsList":{ + "type":"list", + "member":{"shape":"Job"} + }, + "ListJobsInput":{ + "type":"structure", + "members":{ + "MaxJobs":{"shape":"MaxJobs"}, + "Marker":{"shape":"Marker"}, + "APIVersion":{"shape":"APIVersion"} + } + }, + "ListJobsOutput":{ + 
"type":"structure", + "members":{ + "Jobs":{"shape":"JobsList"}, + "IsTruncated":{"shape":"IsTruncated"} + } + }, + "LocationCode":{"type":"string"}, + "LocationMessage":{"type":"string"}, + "LogBucket":{"type":"string"}, + "LogKey":{"type":"string"}, + "MalformedManifestException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Manifest":{"type":"string"}, + "ManifestAddendum":{"type":"string"}, + "Marker":{"type":"string"}, + "MaxJobs":{"type":"integer"}, + "MissingCustomsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "MissingManifestFieldException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "MissingParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "MultipleRegionsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "NoSuchBucketException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ProgressCode":{"type":"string"}, + "ProgressMessage":{"type":"string"}, + "Signature":{"type":"string"}, + "SignatureFileContents":{"type":"string"}, + "Success":{"type":"boolean"}, + "TrackingNumber":{"type":"string"}, + "URL":{"type":"string"}, + "UnableToCancelJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "UnableToUpdateJobIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "UpdateJobInput":{ + "type":"structure", + "required":[ + "JobId", + "Manifest", + "JobType", + "ValidateOnly" + ], + "members":{ + "JobId":{"shape":"JobId"}, + "Manifest":{"shape":"Manifest"}, + "JobType":{"shape":"JobType"}, + "ValidateOnly":{"shape":"ValidateOnly"}, + 
"APIVersion":{"shape":"APIVersion"} + } + }, + "UpdateJobOutput":{ + "type":"structure", + "members":{ + "Success":{"shape":"Success"}, + "WarningMessage":{"shape":"WarningMessage"}, + "ArtifactList":{"shape":"ArtifactList"} + } + }, + "ValidateOnly":{"type":"boolean"}, + "WarningMessage":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,432 @@ +{ + "version": "2.0", + "operations": { + "CancelJob": "This operation cancels a specified job. Only the job owner can cancel it. The operation fails if the job has already started or is complete.", + "CreateJob": "This operation initiates the process of scheduling an upload or download of your data. You include in the request a manifest that describes the data transfer specifics. The response to the request includes a job ID, which you can use in other operations, a signature that you use to identify your storage device, and the address where you should ship your storage device.", + "GetShippingLabel": "This operation generates a pre-paid UPS shipping label that you will use to ship your device to AWS for processing.", + "GetStatus": "This operation returns information about a job, including where the job is in the processing pipeline, the status of the results, and the signature value associated with the job. You can only return information about jobs you own.", + "ListJobs": "This operation returns the jobs associated with the requester. 
AWS Import/Export lists the jobs in reverse chronological order based on the date of creation. For example if Job Test1 was created 2009Dec30 and Test2 was created 2010Feb05, the ListJobs operation would return Test2 followed by Test1.", + "UpdateJob": "You use this operation to change the parameters specified in the original manifest file by supplying a new manifest file. The manifest file attached to this request replaces the original manifest file. You can only use the operation after a CreateJob request but before the data transfer starts and you can only use it on jobs you own." + }, + "service": "AWS Import/Export Service AWS Import/Export accelerates transferring large amounts of data between the AWS cloud and portable storage devices that you mail to us. AWS Import/Export transfers data directly onto and off of your storage devices using Amazon's high-speed internal network and bypassing the Internet. For large data sets, AWS Import/Export is often faster than Internet transfer and more cost effective than upgrading your connectivity.", + "shapes": { + "APIVersion": { + "base": "Specifies the version of the client tool.", + "refs": { + "CancelJobInput$APIVersion": null, + "CreateJobInput$APIVersion": null, + "GetStatusInput$APIVersion": null, + "ListJobsInput$APIVersion": null, + "UpdateJobInput$APIVersion": null + } + }, + "Artifact": { + "base": "A discrete item that contains the description and URL of an artifact (such as a PDF).", + "refs": { + "ArtifactList$member": null + } + }, + "ArtifactList": { + "base": "A collection of artifacts.", + "refs": { + "CreateJobOutput$ArtifactList": null, + "GetStatusOutput$ArtifactList": null, + "UpdateJobOutput$ArtifactList": null + } + }, + "BucketPermissionException": { + "base": "The account specified does not have the appropriate bucket permissions.", + "refs": { + } + }, + "CancelJobInput": { + "base": "Input structure for the CancelJob operation.", + "refs": { + } + }, + "CancelJobOutput": { + "base": "Output 
structure for the CancelJob operation.", + "refs": { + } + }, + "CanceledJobIdException": { + "base": "The specified job ID has been canceled and is no longer valid.", + "refs": { + } + }, + "Carrier": { + "base": "Name of the shipping company. This value is included when the LocationCode is \"Returned\".", + "refs": { + "GetStatusOutput$Carrier": null + } + }, + "CreateJobInput": { + "base": "Input structure for the CreateJob operation.", + "refs": { + } + }, + "CreateJobOutput": { + "base": "Output structure for the CreateJob operation.", + "refs": { + } + }, + "CreateJobQuotaExceededException": { + "base": "Each account can create only a certain number of jobs per day. If you need to create more than this, please contact awsimportexport@amazon.com to explain your particular use case.", + "refs": { + } + }, + "CreationDate": { + "base": "Timestamp of the CreateJob request in ISO8601 date format. For example \"2010-03-28T20:27:35Z\".", + "refs": { + "GetStatusOutput$CreationDate": null, + "Job$CreationDate": null + } + }, + "CurrentManifest": { + "base": "The last manifest submitted, which will be used to process the job.", + "refs": { + "GetStatusOutput$CurrentManifest": null + } + }, + "Description": { + "base": "The associated description for this object.", + "refs": { + "Artifact$Description": null + } + }, + "ErrorCount": { + "base": "Number of errors. 
We return this value when the ProgressCode is Success or SuccessWithErrors.", + "refs": { + "GetStatusOutput$ErrorCount": null + } + }, + "ErrorMessage": { + "base": "The human-readable description of a particular error.", + "refs": { + "BucketPermissionException$message": null, + "CanceledJobIdException$message": null, + "CreateJobQuotaExceededException$message": null, + "ExpiredJobIdException$message": null, + "InvalidAccessKeyIdException$message": null, + "InvalidAddressException$message": null, + "InvalidCustomsException$message": null, + "InvalidFileSystemException$message": null, + "InvalidJobIdException$message": null, + "InvalidManifestFieldException$message": null, + "InvalidParameterException$message": null, + "InvalidVersionException$message": null, + "MalformedManifestException$message": null, + "MissingCustomsException$message": null, + "MissingManifestFieldException$message": null, + "MissingParameterException$message": null, + "MultipleRegionsException$message": null, + "NoSuchBucketException$message": null, + "UnableToCancelJobIdException$message": null, + "UnableToUpdateJobIdException$message": null + } + }, + "ExpiredJobIdException": { + "base": "Indicates that the specified job has expired out of the system.", + "refs": { + } + }, + "GenericString": { + "base": null, + "refs": { + "GetShippingLabelInput$name": null, + "GetShippingLabelInput$company": null, + "GetShippingLabelInput$phoneNumber": null, + "GetShippingLabelInput$country": null, + "GetShippingLabelInput$stateOrProvince": null, + "GetShippingLabelInput$city": null, + "GetShippingLabelInput$postalCode": null, + "GetShippingLabelInput$street1": null, + "GetShippingLabelInput$street2": null, + "GetShippingLabelInput$street3": null, + "GetShippingLabelInput$APIVersion": null, + "GetShippingLabelOutput$ShippingLabelURL": null, + "GetShippingLabelOutput$Warning": null, + "JobIdList$member": null + } + }, + "GetShippingLabelInput": { + "base": null, + "refs": { + } + }, + 
"GetShippingLabelOutput": { + "base": null, + "refs": { + } + }, + "GetStatusInput": { + "base": "Input structure for the GetStatus operation.", + "refs": { + } + }, + "GetStatusOutput": { + "base": "Output structure for the GetStatus operation.", + "refs": { + } + }, + "InvalidAccessKeyIdException": { + "base": "The AWS Access Key ID specified in the request did not match the manifest's accessKeyId value. The manifest and the request authentication must use the same AWS Access Key ID.", + "refs": { + } + }, + "InvalidAddressException": { + "base": "The address specified in the manifest is invalid.", + "refs": { + } + }, + "InvalidCustomsException": { + "base": "One or more customs parameters was invalid. Please correct and resubmit.", + "refs": { + } + }, + "InvalidFileSystemException": { + "base": "File system specified in export manifest is invalid.", + "refs": { + } + }, + "InvalidJobIdException": { + "base": "The JOBID was missing, not found, or not associated with the AWS account.", + "refs": { + } + }, + "InvalidManifestFieldException": { + "base": "One or more manifest fields was invalid. Please correct and resubmit.", + "refs": { + } + }, + "InvalidParameterException": { + "base": "One or more parameters had an invalid value.", + "refs": { + } + }, + "InvalidVersionException": { + "base": "The client tool version is invalid.", + "refs": { + } + }, + "IsCanceled": { + "base": "Indicates whether the job was canceled.", + "refs": { + "Job$IsCanceled": null + } + }, + "IsTruncated": { + "base": "Indicates whether the list of jobs was truncated. 
If true, then call ListJobs again using the last JobId element as the marker.", + "refs": { + "ListJobsOutput$IsTruncated": null + } + }, + "Job": { + "base": "Representation of a job returned by the ListJobs operation.", + "refs": { + "JobsList$member": null + } + }, + "JobId": { + "base": "A unique identifier which refers to a particular job.", + "refs": { + "CancelJobInput$JobId": null, + "CreateJobOutput$JobId": null, + "GetStatusInput$JobId": null, + "GetStatusOutput$JobId": null, + "Job$JobId": null, + "UpdateJobInput$JobId": null + } + }, + "JobIdList": { + "base": null, + "refs": { + "GetShippingLabelInput$jobIds": null + } + }, + "JobType": { + "base": "Specifies whether the job to initiate is an import or export job.", + "refs": { + "CreateJobInput$JobType": null, + "CreateJobOutput$JobType": null, + "GetStatusOutput$JobType": null, + "Job$JobType": null, + "UpdateJobInput$JobType": null + } + }, + "JobsList": { + "base": "A list container for Jobs returned by the ListJobs operation.", + "refs": { + "ListJobsOutput$Jobs": null + } + }, + "ListJobsInput": { + "base": "Input structure for the ListJobs operation.", + "refs": { + } + }, + "ListJobsOutput": { + "base": "Output structure for the ListJobs operation.", + "refs": { + } + }, + "LocationCode": { + "base": "A token representing the location of the storage device, such as \"AtAWS\".", + "refs": { + "GetStatusOutput$LocationCode": null + } + }, + "LocationMessage": { + "base": "A more human readable form of the physical location of the storage device.", + "refs": { + "GetStatusOutput$LocationMessage": null + } + }, + "LogBucket": { + "base": "Amazon S3 bucket for user logs.", + "refs": { + "GetStatusOutput$LogBucket": null + } + }, + "LogKey": { + "base": "The key where the user logs were stored.", + "refs": { + "GetStatusOutput$LogKey": null + } + }, + "MalformedManifestException": { + "base": "Your manifest is not well-formed.", + "refs": { + } + }, + "Manifest": { + "base": "The UTF-8 encoded text 
of the manifest file.", + "refs": { + "CreateJobInput$Manifest": null, + "UpdateJobInput$Manifest": null + } + }, + "ManifestAddendum": { + "base": "For internal use only.", + "refs": { + "CreateJobInput$ManifestAddendum": null + } + }, + "Marker": { + "base": "Specifies the JOBID to start after when listing the jobs created with your account. AWS Import/Export lists your jobs in reverse chronological order. See MaxJobs.", + "refs": { + "ListJobsInput$Marker": null + } + }, + "MaxJobs": { + "base": "Sets the maximum number of jobs returned in the response. If there are additional jobs that were not returned because MaxJobs was exceeded, the response contains <IsTruncated>true</IsTruncated>. To return the additional jobs, see Marker.", + "refs": { + "ListJobsInput$MaxJobs": null + } + }, + "MissingCustomsException": { + "base": "One or more required customs parameters was missing from the manifest.", + "refs": { + } + }, + "MissingManifestFieldException": { + "base": "One or more required fields were missing from the manifest file. Please correct and resubmit.", + "refs": { + } + }, + "MissingParameterException": { + "base": "One or more required parameters was missing from the request.", + "refs": { + } + }, + "MultipleRegionsException": { + "base": "Your manifest file contained buckets from multiple regions. A job is restricted to buckets from one region. Please correct and resubmit.", + "refs": { + } + }, + "NoSuchBucketException": { + "base": "The specified bucket does not exist. 
Create the specified bucket or change the manifest's bucket, exportBucket, or logBucket field to a bucket that the account, as specified by the manifest's Access Key ID, has write permissions to.", + "refs": { + } + }, + "ProgressCode": { + "base": "A token representing the state of the job, such as \"Started\".", + "refs": { + "GetStatusOutput$ProgressCode": null + } + }, + "ProgressMessage": { + "base": "A more human readable form of the job status.", + "refs": { + "GetStatusOutput$ProgressMessage": null + } + }, + "Signature": { + "base": "An encrypted code used to authenticate the request and response, for example, \"DV+TpDfx1/TdSE9ktyK9k/bDTVI=\". Only use this value is you want to create the signature file yourself. Generally you should use the SignatureFileContents value.", + "refs": { + "CreateJobOutput$Signature": null, + "GetStatusOutput$Signature": null, + "GetStatusOutput$SignatureFileContents": null + } + }, + "SignatureFileContents": { + "base": "The actual text of the SIGNATURE file to be written to disk.", + "refs": { + "CreateJobOutput$SignatureFileContents": null + } + }, + "Success": { + "base": "Specifies whether (true) or not (false) AWS Import/Export updated your job.", + "refs": { + "CancelJobOutput$Success": null, + "UpdateJobOutput$Success": null + } + }, + "TrackingNumber": { + "base": "The shipping tracking number assigned by AWS Import/Export to the storage device when it's returned to you. 
We return this value when the LocationCode is \"Returned\".", + "refs": { + "GetStatusOutput$TrackingNumber": null + } + }, + "URL": { + "base": "The URL for a given Artifact.", + "refs": { + "Artifact$URL": null + } + }, + "UnableToCancelJobIdException": { + "base": "AWS Import/Export cannot cancel the job", + "refs": { + } + }, + "UnableToUpdateJobIdException": { + "base": "AWS Import/Export cannot update the job", + "refs": { + } + }, + "UpdateJobInput": { + "base": "Input structure for the UpateJob operation.", + "refs": { + } + }, + "UpdateJobOutput": { + "base": "Output structure for the UpateJob operation.", + "refs": { + } + }, + "ValidateOnly": { + "base": "Validate the manifest and parameter values in the request but do not actually create a job.", + "refs": { + "CreateJobInput$ValidateOnly": null, + "UpdateJobInput$ValidateOnly": null + } + }, + "WarningMessage": { + "base": "An optional message notifying you of non-fatal issues with the job, such as use of an incompatible Amazon S3 bucket name.", + "refs": { + "CreateJobOutput$WarningMessage": null, + "UpdateJobOutput$WarningMessage": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/importexport/2010-06-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "pagination": { + "ListJobs": { + "input_token": "Marker", + "output_token": "Jobs[-1].JobId", + "more_results": "IsTruncated", + "limit_key": "MaxJobs", + "result_key": "Jobs" + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1426 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-08-18", + "endpointPrefix":"inspector", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Inspector", + "signatureVersion":"v4", + "targetPrefix":"InspectorService" + }, + "operations":{ + "AddAttributesToFindings":{ + "name":"AddAttributesToFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddAttributesToFindingsRequest"}, + "output":{"shape":"AddAttributesToFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "AttachAssessmentAndRulesPackage":{ + "name":"AttachAssessmentAndRulesPackage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachAssessmentAndRulesPackageRequest"}, + "output":{"shape":"AttachAssessmentAndRulesPackageResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationRequest"}, + "output":{"shape":"CreateApplicationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + 
{"shape":"NoSuchEntityException"} + ] + }, + "CreateAssessment":{ + "name":"CreateAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAssessmentRequest"}, + "output":{"shape":"CreateAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "CreateResourceGroup":{ + "name":"CreateResourceGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateResourceGroupRequest"}, + "output":{"shape":"CreateResourceGroupResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"} + ] + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApplicationRequest"}, + "output":{"shape":"DeleteApplicationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DeleteAssessment":{ + "name":"DeleteAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAssessmentRequest"}, + "output":{"shape":"DeleteAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationInProgressException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DeleteRun":{ + "name":"DeleteRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRunRequest"}, + "output":{"shape":"DeleteRunResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeApplication":{ + "name":"DescribeApplication", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicationRequest"}, + "output":{"shape":"DescribeApplicationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeAssessment":{ + "name":"DescribeAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssessmentRequest"}, + "output":{"shape":"DescribeAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeCrossAccountAccessRole":{ + "name":"DescribeCrossAccountAccessRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"DescribeCrossAccountAccessRoleResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"AccessDeniedException"} + ] + }, + "DescribeFinding":{ + "name":"DescribeFinding", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFindingRequest"}, + "output":{"shape":"DescribeFindingResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeResourceGroup":{ + "name":"DescribeResourceGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourceGroupRequest"}, + "output":{"shape":"DescribeResourceGroupResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeRulesPackage":{ + "name":"DescribeRulesPackage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRulesPackageRequest"}, + "output":{"shape":"DescribeRulesPackageResponse"}, + "errors":[ + {"shape":"InternalException"}, + 
{"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DescribeRun":{ + "name":"DescribeRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRunRequest"}, + "output":{"shape":"DescribeRunResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "DetachAssessmentAndRulesPackage":{ + "name":"DetachAssessmentAndRulesPackage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachAssessmentAndRulesPackageRequest"}, + "output":{"shape":"DetachAssessmentAndRulesPackageResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "GetAssessmentTelemetry":{ + "name":"GetAssessmentTelemetry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAssessmentTelemetryRequest"}, + "output":{"shape":"GetAssessmentTelemetryResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListApplications":{ + "name":"ListApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApplicationsRequest"}, + "output":{"shape":"ListApplicationsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListAssessmentAgents":{ + "name":"ListAssessmentAgents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssessmentAgentsRequest"}, + "output":{"shape":"ListAssessmentAgentsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + 
"ListAssessments":{ + "name":"ListAssessments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssessmentsRequest"}, + "output":{"shape":"ListAssessmentsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListAttachedAssessments":{ + "name":"ListAttachedAssessments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedAssessmentsRequest"}, + "output":{"shape":"ListAttachedAssessmentsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListAttachedRulesPackages":{ + "name":"ListAttachedRulesPackages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttachedRulesPackagesRequest"}, + "output":{"shape":"ListAttachedRulesPackagesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListFindings":{ + "name":"ListFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFindingsRequest"}, + "output":{"shape":"ListFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListRulesPackages":{ + "name":"ListRulesPackages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRulesPackagesRequest"}, + "output":{"shape":"ListRulesPackagesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ListRuns":{ + "name":"ListRuns", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRunsRequest"}, + 
"output":{"shape":"ListRunsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "LocalizeText":{ + "name":"LocalizeText", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"LocalizeTextRequest"}, + "output":{"shape":"LocalizeTextResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "PreviewAgentsForResourceGroup":{ + "name":"PreviewAgentsForResourceGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PreviewAgentsForResourceGroupRequest"}, + "output":{"shape":"PreviewAgentsForResourceGroupResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidCrossAccountRoleException"} + ] + }, + "RegisterCrossAccountAccessRole":{ + "name":"RegisterCrossAccountAccessRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterCrossAccountAccessRoleRequest"}, + "output":{"shape":"RegisterCrossAccountAccessRoleResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidCrossAccountRoleException"} + ] + }, + "RemoveAttributesFromFindings":{ + "name":"RemoveAttributesFromFindings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RemoveAttributesFromFindingsRequest"}, + "output":{"shape":"RemoveAttributesFromFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "RunAssessment":{ + "name":"RunAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunAssessmentRequest"}, + "output":{"shape":"RunAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "SetTagsForResource":{ + "name":"SetTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTagsForResourceRequest"}, + "output":{"shape":"SetTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "StartDataCollection":{ + "name":"StartDataCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDataCollectionRequest"}, + "output":{"shape":"StartDataCollectionResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidCrossAccountRoleException"} + ] + }, + "StopDataCollection":{ + "name":"StopDataCollection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDataCollectionRequest"}, + "output":{"shape":"StopDataCollectionResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApplicationRequest"}, + 
"output":{"shape":"UpdateApplicationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + }, + "UpdateAssessment":{ + "name":"UpdateAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAssessmentRequest"}, + "output":{"shape":"UpdateAssessmentResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AddAttributesToFindingsRequest":{ + "type":"structure", + "required":[ + "findingArns", + "attributes" + ], + "members":{ + "findingArns":{"shape":"ArnList"}, + "attributes":{"shape":"AttributeList"} + } + }, + "AddAttributesToFindingsResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Agent":{ + "type":"structure", + "members":{ + "agentId":{"shape":"AgentId"}, + "assessmentArn":{"shape":"Arn"}, + "agentHealth":{"shape":"AgentHealth"}, + "agentHealthCode":{"shape":"AgentHealthCode"}, + "agentHealthDetails":{"shape":"AgentHealthDetails"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"}, + "accountId":{"shape":"AwsAccount"}, + "telemetry":{"shape":"TelemetryList"} + } + }, + "AgentHealth":{"type":"string"}, + "AgentHealthCode":{"type":"string"}, + "AgentHealthDetails":{"type":"string"}, + "AgentHealthList":{ + "type":"list", + "member":{"shape":"AgentHealth"} + }, + "AgentId":{"type":"string"}, + "AgentList":{ + "type":"list", + "member":{"shape":"Agent"} + }, + "AgentPreview":{ + "type":"structure", + "members":{ + "agentId":{"shape":"AgentId"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"} + } + }, + "AgentPreviewList":{ + "type":"list", + "member":{"shape":"AgentPreview"} + }, + "AgentsFilter":{ + "type":"structure", + "members":{ + 
"agentHealthList":{"shape":"AgentHealthList"} + } + }, + "Application":{ + "type":"structure", + "members":{ + "applicationArn":{"shape":"Arn"}, + "applicationName":{"shape":"Name"}, + "resourceGroupArn":{"shape":"Arn"} + } + }, + "ApplicationsFilter":{ + "type":"structure", + "members":{ + "applicationNamePatterns":{"shape":"NamePatternList"} + } + }, + "Arn":{"type":"string"}, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "Assessment":{ + "type":"structure", + "members":{ + "assessmentArn":{"shape":"Arn"}, + "assessmentName":{"shape":"Name"}, + "applicationArn":{"shape":"Arn"}, + "assessmentState":{"shape":"AssessmentState"}, + "failureMessage":{"shape":"FailureMessage"}, + "dataCollected":{"shape":"Bool"}, + "startTime":{"shape":"Timestamp"}, + "endTime":{"shape":"Timestamp"}, + "durationInSeconds":{"shape":"Duration"}, + "userAttributesForFindings":{"shape":"AttributeList"} + } + }, + "AssessmentState":{"type":"string"}, + "AssessmentStateList":{ + "type":"list", + "member":{"shape":"AssessmentState"} + }, + "AssessmentsFilter":{ + "type":"structure", + "members":{ + "assessmentNamePatterns":{"shape":"NamePatternList"}, + "assessmentStates":{"shape":"AssessmentStateList"}, + "dataCollected":{"shape":"Bool"}, + "startTimeRange":{"shape":"TimestampRange"}, + "endTimeRange":{"shape":"TimestampRange"}, + "durationRange":{"shape":"DurationRange"} + } + }, + "AttachAssessmentAndRulesPackageRequest":{ + "type":"structure", + "required":[ + "assessmentArn", + "rulesPackageArn" + ], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "rulesPackageArn":{"shape":"Arn"} + } + }, + "AttachAssessmentAndRulesPackageResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Attribute":{ + "type":"structure", + "members":{ + "key":{"shape":"AttributeKey"}, + "value":{"shape":"AttributeValue"} + } + }, + "AttributeKey":{"type":"string"}, + "AttributeKeyList":{ + "type":"list", + "member":{"shape":"AttributeKey"} + }, + 
"AttributeList":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "AttributeValue":{"type":"string"}, + "AutoScalingGroup":{"type":"string"}, + "AwsAccount":{"type":"string"}, + "Bool":{"type":"boolean"}, + "CreateApplicationRequest":{ + "type":"structure", + "required":[ + "applicationName", + "resourceGroupArn" + ], + "members":{ + "applicationName":{"shape":"Name"}, + "resourceGroupArn":{"shape":"Arn"} + } + }, + "CreateApplicationResponse":{ + "type":"structure", + "members":{ + "applicationArn":{"shape":"Arn"} + } + }, + "CreateAssessmentRequest":{ + "type":"structure", + "required":[ + "applicationArn", + "assessmentName", + "durationInSeconds" + ], + "members":{ + "applicationArn":{"shape":"Arn"}, + "assessmentName":{"shape":"Name"}, + "durationInSeconds":{"shape":"Duration"}, + "userAttributesForFindings":{"shape":"AttributeList"} + } + }, + "CreateAssessmentResponse":{ + "type":"structure", + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "CreateResourceGroupRequest":{ + "type":"structure", + "required":["resourceGroupTags"], + "members":{ + "resourceGroupTags":{"shape":"ResourceGroupTags"} + } + }, + "CreateResourceGroupResponse":{ + "type":"structure", + "members":{ + "resourceGroupArn":{"shape":"Arn"} + } + }, + "DeleteApplicationRequest":{ + "type":"structure", + "required":["applicationArn"], + "members":{ + "applicationArn":{"shape":"Arn"} + } + }, + "DeleteApplicationResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "DeleteAssessmentRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "DeleteAssessmentResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "DeleteRunRequest":{ + "type":"structure", + "required":["runArn"], + "members":{ + "runArn":{"shape":"Arn"} + } + }, + "DeleteRunResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + 
"DescribeApplicationRequest":{ + "type":"structure", + "required":["applicationArn"], + "members":{ + "applicationArn":{"shape":"Arn"} + } + }, + "DescribeApplicationResponse":{ + "type":"structure", + "members":{ + "application":{"shape":"Application"} + } + }, + "DescribeAssessmentRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "DescribeAssessmentResponse":{ + "type":"structure", + "members":{ + "assessment":{"shape":"Assessment"} + } + }, + "DescribeCrossAccountAccessRoleResponse":{ + "type":"structure", + "members":{ + "roleArn":{"shape":"Arn"}, + "valid":{"shape":"Bool"} + } + }, + "DescribeFindingRequest":{ + "type":"structure", + "required":["findingArn"], + "members":{ + "findingArn":{"shape":"Arn"} + } + }, + "DescribeFindingResponse":{ + "type":"structure", + "members":{ + "finding":{"shape":"Finding"} + } + }, + "DescribeResourceGroupRequest":{ + "type":"structure", + "required":["resourceGroupArn"], + "members":{ + "resourceGroupArn":{"shape":"Arn"} + } + }, + "DescribeResourceGroupResponse":{ + "type":"structure", + "members":{ + "resourceGroup":{"shape":"ResourceGroup"} + } + }, + "DescribeRulesPackageRequest":{ + "type":"structure", + "required":["rulesPackageArn"], + "members":{ + "rulesPackageArn":{"shape":"Arn"} + } + }, + "DescribeRulesPackageResponse":{ + "type":"structure", + "members":{ + "rulesPackage":{"shape":"RulesPackage"} + } + }, + "DescribeRunRequest":{ + "type":"structure", + "required":["runArn"], + "members":{ + "runArn":{"shape":"Arn"} + } + }, + "DescribeRunResponse":{ + "type":"structure", + "members":{ + "run":{"shape":"Run"} + } + }, + "DetachAssessmentAndRulesPackageRequest":{ + "type":"structure", + "required":[ + "assessmentArn", + "rulesPackageArn" + ], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "rulesPackageArn":{"shape":"Arn"} + } + }, + "DetachAssessmentAndRulesPackageResponse":{ + "type":"structure", + "members":{ + 
"message":{"shape":"Message"} + } + }, + "Duration":{"type":"integer"}, + "DurationRange":{ + "type":"structure", + "members":{ + "minimum":{"shape":"Duration"}, + "maximum":{"shape":"Duration"} + } + }, + "FailureMessage":{"type":"string"}, + "Finding":{ + "type":"structure", + "members":{ + "findingArn":{"shape":"Arn"}, + "runArn":{"shape":"Arn"}, + "rulesPackageArn":{"shape":"Arn"}, + "ruleName":{"shape":"Name"}, + "agentId":{"shape":"AgentId"}, + "autoScalingGroup":{"shape":"AutoScalingGroup"}, + "severity":{"shape":"Severity"}, + "finding":{"shape":"LocalizedText"}, + "description":{"shape":"LocalizedText"}, + "recommendation":{"shape":"LocalizedText"}, + "attributes":{"shape":"AttributeList"}, + "userAttributes":{"shape":"AttributeList"} + } + }, + "FindingsFilter":{ + "type":"structure", + "members":{ + "rulesPackageArns":{"shape":"ArnList"}, + "ruleNames":{"shape":"NameList"}, + "severities":{"shape":"SeverityList"}, + "attributes":{"shape":"AttributeList"}, + "userAttributes":{"shape":"AttributeList"} + } + }, + "GetAssessmentTelemetryRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "GetAssessmentTelemetryResponse":{ + "type":"structure", + "members":{ + "telemetry":{"shape":"TelemetryList"} + } + }, + "Integer":{"type":"integer"}, + "InternalException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "InvalidCrossAccountRoleException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListApplicationsRequest":{ + "type":"structure", + "members":{ + "filter":{"shape":"ApplicationsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListApplicationsResponse":{ + "type":"structure", + "members":{ + "applicationArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + 
"ListAssessmentAgentsRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "filter":{"shape":"AgentsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListAssessmentAgentsResponse":{ + "type":"structure", + "members":{ + "agentList":{"shape":"AgentList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAssessmentsRequest":{ + "type":"structure", + "members":{ + "applicationArns":{"shape":"ArnList"}, + "filter":{"shape":"AssessmentsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListAssessmentsResponse":{ + "type":"structure", + "members":{ + "assessmentArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAttachedAssessmentsRequest":{ + "type":"structure", + "required":["rulesPackageArn"], + "members":{ + "rulesPackageArn":{"shape":"Arn"}, + "filter":{"shape":"AssessmentsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListAttachedAssessmentsResponse":{ + "type":"structure", + "members":{ + "assessmentArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListAttachedRulesPackagesRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListAttachedRulesPackagesResponse":{ + "type":"structure", + "members":{ + "rulesPackageArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListFindingsRequest":{ + "type":"structure", + "members":{ + "runArns":{"shape":"ArnList"}, + "filter":{"shape":"FindingsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListFindingsResponse":{ + "type":"structure", + "members":{ + "findingArnList":{"shape":"ArnList"}, + 
"nextToken":{"shape":"PaginationToken"} + } + }, + "ListRulesPackagesRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListRulesPackagesResponse":{ + "type":"structure", + "members":{ + "rulesPackageArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListRunsRequest":{ + "type":"structure", + "members":{ + "assessmentArns":{"shape":"ArnList"}, + "filter":{"shape":"RunsFilter"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "ListRunsResponse":{ + "type":"structure", + "members":{ + "runArnList":{"shape":"ArnList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{"shape":"Arn"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tagList":{"shape":"TagList"} + } + }, + "Locale":{"type":"string"}, + "LocalizeTextRequest":{ + "type":"structure", + "required":[ + "localizedTexts", + "locale" + ], + "members":{ + "localizedTexts":{"shape":"LocalizedTextList"}, + "locale":{"shape":"Locale"} + } + }, + "LocalizeTextResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"}, + "results":{"shape":"TextList"} + } + }, + "LocalizedFacility":{"type":"string"}, + "LocalizedText":{ + "type":"structure", + "members":{ + "key":{"shape":"LocalizedTextKey"}, + "parameters":{"shape":"ParameterList"} + } + }, + "LocalizedTextId":{"type":"string"}, + "LocalizedTextKey":{ + "type":"structure", + "members":{ + "facility":{"shape":"LocalizedFacility"}, + "id":{"shape":"LocalizedTextId"} + } + }, + "LocalizedTextList":{ + "type":"list", + "member":{"shape":"LocalizedText"} + }, + "Long":{"type":"long"}, + "Message":{"type":"string"}, + "MessageType":{"type":"string"}, + "MessageTypeTelemetry":{ + "type":"structure", + "members":{ + 
"messageType":{"shape":"MessageType"}, + "count":{"shape":"Long"}, + "dataSize":{"shape":"Long"} + } + }, + "MessageTypeTelemetryList":{ + "type":"list", + "member":{"shape":"MessageTypeTelemetry"} + }, + "Name":{"type":"string"}, + "NameList":{ + "type":"list", + "member":{"shape":"Name"} + }, + "NamePattern":{"type":"string"}, + "NamePatternList":{ + "type":"list", + "member":{"shape":"NamePattern"} + }, + "NoSuchEntityException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OperationInProgressException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PaginationToken":{"type":"string"}, + "Parameter":{ + "type":"structure", + "members":{ + "name":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValue"} + } + }, + "ParameterList":{ + "type":"list", + "member":{"shape":"Parameter"} + }, + "ParameterName":{"type":"string"}, + "ParameterValue":{"type":"string"}, + "PreviewAgentsForResourceGroupRequest":{ + "type":"structure", + "required":["resourceGroupArn"], + "members":{ + "resourceGroupArn":{"shape":"Arn"}, + "nextToken":{"shape":"PaginationToken"}, + "maxResults":{"shape":"Integer"} + } + }, + "PreviewAgentsForResourceGroupResponse":{ + "type":"structure", + "members":{ + "agentPreviewList":{"shape":"AgentPreviewList"}, + "nextToken":{"shape":"PaginationToken"} + } + }, + "RegisterCrossAccountAccessRoleRequest":{ + "type":"structure", + "required":["roleArn"], + "members":{ + "roleArn":{"shape":"Arn"} + } + }, + "RegisterCrossAccountAccessRoleResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "RemoveAttributesFromFindingsRequest":{ + "type":"structure", + "required":[ + "findingArns", + "attributeKeys" + ], + "members":{ + "findingArns":{"shape":"ArnList"}, + "attributeKeys":{"shape":"AttributeKeyList"} + } + }, + "RemoveAttributesFromFindingsResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "ResourceGroup":{ + "type":"structure", 
+ "members":{ + "resourceGroupArn":{"shape":"Arn"}, + "resourceGroupTags":{"shape":"ResourceGroupTags"} + } + }, + "ResourceGroupTags":{"type":"string"}, + "RulesPackage":{ + "type":"structure", + "members":{ + "rulesPackageArn":{"shape":"Arn"}, + "rulesPackageName":{"shape":"Name"}, + "version":{"shape":"Version"}, + "provider":{"shape":"Name"}, + "description":{"shape":"LocalizedText"} + } + }, + "Run":{ + "type":"structure", + "members":{ + "runArn":{"shape":"Arn"}, + "runName":{"shape":"Name"}, + "assessmentArn":{"shape":"Arn"}, + "runState":{"shape":"RunState"}, + "rulesPackages":{"shape":"ArnList"}, + "creationTime":{"shape":"Timestamp"}, + "completionTime":{"shape":"Timestamp"} + } + }, + "RunAssessmentRequest":{ + "type":"structure", + "required":[ + "assessmentArn", + "runName" + ], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "runName":{"shape":"Name"} + } + }, + "RunAssessmentResponse":{ + "type":"structure", + "members":{ + "runArn":{"shape":"Arn"} + } + }, + "RunState":{"type":"string"}, + "RunStateList":{ + "type":"list", + "member":{"shape":"RunState"} + }, + "RunsFilter":{ + "type":"structure", + "members":{ + "runNamePatterns":{"shape":"NamePatternList"}, + "runStates":{"shape":"RunStateList"}, + "rulesPackages":{"shape":"ArnList"}, + "creationTime":{"shape":"TimestampRange"}, + "completionTime":{"shape":"TimestampRange"} + } + }, + "SetTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{"shape":"Arn"}, + "tags":{"shape":"TagList"} + } + }, + "SetTagsForResourceResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Severity":{"type":"string"}, + "SeverityList":{ + "type":"list", + "member":{"shape":"Severity"} + }, + "StartDataCollectionRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "StartDataCollectionResponse":{ + "type":"structure", + "members":{ + 
"message":{"shape":"Message"} + } + }, + "StopDataCollectionRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{"shape":"Arn"} + } + }, + "StopDataCollectionResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{"type":"string"}, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{"type":"string"}, + "Telemetry":{ + "type":"structure", + "members":{ + "status":{"shape":"TelemetryStatus"}, + "messageTypeTelemetries":{"shape":"MessageTypeTelemetryList"} + } + }, + "TelemetryList":{ + "type":"list", + "member":{"shape":"Telemetry"} + }, + "TelemetryStatus":{"type":"string"}, + "Text":{"type":"string"}, + "TextList":{ + "type":"list", + "member":{"shape":"Text"} + }, + "Timestamp":{"type":"timestamp"}, + "TimestampRange":{ + "type":"structure", + "members":{ + "minimum":{"shape":"Timestamp"}, + "maximum":{"shape":"Timestamp"} + } + }, + "UpdateApplicationRequest":{ + "type":"structure", + "required":[ + "applicationArn", + "applicationName", + "resourceGroupArn" + ], + "members":{ + "applicationArn":{"shape":"Arn"}, + "applicationName":{"shape":"Name"}, + "resourceGroupArn":{"shape":"Arn"} + } + }, + "UpdateApplicationResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "UpdateAssessmentRequest":{ + "type":"structure", + "required":[ + "assessmentArn", + "assessmentName", + "durationInSeconds" + ], + "members":{ + "assessmentArn":{"shape":"Arn"}, + "assessmentName":{"shape":"Name"}, + "durationInSeconds":{"shape":"Duration"} + } + }, + "UpdateAssessmentResponse":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + } + }, + "Version":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/docs-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1016 @@ +{ + "version": "2.0", + "service": "Amazon Inspector

    Amazon Inspector enables you to analyze the behavior of the applications you run in AWS and to identify potential security issues. For more information, see Amazon Inspector User Guide.

    ", + "operations": { + "AddAttributesToFindings": "

    Assigns attributes (key and value pair) to the findings specified by the findings' ARNs.

    ", + "AttachAssessmentAndRulesPackage": "

    Attaches the rules package specified by the rules package ARN to the assessment specified by the assessment ARN.

    ", + "CreateApplication": "

    Creates a new application using the resource group ARN generated by CreateResourceGroup. You can create up to 50 applications per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Inspector Applications.

    ", + "CreateAssessment": "

    Creates an assessment for the application specified by the application ARN. You can create up to 500 assessments per AWS account.

    ", + "CreateResourceGroup": "

    Creates a resource group using the specified set of tags (key and value pairs) that are used to select the EC2 instances to be included in an Inspector application. The created resource group is then used to create an Inspector application.

    ", + "DeleteApplication": "

    Deletes the application specified by the application ARN.

    ", + "DeleteAssessment": "

    Deletes the assessment specified by the assessment ARN.

    ", + "DeleteRun": "

    Deletes the assessment run specified by the run ARN.

    ", + "DescribeApplication": "

    Describes the application specified by the application ARN.

    ", + "DescribeAssessment": "

    Describes the assessment specified by the assessment ARN.

    ", + "DescribeCrossAccountAccessRole": "

    Describes the IAM role that enables Inspector to access your AWS account.

    ", + "DescribeFinding": "

    Describes the finding specified by the finding ARN.

    ", + "DescribeResourceGroup": "

    Describes the resource group specified by the resource group ARN.

    ", + "DescribeRulesPackage": "

    Describes the rules package specified by the rules package ARN.

    ", + "DescribeRun": "

    Describes the assessment run specified by the run ARN.

    ", + "DetachAssessmentAndRulesPackage": "

    Detaches the rules package specified by the rules package ARN from the assessment specified by the assessment ARN.

    ", + "GetAssessmentTelemetry": "

    Returns the metadata about the telemetry (application behavioral data) for the assessment specified by the assessment ARN.

    ", + "ListApplications": "

    Lists the ARNs of the applications within this AWS account. For more information about applications, see Inspector Applications.

    ", + "ListAssessmentAgents": "

    Lists the agents of the assessment specified by the assessment ARN.

    ", + "ListAssessments": "

    Lists the assessments corresponding to applications specified by the applications' ARNs.

    ", + "ListAttachedAssessments": "

    Lists the assessments attached to the rules package specified by the rules package ARN.

    ", + "ListAttachedRulesPackages": "

    Lists the rules packages attached to the assessment specified by the assessment ARN.

    ", + "ListFindings": "

    Lists findings generated by the assessment run specified by the run ARNs.

    ", + "ListRulesPackages": "

    Lists all available Inspector rules packages.

    ", + "ListRuns": "

    Lists the assessment runs associated with the assessments specified by the assessment ARNs.

    ", + "ListTagsForResource": "

    Lists all tags associated with a resource.

    ", + "LocalizeText": "

    Translates a textual identifier into a user-readable text in a specified locale.

    ", + "PreviewAgentsForResourceGroup": "

    Previews the agents installed on the EC2 instances that are included in the application created with the specified resource group.

    ", + "RegisterCrossAccountAccessRole": "

    Register the role that Inspector uses to list your EC2 instances during the assessment.

    ", + "RemoveAttributesFromFindings": "

    Removes the entire attribute (key and value pair) from the findings specified by the finding ARNs where an attribute with the specified key exists.

    ", + "RunAssessment": "

    Starts the analysis of the application’s behavior against selected rule packages for the assessment specified by the assessment ARN.

    ", + "SetTagsForResource": "

    Sets tags (key and value pairs) to the assessment specified by the assessment ARN.

    ", + "StartDataCollection": "

    Starts data collection for the assessment specified by the assessment ARN. For this API to function properly, you must not exceed the limit of running up to 500 concurrent agents per AWS account.

    ", + "StopDataCollection": "

    Stop data collection for the assessment specified by the assessment ARN.

    ", + "UpdateApplication": "

    Updates application specified by the application ARN.

    ", + "UpdateAssessment": "

    Updates the assessment specified by the assessment ARN.

    " + }, + "shapes": { + "AccessDeniedException": { + "base": null, + "refs": { + } + }, + "AddAttributesToFindingsRequest": { + "base": null, + "refs": { + } + }, + "AddAttributesToFindingsResponse": { + "base": null, + "refs": { + } + }, + "Agent": { + "base": "

    Contains information about an Inspector agent. This data type is used as a response element in the ListAssessmentAgents action.

    ", + "refs": { + "AgentList$member": null + } + }, + "AgentHealth": { + "base": null, + "refs": { + "Agent$agentHealth": "

    The current health state of the agent. Values can be set to HEALTHY or UNHEALTHY.

    ", + "AgentHealthList$member": null + } + }, + "AgentHealthCode": { + "base": null, + "refs": { + "Agent$agentHealthCode": "

    The detailed health state of the agent. Values can be set to RUNNING, HEALTHY, UNHEALTHY, UNKNOWN, BLACKLISTED, SHUTDOWN, THROTTLED.

    " + } + }, + "AgentHealthDetails": { + "base": null, + "refs": { + "Agent$agentHealthDetails": "

    The description for the agent health code.

    " + } + }, + "AgentHealthList": { + "base": null, + "refs": { + "AgentsFilter$agentHealthList": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the agentHealth property of the Agent data type.

    " + } + }, + "AgentId": { + "base": null, + "refs": { + "Agent$agentId": "

    The EC2 instance ID where the agent is installed.

    ", + "AgentPreview$agentId": "

    The id of the EC2 instance where the agent is intalled.

    ", + "Finding$agentId": "

    The EC2 instance ID where the agent is installed that is used during the assessment that generates the finding.

    " + } + }, + "AgentList": { + "base": null, + "refs": { + "ListAssessmentAgentsResponse$agentList": "

    A list of ARNs specifying the agents returned by the action.

    " + } + }, + "AgentPreview": { + "base": "

    This data type is used as a response element in the PreviewAgentsForResourceGroup action.

    ", + "refs": { + "AgentPreviewList$member": null + } + }, + "AgentPreviewList": { + "base": null, + "refs": { + "PreviewAgentsForResourceGroupResponse$agentPreviewList": "

    The resulting list of agents.

    " + } + }, + "AgentsFilter": { + "base": "

    This data type is used as a response element in the ListAssessmentAgents action.

    ", + "refs": { + "ListAssessmentAgentsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "Application": { + "base": "

    Contains information about an Inspector application.

    This data type is used as the response element in the DescribeApplication action.

    ", + "refs": { + "DescribeApplicationResponse$application": "

    Information about the application.

    " + } + }, + "ApplicationsFilter": { + "base": "

    This data type is used as the request parameter in the ListApplications action.

    ", + "refs": { + "ListApplicationsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "Agent$assessmentArn": "

    The ARN of the assessment that is associated with the agent.

    ", + "Application$applicationArn": "

    The ARN specifying the Inspector application.

    ", + "Application$resourceGroupArn": "

    The ARN specifying the resource group that is associated with the application.

    ", + "ArnList$member": null, + "Assessment$assessmentArn": "

    The ARN of the assessment.

    ", + "Assessment$applicationArn": "

    The ARN of the application that corresponds to this assessment.

    ", + "AttachAssessmentAndRulesPackageRequest$assessmentArn": "

    The ARN specifying the assessment to which you want to attach a rules package.

    ", + "AttachAssessmentAndRulesPackageRequest$rulesPackageArn": "

    The ARN specifying the rules package that you want to attach to the assessment.

    ", + "CreateApplicationRequest$resourceGroupArn": "

    The ARN specifying the resource group that is used to create the application.

    ", + "CreateApplicationResponse$applicationArn": "

    The ARN specifying the application that is created.

    ", + "CreateAssessmentRequest$applicationArn": "

    The ARN specifying the application for which you want to create an assessment.

    ", + "CreateAssessmentResponse$assessmentArn": "

    The ARN specifying the assessment that is created.

    ", + "CreateResourceGroupResponse$resourceGroupArn": "

    The ARN specifying the resource group that is created.

    ", + "DeleteApplicationRequest$applicationArn": "

    The ARN specifying the application that you want to delete.

    ", + "DeleteAssessmentRequest$assessmentArn": "

    The ARN specifying the assessment that you want to delete.

    ", + "DeleteRunRequest$runArn": "

    The ARN specifying the assessment run that you want to delete.

    ", + "DescribeApplicationRequest$applicationArn": "

    The ARN specifying the application that you want to describe.

    ", + "DescribeAssessmentRequest$assessmentArn": "

    The ARN specifying the assessment that you want to describe.

    ", + "DescribeCrossAccountAccessRoleResponse$roleArn": "

    The ARN specifying the IAM role that Inspector uses to access your AWS account.

    ", + "DescribeFindingRequest$findingArn": "

    The ARN specifying the finding that you want to describe.

    ", + "DescribeResourceGroupRequest$resourceGroupArn": "

    The ARN specifying the resource group that you want to describe.

    ", + "DescribeRulesPackageRequest$rulesPackageArn": "

    The ARN specifying the rules package that you want to describe.

    ", + "DescribeRunRequest$runArn": "

    The ARN specifying the assessment run that you want to describe.

    ", + "DetachAssessmentAndRulesPackageRequest$assessmentArn": "

    The ARN specifying the assessment from which you want to detach a rules package.

    ", + "DetachAssessmentAndRulesPackageRequest$rulesPackageArn": "

    The ARN specifying the rules package that you want to detach from the assessment.

    ", + "Finding$findingArn": "

    The ARN specifying the finding.

    ", + "Finding$runArn": "

    The ARN of the assessment run that generated the finding.

    ", + "Finding$rulesPackageArn": "

    The ARN of the rules package that is used to generate the finding.

    ", + "GetAssessmentTelemetryRequest$assessmentArn": "

    The ARN specifying the assessment the telemetry of which you want to obtain.

    ", + "ListAssessmentAgentsRequest$assessmentArn": "

    The ARN specifying the assessment whose agents you want to list.

    ", + "ListAttachedAssessmentsRequest$rulesPackageArn": "

    The ARN specifying the rules package whose assessments you want to list.

    ", + "ListAttachedRulesPackagesRequest$assessmentArn": "

    The ARN specifying the assessment whose rules packages you want to list.

    ", + "ListTagsForResourceRequest$resourceArn": "

    The ARN specifying the resource whose tags you want to list.

    ", + "PreviewAgentsForResourceGroupRequest$resourceGroupArn": "

    The ARN of the resource group that is used to create an application.

    ", + "RegisterCrossAccountAccessRoleRequest$roleArn": "The ARN of the IAM role that Inspector uses to list your EC2 instances during the assessment.", + "ResourceGroup$resourceGroupArn": "

    The ARN of the resource group.

    ", + "RulesPackage$rulesPackageArn": "

    The ARN of the rules package.

    ", + "Run$runArn": "

    The ARN of the run.

    ", + "Run$assessmentArn": "

    The ARN of the assessment that is associated with the run.

    ", + "RunAssessmentRequest$assessmentArn": "

    The ARN of the assessment that you want to run.

    ", + "RunAssessmentResponse$runArn": "

    The ARN specifying the run of the assessment.

    ", + "SetTagsForResourceRequest$resourceArn": "

    The ARN of the assessment that you want to set tags to.

    ", + "StartDataCollectionRequest$assessmentArn": "

    The ARN of the assessment for which you want to start the data collection process.

    ", + "StopDataCollectionRequest$assessmentArn": "

    The ARN of the assessment for which you want to stop the data collection process.

    ", + "UpdateApplicationRequest$applicationArn": "

    Application ARN that you want to update.

    ", + "UpdateApplicationRequest$resourceGroupArn": "

    The resource group ARN that you want to update.

    ", + "UpdateAssessmentRequest$assessmentArn": "

    Asessment ARN that you want to update.

    " + } + }, + "ArnList": { + "base": null, + "refs": { + "AddAttributesToFindingsRequest$findingArns": "

    The ARNs specifying the findings that you want to assign attributes to.

    ", + "FindingsFilter$rulesPackageArns": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the rulesPackageArn property of the Finding data type.

    ", + "ListApplicationsResponse$applicationArnList": "

    A list of ARNs specifying the applications returned by the action.

    ", + "ListAssessmentsRequest$applicationArns": "

    A list of ARNs specifying the applications the assessments of which you want to list.

    ", + "ListAssessmentsResponse$assessmentArnList": "

    A list of ARNs specifying the assessments returned by the action.

    ", + "ListAttachedAssessmentsResponse$assessmentArnList": "

    A list of ARNs specifying the assessments returned by the action.

    ", + "ListAttachedRulesPackagesResponse$rulesPackageArnList": "

    A list of ARNs specifying the rules packages returned by the action.

    ", + "ListFindingsRequest$runArns": "

    The ARNs of the assessment runs that generate the findings that you want to list.

    ", + "ListFindingsResponse$findingArnList": "

    A list of ARNs specifying the findings returned by the action.

    ", + "ListRulesPackagesResponse$rulesPackageArnList": "

    The list of ARNs specifying the rules packages returned by the action.

    ", + "ListRunsRequest$assessmentArns": "

    The ARNs specifying the assessments whose runs you want to list.

    ", + "ListRunsResponse$runArnList": "

    A list of ARNs specifying the assessment runs returned by the action.

    ", + "RemoveAttributesFromFindingsRequest$findingArns": "

    The ARNs specifying the findings that you want to remove attributes from.

    ", + "Run$rulesPackages": "

    Rules packages selected for the run of the assessment.

    ", + "RunsFilter$rulesPackages": "

    For a record to match a filter, the value specified for this data type property must match a list of values of the rulesPackages property of the Run data type.

    " + } + }, + "Assessment": { + "base": "

    Contains information about an Inspector assessment.

    This data type is used as the response element in the DescribeAssessment action.

    ", + "refs": { + "DescribeAssessmentResponse$assessment": "

    Information about the assessment.

    " + } + }, + "AssessmentState": { + "base": null, + "refs": { + "Assessment$assessmentState": "

    The state of the assessment. Values can be set to Created, Collecting Data, Stopping, and Completed.

    ", + "AssessmentStateList$member": null + } + }, + "AssessmentStateList": { + "base": null, + "refs": { + "AssessmentsFilter$assessmentStates": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the assessmentState property of the Assessment data type.

    " + } + }, + "AssessmentsFilter": { + "base": "

    This data type is used as the request parameter in the ListAssessments and ListAttachedAssessments actions.

    ", + "refs": { + "ListAssessmentsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    ", + "ListAttachedAssessmentsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "AttachAssessmentAndRulesPackageRequest": { + "base": null, + "refs": { + } + }, + "AttachAssessmentAndRulesPackageResponse": { + "base": null, + "refs": { + } + }, + "Attribute": { + "base": "

    This data type is used as a response element in the AddAttributesToFindings action and a request parameter in the CreateAssessment action.

    ", + "refs": { + "AttributeList$member": null + } + }, + "AttributeKey": { + "base": null, + "refs": { + "Attribute$key": "

    The attribute key.

    ", + "AttributeKeyList$member": null + } + }, + "AttributeKeyList": { + "base": null, + "refs": { + "RemoveAttributesFromFindingsRequest$attributeKeys": "

    The array of attribute keys that you want to remove from specified findings.

    " + } + }, + "AttributeList": { + "base": null, + "refs": { + "AddAttributesToFindingsRequest$attributes": "

    The array of attributes that you want to assign to specified findings.

    ", + "Assessment$userAttributesForFindings": "

    The user-defined attributes that are assigned to every generated finding.

    ", + "CreateAssessmentRequest$userAttributesForFindings": "

    The user-defined attributes that are assigned to every finding generated by running this assessment.

    ", + "Finding$attributes": "

    The system-defined attributes for the finding.

    ", + "Finding$userAttributes": "

    The user-defined attributes that are assigned to the finding.

    ", + "FindingsFilter$attributes": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the attributes property of the Finding data type.

    ", + "FindingsFilter$userAttributes": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the userAttributes property of the Finding data type.

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "Attribute$value": "

    The value assigned to the attribute key.

    " + } + }, + "AutoScalingGroup": { + "base": null, + "refs": { + "Agent$autoScalingGroup": "

    This data type property is currently not used.

    ", + "AgentPreview$autoScalingGroup": "

    The autoscaling group for the EC2 instance where the agent is installed.

    ", + "Finding$autoScalingGroup": "

    The autoscaling group of the EC2 instance where the agent is installed that is used during the assessment that generates the finding.

    " + } + }, + "AwsAccount": { + "base": null, + "refs": { + "Agent$accountId": "

    AWS account of the EC2 instance where the agent is installed.

    " + } + }, + "Bool": { + "base": null, + "refs": { + "Assessment$dataCollected": "

    Boolean value (true or false) specifying whether the data collection process is completed.

    ", + "AssessmentsFilter$dataCollected": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the dataCollected property of the Assessment data type.

    ", + "DescribeCrossAccountAccessRoleResponse$valid": "

    A Boolean value that specifies whether the IAM role has the necessary policies attached to enable Inspector to access your AWS account.

    " + } + }, + "CreateApplicationRequest": { + "base": null, + "refs": { + } + }, + "CreateApplicationResponse": { + "base": null, + "refs": { + } + }, + "CreateAssessmentRequest": { + "base": null, + "refs": { + } + }, + "CreateAssessmentResponse": { + "base": null, + "refs": { + } + }, + "CreateResourceGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateResourceGroupResponse": { + "base": null, + "refs": { + } + }, + "DeleteApplicationRequest": { + "base": null, + "refs": { + } + }, + "DeleteApplicationResponse": { + "base": null, + "refs": { + } + }, + "DeleteAssessmentRequest": { + "base": null, + "refs": { + } + }, + "DeleteAssessmentResponse": { + "base": null, + "refs": { + } + }, + "DeleteRunRequest": { + "base": null, + "refs": { + } + }, + "DeleteRunResponse": { + "base": null, + "refs": { + } + }, + "DescribeApplicationRequest": { + "base": null, + "refs": { + } + }, + "DescribeApplicationResponse": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentRequest": { + "base": null, + "refs": { + } + }, + "DescribeAssessmentResponse": { + "base": null, + "refs": { + } + }, + "DescribeCrossAccountAccessRoleResponse": { + "base": null, + "refs": { + } + }, + "DescribeFindingRequest": { + "base": null, + "refs": { + } + }, + "DescribeFindingResponse": { + "base": null, + "refs": { + } + }, + "DescribeResourceGroupRequest": { + "base": null, + "refs": { + } + }, + "DescribeResourceGroupResponse": { + "base": null, + "refs": { + } + }, + "DescribeRulesPackageRequest": { + "base": null, + "refs": { + } + }, + "DescribeRulesPackageResponse": { + "base": null, + "refs": { + } + }, + "DescribeRunRequest": { + "base": null, + "refs": { + } + }, + "DescribeRunResponse": { + "base": null, + "refs": { + } + }, + "DetachAssessmentAndRulesPackageRequest": { + "base": null, + "refs": { + } + }, + "DetachAssessmentAndRulesPackageResponse": { + "base": null, + "refs": { + } + }, + "Duration": { + "base": null, + "refs": { + "Assessment$durationInSeconds": "

    The assessment duration in seconds. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

    ", + "CreateAssessmentRequest$durationInSeconds": "

    The duration of the assessment in seconds. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

    ", + "DurationRange$minimum": "

    The minimum value of the duration range. Must be greater than zero.

    ", + "DurationRange$maximum": "

    The maximum value of the duration range. Must be less than or equal to 604800 seconds (1 week).

    ", + "UpdateAssessmentRequest$durationInSeconds": "

    Assessment duration in seconds that you want to update. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

    " + } + }, + "DurationRange": { + "base": "

    This data type is used in the AssessmentsFilter data type.

    ", + "refs": { + "AssessmentsFilter$durationRange": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the durationInSeconds property of the Assessment data type.

    " + } + }, + "FailureMessage": { + "base": null, + "refs": { + "Assessment$failureMessage": "

    This data type property is not currently used.

    " + } + }, + "Finding": { + "base": "

    Contains information about an Inspector finding.

    This data type is used as the response element in the DescribeFinding action.

    ", + "refs": { + "DescribeFindingResponse$finding": "

    Information about the finding.

    " + } + }, + "FindingsFilter": { + "base": "

    This data type is used as a request parameter in the ListFindings action.

    ", + "refs": { + "ListFindingsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "GetAssessmentTelemetryRequest": { + "base": null, + "refs": { + } + }, + "GetAssessmentTelemetryResponse": { + "base": null, + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "ListApplicationsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAssessmentAgentsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAssessmentsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAttachedAssessmentsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListAttachedRulesPackagesRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListFindingsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListRulesPackagesRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "ListRunsRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    ", + "PreviewAgentsForResourceGroupRequest$maxResults": "

    You can use this parameter to indicate the maximum number of items you want in the response. The default value is 10. The maximum value is 500.

    " + } + }, + "InternalException": { + "base": null, + "refs": { + } + }, + "InvalidCrossAccountRoleException": { + "base": null, + "refs": { + } + }, + "InvalidInputException": { + "base": null, + "refs": { + } + }, + "ListApplicationsRequest": { + "base": null, + "refs": { + } + }, + "ListApplicationsResponse": { + "base": null, + "refs": { + } + }, + "ListAssessmentAgentsRequest": { + "base": null, + "refs": { + } + }, + "ListAssessmentAgentsResponse": { + "base": null, + "refs": { + } + }, + "ListAssessmentsRequest": { + "base": null, + "refs": { + } + }, + "ListAssessmentsResponse": { + "base": null, + "refs": { + } + }, + "ListAttachedAssessmentsRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedAssessmentsResponse": { + "base": null, + "refs": { + } + }, + "ListAttachedRulesPackagesRequest": { + "base": null, + "refs": { + } + }, + "ListAttachedRulesPackagesResponse": { + "base": null, + "refs": { + } + }, + "ListFindingsRequest": { + "base": null, + "refs": { + } + }, + "ListFindingsResponse": { + "base": null, + "refs": { + } + }, + "ListRulesPackagesRequest": { + "base": null, + "refs": { + } + }, + "ListRulesPackagesResponse": { + "base": null, + "refs": { + } + }, + "ListRunsRequest": { + "base": null, + "refs": { + } + }, + "ListRunsResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "Locale": { + "base": null, + "refs": { + "LocalizeTextRequest$locale": "

    The locale that you want to translate a textual identifier into.

    " + } + }, + "LocalizeTextRequest": { + "base": null, + "refs": { + } + }, + "LocalizeTextResponse": { + "base": null, + "refs": { + } + }, + "LocalizedFacility": { + "base": null, + "refs": { + "LocalizedTextKey$facility": "

    The module response source of the text.

    " + } + }, + "LocalizedText": { + "base": "

    The textual identifier. This data type is used as the request parameter in the LocalizeText action.

    ", + "refs": { + "Finding$finding": "

    A short description that identifies the finding.

    ", + "Finding$description": "

    The description of the finding.

    ", + "Finding$recommendation": "

    The recommendation for the finding.

    ", + "LocalizedTextList$member": null, + "RulesPackage$description": "

    The description of the rules package.

    " + } + }, + "LocalizedTextId": { + "base": null, + "refs": { + "LocalizedTextKey$id": "

    Part of the module response source of the text.

    " + } + }, + "LocalizedTextKey": { + "base": "

    This data type is used in the LocalizedText data type.

    ", + "refs": { + "LocalizedText$key": "

    The facility and id properties of the LocalizedTextKey data type.

    " + } + }, + "LocalizedTextList": { + "base": null, + "refs": { + "LocalizeTextRequest$localizedTexts": "

    A list of textual identifiers.

    " + } + }, + "Long": { + "base": null, + "refs": { + "MessageTypeTelemetry$count": "

    The number of times that the behavioral data is collected by the agent during an assessment.

    ", + "MessageTypeTelemetry$dataSize": "

    The total size of the behavioral data that is collected by the agent during an assessment.

    " + } + }, + "Message": { + "base": null, + "refs": { + "AddAttributesToFindingsResponse$message": "

    Confirmation details of the action performed.

    ", + "AttachAssessmentAndRulesPackageResponse$message": "

    Confirmation details of the action performed.

    ", + "DeleteApplicationResponse$message": "

    Confirmation details of the action performed.

    ", + "DeleteAssessmentResponse$message": "

    Confirmation details of the action performed.

    ", + "DeleteRunResponse$message": "

    Confirmation details of the action performed.

    ", + "DetachAssessmentAndRulesPackageResponse$message": "

    Confirmation details of the action performed.

    ", + "LocalizeTextResponse$message": "

    Confirmation details of the action performed.

    ", + "RegisterCrossAccountAccessRoleResponse$message": "

    Confirmation details of the action performed.

    ", + "RemoveAttributesFromFindingsResponse$message": "

    Confirmation details of the action performed.

    ", + "SetTagsForResourceResponse$message": "

    Confirmation details of the action performed.

    ", + "StartDataCollectionResponse$message": "

    Confirmation details of the action performed.

    ", + "StopDataCollectionResponse$message": "

    Confirmation details of the action performed.

    ", + "UpdateApplicationResponse$message": "

    Confirmation details of the action performed.

    ", + "UpdateAssessmentResponse$message": "

    Confirmation details of the action performed.

    " + } + }, + "MessageType": { + "base": null, + "refs": { + "MessageTypeTelemetry$messageType": "

    A specific type of behavioral data that is collected by the agent.

    " + } + }, + "MessageTypeTelemetry": { + "base": "

    This data type is used in the Telemetry data type.

    This is metadata about the behavioral data collected by the Inspector agent on your EC2 instances during an assessment and passed to the Inspector service for analysis.

    ", + "refs": { + "MessageTypeTelemetryList$member": null + } + }, + "MessageTypeTelemetryList": { + "base": null, + "refs": { + "Telemetry$messageTypeTelemetries": "

    Counts of individual metrics received by Inspector from the agent.

    " + } + }, + "Name": { + "base": null, + "refs": { + "Application$applicationName": "

    The name of the Inspector application.

    ", + "Assessment$assessmentName": "

    The name of the assessment.

    ", + "CreateApplicationRequest$applicationName": "

    The user-defined name identifying the application that you want to create. The name must be unique within the AWS account.

    ", + "CreateAssessmentRequest$assessmentName": "

    The user-defined name identifying the assessment that you want to create. You can create several assessments for an application. The names of the assessments corresponding to a particular application must be unique.

    ", + "Finding$ruleName": "

    The rule name that is used to generate the finding.

    ", + "NameList$member": null, + "RulesPackage$rulesPackageName": "

    The name of the rules package.

    ", + "RulesPackage$provider": "

    The provider of the rules package.

    ", + "Run$runName": "

    The auto-generated name for the run.

    ", + "RunAssessmentRequest$runName": "

    A name specifying the run of the assessment.

    ", + "UpdateApplicationRequest$applicationName": "

    Application name that you want to update.

    ", + "UpdateAssessmentRequest$assessmentName": "

    Assessment name that you want to update.

    " + } + }, + "NameList": { + "base": null, + "refs": { + "FindingsFilter$ruleNames": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the ruleName property of the Finding data type.

    " + } + }, + "NamePattern": { + "base": null, + "refs": { + "NamePatternList$member": null + } + }, + "NamePatternList": { + "base": null, + "refs": { + "ApplicationsFilter$applicationNamePatterns": "

    For a record to match a filter, an explicit value or a string containing a wildcard specified for this data type property must match the value of the applicationName property of the Application data type.

    ", + "AssessmentsFilter$assessmentNamePatterns": "

    For a record to match a filter, an explicit value or a string containing a wildcard specified for this data type property must match the value of the assessmentName property of the Assessment data type.

    ", + "RunsFilter$runNamePatterns": "

    For a record to match a filter, an explicit value or a string containing a wildcard specified for this data type property must match the value of the runName property of the Run data type.

    " + } + }, + "NoSuchEntityException": { + "base": null, + "refs": { + } + }, + "OperationInProgressException": { + "base": null, + "refs": { + } + }, + "PaginationToken": { + "base": null, + "refs": { + "ListApplicationsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListApplications action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListApplicationsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListAssessmentAgentsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListAssessmentAgents action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListAssessmentAgentsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListAssessmentsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListAssessments action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListAssessmentsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListAttachedAssessmentsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListAttachedAssessments action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListAttachedAssessmentsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListAttachedRulesPackagesRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListAttachedRulesPackages action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListAttachedRulesPackagesResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListFindingsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListFindings action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListFindingsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListRulesPackagesRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListRulesPackages action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListRulesPackagesResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "ListRunsRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the ListRuns action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "ListRunsResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    ", + "PreviewAgentsForResourceGroupRequest$nextToken": "

    You can use this parameter when paginating results. Set the value of this parameter to 'null' on your first call to the PreviewAgentsForResourceGroup action. Subsequent calls to the action fill nextToken in the request with the value of NextToken from previous response to continue listing data.

    ", + "PreviewAgentsForResourceGroupResponse$nextToken": "

    When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to 'null'.

    " + } + }, + "Parameter": { + "base": "

    This data type is used in the LocalizedText data type.

    ", + "refs": { + "ParameterList$member": null + } + }, + "ParameterList": { + "base": null, + "refs": { + "LocalizedText$parameters": "

    Values for the dynamic elements of the string specified by the textual identifier.

    " + } + }, + "ParameterName": { + "base": null, + "refs": { + "Parameter$name": "

    The name of the variable that is being replaced.

    " + } + }, + "ParameterValue": { + "base": null, + "refs": { + "Parameter$value": "

    The value assigned to the variable that is being replaced.

    " + } + }, + "PreviewAgentsForResourceGroupRequest": { + "base": null, + "refs": { + } + }, + "PreviewAgentsForResourceGroupResponse": { + "base": null, + "refs": { + } + }, + "RegisterCrossAccountAccessRoleRequest": { + "base": null, + "refs": { + } + }, + "RegisterCrossAccountAccessRoleResponse": { + "base": null, + "refs": { + } + }, + "RemoveAttributesFromFindingsRequest": { + "base": null, + "refs": { + } + }, + "RemoveAttributesFromFindingsResponse": { + "base": null, + "refs": { + } + }, + "ResourceGroup": { + "base": "

    Contains information about a resource group. The resource group defines a set of tags that, when queried, identify the AWS resources that comprise the application.

    This data type is used as the response element in the DescribeResourceGroup action.

    ", + "refs": { + "DescribeResourceGroupResponse$resourceGroup": "

    Information about the resource group.

    " + } + }, + "ResourceGroupTags": { + "base": null, + "refs": { + "CreateResourceGroupRequest$resourceGroupTags": "

    A collection of keys and an array of possible values in JSON format.

    For example, [{ \"key1\" : [\"Value1\",\"Value2\"]},{\"Key2\": [\"Value3\"]}]

    ", + "ResourceGroup$resourceGroupTags": "

    The tags (key and value pairs) of the resource group.

    This data type property is used in the CreateResourceGroup action.

    A collection of keys and an array of possible values in JSON format.

    For example, [{ \"key1\" : [\"Value1\",\"Value2\"]},{\"Key2\": [\"Value3\"]}]

    " + } + }, + "RulesPackage": { + "base": "

    Contains information about an Inspector rules package.

    This data type is used as the response element in the DescribeRulesPackage action.

    ", + "refs": { + "DescribeRulesPackageResponse$rulesPackage": "

    Information about the rules package.

    " + } + }, + "Run": { + "base": "

    A snapshot of an Inspector assessment that contains the assessment's findings.

    This data type is used as the response element in the DescribeRun action.

    ", + "refs": { + "DescribeRunResponse$run": "

    Information about the assessment run.

    " + } + }, + "RunAssessmentRequest": { + "base": null, + "refs": { + } + }, + "RunAssessmentResponse": { + "base": null, + "refs": { + } + }, + "RunState": { + "base": null, + "refs": { + "Run$runState": "

    The state of the run. Values can be set to DataCollectionComplete, EvaluatingPolicies, EvaluatingPoliciesErrorCanRetry, Completed, Failed, TombStoned.

    ", + "RunStateList$member": null + } + }, + "RunStateList": { + "base": null, + "refs": { + "RunsFilter$runStates": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the runState property of the Run data type.

    " + } + }, + "RunsFilter": { + "base": "

    This data type is used as the request parameter in the ListRuns action.

    ", + "refs": { + "ListRunsRequest$filter": "

    You can use this parameter to specify a subset of data to be included in the action's response.

    For a record to match a filter, all specified filter attributes must match. When multiple values are specified for a filter attribute, any of the values can match.

    " + } + }, + "SetTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "SetTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "Severity": { + "base": null, + "refs": { + "Finding$severity": "

    The finding severity. Values can be set to High, Medium, Low, and Informational.

    ", + "SeverityList$member": null + } + }, + "SeverityList": { + "base": null, + "refs": { + "FindingsFilter$severities": "

    For a record to match a filter, the value specified for this data type property must be the exact match of the value of the severity property of the Finding data type.

    " + } + }, + "StartDataCollectionRequest": { + "base": null, + "refs": { + } + }, + "StartDataCollectionResponse": { + "base": null, + "refs": { + } + }, + "StopDataCollectionRequest": { + "base": null, + "refs": { + } + }, + "StopDataCollectionResponse": { + "base": null, + "refs": { + } + }, + "Tag": { + "base": "

    A key and value pair.

    This data type is used as a request parameter in the SetTagsForResource action and a response element in the ListTagsForResource action.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The tag key.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ListTagsForResourceResponse$tagList": "

    A collection of key and value pairs.

    ", + "SetTagsForResourceRequest$tags": "

    A collection of key and value pairs that you want to set to an assessment.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value assigned to a tag key.

    " + } + }, + "Telemetry": { + "base": "

    The metadata about the Inspector application data metrics collected by the agent.

    This data type is used as the response element in the GetAssessmentTelemetry action.

    ", + "refs": { + "TelemetryList$member": null + } + }, + "TelemetryList": { + "base": null, + "refs": { + "Agent$telemetry": "

    The Inspector application data metrics collected by the agent.

    ", + "GetAssessmentTelemetryResponse$telemetry": "

    Telemetry details.

    " + } + }, + "TelemetryStatus": { + "base": null, + "refs": { + "Telemetry$status": "

    The category of the individual metrics that together constitute the telemetry that Inspector received from the agent.

    " + } + }, + "Text": { + "base": null, + "refs": { + "TextList$member": null + } + }, + "TextList": { + "base": null, + "refs": { + "LocalizeTextResponse$results": "

    The resulting list of user-readable texts.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "Assessment$startTime": "

    The assessment start time.

    ", + "Assessment$endTime": "

    The assessment end time.

    ", + "Run$creationTime": "

    Run creation time that corresponds to the data collection completion time or failure.

    ", + "Run$completionTime": "

    Run completion time that corresponds to the rules packages evaluation completion time or failure.

    ", + "TimestampRange$minimum": "

    The minimum value of the timestamp range.

    ", + "TimestampRange$maximum": "

    The maximum value of the timestamp range.

    " + } + }, + "TimestampRange": { + "base": "

    This data type is used in the AssessmentsFilter and RunsFilter data types.

    ", + "refs": { + "AssessmentsFilter$startTimeRange": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the startTime property of the Assessment data type.

    ", + "AssessmentsFilter$endTimeRange": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the endTime property of the Assessment data type.

    ", + "RunsFilter$creationTime": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the creationTime property of the Run data type.

    ", + "RunsFilter$completionTime": "

    For a record to match a filter, the value specified for this data type property must inclusively match any value between the specified minimum and maximum values of the completionTime property of the Run data type.

    " + } + }, + "UpdateApplicationRequest": { + "base": null, + "refs": { + } + }, + "UpdateApplicationResponse": { + "base": null, + "refs": { + } + }, + "UpdateAssessmentRequest": { + "base": null, + "refs": { + } + }, + "UpdateAssessmentResponse": { + "base": null, + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "RulesPackage$version": "

    The version id of the rules package.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/inspector/2015-08-18/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2950 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-05-28", + "endpointPrefix":"iot", + "serviceFullName":"AWS IoT", + "signatureVersion":"v4", + "signingName":"execute-api", + "protocol":"rest-json" + }, + "operations":{ + "AcceptCertificateTransfer":{ + "name":"AcceptCertificateTransfer", + "http":{ + "method":"PATCH", + "requestUri":"/accept-certificate-transfer/{certificateId}" + }, + "input":{"shape":"AcceptCertificateTransferRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TransferAlreadyCompletedException", + "error":{"httpStatusCode":410}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + 
"exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "AttachPrincipalPolicy":{ + "name":"AttachPrincipalPolicy", + "http":{ + "method":"PUT", + "requestUri":"/principal-policies/{policyName}" + }, + "input":{"shape":"AttachPrincipalPolicyRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":410}, + "exception":true + } + ] + }, + "AttachThingPrincipal":{ + "name":"AttachThingPrincipal", + "http":{ + "method":"PUT", + "requestUri":"/things/{thingName}/principals" + }, + "input":{"shape":"AttachThingPrincipalRequest"}, + "output":{"shape":"AttachThingPrincipalResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + 
"error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CancelCertificateTransfer":{ + "name":"CancelCertificateTransfer", + "http":{ + "method":"PATCH", + "requestUri":"/cancel-certificate-transfer/{certificateId}" + }, + "input":{"shape":"CancelCertificateTransferRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TransferAlreadyCompletedException", + "error":{"httpStatusCode":410}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreateCertificateFromCsr":{ + "name":"CreateCertificateFromCsr", + "http":{ + "method":"POST", + "requestUri":"/certificates" + }, + "input":{"shape":"CreateCertificateFromCsrRequest"}, + "output":{"shape":"CreateCertificateFromCsrResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + 
"error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreateKeysAndCertificate":{ + "name":"CreateKeysAndCertificate", + "http":{ + "method":"POST", + "requestUri":"/keys-and-certificate" + }, + "input":{"shape":"CreateKeysAndCertificateRequest"}, + "output":{"shape":"CreateKeysAndCertificateResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreatePolicy":{ + "name":"CreatePolicy", + "http":{ + "method":"POST", + "requestUri":"/policies/{policyName}" + }, + "input":{"shape":"CreatePolicyRequest"}, + "output":{"shape":"CreatePolicyResponse"}, + "errors":[ + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"MalformedPolicyException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreatePolicyVersion":{ + "name":"CreatePolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/policies/{policyName}/version" + }, + 
"input":{"shape":"CreatePolicyVersionRequest"}, + "output":{"shape":"CreatePolicyVersionResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"MalformedPolicyException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"VersionsLimitExceededException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "CreateThing":{ + "name":"CreateThing", + "http":{ + "method":"POST", + "requestUri":"/things/{thingName}" + }, + "input":{"shape":"CreateThingRequest"}, + "output":{"shape":"CreateThingResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "CreateTopicRule":{ + "name":"CreateTopicRule", + "http":{ + "method":"POST", + "requestUri":"/rules/{ruleName}" + }, + "input":{"shape":"CreateTopicRuleRequest"}, + "errors":[ + { + 
"shape":"SqlParseException", + "error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceAlreadyExistsException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "DeleteCertificate":{ + "name":"DeleteCertificate", + "http":{ + "method":"DELETE", + "requestUri":"/certificates/{certificateId}" + }, + "input":{"shape":"DeleteCertificateRequest"}, + "errors":[ + { + "shape":"DeleteConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"CertificateStateException", + "error":{"httpStatusCode":406}, + "exception":true + } + ] + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"DELETE", + "requestUri":"/policies/{policyName}" + }, + "input":{"shape":"DeletePolicyRequest"}, + "errors":[ + { + "shape":"DeleteConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeletePolicyVersion":{ + 
"name":"DeletePolicyVersion", + "http":{ + "method":"DELETE", + "requestUri":"/policies/{policyName}/version/{policyVersionId}" + }, + "input":{"shape":"DeletePolicyVersionRequest"}, + "errors":[ + { + "shape":"DeleteConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteThing":{ + "name":"DeleteThing", + "http":{ + "method":"DELETE", + "requestUri":"/things/{thingName}" + }, + "input":{"shape":"DeleteThingRequest"}, + "output":{"shape":"DeleteThingResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteTopicRule":{ + "name":"DeleteTopicRule", + "http":{ + "method":"DELETE", + "requestUri":"/rules/{ruleName}" + }, + "input":{"shape":"DeleteTopicRuleRequest"}, + "errors":[ + { + "shape":"InternalException", + 
"error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "DescribeCertificate":{ + "name":"DescribeCertificate", + "http":{ + "method":"GET", + "requestUri":"/certificates/{certificateId}" + }, + "input":{"shape":"DescribeCertificateRequest"}, + "output":{"shape":"DescribeCertificateResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "DescribeEndpoint":{ + "name":"DescribeEndpoint", + "http":{ + "method":"GET", + "requestUri":"/endpoint" + }, + "input":{"shape":"DescribeEndpointRequest"}, + "output":{"shape":"DescribeEndpointResponse"}, + "errors":[ + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DescribeThing":{ + "name":"DescribeThing", + "http":{ + "method":"GET", + "requestUri":"/things/{thingName}" + }, + "input":{"shape":"DescribeThingRequest"}, + 
"output":{"shape":"DescribeThingResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DetachPrincipalPolicy":{ + "name":"DetachPrincipalPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/principal-policies/{policyName}" + }, + "input":{"shape":"DetachPrincipalPolicyRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DetachThingPrincipal":{ + "name":"DetachThingPrincipal", + "http":{ + "method":"DELETE", + "requestUri":"/things/{thingName}/principals" + }, + "input":{"shape":"DetachThingPrincipalRequest"}, + "output":{"shape":"DetachThingPrincipalResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + 
}, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DisableTopicRule":{ + "name":"DisableTopicRule", + "http":{ + "method":"POST", + "requestUri":"/rules/{ruleName}/disable" + }, + "input":{"shape":"DisableTopicRuleRequest"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "EnableTopicRule":{ + "name":"EnableTopicRule", + "http":{ + "method":"POST", + "requestUri":"/rules/{ruleName}/enable" + }, + "input":{"shape":"EnableTopicRuleRequest"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "GetLoggingOptions":{ + "name":"GetLoggingOptions", + "http":{ + "method":"GET", + "requestUri":"/loggingOptions" + }, + "input":{"shape":"GetLoggingOptionsRequest"}, + "output":{"shape":"GetLoggingOptionsResponse"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, 
+ "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"GET", + "requestUri":"/policies/{policyName}" + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{"shape":"GetPolicyResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetPolicyVersion":{ + "name":"GetPolicyVersion", + "http":{ + "method":"GET", + "requestUri":"/policies/{policyName}/version/{policyVersionId}" + }, + "input":{"shape":"GetPolicyVersionRequest"}, + "output":{"shape":"GetPolicyVersionResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + 
"GetTopicRule":{ + "name":"GetTopicRule", + "http":{ + "method":"GET", + "requestUri":"/rules/{ruleName}" + }, + "input":{"shape":"GetTopicRuleRequest"}, + "output":{"shape":"GetTopicRuleResponse"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "ListCertificates":{ + "name":"ListCertificates", + "http":{ + "method":"GET", + "requestUri":"/certificates" + }, + "input":{"shape":"ListCertificatesRequest"}, + "output":{"shape":"ListCertificatesResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListPolicies":{ + "name":"ListPolicies", + "http":{ + "method":"GET", + "requestUri":"/policies" + }, + "input":{"shape":"ListPoliciesRequest"}, + "output":{"shape":"ListPoliciesResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, 
+ "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListPolicyVersions":{ + "name":"ListPolicyVersions", + "http":{ + "method":"GET", + "requestUri":"/policies/{policyName}/version" + }, + "input":{"shape":"ListPolicyVersionsRequest"}, + "output":{"shape":"ListPolicyVersionsResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListPrincipalPolicies":{ + "name":"ListPrincipalPolicies", + "http":{ + "method":"GET", + "requestUri":"/principal-policies" + }, + "input":{"shape":"ListPrincipalPoliciesRequest"}, + "output":{"shape":"ListPrincipalPoliciesResponse"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListPrincipalThings":{ + "name":"ListPrincipalThings", + "http":{ + 
"method":"GET", + "requestUri":"/principals/things" + }, + "input":{"shape":"ListPrincipalThingsRequest"}, + "output":{"shape":"ListPrincipalThingsResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListThingPrincipals":{ + "name":"ListThingPrincipals", + "http":{ + "method":"GET", + "requestUri":"/things/{thingName}/principals" + }, + "input":{"shape":"ListThingPrincipalsRequest"}, + "output":{"shape":"ListThingPrincipalsResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListThings":{ + "name":"ListThings", + "http":{ + "method":"GET", + "requestUri":"/things" + }, + "input":{"shape":"ListThingsRequest"}, + "output":{"shape":"ListThingsResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + 
"shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ListTopicRules":{ + "name":"ListTopicRules", + "http":{ + "method":"GET", + "requestUri":"/rules" + }, + "input":{"shape":"ListTopicRulesRequest"}, + "output":{"shape":"ListTopicRulesResponse"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "RejectCertificateTransfer":{ + "name":"RejectCertificateTransfer", + "http":{ + "method":"PATCH", + "requestUri":"/reject-certificate-transfer/{certificateId}" + }, + "input":{"shape":"RejectCertificateTransferRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TransferAlreadyCompletedException", + "error":{"httpStatusCode":410}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "ReplaceTopicRule":{ + "name":"ReplaceTopicRule", + "http":{ + "method":"PATCH", + "requestUri":"/rules/{ruleName}" + }, + "input":{"shape":"ReplaceTopicRuleRequest"}, + "errors":[ + { + "shape":"SqlParseException", + 
"error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + } + ] + }, + "SetDefaultPolicyVersion":{ + "name":"SetDefaultPolicyVersion", + "http":{ + "method":"PATCH", + "requestUri":"/policies/{policyName}/version/{policyVersionId}" + }, + "input":{"shape":"SetDefaultPolicyVersionRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "SetLoggingOptions":{ + "name":"SetLoggingOptions", + "http":{ + "method":"POST", + "requestUri":"/loggingOptions" + }, + "input":{"shape":"SetLoggingOptionsRequest"}, + "errors":[ + { + "shape":"InternalException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + } + ] + }, + "TransferCertificate":{ + "name":"TransferCertificate", + "http":{ + "method":"PATCH", + 
"requestUri":"/transfer-certificate/{certificateId}" + }, + "input":{"shape":"TransferCertificateRequest"}, + "output":{"shape":"TransferCertificateResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"CertificateStateException", + "error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"TransferConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateCertificate":{ + "name":"UpdateCertificate", + "http":{ + "method":"PUT", + "requestUri":"/certificates/{certificateId}" + }, + "input":{"shape":"UpdateCertificateRequest"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"CertificateStateException", + "error":{"httpStatusCode":406}, + "exception":true + }, + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateThing":{ + "name":"UpdateThing", + "http":{ + 
"method":"PATCH", + "requestUri":"/things/{thingName}" + }, + "input":{"shape":"UpdateThingRequest"}, + "output":{"shape":"UpdateThingResponse"}, + "errors":[ + { + "shape":"InvalidRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ThrottlingException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"UnauthorizedException", + "error":{"httpStatusCode":401}, + "exception":true + }, + { + "shape":"ServiceUnavailableException", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + { + "shape":"InternalFailureException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + } + }, + "shapes":{ + "AcceptCertificateTransferRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + }, + "setAsActive":{ + "shape":"SetAsActive", + "location":"querystring", + "locationName":"setAsActive" + } + } + }, + "Action":{ + "type":"structure", + "members":{ + "dynamoDB":{"shape":"DynamoDBAction"}, + "lambda":{"shape":"LambdaAction"}, + "sns":{"shape":"SnsAction"}, + "sqs":{"shape":"SqsAction"}, + "kinesis":{"shape":"KinesisAction"}, + "republish":{"shape":"RepublishAction"}, + "s3":{"shape":"S3Action"}, + "firehose":{"shape":"FirehoseAction"} + } + }, + "ActionList":{ + "type":"list", + "member":{"shape":"Action"}, + "min":0, + "max":10 + }, + "AscendingOrder":{"type":"boolean"}, + "AttachPrincipalPolicyRequest":{ + "type":"structure", + "required":[ + "policyName", + "principal" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-iot-principal" + } + } + }, + "AttachThingPrincipalRequest":{ + 
"type":"structure", + "required":[ + "thingName", + "principal" + ], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-principal" + } + } + }, + "AttachThingPrincipalResponse":{ + "type":"structure", + "members":{ + } + }, + "AttributeName":{ + "type":"string", + "max":128, + "pattern":"[a-zA-Z0-9_.,@/:#-]+" + }, + "AttributePayload":{ + "type":"structure", + "members":{ + "attributes":{"shape":"Attributes"} + } + }, + "AttributeValue":{ + "type":"string", + "max":1024, + "pattern":"[a-zA-Z0-9_.,@/:#-]+" + }, + "Attributes":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "AwsAccountId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "AwsArn":{"type":"string"}, + "BucketName":{"type":"string"}, + "CancelCertificateTransferRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + } + } + }, + "Certificate":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "status":{"shape":"CertificateStatus"}, + "creationDate":{"shape":"DateType"} + } + }, + "CertificateArn":{"type":"string"}, + "CertificateDescription":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "status":{"shape":"CertificateStatus"}, + "certificatePem":{"shape":"CertificatePem"}, + "ownedBy":{"shape":"AwsAccountId"}, + "creationDate":{"shape":"DateType"}, + "lastModifiedDate":{"shape":"DateType"} + } + }, + "CertificateId":{ + "type":"string", + "min":64, + "max":64, + "pattern":"(0x)?[a-fA-F0-9]+" + }, + "CertificatePem":{ + "type":"string", + "min":1 + }, + "CertificateSigningRequest":{ + "type":"string", + "min":1 + }, + 
"CertificateStateException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":406}, + "exception":true + }, + "CertificateStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "REVOKED", + "PENDING_TRANSFER" + ] + }, + "Certificates":{ + "type":"list", + "member":{"shape":"Certificate"} + }, + "ClientId":{"type":"string"}, + "CreateCertificateFromCsrRequest":{ + "type":"structure", + "required":["certificateSigningRequest"], + "members":{ + "certificateSigningRequest":{"shape":"CertificateSigningRequest"}, + "setAsActive":{ + "shape":"SetAsActive", + "location":"querystring", + "locationName":"setAsActive" + } + } + }, + "CreateCertificateFromCsrResponse":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "certificatePem":{"shape":"CertificatePem"} + } + }, + "CreateKeysAndCertificateRequest":{ + "type":"structure", + "members":{ + "setAsActive":{ + "shape":"SetAsActive", + "location":"querystring", + "locationName":"setAsActive" + } + } + }, + "CreateKeysAndCertificateResponse":{ + "type":"structure", + "members":{ + "certificateArn":{"shape":"CertificateArn"}, + "certificateId":{"shape":"CertificateId"}, + "certificatePem":{"shape":"CertificatePem"}, + "keyPair":{"shape":"KeyPair"} + } + }, + "CreatePolicyRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyDocument" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyDocument":{"shape":"PolicyDocument"} + } + }, + "CreatePolicyResponse":{ + "type":"structure", + "members":{ + "policyName":{"shape":"PolicyName"}, + "policyArn":{"shape":"PolicyArn"}, + "policyDocument":{"shape":"PolicyDocument"}, + "policyVersionId":{"shape":"PolicyVersionId"} + } + }, + "CreatePolicyVersionRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyDocument" + ], + "members":{ 
+ "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyDocument":{"shape":"PolicyDocument"}, + "setAsDefault":{ + "shape":"SetAsDefault", + "location":"querystring", + "locationName":"setAsDefault" + } + } + }, + "CreatePolicyVersionResponse":{ + "type":"structure", + "members":{ + "policyArn":{"shape":"PolicyArn"}, + "policyDocument":{"shape":"PolicyDocument"}, + "policyVersionId":{"shape":"PolicyVersionId"}, + "isDefaultVersion":{"shape":"IsDefaultVersion"} + } + }, + "CreateThingRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "attributePayload":{"shape":"AttributePayload"} + } + }, + "CreateThingResponse":{ + "type":"structure", + "members":{ + "thingName":{"shape":"ThingName"}, + "thingArn":{"shape":"ThingArn"} + } + }, + "CreateTopicRuleRequest":{ + "type":"structure", + "required":[ + "ruleName", + "topicRulePayload" + ], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + }, + "topicRulePayload":{"shape":"TopicRulePayload"} + }, + "payload":"topicRulePayload" + }, + "CreatedAtDate":{"type":"timestamp"}, + "DateType":{"type":"timestamp"}, + "DeleteCertificateRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + } + } + }, + "DeleteConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DeletePolicyRequest":{ + "type":"structure", + "required":["policyName"], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + } + } + }, + "DeletePolicyVersionRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyVersionId" + ], + "members":{ + "policyName":{ + 
"shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyVersionId":{ + "shape":"PolicyVersionId", + "location":"uri", + "locationName":"policyVersionId" + } + } + }, + "DeleteThingRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "DeleteThingResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTopicRuleRequest":{ + "type":"structure", + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + } + }, + "required":["ruleName"] + }, + "DeliveryStreamName":{"type":"string"}, + "DescribeCertificateRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + } + } + }, + "DescribeCertificateResponse":{ + "type":"structure", + "members":{ + "certificateDescription":{"shape":"CertificateDescription"} + } + }, + "DescribeEndpointRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeEndpointResponse":{ + "type":"structure", + "members":{ + "endpointAddress":{"shape":"EndpointAddress"} + } + }, + "DescribeThingRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "DescribeThingResponse":{ + "type":"structure", + "members":{ + "defaultClientId":{"shape":"ClientId"}, + "thingName":{"shape":"ThingName"}, + "attributes":{"shape":"Attributes"} + } + }, + "Description":{"type":"string"}, + "DetachPrincipalPolicyRequest":{ + "type":"structure", + "required":[ + "policyName", + "principal" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-iot-principal" + } + } + }, + 
"DetachThingPrincipalRequest":{ + "type":"structure", + "required":[ + "thingName", + "principal" + ], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-principal" + } + } + }, + "DetachThingPrincipalResponse":{ + "type":"structure", + "members":{ + } + }, + "DisableTopicRuleRequest":{ + "type":"structure", + "required":["ruleName"], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + } + } + }, + "DynamoDBAction":{ + "type":"structure", + "required":[ + "tableName", + "roleArn", + "hashKeyField", + "hashKeyValue", + "rangeKeyField", + "rangeKeyValue" + ], + "members":{ + "tableName":{"shape":"TableName"}, + "roleArn":{"shape":"AwsArn"}, + "hashKeyField":{"shape":"HashKeyField"}, + "hashKeyValue":{"shape":"HashKeyValue"}, + "rangeKeyField":{"shape":"RangeKeyField"}, + "rangeKeyValue":{"shape":"RangeKeyValue"}, + "payloadField":{"shape":"PayloadField"} + } + }, + "EnableTopicRuleRequest":{ + "type":"structure", + "required":["ruleName"], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + } + } + }, + "EndpointAddress":{"type":"string"}, + "FirehoseAction":{ + "type":"structure", + "required":[ + "roleArn", + "deliveryStreamName" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "deliveryStreamName":{"shape":"DeliveryStreamName"} + } + }, + "FunctionArn":{"type":"string"}, + "GetLoggingOptionsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetLoggingOptionsResponse":{ + "type":"structure", + "members":{ + "roleArn":{"shape":"AwsArn"}, + "logLevel":{"shape":"LogLevel"} + } + }, + "GetPolicyRequest":{ + "type":"structure", + "required":["policyName"], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + } + } + }, + "GetPolicyResponse":{ + "type":"structure", + 
"members":{ + "policyName":{"shape":"PolicyName"}, + "policyArn":{"shape":"PolicyArn"}, + "policyDocument":{"shape":"PolicyDocument"}, + "defaultVersionId":{"shape":"PolicyVersionId"} + } + }, + "GetPolicyVersionRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyVersionId" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyVersionId":{ + "shape":"PolicyVersionId", + "location":"uri", + "locationName":"policyVersionId" + } + } + }, + "GetPolicyVersionResponse":{ + "type":"structure", + "members":{ + "policyArn":{"shape":"PolicyArn"}, + "policyName":{"shape":"PolicyName"}, + "policyDocument":{"shape":"PolicyDocument"}, + "policyVersionId":{"shape":"PolicyVersionId"}, + "isDefaultVersion":{"shape":"IsDefaultVersion"} + } + }, + "GetTopicRuleRequest":{ + "type":"structure", + "required":["ruleName"], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + } + } + }, + "GetTopicRuleResponse":{ + "type":"structure", + "members":{ + "ruleArn":{"shape":"RuleArn"}, + "rule":{"shape":"TopicRule"} + } + }, + "HashKeyField":{"type":"string"}, + "HashKeyValue":{"type":"string"}, + "InternalException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "IsDefaultVersion":{"type":"boolean"}, + "IsDisabled":{"type":"boolean"}, + "Key":{"type":"string"}, + "KeyPair":{ + "type":"structure", + "members":{ + "PublicKey":{"shape":"PublicKey"}, + "PrivateKey":{"shape":"PrivateKey"} + } + }, + 
"KinesisAction":{ + "type":"structure", + "required":[ + "roleArn", + "streamName" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "streamName":{"shape":"StreamName"}, + "partitionKey":{"shape":"PartitionKey"} + } + }, + "LambdaAction":{ + "type":"structure", + "required":["functionArn"], + "members":{ + "functionArn":{"shape":"FunctionArn"} + } + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":410}, + "exception":true + }, + "ListCertificatesRequest":{ + "type":"structure", + "members":{ + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListCertificatesResponse":{ + "type":"structure", + "members":{ + "certificates":{"shape":"Certificates"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListPoliciesRequest":{ + "type":"structure", + "members":{ + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListPoliciesResponse":{ + "type":"structure", + "members":{ + "policies":{"shape":"Policies"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListPolicyVersionsRequest":{ + "type":"structure", + "required":["policyName"], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + } + } + }, + "ListPolicyVersionsResponse":{ + "type":"structure", + "members":{ + "policyVersions":{"shape":"PolicyVersions"} + } + }, + "ListPrincipalPoliciesRequest":{ + "type":"structure", + "required":["principal"], + 
"members":{ + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-iot-principal" + }, + "marker":{ + "shape":"Marker", + "location":"querystring", + "locationName":"marker" + }, + "pageSize":{ + "shape":"PageSize", + "location":"querystring", + "locationName":"pageSize" + }, + "ascendingOrder":{ + "shape":"AscendingOrder", + "location":"querystring", + "locationName":"isAscendingOrder" + } + } + }, + "ListPrincipalPoliciesResponse":{ + "type":"structure", + "members":{ + "policies":{"shape":"Policies"}, + "nextMarker":{"shape":"Marker"} + } + }, + "ListPrincipalThingsRequest":{ + "type":"structure", + "required":["principal"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "principal":{ + "shape":"Principal", + "location":"header", + "locationName":"x-amzn-principal" + } + } + }, + "ListPrincipalThingsResponse":{ + "type":"structure", + "members":{ + "things":{"shape":"ThingNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListThingPrincipalsRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "ListThingPrincipalsResponse":{ + "type":"structure", + "members":{ + "principals":{"shape":"Principals"} + } + }, + "ListThingsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "attributeName":{ + "shape":"AttributeName", + "location":"querystring", + "locationName":"attributeName" + }, + "attributeValue":{ + "shape":"AttributeValue", + "location":"querystring", + "locationName":"attributeValue" + } + } + }, + "ListThingsResponse":{ + 
"type":"structure", + "members":{ + "things":{"shape":"ThingAttributeList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListTopicRulesRequest":{ + "type":"structure", + "members":{ + "topic":{ + "shape":"Topic", + "location":"querystring", + "locationName":"topic" + }, + "maxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "ruleDisabled":{ + "shape":"IsDisabled", + "location":"querystring", + "locationName":"ruleDisabled" + } + } + }, + "ListTopicRulesResponse":{ + "type":"structure", + "members":{ + "rules":{"shape":"TopicRuleList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "LogLevel":{ + "type":"string", + "enum":[ + "DEBUG", + "INFO", + "ERROR", + "WARN", + "DISABLED" + ] + }, + "LoggingOptionsPayload":{ + "type":"structure", + "required":["roleArn"], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "logLevel":{"shape":"LogLevel"} + } + }, + "MalformedPolicyException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Marker":{"type":"string"}, + "MaxResults":{ + "type":"integer", + "min":1, + "max":10000 + }, + "NextToken":{"type":"string"}, + "PageSize":{ + "type":"integer", + "min":1, + "max":250 + }, + "PartitionKey":{"type":"string"}, + "PayloadField":{"type":"string"}, + "Policies":{ + "type":"list", + "member":{"shape":"Policy"} + }, + "Policy":{ + "type":"structure", + "members":{ + "policyName":{"shape":"PolicyName"}, + "policyArn":{"shape":"PolicyArn"} + } + }, + "PolicyArn":{"type":"string"}, + "PolicyDocument":{"type":"string"}, + "PolicyName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[\\w+=,.@-]+" + }, + "PolicyVersion":{ + "type":"structure", + "members":{ + "versionId":{"shape":"PolicyVersionId"}, + "isDefaultVersion":{"shape":"IsDefaultVersion"}, + "createDate":{"shape":"DateType"} + 
} + }, + "PolicyVersionId":{ + "type":"string", + "pattern":"[0-9]+" + }, + "PolicyVersions":{ + "type":"list", + "member":{"shape":"PolicyVersion"} + }, + "Principal":{"type":"string"}, + "PrincipalArn":{"type":"string"}, + "Principals":{ + "type":"list", + "member":{"shape":"PrincipalArn"} + }, + "PrivateKey":{ + "type":"string", + "min":1, + "sensitive":true + }, + "PublicKey":{ + "type":"string", + "min":1 + }, + "QueueUrl":{"type":"string"}, + "RangeKeyField":{"type":"string"}, + "RangeKeyValue":{"type":"string"}, + "RejectCertificateTransferRequest":{ + "type":"structure", + "required":["certificateId"], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + } + } + }, + "ReplaceTopicRuleRequest":{ + "type":"structure", + "required":[ + "ruleName", + "topicRulePayload" + ], + "members":{ + "ruleName":{ + "shape":"RuleName", + "location":"uri", + "locationName":"ruleName" + }, + "topicRulePayload":{"shape":"TopicRulePayload"} + }, + "payload":"topicRulePayload" + }, + "RepublishAction":{ + "type":"structure", + "required":[ + "roleArn", + "topic" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "topic":{"shape":"TopicPattern"} + } + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RuleArn":{"type":"string"}, + "RuleName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"^[a-zA-Z0-9_]+$" + }, + "S3Action":{ + "type":"structure", + "required":[ + "roleArn", + "bucketName", + "key" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "bucketName":{"shape":"BucketName"}, + "key":{"shape":"Key"} + } + }, + "SQL":{"type":"string"}, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + 
"message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "SetAsActive":{"type":"boolean"}, + "SetAsDefault":{"type":"boolean"}, + "SetDefaultPolicyVersionRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyVersionId" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "location":"uri", + "locationName":"policyName" + }, + "policyVersionId":{ + "shape":"PolicyVersionId", + "location":"uri", + "locationName":"policyVersionId" + } + } + }, + "SetLoggingOptionsRequest":{ + "type":"structure", + "required":["loggingOptionsPayload"], + "members":{ + "loggingOptionsPayload":{"shape":"LoggingOptionsPayload"} + }, + "payload":"loggingOptionsPayload" + }, + "SnsAction":{ + "type":"structure", + "required":[ + "targetArn", + "roleArn" + ], + "members":{ + "targetArn":{"shape":"AwsArn"}, + "roleArn":{"shape":"AwsArn"} + } + }, + "SqlParseException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":406}, + "exception":true + }, + "SqsAction":{ + "type":"structure", + "required":[ + "roleArn", + "queueUrl" + ], + "members":{ + "roleArn":{"shape":"AwsArn"}, + "queueUrl":{"shape":"QueueUrl"}, + "useBase64":{"shape":"UseBase64"} + } + }, + "StreamName":{"type":"string"}, + "TableName":{"type":"string"}, + "ThingArn":{"type":"string"}, + "ThingAttribute":{ + "type":"structure", + "members":{ + "thingName":{"shape":"ThingName"}, + "attributes":{"shape":"Attributes"} + } + }, + "ThingAttributeList":{ + "type":"list", + "member":{"shape":"ThingAttribute"} + }, + "ThingName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ThingNameList":{ + "type":"list", + "member":{"shape":"ThingName"} + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "Topic":{"type":"string"}, + 
"TopicPattern":{"type":"string"}, + "TopicRule":{ + "type":"structure", + "members":{ + "ruleName":{"shape":"RuleName"}, + "sql":{"shape":"SQL"}, + "description":{"shape":"Description"}, + "createdAt":{"shape":"CreatedAtDate"}, + "actions":{"shape":"ActionList"}, + "ruleDisabled":{"shape":"IsDisabled"} + } + }, + "TopicRuleList":{ + "type":"list", + "member":{"shape":"TopicRuleListItem"} + }, + "TopicRuleListItem":{ + "type":"structure", + "members":{ + "ruleArn":{"shape":"RuleArn"}, + "ruleName":{"shape":"RuleName"}, + "topicPattern":{"shape":"TopicPattern"}, + "createdAt":{"shape":"CreatedAtDate"}, + "ruleDisabled":{"shape":"IsDisabled"} + } + }, + "TopicRulePayload":{ + "type":"structure", + "required":[ + "sql", + "actions" + ], + "members":{ + "sql":{"shape":"SQL"}, + "description":{"shape":"Description"}, + "actions":{"shape":"ActionList"}, + "ruleDisabled":{"shape":"IsDisabled"} + } + }, + "TransferAlreadyCompletedException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":410}, + "exception":true + }, + "TransferCertificateRequest":{ + "type":"structure", + "required":[ + "certificateId", + "targetAwsAccount" + ], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + }, + "targetAwsAccount":{ + "shape":"AwsAccountId", + "location":"querystring", + "locationName":"targetAwsAccount" + } + } + }, + "TransferCertificateResponse":{ + "type":"structure", + "members":{ + "transferredCertificateArn":{"shape":"CertificateArn"} + } + }, + "TransferConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":401}, + "exception":true + }, + "UpdateCertificateRequest":{ + "type":"structure", + "required":[ + "certificateId", 
+ "newStatus" + ], + "members":{ + "certificateId":{ + "shape":"CertificateId", + "location":"uri", + "locationName":"certificateId" + }, + "newStatus":{ + "shape":"CertificateStatus", + "location":"querystring", + "locationName":"newStatus" + } + } + }, + "UpdateThingRequest":{ + "type":"structure", + "required":[ + "thingName", + "attributePayload" + ], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "attributePayload":{"shape":"AttributePayload"} + } + }, + "UpdateThingResponse":{ + "type":"structure", + "members":{ + } + }, + "UseBase64":{"type":"boolean"}, + "VersionsLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "errorMessage":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1091 @@ +{ + "version": "2.0", + "operations": { + "AcceptCertificateTransfer": "

    Accepts a pending certificate transfer. The default state of the certificate is INACTIVE.

    To check for pending certificate transfers, call ListCertificates to enumerate your certificates.

    ", + "AttachPrincipalPolicy": "

    Attaches the specified policy to the specified principal (certificate or other credential).

    ", + "AttachThingPrincipal": "

    Attaches the specified principal to the specified thing.

    ", + "CancelCertificateTransfer": "

    Cancels a pending transfer for the specified certificate.

    Note Only the transfer source account can use this operation to cancel a transfer (transfer destinations can use RejectCertificateTransfer instead). After transfer, AWS IoT returns the certificate to the source account in the INACTIVE state. Once the destination account has accepted the transfer, the transfer may no longer be cancelled.

    After a certificate transfer is cancelled, the status of the certificate changes from PENDING_TRANSFER to INACTIVE.

    ", + "CreateCertificateFromCsr": "

    Creates an X.509 certificate using the specified certificate signing request.

    Note Reusing the same certificate signing request (CSR) results in a distinct certificate.

    You can create multiple certificates in a batch by creating a directory and copying multiple .csr files into that directory and specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs.

    Assuming a set of CSRs are located inside of the directory my-csr-directory:

    >

    On Linux and OSX, the command is:

    $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

    This command lists all of the CSRs in my-csr-directory and pipes each CSR filename to the aws iot create-certificate-from-csr AWS CLI command to create a certificate for the corresponding CSR.

    The aws iot create-certificate-from-csr part of the command can also be run in parallel to speed up the certificate creation process:

    $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

    On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:

    > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}

    On Windows Command Prompt, the command to create certificates for all CSRs in my-csr-directory is:

    > forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"

    ", + "CreateKeysAndCertificate": "

    Creates a 2048 bit RSA key pair and issues an X.509 certificate using the issued public key.

    Note This is the only time AWS IoT issues the private key for this certificate. It is important to keep track of the private key.

    ", + "CreatePolicy": "

    Creates an AWS IoT policy.

    The created policy is the default version for the policy. This operation creates a policy version with a version identifier of 1 and sets 1 as the policy's default version.

    ", + "CreatePolicyVersion": "

    Creates a new version of the specified AWS IoT policy. To update a policy, create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.

    Optionally, you can set the new version as the policy's default version. The default version is the operative version; that is, the version that is in effect for the certificates that the policy is attached to.

    ", + "CreateThing": "

    Creates a thing in the thing registry.

    ", + "CreateTopicRule": "

    Creates a rule.

    ", + "DeleteCertificate": "

    Deletes the specified certificate.

    A certificate cannot be deleted if it has a policy attached to it or if its status is set to ACTIVE. To delete a certificate, first detach all policies using the DetachPrincipalPolicy API. Next use the UpdateCertificate API to set the certificate to the INACTIVE status.

    ", + "DeletePolicy": "

    Deletes the specified policy.

    A policy cannot be deleted if it has non-default versions and/or it is attached to any certificate.

    To delete a policy, delete all non-default versions of the policy using the DeletePolicyVersion API, detach the policy from any certificate using the DetachPrincipalPolicy API, and then use the DeletePolicy API to delete the policy.

    When a policy is deleted using DeletePolicy, its default version is deleted with it.

    ", + "DeletePolicyVersion": "

    Deletes the specified version of the specified policy. You cannot delete the default version of a policy using this API. To delete the default version of a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

    ", + "DeleteThing": "

    Deletes the specified thing from the Thing Registry.

    ", + "DeleteTopicRule": "

    Deletes the specified rule.

    ", + "DescribeCertificate": "

    Gets information about the specified certificate.

    ", + "DescribeEndpoint": "

    Returns a unique endpoint specific to the AWS account making the call. You specify the following URI when updating state information for your thing: https://endpoint/things/thingName/shadow.

    ", + "DescribeThing": "

    Gets information about the specified thing.

    ", + "DetachPrincipalPolicy": "

    Removes the specified policy from the specified certificate.

    ", + "DetachThingPrincipal": "

    Detaches the specified principal from the specified thing.

    ", + "DisableTopicRule": "

    Disables the specified rule

    ", + "EnableTopicRule": "

    Enables the specified rule.

    ", + "GetLoggingOptions": "

    Gets the logging options.

    ", + "GetPolicy": "

    Gets information about the specified policy with the policy document of the default version.

    ", + "GetPolicyVersion": "

    Gets information about the specified policy version.

    ", + "GetTopicRule": "

    Gets information about the specified rule.

    ", + "ListCertificates": "

    Lists your certificates.

    The results are paginated with a default page size of 25. You can retrieve additional results using the returned marker.

    ", + "ListPolicies": "

    Lists your policies.

    ", + "ListPolicyVersions": "

    Lists the versions of the specified policy, and identifies the default version.

    ", + "ListPrincipalPolicies": "

    Lists the policies attached to the specified principal. If you use an Amazon Cognito identity, the ID needs to be in Amazon Cognito Identity format.

    ", + "ListPrincipalThings": "

    Lists the things associated with the specified principal.

    ", + "ListThingPrincipals": "

    Lists the principals associated with the specified thing.

    ", + "ListThings": "

    Lists your things. You can pass an AttributeName and/or AttributeValue to filter your things. For example: \"ListThings where AttributeName=Color and AttributeValue=Red\"

    ", + "ListTopicRules": "

    Lists the rules for the specific topic.

    ", + "RejectCertificateTransfer": "

    Rejects a pending certificate transfer. After AWS IoT rejects a certificate transfer, the certificate status changes from PENDING_TRANSFER to INACTIVE.

    To check for pending certificate transfers, call ListCertificates to enumerate your certificates.

    This operation can only be called by the transfer destination. Once called, the certificate will be returned to the source's account in the INACTIVE state.

    ", + "ReplaceTopicRule": "

    Replaces the specified rule. You must specify all parameters for the new rule.

    ", + "SetDefaultPolicyVersion": "

    Sets the specified version of the specified policy as the policy's default (operative) version. This action affects all certificates that the policy is attached to. To list the principals the policy is attached to, use the ListPrincipalPolicy API.

    ", + "SetLoggingOptions": "

    Sets the logging options.

    ", + "TransferCertificate": "

    Transfers the specified certificate to the specified AWS account.

    You can cancel the transfer until it is acknowledged by the recipient.

    No notification is sent to the transfer destination's account, it is up to the caller to notify the transfer target.

    The certificate being transferred must not be in the ACTIVE state. It can be deactivated using the UpdateCertificate API.

    The certificate must not have any policies attached to it. These can be detached using the DetachPrincipalPolicy API.

    ", + "UpdateCertificate": "

    Updates the status of the specified certificate. This operation is idempotent.

    Moving a cert from the ACTIVE state (including REVOKED) will NOT disconnect currently-connected devices, although these devices will be unable to reconnect.

    The ACTIVE state is required to authenticate devices connecting to AWS IoT using a certificate.

    ", + "UpdateThing": "

    Updates the data for a thing.

    " + }, + "service": "AWS IoT

    AWS IoT provides secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate with, configure rules for data processing and integration with other services, organize resources associated with each thing (Thing Registry), configure logging, and create and manage policies and credentials to authenticate things.

    For more information about how AWS IoT works, see the Developer Guide.

    ", + "shapes": { + "AcceptCertificateTransferRequest": { + "base": "

    The input for the AcceptCertificateTransfer operation.

    ", + "refs": { + } + }, + "Action": { + "base": "

    Describes the actions associated with a rule.

    ", + "refs": { + "ActionList$member": null + } + }, + "ActionList": { + "base": null, + "refs": { + "TopicRule$actions": "

    The actions associated with the rule.

    ", + "TopicRulePayload$actions": "

    The actions associated with the rule.

    " + } + }, + "AscendingOrder": { + "base": null, + "refs": { + "ListCertificatesRequest$ascendingOrder": "

    Specifies the order for results. If True, the results are returned in ascending order, based on the creation date.

    ", + "ListPoliciesRequest$ascendingOrder": "

    Specifies the order for results. If true, the results are returned in ascending creation order.

    ", + "ListPrincipalPoliciesRequest$ascendingOrder": "

    Specifies the order for results. If true, results are returned in ascending creation order.

    " + } + }, + "AttachPrincipalPolicyRequest": { + "base": "

    The input for the AttachPrincipalPolicy operation.

    ", + "refs": { + } + }, + "AttachThingPrincipalRequest": { + "base": "

    The input for the AttachThingPrincipal operation.

    ", + "refs": { + } + }, + "AttachThingPrincipalResponse": { + "base": "

    The output from the AttachThingPrincipal operation.

    ", + "refs": { + } + }, + "AttributeName": { + "base": null, + "refs": { + "Attributes$key": null, + "ListThingsRequest$attributeName": "

    The attribute name.

    " + } + }, + "AttributePayload": { + "base": "

    The attribute payload, a JSON string containing up to three key-value pairs.

    For example: {\\\"attributes\\\":{\\\"string1\\\":\\\"string2\\\"}}

    ", + "refs": { + "CreateThingRequest$attributePayload": "

    The attribute payload. Which consists of up to 3 name/value pairs in a JSON document. For example: {\\\"attributes\\\":{\\\"string1\\\":\\\"string2\\\"}}

    ", + "UpdateThingRequest$attributePayload": "

    The attribute payload, a JSON string containing up to three key-value pairs.

    For example: {\\\"attributes\\\":{\\\"string1\\\":\\\"string2\\\"}}

    " + } + }, + "AttributeValue": { + "base": null, + "refs": { + "Attributes$value": null, + "ListThingsRequest$attributeValue": "

    The attribute value.

    " + } + }, + "Attributes": { + "base": null, + "refs": { + "AttributePayload$attributes": "

    A JSON string containing up to three key-value pair in JSON format.

    For example: {\\\"attributes\\\":{\\\"string1\\\":\\\"string2\\\"}}

    ", + "DescribeThingResponse$attributes": "

    The attributes which are name/value pairs in JSON format. For example:

    {\\\"attributes\\\":{\\\"some-name1\\\":\\\"some-value1\\\"}, {\\\"some-name2\\\":\\\"some-value2\\\"}, {\\\"some-name3\\\":\\\"some-value3\\\"}}

    ", + "ThingAttribute$attributes": "

    The attributes.

    " + } + }, + "AwsAccountId": { + "base": null, + "refs": { + "CertificateDescription$ownedBy": "

    The ID of the AWS account that owns the certificate.

    ", + "TransferCertificateRequest$targetAwsAccount": "

    The AWS account.

    " + } + }, + "AwsArn": { + "base": null, + "refs": { + "DynamoDBAction$roleArn": "

    The ARN of the IAM role that grants access to the DynamoDB table.

    ", + "FirehoseAction$roleArn": "

    The IAM role that grants access to the firehose stream.

    ", + "GetLoggingOptionsResponse$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "KinesisAction$roleArn": "

    The ARN of the IAM role that grants access to the Kinesis stream.

    ", + "LoggingOptionsPayload$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "RepublishAction$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "S3Action$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "SnsAction$targetArn": "

    The ARN of the SNS topic.

    ", + "SnsAction$roleArn": "

    The ARN of the IAM role that grants access.

    ", + "SqsAction$roleArn": "

    The ARN of the IAM role that grants access.

    " + } + }, + "BucketName": { + "base": null, + "refs": { + "S3Action$bucketName": "

    The S3 bucket.

    " + } + }, + "CancelCertificateTransferRequest": { + "base": "

    The input for the CancelCertificateTransfer operation.

    ", + "refs": { + } + }, + "Certificate": { + "base": "

    Information about a certificate.

    ", + "refs": { + "Certificates$member": null + } + }, + "CertificateArn": { + "base": null, + "refs": { + "Certificate$certificateArn": "

    The ARN of the certificate.

    ", + "CertificateDescription$certificateArn": "

    The ARN of the certificate.

    ", + "CreateCertificateFromCsrResponse$certificateArn": "

    The Amazon Resource Name (ARN) of the certificate. You can use the ARN as a principal for policy operations.

    ", + "CreateKeysAndCertificateResponse$certificateArn": "

    The ARN of the certificate.

    ", + "TransferCertificateResponse$transferredCertificateArn": "

    The ARN of the certificate.

    " + } + }, + "CertificateDescription": { + "base": "

    Describes a certificate.

    ", + "refs": { + "DescribeCertificateResponse$certificateDescription": "

    The description of the certificate.

    " + } + }, + "CertificateId": { + "base": null, + "refs": { + "AcceptCertificateTransferRequest$certificateId": "

    The ID of the certificate.

    ", + "CancelCertificateTransferRequest$certificateId": "

    The ID of the certificate.

    ", + "Certificate$certificateId": "

    The ID of the certificate.

    ", + "CertificateDescription$certificateId": "

    The ID of the certificate.

    ", + "CreateCertificateFromCsrResponse$certificateId": "

    The ID of the certificate. Certificate management operations only take a certificateId.

    ", + "CreateKeysAndCertificateResponse$certificateId": "

    The ID of the certificate. AWS IoT issues a default subject name for the certificate (e.g., AWS IoT Certificate).

    ", + "DeleteCertificateRequest$certificateId": "

    The ID of the certificate.

    ", + "DescribeCertificateRequest$certificateId": "

    The ID of the certificate.

    ", + "RejectCertificateTransferRequest$certificateId": "

    The ID of the certificate.

    ", + "TransferCertificateRequest$certificateId": "

    The ID of the certificate.

    ", + "UpdateCertificateRequest$certificateId": "

    The ID of the certificate.

    " + } + }, + "CertificatePem": { + "base": null, + "refs": { + "CertificateDescription$certificatePem": "

    The certificate data, in PEM format.

    ", + "CreateCertificateFromCsrResponse$certificatePem": "

    The certificate data, in PEM format.

    ", + "CreateKeysAndCertificateResponse$certificatePem": "

    The certificate data, in PEM format.

    " + } + }, + "CertificateSigningRequest": { + "base": null, + "refs": { + "CreateCertificateFromCsrRequest$certificateSigningRequest": "

    The certificate signing request (CSR).

    " + } + }, + "CertificateStateException": { + "base": "

    The certificate operation is not allowed.

    ", + "refs": { + } + }, + "CertificateStatus": { + "base": null, + "refs": { + "Certificate$status": "

    The status of the certificate.

    ", + "CertificateDescription$status": "

    The status of the certificate.

    ", + "UpdateCertificateRequest$newStatus": "

    The new status.

    Note: setting the status to PENDING_TRANSFER will result in an exception being thrown. PENDING_TRANSFER is a status used internally by AWS IoT and is not meant to be used by developers.

    " + } + }, + "Certificates": { + "base": null, + "refs": { + "ListCertificatesResponse$certificates": "

    The descriptions of the certificates.

    " + } + }, + "ClientId": { + "base": null, + "refs": { + "DescribeThingResponse$defaultClientId": "

    The default client ID.

    " + } + }, + "CreateCertificateFromCsrRequest": { + "base": "

    The input for the CreateCertificateFromCsr operation.

    ", + "refs": { + } + }, + "CreateCertificateFromCsrResponse": { + "base": "

    The output from the CreateCertificateFromCsr operation.

    ", + "refs": { + } + }, + "CreateKeysAndCertificateRequest": { + "base": "

    The input for the CreateKeysAndCertificate operation.

    ", + "refs": { + } + }, + "CreateKeysAndCertificateResponse": { + "base": "

    The output of the CreateKeysAndCertificate operation.

    ", + "refs": { + } + }, + "CreatePolicyRequest": { + "base": "

    The input for the CreatePolicy operation.

    ", + "refs": { + } + }, + "CreatePolicyResponse": { + "base": "

    The output from the CreatePolicy operation.

    ", + "refs": { + } + }, + "CreatePolicyVersionRequest": { + "base": "

    The input for the CreatePolicyVersion operation.

    ", + "refs": { + } + }, + "CreatePolicyVersionResponse": { + "base": "

    The output of the CreatePolicyVersion operation.

    ", + "refs": { + } + }, + "CreateThingRequest": { + "base": "

    The input for the CreateThing operation.

    ", + "refs": { + } + }, + "CreateThingResponse": { + "base": "

    The output of the CreateThing operation.

    ", + "refs": { + } + }, + "CreateTopicRuleRequest": { + "base": "

    The input for the CreateTopicRule operation.

    ", + "refs": { + } + }, + "CreatedAtDate": { + "base": null, + "refs": { + "TopicRule$createdAt": "

    The date and time the rule was created.

    ", + "TopicRuleListItem$createdAt": "

    The date and time the rule was created.

    " + } + }, + "DateType": { + "base": null, + "refs": { + "Certificate$creationDate": "

    The date and time the certificate was created.

    ", + "CertificateDescription$creationDate": "

    The date and time the certificate was created.

    ", + "CertificateDescription$lastModifiedDate": "

    The date and time the certificate was last modified.

    ", + "PolicyVersion$createDate": "

    The date and time the policy was created.

    " + } + }, + "DeleteCertificateRequest": { + "base": "

    The input for the DeleteCertificate operation.

    ", + "refs": { + } + }, + "DeleteConflictException": { + "base": "

    You can't delete the resource because it is attached to one or more resources.

    ", + "refs": { + } + }, + "DeletePolicyRequest": { + "base": "

    The input for the DeletePolicy operation.

    ", + "refs": { + } + }, + "DeletePolicyVersionRequest": { + "base": "

    The input for the DeletePolicyVersion operation.

    ", + "refs": { + } + }, + "DeleteThingRequest": { + "base": "

    The input for the DeleteThing operation.

    ", + "refs": { + } + }, + "DeleteThingResponse": { + "base": "

    The output of the DeleteThing operation.

    ", + "refs": { + } + }, + "DeleteTopicRuleRequest": { + "base": "

    The input for the DeleteTopicRule operation.

    ", + "refs": { + } + }, + "DeliveryStreamName": { + "base": null, + "refs": { + "FirehoseAction$deliveryStreamName": "

    The delivery stream name.

    " + } + }, + "DescribeCertificateRequest": { + "base": "

    The input for the DescribeCertificate operation.

    ", + "refs": { + } + }, + "DescribeCertificateResponse": { + "base": "

    The output of the DescribeCertificate operation.

    ", + "refs": { + } + }, + "DescribeEndpointRequest": { + "base": "

    The input for the DescribeEndpoint operation.

    ", + "refs": { + } + }, + "DescribeEndpointResponse": { + "base": "

    The output from the DescribeEndpoint operation.

    ", + "refs": { + } + }, + "DescribeThingRequest": { + "base": "

    The input for the DescribeThing operation.

    ", + "refs": { + } + }, + "DescribeThingResponse": { + "base": "

    The output from the DescribeThing operation.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "TopicRule$description": "

    The description of the rule.

    ", + "TopicRulePayload$description": "

    The description of the rule.

    " + } + }, + "DetachPrincipalPolicyRequest": { + "base": "

    The input for the DetachPrincipalPolicy operation.

    ", + "refs": { + } + }, + "DetachThingPrincipalRequest": { + "base": "

    The input for the DetachThingPrincipal operation.

    ", + "refs": { + } + }, + "DetachThingPrincipalResponse": { + "base": "

    The output from the DetachThingPrincipal operation.

    ", + "refs": { + } + }, + "DisableTopicRuleRequest": { + "base": "

    The input for the DisableTopicRuleRequest operation.

    ", + "refs": { + } + }, + "DynamoDBAction": { + "base": "

    Describes an action to write to a DynamoDB table.

    The tableName, hashKeyField, and rangeKeyField values must match the values used when you created the table.

    The hashKeyValue and rangeKeyvalue fields use a substitution template syntax. These templates provide data at runtime. The syntax is as follows: ${sql-expression}.

    You can specify any expression that's valid in a WHERE or SELECT clause, including JSON properties, comparisons, calculations, and functions. For example, the following field uses the third level of the topic:

    \"hashKeyValue\": \"${topic(3)}\"

    The following field uses the timestamp:

    \"rangeKeyValue\": \"${timestamp()}\"

    ", + "refs": { + "Action$dynamoDB": "

    Write to a DynamoDB table.

    " + } + }, + "EnableTopicRuleRequest": { + "base": "

    The input for the EnableTopicRuleRequest operation.

    ", + "refs": { + } + }, + "EndpointAddress": { + "base": null, + "refs": { + "DescribeEndpointResponse$endpointAddress": "

    The endpoint. The format of the endpoint is as follows: identifier.iot.region.amazonaws.com.

    " + } + }, + "FirehoseAction": { + "base": "

    Describes an action that writes data to a Kinesis Firehose stream.

    ", + "refs": { + "Action$firehose": "

    Write to a Kinesis Firehose stream.

    " + } + }, + "FunctionArn": { + "base": null, + "refs": { + "LambdaAction$functionArn": "

    The ARN of the Lambda function.

    " + } + }, + "GetLoggingOptionsRequest": { + "base": "

    The input for the GetLoggingOptions operation.

    ", + "refs": { + } + }, + "GetLoggingOptionsResponse": { + "base": "

    The output from the GetLoggingOptions operation.

    ", + "refs": { + } + }, + "GetPolicyRequest": { + "base": "

    The input for the GetPolicy operation.

    ", + "refs": { + } + }, + "GetPolicyResponse": { + "base": "

    The output from the GetPolicy operation.

    ", + "refs": { + } + }, + "GetPolicyVersionRequest": { + "base": "

    The input for the GetPolicyVersion operation.

    ", + "refs": { + } + }, + "GetPolicyVersionResponse": { + "base": "

    The output from the GetPolicyVersion operation.

    ", + "refs": { + } + }, + "GetTopicRuleRequest": { + "base": "

    The input for the GetTopicRule operation.

    ", + "refs": { + } + }, + "GetTopicRuleResponse": { + "base": "

    The output from the GetTopicRule operation.

    ", + "refs": { + } + }, + "HashKeyField": { + "base": null, + "refs": { + "DynamoDBAction$hashKeyField": "

    The hash key name.

    " + } + }, + "HashKeyValue": { + "base": null, + "refs": { + "DynamoDBAction$hashKeyValue": "

    The hash key value.

    " + } + }, + "InternalException": { + "base": "

    An unexpected error has occurred.

    ", + "refs": { + } + }, + "InternalFailureException": { + "base": "

    An unexpected error has occurred.

    ", + "refs": { + } + }, + "InvalidRequestException": { + "base": "

    The request is not valid.

    ", + "refs": { + } + }, + "IsDefaultVersion": { + "base": null, + "refs": { + "CreatePolicyVersionResponse$isDefaultVersion": "

    Specifies whether the policy version is the default.

    ", + "GetPolicyVersionResponse$isDefaultVersion": "

    Specifies whether the policy version is the default.

    ", + "PolicyVersion$isDefaultVersion": "

    Specifies whether the policy version is the default.

    " + } + }, + "IsDisabled": { + "base": null, + "refs": { + "ListTopicRulesRequest$ruleDisabled": "

    Specifies whether the rule is disabled.

    ", + "TopicRule$ruleDisabled": "

    Specifies whether the rule is disabled.

    ", + "TopicRuleListItem$ruleDisabled": "

    Specifies whether the rule is disabled.

    ", + "TopicRulePayload$ruleDisabled": "

    Specifies whether the rule is disabled.

    " + } + }, + "Key": { + "base": null, + "refs": { + "S3Action$key": "

    The object key.

    " + } + }, + "KeyPair": { + "base": "

    Describes a key pair.

    ", + "refs": { + "CreateKeysAndCertificateResponse$keyPair": "

    The generated key pair.

    " + } + }, + "KinesisAction": { + "base": "

    Describes an action to write data to an Amazon Kinesis stream.

    ", + "refs": { + "Action$kinesis": "

    Write data to a Kinesis stream.

    " + } + }, + "LambdaAction": { + "base": "

    Describes an action to invoke a Lambda function.

    ", + "refs": { + "Action$lambda": "

    Invoke a Lambda function.

    " + } + }, + "LimitExceededException": { + "base": "

    The number of attached entities exceeds the limit.

    ", + "refs": { + } + }, + "ListCertificatesRequest": { + "base": "

    The input for the ListCertificates operation.

    ", + "refs": { + } + }, + "ListCertificatesResponse": { + "base": "

    The output of the ListCertificates operation.

    ", + "refs": { + } + }, + "ListPoliciesRequest": { + "base": "

    The input for the ListPolicies operation.

    ", + "refs": { + } + }, + "ListPoliciesResponse": { + "base": "

    The output from the ListPolicies operation.

    ", + "refs": { + } + }, + "ListPolicyVersionsRequest": { + "base": "

    The input for the ListPolicyVersions operation.

    ", + "refs": { + } + }, + "ListPolicyVersionsResponse": { + "base": "

    The output from the ListPolicyVersions operation.

    ", + "refs": { + } + }, + "ListPrincipalPoliciesRequest": { + "base": "

    The input for the ListPrincipalPolicies operation.

    ", + "refs": { + } + }, + "ListPrincipalPoliciesResponse": { + "base": "

    The output from the ListPrincipalPolicies operation.

    ", + "refs": { + } + }, + "ListPrincipalThingsRequest": { + "base": "

    The input for the ListPrincipalThings operation.

    ", + "refs": { + } + }, + "ListPrincipalThingsResponse": { + "base": "

    The output from the ListPrincipalThings operation.

    ", + "refs": { + } + }, + "ListThingPrincipalsRequest": { + "base": "

    The input for the ListThingPrincipal operation.

    ", + "refs": { + } + }, + "ListThingPrincipalsResponse": { + "base": "

    The output from the ListThingPrincipals operation.

    ", + "refs": { + } + }, + "ListThingsRequest": { + "base": "

    The input for the ListThings operation.

    ", + "refs": { + } + }, + "ListThingsResponse": { + "base": "

    The output from the ListThings operation.

    ", + "refs": { + } + }, + "ListTopicRulesRequest": { + "base": "

    The input for the ListTopicRules operation.

    ", + "refs": { + } + }, + "ListTopicRulesResponse": { + "base": "

    The output from the ListTopicRules operation.

    ", + "refs": { + } + }, + "LogLevel": { + "base": null, + "refs": { + "GetLoggingOptionsResponse$logLevel": "

    The logging level.

    ", + "LoggingOptionsPayload$logLevel": "

    The logging level.

    " + } + }, + "LoggingOptionsPayload": { + "base": "

    Describes the logging options payload.

    ", + "refs": { + "SetLoggingOptionsRequest$loggingOptionsPayload": "

    The logging options payload.

    " + } + }, + "MalformedPolicyException": { + "base": "

    The policy documentation is not valid.

    ", + "refs": { + } + }, + "Marker": { + "base": null, + "refs": { + "ListCertificatesRequest$marker": "

    The marker for the next set of results.

    ", + "ListCertificatesResponse$nextMarker": "

    The marker for the next set of results, or null if there are no additional results.

    ", + "ListPoliciesRequest$marker": "

    The marker for the next set of results.

    ", + "ListPoliciesResponse$nextMarker": "

    The marker for the next set of results, or null if there are no additional results.

    ", + "ListPrincipalPoliciesRequest$marker": "

    The marker for the next set of results.

    ", + "ListPrincipalPoliciesResponse$nextMarker": "

    The marker for the next set of results, or null if there are no additional results.

    " + } + }, + "MaxResults": { + "base": null, + "refs": { + "ListPrincipalThingsRequest$maxResults": "

    The maximum number of principals to return.

    ", + "ListThingsRequest$maxResults": "

    The maximum number of results.

    ", + "ListTopicRulesRequest$maxResults": "

    The maximum number of results to return.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "ListPrincipalThingsRequest$nextToken": "

    A token used to retrieve the next value.

    ", + "ListPrincipalThingsResponse$nextToken": "

    A token used to retrieve the next value.

    ", + "ListThingsRequest$nextToken": "

    The token for the next value.

    ", + "ListThingsResponse$nextToken": "

    A token used to retrieve the next value.

    ", + "ListTopicRulesRequest$nextToken": "

    A token used to retrieve the next value.

    ", + "ListTopicRulesResponse$nextToken": "

    A token used to retrieve the next value.

    " + } + }, + "PageSize": { + "base": null, + "refs": { + "ListCertificatesRequest$pageSize": "

    The result page size.

    ", + "ListPoliciesRequest$pageSize": "

    The result page size.

    ", + "ListPrincipalPoliciesRequest$pageSize": "

    The result page size.

    " + } + }, + "PartitionKey": { + "base": null, + "refs": { + "KinesisAction$partitionKey": "

    The partition key.

    " + } + }, + "PayloadField": { + "base": null, + "refs": { + "DynamoDBAction$payloadField": "

    The action payload, this name can be customized.

    " + } + }, + "Policies": { + "base": null, + "refs": { + "ListPoliciesResponse$policies": "

    The descriptions of the policies.

    ", + "ListPrincipalPoliciesResponse$policies": "

    The policies.

    " + } + }, + "Policy": { + "base": "

    Describes an AWS IoT policy.

    ", + "refs": { + "Policies$member": null + } + }, + "PolicyArn": { + "base": null, + "refs": { + "CreatePolicyResponse$policyArn": "

    The policy ARN.

    ", + "CreatePolicyVersionResponse$policyArn": "

    The policy ARN.

    ", + "GetPolicyResponse$policyArn": "

    The policy ARN.

    ", + "GetPolicyVersionResponse$policyArn": "

    The policy ARN.

    ", + "Policy$policyArn": "

    The policy ARN.

    " + } + }, + "PolicyDocument": { + "base": null, + "refs": { + "CreatePolicyRequest$policyDocument": "

    The JSON document that describes the policy. The length of the policyDocument must be a minimum length of 1, with a maximum length of 2048, excluding whitespace.

    ", + "CreatePolicyResponse$policyDocument": "

    The JSON document that describes the policy.

    ", + "CreatePolicyVersionRequest$policyDocument": "

    The JSON document that describes the policy. Minimum length of 1. Maximum length of 2048 excluding whitespaces

    ", + "CreatePolicyVersionResponse$policyDocument": "

    The JSON document that describes the policy.

    ", + "GetPolicyResponse$policyDocument": "

    The JSON document that describes the policy.

    ", + "GetPolicyVersionResponse$policyDocument": "

    The JSON document that describes the policy.

    " + } + }, + "PolicyName": { + "base": null, + "refs": { + "AttachPrincipalPolicyRequest$policyName": "

    The policy name.

    ", + "CreatePolicyRequest$policyName": "

    The policy name.

    ", + "CreatePolicyResponse$policyName": "

    The policy name.

    ", + "CreatePolicyVersionRequest$policyName": "

    The policy name.

    ", + "DeletePolicyRequest$policyName": "

    The name of the policy to delete.

    ", + "DeletePolicyVersionRequest$policyName": "

    The name of the policy.

    ", + "DetachPrincipalPolicyRequest$policyName": "

    The name of the policy to detach.

    ", + "GetPolicyRequest$policyName": "

    The name of the policy.

    ", + "GetPolicyResponse$policyName": "

    The policy name.

    ", + "GetPolicyVersionRequest$policyName": "

    The name of the policy.

    ", + "GetPolicyVersionResponse$policyName": "

    The policy name.

    ", + "ListPolicyVersionsRequest$policyName": "

    The policy name.

    ", + "Policy$policyName": "

    The policy name.

    ", + "SetDefaultPolicyVersionRequest$policyName": "

    The policy name.

    " + } + }, + "PolicyVersion": { + "base": "

    Describes a policy version.

    ", + "refs": { + "PolicyVersions$member": null + } + }, + "PolicyVersionId": { + "base": null, + "refs": { + "CreatePolicyResponse$policyVersionId": "

    The policy version ID.

    ", + "CreatePolicyVersionResponse$policyVersionId": "

    The policy version ID.

    ", + "DeletePolicyVersionRequest$policyVersionId": "

    The policy version ID.

    ", + "GetPolicyResponse$defaultVersionId": "

    The default policy version ID.

    ", + "GetPolicyVersionRequest$policyVersionId": "

    The policy version ID.

    ", + "GetPolicyVersionResponse$policyVersionId": "

    The policy version ID.

    ", + "PolicyVersion$versionId": "

    The policy version ID.

    ", + "SetDefaultPolicyVersionRequest$policyVersionId": "

    The policy version ID.

    " + } + }, + "PolicyVersions": { + "base": null, + "refs": { + "ListPolicyVersionsResponse$policyVersions": "

    The policy versions.

    " + } + }, + "Principal": { + "base": null, + "refs": { + "AttachPrincipalPolicyRequest$principal": "

    The principal which can be a certificate ARN (as returned from the CreateCertificate operation) or a Cognito ID.

    ", + "AttachThingPrincipalRequest$principal": "

    The principal (certificate or other credential).

    ", + "DetachPrincipalPolicyRequest$principal": "

    The principal

    If the principal is a certificate, specify the certificate ARN. If the principal is a Cognito identity, specify the identity ID.

    ", + "DetachThingPrincipalRequest$principal": "

    The principal.

    ", + "ListPrincipalPoliciesRequest$principal": "

    The principal.

    ", + "ListPrincipalThingsRequest$principal": "

    The principal.

    " + } + }, + "PrincipalArn": { + "base": null, + "refs": { + "Principals$member": null + } + }, + "Principals": { + "base": null, + "refs": { + "ListThingPrincipalsResponse$principals": "

    The principals.

    " + } + }, + "PrivateKey": { + "base": null, + "refs": { + "KeyPair$PrivateKey": "

    The private key.

    " + } + }, + "PublicKey": { + "base": null, + "refs": { + "KeyPair$PublicKey": "

    The public key.

    " + } + }, + "QueueUrl": { + "base": null, + "refs": { + "SqsAction$queueUrl": "

    The URL of the Amazon SQS queue.

    " + } + }, + "RangeKeyField": { + "base": null, + "refs": { + "DynamoDBAction$rangeKeyField": "

    The range key name.

    " + } + }, + "RangeKeyValue": { + "base": null, + "refs": { + "DynamoDBAction$rangeKeyValue": "

    The range key value.

    " + } + }, + "RejectCertificateTransferRequest": { + "base": "

    The input for the RejectCertificateTransfer operation.

    ", + "refs": { + } + }, + "ReplaceTopicRuleRequest": { + "base": "

    The input for the ReplaceTopicRule operation.

    ", + "refs": { + } + }, + "RepublishAction": { + "base": "

    Describes an action to republish to another topic.

    ", + "refs": { + "Action$republish": "

    Publish to another MQTT topic.

    " + } + }, + "ResourceAlreadyExistsException": { + "base": "

    The resource already exists.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The specified resource does not exist.

    ", + "refs": { + } + }, + "RuleArn": { + "base": null, + "refs": { + "GetTopicRuleResponse$ruleArn": "

    The rule ARN.

    ", + "TopicRuleListItem$ruleArn": "

    The rule ARN.

    " + } + }, + "RuleName": { + "base": null, + "refs": { + "CreateTopicRuleRequest$ruleName": "

    The name of the rule.

    ", + "DeleteTopicRuleRequest$ruleName": "

    The name of the rule.

    ", + "DisableTopicRuleRequest$ruleName": "

    The name of the rule to disable.

    ", + "EnableTopicRuleRequest$ruleName": "

    The name of the topic rule to enable.

    ", + "GetTopicRuleRequest$ruleName": "

    The name of the rule.

    ", + "ReplaceTopicRuleRequest$ruleName": "

    The name of the rule.

    ", + "TopicRule$ruleName": "

    The name of the rule.

    ", + "TopicRuleListItem$ruleName": "

    The name of the rule.

    " + } + }, + "S3Action": { + "base": "

    Describes an action to write data to an Amazon S3 bucket.

    ", + "refs": { + "Action$s3": "

    Write to an S3 bucket.

    " + } + }, + "SQL": { + "base": null, + "refs": { + "TopicRule$sql": "

    The SQL statement used to query the topic. When using a SQL query with multiple lines, be sure to escape the newline characters properly.

    ", + "TopicRulePayload$sql": "

    The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference in the AWS IoT Developer Guide.

    " + } + }, + "ServiceUnavailableException": { + "base": "

    The service is temporarily unavailable.

    ", + "refs": { + } + }, + "SetAsActive": { + "base": null, + "refs": { + "AcceptCertificateTransferRequest$setAsActive": "

    Specifies whether the certificate is active.

    ", + "CreateCertificateFromCsrRequest$setAsActive": "

    Specifies whether the certificate is active.

    ", + "CreateKeysAndCertificateRequest$setAsActive": "

    Specifies whether the certificate is active.

    " + } + }, + "SetAsDefault": { + "base": null, + "refs": { + "CreatePolicyVersionRequest$setAsDefault": "

    Specifies whether the policy version is set as the default. When this parameter is true, the new policy version becomes the operative version; that is, the version that is in effect for the certificates that the policy is attached to.

    " + } + }, + "SetDefaultPolicyVersionRequest": { + "base": "

    The input for the SetDefaultPolicyVersion operation.

    ", + "refs": { + } + }, + "SetLoggingOptionsRequest": { + "base": "

    The input for the SetLoggingOptions operation.

    ", + "refs": { + } + }, + "SnsAction": { + "base": "

    Describes an action to publish to an Amazon SNS topic.

    ", + "refs": { + "Action$sns": "

    Publish to an SNS topic.

    " + } + }, + "SqlParseException": { + "base": "

    The Rule-SQL expression can't be parsed correctly.

    ", + "refs": { + } + }, + "SqsAction": { + "base": "

    Describes an action to publish data to an SQS queue.

    ", + "refs": { + "Action$sqs": "

    Publish to an SQS queue.

    " + } + }, + "StreamName": { + "base": null, + "refs": { + "KinesisAction$streamName": "

    The name of the Kinesis stream.

    " + } + }, + "TableName": { + "base": null, + "refs": { + "DynamoDBAction$tableName": "

    The name of the DynamoDB table.

    " + } + }, + "ThingArn": { + "base": null, + "refs": { + "CreateThingResponse$thingArn": "

    The thing ARN.

    " + } + }, + "ThingAttribute": { + "base": "

    Describes a thing attribute.

    ", + "refs": { + "ThingAttributeList$member": null + } + }, + "ThingAttributeList": { + "base": null, + "refs": { + "ListThingsResponse$things": "

    The things.

    " + } + }, + "ThingName": { + "base": null, + "refs": { + "AttachThingPrincipalRequest$thingName": "

    The name of the thing.

    ", + "CreateThingRequest$thingName": "

    The name of the thing.

    ", + "CreateThingResponse$thingName": "

    The name of the thing.

    ", + "DeleteThingRequest$thingName": "

    The thing name.

    ", + "DescribeThingRequest$thingName": "

    The name of the thing.

    ", + "DescribeThingResponse$thingName": "

    The name of the thing.

    ", + "DetachThingPrincipalRequest$thingName": "

    The name of the thing.

    ", + "ListThingPrincipalsRequest$thingName": "

    The name of the thing.

    ", + "ThingAttribute$thingName": "

    The name of the thing.

    ", + "ThingNameList$member": null, + "UpdateThingRequest$thingName": "

    The thing name.

    " + } + }, + "ThingNameList": { + "base": null, + "refs": { + "ListPrincipalThingsResponse$things": "

    The things.

    " + } + }, + "ThrottlingException": { + "base": "

    The rate exceeds the limit.

    ", + "refs": { + } + }, + "Topic": { + "base": null, + "refs": { + "ListTopicRulesRequest$topic": "

    The topic.

    " + } + }, + "TopicPattern": { + "base": null, + "refs": { + "RepublishAction$topic": "

    The name of the MQTT topic.

    ", + "TopicRuleListItem$topicPattern": "

    The pattern for the topic names that apply.

    " + } + }, + "TopicRule": { + "base": "

    Describes a rule.

    ", + "refs": { + "GetTopicRuleResponse$rule": "

    The rule.

    " + } + }, + "TopicRuleList": { + "base": null, + "refs": { + "ListTopicRulesResponse$rules": "

    The rules.

    " + } + }, + "TopicRuleListItem": { + "base": "

    Describes a rule.

    ", + "refs": { + "TopicRuleList$member": null + } + }, + "TopicRulePayload": { + "base": "

    Describes a rule.

    ", + "refs": { + "CreateTopicRuleRequest$topicRulePayload": "

    The rule payload.

    ", + "ReplaceTopicRuleRequest$topicRulePayload": "

    The rule payload.

    " + } + }, + "TransferAlreadyCompletedException": { + "base": "

    You can't revert the certificate transfer because it has already completed.

    ", + "refs": { + } + }, + "TransferCertificateRequest": { + "base": "

    The input for the TransferCertificate operation.

    ", + "refs": { + } + }, + "TransferCertificateResponse": { + "base": "

    The output from the TransferCertificate operation.

    ", + "refs": { + } + }, + "TransferConflictException": { + "base": "

    You can't transfer the certificate because authorization policies are still attached.

    ", + "refs": { + } + }, + "UnauthorizedException": { + "base": "

    You are not authorized to perform this operation.

    ", + "refs": { + } + }, + "UpdateCertificateRequest": { + "base": "

    The input for the UpdateCertificate operation.

    ", + "refs": { + } + }, + "UpdateThingRequest": { + "base": "

    The input for the UpdateThing operation.

    ", + "refs": { + } + }, + "UpdateThingResponse": { + "base": "

    The output from the UpdateThing operation.

    ", + "refs": { + } + }, + "UseBase64": { + "base": null, + "refs": { + "SqsAction$useBase64": "

    Specifies whether to use Base64 encoding.

    " + } + }, + "VersionsLimitExceededException": { + "base": "

    The number of policy versions exceeds the limit.

    ", + "refs": { + } + }, + "errorMessage": { + "base": null, + "refs": { + "CertificateStateException$message": "

    The message for the exception.

    ", + "DeleteConflictException$message": "

    The message for the exception.

    ", + "InternalException$message": "

    The message for the exception.

    ", + "InternalFailureException$message": "

    The message for the exception.

    ", + "InvalidRequestException$message": "

    The message for the exception.

    ", + "LimitExceededException$message": "

    The message for the exception.

    ", + "MalformedPolicyException$message": "

    The message for the exception.

    ", + "ResourceAlreadyExistsException$message": "

    The message for the exception.

    ", + "ResourceNotFoundException$message": "

    The message for the exception.

    ", + "ServiceUnavailableException$message": "

    The message for the exception.

    ", + "SqlParseException$message": "

    The message for the exception.

    ", + "ThrottlingException$message": "

    The message for the exception.

    ", + "TransferAlreadyCompletedException$message": "

    The message for the exception.

    ", + "TransferConflictException$message": "

    The message for the exception.

    ", + "UnauthorizedException$message": "

    The message for the exception.

    ", + "VersionsLimitExceededException$message": "

    The message for the exception.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot/2015-05-28/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version":"1.0", + "examples":{ + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,263 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-05-28", + "endpointPrefix":"data.iot", + "protocol":"rest-json", + "serviceFullName":"AWS IoT Data Plane", + "signatureVersion":"v4", + "signingName":"iotdata" + }, + "operations":{ + "DeleteThingShadow":{ + "name":"DeleteThingShadow", + "http":{ + "method":"DELETE", + "requestUri":"/things/{thingName}/shadow" + }, + "input":{"shape":"DeleteThingShadowRequest"}, + "output":{"shape":"DeleteThingShadowResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + 
{"shape":"MethodNotAllowedException"}, + {"shape":"UnsupportedDocumentEncodingException"} + ] + }, + "GetThingShadow":{ + "name":"GetThingShadow", + "http":{ + "method":"GET", + "requestUri":"/things/{thingName}/shadow" + }, + "input":{"shape":"GetThingShadowRequest"}, + "output":{"shape":"GetThingShadowResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"UnsupportedDocumentEncodingException"} + ] + }, + "Publish":{ + "name":"Publish", + "http":{ + "method":"POST", + "requestUri":"/topics/{topic}" + }, + "input":{"shape":"PublishRequest"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"MethodNotAllowedException"} + ] + }, + "UpdateThingShadow":{ + "name":"UpdateThingShadow", + "http":{ + "method":"POST", + "requestUri":"/things/{thingName}/shadow" + }, + "input":{"shape":"UpdateThingShadowRequest"}, + "output":{"shape":"UpdateThingShadowResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"RequestEntityTooLargeException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"UnsupportedDocumentEncodingException"} + ] + } + }, + "shapes":{ + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "DeleteThingShadowRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + 
"DeleteThingShadowResponse":{ + "type":"structure", + "required":["payload"], + "members":{ + "payload":{"shape":"JsonDocument"} + }, + "payload":"payload" + }, + "ErrorMessage":{"type":"string"}, + "GetThingShadowRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + } + } + }, + "GetThingShadowResponse":{ + "type":"structure", + "members":{ + "payload":{"shape":"JsonDocument"} + }, + "payload":"payload" + }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "JsonDocument":{"type":"blob"}, + "MethodNotAllowedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":405}, + "exception":true + }, + "Payload":{"type":"blob"}, + "PublishRequest":{ + "type":"structure", + "required":["topic"], + "members":{ + "topic":{ + "shape":"Topic", + "location":"uri", + "locationName":"topic" + }, + "qos":{ + "shape":"Qos", + "location":"querystring", + "locationName":"qos" + }, + "payload":{"shape":"Payload"} + }, + "payload":"payload" + }, + "Qos":{ + "type":"integer", + "max":1, + "min":0 + }, + "RequestEntityTooLargeException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + 
"ThingName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "Topic":{"type":"string"}, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":401}, + "exception":true + }, + "UnsupportedDocumentEncodingException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "error":{"httpStatusCode":415}, + "exception":true + }, + "UpdateThingShadowRequest":{ + "type":"structure", + "required":[ + "thingName", + "payload" + ], + "members":{ + "thingName":{ + "shape":"ThingName", + "location":"uri", + "locationName":"thingName" + }, + "payload":{"shape":"JsonDocument"} + }, + "payload":"payload" + }, + "UpdateThingShadowResponse":{ + "type":"structure", + "members":{ + "payload":{"shape":"JsonDocument"} + }, + "payload":"payload" + }, + "errorMessage":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,152 @@ +{ + "version": "2.0", + "service": "AWS IoT

    AWS IoT-Data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete thing shadows. A thing shadow is a persistent representation of your things and their state in the AWS cloud.

    ", + "operations": { + "DeleteThingShadow": "

    Deletes the thing shadow for the specified thing.

    For more information, see DeleteThingShadow in the AWS IoT Developer Guide.

    ", + "GetThingShadow": "

    Gets the thing shadow for the specified thing.

    For more information, see GetThingShadow in the AWS IoT Developer Guide.

    ", + "Publish": "

    Publishes state information.

    For more information, see HTTP Protocol in the AWS IoT Developer Guide.

    ", + "UpdateThingShadow": "

    Updates the thing shadow for the specified thing.

    For more information, see UpdateThingShadow in the AWS IoT Developer Guide.

    " + }, + "shapes": { + "ConflictException": { + "base": "

    The specified version does not match the version of the document.

    ", + "refs": { + } + }, + "DeleteThingShadowRequest": { + "base": "

    The input for the DeleteThingShadow operation.

    ", + "refs": { + } + }, + "DeleteThingShadowResponse": { + "base": "

    The output from the DeleteThingShadow operation.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConflictException$message": "

    The message for the exception.

    ", + "MethodNotAllowedException$message": "

    The message for the exception.

    ", + "RequestEntityTooLargeException$message": "

    The message for the exception.

    " + } + }, + "GetThingShadowRequest": { + "base": "

    The input for the GetThingShadow operation.

    ", + "refs": { + } + }, + "GetThingShadowResponse": { + "base": "

    The output from the GetThingShadow operation.

    ", + "refs": { + } + }, + "InternalFailureException": { + "base": "

    An unexpected error has occurred.

    ", + "refs": { + } + }, + "InvalidRequestException": { + "base": "

    The request is not valid.

    ", + "refs": { + } + }, + "JsonDocument": { + "base": null, + "refs": { + "DeleteThingShadowResponse$payload": "

    The state information, in JSON format.

    ", + "GetThingShadowResponse$payload": "

    The state information, in JSON format.

    ", + "UpdateThingShadowRequest$payload": "

    The state information, in JSON format.

    ", + "UpdateThingShadowResponse$payload": "

    The state information, in JSON format.

    " + } + }, + "MethodNotAllowedException": { + "base": "

    The specified combination of HTTP verb and URI is not supported.

    ", + "refs": { + } + }, + "Payload": { + "base": null, + "refs": { + "PublishRequest$payload": "

    The state information, in JSON format.

    " + } + }, + "PublishRequest": { + "base": "

    The input for the Publish operation.

    ", + "refs": { + } + }, + "Qos": { + "base": null, + "refs": { + "PublishRequest$qos": "

    The Quality of Service (QoS) level.

    " + } + }, + "RequestEntityTooLargeException": { + "base": "

    The payload exceeds the maximum size allowed.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The specified resource does not exist.

    ", + "refs": { + } + }, + "ServiceUnavailableException": { + "base": "

    The service is temporarily unavailable.

    ", + "refs": { + } + }, + "ThingName": { + "base": null, + "refs": { + "DeleteThingShadowRequest$thingName": "

    The name of the thing.

    ", + "GetThingShadowRequest$thingName": "

    The name of the thing.

    ", + "UpdateThingShadowRequest$thingName": "

    The name of the thing.

    " + } + }, + "ThrottlingException": { + "base": "

    The rate exceeds the limit.

    ", + "refs": { + } + }, + "Topic": { + "base": null, + "refs": { + "PublishRequest$topic": "

    The name of the MQTT topic.

    " + } + }, + "UnauthorizedException": { + "base": "

    You are not authorized to perform this operation.

    ", + "refs": { + } + }, + "UnsupportedDocumentEncodingException": { + "base": "

    The document encoding is not supported.

    ", + "refs": { + } + }, + "UpdateThingShadowRequest": { + "base": "

    The input for the UpdateThingShadow operation.

    ", + "refs": { + } + }, + "UpdateThingShadowResponse": { + "base": "

    The output from the UpdateThingShadow operation.

    ", + "refs": { + } + }, + "errorMessage": { + "base": null, + "refs": { + "InternalFailureException$message": "

    The message for the exception.

    ", + "InvalidRequestException$message": "

    The message for the exception.

    ", + "ResourceNotFoundException$message": "

    The message for the exception.

    ", + "ServiceUnavailableException$message": "

    The message for the exception.

    ", + "ThrottlingException$message": "

    The message for the exception.

    ", + "UnauthorizedException$message": "

    The message for the exception.

    ", + "UnsupportedDocumentEncodingException$message": "

    The message for the exception.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/iot-data/2015-05-28/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,872 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-12-02", + "endpointPrefix":"kinesis", + "jsonVersion":"1.1", + "serviceAbbreviation":"Kinesis", + "serviceFullName":"Amazon Kinesis", + "signatureVersion":"v4", + "targetPrefix":"Kinesis_20131202", + "protocol":"json" + }, + "operations":{ + "AddTagsToStream":{ + "name":"AddTagsToStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToStreamInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + } + ] + }, + 
"CreateStream":{ + "name":"CreateStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStreamInput"}, + "errors":[ + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + } + ] + }, + "DecreaseStreamRetentionPeriod":{ + "name":"DecreaseStreamRetentionPeriod", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DecreaseStreamRetentionPeriodInput"}, + "errors":[ + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + } + ] + }, + "DeleteStream":{ + "name":"DeleteStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStreamInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + } + ] + }, + "DescribeStream":{ + "name":"DescribeStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStreamInput"}, + "output":{"shape":"DescribeStreamOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + } + ] + }, + "GetRecords":{ + "name":"GetRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRecordsInput"}, + "output":{"shape":"GetRecordsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + }, + { + "shape":"ExpiredIteratorException", + "exception":true + } + ] + }, + "GetShardIterator":{ + "name":"GetShardIterator", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetShardIteratorInput"}, + "output":{"shape":"GetShardIteratorOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + } + ] + }, + "IncreaseStreamRetentionPeriod":{ + "name":"IncreaseStreamRetentionPeriod", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IncreaseStreamRetentionPeriodInput"}, + "errors":[ + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + } + ] + }, + "ListStreams":{ + "name":"ListStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStreamsInput"}, + "output":{"shape":"ListStreamsOutput"}, + "errors":[ + { + "shape":"LimitExceededException", + "exception":true + } + ] + }, + "ListTagsForStream":{ + "name":"ListTagsForStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForStreamInput"}, + "output":{"shape":"ListTagsForStreamOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + } + ] + }, + "MergeShards":{ + "name":"MergeShards", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MergeShardsInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + } + ] + }, + "PutRecord":{ + "name":"PutRecord", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRecordInput"}, + "output":{"shape":"PutRecordOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + } + ] + }, + "PutRecords":{ + "name":"PutRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRecordsInput"}, + "output":{"shape":"PutRecordsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"ProvisionedThroughputExceededException", + "exception":true + } + ] + }, + "RemoveTagsFromStream":{ + "name":"RemoveTagsFromStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromStreamInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + } + ] + }, + "SplitShard":{ + "name":"SplitShard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SplitShardInput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"ResourceInUseException", + "exception":true + }, + { + "shape":"InvalidArgumentException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + } + ] + } + }, + "shapes":{ + "AddTagsToStreamInput":{ + "type":"structure", + "required":[ + "StreamName", + "Tags" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "Tags":{"shape":"TagMap"} + } + }, + "ApproximateArrivalTimestamp":{"type":"timestamp"}, + "BooleanObject":{"type":"boolean"}, + "CreateStreamInput":{ + "type":"structure", + "required":[ + "StreamName", + 
"ShardCount" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardCount":{"shape":"PositiveIntegerObject"} + } + }, + "Data":{ + "type":"blob", + "min":0, + "max":1048576 + }, + "DecreaseStreamRetentionPeriodInput":{ + "type":"structure", + "required":[ + "StreamName", + "RetentionPeriodHours" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "RetentionPeriodHours":{"shape":"RetentionPeriodHours"} + } + }, + "DeleteStreamInput":{ + "type":"structure", + "required":["StreamName"], + "members":{ + "StreamName":{"shape":"StreamName"} + } + }, + "DescribeStreamInput":{ + "type":"structure", + "required":["StreamName"], + "members":{ + "StreamName":{"shape":"StreamName"}, + "Limit":{"shape":"DescribeStreamInputLimit"}, + "ExclusiveStartShardId":{"shape":"ShardId"} + } + }, + "DescribeStreamInputLimit":{ + "type":"integer", + "min":1, + "max":10000 + }, + "DescribeStreamOutput":{ + "type":"structure", + "required":["StreamDescription"], + "members":{ + "StreamDescription":{"shape":"StreamDescription"} + } + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "ExpiredIteratorException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "GetRecordsInput":{ + "type":"structure", + "required":["ShardIterator"], + "members":{ + "ShardIterator":{"shape":"ShardIterator"}, + "Limit":{"shape":"GetRecordsInputLimit"} + } + }, + "GetRecordsInputLimit":{ + "type":"integer", + "min":1, + "max":10000 + }, + "GetRecordsOutput":{ + "type":"structure", + "required":["Records"], + "members":{ + "Records":{"shape":"RecordList"}, + "NextShardIterator":{"shape":"ShardIterator"}, + "MillisBehindLatest":{"shape":"MillisBehindLatest"} + } + }, + "GetShardIteratorInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardId", + "ShardIteratorType" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardId":{"shape":"ShardId"}, + 
"ShardIteratorType":{"shape":"ShardIteratorType"}, + "StartingSequenceNumber":{"shape":"SequenceNumber"} + } + }, + "GetShardIteratorOutput":{ + "type":"structure", + "members":{ + "ShardIterator":{"shape":"ShardIterator"} + } + }, + "HashKey":{ + "type":"string", + "pattern":"0|([1-9]\\d{0,38})" + }, + "HashKeyRange":{ + "type":"structure", + "required":[ + "StartingHashKey", + "EndingHashKey" + ], + "members":{ + "StartingHashKey":{"shape":"HashKey"}, + "EndingHashKey":{"shape":"HashKey"} + } + }, + "IncreaseStreamRetentionPeriodInput":{ + "type":"structure", + "required":[ + "StreamName", + "RetentionPeriodHours" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "RetentionPeriodHours":{"shape":"RetentionPeriodHours"} + } + }, + "InvalidArgumentException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListStreamsInput":{ + "type":"structure", + "members":{ + "Limit":{"shape":"ListStreamsInputLimit"}, + "ExclusiveStartStreamName":{"shape":"StreamName"} + } + }, + "ListStreamsInputLimit":{ + "type":"integer", + "min":1, + "max":10000 + }, + "ListStreamsOutput":{ + "type":"structure", + "required":[ + "StreamNames", + "HasMoreStreams" + ], + "members":{ + "StreamNames":{"shape":"StreamNameList"}, + "HasMoreStreams":{"shape":"BooleanObject"} + } + }, + "ListTagsForStreamInput":{ + "type":"structure", + "required":["StreamName"], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ExclusiveStartTagKey":{"shape":"TagKey"}, + "Limit":{"shape":"ListTagsForStreamInputLimit"} + } + }, + "ListTagsForStreamInputLimit":{ + "type":"integer", + "min":1, + "max":10 + }, + "ListTagsForStreamOutput":{ + "type":"structure", + "required":[ + "Tags", + "HasMoreTags" + ], + "members":{ + "Tags":{"shape":"TagList"}, + "HasMoreTags":{"shape":"BooleanObject"} + } + }, + 
"MergeShardsInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardToMerge", + "AdjacentShardToMerge" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardToMerge":{"shape":"ShardId"}, + "AdjacentShardToMerge":{"shape":"ShardId"} + } + }, + "MillisBehindLatest":{ + "type":"long", + "min":0 + }, + "PartitionKey":{ + "type":"string", + "min":1, + "max":256 + }, + "PositiveIntegerObject":{ + "type":"integer", + "min":1, + "max":100000 + }, + "ProvisionedThroughputExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "PutRecordInput":{ + "type":"structure", + "required":[ + "StreamName", + "Data", + "PartitionKey" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "Data":{"shape":"Data"}, + "PartitionKey":{"shape":"PartitionKey"}, + "ExplicitHashKey":{"shape":"HashKey"}, + "SequenceNumberForOrdering":{"shape":"SequenceNumber"} + } + }, + "PutRecordOutput":{ + "type":"structure", + "required":[ + "ShardId", + "SequenceNumber" + ], + "members":{ + "ShardId":{"shape":"ShardId"}, + "SequenceNumber":{"shape":"SequenceNumber"} + } + }, + "PutRecordsInput":{ + "type":"structure", + "required":[ + "Records", + "StreamName" + ], + "members":{ + "Records":{"shape":"PutRecordsRequestEntryList"}, + "StreamName":{"shape":"StreamName"} + } + }, + "PutRecordsOutput":{ + "type":"structure", + "required":["Records"], + "members":{ + "FailedRecordCount":{"shape":"PositiveIntegerObject"}, + "Records":{"shape":"PutRecordsResultEntryList"} + } + }, + "PutRecordsRequestEntry":{ + "type":"structure", + "required":[ + "Data", + "PartitionKey" + ], + "members":{ + "Data":{"shape":"Data"}, + "ExplicitHashKey":{"shape":"HashKey"}, + "PartitionKey":{"shape":"PartitionKey"} + } + }, + "PutRecordsRequestEntryList":{ + "type":"list", + "member":{"shape":"PutRecordsRequestEntry"}, + "min":1, + "max":500 + }, + "PutRecordsResultEntry":{ + "type":"structure", + "members":{ + 
"SequenceNumber":{"shape":"SequenceNumber"}, + "ShardId":{"shape":"ShardId"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "PutRecordsResultEntryList":{ + "type":"list", + "member":{"shape":"PutRecordsResultEntry"}, + "min":1, + "max":500 + }, + "Record":{ + "type":"structure", + "required":[ + "SequenceNumber", + "Data", + "PartitionKey" + ], + "members":{ + "SequenceNumber":{"shape":"SequenceNumber"}, + "ApproximateArrivalTimestamp":{"shape":"ApproximateArrivalTimestamp"}, + "Data":{"shape":"Data"}, + "PartitionKey":{"shape":"PartitionKey"} + } + }, + "RecordList":{ + "type":"list", + "member":{"shape":"Record"} + }, + "RemoveTagsFromStreamInput":{ + "type":"structure", + "required":[ + "StreamName", + "TagKeys" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "TagKeys":{"shape":"TagKeyList"} + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "RetentionPeriodHours":{ + "type":"integer", + "min":24, + "max":168 + }, + "SequenceNumber":{ + "type":"string", + "pattern":"0|([1-9]\\d{0,128})" + }, + "SequenceNumberRange":{ + "type":"structure", + "required":["StartingSequenceNumber"], + "members":{ + "StartingSequenceNumber":{"shape":"SequenceNumber"}, + "EndingSequenceNumber":{"shape":"SequenceNumber"} + } + }, + "Shard":{ + "type":"structure", + "required":[ + "ShardId", + "HashKeyRange", + "SequenceNumberRange" + ], + "members":{ + "ShardId":{"shape":"ShardId"}, + "ParentShardId":{"shape":"ShardId"}, + "AdjacentParentShardId":{"shape":"ShardId"}, + "HashKeyRange":{"shape":"HashKeyRange"}, + "SequenceNumberRange":{"shape":"SequenceNumberRange"} + } + }, + "ShardId":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "ShardIterator":{ + "type":"string", + 
"min":1, + "max":512 + }, + "ShardIteratorType":{ + "type":"string", + "enum":[ + "AT_SEQUENCE_NUMBER", + "AFTER_SEQUENCE_NUMBER", + "TRIM_HORIZON", + "LATEST" + ] + }, + "ShardList":{ + "type":"list", + "member":{"shape":"Shard"} + }, + "SplitShardInput":{ + "type":"structure", + "required":[ + "StreamName", + "ShardToSplit", + "NewStartingHashKey" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "ShardToSplit":{"shape":"ShardId"}, + "NewStartingHashKey":{"shape":"HashKey"} + } + }, + "StreamARN":{"type":"string"}, + "StreamDescription":{ + "type":"structure", + "required":[ + "StreamName", + "StreamARN", + "StreamStatus", + "Shards", + "HasMoreShards", + "RetentionPeriodHours" + ], + "members":{ + "StreamName":{"shape":"StreamName"}, + "StreamARN":{"shape":"StreamARN"}, + "StreamStatus":{"shape":"StreamStatus"}, + "Shards":{"shape":"ShardList"}, + "HasMoreShards":{"shape":"BooleanObject"}, + "RetentionPeriodHours":{"shape":"RetentionPeriodHours"} + } + }, + "StreamName":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "StreamNameList":{ + "type":"list", + "member":{"shape":"StreamName"} + }, + "StreamStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "ACTIVE", + "UPDATING" + ] + }, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "min":1, + "max":128 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "min":1, + "max":10 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "min":1, + "max":10 + }, + "TagValue":{ + "type":"string", + "min":0, + "max":256 + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,452 @@ +{ + "version": "2.0", + "operations": { + "AddTagsToStream": "

    Adds or updates tags for the specified Amazon Kinesis stream. Each stream can have up to 10 tags.

    If tags have already been assigned to the stream, AddTagsToStream overwrites any existing tags that correspond to the specified tag keys.

    ", + "CreateStream": "

    Creates a Amazon Kinesis stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within an Amazon Kinesis stream is explicitly supported by means of shards, which are uniquely identified groups of data records in an Amazon Kinesis stream.

    You specify and control the number of shards that a stream is composed of. Each shard can support reads up to 5 transactions per second, up to a maximum data read total of 2 MB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. You can add shards to a stream if the amount of data input increases and you can remove shards if the amount of data input decreases.

    The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different regions, can have the same name.

    CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Amazon Kinesis immediately returns and sets the stream status to CREATING. After the stream is created, Amazon Kinesis sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream.

    You receive a LimitExceededException when making a CreateStream request if you try to do one of the following:

    • Have more than five streams in the CREATING state at any point in time.
    • Create more shards than are authorized for your account.

    For the default shard limit for an AWS account, see Amazon Kinesis Limits. If you need to increase this limit, contact AWS Support.

    You can use DescribeStream to check the stream status, which is returned in StreamStatus.

    CreateStream has a limit of 5 transactions per second per account.

    ", + "DecreaseStreamRetentionPeriod": "

    Decreases the stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream’s retention period is 24 hours.

    This operation may result in lost data. For example, if the stream's retention period is 48 hours and is decreased to 24 hours, any data already in the stream that is older than 24 hours is inaccessible.

    ", + "DeleteStream": "

    Deletes a stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it will receive the exception ResourceNotFoundException.

    If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Amazon Kinesis completes the deletion.

    Note: Amazon Kinesis might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete.

    When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream.

    You can use the DescribeStream operation to check the state of the stream, which is returned in StreamStatus.

    DeleteStream has a limit of 5 transactions per second per account.

    ", + "DescribeStream": "

    Describes the specified stream.

    The information about the stream includes its current status, its Amazon Resource Name (ARN), and an array of shard objects. For each shard object, there is information about the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played in a role in creating the shard. A sequence number is the identifier associated with every record ingested in the Amazon Kinesis stream. The sequence number is assigned when a record is put into the stream.

    You can limit the number of returned shards using the Limit parameter. The number of shards in a stream may be too large to return from a single call to DescribeStream. You can detect this by using the HasMoreShards flag in the returned output. HasMoreShards is set to true when there is more data available.

    DescribeStream is a paginated operation. If there are more shards available, you can request them using the shard ID of the last shard returned. Specify this ID in the ExclusiveStartShardId parameter in a subsequent request to DescribeStream.

    DescribeStream has a limit of 10 transactions per second per account.

    ", + "GetRecords": "

    Gets data records from a shard.

    Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains records.

    You can scale by provisioning multiple shards. Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. Note that if the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

    Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit.

    The size of the data returned by GetRecords will vary depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the shard, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. Note that GetRecords won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to GetRecords; however, it's possible that the application will get exceptions for longer than 1 second.

    To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics (see Monitoring Amazon Kinesis in the Amazon Kinesis Developer Guide).

    Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when an Amazon Kinesis stream successfully receives and stores a record. This is commonly referred to as a server-side timestamp, which is different than a client-side timestamp, where the timestamp is set when a data producer creates or sends the record to a stream. The timestamp has millisecond precision. There are no guarantees about the timestamp accuracy, or that the timestamp is always increasing. For example, records in a shard or across a stream might have timestamps that are out of order.

    ", + "GetShardIterator": "

    Gets a shard iterator. A shard iterator expires five minutes after it is returned to the requester.

    A shard iterator specifies the position in the shard from which to start reading data records sequentially. A shard iterator specifies this position using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the Amazon Kinesis stream. The sequence number is assigned when a record is put into the stream.

    You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type, or right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. You can specify the shard iterator type TRIM_HORIZON in the request to cause ShardIterator to point to the last untrimmed record in the shard in the system, which is the oldest data record in the shard. Or you can point to just after the most recent record in the shard, by using the shard iterator type LATEST, so that you always read the most recent data in the shard.

    When you repeatedly read from an Amazon Kinesis stream use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and then use the shard iterator returned by the GetRecords request in NextShardIterator for subsequent reads. A new shard iterator is returned by every GetRecords request in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.

    If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords.

    If the shard is closed, the iterator can't return more data, and GetShardIterator returns null for its ShardIterator. A shard can be closed using SplitShard or MergeShards.

    GetShardIterator has a limit of 5 transactions per second per account per open shard.

    ", + "IncreaseStreamRetentionPeriod": "

    Increases the stream's retention period, which is the length of time data records are accessible after they are added to the stream. The maximum value of a stream’s retention period is 168 hours (7 days).

    Upon choosing a longer stream retention period, this operation will increase the time period records are accessible that have not yet expired. However, it will not make previous data that has expired (older than the stream’s previous retention period) accessible after the operation has been called. For example, if a stream’s retention period is set to 24 hours and is increased to 168 hours, any data that is older than 24 hours will remain inaccessible to consumer applications.

    ", + "ListStreams": "

    Lists your streams.

    The number of streams may be too large to return from a single call to ListStreams. You can limit the number of returned streams using the Limit parameter. If you do not specify a value for the Limit parameter, Amazon Kinesis uses the default limit, which is currently 10.

    You can detect if there are more streams available to list by using the HasMoreStreams flag from the returned output. If there are more streams available, you can request more streams by using the name of the last stream returned by the ListStreams request in the ExclusiveStartStreamName parameter in a subsequent request to ListStreams. The group of stream names returned by the subsequent request is then added to the list. You can continue this process until all the stream names have been collected in the list.

    ListStreams has a limit of 5 transactions per second per account.

    ", + "ListTagsForStream": "

    Lists the tags for the specified Amazon Kinesis stream.

    ", + "MergeShards": "

    Merges two adjacent shards in a stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards form a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards.

    MergeShards is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Developer Guide.

    If the stream is in the ACTIVE state, you can call MergeShards. If a stream is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.

    You can use DescribeStream to check the state of the stream, which is returned in StreamStatus.

    MergeShards is an asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis immediately returns a response and sets the StreamStatus to UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

    You use DescribeStream to determine the shard IDs that are specified in the MergeShards request.

    If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException.

    MergeShards has limit of 5 transactions per second per account.

    ", + "PutRecord": "

    Writes a single data record from a producer into an Amazon Kinesis stream. Call PutRecord to send data from the producer into the Amazon Kinesis stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

    You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.

    The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

    The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a data stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to.

    Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Developer Guide.

    PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.

    Sequence numbers generally increase over time. To guarantee strictly increasing ordering, use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Developer Guide.

    If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.

    By default, data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. This retention period can be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod operations.

    ", + "PutRecords": "

    Writes multiple data records from a producer into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data from a data producer into the Amazon Kinesis stream for data ingestion and processing.

    Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

    You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

    The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

    The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Developer Guide.

    Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Developer Guide.

    The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

    The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

    A successfully-processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream.

    An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Developer Guide.

    By default, data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. This retention period can be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod operations.

    ", + "RemoveTagsFromStream": "

    Deletes tags from the specified Amazon Kinesis stream.

    If you specify a tag that does not exist, it is ignored.

    ", + "SplitShard": "

    Splits a shard into two new shards in the stream, to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of stream because of an expected increase in the volume of data records being ingested.

    You can also use SplitShard when a shard appears to be approaching its maximum utilization, for example, when the set of producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Amazon Kinesis applications can simultaneously read data from the stream for real-time processing.

    You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might simply be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information about splitting shards, see Split a Shard in the Amazon Kinesis Developer Guide.

    You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.

    SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Amazon Kinesis immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Amazon Kinesis sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

    You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If a stream is in CREATING or UPDATING or DELETING states, DescribeStream returns a ResourceInUseException.

    If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.

    For the default shard limit for an AWS account, see Amazon Kinesis Limits. If you need to increase this limit, contact AWS Support.

    If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards or SplitShard, you receive a LimitExceededException.

    SplitShard has limit of 5 transactions per second per account.

    " + }, + "service": "Amazon Kinesis Service API Reference

    Amazon Kinesis is a managed service that scales elastically for real time processing of streaming big data.

    ", + "shapes": { + "AddTagsToStreamInput": { + "base": "

    Represents the input for AddTagsToStream.

    ", + "refs": { + } + }, + "ApproximateArrivalTimestamp": { + "base": null, + "refs": { + "Record$ApproximateArrivalTimestamp": "

    The approximate time that the record was inserted into the stream.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "ListStreamsOutput$HasMoreStreams": "

    If set to true, there are more streams available to list.

    ", + "ListTagsForStreamOutput$HasMoreTags": "

    If set to true, more tags are available. To request additional tags, set ExclusiveStartTagKey to the key of the last tag returned.

    ", + "StreamDescription$HasMoreShards": "

    If set to true, more shards in the stream are available to describe.

    " + } + }, + "CreateStreamInput": { + "base": "

    Represents the input for CreateStream.

    ", + "refs": { + } + }, + "Data": { + "base": null, + "refs": { + "PutRecordInput$Data": "

    The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).

    ", + "PutRecordsRequestEntry$Data": "

    The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).

    ", + "Record$Data": "

    The data blob. The data in the blob is both opaque and immutable to the Amazon Kinesis service, which does not inspect, interpret, or change the data in the blob in any way. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).

    " + } + }, + "DecreaseStreamRetentionPeriodInput": { + "base": "

    Represents the input for DecreaseStreamRetentionPeriod.

    ", + "refs": { + } + }, + "DeleteStreamInput": { + "base": "

    Represents the input for DeleteStream.

    ", + "refs": { + } + }, + "DescribeStreamInput": { + "base": "

    Represents the input for DescribeStream.

    ", + "refs": { + } + }, + "DescribeStreamInputLimit": { + "base": null, + "refs": { + "DescribeStreamInput$Limit": "

    The maximum number of shards to return.

    " + } + }, + "DescribeStreamOutput": { + "base": "

    Represents the output for DescribeStream.

    ", + "refs": { + } + }, + "ErrorCode": { + "base": null, + "refs": { + "PutRecordsResultEntry$ErrorCode": "

    The error code for an individual record result. ErrorCodes can be either ProvisionedThroughputExceededException or InternalFailure.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ExpiredIteratorException$message": "

    A message that provides information about the error.

    ", + "InvalidArgumentException$message": "

    A message that provides information about the error.

    ", + "LimitExceededException$message": "

    A message that provides information about the error.

    ", + "ProvisionedThroughputExceededException$message": "

    A message that provides information about the error.

    ", + "PutRecordsResultEntry$ErrorMessage": "

    The error message for an individual record result. An ErrorCode value of ProvisionedThroughputExceededException has an error message that includes the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure has the error message \"Internal Service Failure\".

    ", + "ResourceInUseException$message": "

    A message that provides information about the error.

    ", + "ResourceNotFoundException$message": "

    A message that provides information about the error.

    " + } + }, + "ExpiredIteratorException": { + "base": "

    The provided iterator exceeds the maximum age allowed.

    ", + "refs": { + } + }, + "GetRecordsInput": { + "base": "

    Represents the input for GetRecords.

    ", + "refs": { + } + }, + "GetRecordsInputLimit": { + "base": null, + "refs": { + "GetRecordsInput$Limit": "

    The maximum number of records to return. Specify a value of up to 10,000. If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException.

    " + } + }, + "GetRecordsOutput": { + "base": "

    Represents the output for GetRecords.

    ", + "refs": { + } + }, + "GetShardIteratorInput": { + "base": "

    Represents the input for GetShardIterator.

    ", + "refs": { + } + }, + "GetShardIteratorOutput": { + "base": "

    Represents the output for GetShardIterator.

    ", + "refs": { + } + }, + "HashKey": { + "base": null, + "refs": { + "HashKeyRange$StartingHashKey": "

    The starting hash key of the hash key range.

    ", + "HashKeyRange$EndingHashKey": "

    The ending hash key of the hash key range.

    ", + "PutRecordInput$ExplicitHashKey": "

    The hash value used to explicitly determine the shard the data record is assigned to by overriding the partition key hash.

    ", + "PutRecordsRequestEntry$ExplicitHashKey": "

    The hash value used to determine explicitly the shard that the data record is assigned to by overriding the partition key hash.

    ", + "SplitShardInput$NewStartingHashKey": "

    A hash key value for the starting hash key of one of the child shards created by the split. The hash key range for a given shard constitutes a set of ordered contiguous positive integers. The value for NewStartingHashKey must be in the range of hash keys being mapped into the shard. The NewStartingHashKey hash key value and all higher hash key values in hash key range are distributed to one of the child shards. All the lower hash key values in the range are distributed to the other child shard.

    " + } + }, + "HashKeyRange": { + "base": "

    The range of possible hash key values for the shard, which is a set of ordered contiguous positive integers.

    ", + "refs": { + "Shard$HashKeyRange": "

    The range of possible hash key values for the shard, which is a set of ordered contiguous positive integers.

    " + } + }, + "IncreaseStreamRetentionPeriodInput": { + "base": "

    Represents the input for IncreaseStreamRetentionPeriod.

    ", + "refs": { + } + }, + "InvalidArgumentException": { + "base": "

    A specified parameter exceeds its restrictions, is not supported, or can't be used. For more information, see the returned message.

    ", + "refs": { + } + }, + "LimitExceededException": { + "base": "

    The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed (5).

    ", + "refs": { + } + }, + "ListStreamsInput": { + "base": "

    Represents the input for ListStreams.

    ", + "refs": { + } + }, + "ListStreamsInputLimit": { + "base": null, + "refs": { + "ListStreamsInput$Limit": "

    The maximum number of streams to list.

    " + } + }, + "ListStreamsOutput": { + "base": "

    Represents the output for ListStreams.

    ", + "refs": { + } + }, + "ListTagsForStreamInput": { + "base": "

    Represents the input for ListTagsForStream.

    ", + "refs": { + } + }, + "ListTagsForStreamInputLimit": { + "base": null, + "refs": { + "ListTagsForStreamInput$Limit": "

    The number of tags to return. If this number is less than the total number of tags associated with the stream, HasMoreTags is set to true. To list additional tags, set ExclusiveStartTagKey to the last key in the response.

    " + } + }, + "ListTagsForStreamOutput": { + "base": "

    Represents the output for ListTagsForStream.

    ", + "refs": { + } + }, + "MergeShardsInput": { + "base": "

    Represents the input for MergeShards.

    ", + "refs": { + } + }, + "MillisBehindLatest": { + "base": null, + "refs": { + "GetRecordsOutput$MillisBehindLatest": "

    The number of milliseconds the GetRecords response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates record processing is caught up, and there are no new records to process at this moment.

    " + } + }, + "PartitionKey": { + "base": null, + "refs": { + "PutRecordInput$PartitionKey": "

    Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key will map to the same shard within the stream.

    ", + "PutRecordsRequestEntry$PartitionKey": "

    Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.

    ", + "Record$PartitionKey": "

    Identifies which shard in the stream the data record is assigned to.

    " + } + }, + "PositiveIntegerObject": { + "base": null, + "refs": { + "CreateStreamInput$ShardCount": "

    The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput.

    DefaultShardLimit;

    ", + "PutRecordsOutput$FailedRecordCount": "

    The number of unsuccessfully processed records in a PutRecords request.

    " + } + }, + "ProvisionedThroughputExceededException": { + "base": "

    The request rate is too high, or the requested data is too large for the available throughput. Reduce the frequency or size of your requests. For more information, see Error Retries and Exponential Backoff in AWS in the AWS General Reference.

    ", + "refs": { + } + }, + "PutRecordInput": { + "base": "

    Represents the input for PutRecord.

    ", + "refs": { + } + }, + "PutRecordOutput": { + "base": "

    Represents the output for PutRecord.

    ", + "refs": { + } + }, + "PutRecordsInput": { + "base": "

    A PutRecords request.

    ", + "refs": { + } + }, + "PutRecordsOutput": { + "base": "

    PutRecords results.

    ", + "refs": { + } + }, + "PutRecordsRequestEntry": { + "base": "

    Represents the output for PutRecords.

    ", + "refs": { + "PutRecordsRequestEntryList$member": null + } + }, + "PutRecordsRequestEntryList": { + "base": null, + "refs": { + "PutRecordsInput$Records": "

    The records associated with the request.

    " + } + }, + "PutRecordsResultEntry": { + "base": "

    Represents the result of an individual record from a PutRecords request. A record that is successfully added to your Amazon Kinesis stream includes SequenceNumber and ShardId in the result. A record that fails to be added to your Amazon Kinesis stream includes ErrorCode and ErrorMessage in the result.

    ", + "refs": { + "PutRecordsResultEntryList$member": null + } + }, + "PutRecordsResultEntryList": { + "base": null, + "refs": { + "PutRecordsOutput$Records": "

    An array of successfully and unsuccessfully processed record results, correlated with the request by natural ordering. A record that is successfully added to your Amazon Kinesis stream includes SequenceNumber and ShardId in the result. A record that fails to be added to your Amazon Kinesis stream includes ErrorCode and ErrorMessage in the result.

    " + } + }, + "Record": { + "base": "

    The unit of data of the Amazon Kinesis stream, which is composed of a sequence number, a partition key, and a data blob.

    ", + "refs": { + "RecordList$member": null + } + }, + "RecordList": { + "base": null, + "refs": { + "GetRecordsOutput$Records": "

    The data records retrieved from the shard.

    " + } + }, + "RemoveTagsFromStreamInput": { + "base": "

    Represents the input for RemoveTagsFromStream.

    ", + "refs": { + } + }, + "ResourceInUseException": { + "base": "

    The resource is not available for this operation. For successful operation, the resource needs to be in the ACTIVE state.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The requested resource could not be found. The stream might not be specified correctly, or it might not be in the ACTIVE state if the operation requires it.

    ", + "refs": { + } + }, + "RetentionPeriodHours": { + "base": null, + "refs": { + "DecreaseStreamRetentionPeriodInput$RetentionPeriodHours": "

    The new retention period of the stream, in hours. Must be less than the current retention period.

    ", + "IncreaseStreamRetentionPeriodInput$RetentionPeriodHours": "

    The new retention period of the stream, in hours. Must be more than the current retention period.

    ", + "StreamDescription$RetentionPeriodHours": "

    The current retention period, in hours.

    " + } + }, + "SequenceNumber": { + "base": null, + "refs": { + "GetShardIteratorInput$StartingSequenceNumber": "

    The sequence number of the data record in the shard from which to start reading from.

    ", + "PutRecordInput$SequenceNumberForOrdering": "

    Guarantees strictly increasing sequence numbers, for puts from the same client and to the same partition key. Usage: set the SequenceNumberForOrdering of record n to the sequence number of record n-1 (as returned in the result when putting record n-1). If this parameter is not set, records will be coarsely ordered based on arrival time.

    ", + "PutRecordOutput$SequenceNumber": "

    The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.

    ", + "PutRecordsResultEntry$SequenceNumber": "

    The sequence number for an individual record result.

    ", + "Record$SequenceNumber": "

    The unique identifier of the record in the stream.

    ", + "SequenceNumberRange$StartingSequenceNumber": "

    The starting sequence number for the range.

    ", + "SequenceNumberRange$EndingSequenceNumber": "

    The ending sequence number for the range. Shards that are in the OPEN state have an ending sequence number of null.

    " + } + }, + "SequenceNumberRange": { + "base": "

    The range of possible sequence numbers for the shard.

    ", + "refs": { + "Shard$SequenceNumberRange": "

    The range of possible sequence numbers for the shard.

    " + } + }, + "Shard": { + "base": "

    A uniquely identified group of data records in an Amazon Kinesis stream.

    ", + "refs": { + "ShardList$member": null + } + }, + "ShardId": { + "base": null, + "refs": { + "DescribeStreamInput$ExclusiveStartShardId": "

    The shard ID of the shard to start with.

    ", + "GetShardIteratorInput$ShardId": "

    The shard ID of the shard to get the iterator for.

    ", + "MergeShardsInput$ShardToMerge": "

    The shard ID of the shard to combine with the adjacent shard for the merge.

    ", + "MergeShardsInput$AdjacentShardToMerge": "

    The shard ID of the adjacent shard for the merge.

    ", + "PutRecordOutput$ShardId": "

    The shard ID of the shard where the data record was placed.

    ", + "PutRecordsResultEntry$ShardId": "

    The shard ID for an individual record result.

    ", + "Shard$ShardId": "

    The unique identifier of the shard within the Amazon Kinesis stream.

    ", + "Shard$ParentShardId": "

    The shard Id of the shard's parent.

    ", + "Shard$AdjacentParentShardId": "

    The shard Id of the shard adjacent to the shard's parent.

    ", + "SplitShardInput$ShardToSplit": "

    The shard ID of the shard to split.

    " + } + }, + "ShardIterator": { + "base": null, + "refs": { + "GetRecordsInput$ShardIterator": "

    The position in the shard from which you want to start sequentially reading data records. A shard iterator specifies this position using the sequence number of a data record in the shard.

    ", + "GetRecordsOutput$NextShardIterator": "

    The next position in the shard from which to start sequentially reading data records. If set to null, the shard has been closed and the requested iterator will not return any more data.

    ", + "GetShardIteratorOutput$ShardIterator": "

    The position in the shard from which to start reading data records sequentially. A shard iterator specifies this position using the sequence number of a data record in a shard.

    " + } + }, + "ShardIteratorType": { + "base": null, + "refs": { + "GetShardIteratorInput$ShardIteratorType": "

    Determines how the shard iterator is used to start reading data records from the shard.

    The following are the valid shard iterator types:

    • AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by a specific sequence number.
    • AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number.
    • TRIM_HORIZON - Start reading at the last untrimmed record in the shard in the system, which is the oldest data record in the shard.
    • LATEST - Start reading just after the most recent record in the shard, so that you always read the most recent data in the shard.
    " + } + }, + "ShardList": { + "base": null, + "refs": { + "StreamDescription$Shards": "

    The shards that comprise the stream.

    " + } + }, + "SplitShardInput": { + "base": "

    Represents the input for SplitShard.

    ", + "refs": { + } + }, + "StreamARN": { + "base": null, + "refs": { + "StreamDescription$StreamARN": "

    The Amazon Resource Name (ARN) for the stream being described.

    " + } + }, + "StreamDescription": { + "base": "

    Represents the output for DescribeStream.

    ", + "refs": { + "DescribeStreamOutput$StreamDescription": "

    The current status of the stream, the stream ARN, an array of shard objects that comprise the stream, and states whether there are more shards available.

    " + } + }, + "StreamName": { + "base": null, + "refs": { + "AddTagsToStreamInput$StreamName": "

    The name of the stream.

    ", + "CreateStreamInput$StreamName": "

    A name to identify the stream. The stream name is scoped to the AWS account used by the application that creates the stream. It is also scoped by region. That is, two streams in two different AWS accounts can have the same name, and two streams in the same AWS account, but in two different regions, can have the same name.

    ", + "DecreaseStreamRetentionPeriodInput$StreamName": "

    The name of the stream to modify.

    ", + "DeleteStreamInput$StreamName": "

    The name of the stream to delete.

    ", + "DescribeStreamInput$StreamName": "

    The name of the stream to describe.

    ", + "GetShardIteratorInput$StreamName": "

    The name of the stream.

    ", + "IncreaseStreamRetentionPeriodInput$StreamName": "

    The name of the stream to modify.

    ", + "ListStreamsInput$ExclusiveStartStreamName": "

    The name of the stream to start the list with.

    ", + "ListTagsForStreamInput$StreamName": "

    The name of the stream.

    ", + "MergeShardsInput$StreamName": "

    The name of the stream for the merge.

    ", + "PutRecordInput$StreamName": "

    The name of the stream to put the data record into.

    ", + "PutRecordsInput$StreamName": "

    The stream name associated with the request.

    ", + "RemoveTagsFromStreamInput$StreamName": "

    The name of the stream.

    ", + "SplitShardInput$StreamName": "

    The name of the stream for the shard split.

    ", + "StreamDescription$StreamName": "

    The name of the stream being described.

    ", + "StreamNameList$member": null + } + }, + "StreamNameList": { + "base": null, + "refs": { + "ListStreamsOutput$StreamNames": "

    The names of the streams that are associated with the AWS account making the ListStreams request.

    " + } + }, + "StreamStatus": { + "base": null, + "refs": { + "StreamDescription$StreamStatus": "

    The current status of the stream being described.

    The stream status is one of the following states:

    • CREATING - The stream is being created. Amazon Kinesis immediately returns and sets StreamStatus to CREATING.
    • DELETING - The stream is being deleted. The specified stream is in the DELETING state until Amazon Kinesis completes the deletion.
    • ACTIVE - The stream exists and is ready for read and write operations or deletion. You should perform read and write operations only on an ACTIVE stream.
    • UPDATING - Shards in the stream are being merged or split. Read and write operations continue to work while the stream is in the UPDATING state.
    " + } + }, + "Tag": { + "base": "

    Metadata assigned to the stream, consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "ListTagsForStreamInput$ExclusiveStartTagKey": "

    The key to use as the starting point for the list of tags. If this parameter is set, ListTagsForStream gets all tags that occur after ExclusiveStartTagKey.

    ", + "Tag$Key": "

    A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @

    ", + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "RemoveTagsFromStreamInput$TagKeys": "

    A list of tag keys. Each corresponding tag is removed from the stream.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ListTagsForStreamOutput$Tags": "

    A list of tags associated with StreamName, starting with the first tag after ExclusiveStartTagKey and up to the specified Limit.

    " + } + }, + "TagMap": { + "base": null, + "refs": { + "AddTagsToStreamInput$Tags": "

    The set of key-value pairs to use to create the tags.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    An optional string, typically used to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @

    ", + "TagMap$value": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "pagination": { + "DescribeStream": { + "input_token": "ExclusiveStartShardId", + "limit_key": "Limit", + "more_results": "StreamDescription.HasMoreShards", + "output_token": "StreamDescription.Shards[-1].ShardId", + "result_key": "StreamDescription.Shards" + }, + "ListStreams": { + "input_token": "ExclusiveStartStreamName", + "limit_key": "Limit", + "more_results": "HasMoreStreams", + "output_token": "StreamNames[-1]", + "result_key": "StreamNames" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kinesis/2013-12-02/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "version": 2, + "waiters": { + "StreamExists": { + "delay": 10, + "operation": "DescribeStream", + "maxAttempts": 18, + "acceptors": [ + { + "expected": "ACTIVE", + "matcher": "path", + "state": "success", + "argument": "StreamDescription.StreamStatus" + } + ] + } + } 
+} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2589 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-01", + "endpointPrefix":"kms", + "jsonVersion":"1.1", + "serviceAbbreviation":"KMS", + "serviceFullName":"AWS Key Management Service", + "signatureVersion":"v4", + "targetPrefix":"TrentService", + "protocol":"json" + }, + "operations":{ + "CancelKeyDeletion":{ + "name":"CancelKeyDeletion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelKeyDeletionRequest"}, + "output":{"shape":"CancelKeyDeletionResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateAlias":{ + "name":"CreateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAliasRequest"}, + "errors":[ + { + 
"shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"AlreadyExistsException", + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidAliasNameException", + "error":{ + "code":"InvalidAliasName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateGrant":{ + "name":"CreateGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGrantRequest"}, + "output":{"shape":"CreateGrantResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledException", + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"InvalidGrantTokenException", 
+ "error":{ + "code":"InvalidGrantToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateKey":{ + "name":"CreateKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateKeyRequest"}, + "output":{"shape":"CreateKeyResponse"}, + "errors":[ + { + "shape":"MalformedPolicyDocumentException", + "error":{ + "code":"MalformedPolicyDocument", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnsupportedOperationException", + "error":{ + "code":"UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "Decrypt":{ + "name":"Decrypt", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DecryptRequest"}, + "output":{"shape":"DecryptResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledException", + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + 
"exception":true + }, + { + "shape":"InvalidCiphertextException", + "error":{ + "code":"InvalidCiphertext", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KeyUnavailableException", + "error":{ + "code":"KeyUnavailable", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidGrantTokenException", + "error":{ + "code":"InvalidGrantToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteAlias":{ + "name":"DeleteAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAliasRequest"}, + "errors":[ + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeKey":{ + "name":"DescribeKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKeyRequest"}, + "output":{"shape":"DescribeKeyResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + 
}, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + } + ] + }, + "DisableKey":{ + "name":"DisableKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableKeyRequest"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DisableKeyRotation":{ + "name":"DisableKeyRotation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableKeyRotationRequest"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledException", + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + 
"error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "EnableKey":{ + "name":"EnableKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableKeyRequest"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "EnableKeyRotation":{ + "name":"EnableKeyRotation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableKeyRotationRequest"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledException", + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "Encrypt":{ + "name":"Encrypt", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EncryptRequest"}, + "output":{"shape":"EncryptResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledException", + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KeyUnavailableException", + "error":{ + "code":"KeyUnavailable", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidKeyUsageException", + "error":{ + "code":"InvalidKeyUsage", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidGrantTokenException", + "error":{ + "code":"InvalidGrantToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "GenerateDataKey":{ + "name":"GenerateDataKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"GenerateDataKeyRequest"}, + "output":{"shape":"GenerateDataKeyResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledException", + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KeyUnavailableException", + "error":{ + "code":"KeyUnavailable", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidKeyUsageException", + "error":{ + "code":"InvalidKeyUsage", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidGrantTokenException", + "error":{ + "code":"InvalidGrantToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "GenerateDataKeyWithoutPlaintext":{ + "name":"GenerateDataKeyWithoutPlaintext", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GenerateDataKeyWithoutPlaintextRequest"}, + "output":{"shape":"GenerateDataKeyWithoutPlaintextResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledException", + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KeyUnavailableException", + "error":{ + "code":"KeyUnavailable", + "httpStatusCode":500 + }, + "exception":true, + 
"fault":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidKeyUsageException", + "error":{ + "code":"InvalidKeyUsage", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidGrantTokenException", + "error":{ + "code":"InvalidGrantToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "GenerateRandom":{ + "name":"GenerateRandom", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GenerateRandomRequest"}, + "output":{"shape":"GenerateRandomResponse"}, + "errors":[ + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + } + ] + }, + "GetKeyPolicy":{ + "name":"GetKeyPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetKeyPolicyRequest"}, + "output":{"shape":"GetKeyPolicyResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + 
"httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetKeyRotationStatus":{ + "name":"GetKeyRotationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetKeyRotationStatusRequest"}, + "output":{"shape":"GetKeyRotationStatusResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListAliases":{ + "name":"ListAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAliasesRequest"}, + "output":{"shape":"ListAliasesResponse"}, + "errors":[ + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidMarkerException", + "error":{ + "code":"InvalidMarker", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + } + ] + }, + "ListGrants":{ + "name":"ListGrants", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGrantsRequest"}, + 
"output":{"shape":"ListGrantsResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidMarkerException", + "error":{ + "code":"InvalidMarker", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListKeyPolicies":{ + "name":"ListKeyPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListKeyPoliciesRequest"}, + "output":{"shape":"ListKeyPoliciesResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListKeys":{ + "name":"ListKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ListKeysRequest"}, + "output":{"shape":"ListKeysResponse"}, + "errors":[ + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + } + ] + }, + "ListRetirableGrants":{ + "name":"ListRetirableGrants", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRetirableGrantsRequest"}, + "output":{"shape":"ListGrantsResponse"}, + "errors":[ + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidMarkerException", + "error":{ + "code":"InvalidMarker", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + } + ] + }, + "PutKeyPolicy":{ + "name":"PutKeyPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutKeyPolicyRequest"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MalformedPolicyDocumentException", + "error":{ + "code":"MalformedPolicyDocument", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + 
"code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnsupportedOperationException", + "error":{ + "code":"UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "ReEncrypt":{ + "name":"ReEncrypt", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReEncryptRequest"}, + "output":{"shape":"ReEncryptResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DisabledException", + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidCiphertextException", + "error":{ + "code":"InvalidCiphertext", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KeyUnavailableException", + "error":{ + "code":"KeyUnavailable", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidKeyUsageException", + "error":{ + "code":"InvalidKeyUsage", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidGrantTokenException", + 
"error":{ + "code":"InvalidGrantToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "RetireGrant":{ + "name":"RetireGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetireGrantRequest"}, + "errors":[ + { + "shape":"InvalidGrantTokenException", + "error":{ + "code":"InvalidGrantToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidGrantIdException", + "error":{ + "code":"InvalidGrantId", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "RevokeGrant":{ + "name":"RevokeGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeGrantRequest"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidGrantIdException", + "error":{ + "code":"InvalidGrantId", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "ScheduleKeyDeletion":{ + "name":"ScheduleKeyDeletion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ScheduleKeyDeletionRequest"}, + "output":{"shape":"ScheduleKeyDeletionResponse"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateAlias":{ + "name":"UpdateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAliasRequest"}, + "errors":[ + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"KMSInternalException", + "error":{ + 
"code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "UpdateKeyDescription":{ + "name":"UpdateKeyDescription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateKeyDescriptionRequest"}, + "errors":[ + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidArnException", + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"DependencyTimeoutException", + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + { + "shape":"KMSInternalException", + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + { + "shape":"KMSInvalidStateException", + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "AWSAccountIdType":{"type":"string"}, + "AliasList":{ + "type":"list", + "member":{"shape":"AliasListEntry"} + }, + "AliasListEntry":{ + "type":"structure", + "members":{ + "AliasName":{"shape":"AliasNameType"}, + "AliasArn":{"shape":"ArnType"}, + "TargetKeyId":{"shape":"KeyIdType"} + } + }, + "AliasNameType":{ + "type":"string", + "min":1, + "max":256, + "pattern":"^[a-zA-Z0-9:/_-]+$" + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"AlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ArnType":{ + "type":"string", + "min":20, + "max":2048 + }, + "BooleanType":{"type":"boolean"}, + "CancelKeyDeletionRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + 
"KeyId":{"shape":"KeyIdType"} + } + }, + "CancelKeyDeletionResponse":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "CiphertextType":{ + "type":"blob", + "min":1, + "max":6144 + }, + "CreateAliasRequest":{ + "type":"structure", + "required":[ + "AliasName", + "TargetKeyId" + ], + "members":{ + "AliasName":{"shape":"AliasNameType"}, + "TargetKeyId":{"shape":"KeyIdType"} + } + }, + "CreateGrantRequest":{ + "type":"structure", + "required":[ + "KeyId", + "GranteePrincipal" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "GranteePrincipal":{"shape":"PrincipalIdType"}, + "RetiringPrincipal":{"shape":"PrincipalIdType"}, + "Operations":{"shape":"GrantOperationList"}, + "Constraints":{"shape":"GrantConstraints"}, + "GrantTokens":{"shape":"GrantTokenList"}, + "Name":{"shape":"GrantNameType"} + } + }, + "CreateGrantResponse":{ + "type":"structure", + "members":{ + "GrantToken":{"shape":"GrantTokenType"}, + "GrantId":{"shape":"GrantIdType"} + } + }, + "CreateKeyRequest":{ + "type":"structure", + "members":{ + "Policy":{"shape":"PolicyType"}, + "Description":{"shape":"DescriptionType"}, + "KeyUsage":{"shape":"KeyUsageType"} + } + }, + "CreateKeyResponse":{ + "type":"structure", + "members":{ + "KeyMetadata":{"shape":"KeyMetadata"} + } + }, + "DataKeySpec":{ + "type":"string", + "enum":[ + "AES_256", + "AES_128" + ] + }, + "DateType":{"type":"timestamp"}, + "DecryptRequest":{ + "type":"structure", + "required":["CiphertextBlob"], + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "EncryptionContext":{"shape":"EncryptionContextType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "DecryptResponse":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "Plaintext":{"shape":"PlaintextType"} + } + }, + "DeleteAliasRequest":{ + "type":"structure", + "required":["AliasName"], + "members":{ + "AliasName":{"shape":"AliasNameType"} + } + }, + "DependencyTimeoutException":{ + "type":"structure", + "members":{ + 
"message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"DependencyTimeout", + "httpStatusCode":503 + }, + "exception":true, + "fault":true + }, + "DescribeKeyRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "DescribeKeyResponse":{ + "type":"structure", + "members":{ + "KeyMetadata":{"shape":"KeyMetadata"} + } + }, + "DescriptionType":{ + "type":"string", + "min":0, + "max":8192 + }, + "DisableKeyRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "DisableKeyRotationRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "DisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"Disabled", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "EnableKeyRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "EnableKeyRotationRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "EncryptRequest":{ + "type":"structure", + "required":[ + "KeyId", + "Plaintext" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "Plaintext":{"shape":"PlaintextType"}, + "EncryptionContext":{"shape":"EncryptionContextType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "EncryptResponse":{ + "type":"structure", + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "EncryptionContextKey":{"type":"string"}, + "EncryptionContextType":{ + "type":"map", + "key":{"shape":"EncryptionContextKey"}, + "value":{"shape":"EncryptionContextValue"} + }, + "EncryptionContextValue":{"type":"string"}, + "ErrorMessageType":{"type":"string"}, + "GenerateDataKeyRequest":{ + "type":"structure", + 
"required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "EncryptionContext":{"shape":"EncryptionContextType"}, + "NumberOfBytes":{"shape":"NumberOfBytesType"}, + "KeySpec":{"shape":"DataKeySpec"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "GenerateDataKeyResponse":{ + "type":"structure", + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "Plaintext":{"shape":"PlaintextType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "GenerateDataKeyWithoutPlaintextRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "EncryptionContext":{"shape":"EncryptionContextType"}, + "KeySpec":{"shape":"DataKeySpec"}, + "NumberOfBytes":{"shape":"NumberOfBytesType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "GenerateDataKeyWithoutPlaintextResponse":{ + "type":"structure", + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "GenerateRandomRequest":{ + "type":"structure", + "members":{ + "NumberOfBytes":{"shape":"NumberOfBytesType"} + } + }, + "GenerateRandomResponse":{ + "type":"structure", + "members":{ + "Plaintext":{"shape":"PlaintextType"} + } + }, + "GetKeyPolicyRequest":{ + "type":"structure", + "required":[ + "KeyId", + "PolicyName" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "PolicyName":{"shape":"PolicyNameType"} + } + }, + "GetKeyPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{"shape":"PolicyType"} + } + }, + "GetKeyRotationStatusRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"} + } + }, + "GetKeyRotationStatusResponse":{ + "type":"structure", + "members":{ + "KeyRotationEnabled":{"shape":"BooleanType"} + } + }, + "GrantConstraints":{ + "type":"structure", + "members":{ + "EncryptionContextSubset":{"shape":"EncryptionContextType"}, + "EncryptionContextEquals":{"shape":"EncryptionContextType"} + } + }, + "GrantIdType":{ + "type":"string", + "min":1, + 
"max":128 + }, + "GrantList":{ + "type":"list", + "member":{"shape":"GrantListEntry"} + }, + "GrantListEntry":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "GrantId":{"shape":"GrantIdType"}, + "Name":{"shape":"GrantNameType"}, + "CreationDate":{"shape":"DateType"}, + "GranteePrincipal":{"shape":"PrincipalIdType"}, + "RetiringPrincipal":{"shape":"PrincipalIdType"}, + "IssuingAccount":{"shape":"PrincipalIdType"}, + "Operations":{"shape":"GrantOperationList"}, + "Constraints":{"shape":"GrantConstraints"} + } + }, + "GrantNameType":{ + "type":"string", + "min":1, + "max":256, + "pattern":"^[a-zA-Z0-9:/_-]+$" + }, + "GrantOperation":{ + "type":"string", + "enum":[ + "Decrypt", + "Encrypt", + "GenerateDataKey", + "GenerateDataKeyWithoutPlaintext", + "ReEncryptFrom", + "ReEncryptTo", + "CreateGrant", + "RetireGrant", + "DescribeKey" + ] + }, + "GrantOperationList":{ + "type":"list", + "member":{"shape":"GrantOperation"} + }, + "GrantTokenList":{ + "type":"list", + "member":{"shape":"GrantTokenType"}, + "min":0, + "max":10 + }, + "GrantTokenType":{ + "type":"string", + "min":1, + "max":8192 + }, + "InvalidAliasNameException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"InvalidAliasName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidArnException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"InvalidArn", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidCiphertextException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"InvalidCiphertext", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidGrantIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"InvalidGrantId", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + "InvalidGrantTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"InvalidGrantToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidKeyUsageException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"InvalidKeyUsage", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidMarkerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"InvalidMarker", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KMSInternalException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"KMSInternal", + "httpStatusCode":500 + }, + "exception":true + }, + "KMSInvalidStateException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"KMSInvalidStateException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "KeyIdType":{ + "type":"string", + "min":1, + "max":256 + }, + "KeyList":{ + "type":"list", + "member":{"shape":"KeyListEntry"} + }, + "KeyListEntry":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "KeyArn":{"shape":"ArnType"} + } + }, + "KeyMetadata":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "AWSAccountId":{"shape":"AWSAccountIdType"}, + "KeyId":{"shape":"KeyIdType"}, + "Arn":{"shape":"ArnType"}, + "CreationDate":{"shape":"DateType"}, + "Enabled":{"shape":"BooleanType"}, + "Description":{"shape":"DescriptionType"}, + "KeyUsage":{"shape":"KeyUsageType"}, + "KeyState":{"shape":"KeyState"}, + "DeletionDate":{"shape":"DateType"} + } + }, + "KeyState":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled", + "PendingDeletion" + ] + }, + "KeyUnavailableException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"KeyUnavailable", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + "KeyUsageType":{ + "type":"string", + "enum":["ENCRYPT_DECRYPT"] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitType":{ + "type":"integer", + "min":1, + "max":1000 + }, + "ListAliasesRequest":{ + "type":"structure", + "members":{ + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"} + } + }, + "ListAliasesResponse":{ + "type":"structure", + "members":{ + "Aliases":{"shape":"AliasList"}, + "NextMarker":{"shape":"MarkerType"}, + "Truncated":{"shape":"BooleanType"} + } + }, + "ListGrantsRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "ListGrantsResponse":{ + "type":"structure", + "members":{ + "Grants":{"shape":"GrantList"}, + "NextMarker":{"shape":"MarkerType"}, + "Truncated":{"shape":"BooleanType"} + } + }, + "ListKeyPoliciesRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"} + } + }, + "ListKeyPoliciesResponse":{ + "type":"structure", + "members":{ + "PolicyNames":{"shape":"PolicyNameList"}, + "NextMarker":{"shape":"MarkerType"}, + "Truncated":{"shape":"BooleanType"} + } + }, + "ListKeysRequest":{ + "type":"structure", + "members":{ + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"} + } + }, + "ListKeysResponse":{ + "type":"structure", + "members":{ + "Keys":{"shape":"KeyList"}, + "NextMarker":{"shape":"MarkerType"}, + "Truncated":{"shape":"BooleanType"} + } + }, + "ListRetirableGrantsRequest":{ + "type":"structure", 
+ "required":["RetiringPrincipal"], + "members":{ + "Limit":{"shape":"LimitType"}, + "Marker":{"shape":"MarkerType"}, + "RetiringPrincipal":{"shape":"PrincipalIdType"} + } + }, + "MalformedPolicyDocumentException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"MalformedPolicyDocument", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "MarkerType":{ + "type":"string", + "min":1, + "max":320, + "pattern":"[\\u0020-\\u00FF]*" + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "NumberOfBytesType":{ + "type":"integer", + "min":1, + "max":1024 + }, + "PendingWindowInDaysType":{ + "type":"integer", + "min":1, + "max":365 + }, + "PlaintextType":{ + "type":"blob", + "min":1, + "max":4096, + "sensitive":true + }, + "PolicyNameList":{ + "type":"list", + "member":{"shape":"PolicyNameType"} + }, + "PolicyNameType":{ + "type":"string", + "min":1, + "max":128, + "pattern":"[\\w]+" + }, + "PolicyType":{ + "type":"string", + "min":1, + "max":131072, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "PrincipalIdType":{ + "type":"string", + "min":1, + "max":256 + }, + "PutKeyPolicyRequest":{ + "type":"structure", + "required":[ + "KeyId", + "PolicyName", + "Policy" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "PolicyName":{"shape":"PolicyNameType"}, + "Policy":{"shape":"PolicyType"} + } + }, + "ReEncryptRequest":{ + "type":"structure", + "required":[ + "CiphertextBlob", + "DestinationKeyId" + ], + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "SourceEncryptionContext":{"shape":"EncryptionContextType"}, + "DestinationKeyId":{"shape":"KeyIdType"}, + "DestinationEncryptionContext":{"shape":"EncryptionContextType"}, + "GrantTokens":{"shape":"GrantTokenList"} + } + }, + "ReEncryptResponse":{ + 
"type":"structure", + "members":{ + "CiphertextBlob":{"shape":"CiphertextType"}, + "SourceKeyId":{"shape":"KeyIdType"}, + "KeyId":{"shape":"KeyIdType"} + } + }, + "RetireGrantRequest":{ + "type":"structure", + "members":{ + "GrantToken":{"shape":"GrantTokenType"}, + "KeyId":{"shape":"KeyIdType"}, + "GrantId":{"shape":"GrantIdType"} + } + }, + "RevokeGrantRequest":{ + "type":"structure", + "required":[ + "KeyId", + "GrantId" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "GrantId":{"shape":"GrantIdType"} + } + }, + "ScheduleKeyDeletionRequest":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "PendingWindowInDays":{"shape":"PendingWindowInDaysType"} + } + }, + "ScheduleKeyDeletionResponse":{ + "type":"structure", + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "DeletionDate":{"shape":"DateType"} + } + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessageType"} + }, + "error":{ + "code":"UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UpdateAliasRequest":{ + "type":"structure", + "required":[ + "AliasName", + "TargetKeyId" + ], + "members":{ + "AliasName":{"shape":"AliasNameType"}, + "TargetKeyId":{"shape":"KeyIdType"} + } + }, + "UpdateKeyDescriptionRequest":{ + "type":"structure", + "required":[ + "KeyId", + "Description" + ], + "members":{ + "KeyId":{"shape":"KeyIdType"}, + "Description":{"shape":"DescriptionType"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,699 @@ +{ + "version": "2.0", + "operations": { + "CancelKeyDeletion": "

    Cancels the deletion of a customer master key (CMK). When this operation is successful, the CMK is set to the Disabled state. To enable a CMK, use EnableKey.

    For more information about scheduling and canceling deletion of a CMK, go to Deleting Customer Master Keys in the AWS Key Management Service Developer Guide.

    ", + "CreateAlias": "

    Creates a display name for a customer master key. An alias can be used to identify a key and should be unique. The console enforces a one-to-one mapping between the alias and a key. An alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). An alias must start with the word \"alias\" followed by a forward slash (alias/). An alias that begins with \"aws\" after the forward slash (alias/aws...) is reserved by Amazon Web Services (AWS).

    The alias and the key it is mapped to must be in the same AWS account and the same region.

    To map an alias to a different key, call UpdateAlias.

    ", + "CreateGrant": "

    Adds a grant to a key to specify who can use the key and under what conditions. Grants are alternate permission mechanisms to key policies.

    For more information about grants, see Grants in the AWS Key Management Service Developer Guide.

    ", + "CreateKey": "

    Creates a customer master key. Customer master keys can be used to encrypt small amounts of data (less than 4K) directly, but they are most commonly used to encrypt or envelope data keys that are then used to encrypt customer data. For more information about data keys, see GenerateDataKey and GenerateDataKeyWithoutPlaintext.

    ", + "Decrypt": "

    Decrypts ciphertext. Ciphertext is plaintext that has been previously encrypted by using any of the following functions:

    Note that if a caller has been granted access permissions to all keys (through, for example, IAM user policies that grant Decrypt permission on all resources), then ciphertext encrypted by using keys in other accounts where the key grants access to the caller can be decrypted. To remedy this, we recommend that you do not grant Decrypt access in an IAM user policy. Instead grant Decrypt access only in key policies. If you must grant Decrypt access in an IAM user policy, you should scope the resource to specific keys or to specific trusted accounts.

    ", + "DeleteAlias": "

    Deletes the specified alias. To map an alias to a different key, call UpdateAlias.

    ", + "DescribeKey": "

    Provides detailed information about the specified customer master key.

    ", + "DisableKey": "

    Sets the state of a master key to disabled, thereby preventing its use for cryptographic operations. For more information about how key state affects the use of a master key, go to How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

    ", + "DisableKeyRotation": "

    Disables rotation of the specified key.

    ", + "EnableKey": "

    Marks a key as enabled, thereby permitting its use.

    ", + "EnableKeyRotation": "

    Enables rotation of the specified customer master key.

    ", + "Encrypt": "

    Encrypts plaintext into ciphertext by using a customer master key. The Encrypt function has two primary use cases:

    • You can encrypt up to 4 KB of arbitrary data such as an RSA key, a database password, or other sensitive customer information.
    • If you are moving encrypted data from one region to another, you can use this API to encrypt in the new region the plaintext data key that was used to encrypt the data in the original region. This provides you with an encrypted copy of the data key that can be decrypted in the new region and used there to decrypt the encrypted data.

    Unless you are moving encrypted data from one region to another, you don't use this function to encrypt a generated data key within a region. You retrieve data keys already encrypted by calling the GenerateDataKey or GenerateDataKeyWithoutPlaintext function. Data keys don't need to be encrypted again by calling Encrypt.

    If you want to encrypt data locally in your application, you can use the GenerateDataKey function to return a plaintext data encryption key and a copy of the key encrypted under the customer master key (CMK) of your choosing.

    ", + "GenerateDataKey": "

    Generates a data key that you can use in your application to locally encrypt data. This call returns a plaintext version of the key in the Plaintext field of the response object and an encrypted copy of the key in the CiphertextBlob field. The key is encrypted by using the master key specified by the KeyId field. To decrypt the encrypted key, pass it to the Decrypt API.

    We recommend that you use the following pattern to locally encrypt data: call the GenerateDataKey API, use the key returned in the Plaintext response field to locally encrypt data, and then erase the plaintext data key from memory. Store the encrypted data key (contained in the CiphertextBlob field) alongside of the locally encrypted data.

    You should not call the Encrypt function to re-encrypt your data keys within a region. GenerateDataKey always returns the data key encrypted and tied to the customer master key that will be used to decrypt it. There is no need to decrypt it twice.

    If you decide to use the optional EncryptionContext parameter, you must also store the context in full or at least store enough information along with the encrypted data to be able to reconstruct the context when submitting the ciphertext to the Decrypt API. It is a good practice to choose a context that you can reconstruct on the fly to better secure the ciphertext. For more information about how this parameter is used, see Encryption Context.

    To decrypt data, pass the encrypted data key to the Decrypt API. Decrypt uses the associated master key to decrypt the encrypted data key and returns it as plaintext. Use the plaintext data key to locally decrypt your data and then erase the key from memory. You must specify the encryption context, if any, that you specified when you generated the key. The encryption context is logged by CloudTrail, and you can use this log to help track the use of particular data.

    ", + "GenerateDataKeyWithoutPlaintext": "

    Returns a data key encrypted by a customer master key without the plaintext copy of that key. Otherwise, this API functions exactly like GenerateDataKey. You can use this API to, for example, satisfy an audit requirement that an encrypted key be made available without exposing the plaintext copy of that key.

    ", + "GenerateRandom": "

    Generates an unpredictable byte string.

    ", + "GetKeyPolicy": "

    Retrieves a policy attached to the specified key.

    ", + "GetKeyRotationStatus": "

    Retrieves a Boolean value that indicates whether key rotation is enabled for the specified key.

    ", + "ListAliases": "

    Lists all of the key aliases in the account.

    ", + "ListGrants": "

    List the grants for a specified key.

    ", + "ListKeyPolicies": "

    Retrieves a list of policies attached to a key.

    ", + "ListKeys": "

    Lists the customer master keys.

    ", + "ListRetirableGrants": "

    Returns a list of all grants for which the grant's RetiringPrincipal matches the one specified.

    A typical use is to list all grants that you are able to retire. To retire a grant, use RetireGrant.

    ", + "PutKeyPolicy": "

    Attaches a policy to the specified key.

    ", + "ReEncrypt": "

    Encrypts data on the server side with a new customer master key without exposing the plaintext of the data on the client side. The data is first decrypted and then encrypted. This operation can also be used to change the encryption context of a ciphertext.

    Unlike other actions, ReEncrypt is authorized twice - once as ReEncryptFrom on the source key and once as ReEncryptTo on the destination key. We therefore recommend that you include the \"action\":\"kms:ReEncrypt*\" statement in your key policies to permit re-encryption from or to the key. The statement is included automatically when you authorize use of the key through the console but must be included manually when you set a policy by using the PutKeyPolicy function.

    ", + "RetireGrant": "

    Retires a grant. You can retire a grant when you're done using it to clean up. You should revoke a grant when you intend to actively deny operations that depend on it. The following are permitted to call this API:

    • The account that created the grant
    • The RetiringPrincipal, if present
    • The GranteePrincipal, if RetireGrant is a grantee operation
    The grant to retire must be identified by its grant token or by a combination of the key ARN and the grant ID. A grant token is a unique variable-length base64-encoded string. A grant ID is a 64 character unique identifier of a grant. Both are returned by the CreateGrant function.

    ", + "RevokeGrant": "

    Revokes a grant. You can revoke a grant to actively deny operations that depend on it.

    ", + "ScheduleKeyDeletion": "

    Schedules the deletion of a customer master key (CMK). You may provide a waiting period, specified in days, before deletion occurs. If you do not provide a waiting period, the default period of 30 days is used. When this operation is successful, the state of the CMK changes to PendingDeletion. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the CMK. After the waiting period ends, AWS KMS deletes the CMK and all AWS KMS data associated with it, including all aliases that point to it.

    Deleting a CMK is a destructive and potentially dangerous operation. When a CMK is deleted, all data that was encrypted under the CMK is rendered unrecoverable. To restrict the use of a CMK without deleting it, use DisableKey.

    For more information about scheduling a CMK for deletion, go to Deleting Customer Master Keys in the AWS Key Management Service Developer Guide.

    ", + "UpdateAlias": "

    Updates an alias to map it to a different key.

    An alias is not a property of a key. Therefore, an alias can be mapped to and unmapped from an existing key without changing the properties of the key.

    An alias name can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). An alias must start with the word \"alias\" followed by a forward slash (alias/). An alias that begins with \"aws\" after the forward slash (alias/aws...) is reserved by Amazon Web Services (AWS).

    The alias and the key it is mapped to must be in the same AWS account and the same region.

    ", + "UpdateKeyDescription": "

    Updates the description of a key.

    " + }, + "service": "AWS Key Management Service

    AWS Key Management Service (AWS KMS) is an encryption and key management web service. This guide describes the AWS KMS operations that you can call programmatically. For general information about AWS KMS, see the AWS Key Management Service Developer Guide.

    AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to AWS KMS and other AWS services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

    We recommend that you use the AWS SDKs to make programmatic API calls to AWS KMS.

    Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS 1.2. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

    Signing Requests

    Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account access key ID and secret key for everyday work with AWS KMS. Instead, use the access key ID and secret access key for an IAM user, or you can use the AWS Security Token Service to generate temporary security credentials that you can use to sign requests.

    All AWS KMS operations require Signature Version 4.

    Logging API Requests

    AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related events for your AWS account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to AWS KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

    Additional Resources

    For more information about credentials and request signing, see the following:

    • AWS Security Credentials - This topic provides general information about the types of credentials used for accessing AWS.
    • AWS Security Token Service - This guide describes how to create and use temporary security credentials.
    • Signing AWS API Requests - This set of topics walks you through the process of signing a request using an access key ID and a secret access key.

    Commonly Used APIs

    Of the APIs discussed in this guide, the following will prove the most useful for most applications. You will likely perform actions other than these, such as creating keys and assigning policies, by using the console.

    ", + "shapes": { + "AWSAccountIdType": { + "base": null, + "refs": { + "KeyMetadata$AWSAccountId": "

    The twelve-digit account ID of the AWS account that owns the key.

    " + } + }, + "AliasList": { + "base": null, + "refs": { + "ListAliasesResponse$Aliases": "

    A list of key aliases in the user's account.

    " + } + }, + "AliasListEntry": { + "base": "

    Contains information about an alias.

    ", + "refs": { + "AliasList$member": null + } + }, + "AliasNameType": { + "base": null, + "refs": { + "AliasListEntry$AliasName": "

    String that contains the alias.

    ", + "CreateAliasRequest$AliasName": "

    String that contains the display name. The name must start with the word \"alias\" followed by a forward slash (alias/). Aliases that begin with \"alias/AWS\" are reserved.

    ", + "DeleteAliasRequest$AliasName": "

    The alias to be deleted. The name must start with the word \"alias\" followed by a forward slash (alias/). Aliases that begin with \"alias/AWS\" are reserved.

    ", + "UpdateAliasRequest$AliasName": "

    String that contains the name of the alias to be modified. The name must start with the word \"alias\" followed by a forward slash (alias/). Aliases that begin with \"alias/aws\" are reserved.

    " + } + }, + "AlreadyExistsException": { + "base": "

    The request was rejected because it attempted to create a resource that already exists.

    ", + "refs": { + } + }, + "ArnType": { + "base": null, + "refs": { + "AliasListEntry$AliasArn": "

    String that contains the key ARN.

    ", + "KeyListEntry$KeyArn": "

    ARN of the key.

    ", + "KeyMetadata$Arn": "

    The Amazon Resource Name (ARN) of the key. For examples, see AWS Key Management Service (AWS KMS) in the Example ARNs section of the AWS General Reference.

    " + } + }, + "BooleanType": { + "base": null, + "refs": { + "GetKeyRotationStatusResponse$KeyRotationEnabled": "

    A Boolean value that specifies whether key rotation is enabled.

    ", + "KeyMetadata$Enabled": "

    Specifies whether the key is enabled. When KeyState is Enabled this value is true, otherwise it is false.

    ", + "ListAliasesResponse$Truncated": "

    A flag that indicates whether there are more items in the list. If your results were truncated, you can use the Marker parameter to make a subsequent pagination request to retrieve more items in the list.

    ", + "ListGrantsResponse$Truncated": "

    A flag that indicates whether there are more items in the list. If your results were truncated, you can use the Marker parameter to make a subsequent pagination request to retrieve more items in the list.

    ", + "ListKeyPoliciesResponse$Truncated": "

    A flag that indicates whether there are more items in the list. If your results were truncated, you can use the Marker parameter to make a subsequent pagination request to retrieve more items in the list.

    ", + "ListKeysResponse$Truncated": "

    A flag that indicates whether there are more items in the list. If your results were truncated, you can use the Marker parameter to make a subsequent pagination request to retrieve more items in the list.

    " + } + }, + "CancelKeyDeletionRequest": { + "base": null, + "refs": { + } + }, + "CancelKeyDeletionResponse": { + "base": null, + "refs": { + } + }, + "CiphertextType": { + "base": null, + "refs": { + "DecryptRequest$CiphertextBlob": "

    Ciphertext to be decrypted. The blob includes metadata.

    ", + "EncryptResponse$CiphertextBlob": "

    The encrypted plaintext. If you are using the CLI, the value is Base64 encoded. Otherwise, it is not encoded.

    ", + "GenerateDataKeyResponse$CiphertextBlob": "

    Ciphertext that contains the encrypted data key. You must store the blob and enough information to reconstruct the encryption context so that the data encrypted by using the key can later be decrypted. You must provide both the ciphertext blob and the encryption context to the Decrypt API to recover the plaintext data key and decrypt the object.

    If you are using the CLI, the value is Base64 encoded. Otherwise, it is not encoded.

    ", + "GenerateDataKeyWithoutPlaintextResponse$CiphertextBlob": "

    Ciphertext that contains the wrapped data key. You must store the blob and encryption context so that the key can be used in a future decrypt operation.

    If you are using the CLI, the value is Base64 encoded. Otherwise, it is not encoded.

    ", + "ReEncryptRequest$CiphertextBlob": "

    Ciphertext of the data to re-encrypt.

    ", + "ReEncryptResponse$CiphertextBlob": "

    The re-encrypted data. If you are using the CLI, the value is Base64 encoded. Otherwise, it is not encoded.

    " + } + }, + "CreateAliasRequest": { + "base": null, + "refs": { + } + }, + "CreateGrantRequest": { + "base": null, + "refs": { + } + }, + "CreateGrantResponse": { + "base": null, + "refs": { + } + }, + "CreateKeyRequest": { + "base": null, + "refs": { + } + }, + "CreateKeyResponse": { + "base": null, + "refs": { + } + }, + "DataKeySpec": { + "base": null, + "refs": { + "GenerateDataKeyRequest$KeySpec": "

    Value that identifies the encryption algorithm and key size to generate a data key for. Currently this can be AES_128 or AES_256.

    ", + "GenerateDataKeyWithoutPlaintextRequest$KeySpec": "

    Value that identifies the encryption algorithm and key size. Currently this can be AES_128 or AES_256.

    " + } + }, + "DateType": { + "base": null, + "refs": { + "GrantListEntry$CreationDate": "

    The date and time when the grant was created.

    ", + "KeyMetadata$CreationDate": "

    The date and time when the key was created.

    ", + "KeyMetadata$DeletionDate": "

    The date and time after which AWS KMS deletes the customer master key (CMK). This value is present only when KeyState is PendingDeletion, otherwise this value is null.

    ", + "ScheduleKeyDeletionResponse$DeletionDate": "

    The date and time after which AWS KMS deletes the customer master key (CMK).

    " + } + }, + "DecryptRequest": { + "base": null, + "refs": { + } + }, + "DecryptResponse": { + "base": null, + "refs": { + } + }, + "DeleteAliasRequest": { + "base": null, + "refs": { + } + }, + "DependencyTimeoutException": { + "base": "

    The system timed out while trying to fulfill the request. The request can be retried.

    ", + "refs": { + } + }, + "DescribeKeyRequest": { + "base": null, + "refs": { + } + }, + "DescribeKeyResponse": { + "base": null, + "refs": { + } + }, + "DescriptionType": { + "base": null, + "refs": { + "CreateKeyRequest$Description": "

    Description of the key. We recommend that you choose a description that helps your customer decide whether the key is appropriate for a task.

    ", + "KeyMetadata$Description": "

    The friendly description of the key.

    ", + "UpdateKeyDescriptionRequest$Description": "

    New description for the key.

    " + } + }, + "DisableKeyRequest": { + "base": null, + "refs": { + } + }, + "DisableKeyRotationRequest": { + "base": null, + "refs": { + } + }, + "DisabledException": { + "base": "

    The request was rejected because the specified key was marked as disabled.

    ", + "refs": { + } + }, + "EnableKeyRequest": { + "base": null, + "refs": { + } + }, + "EnableKeyRotationRequest": { + "base": null, + "refs": { + } + }, + "EncryptRequest": { + "base": null, + "refs": { + } + }, + "EncryptResponse": { + "base": null, + "refs": { + } + }, + "EncryptionContextKey": { + "base": null, + "refs": { + "EncryptionContextType$key": null + } + }, + "EncryptionContextType": { + "base": null, + "refs": { + "DecryptRequest$EncryptionContext": "

    The encryption context. If this was specified in the Encrypt function, it must be specified here or the decryption operation will fail. For more information, see Encryption Context.

    ", + "EncryptRequest$EncryptionContext": "

    Name/value pair that specifies the encryption context to be used for authenticated encryption. If used here, the same value must be supplied to the Decrypt API or decryption will fail. For more information, see Encryption Context.

    ", + "GenerateDataKeyRequest$EncryptionContext": "

    Name/value pair that contains additional data to be authenticated during the encryption and decryption processes that use the key. This value is logged by AWS CloudTrail to provide context around the data encrypted by the key.

    ", + "GenerateDataKeyWithoutPlaintextRequest$EncryptionContext": "

    Name:value pair that contains additional data to be authenticated during the encryption and decryption processes.

    ", + "GrantConstraints$EncryptionContextSubset": "

    Contains a list of key-value pairs, a subset of which must be present in the encryption context of a subsequent operation permitted by the grant. When a subsequent operation permitted by the grant includes an encryption context that matches this list or is a subset of this list, the grant allows the operation. Otherwise, the operation is not allowed.

    ", + "GrantConstraints$EncryptionContextEquals": "

    Contains a list of key-value pairs that must be present in the encryption context of a subsequent operation permitted by the grant. When a subsequent operation permitted by the grant includes an encryption context that matches this list, the grant allows the operation. Otherwise, the operation is not allowed.

    ", + "ReEncryptRequest$SourceEncryptionContext": "

    Encryption context used to encrypt and decrypt the data specified in the CiphertextBlob parameter.

    ", + "ReEncryptRequest$DestinationEncryptionContext": "

    Encryption context to be used when the data is re-encrypted.

    " + } + }, + "EncryptionContextValue": { + "base": null, + "refs": { + "EncryptionContextType$value": null + } + }, + "ErrorMessageType": { + "base": null, + "refs": { + "AlreadyExistsException$message": null, + "DependencyTimeoutException$message": null, + "DisabledException$message": null, + "InvalidAliasNameException$message": null, + "InvalidArnException$message": null, + "InvalidCiphertextException$message": null, + "InvalidGrantIdException$message": null, + "InvalidGrantTokenException$message": null, + "InvalidKeyUsageException$message": null, + "InvalidMarkerException$message": null, + "KMSInternalException$message": null, + "KMSInvalidStateException$message": null, + "KeyUnavailableException$message": null, + "LimitExceededException$message": null, + "MalformedPolicyDocumentException$message": null, + "NotFoundException$message": null, + "UnsupportedOperationException$message": null + } + }, + "GenerateDataKeyRequest": { + "base": null, + "refs": { + } + }, + "GenerateDataKeyResponse": { + "base": null, + "refs": { + } + }, + "GenerateDataKeyWithoutPlaintextRequest": { + "base": null, + "refs": { + } + }, + "GenerateDataKeyWithoutPlaintextResponse": { + "base": null, + "refs": { + } + }, + "GenerateRandomRequest": { + "base": null, + "refs": { + } + }, + "GenerateRandomResponse": { + "base": null, + "refs": { + } + }, + "GetKeyPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetKeyPolicyResponse": { + "base": null, + "refs": { + } + }, + "GetKeyRotationStatusRequest": { + "base": null, + "refs": { + } + }, + "GetKeyRotationStatusResponse": { + "base": null, + "refs": { + } + }, + "GrantConstraints": { + "base": "

    A structure for specifying the conditions under which the operations permitted by the grant are allowed.

    You can use this structure to allow the operations permitted by the grant only when a specified encryption context is present. For more information about encryption context, see Encryption Context in the AWS Key Management Service Developer Guide.

    ", + "refs": { + "CreateGrantRequest$Constraints": "

    The conditions under which the operations permitted by the grant are allowed.

    You can use this value to allow the operations permitted by the grant only when a specified encryption context is present. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

    ", + "GrantListEntry$Constraints": "

    The conditions under which the grant's operations are allowed.

    " + } + }, + "GrantIdType": { + "base": null, + "refs": { + "CreateGrantResponse$GrantId": "

    The unique identifier for the grant.

    You can use the GrantId in a subsequent RetireGrant or RevokeGrant operation.

    ", + "GrantListEntry$GrantId": "

    The unique identifier for the grant.

    ", + "RetireGrantRequest$GrantId": "

    Unique identifier of the grant to be retired. The grant ID is returned by the CreateGrant function.

    • Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123

    ", + "RevokeGrantRequest$GrantId": "

    Identifier of the grant to be revoked.

    " + } + }, + "GrantList": { + "base": null, + "refs": { + "ListGrantsResponse$Grants": "

    A list of grants.

    " + } + }, + "GrantListEntry": { + "base": "

    Contains information about an entry in a list of grants.

    ", + "refs": { + "GrantList$member": null + } + }, + "GrantNameType": { + "base": null, + "refs": { + "CreateGrantRequest$Name": "

    A friendly name for identifying the grant. Use this value to prevent unintended creation of duplicate grants when retrying this request.

    When this value is absent, all CreateGrant requests result in a new grant with a unique GrantId even if all the supplied parameters are identical. This can result in unintended duplicates when you retry the CreateGrant request.

    When this value is present, you can retry a CreateGrant request with identical parameters; if the grant already exists, the original GrantId is returned without creating a new grant. Note that the returned grant token is unique with every CreateGrant request, even when a duplicate GrantId is returned. All grant tokens obtained in this way can be used interchangeably.

    ", + "GrantListEntry$Name": "

    The friendly name that identifies the grant. If a name was provided in the CreateGrant request, that name is returned. Otherwise this value is null.

    " + } + }, + "GrantOperation": { + "base": null, + "refs": { + "GrantOperationList$member": null + } + }, + "GrantOperationList": { + "base": null, + "refs": { + "CreateGrantRequest$Operations": "

    A list of operations that the grant permits. The list can contain any combination of one or more of the following values:

    • Decrypt
    • Encrypt
    • GenerateDataKey
    • GenerateDataKeyWithoutPlaintext
    • ReEncryptFrom
    • ReEncryptTo
    • CreateGrant
    • RetireGrant

    ", + "GrantListEntry$Operations": "

    The list of operations permitted by the grant.

    " + } + }, + "GrantTokenList": { + "base": null, + "refs": { + "CreateGrantRequest$GrantTokens": "

    A list of grant tokens.

    For more information, go to Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "DecryptRequest$GrantTokens": "

    A list of grant tokens.

    For more information, go to Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "DescribeKeyRequest$GrantTokens": "

    A list of grant tokens.

    For more information, go to Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "EncryptRequest$GrantTokens": "

    A list of grant tokens.

    For more information, go to Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "GenerateDataKeyRequest$GrantTokens": "

    A list of grant tokens.

    For more information, go to Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "GenerateDataKeyWithoutPlaintextRequest$GrantTokens": "

    A list of grant tokens.

    For more information, go to Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "ReEncryptRequest$GrantTokens": "

    A list of grant tokens.

    For more information, go to Grant Tokens in the AWS Key Management Service Developer Guide.

    " + } + }, + "GrantTokenType": { + "base": null, + "refs": { + "CreateGrantResponse$GrantToken": "

    The grant token.

    For more information about using grant tokens, see Grant Tokens in the AWS Key Management Service Developer Guide.

    ", + "GrantTokenList$member": null, + "RetireGrantRequest$GrantToken": "

    Token that identifies the grant to be retired.

    " + } + }, + "InvalidAliasNameException": { + "base": "

    The request was rejected because the specified alias name is not valid.

    ", + "refs": { + } + }, + "InvalidArnException": { + "base": "

    The request was rejected because a specified ARN was not valid.

    ", + "refs": { + } + }, + "InvalidCiphertextException": { + "base": "

    The request was rejected because the specified ciphertext has been corrupted or is otherwise invalid.

    ", + "refs": { + } + }, + "InvalidGrantIdException": { + "base": "

    The request was rejected because the specified GrantId is not valid.

    ", + "refs": { + } + }, + "InvalidGrantTokenException": { + "base": "

    The request was rejected because a grant token provided as part of the request is invalid.

    ", + "refs": { + } + }, + "InvalidKeyUsageException": { + "base": "

    The request was rejected because the specified KeySpec parameter is not valid. The currently supported value is ENCRYPT/DECRYPT.

    ", + "refs": { + } + }, + "InvalidMarkerException": { + "base": "

    The request was rejected because the marker that specifies where pagination should next begin is not valid.

    ", + "refs": { + } + }, + "KMSInternalException": { + "base": "

    The request was rejected because an internal exception occurred. The request can be retried.

    ", + "refs": { + } + }, + "KMSInvalidStateException": { + "base": "

    The request was rejected because the state of the specified resource is not valid for this request.

    For more information about how key state affects the use of a customer master key (CMK), go to How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

    ", + "refs": { + } + }, + "KeyIdType": { + "base": null, + "refs": { + "AliasListEntry$TargetKeyId": "

    String that contains the key identifier pointed to by the alias.

    ", + "CancelKeyDeletionRequest$KeyId": "

    The unique identifier for the customer master key (CMK) for which to cancel deletion.

    To specify this value, use the unique key ID or the Amazon Resource Name (ARN) of the CMK. Examples:

    • Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
    • Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

    To obtain the unique key ID and key ARN for a given CMK, use ListKeys or DescribeKey.

    ", + "CancelKeyDeletionResponse$KeyId": "

    The unique identifier of the master key for which deletion is canceled.

    ", + "CreateAliasRequest$TargetKeyId": "

    An identifier of the key for which you are creating the alias. This value cannot be another alias but can be a globally unique identifier or a fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "CreateGrantRequest$KeyId": "

    The unique identifier for the customer master key (CMK) that the grant applies to.

    To specify this value, use the globally unique key ID or the Amazon Resource Name (ARN) of the key. Examples:

    • Globally unique key ID: 12345678-1234-1234-1234-123456789012
    • Key ARN: arn:aws:kms:us-west-2:123456789012:key/12345678-1234-1234-1234-123456789012

    ", + "DecryptResponse$KeyId": "

    ARN of the key used to perform the decryption. This value is returned if no errors are encountered during the operation.

    ", + "DescribeKeyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012
    • Alias Name Example - alias/MyAliasName

    ", + "DisableKeyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "DisableKeyRotationRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "EnableKeyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "EnableKeyRotationRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "EncryptRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012
    • Alias Name Example - alias/MyAliasName

    ", + "EncryptResponse$KeyId": "

    The ID of the key used during encryption.

    ", + "GenerateDataKeyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012
    • Alias Name Example - alias/MyAliasName

    ", + "GenerateDataKeyResponse$KeyId": "

    System generated unique identifier of the key to be used to decrypt the encrypted copy of the data key.

    ", + "GenerateDataKeyWithoutPlaintextRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012
    • Alias Name Example - alias/MyAliasName

    ", + "GenerateDataKeyWithoutPlaintextResponse$KeyId": "

    System generated unique identifier of the key to be used to decrypt the encrypted copy of the data key.

    ", + "GetKeyPolicyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "GetKeyRotationStatusRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "GrantListEntry$KeyId": "

    The unique identifier for the customer master key (CMK) to which the grant applies.

    ", + "KeyListEntry$KeyId": "

    Unique identifier of the key.

    ", + "KeyMetadata$KeyId": "

    The globally unique identifier for the key.

    ", + "ListGrantsRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "ListKeyPoliciesRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012
    • Alias Name Example - alias/MyAliasName

    ", + "PutKeyPolicyRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "ReEncryptRequest$DestinationKeyId": "

    A unique identifier for the customer master key used to re-encrypt the data. This value can be a globally unique identifier, a fully specified ARN to either an alias or a key, or an alias name prefixed by \"alias/\".

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012
    • Alias Name Example - alias/MyAliasName

    ", + "ReEncryptResponse$SourceKeyId": "

    Unique identifier of the key used to originally encrypt the data.

    ", + "ReEncryptResponse$KeyId": "

    Unique identifier of the key used to re-encrypt the data.

    ", + "RetireGrantRequest$KeyId": "

    A unique identifier for the customer master key associated with the grant. This value can be a globally unique identifier or a fully specified ARN of the key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "RevokeGrantRequest$KeyId": "

    A unique identifier for the customer master key associated with the grant. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    ", + "ScheduleKeyDeletionRequest$KeyId": "

    The unique identifier for the customer master key (CMK) to delete.

    To specify this value, use the unique key ID or the Amazon Resource Name (ARN) of the CMK. Examples:

    • Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
    • Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

    To obtain the unique key ID and key ARN for a given CMK, use ListKeys or DescribeKey.

    ", + "ScheduleKeyDeletionResponse$KeyId": "

    The unique identifier of the customer master key (CMK) for which deletion is scheduled.

    ", + "UpdateAliasRequest$TargetKeyId": "

    Unique identifier of the customer master key to be mapped to the alias. This value can be a globally unique identifier or the fully specified ARN of a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    You can call ListAliases to verify that the alias is mapped to the correct TargetKeyId.

    ", + "UpdateKeyDescriptionRequest$KeyId": "

    A unique identifier for the customer master key. This value can be a globally unique identifier or the fully specified ARN to a key.

    • Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
    • Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012

    " + } + }, + "KeyList": { + "base": null, + "refs": { + "ListKeysResponse$Keys": "

    A list of keys.

    " + } + }, + "KeyListEntry": { + "base": "

    Contains information about each entry in the key list.

    ", + "refs": { + "KeyList$member": null + } + }, + "KeyMetadata": { + "base": "

    Contains metadata about a customer master key (CMK).

    This data type is used as a response element for the CreateKey and DescribeKey operations.

    ", + "refs": { + "CreateKeyResponse$KeyMetadata": "

    Metadata associated with the key.

    ", + "DescribeKeyResponse$KeyMetadata": "

    Metadata associated with the key.

    " + } + }, + "KeyState": { + "base": null, + "refs": { + "KeyMetadata$KeyState": "

    The state of the customer master key (CMK).

    For more information about how key state affects the use of a CMK, go to How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

    " + } + }, + "KeyUnavailableException": { + "base": "

    The request was rejected because the key was not available. The request can be retried.

    ", + "refs": { + } + }, + "KeyUsageType": { + "base": null, + "refs": { + "CreateKeyRequest$KeyUsage": "

    Specifies the intended use of the key. Currently this defaults to ENCRYPT/DECRYPT, and only symmetric encryption and decryption are supported.

    ", + "KeyMetadata$KeyUsage": "

    The cryptographic operations for which you can use the key. Currently the only allowed value is ENCRYPT_DECRYPT, which means you can use the key for the Encrypt and Decrypt operations.

    " + } + }, + "LimitExceededException": { + "base": "

    The request was rejected because a limit was exceeded. For more information, see Limits in the AWS Key Management Service Developer Guide.

    ", + "refs": { + } + }, + "LimitType": { + "base": null, + "refs": { + "ListAliasesRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 100, inclusive. If you do not include a value, it defaults to 50.

    ", + "ListGrantsRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 100, inclusive. If you do not include a value, it defaults to 50.

    ", + "ListKeyPoliciesRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 1000, inclusive. If you do not include a value, it defaults to 100.

    Currently only 1 policy can be attached to a key.

    ", + "ListKeysRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 1000, inclusive. If you do not include a value, it defaults to 100.

    ", + "ListRetirableGrantsRequest$Limit": "

    When paginating results, specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the Truncated element in the response is set to true.

    This value is optional. If you include a value, it must be between 1 and 100, inclusive. If you do not include a value, it defaults to 50.

    " + } + }, + "ListAliasesRequest": { + "base": null, + "refs": { + } + }, + "ListAliasesResponse": { + "base": null, + "refs": { + } + }, + "ListGrantsRequest": { + "base": null, + "refs": { + } + }, + "ListGrantsResponse": { + "base": null, + "refs": { + } + }, + "ListKeyPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListKeyPoliciesResponse": { + "base": null, + "refs": { + } + }, + "ListKeysRequest": { + "base": null, + "refs": { + } + }, + "ListKeysResponse": { + "base": null, + "refs": { + } + }, + "ListRetirableGrantsRequest": { + "base": null, + "refs": { + } + }, + "MalformedPolicyDocumentException": { + "base": "

    The request was rejected because the specified policy is not syntactically or semantically correct.

    ", + "refs": { + } + }, + "MarkerType": { + "base": null, + "refs": { + "ListAliasesRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you've received a response with truncated results. Set it to the value of NextMarker from the response you just received.

    ", + "ListAliasesResponse$NextMarker": "

    When Truncated is true, this value is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListGrantsRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you've received a response with truncated results. Set it to the value of NextMarker from the response you just received.

    ", + "ListGrantsResponse$NextMarker": "

    When Truncated is true, this value is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListKeyPoliciesRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you've received a response with truncated results. Set it to the value of NextMarker from the response you just received.

    ", + "ListKeyPoliciesResponse$NextMarker": "

    When Truncated is true, this value is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListKeysRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you've received a response with truncated results. Set it to the value of NextMarker from the response you just received.

    ", + "ListKeysResponse$NextMarker": "

    When Truncated is true, this value is present and contains the value to use for the Marker parameter in a subsequent pagination request.

    ", + "ListRetirableGrantsRequest$Marker": "

    Use this parameter only when paginating results and only in a subsequent request after you've received a response with truncated results. Set it to the value of NextMarker from the response you just received.

    " + } + }, + "NotFoundException": { + "base": "

    The request was rejected because the specified entity or resource could not be found.

    ", + "refs": { + } + }, + "NumberOfBytesType": { + "base": null, + "refs": { + "GenerateDataKeyRequest$NumberOfBytes": "

    Integer that contains the number of bytes to generate. Common values are 128, 256, 512, and 1024. 1024 is the current limit. We recommend that you use the KeySpec parameter instead.

    ", + "GenerateDataKeyWithoutPlaintextRequest$NumberOfBytes": "

    Integer that contains the number of bytes to generate. Common values are 128, 256, 512, 1024 and so on. We recommend that you use the KeySpec parameter instead.

    ", + "GenerateRandomRequest$NumberOfBytes": "

    Integer that contains the number of bytes to generate. Common values are 128, 256, 512, 1024 and so on. The current limit is 1024 bytes.

    " + } + }, + "PendingWindowInDaysType": { + "base": null, + "refs": { + "ScheduleKeyDeletionRequest$PendingWindowInDays": "

    The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the customer master key (CMK).

    This value is optional. If you include a value, it must be between 7 and 30, inclusive. If you do not include a value, it defaults to 30.

    " + } + }, + "PlaintextType": { + "base": null, + "refs": { + "DecryptResponse$Plaintext": "

    Decrypted plaintext data. This value may not be returned if the customer master key is not available or if you didn't have permission to use it.

    ", + "EncryptRequest$Plaintext": "

    Data to be encrypted.

    ", + "GenerateDataKeyResponse$Plaintext": "

    Plaintext that contains the data key. Use this for encryption and decryption and then remove it from memory as soon as possible.

    ", + "GenerateRandomResponse$Plaintext": "

    Plaintext that contains the unpredictable byte string.

    " + } + }, + "PolicyNameList": { + "base": null, + "refs": { + "ListKeyPoliciesResponse$PolicyNames": "

    A list of policy names. Currently, there is only one policy and it is named \"Default\".

    " + } + }, + "PolicyNameType": { + "base": null, + "refs": { + "GetKeyPolicyRequest$PolicyName": "

    String that contains the name of the policy. Currently, this must be \"default\". Policy names can be discovered by calling ListKeyPolicies.

    ", + "PolicyNameList$member": null, + "PutKeyPolicyRequest$PolicyName": "

    Name of the policy to be attached. Currently, the only supported name is \"default\".

    " + } + }, + "PolicyType": { + "base": null, + "refs": { + "CreateKeyRequest$Policy": "

    Policy to attach to the key. This is required and delegates back to the account. The key is the root of trust. The policy size limit is 32 KiB (32768 bytes).

    ", + "GetKeyPolicyResponse$Policy": "

    A policy document in JSON format.

    ", + "PutKeyPolicyRequest$Policy": "

    The policy to attach to the key. This is required and delegates back to the account. The key is the root of trust. The policy size limit is 32 KiB (32768 bytes).

    " + } + }, + "PrincipalIdType": { + "base": null, + "refs": { + "CreateGrantRequest$GranteePrincipal": "

    The principal that is given permission to perform the operations that the grant permits.

    To specify the principal, use the Amazon Resource Name (ARN) of an AWS principal. Valid AWS principals include AWS accounts (root), IAM users, federated users, and assumed role users. For examples of the ARN syntax to use for specifying a principal, see AWS Identity and Access Management (IAM) in the Example ARNs section of the AWS General Reference.

    ", + "CreateGrantRequest$RetiringPrincipal": "

    The principal that is given permission to retire the grant by using RetireGrant operation.

    To specify the principal, use the Amazon Resource Name (ARN) of an AWS principal. Valid AWS principals include AWS accounts (root), IAM users, federated users, and assumed role users. For examples of the ARN syntax to use for specifying a principal, see AWS Identity and Access Management (IAM) in the Example ARNs section of the AWS General Reference.

    ", + "GrantListEntry$GranteePrincipal": "

    The principal that receives the grant's permissions.

    ", + "GrantListEntry$RetiringPrincipal": "

    The principal that can retire the grant.

    ", + "GrantListEntry$IssuingAccount": "

    The AWS account under which the grant was issued.

    ", + "ListRetirableGrantsRequest$RetiringPrincipal": "

    The retiring principal for which to list grants.

    To specify the retiring principal, use the Amazon Resource Name (ARN) of an AWS principal. Valid AWS principals include AWS accounts (root), IAM users, federated users, and assumed role users. For examples of the ARN syntax for specifying a principal, go to AWS Identity and Access Management (IAM) in the Example ARNs section of the Amazon Web Services General Reference.

    " + } + }, + "PutKeyPolicyRequest": { + "base": null, + "refs": { + } + }, + "ReEncryptRequest": { + "base": null, + "refs": { + } + }, + "ReEncryptResponse": { + "base": null, + "refs": { + } + }, + "RetireGrantRequest": { + "base": null, + "refs": { + } + }, + "RevokeGrantRequest": { + "base": null, + "refs": { + } + }, + "ScheduleKeyDeletionRequest": { + "base": null, + "refs": { + } + }, + "ScheduleKeyDeletionResponse": { + "base": null, + "refs": { + } + }, + "UnsupportedOperationException": { + "base": "

    The request was rejected because a specified parameter is not supported.

    ", + "refs": { + } + }, + "UpdateAliasRequest": { + "base": null, + "refs": { + } + }, + "UpdateKeyDescriptionRequest": { + "base": null, + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/kms/2014-11-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "pagination": { + "ListAliases": { + "limit_key": "Limit", + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "Truncated", + "result_key": "Aliases" + }, + "ListGrants": { + "limit_key": "Limit", + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "Truncated", + "result_key": "Grants" + }, + "ListKeyPolicies": { + "limit_key": "Limit", + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "Truncated", + "result_key": "PolicyNames" + }, + "ListKeys": { + "limit_key": "Limit", + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "Truncated", + "result_key": "Keys" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/api-2.json 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1,667 @@ +{ + "metadata":{ + "apiVersion":"2014-11-11", + "endpointPrefix":"lambda", + "serviceFullName":"AWS Lambda", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "AddEventSource":{ + "name":"AddEventSource", + "http":{ + "method":"POST", + "requestUri":"/2014-11-13/event-source-mappings/" + }, + "input":{"shape":"AddEventSourceRequest"}, + "output":{"shape":"EventSourceConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteFunction":{ + "name":"DeleteFunction", + "http":{ + "method":"DELETE", + "requestUri":"/2014-11-13/functions/{FunctionName}", + "responseCode":204 + }, + "input":{"shape":"DeleteFunctionRequest"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "GetEventSource":{ + "name":"GetEventSource", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/event-source-mappings/{UUID}", + "responseCode":200 + }, + "input":{"shape":"GetEventSourceRequest"}, + "output":{"shape":"EventSourceConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetFunction":{ + "name":"GetFunction", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/functions/{FunctionName}", + "responseCode":200 + }, + "input":{"shape":"GetFunctionRequest"}, + "output":{"shape":"GetFunctionResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + 
"exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "GetFunctionConfiguration":{ + "name":"GetFunctionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/functions/{FunctionName}/configuration", + "responseCode":200 + }, + "input":{"shape":"GetFunctionConfigurationRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + }, + "InvokeAsync":{ + "name":"InvokeAsync", + "http":{ + "method":"POST", + "requestUri":"/2014-11-13/functions/{FunctionName}/invoke-async/", + "responseCode":202 + }, + "input":{"shape":"InvokeAsyncRequest"}, + "output":{"shape":"InvokeAsyncResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestContentException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListEventSources":{ + "name":"ListEventSources", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/event-source-mappings/", + "responseCode":200 + }, + "input":{"shape":"ListEventSourcesRequest"}, + "output":{"shape":"ListEventSourcesResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListFunctions":{ + "name":"ListFunctions", + "http":{ + "method":"GET", + "requestUri":"/2014-11-13/functions/", + "responseCode":200 + }, + "input":{"shape":"ListFunctionsRequest"}, + 
"output":{"shape":"ListFunctionsResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RemoveEventSource":{ + "name":"RemoveEventSource", + "http":{ + "method":"DELETE", + "requestUri":"/2014-11-13/event-source-mappings/{UUID}", + "responseCode":204 + }, + "input":{"shape":"RemoveEventSourceRequest"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateFunctionConfiguration":{ + "name":"UpdateFunctionConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/2014-11-13/functions/{FunctionName}/configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateFunctionConfigurationRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UploadFunction":{ + "name":"UploadFunction", + "http":{ + "method":"PUT", + "requestUri":"/2014-11-13/functions/{FunctionName}", + "responseCode":201 + }, + "input":{"shape":"UploadFunctionRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + } + ] + } + }, + "shapes":{ + "AddEventSourceRequest":{ + "type":"structure", + "required":[ + "EventSource", + 
"FunctionName", + "Role" + ], + "members":{ + "EventSource":{"shape":"String"}, + "FunctionName":{"shape":"FunctionName"}, + "Role":{"shape":"RoleArn"}, + "BatchSize":{"shape":"Integer"}, + "Parameters":{"shape":"Map"} + } + }, + "Blob":{ + "type":"blob", + "streaming":true + }, + "DeleteFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + } + } + }, + "Description":{ + "type":"string", + "min":0, + "max":256 + }, + "EventSourceConfiguration":{ + "type":"structure", + "members":{ + "UUID":{"shape":"String"}, + "BatchSize":{"shape":"Integer"}, + "EventSource":{"shape":"String"}, + "FunctionName":{"shape":"FunctionName"}, + "Parameters":{"shape":"Map"}, + "Role":{"shape":"RoleArn"}, + "LastModified":{"shape":"Timestamp"}, + "IsActive":{"shape":"Boolean"}, + "Status":{"shape":"String"} + } + }, + "EventSourceList":{ + "type":"list", + "member":{"shape":"EventSourceConfiguration"} + }, + "FunctionArn":{ + "type":"string", + "pattern":"arn:aws:lambda:[a-z]{2}-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(\\/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})?" 
+ }, + "FunctionCodeLocation":{ + "type":"structure", + "members":{ + "RepositoryType":{"shape":"String"}, + "Location":{"shape":"String"} + } + }, + "FunctionConfiguration":{ + "type":"structure", + "members":{ + "FunctionName":{"shape":"FunctionName"}, + "FunctionARN":{"shape":"FunctionArn"}, + "ConfigurationId":{"shape":"String"}, + "Runtime":{"shape":"Runtime"}, + "Role":{"shape":"RoleArn"}, + "Handler":{"shape":"Handler"}, + "Mode":{"shape":"Mode"}, + "CodeSize":{"shape":"Long"}, + "Description":{"shape":"Description"}, + "Timeout":{"shape":"Timeout"}, + "MemorySize":{"shape":"MemorySize"}, + "LastModified":{"shape":"Timestamp"} + } + }, + "FunctionList":{ + "type":"list", + "member":{"shape":"FunctionConfiguration"} + }, + "FunctionName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-zA-Z0-9-_]+" + }, + "GetEventSourceRequest":{ + "type":"structure", + "required":["UUID"], + "members":{ + "UUID":{ + "shape":"String", + "location":"uri", + "locationName":"UUID" + } + } + }, + "GetFunctionConfigurationRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + } + } + }, + "GetFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + } + } + }, + "GetFunctionResponse":{ + "type":"structure", + "members":{ + "Configuration":{"shape":"FunctionConfiguration"}, + "Code":{"shape":"FunctionCodeLocation"} + } + }, + "Handler":{ + "type":"string", + "pattern":"[a-zA-Z0-9./\\-_]+" + }, + "HttpStatus":{"type":"integer"}, + "Integer":{"type":"integer"}, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestContentException":{ + "type":"structure", + "members":{ + 
"Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvokeAsyncRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "InvokeArgs" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "InvokeArgs":{"shape":"Blob"} + }, + "payload":"InvokeArgs" + }, + "InvokeAsyncResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"HttpStatus", + "location":"statusCode" + } + } + }, + "ListEventSourcesRequest":{ + "type":"structure", + "members":{ + "EventSourceArn":{ + "shape":"String", + "location":"querystring", + "locationName":"EventSource" + }, + "FunctionName":{ + "shape":"FunctionName", + "location":"querystring", + "locationName":"FunctionName" + }, + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListEventSourcesResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "EventSources":{"shape":"EventSourceList"} + } + }, + "ListFunctionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListFunctionsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "Functions":{"shape":"FunctionList"} + } + }, + "Long":{"type":"long"}, + "Map":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "MaxListItems":{ + "type":"integer", + "min":1, + "max":10000 + }, + "MemorySize":{ + "type":"integer", + "min":128, + "max":1024 + }, + "Mode":{ + "type":"string", + "enum":["event"] + }, + "RemoveEventSourceRequest":{ + "type":"structure", + "required":["UUID"], + "members":{ + "UUID":{ 
+ "shape":"String", + "location":"uri", + "locationName":"UUID" + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleArn":{ + "type":"string", + "pattern":"arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" + }, + "Runtime":{ + "type":"string", + "enum":["nodejs"] + }, + "ServiceException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "String":{"type":"string"}, + "Timeout":{ + "type":"integer", + "min":1, + "max":60 + }, + "Timestamp":{"type":"string"}, + "UpdateFunctionConfigurationRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Role":{ + "shape":"RoleArn", + "location":"querystring", + "locationName":"Role" + }, + "Handler":{ + "shape":"Handler", + "location":"querystring", + "locationName":"Handler" + }, + "Description":{ + "shape":"Description", + "location":"querystring", + "locationName":"Description" + }, + "Timeout":{ + "shape":"Timeout", + "location":"querystring", + "locationName":"Timeout" + }, + "MemorySize":{ + "shape":"MemorySize", + "location":"querystring", + "locationName":"MemorySize" + } + } + }, + "UploadFunctionRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "FunctionZip", + "Runtime", + "Role", + "Handler", + "Mode" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "FunctionZip":{"shape":"Blob"}, + "Runtime":{ + "shape":"Runtime", + "location":"querystring", + "locationName":"Runtime" + }, + "Role":{ + "shape":"RoleArn", + "location":"querystring", + "locationName":"Role" + }, + "Handler":{ + "shape":"Handler", + "location":"querystring", + 
"locationName":"Handler" + }, + "Mode":{ + "shape":"Mode", + "location":"querystring", + "locationName":"Mode" + }, + "Description":{ + "shape":"Description", + "location":"querystring", + "locationName":"Description" + }, + "Timeout":{ + "shape":"Timeout", + "location":"querystring", + "locationName":"Timeout" + }, + "MemorySize":{ + "shape":"MemorySize", + "location":"querystring", + "locationName":"MemorySize" + } + }, + "payload":"FunctionZip" + }, + "Boolean":{"type":"boolean"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,303 @@ +{ + "operations": { + "AddEventSource": "

    Identifies a stream as an event source for an AWS Lambda function. It can be either an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda invokes the specified function when records are posted to the stream.

    This is the pull model, where AWS Lambda invokes the function. For more information, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    This association between an Amazon Kinesis stream and an AWS Lambda function is called the event source mapping. You provide the configuration information (for example, which stream to read from and which AWS Lambda function to invoke) for the event source mapping in the request body.

    Each event source, such as a Kinesis stream, can only be associated with one AWS Lambda function. If you call AddEventSource for an event source that is already mapped to another AWS Lambda function, the existing mapping is updated to call the new function instead of the old one.

    This operation requires permission for the iam:PassRole action for the IAM role. It also requires permission for the lambda:AddEventSource action.

    ", + "DeleteFunction": "

    Deletes the specified Lambda function code and configuration.

    This operation requires permission for the lambda:DeleteFunction action.

    ", + "GetEventSource": "

    Returns configuration information for the specified event source mapping (see AddEventSource).

    This operation requires permission for the lambda:GetEventSource action.

    ", + "GetFunction": "

    Returns the configuration information of the Lambda function and a presigned URL link to the .zip file you uploaded with UploadFunction so you can download the .zip file. Note that the URL is valid for up to 10 minutes. The configuration information is the same information you provided as parameters when uploading the function.

    This operation requires permission for the lambda:GetFunction action.

    ", + "GetFunctionConfiguration": "

    Returns the configuration information of the Lambda function. This is the same information you provided as parameters when uploading the function by using UploadFunction.

    This operation requires permission for the lambda:GetFunctionConfiguration operation.

    ", + "InvokeAsync": "

    Submits an invocation request to AWS Lambda. Upon receiving the request, Lambda executes the specified function asynchronously. To see the logs generated by the Lambda function execution, see the CloudWatch logs console.

    This operation requires permission for the lambda:InvokeFunction action.

    ", + "ListEventSources": "

    Returns a list of event source mappings you created using the AddEventSource (see AddEventSource), where you identify a stream as event source. This list does not include Amazon S3 event sources.

    For each mapping, the API returns configuration information. You can optionally specify filters to retrieve specific event source mappings.

    This operation requires permission for the lambda:ListEventSources action.

    ", + "ListFunctions": "

    Returns a list of your Lambda functions. For each function, the response includes the function configuration information. You must use GetFunction to retrieve the code for your function.

    This operation requires permission for the lambda:ListFunctions action.

    ", + "RemoveEventSource": "

    Removes an event source mapping. This means AWS Lambda will no longer invoke the function for events in the associated source.

    This operation requires permission for the lambda:RemoveEventSource action.

    ", + "UpdateFunctionConfiguration": "

    Updates the configuration parameters for the specified Lambda function by using the values provided in the request. You provide only the parameters you want to change. This operation must only be used on an existing Lambda function and cannot be used to update the function's code.

    This operation requires permission for the lambda:UpdateFunctionConfiguration action.

    ", + "UploadFunction": "

    Creates a new Lambda function or updates an existing function. The function metadata is created from the request parameters, and the code for the function is provided by a .zip file in the request body. If the function name already exists, the existing Lambda function is updated with the new code and metadata.

    This operation requires permission for the lambda:UploadFunction action.

    " + }, + "service": "AWS Lambda

    Overview

    This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides additional information. For the service overview, go to What is AWS Lambda, and for information about how the service works, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    ", + "shapes": { + "AddEventSourceRequest": { + "base": null, + "refs": { + } + }, + "Blob": { + "base": null, + "refs": { + "InvokeAsyncRequest$InvokeArgs": "

    JSON that you want to provide to your Lambda function as input.

    ", + "UploadFunctionRequest$FunctionZip": "

    A .zip file containing your packaged source code. For more information about creating a .zip file, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    " + } + }, + "DeleteFunctionRequest": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "FunctionConfiguration$Description": "

    The user-provided description.

    ", + "UpdateFunctionConfigurationRequest$Description": "

    A short user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.

    ", + "UploadFunctionRequest$Description": "

    A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.

    " + } + }, + "EventSourceConfiguration": { + "base": "

    Describes mapping between an Amazon Kinesis stream and a Lambda function.

    ", + "refs": { + "EventSourceList$member": null + } + }, + "EventSourceList": { + "base": null, + "refs": { + "ListEventSourcesResponse$EventSources": "

    An array of EventSourceConfiguration objects.

    " + } + }, + "FunctionArn": { + "base": null, + "refs": { + "FunctionConfiguration$FunctionARN": "

    The Amazon Resource Name (ARN) assigned to the function.

    " + } + }, + "FunctionCodeLocation": { + "base": "

    The object for the Lambda function location.

    ", + "refs": { + "GetFunctionResponse$Code": null + } + }, + "FunctionConfiguration": { + "base": "

    A complex type that describes function metadata.

    ", + "refs": { + "FunctionList$member": null, + "GetFunctionResponse$Configuration": null + } + }, + "FunctionList": { + "base": null, + "refs": { + "ListFunctionsResponse$Functions": "

    A list of Lambda functions.

    " + } + }, + "FunctionName": { + "base": null, + "refs": { + "AddEventSourceRequest$FunctionName": "

    The Lambda function to invoke when AWS Lambda detects an event on the stream.

    ", + "DeleteFunctionRequest$FunctionName": "

    The Lambda function to delete.

    ", + "EventSourceConfiguration$FunctionName": "

    The Lambda function to invoke when AWS Lambda detects an event on the stream.

    ", + "FunctionConfiguration$FunctionName": "

    The name of the function.

    ", + "GetFunctionConfigurationRequest$FunctionName": "

    The name of the Lambda function for which you want to retrieve the configuration information.

    ", + "GetFunctionRequest$FunctionName": "

    The Lambda function name.

    ", + "InvokeAsyncRequest$FunctionName": "

    The Lambda function name.

    ", + "ListEventSourcesRequest$FunctionName": "

    The name of the AWS Lambda function.

    ", + "UpdateFunctionConfigurationRequest$FunctionName": "

    The name of the Lambda function.

    ", + "UploadFunctionRequest$FunctionName": "

    The name you want to assign to the function you are uploading. The function names appear in the console and are returned in the ListFunctions API. Function names are used to specify functions to other AWS Lambda APIs, such as InvokeAsync.

    " + } + }, + "GetEventSourceRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionResponse": { + "base": "

    This response contains the object for AWS Lambda function location (see API_FunctionCodeLocation

    ", + "refs": { + } + }, + "Handler": { + "base": null, + "refs": { + "FunctionConfiguration$Handler": "

    The function Lambda calls to begin executing your function.

    ", + "UpdateFunctionConfigurationRequest$Handler": "

    The function that Lambda calls to begin executing your function. For Node.js, it is the module-name.export value in your function.

    ", + "UploadFunctionRequest$Handler": "

    The function that Lambda calls to begin execution. For Node.js, it is the module-name.export value in your function.

    " + } + }, + "HttpStatus": { + "base": null, + "refs": { + "InvokeAsyncResponse$Status": "

    It will be 202 upon success.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AddEventSourceRequest$BatchSize": "

    The largest number of records that AWS Lambda will give to your function in a single event. The default is 100 records.

    ", + "EventSourceConfiguration$BatchSize": "

    The largest number of records that AWS Lambda will POST in the invocation request to your function.

    " + } + }, + "InvalidParameterValueException": { + "base": "

    One of the parameters in the request is invalid. For example, if you provided an IAM role in the UploadFunction or the UpdateFunctionConfiguration API that AWS Lambda is unable to assume, you will get this exception.

    ", + "refs": { + } + }, + "InvalidRequestContentException": { + "base": "

    The request body could not be parsed as JSON.

    ", + "refs": { + } + }, + "InvokeAsyncRequest": { + "base": null, + "refs": { + } + }, + "InvokeAsyncResponse": { + "base": "

    Upon success, it returns empty response. Otherwise, throws an exception.

    ", + "refs": { + } + }, + "ListEventSourcesRequest": { + "base": null, + "refs": { + } + }, + "ListEventSourcesResponse": { + "base": "

    Contains a list of event sources (see API_EventSourceConfiguration)

    ", + "refs": { + } + }, + "ListFunctionsRequest": { + "base": null, + "refs": { + } + }, + "ListFunctionsResponse": { + "base": "

    Contains a list of AWS Lambda function configurations (see API_FunctionConfiguration.

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "FunctionConfiguration$CodeSize": "

    The size, in bytes, of the function .zip file you uploaded.

    " + } + }, + "Map": { + "base": null, + "refs": { + "AddEventSourceRequest$Parameters": "

    A map (key-value pairs) defining the configuration for AWS Lambda to use when reading the event source. Currently, AWS Lambda supports only the InitialPositionInStream key. The valid values are: \"TRIM_HORIZON\" and \"LATEST\". The default value is \"TRIM_HORIZON\". For more information, go to ShardIteratorType in the Amazon Kinesis Service API Reference.

    ", + "EventSourceConfiguration$Parameters": "

    The map (key-value pairs) defining the configuration for AWS Lambda to use when reading the event source.

    " + } + }, + "MaxListItems": { + "base": null, + "refs": { + "ListEventSourcesRequest$MaxItems": "

    Optional integer. Specifies the maximum number of event sources to return in response. This value must be greater than 0.

    ", + "ListFunctionsRequest$MaxItems": "

    Optional integer. Specifies the maximum number of AWS Lambda functions to return in response. This parameter value must be greater than 0.

    " + } + }, + "MemorySize": { + "base": null, + "refs": { + "FunctionConfiguration$MemorySize": "

    The memory size, in MB, you configured for the function. Must be a multiple of 64 MB.

    ", + "UpdateFunctionConfigurationRequest$MemorySize": "

    The amount of memory, in MB, your Lambda function is given. Lambda uses this memory size to infer the amount of CPU allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory compared to an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

    ", + "UploadFunctionRequest$MemorySize": "

    The amount of memory, in MB, your Lambda function is given. Lambda uses this memory size to infer the amount of CPU allocated to your function. Your function use-case determines your CPU and memory requirements. For example, database operation might need less memory compared to image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

    " + } + }, + "Mode": { + "base": null, + "refs": { + "FunctionConfiguration$Mode": "

    The type of the Lambda function you uploaded.

    ", + "UploadFunctionRequest$Mode": "

    How the Lambda function will be invoked. Lambda supports only the \"event\" mode.

    " + } + }, + "RemoveEventSourceRequest": { + "base": null, + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The function or the event source specified in the request does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "AddEventSourceRequest$Role": "

    The ARN of the IAM role (invocation role) that AWS Lambda can assume to read from the stream and invoke the function.

    ", + "EventSourceConfiguration$Role": "

    The ARN of the IAM role (invocation role) that AWS Lambda can assume to read from the stream and invoke the function.

    ", + "FunctionConfiguration$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources.

    ", + "UpdateFunctionConfigurationRequest$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when it executes your function.

    ", + "UploadFunctionRequest$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources.

    " + } + }, + "Runtime": { + "base": null, + "refs": { + "FunctionConfiguration$Runtime": "

    The runtime environment for the Lambda function.

    ", + "UploadFunctionRequest$Runtime": "

    The runtime environment for the Lambda function you are uploading. Currently, Lambda supports only \"nodejs\" as the runtime.

    " + } + }, + "ServiceException": { + "base": "

    The AWS Lambda service encountered an internal error.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddEventSourceRequest$EventSource": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the event source. Any record added to this stream causes AWS Lambda to invoke your Lambda function. AWS Lambda POSTs the Amazon Kinesis event, containing records, to your Lambda function as JSON.

    ", + "EventSourceConfiguration$UUID": "

    The AWS Lambda assigned opaque identifier for the mapping.

    ", + "EventSourceConfiguration$EventSource": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the source of events.

    ", + "EventSourceConfiguration$Status": "

    The description of the health of the event source mapping. Valid values are: \"PENDING\", \"OK\", and \"PROBLEM:message\". Initially this status is \"PENDING\". When AWS Lambda begins processing events, it changes the status to \"OK\".

    ", + "FunctionCodeLocation$RepositoryType": "

    The repository from which you can download the function.

    ", + "FunctionCodeLocation$Location": "

    The presigned URL you can use to download the function's .zip file that you previously uploaded. The URL is valid for up to 10 minutes.

    ", + "FunctionConfiguration$ConfigurationId": "

    A Lambda-assigned unique identifier for the current function code and related configuration.

    ", + "GetEventSourceRequest$UUID": "

    The AWS Lambda assigned ID of the event source mapping.

    ", + "InvalidParameterValueException$Type": null, + "InvalidParameterValueException$message": null, + "InvalidRequestContentException$Type": null, + "InvalidRequestContentException$message": null, + "ListEventSourcesRequest$EventSourceArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream.

    ", + "ListEventSourcesRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListEventSources operation. If present, specifies to continue the list from where the returning call left off.

    ", + "ListEventSourcesResponse$NextMarker": "

    A string, present if there are more event source mappings.

    ", + "ListFunctionsRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListFunctions operation. If present, indicates where to continue the listing.

    ", + "ListFunctionsResponse$NextMarker": "

    A string, present if there are more functions.

    ", + "Map$key": null, + "Map$value": null, + "RemoveEventSourceRequest$UUID": "

    The event source mapping ID.

    ", + "ResourceNotFoundException$Type": null, + "ResourceNotFoundException$Message": null, + "ServiceException$Type": null, + "ServiceException$Message": null + } + }, + "Timeout": { + "base": null, + "refs": { + "FunctionConfiguration$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    ", + "UpdateFunctionConfigurationRequest$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    ", + "UploadFunctionRequest$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "EventSourceConfiguration$LastModified": "

    The UTC time string indicating the last time the event mapping was updated.

    ", + "FunctionConfiguration$LastModified": "

    The timestamp of the last time you updated the function.

    " + } + }, + "UpdateFunctionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "UploadFunctionRequest": { + "base": null, + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "EventSourceConfiguration$IsActive": "

    Indicates whether the event source mapping is currently honored. Events are only processed if IsActive is true.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2014-11-11/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "pagination": { + "ListEventSources": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "EventSources" + }, + "ListFunctions": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "Functions" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1648 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-03-31", + "endpointPrefix":"lambda", + "serviceFullName":"AWS Lambda", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "AddPermission":{ + "name":"AddPermission", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions/{FunctionName}/policy", + "responseCode":201 + }, + "input":{"shape":"AddPermissionRequest"}, + "output":{"shape":"AddPermissionResponse"}, + "errors":[ + { + 
"shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"PolicyLengthExceededException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "CreateAlias":{ + "name":"CreateAlias", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", + "responseCode":201 + }, + "input":{"shape":"CreateAliasRequest"}, + "output":{"shape":"AliasConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "CreateEventSourceMapping":{ + "name":"CreateEventSourceMapping", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/event-source-mappings/", + "responseCode":202 + }, + "input":{"shape":"CreateEventSourceMappingRequest"}, + "output":{"shape":"EventSourceMappingConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + 
"shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "CreateFunction":{ + "name":"CreateFunction", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions", + "responseCode":201 + }, + "input":{"shape":"CreateFunctionRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"CodeStorageExceededException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteAlias":{ + "name":"DeleteAlias", + "http":{ + "method":"DELETE", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", + "responseCode":204 + }, + "input":{"shape":"DeleteAliasRequest"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteEventSourceMapping":{ + "name":"DeleteEventSourceMapping", + "http":{ + "method":"DELETE", + "requestUri":"/2015-03-31/event-source-mappings/{UUID}", + "responseCode":202 + }, + "input":{"shape":"DeleteEventSourceMappingRequest"}, + "output":{"shape":"EventSourceMappingConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { 
+ "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "DeleteFunction":{ + "name":"DeleteFunction", + "http":{ + "method":"DELETE", + "requestUri":"/2015-03-31/functions/{FunctionName}", + "responseCode":204 + }, + "input":{"shape":"DeleteFunctionRequest"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "GetAlias":{ + "name":"GetAlias", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", + "responseCode":200 + }, + "input":{"shape":"GetAliasRequest"}, + "output":{"shape":"AliasConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetEventSourceMapping":{ + "name":"GetEventSourceMapping", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/event-source-mappings/{UUID}", + "responseCode":200 + }, + "input":{"shape":"GetEventSourceMappingRequest"}, + "output":{"shape":"EventSourceMappingConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + 
"shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "GetFunction":{ + "name":"GetFunction", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}", + "responseCode":200 + }, + "input":{"shape":"GetFunctionRequest"}, + "output":{"shape":"GetFunctionResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetFunctionConfiguration":{ + "name":"GetFunctionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", + "responseCode":200 + }, + "input":{"shape":"GetFunctionConfigurationRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/policy", + "responseCode":200 + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{"shape":"GetPolicyResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + 
"exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "Invoke":{ + "name":"Invoke", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions/{FunctionName}/invocations" + }, + "input":{"shape":"InvocationRequest"}, + "output":{"shape":"InvocationResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestContentException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"RequestTooLargeException", + "error":{"httpStatusCode":413}, + "exception":true + }, + { + "shape":"UnsupportedMediaTypeException", + "error":{"httpStatusCode":415}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "InvokeAsync":{ + "name":"InvokeAsync", + "http":{ + "method":"POST", + "requestUri":"/2014-11-13/functions/{FunctionName}/invoke-async/", + "responseCode":202 + }, + "input":{ + "shape":"InvokeAsyncRequest", + "deprecated":true + }, + "output":{ + "shape":"InvokeAsyncResponse", + "deprecated":true + }, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidRequestContentException", + "error":{"httpStatusCode":400}, + "exception":true + } + ], + "deprecated":true + }, + "ListAliases":{ + "name":"ListAliases", + "http":{ + 
"method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", + "responseCode":200 + }, + "input":{"shape":"ListAliasesRequest"}, + "output":{"shape":"ListAliasesResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "ListEventSourceMappings":{ + "name":"ListEventSourceMappings", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/event-source-mappings/", + "responseCode":200 + }, + "input":{"shape":"ListEventSourceMappingsRequest"}, + "output":{"shape":"ListEventSourceMappingsResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "ListFunctions":{ + "name":"ListFunctions", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/", + "responseCode":200 + }, + "input":{"shape":"ListFunctionsRequest"}, + "output":{"shape":"ListFunctionsResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "ListVersionsByFunction":{ + "name":"ListVersionsByFunction", + "http":{ + "method":"GET", + "requestUri":"/2015-03-31/functions/{FunctionName}/versions", + "responseCode":200 + }, + "input":{"shape":"ListVersionsByFunctionRequest"}, + 
"output":{"shape":"ListVersionsByFunctionResponse"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "PublishVersion":{ + "name":"PublishVersion", + "http":{ + "method":"POST", + "requestUri":"/2015-03-31/functions/{FunctionName}/versions", + "responseCode":201 + }, + "input":{"shape":"PublishVersionRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"CodeStorageExceededException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "RemovePermission":{ + "name":"RemovePermission", + "http":{ + "method":"DELETE", + "requestUri":"/2015-03-31/functions/{FunctionName}/policy/{StatementId}", + "responseCode":204 + }, + "input":{"shape":"RemovePermissionRequest"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateAlias":{ + "name":"UpdateAlias", + "http":{ + "method":"PUT", + 
"requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", + "responseCode":200 + }, + "input":{"shape":"UpdateAliasRequest"}, + "output":{"shape":"AliasConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + }, + "UpdateEventSourceMapping":{ + "name":"UpdateEventSourceMapping", + "http":{ + "method":"PUT", + "requestUri":"/2015-03-31/event-source-mappings/{UUID}", + "responseCode":202 + }, + "input":{"shape":"UpdateEventSourceMappingRequest"}, + "output":{"shape":"EventSourceMappingConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"ResourceConflictException", + "error":{"httpStatusCode":409}, + "exception":true + } + ] + }, + "UpdateFunctionCode":{ + "name":"UpdateFunctionCode", + "http":{ + "method":"PUT", + "requestUri":"/2015-03-31/functions/{FunctionName}/code", + "responseCode":200 + }, + "input":{"shape":"UpdateFunctionCodeRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + }, + { + "shape":"CodeStorageExceededException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateFunctionConfiguration":{ + "name":"UpdateFunctionConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateFunctionConfigurationRequest"}, + "output":{"shape":"FunctionConfiguration"}, + "errors":[ + { + "shape":"ServiceException", + "error":{"httpStatusCode":500}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TooManyRequestsException", + "error":{"httpStatusCode":429}, + "exception":true + } + ] + } + }, + "shapes":{ + "Action":{ + "type":"string", + "pattern":"(lambda:[*]|lambda:[a-zA-Z]+|[*])" + }, + "AddPermissionRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "StatementId", + "Action", + "Principal" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "StatementId":{"shape":"StatementId"}, + "Action":{"shape":"Action"}, + "Principal":{"shape":"Principal"}, + "SourceArn":{"shape":"Arn"}, + "SourceAccount":{"shape":"SourceOwner"}, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "AddPermissionResponse":{ + "type":"structure", + "members":{ + "Statement":{"shape":"String"} + } + }, + "Alias":{ + "type":"string", + "min":1, + "max":128, + "pattern":"(?!^[0-9]+$)([a-zA-Z0-9-_]+)" + }, + "AliasConfiguration":{ + "type":"structure", + "members":{ + "AliasArn":{"shape":"FunctionArn"}, + "Name":{"shape":"Alias"}, + "FunctionVersion":{"shape":"Version"}, + "Description":{"shape":"Description"} + } + }, + 
"AliasList":{ + "type":"list", + "member":{"shape":"AliasConfiguration"} + }, + "Arn":{ + "type":"string", + "pattern":"arn:aws:([a-zA-Z0-9\\-])+:([a-z]{2}-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" + }, + "BatchSize":{ + "type":"integer", + "min":1, + "max":10000 + }, + "Blob":{"type":"blob"}, + "BlobStream":{ + "type":"blob", + "streaming":true + }, + "Boolean":{"type":"boolean"}, + "CodeStorageExceededException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateAliasRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "Name", + "FunctionVersion" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Name":{"shape":"Alias"}, + "FunctionVersion":{"shape":"Version"}, + "Description":{"shape":"Description"} + } + }, + "CreateEventSourceMappingRequest":{ + "type":"structure", + "required":[ + "EventSourceArn", + "FunctionName", + "StartingPosition" + ], + "members":{ + "EventSourceArn":{"shape":"Arn"}, + "FunctionName":{"shape":"FunctionName"}, + "Enabled":{"shape":"Enabled"}, + "BatchSize":{"shape":"BatchSize"}, + "StartingPosition":{"shape":"EventSourcePosition"} + } + }, + "CreateFunctionRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "Runtime", + "Role", + "Handler", + "Code" + ], + "members":{ + "FunctionName":{"shape":"FunctionName"}, + "Runtime":{"shape":"Runtime"}, + "Role":{"shape":"RoleArn"}, + "Handler":{"shape":"Handler"}, + "Code":{"shape":"FunctionCode"}, + "Description":{"shape":"Description"}, + "Timeout":{"shape":"Timeout"}, + "MemorySize":{"shape":"MemorySize"}, + "Publish":{"shape":"Boolean"} + } + }, + "Date":{"type":"timestamp"}, + "DeleteAliasRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "Name" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, 
+ "Name":{ + "shape":"Alias", + "location":"uri", + "locationName":"Name" + } + } + }, + "DeleteEventSourceMappingRequest":{ + "type":"structure", + "required":["UUID"], + "members":{ + "UUID":{ + "shape":"String", + "location":"uri", + "locationName":"UUID" + } + } + }, + "DeleteFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "Description":{ + "type":"string", + "min":0, + "max":256 + }, + "Enabled":{"type":"boolean"}, + "EventSourceMappingConfiguration":{ + "type":"structure", + "members":{ + "UUID":{"shape":"String"}, + "BatchSize":{"shape":"BatchSize"}, + "EventSourceArn":{"shape":"Arn"}, + "FunctionArn":{"shape":"FunctionArn"}, + "LastModified":{"shape":"Date"}, + "LastProcessingResult":{"shape":"String"}, + "State":{"shape":"String"}, + "StateTransitionReason":{"shape":"String"} + } + }, + "EventSourceMappingsList":{ + "type":"list", + "member":{"shape":"EventSourceMappingConfiguration"} + }, + "EventSourcePosition":{ + "type":"string", + "enum":[ + "TRIM_HORIZON", + "LATEST" + ] + }, + "FunctionArn":{ + "type":"string", + "pattern":"arn:aws:lambda:[a-z]{2}-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" 
+ }, + "FunctionCode":{ + "type":"structure", + "members":{ + "ZipFile":{"shape":"Blob"}, + "S3Bucket":{"shape":"S3Bucket"}, + "S3Key":{"shape":"S3Key"}, + "S3ObjectVersion":{"shape":"S3ObjectVersion"} + } + }, + "FunctionCodeLocation":{ + "type":"structure", + "members":{ + "RepositoryType":{"shape":"String"}, + "Location":{"shape":"String"} + } + }, + "FunctionConfiguration":{ + "type":"structure", + "members":{ + "FunctionName":{"shape":"FunctionName"}, + "FunctionArn":{"shape":"FunctionArn"}, + "Runtime":{"shape":"Runtime"}, + "Role":{"shape":"RoleArn"}, + "Handler":{"shape":"Handler"}, + "CodeSize":{"shape":"Long"}, + "Description":{"shape":"Description"}, + "Timeout":{"shape":"Timeout"}, + "MemorySize":{"shape":"MemorySize"}, + "LastModified":{"shape":"Timestamp"}, + "CodeSha256":{"shape":"String"}, + "Version":{"shape":"Version"} + } + }, + "FunctionList":{ + "type":"list", + "member":{"shape":"FunctionConfiguration"} + }, + "FunctionName":{ + "type":"string", + "min":1, + "max":140, + "pattern":"(arn:aws:lambda:)?([a-z]{2}-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" 
+ }, + "GetAliasRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "Name" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Name":{ + "shape":"Alias", + "location":"uri", + "locationName":"Name" + } + } + }, + "GetEventSourceMappingRequest":{ + "type":"structure", + "required":["UUID"], + "members":{ + "UUID":{ + "shape":"String", + "location":"uri", + "locationName":"UUID" + } + } + }, + "GetFunctionConfigurationRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "GetFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "GetFunctionResponse":{ + "type":"structure", + "members":{ + "Configuration":{"shape":"FunctionConfiguration"}, + "Code":{"shape":"FunctionCodeLocation"} + } + }, + "GetPolicyRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{"shape":"Qualifier"} + } + }, + "GetPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{"shape":"String"} + } + }, + "Handler":{ + "type":"string", + "max":128, + "pattern":"[^\\s]+" + }, + "HttpStatus":{"type":"integer"}, + "Integer":{"type":"integer"}, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestContentException":{ + "type":"structure", + 
"members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvocationRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "InvocationType":{ + "shape":"InvocationType", + "location":"header", + "locationName":"X-Amz-Invocation-Type" + }, + "LogType":{ + "shape":"LogType", + "location":"header", + "locationName":"X-Amz-Log-Type" + }, + "ClientContext":{ + "shape":"String", + "location":"header", + "locationName":"X-Amz-Client-Context" + }, + "Payload":{"shape":"Blob"}, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + }, + "payload":"Payload" + }, + "InvocationResponse":{ + "type":"structure", + "members":{ + "StatusCode":{ + "shape":"Integer", + "location":"statusCode" + }, + "FunctionError":{ + "shape":"String", + "location":"header", + "locationName":"X-Amz-Function-Error" + }, + "LogResult":{ + "shape":"String", + "location":"header", + "locationName":"X-Amz-Log-Result" + }, + "Payload":{"shape":"Blob"} + }, + "payload":"Payload" + }, + "InvocationType":{ + "type":"string", + "enum":[ + "Event", + "RequestResponse", + "DryRun" + ] + }, + "InvokeAsyncRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "InvokeArgs" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "InvokeArgs":{"shape":"BlobStream"} + }, + "deprecated":true, + "payload":"InvokeArgs" + }, + "InvokeAsyncResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"HttpStatus", + "location":"statusCode" + } + }, + "deprecated":true + }, + "ListAliasesRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "FunctionVersion":{ 
+ "shape":"Version", + "location":"querystring", + "locationName":"FunctionVersion" + }, + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListAliasesResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "Aliases":{"shape":"AliasList"} + } + }, + "ListEventSourceMappingsRequest":{ + "type":"structure", + "members":{ + "EventSourceArn":{ + "shape":"Arn", + "location":"querystring", + "locationName":"EventSourceArn" + }, + "FunctionName":{ + "shape":"FunctionName", + "location":"querystring", + "locationName":"FunctionName" + }, + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListEventSourceMappingsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "EventSourceMappings":{"shape":"EventSourceMappingsList"} + } + }, + "ListFunctionsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListFunctionsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"String"}, + "Functions":{"shape":"FunctionList"} + } + }, + "ListVersionsByFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "Marker":{ + "shape":"String", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListVersionsByFunctionResponse":{ + "type":"structure", + "members":{ + 
"NextMarker":{"shape":"String"}, + "Versions":{"shape":"FunctionList"} + } + }, + "LogType":{ + "type":"string", + "enum":[ + "None", + "Tail" + ] + }, + "Long":{"type":"long"}, + "MaxListItems":{ + "type":"integer", + "min":1, + "max":10000 + }, + "MemorySize":{ + "type":"integer", + "min":128, + "max":1536 + }, + "PolicyLengthExceededException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Principal":{ + "type":"string", + "pattern":".*" + }, + "PublishVersionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "CodeSha256":{"shape":"String"}, + "Description":{"shape":"Description"} + } + }, + "Qualifier":{ + "type":"string", + "min":1, + "max":128, + "pattern":"(|[a-zA-Z0-9$_]+)" + }, + "RemovePermissionRequest":{ + "type":"structure", + "required":[ + "FunctionName", + "StatementId" + ], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "StatementId":{ + "shape":"StatementId", + "location":"uri", + "locationName":"StatementId" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + } + } + }, + "RequestTooLargeException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":413}, + "exception":true + }, + "ResourceConflictException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleArn":{ + "type":"string", + 
"pattern":"arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" + }, + "Runtime":{ + "type":"string", + "enum":[ + "nodejs", + "java8", + "python2.7" + ] + }, + "S3Bucket":{ + "type":"string", + "min":3, + "max":63, + "pattern":"^[0-9A-Za-z\\.\\-_]*(?Adds a permission to the resource policy associated with the specified AWS Lambda function. You use resource policies to grant permissions to event sources that use \"push\" model. In \"push\" model, event sources (such as Amazon S3 and custom applications) invoke your Lambda function. Each permission you add to the resource policy allows an event source, permission to invoke the Lambda function.

    For information about the push model, see AWS Lambda: How it Works.

    If you are using versioning feature (see AWS Lambda Function Versioning and Aliases), a Lambda function can have multiple ARNs that can be used to invoke the function. Note that, each permission you add to resource policy using this API is specific to an ARN, specified using the Qualifier parameter

    This operation requires permission for the lambda:AddPermission action.

    ", + "CreateAlias": "

    Creates an alias to the specified Lambda function version. For more information, see Introduction to AWS Lambda Aliases

    This requires permission for the lambda:CreateAlias action.

    ", + "CreateEventSourceMapping": "

    Identifies a stream as an event source for a Lambda function. It can be either an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda invokes the specified function when records are posted to the stream.

    This is the pull model, where AWS Lambda invokes the function. For more information, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    This association between an Amazon Kinesis stream and a Lambda function is called the event source mapping. You provide the configuration information (for example, which stream to read from and which Lambda function to invoke) for the event source mapping in the request body.

    Each event source, such as an Amazon Kinesis or a DynamoDB stream, can be associated with multiple AWS Lambda function. A given Lambda function can be associated with multiple AWS event sources.

    This operation requires permission for the lambda:CreateEventSourceMapping action.

    ", + "CreateFunction": "

    Creates a new Lambda function. The function metadata is created from the request parameters, and the code for the function is provided by a .zip file in the request body. If the function name already exists, the operation will fail. Note that the function name is case-sensitive.

    This operation requires permission for the lambda:CreateFunction action.

    ", + "DeleteAlias": "

    Deletes specified Lambda function alias. For more information, see Introduction to AWS Lambda Aliases

    This requires permission for the lambda:DeleteAlias action.

    ", + "DeleteEventSourceMapping": "

    Removes an event source mapping. This means AWS Lambda will no longer invoke the function for events in the associated source.

    This operation requires permission for the lambda:DeleteEventSourceMapping action.

    ", + "DeleteFunction": "

    Deletes the specified Lambda function code and configuration.

    If you don't specify a function version, AWS Lambda will delete the function, including all its versions, and any aliases pointing to the function versions.

    When you delete a function the associated resource policy is also deleted. You will need to delete the event source mappings explicitly.

    For information about function versioning, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:DeleteFunction action.

    ", + "GetAlias": "

    Returns the specified alias information such as the alias ARN, description, and function version it is pointing to. For more information, see Introduction to AWS Lambda Aliases

    This requires permission for the lambda:GetAlias action.

    ", + "GetEventSourceMapping": "

    Returns configuration information for the specified event source mapping (see CreateEventSourceMapping).

    This operation requires permission for the lambda:GetEventSourceMapping action.

    ", + "GetFunction": "

    Returns the configuration information of the Lambda function and a presigned URL link to the .zip file you uploaded with CreateFunction so you can download the .zip file. Note that the URL is valid for up to 10 minutes. The configuration information is the same information you provided as parameters when uploading the function.

    Using the optional Qualifier parameter, you can specify a specific function version for which you want this information. If you don't specify this parameter, the API uses unqualified function ARN which return information about the $LATEST version of the Lambda function. For more information, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:GetFunction action.

    ", + "GetFunctionConfiguration": "

    Returns the configuration information of the Lambda function. This the same information you provided as parameters when uploading the function by using CreateFunction.

    You can use the optional Qualifier parameter to retrieve configuration information for a specific Lambda function version. If you don't provide it, the API returns information about the $LATEST version of the function. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

    This operation requires permission for the lambda:GetFunctionConfiguration operation.

    ", + "GetPolicy": "

    Returns the resource policy, containing a list of permissions that apply to a specific ARN that you specify via the Qualifier parameter.

    For information about adding permissions, see AddPermission.

    You need permission for the lambda:GetPolicy action.

    ", + "Invoke": "

    Invokes a specific Lambda function version.

    If you don't provide the Qualifier parameter, it uses the unqualified function ARN which results in invocation of the $LATEST version of the Lambda function (when you create a Lambda function, the $LATEST is the version). The AWS Lambda versioning and aliases feature allows you to publish multiple versions of a Lambda function and also create aliases for each function version. So each your Lambda function version can be invoked using multiple ARNs. For more information, see AWS Lambda Function Versioning and Aliases. Using the Qualifier parameter, you can specify a function version or alias name to invoke specific function version. If you specify function version, the API uses the qualified function ARN to invoke a specific function version. If you specify alias name, the API uses the alias ARN to invoke the function version to which the alias points.

    This operation requires permission for the lambda:InvokeFunction action.

    ", + "InvokeAsync": "This API is deprecated. We recommend you use Invoke API (see Invoke).

    Submits an invocation request to AWS Lambda. Upon receiving the request, Lambda executes the specified function asynchronously. To see the logs generated by the Lambda function execution, see the CloudWatch logs console.

    This operation requires permission for the lambda:InvokeFunction action.

    ", + "ListAliases": "

    Returns list of aliases created for a Lambda function. For each alias, the response includes information such as the alias ARN, description, alias name, and the function version to which it points. For more information, see Introduction to AWS Lambda Aliases

    This requires permission for the lambda:ListAliases action.

    ", + "ListEventSourceMappings": "

    Returns a list of event source mappings you created using the CreateEventSourceMapping (see CreateEventSourceMapping), where you identify a stream as an event source. This list does not include Amazon S3 event sources.

    For each mapping, the API returns configuration information. You can optionally specify filters to retrieve specific event source mappings.

    This operation requires permission for the lambda:ListEventSourceMappings action.

    ", + "ListFunctions": "

    Returns a list of your Lambda functions. For each function, the response includes the function configuration information. You must use GetFunction to retrieve the code for your function.

    This operation requires permission for the lambda:ListFunctions action.

    ", + "ListVersionsByFunction": "

    List all versions of a function.

    ", + "PublishVersion": "

    Publishes a version of your function from the current snapshot of HEAD. That is, AWS Lambda takes a snapshot of the function code and configuration information from HEAD and publishes a new version. The code and handler of this specific Lambda function version cannot be modified after publication, but you can modify the configuration information.

    ", + "RemovePermission": "

    You can remove individual permissions from a resource policy associated with a Lambda function by providing a statement ID that you provided when you added the permission. The API removes the corresponding permission that is associated with the specific ARN identified by the Qualifier parameter.

    Note that removal of a permission will cause an active event source to lose permission to the function.

    You need permission for the lambda:RemovePermission action.

    ", + "UpdateAlias": "

    Using this API you can update the function version to which the alias points and the alias description. For more information, see Introduction to AWS Lambda Aliases

    This requires permission for the lambda:UpdateAlias action.

    ", + "UpdateEventSourceMapping": "

    You can update an event source mapping. This is useful if you want to change the parameters of the existing mapping without losing your position in the stream. You can change which function will receive the stream records, but to change the stream itself, you must create a new mapping.

    This operation requires permission for the lambda:UpdateEventSourceMapping action.

    ", + "UpdateFunctionCode": "

    Updates the code for the specified Lambda function. This operation must only be used on an existing Lambda function and cannot be used to update the function configuration.

    This operation requires permission for the lambda:UpdateFunctionCode action.

    ", + "UpdateFunctionConfiguration": "

    Updates the configuration parameters for the specified Lambda function by using the values provided in the request. You provide only the parameters you want to change. This operation must only be used on an existing Lambda function and cannot be used to update the function's code.

    This operation requires permission for the lambda:UpdateFunctionConfiguration action.

    " + }, + "service": "AWS Lambda

    Overview

    This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides additional information. For the service overview, go to What is AWS Lambda, and for information about how the service works, go to AWS Lambda: How it Works in the AWS Lambda Developer Guide.

    ", + "shapes": { + "Action": { + "base": null, + "refs": { + "AddPermissionRequest$Action": "

    The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with \"lambda:\" followed by the API name (see Operations). For example, \"lambda:CreateFunction\". You can use wildcard (\"lambda:*\") to grant permission for all AWS Lambda actions.

    " + } + }, + "AddPermissionRequest": { + "base": null, + "refs": { + } + }, + "AddPermissionResponse": { + "base": null, + "refs": { + } + }, + "Alias": { + "base": null, + "refs": { + "AliasConfiguration$Name": "

    Alias name.

    ", + "CreateAliasRequest$Name": "

    Name for the alias you are creating.

    ", + "DeleteAliasRequest$Name": "

    Name of the alias to delete.

    ", + "GetAliasRequest$Name": "

    Name of the alias for which you want to retrieve information.

    ", + "UpdateAliasRequest$Name": "

    The alias name.

    " + } + }, + "AliasConfiguration": { + "base": "

    Provides configuration information about a Lambda function version alias.

    ", + "refs": { + "AliasList$member": null + } + }, + "AliasList": { + "base": null, + "refs": { + "ListAliasesResponse$Aliases": "

    A list of aliases.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "AddPermissionRequest$SourceArn": "

    This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from the specified bucket can invoke the function.

    If you add a permission for the Amazon S3 principal without providing the source ARN, any AWS account that creates a mapping to your function ARN can send events to invoke your Lambda function from Amazon S3.", + "CreateEventSourceMappingRequest$EventSourceArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis or the Amazon DynamoDB stream that is the event source. Any record added to this stream could cause AWS Lambda to invoke your Lambda function, it depends on the BatchSize. AWS Lambda POSTs the Amazon Kinesis event, containing records, to your Lambda function as JSON.

    ", + "EventSourceMappingConfiguration$EventSourceArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the source of events.

    ", + "ListEventSourceMappingsRequest$EventSourceArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis stream.

    " + } + }, + "BatchSize": { + "base": null, + "refs": { + "CreateEventSourceMappingRequest$BatchSize": "

    The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. The default is 100 records.

    ", + "EventSourceMappingConfiguration$BatchSize": "

    The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records.

    ", + "UpdateEventSourceMappingRequest$BatchSize": "

    The maximum number of stream records that can be sent to your Lambda function for a single invocation.

    " + } + }, + "Blob": { + "base": null, + "refs": { + "FunctionCode$ZipFile": "

    A base64-encoded .zip file containing your deployment package. For more information about creating a .zip file, go to Execution Permissions in the AWS Lambda Developer Guide.

    ", + "InvocationRequest$Payload": "

    JSON that you want to provide to your Lambda function as input.

    ", + "InvocationResponse$Payload": "

    It is the JSON representation of the object returned by the Lambda function. This is present only if the invocation type is \"RequestResponse\".

    In the event of a function error this field contains a message describing the error. For the Handled errors the Lambda function will report this message. For Unhandled errors AWS Lambda reports the message.

    ", + "UpdateFunctionCodeRequest$ZipFile": "

    Base64-encoded .zip file containing your packaged source code.

    " + } + }, + "BlobStream": { + "base": null, + "refs": { + "InvokeAsyncRequest$InvokeArgs": "

    JSON that you want to provide to your Lambda function as input.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "CreateFunctionRequest$Publish": "

    This boolean parameter can be used to request AWS Lambda to create the Lambda function and publish a version as an atomic operation.

    ", + "UpdateFunctionCodeRequest$Publish": "

    This boolean parameter can be used to request AWS Lambda to update the Lambda function and publish a version as an atomic operation.

    " + } + }, + "CodeStorageExceededException": { + "base": "

    You have exceeded your maximum total code size per account. Limits

    ", + "refs": { + } + }, + "CreateAliasRequest": { + "base": null, + "refs": { + } + }, + "CreateEventSourceMappingRequest": { + "base": null, + "refs": { + } + }, + "CreateFunctionRequest": { + "base": null, + "refs": { + } + }, + "Date": { + "base": null, + "refs": { + "EventSourceMappingConfiguration$LastModified": "

    The UTC time string indicating the last time the event mapping was updated.

    " + } + }, + "DeleteAliasRequest": { + "base": null, + "refs": { + } + }, + "DeleteEventSourceMappingRequest": { + "base": null, + "refs": { + } + }, + "DeleteFunctionRequest": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "AliasConfiguration$Description": "

    Alias description.

    ", + "CreateAliasRequest$Description": "

    Description of the alias.

    ", + "CreateFunctionRequest$Description": "

    A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.

    ", + "FunctionConfiguration$Description": "

    The user-provided description.

    ", + "PublishVersionRequest$Description": "

    The description for the version you are publishing. If not provided, AWS Lambda copies the description from the HEAD version.

    ", + "UpdateAliasRequest$Description": "

    You can optionally change the description of the alias using this parameter.

    ", + "UpdateFunctionConfigurationRequest$Description": "

    A short user-defined function description. AWS Lambda does not use this value. Assign a meaningful description as you see fit.

    " + } + }, + "Enabled": { + "base": null, + "refs": { + "CreateEventSourceMappingRequest$Enabled": "

    Indicates whether AWS Lambda should begin polling the event source. By default, Enabled is true.

    ", + "UpdateEventSourceMappingRequest$Enabled": "

    Specifies whether AWS Lambda should actively poll the stream or not. If disabled, AWS Lambda will not poll the stream.

    " + } + }, + "EventSourceMappingConfiguration": { + "base": "

    Describes mapping between an Amazon Kinesis stream and a Lambda function.

    ", + "refs": { + "EventSourceMappingsList$member": null + } + }, + "EventSourceMappingsList": { + "base": null, + "refs": { + "ListEventSourceMappingsResponse$EventSourceMappings": "

    An array of EventSourceMappingConfiguration objects.

    " + } + }, + "EventSourcePosition": { + "base": null, + "refs": { + "CreateEventSourceMappingRequest$StartingPosition": "

    The position in the stream where AWS Lambda should start reading. For more information, go to ShardIteratorType in the Amazon Kinesis API Reference.

    " + } + }, + "FunctionArn": { + "base": null, + "refs": { + "AliasConfiguration$AliasArn": "

    Lambda function ARN that is qualified using alias name as the suffix. For example, if you create an alias \"BETA\" pointing to a helloworld function version, the ARN is arn:aws:lambda:aws-regions:acct-id:function:helloworld:BETA.

    ", + "EventSourceMappingConfiguration$FunctionArn": "

    The Lambda function to invoke when AWS Lambda detects an event on the stream.

    ", + "FunctionConfiguration$FunctionArn": "

    The Amazon Resource Name (ARN) assigned to the function.

    " + } + }, + "FunctionCode": { + "base": "

    The code for the Lambda function.

    ", + "refs": { + "CreateFunctionRequest$Code": "

    The code for the Lambda function.

    " + } + }, + "FunctionCodeLocation": { + "base": "

    The object for the Lambda function location.

    ", + "refs": { + "GetFunctionResponse$Code": null + } + }, + "FunctionConfiguration": { + "base": "

    A complex type that describes function metadata.

    ", + "refs": { + "FunctionList$member": null, + "GetFunctionResponse$Configuration": null + } + }, + "FunctionList": { + "base": null, + "refs": { + "ListFunctionsResponse$Functions": "

    A list of Lambda functions.

    ", + "ListVersionsByFunctionResponse$Versions": "

    A list of Lambda function versions.

    " + } + }, + "FunctionName": { + "base": null, + "refs": { + "AddPermissionRequest$FunctionName": "

    Name of the Lambda function whose resource policy you are updating by adding a new permission.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "CreateAliasRequest$FunctionName": "

    Name of the Lambda function for which you want to create an alias.

    ", + "CreateEventSourceMappingRequest$FunctionName": "

    The Lambda function to invoke when AWS Lambda detects an event on the stream.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "CreateFunctionRequest$FunctionName": "

    The name you want to assign to the function you are uploading. You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length. The function names appear in the console and are returned in the ListFunctions API. Function names are used to specify functions to other AWS Lambda APIs, such as Invoke.

    ", + "DeleteAliasRequest$FunctionName": "

    The Lambda function name for which the alias is created.

    ", + "DeleteFunctionRequest$FunctionName": "

    The Lambda function to delete.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "FunctionConfiguration$FunctionName": "

    The name of the function.

    ", + "GetAliasRequest$FunctionName": "

    Function name for which the alias is created. An alias is a subresource that exists only in the context of an existing Lambda function. So you must specify the function name.

    ", + "GetFunctionConfigurationRequest$FunctionName": "

    The name of the Lambda function for which you want to retrieve the configuration information.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "GetFunctionRequest$FunctionName": "

    The Lambda function name.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "GetPolicyRequest$FunctionName": "

    Function name whose resource policy you want to retrieve.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "InvocationRequest$FunctionName": "

    The Lambda function name.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "InvokeAsyncRequest$FunctionName": "

    The Lambda function name.

    ", + "ListAliasesRequest$FunctionName": "

    Lambda function name for which the alias is created.

    ", + "ListEventSourceMappingsRequest$FunctionName": "

    The name of the Lambda function.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "ListVersionsByFunctionRequest$FunctionName": "

    Function name whose versions to list. You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "PublishVersionRequest$FunctionName": "

    The Lambda function name. You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "RemovePermissionRequest$FunctionName": "

    Lambda function whose resource policy you want to remove a permission from.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "UpdateAliasRequest$FunctionName": "

    The function name for which the alias is created.

    ", + "UpdateEventSourceMappingRequest$FunctionName": "

    The Lambda function to which you want the stream records sent.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "UpdateFunctionCodeRequest$FunctionName": "

    The existing Lambda function name whose code you want to replace.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    ", + "UpdateFunctionConfigurationRequest$FunctionName": "

    The name of the Lambda function.

    You can specify an unqualified function name (for example, \"Thumbnail\") or you can specify Amazon Resource Name (ARN) of the function (for example, \"arn:aws:lambda:us-west-2:account-id:function:ThumbNail\"). AWS Lambda also allows you to specify only the account ID qualifier (for example, \"account-id:Thumbnail\"). Note that the length constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character in length.

    " + } + }, + "GetAliasRequest": { + "base": null, + "refs": { + } + }, + "GetEventSourceMappingRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionRequest": { + "base": null, + "refs": { + } + }, + "GetFunctionResponse": { + "base": "

    This response contains the object for the Lambda function location (see API_FunctionCodeLocation

    ", + "refs": { + } + }, + "GetPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetPolicyResponse": { + "base": null, + "refs": { + } + }, + "Handler": { + "base": null, + "refs": { + "CreateFunctionRequest$Handler": "

    The function within your code that Lambda calls to begin execution. For Node.js, it is the module-name.export value in your function. For Java, it can be package.class-name::handler or package.class-name. For more information, see Lambda Function Handler (Java).

    ", + "FunctionConfiguration$Handler": "

    The function Lambda calls to begin executing your function.

    ", + "UpdateFunctionConfigurationRequest$Handler": "

    The function that Lambda calls to begin executing your function. For Node.js, it is the module-name.export value in your function.

    " + } + }, + "HttpStatus": { + "base": null, + "refs": { + "InvokeAsyncResponse$Status": "

    It will be 202 upon success.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "InvocationResponse$StatusCode": "

    The HTTP status code will be in the 200 range for a successful request. For the \"RequestResponse\" invocation type this status code will be 200. For the \"Event\" invocation type this status code will be 202. For the \"DryRun\" invocation type the status code will be 204.

    " + } + }, + "InvalidParameterValueException": { + "base": "

    One of the parameters in the request is invalid. For example, if you provided an IAM role in the CreateFunction or the UpdateFunctionConfiguration API that AWS Lambda is unable to assume, you will get this exception.

    ", + "refs": { + } + }, + "InvalidRequestContentException": { + "base": "

    The request body could not be parsed as JSON.

    ", + "refs": { + } + }, + "InvocationRequest": { + "base": null, + "refs": { + } + }, + "InvocationResponse": { + "base": "

    Upon success, returns an empty response. Otherwise, throws an exception.

    ", + "refs": { + } + }, + "InvocationType": { + "base": null, + "refs": { + "InvocationRequest$InvocationType": "

    By default, the Invoke API assumes \"RequestResponse\" invocation type. You can optionally request asynchronous execution by specifying \"Event\" as the InvocationType. You can also use this parameter to request AWS Lambda to not execute the function but do some verification, such as if the caller is authorized to invoke the function and if the inputs are valid. You request this by specifying \"DryRun\" as the InvocationType. This is useful in a cross-account scenario when you want to verify access to a function without running it.

    " + } + }, + "InvokeAsyncRequest": { + "base": null, + "refs": { + } + }, + "InvokeAsyncResponse": { + "base": "

    Upon success, it returns empty response. Otherwise, throws an exception.

    ", + "refs": { + } + }, + "ListAliasesRequest": { + "base": null, + "refs": { + } + }, + "ListAliasesResponse": { + "base": null, + "refs": { + } + }, + "ListEventSourceMappingsRequest": { + "base": null, + "refs": { + } + }, + "ListEventSourceMappingsResponse": { + "base": "

    Contains a list of event sources (see API_EventSourceMappingConfiguration)

    ", + "refs": { + } + }, + "ListFunctionsRequest": { + "base": null, + "refs": { + } + }, + "ListFunctionsResponse": { + "base": "

    Contains a list of AWS Lambda function configurations (see FunctionConfiguration.

    ", + "refs": { + } + }, + "ListVersionsByFunctionRequest": { + "base": null, + "refs": { + } + }, + "ListVersionsByFunctionResponse": { + "base": null, + "refs": { + } + }, + "LogType": { + "base": null, + "refs": { + "InvocationRequest$LogType": "

    You can set this optional parameter to \"Tail\" in the request only if you specify the InvocationType parameter with value \"RequestResponse\". In this case, AWS Lambda returns the base64-encoded last 4 KB of log data produced by your Lambda function in the x-amz-log-results header.

    " + } + }, + "Long": { + "base": null, + "refs": { + "FunctionConfiguration$CodeSize": "

    The size, in bytes, of the function .zip file you uploaded.

    " + } + }, + "MaxListItems": { + "base": null, + "refs": { + "ListAliasesRequest$MaxItems": "

    Optional integer. Specifies the maximum number of aliases to return in response. This parameter value must be greater than 0.

    ", + "ListEventSourceMappingsRequest$MaxItems": "

    Optional integer. Specifies the maximum number of event sources to return in response. This value must be greater than 0.

    ", + "ListFunctionsRequest$MaxItems": "

    Optional integer. Specifies the maximum number of AWS Lambda functions to return in response. This parameter value must be greater than 0.

    ", + "ListVersionsByFunctionRequest$MaxItems": "

    Optional integer. Specifies the maximum number of AWS Lambda function versions to return in response. This parameter value must be greater than 0.

    " + } + }, + "MemorySize": { + "base": null, + "refs": { + "CreateFunctionRequest$MemorySize": "

    The amount of memory, in MB, your Lambda function is given. Lambda uses this memory size to infer the amount of CPU and memory allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory compared to an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

    ", + "FunctionConfiguration$MemorySize": "

    The memory size, in MB, you configured for the function. Must be a multiple of 64 MB.

    ", + "UpdateFunctionConfigurationRequest$MemorySize": "

    The amount of memory, in MB, your Lambda function is given. AWS Lambda uses this memory size to infer the amount of CPU allocated to your function. Your function use-case determines your CPU and memory requirements. For example, a database operation might need less memory compared to an image processing function. The default value is 128 MB. The value must be a multiple of 64 MB.

    " + } + }, + "PolicyLengthExceededException": { + "base": "

    Lambda function access policy is limited to 20 KB.

    ", + "refs": { + } + }, + "Principal": { + "base": null, + "refs": { + "AddPermissionRequest$Principal": "

    The principal who is getting this permission. It can be Amazon S3 service Principal (\"s3.amazonaws.com\") if you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or any valid AWS service principal such as \"sns.amazonaws.com\". For example, you might want to allow a custom application in another AWS account to push events to AWS Lambda by invoking your function.

    " + } + }, + "PublishVersionRequest": { + "base": null, + "refs": { + } + }, + "Qualifier": { + "base": null, + "refs": { + "AddPermissionRequest$Qualifier": "

    You can specify this optional query parameter to specify function version or alias name. The permission will then apply to the specific qualified ARN. For example, if you specify function version 2 as the qualifier, then permission applies only when request is made using qualified function ARN:

    arn:aws:lambda:aws-region:acct-id:function:function-name:2

    If you specify alias name, for example \"PROD\", then the permission is valid only for requests made using the alias ARN:

    arn:aws:lambda:aws-region:acct-id:function:function-name:PROD

    If the qualifier is not specified, the permission is valid only when requests is made using unqualified function ARN.

    arn:aws:lambda:aws-region:acct-id:function:function-name

    ", + "DeleteFunctionRequest$Qualifier": "

    Using this optional parameter you can specify a function version (but not the $LATEST version) to direct AWS Lambda to delete a specific function version. If the function version has one or more aliases pointing to it, you will get an error because you cannot have aliases pointing to it. You can delete any function version but not the $LATEST, that is, you cannot specify $LATEST as the value of this parameter. The $LATEST version can be deleted only when you want to delete all the function versions and aliases.

    You can only specify a function version and not alias name using this parameter. You cannot delete a function version using its alias.

    If you don't specify this parameter, AWS Lambda will delete the function, including all its versions and aliases.

    ", + "GetFunctionConfigurationRequest$Qualifier": "

    Using this optional parameter you can specify function version or alias name. If you specify function version, the API uses qualified function ARN and returns information about the specific function version. If you specify alias name, the API uses alias ARN and returns information about the function version to which the alias points.

    If you don't specify this parameter, the API uses unqualified function ARN, and returns information about the $LATEST function version.

    ", + "GetFunctionRequest$Qualifier": "

    Use this optional parameter to specify a function version or alias name. If you specify function version, the API uses qualified function ARN for the request and returns information about the specific Lambda function version. If you specify alias name, the API uses alias ARN and returns information about the function version to which the alias points. If you don't provide this parameter, the API uses unqualified function ARN and returns information about the $LATEST version of the Lambda function.

    ", + "GetPolicyRequest$Qualifier": "

    You can specify this optional query parameter to specify function version or alias name in which case this API will return all permissions associated with the specific ARN. If you don't provide this parameter, the API will return permissions that apply to the unqualified function ARN.

    ", + "InvocationRequest$Qualifier": "

    You can use this optional parameter to specify a Lambda function version or alias name. If you specify function version, the API uses qualified function ARN to invoke a specific Lambda function. If you specify alias name, the API uses the alias ARN to invoke the Lambda function version to which the alias points.

    If you don't provide this parameter, then the API uses unqualified function ARN which results in invocation of the $LATEST version.

    ", + "RemovePermissionRequest$Qualifier": "

    You can specify this optional parameter to remove permission associated with a specific function version or function alias. The value of this parameter is the function version or alias name. If you don't specify this parameter, the API removes permission associated with the unqualified function ARN.

    " + } + }, + "RemovePermissionRequest": { + "base": null, + "refs": { + } + }, + "RequestTooLargeException": { + "base": "

    The request payload exceeded the Invoke request body JSON input limit. For more information, see Limits

    ", + "refs": { + } + }, + "ResourceConflictException": { + "base": "

    The resource already exists.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    The resource (for example, a Lambda function or access policy statement) specified in the request does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "CreateFunctionRequest$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources. For more information, see AWS Lambda: How it Works

    ", + "FunctionConfiguration$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources.

    ", + "UpdateFunctionConfigurationRequest$Role": "

    The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when it executes your function.

    " + } + }, + "Runtime": { + "base": null, + "refs": { + "CreateFunctionRequest$Runtime": "

    The runtime environment for the Lambda function you are uploading. Currently, Lambda supports \"java\" and \"nodejs\" as the runtime.

    ", + "FunctionConfiguration$Runtime": "

    The runtime environment for the Lambda function.

    " + } + }, + "S3Bucket": { + "base": null, + "refs": { + "FunctionCode$S3Bucket": "

    Amazon S3 bucket name where the .zip file containing your deployment package is stored. This bucket must reside in the same AWS region where you are creating the Lambda function.

    ", + "UpdateFunctionCodeRequest$S3Bucket": "

    Amazon S3 bucket name where the .zip file containing your deployment package is stored. This bucket must reside in the same AWS region where you are creating the Lambda function.

    " + } + }, + "S3Key": { + "base": null, + "refs": { + "FunctionCode$S3Key": "

    The Amazon S3 object (the deployment package) key name you want to upload.

    ", + "UpdateFunctionCodeRequest$S3Key": "

    The Amazon S3 object (the deployment package) key name you want to upload.

    " + } + }, + "S3ObjectVersion": { + "base": null, + "refs": { + "FunctionCode$S3ObjectVersion": "

    The Amazon S3 object (the deployment package) version you want to upload.

    ", + "UpdateFunctionCodeRequest$S3ObjectVersion": "

    The Amazon S3 object (the deployment package) version you want to upload.

    " + } + }, + "ServiceException": { + "base": "

    The AWS Lambda service encountered an internal error.

    ", + "refs": { + } + }, + "SourceOwner": { + "base": null, + "refs": { + "AddPermissionRequest$SourceAccount": "

    The AWS account ID (without a hyphen) of the source owner. For example, if the SourceArn identifies a bucket, then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS account created the bucket). You can also use this condition to specify all sources (that is, you don't specify the SourceArn) owned by a specific account.

    " + } + }, + "StatementId": { + "base": null, + "refs": { + "AddPermissionRequest$StatementId": "

    A unique statement identifier.

    ", + "RemovePermissionRequest$StatementId": "

    Statement ID of the permission to remove.

    " + } + }, + "String": { + "base": null, + "refs": { + "AddPermissionResponse$Statement": "

    The permission statement you specified in the request. The response returns the same as a string using \"\\\" as an escape character in the JSON.

    ", + "CodeStorageExceededException$Type": null, + "CodeStorageExceededException$message": null, + "DeleteEventSourceMappingRequest$UUID": "

    The event source mapping ID.

    ", + "EventSourceMappingConfiguration$UUID": "

    The AWS Lambda assigned opaque identifier for the mapping.

    ", + "EventSourceMappingConfiguration$LastProcessingResult": "

    The result of the last AWS Lambda invocation of your Lambda function.

    ", + "EventSourceMappingConfiguration$State": "

    The state of the event source mapping. It can be \"Creating\", \"Enabled\", \"Disabled\", \"Enabling\", \"Disabling\", \"Updating\", or \"Deleting\".

    ", + "EventSourceMappingConfiguration$StateTransitionReason": "

    The reason the event source mapping is in its current state. It is either user-requested or an AWS Lambda-initiated state transition.

    ", + "FunctionCodeLocation$RepositoryType": "

    The repository from which you can download the function.

    ", + "FunctionCodeLocation$Location": "

    The presigned URL you can use to download the function's .zip file that you previously uploaded. The URL is valid for up to 10 minutes.

    ", + "FunctionConfiguration$CodeSha256": "

    It is the SHA256 hash of your function deployment package.

    ", + "GetEventSourceMappingRequest$UUID": "

    The AWS Lambda assigned ID of the event source mapping.

    ", + "GetPolicyResponse$Policy": "

    The resource policy associated with the specified function. The response returns the same as a string using \"\\\" as an escape character in the JSON.

    ", + "InvalidParameterValueException$Type": null, + "InvalidParameterValueException$message": null, + "InvalidRequestContentException$Type": null, + "InvalidRequestContentException$message": null, + "InvocationRequest$ClientContext": "

    Using the ClientContext you can pass client-specific information to the Lambda function you are invoking. You can then process the client information in your Lambda function as you choose through the context variable. For an example of a ClientContext JSON, go to PutEvents in the Amazon Mobile Analytics API Reference and User Guide.

    The ClientContext JSON must be base64-encoded.

    ", + "InvocationResponse$FunctionError": "

    Indicates whether an error occurred while executing the Lambda function. If an error occurred this field will have one of two values: Handled or Unhandled. Handled errors are errors that are reported by the function while the Unhandled errors are those detected and reported by AWS Lambda. Unhandled errors include out of memory errors and function timeouts. For information about how to report a Handled error, see Programming Model.

    ", + "InvocationResponse$LogResult": "

    It is the base64-encoded logs for the Lambda function invocation. This is present only if the invocation type is \"RequestResponse\" and the logs were requested.

    ", + "ListAliasesRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListAliases operation. If present, indicates where to continue the listing.

    ", + "ListAliasesResponse$NextMarker": "

    A string, present if there are more aliases.

    ", + "ListEventSourceMappingsRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListEventSourceMappings operation. If present, specifies to continue the list from where the returning call left off.

    ", + "ListEventSourceMappingsResponse$NextMarker": "

    A string, present if there are more event source mappings.

    ", + "ListFunctionsRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListFunctions operation. If present, indicates where to continue the listing.

    ", + "ListFunctionsResponse$NextMarker": "

    A string, present if there are more functions.

    ", + "ListVersionsByFunctionRequest$Marker": "

    Optional string. An opaque pagination token returned from a previous ListVersionsByFunction operation. If present, indicates where to continue the listing.

    ", + "ListVersionsByFunctionResponse$NextMarker": "

    A string, present if there are more function versions.

    ", + "PolicyLengthExceededException$Type": null, + "PolicyLengthExceededException$message": null, + "PublishVersionRequest$CodeSha256": "

    The SHA256 hash of the deployment package you want to publish. This provides validation on the code you are publishing. If you provide this parameter value must match the SHA256 of the HEAD version for the publication to succeed.

    ", + "RequestTooLargeException$Type": null, + "RequestTooLargeException$message": null, + "ResourceConflictException$Type": null, + "ResourceConflictException$message": null, + "ResourceNotFoundException$Type": null, + "ResourceNotFoundException$Message": null, + "ServiceException$Type": null, + "ServiceException$Message": null, + "TooManyRequestsException$retryAfterSeconds": "

    The number of seconds the caller should wait before retrying.

    ", + "TooManyRequestsException$Type": null, + "TooManyRequestsException$message": null, + "UnsupportedMediaTypeException$Type": null, + "UnsupportedMediaTypeException$message": null, + "UpdateEventSourceMappingRequest$UUID": "

    The event source mapping identifier.

    " + } + }, + "Timeout": { + "base": null, + "refs": { + "CreateFunctionRequest$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    ", + "FunctionConfiguration$Timeout": "

    The function execution time at which Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    ", + "UpdateFunctionConfigurationRequest$Timeout": "

    The function execution time at which AWS Lambda should terminate the function. Because the execution time has cost implications, we recommend you set this value based on your expected execution time. The default is 3 seconds.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "FunctionConfiguration$LastModified": "

    The timestamp of the last time you updated the function.

    " + } + }, + "TooManyRequestsException": { + "base": null, + "refs": { + } + }, + "UnsupportedMediaTypeException": { + "base": "

    The content type of the Invoke request body is not JSON.

    ", + "refs": { + } + }, + "UpdateAliasRequest": { + "base": null, + "refs": { + } + }, + "UpdateEventSourceMappingRequest": { + "base": null, + "refs": { + } + }, + "UpdateFunctionCodeRequest": { + "base": null, + "refs": { + } + }, + "UpdateFunctionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "AliasConfiguration$FunctionVersion": "

    Function version to which the alias points.

    ", + "CreateAliasRequest$FunctionVersion": "

    Lambda function version for which you are creating the alias.

    ", + "FunctionConfiguration$Version": "

    The version of the Lambda function.

    ", + "ListAliasesRequest$FunctionVersion": "

    If you specify this optional parameter, the API returns only the aliases pointing to the specific Lambda function version, otherwise returns all aliases created for the Lambda function.

    ", + "UpdateAliasRequest$FunctionVersion": "

    Using this parameter you can optionally change the Lambda function version to which the alias points.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/lambda/2015-03-31/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "pagination": { + "ListEventSourceMappings": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "EventSourceMappings" + }, + "ListFunctions": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "Functions" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1138 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-03-28", + "endpointPrefix":"logs", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon CloudWatch Logs", + "signatureVersion":"v4", + "targetPrefix":"Logs_20140328" + }, + "operations":{ + "CancelExportTask":{ + "name":"CancelExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelExportTaskRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, 
+ {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidOperationException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "CreateExportTask":{ + "name":"CreateExportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateExportTaskRequest"}, + "output":{"shape":"CreateExportTaskResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"} + ] + }, + "CreateLogGroup":{ + "name":"CreateLogGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLogGroupRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "CreateLogStream":{ + "name":"CreateLogStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLogStreamRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteDestination":{ + "name":"DeleteDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDestinationRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteLogGroup":{ + "name":"DeleteLogGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLogGroupRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + 
}, + "DeleteLogStream":{ + "name":"DeleteLogStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLogStreamRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteMetricFilter":{ + "name":"DeleteMetricFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMetricFilterRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteRetentionPolicy":{ + "name":"DeleteRetentionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRetentionPolicyRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DeleteSubscriptionFilter":{ + "name":"DeleteSubscriptionFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSubscriptionFilterRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeDestinations":{ + "name":"DescribeDestinations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDestinationsRequest"}, + "output":{"shape":"DescribeDestinationsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeExportTasks":{ + "name":"DescribeExportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportTasksRequest"}, + "output":{"shape":"DescribeExportTasksResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + 
{"shape":"ServiceUnavailableException"} + ] + }, + "DescribeLogGroups":{ + "name":"DescribeLogGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLogGroupsRequest"}, + "output":{"shape":"DescribeLogGroupsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeLogStreams":{ + "name":"DescribeLogStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLogStreamsRequest"}, + "output":{"shape":"DescribeLogStreamsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeMetricFilters":{ + "name":"DescribeMetricFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetricFiltersRequest"}, + "output":{"shape":"DescribeMetricFiltersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "DescribeSubscriptionFilters":{ + "name":"DescribeSubscriptionFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSubscriptionFiltersRequest"}, + "output":{"shape":"DescribeSubscriptionFiltersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "FilterLogEvents":{ + "name":"FilterLogEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FilterLogEventsRequest"}, + "output":{"shape":"FilterLogEventsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "GetLogEvents":{ + "name":"GetLogEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLogEventsRequest"}, + 
"output":{"shape":"GetLogEventsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutDestination":{ + "name":"PutDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDestinationRequest"}, + "output":{"shape":"PutDestinationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutDestinationPolicy":{ + "name":"PutDestinationPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDestinationPolicyRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutLogEvents":{ + "name":"PutLogEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutLogEventsRequest"}, + "output":{"shape":"PutLogEventsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidSequenceTokenException"}, + {"shape":"DataAlreadyAcceptedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutMetricFilter":{ + "name":"PutMetricFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutMetricFilterRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "PutRetentionPolicy":{ + "name":"PutRetentionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRetentionPolicyRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + 
{"shape":"ServiceUnavailableException"} + ] + }, + "PutSubscriptionFilter":{ + "name":"PutSubscriptionFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutSubscriptionFilterRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"} + ] + }, + "TestMetricFilter":{ + "name":"TestMetricFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestMetricFilterRequest"}, + "output":{"shape":"TestMetricFilterResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ] + } + }, + "shapes":{ + "AccessPolicy":{ + "type":"string", + "min":1 + }, + "Arn":{"type":"string"}, + "CancelExportTaskRequest":{ + "type":"structure", + "required":["taskId"], + "members":{ + "taskId":{"shape":"ExportTaskId"} + } + }, + "CreateExportTaskRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "from", + "to", + "destination" + ], + "members":{ + "taskName":{"shape":"ExportTaskName"}, + "logGroupName":{"shape":"LogGroupName"}, + "logStreamNamePrefix":{"shape":"LogStreamName"}, + "from":{"shape":"Timestamp"}, + "to":{"shape":"Timestamp"}, + "destination":{"shape":"ExportDestinationBucket"}, + "destinationPrefix":{"shape":"ExportDestinationPrefix"} + } + }, + "CreateExportTaskResponse":{ + "type":"structure", + "members":{ + "taskId":{"shape":"ExportTaskId"} + } + }, + "CreateLogGroupRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"} + } + }, + "CreateLogStreamRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "logStreamName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamName":{"shape":"LogStreamName"} + } + }, + "DataAlreadyAcceptedException":{ + "type":"structure", + "members":{ + 
"expectedSequenceToken":{"shape":"SequenceToken"} + }, + "exception":true + }, + "Days":{"type":"integer"}, + "DeleteDestinationRequest":{ + "type":"structure", + "required":["destinationName"], + "members":{ + "destinationName":{"shape":"DestinationName"} + } + }, + "DeleteLogGroupRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"} + } + }, + "DeleteLogStreamRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "logStreamName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamName":{"shape":"LogStreamName"} + } + }, + "DeleteMetricFilterRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "filterName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterName":{"shape":"FilterName"} + } + }, + "DeleteRetentionPolicyRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"} + } + }, + "DeleteSubscriptionFilterRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "filterName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterName":{"shape":"FilterName"} + } + }, + "Descending":{"type":"boolean"}, + "DescribeDestinationsRequest":{ + "type":"structure", + "members":{ + "DestinationNamePrefix":{"shape":"DestinationName"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeDestinationsResponse":{ + "type":"structure", + "members":{ + "destinations":{"shape":"Destinations"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeExportTasksRequest":{ + "type":"structure", + "members":{ + "taskId":{"shape":"ExportTaskId"}, + "statusCode":{"shape":"ExportTaskStatusCode"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeExportTasksResponse":{ + "type":"structure", + "members":{ + "exportTasks":{"shape":"ExportTasks"}, + "nextToken":{"shape":"NextToken"} + } 
+ }, + "DescribeLimit":{ + "type":"integer", + "max":50, + "min":1 + }, + "DescribeLogGroupsRequest":{ + "type":"structure", + "members":{ + "logGroupNamePrefix":{"shape":"LogGroupName"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeLogGroupsResponse":{ + "type":"structure", + "members":{ + "logGroups":{"shape":"LogGroups"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeLogStreamsRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamNamePrefix":{"shape":"LogStreamName"}, + "orderBy":{"shape":"OrderBy"}, + "descending":{"shape":"Descending"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeLogStreamsResponse":{ + "type":"structure", + "members":{ + "logStreams":{"shape":"LogStreams"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeMetricFiltersRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterNamePrefix":{"shape":"FilterName"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeMetricFiltersResponse":{ + "type":"structure", + "members":{ + "metricFilters":{"shape":"MetricFilters"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeSubscriptionFiltersRequest":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterNamePrefix":{"shape":"FilterName"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"DescribeLimit"} + } + }, + "DescribeSubscriptionFiltersResponse":{ + "type":"structure", + "members":{ + "subscriptionFilters":{"shape":"SubscriptionFilters"}, + "nextToken":{"shape":"NextToken"} + } + }, + "Destination":{ + "type":"structure", + "members":{ + "destinationName":{"shape":"DestinationName"}, + "targetArn":{"shape":"TargetArn"}, + "roleArn":{"shape":"RoleArn"}, + 
"accessPolicy":{"shape":"AccessPolicy"}, + "arn":{"shape":"Arn"}, + "creationTime":{"shape":"Timestamp"} + } + }, + "DestinationArn":{ + "type":"string", + "min":1 + }, + "DestinationName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "Destinations":{ + "type":"list", + "member":{"shape":"Destination"} + }, + "EventId":{"type":"string"}, + "EventMessage":{ + "type":"string", + "min":1 + }, + "EventNumber":{"type":"long"}, + "EventsLimit":{ + "type":"integer", + "max":10000, + "min":1 + }, + "ExportDestinationBucket":{ + "type":"string", + "max":512, + "min":1 + }, + "ExportDestinationPrefix":{"type":"string"}, + "ExportTask":{ + "type":"structure", + "members":{ + "taskId":{"shape":"ExportTaskId"}, + "taskName":{"shape":"ExportTaskName"}, + "logGroupName":{"shape":"LogGroupName"}, + "from":{"shape":"Timestamp"}, + "to":{"shape":"Timestamp"}, + "destination":{"shape":"ExportDestinationBucket"}, + "destinationPrefix":{"shape":"ExportDestinationPrefix"}, + "status":{"shape":"ExportTaskStatus"}, + "executionInfo":{"shape":"ExportTaskExecutionInfo"} + } + }, + "ExportTaskExecutionInfo":{ + "type":"structure", + "members":{ + "creationTime":{"shape":"Timestamp"}, + "completionTime":{"shape":"Timestamp"} + } + }, + "ExportTaskId":{ + "type":"string", + "max":512, + "min":1 + }, + "ExportTaskName":{ + "type":"string", + "max":512, + "min":1 + }, + "ExportTaskStatus":{ + "type":"structure", + "members":{ + "code":{"shape":"ExportTaskStatusCode"}, + "message":{"shape":"ExportTaskStatusMessage"} + } + }, + "ExportTaskStatusCode":{ + "type":"string", + "enum":[ + "CANCELLED", + "COMPLETED", + "FAILED", + "PENDING", + "PENDING_CANCEL", + "RUNNING" + ] + }, + "ExportTaskStatusMessage":{"type":"string"}, + "ExportTasks":{ + "type":"list", + "member":{"shape":"ExportTask"} + }, + "ExtractedValues":{ + "type":"map", + "key":{"shape":"Token"}, + "value":{"shape":"Value"} + }, + "FilterCount":{"type":"integer"}, + "FilterLogEventsRequest":{ + 
"type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamNames":{"shape":"InputLogStreamNames"}, + "startTime":{"shape":"Timestamp"}, + "endTime":{"shape":"Timestamp"}, + "filterPattern":{"shape":"FilterPattern"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"EventsLimit"}, + "interleaved":{"shape":"Interleaved"} + } + }, + "FilterLogEventsResponse":{ + "type":"structure", + "members":{ + "events":{"shape":"FilteredLogEvents"}, + "searchedLogStreams":{"shape":"SearchedLogStreams"}, + "nextToken":{"shape":"NextToken"} + } + }, + "FilterName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "FilterPattern":{ + "type":"string", + "max":512, + "min":0 + }, + "FilteredLogEvent":{ + "type":"structure", + "members":{ + "logStreamName":{"shape":"LogStreamName"}, + "timestamp":{"shape":"Timestamp"}, + "message":{"shape":"EventMessage"}, + "ingestionTime":{"shape":"Timestamp"}, + "eventId":{"shape":"EventId"} + } + }, + "FilteredLogEvents":{ + "type":"list", + "member":{"shape":"FilteredLogEvent"} + }, + "GetLogEventsRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "logStreamName" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamName":{"shape":"LogStreamName"}, + "startTime":{"shape":"Timestamp"}, + "endTime":{"shape":"Timestamp"}, + "nextToken":{"shape":"NextToken"}, + "limit":{"shape":"EventsLimit"}, + "startFromHead":{"shape":"StartFromHead"} + } + }, + "GetLogEventsResponse":{ + "type":"structure", + "members":{ + "events":{"shape":"OutputLogEvents"}, + "nextForwardToken":{"shape":"NextToken"}, + "nextBackwardToken":{"shape":"NextToken"} + } + }, + "InputLogEvent":{ + "type":"structure", + "required":[ + "timestamp", + "message" + ], + "members":{ + "timestamp":{"shape":"Timestamp"}, + "message":{"shape":"EventMessage"} + } + }, + "InputLogEvents":{ + "type":"list", + "member":{"shape":"InputLogEvent"}, + "max":10000, + "min":1 + 
}, + "InputLogStreamNames":{ + "type":"list", + "member":{"shape":"LogStreamName"}, + "max":100, + "min":1 + }, + "Interleaved":{"type":"boolean"}, + "InvalidOperationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidSequenceTokenException":{ + "type":"structure", + "members":{ + "expectedSequenceToken":{"shape":"SequenceToken"} + }, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "LogEventIndex":{"type":"integer"}, + "LogGroup":{ + "type":"structure", + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "creationTime":{"shape":"Timestamp"}, + "retentionInDays":{"shape":"Days"}, + "metricFilterCount":{"shape":"FilterCount"}, + "arn":{"shape":"Arn"}, + "storedBytes":{"shape":"StoredBytes"} + } + }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "LogGroups":{ + "type":"list", + "member":{"shape":"LogGroup"} + }, + "LogStream":{ + "type":"structure", + "members":{ + "logStreamName":{"shape":"LogStreamName"}, + "creationTime":{"shape":"Timestamp"}, + "firstEventTimestamp":{"shape":"Timestamp"}, + "lastEventTimestamp":{"shape":"Timestamp"}, + "lastIngestionTime":{"shape":"Timestamp"}, + "uploadSequenceToken":{"shape":"SequenceToken"}, + "arn":{"shape":"Arn"}, + "storedBytes":{"shape":"StoredBytes"} + } + }, + "LogStreamName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "LogStreamSearchedCompletely":{"type":"boolean"}, + "LogStreams":{ + "type":"list", + "member":{"shape":"LogStream"} + }, + "MetricFilter":{ + "type":"structure", + "members":{ + "filterName":{"shape":"FilterName"}, + "filterPattern":{"shape":"FilterPattern"}, + "metricTransformations":{"shape":"MetricTransformations"}, + "creationTime":{"shape":"Timestamp"} + } + }, + "MetricFilterMatchRecord":{ + 
"type":"structure", + "members":{ + "eventNumber":{"shape":"EventNumber"}, + "eventMessage":{"shape":"EventMessage"}, + "extractedValues":{"shape":"ExtractedValues"} + } + }, + "MetricFilterMatches":{ + "type":"list", + "member":{"shape":"MetricFilterMatchRecord"} + }, + "MetricFilters":{ + "type":"list", + "member":{"shape":"MetricFilter"} + }, + "MetricName":{ + "type":"string", + "max":255, + "pattern":"[^:*$]*" + }, + "MetricNamespace":{ + "type":"string", + "max":255, + "pattern":"[^:*$]*" + }, + "MetricTransformation":{ + "type":"structure", + "required":[ + "metricName", + "metricNamespace", + "metricValue" + ], + "members":{ + "metricName":{"shape":"MetricName"}, + "metricNamespace":{"shape":"MetricNamespace"}, + "metricValue":{"shape":"MetricValue"} + } + }, + "MetricTransformations":{ + "type":"list", + "member":{"shape":"MetricTransformation"}, + "max":1, + "min":1 + }, + "MetricValue":{ + "type":"string", + "max":100 + }, + "NextToken":{ + "type":"string", + "min":1 + }, + "OperationAbortedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OrderBy":{ + "type":"string", + "enum":[ + "LogStreamName", + "LastEventTime" + ] + }, + "OutputLogEvent":{ + "type":"structure", + "members":{ + "timestamp":{"shape":"Timestamp"}, + "message":{"shape":"EventMessage"}, + "ingestionTime":{"shape":"Timestamp"} + } + }, + "OutputLogEvents":{ + "type":"list", + "member":{"shape":"OutputLogEvent"} + }, + "PutDestinationPolicyRequest":{ + "type":"structure", + "required":[ + "destinationName", + "accessPolicy" + ], + "members":{ + "destinationName":{"shape":"DestinationName"}, + "accessPolicy":{"shape":"AccessPolicy"} + } + }, + "PutDestinationRequest":{ + "type":"structure", + "required":[ + "destinationName", + "targetArn", + "roleArn" + ], + "members":{ + "destinationName":{"shape":"DestinationName"}, + "targetArn":{"shape":"TargetArn"}, + "roleArn":{"shape":"RoleArn"} + } + }, + "PutDestinationResponse":{ + "type":"structure", + "members":{ 
+ "destination":{"shape":"Destination"} + } + }, + "PutLogEventsRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "logStreamName", + "logEvents" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "logStreamName":{"shape":"LogStreamName"}, + "logEvents":{"shape":"InputLogEvents"}, + "sequenceToken":{"shape":"SequenceToken"} + } + }, + "PutLogEventsResponse":{ + "type":"structure", + "members":{ + "nextSequenceToken":{"shape":"SequenceToken"}, + "rejectedLogEventsInfo":{"shape":"RejectedLogEventsInfo"} + } + }, + "PutMetricFilterRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "filterName", + "filterPattern", + "metricTransformations" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterName":{"shape":"FilterName"}, + "filterPattern":{"shape":"FilterPattern"}, + "metricTransformations":{"shape":"MetricTransformations"} + } + }, + "PutRetentionPolicyRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "retentionInDays" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "retentionInDays":{"shape":"Days"} + } + }, + "PutSubscriptionFilterRequest":{ + "type":"structure", + "required":[ + "logGroupName", + "filterName", + "filterPattern", + "destinationArn" + ], + "members":{ + "logGroupName":{"shape":"LogGroupName"}, + "filterName":{"shape":"FilterName"}, + "filterPattern":{"shape":"FilterPattern"}, + "destinationArn":{"shape":"DestinationArn"}, + "roleArn":{"shape":"RoleArn"} + } + }, + "RejectedLogEventsInfo":{ + "type":"structure", + "members":{ + "tooNewLogEventStartIndex":{"shape":"LogEventIndex"}, + "tooOldLogEventEndIndex":{"shape":"LogEventIndex"}, + "expiredLogEventEndIndex":{"shape":"LogEventIndex"} + } + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RoleArn":{ + "type":"string", + "min":1 + }, + 
"SearchedLogStream":{ + "type":"structure", + "members":{ + "logStreamName":{"shape":"LogStreamName"}, + "searchedCompletely":{"shape":"LogStreamSearchedCompletely"} + } + }, + "SearchedLogStreams":{ + "type":"list", + "member":{"shape":"SearchedLogStream"} + }, + "SequenceToken":{ + "type":"string", + "min":1 + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + }, + "exception":true, + "fault":true + }, + "StartFromHead":{"type":"boolean"}, + "StoredBytes":{ + "type":"long", + "min":0 + }, + "SubscriptionFilter":{ + "type":"structure", + "members":{ + "filterName":{"shape":"FilterName"}, + "logGroupName":{"shape":"LogGroupName"}, + "filterPattern":{"shape":"FilterPattern"}, + "destinationArn":{"shape":"DestinationArn"}, + "roleArn":{"shape":"RoleArn"}, + "creationTime":{"shape":"Timestamp"} + } + }, + "SubscriptionFilters":{ + "type":"list", + "member":{"shape":"SubscriptionFilter"} + }, + "TargetArn":{ + "type":"string", + "min":1 + }, + "TestEventMessages":{ + "type":"list", + "member":{"shape":"EventMessage"}, + "max":50, + "min":1 + }, + "TestMetricFilterRequest":{ + "type":"structure", + "required":[ + "filterPattern", + "logEventMessages" + ], + "members":{ + "filterPattern":{"shape":"FilterPattern"}, + "logEventMessages":{"shape":"TestEventMessages"} + } + }, + "TestMetricFilterResponse":{ + "type":"structure", + "members":{ + "matches":{"shape":"MetricFilterMatches"} + } + }, + "Timestamp":{ + "type":"long", + "min":0 + }, + "Token":{"type":"string"}, + "Value":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,789 @@ +{ + "version": "2.0", + "service": "Amazon CloudWatch Logs API Reference

    This is the Amazon CloudWatch Logs API Reference. Amazon CloudWatch Logs enables you to monitor, store, and access your system, application, and custom log files. This guide provides detailed information about Amazon CloudWatch Logs actions, data types, parameters, and errors. For detailed information about Amazon CloudWatch Logs features and their associated API calls, go to the Amazon CloudWatch Developer Guide.

    Use the following links to get started using the Amazon CloudWatch Logs API Reference:

    • Actions: An alphabetical list of all Amazon CloudWatch Logs actions.
    • Data Types: An alphabetical list of all Amazon CloudWatch Logs data types.
    • Common Parameters: Parameters that all Query actions can use.
    • Common Errors: Client and server errors that all actions can return.
    • Regions and Endpoints: Itemized regions and endpoints for all AWS products.

    In addition to using the Amazon CloudWatch Logs API, you can also use the following SDKs and third-party libraries to access Amazon CloudWatch Logs programmatically.

    Developers in the AWS developer community also provide their own libraries, which you can find at the following AWS developer centers:

    ", + "operations": { + "CancelExportTask": "

    Cancels an export task if it is in PENDING or RUNNING state.

    ", + "CreateExportTask": "

    Creates an ExportTask which allows you to efficiently export data from a Log Group to your Amazon S3 bucket.

    This is an asynchronous call. If all the required information is provided, this API will initiate an export task and respond with the task Id. Once started, DescribeExportTasks can be used to get the status of an export task.

    You can export logs from multiple log groups or multiple time ranges to the same Amazon S3 bucket. To separate out log data for each export task, you can specify a prefix that will be used as the Amazon S3 key prefix for all exported objects.

    ", + "CreateLogGroup": "

    Creates a new log group with the specified name. The name of the log group must be unique within a region for an AWS account. You can create up to 500 log groups per account.

    You must use the following guidelines when naming a log group:

    • Log group names can be between 1 and 512 characters long.
    • Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period).

    ", + "CreateLogStream": "

    Creates a new log stream in the specified log group. The name of the log stream must be unique within the log group. There is no limit on the number of log streams that can exist in a log group.

    You must use the following guidelines when naming a log stream:

    • Log stream names can be between 1 and 512 characters long.
    • The ':' colon character is not allowed.

    ", + "DeleteDestination": "

    Deletes the destination with the specified name and eventually disables all the subscription filters that publish to it. This will not delete the physical resource encapsulated by the destination.

    ", + "DeleteLogGroup": "

    Deletes the log group with the specified name and permanently deletes all the archived log events associated with it.

    ", + "DeleteLogStream": "

    Deletes a log stream and permanently deletes all the archived log events associated with it.

    ", + "DeleteMetricFilter": "

    Deletes a metric filter associated with the specified log group.

    ", + "DeleteRetentionPolicy": "

    Deletes the retention policy of the specified log group. Log events would not expire if they belong to log groups without a retention policy.

    ", + "DeleteSubscriptionFilter": "

    Deletes a subscription filter associated with the specified log group.

    ", + "DescribeDestinations": "

    Returns all the destinations that are associated with the AWS account making the request. The list returned in the response is ASCII-sorted by destination name.

    By default, this operation returns up to 50 destinations. If there are more destinations to list, the response would contain a nextToken value in the response body. You can also limit the number of destinations returned in the response by specifying the limit parameter in the request.

    ", + "DescribeExportTasks": "

    Returns all the export tasks that are associated with the AWS account making the request. The export tasks can be filtered based on TaskId or TaskStatus.

    By default, this operation returns up to 50 export tasks that satisfy the specified filters. If there are more export tasks to list, the response would contain a nextToken value in the response body. You can also limit the number of export tasks returned in the response by specifying the limit parameter in the request.

    ", + "DescribeLogGroups": "

    Returns all the log groups that are associated with the AWS account making the request. The list returned in the response is ASCII-sorted by log group name.

    By default, this operation returns up to 50 log groups. If there are more log groups to list, the response would contain a nextToken value in the response body. You can also limit the number of log groups returned in the response by specifying the limit parameter in the request.

    ", + "DescribeLogStreams": "

    Returns all the log streams that are associated with the specified log group. The list returned in the response is ASCII-sorted by log stream name.

    By default, this operation returns up to 50 log streams. If there are more log streams to list, the response would contain a nextToken value in the response body. You can also limit the number of log streams returned in the response by specifying the limit parameter in the request. This operation has a limit of five transactions per second, after which transactions are throttled.

    ", + "DescribeMetricFilters": "

    Returns all the metrics filters associated with the specified log group. The list returned in the response is ASCII-sorted by filter name.

    By default, this operation returns up to 50 metric filters. If there are more metric filters to list, the response would contain a nextToken value in the response body. You can also limit the number of metric filters returned in the response by specifying the limit parameter in the request.

    ", + "DescribeSubscriptionFilters": "

    Returns all the subscription filters associated with the specified log group. The list returned in the response is ASCII-sorted by filter name.

    By default, this operation returns up to 50 subscription filters. If there are more subscription filters to list, the response would contain a nextToken value in the response body. You can also limit the number of subscription filters returned in the response by specifying the limit parameter in the request.

    ", + "FilterLogEvents": "

    Retrieves log events, optionally filtered by a filter pattern from the specified log group. You can provide an optional time range to filter the results on the event timestamp. You can limit the streams searched to an explicit list of logStreamNames.

    By default, this operation returns as much matching log events as can fit in a response size of 1MB, up to 10,000 log events, or all the events found within a time-bounded scan window. If the response includes a nextToken, then there is more data to search, and the search can be resumed with a new request providing the nextToken. The response will contain a list of searchedLogStreams that contains information about which streams were searched in the request and whether they have been searched completely or require further pagination. The limit parameter in the request. can be used to specify the maximum number of events to return in a page.

    ", + "GetLogEvents": "

    Retrieves log events from the specified log stream. You can provide an optional time range to filter the results on the event timestamp.

    By default, this operation returns as much log events as can fit in a response size of 1MB, up to 10,000 log events. The response will always include a nextForwardToken and a nextBackwardToken in the response body. You can use any of these tokens in subsequent GetLogEvents requests to paginate through events in either forward or backward direction. You can also limit the number of log events returned in the response by specifying the limit parameter in the request.

    ", + "PutDestination": "

    Creates or updates a Destination. A destination encapsulates a physical resource (such as a Kinesis stream) and allows you to subscribe to a real-time stream of log events of a different account, ingested through PutLogEvents requests. Currently, the only supported physical resource is a Amazon Kinesis stream belonging to the same account as the destination.

    A destination controls what is written to its Amazon Kinesis stream through an access policy. By default, PutDestination does not set any access policy with the destination, which means a cross-account user will not be able to call PutSubscriptionFilter against this destination. To enable that, the destination owner must call PutDestinationPolicy after PutDestination.

    ", + "PutDestinationPolicy": "

    Creates or updates an access policy associated with an existing Destination. An access policy is an IAM policy document that is used to authorize claims to register a subscription filter against a given destination.

    ", + "PutLogEvents": "

    Uploads a batch of log events to the specified log stream.

    Every PutLogEvents request must include the sequenceToken obtained from the response of the previous request. An upload in a newly created log stream does not require a sequenceToken.

    The batch of events must satisfy the following constraints:

    • The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
    • None of the log events in the batch can be more than 2 hours in the future.
    • None of the log events in the batch can be older than 14 days or the retention period of the log group.
    • The log events in the batch must be in chronological ordered by their timestamp.
    • The maximum number of log events in a batch is 10,000.

    ", + "PutMetricFilter": "

    Creates or updates a metric filter and associates it with the specified log group. Metric filters allow you to configure rules to extract metric data from log events ingested through PutLogEvents requests.

    The maximum number of metric filters that can be associated with a log group is 100.

    ", + "PutRetentionPolicy": "

    Sets the retention of the specified log group. A retention policy allows you to configure the number of days you want to retain log events in the specified log group.

    ", + "PutSubscriptionFilter": "

    Creates or updates a subscription filter and associates it with the specified log group. Subscription filters allow you to subscribe to a real-time stream of log events ingested through PutLogEvents requests and have them delivered to a specific destination. Currently, the supported destinations are:

    • A Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.
    • A logical destination (used via an ARN of Destination) belonging to a different account, for cross-account delivery.

    Currently there can only be one subscription filter associated with a log group.

    ", + "TestMetricFilter": "

    Tests the filter pattern of a metric filter against a sample of log event messages. You can use this operation to validate the correctness of a metric filter pattern.

    " + }, + "shapes": { + "AccessPolicy": { + "base": null, + "refs": { + "Destination$accessPolicy": "

    An IAM policy document that governs which AWS accounts can create subscription filters against this destination.

    ", + "PutDestinationPolicyRequest$accessPolicy": "

    An IAM policy document that authorizes cross-account users to deliver their log events to associated destination.

    " + } + }, + "Arn": { + "base": null, + "refs": { + "Destination$arn": "

    ARN of this destination.

    ", + "LogGroup$arn": null, + "LogStream$arn": null + } + }, + "CancelExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CreateExportTaskRequest": { + "base": null, + "refs": { + } + }, + "CreateExportTaskResponse": { + "base": null, + "refs": { + } + }, + "CreateLogGroupRequest": { + "base": null, + "refs": { + } + }, + "CreateLogStreamRequest": { + "base": null, + "refs": { + } + }, + "DataAlreadyAcceptedException": { + "base": null, + "refs": { + } + }, + "Days": { + "base": "

    Specifies the number of days you want to retain log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653.

    ", + "refs": { + "LogGroup$retentionInDays": null, + "PutRetentionPolicyRequest$retentionInDays": null + } + }, + "DeleteDestinationRequest": { + "base": null, + "refs": { + } + }, + "DeleteLogGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteLogStreamRequest": { + "base": null, + "refs": { + } + }, + "DeleteMetricFilterRequest": { + "base": null, + "refs": { + } + }, + "DeleteRetentionPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteSubscriptionFilterRequest": { + "base": null, + "refs": { + } + }, + "Descending": { + "base": null, + "refs": { + "DescribeLogStreamsRequest$descending": "

    If set to true, results are returned in descending order. If you don't specify a value or set it to false, results are returned in ascending order.

    " + } + }, + "DescribeDestinationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDestinationsResponse": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeExportTasksResponse": { + "base": null, + "refs": { + } + }, + "DescribeLimit": { + "base": "

    The maximum number of results to return.

    ", + "refs": { + "DescribeDestinationsRequest$limit": null, + "DescribeExportTasksRequest$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the request would return up to 50 items.

    ", + "DescribeLogGroupsRequest$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the request would return up to 50 items.

    ", + "DescribeLogStreamsRequest$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the request would return up to 50 items.

    ", + "DescribeMetricFiltersRequest$limit": "

    The maximum number of items returned in the response. If you don't specify a value, the request would return up to 50 items.

    ", + "DescribeSubscriptionFiltersRequest$limit": null + } + }, + "DescribeLogGroupsRequest": { + "base": null, + "refs": { + } + }, + "DescribeLogGroupsResponse": { + "base": null, + "refs": { + } + }, + "DescribeLogStreamsRequest": { + "base": null, + "refs": { + } + }, + "DescribeLogStreamsResponse": { + "base": null, + "refs": { + } + }, + "DescribeMetricFiltersRequest": { + "base": null, + "refs": { + } + }, + "DescribeMetricFiltersResponse": { + "base": null, + "refs": { + } + }, + "DescribeSubscriptionFiltersRequest": { + "base": null, + "refs": { + } + }, + "DescribeSubscriptionFiltersResponse": { + "base": null, + "refs": { + } + }, + "Destination": { + "base": "

    A cross account destination that is the recipient of subscription log events.

    ", + "refs": { + "Destinations$member": null, + "PutDestinationResponse$destination": null + } + }, + "DestinationArn": { + "base": null, + "refs": { + "PutSubscriptionFilterRequest$destinationArn": "

    The ARN of the destination to deliver matching log events to. Currently, the supported destinations are:

    • A Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.
    • A logical destination (used via an ARN of Destination) belonging to a different account, for cross-account delivery.

    ", + "SubscriptionFilter$destinationArn": null + } + }, + "DestinationName": { + "base": null, + "refs": { + "DeleteDestinationRequest$destinationName": "

    The name of destination to delete.

    ", + "DescribeDestinationsRequest$DestinationNamePrefix": "

    Will only return destinations that match the provided destinationNamePrefix. If you don't specify a value, no prefix is applied.

    ", + "Destination$destinationName": "

    Name of the destination.

    ", + "PutDestinationPolicyRequest$destinationName": "

    A name for an existing destination.

    ", + "PutDestinationRequest$destinationName": "

    A name for the destination.

    " + } + }, + "Destinations": { + "base": null, + "refs": { + "DescribeDestinationsResponse$destinations": null + } + }, + "EventId": { + "base": null, + "refs": { + "FilteredLogEvent$eventId": "

    A unique identifier for this event.

    " + } + }, + "EventMessage": { + "base": null, + "refs": { + "FilteredLogEvent$message": "

    The data contained in the log event.

    ", + "InputLogEvent$message": null, + "MetricFilterMatchRecord$eventMessage": null, + "OutputLogEvent$message": null, + "TestEventMessages$member": null + } + }, + "EventNumber": { + "base": null, + "refs": { + "MetricFilterMatchRecord$eventNumber": null + } + }, + "EventsLimit": { + "base": "

    The maximum number of events to return.

    ", + "refs": { + "FilterLogEventsRequest$limit": "

    The maximum number of events to return in a page of results. Default is 10,000 events.

    ", + "GetLogEventsRequest$limit": "

    The maximum number of log events returned in the response. If you don't specify a value, the request would return as many log events as can fit in a response size of 1MB, up to 10,000 log events.

    " + } + }, + "ExportDestinationBucket": { + "base": null, + "refs": { + "CreateExportTaskRequest$destination": "

    Name of Amazon S3 bucket to which the log data will be exported.

    NOTE: Only buckets in the same AWS region are supported

    ", + "ExportTask$destination": "

    Name of Amazon S3 bucket to which the log data was exported.

    " + } + }, + "ExportDestinationPrefix": { + "base": null, + "refs": { + "CreateExportTaskRequest$destinationPrefix": "

    Prefix that will be used as the start of Amazon S3 key for every object exported. If not specified, this defaults to 'exportedlogs'.

    ", + "ExportTask$destinationPrefix": "

    Prefix that was used as the start of Amazon S3 key for every object exported.

    " + } + }, + "ExportTask": { + "base": "

    Represents an export task.

    ", + "refs": { + "ExportTasks$member": null + } + }, + "ExportTaskExecutionInfo": { + "base": "

    Represents the status of an export task.

    ", + "refs": { + "ExportTask$executionInfo": "

    Execution info about the export task.

    " + } + }, + "ExportTaskId": { + "base": null, + "refs": { + "CancelExportTaskRequest$taskId": "

    Id of the export task to cancel.

    ", + "CreateExportTaskResponse$taskId": "

    Id of the export task that got created.

    ", + "DescribeExportTasksRequest$taskId": "

    Export task that matches the specified task Id will be returned. This can result in zero or one export task.

    ", + "ExportTask$taskId": "

    Id of the export task.

    " + } + }, + "ExportTaskName": { + "base": null, + "refs": { + "CreateExportTaskRequest$taskName": "

    The name of the export task.

    ", + "ExportTask$taskName": "

    The name of the export task.

    " + } + }, + "ExportTaskStatus": { + "base": "

    Represents the status of an export task.

    ", + "refs": { + "ExportTask$status": "

    Status of the export task.

    " + } + }, + "ExportTaskStatusCode": { + "base": null, + "refs": { + "DescribeExportTasksRequest$statusCode": "

    All export tasks that matches the specified status code will be returned. This can return zero or more export tasks.

    ", + "ExportTaskStatus$code": "

    Status code of the export task.

    " + } + }, + "ExportTaskStatusMessage": { + "base": null, + "refs": { + "ExportTaskStatus$message": "

    Status message related to the code.

    " + } + }, + "ExportTasks": { + "base": "

    A list of export tasks.

    ", + "refs": { + "DescribeExportTasksResponse$exportTasks": null + } + }, + "ExtractedValues": { + "base": null, + "refs": { + "MetricFilterMatchRecord$extractedValues": null + } + }, + "FilterCount": { + "base": "

    The number of metric filters associated with the log group.

    ", + "refs": { + "LogGroup$metricFilterCount": null + } + }, + "FilterLogEventsRequest": { + "base": null, + "refs": { + } + }, + "FilterLogEventsResponse": { + "base": null, + "refs": { + } + }, + "FilterName": { + "base": "

    A name for a metric or subscription filter.

    ", + "refs": { + "DeleteMetricFilterRequest$filterName": "

    The name of the metric filter to delete.

    ", + "DeleteSubscriptionFilterRequest$filterName": "

    The name of the subscription filter to delete.

    ", + "DescribeMetricFiltersRequest$filterNamePrefix": "

    Will only return metric filters that match the provided filterNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "DescribeSubscriptionFiltersRequest$filterNamePrefix": "

    Will only return subscription filters that match the provided filterNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "MetricFilter$filterName": null, + "PutMetricFilterRequest$filterName": "

    A name for the metric filter.

    ", + "PutSubscriptionFilterRequest$filterName": "

    A name for the subscription filter.

    ", + "SubscriptionFilter$filterName": null + } + }, + "FilterPattern": { + "base": "

    A symbolic description of how Amazon CloudWatch Logs should interpret the data in each log event. For example, a log event may contain timestamps, IP addresses, strings, and so on. You use the filter pattern to specify what to look for in the log event message.

    ", + "refs": { + "FilterLogEventsRequest$filterPattern": "

    A valid CloudWatch Logs filter pattern to use for filtering the response. If not provided, all the events are matched.

    ", + "MetricFilter$filterPattern": null, + "PutMetricFilterRequest$filterPattern": "

    A valid CloudWatch Logs filter pattern for extracting metric data out of ingested log events.

    ", + "PutSubscriptionFilterRequest$filterPattern": "

    A valid CloudWatch Logs filter pattern for subscribing to a filtered stream of log events.

    ", + "SubscriptionFilter$filterPattern": null, + "TestMetricFilterRequest$filterPattern": null + } + }, + "FilteredLogEvent": { + "base": "

    Represents a matched event from a FilterLogEvents request.

    ", + "refs": { + "FilteredLogEvents$member": null + } + }, + "FilteredLogEvents": { + "base": "

    A list of matched FilteredLogEvent objects returned from a FilterLogEvents request.

    ", + "refs": { + "FilterLogEventsResponse$events": "

    A list of FilteredLogEvent objects representing the matched events from the request.

    " + } + }, + "GetLogEventsRequest": { + "base": null, + "refs": { + } + }, + "GetLogEventsResponse": { + "base": null, + "refs": { + } + }, + "InputLogEvent": { + "base": "

    A log event is a record of some activity that was recorded by the application or resource being monitored. The log event record that Amazon CloudWatch Logs understands contains two properties: the timestamp of when the event occurred, and the raw event message.

    ", + "refs": { + "InputLogEvents$member": null + } + }, + "InputLogEvents": { + "base": "

    A list of log events belonging to a log stream.

    ", + "refs": { + "PutLogEventsRequest$logEvents": null + } + }, + "InputLogStreamNames": { + "base": "

    A list of log stream names.

    ", + "refs": { + "FilterLogEventsRequest$logStreamNames": "

    Optional list of log stream names within the specified log group to search. Defaults to all the log streams in the log group.

    " + } + }, + "Interleaved": { + "base": null, + "refs": { + "FilterLogEventsRequest$interleaved": "

    If provided, the API will make a best effort to provide responses that contain events from multiple log streams within the log group interleaved in a single response. If not provided, all the matched log events in the first log stream will be searched first, then those in the next log stream, etc.

    " + } + }, + "InvalidOperationException": { + "base": "

    Returned if the operation is not valid on the specified resource

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    Returned if a parameter of the request is incorrectly specified.

    ", + "refs": { + } + }, + "InvalidSequenceTokenException": { + "base": null, + "refs": { + } + }, + "LimitExceededException": { + "base": "

    Returned if you have reached the maximum number of resources that can be created.

    ", + "refs": { + } + }, + "LogEventIndex": { + "base": null, + "refs": { + "RejectedLogEventsInfo$tooNewLogEventStartIndex": null, + "RejectedLogEventsInfo$tooOldLogEventEndIndex": null, + "RejectedLogEventsInfo$expiredLogEventEndIndex": null + } + }, + "LogGroup": { + "base": null, + "refs": { + "LogGroups$member": null + } + }, + "LogGroupName": { + "base": null, + "refs": { + "CreateExportTaskRequest$logGroupName": "

    The name of the log group to export.

    ", + "CreateLogGroupRequest$logGroupName": "

    The name of the log group to create.

    ", + "CreateLogStreamRequest$logGroupName": "

    The name of the log group under which the log stream is to be created.

    ", + "DeleteLogGroupRequest$logGroupName": "

    The name of the log group to delete.

    ", + "DeleteLogStreamRequest$logGroupName": "

    The name of the log group under which the log stream to delete belongs.

    ", + "DeleteMetricFilterRequest$logGroupName": "

    The name of the log group that is associated with the metric filter to delete.

    ", + "DeleteRetentionPolicyRequest$logGroupName": "

    The name of the log group that is associated with the retention policy to delete.

    ", + "DeleteSubscriptionFilterRequest$logGroupName": "

    The name of the log group that is associated with the subscription filter to delete.

    ", + "DescribeLogGroupsRequest$logGroupNamePrefix": "

    Will only return log groups that match the provided logGroupNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "DescribeLogStreamsRequest$logGroupName": "

    The log group name for which log streams are to be listed.

    ", + "DescribeMetricFiltersRequest$logGroupName": "

    The log group name for which metric filters are to be listed.

    ", + "DescribeSubscriptionFiltersRequest$logGroupName": "

    The log group name for which subscription filters are to be listed.

    ", + "ExportTask$logGroupName": "

    The name of the log group from which logs data was exported.

    ", + "FilterLogEventsRequest$logGroupName": "

    The name of the log group to query.

    ", + "GetLogEventsRequest$logGroupName": "

    The name of the log group to query.

    ", + "LogGroup$logGroupName": null, + "PutLogEventsRequest$logGroupName": "

    The name of the log group to put log events to.

    ", + "PutMetricFilterRequest$logGroupName": "

    The name of the log group to associate the metric filter with.

    ", + "PutRetentionPolicyRequest$logGroupName": "

    The name of the log group to associate the retention policy with.

    ", + "PutSubscriptionFilterRequest$logGroupName": "

    The name of the log group to associate the subscription filter with.

    ", + "SubscriptionFilter$logGroupName": null + } + }, + "LogGroups": { + "base": "

    A list of log groups.

    ", + "refs": { + "DescribeLogGroupsResponse$logGroups": null + } + }, + "LogStream": { + "base": "

    A log stream is sequence of log events from a single emitter of logs.

    ", + "refs": { + "LogStreams$member": null + } + }, + "LogStreamName": { + "base": null, + "refs": { + "CreateExportTaskRequest$logStreamNamePrefix": "

    Will only export log streams that match the provided logStreamNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "CreateLogStreamRequest$logStreamName": "

    The name of the log stream to create.

    ", + "DeleteLogStreamRequest$logStreamName": "

    The name of the log stream to delete.

    ", + "DescribeLogStreamsRequest$logStreamNamePrefix": "

    Will only return log streams that match the provided logStreamNamePrefix. If you don't specify a value, no prefix filter is applied.

    ", + "FilteredLogEvent$logStreamName": "

    The name of the log stream this event belongs to.

    ", + "GetLogEventsRequest$logStreamName": "

    The name of the log stream to query.

    ", + "InputLogStreamNames$member": null, + "LogStream$logStreamName": null, + "PutLogEventsRequest$logStreamName": "

    The name of the log stream to put log events to.

    ", + "SearchedLogStream$logStreamName": "

    The name of the log stream.

    " + } + }, + "LogStreamSearchedCompletely": { + "base": null, + "refs": { + "SearchedLogStream$searchedCompletely": "

    Indicates whether all the events in this log stream were searched or more data exists to search by paginating further.

    " + } + }, + "LogStreams": { + "base": "

    A list of log streams.

    ", + "refs": { + "DescribeLogStreamsResponse$logStreams": null + } + }, + "MetricFilter": { + "base": "

    Metric filters can be used to express how Amazon CloudWatch Logs would extract metric observations from ingested log events and transform them to metric data in a CloudWatch metric.

    ", + "refs": { + "MetricFilters$member": null + } + }, + "MetricFilterMatchRecord": { + "base": null, + "refs": { + "MetricFilterMatches$member": null + } + }, + "MetricFilterMatches": { + "base": null, + "refs": { + "TestMetricFilterResponse$matches": null + } + }, + "MetricFilters": { + "base": null, + "refs": { + "DescribeMetricFiltersResponse$metricFilters": null + } + }, + "MetricName": { + "base": "

    The name of the CloudWatch metric to which the monitored log information should be published. For example, you may publish to a metric called ErrorCount.

    ", + "refs": { + "MetricTransformation$metricName": null + } + }, + "MetricNamespace": { + "base": "

    The destination namespace of the new CloudWatch metric.

    ", + "refs": { + "MetricTransformation$metricNamespace": null + } + }, + "MetricTransformation": { + "base": null, + "refs": { + "MetricTransformations$member": null + } + }, + "MetricTransformations": { + "base": null, + "refs": { + "MetricFilter$metricTransformations": null, + "PutMetricFilterRequest$metricTransformations": "

    A collection of information needed to define how metric data gets emitted.

    " + } + }, + "MetricValue": { + "base": "

    What to publish to the metric. For example, if you're counting the occurrences of a particular term like \"Error\", the value will be \"1\" for each occurrence. If you're counting the bytes transferred the published value will be the value in the log event.

    ", + "refs": { + "MetricTransformation$metricValue": null + } + }, + "NextToken": { + "base": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous request. The token expires after 24 hours.

    ", + "refs": { + "DescribeDestinationsRequest$nextToken": null, + "DescribeDestinationsResponse$nextToken": null, + "DescribeExportTasksRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous DescribeExportTasks request.

    ", + "DescribeExportTasksResponse$nextToken": null, + "DescribeLogGroupsRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous DescribeLogGroups request.

    ", + "DescribeLogGroupsResponse$nextToken": null, + "DescribeLogStreamsRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous DescribeLogStreams request.

    ", + "DescribeLogStreamsResponse$nextToken": null, + "DescribeMetricFiltersRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the response of the previous DescribeMetricFilters request.

    ", + "DescribeMetricFiltersResponse$nextToken": null, + "DescribeSubscriptionFiltersRequest$nextToken": null, + "DescribeSubscriptionFiltersResponse$nextToken": null, + "FilterLogEventsRequest$nextToken": "

    A pagination token obtained from a FilterLogEvents response to continue paginating the FilterLogEvents results. This token is omitted from the response when there are no other events to display.

    ", + "FilterLogEventsResponse$nextToken": "

    A pagination token obtained from a FilterLogEvents response to continue paginating the FilterLogEvents results. This token is omitted from the response when there are no other events to display.

    ", + "GetLogEventsRequest$nextToken": "

    A string token used for pagination that points to the next page of results. It must be a value obtained from the nextForwardToken or nextBackwardToken fields in the response of the previous GetLogEvents request.

    ", + "GetLogEventsResponse$nextForwardToken": null, + "GetLogEventsResponse$nextBackwardToken": null + } + }, + "OperationAbortedException": { + "base": "

    Returned if multiple requests to update the same resource were in conflict.

    ", + "refs": { + } + }, + "OrderBy": { + "base": null, + "refs": { + "DescribeLogStreamsRequest$orderBy": "

    Specifies what to order the returned log streams by. Valid arguments are 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot also contain a logStreamNamePrefix.

    " + } + }, + "OutputLogEvent": { + "base": null, + "refs": { + "OutputLogEvents$member": null + } + }, + "OutputLogEvents": { + "base": null, + "refs": { + "GetLogEventsResponse$events": null + } + }, + "PutDestinationPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutDestinationRequest": { + "base": null, + "refs": { + } + }, + "PutDestinationResponse": { + "base": null, + "refs": { + } + }, + "PutLogEventsRequest": { + "base": null, + "refs": { + } + }, + "PutLogEventsResponse": { + "base": null, + "refs": { + } + }, + "PutMetricFilterRequest": { + "base": null, + "refs": { + } + }, + "PutRetentionPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutSubscriptionFilterRequest": { + "base": null, + "refs": { + } + }, + "RejectedLogEventsInfo": { + "base": null, + "refs": { + "PutLogEventsResponse$rejectedLogEventsInfo": null + } + }, + "ResourceAlreadyExistsException": { + "base": "

    Returned if the specified resource already exists.

    ", + "refs": { + } + }, + "ResourceNotFoundException": { + "base": "

    Returned if the specified resource does not exist.

    ", + "refs": { + } + }, + "RoleArn": { + "base": null, + "refs": { + "Destination$roleArn": "

    A role for impersonation for delivering log events to the target.

    ", + "PutDestinationRequest$roleArn": "

    The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to do Amazon Kinesis PutRecord requests on the desitnation stream.

    ", + "PutSubscriptionFilterRequest$roleArn": "

    The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination (used via an ARN of Destination) for cross-account delivery.

    ", + "SubscriptionFilter$roleArn": null + } + }, + "SearchedLogStream": { + "base": "

    An object indicating the search status of a log stream in a FilterLogEvents request.

    ", + "refs": { + "SearchedLogStreams$member": null + } + }, + "SearchedLogStreams": { + "base": "

    A list of SearchedLogStream objects indicating the search status for log streams in a FilterLogEvents request.

    ", + "refs": { + "FilterLogEventsResponse$searchedLogStreams": "

    A list of SearchedLogStream objects indicating which log streams have been searched in this request and whether each has been searched completely or still has more to be paginated.

    " + } + }, + "SequenceToken": { + "base": "

    A string token used for making PutLogEvents requests. A sequenceToken can only be used once, and PutLogEvents requests must include the sequenceToken obtained from the response of the previous request.

    ", + "refs": { + "DataAlreadyAcceptedException$expectedSequenceToken": null, + "InvalidSequenceTokenException$expectedSequenceToken": null, + "LogStream$uploadSequenceToken": null, + "PutLogEventsRequest$sequenceToken": "

    A string token that must be obtained from the response of the previous PutLogEvents request.

    ", + "PutLogEventsResponse$nextSequenceToken": null + } + }, + "ServiceUnavailableException": { + "base": "

    Returned if the service cannot complete the request.

    ", + "refs": { + } + }, + "StartFromHead": { + "base": null, + "refs": { + "GetLogEventsRequest$startFromHead": "

    If set to true, the earliest log events would be returned first. The default is false (the latest log events are returned first).

    " + } + }, + "StoredBytes": { + "base": null, + "refs": { + "LogGroup$storedBytes": null, + "LogStream$storedBytes": null + } + }, + "SubscriptionFilter": { + "base": null, + "refs": { + "SubscriptionFilters$member": null + } + }, + "SubscriptionFilters": { + "base": null, + "refs": { + "DescribeSubscriptionFiltersResponse$subscriptionFilters": null + } + }, + "TargetArn": { + "base": null, + "refs": { + "Destination$targetArn": "

    ARN of the physical target where the log events will be delivered (eg. ARN of a Kinesis stream).

    ", + "PutDestinationRequest$targetArn": "

    The ARN of an Amazon Kinesis stream to deliver matching log events to.

    " + } + }, + "TestEventMessages": { + "base": null, + "refs": { + "TestMetricFilterRequest$logEventMessages": "

    A list of log event messages to test.

    " + } + }, + "TestMetricFilterRequest": { + "base": null, + "refs": { + } + }, + "TestMetricFilterResponse": { + "base": null, + "refs": { + } + }, + "Timestamp": { + "base": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

    ", + "refs": { + "CreateExportTaskRequest$from": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. It indicates the start time of the range for the request. Events with a timestamp prior to this time will not be exported.

    ", + "CreateExportTaskRequest$to": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. It indicates the end time of the range for the request. Events with a timestamp later than this time will not be exported.

    ", + "Destination$creationTime": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC specifying when this destination was created.

    ", + "ExportTask$from": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. Events with a timestamp prior to this time are not exported.

    ", + "ExportTask$to": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not exported.

    ", + "ExportTaskExecutionInfo$creationTime": "

    A point in time when the export task got created.

    ", + "ExportTaskExecutionInfo$completionTime": "

    A point in time when the export task got completed.

    ", + "FilterLogEventsRequest$startTime": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. If provided, events with a timestamp prior to this time are not returned.

    ", + "FilterLogEventsRequest$endTime": "

    A point in time expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. If provided, events with a timestamp later than this time are not returned.

    ", + "FilteredLogEvent$timestamp": null, + "FilteredLogEvent$ingestionTime": null, + "GetLogEventsRequest$startTime": null, + "GetLogEventsRequest$endTime": null, + "InputLogEvent$timestamp": null, + "LogGroup$creationTime": null, + "LogStream$creationTime": null, + "LogStream$firstEventTimestamp": null, + "LogStream$lastEventTimestamp": null, + "LogStream$lastIngestionTime": null, + "MetricFilter$creationTime": null, + "OutputLogEvent$timestamp": null, + "OutputLogEvent$ingestionTime": null, + "SubscriptionFilter$creationTime": null + } + }, + "Token": { + "base": null, + "refs": { + "ExtractedValues$key": null + } + }, + "Value": { + "base": null, + "refs": { + "ExtractedValues$value": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,49 @@ +{ + "pagination": { + 
"DescribeDestinations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "destinations" + }, + "DescribeLogGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "logGroups" + }, + "DescribeLogStreams": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "logStreams" + }, + "DescribeMetricFilters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "metricFilters" + }, + "DescribeSubscriptionFilters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": "subscriptionFilters" + }, + "FilterLogEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "limit", + "result_key": [ + "events", + "searchedLogStreams" + ] + }, + "GetLogEvents": { + "input_token": "nextToken", + "output_token": "nextForwardToken", + "limit_key": "limit", + "result_key": "events" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1745 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-12-12", + "endpointPrefix":"machinelearning", + "jsonVersion":"1.1", + "serviceFullName":"Amazon Machine Learning", + "signatureVersion":"v4", + "targetPrefix":"AmazonML_20141212", + "protocol":"json" + }, + "operations":{ + "CreateBatchPrediction":{ + 
"name":"CreateBatchPrediction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBatchPredictionInput"}, + "output":{"shape":"CreateBatchPredictionOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDataSourceFromRDS":{ + "name":"CreateDataSourceFromRDS", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataSourceFromRDSInput"}, + "output":{"shape":"CreateDataSourceFromRDSOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDataSourceFromRedshift":{ + "name":"CreateDataSourceFromRedshift", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataSourceFromRedshiftInput"}, + "output":{"shape":"CreateDataSourceFromRedshiftOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateDataSourceFromS3":{ + "name":"CreateDataSourceFromS3", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataSourceFromS3Input"}, + "output":{"shape":"CreateDataSourceFromS3Output"}, + "errors":[ + { + "shape":"InvalidInputException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateEvaluation":{ + "name":"CreateEvaluation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEvaluationInput"}, + "output":{"shape":"CreateEvaluationOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateMLModel":{ + "name":"CreateMLModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMLModelInput"}, + "output":{"shape":"CreateMLModelOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"IdempotentParameterMismatchException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "CreateRealtimeEndpoint":{ + "name":"CreateRealtimeEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRealtimeEndpointInput"}, + "output":{"shape":"CreateRealtimeEndpointOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteBatchPrediction":{ + "name":"DeleteBatchPrediction", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBatchPredictionInput"}, + "output":{"shape":"DeleteBatchPredictionOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteDataSource":{ + "name":"DeleteDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDataSourceInput"}, + "output":{"shape":"DeleteDataSourceOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteEvaluation":{ + "name":"DeleteEvaluation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEvaluationInput"}, + "output":{"shape":"DeleteEvaluationOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteMLModel":{ + "name":"DeleteMLModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMLModelInput"}, + "output":{"shape":"DeleteMLModelOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + 
"error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DeleteRealtimeEndpoint":{ + "name":"DeleteRealtimeEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRealtimeEndpointInput"}, + "output":{"shape":"DeleteRealtimeEndpointOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DescribeBatchPredictions":{ + "name":"DescribeBatchPredictions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBatchPredictionsInput"}, + "output":{"shape":"DescribeBatchPredictionsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DescribeDataSources":{ + "name":"DescribeDataSources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataSourcesInput"}, + "output":{"shape":"DescribeDataSourcesOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "DescribeEvaluations":{ + "name":"DescribeEvaluations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEvaluationsInput"}, + "output":{"shape":"DescribeEvaluationsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + 
"DescribeMLModels":{ + "name":"DescribeMLModels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMLModelsInput"}, + "output":{"shape":"DescribeMLModelsOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetBatchPrediction":{ + "name":"GetBatchPrediction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBatchPredictionInput"}, + "output":{"shape":"GetBatchPredictionOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetDataSource":{ + "name":"GetDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataSourceInput"}, + "output":{"shape":"GetDataSourceOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "GetEvaluation":{ + "name":"GetEvaluation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEvaluationInput"}, + "output":{"shape":"GetEvaluationOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } 
+ ] + }, + "GetMLModel":{ + "name":"GetMLModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMLModelInput"}, + "output":{"shape":"GetMLModelOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "Predict":{ + "name":"Predict", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PredictInput"}, + "output":{"shape":"PredictOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"LimitExceededException", + "error":{"httpStatusCode":417}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + { + "shape":"PredictorNotMountedException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateBatchPrediction":{ + "name":"UpdateBatchPrediction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBatchPredictionInput"}, + "output":{"shape":"UpdateBatchPredictionOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateDataSource":{ + "name":"UpdateDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDataSourceInput"}, + "output":{"shape":"UpdateDataSourceOutput"}, + "errors":[ + { + 
"shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateEvaluation":{ + "name":"UpdateEvaluation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateEvaluationInput"}, + "output":{"shape":"UpdateEvaluationOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + }, + "UpdateMLModel":{ + "name":"UpdateMLModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMLModelInput"}, + "output":{"shape":"UpdateMLModelOutput"}, + "errors":[ + { + "shape":"InvalidInputException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"ResourceNotFoundException", + "error":{"httpStatusCode":404}, + "exception":true + }, + { + "shape":"InternalServerException", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "Algorithm":{ + "type":"string", + "enum":["sgd"] + }, + "AwsUserArn":{ + "type":"string", + "pattern":"arn:aws:iam::[0-9]+:((user/.+)|(root))" + }, + "BatchPrediction":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"}, + "MLModelId":{"shape":"EntityId"}, + "BatchPredictionDataSourceId":{"shape":"EntityId"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "OutputUri":{"shape":"S3Url"}, + 
"Message":{"shape":"Message"} + } + }, + "BatchPredictionFilterVariable":{ + "type":"string", + "enum":[ + "CreatedAt", + "LastUpdatedAt", + "Status", + "Name", + "IAMUser", + "MLModelId", + "DataSourceId", + "DataURI" + ] + }, + "BatchPredictions":{ + "type":"list", + "member":{"shape":"BatchPrediction"} + }, + "ComparatorValue":{ + "type":"string", + "max":1024, + "pattern":".*\\S.*|^$" + }, + "ComputeStatistics":{"type":"boolean"}, + "CreateBatchPredictionInput":{ + "type":"structure", + "required":[ + "BatchPredictionId", + "MLModelId", + "BatchPredictionDataSourceId", + "OutputUri" + ], + "members":{ + "BatchPredictionId":{"shape":"EntityId"}, + "BatchPredictionName":{"shape":"EntityName"}, + "MLModelId":{"shape":"EntityId"}, + "BatchPredictionDataSourceId":{"shape":"EntityId"}, + "OutputUri":{"shape":"S3Url"} + } + }, + "CreateBatchPredictionOutput":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "CreateDataSourceFromRDSInput":{ + "type":"structure", + "required":[ + "DataSourceId", + "RDSData", + "RoleARN" + ], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataSourceName":{"shape":"EntityName"}, + "RDSData":{"shape":"RDSDataSpec"}, + "RoleARN":{"shape":"RoleARN"}, + "ComputeStatistics":{"shape":"ComputeStatistics"} + } + }, + "CreateDataSourceFromRDSOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "CreateDataSourceFromRedshiftInput":{ + "type":"structure", + "required":[ + "DataSourceId", + "DataSpec", + "RoleARN" + ], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataSourceName":{"shape":"EntityName"}, + "DataSpec":{"shape":"RedshiftDataSpec"}, + "RoleARN":{"shape":"RoleARN"}, + "ComputeStatistics":{"shape":"ComputeStatistics"} + } + }, + "CreateDataSourceFromRedshiftOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "CreateDataSourceFromS3Input":{ + "type":"structure", + "required":[ + "DataSourceId", + 
"DataSpec" + ], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataSourceName":{"shape":"EntityName"}, + "DataSpec":{"shape":"S3DataSpec"}, + "ComputeStatistics":{"shape":"ComputeStatistics"} + } + }, + "CreateDataSourceFromS3Output":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "CreateEvaluationInput":{ + "type":"structure", + "required":[ + "EvaluationId", + "MLModelId", + "EvaluationDataSourceId" + ], + "members":{ + "EvaluationId":{"shape":"EntityId"}, + "EvaluationName":{"shape":"EntityName"}, + "MLModelId":{"shape":"EntityId"}, + "EvaluationDataSourceId":{"shape":"EntityId"} + } + }, + "CreateEvaluationOutput":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "CreateMLModelInput":{ + "type":"structure", + "required":[ + "MLModelId", + "MLModelType", + "TrainingDataSourceId" + ], + "members":{ + "MLModelId":{"shape":"EntityId"}, + "MLModelName":{"shape":"EntityName"}, + "MLModelType":{"shape":"MLModelType"}, + "Parameters":{"shape":"TrainingParameters"}, + "TrainingDataSourceId":{"shape":"EntityId"}, + "Recipe":{"shape":"Recipe"}, + "RecipeUri":{"shape":"S3Url"} + } + }, + "CreateMLModelOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "CreateRealtimeEndpointInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "CreateRealtimeEndpointOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"}, + "RealtimeEndpointInfo":{"shape":"RealtimeEndpointInfo"} + } + }, + "DataRearrangement":{"type":"string"}, + "DataSchema":{ + "type":"string", + "max":131071 + }, + "DataSource":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataLocationS3":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + 
"LastUpdatedAt":{"shape":"EpochTime"}, + "DataSizeInBytes":{"shape":"LongType"}, + "NumberOfFiles":{"shape":"LongType"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "Message":{"shape":"Message"}, + "RedshiftMetadata":{"shape":"RedshiftMetadata"}, + "RDSMetadata":{"shape":"RDSMetadata"}, + "RoleARN":{"shape":"RoleARN"}, + "ComputeStatistics":{"shape":"ComputeStatistics"} + } + }, + "DataSourceFilterVariable":{ + "type":"string", + "enum":[ + "CreatedAt", + "LastUpdatedAt", + "Status", + "Name", + "DataLocationS3", + "IAMUser" + ] + }, + "DataSources":{ + "type":"list", + "member":{"shape":"DataSource"} + }, + "DeleteBatchPredictionInput":{ + "type":"structure", + "required":["BatchPredictionId"], + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "DeleteBatchPredictionOutput":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "DeleteDataSourceInput":{ + "type":"structure", + "required":["DataSourceId"], + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "DeleteDataSourceOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "DeleteEvaluationInput":{ + "type":"structure", + "required":["EvaluationId"], + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "DeleteEvaluationOutput":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "DeleteMLModelInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "DeleteMLModelOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "DeleteRealtimeEndpointInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "DeleteRealtimeEndpointOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"}, + "RealtimeEndpointInfo":{"shape":"RealtimeEndpointInfo"} + } + }, + 
"DescribeBatchPredictionsInput":{ + "type":"structure", + "members":{ + "FilterVariable":{"shape":"BatchPredictionFilterVariable"}, + "EQ":{"shape":"ComparatorValue"}, + "GT":{"shape":"ComparatorValue"}, + "LT":{"shape":"ComparatorValue"}, + "GE":{"shape":"ComparatorValue"}, + "LE":{"shape":"ComparatorValue"}, + "NE":{"shape":"ComparatorValue"}, + "Prefix":{"shape":"ComparatorValue"}, + "SortOrder":{"shape":"SortOrder"}, + "NextToken":{"shape":"StringType"}, + "Limit":{"shape":"PageLimit"} + } + }, + "DescribeBatchPredictionsOutput":{ + "type":"structure", + "members":{ + "Results":{"shape":"BatchPredictions"}, + "NextToken":{"shape":"StringType"} + } + }, + "DescribeDataSourcesInput":{ + "type":"structure", + "members":{ + "FilterVariable":{"shape":"DataSourceFilterVariable"}, + "EQ":{"shape":"ComparatorValue"}, + "GT":{"shape":"ComparatorValue"}, + "LT":{"shape":"ComparatorValue"}, + "GE":{"shape":"ComparatorValue"}, + "LE":{"shape":"ComparatorValue"}, + "NE":{"shape":"ComparatorValue"}, + "Prefix":{"shape":"ComparatorValue"}, + "SortOrder":{"shape":"SortOrder"}, + "NextToken":{"shape":"StringType"}, + "Limit":{"shape":"PageLimit"} + } + }, + "DescribeDataSourcesOutput":{ + "type":"structure", + "members":{ + "Results":{"shape":"DataSources"}, + "NextToken":{"shape":"StringType"} + } + }, + "DescribeEvaluationsInput":{ + "type":"structure", + "members":{ + "FilterVariable":{"shape":"EvaluationFilterVariable"}, + "EQ":{"shape":"ComparatorValue"}, + "GT":{"shape":"ComparatorValue"}, + "LT":{"shape":"ComparatorValue"}, + "GE":{"shape":"ComparatorValue"}, + "LE":{"shape":"ComparatorValue"}, + "NE":{"shape":"ComparatorValue"}, + "Prefix":{"shape":"ComparatorValue"}, + "SortOrder":{"shape":"SortOrder"}, + "NextToken":{"shape":"StringType"}, + "Limit":{"shape":"PageLimit"} + } + }, + "DescribeEvaluationsOutput":{ + "type":"structure", + "members":{ + "Results":{"shape":"Evaluations"}, + "NextToken":{"shape":"StringType"} + } + }, + "DescribeMLModelsInput":{ + 
"type":"structure", + "members":{ + "FilterVariable":{"shape":"MLModelFilterVariable"}, + "EQ":{"shape":"ComparatorValue"}, + "GT":{"shape":"ComparatorValue"}, + "LT":{"shape":"ComparatorValue"}, + "GE":{"shape":"ComparatorValue"}, + "LE":{"shape":"ComparatorValue"}, + "NE":{"shape":"ComparatorValue"}, + "Prefix":{"shape":"ComparatorValue"}, + "SortOrder":{"shape":"SortOrder"}, + "NextToken":{"shape":"StringType"}, + "Limit":{"shape":"PageLimit"} + } + }, + "DescribeMLModelsOutput":{ + "type":"structure", + "members":{ + "Results":{"shape":"MLModels"}, + "NextToken":{"shape":"StringType"} + } + }, + "DetailsAttributes":{ + "type":"string", + "enum":[ + "PredictiveModelType", + "Algorithm" + ] + }, + "DetailsMap":{ + "type":"map", + "key":{"shape":"DetailsAttributes"}, + "value":{"shape":"DetailsValue"} + }, + "DetailsValue":{ + "type":"string", + "min":1 + }, + "EDPPipelineId":{ + "type":"string", + "min":1, + "max":1024 + }, + "EDPResourceRole":{ + "type":"string", + "min":1, + "max":64 + }, + "EDPSecurityGroupId":{ + "type":"string", + "min":1, + "max":255 + }, + "EDPSecurityGroupIds":{ + "type":"list", + "member":{"shape":"EDPSecurityGroupId"} + }, + "EDPServiceRole":{ + "type":"string", + "min":1, + "max":64 + }, + "EDPSubnetId":{ + "type":"string", + "min":1, + "max":255 + }, + "EntityId":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "EntityName":{ + "type":"string", + "max":1024, + "pattern":".*\\S.*|^$" + }, + "EntityStatus":{ + "type":"string", + "enum":[ + "PENDING", + "INPROGRESS", + "FAILED", + "COMPLETED", + "DELETED" + ] + }, + "EpochTime":{"type":"timestamp"}, + "ErrorCode":{"type":"integer"}, + "ErrorMessage":{ + "type":"string", + "max":2048 + }, + "Evaluation":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"}, + "MLModelId":{"shape":"EntityId"}, + "EvaluationDataSourceId":{"shape":"EntityId"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + 
"CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "PerformanceMetrics":{"shape":"PerformanceMetrics"}, + "Message":{"shape":"Message"} + } + }, + "EvaluationFilterVariable":{ + "type":"string", + "enum":[ + "CreatedAt", + "LastUpdatedAt", + "Status", + "Name", + "IAMUser", + "MLModelId", + "DataSourceId", + "DataURI" + ] + }, + "Evaluations":{ + "type":"list", + "member":{"shape":"Evaluation"} + }, + "GetBatchPredictionInput":{ + "type":"structure", + "required":["BatchPredictionId"], + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "GetBatchPredictionOutput":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"}, + "MLModelId":{"shape":"EntityId"}, + "BatchPredictionDataSourceId":{"shape":"EntityId"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "OutputUri":{"shape":"S3Url"}, + "LogUri":{"shape":"PresignedS3Url"}, + "Message":{"shape":"Message"} + } + }, + "GetDataSourceInput":{ + "type":"structure", + "required":["DataSourceId"], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "Verbose":{"shape":"Verbose"} + } + }, + "GetDataSourceOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataLocationS3":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "DataSizeInBytes":{"shape":"LongType"}, + "NumberOfFiles":{"shape":"LongType"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "LogUri":{"shape":"PresignedS3Url"}, + "Message":{"shape":"Message"}, + "RedshiftMetadata":{"shape":"RedshiftMetadata"}, + 
"RDSMetadata":{"shape":"RDSMetadata"}, + "RoleARN":{"shape":"RoleARN"}, + "ComputeStatistics":{"shape":"ComputeStatistics"}, + "DataSourceSchema":{"shape":"DataSchema"} + } + }, + "GetEvaluationInput":{ + "type":"structure", + "required":["EvaluationId"], + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "GetEvaluationOutput":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"}, + "MLModelId":{"shape":"EntityId"}, + "EvaluationDataSourceId":{"shape":"EntityId"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"EntityName"}, + "Status":{"shape":"EntityStatus"}, + "PerformanceMetrics":{"shape":"PerformanceMetrics"}, + "LogUri":{"shape":"PresignedS3Url"}, + "Message":{"shape":"Message"} + } + }, + "GetMLModelInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"}, + "Verbose":{"shape":"Verbose"} + } + }, + "GetMLModelOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"}, + "TrainingDataSourceId":{"shape":"EntityId"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"MLModelName"}, + "Status":{"shape":"EntityStatus"}, + "SizeInBytes":{"shape":"LongType"}, + "EndpointInfo":{"shape":"RealtimeEndpointInfo"}, + "TrainingParameters":{"shape":"TrainingParameters"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "MLModelType":{"shape":"MLModelType"}, + "ScoreThreshold":{"shape":"ScoreThreshold"}, + "ScoreThresholdLastUpdatedAt":{"shape":"EpochTime"}, + "LogUri":{"shape":"PresignedS3Url"}, + "Message":{"shape":"Message"}, + "Recipe":{"shape":"Recipe"}, + "Schema":{"shape":"DataSchema"} + } + }, + "IdempotentParameterMismatchException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + 
}, + "error":{"httpStatusCode":400}, + "exception":true + }, + "IntegerType":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Label":{ + "type":"string", + "min":1 + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":417}, + "exception":true + }, + "LongType":{"type":"long"}, + "MLModel":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"}, + "TrainingDataSourceId":{"shape":"EntityId"}, + "CreatedByIamUser":{"shape":"AwsUserArn"}, + "CreatedAt":{"shape":"EpochTime"}, + "LastUpdatedAt":{"shape":"EpochTime"}, + "Name":{"shape":"MLModelName"}, + "Status":{"shape":"EntityStatus"}, + "SizeInBytes":{"shape":"LongType"}, + "EndpointInfo":{"shape":"RealtimeEndpointInfo"}, + "TrainingParameters":{"shape":"TrainingParameters"}, + "InputDataLocationS3":{"shape":"S3Url"}, + "Algorithm":{"shape":"Algorithm"}, + "MLModelType":{"shape":"MLModelType"}, + "ScoreThreshold":{"shape":"ScoreThreshold"}, + "ScoreThresholdLastUpdatedAt":{"shape":"EpochTime"}, + "Message":{"shape":"Message"} + } + }, + "MLModelFilterVariable":{ + "type":"string", + "enum":[ + "CreatedAt", + "LastUpdatedAt", + "Status", + "Name", + "IAMUser", + "TrainingDataSourceId", + "RealtimeEndpointStatus", + "MLModelType", + "Algorithm", + "TrainingDataURI" + ] + }, + "MLModelName":{ + "type":"string", + "max":1024 + }, + "MLModelType":{ + "type":"string", + "enum":[ + "REGRESSION", + "BINARY", + "MULTICLASS" + ] + }, + "MLModels":{ + "type":"list", + "member":{"shape":"MLModel"} + }, + "Message":{ + 
"type":"string", + "max":10240 + }, + "PageLimit":{ + "type":"integer", + "min":1, + "max":100 + }, + "PerformanceMetrics":{ + "type":"structure", + "members":{ + "Properties":{"shape":"PerformanceMetricsProperties"} + } + }, + "PerformanceMetricsProperties":{ + "type":"map", + "key":{"shape":"PerformanceMetricsPropertyKey"}, + "value":{"shape":"PerformanceMetricsPropertyValue"} + }, + "PerformanceMetricsPropertyKey":{"type":"string"}, + "PerformanceMetricsPropertyValue":{"type":"string"}, + "PredictInput":{ + "type":"structure", + "required":[ + "MLModelId", + "Record", + "PredictEndpoint" + ], + "members":{ + "MLModelId":{"shape":"EntityId"}, + "Record":{"shape":"Record"}, + "PredictEndpoint":{"shape":"VipURL"} + } + }, + "PredictOutput":{ + "type":"structure", + "members":{ + "Prediction":{"shape":"Prediction"} + } + }, + "Prediction":{ + "type":"structure", + "members":{ + "predictedLabel":{"shape":"Label"}, + "predictedValue":{"shape":"floatLabel"}, + "predictedScores":{"shape":"ScoreValuePerLabelMap"}, + "details":{"shape":"DetailsMap"} + } + }, + "PredictorNotMountedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "PresignedS3Url":{"type":"string"}, + "RDSDataSpec":{ + "type":"structure", + "required":[ + "DatabaseInformation", + "SelectSqlQuery", + "DatabaseCredentials", + "S3StagingLocation", + "ResourceRole", + "ServiceRole", + "SubnetId", + "SecurityGroupIds" + ], + "members":{ + "DatabaseInformation":{"shape":"RDSDatabase"}, + "SelectSqlQuery":{"shape":"RDSSelectSqlQuery"}, + "DatabaseCredentials":{"shape":"RDSDatabaseCredentials"}, + "S3StagingLocation":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "DataSchema":{"shape":"DataSchema"}, + "DataSchemaUri":{"shape":"S3Url"}, + "ResourceRole":{"shape":"EDPResourceRole"}, + "ServiceRole":{"shape":"EDPServiceRole"}, + "SubnetId":{"shape":"EDPSubnetId"}, + 
"SecurityGroupIds":{"shape":"EDPSecurityGroupIds"} + } + }, + "RDSDatabase":{ + "type":"structure", + "required":[ + "InstanceIdentifier", + "DatabaseName" + ], + "members":{ + "InstanceIdentifier":{"shape":"RDSInstanceIdentifier"}, + "DatabaseName":{"shape":"RDSDatabaseName"} + } + }, + "RDSDatabaseCredentials":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{"shape":"RDSDatabaseUsername"}, + "Password":{"shape":"RDSDatabasePassword"} + } + }, + "RDSDatabaseName":{ + "type":"string", + "min":1, + "max":64 + }, + "RDSDatabasePassword":{ + "type":"string", + "min":8, + "max":128 + }, + "RDSDatabaseUsername":{ + "type":"string", + "min":1, + "max":128 + }, + "RDSInstanceIdentifier":{ + "type":"string", + "min":1, + "max":63, + "pattern":"[a-z0-9-]+" + }, + "RDSMetadata":{ + "type":"structure", + "members":{ + "Database":{"shape":"RDSDatabase"}, + "DatabaseUserName":{"shape":"RDSDatabaseUsername"}, + "SelectSqlQuery":{"shape":"RDSSelectSqlQuery"}, + "ResourceRole":{"shape":"EDPResourceRole"}, + "ServiceRole":{"shape":"EDPServiceRole"}, + "DataPipelineId":{"shape":"EDPPipelineId"} + } + }, + "RDSSelectSqlQuery":{ + "type":"string", + "min":1, + "max":16777216 + }, + "RealtimeEndpointInfo":{ + "type":"structure", + "members":{ + "PeakRequestsPerSecond":{"shape":"IntegerType"}, + "CreatedAt":{"shape":"EpochTime"}, + "EndpointUrl":{"shape":"VipURL"}, + "EndpointStatus":{"shape":"RealtimeEndpointStatus"} + } + }, + "RealtimeEndpointStatus":{ + "type":"string", + "enum":[ + "NONE", + "READY", + "UPDATING", + "FAILED" + ] + }, + "Recipe":{ + "type":"string", + "max":131071 + }, + "Record":{ + "type":"map", + "key":{"shape":"VariableName"}, + "value":{"shape":"VariableValue"} + }, + "RedshiftClusterIdentifier":{ + "type":"string", + "min":1, + "max":63, + "pattern":"[a-z0-9-]+" + }, + "RedshiftDataSpec":{ + "type":"structure", + "required":[ + "DatabaseInformation", + "SelectSqlQuery", + "DatabaseCredentials", + 
"S3StagingLocation" + ], + "members":{ + "DatabaseInformation":{"shape":"RedshiftDatabase"}, + "SelectSqlQuery":{"shape":"RedshiftSelectSqlQuery"}, + "DatabaseCredentials":{"shape":"RedshiftDatabaseCredentials"}, + "S3StagingLocation":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "DataSchema":{"shape":"DataSchema"}, + "DataSchemaUri":{"shape":"S3Url"} + } + }, + "RedshiftDatabase":{ + "type":"structure", + "required":[ + "DatabaseName", + "ClusterIdentifier" + ], + "members":{ + "DatabaseName":{"shape":"RedshiftDatabaseName"}, + "ClusterIdentifier":{"shape":"RedshiftClusterIdentifier"} + } + }, + "RedshiftDatabaseCredentials":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{"shape":"RedshiftDatabaseUsername"}, + "Password":{"shape":"RedshiftDatabasePassword"} + } + }, + "RedshiftDatabaseName":{ + "type":"string", + "min":1, + "max":64, + "pattern":"[a-z0-9]+" + }, + "RedshiftDatabasePassword":{ + "type":"string", + "min":8, + "max":64 + }, + "RedshiftDatabaseUsername":{ + "type":"string", + "min":1, + "max":128 + }, + "RedshiftMetadata":{ + "type":"structure", + "members":{ + "RedshiftDatabase":{"shape":"RedshiftDatabase"}, + "DatabaseUserName":{"shape":"RedshiftDatabaseUsername"}, + "SelectSqlQuery":{"shape":"RedshiftSelectSqlQuery"} + } + }, + "RedshiftSelectSqlQuery":{ + "type":"string", + "min":1, + "max":16777216 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RoleARN":{ + "type":"string", + "min":1, + "max":100 + }, + "S3DataSpec":{ + "type":"structure", + "required":["DataLocationS3"], + "members":{ + "DataLocationS3":{"shape":"S3Url"}, + "DataRearrangement":{"shape":"DataRearrangement"}, + "DataSchema":{"shape":"DataSchema"}, + "DataSchemaLocationS3":{"shape":"S3Url"} + } + }, + "S3Url":{ + "type":"string", + "max":2048, + 
"pattern":"s3://([^/]+)(/.*)?" + }, + "ScoreThreshold":{"type":"float"}, + "ScoreValue":{"type":"float"}, + "ScoreValuePerLabelMap":{ + "type":"map", + "key":{"shape":"Label"}, + "value":{"shape":"ScoreValue"} + }, + "SortOrder":{ + "type":"string", + "enum":[ + "asc", + "dsc" + ] + }, + "StringType":{"type":"string"}, + "TrainingParameters":{ + "type":"map", + "key":{"shape":"StringType"}, + "value":{"shape":"StringType"} + }, + "UpdateBatchPredictionInput":{ + "type":"structure", + "required":[ + "BatchPredictionId", + "BatchPredictionName" + ], + "members":{ + "BatchPredictionId":{"shape":"EntityId"}, + "BatchPredictionName":{"shape":"EntityName"} + } + }, + "UpdateBatchPredictionOutput":{ + "type":"structure", + "members":{ + "BatchPredictionId":{"shape":"EntityId"} + } + }, + "UpdateDataSourceInput":{ + "type":"structure", + "required":[ + "DataSourceId", + "DataSourceName" + ], + "members":{ + "DataSourceId":{"shape":"EntityId"}, + "DataSourceName":{"shape":"EntityName"} + } + }, + "UpdateDataSourceOutput":{ + "type":"structure", + "members":{ + "DataSourceId":{"shape":"EntityId"} + } + }, + "UpdateEvaluationInput":{ + "type":"structure", + "required":[ + "EvaluationId", + "EvaluationName" + ], + "members":{ + "EvaluationId":{"shape":"EntityId"}, + "EvaluationName":{"shape":"EntityName"} + } + }, + "UpdateEvaluationOutput":{ + "type":"structure", + "members":{ + "EvaluationId":{"shape":"EntityId"} + } + }, + "UpdateMLModelInput":{ + "type":"structure", + "required":["MLModelId"], + "members":{ + "MLModelId":{"shape":"EntityId"}, + "MLModelName":{"shape":"EntityName"}, + "ScoreThreshold":{"shape":"ScoreThreshold"} + } + }, + "UpdateMLModelOutput":{ + "type":"structure", + "members":{ + "MLModelId":{"shape":"EntityId"} + } + }, + "VariableName":{"type":"string"}, + "VariableValue":{"type":"string"}, + "Verbose":{"type":"boolean"}, + "VipURL":{ + "type":"string", + "max":2048, + "pattern":"https://[a-zA-Z0-9-.]*\\.amazon(aws)?\\.com[/]?" 
+ }, + "floatLabel":{"type":"float"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1034 @@ +{ + "version": "2.0", + "operations": { + "CreateBatchPrediction": "

    Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data files referenced by the DataSource as information sources.

    CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the BatchPrediction completes, Amazon ML sets the status to COMPLETED.

    You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result. After the COMPLETED status appears, the results are available in the location specified by the OutputUri parameter.

    ", + "CreateDataSourceFromRDS": "

    Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING status can only be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

    ", + "CreateDataSourceFromRedshift": "

    Creates a DataSource from Amazon Redshift. A DataSource references data that can be used to perform either CreateMLModel, CreateEvaluation or CreateBatchPrediction operations.

    CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING status can only be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

    The observations should exist in the database hosted on an Amazon Redshift cluster and should be specified by a SelectSqlQuery. Amazon ML executes Unload command in Amazon Redshift to transfer the result set of SelectSqlQuery to S3StagingLocation.

    After the DataSource is created, it's ready for use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource requires another item -- a recipe. A recipe describes the observation variables that participate in training an MLModel. A recipe describes how each input variable will be used in training. Will the variable be included or excluded from training? Will the variable be manipulated, for example, combined with another variable or split apart into word combinations? The recipe provides answers to these questions. For more information, see the Amazon Machine Learning Developer Guide.

    ", + "CreateDataSourceFromS3": "

    Creates a DataSource object. A DataSource references data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.

    CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING status can only be used to perform CreateMLModel, CreateEvaluation or CreateBatchPrediction operations.

    If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message in the Message attribute of the GetDataSource operation response.

    The observation data used in a DataSource should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more CSV files in an Amazon Simple Storage Service (Amazon S3) bucket, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the DataSource.

    After the DataSource has been created, it's ready to use in evaluations and batch predictions. If you plan to use the DataSource to train an MLModel, the DataSource requires another item: a recipe. A recipe describes the observation variables that participate in training an MLModel. A recipe describes how each input variable will be used in training. Will the variable be included or excluded from training? Will the variable be manipulated, for example, combined with another variable, or split apart into word combinations? The recipe provides answers to these questions. For more information, see the Amazon Machine Learning Developer Guide.

    ", + "CreateEvaluation": "

    Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set of observations associated to a DataSource. Like a DataSource for an MLModel, the DataSource for an Evaluation contains values for the Target Variable. The Evaluation compares the predicted result for each observation to the actual outcome and provides a summary so that you know how effective the MLModel functions on the test data. Evaluation generates a relevant performance metric such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding MLModelType: BINARY, REGRESSION or MULTICLASS.

    CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to PENDING. After the Evaluation is created and ready for use, Amazon ML sets the status to COMPLETED.

    You can use the GetEvaluation operation to check progress of the evaluation during the creation operation.

    ", + "CreateMLModel": "

    Creates a new MLModel using the data files and the recipe as information sources.

    An MLModel is nearly immutable. Users can only update the MLModelName and the ScoreThreshold in an MLModel without creating a new MLModel.

    CreateMLModel is an asynchronous operation. In response to CreateMLModel, Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel status to PENDING. After the MLModel is created and ready for use, Amazon ML sets the status to COMPLETED.

    You can use the GetMLModel operation to check progress of the MLModel during the creation operation.

    CreateMLModel requires a DataSource with computed statistics, which can be created by setting ComputeStatistics to true in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.

    ", + "CreateRealtimeEndpoint": "

    Creates a real-time endpoint for the MLModel. The endpoint contains the URI of the MLModel; that is, the location to send real-time prediction requests for the specified MLModel.

    ", + "DeleteBatchPrediction": "

    Assigns the DELETED status to a BatchPrediction, rendering it unusable.

    After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction operation to verify that the status of the BatchPrediction changed to DELETED.

    Caution: The result of the DeleteBatchPrediction operation is irreversible.

    ", + "DeleteDataSource": "

    Assigns the DELETED status to a DataSource, rendering it unusable.

    After using the DeleteDataSource operation, you can use the GetDataSource operation to verify that the status of the DataSource changed to DELETED.

    Caution: The results of the DeleteDataSource operation are irreversible.

    ", + "DeleteEvaluation": "

    Assigns the DELETED status to an Evaluation, rendering it unusable.

    After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the status of the Evaluation changed to DELETED.

    Caution: The results of the DeleteEvaluation operation are irreversible.

    ", + "DeleteMLModel": "

    Assigns the DELETED status to an MLModel, rendering it unusable.

    After using the DeleteMLModel operation, you can use the GetMLModel operation to verify that the status of the MLModel changed to DELETED.

    Caution: The result of the DeleteMLModel operation is irreversible.

    ", + "DeleteRealtimeEndpoint": "

    Deletes a real time endpoint of an MLModel.

    ", + "DescribeBatchPredictions": "

    Returns a list of BatchPrediction operations that match the search criteria in the request.

    ", + "DescribeDataSources": "

    Returns a list of DataSource that match the search criteria in the request.

    ", + "DescribeEvaluations": "

    Returns a list of Evaluations that match the search criteria in the request.

    ", + "DescribeMLModels": "

    Returns a list of MLModel that match the search criteria in the request.

    ", + "GetBatchPrediction": "

    Returns a BatchPrediction that includes detailed metadata, status, and data file information for a Batch Prediction request.

    ", + "GetDataSource": "

    Returns a DataSource that includes metadata and data file information, as well as the current status of the DataSource.

    GetDataSource provides results in normal or verbose format. The verbose format adds the schema description and the list of files pointed to by the DataSource to the normal format.

    ", + "GetEvaluation": "

    Returns an Evaluation that includes metadata as well as the current status of the Evaluation.

    ", + "GetMLModel": "

    Returns an MLModel that includes detailed metadata, and data source information as well as the current status of the MLModel.

    GetMLModel provides results in normal or verbose format.

    ", + "Predict": "

    Generates a prediction for the observation using the specified ML Model.

    Note

    Not all response parameters will be populated. Whether a response parameter is populated depends on the type of model requested.

    ", + "UpdateBatchPrediction": "

    Updates the BatchPredictionName of a BatchPrediction.

    You can use the GetBatchPrediction operation to view the contents of the updated data element.

    ", + "UpdateDataSource": "

    Updates the DataSourceName of a DataSource.

    You can use the GetDataSource operation to view the contents of the updated data element.

    ", + "UpdateEvaluation": "

    Updates the EvaluationName of an Evaluation.

    You can use the GetEvaluation operation to view the contents of the updated data element.

    ", + "UpdateMLModel": "

    Updates the MLModelName and the ScoreThreshold of an MLModel.

    You can use the GetMLModel operation to view the contents of the updated data element.

    " + }, + "service": "Definition of the public APIs exposed by Amazon Machine Learning", + "shapes": { + "Algorithm": { + "base": "

    The function used to train an MLModel. Training choices supported by Amazon ML include the following:

    • SGD - Stochastic Gradient Descent.
    • RandomForest - Random forest of decision trees.
    ", + "refs": { + "MLModel$Algorithm": "

    The algorithm used to train the MLModel. The following algorithm is supported:

    • SGD -- Stochastic gradient descent. The goal of SGD is to minimize the gradient of the loss function.
    " + } + }, + "AwsUserArn": { + "base": "

    An Amazon Web Services (AWS) user account identifier. The account identifier can be an AWS root account or an AWS Identity and Access Management (IAM) user.

    ", + "refs": { + "BatchPrediction$CreatedByIamUser": "

    The AWS user account that invoked the BatchPrediction. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "DataSource$CreatedByIamUser": "

    The AWS user account from which the DataSource was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "Evaluation$CreatedByIamUser": "

    The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "GetBatchPredictionOutput$CreatedByIamUser": "

    The AWS user account that invoked the BatchPrediction. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "GetDataSourceOutput$CreatedByIamUser": "

    The AWS user account from which the DataSource was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "GetEvaluationOutput$CreatedByIamUser": "

    The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "GetMLModelOutput$CreatedByIamUser": "

    The AWS user account from which the MLModel was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    ", + "MLModel$CreatedByIamUser": "

    The AWS user account from which the MLModel was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.

    " + } + }, + "BatchPrediction": { + "base": "

    Represents the output of GetBatchPrediction operation.

    The content consists of the detailed metadata, the status, and the data file information of a Batch Prediction.

    ", + "refs": { + "BatchPredictions$member": null + } + }, + "BatchPredictionFilterVariable": { + "base": "

    A list of the variables to use in searching or filtering BatchPrediction.

    • CreatedAt - Sets the search criteria to BatchPrediction creation date.
    • Status - Sets the search criteria to BatchPrediction status.
    • Name - Sets the search criteria to the contents of BatchPrediction Name.
    • IAMUser - Sets the search criteria to the user account that invoked the BatchPrediction creation.
    • MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction.
    • DataSourceId - Sets the search criteria to the DataSource used in the BatchPrediction.
    • DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    ", + "refs": { + "DescribeBatchPredictionsInput$FilterVariable": "

    Use one of the following variables to filter a list of BatchPrediction:

    • CreatedAt - Sets the search criteria to the BatchPrediction creation date.
    • Status - Sets the search criteria to the BatchPrediction status.
    • Name - Sets the search criteria to the contents of the BatchPrediction Name.
    • IAMUser - Sets the search criteria to the user account that invoked the BatchPrediction creation.
    • MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction.
    • DataSourceId - Sets the search criteria to the DataSource used in the BatchPrediction.
    • DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    " + } + }, + "BatchPredictions": { + "base": null, + "refs": { + "DescribeBatchPredictionsOutput$Results": "

    A list of BatchPrediction objects that meet the search criteria.

    " + } + }, + "ComparatorValue": { + "base": "

    The value specified in a filtering condition. The ComparatorValue becomes the reference value when matching or evaluating data values in filtering and searching functions.

    ", + "refs": { + "DescribeBatchPredictionsInput$EQ": "

    The equal to operator. The BatchPrediction results will have FilterVariable values that exactly match the value specified with EQ.

    ", + "DescribeBatchPredictionsInput$GT": "

    The greater than operator. The BatchPrediction results will have FilterVariable values that are greater than the value specified with GT.

    ", + "DescribeBatchPredictionsInput$LT": "

    The less than operator. The BatchPrediction results will have FilterVariable values that are less than the value specified with LT.

    ", + "DescribeBatchPredictionsInput$GE": "

    The greater than or equal to operator. The BatchPrediction results will have FilterVariable values that are greater than or equal to the value specified with GE.

    ", + "DescribeBatchPredictionsInput$LE": "

    The less than or equal to operator. The BatchPrediction results will have FilterVariable values that are less than or equal to the value specified with LE.

    ", + "DescribeBatchPredictionsInput$NE": "

    The not equal to operator. The BatchPrediction results will have FilterVariable values not equal to the value specified with NE.

    ", + "DescribeBatchPredictionsInput$Prefix": "

    A string that is found at the beginning of a variable, such as Name or Id.

    For example, a Batch Prediction operation could have the Name 2014-09-09-HolidayGiftMailer. To search for this BatchPrediction, select Name for the FilterVariable and any of the following strings for the Prefix:

    • 2014-09

    • 2014-09-09

    • 2014-09-09-Holiday

    ", + "DescribeDataSourcesInput$EQ": "

    The equal to operator. The DataSource results will have FilterVariable values that exactly match the value specified with EQ.

    ", + "DescribeDataSourcesInput$GT": "

    The greater than operator. The DataSource results will have FilterVariable values that are greater than the value specified with GT.

    ", + "DescribeDataSourcesInput$LT": "

    The less than operator. The DataSource results will have FilterVariable values that are less than the value specified with LT.

    ", + "DescribeDataSourcesInput$GE": "

    The greater than or equal to operator. The DataSource results will have FilterVariable values that are greater than or equal to the value specified with GE.

    ", + "DescribeDataSourcesInput$LE": "

    The less than or equal to operator. The DataSource results will have FilterVariable values that are less than or equal to the value specified with LE.

    ", + "DescribeDataSourcesInput$NE": "

    The not equal to operator. The DataSource results will have FilterVariable values not equal to the value specified with NE.

    ", + "DescribeDataSourcesInput$Prefix": "

    A string that is found at the beginning of a variable, such as Name or Id.

    For example, a DataSource could have the Name 2014-09-09-HolidayGiftMailer. To search for this DataSource, select Name for the FilterVariable and any of the following strings for the Prefix:

    • 2014-09

    • 2014-09-09

    • 2014-09-09-Holiday

    ", + "DescribeEvaluationsInput$EQ": "

    The equal to operator. The Evaluation results will have FilterVariable values that exactly match the value specified with EQ.

    ", + "DescribeEvaluationsInput$GT": "

    The greater than operator. The Evaluation results will have FilterVariable values that are greater than the value specified with GT.

    ", + "DescribeEvaluationsInput$LT": "

    The less than operator. The Evaluation results will have FilterVariable values that are less than the value specified with LT.

    ", + "DescribeEvaluationsInput$GE": "

    The greater than or equal to operator. The Evaluation results will have FilterVariable values that are greater than or equal to the value specified with GE.

    ", + "DescribeEvaluationsInput$LE": "

    The less than or equal to operator. The Evaluation results will have FilterVariable values that are less than or equal to the value specified with LE.

    ", + "DescribeEvaluationsInput$NE": "

    The not equal to operator. The Evaluation results will have FilterVariable values not equal to the value specified with NE.

    ", + "DescribeEvaluationsInput$Prefix": "

    A string that is found at the beginning of a variable, such as Name or Id.

    For example, an Evaluation could have the Name 2014-09-09-HolidayGiftMailer. To search for this Evaluation, select Name for the FilterVariable and any of the following strings for the Prefix:

    • 2014-09

    • 2014-09-09

    • 2014-09-09-Holiday

    ", + "DescribeMLModelsInput$EQ": "

    The equal to operator. The MLModel results will have FilterVariable values that exactly match the value specified with EQ.

    ", + "DescribeMLModelsInput$GT": "

    The greater than operator. The MLModel results will have FilterVariable values that are greater than the value specified with GT.

    ", + "DescribeMLModelsInput$LT": "

    The less than operator. The MLModel results will have FilterVariable values that are less than the value specified with LT.

    ", + "DescribeMLModelsInput$GE": "

    The greater than or equal to operator. The MLModel results will have FilterVariable values that are greater than or equal to the value specified with GE.

    ", + "DescribeMLModelsInput$LE": "

    The less than or equal to operator. The MLModel results will have FilterVariable values that are less than or equal to the value specified with LE.

    ", + "DescribeMLModelsInput$NE": "

    The not equal to operator. The MLModel results will have FilterVariable values not equal to the value specified with NE.

    ", + "DescribeMLModelsInput$Prefix": "

    A string that is found at the beginning of a variable, such as Name or Id.

    For example, an MLModel could have the Name 2014-09-09-HolidayGiftMailer. To search for this MLModel, select Name for the FilterVariable and any of the following strings for the Prefix:

    • 2014-09

    • 2014-09-09

    • 2014-09-09-Holiday

    " + } + }, + "ComputeStatistics": { + "base": null, + "refs": { + "CreateDataSourceFromRDSInput$ComputeStatistics": "

    The compute statistics for a DataSource. The statistics are generated from the observation data referenced by a DataSource. Amazon ML uses the statistics internally during an MLModel training. This parameter must be set to true if the DataSource needs to be used for MLModel training.

    ", + "CreateDataSourceFromRedshiftInput$ComputeStatistics": "

    The compute statistics for a DataSource. The statistics are generated from the observation data referenced by a DataSource. Amazon ML uses the statistics internally during MLModel training. This parameter must be set to true if the DataSource needs to be used for MLModel training.

    ", + "CreateDataSourceFromS3Input$ComputeStatistics": "

    The compute statistics for a DataSource. The statistics are generated from the observation data referenced by a DataSource. Amazon ML uses the statistics internally during an MLModel training. This parameter must be set to true if the DataSource needs to be used for MLModel training.

    ", + "DataSource$ComputeStatistics": "

    The parameter is true if statistics need to be generated from the observation data.

    ", + "GetDataSourceOutput$ComputeStatistics": "

    The parameter is true if statistics need to be generated from the observation data.

    " + } + }, + "CreateBatchPredictionInput": { + "base": null, + "refs": { + } + }, + "CreateBatchPredictionOutput": { + "base": "

    Represents the output of a CreateBatchPrediction operation, and is an acknowledgement that Amazon ML received the request.

    The CreateBatchPrediction operation is asynchronous. You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of the result.

    ", + "refs": { + } + }, + "CreateDataSourceFromRDSInput": { + "base": null, + "refs": { + } + }, + "CreateDataSourceFromRDSOutput": { + "base": "

    Represents the output of a CreateDataSourceFromRDS operation, and is an acknowledgement that Amazon ML received the request.

    The CreateDataSourceFromRDS operation is asynchronous. You can poll for updates by using the GetDataSource operation and checking the Status parameter. You can inspect the Message when Status shows up as FAILED. You can also check the progress of the copy operation by going to the DataPipeline console and looking up the pipeline using the pipelineId from the describe call.

    ", + "refs": { + } + }, + "CreateDataSourceFromRedshiftInput": { + "base": null, + "refs": { + } + }, + "CreateDataSourceFromRedshiftOutput": { + "base": "

    Represents the output of a CreateDataSourceFromRedshift operation, and is an acknowledgement that Amazon ML received the request.

    The CreateDataSourceFromRedshift operation is asynchronous. You can poll for updates by using the GetDataSource operation and checking the Status parameter.

    ", + "refs": { + } + }, + "CreateDataSourceFromS3Input": { + "base": null, + "refs": { + } + }, + "CreateDataSourceFromS3Output": { + "base": "

    Represents the output of a CreateDataSourceFromS3 operation, and is an acknowledgement that Amazon ML received the request.

    The CreateDataSourceFromS3 operation is asynchronous. You can poll for updates by using the GetDataSource operation and checking the Status parameter.

    ", + "refs": { + } + }, + "CreateEvaluationInput": { + "base": null, + "refs": { + } + }, + "CreateEvaluationOutput": { + "base": "

    Represents the output of a CreateEvaluation operation, and is an acknowledgement that Amazon ML received the request.

    CreateEvaluation operation is asynchronous. You can poll for status updates by using the GetEvaluation operation and checking the Status parameter.

    ", + "refs": { + } + }, + "CreateMLModelInput": { + "base": null, + "refs": { + } + }, + "CreateMLModelOutput": { + "base": "

    Represents the output of a CreateMLModel operation, and is an acknowledgement that Amazon ML received the request.

    The CreateMLModel operation is asynchronous. You can poll for status updates by using the GetMLModel operation and checking the Status parameter.

    ", + "refs": { + } + }, + "CreateRealtimeEndpointInput": { + "base": null, + "refs": { + } + }, + "CreateRealtimeEndpointOutput": { + "base": "

    Represents the output of a CreateRealtimeEndpoint operation.

    The result contains the MLModelId and the endpoint information for the MLModel.

    The endpoint information includes the URI of the MLModel; that is, the location to send online prediction requests for the specified MLModel.

    ", + "refs": { + } + }, + "DataRearrangement": { + "base": null, + "refs": { + "DataSource$DataRearrangement": "

    A JSON string that represents the splitting requirement of a DataSource.

    ", + "GetDataSourceOutput$DataRearrangement": "

    A JSON string that captures the splitting rearrangement requirement of the DataSource.

    ", + "RDSDataSpec$DataRearrangement": "

    DataRearrangement - A JSON string that represents the splitting requirement of a DataSource.


    Sample - \"{\\\"splitting\\\":{\\\"percentBegin\\\":10,\\\"percentEnd\\\":60}}\"

    ", + "RedshiftDataSpec$DataRearrangement": "

    Describes the splitting specifications for a DataSource.

    ", + "S3DataSpec$DataRearrangement": "

    Describes the splitting requirement of a DataSource.

    " + } + }, + "DataSchema": { + "base": "

    The schema of a DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource. The DataSource schema is expressed in JSON format.

    DataSchema is not required if you specify a DataSchemaUri

    { \"version\": \"1.0\", \"recordAnnotationFieldName\": \"F1\", \"recordWeightFieldName\": \"F2\", \"targetFieldName\": \"F3\", \"dataFormat\": \"CSV\", \"dataFileContainsHeader\": true, \"variables\": [ { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ], \"excludedVariableNames\": [ \"F6\" ] }

    ", + "refs": { + "GetDataSourceOutput$DataSourceSchema": "

    The schema used by all of the data files of this DataSource.

    Note

    This parameter is provided as part of the verbose format.

    ", + "GetMLModelOutput$Schema": "

    The schema used by all of the data files referenced by the DataSource.

    Note

    This parameter is provided as part of the verbose format.

    ", + "RDSDataSpec$DataSchema": "

    A JSON string that represents the schema for an Amazon RDS DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource.

    A DataSchema is not required if you specify a DataSchemaUri

    Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema.

    { \"version\": \"1.0\",

    \"recordAnnotationFieldName\": \"F1\",

    \"recordWeightFieldName\": \"F2\",

    \"targetFieldName\": \"F3\",

    \"dataFormat\": \"CSV\",

    \"dataFileContainsHeader\": true,

    \"attributes\": [

    { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],

    \"excludedVariableNames\": [ \"F6\" ] }

    ", + "RedshiftDataSpec$DataSchema": "

    A JSON string that represents the schema for an Amazon Redshift DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource.

    A DataSchema is not required if you specify a DataSchemaUri.

    Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema.

    { \"version\": \"1.0\",

    \"recordAnnotationFieldName\": \"F1\",

    \"recordWeightFieldName\": \"F2\",

    \"targetFieldName\": \"F3\",

    \"dataFormat\": \"CSV\",

    \"dataFileContainsHeader\": true,

    \"attributes\": [

    { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],

    \"excludedVariableNames\": [ \"F6\" ] }

    ", + "S3DataSpec$DataSchema": "

    A JSON string that represents the schema for an Amazon S3 DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource.

    Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema.

    { \"version\": \"1.0\",

    \"recordAnnotationFieldName\": \"F1\",

    \"recordWeightFieldName\": \"F2\",

    \"targetFieldName\": \"F3\",

    \"dataFormat\": \"CSV\",

    \"dataFileContainsHeader\": true,

    \"attributes\": [

    { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],

    \"excludedVariableNames\": [ \"F6\" ] }

    " + } + }, + "DataSource": { + "base": "

    Represents the output of the GetDataSource operation.

    The content consists of the detailed metadata and data file information and the current status of the DataSource.

    ", + "refs": { + "DataSources$member": null + } + }, + "DataSourceFilterVariable": { + "base": "

    A list of the variables to use in searching or filtering DataSource.

    • CreatedAt - Sets the search criteria to DataSource creation date.
    • Status - Sets the search criteria to DataSource status.
    • Name - Sets the search criteria to the contents of DataSource Name.
    • DataUri - Sets the search criteria to the URI of data files used to create the DataSource. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    • IAMUser - Sets the search criteria to the user account that invoked the DataSource creation.
    Note

    The variable names should match the variable names in the DataSource.

    ", + "refs": { + "DescribeDataSourcesInput$FilterVariable": "

    Use one of the following variables to filter a list of DataSource:

    • CreatedAt - Sets the search criteria to DataSource creation dates.
    • Status - Sets the search criteria to DataSource statuses.
    • Name - Sets the search criteria to the contents of DataSource Name.
    • DataUri - Sets the search criteria to the URI of data files used to create the DataSource. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    • IAMUser - Sets the search criteria to the user account that invoked the DataSource creation.
    " + } + }, + "DataSources": { + "base": null, + "refs": { + "DescribeDataSourcesOutput$Results": "

    A list of DataSource that meet the search criteria.

    " + } + }, + "DeleteBatchPredictionInput": { + "base": null, + "refs": { + } + }, + "DeleteBatchPredictionOutput": { + "base": "

    Represents the output of a DeleteBatchPrediction operation.

    You can use the GetBatchPrediction operation and check the value of the Status parameter to see whether a BatchPrediction is marked as DELETED.

    ", + "refs": { + } + }, + "DeleteDataSourceInput": { + "base": null, + "refs": { + } + }, + "DeleteDataSourceOutput": { + "base": "

    Represents the output of a DeleteDataSource operation.

    ", + "refs": { + } + }, + "DeleteEvaluationInput": { + "base": null, + "refs": { + } + }, + "DeleteEvaluationOutput": { + "base": "

    Represents the output of a DeleteEvaluation operation. The output indicates that Amazon Machine Learning (Amazon ML) received the request.

    You can use the GetEvaluation operation and check the value of the Status parameter to see whether an Evaluation is marked as DELETED.

    ", + "refs": { + } + }, + "DeleteMLModelInput": { + "base": null, + "refs": { + } + }, + "DeleteMLModelOutput": { + "base": "

    Represents the output of a DeleteMLModel operation.

    You can use the GetMLModel operation and check the value of the Status parameter to see whether an MLModel is marked as DELETED.

    ", + "refs": { + } + }, + "DeleteRealtimeEndpointInput": { + "base": null, + "refs": { + } + }, + "DeleteRealtimeEndpointOutput": { + "base": "

    Represents the output of an DeleteRealtimeEndpoint operation.

    The result contains the MLModelId and the endpoint information for the MLModel.

    ", + "refs": { + } + }, + "DescribeBatchPredictionsInput": { + "base": null, + "refs": { + } + }, + "DescribeBatchPredictionsOutput": { + "base": "

    Represents the output of a DescribeBatchPredictions operation. The content is essentially a list of BatchPredictions.

    ", + "refs": { + } + }, + "DescribeDataSourcesInput": { + "base": null, + "refs": { + } + }, + "DescribeDataSourcesOutput": { + "base": "

    Represents the query results from a DescribeDataSources operation. The content is essentially a list of DataSource.

    ", + "refs": { + } + }, + "DescribeEvaluationsInput": { + "base": null, + "refs": { + } + }, + "DescribeEvaluationsOutput": { + "base": "

    Represents the query results from a DescribeEvaluations operation. The content is essentially a list of Evaluation.

    ", + "refs": { + } + }, + "DescribeMLModelsInput": { + "base": null, + "refs": { + } + }, + "DescribeMLModelsOutput": { + "base": "

    Represents the output of a DescribeMLModels operation. The content is essentially a list of MLModel.

    ", + "refs": { + } + }, + "DetailsAttributes": { + "base": "Contains the key values of DetailsMap: PredictiveModelType - Indicates the type of the MLModel. Algorithm - Indicates the algorithm was used for the MLModel.", + "refs": { + "DetailsMap$key": null + } + }, + "DetailsMap": { + "base": "Provides any additional details regarding the prediction.", + "refs": { + "Prediction$details": null + } + }, + "DetailsValue": { + "base": null, + "refs": { + "DetailsMap$value": null + } + }, + "EDPPipelineId": { + "base": null, + "refs": { + "RDSMetadata$DataPipelineId": "

    The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.

    " + } + }, + "EDPResourceRole": { + "base": null, + "refs": { + "RDSDataSpec$ResourceRole": "

    The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS to an Amazon S3 task. For more information, see Role templates for data pipelines.

    ", + "RDSMetadata$ResourceRole": "

    The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    " + } + }, + "EDPSecurityGroupId": { + "base": null, + "refs": { + "EDPSecurityGroupIds$member": null + } + }, + "EDPSecurityGroupIds": { + "base": null, + "refs": { + "RDSDataSpec$SecurityGroupIds": "

    The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to an Amazon S3 task.

    " + } + }, + "EDPServiceRole": { + "base": null, + "refs": { + "RDSDataSpec$ServiceRole": "

    The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    ", + "RDSMetadata$ServiceRole": "

    The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    " + } + }, + "EDPSubnetId": { + "base": null, + "refs": { + "RDSDataSpec$SubnetId": "

    The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.

    " + } + }, + "EntityId": { + "base": null, + "refs": { + "BatchPrediction$BatchPredictionId": "

    The ID assigned to the BatchPrediction at creation. This value should be identical to the value of the BatchPredictionID in the request.

    ", + "BatchPrediction$MLModelId": "

    The ID of the MLModel that generated predictions for the BatchPrediction request.

    ", + "BatchPrediction$BatchPredictionDataSourceId": "

    The ID of the DataSource that points to the group of observations to predict.

    ", + "CreateBatchPredictionInput$BatchPredictionId": "

    A user-supplied ID that uniquely identifies the BatchPrediction.

    ", + "CreateBatchPredictionInput$MLModelId": "

    The ID of the MLModel that will generate predictions for the group of observations.

    ", + "CreateBatchPredictionInput$BatchPredictionDataSourceId": "

    The ID of the DataSource that points to the group of observations to predict.

    ", + "CreateBatchPredictionOutput$BatchPredictionId": "

    A user-supplied ID that uniquely identifies the BatchPrediction. This value is identical to the value of the BatchPredictionId in the request.

    ", + "CreateDataSourceFromRDSInput$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource. Typically, an Amazon Resource Number (ARN) becomes the ID for a DataSource.

    ", + "CreateDataSourceFromRDSOutput$DataSourceId": "

    A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the DataSourceID in the request.

    ", + "CreateDataSourceFromRedshiftInput$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource.

    ", + "CreateDataSourceFromRedshiftOutput$DataSourceId": "

    A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the DataSourceID in the request.

    ", + "CreateDataSourceFromS3Input$DataSourceId": "

    A user-supplied identifier that uniquely identifies the DataSource.

    ", + "CreateDataSourceFromS3Output$DataSourceId": "

    A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the DataSourceID in the request.

    ", + "CreateEvaluationInput$EvaluationId": "

    A user-supplied ID that uniquely identifies the Evaluation.

    ", + "CreateEvaluationInput$MLModelId": "

    The ID of the MLModel to evaluate.

    The schema used in creating the MLModel must match the schema of the DataSource used in the Evaluation.

    ", + "CreateEvaluationInput$EvaluationDataSourceId": "

    The ID of the DataSource for the evaluation. The schema of the DataSource must match the schema used to create the MLModel.

    ", + "CreateEvaluationOutput$EvaluationId": "

    The user-supplied ID that uniquely identifies the Evaluation. This value should be identical to the value of the EvaluationId in the request.

    ", + "CreateMLModelInput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel.

    ", + "CreateMLModelInput$TrainingDataSourceId": "

    The DataSource that points to the training data.

    ", + "CreateMLModelOutput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

    ", + "CreateRealtimeEndpointInput$MLModelId": "

    The ID assigned to the MLModel during creation.

    ", + "CreateRealtimeEndpointOutput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

    ", + "DataSource$DataSourceId": "

    The ID that is assigned to the DataSource during creation.

    ", + "DeleteBatchPredictionInput$BatchPredictionId": "

    A user-supplied ID that uniquely identifies the BatchPrediction.

    ", + "DeleteBatchPredictionOutput$BatchPredictionId": "

    A user-supplied ID that uniquely identifies the BatchPrediction. This value should be identical to the value of the BatchPredictionID in the request.

    ", + "DeleteDataSourceInput$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource.

    ", + "DeleteDataSourceOutput$DataSourceId": "

    A user-supplied ID that uniquely identifies the DataSource. This value should be identical to the value of the DataSourceID in the request.

    ", + "DeleteEvaluationInput$EvaluationId": "

    A user-supplied ID that uniquely identifies the Evaluation to delete.

    ", + "DeleteEvaluationOutput$EvaluationId": "

    A user-supplied ID that uniquely identifies the Evaluation. This value should be identical to the value of the EvaluationId in the request.

    ", + "DeleteMLModelInput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel.

    ", + "DeleteMLModelOutput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelID in the request.

    ", + "DeleteRealtimeEndpointInput$MLModelId": "

    The ID assigned to the MLModel during creation.

    ", + "DeleteRealtimeEndpointOutput$MLModelId": "

    A user-supplied ID that uniquely identifies the MLModel. This value should be identical to the value of the MLModelId in the request.

    ", + "Evaluation$EvaluationId": "

    The ID that is assigned to the Evaluation at creation.

    ", + "Evaluation$MLModelId": "

    The ID of the MLModel that is the focus of the evaluation.

    ", + "Evaluation$EvaluationDataSourceId": "

    The ID of the DataSource that is used to evaluate the MLModel.

    ", + "GetBatchPredictionInput$BatchPredictionId": "

    An ID assigned to the BatchPrediction at creation.

    ", + "GetBatchPredictionOutput$BatchPredictionId": "

    An ID assigned to the BatchPrediction at creation. This value should be identical to the value of the BatchPredictionID in the request.

    ", + "GetBatchPredictionOutput$MLModelId": "

    The ID of the MLModel that generated predictions for the BatchPrediction request.

    ", + "GetBatchPredictionOutput$BatchPredictionDataSourceId": "

    The ID of the DataSource that was used to create the BatchPrediction.

    ", + "GetDataSourceInput$DataSourceId": "

    The ID assigned to the DataSource at creation.

    ", + "GetDataSourceOutput$DataSourceId": "

    The ID assigned to the DataSource at creation. This value should be identical to the value of the DataSourceId in the request.

    ", + "GetEvaluationInput$EvaluationId": "

    The ID of the Evaluation to retrieve. The evaluation of each MLModel is recorded and cataloged. The ID provides the means to access the information.

    ", + "GetEvaluationOutput$EvaluationId": "

    The evaluation ID which is same as the EvaluationId in the request.

    ", + "GetEvaluationOutput$MLModelId": "

    The ID of the MLModel that was the focus of the evaluation.

    ", + "GetEvaluationOutput$EvaluationDataSourceId": "

    The DataSource used for this evaluation.

    ", + "GetMLModelInput$MLModelId": "

    The ID assigned to the MLModel at creation.

    ", + "GetMLModelOutput$MLModelId": "

    The MLModel ID which is same as the MLModelId in the request.

    ", + "GetMLModelOutput$TrainingDataSourceId": "

    The ID of the training DataSource.

    ", + "MLModel$MLModelId": "

    The ID assigned to the MLModel at creation.

    ", + "MLModel$TrainingDataSourceId": "

    The ID of the training DataSource. The CreateMLModel operation uses the TrainingDataSourceId.

    ", + "PredictInput$MLModelId": "

    A unique identifier of the MLModel.

    ", + "UpdateBatchPredictionInput$BatchPredictionId": "

    The ID assigned to the BatchPrediction during creation.

    ", + "UpdateBatchPredictionOutput$BatchPredictionId": "

    The ID assigned to the BatchPrediction during creation. This value should be identical to the value of the BatchPredictionId in the request.

    ", + "UpdateDataSourceInput$DataSourceId": "

    The ID assigned to the DataSource during creation.

    ", + "UpdateDataSourceOutput$DataSourceId": "

    The ID assigned to the DataSource during creation. This value should be identical to the value of the DataSourceID in the request.

    ", + "UpdateEvaluationInput$EvaluationId": "

    The ID assigned to the Evaluation during creation.

    ", + "UpdateEvaluationOutput$EvaluationId": "

    The ID assigned to the Evaluation during creation. This value should be identical to the value of the Evaluation in the request.

    ", + "UpdateMLModelInput$MLModelId": "

    The ID assigned to the MLModel during creation.

    ", + "UpdateMLModelOutput$MLModelId": "

    The ID assigned to the MLModel during creation. This value should be identical to the value of the MLModelID in the request.

    " + } + }, + "EntityName": { + "base": "

    A user-supplied name or description of the Amazon ML resource.

    ", + "refs": { + "BatchPrediction$Name": "

    A user-supplied name or description of the BatchPrediction.

    ", + "CreateBatchPredictionInput$BatchPredictionName": "

    A user-supplied name or description of the BatchPrediction. BatchPredictionName can only use the UTF-8 character set.

    ", + "CreateDataSourceFromRDSInput$DataSourceName": "

    A user-supplied name or description of the DataSource.

    ", + "CreateDataSourceFromRedshiftInput$DataSourceName": "

    A user-supplied name or description of the DataSource.

    ", + "CreateDataSourceFromS3Input$DataSourceName": "

    A user-supplied name or description of the DataSource.

    ", + "CreateEvaluationInput$EvaluationName": "

    A user-supplied name or description of the Evaluation.

    ", + "CreateMLModelInput$MLModelName": "

    A user-supplied name or description of the MLModel.

    ", + "DataSource$Name": "

    A user-supplied name or description of the DataSource.

    ", + "Evaluation$Name": "

    A user-supplied name or description of the Evaluation.

    ", + "GetBatchPredictionOutput$Name": "

    A user-supplied name or description of the BatchPrediction.

    ", + "GetDataSourceOutput$Name": "

    A user-supplied name or description of the DataSource.

    ", + "GetEvaluationOutput$Name": "

    A user-supplied name or description of the Evaluation.

    ", + "UpdateBatchPredictionInput$BatchPredictionName": "

    A new user-supplied name or description of the BatchPrediction.

    ", + "UpdateDataSourceInput$DataSourceName": "

    A new user-supplied name or description of the DataSource that will replace the current description.

    ", + "UpdateEvaluationInput$EvaluationName": "

    A new user-supplied name or description of the Evaluation that will replace the current content.

    ", + "UpdateMLModelInput$MLModelName": "

    A user-supplied name or description of the MLModel.

    " + } + }, + "EntityStatus": { + "base": "

    Entity status with the following possible values:

    • PENDING
    • INPROGRESS
    • FAILED
    • COMPLETED
    • DELETED
    ", + "refs": { + "BatchPrediction$Status": "

    The status of the BatchPrediction. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.
    • INPROGRESS - The process is underway.
    • FAILED - The request to peform a batch prediction did not run to completion. It is not usable.
    • COMPLETED - The batch prediction process completed successfully.
    • DELETED - The BatchPrediction is marked as deleted. It is not usable.
    ", + "DataSource$Status": "

    The current status of the DataSource. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a DataSource.
    • INPROGRESS - The creation process is underway.
    • FAILED - The request to create a DataSource did not run to completion. It is not usable.
    • COMPLETED - The creation process completed successfully.
    • DELETED - The DataSource is marked as deleted. It is not usable.
    ", + "Evaluation$Status": "

    The status of the evaluation. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel.
    • INPROGRESS - The evaluation is underway.
    • FAILED - The request to evaluate an MLModel did not run to completion. It is not usable.
    • COMPLETED - The evaluation process completed successfully.
    • DELETED - The Evaluation is marked as deleted. It is not usable.
    ", + "GetBatchPredictionOutput$Status": "

    The status of the BatchPrediction, which can be one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate batch predictions.
    • INPROGRESS - The batch predictions are in progress.
    • FAILED - The request to perform a batch prediction did not run to completion. It is not usable.
    • COMPLETED - The batch prediction process completed successfully.
    • DELETED - The BatchPrediction is marked as deleted. It is not usable.
    ", + "GetDataSourceOutput$Status": "

    The current status of the DataSource. This element can have one of the following values:

    • PENDING - Amazon Machine Language (Amazon ML) submitted a request to create a DataSource.
    • INPROGRESS - The creation process is underway.
    • FAILED - The request to create a DataSource did not run to completion. It is not usable.
    • COMPLETED - The creation process completed successfully.
    • DELETED - The DataSource is marked as deleted. It is not usable.
    ", + "GetEvaluationOutput$Status": "

    The status of the evaluation. This element can have one of the following values:

    • PENDING - Amazon Machine Language (Amazon ML) submitted a request to evaluate an MLModel.
    • INPROGRESS - The evaluation is underway.
    • FAILED - The request to evaluate an MLModel did not run to completion. It is not usable.
    • COMPLETED - The evaluation process completed successfully.
    • DELETED - The Evaluation is marked as deleted. It is not usable.
    ", + "GetMLModelOutput$Status": "

    The current status of the MLModel. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to describe a MLModel.
    • INPROGRESS - The request is processing.
    • FAILED - The request did not run to completion. It is not usable.
    • COMPLETED - The request completed successfully.
    • DELETED - The MLModel is marked as deleted. It is not usable.
    ", + "MLModel$Status": "

    The current status of an MLModel. This element can have one of the following values:

    • PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create an MLModel.
    • INPROGRESS - The creation process is underway.
    • FAILED - The request to create an MLModel did not run to completion. It is not usable.
    • COMPLETED - The creation process completed successfully.
    • DELETED - The MLModel is marked as deleted. It is not usable.
    " + } + }, + "EpochTime": { + "base": "

    A timestamp represented in epoch time.

    ", + "refs": { + "BatchPrediction$CreatedAt": "

    The time that the BatchPrediction was created. The time is expressed in epoch time.

    ", + "BatchPrediction$LastUpdatedAt": "

    The time of the most recent edit to the BatchPrediction. The time is expressed in epoch time.

    ", + "DataSource$CreatedAt": "

    The time that the DataSource was created. The time is expressed in epoch time.

    ", + "DataSource$LastUpdatedAt": "

    The time of the most recent edit to the BatchPrediction. The time is expressed in epoch time.

    ", + "Evaluation$CreatedAt": "

    The time that the Evaluation was created. The time is expressed in epoch time.

    ", + "Evaluation$LastUpdatedAt": "

    The time of the most recent edit to the Evaluation. The time is expressed in epoch time.

    ", + "GetBatchPredictionOutput$CreatedAt": "

    The time when the BatchPrediction was created. The time is expressed in epoch time.

    ", + "GetBatchPredictionOutput$LastUpdatedAt": "

    The time of the most recent edit to BatchPrediction. The time is expressed in epoch time.

    ", + "GetDataSourceOutput$CreatedAt": "

    The time that the DataSource was created. The time is expressed in epoch time.

    ", + "GetDataSourceOutput$LastUpdatedAt": "

    The time of the most recent edit to the DataSource. The time is expressed in epoch time.

    ", + "GetEvaluationOutput$CreatedAt": "

    The time that the Evaluation was created. The time is expressed in epoch time.

    ", + "GetEvaluationOutput$LastUpdatedAt": "

    The time of the most recent edit to the BatchPrediction. The time is expressed in epoch time.

    ", + "GetMLModelOutput$CreatedAt": "

    The time that the MLModel was created. The time is expressed in epoch time.

    ", + "GetMLModelOutput$LastUpdatedAt": "

    The time of the most recent edit to the MLModel. The time is expressed in epoch time.

    ", + "GetMLModelOutput$ScoreThresholdLastUpdatedAt": "

    The time of the most recent edit to the ScoreThreshold. The time is expressed in epoch time.

    ", + "MLModel$CreatedAt": "

    The time that the MLModel was created. The time is expressed in epoch time.

    ", + "MLModel$LastUpdatedAt": "

    The time of the most recent edit to the MLModel. The time is expressed in epoch time.

    ", + "MLModel$ScoreThresholdLastUpdatedAt": "

    The time of the most recent edit to the ScoreThreshold. The time is expressed in epoch time.

    ", + "RealtimeEndpointInfo$CreatedAt": "

    The time that the request to create the real-time endpoint for the MLModel was received. The time is expressed in epoch time.

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "IdempotentParameterMismatchException$code": null, + "InternalServerException$code": null, + "InvalidInputException$code": null, + "LimitExceededException$code": null, + "ResourceNotFoundException$code": null + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "IdempotentParameterMismatchException$message": null, + "InternalServerException$message": null, + "InvalidInputException$message": null, + "LimitExceededException$message": null, + "PredictorNotMountedException$message": null, + "ResourceNotFoundException$message": null + } + }, + "Evaluation": { + "base": "

    Represents the output of GetEvaluation operation.

    The content consists of the detailed metadata and data file information and the current status of the Evaluation.

    ", + "refs": { + "Evaluations$member": null + } + }, + "EvaluationFilterVariable": { + "base": "

    A list of the variables to use in searching or filtering Evaluation.

    • CreatedAt - Sets the search criteria to Evaluation creation date.
    • Status - Sets the search criteria to Evaluation status.
    • Name - Sets the search criteria to the contents of Evaluation Name.
    • IAMUser - Sets the search criteria to the user account that invoked an evaluation.
    • MLModelId - Sets the search criteria to the Predictor that was evaluated.
    • DataSourceId - Sets the search criteria to the DataSource used in evaluation.
    • DataUri - Sets the search criteria to the data file(s) used in evaluation. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    ", + "refs": { + "DescribeEvaluationsInput$FilterVariable": "

    Use one of the following variable to filter a list of Evaluation objects:

    • CreatedAt - Sets the search criteria to the Evaluation creation date.
    • Status - Sets the search criteria to the Evaluation status.
    • Name - Sets the search criteria to the contents of Evaluation Name.
    • IAMUser - Sets the search criteria to the user account that invoked an Evaluation.
    • MLModelId - Sets the search criteria to the MLModel that was evaluated.
    • DataSourceId - Sets the search criteria to the DataSource used in Evaluation.
    • DataUri - Sets the search criteria to the data file(s) used in Evaluation. The URL can identify either a file or an Amazon Simple Storage Solution (Amazon S3) bucket or directory.
    " + } + }, + "Evaluations": { + "base": null, + "refs": { + "DescribeEvaluationsOutput$Results": "

    A list of Evaluation that meet the search criteria.

    " + } + }, + "GetBatchPredictionInput": { + "base": null, + "refs": { + } + }, + "GetBatchPredictionOutput": { + "base": "

    Represents the output of a GetBatchPrediction operation and describes a BatchPrediction.

    ", + "refs": { + } + }, + "GetDataSourceInput": { + "base": null, + "refs": { + } + }, + "GetDataSourceOutput": { + "base": "

    Represents the output of a GetDataSource operation and describes a DataSource.

    ", + "refs": { + } + }, + "GetEvaluationInput": { + "base": null, + "refs": { + } + }, + "GetEvaluationOutput": { + "base": "

    Represents the output of a GetEvaluation operation and describes an Evaluation.

    ", + "refs": { + } + }, + "GetMLModelInput": { + "base": null, + "refs": { + } + }, + "GetMLModelOutput": { + "base": "

    Represents the output of a GetMLModel operation, and provides detailed information about a MLModel.

    ", + "refs": { + } + }, + "IdempotentParameterMismatchException": { + "base": "

    A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.

    ", + "refs": { + } + }, + "IntegerType": { + "base": "

    Integer type that is a 32-bit signed number.

    ", + "refs": { + "RealtimeEndpointInfo$PeakRequestsPerSecond": "

    The maximum processing rate for the real-time endpoint for MLModel, measured in incoming requests per second.

    " + } + }, + "InternalServerException": { + "base": "

    An error on the server occurred when trying to process a request.

    ", + "refs": { + } + }, + "InvalidInputException": { + "base": "

    An error on the client occurred. Typically, the cause is an invalid input value.

    ", + "refs": { + } + }, + "Label": { + "base": null, + "refs": { + "Prediction$predictedLabel": "The prediction label for either a BINARY or MULTICLASS MLModel.", + "ScoreValuePerLabelMap$key": null + } + }, + "LimitExceededException": { + "base": "

    The subscriber exceeded the maximum number of operations. This exception can occur when listing objects such as DataSource.

    ", + "refs": { + } + }, + "LongType": { + "base": "

    Long integer type that is a 64-bit signed number.

    ", + "refs": { + "DataSource$DataSizeInBytes": "

    The total number of observations contained in the data files that the DataSource references.

    ", + "DataSource$NumberOfFiles": "

    The number of data files referenced by the DataSource.

    ", + "GetDataSourceOutput$DataSizeInBytes": "

    The total size of observations in the data files.

    ", + "GetDataSourceOutput$NumberOfFiles": "

    The number of data files referenced by the DataSource.

    ", + "GetMLModelOutput$SizeInBytes": null, + "MLModel$SizeInBytes": null + } + }, + "MLModel": { + "base": "

    Represents the output of a GetMLModel operation.

    The content consists of the detailed metadata and the current status of the MLModel.

    ", + "refs": { + "MLModels$member": null + } + }, + "MLModelFilterVariable": { + "base": null, + "refs": { + "DescribeMLModelsInput$FilterVariable": "

    Use one of the following variables to filter a list of MLModel:

    • CreatedAt - Sets the search criteria to MLModel creation date.
    • Status - Sets the search criteria to MLModel status.
    • Name - Sets the search criteria to the contents of MLModel Name.
    • IAMUser - Sets the search criteria to the user account that invoked the MLModel creation.
    • TrainingDataSourceId - Sets the search criteria to the DataSource used to train one or more MLModel.
    • RealtimeEndpointStatus - Sets the search criteria to the MLModel real-time endpoint status.
    • MLModelType - Sets the search criteria to MLModel type: binary, regression, or multi-class.
    • Algorithm - Sets the search criteria to the algorithm that the MLModel uses.
    • TrainingDataURI - Sets the search criteria to the data file(s) used in training a MLModel. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
    " + } + }, + "MLModelName": { + "base": null, + "refs": { + "GetMLModelOutput$Name": "

    A user-supplied name or description of the MLModel.

    ", + "MLModel$Name": "

    A user-supplied name or description of the MLModel.

    " + } + }, + "MLModelType": { + "base": null, + "refs": { + "CreateMLModelInput$MLModelType": "

    The category of supervised learning that this MLModel will address. Choose from the following types:

    • Choose REGRESSION if the MLModel will be used to predict a numeric value.
    • Choose BINARY if the MLModel result has two possible values.
    • Choose MULTICLASS if the MLModel result has a limited number of values.

    For more information, see the Amazon Machine Learning Developer Guide.

    ", + "GetMLModelOutput$MLModelType": "

    Identifies the MLModel category. The following are the available types:

    • REGRESSION -- Produces a numeric result. For example, \"What listing price should a house have?\"
    • BINARY -- Produces one of two possible results. For example, \"Is this an e-commerce website?\"
    • MULTICLASS -- Produces more than two possible results. For example, \"Is this a HIGH, LOW or MEDIUM risk trade?\"
    ", + "MLModel$MLModelType": "

    Identifies the MLModel category. The following are the available types:

    • REGRESSION - Produces a numeric result. For example, \"What listing price should a house have?\".
    • BINARY - Produces one of two possible results. For example, \"Is this a child-friendly web site?\".
    • MULTICLASS - Produces more than two possible results. For example, \"Is this a HIGH, LOW or MEDIUM risk trade?\".
    " + } + }, + "MLModels": { + "base": null, + "refs": { + "DescribeMLModelsOutput$Results": "

    A list of MLModel that meet the search criteria.

    " + } + }, + "Message": { + "base": "

    Description of the most recent details about an entity.

    ", + "refs": { + "BatchPrediction$Message": "

    A description of the most recent details about processing the batch prediction request.

    ", + "DataSource$Message": "

    A description of the most recent details about creating the DataSource.

    ", + "Evaluation$Message": "

    A description of the most recent details about evaluating the MLModel.

    ", + "GetBatchPredictionOutput$Message": "

    A description of the most recent details about processing the batch prediction request.

    ", + "GetDataSourceOutput$Message": "

    The description of the most recent details about creating the DataSource.

    ", + "GetEvaluationOutput$Message": "

    A description of the most recent details about evaluating the MLModel.

    ", + "GetMLModelOutput$Message": "

    Description of the most recent details about accessing the MLModel.

    ", + "MLModel$Message": "

    A description of the most recent details about accessing the MLModel.

    " + } + }, + "PageLimit": { + "base": null, + "refs": { + "DescribeBatchPredictionsInput$Limit": "

    The number of pages of information to include in the result. The range of acceptable values is 1 through 100. The default value is 100.

    ", + "DescribeDataSourcesInput$Limit": "

    The maximum number of DataSource to include in the result.

    ", + "DescribeEvaluationsInput$Limit": "

    The maximum number of Evaluation to include in the result.

    ", + "DescribeMLModelsInput$Limit": "

    The number of pages of information to include in the result. The range of acceptable values is 1 through 100. The default value is 100.

    " + } + }, + "PerformanceMetrics": { + "base": "

    Measurements of how well the MLModel performed on known observations. One of the following metrics is returned, based on the type of the MLModel:

    • BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique to measure performance.

    • RegressionRMSE: The regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.

    • MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique to measure performance.

    For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.

    ", + "refs": { + "Evaluation$PerformanceMetrics": "

    Measurements of how well the MLModel performed, using observations referenced by the DataSource. One of the following metrics is returned, based on the type of the MLModel:

    • BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique to measure performance.

    • RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.

    • MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique to measure performance.

    For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.

    ", + "GetEvaluationOutput$PerformanceMetrics": "

    Measurements of how well the MLModel performed using observations referenced by the DataSource. One of the following metrics is returned, based on the type of the MLModel:

    • BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique to measure performance.

    • RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.

    • MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique to measure performance.

    For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.

    " + } + }, + "PerformanceMetricsProperties": { + "base": null, + "refs": { + "PerformanceMetrics$Properties": null + } + }, + "PerformanceMetricsPropertyKey": { + "base": null, + "refs": { + "PerformanceMetricsProperties$key": null + } + }, + "PerformanceMetricsPropertyValue": { + "base": null, + "refs": { + "PerformanceMetricsProperties$value": null + } + }, + "PredictInput": { + "base": null, + "refs": { + } + }, + "PredictOutput": { + "base": null, + "refs": { + } + }, + "Prediction": { + "base": "

    The output from a Predict operation:

    • Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS DetailsAttributes.ALGORITHM - SGD

    • PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request.

    • PredictedScores - Contains the raw classification score corresponding to each label.

    • PredictedValue - Present for a REGRESSION MLModel request.

    ", + "refs": { + "PredictOutput$Prediction": null + } + }, + "PredictorNotMountedException": { + "base": "

    The exception is thrown when a predict request is made to an unmounted MLModel.

    ", + "refs": { + } + }, + "PresignedS3Url": { + "base": null, + "refs": { + "GetBatchPredictionOutput$LogUri": "

    A link to the file that contains logs of the CreateBatchPrediction operation.

    ", + "GetDataSourceOutput$LogUri": "

    A link to the file containing logs of either create DataSource operation.

    ", + "GetEvaluationOutput$LogUri": "

    A link to the file that contains logs of the CreateEvaluation operation.

    ", + "GetMLModelOutput$LogUri": "

    A link to the file that contains logs of the CreateMLModel operation.

    " + } + }, + "RDSDataSpec": { + "base": "

    The data specification of an Amazon Relational Database Service (Amazon RDS) DataSource.

    ", + "refs": { + "CreateDataSourceFromRDSInput$RDSData": "

    The data specification of an Amazon RDS DataSource:

    • DatabaseInformation -

      • DatabaseName - Name of the Amazon RDS database.
      • InstanceIdentifier - Unique identifier for the Amazon RDS database instance.

    • DatabaseCredentials - AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.

    • ResourceRole - Role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (EC2) instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.

    • ServiceRole - Role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon Simple Storage Service (S3). For more information, see Role templates for data pipelines.

    • SecurityInfo - Security information to use to access an Amazon RDS instance. You need to set up appropriate ingress rules for the security entity IDs provided to allow access to the Amazon RDS instance. Specify a [SubnetId, SecurityGroupIds] pair for a VPC-based Amazon RDS instance.

    • SelectSqlQuery - Query that is used to retrieve the observation data for the Datasource.

    • S3StagingLocation - Amazon S3 location for staging RDS data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in this location.

    • DataSchemaUri - Amazon S3 location of the DataSchema.

    • DataSchema - A JSON string representing the schema. This is not required if DataSchemaUri is specified.

    • DataRearrangement - A JSON string representing the splitting requirement of a Datasource.


      Sample - \"{\\\"splitting\\\":{\\\"percentBegin\\\":10,\\\"percentEnd\\\":60}}\"

    " + } + }, + "RDSDatabase": { + "base": "

    The database details of an Amazon RDS database.

    ", + "refs": { + "RDSDataSpec$DatabaseInformation": "

    Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.

    ", + "RDSMetadata$Database": "

    The database details required to connect to an Amazon RDS.

    " + } + }, + "RDSDatabaseCredentials": { + "base": "

    The database credentials to connect to a database on an RDS DB instance.

    ", + "refs": { + "RDSDataSpec$DatabaseCredentials": "

    The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.

    " + } + }, + "RDSDatabaseName": { + "base": "

    The name of a database hosted on an RDS DB instance.

    ", + "refs": { + "RDSDatabase$DatabaseName": null + } + }, + "RDSDatabasePassword": { + "base": "

    The password to be used by Amazon ML to connect to a database on an RDS DB instance. The password should have sufficient permissions to execute the RDSSelectQuery query.

    ", + "refs": { + "RDSDatabaseCredentials$Password": null + } + }, + "RDSDatabaseUsername": { + "base": "

    The username to be used by Amazon ML to connect to a database on an Amazon RDS instance. The username should have sufficient permissions to execute an RDSSelectSqlQuery query.

    ", + "refs": { + "RDSDatabaseCredentials$Username": null, + "RDSMetadata$DatabaseUserName": null + } + }, + "RDSInstanceIdentifier": { + "base": "Identifier of RDS DB Instances.", + "refs": { + "RDSDatabase$InstanceIdentifier": "

    The ID of an RDS DB instance.

    " + } + }, + "RDSMetadata": { + "base": "

    The datasource details that are specific to Amazon RDS.

    ", + "refs": { + "DataSource$RDSMetadata": null, + "GetDataSourceOutput$RDSMetadata": null + } + }, + "RDSSelectSqlQuery": { + "base": "

    The SQL query to be executed against the Amazon RDS database. The SQL query should be valid for the Amazon RDS type being used.

    ", + "refs": { + "RDSDataSpec$SelectSqlQuery": "

    The query that is used to retrieve the observation data for the DataSource.

    ", + "RDSMetadata$SelectSqlQuery": "

    The SQL query that is supplied during CreateDataSourceFromRDS. Returns only if Verbose is true in GetDataSourceInput.

    " + } + }, + "RealtimeEndpointInfo": { + "base": "

    Describes the real-time endpoint information for an MLModel.

    ", + "refs": { + "CreateRealtimeEndpointOutput$RealtimeEndpointInfo": "

    The endpoint information of the MLModel

    ", + "DeleteRealtimeEndpointOutput$RealtimeEndpointInfo": "

    The endpoint information of the MLModel

    ", + "GetMLModelOutput$EndpointInfo": "

    The current endpoint of the MLModel

    ", + "MLModel$EndpointInfo": "

    The current endpoint of the MLModel.

    " + } + }, + "RealtimeEndpointStatus": { + "base": null, + "refs": { + "RealtimeEndpointInfo$EndpointStatus": "

    The current status of the real-time endpoint for the MLModel. This element can have one of the following values:

    • NONE - Endpoint does not exist or was previously deleted.
    • READY - Endpoint is ready to be used for real-time predictions.
    • UPDATING - Updating/creating the endpoint.
    " + } + }, + "Recipe": { + "base": null, + "refs": { + "CreateMLModelInput$Recipe": "

    The data recipe for creating MLModel. You must specify either the recipe or its URI. If you don’t specify a recipe or its URI, Amazon ML creates a default.

    ", + "GetMLModelOutput$Recipe": "

    The recipe to use when training the MLModel. The Recipe provides detailed information about the observation data to use during training, as well as manipulations to perform on the observation data during training.

    Note

    This parameter is provided as part of the verbose format.

    " + } + }, + "Record": { + "base": "

    A map of variable name-value pairs that represent an observation.

    ", + "refs": { + "PredictInput$Record": null + } + }, + "RedshiftClusterIdentifier": { + "base": "

    The ID of an Amazon Redshift cluster.

    ", + "refs": { + "RedshiftDatabase$ClusterIdentifier": null + } + }, + "RedshiftDataSpec": { + "base": "

    Describes the data specification of an Amazon Redshift DataSource.

    ", + "refs": { + "CreateDataSourceFromRedshiftInput$DataSpec": "

    The data specification of an Amazon Redshift DataSource:

    • DatabaseInformation -

      • DatabaseName - Name of the Amazon Redshift database.
      • ClusterIdentifier - Unique ID for the Amazon Redshift cluster.

    • DatabaseCredentials - AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.

    • SelectSqlQuery - Query that is used to retrieve the observation data for the Datasource.

    • S3StagingLocation - Amazon Simple Storage Service (Amazon S3) location for staging Amazon Redshift data. The data retrieved from Amazon Relational Database Service (Amazon RDS) using SelectSqlQuery is stored in this location.

    • DataSchemaUri - Amazon S3 location of the DataSchema.

    • DataSchema - A JSON string representing the schema. This is not required if DataSchemaUri is specified.

    • DataRearrangement - A JSON string representing the splitting requirement of a Datasource.


      Sample - \"{\\\"splitting\\\":{\\\"percentBegin\\\":10,\\\"percentEnd\\\":60}}\"

    " + } + }, + "RedshiftDatabase": { + "base": "

    Describes the database details required to connect to an Amazon Redshift database.

    ", + "refs": { + "RedshiftDataSpec$DatabaseInformation": "

    Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift DataSource.

    ", + "RedshiftMetadata$RedshiftDatabase": null + } + }, + "RedshiftDatabaseCredentials": { + "base": "

    Describes the database credentials for connecting to a database on an Amazon Redshift cluster.

    ", + "refs": { + "RedshiftDataSpec$DatabaseCredentials": "

    Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.

    " + } + }, + "RedshiftDatabaseName": { + "base": "

    The name of a database hosted on an Amazon Redshift cluster.

    ", + "refs": { + "RedshiftDatabase$DatabaseName": null + } + }, + "RedshiftDatabasePassword": { + "base": "

    A password to be used by Amazon ML to connect to a database on an Amazon Redshift cluster. The password should have sufficient permissions to execute a RedshiftSelectSqlQuery query. The password should be valid for an Amazon Redshift USER.

    ", + "refs": { + "RedshiftDatabaseCredentials$Password": null + } + }, + "RedshiftDatabaseUsername": { + "base": "

    A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the RedshiftSelectSqlQuery query. The username should be valid for an Amazon Redshift USER.

    ", + "refs": { + "RedshiftDatabaseCredentials$Username": null, + "RedshiftMetadata$DatabaseUserName": null + } + }, + "RedshiftMetadata": { + "base": "

    Describes the DataSource details specific to Amazon Redshift.

    ", + "refs": { + "DataSource$RedshiftMetadata": null, + "GetDataSourceOutput$RedshiftMetadata": null + } + }, + "RedshiftSelectSqlQuery": { + "base": "

    Describes the SQL query to execute on the Amazon Redshift database. The SQL query should be valid for an Amazon Redshift SELECT.

    ", + "refs": { + "RedshiftDataSpec$SelectSqlQuery": "

    Describes the SQL Query to execute on an Amazon Redshift database for an Amazon Redshift DataSource.

    ", + "RedshiftMetadata$SelectSqlQuery": "

    The SQL query that is specified during CreateDataSourceFromRedshift. Returns only if Verbose is true in GetDataSourceInput.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    A specified resource cannot be located.

    ", + "refs": { + } + }, + "RoleARN": { + "base": "

    The Amazon Resource Name (ARN) of an AWS IAM Role such as the following: arn:aws:iam::account:role/rolename.

    ", + "refs": { + "CreateDataSourceFromRDSInput$RoleARN": "

    The role that Amazon ML assumes on behalf of the user to create and activate a data pipeline in the user’s account and copy data (using the SelectSqlQuery) query from Amazon RDS to Amazon S3.

    ", + "CreateDataSourceFromRedshiftInput$RoleARN": "

    A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on behalf of the user to create the following:

    • A security group to allow Amazon ML to execute the SelectSqlQuery query on an Amazon Redshift cluster

    • An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the S3StagingLocation

    ", + "DataSource$RoleARN": null, + "GetDataSourceOutput$RoleARN": null + } + }, + "S3DataSpec": { + "base": "

    Describes the data specification of a DataSource.

    ", + "refs": { + "CreateDataSourceFromS3Input$DataSpec": "

    The data specification of a DataSource:

    • DataLocationS3 - Amazon Simple Storage Service (Amazon S3) location of the observation data.

    • DataSchemaLocationS3 - Amazon S3 location of the DataSchema.

    • DataSchema - A JSON string representing the schema. This is not required if DataSchemaUri is specified.

    • DataRearrangement - A JSON string representing the splitting requirement of a Datasource.


      Sample - \"{\\\"splitting\\\":{\\\"percentBegin\\\":10,\\\"percentEnd\\\":60}}\"

    " + } + }, + "S3Url": { + "base": "

    A reference to a file or bucket on Amazon Simple Storage Service (Amazon S3).

    ", + "refs": { + "BatchPrediction$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "BatchPrediction$OutputUri": "

    The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the s3 key portion of the \"outputURI\" field: ':', '//', '/./', '/../'.

    ", + "CreateBatchPredictionInput$OutputUri": "

    The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory to store the batch prediction results. The following substrings are not allowed in the s3 key portion of the \"outputURI\" field: ':', '//', '/./', '/../'.

    Amazon ML needs permissions to store and retrieve the logs on your behalf. For information about how to set permissions, see the Amazon Machine Learning Developer Guide.

    ", + "CreateMLModelInput$RecipeUri": "

    The Amazon Simple Storage Service (Amazon S3) location and file name that contains the MLModel recipe. You must specify either the recipe or its URI. If you don’t specify a recipe or its URI, Amazon ML creates a default.

    ", + "DataSource$DataLocationS3": "

    The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a DataSource.

    ", + "Evaluation$InputDataLocationS3": "

    The location and name of the data in Amazon Simple Storage Server (Amazon S3) that is used in the evaluation.

    ", + "GetBatchPredictionOutput$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "GetBatchPredictionOutput$OutputUri": "

    The location of an Amazon S3 bucket or directory to receive the operation results.

    ", + "GetDataSourceOutput$DataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "GetEvaluationOutput$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "GetMLModelOutput$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "MLModel$InputDataLocationS3": "

    The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).

    ", + "RDSDataSpec$S3StagingLocation": "

    The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in this location.

    ", + "RDSDataSpec$DataSchemaUri": "

    The Amazon S3 location of the DataSchema.

    ", + "RedshiftDataSpec$S3StagingLocation": "

    Describes an Amazon S3 location to store the result set of the SelectSqlQuery query.

    ", + "RedshiftDataSpec$DataSchemaUri": "

    Describes the schema location for an Amazon Redshift DataSource.

    ", + "S3DataSpec$DataLocationS3": "

    The location of the data file(s) used by a DataSource. The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.

    ", + "S3DataSpec$DataSchemaLocationS3": "

    Describes the schema Location in Amazon S3.

    " + } + }, + "ScoreThreshold": { + "base": null, + "refs": { + "GetMLModelOutput$ScoreThreshold": "

    The scoring threshold is used in binary classification MLModels, and marks the boundary between a positive prediction and a negative prediction.

    Output values greater than or equal to the threshold receive a positive result from the MLModel, such as true. Output values less than the threshold receive a negative response from the MLModel, such as false.

    ", + "MLModel$ScoreThreshold": null, + "UpdateMLModelInput$ScoreThreshold": "

    The ScoreThreshold used in binary classification MLModel that marks the boundary between a positive prediction and a negative prediction.

    Output values greater than or equal to the ScoreThreshold receive a positive result from the MLModel, such as true. Output values less than the ScoreThreshold receive a negative response from the MLModel, such as false.

    " + } + }, + "ScoreValue": { + "base": null, + "refs": { + "ScoreValuePerLabelMap$value": null + } + }, + "ScoreValuePerLabelMap": { + "base": "Provides the raw classification score corresponding to each label.", + "refs": { + "Prediction$predictedScores": null + } + }, + "SortOrder": { + "base": "

    The sort order specified in a listing condition. Possible values include the following:

    • asc - Present the information in ascending order (from A-Z).
    • dsc - Present the information in descending order (from Z-A).
    ", + "refs": { + "DescribeBatchPredictionsInput$SortOrder": "

    A two-value parameter that determines the sequence of the resulting list of MLModels.

    • asc - Arranges the list in ascending order (A-Z, 0-9).
    • dsc - Arranges the list in descending order (Z-A, 9-0).

    Results are sorted by FilterVariable.

    ", + "DescribeDataSourcesInput$SortOrder": "

    A two-value parameter that determines the sequence of the resulting list of DataSource.

    • asc - Arranges the list in ascending order (A-Z, 0-9).
    • dsc - Arranges the list in descending order (Z-A, 9-0).

    Results are sorted by FilterVariable.

    ", + "DescribeEvaluationsInput$SortOrder": "

    A two-value parameter that determines the sequence of the resulting list of Evaluation.

    • asc - Arranges the list in ascending order (A-Z, 0-9).
    • dsc - Arranges the list in descending order (Z-A, 9-0).

    Results are sorted by FilterVariable.

    ", + "DescribeMLModelsInput$SortOrder": "

    A two-value parameter that determines the sequence of the resulting list of MLModel.

    • asc - Arranges the list in ascending order (A-Z, 0-9).
    • dsc - Arranges the list in descending order (Z-A, 9-0).

    Results are sorted by FilterVariable.

    " + } + }, + "StringType": { + "base": "

    String type.

    ", + "refs": { + "DescribeBatchPredictionsInput$NextToken": "

    An ID of the page in the paginated results.

    ", + "DescribeBatchPredictionsOutput$NextToken": "

    The ID of the next page in the paginated results that indicates at least one more page follows.

    ", + "DescribeDataSourcesInput$NextToken": "

    The ID of the page in the paginated results.

    ", + "DescribeDataSourcesOutput$NextToken": "

    An ID of the next page in the paginated results that indicates at least one more page follows.

    ", + "DescribeEvaluationsInput$NextToken": "

    The ID of the page in the paginated results.

    ", + "DescribeEvaluationsOutput$NextToken": "

    The ID of the next page in the paginated results that indicates at least one more page follows.

    ", + "DescribeMLModelsInput$NextToken": "

    The ID of the page in the paginated results.

    ", + "DescribeMLModelsOutput$NextToken": "

    The ID of the next page in the paginated results that indicates at least one more page follows.

    ", + "TrainingParameters$key": null, + "TrainingParameters$value": null + } + }, + "TrainingParameters": { + "base": null, + "refs": { + "CreateMLModelInput$Parameters": "

    A list of the training parameters in the MLModel. The list is implemented as a map of key/value pairs.

    The following is the current set of training parameters:

    • sgd.l1RegularizationAmount - Coefficient regularization L1 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to zero, resulting in sparse feature set. If you use this parameter, start by specifying a small value such as 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is not to use L1 normalization. The parameter cannot be used when L2 is specified. Use this parameter sparingly.

    • sgd.l2RegularizationAmount - Coefficient regularization L2 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value such as 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is not to use L2 normalization. This cannot be used when L1 is specified. Use this parameter sparingly.

    • sgd.maxPasses - Number of times that the training process traverses the observations to build the MLModel. The value is an integer that ranges from 1 to 10000. The default value is 10.

    • sgd.maxMLModelSizeInBytes - Maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.

      The value is an integer that ranges from 100000 to 2147483648. The default value is 33554432.

    ", + "GetMLModelOutput$TrainingParameters": "

    A list of the training parameters in the MLModel. The list is implemented as a map of key/value pairs.

    The following is the current set of training parameters:

    • sgd.l1RegularizationAmount - Coefficient regularization L1 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, specify a small value, such as 1.0E-04 or 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is not to use L1 normalization. The parameter cannot be used when L2 is specified. Use this parameter sparingly.

    • sgd.l2RegularizationAmount - Coefficient regularization L2 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, specify a small value, such as 1.0E-04 or 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is not to use L2 normalization. This parameter cannot be used when L1 is specified. Use this parameter sparingly.

    • sgd.maxPasses - The number of times that the training process traverses the observations to build the MLModel. The value is an integer that ranges from 1 to 10000. The default value is 10.

    • sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending on the input data, the model size might affect performance.

      The value is an integer that ranges from 100000 to 2147483648. The default value is 33554432.

    ", + "MLModel$TrainingParameters": "

    A list of the training parameters in the MLModel. The list is implemented as a map of key/value pairs.

    The following is the current set of training parameters:

    • sgd.l1RegularizationAmount - Coefficient regularization L1 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, specify a small value, such as 1.0E-04 or 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is not to use L1 normalization. The parameter cannot be used when L2 is specified. Use this parameter sparingly.

    • sgd.l2RegularizationAmount - Coefficient regularization L2 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, specify a small value, such as 1.0E-04 or 1.0E-08.

      The value is a double that ranges from 0 to MAX_DOUBLE. The default is not to use L2 normalization. This cannot be used when L1 is specified. Use this parameter sparingly.

    • sgd.maxPasses - Number of times that the training process traverses the observations to build the MLModel. The value is an integer that ranges from 1 to 10000. The default value is 10.

    • sgd.maxMLModelSizeInBytes - Maximum allowed size of the model. Depending on the input data, the model size might affect performance.

      The value is an integer that ranges from 100000 to 2147483648. The default value is 33554432.

    " + } + }, + "UpdateBatchPredictionInput": { + "base": null, + "refs": { + } + }, + "UpdateBatchPredictionOutput": { + "base": "

    Represents the output of an UpdateBatchPrediction operation.

    You can see the updated content by using the GetBatchPrediction operation.

    ", + "refs": { + } + }, + "UpdateDataSourceInput": { + "base": null, + "refs": { + } + }, + "UpdateDataSourceOutput": { + "base": "

    Represents the output of an UpdateDataSource operation.

    You can see the updated content by using the GetBatchPrediction operation.

    ", + "refs": { + } + }, + "UpdateEvaluationInput": { + "base": null, + "refs": { + } + }, + "UpdateEvaluationOutput": { + "base": "

    Represents the output of an UpdateEvaluation operation.

    You can see the updated content by using the GetEvaluation operation.

    ", + "refs": { + } + }, + "UpdateMLModelInput": { + "base": null, + "refs": { + } + }, + "UpdateMLModelOutput": { + "base": "

    Represents the output of an UpdateMLModel operation.

    You can see the updated content by using the GetMLModel operation.

    ", + "refs": { + } + }, + "VariableName": { + "base": "

    The name of a variable. Currently it's used to specify the name of the target value, label, weight, and tags.

    ", + "refs": { + "Record$key": null + } + }, + "VariableValue": { + "base": "

    The value of a variable. Currently it's used to specify values of the target value, weights, and tag variables and for filtering variable values.

    ", + "refs": { + "Record$value": null + } + }, + "Verbose": { + "base": "

    Specifies whether a describe operation should return exhaustive or abbreviated information.

    ", + "refs": { + "GetDataSourceInput$Verbose": "

    Specifies whether the GetDataSource operation should return DataSourceSchema.

    If true, DataSourceSchema is returned.

    If false, DataSourceSchema is not returned.

    ", + "GetMLModelInput$Verbose": "

    Specifies whether the GetMLModel operation should return Recipe.

    If true, Recipe is returned.

    If false, Recipe is not returned.

    " + } + }, + "VipURL": { + "base": null, + "refs": { + "PredictInput$PredictEndpoint": null, + "RealtimeEndpointInfo$EndpointUrl": "

    The URI that specifies where to send real-time prediction requests for the MLModel.

    Note

    The application must wait until the real-time endpoint is ready before using this URI.

    " + } + }, + "floatLabel": { + "base": null, + "refs": { + "Prediction$predictedValue": "The prediction value for REGRESSION MLModel." + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/machinelearning/2014-12-12/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,28 @@ +{ + "pagination": { + "DescribeBatchPredictions": { + "limit_key": "Limit", + "output_token": "NextToken", + "input_token": "NextToken", + "result_key": "Results" + }, + "DescribeDataSources": { + "limit_key": "Limit", + "output_token": "NextToken", + "input_token": "NextToken", + "result_key": "Results" + }, + "DescribeEvaluations": { + "limit_key": "Limit", + "output_token": "NextToken", + "input_token": "NextToken", + "result_key": "Results" + }, + "DescribeMLModels": { + "limit_key": "Limit", + "output_token": "NextToken", + "input_token": "NextToken", + "result_key": "Results" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,104 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-07-01", + "endpointPrefix":"marketplacecommerceanalytics", + "jsonVersion":"1.1", + "serviceFullName":"AWS Marketplace Commerce Analytics", + "signatureVersion":"v4", + "signingName":"marketplacecommerceanalytics", + "targetPrefix":"MarketplaceCommerceAnalytics20150701", + "protocol":"json" + }, + "operations":{ + "GenerateDataSet":{ + "name":"GenerateDataSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GenerateDataSetRequest"}, + "output":{"shape":"GenerateDataSetResult"}, + "errors":[ + { + "shape":"MarketplaceCommerceAnalyticsException", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "DataSetPublicationDate":{"type":"timestamp"}, + "DataSetRequestId":{"type":"string"}, + "DataSetType":{ + "type":"string", + "enum":[ + "customer_subscriber_hourly_monthly_subscriptions", + "customer_subscriber_annual_subscriptions", + "daily_business_usage_by_instance_type", + "daily_business_fees", + "daily_business_free_trial_conversions", + "daily_business_new_instances", + "daily_business_new_product_subscribers", + "daily_business_canceled_product_subscribers", + "monthly_revenue_billing_and_revenue_data", + "monthly_revenue_annual_subscriptions", + "disbursed_amount_by_product", + "disbursed_amount_by_customer_geo", + "disbursed_amount_by_age_of_uncollected_funds", + "disbursed_amount_by_age_of_disbursed_funds", + "customer_profile_by_industry", + "customer_profile_by_revenue" + ], + "min":1, + "max":255 + }, + "DestinationS3BucketName":{ + "type":"string", + "min":1 + }, + "DestinationS3Prefix":{"type":"string"}, + "ExceptionMessage":{"type":"string"}, + "GenerateDataSetRequest":{ + "type":"structure", + "required":[ + "dataSetType", + "dataSetPublicationDate", + 
"roleNameArn", + "destinationS3BucketName", + "snsTopicArn" + ], + "members":{ + "dataSetType":{"shape":"DataSetType"}, + "dataSetPublicationDate":{"shape":"DataSetPublicationDate"}, + "roleNameArn":{"shape":"RoleNameArn"}, + "destinationS3BucketName":{"shape":"DestinationS3BucketName"}, + "destinationS3Prefix":{"shape":"DestinationS3Prefix"}, + "snsTopicArn":{"shape":"SnsTopicArn"} + } + }, + "GenerateDataSetResult":{ + "type":"structure", + "members":{ + "dataSetRequestId":{"shape":"DataSetRequestId"} + } + }, + "MarketplaceCommerceAnalyticsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true, + "fault":true + }, + "RoleNameArn":{ + "type":"string", + "min":1 + }, + "SnsTopicArn":{ + "type":"string", + "min":1 + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/marketplacecommerceanalytics/2015-07-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,72 @@ +{ + "version": "2.0", + "operations": { + "GenerateDataSet": "Given a data set type and data set publication date, asynchronously publishes the requested data set to the specified S3 bucket and notifies the specified SNS topic once the data is available. Returns a unique request identifier that can be used to correlate requests with notifications from the SNS topic. Data sets will be published in comma-separated values (CSV) format with the file name {data_set_type}_YYYY-MM-DD.csv. 
If a file with the same name already exists (e.g. if the same data set is requested twice), the original file will be overwritten by the new file. Requires a Role with an attached permissions policy providing Allow permissions for the following actions: s3:PutObject, s3:getBucketLocation, sns:SetRegion, sns:ListTopics, sns:Publish, iam:GetRolePolicy." + }, + "service": "Provides AWS Marketplace business intelligence data on-demand.", + "shapes": { + "DataSetPublicationDate": { + "base": "The date a data set was published. For daily data sets, provide a date with day-level granularity for the desired day. For weekly data sets, provide a date with day-level granularity within the desired week (the day value will be ignored). For monthly data sets, provide a date with month-level granularity for the desired month (the day value will be ignored).", + "refs": { + "GenerateDataSetRequest$dataSetPublicationDate": null + } + }, + "DataSetRequestId": { + "base": "A unique identifier representing a specific request to the GenerateDataSet operation. This identifier can be used to correlate a request with notifications from the SNS topic.", + "refs": { + "GenerateDataSetResult$dataSetRequestId": null + } + }, + "DataSetType": { + "base": "The type of the data set to publish.", + "refs": { + "GenerateDataSetRequest$dataSetType": null + } + }, + "DestinationS3BucketName": { + "base": "The name (friendly name, not ARN) of the destination S3 bucket.", + "refs": { + "GenerateDataSetRequest$destinationS3BucketName": null + } + }, + "DestinationS3Prefix": { + "base": "(Optional) The desired S3 prefix for the published data set, similar to a directory path in standard file systems. For example, if given the bucket name \"mybucket\" and the prefix \"myprefix/mydatasets\", the output file \"outputfile\" would be published to \"s3://mybucket/myprefix/mydatasets/outputfile\". If the prefix directory structure does not exist, it will be created. 
If no prefix is provided, the data set will be published to the S3 bucket root.", + "refs": { + "GenerateDataSetRequest$destinationS3Prefix": null + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "MarketplaceCommerceAnalyticsException$message": null + } + }, + "GenerateDataSetRequest": { + "base": "Container for the parameters to the GenerateDataSet operation.", + "refs": { + } + }, + "GenerateDataSetResult": { + "base": "Container for the result of the GenerateDataSet operation.", + "refs": { + } + }, + "MarketplaceCommerceAnalyticsException": { + "base": "This exception is thrown when an internal service error occurs.", + "refs": { + } + }, + "RoleNameArn": { + "base": "The Amazon Resource Name (ARN) of the Role with an attached permissions policy to interact with the provided AWS services.", + "refs": { + "GenerateDataSetRequest$roleNameArn": null + } + }, + "SnsTopicArn": { + "base": "Amazon Resource Name (ARN) for the SNS Topic that will be notified when the data set has been published or if an error has occurred.", + "refs": { + "GenerateDataSetRequest$snsTopicArn": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,119 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-06-05", + "endpointPrefix":"mobileanalytics", + "serviceFullName":"Amazon Mobile Analytics", + "signatureVersion":"v4", + "protocol":"rest-json" + }, + "operations":{ + "PutEvents":{ + 
"name":"PutEvents", + "http":{ + "method":"POST", + "requestUri":"/2014-06-05/events", + "responseCode":202 + }, + "input":{"shape":"PutEventsInput"}, + "errors":[ + { + "shape":"BadRequestException", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Double":{"type":"double"}, + "Event":{ + "type":"structure", + "required":[ + "eventType", + "timestamp" + ], + "members":{ + "eventType":{"shape":"String50Chars"}, + "timestamp":{"shape":"ISO8601Timestamp"}, + "session":{"shape":"Session"}, + "version":{"shape":"String10Chars"}, + "attributes":{"shape":"MapOfStringToString"}, + "metrics":{"shape":"MapOfStringToNumber"} + } + }, + "EventListDefinition":{ + "type":"list", + "member":{"shape":"Event"} + }, + "ISO8601Timestamp":{"type":"string"}, + "Long":{"type":"long"}, + "MapOfStringToNumber":{ + "type":"map", + "key":{"shape":"String50Chars"}, + "value":{"shape":"Double"}, + "min":0, + "max":50 + }, + "MapOfStringToString":{ + "type":"map", + "key":{"shape":"String50Chars"}, + "value":{"shape":"String0to1000Chars"}, + "min":0, + "max":50 + }, + "PutEventsInput":{ + "type":"structure", + "required":[ + "events", + "clientContext" + ], + "members":{ + "events":{"shape":"EventListDefinition"}, + "clientContext":{ + "shape":"String", + "location":"header", + "locationName":"x-amz-Client-Context" + }, + "clientContextEncoding":{ + "shape":"String", + "location":"header", + "locationName":"x-amz-Client-Context-Encoding" + } + } + }, + "Session":{ + "type":"structure", + "members":{ + "id":{"shape":"String50Chars"}, + "duration":{"shape":"Long"}, + "startTimestamp":{"shape":"ISO8601Timestamp"}, + "stopTimestamp":{"shape":"ISO8601Timestamp"} + } + }, + "String":{"type":"string"}, + "String0to1000Chars":{ + "type":"string", + "min":0, + "max":1000 + }, + "String10Chars":{ + 
"type":"string", + "min":1, + "max":10 + }, + "String50Chars":{ + "type":"string", + "min":1, + "max":50 + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/mobileanalytics/2014-06-05/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,98 @@ +{ + "version": "2.0", + "operations": { + "PutEvents": "

    The PutEvents operation records one or more events. You can have up to 1,500 unique custom events per app, any combination of up to 40 attributes and metrics per custom event, and any number of attribute or metric values.

    " + }, + "service": "

    Amazon Mobile Analytics is a service for collecting, visualizing, and understanding app usage data at scale.

    ", + "shapes": { + "BadRequestException": { + "base": "

    An exception object returned when a request fails.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "MapOfStringToNumber$value": null + } + }, + "Event": { + "base": "

    A JSON object representing a batch of unique event occurrences in your app.

    ", + "refs": { + "EventListDefinition$member": null + } + }, + "EventListDefinition": { + "base": null, + "refs": { + "PutEventsInput$events": "

    An array of Event JSON objects

    " + } + }, + "ISO8601Timestamp": { + "base": null, + "refs": { + "Event$timestamp": "

    The time the event occurred in ISO 8601 standard date time format. For example, 2014-06-30T19:07:47.885Z

    ", + "Session$startTimestamp": "

    The time the event started in ISO 8601 standard date time format. For example, 2014-06-30T19:07:47.885Z

    ", + "Session$stopTimestamp": "

    The time the event terminated in ISO 8601 standard date time format. For example, 2014-06-30T19:07:47.885Z

    " + } + }, + "Long": { + "base": null, + "refs": { + "Session$duration": "

    The duration of the session.

    " + } + }, + "MapOfStringToNumber": { + "base": null, + "refs": { + "Event$metrics": "

    A collection of key-value pairs that gives additional, measurable context to the event. The key-value pairs are specified by the developer.

    This collection can be empty or the attribute object can be omitted.

    " + } + }, + "MapOfStringToString": { + "base": null, + "refs": { + "Event$attributes": "

    A collection of key-value pairs that give additional context to the event. The key-value pairs are specified by the developer.

    This collection can be empty or the attribute object can be omitted.

    " + } + }, + "PutEventsInput": { + "base": "

    A container for the data needed for a PutEvent operation

    ", + "refs": { + } + }, + "Session": { + "base": "

    Describes the session. Session information is required on ALL events.

    ", + "refs": { + "Event$session": "

    The session the event occurred within.

    " + } + }, + "String": { + "base": null, + "refs": { + "BadRequestException$message": "

    A text description associated with the BadRequestException object.

    ", + "PutEventsInput$clientContext": "

    The client context including the client ID, app title, app version and package name.

    ", + "PutEventsInput$clientContextEncoding": "

    The encoding used for the client context.

    " + } + }, + "String0to1000Chars": { + "base": null, + "refs": { + "MapOfStringToString$value": null + } + }, + "String10Chars": { + "base": null, + "refs": { + "Event$version": "

    The version of the event.

    " + } + }, + "String50Chars": { + "base": null, + "refs": { + "Event$eventType": "

    A name signifying an event that occurred in your app. This is used for grouping and aggregating like events together for reporting purposes.

    ", + "MapOfStringToNumber$key": null, + "MapOfStringToString$key": null, + "Session$id": "

    A unique identifier for the session.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,917 @@ +{ + "metadata":{ + "apiVersion":"2010-08-01", + "endpointPrefix":"monitoring", + "serviceAbbreviation":"CloudWatch", + "serviceFullName":"Amazon CloudWatch", + "signatureVersion":"v4", + "xmlNamespace":"http://monitoring.amazonaws.com/doc/2010-08-01/", + "protocol":"query" + }, + "operations":{ + "DeleteAlarms":{ + "name":"DeleteAlarms", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAlarmsInput"}, + "errors":[ + { + "shape":"ResourceNotFound", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeAlarmHistory":{ + "name":"DescribeAlarmHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAlarmHistoryInput"}, + "output":{ + "shape":"DescribeAlarmHistoryOutput", + "resultWrapper":"DescribeAlarmHistoryResult" + }, + "errors":[ + { + "shape":"InvalidNextToken", + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeAlarms":{ + "name":"DescribeAlarms", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAlarmsInput"}, + "output":{ + "shape":"DescribeAlarmsOutput", + "resultWrapper":"DescribeAlarmsResult" + }, + "errors":[ + { + "shape":"InvalidNextToken", + "error":{ + "code":"InvalidNextToken", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeAlarmsForMetric":{ + "name":"DescribeAlarmsForMetric", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAlarmsForMetricInput"}, + "output":{ + "shape":"DescribeAlarmsForMetricOutput", + "resultWrapper":"DescribeAlarmsForMetricResult" + } + }, + "DisableAlarmActions":{ + "name":"DisableAlarmActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableAlarmActionsInput"} + }, + "EnableAlarmActions":{ + "name":"EnableAlarmActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableAlarmActionsInput"} + }, + "GetMetricStatistics":{ + "name":"GetMetricStatistics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMetricStatisticsInput"}, + "output":{ + "shape":"GetMetricStatisticsOutput", + "resultWrapper":"GetMetricStatisticsResult" + }, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingRequiredParameterException", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalServiceFault", + "error":{ + "code":"InternalServiceError", + "httpStatusCode":500 + }, + "exception":true, + "xmlOrder":["Message"] + } + ] + }, + "ListMetrics":{ + "name":"ListMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMetricsInput"}, + "output":{ + "shape":"ListMetricsOutput", + "xmlOrder":[ + "Metrics", + "NextToken" + ], + "resultWrapper":"ListMetricsResult" + }, + "errors":[ + { + "shape":"InternalServiceFault", + "error":{ + 
"code":"InternalServiceError", + "httpStatusCode":500 + }, + "exception":true, + "xmlOrder":["Message"] + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "PutMetricAlarm":{ + "name":"PutMetricAlarm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutMetricAlarmInput"}, + "errors":[ + { + "shape":"LimitExceededFault", + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "PutMetricData":{ + "name":"PutMetricData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutMetricDataInput"}, + "errors":[ + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingRequiredParameterException", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterCombinationException", + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalServiceFault", + "error":{ + "code":"InternalServiceError", + "httpStatusCode":500 + }, + "exception":true, + "xmlOrder":["Message"] + } + ] + }, + "SetAlarmState":{ + "name":"SetAlarmState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetAlarmStateInput"}, + "errors":[ + { + "shape":"ResourceNotFound", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidFormatFault", + "error":{ + "code":"InvalidFormat", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "ActionPrefix":{ + "type":"string", + "min":1, + "max":1024 + }, + 
"ActionsEnabled":{"type":"boolean"}, + "AlarmArn":{ + "type":"string", + "min":1, + "max":1600 + }, + "AlarmDescription":{ + "type":"string", + "min":0, + "max":255 + }, + "AlarmHistoryItem":{ + "type":"structure", + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "Timestamp":{"shape":"Timestamp"}, + "HistoryItemType":{"shape":"HistoryItemType"}, + "HistorySummary":{"shape":"HistorySummary"}, + "HistoryData":{"shape":"HistoryData"} + } + }, + "AlarmHistoryItems":{ + "type":"list", + "member":{"shape":"AlarmHistoryItem"} + }, + "AlarmName":{ + "type":"string", + "min":1, + "max":255 + }, + "AlarmNamePrefix":{ + "type":"string", + "min":1, + "max":255 + }, + "AlarmNames":{ + "type":"list", + "member":{"shape":"AlarmName"}, + "max":100 + }, + "AwsQueryErrorMessage":{"type":"string"}, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "GreaterThanOrEqualToThreshold", + "GreaterThanThreshold", + "LessThanThreshold", + "LessThanOrEqualToThreshold" + ] + }, + "Datapoint":{ + "type":"structure", + "members":{ + "Timestamp":{"shape":"Timestamp"}, + "SampleCount":{"shape":"DatapointValue"}, + "Average":{"shape":"DatapointValue"}, + "Sum":{"shape":"DatapointValue"}, + "Minimum":{"shape":"DatapointValue"}, + "Maximum":{"shape":"DatapointValue"}, + "Unit":{"shape":"StandardUnit"} + }, + "xmlOrder":[ + "Timestamp", + "SampleCount", + "Average", + "Sum", + "Minimum", + "Maximum", + "Unit" + ] + }, + "DatapointValue":{"type":"double"}, + "Datapoints":{ + "type":"list", + "member":{"shape":"Datapoint"} + }, + "DeleteAlarmsInput":{ + "type":"structure", + "required":["AlarmNames"], + "members":{ + "AlarmNames":{"shape":"AlarmNames"} + } + }, + "DescribeAlarmHistoryInput":{ + "type":"structure", + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "HistoryItemType":{"shape":"HistoryItemType"}, + "StartDate":{"shape":"Timestamp"}, + "EndDate":{"shape":"Timestamp"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"NextToken"} + } + }, + 
"DescribeAlarmHistoryOutput":{ + "type":"structure", + "members":{ + "AlarmHistoryItems":{"shape":"AlarmHistoryItems"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeAlarmsForMetricInput":{ + "type":"structure", + "required":[ + "MetricName", + "Namespace" + ], + "members":{ + "MetricName":{"shape":"MetricName"}, + "Namespace":{"shape":"Namespace"}, + "Statistic":{"shape":"Statistic"}, + "Dimensions":{"shape":"Dimensions"}, + "Period":{"shape":"Period"}, + "Unit":{"shape":"StandardUnit"} + } + }, + "DescribeAlarmsForMetricOutput":{ + "type":"structure", + "members":{ + "MetricAlarms":{"shape":"MetricAlarms"} + } + }, + "DescribeAlarmsInput":{ + "type":"structure", + "members":{ + "AlarmNames":{"shape":"AlarmNames"}, + "AlarmNamePrefix":{"shape":"AlarmNamePrefix"}, + "StateValue":{"shape":"StateValue"}, + "ActionPrefix":{"shape":"ActionPrefix"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeAlarmsOutput":{ + "type":"structure", + "members":{ + "MetricAlarms":{"shape":"MetricAlarms"}, + "NextToken":{"shape":"NextToken"} + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"DimensionName"}, + "Value":{"shape":"DimensionValue"} + }, + "xmlOrder":[ + "Name", + "Value" + ] + }, + "DimensionFilter":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DimensionName"}, + "Value":{"shape":"DimensionValue"} + } + }, + "DimensionFilters":{ + "type":"list", + "member":{"shape":"DimensionFilter"}, + "max":10 + }, + "DimensionName":{ + "type":"string", + "min":1, + "max":255 + }, + "DimensionValue":{ + "type":"string", + "min":1, + "max":255 + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"}, + "max":10 + }, + "DisableAlarmActionsInput":{ + "type":"structure", + "required":["AlarmNames"], + "members":{ + "AlarmNames":{"shape":"AlarmNames"} + } + }, + "EnableAlarmActionsInput":{ + "type":"structure", + 
"required":["AlarmNames"], + "members":{ + "AlarmNames":{"shape":"AlarmNames"} + } + }, + "ErrorMessage":{ + "type":"string", + "min":1, + "max":255 + }, + "EvaluationPeriods":{ + "type":"integer", + "min":1 + }, + "FaultDescription":{"type":"string"}, + "GetMetricStatisticsInput":{ + "type":"structure", + "required":[ + "Namespace", + "MetricName", + "StartTime", + "EndTime", + "Period", + "Statistics" + ], + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricName":{"shape":"MetricName"}, + "Dimensions":{"shape":"Dimensions"}, + "StartTime":{"shape":"Timestamp"}, + "EndTime":{"shape":"Timestamp"}, + "Period":{"shape":"Period"}, + "Statistics":{"shape":"Statistics"}, + "Unit":{"shape":"StandardUnit"} + } + }, + "GetMetricStatisticsOutput":{ + "type":"structure", + "members":{ + "Label":{"shape":"MetricLabel"}, + "Datapoints":{"shape":"Datapoints"} + } + }, + "HistoryData":{ + "type":"string", + "min":1, + "max":4095 + }, + "HistoryItemType":{ + "type":"string", + "enum":[ + "ConfigurationUpdate", + "StateUpdate", + "Action" + ] + }, + "HistorySummary":{ + "type":"string", + "min":1, + "max":255 + }, + "InternalServiceFault":{ + "type":"structure", + "members":{ + "Message":{"shape":"FaultDescription"} + }, + "error":{ + "code":"InternalServiceError", + "httpStatusCode":500 + }, + "exception":true, + "xmlOrder":["Message"] + }, + "InvalidFormatFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"InvalidFormat", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidNextToken":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterCombinationException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"InvalidParameterCombination", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitExceededFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"LimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ListMetricsInput":{ + "type":"structure", + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricName":{"shape":"MetricName"}, + "Dimensions":{"shape":"DimensionFilters"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListMetricsOutput":{ + "type":"structure", + "members":{ + "Metrics":{"shape":"Metrics"}, + "NextToken":{"shape":"NextToken"} + }, + "xmlOrder":[ + "Metrics", + "NextToken" + ] + }, + "MaxRecords":{ + "type":"integer", + "min":1, + "max":100 + }, + "Metric":{ + "type":"structure", + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricName":{"shape":"MetricName"}, + "Dimensions":{"shape":"Dimensions"} + }, + "xmlOrder":[ + "Namespace", + "MetricName", + "Dimensions" + ] + }, + "MetricAlarm":{ + "type":"structure", + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "AlarmArn":{"shape":"AlarmArn"}, + "AlarmDescription":{"shape":"AlarmDescription"}, + "AlarmConfigurationUpdatedTimestamp":{"shape":"Timestamp"}, + "ActionsEnabled":{"shape":"ActionsEnabled"}, + "OKActions":{"shape":"ResourceList"}, + "AlarmActions":{"shape":"ResourceList"}, + "InsufficientDataActions":{"shape":"ResourceList"}, + "StateValue":{"shape":"StateValue"}, + "StateReason":{"shape":"StateReason"}, + "StateReasonData":{"shape":"StateReasonData"}, + "StateUpdatedTimestamp":{"shape":"Timestamp"}, + "MetricName":{"shape":"MetricName"}, + "Namespace":{"shape":"Namespace"}, + "Statistic":{"shape":"Statistic"}, + "Dimensions":{"shape":"Dimensions"}, + 
"Period":{"shape":"Period"}, + "Unit":{"shape":"StandardUnit"}, + "EvaluationPeriods":{"shape":"EvaluationPeriods"}, + "Threshold":{"shape":"Threshold"}, + "ComparisonOperator":{"shape":"ComparisonOperator"} + }, + "xmlOrder":[ + "AlarmName", + "AlarmArn", + "AlarmDescription", + "AlarmConfigurationUpdatedTimestamp", + "ActionsEnabled", + "OKActions", + "AlarmActions", + "InsufficientDataActions", + "StateValue", + "StateReason", + "StateReasonData", + "StateUpdatedTimestamp", + "MetricName", + "Namespace", + "Statistic", + "Dimensions", + "Period", + "Unit", + "EvaluationPeriods", + "Threshold", + "ComparisonOperator" + ] + }, + "MetricAlarms":{ + "type":"list", + "member":{"shape":"MetricAlarm"} + }, + "MetricData":{ + "type":"list", + "member":{"shape":"MetricDatum"} + }, + "MetricDatum":{ + "type":"structure", + "required":["MetricName"], + "members":{ + "MetricName":{"shape":"MetricName"}, + "Dimensions":{"shape":"Dimensions"}, + "Timestamp":{"shape":"Timestamp"}, + "Value":{"shape":"DatapointValue"}, + "StatisticValues":{"shape":"StatisticSet"}, + "Unit":{"shape":"StandardUnit"} + } + }, + "MetricLabel":{"type":"string"}, + "MetricName":{ + "type":"string", + "min":1, + "max":255 + }, + "Metrics":{ + "type":"list", + "member":{"shape":"Metric"} + }, + "MissingRequiredParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"AwsQueryErrorMessage"} + }, + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Namespace":{ + "type":"string", + "min":1, + "max":255, + "pattern":"[^:].*" + }, + "NextToken":{"type":"string"}, + "Period":{ + "type":"integer", + "min":60 + }, + "PutMetricAlarmInput":{ + "type":"structure", + "required":[ + "AlarmName", + "MetricName", + "Namespace", + "Statistic", + "Period", + "EvaluationPeriods", + "Threshold", + "ComparisonOperator" + ], + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "AlarmDescription":{"shape":"AlarmDescription"}, + 
"ActionsEnabled":{"shape":"ActionsEnabled"}, + "OKActions":{"shape":"ResourceList"}, + "AlarmActions":{"shape":"ResourceList"}, + "InsufficientDataActions":{"shape":"ResourceList"}, + "MetricName":{"shape":"MetricName"}, + "Namespace":{"shape":"Namespace"}, + "Statistic":{"shape":"Statistic"}, + "Dimensions":{"shape":"Dimensions"}, + "Period":{"shape":"Period"}, + "Unit":{"shape":"StandardUnit"}, + "EvaluationPeriods":{"shape":"EvaluationPeriods"}, + "Threshold":{"shape":"Threshold"}, + "ComparisonOperator":{"shape":"ComparisonOperator"} + } + }, + "PutMetricDataInput":{ + "type":"structure", + "required":[ + "Namespace", + "MetricData" + ], + "members":{ + "Namespace":{"shape":"Namespace"}, + "MetricData":{"shape":"MetricData"} + } + }, + "ResourceList":{ + "type":"list", + "member":{"shape":"ResourceName"}, + "max":5 + }, + "ResourceName":{ + "type":"string", + "min":1, + "max":1024 + }, + "ResourceNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SetAlarmStateInput":{ + "type":"structure", + "required":[ + "AlarmName", + "StateValue", + "StateReason" + ], + "members":{ + "AlarmName":{"shape":"AlarmName"}, + "StateValue":{"shape":"StateValue"}, + "StateReason":{"shape":"StateReason"}, + "StateReasonData":{"shape":"StateReasonData"} + } + }, + "StandardUnit":{ + "type":"string", + "enum":[ + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Count", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second", + "None" + ] + }, + "StateReason":{ + "type":"string", + "min":0, + "max":1023 + }, + 
"StateReasonData":{ + "type":"string", + "min":0, + "max":4000 + }, + "StateValue":{ + "type":"string", + "enum":[ + "OK", + "ALARM", + "INSUFFICIENT_DATA" + ] + }, + "Statistic":{ + "type":"string", + "enum":[ + "SampleCount", + "Average", + "Sum", + "Minimum", + "Maximum" + ] + }, + "StatisticSet":{ + "type":"structure", + "required":[ + "SampleCount", + "Sum", + "Minimum", + "Maximum" + ], + "members":{ + "SampleCount":{"shape":"DatapointValue"}, + "Sum":{"shape":"DatapointValue"}, + "Minimum":{"shape":"DatapointValue"}, + "Maximum":{"shape":"DatapointValue"} + } + }, + "Statistics":{ + "type":"list", + "member":{"shape":"Statistic"}, + "min":1, + "max":5 + }, + "Threshold":{"type":"double"}, + "Timestamp":{"type":"timestamp"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,514 @@ +{ + "operations": { + "DeleteAlarms": "

    Deletes all specified alarms. In the event of an error, no alarms are deleted.

    ", + "DescribeAlarmHistory": "

    Retrieves history for the specified alarm. Filter alarms by date range or item type. If an alarm name is not specified, Amazon CloudWatch returns histories for all of the owner's alarms.

    ", + "DescribeAlarms": "

    Retrieves alarms with the specified names. If no name is specified, all alarms for the user are returned. Alarms can be retrieved by using only a prefix for the alarm name, the alarm state, or a prefix for any action.

    ", + "DescribeAlarmsForMetric": "

    Retrieves all alarms for a single metric. Specify a statistic, period, or unit to filter the set of alarms further.

    ", + "DisableAlarmActions": "

    Disables actions for the specified alarms. When an alarm's actions are disabled the alarm's state may change, but none of the alarm's actions will execute.

    ", + "EnableAlarmActions": "

    Enables actions for the specified alarms.

    ", + "GetMetricStatistics": "

    Gets statistics for the specified metric.

    The maximum number of data points returned from a single GetMetricStatistics request is 1,440, whereas the maximum number of data points that can be queried is 50,850. If you make a request that generates more than 1,440 data points, Amazon CloudWatch returns an error. In such a case, you can alter the request by narrowing the specified time range or increasing the specified period. Alternatively, you can make multiple requests across adjacent time ranges.

    Amazon CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-minute granularity, Amazon CloudWatch aggregates data points with time stamps that fall within the same one-minute period. In such a case, the data points queried can greatly outnumber the data points returned.

    The following examples show various statistics allowed by the data point query maximum of 50,850 when you call GetMetricStatistics on Amazon EC2 instances with detailed (one-minute) monitoring enabled:

    • Statistics for up to 400 instances for a span of one hour
    • Statistics for up to 35 instances over a span of 24 hours
    • Statistics for up to 2 instances over a span of 2 weeks

    For information about the namespace, metric names, and dimensions that other Amazon Web Services products use to send metrics to CloudWatch, go to Amazon CloudWatch Metrics, Namespaces, and Dimensions Reference in the Amazon CloudWatch Developer Guide.

    ", + "ListMetrics": "

    Returns a list of valid metrics stored for the AWS account owner. Returned metrics can be used with GetMetricStatistics to obtain statistical data for a given metric.

    ", + "PutMetricAlarm": "

    Creates or updates an alarm and associates it with the specified Amazon CloudWatch metric. Optionally, this operation can associate one or more Amazon Simple Notification Service resources with the alarm.

    When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. Any actions associated with the StateValue are then executed.

    ", + "PutMetricData": "

    Publishes metric data points to Amazon CloudWatch. Amazon CloudWatch associates the data points with the specified metric. If the specified metric does not exist, Amazon CloudWatch creates the metric. It can take up to fifteen minutes for a new metric to appear in calls to the ListMetrics action.

    The size of a PutMetricData request is limited to 8 KB for HTTP GET requests and 40 KB for HTTP POST requests.

    Although the Value parameter accepts numbers of type Double, Amazon CloudWatch truncates values with very large exponents. Values with base-10 exponents greater than 126 (1 x 10^126) are truncated. Likewise, values with base-10 exponents less than -130 (1 x 10^-130) are also truncated.

    Data that is timestamped 24 hours or more in the past may take in excess of 48 hours to become available from submission time using GetMetricStatistics.

    ", + "SetAlarmState": "

    Temporarily sets the state of an alarm. When the updated StateValue differs from the previous value, the action configured for the appropriate state is invoked. This is not a permanent change. The next periodic alarm check (in about a minute) will set the alarm to its actual state.

    " + }, + "service": "

    This is the Amazon CloudWatch API Reference. This guide provides detailed information about Amazon CloudWatch actions, data types, parameters, and errors. For detailed information about Amazon CloudWatch features and their associated API calls, go to the Amazon CloudWatch Developer Guide.

    Amazon CloudWatch is a web service that enables you to publish, monitor, and manage various metrics, as well as configure alarm actions based on data from metrics. For more information about this product go to http://aws.amazon.com/cloudwatch.

    For information about the namespace, metric names, and dimensions that other Amazon Web Services products use to send metrics to CloudWatch, go to Amazon CloudWatch Metrics, Namespaces, and Dimensions Reference in the Amazon CloudWatch Developer Guide.

    Use the following links to get started using the Amazon CloudWatch API Reference:

    • Actions: An alphabetical list of all Amazon CloudWatch actions.
    • Data Types: An alphabetical list of all Amazon CloudWatch data types.
    • Common Parameters: Parameters that all Query actions can use.
    • Common Errors: Client and server errors that all actions can return.
    • Regions and Endpoints: Itemized regions and endpoints for all AWS products.
    • WSDL Location: http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl

    In addition to using the Amazon CloudWatch API, you can also use the following SDKs and third-party libraries to access Amazon CloudWatch programmatically.

    Developers in the AWS developer community also provide their own libraries, which you can find at the following AWS developer centers:

    ", + "shapes": { + "ActionPrefix": { + "base": null, + "refs": { + "DescribeAlarmsInput$ActionPrefix": "

    The action name prefix.

    " + } + }, + "ActionsEnabled": { + "base": null, + "refs": { + "MetricAlarm$ActionsEnabled": "

    Indicates whether actions should be executed during any changes to the alarm's state.

    ", + "PutMetricAlarmInput$ActionsEnabled": "

    Indicates whether or not actions should be executed during any changes to the alarm's state.

    " + } + }, + "AlarmArn": { + "base": null, + "refs": { + "MetricAlarm$AlarmArn": "

    The Amazon Resource Name (ARN) of the alarm.

    " + } + }, + "AlarmDescription": { + "base": null, + "refs": { + "MetricAlarm$AlarmDescription": "

    The description for the alarm.

    ", + "PutMetricAlarmInput$AlarmDescription": "

    The description for the alarm.

    " + } + }, + "AlarmHistoryItem": { + "base": "

    The AlarmHistoryItem data type contains descriptive information about the history of a specific alarm. If you call DescribeAlarmHistory, Amazon CloudWatch returns this data type as part of the DescribeAlarmHistoryResult data type.

    ", + "refs": { + "AlarmHistoryItems$member": null + } + }, + "AlarmHistoryItems": { + "base": null, + "refs": { + "DescribeAlarmHistoryOutput$AlarmHistoryItems": "

    A list of alarm histories in JSON format.

    " + } + }, + "AlarmName": { + "base": null, + "refs": { + "AlarmHistoryItem$AlarmName": "

    The descriptive name for the alarm.

    ", + "AlarmNames$member": null, + "DescribeAlarmHistoryInput$AlarmName": "

    The name of the alarm.

    ", + "MetricAlarm$AlarmName": "

    The name of the alarm.

    ", + "PutMetricAlarmInput$AlarmName": "

    The descriptive name for the alarm. This name must be unique within the user's AWS account.

    ", + "SetAlarmStateInput$AlarmName": "

    The descriptive name for the alarm. This name must be unique within the user's AWS account. The maximum length is 255 characters.

    " + } + }, + "AlarmNamePrefix": { + "base": null, + "refs": { + "DescribeAlarmsInput$AlarmNamePrefix": "

    The alarm name prefix. AlarmNames cannot be specified if this parameter is specified.

    " + } + }, + "AlarmNames": { + "base": null, + "refs": { + "DeleteAlarmsInput$AlarmNames": "

    A list of alarms to be deleted.

    ", + "DescribeAlarmsInput$AlarmNames": "

    A list of alarm names to retrieve information for.

    ", + "DisableAlarmActionsInput$AlarmNames": "

    The names of the alarms to disable actions for.

    ", + "EnableAlarmActionsInput$AlarmNames": "

    The names of the alarms to enable actions for.

    " + } + }, + "AwsQueryErrorMessage": { + "base": null, + "refs": { + "InvalidParameterCombinationException$message": "

    ", + "InvalidParameterValueException$message": "

    ", + "MissingRequiredParameterException$message": "

    " + } + }, + "ComparisonOperator": { + "base": null, + "refs": { + "MetricAlarm$ComparisonOperator": "

    The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand.

    ", + "PutMetricAlarmInput$ComparisonOperator": "

    The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand.

    " + } + }, + "Datapoint": { + "base": "

    The Datapoint data type encapsulates the statistical data that Amazon CloudWatch computes from metric data.

    ", + "refs": { + "Datapoints$member": null + } + }, + "DatapointValue": { + "base": null, + "refs": { + "Datapoint$SampleCount": "

    The number of metric values that contributed to the aggregate value of this datapoint.

    ", + "Datapoint$Average": "

    The average of metric values that correspond to the datapoint.

    ", + "Datapoint$Sum": "

    The sum of metric values used for the datapoint.

    ", + "Datapoint$Minimum": "

    The minimum metric value used for the datapoint.

    ", + "Datapoint$Maximum": "

    The maximum of the metric value used for the datapoint.

    ", + "MetricDatum$Value": "

    The value for the metric.

    Although the Value parameter accepts numbers of type Double, Amazon CloudWatch truncates values with very large exponents. Values with base-10 exponents greater than 126 (1 x 10^126) are truncated. Likewise, values with base-10 exponents less than -130 (1 x 10^-130) are also truncated. ", + "StatisticSet$SampleCount": "

    The number of samples used for the statistic set.

    ", + "StatisticSet$Sum": "

    The sum of values for the sample set.

    ", + "StatisticSet$Minimum": "

    The minimum value of the sample set.

    ", + "StatisticSet$Maximum": "

    The maximum value of the sample set.

    " + } + }, + "Datapoints": { + "base": null, + "refs": { + "GetMetricStatisticsOutput$Datapoints": "

    The datapoints for the specified metric.

    " + } + }, + "DeleteAlarmsInput": { + "base": null, + "refs": { + } + }, + "DescribeAlarmHistoryInput": { + "base": null, + "refs": { + } + }, + "DescribeAlarmHistoryOutput": { + "base": "

    The output for the DescribeAlarmHistory action.

    ", + "refs": { + } + }, + "DescribeAlarmsForMetricInput": { + "base": null, + "refs": { + } + }, + "DescribeAlarmsForMetricOutput": { + "base": "

    The output for the DescribeAlarmsForMetric action.

    ", + "refs": { + } + }, + "DescribeAlarmsInput": { + "base": null, + "refs": { + } + }, + "DescribeAlarmsOutput": { + "base": "

    The output for the DescribeAlarms action.

    ", + "refs": { + } + }, + "Dimension": { + "base": "

    The Dimension data type further expands on the identity of a metric using a Name, Value pair.

    For examples that use one or more dimensions, see PutMetricData.

    ", + "refs": { + "Dimensions$member": null + } + }, + "DimensionFilter": { + "base": "

    The DimensionFilter data type is used to filter ListMetrics results.

    ", + "refs": { + "DimensionFilters$member": null + } + }, + "DimensionFilters": { + "base": null, + "refs": { + "ListMetricsInput$Dimensions": "

    A list of dimensions to filter against.

    " + } + }, + "DimensionName": { + "base": null, + "refs": { + "Dimension$Name": "

    The name of the dimension.

    ", + "DimensionFilter$Name": "

    The dimension name to be matched.

    " + } + }, + "DimensionValue": { + "base": null, + "refs": { + "Dimension$Value": "

    The value representing the dimension measurement

    ", + "DimensionFilter$Value": "

    The value of the dimension to be matched.

    " + } + }, + "Dimensions": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$Dimensions": "

    The list of dimensions associated with the metric.

    ", + "GetMetricStatisticsInput$Dimensions": "

    A list of dimensions describing qualities of the metric.

    ", + "Metric$Dimensions": "

    A list of dimensions associated with the metric.

    ", + "MetricAlarm$Dimensions": "

    The list of dimensions associated with the alarm's associated metric.

    ", + "MetricDatum$Dimensions": "

    A list of dimensions associated with the metric. Note, when using the Dimensions value in a query, you need to append .member.N to it (e.g., Dimensions.member.N).

    ", + "PutMetricAlarmInput$Dimensions": "

    The dimensions for the alarm's associated metric.

    " + } + }, + "DisableAlarmActionsInput": { + "base": "

    ", + "refs": { + } + }, + "EnableAlarmActionsInput": { + "base": null, + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "InvalidFormatFault$message": "

    ", + "InvalidNextToken$message": "

    ", + "LimitExceededFault$message": "

    ", + "ResourceNotFound$message": "

    " + } + }, + "EvaluationPeriods": { + "base": null, + "refs": { + "MetricAlarm$EvaluationPeriods": "

    The number of periods over which data is compared to the specified threshold.

    ", + "PutMetricAlarmInput$EvaluationPeriods": "

    The number of periods over which data is compared to the specified threshold.

    " + } + }, + "FaultDescription": { + "base": null, + "refs": { + "InternalServiceFault$Message": "

    " + } + }, + "GetMetricStatisticsInput": { + "base": null, + "refs": { + } + }, + "GetMetricStatisticsOutput": { + "base": "

    The output for the GetMetricStatistics action.

    ", + "refs": { + } + }, + "HistoryData": { + "base": null, + "refs": { + "AlarmHistoryItem$HistoryData": "

    Machine-readable data about the alarm in JSON format.

    " + } + }, + "HistoryItemType": { + "base": null, + "refs": { + "AlarmHistoryItem$HistoryItemType": "

    The type of alarm history item.

    ", + "DescribeAlarmHistoryInput$HistoryItemType": "

    The type of alarm histories to retrieve.

    " + } + }, + "HistorySummary": { + "base": null, + "refs": { + "AlarmHistoryItem$HistorySummary": "

    A human-readable summary of the alarm history.

    " + } + }, + "InternalServiceFault": { + "base": "

    Indicates that the request processing has failed due to some unknown error, exception, or failure.

    ", + "refs": { + } + }, + "InvalidFormatFault": { + "base": "

    Data was not syntactically valid JSON.

    ", + "refs": { + } + }, + "InvalidNextToken": { + "base": "

    The next token specified is invalid.

    ", + "refs": { + } + }, + "InvalidParameterCombinationException": { + "base": "

    Parameters that must not be used together were used together.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    Bad or out-of-range value was supplied for the input parameter.

    ", + "refs": { + } + }, + "LimitExceededFault": { + "base": "

    The quota for alarms for this customer has already been reached.

    ", + "refs": { + } + }, + "ListMetricsInput": { + "base": null, + "refs": { + } + }, + "ListMetricsOutput": { + "base": "

    The output for the ListMetrics action.

    ", + "refs": { + } + }, + "MaxRecords": { + "base": null, + "refs": { + "DescribeAlarmHistoryInput$MaxRecords": "

    The maximum number of alarm history records to retrieve.

    ", + "DescribeAlarmsInput$MaxRecords": "

    The maximum number of alarm descriptions to retrieve.

    " + } + }, + "Metric": { + "base": "

    The Metric data type contains information about a specific metric. If you call ListMetrics, Amazon CloudWatch returns information contained by this data type.

    The example in the Examples section publishes two metrics named buffers and latency. Both metrics are in the examples namespace. Both metrics have two dimensions, InstanceID and InstanceType.

    ", + "refs": { + "Metrics$member": null + } + }, + "MetricAlarm": { + "base": "

    The MetricAlarm data type represents an alarm. You can use PutMetricAlarm to create or update an alarm.

    ", + "refs": { + "MetricAlarms$member": null + } + }, + "MetricAlarms": { + "base": null, + "refs": { + "DescribeAlarmsForMetricOutput$MetricAlarms": "

    A list of information for each alarm with the specified metric.

    ", + "DescribeAlarmsOutput$MetricAlarms": "

    A list of information for the specified alarms.

    " + } + }, + "MetricData": { + "base": null, + "refs": { + "PutMetricDataInput$MetricData": "

    A list of data describing the metric.

    " + } + }, + "MetricDatum": { + "base": "

    The MetricDatum data type encapsulates the information sent with PutMetricData to either create a new metric or add new values to be aggregated into an existing metric.

    ", + "refs": { + "MetricData$member": null + } + }, + "MetricLabel": { + "base": null, + "refs": { + "GetMetricStatisticsOutput$Label": "

    A label describing the specified metric.

    " + } + }, + "MetricName": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$MetricName": "

    The name of the metric.

    ", + "GetMetricStatisticsInput$MetricName": "

    The name of the metric, with or without spaces.

    ", + "ListMetricsInput$MetricName": "

    The name of the metric to filter against.

    ", + "Metric$MetricName": "

    The name of the metric.

    ", + "MetricAlarm$MetricName": "

    The name of the alarm's metric.

    ", + "MetricDatum$MetricName": "

    The name of the metric.

    ", + "PutMetricAlarmInput$MetricName": "

    The name for the alarm's associated metric.

    " + } + }, + "Metrics": { + "base": null, + "refs": { + "ListMetricsOutput$Metrics": "

    A list of metrics used to generate statistics for an AWS account.

    " + } + }, + "MissingRequiredParameterException": { + "base": "

    An input parameter that is mandatory for processing the request is not supplied.

    ", + "refs": { + } + }, + "Namespace": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$Namespace": "

    The namespace of the metric.

    ", + "GetMetricStatisticsInput$Namespace": "

    The namespace of the metric, with or without spaces.

    ", + "ListMetricsInput$Namespace": "

    The namespace to filter against.

    ", + "Metric$Namespace": "

    The namespace of the metric.

    ", + "MetricAlarm$Namespace": "

    The namespace of the alarm's associated metric.

    ", + "PutMetricAlarmInput$Namespace": "

    The namespace for the alarm's associated metric.

    ", + "PutMetricDataInput$Namespace": "

    The namespace for the metric data.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeAlarmHistoryInput$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "DescribeAlarmHistoryOutput$NextToken": "

    A string that marks the start of the next batch of returned results.

    ", + "DescribeAlarmsInput$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "DescribeAlarmsOutput$NextToken": "

    A string that marks the start of the next batch of returned results.

    ", + "ListMetricsInput$NextToken": "

    The token returned by a previous call to indicate that there is more data available.

    ", + "ListMetricsOutput$NextToken": "

    A string that marks the start of the next batch of returned results.

    " + } + }, + "Period": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$Period": "

    The period in seconds over which the statistic is applied.

    ", + "GetMetricStatisticsInput$Period": "

    The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60. The default value is 60.

    ", + "MetricAlarm$Period": "

    The period in seconds over which the statistic is applied.

    ", + "PutMetricAlarmInput$Period": "

    The period in seconds over which the specified statistic is applied.

    " + } + }, + "PutMetricAlarmInput": { + "base": null, + "refs": { + } + }, + "PutMetricDataInput": { + "base": null, + "refs": { + } + }, + "ResourceList": { + "base": null, + "refs": { + "MetricAlarm$OKActions": "

    The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Number (ARN). Currently the only actions supported are publishing to an Amazon SNS topic and triggering an Auto Scaling policy.

    ", + "MetricAlarm$AlarmActions": "

    The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Number (ARN). Currently the only actions supported are publishing to an Amazon SNS topic and triggering an Auto Scaling policy.

    ", + "MetricAlarm$InsufficientDataActions": "

    The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Number (ARN). Currently the only actions supported are publishing to an Amazon SNS topic or triggering an Auto Scaling policy.

    The current WSDL lists this attribute as UnknownActions.", + "PutMetricAlarmInput$OKActions": "

    The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Number (ARN). Currently the only action supported is publishing to an Amazon SNS topic or an Amazon Auto Scaling policy.

    ", + "PutMetricAlarmInput$AlarmActions": "

    The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Number (ARN). Currently the only action supported is publishing to an Amazon SNS topic or an Amazon Auto Scaling policy.

    ", + "PutMetricAlarmInput$InsufficientDataActions": "

    The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Number (ARN). Currently the only action supported is publishing to an Amazon SNS topic or an Amazon Auto Scaling policy.

    " + } + }, + "ResourceName": { + "base": null, + "refs": { + "ResourceList$member": null + } + }, + "ResourceNotFound": { + "base": "

    The named resource does not exist.

    ", + "refs": { + } + }, + "SetAlarmStateInput": { + "base": null, + "refs": { + } + }, + "StandardUnit": { + "base": null, + "refs": { + "Datapoint$Unit": "

    The standard unit used for the datapoint.

    ", + "DescribeAlarmsForMetricInput$Unit": "

    The unit for the metric.

    ", + "GetMetricStatisticsInput$Unit": "

    The unit for the metric.

    ", + "MetricAlarm$Unit": "

    The unit of the alarm's associated metric.

    ", + "MetricDatum$Unit": "

    The unit of the metric.

    ", + "PutMetricAlarmInput$Unit": "

    The unit for the alarm's associated metric.

    " + } + }, + "StateReason": { + "base": null, + "refs": { + "MetricAlarm$StateReason": "

    A human-readable explanation for the alarm's state.

    ", + "SetAlarmStateInput$StateReason": "

    The reason that this alarm is set to this specific state (in human-readable text format)

    " + } + }, + "StateReasonData": { + "base": null, + "refs": { + "MetricAlarm$StateReasonData": "

    An explanation for the alarm's state in machine-readable JSON format

    ", + "SetAlarmStateInput$StateReasonData": "

    The reason that this alarm is set to this specific state (in machine-readable JSON format)

    " + } + }, + "StateValue": { + "base": null, + "refs": { + "DescribeAlarmsInput$StateValue": "

    The state value to be used in matching alarms.

    ", + "MetricAlarm$StateValue": "

    The state value for the alarm.

    ", + "SetAlarmStateInput$StateValue": "

    The value of the state.

    " + } + }, + "Statistic": { + "base": null, + "refs": { + "DescribeAlarmsForMetricInput$Statistic": "

    The statistic for the metric.

    ", + "MetricAlarm$Statistic": "

    The statistic to apply to the alarm's associated metric.

    ", + "PutMetricAlarmInput$Statistic": "

    The statistic to apply to the alarm's associated metric.

    ", + "Statistics$member": null + } + }, + "StatisticSet": { + "base": "

    The StatisticSet data type describes the StatisticValues component of MetricDatum, and represents a set of statistics that describes a specific metric.

    ", + "refs": { + "MetricDatum$StatisticValues": "

    A set of statistical values describing the metric.

    " + } + }, + "Statistics": { + "base": null, + "refs": { + "GetMetricStatisticsInput$Statistics": "

    The metric statistics to return. For information about specific statistics returned by GetMetricStatistics, go to Statistics in the Amazon CloudWatch Developer Guide.

    Valid Values: Average | Sum | SampleCount | Maximum | Minimum

    " + } + }, + "Threshold": { + "base": null, + "refs": { + "MetricAlarm$Threshold": "

    The value against which the specified statistic is compared.

    ", + "PutMetricAlarmInput$Threshold": "

    The value against which the specified statistic is compared.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "AlarmHistoryItem$Timestamp": "

    The time stamp for the alarm history item. Amazon CloudWatch uses Coordinated Universal Time (UTC) when returning time stamps, which do not accommodate seasonal adjustments such as daylight savings time. For more information, see Time stamps in the Amazon CloudWatch Developer Guide.

    ", + "Datapoint$Timestamp": "

    The time stamp used for the datapoint. Amazon CloudWatch uses Coordinated Universal Time (UTC) when returning time stamps, which do not accommodate seasonal adjustments such as daylight savings time. For more information, see Time stamps in the Amazon CloudWatch Developer Guide.

    ", + "DescribeAlarmHistoryInput$StartDate": "

    The starting date to retrieve alarm history.

    ", + "DescribeAlarmHistoryInput$EndDate": "

    The ending date to retrieve alarm history.

    ", + "GetMetricStatisticsInput$StartTime": "

    The time stamp to use for determining the first datapoint to return. The value specified is inclusive; results include datapoints with the time stamp specified.

    ", + "GetMetricStatisticsInput$EndTime": "

    The time stamp to use for determining the last datapoint to return. The value specified is exclusive; results will include datapoints up to the time stamp specified.

    ", + "MetricAlarm$AlarmConfigurationUpdatedTimestamp": "

    The time stamp of the last update to the alarm configuration. Amazon CloudWatch uses Coordinated Universal Time (UTC) when returning time stamps, which do not accommodate seasonal adjustments such as daylight savings time. For more information, see Time stamps in the Amazon CloudWatch Developer Guide.

    ", + "MetricAlarm$StateUpdatedTimestamp": "

    The time stamp of the last update to the alarm's state. Amazon CloudWatch uses Coordinated Universal Time (UTC) when returning time stamps, which do not accommodate seasonal adjustments such as daylight savings time. For more information, see Time stamps in the Amazon CloudWatch Developer Guide.

    ", + "MetricDatum$Timestamp": "

    The time stamp used for the metric. If not specified, the default value is set to the time the metric data was received. Amazon CloudWatch uses Coordinated Universal Time (UTC) when returning time stamps, which do not accommodate seasonal adjustments such as daylight savings time. For more information, see Time stamps in the Amazon CloudWatch Developer Guide.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/monitoring/2010-08-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,24 @@ +{ + "pagination": { + "DescribeAlarmHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "AlarmHistoryItems" + }, + "DescribeAlarms": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxRecords", + "result_key": "MetricAlarms" + }, + "DescribeAlarmsForMetric": { + "result_key": "MetricAlarms" + }, + "ListMetrics": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Metrics" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2603 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-02-18", + "endpointPrefix":"opsworks", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS OpsWorks", + "signatureVersion":"v4", + "targetPrefix":"OpsWorks_20130218" + }, + 
"operations":{ + "AssignInstance":{ + "name":"AssignInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssignInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "AssignVolume":{ + "name":"AssignVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssignVolumeRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "AssociateElasticIp":{ + "name":"AssociateElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateElasticIpRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "AttachElasticLoadBalancer":{ + "name":"AttachElasticLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachElasticLoadBalancerRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CloneStack":{ + "name":"CloneStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CloneStackRequest"}, + "output":{"shape":"CloneStackResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateApp":{ + "name":"CreateApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAppRequest"}, + "output":{"shape":"CreateAppResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateDeployment":{ + "name":"CreateDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDeploymentRequest"}, + "output":{"shape":"CreateDeploymentResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateInstance":{ + "name":"CreateInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CreateInstanceRequest"}, + "output":{"shape":"CreateInstanceResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateLayer":{ + "name":"CreateLayer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLayerRequest"}, + "output":{"shape":"CreateLayerResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "CreateStack":{ + "name":"CreateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStackRequest"}, + "output":{"shape":"CreateStackResult"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "CreateUserProfile":{ + "name":"CreateUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserProfileRequest"}, + "output":{"shape":"CreateUserProfileResult"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "DeleteApp":{ + "name":"DeleteApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAppRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeleteInstance":{ + "name":"DeleteInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeleteLayer":{ + "name":"DeleteLayer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLayerRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeleteStack":{ + "name":"DeleteStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeleteUserProfile":{ + "name":"DeleteUserProfile", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"DeleteUserProfileRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterEcsCluster":{ + "name":"DeregisterEcsCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterEcsClusterRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterElasticIp":{ + "name":"DeregisterElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterElasticIpRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterInstance":{ + "name":"DeregisterInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterRdsDbInstance":{ + "name":"DeregisterRdsDbInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterRdsDbInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DeregisterVolume":{ + "name":"DeregisterVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterVolumeRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeAgentVersions":{ + "name":"DescribeAgentVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAgentVersionsRequest"}, + "output":{"shape":"DescribeAgentVersionsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeApps":{ + "name":"DescribeApps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAppsRequest"}, + "output":{"shape":"DescribeAppsResult"}, + "errors":[ + 
{"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeCommands":{ + "name":"DescribeCommands", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCommandsRequest"}, + "output":{"shape":"DescribeCommandsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeDeployments":{ + "name":"DescribeDeployments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDeploymentsRequest"}, + "output":{"shape":"DescribeDeploymentsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeEcsClusters":{ + "name":"DescribeEcsClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEcsClustersRequest"}, + "output":{"shape":"DescribeEcsClustersResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeElasticIps":{ + "name":"DescribeElasticIps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeElasticIpsRequest"}, + "output":{"shape":"DescribeElasticIpsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeElasticLoadBalancers":{ + "name":"DescribeElasticLoadBalancers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeElasticLoadBalancersRequest"}, + "output":{"shape":"DescribeElasticLoadBalancersResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeInstances":{ + "name":"DescribeInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstancesRequest"}, + "output":{"shape":"DescribeInstancesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeLayers":{ + "name":"DescribeLayers", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLayersRequest"}, + "output":{"shape":"DescribeLayersResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeLoadBasedAutoScaling":{ + "name":"DescribeLoadBasedAutoScaling", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoadBasedAutoScalingRequest"}, + "output":{"shape":"DescribeLoadBasedAutoScalingResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeMyUserProfile":{ + "name":"DescribeMyUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"DescribeMyUserProfileResult"} + }, + "DescribePermissions":{ + "name":"DescribePermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePermissionsRequest"}, + "output":{"shape":"DescribePermissionsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeRaidArrays":{ + "name":"DescribeRaidArrays", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRaidArraysRequest"}, + "output":{"shape":"DescribeRaidArraysResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeRdsDbInstances":{ + "name":"DescribeRdsDbInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRdsDbInstancesRequest"}, + "output":{"shape":"DescribeRdsDbInstancesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeServiceErrors":{ + "name":"DescribeServiceErrors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServiceErrorsRequest"}, + "output":{"shape":"DescribeServiceErrorsResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} 
+ ] + }, + "DescribeStackProvisioningParameters":{ + "name":"DescribeStackProvisioningParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackProvisioningParametersRequest"}, + "output":{"shape":"DescribeStackProvisioningParametersResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeStackSummary":{ + "name":"DescribeStackSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackSummaryRequest"}, + "output":{"shape":"DescribeStackSummaryResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeStacks":{ + "name":"DescribeStacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStacksRequest"}, + "output":{"shape":"DescribeStacksResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeTimeBasedAutoScaling":{ + "name":"DescribeTimeBasedAutoScaling", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTimeBasedAutoScalingRequest"}, + "output":{"shape":"DescribeTimeBasedAutoScalingResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeUserProfiles":{ + "name":"DescribeUserProfiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUserProfilesRequest"}, + "output":{"shape":"DescribeUserProfilesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DescribeVolumes":{ + "name":"DescribeVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVolumesRequest"}, + "output":{"shape":"DescribeVolumesResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "DetachElasticLoadBalancer":{ + 
"name":"DetachElasticLoadBalancer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachElasticLoadBalancerRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ] + }, + "DisassociateElasticIp":{ + "name":"DisassociateElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateElasticIpRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "GetHostnameSuggestion":{ + "name":"GetHostnameSuggestion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetHostnameSuggestionRequest"}, + "output":{"shape":"GetHostnameSuggestionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "GrantAccess":{ + "name":"GrantAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GrantAccessRequest"}, + "output":{"shape":"GrantAccessResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RebootInstance":{ + "name":"RebootInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterEcsCluster":{ + "name":"RegisterEcsCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterEcsClusterRequest"}, + "output":{"shape":"RegisterEcsClusterResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterElasticIp":{ + "name":"RegisterElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterElasticIpRequest"}, + "output":{"shape":"RegisterElasticIpResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterInstance":{ + "name":"RegisterInstance", + "http":{ + "method":"POST", 
+ "requestUri":"/" + }, + "input":{"shape":"RegisterInstanceRequest"}, + "output":{"shape":"RegisterInstanceResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterRdsDbInstance":{ + "name":"RegisterRdsDbInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterRdsDbInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "RegisterVolume":{ + "name":"RegisterVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterVolumeRequest"}, + "output":{"shape":"RegisterVolumeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "SetLoadBasedAutoScaling":{ + "name":"SetLoadBasedAutoScaling", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLoadBasedAutoScalingRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "SetPermission":{ + "name":"SetPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetPermissionRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "SetTimeBasedAutoScaling":{ + "name":"SetTimeBasedAutoScaling", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTimeBasedAutoScalingRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "StartInstance":{ + "name":"StartInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "StartStack":{ + "name":"StartStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartStackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + 
{"shape":"ResourceNotFoundException"} + ] + }, + "StopInstance":{ + "name":"StopInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "StopStack":{ + "name":"StopStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopStackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UnassignInstance":{ + "name":"UnassignInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UnassignVolume":{ + "name":"UnassignVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnassignVolumeRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateApp":{ + "name":"UpdateApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAppRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateElasticIp":{ + "name":"UpdateElasticIp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateElasticIpRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateInstance":{ + "name":"UpdateInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateLayer":{ + "name":"UpdateLayer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLayerRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateMyUserProfile":{ + 
"name":"UpdateMyUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMyUserProfileRequest"}, + "errors":[ + {"shape":"ValidationException"} + ] + }, + "UpdateRdsDbInstance":{ + "name":"UpdateRdsDbInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRdsDbInstanceRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateStack":{ + "name":"UpdateStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStackRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateUserProfile":{ + "name":"UpdateUserProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUserProfileRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UpdateVolume":{ + "name":"UpdateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateVolumeRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + } + }, + "shapes":{ + "AgentVersion":{ + "type":"structure", + "members":{ + "Version":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"} + } + }, + "AgentVersions":{ + "type":"list", + "member":{"shape":"AgentVersion"} + }, + "App":{ + "type":"structure", + "members":{ + "AppId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "DataSources":{"shape":"DataSources"}, + "Type":{"shape":"AppType"}, + "AppSource":{"shape":"Source"}, + "Domains":{"shape":"Strings"}, + "EnableSsl":{"shape":"Boolean"}, + "SslConfiguration":{"shape":"SslConfiguration"}, + "Attributes":{"shape":"AppAttributes"}, + "CreatedAt":{"shape":"String"}, + "Environment":{"shape":"EnvironmentVariables"} + } + }, + 
"AppAttributes":{ + "type":"map", + "key":{"shape":"AppAttributesKeys"}, + "value":{"shape":"String"} + }, + "AppAttributesKeys":{ + "type":"string", + "enum":[ + "DocumentRoot", + "RailsEnv", + "AutoBundleOnDeploy", + "AwsFlowRubySettings" + ] + }, + "AppType":{ + "type":"string", + "enum":[ + "aws-flow-ruby", + "java", + "rails", + "php", + "nodejs", + "static", + "other" + ] + }, + "Apps":{ + "type":"list", + "member":{"shape":"App"} + }, + "Architecture":{ + "type":"string", + "enum":[ + "x86_64", + "i386" + ] + }, + "AssignInstanceRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "LayerIds" + ], + "members":{ + "InstanceId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"} + } + }, + "AssignVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "AssociateElasticIpRequest":{ + "type":"structure", + "required":["ElasticIp"], + "members":{ + "ElasticIp":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "AttachElasticLoadBalancerRequest":{ + "type":"structure", + "required":[ + "ElasticLoadBalancerName", + "LayerId" + ], + "members":{ + "ElasticLoadBalancerName":{"shape":"String"}, + "LayerId":{"shape":"String"} + } + }, + "AutoScalingThresholds":{ + "type":"structure", + "members":{ + "InstanceCount":{"shape":"Integer"}, + "ThresholdsWaitTime":{"shape":"Minute"}, + "IgnoreMetricsTime":{"shape":"Minute"}, + "CpuThreshold":{"shape":"Double"}, + "MemoryThreshold":{"shape":"Double"}, + "LoadThreshold":{"shape":"Double"}, + "Alarms":{"shape":"Strings"} + } + }, + "AutoScalingType":{ + "type":"string", + "enum":[ + "load", + "timer" + ] + }, + "BlockDeviceMapping":{ + "type":"structure", + "members":{ + "DeviceName":{"shape":"String"}, + "NoDevice":{"shape":"String"}, + "VirtualName":{"shape":"String"}, + "Ebs":{"shape":"EbsBlockDevice"} + } + }, + "BlockDeviceMappings":{ + "type":"list", + "member":{"shape":"BlockDeviceMapping"} + }, 
+ "Boolean":{ + "type":"boolean", + "box":true + }, + "ChefConfiguration":{ + "type":"structure", + "members":{ + "ManageBerkshelf":{"shape":"Boolean"}, + "BerkshelfVersion":{"shape":"String"} + } + }, + "CloneStackRequest":{ + "type":"structure", + "required":[ + "SourceStackId", + "ServiceRoleArn" + ], + "members":{ + "SourceStackId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Region":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Attributes":{"shape":"StackAttributes"}, + "ServiceRoleArn":{"shape":"String"}, + "DefaultInstanceProfileArn":{"shape":"String"}, + "DefaultOs":{"shape":"String"}, + "HostnameTheme":{"shape":"String"}, + "DefaultAvailabilityZone":{"shape":"String"}, + "DefaultSubnetId":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"}, + "ChefConfiguration":{"shape":"ChefConfiguration"}, + "UseCustomCookbooks":{"shape":"Boolean"}, + "UseOpsworksSecurityGroups":{"shape":"Boolean"}, + "CustomCookbooksSource":{"shape":"Source"}, + "DefaultSshKeyName":{"shape":"String"}, + "ClonePermissions":{"shape":"Boolean"}, + "CloneAppIds":{"shape":"Strings"}, + "DefaultRootDeviceType":{"shape":"RootDeviceType"}, + "AgentVersion":{"shape":"String"} + } + }, + "CloneStackResult":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"} + } + }, + "Command":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "DeploymentId":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"}, + "AcknowledgedAt":{"shape":"DateTime"}, + "CompletedAt":{"shape":"DateTime"}, + "Status":{"shape":"String"}, + "ExitCode":{"shape":"Integer"}, + "LogUrl":{"shape":"String"}, + "Type":{"shape":"String"} + } + }, + "Commands":{ + "type":"list", + "member":{"shape":"Command"} + }, + "CreateAppRequest":{ + "type":"structure", + "required":[ + "StackId", + "Name", + "Type" + ], + "members":{ + "StackId":{"shape":"String"}, + 
"Shortname":{"shape":"String"}, + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "DataSources":{"shape":"DataSources"}, + "Type":{"shape":"AppType"}, + "AppSource":{"shape":"Source"}, + "Domains":{"shape":"Strings"}, + "EnableSsl":{"shape":"Boolean"}, + "SslConfiguration":{"shape":"SslConfiguration"}, + "Attributes":{"shape":"AppAttributes"}, + "Environment":{"shape":"EnvironmentVariables"} + } + }, + "CreateAppResult":{ + "type":"structure", + "members":{ + "AppId":{"shape":"String"} + } + }, + "CreateDeploymentRequest":{ + "type":"structure", + "required":[ + "StackId", + "Command" + ], + "members":{ + "StackId":{"shape":"String"}, + "AppId":{"shape":"String"}, + "InstanceIds":{"shape":"Strings"}, + "Command":{"shape":"DeploymentCommand"}, + "Comment":{"shape":"String"}, + "CustomJson":{"shape":"String"} + } + }, + "CreateDeploymentResult":{ + "type":"structure", + "members":{ + "DeploymentId":{"shape":"String"} + } + }, + "CreateInstanceRequest":{ + "type":"structure", + "required":[ + "StackId", + "LayerIds", + "InstanceType" + ], + "members":{ + "StackId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"}, + "InstanceType":{"shape":"String"}, + "AutoScalingType":{"shape":"AutoScalingType"}, + "Hostname":{"shape":"String"}, + "Os":{"shape":"String"}, + "AmiId":{"shape":"String"}, + "SshKeyName":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VirtualizationType":{"shape":"String"}, + "SubnetId":{"shape":"String"}, + "Architecture":{"shape":"Architecture"}, + "RootDeviceType":{"shape":"RootDeviceType"}, + "BlockDeviceMappings":{"shape":"BlockDeviceMappings"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "EbsOptimized":{"shape":"Boolean"}, + "AgentVersion":{"shape":"String"} + } + }, + "CreateInstanceResult":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "CreateLayerRequest":{ + "type":"structure", + "required":[ + "StackId", + "Type", + "Name", + "Shortname" + ], + "members":{ + 
"StackId":{"shape":"String"}, + "Type":{"shape":"LayerType"}, + "Name":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Attributes":{"shape":"LayerAttributes"}, + "CustomInstanceProfileArn":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "CustomSecurityGroupIds":{"shape":"Strings"}, + "Packages":{"shape":"Strings"}, + "VolumeConfigurations":{"shape":"VolumeConfigurations"}, + "EnableAutoHealing":{"shape":"Boolean"}, + "AutoAssignElasticIps":{"shape":"Boolean"}, + "AutoAssignPublicIps":{"shape":"Boolean"}, + "CustomRecipes":{"shape":"Recipes"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "UseEbsOptimizedInstances":{"shape":"Boolean"}, + "LifecycleEventConfiguration":{"shape":"LifecycleEventConfiguration"} + } + }, + "CreateLayerResult":{ + "type":"structure", + "members":{ + "LayerId":{"shape":"String"} + } + }, + "CreateStackRequest":{ + "type":"structure", + "required":[ + "Name", + "Region", + "ServiceRoleArn", + "DefaultInstanceProfileArn" + ], + "members":{ + "Name":{"shape":"String"}, + "Region":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Attributes":{"shape":"StackAttributes"}, + "ServiceRoleArn":{"shape":"String"}, + "DefaultInstanceProfileArn":{"shape":"String"}, + "DefaultOs":{"shape":"String"}, + "HostnameTheme":{"shape":"String"}, + "DefaultAvailabilityZone":{"shape":"String"}, + "DefaultSubnetId":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"}, + "ChefConfiguration":{"shape":"ChefConfiguration"}, + "UseCustomCookbooks":{"shape":"Boolean"}, + "UseOpsworksSecurityGroups":{"shape":"Boolean"}, + "CustomCookbooksSource":{"shape":"Source"}, + "DefaultSshKeyName":{"shape":"String"}, + "DefaultRootDeviceType":{"shape":"RootDeviceType"}, + "AgentVersion":{"shape":"String"} + } + }, + "CreateStackResult":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"} + } + }, + "CreateUserProfileRequest":{ + "type":"structure", + "required":["IamUserArn"], + 
"members":{ + "IamUserArn":{"shape":"String"}, + "SshUsername":{"shape":"String"}, + "SshPublicKey":{"shape":"String"}, + "AllowSelfManagement":{"shape":"Boolean"} + } + }, + "CreateUserProfileResult":{ + "type":"structure", + "members":{ + "IamUserArn":{"shape":"String"} + } + }, + "DailyAutoScalingSchedule":{ + "type":"map", + "key":{"shape":"Hour"}, + "value":{"shape":"Switch"} + }, + "DataSource":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Arn":{"shape":"String"}, + "DatabaseName":{"shape":"String"} + } + }, + "DataSources":{ + "type":"list", + "member":{"shape":"DataSource"} + }, + "DateTime":{"type":"string"}, + "DeleteAppRequest":{ + "type":"structure", + "required":["AppId"], + "members":{ + "AppId":{"shape":"String"} + } + }, + "DeleteInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "DeleteElasticIp":{"shape":"Boolean"}, + "DeleteVolumes":{"shape":"Boolean"} + } + }, + "DeleteLayerRequest":{ + "type":"structure", + "required":["LayerId"], + "members":{ + "LayerId":{"shape":"String"} + } + }, + "DeleteStackRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "DeleteUserProfileRequest":{ + "type":"structure", + "required":["IamUserArn"], + "members":{ + "IamUserArn":{"shape":"String"} + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "DeploymentId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "AppId":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"}, + "CompletedAt":{"shape":"DateTime"}, + "Duration":{"shape":"Integer"}, + "IamUserArn":{"shape":"String"}, + "Comment":{"shape":"String"}, + "Command":{"shape":"DeploymentCommand"}, + "Status":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "InstanceIds":{"shape":"Strings"} + } + }, + "DeploymentCommand":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DeploymentCommandName"}, + 
"Args":{"shape":"DeploymentCommandArgs"} + } + }, + "DeploymentCommandArgs":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Strings"} + }, + "DeploymentCommandName":{ + "type":"string", + "enum":[ + "install_dependencies", + "update_dependencies", + "update_custom_cookbooks", + "execute_recipes", + "configure", + "setup", + "deploy", + "rollback", + "start", + "stop", + "restart", + "undeploy" + ] + }, + "Deployments":{ + "type":"list", + "member":{"shape":"Deployment"} + }, + "DeregisterEcsClusterRequest":{ + "type":"structure", + "required":["EcsClusterArn"], + "members":{ + "EcsClusterArn":{"shape":"String"} + } + }, + "DeregisterElasticIpRequest":{ + "type":"structure", + "required":["ElasticIp"], + "members":{ + "ElasticIp":{"shape":"String"} + } + }, + "DeregisterInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "DeregisterRdsDbInstanceRequest":{ + "type":"structure", + "required":["RdsDbInstanceArn"], + "members":{ + "RdsDbInstanceArn":{"shape":"String"} + } + }, + "DeregisterVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{"shape":"String"} + } + }, + "DescribeAgentVersionsRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"} + } + }, + "DescribeAgentVersionsResult":{ + "type":"structure", + "members":{ + "AgentVersions":{"shape":"AgentVersions"} + } + }, + "DescribeAppsRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "AppIds":{"shape":"Strings"} + } + }, + "DescribeAppsResult":{ + "type":"structure", + "members":{ + "Apps":{"shape":"Apps"} + } + }, + "DescribeCommandsRequest":{ + "type":"structure", + "members":{ + "DeploymentId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "CommandIds":{"shape":"Strings"} + } + }, + "DescribeCommandsResult":{ + "type":"structure", + "members":{ + 
"Commands":{"shape":"Commands"} + } + }, + "DescribeDeploymentsRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "AppId":{"shape":"String"}, + "DeploymentIds":{"shape":"Strings"} + } + }, + "DescribeDeploymentsResult":{ + "type":"structure", + "members":{ + "Deployments":{"shape":"Deployments"} + } + }, + "DescribeEcsClustersRequest":{ + "type":"structure", + "members":{ + "EcsClusterArns":{"shape":"Strings"}, + "StackId":{"shape":"String"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"Integer"} + } + }, + "DescribeEcsClustersResult":{ + "type":"structure", + "members":{ + "EcsClusters":{"shape":"EcsClusters"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeElasticIpsRequest":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "Ips":{"shape":"Strings"} + } + }, + "DescribeElasticIpsResult":{ + "type":"structure", + "members":{ + "ElasticIps":{"shape":"ElasticIps"} + } + }, + "DescribeElasticLoadBalancersRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"} + } + }, + "DescribeElasticLoadBalancersResult":{ + "type":"structure", + "members":{ + "ElasticLoadBalancers":{"shape":"ElasticLoadBalancers"} + } + }, + "DescribeInstancesRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "LayerId":{"shape":"String"}, + "InstanceIds":{"shape":"Strings"} + } + }, + "DescribeInstancesResult":{ + "type":"structure", + "members":{ + "Instances":{"shape":"Instances"} + } + }, + "DescribeLayersRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"} + } + }, + "DescribeLayersResult":{ + "type":"structure", + "members":{ + "Layers":{"shape":"Layers"} + } + }, + "DescribeLoadBasedAutoScalingRequest":{ + "type":"structure", + "required":["LayerIds"], + "members":{ + "LayerIds":{"shape":"Strings"} + } + }, + 
"DescribeLoadBasedAutoScalingResult":{ + "type":"structure", + "members":{ + "LoadBasedAutoScalingConfigurations":{"shape":"LoadBasedAutoScalingConfigurations"} + } + }, + "DescribeMyUserProfileResult":{ + "type":"structure", + "members":{ + "UserProfile":{"shape":"SelfUserProfile"} + } + }, + "DescribePermissionsRequest":{ + "type":"structure", + "members":{ + "IamUserArn":{"shape":"String"}, + "StackId":{"shape":"String"} + } + }, + "DescribePermissionsResult":{ + "type":"structure", + "members":{ + "Permissions":{"shape":"Permissions"} + } + }, + "DescribeRaidArraysRequest":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "RaidArrayIds":{"shape":"Strings"} + } + }, + "DescribeRaidArraysResult":{ + "type":"structure", + "members":{ + "RaidArrays":{"shape":"RaidArrays"} + } + }, + "DescribeRdsDbInstancesRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"}, + "RdsDbInstanceArns":{"shape":"Strings"} + } + }, + "DescribeRdsDbInstancesResult":{ + "type":"structure", + "members":{ + "RdsDbInstances":{"shape":"RdsDbInstances"} + } + }, + "DescribeServiceErrorsRequest":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "ServiceErrorIds":{"shape":"Strings"} + } + }, + "DescribeServiceErrorsResult":{ + "type":"structure", + "members":{ + "ServiceErrors":{"shape":"ServiceErrors"} + } + }, + "DescribeStackProvisioningParametersRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "DescribeStackProvisioningParametersResult":{ + "type":"structure", + "members":{ + "AgentInstallerUrl":{"shape":"String"}, + "Parameters":{"shape":"Parameters"} + } + }, + "DescribeStackSummaryRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "DescribeStackSummaryResult":{ + "type":"structure", + "members":{ + 
"StackSummary":{"shape":"StackSummary"} + } + }, + "DescribeStacksRequest":{ + "type":"structure", + "members":{ + "StackIds":{"shape":"Strings"} + } + }, + "DescribeStacksResult":{ + "type":"structure", + "members":{ + "Stacks":{"shape":"Stacks"} + } + }, + "DescribeTimeBasedAutoScalingRequest":{ + "type":"structure", + "required":["InstanceIds"], + "members":{ + "InstanceIds":{"shape":"Strings"} + } + }, + "DescribeTimeBasedAutoScalingResult":{ + "type":"structure", + "members":{ + "TimeBasedAutoScalingConfigurations":{"shape":"TimeBasedAutoScalingConfigurations"} + } + }, + "DescribeUserProfilesRequest":{ + "type":"structure", + "members":{ + "IamUserArns":{"shape":"Strings"} + } + }, + "DescribeUserProfilesResult":{ + "type":"structure", + "members":{ + "UserProfiles":{"shape":"UserProfiles"} + } + }, + "DescribeVolumesRequest":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "RaidArrayId":{"shape":"String"}, + "VolumeIds":{"shape":"Strings"} + } + }, + "DescribeVolumesResult":{ + "type":"structure", + "members":{ + "Volumes":{"shape":"Volumes"} + } + }, + "DetachElasticLoadBalancerRequest":{ + "type":"structure", + "required":[ + "ElasticLoadBalancerName", + "LayerId" + ], + "members":{ + "ElasticLoadBalancerName":{"shape":"String"}, + "LayerId":{"shape":"String"} + } + }, + "DisassociateElasticIpRequest":{ + "type":"structure", + "required":["ElasticIp"], + "members":{ + "ElasticIp":{"shape":"String"} + } + }, + "Double":{ + "type":"double", + "box":true + }, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"String"}, + "Iops":{"shape":"Integer"}, + "VolumeSize":{"shape":"Integer"}, + "VolumeType":{"shape":"VolumeType"}, + "DeleteOnTermination":{"shape":"Boolean"} + } + }, + "EcsCluster":{ + "type":"structure", + "members":{ + "EcsClusterArn":{"shape":"String"}, + "EcsClusterName":{"shape":"String"}, + "StackId":{"shape":"String"}, + "RegisteredAt":{"shape":"DateTime"} + 
} + }, + "EcsClusters":{ + "type":"list", + "member":{"shape":"EcsCluster"} + }, + "ElasticIp":{ + "type":"structure", + "members":{ + "Ip":{"shape":"String"}, + "Name":{"shape":"String"}, + "Domain":{"shape":"String"}, + "Region":{"shape":"String"}, + "InstanceId":{"shape":"String"} + } + }, + "ElasticIps":{ + "type":"list", + "member":{"shape":"ElasticIp"} + }, + "ElasticLoadBalancer":{ + "type":"structure", + "members":{ + "ElasticLoadBalancerName":{"shape":"String"}, + "Region":{"shape":"String"}, + "DnsName":{"shape":"String"}, + "StackId":{"shape":"String"}, + "LayerId":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "AvailabilityZones":{"shape":"Strings"}, + "SubnetIds":{"shape":"Strings"}, + "Ec2InstanceIds":{"shape":"Strings"} + } + }, + "ElasticLoadBalancers":{ + "type":"list", + "member":{"shape":"ElasticLoadBalancer"} + }, + "EnvironmentVariable":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"}, + "Secure":{"shape":"Boolean"} + } + }, + "EnvironmentVariables":{ + "type":"list", + "member":{"shape":"EnvironmentVariable"} + }, + "GetHostnameSuggestionRequest":{ + "type":"structure", + "required":["LayerId"], + "members":{ + "LayerId":{"shape":"String"} + } + }, + "GetHostnameSuggestionResult":{ + "type":"structure", + "members":{ + "LayerId":{"shape":"String"}, + "Hostname":{"shape":"String"} + } + }, + "GrantAccessRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "ValidForInMinutes":{"shape":"ValidForInMinutes"} + } + }, + "GrantAccessResult":{ + "type":"structure", + "members":{ + "TemporaryCredential":{"shape":"TemporaryCredential"} + } + }, + "Hour":{"type":"string"}, + "Instance":{ + "type":"structure", + "members":{ + "AgentVersion":{"shape":"String"}, + "AmiId":{"shape":"String"}, + "Architecture":{"shape":"Architecture"}, + "AutoScalingType":{"shape":"AutoScalingType"}, + 
"AvailabilityZone":{"shape":"String"}, + "BlockDeviceMappings":{"shape":"BlockDeviceMappings"}, + "CreatedAt":{"shape":"DateTime"}, + "EbsOptimized":{"shape":"Boolean"}, + "Ec2InstanceId":{"shape":"String"}, + "EcsClusterArn":{"shape":"String"}, + "EcsContainerInstanceArn":{"shape":"String"}, + "ElasticIp":{"shape":"String"}, + "Hostname":{"shape":"String"}, + "InfrastructureClass":{"shape":"String"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "InstanceId":{"shape":"String"}, + "InstanceProfileArn":{"shape":"String"}, + "InstanceType":{"shape":"String"}, + "LastServiceErrorId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"}, + "Os":{"shape":"String"}, + "Platform":{"shape":"String"}, + "PrivateDns":{"shape":"String"}, + "PrivateIp":{"shape":"String"}, + "PublicDns":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "RegisteredBy":{"shape":"String"}, + "ReportedAgentVersion":{"shape":"String"}, + "ReportedOs":{"shape":"ReportedOs"}, + "RootDeviceType":{"shape":"RootDeviceType"}, + "RootDeviceVolumeId":{"shape":"String"}, + "SecurityGroupIds":{"shape":"Strings"}, + "SshHostDsaKeyFingerprint":{"shape":"String"}, + "SshHostRsaKeyFingerprint":{"shape":"String"}, + "SshKeyName":{"shape":"String"}, + "StackId":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubnetId":{"shape":"String"}, + "VirtualizationType":{"shape":"VirtualizationType"} + } + }, + "InstanceIdentity":{ + "type":"structure", + "members":{ + "Document":{"shape":"String"}, + "Signature":{"shape":"String"} + } + }, + "Instances":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InstancesCount":{ + "type":"structure", + "members":{ + "Assigning":{"shape":"Integer"}, + "Booting":{"shape":"Integer"}, + "ConnectionLost":{"shape":"Integer"}, + "Deregistering":{"shape":"Integer"}, + "Online":{"shape":"Integer"}, + "Pending":{"shape":"Integer"}, + "Rebooting":{"shape":"Integer"}, + "Registered":{"shape":"Integer"}, + "Registering":{"shape":"Integer"}, + "Requested":{"shape":"Integer"}, 
+ "RunningSetup":{"shape":"Integer"}, + "SetupFailed":{"shape":"Integer"}, + "ShuttingDown":{"shape":"Integer"}, + "StartFailed":{"shape":"Integer"}, + "Stopped":{"shape":"Integer"}, + "Stopping":{"shape":"Integer"}, + "Terminated":{"shape":"Integer"}, + "Terminating":{"shape":"Integer"}, + "Unassigning":{"shape":"Integer"} + } + }, + "Integer":{ + "type":"integer", + "box":true + }, + "Layer":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "LayerId":{"shape":"String"}, + "Type":{"shape":"LayerType"}, + "Name":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Attributes":{"shape":"LayerAttributes"}, + "CustomInstanceProfileArn":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "CustomSecurityGroupIds":{"shape":"Strings"}, + "DefaultSecurityGroupNames":{"shape":"Strings"}, + "Packages":{"shape":"Strings"}, + "VolumeConfigurations":{"shape":"VolumeConfigurations"}, + "EnableAutoHealing":{"shape":"Boolean"}, + "AutoAssignElasticIps":{"shape":"Boolean"}, + "AutoAssignPublicIps":{"shape":"Boolean"}, + "DefaultRecipes":{"shape":"Recipes"}, + "CustomRecipes":{"shape":"Recipes"}, + "CreatedAt":{"shape":"DateTime"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "UseEbsOptimizedInstances":{"shape":"Boolean"}, + "LifecycleEventConfiguration":{"shape":"LifecycleEventConfiguration"} + } + }, + "LayerAttributes":{ + "type":"map", + "key":{"shape":"LayerAttributesKeys"}, + "value":{"shape":"String"} + }, + "LayerAttributesKeys":{ + "type":"string", + "enum":[ + "EcsClusterArn", + "EnableHaproxyStats", + "HaproxyStatsUrl", + "HaproxyStatsUser", + "HaproxyStatsPassword", + "HaproxyHealthCheckUrl", + "HaproxyHealthCheckMethod", + "MysqlRootPassword", + "MysqlRootPasswordUbiquitous", + "GangliaUrl", + "GangliaUser", + "GangliaPassword", + "MemcachedMemory", + "NodejsVersion", + "RubyVersion", + "RubygemsVersion", + "ManageBundler", + "BundlerVersion", + "RailsStack", + "PassengerVersion", + "Jvm", + "JvmVersion", + "JvmOptions", + 
"JavaAppServer", + "JavaAppServerVersion" + ] + }, + "LayerType":{ + "type":"string", + "enum":[ + "aws-flow-ruby", + "ecs-cluster", + "java-app", + "lb", + "web", + "php-app", + "rails-app", + "nodejs-app", + "memcached", + "db-master", + "monitoring-master", + "custom" + ] + }, + "Layers":{ + "type":"list", + "member":{"shape":"Layer"} + }, + "LifecycleEventConfiguration":{ + "type":"structure", + "members":{ + "Shutdown":{"shape":"ShutdownEventConfiguration"} + } + }, + "LoadBasedAutoScalingConfiguration":{ + "type":"structure", + "members":{ + "LayerId":{"shape":"String"}, + "Enable":{"shape":"Boolean"}, + "UpScaling":{"shape":"AutoScalingThresholds"}, + "DownScaling":{"shape":"AutoScalingThresholds"} + } + }, + "LoadBasedAutoScalingConfigurations":{ + "type":"list", + "member":{"shape":"LoadBasedAutoScalingConfiguration"} + }, + "Minute":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "Parameters":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Permission":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "IamUserArn":{"shape":"String"}, + "AllowSsh":{"shape":"Boolean"}, + "AllowSudo":{"shape":"Boolean"}, + "Level":{"shape":"String"} + } + }, + "Permissions":{ + "type":"list", + "member":{"shape":"Permission"} + }, + "RaidArray":{ + "type":"structure", + "members":{ + "RaidArrayId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Name":{"shape":"String"}, + "RaidLevel":{"shape":"Integer"}, + "NumberOfDisks":{"shape":"Integer"}, + "Size":{"shape":"Integer"}, + "Device":{"shape":"String"}, + "MountPoint":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"}, + "StackId":{"shape":"String"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"} + } + }, + "RaidArrays":{ + "type":"list", + "member":{"shape":"RaidArray"} + }, + "RdsDbInstance":{ + "type":"structure", + "members":{ + "RdsDbInstanceArn":{"shape":"String"}, + 
"DbInstanceIdentifier":{"shape":"String"}, + "DbUser":{"shape":"String"}, + "DbPassword":{"shape":"String"}, + "Region":{"shape":"String"}, + "Address":{"shape":"String"}, + "Engine":{"shape":"String"}, + "StackId":{"shape":"String"}, + "MissingOnRds":{"shape":"Boolean"} + } + }, + "RdsDbInstances":{ + "type":"list", + "member":{"shape":"RdsDbInstance"} + }, + "RebootInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "Recipes":{ + "type":"structure", + "members":{ + "Setup":{"shape":"Strings"}, + "Configure":{"shape":"Strings"}, + "Deploy":{"shape":"Strings"}, + "Undeploy":{"shape":"Strings"}, + "Shutdown":{"shape":"Strings"} + } + }, + "RegisterEcsClusterRequest":{ + "type":"structure", + "required":[ + "EcsClusterArn", + "StackId" + ], + "members":{ + "EcsClusterArn":{"shape":"String"}, + "StackId":{"shape":"String"} + } + }, + "RegisterEcsClusterResult":{ + "type":"structure", + "members":{ + "EcsClusterArn":{"shape":"String"} + } + }, + "RegisterElasticIpRequest":{ + "type":"structure", + "required":[ + "ElasticIp", + "StackId" + ], + "members":{ + "ElasticIp":{"shape":"String"}, + "StackId":{"shape":"String"} + } + }, + "RegisterElasticIpResult":{ + "type":"structure", + "members":{ + "ElasticIp":{"shape":"String"} + } + }, + "RegisterInstanceRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"}, + "Hostname":{"shape":"String"}, + "PublicIp":{"shape":"String"}, + "PrivateIp":{"shape":"String"}, + "RsaPublicKey":{"shape":"String"}, + "RsaPublicKeyFingerprint":{"shape":"String"}, + "InstanceIdentity":{"shape":"InstanceIdentity"} + } + }, + "RegisterInstanceResult":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "RegisterRdsDbInstanceRequest":{ + "type":"structure", + "required":[ + "StackId", + "RdsDbInstanceArn", + "DbUser", + "DbPassword" + ], + "members":{ + "StackId":{"shape":"String"}, + 
"RdsDbInstanceArn":{"shape":"String"}, + "DbUser":{"shape":"String"}, + "DbPassword":{"shape":"String"} + } + }, + "RegisterVolumeRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "Ec2VolumeId":{"shape":"String"}, + "StackId":{"shape":"String"} + } + }, + "RegisterVolumeResult":{ + "type":"structure", + "members":{ + "VolumeId":{"shape":"String"} + } + }, + "ReportedOs":{ + "type":"structure", + "members":{ + "Family":{"shape":"String"}, + "Name":{"shape":"String"}, + "Version":{"shape":"String"} + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "RootDeviceType":{ + "type":"string", + "enum":[ + "ebs", + "instance-store" + ] + }, + "SelfUserProfile":{ + "type":"structure", + "members":{ + "IamUserArn":{"shape":"String"}, + "Name":{"shape":"String"}, + "SshUsername":{"shape":"String"}, + "SshPublicKey":{"shape":"String"} + } + }, + "ServiceError":{ + "type":"structure", + "members":{ + "ServiceErrorId":{"shape":"String"}, + "StackId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Type":{"shape":"String"}, + "Message":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"} + } + }, + "ServiceErrors":{ + "type":"list", + "member":{"shape":"ServiceError"} + }, + "SetLoadBasedAutoScalingRequest":{ + "type":"structure", + "required":["LayerId"], + "members":{ + "LayerId":{"shape":"String"}, + "Enable":{"shape":"Boolean"}, + "UpScaling":{"shape":"AutoScalingThresholds"}, + "DownScaling":{"shape":"AutoScalingThresholds"} + } + }, + "SetPermissionRequest":{ + "type":"structure", + "required":[ + "StackId", + "IamUserArn" + ], + "members":{ + "StackId":{"shape":"String"}, + "IamUserArn":{"shape":"String"}, + "AllowSsh":{"shape":"Boolean"}, + "AllowSudo":{"shape":"Boolean"}, + "Level":{"shape":"String"} + } + }, + "SetTimeBasedAutoScalingRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + 
"AutoScalingSchedule":{"shape":"WeeklyAutoScalingSchedule"} + } + }, + "ShutdownEventConfiguration":{ + "type":"structure", + "members":{ + "ExecutionTimeout":{"shape":"Integer"}, + "DelayUntilElbConnectionsDrained":{"shape":"Boolean"} + } + }, + "Source":{ + "type":"structure", + "members":{ + "Type":{"shape":"SourceType"}, + "Url":{"shape":"String"}, + "Username":{"shape":"String"}, + "Password":{"shape":"String"}, + "SshKey":{"shape":"String"}, + "Revision":{"shape":"String"} + } + }, + "SourceType":{ + "type":"string", + "enum":[ + "git", + "svn", + "archive", + "s3" + ] + }, + "SslConfiguration":{ + "type":"structure", + "required":[ + "Certificate", + "PrivateKey" + ], + "members":{ + "Certificate":{"shape":"String"}, + "PrivateKey":{"shape":"String"}, + "Chain":{"shape":"String"} + } + }, + "Stack":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Arn":{"shape":"String"}, + "Region":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Attributes":{"shape":"StackAttributes"}, + "ServiceRoleArn":{"shape":"String"}, + "DefaultInstanceProfileArn":{"shape":"String"}, + "DefaultOs":{"shape":"String"}, + "HostnameTheme":{"shape":"String"}, + "DefaultAvailabilityZone":{"shape":"String"}, + "DefaultSubnetId":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"}, + "ChefConfiguration":{"shape":"ChefConfiguration"}, + "UseCustomCookbooks":{"shape":"Boolean"}, + "UseOpsworksSecurityGroups":{"shape":"Boolean"}, + "CustomCookbooksSource":{"shape":"Source"}, + "DefaultSshKeyName":{"shape":"String"}, + "CreatedAt":{"shape":"DateTime"}, + "DefaultRootDeviceType":{"shape":"RootDeviceType"}, + "AgentVersion":{"shape":"String"} + } + }, + "StackAttributes":{ + "type":"map", + "key":{"shape":"StackAttributesKeys"}, + "value":{"shape":"String"} + }, + "StackAttributesKeys":{ + "type":"string", + "enum":["Color"] + }, + "StackConfigurationManager":{ + 
"type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Version":{"shape":"String"} + } + }, + "StackSummary":{ + "type":"structure", + "members":{ + "StackId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Arn":{"shape":"String"}, + "LayersCount":{"shape":"Integer"}, + "AppsCount":{"shape":"Integer"}, + "InstancesCount":{"shape":"InstancesCount"} + } + }, + "Stacks":{ + "type":"list", + "member":{"shape":"Stack"} + }, + "StartInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "StartStackRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "StopInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "StopStackRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"} + } + }, + "String":{"type":"string"}, + "Strings":{ + "type":"list", + "member":{"shape":"String"} + }, + "Switch":{"type":"string"}, + "TemporaryCredential":{ + "type":"structure", + "members":{ + "Username":{"shape":"String"}, + "Password":{"shape":"String"}, + "ValidForInMinutes":{"shape":"Integer"}, + "InstanceId":{"shape":"String"} + } + }, + "TimeBasedAutoScalingConfiguration":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"String"}, + "AutoScalingSchedule":{"shape":"WeeklyAutoScalingSchedule"} + } + }, + "TimeBasedAutoScalingConfigurations":{ + "type":"list", + "member":{"shape":"TimeBasedAutoScalingConfiguration"} + }, + "UnassignInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"} + } + }, + "UnassignVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{"shape":"String"} + } + }, + "UpdateAppRequest":{ + "type":"structure", + "required":["AppId"], + "members":{ + "AppId":{"shape":"String"}, + 
"Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "DataSources":{"shape":"DataSources"}, + "Type":{"shape":"AppType"}, + "AppSource":{"shape":"Source"}, + "Domains":{"shape":"Strings"}, + "EnableSsl":{"shape":"Boolean"}, + "SslConfiguration":{"shape":"SslConfiguration"}, + "Attributes":{"shape":"AppAttributes"}, + "Environment":{"shape":"EnvironmentVariables"} + } + }, + "UpdateElasticIpRequest":{ + "type":"structure", + "required":["ElasticIp"], + "members":{ + "ElasticIp":{"shape":"String"}, + "Name":{"shape":"String"} + } + }, + "UpdateInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "LayerIds":{"shape":"Strings"}, + "InstanceType":{"shape":"String"}, + "AutoScalingType":{"shape":"AutoScalingType"}, + "Hostname":{"shape":"String"}, + "Os":{"shape":"String"}, + "AmiId":{"shape":"String"}, + "SshKeyName":{"shape":"String"}, + "Architecture":{"shape":"Architecture"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "EbsOptimized":{"shape":"Boolean"}, + "AgentVersion":{"shape":"String"} + } + }, + "UpdateLayerRequest":{ + "type":"structure", + "required":["LayerId"], + "members":{ + "LayerId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Shortname":{"shape":"String"}, + "Attributes":{"shape":"LayerAttributes"}, + "CustomInstanceProfileArn":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "CustomSecurityGroupIds":{"shape":"Strings"}, + "Packages":{"shape":"Strings"}, + "VolumeConfigurations":{"shape":"VolumeConfigurations"}, + "EnableAutoHealing":{"shape":"Boolean"}, + "AutoAssignElasticIps":{"shape":"Boolean"}, + "AutoAssignPublicIps":{"shape":"Boolean"}, + "CustomRecipes":{"shape":"Recipes"}, + "InstallUpdatesOnBoot":{"shape":"Boolean"}, + "UseEbsOptimizedInstances":{"shape":"Boolean"}, + "LifecycleEventConfiguration":{"shape":"LifecycleEventConfiguration"} + } + }, + "UpdateMyUserProfileRequest":{ + "type":"structure", + "members":{ + 
"SshPublicKey":{"shape":"String"} + } + }, + "UpdateRdsDbInstanceRequest":{ + "type":"structure", + "required":["RdsDbInstanceArn"], + "members":{ + "RdsDbInstanceArn":{"shape":"String"}, + "DbUser":{"shape":"String"}, + "DbPassword":{"shape":"String"} + } + }, + "UpdateStackRequest":{ + "type":"structure", + "required":["StackId"], + "members":{ + "StackId":{"shape":"String"}, + "Name":{"shape":"String"}, + "Attributes":{"shape":"StackAttributes"}, + "ServiceRoleArn":{"shape":"String"}, + "DefaultInstanceProfileArn":{"shape":"String"}, + "DefaultOs":{"shape":"String"}, + "HostnameTheme":{"shape":"String"}, + "DefaultAvailabilityZone":{"shape":"String"}, + "DefaultSubnetId":{"shape":"String"}, + "CustomJson":{"shape":"String"}, + "ConfigurationManager":{"shape":"StackConfigurationManager"}, + "ChefConfiguration":{"shape":"ChefConfiguration"}, + "UseCustomCookbooks":{"shape":"Boolean"}, + "CustomCookbooksSource":{"shape":"Source"}, + "DefaultSshKeyName":{"shape":"String"}, + "DefaultRootDeviceType":{"shape":"RootDeviceType"}, + "UseOpsworksSecurityGroups":{"shape":"Boolean"}, + "AgentVersion":{"shape":"String"} + } + }, + "UpdateUserProfileRequest":{ + "type":"structure", + "required":["IamUserArn"], + "members":{ + "IamUserArn":{"shape":"String"}, + "SshUsername":{"shape":"String"}, + "SshPublicKey":{"shape":"String"}, + "AllowSelfManagement":{"shape":"Boolean"} + } + }, + "UpdateVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{"shape":"String"}, + "Name":{"shape":"String"}, + "MountPoint":{"shape":"String"} + } + }, + "UserProfile":{ + "type":"structure", + "members":{ + "IamUserArn":{"shape":"String"}, + "Name":{"shape":"String"}, + "SshUsername":{"shape":"String"}, + "SshPublicKey":{"shape":"String"}, + "AllowSelfManagement":{"shape":"Boolean"} + } + }, + "UserProfiles":{ + "type":"list", + "member":{"shape":"UserProfile"} + }, + "ValidForInMinutes":{ + "type":"integer", + "box":true, + "max":1440, + "min":60 + }, + 
"ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "VirtualizationType":{ + "type":"string", + "enum":[ + "paravirtual", + "hvm" + ] + }, + "Volume":{ + "type":"structure", + "members":{ + "VolumeId":{"shape":"String"}, + "Ec2VolumeId":{"shape":"String"}, + "Name":{"shape":"String"}, + "RaidArrayId":{"shape":"String"}, + "InstanceId":{"shape":"String"}, + "Status":{"shape":"String"}, + "Size":{"shape":"Integer"}, + "Device":{"shape":"String"}, + "MountPoint":{"shape":"String"}, + "Region":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"} + } + }, + "VolumeConfiguration":{ + "type":"structure", + "required":[ + "MountPoint", + "NumberOfDisks", + "Size" + ], + "members":{ + "MountPoint":{"shape":"String"}, + "RaidLevel":{"shape":"Integer"}, + "NumberOfDisks":{"shape":"Integer"}, + "Size":{"shape":"Integer"}, + "VolumeType":{"shape":"String"}, + "Iops":{"shape":"Integer"} + } + }, + "VolumeConfigurations":{ + "type":"list", + "member":{"shape":"VolumeConfiguration"} + }, + "VolumeType":{ + "type":"string", + "enum":[ + "gp2", + "io1", + "standard" + ] + }, + "Volumes":{ + "type":"list", + "member":{"shape":"Volume"} + }, + "WeeklyAutoScalingSchedule":{ + "type":"structure", + "members":{ + "Monday":{"shape":"DailyAutoScalingSchedule"}, + "Tuesday":{"shape":"DailyAutoScalingSchedule"}, + "Wednesday":{"shape":"DailyAutoScalingSchedule"}, + "Thursday":{"shape":"DailyAutoScalingSchedule"}, + "Friday":{"shape":"DailyAutoScalingSchedule"}, + "Saturday":{"shape":"DailyAutoScalingSchedule"}, + "Sunday":{"shape":"DailyAutoScalingSchedule"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/docs-2.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1685 @@ +{ + "version": "2.0", + "service": "AWS OpsWorks

    Welcome to the AWS OpsWorks API Reference. This guide provides descriptions, syntax, and usage examples about AWS OpsWorks actions and data types, including common parameters and error codes.

    AWS OpsWorks is an application management service that provides an integrated experience for overseeing the complete application lifecycle. For information about this product, go to the AWS OpsWorks details page.

    SDKs and CLI

    The most common way to use the AWS OpsWorks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see:

    Endpoints

    AWS OpsWorks supports only one endpoint, opsworks.us-east-1.amazonaws.com (HTTPS), so you must connect to that endpoint. You can then use the API to direct AWS OpsWorks to create stacks in any AWS Region.

    Chef Versions

    When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended value for Linux stacks is currently 12 (the default is 11.4). Windows stacks use Chef 12.2. For more information, see Chef Versions.

    You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible.", + "operations": { + "AssignInstance": "

    Assign a registered instance to a layer.

    • You can assign registered on-premises instances to any layer type.
    • You can assign registered Amazon EC2 instances only to custom layers.
    • You cannot use this action with instances that were created with AWS OpsWorks.

    Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "AssignVolume": "

    Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. After you register the volume, you must call UpdateVolume to specify a mount point before calling AssignVolume. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "AssociateElasticIp": "

    Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "AttachElasticLoadBalancer": "

    Attaches an Elastic Load Balancing load balancer to a specified layer. For more information, see Elastic Load Balancing.

    You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see Elastic Load Balancing Developer Guide.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CloneStack": "

    Creates a clone of a specified stack. For more information, see Clone a Stack. By default, all parameters are set to the values used by the parent stack.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CreateApp": "

    Creates an app for a specified stack. For more information, see Creating Apps.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CreateDeployment": "

    Runs deployment or stack commands. For more information, see Deploying Apps and Run Stack Commands.

    Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CreateInstance": "

    Creates an instance in a specified stack. For more information, see Adding an Instance to a Layer.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CreateLayer": "

    Creates a layer. For more information, see How to Create a Layer.

    You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack does not have an existing layer of that type. A stack can have at most one instance of each noncustom layer; if you attempt to create a second instance, CreateLayer fails. A stack can have an arbitrary number of custom layers, so you can call CreateLayer as many times as you like for that layer type.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CreateStack": "

    Creates a new stack. For more information, see Create a New Stack.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "CreateUserProfile": "

    Creates a new user profile.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteApp": "

    Deletes a specified app.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteInstance": "

    Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it.

    For more information, see Deleting Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteLayer": "

    Deletes a specified layer. You must first stop and then delete all associated instances or unassign registered instances. For more information, see How to Delete a Layer.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteStack": "

    Deletes a specified stack. You must first delete all instances, layers, and apps or deregister registered instances. For more information, see Shut Down a Stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeleteUserProfile": "

    Deletes a user profile.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeregisterEcsCluster": "

    Deregisters a specified Amazon ECS cluster from a stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see .

    ", + "DeregisterElasticIp": "

    Deregisters a specified Elastic IP address. The address can then be registered by another stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeregisterInstance": "

    Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action can not be used with instances that were created with AWS OpsWorks.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeregisterRdsDbInstance": "

    Deregisters an Amazon RDS instance.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DeregisterVolume": "

    Deregisters an Amazon EBS volume. The volume can then be registered by another stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeAgentVersions": "

    Describes the available AWS OpsWorks agent versions. You must specify a stack ID or a configuration manager. DescribeAgentVersions returns a list of available agent versions for the specified stack or configuration manager.

    ", + "DescribeApps": "

    Requests a description of a specified set of apps.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeCommands": "

    Describes the results of specified commands.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeDeployments": "

    Requests a description of a specified set of deployments.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeEcsClusters": "

    Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks currently supports only one cluster per layer, so the result set has a maximum of one element.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeElasticIps": "

    Describes Elastic IP addresses.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeElasticLoadBalancers": "

    Describes a stack's Elastic Load Balancing instances.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeInstances": "

    Requests a description of a set of instances.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeLayers": "

    Requests a description of one or more layers in a specified stack.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeLoadBasedAutoScaling": "

    Describes load-based auto scaling configurations for specified layers.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeMyUserProfile": "

    Describes a user's SSH information.

    Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribePermissions": "

    Describes the permissions for a specified stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeRaidArrays": "

    Describes an instance's RAID arrays.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeRdsDbInstances": "

    Describes Amazon RDS instances.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeServiceErrors": "

    Describes AWS OpsWorks service errors.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeStackProvisioningParameters": "

    Requests a description of a stack's provisioning parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeStackSummary": "

    Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeStacks": "

    Requests a description of one or more stacks.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeTimeBasedAutoScaling": "

    Describes time-based auto scaling configurations for specified instances.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeUserProfiles": "

    Describes specified users.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DescribeVolumes": "

    Describes an instance's Amazon EBS volumes.

    You must specify at least one of the parameters.

    Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DetachElasticLoadBalancer": "

    Detaches a specified Elastic Load Balancing instance from its layer.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "DisassociateElasticIp": "

    Disassociates an Elastic IP address from its instance. The address remains registered with the stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "GetHostnameSuggestion": "

    Gets a generated host name for the specified layer, based on the current host name theme.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "GrantAccess": "This action can be used only with Windows stacks.

    Grants RDP access to a Windows instance for a specified time period.

    ", + "RebootInstance": "

    Reboots a specified instance. For more information, see Starting, Stopping, and Rebooting Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterEcsCluster": "

    Registers a specified Amazon ECS cluster with a stack. You can register only one cluster with a stack. A cluster can be registered with only one stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterElasticIp": "

    Registers an Elastic IP address with a specified stack. An address can be registered with only one stack at a time. If the address is already registered, you must first deregister it by calling DeregisterElasticIp. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterInstance": "

    Registers instances with a specified stack that were created outside of AWS OpsWorks.

    We do not recommend using this action to register instances. The complete registration operation has two primary steps, installing the AWS OpsWorks agent on the instance and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the AWS CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an AWS OpsWorks Stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterRdsDbInstance": "

    Registers an Amazon RDS instance with a stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "RegisterVolume": "

    Registers an Amazon EBS volume with a specified stack. A volume can be registered with only one stack at a time. If the volume is already registered, you must first deregister it by calling DeregisterVolume. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "SetLoadBasedAutoScaling": "

    Specify the load-based auto scaling configuration for a specified layer. For more information, see Managing Load with Time-based and Load-based Instances.

    To use load-based auto scaling, you must create a set of load-based auto scaling instances. Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough instances to handle the maximum anticipated load.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "SetPermission": "

    Specifies a user's permissions. For more information, see Security and Permissions.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "SetTimeBasedAutoScaling": "

    Specify the time-based auto scaling configuration for a specified instance. For more information, see Managing Load with Time-based and Load-based Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "StartInstance": "

    Starts a specified instance. For more information, see Starting, Stopping, and Rebooting Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "StartStack": "

    Starts a stack's instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "StopInstance": "

    Stops a specified instance. When you stop a standard instance, the data disappears and must be reinstalled when you restart the instance. You can stop an Amazon EBS-backed instance without losing data. For more information, see Starting, Stopping, and Rebooting Instances.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "StopStack": "

    Stops a specified stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UnassignInstance": "

    Unassigns a registered instance from all of its layers. The instance remains in the stack as an unassigned instance and can be assigned to another layer, as needed. You cannot use this action with instances that were created with AWS OpsWorks.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UnassignVolume": "

    Unassigns an assigned Amazon EBS volume. The volume remains registered with the stack. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateApp": "

    Updates a specified app.

    Required Permissions: To use this action, an IAM user must have a Deploy or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateElasticIp": "

    Updates a registered Elastic IP address's name. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateInstance": "

    Updates a specified instance.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateLayer": "

    Updates a specified layer.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateMyUserProfile": "

    Updates a user's SSH public key.

    Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateRdsDbInstance": "

    Updates an Amazon RDS instance.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateStack": "

    Updates a specified stack.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateUserProfile": "

    Updates a specified user profile.

    Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    ", + "UpdateVolume": "

    Updates an Amazon EBS volume's name or mount point. For more information, see Resource Management.

    Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

    " + }, + "shapes": { + "AgentVersion": { + "base": "

    Describes an agent version.

    ", + "refs": { + "AgentVersions$member": null + } + }, + "AgentVersions": { + "base": null, + "refs": { + "DescribeAgentVersionsResult$AgentVersions": "

    The agent versions for the specified stack or configuration manager. Note that this value is the complete version number, not the abbreviated number used by the console.

    " + } + }, + "App": { + "base": "

    A description of the app.

    ", + "refs": { + "Apps$member": null + } + }, + "AppAttributes": { + "base": null, + "refs": { + "App$Attributes": "

    The stack attributes.

    ", + "CreateAppRequest$Attributes": "

    One or more user-defined key/value pairs to be added to the stack attributes.

    ", + "UpdateAppRequest$Attributes": "

    One or more user-defined key/value pairs to be added to the stack attributes.

    " + } + }, + "AppAttributesKeys": { + "base": null, + "refs": { + "AppAttributes$key": null + } + }, + "AppType": { + "base": null, + "refs": { + "App$Type": "

    The app type.

    ", + "CreateAppRequest$Type": "

    The app type. Each supported type is associated with a particular layer. For example, PHP applications are associated with a PHP layer. AWS OpsWorks deploys an application to those instances that are members of the corresponding layer. If your app isn't one of the standard types, or you prefer to implement your own Deploy recipes, specify other.

    ", + "UpdateAppRequest$Type": "

    The app type.

    " + } + }, + "Apps": { + "base": null, + "refs": { + "DescribeAppsResult$Apps": "

    An array of App objects that describe the specified apps.

    " + } + }, + "Architecture": { + "base": null, + "refs": { + "CreateInstanceRequest$Architecture": "

    The instance architecture. The default option is x86_64. Instance types do not necessarily support both architectures. For a list of the architectures that are supported by the different instance types, see Instance Families and Types.

    ", + "Instance$Architecture": "

    The instance architecture: \"i386\" or \"x86_64\".

    ", + "UpdateInstanceRequest$Architecture": "

    The instance architecture. Instance types do not necessarily support both architectures. For a list of the architectures that are supported by the different instance types, see Instance Families and Types.

    " + } + }, + "AssignInstanceRequest": { + "base": null, + "refs": { + } + }, + "AssignVolumeRequest": { + "base": null, + "refs": { + } + }, + "AssociateElasticIpRequest": { + "base": null, + "refs": { + } + }, + "AttachElasticLoadBalancerRequest": { + "base": null, + "refs": { + } + }, + "AutoScalingThresholds": { + "base": "

    Describes a load-based auto scaling upscaling or downscaling threshold configuration, which specifies when AWS OpsWorks starts or stops load-based instances.

    ", + "refs": { + "LoadBasedAutoScalingConfiguration$UpScaling": "

    An AutoScalingThresholds object that describes the upscaling configuration, which defines how and when AWS OpsWorks increases the number of instances.

    ", + "LoadBasedAutoScalingConfiguration$DownScaling": "

    An AutoScalingThresholds object that describes the downscaling configuration, which defines how and when AWS OpsWorks reduces the number of instances.

    ", + "SetLoadBasedAutoScalingRequest$UpScaling": "

    An AutoScalingThresholds object with the upscaling threshold configuration. If the load exceeds these thresholds for a specified amount of time, AWS OpsWorks starts a specified number of instances.

    ", + "SetLoadBasedAutoScalingRequest$DownScaling": "

    An AutoScalingThresholds object with the downscaling threshold configuration. If the load falls below these thresholds for a specified amount of time, AWS OpsWorks stops a specified number of instances.

    " + } + }, + "AutoScalingType": { + "base": null, + "refs": { + "CreateInstanceRequest$AutoScalingType": "

    For load-based or time-based instances, the type. Windows stacks can use only time-based instances.

    ", + "Instance$AutoScalingType": "

    For load-based or time-based instances, the type.

    ", + "UpdateInstanceRequest$AutoScalingType": "

    For load-based or time-based instances, the type. Windows stacks can use only time-based instances.

    " + } + }, + "BlockDeviceMapping": { + "base": "

    Describes a block device mapping. This data type maps directly to the Amazon EC2 BlockDeviceMapping data type.

    ", + "refs": { + "BlockDeviceMappings$member": null + } + }, + "BlockDeviceMappings": { + "base": null, + "refs": { + "CreateInstanceRequest$BlockDeviceMappings": "

    An array of BlockDeviceMapping objects that specify the instance's block devices. For more information, see Block Device Mapping. Note that block device mappings are not supported for custom AMIs.

    ", + "Instance$BlockDeviceMappings": "

    An array of BlockDeviceMapping objects that specify the instance's block device mappings.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "App$EnableSsl": "

    Whether to enable SSL for the app.

    ", + "ChefConfiguration$ManageBerkshelf": "

    Whether to enable Berkshelf.

    ", + "CloneStackRequest$UseCustomCookbooks": "

    Whether to use custom cookbooks.

    ", + "CloneStackRequest$UseOpsworksSecurityGroups": "

    Whether to associate the AWS OpsWorks built-in security groups with the stack's layers.

    AWS OpsWorks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

    • True - AWS OpsWorks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group.
    • False - AWS OpsWorks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

    For more information, see Create a New Stack.

    ", + "CloneStackRequest$ClonePermissions": "

    Whether to clone the source stack's permissions.

    ", + "CreateAppRequest$EnableSsl": "

    Whether to enable SSL for the app.

    ", + "CreateInstanceRequest$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true to ensure that your instances have the latest security updates.

    ", + "CreateInstanceRequest$EbsOptimized": "

    Whether to create an Amazon EBS-optimized instance.

    ", + "CreateLayerRequest$EnableAutoHealing": "

    Whether to disable auto healing for the layer.

    ", + "CreateLayerRequest$AutoAssignElasticIps": "

    Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "CreateLayerRequest$AutoAssignPublicIps": "

    For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "CreateLayerRequest$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    To ensure that your instances have the latest security updates, we strongly recommend using the default value of true.

    ", + "CreateLayerRequest$UseEbsOptimizedInstances": "

    Whether to use Amazon EBS-optimized instances.

    ", + "CreateStackRequest$UseCustomCookbooks": "

    Whether the stack uses custom cookbooks.

    ", + "CreateStackRequest$UseOpsworksSecurityGroups": "

    Whether to associate the AWS OpsWorks built-in security groups with the stack's layers.

    AWS OpsWorks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:

    • True - AWS OpsWorks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.
    • False - AWS OpsWorks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

    For more information, see Create a New Stack.

    ", + "CreateUserProfileRequest$AllowSelfManagement": "

    Whether users can specify their own SSH public key through the My Settings page. For more information, see Setting an IAM User's Public SSH Key.

    ", + "DeleteInstanceRequest$DeleteElasticIp": "

    Whether to delete the instance Elastic IP address.

    ", + "DeleteInstanceRequest$DeleteVolumes": "

    Whether to delete the instance's Amazon EBS volumes.

    ", + "EbsBlockDevice$DeleteOnTermination": "

    Whether the volume is deleted on instance termination.

    ", + "EnvironmentVariable$Secure": "

    (Optional) Whether the variable's value will be returned by the DescribeApps action. To conceal an environment variable's value, set Secure to true. DescribeApps then returns *****FILTERED***** instead of the actual value. The default value for Secure is false.

    ", + "Instance$EbsOptimized": "

    Whether this is an Amazon EBS-optimized instance.

    ", + "Instance$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. If this value is set to false, you must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

    ", + "Layer$EnableAutoHealing": "

    Whether auto healing is disabled for the layer.

    ", + "Layer$AutoAssignElasticIps": "

    Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "Layer$AutoAssignPublicIps": "

    For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "Layer$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. If this value is set to false, you must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

    ", + "Layer$UseEbsOptimizedInstances": "

    Whether the layer uses Amazon EBS-optimized instances.

    ", + "LoadBasedAutoScalingConfiguration$Enable": "

    Whether load-based auto scaling is enabled for the layer.

    ", + "Permission$AllowSsh": "

    Whether the user can use SSH.

    ", + "Permission$AllowSudo": "

    Whether the user can use sudo.

    ", + "RdsDbInstance$MissingOnRds": "

    Set to true if AWS OpsWorks was unable to discover the Amazon RDS instance. AWS OpsWorks attempts to discover the instance only once. If this value is set to true, you must deregister the instance and then register it again.

    ", + "SetLoadBasedAutoScalingRequest$Enable": "

    Enables load-based auto scaling for the layer.

    ", + "SetPermissionRequest$AllowSsh": "

    The user is allowed to use SSH to communicate with the instance.

    ", + "SetPermissionRequest$AllowSudo": "

    The user is allowed to use sudo to elevate privileges.

    ", + "ShutdownEventConfiguration$DelayUntilElbConnectionsDrained": "

    Whether to enable Elastic Load Balancing connection draining. For more information, see Connection Draining

    ", + "Stack$UseCustomCookbooks": "

    Whether the stack uses custom cookbooks.

    ", + "Stack$UseOpsworksSecurityGroups": "

    Whether the stack automatically associates the AWS OpsWorks built-in security groups with the stack's layers.

    ", + "UpdateAppRequest$EnableSsl": "

    Whether SSL is enabled for the app.

    ", + "UpdateInstanceRequest$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

    ", + "UpdateInstanceRequest$EbsOptimized": "

    This property cannot be updated.

    ", + "UpdateLayerRequest$EnableAutoHealing": "

    Whether to disable auto healing for the layer.

    ", + "UpdateLayerRequest$AutoAssignElasticIps": "

    Whether to automatically assign an Elastic IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "UpdateLayerRequest$AutoAssignPublicIps": "

    For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer.

    ", + "UpdateLayerRequest$InstallUpdatesOnBoot": "

    Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances.

    We strongly recommend using the default value of true, to ensure that your instances have the latest security updates.

    ", + "UpdateLayerRequest$UseEbsOptimizedInstances": "

    Whether to use Amazon EBS-optimized instances.

    ", + "UpdateStackRequest$UseCustomCookbooks": "

    Whether the stack uses custom cookbooks.

    ", + "UpdateStackRequest$UseOpsworksSecurityGroups": "

    Whether to associate the AWS OpsWorks built-in security groups with the stack's layers.

    AWS OpsWorks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings:

    • True - AWS OpsWorks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.
    • False - AWS OpsWorks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on. Custom security groups are required only for those layers that need custom settings.

    For more information, see Create a New Stack.

    ", + "UpdateUserProfileRequest$AllowSelfManagement": "

    Whether users can specify their own SSH public key through the My Settings page. For more information, see Managing User Permissions.

    ", + "UserProfile$AllowSelfManagement": "

    Whether users can specify their own SSH public key through the My Settings page. For more information, see Managing User Permissions.

    " + } + }, + "ChefConfiguration": { + "base": "

    Describes the Chef configuration.

    ", + "refs": { + "CloneStackRequest$ChefConfiguration": "

    A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.

    ", + "CreateStackRequest$ChefConfiguration": "

    A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.

    ", + "Stack$ChefConfiguration": "

    A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version. For more information, see Create a New Stack.

    ", + "UpdateStackRequest$ChefConfiguration": "

    A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack.

    " + } + }, + "CloneStackRequest": { + "base": null, + "refs": { + } + }, + "CloneStackResult": { + "base": "

    Contains the response to a CloneStack request.

    ", + "refs": { + } + }, + "Command": { + "base": "

    Describes a command.

    ", + "refs": { + "Commands$member": null + } + }, + "Commands": { + "base": null, + "refs": { + "DescribeCommandsResult$Commands": "

    An array of Command objects that describe each of the specified commands.

    " + } + }, + "CreateAppRequest": { + "base": null, + "refs": { + } + }, + "CreateAppResult": { + "base": "

    Contains the response to a CreateApp request.

    ", + "refs": { + } + }, + "CreateDeploymentRequest": { + "base": null, + "refs": { + } + }, + "CreateDeploymentResult": { + "base": "

    Contains the response to a CreateDeployment request.

    ", + "refs": { + } + }, + "CreateInstanceRequest": { + "base": null, + "refs": { + } + }, + "CreateInstanceResult": { + "base": "

    Contains the response to a CreateInstance request.

    ", + "refs": { + } + }, + "CreateLayerRequest": { + "base": null, + "refs": { + } + }, + "CreateLayerResult": { + "base": "

    Contains the response to a CreateLayer request.

    ", + "refs": { + } + }, + "CreateStackRequest": { + "base": null, + "refs": { + } + }, + "CreateStackResult": { + "base": "

    Contains the response to a CreateStack request.

    ", + "refs": { + } + }, + "CreateUserProfileRequest": { + "base": null, + "refs": { + } + }, + "CreateUserProfileResult": { + "base": "

    Contains the response to a CreateUserProfile request.

    ", + "refs": { + } + }, + "DailyAutoScalingSchedule": { + "base": null, + "refs": { + "WeeklyAutoScalingSchedule$Monday": "

    The schedule for Monday.

    ", + "WeeklyAutoScalingSchedule$Tuesday": "

    The schedule for Tuesday.

    ", + "WeeklyAutoScalingSchedule$Wednesday": "

    The schedule for Wednesday.

    ", + "WeeklyAutoScalingSchedule$Thursday": "

    The schedule for Thursday.

    ", + "WeeklyAutoScalingSchedule$Friday": "

    The schedule for Friday.

    ", + "WeeklyAutoScalingSchedule$Saturday": "

    The schedule for Saturday.

    ", + "WeeklyAutoScalingSchedule$Sunday": "

    The schedule for Sunday.

    " + } + }, + "DataSource": { + "base": "

    Describes an app's data source.

    ", + "refs": { + "DataSources$member": null + } + }, + "DataSources": { + "base": null, + "refs": { + "App$DataSources": "

    The app's data sources.

    ", + "CreateAppRequest$DataSources": "

    The app's data source.

    ", + "UpdateAppRequest$DataSources": "

    The app's data sources.

    " + } + }, + "DateTime": { + "base": null, + "refs": { + "Command$CreatedAt": "

    Date and time when the command was run.

    ", + "Command$AcknowledgedAt": "

    Date and time when the command was acknowledged.

    ", + "Command$CompletedAt": "

    Date when the command completed.

    ", + "Deployment$CreatedAt": "

    Date when the deployment was created.

    ", + "Deployment$CompletedAt": "

    Date when the deployment completed.

    ", + "EcsCluster$RegisteredAt": "

    The time and date that the cluster was registered with the stack.

    ", + "Instance$CreatedAt": "

    The time that the instance was created.

    ", + "Layer$CreatedAt": "

    Date when the layer was created.

    ", + "RaidArray$CreatedAt": "

    When the RAID array was created.

    ", + "ServiceError$CreatedAt": "

    When the error occurred.

    ", + "Stack$CreatedAt": "

    The date when the stack was created.

    " + } + }, + "DeleteAppRequest": { + "base": null, + "refs": { + } + }, + "DeleteInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeleteLayerRequest": { + "base": null, + "refs": { + } + }, + "DeleteStackRequest": { + "base": null, + "refs": { + } + }, + "DeleteUserProfileRequest": { + "base": null, + "refs": { + } + }, + "Deployment": { + "base": "

    Describes a deployment of a stack or app.

    ", + "refs": { + "Deployments$member": null + } + }, + "DeploymentCommand": { + "base": "

    Used to specify a stack or deployment command.

    ", + "refs": { + "CreateDeploymentRequest$Command": "

    A DeploymentCommand object that specifies the deployment command and any associated arguments.

    ", + "Deployment$Command": null + } + }, + "DeploymentCommandArgs": { + "base": null, + "refs": { + "DeploymentCommand$Args": "

    The arguments of those commands that take arguments. It should be set to a JSON object with the following format:

    {\"arg_name1\" : [\"value1\", \"value2\", ...], \"arg_name2\" : [\"value1\", \"value2\", ...], ...}

    The update_dependencies command takes two arguments:

    • upgrade_os_to - Specifies the desired Amazon Linux version for instances whose OS you want to upgrade, such as Amazon Linux 2014.09. You must also set the allow_reboot argument to true.
    • allow_reboot - Specifies whether to allow AWS OpsWorks to reboot the instances if necessary, after installing the updates. This argument can be set to either true or false. The default value is false.

    For example, to upgrade an instance to Amazon Linux 2014.09, set Args to the following.

    { \"upgrade_os_to\":[\"Amazon Linux 2014.09\"], \"allow_reboot\":[\"true\"] } " + } + }, + "DeploymentCommandName": { + "base": null, + "refs": { + "DeploymentCommand$Name": "

    Specifies the operation. You can specify only one command.

    For stacks, the following commands are available:

    • execute_recipes: Execute one or more recipes. To specify the recipes, set an Args parameter named recipes to the list of recipes to be executed. For example, to execute phpapp::appsetup, set Args to {\"recipes\":[\"phpapp::appsetup\"]}.
    • install_dependencies: Install the stack's dependencies.
    • update_custom_cookbooks: Update the stack's custom cookbooks.
    • update_dependencies: Update the stack's dependencies.
    The update_dependencies and install_dependencies commands are supported only for Linux instances. You can run the commands successfully on Windows instances, but they do nothing.

    For apps, the following commands are available:

    • deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter named migrate. Set Args to {\"migrate\":[\"true\"]} to migrate the database. The default setting is {\"migrate\":[\"false\"]}.
    • rollback Roll the app back to the previous version. When you update an app, AWS OpsWorks stores the previous version, up to a maximum of five versions. You can use this command to roll an app back as many as four versions.
    • start: Start the app's web or application server.
    • stop: Stop the app's web or application server.
    • restart: Restart the app's web or application server.
    • undeploy: Undeploy the app.
    " + } + }, + "Deployments": { + "base": null, + "refs": { + "DescribeDeploymentsResult$Deployments": "

    An array of Deployment objects that describe the deployments.

    " + } + }, + "DeregisterEcsClusterRequest": { + "base": null, + "refs": { + } + }, + "DeregisterElasticIpRequest": { + "base": null, + "refs": { + } + }, + "DeregisterInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeregisterRdsDbInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeregisterVolumeRequest": { + "base": null, + "refs": { + } + }, + "DescribeAgentVersionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAgentVersionsResult": { + "base": "

    Contains the response to a DescribeAgentVersions request.

    ", + "refs": { + } + }, + "DescribeAppsRequest": { + "base": null, + "refs": { + } + }, + "DescribeAppsResult": { + "base": "

    Contains the response to a DescribeApps request.

    ", + "refs": { + } + }, + "DescribeCommandsRequest": { + "base": null, + "refs": { + } + }, + "DescribeCommandsResult": { + "base": "

    Contains the response to a DescribeCommands request.

    ", + "refs": { + } + }, + "DescribeDeploymentsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDeploymentsResult": { + "base": "

    Contains the response to a DescribeDeployments request.

    ", + "refs": { + } + }, + "DescribeEcsClustersRequest": { + "base": null, + "refs": { + } + }, + "DescribeEcsClustersResult": { + "base": "

    Contains the response to a DescribeEcsClusters request.

    ", + "refs": { + } + }, + "DescribeElasticIpsRequest": { + "base": null, + "refs": { + } + }, + "DescribeElasticIpsResult": { + "base": "

    Contains the response to a DescribeElasticIps request.

    ", + "refs": { + } + }, + "DescribeElasticLoadBalancersRequest": { + "base": null, + "refs": { + } + }, + "DescribeElasticLoadBalancersResult": { + "base": "

    Contains the response to a DescribeElasticLoadBalancers request.

    ", + "refs": { + } + }, + "DescribeInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstancesResult": { + "base": "

    Contains the response to a DescribeInstances request.

    ", + "refs": { + } + }, + "DescribeLayersRequest": { + "base": null, + "refs": { + } + }, + "DescribeLayersResult": { + "base": "

    Contains the response to a DescribeLayers request.

    ", + "refs": { + } + }, + "DescribeLoadBasedAutoScalingRequest": { + "base": null, + "refs": { + } + }, + "DescribeLoadBasedAutoScalingResult": { + "base": "

    Contains the response to a DescribeLoadBasedAutoScaling request.

    ", + "refs": { + } + }, + "DescribeMyUserProfileResult": { + "base": "

    Contains the response to a DescribeMyUserProfile request.

    ", + "refs": { + } + }, + "DescribePermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribePermissionsResult": { + "base": "

    Contains the response to a DescribePermissions request.

    ", + "refs": { + } + }, + "DescribeRaidArraysRequest": { + "base": null, + "refs": { + } + }, + "DescribeRaidArraysResult": { + "base": "

    Contains the response to a DescribeRaidArrays request.

    ", + "refs": { + } + }, + "DescribeRdsDbInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeRdsDbInstancesResult": { + "base": "

    Contains the response to a DescribeRdsDbInstances request.

    ", + "refs": { + } + }, + "DescribeServiceErrorsRequest": { + "base": null, + "refs": { + } + }, + "DescribeServiceErrorsResult": { + "base": "

    Contains the response to a DescribeServiceErrors request.

    ", + "refs": { + } + }, + "DescribeStackProvisioningParametersRequest": { + "base": null, + "refs": { + } + }, + "DescribeStackProvisioningParametersResult": { + "base": "

    Contains the response to a DescribeStackProvisioningParameters request.

    ", + "refs": { + } + }, + "DescribeStackSummaryRequest": { + "base": null, + "refs": { + } + }, + "DescribeStackSummaryResult": { + "base": "

    Contains the response to a DescribeStackSummary request.

    ", + "refs": { + } + }, + "DescribeStacksRequest": { + "base": null, + "refs": { + } + }, + "DescribeStacksResult": { + "base": "

    Contains the response to a DescribeStacks request.

    ", + "refs": { + } + }, + "DescribeTimeBasedAutoScalingRequest": { + "base": null, + "refs": { + } + }, + "DescribeTimeBasedAutoScalingResult": { + "base": "

    Contains the response to a DescribeTimeBasedAutoScaling request.

    ", + "refs": { + } + }, + "DescribeUserProfilesRequest": { + "base": null, + "refs": { + } + }, + "DescribeUserProfilesResult": { + "base": "

    Contains the response to a DescribeUserProfiles request.

    ", + "refs": { + } + }, + "DescribeVolumesRequest": { + "base": null, + "refs": { + } + }, + "DescribeVolumesResult": { + "base": "

    Contains the response to a DescribeVolumes request.

    ", + "refs": { + } + }, + "DetachElasticLoadBalancerRequest": { + "base": null, + "refs": { + } + }, + "DisassociateElasticIpRequest": { + "base": null, + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "AutoScalingThresholds$CpuThreshold": "

    The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold.

    ", + "AutoScalingThresholds$MemoryThreshold": "

    The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold.

    ", + "AutoScalingThresholds$LoadThreshold": "

    The load threshold. A value of -1 disables the threshold. For more information about how load is computed, see Load (computing).

    " + } + }, + "EbsBlockDevice": { + "base": "

    Describes an Amazon EBS volume. This data type maps directly to the Amazon EC2 EbsBlockDevice data type.

    ", + "refs": { + "BlockDeviceMapping$Ebs": "

    An EBSBlockDevice that defines how to configure an Amazon EBS volume when the instance is launched.

    " + } + }, + "EcsCluster": { + "base": "

    Describes a registered Amazon ECS cluster.

    ", + "refs": { + "EcsClusters$member": null + } + }, + "EcsClusters": { + "base": null, + "refs": { + "DescribeEcsClustersResult$EcsClusters": "

    A list of EcsCluster objects containing the cluster descriptions.

    " + } + }, + "ElasticIp": { + "base": "

    Describes an Elastic IP address.

    ", + "refs": { + "ElasticIps$member": null + } + }, + "ElasticIps": { + "base": null, + "refs": { + "DescribeElasticIpsResult$ElasticIps": "

    An ElasticIps object that describes the specified Elastic IP addresses.

    " + } + }, + "ElasticLoadBalancer": { + "base": "

    Describes an Elastic Load Balancing instance.

    ", + "refs": { + "ElasticLoadBalancers$member": null + } + }, + "ElasticLoadBalancers": { + "base": null, + "refs": { + "DescribeElasticLoadBalancersResult$ElasticLoadBalancers": "

    A list of ElasticLoadBalancer objects that describe the specified Elastic Load Balancing instances.

    " + } + }, + "EnvironmentVariable": { + "base": "

    Represents an app's environment variable.

    ", + "refs": { + "EnvironmentVariables$member": null + } + }, + "EnvironmentVariables": { + "base": null, + "refs": { + "App$Environment": "

    An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables.

    There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases, but if you do exceed it, you will cause an exception (API) with an \"Environment: is too large (maximum is 10KB)\" message. ", + "CreateAppRequest$Environment": "

    An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instance. For more information, see Environment Variables.

    There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, \"Environment: is too large (maximum is 10KB).\"

    This parameter is supported only by Chef 11.10 stacks. If you have specified one or more environment variables, you cannot modify the stack's Chef version.", + "UpdateAppRequest$Environment": "

    An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances.For more information, see Environment Variables.

    There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, \"Environment: is too large (maximum is 10KB).\"

    This parameter is supported only by Chef 11.10 stacks. If you have specified one or more environment variables, you cannot modify the stack's Chef version. " + } + }, + "GetHostnameSuggestionRequest": { + "base": null, + "refs": { + } + }, + "GetHostnameSuggestionResult": { + "base": "

    Contains the response to a GetHostnameSuggestion request.

    ", + "refs": { + } + }, + "GrantAccessRequest": { + "base": null, + "refs": { + } + }, + "GrantAccessResult": { + "base": "

    Contains the response to a GrantAccess request.

    ", + "refs": { + } + }, + "Hour": { + "base": null, + "refs": { + "DailyAutoScalingSchedule$key": null + } + }, + "Instance": { + "base": "

    Describes an instance.

    ", + "refs": { + "Instances$member": null + } + }, + "InstanceIdentity": { + "base": "

    Contains a description of an Amazon EC2 instance from the Amazon EC2 metadata service. For more information, see Instance Metadata and User Data.

    ", + "refs": { + "RegisterInstanceRequest$InstanceIdentity": "

    An InstanceIdentity object that contains the instance's identity.

    " + } + }, + "Instances": { + "base": null, + "refs": { + "DescribeInstancesResult$Instances": "

    An array of Instance objects that describe the instances.

    " + } + }, + "InstancesCount": { + "base": "

    Describes how many instances a stack has for each status.

    ", + "refs": { + "StackSummary$InstancesCount": "

    An InstancesCount object with the number of instances in each status.

    " + } + }, + "Integer": { + "base": null, + "refs": { + "AutoScalingThresholds$InstanceCount": "

    The number of instances to add or remove when the load exceeds a threshold.

    ", + "Command$ExitCode": "

    The command exit code.

    ", + "Deployment$Duration": "

    The deployment duration.

    ", + "DescribeEcsClustersRequest$MaxResults": "

    To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

    ", + "EbsBlockDevice$Iops": "

    The number of I/O operations per second (IOPS) that the volume supports. For more information, see EbsBlockDevice.

    ", + "EbsBlockDevice$VolumeSize": "

    The volume size, in GiB. For more information, see EbsBlockDevice.

    ", + "InstancesCount$Assigning": "

    The number of instances in the Assigning state.

    ", + "InstancesCount$Booting": "

    The number of instances with booting status.

    ", + "InstancesCount$ConnectionLost": "

    The number of instances with connection_lost status.

    ", + "InstancesCount$Deregistering": "

    The number of instances in the Deregistering state.

    ", + "InstancesCount$Online": "

    The number of instances with online status.

    ", + "InstancesCount$Pending": "

    The number of instances with pending status.

    ", + "InstancesCount$Rebooting": "

    The number of instances with rebooting status.

    ", + "InstancesCount$Registered": "

    The number of instances in the Registered state.

    ", + "InstancesCount$Registering": "

    The number of instances in the Registering state.

    ", + "InstancesCount$Requested": "

    The number of instances with requested status.

    ", + "InstancesCount$RunningSetup": "

    The number of instances with running_setup status.

    ", + "InstancesCount$SetupFailed": "

    The number of instances with setup_failed status.

    ", + "InstancesCount$ShuttingDown": "

    The number of instances with shutting_down status.

    ", + "InstancesCount$StartFailed": "

    The number of instances with start_failed status.

    ", + "InstancesCount$Stopped": "

    The number of instances with stopped status.

    ", + "InstancesCount$Stopping": "

    The number of instances with stopping status.

    ", + "InstancesCount$Terminated": "

    The number of instances with terminated status.

    ", + "InstancesCount$Terminating": "

    The number of instances with terminating status.

    ", + "InstancesCount$Unassigning": "

    The number of instances in the Unassigning state.

    ", + "RaidArray$RaidLevel": "

    The RAID level.

    ", + "RaidArray$NumberOfDisks": "

    The number of disks in the array.

    ", + "RaidArray$Size": "

    The array's size.

    ", + "RaidArray$Iops": "

    For PIOPS volumes, the IOPS per disk.

    ", + "ShutdownEventConfiguration$ExecutionTimeout": "

    The time, in seconds, that AWS OpsWorks will wait after triggering a Shutdown event before shutting down an instance.

    ", + "StackSummary$LayersCount": "

    The number of layers.

    ", + "StackSummary$AppsCount": "

    The number of apps.

    ", + "TemporaryCredential$ValidForInMinutes": "

    The length of time (in minutes) that the grant is valid. When the grant expires, at the end of this period, the user will no longer be able to use the credentials to log in. If they are logged in at the time, they will be automatically logged out.

    ", + "Volume$Size": "

    The volume size.

    ", + "Volume$Iops": "

    For PIOPS volumes, the IOPS per disk.

    ", + "VolumeConfiguration$RaidLevel": "

    The volume RAID level.

    ", + "VolumeConfiguration$NumberOfDisks": "

    The number of disks in the volume.

    ", + "VolumeConfiguration$Size": "

    The volume size.

    ", + "VolumeConfiguration$Iops": "

    For PIOPS volumes, the IOPS per disk.

    " + } + }, + "Layer": { + "base": "

    Describes a layer.

    ", + "refs": { + "Layers$member": null + } + }, + "LayerAttributes": { + "base": null, + "refs": { + "CreateLayerRequest$Attributes": "

    One or more user-defined key-value pairs to be added to the stack attributes.

    To create a cluster layer, set the EcsClusterArn attribute to the cluster's ARN.

    ", + "Layer$Attributes": "

    The layer attributes.

    For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes, AWS OpsWorks returns *****FILTERED***** instead of the actual value

    For an ECS Cluster layer, AWS OpsWorks the EcsClusterArn attribute is set to the cluster's ARN.

    ", + "UpdateLayerRequest$Attributes": "

    One or more user-defined key/value pairs to be added to the stack attributes.

    " + } + }, + "LayerAttributesKeys": { + "base": null, + "refs": { + "LayerAttributes$key": null + } + }, + "LayerType": { + "base": null, + "refs": { + "CreateLayerRequest$Type": "

    The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers.

    ", + "Layer$Type": "

    The layer type.

    " + } + }, + "Layers": { + "base": null, + "refs": { + "DescribeLayersResult$Layers": "

    An array of Layer objects that describe the layers.

    " + } + }, + "LifecycleEventConfiguration": { + "base": "

    Specifies the lifecycle event configuration

    ", + "refs": { + "CreateLayerRequest$LifecycleEventConfiguration": "

    A LifeCycleEventConfiguration object that you can use to configure the Shutdown event to specify an execution timeout and enable or disable Elastic Load Balancer connection draining.

    ", + "Layer$LifecycleEventConfiguration": "

    A LifeCycleEventConfiguration object that specifies the Shutdown event configuration.

    ", + "UpdateLayerRequest$LifecycleEventConfiguration": "

    " + } + }, + "LoadBasedAutoScalingConfiguration": { + "base": "

    Describes a layer's load-based auto scaling configuration.

    ", + "refs": { + "LoadBasedAutoScalingConfigurations$member": null + } + }, + "LoadBasedAutoScalingConfigurations": { + "base": null, + "refs": { + "DescribeLoadBasedAutoScalingResult$LoadBasedAutoScalingConfigurations": "

    An array of LoadBasedAutoScalingConfiguration objects that describe each layer's configuration.

    " + } + }, + "Minute": { + "base": null, + "refs": { + "AutoScalingThresholds$ThresholdsWaitTime": "

    The amount of time, in minutes, that the load must exceed a threshold before more instances are added or removed.

    ", + "AutoScalingThresholds$IgnoreMetricsTime": "

    The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks should ignore metrics and suppress additional scaling events. For example, AWS OpsWorks adds new instances following an upscaling event but the instances won't start reducing the load until they have been booted and configured. There is no point in raising additional scaling events during that operation, which typically takes several minutes. IgnoreMetricsTime allows you to direct AWS OpsWorks to suppress scaling events long enough to get the new instances online.

    " + } + }, + "Parameters": { + "base": null, + "refs": { + "DescribeStackProvisioningParametersResult$Parameters": "

    An embedded object that contains the provisioning parameters.

    " + } + }, + "Permission": { + "base": "

    Describes stack or user permissions.

    ", + "refs": { + "Permissions$member": null + } + }, + "Permissions": { + "base": null, + "refs": { + "DescribePermissionsResult$Permissions": "

    An array of Permission objects that describe the stack permissions.

    • If the request object contains only a stack ID, the array contains a Permission object with permissions for each of the stack IAM ARNs.
    • If the request object contains only an IAM ARN, the array contains a Permission object with permissions for each of the user's stack IDs.
    • If the request contains a stack ID and an IAM ARN, the array contains a single Permission object with permissions for the specified stack and IAM ARN.
    " + } + }, + "RaidArray": { + "base": "

    Describes an instance's RAID array.

    ", + "refs": { + "RaidArrays$member": null + } + }, + "RaidArrays": { + "base": null, + "refs": { + "DescribeRaidArraysResult$RaidArrays": "

    A RaidArrays object that describes the specified RAID arrays.

    " + } + }, + "RdsDbInstance": { + "base": "

    Describes an Amazon RDS instance.

    ", + "refs": { + "RdsDbInstances$member": null + } + }, + "RdsDbInstances": { + "base": null, + "refs": { + "DescribeRdsDbInstancesResult$RdsDbInstances": "

    An a array of RdsDbInstance objects that describe the instances.

    " + } + }, + "RebootInstanceRequest": { + "base": null, + "refs": { + } + }, + "Recipes": { + "base": "

    AWS OpsWorks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard recipes for each event. In addition, you can provide custom recipes for any or all layers and events. AWS OpsWorks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events.

    To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder.

    ", + "refs": { + "CreateLayerRequest$CustomRecipes": "

    A LayerCustomRecipes object that specifies the layer custom recipes.

    ", + "Layer$DefaultRecipes": null, + "Layer$CustomRecipes": "

    A LayerCustomRecipes object that specifies the layer's custom recipes.

    ", + "UpdateLayerRequest$CustomRecipes": "

    A LayerCustomRecipes object that specifies the layer's custom recipes.

    " + } + }, + "RegisterEcsClusterRequest": { + "base": null, + "refs": { + } + }, + "RegisterEcsClusterResult": { + "base": "

    Contains the response to a RegisterEcsCluster request.

    ", + "refs": { + } + }, + "RegisterElasticIpRequest": { + "base": null, + "refs": { + } + }, + "RegisterElasticIpResult": { + "base": "

    Contains the response to a RegisterElasticIp request.

    ", + "refs": { + } + }, + "RegisterInstanceRequest": { + "base": null, + "refs": { + } + }, + "RegisterInstanceResult": { + "base": "

    Contains the response to a RegisterInstanceResult request.

    ", + "refs": { + } + }, + "RegisterRdsDbInstanceRequest": { + "base": null, + "refs": { + } + }, + "RegisterVolumeRequest": { + "base": null, + "refs": { + } + }, + "RegisterVolumeResult": { + "base": "

    Contains the response to a RegisterVolume request.

    ", + "refs": { + } + }, + "ReportedOs": { + "base": "

    A registered instance's reported operating system.

    ", + "refs": { + "Instance$ReportedOs": "

    For registered instances, the reported operating system.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    Indicates that a resource was not found.

    ", + "refs": { + } + }, + "RootDeviceType": { + "base": null, + "refs": { + "CloneStackRequest$DefaultRootDeviceType": "

    The default root device type. This value is used by default for all instances in the cloned stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

    ", + "CreateInstanceRequest$RootDeviceType": "

    The instance root device type. For more information, see Storage for the Root Device.

    ", + "CreateStackRequest$DefaultRootDeviceType": "

    The default root device type. This value is the default for all instances in the stack, but you can override it when you create an instance. The default option is instance-store. For more information, see Storage for the Root Device.

    ", + "Instance$RootDeviceType": "

    The instance's root device type. For more information, see Storage for the Root Device.

    ", + "Stack$DefaultRootDeviceType": "

    The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

    ", + "UpdateStackRequest$DefaultRootDeviceType": "

    The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device.

    " + } + }, + "SelfUserProfile": { + "base": "

    Describes a user's SSH information.

    ", + "refs": { + "DescribeMyUserProfileResult$UserProfile": "

    A UserProfile object that describes the user's SSH information.

    " + } + }, + "ServiceError": { + "base": "

    Describes an AWS OpsWorks service error.

    ", + "refs": { + "ServiceErrors$member": null + } + }, + "ServiceErrors": { + "base": null, + "refs": { + "DescribeServiceErrorsResult$ServiceErrors": "

    An array of ServiceError objects that describe the specified service errors.

    " + } + }, + "SetLoadBasedAutoScalingRequest": { + "base": null, + "refs": { + } + }, + "SetPermissionRequest": { + "base": null, + "refs": { + } + }, + "SetTimeBasedAutoScalingRequest": { + "base": null, + "refs": { + } + }, + "ShutdownEventConfiguration": { + "base": "

    The Shutdown event configuration.

    ", + "refs": { + "LifecycleEventConfiguration$Shutdown": "

    A ShutdownEventConfiguration object that specifies the Shutdown event configuration.

    " + } + }, + "Source": { + "base": "

    Contains the information required to retrieve an app or cookbook from a repository. For more information, see Creating Apps or Custom Recipes and Cookbooks.

    ", + "refs": { + "App$AppSource": "

    A Source object that describes the app repository.

    ", + "CloneStackRequest$CustomCookbooksSource": null, + "CreateAppRequest$AppSource": "

    A Source object that specifies the app repository.

    ", + "CreateStackRequest$CustomCookbooksSource": null, + "Stack$CustomCookbooksSource": null, + "UpdateAppRequest$AppSource": "

    A Source object that specifies the app repository.

    ", + "UpdateStackRequest$CustomCookbooksSource": null + } + }, + "SourceType": { + "base": null, + "refs": { + "Source$Type": "

    The repository type.

    " + } + }, + "SslConfiguration": { + "base": "

    Describes an app's SSL configuration.

    ", + "refs": { + "App$SslConfiguration": "

    An SslConfiguration object with the SSL configuration.

    ", + "CreateAppRequest$SslConfiguration": "

    An SslConfiguration object with the SSL configuration.

    ", + "UpdateAppRequest$SslConfiguration": "

    An SslConfiguration object with the SSL configuration.

    " + } + }, + "Stack": { + "base": "

    Describes a stack.

    ", + "refs": { + "Stacks$member": null + } + }, + "StackAttributes": { + "base": null, + "refs": { + "CloneStackRequest$Attributes": "

    A list of stack attributes and values as key/value pairs to be added to the cloned stack.

    ", + "CreateStackRequest$Attributes": "

    One or more user-defined key-value pairs to be added to the stack attributes.

    ", + "Stack$Attributes": "

    The stack's attributes.

    ", + "UpdateStackRequest$Attributes": "

    One or more user-defined key-value pairs to be added to the stack attributes.

    " + } + }, + "StackAttributesKeys": { + "base": null, + "refs": { + "StackAttributes$key": null + } + }, + "StackConfigurationManager": { + "base": "

    Describes the configuration manager.

    ", + "refs": { + "AgentVersion$ConfigurationManager": "

    The configuration manager.

    ", + "CloneStackRequest$ConfigurationManager": "

    The configuration manager. When you clone a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 11.4.

    ", + "CreateStackRequest$ConfigurationManager": "

    The configuration manager. When you create a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 11.4.

    ", + "DescribeAgentVersionsRequest$ConfigurationManager": "

    The configuration manager.

    ", + "Stack$ConfigurationManager": "

    The configuration manager.

    ", + "UpdateStackRequest$ConfigurationManager": "

    The configuration manager. When you update a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 11.4.

    " + } + }, + "StackSummary": { + "base": "

    Summarizes the number of layers, instances, and apps in a stack.

    ", + "refs": { + "DescribeStackSummaryResult$StackSummary": "

    A StackSummary object that contains the results.

    " + } + }, + "Stacks": { + "base": null, + "refs": { + "DescribeStacksResult$Stacks": "

    An array of Stack objects that describe the stacks.

    " + } + }, + "StartInstanceRequest": { + "base": null, + "refs": { + } + }, + "StartStackRequest": { + "base": null, + "refs": { + } + }, + "StopInstanceRequest": { + "base": null, + "refs": { + } + }, + "StopStackRequest": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AgentVersion$Version": "

    The agent version.

    ", + "App$AppId": "

    The app ID.

    ", + "App$StackId": "

    The app stack ID.

    ", + "App$Shortname": "

    The app's short name.

    ", + "App$Name": "

    The app name.

    ", + "App$Description": "

    A description of the app.

    ", + "App$CreatedAt": "

    When the app was created.

    ", + "AppAttributes$value": null, + "AssignInstanceRequest$InstanceId": "

    The instance ID.

    ", + "AssignVolumeRequest$VolumeId": "

    The volume ID.

    ", + "AssignVolumeRequest$InstanceId": "

    The instance ID.

    ", + "AssociateElasticIpRequest$ElasticIp": "

    The Elastic IP address.

    ", + "AssociateElasticIpRequest$InstanceId": "

    The instance ID.

    ", + "AttachElasticLoadBalancerRequest$ElasticLoadBalancerName": "

    The Elastic Load Balancing instance's name.

    ", + "AttachElasticLoadBalancerRequest$LayerId": "

    The ID of the layer that the Elastic Load Balancing instance is to be attached to.

    ", + "BlockDeviceMapping$DeviceName": "

    The device name that is exposed to the instance, such as /dev/sdh. For the root device, you can use the explicit device name or you can set this parameter to ROOT_DEVICE and AWS OpsWorks will provide the correct device name.

    ", + "BlockDeviceMapping$NoDevice": "

    Suppresses the specified device included in the AMI's block device mapping.

    ", + "BlockDeviceMapping$VirtualName": "

    The virtual device name. For more information, see BlockDeviceMapping.

    ", + "ChefConfiguration$BerkshelfVersion": "

    The Berkshelf version.

    ", + "CloneStackRequest$SourceStackId": "

    The source stack ID.

    ", + "CloneStackRequest$Name": "

    The cloned stack name.

    ", + "CloneStackRequest$Region": "

    The cloned stack AWS region, such as \"us-east-1\". For more information about AWS regions, see Regions and Endpoints.

    ", + "CloneStackRequest$VpcId": "

    The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later.

    • If your account supports EC2 Classic, the default value is no VPC.
    • If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

    If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

    If you specify a nondefault VPC ID, note the following:

    • It must belong to a VPC in your account that is in the specified region.
    • You must specify a value for DefaultSubnetId.

    For more information on how to use AWS OpsWorks with a VPC, see Running a Stack in a VPC. For more information on default VPC and EC2 Classic, see Supported Platforms.

    ", + "CloneStackRequest$ServiceRoleArn": "

    The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the AWS OpsWorks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. For more information about IAM ARNs, see Using Identifiers.

    You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly.

    ", + "CloneStackRequest$DefaultInstanceProfileArn": "

    The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "CloneStackRequest$DefaultOs": "

    The stack's operating system, which must be set to one of the following.

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS.
    • Microsoft Windows Server 2012 R2 Base.
    • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

    The default option is the parent stack's operating system. For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.", + "CloneStackRequest$HostnameTheme": "

    The stack's host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

    • Baked_Goods
    • Clouds
    • Europe_Cities
    • Fruits
    • Greek_Deities
    • Legendary_creatures_from_Japan
    • Planets_and_Moons
    • Roman_Deities
    • Scottish_Islands
    • US_Cities
    • Wild_Cats

    To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.

    ", + "CloneStackRequest$DefaultAvailabilityZone": "

    The cloned stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description.

    ", + "CloneStackRequest$DefaultSubnetId": "

    The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description.

    ", + "CloneStackRequest$CustomJson": "

    A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes

    ", + "CloneStackRequest$DefaultSshKeyName": "

    A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

    ", + "CloneStackRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically installs new agent versions on the stack's instances as soon as they are available.
    • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks then automatically installs that version on the stack's instances.

    The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.", + "CloneStackResult$StackId": "

    The cloned stack ID.

    ", + "Command$CommandId": "

    The command ID.

    ", + "Command$InstanceId": "

    The ID of the instance where the command was executed.

    ", + "Command$DeploymentId": "

    The command deployment ID.

    ", + "Command$Status": "

    The command status:

    • failed
    • successful
    • skipped
    • pending
    ", + "Command$LogUrl": "

    The URL of the command log.

    ", + "Command$Type": "

    The command type:

    • deploy
    • rollback
    • start
    • stop
    • restart
    • undeploy
    • update_dependencies
    • install_dependencies
    • update_custom_cookbooks
    • execute_recipes
    ", + "CreateAppRequest$StackId": "

    The stack ID.

    ", + "CreateAppRequest$Shortname": "

    The app's short name.

    ", + "CreateAppRequest$Name": "

    The app name.

    ", + "CreateAppRequest$Description": "

    A description of the app.

    ", + "CreateAppResult$AppId": "

    The app ID.

    ", + "CreateDeploymentRequest$StackId": "

    The stack ID.

    ", + "CreateDeploymentRequest$AppId": "

    The app ID. This parameter is required for app deployments, but not for other deployment commands.

    ", + "CreateDeploymentRequest$Comment": "

    A user-defined comment.

    ", + "CreateDeploymentRequest$CustomJson": "

    A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "CreateDeploymentResult$DeploymentId": "

    The deployment ID, which can be used with other requests to identify the deployment.

    ", + "CreateInstanceRequest$StackId": "

    The stack ID.

    ", + "CreateInstanceRequest$InstanceType": "

    The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table.

    ", + "CreateInstanceRequest$Hostname": "

    The instance host name.

    ", + "CreateInstanceRequest$Os": "

    The instance's operating system, which must be set to one of the following.

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS.
    • Microsoft Windows Server 2012 R2 Base.
    • A custom AMI: Custom.

    For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information on the supported operating systems, see Operating Systems. For more information on how to use custom AMIs with AWS OpsWorks, see Using Custom AMIs.

    ", + "CreateInstanceRequest$AmiId": "

    A custom AMI ID to be used to create the instance. The AMI should be based on one of the supported operating systems. For more information, see Using Custom AMIs.

    If you specify a custom AMI, you must set Os to Custom.", + "CreateInstanceRequest$SshKeyName": "

    The instance's Amazon EC2 key-pair name.

    ", + "CreateInstanceRequest$AvailabilityZone": "

    The instance Availability Zone. For more information, see Regions and Endpoints.

    ", + "CreateInstanceRequest$VirtualizationType": "

    The instance's virtualization type, paravirtual or hvm.

    ", + "CreateInstanceRequest$SubnetId": "

    The ID of the instance's subnet. If the stack is running in a VPC, you can use this parameter to override the stack's default subnet ID value and direct AWS OpsWorks to launch the instance in a different subnet.

    ", + "CreateInstanceRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • INHERIT - Use the stack's default agent version setting.
    • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, edit the instance configuration and specify a new version. AWS OpsWorks then automatically installs that version on the instance.

    The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    ", + "CreateInstanceResult$InstanceId": "

    The instance ID.

    ", + "CreateLayerRequest$StackId": "

    The layer stack ID.

    ", + "CreateLayerRequest$Name": "

    The layer name, which is used by the console.

    ", + "CreateLayerRequest$Shortname": "

    For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'.

    The built-in layers' short names are defined by AWS OpsWorks. For more information, see the Layer Reference.

    ", + "CreateLayerRequest$CustomInstanceProfileArn": "

    The ARN of an IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "CreateLayerRequest$CustomJson": "

    A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON. This feature is supported as of version 1.7.42 of the AWS CLI.

    ", + "CreateLayerResult$LayerId": "

    The layer ID.

    ", + "CreateStackRequest$Name": "

    The stack name.

    ", + "CreateStackRequest$Region": "

    The stack's AWS region, such as \"us-east-1\". For more information about Amazon regions, see Regions and Endpoints.

    ", + "CreateStackRequest$VpcId": "

    The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.

    • If your account supports EC2-Classic, the default value is no VPC.
    • If your account does not support EC2-Classic, the default value is the default VPC for the specified region.

    If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

    If you specify a nondefault VPC ID, note the following:

    • It must belong to a VPC in your account that is in the specified region.
    • You must specify a value for DefaultSubnetId.

    For more information on how to use AWS OpsWorks with a VPC, see Running a Stack in a VPC. For more information on default VPC and EC2-Classic, see Supported Platforms.

    ", + "CreateStackRequest$ServiceRoleArn": "

    The stack's AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see Using Identifiers.

    ", + "CreateStackRequest$DefaultInstanceProfileArn": "

    The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "CreateStackRequest$DefaultOs": "

    The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS.
    • Microsoft Windows Server 2012 R2 Base.
    • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

    The default option is the current Amazon Linux version. For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    ", + "CreateStackRequest$HostnameTheme": "

    The stack's host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

    • Baked_Goods
    • Clouds
    • Europe_Cities
    • Fruits
    • Greek_Deities
    • Legendary_creatures_from_Japan
    • Planets_and_Moons
    • Roman_Deities
    • Scottish_Islands
    • US_Cities
    • Wild_Cats

    To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.

    ", + "CreateStackRequest$DefaultAvailabilityZone": "

    The stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description.

    ", + "CreateStackRequest$DefaultSubnetId": "

    The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description.

    ", + "CreateStackRequest$CustomJson": "

    A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "CreateStackRequest$DefaultSshKeyName": "

    A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

    ", + "CreateStackRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically installs new agent versions on the stack's instances as soon as they are available.
    • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks then automatically installs that version on the stack's instances.

    The default setting is the most recent release of the agent. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.", + "CreateStackResult$StackId": "

    The stack ID, which is an opaque string that you use to identify the stack when performing actions such as DescribeStacks.

    ", + "CreateUserProfileRequest$IamUserArn": "

    The user's IAM ARN.

    ", + "CreateUserProfileRequest$SshUsername": "

    The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, AWS OpsWorks generates one from the IAM user name.

    ", + "CreateUserProfileRequest$SshPublicKey": "

    The user's public SSH key.

    ", + "CreateUserProfileResult$IamUserArn": "

    The user's IAM ARN.

    ", + "DataSource$Type": "

    The data source's type, AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, or RdsDbInstance.

    ", + "DataSource$Arn": "

    The data source's ARN.

    ", + "DataSource$DatabaseName": "

    The database name.

    ", + "DeleteAppRequest$AppId": "

    The app ID.

    ", + "DeleteInstanceRequest$InstanceId": "

    The instance ID.

    ", + "DeleteLayerRequest$LayerId": "

    The layer ID.

    ", + "DeleteStackRequest$StackId": "

    The stack ID.

    ", + "DeleteUserProfileRequest$IamUserArn": "

    The user's IAM ARN.

    ", + "Deployment$DeploymentId": "

    The deployment ID.

    ", + "Deployment$StackId": "

    The stack ID.

    ", + "Deployment$AppId": "

    The app ID.

    ", + "Deployment$IamUserArn": "

    The user's IAM ARN.

    ", + "Deployment$Comment": "

    A user-defined comment.

    ", + "Deployment$Status": "

    The deployment status:

    • running
    • successful
    • failed
    ", + "Deployment$CustomJson": "

    A string that contains user-defined custom JSON. It can be used to override the corresponding default stack configuration attribute values for stack or to pass data to recipes. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "DeploymentCommandArgs$key": null, + "DeregisterEcsClusterRequest$EcsClusterArn": "

    The cluster's ARN.

    ", + "DeregisterElasticIpRequest$ElasticIp": "

    The Elastic IP address.

    ", + "DeregisterInstanceRequest$InstanceId": "

    The instance ID.

    ", + "DeregisterRdsDbInstanceRequest$RdsDbInstanceArn": "

    The Amazon RDS instance's ARN.

    ", + "DeregisterVolumeRequest$VolumeId": "

    The AWS OpsWorks volume ID, which is the GUID that AWS OpsWorks assigned to the instance when you registered the volume with the stack, not the Amazon EC2 volume ID.

    ", + "DescribeAgentVersionsRequest$StackId": "

    The stack ID.

    ", + "DescribeAppsRequest$StackId": "

    The app stack ID. If you use this parameter, DescribeApps returns a description of the apps in the specified stack.

    ", + "DescribeCommandsRequest$DeploymentId": "

    The deployment ID. If you include this parameter, DescribeCommands returns a description of the commands associated with the specified deployment.

    ", + "DescribeCommandsRequest$InstanceId": "

    The instance ID. If you include this parameter, DescribeCommands returns a description of the commands associated with the specified instance.

    ", + "DescribeDeploymentsRequest$StackId": "

    The stack ID. If you include this parameter, DescribeDeployments returns a description of the commands associated with the specified stack.

    ", + "DescribeDeploymentsRequest$AppId": "

    The app ID. If you include this parameter, DescribeDeployments returns a description of the commands associated with the specified app.

    ", + "DescribeEcsClustersRequest$StackId": "

    A stack ID. DescribeEcsClusters returns a description of the cluster that is registered with the stack.

    ", + "DescribeEcsClustersRequest$NextToken": "

    If the previous paginated request did not return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call DescribeEcsClusters again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

    ", + "DescribeEcsClustersResult$NextToken": "

    If a paginated request does not return all of the remaining results, this parameter is set to a token that you can assign to the request object's NextToken parameter to retrieve the next set of results. If the previous paginated request returned all of the remaining results, this parameter is set to null.

    ", + "DescribeElasticIpsRequest$InstanceId": "

    The instance ID. If you include this parameter, DescribeElasticIps returns a description of the Elastic IP addresses associated with the specified instance.

    ", + "DescribeElasticIpsRequest$StackId": "

    A stack ID. If you include this parameter, DescribeElasticIps returns a description of the Elastic IP addresses that are registered with the specified stack.

    ", + "DescribeElasticLoadBalancersRequest$StackId": "

    A stack ID. The action describes the stack's Elastic Load Balancing instances.

    ", + "DescribeInstancesRequest$StackId": "

    A stack ID. If you use this parameter, DescribeInstances returns descriptions of the instances associated with the specified stack.

    ", + "DescribeInstancesRequest$LayerId": "

    A layer ID. If you use this parameter, DescribeInstances returns descriptions of the instances associated with the specified layer.

    ", + "DescribeLayersRequest$StackId": "

    The stack ID.

    ", + "DescribePermissionsRequest$IamUserArn": "

    The user's IAM ARN. For more information about IAM ARNs, see Using Identifiers.

    ", + "DescribePermissionsRequest$StackId": "

    The stack ID.

    ", + "DescribeRaidArraysRequest$InstanceId": "

    The instance ID. If you use this parameter, DescribeRaidArrays returns descriptions of the RAID arrays associated with the specified instance.

    ", + "DescribeRaidArraysRequest$StackId": "

    The stack ID.

    ", + "DescribeRdsDbInstancesRequest$StackId": "

    The stack ID that the instances are registered with. The operation returns descriptions of all registered Amazon RDS instances.

    ", + "DescribeServiceErrorsRequest$StackId": "

    The stack ID. If you use this parameter, DescribeServiceErrors returns descriptions of the errors associated with the specified stack.

    ", + "DescribeServiceErrorsRequest$InstanceId": "

    The instance ID. If you use this parameter, DescribeServiceErrors returns descriptions of the errors associated with the specified instance.

    ", + "DescribeStackProvisioningParametersRequest$StackId": "

    The stack ID

    ", + "DescribeStackProvisioningParametersResult$AgentInstallerUrl": "

    The AWS OpsWorks agent installer's URL.

    ", + "DescribeStackSummaryRequest$StackId": "

    The stack ID.

    ", + "DescribeVolumesRequest$InstanceId": "

    The instance ID. If you use this parameter, DescribeVolumes returns descriptions of the volumes associated with the specified instance.

    ", + "DescribeVolumesRequest$StackId": "

    A stack ID. The action describes the stack's registered Amazon EBS volumes.

    ", + "DescribeVolumesRequest$RaidArrayId": "

    The RAID array ID. If you use this parameter, DescribeVolumes returns descriptions of the volumes associated with the specified RAID array.

    ", + "DetachElasticLoadBalancerRequest$ElasticLoadBalancerName": "

    The Elastic Load Balancing instance's name.

    ", + "DetachElasticLoadBalancerRequest$LayerId": "

    The ID of the layer that the Elastic Load Balancing instance is attached to.

    ", + "DisassociateElasticIpRequest$ElasticIp": "

    The Elastic IP address.

    ", + "EbsBlockDevice$SnapshotId": "

    The snapshot ID.

    ", + "EcsCluster$EcsClusterArn": "

    The cluster's ARN.

    ", + "EcsCluster$EcsClusterName": "

    The cluster name.

    ", + "EcsCluster$StackId": "

    The stack ID.

    ", + "ElasticIp$Ip": "

    The IP address.

    ", + "ElasticIp$Name": "

    The name.

    ", + "ElasticIp$Domain": "

    The domain.

    ", + "ElasticIp$Region": "

    The AWS region. For more information, see Regions and Endpoints.

    ", + "ElasticIp$InstanceId": "

    The ID of the instance that the address is attached to.

    ", + "ElasticLoadBalancer$ElasticLoadBalancerName": "

    The Elastic Load Balancing instance's name.

    ", + "ElasticLoadBalancer$Region": "

    The instance's AWS region.

    ", + "ElasticLoadBalancer$DnsName": "

    The instance's public DNS name.

    ", + "ElasticLoadBalancer$StackId": "

    The ID of the stack that the instance is associated with.

    ", + "ElasticLoadBalancer$LayerId": "

    The ID of the layer that the instance is attached to.

    ", + "ElasticLoadBalancer$VpcId": "

    The VPC ID.

    ", + "EnvironmentVariable$Key": "

    (Required) The environment variable's name, which can consist of up to 64 characters and must be specified. The name can contain upper- and lowercase letters, numbers, and underscores (_), but it must start with a letter or underscore.

    ", + "EnvironmentVariable$Value": "

    (Optional) The environment variable's value, which can be left empty. If you specify a value, it can contain up to 256 characters, which must all be printable.

    ", + "GetHostnameSuggestionRequest$LayerId": "

    The layer ID.

    ", + "GetHostnameSuggestionResult$LayerId": "

    The layer ID.

    ", + "GetHostnameSuggestionResult$Hostname": "

    The generated host name.

    ", + "GrantAccessRequest$InstanceId": "

    The instance's AWS OpsWorks ID.

    ", + "Instance$AgentVersion": "

    The agent version. This parameter is set to INHERIT if the instance inherits the default stack setting or to a version number for a fixed agent version.

    ", + "Instance$AmiId": "

    A custom AMI ID to be used to create the instance. For more information, see Instances

    ", + "Instance$AvailabilityZone": "

    The instance Availability Zone. For more information, see Regions and Endpoints.

    ", + "Instance$Ec2InstanceId": "

    The ID of the associated Amazon EC2 instance.

    ", + "Instance$EcsClusterArn": "

    For container instances, the Amazon ECS cluster's ARN.

    ", + "Instance$EcsContainerInstanceArn": "

    For container instances, the instance's ARN.

    ", + "Instance$ElasticIp": "

    The instance Elastic IP address.

    ", + "Instance$Hostname": "

    The instance host name.

    ", + "Instance$InfrastructureClass": "

    For registered instances, the infrastructure class: ec2 or on-premises.

    ", + "Instance$InstanceId": "

    The instance ID.

    ", + "Instance$InstanceProfileArn": "

    The ARN of the instance's IAM profile. For more information about IAM ARNs, see Using Identifiers.

    ", + "Instance$InstanceType": "

    The instance type, such as t2.micro.

    ", + "Instance$LastServiceErrorId": "

    The ID of the last service error. For more information, call DescribeServiceErrors.

    ", + "Instance$Os": "

    The instance's operating system.

    ", + "Instance$Platform": "

    The instance's platform.

    ", + "Instance$PrivateDns": "

    The instance's private DNS name.

    ", + "Instance$PrivateIp": "

    The instance's private IP address.

    ", + "Instance$PublicDns": "

    The instance public DNS name.

    ", + "Instance$PublicIp": "

    The instance public IP address.

    ", + "Instance$RegisteredBy": "

    For registered instances, who performed the registration.

    ", + "Instance$ReportedAgentVersion": "

    The instance's reported AWS OpsWorks agent version.

    ", + "Instance$RootDeviceVolumeId": "

    The root device volume ID.

    ", + "Instance$SshHostDsaKeyFingerprint": "

    The SSH key's Digital Signature Algorithm (DSA) fingerprint.

    ", + "Instance$SshHostRsaKeyFingerprint": "

    The SSH key's RSA fingerprint.

    ", + "Instance$SshKeyName": "

    The instance's Amazon EC2 key-pair name.

    ", + "Instance$StackId": "

    The stack ID.

    ", + "Instance$Status": "

    The instance status:

    • booting
    • connection_lost
    • online
    • pending
    • rebooting
    • requested
    • running_setup
    • setup_failed
    • shutting_down
    • start_failed
    • stopped
    • stopping
    • terminated
    • terminating
    ", + "Instance$SubnetId": "

    The instance's subnet ID; applicable only if the stack is running in a VPC.

    ", + "InstanceIdentity$Document": "

    A JSON document that contains the metadata.

    ", + "InstanceIdentity$Signature": "

    A signature that can be used to verify the document's accuracy and authenticity.

    ", + "Layer$StackId": "

    The layer stack ID.

    ", + "Layer$LayerId": "

    The layer ID.

    ", + "Layer$Name": "

    The layer name.

    ", + "Layer$Shortname": "

    The layer short name.

    ", + "Layer$CustomInstanceProfileArn": "

    The ARN of the default IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "Layer$CustomJson": "

    A JSON formatted string containing the layer's custom stack configuration and deployment attributes.

    ", + "LayerAttributes$value": null, + "LoadBasedAutoScalingConfiguration$LayerId": "

    The layer ID.

    ", + "Parameters$key": null, + "Parameters$value": null, + "Permission$StackId": "

    A stack ID.

    ", + "Permission$IamUserArn": "

    The Amazon Resource Name (ARN) for an AWS Identity and Access Management (IAM) role. For more information about IAM ARNs, see Using Identifiers.

    ", + "Permission$Level": "

    The user's permission level, which must be the following:

    • deny
    • show
    • deploy
    • manage
    • iam_only

    For more information on the permissions associated with these levels, see Managing User Permissions

    ", + "RaidArray$RaidArrayId": "

    The array ID.

    ", + "RaidArray$InstanceId": "

    The instance ID.

    ", + "RaidArray$Name": "

    The array name.

    ", + "RaidArray$Device": "

    The array's Linux device. For example /dev/mdadm0.

    ", + "RaidArray$MountPoint": "

    The array's mount point.

    ", + "RaidArray$AvailabilityZone": "

    The array's Availability Zone. For more information, see Regions and Endpoints.

    ", + "RaidArray$StackId": "

    The stack ID.

    ", + "RaidArray$VolumeType": "

    The volume type, standard or PIOPS.

    ", + "RdsDbInstance$RdsDbInstanceArn": "

    The instance's ARN.

    ", + "RdsDbInstance$DbInstanceIdentifier": "

    The DB instance identifier.

    ", + "RdsDbInstance$DbUser": "

    The master user name.

    ", + "RdsDbInstance$DbPassword": "

    AWS OpsWorks returns *****FILTERED***** instead of the actual value.

    ", + "RdsDbInstance$Region": "

    The instance's AWS region.

    ", + "RdsDbInstance$Address": "

    The instance's address.

    ", + "RdsDbInstance$Engine": "

    The instance's database engine.

    ", + "RdsDbInstance$StackId": "

    The ID of the stack that the instance is registered with.

    ", + "RebootInstanceRequest$InstanceId": "

    The instance ID.

    ", + "RegisterEcsClusterRequest$EcsClusterArn": "

    The cluster's ARN.

    ", + "RegisterEcsClusterRequest$StackId": "

    The stack ID.

    ", + "RegisterEcsClusterResult$EcsClusterArn": "

    The cluster's ARN.

    ", + "RegisterElasticIpRequest$ElasticIp": "

    The Elastic IP address.

    ", + "RegisterElasticIpRequest$StackId": "

    The stack ID.

    ", + "RegisterElasticIpResult$ElasticIp": "

    The Elastic IP address.

    ", + "RegisterInstanceRequest$StackId": "

    The ID of the stack that the instance is to be registered with.

    ", + "RegisterInstanceRequest$Hostname": "

    The instance's hostname.

    ", + "RegisterInstanceRequest$PublicIp": "

    The instance's public IP address.

    ", + "RegisterInstanceRequest$PrivateIp": "

    The instance's private IP address.

    ", + "RegisterInstanceRequest$RsaPublicKey": "

    The instance's public RSA key. This key is used to encrypt communication between the instance and the service.

    ", + "RegisterInstanceRequest$RsaPublicKeyFingerprint": "

    The instance's public RSA key fingerprint.

    ", + "RegisterInstanceResult$InstanceId": "

    The registered instance's AWS OpsWorks ID.

    ", + "RegisterRdsDbInstanceRequest$StackId": "

    The stack ID.

    ", + "RegisterRdsDbInstanceRequest$RdsDbInstanceArn": "

    The Amazon RDS instance's ARN.

    ", + "RegisterRdsDbInstanceRequest$DbUser": "

    The database's master user name.

    ", + "RegisterRdsDbInstanceRequest$DbPassword": "

    The database password.

    ", + "RegisterVolumeRequest$Ec2VolumeId": "

    The Amazon EBS volume ID.

    ", + "RegisterVolumeRequest$StackId": "

    The stack ID.

    ", + "RegisterVolumeResult$VolumeId": "

    The volume ID.

    ", + "ReportedOs$Family": "

    The operating system family.

    ", + "ReportedOs$Name": "

    The operating system name.

    ", + "ReportedOs$Version": "

    The operating system version.

    ", + "ResourceNotFoundException$message": "

    The exception message.

    ", + "SelfUserProfile$IamUserArn": "

    The user's IAM ARN.

    ", + "SelfUserProfile$Name": "

    The user's name.

    ", + "SelfUserProfile$SshUsername": "

    The user's SSH user name.

    ", + "SelfUserProfile$SshPublicKey": "

    The user's SSH public key.

    ", + "ServiceError$ServiceErrorId": "

    The error ID.

    ", + "ServiceError$StackId": "

    The stack ID.

    ", + "ServiceError$InstanceId": "

    The instance ID.

    ", + "ServiceError$Type": "

    The error type.

    ", + "ServiceError$Message": "

    A message that describes the error.

    ", + "SetLoadBasedAutoScalingRequest$LayerId": "

    The layer ID.

    ", + "SetPermissionRequest$StackId": "

    The stack ID.

    ", + "SetPermissionRequest$IamUserArn": "

    The user's IAM ARN.

    ", + "SetPermissionRequest$Level": "

    The user's permission level, which must be set to one of the following strings. You cannot set your own permissions level.

    • deny
    • show
    • deploy
    • manage
    • iam_only

    For more information on the permissions associated with these levels, see Managing User Permissions.

    ", + "SetTimeBasedAutoScalingRequest$InstanceId": "

    The instance ID.

    ", + "Source$Url": "

    The source URL.

    ", + "Source$Username": "

    This parameter depends on the repository type.

    • For Amazon S3 bundles, set Username to the appropriate IAM access key ID.
    • For HTTP bundles, Git repositories, and Subversion repositories, set Username to the user name.
    ", + "Source$Password": "

    When included in a request, the parameter depends on the repository type.

    • For Amazon S3 bundles, set Password to the appropriate IAM secret access key.
    • For HTTP bundles and Subversion repositories, set Password to the password.

    For more information on how to safely handle IAM credentials, see .

    In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual value.

    ", + "Source$SshKey": "

    In requests, the repository's SSH key.

    In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual value.

    ", + "Source$Revision": "

    The application's version. AWS OpsWorks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed.

    ", + "SslConfiguration$Certificate": "

    The contents of the certificate's domain.crt file.

    ", + "SslConfiguration$PrivateKey": "

    The private key; the contents of the certificate's domain.kex file.

    ", + "SslConfiguration$Chain": "

    Optional. Can be used to specify an intermediate certificate authority key or client authentication.

    ", + "Stack$StackId": "

    The stack ID.

    ", + "Stack$Name": "

    The stack name.

    ", + "Stack$Arn": "

    The stack's ARN.

    ", + "Stack$Region": "

    The stack AWS region, such as \"us-east-1\". For more information about AWS regions, see Regions and Endpoints.

    ", + "Stack$VpcId": "

    The VPC ID; applicable only if the stack is running in a VPC.

    ", + "Stack$ServiceRoleArn": "

    The stack AWS Identity and Access Management (IAM) role.

    ", + "Stack$DefaultInstanceProfileArn": "

    The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "Stack$DefaultOs": "

    The stack's default operating system.

    ", + "Stack$HostnameTheme": "

    The stack host name theme, with spaces replaced by underscores.

    ", + "Stack$DefaultAvailabilityZone": "

    The stack's default Availability Zone. For more information, see Regions and Endpoints.

    ", + "Stack$DefaultSubnetId": "

    The default subnet ID; applicable only if the stack is running in a VPC.

    ", + "Stack$CustomJson": "

    A JSON object that contains user-defined attributes to be added to the stack configuration and deployment attributes. You can use custom JSON to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format and must escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "Stack$DefaultSshKeyName": "

    A default Amazon EC2 key pair for the stack's instances. You can override this value when you create or update an instance.

    ", + "Stack$AgentVersion": "

    The agent version. This parameter is set to LATEST for auto-update, or a version number for a fixed agent version.

    ", + "StackAttributes$value": null, + "StackConfigurationManager$Name": "

    The name. This parameter must be set to \"Chef\".

    ", + "StackConfigurationManager$Version": "

    The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 11.4.

    ", + "StackSummary$StackId": "

    The stack ID.

    ", + "StackSummary$Name": "

    The stack name.

    ", + "StackSummary$Arn": "

    The stack's ARN.

    ", + "StartInstanceRequest$InstanceId": "

    The instance ID.

    ", + "StartStackRequest$StackId": "

    The stack ID.

    ", + "StopInstanceRequest$InstanceId": "

    The instance ID.

    ", + "StopStackRequest$StackId": "

    The stack ID.

    ", + "Strings$member": null, + "TemporaryCredential$Username": "

    The user name.

    ", + "TemporaryCredential$Password": "

    The password.

    ", + "TemporaryCredential$InstanceId": "

    The instance's AWS OpsWorks ID.

    ", + "TimeBasedAutoScalingConfiguration$InstanceId": "

    The instance ID.

    ", + "UnassignInstanceRequest$InstanceId": "

    The instance ID.

    ", + "UnassignVolumeRequest$VolumeId": "

    The volume ID.

    ", + "UpdateAppRequest$AppId": "

    The app ID.

    ", + "UpdateAppRequest$Name": "

    The app name.

    ", + "UpdateAppRequest$Description": "

    A description of the app.

    ", + "UpdateElasticIpRequest$ElasticIp": "

    The address.

    ", + "UpdateElasticIpRequest$Name": "

    The new name.

    ", + "UpdateInstanceRequest$InstanceId": "

    The instance ID.

    ", + "UpdateInstanceRequest$InstanceType": "

    The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table.

    ", + "UpdateInstanceRequest$Hostname": "

    The instance host name.

    ", + "UpdateInstanceRequest$Os": "

    The instance's operating system, which must be set to one of the following.

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS.
    • Microsoft Windows Server 2012 R2 Base.
    • A custom AMI: Custom.

    For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information on the supported operating systems, see Operating Systems. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

    You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.", + "UpdateInstanceRequest$AmiId": "

    A custom AMI ID to be used to create the instance. The AMI must be based on one of the supported operating systems. For more information, see Instances

    If you specify a custom AMI, you must set Os to Custom.", + "UpdateInstanceRequest$SshKeyName": "

    The instance's Amazon EC2 key name.

    ", + "UpdateInstanceRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • INHERIT - Use the stack's default agent version setting.
    • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new version. AWS OpsWorks then automatically installs that version on the instance.

    The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    ", + "UpdateLayerRequest$LayerId": "

    The layer ID.

    ", + "UpdateLayerRequest$Name": "

    The layer name, which is used by the console.

    ", + "UpdateLayerRequest$Shortname": "

    For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks and by Chef. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters and must be in the following format: /\A[a-z0-9\-\_\.]+\Z/.

    The built-in layers' short names are defined by AWS OpsWorks. For more information, see the Layer Reference

    ", + "UpdateLayerRequest$CustomInstanceProfileArn": "

    The ARN of an IAM profile to be used for all of the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "UpdateLayerRequest$CustomJson": "

    A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON.

    ", + "UpdateMyUserProfileRequest$SshPublicKey": "

    The user's SSH public key.

    ", + "UpdateRdsDbInstanceRequest$RdsDbInstanceArn": "

    The Amazon RDS instance's ARN.

    ", + "UpdateRdsDbInstanceRequest$DbUser": "

    The master user name.

    ", + "UpdateRdsDbInstanceRequest$DbPassword": "

    The database password.

    ", + "UpdateStackRequest$StackId": "

    The stack ID.

    ", + "UpdateStackRequest$Name": "

    The stack's new name.

    ", + "UpdateStackRequest$ServiceRoleArn": "

    Do not use this parameter. You cannot update a stack's service role.

    ", + "UpdateStackRequest$DefaultInstanceProfileArn": "

    The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers.

    ", + "UpdateStackRequest$DefaultOs": "

    The stack's operating system, which must be set to one of the following:

    • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS.
    • Microsoft Windows Server 2012 R2 Base.
    • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

    The default option is the stack's current operating system. For more information on the supported operating systems, see AWS OpsWorks Operating Systems.

    ", + "UpdateStackRequest$HostnameTheme": "

    The stack's new host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are:

    • Baked_Goods
    • Clouds
    • Europe_Cities
    • Fruits
    • Greek_Deities
    • Legendary_creatures_from_Japan
    • Planets_and_Moons
    • Roman_Deities
    • Scottish_Islands
    • US_Cities
    • Wild_Cats

    To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme.

    ", + "UpdateStackRequest$DefaultAvailabilityZone": "

    The stack's default Availability Zone, which must be in the stack's region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see CreateStack.

    ", + "UpdateStackRequest$DefaultSubnetId": "

    The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description.

    ", + "UpdateStackRequest$CustomJson": "

    A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format and escape characters such as '\"':

    \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

    For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

    ", + "UpdateStackRequest$DefaultSshKeyName": "

    A default Amazon EC2 key-pair name. The default value is none. If you specify a key-pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance.

    ", + "UpdateStackRequest$AgentVersion": "

    The default AWS OpsWorks agent version. You have the following options:

    • Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically installs new agent versions on the stack's instances as soon as they are available.
    • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks then automatically installs that version on the stack's instances.

    The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions.

    You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.", + "UpdateUserProfileRequest$IamUserArn": "

    The user IAM ARN.

    ", + "UpdateUserProfileRequest$SshUsername": "

    The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, AWS OpsWorks generates one from the IAM user name.

    ", + "UpdateUserProfileRequest$SshPublicKey": "

    The user's new SSH public key.

    ", + "UpdateVolumeRequest$VolumeId": "

    The volume ID.

    ", + "UpdateVolumeRequest$Name": "

    The new name.

    ", + "UpdateVolumeRequest$MountPoint": "

    The new mount point.

    ", + "UserProfile$IamUserArn": "

    The user's IAM ARN.

    ", + "UserProfile$Name": "

    The user's name.

    ", + "UserProfile$SshUsername": "

    The user's SSH user name.

    ", + "UserProfile$SshPublicKey": "

    The user's SSH public key.

    ", + "ValidationException$message": "

    The exception message.

    ", + "Volume$VolumeId": "

    The volume ID.

    ", + "Volume$Ec2VolumeId": "

    The Amazon EC2 volume ID.

    ", + "Volume$Name": "

    The volume name.

    ", + "Volume$RaidArrayId": "

    The RAID array ID.

    ", + "Volume$InstanceId": "

    The instance ID.

    ", + "Volume$Status": "

    The value returned by DescribeVolumes.

    ", + "Volume$Device": "

    The device name.

    ", + "Volume$MountPoint": "

    The volume mount point. For example \"/dev/sdh\".

    ", + "Volume$Region": "

    The AWS region. For more information about AWS regions, see Regions and Endpoints.

    ", + "Volume$AvailabilityZone": "

    The volume Availability Zone. For more information, see Regions and Endpoints.

    ", + "Volume$VolumeType": "

    The volume type, standard or PIOPS.

    ", + "VolumeConfiguration$MountPoint": "

    The volume mount point. For example \"/dev/sdh\".

    ", + "VolumeConfiguration$VolumeType": "

    The volume type:

    • standard - Magnetic
    • io1 - Provisioned IOPS (SSD)
    • gp2 - General Purpose (SSD)
    " + } + }, + "Strings": { + "base": null, + "refs": { + "App$Domains": "

    The app vhost settings with multiple domains separated by commas. For example: 'www.example.com, example.com'

    ", + "AssignInstanceRequest$LayerIds": "

    The layer ID, which must correspond to a custom layer. You cannot assign a registered instance to a built-in layer.

    ", + "AutoScalingThresholds$Alarms": "

    Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack.

    To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. You can either have AWS OpsWorks update the role for you when you first use this feature or you can edit the role manually. For more information, see Allowing AWS OpsWorks to Act on Your Behalf.", + "CloneStackRequest$CloneAppIds": "

    A list of source stack app IDs to be included in the cloned stack.

    ", + "CreateAppRequest$Domains": "

    The app virtual host settings, with multiple domains separated by commas. For example: 'www.example.com, example.com'

    ", + "CreateDeploymentRequest$InstanceIds": "

    The instance IDs for the deployment targets.

    ", + "CreateInstanceRequest$LayerIds": "

    An array that contains the instance's layer IDs.

    ", + "CreateLayerRequest$CustomSecurityGroupIds": "

    An array containing the layer custom security group IDs.

    ", + "CreateLayerRequest$Packages": "

    An array of Package objects that describes the layer packages.

    ", + "Deployment$InstanceIds": "

    The IDs of the target instances.

    ", + "DeploymentCommandArgs$value": null, + "DescribeAppsRequest$AppIds": "

    An array of app IDs for the apps to be described. If you use this parameter, DescribeApps returns a description of the specified apps. Otherwise, it returns a description of every app.

    ", + "DescribeCommandsRequest$CommandIds": "

    An array of command IDs. If you include this parameter, DescribeCommands returns a description of the specified commands. Otherwise, it returns a description of every command.

    ", + "DescribeDeploymentsRequest$DeploymentIds": "

    An array of deployment IDs to be described. If you include this parameter, DescribeDeployments returns a description of the specified deployments. Otherwise, it returns a description of every deployment.

    ", + "DescribeEcsClustersRequest$EcsClusterArns": "

    A list of ARNs, one for each cluster to be described.

    ", + "DescribeElasticIpsRequest$Ips": "

    An array of Elastic IP addresses to be described. If you include this parameter, DescribeElasticIps returns a description of the specified Elastic IP addresses. Otherwise, it returns a description of every Elastic IP address.

    ", + "DescribeElasticLoadBalancersRequest$LayerIds": "

    A list of layer IDs. The action describes the Elastic Load Balancing instances for the specified layers.

    ", + "DescribeInstancesRequest$InstanceIds": "

    An array of instance IDs to be described. If you use this parameter, DescribeInstances returns a description of the specified instances. Otherwise, it returns a description of every instance.

    ", + "DescribeLayersRequest$LayerIds": "

    An array of layer IDs that specify the layers to be described. If you omit this parameter, DescribeLayers returns a description of every layer in the specified stack.

    ", + "DescribeLoadBasedAutoScalingRequest$LayerIds": "

    An array of layer IDs.

    ", + "DescribeRaidArraysRequest$RaidArrayIds": "

    An array of RAID array IDs. If you use this parameter, DescribeRaidArrays returns descriptions of the specified arrays. Otherwise, it returns a description of every array.

    ", + "DescribeRdsDbInstancesRequest$RdsDbInstanceArns": "

    An array containing the ARNs of the instances to be described.

    ", + "DescribeServiceErrorsRequest$ServiceErrorIds": "

    An array of service error IDs. If you use this parameter, DescribeServiceErrors returns descriptions of the specified errors. Otherwise, it returns a description of every error.

    ", + "DescribeStacksRequest$StackIds": "

    An array of stack IDs that specify the stacks to be described. If you omit this parameter, DescribeStacks returns a description of every stack.

    ", + "DescribeTimeBasedAutoScalingRequest$InstanceIds": "

    An array of instance IDs.

    ", + "DescribeUserProfilesRequest$IamUserArns": "

    An array of IAM user ARNs that identify the users to be described.

    ", + "DescribeVolumesRequest$VolumeIds": "

    An array of volume IDs. If you use this parameter, DescribeVolumes returns descriptions of the specified volumes. Otherwise, it returns a description of every volume.

    ", + "ElasticLoadBalancer$AvailabilityZones": "

    A list of Availability Zones.

    ", + "ElasticLoadBalancer$SubnetIds": "

    A list of subnet IDs, if the stack is running in a VPC.

    ", + "ElasticLoadBalancer$Ec2InstanceIds": "

    A list of the EC2 instances that the Elastic Load Balancing instance is managing traffic for.

    ", + "Instance$LayerIds": "

    An array containing the instance layer IDs.

    ", + "Instance$SecurityGroupIds": "

    An array containing the instance security group IDs.

    ", + "Layer$CustomSecurityGroupIds": "

    An array containing the layer's custom security group IDs.

    ", + "Layer$DefaultSecurityGroupNames": "

    An array containing the layer's security group names.

    ", + "Layer$Packages": "

    An array of Package objects that describe the layer's packages.

    ", + "Recipes$Setup": "

    An array of custom recipe names to be run following a setup event.

    ", + "Recipes$Configure": "

    An array of custom recipe names to be run following a configure event.

    ", + "Recipes$Deploy": "

    An array of custom recipe names to be run following a deploy event.

    ", + "Recipes$Undeploy": "

    An array of custom recipe names to be run following an undeploy event.

    ", + "Recipes$Shutdown": "

    An array of custom recipe names to be run following a shutdown event.

    ", + "UpdateAppRequest$Domains": "

    The app's virtual host settings, with multiple domains separated by commas. For example: 'www.example.com, example.com'

    ", + "UpdateInstanceRequest$LayerIds": "

    The instance's layer IDs.

    ", + "UpdateLayerRequest$CustomSecurityGroupIds": "

    An array containing the layer's custom security group IDs.

    ", + "UpdateLayerRequest$Packages": "

    An array of Package objects that describe the layer's packages.

    " + } + }, + "Switch": { + "base": null, + "refs": { + "DailyAutoScalingSchedule$value": null + } + }, + "TemporaryCredential": { + "base": "

    Contains the data needed by RDP clients such as the Microsoft Remote Desktop Connection to log in to the instance.

    ", + "refs": { + "GrantAccessResult$TemporaryCredential": "

    A TemporaryCredential object that contains the data needed to log in to the instance by RDP clients, such as the Microsoft Remote Desktop Connection.

    " + } + }, + "TimeBasedAutoScalingConfiguration": { + "base": "

    Describes an instance's time-based auto scaling configuration.

    ", + "refs": { + "TimeBasedAutoScalingConfigurations$member": null + } + }, + "TimeBasedAutoScalingConfigurations": { + "base": null, + "refs": { + "DescribeTimeBasedAutoScalingResult$TimeBasedAutoScalingConfigurations": "

    An array of TimeBasedAutoScalingConfiguration objects that describe the configuration for the specified instances.

    " + } + }, + "UnassignInstanceRequest": { + "base": null, + "refs": { + } + }, + "UnassignVolumeRequest": { + "base": null, + "refs": { + } + }, + "UpdateAppRequest": { + "base": null, + "refs": { + } + }, + "UpdateElasticIpRequest": { + "base": null, + "refs": { + } + }, + "UpdateInstanceRequest": { + "base": null, + "refs": { + } + }, + "UpdateLayerRequest": { + "base": null, + "refs": { + } + }, + "UpdateMyUserProfileRequest": { + "base": null, + "refs": { + } + }, + "UpdateRdsDbInstanceRequest": { + "base": null, + "refs": { + } + }, + "UpdateStackRequest": { + "base": null, + "refs": { + } + }, + "UpdateUserProfileRequest": { + "base": null, + "refs": { + } + }, + "UpdateVolumeRequest": { + "base": null, + "refs": { + } + }, + "UserProfile": { + "base": "

    Describes a user's SSH information.

    ", + "refs": { + "UserProfiles$member": null + } + }, + "UserProfiles": { + "base": null, + "refs": { + "DescribeUserProfilesResult$UserProfiles": "

    A Users object that describes the specified users.

    " + } + }, + "ValidForInMinutes": { + "base": null, + "refs": { + "GrantAccessRequest$ValidForInMinutes": "

    The length of time (in minutes) that the grant is valid. When the grant expires at the end of this period, the user will no longer be able to use the credentials to log in. If the user is logged in at the time, he or she automatically will be logged out.

    " + } + }, + "ValidationException": { + "base": "

    Indicates that a request was invalid.

    ", + "refs": { + } + }, + "VirtualizationType": { + "base": null, + "refs": { + "Instance$VirtualizationType": "

    The instance's virtualization type: paravirtual or hvm.

    " + } + }, + "Volume": { + "base": "

    Describes an instance's Amazon EBS volume.

    ", + "refs": { + "Volumes$member": null + } + }, + "VolumeConfiguration": { + "base": "

    Describes an Amazon EBS volume configuration.

    ", + "refs": { + "VolumeConfigurations$member": null + } + }, + "VolumeConfigurations": { + "base": null, + "refs": { + "CreateLayerRequest$VolumeConfigurations": "

    A VolumeConfigurations object that describes the layer's Amazon EBS volumes.

    ", + "Layer$VolumeConfigurations": "

    A VolumeConfigurations object that describes the layer's Amazon EBS volumes.

    ", + "UpdateLayerRequest$VolumeConfigurations": "

    A VolumeConfigurations object that describes the layer's Amazon EBS volumes.

    " + } + }, + "VolumeType": { + "base": null, + "refs": { + "EbsBlockDevice$VolumeType": "

    The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic volumes.

    " + } + }, + "Volumes": { + "base": null, + "refs": { + "DescribeVolumesResult$Volumes": "

    An array of volume IDs.

    " + } + }, + "WeeklyAutoScalingSchedule": { + "base": "

    Describes a time-based instance's auto scaling schedule. The schedule consists of a set of key-value pairs.

    • The key is the time period (a UTC hour) and must be an integer from 0 - 23.
    • The value indicates whether the instance should be online or offline for the specified period, and must be set to \"on\" or \"off\"

    The default setting for all time periods is off, so you use the following parameters primarily to specify the online periods. You don't have to explicitly specify offline periods unless you want to change an online period to an offline period.

    The following example specifies that the instance should be online for four hours, from UTC 1200 - 1600. It will be off for the remainder of the day.

    { \"12\":\"on\", \"13\":\"on\", \"14\":\"on\", \"15\":\"on\" }

    ", + "refs": { + "SetTimeBasedAutoScalingRequest$AutoScalingSchedule": "

    An AutoScalingSchedule with the instance schedule.

    ", + "TimeBasedAutoScalingConfiguration$AutoScalingSchedule": "

    A WeeklyAutoScalingSchedule object with the instance schedule.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,55 @@ +{ + "pagination": { + "DescribeApps": { + "result_key": "Apps" + }, + "DescribeCommands": { + "result_key": "Commands" + }, + "DescribeDeployments": { + "result_key": "Deployments" + }, + "DescribeEcsClusters": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "EcsClusters" + }, + "DescribeElasticIps": { + "result_key": "ElasticIps" + }, + "DescribeElasticLoadBalancers": { + "result_key": "ElasticLoadBalancers" + }, + "DescribeInstances": { + "result_key": "Instances" + }, + "DescribeLayers": { + "result_key": "Layers" + }, + "DescribeLoadBasedAutoScaling": { + "result_key": "LoadBasedAutoScalingConfigurations" + }, + "DescribePermissions": { + 
"result_key": "Permissions" + }, + "DescribeRaidArrays": { + "result_key": "RaidArrays" + }, + "DescribeServiceErrors": { + "result_key": "ServiceErrors" + }, + "DescribeStacks": { + "result_key": "Stacks" + }, + "DescribeTimeBasedAutoScaling": { + "result_key": "TimeBasedAutoScalingConfigurations" + }, + "DescribeUserProfiles": { + "result_key": "UserProfiles" + }, + "DescribeVolumes": { + "result_key": "Volumes" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/opsworks/2013-02-18/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,239 @@ +{ + "version": 2, + "waiters": { + "AppExists": { + "delay": 1, + "operation": "DescribeApps", + "maxAttempts": 40, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "matcher": "status", + "expected": 400, + "state": "failure" + } + ] + }, + "DeploymentSuccessful": { + "delay": 15, + "operation": "DescribeDeployments", + "maxAttempts": 40, + "description": "Wait until a deployment has completed successfully", + "acceptors": [ + { + "expected": "successful", + "matcher": "pathAll", + "state": "success", + "argument": "Deployments[].Status" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Deployments[].Status" + } + ] + }, + "InstanceOnline": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "description": "Wait until OpsWorks instance is online.", + "acceptors": [ + { + "expected": "online", + "matcher": "pathAll", + "state": "success", + 
"argument": "Instances[].Status" + }, + { + "expected": "setup_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "shutting_down", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "start_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stopped", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stopping", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "terminating", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "terminated", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stop_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + } + ] + }, + "InstanceStopped": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "description": "Wait until OpsWorks instance is stopped.", + "acceptors": [ + { + "expected": "stopped", + "matcher": "pathAll", + "state": "success", + "argument": "Instances[].Status" + }, + { + "expected": "booting", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "online", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "requested", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "running_setup", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": 
"setup_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "start_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "stop_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + } + ] + }, + "InstanceTerminated": { + "delay": 15, + "operation": "DescribeInstances", + "maxAttempts": 40, + "description": "Wait until OpsWorks instance is terminated.", + "acceptors": [ + { + "expected": "terminated", + "matcher": "pathAll", + "state": "success", + "argument": "Instances[].Status" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + }, + { + "expected": "booting", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "online", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "pending", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "requested", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "running_setup", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "setup_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + }, + { + "expected": "start_failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Instances[].Status" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/api-2.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2901 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-01-10", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2013-01-10/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + 
{"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + 
}, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + 
"output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, 
+ "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + 
{"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + 
"resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + 
"type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ProvisionedIopsCapable":{"shape":"Boolean"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "TargetDBSnapshotIdentifier":{"shape":"String"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "AllocatedStorage", + "DBInstanceClass", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + 
"OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + 
"CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + 
"locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMembership":{"shape":"OptionGroupMembership"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } 
+ }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + 
"DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + 
"DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ 
+ "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + 
"DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + 
"SnapshotType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + 
"type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + 
"SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "Id":{"shape":"String"}, + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + 
"IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + 
"error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"} + } + }, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + 
"OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + 
"VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"} + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + 
"OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + 
"IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", 
+ "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + 
"type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + 
"ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + } + }, + 
"RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + 
"Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + 
"member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1681 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a familiar MySQL or Oracle database server. This means the code, applications, and tools you already use today with your existing MySQL or Oracle databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB Instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

    This is the Amazon RDS API Reference. It contains a comprehensive description of all Amazon RDS Query APIs and data types. Note that this API is asynchronous and some actions may require polling to determine when an action has been applied. See the parameter description to determine if a change is applied immediately or on the next instance reboot or during the maintenance window. To get started with Amazon RDS, go to the Amazon RDS Getting Started Guide. For more information on Amazon RDS concepts and usage scenarios, go to the Amazon RDS User Guide.

    ", + "operations": { + "AddSourceIdentifierToSubscription": "

    Adds a source identifier to an existing RDS event notification subscription.

    ", + "AddTagsToResource": "

    Adds metadata tags to a DB Instance. These tags can also be used with cost allocation reporting to track cost associated with a DB Instance.

    For an overview on tagging DB Instances, see DB Instance Tags.

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC Security Groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

    You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB Instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB Instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DBSnapshot must be in the \"available\" state.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB Instance that acts as a Read Replica of a source DB Instance.

    All Read Replica DB Instances are created as Single-AZ deployments with backups disabled. All other DB Instance attributes (including DB Security Groups and DB Parameter Groups) are inherited from the source DB Instance, except as specified below.

    The source DB Instance must have backup retention enabled.

    ", + "CreateDBParameterGroup": "

    Creates a new DB Parameter Group.

    A DB Parameter Group is initially created with the default parameters for the database engine used by the DB Instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB Parameter Group, you need to associate it with your DB Instance using ModifyDBInstance. When you associate a new DB Parameter Group with a running DB Instance, you need to reboot the DB Instance for the new DB Parameter Group and associated settings to take effect.

    ", + "CreateDBSecurityGroup": "

    Creates a new DB Security Group. DB Security Groups control access to a DB Instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.

    ", + "CreateOptionGroup": "

    Creates a new Option Group.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance API deletes a previously provisioned RDS instance. A successful response from the web service indicates the request was received correctly. If a final DBSnapshot is requested the status of the RDS instance will be \"deleting\" until the DBSnapshot is created. DescribeDBInstance is used to monitor the status of this operation. This cannot be canceled or reverted once submitted.

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup cannot be associated with any RDS instances to be deleted.

    The specified DB Parameter Group cannot be associated with any DB Instances. ", + "DeleteDBSecurityGroup": "

    Deletes a DB Security Group.

    The specified DB Security Group must not be associated with any DB Instances.", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot.

    The DBSnapshot must be in the available state to be deleted.", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing Option Group.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DBParameterGroup.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DBParameterGroup.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DBSecurityGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeDBSnapshots": "

    Returns information about DBSnapshots. This API supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB instances, DB security groups, DB Snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB Iinstance, DB security group, DB Snapshot, or DB parameter group can be obtained by providing the source identifier as a parameter. By default, the past hour of events are returned.

    You can see a list of event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB Instance options for the specified engine.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB Instances for this account, or about a specified reserved DB Instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB Instance offerings.

    ", + "ListTagsForResource": "

    Lists all tags on a DB Instance.

    For an overview on tagging DB Instances, see DB Instance Tags.

    ", + "ModifyDBInstance": "

    Modify settings for a DB Instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DBParameterGroup. To modify more than one parameter submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    The apply-immediate method can be used only for dynamic parameters; the pending-reboot method can be used with MySQL and Oracle DB Instances for either dynamic or static parameters. For Microsoft SQL Server DB Instances, the pending-reboot method can be used only for static parameters.

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing Option Group.

    ", + "PromoteReadReplica": "

    Promotes a Read Replica DB Instance to a standalone DB Instance.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB Instance offering.

    ", + "RebootDBInstance": "

    Reboots a previously provisioned RDS instance. This API results in the application of modified DBParameterGroup parameters with ApplyStatus of pending-reboot to the RDS instance. This action is taken as soon as possible, and results in a momentary outage to the RDS instance during which the RDS instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. A DBInstance event is created when the reboot is completed.

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from a DB Instance.

    For an overview on tagging DB Instances, see DB Instance Tags.

    ", + "ResetDBParameterGroup": "

    Modifies the parameters of a DBParameterGroup to the engine/system default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DBParameterGroup specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB Instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB Instance to an arbitrary point-in-time. Users can restore to any point in time before the latestRestorableTime for up to backupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group.

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS may not also be authorized via IAM to perform necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of availability zones for the orderable DB Instance.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AvailabilityZone$ProvisionedIopsCapable": "

    True indicates the availability zone is capable of provisioned IOPs.

    ", + "DBInstance$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": null, + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB Snapshot is created before the DB Instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB Snapshot is created before the DB Instance is deleted.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether or not the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB Instance.

    If this parameter is passed as false, changes to the DB Instance are applied on the next call to RebootDBInstance, the next maintenance reboot, or the next failure reboot, whichever occurs first. See each parameter to determine when a change is applied.

    Default: false

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB Instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the Option Group.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value 'true' indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB Instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB Instance can have a read replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB Instance.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB Parameter Group to default values.

    Default: true

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB Instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB Instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": null, + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB Instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": null, + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB Instance is a read replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB Instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": null, + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": null, + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window.

    " + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB Parameter Group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB Parameter Groups applied to this DB Instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides List of DB Security Group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the Option requires access to a port, then this DB Security Group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB Security Groups to associate with this DB Instance.

    Default: The default DB Security Group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB Security Groups to authorize on this DB Instance. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMembership name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Provides the information of the subnet group associated with the DB instance, including the name, description and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet groups.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB Instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB Instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for a event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    Specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size specified in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB Snapshots are retained.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB Instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    MySQL

    Constraints: Must be an integer from 5 to 1024.

    Type: Integer

    Oracle

    Constraints: Must be an integer from 10 to 1024.

    SQL Server

    Constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 30 to 1024 (Express Edition and Web Edition)

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    • Cannot be set to 0 if the DB Instance is a master instance with read replicas
    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434 and 3389.

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB Instance uses for connections.

    Default: Inherits from the source DB Instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB Instance at the time of the snapshot.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-1024

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-1024

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 8
    • Cannot be set to 0 if the DB Instance is a master instance with read replicas or if the DB Instance is a read replica
    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    ", + "Option$Port": "

    If required, the port configured for this option to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB Instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB Instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB Instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB Instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB Instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    " + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from vpc backup to non-vpc DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    DB subnet group does not cover all Availability Zones after it is created because of users' change.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the Option Group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": null, + "refs": { + "DBInstance$OptionGroupMembership": "

    Specifies the name and status of the option group that this instance belongs to.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the Option Group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB Instance

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB Instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBParameterGroupDetails$Parameters": "

    A list of Parameter instances.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB Instance reboots. ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB Instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot

    " + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB Instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB Instance.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB Instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB Instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB Instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB Instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB Instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.
    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB Snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "EventSubscription$SourceIdsList": "

    A list of source Ids for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB Snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The DB Instance the tags will be added to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB Security Group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 Security Group to authorize. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 Security Group to authorize. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS Account Number of the owner of the EC2 Security Group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    Constraints:

    • Must be the identifier for a valid system snapshot in the \"available\" state.

    Example: rds:mydb-2012-04-02-00-01

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    The name of the database to create when the DB Instance is created. If this parameter is not specified, no database is created in the DB Instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    Type: String

    Oracle

    The Oracle System ID (SID) of the created DB Instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB Instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of master user for the client DB Instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\\\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 alphanumeric characters.

    Oracle

    Constraints: Must contain from 8 to 30 alphanumeric characters.

    SQL Server

    Constraints: Must contain from 8 to 128 alphanumeric characters.

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB Subnet Group to associate with this DB Instance.

    If there is no DB Subnet Group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. The following list shows the time blocks for each region from which the default maintenance windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB Parameter Group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. The following list shows the time blocks for each region from which the default backup windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    MySQL

    Example: 5.1.42

    Type: String

    Oracle

    Example: 11.2.0.2.v2

    Type: String

    SQL Server

    Example: 10.50.2789.0.v1

    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB Instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB Instance should be associated with the specified option group.

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB Instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB Instance identifier of the Read Replica. This is the unique key that identifies a DB Instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB Instance that will act as the source for the Read Replica. Each DB Instance can have up to five Read Replicas.

    Constraints: Must be the identifier of an existing DB Instance that is not already a Read Replica DB Instance.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the Read Replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge

    Default: Inherits from the source DB Instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the Read Replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default Option Group for the engine specified will be used.

    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lower-case string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB Parameter Group Family name. A DB Parameter Group can be associated with one and only one DB Parameter Group Family, and can be applied only to a DB Instance running a database engine and engine version compatible with that DB Parameter Group Family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB Parameter Group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB Security Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB Security Group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB Snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This is the unique key that identifies a DB Instance. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB Subnet Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB Subnet Group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myOptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DBParameterGroupFamily for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This is the unique key that identifies a DB Instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB Instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB Instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB Instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB Instance was created. This same name is returned for the life of the DB Instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB Instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB Instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range (in UTC) during which system maintenance can occur.

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB Instance if this DB Instance is a Read Replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB Instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB Parameter Group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB Parameter Group Family that this DB Parameter Group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB Parameter Group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB Parameter Group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB Security Group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB Security Group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB Security Group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB Security Group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB Security Group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB Security Group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB Snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DBInstanceIdentifier of the DB Instance this DB Snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB Snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the Vpc Id associated with the DB Snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB Instance.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB Instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB Snapshot.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB Subnet Group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB Subnet Group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB Subnet Group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB Subnet Group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier for the DB Instance to be deleted. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be the name of an existing DB Parameter Group
    • You cannot delete a default DB Parameter Group
    • Cannot be associated with any DB Instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB Security Group to delete.

    You cannot delete the default DB Security Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB Snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default Option Groups.", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB Parameter Group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB Instance is returned. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB Parameter Group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB Parameter Group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB Security Group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB Instance Identifier to retrieve the list of DB Snapshots for. Cannot be used in conjunction with DBSnapshotIdentifier. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB Snapshot Identifier to describe. Cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    An optional snapshot type for which snapshots will be returned. If not specified, the returned results will include snapshots of all types.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB Subnet Group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB Parameter Group Family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    Options available for the given DB engine name to be described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB Instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only the available offerings matching the specified DB Instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB Instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only those reservations matching the specified DB Instances class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only the available offerings matching the specified DB Instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the id of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB Instance.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB Parameter Group Family which the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to.

    ", + "EventSubscription$Id": "

    Not used.

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The DB Instance with tags to be listed.

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB Instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB Instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this parameter causes an outage during the change and is applied during the next maintenance window, unless the ApplyImmediately parameter is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB Instance master user. Can be any printable ASCII character except \"/\", \"\\\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB Parameter Group to apply to this DB Instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: The DB Parameter Group must be in the same DB Parameter Group family as this DB Instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB Instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a nondefault DB Parameter Group is currently in use, a new DB Parameter Group in the DB Parameter Group Family for the new engine version must be specified. The new DB Parameter Group can be the default for that DB Parameter Group Family.

    Example: 5.1.42

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB Instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB Instance identifier for the DB Instance when renaming a DB Instance. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be the name of an existing DB Parameter Group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB Subnet Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB Subnet Group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The configuration of options to include in a group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides the description of the option group.

    ", + "OptionGroup$EngineName": "

    Engine name that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is 'false', this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is 'true' and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB Instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    Engine name that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": null, + "OptionNamesList$member": null, + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB Instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB Instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB Instance Class for the orderable DB Instance

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB Instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine specific parameters type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB Instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB Instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB Instance that will be applied or is in progress.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing Read Replica DB Instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. The following list shows the time blocks for each region from which the default backup windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB Instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The DB Instance the tags will be removed from.

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB Instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB Instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB Instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB Instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB Instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB Instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB Instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    The identifier for the DB Snapshot to restore from.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    Name of the DB Instance to create from the DB Snapshot. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB Subnet Group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB Instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB Instance.

    This parameter doesn't apply to the MySQL engine.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": null, + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB Instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge

    Default: The same DBInstanceClass as the original DB Instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB Instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB Instance.

    This parameter is not used for the MySQL engine.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": null, + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB Security Group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 Security Group to revoke access from. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The id of the EC2 Security Group to revoke access from. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\". The string may contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC Security Group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB Subnet Group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB Subnet Group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB Instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time (UTC) when the snapshot was taken.

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time (UTC) when the snapshot was taken.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a UTC time

    Constraints:

    • Must be before the latest restorable time for the DB Instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to a DB Instance consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the DB Instance.

    ", + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC Security Groups to associate with this DB Instance.

    Default: The default EC2 VPC Security Group for the DB Subnet group's VPC.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC Security Groups to authorize on this DB Instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMembership name strings used for this option.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$VpcSecurityGroups": "

    Provides List of VPC security group elements that the DB Instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the Option requires access to a port, then this VPC Security Group allows access to the port.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-01-10/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,97 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBInstances" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeDBSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + 
"result_key": "DBSecurityGroups" + }, + "DescribeDBSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSnapshots" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeOptionGroupOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupOptions" + }, + "DescribeOptionGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupsList" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableDBInstanceOptions" + }, + "DescribeReservedDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstances" + }, + "DescribeReservedDBInstancesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstancesOfferings" + }, + "ListTagsForResource": { + "result_key": "TagList" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/api-2.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3057 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-02-12", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2013-02-12/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + 
{"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + 
}, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBLogFiles":{ + "name":"DescribeDBLogFiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBLogFilesMessage"}, + "output":{ + 
"shape":"DescribeDBLogFilesResponse", + "resultWrapper":"DescribeDBLogFilesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + 
"resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "DownloadDBLogFilePortion":{ + "name":"DownloadDBLogFilePortion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DownloadDBLogFilePortionMessage"}, + "output":{ + "shape":"DownloadDBLogFilePortionDetails", + "resultWrapper":"DownloadDBLogFilePortionResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBLogFileNotFoundFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + 
{"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + 
"name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + 
{"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + 
"exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ProvisionedIopsCapable":{"shape":"Boolean"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "TargetDBSnapshotIdentifier":{"shape":"String"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "AllocatedStorage", + "DBInstanceClass", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + 
"MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "CreateDBParameterGroupResult":{ 
+ "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + 
"EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + 
"ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMemberships":{"shape":"OptionGroupMembershipList"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBLogFileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBLogFileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + 
"DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + 
"DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} 
+ } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
"DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + 
"DBInstanceIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesDetails":{ + "type":"structure", + "members":{ + "LogFileName":{"shape":"String"}, + "LastWritten":{"shape":"Long"}, + "Size":{"shape":"Long"} + } + }, + "DescribeDBLogFilesList":{ + "type":"list", + "member":{ + "shape":"DescribeDBLogFilesDetails", + "locationName":"DescribeDBLogFilesDetails" + } + }, + "DescribeDBLogFilesMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "FilenameContains":{"shape":"String"}, + "FileLastWritten":{"shape":"Long"}, + "FileSize":{"shape":"Long"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesResponse":{ + "type":"structure", + "members":{ + "DescribeDBLogFiles":{"shape":"DescribeDBLogFilesList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + 
"DBSubnetGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + 
"LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "DownloadDBLogFilePortionDetails":{ + "type":"structure", + "members":{ + "LogFileData":{"shape":"String"}, + "Marker":{"shape":"String"}, + "AdditionalDataPending":{"shape":"Boolean"} + } + }, + "DownloadDBLogFilePortionMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "LogFileName" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "LogFileName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "NumberOfLines":{"shape":"Integer"} + } + }, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + 
"Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + 
"type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
"InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"} + } + }, + "Long":{"type":"long"}, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + 
"PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + 
"type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Persistent":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + "OptionSettings":{"shape":"OptionSettingConfigurationList"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"}, + "OptionSettings":{"shape":"OptionSettingsList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + 
"type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"}, + "Persistent":{"shape":"Boolean"}, + "OptionGroupOptionSettings":{"shape":"OptionGroupOptionSettingsList"} + } + }, + "OptionGroupOptionSetting":{ + "type":"structure", + "members":{ + "SettingName":{"shape":"String"}, + "SettingDescription":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"} + } + }, + "OptionGroupOptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOptionSetting", + "locationName":"OptionGroupOptionSetting" + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionSetting":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "Description":{"shape":"String"}, + 
"ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "IsCollection":{"shape":"Boolean"} + } + }, + "OptionSettingConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + 
"type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + 
"ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + 
"shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + 
"RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + 
"CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/docs-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1796 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a familiar MySQL or Oracle database server. This means the code, applications, and tools you already use today with your existing MySQL or Oracle databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB Instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

    This is the Amazon RDS API Reference. It contains a comprehensive description of all Amazon RDS Query APIs and data types. Note that this API is asynchronous and some actions may require polling to determine when an action has been applied. See the parameter description to determine if a change is applied immediately or on the next instance reboot or during the maintenance window. For more information on Amazon RDS concepts and usage scenarios, see the Amazon RDS User Guide.

    ", + "operations": { + "AddSourceIdentifierToSubscription": "

    Adds a source identifier to an existing RDS event notification subscription.

    ", + "AddTagsToResource": "

    Adds metadata tags to a DB Instance. These tags can also be used with cost allocation reporting to track cost associated with a DB Instance.

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC Security Groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

    You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB Instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB Instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DBSnapshot must be in the \"available\" state.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB Instance that acts as a Read Replica of a source DB Instance.

    All Read Replica DB Instances are created as Single-AZ deployments with backups disabled. All other DB Instance attributes (including DB Security Groups and DB Parameter Groups) are inherited from the source DB Instance, except as specified below.

    The source DB Instance must have backup retention enabled.

    ", + "CreateDBParameterGroup": "

    Creates a new DB Parameter Group.

    A DB Parameter Group is initially created with the default parameters for the database engine used by the DB Instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB Parameter Group, you need to associate it with your DB Instance using ModifyDBInstance. When you associate a new DB Parameter Group with a running DB Instance, you need to reboot the DB Instance for the new DB Parameter Group and associated settings to take effect.

    ", + "CreateDBSecurityGroup": "

    Creates a new DB Security Group. DB Security Groups control access to a DB Instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.

    ", + "CreateOptionGroup": "

    Creates a new Option Group. You can create up to 20 option groups.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance action deletes a previously provisioned DB instance. A successful response from the web service indicates the request was received correctly. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB Snapshots of the DB instance to be deleted are not deleted.

    If a final DBSnapshot is requested the status of the RDS instance will be \"deleting\" until the DBSnapshot is created. DescribeDBInstance is used to monitor the status of this operation. This cannot be canceled or reverted once submitted.

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup cannot be associated with any RDS instances to be deleted.

    The specified DB Parameter Group cannot be associated with any DB Instances. ", + "DeleteDBSecurityGroup": "

    Deletes a DB Security Group.

    The specified DB Security Group must not be associated with any DB Instances.", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot.

    The DBSnapshot must be in the available state to be deleted.", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing Option Group.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.

    ", + "DescribeDBLogFiles": "

    Returns a list of DB log files for the DB instance.

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DBParameterGroup.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DBParameterGroup.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DBSecurityGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeDBSnapshots": "

    Returns information about DBSnapshots. This API supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB Instances, DB Security Groups, DB Snapshots and DB Parameter Groups for the past 14 days. Events specific to a particular DB Instance, DB Security Group, database snapshot or DB Parameter Group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB Instance options for the specified engine.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB Instances for this account, or about a specified reserved DB Instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB Instance offerings.

    ", + "DownloadDBLogFilePortion": "

    Downloads the last line of the specified log file.

    ", + "ListTagsForResource": "

    Lists all tags on a DB Instance.

    ", + "ModifyDBInstance": "

    Modify settings for a DB Instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DBParameterGroup. To modify more than one parameter submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    The apply-immediate method can be used only for dynamic parameters; the pending-reboot method can be used with MySQL and Oracle DB Instances for either dynamic or static parameters. For Microsoft SQL Server DB Instances, the pending-reboot method can be used only for static parameters.

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing Option Group.

    ", + "PromoteReadReplica": "

    Promotes a Read Replica DB Instance to a standalone DB Instance.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB Instance offering.

    ", + "RebootDBInstance": "

    Reboots a previously provisioned RDS instance. This API results in the application of modified DBParameterGroup parameters with ApplyStatus of pending-reboot to the RDS instance. This action is taken as soon as possible, and results in a momentary outage to the RDS instance during which the RDS instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. A DBInstance event is created when the reboot is completed.

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from a DB Instance.

    ", + "ResetDBParameterGroup": "

    Modifies the parameters of a DBParameterGroup to the engine/system default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DBParameterGroup specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB Instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB Instance to an arbitrary point-in-time. Users can restore to any point in time before the latestRestorableTime for up to backupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group.

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS may not also be authorized via IAM to perform necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of availability zones for the orderable DB Instance.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AvailabilityZone$ProvisionedIopsCapable": "

    True indicates the availability zone is capable of provisioned IOPs.

    ", + "DBInstance$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB Snapshot is created before the DB Instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB Snapshot is created before the DB Instance is deleted.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "DownloadDBLogFilePortionDetails$AdditionalDataPending": "

    Boolean value that if true, indicates there is more data to be downloaded.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether or not the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB Instance.

    If this parameter is passed as false, changes to the DB Instance are applied on the next call to RebootDBInstance, the next maintenance reboot, or the next failure reboot, whichever occurs first. See each parameter to determine when a change is applied.

    Default: false

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB Instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the Option Group.

    ", + "Option$Persistent": "

    Indicate if this option is persistent.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value 'true' indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OptionGroupOption$Persistent": "

    Specifies whether the option is persistent in an option group.

    ", + "OptionGroupOptionSetting$IsModifiable": "

    Boolean value where true indicates that this option group option can be changed from the default value.

    ", + "OptionSetting$IsModifiable": "

    A Boolean value that, when true, indicates the option setting can be modified from the default.

    ", + "OptionSetting$IsCollection": "

    Indicates if the option setting is part of a collection.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB Instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB Instance can have a read replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB Instance.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB Parameter Group to default values.

    Default: true

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB Instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB Instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB Instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB Instance is a read replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB Instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB Instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB Instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window.

    " + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBLogFileNotFoundFault": { + "base": "

    LogFileName does not refer to an existing DB log file.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB Parameter Group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB Parameter Groups applied to this DB Instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides List of DB Security Group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the option requires access to a port, then this DB Security Group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB Security Groups to associate with this DB Instance.

    Default: The default DB Security Group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB Security Groups to authorize on this DB Instance. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMembership name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Provides the information of the subnet group associated with the DB instance, including the name, description and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet groups.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesDetails": { + "base": "

    This data type is used as a response element to DescribeDBLogFiles.

    ", + "refs": { + "DescribeDBLogFilesList$member": null + } + }, + "DescribeDBLogFilesList": { + "base": null, + "refs": { + "DescribeDBLogFilesResponse$DescribeDBLogFiles": "

    The DB log files returned.

    " + } + }, + "DescribeDBLogFilesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesResponse": { + "base": "

    The response from a call to DescribeDBLogFiles.

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB Instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB Instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "DownloadDBLogFilePortionDetails": { + "base": "

    This data type is used as a response element to DownloadDBLogFilePortion.

    ", + "refs": { + } + }, + "DownloadDBLogFilePortionMessage": { + "base": "

    ", + "refs": { + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for an event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    Specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size specified in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB Snapshots are retained.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "DownloadDBLogFilePortionMessage$NumberOfLines": "

    The number of lines remaining to be downloaded.

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB Instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    MySQL

    Constraints: Must be an integer from 5 to 1024.

    Type: Integer

    Oracle

    Constraints: Must be an integer from 10 to 1024.

    SQL Server

    Constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 30 to 1024 (Express Edition and Web Edition)

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    • Cannot be set to 0 if the DB Instance is a master instance with read replicas
    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434 and 3389.

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB Instance uses for connections.

    Default: Inherits from the source DB Instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB Instance at the time of the snapshot.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBLogFilesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-1024

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-1024

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 8
    • Cannot be set to 0 if the DB Instance is a master instance with read replicas or if the DB Instance is a read replica
    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "Option$Port": "

    If required, the port configured for this option to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB Instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB Instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB Instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB Instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    Specifies the amount of provisioned IOPS for the DB Instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts.

    Constraints: Must be an integer greater than 1000.

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB Instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB Instance.

    Constraints: Must be an integer greater than 1000.

    " + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from vpc backup to non-vpc DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The DB subnet group does not cover all Availability Zones after it was created, because of changes made by the user.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "DescribeDBLogFilesDetails$LastWritten": "

    The date and time that the last log entry was written.

    ", + "DescribeDBLogFilesDetails$Size": "

    The size, in bytes, of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$FileLastWritten": "

    Filters the available log files for files written since the specified date.

    ", + "DescribeDBLogFilesMessage$FileSize": "

    Filters the available log files for files larger than the specified size.

    " + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the Option Group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": "

    Provides information on the option groups the DB instance is a member of.

    ", + "refs": { + "OptionGroupMembershipList$member": null + } + }, + "OptionGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$OptionGroupMemberships": "

    Provides the list of option group memberships for this DB Instance.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionSetting": { + "base": "

    Option Group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.

    ", + "refs": { + "OptionGroupOptionSettingsList$member": null + } + }, + "OptionGroupOptionSettingsList": { + "base": null, + "refs": { + "OptionGroupOption$OptionGroupOptionSettings": "

    Specifies the option settings that are available (and the default value) for each option in an option group.

    " + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available option group options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the Option Group.

    " + } + }, + "OptionSetting": { + "base": "

    Option settings are the actual settings being applied or configured for that option. It is used when you modify an option group or describe option groups. For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER that can have several different values.

    ", + "refs": { + "OptionSettingConfigurationList$member": null, + "OptionSettingsList$member": null + } + }, + "OptionSettingConfigurationList": { + "base": null, + "refs": { + "Option$OptionSettings": "

    The option settings for this option.

    " + } + }, + "OptionSettingsList": { + "base": null, + "refs": { + "OptionConfiguration$OptionSettings": "

    The option settings to include in an option group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB Instance

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB Instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBParameterGroupDetails$Parameters": "

    A list of Parameter instances.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB Instance reboots. ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB Instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot

    " + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB Instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB Instance.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB Instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB Instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB Instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB Instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB Instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.
    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB Snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "EventSubscription$SourceIdsList": "

    A list of source Ids for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB Snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The DB Instance the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB Security Group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 Security Group to authorize. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 Security Group to authorize. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS Account Number of the owner of the EC2 Security Group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    Constraints:

    • Must be the identifier for a valid system snapshot in the \"available\" state.

    Example: rds:mydb-2012-04-02-00-01

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    The name of the database to create when the DB Instance is created. If this parameter is not specified, no database is created in the DB Instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    Type: String

    Oracle

    The Oracle System ID (SID) of the created DB Instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB Instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of master user for the client DB Instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\\\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 alphanumeric characters.

    Oracle

    Constraints: Must contain from 8 to 30 alphanumeric characters.

    SQL Server

    Constraints: Must contain from 8 to 128 alphanumeric characters.

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB Subnet Group to associate with this DB Instance.

    If there is no DB Subnet Group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB Parameter Group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. The following list shows the time blocks for each region from which the default backup windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    MySQL

    Example: 5.1.42

    Type: String

    Oracle

    Example: 11.2.0.2.v2

    Type: String

    SQL Server

    Example: 10.50.2789.0.v1

    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB Instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB Instance should be associated with the specified option group.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB Instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB Instance identifier of the Read Replica. This is the unique key that identifies a DB Instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB Instance that will act as the source for the Read Replica. Each DB Instance can have up to five Read Replicas.

    Constraints: Must be the identifier of an existing DB Instance that is not already a Read Replica DB Instance.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the Read Replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge

    Default: Inherits from the source DB Instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the Read Replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default Option Group for the engine specified will be used.

    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lower-case string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB Parameter Group Family name. A DB Parameter Group can be associated with one and only one DB Parameter Group Family, and can be applied only to a DB Instance running a database engine and engine version compatible with that DB Parameter Group Family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB Parameter Group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB Security Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB Security Group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB Snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This is the unique key that identifies a DB Instance. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB Subnet Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB Subnet Group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. if this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myOptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DBParameterGroupFamily for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This is the unique key that identifies a DB Instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB Instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB Instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB Instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB Instance was created. This same name is returned for the life of the DB Instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB Instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB Instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range (in UTC) during which system maintenance can occur.

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB Instance if this DB Instance is a Read Replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB Instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB Parameter Group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB Parameter Group Family that this DB Parameter Group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB Parameter Group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB Parameter Group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB Security Group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB Security Group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB Security Group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB Security Group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB Security Group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB Security Group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB Snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DBInstanceIdentifier of the DB Instance this DB Snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB Snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the Vpc Id associated with the DB Snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB Snapshot.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB Instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB Snapshot.

    ", + "DBSnapshot$OptionGroupName": "

    Provides the option group name for the DB Snapshot.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB Subnet Group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB Subnet Group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB Subnet Group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB Subnet Group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier for the DB Instance to be deleted. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be the name of an existing DB Parameter Group
    • You cannot delete a default DB Parameter Group
    • Cannot be associated with any DB Instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB Security Group to delete.

    You cannot delete the default DB Security Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB Snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default Option Groups.", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB Parameter Group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB Instance is returned. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeDBLogFilesDetails$LogFileName": "

    The name of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB Instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBLogFilesMessage$FilenameContains": "

    Filters the available log files for log file names that contain the specified string.

    ", + "DescribeDBLogFilesMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords.

    ", + "DescribeDBLogFilesResponse$Marker": "

    An optional paging token.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB Parameter Group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB Parameter Group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB Security Group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB Instance Identifier to retrieve the list of DB Snapshots for. Cannot be used in conjunction with DBSnapshotIdentifier. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB Snapshot Identifier to describe. Cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    An optional snapshot type for which snapshots will be returned. If not specified, the returned results will include snapshots of all types.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB Subnet Group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB Parameter Group Family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    A required parameter. Options available for the given Engine name will be described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB Instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only the available offerings matching the specified DB Instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB Instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only those reservations matching the specified DB Instances class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB Instance class filter value. Specify this parameter to show only the available offerings matching the specified DB Instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DownloadDBLogFilePortionDetails$LogFileData": "

    Entries from the specified log file.

    ", + "DownloadDBLogFilePortionDetails$Marker": "

    An optional pagination token provided by a previous DownloadDBLogFilePortion request.

    ", + "DownloadDBLogFilePortionMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB Instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DownloadDBLogFilePortionMessage$LogFileName": "

    The name of the log file to be downloaded.

    ", + "DownloadDBLogFilePortionMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords.

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the id of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB Instance.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB Parameter Group Family which the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The DB Instance with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB Instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB Instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this parameter causes an outage during the change and is applied during the next maintenance window, unless the ApplyImmediately parameter is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB Instance master user. Can be any printable ASCII character except \"/\", \"\\\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB Parameter Group to apply to this DB Instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: The DB Parameter Group must be in the same DB Parameter Group family as this DB Instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB Instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a nondefault DB Parameter Group is currently in use, a new DB Parameter Group in the DB Parameter Group Family for the new engine version must be specified. The new DB Parameter Group can be the default for that DB Parameter Group Family.

    Example: 5.1.42

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB Instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB Instance identifier for the DB Instance when renaming a DB Instance. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be the name of an existing DB Parameter Group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB Subnet Group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB Subnet Group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. if this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The configuration of options to include in a group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides the description of the option group.

    ", + "OptionGroup$EngineName": "

    Engine name that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is 'false', this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is 'true' and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB Instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    Engine name that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionSetting$SettingName": "

    The name of the option group option.

    ", + "OptionGroupOptionSetting$SettingDescription": "

    The description of the option group option.

    ", + "OptionGroupOptionSetting$DefaultValue": "

    The default value for the option group option.

    ", + "OptionGroupOptionSetting$ApplyType": "

    The DB engine specific parameter type for the option group option.

    ", + "OptionGroupOptionSetting$AllowedValues": "

    Indicates the acceptable values for the option group option.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionNamesList$member": null, + "OptionSetting$Name": "

    The name of the option that has settings that you can set.

    ", + "OptionSetting$Value": "

    The current value of the option setting.

    ", + "OptionSetting$DefaultValue": "

    The default value of the option setting.

    ", + "OptionSetting$Description": "

    The description of the option setting.

    ", + "OptionSetting$ApplyType": "

    The DB engine specific parameter type.

    ", + "OptionSetting$DataType": "

    The data type of the option setting.

    ", + "OptionSetting$AllowedValues": "

    The allowed values of the option setting.

    ", + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB Instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB Instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB Instance Class for the orderable DB Instance

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB Instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine specific parameters type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB Instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB Instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB Instance that will be applied or is in progress.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing Read Replica DB Instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. The following list shows the time blocks for each region from which the default backup windows are assigned.

    • US-East (Northern Virginia) Region: 03:00-11:00 UTC
    • US-West (Northern California) Region: 06:00-14:00 UTC
    • EU (Ireland) Region: 22:00-06:00 UTC
    • Asia Pacific (Singapore) Region: 14:00-22:00 UTC
    • Asia Pacific (Tokyo) Region: 17:00-03:00 UTC

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB Instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB Instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The DB Instance the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB Instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB Instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB Instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB Instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB Instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB Instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB Instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB Parameter Group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    The identifier for the DB Snapshot to restore from.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    Name of the DB Instance to create from the DB Snapshot. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB Subnet Group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB Instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB Instance.

    This parameter doesn't apply to the MySQL engine.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB Instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge

    Default: The same DBInstanceClass as the original DB Instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB Instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB Instance.

    This parameter is not used for the MySQL engine.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB Security Group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 Security Group to revoke access from. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The id of the EC2 Security Group to revoke access from. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\". The string may only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\". The string may only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC Security Group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB Subnet Group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB Subnet Group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB Instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time (UTC) when the snapshot was taken.

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time (UTC) when the snapshot was taken.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a UTC time

    Constraints:

    • Must be before the latest restorable time for the DB Instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to a DB Instance consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the DB Instance.

    ", + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC Security Groups to associate with this DB Instance.

    Default: The default EC2 VPC Security Group for the DB Subnet group's VPC.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC Security Groups to authorize on this DB Instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMembership name strings used for this option.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$VpcSecurityGroups": "

    Provides a list of VPC security group elements that the DB Instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the option requires access to a port, then this VPC Security Group allows access to the port.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-02-12/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,110 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBInstances" + }, + "DescribeDBLogFiles": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DescribeDBLogFiles" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + 
"result_key": "Parameters" + }, + "DescribeDBSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSecurityGroups" + }, + "DescribeDBSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSnapshots" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeOptionGroupOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupOptions" + }, + "DescribeOptionGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupsList" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableDBInstanceOptions" + }, + "DescribeReservedDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstances" + }, + "DescribeReservedDBInstancesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstancesOfferings" + }, + "DownloadDBLogFilePortion": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "NumberOfLines", + "more_results": "AdditionalDataPending", + "result_key": "LogFileData" + }, 
+ "ListTagsForResource": { + "result_key": "TagList" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3158 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-09-09", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2013-09-09/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + 
{"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, 
+ {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBSubnetGroupNotAllowedFault"}, + {"shape":"InvalidDBSubnetGroupFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + 
{"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + 
"shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBLogFiles":{ + "name":"DescribeDBLogFiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBLogFilesMessage"}, + "output":{ + "shape":"DescribeDBLogFilesResponse", + "resultWrapper":"DescribeDBLogFilesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + 
"shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + 
"DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "DownloadDBLogFilePortion":{ + "name":"DownloadDBLogFilePortion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DownloadDBLogFilePortionMessage"}, + "output":{ + "shape":"DownloadDBLogFilePortionDetails", + "resultWrapper":"DownloadDBLogFilePortionResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBLogFileNotFoundFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + 
{"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + 
"output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + 
"output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ProvisionedIopsCapable":{"shape":"Boolean"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "TargetDBSnapshotIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + 
"AllocatedStorage", + "DBInstanceClass", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "DBSubnetGroupName":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ 
+ "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + 
"SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + 
"BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMemberships":{"shape":"OptionGroupMembershipList"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"}, + "StatusInfos":{"shape":"DBInstanceStatusInfoList"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceStatusInfo":{ + "type":"structure", + "members":{ + "StatusType":{"shape":"String"}, + "Normal":{"shape":"Boolean"}, + "Status":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "DBInstanceStatusInfoList":{ + "type":"list", + 
"member":{ + "shape":"DBInstanceStatusInfo", + "locationName":"DBInstanceStatusInfo" + } + }, + "DBLogFileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBLogFileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + 
"DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + 
"DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PercentProgress":{"shape":"Integer"}, + "SourceRegion":{"shape":"String"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + 
"exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotAllowedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotAllowedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + 
"DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesDetails":{ + "type":"structure", + "members":{ + "LogFileName":{"shape":"String"}, + "LastWritten":{"shape":"Long"}, + "Size":{"shape":"Long"} + } + }, + "DescribeDBLogFilesList":{ + "type":"list", + "member":{ + "shape":"DescribeDBLogFilesDetails", + "locationName":"DescribeDBLogFilesDetails" + } + }, + "DescribeDBLogFilesMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + 
"FilenameContains":{"shape":"String"}, + "FileLastWritten":{"shape":"Long"}, + "FileSize":{"shape":"Long"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesResponse":{ + "type":"structure", + "members":{ + "DescribeDBLogFiles":{"shape":"DescribeDBLogFilesList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + 
"Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } 
+ }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "DownloadDBLogFilePortionDetails":{ + "type":"structure", + "members":{ + "LogFileData":{"shape":"String"}, + "Marker":{"shape":"String"}, + "AdditionalDataPending":{"shape":"Boolean"} + } + }, + "DownloadDBLogFilePortionMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "LogFileName" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "LogFileName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "NumberOfLines":{"shape":"Integer"} + } + }, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + 
"members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + 
"EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"String"}, + "Values":{"shape":"FilterValueList"} + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Value" + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "Long":{"type":"long"}, + "ModifyDBInstanceMessage":{ + 
"type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + 
"ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + "OptionSettings":{"shape":"OptionSettingConfigurationList"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"}, + "OptionSettings":{"shape":"OptionSettingsList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "OptionGroupOptionSettings":{"shape":"OptionGroupOptionSettingsList"} + } + }, + "OptionGroupOptionSetting":{ + "type":"structure", + "members":{ + "SettingName":{"shape":"String"}, + "SettingDescription":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"} + } + }, + "OptionGroupOptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOptionSetting", + "locationName":"OptionGroupOptionSetting" + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionSetting":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "IsCollection":{"shape":"Boolean"} + } + }, + "OptionSettingConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + 
"OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + 
"required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + 
"Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + 
}, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + 
"AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + 
"locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1876 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a MySQL, PostgreSQL, Microsoft SQL Server, or Oracle database server. This means the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

    This is an interface reference for Amazon RDS. It contains documentation for a programming or command line interface you can use to manage Amazon RDS. Note that Amazon RDS is asynchronous, which means that some interfaces may require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. For a summary of the Amazon RDS interfaces, go to Available RDS Interfaces.

    ", + "operations": { + "AddSourceIdentifierToSubscription": "

    Adds a source identifier to an existing RDS event notification subscription.

    ", + "AddTagsToResource": "

    Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in Condition statement in IAM policy for Amazon RDS.

    For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

    You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DBSnapshot must be in the \"available\" state.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB instance that acts as a read replica of a source DB instance.

    All read replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

    The source DB instance must have backup retention enabled.

    ", + "CreateDBParameterGroup": "

    Creates a new DB parameter group.

    A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB Instance for the new DB parameter group and associated settings to take effect.

    After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "CreateDBSecurityGroup": "

    Creates a new DB security group. DB security groups control access to a DB instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.

    ", + "CreateOptionGroup": "

    Creates a new option group. You can create up to 20 option groups.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance action deletes a previously provisioned DB instance. A successful response from the web service indicates the request was received correctly. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB snapshots of the DB instance to be deleted are not deleted.

    If a final DB snapshot is requested the status of the RDS instance will be \"deleting\" until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action cannot be canceled or reverted once submitted.

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup cannot be associated with any RDS instances to be deleted.

    The specified DB parameter group cannot be associated with any DB instances. ", + "DeleteDBSecurityGroup": "

    Deletes a DB security group.

    The specified DB security group must not be associated with any DB instances.", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot. If the snapshot is being copied, the copy operation is terminated.

    The DBSnapshot must be in the available state to be deleted.", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing option group.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.

    ", + "DescribeDBLogFiles": "

    Returns a list of DB log files for the DB instance.

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DB parameter group.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DB security group.

    ", + "DescribeDBSnapshots": "

    Returns information about DB snapshots. This API supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB instance options for the specified engine.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB instances for this account, or about a specified reserved DB instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB instance offerings.

    ", + "DownloadDBLogFilePortion": "

    Downloads all or a portion of the specified log file.

    ", + "ListTagsForResource": "

    Lists all tags on an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ModifyDBInstance": "

    Modify settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    The apply-immediate method can be used only for dynamic parameters; the pending-reboot method can be used with MySQL, PostgreSQL, and Oracle DB instances for either dynamic or static parameters. For Microsoft SQL Server DB instances, the pending-reboot method can be used only for static parameters.

    After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing option group.

    ", + "PromoteReadReplica": "

    Promotes a read replica DB instance to a standalone DB instance.

    We recommend that you enable automated backups on your read replica before promoting the read replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB instance offering.

    ", + "RebootDBInstance": "

    Rebooting a DB instance restarts the database engine service. A reboot also applies to the DB instance any modifications to the associated DB parameter group that were pending. Rebooting a DB instance results in a momentary outage of the instance, during which the DB instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. An Amazon RDS event is created when the reboot is completed.

    If your DB instance is deployed in multiple Availability Zones, you can force a failover from one AZ to the other during the reboot. You might force a failover to test the availability of your DB instance deployment or to restore operations to the original AZ after a failover occurs.

    The time required to reboot is a function of the specific database engine's crash recovery process. To improve the reboot time, we recommend that you reduce database activities as much as possible during the reboot process to reduce rollback activity for in-transit transactions.

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ResetDBParameterGroup": "

    Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB instance to an arbitrary point-in-time. Users can restore to any point in time before the latestRestorableTime for up to backupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group.

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS may not also be authorized via IAM to perform necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of availability zones for the orderable DB instance.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "AvailabilityZone$ProvisionedIopsCapable": "

    True indicates the availability zone is capable of provisioned IOPs.

    ", + "DBInstance$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "DBInstanceStatusInfo$Normal": "

    Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

    ", + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted.

    Specify true when deleting a read replica.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "DownloadDBLogFilePortionDetails$AdditionalDataPending": "

    Boolean value that if true, indicates there is more data to be downloaded.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

    If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied.

    Default: false

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the option group.

    ", + "Option$Persistent": "

    Indicate if this option is persistent.

    ", + "Option$Permanent": "

    Indicate if this option is permanent.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value 'true' indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OptionGroupOption$Persistent": "

    A persistent option cannot be removed from the option group once the option group is used, but this option can be removed from the DB instance while modifying the related data and assigning another option group without this option.

    ", + "OptionGroupOption$Permanent": "

    A permanent option cannot be removed from the option group once the option group is used, and it cannot be removed from the DB instance after assigning an option group with this permanent option.

    ", + "OptionGroupOptionSetting$IsModifiable": "

    Boolean value where true indicates that this option group option can be changed from the default value.

    ", + "OptionSetting$IsModifiable": "

    A Boolean value that, when true, indicates the option setting can be modified from the default.

    ", + "OptionSetting$IsCollection": "

    Indicates if the option setting is part of a collection.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB instance can have a read replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB instance.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB parameter group to default values.

    Default: true

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the read replica during the maintenance window.

    Default: Inherits from the source DB instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB instance is a read replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    " + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBInstanceStatusInfo": { + "base": "

    Provides a list of status information for a DB instance.

    ", + "refs": { + "DBInstanceStatusInfoList$member": null + } + }, + "DBInstanceStatusInfoList": { + "base": null, + "refs": { + "DBInstance$StatusInfos": "

    The status of a read replica. If the instance is not a read replica, this will be blank.

    " + } + }, + "DBLogFileNotFoundFault": { + "base": "

    LogFileName does not refer to an existing DB log file.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB parameter group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB parameter groups applied to this DB instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides List of DB security group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the option requires access to a port, then this DB security group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to associate with this DB instance.

    Default: The default DB security group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to authorize on this DB instance. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMembership name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotAllowedFault": { + "base": "

    Indicates that the DBSubnetGroup should not be specified while creating read replicas that lie in the same region as the source instance.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet group.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesDetails": { + "base": "

    This data type is used as a response element to DescribeDBLogFiles.

    ", + "refs": { + "DescribeDBLogFilesList$member": null + } + }, + "DescribeDBLogFilesList": { + "base": null, + "refs": { + "DescribeDBLogFilesResponse$DescribeDBLogFiles": "

    The DB log files returned.

    " + } + }, + "DescribeDBLogFilesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesResponse": { + "base": "

    The response from a call to DescribeDBLogFiles.

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "DownloadDBLogFilePortionDetails": { + "base": "

    This data type is used as a response element to DownloadDBLogFilePortion.

    ", + "refs": { + } + }, + "DownloadDBLogFilePortionMessage": { + "base": "

    ", + "refs": { + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for an event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "Filter": { + "base": null, + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeDBEngineVersionsMessage$Filters": "

    Not currently supported.

    ", + "DescribeDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBLogFilesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParameterGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSecurityGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSnapshotsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSubnetGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEngineDefaultParametersMessage$Filters": "

    Not currently supported.

    ", + "DescribeEventCategoriesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventSubscriptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeReservedDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Filters": "

    This parameter is not currently supported.

    ", + "ListTagsForResourceMessage$Filters": "

    This parameter is not currently supported.

    " + } + }, + "FilterValueList": { + "base": null, + "refs": { + "Filter$Values": "

    This parameter is not currently supported.

    " + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    Specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size specified in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB snapshots are retained.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "DBSnapshot$PercentProgress": "

    The percentage of the estimated data that has been transferred.

    ", + "DownloadDBLogFilePortionMessage$NumberOfLines": "

    The number of lines to download.

    If the NumberOfLines parameter is specified, then the block of lines returned can be from the beginning or the end of the log file, depending on the value of the Marker parameter.

    • If neither Marker nor NumberOfLines is specified, the entire log file is returned.

    • If NumberOfLines is specified and Marker is not specified, then the most recent lines from the end of the log file are returned.

    • If Marker is specified as \"0\", then the specified number of lines from the beginning of the log file are returned.

    • You can download the log file in blocks of lines by specifying the size of the block using the NumberOfLines parameter, and by specifying a value of \"0\" for the Marker parameter in your first request. Include the Marker value returned in the response as the Marker value for the next request, continuing until the AdditionalDataPending response element returns false.

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    Type: Integer

    MySQL

    Constraints: Must be an integer from 5 to 3072.

    PostgreSQL

    Constraints: Must be an integer from 5 to 3072.

    Oracle

    Constraints: Must be an integer from 10 to 3072.

    SQL Server

    Constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 30 to 1024 (Express Edition and Web Edition)

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 35
    • Cannot be set to 0 if the DB instance is a source to read replicas
    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    PostgreSQL

    Default: 5432

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: To use PIOPS, this value must be an integer greater than 1000.

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB instance uses for connections.

    Default: Inherits from the source DB instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBLogFilesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    PostgreSQL

    Default: Uses existing setting

    Valid Values: 5-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 35
    • Can be specified for a read replica only if the source is running MySQL 5.6
    • Cannot be set to 0 if the DB instance is a source to read replicas
    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    Type: Integer

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "Option$Port": "

    If required, the port configured for this option to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    " + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupFault": { + "base": "

    Indicates the DBSubnetGroup does not belong to the same VPC as that of an existing cross region read replica of the same source instance.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from vpc backup to non-vpc DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The DB subnet group does not cover all Availability Zones after it is created because of changes that were made by the user.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "DescribeDBLogFilesDetails$LastWritten": "

    A POSIX timestamp when the last log entry was written.

    ", + "DescribeDBLogFilesDetails$Size": "

    The size, in bytes, of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$FileLastWritten": "

    Filters the available log files for files written since the specified date, in POSIX timestamp format.

    ", + "DescribeDBLogFilesMessage$FileSize": "

    Filters the available log files for files larger than the specified size.

    " + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the option group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": "

    Provides information on the option groups the DB instance is a member of.

    ", + "refs": { + "OptionGroupMembershipList$member": null + } + }, + "OptionGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$OptionGroupMemberships": "

    Provides the list of option group memberships for this DB instance.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionSetting": { + "base": "

    option group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.

    ", + "refs": { + "OptionGroupOptionSettingsList$member": null + } + }, + "OptionGroupOptionSettingsList": { + "base": null, + "refs": { + "OptionGroupOption$OptionGroupOptionSettings": "

    Specifies the option settings that are available (and the default value) for each option in an option group.

    " + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available option group options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the option group.

    " + } + }, + "OptionSetting": { + "base": "

    Option settings are the actual settings being applied or configured for that option. It is used when you modify an option group or describe option groups. For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER that can have several different values.

    ", + "refs": { + "OptionSettingConfigurationList$member": null, + "OptionSettingsList$member": null + } + }, + "OptionSettingConfigurationList": { + "base": null, + "refs": { + "Option$OptionSettings": "

    The option settings for this option.

    " + } + }, + "OptionSettingsList": { + "base": null, + "refs": { + "OptionConfiguration$OptionSettings": "

    The option settings to include in an option group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB instance

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBParameterGroupDetails$Parameters": "

    A list of Parameter values.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots. ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot

    " + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the read replicas associated with this DB instance.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.
    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "EventSubscription$SourceIdsList": "

    A list of source Ids for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    Constraints:

    • Must specify a valid system snapshot in the \"available\" state.
    • If the source snapshot is in the same region as the copy, specify a valid DB snapshot identifier.
    • If the source snapshot is in a different region than the copy, specify a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot.

    Example: rds:mydb-2012-04-02-00-01

    Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    Type: String

    MySQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    PostgreSQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 63 alphanumeric characters
    • Must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0-9).
    • Cannot be a word reserved by the specified database engine

    Oracle

    The Oracle System ID (SID) of the created DB instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of the master user for the client DB instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 characters.

    Oracle

    Constraints: Must contain from 8 to 30 characters.

    SQL Server

    Constraints: Must contain from 8 to 128 characters.

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB subnet group to associate with this DB instance.

    If there is no DB subnet group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned.

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    MySQL

    Example: 5.1.42

    Type: String

    PostgreSQL

    Example: 9.3

    Type: String

    Oracle

    Example: 11.2.0.2.v2

    Type: String

    SQL Server

    Example: 10.50.2789.0.v1

    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier of the read replica. This is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to five read replicas.

    Constraints:

    • Must be the identifier of an existing DB instance.
    • Can specify a DB instance that is a read replica only if the source is running MySQL 5.6.
    • The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0.
    • If the source DB instance is in the same region as the read replica, specify a valid DB instance identifier.
    • If the source DB instance is in a different region than the read replica, specify a valid DB instance ARN. For more information, go to Constructing a Amazon RDS Amazon Resource Name (ARN).
    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the read replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    Default: Inherits from the source DB instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the read replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used.

    ", + "CreateDBInstanceReadReplicaMessage$DBSubnetGroupName": "

    Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

    Constraints:

    • Can only be specified if the source DB instance identifier specifies a DB instance in another region.
    • The specified DB subnet group must be in the same region in which the operation is running.
    • All read replicas in one region that are created from the same source DB instance must either:
      • Specify DB subnet groups from the same VPC. All these read replicas will be created in the same VPC.
      • Not specify a DB subnet group. All these read replicas will be created outside of any VPC.
    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lower-case string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB parameter group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB security group. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • May not contain spaces

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB security group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB instance identifier. This is the unique key that identifies a DB instance.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. if this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myoptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DB parameter group family for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This is the unique key that identifies a DB instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use. For example, this value returns only MySQL information when returning values from CreateDBInstanceReadReplica since read replicas are only supported for MySQL.

    MySQL

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range (in UTC) during which system maintenance can occur.

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB instance if this DB instance is a read replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DBInstanceStatusInfo$StatusType": "

    This value is currently \"read replication.\"

    ", + "DBInstanceStatusInfo$Status": "

    Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

    ", + "DBInstanceStatusInfo$Message": "

    Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB parameter group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB parameter group family that this DB parameter group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB parameter group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB security group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB security group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB security group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB security group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DB instance identifier of the DB instance this DB snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the Vpc Id associated with the DB snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB snapshot.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB snapshot.

    ", + "DBSnapshot$OptionGroupName": "

    Provides the option group name for the DB snapshot.

    ", + "DBSnapshot$SourceRegion": "

    The region that the DB snapshot was created in or copied from.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB subnet group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB subnet group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB subnet group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB subnet group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier for the DB instance to be deleted. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Cannot be specified when deleting a read replica.
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • You cannot delete a default DB parameter group
    • Cannot be associated with any DB instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB security group to delete.

    You cannot delete the default DB security group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • May not contain spaces
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default option groups.", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeDBLogFilesDetails$LogFileName": "

    The name of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBLogFilesMessage$FilenameContains": "

    Filters the available log files for log file names that contain the specified string.

    ", + "DescribeDBLogFilesMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords.

    ", + "DescribeDBLogFilesResponse$Marker": "

    A pagination token that can be used in a subsequent DescribeDBLogFiles request.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB security group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB instance identifier to retrieve the list of DB snapshots for. Cannot be used in conjunction with DBSnapshotIdentifier. This parameter is not case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB snapshot identifier to describe. Cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    The type of snapshots that will be returned. Values can be \"automated\" or \"manual.\" If not specified, the returned results will include all snapshot types.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB subnet group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB parameter group family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    A required parameter. Options available for the given Engine name will be described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instances class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DownloadDBLogFilePortionDetails$LogFileData": "

    Entries from the specified log file.

    ", + "DownloadDBLogFilePortionDetails$Marker": "

    A pagination token that can be used in a subsequent DownloadDBLogFilePortion request.

    ", + "DownloadDBLogFilePortionMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DownloadDBLogFilePortionMessage$LogFileName": "

    The name of the log file to be downloaded.

    ", + "DownloadDBLogFilePortionMessage$Marker": "

    The pagination token provided in the previous request or \"0\". If the Marker parameter is specified the response includes only records beyond the marker until the end of the file or up to NumberOfLines.

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the id of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB instance.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB parameter group family which the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to.

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Filter$Name": "

    This parameter is not currently supported.

    ", + "FilterValueList$member": null, + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this parameter causes an outage during the change and is applied during the next maintenance window, unless the ApplyImmediately parameter is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB instance master user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The DB instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.

    Default: Uses existing setting

    Constraints: The DB parameter group must be in the same DB parameter group family as the DB instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

    Example: 5.1.42

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if you set Apply Immediately to false. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The configuration of options to include in a group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides the description of the option group.

    ", + "OptionGroup$EngineName": "

    Engine name that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is 'false', this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is 'true' and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    Engine name that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionSetting$SettingName": "

    The name of the option group option.

    ", + "OptionGroupOptionSetting$SettingDescription": "

    The description of the option group option.

    ", + "OptionGroupOptionSetting$DefaultValue": "

    The default value for the option group option.

    ", + "OptionGroupOptionSetting$ApplyType": "

    The DB engine specific parameter type for the option group option.

    ", + "OptionGroupOptionSetting$AllowedValues": "

    Indicates the acceptable values for the option group option.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionNamesList$member": null, + "OptionSetting$Name": "

    The name of the option that has settings that you can set.

    ", + "OptionSetting$Value": "

    The current value of the option setting.

    ", + "OptionSetting$DefaultValue": "

    The default value of the option setting.

    ", + "OptionSetting$Description": "

    The description of the option setting.

    ", + "OptionSetting$ApplyType": "

    The DB engine specific parameter type.

    ", + "OptionSetting$DataType": "

    The data type of the option setting.

    ", + "OptionSetting$AllowedValues": "

    The allowed values of the option setting.

    ", + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB instance class for the orderable DB instance.

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine specific parameters type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB instance that will be applied or is in progress.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing read replica DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned.

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    Name of the DB instance to create from the DB snapshot. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to restore from.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB instance.

    This parameter doesn't apply to the MySQL engine.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge

    Default: The same DBInstanceClass as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB instance.

    This parameter is not used for the MySQL engine.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Example: oracle-ee

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The id of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string may only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string may only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC security group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB subnet group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the DB subnet group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time (UTC) when the snapshot was taken.

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time (UTC) when the snapshot was taken.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a UTC time

    Constraints:

    • Must be before the latest restorable time for the DB instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the Amazon RDS resource.

    ", + "CopyDBSnapshotMessage$Tags": null, + "CreateDBInstanceMessage$Tags": null, + "CreateDBInstanceReadReplicaMessage$Tags": null, + "CreateDBParameterGroupMessage$Tags": null, + "CreateDBSecurityGroupMessage$Tags": null, + "CreateDBSnapshotMessage$Tags": null, + "CreateDBSubnetGroupMessage$Tags": null, + "CreateEventSubscriptionMessage$Tags": null, + "CreateOptionGroupMessage$Tags": null, + "PurchaseReservedDBInstancesOfferingMessage$Tags": null, + "RestoreDBInstanceFromDBSnapshotMessage$Tags": null, + "RestoreDBInstanceToPointInTimeMessage$Tags": null, + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to associate with this DB instance.

    Default: The default EC2 VPC security group for the DB subnet group's VPC.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMemebrship name strings used for this option.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$VpcSecurityGroups": "

    Provides List of VPC security group elements that the DB instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the option requires access to a port, then this VPC security group allows access to the port.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,110 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBInstances" + }, + "DescribeDBLogFiles": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DescribeDBLogFiles" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + 
"result_key": "Parameters" + }, + "DescribeDBSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSecurityGroups" + }, + "DescribeDBSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSnapshots" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeOptionGroupOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupOptions" + }, + "DescribeOptionGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupsList" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableDBInstanceOptions" + }, + "DescribeReservedDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstances" + }, + "DescribeReservedDBInstancesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstancesOfferings" + }, + "DownloadDBLogFilePortion": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "NumberOfLines", + "more_results": "AdditionalDataPending", + "result_key": "LogFileData" + }, 
+ "ListTagsForResource": { + "result_key": "TagList" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2013-09-09/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,97 @@ +{ + "version": 2, + "waiters": { + "DBInstanceAvailable": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + }, + "DBInstanceDeleted": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ 
+ { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "resetting-master-credentials", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3271 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-09-01", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2014-09-01/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + 
"AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBParameterGroup":{ + "name":"CopyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBParameterGroupMessage"}, + "output":{ + "shape":"CopyDBParameterGroupResult", + "resultWrapper":"CopyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"}, + {"shape":"DBParameterGroupQuotaExceededFault"} + ] + }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CopyOptionGroup":{ + "name":"CopyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyOptionGroupMessage"}, + "output":{ + "shape":"CopyOptionGroupResult", + "resultWrapper":"CopyOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupNotFoundFault"}, + 
{"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBSubnetGroupNotAllowedFault"}, + {"shape":"InvalidDBSubnetGroupFault"}, + {"shape":"StorageTypeNotSupportedFault"} + ] + }, + 
"CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + 
"shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + 
"resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBLogFiles":{ + "name":"DescribeDBLogFiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBLogFilesMessage"}, + "output":{ + "shape":"DescribeDBLogFilesResponse", + "resultWrapper":"DescribeDBLogFilesResult" + }, + "errors":[ + 
{"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + 
"resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "DownloadDBLogFilePortion":{ + "name":"DownloadDBLogFilePortion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DownloadDBLogFilePortionMessage"}, + "output":{ + "shape":"DownloadDBLogFilePortionDetails", + "resultWrapper":"DownloadDBLogFilePortionResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBLogFileNotFoundFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + 
{"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + 
"resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + "name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + 
{"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "SourceDBParameterGroupIdentifier", + "TargetDBParameterGroupIdentifier", + "TargetDBParameterGroupDescription" + ], + "members":{ + "SourceDBParameterGroupIdentifier":{"shape":"String"}, + "TargetDBParameterGroupIdentifier":{"shape":"String"}, + "TargetDBParameterGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + 
"TargetDBSnapshotIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CopyOptionGroupMessage":{ + "type":"structure", + "required":[ + "SourceOptionGroupIdentifier", + "TargetOptionGroupIdentifier", + "TargetOptionGroupDescription" + ], + "members":{ + "SourceOptionGroupIdentifier":{"shape":"String"}, + "TargetOptionGroupIdentifier":{"shape":"String"}, + "TargetOptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "AllocatedStorage", + "DBInstanceClass", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + 
"StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "DBSubnetGroupName":{"shape":"String"}, + "StorageType":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + ], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + 
"DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + 
"DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMemberships":{"shape":"OptionGroupMembershipList"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"}, + "StatusInfos":{"shape":"DBInstanceStatusInfoList"}, + 
"StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceStatusInfo":{ + "type":"structure", + "members":{ + "StatusType":{"shape":"String"}, + "Normal":{"shape":"Boolean"}, + "Status":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "DBInstanceStatusInfoList":{ + "type":"list", + "member":{ + "shape":"DBInstanceStatusInfo", + "locationName":"DBInstanceStatusInfo" + } + }, + "DBLogFileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBLogFileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + 
"DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + 
"DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PercentProgress":{"shape":"Integer"}, + "SourceRegion":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + 
"locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotAllowedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotAllowedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + 
"members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + 
"Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesDetails":{ + "type":"structure", + "members":{ + "LogFileName":{"shape":"String"}, + "LastWritten":{"shape":"Long"}, + "Size":{"shape":"Long"} + } + }, + "DescribeDBLogFilesList":{ + "type":"list", + "member":{ + "shape":"DescribeDBLogFilesDetails", + "locationName":"DescribeDBLogFilesDetails" + } + }, + "DescribeDBLogFilesMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "FilenameContains":{"shape":"String"}, + "FileLastWritten":{"shape":"Long"}, + "FileSize":{"shape":"Long"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesResponse":{ + "type":"structure", + "members":{ + "DescribeDBLogFiles":{"shape":"DescribeDBLogFilesList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + 
"DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + 
"Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, 
+ "Double":{"type":"double"}, + "DownloadDBLogFilePortionDetails":{ + "type":"structure", + "members":{ + "LogFileData":{"shape":"String"}, + "Marker":{"shape":"String"}, + "AdditionalDataPending":{"shape":"Boolean"} + } + }, + "DownloadDBLogFilePortionMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "LogFileName" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "LogFileName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "NumberOfLines":{"shape":"Integer"} + } + }, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + 
"EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"String"}, + "Values":{"shape":"FilterValueList"} + } + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Value" + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
"InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "Long":{"type":"long"}, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + "MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"}, + 
"StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + 
"OptionSettings":{"shape":"OptionSettingConfigurationList"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"}, + "OptionSettings":{"shape":"OptionSettingsList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, + "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + 
"MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "OptionGroupOptionSettings":{"shape":"OptionGroupOptionSettingsList"} + } + }, + "OptionGroupOptionSetting":{ + "type":"structure", + "members":{ + "SettingName":{"shape":"String"}, + "SettingDescription":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"} + } + }, + "OptionGroupOptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOptionSetting", + "locationName":"OptionGroupOptionSetting" + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionSetting":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + 
"IsCollection":{"shape":"Boolean"} + } + }, + "OptionSettingConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"}, + "StorageType":{"shape":"String"}, + "SupportsIops":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + 
"DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, 
+ "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + 
"shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + 
"RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + 
"TdeCredentialPassword":{"shape":"String"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + 
"exception":true + }, + "StorageTypeNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageTypeNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + 
"locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1932 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a MySQL, PostgreSQL, Microsoft SQL Server, or Oracle database server. This means the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

    This is an interface reference for Amazon RDS. It contains documentation for a programming or command line interface you can use to manage Amazon RDS. Note that Amazon RDS is asynchronous, which means that some interfaces may require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. For a summary of the Amazon RDS interfaces, go to Available RDS Interfaces.

    ", + "operations": { + "AddSourceIdentifierToSubscription": "

    Adds a source identifier to an existing RDS event notification subscription.

    ", + "AddTagsToResource": "

    Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in Condition statement in IAM policy for Amazon RDS.

    For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

    You cannot authorize ingress from an EC2 security group in one Region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "CopyDBParameterGroup": "

    Copies the specified DB parameter group.

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DBSnapshot must be in the \"available\" state.

    ", + "CopyOptionGroup": "

    Copies the specified option group.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB instance that acts as a Read Replica of a source DB instance.

    All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

    The source DB instance must have backup retention enabled.

    ", + "CreateDBParameterGroup": "

    Creates a new DB parameter group.

    A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.

    After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "CreateDBSecurityGroup": "

    Creates a new DB security group. DB security groups control access to a DB instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.

    ", + "CreateOptionGroup": "

    Creates a new option group. You can create up to 20 option groups.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance action deletes a previously provisioned DB instance. A successful response from the web service indicates the request was received correctly. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB snapshots of the DB instance to be deleted are not deleted.

    If a final DB snapshot is requested the status of the RDS instance will be \"deleting\" until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action cannot be canceled or reverted once submitted.

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted cannot be associated with any DB instances.

    The specified DB parameter group cannot be associated with any DB instances. ", + "DeleteDBSecurityGroup": "

    Deletes a DB security group.

    The specified DB security group must not be associated with any DB instances.", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot. If the snapshot is being copied, the copy operation is terminated.

    The DBSnapshot must be in the available state to be deleted.", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing option group.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.

    ", + "DescribeDBLogFiles": "

    Returns a list of DB log files for the DB instance.

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DB parameter group.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DB security group.

    ", + "DescribeDBSnapshots": "

    Returns information about DB snapshots. This API supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB instance options for the specified engine.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB instances for this account, or about a specified reserved DB instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB instance offerings.

    ", + "DownloadDBLogFilePortion": "

    Downloads all or a portion of the specified log file.

    ", + "ListTagsForResource": "

    Lists all tags on an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ModifyDBInstance": "

    Modify settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.

    After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing option group.

    ", + "PromoteReadReplica": "

    Promotes a Read Replica DB instance to a standalone DB instance.

    We recommend that you enable automated backups on your Read Replica before promoting the Read Replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB instance offering.

    ", + "RebootDBInstance": "

    Rebooting a DB instance restarts the database engine service. A reboot also applies to the DB instance any modifications to the associated DB parameter group that were pending. Rebooting a DB instance results in a momentary outage of the instance, during which the DB instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. An Amazon RDS event is created when the reboot is completed.

    If your DB instance is deployed in multiple Availability Zones, you can force a failover from one AZ to the other during the reboot. You might force a failover to test the availability of your DB instance deployment or to restore operations to the original AZ after a failover occurs.

    The time required to reboot is a function of the specific database engine's crash recovery process. To improve the reboot time, we recommend that you reduce database activities as much as possible during the reboot process to reduce rollback activity for in-transit transactions.

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ResetDBParameterGroup": "

    Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the same configuration as the original source database, except that the new RDS instance is created with the default security group.

    If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS does not allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB instance to an arbitrary point-in-time. Users can restore to any point in time before the LatestRestorableTime for up to BackupRetentionPeriod days. The target database is created from the source database with the same configuration as the original database except that the DB instance is created with the default DB security group.

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS may not also be authorized via IAM to perform necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of availability zones for the orderable DB instance.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "DBInstance$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "DBInstanceStatusInfo$Normal": "

    Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

    ", + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted.

    Specify true when deleting a Read Replica.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "DownloadDBLogFilePortionDetails$AdditionalDataPending": "

    Boolean value that if true, indicates there is more data to be downloaded.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

    If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied.

    Default: false

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the option group.

    ", + "Option$Persistent": "

    Indicate if this option is persistent.

    ", + "Option$Permanent": "

    Indicate if this option is permanent.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value true indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OptionGroupOption$Persistent": "

    A persistent option cannot be removed from the option group once the option group is used, but this option can be removed from the db instance while modifying the related data and assigning another option group without this option.

    ", + "OptionGroupOption$Permanent": "

    A permanent option cannot be removed from the option group once the option group is used, and it cannot be removed from the db instance after assigning an option group with this permanent option.

    ", + "OptionGroupOptionSetting$IsModifiable": "

    Boolean value where true indicates that this option group option can be changed from the default value.

    ", + "OptionSetting$IsModifiable": "

    A Boolean value that, when true, indicates the option setting can be modified from the default.

    ", + "OptionSetting$IsCollection": "

    Indicates if the option setting is part of a collection.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB instance can have a Read Replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB instance.

    ", + "OrderableDBInstanceOption$SupportsIops": "

    Indicates whether this orderable DB instance supports provisioned IOPS.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB parameter group to default values.

    Default: true

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not active it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB instance is a Read Replica.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    " + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CopyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBInstanceStatusInfo": { + "base": "

    Provides a list of status information for a DB instance.

    ", + "refs": { + "DBInstanceStatusInfoList$member": null + } + }, + "DBInstanceStatusInfoList": { + "base": null, + "refs": { + "DBInstance$StatusInfos": "

    The status of a Read Replica. If the instance is not a Read Replica, this will be blank.

    " + } + }, + "DBLogFileNotFoundFault": { + "base": "

    LogFileName does not refer to an existing DB log file.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CopyDBParameterGroupResult$DBParameterGroup": null, + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB parameter group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB parameter groups applied to this DB instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides List of DB security group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the option requires access to a port, then this DB security group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to associate with this DB instance.

    Default: The default DB security group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to authorize on this DB instance. Changing this setting does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMemebrship name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotAllowedFault": { + "base": "

    Indicates that the DBSubnetGroup should not be specified while creating read replicas that lie in the same region as the source instance.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet groups.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesDetails": { + "base": "

    This data type is used as a response element to DescribeDBLogFiles.

    ", + "refs": { + "DescribeDBLogFilesList$member": null + } + }, + "DescribeDBLogFilesList": { + "base": null, + "refs": { + "DescribeDBLogFilesResponse$DescribeDBLogFiles": "

    The DB log files returned.

    " + } + }, + "DescribeDBLogFilesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesResponse": { + "base": "

    The response from a call to DescribeDBLogFiles.

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "DownloadDBLogFilePortionDetails": { + "base": "

    This data type is used as a response element to DownloadDBLogFilePortion.

    ", + "refs": { + } + }, + "DownloadDBLogFilePortionMessage": { + "base": "

    ", + "refs": { + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for a event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "Filter": { + "base": null, + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeDBEngineVersionsMessage$Filters": "

    Not currently supported.

    ", + "DescribeDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBLogFilesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParameterGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSecurityGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSnapshotsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSubnetGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEngineDefaultParametersMessage$Filters": "

    Not currently supported.

    ", + "DescribeEventCategoriesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventSubscriptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeReservedDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Filters": "

    This parameter is not currently supported.

    ", + "ListTagsForResourceMessage$Filters": "

    This parameter is not currently supported.

    " + } + }, + "FilterValueList": { + "base": null, + "refs": { + "Filter$Values": "

    This parameter is not currently supported.

    " + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    Specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size specified in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB snapshots are retained.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "DBSnapshot$PercentProgress": "

    The percentage of the estimated data that has been transferred.

    ", + "DownloadDBLogFilePortionMessage$NumberOfLines": "

    The number of lines to download.

    If the NumberOfLines parameter is specified, then the block of lines returned can be from the beginning or the end of the log file, depending on the value of the Marker parameter.

    • If neither Marker or NumberOfLines are specified, the entire log file is returned.

    • If NumberOfLines is specified and Marker is not specified, then the most recent lines from the end of the log file are returned.

    • If Marker is specified as \"0\", then the specified number of lines from the beginning of the log file are returned.

    • You can download the log file in blocks of lines by specifying the size of the block using the NumberOfLines parameter, and by specifying a value of \"0\" for the Marker parameter in your first request. Include the Marker value returned in the response as the Marker value for the next request, continuing until the AdditionalDataPending response element returns false.

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    Type: Integer

    MySQL

    Constraints: Must be an integer from 5 to 3072.

    PostgreSQL

    Constraints: Must be an integer from 5 to 3072.

    Oracle

    Constraints: Must be an integer from 10 to 3072.

    SQL Server

    Constraints: Must be an integer from 200 to 1024 (Standard Edition and Enterprise Edition) or from 20 to 1024 (Express Edition and Web Edition)

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 35
    • Cannot be set to 0 if the DB instance is a source to Read Replicas
    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    PostgreSQL

    Default: 5432

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: To use PIOPS, this value must be an integer greater than 1000.

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB instance uses for connections.

    Default: Inherits from the source DB instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBLogFilesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results may be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: minimum 20, maximum 100

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless ApplyImmediately is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    PostgreSQL

    Default: Uses existing setting

    Valid Values: 5-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-3072

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 35
    • Can be specified for a MySQL Read Replica only if the source is running MySQL 5.6
    • Can be specified for a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5
    • Cannot be set to 0 if the DB instance is a source to Read Replicas
    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    Type: Integer

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but may experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

    ", + "Option$Port": "

    If required, the port configured for this option to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    " + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupFault": { + "base": "

    Indicates the DBSubnetGroup does not belong to the same VPC as that of an existing cross region read replica of the same source instance.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from vpc backup to non-vpc DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    DB subnet group does not cover all Availability Zones after it is created because of users' change.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "DescribeDBLogFilesDetails$LastWritten": "

    A POSIX timestamp when the last log entry was written.

    ", + "DescribeDBLogFilesDetails$Size": "

    The size, in bytes, of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$FileLastWritten": "

    Filters the available log files for files written since the specified date, in POSIX timestamp format.

    ", + "DescribeDBLogFilesMessage$FileSize": "

    Filters the available log files for files larger than the specified size.

    " + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the option group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CopyOptionGroupResult$OptionGroup": null, + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": "

    Provides information on the option groups the DB instance is a member of.

    ", + "refs": { + "OptionGroupMembershipList$member": null + } + }, + "OptionGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$OptionGroupMemberships": "

    Provides the list of option group memberships for this DB instance.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionSetting": { + "base": "

    Option group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.

    ", + "refs": { + "OptionGroupOptionSettingsList$member": null + } + }, + "OptionGroupOptionSettingsList": { + "base": null, + "refs": { + "OptionGroupOption$OptionGroupOptionSettings": "

    Specifies the option settings that are available (and the default value) for each option in an option group.

    " + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available option group options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the option group.

    " + } + }, + "OptionSetting": { + "base": "

    Option settings are the actual settings being applied or configured for that option. It is used when you modify an option group or describe option groups. For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER that can have several different values.

    ", + "refs": { + "OptionSettingConfigurationList$member": null, + "OptionSettingsList$member": null + } + }, + "OptionSettingConfigurationList": { + "base": null, + "refs": { + "Option$OptionSettings": "

    The option settings for this option.

    " + } + }, + "OptionSettingsList": { + "base": null, + "refs": { + "OptionConfiguration$OptionSettings": "

    The option settings to include in an option group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB instance

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBParameterGroupDetails$Parameters": "

    A list of Parameter values.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover. ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters may be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot

    " + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB instance.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB Instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.
    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "EventSubscription$SourceIdsList": "

    A list of source IDs for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "StorageTypeNotSupportedFault": { + "base": "

    StorageType specified cannot be associated with the DB Instance.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBParameterGroupMessage$SourceDBParameterGroupIdentifier": "

    The identifier or ARN for the source DB parameter group.

    Constraints:

    • Must specify a valid DB parameter group.
    • If the source DB parameter group is in the same region as the copy, specify a valid DB parameter group identifier, for example my-db-param-group, or a valid ARN.
    • If the source DB parameter group is in a different region than the copy, specify a valid DB parameter group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters.
    ", + "CopyDBParameterGroupMessage$TargetDBParameterGroupIdentifier": "

    The identifier for the copied DB parameter group.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-parameter-group

    ", + "CopyDBParameterGroupMessage$TargetDBParameterGroupDescription": "

    A description for the copied DB parameter group.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    Constraints:

    • Must specify a valid system snapshot in the \"available\" state.
    • If the source snapshot is in the same region as the copy, specify a valid DB snapshot identifier.
    • If the source snapshot is in a different region than the copy, specify a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot.

    Example: rds:mydb-2012-04-02-00-01

    Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot

    ", + "CopyOptionGroupMessage$SourceOptionGroupIdentifier": "

    The identifier or ARN for the source option group.

    Constraints:

    • Must specify a valid option group.
    • If the source option group is in the same region as the copy, specify a valid option group identifier, for example my-option-group, or a valid ARN.
    • If the source option group is in a different region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options.
    ", + "CopyOptionGroupMessage$TargetOptionGroupIdentifier": "

    The identifier for the copied option group.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-option-group

    ", + "CopyOptionGroupMessage$TargetOptionGroupDescription": "

    The description for the copied option group.

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    Type: String

    MySQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    PostgreSQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 63 alphanumeric characters
    • Must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0-9).
    • Cannot be a word reserved by the specified database engine

    Oracle

    The Oracle System ID (SID) of the created DB instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres

    Not every database engine is available for every AWS region.

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of the master user for the client DB instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 characters.

    Oracle

    Constraints: Must contain from 8 to 30 characters.

    SQL Server

    Constraints: Must contain from 8 to 128 characters.

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in. For information on regions and Availability Zones, see Regions and Availability Zones.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB subnet group to associate with this DB instance.

    If there is no DB subnet group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur. For more information, see DB Instance Maintenance.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see DB Instance Backups.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned.

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS region.

    MySQL

    • Version 5.1: 5.1.45 | 5.1.49 | 5.1.50 | 5.1.57 | 5.1.61 | 5.1.62 | 5.1.63 | 5.1.69 | 5.1.71 | 5.1.73
    • Version 5.5: 5.5.12 | 5.5.20 | 5.5.23 | 5.5.25a | 5.5.27 | 5.5.31 | 5.5.33 | 5.5.37 | 5.5.38 | 5.5.8
    • Version 5.6: 5.6.12 | 5.6.13 | 5.6.17 | 5.6.19 | 5.6.21

    Oracle Database Enterprise Edition (oracle-ee)

    • Version 11.2: 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 | 11.2.0.3.v1 | 11.2.0.4.v1

    Oracle Database Standard Edition (oracle-se)

    • Version 11.2: 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 | 11.2.0.3.v1 | 11.2.0.4.v1

    Oracle Database Standard Edition One (oracle-se1)

    • Version 11.2: 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 | 11.2.0.3.v1 | 11.2.0.4.v1

    PostgreSQL

    • Version 9.3: 9.3.1 | 9.3.2 | 9.3.3

    Microsoft SQL Server Enterprise Edition (sqlserver-ee)

    • Version 10.5: 10.50.2789.0.v1
    • Version 11.0: 11.00.2100.60.v1

    Microsoft SQL Server Express Edition (sqlserver-ex)

    • Version 10.5: 10.50.2789.0.v1
    • Version 11.0: 11.00.2100.60.v1

    Microsoft SQL Server Standard Edition (sqlserver-se)

    • Version 10.5: 10.50.2789.0.v1
    • Version 11.0: 11.00.2100.60.v1

    Microsoft SQL Server Web Edition (sqlserver-web)

    • Version 10.5: 10.50.2789.0.v1
    • Version 11.0: 11.00.2100.60.v1
    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceMessage$StorageType": "

    Specifies storage type to be associated with the DB Instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "CreateDBInstanceMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "CreateDBInstanceMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier of the Read Replica. This is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

    Constraints:

    • Must be the identifier of an existing DB instance.
    • Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6.
    • Can specify a DB instance that is a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5.
    • The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0.
    • If the source DB instance is in the same region as the Read Replica, specify a valid DB instance identifier.
    • If the source DB instance is in a different region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing an Amazon RDS Amazon Resource Name (ARN).
    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the Read Replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    Default: Inherits from the source DB instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the Read Replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used.

    ", + "CreateDBInstanceReadReplicaMessage$DBSubnetGroupName": "

    Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

    Constraints:

    • Can only be specified if the source DB instance identifier specifies a DB instance in another region.
    • The specified DB subnet group must be in the same region in which the operation is running.
    • All Read Replicas in one region that are created from the same source DB instance must either:
      • Specify DB subnet groups from the same VPC. All these Read Replicas will be created in the same VPC.
      • Not specify a DB subnet group. All these Read Replicas will be created outside of any VPC.
    ", + "CreateDBInstanceReadReplicaMessage$StorageType": "

    Specifies storage type to be associated with the DB Instance Read Replica.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lower-case string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB parameter group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB security group. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • May not contain spaces

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB security group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB instance identifier. This is the unique key that identifies a DB instance.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myoptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DB parameter group family for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This is the unique key that identifies a DB instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use. For example, this value returns either MySQL or PostgreSQL information when returning values from CreateDBInstanceReadReplica since Read Replicas are only supported for MySQL and PostgreSQL.

    MySQL, SQL Server, PostgreSQL

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range (in UTC) during which system maintenance can occur.

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB instance if this DB instance is a Read Replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstance$StorageType": "

    Specifies storage type associated with DB Instance.

    ", + "DBInstance$TdeCredentialArn": "

    The ARN from the Key Store with which the instance is associated for TDE encryption.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DBInstanceStatusInfo$StatusType": "

    This value is currently \"read replication.\"

    ", + "DBInstanceStatusInfo$Status": "

    Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

    ", + "DBInstanceStatusInfo$Message": "

    Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB parameter group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB parameter group family that this DB parameter group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB parameter group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DB parameter group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB security group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB security group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB security group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB security group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DB instance identifier of the DB instance this DB snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the Vpc Id associated with the DB snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB snapshot.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB snapshot.

    ", + "DBSnapshot$OptionGroupName": "

    Provides the option group name for the DB snapshot.

    ", + "DBSnapshot$SourceRegion": "

    The region that the DB snapshot was created in or copied from.

    ", + "DBSnapshot$StorageType": "

    Specifies storage type associated with DB Snapshot.

    ", + "DBSnapshot$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB subnet group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB subnet group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB subnet group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB subnet group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier for the DB instance to be deleted. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Cannot be specified when deleting a Read Replica.
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • You cannot delete a default DB parameter group
    • Cannot be associated with any DB instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB security group to delete.

    You cannot delete the default DB security group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • May not contain spaces
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default option groups.", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeDBLogFilesDetails$LogFileName": "

    The name of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBLogFilesMessage$FilenameContains": "

    Filters the available log files for log file names that contain the specified string.

    ", + "DescribeDBLogFilesMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords.

    ", + "DescribeDBLogFilesResponse$Marker": "

    A pagination token that can be used in a subsequent DescribeDBLogFiles request.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB security group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB instance identifier to retrieve the list of DB snapshots for. Cannot be used in conjunction with DBSnapshotIdentifier. This parameter is not case sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB snapshot identifier to describe. Cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    The type of snapshots that will be returned. Values can be \"automated\" or \"manual.\" If not specified, the returned results will include all snapshots types.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB subnet group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB parameter group family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    A required parameter. Options available for the given Engine name will be described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instances class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Light Utilization\" | \"Medium Utilization\" | \"Heavy Utilization\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DownloadDBLogFilePortionDetails$LogFileData": "

    Entries from the specified log file.

    ", + "DownloadDBLogFilePortionDetails$Marker": "

    A pagination token that can be used in a subsequent DownloadDBLogFilePortion request.

    ", + "DownloadDBLogFilePortionMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DownloadDBLogFilePortionMessage$LogFileName": "

    The name of the log file to be downloaded.

    ", + "DownloadDBLogFilePortionMessage$Marker": "

    The pagination token provided in the previous request or \"0\". If the Marker parameter is specified the response includes only records beyond the marker until the end of the file or up to NumberOfLines.

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the id of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB instance.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB parameter group family which the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Filter$Name": "

    This parameter is not currently supported.

    ", + "FilterValueList$member": null, + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this setting causes an outage during the change and is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB instance master user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a master instance user if the password is lost. This includes restoring privileges that may have been accidentally revoked. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.

    Default: Uses existing setting

    Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

    For a list of valid engine versions, see CreateDBInstance.

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately to false. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$StorageType": "

    Specifies storage type to be associated with the DB Instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "ModifyDBInstanceMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "ModifyDBInstanceMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. if this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The configuration of options to include in a group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides a description of the option group.

    ", + "OptionGroup$EngineName": "

    Engine name that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is false, this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is true and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    The name of the engine that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionSetting$SettingName": "

    The name of the option group option.

    ", + "OptionGroupOptionSetting$SettingDescription": "

    The description of the option group option.

    ", + "OptionGroupOptionSetting$DefaultValue": "

    The default value for the option group option.

    ", + "OptionGroupOptionSetting$ApplyType": "

    The DB engine specific parameter type for the option group option.

    ", + "OptionGroupOptionSetting$AllowedValues": "

    Indicates the acceptable values for the option group option.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionNamesList$member": null, + "OptionSetting$Name": "

    The name of the option that has settings that you can set.

    ", + "OptionSetting$Value": "

    The current value of the option setting.

    ", + "OptionSetting$DefaultValue": "

    The default value of the option setting.

    ", + "OptionSetting$Description": "

    The description of the option setting.

    ", + "OptionSetting$ApplyType": "

    The DB engine specific parameter type.

    ", + "OptionSetting$DataType": "

    The data type of the option setting.

    ", + "OptionSetting$AllowedValues": "

    The allowed values of the option setting.

    ", + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB instance Class for the orderable DB instance

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB instance.

    ", + "OrderableDBInstanceOption$StorageType": "

    The storage type for this orderable DB instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine specific parameters type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$StorageType": "

    Specifies storage type to be associated with the DB instance.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing Read Replica DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned.

    Constraints: Must be in the format hh24:mi-hh24:mi. Times should be Universal Time Coordinated (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    Name of the DB instance to create from the DB snapshot. This parameter isn't case sensitive.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to restore from.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB instance.

    This parameter doesn't apply to the MySQL engine.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "RestoreDBInstanceFromDBSnapshotMessage$StorageType": "

    Specifies storage type to be associated with the DB Instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium

    Default: The same DBInstanceClass as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB instance.

    This parameter is not used for the MySQL engine.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Valid Values: MySQL | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "RestoreDBInstanceToPointInTimeMessage$StorageType": "

    Specifies storage type to be associated with the DB Instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "RestoreDBInstanceToPointInTimeMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "RestoreDBInstanceToPointInTimeMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The id of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string may only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string may only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC security group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB subnet group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the DB subnet group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time (UTC) when the snapshot was taken.

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time (UTC) when the snapshot was taken.

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a UTC time

    Constraints:

    • Must be before the latest restorable time for the DB instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the Amazon RDS resource.

    ", + "CopyDBParameterGroupMessage$Tags": null, + "CopyDBSnapshotMessage$Tags": null, + "CopyOptionGroupMessage$Tags": null, + "CreateDBInstanceMessage$Tags": null, + "CreateDBInstanceReadReplicaMessage$Tags": null, + "CreateDBParameterGroupMessage$Tags": null, + "CreateDBSecurityGroupMessage$Tags": null, + "CreateDBSnapshotMessage$Tags": null, + "CreateDBSubnetGroupMessage$Tags": null, + "CreateEventSubscriptionMessage$Tags": null, + "CreateOptionGroupMessage$Tags": null, + "PurchaseReservedDBInstancesOfferingMessage$Tags": null, + "RestoreDBInstanceFromDBSnapshotMessage$Tags": null, + "RestoreDBInstanceToPointInTimeMessage$Tags": null, + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to associate with this DB instance.

    Default: The default EC2 VPC security group for the DB subnet group's VPC.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMemebrship name strings used for this option.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$VpcSecurityGroups": "

    Provides List of VPC security group elements that the DB instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the option requires access to a port, then this VPC security group allows access to the port.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-09-01/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4537 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-10-31", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon RDS", + "serviceFullName":"Amazon Relational Database Service", + "signatureVersion":"v4", + "xmlNamespace":"http://rds.amazonaws.com/doc/2014-10-31/" + }, + "operations":{ + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "AddTagsToResource":{ + 
"name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ApplyPendingMaintenanceAction":{ + "name":"ApplyPendingMaintenanceAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ApplyPendingMaintenanceActionMessage"}, + "output":{ + "shape":"ApplyPendingMaintenanceActionResult", + "resultWrapper":"ApplyPendingMaintenanceActionResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "AuthorizeDBSecurityGroupIngress":{ + "name":"AuthorizeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeDBSecurityGroupIngressResult", + "resultWrapper":"AuthorizeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"AuthorizationAlreadyExistsFault"}, + {"shape":"AuthorizationQuotaExceededFault"} + ] + }, + "CopyDBClusterSnapshot":{ + "name":"CopyDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBClusterSnapshotMessage"}, + "output":{ + "shape":"CopyDBClusterSnapshotResult", + "resultWrapper":"CopyDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ] + }, + "CopyDBParameterGroup":{ + "name":"CopyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBParameterGroupMessage"}, + "output":{ + "shape":"CopyDBParameterGroupResult", + "resultWrapper":"CopyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"}, + {"shape":"DBParameterGroupQuotaExceededFault"} + ] 
+ }, + "CopyDBSnapshot":{ + "name":"CopyDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBSnapshotMessage"}, + "output":{ + "shape":"CopyDBSnapshotResult", + "resultWrapper":"CopyDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CopyOptionGroup":{ + "name":"CopyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyOptionGroupMessage"}, + "output":{ + "shape":"CopyOptionGroupResult", + "resultWrapper":"CopyOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "CreateDBCluster":{ + "name":"CreateDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterMessage"}, + "output":{ + "shape":"CreateDBClusterResult", + "resultWrapper":"CreateDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "CreateDBClusterParameterGroup":{ + "name":"CreateDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterParameterGroupMessage"}, + "output":{ + "shape":"CreateDBClusterParameterGroupResult", + "resultWrapper":"CreateDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} 
+ ] + }, + "CreateDBClusterSnapshot":{ + "name":"CreateDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterSnapshotMessage"}, + "output":{ + "shape":"CreateDBClusterSnapshotResult", + "resultWrapper":"CreateDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "CreateDBInstanceReadReplica":{ + "name":"CreateDBInstanceReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceReadReplicaMessage"}, + "output":{ + "shape":"CreateDBInstanceReadReplicaResult", + "resultWrapper":"CreateDBInstanceReadReplicaResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + 
{"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBSubnetGroupNotAllowedFault"}, + {"shape":"InvalidDBSubnetGroupFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ] + }, + "CreateDBSecurityGroup":{ + "name":"CreateDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSecurityGroupMessage"}, + "output":{ + "shape":"CreateDBSecurityGroupResult", + "resultWrapper":"CreateDBSecurityGroupResult" + }, + "errors":[ + {"shape":"DBSecurityGroupAlreadyExistsFault"}, + {"shape":"DBSecurityGroupQuotaExceededFault"}, + {"shape":"DBSecurityGroupNotSupportedFault"} + ] + }, + "CreateDBSnapshot":{ + "name":"CreateDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSnapshotMessage"}, + "output":{ + "shape":"CreateDBSnapshotResult", + "resultWrapper":"CreateDBSnapshotResult" + }, + "errors":[ + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "CreateOptionGroup":{ + "name":"CreateOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptionGroupMessage"}, + "output":{ + "shape":"CreateOptionGroupResult", + "resultWrapper":"CreateOptionGroupResult" + }, + "errors":[ + {"shape":"OptionGroupAlreadyExistsFault"}, + {"shape":"OptionGroupQuotaExceededFault"} + ] + }, + "DeleteDBCluster":{ + "name":"DeleteDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterMessage"}, + "output":{ + "shape":"DeleteDBClusterResult", + "resultWrapper":"DeleteDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ] + }, + "DeleteDBClusterParameterGroup":{ + "name":"DeleteDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + 
] + }, + "DeleteDBClusterSnapshot":{ + "name":"DeleteDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterSnapshotMessage"}, + "output":{ + "shape":"DeleteDBClusterSnapshotResult", + "resultWrapper":"DeleteDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"} + ] + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"} + ] + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DeleteDBSecurityGroup":{ + "name":"DeleteDBSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSecurityGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DeleteDBSnapshot":{ + "name":"DeleteDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSnapshotMessage"}, + "output":{ + "shape":"DeleteDBSnapshotResult", + "resultWrapper":"DeleteDBSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + 
{"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ] + }, + "DeleteOptionGroup":{ + "name":"DeleteOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptionGroupMessage"}, + "errors":[ + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"InvalidOptionGroupStateFault"} + ] + }, + "DescribeAccountAttributes":{ + "name":"DescribeAccountAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountAttributesMessage"}, + "output":{ + "shape":"AccountAttributesMessage", + "resultWrapper":"DescribeAccountAttributesResult" + } + }, + "DescribeCertificates":{ + "name":"DescribeCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCertificatesMessage"}, + "output":{ + "shape":"CertificateMessage", + "resultWrapper":"DescribeCertificatesResult" + }, + "errors":[ + {"shape":"CertificateNotFoundFault"} + ] + }, + "DescribeDBClusterParameterGroups":{ + "name":"DescribeDBClusterParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterParameterGroupsMessage"}, + "output":{ + "shape":"DBClusterParameterGroupsMessage", + "resultWrapper":"DescribeDBClusterParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBClusterParameters":{ + "name":"DescribeDBClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterParametersMessage"}, + "output":{ + "shape":"DBClusterParameterGroupDetails", + 
"resultWrapper":"DescribeDBClusterParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBClusterSnapshots":{ + "name":"DescribeDBClusterSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterSnapshotsMessage"}, + "output":{ + "shape":"DBClusterSnapshotMessage", + "resultWrapper":"DescribeDBClusterSnapshotsResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"} + ] + }, + "DescribeDBClusters":{ + "name":"DescribeDBClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClustersMessage"}, + "output":{ + "shape":"DBClusterMessage", + "resultWrapper":"DescribeDBClustersResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"} + ] + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + } + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBLogFiles":{ + "name":"DescribeDBLogFiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBLogFilesMessage"}, + "output":{ + "shape":"DescribeDBLogFilesResponse", + "resultWrapper":"DescribeDBLogFilesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + 
"resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "DescribeDBSecurityGroups":{ + "name":"DescribeDBSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSecurityGroupsMessage"}, + "output":{ + "shape":"DBSecurityGroupMessage", + "resultWrapper":"DescribeDBSecurityGroupsResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "DescribeDBSnapshotAttributes":{ + "name":"DescribeDBSnapshotAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotAttributesMessage"}, + "output":{ + "shape":"DescribeDBSnapshotAttributesResult", + "resultWrapper":"DescribeDBSnapshotAttributesResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSnapshots":{ + "name":"DescribeDBSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSnapshotsMessage"}, + "output":{ + "shape":"DBSnapshotMessage", + "resultWrapper":"DescribeDBSnapshotsResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ] + }, + "DescribeEngineDefaultClusterParameters":{ + "name":"DescribeEngineDefaultClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeEngineDefaultClusterParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultClusterParametersResult", + "resultWrapper":"DescribeEngineDefaultClusterParametersResult" + } + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeOptionGroupOptions":{ + "name":"DescribeOptionGroupOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupOptionsMessage"}, + "output":{ + "shape":"OptionGroupOptionsMessage", + "resultWrapper":"DescribeOptionGroupOptionsResult" + } + }, + "DescribeOptionGroups":{ + "name":"DescribeOptionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptionGroupsMessage"}, + "output":{ + "shape":"OptionGroups", + "resultWrapper":"DescribeOptionGroupsResult" + }, + "errors":[ + 
{"shape":"OptionGroupNotFoundFault"} + ] + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + } + }, + "DescribePendingMaintenanceActions":{ + "name":"DescribePendingMaintenanceActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePendingMaintenanceActionsMessage"}, + "output":{ + "shape":"PendingMaintenanceActionsMessage", + "resultWrapper":"DescribePendingMaintenanceActionsResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeReservedDBInstances":{ + "name":"DescribeReservedDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesMessage"}, + "output":{ + "shape":"ReservedDBInstanceMessage", + "resultWrapper":"DescribeReservedDBInstancesResult" + }, + "errors":[ + {"shape":"ReservedDBInstanceNotFoundFault"} + ] + }, + "DescribeReservedDBInstancesOfferings":{ + "name":"DescribeReservedDBInstancesOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedDBInstancesOfferingsMessage"}, + "output":{ + "shape":"ReservedDBInstancesOfferingMessage", + "resultWrapper":"DescribeReservedDBInstancesOfferingsResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"} + ] + }, + "DownloadDBLogFilePortion":{ + "name":"DownloadDBLogFilePortion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DownloadDBLogFilePortionMessage"}, + "output":{ + "shape":"DownloadDBLogFilePortionDetails", + "resultWrapper":"DownloadDBLogFilePortionResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBLogFileNotFoundFault"} + ] + }, + "FailoverDBCluster":{ + "name":"FailoverDBCluster", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverDBClusterMessage"}, + "output":{ + "shape":"FailoverDBClusterResult", + "resultWrapper":"FailoverDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ModifyDBCluster":{ + "name":"ModifyDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterMessage"}, + "output":{ + "shape":"ModifyDBClusterResult", + "resultWrapper":"ModifyDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBClusterAlreadyExistsFault"} + ] + }, + "ModifyDBClusterParameterGroup":{ + "name":"ModifyDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterParameterGroupMessage"}, + "output":{ + "shape":"DBClusterParameterGroupNameMessage", + "resultWrapper":"ModifyDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + 
"shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"CertificateNotFoundFault"} + ] + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ] + }, + "ModifyDBSnapshotAttribute":{ + "name":"ModifyDBSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSnapshotAttributeMessage"}, + "output":{ + "shape":"ModifyDBSnapshotAttributeResult", + "resultWrapper":"ModifyDBSnapshotAttributeResult" + }, + "errors":[ + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"SharedSnapshotQuotaExceededFault"} + ] + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + 
{"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ] + }, + "ModifyOptionGroup":{ + "name":"ModifyOptionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyOptionGroupMessage"}, + "output":{ + "shape":"ModifyOptionGroupResult", + "resultWrapper":"ModifyOptionGroupResult" + }, + "errors":[ + {"shape":"InvalidOptionGroupStateFault"}, + {"shape":"OptionGroupNotFoundFault"} + ] + }, + "PromoteReadReplica":{ + "name":"PromoteReadReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaMessage"}, + "output":{ + "shape":"PromoteReadReplicaResult", + "resultWrapper":"PromoteReadReplicaResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "PurchaseReservedDBInstancesOffering":{ + "name":"PurchaseReservedDBInstancesOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedDBInstancesOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedDBInstancesOfferingResult", + "resultWrapper":"PurchaseReservedDBInstancesOfferingResult" + }, + "errors":[ + {"shape":"ReservedDBInstancesOfferingNotFoundFault"}, + {"shape":"ReservedDBInstanceAlreadyExistsFault"}, + {"shape":"ReservedDBInstanceQuotaExceededFault"} + ] + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ] + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"} + ] + }, + "ResetDBClusterParameterGroup":{ + "name":"ResetDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBClusterParameterGroupMessage"}, + "output":{ + "shape":"DBClusterParameterGroupNameMessage", + "resultWrapper":"ResetDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ] + }, + "RestoreDBClusterFromSnapshot":{ + "name":"RestoreDBClusterFromSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"RestoreDBClusterFromSnapshotMessage"}, + "output":{ + "shape":"RestoreDBClusterFromSnapshotResult", + "resultWrapper":"RestoreDBClusterFromSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InsufficientDBClusterCapacityFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "RestoreDBClusterToPointInTime":{ + "name":"RestoreDBClusterToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBClusterToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBClusterToPointInTimeResult", + "resultWrapper":"RestoreDBClusterToPointInTimeResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InsufficientDBClusterCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ] + }, + "RestoreDBInstanceFromDBSnapshot":{ + 
"name":"RestoreDBInstanceFromDBSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceFromDBSnapshotMessage"}, + "output":{ + "shape":"RestoreDBInstanceFromDBSnapshotResult", + "resultWrapper":"RestoreDBInstanceFromDBSnapshotResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "RestoreDBInstanceToPointInTime":{ + "name":"RestoreDBInstanceToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBInstanceToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBInstanceToPointInTimeResult", + "resultWrapper":"RestoreDBInstanceToPointInTimeResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"PointInTimeRestoreNotEnabledFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + 
{"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DBSecurityGroupNotFoundFault"} + ] + }, + "RevokeDBSecurityGroupIngress":{ + "name":"RevokeDBSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeDBSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeDBSecurityGroupIngressResult", + "resultWrapper":"RevokeDBSecurityGroupIngressResult" + }, + "errors":[ + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"} + ] + } + }, + "shapes":{ + "AccountAttributesMessage":{ + "type":"structure", + "members":{ + "AccountQuotas":{"shape":"AccountQuotaList"} + } + }, + "AccountQuota":{ + "type":"structure", + "members":{ + "AccountQuotaName":{"shape":"String"}, + "Used":{"shape":"Long"}, + "Max":{"shape":"Long"} + }, + "wrapper":true + }, + "AccountQuotaList":{ + "type":"list", + "member":{ + "shape":"AccountQuota", + "locationName":"AccountQuota" + } + }, + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "ApplyPendingMaintenanceActionMessage":{ + "type":"structure", + "required":[ + "ResourceIdentifier", + "ApplyAction", + "OptInType" + ], + "members":{ + "ResourceIdentifier":{"shape":"String"}, + "ApplyAction":{"shape":"String"}, + 
"OptInType":{"shape":"String"} + } + }, + "ApplyPendingMaintenanceActionResult":{ + "type":"structure", + "members":{ + "ResourcePendingMaintenanceActions":{"shape":"ResourcePendingMaintenanceActions"} + } + }, + "AttributeValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AttributeValue" + } + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "AvailabilityZones":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "Certificate":{ + "type":"structure", + "members":{ + "CertificateIdentifier":{"shape":"String"}, + "CertificateType":{"shape":"String"}, + 
"Thumbprint":{"shape":"String"}, + "ValidFrom":{"shape":"TStamp"}, + "ValidTill":{"shape":"TStamp"} + }, + "wrapper":true + }, + "CertificateList":{ + "type":"list", + "member":{ + "shape":"Certificate", + "locationName":"Certificate" + } + }, + "CertificateMessage":{ + "type":"structure", + "members":{ + "Certificates":{"shape":"CertificateList"}, + "Marker":{"shape":"String"} + } + }, + "CertificateNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{"shape":"String"}, + "CharacterSetDescription":{"shape":"String"} + } + }, + "CopyDBClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBClusterSnapshotIdentifier", + "TargetDBClusterSnapshotIdentifier" + ], + "members":{ + "SourceDBClusterSnapshotIdentifier":{"shape":"String"}, + "TargetDBClusterSnapshotIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "CopyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "SourceDBParameterGroupIdentifier", + "TargetDBParameterGroupIdentifier", + "TargetDBParameterGroupDescription" + ], + "members":{ + "SourceDBParameterGroupIdentifier":{"shape":"String"}, + "TargetDBParameterGroupIdentifier":{"shape":"String"}, + "TargetDBParameterGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CopyDBSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBSnapshotIdentifier", + "TargetDBSnapshotIdentifier" + ], + "members":{ + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "TargetDBSnapshotIdentifier":{"shape":"String"}, + 
"Tags":{"shape":"TagList"}, + "CopyTags":{"shape":"BooleanOptional"} + } + }, + "CopyDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CopyOptionGroupMessage":{ + "type":"structure", + "required":[ + "SourceOptionGroupIdentifier", + "TargetOptionGroupIdentifier", + "TargetOptionGroupDescription" + ], + "members":{ + "SourceOptionGroupIdentifier":{"shape":"String"}, + "TargetOptionGroupIdentifier":{"shape":"String"}, + "TargetOptionGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CopyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "CreateDBClusterMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "Engine", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "CharacterSetName":{"shape":"String"}, + "DatabaseName":{"shape":"String"}, + "DBClusterIdentifier":{"shape":"String"}, + "DBClusterParameterGroupName":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "DBSubnetGroupName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "OptionGroupName":{"shape":"String"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "StorageEncrypted":{"shape":"BooleanOptional"}, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBClusterParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + 
"Tags":{"shape":"TagList"} + } + }, + "CreateDBClusterParameterGroupResult":{ + "type":"structure", + "members":{ + "DBClusterParameterGroup":{"shape":"DBClusterParameterGroup"} + } + }, + "CreateDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "CreateDBClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "DBClusterSnapshotIdentifier", + "DBClusterIdentifier" + ], + "members":{ + "DBClusterSnapshotIdentifier":{"shape":"String"}, + "DBClusterIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBInstanceClass", + "Engine" + ], + "members":{ + "DBName":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CharacterSetName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "DBClusterIdentifier":{"shape":"String"}, + 
"StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"}, + "StorageEncrypted":{"shape":"BooleanOptional"}, + "KmsKeyId":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "MonitoringRoleArn":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "SourceDBInstanceIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SourceDBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "DBSubnetGroupName":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "MonitoringRoleArn":{"shape":"String"} + } + }, + "CreateDBInstanceReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSecurityGroupMessage":{ + "type":"structure", + "required":[ + "DBSecurityGroupName", + "DBSecurityGroupDescription" + 
], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSecurityGroupResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "CreateDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBSnapshotIdentifier", + "DBInstanceIdentifier" + ], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "SourceIds":{"shape":"SourceIdsList"}, + "Enabled":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateOptionGroupMessage":{ + "type":"structure", + "required":[ + "OptionGroupName", + "EngineName", + "MajorEngineVersion", + "OptionGroupDescription" + ], + "members":{ + "OptionGroupName":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + 
"Tags":{"shape":"TagList"} + } + }, + "CreateOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "DBCluster":{ + "type":"structure", + "members":{ + "AllocatedStorage":{"shape":"IntegerOptional"}, + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "CharacterSetName":{"shape":"String"}, + "DatabaseName":{"shape":"String"}, + "DBClusterIdentifier":{"shape":"String"}, + "DBClusterParameterGroup":{"shape":"String"}, + "DBSubnetGroup":{"shape":"String"}, + "Status":{"shape":"String"}, + "PercentProgress":{"shape":"String"}, + "EarliestRestorableTime":{"shape":"TStamp"}, + "Endpoint":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "Port":{"shape":"IntegerOptional"}, + "MasterUsername":{"shape":"String"}, + "DBClusterOptionGroupMemberships":{"shape":"DBClusterOptionGroupMemberships"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "DBClusterMembers":{"shape":"DBClusterMemberList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "HostedZoneId":{"shape":"String"}, + "StorageEncrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"}, + "DbClusterResourceId":{"shape":"String"} + }, + "wrapper":true + }, + "DBClusterAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterList":{ + "type":"list", + "member":{ + "shape":"DBCluster", + "locationName":"DBCluster" + } + }, + "DBClusterMember":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "IsClusterWriter":{"shape":"Boolean"}, + "DBClusterParameterGroupStatus":{"shape":"String"} + }, + "wrapper":true + }, + "DBClusterMemberList":{ + "type":"list", + "member":{ + 
"shape":"DBClusterMember", + "locationName":"DBClusterMember" + } + }, + "DBClusterMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBClusters":{"shape":"DBClusterList"} + } + }, + "DBClusterNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterOptionGroupMemberships":{ + "type":"list", + "member":{ + "shape":"DBClusterOptionGroupStatus", + "locationName":"DBClusterOptionGroup" + } + }, + "DBClusterOptionGroupStatus":{ + "type":"structure", + "members":{ + "DBClusterOptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBClusterParameterGroup":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBClusterParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBClusterParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBClusterParameterGroup", + "locationName":"DBClusterParameterGroup" + } + }, + "DBClusterParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{"shape":"String"} + } + }, + "DBClusterParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBClusterParameterGroups":{"shape":"DBClusterParameterGroupList"} + } + }, + "DBClusterQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterQuotaExceededFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + 
"DBClusterSnapshot":{ + "type":"structure", + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "DBClusterSnapshotIdentifier":{"shape":"String"}, + "DBClusterIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "VpcId":{"shape":"String"}, + "ClusterCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "PercentProgress":{"shape":"Integer"}, + "StorageEncrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"} + }, + "wrapper":true + }, + "DBClusterSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterSnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBClusterSnapshot", + "locationName":"DBClusterSnapshot" + } + }, + "DBClusterSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBClusterSnapshots":{"shape":"DBClusterSnapshotList"} + } + }, + "DBClusterSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBClusterSnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "DBEngineDescription":{"shape":"String"}, + "DBEngineVersionDescription":{"shape":"String"}, + "DefaultCharacterSet":{"shape":"CharacterSet"}, + "SupportedCharacterSets":{"shape":"SupportedCharacterSetsList"}, + "ValidUpgradeTarget":{"shape":"ValidUpgradeTargetList"} + } + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + 
"shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBEngineVersions":{"shape":"DBEngineVersionList"} + } + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Engine":{"shape":"String"}, + "DBInstanceStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "AllocatedStorage":{"shape":"Integer"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "PreferredBackupWindow":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"Integer"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "DBParameterGroups":{"shape":"DBParameterGroupStatusList"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroup":{"shape":"DBSubnetGroup"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "LatestRestorableTime":{"shape":"TStamp"}, + "MultiAZ":{"shape":"Boolean"}, + "EngineVersion":{"shape":"String"}, + "AutoMinorVersionUpgrade":{"shape":"Boolean"}, + "ReadReplicaSourceDBInstanceIdentifier":{"shape":"String"}, + "ReadReplicaDBInstanceIdentifiers":{"shape":"ReadReplicaDBInstanceIdentifierList"}, + "LicenseModel":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupMemberships":{"shape":"OptionGroupMembershipList"}, + "CharacterSetName":{"shape":"String"}, + "SecondaryAvailabilityZone":{"shape":"String"}, + "PubliclyAccessible":{"shape":"Boolean"}, + "StatusInfos":{"shape":"DBInstanceStatusInfoList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "DbInstancePort":{"shape":"Integer"}, + "DBClusterIdentifier":{"shape":"String"}, + "StorageEncrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"}, + 
"DbiResourceId":{"shape":"String"}, + "CACertificateIdentifier":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"Boolean"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "EnhancedMonitoringResourceArn":{"shape":"String"}, + "MonitoringRoleArn":{"shape":"String"} + }, + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBInstances":{"shape":"DBInstanceList"} + } + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceStatusInfo":{ + "type":"structure", + "members":{ + "StatusType":{"shape":"String"}, + "Normal":{"shape":"Boolean"}, + "Status":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "DBInstanceStatusInfoList":{ + "type":"list", + "member":{ + "shape":"DBInstanceStatusInfo", + "locationName":"DBInstanceStatusInfo" + } + }, + "DBLogFileNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBLogFileNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + }, + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + 
"Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"} + } + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBParameterGroups":{"shape":"DBParameterGroupList"} + } + }, + "DBSecurityGroup":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"String"}, + "DBSecurityGroupName":{"shape":"String"}, + "DBSecurityGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"} + }, + "wrapper":true + }, + "DBSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + 
"shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroups"} + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSecurityGroupNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.DBSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSecurityGroups":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroup", + "locationName":"DBSecurityGroup" + } + }, + "DBSnapshot":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Engine":{"shape":"String"}, + "AllocatedStorage":{"shape":"Integer"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "InstanceCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "PercentProgress":{"shape":"Integer"}, + "SourceRegion":{"shape":"String"}, + "SourceDBSnapshotIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + 
"Encrypted":{"shape":"Boolean"}, + "KmsKeyId":{"shape":"String"} + }, + "wrapper":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{"shape":"String"}, + "AttributeValues":{"shape":"AttributeValueList"} + }, + "wrapper":true + }, + "DBSnapshotAttributeList":{ + "type":"list", + "member":{ + "shape":"DBSnapshotAttribute", + "locationName":"DBSnapshotAttribute" + } + }, + "DBSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "DBSnapshotAttributes":{"shape":"DBSnapshotAttributeList"} + }, + "wrapper":true + }, + "DBSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBSnapshot", + "locationName":"DBSnapshot" + } + }, + "DBSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSnapshots":{"shape":"DBSnapshotList"} + } + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"} + }, + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + 
"type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "DBSubnetGroups":{"shape":"DBSubnetGroups"} + } + }, + "DBSubnetGroupNotAllowedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotAllowedFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBClusterParameterGroupMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "DeleteDBClusterSnapshotMessage":{ + "type":"structure", + "required":["DBClusterSnapshotIdentifier"], + "members":{ + "DBClusterSnapshotIdentifier":{"shape":"String"} + } + }, + 
"DeleteDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "SkipFinalSnapshot":{"shape":"Boolean"}, + "FinalDBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"} + } + }, + "DeleteDBSecurityGroupMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"} + } + }, + "DeleteDBSnapshotMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBSnapshot":{"shape":"DBSnapshot"} + } + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DeleteOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"} + } + }, + "DescribeAccountAttributesMessage":{ + "type":"structure", + "members":{ + } + }, + "DescribeCertificatesMessage":{ + "type":"structure", + "members":{ + "CertificateIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + 
}, + "DescribeDBClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBClusterParametersMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBClusterSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "DBClusterSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBClustersMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "DefaultOnly":{"shape":"Boolean"}, + "ListSupportedCharacterSets":{"shape":"BooleanOptional"} + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesDetails":{ + "type":"structure", + "members":{ + "LogFileName":{"shape":"String"}, + "LastWritten":{"shape":"Long"}, + "Size":{"shape":"Long"} + } + }, + "DescribeDBLogFilesList":{ + "type":"list", + 
"member":{ + "shape":"DescribeDBLogFilesDetails", + "locationName":"DescribeDBLogFilesDetails" + } + }, + "DescribeDBLogFilesMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "FilenameContains":{"shape":"String"}, + "FileLastWritten":{"shape":"Long"}, + "FileSize":{"shape":"Long"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBLogFilesResponse":{ + "type":"structure", + "members":{ + "DescribeDBLogFiles":{"shape":"DescribeDBLogFilesList"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeDBSnapshotAttributesMessage":{ + "type":"structure", + "members":{ + "DBSnapshotIdentifier":{"shape":"String"} + } + }, + "DescribeDBSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBSnapshotAttributesResult":{"shape":"DBSnapshotAttributesResult"} + } + }, + "DescribeDBSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + 
"Marker":{"shape":"String"}, + "IncludeShared":{"shape":"Boolean"}, + "IncludePublic":{"shape":"Boolean"} + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultClusterParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultClusterParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + 
"Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupOptionsMessage":{ + "type":"structure", + "required":["EngineName"], + "members":{ + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeOptionGroupsMessage":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"} + } + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, + "Vpc":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribePendingMaintenanceActionsMessage":{ + "type":"structure", + "members":{ + "ResourceIdentifier":{"shape":"String"}, + "Filters":{"shape":"FilterList"}, + "Marker":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"} + } + }, + "DescribeReservedDBInstancesMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedDBInstancesOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + 
"Duration":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "DownloadDBLogFilePortionDetails":{ + "type":"structure", + "members":{ + "LogFileData":{"shape":"String"}, + "Marker":{"shape":"String"}, + "AdditionalDataPending":{"shape":"Boolean"} + } + }, + "DownloadDBLogFilePortionMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "LogFileName" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "LogFileName":{"shape":"String"}, + "Marker":{"shape":"String"}, + "NumberOfLines":{"shape":"Integer"} + } + }, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "HostedZoneId":{"shape":"String"} + } + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Date":{"shape":"TStamp"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + 
"EventCategories":{"shape":"EventCategoriesList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"Boolean"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "FailoverDBClusterMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{"shape":"String"} + } + }, + "FailoverDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"String"}, + "Values":{"shape":"FilterValueList"} + } + }, + "FilterList":{ + "type":"list", + "member":{ + 
"shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Value" + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBClusterCapacityFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientStorageClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientStorageClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBClusterSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBClusterSnapshotStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBClusterStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBClusterStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ 
+ "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidOptionGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidOptionGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KMSKeyNotAccessibleFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"KMSKeyNotAccessibleFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{"shape":"String"}, + "Filters":{"shape":"FilterList"} + } + }, + "Long":{"type":"long"}, + "ModifyDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "NewDBClusterIdentifier":{"shape":"String"}, + "ApplyImmediately":{"shape":"Boolean"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "DBClusterParameterGroupName":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "Port":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "OptionGroupName":{"shape":"String"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"} + } + }, + "ModifyDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBClusterParameterGroupName", + "Parameters" + ], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "DBInstanceClass":{"shape":"String"}, + "DBSecurityGroups":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ApplyImmediately":{"shape":"Boolean"}, + 
"MasterUserPassword":{"shape":"String"}, + "DBParameterGroupName":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "AllowMajorVersionUpgrade":{"shape":"Boolean"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "NewDBInstanceIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"}, + "CACertificateIdentifier":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + "MonitoringInterval":{"shape":"IntegerOptional"}, + "DBPortNumber":{"shape":"IntegerOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "MonitoringRoleArn":{"shape":"String"} + } + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyDBSnapshotAttributeMessage":{ + "type":"structure", + "required":["DBSnapshotIdentifier"], + "members":{ + "DBSnapshotIdentifier":{"shape":"String"}, + "AttributeName":{"shape":"String"}, + "ValuesToAdd":{"shape":"AttributeValueList"}, + "ValuesToRemove":{"shape":"AttributeValueList"} + } + }, + "ModifyDBSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "DBSnapshotAttributesResult":{"shape":"DBSnapshotAttributesResult"} + } + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{"shape":"String"}, + "DBSubnetGroupDescription":{"shape":"String"}, + 
"SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifyOptionGroupMessage":{ + "type":"structure", + "required":["OptionGroupName"], + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionsToInclude":{"shape":"OptionConfigurationList"}, + "OptionsToRemove":{"shape":"OptionNamesList"}, + "ApplyImmediately":{"shape":"Boolean"} + } + }, + "ModifyOptionGroupResult":{ + "type":"structure", + "members":{ + "OptionGroup":{"shape":"OptionGroup"} + } + }, + "Option":{ + "type":"structure", + "members":{ + "OptionName":{"shape":"String"}, + "OptionDescription":{"shape":"String"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + "OptionSettings":{"shape":"OptionSettingConfigurationList"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupMembershipList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupMembershipList"} + } + }, + "OptionConfiguration":{ + "type":"structure", + "required":["OptionName"], + "members":{ + "OptionName":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSecurityGroupMemberships":{"shape":"DBSecurityGroupNameList"}, + "VpcSecurityGroupMemberships":{"shape":"VpcSecurityGroupIdList"}, + "OptionSettings":{"shape":"OptionSettingsList"} + } + }, + "OptionConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionConfiguration", + "locationName":"OptionConfiguration" + } + }, 
+ "OptionGroup":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "OptionGroupDescription":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "Options":{"shape":"OptionsList"}, + "AllowsVpcAndNonVpcInstanceMemberships":{"shape":"Boolean"}, + "VpcId":{"shape":"String"} + }, + "wrapper":true + }, + "OptionGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OptionGroupOption":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Description":{"shape":"String"}, + "EngineName":{"shape":"String"}, + "MajorEngineVersion":{"shape":"String"}, + "MinimumRequiredMinorEngineVersion":{"shape":"String"}, + "PortRequired":{"shape":"Boolean"}, + "DefaultPort":{"shape":"IntegerOptional"}, + "OptionsDependedOn":{"shape":"OptionsDependedOn"}, + "Persistent":{"shape":"Boolean"}, + "Permanent":{"shape":"Boolean"}, + "OptionGroupOptionSettings":{"shape":"OptionGroupOptionSettingsList"} + } + }, + "OptionGroupOptionSetting":{ + "type":"structure", + "members":{ + "SettingName":{"shape":"String"}, + "SettingDescription":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"} + } + }, + "OptionGroupOptionSettingsList":{ + "type":"list", + "member":{ + 
"shape":"OptionGroupOptionSetting", + "locationName":"OptionGroupOptionSetting" + } + }, + "OptionGroupOptionsList":{ + "type":"list", + "member":{ + "shape":"OptionGroupOption", + "locationName":"OptionGroupOption" + } + }, + "OptionGroupOptionsMessage":{ + "type":"structure", + "members":{ + "OptionGroupOptions":{"shape":"OptionGroupOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionGroups":{ + "type":"structure", + "members":{ + "OptionGroupsList":{"shape":"OptionGroupsList"}, + "Marker":{"shape":"String"} + } + }, + "OptionGroupsList":{ + "type":"list", + "member":{ + "shape":"OptionGroup", + "locationName":"OptionGroup" + } + }, + "OptionNamesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "OptionSetting":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "DefaultValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "IsCollection":{"shape":"Boolean"} + } + }, + "OptionSettingConfigurationList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionSettingsList":{ + "type":"list", + "member":{ + "shape":"OptionSetting", + "locationName":"OptionSetting" + } + }, + "OptionsDependedOn":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"OptionName" + } + }, + "OptionsList":{ + "type":"list", + "member":{ + "shape":"Option", + "locationName":"Option" + } + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "LicenseModel":{"shape":"String"}, 
+ "AvailabilityZones":{"shape":"AvailabilityZoneList"}, + "MultiAZCapable":{"shape":"Boolean"}, + "ReadReplicaCapable":{"shape":"Boolean"}, + "Vpc":{"shape":"Boolean"}, + "SupportsStorageEncryption":{"shape":"Boolean"}, + "StorageType":{"shape":"String"}, + "SupportsIops":{"shape":"Boolean"}, + "SupportsEnhancedMonitoring":{"shape":"Boolean"} + }, + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{"shape":"OrderableDBInstanceOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "ApplyType":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"}, + "ApplyMethod":{"shape":"ApplyMethod"} + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingMaintenanceAction":{ + "type":"structure", + "members":{ + "Action":{"shape":"String"}, + "AutoAppliedAfterDate":{"shape":"TStamp"}, + "ForcedApplyDate":{"shape":"TStamp"}, + "OptInStatus":{"shape":"String"}, + "CurrentApplyDate":{"shape":"TStamp"}, + "Description":{"shape":"String"} + } + }, + "PendingMaintenanceActionDetails":{ + "type":"list", + "member":{ + "shape":"PendingMaintenanceAction", + "locationName":"PendingMaintenanceAction" + } + }, + "PendingMaintenanceActions":{ + "type":"list", + "member":{ + "shape":"ResourcePendingMaintenanceActions", + "locationName":"ResourcePendingMaintenanceActions" + } + }, + "PendingMaintenanceActionsMessage":{ + "type":"structure", + "members":{ + 
"PendingMaintenanceActions":{"shape":"PendingMaintenanceActions"}, + "Marker":{"shape":"String"} + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{"shape":"String"}, + "AllocatedStorage":{"shape":"IntegerOptional"}, + "MasterUserPassword":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "EngineVersion":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "DBInstanceIdentifier":{"shape":"String"}, + "StorageType":{"shape":"String"}, + "CACertificateIdentifier":{"shape":"String"} + } + }, + "PointInTimeRestoreNotEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"PointInTimeRestoreNotEnabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PromoteReadReplicaMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "BackupRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredBackupWindow":{"shape":"String"} + } + }, + "PromoteReadReplicaResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PurchaseReservedDBInstancesOfferingMessage":{ + "type":"structure", + "required":["ReservedDBInstancesOfferingId"], + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "ReservedDBInstanceId":{"shape":"String"}, + "DBInstanceCount":{"shape":"IntegerOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "PurchaseReservedDBInstancesOfferingResult":{ + "type":"structure", + "members":{ + "ReservedDBInstance":{"shape":"ReservedDBInstance"} + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + 
"shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "ForceFailover":{"shape":"BooleanOptional"} + } + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SourceIdentifier":{"shape":"String"} + } + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"KeyList"} + } + }, + "ReservedDBInstance":{ + "type":"structure", + "members":{ + "ReservedDBInstanceId":{"shape":"String"}, + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "DBInstanceCount":{"shape":"Integer"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "State":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstanceAlreadyExistsFault":{ + "type":"structure", + 
"members":{ + }, + "error":{ + "code":"ReservedDBInstanceAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstance", + "locationName":"ReservedDBInstance" + } + }, + "ReservedDBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstances":{"shape":"ReservedDBInstanceList"} + } + }, + "ReservedDBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedDBInstancesOffering":{ + "type":"structure", + "members":{ + "ReservedDBInstancesOfferingId":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "ProductDescription":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "MultiAZ":{"shape":"Boolean"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedDBInstancesOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedDBInstancesOffering", + "locationName":"ReservedDBInstancesOffering" + } + }, + "ReservedDBInstancesOfferingMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedDBInstancesOfferings":{"shape":"ReservedDBInstancesOfferingList"} + } + }, + "ReservedDBInstancesOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedDBInstancesOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResetDBClusterParameterGroupMessage":{ + 
"type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ResourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourcePendingMaintenanceActions":{ + "type":"structure", + "members":{ + "ResourceIdentifier":{"shape":"String"}, + "PendingMaintenanceActionDetails":{"shape":"PendingMaintenanceActionDetails"} + }, + "wrapper":true + }, + "RestoreDBClusterFromSnapshotMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "SnapshotIdentifier", + "Engine" + ], + "members":{ + "AvailabilityZones":{"shape":"AvailabilityZones"}, + "DBClusterIdentifier":{"shape":"String"}, + "SnapshotIdentifier":{"shape":"String"}, + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "DBSubnetGroupName":{"shape":"String"}, + "DatabaseName":{"shape":"String"}, + "OptionGroupName":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"} + } + }, + "RestoreDBClusterFromSnapshotResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "RestoreDBClusterToPointInTimeMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "SourceDBClusterIdentifier" + ], + "members":{ + "DBClusterIdentifier":{"shape":"String"}, + "SourceDBClusterIdentifier":{"shape":"String"}, + "RestoreToTime":{"shape":"TStamp"}, + 
"UseLatestRestorableTime":{"shape":"Boolean"}, + "Port":{"shape":"IntegerOptional"}, + "DBSubnetGroupName":{"shape":"String"}, + "OptionGroupName":{"shape":"String"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"} + } + }, + "RestoreDBClusterToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "RestoreDBInstanceFromDBSnapshotMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBSnapshotIdentifier" + ], + "members":{ + "DBInstanceIdentifier":{"shape":"String"}, + "DBSnapshotIdentifier":{"shape":"String"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"} + } + }, + "RestoreDBInstanceFromDBSnapshotResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RestoreDBInstanceToPointInTimeMessage":{ + "type":"structure", + "required":[ + "SourceDBInstanceIdentifier", + "TargetDBInstanceIdentifier" + ], + "members":{ + "SourceDBInstanceIdentifier":{"shape":"String"}, + "TargetDBInstanceIdentifier":{"shape":"String"}, + "RestoreTime":{"shape":"TStamp"}, + "UseLatestRestorableTime":{"shape":"Boolean"}, + "DBInstanceClass":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + 
"DBSubnetGroupName":{"shape":"String"}, + "MultiAZ":{"shape":"BooleanOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, + "LicenseModel":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Engine":{"shape":"String"}, + "Iops":{"shape":"IntegerOptional"}, + "OptionGroupName":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"}, + "StorageType":{"shape":"String"}, + "TdeCredentialArn":{"shape":"String"}, + "TdeCredentialPassword":{"shape":"String"} + } + }, + "RestoreDBInstanceToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RevokeDBSecurityGroupIngressMessage":{ + "type":"structure", + "required":["DBSecurityGroupName"], + "members":{ + "DBSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupId":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeDBSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "DBSecurityGroup":{"shape":"DBSecurityGroup"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SharedSnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SharedSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + 
"members":{ + }, + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot", + "db-cluster", + "db-cluster-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "StorageTypeNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"StorageTypeNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true 
+ }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "UpgradeTarget":{ + "type":"structure", + "members":{ + "Engine":{"shape":"String"}, + "EngineVersion":{"shape":"String"}, + "Description":{"shape":"String"}, + "AutoUpgrade":{"shape":"Boolean"}, + "IsMajorVersionUpgrade":{"shape":"Boolean"} + } + }, + "ValidUpgradeTargetList":{ + "type":"list", + "member":{ + "shape":"UpgradeTarget", + "locationName":"UpgradeTarget" + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2648 @@ +{ + "version": "2.0", + "service": "Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizeable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, Microsoft SQL Server, Oracle, or Aurora database server. This means the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your database instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

    This is an interface reference for Amazon RDS. It contains documentation for a programming or command line interface you can use to manage Amazon RDS. Note that Amazon RDS is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. For a summary of the Amazon RDS interfaces, go to Available RDS Interfaces.

    ", + "operations": { + "AddSourceIdentifierToSubscription": "

    Adds a source identifier to an existing RDS event notification subscription.

    ", + "AddTagsToResource": "

    Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

    For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.

    ", + "ApplyPendingMaintenanceAction": "

    Applies a pending maintenance action to a resource (for example, to a DB instance).

    ", + "AuthorizeDBSecurityGroupIngress": "

    Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

    You cannot authorize ingress from an EC2 security group in one region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "CopyDBClusterSnapshot": "

    Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "CopyDBParameterGroup": "

    Copies the specified DB parameter group.

    ", + "CopyDBSnapshot": "

    Copies the specified DBSnapshot. The source DB snapshot must be in the \"available\" state.

    If you are copying from a shared manual DB snapshot, the SourceDBSnapshotIdentifier must be the ARN of the shared DB snapshot.

    ", + "CopyOptionGroup": "

    Copies the specified option group.

    ", + "CreateDBCluster": "

    Creates a new Amazon Aurora DB cluster. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "CreateDBClusterParameterGroup": "

    Creates a new DB cluster parameter group.

    Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

    A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

    After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "CreateDBClusterSnapshot": "

    Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "CreateDBInstance": "

    Creates a new DB instance.

    ", + "CreateDBInstanceReadReplica": "

    Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL that acts as a Read Replica of a source DB instance.

    All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

    The source DB instance must have backup retention enabled.

    ", + "CreateDBParameterGroup": "

    Creates a new DB parameter group.

    A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.

    After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "CreateDBSecurityGroup": "

    Creates a new DB security group. DB security groups control access to a DB instance.

    ", + "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

    ", + "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you will receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType or the SourceIdentifier, you will be notified of events generated from all RDS sources belonging to your customer account.

    ", + "CreateOptionGroup": "

    Creates a new option group. You can create up to 20 option groups.

    ", + "DeleteDBCluster": "

    The DeleteDBCluster action deletes a previously provisioned DB cluster. A successful response from the web service indicates the request was received correctly. When you delete a DB cluster, all automated backups for that DB cluster are deleted and cannot be recovered. Manual DB cluster snapshots of the DB cluster to be deleted are not deleted.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DeleteDBClusterParameterGroup": "

    Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted cannot be associated with any DB clusters.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DeleteDBClusterSnapshot": "

    Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

    The DB cluster snapshot must be in the available state to be deleted.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DeleteDBInstance": "

    The DeleteDBInstance action deletes a previously provisioned DB instance. A successful response from the web service indicates the request was received correctly. When you delete a DB instance, all automated backups for that instance are deleted and cannot be recovered. Manual DB snapshots of the DB instance to be deleted are not deleted.

    If a final DB snapshot is requested the status of the RDS instance will be \"deleting\" until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action cannot be canceled or reverted once submitted.

    Note that when a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when the SkipFinalSnapshot parameter is set to \"true\".

    ", + "DeleteDBParameterGroup": "

    Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted cannot be associated with any DB instances.

    ", + "DeleteDBSecurityGroup": "

    Deletes a DB security group.

    The specified DB security group must not be associated with any DB instances.", + "DeleteDBSnapshot": "

    Deletes a DBSnapshot. If the snapshot is being copied, the copy operation is terminated.

    The DBSnapshot must be in the available state to be deleted.", + "DeleteDBSubnetGroup": "

    Deletes a DB subnet group.

    The specified database subnet group must not be associated with any DB instances.", + "DeleteEventSubscription": "

    Deletes an RDS event notification subscription.

    ", + "DeleteOptionGroup": "

    Deletes an existing option group.

    ", + "DescribeAccountAttributes": "

    Lists all of the attributes for a customer account. The attributes include Amazon RDS quotas for the account, such as the number of DB instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

    This command does not take any parameters.

    ", + "DescribeCertificates": "

    Lists the set of CA certificates provided by Amazon RDS for this AWS account.

    ", + "DescribeDBClusterParameterGroups": "

    Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list will contain only the description of the specified DB cluster parameter group.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeDBClusterParameters": "

    Returns the detailed parameter list for a particular DB cluster parameter group.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeDBClusterSnapshots": "

    Returns information about DB cluster snapshots. This API supports pagination.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeDBClusters": "

    Returns information about provisioned Aurora DB clusters. This API supports pagination.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeDBEngineVersions": "

    Returns a list of the available DB engines.

    ", + "DescribeDBInstances": "

    Returns information about provisioned RDS instances. This API supports pagination.

    ", + "DescribeDBLogFiles": "

    Returns a list of DB log files for the DB instance.

    ", + "DescribeDBParameterGroups": "

    Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.

    ", + "DescribeDBParameters": "

    Returns the detailed parameter list for a particular DB parameter group.

    ", + "DescribeDBSecurityGroups": "

    Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DB security group.

    ", + "DescribeDBSnapshotAttributes": "

    Returns a list of DB snapshot attribute names and values for a manual DB snapshot.

    When sharing snapshots with other AWS accounts, DescribeDBSnapshotAttributes returns the restore attribute and a list of the AWS account ids that are authorized to copy or restore the manual DB snapshot. If all is included in the list of values for the restore attribute, then the manual DB snapshot is public and can be copied or restored by all AWS accounts.

    To add or remove access for an AWS account to copy or restore a manual DB snapshot, or to make the manual DB snapshot public or private, use the ModifyDBSnapshotAttribute API.

    ", + "DescribeDBSnapshots": "

    Returns information about DB snapshots. This API supports pagination.

    ", + "DescribeDBSubnetGroups": "

    Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

    For an overview of CIDR ranges, go to the Wikipedia Tutorial.

    ", + "DescribeEngineDefaultClusterParameters": "

    Returns the default engine and system parameter information for the cluster database engine.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "DescribeEngineDefaultParameters": "

    Returns the default engine and system parameter information for the specified database engine.

    ", + "DescribeEventCategories": "

    Displays a list of categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in the Events topic in the Amazon RDS User Guide.

    ", + "DescribeEventSubscriptions": "

    Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

    If you specify a SubscriptionName, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

    ", + "DescribeOptionGroupOptions": "

    Describes all available options.

    ", + "DescribeOptionGroups": "

    Describes the available option groups.

    ", + "DescribeOrderableDBInstanceOptions": "

    Returns a list of orderable DB instance options for the specified engine.

    ", + "DescribePendingMaintenanceActions": "

    Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

    ", + "DescribeReservedDBInstances": "

    Returns information about reserved DB instances for this account, or about a specified reserved DB instance.

    ", + "DescribeReservedDBInstancesOfferings": "

    Lists available reserved DB instance offerings.

    ", + "DownloadDBLogFilePortion": "

    Downloads all or a portion of the specified log file, up to 1 MB in size.

    ", + "FailoverDBCluster": "

    Forces a failover for a DB cluster.

    A failover for a DB cluster promotes one of the read-only instances in the DB cluster to the master DB instance (the cluster writer) and deletes the current primary instance.

    Amazon Aurora will automatically fail over to a read-only instance, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a DB instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "ListTagsForResource": "

    Lists all tags on an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ModifyDBCluster": "

    Modify a setting for an Amazon Aurora DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "ModifyDBClusterParameterGroup": "

    Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.

    After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

    ", + "ModifyDBInstance": "

    Modify settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

    ", + "ModifyDBParameterGroup": "

    Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.

    After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", + "ModifyDBSnapshotAttribute": "

    Adds an attribute and values to, or removes an attribute and values from a manual DB snapshot.

    To share a manual DB snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of the AWS account ids that are authorized to restore the manual DB snapshot. Use the value all to make the manual DB snapshot public so that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB snapshots that contain private information that you do not want to be available to all AWS accounts.

    To view which AWS accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API.

    If the manual DB snapshot is encrypted, it cannot be shared.

    ", + "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

    ", + "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you cannot modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "ModifyOptionGroup": "

    Modifies an existing option group.

    ", + "PromoteReadReplica": "

    Promotes a Read Replica DB instance to a standalone DB instance.

    We recommend that you enable automated backups on your Read Replica before promoting the Read Replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.

    ", + "PurchaseReservedDBInstancesOffering": "

    Purchases a reserved DB instance offering.

    ", + "RebootDBInstance": "

    Rebooting a DB instance restarts the database engine service. A reboot also applies to the DB instance any modifications to the associated DB parameter group that were pending. Rebooting a DB instance results in a momentary outage of the instance, during which the DB instance status is set to rebooting. If the RDS instance is configured for MultiAZ, it is possible that the reboot will be conducted through a failover. An Amazon RDS event is created when the reboot is completed.

    If your DB instance is deployed in multiple Availability Zones, you can force a failover from one AZ to the other during the reboot. You might force a failover to test the availability of your DB instance deployment or to restore operations to the original AZ after a failover occurs.

    The time required to reboot is a function of the specific database engine's crash recovery process. To improve the reboot time, we recommend that you reduce database activities as much as possible during the reboot process to reduce rollback activity for in-transit transactions.

    ", + "RemoveSourceIdentifierFromSubscription": "

    Removes a source identifier from an existing RDS event notification subscription.

    ", + "RemoveTagsFromResource": "

    Removes metadata tags from an Amazon RDS resource.

    For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources.

    ", + "ResetDBClusterParameterGroup": "

    Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

    When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "ResetDBParameterGroup": "

    Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

    ", + "RestoreDBClusterFromSnapshot": "

    Creates a new DB cluster from a DB cluster snapshot. The target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "RestoreDBClusterToPointInTime": "

    Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

    For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

    ", + "RestoreDBInstanceFromDBSnapshot": "

    Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the original configuration, but in a system chosen availability zone with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment.

    If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS does not allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.

    If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

    ", + "RestoreDBInstanceToPointInTime": "

    Restores a DB instance to an arbitrary point-in-time. Users can restore to any point in time before the LatestRestorableTime for up to BackupRetentionPeriod days. The target database is created with most of the original configuration, but in a system chosen availability zone with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment.

    ", + "RevokeDBSecurityGroupIngress": "

    Revokes ingress from a DBSecurityGroup for previously authorized IP ranges or EC2 or VPC Security Groups. Required parameters for this API are one of CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId).

    " + }, + "shapes": { + "AccountAttributesMessage": { + "base": "

    Data returned by the DescribeAccountAttributes action.

    ", + "refs": { + } + }, + "AccountQuota": { + "base": "

    Describes a quota for an AWS account, for example, the number of DB instances allowed.

    ", + "refs": { + "AccountQuotaList$member": null + } + }, + "AccountQuotaList": { + "base": null, + "refs": { + "AccountAttributesMessage$AccountQuotas": "

    A list of AccountQuota objects. Within this list, each quota has a name, a count of usage toward the quota maximum, and a maximum value for the quota.

    " + } + }, + "AddSourceIdentifierToSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "AddSourceIdentifierToSubscriptionResult": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyMethod": { + "base": null, + "refs": { + "Parameter$ApplyMethod": "

    Indicates when to apply parameter updates.

    " + } + }, + "ApplyPendingMaintenanceActionMessage": { + "base": "

    ", + "refs": { + } + }, + "ApplyPendingMaintenanceActionResult": { + "base": null, + "refs": { + } + }, + "AttributeValueList": { + "base": null, + "refs": { + "DBSnapshotAttribute$AttributeValues": "

    The value(s) for the manual DB snapshot attribute.

    If the AttributeName field is restore, then this field returns a list of AWS account ids that are authorized to copy or restore the manual DB snapshot. If a value of all is in the list, then the manual DB snapshot is public and available for any AWS account to copy or restore.

    ", + "ModifyDBSnapshotAttributeMessage$ValuesToAdd": "

    A list of DB snapshot attributes to add to the attribute specified by AttributeName.

    To authorize other AWS Accounts to copy or restore a manual snapshot, this is one or more AWS account identifiers, or all to make the manual DB snapshot restorable by any AWS account. Do not add the all value for any manual DB snapshots that contain private information that you do not want to be available to all AWS accounts.

    ", + "ModifyDBSnapshotAttributeMessage$ValuesToRemove": "

    A list of DB snapshot attributes to remove from the attribute specified by AttributeName.

    To remove authorization for other AWS Accounts to copy or restore a manual snapshot, this is one or more AWS account identifiers, or all to remove authorization for any AWS account to copy or restore the DB snapshot. If you specify all, AWS accounts that have their account identifier explicitly added to the restore attribute can still copy or restore the manual DB snapshot.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

    RDS may not also be authorized via IAM to perform necessary actions on your behalf.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    DB security group authorization quota has been reached.

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "AuthorizeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Contains Availability Zone information.

    This data type is used as an element in the following data type:

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableDBInstanceOption$AvailabilityZones": "

    A list of Availability Zones for the orderable DB instance.

    " + } + }, + "AvailabilityZones": { + "base": null, + "refs": { + "CreateDBClusterMessage$AvailabilityZones": "

    A list of EC2 Availability Zones that instances in the DB cluster can be created in. For information on regions and Availability Zones, see Regions and Availability Zones.

    ", + "DBCluster$AvailabilityZones": "

    Provides the list of EC2 Availability Zones that instances in the DB cluster can be created in.

    ", + "DBClusterSnapshot$AvailabilityZones": "

    Provides the list of EC2 Availability Zones that instances in the DB cluster snapshot can be restored in.

    ", + "RestoreDBClusterFromSnapshotMessage$AvailabilityZones": "

    Provides the list of EC2 Availability Zones that instances in the restored DB cluster can be created in.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "DBCluster$StorageEncrypted": "

    Specifies whether the DB cluster is encrypted.

    ", + "DBClusterMember$IsClusterWriter": "

    Value that is true if the cluster member is the primary instance for the DB cluster and false otherwise.

    ", + "DBClusterSnapshot$StorageEncrypted": "

    Specifies whether the DB cluster snapshot is encrypted.

    ", + "DBInstance$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    ", + "DBInstance$AutoMinorVersionUpgrade": "

    Indicates that minor version patches are applied automatically.

    ", + "DBInstance$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "DBInstance$StorageEncrypted": "

    Specifies whether the DB instance is encrypted.

    ", + "DBInstance$CopyTagsToSnapshot": "

    Specifies whether tags are copied from the DB instance to snapshots of the DB instance.

    ", + "DBInstanceStatusInfo$Normal": "

    Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

    ", + "DBSnapshot$Encrypted": "

    Specifies whether the DB snapshot is encrypted.

    ", + "DeleteDBClusterMessage$SkipFinalSnapshot": "

    Determines whether a final DB cluster snapshot is created before the DB cluster is deleted. If true is specified, no DB cluster snapshot is created. If false is specified, a DB cluster snapshot is created before the DB cluster is deleted.

    You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is false.

    Default: false

    ", + "DeleteDBInstanceMessage$SkipFinalSnapshot": "

    Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted.

    Note that when a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when the SkipFinalSnapshot parameter is set to \"true\".

    Specify true when deleting a Read Replica.

    The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

    Default: false

    ", + "DescribeDBEngineVersionsMessage$DefaultOnly": "

    Indicates that only the default version of the specified engine or engine and major version combination is returned.

    ", + "DescribeDBSnapshotsMessage$IncludeShared": "

    True to include shared manual DB snapshots from other AWS accounts that this AWS account has been given permission to copy or restore; otherwise false. The default is false.

    An AWS account is given permission to restore a manual DB snapshot from another AWS account by the ModifyDBSnapshotAttribute API.

    ", + "DescribeDBSnapshotsMessage$IncludePublic": "

    True to include manual DB snapshots that are public and can be copied or restored by any AWS account; otherwise false. The default is false.

    A manual DB snapshot is shared as public by the ModifyDBSnapshotAttribute API.

    ", + "DownloadDBLogFilePortionDetails$AdditionalDataPending": "

    Boolean value that if true, indicates there is more data to be downloaded.

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

    ", + "ModifyDBClusterMessage$ApplyImmediately": "

    A value that specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster.

    If this parameter is set to false, changes to the DB cluster are applied during the next maintenance window.

    Default: false

    ", + "ModifyDBInstanceMessage$ApplyImmediately": "

    Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

    If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied.

    Default: false

    ", + "ModifyDBInstanceMessage$AllowMajorVersionUpgrade": "

    Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

    ", + "ModifyOptionGroupMessage$ApplyImmediately": "

    Indicates whether the changes should be applied immediately, or during the next maintenance window for each instance associated with the option group.

    ", + "Option$Persistent": "

    Indicate if this option is persistent.

    ", + "Option$Permanent": "

    Indicate if this option is permanent.

    ", + "OptionGroup$AllowsVpcAndNonVpcInstanceMemberships": "

    Indicates whether this option group can be applied to both VPC and non-VPC instances. The value true indicates the option group can be applied to both VPC and non-VPC instances.

    ", + "OptionGroupOption$PortRequired": "

    Specifies whether the option requires a port.

    ", + "OptionGroupOption$Persistent": "

    A persistent option cannot be removed from the option group once the option group is used, but this option can be removed from the db instance while modifying the related data and assigning another option group without this option.

    ", + "OptionGroupOption$Permanent": "

    A permanent option cannot be removed from the option group once the option group is used, and it cannot be removed from the db instance after assigning an option group with this permanent option.

    ", + "OptionGroupOptionSetting$IsModifiable": "

    Boolean value where true indicates that this option group option can be changed from the default value.

    ", + "OptionSetting$IsModifiable": "

    A Boolean value that, when true, indicates the option setting can be modified from the default.

    ", + "OptionSetting$IsCollection": "

    Indicates if the option setting is part of a collection.

    ", + "OrderableDBInstanceOption$MultiAZCapable": "

    Indicates whether this orderable DB instance is multi-AZ capable.

    ", + "OrderableDBInstanceOption$ReadReplicaCapable": "

    Indicates whether this orderable DB instance can have a Read Replica.

    ", + "OrderableDBInstanceOption$Vpc": "

    Indicates whether this is a VPC orderable DB instance.

    ", + "OrderableDBInstanceOption$SupportsStorageEncryption": "

    Indicates whether this orderable DB instance supports encrypted storage.

    ", + "OrderableDBInstanceOption$SupportsIops": "

    Indicates whether this orderable DB instance supports provisioned IOPS.

    ", + "OrderableDBInstanceOption$SupportsEnhancedMonitoring": "

    Indicates whether the DB instance supports enhanced monitoring at intervals from 1 to 60 seconds.

    ", + "Parameter$IsModifiable": "

    Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ReservedDBInstance$MultiAZ": "

    Indicates if the reservation applies to Multi-AZ deployments.

    ", + "ReservedDBInstancesOffering$MultiAZ": "

    Indicates if the offering applies to Multi-AZ deployments.

    ", + "ResetDBClusterParameterGroupMessage$ResetAllParameters": "

    A value that is set to true to reset all parameters in the DB cluster parameter group to their default values, and false otherwise. You cannot use this parameter if there is a list of parameter names specified for the Parameters parameter.

    ", + "ResetDBParameterGroupMessage$ResetAllParameters": "

    Specifies whether (true) or not (false) to reset all parameters in the DB parameter group to default values.

    Default: true

    ", + "RestoreDBClusterToPointInTimeMessage$UseLatestRestorableTime": "

    A value that is set to true to restore the DB cluster to the latest restorable backup time, and false otherwise.

    Default: false

    Constraints: Cannot be specified if RestoreToTime parameter is provided.

    ", + "RestoreDBInstanceToPointInTimeMessage$UseLatestRestorableTime": "

    Specifies whether (true) or not (false) the DB instance is restored from the latest backup time.

    Default: false

    Constraints: Cannot be specified if RestoreTime parameter is provided.

    ", + "UpgradeTarget$AutoUpgrade": "

    A value that indicates whether the target version will be applied to any source DB instances that have AutoMinorVersionUpgrade set to true.

    ", + "UpgradeTarget$IsMajorVersionUpgrade": "

    A value that indicates whether a database engine will be upgraded to a major version.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CopyDBSnapshotMessage$CopyTags": "

    True to copy all tags from the source DB snapshot to the target DB snapshot; otherwise false. The default is false.

    ", + "CreateDBClusterMessage$StorageEncrypted": "

    Specifies whether the DB cluster is encrypted.

    ", + "CreateDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the MultiAZ parameter is set to true. Do not set this value if you want a Multi-AZ deployment for a SQL Server DB instance. Multi-AZ for SQL Server is set using the Mirroring option in an option group.

    ", + "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window.

    Default: true

    ", + "CreateDBInstanceMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceMessage$StorageEncrypted": "

    Specifies whether the DB instance is encrypted.

    Default: false

    ", + "CreateDBInstanceMessage$CopyTagsToSnapshot": "

    True to copy all tags from the DB instance to snapshots of the DB instance; otherwise false. The default is false.

    ", + "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB instance

    ", + "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "CreateDBInstanceReadReplicaMessage$CopyTagsToSnapshot": "

    True to copy all tags from the Read Replica to snapshots of the Read Replica; otherwise false. The default is false.

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

    If this parameter is specified, and if the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

    The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

    ", + "DescribeReservedDBInstancesMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MultiAZ": "

    The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.

    ", + "ModifyDBInstanceMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Constraints: Cannot be specified if the DB instance is a Read Replica. This parameter cannot be used with SQL Server DB instances. Multi-AZ for SQL Server DB instances is set using the Mirroring option in an option group associated with the DB instance.

    ", + "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.

    ", + "ModifyDBInstanceMessage$CopyTagsToSnapshot": "

    True to copy all tags from the DB instance to snapshots of the DB instance; otherwise false. The default is false.

    ", + "ModifyDBInstanceMessage$PubliclyAccessible": "

    True to make the DB instance Internet-facing with a publicly resolvable DNS name, which resolves to a public IP address. False to make the DB instance internal with a DNS name that resolves to a private IP address.

    PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be true in order for it to be publicly accessible.

    Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

    Default: false

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription.

    ", + "PendingModifiedValues$MultiAZ": "

    Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

    ", + "RebootDBInstanceMessage$ForceFailover": "

    When true, the reboot will be conducted through a MultiAZ failover.

    Constraint: You cannot specify true if the instance is not configured for MultiAZ.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC: true
    • VPC: false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$CopyTagsToSnapshot": "

    True to copy all tags from the restored DB instance to snapshots of the DB instance; otherwise false. The default is false.

    ", + "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

    Specifies if the DB instance is a Multi-AZ deployment.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    ", + "RestoreDBInstanceToPointInTimeMessage$PubliclyAccessible": "

    Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

    Default: The default behavior varies depending on whether a VPC has been requested or not. The following list shows the default behavior in each case.

    • Default VPC:true
    • VPC:false

    If no DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be publicly accessible. If a specific DB subnet group has been specified as part of the request and the PubliclyAccessible value has not been set, the DB instance will be private.

    ", + "RestoreDBInstanceToPointInTimeMessage$AutoMinorVersionUpgrade": "

    Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window.

    ", + "RestoreDBInstanceToPointInTimeMessage$CopyTagsToSnapshot": "

    True to copy all tags from the restored DB instance to snapshots of the DB instance; otherwise false. The default is false.

    " + } + }, + "Certificate": { + "base": "

    A CA certificate for an AWS account.

    ", + "refs": { + "CertificateList$member": null + } + }, + "CertificateList": { + "base": null, + "refs": { + "CertificateMessage$Certificates": "

    The list of Certificate objects for the AWS account.

    " + } + }, + "CertificateMessage": { + "base": "

    Data returned by the DescribeCertificates action.

    ", + "refs": { + } + }, + "CertificateNotFoundFault": { + "base": "

    CertificateIdentifier does not refer to an existing certificate.

    ", + "refs": { + } + }, + "CharacterSet": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersion$DefaultCharacterSet": "

    The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

    ", + "SupportedCharacterSetsList$member": null + } + }, + "CopyDBClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "CopyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CopyDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CopyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBClusterParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBClusterResult": { + "base": null, + "refs": { + } + }, + "CreateDBClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBInstanceReadReplicaMessage": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceReadReplicaResult": { + "base": null, + "refs": { + } + }, + "CreateDBInstanceResult": { + "base": null, + "refs": { + } + }, + "CreateDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateOptionGroupResult": { + "base": null, + "refs": { + } + }, + "DBCluster": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBClusters action.

    ", + "refs": { + "CreateDBClusterResult$DBCluster": null, + "DBClusterList$member": null, + "DeleteDBClusterResult$DBCluster": null, + "FailoverDBClusterResult$DBCluster": null, + "ModifyDBClusterResult$DBCluster": null, + "RestoreDBClusterFromSnapshotResult$DBCluster": null, + "RestoreDBClusterToPointInTimeResult$DBCluster": null + } + }, + "DBClusterAlreadyExistsFault": { + "base": "

    User already has a DB cluster with the given identifier.

    ", + "refs": { + } + }, + "DBClusterList": { + "base": null, + "refs": { + "DBClusterMessage$DBClusters": "

    Contains a list of DB clusters for the user.

    " + } + }, + "DBClusterMember": { + "base": "

    Contains information about an instance that is part of a DB cluster.

    ", + "refs": { + "DBClusterMemberList$member": null + } + }, + "DBClusterMemberList": { + "base": null, + "refs": { + "DBCluster$DBClusterMembers": "

    Provides the list of instances that make up the DB cluster.

    " + } + }, + "DBClusterMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBClusters action.

    ", + "refs": { + } + }, + "DBClusterNotFoundFault": { + "base": "

    DBClusterIdentifier does not refer to an existing DB cluster.

    ", + "refs": { + } + }, + "DBClusterOptionGroupMemberships": { + "base": null, + "refs": { + "DBCluster$DBClusterOptionGroupMemberships": "

    Provides the list of option group memberships for this DB cluster.

    " + } + }, + "DBClusterOptionGroupStatus": { + "base": "

    Contains status information for a DB cluster option group.

    ", + "refs": { + "DBClusterOptionGroupMemberships$member": null + } + }, + "DBClusterParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBClusterParameterGroup action.

    This data type is used as a request parameter in the DeleteDBClusterParameterGroup action, and as a response element in the DescribeDBClusterParameterGroups action.

    ", + "refs": { + "CreateDBClusterParameterGroupResult$DBClusterParameterGroup": null, + "DBClusterParameterGroupList$member": null + } + }, + "DBClusterParameterGroupDetails": { + "base": "

    Provides details about a DB cluster parameter group including the parameters in the DB cluster parameter group.

    ", + "refs": { + } + }, + "DBClusterParameterGroupList": { + "base": null, + "refs": { + "DBClusterParameterGroupsMessage$DBClusterParameterGroups": "

    A list of DB cluster parameter groups.

    " + } + }, + "DBClusterParameterGroupNameMessage": { + "base": "

    ", + "refs": { + } + }, + "DBClusterParameterGroupNotFoundFault": { + "base": "

    DBClusterParameterGroupName does not refer to an existing DB Cluster parameter group.

    ", + "refs": { + } + }, + "DBClusterParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DBClusterQuotaExceededFault": { + "base": "

    User attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.

    ", + "refs": { + } + }, + "DBClusterSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBClusterSnapshots action.

    ", + "refs": { + "CopyDBClusterSnapshotResult$DBClusterSnapshot": null, + "CreateDBClusterSnapshotResult$DBClusterSnapshot": null, + "DBClusterSnapshotList$member": null, + "DeleteDBClusterSnapshotResult$DBClusterSnapshot": null + } + }, + "DBClusterSnapshotAlreadyExistsFault": { + "base": "

    User already has a DB cluster snapshot with the given identifier.

    ", + "refs": { + } + }, + "DBClusterSnapshotList": { + "base": null, + "refs": { + "DBClusterSnapshotMessage$DBClusterSnapshots": "

    Provides a list of DB cluster snapshots for the user.

    " + } + }, + "DBClusterSnapshotMessage": { + "base": "

    Provides a list of DB cluster snapshots for the user as the result of a call to the DescribeDBClusterSnapshots action.

    ", + "refs": { + } + }, + "DBClusterSnapshotNotFoundFault": { + "base": "

    DBClusterSnapshotIdentifier does not refer to an existing DB cluster snapshot.

    ", + "refs": { + } + }, + "DBEngineVersion": { + "base": "

    This data type is used as a response element in the action DescribeDBEngineVersions.

    ", + "refs": { + "DBEngineVersionList$member": null + } + }, + "DBEngineVersionList": { + "base": null, + "refs": { + "DBEngineVersionMessage$DBEngineVersions": "

    A list of DBEngineVersion elements.

    " + } + }, + "DBEngineVersionMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBEngineVersions action.

    ", + "refs": { + } + }, + "DBInstance": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBInstances action.

    ", + "refs": { + "CreateDBInstanceReadReplicaResult$DBInstance": null, + "CreateDBInstanceResult$DBInstance": null, + "DBInstanceList$member": null, + "DeleteDBInstanceResult$DBInstance": null, + "ModifyDBInstanceResult$DBInstance": null, + "PromoteReadReplicaResult$DBInstance": null, + "RebootDBInstanceResult$DBInstance": null, + "RestoreDBInstanceFromDBSnapshotResult$DBInstance": null, + "RestoreDBInstanceToPointInTimeResult$DBInstance": null + } + }, + "DBInstanceAlreadyExistsFault": { + "base": "

    User already has a DB instance with the given identifier.

    ", + "refs": { + } + }, + "DBInstanceList": { + "base": null, + "refs": { + "DBInstanceMessage$DBInstances": "

    A list of DBInstance instances.

    " + } + }, + "DBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBInstances action.

    ", + "refs": { + } + }, + "DBInstanceNotFoundFault": { + "base": "

    DBInstanceIdentifier does not refer to an existing DB instance.

    ", + "refs": { + } + }, + "DBInstanceStatusInfo": { + "base": "

    Provides a list of status information for a DB instance.

    ", + "refs": { + "DBInstanceStatusInfoList$member": null + } + }, + "DBInstanceStatusInfoList": { + "base": null, + "refs": { + "DBInstance$StatusInfos": "

    The status of a Read Replica. If the instance is not a Read Replica, this will be blank.

    " + } + }, + "DBLogFileNotFoundFault": { + "base": "

    LogFileName does not refer to an existing DB log file.

    ", + "refs": { + } + }, + "DBParameterGroup": { + "base": "

    Contains the result of a successful invocation of the CreateDBParameterGroup action.

    This data type is used as a request parameter in the DeleteDBParameterGroup action, and as a response element in the DescribeDBParameterGroups action.

    ", + "refs": { + "CopyDBParameterGroupResult$DBParameterGroup": null, + "CreateDBParameterGroupResult$DBParameterGroup": null, + "DBParameterGroupList$member": null + } + }, + "DBParameterGroupAlreadyExistsFault": { + "base": "

    A DB parameter group with the same name exists.

    ", + "refs": { + } + }, + "DBParameterGroupDetails": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameters action.

    ", + "refs": { + } + }, + "DBParameterGroupList": { + "base": null, + "refs": { + "DBParameterGroupsMessage$DBParameterGroups": "

    A list of DBParameterGroup instances.

    " + } + }, + "DBParameterGroupNameMessage": { + "base": "

    Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

    ", + "refs": { + } + }, + "DBParameterGroupNotFoundFault": { + "base": "

    DBParameterGroupName does not refer to an existing DB parameter group.

    ", + "refs": { + } + }, + "DBParameterGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB parameter groups.

    ", + "refs": { + } + }, + "DBParameterGroupStatus": { + "base": "

    The status of the DB parameter group.

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBParameterGroupStatusList$member": null + } + }, + "DBParameterGroupStatusList": { + "base": null, + "refs": { + "DBInstance$DBParameterGroups": "

    Provides the list of DB parameter groups applied to this DB instance.

    " + } + }, + "DBParameterGroupsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBParameterGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "AuthorizeDBSecurityGroupIngressResult$DBSecurityGroup": null, + "CreateDBSecurityGroupResult$DBSecurityGroup": null, + "DBSecurityGroups$member": null, + "RevokeDBSecurityGroupIngressResult$DBSecurityGroup": null + } + }, + "DBSecurityGroupAlreadyExistsFault": { + "base": "

    A DB security group with the name specified in DBSecurityGroupName already exists.

    ", + "refs": { + } + }, + "DBSecurityGroupMembership": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBSecurityGroupMembershipList$member": null + } + }, + "DBSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$DBSecurityGroups": "

    Provides List of DB security group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

    ", + "Option$DBSecurityGroupMemberships": "

    If the option requires access to a port, then this DB security group allows access to the port.

    " + } + }, + "DBSecurityGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSecurityGroups action.

    ", + "refs": { + } + }, + "DBSecurityGroupNameList": { + "base": null, + "refs": { + "CreateDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to associate with this DB instance.

    Default: The default DB security group for the database engine.

    ", + "ModifyDBInstanceMessage$DBSecurityGroups": "

    A list of DB security groups to authorize on this DB instance. Changing this setting does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$DBSecurityGroupMemberships": "

    A list of DBSecurityGroupMembership name strings used for this option.

    " + } + }, + "DBSecurityGroupNotFoundFault": { + "base": "

    DBSecurityGroupName does not refer to an existing DB security group.

    ", + "refs": { + } + }, + "DBSecurityGroupNotSupportedFault": { + "base": "

    A DB security group is not allowed for this action.

    ", + "refs": { + } + }, + "DBSecurityGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB security groups.

    ", + "refs": { + } + }, + "DBSecurityGroups": { + "base": null, + "refs": { + "DBSecurityGroupMessage$DBSecurityGroups": "

    A list of DBSecurityGroup instances.

    " + } + }, + "DBSnapshot": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", + "refs": { + "CopyDBSnapshotResult$DBSnapshot": null, + "CreateDBSnapshotResult$DBSnapshot": null, + "DBSnapshotList$member": null, + "DeleteDBSnapshotResult$DBSnapshot": null + } + }, + "DBSnapshotAlreadyExistsFault": { + "base": "

    DBSnapshotIdentifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "DBSnapshotAttribute": { + "base": "

    Contains the name and values of a manual DB snapshot attribute

    Manual DB snapshot attributes are used to authorize other AWS accounts to restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute API.

    ", + "refs": { + "DBSnapshotAttributeList$member": null + } + }, + "DBSnapshotAttributeList": { + "base": null, + "refs": { + "DBSnapshotAttributesResult$DBSnapshotAttributes": "

    The list of attributes and values for the manual DB snapshot.

    " + } + }, + "DBSnapshotAttributesResult": { + "base": "

    Contains the results of a successful call to the DescribeDBSnapshotAttributes API.

    Manual DB snapshot attributes are used to authorize other AWS accounts to copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute API.

    ", + "refs": { + "DescribeDBSnapshotAttributesResult$DBSnapshotAttributesResult": null, + "ModifyDBSnapshotAttributeResult$DBSnapshotAttributesResult": null + } + }, + "DBSnapshotList": { + "base": null, + "refs": { + "DBSnapshotMessage$DBSnapshots": "

    A list of DBSnapshot instances.

    " + } + }, + "DBSnapshotMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSnapshots action.

    ", + "refs": { + } + }, + "DBSnapshotNotFoundFault": { + "base": "

    DBSnapshotIdentifier does not refer to an existing DB snapshot.

    ", + "refs": { + } + }, + "DBSubnetGroup": { + "base": "

    Contains the result of a successful invocation of the following actions:

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "CreateDBSubnetGroupResult$DBSubnetGroup": null, + "DBInstance$DBSubnetGroup": "

    Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

    ", + "DBSubnetGroups$member": null, + "ModifyDBSubnetGroupResult$DBSubnetGroup": null + } + }, + "DBSubnetGroupAlreadyExistsFault": { + "base": "

    DBSubnetGroupName is already used by an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupDoesNotCoverEnoughAZs": { + "base": "

    Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

    ", + "refs": { + } + }, + "DBSubnetGroupMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

    ", + "refs": { + } + }, + "DBSubnetGroupNotAllowedFault": { + "base": "

    Indicates that the DBSubnetGroup should not be specified while creating read replicas that lie in the same region as the source instance.

    ", + "refs": { + } + }, + "DBSubnetGroupNotFoundFault": { + "base": "

    DBSubnetGroupName does not refer to an existing DB subnet group.

    ", + "refs": { + } + }, + "DBSubnetGroupQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB subnet groups.

    ", + "refs": { + } + }, + "DBSubnetGroups": { + "base": null, + "refs": { + "DBSubnetGroupMessage$DBSubnetGroups": "

    A list of DBSubnetGroup instances.

    " + } + }, + "DBSubnetQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of subnets in a DB subnet groups.

    ", + "refs": { + } + }, + "DBUpgradeDependencyFailureFault": { + "base": "

    The DB upgrade failed because a resource the DB depends on could not be modified.

    ", + "refs": { + } + }, + "DeleteDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBClusterResult": { + "base": null, + "refs": { + } + }, + "DeleteDBClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBInstanceResult": { + "base": null, + "refs": { + } + }, + "DeleteDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "DeleteDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "DeleteOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeAccountAttributesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeCertificatesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClusterParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClusterParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClusterSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBClustersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBEngineVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesDetails": { + "base": "

    This data type is used as a response element to DescribeDBLogFiles.

    ", + "refs": { + "DescribeDBLogFilesList$member": null + } + }, + "DescribeDBLogFilesList": { + "base": null, + "refs": { + "DescribeDBLogFilesResponse$DescribeDBLogFiles": "

    The DB log files returned.

    " + } + }, + "DescribeDBLogFilesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBLogFilesResponse": { + "base": "

    The response from a call to DescribeDBLogFiles.

    ", + "refs": { + } + }, + "DescribeDBParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSecurityGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSnapshotAttributesMessage": { + "base": null, + "refs": { + } + }, + "DescribeDBSnapshotAttributesResult": { + "base": null, + "refs": { + } + }, + "DescribeDBSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDBSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultClusterParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultClusterParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEngineDefaultParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEngineDefaultParametersResult": { + "base": null, + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOptionGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableDBInstanceOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribePendingMaintenanceActionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedDBInstancesOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount of the recurring charge.

    ", + "ReservedDBInstance$FixedPrice": "

    The fixed price charged for this reserved DB instance.

    ", + "ReservedDBInstance$UsagePrice": "

    The hourly price charged for this reserved DB instance.

    ", + "ReservedDBInstancesOffering$FixedPrice": "

    The fixed price charged for this offering.

    ", + "ReservedDBInstancesOffering$UsagePrice": "

    The hourly price charged for this offering.

    " + } + }, + "DownloadDBLogFilePortionDetails": { + "base": "

    This data type is used as a response element to DownloadDBLogFilePortion.

    ", + "refs": { + } + }, + "DownloadDBLogFilePortionMessage": { + "base": "

    ", + "refs": { + } + }, + "EC2SecurityGroup": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "DBSecurityGroup$EC2SecurityGroups": "

    Contains a list of EC2SecurityGroup elements.

    " + } + }, + "Endpoint": { + "base": "

    This data type is used as a response element in the following actions:

    ", + "refs": { + "DBInstance$Endpoint": "

    Specifies the connection endpoint.

    " + } + }, + "EngineDefaults": { + "base": "

    Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

    ", + "refs": { + "DescribeEngineDefaultClusterParametersResult$EngineDefaults": null, + "DescribeEngineDefaultParametersResult$EngineDefaults": null + } + }, + "Event": { + "base": "

    This data type is used as a response element in the DescribeEvents action.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", + "DescribeEventsMessage$EventCategories": "

    A list of event categories that trigger notifications for a event notification subscription.

    ", + "Event$EventCategories": "

    Specifies the category for the event.

    ", + "EventCategoriesMap$EventCategories": "

    The event categories for the specified source type

    ", + "EventSubscription$EventCategoriesList": "

    A list of event categories for the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    " + } + }, + "EventCategoriesMap": { + "base": "

    Contains the results of a successful invocation of the DescribeEventCategories action.

    ", + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of EventCategoriesMap data types.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    Data returned from the DescribeEventCategories action.

    ", + "refs": { + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": "

    Contains the results of a successful invocation of the DescribeEventSubscriptions action.

    ", + "refs": { + "AddSourceIdentifierToSubscriptionResult$EventSubscription": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "DeleteEventSubscriptionResult$EventSubscription": null, + "EventSubscriptionsList$member": null, + "ModifyEventSubscriptionResult$EventSubscription": null, + "RemoveSourceIdentifierFromSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    You have reached the maximum number of event subscriptions.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of EventSubscriptions data types.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    Data returned by the DescribeEventSubscriptions action.

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeEvents action.

    ", + "refs": { + } + }, + "FailoverDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "FailoverDBClusterResult": { + "base": null, + "refs": { + } + }, + "Filter": { + "base": null, + "refs": { + "FilterList$member": null + } + }, + "FilterList": { + "base": null, + "refs": { + "DescribeCertificatesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBClusterParameterGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBClusterParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBClusterSnapshotsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBClustersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBEngineVersionsMessage$Filters": "

    Not currently supported.

    ", + "DescribeDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBLogFilesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParameterGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSecurityGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSnapshotsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeDBSubnetGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEngineDefaultClusterParametersMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEngineDefaultParametersMessage$Filters": "

    Not currently supported.

    ", + "DescribeEventCategoriesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventSubscriptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeEventsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOptionGroupsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribePendingMaintenanceActionsMessage$Filters": "

    A filter that specifies one or more resources to return pending maintenance actions for.

    Supported filters:

    • db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include pending maintenance actions for the DB instances identified by these ARNs.
    ", + "DescribeReservedDBInstancesMessage$Filters": "

    This parameter is not currently supported.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Filters": "

    This parameter is not currently supported.

    ", + "ListTagsForResourceMessage$Filters": "

    This parameter is not currently supported.

    " + } + }, + "FilterValueList": { + "base": null, + "refs": { + "Filter$Values": "

    This parameter is not currently supported.

    " + } + }, + "IPRange": { + "base": "

    This data type is used as a response element in the DescribeDBSecurityGroups action.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "DBSecurityGroup$IPRanges": "

    Contains a list of IPRange elements.

    " + } + }, + "InstanceQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB instances.

    ", + "refs": { + } + }, + "InsufficientDBClusterCapacityFault": { + "base": "

    The DB cluster does not have enough capacity for the current operation.

    ", + "refs": { + } + }, + "InsufficientDBInstanceCapacityFault": { + "base": "

    Specified DB instance class is not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "InsufficientStorageClusterCapacityFault": { + "base": "

    There is insufficient storage available for the current action. You may be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DBClusterSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBClusterSnapshot$Port": "

    Specifies the port that the DB cluster was listening on at the time of the snapshot.

    ", + "DBClusterSnapshot$PercentProgress": "

    Specifies the percentage of the estimated data that has been transferred.

    ", + "DBInstance$AllocatedStorage": "

    Specifies the allocated storage size specified in gigabytes.

    ", + "DBInstance$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB snapshots are retained.

    ", + "DBInstance$DbInstancePort": "

    Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port.

    ", + "DBSnapshot$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBSnapshot$Port": "

    Specifies the port that the database engine was listening on at the time of the snapshot.

    ", + "DBSnapshot$PercentProgress": "

    The percentage of the estimated data that has been transferred.

    ", + "DownloadDBLogFilePortionMessage$NumberOfLines": "

    The number of lines to download. If the number of lines specified results in a file over 1 MB in size, the file will be truncated at 1 MB in size.

    If the NumberOfLines parameter is specified, then the block of lines returned can be from the beginning or the end of the log file, depending on the value of the Marker parameter.

    • If neither Marker or NumberOfLines are specified, the entire log file is returned.

    • If NumberOfLines is specified and Marker is not specified, then the most recent lines from the end of the log file are returned.

    • If Marker is specified as \"0\", then the specified number of lines from the beginning of the log file are returned.

    • You can download the log file in blocks of lines by specifying the size of the block using the NumberOfLines parameter, and by specifying a value of \"0\" for the Marker parameter in your first request. Include the Marker value returned in the response as the Marker value for the next request, continuing until the AdditionalDataPending response element returns false.

    ", + "Endpoint$Port": "

    Specifies the port that the database engine is listening on.

    ", + "ReservedDBInstance$Duration": "

    The duration of the reservation in seconds.

    ", + "ReservedDBInstance$DBInstanceCount": "

    The number of reserved DB instances.

    ", + "ReservedDBInstancesOffering$Duration": "

    The duration of the offering in seconds.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateDBClusterMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. You must specify a minimum value of 1.

    Default: 1

    Constraints:

    • Must be a value from 1 to 35
    ", + "CreateDBClusterMessage$Port": "

    The port number on which the instances in the DB cluster accept connections.

    Default: 3306

    ", + "CreateDBInstanceMessage$AllocatedStorage": "

    The amount of storage (in gigabytes) to be initially allocated for the database instance.

    Type: Integer

    MySQL

    Constraints: Must be an integer from 5 to 6144.

    MariaDB

    Constraints: Must be an integer from 5 to 6144.

    PostgreSQL

    Constraints: Must be an integer from 5 to 6144.

    Oracle

    Constraints: Must be an integer from 10 to 6144.

    SQL Server

    Constraints: Must be an integer from 200 to 4096 (Standard Edition and Enterprise Edition) or from 20 to 4096 (Express Edition and Web Edition)

    ", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 35
    • Cannot be set to 0 if the DB instance is a source to Read Replicas
    ", + "CreateDBInstanceMessage$Port": "

    The port number on which the database accepts connections.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    MariaDB

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    PostgreSQL

    Default: 5432

    Valid Values: 1150-65535

    Type: Integer

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

    Amazon Aurora

    Default: 3306

    Valid Values: 1150-65535

    Type: Integer

    ", + "CreateDBInstanceMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: To use PIOPS, this value must be an integer greater than 1000.

    ", + "CreateDBInstanceMessage$MonitoringInterval": "

    The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 60.

    If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

    Valid Values: 0, 1, 5, 10, 15, 30, 60

    ", + "CreateDBInstanceReadReplicaMessage$Port": "

    The port number that the DB instance uses for connections.

    Default: Inherits from the source DB instance

    Valid Values: 1150-65535

    ", + "CreateDBInstanceReadReplicaMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    ", + "CreateDBInstanceReadReplicaMessage$MonitoringInterval": "

    The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the Read Replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 60.

    If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

    Valid Values: 0, 1, 5, 10, 15, 30, 60

    ", + "DBCluster$AllocatedStorage": "

    Specifies the allocated storage size in gigabytes (GB).

    ", + "DBCluster$BackupRetentionPeriod": "

    Specifies the number of days for which automatic DB snapshots are retained.

    ", + "DBCluster$Port": "

    Specifies the port that the database engine is listening on.

    ", + "DBInstance$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value.

    ", + "DBInstance$MonitoringInterval": "

    The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.

    ", + "DBSnapshot$Iops": "

    Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.

    ", + "DescribeCertificatesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBClusterParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBClusterParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBClusterSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBClustersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBEngineVersionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBLogFilesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    ", + "DescribeDBParameterGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBSecurityGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBSnapshotsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeDBSubnetGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEngineDefaultClusterParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEngineDefaultParametersMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes to retrieve events for.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeOptionGroupOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeOptionGroupsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribePendingMaintenanceActionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", + "ModifyDBClusterMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. You must specify a minimum value of 1.

    Default: 1

    Constraints:

    • Must be a value from 1 to 35
    ", + "ModifyDBClusterMessage$Port": "

    The port number on which the DB cluster accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB cluster.

    ", + "ModifyDBInstanceMessage$AllocatedStorage": "

    The new storage capacity of the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless ApplyImmediately is set to true for this request.

    MySQL

    Default: Uses existing setting

    Valid Values: 5-6144

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    MariaDB

    Default: Uses existing setting

    Valid Values: 5-6144

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    PostgreSQL

    Default: Uses existing setting

    Valid Values: 5-6144

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    Type: Integer

    Oracle

    Default: Uses existing setting

    Valid Values: 10-6144

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    SQL Server

    Cannot be modified.

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

    ", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

    Default: Uses existing setting

    Constraints:

    • Must be a value from 0 to 35
    • Can be specified for a MySQL Read Replica only if the source is running MySQL 5.6
    • Can be specified for a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5
    • Cannot be set to 0 if the DB instance is a source to Read Replicas
    ", + "ModifyDBInstanceMessage$Iops": "

    The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    Default: Uses existing setting

    Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    Type: Integer

    If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

    ", + "ModifyDBInstanceMessage$MonitoringInterval": "

    The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 60.

    If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

    Valid Values: 0, 1, 5, 10, 15, 30, 60

    ", + "ModifyDBInstanceMessage$DBPortNumber": "

    The port number on which the database accepts connections.

    The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

    Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter.

    MySQL

    Default: 3306

    Valid Values: 1150-65535

    MariaDB

    Default: 3306

    Valid Values: 1150-65535

    PostgreSQL

    Default: 5432

    Valid Values: 1150-65535

    Oracle

    Default: 1521

    Valid Values: 1150-65535

    SQL Server

    Default: 1433

    Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

    Amazon Aurora

    Default: 3306

    Valid Values: 1150-65535

    ", + "Option$Port": "

    If required, the port configured for this option to use.

    ", + "OptionConfiguration$Port": "

    The optional port for the option.

    ", + "OptionGroupOption$DefaultPort": "

    If the option requires a port, specifies the default port for the option.

    ", + "PendingModifiedValues$AllocatedStorage": "

    Contains the new AllocatedStorage size for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$Port": "

    Specifies the pending port for the DB instance.

    ", + "PendingModifiedValues$BackupRetentionPeriod": "

    Specifies the pending number of days for which automated backups are retained.

    ", + "PendingModifiedValues$Iops": "

    Specifies the new Provisioned IOPS value for the DB instance that will be applied or is being applied.

    ", + "PromoteReadReplicaMessage$BackupRetentionPeriod": "

    The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

    Default: 1

    Constraints:

    • Must be a value from 0 to 8
    ", + "PurchaseReservedDBInstancesOfferingMessage$DBInstanceCount": "

    The number of instances to reserve.

    Default: 1

    ", + "RestoreDBClusterFromSnapshotMessage$Port": "

    The port number on which the new DB cluster accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB cluster.

    ", + "RestoreDBClusterToPointInTimeMessage$Port": "

    The port number on which the new DB cluster accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB cluster.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Port": "

    The port number on which the database accepts connections.

    Default: The same port as the original DB instance

    Constraints: Value must be 1150-65535

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Iops": "

    Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter is not specified, the IOPS value will be taken from the backup. If this parameter is set to 0, the new instance will be converted to a non-PIOPS instance, which will take additional time, though your DB instance will be available for connections before the conversion starts.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    ", + "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine is not supported.

    " + } + }, + "InvalidDBClusterSnapshotStateFault": { + "base": "

    The supplied value is not a valid DB cluster snapshot state.

    ", + "refs": { + } + }, + "InvalidDBClusterStateFault": { + "base": "

    The supplied value is not a valid DB cluster state.

    ", + "refs": { + } + }, + "InvalidDBInstanceStateFault": { + "base": "

    The specified DB instance is not in the available state.

    ", + "refs": { + } + }, + "InvalidDBParameterGroupStateFault": { + "base": "

    The DB parameter group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSecurityGroupStateFault": { + "base": "

    The state of the DB security group does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSnapshotStateFault": { + "base": "

    The state of the DB snapshot does not allow deletion.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupFault": { + "base": "

    Indicates the DBSubnetGroup does not belong to the same VPC as that of an existing cross region read replica of the same source instance.

    ", + "refs": { + } + }, + "InvalidDBSubnetGroupStateFault": { + "base": "

    The DB subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidDBSubnetStateFault": { + "base": "

    The DB subnet is not in the available state.

    ", + "refs": { + } + }, + "InvalidEventSubscriptionStateFault": { + "base": "

    This error can occur if someone else is modifying a subscription. You should retry the action.

    ", + "refs": { + } + }, + "InvalidOptionGroupStateFault": { + "base": "

    The option group is not in the available state.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    Cannot restore from vpc backup to non-vpc DB instance.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The DB subnet group does not cover all Availability Zones after it was created because of changes that were made by users.

    ", + "refs": { + } + }, + "KMSKeyNotAccessibleFault": { + "base": "

    Error accessing KMS key.

    ", + "refs": { + } + }, + "KeyList": { + "base": null, + "refs": { + "RemoveTagsFromResourceMessage$TagKeys": "

    The tag key (name) of the tag to be removed.

    " + } + }, + "ListTagsForResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "AccountQuota$Used": "

    The amount currently used toward the quota maximum.

    ", + "AccountQuota$Max": "

    The maximum allowed value for the quota.

    ", + "DescribeDBLogFilesDetails$LastWritten": "

    A POSIX timestamp when the last log entry was written.

    ", + "DescribeDBLogFilesDetails$Size": "

    The size, in bytes, of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$FileLastWritten": "

    Filters the available log files for files written since the specified date, in POSIX timestamp format with milliseconds.

    ", + "DescribeDBLogFilesMessage$FileSize": "

    Filters the available log files for files larger than the specified size.

    " + } + }, + "ModifyDBClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBClusterResult": { + "base": null, + "refs": { + } + }, + "ModifyDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBInstanceResult": { + "base": null, + "refs": { + } + }, + "ModifyDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSnapshotAttributeMessage": { + "base": null, + "refs": { + } + }, + "ModifyDBSnapshotAttributeResult": { + "base": null, + "refs": { + } + }, + "ModifyDBSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyDBSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifyOptionGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyOptionGroupResult": { + "base": null, + "refs": { + } + }, + "Option": { + "base": "

    Option details.

    ", + "refs": { + "OptionsList$member": null + } + }, + "OptionConfiguration": { + "base": "

    A list of all available options

    ", + "refs": { + "OptionConfigurationList$member": null + } + }, + "OptionConfigurationList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToInclude": "

    Options in this list are added to the option group or, if already present, the specified configuration is used to update the existing configuration.

    " + } + }, + "OptionGroup": { + "base": "

    ", + "refs": { + "CopyOptionGroupResult$OptionGroup": null, + "CreateOptionGroupResult$OptionGroup": null, + "ModifyOptionGroupResult$OptionGroup": null, + "OptionGroupsList$member": null + } + }, + "OptionGroupAlreadyExistsFault": { + "base": "

    The option group you are trying to create already exists.

    ", + "refs": { + } + }, + "OptionGroupMembership": { + "base": "

    Provides information on the option groups the DB instance is a member of.

    ", + "refs": { + "OptionGroupMembershipList$member": null + } + }, + "OptionGroupMembershipList": { + "base": null, + "refs": { + "DBInstance$OptionGroupMemberships": "

    Provides the list of option group memberships for this DB instance.

    " + } + }, + "OptionGroupNotFoundFault": { + "base": "

    The specified option group could not be found.

    ", + "refs": { + } + }, + "OptionGroupOption": { + "base": "

    Available option.

    ", + "refs": { + "OptionGroupOptionsList$member": null + } + }, + "OptionGroupOptionSetting": { + "base": "

    Option group option settings are used to display settings available for each option with their default values and other information. These values are used with the DescribeOptionGroupOptions action.

    ", + "refs": { + "OptionGroupOptionSettingsList$member": null + } + }, + "OptionGroupOptionSettingsList": { + "base": null, + "refs": { + "OptionGroupOption$OptionGroupOptionSettings": "

    Specifies the option settings that are available (and the default value) for each option in an option group.

    " + } + }, + "OptionGroupOptionsList": { + "base": "

    List of available option group options.

    ", + "refs": { + "OptionGroupOptionsMessage$OptionGroupOptions": null + } + }, + "OptionGroupOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "OptionGroupQuotaExceededFault": { + "base": "

    The quota of 20 option groups was exceeded for this AWS account.

    ", + "refs": { + } + }, + "OptionGroups": { + "base": "

    List of option groups.

    ", + "refs": { + } + }, + "OptionGroupsList": { + "base": null, + "refs": { + "OptionGroups$OptionGroupsList": "

    List of option groups.

    " + } + }, + "OptionNamesList": { + "base": null, + "refs": { + "ModifyOptionGroupMessage$OptionsToRemove": "

    Options in this list are removed from the option group.

    " + } + }, + "OptionSetting": { + "base": "

    Option settings are the actual settings being applied or configured for that option. It is used when you modify an option group or describe option groups. For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER that can have several different values.

    ", + "refs": { + "OptionSettingConfigurationList$member": null, + "OptionSettingsList$member": null + } + }, + "OptionSettingConfigurationList": { + "base": null, + "refs": { + "Option$OptionSettings": "

    The option settings for this option.

    " + } + }, + "OptionSettingsList": { + "base": null, + "refs": { + "OptionConfiguration$OptionSettings": "

    The option settings to include in an option group.

    " + } + }, + "OptionsDependedOn": { + "base": null, + "refs": { + "OptionGroupOption$OptionsDependedOn": "

    List of all options that are prerequisites for this option.

    " + } + }, + "OptionsList": { + "base": null, + "refs": { + "OptionGroup$Options": "

    Indicates what options are available in the option group.

    " + } + }, + "OrderableDBInstanceOption": { + "base": "

    Contains a list of available options for a DB instance

    This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + "OrderableDBInstanceOptionsList$member": null + } + }, + "OrderableDBInstanceOptionsList": { + "base": null, + "refs": { + "OrderableDBInstanceOptionsMessage$OrderableDBInstanceOptions": "

    An OrderableDBInstanceOption structure containing information about orderable options for the DB instance.

    " + } + }, + "OrderableDBInstanceOptionsMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

    This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParametersList": { + "base": null, + "refs": { + "DBClusterParameterGroupDetails$Parameters": "

    Provides a list of parameters for the DB cluster parameter group.

    ", + "DBParameterGroupDetails$Parameters": "

    A list of Parameter values.

    ", + "EngineDefaults$Parameters": "

    Contains a list of engine default parameters.

    ", + "ModifyDBClusterParameterGroupMessage$Parameters": "

    A list of parameters in the DB cluster parameter group to modify.

    ", + "ModifyDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.

    Valid Values (for the application method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover. ", + "ResetDBClusterParameterGroupMessage$Parameters": "

    A list of parameter names in the DB cluster parameter group to reset to the default values. You cannot use this parameter if the ResetAllParameters parameter is set to true.

    ", + "ResetDBParameterGroupMessage$Parameters": "

    An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.

    MySQL

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots.

    MariaDB

    Valid Values (for Apply method): immediate | pending-reboot

    You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when DB instance reboots.

    Oracle

    Valid Values (for Apply method): pending-reboot

    " + } + }, + "PendingMaintenanceAction": { + "base": "

    Provides information about a pending maintenance action for a resource.

    ", + "refs": { + "PendingMaintenanceActionDetails$member": null + } + }, + "PendingMaintenanceActionDetails": { + "base": null, + "refs": { + "ResourcePendingMaintenanceActions$PendingMaintenanceActionDetails": "

    A list that provides details about the pending maintenance actions for the resource.

    " + } + }, + "PendingMaintenanceActions": { + "base": null, + "refs": { + "PendingMaintenanceActionsMessage$PendingMaintenanceActions": "

    A list of the pending maintenance actions for the resource.

    " + } + }, + "PendingMaintenanceActionsMessage": { + "base": "

    Data returned from the DescribePendingMaintenanceActions action.

    ", + "refs": { + } + }, + "PendingModifiedValues": { + "base": "

    This data type is used as a response element in the ModifyDBInstance action.

    ", + "refs": { + "DBInstance$PendingModifiedValues": "

    Specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

    " + } + }, + "PointInTimeRestoreNotEnabledFault": { + "base": "

    SourceDBInstanceIdentifier refers to a DB instance with BackupRetentionPeriod equal to 0.

    ", + "refs": { + } + }, + "PromoteReadReplicaMessage": { + "base": "

    ", + "refs": { + } + }, + "PromoteReadReplicaResult": { + "base": null, + "refs": { + } + }, + "ProvisionedIopsNotAvailableInAZFault": { + "base": "

    Provisioned IOPS not available in the specified Availability Zone.

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "PurchaseReservedDBInstancesOfferingResult": { + "base": null, + "refs": { + } + }, + "ReadReplicaDBInstanceIdentifierList": { + "base": null, + "refs": { + "DBInstance$ReadReplicaDBInstanceIdentifiers": "

    Contains one or more identifiers of the Read Replicas associated with this DB instance.

    " + } + }, + "RebootDBInstanceMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootDBInstanceResult": { + "base": null, + "refs": { + } + }, + "RecurringCharge": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings actions.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedDBInstance$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    ", + "ReservedDBInstancesOffering$RecurringCharges": "

    The recurring price charged to run this reserved DB instance.

    " + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "RemoveSourceIdentifierFromSubscriptionResult": { + "base": null, + "refs": { + } + }, + "RemoveTagsFromResourceMessage": { + "base": "

    ", + "refs": { + } + }, + "ReservedDBInstance": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstances and PurchaseReservedDBInstancesOffering actions.

    ", + "refs": { + "PurchaseReservedDBInstancesOfferingResult$ReservedDBInstance": null, + "ReservedDBInstanceList$member": null + } + }, + "ReservedDBInstanceAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedDBInstanceList": { + "base": null, + "refs": { + "ReservedDBInstanceMessage$ReservedDBInstances": "

    A list of reserved DB instances.

    " + } + }, + "ReservedDBInstanceMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstances action.

    ", + "refs": { + } + }, + "ReservedDBInstanceNotFoundFault": { + "base": "

    The specified reserved DB Instance was not found.

    ", + "refs": { + } + }, + "ReservedDBInstanceQuotaExceededFault": { + "base": "

    Request would exceed the user's DB Instance quota.

    ", + "refs": { + } + }, + "ReservedDBInstancesOffering": { + "base": "

    This data type is used as a response element in the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + "ReservedDBInstancesOfferingList$member": null + } + }, + "ReservedDBInstancesOfferingList": { + "base": null, + "refs": { + "ReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferings": "

    A list of reserved DB instance offerings.

    " + } + }, + "ReservedDBInstancesOfferingMessage": { + "base": "

    Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings action.

    ", + "refs": { + } + }, + "ReservedDBInstancesOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ResetDBClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ResetDBParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ResourceNotFoundFault": { + "base": "

    The specified resource ID was not found.

    ", + "refs": { + } + }, + "ResourcePendingMaintenanceActions": { + "base": "

    Describes the pending maintenance actions for a resource.

    ", + "refs": { + "ApplyPendingMaintenanceActionResult$ResourcePendingMaintenanceActions": null, + "PendingMaintenanceActions$member": null + } + }, + "RestoreDBClusterFromSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBClusterFromSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBClusterToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBClusterToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceFromDBSnapshotResult": { + "base": null, + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreDBInstanceToPointInTimeResult": { + "base": null, + "refs": { + } + }, + "RevokeDBSecurityGroupIngressMessage": { + "base": "

    ", + "refs": { + } + }, + "RevokeDBSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    SNS has responded that there is a problem with the SNS topic specified.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the SNS topic ARN.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    The SNS topic ARN does not exist.

    ", + "refs": { + } + }, + "SharedSnapshotQuotaExceededFault": { + "base": "

    You have exceeded the maximum number of account ids that you can share a manual DB snapshot with.

    ", + "refs": { + } + }, + "SnapshotQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed number of DB snapshots.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    The list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If SourceIds are supplied, SourceType must also be provided.
    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "EventSubscription$SourceIdsList": "

    A list of source IDs for the RDS event notification subscription.

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The requested source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    ", + "Event$SourceType": "

    Specifies the source type for this event.

    " + } + }, + "StorageQuotaExceededFault": { + "base": "

    Request would result in user exceeding the allowed amount of storage available across all DB instances.

    ", + "refs": { + } + }, + "StorageTypeNotSupportedFault": { + "base": "

    StorageType specified cannot be associated with the DB Instance.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AccountQuota$AccountQuotaName": "

    The name of the Amazon RDS quota for this AWS account.

    ", + "AddSourceIdentifierToSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to add a source identifier to.

    ", + "AddSourceIdentifierToSubscriptionMessage$SourceIdentifier": "

    The identifier of the event source to be added. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it cannot end with a hyphen or contain two consecutive hyphens.

    Constraints:

    • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.
    • If the source type is a DB security group, a DBSecurityGroupName must be supplied.
    • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.
    • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.
    ", + "AddTagsToResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ApplyPendingMaintenanceActionMessage$ResourceIdentifier": "

    The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ApplyPendingMaintenanceActionMessage$ApplyAction": "

    The pending maintenance action to apply to this resource.

    ", + "ApplyPendingMaintenanceActionMessage$OptInType": "

    A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate cannot be undone.

    Valid values:

    • immediate - Apply the maintenance action immediately.
    • next-maintenance - Apply the maintenance action during the next maintenance window for the resource.
    • undo-opt-in - Cancel any existing next-maintenance opt-in requests.
    ", + "AttributeValueList$member": null, + "AuthorizeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to add authorization to.

    ", + "AuthorizeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to authorize.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    Name of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    Id of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AuthorizeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    AWS account number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "AvailabilityZones$member": null, + "Certificate$CertificateIdentifier": "

    The unique key that identifies a certificate.

    ", + "Certificate$CertificateType": "

    The type of the certificate.

    ", + "Certificate$Thumbprint": "

    The thumbprint of the certificate.

    ", + "CertificateMessage$Marker": "

    An optional pagination token provided by a previous DescribeCertificates request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "CharacterSet$CharacterSetName": "

    The name of the character set.

    ", + "CharacterSet$CharacterSetDescription": "

    The description of the character set.

    ", + "CopyDBClusterSnapshotMessage$SourceDBClusterSnapshotIdentifier": "

    The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster-snapshot1

    ", + "CopyDBClusterSnapshotMessage$TargetDBClusterSnapshotIdentifier": "

    The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster-snapshot2

    ", + "CopyDBParameterGroupMessage$SourceDBParameterGroupIdentifier": "

    The identifier or ARN for the source DB parameter group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    Constraints:

    • Must specify a valid DB parameter group.
    • If the source DB parameter group is in the same region as the copy, specify a valid DB parameter group identifier, for example my-db-param-group, or a valid ARN.
    • If the source DB parameter group is in a different region than the copy, specify a valid DB parameter group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters.
    ", + "CopyDBParameterGroupMessage$TargetDBParameterGroupIdentifier": "

    The identifier for the copied DB parameter group.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-parameter-group

    ", + "CopyDBParameterGroupMessage$TargetDBParameterGroupDescription": "

    A description for the copied DB parameter group.

    ", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

    The identifier for the source DB snapshot.

    If you are copying from a shared manual DB snapshot, this must be the ARN of the shared DB snapshot.

    Constraints:

    • Must specify a valid system snapshot in the \"available\" state.
    • If the source snapshot is in the same region as the copy, specify a valid DB snapshot identifier.
    • If the source snapshot is in a different region than the copy, specify a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot.

    Example: rds:mydb-2012-04-02-00-01

    Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805

    ", + "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

    The identifier for the copied snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-db-snapshot

    ", + "CopyOptionGroupMessage$SourceOptionGroupIdentifier": "

    The identifier or ARN for the source option group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    Constraints:

    • Must specify a valid option group.
    • If the source option group is in the same region as the copy, specify a valid option group identifier, for example my-option-group, or a valid ARN.
    • If the source option group is in a different region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options.
    ", + "CopyOptionGroupMessage$TargetOptionGroupIdentifier": "

    The identifier for the copied option group.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-option-group

    ", + "CopyOptionGroupMessage$TargetOptionGroupDescription": "

    The description for the copied option group.

    ", + "CreateDBClusterMessage$CharacterSetName": "

    A value that indicates that the DB cluster should be associated with the specified CharacterSet.

    ", + "CreateDBClusterMessage$DatabaseName": "

    The name for your database of up to 8 alpha-numeric characters. If you do not provide a name, Amazon RDS will not create a database in the DB cluster you are creating.

    ", + "CreateDBClusterMessage$DBClusterIdentifier": "

    The DB cluster identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster1

    ", + "CreateDBClusterMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group to associate with this DB cluster. If this argument is omitted, default.aurora5.6 for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBClusterMessage$DBSubnetGroupName": "

    A DB subnet group to associate with this DB cluster.

    ", + "CreateDBClusterMessage$Engine": "

    The name of the database engine to be used for this DB cluster.

    Valid Values: aurora

    ", + "CreateDBClusterMessage$EngineVersion": "

    The version number of the database engine to use.

    Aurora

    Example: 5.6.10a

    ", + "CreateDBClusterMessage$MasterUsername": "

    The name of the master user for the client DB cluster.

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBClusterMessage$MasterUserPassword": "

    The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

    Constraints: Must contain from 8 to 41 characters.

    ", + "CreateDBClusterMessage$OptionGroupName": "

    A value that indicates that the DB cluster should be associated with the specified option group.

    Permanent options cannot be removed from an option group. The option group cannot be removed from a DB cluster once it is associated with a DB cluster.

    ", + "CreateDBClusterMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi.
    • Times should be in Universal Coordinated Time (UTC).
    • Must not conflict with the preferred maintenance window.
    • Must be at least 30 minutes.
    ", + "CreateDBClusterMessage$PreferredMaintenanceWindow": "

    The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBClusterMessage$KmsKeyId": "

    The KMS key identifier for an encrypted DB cluster.

    The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KM encryption key.

    If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

    ", + "CreateDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lowercase string.", + "CreateDBClusterParameterGroupMessage$DBParameterGroupFamily": "

    The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

    ", + "CreateDBClusterParameterGroupMessage$Description": "

    The description for the DB cluster parameter group.

    ", + "CreateDBClusterSnapshotMessage$DBClusterSnapshotIdentifier": "

    The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster1-snapshot1

    ", + "CreateDBClusterSnapshotMessage$DBClusterIdentifier": "

    The identifier of the DB cluster to create a snapshot for. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster1

    ", + "CreateDBInstanceMessage$DBName": "

    The meaning of this parameter differs according to the database engine you use.

    Type: String

    MySQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    MariaDB

    The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine

    PostgreSQL

    The name of the database to create when the DB instance is created. If this parameter is not specified, the default \"postgres\" database is created in the DB instance.

    Constraints:

    • Must contain 1 to 63 alphanumeric characters
    • Must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0-9).
    • Cannot be a word reserved by the specified database engine

    Oracle

    The Oracle System ID (SID) of the created DB instance.

    Default: ORCL

    Constraints:

    • Cannot be longer than 8 characters

    SQL Server

    Not applicable. Must be null.

    Amazon Aurora

    The name of the database to create when the primary instance of the DB cluster is created. If this parameter is not specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters
    • Cannot be a word reserved by the specified database engine
    ", + "CreateDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server).
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.

    Example: mydbinstance

    ", + "CreateDBInstanceMessage$DBInstanceClass": "

    The compute and memory capacity of the DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    ", + "CreateDBInstanceMessage$Engine": "

    The name of the database engine to be used for this instance.

    Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora

    Not every database engine is available for every AWS region.

    ", + "CreateDBInstanceMessage$MasterUsername": "

    The name of master user for the client DB instance.

    MySQL

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    MariaDB

    Constraints:

    • Must be 1 to 16 alphanumeric characters.
    • Cannot be a reserved word for the chosen database engine.

    Type: String

    Oracle

    Constraints:

    • Must be 1 to 30 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    SQL Server

    Constraints:

    • Must be 1 to 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.

    PostgreSQL

    Constraints:

    • Must be 1 to 63 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word for the chosen database engine.
    ", + "CreateDBInstanceMessage$MasterUserPassword": "

    The password for the master database user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Type: String

    MySQL

    Constraints: Must contain from 8 to 41 characters.

    MariaDB

    Constraints: Must contain from 8 to 41 characters.

    Oracle

    Constraints: Must contain from 8 to 30 characters.

    SQL Server

    Constraints: Must contain from 8 to 128 characters.

    PostgreSQL

    Constraints: Must contain from 8 to 128 characters.

    Amazon Aurora

    Constraints: Must contain from 8 to 41 characters.

    ", + "CreateDBInstanceMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in. For information on regions and Availability Zones, see Regions and Availability Zones.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateDBInstanceMessage$DBSubnetGroupName": "

    A DB subnet group to associate with this DB instance.

    If there is no DB subnet group, then it is a non-VPC DB instance.

    ", + "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see DB Instance Maintenance.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine will be used.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see DB Instance Backups.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi.
    • Times should be in Universal Coordinated Time (UTC).
    • Must not conflict with the preferred maintenance window.
    • Must be at least 30 minutes.
    ", + "CreateDBInstanceMessage$EngineVersion": "

    The version number of the database engine to use.

    The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS region.

    MySQL

    • Version 5.1 (Only available in the following regions: ap-northeast-1, ap-southeast-1, ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 5.1.73a | 5.1.73b
    • Version 5.5 (Only available in the following regions: ap-northeast-1, ap-southeast-1, ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 5.5.40 | 5.5.40a
    • Version 5.5 (Available in all regions): 5.5.40b | 5.5.41 | 5.5.42
    • Version 5.6 (Available in all regions): 5.6.19a | 5.6.19b | 5.6.21 | 5.6.21b | 5.6.22 | 5.6.23

    MariaDB

    • Version 10.0 (Available in all regions except AWS GovCloud (US) Region (us-gov-west-1)): 10.0.17

    Oracle Database Enterprise Edition (oracle-ee)

    • Version 11.2 (Only available in the following regions: ap-northeast-1, ap-southeast-1, ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7
    • Version 11.2 (Available in all regions): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4
    • Version 12.1 (Available in all regions): 12.1.0.1.v1 | 12.1.0.1.v2 | 12.1.0.2.v1

    Oracle Database Standard Edition (oracle-se)

    • Version 11.2 (Only available in the following regions: us-west-1): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7
    • Version 11.2 (Only available in the following regions: eu-central-1, us-west-1): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4
    • Version 12.1 (Only available in the following regions: eu-central-1, us-west-1): 12.1.0.1.v1 | 12.1.0.1.v2

    Oracle Database Standard Edition One (oracle-se1)

    • Version 11.2 (Only available in the following regions: us-west-1): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7
    • Version 11.2 (Only available in the following regions: eu-central-1, us-west-1): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4
    • Version 12.1 (Only available in the following regions: eu-central-1, us-west-1): 12.1.0.1.v1 | 12.1.0.1.v2

    PostgreSQL

    • Version 9.3 (Only available in the following regions: ap-northeast-1, ap-southeast-1, ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 9.3.1 | 9.3.2
    • Version 9.3 (Available in all regions): 9.3.3 | 9.3.5 | 9.3.6 | 9.3.9 | 9.3.10
    • Version 9.4 (Available in all regions): 9.4.1 | 9.4.4 | 9.4.5

    Microsoft SQL Server Enterprise Edition (sqlserver-ee)

    • Version 10.50 (Available in all regions): 10.50.2789.0.v1
    • Version 10.50 (Available in all regions): 10.50.6000.34.v1
    • Version 11.00 (Available in all regions): 11.00.2100.60.v1
    • Version 11.00 (Available in all regions): 11.00.5058.0.v1

    Microsoft SQL Server Express Edition (sqlserver-ex)

    • Version 10.50 (Available in all regions): 10.50.2789.0.v1
    • Version 10.50 (Available in all regions): 10.50.6000.34.v1
    • Version 11.00 (Available in all regions): 11.00.2100.60.v1
    • Version 11.00 (Available in all regions): 11.00.5058.0.v1
    • Version 12.00 (Available in all regions): 12.00.4422.0.v1

    Microsoft SQL Server Standard Edition (sqlserver-se)

    • Version 10.50 (Available in all regions): 10.50.2789.0.v1
    • Version 10.50 (Available in all regions): 10.50.6000.34.v1
    • Version 11.00 (Available in all regions): 11.00.2100.60.v1
    • Version 11.00 (Available in all regions): 11.00.5058.0.v1
    • Version 12.00 (Available in all regions): 12.00.4422.0.v1

    Microsoft SQL Server Web Edition (sqlserver-web)

    • Version 10.50 (Available in all regions): 10.50.2789.0.v1
    • Version 10.50 (Available in all regions): 10.50.6000.34.v1
    • Version 11.00 (Available in all regions): 11.00.2100.60.v1
    • Version 11.00 (Available in all regions): 11.00.5058.0.v1
    • Version 12.00 (Available in all regions): 12.00.4422.0.v1
    ", + "CreateDBInstanceMessage$LicenseModel": "

    License model information for this DB instance.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "CreateDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "CreateDBInstanceMessage$CharacterSetName": "

    For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

    ", + "CreateDBInstanceMessage$DBClusterIdentifier": "

    The identifier of the DB cluster that the instance will belong to.

    For information on creating a DB cluster, see CreateDBCluster.

    Type: String

    ", + "CreateDBInstanceMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "CreateDBInstanceMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "CreateDBInstanceMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "CreateDBInstanceMessage$KmsKeyId": "

    The KMS key identifier for an encrypted DB instance.

    The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KM encryption key.

    If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

    ", + "CreateDBInstanceMessage$MonitoringRoleArn": "

    The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring.

    If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier of the Read Replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

    ", + "CreateDBInstanceReadReplicaMessage$SourceDBInstanceIdentifier": "

    The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

    Constraints:

    • Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB instance.
    • Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6.
    • Can specify a DB instance that is a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5.
    • The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0.
    • If the source DB instance is in the same region as the Read Replica, specify a valid DB instance identifier.
    • If the source DB instance is in a different region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing a Amazon RDS Amazon Resource Name (ARN).
    ", + "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

    The compute and memory capacity of the Read Replica.

    Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    Default: Inherits from the source DB instance.

    ", + "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone that the Read Replica will be created in.

    Default: A random, system-chosen Availability Zone in the endpoint's region.

    Example: us-east-1d

    ", + "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

    The option group the DB instance will be associated with. If omitted, the default option group for the engine specified will be used.

    ", + "CreateDBInstanceReadReplicaMessage$DBSubnetGroupName": "

    Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

    Constraints:

    • Can only be specified if the source DB instance identifier specifies a DB instance in another region.
    • The specified DB subnet group must be in the same region in which the operation is running.
    • All Read Replicas in one region that are created from the same source DB instance must either:
      • Specify DB subnet groups from the same VPC. All these Read Replicas will be created in the same VPC.
      • Not specify a DB subnet group. All these Read Replicas will be created outside of any VPC.
    ", + "CreateDBInstanceReadReplicaMessage$StorageType": "

    Specifies the storage type to be associated with the Read Replica.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "CreateDBInstanceReadReplicaMessage$MonitoringRoleArn": "

    The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring.

    If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

    ", + "CreateDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lowercase string.", + "CreateDBParameterGroupMessage$DBParameterGroupFamily": "

    The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

    ", + "CreateDBParameterGroupMessage$Description": "

    The description for the DB parameter group.

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupName": "

    The name for the DB security group. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • Cannot contain spaces

    Example: mysecuritygroup

    ", + "CreateDBSecurityGroupMessage$DBSecurityGroupDescription": "

    The description for the DB security group.

    ", + "CreateDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateDBSnapshotMessage$DBInstanceIdentifier": "

    The DB instance identifier. This is the unique key that identifies a DB instance.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, or hyphens. Must not be default.

    Example: mySubnetgroup

    ", + "CreateDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the subscription.

    Constraints: The name must be less than 255 characters.

    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. if this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "CreateOptionGroupMessage$OptionGroupName": "

    Specifies the name of the option group to be created.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: myoptiongroup

    ", + "CreateOptionGroupMessage$EngineName": "

    Specifies the name of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$MajorEngineVersion": "

    Specifies the major version of the engine that this option group should be associated with.

    ", + "CreateOptionGroupMessage$OptionGroupDescription": "

    The description of the option group.

    ", + "DBCluster$CharacterSetName": "

    If present, specifies the name of the character set that this cluster is associated with.

    ", + "DBCluster$DatabaseName": "

    Contains the name of the initial database of this DB cluster that was provided at create time, if one was specified when the DB cluster was created. This same name is returned for the life of the DB cluster.

    ", + "DBCluster$DBClusterIdentifier": "

    Contains a user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.

    ", + "DBCluster$DBClusterParameterGroup": "

    Specifies the name of the DB cluster parameter group for the DB cluster.

    ", + "DBCluster$DBSubnetGroup": "

    Specifies information on the subnet group associated with the DB cluster, including the name, description, and subnets in the subnet group.

    ", + "DBCluster$Status": "

    Specifies the current state of this DB cluster.

    ", + "DBCluster$PercentProgress": "

    Specifies the progress of the operation as a percentage.

    ", + "DBCluster$Endpoint": "

    Specifies the connection endpoint for the primary instance of the DB cluster.

    ", + "DBCluster$Engine": "

    Provides the name of the database engine to be used for this DB cluster.

    ", + "DBCluster$EngineVersion": "

    Indicates the database engine version.

    ", + "DBCluster$MasterUsername": "

    Contains the master username for the DB cluster.

    ", + "DBCluster$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBCluster$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    ", + "DBCluster$HostedZoneId": "

    Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

    ", + "DBCluster$KmsKeyId": "

    If StorageEncrypted is true, the KMS key identifier for the encrypted DB cluster.

    ", + "DBCluster$DbClusterResourceId": "

    If StorageEncrypted is true, the region-unique, immutable identifier for the encrypted DB cluster. This identifier is found in AWS CloudTrail log entries whenever the KMS key for the DB cluster is accessed.

    ", + "DBClusterMember$DBInstanceIdentifier": "

    Specifies the instance identifier for this member of the DB cluster.

    ", + "DBClusterMember$DBClusterParameterGroupStatus": "

    Specifies the status of the DB cluster parameter group for this member of the DB cluster.

    ", + "DBClusterMessage$Marker": "

    A pagination token that can be used in a subsequent DescribeDBClusters request.

    ", + "DBClusterOptionGroupStatus$DBClusterOptionGroupName": "

    Specifies the name of the DB cluster option group.

    ", + "DBClusterOptionGroupStatus$Status": "

    Specifies the status of the DB cluster option group.

    ", + "DBClusterParameterGroup$DBClusterParameterGroupName": "

    Provides the name of the DB cluster parameter group.

    ", + "DBClusterParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB parameter group family that this DB cluster parameter group is compatible with.

    ", + "DBClusterParameterGroup$Description": "

    Provides the customer-specified description for this DB cluster parameter group.

    ", + "DBClusterParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DBClusterParameterGroupNameMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    This value is stored as a lowercase string.", + "DBClusterParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBClusterSnapshot$DBClusterSnapshotIdentifier": "

    Specifies the identifier for the DB cluster snapshot.

    ", + "DBClusterSnapshot$DBClusterIdentifier": "

    Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.

    ", + "DBClusterSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBClusterSnapshot$Status": "

    Specifies the status of this DB cluster snapshot.

    ", + "DBClusterSnapshot$VpcId": "

    Provides the VPC ID associated with the DB cluster snapshot.

    ", + "DBClusterSnapshot$MasterUsername": "

    Provides the master username for the DB cluster snapshot.

    ", + "DBClusterSnapshot$EngineVersion": "

    Provides the version of the database engine for this DB cluster snapshot.

    ", + "DBClusterSnapshot$LicenseModel": "

    Provides the license model information for this DB cluster snapshot.

    ", + "DBClusterSnapshot$SnapshotType": "

    Provides the type of the DB cluster snapshot.

    ", + "DBClusterSnapshot$KmsKeyId": "

    If StorageEncrypted is true, the KMS key identifier for the encrypted DB cluster snapshot.

    ", + "DBClusterSnapshotMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBEngineVersion$Engine": "

    The name of the database engine.

    ", + "DBEngineVersion$EngineVersion": "

    The version number of the database engine.

    ", + "DBEngineVersion$DBParameterGroupFamily": "

    The name of the DB parameter group family for the database engine.

    ", + "DBEngineVersion$DBEngineDescription": "

    The description of the database engine.

    ", + "DBEngineVersion$DBEngineVersionDescription": "

    The description of the database engine version.

    ", + "DBEngineVersionMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBInstance$DBInstanceIdentifier": "

    Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance.

    ", + "DBInstance$DBInstanceClass": "

    Contains the name of the compute and memory capacity class of the DB instance.

    ", + "DBInstance$Engine": "

    Provides the name of the database engine to be used for this DB instance.

    ", + "DBInstance$DBInstanceStatus": "

    Specifies the current state of this database.

    ", + "DBInstance$MasterUsername": "

    Contains the master username for the DB instance.

    ", + "DBInstance$DBName": "

    The meaning of this parameter differs according to the database engine you use. For example, this value returns MySQL, MariaDB, or PostgreSQL information when returning values from CreateDBInstanceReadReplica since Read Replicas are only supported for these engines.

    MySQL, MariaDB, SQL Server, PostgreSQL, Amazon Aurora

    Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.

    Type: String

    Oracle

    Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.

    ", + "DBInstance$PreferredBackupWindow": "

    Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

    ", + "DBInstance$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance is located in.

    ", + "DBInstance$PreferredMaintenanceWindow": "

    Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    ", + "DBInstance$EngineVersion": "

    Indicates the database engine version.

    ", + "DBInstance$ReadReplicaSourceDBInstanceIdentifier": "

    Contains the identifier of the source DB instance if this DB instance is a Read Replica.

    ", + "DBInstance$LicenseModel": "

    License model information for this DB instance.

    ", + "DBInstance$CharacterSetName": "

    If present, specifies the name of the character set that this instance is associated with.

    ", + "DBInstance$SecondaryAvailabilityZone": "

    If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

    ", + "DBInstance$StorageType": "

    Specifies the storage type associated with DB instance.

    ", + "DBInstance$TdeCredentialArn": "

    The ARN from the Key Store with which the instance is associated for TDE encryption.

    ", + "DBInstance$DBClusterIdentifier": "

    If the DB instance is a member of a DB cluster, contains the name of the DB cluster that the DB instance is a member of.

    ", + "DBInstance$KmsKeyId": "

    If StorageEncrypted is true, the KMS key identifier for the encrypted DB instance.

    ", + "DBInstance$DbiResourceId": "

    If StorageEncrypted is true, the region-unique, immutable identifier for the encrypted DB instance. This identifier is found in AWS CloudTrail log entries whenever the KMS key for the DB instance is accessed.

    ", + "DBInstance$CACertificateIdentifier": "

    The identifier of the CA certificate for this DB instance.

    ", + "DBInstance$EnhancedMonitoringResourceArn": "

    The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that receives the Enhanced Monitoring metrics data for the DB instance.

    ", + "DBInstance$MonitoringRoleArn": "

    The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to CloudWatch Logs.

    ", + "DBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    ", + "DBInstanceStatusInfo$StatusType": "

    This value is currently \"read replication.\"

    ", + "DBInstanceStatusInfo$Status": "

    Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

    ", + "DBInstanceStatusInfo$Message": "

    Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

    ", + "DBParameterGroup$DBParameterGroupName": "

    Provides the name of the DB parameter group.

    ", + "DBParameterGroup$DBParameterGroupFamily": "

    Provides the name of the DB parameter group family that this DB parameter group is compatible with.

    ", + "DBParameterGroup$Description": "

    Provides the customer-specified description for this DB parameter group.

    ", + "DBParameterGroupDetails$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBParameterGroupNameMessage$DBParameterGroupName": "

    Provides the name of the DB parameter group.

    ", + "DBParameterGroupStatus$DBParameterGroupName": "

    The name of the DP parameter group.

    ", + "DBParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "DBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroup$OwnerId": "

    Provides the AWS ID of the owner of a specific DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupName": "

    Specifies the name of the DB security group.

    ", + "DBSecurityGroup$DBSecurityGroupDescription": "

    Provides the description of the DB security group.

    ", + "DBSecurityGroup$VpcId": "

    Provides the VpcId of the DB security group.

    ", + "DBSecurityGroupMembership$DBSecurityGroupName": "

    The name of the DB security group.

    ", + "DBSecurityGroupMembership$Status": "

    The status of the DB security group.

    ", + "DBSecurityGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSecurityGroupNameList$member": null, + "DBSnapshot$DBSnapshotIdentifier": "

    Specifies the identifier for the DB snapshot.

    ", + "DBSnapshot$DBInstanceIdentifier": "

    Specifies the DB instance identifier of the DB instance this DB snapshot was created from.

    ", + "DBSnapshot$Engine": "

    Specifies the name of the database engine.

    ", + "DBSnapshot$Status": "

    Specifies the status of this DB snapshot.

    ", + "DBSnapshot$AvailabilityZone": "

    Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot.

    ", + "DBSnapshot$VpcId": "

    Provides the VPC ID associated with the DB snapshot.

    ", + "DBSnapshot$MasterUsername": "

    Provides the master username for the DB snapshot.

    ", + "DBSnapshot$EngineVersion": "

    Specifies the version of the database engine.

    ", + "DBSnapshot$LicenseModel": "

    License model information for the restored DB instance.

    ", + "DBSnapshot$SnapshotType": "

    Provides the type of the DB snapshot.

    ", + "DBSnapshot$OptionGroupName": "

    Provides the option group name for the DB snapshot.

    ", + "DBSnapshot$SourceRegion": "

    The region that the DB snapshot was created in or copied from.

    ", + "DBSnapshot$SourceDBSnapshotIdentifier": "

    The DB snapshot Arn that the DB snapshot was copied from. It only has value in case of cross customer or cross region copy.

    ", + "DBSnapshot$StorageType": "

    Specifies the storage type associated with DB Snapshot.

    ", + "DBSnapshot$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "DBSnapshot$KmsKeyId": "

    If Encrypted is true, the KMS key identifier for the encrypted DB snapshot.

    ", + "DBSnapshotAttribute$AttributeName": "

    The name of the manual DB snapshot attribute.

    An attribute name of restore applies to the list of AWS accounts that have permission to copy or restore the manual DB snapshot.

    ", + "DBSnapshotAttributesResult$DBSnapshotIdentifier": "

    The identifier of the manual DB snapshot that the attributes apply to.

    ", + "DBSnapshotMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DBSubnetGroup$DBSubnetGroupName": "

    Specifies the name of the DB subnet group.

    ", + "DBSubnetGroup$DBSubnetGroupDescription": "

    Provides the description of the DB subnet group.

    ", + "DBSubnetGroup$VpcId": "

    Provides the VpcId of the DB subnet group.

    ", + "DBSubnetGroup$SubnetGroupStatus": "

    Provides the status of the DB subnet group.

    ", + "DBSubnetGroupMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DeleteDBClusterMessage$DBClusterIdentifier": "

    The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBClusterMessage$FinalDBSnapshotIdentifier": "

    The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group.

    Constraints:

    • Must be the name of an existing DB cluster parameter group.
    • You cannot delete a default DB cluster parameter group.
    • Cannot be associated with any DB clusters.
    ", + "DeleteDBClusterSnapshotMessage$DBClusterSnapshotIdentifier": "

    The identifier of the DB cluster snapshot to delete.

    Constraints: Must be the name of an existing DB cluster snapshot in the available state.

    ", + "DeleteDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier for the DB instance to be deleted. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteDBInstanceMessage$FinalDBSnapshotIdentifier": "

    The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

    Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Cannot be specified when deleting a Read Replica.
    ", + "DeleteDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • You cannot delete a default DB parameter group
    • Cannot be associated with any DB instances
    ", + "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

    The name of the DB security group to delete.

    You cannot delete the default DB security group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • Must not be \"Default\"
    • Cannot contain spaces
    ", + "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

    The DBSnapshot identifier.

    Constraints: Must be the name of an existing DB snapshot in the available state.

    ", + "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

    The name of the database subnet group to delete.

    You cannot delete the default subnet group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to delete.

    ", + "DeleteOptionGroupMessage$OptionGroupName": "

    The name of the option group to be deleted.

    You cannot delete default option groups.", + "DescribeCertificatesMessage$CertificateIdentifier": "

    The user-supplied certificate identifier. If this parameter is specified, information for only the identified certificate is returned. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeCertificatesMessage$Marker": "

    An optional pagination token provided by a previous DescribeCertificates request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBClusterParameterGroupsMessage$DBClusterParameterGroupName": "

    The name of a specific DB cluster parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBClusterParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBClusterParametersMessage$DBClusterParameterGroupName": "

    The name of a specific DB cluster parameter group to return parameter details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBClusterParametersMessage$Source": "

    A value that indicates to return only parameters for a specific source. Parameter sources can be engine, service, or customer.

    ", + "DescribeDBClusterParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBClusterSnapshotsMessage$DBClusterIdentifier": "

    A DB cluster identifier to retrieve the list of DB cluster snapshots for. This parameter cannot be used in conjunction with the DBClusterSnapshotIdentifier parameter. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBClusterSnapshotsMessage$DBClusterSnapshotIdentifier": "

    A specific DB cluster snapshot identifier to describe. This parameter cannot be used in conjunction with the DBClusterIdentifier parameter. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBClusterSnapshotsMessage$SnapshotType": "

    The type of DB cluster snapshots that will be returned. Values can be automated or manual. If this parameter is not specified, the returned results will include all snapshot types.

    ", + "DescribeDBClusterSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBClustersMessage$DBClusterIdentifier": "

    The user-supplied DB cluster identifier. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBClustersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBClusters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBEngineVersionsMessage$Engine": "

    The database engine to return.

    ", + "DescribeDBEngineVersionsMessage$EngineVersion": "

    The database engine version to return.

    Example: 5.1.49

    ", + "DescribeDBEngineVersionsMessage$DBParameterGroupFamily": "

    The name of a specific DB parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBEngineVersionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBInstancesMessage$DBInstanceIdentifier": "

    The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBLogFilesDetails$LogFileName": "

    The name of the log file for the specified DB instance.

    ", + "DescribeDBLogFilesMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBLogFilesMessage$FilenameContains": "

    Filters the available log files for log file names that contain the specified string.

    ", + "DescribeDBLogFilesMessage$Marker": "

    The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords.

    ", + "DescribeDBLogFilesResponse$Marker": "

    A pagination token that can be used in a subsequent DescribeDBLogFiles request.

    ", + "DescribeDBParameterGroupsMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParameterGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBParametersMessage$DBParameterGroupName": "

    The name of a specific DB parameter group to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBParametersMessage$Source": "

    The parameter types to return.

    Default: All parameter types returned

    Valid Values: user | system | engine-default

    ", + "DescribeDBParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

    The name of the DB security group to return details for.

    ", + "DescribeDBSecurityGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSnapshotAttributesMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to modify the attributes for.

    ", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

    A DB instance identifier to retrieve the list of DB snapshots for. This parameter cannot be used in conjunction with DBSnapshotIdentifier. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

    A specific DB snapshot identifier to describe. This parameter cannot be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

    Constraints:

    • Must be 1 to 255 alphanumeric characters.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • If this is the identifier of an automated snapshot, the SnapshotType parameter must also be specified.
    ", + "DescribeDBSnapshotsMessage$SnapshotType": "

    The type of snapshots that will be returned. You can specify one of the following values:

    • automated - Return all DB snapshots that have been automatically taken by Amazon RDS for my AWS account.
    • manual - Return all DB snapshots that have been taken by my AWS account.
    • shared - Return all manual DB snapshots that have been shared to my AWS account.
    • public - Return all DB snapshots that have been marked as public.

    If you do not specify a SnapshotType, then both automated and manual snapshots are returned. You can include shared snapshots with these results by setting the IncludeShared parameter to true. You can include public snapshots with these results by setting the IncludePublic parameter to true.

    The IncludeShared and IncludePublic parameters do not apply for SnapshotType values of manual or automated. The IncludePublic parameter does not apply when SnapshotType is set to shared. the IncludeShared parameter does not apply when SnapshotType is set to public.

    ", + "DescribeDBSnapshotsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeDBSubnetGroupsMessage$DBSubnetGroupName": "

    The name of the DB subnet group to return details for.

    ", + "DescribeDBSubnetGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultClusterParametersMessage$DBParameterGroupFamily": "

    The name of the DB cluster parameter group family to return engine parameter information for.

    ", + "DescribeEngineDefaultClusterParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEngineDefaultParametersMessage$DBParameterGroupFamily": "

    The name of the DB parameter group family.

    ", + "DescribeEngineDefaultParametersMessage$Marker": "

    An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The type of source that will be generating the events.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to describe.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.

    Constraints:

    • If SourceIdentifier is supplied, SourceType must also be provided.
    • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.
    • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.
    • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.
    • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DescribeEventsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupOptionsMessage$EngineName": "

    A required parameter. Options available for the given engine name will be described.

    ", + "DescribeOptionGroupOptionsMessage$MajorEngineVersion": "

    If specified, filters the results to include only options for the specified major engine version.

    ", + "DescribeOptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$OptionGroupName": "

    The name of the option group to describe. Cannot be supplied together with EngineName or MajorEngineVersion.

    ", + "DescribeOptionGroupsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOptionGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeOptionGroupsMessage$EngineName": "

    Filters the list of option groups to only include groups associated with a specific database engine.

    ", + "DescribeOptionGroupsMessage$MajorEngineVersion": "

    Filters the list of option groups to only include groups associated with a specific database engine version. If specified, then EngineName must also be specified.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Engine": "

    The name of the engine to retrieve DB instance options for.

    ", + "DescribeOrderableDBInstanceOptionsMessage$EngineVersion": "

    The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

    ", + "DescribeOrderableDBInstanceOptionsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeOrderableDBInstanceOptionsMessage$LicenseModel": "

    The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

    ", + "DescribeOrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribePendingMaintenanceActionsMessage$ResourceIdentifier": "

    The ARN of a resource to return pending maintenance actions for.

    ", + "DescribePendingMaintenanceActionsMessage$Marker": "

    An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstanceId": "

    The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    ", + "DescribeReservedDBInstancesMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    ", + "DescribeReservedDBInstancesMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instances class.

    ", + "DescribeReservedDBInstancesMessage$Duration": "

    The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesMessage$ProductDescription": "

    The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    ", + "DescribeReservedDBInstancesMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Partial Upfront\" | \"All Upfront\" | \"No Upfront\"

    ", + "DescribeReservedDBInstancesMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DescribeReservedDBInstancesOfferingsMessage$ReservedDBInstancesOfferingId": "

    The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "DescribeReservedDBInstancesOfferingsMessage$DBInstanceClass": "

    The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

    ", + "DescribeReservedDBInstancesOfferingsMessage$Duration": "

    Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.

    Valid Values: 1 | 3 | 31536000 | 94608000

    ", + "DescribeReservedDBInstancesOfferingsMessage$ProductDescription": "

    Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

    ", + "DescribeReservedDBInstancesOfferingsMessage$OfferingType": "

    The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.

    Valid Values: \"Partial Upfront\" | \"All Upfront\" | \"No Upfront\"

    ", + "DescribeReservedDBInstancesOfferingsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "DownloadDBLogFilePortionDetails$LogFileData": "

    Entries from the specified log file.

    ", + "DownloadDBLogFilePortionDetails$Marker": "

    A pagination token that can be used in a subsequent DownloadDBLogFilePortion request.

    ", + "DownloadDBLogFilePortionMessage$DBInstanceIdentifier": "

    The customer-assigned name of the DB instance that contains the log files you want to list.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DownloadDBLogFilePortionMessage$LogFileName": "

    The name of the log file to be downloaded.

    ", + "DownloadDBLogFilePortionMessage$Marker": "

    The pagination token provided in the previous request or \"0\". If the Marker parameter is specified the response includes only records beyond the marker until the end of the file or up to NumberOfLines.

    ", + "EC2SecurityGroup$Status": "

    Provides the status of the EC2 security group. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    Specifies the name of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupId": "

    Specifies the id of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    Specifies the AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "Endpoint$Address": "

    Specifies the DNS address of the DB instance.

    ", + "Endpoint$HostedZoneId": "

    Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

    ", + "EngineDefaults$DBParameterGroupFamily": "

    Specifies the name of the DB parameter group family that the engine default parameters apply to.

    ", + "EngineDefaults$Marker": "

    An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Event$SourceIdentifier": "

    Provides the identifier for the source of the event.

    ", + "Event$Message": "

    Provides the text of this event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The source type that the returned categories belong to.

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the RDS event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The RDS event notification subscription Id.

    ", + "EventSubscription$SnsTopicArn": "

    The topic ARN of the RDS event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the RDS event notification subscription.

    Constraints:

    Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

    The status \"no-permission\" indicates that RDS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The time the RDS event notification subscription was created.

    ", + "EventSubscription$SourceType": "

    The source type for the RDS event notification subscription.

    ", + "EventSubscriptionsMessage$Marker": "

    An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "EventsMessage$Marker": "

    An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "FailoverDBClusterMessage$DBClusterIdentifier": "

    A DB cluster identifier to force a failover for. This parameter is not case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "Filter$Name": "

    This parameter is not currently supported.

    ", + "FilterValueList$member": null, + "IPRange$Status": "

    Specifies the status of the IP range. Status can be \"authorizing\", \"authorized\", \"revoking\", and \"revoked\".

    ", + "IPRange$CIDRIP": "

    Specifies the IP range.

    ", + "KeyList$member": null, + "ListTagsForResourceMessage$ResourceName": "

    The Amazon RDS resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ModifyDBClusterMessage$DBClusterIdentifier": "

    The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.

    Constraints:

    • Must be the identifier for an existing DB cluster.
    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "ModifyDBClusterMessage$NewDBClusterIdentifier": "

    The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-cluster2

    ", + "ModifyDBClusterMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group to use for the DB cluster.

    ", + "ModifyDBClusterMessage$MasterUserPassword": "

    The new password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

    Constraints: Must contain from 8 to 41 characters.

    ", + "ModifyDBClusterMessage$OptionGroupName": "

    A value that indicates that the DB cluster should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case, and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options cannot be removed from an option group. The option group cannot be removed from a DB cluster once it is associated with a DB cluster.

    ", + "ModifyDBClusterMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi.
    • Times should be in Universal Coordinated Time (UTC).
    • Must not conflict with the preferred maintenance window.
    • Must be at least 30 minutes.
    ", + "ModifyDBClusterMessage$PreferredMaintenanceWindow": "

    The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

    Constraints: Minimum 30-minute window.

    ", + "ModifyDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group to modify.

    ", + "ModifyDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$DBInstanceClass": "

    The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action.

    Passing a value for this setting causes an outage during the change and is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

    Default: Uses existing setting

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    ", + "ModifyDBInstanceMessage$MasterUserPassword": "

    The new password for the DB instance master user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

    Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Default: Uses existing setting

    Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

    Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked. ", + "ModifyDBInstanceMessage$DBParameterGroupName": "

    The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.

    Default: Uses existing setting

    Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.

    ", + "ModifyDBInstanceMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi
    • Times should be in Universal Time Coordinated (UTC)
    • Must not conflict with the preferred maintenance window
    • Must be at least 30 minutes
    ", + "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

    Default: Uses existing setting

    Format: ddd:hh24:mi-ddd:hh24:mi

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes

    ", + "ModifyDBInstanceMessage$EngineVersion": "

    The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

    For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

    For a list of valid engine versions, see CreateDBInstance.

    ", + "ModifyDBInstanceMessage$OptionGroupName": "

    Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "ModifyDBInstanceMessage$NewDBInstanceIdentifier": "

    The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately is set to false. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBInstanceMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "ModifyDBInstanceMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "ModifyDBInstanceMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "ModifyDBInstanceMessage$CACertificateIdentifier": "

    Indicates the certificate that needs to be associated with the instance.

    ", + "ModifyDBInstanceMessage$MonitoringRoleArn": "

    The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring.

    If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

    ", + "ModifyDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be the name of an existing DB parameter group
    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ModifyDBSnapshotAttributeMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to modify the attributes for.

    ", + "ModifyDBSnapshotAttributeMessage$AttributeName": "

    The name of the DB snapshot attribute to modify.

    To manage authorization for other AWS accounts to copy or restore a manual DB snapshot, this value is restore.

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string.

    Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not be \"Default\".

    Example: mySubnetgroup

    ", + "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

    Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

    ", + "ModifyOptionGroupMessage$OptionGroupName": "

    The name of the option group to be modified.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "Option$OptionName": "

    The name of the option.

    ", + "Option$OptionDescription": "

    The description of the option.

    ", + "OptionConfiguration$OptionName": "

    The configuration of options to include in a group.

    ", + "OptionGroup$OptionGroupName": "

    Specifies the name of the option group.

    ", + "OptionGroup$OptionGroupDescription": "

    Provides a description of the option group.

    ", + "OptionGroup$EngineName": "

    Indicates the name of the engine that this option group can be applied to.

    ", + "OptionGroup$MajorEngineVersion": "

    Indicates the major engine version associated with this option group.

    ", + "OptionGroup$VpcId": "

    If AllowsVpcAndNonVpcInstanceMemberships is false, this field is blank. If AllowsVpcAndNonVpcInstanceMemberships is true and this field is blank, then this option group can be applied to both VPC and non-VPC instances. If this field contains a value, then this option group can only be applied to instances that are in the VPC indicated by this field.

    ", + "OptionGroupMembership$OptionGroupName": "

    The name of the option group that the instance belongs to.

    ", + "OptionGroupMembership$Status": "

    The status of the DB instance's option group membership (e.g. in-sync, pending, pending-maintenance, applying).

    ", + "OptionGroupOption$Name": "

    The name of the option.

    ", + "OptionGroupOption$Description": "

    The description of the option.

    ", + "OptionGroupOption$EngineName": "

    The name of the engine that this option can be applied to.

    ", + "OptionGroupOption$MajorEngineVersion": "

    Indicates the major engine version that the option is available for.

    ", + "OptionGroupOption$MinimumRequiredMinorEngineVersion": "

    The minimum required engine version for the option to be applied.

    ", + "OptionGroupOptionSetting$SettingName": "

    The name of the option group option.

    ", + "OptionGroupOptionSetting$SettingDescription": "

    The description of the option group option.

    ", + "OptionGroupOptionSetting$DefaultValue": "

    The default value for the option group option.

    ", + "OptionGroupOptionSetting$ApplyType": "

    The DB engine specific parameter type for the option group option.

    ", + "OptionGroupOptionSetting$AllowedValues": "

    Indicates the acceptable values for the option group option.

    ", + "OptionGroupOptionsMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionGroups$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "OptionNamesList$member": null, + "OptionSetting$Name": "

    The name of the option that has settings that you can set.

    ", + "OptionSetting$Value": "

    The current value of the option setting.

    ", + "OptionSetting$DefaultValue": "

    The default value of the option setting.

    ", + "OptionSetting$Description": "

    The description of the option setting.

    ", + "OptionSetting$ApplyType": "

    The DB engine specific parameter type.

    ", + "OptionSetting$DataType": "

    The data type of the option setting.

    ", + "OptionSetting$AllowedValues": "

    The allowed values of the option setting.

    ", + "OptionsDependedOn$member": null, + "OrderableDBInstanceOption$Engine": "

    The engine type of the orderable DB instance.

    ", + "OrderableDBInstanceOption$EngineVersion": "

    The engine version of the orderable DB instance.

    ", + "OrderableDBInstanceOption$DBInstanceClass": "

    The DB instance class for the orderable DB instance.

    ", + "OrderableDBInstanceOption$LicenseModel": "

    The license model for the orderable DB instance.

    ", + "OrderableDBInstanceOption$StorageType": "

    Indicates the storage type for this orderable DB instance.

    ", + "OrderableDBInstanceOptionsMessage$Marker": "

    An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "Parameter$ParameterName": "

    Specifies the name of the parameter.

    ", + "Parameter$ParameterValue": "

    Specifies the value of the parameter.

    ", + "Parameter$Description": "

    Provides a description of the parameter.

    ", + "Parameter$Source": "

    Indicates the source of the parameter value.

    ", + "Parameter$ApplyType": "

    Specifies the engine specific parameters type.

    ", + "Parameter$DataType": "

    Specifies the valid data type for the parameter.

    ", + "Parameter$AllowedValues": "

    Specifies the valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingMaintenanceAction$Action": "

    The type of pending maintenance action that is available for the resource.

    ", + "PendingMaintenanceAction$OptInStatus": "

    Indicates the type of opt-in request that has been received for the resource.

    ", + "PendingMaintenanceAction$Description": "

    A description providing more detail about the maintenance action.

    ", + "PendingMaintenanceActionsMessage$Marker": "

    An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords.

    ", + "PendingModifiedValues$DBInstanceClass": "

    Contains the new DBInstanceClass for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$MasterUserPassword": "

    Contains the pending or in-progress change of the master credentials for the DB instance.

    ", + "PendingModifiedValues$EngineVersion": "

    Indicates the database engine version.

    ", + "PendingModifiedValues$DBInstanceIdentifier": "

    Contains the new DBInstanceIdentifier for the DB instance that will be applied or is in progress.

    ", + "PendingModifiedValues$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    ", + "PendingModifiedValues$CACertificateIdentifier": "

    Specifies the identifier of the CA certificate for the DB instance.

    ", + "PromoteReadReplicaMessage$DBInstanceIdentifier": "

    The DB instance identifier. This value is stored as a lowercase string.

    Constraints:

    • Must be the identifier for an existing Read Replica DB instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: mydbinstance

    ", + "PromoteReadReplicaMessage$PreferredBackupWindow": "

    The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

    Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

    Constraints:

    • Must be in the format hh24:mi-hh24:mi.
    • Times should be in Universal Coordinated Time (UTC).
    • Must not conflict with the preferred maintenance window.
    • Must be at least 30 minutes.
    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstancesOfferingId": "

    The ID of the Reserved DB instance offering to purchase.

    Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706

    ", + "PurchaseReservedDBInstancesOfferingMessage$ReservedDBInstanceId": "

    Customer-specified identifier to track this reservation.

    Example: myreservationID

    ", + "ReadReplicaDBInstanceIdentifierList$member": null, + "RebootDBInstanceMessage$DBInstanceIdentifier": "

    The DB instance identifier. This parameter is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency of the recurring charge.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SubscriptionName": "

    The name of the RDS event notification subscription you want to remove a source identifier from.

    ", + "RemoveSourceIdentifierFromSubscriptionMessage$SourceIdentifier": "

    The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

    ", + "RemoveTagsFromResourceMessage$ResourceName": "

    The Amazon RDS resource the tags will be removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    ", + "ReservedDBInstance$ReservedDBInstanceId": "

    The unique identifier for the reservation.

    ", + "ReservedDBInstance$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstance$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstance$CurrencyCode": "

    The currency code for the reserved DB instance.

    ", + "ReservedDBInstance$ProductDescription": "

    The description of the reserved DB instance.

    ", + "ReservedDBInstance$OfferingType": "

    The offering type of this reserved DB instance.

    ", + "ReservedDBInstance$State": "

    The state of the reserved DB instance.

    ", + "ReservedDBInstanceMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ReservedDBInstancesOffering$ReservedDBInstancesOfferingId": "

    The offering identifier.

    ", + "ReservedDBInstancesOffering$DBInstanceClass": "

    The DB instance class for the reserved DB instance.

    ", + "ReservedDBInstancesOffering$CurrencyCode": "

    The currency code for the reserved DB instance offering.

    ", + "ReservedDBInstancesOffering$ProductDescription": "

    The database engine used by the offering.

    ", + "ReservedDBInstancesOffering$OfferingType": "

    The offering type.

    ", + "ReservedDBInstancesOfferingMessage$Marker": "

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    ", + "ResetDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

    The name of the DB cluster parameter group to reset.

    ", + "ResetDBParameterGroupMessage$DBParameterGroupName": "

    The name of the DB parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "ResourcePendingMaintenanceActions$ResourceIdentifier": "

    The ARN of the resource that has pending maintenance actions.

    ", + "RestoreDBClusterFromSnapshotMessage$DBClusterIdentifier": "

    The name of the DB cluster to create from the DB cluster snapshot. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBClusterFromSnapshotMessage$SnapshotIdentifier": "

    The identifier for the DB cluster snapshot to restore from.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBClusterFromSnapshotMessage$Engine": "

    The database engine to use for the new DB cluster.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    ", + "RestoreDBClusterFromSnapshotMessage$EngineVersion": "

    The version of the database engine to use for the new DB cluster.

    ", + "RestoreDBClusterFromSnapshotMessage$DBSubnetGroupName": "

    The name of the DB subnet group to use for the new DB cluster.

    ", + "RestoreDBClusterFromSnapshotMessage$DatabaseName": "

    The database name for the restored DB cluster.

    ", + "RestoreDBClusterFromSnapshotMessage$OptionGroupName": "

    The name of the option group to use for the restored DB cluster.

    ", + "RestoreDBClusterFromSnapshotMessage$KmsKeyId": "

    The KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster snapshot.

    The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

    If you do not specify a value for the KmsKeyId parameter, then the following will occur:

    • If the DB cluster snapshot is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the DB cluster snapshot.
    • If the DB cluster snapshot is not encrypted, then the restored DB cluster is not encrypted.

    If SnapshotIdentifier refers to a DB cluster snapshot that is not encrypted, and you specify a value for the KmsKeyId parameter, then the restore request is rejected.

    ", + "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "

    The name of the new DB cluster to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBClusterToPointInTimeMessage$SourceDBClusterIdentifier": "

    The identifier of the source DB cluster from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBClusterToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new DB cluster.

    ", + "RestoreDBClusterToPointInTimeMessage$OptionGroupName": "

    The name of the option group for the new DB cluster.

    ", + "RestoreDBClusterToPointInTimeMessage$KmsKeyId": "

    The KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.

    The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

    You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different than the KMS key used to encrypt the source DB cluster. The new DB cluster will be encrypted with the KMS key identified by the KmsKeyId parameter.

    If you do not specify a value for the KmsKeyId parameter, then the following will occur:

    • If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster.
    • If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.
    • If DBClusterIdentifier refers to a DB cluster that is not encrypted, then the restore request is rejected.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceIdentifier": "

    Name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for SQL Server)
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to restore from.

    Constraints:

    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    ", + "RestoreDBInstanceFromDBSnapshotMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceFromDBSnapshotMessage$DBName": "

    The database name for the restored DB instance.

    This parameter doesn't apply to the MySQL or MariaDB engines.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora

    ", + "RestoreDBInstanceFromDBSnapshotMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "RestoreDBInstanceFromDBSnapshotMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "RestoreDBInstanceFromDBSnapshotMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "RestoreDBInstanceToPointInTimeMessage$SourceDBInstanceIdentifier": "

    The identifier of the source DB instance from which to restore.

    Constraints:

    • Must be the identifier of an existing database instance
    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$TargetDBInstanceIdentifier": "

    The name of the new database instance to be created.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreDBInstanceToPointInTimeMessage$DBInstanceClass": "

    The compute and memory capacity of the Amazon RDS DB instance.

    Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large

    Default: The same DBInstanceClass as the original DB instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$AvailabilityZone": "

    The EC2 Availability Zone that the database instance will be created in.

    Default: A random, system-chosen Availability Zone.

    Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ parameter is set to true.

    Example: us-east-1a

    ", + "RestoreDBInstanceToPointInTimeMessage$DBSubnetGroupName": "

    The DB subnet group name to use for the new instance.

    ", + "RestoreDBInstanceToPointInTimeMessage$LicenseModel": "

    License model information for the restored DB instance.

    Default: Same as source.

    Valid values: license-included | bring-your-own-license | general-public-license

    ", + "RestoreDBInstanceToPointInTimeMessage$DBName": "

    The database name for the restored DB instance.

    This parameter is not used for the MySQL or MariaDB engines.

    ", + "RestoreDBInstanceToPointInTimeMessage$Engine": "

    The database engine to use for the new instance.

    Default: The same as source

    Constraint: Must be compatible with the engine of the source

    Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora

    ", + "RestoreDBInstanceToPointInTimeMessage$OptionGroupName": "

    The name of the option group to be used for the restored DB instance.

    Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance

    ", + "RestoreDBInstanceToPointInTimeMessage$StorageType": "

    Specifies the storage type to be associated with the DB instance.

    Valid values: standard | gp2 | io1

    If you specify io1, you must also include a value for the Iops parameter.

    Default: io1 if the Iops parameter is specified; otherwise standard

    ", + "RestoreDBInstanceToPointInTimeMessage$TdeCredentialArn": "

    The ARN from the Key Store with which to associate the instance for TDE encryption.

    ", + "RestoreDBInstanceToPointInTimeMessage$TdeCredentialPassword": "

    The password for the given ARN from the Key Store in order to access the device.

    ", + "RevokeDBSecurityGroupIngressMessage$DBSecurityGroupName": "

    The name of the DB security group to revoke ingress from.

    ", + "RevokeDBSecurityGroupIngressMessage$CIDRIP": "

    The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupId": "

    The id of the EC2 security group to revoke access from. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "RevokeDBSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS Account Number of the owner of the EC2 security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId must be provided.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    Specifies the identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    Specifies the status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string can only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "Tag$Value": "

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with \"aws:\" or \"rds:\". The string can only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

    ", + "UpgradeTarget$Engine": "

    The name of the upgrade target database engine.

    ", + "UpgradeTarget$EngineVersion": "

    The version number of the upgrade target database engine.

    ", + "UpgradeTarget$Description": "

    The version of the database engine that a DB instance can be upgraded to.

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": "

    The name of the VPC security group.

    ", + "VpcSecurityGroupMembership$Status": "

    The status of the VPC security group.

    " + } + }, + "Subnet": { + "base": "

    This data type is used as a response element in the DescribeDBSubnetGroups action.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    The DB subnet is already in use in the Availability Zone.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateDBSubnetGroupMessage$SubnetIds": "

    The EC2 Subnet IDs for the DB subnet group.

    ", + "ModifyDBSubnetGroupMessage$SubnetIds": "

    The EC2 subnet IDs for the DB subnet group.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "DBSubnetGroup$Subnets": "

    Contains a list of Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    The supplied subscription name already exists.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The supplied category does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    The subscription name does not exist.

    ", + "refs": { + } + }, + "SupportedCharacterSetsList": { + "base": null, + "refs": { + "DBEngineVersion$SupportedCharacterSets": "

    A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance API.

    " + } + }, + "TStamp": { + "base": null, + "refs": { + "Certificate$ValidFrom": "

    The starting date from which the certificate is valid.

    ", + "Certificate$ValidTill": "

    The final date that the certificate continues to be valid.

    ", + "DBCluster$EarliestRestorableTime": "

    Specifies the earliest time to which a database can be restored with point-in-time restore.

    ", + "DBCluster$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBClusterSnapshot$SnapshotCreateTime": "

    Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).

    ", + "DBClusterSnapshot$ClusterCreateTime": "

    Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

    ", + "DBInstance$InstanceCreateTime": "

    Provides the date and time the DB instance was created.

    ", + "DBInstance$LatestRestorableTime": "

    Specifies the latest time to which a database can be restored with point-in-time restore.

    ", + "DBSnapshot$SnapshotCreateTime": "

    Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).

    ", + "DBSnapshot$InstanceCreateTime": "

    Specifies the time when the snapshot was taken, in Universal Coordinated Time (UTC).

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    Specifies the date and time of the event.

    ", + "PendingMaintenanceAction$AutoAppliedAfterDate": "

    The date of the maintenance window when the action will be applied. The maintenance action will be applied to the resource during its first maintenance window after this date. If this date is specified, any next-maintenance opt-in requests are ignored.

    ", + "PendingMaintenanceAction$ForcedApplyDate": "

    The date when the maintenance action will be automatically applied. The maintenance action will be applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any immediate opt-in requests are ignored.

    ", + "PendingMaintenanceAction$CurrentApplyDate": "

    The effective date when the pending maintenance action will be applied to the resource. This date takes into account opt-in requests received from the ApplyPendingMaintenanceAction API, the AutoAppliedAfterDate, and the ForcedApplyDate. This value is blank if an opt-in request has not been received and nothing has been specified as AutoAppliedAfterDate or ForcedApplyDate.

    ", + "ReservedDBInstance$StartTime": "

    The time the reservation started.

    ", + "RestoreDBClusterToPointInTimeMessage$RestoreToTime": "

    The date and time to restore the DB cluster to.

    Valid Values: Value must be a time in Universal Coordinated Time (UTC) format

    Constraints:

    • Must be before the latest restorable time for the DB instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2015-03-07T23:45:00Z

    ", + "RestoreDBInstanceToPointInTimeMessage$RestoreTime": "

    The date and time to restore from.

    Valid Values: Value must be a time in Universal Coordinated Time (UTC) format

    Constraints:

    • Must be before the latest restorable time for the DB instance
    • Cannot be specified if UseLatestRestorableTime parameter is true

    Example: 2009-09-07T23:45:00Z

    " + } + }, + "Tag": { + "base": "

    Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    ", + "refs": { + "TagList$member": null + } + }, + "TagList": { + "base": "

    A list of tags.

    ", + "refs": { + "AddTagsToResourceMessage$Tags": "

    The tags to be assigned to the Amazon RDS resource.

    ", + "CopyDBClusterSnapshotMessage$Tags": null, + "CopyDBParameterGroupMessage$Tags": null, + "CopyDBSnapshotMessage$Tags": null, + "CopyOptionGroupMessage$Tags": null, + "CreateDBClusterMessage$Tags": null, + "CreateDBClusterParameterGroupMessage$Tags": null, + "CreateDBClusterSnapshotMessage$Tags": "

    The tags to be assigned to the DB cluster snapshot.

    ", + "CreateDBInstanceMessage$Tags": null, + "CreateDBInstanceReadReplicaMessage$Tags": null, + "CreateDBParameterGroupMessage$Tags": null, + "CreateDBSecurityGroupMessage$Tags": null, + "CreateDBSnapshotMessage$Tags": null, + "CreateDBSubnetGroupMessage$Tags": null, + "CreateEventSubscriptionMessage$Tags": null, + "CreateOptionGroupMessage$Tags": null, + "PurchaseReservedDBInstancesOfferingMessage$Tags": null, + "RestoreDBClusterFromSnapshotMessage$Tags": "

    The tags to be assigned to the restored DB cluster.

    ", + "RestoreDBClusterToPointInTimeMessage$Tags": null, + "RestoreDBInstanceFromDBSnapshotMessage$Tags": null, + "RestoreDBInstanceToPointInTimeMessage$Tags": null, + "TagListMessage$TagList": "

    List of tags returned by the ListTagsForResource operation.

    " + } + }, + "TagListMessage": { + "base": "

    ", + "refs": { + } + }, + "UpgradeTarget": { + "base": "

    The version of the database engine that a DB instance can be upgraded to.

    ", + "refs": { + "ValidUpgradeTargetList$member": null + } + }, + "ValidUpgradeTargetList": { + "base": null, + "refs": { + "DBEngineVersion$ValidUpgradeTarget": "

    A list of engine versions that this database engine version can be upgraded to.

    " + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateDBClusterMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to associate with this DB cluster.

    ", + "CreateDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to associate with this DB instance.

    Default: The default EC2 VPC security group for the DB subnet group's VPC.

    ", + "ModifyDBClusterMessage$VpcSecurityGroupIds": "

    A list of VPC security groups that the DB cluster will belong to.

    ", + "ModifyDBInstanceMessage$VpcSecurityGroupIds": "

    A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "OptionConfiguration$VpcSecurityGroupMemberships": "

    A list of VpcSecurityGroupMembership name strings used for this option.

    ", + "RestoreDBClusterFromSnapshotMessage$VpcSecurityGroupIds": "

    A list of VPC security groups that the new DB cluster will belong to.

    ", + "RestoreDBClusterToPointInTimeMessage$VpcSecurityGroupIds": "

    A list of VPC security groups that the new DB cluster belongs to.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    This data type is used as a response element for queries on VPC security group membership.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "DBCluster$VpcSecurityGroups": "

    Provides a list of VPC security groups that the DB cluster belongs to.

    ", + "DBInstance$VpcSecurityGroups": "

    Provides a list of VPC security group elements that the DB instance belongs to.

    ", + "Option$VpcSecurityGroupMemberships": "

    If the option requires access to a port, then this VPC security group allows access to the port.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,110 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBInstances" + }, + "DescribeDBLogFiles": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DescribeDBLogFiles" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + 
"result_key": "Parameters" + }, + "DescribeDBSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSecurityGroups" + }, + "DescribeDBSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSnapshots" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "output_token": "EngineDefaults.Marker", + "limit_key": "MaxRecords", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeOptionGroupOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupOptions" + }, + "DescribeOptionGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OptionGroupsList" + }, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableDBInstanceOptions" + }, + "DescribeReservedDBInstances": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstances" + }, + "DescribeReservedDBInstancesOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedDBInstancesOfferings" + }, + "DownloadDBLogFilePortion": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "NumberOfLines", + "more_results": "AdditionalDataPending", + "result_key": "LogFileData" + }, 
+ "ListTagsForResource": { + "result_key": "TagList" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +{ + "version": 2, + "waiters": { + "DBInstanceAvailable": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + }, + "DBInstanceDeleted": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + 
"expected": "DBInstanceNotFound", + "matcher": "error", + "state": "success" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "resetting-master-credentials", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5275 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-12-01", + "endpointPrefix":"redshift", + "serviceFullName":"Amazon Redshift", + "signatureVersion":"v4", + "xmlNamespace":"http://redshift.amazonaws.com/doc/2012-12-01/", + "protocol":"query" + }, + "operations":{ + "AuthorizeClusterSecurityGroupIngress":{ + "name":"AuthorizeClusterSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeClusterSecurityGroupIngressMessage"}, + "output":{ + "shape":"AuthorizeClusterSecurityGroupIngressResult", + "wrapper":true, + "resultWrapper":"AuthorizeClusterSecurityGroupIngressResult" + }, + "errors":[ + { + "shape":"ClusterSecurityGroupNotFoundFault", + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, 
+ "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterSecurityGroupStateFault", + "error":{ + "code":"InvalidClusterSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationAlreadyExistsFault", + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationQuotaExceededFault", + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "AuthorizeSnapshotAccess":{ + "name":"AuthorizeSnapshotAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AuthorizeSnapshotAccessMessage"}, + "output":{ + "shape":"AuthorizeSnapshotAccessResult", + "wrapper":true, + "resultWrapper":"AuthorizeSnapshotAccessResult" + }, + "errors":[ + { + "shape":"ClusterSnapshotNotFoundFault", + "error":{ + "code":"ClusterSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationAlreadyExistsFault", + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationQuotaExceededFault", + "error":{ + "code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CopyClusterSnapshot":{ + "name":"CopyClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyClusterSnapshotMessage"}, + "output":{ + "shape":"CopyClusterSnapshotResult", + "wrapper":true, + "resultWrapper":"CopyClusterSnapshotResult" + }, + "errors":[ + { + "shape":"ClusterSnapshotAlreadyExistsFault", + "error":{ + "code":"ClusterSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSnapshotNotFoundFault", + "error":{ + "code":"ClusterSnapshotNotFound", 
+ "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterSnapshotStateFault", + "error":{ + "code":"InvalidClusterSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSnapshotQuotaExceededFault", + "error":{ + "code":"ClusterSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateCluster":{ + "name":"CreateCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterMessage"}, + "output":{ + "shape":"CreateClusterResult", + "wrapper":true, + "resultWrapper":"CreateClusterResult" + }, + "errors":[ + { + "shape":"ClusterAlreadyExistsFault", + "error":{ + "code":"ClusterAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InsufficientClusterCapacityFault", + "error":{ + "code":"InsufficientClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterParameterGroupNotFoundFault", + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSecurityGroupNotFoundFault", + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterQuotaExceededFault", + "error":{ + "code":"ClusterQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberOfNodesQuotaExceededFault", + "error":{ + "code":"NumberOfNodesQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberOfNodesPerClusterLimitExceededFault", + "error":{ + "code":"NumberOfNodesPerClusterLimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSubnetGroupNotFoundFault", + "error":{ + 
"code":"ClusterSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidVPCNetworkStateFault", + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterSubnetGroupStateFault", + "error":{ + "code":"InvalidClusterSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnet", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnauthorizedOperation", + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmClientCertificateNotFoundFault", + "error":{ + "code":"HsmClientCertificateNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmConfigurationNotFoundFault", + "error":{ + "code":"HsmConfigurationNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidElasticIpFault", + "error":{ + "code":"InvalidElasticIpFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededFault", + "error":{ + "code":"LimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateClusterParameterGroup":{ + "name":"CreateClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterParameterGroupMessage"}, + "output":{ + 
"shape":"CreateClusterParameterGroupResult", + "wrapper":true, + "resultWrapper":"CreateClusterParameterGroupResult" + }, + "errors":[ + { + "shape":"ClusterParameterGroupQuotaExceededFault", + "error":{ + "code":"ClusterParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterParameterGroupAlreadyExistsFault", + "error":{ + "code":"ClusterParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateClusterSecurityGroup":{ + "name":"CreateClusterSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterSecurityGroupMessage"}, + "output":{ + "shape":"CreateClusterSecurityGroupResult", + "wrapper":true, + "resultWrapper":"CreateClusterSecurityGroupResult" + }, + "errors":[ + { + "shape":"ClusterSecurityGroupAlreadyExistsFault", + "error":{ + "code":"ClusterSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSecurityGroupQuotaExceededFault", + "error":{ + "code":"QuotaExceeded.ClusterSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateClusterSnapshot":{ + "name":"CreateClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CreateClusterSnapshotMessage"}, + "output":{ + "shape":"CreateClusterSnapshotResult", + "wrapper":true, + "resultWrapper":"CreateClusterSnapshotResult" + }, + "errors":[ + { + "shape":"ClusterSnapshotAlreadyExistsFault", + "error":{ + "code":"ClusterSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterStateFault", + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSnapshotQuotaExceededFault", + "error":{ + "code":"ClusterSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateClusterSubnetGroup":{ + "name":"CreateClusterSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterSubnetGroupMessage"}, + "output":{ + "shape":"CreateClusterSubnetGroupResult", + "wrapper":true, + "resultWrapper":"CreateClusterSubnetGroupResult" + }, + "errors":[ + { + "shape":"ClusterSubnetGroupAlreadyExistsFault", + "error":{ + "code":"ClusterSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSubnetGroupQuotaExceededFault", + "error":{ + "code":"ClusterSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSubnetQuotaExceededFault", + "error":{ + "code":"ClusterSubnetQuotaExceededFault", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnet", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnauthorizedOperation", + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "wrapper":true, + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + { + "shape":"EventSubscriptionQuotaExceededFault", + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubscriptionAlreadyExistFault", + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SNSInvalidTopicFault", + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SNSNoAuthorizationFault", + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SNSTopicArnNotFoundFault", + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubscriptionEventIdNotFoundFault", + "error":{ + "code":"SubscriptionEventIdNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"SubscriptionCategoryNotFoundFault", + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubscriptionSeverityNotFoundFault", + "error":{ + "code":"SubscriptionSeverityNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SourceNotFoundFault", + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateHsmClientCertificate":{ + "name":"CreateHsmClientCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHsmClientCertificateMessage"}, + "output":{ + "shape":"CreateHsmClientCertificateResult", + "wrapper":true, + "resultWrapper":"CreateHsmClientCertificateResult" + }, + "errors":[ + { + "shape":"HsmClientCertificateAlreadyExistsFault", + "error":{ + "code":"HsmClientCertificateAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmClientCertificateQuotaExceededFault", + "error":{ + "code":"HsmClientCertificateQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateHsmConfiguration":{ + "name":"CreateHsmConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"CreateHsmConfigurationMessage"}, + "output":{ + "shape":"CreateHsmConfigurationResult", + "wrapper":true, + "resultWrapper":"CreateHsmConfigurationResult" + }, + "errors":[ + { + "shape":"HsmConfigurationAlreadyExistsFault", + "error":{ + "code":"HsmConfigurationAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmConfigurationQuotaExceededFault", + "error":{ + "code":"HsmConfigurationQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateSnapshotCopyGrant":{ + "name":"CreateSnapshotCopyGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotCopyGrantMessage"}, + "output":{ + "shape":"CreateSnapshotCopyGrantResult", + "wrapper":true, + "resultWrapper":"CreateSnapshotCopyGrantResult" + }, + "errors":[ + { + "shape":"SnapshotCopyGrantAlreadyExistsFault", + "error":{ + "code":"SnapshotCopyGrantAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotCopyGrantQuotaExceededFault", + "error":{ + "code":"SnapshotCopyGrantQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededFault", + "error":{ + "code":"LimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, 
+ "exception":true + } + ] + }, + "CreateTags":{ + "name":"CreateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTagsMessage"}, + "errors":[ + { + "shape":"TagLimitExceededFault", + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResourceNotFoundFault", + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteCluster":{ + "name":"DeleteCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterMessage"}, + "output":{ + "shape":"DeleteClusterResult", + "wrapper":true, + "resultWrapper":"DeleteClusterResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterStateFault", + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSnapshotAlreadyExistsFault", + "error":{ + "code":"ClusterSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSnapshotQuotaExceededFault", + "error":{ + "code":"ClusterSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteClusterParameterGroup":{ + "name":"DeleteClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterParameterGroupMessage"}, + "errors":[ + { + "shape":"InvalidClusterParameterGroupStateFault", + "error":{ + "code":"InvalidClusterParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"ClusterParameterGroupNotFoundFault", + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteClusterSecurityGroup":{ + "name":"DeleteClusterSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterSecurityGroupMessage"}, + "errors":[ + { + "shape":"InvalidClusterSecurityGroupStateFault", + "error":{ + "code":"InvalidClusterSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSecurityGroupNotFoundFault", + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteClusterSnapshot":{ + "name":"DeleteClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterSnapshotMessage"}, + "output":{ + "shape":"DeleteClusterSnapshotResult", + "wrapper":true, + "resultWrapper":"DeleteClusterSnapshotResult" + }, + "errors":[ + { + "shape":"InvalidClusterSnapshotStateFault", + "error":{ + "code":"InvalidClusterSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSnapshotNotFoundFault", + "error":{ + "code":"ClusterSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteClusterSubnetGroup":{ + "name":"DeleteClusterSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterSubnetGroupMessage"}, + "errors":[ + { + "shape":"InvalidClusterSubnetGroupStateFault", + "error":{ + "code":"InvalidClusterSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterSubnetStateFault", + "error":{ + "code":"InvalidClusterSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"ClusterSubnetGroupNotFoundFault", + "error":{ + "code":"ClusterSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "errors":[ + { + "shape":"SubscriptionNotFoundFault", + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubscriptionStateFault", + "error":{ + "code":"InvalidSubscriptionStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteHsmClientCertificate":{ + "name":"DeleteHsmClientCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHsmClientCertificateMessage"}, + "errors":[ + { + "shape":"InvalidHsmClientCertificateStateFault", + "error":{ + "code":"InvalidHsmClientCertificateStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmClientCertificateNotFoundFault", + "error":{ + "code":"HsmClientCertificateNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteHsmConfiguration":{ + "name":"DeleteHsmConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHsmConfigurationMessage"}, + "errors":[ + { + "shape":"InvalidHsmConfigurationStateFault", + "error":{ + "code":"InvalidHsmConfigurationStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmConfigurationNotFoundFault", + "error":{ + "code":"HsmConfigurationNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteSnapshotCopyGrant":{ + "name":"DeleteSnapshotCopyGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DeleteSnapshotCopyGrantMessage"}, + "errors":[ + { + "shape":"InvalidSnapshotCopyGrantStateFault", + "error":{ + "code":"InvalidSnapshotCopyGrantStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotCopyGrantNotFoundFault", + "error":{ + "code":"SnapshotCopyGrantNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteTags":{ + "name":"DeleteTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsMessage"}, + "errors":[ + { + "shape":"ResourceNotFoundFault", + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeClusterParameterGroups":{ + "name":"DescribeClusterParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterParameterGroupsMessage"}, + "output":{ + "shape":"ClusterParameterGroupsMessage", + "resultWrapper":"DescribeClusterParameterGroupsResult" + }, + "errors":[ + { + "shape":"ClusterParameterGroupNotFoundFault", + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeClusterParameters":{ + "name":"DescribeClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterParametersMessage"}, + "output":{ + "shape":"ClusterParameterGroupDetails", + "resultWrapper":"DescribeClusterParametersResult" + }, + "errors":[ + { + "shape":"ClusterParameterGroupNotFoundFault", + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + 
"senderFault":true + }, + "exception":true + } + ] + }, + "DescribeClusterSecurityGroups":{ + "name":"DescribeClusterSecurityGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterSecurityGroupsMessage"}, + "output":{ + "shape":"ClusterSecurityGroupMessage", + "resultWrapper":"DescribeClusterSecurityGroupsResult" + }, + "errors":[ + { + "shape":"ClusterSecurityGroupNotFoundFault", + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeClusterSnapshots":{ + "name":"DescribeClusterSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterSnapshotsMessage"}, + "output":{ + "shape":"SnapshotMessage", + "resultWrapper":"DescribeClusterSnapshotsResult" + }, + "errors":[ + { + "shape":"ClusterSnapshotNotFoundFault", + "error":{ + "code":"ClusterSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeClusterSubnetGroups":{ + "name":"DescribeClusterSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterSubnetGroupsMessage"}, + "output":{ + "shape":"ClusterSubnetGroupMessage", + "resultWrapper":"DescribeClusterSubnetGroupsResult" + }, + "errors":[ + { + "shape":"ClusterSubnetGroupNotFoundFault", + "error":{ + "code":"ClusterSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeClusterVersions":{ + 
"name":"DescribeClusterVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterVersionsMessage"}, + "output":{ + "shape":"ClusterVersionsMessage", + "resultWrapper":"DescribeClusterVersionsResult" + } + }, + "DescribeClusters":{ + "name":"DescribeClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClustersMessage"}, + "output":{ + "shape":"ClustersMessage", + "resultWrapper":"DescribeClustersResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeDefaultClusterParameters":{ + "name":"DescribeDefaultClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDefaultClusterParametersMessage"}, + "output":{ + "shape":"DescribeDefaultClusterParametersResult", + "wrapper":true, + "resultWrapper":"DescribeDefaultClusterParametersResult" + } + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + } + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + { + "shape":"SubscriptionNotFoundFault", + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + } + }, + "DescribeHsmClientCertificates":{ + "name":"DescribeHsmClientCertificates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHsmClientCertificatesMessage"}, + "output":{ + "shape":"HsmClientCertificateMessage", + "resultWrapper":"DescribeHsmClientCertificatesResult" + }, + "errors":[ + { + "shape":"HsmClientCertificateNotFoundFault", + "error":{ + "code":"HsmClientCertificateNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeHsmConfigurations":{ + "name":"DescribeHsmConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHsmConfigurationsMessage"}, + "output":{ + "shape":"HsmConfigurationMessage", + "resultWrapper":"DescribeHsmConfigurationsResult" + }, + "errors":[ + { + "shape":"HsmConfigurationNotFoundFault", + "error":{ + "code":"HsmConfigurationNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeLoggingStatus":{ + "name":"DescribeLoggingStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoggingStatusMessage"}, + "output":{ + "shape":"LoggingStatus", + "resultWrapper":"DescribeLoggingStatusResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeOrderableClusterOptions":{ + "name":"DescribeOrderableClusterOptions", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableClusterOptionsMessage"}, + "output":{ + "shape":"OrderableClusterOptionsMessage", + "resultWrapper":"DescribeOrderableClusterOptionsResult" + } + }, + "DescribeReservedNodeOfferings":{ + "name":"DescribeReservedNodeOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedNodeOfferingsMessage"}, + "output":{ + "shape":"ReservedNodeOfferingsMessage", + "resultWrapper":"DescribeReservedNodeOfferingsResult" + }, + "errors":[ + { + "shape":"ReservedNodeOfferingNotFoundFault", + "error":{ + "code":"ReservedNodeOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnsupportedOperationFault", + "error":{ + "code":"UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeReservedNodes":{ + "name":"DescribeReservedNodes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReservedNodesMessage"}, + "output":{ + "shape":"ReservedNodesMessage", + "resultWrapper":"DescribeReservedNodesResult" + }, + "errors":[ + { + "shape":"ReservedNodeNotFoundFault", + "error":{ + "code":"ReservedNodeNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeResize":{ + "name":"DescribeResize", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResizeMessage"}, + "output":{ + "shape":"ResizeProgressMessage", + "resultWrapper":"DescribeResizeResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ResizeNotFoundFault", + "error":{ + "code":"ResizeNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeSnapshotCopyGrants":{ + "name":"DescribeSnapshotCopyGrants", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotCopyGrantsMessage"}, + "output":{ + "shape":"SnapshotCopyGrantMessage", + "resultWrapper":"DescribeSnapshotCopyGrantsResult" + }, + "errors":[ + { + "shape":"SnapshotCopyGrantNotFoundFault", + "error":{ + "code":"SnapshotCopyGrantNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DescribeTags":{ + "name":"DescribeTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTagsMessage"}, + "output":{ + "shape":"TaggedResourceListMessage", + "resultWrapper":"DescribeTagsResult" + }, + "errors":[ + { + "shape":"ResourceNotFoundFault", + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidTagFault", + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DisableLogging":{ + "name":"DisableLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableLoggingMessage"}, + "output":{ + "shape":"LoggingStatus", + "resultWrapper":"DisableLoggingResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DisableSnapshotCopy":{ + "name":"DisableSnapshotCopy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableSnapshotCopyMessage"}, + "output":{ + "shape":"DisableSnapshotCopyResult", + "wrapper":true, + "resultWrapper":"DisableSnapshotCopyResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"SnapshotCopyAlreadyDisabledFault", + "error":{ + "code":"SnapshotCopyAlreadyDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterStateFault", + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnauthorizedOperation", + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "EnableLogging":{ + "name":"EnableLogging", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableLoggingMessage"}, + "output":{ + "shape":"LoggingStatus", + "resultWrapper":"EnableLoggingResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"BucketNotFoundFault", + "error":{ + "code":"BucketNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InsufficientS3BucketPolicyFault", + "error":{ + "code":"InsufficientS3BucketPolicyFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidS3KeyPrefixFault", + "error":{ + "code":"InvalidS3KeyPrefixFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidS3BucketNameFault", + "error":{ + "code":"InvalidS3BucketNameFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "EnableSnapshotCopy":{ + "name":"EnableSnapshotCopy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableSnapshotCopyMessage"}, + "output":{ + "shape":"EnableSnapshotCopyResult", + "wrapper":true, + "resultWrapper":"EnableSnapshotCopyResult" + }, + "errors":[ + { + "shape":"IncompatibleOrderableOptions", + "error":{ + "code":"IncompatibleOrderableOptions", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterStateFault", + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"CopyToRegionDisabledFault", + "error":{ + "code":"CopyToRegionDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotCopyAlreadyEnabledFault", + "error":{ + "code":"SnapshotCopyAlreadyEnabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnknownSnapshotCopyRegionFault", + "error":{ + "code":"UnknownSnapshotCopyRegionFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnauthorizedOperation", + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotCopyGrantNotFoundFault", + "error":{ + "code":"SnapshotCopyGrantNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededFault", + "error":{ + "code":"LimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyCluster":{ + "name":"ModifyCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyClusterMessage"}, + "output":{ + "shape":"ModifyClusterResult", + "wrapper":true, + "resultWrapper":"ModifyClusterResult" + }, + "errors":[ + { + "shape":"InvalidClusterStateFault", + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterSecurityGroupStateFault", + "error":{ + "code":"InvalidClusterSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + 
"shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberOfNodesQuotaExceededFault", + "error":{ + "code":"NumberOfNodesQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSecurityGroupNotFoundFault", + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterParameterGroupNotFoundFault", + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InsufficientClusterCapacityFault", + "error":{ + "code":"InsufficientClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnsupportedOptionFault", + "error":{ + "code":"UnsupportedOptionFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnauthorizedOperation", + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmClientCertificateNotFoundFault", + "error":{ + "code":"HsmClientCertificateNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmConfigurationNotFoundFault", + "error":{ + "code":"HsmConfigurationNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterAlreadyExistsFault", + "error":{ + "code":"ClusterAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededFault", + "error":{ + "code":"LimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyClusterParameterGroup":{ + "name":"ModifyClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"ModifyClusterParameterGroupMessage"}, + "output":{ + "shape":"ClusterParameterGroupNameMessage", + "resultWrapper":"ModifyClusterParameterGroupResult" + }, + "errors":[ + { + "shape":"ClusterParameterGroupNotFoundFault", + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterParameterGroupStateFault", + "error":{ + "code":"InvalidClusterParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyClusterSubnetGroup":{ + "name":"ModifyClusterSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyClusterSubnetGroupMessage"}, + "output":{ + "shape":"ModifyClusterSubnetGroupResult", + "wrapper":true, + "resultWrapper":"ModifyClusterSubnetGroupResult" + }, + "errors":[ + { + "shape":"ClusterSubnetGroupNotFoundFault", + "error":{ + "code":"ClusterSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSubnetQuotaExceededFault", + "error":{ + "code":"ClusterSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubnetAlreadyInUse", + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnet", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnauthorizedOperation", + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "wrapper":true, + 
"resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + { + "shape":"SubscriptionNotFoundFault", + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SNSInvalidTopicFault", + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SNSNoAuthorizationFault", + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SNSTopicArnNotFoundFault", + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubscriptionEventIdNotFoundFault", + "error":{ + "code":"SubscriptionEventIdNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubscriptionCategoryNotFoundFault", + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SubscriptionSeverityNotFoundFault", + "error":{ + "code":"SubscriptionSeverityNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SourceNotFoundFault", + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubscriptionStateFault", + "error":{ + "code":"InvalidSubscriptionStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ModifySnapshotCopyRetentionPeriod":{ + "name":"ModifySnapshotCopyRetentionPeriod", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySnapshotCopyRetentionPeriodMessage"}, + "output":{ + "shape":"ModifySnapshotCopyRetentionPeriodResult", + "wrapper":true, + "resultWrapper":"ModifySnapshotCopyRetentionPeriodResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + 
"code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"SnapshotCopyDisabledFault", + "error":{ + "code":"SnapshotCopyDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnauthorizedOperation", + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterStateFault", + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "PurchaseReservedNodeOffering":{ + "name":"PurchaseReservedNodeOffering", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseReservedNodeOfferingMessage"}, + "output":{ + "shape":"PurchaseReservedNodeOfferingResult", + "wrapper":true, + "resultWrapper":"PurchaseReservedNodeOfferingResult" + }, + "errors":[ + { + "shape":"ReservedNodeOfferingNotFoundFault", + "error":{ + "code":"ReservedNodeOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ReservedNodeAlreadyExistsFault", + "error":{ + "code":"ReservedNodeAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ReservedNodeQuotaExceededFault", + "error":{ + "code":"ReservedNodeQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnsupportedOperationFault", + "error":{ + "code":"UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "RebootCluster":{ + "name":"RebootCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootClusterMessage"}, + "output":{ + "shape":"RebootClusterResult", + "wrapper":true, + "resultWrapper":"RebootClusterResult" + }, + "errors":[ + { + "shape":"InvalidClusterStateFault", + "error":{ + "code":"InvalidClusterState", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "ResetClusterParameterGroup":{ + "name":"ResetClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetClusterParameterGroupMessage"}, + "output":{ + "shape":"ClusterParameterGroupNameMessage", + "resultWrapper":"ResetClusterParameterGroupResult" + }, + "errors":[ + { + "shape":"InvalidClusterParameterGroupStateFault", + "error":{ + "code":"InvalidClusterParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterParameterGroupNotFoundFault", + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "RestoreFromClusterSnapshot":{ + "name":"RestoreFromClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreFromClusterSnapshotMessage"}, + "output":{ + "shape":"RestoreFromClusterSnapshotResult", + "wrapper":true, + "resultWrapper":"RestoreFromClusterSnapshotResult" + }, + "errors":[ + { + "shape":"AccessToSnapshotDeniedFault", + "error":{ + "code":"AccessToSnapshotDenied", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterAlreadyExistsFault", + "error":{ + "code":"ClusterAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSnapshotNotFoundFault", + "error":{ + "code":"ClusterSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterQuotaExceededFault", + "error":{ + "code":"ClusterQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InsufficientClusterCapacityFault", + "error":{ + 
"code":"InsufficientClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterSnapshotStateFault", + "error":{ + "code":"InvalidClusterSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidRestoreFault", + "error":{ + "code":"InvalidRestore", + "httpStatusCode":406, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberOfNodesQuotaExceededFault", + "error":{ + "code":"NumberOfNodesQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberOfNodesPerClusterLimitExceededFault", + "error":{ + "code":"NumberOfNodesPerClusterLimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidVPCNetworkStateFault", + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterSubnetGroupStateFault", + "error":{ + "code":"InvalidClusterSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidSubnet", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSubnetGroupNotFoundFault", + "error":{ + "code":"ClusterSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnauthorizedOperation", + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmClientCertificateNotFoundFault", + "error":{ + "code":"HsmClientCertificateNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"HsmConfigurationNotFoundFault", + "error":{ + "code":"HsmConfigurationNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + 
}, + { + "shape":"InvalidElasticIpFault", + "error":{ + "code":"InvalidElasticIpFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterParameterGroupNotFoundFault", + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSecurityGroupNotFoundFault", + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"LimitExceededFault", + "error":{ + "code":"LimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "RevokeClusterSecurityGroupIngress":{ + "name":"RevokeClusterSecurityGroupIngress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeClusterSecurityGroupIngressMessage"}, + "output":{ + "shape":"RevokeClusterSecurityGroupIngressResult", + "wrapper":true, + "resultWrapper":"RevokeClusterSecurityGroupIngressResult" + }, + "errors":[ + { + "shape":"ClusterSecurityGroupNotFoundFault", + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationNotFoundFault", + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterSecurityGroupStateFault", + "error":{ + "code":"InvalidClusterSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "RevokeSnapshotAccess":{ + "name":"RevokeSnapshotAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeSnapshotAccessMessage"}, + "output":{ + "shape":"RevokeSnapshotAccessResult", + "wrapper":true, + "resultWrapper":"RevokeSnapshotAccessResult" + }, + "errors":[ + { + "shape":"AccessToSnapshotDeniedFault", + "error":{ + "code":"AccessToSnapshotDenied", + "httpStatusCode":400, 
+ "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationNotFoundFault", + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ClusterSnapshotNotFoundFault", + "error":{ + "code":"ClusterSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "RotateEncryptionKey":{ + "name":"RotateEncryptionKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RotateEncryptionKeyMessage"}, + "output":{ + "shape":"RotateEncryptionKeyResult", + "wrapper":true, + "resultWrapper":"RotateEncryptionKeyResult" + }, + "errors":[ + { + "shape":"ClusterNotFoundFault", + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidClusterStateFault", + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "AccessToSnapshotDeniedFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AccessToSnapshotDenied", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AccountWithRestoreAccess":{ + "type":"structure", + "members":{ + "AccountId":{"shape":"String"} + } + }, + "AccountsWithRestoreAccessList":{ + "type":"list", + "member":{ + "shape":"AccountWithRestoreAccess", + "locationName":"AccountWithRestoreAccess" + } + }, + "AuthorizationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AuthorizationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"AuthorizationQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "AuthorizeClusterSecurityGroupIngressMessage":{ + "type":"structure", + "required":["ClusterSecurityGroupName"], + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "AuthorizeSnapshotAccessMessage":{ + "type":"structure", + "required":[ + "SnapshotIdentifier", + "AccountWithRestoreAccess" + ], + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotClusterIdentifier":{"shape":"String"}, + "AccountWithRestoreAccess":{"shape":"String"} + } + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + }, + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "BucketNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"BucketNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Cluster":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "ClusterStatus":{"shape":"String"}, + "ModifyStatus":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "DBName":{"shape":"String"}, + "Endpoint":{"shape":"Endpoint"}, + "ClusterCreateTime":{"shape":"TStamp"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"Integer"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroupMembershipList"}, + "VpcSecurityGroups":{"shape":"VpcSecurityGroupMembershipList"}, + "ClusterParameterGroups":{"shape":"ClusterParameterGroupStatusList"}, + "ClusterSubnetGroupName":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + 
"PreferredMaintenanceWindow":{"shape":"String"}, + "PendingModifiedValues":{"shape":"PendingModifiedValues"}, + "ClusterVersion":{"shape":"String"}, + "AllowVersionUpgrade":{"shape":"Boolean"}, + "NumberOfNodes":{"shape":"Integer"}, + "PubliclyAccessible":{"shape":"Boolean"}, + "Encrypted":{"shape":"Boolean"}, + "RestoreStatus":{"shape":"RestoreStatus"}, + "HsmStatus":{"shape":"HsmStatus"}, + "ClusterSnapshotCopyStatus":{"shape":"ClusterSnapshotCopyStatus"}, + "ClusterPublicKey":{"shape":"String"}, + "ClusterNodes":{"shape":"ClusterNodesList"}, + "ElasticIpStatus":{"shape":"ElasticIpStatus"}, + "ClusterRevisionNumber":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"} + }, + "wrapper":true + }, + "ClusterAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterList":{ + "type":"list", + "member":{ + "shape":"Cluster", + "locationName":"Cluster" + } + }, + "ClusterNode":{ + "type":"structure", + "members":{ + "NodeRole":{"shape":"String"}, + "PrivateIPAddress":{"shape":"String"}, + "PublicIPAddress":{"shape":"String"} + } + }, + "ClusterNodesList":{ + "type":"list", + "member":{"shape":"ClusterNode"} + }, + "ClusterNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ClusterParameterGroup":{ + "type":"structure", + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "ClusterParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterParameterGroupDetails":{ + "type":"structure", + 
"members":{ + "Parameters":{"shape":"ParametersList"}, + "Marker":{"shape":"String"} + } + }, + "ClusterParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ParameterGroupStatus":{"shape":"String"} + } + }, + "ClusterParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ClusterParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterParameterGroupStatus":{ + "type":"structure", + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"}, + "ClusterParameterStatusList":{"shape":"ClusterParameterStatusList"} + } + }, + "ClusterParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"ClusterParameterGroupStatus", + "locationName":"ClusterParameterGroup" + } + }, + "ClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ParameterGroups":{"shape":"ParameterGroupList"} + } + }, + "ClusterParameterStatus":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterApplyStatus":{"shape":"String"}, + "ParameterApplyErrorDescription":{"shape":"String"} + } + }, + "ClusterParameterStatusList":{ + "type":"list", + "member":{"shape":"ClusterParameterStatus"} + }, + "ClusterQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSecurityGroup":{ + "type":"structure", + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "EC2SecurityGroups":{"shape":"EC2SecurityGroupList"}, + "IPRanges":{"shape":"IPRangeList"}, 
+ "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "ClusterSecurityGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSecurityGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSecurityGroupMembership":{ + "type":"structure", + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "ClusterSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"ClusterSecurityGroupMembership", + "locationName":"ClusterSecurityGroup" + } + }, + "ClusterSecurityGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroups"} + } + }, + "ClusterSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ClusterSecurityGroupName" + } + }, + "ClusterSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ClusterSecurityGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QuotaExceeded.ClusterSecurityGroup", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSecurityGroups":{ + "type":"list", + "member":{ + "shape":"ClusterSecurityGroup", + "locationName":"ClusterSecurityGroup" + } + }, + "ClusterSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSnapshotCopyStatus":{ + "type":"structure", + "members":{ + "DestinationRegion":{"shape":"String"}, + "RetentionPeriod":{"shape":"Long"}, + "SnapshotCopyGrantName":{"shape":"String"} + } + }, + "ClusterSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"ClusterSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ClusterSnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSubnetGroup":{ + "type":"structure", + "members":{ + "ClusterSubnetGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "SubnetGroupStatus":{"shape":"String"}, + "Subnets":{"shape":"SubnetList"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "ClusterSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ClusterSubnetGroups":{"shape":"ClusterSubnetGroups"} + } + }, + "ClusterSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSubnetGroupNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterSubnetGroups":{ + "type":"list", + "member":{ + "shape":"ClusterSubnetGroup", + "locationName":"ClusterSubnetGroup" + } + }, + "ClusterSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ClusterSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ClusterVersion":{ + "type":"structure", + "members":{ + "ClusterVersion":{"shape":"String"}, + "ClusterParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"} + } + }, + "ClusterVersionList":{ + 
"type":"list", + "member":{ + "shape":"ClusterVersion", + "locationName":"ClusterVersion" + } + }, + "ClusterVersionsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ClusterVersions":{"shape":"ClusterVersionList"} + } + }, + "ClustersMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Clusters":{"shape":"ClusterList"} + } + }, + "CopyClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceSnapshotIdentifier", + "TargetSnapshotIdentifier" + ], + "members":{ + "SourceSnapshotIdentifier":{"shape":"String"}, + "SourceSnapshotClusterIdentifier":{"shape":"String"}, + "TargetSnapshotIdentifier":{"shape":"String"} + } + }, + "CopyToRegionDisabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"CopyToRegionDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CreateClusterMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "NodeType", + "MasterUsername", + "MasterUserPassword" + ], + "members":{ + "DBName":{"shape":"String"}, + "ClusterIdentifier":{"shape":"String"}, + "ClusterType":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "MasterUsername":{"shape":"String"}, + "MasterUserPassword":{"shape":"String"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "ClusterSubnetGroupName":{"shape":"String"}, + "AvailabilityZone":{"shape":"String"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "ClusterParameterGroupName":{"shape":"String"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"IntegerOptional"}, + "Port":{"shape":"IntegerOptional"}, + "ClusterVersion":{"shape":"String"}, + "AllowVersionUpgrade":{"shape":"BooleanOptional"}, + "NumberOfNodes":{"shape":"IntegerOptional"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "Encrypted":{"shape":"BooleanOptional"}, + "HsmClientCertificateIdentifier":{"shape":"String"}, + 
"HsmConfigurationIdentifier":{"shape":"String"}, + "ElasticIp":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{"shape":"String"} + } + }, + "CreateClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "ParameterGroupName", + "ParameterGroupFamily", + "Description" + ], + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ParameterGroupFamily":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateClusterSecurityGroupMessage":{ + "type":"structure", + "required":[ + "ClusterSecurityGroupName", + "Description" + ], + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "SnapshotIdentifier", + "ClusterIdentifier" + ], + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "ClusterIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateClusterSubnetGroupMessage":{ + "type":"structure", + "required":[ + "ClusterSubnetGroupName", + "Description", + "SubnetIds" + ], + "members":{ + "ClusterSubnetGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIds":{"shape":"SourceIdsList"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Severity":{"shape":"String"}, + "Enabled":{"shape":"BooleanOptional"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateHsmClientCertificateMessage":{ + "type":"structure", + "required":["HsmClientCertificateIdentifier"], + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + 
"CreateHsmConfigurationMessage":{ + "type":"structure", + "required":[ + "HsmConfigurationIdentifier", + "Description", + "HsmIpAddress", + "HsmPartitionName", + "HsmPartitionPassword", + "HsmServerPublicCertificate" + ], + "members":{ + "HsmConfigurationIdentifier":{"shape":"String"}, + "Description":{"shape":"String"}, + "HsmIpAddress":{"shape":"String"}, + "HsmPartitionName":{"shape":"String"}, + "HsmPartitionPassword":{"shape":"String"}, + "HsmServerPublicCertificate":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateSnapshotCopyGrantMessage":{ + "type":"structure", + "required":["SnapshotCopyGrantName"], + "members":{ + "SnapshotCopyGrantName":{"shape":"String"}, + "KmsKeyId":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateTagsMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "DefaultClusterParameters":{ + "type":"structure", + "members":{ + "ParameterGroupFamily":{"shape":"String"}, + "Marker":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + }, + "wrapper":true + }, + "DeleteClusterMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "SkipFinalClusterSnapshot":{"shape":"Boolean"}, + "FinalClusterSnapshotIdentifier":{"shape":"String"} + } + }, + "DeleteClusterParameterGroupMessage":{ + "type":"structure", + "required":["ParameterGroupName"], + "members":{ + "ParameterGroupName":{"shape":"String"} + } + }, + "DeleteClusterSecurityGroupMessage":{ + "type":"structure", + "required":["ClusterSecurityGroupName"], + "members":{ + "ClusterSecurityGroupName":{"shape":"String"} + } + }, + "DeleteClusterSnapshotMessage":{ + "type":"structure", + "required":["SnapshotIdentifier"], + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotClusterIdentifier":{"shape":"String"} + } + }, + 
"DeleteClusterSubnetGroupMessage":{ + "type":"structure", + "required":["ClusterSubnetGroupName"], + "members":{ + "ClusterSubnetGroupName":{"shape":"String"} + } + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"} + } + }, + "DeleteHsmClientCertificateMessage":{ + "type":"structure", + "required":["HsmClientCertificateIdentifier"], + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"} + } + }, + "DeleteHsmConfigurationMessage":{ + "type":"structure", + "required":["HsmConfigurationIdentifier"], + "members":{ + "HsmConfigurationIdentifier":{"shape":"String"} + } + }, + "DeleteSnapshotCopyGrantMessage":{ + "type":"structure", + "required":["SnapshotCopyGrantName"], + "members":{ + "SnapshotCopyGrantName":{"shape":"String"} + } + }, + "DeleteTagsMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"} + } + }, + "DescribeClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "ParameterGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeClusterParametersMessage":{ + "type":"structure", + "required":["ParameterGroupName"], + "members":{ + "ParameterGroupName":{"shape":"String"}, + "Source":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeClusterSecurityGroupsMessage":{ + "type":"structure", + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeClusterSnapshotsMessage":{ + "type":"structure", + "members":{ + 
"ClusterIdentifier":{"shape":"String"}, + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "OwnerAccount":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeClusterSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "ClusterSubnetGroupName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeClusterVersionsMessage":{ + "type":"structure", + "members":{ + "ClusterVersion":{"shape":"String"}, + "ClusterParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeClustersMessage":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeDefaultClusterParametersMessage":{ + "type":"structure", + "required":["ParameterGroupFamily"], + "members":{ + "ParameterGroupFamily":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"} + } + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "StartTime":{"shape":"TStamp"}, + "EndTime":{"shape":"TStamp"}, + "Duration":{"shape":"IntegerOptional"}, + 
"MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeHsmClientCertificatesMessage":{ + "type":"structure", + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeHsmConfigurationsMessage":{ + "type":"structure", + "members":{ + "HsmConfigurationIdentifier":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeLoggingStatusMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "DescribeOrderableClusterOptionsMessage":{ + "type":"structure", + "members":{ + "ClusterVersion":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedNodeOfferingsMessage":{ + "type":"structure", + "members":{ + "ReservedNodeOfferingId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReservedNodesMessage":{ + "type":"structure", + "members":{ + "ReservedNodeId":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeResizeMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "DescribeSnapshotCopyGrantsMessage":{ + "type":"structure", + "members":{ + "SnapshotCopyGrantName":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DescribeTagsMessage":{ + "type":"structure", + "members":{ + "ResourceName":{"shape":"String"}, + 
"ResourceType":{"shape":"String"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"}, + "TagKeys":{"shape":"TagKeyList"}, + "TagValues":{"shape":"TagValueList"} + } + }, + "DisableLoggingMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "DisableSnapshotCopyMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "Double":{"type":"double"}, + "DoubleOptional":{"type":"double"}, + "EC2SecurityGroup":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "EC2SecurityGroupList":{ + "type":"list", + "member":{ + "shape":"EC2SecurityGroup", + "locationName":"EC2SecurityGroup" + } + }, + "ElasticIpStatus":{ + "type":"structure", + "members":{ + "ElasticIp":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "EnableLoggingMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "BucketName" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"} + } + }, + "EnableSnapshotCopyMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "DestinationRegion" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "DestinationRegion":{"shape":"String"}, + "RetentionPeriod":{"shape":"IntegerOptional"}, + "SnapshotCopyGrantName":{"shape":"String"} + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{"shape":"String"}, + "Port":{"shape":"Integer"} + } + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{"shape":"String"}, + "SourceType":{"shape":"SourceType"}, + "Message":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Severity":{"shape":"String"}, + 
"Date":{"shape":"TStamp"}, + "EventId":{"shape":"String"} + } + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"String"}, + "Events":{"shape":"EventInfoMapList"} + }, + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{"shape":"EventCategoriesMapList"} + } + }, + "EventInfoMap":{ + "type":"structure", + "members":{ + "EventId":{"shape":"String"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "EventDescription":{"shape":"String"}, + "Severity":{"shape":"String"} + }, + "wrapper":true + }, + "EventInfoMapList":{ + "type":"list", + "member":{ + "shape":"EventInfoMap", + "locationName":"EventInfoMap" + } + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{"shape":"String"}, + "CustSubscriptionId":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "SubscriptionCreationTime":{"shape":"TStamp"}, + "SourceType":{"shape":"String"}, + "SourceIdsList":{"shape":"SourceIdsList"}, + "EventCategoriesList":{"shape":"EventCategoriesList"}, + "Severity":{"shape":"String"}, + "Enabled":{"shape":"Boolean"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", 
+ "members":{ + "Marker":{"shape":"String"}, + "EventSubscriptionsList":{"shape":"EventSubscriptionsList"} + } + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Events":{"shape":"EventList"} + } + }, + "HsmClientCertificate":{ + "type":"structure", + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmClientCertificatePublicKey":{"shape":"String"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "HsmClientCertificateAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmClientCertificateAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmClientCertificateList":{ + "type":"list", + "member":{ + "shape":"HsmClientCertificate", + "locationName":"HsmClientCertificate" + } + }, + "HsmClientCertificateMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "HsmClientCertificates":{"shape":"HsmClientCertificateList"} + } + }, + "HsmClientCertificateNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmClientCertificateNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmClientCertificateQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmClientCertificateQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmConfiguration":{ + "type":"structure", + "members":{ + "HsmConfigurationIdentifier":{"shape":"String"}, + "Description":{"shape":"String"}, + "HsmIpAddress":{"shape":"String"}, + "HsmPartitionName":{"shape":"String"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "HsmConfigurationAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmConfigurationAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmConfigurationList":{ + "type":"list", + 
"member":{ + "shape":"HsmConfiguration", + "locationName":"HsmConfiguration" + } + }, + "HsmConfigurationMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "HsmConfigurations":{"shape":"HsmConfigurationList"} + } + }, + "HsmConfigurationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmConfigurationNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmConfigurationQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"HsmConfigurationQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "HsmStatus":{ + "type":"structure", + "members":{ + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmConfigurationIdentifier":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "IPRange":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "Tags":{"shape":"TagList"} + } + }, + "IPRangeList":{ + "type":"list", + "member":{ + "shape":"IPRange", + "locationName":"IPRange" + } + }, + "ImportTablesCompleted":{ + "type":"list", + "member":{"shape":"String"} + }, + "ImportTablesInProgress":{ + "type":"list", + "member":{"shape":"String"} + }, + "ImportTablesNotStarted":{ + "type":"list", + "member":{"shape":"String"} + }, + "IncompatibleOrderableOptions":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"IncompatibleOrderableOptions", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientS3BucketPolicyFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InsufficientS3BucketPolicyFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + 
"Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidClusterParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidClusterSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidClusterSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidElasticIpFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidElasticIpFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidHsmClientCertificateStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidHsmClientCertificateStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidHsmConfigurationStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidHsmConfigurationStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + 
"members":{ + }, + "error":{ + "code":"InvalidRestore", + "httpStatusCode":406, + "senderFault":true + }, + "exception":true + }, + "InvalidS3BucketNameFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidS3BucketNameFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidS3KeyPrefixFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidS3KeyPrefixFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSnapshotCopyGrantStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSnapshotCopyGrantStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidSubscriptionStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidTagFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidTagFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LimitExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"LimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LoggingStatus":{ + "type":"structure", + "members":{ + "LoggingEnabled":{"shape":"Boolean"}, + "BucketName":{"shape":"String"}, + "S3KeyPrefix":{"shape":"String"}, + "LastSuccessfulDeliveryTime":{"shape":"TStamp"}, + "LastFailureTime":{"shape":"TStamp"}, + "LastFailureMessage":{"shape":"String"} + } + }, + 
"Long":{"type":"long"}, + "LongOptional":{"type":"long"}, + "ModifyClusterMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "ClusterType":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "NumberOfNodes":{"shape":"IntegerOptional"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "MasterUserPassword":{"shape":"String"}, + "ClusterParameterGroupName":{"shape":"String"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"IntegerOptional"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "ClusterVersion":{"shape":"String"}, + "AllowVersionUpgrade":{"shape":"BooleanOptional"}, + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmConfigurationIdentifier":{"shape":"String"}, + "NewClusterIdentifier":{"shape":"String"} + } + }, + "ModifyClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "ParameterGroupName", + "Parameters" + ], + "members":{ + "ParameterGroupName":{"shape":"String"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ModifyClusterSubnetGroupMessage":{ + "type":"structure", + "required":[ + "ClusterSubnetGroupName", + "SubnetIds" + ], + "members":{ + "ClusterSubnetGroupName":{"shape":"String"}, + "Description":{"shape":"String"}, + "SubnetIds":{"shape":"SubnetIdentifierList"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{"shape":"String"}, + "SnsTopicArn":{"shape":"String"}, + "SourceType":{"shape":"String"}, + "SourceIds":{"shape":"SourceIdsList"}, + "EventCategories":{"shape":"EventCategoriesList"}, + "Severity":{"shape":"String"}, + "Enabled":{"shape":"BooleanOptional"} + } + }, + "ModifySnapshotCopyRetentionPeriodMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "RetentionPeriod" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + 
"RetentionPeriod":{"shape":"Integer"} + } + }, + "NumberOfNodesPerClusterLimitExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NumberOfNodesPerClusterLimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NumberOfNodesQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"NumberOfNodesQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OrderableClusterOption":{ + "type":"structure", + "members":{ + "ClusterVersion":{"shape":"String"}, + "ClusterType":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"} + }, + "wrapper":true + }, + "OrderableClusterOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableClusterOption", + "locationName":"OrderableClusterOption" + } + }, + "OrderableClusterOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableClusterOptions":{"shape":"OrderableClusterOptionsList"}, + "Marker":{"shape":"String"} + } + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{"shape":"String"}, + "ParameterValue":{"shape":"String"}, + "Description":{"shape":"String"}, + "Source":{"shape":"String"}, + "DataType":{"shape":"String"}, + "AllowedValues":{"shape":"String"}, + "ApplyType":{"shape":"ParameterApplyType"}, + "IsModifiable":{"shape":"Boolean"}, + "MinimumEngineVersion":{"shape":"String"} + } + }, + "ParameterApplyType":{ + "type":"string", + "enum":[ + "static", + "dynamic" + ] + }, + "ParameterGroupList":{ + "type":"list", + "member":{ + "shape":"ClusterParameterGroup", + "locationName":"ClusterParameterGroup" + } + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "MasterUserPassword":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "NumberOfNodes":{"shape":"IntegerOptional"}, + 
"ClusterType":{"shape":"String"}, + "ClusterVersion":{"shape":"String"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"IntegerOptional"}, + "ClusterIdentifier":{"shape":"String"} + } + }, + "PurchaseReservedNodeOfferingMessage":{ + "type":"structure", + "required":["ReservedNodeOfferingId"], + "members":{ + "ReservedNodeOfferingId":{"shape":"String"}, + "NodeCount":{"shape":"IntegerOptional"} + } + }, + "RebootClusterMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "RecurringCharge":{ + "type":"structure", + "members":{ + "RecurringChargeAmount":{"shape":"Double"}, + "RecurringChargeFrequency":{"shape":"String"} + }, + "wrapper":true + }, + "RecurringChargeList":{ + "type":"list", + "member":{ + "shape":"RecurringCharge", + "locationName":"RecurringCharge" + } + }, + "ReservedNode":{ + "type":"structure", + "members":{ + "ReservedNodeId":{"shape":"String"}, + "ReservedNodeOfferingId":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "StartTime":{"shape":"TStamp"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "NodeCount":{"shape":"Integer"}, + "State":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedNodeAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedNodeAlreadyExists", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedNodeList":{ + "type":"list", + "member":{ + "shape":"ReservedNode", + "locationName":"ReservedNode" + } + }, + "ReservedNodeNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedNodeNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedNodeOffering":{ + "type":"structure", + "members":{ + 
"ReservedNodeOfferingId":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "Duration":{"shape":"Integer"}, + "FixedPrice":{"shape":"Double"}, + "UsagePrice":{"shape":"Double"}, + "CurrencyCode":{"shape":"String"}, + "OfferingType":{"shape":"String"}, + "RecurringCharges":{"shape":"RecurringChargeList"} + }, + "wrapper":true + }, + "ReservedNodeOfferingList":{ + "type":"list", + "member":{ + "shape":"ReservedNodeOffering", + "locationName":"ReservedNodeOffering" + } + }, + "ReservedNodeOfferingNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedNodeOfferingNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ReservedNodeOfferingsMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedNodeOfferings":{"shape":"ReservedNodeOfferingList"} + } + }, + "ReservedNodeQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ReservedNodeQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ReservedNodesMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReservedNodes":{"shape":"ReservedNodeList"} + } + }, + "ResetClusterParameterGroupMessage":{ + "type":"structure", + "required":["ParameterGroupName"], + "members":{ + "ParameterGroupName":{"shape":"String"}, + "ResetAllParameters":{"shape":"Boolean"}, + "Parameters":{"shape":"ParametersList"} + } + }, + "ResizeNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResizeNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResizeProgressMessage":{ + "type":"structure", + "members":{ + "TargetNodeType":{"shape":"String"}, + "TargetNumberOfNodes":{"shape":"IntegerOptional"}, + "TargetClusterType":{"shape":"String"}, + "Status":{"shape":"String"}, + "ImportTablesCompleted":{"shape":"ImportTablesCompleted"}, + "ImportTablesInProgress":{"shape":"ImportTablesInProgress"}, + 
"ImportTablesNotStarted":{"shape":"ImportTablesNotStarted"}, + "AvgResizeRateInMegaBytesPerSecond":{"shape":"DoubleOptional"}, + "TotalResizeDataInMegaBytes":{"shape":"LongOptional"}, + "ProgressInMegaBytes":{"shape":"LongOptional"}, + "ElapsedTimeInSeconds":{"shape":"LongOptional"}, + "EstimatedTimeToCompletionInSeconds":{"shape":"LongOptional"} + } + }, + "ResourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "RestorableNodeTypeList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"NodeType" + } + }, + "RestoreFromClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "SnapshotIdentifier" + ], + "members":{ + "ClusterIdentifier":{"shape":"String"}, + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotClusterIdentifier":{"shape":"String"}, + "Port":{"shape":"IntegerOptional"}, + "AvailabilityZone":{"shape":"String"}, + "AllowVersionUpgrade":{"shape":"BooleanOptional"}, + "ClusterSubnetGroupName":{"shape":"String"}, + "PubliclyAccessible":{"shape":"BooleanOptional"}, + "OwnerAccount":{"shape":"String"}, + "HsmClientCertificateIdentifier":{"shape":"String"}, + "HsmConfigurationIdentifier":{"shape":"String"}, + "ElasticIp":{"shape":"String"}, + "ClusterParameterGroupName":{"shape":"String"}, + "ClusterSecurityGroups":{"shape":"ClusterSecurityGroupNameList"}, + "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, + "PreferredMaintenanceWindow":{"shape":"String"}, + "AutomatedSnapshotRetentionPeriod":{"shape":"IntegerOptional"}, + "KmsKeyId":{"shape":"String"}, + "NodeType":{"shape":"String"} + } + }, + "RestoreStatus":{ + "type":"structure", + "members":{ + "Status":{"shape":"String"}, + "CurrentRestoreRateInMegaBytesPerSecond":{"shape":"Double"}, + "SnapshotSizeInMegaBytes":{"shape":"Long"}, + "ProgressInMegaBytes":{"shape":"Long"}, + "ElapsedTimeInSeconds":{"shape":"Long"}, + 
"EstimatedTimeToCompletionInSeconds":{"shape":"Long"} + } + }, + "RevokeClusterSecurityGroupIngressMessage":{ + "type":"structure", + "required":["ClusterSecurityGroupName"], + "members":{ + "ClusterSecurityGroupName":{"shape":"String"}, + "CIDRIP":{"shape":"String"}, + "EC2SecurityGroupName":{"shape":"String"}, + "EC2SecurityGroupOwnerId":{"shape":"String"} + } + }, + "RevokeSnapshotAccessMessage":{ + "type":"structure", + "required":[ + "SnapshotIdentifier", + "AccountWithRestoreAccess" + ], + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "SnapshotClusterIdentifier":{"shape":"String"}, + "AccountWithRestoreAccess":{"shape":"String"} + } + }, + "RotateEncryptionKeyMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{"shape":"String"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "Snapshot":{ + "type":"structure", + "members":{ + "SnapshotIdentifier":{"shape":"String"}, + "ClusterIdentifier":{"shape":"String"}, + "SnapshotCreateTime":{"shape":"TStamp"}, + "Status":{"shape":"String"}, + "Port":{"shape":"Integer"}, + "AvailabilityZone":{"shape":"String"}, + "ClusterCreateTime":{"shape":"TStamp"}, + "MasterUsername":{"shape":"String"}, + "ClusterVersion":{"shape":"String"}, + "SnapshotType":{"shape":"String"}, + "NodeType":{"shape":"String"}, + "NumberOfNodes":{"shape":"Integer"}, + "DBName":{"shape":"String"}, + "VpcId":{"shape":"String"}, + "Encrypted":{"shape":"Boolean"}, + 
"KmsKeyId":{"shape":"String"}, + "EncryptedWithHSM":{"shape":"Boolean"}, + "AccountsWithRestoreAccess":{"shape":"AccountsWithRestoreAccessList"}, + "OwnerAccount":{"shape":"String"}, + "TotalBackupSizeInMegaBytes":{"shape":"Double"}, + "ActualIncrementalBackupSizeInMegaBytes":{"shape":"Double"}, + "BackupProgressInMegaBytes":{"shape":"Double"}, + "CurrentBackupRateInMegaBytesPerSecond":{"shape":"Double"}, + "EstimatedSecondsToCompletion":{"shape":"Long"}, + "ElapsedTimeInSeconds":{"shape":"Long"}, + "SourceRegion":{"shape":"String"}, + "Tags":{"shape":"TagList"}, + "RestorableNodeTypes":{"shape":"RestorableNodeTypeList"} + }, + "wrapper":true + }, + "SnapshotCopyAlreadyDisabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyAlreadyDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyAlreadyEnabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyAlreadyEnabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyDisabledFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyDisabledFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyGrant":{ + "type":"structure", + "members":{ + "SnapshotCopyGrantName":{"shape":"String"}, + "KmsKeyId":{"shape":"String"}, + "Tags":{"shape":"TagList"} + }, + "wrapper":true + }, + "SnapshotCopyGrantAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyGrantAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyGrantList":{ + "type":"list", + "member":{ + "shape":"SnapshotCopyGrant", + "locationName":"SnapshotCopyGrant" + } + }, + "SnapshotCopyGrantMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "SnapshotCopyGrants":{"shape":"SnapshotCopyGrantList"} + } + }, + 
"SnapshotCopyGrantNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyGrantNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotCopyGrantQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SnapshotCopyGrantQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotList":{ + "type":"list", + "member":{ + "shape":"Snapshot", + "locationName":"Snapshot" + } + }, + "SnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "Snapshots":{"shape":"SnapshotList"} + } + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "cluster", + "cluster-parameter-group", + "cluster-security-group", + "cluster-snapshot" + ] + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{"shape":"String"}, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{"shape":"String"} + } + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + 
"members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionEventIdNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionEventIdNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionSeverityNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionSeverityNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "TagKeyList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"TagKey" + } + }, + "TagLimitExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TagLimitExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "TagValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"TagValue" + } + }, + "TaggedResource":{ + "type":"structure", + "members":{ + "Tag":{"shape":"Tag"}, + "ResourceName":{"shape":"String"}, + "ResourceType":{"shape":"String"} + } + }, + "TaggedResourceList":{ + "type":"list", + "member":{ + "shape":"TaggedResource", + "locationName":"TaggedResource" + } + }, + "TaggedResourceListMessage":{ + "type":"structure", + "members":{ + "TaggedResources":{"shape":"TaggedResourceList"}, + "Marker":{"shape":"String"} + } + }, + "UnauthorizedOperation":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UnauthorizedOperation", + "httpStatusCode":400, + "senderFault":true + }, 
+ "exception":true + }, + "UnknownSnapshotCopyRegionFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UnknownSnapshotCopyRegionFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "UnsupportedOperationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnsupportedOptionFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"UnsupportedOptionFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{"shape":"String"}, + "Status":{"shape":"String"} + } + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroup" + } + }, + "AuthorizeClusterSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "ClusterSecurityGroup":{"shape":"ClusterSecurityGroup"} + } + }, + "AuthorizeSnapshotAccessResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "CopyClusterSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "CreateClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "CreateClusterParameterGroupResult":{ + "type":"structure", + "members":{ + "ClusterParameterGroup":{"shape":"ClusterParameterGroup"} + } + }, + "CreateClusterSecurityGroupResult":{ + "type":"structure", + "members":{ + "ClusterSecurityGroup":{"shape":"ClusterSecurityGroup"} + } + }, + "CreateClusterSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "CreateClusterSubnetGroupResult":{ + "type":"structure", + "members":{ + 
"ClusterSubnetGroup":{"shape":"ClusterSubnetGroup"} + } + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "CreateHsmClientCertificateResult":{ + "type":"structure", + "members":{ + "HsmClientCertificate":{"shape":"HsmClientCertificate"} + } + }, + "CreateHsmConfigurationResult":{ + "type":"structure", + "members":{ + "HsmConfiguration":{"shape":"HsmConfiguration"} + } + }, + "CreateSnapshotCopyGrantResult":{ + "type":"structure", + "members":{ + "SnapshotCopyGrant":{"shape":"SnapshotCopyGrant"} + } + }, + "DeleteClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "DeleteClusterSnapshotResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "DescribeDefaultClusterParametersResult":{ + "type":"structure", + "members":{ + "DefaultClusterParameters":{"shape":"DefaultClusterParameters"} + } + }, + "DisableSnapshotCopyResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "EnableSnapshotCopyResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "ModifyClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "ModifyClusterSubnetGroupResult":{ + "type":"structure", + "members":{ + "ClusterSubnetGroup":{"shape":"ClusterSubnetGroup"} + } + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "ModifySnapshotCopyRetentionPeriodResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "PurchaseReservedNodeOfferingResult":{ + "type":"structure", + "members":{ + "ReservedNode":{"shape":"ReservedNode"} + } + }, + "RebootClusterResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, + "RestoreFromClusterSnapshotResult":{ + "type":"structure", + "members":{ + 
"Cluster":{"shape":"Cluster"} + } + }, + "RevokeClusterSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "ClusterSecurityGroup":{"shape":"ClusterSecurityGroup"} + } + }, + "RevokeSnapshotAccessResult":{ + "type":"structure", + "members":{ + "Snapshot":{"shape":"Snapshot"} + } + }, + "RotateEncryptionKeyResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2006 @@ +{ + "version": "2.0", + "operations": { + "AuthorizeClusterSecurityGroupIngress": "

    Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR) IP address range or an EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.

    The EC2 security group must be defined in the AWS region where the cluster resides.

    For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing.

    You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide.

    ", + "AuthorizeSnapshotAccess": "

    Authorizes the specified AWS customer account to restore the specified snapshot.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "CopyClusterSnapshot": "

    Copies the specified automated cluster snapshot to a new manual cluster snapshot. The source must be an automated snapshot and it must be in the available state.

    When you delete a cluster, Amazon Redshift deletes any automated snapshots of the cluster. Also, when the retention period of the snapshot expires, Amazon Redshift automatically deletes it. If you want to keep an automated snapshot for a longer period, you can make a manual copy of the snapshot. Manual snapshots are retained until you delete them.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "CreateCluster": "

    Creates a new cluster. To create the cluster in virtual private cloud (VPC), you must provide cluster subnet group name. If you don't provide a cluster subnet group name or the cluster security group parameter, Amazon Redshift creates a non-VPC cluster, it associates the default cluster security group with the cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .

    ", + "CreateClusterParameterGroup": "

    Creates an Amazon Redshift parameter group.

    Creating parameter groups is independent of creating clusters. You can associate a cluster with a parameter group when you create the cluster. You can also associate an existing cluster with a parameter group after the cluster is created by using ModifyCluster.

    Parameters in the parameter group define specific behavior that applies to the databases you create on the cluster. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    ", + "CreateClusterSecurityGroup": "

    Creates a new Amazon Redshift security group. You use security groups to control access to non-VPC clusters.

    For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

    ", + "CreateClusterSnapshot": "

    Creates a manual snapshot of the specified cluster. The cluster must be in the available state.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "CreateClusterSubnetGroup": "

    Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating Amazon Redshift subnet group.

    For information about subnet groups, go to Amazon Redshift Cluster Subnet Groups in the Amazon Redshift Cluster Management Guide.

    ", + "CreateEventSubscription": "

    Creates an Amazon Redshift event notification subscription. This action requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the source type, and lists of Amazon Redshift source IDs, event categories, and event severities. Notifications will be sent for all events you want that match those criteria. For example, you can specify source type = cluster, source ID = my-cluster-1 and mycluster2, event categories = Availability, Backup, and severity = ERROR. The subscription will only send notifications for those ERROR events in the Availability and Backup categories for the specified clusters.

    If you specify both the source type and source IDs, such as source type = cluster and source identifier = my-cluster-1, notifications will be sent for all the cluster events for my-cluster-1. If you specify a source type but do not specify a source identifier, you will receive notice of the events for the objects of that type in your AWS account. If you do not specify either the SourceType or the SourceIdentifier, you will be notified of events generated from all Amazon Redshift sources belonging to your AWS account. You must specify a source type if you specify a source ID.

    ", + "CreateHsmClientCertificate": "

    Creates an HSM client certificate that an Amazon Redshift cluster will use to connect to the client's HSM in order to store and retrieve the keys used to encrypt the cluster databases.

    The command returns a public key, which you must store in the HSM. In addition to creating the HSM certificate, you must create an Amazon Redshift HSM configuration that provides a cluster the information needed to store and use encryption keys in the HSM. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.

    ", + "CreateHsmConfiguration": "

    Creates an HSM configuration that contains the information required by an Amazon Redshift cluster to store and use database encryption keys in a Hardware Security Module (HSM). After creating the HSM configuration, you can specify it as a parameter when creating a cluster. The cluster will then store its encryption keys in the HSM.

    In addition to creating an HSM configuration, you must also create an HSM client certificate. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.

    ", + "CreateSnapshotCopyGrant": "

    Creates a snapshot copy grant that permits Amazon Redshift to use a customer master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied snapshots in a destination region.

    For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

    ", + "CreateTags": "

    Adds one or more tags to a specified resource.

    A resource can have up to 10 tags. If you try to create more than 10 tags for a resource, you will receive an error and the attempt will fail.

    If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

    ", + "DeleteCluster": "

    Deletes a previously provisioned cluster. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .

    If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be \"final-snapshot\" while the snapshot is being taken, then it's \"deleting\" once Amazon Redshift begins deleting the cluster.

    For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .

    ", + "DeleteClusterParameterGroup": "

    Deletes a specified Amazon Redshift parameter group. You cannot delete a parameter group if it is associated with a cluster.

    ", + "DeleteClusterSecurityGroup": "

    Deletes an Amazon Redshift security group.

    You cannot delete a security group that is associated with any clusters. You cannot delete the default security group.

    For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

    ", + "DeleteClusterSnapshot": "

    Deletes the specified manual snapshot. The snapshot must be in the available state, with no other users authorized to access the snapshot.

    Unlike automated snapshots, manual snapshots are retained even after you delete your cluster. Amazon Redshift does not delete your manual snapshots. You must delete a manual snapshot explicitly to avoid getting charged. If other accounts are authorized to access the snapshot, you must revoke all of the authorizations before you can delete the snapshot.

    ", + "DeleteClusterSubnetGroup": "

    Deletes the specified cluster subnet group.

    ", + "DeleteEventSubscription": "

    Deletes an Amazon Redshift event notification subscription.

    ", + "DeleteHsmClientCertificate": "

    Deletes the specified HSM client certificate.

    ", + "DeleteHsmConfiguration": "

    Deletes the specified Amazon Redshift HSM configuration.

    ", + "DeleteSnapshotCopyGrant": "

    Deletes the specified snapshot copy grant.

    ", + "DeleteTags": "

    Deletes a tag or tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

    ", + "DescribeClusterParameterGroups": "

    Returns a list of Amazon Redshift parameter groups, including parameter groups you created and the default parameter group. For each parameter group, the response includes the parameter group name, description, and parameter group family name. You can optionally specify a name to retrieve the description of a specific parameter group.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all parameter groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all parameter groups that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, parameter groups are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeClusterParameters": "

    Returns a detailed list of parameters contained within the specified Amazon Redshift parameter group. For each parameter the response includes information such as parameter name, description, data type, value, whether the parameter value is modifiable, and so on.

    You can specify source filter to retrieve parameters of only specific type. For example, to retrieve parameters that were modified by a user action such as from ModifyClusterParameterGroup, you can specify source equal to user.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeClusterSecurityGroups": "

    Returns information about Amazon Redshift security groups. If the name of a security group is specified, the response will contain information about only that security group.

    For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all security groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all security groups that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, security groups are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeClusterSnapshots": "

    Returns one or more snapshot objects, which contain metadata about your cluster snapshots. By default, this operation returns information about all snapshots of all clusters that are owned by your AWS customer account. No information is returned for snapshots owned by inactive AWS customer accounts.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all snapshots that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all snapshots that have any combination of those values are returned. Only snapshots that you own are returned in the response; shared snapshots are not returned with the tag key and tag value request parameters.

    If both tag keys and values are omitted from the request, snapshots are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeClusterSubnetGroups": "

    Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in your AWS account.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeClusterVersions": "

    Returns descriptions of the available Amazon Redshift cluster versions. You can call this operation even before creating any clusters to learn more about the Amazon Redshift versions. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide

    ", + "DescribeClusters": "

    Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeDefaultClusterParameters": "

    Returns a list of parameter settings for the specified parameter group family.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeEventCategories": "

    Displays a list of event categories for all event source types, or for a specified source type. For a list of the event categories and source types, go to Amazon Redshift Event Notifications.

    ", + "DescribeEventSubscriptions": "

    Lists descriptions of all the Amazon Redshift event notifications subscription for a customer account. If you specify a subscription name, lists the description for that subscription.

    ", + "DescribeEvents": "

    Returns events related to clusters, security groups, snapshots, and parameter groups for the past 14 days. Events specific to a particular cluster, security group, snapshot or parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

    ", + "DescribeHsmClientCertificates": "

    Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM client certificates that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeHsmConfigurations": "

    Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM connections that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.

    ", + "DescribeLoggingStatus": "

    Describes whether information, such as queries and connection attempts, is being logged for the specified Amazon Redshift cluster.

    ", + "DescribeOrderableClusterOptions": "

    Returns a list of orderable cluster options. Before you create a new cluster you can use this operation to find what options are available, such as the EC2 Availability Zones (AZ) in the specific AWS region that you can specify, and the node types you can request. The node types differ by available storage, memory, CPU and price. With the cost involved you might want to obtain a list of cluster options in the specific region and specify values when creating a cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide

    ", + "DescribeReservedNodeOfferings": "

    Returns a list of the available reserved node offerings by Amazon Redshift with their descriptions including the node type, the fixed and recurring costs of reserving the node and duration the node will be reserved for you. These descriptions help you determine which reserve node offering you want to purchase. You then use the unique offering ID in you call to PurchaseReservedNodeOffering to reserve one or more nodes for your Amazon Redshift cluster.

    For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeReservedNodes": "

    Returns the descriptions of the reserved nodes.

    ", + "DescribeResize": "

    Returns information about the last resize operation for the specified cluster. If no resize operation has ever been initiated for the specified cluster, a HTTP 404 error is returned. If a resize operation was initiated and completed, the status of the resize remains as SUCCEEDED until the next resize.

    A resize operation can be requested using ModifyCluster and specifying a different number or type of nodes for the cluster.

    ", + "DescribeSnapshotCopyGrants": "

    Returns a list of snapshot copy grants owned by the AWS account in the destination region.

    For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeTags": "

    Returns a list of tags. You can return tags from a specific resource by specifying an ARN, or you can return all tags for a given type of resource, such as clusters, snapshots, and so on.

    The following are limitations for DescribeTags:

    • You cannot specify an ARN and a resource-type value together in the same request.
    • You cannot use the MaxRecords and Marker parameters together with the ARN parameter.
    • The MaxRecords parameter can be a range from 10 to 50 results to return in a request.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all resources that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all resources that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, resources are returned regardless of whether they have tag keys or values associated with them.

    ", + "DisableLogging": "

    Stops logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

    ", + "DisableSnapshotCopy": "

    Disables the automatic copying of snapshots from one region to another region for a specified cluster.

    If your cluster and its snapshots are encrypted using a customer master key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that grants Amazon Redshift permission to the CMK in the destination region.

    ", + "EnableLogging": "

    Starts logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

    ", + "EnableSnapshotCopy": "

    Enables the automatic copy of snapshots from one region to another region for a specified cluster.

    ", + "ModifyCluster": "

    Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .

    You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

    ", + "ModifyClusterParameterGroup": "

    Modifies the parameters of a parameter group.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    ", + "ModifyClusterSubnetGroup": "

    Modifies a cluster subnet group to include the specified list of VPC subnets. The operation replaces the existing list of subnets with the new list of subnets.

    ", + "ModifyEventSubscription": "

    Modifies an existing Amazon Redshift event notification subscription.

    ", + "ModifySnapshotCopyRetentionPeriod": "

    Modifies the number of days to retain automated snapshots in the destination region after they are copied from the source region.

    ", + "PurchaseReservedNodeOffering": "

    Allows you to purchase reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. You can purchase one or more of the offerings. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings. You can call this API by providing a specific reserved node offering and the number of nodes you want to reserve.

    For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

    ", + "RebootCluster": "

    Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to rebooting. A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster) are applied at this reboot. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide

    ", + "ResetClusterParameterGroup": "

    Sets one or more parameters of the specified parameter group to their default values and sets the source values of the parameters to \"engine-default\". To reset the entire parameter group specify the ResetAllParameters parameter. For parameter changes to take effect you must reboot any associated clusters.

    ", + "RestoreFromClusterSnapshot": "

    Creates a new cluster from a snapshot. By default, Amazon Redshift creates the resulting cluster with the same configuration as the original cluster from which the snapshot was created, except that the new cluster is created with the default cluster security and parameter groups. After Amazon Redshift creates the cluster, you can use the ModifyCluster API to associate a different security group and different parameter group with the restored cluster. If you are using a DS node type, you can also choose to change to another DS node type of the same size during restore.

    If you restore a cluster into a VPC, you must provide a cluster subnet group where you want the cluster restored.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "RevokeClusterSecurityGroupIngress": "

    Revokes an ingress rule in an Amazon Redshift security group for a previously authorized IP range or Amazon EC2 security group. To add an ingress rule, see AuthorizeClusterSecurityGroupIngress. For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

    ", + "RevokeSnapshotAccess": "

    Removes the ability of the specified AWS customer account to restore the specified snapshot. If the account is currently restoring the snapshot, the restore will run to completion.

    For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

    ", + "RotateEncryptionKey": "

    Rotates the encryption keys for a cluster.

    " + }, + "service": "Amazon Redshift Overview

    This is an interface reference for Amazon Redshift. It contains documentation for one of the programming or command line interfaces you can use to manage Amazon Redshift clusters. Note that Amazon Redshift is asynchronous, which means that some interfaces may require techniques, such as polling or asynchronous callback handlers, to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a change is applied immediately, on the next instance reboot, or during the next maintenance window. For a summary of the Amazon Redshift cluster management interfaces, go to Using the Amazon Redshift Management Interfaces .

    Amazon Redshift manages all the work of setting up, operating, and scaling a data warehouse: provisioning capacity, monitoring and backing up the cluster, and applying patches and upgrades to the Amazon Redshift engine. You can focus on using your data to acquire new insights for your business and customers.

    If you are a first-time user of Amazon Redshift, we recommend that you begin by reading The Amazon Redshift Getting Started Guide

    If you are a database developer, the Amazon Redshift Database Developer Guide explains how to design, build, query, and maintain the databases that make up your data warehouse.

    ", + "shapes": { + "AccessToSnapshotDeniedFault": { + "base": "

    The owner of the specified snapshot has not authorized your account to access the snapshot.

    ", + "refs": { + } + }, + "AccountWithRestoreAccess": { + "base": "

    Describes an AWS customer account authorized to restore a snapshot.

    ", + "refs": { + "AccountsWithRestoreAccessList$member": null + } + }, + "AccountsWithRestoreAccessList": { + "base": null, + "refs": { + "Snapshot$AccountsWithRestoreAccess": "

    A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.

    " + } + }, + "AuthorizationAlreadyExistsFault": { + "base": "

    The specified CIDR block or EC2 security group is already authorized for the specified cluster security group.

    ", + "refs": { + } + }, + "AuthorizationNotFoundFault": { + "base": "

    The specified CIDR IP range or EC2 security group is not authorized for the specified cluster security group.

    ", + "refs": { + } + }, + "AuthorizationQuotaExceededFault": { + "base": "

    The authorization quota for the cluster security group has been reached.

    ", + "refs": { + } + }, + "AuthorizeClusterSecurityGroupIngressMessage": { + "base": "

    ???

    ", + "refs": { + } + }, + "AuthorizeSnapshotAccessMessage": { + "base": "

    ", + "refs": { + } + }, + "AvailabilityZone": { + "base": "

    Describes an availability zone.

    ", + "refs": { + "AvailabilityZoneList$member": null, + "Subnet$SubnetAvailabilityZone": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "OrderableClusterOption$AvailabilityZones": "

    A list of availability zones for the orderable cluster.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "Cluster$AllowVersionUpgrade": "

    If true, major version upgrades will be applied automatically to the cluster during the maintenance window.

    ", + "Cluster$PubliclyAccessible": "

    If true, the cluster can be accessed from a public network.

    ", + "Cluster$Encrypted": "

    If true, data in the cluster is encrypted at rest.

    ", + "DeleteClusterMessage$SkipFinalClusterSnapshot": "

    Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true, a final cluster snapshot is not created. If false, a final cluster snapshot is created before the cluster is deleted.

    The FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot is false.

    Default: false

    ", + "EventSubscription$Enabled": "

    A Boolean value indicating whether the subscription is enabled. true indicates the subscription is enabled.

    ", + "LoggingStatus$LoggingEnabled": "

    true if logging is on, false if logging is off.

    ", + "Parameter$IsModifiable": "

    If true, the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

    ", + "ResetClusterParameterGroupMessage$ResetAllParameters": "

    If true, all parameters in the specified parameter group will be reset to their default values.

    Default: true

    ", + "Snapshot$Encrypted": "

    If true, the data in the snapshot is encrypted at rest.

    ", + "Snapshot$EncryptedWithHSM": "

    A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. true indicates that the data is encrypted using HSM keys.

    " + } + }, + "BooleanOptional": { + "base": null, + "refs": { + "CreateClusterMessage$AllowVersionUpgrade": "

    If true, major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.

    When a new major version of the Amazon Redshift engine is released, you can request that the service automatically apply upgrades during the maintenance window to the Amazon Redshift engine that is running on your cluster.

    Default: true

    ", + "CreateClusterMessage$PubliclyAccessible": "

    If true, the cluster can be accessed from a public network.

    ", + "CreateClusterMessage$Encrypted": "

    If true, the data in the cluster is encrypted at rest.

    Default: false

    ", + "CreateEventSubscriptionMessage$Enabled": "

    A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

    ", + "ModifyClusterMessage$AllowVersionUpgrade": "

    If true, major version upgrades will be applied automatically to the cluster during the maintenance window.

    Default: false

    ", + "ModifyEventSubscriptionMessage$Enabled": "

    A Boolean value indicating if the subscription is enabled. true indicates the subscription is enabled

    ", + "RestoreFromClusterSnapshotMessage$AllowVersionUpgrade": "

    If true, major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.

    Default: true

    ", + "RestoreFromClusterSnapshotMessage$PubliclyAccessible": "

    If true, the cluster can be accessed from a public network.

    " + } + }, + "BucketNotFoundFault": { + "base": "

    Could not find the specified S3 bucket.

    ", + "refs": { + } + }, + "Cluster": { + "base": "

    Describes a cluster.

    ", + "refs": { + "ClusterList$member": null, + "CreateClusterResult$Cluster": null, + "DeleteClusterResult$Cluster": null, + "DisableSnapshotCopyResult$Cluster": null, + "EnableSnapshotCopyResult$Cluster": null, + "ModifyClusterResult$Cluster": null, + "ModifySnapshotCopyRetentionPeriodResult$Cluster": null, + "RebootClusterResult$Cluster": null, + "RestoreFromClusterSnapshotResult$Cluster": null, + "RotateEncryptionKeyResult$Cluster": null + } + }, + "ClusterAlreadyExistsFault": { + "base": "

    The account already has a cluster with the given identifier.

    ", + "refs": { + } + }, + "ClusterList": { + "base": null, + "refs": { + "ClustersMessage$Clusters": "

    A list of Cluster objects, where each object describes one cluster.

    " + } + }, + "ClusterNode": { + "base": "

    The identifier of a node in a cluster.

    ", + "refs": { + "ClusterNodesList$member": null + } + }, + "ClusterNodesList": { + "base": null, + "refs": { + "Cluster$ClusterNodes": "

    The nodes in a cluster.

    " + } + }, + "ClusterNotFoundFault": { + "base": "

    The ClusterIdentifier parameter does not refer to an existing cluster.

    ", + "refs": { + } + }, + "ClusterParameterGroup": { + "base": "

    Describes a parameter group.

    ", + "refs": { + "ParameterGroupList$member": null, + "CreateClusterParameterGroupResult$ClusterParameterGroup": null + } + }, + "ClusterParameterGroupAlreadyExistsFault": { + "base": "

    A cluster parameter group with the same name already exists.

    ", + "refs": { + } + }, + "ClusterParameterGroupDetails": { + "base": "

    Contains the output from the DescribeClusterParameters action.

    ", + "refs": { + } + }, + "ClusterParameterGroupNameMessage": { + "base": "

    Contains the output from the ModifyClusterParameterGroup and ResetClusterParameterGroup actions and indicates the parameter group involved and the status of the operation on the parameter group.

    ", + "refs": { + } + }, + "ClusterParameterGroupNotFoundFault": { + "base": "

    The parameter group name does not refer to an existing parameter group.

    ", + "refs": { + } + }, + "ClusterParameterGroupQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of cluster parameter groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterParameterGroupStatus": { + "base": "

    Describes the status of a parameter group.

    ", + "refs": { + "ClusterParameterGroupStatusList$member": null + } + }, + "ClusterParameterGroupStatusList": { + "base": null, + "refs": { + "Cluster$ClusterParameterGroups": "

    The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.

    " + } + }, + "ClusterParameterGroupsMessage": { + "base": "

    Contains the output from the DescribeClusterParameterGroups action.

    ", + "refs": { + } + }, + "ClusterParameterStatus": { + "base": "

    Describes the status of a parameter group.

    ", + "refs": { + "ClusterParameterStatusList$member": null + } + }, + "ClusterParameterStatusList": { + "base": null, + "refs": { + "ClusterParameterGroupStatus$ClusterParameterStatusList": "

    The list of parameter statuses.

    For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    " + } + }, + "ClusterQuotaExceededFault": { + "base": "

    The request would exceed the allowed number of cluster instances for this account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterSecurityGroup": { + "base": "

    Describes a security group.

    ", + "refs": { + "ClusterSecurityGroups$member": null, + "AuthorizeClusterSecurityGroupIngressResult$ClusterSecurityGroup": null, + "CreateClusterSecurityGroupResult$ClusterSecurityGroup": null, + "RevokeClusterSecurityGroupIngressResult$ClusterSecurityGroup": null + } + }, + "ClusterSecurityGroupAlreadyExistsFault": { + "base": "

    A cluster security group with the same name already exists.

    ", + "refs": { + } + }, + "ClusterSecurityGroupMembership": { + "base": "

    Describes a security group.

    ", + "refs": { + "ClusterSecurityGroupMembershipList$member": null + } + }, + "ClusterSecurityGroupMembershipList": { + "base": null, + "refs": { + "Cluster$ClusterSecurityGroups": "

    A list of cluster security groups that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.

    Cluster security groups are used when the cluster is not created in a VPC. Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.

    " + } + }, + "ClusterSecurityGroupMessage": { + "base": "

    Contains the output from the DescribeClusterSecurityGroups action.

    ", + "refs": { + } + }, + "ClusterSecurityGroupNameList": { + "base": null, + "refs": { + "CreateClusterMessage$ClusterSecurityGroups": "

    A list of security groups to be associated with this cluster.

    Default: The default cluster security group for Amazon Redshift.

    ", + "ModifyClusterMessage$ClusterSecurityGroups": "

    A list of cluster security groups to be authorized on this cluster. This change is asynchronously applied as soon as possible.

    Security groups currently associated with the cluster, and not in the list of groups to apply, will be revoked from the cluster.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "RestoreFromClusterSnapshotMessage$ClusterSecurityGroups": "

    A list of security groups to be associated with this cluster.

    Default: The default cluster security group for Amazon Redshift.

    Cluster security groups only apply to clusters outside of VPCs.

    " + } + }, + "ClusterSecurityGroupNotFoundFault": { + "base": "

    The cluster security group name does not refer to an existing cluster security group.

    ", + "refs": { + } + }, + "ClusterSecurityGroupQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of cluster security groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterSecurityGroups": { + "base": null, + "refs": { + "ClusterSecurityGroupMessage$ClusterSecurityGroups": "

    A list of ClusterSecurityGroup instances.

    " + } + }, + "ClusterSnapshotAlreadyExistsFault": { + "base": "

    The value specified as a snapshot identifier is already used by an existing snapshot.

    ", + "refs": { + } + }, + "ClusterSnapshotCopyStatus": { + "base": "

    Returns the destination region and retention period that are configured for cross-region snapshot copy.

    ", + "refs": { + "Cluster$ClusterSnapshotCopyStatus": "

    Returns the destination region and retention period that are configured for cross-region snapshot copy.

    " + } + }, + "ClusterSnapshotNotFoundFault": { + "base": "

    The snapshot identifier does not refer to an existing cluster snapshot.

    ", + "refs": { + } + }, + "ClusterSnapshotQuotaExceededFault": { + "base": "

    The request would result in the user exceeding the allowed number of cluster snapshots.

    ", + "refs": { + } + }, + "ClusterSubnetGroup": { + "base": "

    Describes a subnet group.

    ", + "refs": { + "ClusterSubnetGroups$member": null, + "CreateClusterSubnetGroupResult$ClusterSubnetGroup": null, + "ModifyClusterSubnetGroupResult$ClusterSubnetGroup": null + } + }, + "ClusterSubnetGroupAlreadyExistsFault": { + "base": "

    A ClusterSubnetGroupName is already used by an existing cluster subnet group.

    ", + "refs": { + } + }, + "ClusterSubnetGroupMessage": { + "base": "

    Contains the output from the DescribeClusterSubnetGroups action.

    ", + "refs": { + } + }, + "ClusterSubnetGroupNotFoundFault": { + "base": "

    The cluster subnet group name does not refer to an existing cluster subnet group.

    ", + "refs": { + } + }, + "ClusterSubnetGroupQuotaExceededFault": { + "base": "

    The request would result in user exceeding the allowed number of cluster subnet groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterSubnetGroups": { + "base": null, + "refs": { + "ClusterSubnetGroupMessage$ClusterSubnetGroups": "

    A list of ClusterSubnetGroup instances.

    " + } + }, + "ClusterSubnetQuotaExceededFault": { + "base": "

    The request would result in user exceeding the allowed number of subnets in a cluster subnet groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ClusterVersion": { + "base": "

    Describes a cluster version, including the parameter group family and description of the version.

    ", + "refs": { + "ClusterVersionList$member": null + } + }, + "ClusterVersionList": { + "base": null, + "refs": { + "ClusterVersionsMessage$ClusterVersions": "

    A list of Version elements.

    " + } + }, + "ClusterVersionsMessage": { + "base": "

    Contains the output from the DescribeClusterVersions action.

    ", + "refs": { + } + }, + "ClustersMessage": { + "base": "

    Contains the output from the DescribeClusters action.

    ", + "refs": { + } + }, + "CopyClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CopyToRegionDisabledFault": { + "base": "

    Cross-region snapshot copy was temporarily disabled. Try your request again.

    ", + "refs": { + } + }, + "CreateClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateClusterSecurityGroupMessage": { + "base": "

    ???

    ", + "refs": { + } + }, + "CreateClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateClusterSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateHsmClientCertificateMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateHsmConfigurationMessage": { + "base": "

    ", + "refs": { + } + }, + "CreateSnapshotCopyGrantMessage": { + "base": "

    The result of the CreateSnapshotCopyGrant action.

    ", + "refs": { + } + }, + "CreateTagsMessage": { + "base": "

    Contains the output from the CreateTags action.

    ", + "refs": { + } + }, + "DefaultClusterParameters": { + "base": "

    Describes the default cluster parameters for a parameter group family.

    ", + "refs": { + "DescribeDefaultClusterParametersResult$DefaultClusterParameters": null + } + }, + "DeleteClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteClusterSecurityGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteClusterSubnetGroupMessage": { + "base": null, + "refs": { + } + }, + "DeleteEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteHsmClientCertificateMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteHsmConfigurationMessage": { + "base": "

    ", + "refs": { + } + }, + "DeleteSnapshotCopyGrantMessage": { + "base": "

    The result of the DeleteSnapshotCopyGrant action.

    ", + "refs": { + } + }, + "DeleteTagsMessage": { + "base": "

    Contains the output from the DeleteTags action.

    ", + "refs": { + } + }, + "DescribeClusterParameterGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeClusterParametersMessage": { + "base": null, + "refs": { + } + }, + "DescribeClusterSecurityGroupsMessage": { + "base": "

    ???

    ", + "refs": { + } + }, + "DescribeClusterSnapshotsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeClusterSubnetGroupsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeClusterVersionsMessage": { + "base": null, + "refs": { + } + }, + "DescribeClustersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeDefaultClusterParametersMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeEventsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeHsmClientCertificatesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeHsmConfigurationsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeLoggingStatusMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeOrderableClusterOptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedNodeOfferingsMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeReservedNodesMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeResizeMessage": { + "base": "

    ", + "refs": { + } + }, + "DescribeSnapshotCopyGrantsMessage": { + "base": "

    The result of the DescribeSnapshotCopyGrants action.

    ", + "refs": { + } + }, + "DescribeTagsMessage": { + "base": "

    Contains the output from the DescribeTags action.

    ", + "refs": { + } + }, + "DisableLoggingMessage": { + "base": "

    ", + "refs": { + } + }, + "DisableSnapshotCopyMessage": { + "base": "

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "RecurringCharge$RecurringChargeAmount": "

    The amount charged per the period of time specified by the recurring charge frequency.

    ", + "ReservedNode$FixedPrice": "

    The fixed cost Amazon Redshift charges you for this reserved node.

    ", + "ReservedNode$UsagePrice": "

    The hourly rate Amazon Redshift charges you for this reserved node.

    ", + "ReservedNodeOffering$FixedPrice": "

    The upfront fixed charge you will pay to purchase the specific reserved node offering.

    ", + "ReservedNodeOffering$UsagePrice": "

    The rate you are charged for each hour the cluster that is using the offering is running.

    ", + "RestoreStatus$CurrentRestoreRateInMegaBytesPerSecond": "

    The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup.

    ", + "Snapshot$TotalBackupSizeInMegaBytes": "

    The size of the complete set of backup data that would be used to restore the cluster.

    ", + "Snapshot$ActualIncrementalBackupSizeInMegaBytes": "

    The size of the incremental backup.

    ", + "Snapshot$BackupProgressInMegaBytes": "

    The number of megabytes that have been transferred to the snapshot backup.

    ", + "Snapshot$CurrentBackupRateInMegaBytesPerSecond": "

    The number of megabytes per second being transferred to the snapshot backup. Returns 0 for a completed backup.

    " + } + }, + "DoubleOptional": { + "base": null, + "refs": { + "ResizeProgressMessage$AvgResizeRateInMegaBytesPerSecond": "

    The average rate of the resize operation over the last few minutes, measured in megabytes per second. After the resize operation completes, this value shows the average rate of the entire resize operation.

    " + } + }, + "EC2SecurityGroup": { + "base": "

    Describes an Amazon EC2 security group.

    ", + "refs": { + "EC2SecurityGroupList$member": null + } + }, + "EC2SecurityGroupList": { + "base": null, + "refs": { + "ClusterSecurityGroup$EC2SecurityGroups": "

    A list of EC2 security groups that are permitted to access clusters associated with this cluster security group.

    " + } + }, + "ElasticIpStatus": { + "base": "

    Describes the status of the elastic IP (EIP) address.

    ", + "refs": { + "Cluster$ElasticIpStatus": "

    Describes the status of the elastic IP (EIP) address.

    " + } + }, + "EnableLoggingMessage": { + "base": "

    ", + "refs": { + } + }, + "EnableSnapshotCopyMessage": { + "base": "

    ", + "refs": { + } + }, + "Endpoint": { + "base": "

    Describes a connection endpoint.

    ", + "refs": { + "Cluster$Endpoint": "

    The connection endpoint.

    " + } + }, + "Event": { + "base": "

    Describes an event.

    ", + "refs": { + "EventList$member": null + } + }, + "EventCategoriesList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$EventCategories": "

    Specifies the Amazon Redshift event categories to be published by the event notification subscription.

    Values: Configuration, Management, Monitoring, Security

    ", + "Event$EventCategories": "

    A list of the event categories.

    Values: Configuration, Management, Monitoring, Security

    ", + "EventInfoMap$EventCategories": "

    The category of an Amazon Redshift event.

    ", + "EventSubscription$EventCategoriesList": "

    The list of Amazon Redshift event categories specified in the event notification subscription.

    Values: Configuration, Management, Monitoring, Security

    ", + "ModifyEventSubscriptionMessage$EventCategories": "

    Specifies the Amazon Redshift event categories to be published by the event notification subscription.

    Values: Configuration, Management, Monitoring, Security

    " + } + }, + "EventCategoriesMap": { + "base": null, + "refs": { + "EventCategoriesMapList$member": null + } + }, + "EventCategoriesMapList": { + "base": null, + "refs": { + "EventCategoriesMessage$EventCategoriesMapList": "

    A list of event categories descriptions.

    " + } + }, + "EventCategoriesMessage": { + "base": "

    ", + "refs": { + } + }, + "EventInfoMap": { + "base": null, + "refs": { + "EventInfoMapList$member": null + } + }, + "EventInfoMapList": { + "base": null, + "refs": { + "EventCategoriesMap$Events": "

    The events in the event category.

    " + } + }, + "EventList": { + "base": null, + "refs": { + "EventsMessage$Events": "

    A list of Event instances.

    " + } + }, + "EventSubscription": { + "base": null, + "refs": { + "EventSubscriptionsList$member": null, + "CreateEventSubscriptionResult$EventSubscription": null, + "ModifyEventSubscriptionResult$EventSubscription": null + } + }, + "EventSubscriptionQuotaExceededFault": { + "base": "

    The request would exceed the allowed number of event subscriptions for this account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "EventSubscriptionsList": { + "base": null, + "refs": { + "EventSubscriptionsMessage$EventSubscriptionsList": "

    A list of event subscriptions.

    " + } + }, + "EventSubscriptionsMessage": { + "base": "

    ", + "refs": { + } + }, + "EventsMessage": { + "base": "

    Contains the output from the DescribeEvents action.

    ", + "refs": { + } + }, + "HsmClientCertificate": { + "base": "

    Returns information about an HSM client certificate. The certificate is stored in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift cluster to encrypt data files.

    ", + "refs": { + "HsmClientCertificateList$member": null, + "CreateHsmClientCertificateResult$HsmClientCertificate": null + } + }, + "HsmClientCertificateAlreadyExistsFault": { + "base": "

    There is already an existing Amazon Redshift HSM client certificate with the specified identifier.

    ", + "refs": { + } + }, + "HsmClientCertificateList": { + "base": null, + "refs": { + "HsmClientCertificateMessage$HsmClientCertificates": "

    A list of the identifiers for one or more HSM client certificates used by Amazon Redshift clusters to store and retrieve database encryption keys in an HSM.

    " + } + }, + "HsmClientCertificateMessage": { + "base": "

    ", + "refs": { + } + }, + "HsmClientCertificateNotFoundFault": { + "base": "

    There is no Amazon Redshift HSM client certificate with the specified identifier.

    ", + "refs": { + } + }, + "HsmClientCertificateQuotaExceededFault": { + "base": "

    The quota for HSM client certificates has been reached. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "HsmConfiguration": { + "base": "

    Returns information about an HSM configuration, which is an object that describes to Amazon Redshift clusters the information they require to connect to an HSM where they can store database encryption keys.

    ", + "refs": { + "HsmConfigurationList$member": null, + "CreateHsmConfigurationResult$HsmConfiguration": null + } + }, + "HsmConfigurationAlreadyExistsFault": { + "base": "

    There is already an existing Amazon Redshift HSM configuration with the specified identifier.

    ", + "refs": { + } + }, + "HsmConfigurationList": { + "base": null, + "refs": { + "HsmConfigurationMessage$HsmConfigurations": "

    A list of Amazon Redshift HSM configurations.

    " + } + }, + "HsmConfigurationMessage": { + "base": "

    ", + "refs": { + } + }, + "HsmConfigurationNotFoundFault": { + "base": "

    There is no Amazon Redshift HSM configuration with the specified identifier.

    ", + "refs": { + } + }, + "HsmConfigurationQuotaExceededFault": { + "base": "

    The quota for HSM configurations has been reached. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "HsmStatus": { + "base": "

    ", + "refs": { + "Cluster$HsmStatus": "

    Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.

    Values: active, applying

    " + } + }, + "IPRange": { + "base": "

    Describes an IP range used in a security group.

    ", + "refs": { + "IPRangeList$member": null + } + }, + "IPRangeList": { + "base": null, + "refs": { + "ClusterSecurityGroup$IPRanges": "

    A list of IP ranges (CIDR blocks) that are permitted to access clusters associated with this cluster security group.

    " + } + }, + "ImportTablesCompleted": { + "base": null, + "refs": { + "ResizeProgressMessage$ImportTablesCompleted": "

    The names of tables that have been completely imported.

    Valid Values: List of table names.

    " + } + }, + "ImportTablesInProgress": { + "base": null, + "refs": { + "ResizeProgressMessage$ImportTablesInProgress": "

    The names of tables that are currently being imported.

    Valid Values: List of table names.

    " + } + }, + "ImportTablesNotStarted": { + "base": null, + "refs": { + "ResizeProgressMessage$ImportTablesNotStarted": "

    The names of tables that have not been yet imported.

    Valid Values: List of table names

    " + } + }, + "IncompatibleOrderableOptions": { + "base": "

    The specified options are incompatible.

    ", + "refs": { + } + }, + "InsufficientClusterCapacityFault": { + "base": "

    The number of nodes specified exceeds the allotted capacity of the cluster.

    ", + "refs": { + } + }, + "InsufficientS3BucketPolicyFault": { + "base": "

    The cluster does not have read bucket or put object permissions on the S3 bucket specified when enabling logging.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "Cluster$AutomatedSnapshotRetentionPeriod": "

    The number of days that automatic cluster snapshots are retained.

    ", + "Cluster$NumberOfNodes": "

    The number of compute nodes in the cluster.

    ", + "Endpoint$Port": "

    The port that the database engine is listening on.

    ", + "ModifySnapshotCopyRetentionPeriodMessage$RetentionPeriod": "

    The number of days to retain automated snapshots in the destination region after they are copied from the source region.

    If you decrease the retention period for automated snapshots that are copied to a destination region, Amazon Redshift will delete any existing automated snapshots that were copied to the destination region and that fall outside of the new retention period.

    Constraints: Must be at least 1 and no more than 35.

    ", + "ReservedNode$Duration": "

    The duration of the node reservation in seconds.

    ", + "ReservedNode$NodeCount": "

    The number of reserved compute nodes.

    ", + "ReservedNodeOffering$Duration": "

    The duration, in seconds, for which the offering will reserve the node.

    ", + "Snapshot$Port": "

    The port that the cluster is listening on.

    ", + "Snapshot$NumberOfNodes": "

    The number of nodes in the cluster.

    " + } + }, + "IntegerOptional": { + "base": null, + "refs": { + "CreateClusterMessage$AutomatedSnapshotRetentionPeriod": "

    The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

    Default: 1

    Constraints: Must be a value from 0 to 35.

    ", + "CreateClusterMessage$Port": "

    The port number on which the cluster accepts incoming connections.

    The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.

    Default: 5439

    Valid Values: 1150-65535

    ", + "CreateClusterMessage$NumberOfNodes": "

    The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node.

    For information about determining how many nodes you need, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

    If you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster.

    Default: 1

    Constraints: Value must be at least 1 and no more than 100.

    ", + "DescribeClusterParameterGroupsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeClusterParametersMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeClusterSecurityGroupsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeClusterSnapshotsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeClusterSubnetGroupsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeClusterVersionsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeClustersMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeDefaultClusterParametersMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeEventSubscriptionsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeEventsMessage$Duration": "

    The number of minutes prior to the time of the request for which to retrieve events. For example, if the request is sent at 18:00 and you specify a duration of 60, then only events which have occurred after 17:00 will be returned.

    Default: 60

    ", + "DescribeEventsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeHsmClientCertificatesMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeHsmConfigurationsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeOrderableClusterOptionsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeReservedNodeOfferingsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeReservedNodesMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeSnapshotCopyGrantsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    ", + "DescribeTagsMessage$MaxRecords": "

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    ", + "EnableSnapshotCopyMessage$RetentionPeriod": "

    The number of days to retain automated snapshots in the destination region after they are copied from the source region.

    Default: 7.

    Constraints: Must be at least 1 and no more than 35.

    ", + "ModifyClusterMessage$NumberOfNodes": "

    The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter.

    When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use DescribeResize to track the progress of the resize request.

    Valid Values: Integer greater than 0.

    ", + "ModifyClusterMessage$AutomatedSnapshotRetentionPeriod": "

    The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

    If you decrease the automated snapshot retention period from its current value, existing automated snapshots that fall outside of the new retention period will be immediately deleted.

    Default: Uses existing setting.

    Constraints: Must be a value from 0 to 35.

    ", + "PendingModifiedValues$NumberOfNodes": "

    The pending or in-progress change of the number of nodes in the cluster.

    ", + "PendingModifiedValues$AutomatedSnapshotRetentionPeriod": "

    The pending or in-progress change of the automated snapshot retention period.

    ", + "PurchaseReservedNodeOfferingMessage$NodeCount": "

    The number of reserved nodes you want to purchase.

    Default: 1

    ", + "ResizeProgressMessage$TargetNumberOfNodes": "

    The number of nodes that the cluster will have after the resize operation is complete.

    ", + "RestoreFromClusterSnapshotMessage$Port": "

    The port number on which the cluster accepts connections.

    Default: The same port as the original cluster.

    Constraints: Must be between 1115 and 65535.

    ", + "RestoreFromClusterSnapshotMessage$AutomatedSnapshotRetentionPeriod": "

    The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

    Default: The value selected for the cluster from which the snapshot was taken.

    Constraints: Must be a value from 0 to 35.

    " + } + }, + "InvalidClusterParameterGroupStateFault": { + "base": "

    The cluster parameter group action can not be completed because another task is in progress that involves the parameter group. Wait a few moments and try the operation again.

    ", + "refs": { + } + }, + "InvalidClusterSecurityGroupStateFault": { + "base": "

    The state of the cluster security group is not available.

    ", + "refs": { + } + }, + "InvalidClusterSnapshotStateFault": { + "base": "

    The state of the cluster snapshot is not available, or other accounts are authorized to access the snapshot.

    ", + "refs": { + } + }, + "InvalidClusterStateFault": { + "base": "

    The specified cluster is not in the available state.

    ", + "refs": { + } + }, + "InvalidClusterSubnetGroupStateFault": { + "base": "

    The cluster subnet group cannot be deleted because it is in use.

    ", + "refs": { + } + }, + "InvalidClusterSubnetStateFault": { + "base": "

    The state of the subnet is invalid.

    ", + "refs": { + } + }, + "InvalidElasticIpFault": { + "base": "

    The Elastic IP (EIP) is invalid or cannot be found.

    ", + "refs": { + } + }, + "InvalidHsmClientCertificateStateFault": { + "base": "

    The specified HSM client certificate is not in the available state, or it is still in use by one or more Amazon Redshift clusters.

    ", + "refs": { + } + }, + "InvalidHsmConfigurationStateFault": { + "base": "

    The specified HSM configuration is not in the available state, or it is still in use by one or more Amazon Redshift clusters.

    ", + "refs": { + } + }, + "InvalidRestoreFault": { + "base": "

    The restore is invalid.

    ", + "refs": { + } + }, + "InvalidS3BucketNameFault": { + "base": "

    The S3 bucket name is invalid. For more information about naming rules, go to Bucket Restrictions and Limitations in the Amazon Simple Storage Service (S3) Developer Guide.

    ", + "refs": { + } + }, + "InvalidS3KeyPrefixFault": { + "base": "

    The string specified for the logging S3 key prefix does not comply with the documented constraints.

    ", + "refs": { + } + }, + "InvalidSnapshotCopyGrantStateFault": { + "base": "

    The snapshot copy grant can't be deleted because it is used by one or more clusters.

    ", + "refs": { + } + }, + "InvalidSubnet": { + "base": "

    The requested subnet is not valid, or not all of the subnets are in the same VPC.

    ", + "refs": { + } + }, + "InvalidSubscriptionStateFault": { + "base": "

    The subscription request is invalid because it is a duplicate request. This subscription request is already in progress.

    ", + "refs": { + } + }, + "InvalidTagFault": { + "base": "

    The tag is invalid.

    ", + "refs": { + } + }, + "InvalidVPCNetworkStateFault": { + "base": "

    The cluster subnet group does not cover all Availability Zones.

    ", + "refs": { + } + }, + "LimitExceededFault": { + "base": "

    The encryption key has exceeded its grant limit in AWS KMS.

    ", + "refs": { + } + }, + "LoggingStatus": { + "base": "

    Describes the status of logging for a cluster.

    ", + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "ClusterSnapshotCopyStatus$RetentionPeriod": "

    The number of days that automated snapshots are retained in the destination region after they are copied from a source region.

    ", + "RestoreStatus$SnapshotSizeInMegaBytes": "

    The size of the set of snapshot data used to restore the cluster.

    ", + "RestoreStatus$ProgressInMegaBytes": "

    The number of megabytes that have been transferred from snapshot storage.

    ", + "RestoreStatus$ElapsedTimeInSeconds": "

    The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish.

    ", + "RestoreStatus$EstimatedTimeToCompletionInSeconds": "

    The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore.

    ", + "Snapshot$EstimatedSecondsToCompletion": "

    The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.

    ", + "Snapshot$ElapsedTimeInSeconds": "

    The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.

    " + } + }, + "LongOptional": { + "base": null, + "refs": { + "ResizeProgressMessage$TotalResizeDataInMegaBytes": "

    The estimated total amount of data, in megabytes, on the cluster before the resize operation began.

    ", + "ResizeProgressMessage$ProgressInMegaBytes": "

    While the resize operation is in progress, this value shows the current amount of data, in megabytes, that has been processed so far. When the resize operation is complete, this value shows the total amount of data, in megabytes, on the cluster, which may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data before resize).

    ", + "ResizeProgressMessage$ElapsedTimeInSeconds": "

    The amount of seconds that have elapsed since the resize operation began. After the resize operation completes, this value shows the total actual time, in seconds, for the resize operation.

    ", + "ResizeProgressMessage$EstimatedTimeToCompletionInSeconds": "

    The estimated time remaining, in seconds, until the resize operation is complete. This value is calculated based on the average resize rate and the estimated amount of data remaining to be processed. Once the resize operation is complete, this value will be 0.

    " + } + }, + "ModifyClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyClusterSubnetGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifyEventSubscriptionMessage": { + "base": "

    ", + "refs": { + } + }, + "ModifySnapshotCopyRetentionPeriodMessage": { + "base": "

    ", + "refs": { + } + }, + "NumberOfNodesPerClusterLimitExceededFault": { + "base": "

    The operation would exceed the number of nodes allowed for a cluster.

    ", + "refs": { + } + }, + "NumberOfNodesQuotaExceededFault": { + "base": "

    The operation would exceed the number of nodes allotted to the account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "OrderableClusterOption": { + "base": "

    Describes an orderable cluster option.

    ", + "refs": { + "OrderableClusterOptionsList$member": null + } + }, + "OrderableClusterOptionsList": { + "base": null, + "refs": { + "OrderableClusterOptionsMessage$OrderableClusterOptions": "

    An OrderableClusterOption structure containing information about orderable options for the Cluster.

    " + } + }, + "OrderableClusterOptionsMessage": { + "base": "

    Contains the output from the DescribeOrderableClusterOptions action.

    ", + "refs": { + } + }, + "Parameter": { + "base": "

    Describes a parameter in a cluster parameter group.

    ", + "refs": { + "ParametersList$member": null + } + }, + "ParameterApplyType": { + "base": null, + "refs": { + "Parameter$ApplyType": "

    Specifies how to apply the parameter. Supported value: static.

    " + } + }, + "ParameterGroupList": { + "base": null, + "refs": { + "ClusterParameterGroupsMessage$ParameterGroups": "

    A list of ClusterParameterGroup instances. Each instance describes one cluster parameter group.

    " + } + }, + "ParametersList": { + "base": null, + "refs": { + "ClusterParameterGroupDetails$Parameters": "

    A list of Parameter instances. Each instance lists the parameters of one cluster parameter group.

    ", + "DefaultClusterParameters$Parameters": "

    The list of cluster default parameters.

    ", + "ModifyClusterParameterGroupMessage$Parameters": "

    An array of parameters to be modified. A maximum of 20 parameters can be modified in a single request.

    For each parameter to be modified, you must supply at least the parameter name and parameter value; other name-value pairs of the parameter are optional.

    For the workload management (WLM) configuration, you must supply all the name-value pairs in the wlm_json_configuration parameter.

    ", + "ResetClusterParameterGroupMessage$Parameters": "

    An array of names of parameters to be reset. If ResetAllParameters option is not used, then at least one parameter name must be supplied.

    Constraints: A maximum of 20 parameters can be reset in a single request.

    " + } + }, + "PendingModifiedValues": { + "base": "

    Describes cluster attributes that are in a pending state. A change to one or more the attributes was requested and is in progress or will be applied.

    ", + "refs": { + "Cluster$PendingModifiedValues": "

    If present, changes to the cluster are pending. Specific pending changes are identified by subelements.

    " + } + }, + "PurchaseReservedNodeOfferingMessage": { + "base": "

    ", + "refs": { + } + }, + "RebootClusterMessage": { + "base": "

    ", + "refs": { + } + }, + "RecurringCharge": { + "base": "

    Describes a recurring charge.

    ", + "refs": { + "RecurringChargeList$member": null + } + }, + "RecurringChargeList": { + "base": null, + "refs": { + "ReservedNode$RecurringCharges": "

    The recurring charges for the reserved node.

    ", + "ReservedNodeOffering$RecurringCharges": "

    The charge to your account regardless of whether you are creating any clusters using the node offering. Recurring charges are only in effect for heavy-utilization reserved nodes.

    " + } + }, + "ReservedNode": { + "base": "

    Describes a reserved node. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings.

    ", + "refs": { + "ReservedNodeList$member": null, + "PurchaseReservedNodeOfferingResult$ReservedNode": null + } + }, + "ReservedNodeAlreadyExistsFault": { + "base": "

    User already has a reservation with the given identifier.

    ", + "refs": { + } + }, + "ReservedNodeList": { + "base": null, + "refs": { + "ReservedNodesMessage$ReservedNodes": "

    The list of reserved nodes.

    " + } + }, + "ReservedNodeNotFoundFault": { + "base": "

    The specified reserved compute node was not found.

    ", + "refs": { + } + }, + "ReservedNodeOffering": { + "base": "

    Describes a reserved node offering.

    ", + "refs": { + "ReservedNodeOfferingList$member": null + } + }, + "ReservedNodeOfferingList": { + "base": null, + "refs": { + "ReservedNodeOfferingsMessage$ReservedNodeOfferings": "

    A list of reserved node offerings.

    " + } + }, + "ReservedNodeOfferingNotFoundFault": { + "base": "

    Specified offering does not exist.

    ", + "refs": { + } + }, + "ReservedNodeOfferingsMessage": { + "base": "

    Contains the output from the DescribeReservedNodeOfferings action.

    ", + "refs": { + } + }, + "ReservedNodeQuotaExceededFault": { + "base": "

    Request would exceed the user's compute node quota. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + } + }, + "ReservedNodesMessage": { + "base": "

    Contains the output from the DescribeReservedNodes action.

    ", + "refs": { + } + }, + "ResetClusterParameterGroupMessage": { + "base": "

    ", + "refs": { + } + }, + "ResizeNotFoundFault": { + "base": "

    A resize operation for the specified cluster is not found.

    ", + "refs": { + } + }, + "ResizeProgressMessage": { + "base": "

    Describes the result of a cluster resize operation.

    ", + "refs": { + } + }, + "ResourceNotFoundFault": { + "base": "

    The resource could not be found.

    ", + "refs": { + } + }, + "RestorableNodeTypeList": { + "base": null, + "refs": { + "Snapshot$RestorableNodeTypes": "

    The list of node types that this cluster snapshot is able to restore into.

    " + } + }, + "RestoreFromClusterSnapshotMessage": { + "base": "

    ", + "refs": { + } + }, + "RestoreStatus": { + "base": "

    Describes the status of a cluster restore action. Returns null if the cluster was not created by restoring a snapshot.

    ", + "refs": { + "Cluster$RestoreStatus": "

    Describes the status of a cluster restore action. Returns null if the cluster was not created by restoring a snapshot.

    " + } + }, + "RevokeClusterSecurityGroupIngressMessage": { + "base": "

    Contains the parameters for the RevokeClusterSecurityGroupIngress action.

    ", + "refs": { + } + }, + "RevokeSnapshotAccessMessage": { + "base": "

    ", + "refs": { + } + }, + "RotateEncryptionKeyMessage": { + "base": "

    ", + "refs": { + } + }, + "SNSInvalidTopicFault": { + "base": "

    Amazon SNS has responded that there is a problem with the specified Amazon SNS topic.

    ", + "refs": { + } + }, + "SNSNoAuthorizationFault": { + "base": "

    You do not have permission to publish to the specified Amazon SNS topic.

    ", + "refs": { + } + }, + "SNSTopicArnNotFoundFault": { + "base": "

    An Amazon SNS topic with the specified Amazon Resource Name (ARN) does not exist.

    ", + "refs": { + } + }, + "Snapshot": { + "base": "

    Describes a snapshot.

    ", + "refs": { + "SnapshotList$member": null, + "AuthorizeSnapshotAccessResult$Snapshot": null, + "CopyClusterSnapshotResult$Snapshot": null, + "CreateClusterSnapshotResult$Snapshot": null, + "DeleteClusterSnapshotResult$Snapshot": null, + "RevokeSnapshotAccessResult$Snapshot": null + } + }, + "SnapshotCopyAlreadyDisabledFault": { + "base": "

    The cluster already has cross-region snapshot copy disabled.

    ", + "refs": { + } + }, + "SnapshotCopyAlreadyEnabledFault": { + "base": "

    The cluster already has cross-region snapshot copy enabled.

    ", + "refs": { + } + }, + "SnapshotCopyDisabledFault": { + "base": "

    Cross-region snapshot copy was temporarily disabled. Try your request again.

    ", + "refs": { + } + }, + "SnapshotCopyGrant": { + "base": "

    The snapshot copy grant that grants Amazon Redshift permission to encrypt copied snapshots with the specified customer master key (CMK) from AWS KMS in the destination region.

    For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

    ", + "refs": { + "SnapshotCopyGrantList$member": null, + "CreateSnapshotCopyGrantResult$SnapshotCopyGrant": null + } + }, + "SnapshotCopyGrantAlreadyExistsFault": { + "base": "

    The snapshot copy grant can't be created because a grant with the same name already exists.

    ", + "refs": { + } + }, + "SnapshotCopyGrantList": { + "base": null, + "refs": { + "SnapshotCopyGrantMessage$SnapshotCopyGrants": "

    The list of snapshot copy grants.

    " + } + }, + "SnapshotCopyGrantMessage": { + "base": "

    The result of the snapshot copy grant.

    ", + "refs": { + } + }, + "SnapshotCopyGrantNotFoundFault": { + "base": "

    The specified snapshot copy grant can't be found. Make sure that the name is typed correctly and that the grant exists in the destination region.

    ", + "refs": { + } + }, + "SnapshotCopyGrantQuotaExceededFault": { + "base": "

    The AWS account has exceeded the maximum number of snapshot copy grants in this region.

    ", + "refs": { + } + }, + "SnapshotList": { + "base": null, + "refs": { + "SnapshotMessage$Snapshots": "

    A list of Snapshot instances.

    " + } + }, + "SnapshotMessage": { + "base": "

    Contains the output from the DescribeClusterSnapshots action.

    ", + "refs": { + } + }, + "SourceIdsList": { + "base": null, + "refs": { + "CreateEventSubscriptionMessage$SourceIds": "

    A list of one or more identifiers of Amazon Redshift source objects. All of the objects must be of the same type as was specified in the source type parameter. The event subscription will return only events generated by the specified objects. If not specified, then events are returned for all objects within the source type specified.

    Example: my-cluster-1, my-cluster-2

    Example: my-snapshot-20131010

    ", + "EventSubscription$SourceIdsList": "

    A list of the sources that publish events to the Amazon Redshift event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SourceIds": "

    A list of one or more identifiers of Amazon Redshift source objects. All of the objects must be of the same type as was specified in the source type parameter. The event subscription will return only events generated by the specified objects. If not specified, then events are returned for all objects within the source type specified.

    Example: my-cluster-1, my-cluster-2

    Example: my-snapshot-20131010

    " + } + }, + "SourceNotFoundFault": { + "base": "

    The specified Amazon Redshift event source could not be found.

    ", + "refs": { + } + }, + "SourceType": { + "base": null, + "refs": { + "DescribeEventsMessage$SourceType": "

    The event source to retrieve events for. If no value is specified, all events are returned.

    Constraints:

    If SourceType is supplied, SourceIdentifier must also be provided.

    • Specify cluster when SourceIdentifier is a cluster identifier.
    • Specify cluster-security-group when SourceIdentifier is a cluster security group name.
    • Specify cluster-parameter-group when SourceIdentifier is a cluster parameter group name.
    • Specify cluster-snapshot when SourceIdentifier is a cluster snapshot identifier.
    ", + "Event$SourceType": "

    The source type for this event.

    " + } + }, + "String": { + "base": null, + "refs": { + "AccountWithRestoreAccess$AccountId": "

    The identifier of an AWS customer account authorized to restore a snapshot.

    ", + "AuthorizeClusterSecurityGroupIngressMessage$ClusterSecurityGroupName": "

    The name of the security group to which the ingress rule is added.

    ", + "AuthorizeClusterSecurityGroupIngressMessage$CIDRIP": "

    The IP range to be added to the Amazon Redshift security group.

    ", + "AuthorizeClusterSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The EC2 security group to be added to the Amazon Redshift security group.

    ", + "AuthorizeClusterSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS account number of the owner of the security group specified by the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value.

    Example: 111122223333

    ", + "AuthorizeSnapshotAccessMessage$SnapshotIdentifier": "

    The identifier of the snapshot the account is authorized to restore.

    ", + "AuthorizeSnapshotAccessMessage$SnapshotClusterIdentifier": "

    The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    ", + "AuthorizeSnapshotAccessMessage$AccountWithRestoreAccess": "

    The identifier of the AWS customer account authorized to restore the specified snapshot.

    ", + "AvailabilityZone$Name": "

    The name of the availability zone.

    ", + "Cluster$ClusterIdentifier": "

    The unique identifier of the cluster.

    ", + "Cluster$NodeType": "

    The node type for the nodes in the cluster.

    ", + "Cluster$ClusterStatus": "

    The current state of this cluster. Possible values include available, creating, deleting, rebooting, renaming, and resizing.

    ", + "Cluster$ModifyStatus": "

    The status of a modify operation, if any, initiated for the cluster.

    ", + "Cluster$MasterUsername": "

    The master user name for the cluster. This name is used to connect to the database that is specified in DBName.

    ", + "Cluster$DBName": "

    The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named \"dev\" was created by default.

    ", + "Cluster$ClusterSubnetGroupName": "

    The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.

    ", + "Cluster$VpcId": "

    The identifier of the VPC the cluster is in, if the cluster is in a VPC.

    ", + "Cluster$AvailabilityZone": "

    The name of the Availability Zone in which the cluster is located.

    ", + "Cluster$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur.

    ", + "Cluster$ClusterVersion": "

    The version ID of the Amazon Redshift engine that is running on the cluster.

    ", + "Cluster$ClusterPublicKey": "

    The public key for the cluster.

    ", + "Cluster$ClusterRevisionNumber": "

    The specific revision number of the database in the cluster.

    ", + "Cluster$KmsKeyId": "

    The AWS Key Management Service (KMS) key ID of the encryption key used to encrypt data in the cluster.

    ", + "ClusterNode$NodeRole": "

    Whether the node is a leader node or a compute node.

    ", + "ClusterNode$PrivateIPAddress": "

    The private IP address of a node within a cluster.

    ", + "ClusterNode$PublicIPAddress": "

    The public IP address of a node within a cluster.

    ", + "ClusterParameterGroup$ParameterGroupName": "

    The name of the cluster parameter group.

    ", + "ClusterParameterGroup$ParameterGroupFamily": "

    The name of the cluster parameter group family that this cluster parameter group is compatible with.

    ", + "ClusterParameterGroup$Description": "

    The description of the parameter group.

    ", + "ClusterParameterGroupDetails$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClusterParameterGroupNameMessage$ParameterGroupName": "

    The name of the cluster parameter group.

    ", + "ClusterParameterGroupNameMessage$ParameterGroupStatus": "

    The status of the parameter group. For example, if you made a change to a parameter group name-value pair, then the change could be pending a reboot of an associated cluster.

    ", + "ClusterParameterGroupStatus$ParameterGroupName": "

    The name of the cluster parameter group.

    ", + "ClusterParameterGroupStatus$ParameterApplyStatus": "

    The status of parameter updates.

    ", + "ClusterParameterGroupsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClusterParameterStatus$ParameterName": "

    The name of the parameter.

    ", + "ClusterParameterStatus$ParameterApplyStatus": "

    The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.

    The following are possible statuses and descriptions.

    • in-sync: The parameter value is in sync with the database.
    • pending-reboot: The parameter value will be applied after the cluster reboots.
    • applying: The parameter value is being applied to the database.
    • invalid-parameter: Cannot apply the parameter value because it has an invalid value or syntax.
    • apply-deferred: The parameter contains static property changes. The changes are deferred until the cluster reboots.
    • apply-error: Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.
    • unknown-error: Cannot apply the parameter change right now. The change will be applied after the cluster reboots.

    ", + "ClusterParameterStatus$ParameterApplyErrorDescription": "

    The error that prevented the parameter from being applied to the database.

    ", + "ClusterSecurityGroup$ClusterSecurityGroupName": "

    The name of the cluster security group to which the operation was applied.

    ", + "ClusterSecurityGroup$Description": "

    A description of the security group.

    ", + "ClusterSecurityGroupMembership$ClusterSecurityGroupName": "

    The name of the cluster security group.

    ", + "ClusterSecurityGroupMembership$Status": "

    The status of the cluster security group.

    ", + "ClusterSecurityGroupMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClusterSecurityGroupNameList$member": null, + "ClusterSnapshotCopyStatus$DestinationRegion": "

    The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.

    ", + "ClusterSnapshotCopyStatus$SnapshotCopyGrantName": "

    The name of the snapshot copy grant.

    ", + "ClusterSubnetGroup$ClusterSubnetGroupName": "

    The name of the cluster subnet group.

    ", + "ClusterSubnetGroup$Description": "

    The description of the cluster subnet group.

    ", + "ClusterSubnetGroup$VpcId": "

    The VPC ID of the cluster subnet group.

    ", + "ClusterSubnetGroup$SubnetGroupStatus": "

    The status of the cluster subnet group. Possible values are Complete, Incomplete and Invalid.

    ", + "ClusterSubnetGroupMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClusterVersion$ClusterVersion": "

    The version number used by the cluster.

    ", + "ClusterVersion$ClusterParameterGroupFamily": "

    The name of the cluster parameter group family for the cluster.

    ", + "ClusterVersion$Description": "

    The description of the cluster version.

    ", + "ClusterVersionsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ClustersMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "CopyClusterSnapshotMessage$SourceSnapshotIdentifier": "

    The identifier for the source snapshot.

    Constraints:

    • Must be the identifier for a valid automated snapshot whose state is available.
    ", + "CopyClusterSnapshotMessage$SourceSnapshotClusterIdentifier": "

    The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    Constraints:

    • Must be the identifier for a valid cluster.
    ", + "CopyClusterSnapshotMessage$TargetSnapshotIdentifier": "

    The identifier given to the new manual snapshot.

    Constraints:

    • Cannot be null, empty, or blank.
    • Must contain from 1 to 255 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for the AWS account that is making the request.
    ", + "CreateClusterMessage$DBName": "

    The name of the first database to be created when the cluster is created.

    To create additional databases after the cluster is created, connect to the cluster with a SQL client and use SQL commands to create a database. For more information, go to Create a Database in the Amazon Redshift Database Developer Guide.

    Default: dev

    Constraints:

    • Must contain 1 to 64 alphanumeric characters.
    • Must contain only lowercase letters.
    • Cannot be a word that is reserved by the service. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
    ", + "CreateClusterMessage$ClusterIdentifier": "

    A unique identifier for the cluster. You use this identifier to refer to the cluster for any subsequent cluster operations such as deleting or modifying. The identifier also appears in the Amazon Redshift console.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • Alphabetic characters must be lowercase.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for all clusters within an AWS account.

    Example: myexamplecluster

    ", + "CreateClusterMessage$ClusterType": "

    The type of the cluster. When cluster type is specified as

    • single-node, the NumberOfNodes parameter is not required.
    • multi-node, the NumberOfNodes parameter is required.

    Valid Values: multi-node | single-node

    Default: multi-node

    ", + "CreateClusterMessage$NodeType": "

    The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

    Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge.

    ", + "CreateClusterMessage$MasterUsername": "

    The user name associated with the master user account for the cluster that is being created.

    Constraints:

    • Must be 1 - 128 alphanumeric characters.
    • First character must be a letter.
    • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
    ", + "CreateClusterMessage$MasterUserPassword": "

    The password associated with the master user account for the cluster that is being created.

    Constraints:

    • Must be between 8 and 64 characters in length.
    • Must contain at least one uppercase letter.
    • Must contain at least one lowercase letter.
    • Must contain one number.
    • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), \" (double quote), \\, /, @, or space.
    ", + "CreateClusterMessage$ClusterSubnetGroupName": "

    The name of a cluster subnet group to be associated with this cluster.

    If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC).

    ", + "CreateClusterMessage$AvailabilityZone": "

    The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency.

    Default: A random, system-chosen Availability Zone in the region that is specified by the endpoint.

    Example: us-east-1d

    Constraint: The specified Availability Zone must be in the same region as the current endpoint.

    ", + "CreateClusterMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which automated cluster maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Minimum 30-minute window.

    ", + "CreateClusterMessage$ClusterParameterGroupName": "

    The name of the parameter group to be associated with this cluster.

    Default: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "CreateClusterMessage$ClusterVersion": "

    The version of the Amazon Redshift engine software that you want to deploy on the cluster.

    The version selected runs on all the nodes in the cluster.

    Constraints: Only version 1.0 is currently available.

    Example: 1.0

    ", + "CreateClusterMessage$HsmClientCertificateIdentifier": "

    Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

    ", + "CreateClusterMessage$HsmConfigurationIdentifier": "

    Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

    ", + "CreateClusterMessage$ElasticIp": "

    The Elastic IP (EIP) address for the cluster.

    Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible through an Internet gateway. For more information about provisioning clusters in EC2-VPC, go to Supported Platforms to Launch Your Cluster in the Amazon Redshift Cluster Management Guide.

    ", + "CreateClusterMessage$KmsKeyId": "

    The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

    ", + "CreateClusterParameterGroupMessage$ParameterGroupName": "

    The name of the cluster parameter group.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique within your AWS account.
    This value is stored as a lower-case string.", + "CreateClusterParameterGroupMessage$ParameterGroupFamily": "

    The Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters.

    To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups. By default, Amazon Redshift returns a list of all the parameter groups that are owned by your AWS account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is \"redshift-1.0\".

    ", + "CreateClusterParameterGroupMessage$Description": "

    A description of the parameter group.

    ", + "CreateClusterSecurityGroupMessage$ClusterSecurityGroupName": "

    The name for the security group. Amazon Redshift stores the value as a lowercase string.

    Constraints:

    • Must contain no more than 255 alphanumeric characters or hyphens.
    • Must not be \"Default\".
    • Must be unique for all security groups that are created by your AWS account.

    Example: examplesecuritygroup

    ", + "CreateClusterSecurityGroupMessage$Description": "

    A description for the security group.

    ", + "CreateClusterSnapshotMessage$SnapshotIdentifier": "

    A unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the AWS account.

    Constraints:

    • Cannot be null, empty, or blank
    • Must contain from 1 to 255 alphanumeric characters or hyphens
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens

    Example: my-snapshot-id

    ", + "CreateClusterSnapshotMessage$ClusterIdentifier": "

    The cluster identifier for which you want a snapshot.

    ", + "CreateClusterSubnetGroupMessage$ClusterSubnetGroupName": "

    The name for the subnet group. Amazon Redshift stores the value as a lowercase string.

    Constraints:

    • Must contain no more than 255 alphanumeric characters or hyphens.
    • Must not be \"Default\".
    • Must be unique for all subnet groups that are created by your AWS account.

    Example: examplesubnetgroup

    ", + "CreateClusterSubnetGroupMessage$Description": "

    A description for the subnet group.

    ", + "CreateEventSubscriptionMessage$SubscriptionName": "

    The name of the event subscription to be created.

    Constraints:

    • Cannot be null, empty, or blank.
    • Must contain from 1 to 255 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "CreateEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit the event notifications. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

    ", + "CreateEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

    Valid values: cluster, cluster-parameter-group, cluster-security-group, and cluster-snapshot.

    ", + "CreateEventSubscriptionMessage$Severity": "

    Specifies the Amazon Redshift event severity to be published by the event notification subscription.

    Values: ERROR, INFO

    ", + "CreateHsmClientCertificateMessage$HsmClientCertificateIdentifier": "

    The identifier to be assigned to the new HSM client certificate that the cluster will use to connect to the HSM to use the database encryption keys.

    ", + "CreateHsmConfigurationMessage$HsmConfigurationIdentifier": "

    The identifier to be assigned to the new Amazon Redshift HSM configuration.

    ", + "CreateHsmConfigurationMessage$Description": "

    A text description of the HSM configuration to be created.

    ", + "CreateHsmConfigurationMessage$HsmIpAddress": "

    The IP address that the Amazon Redshift cluster must use to access the HSM.

    ", + "CreateHsmConfigurationMessage$HsmPartitionName": "

    The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.

    ", + "CreateHsmConfigurationMessage$HsmPartitionPassword": "

    The password required to access the HSM partition.

    ", + "CreateHsmConfigurationMessage$HsmServerPublicCertificate": "

    The HSM's public certificate file. When using Cloud HSM, the file name is server.pem.

    ", + "CreateSnapshotCopyGrantMessage$SnapshotCopyGrantName": "

    The name of the snapshot copy grant. This name must be unique in the region for the AWS account.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • Alphabetic characters must be lowercase.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for all clusters within an AWS account.

    ", + "CreateSnapshotCopyGrantMessage$KmsKeyId": "

    The unique identifier of the customer master key (CMK) to which to grant Amazon Redshift permission. If no key is specified, the default key is used.

    ", + "CreateTagsMessage$ResourceName": "

    The Amazon Resource Name (ARN) to which you want to add the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

    ", + "DefaultClusterParameters$ParameterGroupFamily": "

    The name of the cluster parameter group family to which the engine default parameters apply.

    ", + "DefaultClusterParameters$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "DeleteClusterMessage$ClusterIdentifier": "

    The identifier of the cluster to be deleted.

    Constraints:

    • Must contain lowercase characters.
    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DeleteClusterMessage$FinalClusterSnapshotIdentifier": "

    The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot must be false.

    Constraints:

    • Must be 1 to 255 alphanumeric characters.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "DeleteClusterParameterGroupMessage$ParameterGroupName": "

    The name of the parameter group to be deleted.

    Constraints:

    • Must be the name of an existing cluster parameter group.
    • Cannot delete a default cluster parameter group.
    ", + "DeleteClusterSecurityGroupMessage$ClusterSecurityGroupName": "

    The name of the cluster security group to be deleted.

    ", + "DeleteClusterSnapshotMessage$SnapshotIdentifier": "

    The unique identifier of the manual snapshot to be deleted.

    Constraints: Must be the name of an existing snapshot that is in the available state.

    ", + "DeleteClusterSnapshotMessage$SnapshotClusterIdentifier": "

    The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    Constraints: Must be the name of a valid cluster.

    ", + "DeleteClusterSubnetGroupMessage$ClusterSubnetGroupName": "

    The name of the cluster subnet group name to be deleted.

    ", + "DeleteEventSubscriptionMessage$SubscriptionName": "

    The name of the Amazon Redshift event notification subscription to be deleted.

    ", + "DeleteHsmClientCertificateMessage$HsmClientCertificateIdentifier": "

    The identifier of the HSM client certificate to be deleted.

    ", + "DeleteHsmConfigurationMessage$HsmConfigurationIdentifier": "

    The identifier of the Amazon Redshift HSM configuration to be deleted.

    ", + "DeleteSnapshotCopyGrantMessage$SnapshotCopyGrantName": "

    The name of the snapshot copy grant to delete.

    ", + "DeleteTagsMessage$ResourceName": "

    The Amazon Resource Name (ARN) from which you want to remove the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

    ", + "DescribeClusterParameterGroupsMessage$ParameterGroupName": "

    The name of a specific parameter group for which to return details. By default, details about all parameter groups and the default parameter group are returned.

    ", + "DescribeClusterParameterGroupsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameterGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClusterParametersMessage$ParameterGroupName": "

    The name of a cluster parameter group for which to return details.

    ", + "DescribeClusterParametersMessage$Source": "

    The parameter types to return. Specify user to show parameters that are different from the default. Similarly, specify engine-default to show parameters that are the same as the default parameter group.

    Default: All parameter types returned.

    Valid Values: user | engine-default

    ", + "DescribeClusterParametersMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClusterSecurityGroupsMessage$ClusterSecurityGroupName": "

    The name of a cluster security group for which you are requesting details. You can specify either the Marker parameter or a ClusterSecurityGroupName parameter, but not both.

    Example: securitygroup1

    ", + "DescribeClusterSecurityGroupsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSecurityGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    Constraints: You can specify either the ClusterSecurityGroupName parameter or the Marker parameter, but not both.

    ", + "DescribeClusterSnapshotsMessage$ClusterIdentifier": "

    The identifier of the cluster for which information about snapshots is requested.

    ", + "DescribeClusterSnapshotsMessage$SnapshotIdentifier": "

    The snapshot identifier of the snapshot about which to return information.

    ", + "DescribeClusterSnapshotsMessage$SnapshotType": "

    The type of snapshots for which you are requesting information. By default, snapshots of all types are returned.

    Valid Values: automated | manual

    ", + "DescribeClusterSnapshotsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSnapshots request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClusterSnapshotsMessage$OwnerAccount": "

    The AWS customer account used to create or copy the snapshot. Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your AWS customer account, or do not specify the parameter.

    ", + "DescribeClusterSubnetGroupsMessage$ClusterSubnetGroupName": "

    The name of the cluster subnet group for which information is requested.

    ", + "DescribeClusterSubnetGroupsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSubnetGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClusterVersionsMessage$ClusterVersion": "

    The specific cluster version to return.

    Example: 1.0

    ", + "DescribeClusterVersionsMessage$ClusterParameterGroupFamily": "

    The name of a specific cluster parameter group family to return details for.

    Constraints:

    • Must be 1 to 255 alphanumeric characters
    • First character must be a letter
    • Cannot end with a hyphen or contain two consecutive hyphens
    ", + "DescribeClusterVersionsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterVersions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeClustersMessage$ClusterIdentifier": "

    The unique identifier of a cluster whose properties you are requesting. This parameter is case sensitive.

    The default is that all clusters defined for an account are returned.

    ", + "DescribeClustersMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    Constraints: You can specify either the ClusterIdentifier parameter or the Marker parameter, but not both.

    ", + "DescribeDefaultClusterParametersMessage$ParameterGroupFamily": "

    The name of the cluster parameter group family.

    ", + "DescribeDefaultClusterParametersMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDefaultClusterParameters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeEventCategoriesMessage$SourceType": "

    The source type, such as cluster or parameter group, to which the described event categories apply.

    Valid values: cluster, snapshot, parameter group, and security group.

    ", + "DescribeEventSubscriptionsMessage$SubscriptionName": "

    The name of the Amazon Redshift event notification subscription to be described.

    ", + "DescribeEventSubscriptionsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEventSubscriptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeEventsMessage$SourceIdentifier": "

    The identifier of the event source for which events will be returned. If this parameter is not specified, then all sources are included in the response.

    Constraints:

    If SourceIdentifier is supplied, SourceType must also be provided.

    • Specify a cluster identifier when SourceType is cluster.
    • Specify a cluster security group name when SourceType is cluster-security-group.
    • Specify a cluster parameter group name when SourceType is cluster-parameter-group.
    • Specify a cluster snapshot identifier when SourceType is cluster-snapshot.
    ", + "DescribeEventsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEvents request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeHsmClientCertificatesMessage$HsmClientCertificateIdentifier": "

    The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your AWS customer account.

    ", + "DescribeHsmClientCertificatesMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmClientCertificates request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeHsmConfigurationsMessage$HsmConfigurationIdentifier": "

    The identifier of a specific Amazon Redshift HSM configuration to be described. If no identifier is specified, information is returned for all HSM configurations owned by your AWS customer account.

    ", + "DescribeHsmConfigurationsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmConfigurations request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeLoggingStatusMessage$ClusterIdentifier": "

    The identifier of the cluster to get the logging status from.

    Example: examplecluster

    ", + "DescribeOrderableClusterOptionsMessage$ClusterVersion": "

    The version filter value. Specify this parameter to show only the available offerings matching the specified version.

    Default: All versions.

    Constraints: Must be one of the version returned from DescribeClusterVersions.

    ", + "DescribeOrderableClusterOptionsMessage$NodeType": "

    The node type filter value. Specify this parameter to show only the available offerings matching the specified node type.

    ", + "DescribeOrderableClusterOptionsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeOrderableClusterOptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeReservedNodeOfferingsMessage$ReservedNodeOfferingId": "

    The unique identifier for the offering.

    ", + "DescribeReservedNodeOfferingsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodeOfferings request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeReservedNodesMessage$ReservedNodeId": "

    Identifier for the node reservation.

    ", + "DescribeReservedNodesMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodes request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    ", + "DescribeResizeMessage$ClusterIdentifier": "

    The unique identifier of a cluster whose resize progress you are requesting. This parameter is case-sensitive.

    By default, resize operations for all clusters defined for an AWS account are returned.

    ", + "DescribeSnapshotCopyGrantsMessage$SnapshotCopyGrantName": "

    The name of the snapshot copy grant.

    ", + "DescribeSnapshotCopyGrantsMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrants request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

    ", + "DescribeTagsMessage$ResourceName": "

    The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

    ", + "DescribeTagsMessage$ResourceType": "

    The type of resource with which you want to view tags. Valid resource types are:

    • Cluster
    • CIDR/IP
    • EC2 security group
    • Snapshot
    • Cluster security group
    • Subnet group
    • HSM connection
    • HSM certificate
    • Parameter group
    • Snapshot copy grant

    For more information about Amazon Redshift resource types and constructing ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) in the Amazon Redshift Cluster Management Guide.

    ", + "DescribeTagsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the marker parameter and retrying the command. If the marker field is empty, all response records have been retrieved for the request.

    ", + "DisableLoggingMessage$ClusterIdentifier": "

    The identifier of the cluster on which logging is to be stopped.

    Example: examplecluster

    ", + "DisableSnapshotCopyMessage$ClusterIdentifier": "

    The unique identifier of the source cluster that you want to disable copying of snapshots to a destination region.

    Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

    ", + "EC2SecurityGroup$Status": "

    The status of the EC2 security group.

    ", + "EC2SecurityGroup$EC2SecurityGroupName": "

    The name of the EC2 Security Group.

    ", + "EC2SecurityGroup$EC2SecurityGroupOwnerId": "

    The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

    ", + "ElasticIpStatus$ElasticIp": "

    The elastic IP (EIP) address for the cluster.

    ", + "ElasticIpStatus$Status": "

    Describes the status of the elastic IP (EIP) address.

    ", + "EnableLoggingMessage$ClusterIdentifier": "

    The identifier of the cluster on which logging is to be started.

    Example: examplecluster

    ", + "EnableLoggingMessage$BucketName": "

    The name of an existing S3 bucket where the log files are to be stored.

    Constraints:

    • Must be in the same region as the cluster
    • The cluster must have read bucket and put object permissions
    ", + "EnableLoggingMessage$S3KeyPrefix": "

    The prefix applied to the log file names.

    Constraints:

    • Cannot exceed 512 characters
    • Cannot contain spaces( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. The hexadecimal codes for invalid characters are:
      • x00 to x20
      • x22
      • x27
      • x5c
      • x7f or larger
    ", + "EnableSnapshotCopyMessage$ClusterIdentifier": "

    The unique identifier of the source cluster to copy snapshots from.

    Constraints: Must be the valid name of an existing cluster that does not already have cross-region snapshot copy enabled.

    ", + "EnableSnapshotCopyMessage$DestinationRegion": "

    The destination region that you want to copy snapshots to.

    Constraints: Must be the name of a valid region. For more information, see Regions and Endpoints in the Amazon Web Services General Reference.

    ", + "EnableSnapshotCopyMessage$SnapshotCopyGrantName": "

    The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region.

    ", + "Endpoint$Address": "

    The DNS address of the Cluster.

    ", + "Event$SourceIdentifier": "

    The identifier for the source of the event.

    ", + "Event$Message": "

    The text of this event.

    ", + "Event$Severity": "

    The severity of the event.

    Values: ERROR, INFO

    ", + "Event$EventId": "

    The identifier of the event.

    ", + "EventCategoriesList$member": null, + "EventCategoriesMap$SourceType": "

    The Amazon Redshift source type, such as cluster or cluster-snapshot, that the returned categories belong to.

    ", + "EventInfoMap$EventId": "

    The identifier of an Amazon Redshift event.

    ", + "EventInfoMap$EventDescription": "

    The description of an Amazon Redshift event.

    ", + "EventInfoMap$Severity": "

    The severity of the event.

    Values: ERROR, INFO

    ", + "EventSubscription$CustomerAwsId": "

    The AWS customer account associated with the Amazon Redshift event notification subscription.

    ", + "EventSubscription$CustSubscriptionId": "

    The name of the Amazon Redshift event notification subscription.

    ", + "EventSubscription$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event notification subscription.

    ", + "EventSubscription$Status": "

    The status of the Amazon Redshift event notification subscription.

    Constraints:

    • Can be one of the following: active | no-permission | topic-not-exist
    • The status \"no-permission\" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.
    ", + "EventSubscription$SourceType": "

    The source type of the events returned by the Amazon Redshift event notification, such as cluster, or cluster-snapshot.

    ", + "EventSubscription$Severity": "

    The event severity specified in the Amazon Redshift event notification subscription.

    Values: ERROR, INFO

    ", + "EventSubscriptionsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "EventsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "HsmClientCertificate$HsmClientCertificateIdentifier": "

    The identifier of the HSM client certificate.

    ", + "HsmClientCertificate$HsmClientCertificatePublicKey": "

    The public key that the Amazon Redshift cluster will use to connect to the HSM. You must register the public key in the HSM.

    ", + "HsmClientCertificateMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "HsmConfiguration$HsmConfigurationIdentifier": "

    The name of the Amazon Redshift HSM configuration.

    ", + "HsmConfiguration$Description": "

    A text description of the HSM configuration.

    ", + "HsmConfiguration$HsmIpAddress": "

    The IP address that the Amazon Redshift cluster must use to access the HSM.

    ", + "HsmConfiguration$HsmPartitionName": "

    The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.

    ", + "HsmConfigurationMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "HsmStatus$HsmClientCertificateIdentifier": "

    Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

    ", + "HsmStatus$HsmConfigurationIdentifier": "

    Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

    ", + "HsmStatus$Status": "

    Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.

    Values: active, applying

    ", + "IPRange$Status": "

    The status of the IP range, for example, \"authorized\".

    ", + "IPRange$CIDRIP": "

    The IP range in Classless Inter-Domain Routing (CIDR) notation.

    ", + "ImportTablesCompleted$member": null, + "ImportTablesInProgress$member": null, + "ImportTablesNotStarted$member": null, + "LoggingStatus$BucketName": "

    The name of the S3 bucket where the log files are stored.

    ", + "LoggingStatus$S3KeyPrefix": "

    The prefix applied to the log file names.

    ", + "LoggingStatus$LastFailureMessage": "

    The message indicating that logs failed to be delivered.

    ", + "ModifyClusterMessage$ClusterIdentifier": "

    The unique identifier of the cluster to be modified.

    Example: examplecluster

    ", + "ModifyClusterMessage$ClusterType": "

    The new cluster type.

    When you submit your cluster resize request, your existing cluster goes into a read-only mode. After Amazon Redshift provisions a new cluster based on your resize requirements, there will be outage for a period while the old cluster is deleted and your connection is switched to the new cluster. You can use DescribeResize to track the progress of the resize request.

    Valid Values: multi-node | single-node

    ", + "ModifyClusterMessage$NodeType": "

    The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

    When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use DescribeResize to track the progress of the resize request.

    Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge.

    ", + "ModifyClusterMessage$MasterUserPassword": "

    The new password for the cluster master user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. Operations never return the password, so this operation provides a way to regain access to the master user account for a cluster if the password is lost.

    Default: Uses existing setting.

    Constraints:

    • Must be between 8 and 64 characters in length.
    • Must contain at least one uppercase letter.
    • Must contain at least one lowercase letter.
    • Must contain one number.
    • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), \" (double quote), \\, /, @, or space.
    ", + "ModifyClusterMessage$ClusterParameterGroupName": "

    The name of the cluster parameter group to apply to this cluster. This change is applied only after the cluster is rebooted. To reboot a cluster use RebootCluster.

    Default: Uses existing setting.

    Constraints: The cluster parameter group must be in the same parameter group family that matches the cluster version.

    ", + "ModifyClusterMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage.

    This maintenance window change is made immediately. If the new maintenance window indicates the current time, there must be at least 120 minutes between the current time and end of the window in order to ensure that pending changes are applied.

    Default: Uses existing setting.

    Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00.

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Must be at least 30 minutes.

    ", + "ModifyClusterMessage$ClusterVersion": "

    The new version number of the Amazon Redshift engine to upgrade to.

    For major version upgrades, if a non-default cluster parameter group is currently in use, a new cluster parameter group in the cluster parameter group family for the new version must be specified. The new cluster parameter group can be the default for that cluster parameter group family. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

    Example: 1.0

    ", + "ModifyClusterMessage$HsmClientCertificateIdentifier": "

    Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

    ", + "ModifyClusterMessage$HsmConfigurationIdentifier": "

    Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

    ", + "ModifyClusterMessage$NewClusterIdentifier": "

    The new identifier for the cluster.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • Alphabetic characters must be lowercase.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for all clusters within an AWS account.

    Example: examplecluster

    ", + "ModifyClusterParameterGroupMessage$ParameterGroupName": "

    The name of the parameter group to be modified.

    ", + "ModifyClusterSubnetGroupMessage$ClusterSubnetGroupName": "

    The name of the subnet group to be modified.

    ", + "ModifyClusterSubnetGroupMessage$Description": "

    A text description of the subnet group to be modified.

    ", + "ModifyEventSubscriptionMessage$SubscriptionName": "

    The name of the modified Amazon Redshift event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SnsTopicArn": "

    The Amazon Resource Name (ARN) of the SNS topic to be used by the event notification subscription.

    ", + "ModifyEventSubscriptionMessage$SourceType": "

    The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

    Valid values: cluster, cluster-parameter-group, cluster-security-group, and cluster-snapshot.

    ", + "ModifyEventSubscriptionMessage$Severity": "

    Specifies the Amazon Redshift event severity to be published by the event notification subscription.

    Values: ERROR, INFO

    ", + "ModifySnapshotCopyRetentionPeriodMessage$ClusterIdentifier": "

    The unique identifier of the cluster for which you want to change the retention period for automated snapshots that are copied to a destination region.

    Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

    ", + "OrderableClusterOption$ClusterVersion": "

    The version of the orderable cluster.

    ", + "OrderableClusterOption$ClusterType": "

    The cluster type, for example multi-node.

    ", + "OrderableClusterOption$NodeType": "

    The node type for the orderable cluster.

    ", + "OrderableClusterOptionsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "Parameter$ParameterName": "

    The name of the parameter.

    ", + "Parameter$ParameterValue": "

    The value of the parameter.

    ", + "Parameter$Description": "

    A description of the parameter.

    ", + "Parameter$Source": "

    The source of the parameter value, such as \"engine-default\" or \"user\".

    ", + "Parameter$DataType": "

    The data type of the parameter.

    ", + "Parameter$AllowedValues": "

    The valid range of values for the parameter.

    ", + "Parameter$MinimumEngineVersion": "

    The earliest engine version to which the parameter can apply.

    ", + "PendingModifiedValues$MasterUserPassword": "

    The pending or in-progress change of the master user password for the cluster.

    ", + "PendingModifiedValues$NodeType": "

    The pending or in-progress change of the cluster's node type.

    ", + "PendingModifiedValues$ClusterType": "

    The pending or in-progress change of the cluster type.

    ", + "PendingModifiedValues$ClusterVersion": "

    The pending or in-progress change of the service version.

    ", + "PendingModifiedValues$ClusterIdentifier": "

    The pending or in-progress change of the new identifier for the cluster.

    ", + "PurchaseReservedNodeOfferingMessage$ReservedNodeOfferingId": "

    The unique identifier of the reserved node offering you want to purchase.

    ", + "RebootClusterMessage$ClusterIdentifier": "

    The cluster identifier.

    ", + "RecurringCharge$RecurringChargeFrequency": "

    The frequency at which the recurring charge amount is applied.

    ", + "ReservedNode$ReservedNodeId": "

    The unique identifier for the reservation.

    ", + "ReservedNode$ReservedNodeOfferingId": "

    The identifier for the reserved node offering.

    ", + "ReservedNode$NodeType": "

    The node type of the reserved node.

    ", + "ReservedNode$CurrencyCode": "

    The currency code for the reserved cluster.

    ", + "ReservedNode$State": "

    The state of the reserved compute node.

    Possible Values:

    • pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.
    • active-This reserved node is owned by the caller and is available for use.
    • payment-failed-Payment failed for the purchase attempt.
    ", + "ReservedNode$OfferingType": "

    The anticipated utilization of the reserved node, as defined in the reserved node offering.

    ", + "ReservedNodeOffering$ReservedNodeOfferingId": "

    The offering identifier.

    ", + "ReservedNodeOffering$NodeType": "

    The node type offered by the reserved node offering.

    ", + "ReservedNodeOffering$CurrencyCode": "

    The currency code for the compute nodes offering.

    ", + "ReservedNodeOffering$OfferingType": "

    The anticipated utilization of the reserved node, as defined in the reserved node offering.

    ", + "ReservedNodeOfferingsMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ReservedNodesMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "ResetClusterParameterGroupMessage$ParameterGroupName": "

    The name of the cluster parameter group to be reset.

    ", + "ResizeProgressMessage$TargetNodeType": "

    The node type that the cluster will have after the resize operation is complete.

    ", + "ResizeProgressMessage$TargetClusterType": "

    The cluster type after the resize operation is complete.

    Valid Values: multi-node | single-node

    ", + "ResizeProgressMessage$Status": "

    The status of the resize operation.

    Valid Values: NONE | IN_PROGRESS | FAILED | SUCCEEDED

    ", + "RestorableNodeTypeList$member": null, + "RestoreFromClusterSnapshotMessage$ClusterIdentifier": "

    The identifier of the cluster that will be created from restoring the snapshot.

    Constraints:

    • Must contain from 1 to 63 alphanumeric characters or hyphens.
    • Alphabetic characters must be lowercase.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    • Must be unique for all clusters within an AWS account.

    ", + "RestoreFromClusterSnapshotMessage$SnapshotIdentifier": "

    The name of the snapshot from which to create the new cluster. This parameter isn't case sensitive.

    Example: my-snapshot-id

    ", + "RestoreFromClusterSnapshotMessage$SnapshotClusterIdentifier": "

    The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    ", + "RestoreFromClusterSnapshotMessage$AvailabilityZone": "

    The Amazon EC2 Availability Zone in which to restore the cluster.

    Default: A random, system-chosen Availability Zone.

    Example: us-east-1a

    ", + "RestoreFromClusterSnapshotMessage$ClusterSubnetGroupName": "

    The name of the subnet group where you want to cluster restored.

    A snapshot of cluster in VPC can be restored only in VPC. Therefore, you must provide subnet group name where you want the cluster restored.

    ", + "RestoreFromClusterSnapshotMessage$OwnerAccount": "

    The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

    ", + "RestoreFromClusterSnapshotMessage$HsmClientCertificateIdentifier": "

    Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

    ", + "RestoreFromClusterSnapshotMessage$HsmConfigurationIdentifier": "

    Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

    ", + "RestoreFromClusterSnapshotMessage$ElasticIp": "

    The elastic IP (EIP) address for the cluster.

    ", + "RestoreFromClusterSnapshotMessage$ClusterParameterGroupName": "

    The name of the parameter group to be associated with this cluster.

    Default: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups.

    Constraints:

    • Must be 1 to 255 alphanumeric characters or hyphens.
    • First character must be a letter.
    • Cannot end with a hyphen or contain two consecutive hyphens.
    ", + "RestoreFromClusterSnapshotMessage$PreferredMaintenanceWindow": "

    The weekly time range (in UTC) during which automated cluster maintenance can occur.

    Format: ddd:hh24:mi-ddd:hh24:mi

    Default: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.

    Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

    Constraints: Minimum 30-minute window.

    ", + "RestoreFromClusterSnapshotMessage$KmsKeyId": "

    The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster that you restore from a shared snapshot.

    ", + "RestoreFromClusterSnapshotMessage$NodeType": "

    The node type that the restored cluster will be provisioned with.

    Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds2.xlarge into ds1.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide

    ", + "RestoreStatus$Status": "

    The status of the restore action. Returns starting, restoring, completed, or failed.

    ", + "RevokeClusterSecurityGroupIngressMessage$ClusterSecurityGroupName": "

    The name of the security Group from which to revoke the ingress rule.

    ", + "RevokeClusterSecurityGroupIngressMessage$CIDRIP": "

    The IP range for which to revoke access. This range must be a valid Classless Inter-Domain Routing (CIDR) block of IP addresses. If CIDRIP is specified, EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided.

    ", + "RevokeClusterSecurityGroupIngressMessage$EC2SecurityGroupName": "

    The name of the EC2 Security Group whose access is to be revoked. If EC2SecurityGroupName is specified, EC2SecurityGroupOwnerId must also be provided and CIDRIP cannot be provided.

    ", + "RevokeClusterSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "

    The AWS account number of the owner of the security group specified in the EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must also be provided. and CIDRIP cannot be provided.

    Example: 111122223333

    ", + "RevokeSnapshotAccessMessage$SnapshotIdentifier": "

    The identifier of the snapshot that the account can no longer access.

    ", + "RevokeSnapshotAccessMessage$SnapshotClusterIdentifier": "

    The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    ", + "RevokeSnapshotAccessMessage$AccountWithRestoreAccess": "

    The identifier of the AWS customer account that can no longer restore the specified snapshot.

    ", + "RotateEncryptionKeyMessage$ClusterIdentifier": "

    The unique identifier of the cluster that you want to rotate the encryption keys for.

    Constraints: Must be the name of valid cluster that has encryption enabled.

    ", + "Snapshot$SnapshotIdentifier": "

    The snapshot identifier that is provided in the request.

    ", + "Snapshot$ClusterIdentifier": "

    The identifier of the cluster for which the snapshot was taken.

    ", + "Snapshot$Status": "

    The snapshot status. The value of the status depends on the API operation used.

    ", + "Snapshot$AvailabilityZone": "

    The Availability Zone in which the cluster was created.

    ", + "Snapshot$MasterUsername": "

    The master user name for the cluster.

    ", + "Snapshot$ClusterVersion": "

    The version ID of the Amazon Redshift engine that is running on the cluster.

    ", + "Snapshot$SnapshotType": "

    The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot will be of type \"manual\".

    ", + "Snapshot$NodeType": "

    The node type of the nodes in the cluster.

    ", + "Snapshot$DBName": "

    The name of the database that was created when the cluster was created.

    ", + "Snapshot$VpcId": "

    The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.

    ", + "Snapshot$KmsKeyId": "

    The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.

    ", + "Snapshot$OwnerAccount": "

    For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.

    ", + "Snapshot$SourceRegion": "

    The source region from which the snapshot was copied.

    ", + "SnapshotCopyGrant$SnapshotCopyGrantName": "

    The name of the snapshot copy grant.

    ", + "SnapshotCopyGrant$KmsKeyId": "

    The unique identifier of the customer master key (CMK) in AWS KMS to which Amazon Redshift is granted permission.

    ", + "SnapshotCopyGrantMessage$Marker": "

    An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrant request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

    ", + "SnapshotMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "SourceIdsList$member": null, + "Subnet$SubnetIdentifier": "

    The identifier of the subnet.

    ", + "Subnet$SubnetStatus": "

    The status of the subnet.

    ", + "SubnetIdentifierList$member": null, + "Tag$Key": "

    The key, or name, for the resource tag.

    ", + "Tag$Value": "

    The value for the resource tag.

    ", + "TagKeyList$member": null, + "TagValueList$member": null, + "TaggedResource$ResourceName": "

    The Amazon Resource Name (ARN) with which the tag is associated. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1.

    ", + "TaggedResource$ResourceType": "

    The type of resource with which the tag is associated. Valid resource types are:

    • Cluster
    • CIDR/IP
    • EC2 security group
    • Snapshot
    • Cluster security group
    • Subnet group
    • HSM connection
    • HSM certificate
    • Parameter group

    For more information about Amazon Redshift resource types and constructing ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) in the Amazon Redshift Cluster Management Guide.

    ", + "TaggedResourceListMessage$Marker": "

    A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

    ", + "VpcSecurityGroupIdList$member": null, + "VpcSecurityGroupMembership$VpcSecurityGroupId": null, + "VpcSecurityGroupMembership$Status": null + } + }, + "Subnet": { + "base": "

    Describes a subnet.

    ", + "refs": { + "SubnetList$member": null + } + }, + "SubnetAlreadyInUse": { + "base": "

    A specified subnet is already in use by another cluster.

    ", + "refs": { + } + }, + "SubnetIdentifierList": { + "base": null, + "refs": { + "CreateClusterSubnetGroupMessage$SubnetIds": "

    An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request.

    ", + "ModifyClusterSubnetGroupMessage$SubnetIds": "

    An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request.

    " + } + }, + "SubnetList": { + "base": null, + "refs": { + "ClusterSubnetGroup$Subnets": "

    A list of the VPC Subnet elements.

    " + } + }, + "SubscriptionAlreadyExistFault": { + "base": "

    There is already an existing event notification subscription with the specified name.

    ", + "refs": { + } + }, + "SubscriptionCategoryNotFoundFault": { + "base": "

    The value specified for the event category was not one of the allowed values, or it specified a category that does not apply to the specified source type. The allowed values are Configuration, Management, Monitoring, and Security.

    ", + "refs": { + } + }, + "SubscriptionEventIdNotFoundFault": { + "base": "

    An Amazon Redshift event with the specified event ID does not exist.

    ", + "refs": { + } + }, + "SubscriptionNotFoundFault": { + "base": "

    An Amazon Redshift event notification subscription with the specified name does not exist.

    ", + "refs": { + } + }, + "SubscriptionSeverityNotFoundFault": { + "base": "

    The value specified for the event severity was not one of the allowed values, or it specified a severity that does not apply to the specified source type. The allowed values are ERROR and INFO.

    ", + "refs": { + } + }, + "TStamp": { + "base": null, + "refs": { + "Cluster$ClusterCreateTime": "

    The date and time that the cluster was created.

    ", + "DescribeClusterSnapshotsMessage$StartTime": "

    A value that requests only snapshots created at or after the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2012-07-16T18:00:00Z

    ", + "DescribeClusterSnapshotsMessage$EndTime": "

    A time value that requests only snapshots created at or before the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2012-07-16T18:00:00Z

    ", + "DescribeEventsMessage$StartTime": "

    The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "DescribeEventsMessage$EndTime": "

    The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

    Example: 2009-07-08T18:00Z

    ", + "Event$Date": "

    The date and time of the event.

    ", + "EventSubscription$SubscriptionCreationTime": "

    The date and time the Amazon Redshift event notification subscription was created.

    ", + "LoggingStatus$LastSuccessfulDeliveryTime": "

    The last time when logs were delivered.

    ", + "LoggingStatus$LastFailureTime": "

    The last time when logs failed to be delivered.

    ", + "ReservedNode$StartTime": "

    The time the reservation started. You purchase a reserved node offering for a duration. This is the start time of that duration.

    ", + "Snapshot$SnapshotCreateTime": "

    The time (UTC) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.

    ", + "Snapshot$ClusterCreateTime": "

    The time (UTC) when the cluster was originally created.

    " + } + }, + "Tag": { + "base": "

    A tag consisting of a name/value pair for a resource.

    ", + "refs": { + "TagList$member": null, + "TaggedResource$Tag": "

    The tag for the resource.

    " + } + }, + "TagKeyList": { + "base": null, + "refs": { + "DeleteTagsMessage$TagKeys": "

    The tag key that you want to delete.

    ", + "DescribeClusterParameterGroupsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching cluster parameter groups that are associated with the specified key or keys. For example, suppose that you have parameter groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag keys associated with them.

    ", + "DescribeClusterSecurityGroupsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching cluster security groups that are associated with the specified key or keys. For example, suppose that you have security groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the security groups that have either or both of these tag keys associated with them.

    ", + "DescribeClusterSnapshotsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching cluster snapshots that are associated with the specified key or keys. For example, suppose that you have snapshots that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag keys associated with them.

    ", + "DescribeClusterSubnetGroupsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching cluster subnet groups that are associated with the specified key or keys. For example, suppose that you have subnet groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag keys associated with them.

    ", + "DescribeClustersMessage$TagKeys": "

    A tag key or keys for which you want to return all matching clusters that are associated with the specified key or keys. For example, suppose that you have clusters that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag keys associated with them.

    ", + "DescribeHsmClientCertificatesMessage$TagKeys": "

    A tag key or keys for which you want to return all matching HSM client certificates that are associated with the specified key or keys. For example, suppose that you have HSM client certificates that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag keys associated with them.

    ", + "DescribeHsmConfigurationsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching HSM configurations that are associated with the specified key or keys. For example, suppose that you have HSM configurations that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag keys associated with them.

    ", + "DescribeSnapshotCopyGrantsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.

    ", + "DescribeTagsMessage$TagKeys": "

    A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.

    " + } + }, + "TagLimitExceededFault": { + "base": "

    The request exceeds the limit of 10 tags for the resource.

    ", + "refs": { + } + }, + "TagList": { + "base": null, + "refs": { + "Cluster$Tags": "

    The list of tags for the cluster.

    ", + "ClusterParameterGroup$Tags": "

    The list of tags for the cluster parameter group.

    ", + "ClusterSecurityGroup$Tags": "

    The list of tags for the cluster security group.

    ", + "ClusterSubnetGroup$Tags": "

    The list of tags for the cluster subnet group.

    ", + "CreateClusterMessage$Tags": "

    A list of tag instances.

    ", + "CreateClusterParameterGroupMessage$Tags": "

    A list of tag instances.

    ", + "CreateClusterSecurityGroupMessage$Tags": "

    A list of tag instances.

    ", + "CreateClusterSnapshotMessage$Tags": "

    A list of tag instances.

    ", + "CreateClusterSubnetGroupMessage$Tags": "

    A list of tag instances.

    ", + "CreateEventSubscriptionMessage$Tags": "

    A list of tag instances.

    ", + "CreateHsmClientCertificateMessage$Tags": "

    A list of tag instances.

    ", + "CreateHsmConfigurationMessage$Tags": "

    A list of tag instances.

    ", + "CreateSnapshotCopyGrantMessage$Tags": "

    A list of tag instances.

    ", + "CreateTagsMessage$Tags": "

    One or more name/value pairs to add as tags to the specified resource. Each tag name is passed in with the parameter Key and the corresponding value is passed in with the parameter Value. The Key and Value parameters are separated by a comma (,). Separate multiple tags with a space. For example, --tags \"Key\"=\"owner\",\"Value\"=\"admin\" \"Key\"=\"environment\",\"Value\"=\"test\" \"Key\"=\"version\",\"Value\"=\"1.0\".

    ", + "EC2SecurityGroup$Tags": "

    The list of tags for the EC2 security group.

    ", + "EventSubscription$Tags": "

    The list of tags for the event subscription.

    ", + "HsmClientCertificate$Tags": "

    The list of tags for the HSM client certificate.

    ", + "HsmConfiguration$Tags": "

    The list of tags for the HSM configuration.

    ", + "IPRange$Tags": "

    The list of tags for the IP range.

    ", + "Snapshot$Tags": "

    The list of tags for the cluster snapshot.

    ", + "SnapshotCopyGrant$Tags": "

    A list of tag instances.

    " + } + }, + "TagValueList": { + "base": null, + "refs": { + "DescribeClusterParameterGroupsMessage$TagValues": "

    A tag value or values for which you want to return all matching cluster parameter groups that are associated with the specified tag value or values. For example, suppose that you have parameter groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag values associated with them.

    ", + "DescribeClusterSecurityGroupsMessage$TagValues": "

    A tag value or values for which you want to return all matching cluster security groups that are associated with the specified tag value or values. For example, suppose that you have security groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the security groups that have either or both of these tag values associated with them.

    ", + "DescribeClusterSnapshotsMessage$TagValues": "

    A tag value or values for which you want to return all matching cluster snapshots that are associated with the specified tag value or values. For example, suppose that you have snapshots that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag values associated with them.

    ", + "DescribeClusterSubnetGroupsMessage$TagValues": "

    A tag value or values for which you want to return all matching cluster subnet groups that are associated with the specified tag value or values. For example, suppose that you have subnet groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag values associated with them.

    ", + "DescribeClustersMessage$TagValues": "

    A tag value or values for which you want to return all matching clusters that are associated with the specified tag value or values. For example, suppose that you have clusters that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag values associated with them.

    ", + "DescribeHsmClientCertificatesMessage$TagValues": "

    A tag value or values for which you want to return all matching HSM client certificates that are associated with the specified tag value or values. For example, suppose that you have HSM client certificates that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag values associated with them.

    ", + "DescribeHsmConfigurationsMessage$TagValues": "

    A tag value or values for which you want to return all matching HSM configurations that are associated with the specified tag value or values. For example, suppose that you have HSM configurations that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag values associated with them.

    ", + "DescribeSnapshotCopyGrantsMessage$TagValues": "

    A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.

    ", + "DescribeTagsMessage$TagValues": "

    A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.

    " + } + }, + "TaggedResource": { + "base": "

    A tag and its associated resource.

    ", + "refs": { + "TaggedResourceList$member": null + } + }, + "TaggedResourceList": { + "base": null, + "refs": { + "TaggedResourceListMessage$TaggedResources": "

    A list of tags with their associated resources.

    " + } + }, + "TaggedResourceListMessage": { + "base": "

    Contains the output from the DescribeTags action.

    ", + "refs": { + } + }, + "UnauthorizedOperation": { + "base": "

    Your account is not authorized to perform the requested operation.

    ", + "refs": { + } + }, + "UnknownSnapshotCopyRegionFault": { + "base": "

    The specified region is incorrect or does not exist.

    ", + "refs": { + } + }, + "UnsupportedOperationFault": { + "base": "

    The requested operation isn't supported.

    ", + "refs": { + } + }, + "UnsupportedOptionFault": { + "base": "

    A request option was specified that is not supported.

    ", + "refs": { + } + }, + "VpcSecurityGroupIdList": { + "base": null, + "refs": { + "CreateClusterMessage$VpcSecurityGroupIds": "

    A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.

    Default: The default VPC security group is associated with the cluster.

    ", + "ModifyClusterMessage$VpcSecurityGroupIds": "

    A list of virtual private cloud (VPC) security groups to be associated with the cluster.

    ", + "RestoreFromClusterSnapshotMessage$VpcSecurityGroupIds": "

    A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.

    Default: The default VPC security group is associated with the cluster.

    VPC security groups only apply to clusters in VPCs.

    " + } + }, + "VpcSecurityGroupMembership": { + "base": "

    Describes the members of a VPC security group.

    ", + "refs": { + "VpcSecurityGroupMembershipList$member": null + } + }, + "VpcSecurityGroupMembershipList": { + "base": null, + "refs": { + "Cluster$VpcSecurityGroups": "

    A list of Virtual Private Cloud (VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.

    " + } + }, + "AuthorizeClusterSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "AuthorizeSnapshotAccessResult": { + "base": null, + "refs": { + } + }, + "CopyClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateClusterResult": { + "base": null, + "refs": { + } + }, + "CreateClusterParameterGroupResult": { + "base": null, + "refs": { + } + }, + "CreateClusterSecurityGroupResult": { + "base": null, + "refs": { + } + }, + "CreateClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "CreateClusterSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "CreateEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "CreateHsmClientCertificateResult": { + "base": null, + "refs": { + } + }, + "CreateHsmConfigurationResult": { + "base": null, + "refs": { + } + }, + "CreateSnapshotCopyGrantResult": { + "base": null, + "refs": { + } + }, + "DeleteClusterResult": { + "base": null, + "refs": { + } + }, + "DeleteClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "DescribeDefaultClusterParametersResult": { + "base": null, + "refs": { + } + }, + "DisableSnapshotCopyResult": { + "base": null, + "refs": { + } + }, + "EnableSnapshotCopyResult": { + "base": null, + "refs": { + } + }, + "ModifyClusterResult": { + "base": null, + "refs": { + } + }, + "ModifyClusterSubnetGroupResult": { + "base": null, + "refs": { + } + }, + "ModifyEventSubscriptionResult": { + "base": null, + "refs": { + } + }, + "ModifySnapshotCopyRetentionPeriodResult": { + "base": null, + "refs": { + } + }, + "PurchaseReservedNodeOfferingResult": { + "base": null, + "refs": { + } + }, + "RebootClusterResult": { + "base": null, + "refs": { + } + }, + "RestoreFromClusterSnapshotResult": { + "base": null, + "refs": { + } + }, + "RevokeClusterSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, + "RevokeSnapshotAccessResult": { + "base": null, + "refs": { + } + }, + "RotateEncryptionKeyResult": { + "base": null, + "refs": 
{ + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,94 @@ +{ + "pagination": { + "DescribeClusterParameterGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ParameterGroups" + }, + "DescribeClusterParameters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Parameters" + }, + "DescribeClusterSecurityGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ClusterSecurityGroups" + }, + "DescribeClusterSnapshots": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Snapshots" + }, + "DescribeClusterSubnetGroups": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ClusterSubnetGroups" + }, + "DescribeClusterVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ClusterVersions" + }, + "DescribeClusters": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Clusters" + }, + "DescribeDefaultClusterParameters": { + "input_token": "Marker", + "output_token": "DefaultClusterParameters.Marker", + "limit_key": "MaxRecords", + "result_key": "DefaultClusterParameters.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": 
"Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "Events" + }, + "DescribeHsmClientCertificates": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "HsmClientCertificates" + }, + "DescribeHsmConfigurations": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "HsmConfigurations" + }, + "DescribeOrderableClusterOptions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "OrderableClusterOptions" + }, + "DescribeReservedNodeOfferings": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedNodeOfferings" + }, + "DescribeReservedNodes": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords", + "result_key": "ReservedNodes" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,97 @@ +{ + "version": 2, + "waiters": { + "ClusterAvailable": { + "delay": 60, + "operation": "DescribeClusters", + "maxAttempts": 30, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Clusters[].ClusterStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + 
"state": "failure", + "argument": "Clusters[].ClusterStatus" + }, + { + "expected": "ClusterNotFound", + "matcher": "error", + "state": "retry" + } + ] + }, + "ClusterDeleted": { + "delay": 60, + "operation": "DescribeClusters", + "maxAttempts": 30, + "acceptors": [ + { + "expected": "ClusterNotFound", + "matcher": "error", + "state": "success" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "Clusters[].ClusterStatus" + }, + { + "expected": "pathAny", + "matcher": "pathList", + "state": "failure", + "argument": "Clusters[].ClusterStatus" + } + ] + }, + "ClusterRestored": { + "operation": "DescribeClusters", + "maxAttempts": 30, + "delay": 60, + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "Clusters[].RestoreStatus.Status", + "expected": "completed" + }, + { + "state": "failure", + "matcher": "pathAny", + "argument": "Clusters[].ClusterStatus", + "expected": "deleting" + } + ] + }, + "SnapshotAvailable": { + "delay": 15, + "operation": "DescribeClusterSnapshots", + "maxAttempts": 20, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "Snapshots[].Status" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "Snapshots[].Status" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "Snapshots[].Status" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1,3067 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-04-01", + "endpointPrefix":"route53", + "globalEndpoint":"route53.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"Route 53", + "serviceFullName":"Amazon Route 53", + "signatureVersion":"v4" + }, + "operations":{ + "AssociateVPCWithHostedZone":{ + "name":"AssociateVPCWithHostedZone", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone/{Id}/associatevpc" + }, + "input":{ + "shape":"AssociateVPCWithHostedZoneRequest", + "locationName":"AssociateVPCWithHostedZoneRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"AssociateVPCWithHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidVPCId"}, + {"shape":"InvalidInput"}, + {"shape":"PublicZoneVPCAssociation"}, + {"shape":"ConflictingDomainExists"}, + {"shape":"LimitsExceeded"} + ] + }, + "ChangeResourceRecordSets":{ + "name":"ChangeResourceRecordSets", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone/{Id}/rrset/" + }, + "input":{ + "shape":"ChangeResourceRecordSetsRequest", + "locationName":"ChangeResourceRecordSetsRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"ChangeResourceRecordSetsResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidChangeBatch"}, + {"shape":"InvalidInput"}, + {"shape":"PriorRequestNotComplete"} + ] + }, + "ChangeTagsForResource":{ + "name":"ChangeTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/tags/{ResourceType}/{ResourceId}" + }, + "input":{ + "shape":"ChangeTagsForResourceRequest", + "locationName":"ChangeTagsForResourceRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"ChangeTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + 
{"shape":"NoSuchHealthCheck"}, + {"shape":"NoSuchHostedZone"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"ThrottlingException"} + ] + }, + "CreateHealthCheck":{ + "name":"CreateHealthCheck", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/healthcheck", + "responseCode":201 + }, + "input":{ + "shape":"CreateHealthCheckRequest", + "locationName":"CreateHealthCheckRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateHealthCheckResponse"}, + "errors":[ + {"shape":"TooManyHealthChecks"}, + {"shape":"HealthCheckAlreadyExists"}, + {"shape":"InvalidInput"} + ] + }, + "CreateHostedZone":{ + "name":"CreateHostedZone", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone", + "responseCode":201 + }, + "input":{ + "shape":"CreateHostedZoneRequest", + "locationName":"CreateHostedZoneRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateHostedZoneResponse"}, + "errors":[ + {"shape":"InvalidDomainName"}, + {"shape":"HostedZoneAlreadyExists"}, + {"shape":"TooManyHostedZones"}, + {"shape":"InvalidVPCId"}, + {"shape":"InvalidInput"}, + {"shape":"DelegationSetNotAvailable"}, + {"shape":"ConflictingDomainExists"}, + {"shape":"NoSuchDelegationSet"}, + {"shape":"DelegationSetNotReusable"} + ] + }, + "CreateReusableDelegationSet":{ + "name":"CreateReusableDelegationSet", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/delegationset", + "responseCode":201 + }, + "input":{ + "shape":"CreateReusableDelegationSetRequest", + "locationName":"CreateReusableDelegationSetRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateReusableDelegationSetResponse"}, + "errors":[ + {"shape":"DelegationSetAlreadyCreated"}, + {"shape":"LimitsExceeded"}, + {"shape":"HostedZoneNotFound"}, + {"shape":"InvalidArgument"}, + {"shape":"InvalidInput"}, + {"shape":"DelegationSetNotAvailable"}, + 
{"shape":"DelegationSetAlreadyReusable"} + ] + }, + "CreateTrafficPolicy":{ + "name":"CreateTrafficPolicy", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicy", + "responseCode":201 + }, + "input":{ + "shape":"CreateTrafficPolicyRequest", + "locationName":"CreateTrafficPolicyRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateTrafficPolicyResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"TooManyTrafficPolicies"}, + {"shape":"TrafficPolicyAlreadyExists"}, + {"shape":"InvalidTrafficPolicyDocument"} + ] + }, + "CreateTrafficPolicyInstance":{ + "name":"CreateTrafficPolicyInstance", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicyinstance", + "responseCode":201 + }, + "input":{ + "shape":"CreateTrafficPolicyInstanceRequest", + "locationName":"CreateTrafficPolicyInstanceRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateTrafficPolicyInstanceResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"}, + {"shape":"TooManyTrafficPolicyInstances"}, + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"TrafficPolicyInstanceAlreadyExists"} + ] + }, + "CreateTrafficPolicyVersion":{ + "name":"CreateTrafficPolicyVersion", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicy/{Id}", + "responseCode":201 + }, + "input":{ + "shape":"CreateTrafficPolicyVersionRequest", + "locationName":"CreateTrafficPolicyVersionRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"CreateTrafficPolicyVersionResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"InvalidInput"}, + {"shape":"ConcurrentModification"}, + {"shape":"InvalidTrafficPolicyDocument"} + ] + }, + "DeleteHealthCheck":{ + "name":"DeleteHealthCheck", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}" + }, + 
"input":{"shape":"DeleteHealthCheckRequest"}, + "output":{"shape":"DeleteHealthCheckResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"HealthCheckInUse"}, + {"shape":"InvalidInput"} + ] + }, + "DeleteHostedZone":{ + "name":"DeleteHostedZone", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/hostedzone/{Id}" + }, + "input":{"shape":"DeleteHostedZoneRequest"}, + "output":{"shape":"DeleteHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"HostedZoneNotEmpty"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"InvalidInput"} + ] + }, + "DeleteReusableDelegationSet":{ + "name":"DeleteReusableDelegationSet", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/delegationset/{Id}" + }, + "input":{"shape":"DeleteReusableDelegationSetRequest"}, + "output":{"shape":"DeleteReusableDelegationSetResponse"}, + "errors":[ + {"shape":"NoSuchDelegationSet"}, + {"shape":"DelegationSetInUse"}, + {"shape":"DelegationSetNotReusable"}, + {"shape":"InvalidInput"} + ] + }, + "DeleteTrafficPolicy":{ + "name":"DeleteTrafficPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/trafficpolicy/{Id}/{Version}" + }, + "input":{"shape":"DeleteTrafficPolicyRequest"}, + "output":{"shape":"DeleteTrafficPolicyResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"InvalidInput"}, + {"shape":"TrafficPolicyInUse"}, + {"shape":"ConcurrentModification"} + ] + }, + "DeleteTrafficPolicyInstance":{ + "name":"DeleteTrafficPolicyInstance", + "http":{ + "method":"DELETE", + "requestUri":"/2013-04-01/trafficpolicyinstance/{Id}" + }, + "input":{"shape":"DeleteTrafficPolicyInstanceRequest"}, + "output":{"shape":"DeleteTrafficPolicyInstanceResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"InvalidInput"}, + {"shape":"PriorRequestNotComplete"} + ] + }, + "DisassociateVPCFromHostedZone":{ + "name":"DisassociateVPCFromHostedZone", + "http":{ + "method":"POST", + 
"requestUri":"/2013-04-01/hostedzone/{Id}/disassociatevpc" + }, + "input":{ + "shape":"DisassociateVPCFromHostedZoneRequest", + "locationName":"DisassociateVPCFromHostedZoneRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"DisassociateVPCFromHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidVPCId"}, + {"shape":"VPCAssociationNotFound"}, + {"shape":"LastVPCAssociation"}, + {"shape":"InvalidInput"} + ] + }, + "GetChange":{ + "name":"GetChange", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/change/{Id}" + }, + "input":{"shape":"GetChangeRequest"}, + "output":{"shape":"GetChangeResponse"}, + "errors":[ + {"shape":"NoSuchChange"}, + {"shape":"InvalidInput"} + ] + }, + "GetChangeDetails":{ + "name":"GetChangeDetails", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/changedetails/{Id}" + }, + "input":{"shape":"GetChangeDetailsRequest"}, + "output":{"shape":"GetChangeDetailsResponse"}, + "errors":[ + {"shape":"NoSuchChange"}, + {"shape":"InvalidInput"} + ] + }, + "GetCheckerIpRanges":{ + "name":"GetCheckerIpRanges", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/checkeripranges" + }, + "input":{"shape":"GetCheckerIpRangesRequest"}, + "output":{"shape":"GetCheckerIpRangesResponse"} + }, + "GetGeoLocation":{ + "name":"GetGeoLocation", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/geolocation" + }, + "input":{"shape":"GetGeoLocationRequest"}, + "output":{"shape":"GetGeoLocationResponse"}, + "errors":[ + {"shape":"NoSuchGeoLocation"}, + {"shape":"InvalidInput"} + ] + }, + "GetHealthCheck":{ + "name":"GetHealthCheck", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}" + }, + "input":{"shape":"GetHealthCheckRequest"}, + "output":{"shape":"GetHealthCheckResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidInput"}, + {"shape":"IncompatibleVersion"} + ] + }, + "GetHealthCheckCount":{ + 
"name":"GetHealthCheckCount", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheckcount" + }, + "input":{"shape":"GetHealthCheckCountRequest"}, + "output":{"shape":"GetHealthCheckCountResponse"} + }, + "GetHealthCheckLastFailureReason":{ + "name":"GetHealthCheckLastFailureReason", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}/lastfailurereason" + }, + "input":{"shape":"GetHealthCheckLastFailureReasonRequest"}, + "output":{"shape":"GetHealthCheckLastFailureReasonResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidInput"} + ] + }, + "GetHealthCheckStatus":{ + "name":"GetHealthCheckStatus", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}/status" + }, + "input":{"shape":"GetHealthCheckStatusRequest"}, + "output":{"shape":"GetHealthCheckStatusResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidInput"} + ] + }, + "GetHostedZone":{ + "name":"GetHostedZone", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone/{Id}" + }, + "input":{"shape":"GetHostedZoneRequest"}, + "output":{"shape":"GetHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ] + }, + "GetHostedZoneCount":{ + "name":"GetHostedZoneCount", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzonecount" + }, + "input":{"shape":"GetHostedZoneCountRequest"}, + "output":{"shape":"GetHostedZoneCountResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "GetReusableDelegationSet":{ + "name":"GetReusableDelegationSet", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/delegationset/{Id}" + }, + "input":{"shape":"GetReusableDelegationSetRequest"}, + "output":{"shape":"GetReusableDelegationSetResponse"}, + "errors":[ + {"shape":"NoSuchDelegationSet"}, + {"shape":"DelegationSetNotReusable"}, + {"shape":"InvalidInput"} + ] + }, + "GetTrafficPolicy":{ + "name":"GetTrafficPolicy", + "http":{ + 
"method":"GET", + "requestUri":"/2013-04-01/trafficpolicy/{Id}/{Version}" + }, + "input":{"shape":"GetTrafficPolicyRequest"}, + "output":{"shape":"GetTrafficPolicyResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"InvalidInput"} + ] + }, + "GetTrafficPolicyInstance":{ + "name":"GetTrafficPolicyInstance", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstance/{Id}" + }, + "input":{"shape":"GetTrafficPolicyInstanceRequest"}, + "output":{"shape":"GetTrafficPolicyInstanceResponse"}, + "errors":[ + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"InvalidInput"} + ] + }, + "GetTrafficPolicyInstanceCount":{ + "name":"GetTrafficPolicyInstanceCount", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstancecount" + }, + "input":{"shape":"GetTrafficPolicyInstanceCountRequest"}, + "output":{"shape":"GetTrafficPolicyInstanceCountResponse"} + }, + "ListChangeBatchesByHostedZone":{ + "name":"ListChangeBatchesByHostedZone", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone/{Id}/changes" + }, + "input":{"shape":"ListChangeBatchesByHostedZoneRequest"}, + "output":{"shape":"ListChangeBatchesByHostedZoneResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ] + }, + "ListChangeBatchesByRRSet":{ + "name":"ListChangeBatchesByRRSet", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone/{Id}/rrsChanges" + }, + "input":{"shape":"ListChangeBatchesByRRSetRequest"}, + "output":{"shape":"ListChangeBatchesByRRSetResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ] + }, + "ListGeoLocations":{ + "name":"ListGeoLocations", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/geolocations" + }, + "input":{"shape":"ListGeoLocationsRequest"}, + "output":{"shape":"ListGeoLocationsResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListHealthChecks":{ + "name":"ListHealthChecks", + "http":{ + "method":"GET", + 
"requestUri":"/2013-04-01/healthcheck" + }, + "input":{"shape":"ListHealthChecksRequest"}, + "output":{"shape":"ListHealthChecksResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"IncompatibleVersion"} + ] + }, + "ListHostedZones":{ + "name":"ListHostedZones", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone" + }, + "input":{"shape":"ListHostedZonesRequest"}, + "output":{"shape":"ListHostedZonesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchDelegationSet"}, + {"shape":"DelegationSetNotReusable"} + ] + }, + "ListHostedZonesByName":{ + "name":"ListHostedZonesByName", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzonesbyname" + }, + "input":{"shape":"ListHostedZonesByNameRequest"}, + "output":{"shape":"ListHostedZonesByNameResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"InvalidDomainName"} + ] + }, + "ListResourceRecordSets":{ + "name":"ListResourceRecordSets", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzone/{Id}/rrset" + }, + "input":{"shape":"ListResourceRecordSetsRequest"}, + "output":{"shape":"ListResourceRecordSetsResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ] + }, + "ListReusableDelegationSets":{ + "name":"ListReusableDelegationSets", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/delegationset" + }, + "input":{"shape":"ListReusableDelegationSetsRequest"}, + "output":{"shape":"ListReusableDelegationSetsResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/tags/{ResourceType}/{ResourceId}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchHealthCheck"}, + {"shape":"NoSuchHostedZone"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"ThrottlingException"} + ] + }, + 
"ListTagsForResources":{ + "name":"ListTagsForResources", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/tags/{ResourceType}" + }, + "input":{ + "shape":"ListTagsForResourcesRequest", + "locationName":"ListTagsForResourcesRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"ListTagsForResourcesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchHealthCheck"}, + {"shape":"NoSuchHostedZone"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"ThrottlingException"} + ] + }, + "ListTrafficPolicies":{ + "name":"ListTrafficPolicies", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicies" + }, + "input":{"shape":"ListTrafficPoliciesRequest"}, + "output":{"shape":"ListTrafficPoliciesResponse"}, + "errors":[ + {"shape":"InvalidInput"} + ] + }, + "ListTrafficPolicyInstances":{ + "name":"ListTrafficPolicyInstances", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstances" + }, + "input":{"shape":"ListTrafficPolicyInstancesRequest"}, + "output":{"shape":"ListTrafficPolicyInstancesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicyInstance"} + ] + }, + "ListTrafficPolicyInstancesByHostedZone":{ + "name":"ListTrafficPolicyInstancesByHostedZone", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstances/hostedzone" + }, + "input":{"shape":"ListTrafficPolicyInstancesByHostedZoneRequest"}, + "output":{"shape":"ListTrafficPolicyInstancesByHostedZoneResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"NoSuchHostedZone"} + ] + }, + "ListTrafficPolicyInstancesByPolicy":{ + "name":"ListTrafficPolicyInstancesByPolicy", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicyinstances/trafficpolicy" + }, + "input":{"shape":"ListTrafficPolicyInstancesByPolicyRequest"}, + "output":{"shape":"ListTrafficPolicyInstancesByPolicyResponse"}, + 
"errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"NoSuchTrafficPolicy"} + ] + }, + "ListTrafficPolicyVersions":{ + "name":"ListTrafficPolicyVersions", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/trafficpolicies/{Id}/versions" + }, + "input":{"shape":"ListTrafficPolicyVersionsRequest"}, + "output":{"shape":"ListTrafficPolicyVersionsResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicy"} + ] + }, + "UpdateHealthCheck":{ + "name":"UpdateHealthCheck", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/healthcheck/{HealthCheckId}" + }, + "input":{ + "shape":"UpdateHealthCheckRequest", + "locationName":"UpdateHealthCheckRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"UpdateHealthCheckResponse"}, + "errors":[ + {"shape":"NoSuchHealthCheck"}, + {"shape":"InvalidInput"}, + {"shape":"HealthCheckVersionMismatch"} + ] + }, + "UpdateHostedZoneComment":{ + "name":"UpdateHostedZoneComment", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/hostedzone/{Id}" + }, + "input":{ + "shape":"UpdateHostedZoneCommentRequest", + "locationName":"UpdateHostedZoneCommentRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"UpdateHostedZoneCommentResponse"}, + "errors":[ + {"shape":"NoSuchHostedZone"}, + {"shape":"InvalidInput"} + ] + }, + "UpdateTrafficPolicyComment":{ + "name":"UpdateTrafficPolicyComment", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicy/{Id}/{Version}" + }, + "input":{ + "shape":"UpdateTrafficPolicyCommentRequest", + "locationName":"UpdateTrafficPolicyCommentRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"UpdateTrafficPolicyCommentResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"ConcurrentModification"} + ] + }, + 
"UpdateTrafficPolicyInstance":{ + "name":"UpdateTrafficPolicyInstance", + "http":{ + "method":"POST", + "requestUri":"/2013-04-01/trafficpolicyinstance/{Id}" + }, + "input":{ + "shape":"UpdateTrafficPolicyInstanceRequest", + "locationName":"UpdateTrafficPolicyInstanceRequest", + "xmlNamespace":{"uri":"https://route53.amazonaws.com/doc/2013-04-01/"} + }, + "output":{"shape":"UpdateTrafficPolicyInstanceResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"NoSuchTrafficPolicy"}, + {"shape":"NoSuchTrafficPolicyInstance"}, + {"shape":"PriorRequestNotComplete"}, + {"shape":"ConflictingTypes"} + ] + } + }, + "shapes":{ + "AWSAccountID":{"type":"string"}, + "AliasHealthEnabled":{"type":"boolean"}, + "AliasTarget":{ + "type":"structure", + "required":[ + "HostedZoneId", + "DNSName", + "EvaluateTargetHealth" + ], + "members":{ + "HostedZoneId":{"shape":"ResourceId"}, + "DNSName":{"shape":"DNSName"}, + "EvaluateTargetHealth":{"shape":"AliasHealthEnabled"} + } + }, + "AssociateVPCComment":{"type":"string"}, + "AssociateVPCWithHostedZoneRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "VPC" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "VPC":{"shape":"VPC"}, + "Comment":{"shape":"AssociateVPCComment"} + } + }, + "AssociateVPCWithHostedZoneResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "Change":{ + "type":"structure", + "required":[ + "Action", + "ResourceRecordSet" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "ResourceRecordSet":{"shape":"ResourceRecordSet"} + } + }, + "ChangeAction":{ + "type":"string", + "enum":[ + "CREATE", + "DELETE", + "UPSERT" + ] + }, + "ChangeBatch":{ + "type":"structure", + "required":["Changes"], + "members":{ + "Comment":{"shape":"ResourceDescription"}, + "Changes":{"shape":"Changes"} + } + }, + "ChangeBatchRecord":{ + "type":"structure", + "required":[ + "Id", + 
"Status" + ], + "members":{ + "Id":{"shape":"ResourceId"}, + "SubmittedAt":{"shape":"TimeStamp"}, + "Status":{"shape":"ChangeStatus"}, + "Comment":{"shape":"ResourceDescription"}, + "Submitter":{"shape":"AWSAccountID"}, + "Changes":{"shape":"Changes"} + } + }, + "ChangeBatchRecords":{ + "type":"list", + "member":{ + "shape":"ChangeBatchRecord", + "locationName":"ChangeBatchRecord" + }, + "min":1 + }, + "ChangeInfo":{ + "type":"structure", + "required":[ + "Id", + "Status", + "SubmittedAt" + ], + "members":{ + "Id":{"shape":"ResourceId"}, + "Status":{"shape":"ChangeStatus"}, + "SubmittedAt":{"shape":"TimeStamp"}, + "Comment":{"shape":"ResourceDescription"} + } + }, + "ChangeResourceRecordSetsRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "ChangeBatch" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "ChangeBatch":{"shape":"ChangeBatch"} + } + }, + "ChangeResourceRecordSetsResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "ChangeStatus":{ + "type":"string", + "enum":[ + "PENDING", + "INSYNC" + ] + }, + "ChangeTagsForResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId" + ], + "members":{ + "ResourceType":{ + "shape":"TagResourceType", + "location":"uri", + "locationName":"ResourceType" + }, + "ResourceId":{ + "shape":"TagResourceId", + "location":"uri", + "locationName":"ResourceId" + }, + "AddTags":{"shape":"TagList"}, + "RemoveTagKeys":{"shape":"TagKeyList"} + } + }, + "ChangeTagsForResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "Changes":{ + "type":"list", + "member":{ + "shape":"Change", + "locationName":"Change" + }, + "min":1 + }, + "CheckerIpRanges":{ + "type":"list", + "member":{"shape":"IPAddressCidr"} + }, + "ChildHealthCheckList":{ + "type":"list", + "member":{ + "shape":"HealthCheckId", + "locationName":"ChildHealthCheck" + }, + "max":256 + }, + 
"ConcurrentModification":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "ConflictingDomainExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ConflictingTypes":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateHealthCheckRequest":{ + "type":"structure", + "required":[ + "CallerReference", + "HealthCheckConfig" + ], + "members":{ + "CallerReference":{"shape":"HealthCheckNonce"}, + "HealthCheckConfig":{"shape":"HealthCheckConfig"} + } + }, + "CreateHealthCheckResponse":{ + "type":"structure", + "required":[ + "HealthCheck", + "Location" + ], + "members":{ + "HealthCheck":{"shape":"HealthCheck"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateHostedZoneRequest":{ + "type":"structure", + "required":[ + "Name", + "CallerReference" + ], + "members":{ + "Name":{"shape":"DNSName"}, + "VPC":{"shape":"VPC"}, + "CallerReference":{"shape":"Nonce"}, + "HostedZoneConfig":{"shape":"HostedZoneConfig"}, + "DelegationSetId":{"shape":"ResourceId"} + } + }, + "CreateHostedZoneResponse":{ + "type":"structure", + "required":[ + "HostedZone", + "ChangeInfo", + "DelegationSet", + "Location" + ], + "members":{ + "HostedZone":{"shape":"HostedZone"}, + "ChangeInfo":{"shape":"ChangeInfo"}, + "DelegationSet":{"shape":"DelegationSet"}, + "VPC":{"shape":"VPC"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateReusableDelegationSetRequest":{ + "type":"structure", + "required":["CallerReference"], + "members":{ + "CallerReference":{"shape":"Nonce"}, + "HostedZoneId":{"shape":"ResourceId"} + } + }, + "CreateReusableDelegationSetResponse":{ + "type":"structure", + "required":[ + "DelegationSet", + "Location" + ], + 
"members":{ + "DelegationSet":{"shape":"DelegationSet"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateTrafficPolicyInstanceRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "Name", + "TTL", + "TrafficPolicyId", + "TrafficPolicyVersion" + ], + "members":{ + "HostedZoneId":{"shape":"ResourceId"}, + "Name":{"shape":"DNSName"}, + "TTL":{"shape":"TTL"}, + "TrafficPolicyId":{"shape":"TrafficPolicyId"}, + "TrafficPolicyVersion":{"shape":"TrafficPolicyVersion"} + } + }, + "CreateTrafficPolicyInstanceResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyInstance", + "Location" + ], + "members":{ + "TrafficPolicyInstance":{"shape":"TrafficPolicyInstance"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateTrafficPolicyRequest":{ + "type":"structure", + "required":[ + "Name", + "Document" + ], + "members":{ + "Name":{"shape":"TrafficPolicyName"}, + "Document":{"shape":"TrafficPolicyDocument"}, + "Comment":{"shape":"TrafficPolicyComment"} + } + }, + "CreateTrafficPolicyResponse":{ + "type":"structure", + "required":[ + "TrafficPolicy", + "Location" + ], + "members":{ + "TrafficPolicy":{"shape":"TrafficPolicy"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateTrafficPolicyVersionRequest":{ + "type":"structure", + "required":[ + "Id", + "Document" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "Document":{"shape":"TrafficPolicyDocument"}, + "Comment":{"shape":"TrafficPolicyComment"} + } + }, + "CreateTrafficPolicyVersionResponse":{ + "type":"structure", + "required":[ + "TrafficPolicy", + "Location" + ], + "members":{ + "TrafficPolicy":{"shape":"TrafficPolicy"}, + "Location":{ + "shape":"ResourceURI", + "location":"header", + "locationName":"Location" + } + } + }, + "DNSName":{ + "type":"string", + 
"max":1024 + }, + "Date":{ + "type":"string", + "max":256 + }, + "DelegationSet":{ + "type":"structure", + "required":["NameServers"], + "members":{ + "Id":{"shape":"ResourceId"}, + "CallerReference":{"shape":"Nonce"}, + "NameServers":{"shape":"DelegationSetNameServers"} + } + }, + "DelegationSetAlreadyCreated":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSetAlreadyReusable":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSetInUse":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSetNameServers":{ + "type":"list", + "member":{ + "shape":"DNSName", + "locationName":"NameServer" + }, + "min":1 + }, + "DelegationSetNotAvailable":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSetNotReusable":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DelegationSets":{ + "type":"list", + "member":{ + "shape":"DelegationSet", + "locationName":"DelegationSet" + } + }, + "DeleteHealthCheckRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + } + } + }, + "DeleteHealthCheckResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteHostedZoneRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteHostedZoneResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "DeleteReusableDelegationSetRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + 
"DeleteReusableDelegationSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTrafficPolicyInstanceRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"TrafficPolicyInstanceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "DeleteTrafficPolicyInstanceResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTrafficPolicyRequest":{ + "type":"structure", + "required":[ + "Id", + "Version" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "Version":{ + "shape":"TrafficPolicyVersion", + "location":"uri", + "locationName":"Version" + } + } + }, + "DeleteTrafficPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DisassociateVPCComment":{"type":"string"}, + "DisassociateVPCFromHostedZoneRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "VPC" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "VPC":{"shape":"VPC"}, + "Comment":{"shape":"DisassociateVPCComment"} + } + }, + "DisassociateVPCFromHostedZoneResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "ErrorMessage":{"type":"string"}, + "ErrorMessages":{ + "type":"list", + "member":{ + "shape":"ErrorMessage", + "locationName":"Message" + } + }, + "FailureThreshold":{ + "type":"integer", + "max":10, + "min":1 + }, + "FullyQualifiedDomainName":{ + "type":"string", + "max":255 + }, + "GeoLocation":{ + "type":"structure", + "members":{ + "ContinentCode":{"shape":"GeoLocationContinentCode"}, + "CountryCode":{"shape":"GeoLocationCountryCode"}, + "SubdivisionCode":{"shape":"GeoLocationSubdivisionCode"} + } + }, + "GeoLocationContinentCode":{ + "type":"string", + "max":2, + "min":2 + }, + "GeoLocationContinentName":{ + "type":"string", + "max":32, + "min":1 + }, + "GeoLocationCountryCode":{ + "type":"string", + "max":2, + "min":1 + }, + 
"GeoLocationCountryName":{ + "type":"string", + "max":64, + "min":1 + }, + "GeoLocationDetails":{ + "type":"structure", + "members":{ + "ContinentCode":{"shape":"GeoLocationContinentCode"}, + "ContinentName":{"shape":"GeoLocationContinentName"}, + "CountryCode":{"shape":"GeoLocationCountryCode"}, + "CountryName":{"shape":"GeoLocationCountryName"}, + "SubdivisionCode":{"shape":"GeoLocationSubdivisionCode"}, + "SubdivisionName":{"shape":"GeoLocationSubdivisionName"} + } + }, + "GeoLocationDetailsList":{ + "type":"list", + "member":{ + "shape":"GeoLocationDetails", + "locationName":"GeoLocationDetails" + } + }, + "GeoLocationSubdivisionCode":{ + "type":"string", + "max":3, + "min":1 + }, + "GeoLocationSubdivisionName":{ + "type":"string", + "max":64, + "min":1 + }, + "GetChangeDetailsRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetChangeDetailsResponse":{ + "type":"structure", + "required":["ChangeBatchRecord"], + "members":{ + "ChangeBatchRecord":{"shape":"ChangeBatchRecord"} + } + }, + "GetChangeRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetChangeResponse":{ + "type":"structure", + "required":["ChangeInfo"], + "members":{ + "ChangeInfo":{"shape":"ChangeInfo"} + } + }, + "GetCheckerIpRangesRequest":{ + "type":"structure", + "members":{ + } + }, + "GetCheckerIpRangesResponse":{ + "type":"structure", + "required":["CheckerIpRanges"], + "members":{ + "CheckerIpRanges":{"shape":"CheckerIpRanges"} + } + }, + "GetGeoLocationRequest":{ + "type":"structure", + "members":{ + "ContinentCode":{ + "shape":"GeoLocationContinentCode", + "location":"querystring", + "locationName":"continentcode" + }, + "CountryCode":{ + "shape":"GeoLocationCountryCode", + "location":"querystring", + "locationName":"countrycode" + }, + "SubdivisionCode":{ + 
"shape":"GeoLocationSubdivisionCode", + "location":"querystring", + "locationName":"subdivisioncode" + } + } + }, + "GetGeoLocationResponse":{ + "type":"structure", + "required":["GeoLocationDetails"], + "members":{ + "GeoLocationDetails":{"shape":"GeoLocationDetails"} + } + }, + "GetHealthCheckCountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetHealthCheckCountResponse":{ + "type":"structure", + "required":["HealthCheckCount"], + "members":{ + "HealthCheckCount":{"shape":"HealthCheckCount"} + } + }, + "GetHealthCheckLastFailureReasonRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + } + } + }, + "GetHealthCheckLastFailureReasonResponse":{ + "type":"structure", + "required":["HealthCheckObservations"], + "members":{ + "HealthCheckObservations":{"shape":"HealthCheckObservations"} + } + }, + "GetHealthCheckRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + } + } + }, + "GetHealthCheckResponse":{ + "type":"structure", + "required":["HealthCheck"], + "members":{ + "HealthCheck":{"shape":"HealthCheck"} + } + }, + "GetHealthCheckStatusRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + } + } + }, + "GetHealthCheckStatusResponse":{ + "type":"structure", + "required":["HealthCheckObservations"], + "members":{ + "HealthCheckObservations":{"shape":"HealthCheckObservations"} + } + }, + "GetHostedZoneCountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetHostedZoneCountResponse":{ + "type":"structure", + "required":["HostedZoneCount"], + "members":{ + "HostedZoneCount":{"shape":"HostedZoneCount"} + } + }, + "GetHostedZoneRequest":{ + "type":"structure", + 
"required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetHostedZoneResponse":{ + "type":"structure", + "required":["HostedZone"], + "members":{ + "HostedZone":{"shape":"HostedZone"}, + "DelegationSet":{"shape":"DelegationSet"}, + "VPCs":{"shape":"VPCs"} + } + }, + "GetReusableDelegationSetRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetReusableDelegationSetResponse":{ + "type":"structure", + "required":["DelegationSet"], + "members":{ + "DelegationSet":{"shape":"DelegationSet"} + } + }, + "GetTrafficPolicyInstanceCountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetTrafficPolicyInstanceCountResponse":{ + "type":"structure", + "required":["TrafficPolicyInstanceCount"], + "members":{ + "TrafficPolicyInstanceCount":{"shape":"TrafficPolicyInstanceCount"} + } + }, + "GetTrafficPolicyInstanceRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"TrafficPolicyInstanceId", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetTrafficPolicyInstanceResponse":{ + "type":"structure", + "required":["TrafficPolicyInstance"], + "members":{ + "TrafficPolicyInstance":{"shape":"TrafficPolicyInstance"} + } + }, + "GetTrafficPolicyRequest":{ + "type":"structure", + "required":[ + "Id", + "Version" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "Version":{ + "shape":"TrafficPolicyVersion", + "location":"uri", + "locationName":"Version" + } + } + }, + "GetTrafficPolicyResponse":{ + "type":"structure", + "required":["TrafficPolicy"], + "members":{ + "TrafficPolicy":{"shape":"TrafficPolicy"} + } + }, + "HealthCheck":{ + "type":"structure", + "required":[ + "Id", + "CallerReference", + "HealthCheckConfig", + "HealthCheckVersion" + ], + "members":{ + "Id":{"shape":"HealthCheckId"}, + 
"CallerReference":{"shape":"HealthCheckNonce"}, + "HealthCheckConfig":{"shape":"HealthCheckConfig"}, + "HealthCheckVersion":{"shape":"HealthCheckVersion"} + } + }, + "HealthCheckAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "HealthCheckConfig":{ + "type":"structure", + "required":["Type"], + "members":{ + "IPAddress":{"shape":"IPAddress"}, + "Port":{"shape":"Port"}, + "Type":{"shape":"HealthCheckType"}, + "ResourcePath":{"shape":"ResourcePath"}, + "FullyQualifiedDomainName":{"shape":"FullyQualifiedDomainName"}, + "SearchString":{"shape":"SearchString"}, + "RequestInterval":{"shape":"RequestInterval"}, + "FailureThreshold":{"shape":"FailureThreshold"}, + "MeasureLatency":{"shape":"MeasureLatency"}, + "Inverted":{"shape":"Inverted"}, + "HealthThreshold":{"shape":"HealthThreshold"}, + "ChildHealthChecks":{"shape":"ChildHealthCheckList"} + } + }, + "HealthCheckCount":{"type":"long"}, + "HealthCheckId":{ + "type":"string", + "max":64 + }, + "HealthCheckInUse":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "HealthCheckNonce":{ + "type":"string", + "max":64, + "min":1 + }, + "HealthCheckObservation":{ + "type":"structure", + "members":{ + "IPAddress":{"shape":"IPAddress"}, + "StatusReport":{"shape":"StatusReport"} + } + }, + "HealthCheckObservations":{ + "type":"list", + "member":{ + "shape":"HealthCheckObservation", + "locationName":"HealthCheckObservation" + } + }, + "HealthCheckType":{ + "type":"string", + "enum":[ + "HTTP", + "HTTPS", + "HTTP_STR_MATCH", + "HTTPS_STR_MATCH", + "TCP", + "CALCULATED" + ] + }, + "HealthCheckVersion":{ + "type":"long", + "min":1 + }, + "HealthCheckVersionMismatch":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "HealthChecks":{ + "type":"list", + 
"member":{ + "shape":"HealthCheck", + "locationName":"HealthCheck" + } + }, + "HealthThreshold":{ + "type":"integer", + "max":256, + "min":0 + }, + "HostedZone":{ + "type":"structure", + "required":[ + "Id", + "Name", + "CallerReference" + ], + "members":{ + "Id":{"shape":"ResourceId"}, + "Name":{"shape":"DNSName"}, + "CallerReference":{"shape":"Nonce"}, + "Config":{"shape":"HostedZoneConfig"}, + "ResourceRecordSetCount":{"shape":"HostedZoneRRSetCount"} + } + }, + "HostedZoneAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "HostedZoneConfig":{ + "type":"structure", + "members":{ + "Comment":{"shape":"ResourceDescription"}, + "PrivateZone":{"shape":"IsPrivateZone"} + } + }, + "HostedZoneCount":{"type":"long"}, + "HostedZoneNotEmpty":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "HostedZoneNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "HostedZoneRRSetCount":{"type":"long"}, + "HostedZones":{ + "type":"list", + "member":{ + "shape":"HostedZone", + "locationName":"HostedZone" + } + }, + "IPAddress":{ + "type":"string", + "max":15, + "pattern":"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$" + }, + "IPAddressCidr":{"type":"string"}, + "IncompatibleVersion":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidArgument":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "InvalidChangeBatch":{ + "type":"structure", + "members":{ + "messages":{"shape":"ErrorMessages"} + }, + "exception":true + }, + "InvalidDomainName":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + 
"error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidInput":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTrafficPolicyDocument":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidVPCId":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Inverted":{"type":"boolean"}, + "IsPrivateZone":{"type":"boolean"}, + "LastVPCAssociation":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "LimitsExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListChangeBatchesByHostedZoneRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "StartDate", + "EndDate" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "StartDate":{ + "shape":"Date", + "location":"querystring", + "locationName":"startDate" + }, + "EndDate":{ + "shape":"Date", + "location":"querystring", + "locationName":"endDate" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxItems" + }, + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + } + } + }, + "ListChangeBatchesByHostedZoneResponse":{ + "type":"structure", + "required":[ + "MaxItems", + "Marker", + "ChangeBatchRecords" + ], + "members":{ + "MaxItems":{"shape":"PageMaxItems"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "ChangeBatchRecords":{"shape":"ChangeBatchRecords"}, + "NextMarker":{"shape":"PageMarker"} + } + }, + "ListChangeBatchesByRRSetRequest":{ + "type":"structure", + "required":[ + "HostedZoneId", + "Name", + 
"Type", + "StartDate", + "EndDate" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "Name":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"rrSet_name" + }, + "Type":{ + "shape":"RRType", + "location":"querystring", + "locationName":"type" + }, + "SetIdentifier":{ + "shape":"ResourceRecordSetIdentifier", + "location":"querystring", + "locationName":"identifier" + }, + "StartDate":{ + "shape":"Date", + "location":"querystring", + "locationName":"startDate" + }, + "EndDate":{ + "shape":"Date", + "location":"querystring", + "locationName":"endDate" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxItems" + }, + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + } + } + }, + "ListChangeBatchesByRRSetResponse":{ + "type":"structure", + "required":[ + "MaxItems", + "Marker", + "ChangeBatchRecords" + ], + "members":{ + "MaxItems":{"shape":"PageMaxItems"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "ChangeBatchRecords":{"shape":"ChangeBatchRecords"}, + "NextMarker":{"shape":"PageMarker"} + } + }, + "ListGeoLocationsRequest":{ + "type":"structure", + "members":{ + "StartContinentCode":{ + "shape":"GeoLocationContinentCode", + "location":"querystring", + "locationName":"startcontinentcode" + }, + "StartCountryCode":{ + "shape":"GeoLocationCountryCode", + "location":"querystring", + "locationName":"startcountrycode" + }, + "StartSubdivisionCode":{ + "shape":"GeoLocationSubdivisionCode", + "location":"querystring", + "locationName":"startsubdivisioncode" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListGeoLocationsResponse":{ + "type":"structure", + "required":[ + "GeoLocationDetailsList", + "IsTruncated", + "MaxItems" + ], + "members":{ + "GeoLocationDetailsList":{"shape":"GeoLocationDetailsList"}, + 
"IsTruncated":{"shape":"PageTruncated"}, + "NextContinentCode":{"shape":"GeoLocationContinentCode"}, + "NextCountryCode":{"shape":"GeoLocationCountryCode"}, + "NextSubdivisionCode":{"shape":"GeoLocationSubdivisionCode"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListHealthChecksRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListHealthChecksResponse":{ + "type":"structure", + "required":[ + "HealthChecks", + "Marker", + "IsTruncated", + "MaxItems" + ], + "members":{ + "HealthChecks":{"shape":"HealthChecks"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextMarker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListHostedZonesByNameRequest":{ + "type":"structure", + "members":{ + "DNSName":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"dnsname" + }, + "HostedZoneId":{ + "shape":"ResourceId", + "location":"querystring", + "locationName":"hostedzoneid" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListHostedZonesByNameResponse":{ + "type":"structure", + "required":[ + "HostedZones", + "IsTruncated", + "MaxItems" + ], + "members":{ + "HostedZones":{"shape":"HostedZones"}, + "DNSName":{"shape":"DNSName"}, + "HostedZoneId":{"shape":"ResourceId"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextDNSName":{"shape":"DNSName"}, + "NextHostedZoneId":{"shape":"ResourceId"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListHostedZonesRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + }, + "DelegationSetId":{ + 
"shape":"ResourceId", + "location":"querystring", + "locationName":"delegationsetid" + } + } + }, + "ListHostedZonesResponse":{ + "type":"structure", + "required":[ + "HostedZones", + "Marker", + "IsTruncated", + "MaxItems" + ], + "members":{ + "HostedZones":{"shape":"HostedZones"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextMarker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListResourceRecordSetsRequest":{ + "type":"structure", + "required":["HostedZoneId"], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "StartRecordName":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"name" + }, + "StartRecordType":{ + "shape":"RRType", + "location":"querystring", + "locationName":"type" + }, + "StartRecordIdentifier":{ + "shape":"ResourceRecordSetIdentifier", + "location":"querystring", + "locationName":"identifier" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListResourceRecordSetsResponse":{ + "type":"structure", + "required":[ + "ResourceRecordSets", + "IsTruncated", + "MaxItems" + ], + "members":{ + "ResourceRecordSets":{"shape":"ResourceRecordSets"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextRecordName":{"shape":"DNSName"}, + "NextRecordType":{"shape":"RRType"}, + "NextRecordIdentifier":{"shape":"ResourceRecordSetIdentifier"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListReusableDelegationSetsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"PageMarker", + "location":"querystring", + "locationName":"marker" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListReusableDelegationSetsResponse":{ + "type":"structure", + "required":[ + "DelegationSets", + "Marker", + "IsTruncated", + "MaxItems" + ], + "members":{ + 
"DelegationSets":{"shape":"DelegationSets"}, + "Marker":{"shape":"PageMarker"}, + "IsTruncated":{"shape":"PageTruncated"}, + "NextMarker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceId" + ], + "members":{ + "ResourceType":{ + "shape":"TagResourceType", + "location":"uri", + "locationName":"ResourceType" + }, + "ResourceId":{ + "shape":"TagResourceId", + "location":"uri", + "locationName":"ResourceId" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["ResourceTagSet"], + "members":{ + "ResourceTagSet":{"shape":"ResourceTagSet"} + } + }, + "ListTagsForResourcesRequest":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceIds" + ], + "members":{ + "ResourceType":{ + "shape":"TagResourceType", + "location":"uri", + "locationName":"ResourceType" + }, + "ResourceIds":{"shape":"TagResourceIdList"} + } + }, + "ListTagsForResourcesResponse":{ + "type":"structure", + "required":["ResourceTagSets"], + "members":{ + "ResourceTagSets":{"shape":"ResourceTagSetList"} + } + }, + "ListTrafficPoliciesRequest":{ + "type":"structure", + "members":{ + "TrafficPolicyIdMarker":{ + "shape":"TrafficPolicyId", + "location":"querystring", + "locationName":"trafficpolicyid" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPoliciesResponse":{ + "type":"structure", + "required":[ + "TrafficPolicySummaries", + "IsTruncated", + "TrafficPolicyIdMarker", + "MaxItems" + ], + "members":{ + "TrafficPolicySummaries":{"shape":"TrafficPolicySummaries"}, + "IsTruncated":{"shape":"PageTruncated"}, + "TrafficPolicyIdMarker":{"shape":"TrafficPolicyId"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTrafficPolicyInstancesByHostedZoneRequest":{ + "type":"structure", + "required":["HostedZoneId"], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", 
+ "location":"querystring", + "locationName":"id" + }, + "TrafficPolicyInstanceNameMarker":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"trafficpolicyinstancename" + }, + "TrafficPolicyInstanceTypeMarker":{ + "shape":"RRType", + "location":"querystring", + "locationName":"trafficpolicyinstancetype" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPolicyInstancesByHostedZoneResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyInstances", + "IsTruncated", + "MaxItems" + ], + "members":{ + "TrafficPolicyInstances":{"shape":"TrafficPolicyInstances"}, + "TrafficPolicyInstanceNameMarker":{"shape":"DNSName"}, + "TrafficPolicyInstanceTypeMarker":{"shape":"RRType"}, + "IsTruncated":{"shape":"PageTruncated"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTrafficPolicyInstancesByPolicyRequest":{ + "type":"structure", + "required":[ + "TrafficPolicyId", + "TrafficPolicyVersion" + ], + "members":{ + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "location":"querystring", + "locationName":"id" + }, + "TrafficPolicyVersion":{ + "shape":"TrafficPolicyVersion", + "location":"querystring", + "locationName":"version" + }, + "HostedZoneIdMarker":{ + "shape":"ResourceId", + "location":"querystring", + "locationName":"hostedzoneid" + }, + "TrafficPolicyInstanceNameMarker":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"trafficpolicyinstancename" + }, + "TrafficPolicyInstanceTypeMarker":{ + "shape":"RRType", + "location":"querystring", + "locationName":"trafficpolicyinstancetype" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPolicyInstancesByPolicyResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyInstances", + "IsTruncated", + "MaxItems" + ], + "members":{ + "TrafficPolicyInstances":{"shape":"TrafficPolicyInstances"}, + 
"HostedZoneIdMarker":{"shape":"ResourceId"}, + "TrafficPolicyInstanceNameMarker":{"shape":"DNSName"}, + "TrafficPolicyInstanceTypeMarker":{"shape":"RRType"}, + "IsTruncated":{"shape":"PageTruncated"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTrafficPolicyInstancesRequest":{ + "type":"structure", + "members":{ + "HostedZoneIdMarker":{ + "shape":"ResourceId", + "location":"querystring", + "locationName":"hostedzoneid" + }, + "TrafficPolicyInstanceNameMarker":{ + "shape":"DNSName", + "location":"querystring", + "locationName":"trafficpolicyinstancename" + }, + "TrafficPolicyInstanceTypeMarker":{ + "shape":"RRType", + "location":"querystring", + "locationName":"trafficpolicyinstancetype" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPolicyInstancesResponse":{ + "type":"structure", + "required":[ + "TrafficPolicyInstances", + "IsTruncated", + "MaxItems" + ], + "members":{ + "TrafficPolicyInstances":{"shape":"TrafficPolicyInstances"}, + "HostedZoneIdMarker":{"shape":"ResourceId"}, + "TrafficPolicyInstanceNameMarker":{"shape":"DNSName"}, + "TrafficPolicyInstanceTypeMarker":{"shape":"RRType"}, + "IsTruncated":{"shape":"PageTruncated"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListTrafficPolicyVersionsRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "TrafficPolicyVersionMarker":{ + "shape":"TrafficPolicyVersionMarker", + "location":"querystring", + "locationName":"trafficpolicyversion" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "location":"querystring", + "locationName":"maxitems" + } + } + }, + "ListTrafficPolicyVersionsResponse":{ + "type":"structure", + "required":[ + "TrafficPolicies", + "IsTruncated", + "TrafficPolicyVersionMarker", + "MaxItems" + ], + "members":{ + "TrafficPolicies":{"shape":"TrafficPolicies"}, + "IsTruncated":{"shape":"PageTruncated"}, + 
"TrafficPolicyVersionMarker":{"shape":"TrafficPolicyVersionMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "MeasureLatency":{"type":"boolean"}, + "Message":{ + "type":"string", + "max":1024 + }, + "NoSuchChange":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchDelegationSet":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "NoSuchGeoLocation":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchHealthCheck":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchHostedZone":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchTrafficPolicy":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "NoSuchTrafficPolicyInstance":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "Nonce":{ + "type":"string", + "max":128, + "min":1 + }, + "PageMarker":{ + "type":"string", + "max":64 + }, + "PageMaxItems":{"type":"string"}, + "PageTruncated":{"type":"boolean"}, + "Port":{ + "type":"integer", + "max":65535, + "min":1 + }, + "PriorRequestNotComplete":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "PublicZoneVPCAssociation":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "RData":{ + "type":"string", + "max":4000 + }, + "RRType":{ + "type":"string", + "enum":[ + "SOA", + "A", + "TXT", + 
"NS", + "CNAME", + "MX", + "PTR", + "SRV", + "SPF", + "AAAA" + ] + }, + "RequestInterval":{ + "type":"integer", + "max":30, + "min":10 + }, + "ResourceDescription":{ + "type":"string", + "max":256 + }, + "ResourceId":{ + "type":"string", + "max":32 + }, + "ResourcePath":{ + "type":"string", + "max":255 + }, + "ResourceRecord":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{"shape":"RData"} + } + }, + "ResourceRecordSet":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{"shape":"DNSName"}, + "Type":{"shape":"RRType"}, + "SetIdentifier":{"shape":"ResourceRecordSetIdentifier"}, + "Weight":{"shape":"ResourceRecordSetWeight"}, + "Region":{"shape":"ResourceRecordSetRegion"}, + "GeoLocation":{"shape":"GeoLocation"}, + "Failover":{"shape":"ResourceRecordSetFailover"}, + "TTL":{"shape":"TTL"}, + "ResourceRecords":{"shape":"ResourceRecords"}, + "AliasTarget":{"shape":"AliasTarget"}, + "HealthCheckId":{"shape":"HealthCheckId"}, + "TrafficPolicyInstanceId":{"shape":"TrafficPolicyInstanceId"} + } + }, + "ResourceRecordSetFailover":{ + "type":"string", + "enum":[ + "PRIMARY", + "SECONDARY" + ] + }, + "ResourceRecordSetIdentifier":{ + "type":"string", + "max":128, + "min":1 + }, + "ResourceRecordSetRegion":{ + "type":"string", + "enum":[ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "sa-east-1", + "cn-north-1" + ], + "max":64, + "min":1 + }, + "ResourceRecordSetWeight":{ + "type":"long", + "max":255, + "min":0 + }, + "ResourceRecordSets":{ + "type":"list", + "member":{ + "shape":"ResourceRecordSet", + "locationName":"ResourceRecordSet" + } + }, + "ResourceRecords":{ + "type":"list", + "member":{ + "shape":"ResourceRecord", + "locationName":"ResourceRecord" + }, + "min":1 + }, + "ResourceTagSet":{ + "type":"structure", + "members":{ + "ResourceType":{"shape":"TagResourceType"}, + "ResourceId":{"shape":"TagResourceId"}, + 
"Tags":{"shape":"TagList"} + } + }, + "ResourceTagSetList":{ + "type":"list", + "member":{ + "shape":"ResourceTagSet", + "locationName":"ResourceTagSet" + } + }, + "ResourceURI":{ + "type":"string", + "max":1024 + }, + "SearchString":{ + "type":"string", + "max":255 + }, + "Status":{"type":"string"}, + "StatusReport":{ + "type":"structure", + "members":{ + "Status":{"shape":"Status"}, + "CheckedTime":{"shape":"TimeStamp"} + } + }, + "TTL":{ + "type":"long", + "max":2147483647, + "min":0 + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128 + }, + "TagKeyList":{ + "type":"list", + "member":{ + "shape":"TagKey", + "locationName":"Key" + }, + "max":10, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + }, + "max":10, + "min":1 + }, + "TagResourceId":{ + "type":"string", + "max":64 + }, + "TagResourceIdList":{ + "type":"list", + "member":{ + "shape":"TagResourceId", + "locationName":"ResourceId" + }, + "max":10, + "min":1 + }, + "TagResourceType":{ + "type":"string", + "enum":[ + "healthcheck", + "hostedzone" + ] + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TimeStamp":{"type":"timestamp"}, + "TooManyHealthChecks":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "TooManyHostedZones":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrafficPolicies":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyTrafficPolicyInstances":{ + "type":"structure", + "members":{ + 
"message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrafficPolicies":{ + "type":"list", + "member":{ + "shape":"TrafficPolicy", + "locationName":"TrafficPolicy" + } + }, + "TrafficPolicy":{ + "type":"structure", + "required":[ + "Id", + "Version", + "Name", + "Type", + "Document" + ], + "members":{ + "Id":{"shape":"TrafficPolicyId"}, + "Version":{"shape":"TrafficPolicyVersion"}, + "Name":{"shape":"TrafficPolicyName"}, + "Type":{"shape":"RRType"}, + "Document":{"shape":"TrafficPolicyDocument"}, + "Comment":{"shape":"TrafficPolicyComment"} + } + }, + "TrafficPolicyAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "TrafficPolicyComment":{ + "type":"string", + "max":1024 + }, + "TrafficPolicyDocument":{ + "type":"string", + "max":102400 + }, + "TrafficPolicyId":{ + "type":"string", + "max":36 + }, + "TrafficPolicyInUse":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrafficPolicyInstance":{ + "type":"structure", + "required":[ + "Id", + "HostedZoneId", + "Name", + "TTL", + "State", + "Message", + "TrafficPolicyId", + "TrafficPolicyVersion", + "TrafficPolicyType" + ], + "members":{ + "Id":{"shape":"TrafficPolicyInstanceId"}, + "HostedZoneId":{"shape":"ResourceId"}, + "Name":{"shape":"DNSName"}, + "TTL":{"shape":"TTL"}, + "State":{"shape":"TrafficPolicyInstanceState"}, + "Message":{"shape":"Message"}, + "TrafficPolicyId":{"shape":"TrafficPolicyId"}, + "TrafficPolicyVersion":{"shape":"TrafficPolicyVersion"}, + "TrafficPolicyType":{"shape":"RRType"} + } + }, + "TrafficPolicyInstanceAlreadyExists":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "TrafficPolicyInstanceCount":{"type":"integer"}, + "TrafficPolicyInstanceId":{ + "type":"string", 
+ "max":36 + }, + "TrafficPolicyInstanceState":{"type":"string"}, + "TrafficPolicyInstances":{ + "type":"list", + "member":{ + "shape":"TrafficPolicyInstance", + "locationName":"TrafficPolicyInstance" + } + }, + "TrafficPolicyName":{ + "type":"string", + "max":512 + }, + "TrafficPolicySummaries":{ + "type":"list", + "member":{ + "shape":"TrafficPolicySummary", + "locationName":"TrafficPolicySummary" + } + }, + "TrafficPolicySummary":{ + "type":"structure", + "required":[ + "Id", + "Name", + "Type", + "LatestVersion", + "TrafficPolicyCount" + ], + "members":{ + "Id":{"shape":"TrafficPolicyId"}, + "Name":{"shape":"TrafficPolicyName"}, + "Type":{"shape":"RRType"}, + "LatestVersion":{"shape":"TrafficPolicyVersion"}, + "TrafficPolicyCount":{"shape":"TrafficPolicyVersion"} + } + }, + "TrafficPolicyVersion":{ + "type":"integer", + "max":1000, + "min":1 + }, + "TrafficPolicyVersionMarker":{ + "type":"string", + "max":4 + }, + "UpdateHealthCheckRequest":{ + "type":"structure", + "required":["HealthCheckId"], + "members":{ + "HealthCheckId":{ + "shape":"HealthCheckId", + "location":"uri", + "locationName":"HealthCheckId" + }, + "HealthCheckVersion":{"shape":"HealthCheckVersion"}, + "IPAddress":{"shape":"IPAddress"}, + "Port":{"shape":"Port"}, + "ResourcePath":{"shape":"ResourcePath"}, + "FullyQualifiedDomainName":{"shape":"FullyQualifiedDomainName"}, + "SearchString":{"shape":"SearchString"}, + "FailureThreshold":{"shape":"FailureThreshold"}, + "Inverted":{"shape":"Inverted"}, + "HealthThreshold":{"shape":"HealthThreshold"}, + "ChildHealthChecks":{"shape":"ChildHealthCheckList"} + } + }, + "UpdateHealthCheckResponse":{ + "type":"structure", + "required":["HealthCheck"], + "members":{ + "HealthCheck":{"shape":"HealthCheck"} + } + }, + "UpdateHostedZoneCommentRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"Id" + }, + "Comment":{"shape":"ResourceDescription"} + } + }, + 
"UpdateHostedZoneCommentResponse":{ + "type":"structure", + "required":["HostedZone"], + "members":{ + "HostedZone":{"shape":"HostedZone"} + } + }, + "UpdateTrafficPolicyCommentRequest":{ + "type":"structure", + "required":[ + "Id", + "Version", + "Comment" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyId", + "location":"uri", + "locationName":"Id" + }, + "Version":{ + "shape":"TrafficPolicyVersion", + "location":"uri", + "locationName":"Version" + }, + "Comment":{"shape":"TrafficPolicyComment"} + } + }, + "UpdateTrafficPolicyCommentResponse":{ + "type":"structure", + "required":["TrafficPolicy"], + "members":{ + "TrafficPolicy":{"shape":"TrafficPolicy"} + } + }, + "UpdateTrafficPolicyInstanceRequest":{ + "type":"structure", + "required":[ + "Id", + "TTL", + "TrafficPolicyId", + "TrafficPolicyVersion" + ], + "members":{ + "Id":{ + "shape":"TrafficPolicyInstanceId", + "location":"uri", + "locationName":"Id" + }, + "TTL":{"shape":"TTL"}, + "TrafficPolicyId":{"shape":"TrafficPolicyId"}, + "TrafficPolicyVersion":{"shape":"TrafficPolicyVersion"} + } + }, + "UpdateTrafficPolicyInstanceResponse":{ + "type":"structure", + "required":["TrafficPolicyInstance"], + "members":{ + "TrafficPolicyInstance":{"shape":"TrafficPolicyInstance"} + } + }, + "VPC":{ + "type":"structure", + "members":{ + "VPCRegion":{"shape":"VPCRegion"}, + "VPCId":{"shape":"VPCId"} + } + }, + "VPCAssociationNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "VPCId":{ + "type":"string", + "max":1024 + }, + "VPCRegion":{ + "type":"string", + "enum":[ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "sa-east-1", + "cn-north-1" + ], + "max":64, + "min":1 + }, + "VPCs":{ + "type":"list", + "member":{ + "shape":"VPC", + "locationName":"VPC" + }, + "min":1 + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1680 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "AssociateVPCWithHostedZone": "

    This action associates a VPC with a hosted zone.

    To associate a VPC with a hosted zone, send a POST request to the 2013-04-01/hostedzone/hosted zone ID/associatevpc resource. The request body must include an XML document with an AssociateVPCWithHostedZoneRequest element. The response returns the AssociateVPCWithHostedZoneResponse element that contains ChangeInfo for you to track the progress of the AssociateVPCWithHostedZoneRequest you made. See GetChange operation for how to track the progress of your change.

    ", + "ChangeResourceRecordSets": "

    Use this action to create or change your authoritative DNS information. To use this action, send a POST request to the 2013-04-01/hostedzone/hosted Zone ID/rrset resource. The request body must include an XML document with a ChangeResourceRecordSetsRequest element.

    Changes are a list of change items and are considered transactional. For more information on transactional changes, also known as change batches, see POST ChangeResourceRecordSets in the Amazon Route 53 API Reference.

    Due to the nature of transactional changes, you cannot delete the same resource record set more than once in a single change batch. If you attempt to delete the same change batch more than once, Amazon Route 53 returns an InvalidChangeBatch error.

    In response to a ChangeResourceRecordSets request, your DNS data is changed on all Amazon Route 53 DNS servers. Initially, the status of a change is PENDING. This means the change has not yet propagated to all the authoritative Amazon Route 53 DNS servers. When the change is propagated to all hosts, the change returns a status of INSYNC.

    Note the following limitations on a ChangeResourceRecordSets request:

    • A request cannot contain more than 100 Change elements.
    • A request cannot contain more than 1000 ResourceRecord elements.
    • The sum of the number of characters (including spaces) in all Value elements in a request cannot exceed 32,000 characters.
    ", + "ChangeTagsForResource": null, + "CreateHealthCheck": "

    This action creates a new health check.

    To create a new health check, send a POST request to the 2013-04-01/healthcheck resource. The request body must include an XML document with a CreateHealthCheckRequest element. The response returns the CreateHealthCheckResponse element that contains metadata about the health check.

    ", + "CreateHostedZone": "

    This action creates a new hosted zone.

    To create a new hosted zone, send a POST request to the 2013-04-01/hostedzone resource. The request body must include an XML document with a CreateHostedZoneRequest element. The response returns the CreateHostedZoneResponse element that contains metadata about the hosted zone.

    Amazon Route 53 automatically creates a default SOA record and four NS records for the zone. The NS records in the hosted zone are the name servers you give your registrar to delegate your domain to. For more information about SOA and NS records, see NS and SOA Records that Amazon Route 53 Creates for a Hosted Zone in the Amazon Route 53 Developer Guide.

    When you create a zone, its initial status is PENDING. This means that it is not yet available on all DNS servers. The status of the zone changes to INSYNC when the NS and SOA records are available on all Amazon Route 53 DNS servers.

    When trying to create a hosted zone using a reusable delegation set, you could specify an optional DelegationSetId, and Route53 would assign those 4 NS records for the zone, instead of allotting a new one.

    ", + "CreateReusableDelegationSet": "

    This action creates a reusable delegationSet.

    To create a new reusable delegationSet, send a POST request to the 2013-04-01/delegationset resource. The request body must include an XML document with a CreateReusableDelegationSetRequest element. The response returns the CreateReusableDelegationSetResponse element that contains metadata about the delegationSet.

    If the optional parameter HostedZoneId is specified, it marks the delegationSet associated with that particular hosted zone as reusable.

    ", + "CreateTrafficPolicy": "

    Creates a traffic policy, which you use to create multiple DNS resource record sets for one domain name (such as example.com) or one subdomain name (such as www.example.com).

    To create a traffic policy, send a POST request to the 2013-04-01/trafficpolicy resource. The request body must include an XML document with a CreateTrafficPolicyRequest element. The response includes the CreateTrafficPolicyResponse element, which contains information about the new traffic policy.

    ", + "CreateTrafficPolicyInstance": "

    Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created.

    To create a traffic policy instance, send a POST request to the 2013-04-01/trafficpolicyinstance resource. The request body must include an XML document with a CreateTrafficPolicyRequest element. The response returns the CreateTrafficPolicyInstanceResponse element, which contains information about the traffic policy instance.

    ", + "CreateTrafficPolicyVersion": "

    Creates a new version of an existing traffic policy. When you create a new version of a traffic policy, you specify the ID of the traffic policy that you want to update and a JSON-formatted document that describes the new version.

    You use traffic policies to create multiple DNS resource record sets for one domain name (such as example.com) or one subdomain name (such as www.example.com).

    To create a new version, send a POST request to the 2013-04-01/trafficpolicy/ resource. The request body includes an XML document with a CreateTrafficPolicyVersionRequest element. The response returns the CreateTrafficPolicyVersionResponse element, which contains information about the new version of the traffic policy.

    ", + "DeleteHealthCheck": "

    This action deletes a health check. To delete a health check, send a DELETE request to the 2013-04-01/healthcheck/health check ID resource.

    You can delete a health check only if there are no resource record sets associated with this health check. If resource record sets are associated with this health check, you must disassociate them before you can delete your health check. If you try to delete a health check that is associated with resource record sets, Amazon Route 53 will deny your request with a HealthCheckInUse error. For information about disassociating the records from your health check, see ChangeResourceRecordSets.", + "DeleteHostedZone": "

    This action deletes a hosted zone. To delete a hosted zone, send a DELETE request to the 2013-04-01/hostedzone/hosted zone ID resource.

    For more information about deleting a hosted zone, see Deleting a Hosted Zone in the Amazon Route 53 Developer Guide.

    You can delete a hosted zone only if there are no resource record sets other than the default SOA record and NS resource record sets. If your hosted zone contains other resource record sets, you must delete them before you can delete your hosted zone. If you try to delete a hosted zone that contains other resource record sets, Amazon Route 53 will deny your request with a HostedZoneNotEmpty error. For information about deleting records from your hosted zone, see ChangeResourceRecordSets.", + "DeleteReusableDelegationSet": "

    This action deletes a reusable delegation set. To delete a reusable delegation set, send a DELETE request to the 2013-04-01/delegationset/delegation set ID resource.

    You can delete a reusable delegation set only if there are no associated hosted zones. If your reusable delegation set contains associated hosted zones, you must delete them before you can delete your reusable delegation set. If you try to delete a reusable delegation set that contains associated hosted zones, Amazon Route 53 will deny your request with a DelegationSetInUse error.", + "DeleteTrafficPolicy": "

    Deletes a traffic policy. To delete a traffic policy, send a DELETE request to the 2013-04-01/trafficpolicy resource.

    ", + "DeleteTrafficPolicyInstance": "

    Deletes a traffic policy instance and all of the resource record sets that Amazon Route 53 created when you created the instance.

    To delete a traffic policy instance, send a DELETE request to the 2013-04-01/trafficpolicy/traffic policy instance ID resource.

    When you delete a traffic policy instance, Amazon Route 53 also deletes all of the resource record sets that were created when you created the traffic policy instance.", + "DisassociateVPCFromHostedZone": "

    This action disassociates a VPC from a hosted zone.

    To disassociate a VPC from a hosted zone, send a POST request to the 2013-04-01/hostedzone/hosted zone ID/disassociatevpc resource. The request body must include an XML document with a DisassociateVPCFromHostedZoneRequest element. The response returns the DisassociateVPCFromHostedZoneResponse element that contains ChangeInfo for you to track the progress of the DisassociateVPCFromHostedZoneRequest you made. See GetChange operation for how to track the progress of your change.

    ", + "GetChange": "

    This action returns the current status of a change batch request. The status is one of the following values:

    - PENDING indicates that the changes in this request have not replicated to all Amazon Route 53 DNS servers. This is the initial status of all change batch requests.

    - INSYNC indicates that the changes have replicated to all Amazon Route 53 DNS servers.

    ", + "GetChangeDetails": "

    This action returns the status and changes of a change batch request.

    ", + "GetCheckerIpRanges": "

    To retrieve a list of the IP ranges used by Amazon Route 53 health checkers to check the health of your resources, send a GET request to the 2013-04-01/checkeripranges resource. You can use these IP addresses to configure router and firewall rules to allow health checkers to check the health of your resources.

    ", + "GetGeoLocation": "

    To retrieve a single geo location, send a GET request to the 2013-04-01/geolocation resource with one of these options: continentcode | countrycode | countrycode and subdivisioncode.

    ", + "GetHealthCheck": "

    To retrieve the health check, send a GET request to the 2013-04-01/healthcheck/health check ID resource.

    ", + "GetHealthCheckCount": "

    To retrieve a count of all your health checks, send a GET request to the 2013-04-01/healthcheckcount resource.

    ", + "GetHealthCheckLastFailureReason": "

    If you want to learn why a health check is currently failing or why it failed most recently (if at all), you can get the failure reason for the most recent failure. Send a GET request to the 2013-04-01/healthcheck/health check ID/lastfailurereason resource.

    ", + "GetHealthCheckStatus": "

    To retrieve the health check status, send a GET request to the 2013-04-01/healthcheck/health check ID/status resource. You can use this call to get a health check's current status.

    ", + "GetHostedZone": "

    To retrieve the delegation set for a hosted zone, send a GET request to the 2013-04-01/hostedzone/hosted zone ID resource. The delegation set is the four Amazon Route 53 name servers that were assigned to the hosted zone when you created it.

    ", + "GetHostedZoneCount": "

    To retrieve a count of all your hosted zones, send a GET request to the 2013-04-01/hostedzonecount resource.

    ", + "GetReusableDelegationSet": "

    To retrieve the reusable delegation set, send a GET request to the 2013-04-01/delegationset/delegation set ID resource.

    ", + "GetTrafficPolicy": "

    Gets information about a specific traffic policy version. To get the information, send a GET request to the 2013-04-01/trafficpolicy resource.

    ", + "GetTrafficPolicyInstance": "

    Gets information about a specified traffic policy instance.

    To get information about the traffic policy instance, send a GET request to the 2013-04-01/trafficpolicyinstance resource.

    After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element. ", + "GetTrafficPolicyInstanceCount": "

    Gets the number of traffic policy instances that are associated with the current AWS account.

    To get the number of traffic policy instances, send a GET request to the 2013-04-01/trafficpolicyinstancecount resource.

    ", + "ListChangeBatchesByHostedZone": "

    This action gets the list of ChangeBatches in a given time period for a given hosted zone.

    ", + "ListChangeBatchesByRRSet": "

    This action gets the list of ChangeBatches in a given time period for a given hosted zone and RRSet.

    ", + "ListGeoLocations": "

    To retrieve a list of supported geo locations, send a GET request to the 2013-04-01/geolocations resource. The response to this request includes a GeoLocationDetailsList element with zero, one, or multiple GeoLocationDetails child elements. The list is sorted by country code, and then subdivision code, followed by continents at the end of the list.

    By default, the list of geo locations is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. If the list is truncated, IsTruncated will be set to true and a combination of NextContinentCode, NextCountryCode, NextSubdivisionCode will be populated. You can pass these as parameters to StartContinentCode, StartCountryCode, StartSubdivisionCode to control the geo location that the list begins with.

    ", + "ListHealthChecks": "

    To retrieve a list of your health checks, send a GET request to the 2013-04-01/healthcheck resource. The response to this request includes a HealthChecks element with zero, one, or multiple HealthCheck child elements. By default, the list of health checks is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the health check that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "ListHostedZones": "

    To retrieve a list of your hosted zones, send a GET request to the 2013-04-01/hostedzone resource. The response to this request includes a HostedZones element with zero, one, or multiple HostedZone child elements. By default, the list of hosted zones is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the hosted zone that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "ListHostedZonesByName": "

    To retrieve a list of your hosted zones in lexicographic order, send a GET request to the 2013-04-01/hostedzonesbyname resource. The response to this request includes a HostedZones element with zero or more HostedZone child elements lexicographically ordered by DNS name. By default, the list of hosted zones is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the DNSName and HostedZoneId parameters to control the hosted zone that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "ListResourceRecordSets": "

    Imagine all the resource record sets in a zone listed out in front of you. Imagine them sorted lexicographically first by DNS name (with the labels reversed, like \"com.amazon.www\" for example), and secondarily, lexicographically by record type. This operation retrieves at most MaxItems resource record sets from this list, in order, starting at a position specified by the Name and Type arguments:

    • If both Name and Type are omitted, this means start the results at the first RRSET in the HostedZone.
    • If Name is specified but Type is omitted, this means start the results at the first RRSET in the list whose name is greater than or equal to Name.
    • If both Name and Type are specified, this means start the results at the first RRSET in the list whose name is greater than or equal to Name and whose type is greater than or equal to Type.
    • It is an error to specify the Type but not the Name.

    Use ListResourceRecordSets to retrieve a single known record set by specifying the record set's name and type, and setting MaxItems = 1

    To retrieve all the records in a HostedZone, first pause any processes making calls to ChangeResourceRecordSets. Initially call ListResourceRecordSets without a Name and Type to get the first page of record sets. For subsequent calls, set Name and Type to the NextName and NextType values returned by the previous response.

    In the presence of concurrent ChangeResourceRecordSets calls, there is no consistency of results across calls to ListResourceRecordSets. The only way to get a consistent multi-page snapshot of all RRSETs in a zone is to stop making changes while pagination is in progress.

    However, the results from ListResourceRecordSets are consistent within a page. If MakeChange calls are taking place concurrently, the result of each one will either be completely visible in your results or not at all. You will not see partial changes, or changes that do not ultimately succeed. (This follows from the fact that MakeChange is atomic)

    The results from ListResourceRecordSets are strongly consistent with ChangeResourceRecordSets. To be precise, if a single process makes a call to ChangeResourceRecordSets and receives a successful response, the effects of that change will be visible in a subsequent call to ListResourceRecordSets by that process.

    ", + "ListReusableDelegationSets": "

    To retrieve a list of your reusable delegation sets, send a GET request to the 2013-04-01/delegationset resource. The response to this request includes a DelegationSets element with zero, one, or multiple DelegationSet child elements. By default, the list of delegation sets is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the delegation set that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "ListTagsForResource": null, + "ListTagsForResources": null, + "ListTrafficPolicies": "

    Gets information about the latest version for every traffic policy that is associated with the current AWS account. To get the information, send a GET request to the 2013-04-01/trafficpolicy resource.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policies, you can use the maxitems parameter to list them in groups of up to 100.

    The response includes three values that help you navigate from one group of maxitems traffic policies to the next:

    • IsTruncated
    • If the value of IsTruncated in the response is true, there are more traffic policies associated with the current AWS account.

      If IsTruncated is false, this response includes the last traffic policy that is associated with the current account.

    • TrafficPolicyIdMarker
    • If IsTruncated is true, TrafficPolicyIdMarker is the ID of the first traffic policy in the next group of MaxItems traffic policies. If you want to list more traffic policies, make another call to ListTrafficPolicies, and specify the value of the TrafficPolicyIdMarker element from the response in the TrafficPolicyIdMarker request parameter.

      If IsTruncated is false, the TrafficPolicyIdMarker element is omitted from the response.

    • MaxItems
    • The value that you specified for the MaxItems parameter in the request that produced the current response.

    ", + "ListTrafficPolicyInstances": "

    Gets information about the traffic policy instances that you created by using the current AWS account.

    After you submit an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

    To get information about the traffic policy instances that are associated with the current AWS account, send a GET request to the 2013-04-01/trafficpolicyinstance resource.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

    The response includes five values that help you navigate from one group of MaxItems traffic policy instances to the next:

    • IsTruncated
    • If the value of IsTruncated in the response is true, there are more traffic policy instances associated with the current AWS account.

      If IsTruncated is false, this response includes the last traffic policy instance that is associated with the current account.

    • MaxItems
    • The value that you specified for the MaxItems parameter in the request that produced the current response.

    • HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker
    • If IsTruncated is true, these three values in the response represent the first traffic policy instance in the next group of MaxItems traffic policy instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstances, and specify these values in the corresponding request parameters.

      If IsTruncated is false, all three elements are omitted from the response.

    ", + "ListTrafficPolicyInstancesByHostedZone": "

    Gets information about the traffic policy instances that you created in a specified hosted zone.

    After you submit an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

    To get information about the traffic policy instances that you created in a specified hosted zone, send a GET request to the 2013-04-01/trafficpolicyinstance resource and include the ID of the hosted zone.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

    The response includes four values that help you navigate from one group of MaxItems traffic policy instances to the next:

    • IsTruncated
    • If the value of IsTruncated in the response is true, there are more traffic policy instances associated with the current AWS account.

      If IsTruncated is false, this response includes the last traffic policy instance that is associated with the current account.

    • MaxItems
    • The value that you specified for the MaxItems parameter in the request that produced the current response.

    • TrafficPolicyInstanceNameMarker and TrafficPolicyInstanceTypeMarker
    • If IsTruncated is true, these two values in the response represent the first traffic policy instance in the next group of MaxItems traffic policy instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByHostedZone, and specify these values in the corresponding request parameters.

      If IsTruncated is false, both elements are omitted from the response.

    ", + "ListTrafficPolicyInstancesByPolicy": "

    Gets information about the traffic policy instances that you created by using a specified traffic policy version.

    After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

    To get information about the traffic policy instances that you created by using a specified traffic policy version, send a GET request to the 2013-04-01/trafficpolicyinstance resource and include the ID and version of the traffic policy.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

    The response includes five values that help you navigate from one group of MaxItems traffic policy instances to the next:

    • IsTruncated

      If the value of IsTruncated in the response is true, there are more traffic policy instances associated with the specified traffic policy.

      If IsTruncated is false, this response includes the last traffic policy instance that is associated with the specified traffic policy.

    • MaxItems

      The value that you specified for the MaxItems parameter in the request that produced the current response.

    • HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker

      If IsTruncated is true, these values in the response represent the first traffic policy instance in the next group of MaxItems traffic policy instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByPolicy, and specify these values in the corresponding request parameters.

      If IsTruncated is false, all three elements are omitted from the response.

    ", + "ListTrafficPolicyVersions": "

    Gets information about all of the versions for a specified traffic policy. ListTrafficPolicyVersions lists only versions that have not been deleted.

    Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policies, you can use the maxitems parameter to list them in groups of up to 100.

    The response includes three values that help you navigate from one group of maxitems traffic policies to the next:

    • IsTruncated
    • If the value of IsTruncated in the response is true, there are more traffic policy versions associated with the specified traffic policy.

      If IsTruncated is false, this response includes the last traffic policy version that is associated with the specified traffic policy.

    • TrafficPolicyVersionMarker
    • The ID of the next traffic policy version that is associated with the current AWS account. If you want to list more traffic policies, make another call to ListTrafficPolicyVersions, and specify the value of the TrafficPolicyVersionMarker element in the TrafficPolicyVersionMarker request parameter.

      If IsTruncated is false, Amazon Route 53 omits the TrafficPolicyVersionMarker element from the response.

    • MaxItems
    • The value that you specified for the MaxItems parameter in the request that produced the current response.

    ", + "UpdateHealthCheck": "

    This action updates an existing health check.

    To update a health check, send a POST request to the 2013-04-01/healthcheck/health check ID resource. The request body must include an XML document with an UpdateHealthCheckRequest element. The response returns an UpdateHealthCheckResponse element, which contains metadata about the health check.

    ", + "UpdateHostedZoneComment": "

    To update the hosted zone comment, send a POST request to the 2013-04-01/hostedzone/hosted zone ID resource. The request body must include an XML document with a UpdateHostedZoneCommentRequest element. The response to this request includes the modified HostedZone element.

    The comment can have a maximum length of 256 characters.", + "UpdateTrafficPolicyComment": "

    Updates the comment for a specified traffic policy version.

    To update the comment, send a POST request to the /2013-04-01/trafficpolicy/ resource.

    The request body must include an XML document with an UpdateTrafficPolicyCommentRequest element.

    ", + "UpdateTrafficPolicyInstance": "

    Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.

    The DNS type of the resource record sets that you're updating must match the DNS type in the JSON document that is associated with the traffic policy version that you're using to update the traffic policy instance.

    When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Amazon Route 53 performs the following operations:

    1. Amazon Route 53 creates a new group of resource record sets based on the specified traffic policy. This is true regardless of how substantial the differences are between the existing resource record sets and the new resource record sets.
    2. When all of the new resource record sets have been created, Amazon Route 53 starts to respond to DNS queries for the root resource record set name (such as example.com) by using the new resource record sets.
    3. Amazon Route 53 deletes the old group of resource record sets that are associated with the root resource record set name.

    To update a traffic policy instance, send a POST request to the /2013-04-01/trafficpolicyinstance/traffic policy ID resource. The request body must include an XML document with an UpdateTrafficPolicyInstanceRequest element.

    " + }, + "shapes": { + "AWSAccountID": { + "base": null, + "refs": { + "ChangeBatchRecord$Submitter": "

    The AWS account ID attached to the changes.

    " + } + }, + "AliasHealthEnabled": { + "base": null, + "refs": { + "AliasTarget$EvaluateTargetHealth": "

    Alias resource record sets only: If you set the value of EvaluateTargetHealth to true for the resource record set or sets in an alias, weighted alias, latency alias, or failover alias resource record set, and if you specify a value for HealthCheckId for every resource record set that is referenced by these alias resource record sets, the alias resource record sets inherit the health of the referenced resource record sets.

    In this configuration, when Amazon Route 53 receives a DNS query for an alias resource record set:

    1. Amazon Route 53 looks at the resource record sets that are referenced by the alias resource record sets to determine which health checks they're using.
    2. Amazon Route 53 checks the current status of each health check. (Amazon Route 53 periodically checks the health of the endpoint that is specified in a health check; it doesn't perform the health check when the DNS query arrives.)
    3. Based on the status of the health checks, Amazon Route 53 determines which resource record sets are healthy. Unhealthy resource record sets are immediately removed from consideration. In addition, if all of the resource record sets that are referenced by an alias resource record set are unhealthy, that alias resource record set also is immediately removed from consideration.
    4. Based on the configuration of the alias resource record sets (weighted alias or latency alias, for example) and the configuration of the resource record sets that they reference, Amazon Route 53 chooses a resource record set from the healthy resource record sets, and responds to the query.

    Note the following:

    • You cannot set EvaluateTargetHealth to true when the alias target is a CloudFront distribution.
    • If the AWS resource that you specify in AliasTarget is a resource record set or a group of resource record sets (for example, a group of weighted resource record sets), but it is not another alias resource record set, we recommend that you associate a health check with all of the resource record sets in the alias target. For more information, see What Happens When You Omit Health Checks? in the Amazon Route 53 Developer Guide.
    • If you specify an ELB load balancer in AliasTarget, Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances that are registered with the load balancer. If no Amazon EC2 instances are healthy or if the load balancer itself is unhealthy, and if EvaluateTargetHealth is true for the corresponding alias resource record set, Amazon Route 53 routes queries to other resources.
    • When you create a load balancer, you configure settings for Elastic Load Balancing health checks; they're not Amazon Route 53 health checks, but they perform a similar function. Do not create Amazon Route 53 health checks for the Amazon EC2 instances that you register with an ELB load balancer. For more information, see How Health Checks Work in More Complex Amazon Route 53 Configurations in the Amazon Route 53 Developer Guide.

    We recommend that you set EvaluateTargetHealth to true only when you have enough idle capacity to handle the failure of one or more endpoints.

    For more information and examples, see Amazon Route 53 Health Checks and DNS Failover in the Amazon Route 53 Developer Guide.

    " + } + }, + "AliasTarget": { + "base": "

    Alias resource record sets only: Information about the CloudFront distribution, ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set to which you are routing traffic.

    If you're creating resource record sets for a private hosted zone, note the following:

    • You can create alias resource record sets only for Amazon Route 53 resource record sets in the same private hosted zone. Creating alias resource record sets for CloudFront distributions, ELB load balancers, and Amazon S3 buckets is not supported.
    • You can't create alias resource record sets for failover, geolocation, or latency resource record sets in a private hosted zone.

    For more information and an example, see Example: Creating Alias Resource Record Sets in the Amazon Route 53 API Reference.

    ", + "refs": { + "ResourceRecordSet$AliasTarget": "

    Alias resource record sets only: Information about the AWS resource to which you are redirecting traffic.

    " + } + }, + "AssociateVPCComment": { + "base": null, + "refs": { + "AssociateVPCWithHostedZoneRequest$Comment": "

    Optional: Any comments you want to include about an AssociateVPCWithHostedZoneRequest.

    " + } + }, + "AssociateVPCWithHostedZoneRequest": { + "base": "

    A complex type that contains information about the request to associate a VPC with a hosted zone.

    ", + "refs": { + } + }, + "AssociateVPCWithHostedZoneResponse": { + "base": "

    A complex type containing the response information for the request.

    ", + "refs": { + } + }, + "Change": { + "base": "

    A complex type that contains the information for each change in a change batch request.

    ", + "refs": { + "Changes$member": null + } + }, + "ChangeAction": { + "base": null, + "refs": { + "Change$Action": "

    The action to perform:

    • CREATE: Creates a resource record set that has the specified values.
    • DELETE: Deletes an existing resource record set that has the specified values for Name, Type, SetIdentifier (for latency, weighted, geolocation, and failover resource record sets), and TTL (except alias resource record sets, for which the TTL is determined by the AWS resource that you're routing DNS queries to).
    • UPSERT: If a resource record set does not already exist, Amazon Route 53 creates it. If a resource record set does exist, Amazon Route 53 updates it with the values in the request. Amazon Route 53 can update an existing resource record set only when all of the following values match: Name, Type, and SetIdentifier (for weighted, latency, geolocation, and failover resource record sets).
    " + } + }, + "ChangeBatch": { + "base": "

    A complex type that contains an optional comment and the changes that you want to make with a change batch request.

    ", + "refs": { + "ChangeResourceRecordSetsRequest$ChangeBatch": "

    A complex type that contains an optional comment and the Changes element.

    " + } + }, + "ChangeBatchRecord": { + "base": "

    A complex type that lists the changes and information for a ChangeBatch.

    ", + "refs": { + "ChangeBatchRecords$member": null, + "GetChangeDetailsResponse$ChangeBatchRecord": "

    A complex type that contains information about the specified change batch, including the change batch ID, the status of the change, and the contained changes.

    " + } + }, + "ChangeBatchRecords": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneResponse$ChangeBatchRecords": "

    The change batches within the given hosted zone and time period.

    ", + "ListChangeBatchesByRRSetResponse$ChangeBatchRecords": "

    The change batches within the given hosted zone and time period.

    " + } + }, + "ChangeInfo": { + "base": "

    A complex type that describes change information about changes made to your hosted zone.

    This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

    ", + "refs": { + "AssociateVPCWithHostedZoneResponse$ChangeInfo": "

    A complex type that contains the ID, the status, and the date and time of your AssociateVPCWithHostedZoneRequest.

    ", + "ChangeResourceRecordSetsResponse$ChangeInfo": "

    A complex type that contains information about changes made to your hosted zone.

    This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

    ", + "CreateHostedZoneResponse$ChangeInfo": "

    A complex type that contains information about the request to create a hosted zone. This includes an ID that you use when you call the GetChange action to get the current status of the change request.

    ", + "DeleteHostedZoneResponse$ChangeInfo": "

    A complex type that contains the ID, the status, and the date and time of your delete request.

    ", + "DisassociateVPCFromHostedZoneResponse$ChangeInfo": "

    A complex type that contains the ID, the status, and the date and time of your DisassociateVPCFromHostedZoneRequest.

    ", + "GetChangeResponse$ChangeInfo": "

    A complex type that contains information about the specified change batch, including the change batch ID, the status of the change, and the date and time of the request.

    " + } + }, + "ChangeResourceRecordSetsRequest": { + "base": "

    A complex type that contains a change batch.

    ", + "refs": { + } + }, + "ChangeResourceRecordSetsResponse": { + "base": "

    A complex type containing the response for the request.

    ", + "refs": { + } + }, + "ChangeStatus": { + "base": null, + "refs": { + "ChangeBatchRecord$Status": "

    The current state of the request. PENDING indicates that this request has not yet been applied to all Amazon Route 53 DNS servers.

    Valid Values: PENDING | INSYNC

    ", + "ChangeInfo$Status": "

    The current state of the request. PENDING indicates that this request has not yet been applied to all Amazon Route 53 DNS servers.

    Valid Values: PENDING | INSYNC

    " + } + }, + "ChangeTagsForResourceRequest": { + "base": "

    A complex type containing information about a request to add, change, or delete the tags that are associated with a resource.

    ", + "refs": { + } + }, + "ChangeTagsForResourceResponse": { + "base": "

    Empty response for the request.

    ", + "refs": { + } + }, + "Changes": { + "base": null, + "refs": { + "ChangeBatch$Changes": "

    A complex type that contains one Change element for each resource record set that you want to create or delete.

    ", + "ChangeBatchRecord$Changes": "

    A list of changes made in the ChangeBatch.

    " + } + }, + "CheckerIpRanges": { + "base": null, + "refs": { + "GetCheckerIpRangesResponse$CheckerIpRanges": "

    A complex type that contains sorted list of IP ranges in CIDR format for Amazon Route 53 health checkers.

    " + } + }, + "ChildHealthCheckList": { + "base": null, + "refs": { + "HealthCheckConfig$ChildHealthChecks": "

    For a specified parent health check, a list of HealthCheckId values for the associated child health checks.

    ", + "UpdateHealthCheckRequest$ChildHealthChecks": "

    For a specified parent health check, a list of HealthCheckId values for the associated child health checks.

    Specify this value only if you want to change it.

    " + } + }, + "ConcurrentModification": { + "base": "

    Another user submitted a request to update the object at the same time that you did. Retry the request.

    ", + "refs": { + } + }, + "ConflictingDomainExists": { + "base": null, + "refs": { + } + }, + "ConflictingTypes": { + "base": "

    You tried to update a traffic policy instance by using a traffic policy version that has a different DNS type than the current type for the instance. You specified the type in the JSON document in the CreateTrafficPolicy or CreateTrafficPolicyVersion request.

    ", + "refs": { + } + }, + "CreateHealthCheckRequest": { + "base": "

    A complex type that contains information about the request to create a health check.

    ", + "refs": { + } + }, + "CreateHealthCheckResponse": { + "base": "

    A complex type containing the response information for the new health check.

    ", + "refs": { + } + }, + "CreateHostedZoneRequest": { + "base": "

    A complex type that contains information about the request to create a hosted zone.

    ", + "refs": { + } + }, + "CreateHostedZoneResponse": { + "base": "

    A complex type containing the response information for the new hosted zone.

    ", + "refs": { + } + }, + "CreateReusableDelegationSetRequest": { + "base": null, + "refs": { + } + }, + "CreateReusableDelegationSetResponse": { + "base": null, + "refs": { + } + }, + "CreateTrafficPolicyInstanceRequest": { + "base": "

    A complex type that contains information about the resource record sets that you want to create based on a specified traffic policy.

    ", + "refs": { + } + }, + "CreateTrafficPolicyInstanceResponse": { + "base": "

    A complex type that contains the response information for the CreateTrafficPolicyInstance request.

    ", + "refs": { + } + }, + "CreateTrafficPolicyRequest": { + "base": "

    A complex type that contains information about the traffic policy that you want to create.

    ", + "refs": { + } + }, + "CreateTrafficPolicyResponse": { + "base": "

    A complex type that contains the response information for the CreateTrafficPolicy request.

    ", + "refs": { + } + }, + "CreateTrafficPolicyVersionRequest": { + "base": "

    A complex type that contains information about the traffic policy for which you want to create a new version.

    ", + "refs": { + } + }, + "CreateTrafficPolicyVersionResponse": { + "base": "

    A complex type that contains the response information for the CreateTrafficPolicyVersion request.

    ", + "refs": { + } + }, + "DNSName": { + "base": null, + "refs": { + "AliasTarget$DNSName": "

    Alias resource record sets only: The external DNS name associated with the AWS Resource. The value that you specify depends on where you want to route queries:

    • A CloudFront distribution: Specify the domain name that CloudFront assigned when you created your distribution. Your CloudFront distribution must include an alternate domain name that matches the name of the resource record set. For example, if the name of the resource record set is acme.example.com, your CloudFront distribution must include acme.example.com as one of the alternate domain names. For more information, see Using Alternate Domain Names (CNAMEs) in the Amazon CloudFront Developer Guide.
    • An ELB load balancer: Specify the DNS name associated with the load balancer. You can get the DNS name by using the AWS Management Console, the ELB API, or the AWS CLI. Use the same method to get values for HostedZoneId and DNSName. If you get one value from the console and the other value from the API or the CLI, creating the resource record set will fail.
    • An Amazon S3 bucket that is configured as a static website: Specify the domain name of the Amazon S3 website endpoint in which you created the bucket; for example, s3-website-us-east-1.amazonaws.com. For more information about valid values, see the table Amazon Simple Storage Service (S3) Website Endpoints in the Amazon Web Services General Reference. For more information about using Amazon S3 buckets for websites, see Hosting a Static Website on Amazon S3 in the Amazon Simple Storage Service Developer Guide.

    For more information and an example, see Example: Creating Alias Resource Record Sets in the Amazon Route 53 API Reference.

    ", + "CreateHostedZoneRequest$Name": "

    The name of the domain. This must be a fully-specified domain, for example, www.example.com. The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Amazon Route 53 treats www.example.com (without a trailing dot) and www.example.com. (with a trailing dot) as identical.

    This is the name you have registered with your DNS registrar. You should ask your registrar to change the authoritative name servers for your domain to the set of NameServers elements returned in DelegationSet.

    ", + "CreateTrafficPolicyInstanceRequest$Name": "

    The domain name (such as example.com) or subdomain name (such as www.example.com) for which Amazon Route 53 responds to DNS queries by using the resource record sets that Amazon Route 53 creates for this traffic policy instance.

    ", + "DelegationSetNameServers$member": null, + "HostedZone$Name": "

    The name of the domain. This must be a fully-specified domain, for example, www.example.com. The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Amazon Route 53 treats www.example.com (without a trailing dot) and www.example.com. (with a trailing dot) as identical.

    This is the name you have registered with your DNS registrar. You should ask your registrar to change the authoritative name servers for your domain to the set of NameServers elements returned in DelegationSet.

    ", + "ListChangeBatchesByRRSetRequest$Name": "

    The name of the RRSet that you want to see changes for.

    ", + "ListHostedZonesByNameRequest$DNSName": "

    The first name in the lexicographic ordering of domain names that you want the ListHostedZonesByNameRequest request to list.

    If the request returned more than one page of results, submit another request and specify the value of NextDNSName and NextHostedZoneId from the last response in the DNSName and HostedZoneId parameters to get the next page of results.

    ", + "ListHostedZonesByNameResponse$DNSName": "

    The DNSName value sent in the request.

    ", + "ListHostedZonesByNameResponse$NextDNSName": "

    If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted zones associated with the current AWS account. To get the next page of results, make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId in the ListHostedZonesByNameRequest$HostedZoneId element.

    ", + "ListResourceRecordSetsRequest$StartRecordName": "

    The first name in the lexicographic ordering of domain names that you want the ListResourceRecordSets request to list.

    ", + "ListResourceRecordSetsResponse$NextRecordName": "

    If the results were truncated, the name of the next record in the list. This element is present only if ListResourceRecordSetsResponse$IsTruncated is true.

    ", + "ListTrafficPolicyInstancesByHostedZoneRequest$TrafficPolicyInstanceNameMarker": "

    For the first request to ListTrafficPolicyInstancesByHostedZone, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    If the value of IsTruncated in the previous response was false, omit this value.

    ", + "ListTrafficPolicyInstancesByHostedZoneResponse$TrafficPolicyInstanceNameMarker": "

    If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$TrafficPolicyInstanceNameMarker": "

    For the first request to ListTrafficPolicyInstancesByPolicy, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    If the value of IsTruncated in the previous response was false, omit this value.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$TrafficPolicyInstanceNameMarker": "

    If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesRequest$TrafficPolicyInstanceNameMarker": "

    For the first request to ListTrafficPolicyInstances, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get.

    ", + "ListTrafficPolicyInstancesResponse$TrafficPolicyInstanceNameMarker": "

    If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ResourceRecordSet$Name": "

    The name of the domain you want to perform the action on.

    Enter a fully qualified domain name, for example, www.example.com. You can optionally include a trailing dot. If you omit the trailing dot, Amazon Route 53 still assumes that the domain name that you specify is fully qualified. This means that Amazon Route 53 treats www.example.com (without a trailing dot) and www.example.com. (with a trailing dot) as identical.

    For information about how to specify characters other than a-z, 0-9, and - (hyphen) and how to specify internationalized domain names, see DNS Domain Name Format in the Amazon Route 53 Developer Guide.

    You can use an asterisk (*) character in the name. DNS treats the * character either as a wildcard or as the * character (ASCII 42), depending on where it appears in the name. For more information, see Using an Asterisk (*) in the Names of Hosted Zones and Resource Record Sets in the Amazon Route 53 Developer Guide

    You can't use the * wildcard for resource records sets that have a type of NS.", + "TrafficPolicyInstance$Name": null + } + }, + "Date": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneRequest$StartDate": "

    The start of the time period you want to see changes for.

    ", + "ListChangeBatchesByHostedZoneRequest$EndDate": "

    The end of the time period you want to see changes for.

    ", + "ListChangeBatchesByRRSetRequest$StartDate": "

    The start of the time period you want to see changes for.

    ", + "ListChangeBatchesByRRSetRequest$EndDate": "

    The end of the time period you want to see changes for.

    " + } + }, + "DelegationSet": { + "base": "

    A complex type that contains name server information.

    ", + "refs": { + "CreateHostedZoneResponse$DelegationSet": "

    A complex type that contains name server information.

    ", + "CreateReusableDelegationSetResponse$DelegationSet": "

    A complex type that contains name server information.

    ", + "DelegationSets$member": null, + "GetHostedZoneResponse$DelegationSet": "

    A complex type that contains information about the name servers for the specified hosted zone.

    ", + "GetReusableDelegationSetResponse$DelegationSet": "

    A complex type that contains the information about the nameservers for the specified delegation set ID.

    " + } + }, + "DelegationSetAlreadyCreated": { + "base": "

    A delegation set with the same owner and caller reference combination has already been created.

    ", + "refs": { + } + }, + "DelegationSetAlreadyReusable": { + "base": "

    The specified delegation set has already been marked as reusable.

    ", + "refs": { + } + }, + "DelegationSetInUse": { + "base": "

    The specified delegation contains associated hosted zones which must be deleted before the reusable delegation set can be deleted.

    ", + "refs": { + } + }, + "DelegationSetNameServers": { + "base": null, + "refs": { + "DelegationSet$NameServers": "

    A complex type that contains the authoritative name servers for the hosted zone. Use the method provided by your domain registrar to add an NS record to your domain for each NameServer that is assigned to your hosted zone.

    " + } + }, + "DelegationSetNotAvailable": { + "base": "

    Amazon Route 53 allows some duplicate domain names, but there is a maximum number of duplicate names. This error indicates that you have reached that maximum. If you want to create another hosted zone with the same name and Amazon Route 53 generates this error, you can request an increase to the limit on the Contact Us page.

    ", + "refs": { + } + }, + "DelegationSetNotReusable": { + "base": "

    The specified delegation set has not been marked as reusable.

    ", + "refs": { + } + }, + "DelegationSets": { + "base": null, + "refs": { + "ListReusableDelegationSetsResponse$DelegationSets": "

    A complex type that contains information about the reusable delegation sets associated with the current AWS account.

    " + } + }, + "DeleteHealthCheckRequest": { + "base": "

    A complex type containing the request information for delete health check.

    ", + "refs": { + } + }, + "DeleteHealthCheckResponse": { + "base": "

    Empty response for the request.

    ", + "refs": { + } + }, + "DeleteHostedZoneRequest": { + "base": "

    A complex type that contains information about the hosted zone that you want to delete.

    ", + "refs": { + } + }, + "DeleteHostedZoneResponse": { + "base": "

    A complex type containing the response information for the request.

    ", + "refs": { + } + }, + "DeleteReusableDelegationSetRequest": { + "base": "

    A complex type containing the information for the delete request.

    ", + "refs": { + } + }, + "DeleteReusableDelegationSetResponse": { + "base": "

    Empty response for the request.

    ", + "refs": { + } + }, + "DeleteTrafficPolicyInstanceRequest": { + "base": "

    A complex type that contains information about the traffic policy instance that you want to delete.

    ", + "refs": { + } + }, + "DeleteTrafficPolicyInstanceResponse": { + "base": "

    An empty element.

    ", + "refs": { + } + }, + "DeleteTrafficPolicyRequest": { + "base": "

    A request to delete a specified traffic policy version.

    ", + "refs": { + } + }, + "DeleteTrafficPolicyResponse": { + "base": "

    An empty element.

    ", + "refs": { + } + }, + "DisassociateVPCComment": { + "base": null, + "refs": { + "DisassociateVPCFromHostedZoneRequest$Comment": "

    Optional: Any comments you want to include about a DisassociateVPCFromHostedZoneRequest.

    " + } + }, + "DisassociateVPCFromHostedZoneRequest": { + "base": "

    A complex type that contains information about the request to disassociate a VPC from a hosted zone.

    ", + "refs": { + } + }, + "DisassociateVPCFromHostedZoneResponse": { + "base": "

    A complex type containing the response information for the request.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ConcurrentModification$message": "

    Descriptive message for the error response.

    ", + "ConflictingDomainExists$message": null, + "ConflictingTypes$message": "

    Descriptive message for the error response.

    ", + "DelegationSetAlreadyCreated$message": "

    Descriptive message for the error response.

    ", + "DelegationSetAlreadyReusable$message": "

    Descriptive message for the error response.

    ", + "DelegationSetInUse$message": "

    Descriptive message for the error response.

    ", + "DelegationSetNotAvailable$message": "

    Descriptive message for the error response.

    ", + "DelegationSetNotReusable$message": "

    Descriptive message for the error response.

    ", + "ErrorMessages$member": null, + "HealthCheckAlreadyExists$message": "

    Descriptive message for the error response.

    ", + "HealthCheckInUse$message": "

    Descriptive message for the error response.

    ", + "HealthCheckVersionMismatch$message": null, + "HostedZoneAlreadyExists$message": "

    Descriptive message for the error response.

    ", + "HostedZoneNotEmpty$message": "

    Descriptive message for the error response.

    ", + "HostedZoneNotFound$message": "

    Descriptive message for the error response.

    ", + "IncompatibleVersion$message": null, + "InvalidArgument$message": "

    Descriptive message for the error response.

    ", + "InvalidDomainName$message": "

    Descriptive message for the error response.

    ", + "InvalidInput$message": "

    Descriptive message for the error response.

    ", + "InvalidTrafficPolicyDocument$message": "

    Descriptive message for the error response.

    ", + "InvalidVPCId$message": "

    Descriptive message for the error response.

    ", + "LastVPCAssociation$message": "

    Descriptive message for the error response.

    ", + "LimitsExceeded$message": "

    Descriptive message for the error response.

    ", + "NoSuchChange$message": null, + "NoSuchDelegationSet$message": "

    Descriptive message for the error response.

    ", + "NoSuchGeoLocation$message": "

    Descriptive message for the error response.

    ", + "NoSuchHealthCheck$message": "

    Descriptive message for the error response.

    ", + "NoSuchHostedZone$message": null, + "NoSuchTrafficPolicy$message": "

    Descriptive message for the error response.

    ", + "NoSuchTrafficPolicyInstance$message": "

    Descriptive message for the error response.

    ", + "PriorRequestNotComplete$message": null, + "PublicZoneVPCAssociation$message": "

    Descriptive message for the error response.

    ", + "ThrottlingException$message": null, + "TooManyHealthChecks$message": null, + "TooManyHostedZones$message": "

    Descriptive message for the error response.

    ", + "TooManyTrafficPolicies$message": "

    Descriptive message for the error response.

    ", + "TooManyTrafficPolicyInstances$message": "

    Descriptive message for the error response.

    ", + "TrafficPolicyAlreadyExists$message": "

    Descriptive message for the error response.

    ", + "TrafficPolicyInUse$message": "

    Descriptive message for the error response.

    ", + "TrafficPolicyInstanceAlreadyExists$message": "

    Descriptive message for the error response.

    ", + "VPCAssociationNotFound$message": "

    Descriptive message for the error response.

    " + } + }, + "ErrorMessages": { + "base": null, + "refs": { + "InvalidChangeBatch$messages": "

    Descriptive message for the error response.

    " + } + }, + "FailureThreshold": { + "base": null, + "refs": { + "HealthCheckConfig$FailureThreshold": "

    The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa.

    Valid values are integers between 1 and 10. For more information, see \"How Amazon Route 53 Determines Whether an Endpoint Is Healthy\" in the Amazon Route 53 Developer Guide.

    ", + "UpdateHealthCheckRequest$FailureThreshold": "

    The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa.

    Valid values are integers between 1 and 10. For more information, see \"How Amazon Route 53 Determines Whether an Endpoint Is Healthy\" in the Amazon Route 53 Developer Guide.

    Specify this value only if you want to change it.

    " + } + }, + "FullyQualifiedDomainName": { + "base": null, + "refs": { + "HealthCheckConfig$FullyQualifiedDomainName": "

    Fully qualified domain name of the instance to be health checked.

    ", + "UpdateHealthCheckRequest$FullyQualifiedDomainName": "

    Fully qualified domain name of the instance to be health checked.

    Specify this value only if you want to change it.

    " + } + }, + "GeoLocation": { + "base": "

    A complex type that contains information about a geo location.

    ", + "refs": { + "ResourceRecordSet$GeoLocation": "

    Geo location resource record sets only: A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of 192.0.2.111, create a resource record set with a Type of A and a ContinentCode of AF.

    You can create geolocation and geolocation alias resource record sets only in public hosted zones.

    If you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.

    You cannot create two geolocation resource record sets that specify the same geographic location.

    The value * in the CountryCode element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the Name and Type elements.

    Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Amazon Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of CountryCode is *, which handles both queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a * resource record set, Amazon Route 53 returns a \"no answer\" response for queries from those locations.

    You cannot create non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation resource record sets.

    " + } + }, + "GeoLocationContinentCode": { + "base": null, + "refs": { + "GeoLocation$ContinentCode": "

    The code for a continent geo location. Note: only continent locations have a continent code.

    Valid values: AF | AN | AS | EU | OC | NA | SA

    Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput error.

    ", + "GeoLocationDetails$ContinentCode": "

    The code for a continent geo location. Note: only continent locations have a continent code.

    ", + "GetGeoLocationRequest$ContinentCode": "

    The code for a continent geo location. Note: only continent locations have a continent code.

    Valid values: AF | AN | AS | EU | OC | NA | SA

    Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput error.

    ", + "ListGeoLocationsRequest$StartContinentCode": "

    The first continent code in the lexicographic ordering of geo locations that you want the ListGeoLocations request to list. For non-continent geo locations, this should be null.

    Valid values: AF | AN | AS | EU | OC | NA | SA

    Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput error.

    ", + "ListGeoLocationsResponse$NextContinentCode": "

    If the results were truncated, the continent code of the next geo location in the list. This element is present only if ListGeoLocationsResponse$IsTruncated is true and the next geo location to list is a continent location.

    " + } + }, + "GeoLocationContinentName": { + "base": null, + "refs": { + "GeoLocationDetails$ContinentName": "

    The name of the continent. This element is only present if ContinentCode is also present.

    " + } + }, + "GeoLocationCountryCode": { + "base": null, + "refs": { + "GeoLocation$CountryCode": "

    The code for a country geo location. The default location uses '*' for the country code and will match all locations that are not matched by a geo location.

    The default geo location uses a * for the country code. All other country codes follow the ISO 3166 two-character code.

    ", + "GeoLocationDetails$CountryCode": "

    The code for a country geo location. The default location uses '*' for the country code and will match all locations that are not matched by a geo location.

    The default geo location uses a * for the country code. All other country codes follow the ISO 3166 two-character code.

    ", + "GetGeoLocationRequest$CountryCode": "

    The code for a country geo location. The default location uses '*' for the country code and will match all locations that are not matched by a geo location.

    The default geo location uses a * for the country code. All other country codes follow the ISO 3166 two-character code.

    ", + "ListGeoLocationsRequest$StartCountryCode": "

    The first country code in the lexicographic ordering of geo locations that you want the ListGeoLocations request to list.

    The default geo location uses a * for the country code. All other country codes follow the ISO 3166 two-character code.

    ", + "ListGeoLocationsResponse$NextCountryCode": "

    If the results were truncated, the country code of the next geo location in the list. This element is present only if ListGeoLocationsResponse$IsTruncated is true and the next geo location to list is not a continent location.

    " + } + }, + "GeoLocationCountryName": { + "base": null, + "refs": { + "GeoLocationDetails$CountryName": "

    The name of the country. This element is only present if CountryCode is also present.

    " + } + }, + "GeoLocationDetails": { + "base": "

    A complex type that contains information about a GeoLocation.

    ", + "refs": { + "GeoLocationDetailsList$member": null, + "GetGeoLocationResponse$GeoLocationDetails": "

    A complex type that contains the information about the specified geo location.

    " + } + }, + "GeoLocationDetailsList": { + "base": null, + "refs": { + "ListGeoLocationsResponse$GeoLocationDetailsList": "

    A complex type that contains information about the geo locations that are returned by the request.

    " + } + }, + "GeoLocationSubdivisionCode": { + "base": null, + "refs": { + "GeoLocation$SubdivisionCode": "

    The code for a country's subdivision (e.g., a province of Canada). A subdivision code is only valid with the appropriate country code.

    Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput error.

    ", + "GeoLocationDetails$SubdivisionCode": "

    The code for a country's subdivision (e.g., a province of Canada). A subdivision code is only valid with the appropriate country code.

    ", + "GetGeoLocationRequest$SubdivisionCode": "

    The code for a country's subdivision (e.g., a province of Canada). A subdivision code is only valid with the appropriate country code.

    Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput error.

    ", + "ListGeoLocationsRequest$StartSubdivisionCode": "

    The first subdivision code in the lexicographic ordering of geo locations that you want the ListGeoLocations request to list.

    Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput error.

    ", + "ListGeoLocationsResponse$NextSubdivisionCode": "

    If the results were truncated, the subdivision code of the next geo location in the list. This element is present only if ListGeoLocationsResponse$IsTruncated is true and the next geo location has a subdivision.

    " + } + }, + "GeoLocationSubdivisionName": { + "base": null, + "refs": { + "GeoLocationDetails$SubdivisionName": "

    The name of the subdivision. This element is only present if SubdivisionCode is also present.

    " + } + }, + "GetChangeDetailsRequest": { + "base": "

    The input for a GetChangeDetails request.

    ", + "refs": { + } + }, + "GetChangeDetailsResponse": { + "base": "

    A complex type that contains the ChangeBatchRecord element.

    ", + "refs": { + } + }, + "GetChangeRequest": { + "base": "

    The input for a GetChange request.

    ", + "refs": { + } + }, + "GetChangeResponse": { + "base": "

    A complex type that contains the ChangeInfo element.

    ", + "refs": { + } + }, + "GetCheckerIpRangesRequest": { + "base": "

    Empty request.

    ", + "refs": { + } + }, + "GetCheckerIpRangesResponse": { + "base": "

    A complex type that contains the CheckerIpRanges element.

    ", + "refs": { + } + }, + "GetGeoLocationRequest": { + "base": "

    A complex type that contains information about the request to get a geo location.

    ", + "refs": { + } + }, + "GetGeoLocationResponse": { + "base": "

    A complex type containing information about the specified geo location.

    ", + "refs": { + } + }, + "GetHealthCheckCountRequest": { + "base": "

    To retrieve a count of all your health checks, send a GET request to the 2013-04-01/healthcheckcount resource.

    ", + "refs": { + } + }, + "GetHealthCheckCountResponse": { + "base": "

    A complex type that contains the count of health checks associated with the current AWS account.

    ", + "refs": { + } + }, + "GetHealthCheckLastFailureReasonRequest": { + "base": "

    A complex type that contains information about the request to get the most recent failure reason for a health check.

    ", + "refs": { + } + }, + "GetHealthCheckLastFailureReasonResponse": { + "base": "

    A complex type that contains information about the most recent failure for the specified health check.

    ", + "refs": { + } + }, + "GetHealthCheckRequest": { + "base": "

    A complex type that contains information about the request to get a health check.

    ", + "refs": { + } + }, + "GetHealthCheckResponse": { + "base": "

    A complex type containing information about the specified health check.

    ", + "refs": { + } + }, + "GetHealthCheckStatusRequest": { + "base": "

    A complex type that contains information about the request to get health check status for a health check.

    ", + "refs": { + } + }, + "GetHealthCheckStatusResponse": { + "base": "

    A complex type that contains information about the status of the specified health check.

    ", + "refs": { + } + }, + "GetHostedZoneCountRequest": { + "base": "

    To retrieve a count of all your hosted zones, send a GET request to the 2013-04-01/hostedzonecount resource.

    ", + "refs": { + } + }, + "GetHostedZoneCountResponse": { + "base": "

    A complex type that contains the count of hosted zones associated with the current AWS account.

    ", + "refs": { + } + }, + "GetHostedZoneRequest": { + "base": "

    The input for a GetHostedZone request.

    ", + "refs": { + } + }, + "GetHostedZoneResponse": { + "base": "

    A complex type containing information about the specified hosted zone.

    ", + "refs": { + } + }, + "GetReusableDelegationSetRequest": { + "base": "

    The input for a GetReusableDelegationSet request.

    ", + "refs": { + } + }, + "GetReusableDelegationSetResponse": { + "base": "

    A complex type containing information about the specified reusable delegation set.

    ", + "refs": { + } + }, + "GetTrafficPolicyInstanceCountRequest": { + "base": "

    To retrieve a count of all your traffic policy instances, send a GET request to the 2013-04-01/trafficpolicyinstancecount resource.

    ", + "refs": { + } + }, + "GetTrafficPolicyInstanceCountResponse": { + "base": "

    A complex type that contains information about the number of traffic policy instances that are associated with the current AWS account.

    ", + "refs": { + } + }, + "GetTrafficPolicyInstanceRequest": { + "base": "

    Gets information about a specified traffic policy instance.

    To get information about a traffic policy instance, send a GET request to the 2013-04-01/trafficpolicyinstance/Id resource.

    ", + "refs": { + } + }, + "GetTrafficPolicyInstanceResponse": { + "base": "

    A complex type that contains information about the resource record sets that Amazon Route 53 created based on a specified traffic policy.

    ", + "refs": { + } + }, + "GetTrafficPolicyRequest": { + "base": "

    Gets information about a specific traffic policy version. To get the information, send a GET request to the 2013-04-01/trafficpolicy resource, and specify the ID and the version of the traffic policy.

    ", + "refs": { + } + }, + "GetTrafficPolicyResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "HealthCheck": { + "base": "

    A complex type that contains identifying information about the health check.

    ", + "refs": { + "CreateHealthCheckResponse$HealthCheck": "

    A complex type that contains identifying information about the health check.

    ", + "GetHealthCheckResponse$HealthCheck": "

    A complex type that contains the information about the specified health check.

    ", + "HealthChecks$member": null, + "UpdateHealthCheckResponse$HealthCheck": null + } + }, + "HealthCheckAlreadyExists": { + "base": "

    The health check you are trying to create already exists. Amazon Route 53 returns this error when a health check has already been created with the specified CallerReference.

    ", + "refs": { + } + }, + "HealthCheckConfig": { + "base": "

    A complex type that contains the health check configuration.

    ", + "refs": { + "CreateHealthCheckRequest$HealthCheckConfig": "

    A complex type that contains health check configuration.

    ", + "HealthCheck$HealthCheckConfig": "

    A complex type that contains the health check configuration.

    " + } + }, + "HealthCheckCount": { + "base": null, + "refs": { + "GetHealthCheckCountResponse$HealthCheckCount": "

    The number of health checks associated with the current AWS account.

    " + } + }, + "HealthCheckId": { + "base": null, + "refs": { + "ChildHealthCheckList$member": null, + "DeleteHealthCheckRequest$HealthCheckId": "

    The ID of the health check to delete.

    ", + "GetHealthCheckLastFailureReasonRequest$HealthCheckId": "

    The ID of the health check for which you want to retrieve the reason for the most recent failure.

    ", + "GetHealthCheckRequest$HealthCheckId": "

    The ID of the health check to retrieve.

    ", + "GetHealthCheckStatusRequest$HealthCheckId": "

    If you want Amazon Route 53 to return this resource record set in response to a DNS query only when a health check is passing, include the HealthCheckId element and specify the ID of the applicable health check.

    Amazon Route 53 determines whether a resource record set is healthy by periodically sending a request to the endpoint that is specified in the health check. If that endpoint returns an HTTP status code of 2xx or 3xx, the endpoint is healthy. If the endpoint returns an HTTP status code of 400 or greater, or if the endpoint doesn't respond for a certain amount of time, Amazon Route 53 considers the endpoint unhealthy and also considers the resource record set unhealthy.

    The HealthCheckId element is only useful when Amazon Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Amazon Route 53 to base the choice in part on the status of a health check. Configuring health checks only makes sense in the following configurations:

    • You're checking the health of the resource record sets in a weighted, latency, geolocation, or failover resource record set, and you specify health check IDs for all of the resource record sets. If the health check for one resource record set specifies an endpoint that is not healthy, Amazon Route 53 stops responding to queries using the value for that resource record set.
    • You set EvaluateTargetHealth to true for the resource record sets in an alias, weighted alias, latency alias, geolocation alias, or failover alias resource record set, and you specify health check IDs for all of the resource record sets that are referenced by the alias resource record sets. For more information about this configuration, see EvaluateTargetHealth.

      Amazon Route 53 doesn't check the health of the endpoint specified in the resource record set, for example, the endpoint specified by the IP address in the Value element. When you add a HealthCheckId element to a resource record set, Amazon Route 53 checks the health of the endpoint that you specified in the health check.

    For geolocation resource record sets, if an endpoint is unhealthy, Amazon Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the United States, for North America, and for all locations. If the endpoint for the state resource record set is unhealthy, Amazon Route 53 checks the resource record sets for the United States, for North America, and for all locations (a resource record set for which the value of CountryCode is *), in that order, until it finds a resource record set for which the endpoint is healthy.

    If your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com. For the value of FullyQualifiedDomainName, specify the domain name of the server (such as us-east-1-www.example.com), not the name of the resource record sets (example.com).

    In this configuration, if you create a health check for which the value of FullyQualifiedDomainName matches the name of the resource record sets and then associate the health check with those resource record sets, health check results will be unpredictable.", + "HealthCheck$Id": "

    The ID of the specified health check.

    ", + "ResourceRecordSet$HealthCheckId": "

    Health Check resource record sets only, not required for alias resource record sets: An identifier that is used to identify the health check associated with the resource record set.

    ", + "UpdateHealthCheckRequest$HealthCheckId": "

    The ID of the health check to update.

    " + } + }, + "HealthCheckInUse": { + "base": "

    There are resource records associated with this health check. Before you can delete the health check, you must disassociate it from the resource record sets.

    ", + "refs": { + } + }, + "HealthCheckNonce": { + "base": null, + "refs": { + "CreateHealthCheckRequest$CallerReference": "

    A unique string that identifies the request and that allows failed CreateHealthCheck requests to be retried without the risk of executing the operation twice. You must use a unique CallerReference string every time you create a health check. CallerReference can be any unique string; you might choose to use a string that identifies your project.

    Valid characters are any Unicode code points that are legal in an XML 1.0 document. The UTF-8 encoding of the value must be less than 128 bytes.

    ", + "HealthCheck$CallerReference": "

    A unique string that identifies the request to create the health check.

    " + } + }, + "HealthCheckObservation": { + "base": "

    A complex type that contains the IP address of an Amazon Route 53 health checker and the reason for the health check status.

    ", + "refs": { + "HealthCheckObservations$member": null + } + }, + "HealthCheckObservations": { + "base": null, + "refs": { + "GetHealthCheckLastFailureReasonResponse$HealthCheckObservations": "

    A list that contains one HealthCheckObservation element for each Amazon Route 53 health checker.

    ", + "GetHealthCheckStatusResponse$HealthCheckObservations": "

    A list that contains one HealthCheckObservation element for each Amazon Route 53 health checker.

    " + } + }, + "HealthCheckType": { + "base": null, + "refs": { + "HealthCheckConfig$Type": "

    The type of health check to be performed. Currently supported types are TCP, HTTP, HTTPS, HTTP_STR_MATCH, and HTTPS_STR_MATCH.

    " + } + }, + "HealthCheckVersion": { + "base": null, + "refs": { + "HealthCheck$HealthCheckVersion": "

    The version of the health check. You can optionally pass this value in a call to UpdateHealthCheck to prevent overwriting another change to the health check.

    ", + "UpdateHealthCheckRequest$HealthCheckVersion": "

    Optional. When you specify a health check version, Amazon Route 53 compares this value with the current value in the health check, which prevents you from updating the health check when the versions don't match. Using HealthCheckVersion lets you prevent overwriting another change to the health check.

    " + } + }, + "HealthCheckVersionMismatch": { + "base": null, + "refs": { + } + }, + "HealthChecks": { + "base": null, + "refs": { + "ListHealthChecksResponse$HealthChecks": "

    A complex type that contains information about the health checks associated with the current AWS account.

    " + } + }, + "HealthThreshold": { + "base": null, + "refs": { + "HealthCheckConfig$HealthThreshold": "

    The minimum number of child health checks that must be healthy for Amazon Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive.

    ", + "UpdateHealthCheckRequest$HealthThreshold": "

    The minimum number of child health checks that must be healthy for Amazon Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive.

    Specify this value only if you want to change it.

    " + } + }, + "HostedZone": { + "base": "

    A complex type that contains information about the specified hosted zone.

    ", + "refs": { + "CreateHostedZoneResponse$HostedZone": "

    A complex type that contains identifying information about the hosted zone.

    ", + "GetHostedZoneResponse$HostedZone": "

    A complex type that contains the information about the specified hosted zone.

    ", + "HostedZones$member": null, + "UpdateHostedZoneCommentResponse$HostedZone": null + } + }, + "HostedZoneAlreadyExists": { + "base": "

    The hosted zone you are trying to create already exists. Amazon Route 53 returns this error when a hosted zone has already been created with the specified CallerReference.

    ", + "refs": { + } + }, + "HostedZoneConfig": { + "base": "

    A complex type that contains an optional comment about your hosted zone. If you don't want to specify a comment, you can omit the HostedZoneConfig and Comment elements from the XML document.

    ", + "refs": { + "CreateHostedZoneRequest$HostedZoneConfig": "

    A complex type that contains an optional comment about your hosted zone.

    ", + "HostedZone$Config": "

    A complex type that contains the Comment element.

    " + } + }, + "HostedZoneCount": { + "base": null, + "refs": { + "GetHostedZoneCountResponse$HostedZoneCount": "

    The number of hosted zones associated with the current AWS account.

    " + } + }, + "HostedZoneNotEmpty": { + "base": "

    The hosted zone contains resource record sets in addition to the default NS and SOA resource record sets. Before you can delete the hosted zone, you must delete the additional resource record sets.

    ", + "refs": { + } + }, + "HostedZoneNotFound": { + "base": "

    The specified HostedZone cannot be found.

    ", + "refs": { + } + }, + "HostedZoneRRSetCount": { + "base": null, + "refs": { + "HostedZone$ResourceRecordSetCount": "

    Total number of resource record sets in the hosted zone.

    " + } + }, + "HostedZones": { + "base": null, + "refs": { + "ListHostedZonesByNameResponse$HostedZones": "

    A complex type that contains information about the hosted zones associated with the current AWS account.

    ", + "ListHostedZonesResponse$HostedZones": "

    A complex type that contains information about the hosted zones associated with the current AWS account.

    " + } + }, + "IPAddress": { + "base": null, + "refs": { + "HealthCheckConfig$IPAddress": "

    IP Address of the instance being checked.

    ", + "HealthCheckObservation$IPAddress": "

    The IP address of the Amazon Route 53 health checker that performed the health check.

    ", + "UpdateHealthCheckRequest$IPAddress": "

    The IP address of the resource that you want to check.

    Specify this value only if you want to change it.

    " + } + }, + "IPAddressCidr": { + "base": null, + "refs": { + "CheckerIpRanges$member": null + } + }, + "IncompatibleVersion": { + "base": "

    The resource you are trying to access is unsupported on this Amazon Route 53 endpoint. Please consider using a newer endpoint or a tool that does so.

    ", + "refs": { + } + }, + "InvalidArgument": { + "base": "

    At least one of the specified arguments is invalid.

    ", + "refs": { + } + }, + "InvalidChangeBatch": { + "base": "

    This error contains a list of one or more error messages. Each error message indicates one error in the change batch. For more information, see Example InvalidChangeBatch Errors.

    ", + "refs": { + } + }, + "InvalidDomainName": { + "base": "

    This error indicates that the specified domain name is not valid.

    ", + "refs": { + } + }, + "InvalidInput": { + "base": "

    Some value specified in the request is invalid or the XML document is malformed.

    ", + "refs": { + } + }, + "InvalidTrafficPolicyDocument": { + "base": "

    The format of the traffic policy document that you specified in the Document element is invalid.

    ", + "refs": { + } + }, + "InvalidVPCId": { + "base": "

    The hosted zone you are trying to create for your VPC_ID does not belong to you. Amazon Route 53 returns this error when the VPC specified by VPCId does not belong to you.

    ", + "refs": { + } + }, + "Inverted": { + "base": null, + "refs": { + "HealthCheckConfig$Inverted": "

    A boolean value that indicates whether the status of health check should be inverted. For example, if a health check is healthy but Inverted is True, then Amazon Route 53 considers the health check to be unhealthy.

    ", + "UpdateHealthCheckRequest$Inverted": "

    A boolean value that indicates whether the status of health check should be inverted. For example, if a health check is healthy but Inverted is True, then Amazon Route 53 considers the health check to be unhealthy.

    Specify this value only if you want to change it.

    " + } + }, + "IsPrivateZone": { + "base": null, + "refs": { + "HostedZoneConfig$PrivateZone": null + } + }, + "LastVPCAssociation": { + "base": "

    The VPC you are trying to disassociate from the hosted zone is the last VPC that is associated with the hosted zone. Amazon Route 53 currently doesn't support disassociating the last VPC from the hosted zone.

    ", + "refs": { + } + }, + "LimitsExceeded": { + "base": "

    The limits specified for a resource have been exceeded.

    ", + "refs": { + } + }, + "ListChangeBatchesByHostedZoneRequest": { + "base": "

    The input for a ListChangeBatchesByHostedZone request.

    ", + "refs": { + } + }, + "ListChangeBatchesByHostedZoneResponse": { + "base": "

    The input for a ListChangeBatchesByHostedZone request.

    ", + "refs": { + } + }, + "ListChangeBatchesByRRSetRequest": { + "base": "

    The input for a ListChangeBatchesByRRSet request.

    ", + "refs": { + } + }, + "ListChangeBatchesByRRSetResponse": { + "base": "

    The input for a ListChangeBatchesByRRSet request.

    ", + "refs": { + } + }, + "ListGeoLocationsRequest": { + "base": "

    The input for a ListGeoLocations request.

    ", + "refs": { + } + }, + "ListGeoLocationsResponse": { + "base": "

    A complex type that contains information about the geo locations that are returned by the request and information about the response.

    ", + "refs": { + } + }, + "ListHealthChecksRequest": { + "base": "

    To retrieve a list of your health checks, send a GET request to the 2013-04-01/healthcheck resource. The response to this request includes a HealthChecks element with zero or more HealthCheck child elements. By default, the list of health checks is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the health check that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "refs": { + } + }, + "ListHealthChecksResponse": { + "base": "

    A complex type that contains the response for the request.

    ", + "refs": { + } + }, + "ListHostedZonesByNameRequest": { + "base": "

    To retrieve a list of your hosted zones in lexicographic order, send a GET request to the 2013-04-01/hostedzonesbyname resource. The response to this request includes a HostedZones element with zero or more HostedZone child elements lexicographically ordered by DNS name. By default, the list of hosted zones is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the DNSName and HostedZoneId parameters to control the hosted zone that the list begins with.

    For more information about listing hosted zones, see Listing the Hosted Zones for an AWS Account in the Amazon Route 53 Developer Guide.

    ", + "refs": { + } + }, + "ListHostedZonesByNameResponse": { + "base": "

    A complex type that contains the response for the request.

    ", + "refs": { + } + }, + "ListHostedZonesRequest": { + "base": "

    To retrieve a list of your hosted zones, send a GET request to the 2013-04-01/hostedzone resource. The response to this request includes a HostedZones element with zero or more HostedZone child elements. By default, the list of hosted zones is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the hosted zone that the list begins with. For more information about listing hosted zones, see Listing the Hosted Zones for an AWS Account in the Amazon Route 53 Developer Guide.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "refs": { + } + }, + "ListHostedZonesResponse": { + "base": "

    A complex type that contains the response for the request.

    ", + "refs": { + } + }, + "ListResourceRecordSetsRequest": { + "base": "

    The input for a ListResourceRecordSets request.

    ", + "refs": { + } + }, + "ListResourceRecordSetsResponse": { + "base": "

    A complex type that contains information about the resource record sets that are returned by the request and information about the response.

    ", + "refs": { + } + }, + "ListReusableDelegationSetsRequest": { + "base": "

    To retrieve a list of your reusable delegation sets, send a GET request to the 2013-04-01/delegationset resource. The response to this request includes a DelegationSets element with zero or more DelegationSet child elements. By default, the list of reusable delegation sets is displayed on a single page. You can control the length of the page that is displayed by using the MaxItems parameter. You can use the Marker parameter to control the delegation set that the list begins with.

    Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a value greater than 100, Amazon Route 53 returns only the first 100.", + "refs": { + } + }, + "ListReusableDelegationSetsResponse": { + "base": "

    A complex type that contains the response for the request.

    ", + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": "

    A complex type containing information about a request for a list of the tags that are associated with an individual resource.

    ", + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": "

    A complex type containing tags for the specified resource.

    ", + "refs": { + } + }, + "ListTagsForResourcesRequest": { + "base": "

    A complex type containing information about a request for a list of the tags that are associated with up to 10 specified resources.

    ", + "refs": { + } + }, + "ListTagsForResourcesResponse": { + "base": "

    A complex type containing tags for the specified resources.

    ", + "refs": { + } + }, + "ListTrafficPoliciesRequest": { + "base": "

    A complex type that contains the information about the request to list the traffic policies that are associated with the current AWS account.

    ", + "refs": { + } + }, + "ListTrafficPoliciesResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesByHostedZoneRequest": { + "base": "

    A request for the traffic policy instances that you created in a specified hosted zone.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesByHostedZoneResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesByPolicyRequest": { + "base": "

    A complex type that contains the information about the request to list your traffic policy instances.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesByPolicyResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesRequest": { + "base": "

    A complex type that contains the information about the request to list your traffic policy instances.

    ", + "refs": { + } + }, + "ListTrafficPolicyInstancesResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "ListTrafficPolicyVersionsRequest": { + "base": "

    A complex type that contains the information about the request to list your traffic policies.

    ", + "refs": { + } + }, + "ListTrafficPolicyVersionsResponse": { + "base": "

    A complex type that contains the response information for the request.

    ", + "refs": { + } + }, + "MeasureLatency": { + "base": null, + "refs": { + "HealthCheckConfig$MeasureLatency": "

    A Boolean value that indicates whether you want Amazon Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint and to display CloudWatch latency graphs in the Amazon Route 53 console.

    " + } + }, + "Message": { + "base": null, + "refs": { + "TrafficPolicyInstance$Message": null + } + }, + "NoSuchChange": { + "base": null, + "refs": { + } + }, + "NoSuchDelegationSet": { + "base": "

    The specified delegation set does not exist.

    ", + "refs": { + } + }, + "NoSuchGeoLocation": { + "base": "

    The geo location you are trying to get does not exist.

    ", + "refs": { + } + }, + "NoSuchHealthCheck": { + "base": "

    The health check you are trying to get or delete does not exist.

    ", + "refs": { + } + }, + "NoSuchHostedZone": { + "base": null, + "refs": { + } + }, + "NoSuchTrafficPolicy": { + "base": "

    No traffic policy exists with the specified ID.

    ", + "refs": { + } + }, + "NoSuchTrafficPolicyInstance": { + "base": "

    No traffic policy instance exists with the specified ID.

    ", + "refs": { + } + }, + "Nonce": { + "base": null, + "refs": { + "CreateHostedZoneRequest$CallerReference": "

    A unique string that identifies the request and that allows failed CreateHostedZone requests to be retried without the risk of executing the operation twice. You must use a unique CallerReference string every time you create a hosted zone. CallerReference can be any unique string; you might choose to use a string that identifies your project, such as DNSMigration_01.

    Valid characters are any Unicode code points that are legal in an XML 1.0 document. The UTF-8 encoding of the value must be less than 128 bytes.

    ", + "CreateReusableDelegationSetRequest$CallerReference": "

    A unique string that identifies the request and that allows failed CreateReusableDelegationSet requests to be retried without the risk of executing the operation twice. You must use a unique CallerReference string every time you create a reusable delegation set. CallerReference can be any unique string; you might choose to use a string that identifies your project, such as DNSMigration_01.

    Valid characters are any Unicode code points that are legal in an XML 1.0 document. The UTF-8 encoding of the value must be less than 128 bytes.

    ", + "DelegationSet$CallerReference": null, + "HostedZone$CallerReference": "

    A unique string that identifies the request to create the hosted zone.

    " + } + }, + "PageMarker": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneRequest$Marker": "

    The page marker.

    ", + "ListChangeBatchesByHostedZoneResponse$Marker": "

    The page marker.

    ", + "ListChangeBatchesByHostedZoneResponse$NextMarker": "

    The next page marker.

    ", + "ListChangeBatchesByRRSetRequest$Marker": "

    The page marker.

    ", + "ListChangeBatchesByRRSetResponse$Marker": "

    The page marker.

    ", + "ListChangeBatchesByRRSetResponse$NextMarker": "

    The next page marker.

    ", + "ListHealthChecksRequest$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListHealthChecksResponse$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListHealthChecksResponse$NextMarker": "

    Indicates where to continue listing health checks. If ListHealthChecksResponse$IsTruncated is true, make another request to ListHealthChecks and include the value of the NextMarker element in the Marker element to get the next page of results.

    ", + "ListHostedZonesRequest$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListHostedZonesResponse$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListHostedZonesResponse$NextMarker": "

    Indicates where to continue listing hosted zones. If ListHostedZonesResponse$IsTruncated is true, make another request to ListHostedZones and include the value of the NextMarker element in the Marker element to get the next page of results.

    ", + "ListReusableDelegationSetsRequest$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListReusableDelegationSetsResponse$Marker": "

    If the request returned more than one page of results, submit another request and specify the value of NextMarker from the last response in the marker parameter to get the next page of results.

    ", + "ListReusableDelegationSetsResponse$NextMarker": "

    Indicates where to continue listing reusable delegation sets. If ListReusableDelegationSetsResponse$IsTruncated is true, make another request to ListReusableDelegationSets and include the value of the NextMarker element in the Marker element to get the next page of results.

    " + } + }, + "PageMaxItems": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneRequest$MaxItems": "

    The maximum number of items on a page.

    ", + "ListChangeBatchesByHostedZoneResponse$MaxItems": "

    The maximum number of items on a page.

    ", + "ListChangeBatchesByRRSetRequest$MaxItems": "

    The maximum number of items on a page.

    ", + "ListChangeBatchesByRRSetResponse$MaxItems": "

    The maximum number of items on a page.

    ", + "ListGeoLocationsRequest$MaxItems": "

    The maximum number of geo locations you want in the response body.

    ", + "ListGeoLocationsResponse$MaxItems": "

    The maximum number of records you requested. The maximum value of MaxItems is 100.

    ", + "ListHealthChecksRequest$MaxItems": "

    Specify the maximum number of health checks to return per page of results.

    ", + "ListHealthChecksResponse$MaxItems": "

    The maximum number of health checks to be included in the response body. If the number of health checks associated with this AWS account exceeds MaxItems, the value of ListHealthChecksResponse$IsTruncated in the response is true. Call ListHealthChecks again and specify the value of ListHealthChecksResponse$NextMarker in the ListHostedZonesRequest$Marker element to get the next page of results.

    ", + "ListHostedZonesByNameRequest$MaxItems": "

    Specify the maximum number of hosted zones to return per page of results.

    ", + "ListHostedZonesByNameResponse$MaxItems": "

    The maximum number of hosted zones to be included in the response body. If the number of hosted zones associated with this AWS account exceeds MaxItems, the value of ListHostedZonesByNameResponse$IsTruncated in the response is true. Call ListHostedZonesByName again and specify the value of ListHostedZonesByNameResponse$NextDNSName and ListHostedZonesByNameResponse$NextHostedZoneId elements respectively to get the next page of results.

    ", + "ListHostedZonesRequest$MaxItems": "

    Specify the maximum number of hosted zones to return per page of results.

    ", + "ListHostedZonesResponse$MaxItems": "

    The maximum number of hosted zones to be included in the response body. If the number of hosted zones associated with this AWS account exceeds MaxItems, the value of ListHostedZonesResponse$IsTruncated in the response is true. Call ListHostedZones again and specify the value of ListHostedZonesResponse$NextMarker in the ListHostedZonesRequest$Marker element to get the next page of results.

    ", + "ListResourceRecordSetsRequest$MaxItems": "

    The maximum number of records you want in the response body.

    ", + "ListResourceRecordSetsResponse$MaxItems": "

    The maximum number of records you requested. The maximum value of MaxItems is 100.

    ", + "ListReusableDelegationSetsRequest$MaxItems": "

    Specify the maximum number of reusable delegation sets to return per page of results.

    ", + "ListReusableDelegationSetsResponse$MaxItems": "

    The maximum number of reusable delegation sets to be included in the response body. If the number of reusable delegation sets associated with this AWS account exceeds MaxItems, the value of ListReusableDelegationSetsResponse$IsTruncated in the response is true. Call ListReusableDelegationSets again and specify the value of ListReusableDelegationSetsResponse$NextMarker in the ListReusableDelegationSetsRequest$Marker element to get the next page of results.

    ", + "ListTrafficPoliciesRequest$MaxItems": "

    The maximum number of traffic policies to be included in the response body for this request. If you have more than MaxItems traffic policies, the value of the IsTruncated element in the response is true, and the value of the TrafficPolicyIdMarker element is the ID of the first traffic policy in the next group of MaxItems traffic policies.

    ", + "ListTrafficPoliciesResponse$MaxItems": "

    The value that you specified for the MaxItems parameter in the call to ListTrafficPolicies that produced the current response.

    ", + "ListTrafficPolicyInstancesByHostedZoneRequest$MaxItems": "

    The maximum number of traffic policy instances to be included in the response body for this request. If you have more than MaxItems traffic policy instances, the value of the IsTruncated element in the response is true, and the values of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker represent the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesByHostedZoneResponse$MaxItems": "

    The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByHostedZone that produced the current response.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$MaxItems": "

    The maximum number of traffic policy instances to be included in the response body for this request. If you have more than MaxItems traffic policy instances, the value of the IsTruncated element in the response is true, and the values of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker represent the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$MaxItems": "

    The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByPolicy that produced the current response.

    ", + "ListTrafficPolicyInstancesRequest$MaxItems": "

    The maximum number of traffic policy instances to be included in the response body for this request. If you have more than MaxItems traffic policy instances, the value of the IsTruncated element in the response is true, and the values of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker represent the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesResponse$MaxItems": "

    The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstances that produced the current response.

    ", + "ListTrafficPolicyVersionsRequest$MaxItems": "

    The maximum number of traffic policy versions that you want Amazon Route 53 to include in the response body for this request. If the specified traffic policy has more than MaxItems versions, the value of the IsTruncated element in the response is true, and the value of the TrafficPolicyVersionMarker element is the ID of the first version in the next group of MaxItems traffic policy versions.

    ", + "ListTrafficPolicyVersionsResponse$MaxItems": "

    The value that you specified for the maxitems parameter in the call to ListTrafficPolicyVersions that produced the current response.

    " + } + }, + "PageTruncated": { + "base": null, + "refs": { + "ListChangeBatchesByHostedZoneResponse$IsTruncated": "

    A flag that indicates if there are more change batches to list.

    ", + "ListChangeBatchesByRRSetResponse$IsTruncated": "

    A flag that indicates if there are more change batches to list.

    ", + "ListGeoLocationsResponse$IsTruncated": "

    A flag that indicates whether there are more geo locations to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the values included in the ListGeoLocationsResponse$NextContinentCode, ListGeoLocationsResponse$NextCountryCode and ListGeoLocationsResponse$NextSubdivisionCode elements.

    Valid Values: true | false

    ", + "ListHealthChecksResponse$IsTruncated": "

    A flag indicating whether there are more health checks to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the Marker element.

    Valid Values: true | false

    ", + "ListHostedZonesByNameResponse$IsTruncated": "

    A flag indicating whether there are more hosted zones to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the NextDNSName and NextHostedZoneId elements.

    Valid Values: true | false

    ", + "ListHostedZonesResponse$IsTruncated": "

    A flag indicating whether there are more hosted zones to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the Marker element.

    Valid Values: true | false

    ", + "ListResourceRecordSetsResponse$IsTruncated": "

    A flag that indicates whether there are more resource record sets to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the ListResourceRecordSetsResponse$NextRecordName element.

    Valid Values: true | false

    ", + "ListReusableDelegationSetsResponse$IsTruncated": "

    A flag indicating whether there are more reusable delegation sets to be listed. If your results were truncated, you can make a follow-up request for the next page of results by using the Marker element.

    Valid Values: true | false

    ", + "ListTrafficPoliciesResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policies to be listed. If the response was truncated, you can get the next group of MaxItems traffic policies by calling ListTrafficPolicies again and specifying the value of the TrafficPolicyIdMarker element in the TrafficPolicyIdMarker request parameter.

    Valid Values: true | false

    ", + "ListTrafficPolicyInstancesByHostedZoneResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policy instances to be listed. If the response was truncated, you can get the next group of MaxItems traffic policy instances by calling ListTrafficPolicyInstancesByHostedZone again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker elements in the corresponding request parameters.

    Valid Values: true | false

    ", + "ListTrafficPolicyInstancesByPolicyResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policy instances to be listed. If the response was truncated, you can get the next group of MaxItems traffic policy instances by calling ListTrafficPolicyInstancesByPolicy again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker elements in the corresponding request parameters.

    Valid Values: true | false

    ", + "ListTrafficPolicyInstancesResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policy instances to be listed. If the response was truncated, you can get the next group of MaxItems traffic policy instances by calling ListTrafficPolicyInstances again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker elements in the corresponding request parameters.

    Valid Values: true | false

    ", + "ListTrafficPolicyVersionsResponse$IsTruncated": "

    A flag that indicates whether there are more traffic policies to be listed. If the response was truncated, you can get the next group of maxitems traffic policies by calling ListTrafficPolicyVersions again and specifying the value of the NextMarker element in the marker parameter.

    Valid Values: true | false

    " + } + }, + "Port": { + "base": null, + "refs": { + "HealthCheckConfig$Port": "

    Port on which connection will be opened to the instance to health check. For HTTP and HTTP_STR_MATCH this defaults to 80 if the port is not specified. For HTTPS and HTTPS_STR_MATCH this defaults to 443 if the port is not specified.

    ", + "UpdateHealthCheckRequest$Port": "

    The port on which you want Amazon Route 53 to open a connection to perform health checks.

    Specify this value only if you want to change it.

    " + } + }, + "PriorRequestNotComplete": { + "base": "

    The request was rejected because Amazon Route 53 was still processing a prior request.

    ", + "refs": { + } + }, + "PublicZoneVPCAssociation": { + "base": "

    The hosted zone you are trying to associate a VPC with doesn't have any VPC association. Amazon Route 53 currently doesn't support associating a VPC with a public hosted zone.

    ", + "refs": { + } + }, + "RData": { + "base": null, + "refs": { + "ResourceRecord$Value": "

    The current or new DNS record value, not to exceed 4,000 characters. In the case of a DELETE action, if the current value does not match the actual value, an error is returned. For descriptions about how to format Value for different record types, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide.

    You can specify more than one value for all record types except CNAME and SOA.

    " + } + }, + "RRType": { + "base": null, + "refs": { + "ListChangeBatchesByRRSetRequest$Type": "

    The type of the RRSet that you want to see changes for.

    ", + "ListResourceRecordSetsRequest$StartRecordType": "

    The DNS type at which to begin the listing of resource record sets.

    Valid values: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT

    Values for Weighted Resource Record Sets: A | AAAA | CNAME | TXT

    Values for Regional Resource Record Sets: A | AAAA | CNAME | TXT

    Values for Alias Resource Record Sets: A | AAAA

    Constraint: Specifying type without specifying name returns an InvalidInput error.

    ", + "ListResourceRecordSetsResponse$NextRecordType": "

    If the results were truncated, the type of the next record in the list. This element is present only if ListResourceRecordSetsResponse$IsTruncated is true.

    ", + "ListTrafficPolicyInstancesByHostedZoneRequest$TrafficPolicyInstanceTypeMarker": "

    For the first request to ListTrafficPolicyInstancesByHostedZone, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker is the DNS type of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    ", + "ListTrafficPolicyInstancesByHostedZoneResponse$TrafficPolicyInstanceTypeMarker": "

    If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of the resource record sets that are associated with the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$TrafficPolicyInstanceTypeMarker": "

    For the first request to ListTrafficPolicyInstancesByPolicy, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker is the DNS type of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$TrafficPolicyInstanceTypeMarker": "

    If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of the resource record sets that are associated with the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesRequest$TrafficPolicyInstanceTypeMarker": "

    For the first request to ListTrafficPolicyInstances, omit this value.

    If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker is the DNS type of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get.

    ", + "ListTrafficPolicyInstancesResponse$TrafficPolicyInstanceTypeMarker": "

    If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of the resource record sets that are associated with the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ResourceRecordSet$Type": "

    The DNS record type. For information about different record types and how data is encoded for them, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide.

    Valid values for basic resource record sets: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT

    Values for weighted, latency, geolocation, and failover resource record sets: A | AAAA | CNAME | MX | PTR | SPF | SRV | TXT. When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.

    SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, The SPF DNS Record Type.

    Values for alias resource record sets:

    • CloudFront distributions: A
    • ELB load balancers: A | AAAA
    • Amazon S3 buckets: A
    • Another resource record set in this hosted zone: Specify the type of the resource record set for which you're creating the alias. Specify any value except NS or SOA.
    ", + "TrafficPolicy$Type": null, + "TrafficPolicyInstance$TrafficPolicyType": null, + "TrafficPolicySummary$Type": null + } + }, + "RequestInterval": { + "base": null, + "refs": { + "HealthCheckConfig$RequestInterval": "

    The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.

    Each Amazon Route 53 health checker makes requests at this interval. Valid values are 10 and 30. The default value is 30.

    " + } + }, + "ResourceDescription": { + "base": null, + "refs": { + "ChangeBatch$Comment": "

    Optional: Any comments you want to include about a change batch request.

    ", + "ChangeBatchRecord$Comment": "

    A complex type that describes change information about changes made to your hosted zone.

    This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

    ", + "ChangeInfo$Comment": "

    A complex type that describes change information about changes made to your hosted zone.

    This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

    ", + "HostedZoneConfig$Comment": "

    An optional comment about your hosted zone. If you don't want to specify a comment, you can omit the HostedZoneConfig and Comment elements from the XML document.

    ", + "UpdateHostedZoneCommentRequest$Comment": "

    A comment about your hosted zone.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "AliasTarget$HostedZoneId": "

    Alias resource record sets only: The value you use depends on where you want to route queries:

    • A CloudFront distribution: Specify Z2FDTNDATAQYW2.
    • An ELB load balancer: Specify the value of the hosted zone ID for the load balancer. You can get the hosted zone ID by using the AWS Management Console, the ELB API, or the AWS CLI. Use the same method to get values for HostedZoneId and DNSName. If you get one value from the console and the other value from the API or the CLI, creating the resource record set will fail.
    • An Amazon S3 bucket that is configured as a static website: Specify the hosted zone ID for the Amazon S3 website endpoint in which you created the bucket. For more information about valid values, see the table Amazon Simple Storage Service (S3) Website Endpoints in the Amazon Web Services General Reference.
    • Another Amazon Route 53 resource record set in your hosted zone: Specify the hosted zone ID of your hosted zone. (An alias resource record set cannot reference a resource record set in a different hosted zone.)

    For more information and an example, see Example: Creating Alias Resource Record Sets in the Amazon Route 53 API Reference.

    ", + "AssociateVPCWithHostedZoneRequest$HostedZoneId": "

    The ID of the hosted zone you want to associate your VPC with.

    Note that you cannot associate a VPC with a hosted zone that doesn't have an existing VPC association.

    ", + "ChangeBatchRecord$Id": "

    The ID of the request. Use this ID to track when the change has completed across all Amazon Route 53 DNS servers.

    ", + "ChangeInfo$Id": "

    The ID of the request. Use this ID to track when the change has completed across all Amazon Route 53 DNS servers.

    ", + "ChangeResourceRecordSetsRequest$HostedZoneId": "

    The ID of the hosted zone that contains the resource record sets that you want to change.

    ", + "CreateHostedZoneRequest$DelegationSetId": "

    The delegation set id of the reusable delegation set whose NS records you want to assign to the new hosted zone.

    ", + "CreateReusableDelegationSetRequest$HostedZoneId": "

    The ID of the hosted zone whose delegation set you want to mark as reusable. It is an optional parameter.

    ", + "CreateTrafficPolicyInstanceRequest$HostedZoneId": "

    The ID of the hosted zone in which you want Amazon Route 53 to create resource record sets by using the configuration in a traffic policy.

    ", + "DelegationSet$Id": null, + "DeleteHostedZoneRequest$Id": "

    The ID of the hosted zone you want to delete.

    ", + "DeleteReusableDelegationSetRequest$Id": "

    The ID of the reusable delegation set you want to delete.

    ", + "DisassociateVPCFromHostedZoneRequest$HostedZoneId": "

    The ID of the hosted zone you want to disassociate your VPC from.

    Note that you cannot disassociate the last VPC from a hosted zone.

    ", + "GetChangeDetailsRequest$Id": "

    The ID of the change batch request. The value that you specify here is the value that ChangeResourceRecordSets returned in the Id element when you submitted the request.

    ", + "GetChangeRequest$Id": "

    The ID of the change batch request. The value that you specify here is the value that ChangeResourceRecordSets returned in the Id element when you submitted the request.

    ", + "GetHostedZoneRequest$Id": "

    The ID of the hosted zone for which you want to get a list of the name servers in the delegation set.

    ", + "GetReusableDelegationSetRequest$Id": "

    The ID of the reusable delegation set for which you want to get a list of the name servers.

    ", + "HostedZone$Id": "

    The ID of the specified hosted zone.

    ", + "ListChangeBatchesByHostedZoneRequest$HostedZoneId": "

    The ID of the hosted zone that you want to see changes for.

    ", + "ListChangeBatchesByRRSetRequest$HostedZoneId": "

    The ID of the hosted zone that you want to see changes for.

    ", + "ListHostedZonesByNameRequest$HostedZoneId": "

    If the request returned more than one page of results, submit another request and specify the value of NextDNSName and NextHostedZoneId from the last response in the DNSName and HostedZoneId parameters to get the next page of results.

    ", + "ListHostedZonesByNameResponse$HostedZoneId": "

    The HostedZoneId value sent in the request.

    ", + "ListHostedZonesByNameResponse$NextHostedZoneId": "

    If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted zones associated with the current AWS account. To get the next page of results, make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId in the ListHostedZonesByNameRequest$HostedZoneId element.

    ", + "ListHostedZonesRequest$DelegationSetId": null, + "ListResourceRecordSetsRequest$HostedZoneId": "

    The ID of the hosted zone that contains the resource record sets that you want to get.

    ", + "ListTrafficPolicyInstancesByHostedZoneRequest$HostedZoneId": "

    The ID of the hosted zone for which you want to list traffic policy instances.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$HostedZoneIdMarker": "

    For the first request to ListTrafficPolicyInstancesByPolicy, omit this value.

    If the value of IsTruncated in the previous response was true, HostedZoneIdMarker is the ID of the hosted zone for the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get for this hosted zone.

    If the value of IsTruncated in the previous response was false, omit this value.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$HostedZoneIdMarker": "

    If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "ListTrafficPolicyInstancesRequest$HostedZoneIdMarker": "

    For the first request to ListTrafficPolicyInstances, omit this value.

    If the value of IsTruncated in the previous response was true, you have more traffic policy instances. To get the next group of MaxItems traffic policy instances, submit another ListTrafficPolicyInstances request. For the value of HostedZoneIdMarker, specify the value of HostedZoneIdMarker from the previous response, which is the hosted zone ID of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    If the value of IsTruncated in the previous response was false, there are no more traffic policy instances to get.

    ", + "ListTrafficPolicyInstancesResponse$HostedZoneIdMarker": "

    If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of the first traffic policy instance in the next group of MaxItems traffic policy instances.

    ", + "TrafficPolicyInstance$HostedZoneId": null, + "UpdateHostedZoneCommentRequest$Id": "

    The ID of the hosted zone you want to update.

    " + } + }, + "ResourcePath": { + "base": null, + "refs": { + "HealthCheckConfig$ResourcePath": "

    Path to ping on the instance to check the health. Required for HTTP, HTTPS, HTTP_STR_MATCH, and HTTPS_STR_MATCH health checks. The HTTP request is issued to the instance on the given port and path.

    ", + "UpdateHealthCheckRequest$ResourcePath": "

    The path that you want Amazon Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, for example the file /docs/route53-health-check.html.

    Specify this value only if you want to change it.

    " + } + }, + "ResourceRecord": { + "base": "

    A complex type that contains the value of the Value element for the current resource record set.

    ", + "refs": { + "ResourceRecords$member": null + } + }, + "ResourceRecordSet": { + "base": "

    A complex type that contains information about the current resource record set.

    ", + "refs": { + "Change$ResourceRecordSet": "

    Information about the resource record set to create or delete.

    ", + "ResourceRecordSets$member": null + } + }, + "ResourceRecordSetFailover": { + "base": null, + "refs": { + "ResourceRecordSet$Failover": "

    Failover resource record sets only: To configure failover, you add the Failover element to two resource record sets. For one resource record set, you specify PRIMARY as the value for Failover; for the other resource record set, you specify SECONDARY. In addition, you include the HealthCheckId element and specify the health check that you want Amazon Route 53 to perform for each resource record set.

    You can create failover and failover alias resource record sets only in public hosted zones.

    Except where noted, the following failover behaviors assume that you have included the HealthCheckId element in both resource record sets:

    • When the primary resource record set is healthy, Amazon Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the secondary resource record set.
    • When the primary resource record set is unhealthy and the secondary resource record set is healthy, Amazon Route 53 responds to DNS queries with the applicable value from the secondary resource record set.
    • When the secondary resource record set is unhealthy, Amazon Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the primary resource record set.
    • If you omit the HealthCheckId element for the secondary resource record set, and if the primary resource record set is unhealthy, Amazon Route 53 always responds to DNS queries with the applicable value from the secondary resource record set. This is true regardless of the health of the associated endpoint.

    You cannot create non-failover resource record sets that have the same values for the Name and Type elements as failover resource record sets.

    For failover alias resource record sets, you must also include the EvaluateTargetHealth element and set the value to true.

    For more information about configuring failover for Amazon Route 53, see Amazon Route 53 Health Checks and DNS Failover in the Amazon Route 53 Developer Guide.

    Valid values: PRIMARY | SECONDARY

    " + } + }, + "ResourceRecordSetIdentifier": { + "base": null, + "refs": { + "ListChangeBatchesByRRSetRequest$SetIdentifier": "

    The identifier of the RRSet that you want to see changes for.

    ", + "ListResourceRecordSetsRequest$StartRecordIdentifier": "

    Weighted resource record sets only: If results were truncated for a given DNS name and type, specify the value of ListResourceRecordSetsResponse$NextRecordIdentifier from the previous response to get the next resource record set that has the current DNS name and type.

    ", + "ListResourceRecordSetsResponse$NextRecordIdentifier": "

    Weighted resource record sets only: If results were truncated for a given DNS name and type, the value of SetIdentifier for the next resource record set that has the current DNS name and type.

    ", + "ResourceRecordSet$SetIdentifier": "

    Weighted, Latency, Geo, and Failover resource record sets only: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. The value of SetIdentifier must be unique for each resource record set that has the same combination of DNS name and type.

    " + } + }, + "ResourceRecordSetRegion": { + "base": null, + "refs": { + "ResourceRecordSet$Region": "

    Latency-based resource record sets only: The Amazon EC2 region where the resource that is specified in this resource record set resides. The resource typically is an AWS resource, such as an Amazon EC2 instance or an ELB load balancer, and is referred to by an IP address or a DNS domain name, depending on the record type.

    You can create latency and latency alias resource record sets only in public hosted zones.

    When Amazon Route 53 receives a DNS query for a domain name and type for which you have created latency resource record sets, Amazon Route 53 selects the latency resource record set that has the lowest latency between the end user and the associated Amazon EC2 region. Amazon Route 53 then returns the value that is associated with the selected resource record set.

    Note the following:

    • You can only specify one ResourceRecord per latency resource record set.
    • You can only create one latency resource record set for each Amazon EC2 region.
    • You are not required to create latency resource record sets for all Amazon EC2 regions. Amazon Route 53 will choose the region with the best latency from among the regions for which you create latency resource record sets.
    • You cannot create non-latency resource record sets that have the same values for the Name and Type elements as latency resource record sets.
    " + } + }, + "ResourceRecordSetWeight": { + "base": null, + "refs": { + "ResourceRecordSet$Weight": "

    Weighted resource record sets only: Among resource record sets that have the same combination of DNS name and type, a value that determines the proportion of DNS queries that Amazon Route 53 responds to using the current resource record set. Amazon Route 53 calculates the sum of the weights for the resource record sets that have the same combination of DNS name and type. Amazon Route 53 then responds to queries based on the ratio of a resource's weight to the total. Note the following:

    • You must specify a value for the Weight element for every weighted resource record set.
    • You can only specify one ResourceRecord per weighted resource record set.
    • You cannot create latency, failover, or geolocation resource record sets that have the same values for the Name and Type elements as weighted resource record sets.
    • You can create a maximum of 100 weighted resource record sets that have the same values for the Name and Type elements.
    • For weighted (but not weighted alias) resource record sets, if you set Weight to 0 for a resource record set, Amazon Route 53 never responds to queries with the applicable value for that resource record set. However, if you set Weight to 0 for all resource record sets that have the same combination of DNS name and type, traffic is routed to all resources with equal probability.

      The effect of setting Weight to 0 is different when you associate health checks with weighted resource record sets. For more information, see Options for Configuring Amazon Route 53 Active-Active and Active-Passive Failover in the Amazon Route 53 Developer Guide.

    " + } + }, + "ResourceRecordSets": { + "base": null, + "refs": { + "ListResourceRecordSetsResponse$ResourceRecordSets": "

    A complex type that contains information about the resource record sets that are returned by the request.

    " + } + }, + "ResourceRecords": { + "base": null, + "refs": { + "ResourceRecordSet$ResourceRecords": "

    A complex type that contains the resource records for the current resource record set.

    " + } + }, + "ResourceTagSet": { + "base": "

    A complex type containing a resource and its associated tags.

    ", + "refs": { + "ListTagsForResourceResponse$ResourceTagSet": "

    A ResourceTagSet containing tags associated with the specified resource.

    ", + "ResourceTagSetList$member": null + } + }, + "ResourceTagSetList": { + "base": null, + "refs": { + "ListTagsForResourcesResponse$ResourceTagSets": "

    A list of ResourceTagSets containing tags associated with the specified resources.

    " + } + }, + "ResourceURI": { + "base": null, + "refs": { + "CreateHealthCheckResponse$Location": "

    The unique URL representing the new health check.

    ", + "CreateHostedZoneResponse$Location": "

    The unique URL representing the new hosted zone.

    ", + "CreateReusableDelegationSetResponse$Location": "

    The unique URL representing the new reusable delegation set.

    ", + "CreateTrafficPolicyInstanceResponse$Location": "

    A unique URL that represents a new traffic policy instance.

    ", + "CreateTrafficPolicyResponse$Location": null, + "CreateTrafficPolicyVersionResponse$Location": null + } + }, + "SearchString": { + "base": null, + "refs": { + "HealthCheckConfig$SearchString": "

    A string to search for in the body of a health check response. Required for HTTP_STR_MATCH and HTTPS_STR_MATCH health checks.

    ", + "UpdateHealthCheckRequest$SearchString": "

    If the value of Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string that you want Amazon Route 53 to search for in the response body from the specified resource. If the string appears in the response body, Amazon Route 53 considers the resource healthy.

    Specify this value only if you want to change it.

    " + } + }, + "Status": { + "base": null, + "refs": { + "StatusReport$Status": "

    The observed health check status.

    " + } + }, + "StatusReport": { + "base": "

    A complex type that contains information about the health check status for the current observation.

    ", + "refs": { + "HealthCheckObservation$StatusReport": "

    A complex type that contains information about the health check status for the current observation.

    " + } + }, + "TTL": { + "base": null, + "refs": { + "CreateTrafficPolicyInstanceRequest$TTL": "

    The TTL that you want Amazon Route 53 to assign to all of the resource record sets that it creates in the specified hosted zone.

    ", + "ResourceRecordSet$TTL": "

    The cache time to live for the current resource record set. Note the following:

    • If you're creating an alias resource record set, omit TTL. Amazon Route 53 uses the value of TTL for the alias target.
    • If you're associating this resource record set with a health check (if you're adding a HealthCheckId element), we recommend that you specify a TTL of 60 seconds or less so clients respond quickly to changes in health status.
    • All of the resource record sets in a group of weighted, latency, geolocation, or failover resource record sets must have the same value for TTL.
    • If a group of weighted resource record sets includes one or more weighted alias resource record sets for which the alias target is an ELB load balancer, we recommend that you specify a TTL of 60 seconds for all of the non-alias weighted resource record sets that have the same name and type. Values other than 60 seconds (the TTL for load balancers) will change the effect of the values that you specify for Weight.
    ", + "TrafficPolicyInstance$TTL": null, + "UpdateTrafficPolicyInstanceRequest$TTL": "

    The TTL that you want Amazon Route 53 to assign to all of the updated resource record sets.

    " + } + }, + "Tag": { + "base": "

    A single tag containing a key and value.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key for a Tag.

    ", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "ChangeTagsForResourceRequest$RemoveTagKeys": "

    A list of Tag keys that you want to remove from the specified resource.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "ChangeTagsForResourceRequest$AddTags": "

    A complex type that contains a list of Tag elements. Each Tag element identifies a tag that you want to add or update for the specified resource.

    ", + "ResourceTagSet$Tags": "

    The tags associated with the specified resource.

    " + } + }, + "TagResourceId": { + "base": null, + "refs": { + "ChangeTagsForResourceRequest$ResourceId": "

    The ID of the resource for which you want to add, change, or delete tags.

    ", + "ListTagsForResourceRequest$ResourceId": "

    The ID of the resource for which you want to retrieve tags.

    ", + "ResourceTagSet$ResourceId": "

    The ID for the specified resource.

    ", + "TagResourceIdList$member": null + } + }, + "TagResourceIdList": { + "base": null, + "refs": { + "ListTagsForResourcesRequest$ResourceIds": "

    A complex type that contains the ResourceId element for each resource for which you want to get a list of tags.

    " + } + }, + "TagResourceType": { + "base": null, + "refs": { + "ChangeTagsForResourceRequest$ResourceType": "

    The type of the resource.

    - The resource type for health checks is healthcheck.

    - The resource type for hosted zones is hostedzone.

    ", + "ListTagsForResourceRequest$ResourceType": "

    The type of the resource.

    - The resource type for health checks is healthcheck.

    - The resource type for hosted zones is hostedzone.

    ", + "ListTagsForResourcesRequest$ResourceType": "

    The type of the resources.

    - The resource type for health checks is healthcheck.

    - The resource type for hosted zones is hostedzone.

    ", + "ResourceTagSet$ResourceType": "

    The type of the resource.

    - The resource type for health checks is healthcheck.

    - The resource type for hosted zones is hostedzone.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value for a Tag.

    " + } + }, + "ThrottlingException": { + "base": null, + "refs": { + } + }, + "TimeStamp": { + "base": null, + "refs": { + "ChangeBatchRecord$SubmittedAt": "

    The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). The Z after the time indicates that the time is listed in Coordinated Universal Time (UTC).

    ", + "ChangeInfo$SubmittedAt": "

    The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). The Z after the time indicates that the time is listed in Coordinated Universal Time (UTC).

    ", + "StatusReport$CheckedTime": "

    The date and time the health check status was observed, in the format YYYY-MM-DDThh:mm:ssZ, as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). The Z after the time indicates that the time is listed in Coordinated Universal Time (UTC).

    " + } + }, + "TooManyHealthChecks": { + "base": null, + "refs": { + } + }, + "TooManyHostedZones": { + "base": "

    This error indicates that you've reached the maximum number of hosted zones that can be created for the current AWS account. You can request an increase to the limit on the Contact Us page.

    ", + "refs": { + } + }, + "TooManyTrafficPolicies": { + "base": "

    You've created the maximum number of traffic policies that can be created for the current AWS account. You can request an increase to the limit on the Contact Us page.

    ", + "refs": { + } + }, + "TooManyTrafficPolicyInstances": { + "base": "

    You've created the maximum number of traffic policy instances that can be created for the current AWS account. You can request an increase to the limit on the Contact Us page.

    ", + "refs": { + } + }, + "TrafficPolicies": { + "base": null, + "refs": { + "ListTrafficPolicyVersionsResponse$TrafficPolicies": "

    A list that contains one TrafficPolicy element for each traffic policy version that is associated with the specified traffic policy.

    " + } + }, + "TrafficPolicy": { + "base": null, + "refs": { + "CreateTrafficPolicyResponse$TrafficPolicy": "

    A complex type that contains settings for the new traffic policy.

    ", + "CreateTrafficPolicyVersionResponse$TrafficPolicy": "

    A complex type that contains settings for the new version of the traffic policy.

    ", + "GetTrafficPolicyResponse$TrafficPolicy": "

    A complex type that contains settings for the specified traffic policy.

    ", + "TrafficPolicies$member": null, + "UpdateTrafficPolicyCommentResponse$TrafficPolicy": "

    A complex type that contains settings for the specified traffic policy.

    " + } + }, + "TrafficPolicyAlreadyExists": { + "base": "

    A traffic policy that has the same value for Name already exists.

    ", + "refs": { + } + }, + "TrafficPolicyComment": { + "base": null, + "refs": { + "CreateTrafficPolicyRequest$Comment": "

    Any comments that you want to include about the traffic policy.

    ", + "CreateTrafficPolicyVersionRequest$Comment": "

    Any comments that you want to include about the new traffic policy version.

    ", + "TrafficPolicy$Comment": null, + "UpdateTrafficPolicyCommentRequest$Comment": "

    The new comment for the specified traffic policy and version.

    " + } + }, + "TrafficPolicyDocument": { + "base": null, + "refs": { + "CreateTrafficPolicyRequest$Document": "

    The definition of this traffic policy in JSON format.

    ", + "CreateTrafficPolicyVersionRequest$Document": "

    The definition of a new traffic policy version, in JSON format. You must specify the full definition of the new traffic policy. You cannot specify just the differences between the new version and a previous version.

    ", + "TrafficPolicy$Document": null + } + }, + "TrafficPolicyId": { + "base": null, + "refs": { + "CreateTrafficPolicyInstanceRequest$TrafficPolicyId": "

    The ID of the traffic policy that you want to use to create resource record sets in the specified hosted zone.

    ", + "CreateTrafficPolicyVersionRequest$Id": "

    The ID of the traffic policy for which you want to create a new version.

    ", + "DeleteTrafficPolicyRequest$Id": "

    The ID of the traffic policy that you want to delete.

    ", + "GetTrafficPolicyRequest$Id": "

    The ID of the traffic policy that you want to get information about.

    ", + "ListTrafficPoliciesRequest$TrafficPolicyIdMarker": "

    For your first request to ListTrafficPolicies, do not include the TrafficPolicyIdMarker parameter.

    If you have more traffic policies than the value of MaxItems, ListTrafficPolicies returns only the first MaxItems traffic policies. To get the next group of MaxItems policies, submit another request to ListTrafficPolicies. For the value of TrafficPolicyIdMarker, specify the value of the TrafficPolicyIdMarker element that was returned in the previous response.

    Policies are listed in the order in which they were created.

    ", + "ListTrafficPoliciesResponse$TrafficPolicyIdMarker": "

    If the value of IsTruncated is true, TrafficPolicyIdMarker is the ID of the first traffic policy in the next group of MaxItems traffic policies.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$TrafficPolicyId": "

    The ID of the traffic policy for which you want to list traffic policy instances.

    ", + "ListTrafficPolicyVersionsRequest$Id": "

    Specify the value of Id of the traffic policy for which you want to list all versions.

    ", + "TrafficPolicy$Id": null, + "TrafficPolicyInstance$TrafficPolicyId": null, + "TrafficPolicySummary$Id": null, + "UpdateTrafficPolicyCommentRequest$Id": "

    The value of Id for the traffic policy for which you want to update the comment.

    ", + "UpdateTrafficPolicyInstanceRequest$TrafficPolicyId": "

    The ID of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.

    " + } + }, + "TrafficPolicyInUse": { + "base": "

    One or more traffic policy instances were created by using the specified traffic policy.

    ", + "refs": { + } + }, + "TrafficPolicyInstance": { + "base": null, + "refs": { + "CreateTrafficPolicyInstanceResponse$TrafficPolicyInstance": "

    A complex type that contains settings for the new traffic policy instance.

    ", + "GetTrafficPolicyInstanceResponse$TrafficPolicyInstance": "

    A complex type that contains settings for the traffic policy instance.

    ", + "TrafficPolicyInstances$member": null, + "UpdateTrafficPolicyInstanceResponse$TrafficPolicyInstance": "

    A complex type that contains settings for the updated traffic policy instance.

    " + } + }, + "TrafficPolicyInstanceAlreadyExists": { + "base": "

    Traffic policy instance with given Id already exists.

    ", + "refs": { + } + }, + "TrafficPolicyInstanceCount": { + "base": null, + "refs": { + "GetTrafficPolicyInstanceCountResponse$TrafficPolicyInstanceCount": "

    The number of traffic policy instances that are associated with the current AWS account.

    " + } + }, + "TrafficPolicyInstanceId": { + "base": null, + "refs": { + "DeleteTrafficPolicyInstanceRequest$Id": "

    The ID of the traffic policy instance that you want to delete.

    When you delete a traffic policy instance, Amazon Route 53 also deletes all of the resource record sets that were created when you created the traffic policy instance.", + "GetTrafficPolicyInstanceRequest$Id": "

    The ID of the traffic policy instance that you want to get information about.

    ", + "ResourceRecordSet$TrafficPolicyInstanceId": null, + "TrafficPolicyInstance$Id": null, + "UpdateTrafficPolicyInstanceRequest$Id": "

    The ID of the traffic policy instance that you want to update.

    " + } + }, + "TrafficPolicyInstanceState": { + "base": null, + "refs": { + "TrafficPolicyInstance$State": null + } + }, + "TrafficPolicyInstances": { + "base": null, + "refs": { + "ListTrafficPolicyInstancesByHostedZoneResponse$TrafficPolicyInstances": "

    A list that contains one TrafficPolicyInstance element for each traffic policy instance that matches the elements in the request.

    ", + "ListTrafficPolicyInstancesByPolicyResponse$TrafficPolicyInstances": "

    A list that contains one TrafficPolicyInstance element for each traffic policy instance that matches the elements in the request.

    ", + "ListTrafficPolicyInstancesResponse$TrafficPolicyInstances": "

    A list that contains one TrafficPolicyInstance element for each traffic policy instance that matches the elements in the request.

    " + } + }, + "TrafficPolicyName": { + "base": null, + "refs": { + "CreateTrafficPolicyRequest$Name": "

    The name of the traffic policy.

    ", + "TrafficPolicy$Name": null, + "TrafficPolicySummary$Name": null + } + }, + "TrafficPolicySummaries": { + "base": null, + "refs": { + "ListTrafficPoliciesResponse$TrafficPolicySummaries": "

    A list that contains one TrafficPolicySummary element for each traffic policy that was created by the current AWS account.

    " + } + }, + "TrafficPolicySummary": { + "base": null, + "refs": { + "TrafficPolicySummaries$member": null + } + }, + "TrafficPolicyVersion": { + "base": null, + "refs": { + "CreateTrafficPolicyInstanceRequest$TrafficPolicyVersion": "

    The version of the traffic policy that you want to use to create resource record sets in the specified hosted zone.

    ", + "DeleteTrafficPolicyRequest$Version": "

    The version number of the traffic policy that you want to delete.

    ", + "GetTrafficPolicyRequest$Version": "

    The version number of the traffic policy that you want to get information about.

    ", + "ListTrafficPolicyInstancesByPolicyRequest$TrafficPolicyVersion": "

    The version of the traffic policy for which you want to list traffic policy instances. The version must be associated with the traffic policy that is specified by TrafficPolicyId.

    ", + "TrafficPolicy$Version": null, + "TrafficPolicyInstance$TrafficPolicyVersion": null, + "TrafficPolicySummary$LatestVersion": null, + "TrafficPolicySummary$TrafficPolicyCount": null, + "UpdateTrafficPolicyCommentRequest$Version": "

    The value of Version for the traffic policy for which you want to update the comment.

    ", + "UpdateTrafficPolicyInstanceRequest$TrafficPolicyVersion": "

    The version of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.

    " + } + }, + "TrafficPolicyVersionMarker": { + "base": null, + "refs": { + "ListTrafficPolicyVersionsRequest$TrafficPolicyVersionMarker": "

    For your first request to ListTrafficPolicyVersions, do not include the TrafficPolicyVersionMarker parameter.

    If you have more traffic policy versions than the value of MaxItems, ListTrafficPolicyVersions returns only the first group of MaxItems versions. To get the next group of MaxItems traffic policy versions, submit another request to ListTrafficPolicyVersions. For the value of TrafficPolicyVersionMarker, specify the value of the TrafficPolicyVersionMarker element that was returned in the previous response.

    Traffic policy versions are listed in sequential order.

    ", + "ListTrafficPolicyVersionsResponse$TrafficPolicyVersionMarker": "

    If IsTruncated is true, the value of TrafficPolicyVersionMarker identifies the first traffic policy in the next group of MaxItems traffic policies. Call ListTrafficPolicyVersions again and specify the value of TrafficPolicyVersionMarker in the TrafficPolicyVersionMarker request parameter.

    This element is present only if IsTruncated is true.

    " + } + }, + "UpdateHealthCheckRequest": { + "base": "

    A complex type that contains information about the request to update a health check.

    ", + "refs": { + } + }, + "UpdateHealthCheckResponse": { + "base": null, + "refs": { + } + }, + "UpdateHostedZoneCommentRequest": { + "base": "

    A complex type that contains information about the request to update a hosted zone comment.

    ", + "refs": { + } + }, + "UpdateHostedZoneCommentResponse": { + "base": "

    A complex type containing information about the specified hosted zone after the update.

    ", + "refs": { + } + }, + "UpdateTrafficPolicyCommentRequest": { + "base": "

    A complex type that contains information about the traffic policy for which you want to update the comment.

    ", + "refs": { + } + }, + "UpdateTrafficPolicyCommentResponse": { + "base": "

    A complex type that contains the response information for the traffic policy.

    ", + "refs": { + } + }, + "UpdateTrafficPolicyInstanceRequest": { + "base": "

    A complex type that contains information about the resource record sets that you want to update based on a specified traffic policy instance.

    ", + "refs": { + } + }, + "UpdateTrafficPolicyInstanceResponse": { + "base": "

    A complex type that contains information about the resource record sets that Amazon Route 53 created based on a specified traffic policy.

    ", + "refs": { + } + }, + "VPC": { + "base": null, + "refs": { + "AssociateVPCWithHostedZoneRequest$VPC": "

    The VPC that you want your hosted zone to be associated with.

    ", + "CreateHostedZoneRequest$VPC": "

    The VPC that you want your hosted zone to be associated with. By providing this parameter, your newly created hosted zone cannot be resolved anywhere other than the given VPC.

    ", + "CreateHostedZoneResponse$VPC": null, + "DisassociateVPCFromHostedZoneRequest$VPC": "

    The VPC that you want your hosted zone to be disassociated from.

    ", + "VPCs$member": null + } + }, + "VPCAssociationNotFound": { + "base": "

    The VPC you specified is not currently associated with the hosted zone.

    ", + "refs": { + } + }, + "VPCId": { + "base": "

    A VPC ID

    ", + "refs": { + "VPC$VPCId": null + } + }, + "VPCRegion": { + "base": null, + "refs": { + "VPC$VPCRegion": null + } + }, + "VPCs": { + "base": "

    A list of VPCs

    ", + "refs": { + "GetHostedZoneResponse$VPCs": "

    A complex type that contains information about VPCs associated with the specified hosted zone.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,33 @@ +{ + "pagination": { + "ListHealthChecks": { + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "HealthChecks" + }, + "ListHostedZones": { + "input_token": "Marker", + "output_token": "NextMarker", + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "HostedZones" + }, + "ListResourceRecordSets": { + "more_results": "IsTruncated", + "limit_key": "MaxItems", + "result_key": "ResourceRecordSets", + "input_token": [ + "StartRecordName", + "StartRecordType", + "StartRecordIdentifier" + ], + "output_token": [ + "NextRecordName", + "NextRecordType", + "NextRecordIdentifier" + ] + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1374 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-05-15", + "endpointPrefix":"route53domains", + "jsonVersion":"1.1", + "serviceFullName":"Amazon Route 53 Domains", + "signatureVersion":"v4", + "targetPrefix":"Route53Domains_v20140515", + "protocol":"json" + }, + "operations":{ + "CheckDomainAvailability":{ + "name":"CheckDomainAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckDomainAvailabilityRequest"}, + "output":{"shape":"CheckDomainAvailabilityResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DeleteTagsForDomain":{ + "name":"DeleteTagsForDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTagsForDomainRequest"}, + "output":{"shape":"DeleteTagsForDomainResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DisableDomainAutoRenew":{ + "name":"DisableDomainAutoRenew", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DisableDomainAutoRenewRequest"}, + "output":{"shape":"DisableDomainAutoRenewResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "DisableDomainTransferLock":{ + "name":"DisableDomainTransferLock", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableDomainTransferLockRequest"}, + "output":{"shape":"DisableDomainTransferLockResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DuplicateRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TLDRulesViolation", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "EnableDomainAutoRenew":{ + "name":"EnableDomainAutoRenew", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableDomainAutoRenewRequest"}, + "output":{"shape":"EnableDomainAutoRenewResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "EnableDomainTransferLock":{ + "name":"EnableDomainTransferLock", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableDomainTransferLockRequest"}, + "output":{"shape":"EnableDomainTransferLockResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DuplicateRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TLDRulesViolation", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + 
"shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetDomainDetail":{ + "name":"GetDomainDetail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDomainDetailRequest"}, + "output":{"shape":"GetDomainDetailResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "GetOperationDetail":{ + "name":"GetOperationDetail", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOperationDetailRequest"}, + "output":{"shape":"GetOperationDetailResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{"shape":"ListDomainsResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListOperations":{ + "name":"ListOperations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOperationsRequest"}, + "output":{"shape":"ListOperationsResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "ListTagsForDomain":{ + "name":"ListTagsForDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForDomainRequest"}, + "output":{"shape":"ListTagsForDomainResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + 
}, + "RegisterDomain":{ + "name":"RegisterDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterDomainRequest"}, + "output":{"shape":"RegisterDomainResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DuplicateRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TLDRulesViolation", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DomainLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "RetrieveDomainAuthCode":{ + "name":"RetrieveDomainAuthCode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetrieveDomainAuthCodeRequest"}, + "output":{"shape":"RetrieveDomainAuthCodeResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "TransferDomain":{ + "name":"TransferDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TransferDomainRequest"}, + "output":{"shape":"TransferDomainResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DuplicateRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TLDRulesViolation", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DomainLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateDomainContact":{ + 
"name":"UpdateDomainContact", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDomainContactRequest"}, + "output":{"shape":"UpdateDomainContactResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DuplicateRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TLDRulesViolation", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateDomainContactPrivacy":{ + "name":"UpdateDomainContactPrivacy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDomainContactPrivacyRequest"}, + "output":{"shape":"UpdateDomainContactPrivacyResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DuplicateRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TLDRulesViolation", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateDomainNameservers":{ + "name":"UpdateDomainNameservers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDomainNameserversRequest"}, + "output":{"shape":"UpdateDomainNameserversResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"DuplicateRequest", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"TLDRulesViolation", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + 
"exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + }, + "UpdateTagsForDomain":{ + "name":"UpdateTagsForDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTagsForDomainRequest"}, + "output":{"shape":"UpdateTagsForDomainResponse"}, + "errors":[ + { + "shape":"InvalidInput", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"OperationLimitExceeded", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"UnsupportedTLD", + "error":{"httpStatusCode":400}, + "exception":true + } + ] + } + }, + "shapes":{ + "AddressLine":{ + "type":"string", + "max":255 + }, + "Boolean":{"type":"boolean"}, + "CheckDomainAvailabilityRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IdnLangCode":{"shape":"LangCode"} + } + }, + "CheckDomainAvailabilityResponse":{ + "type":"structure", + "required":["Availability"], + "members":{ + "Availability":{"shape":"DomainAvailability"} + } + }, + "City":{ + "type":"string", + "max":255 + }, + "ContactDetail":{ + "type":"structure", + "members":{ + "FirstName":{"shape":"ContactName"}, + "LastName":{"shape":"ContactName"}, + "ContactType":{"shape":"ContactType"}, + "OrganizationName":{"shape":"ContactName"}, + "AddressLine1":{"shape":"AddressLine"}, + "AddressLine2":{"shape":"AddressLine"}, + "City":{"shape":"City"}, + "State":{"shape":"State"}, + "CountryCode":{"shape":"CountryCode"}, + "ZipCode":{"shape":"ZipCode"}, + "PhoneNumber":{"shape":"ContactNumber"}, + "Email":{"shape":"Email"}, + "Fax":{"shape":"ContactNumber"}, + "ExtraParams":{"shape":"ExtraParamList"} + }, + "sensitive":true + }, + "ContactName":{ + "type":"string", + "max":255 + }, + "ContactNumber":{ + "type":"string", + "max":30 + }, + "ContactType":{ + "type":"string", + "enum":[ + "PERSON", + "COMPANY", + "ASSOCIATION", + "PUBLIC_BODY", + "RESELLER" + ] + }, + "CountryCode":{ 
+ "type":"string", + "enum":[ + "AD", + "AE", + "AF", + "AG", + "AI", + "AL", + "AM", + "AN", + "AO", + "AQ", + "AR", + "AS", + "AT", + "AU", + "AW", + "AZ", + "BA", + "BB", + "BD", + "BE", + "BF", + "BG", + "BH", + "BI", + "BJ", + "BL", + "BM", + "BN", + "BO", + "BR", + "BS", + "BT", + "BW", + "BY", + "BZ", + "CA", + "CC", + "CD", + "CF", + "CG", + "CH", + "CI", + "CK", + "CL", + "CM", + "CN", + "CO", + "CR", + "CU", + "CV", + "CX", + "CY", + "CZ", + "DE", + "DJ", + "DK", + "DM", + "DO", + "DZ", + "EC", + "EE", + "EG", + "ER", + "ES", + "ET", + "FI", + "FJ", + "FK", + "FM", + "FO", + "FR", + "GA", + "GB", + "GD", + "GE", + "GH", + "GI", + "GL", + "GM", + "GN", + "GQ", + "GR", + "GT", + "GU", + "GW", + "GY", + "HK", + "HN", + "HR", + "HT", + "HU", + "ID", + "IE", + "IL", + "IM", + "IN", + "IQ", + "IR", + "IS", + "IT", + "JM", + "JO", + "JP", + "KE", + "KG", + "KH", + "KI", + "KM", + "KN", + "KP", + "KR", + "KW", + "KY", + "KZ", + "LA", + "LB", + "LC", + "LI", + "LK", + "LR", + "LS", + "LT", + "LU", + "LV", + "LY", + "MA", + "MC", + "MD", + "ME", + "MF", + "MG", + "MH", + "MK", + "ML", + "MM", + "MN", + "MO", + "MP", + "MR", + "MS", + "MT", + "MU", + "MV", + "MW", + "MX", + "MY", + "MZ", + "NA", + "NC", + "NE", + "NG", + "NI", + "NL", + "NO", + "NP", + "NR", + "NU", + "NZ", + "OM", + "PA", + "PE", + "PF", + "PG", + "PH", + "PK", + "PL", + "PM", + "PN", + "PR", + "PT", + "PW", + "PY", + "QA", + "RO", + "RS", + "RU", + "RW", + "SA", + "SB", + "SC", + "SD", + "SE", + "SG", + "SH", + "SI", + "SK", + "SL", + "SM", + "SN", + "SO", + "SR", + "ST", + "SV", + "SY", + "SZ", + "TC", + "TD", + "TG", + "TH", + "TJ", + "TK", + "TL", + "TM", + "TN", + "TO", + "TR", + "TT", + "TV", + "TW", + "TZ", + "UA", + "UG", + "US", + "UY", + "UZ", + "VA", + "VC", + "VE", + "VG", + "VI", + "VN", + "VU", + "WF", + "WS", + "YE", + "YT", + "ZA", + "ZM", + "ZW" + ] + }, + "DNSSec":{"type":"string"}, + "DeleteTagsForDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + 
"TagsToDelete" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "TagsToDelete":{"shape":"TagKeyList"} + } + }, + "DeleteTagsForDomainResponse":{ + "type":"structure", + "members":{ + } + }, + "DisableDomainAutoRenewRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DisableDomainAutoRenewResponse":{ + "type":"structure", + "members":{ + } + }, + "DisableDomainTransferLockRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "DisableDomainTransferLockResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "DomainAuthCode":{ + "type":"string", + "max":1024, + "sensitive":true + }, + "DomainAvailability":{ + "type":"string", + "enum":[ + "AVAILABLE", + "AVAILABLE_RESERVED", + "AVAILABLE_PREORDER", + "UNAVAILABLE", + "UNAVAILABLE_PREMIUM", + "UNAVAILABLE_RESTRICTED", + "RESERVED", + "DONT_KNOW" + ] + }, + "DomainLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "DomainName":{ + "type":"string", + "max":255, + "pattern":"[a-zA-Z0-9_\\-.]*" + }, + "DomainStatus":{"type":"string"}, + "DomainStatusList":{ + "type":"list", + "member":{"shape":"DomainStatus"} + }, + "DomainSummary":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AutoRenew":{"shape":"Boolean"}, + "TransferLock":{"shape":"Boolean"}, + "Expiry":{"shape":"Timestamp"} + } + }, + "DomainSummaryList":{ + "type":"list", + "member":{"shape":"DomainSummary"} + }, + "DuplicateRequest":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "DurationInYears":{ + "type":"integer", + "min":1, + "max":10 + }, + "Email":{ + "type":"string", + "max":254 + 
}, + "EnableDomainAutoRenewRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "EnableDomainAutoRenewResponse":{ + "type":"structure", + "members":{ + } + }, + "EnableDomainTransferLockRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "EnableDomainTransferLockResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "ErrorMessage":{"type":"string"}, + "ExtraParam":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"ExtraParamName"}, + "Value":{"shape":"ExtraParamValue"} + } + }, + "ExtraParamList":{ + "type":"list", + "member":{"shape":"ExtraParam"} + }, + "ExtraParamName":{ + "type":"string", + "enum":[ + "DUNS_NUMBER", + "BRAND_NUMBER", + "BIRTH_DEPARTMENT", + "BIRTH_DATE_IN_YYYY_MM_DD", + "BIRTH_COUNTRY", + "BIRTH_CITY", + "DOCUMENT_NUMBER", + "AU_ID_NUMBER", + "AU_ID_TYPE", + "CA_LEGAL_TYPE", + "ES_IDENTIFICATION", + "ES_IDENTIFICATION_TYPE", + "ES_LEGAL_FORM", + "FI_BUSINESS_NUMBER", + "FI_ID_NUMBER", + "IT_PIN", + "RU_PASSPORT_DATA", + "SE_ID_NUMBER", + "SG_ID_NUMBER", + "VAT_NUMBER" + ] + }, + "ExtraParamValue":{ + "type":"string", + "max":2048 + }, + "FIAuthKey":{"type":"string"}, + "GetDomainDetailRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "GetDomainDetailResponse":{ + "type":"structure", + "required":[ + "DomainName", + "Nameservers", + "AdminContact", + "RegistrantContact", + "TechContact" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Nameservers":{"shape":"NameserverList"}, + "AutoRenew":{"shape":"Boolean"}, + "AdminContact":{"shape":"ContactDetail"}, + "RegistrantContact":{"shape":"ContactDetail"}, + "TechContact":{"shape":"ContactDetail"}, + "AdminPrivacy":{"shape":"Boolean"}, + 
"RegistrantPrivacy":{"shape":"Boolean"}, + "TechPrivacy":{"shape":"Boolean"}, + "RegistrarName":{"shape":"RegistrarName"}, + "WhoIsServer":{"shape":"RegistrarWhoIsServer"}, + "RegistrarUrl":{"shape":"RegistrarUrl"}, + "AbuseContactEmail":{"shape":"Email"}, + "AbuseContactPhone":{"shape":"ContactNumber"}, + "RegistryDomainId":{"shape":"RegistryDomainId"}, + "CreationDate":{"shape":"Timestamp"}, + "UpdatedDate":{"shape":"Timestamp"}, + "ExpirationDate":{"shape":"Timestamp"}, + "Reseller":{"shape":"Reseller"}, + "DnsSec":{"shape":"DNSSec"}, + "StatusList":{"shape":"DomainStatusList"} + } + }, + "GetOperationDetailRequest":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "GetOperationDetailResponse":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"OperationId"}, + "Status":{"shape":"OperationStatus"}, + "Message":{"shape":"ErrorMessage"}, + "DomainName":{"shape":"DomainName"}, + "Type":{"shape":"OperationType"}, + "SubmittedDate":{"shape":"Timestamp"} + } + }, + "GlueIp":{ + "type":"string", + "max":45 + }, + "GlueIpList":{ + "type":"list", + "member":{"shape":"GlueIp"} + }, + "HostName":{ + "type":"string", + "max":255, + "pattern":"[a-zA-Z0-9_\\-.]*" + }, + "InvalidInput":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "LangCode":{ + "type":"string", + "max":3 + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "Marker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListDomainsResponse":{ + "type":"structure", + "required":["Domains"], + "members":{ + "Domains":{"shape":"DomainSummaryList"}, + "NextPageMarker":{"shape":"PageMarker"} + } + }, + "ListOperationsRequest":{ + "type":"structure", + "members":{ + "Marker":{"shape":"PageMarker"}, + "MaxItems":{"shape":"PageMaxItems"} + } + }, + "ListOperationsResponse":{ + "type":"structure", + 
"required":["Operations"], + "members":{ + "Operations":{"shape":"OperationSummaryList"}, + "NextPageMarker":{"shape":"PageMarker"} + } + }, + "ListTagsForDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "ListTagsForDomainResponse":{ + "type":"structure", + "required":["TagList"], + "members":{ + "TagList":{"shape":"TagList"} + } + }, + "Nameserver":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"HostName"}, + "GlueIps":{"shape":"GlueIpList"} + } + }, + "NameserverList":{ + "type":"list", + "member":{"shape":"Nameserver"} + }, + "OperationId":{ + "type":"string", + "max":255 + }, + "OperationLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "OperationStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "ERROR", + "SUCCESSFUL", + "FAILED" + ] + }, + "OperationSummary":{ + "type":"structure", + "required":[ + "OperationId", + "Status", + "Type", + "SubmittedDate" + ], + "members":{ + "OperationId":{"shape":"OperationId"}, + "Status":{"shape":"OperationStatus"}, + "Type":{"shape":"OperationType"}, + "SubmittedDate":{"shape":"Timestamp"} + } + }, + "OperationSummaryList":{ + "type":"list", + "member":{"shape":"OperationSummary"} + }, + "OperationType":{ + "type":"string", + "enum":[ + "REGISTER_DOMAIN", + "DELETE_DOMAIN", + "TRANSFER_IN_DOMAIN", + "UPDATE_DOMAIN_CONTACT", + "UPDATE_NAMESERVER", + "CHANGE_PRIVACY_PROTECTION", + "DOMAIN_LOCK" + ] + }, + "PageMarker":{ + "type":"string", + "max":4096 + }, + "PageMaxItems":{ + "type":"integer", + "max":100 + }, + "RegisterDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + "DurationInYears", + "AdminContact", + "RegistrantContact", + "TechContact" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IdnLangCode":{"shape":"LangCode"}, + 
"DurationInYears":{"shape":"DurationInYears"}, + "AutoRenew":{"shape":"Boolean"}, + "AdminContact":{"shape":"ContactDetail"}, + "RegistrantContact":{"shape":"ContactDetail"}, + "TechContact":{"shape":"ContactDetail"}, + "PrivacyProtectAdminContact":{"shape":"Boolean"}, + "PrivacyProtectRegistrantContact":{"shape":"Boolean"}, + "PrivacyProtectTechContact":{"shape":"Boolean"} + } + }, + "RegisterDomainResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "RegistrarName":{"type":"string"}, + "RegistrarUrl":{"type":"string"}, + "RegistrarWhoIsServer":{"type":"string"}, + "RegistryDomainId":{"type":"string"}, + "Reseller":{"type":"string"}, + "RetrieveDomainAuthCodeRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"} + } + }, + "RetrieveDomainAuthCodeResponse":{ + "type":"structure", + "required":["AuthCode"], + "members":{ + "AuthCode":{"shape":"DomainAuthCode"} + } + }, + "State":{ + "type":"string", + "max":255 + }, + "TLDRulesViolation":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagValue":{"type":"string"}, + "Timestamp":{"type":"timestamp"}, + "TransferDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + "DurationInYears", + "AdminContact", + "RegistrantContact", + "TechContact" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "IdnLangCode":{"shape":"LangCode"}, + "DurationInYears":{"shape":"DurationInYears"}, + "Nameservers":{"shape":"NameserverList"}, + "AuthCode":{"shape":"DomainAuthCode"}, + "AutoRenew":{"shape":"Boolean"}, + 
"AdminContact":{"shape":"ContactDetail"}, + "RegistrantContact":{"shape":"ContactDetail"}, + "TechContact":{"shape":"ContactDetail"}, + "PrivacyProtectAdminContact":{"shape":"Boolean"}, + "PrivacyProtectRegistrantContact":{"shape":"Boolean"}, + "PrivacyProtectTechContact":{"shape":"Boolean"} + } + }, + "TransferDomainResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "UnsupportedTLD":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "UpdateDomainContactPrivacyRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AdminPrivacy":{"shape":"Boolean"}, + "RegistrantPrivacy":{"shape":"Boolean"}, + "TechPrivacy":{"shape":"Boolean"} + } + }, + "UpdateDomainContactPrivacyResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "UpdateDomainContactRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "AdminContact":{"shape":"ContactDetail"}, + "RegistrantContact":{"shape":"ContactDetail"}, + "TechContact":{"shape":"ContactDetail"} + } + }, + "UpdateDomainContactResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "UpdateDomainNameserversRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Nameservers" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "FIAuthKey":{"shape":"FIAuthKey"}, + "Nameservers":{"shape":"NameserverList"} + } + }, + "UpdateDomainNameserversResponse":{ + "type":"structure", + "required":["OperationId"], + "members":{ + "OperationId":{"shape":"OperationId"} + } + }, + "UpdateTagsForDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + 
"DomainName":{"shape":"DomainName"}, + "TagsToUpdate":{"shape":"TagList"} + } + }, + "UpdateTagsForDomainResponse":{ + "type":"structure", + "members":{ + } + }, + "ZipCode":{ + "type":"string", + "max":255 + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,620 @@ +{ + "version": "2.0", + "operations": { + "CheckDomainAvailability": "

    This operation checks the availability of one domain name. You can access this API without authenticating. Note that if the availability status of a domain is pending, you must submit another request to determine the availability of the domain name.

    ", + "DeleteTagsForDomain": "

    This operation deletes the specified tags for a domain.

    All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.

    ", + "DisableDomainAutoRenew": "

    This operation disables automatic renewal of domain registration for the specified domain.

    Caution! Amazon Route 53 doesn't have a manual renewal process, so if you disable automatic renewal, registration for the domain will not be renewed when the expiration date passes, and you will lose control of the domain name.", + "DisableDomainTransferLock": "

    This operation removes the transfer lock on the domain (specifically the clientTransferProhibited status) to allow domain transfers. We recommend you refrain from performing this action unless you intend to transfer the domain to a different registrar. Successful submission returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "EnableDomainAutoRenew": "

    This operation configures Amazon Route 53 to automatically renew the specified domain before the domain registration expires. The cost of renewing your domain registration is billed to your AWS account.

    The period during which you can renew a domain name varies by TLD. For a list of TLDs and their renewal policies, see \"Renewal, restoration, and deletion times\" on the website for our registrar partner, Gandi. Route 53 requires that you renew before the end of the renewal period that is listed on the Gandi website so we can complete processing before the deadline.

    ", + "EnableDomainTransferLock": "

    This operation sets the transfer lock on the domain (specifically the clientTransferProhibited status) to prevent domain transfers. Successful submission returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "GetDomainDetail": "

    This operation returns detailed information about the domain. The domain's contact information is also returned as part of the output.

    ", + "GetOperationDetail": "

    This operation returns the current status of an operation that is not completed.

    ", + "ListDomains": "

    This operation returns all the domain names registered with Amazon Route 53 for the current AWS account.

    ", + "ListOperations": "

    This operation returns the operation IDs of operations that are not yet complete.

    ", + "ListTagsForDomain": "

    This operation returns all of the tags that are associated with the specified domain.

    All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.

    ", + "RegisterDomain": "

    This operation registers a domain. Domains are registered by the AWS registrar partner, Gandi. For some top-level domains (TLDs), this operation requires extra parameters.

    When you register a domain, Amazon Route 53 does the following:

    • Creates an Amazon Route 53 hosted zone that has the same name as the domain. Amazon Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.
    • Enables autorenew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.
    • Optionally enables privacy protection, so WHOIS queries return contact information for our registrar partner, Gandi, instead of the information you entered for registrant, admin, and tech contacts.
    • If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.
    • Charges your AWS account an amount based on the top-level domain. For more information, see Amazon Route 53 Pricing.
    ", + "RetrieveDomainAuthCode": "

    This operation returns the AuthCode for the domain. To transfer a domain to another registrar, you provide this value to the new registrar.

    ", + "TransferDomain": "

    This operation transfers a domain from another registrar to Amazon Route 53. When the transfer is complete, the domain is registered with the AWS registrar partner, Gandi.

    For transfer requirements, a detailed procedure, and information about viewing the status of a domain transfer, see Transferring Registration for a Domain to Amazon Route 53 in the Amazon Route 53 Developer Guide.

    If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you consider transferring your DNS service to Amazon Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

    Caution! If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

    If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

    ", + "UpdateDomainContact": "

    This operation updates the contact information for a particular domain. Information for at least one contact (registrant, administrator, or technical) must be supplied for update.

    If the update is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "UpdateDomainContactPrivacy": "

    This operation updates the specified domain contact's privacy setting. When the privacy option is enabled, personal information such as postal or email address is hidden from the results of a public WHOIS query. The privacy services are provided by the AWS registrar, Gandi. For more information, see the Gandi privacy features.

    This operation only affects the privacy of the specified contact type (registrant, administrator, or tech). Successful acceptance returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "UpdateDomainNameservers": "

    This operation replaces the current set of name servers for the domain with the specified set of name servers. If you use Amazon Route 53 as your DNS service, specify the four name servers in the delegation set for the hosted zone for the domain.

    If successful, this operation returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.

    ", + "UpdateTagsForDomain": "

    This operation adds or updates tags for a specified domain.

    All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.

    " + }, + "service": null, + "shapes": { + "AddressLine": { + "base": null, + "refs": { + "ContactDetail$AddressLine1": "

    First line of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "ContactDetail$AddressLine2": "

    Second line of contact's address, if any.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "DomainSummary$AutoRenew": "

    Indicates whether the domain is automatically renewed upon expiration.

    Type: Boolean

    Valid values: True | False

    ", + "DomainSummary$TransferLock": "

    Indicates whether a domain is locked from unauthorized transfer to another party.

    Type: Boolean

    Valid values: True | False

    ", + "GetDomainDetailResponse$AutoRenew": "

    Specifies whether the domain registration is set to renew automatically.

    Type: Boolean

    ", + "GetDomainDetailResponse$AdminPrivacy": "

    Specifies whether contact information for the admin contact is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    ", + "GetDomainDetailResponse$RegistrantPrivacy": "

    Specifies whether contact information for the registrant contact is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    ", + "GetDomainDetailResponse$TechPrivacy": "

    Specifies whether contact information for the tech contact is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    ", + "RegisterDomainRequest$AutoRenew": "

    Indicates whether the domain will be automatically renewed (true) or not (false). Autorenewal only takes effect after the account is charged.

    Type: Boolean

    Valid values: true | false

    Default: true

    Required: No

    ", + "RegisterDomainRequest$PrivacyProtectAdminContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "RegisterDomainRequest$PrivacyProtectRegistrantContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "RegisterDomainRequest$PrivacyProtectTechContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "TransferDomainRequest$AutoRenew": "

    Indicates whether the domain will be automatically renewed (true) or not (false). Autorenewal only takes effect after the account is charged.

    Type: Boolean

    Valid values: true | false

    Default: true

    Required: No

    ", + "TransferDomainRequest$PrivacyProtectAdminContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "TransferDomainRequest$PrivacyProtectRegistrantContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "TransferDomainRequest$PrivacyProtectTechContact": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: true

    Valid values: true | false

    Required: No

    ", + "UpdateDomainContactPrivacyRequest$AdminPrivacy": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: None

    Valid values: true | false

    Required: No

    ", + "UpdateDomainContactPrivacyRequest$RegistrantPrivacy": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: None

    Valid values: true | false

    Required: No

    ", + "UpdateDomainContactPrivacyRequest$TechPrivacy": "

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.

    Type: Boolean

    Default: None

    Valid values: true | false

    Required: No

    " + } + }, + "CheckDomainAvailabilityRequest": { + "base": "

    The CheckDomainAvailability request contains the following elements.

    ", + "refs": { + } + }, + "CheckDomainAvailabilityResponse": { + "base": "

    The CheckDomainAvailability response includes the following elements.

    ", + "refs": { + } + }, + "City": { + "base": null, + "refs": { + "ContactDetail$City": "

    The city of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    " + } + }, + "ContactDetail": { + "base": "

    ContactDetail includes the following elements.

    ", + "refs": { + "GetDomainDetailResponse$AdminContact": "

    Provides details about the domain administrative contact.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    ", + "GetDomainDetailResponse$RegistrantContact": "

    Provides details about the domain registrant.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    ", + "GetDomainDetailResponse$TechContact": "

    Provides details about the domain technical contact.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    ", + "RegisterDomainRequest$AdminContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "RegisterDomainRequest$RegistrantContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "RegisterDomainRequest$TechContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "TransferDomainRequest$AdminContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "TransferDomainRequest$RegistrantContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "TransferDomainRequest$TechContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "UpdateDomainContactRequest$AdminContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "UpdateDomainContactRequest$RegistrantContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    ", + "UpdateDomainContactRequest$TechContact": "

    Provides detailed contact information.

    Type: Complex

    Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, Email, Fax, ExtraParams

    Required: Yes

    " + } + }, + "ContactName": { + "base": null, + "refs": { + "ContactDetail$FirstName": "

    First name of contact.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "ContactDetail$LastName": "

    Last name of contact.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "ContactDetail$OrganizationName": "

    Name of the organization for contact types other than PERSON.

    Type: String

    Default: None

    Constraints: Maximum 255 characters. Contact type must not be PERSON.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    " + } + }, + "ContactNumber": { + "base": null, + "refs": { + "ContactDetail$PhoneNumber": "

    The phone number of the contact.

    Type: String

    Default: None

    Constraints: Phone number must be specified in the format \"+[country dialing code].[number including any area code]\". For example, a US phone number might appear as \"+1.1234567890\".

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "ContactDetail$Fax": "

    Fax number of the contact.

    Type: String

    Default: None

    Constraints: Phone number must be specified in the format \"+[country dialing code].[number including any area code]\". For example, a US phone number might appear as \"+1.1234567890\".

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    ", + "GetDomainDetailResponse$AbuseContactPhone": "

    Phone number for reporting abuse.

    Type: String

    " + } + }, + "ContactType": { + "base": null, + "refs": { + "ContactDetail$ContactType": "

    Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON, you must enter an organization name, and you can't enable privacy protection for the contact.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Valid values: PERSON | COMPANY | ASSOCIATION | PUBLIC_BODY

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    " + } + }, + "CountryCode": { + "base": null, + "refs": { + "ContactDetail$CountryCode": "

    Code for the country of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    " + } + }, + "DNSSec": { + "base": null, + "refs": { + "GetDomainDetailResponse$DnsSec": "

    Reserved for future use.

    " + } + }, + "DeleteTagsForDomainRequest": { + "base": "

    The DeleteTagsForDomainRequest includes the following elements.

    ", + "refs": { + } + }, + "DeleteTagsForDomainResponse": { + "base": null, + "refs": { + } + }, + "DisableDomainAutoRenewRequest": { + "base": null, + "refs": { + } + }, + "DisableDomainAutoRenewResponse": { + "base": null, + "refs": { + } + }, + "DisableDomainTransferLockRequest": { + "base": "

    The DisableDomainTransferLock request includes the following element.

    ", + "refs": { + } + }, + "DisableDomainTransferLockResponse": { + "base": "

    The DisableDomainTransferLock response includes the following element.

    ", + "refs": { + } + }, + "DomainAuthCode": { + "base": null, + "refs": { + "RetrieveDomainAuthCodeResponse$AuthCode": "

    The authorization code for the domain.

    Type: String

    ", + "TransferDomainRequest$AuthCode": "

    The authorization code for the domain. You get this value from the current registrar.

    Type: String

    Required: Yes

    " + } + }, + "DomainAvailability": { + "base": null, + "refs": { + "CheckDomainAvailabilityResponse$Availability": "

    Whether the domain name is available for registering.

    You can only register domains designated as AVAILABLE.

    Type: String

    Valid values:

    • AVAILABLE – The domain name is available.
    • AVAILABLE_RESERVED – The domain name is reserved under specific conditions.
    • AVAILABLE_PREORDER – The domain name is available and can be preordered.
    • UNAVAILABLE – The domain name is not available.
    • UNAVAILABLE_PREMIUM – The domain name is not available.
    • UNAVAILABLE_RESTRICTED – The domain name is forbidden.
    • RESERVED – The domain name has been reserved for another person or organization.
    • DONT_KNOW – The TLD registry didn't reply with a definitive answer about whether the domain name is available. Amazon Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.
    " + } + }, + "DomainLimitExceeded": { + "base": "

    The number of domains has exceeded the allowed threshold for the account.

    ", + "refs": { + } + }, + "DomainName": { + "base": null, + "refs": { + "CheckDomainAvailabilityRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "DeleteTagsForDomainRequest$DomainName": "

    The domain for which you want to delete one or more tags.

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Hyphens are allowed only when they're surrounded by letters, numbers, or other hyphens. You can't specify a hyphen at the beginning or end of a label. To specify an Internationalized Domain Name, you must convert the name to Punycode.

    Required: Yes

    ", + "DisableDomainAutoRenewRequest$DomainName": null, + "DisableDomainTransferLockRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "DomainSummary$DomainName": "

    The name of a domain.

    Type: String

    ", + "EnableDomainAutoRenewRequest$DomainName": null, + "EnableDomainTransferLockRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "GetDomainDetailRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "GetDomainDetailResponse$DomainName": "

    The name of a domain.

    Type: String

    ", + "GetOperationDetailResponse$DomainName": "

    The name of a domain.

    Type: String

    ", + "ListTagsForDomainRequest$DomainName": "

    The domain for which you want to get a list of tags.

    ", + "RegisterDomainRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "RetrieveDomainAuthCodeRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "TransferDomainRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "UpdateDomainContactPrivacyRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "UpdateDomainContactRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "UpdateDomainNameserversRequest$DomainName": "

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

    Required: Yes

    ", + "UpdateTagsForDomainRequest$DomainName": "

    The domain for which you want to add or update tags.

    The name of a domain.

    Type: String

    Default: None

    Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Hyphens are allowed only when they're surrounded by letters, numbers, or other hyphens. You can't specify a hyphen at the beginning or end of a label. To specify an Internationalized Domain Name, you must convert the name to Punycode.

    Required: Yes

    " + } + }, + "DomainStatus": { + "base": null, + "refs": { + "DomainStatusList$member": null + } + }, + "DomainStatusList": { + "base": null, + "refs": { + "GetDomainDetailResponse$StatusList": "

    An array of domain name status codes, also known as Extensible Provisioning Protocol (EPP) status codes.

    ICANN, the organization that maintains a central database of domain names, has developed a set of domain name status codes that tell you the status of a variety of operations on a domain name, for example, registering a domain name, transferring a domain name to another registrar, renewing the registration for a domain name, and so on. All registrars use this same set of status codes.

    For a current list of domain name status codes and an explanation of what each code means, go to the ICANN website and search for epp status codes. (Search on the ICANN website; web searches sometimes return an old version of the document.)

    Type: Array of String

    " + } + }, + "DomainSummary": { + "base": null, + "refs": { + "DomainSummaryList$member": null + } + }, + "DomainSummaryList": { + "base": null, + "refs": { + "ListDomainsResponse$Domains": "

    A summary of domains.

    Type: Complex type containing a list of domain summaries.

    Children: AutoRenew, DomainName, Expiry, TransferLock

    " + } + }, + "DuplicateRequest": { + "base": "

    The request is already in progress for the domain.

    ", + "refs": { + } + }, + "DurationInYears": { + "base": null, + "refs": { + "RegisterDomainRequest$DurationInYears": "

    The number of years the domain will be registered. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain.

    Type: Integer

    Default: 1

    Valid values: Integer from 1 to 10

    Required: Yes

    ", + "TransferDomainRequest$DurationInYears": "

    The number of years the domain will be registered. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain.

    Type: Integer

    Default: 1

    Valid values: Integer from 1 to 10

    Required: Yes

    " + } + }, + "Email": { + "base": null, + "refs": { + "ContactDetail$Email": "

    Email address of the contact.

    Type: String

    Default: None

    Constraints: Maximum 254 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: Yes

    ", + "GetDomainDetailResponse$AbuseContactEmail": "

    Email address to contact to report incorrect contact information for a domain, to report that the domain is being used to send spam, to report that someone is cybersquatting on a domain name, or report some other type of abuse.

    Type: String

    " + } + }, + "EnableDomainAutoRenewRequest": { + "base": null, + "refs": { + } + }, + "EnableDomainAutoRenewResponse": { + "base": null, + "refs": { + } + }, + "EnableDomainTransferLockRequest": { + "base": "

    The EnableDomainTransferLock request includes the following element.

    ", + "refs": { + } + }, + "EnableDomainTransferLockResponse": { + "base": "

    The EnableDomainTransferLock response includes the following elements.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "DomainLimitExceeded$message": null, + "DuplicateRequest$message": null, + "GetOperationDetailResponse$Message": "

    Detailed information on the status including possible errors.

    Type: String

    ", + "InvalidInput$message": null, + "OperationLimitExceeded$message": null, + "TLDRulesViolation$message": null, + "UnsupportedTLD$message": null + } + }, + "ExtraParam": { + "base": "

    ExtraParam includes the following elements.

    ", + "refs": { + "ExtraParamList$member": null + } + }, + "ExtraParamList": { + "base": null, + "refs": { + "ContactDetail$ExtraParams": "

    A list of name-value pairs for parameters required by certain top-level domains.

    Type: Complex

    Default: None

    Parents: RegistrantContact, AdminContact, TechContact

    Children: Name, Value

    Required: No

    " + } + }, + "ExtraParamName": { + "base": null, + "refs": { + "ExtraParam$Name": "

    Name of the additional parameter required by the top-level domain.

    Type: String

    Default: None

    Valid values: DUNS_NUMBER | BRAND_NUMBER | BIRTH_DEPARTMENT | BIRTH_DATE_IN_YYYY_MM_DD | BIRTH_COUNTRY | BIRTH_CITY | DOCUMENT_NUMBER | AU_ID_NUMBER | AU_ID_TYPE | CA_LEGAL_TYPE | ES_IDENTIFICATION | ES_IDENTIFICATION_TYPE | ES_LEGAL_FORM | FI_BUSINESS_NUMBER | FI_ID_NUMBER | IT_PIN | RU_PASSPORT_DATA | SE_ID_NUMBER | SG_ID_NUMBER | VAT_NUMBER

    Parent: ExtraParams

    Required: Yes

    " + } + }, + "ExtraParamValue": { + "base": null, + "refs": { + "ExtraParam$Value": "

    Values corresponding to the additional parameter names required by some top-level domains.

    Type: String

    Default: None

    Constraints: Maximum 2048 characters.

    Parent: ExtraParams

    Required: Yes

    " + } + }, + "FIAuthKey": { + "base": null, + "refs": { + "UpdateDomainNameserversRequest$FIAuthKey": "

    The authorization key for .fi domains

    " + } + }, + "GetDomainDetailRequest": { + "base": "

    The GetDomainDetail request includes the following element.

    ", + "refs": { + } + }, + "GetDomainDetailResponse": { + "base": "

    The GetDomainDetail response includes the following elements.

    ", + "refs": { + } + }, + "GetOperationDetailRequest": { + "base": "

    The GetOperationDetail request includes the following element.

    ", + "refs": { + } + }, + "GetOperationDetailResponse": { + "base": "

    The GetOperationDetail response includes the following elements.

    ", + "refs": { + } + }, + "GlueIp": { + "base": null, + "refs": { + "GlueIpList$member": null + } + }, + "GlueIpList": { + "base": null, + "refs": { + "Nameserver$GlueIps": "

    Glue IP address of a name server entry. Glue IP addresses are required only when the name of the name server is a subdomain of the domain. For example, if your domain is example.com and the name server for the domain is ns.example.com, you need to specify the IP address for ns.example.com.

    Type: List of IP addresses.

    Constraints: The list can contain only one IPv4 and one IPv6 address.

    Parent: Nameservers

    " + } + }, + "HostName": { + "base": null, + "refs": { + "Nameserver$Name": "

    The fully qualified host name of the name server.

    Type: String

    Constraint: Maximum 255 characters

    Parent: Nameservers

    " + } + }, + "InvalidInput": { + "base": "

    The requested item is not acceptable. For example, for an OperationId it may refer to the ID of an operation that is already completed. For a domain name, it may not be a valid domain name or belong to the requester account.

    ", + "refs": { + } + }, + "LangCode": { + "base": null, + "refs": { + "CheckDomainAvailabilityRequest$IdnLangCode": "

    Reserved for future use.

    ", + "RegisterDomainRequest$IdnLangCode": "

    Reserved for future use.

    ", + "TransferDomainRequest$IdnLangCode": "

    Reserved for future use.

    " + } + }, + "ListDomainsRequest": { + "base": "

    The ListDomains request includes the following elements.

    ", + "refs": { + } + }, + "ListDomainsResponse": { + "base": "

    The ListDomains response includes the following elements.

    ", + "refs": { + } + }, + "ListOperationsRequest": { + "base": "

    The ListOperations request includes the following elements.

    ", + "refs": { + } + }, + "ListOperationsResponse": { + "base": "

    The ListOperations response includes the following elements.

    ", + "refs": { + } + }, + "ListTagsForDomainRequest": { + "base": "

    The ListTagsForDomainRequest includes the following elements.

    ", + "refs": { + } + }, + "ListTagsForDomainResponse": { + "base": "

    The ListTagsForDomain response includes the following elements.

    ", + "refs": { + } + }, + "Nameserver": { + "base": "

    Nameserver includes the following elements.

    ", + "refs": { + "NameserverList$member": null + } + }, + "NameserverList": { + "base": null, + "refs": { + "GetDomainDetailResponse$Nameservers": "

    The name of the domain.

    Type: String

    ", + "TransferDomainRequest$Nameservers": "

    Contains details for the host and glue IP addresses.

    Type: Complex

    Children: GlueIps, Name

    Required: No

    ", + "UpdateDomainNameserversRequest$Nameservers": "

    A list of new name servers for the domain.

    Type: Complex

    Children: Name, GlueIps

    Required: Yes

    " + } + }, + "OperationId": { + "base": null, + "refs": { + "DisableDomainTransferLockResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "EnableDomainTransferLockResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "GetOperationDetailRequest$OperationId": "

    The identifier for the operation for which you want to get the status. Amazon Route 53 returned the identifier in the response to the original request.

    Type: String

    Default: None

    Required: Yes

    ", + "GetOperationDetailResponse$OperationId": "

    The identifier for the operation.

    Type: String

    ", + "OperationSummary$OperationId": "

    Identifier returned to track the requested action.

    Type: String

    ", + "RegisterDomainResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "TransferDomainResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "UpdateDomainContactPrivacyResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "UpdateDomainContactResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    ", + "UpdateDomainNameserversResponse$OperationId": "

    Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    " + } + }, + "OperationLimitExceeded": { + "base": "

    The number of operations or jobs running exceeded the allowed threshold for the account.

    ", + "refs": { + } + }, + "OperationStatus": { + "base": null, + "refs": { + "GetOperationDetailResponse$Status": "

    The current status of the requested operation in the system.

    Type: String

    ", + "OperationSummary$Status": "

    The current status of the requested operation in the system.

    Type: String

    " + } + }, + "OperationSummary": { + "base": "

    OperationSummary includes the following elements.

    ", + "refs": { + "OperationSummaryList$member": null + } + }, + "OperationSummaryList": { + "base": null, + "refs": { + "ListOperationsResponse$Operations": "

    Lists summaries of the operations.

    Type: Complex type containing a list of operation summaries

    Children: OperationId, Status, SubmittedDate, Type

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "GetOperationDetailResponse$Type": "

    The type of operation that was requested.

    Type: String

    ", + "OperationSummary$Type": "

    Type of the action requested.

    Type: String

    Valid values: REGISTER_DOMAIN | DELETE_DOMAIN | TRANSFER_IN_DOMAIN | UPDATE_DOMAIN_CONTACT | UPDATE_NAMESERVER | CHANGE_PRIVACY_PROTECTION | DOMAIN_LOCK

    " + } + }, + "PageMarker": { + "base": null, + "refs": { + "ListDomainsRequest$Marker": "

    For an initial request for a list of domains, omit this element. If the number of domains that are associated with the current AWS account is greater than the value that you specified for MaxItems, you can use Marker to return additional domains. Get the value of NextPageMarker from the previous response, and submit another request that includes the value of NextPageMarker in the Marker element.

    Type: String

    Default: None

    Constraints: The marker must match the value specified in the previous request.

    Required: No

    ", + "ListDomainsResponse$NextPageMarker": "

    If there are more domains than you specified for MaxItems in the request, submit another request and include the value of NextPageMarker in the value of Marker.

    Type: String

    Parent: Operations

    ", + "ListOperationsRequest$Marker": "

    For an initial request for a list of operations, omit this element. If the number of operations that are not yet complete is greater than the value that you specified for MaxItems, you can use Marker to return additional operations. Get the value of NextPageMarker from the previous response, and submit another request that includes the value of NextPageMarker in the Marker element.

    Type: String

    Default: None

    Required: No

    ", + "ListOperationsResponse$NextPageMarker": "

    If there are more operations than you specified for MaxItems in the request, submit another request and include the value of NextPageMarker in the value of Marker.

    Type: String

    Parent: Operations

    " + } + }, + "PageMaxItems": { + "base": null, + "refs": { + "ListDomainsRequest$MaxItems": "

    Number of domains to be returned.

    Type: Integer

    Default: 20

    Constraints: A numeral between 1 and 100.

    Required: No

    ", + "ListOperationsRequest$MaxItems": "

    Number of domains to be returned.

    Type: Integer

    Default: 20

    Constraints: A value between 1 and 100.

    Required: No

    " + } + }, + "RegisterDomainRequest": { + "base": "

    The RegisterDomain request includes the following elements.

    ", + "refs": { + } + }, + "RegisterDomainResponse": { + "base": "

    The RegisterDomain response includes the following element.

    ", + "refs": { + } + }, + "RegistrarName": { + "base": null, + "refs": { + "GetDomainDetailResponse$RegistrarName": "

    Name of the registrar of the domain as identified in the registry. Amazon Route 53 domains are registered by registrar Gandi. The value is \"GANDI SAS\".

    Type: String

    " + } + }, + "RegistrarUrl": { + "base": null, + "refs": { + "GetDomainDetailResponse$RegistrarUrl": "

    Web address of the registrar.

    Type: String

    " + } + }, + "RegistrarWhoIsServer": { + "base": null, + "refs": { + "GetDomainDetailResponse$WhoIsServer": "

    The fully qualified name of the WHOIS server that can answer the WHOIS query for the domain.

    Type: String

    " + } + }, + "RegistryDomainId": { + "base": null, + "refs": { + "GetDomainDetailResponse$RegistryDomainId": "

    Reserved for future use.

    " + } + }, + "Reseller": { + "base": null, + "refs": { + "GetDomainDetailResponse$Reseller": "

    Reseller of the domain. Domains registered or transferred using Amazon Route 53 domains will have \"Amazon\" as the reseller.

    Type: String

    " + } + }, + "RetrieveDomainAuthCodeRequest": { + "base": "

    The RetrieveDomainAuthCode request includes the following element.

    ", + "refs": { + } + }, + "RetrieveDomainAuthCodeResponse": { + "base": "

    The RetrieveDomainAuthCode response includes the following element.

    ", + "refs": { + } + }, + "State": { + "base": null, + "refs": { + "ContactDetail$State": "

    The state or province of the contact's city.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    " + } + }, + "TLDRulesViolation": { + "base": "

    The top-level domain does not support this operation.

    ", + "refs": { + } + }, + "Tag": { + "base": "

    Each tag includes the following elements.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    The key (name) of a tag.

    Type: String

    Default: None

    Valid values: A-Z, a-z, 0-9, space, \".:/=+\\-@\"

    Constraints: Each key can be 1-128 characters long.

    Required: Yes

    ", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "DeleteTagsForDomainRequest$TagsToDelete": "

    A list of tag keys to delete.

    Type: A list that contains the keys of the tags that you want to delete.

    Default: None

    Required: No

    '>" + } + }, + "TagList": { + "base": null, + "refs": { + "ListTagsForDomainResponse$TagList": "

    A list of the tags that are associated with the specified domain.

    Type: A complex type containing a list of tags

    Each tag includes the following elements.

    • Key

      The key (name) of a tag.

      Type: String

    • Value

      The value of a tag.

      Type: String

    ", + "UpdateTagsForDomainRequest$TagsToUpdate": "

    A list of the tag keys and values that you want to add or update. If you specify a key that already exists, the corresponding value will be replaced.

    Type: A complex type containing a list of tags

    Default: None

    Required: No

    '>

    Each tag includes the following elements:

    • Key

      The key (name) of a tag.

      Type: String

      Default: None

      Valid values: Unicode characters including alphanumeric, space, and \".:/=+\\-@\"

      Constraints: Each key can be 1-128 characters long.

      Required: Yes

    • Value

      The value of a tag.

      Type: String

      Default: None

      Valid values: Unicode characters including alphanumeric, space, and \".:/=+\\-@\"

      Constraints: Each value can be 0-256 characters long.

      Required: Yes

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The value of a tag.

    Type: String

    Default: None

    Valid values: A-Z, a-z, 0-9, space, \".:/=+\\-@\"

    Constraints: Each value can be 0-256 characters long.

    Required: Yes

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "DomainSummary$Expiry": "

    Expiration date of the domain in Coordinated Universal Time (UTC).

    Type: Long

    ", + "GetDomainDetailResponse$CreationDate": "

    The date when the domain was created as found in the response to a WHOIS query. The date format is Unix time.

    ", + "GetDomainDetailResponse$UpdatedDate": "

    The last updated date of the domain as found in the response to a WHOIS query. The date format is Unix time.

    ", + "GetDomainDetailResponse$ExpirationDate": "

    The date when the registration for the domain is set to expire. The date format is Unix time.

    ", + "GetOperationDetailResponse$SubmittedDate": "

    The date when the request was submitted.

    ", + "OperationSummary$SubmittedDate": "

    The date when the request was submitted.

    " + } + }, + "TransferDomainRequest": { + "base": "

    The TransferDomain request includes the following elements.

    ", + "refs": { + } + }, + "TransferDomainResponse": { + "base": "

    The TranserDomain response includes the following element.

    ", + "refs": { + } + }, + "UnsupportedTLD": { + "base": "

    Amazon Route 53 does not support this top-level domain.

    ", + "refs": { + } + }, + "UpdateDomainContactPrivacyRequest": { + "base": "

    The UpdateDomainContactPrivacy request includes the following elements.

    ", + "refs": { + } + }, + "UpdateDomainContactPrivacyResponse": { + "base": "

    The UpdateDomainContactPrivacy response includes the following element.

    ", + "refs": { + } + }, + "UpdateDomainContactRequest": { + "base": "

    The UpdateDomainContact request includes the following elements.

    ", + "refs": { + } + }, + "UpdateDomainContactResponse": { + "base": "

    The UpdateDomainContact response includes the following element.

    ", + "refs": { + } + }, + "UpdateDomainNameserversRequest": { + "base": "

    The UpdateDomainNameserver request includes the following elements.

    ", + "refs": { + } + }, + "UpdateDomainNameserversResponse": { + "base": "

    The UpdateDomainNameservers response includes the following element.

    ", + "refs": { + } + }, + "UpdateTagsForDomainRequest": { + "base": "

    The UpdateTagsForDomainRequest includes the following elements.

    ", + "refs": { + } + }, + "UpdateTagsForDomainResponse": { + "base": null, + "refs": { + } + }, + "ZipCode": { + "base": null, + "refs": { + "ContactDetail$ZipCode": "

    The zip or postal code of the contact's address.

    Type: String

    Default: None

    Constraints: Maximum 255 characters.

    Parents: RegistrantContact, AdminContact, TechContact

    Required: No

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,17 @@ +{ + "version": "1.0", + "pagination": { + "ListDomains": { + "limit_key": "MaxItems", + "input_token": "Marker", + "output_token": "NextPageMarker", + "result_key": "Domains" + }, + "ListOperations": { + "limit_key": "MaxItems", + "input_token": "Marker", + "output_token": "NextPageMarker", + "result_key": "Operations" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4324 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2006-03-01", + "checksumFormat":"md5", + "endpointPrefix":"s3", + "globalEndpoint":"s3.amazonaws.com", + "protocol":"rest-xml", + "serviceAbbreviation":"Amazon S3", + "serviceFullName":"Amazon Simple Storage Service", + "signatureVersion":"s3", + "timestampFormat":"rfc822" + }, + "operations":{ + "AbortMultipartUpload":{ + "name":"AbortMultipartUpload", + "http":{ + "method":"DELETE", 
+ "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"AbortMultipartUploadRequest"}, + "output":{"shape":"AbortMultipartUploadOutput"}, + "errors":[ + {"shape":"NoSuchUpload"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html" + }, + "CompleteMultipartUpload":{ + "name":"CompleteMultipartUpload", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"CompleteMultipartUploadRequest"}, + "output":{"shape":"CompleteMultipartUploadOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html" + }, + "CopyObject":{ + "name":"CopyObject", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"CopyObjectRequest"}, + "output":{"shape":"CopyObjectOutput"}, + "errors":[ + {"shape":"ObjectNotInActiveTierError"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", + "alias":"PutObjectCopy" + }, + "CreateBucket":{ + "name":"CreateBucket", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"CreateBucketRequest"}, + "output":{"shape":"CreateBucketOutput"}, + "errors":[ + {"shape":"BucketAlreadyExists"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", + "alias":"PutBucket" + }, + "CreateMultipartUpload":{ + "name":"CreateMultipartUpload", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}?uploads" + }, + "input":{"shape":"CreateMultipartUploadRequest"}, + "output":{"shape":"CreateMultipartUploadOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", + "alias":"InitiateMultipartUpload" + }, + "DeleteBucket":{ + "name":"DeleteBucket", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"DeleteBucketRequest"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETE.html" + }, + "DeleteBucketCors":{ + "name":"DeleteBucketCors", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?cors" + }, + "input":{"shape":"DeleteBucketCorsRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html" + }, + "DeleteBucketLifecycle":{ + "name":"DeleteBucketLifecycle", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"DeleteBucketLifecycleRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html" + }, + "DeleteBucketPolicy":{ + "name":"DeleteBucketPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?policy" + }, + "input":{"shape":"DeleteBucketPolicyRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html" + }, + "DeleteBucketReplication":{ + "name":"DeleteBucketReplication", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?replication" + }, + "input":{"shape":"DeleteBucketReplicationRequest"} + }, + "DeleteBucketTagging":{ + "name":"DeleteBucketTagging", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?tagging" + }, + "input":{"shape":"DeleteBucketTaggingRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEtagging.html" + }, + "DeleteBucketWebsite":{ + "name":"DeleteBucketWebsite", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?website" + }, + "input":{"shape":"DeleteBucketWebsiteRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html" + }, + "DeleteObject":{ + "name":"DeleteObject", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"DeleteObjectRequest"}, + "output":{"shape":"DeleteObjectOutput"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html" + }, + "DeleteObjects":{ + "name":"DeleteObjects", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}?delete" + }, + "input":{"shape":"DeleteObjectsRequest"}, + "output":{"shape":"DeleteObjectsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", + "alias":"DeleteMultipleObjects" + }, + "GetBucketAcl":{ + "name":"GetBucketAcl", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?acl" + }, + "input":{"shape":"GetBucketAclRequest"}, + "output":{"shape":"GetBucketAclOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html" + }, + "GetBucketCors":{ + "name":"GetBucketCors", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?cors" + }, + "input":{"shape":"GetBucketCorsRequest"}, + "output":{"shape":"GetBucketCorsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html" + }, + "GetBucketLifecycle":{ + "name":"GetBucketLifecycle", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"GetBucketLifecycleRequest"}, + "output":{"shape":"GetBucketLifecycleOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html", + "deprecated":true + }, + "GetBucketLifecycleConfiguration":{ + "name":"GetBucketLifecycleConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, + "output":{"shape":"GetBucketLifecycleConfigurationOutput"} + }, + "GetBucketLocation":{ + "name":"GetBucketLocation", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?location" + }, + "input":{"shape":"GetBucketLocationRequest"}, + "output":{"shape":"GetBucketLocationOutput"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlocation.html" + }, + "GetBucketLogging":{ + "name":"GetBucketLogging", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?logging" + }, + "input":{"shape":"GetBucketLoggingRequest"}, + "output":{"shape":"GetBucketLoggingOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html" + }, + "GetBucketNotification":{ + "name":"GetBucketNotification", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?notification" + }, + "input":{"shape":"GetBucketNotificationConfigurationRequest"}, + "output":{"shape":"NotificationConfigurationDeprecated"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETnotification.html", + "deprecated":true + }, + "GetBucketNotificationConfiguration":{ + "name":"GetBucketNotificationConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?notification" + }, + "input":{"shape":"GetBucketNotificationConfigurationRequest"}, + "output":{"shape":"NotificationConfiguration"} + }, + "GetBucketPolicy":{ + "name":"GetBucketPolicy", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?policy" + }, + "input":{"shape":"GetBucketPolicyRequest"}, + "output":{"shape":"GetBucketPolicyOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html" + }, + "GetBucketReplication":{ + "name":"GetBucketReplication", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?replication" + }, + "input":{"shape":"GetBucketReplicationRequest"}, + "output":{"shape":"GetBucketReplicationOutput"} + }, + "GetBucketRequestPayment":{ + "name":"GetBucketRequestPayment", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?requestPayment" + }, + "input":{"shape":"GetBucketRequestPaymentRequest"}, + "output":{"shape":"GetBucketRequestPaymentOutput"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentGET.html" + }, + "GetBucketTagging":{ + "name":"GetBucketTagging", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?tagging" + }, + "input":{"shape":"GetBucketTaggingRequest"}, + "output":{"shape":"GetBucketTaggingOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETtagging.html" + }, + "GetBucketVersioning":{ + "name":"GetBucketVersioning", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?versioning" + }, + "input":{"shape":"GetBucketVersioningRequest"}, + "output":{"shape":"GetBucketVersioningOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETversioningStatus.html" + }, + "GetBucketWebsite":{ + "name":"GetBucketWebsite", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?website" + }, + "input":{"shape":"GetBucketWebsiteRequest"}, + "output":{"shape":"GetBucketWebsiteOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html" + }, + "GetObject":{ + "name":"GetObject", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"GetObjectRequest"}, + "output":{"shape":"GetObjectOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html" + }, + "GetObjectAcl":{ + "name":"GetObjectAcl", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}?acl" + }, + "input":{"shape":"GetObjectAclRequest"}, + "output":{"shape":"GetObjectAclOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html" + }, + "GetObjectTorrent":{ + "name":"GetObjectTorrent", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}?torrent" + }, + "input":{"shape":"GetObjectTorrentRequest"}, + "output":{"shape":"GetObjectTorrentOutput"}, 
+ "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html" + }, + "HeadBucket":{ + "name":"HeadBucket", + "http":{ + "method":"HEAD", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"HeadBucketRequest"}, + "errors":[ + {"shape":"NoSuchBucket"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html" + }, + "HeadObject":{ + "name":"HeadObject", + "http":{ + "method":"HEAD", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"HeadObjectRequest"}, + "output":{"shape":"HeadObjectOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html" + }, + "ListBuckets":{ + "name":"ListBuckets", + "http":{ + "method":"GET", + "requestUri":"/" + }, + "output":{"shape":"ListBucketsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTServiceGET.html", + "alias":"GetService" + }, + "ListMultipartUploads":{ + "name":"ListMultipartUploads", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?uploads" + }, + "input":{"shape":"ListMultipartUploadsRequest"}, + "output":{"shape":"ListMultipartUploadsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html" + }, + "ListObjectVersions":{ + "name":"ListObjectVersions", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?versions" + }, + "input":{"shape":"ListObjectVersionsRequest"}, + "output":{"shape":"ListObjectVersionsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html", + "alias":"GetBucketObjectVersions" + }, + "ListObjects":{ + "name":"ListObjects", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}" + }, + "input":{"shape":"ListObjectsRequest"}, + "output":{"shape":"ListObjectsOutput"}, + "errors":[ + {"shape":"NoSuchBucket"} + ], + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html", + "alias":"GetBucket" + }, + "ListParts":{ + "name":"ListParts", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"ListPartsRequest"}, + "output":{"shape":"ListPartsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html" + }, + "PutBucketAcl":{ + "name":"PutBucketAcl", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?acl" + }, + "input":{"shape":"PutBucketAclRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html" + }, + "PutBucketCors":{ + "name":"PutBucketCors", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?cors" + }, + "input":{"shape":"PutBucketCorsRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html" + }, + "PutBucketLifecycle":{ + "name":"PutBucketLifecycle", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"PutBucketLifecycleRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", + "deprecated":true + }, + "PutBucketLifecycleConfiguration":{ + "name":"PutBucketLifecycleConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?lifecycle" + }, + "input":{"shape":"PutBucketLifecycleConfigurationRequest"} + }, + "PutBucketLogging":{ + "name":"PutBucketLogging", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?logging" + }, + "input":{"shape":"PutBucketLoggingRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html" + }, + "PutBucketNotification":{ + "name":"PutBucketNotification", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?notification" + }, + "input":{"shape":"PutBucketNotificationRequest"}, + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTnotification.html", + "deprecated":true + }, + "PutBucketNotificationConfiguration":{ + "name":"PutBucketNotificationConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?notification" + }, + "input":{"shape":"PutBucketNotificationConfigurationRequest"} + }, + "PutBucketPolicy":{ + "name":"PutBucketPolicy", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?policy" + }, + "input":{"shape":"PutBucketPolicyRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html" + }, + "PutBucketReplication":{ + "name":"PutBucketReplication", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?replication" + }, + "input":{"shape":"PutBucketReplicationRequest"} + }, + "PutBucketRequestPayment":{ + "name":"PutBucketRequestPayment", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?requestPayment" + }, + "input":{"shape":"PutBucketRequestPaymentRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html" + }, + "PutBucketTagging":{ + "name":"PutBucketTagging", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?tagging" + }, + "input":{"shape":"PutBucketTaggingRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html" + }, + "PutBucketVersioning":{ + "name":"PutBucketVersioning", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?versioning" + }, + "input":{"shape":"PutBucketVersioningRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html" + }, + "PutBucketWebsite":{ + "name":"PutBucketWebsite", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?website" + }, + "input":{"shape":"PutBucketWebsiteRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html" + }, + "PutObject":{ + 
"name":"PutObject", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"PutObjectRequest"}, + "output":{"shape":"PutObjectOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html" + }, + "PutObjectAcl":{ + "name":"PutObjectAcl", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}?acl" + }, + "input":{"shape":"PutObjectAclRequest"}, + "output":{"shape":"PutObjectAclOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html" + }, + "RestoreObject":{ + "name":"RestoreObject", + "http":{ + "method":"POST", + "requestUri":"/{Bucket}/{Key+}?restore" + }, + "input":{"shape":"RestoreObjectRequest"}, + "output":{"shape":"RestoreObjectOutput"}, + "errors":[ + {"shape":"ObjectAlreadyInActiveTierError"} + ], + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", + "alias":"PostObjectRestore" + }, + "UploadPart":{ + "name":"UploadPart", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"UploadPartRequest"}, + "output":{"shape":"UploadPartOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html" + }, + "UploadPartCopy":{ + "name":"UploadPartCopy", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}" + }, + "input":{"shape":"UploadPartCopyRequest"}, + "output":{"shape":"UploadPartCopyOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html" + } + }, + "shapes":{ + "AbortMultipartUploadOutput":{ + "type":"structure", + "members":{ + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "AbortMultipartUploadRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "UploadId" + ], + "members":{ + "Bucket":{ + 
"shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "AcceptRanges":{"type":"string"}, + "AccessControlPolicy":{ + "type":"structure", + "members":{ + "Grants":{ + "shape":"Grants", + "locationName":"AccessControlList" + }, + "Owner":{"shape":"Owner"} + } + }, + "AllowedHeader":{"type":"string"}, + "AllowedHeaders":{ + "type":"list", + "member":{"shape":"AllowedHeader"}, + "flattened":true + }, + "AllowedMethod":{"type":"string"}, + "AllowedMethods":{ + "type":"list", + "member":{"shape":"AllowedMethod"}, + "flattened":true + }, + "AllowedOrigin":{"type":"string"}, + "AllowedOrigins":{ + "type":"list", + "member":{"shape":"AllowedOrigin"}, + "flattened":true + }, + "Body":{"type":"blob"}, + "Bucket":{ + "type":"structure", + "members":{ + "Name":{"shape":"BucketName"}, + "CreationDate":{"shape":"CreationDate"} + } + }, + "BucketAlreadyExists":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "BucketCannedACL":{ + "type":"string", + "enum":[ + "private", + "public-read", + "public-read-write", + "authenticated-read" + ] + }, + "BucketLifecycleConfiguration":{ + "type":"structure", + "required":["Rules"], + "members":{ + "Rules":{ + "shape":"LifecycleRules", + "locationName":"Rule" + } + } + }, + "BucketLocationConstraint":{ + "type":"string", + "enum":[ + "EU", + "eu-west-1", + "us-west-1", + "us-west-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "sa-east-1", + "cn-north-1", + "eu-central-1" + ] + }, + "BucketLoggingStatus":{ + "type":"structure", + "members":{ + "LoggingEnabled":{"shape":"LoggingEnabled"} + } + }, + "BucketLogsPermission":{ + "type":"string", + "enum":[ + "FULL_CONTROL", + "READ", + 
"WRITE" + ] + }, + "BucketName":{"type":"string"}, + "BucketVersioningStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Suspended" + ] + }, + "Buckets":{ + "type":"list", + "member":{ + "shape":"Bucket", + "locationName":"Bucket" + } + }, + "CORSConfiguration":{ + "type":"structure", + "required":["CORSRules"], + "members":{ + "CORSRules":{ + "shape":"CORSRules", + "locationName":"CORSRule" + } + } + }, + "CORSRule":{ + "type":"structure", + "required":[ + "AllowedMethods", + "AllowedOrigins" + ], + "members":{ + "AllowedHeaders":{ + "shape":"AllowedHeaders", + "locationName":"AllowedHeader" + }, + "AllowedMethods":{ + "shape":"AllowedMethods", + "locationName":"AllowedMethod" + }, + "AllowedOrigins":{ + "shape":"AllowedOrigins", + "locationName":"AllowedOrigin" + }, + "ExposeHeaders":{ + "shape":"ExposeHeaders", + "locationName":"ExposeHeader" + }, + "MaxAgeSeconds":{"shape":"MaxAgeSeconds"} + } + }, + "CORSRules":{ + "type":"list", + "member":{"shape":"CORSRule"}, + "flattened":true + }, + "CacheControl":{"type":"string"}, + "CloudFunction":{"type":"string"}, + "CloudFunctionConfiguration":{ + "type":"structure", + "members":{ + "Id":{"shape":"NotificationId"}, + "Event":{ + "shape":"Event", + "deprecated":true + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "CloudFunction":{"shape":"CloudFunction"}, + "InvocationRole":{"shape":"CloudFunctionInvocationRole"} + } + }, + "CloudFunctionInvocationRole":{"type":"string"}, + "Code":{"type":"string"}, + "CommonPrefix":{ + "type":"structure", + "members":{ + "Prefix":{"shape":"Prefix"} + } + }, + "CommonPrefixList":{ + "type":"list", + "member":{"shape":"CommonPrefix"}, + "flattened":true + }, + "CompleteMultipartUploadOutput":{ + "type":"structure", + "members":{ + "Location":{"shape":"Location"}, + "Bucket":{"shape":"BucketName"}, + "Key":{"shape":"ObjectKey"}, + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + 
"ETag":{"shape":"ETag"}, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "CompleteMultipartUploadRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "UploadId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "MultipartUpload":{ + "shape":"CompletedMultipartUpload", + "locationName":"CompleteMultipartUpload", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"MultipartUpload" + }, + "CompletedMultipartUpload":{ + "type":"structure", + "members":{ + "Parts":{ + "shape":"CompletedPartList", + "locationName":"Part" + } + } + }, + "CompletedPart":{ + "type":"structure", + "members":{ + "ETag":{"shape":"ETag"}, + "PartNumber":{"shape":"PartNumber"} + } + }, + "CompletedPartList":{ + "type":"list", + "member":{"shape":"CompletedPart"}, + "flattened":true + }, + "Condition":{ + "type":"structure", + "members":{ + "HttpErrorCodeReturnedEquals":{"shape":"HttpErrorCodeReturnedEquals"}, + "KeyPrefixEquals":{"shape":"KeyPrefixEquals"} + } + }, + "ContentDisposition":{"type":"string"}, + "ContentEncoding":{"type":"string"}, + "ContentLanguage":{"type":"string"}, + "ContentLength":{"type":"integer"}, + 
"ContentMD5":{"type":"string"}, + "ContentRange":{"type":"string"}, + "ContentType":{"type":"string"}, + "CopyObjectOutput":{ + "type":"structure", + "members":{ + "CopyObjectResult":{"shape":"CopyObjectResult"}, + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "CopySourceVersionId":{ + "shape":"CopySourceVersionId", + "location":"header", + "locationName":"x-amz-copy-source-version-id" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + }, + "payload":"CopyObjectResult" + }, + "CopyObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "CopySource", + "Key" + ], + "members":{ + "ACL":{ + "shape":"ObjectCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + 
"location":"header", + "locationName":"Content-Language" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "CopySource":{ + "shape":"CopySource", + "location":"header", + "locationName":"x-amz-copy-source" + }, + "CopySourceIfMatch":{ + "shape":"CopySourceIfMatch", + "location":"header", + "locationName":"x-amz-copy-source-if-match" + }, + "CopySourceIfModifiedSince":{ + "shape":"CopySourceIfModifiedSince", + "location":"header", + "locationName":"x-amz-copy-source-if-modified-since" + }, + "CopySourceIfNoneMatch":{ + "shape":"CopySourceIfNoneMatch", + "location":"header", + "locationName":"x-amz-copy-source-if-none-match" + }, + "CopySourceIfUnmodifiedSince":{ + "shape":"CopySourceIfUnmodifiedSince", + "location":"header", + "locationName":"x-amz-copy-source-if-unmodified-since" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "MetadataDirective":{ + "shape":"MetadataDirective", + "location":"header", + "locationName":"x-amz-metadata-directive" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "WebsiteRedirectLocation":{ + 
"shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "CopySourceSSECustomerAlgorithm":{ + "shape":"CopySourceSSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-algorithm" + }, + "CopySourceSSECustomerKey":{ + "shape":"CopySourceSSECustomerKey", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-key" + }, + "CopySourceSSECustomerKeyMD5":{ + "shape":"CopySourceSSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "CopyObjectResult":{ + "type":"structure", + "members":{ + "ETag":{"shape":"ETag"}, + "LastModified":{"shape":"LastModified"} + } + }, + "CopyPartResult":{ + "type":"structure", + "members":{ + "ETag":{"shape":"ETag"}, + "LastModified":{"shape":"LastModified"} + } + }, + "CopySource":{ + "type":"string", + "pattern":"\\/.+\\/.+" + }, + "CopySourceIfMatch":{"type":"string"}, + "CopySourceIfModifiedSince":{"type":"timestamp"}, + "CopySourceIfNoneMatch":{"type":"string"}, + "CopySourceIfUnmodifiedSince":{"type":"timestamp"}, + "CopySourceRange":{"type":"string"}, + "CopySourceSSECustomerAlgorithm":{"type":"string"}, + 
"CopySourceSSECustomerKey":{ + "type":"string", + "sensitive":true + }, + "CopySourceSSECustomerKeyMD5":{"type":"string"}, + "CopySourceVersionId":{"type":"string"}, + "CreateBucketConfiguration":{ + "type":"structure", + "members":{ + "LocationConstraint":{"shape":"BucketLocationConstraint"} + } + }, + "CreateBucketOutput":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"Location", + "location":"header", + "locationName":"Location" + } + } + }, + "CreateBucketRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "ACL":{ + "shape":"BucketCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CreateBucketConfiguration":{ + "shape":"CreateBucketConfiguration", + "locationName":"CreateBucketConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWrite":{ + "shape":"GrantWrite", + "location":"header", + "locationName":"x-amz-grant-write" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + } + }, + "payload":"CreateBucketConfiguration" + }, + "CreateMultipartUploadOutput":{ + "type":"structure", + "members":{ + "Bucket":{ + "shape":"BucketName", + "locationName":"Bucket" + }, + "Key":{"shape":"ObjectKey"}, + "UploadId":{"shape":"MultipartUploadId"}, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + 
"locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "CreateMultipartUploadRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "ACL":{ + "shape":"ObjectCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "location":"header", + "locationName":"Content-Language" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Metadata":{ + 
"shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "CreationDate":{"type":"timestamp"}, + "Date":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "Days":{"type":"integer"}, + "Delete":{ + "type":"structure", + "required":["Objects"], + "members":{ + "Objects":{ + "shape":"ObjectIdentifierList", + "locationName":"Object" + }, + "Quiet":{"shape":"Quiet"} + } + }, + "DeleteBucketCorsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketLifecycleRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketPolicyRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + 
"shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketReplicationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketTaggingRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteBucketWebsiteRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "DeleteMarker":{"type":"boolean"}, + "DeleteMarkerEntry":{ + "type":"structure", + "members":{ + "Owner":{"shape":"Owner"}, + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"}, + "IsLatest":{"shape":"IsLatest"}, + "LastModified":{"shape":"LastModified"} + } + }, + "DeleteMarkerVersionId":{"type":"string"}, + "DeleteMarkers":{ + "type":"list", + "member":{"shape":"DeleteMarkerEntry"}, + "flattened":true + }, + "DeleteObjectOutput":{ + "type":"structure", + "members":{ + "DeleteMarker":{ + "shape":"DeleteMarker", + "location":"header", + "locationName":"x-amz-delete-marker" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "DeleteObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "MFA":{ + "shape":"MFA", + 
"location":"header", + "locationName":"x-amz-mfa" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "DeleteObjectsOutput":{ + "type":"structure", + "members":{ + "Deleted":{"shape":"DeletedObjects"}, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + }, + "Errors":{ + "shape":"Errors", + "locationName":"Error" + } + } + }, + "DeleteObjectsRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Delete" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delete":{ + "shape":"Delete", + "locationName":"Delete", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "MFA":{ + "shape":"MFA", + "location":"header", + "locationName":"x-amz-mfa" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"Delete" + }, + "DeletedObject":{ + "type":"structure", + "members":{ + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"}, + "DeleteMarker":{"shape":"DeleteMarker"}, + "DeleteMarkerVersionId":{"shape":"DeleteMarkerVersionId"} + } + }, + "DeletedObjects":{ + "type":"list", + "member":{"shape":"DeletedObject"}, + "flattened":true + }, + "Delimiter":{"type":"string"}, + "Destination":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{"shape":"BucketName"}, + "StorageClass":{"shape":"StorageClass"} + } + }, + "DisplayName":{"type":"string"}, + "ETag":{"type":"string"}, + "EmailAddress":{"type":"string"}, + "EncodingType":{ + "type":"string", + "enum":["url"] + }, + "Error":{ + "type":"structure", + "members":{ + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"}, + "Code":{"shape":"Code"}, + 
"Message":{"shape":"Message"} + } + }, + "ErrorDocument":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"ObjectKey"} + } + }, + "Errors":{ + "type":"list", + "member":{"shape":"Error"}, + "flattened":true + }, + "Event":{ + "type":"string", + "enum":[ + "s3:ReducedRedundancyLostObject", + "s3:ObjectCreated:*", + "s3:ObjectCreated:Put", + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:CompleteMultipartUpload", + "s3:ObjectRemoved:*", + "s3:ObjectRemoved:Delete", + "s3:ObjectRemoved:DeleteMarkerCreated" + ] + }, + "EventList":{ + "type":"list", + "member":{"shape":"Event"}, + "flattened":true + }, + "Expiration":{"type":"string"}, + "ExpirationStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "Expires":{"type":"timestamp"}, + "ExposeHeader":{"type":"string"}, + "ExposeHeaders":{ + "type":"list", + "member":{"shape":"ExposeHeader"}, + "flattened":true + }, + "FilterRule":{ + "type":"structure", + "members":{ + "Name":{"shape":"FilterRuleName"}, + "Value":{"shape":"FilterRuleValue"} + } + }, + "FilterRuleList":{ + "type":"list", + "member":{"shape":"FilterRule"}, + "flattened":true + }, + "FilterRuleName":{ + "type":"string", + "enum":[ + "prefix", + "suffix" + ] + }, + "FilterRuleValue":{"type":"string"}, + "GetBucketAclOutput":{ + "type":"structure", + "members":{ + "Owner":{"shape":"Owner"}, + "Grants":{ + "shape":"Grants", + "locationName":"AccessControlList" + } + } + }, + "GetBucketAclRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketCorsOutput":{ + "type":"structure", + "members":{ + "CORSRules":{ + "shape":"CORSRules", + "locationName":"CORSRule" + } + } + }, + "GetBucketCorsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + 
"GetBucketLifecycleConfigurationOutput":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"LifecycleRules", + "locationName":"Rule" + } + } + }, + "GetBucketLifecycleConfigurationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketLifecycleOutput":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"Rules", + "locationName":"Rule" + } + } + }, + "GetBucketLifecycleRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketLocationOutput":{ + "type":"structure", + "members":{ + "LocationConstraint":{"shape":"BucketLocationConstraint"} + } + }, + "GetBucketLocationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketLoggingOutput":{ + "type":"structure", + "members":{ + "LoggingEnabled":{"shape":"LoggingEnabled"} + } + }, + "GetBucketLoggingRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketNotificationConfigurationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketPolicyOutput":{ + "type":"structure", + "members":{ + "Policy":{"shape":"Policy"} + }, + "payload":"Policy" + }, + "GetBucketPolicyRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketReplicationOutput":{ + "type":"structure", + "members":{ + "ReplicationConfiguration":{"shape":"ReplicationConfiguration"} + }, + 
"payload":"ReplicationConfiguration" + }, + "GetBucketReplicationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketRequestPaymentOutput":{ + "type":"structure", + "members":{ + "Payer":{"shape":"Payer"} + } + }, + "GetBucketRequestPaymentRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketTaggingOutput":{ + "type":"structure", + "required":["TagSet"], + "members":{ + "TagSet":{"shape":"TagSet"} + } + }, + "GetBucketTaggingRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketVersioningOutput":{ + "type":"structure", + "members":{ + "Status":{"shape":"BucketVersioningStatus"}, + "MFADelete":{ + "shape":"MFADeleteStatus", + "locationName":"MfaDelete" + } + } + }, + "GetBucketVersioningRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetBucketWebsiteOutput":{ + "type":"structure", + "members":{ + "RedirectAllRequestsTo":{"shape":"RedirectAllRequestsTo"}, + "IndexDocument":{"shape":"IndexDocument"}, + "ErrorDocument":{"shape":"ErrorDocument"}, + "RoutingRules":{"shape":"RoutingRules"} + } + }, + "GetBucketWebsiteRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "GetObjectAclOutput":{ + "type":"structure", + "members":{ + "Owner":{"shape":"Owner"}, + "Grants":{ + "shape":"Grants", + "locationName":"AccessControlList" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + 
"GetObjectAclRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "GetObjectOutput":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "streaming":true + }, + "DeleteMarker":{ + "shape":"DeleteMarker", + "location":"header", + "locationName":"x-amz-delete-marker" + }, + "AcceptRanges":{ + "shape":"AcceptRanges", + "location":"header", + "locationName":"accept-ranges" + }, + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "Restore":{ + "shape":"Restore", + "location":"header", + "locationName":"x-amz-restore" + }, + "LastModified":{ + "shape":"LastModified", + "location":"header", + "locationName":"Last-Modified" + }, + "ContentLength":{ + "shape":"ContentLength", + "location":"header", + "locationName":"Content-Length" + }, + "ETag":{ + "shape":"ETag", + "location":"header", + "locationName":"ETag" + }, + "MissingMeta":{ + "shape":"MissingMeta", + "location":"header", + "locationName":"x-amz-missing-meta" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "location":"header", + "locationName":"Content-Language" + }, 
+ "ContentRange":{ + "shape":"ContentRange", + "location":"header", + "locationName":"Content-Range" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatus", + "location":"header", + "locationName":"x-amz-replication-status" + } + }, + "payload":"Body" + }, + "GetObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "IfMatch":{ + "shape":"IfMatch", + "location":"header", + "locationName":"If-Match" + }, + "IfModifiedSince":{ + "shape":"IfModifiedSince", + "location":"header", + "locationName":"If-Modified-Since" + }, + "IfNoneMatch":{ + "shape":"IfNoneMatch", + "location":"header", + "locationName":"If-None-Match" + }, 
+ "IfUnmodifiedSince":{ + "shape":"IfUnmodifiedSince", + "location":"header", + "locationName":"If-Unmodified-Since" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Range":{ + "shape":"Range", + "location":"header", + "locationName":"Range" + }, + "ResponseCacheControl":{ + "shape":"ResponseCacheControl", + "location":"querystring", + "locationName":"response-cache-control" + }, + "ResponseContentDisposition":{ + "shape":"ResponseContentDisposition", + "location":"querystring", + "locationName":"response-content-disposition" + }, + "ResponseContentEncoding":{ + "shape":"ResponseContentEncoding", + "location":"querystring", + "locationName":"response-content-encoding" + }, + "ResponseContentLanguage":{ + "shape":"ResponseContentLanguage", + "location":"querystring", + "locationName":"response-content-language" + }, + "ResponseContentType":{ + "shape":"ResponseContentType", + "location":"querystring", + "locationName":"response-content-type" + }, + "ResponseExpires":{ + "shape":"ResponseExpires", + "location":"querystring", + "locationName":"response-expires" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "GetObjectTorrentOutput":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "streaming":true + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + 
"locationName":"x-amz-request-charged" + } + }, + "payload":"Body" + }, + "GetObjectTorrentRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "Grant":{ + "type":"structure", + "members":{ + "Grantee":{"shape":"Grantee"}, + "Permission":{"shape":"Permission"} + } + }, + "GrantFullControl":{"type":"string"}, + "GrantRead":{"type":"string"}, + "GrantReadACP":{"type":"string"}, + "GrantWrite":{"type":"string"}, + "GrantWriteACP":{"type":"string"}, + "Grantee":{ + "type":"structure", + "required":["Type"], + "members":{ + "DisplayName":{"shape":"DisplayName"}, + "EmailAddress":{"shape":"EmailAddress"}, + "ID":{"shape":"ID"}, + "Type":{ + "shape":"Type", + "locationName":"xsi:type", + "xmlAttribute":true + }, + "URI":{"shape":"URI"} + }, + "xmlNamespace":{ + "prefix":"xsi", + "uri":"http://www.w3.org/2001/XMLSchema-instance" + } + }, + "Grants":{ + "type":"list", + "member":{ + "shape":"Grant", + "locationName":"Grant" + } + }, + "HeadBucketRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + } + } + }, + "HeadObjectOutput":{ + "type":"structure", + "members":{ + "DeleteMarker":{ + "shape":"DeleteMarker", + "location":"header", + "locationName":"x-amz-delete-marker" + }, + "AcceptRanges":{ + "shape":"AcceptRanges", + "location":"header", + "locationName":"accept-ranges" + }, + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "Restore":{ + "shape":"Restore", + "location":"header", + "locationName":"x-amz-restore" + }, + "LastModified":{ + "shape":"LastModified", + "location":"header", + 
"locationName":"Last-Modified" + }, + "ContentLength":{ + "shape":"ContentLength", + "location":"header", + "locationName":"Content-Length" + }, + "ETag":{ + "shape":"ETag", + "location":"header", + "locationName":"ETag" + }, + "MissingMeta":{ + "shape":"MissingMeta", + "location":"header", + "locationName":"x-amz-missing-meta" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "location":"header", + "locationName":"Content-Language" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + 
"locationName":"x-amz-storage-class" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatus", + "location":"header", + "locationName":"x-amz-replication-status" + } + } + }, + "HeadObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "IfMatch":{ + "shape":"IfMatch", + "location":"header", + "locationName":"If-Match" + }, + "IfModifiedSince":{ + "shape":"IfModifiedSince", + "location":"header", + "locationName":"If-Modified-Since" + }, + "IfNoneMatch":{ + "shape":"IfNoneMatch", + "location":"header", + "locationName":"If-None-Match" + }, + "IfUnmodifiedSince":{ + "shape":"IfUnmodifiedSince", + "location":"header", + "locationName":"If-Unmodified-Since" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Range":{ + "shape":"Range", + "location":"header", + "locationName":"Range" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "HostName":{"type":"string"}, + "HttpErrorCodeReturnedEquals":{"type":"string"}, + "HttpRedirectCode":{"type":"string"}, + "ID":{"type":"string"}, + "IfMatch":{"type":"string"}, + "IfModifiedSince":{"type":"timestamp"}, + 
"IfNoneMatch":{"type":"string"}, + "IfUnmodifiedSince":{"type":"timestamp"}, + "IndexDocument":{ + "type":"structure", + "required":["Suffix"], + "members":{ + "Suffix":{"shape":"Suffix"} + } + }, + "Initiated":{"type":"timestamp"}, + "Initiator":{ + "type":"structure", + "members":{ + "ID":{"shape":"ID"}, + "DisplayName":{"shape":"DisplayName"} + } + }, + "IsLatest":{"type":"boolean"}, + "IsTruncated":{"type":"boolean"}, + "KeyMarker":{"type":"string"}, + "KeyPrefixEquals":{"type":"string"}, + "LambdaFunctionArn":{"type":"string"}, + "LambdaFunctionConfiguration":{ + "type":"structure", + "required":[ + "LambdaFunctionArn", + "Events" + ], + "members":{ + "Id":{"shape":"NotificationId"}, + "LambdaFunctionArn":{ + "shape":"LambdaFunctionArn", + "locationName":"CloudFunction" + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Filter":{"shape":"NotificationConfigurationFilter"} + } + }, + "LambdaFunctionConfigurationList":{ + "type":"list", + "member":{"shape":"LambdaFunctionConfiguration"}, + "flattened":true + }, + "LastModified":{"type":"timestamp"}, + "LifecycleConfiguration":{ + "type":"structure", + "required":["Rules"], + "members":{ + "Rules":{ + "shape":"Rules", + "locationName":"Rule" + } + } + }, + "LifecycleExpiration":{ + "type":"structure", + "members":{ + "Date":{"shape":"Date"}, + "Days":{"shape":"Days"} + } + }, + "LifecycleRule":{ + "type":"structure", + "required":[ + "Prefix", + "Status" + ], + "members":{ + "Expiration":{"shape":"LifecycleExpiration"}, + "ID":{"shape":"ID"}, + "Prefix":{"shape":"Prefix"}, + "Status":{"shape":"ExpirationStatus"}, + "Transitions":{ + "shape":"TransitionList", + "locationName":"Transition" + }, + "NoncurrentVersionTransitions":{ + "shape":"NoncurrentVersionTransitionList", + "locationName":"NoncurrentVersionTransition" + }, + "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"} + } + }, + "LifecycleRules":{ + "type":"list", + "member":{"shape":"LifecycleRule"}, + 
"flattened":true + }, + "ListBucketsOutput":{ + "type":"structure", + "members":{ + "Buckets":{"shape":"Buckets"}, + "Owner":{"shape":"Owner"} + } + }, + "ListMultipartUploadsOutput":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"BucketName"}, + "KeyMarker":{"shape":"KeyMarker"}, + "UploadIdMarker":{"shape":"UploadIdMarker"}, + "NextKeyMarker":{"shape":"NextKeyMarker"}, + "Prefix":{"shape":"Prefix"}, + "Delimiter":{"shape":"Delimiter"}, + "NextUploadIdMarker":{"shape":"NextUploadIdMarker"}, + "MaxUploads":{"shape":"MaxUploads"}, + "IsTruncated":{"shape":"IsTruncated"}, + "Uploads":{ + "shape":"MultipartUploadList", + "locationName":"Upload" + }, + "CommonPrefixes":{"shape":"CommonPrefixList"}, + "EncodingType":{"shape":"EncodingType"} + } + }, + "ListMultipartUploadsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delimiter":{ + "shape":"Delimiter", + "location":"querystring", + "locationName":"delimiter" + }, + "EncodingType":{ + "shape":"EncodingType", + "location":"querystring", + "locationName":"encoding-type" + }, + "KeyMarker":{ + "shape":"KeyMarker", + "location":"querystring", + "locationName":"key-marker" + }, + "MaxUploads":{ + "shape":"MaxUploads", + "location":"querystring", + "locationName":"max-uploads" + }, + "Prefix":{ + "shape":"Prefix", + "location":"querystring", + "locationName":"prefix" + }, + "UploadIdMarker":{ + "shape":"UploadIdMarker", + "location":"querystring", + "locationName":"upload-id-marker" + } + } + }, + "ListObjectVersionsOutput":{ + "type":"structure", + "members":{ + "IsTruncated":{"shape":"IsTruncated"}, + "KeyMarker":{"shape":"KeyMarker"}, + "VersionIdMarker":{"shape":"VersionIdMarker"}, + "NextKeyMarker":{"shape":"NextKeyMarker"}, + "NextVersionIdMarker":{"shape":"NextVersionIdMarker"}, + "Versions":{ + "shape":"ObjectVersionList", + "locationName":"Version" + }, + "DeleteMarkers":{ + 
"shape":"DeleteMarkers", + "locationName":"DeleteMarker" + }, + "Name":{"shape":"BucketName"}, + "Prefix":{"shape":"Prefix"}, + "Delimiter":{"shape":"Delimiter"}, + "MaxKeys":{"shape":"MaxKeys"}, + "CommonPrefixes":{"shape":"CommonPrefixList"}, + "EncodingType":{"shape":"EncodingType"} + } + }, + "ListObjectVersionsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delimiter":{ + "shape":"Delimiter", + "location":"querystring", + "locationName":"delimiter" + }, + "EncodingType":{ + "shape":"EncodingType", + "location":"querystring", + "locationName":"encoding-type" + }, + "KeyMarker":{ + "shape":"KeyMarker", + "location":"querystring", + "locationName":"key-marker" + }, + "MaxKeys":{ + "shape":"MaxKeys", + "location":"querystring", + "locationName":"max-keys" + }, + "Prefix":{ + "shape":"Prefix", + "location":"querystring", + "locationName":"prefix" + }, + "VersionIdMarker":{ + "shape":"VersionIdMarker", + "location":"querystring", + "locationName":"version-id-marker" + } + } + }, + "ListObjectsOutput":{ + "type":"structure", + "members":{ + "IsTruncated":{"shape":"IsTruncated"}, + "Marker":{"shape":"Marker"}, + "NextMarker":{"shape":"NextMarker"}, + "Contents":{"shape":"ObjectList"}, + "Name":{"shape":"BucketName"}, + "Prefix":{"shape":"Prefix"}, + "Delimiter":{"shape":"Delimiter"}, + "MaxKeys":{"shape":"MaxKeys"}, + "CommonPrefixes":{"shape":"CommonPrefixList"}, + "EncodingType":{"shape":"EncodingType"} + } + }, + "ListObjectsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Delimiter":{ + "shape":"Delimiter", + "location":"querystring", + "locationName":"delimiter" + }, + "EncodingType":{ + "shape":"EncodingType", + "location":"querystring", + "locationName":"encoding-type" + }, + "Marker":{ + "shape":"Marker", + "location":"querystring", + 
"locationName":"marker" + }, + "MaxKeys":{ + "shape":"MaxKeys", + "location":"querystring", + "locationName":"max-keys" + }, + "Prefix":{ + "shape":"Prefix", + "location":"querystring", + "locationName":"prefix" + } + } + }, + "ListPartsOutput":{ + "type":"structure", + "members":{ + "Bucket":{"shape":"BucketName"}, + "Key":{"shape":"ObjectKey"}, + "UploadId":{"shape":"MultipartUploadId"}, + "PartNumberMarker":{"shape":"PartNumberMarker"}, + "NextPartNumberMarker":{"shape":"NextPartNumberMarker"}, + "MaxParts":{"shape":"MaxParts"}, + "IsTruncated":{"shape":"IsTruncated"}, + "Parts":{ + "shape":"Parts", + "locationName":"Part" + }, + "Initiator":{"shape":"Initiator"}, + "Owner":{"shape":"Owner"}, + "StorageClass":{"shape":"StorageClass"}, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "ListPartsRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "UploadId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "MaxParts":{ + "shape":"MaxParts", + "location":"querystring", + "locationName":"max-parts" + }, + "PartNumberMarker":{ + "shape":"PartNumberMarker", + "location":"querystring", + "locationName":"part-number-marker" + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "Location":{"type":"string"}, + "LoggingEnabled":{ + "type":"structure", + "members":{ + "TargetBucket":{"shape":"TargetBucket"}, + "TargetGrants":{"shape":"TargetGrants"}, + "TargetPrefix":{"shape":"TargetPrefix"} + } + }, + "MFA":{"type":"string"}, + "MFADelete":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "MFADeleteStatus":{ + "type":"string", + "enum":[ + 
"Enabled", + "Disabled" + ] + }, + "Marker":{"type":"string"}, + "MaxAgeSeconds":{"type":"integer"}, + "MaxKeys":{"type":"integer"}, + "MaxParts":{"type":"integer"}, + "MaxUploads":{"type":"integer"}, + "Message":{"type":"string"}, + "Metadata":{ + "type":"map", + "key":{"shape":"MetadataKey"}, + "value":{"shape":"MetadataValue"} + }, + "MetadataDirective":{ + "type":"string", + "enum":[ + "COPY", + "REPLACE" + ] + }, + "MetadataKey":{"type":"string"}, + "MetadataValue":{"type":"string"}, + "MissingMeta":{"type":"integer"}, + "MultipartUpload":{ + "type":"structure", + "members":{ + "UploadId":{"shape":"MultipartUploadId"}, + "Key":{"shape":"ObjectKey"}, + "Initiated":{"shape":"Initiated"}, + "StorageClass":{"shape":"StorageClass"}, + "Owner":{"shape":"Owner"}, + "Initiator":{"shape":"Initiator"} + } + }, + "MultipartUploadId":{"type":"string"}, + "MultipartUploadList":{ + "type":"list", + "member":{"shape":"MultipartUpload"}, + "flattened":true + }, + "NextKeyMarker":{"type":"string"}, + "NextMarker":{"type":"string"}, + "NextPartNumberMarker":{"type":"integer"}, + "NextUploadIdMarker":{"type":"string"}, + "NextVersionIdMarker":{"type":"string"}, + "NoSuchBucket":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchKey":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoSuchUpload":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NoncurrentVersionExpiration":{ + "type":"structure", + "members":{ + "NoncurrentDays":{"shape":"Days"} + } + }, + "NoncurrentVersionTransition":{ + "type":"structure", + "members":{ + "NoncurrentDays":{"shape":"Days"}, + "StorageClass":{"shape":"TransitionStorageClass"} + } + }, + "NoncurrentVersionTransitionList":{ + "type":"list", + "member":{"shape":"NoncurrentVersionTransition"}, + "flattened":true + }, + "NotificationConfiguration":{ + "type":"structure", + "members":{ + "TopicConfigurations":{ + "shape":"TopicConfigurationList", + "locationName":"TopicConfiguration" + 
}, + "QueueConfigurations":{ + "shape":"QueueConfigurationList", + "locationName":"QueueConfiguration" + }, + "LambdaFunctionConfigurations":{ + "shape":"LambdaFunctionConfigurationList", + "locationName":"CloudFunctionConfiguration" + } + } + }, + "NotificationConfigurationDeprecated":{ + "type":"structure", + "members":{ + "TopicConfiguration":{"shape":"TopicConfigurationDeprecated"}, + "QueueConfiguration":{"shape":"QueueConfigurationDeprecated"}, + "CloudFunctionConfiguration":{"shape":"CloudFunctionConfiguration"} + } + }, + "NotificationConfigurationFilter":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"S3KeyFilter", + "locationName":"S3Key" + } + } + }, + "NotificationId":{"type":"string"}, + "Object":{ + "type":"structure", + "members":{ + "Key":{"shape":"ObjectKey"}, + "LastModified":{"shape":"LastModified"}, + "ETag":{"shape":"ETag"}, + "Size":{"shape":"Size"}, + "StorageClass":{"shape":"ObjectStorageClass"}, + "Owner":{"shape":"Owner"} + } + }, + "ObjectAlreadyInActiveTierError":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ObjectCannedACL":{ + "type":"string", + "enum":[ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control" + ] + }, + "ObjectIdentifier":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"} + } + }, + "ObjectIdentifierList":{ + "type":"list", + "member":{"shape":"ObjectIdentifier"}, + "flattened":true + }, + "ObjectKey":{ + "type":"string", + "min":1 + }, + "ObjectList":{ + "type":"list", + "member":{"shape":"Object"}, + "flattened":true + }, + "ObjectNotInActiveTierError":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ObjectStorageClass":{ + "type":"string", + "enum":[ + "STANDARD", + "REDUCED_REDUNDANCY", + "GLACIER" + ] + }, + "ObjectVersion":{ + "type":"structure", + "members":{ + "ETag":{"shape":"ETag"}, 
+ "Size":{"shape":"Size"}, + "StorageClass":{"shape":"ObjectVersionStorageClass"}, + "Key":{"shape":"ObjectKey"}, + "VersionId":{"shape":"ObjectVersionId"}, + "IsLatest":{"shape":"IsLatest"}, + "LastModified":{"shape":"LastModified"}, + "Owner":{"shape":"Owner"} + } + }, + "ObjectVersionId":{"type":"string"}, + "ObjectVersionList":{ + "type":"list", + "member":{"shape":"ObjectVersion"}, + "flattened":true + }, + "ObjectVersionStorageClass":{ + "type":"string", + "enum":["STANDARD"] + }, + "Owner":{ + "type":"structure", + "members":{ + "DisplayName":{"shape":"DisplayName"}, + "ID":{"shape":"ID"} + } + }, + "Part":{ + "type":"structure", + "members":{ + "PartNumber":{"shape":"PartNumber"}, + "LastModified":{"shape":"LastModified"}, + "ETag":{"shape":"ETag"}, + "Size":{"shape":"Size"} + } + }, + "PartNumber":{"type":"integer"}, + "PartNumberMarker":{"type":"integer"}, + "Parts":{ + "type":"list", + "member":{"shape":"Part"}, + "flattened":true + }, + "Payer":{ + "type":"string", + "enum":[ + "Requester", + "BucketOwner" + ] + }, + "Permission":{ + "type":"string", + "enum":[ + "FULL_CONTROL", + "WRITE", + "WRITE_ACP", + "READ", + "READ_ACP" + ] + }, + "Policy":{"type":"string"}, + "Prefix":{"type":"string"}, + "Protocol":{ + "type":"string", + "enum":[ + "http", + "https" + ] + }, + "PutBucketAclRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "ACL":{ + "shape":"BucketCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "AccessControlPolicy":{ + "shape":"AccessControlPolicy", + "locationName":"AccessControlPolicy", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + 
"shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWrite":{ + "shape":"GrantWrite", + "location":"header", + "locationName":"x-amz-grant-write" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + } + }, + "payload":"AccessControlPolicy" + }, + "PutBucketCorsRequest":{ + "type":"structure", + "required":[ + "Bucket", + "CORSConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CORSConfiguration":{ + "shape":"CORSConfiguration", + "locationName":"CORSConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + } + }, + "payload":"CORSConfiguration" + }, + "PutBucketLifecycleConfigurationRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "LifecycleConfiguration":{ + "shape":"BucketLifecycleConfiguration", + "locationName":"LifecycleConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"LifecycleConfiguration" + }, + "PutBucketLifecycleRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "LifecycleConfiguration":{ + "shape":"LifecycleConfiguration", + "locationName":"LifecycleConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"LifecycleConfiguration" + }, + "PutBucketLoggingRequest":{ + "type":"structure", + "required":[ + "Bucket", + 
"BucketLoggingStatus" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "BucketLoggingStatus":{ + "shape":"BucketLoggingStatus", + "locationName":"BucketLoggingStatus", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + } + }, + "payload":"BucketLoggingStatus" + }, + "PutBucketNotificationConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "NotificationConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "NotificationConfiguration":{ + "shape":"NotificationConfiguration", + "locationName":"NotificationConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"NotificationConfiguration" + }, + "PutBucketNotificationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "NotificationConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "NotificationConfiguration":{ + "shape":"NotificationConfigurationDeprecated", + "locationName":"NotificationConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"NotificationConfiguration" + }, + "PutBucketPolicyRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Policy" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "Policy":{"shape":"Policy"} + }, + "payload":"Policy" + }, + "PutBucketReplicationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "ReplicationConfiguration" + ], + "members":{ + "Bucket":{ + 
"shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "ReplicationConfiguration":{ + "shape":"ReplicationConfiguration", + "locationName":"ReplicationConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"ReplicationConfiguration" + }, + "PutBucketRequestPaymentRequest":{ + "type":"structure", + "required":[ + "Bucket", + "RequestPaymentConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "RequestPaymentConfiguration":{ + "shape":"RequestPaymentConfiguration", + "locationName":"RequestPaymentConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"RequestPaymentConfiguration" + }, + "PutBucketTaggingRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Tagging" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "Tagging":{ + "shape":"Tagging", + "locationName":"Tagging", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"Tagging" + }, + "PutBucketVersioningRequest":{ + "type":"structure", + "required":[ + "Bucket", + "VersioningConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "MFA":{ + "shape":"MFA", + "location":"header", + "locationName":"x-amz-mfa" + }, + "VersioningConfiguration":{ + "shape":"VersioningConfiguration", + "locationName":"VersioningConfiguration", + 
"xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"VersioningConfiguration" + }, + "PutBucketWebsiteRequest":{ + "type":"structure", + "required":[ + "Bucket", + "WebsiteConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "WebsiteConfiguration":{ + "shape":"WebsiteConfiguration", + "locationName":"WebsiteConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"WebsiteConfiguration" + }, + "PutObjectAclOutput":{ + "type":"structure", + "members":{ + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "PutObjectAclRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "ACL":{ + "shape":"ObjectCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "AccessControlPolicy":{ + "shape":"AccessControlPolicy", + "locationName":"AccessControlPolicy", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWrite":{ + "shape":"GrantWrite", + "location":"header", + "locationName":"x-amz-grant-write" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "Key":{ + "shape":"ObjectKey", + 
"location":"uri", + "locationName":"Key" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"AccessControlPolicy" + }, + "PutObjectOutput":{ + "type":"structure", + "members":{ + "Expiration":{ + "shape":"Expiration", + "location":"header", + "locationName":"x-amz-expiration" + }, + "ETag":{ + "shape":"ETag", + "location":"header", + "locationName":"ETag" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "PutObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "ACL":{ + "shape":"ObjectCannedACL", + "location":"header", + "locationName":"x-amz-acl" + }, + "Body":{ + "shape":"Body", + "streaming":true + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CacheControl":{ + "shape":"CacheControl", + "location":"header", + "locationName":"Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "location":"header", + "locationName":"Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "location":"header", + "locationName":"Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + 
"location":"header", + "locationName":"Content-Language" + }, + "ContentLength":{ + "shape":"ContentLength", + "location":"header", + "locationName":"Content-Length" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "ContentType":{ + "shape":"ContentType", + "location":"header", + "locationName":"Content-Type" + }, + "Expires":{ + "shape":"Expires", + "location":"header", + "locationName":"Expires" + }, + "GrantFullControl":{ + "shape":"GrantFullControl", + "location":"header", + "locationName":"x-amz-grant-full-control" + }, + "GrantRead":{ + "shape":"GrantRead", + "location":"header", + "locationName":"x-amz-grant-read" + }, + "GrantReadACP":{ + "shape":"GrantReadACP", + "location":"header", + "locationName":"x-amz-grant-read-acp" + }, + "GrantWriteACP":{ + "shape":"GrantWriteACP", + "location":"header", + "locationName":"x-amz-grant-write-acp" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "Metadata":{ + "shape":"Metadata", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "StorageClass":{ + "shape":"StorageClass", + "location":"header", + "locationName":"x-amz-storage-class" + }, + "WebsiteRedirectLocation":{ + "shape":"WebsiteRedirectLocation", + "location":"header", + "locationName":"x-amz-website-redirect-location" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + 
"location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"Body" + }, + "QueueArn":{"type":"string"}, + "QueueConfiguration":{ + "type":"structure", + "required":[ + "QueueArn", + "Events" + ], + "members":{ + "Id":{"shape":"NotificationId"}, + "QueueArn":{ + "shape":"QueueArn", + "locationName":"Queue" + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Filter":{"shape":"NotificationConfigurationFilter"} + } + }, + "QueueConfigurationDeprecated":{ + "type":"structure", + "members":{ + "Id":{"shape":"NotificationId"}, + "Event":{ + "shape":"Event", + "deprecated":true + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Queue":{"shape":"QueueArn"} + } + }, + "QueueConfigurationList":{ + "type":"list", + "member":{"shape":"QueueConfiguration"}, + "flattened":true + }, + "Quiet":{"type":"boolean"}, + "Range":{"type":"string"}, + "Redirect":{ + "type":"structure", + "members":{ + "HostName":{"shape":"HostName"}, + "HttpRedirectCode":{"shape":"HttpRedirectCode"}, + "Protocol":{"shape":"Protocol"}, + "ReplaceKeyPrefixWith":{"shape":"ReplaceKeyPrefixWith"}, + "ReplaceKeyWith":{"shape":"ReplaceKeyWith"} + } + }, + "RedirectAllRequestsTo":{ + "type":"structure", + "required":["HostName"], + "members":{ + "HostName":{"shape":"HostName"}, + "Protocol":{"shape":"Protocol"} + } + }, + "ReplaceKeyPrefixWith":{"type":"string"}, + "ReplaceKeyWith":{"type":"string"}, + "ReplicationConfiguration":{ + "type":"structure", + "required":[ + "Role", + "Rules" + ], + "members":{ + "Role":{"shape":"Role"}, + "Rules":{ + "shape":"ReplicationRules", + "locationName":"Rule" + } + } + }, + "ReplicationRule":{ + "type":"structure", + "required":[ + "Prefix", + "Status", + "Destination" + ], + "members":{ + "ID":{"shape":"ID"}, + "Prefix":{"shape":"Prefix"}, + 
"Status":{"shape":"ReplicationRuleStatus"}, + "Destination":{"shape":"Destination"} + } + }, + "ReplicationRuleStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "ReplicationRules":{ + "type":"list", + "member":{"shape":"ReplicationRule"}, + "flattened":true + }, + "ReplicationStatus":{ + "type":"string", + "enum":[ + "COMPLETE", + "PENDING", + "FAILED", + "REPLICA" + ] + }, + "RequestCharged":{ + "type":"string", + "enum":["requester"] + }, + "RequestPayer":{ + "type":"string", + "enum":["requester"] + }, + "RequestPaymentConfiguration":{ + "type":"structure", + "required":["Payer"], + "members":{ + "Payer":{"shape":"Payer"} + } + }, + "ResponseCacheControl":{"type":"string"}, + "ResponseContentDisposition":{"type":"string"}, + "ResponseContentEncoding":{"type":"string"}, + "ResponseContentLanguage":{"type":"string"}, + "ResponseContentType":{"type":"string"}, + "ResponseExpires":{"type":"timestamp"}, + "Restore":{"type":"string"}, + "RestoreObjectOutput":{ + "type":"structure", + "members":{ + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "RestoreObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "RestoreRequest":{ + "shape":"RestoreRequest", + "locationName":"RestoreRequest", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"RestoreRequest" + }, + "RestoreRequest":{ + "type":"structure", + "required":["Days"], + "members":{ + "Days":{"shape":"Days"} + } + }, + "Role":{"type":"string"}, + 
"RoutingRule":{ + "type":"structure", + "required":["Redirect"], + "members":{ + "Condition":{"shape":"Condition"}, + "Redirect":{"shape":"Redirect"} + } + }, + "RoutingRules":{ + "type":"list", + "member":{ + "shape":"RoutingRule", + "locationName":"RoutingRule" + } + }, + "Rule":{ + "type":"structure", + "required":[ + "Prefix", + "Status" + ], + "members":{ + "Expiration":{"shape":"LifecycleExpiration"}, + "ID":{"shape":"ID"}, + "Prefix":{"shape":"Prefix"}, + "Status":{"shape":"ExpirationStatus"}, + "Transition":{"shape":"Transition"}, + "NoncurrentVersionTransition":{"shape":"NoncurrentVersionTransition"}, + "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"} + } + }, + "Rules":{ + "type":"list", + "member":{"shape":"Rule"}, + "flattened":true + }, + "S3KeyFilter":{ + "type":"structure", + "members":{ + "FilterRules":{ + "shape":"FilterRuleList", + "locationName":"FilterRule" + } + } + }, + "SSECustomerAlgorithm":{"type":"string"}, + "SSECustomerKey":{ + "type":"string", + "sensitive":true + }, + "SSECustomerKeyMD5":{"type":"string"}, + "SSEKMSKeyId":{ + "type":"string", + "sensitive":true + }, + "ServerSideEncryption":{ + "type":"string", + "enum":[ + "AES256", + "aws:kms" + ] + }, + "Size":{"type":"integer"}, + "StorageClass":{ + "type":"string", + "enum":[ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA" + ] + }, + "Suffix":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"ObjectKey"}, + "Value":{"shape":"Value"} + } + }, + "TagSet":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "Tagging":{ + "type":"structure", + "required":["TagSet"], + "members":{ + "TagSet":{"shape":"TagSet"} + } + }, + "TargetBucket":{"type":"string"}, + "TargetGrant":{ + "type":"structure", + "members":{ + "Grantee":{"shape":"Grantee"}, + "Permission":{"shape":"BucketLogsPermission"} + } + }, + "TargetGrants":{ + "type":"list", + "member":{ + 
"shape":"TargetGrant", + "locationName":"Grant" + } + }, + "TargetPrefix":{"type":"string"}, + "TopicArn":{"type":"string"}, + "TopicConfiguration":{ + "type":"structure", + "required":[ + "TopicArn", + "Events" + ], + "members":{ + "Id":{"shape":"NotificationId"}, + "TopicArn":{ + "shape":"TopicArn", + "locationName":"Topic" + }, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Filter":{"shape":"NotificationConfigurationFilter"} + } + }, + "TopicConfigurationDeprecated":{ + "type":"structure", + "members":{ + "Id":{"shape":"NotificationId"}, + "Events":{ + "shape":"EventList", + "locationName":"Event" + }, + "Event":{ + "shape":"Event", + "deprecated":true + }, + "Topic":{"shape":"TopicArn"} + } + }, + "TopicConfigurationList":{ + "type":"list", + "member":{"shape":"TopicConfiguration"}, + "flattened":true + }, + "Transition":{ + "type":"structure", + "members":{ + "Date":{"shape":"Date"}, + "Days":{"shape":"Days"}, + "StorageClass":{"shape":"TransitionStorageClass"} + } + }, + "TransitionList":{ + "type":"list", + "member":{"shape":"Transition"}, + "flattened":true + }, + "TransitionStorageClass":{ + "type":"string", + "enum":[ + "GLACIER", + "STANDARD_IA" + ] + }, + "Type":{ + "type":"string", + "enum":[ + "CanonicalUser", + "AmazonCustomerByEmail", + "Group" + ] + }, + "URI":{"type":"string"}, + "UploadIdMarker":{"type":"string"}, + "UploadPartCopyOutput":{ + "type":"structure", + "members":{ + "CopySourceVersionId":{ + "shape":"CopySourceVersionId", + "location":"header", + "locationName":"x-amz-copy-source-version-id" + }, + "CopyPartResult":{"shape":"CopyPartResult"}, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + 
"locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + }, + "payload":"CopyPartResult" + }, + "UploadPartCopyRequest":{ + "type":"structure", + "required":[ + "Bucket", + "CopySource", + "Key", + "PartNumber", + "UploadId" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "CopySource":{ + "shape":"CopySource", + "location":"header", + "locationName":"x-amz-copy-source" + }, + "CopySourceIfMatch":{ + "shape":"CopySourceIfMatch", + "location":"header", + "locationName":"x-amz-copy-source-if-match" + }, + "CopySourceIfModifiedSince":{ + "shape":"CopySourceIfModifiedSince", + "location":"header", + "locationName":"x-amz-copy-source-if-modified-since" + }, + "CopySourceIfNoneMatch":{ + "shape":"CopySourceIfNoneMatch", + "location":"header", + "locationName":"x-amz-copy-source-if-none-match" + }, + "CopySourceIfUnmodifiedSince":{ + "shape":"CopySourceIfUnmodifiedSince", + "location":"header", + "locationName":"x-amz-copy-source-if-unmodified-since" + }, + "CopySourceRange":{ + "shape":"CopySourceRange", + "location":"header", + "locationName":"x-amz-copy-source-range" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "PartNumber":{ + "shape":"PartNumber", + "location":"querystring", + "locationName":"partNumber" + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + 
"SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "CopySourceSSECustomerAlgorithm":{ + "shape":"CopySourceSSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-algorithm" + }, + "CopySourceSSECustomerKey":{ + "shape":"CopySourceSSECustomerKey", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-key" + }, + "CopySourceSSECustomerKeyMD5":{ + "shape":"CopySourceSSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-copy-source-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + } + }, + "UploadPartOutput":{ + "type":"structure", + "members":{ + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "location":"header", + "locationName":"x-amz-server-side-encryption" + }, + "ETag":{ + "shape":"ETag", + "location":"header", + "locationName":"ETag" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "location":"header", + "locationName":"x-amz-server-side-encryption-aws-kms-key-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + } + } + }, + "UploadPartRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "PartNumber", + "UploadId" + ], + "members":{ + "Body":{ + "shape":"Body", + "streaming":true + }, + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContentLength":{ + "shape":"ContentLength", + "location":"header", + 
"locationName":"Content-Length" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "location":"header", + "locationName":"Content-MD5" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "PartNumber":{ + "shape":"PartNumber", + "location":"querystring", + "locationName":"partNumber" + }, + "UploadId":{ + "shape":"MultipartUploadId", + "location":"querystring", + "locationName":"uploadId" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + } + }, + "payload":"Body" + }, + "Value":{"type":"string"}, + "VersionIdMarker":{"type":"string"}, + "VersioningConfiguration":{ + "type":"structure", + "members":{ + "MFADelete":{ + "shape":"MFADelete", + "locationName":"MfaDelete" + }, + "Status":{"shape":"BucketVersioningStatus"} + } + }, + "WebsiteConfiguration":{ + "type":"structure", + "members":{ + "ErrorDocument":{"shape":"ErrorDocument"}, + "IndexDocument":{"shape":"IndexDocument"}, + "RedirectAllRequestsTo":{"shape":"RedirectAllRequestsTo"}, + "RoutingRules":{"shape":"RoutingRules"} + } + }, + "WebsiteRedirectLocation":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2318 @@ +{ + "version": "2.0", + "service": null, + "operations": { + "AbortMultipartUpload": "

    Aborts a multipart upload.

    To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.

    ", + "CompleteMultipartUpload": "Completes a multipart upload by assembling previously uploaded parts.", + "CopyObject": "Creates a copy of an object that is already stored in Amazon S3.", + "CreateBucket": "Creates a new bucket.", + "CreateMultipartUpload": "

    Initiates a multipart upload and returns an upload ID.

    Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

    ", + "DeleteBucket": "Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.", + "DeleteBucketCors": "Deletes the cors configuration information set for the bucket.", + "DeleteBucketLifecycle": "Deletes the lifecycle configuration from the bucket.", + "DeleteBucketPolicy": "Deletes the policy from the bucket.", + "DeleteBucketReplication": null, + "DeleteBucketTagging": "Deletes the tags from the bucket.", + "DeleteBucketWebsite": "This operation removes the website configuration from the bucket.", + "DeleteObject": "Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.", + "DeleteObjects": "This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.", + "GetBucketAcl": "Gets the access control policy for the bucket.", + "GetBucketCors": "Returns the cors configuration for the bucket.", + "GetBucketLifecycle": "Deprecated, see the GetBucketLifecycleConfiguration operation.", + "GetBucketLifecycleConfiguration": "Returns the lifecycle configuration information set on the bucket.", + "GetBucketLocation": "Returns the region the bucket resides in.", + "GetBucketLogging": "Returns the logging status of a bucket and the permissions users have to view and modify that status. 
To use GET, you must be the bucket owner.", + "GetBucketNotification": "Deprecated, see the GetBucketNotificationConfiguration operation.", + "GetBucketNotificationConfiguration": "Returns the notification configuration of a bucket.", + "GetBucketPolicy": "Returns the policy of a specified bucket.", + "GetBucketReplication": null, + "GetBucketRequestPayment": "Returns the request payment configuration of a bucket.", + "GetBucketTagging": "Returns the tag set associated with the bucket.", + "GetBucketVersioning": "Returns the versioning state of a bucket.", + "GetBucketWebsite": "Returns the website configuration for a bucket.", + "GetObject": "Retrieves objects from Amazon S3.", + "GetObjectAcl": "Returns the access control list (ACL) of an object.", + "GetObjectTorrent": "Return torrent files from a bucket.", + "HeadBucket": "This operation is useful to determine if a bucket exists and you have permission to access it.", + "HeadObject": "The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.", + "ListBuckets": "Returns a list of all buckets owned by the authenticated sender of the request.", + "ListMultipartUploads": "This operation lists in-progress multipart uploads.", + "ListObjectVersions": "Returns metadata about all of the versions of objects in a bucket.", + "ListObjects": "Returns some or all (up to 1000) of the objects in a bucket. 
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.", + "ListParts": "Lists the parts that have been uploaded for a specific multipart upload.", + "PutBucketAcl": "Sets the permissions on a bucket using access control lists (ACL).", + "PutBucketCors": "Sets the cors configuration for a bucket.", + "PutBucketLifecycle": "Deprecated, see the PutBucketLifecycleConfiguration operation.", + "PutBucketLifecycleConfiguration": "Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it.", + "PutBucketLogging": "Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.", + "PutBucketNotification": "Deprecated, see the PutBucketNotificationConfiguraiton operation.", + "PutBucketNotificationConfiguration": "Enables notifications of specified events for a bucket.", + "PutBucketPolicy": "Replaces a policy on a bucket. If the bucket already has a policy, the one in this request completely replaces it.", + "PutBucketReplication": "Creates a new replication configuration (or replaces an existing one, if present).", + "PutBucketRequestPayment": "Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html", + "PutBucketTagging": "Sets the tags for a bucket.", + "PutBucketVersioning": "Sets the versioning state of an existing bucket. 
To set the versioning state, you must be the bucket owner.", + "PutBucketWebsite": "Set the website configuration for a bucket.", + "PutObject": "Adds an object to a bucket.", + "PutObjectAcl": "uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket", + "RestoreObject": "Restores an archived copy of an object back into Amazon S3", + "UploadPart": "

    Uploads a part in a multipart upload.

    Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

    ", + "UploadPartCopy": "Uploads a part by copying data from an existing object as data source." + }, + "shapes": { + "AbortMultipartUploadOutput": { + "base": null, + "refs": { + } + }, + "AbortMultipartUploadRequest": { + "base": null, + "refs": { + } + }, + "AcceptRanges": { + "base": null, + "refs": { + "GetObjectOutput$AcceptRanges": null, + "HeadObjectOutput$AcceptRanges": null + } + }, + "AccessControlPolicy": { + "base": null, + "refs": { + "PutBucketAclRequest$AccessControlPolicy": null, + "PutObjectAclRequest$AccessControlPolicy": null + } + }, + "AllowedHeader": { + "base": null, + "refs": { + "AllowedHeaders$member": null + } + }, + "AllowedHeaders": { + "base": null, + "refs": { + "CORSRule$AllowedHeaders": "Specifies which headers are allowed in a pre-flight OPTIONS request." + } + }, + "AllowedMethod": { + "base": null, + "refs": { + "AllowedMethods$member": null + } + }, + "AllowedMethods": { + "base": null, + "refs": { + "CORSRule$AllowedMethods": "Identifies HTTP methods that the domain/origin specified in the rule is allowed to execute." + } + }, + "AllowedOrigin": { + "base": null, + "refs": { + "AllowedOrigins$member": null + } + }, + "AllowedOrigins": { + "base": null, + "refs": { + "CORSRule$AllowedOrigins": "One or more origins you want customers to be able to access the bucket from." + } + }, + "Body": { + "base": null, + "refs": { + "GetObjectOutput$Body": "Object data.", + "GetObjectTorrentOutput$Body": null, + "PutObjectRequest$Body": "Object data.", + "UploadPartRequest$Body": null + } + }, + "Bucket": { + "base": null, + "refs": { + "Buckets$member": null + } + }, + "BucketAlreadyExists": { + "base": "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", + "refs": { + } + }, + "BucketCannedACL": { + "base": null, + "refs": { + "CreateBucketRequest$ACL": "The canned ACL to apply to the bucket.", + "PutBucketAclRequest$ACL": "The canned ACL to apply to the bucket." + } + }, + "BucketLifecycleConfiguration": { + "base": null, + "refs": { + "PutBucketLifecycleConfigurationRequest$LifecycleConfiguration": null + } + }, + "BucketLocationConstraint": { + "base": null, + "refs": { + "CreateBucketConfiguration$LocationConstraint": "Specifies the region where the bucket will be created. If you don't specify a region, the bucket will be created in US Standard.", + "GetBucketLocationOutput$LocationConstraint": null + } + }, + "BucketLoggingStatus": { + "base": null, + "refs": { + "PutBucketLoggingRequest$BucketLoggingStatus": null + } + }, + "BucketLogsPermission": { + "base": null, + "refs": { + "TargetGrant$Permission": "Logging permissions assigned to the Grantee for the bucket." + } + }, + "BucketName": { + "base": null, + "refs": { + "AbortMultipartUploadRequest$Bucket": null, + "Bucket$Name": "The name of the bucket.", + "CompleteMultipartUploadOutput$Bucket": null, + "CompleteMultipartUploadRequest$Bucket": null, + "CopyObjectRequest$Bucket": null, + "CreateBucketRequest$Bucket": null, + "CreateMultipartUploadOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.", + "CreateMultipartUploadRequest$Bucket": null, + "DeleteBucketCorsRequest$Bucket": null, + "DeleteBucketLifecycleRequest$Bucket": null, + "DeleteBucketPolicyRequest$Bucket": null, + "DeleteBucketReplicationRequest$Bucket": null, + "DeleteBucketRequest$Bucket": null, + "DeleteBucketTaggingRequest$Bucket": null, + "DeleteBucketWebsiteRequest$Bucket": null, + "DeleteObjectRequest$Bucket": null, + "DeleteObjectsRequest$Bucket": null, + "Destination$Bucket": "Amazon resource name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.", + 
"GetBucketAclRequest$Bucket": null, + "GetBucketCorsRequest$Bucket": null, + "GetBucketLifecycleConfigurationRequest$Bucket": null, + "GetBucketLifecycleRequest$Bucket": null, + "GetBucketLocationRequest$Bucket": null, + "GetBucketLoggingRequest$Bucket": null, + "GetBucketNotificationConfigurationRequest$Bucket": "Name of the buket to get the notification configuration for.", + "GetBucketPolicyRequest$Bucket": null, + "GetBucketReplicationRequest$Bucket": null, + "GetBucketRequestPaymentRequest$Bucket": null, + "GetBucketTaggingRequest$Bucket": null, + "GetBucketVersioningRequest$Bucket": null, + "GetBucketWebsiteRequest$Bucket": null, + "GetObjectAclRequest$Bucket": null, + "GetObjectRequest$Bucket": null, + "GetObjectTorrentRequest$Bucket": null, + "HeadBucketRequest$Bucket": null, + "HeadObjectRequest$Bucket": null, + "ListMultipartUploadsOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.", + "ListMultipartUploadsRequest$Bucket": null, + "ListObjectVersionsOutput$Name": null, + "ListObjectVersionsRequest$Bucket": null, + "ListObjectsOutput$Name": null, + "ListObjectsRequest$Bucket": null, + "ListPartsOutput$Bucket": "Name of the bucket to which the multipart upload was initiated.", + "ListPartsRequest$Bucket": null, + "PutBucketAclRequest$Bucket": null, + "PutBucketCorsRequest$Bucket": null, + "PutBucketLifecycleConfigurationRequest$Bucket": null, + "PutBucketLifecycleRequest$Bucket": null, + "PutBucketLoggingRequest$Bucket": null, + "PutBucketNotificationConfigurationRequest$Bucket": null, + "PutBucketNotificationRequest$Bucket": null, + "PutBucketPolicyRequest$Bucket": null, + "PutBucketReplicationRequest$Bucket": null, + "PutBucketRequestPaymentRequest$Bucket": null, + "PutBucketTaggingRequest$Bucket": null, + "PutBucketVersioningRequest$Bucket": null, + "PutBucketWebsiteRequest$Bucket": null, + "PutObjectAclRequest$Bucket": null, + "PutObjectRequest$Bucket": null, + "RestoreObjectRequest$Bucket": null, + 
"UploadPartCopyRequest$Bucket": null, + "UploadPartRequest$Bucket": null + } + }, + "BucketVersioningStatus": { + "base": null, + "refs": { + "GetBucketVersioningOutput$Status": "The versioning state of the bucket.", + "VersioningConfiguration$Status": "The versioning state of the bucket." + } + }, + "Buckets": { + "base": null, + "refs": { + "ListBucketsOutput$Buckets": null + } + }, + "CORSConfiguration": { + "base": null, + "refs": { + "PutBucketCorsRequest$CORSConfiguration": null + } + }, + "CORSRule": { + "base": null, + "refs": { + "CORSRules$member": null + } + }, + "CORSRules": { + "base": null, + "refs": { + "CORSConfiguration$CORSRules": null, + "GetBucketCorsOutput$CORSRules": null + } + }, + "CacheControl": { + "base": null, + "refs": { + "CopyObjectRequest$CacheControl": "Specifies caching behavior along the request/reply chain.", + "CreateMultipartUploadRequest$CacheControl": "Specifies caching behavior along the request/reply chain.", + "GetObjectOutput$CacheControl": "Specifies caching behavior along the request/reply chain.", + "HeadObjectOutput$CacheControl": "Specifies caching behavior along the request/reply chain.", + "PutObjectRequest$CacheControl": "Specifies caching behavior along the request/reply chain." 
+ } + }, + "CloudFunction": { + "base": null, + "refs": { + "CloudFunctionConfiguration$CloudFunction": null + } + }, + "CloudFunctionConfiguration": { + "base": null, + "refs": { + "NotificationConfigurationDeprecated$CloudFunctionConfiguration": null + } + }, + "CloudFunctionInvocationRole": { + "base": null, + "refs": { + "CloudFunctionConfiguration$InvocationRole": null + } + }, + "Code": { + "base": null, + "refs": { + "Error$Code": null + } + }, + "CommonPrefix": { + "base": null, + "refs": { + "CommonPrefixList$member": null + } + }, + "CommonPrefixList": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$CommonPrefixes": null, + "ListObjectVersionsOutput$CommonPrefixes": null, + "ListObjectsOutput$CommonPrefixes": null + } + }, + "CompleteMultipartUploadOutput": { + "base": null, + "refs": { + } + }, + "CompleteMultipartUploadRequest": { + "base": null, + "refs": { + } + }, + "CompletedMultipartUpload": { + "base": null, + "refs": { + "CompleteMultipartUploadRequest$MultipartUpload": null + } + }, + "CompletedPart": { + "base": null, + "refs": { + "CompletedPartList$member": null + } + }, + "CompletedPartList": { + "base": null, + "refs": { + "CompletedMultipartUpload$Parts": null + } + }, + "Condition": { + "base": null, + "refs": { + "RoutingRule$Condition": "A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error." 
+ } + }, + "ContentDisposition": { + "base": null, + "refs": { + "CopyObjectRequest$ContentDisposition": "Specifies presentational information for the object.", + "CreateMultipartUploadRequest$ContentDisposition": "Specifies presentational information for the object.", + "GetObjectOutput$ContentDisposition": "Specifies presentational information for the object.", + "HeadObjectOutput$ContentDisposition": "Specifies presentational information for the object.", + "PutObjectRequest$ContentDisposition": "Specifies presentational information for the object." + } + }, + "ContentEncoding": { + "base": null, + "refs": { + "CopyObjectRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "CreateMultipartUploadRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "GetObjectOutput$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "HeadObjectOutput$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.", + "PutObjectRequest$ContentEncoding": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field." 
+ } + }, + "ContentLanguage": { + "base": null, + "refs": { + "CopyObjectRequest$ContentLanguage": "The language the content is in.", + "CreateMultipartUploadRequest$ContentLanguage": "The language the content is in.", + "GetObjectOutput$ContentLanguage": "The language the content is in.", + "HeadObjectOutput$ContentLanguage": "The language the content is in.", + "PutObjectRequest$ContentLanguage": "The language the content is in." + } + }, + "ContentLength": { + "base": null, + "refs": { + "GetObjectOutput$ContentLength": "Size of the body in bytes.", + "HeadObjectOutput$ContentLength": "Size of the body in bytes.", + "PutObjectRequest$ContentLength": "Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.", + "UploadPartRequest$ContentLength": "Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically." + } + }, + "ContentMD5": { + "base": null, + "refs": { + "PutBucketAclRequest$ContentMD5": null, + "PutBucketCorsRequest$ContentMD5": null, + "PutBucketLifecycleRequest$ContentMD5": null, + "PutBucketLoggingRequest$ContentMD5": null, + "PutBucketNotificationRequest$ContentMD5": null, + "PutBucketPolicyRequest$ContentMD5": null, + "PutBucketReplicationRequest$ContentMD5": null, + "PutBucketRequestPaymentRequest$ContentMD5": null, + "PutBucketTaggingRequest$ContentMD5": null, + "PutBucketVersioningRequest$ContentMD5": null, + "PutBucketWebsiteRequest$ContentMD5": null, + "PutObjectAclRequest$ContentMD5": null, + "PutObjectRequest$ContentMD5": null, + "UploadPartRequest$ContentMD5": null + } + }, + "ContentRange": { + "base": null, + "refs": { + "GetObjectOutput$ContentRange": "The portion of the object returned in the response." 
+ } + }, + "ContentType": { + "base": null, + "refs": { + "CopyObjectRequest$ContentType": "A standard MIME type describing the format of the object data.", + "CreateMultipartUploadRequest$ContentType": "A standard MIME type describing the format of the object data.", + "GetObjectOutput$ContentType": "A standard MIME type describing the format of the object data.", + "HeadObjectOutput$ContentType": "A standard MIME type describing the format of the object data.", + "PutObjectRequest$ContentType": "A standard MIME type describing the format of the object data." + } + }, + "CopyObjectOutput": { + "base": null, + "refs": { + } + }, + "CopyObjectRequest": { + "base": null, + "refs": { + } + }, + "CopyObjectResult": { + "base": null, + "refs": { + "CopyObjectOutput$CopyObjectResult": null + } + }, + "CopyPartResult": { + "base": null, + "refs": { + "UploadPartCopyOutput$CopyPartResult": null + } + }, + "CopySource": { + "base": null, + "refs": { + "CopyObjectRequest$CopySource": "The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded.", + "UploadPartCopyRequest$CopySource": "The name of the source bucket and key name of the source object, separated by a slash (/). Must be URL-encoded." + } + }, + "CopySourceIfMatch": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceIfMatch": "Copies the object if its entity tag (ETag) matches the specified tag.", + "UploadPartCopyRequest$CopySourceIfMatch": "Copies the object if its entity tag (ETag) matches the specified tag." + } + }, + "CopySourceIfModifiedSince": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceIfModifiedSince": "Copies the object if it has been modified since the specified time.", + "UploadPartCopyRequest$CopySourceIfModifiedSince": "Copies the object if it has been modified since the specified time." 
+ } + }, + "CopySourceIfNoneMatch": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceIfNoneMatch": "Copies the object if its entity tag (ETag) is different than the specified ETag.", + "UploadPartCopyRequest$CopySourceIfNoneMatch": "Copies the object if its entity tag (ETag) is different than the specified ETag." + } + }, + "CopySourceIfUnmodifiedSince": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceIfUnmodifiedSince": "Copies the object if it hasn't been modified since the specified time.", + "UploadPartCopyRequest$CopySourceIfUnmodifiedSince": "Copies the object if it hasn't been modified since the specified time." + } + }, + "CopySourceRange": { + "base": null, + "refs": { + "UploadPartCopyRequest$CopySourceRange": "The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source. You can copy a range only if the source object is greater than 5 GB." + } + }, + "CopySourceSSECustomerAlgorithm": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceSSECustomerAlgorithm": "Specifies the algorithm to use when decrypting the source object (e.g., AES256).", + "UploadPartCopyRequest$CopySourceSSECustomerAlgorithm": "Specifies the algorithm to use when decrypting the source object (e.g., AES256)." + } + }, + "CopySourceSSECustomerKey": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceSSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.", + "UploadPartCopyRequest$CopySourceSSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. 
The encryption key provided in this header must be one that was used when the source object was created." + } + }, + "CopySourceSSECustomerKeyMD5": { + "base": null, + "refs": { + "CopyObjectRequest$CopySourceSSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "UploadPartCopyRequest$CopySourceSSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error." + } + }, + "CopySourceVersionId": { + "base": null, + "refs": { + "CopyObjectOutput$CopySourceVersionId": null, + "UploadPartCopyOutput$CopySourceVersionId": "The version of the source object that was copied, if you have enabled versioning on the source bucket." + } + }, + "CreateBucketConfiguration": { + "base": null, + "refs": { + "CreateBucketRequest$CreateBucketConfiguration": null + } + }, + "CreateBucketOutput": { + "base": null, + "refs": { + } + }, + "CreateBucketRequest": { + "base": null, + "refs": { + } + }, + "CreateMultipartUploadOutput": { + "base": null, + "refs": { + } + }, + "CreateMultipartUploadRequest": { + "base": null, + "refs": { + } + }, + "CreationDate": { + "base": null, + "refs": { + "Bucket$CreationDate": "Date the bucket was created." + } + }, + "Date": { + "base": null, + "refs": { + "LifecycleExpiration$Date": "Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.", + "Transition$Date": "Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format." + } + }, + "Days": { + "base": null, + "refs": { + "LifecycleExpiration$Days": "Indicates the lifetime, in days, of the objects that are subject to the rule. 
The value must be a non-zero positive integer.", + "NoncurrentVersionExpiration$NoncurrentDays": "Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.", + "NoncurrentVersionTransition$NoncurrentDays": "Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.", + "RestoreRequest$Days": "Lifetime of the active copy in days", + "Transition$Days": "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer." + } + }, + "Delete": { + "base": null, + "refs": { + "DeleteObjectsRequest$Delete": null + } + }, + "DeleteBucketCorsRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketLifecycleRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketReplicationRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketTaggingRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketWebsiteRequest": { + "base": null, + "refs": { + } + }, + "DeleteMarker": { + "base": null, + "refs": { + "DeleteObjectOutput$DeleteMarker": "Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.", + "DeletedObject$DeleteMarker": null, + "GetObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. 
If false, this response header does not appear in the response.", + "HeadObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response." + } + }, + "DeleteMarkerEntry": { + "base": null, + "refs": { + "DeleteMarkers$member": null + } + }, + "DeleteMarkerVersionId": { + "base": null, + "refs": { + "DeletedObject$DeleteMarkerVersionId": null + } + }, + "DeleteMarkers": { + "base": null, + "refs": { + "ListObjectVersionsOutput$DeleteMarkers": null + } + }, + "DeleteObjectOutput": { + "base": null, + "refs": { + } + }, + "DeleteObjectRequest": { + "base": null, + "refs": { + } + }, + "DeleteObjectsOutput": { + "base": null, + "refs": { + } + }, + "DeleteObjectsRequest": { + "base": null, + "refs": { + } + }, + "DeletedObject": { + "base": null, + "refs": { + "DeletedObjects$member": null + } + }, + "DeletedObjects": { + "base": null, + "refs": { + "DeleteObjectsOutput$Deleted": null + } + }, + "Delimiter": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$Delimiter": null, + "ListMultipartUploadsRequest$Delimiter": "Character you use to group keys.", + "ListObjectVersionsOutput$Delimiter": null, + "ListObjectVersionsRequest$Delimiter": "A delimiter is a character you use to group keys.", + "ListObjectsOutput$Delimiter": null, + "ListObjectsRequest$Delimiter": "A delimiter is a character you use to group keys." 
+ } + }, + "Destination": { + "base": null, + "refs": { + "ReplicationRule$Destination": null + } + }, + "DisplayName": { + "base": null, + "refs": { + "Grantee$DisplayName": "Screen name of the grantee.", + "Initiator$DisplayName": "Name of the Principal.", + "Owner$DisplayName": null + } + }, + "ETag": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$ETag": "Entity tag of the object.", + "CompletedPart$ETag": "Entity tag returned when the part was uploaded.", + "CopyObjectResult$ETag": null, + "CopyPartResult$ETag": "Entity tag of the object.", + "GetObjectOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL", + "HeadObjectOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL", + "Object$ETag": null, + "ObjectVersion$ETag": null, + "Part$ETag": "Entity tag returned when the part was uploaded.", + "PutObjectOutput$ETag": "Entity tag for the uploaded object.", + "UploadPartOutput$ETag": "Entity tag for the uploaded object." + } + }, + "EmailAddress": { + "base": null, + "refs": { + "Grantee$EmailAddress": "Email address of the grantee." + } + }, + "EncodingType": { + "base": "Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. 
For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.", + "refs": { + "ListMultipartUploadsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.", + "ListMultipartUploadsRequest$EncodingType": null, + "ListObjectVersionsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.", + "ListObjectVersionsRequest$EncodingType": null, + "ListObjectsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object keys in the response.", + "ListObjectsRequest$EncodingType": null + } + }, + "Error": { + "base": null, + "refs": { + "Errors$member": null + } + }, + "ErrorDocument": { + "base": null, + "refs": { + "GetBucketWebsiteOutput$ErrorDocument": null, + "WebsiteConfiguration$ErrorDocument": null + } + }, + "Errors": { + "base": null, + "refs": { + "DeleteObjectsOutput$Errors": null + } + }, + "Event": { + "base": "Bucket event for which to send notifications.", + "refs": { + "CloudFunctionConfiguration$Event": null, + "EventList$member": null, + "QueueConfigurationDeprecated$Event": null, + "TopicConfigurationDeprecated$Event": "Bucket event for which to send notifications." + } + }, + "EventList": { + "base": null, + "refs": { + "CloudFunctionConfiguration$Events": null, + "LambdaFunctionConfiguration$Events": null, + "QueueConfiguration$Events": null, + "QueueConfigurationDeprecated$Events": null, + "TopicConfiguration$Events": null, + "TopicConfigurationDeprecated$Events": null + } + }, + "Expiration": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$Expiration": "If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). 
The value of rule-id is URL encoded.", + "CopyObjectOutput$Expiration": "If the object expiration is configured, the response includes this header.", + "GetObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.", + "HeadObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.", + "PutObjectOutput$Expiration": "If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded." + } + }, + "ExpirationStatus": { + "base": null, + "refs": { + "LifecycleRule$Status": "If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.", + "Rule$Status": "If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied." + } + }, + "Expires": { + "base": null, + "refs": { + "CopyObjectRequest$Expires": "The date and time at which the object is no longer cacheable.", + "CreateMultipartUploadRequest$Expires": "The date and time at which the object is no longer cacheable.", + "GetObjectOutput$Expires": "The date and time at which the object is no longer cacheable.", + "HeadObjectOutput$Expires": "The date and time at which the object is no longer cacheable.", + "PutObjectRequest$Expires": "The date and time at which the object is no longer cacheable." 
+ } + }, + "ExposeHeader": { + "base": null, + "refs": { + "ExposeHeaders$member": null + } + }, + "ExposeHeaders": { + "base": null, + "refs": { + "CORSRule$ExposeHeaders": "One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object)." + } + }, + "FilterRule": { + "base": "Container for key value pair that defines the criteria for the filter rule.", + "refs": { + "FilterRuleList$member": null + } + }, + "FilterRuleList": { + "base": "A list of containers for key value pair that defines the criteria for the filter rule.", + "refs": { + "S3KeyFilter$FilterRules": null + } + }, + "FilterRuleName": { + "base": null, + "refs": { + "FilterRule$Name": "Object key name prefix or suffix identifying one or more objects to which the filtering rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, go to Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide." 
+ } + }, + "FilterRuleValue": { + "base": null, + "refs": { + "FilterRule$Value": null + } + }, + "GetBucketAclOutput": { + "base": null, + "refs": { + } + }, + "GetBucketAclRequest": { + "base": null, + "refs": { + } + }, + "GetBucketCorsOutput": { + "base": null, + "refs": { + } + }, + "GetBucketCorsRequest": { + "base": null, + "refs": { + } + }, + "GetBucketLifecycleConfigurationOutput": { + "base": null, + "refs": { + } + }, + "GetBucketLifecycleConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketLifecycleOutput": { + "base": null, + "refs": { + } + }, + "GetBucketLifecycleRequest": { + "base": null, + "refs": { + } + }, + "GetBucketLocationOutput": { + "base": null, + "refs": { + } + }, + "GetBucketLocationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketLoggingOutput": { + "base": null, + "refs": { + } + }, + "GetBucketLoggingRequest": { + "base": null, + "refs": { + } + }, + "GetBucketNotificationConfigurationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketPolicyOutput": { + "base": null, + "refs": { + } + }, + "GetBucketPolicyRequest": { + "base": null, + "refs": { + } + }, + "GetBucketReplicationOutput": { + "base": null, + "refs": { + } + }, + "GetBucketReplicationRequest": { + "base": null, + "refs": { + } + }, + "GetBucketRequestPaymentOutput": { + "base": null, + "refs": { + } + }, + "GetBucketRequestPaymentRequest": { + "base": null, + "refs": { + } + }, + "GetBucketTaggingOutput": { + "base": null, + "refs": { + } + }, + "GetBucketTaggingRequest": { + "base": null, + "refs": { + } + }, + "GetBucketVersioningOutput": { + "base": null, + "refs": { + } + }, + "GetBucketVersioningRequest": { + "base": null, + "refs": { + } + }, + "GetBucketWebsiteOutput": { + "base": null, + "refs": { + } + }, + "GetBucketWebsiteRequest": { + "base": null, + "refs": { + } + }, + "GetObjectAclOutput": { + "base": null, + "refs": { + } + }, + "GetObjectAclRequest": { + "base": null, + "refs": { + } + }, + "GetObjectOutput": { 
+ "base": null, + "refs": { + } + }, + "GetObjectRequest": { + "base": null, + "refs": { + } + }, + "GetObjectTorrentOutput": { + "base": null, + "refs": { + } + }, + "GetObjectTorrentRequest": { + "base": null, + "refs": { + } + }, + "Grant": { + "base": null, + "refs": { + "Grants$member": null + } + }, + "GrantFullControl": { + "base": null, + "refs": { + "CopyObjectRequest$GrantFullControl": "Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.", + "CreateBucketRequest$GrantFullControl": "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "CreateMultipartUploadRequest$GrantFullControl": "Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.", + "PutBucketAclRequest$GrantFullControl": "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "PutObjectAclRequest$GrantFullControl": "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.", + "PutObjectRequest$GrantFullControl": "Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object." + } + }, + "GrantRead": { + "base": null, + "refs": { + "CopyObjectRequest$GrantRead": "Allows grantee to read the object data and its metadata.", + "CreateBucketRequest$GrantRead": "Allows grantee to list the objects in the bucket.", + "CreateMultipartUploadRequest$GrantRead": "Allows grantee to read the object data and its metadata.", + "PutBucketAclRequest$GrantRead": "Allows grantee to list the objects in the bucket.", + "PutObjectAclRequest$GrantRead": "Allows grantee to list the objects in the bucket.", + "PutObjectRequest$GrantRead": "Allows grantee to read the object data and its metadata." 
+ } + }, + "GrantReadACP": { + "base": null, + "refs": { + "CopyObjectRequest$GrantReadACP": "Allows grantee to read the object ACL.", + "CreateBucketRequest$GrantReadACP": "Allows grantee to read the bucket ACL.", + "CreateMultipartUploadRequest$GrantReadACP": "Allows grantee to read the object ACL.", + "PutBucketAclRequest$GrantReadACP": "Allows grantee to read the bucket ACL.", + "PutObjectAclRequest$GrantReadACP": "Allows grantee to read the bucket ACL.", + "PutObjectRequest$GrantReadACP": "Allows grantee to read the object ACL." + } + }, + "GrantWrite": { + "base": null, + "refs": { + "CreateBucketRequest$GrantWrite": "Allows grantee to create, overwrite, and delete any object in the bucket.", + "PutBucketAclRequest$GrantWrite": "Allows grantee to create, overwrite, and delete any object in the bucket.", + "PutObjectAclRequest$GrantWrite": "Allows grantee to create, overwrite, and delete any object in the bucket." + } + }, + "GrantWriteACP": { + "base": null, + "refs": { + "CopyObjectRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable object.", + "CreateBucketRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable bucket.", + "CreateMultipartUploadRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable object.", + "PutBucketAclRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable bucket.", + "PutObjectAclRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable bucket.", + "PutObjectRequest$GrantWriteACP": "Allows grantee to write the ACL for the applicable object." + } + }, + "Grantee": { + "base": null, + "refs": { + "Grant$Grantee": null, + "TargetGrant$Grantee": null + } + }, + "Grants": { + "base": null, + "refs": { + "AccessControlPolicy$Grants": "A list of grants.", + "GetBucketAclOutput$Grants": "A list of grants.", + "GetObjectAclOutput$Grants": "A list of grants." 
+ } + }, + "HeadBucketRequest": { + "base": null, + "refs": { + } + }, + "HeadObjectOutput": { + "base": null, + "refs": { + } + }, + "HeadObjectRequest": { + "base": null, + "refs": { + } + }, + "HostName": { + "base": null, + "refs": { + "Redirect$HostName": "The host name to use in the redirect request.", + "RedirectAllRequestsTo$HostName": "Name of the host where requests will be redirected." + } + }, + "HttpErrorCodeReturnedEquals": { + "base": null, + "refs": { + "Condition$HttpErrorCodeReturnedEquals": "The HTTP error code when the redirect is applied. In the event of an error, if the error code equals this value, then the specified redirect is applied. Required when parent element Condition is specified and sibling KeyPrefixEquals is not specified. If both are specified, then both must be true for the redirect to be applied." + } + }, + "HttpRedirectCode": { + "base": null, + "refs": { + "Redirect$HttpRedirectCode": "The HTTP redirect code to use on the response. Not required if one of the siblings is present." + } + }, + "ID": { + "base": null, + "refs": { + "Grantee$ID": "The canonical user ID of the grantee.", + "Initiator$ID": "If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides a user ARN value.", + "LifecycleRule$ID": "Unique identifier for the rule. The value cannot be longer than 255 characters.", + "Owner$ID": null, + "ReplicationRule$ID": "Unique identifier for the rule. The value cannot be longer than 255 characters.", + "Rule$ID": "Unique identifier for the rule. The value cannot be longer than 255 characters." + } + }, + "IfMatch": { + "base": null, + "refs": { + "GetObjectRequest$IfMatch": "Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).", + "HeadObjectRequest$IfMatch": "Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed)." 
+ } + }, + "IfModifiedSince": { + "base": null, + "refs": { + "GetObjectRequest$IfModifiedSince": "Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).", + "HeadObjectRequest$IfModifiedSince": "Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified)." + } + }, + "IfNoneMatch": { + "base": null, + "refs": { + "GetObjectRequest$IfNoneMatch": "Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).", + "HeadObjectRequest$IfNoneMatch": "Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified)." + } + }, + "IfUnmodifiedSince": { + "base": null, + "refs": { + "GetObjectRequest$IfUnmodifiedSince": "Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).", + "HeadObjectRequest$IfUnmodifiedSince": "Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed)." + } + }, + "IndexDocument": { + "base": null, + "refs": { + "GetBucketWebsiteOutput$IndexDocument": null, + "WebsiteConfiguration$IndexDocument": null + } + }, + "Initiated": { + "base": null, + "refs": { + "MultipartUpload$Initiated": "Date and time at which the multipart upload was initiated." + } + }, + "Initiator": { + "base": null, + "refs": { + "ListPartsOutput$Initiator": "Identifies who initiated the multipart upload.", + "MultipartUpload$Initiator": "Identifies who initiated the multipart upload." + } + }, + "IsLatest": { + "base": null, + "refs": { + "DeleteMarkerEntry$IsLatest": "Specifies whether the object is (true) or is not (false) the latest version of an object.", + "ObjectVersion$IsLatest": "Specifies whether the object is (true) or is not (false) the latest version of an object." 
+ } + }, + "IsTruncated": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$IsTruncated": "Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.", + "ListObjectVersionsOutput$IsTruncated": "A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.", + "ListObjectsOutput$IsTruncated": "A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.", + "ListPartsOutput$IsTruncated": "Indicates whether the returned list of parts is truncated." + } + }, + "KeyMarker": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$KeyMarker": "The key at or after which the listing began.", + "ListMultipartUploadsRequest$KeyMarker": "Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.", + "ListObjectVersionsOutput$KeyMarker": "Marks the last Key returned in a truncated response.", + "ListObjectVersionsRequest$KeyMarker": "Specifies the key to start with when listing objects in a bucket." + } + }, + "KeyPrefixEquals": { + "base": null, + "refs": { + "Condition$KeyPrefixEquals": "The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect request for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. 
If both conditions are specified, both must be true for the redirect to be applied." + } + }, + "LambdaFunctionArn": { + "base": null, + "refs": { + "LambdaFunctionConfiguration$LambdaFunctionArn": "Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type." + } + }, + "LambdaFunctionConfiguration": { + "base": "Container for specifying the AWS Lambda notification configuration.", + "refs": { + "LambdaFunctionConfigurationList$member": null + } + }, + "LambdaFunctionConfigurationList": { + "base": null, + "refs": { + "NotificationConfiguration$LambdaFunctionConfigurations": null + } + }, + "LastModified": { + "base": null, + "refs": { + "CopyObjectResult$LastModified": null, + "CopyPartResult$LastModified": "Date and time at which the object was uploaded.", + "DeleteMarkerEntry$LastModified": "Date and time the object was last modified.", + "GetObjectOutput$LastModified": "Last modified date of the object", + "HeadObjectOutput$LastModified": "Last modified date of the object", + "Object$LastModified": null, + "ObjectVersion$LastModified": "Date and time the object was last modified.", + "Part$LastModified": "Date and time at which the part was uploaded." 
+ } + }, + "LifecycleConfiguration": { + "base": null, + "refs": { + "PutBucketLifecycleRequest$LifecycleConfiguration": null + } + }, + "LifecycleExpiration": { + "base": null, + "refs": { + "LifecycleRule$Expiration": null, + "Rule$Expiration": null + } + }, + "LifecycleRule": { + "base": null, + "refs": { + "LifecycleRules$member": null + } + }, + "LifecycleRules": { + "base": null, + "refs": { + "BucketLifecycleConfiguration$Rules": null, + "GetBucketLifecycleConfigurationOutput$Rules": null + } + }, + "ListBucketsOutput": { + "base": null, + "refs": { + } + }, + "ListMultipartUploadsOutput": { + "base": null, + "refs": { + } + }, + "ListMultipartUploadsRequest": { + "base": null, + "refs": { + } + }, + "ListObjectVersionsOutput": { + "base": null, + "refs": { + } + }, + "ListObjectVersionsRequest": { + "base": null, + "refs": { + } + }, + "ListObjectsOutput": { + "base": null, + "refs": { + } + }, + "ListObjectsRequest": { + "base": null, + "refs": { + } + }, + "ListPartsOutput": { + "base": null, + "refs": { + } + }, + "ListPartsRequest": { + "base": null, + "refs": { + } + }, + "Location": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$Location": null, + "CreateBucketOutput$Location": null + } + }, + "LoggingEnabled": { + "base": null, + "refs": { + "BucketLoggingStatus$LoggingEnabled": null, + "GetBucketLoggingOutput$LoggingEnabled": null + } + }, + "MFA": { + "base": null, + "refs": { + "DeleteObjectRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.", + "DeleteObjectsRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.", + "PutBucketVersioningRequest$MFA": "The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device." 
+ } + }, + "MFADelete": { + "base": null, + "refs": { + "VersioningConfiguration$MFADelete": "Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned." + } + }, + "MFADeleteStatus": { + "base": null, + "refs": { + "GetBucketVersioningOutput$MFADelete": "Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned." + } + }, + "Marker": { + "base": null, + "refs": { + "ListObjectsOutput$Marker": null, + "ListObjectsRequest$Marker": "Specifies the key to start with when listing objects in a bucket." + } + }, + "MaxAgeSeconds": { + "base": null, + "refs": { + "CORSRule$MaxAgeSeconds": "The time in seconds that your browser is to cache the preflight response for the specified resource." + } + }, + "MaxKeys": { + "base": null, + "refs": { + "ListObjectVersionsOutput$MaxKeys": null, + "ListObjectVersionsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.", + "ListObjectsOutput$MaxKeys": null, + "ListObjectsRequest$MaxKeys": "Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more." + } + }, + "MaxParts": { + "base": null, + "refs": { + "ListPartsOutput$MaxParts": "Maximum number of parts that were allowed in the response.", + "ListPartsRequest$MaxParts": "Sets the maximum number of parts to return." 
+ } + }, + "MaxUploads": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$MaxUploads": "Maximum number of multipart uploads that could have been included in the response.", + "ListMultipartUploadsRequest$MaxUploads": "Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response." + } + }, + "Message": { + "base": null, + "refs": { + "Error$Message": null + } + }, + "Metadata": { + "base": null, + "refs": { + "CopyObjectRequest$Metadata": "A map of metadata to store with the object in S3.", + "CreateMultipartUploadRequest$Metadata": "A map of metadata to store with the object in S3.", + "GetObjectOutput$Metadata": "A map of metadata to store with the object in S3.", + "HeadObjectOutput$Metadata": "A map of metadata to store with the object in S3.", + "PutObjectRequest$Metadata": "A map of metadata to store with the object in S3." + } + }, + "MetadataDirective": { + "base": null, + "refs": { + "CopyObjectRequest$MetadataDirective": "Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request." + } + }, + "MetadataKey": { + "base": null, + "refs": { + "Metadata$key": null + } + }, + "MetadataValue": { + "base": null, + "refs": { + "Metadata$value": null + } + }, + "MissingMeta": { + "base": null, + "refs": { + "GetObjectOutput$MissingMeta": "This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.", + "HeadObjectOutput$MissingMeta": "This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. 
For example, using SOAP, you can create metadata whose values are not legal HTTP headers." + } + }, + "MultipartUpload": { + "base": null, + "refs": { + "MultipartUploadList$member": null + } + }, + "MultipartUploadId": { + "base": null, + "refs": { + "AbortMultipartUploadRequest$UploadId": null, + "CompleteMultipartUploadRequest$UploadId": null, + "CreateMultipartUploadOutput$UploadId": "ID for the initiated multipart upload.", + "ListPartsOutput$UploadId": "Upload ID identifying the multipart upload whose parts are being listed.", + "ListPartsRequest$UploadId": "Upload ID identifying the multipart upload whose parts are being listed.", + "MultipartUpload$UploadId": "Upload ID that identifies the multipart upload.", + "UploadPartCopyRequest$UploadId": "Upload ID identifying the multipart upload whose part is being copied.", + "UploadPartRequest$UploadId": "Upload ID identifying the multipart upload whose part is being uploaded." + } + }, + "MultipartUploadList": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$Uploads": null + } + }, + "NextKeyMarker": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$NextKeyMarker": "When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request.", + "ListObjectVersionsOutput$NextKeyMarker": "Use this value for the key marker request parameter in a subsequent request." + } + }, + "NextMarker": { + "base": null, + "refs": { + "ListObjectsOutput$NextMarker": "When response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as marker in the subsequent request to get next set of objects. Amazon S3 lists objects in alphabetical order Note: This element is returned only if you have delimiter request parameter specified. 
If response does not include the NextMaker and it is truncated, you can use the value of the last Key in the response as the marker in the subsequent request to get the next set of object keys." + } + }, + "NextPartNumberMarker": { + "base": null, + "refs": { + "ListPartsOutput$NextPartNumberMarker": "When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request." + } + }, + "NextUploadIdMarker": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$NextUploadIdMarker": "When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request." + } + }, + "NextVersionIdMarker": { + "base": null, + "refs": { + "ListObjectVersionsOutput$NextVersionIdMarker": "Use this value for the next version id marker parameter in a subsequent request." + } + }, + "NoSuchBucket": { + "base": "The specified bucket does not exist.", + "refs": { + } + }, + "NoSuchKey": { + "base": "The specified key does not exist.", + "refs": { + } + }, + "NoSuchUpload": { + "base": "The specified multipart upload does not exist.", + "refs": { + } + }, + "NoncurrentVersionExpiration": { + "base": "Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.", + "refs": { + "LifecycleRule$NoncurrentVersionExpiration": null, + "Rule$NoncurrentVersionExpiration": null + } + }, + "NoncurrentVersionTransition": { + "base": "Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA or GLACIER storage class. 
If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA or GLACIER storage class at a specific period in the object's lifetime.", + "refs": { + "NoncurrentVersionTransitionList$member": null, + "Rule$NoncurrentVersionTransition": null + } + }, + "NoncurrentVersionTransitionList": { + "base": null, + "refs": { + "LifecycleRule$NoncurrentVersionTransitions": null + } + }, + "NotificationConfiguration": { + "base": "Container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off on the bucket.", + "refs": { + "PutBucketNotificationConfigurationRequest$NotificationConfiguration": null + } + }, + "NotificationConfigurationDeprecated": { + "base": null, + "refs": { + "PutBucketNotificationRequest$NotificationConfiguration": null + } + }, + "NotificationConfigurationFilter": { + "base": "Container for object key name filtering rules. For information about key name filtering, go to Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.", + "refs": { + "LambdaFunctionConfiguration$Filter": null, + "QueueConfiguration$Filter": null, + "TopicConfiguration$Filter": null + } + }, + "NotificationId": { + "base": "Optional unique identifier for configurations in a notification configuration. 
If you don't provide one, Amazon S3 will assign an ID.", + "refs": { + "CloudFunctionConfiguration$Id": null, + "LambdaFunctionConfiguration$Id": null, + "QueueConfiguration$Id": null, + "QueueConfigurationDeprecated$Id": null, + "TopicConfiguration$Id": null, + "TopicConfigurationDeprecated$Id": null + } + }, + "Object": { + "base": null, + "refs": { + "ObjectList$member": null + } + }, + "ObjectAlreadyInActiveTierError": { + "base": "This operation is not allowed against this storage tier", + "refs": { + } + }, + "ObjectCannedACL": { + "base": null, + "refs": { + "CopyObjectRequest$ACL": "The canned ACL to apply to the object.", + "CreateMultipartUploadRequest$ACL": "The canned ACL to apply to the object.", + "PutObjectAclRequest$ACL": "The canned ACL to apply to the object.", + "PutObjectRequest$ACL": "The canned ACL to apply to the object." + } + }, + "ObjectIdentifier": { + "base": null, + "refs": { + "ObjectIdentifierList$member": null + } + }, + "ObjectIdentifierList": { + "base": null, + "refs": { + "Delete$Objects": null + } + }, + "ObjectKey": { + "base": null, + "refs": { + "AbortMultipartUploadRequest$Key": null, + "CompleteMultipartUploadOutput$Key": null, + "CompleteMultipartUploadRequest$Key": null, + "CopyObjectRequest$Key": null, + "CreateMultipartUploadOutput$Key": "Object key for which the multipart upload was initiated.", + "CreateMultipartUploadRequest$Key": null, + "DeleteMarkerEntry$Key": "The object key.", + "DeleteObjectRequest$Key": null, + "DeletedObject$Key": null, + "Error$Key": null, + "ErrorDocument$Key": "The object key name to use when a 4XX class error occurs.", + "GetObjectAclRequest$Key": null, + "GetObjectRequest$Key": null, + "GetObjectTorrentRequest$Key": null, + "HeadObjectRequest$Key": null, + "ListPartsOutput$Key": "Object key for which the multipart upload was initiated.", + "ListPartsRequest$Key": null, + "MultipartUpload$Key": "Key of the object for which the multipart upload was initiated.", + "Object$Key": null, + 
"ObjectIdentifier$Key": "Key name of the object to delete.", + "ObjectVersion$Key": "The object key.", + "PutObjectAclRequest$Key": null, + "PutObjectRequest$Key": null, + "RestoreObjectRequest$Key": null, + "Tag$Key": "Name of the tag.", + "UploadPartCopyRequest$Key": null, + "UploadPartRequest$Key": null + } + }, + "ObjectList": { + "base": null, + "refs": { + "ListObjectsOutput$Contents": null + } + }, + "ObjectNotInActiveTierError": { + "base": "The source object of the COPY operation is not in the active tier and is only stored in Amazon Glacier.", + "refs": { + } + }, + "ObjectStorageClass": { + "base": null, + "refs": { + "Object$StorageClass": "The class of storage used to store the object." + } + }, + "ObjectVersion": { + "base": null, + "refs": { + "ObjectVersionList$member": null + } + }, + "ObjectVersionId": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$VersionId": "Version of the object.", + "CopyObjectOutput$VersionId": "Version ID of the newly created copy.", + "DeleteMarkerEntry$VersionId": "Version ID of an object.", + "DeleteObjectOutput$VersionId": "Returns the version ID of the delete marker created as a result of the DELETE operation.", + "DeleteObjectRequest$VersionId": "VersionId used to reference a specific version of the object.", + "DeletedObject$VersionId": null, + "Error$VersionId": null, + "GetObjectAclRequest$VersionId": "VersionId used to reference a specific version of the object.", + "GetObjectOutput$VersionId": "Version of the object.", + "GetObjectRequest$VersionId": "VersionId used to reference a specific version of the object.", + "HeadObjectOutput$VersionId": "Version of the object.", + "HeadObjectRequest$VersionId": "VersionId used to reference a specific version of the object.", + "ObjectIdentifier$VersionId": "VersionId for the specific version of the object to delete.", + "ObjectVersion$VersionId": "Version ID of an object.", + "PutObjectOutput$VersionId": "Version of the object.", + 
"RestoreObjectRequest$VersionId": null + } + }, + "ObjectVersionList": { + "base": null, + "refs": { + "ListObjectVersionsOutput$Versions": null + } + }, + "ObjectVersionStorageClass": { + "base": null, + "refs": { + "ObjectVersion$StorageClass": "The class of storage used to store the object." + } + }, + "Owner": { + "base": null, + "refs": { + "AccessControlPolicy$Owner": null, + "DeleteMarkerEntry$Owner": null, + "GetBucketAclOutput$Owner": null, + "GetObjectAclOutput$Owner": null, + "ListBucketsOutput$Owner": null, + "ListPartsOutput$Owner": null, + "MultipartUpload$Owner": null, + "Object$Owner": null, + "ObjectVersion$Owner": null + } + }, + "Part": { + "base": null, + "refs": { + "Parts$member": null + } + }, + "PartNumber": { + "base": null, + "refs": { + "CompletedPart$PartNumber": "Part number that identifies the part. This is a positive integer between 1 and 10,000.", + "Part$PartNumber": "Part number identifying the part. This is a positive integer between 1 and 10,000.", + "UploadPartCopyRequest$PartNumber": "Part number of part being copied. This is a positive integer between 1 and 10,000.", + "UploadPartRequest$PartNumber": "Part number of part being uploaded. This is a positive integer between 1 and 10,000." + } + }, + "PartNumberMarker": { + "base": null, + "refs": { + "ListPartsOutput$PartNumberMarker": "Part number after which listing begins.", + "ListPartsRequest$PartNumberMarker": "Specifies the part after which listing should begin. Only parts with higher part numbers will be listed." + } + }, + "Parts": { + "base": null, + "refs": { + "ListPartsOutput$Parts": null + } + }, + "Payer": { + "base": null, + "refs": { + "GetBucketRequestPaymentOutput$Payer": "Specifies who pays for the download and request fees.", + "RequestPaymentConfiguration$Payer": "Specifies who pays for the download and request fees." + } + }, + "Permission": { + "base": null, + "refs": { + "Grant$Permission": "Specifies the permission given to the grantee." 
+ } + }, + "Policy": { + "base": null, + "refs": { + "GetBucketPolicyOutput$Policy": "The bucket policy as a JSON document.", + "PutBucketPolicyRequest$Policy": "The bucket policy as a JSON document." + } + }, + "Prefix": { + "base": null, + "refs": { + "CommonPrefix$Prefix": null, + "LifecycleRule$Prefix": "Prefix identifying one or more objects to which the rule applies.", + "ListMultipartUploadsOutput$Prefix": "When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.", + "ListMultipartUploadsRequest$Prefix": "Lists in-progress uploads only for those keys that begin with the specified prefix.", + "ListObjectVersionsOutput$Prefix": null, + "ListObjectVersionsRequest$Prefix": "Limits the response to keys that begin with the specified prefix.", + "ListObjectsOutput$Prefix": null, + "ListObjectsRequest$Prefix": "Limits the response to keys that begin with the specified prefix.", + "ReplicationRule$Prefix": "Object keyname prefix identifying one or more objects to which the rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes are not supported.", + "Rule$Prefix": "Prefix identifying one or more objects to which the rule applies." + } + }, + "Protocol": { + "base": null, + "refs": { + "Redirect$Protocol": "Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.", + "RedirectAllRequestsTo$Protocol": "Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request." 
+ } + }, + "PutBucketAclRequest": { + "base": null, + "refs": { + } + }, + "PutBucketCorsRequest": { + "base": null, + "refs": { + } + }, + "PutBucketLifecycleConfigurationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketLifecycleRequest": { + "base": null, + "refs": { + } + }, + "PutBucketLoggingRequest": { + "base": null, + "refs": { + } + }, + "PutBucketNotificationConfigurationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketNotificationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketPolicyRequest": { + "base": null, + "refs": { + } + }, + "PutBucketReplicationRequest": { + "base": null, + "refs": { + } + }, + "PutBucketRequestPaymentRequest": { + "base": null, + "refs": { + } + }, + "PutBucketTaggingRequest": { + "base": null, + "refs": { + } + }, + "PutBucketVersioningRequest": { + "base": null, + "refs": { + } + }, + "PutBucketWebsiteRequest": { + "base": null, + "refs": { + } + }, + "PutObjectAclOutput": { + "base": null, + "refs": { + } + }, + "PutObjectAclRequest": { + "base": null, + "refs": { + } + }, + "PutObjectOutput": { + "base": null, + "refs": { + } + }, + "PutObjectRequest": { + "base": null, + "refs": { + } + }, + "QueueArn": { + "base": null, + "refs": { + "QueueConfiguration$QueueArn": "Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects events of specified type.", + "QueueConfigurationDeprecated$Queue": null + } + }, + "QueueConfiguration": { + "base": "Container for specifying an configuration when you want Amazon S3 to publish events to an Amazon Simple Queue Service (Amazon SQS) queue.", + "refs": { + "QueueConfigurationList$member": null + } + }, + "QueueConfigurationDeprecated": { + "base": null, + "refs": { + "NotificationConfigurationDeprecated$QueueConfiguration": null + } + }, + "QueueConfigurationList": { + "base": null, + "refs": { + "NotificationConfiguration$QueueConfigurations": null + } + }, + "Quiet": { + "base": null, + "refs": { + "Delete$Quiet": "Element to 
enable quiet mode for the request. When you add this element, you must set its value to true." + } + }, + "Range": { + "base": null, + "refs": { + "GetObjectRequest$Range": "Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.", + "HeadObjectRequest$Range": "Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35." + } + }, + "Redirect": { + "base": null, + "refs": { + "RoutingRule$Redirect": "Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can can specify a different error code to return." + } + }, + "RedirectAllRequestsTo": { + "base": null, + "refs": { + "GetBucketWebsiteOutput$RedirectAllRequestsTo": null, + "WebsiteConfiguration$RedirectAllRequestsTo": null + } + }, + "ReplaceKeyPrefixWith": { + "base": null, + "refs": { + "Redirect$ReplaceKeyPrefixWith": "The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided." + } + }, + "ReplaceKeyWith": { + "base": null, + "refs": { + "Redirect$ReplaceKeyWith": "The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the sibling is present. Can be present only if ReplaceKeyPrefixWith is not provided." + } + }, + "ReplicationConfiguration": { + "base": "Container for replication rules. You can add as many as 1,000 rules. 
Total replication configuration size can be up to 2 MB.", + "refs": { + "GetBucketReplicationOutput$ReplicationConfiguration": null, + "PutBucketReplicationRequest$ReplicationConfiguration": null + } + }, + "ReplicationRule": { + "base": null, + "refs": { + "ReplicationRules$member": null + } + }, + "ReplicationRuleStatus": { + "base": null, + "refs": { + "ReplicationRule$Status": "The rule is ignored if status is not Enabled." + } + }, + "ReplicationRules": { + "base": null, + "refs": { + "ReplicationConfiguration$Rules": "Container for information about a particular replication rule. Replication configuration must have at least one rule and can contain up to 1,000 rules." + } + }, + "ReplicationStatus": { + "base": null, + "refs": { + "GetObjectOutput$ReplicationStatus": null, + "HeadObjectOutput$ReplicationStatus": null + } + }, + "RequestCharged": { + "base": "If present, indicates that the requester was successfully charged for the request.", + "refs": { + "AbortMultipartUploadOutput$RequestCharged": null, + "CompleteMultipartUploadOutput$RequestCharged": null, + "CopyObjectOutput$RequestCharged": null, + "CreateMultipartUploadOutput$RequestCharged": null, + "DeleteObjectOutput$RequestCharged": null, + "DeleteObjectsOutput$RequestCharged": null, + "GetObjectAclOutput$RequestCharged": null, + "GetObjectOutput$RequestCharged": null, + "GetObjectTorrentOutput$RequestCharged": null, + "HeadObjectOutput$RequestCharged": null, + "ListPartsOutput$RequestCharged": null, + "PutObjectAclOutput$RequestCharged": null, + "PutObjectOutput$RequestCharged": null, + "RestoreObjectOutput$RequestCharged": null, + "UploadPartCopyOutput$RequestCharged": null, + "UploadPartOutput$RequestCharged": null + } + }, + "RequestPayer": { + "base": "Confirms that the requester knows that she or he will be charged for the request. Bucket owners need not specify this parameter in their requests. 
Documentation on downloading objects from requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html", + "refs": { + "AbortMultipartUploadRequest$RequestPayer": null, + "CompleteMultipartUploadRequest$RequestPayer": null, + "CopyObjectRequest$RequestPayer": null, + "CreateMultipartUploadRequest$RequestPayer": null, + "DeleteObjectRequest$RequestPayer": null, + "DeleteObjectsRequest$RequestPayer": null, + "GetObjectAclRequest$RequestPayer": null, + "GetObjectRequest$RequestPayer": null, + "GetObjectTorrentRequest$RequestPayer": null, + "HeadObjectRequest$RequestPayer": null, + "ListPartsRequest$RequestPayer": null, + "PutObjectAclRequest$RequestPayer": null, + "PutObjectRequest$RequestPayer": null, + "RestoreObjectRequest$RequestPayer": null, + "UploadPartCopyRequest$RequestPayer": null, + "UploadPartRequest$RequestPayer": null + } + }, + "RequestPaymentConfiguration": { + "base": null, + "refs": { + "PutBucketRequestPaymentRequest$RequestPaymentConfiguration": null + } + }, + "ResponseCacheControl": { + "base": null, + "refs": { + "GetObjectRequest$ResponseCacheControl": "Sets the Cache-Control header of the response." + } + }, + "ResponseContentDisposition": { + "base": null, + "refs": { + "GetObjectRequest$ResponseContentDisposition": "Sets the Content-Disposition header of the response" + } + }, + "ResponseContentEncoding": { + "base": null, + "refs": { + "GetObjectRequest$ResponseContentEncoding": "Sets the Content-Encoding header of the response." + } + }, + "ResponseContentLanguage": { + "base": null, + "refs": { + "GetObjectRequest$ResponseContentLanguage": "Sets the Content-Language header of the response." + } + }, + "ResponseContentType": { + "base": null, + "refs": { + "GetObjectRequest$ResponseContentType": "Sets the Content-Type header of the response." + } + }, + "ResponseExpires": { + "base": null, + "refs": { + "GetObjectRequest$ResponseExpires": "Sets the Expires header of the response." 
+ } + }, + "Restore": { + "base": null, + "refs": { + "GetObjectOutput$Restore": "Provides information about object restoration operation and expiration time of the restored object copy.", + "HeadObjectOutput$Restore": "Provides information about object restoration operation and expiration time of the restored object copy." + } + }, + "RestoreObjectOutput": { + "base": null, + "refs": { + } + }, + "RestoreObjectRequest": { + "base": null, + "refs": { + } + }, + "RestoreRequest": { + "base": null, + "refs": { + "RestoreObjectRequest$RestoreRequest": null + } + }, + "Role": { + "base": null, + "refs": { + "ReplicationConfiguration$Role": "Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating the objects." + } + }, + "RoutingRule": { + "base": null, + "refs": { + "RoutingRules$member": null + } + }, + "RoutingRules": { + "base": null, + "refs": { + "GetBucketWebsiteOutput$RoutingRules": null, + "WebsiteConfiguration$RoutingRules": null + } + }, + "Rule": { + "base": null, + "refs": { + "Rules$member": null + } + }, + "Rules": { + "base": null, + "refs": { + "GetBucketLifecycleOutput$Rules": null, + "LifecycleConfiguration$Rules": null + } + }, + "S3KeyFilter": { + "base": "Container for object key name prefix and suffix filtering rules.", + "refs": { + "NotificationConfigurationFilter$Key": null + } + }, + "SSECustomerAlgorithm": { + "base": null, + "refs": { + "CopyObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "CopyObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "CreateMultipartUploadOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + 
"CreateMultipartUploadRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "GetObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "GetObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "HeadObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "HeadObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "PutObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "PutObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "UploadPartCopyOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "UploadPartCopyRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256).", + "UploadPartOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", + "UploadPartRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (e.g., AES256)." + } + }, + "SSECustomerKey": { + "base": null, + "refs": { + "CopyObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. 
This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "CreateMultipartUploadRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "GetObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "HeadObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "PutObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.", + "UploadPartCopyRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. 
The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.", + "UploadPartRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request." + } + }, + "SSECustomerKeyMD5": { + "base": null, + "refs": { + "CopyObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "CopyObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "CreateMultipartUploadOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "CreateMultipartUploadRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "GetObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "GetObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "HeadObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "HeadObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "PutObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "PutObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "UploadPartCopyOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "UploadPartCopyRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.", + "UploadPartOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.", + "UploadPartRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error." + } + }, + "SSEKMSKeyId": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "CopyObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "CopyObjectRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "CreateMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "CreateMultipartUploadRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. 
Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "GetObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "HeadObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "PutObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "PutObjectRequest$SSEKMSKeyId": "Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version", + "UploadPartCopyOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.", + "UploadPartOutput$SSEKMSKeyId": "If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object." 
+ } + }, + "ServerSideEncryption": { + "base": null, + "refs": { + "CompleteMultipartUploadOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "CopyObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "CopyObjectRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "CreateMultipartUploadOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "CreateMultipartUploadRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "GetObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "HeadObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "PutObjectOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "PutObjectRequest$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "UploadPartCopyOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).", + "UploadPartOutput$ServerSideEncryption": "The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms)." + } + }, + "Size": { + "base": null, + "refs": { + "Object$Size": null, + "ObjectVersion$Size": "Size in bytes of the object.", + "Part$Size": "Size of the uploaded part data." 
+ } + }, + "StorageClass": { + "base": null, + "refs": { + "CopyObjectRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'.", + "CreateMultipartUploadRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'.", + "Destination$StorageClass": "The class of storage used to store the object.", + "GetObjectOutput$StorageClass": null, + "HeadObjectOutput$StorageClass": null, + "ListPartsOutput$StorageClass": "The class of storage used to store the object.", + "MultipartUpload$StorageClass": "The class of storage used to store the object.", + "PutObjectRequest$StorageClass": "The type of storage to use for the object. Defaults to 'STANDARD'." + } + }, + "Suffix": { + "base": null, + "refs": { + "IndexDocument$Suffix": "A suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character." + } + }, + "Tag": { + "base": null, + "refs": { + "TagSet$member": null + } + }, + "TagSet": { + "base": null, + "refs": { + "GetBucketTaggingOutput$TagSet": null, + "Tagging$TagSet": null + } + }, + "Tagging": { + "base": null, + "refs": { + "PutBucketTaggingRequest$Tagging": null + } + }, + "TargetBucket": { + "base": null, + "refs": { + "LoggingEnabled$TargetBucket": "Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key." 
+ } + }, + "TargetGrant": { + "base": null, + "refs": { + "TargetGrants$member": null + } + }, + "TargetGrants": { + "base": null, + "refs": { + "LoggingEnabled$TargetGrants": null + } + }, + "TargetPrefix": { + "base": null, + "refs": { + "LoggingEnabled$TargetPrefix": "This element lets you specify a prefix for the keys that the log files will be stored under." + } + }, + "TopicArn": { + "base": null, + "refs": { + "TopicConfiguration$TopicArn": "Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects events of specified type.", + "TopicConfigurationDeprecated$Topic": "Amazon SNS topic to which Amazon S3 will publish a message to report the specified events for the bucket." + } + }, + "TopicConfiguration": { + "base": "Container for specifying the configuration when you want Amazon S3 to publish events to an Amazon Simple Notification Service (Amazon SNS) topic.", + "refs": { + "TopicConfigurationList$member": null + } + }, + "TopicConfigurationDeprecated": { + "base": null, + "refs": { + "NotificationConfigurationDeprecated$TopicConfiguration": null + } + }, + "TopicConfigurationList": { + "base": null, + "refs": { + "NotificationConfiguration$TopicConfigurations": null + } + }, + "Transition": { + "base": null, + "refs": { + "Rule$Transition": null, + "TransitionList$member": null + } + }, + "TransitionList": { + "base": null, + "refs": { + "LifecycleRule$Transitions": null + } + }, + "TransitionStorageClass": { + "base": null, + "refs": { + "NoncurrentVersionTransition$StorageClass": "The class of storage used to store the object.", + "Transition$StorageClass": "The class of storage used to store the object." + } + }, + "Type": { + "base": null, + "refs": { + "Grantee$Type": "Type of grantee" + } + }, + "URI": { + "base": null, + "refs": { + "Grantee$URI": "URI of the grantee group." 
+ } + }, + "UploadIdMarker": { + "base": null, + "refs": { + "ListMultipartUploadsOutput$UploadIdMarker": "Upload ID after which listing began.", + "ListMultipartUploadsRequest$UploadIdMarker": "Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored." + } + }, + "UploadPartCopyOutput": { + "base": null, + "refs": { + } + }, + "UploadPartCopyRequest": { + "base": null, + "refs": { + } + }, + "UploadPartOutput": { + "base": null, + "refs": { + } + }, + "UploadPartRequest": { + "base": null, + "refs": { + } + }, + "Value": { + "base": null, + "refs": { + "Tag$Value": "Value of the tag." + } + }, + "VersionIdMarker": { + "base": null, + "refs": { + "ListObjectVersionsOutput$VersionIdMarker": null, + "ListObjectVersionsRequest$VersionIdMarker": "Specifies the object version you want to start listing from." + } + }, + "VersioningConfiguration": { + "base": null, + "refs": { + "PutBucketVersioningRequest$VersioningConfiguration": null + } + }, + "WebsiteConfiguration": { + "base": null, + "refs": { + "PutBucketWebsiteRequest$WebsiteConfiguration": null + } + }, + "WebsiteRedirectLocation": { + "base": null, + "refs": { + "CopyObjectRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "CreateMultipartUploadRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "GetObjectOutput$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. 
Amazon S3 stores the value of this header in the object metadata.", + "HeadObjectOutput$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.", + "PutObjectRequest$WebsiteRedirectLocation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata." + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,57 @@ +{ + "pagination": { + "ListBuckets": { + "result_key": "Buckets" + }, + "ListMultipartUploads": { + "limit_key": "MaxUploads", + "more_results": "IsTruncated", + "output_token": [ + 
"NextKeyMarker", + "NextUploadIdMarker" + ], + "input_token": [ + "KeyMarker", + "UploadIdMarker" + ], + "result_key": [ + "Uploads", + "CommonPrefixes" + ] + }, + "ListObjectVersions": { + "more_results": "IsTruncated", + "limit_key": "MaxKeys", + "output_token": [ + "NextKeyMarker", + "NextVersionIdMarker" + ], + "input_token": [ + "KeyMarker", + "VersionIdMarker" + ], + "result_key": [ + "Versions", + "DeleteMarkers", + "CommonPrefixes" + ] + }, + "ListObjects": { + "more_results": "IsTruncated", + "limit_key": "MaxKeys", + "output_token": "NextMarker || Contents[-1].Key", + "input_token": "Marker", + "result_key": [ + "Contents", + "CommonPrefixes" + ] + }, + "ListParts": { + "more_results": "IsTruncated", + "limit_key": "MaxParts", + "output_token": "NextPartNumberMarker", + "input_token": "PartNumberMarker", + "result_key": "Parts" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/waiters-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/waiters-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/waiters-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/s3/2006-03-01/waiters-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,68 @@ +{ + "version": 2, + "waiters": { + "BucketExists": { + "delay": 5, + "operation": "HeadBucket", + "maxAttempts": 20, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "expected": 403, + "matcher": "status", + "state": "success" + }, + { + "expected": 404, + "matcher": "status", + "state": "retry" + } + ] + }, + "BucketNotExists": { + "delay": 5, + "operation": "HeadBucket", + "maxAttempts": 20, + "acceptors": [ + { + "expected": 404, + "matcher": "status", + "state": "success" + } + ] + }, + "ObjectExists": 
{ + "delay": 5, + "operation": "HeadObject", + "maxAttempts": 20, + "acceptors": [ + { + "expected": 200, + "matcher": "status", + "state": "success" + }, + { + "expected": 404, + "matcher": "status", + "state": "retry" + } + ] + }, + "ObjectNotExists": { + "delay": 5, + "operation": "HeadObject", + "maxAttempts": 20, + "acceptors": [ + { + "expected": 404, + "matcher": "status", + "state": "success" + } + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,971 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2009-04-15", + "endpointPrefix":"sdb", + "serviceFullName":"Amazon SimpleDB", + "signatureVersion":"v2", + "xmlNamespace":"http://sdb.amazonaws.com/doc/2009-04-15/", + "protocol":"query" + }, + "operations":{ + "BatchDeleteAttributes":{ + "name":"BatchDeleteAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteAttributesRequest"} + }, + "BatchPutAttributes":{ + "name":"BatchPutAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchPutAttributesRequest"}, + "errors":[ + { + "shape":"DuplicateItemName", + "error":{ + "code":"DuplicateItemName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberItemAttributesExceeded", + "error":{ + "code":"NumberItemAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainAttributesExceeded", + "error":{ + "code":"NumberDomainAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainBytesExceeded", + "error":{ + "code":"NumberDomainBytesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberSubmittedItemsExceeded", + "error":{ + "code":"NumberSubmittedItemsExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberSubmittedAttributesExceeded", + "error":{ + "code":"NumberSubmittedAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDomainRequest"}, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainsExceeded", + "error":{ + "code":"NumberDomainsExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteAttributes":{ + "name":"DeleteAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAttributesRequest"}, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AttributeDoesNotExist", + "error":{ + "code":"AttributeDoesNotExist", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDomainRequest"}, + "errors":[ + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DomainMetadata":{ + "name":"DomainMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DomainMetadataRequest"}, + "output":{ + "shape":"DomainMetadataResult", + "resultWrapper":"DomainMetadataResult" + }, + "errors":[ + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetAttributes":{ + "name":"GetAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAttributesRequest"}, + "output":{ + "shape":"GetAttributesResult", + "resultWrapper":"GetAttributesResult" + }, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + 
"httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{ + "shape":"ListDomainsResult", + "resultWrapper":"ListDomainsResult" + }, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidNextToken", + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "PutAttributes":{ + "name":"PutAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutAttributesRequest"}, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainAttributesExceeded", + "error":{ + "code":"NumberDomainAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberDomainBytesExceeded", + "error":{ + "code":"NumberDomainBytesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NumberItemAttributesExceeded", + "error":{ + "code":"NumberItemAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AttributeDoesNotExist", + "error":{ + "code":"AttributeDoesNotExist", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "Select":{ + "name":"Select", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"SelectRequest"}, + "output":{ + "shape":"SelectResult", + "resultWrapper":"SelectResult" + }, + "errors":[ + { + "shape":"InvalidParameterValue", + "error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidNextToken", + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidNumberPredicates", + "error":{ + "code":"InvalidNumberPredicates", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidNumberValueTests", + "error":{ + "code":"InvalidNumberValueTests", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidQueryExpression", + "error":{ + "code":"InvalidQueryExpression", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"MissingParameter", + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NoSuchDomain", + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"RequestTimeout", + "error":{ + "code":"RequestTimeout", + "httpStatusCode":408, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TooManyRequestedAttributes", + "error":{ + "code":"TooManyRequestedAttributes", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "Attribute":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"String"}, + "AlternateNameEncoding":{"shape":"String"}, + "Value":{"shape":"String"}, + "AlternateValueEncoding":{"shape":"String"} + } + }, + "AttributeDoesNotExist":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"AttributeDoesNotExist", + "httpStatusCode":404, + 
"senderFault":true + }, + "exception":true + }, + "AttributeList":{ + "type":"list", + "member":{ + "shape":"Attribute", + "locationName":"Attribute" + }, + "flattened":true + }, + "AttributeNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AttributeName" + }, + "flattened":true + }, + "BatchDeleteAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Items" + ], + "members":{ + "DomainName":{"shape":"String"}, + "Items":{"shape":"DeletableItemList"} + } + }, + "BatchPutAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Items" + ], + "members":{ + "DomainName":{"shape":"String"}, + "Items":{"shape":"ReplaceableItemList"} + } + }, + "Boolean":{"type":"boolean"}, + "CreateDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"String"} + } + }, + "DeletableAttribute":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "DeletableAttributeList":{ + "type":"list", + "member":{ + "shape":"DeletableAttribute", + "locationName":"Attribute" + }, + "flattened":true + }, + "DeletableItem":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"String", + "locationName":"ItemName" + }, + "Attributes":{"shape":"DeletableAttributeList"} + } + }, + "DeletableItemList":{ + "type":"list", + "member":{ + "shape":"DeletableItem", + "locationName":"Item" + }, + "flattened":true + }, + "DeleteAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ItemName" + ], + "members":{ + "DomainName":{"shape":"String"}, + "ItemName":{"shape":"String"}, + "Attributes":{"shape":"DeletableAttributeList"}, + "Expected":{"shape":"UpdateCondition"} + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"String"} + } + }, + "DomainMetadataRequest":{ + "type":"structure", + 
"required":["DomainName"], + "members":{ + "DomainName":{"shape":"String"} + } + }, + "DomainMetadataResult":{ + "type":"structure", + "members":{ + "ItemCount":{"shape":"Integer"}, + "ItemNamesSizeBytes":{"shape":"Long"}, + "AttributeNameCount":{"shape":"Integer"}, + "AttributeNamesSizeBytes":{"shape":"Long"}, + "AttributeValueCount":{"shape":"Integer"}, + "AttributeValuesSizeBytes":{"shape":"Long"}, + "Timestamp":{"shape":"Integer"} + } + }, + "DomainNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DomainName" + }, + "flattened":true + }, + "DuplicateItemName":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"DuplicateItemName", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Float":{"type":"float"}, + "GetAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ItemName" + ], + "members":{ + "DomainName":{"shape":"String"}, + "ItemName":{"shape":"String"}, + "AttributeNames":{"shape":"AttributeNameList"}, + "ConsistentRead":{"shape":"Boolean"} + } + }, + "GetAttributesResult":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"AttributeList"} + } + }, + "Integer":{"type":"integer"}, + "InvalidNextToken":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidNextToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidNumberPredicates":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidNumberPredicates", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidNumberValueTests":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidNumberValueTests", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterValue":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + 
"error":{ + "code":"InvalidParameterValue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidQueryExpression":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"InvalidQueryExpression", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Item":{ + "type":"structure", + "required":[ + "Name", + "Attributes" + ], + "members":{ + "Name":{"shape":"String"}, + "AlternateNameEncoding":{"shape":"String"}, + "Attributes":{"shape":"AttributeList"} + } + }, + "ItemList":{ + "type":"list", + "member":{ + "shape":"Item", + "locationName":"Item" + }, + "flattened":true + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "MaxNumberOfDomains":{"shape":"Integer"}, + "NextToken":{"shape":"String"} + } + }, + "ListDomainsResult":{ + "type":"structure", + "members":{ + "DomainNames":{"shape":"DomainNameList"}, + "NextToken":{"shape":"String"} + } + }, + "Long":{"type":"long"}, + "MissingParameter":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"MissingParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NoSuchDomain":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NoSuchDomain", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NumberDomainAttributesExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberDomainAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberDomainBytesExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberDomainBytesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberDomainsExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + 
"code":"NumberDomainsExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberItemAttributesExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberItemAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberSubmittedAttributesExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberSubmittedAttributesExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "NumberSubmittedItemsExceeded":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"NumberSubmittedItemsExceeded", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "PutAttributesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ItemName", + "Attributes" + ], + "members":{ + "DomainName":{"shape":"String"}, + "ItemName":{"shape":"String"}, + "Attributes":{"shape":"ReplaceableAttributeList"}, + "Expected":{"shape":"UpdateCondition"} + } + }, + "ReplaceableAttribute":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "Replace":{"shape":"Boolean"} + } + }, + "ReplaceableAttributeList":{ + "type":"list", + "member":{ + "shape":"ReplaceableAttribute", + "locationName":"Attribute" + }, + "flattened":true + }, + "ReplaceableItem":{ + "type":"structure", + "required":[ + "Name", + "Attributes" + ], + "members":{ + "Name":{ + "shape":"String", + "locationName":"ItemName" + }, + "Attributes":{"shape":"ReplaceableAttributeList"} + } + }, + "ReplaceableItemList":{ + "type":"list", + "member":{ + "shape":"ReplaceableItem", + "locationName":"Item" + }, + "flattened":true + }, + "RequestTimeout":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"RequestTimeout", + "httpStatusCode":408, + 
"senderFault":true + }, + "exception":true + }, + "SelectRequest":{ + "type":"structure", + "required":["SelectExpression"], + "members":{ + "SelectExpression":{"shape":"String"}, + "NextToken":{"shape":"String"}, + "ConsistentRead":{"shape":"Boolean"} + } + }, + "SelectResult":{ + "type":"structure", + "members":{ + "Items":{"shape":"ItemList"}, + "NextToken":{"shape":"String"} + } + }, + "String":{"type":"string"}, + "TooManyRequestedAttributes":{ + "type":"structure", + "members":{ + "BoxUsage":{"shape":"Float"} + }, + "error":{ + "code":"TooManyRequestedAttributes", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UpdateCondition":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"}, + "Exists":{"shape":"Boolean"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,353 @@ +{ + "version": "2.0", + "operations": { + "BatchDeleteAttributes": "

    Performs multiple DeleteAttributes operations in a single call, which reduces round trips and latencies. This enables Amazon SimpleDB to optimize requests, which generally yields better throughput.

    If you specify BatchDeleteAttributes without attributes or values, all the attributes for the item are deleted.

    BatchDeleteAttributes is an idempotent operation; running it multiple times on the same item or attribute doesn't result in an error.

    The BatchDeleteAttributes operation succeeds or fails in its entirety. There are no partial deletes. You can execute multiple BatchDeleteAttributes operations and other operations in parallel. However, large numbers of concurrent BatchDeleteAttributes calls can result in Service Unavailable (503) responses.

    This operation is vulnerable to exceeding the maximum URL size when making a REST request using the HTTP GET method.

    This operation does not support conditions using Expected.X.Name, Expected.X.Value, or Expected.X.Exists.

    The following limitations are enforced for this operation:

    • 1 MB request size
    • 25 item limit per BatchDeleteAttributes operation

    ", + "BatchPutAttributes": "

    The BatchPutAttributes operation creates or replaces attributes within one or more items. By using this operation, the client can perform multiple PutAttribute operation with a single call. This helps yield savings in round trips and latencies, enabling Amazon SimpleDB to optimize requests and generally produce better throughput.

    The client may specify the item name with the Item.X.ItemName parameter. The client may specify new attributes using a combination of the Item.X.Attribute.Y.Name and Item.X.Attribute.Y.Value parameters. The client may specify the first attribute for the first item using the parameters Item.0.Attribute.0.Name and Item.0.Attribute.0.Value, and for the second attribute for the first item by the parameters Item.0.Attribute.1.Name and Item.0.Attribute.1.Value, and so on.

    Attributes are uniquely identified within an item by their name/value combination. For example, a single item can have the attributes { \"first_name\", \"first_value\" } and { \"first_name\", \"second_value\" }. However, it cannot have two attribute instances where both the Item.X.Attribute.Y.Name and Item.X.Attribute.Y.Value are the same.

    Optionally, the requester can supply the Replace parameter for each individual value. Setting this value to true will cause the new attribute values to replace the existing attribute values. For example, if an item I has the attributes { 'a', '1' }, { 'b', '2'} and { 'b', '3' } and the requester does a BatchPutAttributes of {'I', 'b', '4' } with the Replace parameter set to true, the final attributes of the item will be { 'a', '1' } and { 'b', '4' }, replacing the previous values of the 'b' attribute with the new value.

    You cannot specify an empty string as an item or as an attribute name. The BatchPutAttributes operation succeeds or fails in its entirety. There are no partial puts. This operation is vulnerable to exceeding the maximum URL size when making a REST request using the HTTP GET method. This operation does not support conditions using Expected.X.Name, Expected.X.Value, or Expected.X.Exists.

    You can execute multiple BatchPutAttributes operations and other operations in parallel. However, large numbers of concurrent BatchPutAttributes calls can result in Service Unavailable (503) responses.

    The following limitations are enforced for this operation:

    • 256 attribute name-value pairs per item
    • 1 MB request size
    • 1 billion attributes per domain
    • 10 GB of total user data storage per domain
    • 25 item limit per BatchPutAttributes operation

    ", + "CreateDomain": "

    The CreateDomain operation creates a new domain. The domain name should be unique among the domains associated with the Access Key ID provided in the request. The CreateDomain operation may take 10 or more seconds to complete.

    CreateDomain is an idempotent operation; running it multiple times using the same domain name will not result in an error response.

    The client can create up to 100 domains per account.

    If the client requires additional domains, go to http://aws.amazon.com/contact-us/simpledb-limit-request/.

    ", + "DeleteAttributes": "

    Deletes one or more attributes associated with an item. If all attributes of the item are deleted, the item is deleted.

    If DeleteAttributes is called without being passed any attributes or values specified, all the attributes for the item are deleted.

    DeleteAttributes is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.

    Because Amazon SimpleDB makes multiple copies of item data and uses an eventual consistency update model, performing a GetAttributes or Select operation (read) immediately after a DeleteAttributes or PutAttributes operation (write) might not return updated item data.

    ", + "DeleteDomain": "

    The DeleteDomain operation deletes a domain. Any items (and their attributes) in the domain are deleted as well. The DeleteDomain operation might take 10 or more seconds to complete.

    Running DeleteDomain on a domain that does not exist or running the function multiple times using the same domain name will not result in an error response. ", + "DomainMetadata": "

    Returns information about the domain, including when the domain was created, the number of items and attributes in the domain, and the size of the attribute names and values.

    ", + "GetAttributes": "

    Returns all of the attributes associated with the specified item. Optionally, the attributes returned can be limited to one or more attributes by specifying an attribute name parameter.

    If the item does not exist on the replica that was accessed for this operation, an empty set is returned. The system does not return an error as it cannot guarantee the item does not exist on other replicas.

    If GetAttributes is called without being passed any attribute names, all the attributes for the item are returned. ", + "ListDomains": "

    The ListDomains operation lists all domains associated with the Access Key ID. It returns domain names up to the limit set by MaxNumberOfDomains. A NextToken is returned if there are more than MaxNumberOfDomains domains. Calling ListDomains successive times with the NextToken provided by the operation returns up to MaxNumberOfDomains more domain names with each successive operation call.

    ", + "PutAttributes": "

    The PutAttributes operation creates or replaces attributes in an item. The client may specify new attributes using a combination of the Attribute.X.Name and Attribute.X.Value parameters. The client specifies the first attribute by the parameters Attribute.0.Name and Attribute.0.Value, the second attribute by the parameters Attribute.1.Name and Attribute.1.Value, and so on.

    Attributes are uniquely identified in an item by their name/value combination. For example, a single item can have the attributes { \"first_name\", \"first_value\" } and { \"first_name\", \"second_value\" }. However, it cannot have two attribute instances where both the Attribute.X.Name and Attribute.X.Value are the same.

    Optionally, the requestor can supply the Replace parameter for each individual attribute. Setting this value to true causes the new attribute value to replace the existing attribute value(s). For example, if an item has the attributes { 'a', '1' }, { 'b', '2'} and { 'b', '3' } and the requestor calls PutAttributes using the attributes { 'b', '4' } with the Replace parameter set to true, the final attributes of the item are changed to { 'a', '1' } and { 'b', '4' }, which replaces the previous values of the 'b' attribute with the new value.

    Using PutAttributes to replace attribute values that do not exist will not result in an error response.

    You cannot specify an empty string as an attribute name.

    Because Amazon SimpleDB makes multiple copies of client data and uses an eventual consistency update model, performing a GetAttributes or Select operation (read) immediately after a PutAttributes or DeleteAttributes operation (write) might not return the updated data.

    The following limitations are enforced for this operation:

    • 256 total attribute name-value pairs per item
    • One billion attributes per domain
    • 10 GB of total user data storage per domain

    ", + "Select": "

    The Select operation returns a set of attributes for ItemNames that match the select expression. Select is similar to the standard SQL SELECT statement.

    The total size of the response cannot exceed 1 MB in total size. Amazon SimpleDB automatically adjusts the number of items returned per page to enforce this limit. For example, if the client asks to retrieve 2500 items, but each individual item is 10 kB in size, the system returns 100 items and an appropriate NextToken so the client can access the next page of results.

    For information on how to construct select expressions, see Using Select to Create Amazon SimpleDB Queries in the Developer Guide.

    " + }, + "service": "Amazon SimpleDB is a web service providing the core database functions of data indexing and querying in the cloud. By offloading the time and effort associated with building and operating a web-scale database, SimpleDB provides developers the freedom to focus on application development.

    A traditional, clustered relational database requires a sizable upfront capital outlay, is complex to design, and often requires extensive and repetitive database administration. Amazon SimpleDB is dramatically simpler, requiring no schema, automatically indexing your data and providing a simple API for storage and access. This approach eliminates the administrative burden of data modeling, index maintenance, and performance tuning. Developers gain access to this functionality within Amazon's proven computing environment, are able to scale instantly, and pay only for what they use.

    Visit http://aws.amazon.com/simpledb/ for more information.

    ", + "shapes": { + "Attribute": { + "base": "

    ", + "refs": { + "AttributeList$member": null + } + }, + "AttributeDoesNotExist": { + "base": "

    The specified attribute does not exist.

    ", + "refs": { + } + }, + "AttributeList": { + "base": null, + "refs": { + "GetAttributesResult$Attributes": "The list of attributes returned by the operation.", + "Item$Attributes": "A list of attributes." + } + }, + "AttributeNameList": { + "base": null, + "refs": { + "GetAttributesRequest$AttributeNames": "The names of the attributes." + } + }, + "BatchDeleteAttributesRequest": { + "base": null, + "refs": { + } + }, + "BatchPutAttributesRequest": { + "base": null, + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "GetAttributesRequest$ConsistentRead": "Determines whether or not strong consistency should be enforced when data is read from SimpleDB. If true, any data previously written to SimpleDB will be returned. Otherwise, results will be consistent eventually, and the client may not see data that was written immediately before your read.", + "ReplaceableAttribute$Replace": "A flag specifying whether or not to replace the attribute/value pair or to add a new attribute/value pair. The default setting is false.", + "SelectRequest$ConsistentRead": "Determines whether or not strong consistency should be enforced when data is read from SimpleDB. If true, any data previously written to SimpleDB will be returned. Otherwise, results will be consistent eventually, and the client may not see data that was written immediately before your read.", + "UpdateCondition$Exists": "

    A value specifying whether or not the specified attribute must exist with the specified value in order for the update condition to be satisfied. Specify true if the attribute must exist for the update condition to be satisfied. Specify false if the attribute should not exist in order for the update condition to be satisfied.

    " + } + }, + "CreateDomainRequest": { + "base": null, + "refs": { + } + }, + "DeletableAttribute": { + "base": "

    ", + "refs": { + "DeletableAttributeList$member": null + } + }, + "DeletableAttributeList": { + "base": null, + "refs": { + "DeletableItem$Attributes": null, + "DeleteAttributesRequest$Attributes": "A list of Attributes. Similar to columns on a spreadsheet, attributes represent categories of data that can be assigned to items." + } + }, + "DeletableItem": { + "base": null, + "refs": { + "DeletableItemList$member": null + } + }, + "DeletableItemList": { + "base": null, + "refs": { + "BatchDeleteAttributesRequest$Items": "A list of items on which to perform the operation." + } + }, + "DeleteAttributesRequest": { + "base": null, + "refs": { + } + }, + "DeleteDomainRequest": { + "base": null, + "refs": { + } + }, + "DomainMetadataRequest": { + "base": null, + "refs": { + } + }, + "DomainMetadataResult": { + "base": null, + "refs": { + } + }, + "DomainNameList": { + "base": null, + "refs": { + "ListDomainsResult$DomainNames": "A list of domain names that match the expression." + } + }, + "DuplicateItemName": { + "base": "

    The item name was specified more than once.

    ", + "refs": { + } + }, + "Float": { + "base": null, + "refs": { + "AttributeDoesNotExist$BoxUsage": null, + "DuplicateItemName$BoxUsage": null, + "InvalidNextToken$BoxUsage": null, + "InvalidNumberPredicates$BoxUsage": null, + "InvalidNumberValueTests$BoxUsage": null, + "InvalidParameterValue$BoxUsage": null, + "InvalidQueryExpression$BoxUsage": null, + "MissingParameter$BoxUsage": null, + "NoSuchDomain$BoxUsage": null, + "NumberDomainAttributesExceeded$BoxUsage": null, + "NumberDomainBytesExceeded$BoxUsage": null, + "NumberDomainsExceeded$BoxUsage": null, + "NumberItemAttributesExceeded$BoxUsage": null, + "NumberSubmittedAttributesExceeded$BoxUsage": null, + "NumberSubmittedItemsExceeded$BoxUsage": null, + "RequestTimeout$BoxUsage": null, + "TooManyRequestedAttributes$BoxUsage": null + } + }, + "GetAttributesRequest": { + "base": null, + "refs": { + } + }, + "GetAttributesResult": { + "base": null, + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "DomainMetadataResult$ItemCount": "The number of all items in the domain.", + "DomainMetadataResult$AttributeNameCount": "The number of unique attribute names in the domain.", + "DomainMetadataResult$AttributeValueCount": "The number of all attribute name/value pairs in the domain.", + "DomainMetadataResult$Timestamp": "The data and time when metadata was calculated, in Epoch (UNIX) seconds.", + "ListDomainsRequest$MaxNumberOfDomains": "The maximum number of domain names you want returned. The range is 1 to 100. The default setting is 100." + } + }, + "InvalidNextToken": { + "base": "

    The specified NextToken is not valid.

    ", + "refs": { + } + }, + "InvalidNumberPredicates": { + "base": "

    Too many predicates exist in the query expression.

    ", + "refs": { + } + }, + "InvalidNumberValueTests": { + "base": "

    Too many predicates exist in the query expression.

    ", + "refs": { + } + }, + "InvalidParameterValue": { + "base": "

    The value for a parameter is invalid.

    ", + "refs": { + } + }, + "InvalidQueryExpression": { + "base": "

    The specified query expression syntax is not valid.

    ", + "refs": { + } + }, + "Item": { + "base": "

    ", + "refs": { + "ItemList$member": null + } + }, + "ItemList": { + "base": null, + "refs": { + "SelectResult$Items": "A list of items that match the select expression." + } + }, + "ListDomainsRequest": { + "base": null, + "refs": { + } + }, + "ListDomainsResult": { + "base": null, + "refs": { + } + }, + "Long": { + "base": null, + "refs": { + "DomainMetadataResult$ItemNamesSizeBytes": "The total size of all item names in the domain, in bytes.", + "DomainMetadataResult$AttributeNamesSizeBytes": "The total size of all unique attribute names in the domain, in bytes.", + "DomainMetadataResult$AttributeValuesSizeBytes": "The total size of all attribute values in the domain, in bytes." + } + }, + "MissingParameter": { + "base": "

    The request must contain the specified missing parameter.

    ", + "refs": { + } + }, + "NoSuchDomain": { + "base": "

    The specified domain does not exist.

    ", + "refs": { + } + }, + "NumberDomainAttributesExceeded": { + "base": "

    Too many attributes in this domain.

    ", + "refs": { + } + }, + "NumberDomainBytesExceeded": { + "base": "

    Too many bytes in this domain.

    ", + "refs": { + } + }, + "NumberDomainsExceeded": { + "base": "

    Too many domains exist per this account.

    ", + "refs": { + } + }, + "NumberItemAttributesExceeded": { + "base": "

    Too many attributes in this item.

    ", + "refs": { + } + }, + "NumberSubmittedAttributesExceeded": { + "base": "

    Too many attributes exist in a single call.

    ", + "refs": { + } + }, + "NumberSubmittedItemsExceeded": { + "base": "

    Too many items exist in a single call.

    ", + "refs": { + } + }, + "PutAttributesRequest": { + "base": null, + "refs": { + } + }, + "ReplaceableAttribute": { + "base": "

    ", + "refs": { + "ReplaceableAttributeList$member": null + } + }, + "ReplaceableAttributeList": { + "base": null, + "refs": { + "PutAttributesRequest$Attributes": "The list of attributes.", + "ReplaceableItem$Attributes": "The list of attributes for a replaceable item." + } + }, + "ReplaceableItem": { + "base": "

    ", + "refs": { + "ReplaceableItemList$member": null + } + }, + "ReplaceableItemList": { + "base": null, + "refs": { + "BatchPutAttributesRequest$Items": "A list of items on which to perform the operation." + } + }, + "RequestTimeout": { + "base": "

    A timeout occurred when attempting to query the specified domain with specified query expression.

    ", + "refs": { + } + }, + "SelectRequest": { + "base": null, + "refs": { + } + }, + "SelectResult": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "Attribute$Name": "The name of the attribute.", + "Attribute$AlternateNameEncoding": "

    ", + "Attribute$Value": "The value of the attribute.", + "Attribute$AlternateValueEncoding": "

    ", + "AttributeNameList$member": null, + "BatchDeleteAttributesRequest$DomainName": "The name of the domain in which the attributes are being deleted.", + "BatchPutAttributesRequest$DomainName": "The name of the domain in which the attributes are being stored.", + "CreateDomainRequest$DomainName": "The name of the domain to create. The name can range between 3 and 255 characters and can contain the following characters: a-z, A-Z, 0-9, '_', '-', and '.'.", + "DeletableAttribute$Name": "The name of the attribute.", + "DeletableAttribute$Value": "The value of the attribute.", + "DeletableItem$Name": null, + "DeleteAttributesRequest$DomainName": "The name of the domain in which to perform the operation.", + "DeleteAttributesRequest$ItemName": "The name of the item. Similar to rows on a spreadsheet, items represent individual objects that contain one or more value-attribute pairs.", + "DeleteDomainRequest$DomainName": "The name of the domain to delete.", + "DomainMetadataRequest$DomainName": "The name of the domain for which to display the metadata of.", + "DomainNameList$member": null, + "GetAttributesRequest$DomainName": "The name of the domain in which to perform the operation.", + "GetAttributesRequest$ItemName": "The name of the item.", + "Item$Name": "The name of the item.", + "Item$AlternateNameEncoding": "

    ", + "ListDomainsRequest$NextToken": "A string informing Amazon SimpleDB where to start the next list of domain names.", + "ListDomainsResult$NextToken": "An opaque token indicating that there are more domains than the specified MaxNumberOfDomains still available.", + "PutAttributesRequest$DomainName": "The name of the domain in which to perform the operation.", + "PutAttributesRequest$ItemName": "The name of the item.", + "ReplaceableAttribute$Name": "The name of the replaceable attribute.", + "ReplaceableAttribute$Value": "The value of the replaceable attribute.", + "ReplaceableItem$Name": "The name of the replaceable item.", + "SelectRequest$SelectExpression": "The expression used to query the domain.", + "SelectRequest$NextToken": "A string informing Amazon SimpleDB where to start the next list of ItemNames.", + "SelectResult$NextToken": "An opaque token indicating that more items than MaxNumberOfItems were matched, the response size exceeded 1 megabyte, or the execution time exceeded 5 seconds.", + "UpdateCondition$Name": "

    The name of the attribute involved in the condition.

    ", + "UpdateCondition$Value": "

    The value of an attribute. This value can only be specified when the Exists parameter is equal to true.

    " + } + }, + "TooManyRequestedAttributes": { + "base": "

    Too many attributes requested.

    ", + "refs": { + } + }, + "UpdateCondition": { + "base": "

    Specifies the conditions under which data should be updated. If an update condition is specified for a request, the data will only be updated if the condition is satisfied. For example, if an attribute with a specific name and value exists, or if a specific attribute doesn't exist.

    ", + "refs": { + "DeleteAttributesRequest$Expected": "The update condition which, if specified, determines whether the specified attributes will be deleted or not. The update condition must be satisfied in order for this request to be processed and the attributes to be deleted.", + "PutAttributesRequest$Expected": "The update condition which, if specified, determines whether the specified attributes will be updated or not. The update condition must be satisfied in order for this request to be processed and the attributes to be updated." + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sdb/2009-04-15/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,15 @@ +{ + "pagination": { + "ListDomains": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxNumberOfDomains", + "result_key": "DomainNames" + }, + "Select": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Items" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 
+1,1760 @@ +{ + "metadata":{ + "apiVersion":"2010-03-31", + "endpointPrefix":"sns", + "serviceAbbreviation":"Amazon SNS", + "serviceFullName":"Amazon Simple Notification Service", + "signatureVersion":"v4", + "xmlNamespace":"http://sns.amazonaws.com/doc/2010-03-31/", + "protocol":"query" + }, + "operations":{ + "AddPermission":{ + "name":"AddPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddPermissionInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "ConfirmSubscription":{ + "name":"ConfirmSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ConfirmSubscriptionInput"}, + "output":{ + "shape":"ConfirmSubscriptionResponse", + "resultWrapper":"ConfirmSubscriptionResult" + }, + "errors":[ + { + "shape":"SubscriptionLimitExceededException", + "error":{ + "code":"SubscriptionLimitExceeded", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + 
"shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreatePlatformApplication":{ + "name":"CreatePlatformApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlatformApplicationInput"}, + "output":{ + "shape":"CreatePlatformApplicationResponse", + "resultWrapper":"CreatePlatformApplicationResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreatePlatformEndpoint":{ + "name":"CreatePlatformEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePlatformEndpointInput"}, + "output":{ + "shape":"CreateEndpointResponse", + "resultWrapper":"CreatePlatformEndpointResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "CreateTopic":{ + "name":"CreateTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTopicInput"}, + "output":{ + 
"shape":"CreateTopicResponse", + "resultWrapper":"CreateTopicResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"TopicLimitExceededException", + "error":{ + "code":"TopicLimitExceeded", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteEndpoint":{ + "name":"DeleteEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEndpointInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeletePlatformApplication":{ + "name":"DeletePlatformApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePlatformApplicationInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + 
"exception":true + } + ] + }, + "DeleteTopic":{ + "name":"DeleteTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTopicInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetEndpointAttributes":{ + "name":"GetEndpointAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEndpointAttributesInput"}, + "output":{ + "shape":"GetEndpointAttributesResponse", + "resultWrapper":"GetEndpointAttributesResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetPlatformApplicationAttributes":{ + "name":"GetPlatformApplicationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPlatformApplicationAttributesInput"}, + "output":{ + "shape":"GetPlatformApplicationAttributesResponse", + 
"resultWrapper":"GetPlatformApplicationAttributesResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetSubscriptionAttributes":{ + "name":"GetSubscriptionAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSubscriptionAttributesInput"}, + "output":{ + "shape":"GetSubscriptionAttributesResponse", + "resultWrapper":"GetSubscriptionAttributesResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "GetTopicAttributes":{ + "name":"GetTopicAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTopicAttributesInput"}, + "output":{ + "shape":"GetTopicAttributesResponse", + "resultWrapper":"GetTopicAttributesResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListEndpointsByPlatformApplication":{ + "name":"ListEndpointsByPlatformApplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEndpointsByPlatformApplicationInput"}, + "output":{ + "shape":"ListEndpointsByPlatformApplicationResponse", + "resultWrapper":"ListEndpointsByPlatformApplicationResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListPlatformApplications":{ + "name":"ListPlatformApplications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPlatformApplicationsInput"}, + "output":{ + "shape":"ListPlatformApplicationsResponse", + "resultWrapper":"ListPlatformApplicationsResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + 
"code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListSubscriptions":{ + "name":"ListSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSubscriptionsInput"}, + "output":{ + "shape":"ListSubscriptionsResponse", + "resultWrapper":"ListSubscriptionsResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListSubscriptionsByTopic":{ + "name":"ListSubscriptionsByTopic", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSubscriptionsByTopicInput"}, + "output":{ + "shape":"ListSubscriptionsByTopicResponse", + "resultWrapper":"ListSubscriptionsByTopicResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListTopics":{ + "name":"ListTopics", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"ListTopicsInput"}, + "output":{ + "shape":"ListTopicsResponse", + "resultWrapper":"ListTopicsResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "Publish":{ + "name":"Publish", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PublishInput"}, + "output":{ + "shape":"PublishResponse", + "resultWrapper":"PublishResult" + }, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterValueException", + "error":{ + "code":"ParameterValueInvalid", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"EndpointDisabledException", + "error":{ + "code":"EndpointDisabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PlatformApplicationDisabledException", + "error":{ + "code":"PlatformApplicationDisabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "RemovePermission":{ + 
"name":"RemovePermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemovePermissionInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetEndpointAttributes":{ + "name":"SetEndpointAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetEndpointAttributesInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetPlatformApplicationAttributes":{ + "name":"SetPlatformApplicationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetPlatformApplicationAttributesInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + 
"httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetSubscriptionAttributes":{ + "name":"SetSubscriptionAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetSubscriptionAttributesInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetTopicAttributes":{ + "name":"SetTopicAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTopicAttributesInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "Subscribe":{ + 
"name":"Subscribe", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubscribeInput"}, + "output":{ + "shape":"SubscribeResponse", + "resultWrapper":"SubscribeResult" + }, + "errors":[ + { + "shape":"SubscriptionLimitExceededException", + "error":{ + "code":"SubscriptionLimitExceeded", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "Unsubscribe":{ + "name":"Unsubscribe", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UnsubscribeInput"}, + "errors":[ + { + "shape":"InvalidParameterException", + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InternalErrorException", + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + "fault":true + }, + { + "shape":"AuthorizationErrorException", + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + { + "shape":"NotFoundException", + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + } + ] + } + }, + "shapes":{ + "ActionsList":{ + "type":"list", + "member":{"shape":"action"} + }, + "AddPermissionInput":{ + "type":"structure", + "required":[ + "TopicArn", + "Label", + "AWSAccountId", + "ActionName" + ], + "members":{ + 
"TopicArn":{"shape":"topicARN"}, + "Label":{"shape":"label"}, + "AWSAccountId":{"shape":"DelegatesList"}, + "ActionName":{"shape":"ActionsList"} + } + }, + "AuthorizationErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"AuthorizationError", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Binary":{"type":"blob"}, + "ConfirmSubscriptionInput":{ + "type":"structure", + "required":[ + "TopicArn", + "Token" + ], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "Token":{"shape":"token"}, + "AuthenticateOnUnsubscribe":{"shape":"authenticateOnUnsubscribe"} + } + }, + "ConfirmSubscriptionResponse":{ + "type":"structure", + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"} + } + }, + "CreateEndpointResponse":{ + "type":"structure", + "members":{ + "EndpointArn":{"shape":"String"} + } + }, + "CreatePlatformApplicationInput":{ + "type":"structure", + "required":[ + "Name", + "Platform", + "Attributes" + ], + "members":{ + "Name":{"shape":"String"}, + "Platform":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "CreatePlatformApplicationResponse":{ + "type":"structure", + "members":{ + "PlatformApplicationArn":{"shape":"String"} + } + }, + "CreatePlatformEndpointInput":{ + "type":"structure", + "required":[ + "PlatformApplicationArn", + "Token" + ], + "members":{ + "PlatformApplicationArn":{"shape":"String"}, + "Token":{"shape":"String"}, + "CustomUserData":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "CreateTopicInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"topicName"} + } + }, + "CreateTopicResponse":{ + "type":"structure", + "members":{ + "TopicArn":{"shape":"topicARN"} + } + }, + "DelegatesList":{ + "type":"list", + "member":{"shape":"delegate"} + }, + "DeleteEndpointInput":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{"shape":"String"} + 
} + }, + "DeletePlatformApplicationInput":{ + "type":"structure", + "required":["PlatformApplicationArn"], + "members":{ + "PlatformApplicationArn":{"shape":"String"} + } + }, + "DeleteTopicInput":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"topicARN"} + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "EndpointArn":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "EndpointDisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"EndpointDisabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "GetEndpointAttributesInput":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{"shape":"String"} + } + }, + "GetEndpointAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"MapStringToString"} + } + }, + "GetPlatformApplicationAttributesInput":{ + "type":"structure", + "required":["PlatformApplicationArn"], + "members":{ + "PlatformApplicationArn":{"shape":"String"} + } + }, + "GetPlatformApplicationAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"MapStringToString"} + } + }, + "GetSubscriptionAttributesInput":{ + "type":"structure", + "required":["SubscriptionArn"], + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"} + } + }, + "GetSubscriptionAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"SubscriptionAttributesMap"} + } + }, + "GetTopicAttributesInput":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"topicARN"} + } + }, + "GetTopicAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{"shape":"TopicAttributesMap"} + } + }, + "InternalErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"InternalError", + "httpStatusCode":500 + }, + "exception":true, + 
"fault":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"InvalidParameter", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"ParameterValueInvalid", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ListEndpointsByPlatformApplicationInput":{ + "type":"structure", + "required":["PlatformApplicationArn"], + "members":{ + "PlatformApplicationArn":{"shape":"String"}, + "NextToken":{"shape":"String"} + } + }, + "ListEndpointsByPlatformApplicationResponse":{ + "type":"structure", + "members":{ + "Endpoints":{"shape":"ListOfEndpoints"}, + "NextToken":{"shape":"String"} + } + }, + "ListOfEndpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "ListOfPlatformApplications":{ + "type":"list", + "member":{"shape":"PlatformApplication"} + }, + "ListPlatformApplicationsInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"String"} + } + }, + "ListPlatformApplicationsResponse":{ + "type":"structure", + "members":{ + "PlatformApplications":{"shape":"ListOfPlatformApplications"}, + "NextToken":{"shape":"String"} + } + }, + "ListSubscriptionsByTopicInput":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "NextToken":{"shape":"nextToken"} + } + }, + "ListSubscriptionsByTopicResponse":{ + "type":"structure", + "members":{ + "Subscriptions":{"shape":"SubscriptionsList"}, + "NextToken":{"shape":"nextToken"} + } + }, + "ListSubscriptionsInput":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"nextToken"} + } + }, + "ListSubscriptionsResponse":{ + "type":"structure", + "members":{ + "Subscriptions":{"shape":"SubscriptionsList"}, + "NextToken":{"shape":"nextToken"} + } + }, + "ListTopicsInput":{ + "type":"structure", + "members":{ + 
"NextToken":{"shape":"nextToken"} + } + }, + "ListTopicsResponse":{ + "type":"structure", + "members":{ + "Topics":{"shape":"TopicsList"}, + "NextToken":{"shape":"nextToken"} + } + }, + "MapStringToString":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "MessageAttributeMap":{ + "type":"map", + "key":{ + "shape":"String", + "locationName":"Name" + }, + "value":{ + "shape":"MessageAttributeValue", + "locationName":"Value" + } + }, + "MessageAttributeValue":{ + "type":"structure", + "required":["DataType"], + "members":{ + "DataType":{"shape":"String"}, + "StringValue":{"shape":"String"}, + "BinaryValue":{"shape":"Binary"} + } + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"NotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "PlatformApplication":{ + "type":"structure", + "members":{ + "PlatformApplicationArn":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "PlatformApplicationDisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"PlatformApplicationDisabled", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PublishInput":{ + "type":"structure", + "required":["Message"], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "TargetArn":{"shape":"String"}, + "Message":{"shape":"message"}, + "Subject":{"shape":"subject"}, + "MessageStructure":{"shape":"messageStructure"}, + "MessageAttributes":{"shape":"MessageAttributeMap"} + } + }, + "PublishResponse":{ + "type":"structure", + "members":{ + "MessageId":{"shape":"messageId"} + } + }, + "RemovePermissionInput":{ + "type":"structure", + "required":[ + "TopicArn", + "Label" + ], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "Label":{"shape":"label"} + } + }, + "SetEndpointAttributesInput":{ + "type":"structure", + "required":[ + "EndpointArn", + "Attributes" + ], 
+ "members":{ + "EndpointArn":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "SetPlatformApplicationAttributesInput":{ + "type":"structure", + "required":[ + "PlatformApplicationArn", + "Attributes" + ], + "members":{ + "PlatformApplicationArn":{"shape":"String"}, + "Attributes":{"shape":"MapStringToString"} + } + }, + "SetSubscriptionAttributesInput":{ + "type":"structure", + "required":[ + "SubscriptionArn", + "AttributeName" + ], + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"}, + "AttributeName":{"shape":"attributeName"}, + "AttributeValue":{"shape":"attributeValue"} + } + }, + "SetTopicAttributesInput":{ + "type":"structure", + "required":[ + "TopicArn", + "AttributeName" + ], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "AttributeName":{"shape":"attributeName"}, + "AttributeValue":{"shape":"attributeValue"} + } + }, + "String":{"type":"string"}, + "SubscribeInput":{ + "type":"structure", + "required":[ + "TopicArn", + "Protocol" + ], + "members":{ + "TopicArn":{"shape":"topicARN"}, + "Protocol":{"shape":"protocol"}, + "Endpoint":{"shape":"endpoint"} + } + }, + "SubscribeResponse":{ + "type":"structure", + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"} + } + }, + "Subscription":{ + "type":"structure", + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"}, + "Owner":{"shape":"account"}, + "Protocol":{"shape":"protocol"}, + "Endpoint":{"shape":"endpoint"}, + "TopicArn":{"shape":"topicARN"} + } + }, + "SubscriptionAttributesMap":{ + "type":"map", + "key":{"shape":"attributeName"}, + "value":{"shape":"attributeValue"} + }, + "SubscriptionLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"SubscriptionLimitExceeded", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "SubscriptionsList":{ + "type":"list", + "member":{"shape":"Subscription"} + }, + "Topic":{ + "type":"structure", + "members":{ + 
"TopicArn":{"shape":"topicARN"} + } + }, + "TopicAttributesMap":{ + "type":"map", + "key":{"shape":"attributeName"}, + "value":{"shape":"attributeValue"} + }, + "TopicLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "error":{ + "code":"TopicLimitExceeded", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "TopicsList":{ + "type":"list", + "member":{"shape":"Topic"} + }, + "UnsubscribeInput":{ + "type":"structure", + "required":["SubscriptionArn"], + "members":{ + "SubscriptionArn":{"shape":"subscriptionARN"} + } + }, + "account":{"type":"string"}, + "action":{"type":"string"}, + "attributeName":{"type":"string"}, + "attributeValue":{"type":"string"}, + "authenticateOnUnsubscribe":{"type":"string"}, + "delegate":{"type":"string"}, + "endpoint":{"type":"string"}, + "label":{"type":"string"}, + "message":{"type":"string"}, + "messageId":{"type":"string"}, + "messageStructure":{"type":"string"}, + "nextToken":{"type":"string"}, + "protocol":{"type":"string"}, + "string":{"type":"string"}, + "subject":{"type":"string"}, + "subscriptionARN":{"type":"string"}, + "token":{"type":"string"}, + "topicARN":{"type":"string"}, + "topicName":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,564 @@ +{ + "operations": { + "AddPermission": "

    Adds a statement to a topic's access control policy, granting access for the specified AWS accounts to the specified actions.

    ", + "ConfirmSubscription": "

    Verifies an endpoint owner's intent to receive messages by validating the token sent to the endpoint by an earlier Subscribe action. If the token is valid, the action creates a new subscription and returns its Amazon Resource Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe flag is set to \"true\".

    ", + "CreatePlatformApplication": "

    Creates a platform application object for one of the supported push notification services, such as APNS and GCM, to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action. The PlatformPrincipal is received from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is \"SSL certificate\". For GCM, PlatformPrincipal is not applicable. For ADM, PlatformPrincipal is \"client id\". The PlatformCredential is also received from the notification service. For APNS/APNS_SANDBOX, PlatformCredential is \"private key\". For GCM, PlatformCredential is \"API key\". For ADM, PlatformCredential is \"client secret\". The PlatformApplicationArn that is returned when using CreatePlatformApplication is then used as an attribute for the CreatePlatformEndpoint action. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "CreatePlatformEndpoint": "

    Creates an endpoint for a device and mobile app on one of the supported push notification services, such as GCM and APNS. CreatePlatformEndpoint requires the PlatformApplicationArn that is returned from CreatePlatformApplication. The EndpointArn that is returned when using CreatePlatformEndpoint can then be used by the Publish action to send a message to a mobile app or by the Subscribe action for subscription to a topic. The CreatePlatformEndpoint action is idempotent, so if the requester already owns an endpoint with the same device token and attributes, that endpoint's ARN is returned without creating a new endpoint. For more information, see Using Amazon SNS Mobile Push Notifications.

    When using CreatePlatformEndpoint with Baidu, two attributes must be provided: ChannelId and UserId. The token field must also contain the ChannelId. For more information, see Creating an Amazon SNS Endpoint for Baidu.

    ", + "CreateTopic": "

    Creates a topic to which notifications can be published. Users can create at most 3000 topics. For more information, see http://aws.amazon.com/sns. This action is idempotent, so if the requester already owns a topic with the specified name, that topic's ARN is returned without creating a new topic.

    ", + "DeleteEndpoint": "

    Deletes the endpoint from Amazon SNS. This action is idempotent. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "DeletePlatformApplication": "

    Deletes a platform application object for one of the supported push notification services, such as APNS and GCM. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "DeleteTopic": "

    Deletes a topic and all its subscriptions. Deleting a topic might prevent some messages previously sent to the topic from being delivered to subscribers. This action is idempotent, so deleting a topic that does not exist does not result in an error.

    ", + "GetEndpointAttributes": "

    Retrieves the endpoint attributes for a device on one of the supported push notification services, such as GCM and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "GetPlatformApplicationAttributes": "

    Retrieves the attributes of the platform application object for the supported push notification services, such as APNS and GCM. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "GetSubscriptionAttributes": "

    Returns all of the properties of a subscription.

    ", + "GetTopicAttributes": "

    Returns all of the properties of a topic. Topic properties returned might differ based on the authorization of the user.

    ", + "ListEndpointsByPlatformApplication": "

    Lists the endpoints and endpoint attributes for devices in a supported push notification service, such as GCM and APNS. The results for ListEndpointsByPlatformApplication are paginated and return a limited list of endpoints, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListEndpointsByPlatformApplication again using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "ListPlatformApplications": "

    Lists the platform application objects for the supported push notification services, such as APNS and GCM. The results for ListPlatformApplications are paginated and return a limited list of applications, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListPlatformApplications using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "ListSubscriptions": "

    Returns a list of the requester's subscriptions. Each call returns a limited list of subscriptions, up to 100. If there are more subscriptions, a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptions call to get further results.

    ", + "ListSubscriptionsByTopic": "

    Returns a list of the subscriptions to a specific topic. Each call returns a limited list of subscriptions, up to 100. If there are more subscriptions, a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic call to get further results.

    ", + "ListTopics": "

    Returns a list of the requester's topics. Each call returns a limited list of topics, up to 100. If there are more topics, a NextToken is also returned. Use the NextToken parameter in a new ListTopics call to get further results.

    ", + "Publish": "

    Sends a message to all of a topic's subscribed endpoints. When a messageId is returned, the message has been saved and Amazon SNS will attempt to deliver it to the topic's subscribers shortly. The format of the outgoing message to each subscribed endpoint depends on the notification protocol selected.

    To use the Publish action for sending a message to a mobile endpoint, such as an app on a Kindle device or mobile phone, you must specify the EndpointArn. The EndpointArn is returned when making a call with the CreatePlatformEndpoint action. The second example below shows a request and response for publishing to a mobile endpoint.

    ", + "RemovePermission": "

    Removes a statement from a topic's access control policy.

    ", + "SetEndpointAttributes": "

    Sets the attributes for an endpoint for a device on one of the supported push notification services, such as GCM and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "SetPlatformApplicationAttributes": "

    Sets the attributes of the platform application object for the supported push notification services, such as APNS and GCM. For more information, see Using Amazon SNS Mobile Push Notifications.

    ", + "SetSubscriptionAttributes": "

    Allows a subscription owner to set an attribute of the topic to a new value.

    ", + "SetTopicAttributes": "

    Allows a topic owner to set an attribute of the topic to a new value.

    ", + "Subscribe": "

    Prepares to subscribe an endpoint by sending the endpoint a confirmation message. To actually create a subscription, the endpoint owner must call the ConfirmSubscription action with the token from the confirmation message. Confirmation tokens are valid for three days.

    ", + "Unsubscribe": "

    Deletes a subscription. If the subscription requires authentication for deletion, only the owner of the subscription or the topic's owner can unsubscribe, and an AWS signature is required. If the Unsubscribe call does not require authentication and the requester is not the subscription owner, a final cancellation message is delivered to the endpoint, so that the endpoint owner can easily resubscribe to the topic if the Unsubscribe request was unintended.

    " + }, + "service": "Amazon Simple Notification Service

    Amazon Simple Notification Service (Amazon SNS) is a web service that enables you to build distributed web-enabled applications. Applications can use Amazon SNS to easily push real-time notification messages to interested subscribers over multiple delivery protocols. For more information about this product see http://aws.amazon.com/sns. For detailed information about Amazon SNS features and their associated API calls, see the Amazon SNS Developer Guide.

    We also provide SDKs that enable you to access Amazon SNS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as: cryptographically signing your service requests, retrying requests, and handling error responses. For a list of available SDKs, go to Tools for Amazon Web Services.

    ", + "shapes": { + "ActionsList": { + "base": null, + "refs": { + "AddPermissionInput$ActionName": "

    The action you want to allow for the specified principal(s).

    Valid values: any Amazon SNS action name.

    " + } + }, + "AddPermissionInput": { + "base": null, + "refs": { + } + }, + "AuthorizationErrorException": { + "base": "

    Indicates that the user has been denied access to the requested resource.

    ", + "refs": { + } + }, + "Binary": { + "base": null, + "refs": { + "MessageAttributeValue$BinaryValue": "

    Binary type attributes can store any binary data, for example, compressed data, encrypted data, or images.

    " + } + }, + "ConfirmSubscriptionInput": { + "base": "Input for ConfirmSubscription action.", + "refs": { + } + }, + "ConfirmSubscriptionResponse": { + "base": "Response for ConfirmSubscriptions action.", + "refs": { + } + }, + "CreateEndpointResponse": { + "base": "

    Response from CreateEndpoint action.

    ", + "refs": { + } + }, + "CreatePlatformApplicationInput": { + "base": "

    Input for CreatePlatformApplication action.

    ", + "refs": { + } + }, + "CreatePlatformApplicationResponse": { + "base": "

    Response from CreatePlatformApplication action.

    ", + "refs": { + } + }, + "CreatePlatformEndpointInput": { + "base": "

    Input for CreatePlatformEndpoint action.

    ", + "refs": { + } + }, + "CreateTopicInput": { + "base": "

    Input for CreateTopic action.

    ", + "refs": { + } + }, + "CreateTopicResponse": { + "base": "

    Response from CreateTopic action.

    ", + "refs": { + } + }, + "DelegatesList": { + "base": null, + "refs": { + "AddPermissionInput$AWSAccountId": "

    The AWS account IDs of the users (principals) who will be given access to the specified actions. The users must have AWS accounts, but do not need to be signed up for this service.

    " + } + }, + "DeleteEndpointInput": { + "base": "

    Input for DeleteEndpoint action.

    ", + "refs": { + } + }, + "DeletePlatformApplicationInput": { + "base": "

    Input for DeletePlatformApplication action.

    ", + "refs": { + } + }, + "DeleteTopicInput": { + "base": null, + "refs": { + } + }, + "Endpoint": { + "base": "

    Endpoint for mobile app and device.

    ", + "refs": { + "ListOfEndpoints$member": null + } + }, + "EndpointDisabledException": { + "base": "

    Exception error indicating endpoint disabled.

    ", + "refs": { + } + }, + "GetEndpointAttributesInput": { + "base": "

    Input for GetEndpointAttributes action.

    ", + "refs": { + } + }, + "GetEndpointAttributesResponse": { + "base": "

    Response from GetEndpointAttributes of the EndpointArn.

    ", + "refs": { + } + }, + "GetPlatformApplicationAttributesInput": { + "base": "

    Input for GetPlatformApplicationAttributes action.

    ", + "refs": { + } + }, + "GetPlatformApplicationAttributesResponse": { + "base": "

    Response for GetPlatformApplicationAttributes action.

    ", + "refs": { + } + }, + "GetSubscriptionAttributesInput": { + "base": "

    Input for GetSubscriptionAttributes.

    ", + "refs": { + } + }, + "GetSubscriptionAttributesResponse": { + "base": "

    Response for GetSubscriptionAttributes action.

    ", + "refs": { + } + }, + "GetTopicAttributesInput": { + "base": "

    Input for GetTopicAttributes action.

    ", + "refs": { + } + }, + "GetTopicAttributesResponse": { + "base": "

    Response for GetTopicAttributes action.

    ", + "refs": { + } + }, + "InternalErrorException": { + "base": "

    Indicates an internal service error.

    ", + "refs": { + } + }, + "InvalidParameterException": { + "base": "

    Indicates that a request parameter does not comply with the associated constraints.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    Indicates that a request parameter does not comply with the associated constraints.

    ", + "refs": { + } + }, + "ListEndpointsByPlatformApplicationInput": { + "base": "

    Input for ListEndpointsByPlatformApplication action.

    ", + "refs": { + } + }, + "ListEndpointsByPlatformApplicationResponse": { + "base": "

    Response for ListEndpointsByPlatformApplication action.

    ", + "refs": { + } + }, + "ListOfEndpoints": { + "base": null, + "refs": { + "ListEndpointsByPlatformApplicationResponse$Endpoints": "

    Endpoints returned for ListEndpointsByPlatformApplication action.

    " + } + }, + "ListOfPlatformApplications": { + "base": null, + "refs": { + "ListPlatformApplicationsResponse$PlatformApplications": "

    Platform applications returned when calling ListPlatformApplications action.

    " + } + }, + "ListPlatformApplicationsInput": { + "base": "

    Input for ListPlatformApplications action.

    ", + "refs": { + } + }, + "ListPlatformApplicationsResponse": { + "base": "

    Response for ListPlatformApplications action.

    ", + "refs": { + } + }, + "ListSubscriptionsByTopicInput": { + "base": "

    Input for ListSubscriptionsByTopic action.

    ", + "refs": { + } + }, + "ListSubscriptionsByTopicResponse": { + "base": "

    Response for ListSubscriptionsByTopic action.

    ", + "refs": { + } + }, + "ListSubscriptionsInput": { + "base": "Input for ListSubscriptions action.", + "refs": { + } + }, + "ListSubscriptionsResponse": { + "base": "

    Response for ListSubscriptions action

    ", + "refs": { + } + }, + "ListTopicsInput": { + "base": null, + "refs": { + } + }, + "ListTopicsResponse": { + "base": "

    Response for ListTopics action.

    ", + "refs": { + } + }, + "MapStringToString": { + "base": null, + "refs": { + "CreatePlatformApplicationInput$Attributes": "

    For a list of attributes, see SetPlatformApplicationAttributes

    ", + "CreatePlatformEndpointInput$Attributes": "

    For a list of attributes, see SetEndpointAttributes.

    ", + "Endpoint$Attributes": "

    Attributes for endpoint.

    ", + "GetEndpointAttributesResponse$Attributes": "

    Attributes include the following:

    • CustomUserData -- arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.
    • Enabled -- flag that enables/disables delivery to the endpoint. Amazon SNS will set this to false when a notification service indicates to Amazon SNS that the endpoint is invalid. Users can set it back to true, typically after updating Token.
    • Token -- device token, also referred to as a registration id, for an app and mobile device. This is returned from the notification service when an app and mobile device are registered with the notification service.
    ", + "GetPlatformApplicationAttributesResponse$Attributes": "

    Attributes include the following:

    • EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications should be sent.
    • EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications should be sent.
    • EventEndpointUpdated -- Topic ARN to which EndpointUpdate event notifications should be sent.
    • EventDeliveryFailure -- Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
    ", + "PlatformApplication$Attributes": "

    Attributes for platform application object.

    ", + "SetEndpointAttributesInput$Attributes": "

    A map of the endpoint attributes. Attributes in this map include the following:

    • CustomUserData -- arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.
    • Enabled -- flag that enables/disables delivery to the endpoint. Amazon SNS will set this to false when a notification service indicates to Amazon SNS that the endpoint is invalid. Users can set it back to true, typically after updating Token.
    • Token -- device token, also referred to as a registration id, for an app and mobile device. This is returned from the notification service when an app and mobile device are registered with the notification service.
    ", + "SetPlatformApplicationAttributesInput$Attributes": "

    A map of the platform application attributes. Attributes in this map include the following:

    • PlatformCredential -- The credential received from the notification service. For APNS/APNS_SANDBOX, PlatformCredential is \"private key\". For GCM, PlatformCredential is \"API key\". For ADM, PlatformCredential is \"client secret\".
    • PlatformPrincipal -- The principal received from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is \"SSL certificate\". For GCM, PlatformPrincipal is not applicable. For ADM, PlatformPrincipal is \"client id\".
    • EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications should be sent.
    • EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications should be sent.
    • EventEndpointUpdated -- Topic ARN to which EndpointUpdate event notifications should be sent.
    • EventDeliveryFailure -- Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.
    " + } + }, + "MessageAttributeMap": { + "base": null, + "refs": { + "PublishInput$MessageAttributes": "

    Message attributes for Publish action.

    " + } + }, + "MessageAttributeValue": { + "base": "

    The user-specified message attribute value. For string data types, the value attribute has the same restrictions on the content as the message body. For more information, see Publish.

    Name, type, and value must not be empty or null. In addition, the message body should not be empty or null. All parts of the message attribute, including name, type, and value, are included in the message size restriction, which is currently 256 KB (262,144 bytes). For more information, see Using Amazon SNS Message Attributes.

    ", + "refs": { + "MessageAttributeMap$value": null + } + }, + "NotFoundException": { + "base": "

    Indicates that the requested resource does not exist.

    ", + "refs": { + } + }, + "PlatformApplication": { + "base": "

    Platform application object.

    ", + "refs": { + "ListOfPlatformApplications$member": null + } + }, + "PlatformApplicationDisabledException": { + "base": "

    Exception error indicating platform application disabled.

    ", + "refs": { + } + }, + "PublishInput": { + "base": "

    Input for Publish action.

    ", + "refs": { + } + }, + "PublishResponse": { + "base": "

    Response for Publish action.

    ", + "refs": { + } + }, + "RemovePermissionInput": { + "base": "

    Input for RemovePermission action.

    ", + "refs": { + } + }, + "SetEndpointAttributesInput": { + "base": "

    Input for SetEndpointAttributes action.

    ", + "refs": { + } + }, + "SetPlatformApplicationAttributesInput": { + "base": "

    Input for SetPlatformApplicationAttributes action.

    ", + "refs": { + } + }, + "SetSubscriptionAttributesInput": { + "base": "

    Input for SetSubscriptionAttributes action.

    ", + "refs": { + } + }, + "SetTopicAttributesInput": { + "base": "

    Input for SetTopicAttributes action.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "CreateEndpointResponse$EndpointArn": "

    EndpointArn returned from CreateEndpoint action.

    ", + "CreatePlatformApplicationInput$Name": "

    Application names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, hyphens, and periods, and must be between 1 and 256 characters long.

    ", + "CreatePlatformApplicationInput$Platform": "

    The following platforms are supported: ADM (Amazon Device Messaging), APNS (Apple Push Notification Service), APNS_SANDBOX, and GCM (Google Cloud Messaging).

    ", + "CreatePlatformApplicationResponse$PlatformApplicationArn": "

    PlatformApplicationArn is returned.

    ", + "CreatePlatformEndpointInput$PlatformApplicationArn": "

    PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint.

    ", + "CreatePlatformEndpointInput$Token": "

    Unique identifier created by the notification service for an app on a device. The specific name for Token will vary, depending on which notification service is being used. For example, when using APNS as the notification service, you need the device token. Alternatively, when using GCM or ADM, the device token equivalent is called the registration ID.

    ", + "CreatePlatformEndpointInput$CustomUserData": "

    Arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.

    ", + "DeleteEndpointInput$EndpointArn": "

    EndpointArn of endpoint to delete.

    ", + "DeletePlatformApplicationInput$PlatformApplicationArn": "

    PlatformApplicationArn of platform application object to delete.

    ", + "Endpoint$EndpointArn": "

    EndpointArn for mobile app and device.

    ", + "GetEndpointAttributesInput$EndpointArn": "

    EndpointArn for GetEndpointAttributes input.

    ", + "GetPlatformApplicationAttributesInput$PlatformApplicationArn": "

    PlatformApplicationArn for GetPlatformApplicationAttributesInput.

    ", + "ListEndpointsByPlatformApplicationInput$PlatformApplicationArn": "

    PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.

    ", + "ListEndpointsByPlatformApplicationInput$NextToken": "

    NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.

    ", + "ListEndpointsByPlatformApplicationResponse$NextToken": "

    NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.

    ", + "ListPlatformApplicationsInput$NextToken": "

    NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.

    ", + "ListPlatformApplicationsResponse$NextToken": "

    NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.

    ", + "MapStringToString$key": null, + "MapStringToString$value": null, + "MessageAttributeMap$key": null, + "MessageAttributeValue$DataType": "

    Amazon SNS supports the following logical data types: String, Number, and Binary. For more information, see Message Attribute Data Types.

    ", + "MessageAttributeValue$StringValue": "

    Strings are Unicode with UTF8 binary encoding. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

    ", + "PlatformApplication$PlatformApplicationArn": "

    PlatformApplicationArn for platform application object.

    ", + "PublishInput$TargetArn": "

    Either TopicArn or EndpointArn, but not both.

    ", + "SetEndpointAttributesInput$EndpointArn": "

    EndpointArn used for SetEndpointAttributes action.

    ", + "SetPlatformApplicationAttributesInput$PlatformApplicationArn": "

    PlatformApplicationArn for SetPlatformApplicationAttributes action.

    " + } + }, + "SubscribeInput": { + "base": "Input for Subscribe action.", + "refs": { + } + }, + "SubscribeResponse": { + "base": "Response for Subscribe action.", + "refs": { + } + }, + "Subscription": { + "base": "

    A wrapper type for the attributes of an Amazon SNS subscription.

    ", + "refs": { + "SubscriptionsList$member": null + } + }, + "SubscriptionAttributesMap": { + "base": null, + "refs": { + "GetSubscriptionAttributesResponse$Attributes": "

    A map of the subscription's attributes. Attributes in this map include the following:

    • SubscriptionArn -- the subscription's ARN
    • TopicArn -- the topic ARN that the subscription is associated with
    • Owner -- the AWS account ID of the subscription's owner
    • ConfirmationWasAuthenticated -- true if the subscription confirmation request was authenticated
    • DeliveryPolicy -- the JSON serialization of the subscription's delivery policy
    • EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults
    " + } + }, + "SubscriptionLimitExceededException": { + "base": "

    Indicates that the customer already owns the maximum allowed number of subscriptions.

    ", + "refs": { + } + }, + "SubscriptionsList": { + "base": null, + "refs": { + "ListSubscriptionsByTopicResponse$Subscriptions": "

    A list of subscriptions.

    ", + "ListSubscriptionsResponse$Subscriptions": "

    A list of subscriptions.

    " + } + }, + "Topic": { + "base": "

    A wrapper type for the topic's Amazon Resource Name (ARN). To retrieve a topic's attributes, use GetTopicAttributes.

    ", + "refs": { + "TopicsList$member": null + } + }, + "TopicAttributesMap": { + "base": null, + "refs": { + "GetTopicAttributesResponse$Attributes": "

    A map of the topic's attributes. Attributes in this map include the following:

    • TopicArn -- the topic's ARN
    • Owner -- the AWS account ID of the topic's owner
    • Policy -- the JSON serialization of the topic's access control policy
    • DisplayName -- the human-readable name used in the \"From\" field for notifications to email and email-json endpoints
    • SubscriptionsPending -- the number of subscriptions pending confirmation on this topic
    • SubscriptionsConfirmed -- the number of confirmed subscriptions on this topic
    • SubscriptionsDeleted -- the number of deleted subscriptions on this topic
    • DeliveryPolicy -- the JSON serialization of the topic's delivery policy
    • EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery policy that takes into account system defaults
    " + } + }, + "TopicLimitExceededException": { + "base": "

    Indicates that the customer already owns the maximum allowed number of topics.

    ", + "refs": { + } + }, + "TopicsList": { + "base": null, + "refs": { + "ListTopicsResponse$Topics": "

    A list of topic ARNs.

    " + } + }, + "UnsubscribeInput": { + "base": "

    Input for Unsubscribe action.

    ", + "refs": { + } + }, + "account": { + "base": null, + "refs": { + "Subscription$Owner": "

    The subscription's owner.

    " + } + }, + "action": { + "base": null, + "refs": { + "ActionsList$member": null + } + }, + "attributeName": { + "base": null, + "refs": { + "SetSubscriptionAttributesInput$AttributeName": "

    The name of the attribute you want to set. Only a subset of the subscriptions attributes are mutable.

    Valid values: DeliveryPolicy | RawMessageDelivery

    ", + "SetTopicAttributesInput$AttributeName": "

    The name of the attribute you want to set. Only a subset of the topic's attributes are mutable.

    Valid values: Policy | DisplayName | DeliveryPolicy

    ", + "SubscriptionAttributesMap$key": null, + "TopicAttributesMap$key": null + } + }, + "attributeValue": { + "base": null, + "refs": { + "SetSubscriptionAttributesInput$AttributeValue": "

    The new value for the attribute in JSON format.

    ", + "SetTopicAttributesInput$AttributeValue": "

    The new value for the attribute.

    ", + "SubscriptionAttributesMap$value": null, + "TopicAttributesMap$value": null + } + }, + "authenticateOnUnsubscribe": { + "base": null, + "refs": { + "ConfirmSubscriptionInput$AuthenticateOnUnsubscribe": "

    Disallows unauthenticated unsubscribes of the subscription. If the value of this parameter is true and the request has an AWS signature, then only the topic owner and the subscription owner can unsubscribe the endpoint. The unsubscribe action requires AWS authentication.

    " + } + }, + "delegate": { + "base": null, + "refs": { + "DelegatesList$member": null + } + }, + "endpoint": { + "base": null, + "refs": { + "SubscribeInput$Endpoint": "

    The endpoint that you want to receive notifications. Endpoints vary by protocol:

    • For the http protocol, the endpoint is an URL beginning with \"http://\"
    • For the https protocol, the endpoint is a URL beginning with \"https://\"
    • For the email protocol, the endpoint is an email address
    • For the email-json protocol, the endpoint is an email address
    • For the sms protocol, the endpoint is a phone number of an SMS-enabled device
    • For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue
    • For the application protocol, the endpoint is the EndpointArn of a mobile app and device.
    ", + "Subscription$Endpoint": "

    The subscription's endpoint (format depends on the protocol).

    " + } + }, + "label": { + "base": null, + "refs": { + "AddPermissionInput$Label": "

    A unique identifier for the new policy statement.

    ", + "RemovePermissionInput$Label": "

    The unique label of the statement you want to remove.

    " + } + }, + "message": { + "base": null, + "refs": { + "PublishInput$Message": "

    The message you want to send to the topic.

    If you want to send the same message to all transport protocols, include the text of the message as a String value.

    If you want to send different messages for each transport protocol, set the value of the MessageStructure parameter to json and use a JSON object for the Message parameter. See the Examples section for the format of the JSON object.

    Constraints: Messages must be UTF-8 encoded strings at most 256 KB in size (262144 bytes, not 262144 characters).

    JSON-specific constraints:

    • Keys in the JSON object that correspond to supported transport protocols must have simple JSON string values.
    • The values will be parsed (unescaped) before they are used in outgoing messages.
    • Outbound notifications are JSON encoded (meaning that the characters will be reescaped for sending).
    • Values have a minimum length of 0 (the empty string, \"\", is allowed).
    • Values have a maximum length bounded by the overall message size (so, including multiple protocols may limit message sizes).
    • Non-string values will cause the key to be ignored.
    • Keys that do not correspond to supported transport protocols are ignored.
    • Duplicate keys are not allowed.
    • Failure to parse or validate any key or value in the message will cause the Publish call to return an error (no partial delivery).

    " + } + }, + "messageId": { + "base": null, + "refs": { + "PublishResponse$MessageId": "

    Unique identifier assigned to the published message.

    Length Constraint: Maximum 100 characters

    " + } + }, + "messageStructure": { + "base": null, + "refs": { + "PublishInput$MessageStructure": "

    Set MessageStructure to json if you want to send a different message for each protocol. For example, using one publish action, you can send a short message to your SMS subscribers and a longer message to your email subscribers. If you set MessageStructure to json, the value of the Message parameter must:

    • be a syntactically valid JSON object; and
    • contain at least a top-level JSON key of \"default\" with a value that is a string.

    You can define other top-level keys that define the message you want to send to a specific transport protocol (e.g., \"http\").

    For information about sending different messages for each protocol using the AWS Management Console, go to Create Different Messages for Each Protocol in the Amazon Simple Notification Service Getting Started Guide.

    Valid value: json

    " + } + }, + "nextToken": { + "base": null, + "refs": { + "ListSubscriptionsByTopicInput$NextToken": "

    Token returned by the previous ListSubscriptionsByTopic request.

    ", + "ListSubscriptionsByTopicResponse$NextToken": "

    Token to pass along to the next ListSubscriptionsByTopic request. This element is returned if there are more subscriptions to retrieve.

    ", + "ListSubscriptionsInput$NextToken": "

    Token returned by the previous ListSubscriptions request.

    ", + "ListSubscriptionsResponse$NextToken": "

    Token to pass along to the next ListSubscriptions request. This element is returned if there are more subscriptions to retrieve.

    ", + "ListTopicsInput$NextToken": "

    Token returned by the previous ListTopics request.

    ", + "ListTopicsResponse$NextToken": "

    Token to pass along to the next ListTopics request. This element is returned if there are additional topics to retrieve.

    " + } + }, + "protocol": { + "base": null, + "refs": { + "SubscribeInput$Protocol": "

    The protocol you want to use. Supported protocols include:

    • http -- delivery of JSON-encoded message via HTTP POST
    • https -- delivery of JSON-encoded message via HTTPS POST
    • email -- delivery of message via SMTP
    • email-json -- delivery of JSON-encoded message via SMTP
    • sms -- delivery of message via SMS
    • sqs -- delivery of JSON-encoded message to an Amazon SQS queue
    • application -- delivery of JSON-encoded message to an EndpointArn for a mobile app and device.
    ", + "Subscription$Protocol": "

    The subscription's protocol.

    " + } + }, + "string": { + "base": null, + "refs": { + "AuthorizationErrorException$message": null, + "EndpointDisabledException$message": "

    Message for endpoint disabled.

    ", + "InternalErrorException$message": null, + "InvalidParameterException$message": null, + "InvalidParameterValueException$message": null, + "NotFoundException$message": null, + "PlatformApplicationDisabledException$message": "

    Message for platform application disabled.

    ", + "SubscriptionLimitExceededException$message": null, + "TopicLimitExceededException$message": null + } + }, + "subject": { + "base": null, + "refs": { + "PublishInput$Subject": "

    Optional parameter to be used as the \"Subject\" line when the message is delivered to email endpoints. This field will also be included, if present, in the standard JSON messages delivered to other endpoints.

    Constraints: Subjects must be ASCII text that begins with a letter, number, or punctuation mark; must not include line breaks or control characters; and must be less than 100 characters long.

    " + } + }, + "subscriptionARN": { + "base": null, + "refs": { + "ConfirmSubscriptionResponse$SubscriptionArn": "

    The ARN of the created subscription.

    ", + "GetSubscriptionAttributesInput$SubscriptionArn": "

    The ARN of the subscription whose properties you want to get.

    ", + "SetSubscriptionAttributesInput$SubscriptionArn": "

    The ARN of the subscription to modify.

    ", + "SubscribeResponse$SubscriptionArn": "

    The ARN of the subscription, if the service was able to create a subscription immediately (without requiring endpoint owner confirmation).

    ", + "Subscription$SubscriptionArn": "

    The subscription's ARN.

    ", + "UnsubscribeInput$SubscriptionArn": "

    The ARN of the subscription to be deleted.

    " + } + }, + "token": { + "base": null, + "refs": { + "ConfirmSubscriptionInput$Token": "

    Short-lived token sent to an endpoint during the Subscribe action.

    " + } + }, + "topicARN": { + "base": null, + "refs": { + "AddPermissionInput$TopicArn": "

    The ARN of the topic whose access control policy you wish to modify.

    ", + "ConfirmSubscriptionInput$TopicArn": "

    The ARN of the topic for which you wish to confirm a subscription.

    ", + "CreateTopicResponse$TopicArn": "

    The Amazon Resource Name (ARN) assigned to the created topic.

    ", + "DeleteTopicInput$TopicArn": "

    The ARN of the topic you want to delete.

    ", + "GetTopicAttributesInput$TopicArn": "

    The ARN of the topic whose properties you want to get.

    ", + "ListSubscriptionsByTopicInput$TopicArn": "

    The ARN of the topic for which you wish to find subscriptions.

    ", + "PublishInput$TopicArn": "

    The topic you want to publish to.

    ", + "RemovePermissionInput$TopicArn": "

    The ARN of the topic whose access control policy you wish to modify.

    ", + "SetTopicAttributesInput$TopicArn": "

    The ARN of the topic to modify.

    ", + "SubscribeInput$TopicArn": "

    The ARN of the topic you want to subscribe to.

    ", + "Subscription$TopicArn": "

    The ARN of the subscription's topic.

    ", + "Topic$TopicArn": "

    The topic's ARN.

    " + } + }, + "topicName": { + "base": null, + "refs": { + "CreateTopicInput$Name": "

    The name of the topic you want to create.

    Constraints: Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sns/2010-03-31/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,29 @@ +{ + "pagination": { + "ListEndpointsByPlatformApplication": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Endpoints" + }, + "ListPlatformApplications": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "PlatformApplications" + }, + "ListSubscriptions": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Subscriptions" + }, + "ListSubscriptionsByTopic": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Subscriptions" + }, + "ListTopics": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Topics" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1160 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-11-05", + "endpointPrefix":"sqs", + "serviceAbbreviation":"Amazon SQS", + 
"serviceFullName":"Amazon Simple Queue Service", + "signatureVersion":"v4", + "xmlNamespace":"http://queue.amazonaws.com/doc/2012-11-05/", + "protocol":"query" + }, + "operations":{ + "AddPermission":{ + "name":"AddPermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddPermissionRequest"}, + "errors":[ + { + "shape":"OverLimit", + "error":{ + "code":"OverLimit", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "ChangeMessageVisibility":{ + "name":"ChangeMessageVisibility", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangeMessageVisibilityRequest"}, + "errors":[ + { + "shape":"MessageNotInflight", + "error":{ + "code":"AWS.SimpleQueueService.MessageNotInflight", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"ReceiptHandleIsInvalid", + "exception":true + } + ] + }, + "ChangeMessageVisibilityBatch":{ + "name":"ChangeMessageVisibilityBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ChangeMessageVisibilityBatchRequest"}, + "output":{ + "shape":"ChangeMessageVisibilityBatchResult", + "resultWrapper":"ChangeMessageVisibilityBatchResult" + }, + "errors":[ + { + "shape":"TooManyEntriesInBatchRequest", + "error":{ + "code":"AWS.SimpleQueueService.TooManyEntriesInBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"EmptyBatchRequest", + "error":{ + "code":"AWS.SimpleQueueService.EmptyBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"BatchEntryIdsNotDistinct", + "error":{ + "code":"AWS.SimpleQueueService.BatchEntryIdsNotDistinct", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidBatchEntryId", + "error":{ + "code":"AWS.SimpleQueueService.InvalidBatchEntryId", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + 
"CreateQueue":{ + "name":"CreateQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateQueueRequest"}, + "output":{ + "shape":"CreateQueueResult", + "resultWrapper":"CreateQueueResult" + }, + "errors":[ + { + "shape":"QueueDeletedRecently", + "error":{ + "code":"AWS.SimpleQueueService.QueueDeletedRecently", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"QueueNameExists", + "error":{ + "code":"QueueAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteMessage":{ + "name":"DeleteMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMessageRequest"}, + "errors":[ + { + "shape":"InvalidIdFormat", + "exception":true + }, + { + "shape":"ReceiptHandleIsInvalid", + "exception":true + } + ] + }, + "DeleteMessageBatch":{ + "name":"DeleteMessageBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMessageBatchRequest"}, + "output":{ + "shape":"DeleteMessageBatchResult", + "resultWrapper":"DeleteMessageBatchResult" + }, + "errors":[ + { + "shape":"TooManyEntriesInBatchRequest", + "error":{ + "code":"AWS.SimpleQueueService.TooManyEntriesInBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"EmptyBatchRequest", + "error":{ + "code":"AWS.SimpleQueueService.EmptyBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"BatchEntryIdsNotDistinct", + "error":{ + "code":"AWS.SimpleQueueService.BatchEntryIdsNotDistinct", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidBatchEntryId", + "error":{ + "code":"AWS.SimpleQueueService.InvalidBatchEntryId", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "DeleteQueue":{ + "name":"DeleteQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DeleteQueueRequest"} + }, + "GetQueueAttributes":{ + "name":"GetQueueAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetQueueAttributesRequest"}, + "output":{ + "shape":"GetQueueAttributesResult", + "resultWrapper":"GetQueueAttributesResult" + }, + "errors":[ + { + "shape":"InvalidAttributeName", + "exception":true + } + ] + }, + "GetQueueUrl":{ + "name":"GetQueueUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetQueueUrlRequest"}, + "output":{ + "shape":"GetQueueUrlResult", + "resultWrapper":"GetQueueUrlResult" + }, + "errors":[ + { + "shape":"QueueDoesNotExist", + "error":{ + "code":"AWS.SimpleQueueService.NonExistentQueue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListDeadLetterSourceQueues":{ + "name":"ListDeadLetterSourceQueues", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeadLetterSourceQueuesRequest"}, + "output":{ + "shape":"ListDeadLetterSourceQueuesResult", + "resultWrapper":"ListDeadLetterSourceQueuesResult" + }, + "errors":[ + { + "shape":"QueueDoesNotExist", + "error":{ + "code":"AWS.SimpleQueueService.NonExistentQueue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "ListQueues":{ + "name":"ListQueues", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListQueuesRequest"}, + "output":{ + "shape":"ListQueuesResult", + "resultWrapper":"ListQueuesResult" + } + }, + "PurgeQueue":{ + "name":"PurgeQueue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurgeQueueRequest"}, + "errors":[ + { + "shape":"QueueDoesNotExist", + "error":{ + "code":"AWS.SimpleQueueService.NonExistentQueue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"PurgeQueueInProgress", + "error":{ + "code":"AWS.SimpleQueueService.PurgeQueueInProgress", + "httpStatusCode":403, + "senderFault":true + 
}, + "exception":true + } + ] + }, + "ReceiveMessage":{ + "name":"ReceiveMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReceiveMessageRequest"}, + "output":{ + "shape":"ReceiveMessageResult", + "resultWrapper":"ReceiveMessageResult" + }, + "errors":[ + { + "shape":"OverLimit", + "error":{ + "code":"OverLimit", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + } + ] + }, + "RemovePermission":{ + "name":"RemovePermission", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemovePermissionRequest"} + }, + "SendMessage":{ + "name":"SendMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendMessageRequest"}, + "output":{ + "shape":"SendMessageResult", + "resultWrapper":"SendMessageResult" + }, + "errors":[ + { + "shape":"InvalidMessageContents", + "exception":true + }, + { + "shape":"UnsupportedOperation", + "error":{ + "code":"AWS.SimpleQueueService.UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SendMessageBatch":{ + "name":"SendMessageBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendMessageBatchRequest"}, + "output":{ + "shape":"SendMessageBatchResult", + "resultWrapper":"SendMessageBatchResult" + }, + "errors":[ + { + "shape":"TooManyEntriesInBatchRequest", + "error":{ + "code":"AWS.SimpleQueueService.TooManyEntriesInBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"EmptyBatchRequest", + "error":{ + "code":"AWS.SimpleQueueService.EmptyBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"BatchEntryIdsNotDistinct", + "error":{ + "code":"AWS.SimpleQueueService.BatchEntryIdsNotDistinct", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"BatchRequestTooLong", + "error":{ + 
"code":"AWS.SimpleQueueService.BatchRequestTooLong", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"InvalidBatchEntryId", + "error":{ + "code":"AWS.SimpleQueueService.InvalidBatchEntryId", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + { + "shape":"UnsupportedOperation", + "error":{ + "code":"AWS.SimpleQueueService.UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + ] + }, + "SetQueueAttributes":{ + "name":"SetQueueAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetQueueAttributesRequest"}, + "errors":[ + { + "shape":"InvalidAttributeName", + "exception":true + } + ] + } + }, + "shapes":{ + "AWSAccountIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AWSAccountId" + }, + "flattened":true + }, + "ActionNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ActionName" + }, + "flattened":true + }, + "AddPermissionRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Label", + "AWSAccountIds", + "Actions" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Label":{"shape":"String"}, + "AWSAccountIds":{"shape":"AWSAccountIdList"}, + "Actions":{"shape":"ActionNameList"} + } + }, + "AttributeMap":{ + "type":"map", + "key":{ + "shape":"QueueAttributeName", + "locationName":"Name" + }, + "value":{ + "shape":"String", + "locationName":"Value" + }, + "flattened":true, + "locationName":"Attribute" + }, + "AttributeNameList":{ + "type":"list", + "member":{ + "shape":"QueueAttributeName", + "locationName":"AttributeName" + }, + "flattened":true + }, + "BatchEntryIdsNotDistinct":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.BatchEntryIdsNotDistinct", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "BatchRequestTooLong":{ + "type":"structure", + "members":{ + }, + "error":{ + 
"code":"AWS.SimpleQueueService.BatchRequestTooLong", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "BatchResultErrorEntry":{ + "type":"structure", + "required":[ + "Id", + "SenderFault", + "Code" + ], + "members":{ + "Id":{"shape":"String"}, + "SenderFault":{"shape":"Boolean"}, + "Code":{"shape":"String"}, + "Message":{"shape":"String"} + } + }, + "BatchResultErrorEntryList":{ + "type":"list", + "member":{ + "shape":"BatchResultErrorEntry", + "locationName":"BatchResultErrorEntry" + }, + "flattened":true + }, + "Binary":{"type":"blob"}, + "BinaryList":{ + "type":"list", + "member":{ + "shape":"Binary", + "locationName":"BinaryListValue" + } + }, + "Boolean":{"type":"boolean"}, + "ChangeMessageVisibilityBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Entries":{"shape":"ChangeMessageVisibilityBatchRequestEntryList"} + } + }, + "ChangeMessageVisibilityBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "ReceiptHandle" + ], + "members":{ + "Id":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"}, + "VisibilityTimeout":{"shape":"Integer"} + } + }, + "ChangeMessageVisibilityBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"ChangeMessageVisibilityBatchRequestEntry", + "locationName":"ChangeMessageVisibilityBatchRequestEntry" + }, + "flattened":true + }, + "ChangeMessageVisibilityBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{"shape":"ChangeMessageVisibilityBatchResultEntryList"}, + "Failed":{"shape":"BatchResultErrorEntryList"} + } + }, + "ChangeMessageVisibilityBatchResultEntry":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{"shape":"String"} + } + }, + "ChangeMessageVisibilityBatchResultEntryList":{ + "type":"list", + "member":{ + "shape":"ChangeMessageVisibilityBatchResultEntry", + 
"locationName":"ChangeMessageVisibilityBatchResultEntry" + }, + "flattened":true + }, + "ChangeMessageVisibilityRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "ReceiptHandle", + "VisibilityTimeout" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"}, + "VisibilityTimeout":{"shape":"Integer"} + } + }, + "CreateQueueRequest":{ + "type":"structure", + "required":["QueueName"], + "members":{ + "QueueName":{"shape":"String"}, + "Attributes":{ + "shape":"AttributeMap", + "locationName":"Attribute" + } + } + }, + "CreateQueueResult":{ + "type":"structure", + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "DeleteMessageBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Entries":{"shape":"DeleteMessageBatchRequestEntryList"} + } + }, + "DeleteMessageBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "ReceiptHandle" + ], + "members":{ + "Id":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"} + } + }, + "DeleteMessageBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"DeleteMessageBatchRequestEntry", + "locationName":"DeleteMessageBatchRequestEntry" + }, + "flattened":true + }, + "DeleteMessageBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{"shape":"DeleteMessageBatchResultEntryList"}, + "Failed":{"shape":"BatchResultErrorEntryList"} + } + }, + "DeleteMessageBatchResultEntry":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{"shape":"String"} + } + }, + "DeleteMessageBatchResultEntryList":{ + "type":"list", + "member":{ + "shape":"DeleteMessageBatchResultEntry", + "locationName":"DeleteMessageBatchResultEntry" + }, + "flattened":true + }, + "DeleteMessageRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "ReceiptHandle" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + 
"ReceiptHandle":{"shape":"String"} + } + }, + "DeleteQueueRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "EmptyBatchRequest":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.EmptyBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "GetQueueAttributesRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"}, + "AttributeNames":{"shape":"AttributeNameList"} + } + }, + "GetQueueAttributesResult":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeMap", + "locationName":"Attribute" + } + } + }, + "GetQueueUrlRequest":{ + "type":"structure", + "required":["QueueName"], + "members":{ + "QueueName":{"shape":"String"}, + "QueueOwnerAWSAccountId":{"shape":"String"} + } + }, + "GetQueueUrlResult":{ + "type":"structure", + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "Integer":{"type":"integer"}, + "InvalidAttributeName":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidBatchEntryId":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.InvalidBatchEntryId", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidIdFormat":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidMessageContents":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ListDeadLetterSourceQueuesRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "ListDeadLetterSourceQueuesResult":{ + "type":"structure", + "required":["queueUrls"], + "members":{ + "queueUrls":{"shape":"QueueUrlList"} + } + }, + "ListQueuesRequest":{ + "type":"structure", + "members":{ + "QueueNamePrefix":{"shape":"String"} + } + }, + "ListQueuesResult":{ + "type":"structure", + "members":{ + 
"QueueUrls":{"shape":"QueueUrlList"} + } + }, + "Message":{ + "type":"structure", + "members":{ + "MessageId":{"shape":"String"}, + "ReceiptHandle":{"shape":"String"}, + "MD5OfBody":{"shape":"String"}, + "Body":{"shape":"String"}, + "Attributes":{ + "shape":"AttributeMap", + "locationName":"Attribute" + }, + "MD5OfMessageAttributes":{"shape":"String"}, + "MessageAttributes":{ + "shape":"MessageAttributeMap", + "locationName":"MessageAttribute" + } + } + }, + "MessageAttributeMap":{ + "type":"map", + "key":{ + "shape":"String", + "locationName":"Name" + }, + "value":{ + "shape":"MessageAttributeValue", + "locationName":"Value" + }, + "flattened":true + }, + "MessageAttributeName":{"type":"string"}, + "MessageAttributeNameList":{ + "type":"list", + "member":{ + "shape":"MessageAttributeName", + "locationName":"MessageAttributeName" + }, + "flattened":true + }, + "MessageAttributeValue":{ + "type":"structure", + "required":["DataType"], + "members":{ + "StringValue":{"shape":"String"}, + "BinaryValue":{"shape":"Binary"}, + "StringListValues":{ + "shape":"StringList", + "flattened":true, + "locationName":"StringListValue" + }, + "BinaryListValues":{ + "shape":"BinaryList", + "flattened":true, + "locationName":"BinaryListValue" + }, + "DataType":{"shape":"String"} + } + }, + "MessageList":{ + "type":"list", + "member":{ + "shape":"Message", + "locationName":"Message" + }, + "flattened":true + }, + "MessageNotInflight":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.MessageNotInflight", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OverLimit":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OverLimit", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "PurgeQueueInProgress":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.PurgeQueueInProgress", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + 
"PurgeQueueRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"} + } + }, + "QueueAttributeName":{ + "type":"string", + "enum":[ + "Policy", + "VisibilityTimeout", + "MaximumMessageSize", + "MessageRetentionPeriod", + "ApproximateNumberOfMessages", + "ApproximateNumberOfMessagesNotVisible", + "CreatedTimestamp", + "LastModifiedTimestamp", + "QueueArn", + "ApproximateNumberOfMessagesDelayed", + "DelaySeconds", + "ReceiveMessageWaitTimeSeconds", + "RedrivePolicy" + ] + }, + "QueueDeletedRecently":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.QueueDeletedRecently", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueDoesNotExist":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.NonExistentQueue", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueNameExists":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"QueueAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueueUrlList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"QueueUrl" + }, + "flattened":true + }, + "ReceiptHandleIsInvalid":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ReceiveMessageRequest":{ + "type":"structure", + "required":["QueueUrl"], + "members":{ + "QueueUrl":{"shape":"String"}, + "AttributeNames":{"shape":"AttributeNameList"}, + "MessageAttributeNames":{"shape":"MessageAttributeNameList"}, + "MaxNumberOfMessages":{"shape":"Integer"}, + "VisibilityTimeout":{"shape":"Integer"}, + "WaitTimeSeconds":{"shape":"Integer"} + } + }, + "ReceiveMessageResult":{ + "type":"structure", + "members":{ + "Messages":{"shape":"MessageList"} + } + }, + "RemovePermissionRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Label" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Label":{"shape":"String"} + } 
+ }, + "SendMessageBatchRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "Entries" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Entries":{"shape":"SendMessageBatchRequestEntryList"} + } + }, + "SendMessageBatchRequestEntry":{ + "type":"structure", + "required":[ + "Id", + "MessageBody" + ], + "members":{ + "Id":{"shape":"String"}, + "MessageBody":{"shape":"String"}, + "DelaySeconds":{"shape":"Integer"}, + "MessageAttributes":{ + "shape":"MessageAttributeMap", + "locationName":"MessageAttribute" + } + } + }, + "SendMessageBatchRequestEntryList":{ + "type":"list", + "member":{ + "shape":"SendMessageBatchRequestEntry", + "locationName":"SendMessageBatchRequestEntry" + }, + "flattened":true + }, + "SendMessageBatchResult":{ + "type":"structure", + "required":[ + "Successful", + "Failed" + ], + "members":{ + "Successful":{"shape":"SendMessageBatchResultEntryList"}, + "Failed":{"shape":"BatchResultErrorEntryList"} + } + }, + "SendMessageBatchResultEntry":{ + "type":"structure", + "required":[ + "Id", + "MessageId", + "MD5OfMessageBody" + ], + "members":{ + "Id":{"shape":"String"}, + "MessageId":{"shape":"String"}, + "MD5OfMessageBody":{"shape":"String"}, + "MD5OfMessageAttributes":{"shape":"String"} + } + }, + "SendMessageBatchResultEntryList":{ + "type":"list", + "member":{ + "shape":"SendMessageBatchResultEntry", + "locationName":"SendMessageBatchResultEntry" + }, + "flattened":true + }, + "SendMessageRequest":{ + "type":"structure", + "required":[ + "QueueUrl", + "MessageBody" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "MessageBody":{"shape":"String"}, + "DelaySeconds":{"shape":"Integer"}, + "MessageAttributes":{ + "shape":"MessageAttributeMap", + "locationName":"MessageAttribute" + } + } + }, + "SendMessageResult":{ + "type":"structure", + "members":{ + "MD5OfMessageBody":{"shape":"String"}, + "MD5OfMessageAttributes":{"shape":"String"}, + "MessageId":{"shape":"String"} + } + }, + "SetQueueAttributesRequest":{ + "type":"structure", 
+ "required":[ + "QueueUrl", + "Attributes" + ], + "members":{ + "QueueUrl":{"shape":"String"}, + "Attributes":{ + "shape":"AttributeMap", + "locationName":"Attribute" + } + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"StringListValue" + } + }, + "TooManyEntriesInBatchRequest":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.TooManyEntriesInBatchRequest", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnsupportedOperation":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"AWS.SimpleQueueService.UnsupportedOperation", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,504 @@ +{ + "version": "2.0", + "operations": { + "AddPermission": "

    Adds a permission to a queue for a specific principal. This allows for sharing access to the queue.

    When you create a queue, you have full control access rights for the queue. Only you (as owner of the queue) can grant or deny permissions to the queue. For more information about these permissions, see Shared Queues in the Amazon SQS Developer Guide.

    AddPermission writes an Amazon SQS-generated policy. If you want to write your own policy, use SetQueueAttributes to upload your policy. For more information about writing your own policy, see Using The Access Policy Language in the Amazon SQS Developer Guide.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    &Attribute.1=this

    &Attribute.2=that

    ", + "ChangeMessageVisibility": "

    Changes the visibility timeout of a specified message in a queue to a new value. The maximum allowed timeout value you can set the value to is 12 hours. This means you can't extend the timeout of a message in an existing queue to more than a total visibility timeout of 12 hours. (For more information visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.)

    For example, let's say you have a message and its default message visibility timeout is 5 minutes. After 3 minutes, you call ChangeMessageVisiblity with a timeout of 10 minutes. At that time, the timeout for the message would be extended by 10 minutes beyond the time of the ChangeMessageVisibility call. This results in a total visibility timeout of 13 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to a maximum of 12 hours. If you try to extend beyond 12 hours, the request will be rejected.

    There is a 120,000 limit for the number of inflight messages per queue. Messages are inflight after they have been received from the queue by a consuming component, but have not yet been deleted from the queue. If you reach the 120,000 limit, you will receive an OverLimit error message from Amazon SQS. To help avoid reaching the limit, you should delete the messages from the queue after they have been processed. You can also increase the number of queues you use to process the messages.

    If you attempt to set the VisibilityTimeout to an amount more than the maximum time left, Amazon SQS returns an error. It will not automatically recalculate and increase the timeout to the maximum time remaining. Unlike with a queue, when you change the visibility timeout for a specific message, that timeout value is applied immediately but is not saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message the next time it is received reverts to the original timeout value, not the value you set with the ChangeMessageVisibility action.", + "ChangeMessageVisibilityBatch": "

    Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

    Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200. Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    &Attribute.1=this

    &Attribute.2=that

    ", + "CreateQueue": "

    Creates a new queue, or returns the URL of an existing one. When you request CreateQueue, you provide a name for the queue. To successfully create a new queue, you must provide a name that is unique within the scope of your own queues.

    If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

    You may pass one or more attributes in the request. If you do not provide a value for any attribute, the queue will have the default value for that attribute. Permitted attributes are the same that can be set using SetQueueAttributes.

    Use GetQueueUrl to get a queue's URL. GetQueueUrl requires only the QueueName parameter.

    If you provide the name of an existing queue, along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue. If the queue name, attribute names, or attribute values do not match an existing queue, CreateQueue returns an error.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    &Attribute.1=this

    &Attribute.2=that

    ", + "DeleteMessage": "

    Deletes the specified message from the specified queue. You specify the message by using the message's receipt handle and not the message ID you received when you sent the message. Even if the message is locked by another reader due to the visibility timeout setting, it is still deleted from the queue. If you leave a message in the queue for longer than the queue's configured retention period, Amazon SQS automatically deletes it.

    The receipt handle is associated with a specific instance of receiving the message. If you receive a message more than once, the receipt handle you get each time you receive the message is different. When you request DeleteMessage, if you don't provide the most recently received receipt handle for the message, the request will still succeed, but the message might not be deleted.

    It is possible you will receive a message even after you have deleted it. This might happen on rare occasions if one of the servers storing a copy of the message is unavailable when you request to delete the message. The copy remains on the server and might be returned to you again on a subsequent receive request. You should create your system to be idempotent so that receiving a particular message more than once is not a problem.

    ", + "DeleteMessageBatch": "

    Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the delete action on each message is reported individually in the response.

    Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

    Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    &Attribute.1=this

    &Attribute.2=that

    ", + "DeleteQueue": "

    Deletes the queue specified by the queue URL, regardless of whether the queue is empty. If the specified queue does not exist, Amazon SQS returns a successful response.

    Use DeleteQueue with care; once you delete your queue, any messages in the queue are no longer available.

    When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after the 60 seconds, the queue and that message you sent no longer exist. Also, when you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

    We reserve the right to delete queues that have had no activity for more than 30 days. For more information, see How Amazon SQS Queues Work in the Amazon SQS Developer Guide.

    ", + "GetQueueAttributes": "

    Gets attributes for the specified queue. The following attributes are supported:

    • All - returns all values.
    • ApproximateNumberOfMessages - returns the approximate number of visible messages in a queue. For more information, see Resources Required to Process Messages in the Amazon SQS Developer Guide.
    • ApproximateNumberOfMessagesNotVisible - returns the approximate number of messages that are not timed-out and not deleted. For more information, see Resources Required to Process Messages in the Amazon SQS Developer Guide.
    • VisibilityTimeout - returns the visibility timeout for the queue. For more information about visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.
    • CreatedTimestamp - returns the time when the queue was created (epoch time in seconds).
    • LastModifiedTimestamp - returns the time when the queue was last changed (epoch time in seconds).
    • Policy - returns the queue's policy.
    • MaximumMessageSize - returns the limit of how many bytes a message can contain before Amazon SQS rejects it.
    • MessageRetentionPeriod - returns the number of seconds Amazon SQS retains a message.
    • QueueArn - returns the queue's Amazon resource name (ARN).
    • ApproximateNumberOfMessagesDelayed - returns the approximate number of messages that are pending to be added to the queue.
    • DelaySeconds - returns the default delay on the queue in seconds.
    • ReceiveMessageWaitTimeSeconds - returns the time for which a ReceiveMessage call will wait for a message to arrive.
    • RedrivePolicy - returns the parameters for dead letter queue functionality of the source queue. For more information about RedrivePolicy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide.

    Going forward, new attributes might be added. If you are writing code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    &Attribute.1=this

    &Attribute.2=that

    ", + "GetQueueUrl": "

    Returns the URL of an existing queue. This action provides a simple way to retrieve the URL of an Amazon SQS queue.

    To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or go to Shared Queues in the Amazon SQS Developer Guide.

    ", + "ListDeadLetterSourceQueues": "

    Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead letter queue.

    For more information about using dead letter queues, see Using Amazon SQS Dead Letter Queues.

    ", + "ListQueues": "

    Returns a list of your queues. The maximum number of queues that can be returned is 1000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name beginning with the specified value are returned.

    ", + "PurgeQueue": "

    Deletes the messages in a queue specified by the queue URL.

    When you use the PurgeQueue API, the deleted messages in the queue cannot be retrieved.

    When you purge a queue, the message deletion process takes up to 60 seconds. All messages sent to the queue before calling PurgeQueue will be deleted; messages sent to the queue while it is being purged may be deleted. While the queue is being purged, messages sent to the queue before PurgeQueue was called may be received, but will be deleted within the next minute.

    ", + "ReceiveMessage": "

    Retrieves one or more messages, with a maximum limit of 10 messages, from the specified queue. Long poll support is enabled by using the WaitTimeSeconds parameter. For more information, see Amazon SQS Long Poll in the Amazon SQS Developer Guide.

    Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. This means only the messages on the sampled machines are returned. If the number of messages in the queue is small (less than 1000), it is likely you will get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response; in which case you should repeat the request.

    For each message returned, the response includes the following:

    • Message body

    • MD5 digest of the message body. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    • Message ID you received when you sent the message to the queue.

    • Receipt handle.

    • Message attributes.

    • MD5 digest of the message attributes.

    The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide.

    You can provide the VisibilityTimeout parameter in your request, which will be applied to the messages that Amazon SQS returns in the response. If you do not include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.

    Going forward, new attributes might be added. If you are writing code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

    ", + "RemovePermission": "

    Revokes any permissions in the queue policy that matches the specified Label parameter. Only the owner of the queue can remove permissions.

    ", + "SendMessage": "

    Delivers a message to the specified queue. With Amazon SQS, you now have the ability to send large payload messages that are up to 256KB (262,144 bytes) in size. To send large payloads, you must use an AWS SDK that supports SigV4 signing. To verify whether SigV4 is supported for an AWS SDK, check the SDK release notes.

    The following list shows the characters (in Unicode) allowed in your message, according to the W3C XML specification. For more information, go to http://www.w3.org/TR/REC-xml/#charsets If you send any characters not included in the list, your request will be rejected.

    #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF]

    ", + "SendMessageBatch": "

    Delivers up to ten messages to the specified queue. This is a batch version of SendMessage. The result of the send action on each message is reported individually in the response. The maximum allowed individual message size is 256 KB (262,144 bytes).

    The maximum total payload size (i.e., the sum of all a batch's individual message lengths) is also 256 KB (262,144 bytes).

    If the DelaySeconds parameter is not specified for an entry, the default for the queue is used.

    The following list shows the characters (in Unicode) that are allowed in your message, according to the W3C XML specification. For more information, go to http://www.w3.org/TR/REC-xml/#charsets. If you send any characters that are not included in the list, your request will be rejected.

    #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF]

    Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200. Some API actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

    &Attribute.1=this

    &Attribute.2=that

    ", + "SetQueueAttributes": "

    Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

    Going forward, new attributes might be added. If you are writing code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully." + }, + "service": "

    Welcome to the Amazon Simple Queue Service API Reference. This section describes who should read this guide, how the guide is organized, and other resources related to the Amazon Simple Queue Service (Amazon SQS).

    Amazon SQS offers reliable and scalable hosted queues for storing messages as they travel between computers. By using Amazon SQS, you can move data between distributed components of your applications that perform different tasks without losing messages or requiring each component to be always available.

    Helpful Links:

    We also provide SDKs that enable you to access Amazon SQS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as:

    • Cryptographically signing your service requests
    • Retrying requests
    • Handling error responses

    For a list of available SDKs, go to Tools for Amazon Web Services.

    ", + "shapes": { + "AWSAccountIdList": { + "base": null, + "refs": { + "AddPermissionRequest$AWSAccountIds": "

    The AWS account number of the principal who will be given permission. The principal must have an AWS account, but does not need to be signed up for Amazon SQS. For information about locating the AWS account identification, see Your AWS Identifiers in the Amazon SQS Developer Guide.

    " + } + }, + "ActionNameList": { + "base": null, + "refs": { + "AddPermissionRequest$Actions": "

    The action the client wants to allow for the specified principal. The following are valid values: * | SendMessage | ReceiveMessage | DeleteMessage | ChangeMessageVisibility | GetQueueAttributes | GetQueueUrl. For more information about these actions, see Understanding Permissions in the Amazon SQS Developer Guide.

    Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for the ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.

    " + } + }, + "AddPermissionRequest": { + "base": null, + "refs": { + } + }, + "AttributeMap": { + "base": null, + "refs": { + "CreateQueueRequest$Attributes": "

    A map of attributes with their corresponding values.

    The following lists the names, descriptions, and values of the special request parameters the CreateQueue action uses:

    • DelaySeconds - The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 (zero).
    • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB).
    • MessageRetentionPeriod - The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days).
    • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.
    • ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call will wait for a message to arrive. An integer from 0 to 20 (seconds). The default for this attribute is 0.
    • VisibilityTimeout - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). The default for this attribute is 30. For more information about visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

    ", + "GetQueueAttributesResult$Attributes": "

    A map of attributes to the respective values.

    ", + "Message$Attributes": "

    SenderId, SentTimestamp, ApproximateReceiveCount, and/or ApproximateFirstReceiveTimestamp. SentTimestamp and ApproximateFirstReceiveTimestamp are each returned as an integer representing the epoch time in milliseconds.

    ", + "SetQueueAttributesRequest$Attributes": "

    A map of attributes to set.

    The following lists the names, descriptions, and values of the special request parameters the SetQueueAttributes action uses:

    • DelaySeconds - The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 (zero).
    • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB).
    • MessageRetentionPeriod - The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days).
    • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.
    • ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call will wait for a message to arrive. An integer from 0 to 20 (seconds). The default for this attribute is 0.
    • VisibilityTimeout - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). The default for this attribute is 30. For more information about visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.
    • RedrivePolicy - The parameters for dead letter queue functionality of the source queue. For more information about RedrivePolicy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide.

    " + } + }, + "AttributeNameList": { + "base": null, + "refs": { + "GetQueueAttributesRequest$AttributeNames": "

    A list of attributes to retrieve information for.

    ", + "ReceiveMessageRequest$AttributeNames": "

    A list of attributes that need to be returned along with each message.

    The following lists the names and descriptions of the attributes that can be returned:

    • All - returns all values.
    • ApproximateFirstReceiveTimestamp - returns the time when the message was first received from the queue (epoch time in milliseconds).
    • ApproximateReceiveCount - returns the number of times a message has been received from the queue but not deleted.
    • SenderId - returns the AWS account number (or the IP address, if anonymous access is allowed) of the sender.
    • SentTimestamp - returns the time when the message was sent to the queue (epoch time in milliseconds).
    " + } + }, + "BatchEntryIdsNotDistinct": { + "base": "

    Two or more batch entries have the same Id in the request.

    ", + "refs": { + } + }, + "BatchRequestTooLong": { + "base": "

    The length of all the messages put together is more than the limit.

    ", + "refs": { + } + }, + "BatchResultErrorEntry": { + "base": "

    This is used in the responses of batch API to give a detailed description of the result of an action on each entry in the request.

    ", + "refs": { + "BatchResultErrorEntryList$member": null + } + }, + "BatchResultErrorEntryList": { + "base": null, + "refs": { + "ChangeMessageVisibilityBatchResult$Failed": "

    A list of BatchResultErrorEntry items.

    ", + "DeleteMessageBatchResult$Failed": "

    A list of BatchResultErrorEntry items.

    ", + "SendMessageBatchResult$Failed": "

    A list of BatchResultErrorEntry items with the error detail about each message that could not be enqueued.

    " + } + }, + "Binary": { + "base": null, + "refs": { + "BinaryList$member": null, + "MessageAttributeValue$BinaryValue": "

    Binary type attributes can store any binary data, for example, compressed data, encrypted data, or images.

    " + } + }, + "BinaryList": { + "base": null, + "refs": { + "MessageAttributeValue$BinaryListValues": "

    Not implemented. Reserved for future use.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "BatchResultErrorEntry$SenderFault": "

    Whether the error happened due to the sender's fault.

    " + } + }, + "ChangeMessageVisibilityBatchRequest": { + "base": null, + "refs": { + } + }, + "ChangeMessageVisibilityBatchRequestEntry": { + "base": "

    Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch.

    All of the following parameters are list parameters that must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n, where n is an integer value starting with 1. For example, a parameter list for this action might look like this:

    &ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2

    &ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=Your_Receipt_Handle

    &ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45

    ", + "refs": { + "ChangeMessageVisibilityBatchRequestEntryList$member": null + } + }, + "ChangeMessageVisibilityBatchRequestEntryList": { + "base": null, + "refs": { + "ChangeMessageVisibilityBatchRequest$Entries": "

    A list of receipt handles of the messages for which the visibility timeout must be changed.

    " + } + }, + "ChangeMessageVisibilityBatchResult": { + "base": "

    For each message in the batch, the response contains a ChangeMessageVisibilityBatchResultEntry tag if the message succeeds or a BatchResultErrorEntry tag if the message fails.

    ", + "refs": { + } + }, + "ChangeMessageVisibilityBatchResultEntry": { + "base": "

    Encloses the id of an entry in ChangeMessageVisibilityBatch.

    ", + "refs": { + "ChangeMessageVisibilityBatchResultEntryList$member": null + } + }, + "ChangeMessageVisibilityBatchResultEntryList": { + "base": null, + "refs": { + "ChangeMessageVisibilityBatchResult$Successful": "

    A list of ChangeMessageVisibilityBatchResultEntry items.

    " + } + }, + "ChangeMessageVisibilityRequest": { + "base": null, + "refs": { + } + }, + "CreateQueueRequest": { + "base": null, + "refs": { + } + }, + "CreateQueueResult": { + "base": "

    Returns the QueueUrl element of the created queue.

    ", + "refs": { + } + }, + "DeleteMessageBatchRequest": { + "base": null, + "refs": { + } + }, + "DeleteMessageBatchRequestEntry": { + "base": "

    Encloses a receipt handle and an identifier for it.

    ", + "refs": { + "DeleteMessageBatchRequestEntryList$member": null + } + }, + "DeleteMessageBatchRequestEntryList": { + "base": null, + "refs": { + "DeleteMessageBatchRequest$Entries": "

    A list of receipt handles for the messages to be deleted.

    " + } + }, + "DeleteMessageBatchResult": { + "base": "

    For each message in the batch, the response contains a DeleteMessageBatchResultEntry tag if the message is deleted or a BatchResultErrorEntry tag if the message cannot be deleted.

    ", + "refs": { + } + }, + "DeleteMessageBatchResultEntry": { + "base": "

    Encloses the id of an entry in DeleteMessageBatch.

    ", + "refs": { + "DeleteMessageBatchResultEntryList$member": null + } + }, + "DeleteMessageBatchResultEntryList": { + "base": null, + "refs": { + "DeleteMessageBatchResult$Successful": "

    A list of DeleteMessageBatchResultEntry items.

    " + } + }, + "DeleteMessageRequest": { + "base": null, + "refs": { + } + }, + "DeleteQueueRequest": { + "base": null, + "refs": { + } + }, + "EmptyBatchRequest": { + "base": "

    Batch request does not contain an entry.

    ", + "refs": { + } + }, + "GetQueueAttributesRequest": { + "base": null, + "refs": { + } + }, + "GetQueueAttributesResult": { + "base": "A list of returned queue attributes.", + "refs": { + } + }, + "GetQueueUrlRequest": { + "base": null, + "refs": { + } + }, + "GetQueueUrlResult": { + "base": "

    For more information, see Responses in the Amazon SQS Developer Guide.

    ", + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "ChangeMessageVisibilityBatchRequestEntry$VisibilityTimeout": "

    The new value (in seconds) for the message's visibility timeout.

    ", + "ChangeMessageVisibilityRequest$VisibilityTimeout": "

    The new value (in seconds - from 0 to 43200 - maximum 12 hours) for the message's visibility timeout.

    ", + "ReceiveMessageRequest$MaxNumberOfMessages": "

    The maximum number of messages to return. Amazon SQS never returns more messages than this value but may return fewer. Values can be from 1 to 10. Default is 1.

    All of the messages are not necessarily returned.

    ", + "ReceiveMessageRequest$VisibilityTimeout": "

    The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.

    ", + "ReceiveMessageRequest$WaitTimeSeconds": "

    The duration (in seconds) for which the call will wait for a message to arrive in the queue before returning. If a message is available, the call will return sooner than WaitTimeSeconds.

    ", + "SendMessageBatchRequestEntry$DelaySeconds": "

    The number of seconds for which the message has to be delayed.

    ", + "SendMessageRequest$DelaySeconds": "

    The number of seconds (0 to 900 - 15 minutes) to delay a specific message. Messages with a positive DelaySeconds value become available for processing after the delay time is finished. If you don't specify a value, the default value for the queue applies.

    " + } + }, + "InvalidAttributeName": { + "base": "

    The attribute referred to does not exist.

    ", + "refs": { + } + }, + "InvalidBatchEntryId": { + "base": "

    The Id of a batch entry in a batch request does not abide by the specification.

    ", + "refs": { + } + }, + "InvalidIdFormat": { + "base": "

    The receipt handle is not valid for the current version.

    ", + "refs": { + } + }, + "InvalidMessageContents": { + "base": "

    The message contains characters outside the allowed set.

    ", + "refs": { + } + }, + "ListDeadLetterSourceQueuesRequest": { + "base": null, + "refs": { + } + }, + "ListDeadLetterSourceQueuesResult": { + "base": "A list of your dead letter source queues.", + "refs": { + } + }, + "ListQueuesRequest": { + "base": null, + "refs": { + } + }, + "ListQueuesResult": { + "base": "A list of your queues.", + "refs": { + } + }, + "Message": { + "base": "

    An Amazon SQS message.

    ", + "refs": { + "MessageList$member": null + } + }, + "MessageAttributeMap": { + "base": null, + "refs": { + "Message$MessageAttributes": "

    Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items.

    ", + "SendMessageBatchRequestEntry$MessageAttributes": "

    Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items.

    ", + "SendMessageRequest$MessageAttributes": "

    Each message attribute consists of a Name, Type, and Value. For more information, see Message Attribute Items.

    " + } + }, + "MessageAttributeName": { + "base": null, + "refs": { + "MessageAttributeNameList$member": null + } + }, + "MessageAttributeNameList": { + "base": null, + "refs": { + "ReceiveMessageRequest$MessageAttributeNames": "

    The name of the message attribute, where N is the index. The message attribute name can contain the following characters: A-Z, a-z, 0-9, underscore (_), hyphen (-), and period (.). The name must not start or end with a period, and it should not have successive periods. The name is case sensitive and must be unique among all attribute names for the message. The name can be up to 256 characters long. The name cannot start with \"AWS.\" or \"Amazon.\" (or any variations in casing), because these prefixes are reserved for use by Amazon Web Services.

    When using ReceiveMessage, you can send a list of attribute names to receive, or you can return all of the attributes by specifying \"All\" or \".*\" in your request. You can also use \"foo.*\" to return all message attributes starting with the \"foo\" prefix.

    " + } + }, + "MessageAttributeValue": { + "base": "

    The user-specified message attribute value. For string data types, the value attribute has the same restrictions on the content as the message body. For more information, see SendMessage.

    Name, type, and value must not be empty or null. In addition, the message body should not be empty or null. All parts of the message attribute, including name, type, and value, are included in the message size restriction, which is currently 256 KB (262,144 bytes).

    ", + "refs": { + "MessageAttributeMap$value": null + } + }, + "MessageList": { + "base": null, + "refs": { + "ReceiveMessageResult$Messages": "

    A list of messages.

    " + } + }, + "MessageNotInflight": { + "base": "

    The message referred to is not in flight.

    ", + "refs": { + } + }, + "OverLimit": { + "base": "

    The action that you requested would violate a limit. For example, ReceiveMessage returns this error if the maximum number of messages inflight has already been reached. AddPermission returns this error if the maximum number of permissions for the queue has already been reached.

    ", + "refs": { + } + }, + "PurgeQueueInProgress": { + "base": "

    Indicates that the specified queue previously received a PurgeQueue request within the last 60 seconds, the time it can take to delete the messages in the queue.

    ", + "refs": { + } + }, + "PurgeQueueRequest": { + "base": null, + "refs": { + } + }, + "QueueAttributeName": { + "base": null, + "refs": { + "AttributeMap$key": "

    The name of a queue attribute.

    ", + "AttributeNameList$member": null + } + }, + "QueueDeletedRecently": { + "base": "

    You must wait 60 seconds after deleting a queue before you can create another with the same name.

    ", + "refs": { + } + }, + "QueueDoesNotExist": { + "base": "

    The queue referred to does not exist.

    ", + "refs": { + } + }, + "QueueNameExists": { + "base": "

    A queue already exists with this name. Amazon SQS returns this error only if the request includes attributes whose values differ from those of the existing queue.

    ", + "refs": { + } + }, + "QueueUrlList": { + "base": null, + "refs": { + "ListDeadLetterSourceQueuesResult$queueUrls": "A list of source queue URLs that have the RedrivePolicy queue attribute configured with a dead letter queue.", + "ListQueuesResult$QueueUrls": "

    A list of queue URLs, up to 1000 entries.

    " + } + }, + "ReceiptHandleIsInvalid": { + "base": "

    The receipt handle provided is not valid.

    ", + "refs": { + } + }, + "ReceiveMessageRequest": { + "base": null, + "refs": { + } + }, + "ReceiveMessageResult": { + "base": "A list of received messages.", + "refs": { + } + }, + "RemovePermissionRequest": { + "base": null, + "refs": { + } + }, + "SendMessageBatchRequest": { + "base": null, + "refs": { + } + }, + "SendMessageBatchRequestEntry": { + "base": "

    Contains the details of a single Amazon SQS message along with an Id.

    ", + "refs": { + "SendMessageBatchRequestEntryList$member": null + } + }, + "SendMessageBatchRequestEntryList": { + "base": null, + "refs": { + "SendMessageBatchRequest$Entries": "

    A list of SendMessageBatchRequestEntry items.

    " + } + }, + "SendMessageBatchResult": { + "base": "

    For each message in the batch, the response contains a SendMessageBatchResultEntry tag if the message succeeds or a BatchResultErrorEntry tag if the message fails.

    ", + "refs": { + } + }, + "SendMessageBatchResultEntry": { + "base": "

    Encloses a message ID for successfully enqueued message of a SendMessageBatch.

    ", + "refs": { + "SendMessageBatchResultEntryList$member": null + } + }, + "SendMessageBatchResultEntryList": { + "base": null, + "refs": { + "SendMessageBatchResult$Successful": "

    A list of SendMessageBatchResultEntry items.

    " + } + }, + "SendMessageRequest": { + "base": null, + "refs": { + } + }, + "SendMessageResult": { + "base": "

    The MD5OfMessageBody and MessageId elements.

    ", + "refs": { + } + }, + "SetQueueAttributesRequest": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "AWSAccountIdList$member": null, + "ActionNameList$member": null, + "AddPermissionRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "AddPermissionRequest$Label": "

    The unique identification of the permission you're setting (e.g., AliceSendMessage). Constraints: Maximum 80 characters; alphanumeric characters, hyphens (-), and underscores (_) are allowed.

    ", + "AttributeMap$value": "

    The value of a queue attribute.

    ", + "BatchResultErrorEntry$Id": "

    The id of an entry in a batch request.

    ", + "BatchResultErrorEntry$Code": "

    An error code representing why the action failed on this entry.

    ", + "BatchResultErrorEntry$Message": "

    A message explaining why the action failed on this entry.

    ", + "ChangeMessageVisibilityBatchRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "ChangeMessageVisibilityBatchRequestEntry$Id": "

    An identifier for this particular receipt handle. This is used to communicate the result. Note that the Ids of a batch request need to be unique within the request.

    ", + "ChangeMessageVisibilityBatchRequestEntry$ReceiptHandle": "

    A receipt handle.

    ", + "ChangeMessageVisibilityBatchResultEntry$Id": "

    Represents a message whose visibility timeout has been changed successfully.

    ", + "ChangeMessageVisibilityRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "ChangeMessageVisibilityRequest$ReceiptHandle": "

    The receipt handle associated with the message whose visibility timeout should be changed. This parameter is returned by the ReceiveMessage action.

    ", + "CreateQueueRequest$QueueName": "

    The name for the queue to be created.

    ", + "CreateQueueResult$QueueUrl": "

    The URL for the created Amazon SQS queue.

    ", + "DeleteMessageBatchRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "DeleteMessageBatchRequestEntry$Id": "

    An identifier for this particular receipt handle. This is used to communicate the result. Note that the Ids of a batch request need to be unique within the request.

    ", + "DeleteMessageBatchRequestEntry$ReceiptHandle": "

    A receipt handle.

    ", + "DeleteMessageBatchResultEntry$Id": "

    Represents a successfully deleted message.

    ", + "DeleteMessageRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "DeleteMessageRequest$ReceiptHandle": "

    The receipt handle associated with the message to delete.

    ", + "DeleteQueueRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "GetQueueAttributesRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "GetQueueUrlRequest$QueueName": "

    The name of the queue whose URL must be fetched. Maximum 80 characters; alphanumeric characters, hyphens (-), and underscores (_) are allowed.

    ", + "GetQueueUrlRequest$QueueOwnerAWSAccountId": "

    The AWS account ID of the account that created the queue.

    ", + "GetQueueUrlResult$QueueUrl": "

    The URL for the queue.

    ", + "ListDeadLetterSourceQueuesRequest$QueueUrl": "The queue URL of a dead letter queue.", + "ListQueuesRequest$QueueNamePrefix": "

    A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned.

    ", + "Message$MessageId": "

    A unique identifier for the message. Message IDs are considered unique across all AWS accounts for an extended period of time.

    ", + "Message$ReceiptHandle": "

    An identifier associated with the act of receiving the message. A new receipt handle is returned every time you receive a message. When deleting a message, you provide the last received receipt handle to delete the message.

    ", + "Message$MD5OfBody": "

    An MD5 digest of the non-URL-encoded message body string.

    ", + "Message$Body": "

    The message's contents (not URL-encoded).

    ", + "Message$MD5OfMessageAttributes": "

    An MD5 digest of the non-URL-encoded message attribute string. This can be used to verify that Amazon SQS received the message correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "MessageAttributeMap$key": null, + "MessageAttributeValue$StringValue": "

    Strings are Unicode with UTF8 binary encoding. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.

    ", + "MessageAttributeValue$DataType": "

    Amazon SQS supports the following logical data types: String, Number, and Binary. In addition, you can append your own custom labels. For more information, see Message Attribute Data Types.

    ", + "PurgeQueueRequest$QueueUrl": "

    The queue URL of the queue to delete the messages from when using the PurgeQueue API.

    ", + "QueueUrlList$member": null, + "ReceiveMessageRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "RemovePermissionRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "RemovePermissionRequest$Label": "

    The identification of the permission to remove. This is the label added with the AddPermission action.

    ", + "SendMessageBatchRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "SendMessageBatchRequestEntry$Id": "

    An identifier for the message in this batch. This is used to communicate the result. Note that the Ids of a batch request need to be unique within the request.

    ", + "SendMessageBatchRequestEntry$MessageBody": "

    Body of the message.

    ", + "SendMessageBatchResultEntry$Id": "

    An identifier for the message in this batch.

    ", + "SendMessageBatchResultEntry$MessageId": "

    An identifier for the message.

    ", + "SendMessageBatchResultEntry$MD5OfMessageBody": "

    An MD5 digest of the non-URL-encoded message body string. This can be used to verify that Amazon SQS received the message correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "SendMessageBatchResultEntry$MD5OfMessageAttributes": "

    An MD5 digest of the non-URL-encoded message attribute string. This can be used to verify that Amazon SQS received the message batch correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "SendMessageRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "SendMessageRequest$MessageBody": "

    The message to send. String maximum 256 KB in size. For a list of allowed characters, see the preceding important note.

    ", + "SendMessageResult$MD5OfMessageBody": "

    An MD5 digest of the non-URL-encoded message body string. This can be used to verify that Amazon SQS received the message correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "SendMessageResult$MD5OfMessageAttributes": "

    An MD5 digest of the non-URL-encoded message attribute string. This can be used to verify that Amazon SQS received the message correctly. Amazon SQS first URL decodes the message before creating the MD5 digest. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html.

    ", + "SendMessageResult$MessageId": "

    An element containing the message ID of the message sent to the queue. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide.

    ", + "SetQueueAttributesRequest$QueueUrl": "

    The URL of the Amazon SQS queue to take action on.

    ", + "StringList$member": null + } + }, + "StringList": { + "base": null, + "refs": { + "MessageAttributeValue$StringListValues": "

    Not implemented. Reserved for future use.

    " + } + }, + "TooManyEntriesInBatchRequest": { + "base": "

    Batch request contains more entries than permissible.

    ", + "refs": { + } + }, + "UnsupportedOperation": { + "base": "

    Error code 400. Unsupported operation.

    ", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sqs/2012-11-05/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "pagination": { + "ListQueues": { + "result_key": "QueueUrls" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1184 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-06", + "endpointPrefix":"ssm", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon SSM", + "serviceFullName":"Amazon Simple Systems Management Service", + "signatureVersion":"v4", + "targetPrefix":"AmazonSSM" + }, + "operations":{ + "CancelCommand":{ + "name":"CancelCommand", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelCommandRequest"}, + "output":{"shape":"CancelCommandResult"}, + "errors":[ + {"shape":"InvalidCommandId"}, + {"shape":"InvalidInstanceId"}, + {"shape":"DuplicateInstanceId"} + ] + }, + "CreateAssociation":{ + "name":"CreateAssociation", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"CreateAssociationRequest"}, + "output":{"shape":"CreateAssociationResult"}, + "errors":[ + {"shape":"AssociationAlreadyExists"}, + {"shape":"AssociationLimitExceeded"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidInstanceId"}, + {"shape":"UnsupportedPlatformType"}, + {"shape":"InvalidParameters"} + ] + }, + "CreateAssociationBatch":{ + "name":"CreateAssociationBatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAssociationBatchRequest"}, + "output":{"shape":"CreateAssociationBatchResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidParameters"}, + {"shape":"DuplicateInstanceId"}, + {"shape":"AssociationLimitExceeded"}, + {"shape":"UnsupportedPlatformType"} + ] + }, + "CreateDocument":{ + "name":"CreateDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDocumentRequest"}, + "output":{"shape":"CreateDocumentResult"}, + "errors":[ + {"shape":"DocumentAlreadyExists"}, + {"shape":"MaxDocumentSizeExceeded"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidDocumentContent"}, + {"shape":"DocumentLimitExceeded"} + ] + }, + "DeleteAssociation":{ + "name":"DeleteAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAssociationRequest"}, + "output":{"shape":"DeleteAssociationResult"}, + "errors":[ + {"shape":"AssociationDoesNotExist"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidInstanceId"}, + {"shape":"TooManyUpdates"} + ] + }, + "DeleteDocument":{ + "name":"DeleteDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDocumentRequest"}, + "output":{"shape":"DeleteDocumentResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"AssociatedInstances"} + ] + }, + 
"DescribeAssociation":{ + "name":"DescribeAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssociationRequest"}, + "output":{"shape":"DescribeAssociationResult"}, + "errors":[ + {"shape":"AssociationDoesNotExist"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidInstanceId"} + ] + }, + "DescribeDocument":{ + "name":"DescribeDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDocumentRequest"}, + "output":{"shape":"DescribeDocumentResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"} + ] + }, + "DescribeInstanceInformation":{ + "name":"DescribeInstanceInformation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceInformationRequest"}, + "output":{"shape":"DescribeInstanceInformationResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidNextToken"}, + {"shape":"InvalidInstanceInformationFilterValue"}, + {"shape":"InvalidFilterKey"} + ] + }, + "GetDocument":{ + "name":"GetDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDocumentRequest"}, + "output":{"shape":"GetDocumentResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidDocument"} + ] + }, + "ListAssociations":{ + "name":"ListAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssociationsRequest"}, + "output":{"shape":"ListAssociationsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidNextToken"} + ] + }, + "ListCommandInvocations":{ + "name":"ListCommandInvocations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCommandInvocationsRequest"}, + "output":{"shape":"ListCommandInvocationsResult"}, + "errors":[ + {"shape":"InvalidCommandId"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidFilterKey"}, + 
{"shape":"InvalidNextToken"} + ] + }, + "ListCommands":{ + "name":"ListCommands", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCommandsRequest"}, + "output":{"shape":"ListCommandsResult"}, + "errors":[ + {"shape":"InvalidCommandId"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidFilterKey"}, + {"shape":"InvalidNextToken"} + ] + }, + "ListDocuments":{ + "name":"ListDocuments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDocumentsRequest"}, + "output":{"shape":"ListDocumentsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidNextToken"}, + {"shape":"InvalidFilterKey"} + ] + }, + "SendCommand":{ + "name":"SendCommand", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendCommandRequest"}, + "output":{"shape":"SendCommandResult"}, + "errors":[ + {"shape":"DuplicateInstanceId"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidDocument"}, + {"shape":"InvalidOutputFolder"}, + {"shape":"InvalidParameters"}, + {"shape":"UnsupportedPlatformType"} + ] + }, + "UpdateAssociationStatus":{ + "name":"UpdateAssociationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAssociationStatusRequest"}, + "output":{"shape":"UpdateAssociationStatusResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidInstanceId"}, + {"shape":"InvalidDocument"}, + {"shape":"AssociationDoesNotExist"}, + {"shape":"StatusUnchanged"}, + {"shape":"TooManyUpdates"} + ] + } + }, + "shapes":{ + "AssociatedInstances":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Association":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"} + } + }, + "AssociationAlreadyExists":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AssociationDescription":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentName"}, + 
"InstanceId":{"shape":"InstanceId"}, + "Date":{"shape":"DateTime"}, + "Status":{"shape":"AssociationStatus"}, + "Parameters":{"shape":"Parameters"} + } + }, + "AssociationDescriptionList":{ + "type":"list", + "member":{ + "shape":"AssociationDescription", + "locationName":"AssociationDescription" + } + }, + "AssociationDoesNotExist":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AssociationFilter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"AssociationFilterKey"}, + "value":{"shape":"AssociationFilterValue"} + } + }, + "AssociationFilterKey":{ + "type":"string", + "enum":[ + "InstanceId", + "Name" + ] + }, + "AssociationFilterList":{ + "type":"list", + "member":{ + "shape":"AssociationFilter", + "locationName":"AssociationFilter" + }, + "min":1 + }, + "AssociationFilterValue":{ + "type":"string", + "min":1 + }, + "AssociationLimitExceeded":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AssociationList":{ + "type":"list", + "member":{ + "shape":"Association", + "locationName":"Association" + } + }, + "AssociationStatus":{ + "type":"structure", + "required":[ + "Date", + "Name", + "Message" + ], + "members":{ + "Date":{"shape":"DateTime"}, + "Name":{"shape":"AssociationStatusName"}, + "Message":{"shape":"StatusMessage"}, + "AdditionalInfo":{"shape":"StatusAdditionalInfo"} + } + }, + "AssociationStatusName":{ + "type":"string", + "enum":[ + "Pending", + "Success", + "Failed" + ] + }, + "BatchErrorMessage":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "CancelCommandRequest":{ + "type":"structure", + "required":["CommandId"], + "members":{ + "CommandId":{"shape":"CommandId"}, + "InstanceIds":{"shape":"InstanceIdList"} + } + }, + "CancelCommandResult":{ + "type":"structure", + "members":{ + } + }, + "Command":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"CommandId"}, + "DocumentName":{"shape":"DocumentName"}, + "Comment":{"shape":"Comment"}, + 
"ExpiresAfter":{"shape":"DateTime"}, + "Parameters":{"shape":"Parameters"}, + "InstanceIds":{"shape":"InstanceIdList"}, + "RequestedDateTime":{"shape":"DateTime"}, + "Status":{"shape":"CommandStatus"}, + "OutputS3BucketName":{"shape":"S3BucketName"}, + "OutputS3KeyPrefix":{"shape":"S3KeyPrefix"} + } + }, + "CommandFilter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"CommandFilterKey"}, + "value":{"shape":"CommandFilterValue"} + } + }, + "CommandFilterKey":{ + "type":"string", + "enum":[ + "InvokedAfter", + "InvokedBefore", + "Status" + ] + }, + "CommandFilterList":{ + "type":"list", + "member":{"shape":"CommandFilter"}, + "max":3, + "min":1 + }, + "CommandFilterValue":{ + "type":"string", + "min":1 + }, + "CommandId":{ + "type":"string", + "max":36, + "min":36 + }, + "CommandInvocation":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"CommandId"}, + "InstanceId":{"shape":"InstanceId"}, + "Comment":{"shape":"Comment"}, + "DocumentName":{"shape":"DocumentName"}, + "RequestedDateTime":{"shape":"DateTime"}, + "Status":{"shape":"CommandInvocationStatus"}, + "TraceOutput":{"shape":"InvocationTraceOutput"}, + "CommandPlugins":{"shape":"CommandPluginList"} + } + }, + "CommandInvocationList":{ + "type":"list", + "member":{"shape":"CommandInvocation"} + }, + "CommandInvocationStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Cancelling", + "Success", + "TimedOut", + "Cancelled", + "Failed" + ] + }, + "CommandList":{ + "type":"list", + "member":{"shape":"Command"} + }, + "CommandMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "CommandPlugin":{ + "type":"structure", + "members":{ + "Name":{"shape":"CommandPluginName"}, + "Status":{"shape":"CommandPluginStatus"}, + "ResponseCode":{"shape":"ResponseCode"}, + "ResponseStartDateTime":{"shape":"DateTime"}, + "ResponseFinishDateTime":{"shape":"DateTime"}, + "Output":{"shape":"CommandPluginOutput"}, + 
"OutputS3BucketName":{"shape":"S3BucketName"}, + "OutputS3KeyPrefix":{"shape":"S3KeyPrefix"} + } + }, + "CommandPluginList":{ + "type":"list", + "member":{"shape":"CommandPlugin"} + }, + "CommandPluginName":{ + "type":"string", + "min":4 + }, + "CommandPluginOutput":{ + "type":"string", + "max":2500 + }, + "CommandPluginStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Success", + "TimedOut", + "Cancelled", + "Failed" + ] + }, + "CommandStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Cancelling", + "Success", + "TimedOut", + "Cancelled", + "Failed" + ] + }, + "Comment":{ + "type":"string", + "max":100 + }, + "CreateAssociationBatchRequest":{ + "type":"structure", + "required":["Entries"], + "members":{ + "Entries":{"shape":"CreateAssociationBatchRequestEntries"} + } + }, + "CreateAssociationBatchRequestEntries":{ + "type":"list", + "member":{ + "shape":"CreateAssociationBatchRequestEntry", + "locationName":"entries" + } + }, + "CreateAssociationBatchRequestEntry":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"}, + "Parameters":{"shape":"Parameters"} + } + }, + "CreateAssociationBatchResult":{ + "type":"structure", + "members":{ + "Successful":{"shape":"AssociationDescriptionList"}, + "Failed":{"shape":"FailedCreateAssociationList"} + } + }, + "CreateAssociationRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"}, + "Parameters":{"shape":"Parameters"} + } + }, + "CreateAssociationResult":{ + "type":"structure", + "members":{ + "AssociationDescription":{"shape":"AssociationDescription"} + } + }, + "CreateDocumentRequest":{ + "type":"structure", + "required":[ + "Content", + "Name" + ], + "members":{ + "Content":{"shape":"DocumentContent"}, + "Name":{"shape":"DocumentName"} + } + }, + "CreateDocumentResult":{ + "type":"structure", + "members":{ + 
"DocumentDescription":{"shape":"DocumentDescription"} + } + }, + "DateTime":{"type":"timestamp"}, + "DeleteAssociationRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"} + } + }, + "DeleteAssociationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteDocumentRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DocumentName"} + } + }, + "DeleteDocumentResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeAssociationRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"} + } + }, + "DescribeAssociationResult":{ + "type":"structure", + "members":{ + "AssociationDescription":{"shape":"AssociationDescription"} + } + }, + "DescribeDocumentRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DocumentName"} + } + }, + "DescribeDocumentResult":{ + "type":"structure", + "members":{ + "Document":{"shape":"DocumentDescription"} + } + }, + "DescribeInstanceInformationRequest":{ + "type":"structure", + "members":{ + "InstanceInformationFilterList":{"shape":"InstanceInformationFilterList"}, + "MaxResults":{ + "shape":"MaxResultsEC2Compatible", + "box":true + }, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeInstanceInformationResult":{ + "type":"structure", + "members":{ + "InstanceInformationList":{"shape":"InstanceInformationList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescriptionInDocument":{"type":"string"}, + "DocumentAlreadyExists":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DocumentContent":{ + "type":"string", + "min":1 + }, + "DocumentDescription":{ + "type":"structure", + "members":{ + "Sha1":{"shape":"DocumentSha1"}, + "Name":{"shape":"DocumentName"}, + "CreatedDate":{"shape":"DateTime"}, + 
"Status":{"shape":"DocumentStatus"}, + "Description":{"shape":"DescriptionInDocument"}, + "Parameters":{"shape":"DocumentParameterList"}, + "PlatformTypes":{"shape":"PlatformTypeList"} + } + }, + "DocumentFilter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"DocumentFilterKey"}, + "value":{"shape":"DocumentFilterValue"} + } + }, + "DocumentFilterKey":{ + "type":"string", + "enum":[ + "Name", + "Owner", + "PlatformTypes" + ] + }, + "DocumentFilterList":{ + "type":"list", + "member":{ + "shape":"DocumentFilter", + "locationName":"DocumentFilter" + }, + "min":1 + }, + "DocumentFilterValue":{ + "type":"string", + "min":1 + }, + "DocumentIdentifier":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentName"}, + "PlatformTypes":{"shape":"PlatformTypeList"} + } + }, + "DocumentIdentifierList":{ + "type":"list", + "member":{ + "shape":"DocumentIdentifier", + "locationName":"DocumentIdentifier" + } + }, + "DocumentLimitExceeded":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "DocumentName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\-.]{3,128}$" + }, + "DocumentParameter":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentParameterName"}, + "Type":{"shape":"DocumentParameterType"}, + "Description":{"shape":"DocumentParameterDescrption"}, + "DefaultValue":{"shape":"DocumentParameterDefaultValue"} + } + }, + "DocumentParameterDefaultValue":{"type":"string"}, + "DocumentParameterDescrption":{"type":"string"}, + "DocumentParameterList":{ + "type":"list", + "member":{ + "shape":"DocumentParameter", + "locationName":"DocumentParameter" + } + }, + "DocumentParameterName":{"type":"string"}, + "DocumentParameterType":{ + "type":"string", + "enum":[ + "String", + "StringList" + ] + }, + "DocumentSha1":{"type":"string"}, + "DocumentStatus":{ + "type":"string", + "enum":[ + "Creating", + "Active", + "Deleting" + ] + }, + "DuplicateInstanceId":{ + "type":"structure", + "members":{ + }, + 
"exception":true + }, + "FailedCreateAssociation":{ + "type":"structure", + "members":{ + "Entry":{"shape":"CreateAssociationBatchRequestEntry"}, + "Message":{"shape":"BatchErrorMessage"}, + "Fault":{"shape":"Fault"} + } + }, + "FailedCreateAssociationList":{ + "type":"list", + "member":{ + "shape":"FailedCreateAssociation", + "locationName":"FailedCreateAssociationEntry" + } + }, + "Fault":{ + "type":"string", + "enum":[ + "Client", + "Server", + "Unknown" + ] + }, + "GetDocumentRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"DocumentName"} + } + }, + "GetDocumentResult":{ + "type":"structure", + "members":{ + "Name":{"shape":"DocumentName"}, + "Content":{"shape":"DocumentContent"} + } + }, + "InstanceId":{ + "type":"string", + "pattern":"(^i-(\\w{8}|\\w{17})$)|(^op-\\w{17}$)" + }, + "InstanceIdList":{ + "type":"list", + "member":{"shape":"InstanceId"}, + "max":50, + "min":1 + }, + "InstanceInformation":{ + "type":"structure", + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "PingStatus":{"shape":"PingStatus"}, + "LastPingDateTime":{ + "shape":"DateTime", + "box":true + }, + "AgentVersion":{"shape":"Version"}, + "IsLatestVersion":{ + "shape":"Boolean", + "box":true + }, + "PlatformType":{"shape":"PlatformType"}, + "PlatformName":{"shape":"String"}, + "PlatformVersion":{"shape":"String"} + } + }, + "InstanceInformationFilter":{ + "type":"structure", + "required":[ + "key", + "valueSet" + ], + "members":{ + "key":{"shape":"InstanceInformationFilterKey"}, + "valueSet":{"shape":"InstanceInformationFilterValueSet"} + } + }, + "InstanceInformationFilterKey":{ + "type":"string", + "enum":[ + "InstanceIds", + "AgentVersion", + "PingStatus", + "PlatformTypes" + ] + }, + "InstanceInformationFilterList":{ + "type":"list", + "member":{ + "shape":"InstanceInformationFilter", + "locationName":"InstanceInformationFilter" + }, + "min":1 + }, + "InstanceInformationFilterValue":{ + "type":"string", + "min":1 + }, + 
"InstanceInformationFilterValueSet":{ + "type":"list", + "member":{ + "shape":"InstanceInformationFilterValue", + "locationName":"InstanceInformationFilterValue" + }, + "max":100, + "min":1 + }, + "InstanceInformationList":{ + "type":"list", + "member":{ + "shape":"InstanceInformation", + "locationName":"InstanceInformation" + } + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidCommandId":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidDocument":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidDocumentContent":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidFilterKey":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInstanceId":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidInstanceInformationFilterValue":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvalidNextToken":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidOutputFolder":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidParameters":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "InvocationTraceOutput":{ + "type":"string", + "max":2500 + }, + "ListAssociationsRequest":{ + "type":"structure", + "required":["AssociationFilterList"], + "members":{ + "AssociationFilterList":{"shape":"AssociationFilterList"}, + "MaxResults":{ + "shape":"MaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListAssociationsResult":{ + "type":"structure", + "members":{ + "Associations":{"shape":"AssociationList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListCommandInvocationsRequest":{ + "type":"structure", + "members":{ + 
"CommandId":{"shape":"CommandId"}, + "InstanceId":{"shape":"InstanceId"}, + "MaxResults":{ + "shape":"CommandMaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"}, + "Filters":{"shape":"CommandFilterList"}, + "Details":{"shape":"Boolean"} + } + }, + "ListCommandInvocationsResult":{ + "type":"structure", + "members":{ + "CommandInvocations":{"shape":"CommandInvocationList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListCommandsRequest":{ + "type":"structure", + "members":{ + "CommandId":{"shape":"CommandId"}, + "InstanceId":{"shape":"InstanceId"}, + "MaxResults":{ + "shape":"CommandMaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"}, + "Filters":{"shape":"CommandFilterList"} + } + }, + "ListCommandsResult":{ + "type":"structure", + "members":{ + "Commands":{"shape":"CommandList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListDocumentsRequest":{ + "type":"structure", + "members":{ + "DocumentFilterList":{"shape":"DocumentFilterList"}, + "MaxResults":{ + "shape":"MaxResults", + "box":true + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListDocumentsResult":{ + "type":"structure", + "members":{ + "DocumentIdentifiers":{"shape":"DocumentIdentifierList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "MaxDocumentSizeExceeded":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MaxResults":{ + "type":"integer", + "max":25, + "min":1 + }, + "MaxResultsEC2Compatible":{ + "type":"integer", + "max":50, + "min":5 + }, + "NextToken":{"type":"string"}, + "ParameterName":{"type":"string"}, + "ParameterValue":{"type":"string"}, + "ParameterValueList":{ + "type":"list", + "member":{"shape":"ParameterValue"} + }, + "Parameters":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValueList"} + }, + "PingStatus":{ + "type":"string", + "enum":[ + "Online", + "ConnectionLost", + "Inactive" + ] + }, + "PlatformType":{ + "type":"string", + "enum":[ + "Windows", + "Linux" + ] + }, + 
"PlatformTypeList":{ + "type":"list", + "member":{ + "shape":"PlatformType", + "locationName":"PlatformType" + } + }, + "ResponseCode":{"type":"integer"}, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3 + }, + "S3KeyPrefix":{ + "type":"string", + "max":500 + }, + "SendCommandRequest":{ + "type":"structure", + "required":[ + "InstanceIds", + "DocumentName" + ], + "members":{ + "InstanceIds":{"shape":"InstanceIdList"}, + "DocumentName":{"shape":"DocumentName"}, + "TimeoutSeconds":{ + "shape":"TimeoutSeconds", + "box":true + }, + "Comment":{"shape":"Comment"}, + "Parameters":{"shape":"Parameters"}, + "OutputS3BucketName":{"shape":"S3BucketName"}, + "OutputS3KeyPrefix":{"shape":"S3KeyPrefix"} + } + }, + "SendCommandResult":{ + "type":"structure", + "members":{ + "Command":{"shape":"Command"} + } + }, + "StatusAdditionalInfo":{ + "type":"string", + "max":1024 + }, + "StatusMessage":{ + "type":"string", + "max":1024 + }, + "StatusUnchanged":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "String":{"type":"string"}, + "TimeoutSeconds":{ + "type":"integer", + "max":2592000, + "min":30 + }, + "TooManyUpdates":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UnsupportedPlatformType":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "UpdateAssociationStatusRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId", + "AssociationStatus" + ], + "members":{ + "Name":{"shape":"DocumentName"}, + "InstanceId":{"shape":"InstanceId"}, + "AssociationStatus":{"shape":"AssociationStatus"} + } + }, + "UpdateAssociationStatusResult":{ + "type":"structure", + "members":{ + "AssociationDescription":{"shape":"AssociationDescription"} + } + }, + "Version":{ + "type":"string", + "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,862 @@ +{ + "version": "2.0", + "service": "

    Simple Systems Manager (SSM) enables you to remotely manage the configuration of your Amazon EC2 instance. Using SSM, you can run scripts or commands using either EC2 Run Command or SSM Config. (SSM Config is currently available only for Windows instances.)

    Run Command

    Run Command provides an on-demand experience for executing commands. You can use pre-defined Amazon SSM documents to perform the actions listed later in this section, or you can create your own documents. With these documents, you can remotely configure your instances by sending commands using the Commands page in the Amazon EC2 console, AWS Tools for Windows PowerShell, or the AWS CLI.

    Run Command reports the status of the command execution for each instance targeted by a command. You can also audit the command execution to understand who executed commands, when, and what changes were made. By switching between different SSM documents, you can quickly configure your instances with different types of commands. To get started with Run Command, verify that your environment meets the prerequisites for remotely running commands on EC2 instances (Linux or Windows).

    SSM Config

    SSM Config is a lightweight instance configuration solution. SSM Config is currently only available for Windows instances. With SSM Config, you can specify a setup configuration for your instances. SSM Config is similar to EC2 User Data, which is another way of running one-time scripts or applying settings during instance launch. SSM Config is an extension of this capability. Using SSM documents, you can specify which actions the system should perform on your instances, including which applications to install, which AWS Directory Service directory to join, which Microsoft PowerShell modules to install, etc. If an instance is missing one or more of these configurations, the system makes those changes. By default, the system checks every five minutes to see if there is a new configuration to apply as defined in a new SSM document. If so, the system updates the instances accordingly. In this way, you can remotely maintain a consistent configuration baseline on your instances. SSM Config is available using the AWS CLI or the AWS Tools for Windows PowerShell. For more information, see Managing Windows Instance Configuration.

    SSM Config and Run Command include the following pre-defined documents.

    Amazon Pre-defined SSM Documents Name Description Platform

    AWS-RunShellScript

    Run shell scripts

    Linux

    AWS-UpdateSSMAgent

    Update the Amazon SSM agent

    Linux

    AWS-JoinDirectoryServiceDomain

    Join an AWS Directory

    Windows

    AWS-RunPowerShellScript

    Run PowerShell commands or scripts

    Windows

    AWS-UpdateEC2Config

    Update the EC2Config service

    Windows

    AWS-ConfigureWindowsUpdate

    Configure Windows Update settings

    Windows

    AWS-InstallApplication

    Install, repair, or uninstall software using an MSI package

    Windows

    AWS-InstallPowerShellModule

    Install PowerShell modules

    Windows

    AWS-ConfigureCloudWatch

    Configure Amazon CloudWatch Logs to monitor applications and systems

    Windows

    The commands or scripts specified in SSM documents run with administrative privilege on your instances because the Amazon SSM agent runs as root on Linux and the EC2Config service runs in the Local System account on Windows. If a user has permission to execute any of the pre-defined SSM documents (any document that begins with AWS-*) then that user also has administrator access to the instance. Delegate access to SSM and Run Command judiciously. This becomes extremely important if you create your own SSM documents. Amazon Web Services does not provide guidance about how to create secure SSM documents. You create SSM documents and delegate access to Run Command at your own risk. As a security best practice, we recommend that you assign access to \"AWS-*\" documents, especially the AWS-RunShellScript document on Linux and the AWS-RunPowerShellScript document on Windows, to trusted administrators only. You can create SSM documents for specific tasks and delegate access to non-administrators. ", + "operations": { + "CancelCommand": "

    Attempts to cancel the command specified by the Command ID. There is no guarantee that the command will be terminated and the underlying process stopped.

    ", + "CreateAssociation": "

    Associates the specified SSM document with the specified instance.

    When you associate an SSM document with an instance, the configuration agent on the instance processes the document and configures the instance as specified.

    If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

    ", + "CreateAssociationBatch": "

    Associates the specified SSM document with the specified instances.

    When you associate an SSM document with an instance, the configuration agent on the instance processes the document and configures the instance as specified.

    If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.

    ", + "CreateDocument": "

    Creates an SSM document.

    After you create an SSM document, you can use CreateAssociation to associate it with one or more running instances.

    ", + "DeleteAssociation": "

    Disassociates the specified SSM document from the specified instance.

    When you disassociate an SSM document from an instance, it does not change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.

    ", + "DeleteDocument": "

    Deletes the SSM document and all instance associations to the document.

    Before you delete the SSM document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.

    ", + "DescribeAssociation": "

    Describes the associations for the specified SSM document or instance.

    ", + "DescribeDocument": "

    Describes the specified SSM document.

    ", + "DescribeInstanceInformation": "Describes one or more of your instances. You can use this to get information about instances like the operating system platform, the SSM agent version, status etc. If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.", + "GetDocument": "

    Gets the contents of the specified SSM document.

    ", + "ListAssociations": "

    Lists the associations for the specified SSM document or instance.

    ", + "ListCommandInvocations": "An invocation is copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user executes SendCommand against three instances, then a command invocation is created for each requested instance ID. ListCommandInvocations provide status about command execution.", + "ListCommands": "

    Lists the commands requested by users of the AWS account.

    ", + "ListDocuments": "

    Describes one or more of your SSM documents.

    ", + "SendCommand": "Executes commands on one or more remote instances.", + "UpdateAssociationStatus": "

    Updates the status of the SSM document associated with the specified instance.

    " + }, + "shapes": { + "AssociatedInstances": { + "base": "

    You must disassociate an SSM document from all instances before you can delete it.

    ", + "refs": { + } + }, + "Association": { + "base": "

    Describes an association of an SSM document and an instance.

    ", + "refs": { + "AssociationList$member": null + } + }, + "AssociationAlreadyExists": { + "base": "

    The specified association already exists.

    ", + "refs": { + } + }, + "AssociationDescription": { + "base": "

    Describes the parameters for a document.

    ", + "refs": { + "AssociationDescriptionList$member": null, + "CreateAssociationResult$AssociationDescription": "

    Information about the association.

    ", + "DescribeAssociationResult$AssociationDescription": "

    Information about the association.

    ", + "UpdateAssociationStatusResult$AssociationDescription": "

    Information about the association.

    " + } + }, + "AssociationDescriptionList": { + "base": null, + "refs": { + "CreateAssociationBatchResult$Successful": "

    Information about the associations that succeeded.

    " + } + }, + "AssociationDoesNotExist": { + "base": "

    The specified association does not exist.

    ", + "refs": { + } + }, + "AssociationFilter": { + "base": "

    Describes a filter.

    ", + "refs": { + "AssociationFilterList$member": null + } + }, + "AssociationFilterKey": { + "base": null, + "refs": { + "AssociationFilter$key": "

    The name of the filter.

    " + } + }, + "AssociationFilterList": { + "base": null, + "refs": { + "ListAssociationsRequest$AssociationFilterList": "

    One or more filters. Use a filter to return a more specific list of results.

    " + } + }, + "AssociationFilterValue": { + "base": null, + "refs": { + "AssociationFilter$value": "

    The filter value.

    " + } + }, + "AssociationLimitExceeded": { + "base": "

    You can have at most 2,000 active associations.

    ", + "refs": { + } + }, + "AssociationList": { + "base": null, + "refs": { + "ListAssociationsResult$Associations": "

    The associations.

    " + } + }, + "AssociationStatus": { + "base": "

    Describes an association status.

    ", + "refs": { + "AssociationDescription$Status": "

    The association status.

    ", + "UpdateAssociationStatusRequest$AssociationStatus": "

    The association status.

    " + } + }, + "AssociationStatusName": { + "base": null, + "refs": { + "AssociationStatus$Name": "

    The status.

    " + } + }, + "BatchErrorMessage": { + "base": null, + "refs": { + "FailedCreateAssociation$Message": "

    A description of the failure.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "InstanceInformation$IsLatestVersion": "Indicates whether latest version of the SSM agent is running on your instance.", + "ListCommandInvocationsRequest$Details": "(Optional) If set this returns the response of the command executions and any command output. By default this is set to False." + } + }, + "CancelCommandRequest": { + "base": null, + "refs": { + } + }, + "CancelCommandResult": { + "base": "

    Whether or not the command was successfully canceled. There is no guarantee that a request can be canceled.

    ", + "refs": { + } + }, + "Command": { + "base": "Describes a command request.", + "refs": { + "CommandList$member": null, + "SendCommandResult$Command": "The request as it was received by SSM. Also provides the command ID which can be used future references to this request." + } + }, + "CommandFilter": { + "base": "Describes a command filter.", + "refs": { + "CommandFilterList$member": null + } + }, + "CommandFilterKey": { + "base": null, + "refs": { + "CommandFilter$key": "The name of the filter. For example, requested date and time." + } + }, + "CommandFilterList": { + "base": null, + "refs": { + "ListCommandInvocationsRequest$Filters": "(Optional) One or more filters. Use a filter to return a more specific list of results.", + "ListCommandsRequest$Filters": "(Optional) One or more filters. Use a filter to return a more specific list of results." + } + }, + "CommandFilterValue": { + "base": null, + "refs": { + "CommandFilter$value": "The filter value. For example: June 30, 2015." + } + }, + "CommandId": { + "base": null, + "refs": { + "CancelCommandRequest$CommandId": "The ID of the command you want to cancel.", + "Command$CommandId": "A unique identifier for this command.", + "CommandInvocation$CommandId": "The command against which this invocation was requested.", + "ListCommandInvocationsRequest$CommandId": "(Optional) The invocations for a specific command ID.", + "ListCommandsRequest$CommandId": "(Optional) If provided, lists only the specified command." + } + }, + "CommandInvocation": { + "base": "An invocation is copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user executes SendCommand against three instances, then a command invocation is created for each requested instance ID. 
A command invocation returns status and detail information about a command you executed.", + "refs": { + "CommandInvocationList$member": null + } + }, + "CommandInvocationList": { + "base": null, + "refs": { + "ListCommandInvocationsResult$CommandInvocations": "(Optional) A list of all invocations." + } + }, + "CommandInvocationStatus": { + "base": null, + "refs": { + "CommandInvocation$Status": "Whether or not the invocation succeeded, failed, or is pending." + } + }, + "CommandList": { + "base": null, + "refs": { + "ListCommandsResult$Commands": "(Optional) The list of commands requested by the user." + } + }, + "CommandMaxResults": { + "base": null, + "refs": { + "ListCommandInvocationsRequest$MaxResults": "(Optional) The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.", + "ListCommandsRequest$MaxResults": "(Optional) The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results." + } + }, + "CommandPlugin": { + "base": "Describes plugin details.", + "refs": { + "CommandPluginList$member": null + } + }, + "CommandPluginList": { + "base": null, + "refs": { + "CommandInvocation$CommandPlugins": null + } + }, + "CommandPluginName": { + "base": null, + "refs": { + "CommandPlugin$Name": "The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin, aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch, aws:runShellScript, or aws:updateSSMAgent." + } + }, + "CommandPluginOutput": { + "base": null, + "refs": { + "CommandPlugin$Output": "Output of the plugin execution." + } + }, + "CommandPluginStatus": { + "base": null, + "refs": { + "CommandPlugin$Status": "The status of this plugin. You can execute a document with multiple plugins." 
+ } + }, + "CommandStatus": { + "base": null, + "refs": { + "Command$Status": "The status of the command." + } + }, + "Comment": { + "base": null, + "refs": { + "Command$Comment": "User-specified information about the command, such as a brief description of what the command should do.", + "CommandInvocation$Comment": "User-specified information about the command, such as a brief description of what the command should do.", + "SendCommandRequest$Comment": "User-specified information about the command, such as a brief description of what the command should do." + } + }, + "CreateAssociationBatchRequest": { + "base": null, + "refs": { + } + }, + "CreateAssociationBatchRequestEntries": { + "base": null, + "refs": { + "CreateAssociationBatchRequest$Entries": "

    One or more associations.

    " + } + }, + "CreateAssociationBatchRequestEntry": { + "base": "Describes the association of an SSM document and an instance.", + "refs": { + "CreateAssociationBatchRequestEntries$member": null, + "FailedCreateAssociation$Entry": "

    The association.

    " + } + }, + "CreateAssociationBatchResult": { + "base": null, + "refs": { + } + }, + "CreateAssociationRequest": { + "base": null, + "refs": { + } + }, + "CreateAssociationResult": { + "base": null, + "refs": { + } + }, + "CreateDocumentRequest": { + "base": null, + "refs": { + } + }, + "CreateDocumentResult": { + "base": null, + "refs": { + } + }, + "DateTime": { + "base": null, + "refs": { + "AssociationDescription$Date": "

    The date when the association was made.

    ", + "AssociationStatus$Date": "

    The date when the status changed.

    ", + "Command$ExpiresAfter": "If this time is reached and the command has not already started executing, it will not execute. Calculated based on the ExpiresAfter user input provided as part of the SendCommand API.", + "Command$RequestedDateTime": "The date and time the command was requested.", + "CommandInvocation$RequestedDateTime": "The time and date the request was sent to this instance.", + "CommandPlugin$ResponseStartDateTime": "The time the plugin started executing.", + "CommandPlugin$ResponseFinishDateTime": "The time the plugin stopped executing. Could stop prematurely if, for example, a cancel command was sent.", + "DocumentDescription$CreatedDate": "The date when the SSM document was created.", + "InstanceInformation$LastPingDateTime": "The date and time when agent last pinged SSM service." + } + }, + "DeleteAssociationRequest": { + "base": null, + "refs": { + } + }, + "DeleteAssociationResult": { + "base": null, + "refs": { + } + }, + "DeleteDocumentRequest": { + "base": null, + "refs": { + } + }, + "DeleteDocumentResult": { + "base": null, + "refs": { + } + }, + "DescribeAssociationRequest": { + "base": null, + "refs": { + } + }, + "DescribeAssociationResult": { + "base": null, + "refs": { + } + }, + "DescribeDocumentRequest": { + "base": null, + "refs": { + } + }, + "DescribeDocumentResult": { + "base": null, + "refs": { + } + }, + "DescribeInstanceInformationRequest": { + "base": null, + "refs": { + } + }, + "DescribeInstanceInformationResult": { + "base": null, + "refs": { + } + }, + "DescriptionInDocument": { + "base": null, + "refs": { + "DocumentDescription$Description": "A description of the document." + } + }, + "DocumentAlreadyExists": { + "base": "

    The specified SSM document already exists.

    ", + "refs": { + } + }, + "DocumentContent": { + "base": null, + "refs": { + "CreateDocumentRequest$Content": "

    A valid JSON string. For more information about the contents of this string, see SSM Document.

    ", + "GetDocumentResult$Content": "

    The contents of the SSM document.

    " + } + }, + "DocumentDescription": { + "base": "Describes an SSM document.", + "refs": { + "CreateDocumentResult$DocumentDescription": "

    Information about the SSM document.

    ", + "DescribeDocumentResult$Document": "

    Information about the SSM document.

    " + } + }, + "DocumentFilter": { + "base": "

    Describes a filter.

    ", + "refs": { + "DocumentFilterList$member": null + } + }, + "DocumentFilterKey": { + "base": null, + "refs": { + "DocumentFilter$key": "

    The name of the filter.

    " + } + }, + "DocumentFilterList": { + "base": null, + "refs": { + "ListDocumentsRequest$DocumentFilterList": "

    One or more filters. Use a filter to return a more specific list of results.

    " + } + }, + "DocumentFilterValue": { + "base": null, + "refs": { + "DocumentFilter$value": "

    The value of the filter.

    " + } + }, + "DocumentIdentifier": { + "base": "

    Describes the name of an SSM document.

    ", + "refs": { + "DocumentIdentifierList$member": null + } + }, + "DocumentIdentifierList": { + "base": null, + "refs": { + "ListDocumentsResult$DocumentIdentifiers": "

    The names of the SSM documents.

    " + } + }, + "DocumentLimitExceeded": { + "base": "

    You can have at most 100 active SSM documents.

    ", + "refs": { + } + }, + "DocumentName": { + "base": null, + "refs": { + "Association$Name": "

    The name of the SSM document.

    ", + "AssociationDescription$Name": "

    The name of the SSM document.

    ", + "Command$DocumentName": "The name of the SSM document requested for execution.", + "CommandInvocation$DocumentName": "The document name that was requested for execution.", + "CreateAssociationBatchRequestEntry$Name": "The name of the configuration document.", + "CreateAssociationRequest$Name": "

    The name of the SSM document.

    ", + "CreateDocumentRequest$Name": "

    A name for the SSM document.

    ", + "DeleteAssociationRequest$Name": "

    The name of the SSM document.

    ", + "DeleteDocumentRequest$Name": "

    The name of the SSM document.

    ", + "DescribeAssociationRequest$Name": "

    The name of the SSM document.

    ", + "DescribeDocumentRequest$Name": "

    The name of the SSM document.

    ", + "DocumentDescription$Name": "

    The name of the SSM document.

    ", + "DocumentIdentifier$Name": "

    The name of the SSM document.

    ", + "GetDocumentRequest$Name": "

    The name of the SSM document.

    ", + "GetDocumentResult$Name": "

    The name of the SSM document.

    ", + "SendCommandRequest$DocumentName": "Required. The name of the SSM document to execute. This can be an SSM public document or a custom document.", + "UpdateAssociationStatusRequest$Name": "

    The name of the SSM document.

    " + } + }, + "DocumentParameter": { + "base": null, + "refs": { + "DocumentParameterList$member": null + } + }, + "DocumentParameterDefaultValue": { + "base": null, + "refs": { + "DocumentParameter$DefaultValue": "

    If specified, the default values for the parameters. Parameters without a default value are required. Parameters with a default value are optional.

    " + } + }, + "DocumentParameterDescrption": { + "base": null, + "refs": { + "DocumentParameter$Description": "

    A description of what the parameter does, how to use it, the default value, and whether or not the parameter is optional.

    " + } + }, + "DocumentParameterList": { + "base": null, + "refs": { + "DocumentDescription$Parameters": "

    A description of the parameters for a document.

    " + } + }, + "DocumentParameterName": { + "base": null, + "refs": { + "DocumentParameter$Name": "

    The name of the parameter.

    " + } + }, + "DocumentParameterType": { + "base": null, + "refs": { + "DocumentParameter$Type": "

    The type of parameter. The type can be either “String” or “StringList”.

    " + } + }, + "DocumentSha1": { + "base": null, + "refs": { + "DocumentDescription$Sha1": "

    The SHA1 hash of the document, which you can use for verification purposes.

    " + } + }, + "DocumentStatus": { + "base": null, + "refs": { + "DocumentDescription$Status": "

    The status of the SSM document.

    " + } + }, + "DuplicateInstanceId": { + "base": "

    You cannot specify an instance ID in more than one association.

    ", + "refs": { + } + }, + "FailedCreateAssociation": { + "base": "

    Describes a failed association.

    ", + "refs": { + "FailedCreateAssociationList$member": null + } + }, + "FailedCreateAssociationList": { + "base": null, + "refs": { + "CreateAssociationBatchResult$Failed": "

    Information about the associations that failed.

    " + } + }, + "Fault": { + "base": null, + "refs": { + "FailedCreateAssociation$Fault": "

    The source of the failure.

    " + } + }, + "GetDocumentRequest": { + "base": null, + "refs": { + } + }, + "GetDocumentResult": { + "base": null, + "refs": { + } + }, + "InstanceId": { + "base": null, + "refs": { + "Association$InstanceId": "

    The ID of the instance.

    ", + "AssociationDescription$InstanceId": "

    The ID of the instance.

    ", + "CommandInvocation$InstanceId": "The instance ID in which this invocation was requested.", + "CreateAssociationBatchRequestEntry$InstanceId": "The ID of the instance.", + "CreateAssociationRequest$InstanceId": "

    The Windows Server instance ID.

    ", + "DeleteAssociationRequest$InstanceId": "

    The ID of the instance.

    ", + "DescribeAssociationRequest$InstanceId": "

    The Windows Server instance ID.

    ", + "InstanceIdList$member": null, + "InstanceInformation$InstanceId": "The instance ID.", + "ListCommandInvocationsRequest$InstanceId": "(Optional) The command execution details for a specific instance ID.", + "ListCommandsRequest$InstanceId": "(Optional) Lists commands issued against this instance ID.", + "UpdateAssociationStatusRequest$InstanceId": "

    The ID of the instance.

    " + } + }, + "InstanceIdList": { + "base": null, + "refs": { + "CancelCommandRequest$InstanceIds": "

    (Optional) A list of instance IDs on which you want to cancel the command. If not provided, the command is canceled on every instance on which it was requested.

    ", + "Command$InstanceIds": "The instance IDs against which this command was requested.", + "SendCommandRequest$InstanceIds": "Required. The instance IDs where the command should execute." + } + }, + "InstanceInformation": { + "base": "Describes a filter for a specific list of instances.", + "refs": { + "InstanceInformationList$member": null + } + }, + "InstanceInformationFilter": { + "base": "Describes a filter for a specific list of instances.", + "refs": { + "InstanceInformationFilterList$member": null + } + }, + "InstanceInformationFilterKey": { + "base": null, + "refs": { + "InstanceInformationFilter$key": "The name of the filter." + } + }, + "InstanceInformationFilterList": { + "base": null, + "refs": { + "DescribeInstanceInformationRequest$InstanceInformationFilterList": "One or more filters. Use a filter to return a more specific list of instances." + } + }, + "InstanceInformationFilterValue": { + "base": null, + "refs": { + "InstanceInformationFilterValueSet$member": null + } + }, + "InstanceInformationFilterValueSet": { + "base": null, + "refs": { + "InstanceInformationFilter$valueSet": "The filter values." + } + }, + "InstanceInformationList": { + "base": null, + "refs": { + "DescribeInstanceInformationResult$InstanceInformationList": "The instance information list." + } + }, + "InternalServerError": { + "base": "

    An error occurred on the server side.

    ", + "refs": { + } + }, + "InvalidCommandId": { + "base": null, + "refs": { + } + }, + "InvalidDocument": { + "base": "

    The specified document does not exist.

    ", + "refs": { + } + }, + "InvalidDocumentContent": { + "base": "

    The content for the SSM document is not valid.

    ", + "refs": { + } + }, + "InvalidFilterKey": { + "base": "The specified key is not valid.", + "refs": { + } + }, + "InvalidInstanceId": { + "base": "

    The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

    ", + "refs": { + } + }, + "InvalidInstanceInformationFilterValue": { + "base": "The specified filter value is not valid.", + "refs": { + } + }, + "InvalidNextToken": { + "base": "

    The specified token is not valid.

    ", + "refs": { + } + }, + "InvalidOutputFolder": { + "base": "The S3 bucket does not exist.", + "refs": { + } + }, + "InvalidParameters": { + "base": "You must specify values for all required parameters in the SSM document. You can only supply values to parameters defined in the SSM document.", + "refs": { + } + }, + "InvocationTraceOutput": { + "base": null, + "refs": { + "CommandInvocation$TraceOutput": "Gets the trace output sent by the agent." + } + }, + "ListAssociationsRequest": { + "base": null, + "refs": { + } + }, + "ListAssociationsResult": { + "base": null, + "refs": { + } + }, + "ListCommandInvocationsRequest": { + "base": null, + "refs": { + } + }, + "ListCommandInvocationsResult": { + "base": null, + "refs": { + } + }, + "ListCommandsRequest": { + "base": null, + "refs": { + } + }, + "ListCommandsResult": { + "base": null, + "refs": { + } + }, + "ListDocumentsRequest": { + "base": null, + "refs": { + } + }, + "ListDocumentsResult": { + "base": null, + "refs": { + } + }, + "MaxDocumentSizeExceeded": { + "base": "

    The size limit of an SSM document is 64 KB.

    ", + "refs": { + } + }, + "MaxResults": { + "base": null, + "refs": { + "ListAssociationsRequest$MaxResults": "

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    ", + "ListDocumentsRequest$MaxResults": "

    The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

    " + } + }, + "MaxResultsEC2Compatible": { + "base": null, + "refs": { + "DescribeInstanceInformationRequest$MaxResults": "The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results." + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeInstanceInformationRequest$NextToken": "The token for the next set of items to return. (You received this token from a previous call.)", + "DescribeInstanceInformationResult$NextToken": "The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.", + "ListAssociationsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "ListAssociationsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    ", + "ListCommandInvocationsRequest$NextToken": "(Optional) The token for the next set of items to return. (You received this token from a previous call.)", + "ListCommandInvocationsResult$NextToken": "(Optional) The token for the next set of items to return. (You received this token from a previous call.)", + "ListCommandsRequest$NextToken": "(Optional) The token for the next set of items to return. (You received this token from a previous call.)", + "ListCommandsResult$NextToken": "(Optional) The token for the next set of items to return. (You received this token from a previous call.)", + "ListDocumentsRequest$NextToken": "

    The token for the next set of items to return. (You received this token from a previous call.)

    ", + "ListDocumentsResult$NextToken": "

    The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

    " + } + }, + "ParameterName": { + "base": null, + "refs": { + "Parameters$key": null + } + }, + "ParameterValue": { + "base": null, + "refs": { + "ParameterValueList$member": null + } + }, + "ParameterValueList": { + "base": null, + "refs": { + "Parameters$value": null + } + }, + "Parameters": { + "base": null, + "refs": { + "AssociationDescription$Parameters": "A description of the parameters for a document.", + "Command$Parameters": "The parameter values to be inserted in the SSM document when executing the command.", + "CreateAssociationBatchRequestEntry$Parameters": "A description of the parameters for a document.", + "CreateAssociationRequest$Parameters": "The parameters for the documents runtime configuration.", + "SendCommandRequest$Parameters": "The required and optional parameters specified in the SSM document being executed." + } + }, + "PingStatus": { + "base": null, + "refs": { + "InstanceInformation$PingStatus": "Connection status of the SSM agent." + } + }, + "PlatformType": { + "base": null, + "refs": { + "InstanceInformation$PlatformType": "The operating system platform type.", + "PlatformTypeList$member": null + } + }, + "PlatformTypeList": { + "base": null, + "refs": { + "DocumentDescription$PlatformTypes": "The list of OS platforms compatible with this SSM document.", + "DocumentIdentifier$PlatformTypes": "The operating system platform." + } + }, + "ResponseCode": { + "base": null, + "refs": { + "CommandPlugin$ResponseCode": "A numeric response code generated after executing the plugin." + } + }, + "S3BucketName": { + "base": null, + "refs": { + "Command$OutputS3BucketName": "The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command.", + "CommandPlugin$OutputS3BucketName": "The S3 bucket where the responses to the command executions should be stored. 
This was requested when issuing the command.", + "SendCommandRequest$OutputS3BucketName": "The name of the S3 bucket where command execution responses should be stored." + } + }, + "S3KeyPrefix": { + "base": null, + "refs": { + "Command$OutputS3KeyPrefix": "The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command.", + "CommandPlugin$OutputS3KeyPrefix": "The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command.", + "SendCommandRequest$OutputS3KeyPrefix": "The directory structure within the S3 bucket where the responses should be stored." + } + }, + "SendCommandRequest": { + "base": null, + "refs": { + } + }, + "SendCommandResult": { + "base": null, + "refs": { + } + }, + "StatusAdditionalInfo": { + "base": null, + "refs": { + "AssociationStatus$AdditionalInfo": "

    A user-defined string.

    " + } + }, + "StatusMessage": { + "base": null, + "refs": { + "AssociationStatus$Message": "

    The reason for the status.

    " + } + }, + "StatusUnchanged": { + "base": "

    The updated status is the same as the current status.

    ", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "InstanceInformation$PlatformName": "The name of the operating system platform running on your instance.", + "InstanceInformation$PlatformVersion": "The version of the OS platform running on your instance.", + "InternalServerError$message": "An error occurred on the server side.", + "InvalidDocument$message": "

    The SSM document does not exist or the document is not available to the user. This exception can be issued by CreateAssociation, CreateAssociationBatch, DeleteAssociation, DeleteDocument, DescribeAssociation, DescribeDocument, GetDocument, SendCommand, or UpdateAssociationStatus.

    ", + "InvalidDocumentContent$message": "

    A description of the validation error.

    ", + "InvalidInstanceInformationFilterValue$message": null, + "InvalidParameters$message": "

    The parameter values entered by the user do not work in the SSM document. For example, incorrect type. This exception can be issued by CreateAssociation, CreateAssociationBatch, or SendCommand.

    ", + "UnsupportedPlatformType$message": null + } + }, + "TimeoutSeconds": { + "base": null, + "refs": { + "SendCommandRequest$TimeoutSeconds": "If this time is reached and the command has not already started executing, it will not execute." + } + }, + "TooManyUpdates": { + "base": "

    There are concurrent updates for a resource that supports one update at a time.

    ", + "refs": { + } + }, + "UnsupportedPlatformType": { + "base": "The document does not support the platform type of the given instance ID(s).", + "refs": { + } + }, + "UpdateAssociationStatusRequest": { + "base": null, + "refs": { + } + }, + "UpdateAssociationStatusResult": { + "base": null, + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "InstanceInformation$AgentVersion": "The version of the SSM agent running on your instance." + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Associations" + }, + "ListCommandInvocations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + 
"result_key": "CommandInvocations" + }, + "ListCommands": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Commands" + }, + "ListDocuments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DocumentIdentifiers" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2581 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2013-06-30", + "endpointPrefix":"storagegateway", + "jsonVersion":"1.1", + "serviceFullName":"AWS Storage Gateway", + "signatureVersion":"v4", + "targetPrefix":"StorageGateway_20130630", + "protocol":"json" + }, + "operations":{ + "ActivateGateway":{ + "name":"ActivateGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ActivateGatewayInput"}, + "output":{"shape":"ActivateGatewayOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "AddCache":{ + "name":"AddCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddCacheInput"}, + "output":{"shape":"AddCacheOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + 
"error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceInput"}, + "output":{"shape":"AddTagsToResourceOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "AddUploadBuffer":{ + "name":"AddUploadBuffer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddUploadBufferInput"}, + "output":{"shape":"AddUploadBufferOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "AddWorkingStorage":{ + "name":"AddWorkingStorage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddWorkingStorageInput"}, + "output":{"shape":"AddWorkingStorageOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CancelArchival":{ + "name":"CancelArchival", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelArchivalInput"}, + "output":{"shape":"CancelArchivalOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CancelRetrieval":{ + "name":"CancelRetrieval", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelRetrievalInput"}, + "output":{"shape":"CancelRetrievalOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + 
"error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateCachediSCSIVolume":{ + "name":"CreateCachediSCSIVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCachediSCSIVolumeInput"}, + "output":{"shape":"CreateCachediSCSIVolumeOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateSnapshot":{ + "name":"CreateSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotInput"}, + "output":{"shape":"CreateSnapshotOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateSnapshotFromVolumeRecoveryPoint":{ + "name":"CreateSnapshotFromVolumeRecoveryPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSnapshotFromVolumeRecoveryPointInput"}, + "output":{"shape":"CreateSnapshotFromVolumeRecoveryPointOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateStorediSCSIVolume":{ + "name":"CreateStorediSCSIVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStorediSCSIVolumeInput"}, + "output":{"shape":"CreateStorediSCSIVolumeOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "CreateTapes":{ + 
"name":"CreateTapes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTapesInput"}, + "output":{"shape":"CreateTapesOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteBandwidthRateLimit":{ + "name":"DeleteBandwidthRateLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBandwidthRateLimitInput"}, + "output":{"shape":"DeleteBandwidthRateLimitOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteChapCredentials":{ + "name":"DeleteChapCredentials", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteChapCredentialsInput"}, + "output":{"shape":"DeleteChapCredentialsOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteGateway":{ + "name":"DeleteGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGatewayInput"}, + "output":{"shape":"DeleteGatewayOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteSnapshotSchedule":{ + "name":"DeleteSnapshotSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSnapshotScheduleInput"}, + "output":{"shape":"DeleteSnapshotScheduleOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + 
"exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteTape":{ + "name":"DeleteTape", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTapeInput"}, + "output":{"shape":"DeleteTapeOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteTapeArchive":{ + "name":"DeleteTapeArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTapeArchiveInput"}, + "output":{"shape":"DeleteTapeArchiveOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DeleteVolume":{ + "name":"DeleteVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVolumeInput"}, + "output":{"shape":"DeleteVolumeOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeBandwidthRateLimit":{ + "name":"DescribeBandwidthRateLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBandwidthRateLimitInput"}, + "output":{"shape":"DescribeBandwidthRateLimitOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeCache":{ + "name":"DescribeCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCacheInput"}, + "output":{"shape":"DescribeCacheOutput"}, + 
"errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeCachediSCSIVolumes":{ + "name":"DescribeCachediSCSIVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCachediSCSIVolumesInput"}, + "output":{"shape":"DescribeCachediSCSIVolumesOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeChapCredentials":{ + "name":"DescribeChapCredentials", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeChapCredentialsInput"}, + "output":{"shape":"DescribeChapCredentialsOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeGatewayInformation":{ + "name":"DescribeGatewayInformation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGatewayInformationInput"}, + "output":{"shape":"DescribeGatewayInformationOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeMaintenanceStartTime":{ + "name":"DescribeMaintenanceStartTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMaintenanceStartTimeInput"}, + "output":{"shape":"DescribeMaintenanceStartTimeOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + 
"error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeSnapshotSchedule":{ + "name":"DescribeSnapshotSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSnapshotScheduleInput"}, + "output":{"shape":"DescribeSnapshotScheduleOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeStorediSCSIVolumes":{ + "name":"DescribeStorediSCSIVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStorediSCSIVolumesInput"}, + "output":{"shape":"DescribeStorediSCSIVolumesOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeTapeArchives":{ + "name":"DescribeTapeArchives", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTapeArchivesInput"}, + "output":{"shape":"DescribeTapeArchivesOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeTapeRecoveryPoints":{ + "name":"DescribeTapeRecoveryPoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTapeRecoveryPointsInput"}, + "output":{"shape":"DescribeTapeRecoveryPointsOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeTapes":{ + "name":"DescribeTapes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeTapesInput"}, + "output":{"shape":"DescribeTapesOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeUploadBuffer":{ + "name":"DescribeUploadBuffer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUploadBufferInput"}, + "output":{"shape":"DescribeUploadBufferOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeVTLDevices":{ + "name":"DescribeVTLDevices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVTLDevicesInput"}, + "output":{"shape":"DescribeVTLDevicesOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DescribeWorkingStorage":{ + "name":"DescribeWorkingStorage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkingStorageInput"}, + "output":{"shape":"DescribeWorkingStorageOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "DisableGateway":{ + "name":"DisableGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableGatewayInput"}, + "output":{"shape":"DisableGatewayOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + 
"exception":true + } + ] + }, + "ListGateways":{ + "name":"ListGateways", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGatewaysInput"}, + "output":{"shape":"ListGatewaysOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListLocalDisks":{ + "name":"ListLocalDisks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLocalDisksInput"}, + "output":{"shape":"ListLocalDisksOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListVolumeInitiators":{ + "name":"ListVolumeInitiators", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVolumeInitiatorsInput"}, + "output":{"shape":"ListVolumeInitiatorsOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListVolumeRecoveryPoints":{ + "name":"ListVolumeRecoveryPoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVolumeRecoveryPointsInput"}, + "output":{"shape":"ListVolumeRecoveryPointsOutput"}, + "errors":[ + { + 
"shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ListVolumes":{ + "name":"ListVolumes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVolumesInput"}, + "output":{"shape":"ListVolumesOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceInput"}, + "output":{"shape":"RemoveTagsFromResourceOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ResetCache":{ + "name":"ResetCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetCacheInput"}, + "output":{"shape":"ResetCacheOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RetrieveTapeArchive":{ + "name":"RetrieveTapeArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetrieveTapeArchiveInput"}, + "output":{"shape":"RetrieveTapeArchiveOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "RetrieveTapeRecoveryPoint":{ + "name":"RetrieveTapeRecoveryPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, 
+ "input":{"shape":"RetrieveTapeRecoveryPointInput"}, + "output":{"shape":"RetrieveTapeRecoveryPointOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "ShutdownGateway":{ + "name":"ShutdownGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ShutdownGatewayInput"}, + "output":{"shape":"ShutdownGatewayOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "StartGateway":{ + "name":"StartGateway", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartGatewayInput"}, + "output":{"shape":"StartGatewayOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateBandwidthRateLimit":{ + "name":"UpdateBandwidthRateLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBandwidthRateLimitInput"}, + "output":{"shape":"UpdateBandwidthRateLimitOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateChapCredentials":{ + "name":"UpdateChapCredentials", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateChapCredentialsInput"}, + "output":{"shape":"UpdateChapCredentialsOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + 
"error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateGatewayInformation":{ + "name":"UpdateGatewayInformation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGatewayInformationInput"}, + "output":{"shape":"UpdateGatewayInformationOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateGatewaySoftwareNow":{ + "name":"UpdateGatewaySoftwareNow", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGatewaySoftwareNowInput"}, + "output":{"shape":"UpdateGatewaySoftwareNowOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateMaintenanceStartTime":{ + "name":"UpdateMaintenanceStartTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMaintenanceStartTimeInput"}, + "output":{"shape":"UpdateMaintenanceStartTimeOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateSnapshotSchedule":{ + "name":"UpdateSnapshotSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSnapshotScheduleInput"}, + "output":{"shape":"UpdateSnapshotScheduleOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + }, + "UpdateVTLDeviceType":{ + "name":"UpdateVTLDeviceType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"UpdateVTLDeviceTypeInput"}, + "output":{"shape":"UpdateVTLDeviceTypeOutput"}, + "errors":[ + { + "shape":"InvalidGatewayRequestException", + "error":{"httpStatusCode":400}, + "exception":true + }, + { + "shape":"InternalServerError", + "error":{"httpStatusCode":500}, + "exception":true + } + ] + } + }, + "shapes":{ + "ActivateGatewayInput":{ + "type":"structure", + "required":[ + "ActivationKey", + "GatewayName", + "GatewayTimezone", + "GatewayRegion" + ], + "members":{ + "ActivationKey":{"shape":"ActivationKey"}, + "GatewayName":{"shape":"GatewayName"}, + "GatewayTimezone":{"shape":"GatewayTimezone"}, + "GatewayRegion":{"shape":"RegionId"}, + "GatewayType":{"shape":"GatewayType"}, + "TapeDriveType":{"shape":"TapeDriveType"}, + "MediumChangerType":{"shape":"MediumChangerType"} + } + }, + "ActivateGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ActivationKey":{ + "type":"string", + "min":1, + "max":50 + }, + "AddCacheInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "DiskIds" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"} + } + }, + "AddCacheOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "AddTagsToResourceInput":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "Tags":{"shape":"Tags"} + } + }, + "AddTagsToResourceOutput":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"} + } + }, + "AddUploadBufferInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "DiskIds" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"} + } + }, + "AddUploadBufferOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "AddWorkingStorageInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "DiskIds" + ], + "members":{ + 
"GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"} + } + }, + "AddWorkingStorageOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "BandwidthDownloadRateLimit":{ + "type":"long", + "min":102400 + }, + "BandwidthType":{ + "type":"string", + "min":3, + "max":25 + }, + "BandwidthUploadRateLimit":{ + "type":"long", + "min":51200 + }, + "CachediSCSIVolume":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeId":{"shape":"VolumeId"}, + "VolumeType":{"shape":"VolumeType"}, + "VolumeStatus":{"shape":"VolumeStatus"}, + "VolumeSizeInBytes":{"shape":"long"}, + "VolumeProgress":{"shape":"DoubleObject"}, + "SourceSnapshotId":{"shape":"SnapshotId"}, + "VolumeiSCSIAttributes":{"shape":"VolumeiSCSIAttributes"} + } + }, + "CachediSCSIVolumes":{ + "type":"list", + "member":{"shape":"CachediSCSIVolume"} + }, + "CancelArchivalInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeARN" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeARN":{"shape":"TapeARN"} + } + }, + "CancelArchivalOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "CancelRetrievalInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeARN" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeARN":{"shape":"TapeARN"} + } + }, + "CancelRetrievalOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "ChapCredentials":{ + "type":"list", + "member":{"shape":"ChapInfo"} + }, + "ChapInfo":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "SecretToAuthenticateInitiator":{"shape":"ChapSecret"}, + "InitiatorName":{"shape":"IqnName"}, + "SecretToAuthenticateTarget":{"shape":"ChapSecret"} + } + }, + "ChapSecret":{ + "type":"string", + "min":1, + "max":100 + }, + "ClientToken":{ + "type":"string", + "min":5, + "max":100 + }, + "CreateCachediSCSIVolumeInput":{ + "type":"structure", + 
"required":[ + "GatewayARN", + "VolumeSizeInBytes", + "TargetName", + "NetworkInterfaceId", + "ClientToken" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "VolumeSizeInBytes":{"shape":"long"}, + "SnapshotId":{"shape":"SnapshotId"}, + "TargetName":{"shape":"TargetName"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"}, + "ClientToken":{"shape":"ClientToken"} + } + }, + "CreateCachediSCSIVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "TargetARN":{"shape":"TargetARN"} + } + }, + "CreateSnapshotFromVolumeRecoveryPointInput":{ + "type":"structure", + "required":[ + "VolumeARN", + "SnapshotDescription" + ], + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "SnapshotDescription":{"shape":"SnapshotDescription"} + } + }, + "CreateSnapshotFromVolumeRecoveryPointOutput":{ + "type":"structure", + "members":{ + "SnapshotId":{"shape":"SnapshotId"}, + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeRecoveryPointTime":{"shape":"string"} + } + }, + "CreateSnapshotInput":{ + "type":"structure", + "required":[ + "VolumeARN", + "SnapshotDescription" + ], + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "SnapshotDescription":{"shape":"SnapshotDescription"} + } + }, + "CreateSnapshotOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "SnapshotId":{"shape":"SnapshotId"} + } + }, + "CreateStorediSCSIVolumeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "DiskId", + "PreserveExistingData", + "TargetName", + "NetworkInterfaceId" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskId":{"shape":"DiskId"}, + "SnapshotId":{"shape":"SnapshotId"}, + "PreserveExistingData":{"shape":"boolean"}, + "TargetName":{"shape":"TargetName"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"} + } + }, + "CreateStorediSCSIVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeSizeInBytes":{"shape":"long"}, + 
"TargetARN":{"shape":"TargetARN"} + } + }, + "CreateTapesInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeSizeInBytes", + "ClientToken", + "NumTapesToCreate", + "TapeBarcodePrefix" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "ClientToken":{"shape":"ClientToken"}, + "NumTapesToCreate":{"shape":"NumTapesToCreate"}, + "TapeBarcodePrefix":{"shape":"TapeBarcodePrefix"} + } + }, + "CreateTapesOutput":{ + "type":"structure", + "members":{ + "TapeARNs":{"shape":"TapeARNs"} + } + }, + "DayOfWeek":{ + "type":"integer", + "min":0, + "max":6 + }, + "DeleteBandwidthRateLimitInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "BandwidthType" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "BandwidthType":{"shape":"BandwidthType"} + } + }, + "DeleteBandwidthRateLimitOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DeleteChapCredentialsInput":{ + "type":"structure", + "required":[ + "TargetARN", + "InitiatorName" + ], + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "InitiatorName":{"shape":"IqnName"} + } + }, + "DeleteChapCredentialsOutput":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "InitiatorName":{"shape":"IqnName"} + } + }, + "DeleteGatewayInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DeleteGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DeleteSnapshotScheduleInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DeleteSnapshotScheduleOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DeleteTapeArchiveInput":{ + "type":"structure", + "required":["TapeARN"], + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "DeleteTapeArchiveOutput":{ + 
"type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "DeleteTapeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "TapeARN" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeARN":{"shape":"TapeARN"} + } + }, + "DeleteTapeOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "DeleteVolumeInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DeleteVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DescribeBandwidthRateLimitInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeBandwidthRateLimitOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "AverageUploadRateLimitInBitsPerSec":{"shape":"BandwidthUploadRateLimit"}, + "AverageDownloadRateLimitInBitsPerSec":{"shape":"BandwidthDownloadRateLimit"} + } + }, + "DescribeCacheInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeCacheOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"}, + "CacheAllocatedInBytes":{"shape":"long"}, + "CacheUsedPercentage":{"shape":"double"}, + "CacheDirtyPercentage":{"shape":"double"}, + "CacheHitPercentage":{"shape":"double"}, + "CacheMissPercentage":{"shape":"double"} + } + }, + "DescribeCachediSCSIVolumesInput":{ + "type":"structure", + "required":["VolumeARNs"], + "members":{ + "VolumeARNs":{"shape":"VolumeARNs"} + } + }, + "DescribeCachediSCSIVolumesOutput":{ + "type":"structure", + "members":{ + "CachediSCSIVolumes":{"shape":"CachediSCSIVolumes"} + } + }, + "DescribeChapCredentialsInput":{ + "type":"structure", + "required":["TargetARN"], + "members":{ + "TargetARN":{"shape":"TargetARN"} + } + }, + "DescribeChapCredentialsOutput":{ + 
"type":"structure", + "members":{ + "ChapCredentials":{"shape":"ChapCredentials"} + } + }, + "DescribeGatewayInformationInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeGatewayInformationOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayId":{"shape":"GatewayId"}, + "GatewayName":{"shape":"string"}, + "GatewayTimezone":{"shape":"GatewayTimezone"}, + "GatewayState":{"shape":"GatewayState"}, + "GatewayNetworkInterfaces":{"shape":"GatewayNetworkInterfaces"}, + "GatewayType":{"shape":"GatewayType"}, + "NextUpdateAvailabilityDate":{"shape":"NextUpdateAvailabilityDate"}, + "LastSoftwareUpdate":{"shape":"LastSoftwareUpdate"} + } + }, + "DescribeMaintenanceStartTimeInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeMaintenanceStartTimeOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "HourOfDay":{"shape":"HourOfDay"}, + "MinuteOfHour":{"shape":"MinuteOfHour"}, + "DayOfWeek":{"shape":"DayOfWeek"}, + "Timezone":{"shape":"GatewayTimezone"} + } + }, + "DescribeSnapshotScheduleInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "DescribeSnapshotScheduleOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "StartAt":{"shape":"HourOfDay"}, + "RecurrenceInHours":{"shape":"RecurrenceInHours"}, + "Description":{"shape":"Description"}, + "Timezone":{"shape":"GatewayTimezone"} + } + }, + "DescribeStorediSCSIVolumesInput":{ + "type":"structure", + "required":["VolumeARNs"], + "members":{ + "VolumeARNs":{"shape":"VolumeARNs"} + } + }, + "DescribeStorediSCSIVolumesOutput":{ + "type":"structure", + "members":{ + "StorediSCSIVolumes":{"shape":"StorediSCSIVolumes"} + } + }, + "DescribeTapeArchivesInput":{ + "type":"structure", + "members":{ + 
"TapeARNs":{"shape":"TapeARNs"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "DescribeTapeArchivesOutput":{ + "type":"structure", + "members":{ + "TapeArchives":{"shape":"TapeArchives"}, + "Marker":{"shape":"Marker"} + } + }, + "DescribeTapeRecoveryPointsInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "DescribeTapeRecoveryPointsOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeRecoveryPointInfos":{"shape":"TapeRecoveryPointInfos"}, + "Marker":{"shape":"Marker"} + } + }, + "DescribeTapesInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "TapeARNs":{"shape":"TapeARNs"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "DescribeTapesOutput":{ + "type":"structure", + "members":{ + "Tapes":{"shape":"Tapes"}, + "Marker":{"shape":"Marker"} + } + }, + "DescribeUploadBufferInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeUploadBufferOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"}, + "UploadBufferUsedInBytes":{"shape":"long"}, + "UploadBufferAllocatedInBytes":{"shape":"long"} + } + }, + "DescribeVTLDevicesInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "VTLDeviceARNs":{"shape":"VTLDeviceARNs"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "DescribeVTLDevicesOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "VTLDevices":{"shape":"VTLDevices"}, + "Marker":{"shape":"Marker"} + } + }, + "DescribeWorkingStorageInput":{ + "type":"structure", + "required":["GatewayARN"], + 
"members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeWorkingStorageOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DiskIds":{"shape":"DiskIds"}, + "WorkingStorageUsedInBytes":{"shape":"long"}, + "WorkingStorageAllocatedInBytes":{"shape":"long"} + } + }, + "Description":{ + "type":"string", + "min":1, + "max":255 + }, + "DeviceType":{ + "type":"string", + "min":2, + "max":50 + }, + "DeviceiSCSIAttributes":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"}, + "NetworkInterfacePort":{"shape":"integer"}, + "ChapEnabled":{"shape":"boolean"} + } + }, + "DisableGatewayInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DisableGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "Disk":{ + "type":"structure", + "members":{ + "DiskId":{"shape":"DiskId"}, + "DiskPath":{"shape":"string"}, + "DiskNode":{"shape":"string"}, + "DiskStatus":{"shape":"string"}, + "DiskSizeInBytes":{"shape":"long"}, + "DiskAllocationType":{"shape":"DiskAllocationType"}, + "DiskAllocationResource":{"shape":"string"} + } + }, + "DiskAllocationType":{ + "type":"string", + "min":3, + "max":100 + }, + "DiskId":{ + "type":"string", + "min":1, + "max":300 + }, + "DiskIds":{ + "type":"list", + "member":{"shape":"DiskId"} + }, + "Disks":{ + "type":"list", + "member":{"shape":"Disk"} + }, + "DoubleObject":{"type":"double"}, + "ErrorCode":{ + "type":"string", + "enum":[ + "ActivationKeyExpired", + "ActivationKeyInvalid", + "ActivationKeyNotFound", + "GatewayInternalError", + "GatewayNotConnected", + "GatewayNotFound", + "GatewayProxyNetworkConnectionBusy", + "AuthenticationFailure", + "BandwidthThrottleScheduleNotFound", + "Blocked", + "CannotExportSnapshot", + "ChapCredentialNotFound", + "DiskAlreadyAllocated", + "DiskDoesNotExist", + 
"DiskSizeGreaterThanVolumeMaxSize", + "DiskSizeLessThanVolumeSize", + "DiskSizeNotGigAligned", + "DuplicateCertificateInfo", + "DuplicateSchedule", + "EndpointNotFound", + "IAMNotSupported", + "InitiatorInvalid", + "InitiatorNotFound", + "InternalError", + "InvalidGateway", + "InvalidEndpoint", + "InvalidParameters", + "InvalidSchedule", + "LocalStorageLimitExceeded", + "LunAlreadyAllocated ", + "LunInvalid", + "MaximumContentLengthExceeded", + "MaximumTapeCartridgeCountExceeded", + "MaximumVolumeCountExceeded", + "NetworkConfigurationChanged", + "NoDisksAvailable", + "NotImplemented", + "NotSupported", + "OperationAborted", + "OutdatedGateway", + "ParametersNotImplemented", + "RegionInvalid", + "RequestTimeout", + "ServiceUnavailable", + "SnapshotDeleted", + "SnapshotIdInvalid", + "SnapshotInProgress", + "SnapshotNotFound", + "SnapshotScheduleNotFound", + "StagingAreaFull", + "StorageFailure", + "TapeCartridgeNotFound", + "TargetAlreadyExists", + "TargetInvalid", + "TargetNotFound", + "UnauthorizedOperation", + "VolumeAlreadyExists", + "VolumeIdInvalid", + "VolumeInUse", + "VolumeNotFound", + "VolumeNotReady" + ] + }, + "GatewayARN":{ + "type":"string", + "min":50, + "max":500 + }, + "GatewayId":{ + "type":"string", + "min":12, + "max":30 + }, + "GatewayInfo":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayType":{"shape":"GatewayType"}, + "GatewayOperationalState":{"shape":"GatewayOperationalState"}, + "GatewayName":{"shape":"string"} + } + }, + "GatewayName":{ + "type":"string", + "min":2, + "max":255, + "pattern":"^[ -\\.0-\\[\\]-~]*[!-\\.0-\\[\\]-~][ -\\.0-\\[\\]-~]*$" + }, + "GatewayNetworkInterfaces":{ + "type":"list", + "member":{"shape":"NetworkInterface"} + }, + "GatewayOperationalState":{ + "type":"string", + "min":2, + "max":25 + }, + "GatewayState":{ + "type":"string", + "min":2, + "max":25 + }, + "GatewayTimezone":{ + "type":"string", + "min":3, + "max":10 + }, + "GatewayType":{ + "type":"string", + "min":2, + 
"max":20 + }, + "Gateways":{ + "type":"list", + "member":{"shape":"GatewayInfo"} + }, + "HourOfDay":{ + "type":"integer", + "min":0, + "max":23 + }, + "Initiator":{ + "type":"string", + "min":1, + "max":50 + }, + "Initiators":{ + "type":"list", + "member":{"shape":"Initiator"} + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"string"}, + "error":{"shape":"StorageGatewayError"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvalidGatewayRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"}, + "error":{"shape":"StorageGatewayError"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "IqnName":{ + "type":"string", + "min":1, + "max":255, + "pattern":"[0-9a-z:.-]+" + }, + "LastSoftwareUpdate":{ + "type":"string", + "min":1, + "max":25 + }, + "ListGatewaysInput":{ + "type":"structure", + "members":{ + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "ListGatewaysOutput":{ + "type":"structure", + "members":{ + "Gateways":{"shape":"Gateways"}, + "Marker":{"shape":"Marker"} + } + }, + "ListLocalDisksInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ListLocalDisksOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Disks":{"shape":"Disks"} + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "Marker":{"shape":"Marker"}, + "Tags":{"shape":"Tags"} + } + }, + "ListVolumeInitiatorsInput":{ + "type":"structure", + "required":["VolumeARN"], + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "ListVolumeInitiatorsOutput":{ + "type":"structure", + "members":{ + 
"Initiators":{"shape":"Initiators"} + } + }, + "ListVolumeRecoveryPointsInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ListVolumeRecoveryPointsOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "VolumeRecoveryPointInfos":{"shape":"VolumeRecoveryPointInfos"} + } + }, + "ListVolumesInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Marker":{"shape":"Marker"}, + "Limit":{"shape":"PositiveIntObject"} + } + }, + "ListVolumesOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Marker":{"shape":"Marker"}, + "VolumeInfos":{"shape":"VolumeInfos"} + } + }, + "Marker":{ + "type":"string", + "min":1, + "max":1000 + }, + "MediumChangerType":{ + "type":"string", + "min":2, + "max":50 + }, + "MinuteOfHour":{ + "type":"integer", + "min":0, + "max":59 + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "Ipv4Address":{"shape":"string"}, + "MacAddress":{"shape":"string"}, + "Ipv6Address":{"shape":"string"} + } + }, + "NetworkInterfaceId":{ + "type":"string", + "pattern":"\\A(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}\\z" + }, + "NextUpdateAvailabilityDate":{ + "type":"string", + "min":1, + "max":25 + }, + "NumTapesToCreate":{ + "type":"integer", + "min":1, + "max":10 + }, + "PositiveIntObject":{ + "type":"integer", + "min":1 + }, + "RecurrenceInHours":{ + "type":"integer", + "min":1, + "max":24 + }, + "RegionId":{ + "type":"string", + "min":1, + "max":25 + }, + "RemoveTagsFromResourceInput":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"}, + "TagKeys":{"shape":"TagKeys"} + } + }, + "RemoveTagsFromResourceOutput":{ + "type":"structure", + "members":{ + "ResourceARN":{"shape":"ResourceARN"} + } + }, + "ResetCacheInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + 
"GatewayARN":{"shape":"GatewayARN"} + } + }, + "ResetCacheOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ResourceARN":{ + "type":"string", + "min":50, + "max":500 + }, + "RetrieveTapeArchiveInput":{ + "type":"structure", + "required":[ + "TapeARN", + "GatewayARN" + ], + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "RetrieveTapeArchiveOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "RetrieveTapeRecoveryPointInput":{ + "type":"structure", + "required":[ + "TapeARN", + "GatewayARN" + ], + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "RetrieveTapeRecoveryPointOutput":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"} + } + }, + "ShutdownGatewayInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ShutdownGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "SnapshotDescription":{ + "type":"string", + "min":1, + "max":255 + }, + "SnapshotId":{ + "type":"string", + "pattern":"\\Asnap-[0-9a-fA-F]{8}\\z" + }, + "StartGatewayInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "StartGatewayOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "StorageGatewayError":{ + "type":"structure", + "members":{ + "errorCode":{"shape":"ErrorCode"}, + "errorDetails":{"shape":"errorDetails"} + } + }, + "StorediSCSIVolume":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeId":{"shape":"VolumeId"}, + "VolumeType":{"shape":"VolumeType"}, + "VolumeStatus":{"shape":"VolumeStatus"}, + "VolumeSizeInBytes":{"shape":"long"}, + "VolumeProgress":{"shape":"DoubleObject"}, + "VolumeDiskId":{"shape":"DiskId"}, + 
"SourceSnapshotId":{"shape":"SnapshotId"}, + "PreservedExistingData":{"shape":"boolean"}, + "VolumeiSCSIAttributes":{"shape":"VolumeiSCSIAttributes"} + } + }, + "StorediSCSIVolumes":{ + "type":"list", + "member":{"shape":"StorediSCSIVolume"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "min":1, + "max":128, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "Tape":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "TapeBarcode":{"shape":"TapeBarcode"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "TapeStatus":{"shape":"TapeStatus"}, + "VTLDevice":{"shape":"VTLDeviceARN"}, + "Progress":{"shape":"DoubleObject"} + } + }, + "TapeARN":{ + "type":"string", + "min":50, + "max":500 + }, + "TapeARNs":{ + "type":"list", + "member":{"shape":"TapeARN"} + }, + "TapeArchive":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "TapeBarcode":{"shape":"TapeBarcode"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + "CompletionTime":{"shape":"Time"}, + "RetrievedTo":{"shape":"GatewayARN"}, + "TapeStatus":{"shape":"TapeArchiveStatus"} + } + }, + "TapeArchiveStatus":{"type":"string"}, + "TapeArchives":{ + "type":"list", + "member":{"shape":"TapeArchive"} + }, + "TapeBarcode":{ + "type":"string", + "min":7, + "max":16, + "pattern":"^[A-Z0-9]*$" + }, + "TapeBarcodePrefix":{ + "type":"string", + "min":1, + "max":4, + "pattern":"^[A-Z]*$" + }, + "TapeDriveType":{ + "type":"string", + "min":2, + "max":50 + }, + "TapeRecoveryPointInfo":{ + "type":"structure", + "members":{ + "TapeARN":{"shape":"TapeARN"}, + "TapeRecoveryPointTime":{"shape":"Time"}, + "TapeSizeInBytes":{"shape":"TapeSize"}, + 
"TapeStatus":{"shape":"TapeRecoveryPointStatus"} + } + }, + "TapeRecoveryPointInfos":{ + "type":"list", + "member":{"shape":"TapeRecoveryPointInfo"} + }, + "TapeRecoveryPointStatus":{"type":"string"}, + "TapeSize":{"type":"long"}, + "TapeStatus":{"type":"string"}, + "Tapes":{ + "type":"list", + "member":{"shape":"Tape"} + }, + "TargetARN":{ + "type":"string", + "min":50, + "max":800 + }, + "TargetName":{ + "type":"string", + "min":1, + "max":200, + "pattern":"^[-\\.;a-z0-9]+$" + }, + "Time":{"type":"timestamp"}, + "UpdateBandwidthRateLimitInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "AverageUploadRateLimitInBitsPerSec":{"shape":"BandwidthUploadRateLimit"}, + "AverageDownloadRateLimitInBitsPerSec":{"shape":"BandwidthDownloadRateLimit"} + } + }, + "UpdateBandwidthRateLimitOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "UpdateChapCredentialsInput":{ + "type":"structure", + "required":[ + "TargetARN", + "SecretToAuthenticateInitiator", + "InitiatorName" + ], + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "SecretToAuthenticateInitiator":{"shape":"ChapSecret"}, + "InitiatorName":{"shape":"IqnName"}, + "SecretToAuthenticateTarget":{"shape":"ChapSecret"} + } + }, + "UpdateChapCredentialsOutput":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "InitiatorName":{"shape":"IqnName"} + } + }, + "UpdateGatewayInformationInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayName":{"shape":"GatewayName"}, + "GatewayTimezone":{"shape":"GatewayTimezone"} + } + }, + "UpdateGatewayInformationOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "GatewayName":{"shape":"string"} + } + }, + "UpdateGatewaySoftwareNowInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + 
"UpdateGatewaySoftwareNowOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "UpdateMaintenanceStartTimeInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "HourOfDay", + "MinuteOfHour", + "DayOfWeek" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "HourOfDay":{"shape":"HourOfDay"}, + "MinuteOfHour":{"shape":"MinuteOfHour"}, + "DayOfWeek":{"shape":"DayOfWeek"} + } + }, + "UpdateMaintenanceStartTimeOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "UpdateSnapshotScheduleInput":{ + "type":"structure", + "required":[ + "VolumeARN", + "StartAt", + "RecurrenceInHours" + ], + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "StartAt":{"shape":"HourOfDay"}, + "RecurrenceInHours":{"shape":"RecurrenceInHours"}, + "Description":{"shape":"Description"} + } + }, + "UpdateSnapshotScheduleOutput":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"} + } + }, + "UpdateVTLDeviceTypeInput":{ + "type":"structure", + "required":[ + "VTLDeviceARN", + "DeviceType" + ], + "members":{ + "VTLDeviceARN":{"shape":"VTLDeviceARN"}, + "DeviceType":{"shape":"DeviceType"} + } + }, + "UpdateVTLDeviceTypeOutput":{ + "type":"structure", + "members":{ + "VTLDeviceARN":{"shape":"VTLDeviceARN"} + } + }, + "VTLDevice":{ + "type":"structure", + "members":{ + "VTLDeviceARN":{"shape":"VTLDeviceARN"}, + "VTLDeviceType":{"shape":"VTLDeviceType"}, + "VTLDeviceVendor":{"shape":"VTLDeviceVendor"}, + "VTLDeviceProductIdentifier":{"shape":"VTLDeviceProductIdentifier"}, + "DeviceiSCSIAttributes":{"shape":"DeviceiSCSIAttributes"} + } + }, + "VTLDeviceARN":{ + "type":"string", + "min":50, + "max":500 + }, + "VTLDeviceARNs":{ + "type":"list", + "member":{"shape":"VTLDeviceARN"} + }, + "VTLDeviceProductIdentifier":{"type":"string"}, + "VTLDeviceType":{"type":"string"}, + "VTLDeviceVendor":{"type":"string"}, + "VTLDevices":{ + "type":"list", + "member":{"shape":"VTLDevice"} + }, + 
"VolumeARN":{ + "type":"string", + "min":50, + "max":500 + }, + "VolumeARNs":{ + "type":"list", + "member":{"shape":"VolumeARN"} + }, + "VolumeId":{ + "type":"string", + "min":12, + "max":30 + }, + "VolumeInfo":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeType":{"shape":"VolumeType"} + } + }, + "VolumeInfos":{ + "type":"list", + "member":{"shape":"VolumeInfo"} + }, + "VolumeRecoveryPointInfo":{ + "type":"structure", + "members":{ + "VolumeARN":{"shape":"VolumeARN"}, + "VolumeSizeInBytes":{"shape":"long"}, + "VolumeUsageInBytes":{"shape":"long"}, + "VolumeRecoveryPointTime":{"shape":"string"} + } + }, + "VolumeRecoveryPointInfos":{ + "type":"list", + "member":{"shape":"VolumeRecoveryPointInfo"} + }, + "VolumeStatus":{ + "type":"string", + "min":3, + "max":50 + }, + "VolumeType":{ + "type":"string", + "min":3, + "max":100 + }, + "VolumeiSCSIAttributes":{ + "type":"structure", + "members":{ + "TargetARN":{"shape":"TargetARN"}, + "NetworkInterfaceId":{"shape":"NetworkInterfaceId"}, + "NetworkInterfacePort":{"shape":"integer"}, + "LunNumber":{"shape":"PositiveIntObject"}, + "ChapEnabled":{"shape":"boolean"} + } + }, + "boolean":{"type":"boolean"}, + "double":{"type":"double"}, + "errorDetails":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"string"} + }, + "integer":{"type":"integer"}, + "long":{"type":"long"}, + "string":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/docs-2.json 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1,1413 @@ +{ + "version": "2.0", + "operations": { + "ActivateGateway": "

    This operation activates the gateway you previously deployed on your host. For more information, see Activate the AWS Storage Gateway. In the activation process, you specify information such as the region you want to use for storing snapshots, the time zone for scheduled snapshots, the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.

    You must turn on the gateway VM before you can activate your gateway.", + "AddCache": "

    This operation configures one or more gateway local disks as cache for a cached-volume gateway. This operation is supported only for the gateway-cached volume architecture (see Storage Gateway Concepts).

    In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.

    ", + "AddTagsToResource": "

    This operation adds one or more tags to the specified resource. You use tags to add metadata to resources, which you can use to categorize these resources. For example, you can categorize resources by purpose, owner, environment, or team. Each tag consists of a key and a value, which you define. You can add tags to the following AWS Storage Gateway resources:

    • Storage gateways of all types

    • Storage Volumes

    • Virtual Tapes

    You can create a maximum of 10 tags for each resource. Virtual tapes and storage volumes that are recovered to a new gateway maintain their tags.

    ", + "AddUploadBuffer": "

    This operation configures one or more gateway local disks as upload buffer for a specified gateway. This operation is supported for both the gateway-stored and gateway-cached volume architectures.

    In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add upload buffer, and one or more disk IDs that you want to configure as upload buffer.

    ", + "AddWorkingStorage": "

    This operation configures one or more gateway local disks as working storage for a gateway. This operation is supported only for the gateway-stored volume architecture. This operation is deprecated in the cached-volume API version (20120630). Use AddUploadBuffer instead.

    Working storage is also referred to as upload buffer. You can also use the AddUploadBuffer operation to add upload buffer to a stored-volume gateway.

    In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add working storage, and one or more disk IDs that you want to configure as working storage.

    ", + "CancelArchival": "

    Cancels archiving of a virtual tape to the virtual tape shelf (VTS) after the archiving process is initiated.

    ", + "CancelRetrieval": "

    Cancels retrieval of a virtual tape from the virtual tape shelf (VTS) to a gateway after the retrieval process is initiated. The virtual tape is returned to the VTS.

    ", + "CreateCachediSCSIVolume": "

    This operation creates a cached volume on a specified cached gateway. This operation is supported only for the gateway-cached volume architecture.

    Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.

    In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, AWS Storage Gateway creates the volume and returns information about it such as the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

    ", + "CreateSnapshot": "

    This operation initiates a snapshot of a volume.

    AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage (S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad-hoc basis. This API enables you to take ad-hoc snapshot. For more information, see Working With Snapshots in the AWS Storage Gateway Console.

    In the CreateSnapshot request you identify the volume by providing its Amazon Resource Name (ARN). You must also provide description for the snapshot. When AWS Storage Gateway takes the snapshot of specified volume, the snapshot and description appears in the AWS Storage Gateway Console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

    To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the EC2 API reference.", + "CreateSnapshotFromVolumeRecoveryPoint": "

    This operation initiates a snapshot of a gateway from a volume recovery point. This operation is supported only for the gateway-cached volume architecture (see ).

    A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To get a list of volume recovery point for gateway-cached volumes, use ListVolumeRecoveryPoints.

    In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When AWS Storage Gateway takes a snapshot of the specified volume, the snapshot and its description appear in the AWS Storage Gateway console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

    To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

    ", + "CreateStorediSCSIVolume": "

    This operation creates a volume on a specified gateway. This operation is supported only for the gateway-stored volume architecture.

    The size of the volume to create is inferred from the disk size. You can choose to preserve existing data on the disk, create volume from an existing snapshot, or create an empty volume. If you choose to create an empty gateway volume, then any existing data on the disk is erased.

    In the request you must specify the gateway and the disk information on which you are creating the volume. In response, AWS Storage Gateway creates the volume and returns volume information such as the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

    ", + "CreateTapes": "

    Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes.

    Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway. ", + "DeleteBandwidthRateLimit": "

    This operation deletes the bandwidth rate limits of a gateway. You can delete either the upload and download bandwidth rate limit, or you can delete both. If you delete only one of the limits, the other limit remains unchanged. To specify which gateway to work with, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "DeleteChapCredentials": "

    This operation deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair.

    ", + "DeleteGateway": "

    This operation deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.

    After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway, however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.

    You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription.  If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway Detail Page.

    ", + "DeleteSnapshotSchedule": "

    This operation deletes a snapshot of a volume.

    You can take snapshots of your gateway volumes on a scheduled or ad-hoc basis. This API enables you to delete a snapshot schedule for a volume. For more information, see Working with Snapshots. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN).

    To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

    ", + "DeleteTape": "

    Deletes the specified virtual tape.

    ", + "DeleteTapeArchive": "

    Deletes the specified virtual tape from the virtual tape shelf (VTS).

    ", + "DeleteVolume": "

    This operation deletes the specified gateway volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. For gateway-stored volumes, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

    Before you delete a gateway volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

    In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

    ", + "DescribeBandwidthRateLimit": "

    This operation returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect.

    This operation returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "DescribeCache": "

    This operation returns information about the cache of a gateway. This operation is supported only for the gateway-cached volume architecture.

    The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.

    ", + "DescribeCachediSCSIVolumes": "

    This operation returns a description of the gateway volumes specified in the request. This operation is supported only for the gateway-cached volume architecture.

    The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).

    ", + "DescribeChapCredentials": "

    This operation returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials information for a specified iSCSI target, one for each target-initiator pair.

    ", + "DescribeGatewayInformation": "

    This operation returns metadata about a gateway such as its name, network interfaces, configured time zone, and the state (whether the gateway is running or not). To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "DescribeMaintenanceStartTime": "

    This operation returns your gateway's weekly maintenance start time including the day and time of the week. Note that values are in terms of the gateway's time zone.

    ", + "DescribeSnapshotSchedule": "

    This operation describes the snapshot schedule for the specified gateway volume. The snapshot schedule information includes intervals at which snapshots are automatically initiated on the volume.

    ", + "DescribeStorediSCSIVolumes": "

    This operation returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume ARNs.

    ", + "DescribeTapeArchives": "

    Returns a description of specified virtual tapes in the virtual tape shelf (VTS).

    If a specific TapeARN is not specified, AWS Storage Gateway returns a description of all virtual tapes found in the VTS associated with your account.

    ", + "DescribeTapeRecoveryPoints": "

    Returns a list of virtual tape recovery points that are available for the specified gateway-VTL.

    A recovery point is a point in time view of a virtual tape at which all the data on the virtual tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway.

    ", + "DescribeTapes": "

    Returns a description of the specified Amazon Resource Name (ARN) of virtual tapes. If a TapeARN is not specified, returns a description of all virtual tapes associated with the specified gateway.

    ", + "DescribeUploadBuffer": "

    This operation returns information about the upload buffer of a gateway. This operation is supported for both the gateway-stored and gateway-cached volume architectures.

    The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.

    ", + "DescribeVTLDevices": "

    Returns a description of virtual tape library (VTL) devices for the specified gateway. In the response, AWS Storage Gateway returns VTL device information.

    The list of VTL devices must be from one gateway.

    ", + "DescribeWorkingStorage": "

    This operation returns information about the working storage of a gateway. This operation is supported only for the gateway-stored volume architecture. This operation is deprecated in cached-volumes API version (20120630). Use DescribeUploadBuffer instead.

    Working storage is also referred to as upload buffer. You can also use the DescribeUploadBuffer operation to add upload buffer to a stored-volume gateway.

    The response includes disk IDs that are configured as working storage, and it includes the amount of working storage allocated and used.

    ", + "DisableGateway": "

    Disables a gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.

    Use this operation for a gateway-VTL that is not reachable or not functioning.

    Once a gateway is disabled it cannot be enabled.", + "ListGateways": "

    This operation lists gateways owned by an AWS account in a region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).

    By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.

    If you have more gateways than are returned in a response-that is, the response returns only a truncated list of your gateways-the response contains a marker that you can specify in your next request to fetch the next page of gateways.

    ", + "ListLocalDisks": "

    This operation returns a list of the gateway's local disks. To specify which gateway to describe, you use the Amazon Resource Name (ARN) of the gateway in the body of the request.

    The request returns a list of all disks, specifying which are configured as working storage, cache storage, or stored volume or not configured at all. The response includes a DiskStatus field. This field can have a value of present (the disk is available to use), missing (the disk is no longer connected to the gateway), or mismatch (the disk node is occupied by a disk that has incorrect metadata or the disk content is corrupted).

    ", + "ListTagsForResource": "

    This operation lists the tags that have been added to the specified resource.

    ", + "ListVolumeInitiators": "

    This operation lists iSCSI initiators that are connected to a volume. You can use this operation to determine whether a volume is being used or not.

    ", + "ListVolumeRecoveryPoints": "

    This operation lists the recovery points for a specified gateway. This operation is supported only for the gateway-cached volume architecture.

    Each gateway-cached volume has one recovery point. A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To create a snapshot from a volume recovery point use the CreateSnapshotFromVolumeRecoveryPoint operation.

    ", + "ListVolumes": "

    This operation lists the iSCSI stored volumes of a gateway. Results are sorted by volume ARN. The response includes only the volume ARNs. If you want additional volume information, use the DescribeStorediSCSIVolumes API.

    The operation supports pagination. By default, the operation returns a maximum of up to 100 volumes. You can optionally specify the Limit field in the body to limit the number of volumes in the response. If the number of volumes returned in the response is truncated, the response includes a Marker field. You can use this Marker value in your subsequent request to retrieve the next set of volumes.

    ", + "RemoveTagsFromResource": "

    This operation removes one or more tags from the specified resource.

    ", + "ResetCache": "

    This operation resets all cache disks that have encountered an error and makes the disks available for reconfiguration as cache storage. If your cache disk encounters an error, the gateway prevents read and write operations on virtual tapes in the gateway. For example, an error can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the gateway loses its cache storage. At this point you can reconfigure the disks as cache disks.

    If the cache disk you are resetting contains data that has not been uploaded to Amazon S3 yet, that data can be lost. After you reset cache disks, there will be no configured cache disks left in the gateway, so you must configure at least one new cache disk for your gateway to function properly.

    ", + "RetrieveTapeArchive": "

    Retrieves an archived virtual tape from the virtual tape shelf (VTS) to a gateway-VTL. Virtual tapes archived in the VTS are not associated with any gateway. However after a tape is retrieved, it is associated with a gateway, even though it is also listed in the VTS.

    Once a tape is successfully retrieved to a gateway, it cannot be retrieved again to another gateway. You must archive the tape again before you can retrieve it to another gateway.

    ", + "RetrieveTapeRecoveryPoint": "

    Retrieves the recovery point for the specified virtual tape.

    A recovery point is a point in time view of a virtual tape at which all the data on the tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway.

    The virtual tape can be retrieved to only one gateway. The retrieved tape is read-only. The virtual tape can be retrieved to only a gateway-VTL. There is no charge for retrieving recovery points.", + "ShutdownGateway": "

    This operation shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

    The operation shuts down the gateway service component running in the storage gateway's virtual machine (VM) and not the VM.

    If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

    After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

    When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

    If you do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.

    ", + "StartGateway": "

    This operation starts a gateway that you previously shut down (see ShutdownGateway). After the gateway starts, you can then make other API calls, your applications can read from or write to the gateway's storage volumes and you will be able to take snapshot backups.

    When you make a request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to be ready. You should call DescribeGatewayInformation and check the status before making any additional API calls. For more information, see ActivateGateway.

    To specify which gateway to start, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "UpdateBandwidthRateLimit": "

    This operation updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains.

    By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

    To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "UpdateChapCredentials": "

    This operation updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it.

    When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.

    ", + "UpdateGatewayInformation": "

    This operation updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "UpdateGatewaySoftwareNow": "

    This operation updates the gateway virtual machine (VM) software. The request immediately triggers the software update.

    When you make this request, you get a 200 OK success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING state. A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing Your Windows iSCSI Settings and Customizing Your Linux iSCSI Settings, respectively.", + "UpdateMaintenanceStartTime": "

    This operation updates a gateway's weekly maintenance start time information, including day and time of the week. The maintenance time is the time in your gateway's time zone.

    ", + "UpdateSnapshotSchedule": "

    This operation updates a snapshot schedule configured for a gateway volume.

    The default snapshot schedule for volume is once every 24 hours, starting at the creation time of the volume. You can use this API to change the snapshot schedule configured for the volume.

    In the request you must identify the gateway volume whose snapshot schedule you want to update, and the schedule information, including when you want the snapshot to begin on a day and the frequency (in hours) of snapshots.

    ", + "UpdateVTLDeviceType": "

    This operation updates the type of medium changer in a gateway-VTL. When you activate a gateway-VTL, you select a medium changer type for the gateway-VTL. This operation enables you to select a different type of medium changer after a gateway-VTL is activated.

    " + }, + "service": "AWS Storage Gateway Service

    AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and AWS's storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

    Use the following links to get started using the AWS Storage Gateway Service API Reference:

    ", + "shapes": { + "ActivateGatewayInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "ActivateGatewayOutput": { + "base": "

    AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated gateway. It is a string made of information such as your account, gateway name, and region. This ARN is used to reference the gateway in other API operations as well as resource-based authorization.

    ", + "refs": { + } + }, + "ActivationKey": { + "base": null, + "refs": { + "ActivateGatewayInput$ActivationKey": "

    Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter activationKey. It may also include other activation-related parameters, however, these are merely defaults -- the arguments you pass to the ActivateGateway API call determine the actual configuration of your gateway.

    " + } + }, + "AddCacheInput": { + "base": null, + "refs": { + } + }, + "AddCacheOutput": { + "base": null, + "refs": { + } + }, + "AddTagsToResourceInput": { + "base": "

    AddTagsToResourceInput

    ", + "refs": { + } + }, + "AddTagsToResourceOutput": { + "base": "

    AddTagsToResourceOutput

    ", + "refs": { + } + }, + "AddUploadBufferInput": { + "base": null, + "refs": { + } + }, + "AddUploadBufferOutput": { + "base": null, + "refs": { + } + }, + "AddWorkingStorageInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "AddWorkingStorageOutput": { + "base": "

    A JSON object containing the of the gateway for which working storage was configured.

    ", + "refs": { + } + }, + "BandwidthDownloadRateLimit": { + "base": null, + "refs": { + "DescribeBandwidthRateLimitOutput$AverageDownloadRateLimitInBitsPerSec": "

    The average download bandwidth rate limit in bits per second. This field does not appear in the response if the download rate limit is not set.

    ", + "UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec": "

    The average download bandwidth rate limit in bits per second.

    " + } + }, + "BandwidthType": { + "base": null, + "refs": { + "DeleteBandwidthRateLimitInput$BandwidthType": null + } + }, + "BandwidthUploadRateLimit": { + "base": null, + "refs": { + "DescribeBandwidthRateLimitOutput$AverageUploadRateLimitInBitsPerSec": "

    The average upload bandwidth rate limit in bits per second. This field does not appear in the response if the upload rate limit is not set.

    ", + "UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec": "

    The average upload bandwidth rate limit in bits per second.

    " + } + }, + "CachediSCSIVolume": { + "base": null, + "refs": { + "CachediSCSIVolumes$member": null + } + }, + "CachediSCSIVolumes": { + "base": null, + "refs": { + "DescribeCachediSCSIVolumesOutput$CachediSCSIVolumes": "

    An array of objects where each object contains metadata about one cached volume.

    " + } + }, + "CancelArchivalInput": { + "base": "

    CancelArchivalInput

    ", + "refs": { + } + }, + "CancelArchivalOutput": { + "base": "

    CancelArchivalOutput

    ", + "refs": { + } + }, + "CancelRetrievalInput": { + "base": "

    CancelRetrievalInput

    ", + "refs": { + } + }, + "CancelRetrievalOutput": { + "base": "

    CancelRetrievalOutput

    ", + "refs": { + } + }, + "ChapCredentials": { + "base": null, + "refs": { + "DescribeChapCredentialsOutput$ChapCredentials": "

    An array of ChapInfo objects that represent CHAP credentials. Each object in the array contains CHAP credential information for one target-initiator pair. If no CHAP credentials are set, an empty array is returned. CHAP credential information is provided in a JSON object with the following fields:

    • InitiatorName: The iSCSI initiator that connects to the target.

    • SecretToAuthenticateInitiator: The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

    • SecretToAuthenticateTarget: The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

    • TargetARN: The Amazon Resource Name (ARN) of the storage volume.

    " + } + }, + "ChapInfo": { + "base": "

    Describes Challenge-Handshake Authentication Protocol (CHAP) information that supports authentication between your gateway and iSCSI initiators.

    ", + "refs": { + "ChapCredentials$member": null + } + }, + "ChapSecret": { + "base": null, + "refs": { + "ChapInfo$SecretToAuthenticateInitiator": "

    The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

    ", + "ChapInfo$SecretToAuthenticateTarget": "

    The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

    ", + "UpdateChapCredentialsInput$SecretToAuthenticateInitiator": "

    The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

    The secret key must be between 12 and 16 bytes when encoded in UTF-8.", + "UpdateChapCredentialsInput$SecretToAuthenticateTarget": "

    The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

    Byte constraints: Minimum bytes of 12. Maximum bytes of 16.

    The secret key must be between 12 and 16 bytes when encoded in UTF-8." + } + }, + "ClientToken": { + "base": null, + "refs": { + "CreateCachediSCSIVolumeInput$ClientToken": null, + "CreateTapesInput$ClientToken": "

    A unique identifier that you use to retry a request. If you retry a request, use the same ClientToken you specified in the initial request.

    Using the same ClientToken prevents creating the tape multiple times." + } + }, + "CreateCachediSCSIVolumeInput": { + "base": null, + "refs": { + } + }, + "CreateCachediSCSIVolumeOutput": { + "base": null, + "refs": { + } + }, + "CreateSnapshotFromVolumeRecoveryPointInput": { + "base": null, + "refs": { + } + }, + "CreateSnapshotFromVolumeRecoveryPointOutput": { + "base": null, + "refs": { + } + }, + "CreateSnapshotInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "CreateSnapshotOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "CreateStorediSCSIVolumeInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "CreateStorediSCSIVolumeOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "CreateTapesInput": { + "base": "

    CreateTapesInput

    ", + "refs": { + } + }, + "CreateTapesOutput": { + "base": "

    CreateTapesOutput

    ", + "refs": { + } + }, + "DayOfWeek": { + "base": null, + "refs": { + "DescribeMaintenanceStartTimeOutput$DayOfWeek": null, + "UpdateMaintenanceStartTimeInput$DayOfWeek": "

    The maintenance start time day of the week.

    " + } + }, + "DeleteBandwidthRateLimitInput": { + "base": null, + "refs": { + } + }, + "DeleteBandwidthRateLimitOutput": { + "base": "

    A JSON object containing the of the gateway whose bandwidth rate information was deleted.

    ", + "refs": { + } + }, + "DeleteChapCredentialsInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "DeleteChapCredentialsOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "DeleteGatewayInput": { + "base": "

    A JSON object containing the id of the gateway to delete.

    ", + "refs": { + } + }, + "DeleteGatewayOutput": { + "base": "

    A JSON object containing the id of the deleted gateway.

    ", + "refs": { + } + }, + "DeleteSnapshotScheduleInput": { + "base": null, + "refs": { + } + }, + "DeleteSnapshotScheduleOutput": { + "base": null, + "refs": { + } + }, + "DeleteTapeArchiveInput": { + "base": "

    DeleteTapeArchiveInput

    ", + "refs": { + } + }, + "DeleteTapeArchiveOutput": { + "base": "

    DeleteTapeArchiveOutput

    ", + "refs": { + } + }, + "DeleteTapeInput": { + "base": "

    DeleteTapeInput

    ", + "refs": { + } + }, + "DeleteTapeOutput": { + "base": "

    DeleteTapeOutput

    ", + "refs": { + } + }, + "DeleteVolumeInput": { + "base": "

    A JSON object containing the DeleteVolumeInput$VolumeARN to delete.

    ", + "refs": { + } + }, + "DeleteVolumeOutput": { + "base": "

    A JSON object containing the of the storage volume that was deleted

    ", + "refs": { + } + }, + "DescribeBandwidthRateLimitInput": { + "base": "

    A JSON object containing the of the gateway.

    ", + "refs": { + } + }, + "DescribeBandwidthRateLimitOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "DescribeCacheInput": { + "base": null, + "refs": { + } + }, + "DescribeCacheOutput": { + "base": null, + "refs": { + } + }, + "DescribeCachediSCSIVolumesInput": { + "base": null, + "refs": { + } + }, + "DescribeCachediSCSIVolumesOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "DescribeChapCredentialsInput": { + "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the iSCSI volume target.

    ", + "refs": { + } + }, + "DescribeChapCredentialsOutput": { + "base": "

    A JSON object containing a .

    ", + "refs": { + } + }, + "DescribeGatewayInformationInput": { + "base": "

    A JSON object containing the id of the gateway.

    ", + "refs": { + } + }, + "DescribeGatewayInformationOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "DescribeMaintenanceStartTimeInput": { + "base": "

    A JSON object containing the of the gateway.

    ", + "refs": { + } + }, + "DescribeMaintenanceStartTimeOutput": { + "base": null, + "refs": { + } + }, + "DescribeSnapshotScheduleInput": { + "base": "

    A JSON object containing the DescribeSnapshotScheduleInput$VolumeARN of the volume.

    ", + "refs": { + } + }, + "DescribeSnapshotScheduleOutput": { + "base": null, + "refs": { + } + }, + "DescribeStorediSCSIVolumesInput": { + "base": "

    A JSON Object containing a list of DescribeStorediSCSIVolumesInput$VolumeARNs.

    ", + "refs": { + } + }, + "DescribeStorediSCSIVolumesOutput": { + "base": null, + "refs": { + } + }, + "DescribeTapeArchivesInput": { + "base": "

    DescribeTapeArchivesInput

    ", + "refs": { + } + }, + "DescribeTapeArchivesOutput": { + "base": "

    DescribeTapeArchivesOutput

    ", + "refs": { + } + }, + "DescribeTapeRecoveryPointsInput": { + "base": "

    DescribeTapeRecoveryPointsInput

    ", + "refs": { + } + }, + "DescribeTapeRecoveryPointsOutput": { + "base": "

    DescribeTapeRecoveryPointsOutput

    ", + "refs": { + } + }, + "DescribeTapesInput": { + "base": "

    DescribeTapesInput

    ", + "refs": { + } + }, + "DescribeTapesOutput": { + "base": "

    DescribeTapesOutput

    ", + "refs": { + } + }, + "DescribeUploadBufferInput": { + "base": null, + "refs": { + } + }, + "DescribeUploadBufferOutput": { + "base": null, + "refs": { + } + }, + "DescribeVTLDevicesInput": { + "base": "

    DescribeVTLDevicesInput

    ", + "refs": { + } + }, + "DescribeVTLDevicesOutput": { + "base": "

    DescribeVTLDevicesOutput

    ", + "refs": { + } + }, + "DescribeWorkingStorageInput": { + "base": "

    A JSON object containing the of the gateway.

    ", + "refs": { + } + }, + "DescribeWorkingStorageOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "DescribeSnapshotScheduleOutput$Description": null, + "UpdateSnapshotScheduleInput$Description": "

    Optional description of the snapshot that overwrites the existing description.

    " + } + }, + "DeviceType": { + "base": null, + "refs": { + "UpdateVTLDeviceTypeInput$DeviceType": "

    The type of medium changer you want to select.

    Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

    " + } + }, + "DeviceiSCSIAttributes": { + "base": "

    Lists iSCSI information about a VTL device.

    ", + "refs": { + "VTLDevice$DeviceiSCSIAttributes": "

    A list of iSCSI information about a VTL device.

    " + } + }, + "DisableGatewayInput": { + "base": "

    DisableGatewayInput

    ", + "refs": { + } + }, + "DisableGatewayOutput": { + "base": "

    DisableGatewayOutput

    ", + "refs": { + } + }, + "Disk": { + "base": null, + "refs": { + "Disks$member": null + } + }, + "DiskAllocationType": { + "base": null, + "refs": { + "Disk$DiskAllocationType": null + } + }, + "DiskId": { + "base": null, + "refs": { + "CreateStorediSCSIVolumeInput$DiskId": "

    The unique identifier for the gateway local disk that is configured as a stored volume. Use ListLocalDisks to list disk IDs for a gateway.

    ", + "Disk$DiskId": null, + "DiskIds$member": null, + "StorediSCSIVolume$VolumeDiskId": null + } + }, + "DiskIds": { + "base": null, + "refs": { + "AddCacheInput$DiskIds": null, + "AddUploadBufferInput$DiskIds": null, + "AddWorkingStorageInput$DiskIds": "

    An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and a maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

    ", + "DescribeCacheOutput$DiskIds": null, + "DescribeUploadBufferOutput$DiskIds": null, + "DescribeWorkingStorageOutput$DiskIds": "

    An array of the gateway's local disk IDs that are configured as working storage. Each local disk ID is specified as a string (minimum length of 1 and maximum length of 300). If no local disks are configured as working storage, then the DiskIds array is empty.

    " + } + }, + "Disks": { + "base": null, + "refs": { + "ListLocalDisksOutput$Disks": null + } + }, + "DoubleObject": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeProgress": null, + "StorediSCSIVolume$VolumeProgress": null, + "Tape$Progress": "

    For archiving virtual tapes, indicates how much data remains to be uploaded before archiving is complete.

    Range: 0 (not started) to 100 (complete).

    " + } + }, + "ErrorCode": { + "base": null, + "refs": { + "StorageGatewayError$errorCode": "

    Additional information about the error.

    " + } + }, + "GatewayARN": { + "base": "

    The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "refs": { + "ActivateGatewayOutput$GatewayARN": null, + "AddCacheInput$GatewayARN": null, + "AddCacheOutput$GatewayARN": null, + "AddUploadBufferInput$GatewayARN": null, + "AddUploadBufferOutput$GatewayARN": null, + "AddWorkingStorageInput$GatewayARN": null, + "AddWorkingStorageOutput$GatewayARN": null, + "CancelArchivalInput$GatewayARN": null, + "CancelRetrievalInput$GatewayARN": null, + "CreateCachediSCSIVolumeInput$GatewayARN": null, + "CreateStorediSCSIVolumeInput$GatewayARN": null, + "CreateTapesInput$GatewayARN": "

    The unique Amazon Resource Name(ARN) that represents the gateway to associate the virtual tapes with. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "DeleteBandwidthRateLimitInput$GatewayARN": null, + "DeleteBandwidthRateLimitOutput$GatewayARN": null, + "DeleteGatewayInput$GatewayARN": null, + "DeleteGatewayOutput$GatewayARN": null, + "DeleteTapeInput$GatewayARN": "

    The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and region.

    ", + "DescribeBandwidthRateLimitInput$GatewayARN": null, + "DescribeBandwidthRateLimitOutput$GatewayARN": null, + "DescribeCacheInput$GatewayARN": null, + "DescribeCacheOutput$GatewayARN": null, + "DescribeGatewayInformationInput$GatewayARN": null, + "DescribeGatewayInformationOutput$GatewayARN": null, + "DescribeMaintenanceStartTimeInput$GatewayARN": null, + "DescribeMaintenanceStartTimeOutput$GatewayARN": null, + "DescribeTapeRecoveryPointsInput$GatewayARN": null, + "DescribeTapeRecoveryPointsOutput$GatewayARN": null, + "DescribeTapesInput$GatewayARN": null, + "DescribeUploadBufferInput$GatewayARN": null, + "DescribeUploadBufferOutput$GatewayARN": null, + "DescribeVTLDevicesInput$GatewayARN": null, + "DescribeVTLDevicesOutput$GatewayARN": null, + "DescribeWorkingStorageInput$GatewayARN": null, + "DescribeWorkingStorageOutput$GatewayARN": null, + "DisableGatewayInput$GatewayARN": null, + "DisableGatewayOutput$GatewayARN": "

    The unique Amazon Resource Name of the disabled gateway.

    ", + "GatewayInfo$GatewayARN": null, + "ListLocalDisksInput$GatewayARN": null, + "ListLocalDisksOutput$GatewayARN": null, + "ListVolumeRecoveryPointsInput$GatewayARN": null, + "ListVolumeRecoveryPointsOutput$GatewayARN": null, + "ListVolumesInput$GatewayARN": null, + "ListVolumesOutput$GatewayARN": null, + "ResetCacheInput$GatewayARN": null, + "ResetCacheOutput$GatewayARN": null, + "RetrieveTapeArchiveInput$GatewayARN": "

    The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual tape to. Use the ListGateways operation to return a list of gateways for your account and region.

    You retrieve archived virtual tapes to only one gateway and the gateway must be a gateway-VTL.

    ", + "RetrieveTapeRecoveryPointInput$GatewayARN": null, + "ShutdownGatewayInput$GatewayARN": null, + "ShutdownGatewayOutput$GatewayARN": null, + "StartGatewayInput$GatewayARN": null, + "StartGatewayOutput$GatewayARN": null, + "TapeArchive$RetrievedTo": "

    The Amazon Resource Name (ARN) of the gateway-VTL that the virtual tape is being retrieved to.

    The virtual tape is retrieved from the virtual tape shelf (VTS).

    ", + "UpdateBandwidthRateLimitInput$GatewayARN": null, + "UpdateBandwidthRateLimitOutput$GatewayARN": null, + "UpdateGatewayInformationInput$GatewayARN": null, + "UpdateGatewayInformationOutput$GatewayARN": null, + "UpdateGatewaySoftwareNowInput$GatewayARN": null, + "UpdateGatewaySoftwareNowOutput$GatewayARN": null, + "UpdateMaintenanceStartTimeInput$GatewayARN": null, + "UpdateMaintenanceStartTimeOutput$GatewayARN": null + } + }, + "GatewayId": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$GatewayId": "

    The gateway ID.

    " + } + }, + "GatewayInfo": { + "base": null, + "refs": { + "Gateways$member": null + } + }, + "GatewayName": { + "base": "

    A unique identifier for your gateway. This name becomes part of the gateway Amazon Resources Name (ARN) which is what you use as an input to other operations.

    ", + "refs": { + "ActivateGatewayInput$GatewayName": null, + "UpdateGatewayInformationInput$GatewayName": null + } + }, + "GatewayNetworkInterfaces": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$GatewayNetworkInterfaces": "

    A NetworkInterface array that contains descriptions of the gateway network interfaces.

    " + } + }, + "GatewayOperationalState": { + "base": null, + "refs": { + "GatewayInfo$GatewayOperationalState": null + } + }, + "GatewayState": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$GatewayState": "

    One of the values that indicates the operating state of the gateway.

    " + } + }, + "GatewayTimezone": { + "base": null, + "refs": { + "ActivateGatewayInput$GatewayTimezone": "

    One of the values that indicates the time zone you want to set for the gateway. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule.

    ", + "DescribeGatewayInformationOutput$GatewayTimezone": "

    One of the values that indicates the time zone configured for the gateway.

    ", + "DescribeMaintenanceStartTimeOutput$Timezone": null, + "DescribeSnapshotScheduleOutput$Timezone": null, + "UpdateGatewayInformationInput$GatewayTimezone": null + } + }, + "GatewayType": { + "base": null, + "refs": { + "ActivateGatewayInput$GatewayType": "

    One of the values that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is STORED.

    ", + "DescribeGatewayInformationOutput$GatewayType": "

    The type of the gateway.

    ", + "GatewayInfo$GatewayType": null + } + }, + "Gateways": { + "base": null, + "refs": { + "ListGatewaysOutput$Gateways": null + } + }, + "HourOfDay": { + "base": null, + "refs": { + "DescribeMaintenanceStartTimeOutput$HourOfDay": null, + "DescribeSnapshotScheduleOutput$StartAt": null, + "UpdateMaintenanceStartTimeInput$HourOfDay": "

    The hour component of the maintenance start time represented as hh, where hh is the hour (00 to 23). The hour of the day is in the time zone of the gateway.

    ", + "UpdateSnapshotScheduleInput$StartAt": "

    The hour of the day at which the snapshot schedule begins represented as hh, where hh is the hour (0 to 23). The hour of the day is in the time zone of the gateway.

    " + } + }, + "Initiator": { + "base": null, + "refs": { + "Initiators$member": null + } + }, + "Initiators": { + "base": null, + "refs": { + "ListVolumeInitiatorsOutput$Initiators": "

    The host names and port numbers of all iSCSI initiators that are connected to the gateway.

    " + } + }, + "InternalServerError": { + "base": "

    An internal server error has occurred during the request. See the error and message fields for more information.

    ", + "refs": { + } + }, + "InvalidGatewayRequestException": { + "base": "

    An exception occurred because an invalid gateway request was issued to the service. See the error and message fields for more information.

    ", + "refs": { + } + }, + "IqnName": { + "base": null, + "refs": { + "ChapInfo$InitiatorName": "

    The iSCSI initiator that connects to the target.

    ", + "DeleteChapCredentialsInput$InitiatorName": "

    The iSCSI initiator that connects to the target.

    ", + "DeleteChapCredentialsOutput$InitiatorName": "

    The iSCSI initiator that connects to the target.

    ", + "UpdateChapCredentialsInput$InitiatorName": "

    The iSCSI initiator that connects to the target.

    ", + "UpdateChapCredentialsOutput$InitiatorName": "

    The iSCSI initiator that connects to the target. This is the same initiator name specified in the request.

    " + } + }, + "LastSoftwareUpdate": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$LastSoftwareUpdate": "

    The date on which the last software update was applied to the gateway. If the gateway has never been updated, this field does not return a value in the response.

    " + } + }, + "ListGatewaysInput": { + "base": "

    A JSON object containing zero or more of the following fields:

    ", + "refs": { + } + }, + "ListGatewaysOutput": { + "base": null, + "refs": { + } + }, + "ListLocalDisksInput": { + "base": "

    A JSON object containing the of the gateway.

    ", + "refs": { + } + }, + "ListLocalDisksOutput": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceInput": { + "base": "

    ListTagsForResourceInput

    ", + "refs": { + } + }, + "ListTagsForResourceOutput": { + "base": "

    ListTagsForResourceOutput

    ", + "refs": { + } + }, + "ListVolumeInitiatorsInput": { + "base": "

    ListVolumeInitiatorsInput

    ", + "refs": { + } + }, + "ListVolumeInitiatorsOutput": { + "base": "

    ListVolumeInitiatorsOutput

    ", + "refs": { + } + }, + "ListVolumeRecoveryPointsInput": { + "base": null, + "refs": { + } + }, + "ListVolumeRecoveryPointsOutput": { + "base": null, + "refs": { + } + }, + "ListVolumesInput": { + "base": "

    A JSON object that contains one or more of the following fields:

    ", + "refs": { + } + }, + "ListVolumesOutput": { + "base": null, + "refs": { + } + }, + "Marker": { + "base": null, + "refs": { + "DescribeTapeArchivesInput$Marker": "

    An opaque string that indicates the position at which to begin describing virtual tapes.

    ", + "DescribeTapeArchivesOutput$Marker": "

    An opaque string that indicates the position at which the virtual tapes that were fetched for description ended. Use this marker in your next request to fetch the next set of virtual tapes in the virtual tape shelf (VTS). If there are no more virtual tapes to describe, this field does not appear in the response.

    ", + "DescribeTapeRecoveryPointsInput$Marker": "

    An opaque string that indicates the position at which to begin describing the virtual tape recovery points.

    ", + "DescribeTapeRecoveryPointsOutput$Marker": "

    An opaque string that indicates the position at which the virtual tape recovery points that were listed for description ended.

    Use this marker in your next request to list the next set of virtual tape recovery points in the list. If there are no more recovery points to describe, this field does not appear in the response.

    ", + "DescribeTapesInput$Marker": "

    A marker value, obtained in a previous call to DescribeTapes. This marker indicates which page of results to retrieve.

    If not specified, the first page of results is retrieved.

    ", + "DescribeTapesOutput$Marker": "

    An opaque string which can be used as part of a subsequent DescribeTapes call to retrieve the next page of results.

    If a response does not contain a marker, then there are no more results to be retrieved.

    ", + "DescribeVTLDevicesInput$Marker": "

    An opaque string that indicates the position at which to begin describing the VTL devices.

    ", + "DescribeVTLDevicesOutput$Marker": "

    An opaque string that indicates the position at which the VTL devices that were fetched for description ended. Use the marker in your next request to fetch the next set of VTL devices in the list. If there are no more VTL devices to describe, this field does not appear in the response.

    ", + "ListGatewaysInput$Marker": "

    An opaque string that indicates the position at which to begin the returned list of gateways.

    ", + "ListGatewaysOutput$Marker": null, + "ListTagsForResourceInput$Marker": "

    An opaque string that indicates the position at which to begin returning the list of tags.

    ", + "ListTagsForResourceOutput$Marker": "

    An opaque string that indicates the position at which to stop returning the list of tags.

    ", + "ListVolumesInput$Marker": "

    A string that indicates the position at which to begin the returned list of volumes. Obtain the marker from the response of a previous List iSCSI Volumes request.

    ", + "ListVolumesOutput$Marker": null + } + }, + "MediumChangerType": { + "base": null, + "refs": { + "ActivateGatewayInput$MediumChangerType": "

    The value that indicates the type of medium changer to use for gateway-VTL. This field is optional.

    Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

    " + } + }, + "MinuteOfHour": { + "base": null, + "refs": { + "DescribeMaintenanceStartTimeOutput$MinuteOfHour": null, + "UpdateMaintenanceStartTimeInput$MinuteOfHour": "

    The minute component of the maintenance start time represented as mm, where mm is the minute (00 to 59). The minute of the hour is in the time zone of the gateway.

    " + } + }, + "NetworkInterface": { + "base": "

    Describes a gateway's network interface.

    ", + "refs": { + "GatewayNetworkInterfaces$member": null + } + }, + "NetworkInterfaceId": { + "base": null, + "refs": { + "CreateCachediSCSIVolumeInput$NetworkInterfaceId": null, + "CreateStorediSCSIVolumeInput$NetworkInterfaceId": "

    The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

    Valid Values: A valid IP address.

    ", + "DeviceiSCSIAttributes$NetworkInterfaceId": "

    The network interface identifier of the VTL device.

    ", + "VolumeiSCSIAttributes$NetworkInterfaceId": "

    The network interface identifier.

    " + } + }, + "NextUpdateAvailabilityDate": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$NextUpdateAvailabilityDate": "

    The date on which an update to the gateway is available. This date is in the time zone of the gateway. If the gateway is not available for an update this field is not returned in the response.

    " + } + }, + "NumTapesToCreate": { + "base": null, + "refs": { + "CreateTapesInput$NumTapesToCreate": "

    The number of virtual tapes you want to create.

    " + } + }, + "PositiveIntObject": { + "base": null, + "refs": { + "DescribeTapeArchivesInput$Limit": "

    Specifies that the number of virtual tapes descried be limited to the specified number.

    ", + "DescribeTapeRecoveryPointsInput$Limit": "

    Specifies that the number of virtual tape recovery points that are described be limited to the specified number.

    ", + "DescribeTapesInput$Limit": "

    Specifies that the number of virtual tapes described be limited to the specified number.

    Amazon Web Services may impose its own limit, if this field is not set.", + "DescribeVTLDevicesInput$Limit": "

    Specifies that the number of VTL devices described be limited to the specified number.

    ", + "ListGatewaysInput$Limit": "

    Specifies that the list of gateways returned be limited to the specified number of items.

    ", + "ListTagsForResourceInput$Limit": "

    Specifies that the list of tags returned be limited to the specified number of items.

    ", + "ListVolumesInput$Limit": "

    Specifies that the list of volumes returned be limited to the specified number of items.

    ", + "VolumeiSCSIAttributes$LunNumber": "

    The logical disk number.

    " + } + }, + "RecurrenceInHours": { + "base": null, + "refs": { + "DescribeSnapshotScheduleOutput$RecurrenceInHours": null, + "UpdateSnapshotScheduleInput$RecurrenceInHours": "

    Frequency of snapshots. Specify the number of hours between snapshots.

    " + } + }, + "RegionId": { + "base": null, + "refs": { + "ActivateGatewayInput$GatewayRegion": "

    One of the values that indicates the region where you want to store the snapshot backups. The gateway region specified must be the same region as the region in your Host header in the request. For more information about available regions and endpoints for AWS Storage Gateway, see Regions and Endpoints in the Amazon Web Services Glossary.

    Valid Values: \"us-east-1\", \"us-west-1\", \"us-west-2\", \"eu-west-1\", \"eu-central-1\", \"ap-northeast-1\", \"ap-southeast-1\", \"ap-southeast-2\", \"sa-east-1\"

    " + } + }, + "RemoveTagsFromResourceInput": { + "base": "

    RemoveTagsFromResourceInput

    ", + "refs": { + } + }, + "RemoveTagsFromResourceOutput": { + "base": "

    RemoveTagsFromResourceOutput

    ", + "refs": { + } + }, + "ResetCacheInput": { + "base": null, + "refs": { + } + }, + "ResetCacheOutput": { + "base": null, + "refs": { + } + }, + "ResourceARN": { + "base": null, + "refs": { + "AddTagsToResourceInput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource you want to add tags to.

    ", + "AddTagsToResourceOutput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource you want to add tags to.

    ", + "ListTagsForResourceInput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource for which you want to list tags.

    ", + "ListTagsForResourceOutput$ResourceARN": "

    he Amazon Resource Name (ARN) of the resource for which you want to list tags.

    ", + "RemoveTagsFromResourceInput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource you want to remove the tags from.

    ", + "RemoveTagsFromResourceOutput$ResourceARN": "

    The Amazon Resource Name (ARN) of the resource that the tags were removed from.

    " + } + }, + "RetrieveTapeArchiveInput": { + "base": "

    RetrieveTapeArchiveInput

    ", + "refs": { + } + }, + "RetrieveTapeArchiveOutput": { + "base": "

    RetrieveTapeArchiveOutput

    ", + "refs": { + } + }, + "RetrieveTapeRecoveryPointInput": { + "base": "

    RetrieveTapeRecoveryPointInput

    ", + "refs": { + } + }, + "RetrieveTapeRecoveryPointOutput": { + "base": "

    RetrieveTapeRecoveryPointOutput

    ", + "refs": { + } + }, + "ShutdownGatewayInput": { + "base": "

    A JSON object containing the of the gateway to shut down.

    ", + "refs": { + } + }, + "ShutdownGatewayOutput": { + "base": "

    A JSON object containing the of the gateway that was shut down.

    ", + "refs": { + } + }, + "SnapshotDescription": { + "base": null, + "refs": { + "CreateSnapshotFromVolumeRecoveryPointInput$SnapshotDescription": null, + "CreateSnapshotInput$SnapshotDescription": "

    Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field

    " + } + }, + "SnapshotId": { + "base": null, + "refs": { + "CachediSCSIVolume$SourceSnapshotId": null, + "CreateCachediSCSIVolumeInput$SnapshotId": null, + "CreateSnapshotFromVolumeRecoveryPointOutput$SnapshotId": null, + "CreateSnapshotOutput$SnapshotId": "

    The snapshot ID that is used to refer to the snapshot in future operations such as describing snapshots (Amazon Elastic Compute Cloud API DescribeSnapshots) or creating a volume from a snapshot (CreateStorediSCSIVolume).

    ", + "CreateStorediSCSIVolumeInput$SnapshotId": "

    The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new stored volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

    ", + "StorediSCSIVolume$SourceSnapshotId": null + } + }, + "StartGatewayInput": { + "base": "

    A JSON object containing the of the gateway to start.

    ", + "refs": { + } + }, + "StartGatewayOutput": { + "base": "

    A JSON object containing the of the gateway that was restarted.

    ", + "refs": { + } + }, + "StorageGatewayError": { + "base": "

    Provides additional information about an error that was returned by the service as an or. See the errorCode and errorDetails members for more information about the error.

    ", + "refs": { + "InternalServerError$error": "

    A StorageGatewayError that provides more detail about the cause of the error.

    ", + "InvalidGatewayRequestException$error": "

    A StorageGatewayError that provides more detail about the cause of the error.

    " + } + }, + "StorediSCSIVolume": { + "base": null, + "refs": { + "StorediSCSIVolumes$member": null + } + }, + "StorediSCSIVolumes": { + "base": null, + "refs": { + "DescribeStorediSCSIVolumesOutput$StorediSCSIVolumes": null + } + }, + "Tag": { + "base": null, + "refs": { + "Tags$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": null, + "TagKeys$member": null + } + }, + "TagKeys": { + "base": null, + "refs": { + "RemoveTagsFromResourceInput$TagKeys": "

    The keys of the tags you want to remove from the specified resource. A tag is composed of a key/value pair.

    " + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": null + } + }, + "Tags": { + "base": null, + "refs": { + "AddTagsToResourceInput$Tags": "

    The key-value pair that represents the tag you want to add to the resource. The value can be an empty string.

    Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.

    ", + "ListTagsForResourceOutput$Tags": "

    An array that contains the tags for the specified resource.

    " + } + }, + "Tape": { + "base": "

    Describes a virtual tape object.

    ", + "refs": { + "Tapes$member": null + } + }, + "TapeARN": { + "base": null, + "refs": { + "CancelArchivalInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape you want to cancel archiving for.

    ", + "CancelArchivalOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape for which archiving was canceled.

    ", + "CancelRetrievalInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape you want to cancel retrieval for.

    ", + "CancelRetrievalOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape for which retrieval was canceled.

    ", + "DeleteTapeArchiveInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape to delete from the virtual tape shelf (VTS).

    ", + "DeleteTapeArchiveOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape that was deleted from the virtual tape shelf (VTS).

    ", + "DeleteTapeInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape to delete.

    ", + "DeleteTapeOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the deleted virtual tape.

    ", + "RetrieveTapeArchiveInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape you want to retrieve from the virtual tape shelf (VTS).

    ", + "RetrieveTapeArchiveOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the retrieved virtual tape.

    ", + "RetrieveTapeRecoveryPointInput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape for which you want to retrieve the recovery point.

    ", + "RetrieveTapeRecoveryPointOutput$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape for which the recovery point was retrieved.

    ", + "Tape$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape.

    ", + "TapeARNs$member": null, + "TapeArchive$TapeARN": "

    The Amazon Resource Name (ARN) of an archived virtual tape.

    ", + "TapeRecoveryPointInfo$TapeARN": "

    The Amazon Resource Name (ARN) of the virtual tape.

    " + } + }, + "TapeARNs": { + "base": null, + "refs": { + "CreateTapesOutput$TapeARNs": "

    A list of unique Amazon Resource Named (ARN) that represents the virtual tapes that were created.

    ", + "DescribeTapeArchivesInput$TapeARNs": "

    Specifies one or more unique Amazon Resource Names (ARNs) that represent the virtual tapes you want to describe.

    ", + "DescribeTapesInput$TapeARNs": "

    Specifies one or more unique Amazon Resource Names (ARNs) that represent the virtual tapes you want to describe. If this parameter is not specified, AWS Storage Gateway returns a description of all virtual tapes associated with the specified gateway.

    " + } + }, + "TapeArchive": { + "base": "

    Represents a virtual tape that is archived in the virtual tape shelf (VTS).

    ", + "refs": { + "TapeArchives$member": null + } + }, + "TapeArchiveStatus": { + "base": null, + "refs": { + "TapeArchive$TapeStatus": "

    The current state of the archived virtual tape.

    " + } + }, + "TapeArchives": { + "base": null, + "refs": { + "DescribeTapeArchivesOutput$TapeArchives": "

    An array of virtual tape objects in the virtual tape shelf (VTS). The description includes of the Amazon Resource Name(ARN) of the virtual tapes. The information returned includes the Amazon Resource Names (ARNs) of the tapes, size of the tapes, status of the tapes, progress of the description and tape barcode.

    " + } + }, + "TapeBarcode": { + "base": null, + "refs": { + "Tape$TapeBarcode": "

    The barcode that identifies a specific virtual tape.

    ", + "TapeArchive$TapeBarcode": "

    The barcode that identifies the archived virtual tape.

    " + } + }, + "TapeBarcodePrefix": { + "base": null, + "refs": { + "CreateTapesInput$TapeBarcodePrefix": "

    A prefix you append to the barcode of the virtual tape you are creating. This makes a barcode unique.

    The prefix must be 1 to 4 characters in length and must be upper-case letters A-Z." + } + }, + "TapeDriveType": { + "base": null, + "refs": { + "ActivateGatewayInput$TapeDriveType": "

    The value that indicates the type of tape drive to use for gateway-VTL. This field is optional.

    Valid Values: \"IBM-ULT3580-TD5\"

    " + } + }, + "TapeRecoveryPointInfo": { + "base": "

    Describes a recovery point.

    ", + "refs": { + "TapeRecoveryPointInfos$member": null + } + }, + "TapeRecoveryPointInfos": { + "base": null, + "refs": { + "DescribeTapeRecoveryPointsOutput$TapeRecoveryPointInfos": "

    An array of TapeRecoveryPointInfos that are available for the specified gateway.

    " + } + }, + "TapeRecoveryPointStatus": { + "base": null, + "refs": { + "TapeRecoveryPointInfo$TapeStatus": null + } + }, + "TapeSize": { + "base": null, + "refs": { + "CreateTapesInput$TapeSizeInBytes": "

    The size, in bytes, of the virtual tapes you want to create.

    The size must be gigabyte (1024*1024*1024 byte) aligned.", + "Tape$TapeSizeInBytes": "

    The size, in bytes, of the virtual tape.

    ", + "TapeArchive$TapeSizeInBytes": "

    The size, in bytes, of the archived virtual tape.

    ", + "TapeRecoveryPointInfo$TapeSizeInBytes": "

    The size, in bytes, of the virtual tapes to recover.

    " + } + }, + "TapeStatus": { + "base": null, + "refs": { + "Tape$TapeStatus": "

    The current state of the virtual tape.

    " + } + }, + "Tapes": { + "base": null, + "refs": { + "DescribeTapesOutput$Tapes": "

    An array of virtual tape descriptions.

    " + } + }, + "TargetARN": { + "base": null, + "refs": { + "ChapInfo$TargetARN": "

    The Amazon Resource Name (ARN) of the volume.

    Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

    ", + "CreateCachediSCSIVolumeOutput$TargetARN": null, + "CreateStorediSCSIVolumeOutput$TargetARN": "

    he Amazon Resource Name (ARN) of the volume target that includes the iSCSI name that initiators can use to connect to the target.

    ", + "DeleteChapCredentialsInput$TargetARN": "

    The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to return to retrieve the TargetARN for specified VolumeARN.

    ", + "DeleteChapCredentialsOutput$TargetARN": "

    The Amazon Resource Name (ARN) of the target.

    ", + "DescribeChapCredentialsInput$TargetARN": "

    The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to return to retrieve the TargetARN for specified VolumeARN.

    ", + "DeviceiSCSIAttributes$TargetARN": "

    Specifies the unique Amazon Resource Name(ARN) that encodes the iSCSI qualified name(iqn) of a tape drive or media changer target.

    ", + "UpdateChapCredentialsInput$TargetARN": "

    The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to return the TargetARN for specified VolumeARN.

    ", + "UpdateChapCredentialsOutput$TargetARN": "

    The Amazon Resource Name (ARN) of the target. This is the same target specified in the request.

    ", + "VolumeiSCSIAttributes$TargetARN": "

    The Amazon Resource Name (ARN) of the volume target.

    " + } + }, + "TargetName": { + "base": null, + "refs": { + "CreateCachediSCSIVolumeInput$TargetName": null, + "CreateStorediSCSIVolumeInput$TargetName": "

    The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-1:111122223333:gateway/mygateway/target/iqn.1997-05.com.amazon:myvolume. The target name must be unique across all volumes of a gateway.

    " + } + }, + "Time": { + "base": null, + "refs": { + "TapeArchive$CompletionTime": "

    The time that the archiving of the virtual tape was completed.

    The string format of the completion time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

    ", + "TapeRecoveryPointInfo$TapeRecoveryPointTime": "

    The time when the point-in-time view of the virtual tape was replicated for later recovery.

    The string format of the tape recovery point time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

    " + } + }, + "UpdateBandwidthRateLimitInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "UpdateBandwidthRateLimitOutput": { + "base": "

    A JSON object containing the of the gateway whose throttle information was updated.

    ", + "refs": { + } + }, + "UpdateChapCredentialsInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "UpdateChapCredentialsOutput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "UpdateGatewayInformationInput": { + "base": null, + "refs": { + } + }, + "UpdateGatewayInformationOutput": { + "base": "

    A JSON object containing the of the gateway that was updated.

    ", + "refs": { + } + }, + "UpdateGatewaySoftwareNowInput": { + "base": "

    A JSON object containing the of the gateway to update.

    ", + "refs": { + } + }, + "UpdateGatewaySoftwareNowOutput": { + "base": "

    A JSON object containing the of the gateway that was updated.

    ", + "refs": { + } + }, + "UpdateMaintenanceStartTimeInput": { + "base": "

    A JSON object containing the following fields:

    ", + "refs": { + } + }, + "UpdateMaintenanceStartTimeOutput": { + "base": "

    A JSON object containing the of the gateway whose maintenance start time is updated.

    ", + "refs": { + } + }, + "UpdateSnapshotScheduleInput": { + "base": "

    A JSON object containing one or more of the following fields:

    ", + "refs": { + } + }, + "UpdateSnapshotScheduleOutput": { + "base": "

    A JSON object containing the of the updated storage volume.

    ", + "refs": { + } + }, + "UpdateVTLDeviceTypeInput": { + "base": "

    UpdateVTLDeviceTypeInput

    ", + "refs": { + } + }, + "UpdateVTLDeviceTypeOutput": { + "base": "

    UpdateVTLDeviceTypeOutput

    ", + "refs": { + } + }, + "VTLDevice": { + "base": "

    Represents a device object associated with a gateway-VTL.

    ", + "refs": { + "VTLDevices$member": null + } + }, + "VTLDeviceARN": { + "base": null, + "refs": { + "Tape$VTLDevice": "

    The virtual tape library (VTL) device that the virtual tape is associated with.

    ", + "UpdateVTLDeviceTypeInput$VTLDeviceARN": "

    The Amazon Resource Name (ARN) of the medium changer you want to select.

    ", + "UpdateVTLDeviceTypeOutput$VTLDeviceARN": "

    The Amazon Resource Name (ARN) of the medium changer you have selected.

    ", + "VTLDevice$VTLDeviceARN": "

    Specifies the unique Amazon Resource Name (ARN) of the device (tape drive or media changer).

    ", + "VTLDeviceARNs$member": null + } + }, + "VTLDeviceARNs": { + "base": null, + "refs": { + "DescribeVTLDevicesInput$VTLDeviceARNs": "

    An array of strings, where each string represents the Amazon Resource Name (ARN) of a VTL device.

    All of the specified VTL devices must be from the same gateway. If no VTL devices are specified, the result will contain all devices on the specified gateway." + } + }, + "VTLDeviceProductIdentifier": { + "base": null, + "refs": { + "VTLDevice$VTLDeviceProductIdentifier": null + } + }, + "VTLDeviceType": { + "base": null, + "refs": { + "VTLDevice$VTLDeviceType": null + } + }, + "VTLDeviceVendor": { + "base": null, + "refs": { + "VTLDevice$VTLDeviceVendor": null + } + }, + "VTLDevices": { + "base": null, + "refs": { + "DescribeVTLDevicesOutput$VTLDevices": "

    An array of VTL device objects composed of the Amazon Resource Name(ARN) of the VTL devices.

    " + } + }, + "VolumeARN": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeARN": null, + "CreateCachediSCSIVolumeOutput$VolumeARN": null, + "CreateSnapshotFromVolumeRecoveryPointInput$VolumeARN": null, + "CreateSnapshotFromVolumeRecoveryPointOutput$VolumeARN": null, + "CreateSnapshotInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

    ", + "CreateSnapshotOutput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume of which the snapshot was taken.

    ", + "CreateStorediSCSIVolumeOutput$VolumeARN": "

    The Amazon Resource Name (ARN) of the configured volume.

    ", + "DeleteSnapshotScheduleInput$VolumeARN": null, + "DeleteSnapshotScheduleOutput$VolumeARN": null, + "DeleteVolumeInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

    ", + "DeleteVolumeOutput$VolumeARN": "

    The Amazon Resource Name (ARN) of the storage volume that was deleted. It is the same ARN you provided in the request.

    ", + "DescribeSnapshotScheduleInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

    ", + "DescribeSnapshotScheduleOutput$VolumeARN": null, + "ListVolumeInitiatorsInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes for the gateway.

    ", + "StorediSCSIVolume$VolumeARN": null, + "UpdateSnapshotScheduleInput$VolumeARN": "

    The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

    ", + "UpdateSnapshotScheduleOutput$VolumeARN": null, + "VolumeARNs$member": null, + "VolumeInfo$VolumeARN": null, + "VolumeRecoveryPointInfo$VolumeARN": null + } + }, + "VolumeARNs": { + "base": null, + "refs": { + "DescribeCachediSCSIVolumesInput$VolumeARNs": null, + "DescribeStorediSCSIVolumesInput$VolumeARNs": "

    An array of strings where each string represents the Amazon Resource Name (ARN) of a stored volume. All of the specified stored volumes must from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

    " + } + }, + "VolumeId": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeId": null, + "StorediSCSIVolume$VolumeId": null + } + }, + "VolumeInfo": { + "base": null, + "refs": { + "VolumeInfos$member": null + } + }, + "VolumeInfos": { + "base": null, + "refs": { + "ListVolumesOutput$VolumeInfos": null + } + }, + "VolumeRecoveryPointInfo": { + "base": null, + "refs": { + "VolumeRecoveryPointInfos$member": null + } + }, + "VolumeRecoveryPointInfos": { + "base": null, + "refs": { + "ListVolumeRecoveryPointsOutput$VolumeRecoveryPointInfos": null + } + }, + "VolumeStatus": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeStatus": null, + "StorediSCSIVolume$VolumeStatus": null + } + }, + "VolumeType": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeType": null, + "StorediSCSIVolume$VolumeType": null, + "VolumeInfo$VolumeType": null + } + }, + "VolumeiSCSIAttributes": { + "base": "

    Lists iSCSI information about a volume.

    ", + "refs": { + "CachediSCSIVolume$VolumeiSCSIAttributes": null, + "StorediSCSIVolume$VolumeiSCSIAttributes": null + } + }, + "boolean": { + "base": null, + "refs": { + "CreateStorediSCSIVolumeInput$PreserveExistingData": "

    Specify this field as true if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.

    Valid Values: true, false

    ", + "DeviceiSCSIAttributes$ChapEnabled": "

    Indicates whether mutual CHAP is enabled for the iSCSI target.

    ", + "StorediSCSIVolume$PreservedExistingData": null, + "VolumeiSCSIAttributes$ChapEnabled": "

    Indicates whether mutual CHAP is enabled for the iSCSI target.

    " + } + }, + "double": { + "base": null, + "refs": { + "DescribeCacheOutput$CacheUsedPercentage": null, + "DescribeCacheOutput$CacheDirtyPercentage": null, + "DescribeCacheOutput$CacheHitPercentage": null, + "DescribeCacheOutput$CacheMissPercentage": null + } + }, + "errorDetails": { + "base": null, + "refs": { + "StorageGatewayError$errorDetails": "

    Human-readable text that provides detail about the error that occurred.

    " + } + }, + "integer": { + "base": null, + "refs": { + "DeviceiSCSIAttributes$NetworkInterfacePort": "

    The port used to communicate with iSCSI VTL device targets.

    ", + "VolumeiSCSIAttributes$NetworkInterfacePort": "

    The port used to communicate with iSCSI targets.

    " + } + }, + "long": { + "base": null, + "refs": { + "CachediSCSIVolume$VolumeSizeInBytes": null, + "CreateCachediSCSIVolumeInput$VolumeSizeInBytes": null, + "CreateStorediSCSIVolumeOutput$VolumeSizeInBytes": "

    The size of the volume in bytes.

    ", + "DescribeCacheOutput$CacheAllocatedInBytes": null, + "DescribeUploadBufferOutput$UploadBufferUsedInBytes": null, + "DescribeUploadBufferOutput$UploadBufferAllocatedInBytes": null, + "DescribeWorkingStorageOutput$WorkingStorageUsedInBytes": "

    The total working storage in bytes in use by the gateway. If no working storage is configured for the gateway, this field returns 0.

    ", + "DescribeWorkingStorageOutput$WorkingStorageAllocatedInBytes": "

    The total working storage in bytes allocated for the gateway. If no working storage is configured for the gateway, this field returns 0.

    ", + "Disk$DiskSizeInBytes": null, + "StorediSCSIVolume$VolumeSizeInBytes": null, + "VolumeRecoveryPointInfo$VolumeSizeInBytes": null, + "VolumeRecoveryPointInfo$VolumeUsageInBytes": null + } + }, + "string": { + "base": null, + "refs": { + "CreateSnapshotFromVolumeRecoveryPointOutput$VolumeRecoveryPointTime": null, + "DescribeGatewayInformationOutput$GatewayName": "

    The gateway name.

    ", + "Disk$DiskPath": null, + "Disk$DiskNode": null, + "Disk$DiskStatus": null, + "Disk$DiskAllocationResource": null, + "GatewayInfo$GatewayName": null, + "InternalServerError$message": "

    A human-readable message describing the error that occurred.

    ", + "InvalidGatewayRequestException$message": "

    A human-readable message describing the error that occurred.

    ", + "NetworkInterface$Ipv4Address": "

    The Internet Protocol version 4 (IPv4) address of the interface.

    ", + "NetworkInterface$MacAddress": "

    The Media Access Control (MAC) address of the interface.

    This is currently unsupported and will not be returned in output.", + "NetworkInterface$Ipv6Address": "

    The Internet Protocol version 6 (IPv6) address of the interface. Currently not supported.

    ", + "UpdateGatewayInformationOutput$GatewayName": null, + "VolumeRecoveryPointInfo$VolumeRecoveryPointTime": null, + "errorDetails$key": null, + "errorDetails$value": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/storagegateway/2013-06-30/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,52 @@ +{ + "pagination": { + "DescribeCachediSCSIVolumes": { + "result_key": "CachediSCSIVolumes" + }, + "DescribeStorediSCSIVolumes": { + "result_key": "StorediSCSIVolumes" + }, + "DescribeTapeArchives": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "TapeArchives" + }, + "DescribeTapeRecoveryPoints": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "TapeRecoveryPointInfos" + }, + "DescribeTapes": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Tapes" + }, + "DescribeVTLDevices": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "VTLDevices" + }, + "ListGateways": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "Gateways" + }, + "ListLocalDisks": { + "result_key": "Disks" + }, + "ListVolumeRecoveryPoints": { + "result_key": "VolumeRecoveryPointInfos" + }, + "ListVolumes": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "VolumeInfos" + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,436 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-08-10", + "endpointPrefix":"streams.dynamodb", + "jsonVersion":"1.0", + "serviceFullName":"Amazon DynamoDB Streams", + "signatureVersion":"v4", + "signingName":"dynamodb", + "targetPrefix":"DynamoDBStreams_20120810", + "protocol":"json" + }, + "operations":{ + "DescribeStream":{ + "name":"DescribeStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStreamInput"}, + "output":{"shape":"DescribeStreamOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "GetRecords":{ + "name":"GetRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRecordsInput"}, + "output":{"shape":"GetRecordsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"ExpiredIteratorException", + "exception":true + }, + { + "shape":"TrimmedDataAccessException", + "exception":true + } + ] + }, + "GetShardIterator":{ + "name":"GetShardIterator", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetShardIteratorInput"}, + 
"output":{"shape":"GetShardIteratorOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"TrimmedDataAccessException", + "exception":true + } + ] + }, + "ListStreams":{ + "name":"ListStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStreamsInput"}, + "output":{"shape":"ListStreamsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "AttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "AttributeName":{ + "type":"string", + "max":65535 + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "S":{"shape":"StringAttributeValue"}, + "N":{"shape":"NumberAttributeValue"}, + "B":{"shape":"BinaryAttributeValue"}, + "SS":{"shape":"StringSetAttributeValue"}, + "NS":{"shape":"NumberSetAttributeValue"}, + "BS":{"shape":"BinarySetAttributeValue"}, + "M":{"shape":"MapAttributeValue"}, + "L":{"shape":"ListAttributeValue"}, + "NULL":{"shape":"NullAttributeValue"}, + "BOOL":{"shape":"BooleanAttributeValue"} + } + }, + "BinaryAttributeValue":{"type":"blob"}, + "BinarySetAttributeValue":{ + "type":"list", + "member":{"shape":"BinaryAttributeValue"} + }, + "BooleanAttributeValue":{"type":"boolean"}, + "Date":{"type":"timestamp"}, + "DescribeStreamInput":{ + "type":"structure", + "required":["StreamArn"], + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ExclusiveStartShardId":{"shape":"ShardId"} + } + }, + "DescribeStreamOutput":{ + "type":"structure", + "members":{ + "StreamDescription":{"shape":"StreamDescription"} + } + }, + "ErrorMessage":{"type":"string"}, + "ExpiredIteratorException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + 
"exception":true + }, + "GetRecordsInput":{ + "type":"structure", + "required":["ShardIterator"], + "members":{ + "ShardIterator":{"shape":"ShardIterator"}, + "Limit":{"shape":"PositiveIntegerObject"} + } + }, + "GetRecordsOutput":{ + "type":"structure", + "members":{ + "Records":{"shape":"RecordList"}, + "NextShardIterator":{"shape":"ShardIterator"} + } + }, + "GetShardIteratorInput":{ + "type":"structure", + "required":[ + "StreamArn", + "ShardId", + "ShardIteratorType" + ], + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "ShardId":{"shape":"ShardId"}, + "ShardIteratorType":{"shape":"ShardIteratorType"}, + "SequenceNumber":{"shape":"SequenceNumber"} + } + }, + "GetShardIteratorOutput":{ + "type":"structure", + "members":{ + "ShardIterator":{"shape":"ShardIterator"} + } + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "KeySchema":{ + "type":"list", + "member":{"shape":"KeySchemaElement"}, + "min":1, + "max":2 + }, + "KeySchemaAttributeName":{ + "type":"string", + "min":1, + "max":255 + }, + "KeySchemaElement":{ + "type":"structure", + "required":[ + "AttributeName", + "KeyType" + ], + "members":{ + "AttributeName":{"shape":"KeySchemaAttributeName"}, + "KeyType":{"shape":"KeyType"} + } + }, + "KeyType":{ + "type":"string", + "enum":[ + "HASH", + "RANGE" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListAttributeValue":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "ListStreamsInput":{ + "type":"structure", + "members":{ + "TableName":{"shape":"TableName"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ExclusiveStartStreamArn":{"shape":"StreamArn"} + } + }, + "ListStreamsOutput":{ + "type":"structure", + "members":{ + "Streams":{"shape":"StreamList"}, + "LastEvaluatedStreamArn":{"shape":"StreamArn"} + } + }, + "MapAttributeValue":{ + "type":"map", 
+ "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "NullAttributeValue":{"type":"boolean"}, + "NumberAttributeValue":{"type":"string"}, + "NumberSetAttributeValue":{ + "type":"list", + "member":{"shape":"NumberAttributeValue"} + }, + "OperationType":{ + "type":"string", + "enum":[ + "INSERT", + "MODIFY", + "REMOVE" + ] + }, + "PositiveIntegerObject":{ + "type":"integer", + "min":1 + }, + "PositiveLongObject":{ + "type":"long", + "min":1 + }, + "Record":{ + "type":"structure", + "members":{ + "eventID":{"shape":"String"}, + "eventName":{"shape":"OperationType"}, + "eventVersion":{"shape":"String"}, + "eventSource":{"shape":"String"}, + "awsRegion":{"shape":"String"}, + "dynamodb":{"shape":"StreamRecord"} + } + }, + "RecordList":{ + "type":"list", + "member":{"shape":"Record"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "SequenceNumber":{ + "type":"string", + "min":21, + "max":40 + }, + "SequenceNumberRange":{ + "type":"structure", + "members":{ + "StartingSequenceNumber":{"shape":"SequenceNumber"}, + "EndingSequenceNumber":{"shape":"SequenceNumber"} + } + }, + "Shard":{ + "type":"structure", + "members":{ + "ShardId":{"shape":"ShardId"}, + "SequenceNumberRange":{"shape":"SequenceNumberRange"}, + "ParentShardId":{"shape":"ShardId"} + } + }, + "ShardDescriptionList":{ + "type":"list", + "member":{"shape":"Shard"} + }, + "ShardId":{ + "type":"string", + "min":28, + "max":65 + }, + "ShardIterator":{ + "type":"string", + "min":1, + "max":2048 + }, + "ShardIteratorType":{ + "type":"string", + "enum":[ + "TRIM_HORIZON", + "LATEST", + "AT_SEQUENCE_NUMBER", + "AFTER_SEQUENCE_NUMBER" + ] + }, + "Stream":{ + "type":"structure", + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "TableName":{"shape":"TableName"}, + "StreamLabel":{"shape":"String"} + } + }, + "StreamArn":{ + "type":"string", + "min":37, + "max":1024 + }, + "StreamDescription":{ + 
"type":"structure", + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "StreamLabel":{"shape":"String"}, + "StreamStatus":{"shape":"StreamStatus"}, + "StreamViewType":{"shape":"StreamViewType"}, + "CreationRequestDateTime":{"shape":"Date"}, + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "Shards":{"shape":"ShardDescriptionList"}, + "LastEvaluatedShardId":{"shape":"ShardId"} + } + }, + "StreamList":{ + "type":"list", + "member":{"shape":"Stream"} + }, + "StreamRecord":{ + "type":"structure", + "members":{ + "Keys":{"shape":"AttributeMap"}, + "NewImage":{"shape":"AttributeMap"}, + "OldImage":{"shape":"AttributeMap"}, + "SequenceNumber":{"shape":"SequenceNumber"}, + "SizeBytes":{"shape":"PositiveLongObject"}, + "StreamViewType":{"shape":"StreamViewType"} + } + }, + "StreamStatus":{ + "type":"string", + "enum":[ + "ENABLING", + "ENABLED", + "DISABLING", + "DISABLED" + ] + }, + "StreamViewType":{ + "type":"string", + "enum":[ + "NEW_IMAGE", + "OLD_IMAGE", + "NEW_AND_OLD_IMAGES", + "KEYS_ONLY" + ] + }, + "String":{"type":"string"}, + "StringAttributeValue":{"type":"string"}, + "StringSetAttributeValue":{ + "type":"list", + "member":{"shape":"StringAttributeValue"} + }, + "TableName":{ + "type":"string", + "min":3, + "max":255, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "TrimmedDataAccessException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/streams.dynamodb/2012-08-10/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,353 @@ +{ + "version": "2.0", + "operations": { + "DescribeStream": "

    Returns information about a stream, including the current status of the stream, its Amazon Resource Name (ARN), the composition of its shards, and its corresponding DynamoDB table.

    You can call DescribeStream at a maximum rate of 10 times per second.

    Each shard in the stream has a SequenceNumberRange associated with it. If the SequenceNumberRange has a StartingSequenceNumber but no EndingSequenceNumber, then the shard is still open (able to receive more stream records). If both StartingSequenceNumber and EndingSequenceNumber are present, then that shard is closed and can no longer receive more data.

    ", + "GetRecords": "

    Retrieves the stream records from a given shard.

    Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading stream records sequentially. If there are no stream records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains stream records.

    GetRecords can retrieve a maximum of 1 MB of data or 2000 stream records, whichever comes first.

    ", + "GetShardIterator": "

    Returns a shard iterator. A shard iterator provides information about how to retrieve the stream records from within a shard. Use the shard iterator in a subsequent GetRecords request to read the stream records from the shard.

    A shard iterator expires 15 minutes after it is returned to the requester.

    ", + "ListStreams": "

    Returns an array of stream ARNs associated with the current account and endpoint. If the TableName parameter is present, then ListStreams will return only the streams ARNs for that table.

    You can call ListStreams at a maximum rate of 5 times per second.

    " + }, + "service": "Amazon DynamoDB Streams

    This is the Amazon DynamoDB Streams API Reference. This guide describes the low-level API actions for accessing streams and processing stream records. For information about application development with DynamoDB Streams, see the Amazon DynamoDB Developer Guide.

    Note that this document is intended for use with the following DynamoDB documentation:

    The following are short descriptions of each low-level DynamoDB Streams API action, organized by function.

    • DescribeStream - Returns detailed information about a particular stream.

    • GetRecords - Retrieves the stream records from within a shard.

    • GetShardIterator - Returns information on how to retrieve the stream records from a shard with a given shard ID.

    • ListStreams - Returns a list of all the streams associated with the current AWS account and endpoint.

    ", + "shapes": { + "AttributeMap": { + "base": null, + "refs": { + "StreamRecord$Keys": "

    The primary key attribute(s) for the DynamoDB item that was modified.

    ", + "StreamRecord$NewImage": "

    The item in the DynamoDB table as it appeared after it was modified.

    ", + "StreamRecord$OldImage": "

    The item in the DynamoDB table as it appeared before it was modified.

    " + } + }, + "AttributeName": { + "base": null, + "refs": { + "AttributeMap$key": null, + "MapAttributeValue$key": null + } + }, + "AttributeValue": { + "base": "

    Represents the data for an attribute. You can set one, and only one, of the elements.

    Each attribute in an item is a name-value pair. An attribute can be single-valued or multi-valued set. For example, a book item can have title and authors attributes. Each book has one title but can have many authors. The multi-valued attribute is a set; duplicate values are not allowed.

    ", + "refs": { + "AttributeMap$value": null, + "ListAttributeValue$member": null, + "MapAttributeValue$value": null + } + }, + "BinaryAttributeValue": { + "base": null, + "refs": { + "AttributeValue$B": "

    A Binary data type.

    ", + "BinarySetAttributeValue$member": null + } + }, + "BinarySetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BS": "

    A Binary Set data type.

    " + } + }, + "BooleanAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BOOL": "

    A Boolean data type.

    " + } + }, + "Date": { + "base": null, + "refs": { + "StreamDescription$CreationRequestDateTime": "

    The date and time when the request to create this stream was issued.

    " + } + }, + "DescribeStreamInput": { + "base": "

    Represents the input of a DescribeStream operation.

    ", + "refs": { + } + }, + "DescribeStreamOutput": { + "base": "

    Represents the output of a DescribeStream operation.

    ", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ExpiredIteratorException$message": "

    The provided iterator exceeds the maximum age allowed.

    ", + "InternalServerError$message": "

    The server encountered an internal error trying to fulfill the request.

    ", + "LimitExceededException$message": "

    Too many operations for a given subscriber.

    ", + "ResourceNotFoundException$message": "

    The resource which is being requested does not exist.

    ", + "TrimmedDataAccessException$message": "

    The data you are trying to access has been trimmed.

    " + } + }, + "ExpiredIteratorException": { + "base": "

    The shard iterator has expired and can no longer be used to retrieve stream records. A shard iterator expires 15 minutes after it is retrieved using the GetShardIterator action.

    ", + "refs": { + } + }, + "GetRecordsInput": { + "base": "

    Represents the input of a GetRecords operation.

    ", + "refs": { + } + }, + "GetRecordsOutput": { + "base": "

    Represents the output of a GetRecords operation.

    ", + "refs": { + } + }, + "GetShardIteratorInput": { + "base": "

    Represents the input of a GetShardIterator operation.

    ", + "refs": { + } + }, + "GetShardIteratorOutput": { + "base": "

    Represents the output of a GetShardIterator operation.

    ", + "refs": { + } + }, + "InternalServerError": { + "base": "

    An error occurred on the server side.

    ", + "refs": { + } + }, + "KeySchema": { + "base": null, + "refs": { + "StreamDescription$KeySchema": "

    The key attribute(s) of the stream's DynamoDB table.

    " + } + }, + "KeySchemaAttributeName": { + "base": null, + "refs": { + "KeySchemaElement$AttributeName": "

    The name of a key attribute.

    " + } + }, + "KeySchemaElement": { + "base": "

    Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.

    A KeySchemaElement represents exactly one attribute of the primary key. For example, a hash type primary key would be represented by one KeySchemaElement. A hash-and-range type primary key would require one KeySchemaElement for the hash attribute, and another KeySchemaElement for the range attribute.

    ", + "refs": { + "KeySchema$member": null + } + }, + "KeyType": { + "base": null, + "refs": { + "KeySchemaElement$KeyType": "

    The attribute data, consisting of the data type and the attribute value itself.

    " + } + }, + "LimitExceededException": { + "base": "

    Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.

    ", + "refs": { + } + }, + "ListAttributeValue": { + "base": null, + "refs": { + "AttributeValue$L": "

    A List data type.

    " + } + }, + "ListStreamsInput": { + "base": "

    Represents the input of a ListStreams operation.

    ", + "refs": { + } + }, + "ListStreamsOutput": { + "base": "

    Represents the output of a ListStreams operation.

    ", + "refs": { + } + }, + "MapAttributeValue": { + "base": null, + "refs": { + "AttributeValue$M": "

    A Map data type.

    " + } + }, + "NullAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NULL": "

    A Null data type.

    " + } + }, + "NumberAttributeValue": { + "base": null, + "refs": { + "AttributeValue$N": "

    A Number data type.

    ", + "NumberSetAttributeValue$member": null + } + }, + "NumberSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NS": "

    A Number Set data type.

    " + } + }, + "OperationType": { + "base": null, + "refs": { + "Record$eventName": "

    The type of data modification that was performed on the DynamoDB table:

    • INSERT - a new item was added to the table.

    • MODIFY - one or more of the item's attributes were updated.

    • REMOVE - the item was deleted from the table

    " + } + }, + "PositiveIntegerObject": { + "base": null, + "refs": { + "DescribeStreamInput$Limit": "

    The maximum number of shard objects to return. The upper limit is 100.

    ", + "GetRecordsInput$Limit": "

    The maximum number of records to return from the shard. The upper limit is 1000.

    ", + "ListStreamsInput$Limit": "

    The maximum number of streams to return. The upper limit is 100.

    " + } + }, + "PositiveLongObject": { + "base": null, + "refs": { + "StreamRecord$SizeBytes": "

    The size of the stream record, in bytes.

    " + } + }, + "Record": { + "base": "

    A description of a unique event within a stream.

    ", + "refs": { + "RecordList$member": null + } + }, + "RecordList": { + "base": null, + "refs": { + "GetRecordsOutput$Records": "

    The stream records from the shard, which were retrieved using the shard iterator.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    The operation tried to access a nonexistent stream.

    ", + "refs": { + } + }, + "SequenceNumber": { + "base": null, + "refs": { + "GetShardIteratorInput$SequenceNumber": "

    The sequence number of a stream record in the shard from which to start reading.

    ", + "SequenceNumberRange$StartingSequenceNumber": "

    The first sequence number.

    ", + "SequenceNumberRange$EndingSequenceNumber": "

    The last sequence number.

    ", + "StreamRecord$SequenceNumber": "

    The sequence number of the stream record.

    " + } + }, + "SequenceNumberRange": { + "base": "

    The beginning and ending sequence numbers for the stream records contained within a shard.

    ", + "refs": { + "Shard$SequenceNumberRange": "

    The range of possible sequence numbers for the shard.

    " + } + }, + "Shard": { + "base": "

    A uniquely identified group of stream records within a stream.

    ", + "refs": { + "ShardDescriptionList$member": null + } + }, + "ShardDescriptionList": { + "base": null, + "refs": { + "StreamDescription$Shards": "

    The shards that comprise the stream.

    " + } + }, + "ShardId": { + "base": null, + "refs": { + "DescribeStreamInput$ExclusiveStartShardId": "

    The shard ID of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedShardId in the previous operation.

    ", + "GetShardIteratorInput$ShardId": "

    The identifier of the shard. The iterator will be returned for this shard ID.

    ", + "Shard$ShardId": "

    The system-generated identifier for this shard.

    ", + "Shard$ParentShardId": "

    The shard ID of the current shard's parent.

    ", + "StreamDescription$LastEvaluatedShardId": "

    The shard ID of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

    If LastEvaluatedShardId is empty, then the \"last page\" of results has been processed and there is currently no more data to be retrieved.

    If LastEvaluatedShardId is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedShardId is empty.

    " + } + }, + "ShardIterator": { + "base": null, + "refs": { + "GetRecordsInput$ShardIterator": "

    A shard iterator that was retrieved from a previous GetShardIterator operation. This iterator can be used to access the stream records in this shard.

    ", + "GetRecordsOutput$NextShardIterator": "

    The next position in the shard from which to start sequentially reading stream records. If set to null, the shard has been closed and the requested iterator will not return any more data.

    ", + "GetShardIteratorOutput$ShardIterator": "

    The position in the shard from which to start reading stream records sequentially. A shard iterator specifies this position using the sequence number of a stream record in a shard.

    " + } + }, + "ShardIteratorType": { + "base": null, + "refs": { + "GetShardIteratorInput$ShardIteratorType": "

    Determines how the shard iterator is used to start reading stream records from the shard:

    • AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by a specific sequence number.

    • AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number.

    • TRIM_HORIZON - Start reading at the last (untrimmed) stream record, which is the oldest record in the shard. In DynamoDB Streams, there is a 24 hour limit on data retention. Stream records whose age exceeds this limit are subject to removal (trimming) from the stream.

    • LATEST - Start reading just after the most recent stream record in the shard, so that you always read the most recent data in the shard.

    " + } + }, + "Stream": { + "base": "

    Represents all of the data describing a particular stream.

    ", + "refs": { + "StreamList$member": null + } + }, + "StreamArn": { + "base": null, + "refs": { + "DescribeStreamInput$StreamArn": "

    The Amazon Resource Name (ARN) for the stream.

    ", + "GetShardIteratorInput$StreamArn": "

    The Amazon Resource Name (ARN) for the stream.

    ", + "ListStreamsInput$ExclusiveStartStreamArn": "

    The ARN (Amazon Resource Name) of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedStreamArn in the previous operation.

    ", + "ListStreamsOutput$LastEvaluatedStreamArn": "

    The stream ARN of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

    If LastEvaluatedStreamArn is empty, then the \"last page\" of results has been processed and there is no more data to be retrieved.

    If LastEvaluatedStreamArn is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedStreamArn is empty.

    ", + "Stream$StreamArn": "

    The Amazon Resource Name (ARN) for the stream.

    ", + "StreamDescription$StreamArn": "

    The Amazon Resource Name (ARN) for the stream.

    " + } + }, + "StreamDescription": { + "base": "

    Represents all of the data describing a particular stream.

    ", + "refs": { + "DescribeStreamOutput$StreamDescription": "

    A complete description of the stream, including its creation date and time, the DynamoDB table associated with the stream, the shard IDs within the stream, and the beginning and ending sequence numbers of stream records within the shards.

    " + } + }, + "StreamList": { + "base": null, + "refs": { + "ListStreamsOutput$Streams": "

    A list of stream descriptors associated with the current account and endpoint.

    " + } + }, + "StreamRecord": { + "base": "

    A description of a single data modification that was performed on an item in a DynamoDB table.

    ", + "refs": { + "Record$dynamodb": "

    The main body of the stream record, containing all of the DynamoDB-specific fields.

    " + } + }, + "StreamStatus": { + "base": null, + "refs": { + "StreamDescription$StreamStatus": "

    Indicates the current status of the stream:

    • ENABLING - Streams is currently being enabled on the DynamoDB table.

    • ENABLED - the stream is enabled.

    • DISABLING - Streams is currently being disabled on the DynamoDB table.

    • DISABLED - the stream is disabled.

    " + } + }, + "StreamViewType": { + "base": null, + "refs": { + "StreamDescription$StreamViewType": "

    Indicates the format of the records within this stream:

    • KEYS_ONLY - only the key attributes of items that were modified in the DynamoDB table.

    • NEW_IMAGE - entire item from the table, as it appeared after they were modified.

    • OLD_IMAGE - entire item from the table, as it appeared before they were modified.

    • NEW_AND_OLD_IMAGES - both the new and the old images of the items from the table.

    ", + "StreamRecord$StreamViewType": "

    The type of data from the modified DynamoDB item that was captured in this stream record:

    • KEYS_ONLY - only the key attributes of the modified item.

    • NEW_IMAGE - the entire item, as it appears after it was modified.

    • OLD_IMAGE - the entire item, as it appeared before it was modified.

    • NEW_AND_OLD_IMAGES — both the new and the old item images of the item.

    " + } + }, + "String": { + "base": null, + "refs": { + "Record$eventID": "

    A globally unique identifier for the event that was recorded in this stream record.

    ", + "Record$eventVersion": "

    The version number of the stream record format. Currently, this is 1.0.

    ", + "Record$eventSource": "

    The AWS service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.

    ", + "Record$awsRegion": "

    The region in which the GetRecords request was received.

    ", + "Stream$StreamLabel": "

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the AWS customer ID.

    • the table name

    • the StreamLabel

    ", + "StreamDescription$StreamLabel": "

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the AWS customer ID.

    • the table name

    • the StreamLabel

    " + } + }, + "StringAttributeValue": { + "base": null, + "refs": { + "AttributeValue$S": "

    A String data type.

    ", + "StringSetAttributeValue$member": null + } + }, + "StringSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$SS": "

    A String Set data type.

    " + } + }, + "TableName": { + "base": null, + "refs": { + "ListStreamsInput$TableName": "

    If this parameter is provided, then only the streams associated with this table name are returned.

    ", + "Stream$TableName": "

    The DynamoDB table with which the stream is associated.

    ", + "StreamDescription$TableName": "

    The DynamoDB table with which the stream is associated.

    " + } + }, + "TrimmedDataAccessException": { + "base": "

    The operation attempted to read past the oldest stream record in a shard.

    In DynamoDB Streams, there is a 24 hour limit on data retention. Stream records whose age exceeds this limit are subject to removal (trimming) from the stream. You might receive a TrimmedDataAccessException if:

    • You request a shard iterator with a sequence number older than the trim point (24 hours).
    • You obtain a shard iterator, but before you use the iterator in a GetRecords request, a stream record in the shard exceeds the 24 hour period and is trimmed. This causes the iterator to access a record that no longer exists.
    ", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,493 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2011-06-15", + "endpointPrefix":"sts", + "globalEndpoint":"sts.amazonaws.com", + "protocol":"query", + "serviceAbbreviation":"AWS STS", + "serviceFullName":"AWS Security Token Service", + "signatureVersion":"v4", + "xmlNamespace":"https://sts.amazonaws.com/doc/2011-06-15/" + }, + "operations":{ + "AssumeRole":{ + "name":"AssumeRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssumeRoleRequest"}, + "output":{ + "shape":"AssumeRoleResponse", + "resultWrapper":"AssumeRoleResult" + }, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"PackedPolicyTooLargeException"}, + {"shape":"RegionDisabledException"} + ] + }, + "AssumeRoleWithSAML":{ + "name":"AssumeRoleWithSAML", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssumeRoleWithSAMLRequest"}, + "output":{ + "shape":"AssumeRoleWithSAMLResponse", + "resultWrapper":"AssumeRoleWithSAMLResult" + }, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"PackedPolicyTooLargeException"}, + {"shape":"IDPRejectedClaimException"}, + {"shape":"InvalidIdentityTokenException"}, + {"shape":"ExpiredTokenException"}, + {"shape":"RegionDisabledException"} + ] + }, + "AssumeRoleWithWebIdentity":{ + "name":"AssumeRoleWithWebIdentity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"AssumeRoleWithWebIdentityRequest"}, + "output":{ + "shape":"AssumeRoleWithWebIdentityResponse", + "resultWrapper":"AssumeRoleWithWebIdentityResult" + }, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"PackedPolicyTooLargeException"}, + {"shape":"IDPRejectedClaimException"}, + {"shape":"IDPCommunicationErrorException"}, + {"shape":"InvalidIdentityTokenException"}, + {"shape":"ExpiredTokenException"}, + {"shape":"RegionDisabledException"} + ] + }, + "DecodeAuthorizationMessage":{ + "name":"DecodeAuthorizationMessage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DecodeAuthorizationMessageRequest"}, + "output":{ + "shape":"DecodeAuthorizationMessageResponse", + "resultWrapper":"DecodeAuthorizationMessageResult" + }, + "errors":[ + {"shape":"InvalidAuthorizationMessageException"} + ] + }, + "GetFederationToken":{ + "name":"GetFederationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetFederationTokenRequest"}, + "output":{ + "shape":"GetFederationTokenResponse", + "resultWrapper":"GetFederationTokenResult" + }, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"PackedPolicyTooLargeException"}, + {"shape":"RegionDisabledException"} + ] + }, + "GetSessionToken":{ + "name":"GetSessionToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSessionTokenRequest"}, + "output":{ + "shape":"GetSessionTokenResponse", + "resultWrapper":"GetSessionTokenResult" + }, + "errors":[ + {"shape":"RegionDisabledException"} + ] + } + }, + "shapes":{ + "AssumeRoleRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "RoleSessionName" + ], + "members":{ + "RoleArn":{"shape":"arnType"}, + "RoleSessionName":{"shape":"roleSessionNameType"}, + "Policy":{"shape":"sessionPolicyDocumentType"}, + "DurationSeconds":{"shape":"roleDurationSecondsType"}, + "ExternalId":{"shape":"externalIdType"}, + 
"SerialNumber":{"shape":"serialNumberType"}, + "TokenCode":{"shape":"tokenCodeType"} + } + }, + "AssumeRoleResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"}, + "AssumedRoleUser":{"shape":"AssumedRoleUser"}, + "PackedPolicySize":{"shape":"nonNegativeIntegerType"} + } + }, + "AssumeRoleWithSAMLRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "PrincipalArn", + "SAMLAssertion" + ], + "members":{ + "RoleArn":{"shape":"arnType"}, + "PrincipalArn":{"shape":"arnType"}, + "SAMLAssertion":{"shape":"SAMLAssertionType"}, + "Policy":{"shape":"sessionPolicyDocumentType"}, + "DurationSeconds":{"shape":"roleDurationSecondsType"} + } + }, + "AssumeRoleWithSAMLResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"}, + "AssumedRoleUser":{"shape":"AssumedRoleUser"}, + "PackedPolicySize":{"shape":"nonNegativeIntegerType"}, + "Subject":{"shape":"Subject"}, + "SubjectType":{"shape":"SubjectType"}, + "Issuer":{"shape":"Issuer"}, + "Audience":{"shape":"Audience"}, + "NameQualifier":{"shape":"NameQualifier"} + } + }, + "AssumeRoleWithWebIdentityRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "RoleSessionName", + "WebIdentityToken" + ], + "members":{ + "RoleArn":{"shape":"arnType"}, + "RoleSessionName":{"shape":"roleSessionNameType"}, + "WebIdentityToken":{"shape":"clientTokenType"}, + "ProviderId":{"shape":"urlType"}, + "Policy":{"shape":"sessionPolicyDocumentType"}, + "DurationSeconds":{"shape":"roleDurationSecondsType"} + } + }, + "AssumeRoleWithWebIdentityResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"}, + "SubjectFromWebIdentityToken":{"shape":"webIdentitySubjectType"}, + "AssumedRoleUser":{"shape":"AssumedRoleUser"}, + "PackedPolicySize":{"shape":"nonNegativeIntegerType"}, + "Provider":{"shape":"Issuer"}, + "Audience":{"shape":"Audience"} + } + }, + "AssumedRoleUser":{ + "type":"structure", + "required":[ + "AssumedRoleId", + "Arn" + ], + "members":{ 
+ "AssumedRoleId":{"shape":"assumedRoleIdType"}, + "Arn":{"shape":"arnType"} + } + }, + "Audience":{"type":"string"}, + "Credentials":{ + "type":"structure", + "required":[ + "AccessKeyId", + "SecretAccessKey", + "SessionToken", + "Expiration" + ], + "members":{ + "AccessKeyId":{"shape":"accessKeyIdType"}, + "SecretAccessKey":{"shape":"accessKeySecretType"}, + "SessionToken":{"shape":"tokenType"}, + "Expiration":{"shape":"dateType"} + } + }, + "DecodeAuthorizationMessageRequest":{ + "type":"structure", + "required":["EncodedMessage"], + "members":{ + "EncodedMessage":{"shape":"encodedMessageType"} + } + }, + "DecodeAuthorizationMessageResponse":{ + "type":"structure", + "members":{ + "DecodedMessage":{"shape":"decodedMessageType"} + } + }, + "ExpiredTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"expiredIdentityTokenMessage"} + }, + "error":{ + "code":"ExpiredTokenException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "FederatedUser":{ + "type":"structure", + "required":[ + "FederatedUserId", + "Arn" + ], + "members":{ + "FederatedUserId":{"shape":"federatedIdType"}, + "Arn":{"shape":"arnType"} + } + }, + "GetFederationTokenRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"userNameType"}, + "Policy":{"shape":"sessionPolicyDocumentType"}, + "DurationSeconds":{"shape":"durationSecondsType"} + } + }, + "GetFederationTokenResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"}, + "FederatedUser":{"shape":"FederatedUser"}, + "PackedPolicySize":{"shape":"nonNegativeIntegerType"} + } + }, + "GetSessionTokenRequest":{ + "type":"structure", + "members":{ + "DurationSeconds":{"shape":"durationSecondsType"}, + "SerialNumber":{"shape":"serialNumberType"}, + "TokenCode":{"shape":"tokenCodeType"} + } + }, + "GetSessionTokenResponse":{ + "type":"structure", + "members":{ + "Credentials":{"shape":"Credentials"} + } + }, + 
"IDPCommunicationErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"idpCommunicationErrorMessage"} + }, + "error":{ + "code":"IDPCommunicationError", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "IDPRejectedClaimException":{ + "type":"structure", + "members":{ + "message":{"shape":"idpRejectedClaimMessage"} + }, + "error":{ + "code":"IDPRejectedClaim", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "InvalidAuthorizationMessageException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidAuthorizationMessage"} + }, + "error":{ + "code":"InvalidAuthorizationMessageException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidIdentityTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"invalidIdentityTokenMessage"} + }, + "error":{ + "code":"InvalidIdentityToken", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Issuer":{"type":"string"}, + "MalformedPolicyDocumentException":{ + "type":"structure", + "members":{ + "message":{"shape":"malformedPolicyDocumentMessage"} + }, + "error":{ + "code":"MalformedPolicyDocument", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "NameQualifier":{"type":"string"}, + "PackedPolicyTooLargeException":{ + "type":"structure", + "members":{ + "message":{"shape":"packedPolicyTooLargeMessage"} + }, + "error":{ + "code":"PackedPolicyTooLarge", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "RegionDisabledException":{ + "type":"structure", + "members":{ + "message":{"shape":"regionDisabledMessage"} + }, + "error":{ + "code":"RegionDisabledException", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "SAMLAssertionType":{ + "type":"string", + "max":50000, + "min":4 + }, + "Subject":{"type":"string"}, + "SubjectType":{"type":"string"}, + "accessKeyIdType":{ + "type":"string", + 
"max":32, + "min":16, + "pattern":"[\\w]*" + }, + "accessKeySecretType":{"type":"string"}, + "arnType":{ + "type":"string", + "max":2048, + "min":20 + }, + "assumedRoleIdType":{ + "type":"string", + "max":96, + "min":2, + "pattern":"[\\w+=,.@:-]*" + }, + "clientTokenType":{ + "type":"string", + "max":2048, + "min":4 + }, + "dateType":{"type":"timestamp"}, + "decodedMessageType":{"type":"string"}, + "durationSecondsType":{ + "type":"integer", + "max":129600, + "min":900 + }, + "encodedMessageType":{ + "type":"string", + "max":10240, + "min":1 + }, + "expiredIdentityTokenMessage":{"type":"string"}, + "externalIdType":{ + "type":"string", + "max":1224, + "min":2, + "pattern":"[\\w+=,.@:\\/-]*" + }, + "federatedIdType":{ + "type":"string", + "max":96, + "min":2, + "pattern":"[\\w+=,.@\\:-]*" + }, + "idpCommunicationErrorMessage":{"type":"string"}, + "idpRejectedClaimMessage":{"type":"string"}, + "invalidAuthorizationMessage":{"type":"string"}, + "invalidIdentityTokenMessage":{"type":"string"}, + "malformedPolicyDocumentMessage":{"type":"string"}, + "nonNegativeIntegerType":{ + "type":"integer", + "min":0 + }, + "packedPolicyTooLargeMessage":{"type":"string"}, + "regionDisabledMessage":{"type":"string"}, + "roleDurationSecondsType":{ + "type":"integer", + "max":3600, + "min":900 + }, + "roleSessionNameType":{ + "type":"string", + "max":64, + "min":2, + "pattern":"[\\w+=,.@-]*" + }, + "serialNumberType":{ + "type":"string", + "max":256, + "min":9, + "pattern":"[\\w+=/:,.@-]*" + }, + "sessionPolicyDocumentType":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "tokenCodeType":{ + "type":"string", + "max":6, + "min":6, + "pattern":"[\\d]*" + }, + "tokenType":{"type":"string"}, + "urlType":{ + "type":"string", + "max":2048, + "min":4 + }, + "userNameType":{ + "type":"string", + "max":32, + "min":2, + "pattern":"[\\w+=,.@-]*" + }, + "webIdentitySubjectType":{ + "type":"string", + "max":255, + "min":6 + } + } +} diff 
-Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,367 @@ +{ + "version": "2.0", + "service": "AWS Security Token Service

    The AWS Security Token Service (STS) is a web service that enables you to request temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users or for users that you authenticate (federated users). This guide provides descriptions of the STS API. For more detailed information about using this service, go to Temporary Security Credentials.

    As an alternative to using the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to STS. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page.

    For information about setting up signatures and authorization through the API, go to Signing AWS API Requests in the AWS General Reference. For general information about the Query API, go to Making Query Requests in Using IAM. For information about using security tokens with other AWS products, go to AWS Services That Work with IAM in the Using IAM.

    If you're new to AWS and need additional technical information about a specific AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/.

    Endpoints

    The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com that maps to the US East (N. Virginia) region. Additional regions are available, but must first be activated in the AWS Management Console before you can use a different region's endpoint. For more information about activating a region for STS see Activating STS in a New Region in the Using IAM.

    For information about STS endpoints, see Regions and Endpoints in the AWS General Reference.

    Recording API requests

    STS supports AWS CloudTrail, which is a service that records AWS calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine what requests were successfully made to STS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

    ", + "operations": { + "AssumeRole": "

    Returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) that you can use to access AWS resources that you might not normally have access to. Typically, you use AssumeRole for cross-account access or federation.

    Important: You cannot call AssumeRole by using AWS account credentials; access will be denied. You must use IAM user credentials or temporary security credentials to call AssumeRole.

    For cross-account access, imagine that you own multiple accounts and need to access resources in each account. You could create long-term credentials in each account to access those resources. However, managing all those credentials and remembering which one can access which account can be time consuming. Instead, you can create one set of long-term credentials in one account and then use temporary security credentials to access all the other accounts by assuming roles in those accounts. For more information about roles, see IAM Roles (Delegation and Federation) in the Using IAM.

    For federation, you can, for example, grant single sign-on access to the AWS Management Console. If you already have an identity and authentication system in your corporate network, you don't have to recreate user identities in AWS in order to grant those user identities access to AWS. Instead, after a user has been authenticated, you call AssumeRole (and specify the role with the appropriate permissions) to get temporary security credentials for that user. With those temporary security credentials, you construct a sign-in URL that users can use to access the console. For more information, see Common Scenarios for Temporary Credentials in the Using IAM.

    The temporary security credentials are valid for the duration that you specified when calling AssumeRole, which can be from 900 seconds (15 minutes) to 3600 seconds (1 hour). The default is 1 hour.

    Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the Using IAM.

    To assume a role, your AWS account must be trusted by the role. The trust relationship is defined in the role's trust policy when the role is created. You must also have a policy that allows you to call sts:AssumeRole.

    Using MFA with AssumeRole

    You can optionally include multi-factor authentication (MFA) information when you call AssumeRole. This is useful for cross-account scenarios in which you want to make sure that the user who is assuming the role has been authenticated using an AWS MFA device. In that scenario, the trust policy of the role being assumed includes a condition that tests for MFA authentication; if the caller does not include valid MFA information, the request to assume the role is denied. The condition in a trust policy that tests for MFA authentication might look like the following example.

    \"Condition\": {\"Bool\": {\"aws:MultiFactorAuthPresent\": true}}

    For more information, see Configuring MFA-Protected API Access in the Using IAM guide.

    To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode parameters. The SerialNumber value identifies the user's hardware or virtual MFA device. The TokenCode is the time-based one-time password (TOTP) that the MFA devices produces.

    ", + "AssumeRoleWithSAML": "

    Returns a set of temporary security credentials for users who have been authenticated via a SAML authentication response. This operation provides a mechanism for tying an enterprise identity store or directory to role-based AWS access without user-specific credentials or configuration.

    The temporary security credentials returned by this operation consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to AWS services. The credentials are valid for the duration that you specified when calling AssumeRoleWithSAML, which can be up to 3600 seconds (1 hour) or until the time specified in the SAML authentication response's SessionNotOnOrAfter value, whichever is shorter.

    The maximum duration for a session is 1 hour, and the minimum duration is 15 minutes, even if values outside this range are specified.

    Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the Using IAM.

    Before your application can call AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to issue the claims required by AWS. Additionally, you must use AWS Identity and Access Management (IAM) to create a SAML provider entity in your AWS account that represents your identity provider, and create an IAM role that specifies this SAML provider in its trust policy.

    Calling AssumeRoleWithSAML does not require the use of AWS security credentials. The identity of the caller is validated by using keys in the metadata document that is uploaded for the SAML provider entity for your identity provider.

    For more information, see the following resources:

    ", + "AssumeRoleWithWebIdentity": "

    Returns a set of temporary security credentials for users who have been authenticated in a mobile or web application with a web identity provider, such as Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible identity provider.

    For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the AWS SDK for iOS and the AWS SDK for Android to uniquely identify a user and supply the user with a consistent identity throughout the lifetime of an application.

    To learn more about Amazon Cognito, see Amazon Cognito Overview in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview in the AWS SDK for iOS Developer Guide.

    Calling AssumeRoleWithWebIdentity does not require the use of AWS security credentials. Therefore, you can distribute an application (for example, on mobile devices) that requests temporary security credentials without including long-term AWS credentials in the application, and without deploying server-based proxy services that use long-term AWS credentials. Instead, the identity of the caller is validated by using a token from the web identity provider.

    The temporary security credentials returned by this API consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to AWS service APIs. The credentials are valid for the duration that you specified when calling AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the temporary security credentials are valid for 1 hour.

    Optionally, you can pass an IAM access policy to this operation. If you choose not to pass a policy, the temporary security credentials that are returned by the operation have the permissions that are defined in the access policy of the role that is being assumed. If you pass a policy to this operation, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the Using IAM.

    Before your application can call AssumeRoleWithWebIdentity, you must have an identity token from a supported identity provider and create a role that the application can assume. The role that your application assumes must trust the identity provider that is associated with the identity token. In other words, the identity provider must be specified in the role's trust policy.

    For more information about how to use web identity federation and the AssumeRoleWithWebIdentity API, see the following resources:

    ", + "DecodeAuthorizationMessage": "

    Decodes additional information about the authorization status of a request from an encoded message returned in response to an AWS request.

    For example, if a user is not authorized to perform an action that he or she has requested, the request returns a Client.UnauthorizedOperation response (an HTTP 403 response). Some AWS actions additionally return an encoded message that can provide details about this authorization failure.

    Only certain AWS actions return an encoded authorization message. The documentation for an individual action indicates whether that action returns an encoded message in addition to returning an HTTP code.

    The message is encoded because the details of the authorization status can constitute privileged information that the user who requested the action should not see. To decode an authorization status message, a user must be granted permissions via an IAM policy to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action.

    The decoded message includes the following type of information:

    • Whether the request was denied due to an explicit deny or due to the absence of an explicit allow. For more information, see Determining Whether a Request is Allowed or Denied in the Using IAM.
    • The principal who made the request.
    • The requested action.
    • The requested resource.
    • The values of condition keys in the context of the user's request.
    ", + "GetFederationToken": "

    Returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) for a federated user. A typical use is in a proxy application that gets temporary security credentials on behalf of distributed applications inside a corporate network. Because you must call the GetFederationToken action using the long-term security credentials of an IAM user, this call is appropriate in contexts where those credentials can be safely stored, usually in a server-based application.

    If you are creating a mobile-based or browser-based app that can authenticate users using a web identity provider like Login with Amazon, Facebook, Google, or an OpenID Connect-compatible identity provider, we recommend that you use Amazon Cognito or AssumeRoleWithWebIdentity. For more information, see Federation Through a Web-based Identity Provider.

    The GetFederationToken action must be called by using the long-term AWS security credentials of an IAM user. You can also call GetFederationToken using the security credentials of an AWS account (root), but this is not recommended. Instead, we recommend that you create an IAM user for the purpose of the proxy application and then attach a policy to the IAM user that limits federated users to only the actions and resources they need access to. For more information, see IAM Best Practices in the Using IAM.

    The temporary security credentials that are obtained by using the long-term credentials of an IAM user are valid for the specified duration, between 900 seconds (15 minutes) and 129600 seconds (36 hours). Temporary credentials that are obtained by using AWS account (root) credentials have a maximum duration of 3600 seconds (1 hour).

    Permissions

    The permissions for the temporary security credentials returned by GetFederationToken are determined by a combination of the following:

    • The policy or policies that are attached to the IAM user whose credentials are used to call GetFederationToken.
    • The policy that is passed as a parameter in the call.

    The passed policy is attached to the temporary security credentials that result from the GetFederationToken API call--that is, to the federated user. When the federated user makes an AWS request, AWS evaluates the policy attached to the federated user in combination with the policy or policies attached to the IAM user whose credentials were used to call GetFederationToken. AWS allows the federated user's request only when both the federated user and the IAM user are explicitly allowed to perform the requested action. The passed policy cannot grant more permissions than those that are defined in the IAM user policy.

    A typical use case is that the permissions of the IAM user whose credentials are used to call GetFederationToken are designed to allow access to all the actions and resources that any federated user will need. Then, for individual users, you pass a policy to the operation that scopes down the permissions to a level that's appropriate to that individual user, using a policy that allows only a subset of permissions that are granted to the IAM user.

    If you do not pass a policy, the resulting temporary security credentials have no effective permissions. The only exception is when the temporary security credentials are used to access a resource that has a resource-based policy that specifically allows the federated user to access the resource.

    For more information about how permissions work, see Permissions for GetFederationToken. For information about using GetFederationToken to create temporary security credentials, see GetFederationToken—Federation Through a Custom Identity Broker.

    ", + "GetSessionToken": "

    Returns a set of temporary credentials for an AWS account or IAM user. The credentials consist of an access key ID, a secret access key, and a security token. Typically, you use GetSessionToken if you want to use MFA to protect programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled IAM users would need to call GetSessionToken and submit an MFA code that is associated with their MFA device. Using the temporary security credentials that are returned from the call, IAM users can then make programmatic calls to APIs that require MFA authentication. If you do not supply a correct MFA code, then the API returns an access denied error.

    The GetSessionToken action must be called by using the long-term AWS security credentials of the AWS account or an IAM user. Credentials that are created by IAM users are valid for the duration that you specify, between 900 seconds (15 minutes) and 129600 seconds (36 hours); credentials that are created by using account credentials have a maximum duration of 3600 seconds (1 hour).

    We recommend that you do not call GetSessionToken with root account credentials. Instead, follow our best practices by creating one or more IAM users, giving them the necessary permissions, and using IAM users for everyday interaction with AWS.

    The permissions associated with the temporary security credentials returned by GetSessionToken are based on the permissions associated with account or IAM user whose credentials are used to call the action. If GetSessionToken is called using root account credentials, the temporary credentials have root account permissions. Similarly, if GetSessionToken is called using the credentials of an IAM user, the temporary credentials have the same permissions as the IAM user.

    For more information about using GetSessionToken to create temporary credentials, go to Temporary Credentials for Users in Untrusted Environments in the Using IAM.

    " + }, + "shapes": { + "AssumeRoleRequest": { + "base": null, + "refs": { + } + }, + "AssumeRoleResponse": { + "base": "

    Contains the response to a successful AssumeRole request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "AssumeRoleWithSAMLRequest": { + "base": null, + "refs": { + } + }, + "AssumeRoleWithSAMLResponse": { + "base": "

    Contains the response to a successful AssumeRoleWithSAML request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "AssumeRoleWithWebIdentityRequest": { + "base": null, + "refs": { + } + }, + "AssumeRoleWithWebIdentityResponse": { + "base": "

    Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "AssumedRoleUser": { + "base": "

    The identifiers for the temporary security credentials that the operation returns.

    ", + "refs": { + "AssumeRoleResponse$AssumedRoleUser": "

    The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers that you can use to refer to the resulting temporary security credentials. For example, you can reference these credentials as a principal in a resource-based policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName that you specified when you called AssumeRole.

    ", + "AssumeRoleWithSAMLResponse$AssumedRoleUser": null, + "AssumeRoleWithWebIdentityResponse$AssumedRoleUser": "

    The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers that you can use to refer to the resulting temporary security credentials. For example, you can reference these credentials as a principal in a resource-based policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName that you specified when you called AssumeRole.

    " + } + }, + "Audience": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$Audience": "

    The value of the Recipient attribute of the SubjectConfirmationData element of the SAML assertion.

    ", + "AssumeRoleWithWebIdentityResponse$Audience": "

    The intended audience (also known as client ID) of the web identity token. This is traditionally the client identifier issued to the application that requested the web identity token.

    " + } + }, + "Credentials": { + "base": "

    AWS credentials for API authentication.

    ", + "refs": { + "AssumeRoleResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    ", + "AssumeRoleWithSAMLResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    ", + "AssumeRoleWithWebIdentityResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    ", + "GetFederationTokenResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    ", + "GetSessionTokenResponse$Credentials": "

    The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token.

    Note: The size of the security token that STS APIs return is not fixed. We strongly recommend that you make no assumptions about the maximum size. As of this writing, the typical size is less than 4096 bytes, but that can vary. Also, future updates to AWS might require larger sizes.

    " + } + }, + "DecodeAuthorizationMessageRequest": { + "base": null, + "refs": { + } + }, + "DecodeAuthorizationMessageResponse": { + "base": "

    A document that contains additional information about the authorization status of a request from an encoded message that is returned in response to an AWS request.

    ", + "refs": { + } + }, + "ExpiredTokenException": { + "base": "

    The web identity token that was passed is expired or is not valid. Get a new identity token from the identity provider and then retry the request.

    ", + "refs": { + } + }, + "FederatedUser": { + "base": "

    Identifiers for the federated user that is associated with the credentials.

    ", + "refs": { + "GetFederationTokenResponse$FederatedUser": "

    Identifiers for the federated user associated with the credentials (such as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You can use the federated user's ARN in your resource-based policies, such as an Amazon S3 bucket policy.

    " + } + }, + "GetFederationTokenRequest": { + "base": null, + "refs": { + } + }, + "GetFederationTokenResponse": { + "base": "

    Contains the response to a successful GetFederationToken request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "GetSessionTokenRequest": { + "base": null, + "refs": { + } + }, + "GetSessionTokenResponse": { + "base": "

    Contains the response to a successful GetSessionToken request, including temporary AWS credentials that can be used to make AWS requests.

    ", + "refs": { + } + }, + "IDPCommunicationErrorException": { + "base": "

    The request could not be fulfilled because the non-AWS identity provider (IDP) that was asked to verify the incoming identity token could not be reached. This is often a transient error caused by network conditions. Retry the request a limited number of times so that you don't exceed the request rate. If the error persists, the non-AWS identity provider might be down or not responding.

    ", + "refs": { + } + }, + "IDPRejectedClaimException": { + "base": "

    The identity provider (IdP) reported that authentication failed. This might be because the claim is invalid.

    If this error is returned for the AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired or has been explicitly revoked.

    ", + "refs": { + } + }, + "InvalidAuthorizationMessageException": { + "base": "

    The error returned if the message passed to DecodeAuthorizationMessage was invalid. This can happen if the token contains invalid characters, such as linebreaks.

    ", + "refs": { + } + }, + "InvalidIdentityTokenException": { + "base": "

    The web identity token that was passed could not be validated by AWS. Get a new identity token from the identity provider and then retry the request.

    ", + "refs": { + } + }, + "Issuer": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$Issuer": "

    The value of the Issuer element of the SAML assertion.

    ", + "AssumeRoleWithWebIdentityResponse$Provider": "

    The issuing authority of the web identity token presented. For OpenID Connect ID Tokens this contains the value of the iss field. For OAuth 2.0 access tokens, this contains the value of the ProviderId parameter that was passed in the AssumeRoleWithWebIdentity request.

    " + } + }, + "MalformedPolicyDocumentException": { + "base": "

    The request was rejected because the policy document was malformed. The error message describes the specific error.

    ", + "refs": { + } + }, + "NameQualifier": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$NameQualifier": "

    A hash value based on the concatenation of the Issuer response value, the AWS account ID, and the friendly name (the last part of the ARN) of the SAML provider in IAM. The combination of NameQualifier and Subject can be used to uniquely identify a federated user.

    The following pseudocode shows how the hash value is calculated:

    BASE64 ( SHA1 ( \"https://example.com/saml\" + \"123456789012\" + \"/MySAMLIdP\" ) )

    " + } + }, + "PackedPolicyTooLargeException": { + "base": "

    The request was rejected because the policy document was too large. The error message describes how big the policy document is, in packed form, as a percentage of what the API allows.

    ", + "refs": { + } + }, + "RegionDisabledException": { + "base": "

    STS is not activated in the requested region for the account that is being asked to create temporary credentials. The account administrator must activate STS in that region using the IAM Console. For more information, see Activating and Deactivating AWS STS in an AWS Region in the Using IAM.

    ", + "refs": { + } + }, + "SAMLAssertionType": { + "base": null, + "refs": { + "AssumeRoleWithSAMLRequest$SAMLAssertion": "

    The base-64 encoded SAML authentication response provided by the IdP.

    For more information, see Configuring a Relying Party and Adding Claims in the Using IAM guide.

    " + } + }, + "Subject": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$Subject": "

    The value of the NameID element in the Subject element of the SAML assertion.

    " + } + }, + "SubjectType": { + "base": null, + "refs": { + "AssumeRoleWithSAMLResponse$SubjectType": "

    The format of the name ID, as defined by the Format attribute in the NameID element of the SAML assertion. Typical examples of the format are transient or persistent.

    If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient. If the format includes any other prefix, the format is returned with no modifications.

    " + } + }, + "accessKeyIdType": { + "base": null, + "refs": { + "Credentials$AccessKeyId": "

    The access key ID that identifies the temporary security credentials.

    " + } + }, + "accessKeySecretType": { + "base": null, + "refs": { + "Credentials$SecretAccessKey": "

    The secret access key that can be used to sign requests.

    " + } + }, + "arnType": { + "base": null, + "refs": { + "AssumeRoleRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the role to assume.

    ", + "AssumeRoleWithSAMLRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the role that the caller is assuming.

    ", + "AssumeRoleWithSAMLRequest$PrincipalArn": "

    The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the IdP.

    ", + "AssumeRoleWithWebIdentityRequest$RoleArn": "

    The Amazon Resource Name (ARN) of the role that the caller is assuming.

    ", + "AssumedRoleUser$Arn": "

    The ARN of the temporary security credentials that are returned from the AssumeRole action. For more information about ARNs and how to use them in policies, see IAM Identifiers in Using IAM.

    ", + "FederatedUser$Arn": "

    The ARN that specifies the federated user that is associated with the credentials. For more information about ARNs and how to use them in policies, see IAM Identifiers in Using IAM.

    " + } + }, + "assumedRoleIdType": { + "base": null, + "refs": { + "AssumedRoleUser$AssumedRoleId": "

    A unique identifier that contains the role ID and the role session name of the role that is being assumed. The role ID is generated by AWS when the role is created.

    " + } + }, + "clientTokenType": { + "base": null, + "refs": { + "AssumeRoleWithWebIdentityRequest$WebIdentityToken": "

    The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call.

    " + } + }, + "dateType": { + "base": null, + "refs": { + "Credentials$Expiration": "

    The date on which the current credentials expire.

    " + } + }, + "decodedMessageType": { + "base": null, + "refs": { + "DecodeAuthorizationMessageResponse$DecodedMessage": "

    An XML document that contains the decoded message. For more information, see DecodeAuthorizationMessage.

    " + } + }, + "durationSecondsType": { + "base": null, + "refs": { + "GetFederationTokenRequest$DurationSeconds": "

    The duration, in seconds, that the session should last. Acceptable durations for federation sessions range from 900 seconds (15 minutes) to 129600 seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained using AWS account (root) credentials are restricted to a maximum of 3600 seconds (one hour). If the specified duration is longer than one hour, the session obtained by using AWS account (root) credentials defaults to one hour.

    ", + "GetSessionTokenRequest$DurationSeconds": "

    The duration, in seconds, that the credentials should remain valid. Acceptable durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions for AWS account owners are restricted to a maximum of 3600 seconds (one hour). If the duration is longer than one hour, the session for AWS account owners defaults to one hour.

    " + } + }, + "encodedMessageType": { + "base": null, + "refs": { + "DecodeAuthorizationMessageRequest$EncodedMessage": "

    The encoded message that was returned with the response.

    " + } + }, + "expiredIdentityTokenMessage": { + "base": null, + "refs": { + "ExpiredTokenException$message": null + } + }, + "externalIdType": { + "base": null, + "refs": { + "AssumeRoleRequest$ExternalId": "

    A unique identifier that is used by third parties when assuming roles in their customers' accounts. For each role that the third party can assume, they should instruct their customers to ensure the role's trust policy checks for the external ID that the third party generated. Each time the third party assumes the role, they should pass the customer's external ID. The external ID is useful in order to help third parties bind a role to the customer who created it. For more information about the external ID, see How to Use an External ID When Granting Access to Your AWS Resources to a Third Party in the Using IAM.

    " + } + }, + "federatedIdType": { + "base": null, + "refs": { + "FederatedUser$FederatedUserId": "

    The string that identifies the federated user associated with the credentials, similar to the unique ID of an IAM user.

    " + } + }, + "idpCommunicationErrorMessage": { + "base": null, + "refs": { + "IDPCommunicationErrorException$message": null + } + }, + "idpRejectedClaimMessage": { + "base": null, + "refs": { + "IDPRejectedClaimException$message": null + } + }, + "invalidAuthorizationMessage": { + "base": null, + "refs": { + "InvalidAuthorizationMessageException$message": null + } + }, + "invalidIdentityTokenMessage": { + "base": null, + "refs": { + "InvalidIdentityTokenException$message": null + } + }, + "malformedPolicyDocumentMessage": { + "base": null, + "refs": { + "MalformedPolicyDocumentException$message": null + } + }, + "nonNegativeIntegerType": { + "base": null, + "refs": { + "AssumeRoleResponse$PackedPolicySize": "

    A percentage value that indicates the size of the policy in packed form. The service rejects any policy with a packed size greater than 100 percent, which means the policy exceeded the allowed space.

    ", + "AssumeRoleWithSAMLResponse$PackedPolicySize": "

    A percentage value that indicates the size of the policy in packed form. The service rejects any policy with a packed size greater than 100 percent, which means the policy exceeded the allowed space.

    ", + "AssumeRoleWithWebIdentityResponse$PackedPolicySize": "

    A percentage value that indicates the size of the policy in packed form. The service rejects any policy with a packed size greater than 100 percent, which means the policy exceeded the allowed space.

    ", + "GetFederationTokenResponse$PackedPolicySize": "

    A percentage value indicating the size of the policy in packed form. The service rejects policies for which the packed size is greater than 100 percent of the allowed value.

    " + } + }, + "packedPolicyTooLargeMessage": { + "base": null, + "refs": { + "PackedPolicyTooLargeException$message": null + } + }, + "regionDisabledMessage": { + "base": null, + "refs": { + "RegionDisabledException$message": null + } + }, + "roleDurationSecondsType": { + "base": null, + "refs": { + "AssumeRoleRequest$DurationSeconds": "

    The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.

    ", + "AssumeRoleWithSAMLRequest$DurationSeconds": "

    The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds. An expiration can also be specified in the SAML authentication response's SessionNotOnOrAfter value. The actual expiration time is whichever value is shorter.

    The maximum duration for a session is 1 hour, and the minimum duration is 15 minutes, even if values outside this range are specified. ", + "AssumeRoleWithWebIdentityRequest$DurationSeconds": "

    The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.

    " + } + }, + "roleSessionNameType": { + "base": null, + "refs": { + "AssumeRoleRequest$RoleSessionName": "

    An identifier for the assumed role session.

    Use the role session name to uniquely identify a session when the same role is assumed by different principals or for different reasons. In cross-account scenarios, the role session name is visible to, and can be logged by the account that owns the role. The role session name is also used in the ARN of the assumed role principal. This means that subsequent cross-account API requests using the temporary security credentials will expose the role session name to the external account in their CloudTrail logs.

    ", + "AssumeRoleWithWebIdentityRequest$RoleSessionName": "

    An identifier for the assumed role session. Typically, you pass the name or identifier that is associated with the user who is using your application. That way, the temporary security credentials that your application will use are associated with that user. This session name is included as part of the ARN and assumed role ID in the AssumedRoleUser response element.

    " + } + }, + "serialNumberType": { + "base": null, + "refs": { + "AssumeRoleRequest$SerialNumber": "

    The identification number of the MFA device that is associated with the user who is making the AssumeRole call. Specify this value if the trust policy of the role being assumed includes a condition that requires MFA authentication. The value is either the serial number for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).

    ", + "GetSessionTokenRequest$SerialNumber": "

    The identification number of the MFA device that is associated with the IAM user who is making the GetSessionToken call. Specify this value if the IAM user has a policy that requires MFA authentication. The value is either the serial number for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). You can find the device for an IAM user by going to the AWS Management Console and viewing the user's security credentials.

    " + } + }, + "sessionPolicyDocumentType": { + "base": null, + "refs": { + "AssumeRoleRequest$Policy": "

    An IAM policy in JSON format.

    This parameter is optional. If you pass a policy, the temporary security credentials that are returned by the operation have the permissions that are allowed by both (the intersection of) the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the Using IAM.

    The policy plain text must be 2048 bytes or shorter. However, an internal conversion compresses it into a packed binary format with a separate limit. The PackedPolicySize response element indicates by percentage how close to the upper size limit the policy is, with 100% equaling the maximum allowed size.", + "AssumeRoleWithSAMLRequest$Policy": "

    An IAM policy in JSON format.

    The policy parameter is optional. If you pass a policy, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity in the Using IAM.

    The policy plain text must be 2048 bytes or shorter. However, an internal conversion compresses it into a packed binary format with a separate limit. The PackedPolicySize response element indicates by percentage how close to the upper size limit the policy is, with 100% equaling the maximum allowed size. ", + "AssumeRoleWithWebIdentityRequest$Policy": "

    An IAM policy in JSON format.

    The policy parameter is optional. If you pass a policy, the temporary security credentials that are returned by the operation have the permissions that are allowed by both the access policy of the role that is being assumed, and the policy that you pass. This gives you a way to further restrict the permissions for the resulting temporary security credentials. You cannot use the passed policy to grant permissions that are in excess of those allowed by the access policy of the role that is being assumed. For more information, see Permissions for AssumeRoleWithWebIdentity in the Using IAM.

    The policy plain text must be 2048 bytes or shorter. However, an internal conversion compresses it into a packed binary format with a separate limit. The PackedPolicySize response element indicates by percentage how close to the upper size limit the policy is, with 100% equaling the maximum allowed size. ", + "GetFederationTokenRequest$Policy": "

    An IAM policy in JSON format that is passed with the GetFederationToken call and evaluated along with the policy or policies that are attached to the IAM user whose credentials are used to call GetFederationToken. The passed policy is used to scope down the permissions that are available to the IAM user, by allowing only a subset of the permissions that are granted to the IAM user. The passed policy cannot grant more permissions than those granted to the IAM user. The final permissions for the federated user are the most restrictive set based on the intersection of the passed policy and the IAM user policy.

    If you do not pass a policy, the resulting temporary security credentials have no effective permissions. The only exception is when the temporary security credentials are used to access a resource that has a resource-based policy that specifically allows the federated user to access the resource.

    The policy plain text must be 2048 bytes or shorter. However, an internal conversion compresses it into a packed binary format with a separate limit. The PackedPolicySize response element indicates by percentage how close to the upper size limit the policy is, with 100% equaling the maximum allowed size.

    For more information about how permissions work, see Permissions for GetFederationToken.

    " + } + }, + "tokenCodeType": { + "base": null, + "refs": { + "AssumeRoleRequest$TokenCode": "

    The value provided by the MFA device, if the trust policy of the role being assumed requires MFA (that is, if the policy includes a condition that tests for MFA). If the role being assumed requires MFA and if the TokenCode value is missing or expired, the AssumeRole call returns an \"access denied\" error.

    ", + "GetSessionTokenRequest$TokenCode": "

    The value provided by the MFA device, if MFA is required. If any policy requires the IAM user to submit an MFA code, specify this value. If MFA authentication is required, and the user does not provide a code when requesting a set of temporary security credentials, the user will receive an \"access denied\" response when requesting resources that require MFA authentication.

    " + } + }, + "tokenType": { + "base": null, + "refs": { + "Credentials$SessionToken": "

    The token that users must pass to the service API to use the temporary credentials.

    " + } + }, + "urlType": { + "base": null, + "refs": { + "AssumeRoleWithWebIdentityRequest$ProviderId": "

    The fully qualified host component of the domain name of the identity provider.

    Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com and graph.facebook.com are the only supported identity providers for OAuth 2.0 access tokens. Do not include URL schemes and port numbers.

    Do not specify this value for OpenID Connect ID tokens.

    " + } + }, + "userNameType": { + "base": null, + "refs": { + "GetFederationTokenRequest$Name": "

    The name of the federated user. The name is used as an identifier for the temporary security credentials (such as Bob). For example, you can reference the federated user name in a resource-based policy, such as in an Amazon S3 bucket policy.

    " + } + }, + "webIdentitySubjectType": { + "base": null, + "refs": { + "AssumeRoleWithWebIdentityResponse$SubjectFromWebIdentityToken": "

    The unique user identifier that is returned by the identity provider. This identifier is associated with the WebIdentityToken that was submitted with the AssumeRoleWithWebIdentity call. The identifier is typically unique to the user and the application that acquired the WebIdentityToken (pairwise identifier). For OpenID Connect ID tokens, this field contains the value returned by the identity provider as the token's sub (Subject) claim.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/sts/2011-06-15/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,869 @@ +{ + "metadata":{ + "apiVersion":"2013-04-15", + "endpointPrefix":"support", + "jsonVersion":"1.1", + "serviceFullName":"AWS Support", + "signatureVersion":"v4", + "targetPrefix":"AWSSupport_20130415", + "protocol":"json" + }, + "operations":{ + "AddAttachmentsToSet":{ + "name":"AddAttachmentsToSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddAttachmentsToSetRequest"}, + "output":{"shape":"AddAttachmentsToSetResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"AttachmentSetIdNotFound", + "exception":true + }, + { + "shape":"AttachmentSetExpired", + "exception":true + }, + { + "shape":"AttachmentSetSizeLimitExceeded", + "exception":true + }, + { + 
"shape":"AttachmentLimitExceeded", + "exception":true + } + ] + }, + "AddCommunicationToCase":{ + "name":"AddCommunicationToCase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddCommunicationToCaseRequest"}, + "output":{"shape":"AddCommunicationToCaseResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseIdNotFound", + "exception":true + }, + { + "shape":"AttachmentSetIdNotFound", + "exception":true + }, + { + "shape":"AttachmentSetExpired", + "exception":true + } + ] + }, + "CreateCase":{ + "name":"CreateCase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCaseRequest"}, + "output":{"shape":"CreateCaseResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseCreationLimitExceeded", + "exception":true + }, + { + "shape":"AttachmentSetIdNotFound", + "exception":true + }, + { + "shape":"AttachmentSetExpired", + "exception":true + } + ] + }, + "DescribeAttachment":{ + "name":"DescribeAttachment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAttachmentRequest"}, + "output":{"shape":"DescribeAttachmentResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"DescribeAttachmentLimitExceeded", + "exception":true + }, + { + "shape":"AttachmentIdNotFound", + "exception":true + } + ] + }, + "DescribeCases":{ + "name":"DescribeCases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCasesRequest"}, + "output":{"shape":"DescribeCasesResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseIdNotFound", + "exception":true + } + ] + }, + "DescribeCommunications":{ + "name":"DescribeCommunications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DescribeCommunicationsRequest"}, + "output":{"shape":"DescribeCommunicationsResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseIdNotFound", + "exception":true + } + ] + }, + "DescribeServices":{ + "name":"DescribeServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServicesRequest"}, + "output":{"shape":"DescribeServicesResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeSeverityLevels":{ + "name":"DescribeSeverityLevels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSeverityLevelsRequest"}, + "output":{"shape":"DescribeSeverityLevelsResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeTrustedAdvisorCheckRefreshStatuses":{ + "name":"DescribeTrustedAdvisorCheckRefreshStatuses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustedAdvisorCheckRefreshStatusesRequest"}, + "output":{"shape":"DescribeTrustedAdvisorCheckRefreshStatusesResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeTrustedAdvisorCheckResult":{ + "name":"DescribeTrustedAdvisorCheckResult", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustedAdvisorCheckResultRequest"}, + "output":{"shape":"DescribeTrustedAdvisorCheckResultResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "DescribeTrustedAdvisorCheckSummaries":{ + "name":"DescribeTrustedAdvisorCheckSummaries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustedAdvisorCheckSummariesRequest"}, + "output":{"shape":"DescribeTrustedAdvisorCheckSummariesResponse"}, + "errors":[ + { + "shape":"InternalServerError", + 
"exception":true, + "fault":true + } + ] + }, + "DescribeTrustedAdvisorChecks":{ + "name":"DescribeTrustedAdvisorChecks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrustedAdvisorChecksRequest"}, + "output":{"shape":"DescribeTrustedAdvisorChecksResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "RefreshTrustedAdvisorCheck":{ + "name":"RefreshTrustedAdvisorCheck", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RefreshTrustedAdvisorCheckRequest"}, + "output":{"shape":"RefreshTrustedAdvisorCheckResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + "ResolveCase":{ + "name":"ResolveCase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResolveCaseRequest"}, + "output":{"shape":"ResolveCaseResponse"}, + "errors":[ + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"CaseIdNotFound", + "exception":true + } + ] + } + }, + "shapes":{ + "AddAttachmentsToSetRequest":{ + "type":"structure", + "required":["attachments"], + "members":{ + "attachmentSetId":{"shape":"AttachmentSetId"}, + "attachments":{"shape":"Attachments"} + } + }, + "AddAttachmentsToSetResponse":{ + "type":"structure", + "members":{ + "attachmentSetId":{"shape":"AttachmentSetId"}, + "expiryTime":{"shape":"ExpiryTime"} + } + }, + "AddCommunicationToCaseRequest":{ + "type":"structure", + "required":["communicationBody"], + "members":{ + "caseId":{"shape":"CaseId"}, + "communicationBody":{"shape":"CommunicationBody"}, + "ccEmailAddresses":{"shape":"CcEmailAddressList"}, + "attachmentSetId":{"shape":"AttachmentSetId"} + } + }, + "AddCommunicationToCaseResponse":{ + "type":"structure", + "members":{ + "result":{"shape":"Result"} + } + }, + "AfterTime":{"type":"string"}, + "Attachment":{ + "type":"structure", + "members":{ + "fileName":{"shape":"FileName"}, + 
"data":{"shape":"Data"} + } + }, + "AttachmentDetails":{ + "type":"structure", + "members":{ + "attachmentId":{"shape":"AttachmentId"}, + "fileName":{"shape":"FileName"} + } + }, + "AttachmentId":{"type":"string"}, + "AttachmentIdNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "AttachmentLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "AttachmentSet":{ + "type":"list", + "member":{"shape":"AttachmentDetails"} + }, + "AttachmentSetExpired":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "AttachmentSetId":{"type":"string"}, + "AttachmentSetIdNotFound":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "AttachmentSetSizeLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Attachments":{ + "type":"list", + "member":{"shape":"Attachment"} + }, + "BeforeTime":{"type":"string"}, + "Boolean":{"type":"boolean"}, + "CaseCreationLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "CaseDetails":{ + "type":"structure", + "members":{ + "caseId":{"shape":"CaseId"}, + "displayId":{"shape":"DisplayId"}, + "subject":{"shape":"Subject"}, + "status":{"shape":"Status"}, + "serviceCode":{"shape":"ServiceCode"}, + "categoryCode":{"shape":"CategoryCode"}, + "severityCode":{"shape":"SeverityCode"}, + "submittedBy":{"shape":"SubmittedBy"}, + "timeCreated":{"shape":"TimeCreated"}, + "recentCommunications":{"shape":"RecentCaseCommunications"}, + "ccEmailAddresses":{"shape":"CcEmailAddressList"}, + "language":{"shape":"Language"} + } + }, + "CaseId":{"type":"string"}, + "CaseIdList":{ + "type":"list", + "member":{"shape":"CaseId"}, + "min":0, + "max":100 + }, + "CaseIdNotFound":{ + "type":"structure", + 
"members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "CaseList":{ + "type":"list", + "member":{"shape":"CaseDetails"} + }, + "CaseStatus":{"type":"string"}, + "Category":{ + "type":"structure", + "members":{ + "code":{"shape":"CategoryCode"}, + "name":{"shape":"CategoryName"} + } + }, + "CategoryCode":{"type":"string"}, + "CategoryList":{ + "type":"list", + "member":{"shape":"Category"} + }, + "CategoryName":{"type":"string"}, + "CcEmailAddress":{"type":"string"}, + "CcEmailAddressList":{ + "type":"list", + "member":{"shape":"CcEmailAddress"} + }, + "Communication":{ + "type":"structure", + "members":{ + "caseId":{"shape":"CaseId"}, + "body":{"shape":"CommunicationBody"}, + "submittedBy":{"shape":"SubmittedBy"}, + "timeCreated":{"shape":"TimeCreated"}, + "attachmentSet":{"shape":"AttachmentSet"} + } + }, + "CommunicationBody":{"type":"string"}, + "CommunicationList":{ + "type":"list", + "member":{"shape":"Communication"} + }, + "CreateCaseRequest":{ + "type":"structure", + "required":[ + "subject", + "communicationBody" + ], + "members":{ + "subject":{"shape":"Subject"}, + "serviceCode":{"shape":"ServiceCode"}, + "severityCode":{"shape":"SeverityCode"}, + "categoryCode":{"shape":"CategoryCode"}, + "communicationBody":{"shape":"CommunicationBody"}, + "ccEmailAddresses":{"shape":"CcEmailAddressList"}, + "language":{"shape":"Language"}, + "issueType":{"shape":"IssueType"}, + "attachmentSetId":{"shape":"AttachmentSetId"} + } + }, + "CreateCaseResponse":{ + "type":"structure", + "members":{ + "caseId":{"shape":"CaseId"} + } + }, + "Data":{"type":"blob"}, + "DescribeAttachmentLimitExceeded":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DescribeAttachmentRequest":{ + "type":"structure", + "required":["attachmentId"], + "members":{ + "attachmentId":{"shape":"AttachmentId"} + } + }, + "DescribeAttachmentResponse":{ + "type":"structure", + "members":{ + "attachment":{"shape":"Attachment"} 
+ } + }, + "DescribeCasesRequest":{ + "type":"structure", + "members":{ + "caseIdList":{"shape":"CaseIdList"}, + "displayId":{"shape":"DisplayId"}, + "afterTime":{"shape":"AfterTime"}, + "beforeTime":{"shape":"BeforeTime"}, + "includeResolvedCases":{"shape":"IncludeResolvedCases"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"}, + "language":{"shape":"Language"}, + "includeCommunications":{"shape":"IncludeCommunications"} + } + }, + "DescribeCasesResponse":{ + "type":"structure", + "members":{ + "cases":{"shape":"CaseList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeCommunicationsRequest":{ + "type":"structure", + "required":["caseId"], + "members":{ + "caseId":{"shape":"CaseId"}, + "beforeTime":{"shape":"BeforeTime"}, + "afterTime":{"shape":"AfterTime"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "DescribeCommunicationsResponse":{ + "type":"structure", + "members":{ + "communications":{"shape":"CommunicationList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeServicesRequest":{ + "type":"structure", + "members":{ + "serviceCodeList":{"shape":"ServiceCodeList"}, + "language":{"shape":"Language"} + } + }, + "DescribeServicesResponse":{ + "type":"structure", + "members":{ + "services":{"shape":"ServiceList"} + } + }, + "DescribeSeverityLevelsRequest":{ + "type":"structure", + "members":{ + "language":{"shape":"Language"} + } + }, + "DescribeSeverityLevelsResponse":{ + "type":"structure", + "members":{ + "severityLevels":{"shape":"SeverityLevelsList"} + } + }, + "DescribeTrustedAdvisorCheckRefreshStatusesRequest":{ + "type":"structure", + "required":["checkIds"], + "members":{ + "checkIds":{"shape":"StringList"} + } + }, + "DescribeTrustedAdvisorCheckRefreshStatusesResponse":{ + "type":"structure", + "required":["statuses"], + "members":{ + "statuses":{"shape":"TrustedAdvisorCheckRefreshStatusList"} + } + }, + "DescribeTrustedAdvisorCheckResultRequest":{ + 
"type":"structure", + "required":["checkId"], + "members":{ + "checkId":{"shape":"String"}, + "language":{"shape":"String"} + } + }, + "DescribeTrustedAdvisorCheckResultResponse":{ + "type":"structure", + "members":{ + "result":{"shape":"TrustedAdvisorCheckResult"} + } + }, + "DescribeTrustedAdvisorCheckSummariesRequest":{ + "type":"structure", + "required":["checkIds"], + "members":{ + "checkIds":{"shape":"StringList"} + } + }, + "DescribeTrustedAdvisorCheckSummariesResponse":{ + "type":"structure", + "required":["summaries"], + "members":{ + "summaries":{"shape":"TrustedAdvisorCheckSummaryList"} + } + }, + "DescribeTrustedAdvisorChecksRequest":{ + "type":"structure", + "required":["language"], + "members":{ + "language":{"shape":"String"} + } + }, + "DescribeTrustedAdvisorChecksResponse":{ + "type":"structure", + "required":["checks"], + "members":{ + "checks":{"shape":"TrustedAdvisorCheckList"} + } + }, + "DisplayId":{"type":"string"}, + "Double":{"type":"double"}, + "ErrorMessage":{"type":"string"}, + "ExpiryTime":{"type":"string"}, + "FileName":{"type":"string"}, + "IncludeCommunications":{"type":"boolean"}, + "IncludeResolvedCases":{"type":"boolean"}, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "IssueType":{"type":"string"}, + "Language":{"type":"string"}, + "Long":{"type":"long"}, + "MaxResults":{ + "type":"integer", + "min":10, + "max":100 + }, + "NextToken":{"type":"string"}, + "RecentCaseCommunications":{ + "type":"structure", + "members":{ + "communications":{"shape":"CommunicationList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "RefreshTrustedAdvisorCheckRequest":{ + "type":"structure", + "required":["checkId"], + "members":{ + "checkId":{"shape":"String"} + } + }, + "RefreshTrustedAdvisorCheckResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{"shape":"TrustedAdvisorCheckRefreshStatus"} + } + }, + 
"ResolveCaseRequest":{ + "type":"structure", + "members":{ + "caseId":{"shape":"CaseId"} + } + }, + "ResolveCaseResponse":{ + "type":"structure", + "members":{ + "initialCaseStatus":{"shape":"CaseStatus"}, + "finalCaseStatus":{"shape":"CaseStatus"} + } + }, + "Result":{"type":"boolean"}, + "Service":{ + "type":"structure", + "members":{ + "code":{"shape":"ServiceCode"}, + "name":{"shape":"ServiceName"}, + "categories":{"shape":"CategoryList"} + } + }, + "ServiceCode":{ + "type":"string", + "pattern":"[0-9a-z\\-_]+" + }, + "ServiceCodeList":{ + "type":"list", + "member":{"shape":"ServiceCode"}, + "min":0, + "max":100 + }, + "ServiceList":{ + "type":"list", + "member":{"shape":"Service"} + }, + "ServiceName":{"type":"string"}, + "SeverityCode":{"type":"string"}, + "SeverityLevel":{ + "type":"structure", + "members":{ + "code":{"shape":"SeverityLevelCode"}, + "name":{"shape":"SeverityLevelName"} + } + }, + "SeverityLevelCode":{"type":"string"}, + "SeverityLevelName":{"type":"string"}, + "SeverityLevelsList":{ + "type":"list", + "member":{"shape":"SeverityLevel"} + }, + "Status":{"type":"string"}, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "Subject":{"type":"string"}, + "SubmittedBy":{"type":"string"}, + "TimeCreated":{"type":"string"}, + "TrustedAdvisorCategorySpecificSummary":{ + "type":"structure", + "members":{ + "costOptimizing":{"shape":"TrustedAdvisorCostOptimizingSummary"} + } + }, + "TrustedAdvisorCheckDescription":{ + "type":"structure", + "required":[ + "id", + "name", + "description", + "category", + "metadata" + ], + "members":{ + "id":{"shape":"String"}, + "name":{"shape":"String"}, + "description":{"shape":"String"}, + "category":{"shape":"String"}, + "metadata":{"shape":"StringList"} + } + }, + "TrustedAdvisorCheckList":{ + "type":"list", + "member":{"shape":"TrustedAdvisorCheckDescription"} + }, + "TrustedAdvisorCheckRefreshStatus":{ + "type":"structure", + "required":[ + "checkId", + "status", 
+ "millisUntilNextRefreshable" + ], + "members":{ + "checkId":{"shape":"String"}, + "status":{"shape":"String"}, + "millisUntilNextRefreshable":{"shape":"Long"} + } + }, + "TrustedAdvisorCheckRefreshStatusList":{ + "type":"list", + "member":{"shape":"TrustedAdvisorCheckRefreshStatus"} + }, + "TrustedAdvisorCheckResult":{ + "type":"structure", + "required":[ + "checkId", + "timestamp", + "status", + "resourcesSummary", + "categorySpecificSummary", + "flaggedResources" + ], + "members":{ + "checkId":{"shape":"String"}, + "timestamp":{"shape":"String"}, + "status":{"shape":"String"}, + "resourcesSummary":{"shape":"TrustedAdvisorResourcesSummary"}, + "categorySpecificSummary":{"shape":"TrustedAdvisorCategorySpecificSummary"}, + "flaggedResources":{"shape":"TrustedAdvisorResourceDetailList"} + } + }, + "TrustedAdvisorCheckSummary":{ + "type":"structure", + "required":[ + "checkId", + "timestamp", + "status", + "resourcesSummary", + "categorySpecificSummary" + ], + "members":{ + "checkId":{"shape":"String"}, + "timestamp":{"shape":"String"}, + "status":{"shape":"String"}, + "hasFlaggedResources":{"shape":"Boolean"}, + "resourcesSummary":{"shape":"TrustedAdvisorResourcesSummary"}, + "categorySpecificSummary":{"shape":"TrustedAdvisorCategorySpecificSummary"} + } + }, + "TrustedAdvisorCheckSummaryList":{ + "type":"list", + "member":{"shape":"TrustedAdvisorCheckSummary"} + }, + "TrustedAdvisorCostOptimizingSummary":{ + "type":"structure", + "required":[ + "estimatedMonthlySavings", + "estimatedPercentMonthlySavings" + ], + "members":{ + "estimatedMonthlySavings":{"shape":"Double"}, + "estimatedPercentMonthlySavings":{"shape":"Double"} + } + }, + "TrustedAdvisorResourceDetail":{ + "type":"structure", + "required":[ + "status", + "region", + "resourceId", + "metadata" + ], + "members":{ + "status":{"shape":"String"}, + "region":{"shape":"String"}, + "resourceId":{"shape":"String"}, + "isSuppressed":{"shape":"Boolean"}, + "metadata":{"shape":"StringList"} + } + }, + 
"TrustedAdvisorResourceDetailList":{ + "type":"list", + "member":{"shape":"TrustedAdvisorResourceDetail"} + }, + "TrustedAdvisorResourcesSummary":{ + "type":"structure", + "required":[ + "resourcesProcessed", + "resourcesFlagged", + "resourcesIgnored", + "resourcesSuppressed" + ], + "members":{ + "resourcesProcessed":{"shape":"Long"}, + "resourcesFlagged":{"shape":"Long"}, + "resourcesIgnored":{"shape":"Long"}, + "resourcesSuppressed":{"shape":"Long"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,680 @@ +{ + "operations": { + "AddAttachmentsToSet": "

    Adds one or more attachments to an attachment set. If an AttachmentSetId is not specified, a new attachment set is created, and the ID of the set is returned in the response. If an AttachmentSetId is specified, the attachments are added to the specified set, if it exists.

    An attachment set is a temporary container for attachments that are to be added to a case or case communication. The set is available for one hour after it is created; the ExpiryTime returned in the response indicates when the set expires. The maximum number of attachments in a set is 3, and the maximum size of any attachment in the set is 5 MB.

    ", + "AddCommunicationToCase": "

    Adds additional customer communication to an AWS Support case. You use the CaseId value to identify the case to add communication to. You can list a set of email addresses to copy on the communication using the CcEmailAddresses value. The CommunicationBody value contains the text of the communication.

    The response indicates the success or failure of the request.

    This operation implements a subset of the features of the AWS Support Center.

    ", + "CreateCase": "

    Creates a new case in the AWS Support Center. This operation is modeled on the behavior of the AWS Support Center Create Case page. Its parameters require you to specify the following information:

    1. IssueType. The type of issue for the case. You can specify either \"customer-service\" or \"technical.\" If you do not indicate a value, the default is \"technical.\"
    2. ServiceCode. The code for an AWS service. You obtain the ServiceCode by calling DescribeServices.
    3. CategoryCode. The category for the service defined for the ServiceCode value. You also obtain the category code for a service by calling DescribeServices. Each AWS service defines its own set of category codes.
    4. SeverityCode. A value that indicates the urgency of the case, which in turn determines the response time according to your service level agreement with AWS Support. You obtain the SeverityCode by calling DescribeSeverityLevels.
    5. Subject. The Subject field on the AWS Support Center Create Case page.
    6. CommunicationBody. The Description field on the AWS Support Center Create Case page.
    7. AttachmentSetId. The ID of a set of attachments that has been created by using AddAttachmentsToSet.
    8. Language. The human language in which AWS Support handles the case. English and Japanese are currently supported.
    9. CcEmailAddresses. The AWS Support Center CC field on the Create Case page. You can list email addresses to be copied on any correspondence about the case. The account that opens the case is already identified by passing the AWS Credentials in the HTTP POST method or in a method or function call from one of the programming languages supported by an AWS SDK.

    To add additional communication or attachments to an existing case, use AddCommunicationToCase.

    A successful CreateCase request returns an AWS Support case number. Case numbers are used by the DescribeCases operation to retrieve existing AWS Support cases.

    ", + "DescribeAttachment": "

    Returns the attachment that has the specified ID. Attachment IDs are generated by the case management system when you add an attachment to a case or case communication. Attachment IDs are returned in the AttachmentDetails objects that are returned by the DescribeCommunications operation.

    ", + "DescribeCases": "

    Returns a list of cases that you specify by passing one or more case IDs. In addition, you can filter the cases by date by setting values for the AfterTime and BeforeTime request parameters. You can set values for the IncludeResolvedCases and IncludeCommunications request parameters to control how much information is returned.

    Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.

    The response returns the following in JSON format:

    1. One or more CaseDetails data types.
    2. One or more NextToken values, which specify where to paginate the returned records represented by the CaseDetails objects.
    ", + "DescribeCommunications": "

    Returns communications (and attachments) for one or more support cases. You can use the AfterTime and BeforeTime parameters to filter by date. You can use the CaseId parameter to restrict the results to a particular case.

    Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.

    You can use the MaxResults and NextToken parameters to control the pagination of the result set. Set MaxResults to the number of cases you want displayed on each page, and use NextToken to specify the resumption of pagination.

    ", + "DescribeServices": "

    Returns the current list of AWS services and a list of service categories that applies to each one. You then use service names and categories in your CreateCase requests. Each AWS service has its own set of categories.

    The service codes and category codes correspond to the values that are displayed in the Service and Category drop-down lists on the AWS Support Center Create Case page. The values in those fields, however, do not necessarily match the service codes and categories returned by the DescribeServices request. Always use the service codes and categories obtained programmatically. This practice ensures that you always have the most recent set of service and category codes.

    ", + "DescribeSeverityLevels": "

    Returns the list of severity levels that you can assign to an AWS Support case. The severity level for a case is also a field in the CaseDetails data type included in any CreateCase request.

    ", + "DescribeTrustedAdvisorCheckRefreshStatuses": "

    Returns the refresh status of the Trusted Advisor checks that have the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

    ", + "DescribeTrustedAdvisorCheckResult": "

    Returns the results of the Trusted Advisor check that has the specified check ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

    The response contains a TrustedAdvisorCheckResult object, which contains these three objects:

    In addition, the response contains these fields:

    • Status. The alert status of the check: \"ok\" (green), \"warning\" (yellow), \"error\" (red), or \"not_available\".
    • Timestamp. The time of the last refresh of the check.
    • CheckId. The unique identifier for the check.
    ", + "DescribeTrustedAdvisorCheckSummaries": "

    Returns the summaries of the results of the Trusted Advisor checks that have the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

    The response contains an array of TrustedAdvisorCheckSummary objects.

    ", + "DescribeTrustedAdvisorChecks": "

    Returns information about all available Trusted Advisor checks, including name, ID, category, description, and metadata. You must specify a language code; English (\"en\") and Japanese (\"ja\") are currently supported. The response contains a TrustedAdvisorCheckDescription for each check.

    ", + "RefreshTrustedAdvisorCheck": "

    Requests a refresh of the Trusted Advisor check that has the specified check ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

    The response contains a TrustedAdvisorCheckRefreshStatus object, which contains these fields:

    • Status. The refresh status of the check: \"none\", \"enqueued\", \"processing\", \"success\", or \"abandoned\".
    • MillisUntilNextRefreshable. The amount of time, in milliseconds, until the check is eligible for refresh.
    • CheckId. The unique identifier for the check.
    ", + "ResolveCase": "

    Takes a CaseId and returns the initial state of the case along with the state of the case after the call to ResolveCase completed.

    " + }, + "service": "AWS Support

    The AWS Support API reference is intended for programmers who need detailed information about the AWS Support operations and data types. This service enables you to manage your AWS Support cases programmatically. It uses HTTP methods that return results in JSON format.

    The AWS Support service also exposes a set of Trusted Advisor features. You can retrieve a list of checks and their descriptions, get check results, specify checks to refresh, and get the refresh status of checks.

    The following list describes the AWS Support case management operations:

    The following list describes the operations available from the AWS Support service for Trusted Advisor:

    For authentication of requests, AWS Support uses Signature Version 4 Signing Process.

    See About the AWS Support API in the AWS Support User Guide for information about how to use this service to create and manage your support cases, and how to call Trusted Advisor for results of checks on your resources.

    ", + "shapes": { + "AddAttachmentsToSetRequest": { + "base": null, + "refs": { + } + }, + "AddAttachmentsToSetResponse": { + "base": "

    The ID and expiry time of the attachment set returned by the AddAttachmentsToSet operation.

    ", + "refs": { + } + }, + "AddCommunicationToCaseRequest": { + "base": "

    To be written.

    ", + "refs": { + } + }, + "AddCommunicationToCaseResponse": { + "base": "

    The result of the AddCommunicationToCase operation.

    ", + "refs": { + } + }, + "AfterTime": { + "base": null, + "refs": { + "DescribeCasesRequest$afterTime": "

    The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.

    ", + "DescribeCommunicationsRequest$afterTime": "

    The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.

    " + } + }, + "Attachment": { + "base": "

    An attachment to a case communication. The attachment consists of the file name and the content of the file.

    ", + "refs": { + "Attachments$member": null, + "DescribeAttachmentResponse$attachment": "

    The attachment content and file name.

    " + } + }, + "AttachmentDetails": { + "base": "

    The file name and ID of an attachment to a case communication. You can use the ID to retrieve the attachment with the DescribeAttachment operation.

    ", + "refs": { + "AttachmentSet$member": null + } + }, + "AttachmentId": { + "base": null, + "refs": { + "AttachmentDetails$attachmentId": "

    The ID of the attachment.

    ", + "DescribeAttachmentRequest$attachmentId": "

    The ID of the attachment to return. Attachment IDs are returned by the DescribeCommunications operation.

    " + } + }, + "AttachmentIdNotFound": { + "base": "

    An attachment with the specified ID could not be found.

    ", + "refs": { + } + }, + "AttachmentLimitExceeded": { + "base": "

    The limit for the number of attachment sets created in a short period of time has been exceeded.

    ", + "refs": { + } + }, + "AttachmentSet": { + "base": null, + "refs": { + "Communication$attachmentSet": "

    Information about the attachments to the case communication.

    " + } + }, + "AttachmentSetExpired": { + "base": "

    The expiration time of the attachment set has passed. The set expires 1 hour after it is created.

    ", + "refs": { + } + }, + "AttachmentSetId": { + "base": null, + "refs": { + "AddAttachmentsToSetRequest$attachmentSetId": "

    The ID of the attachment set. If an AttachmentSetId is not specified, a new attachment set is created, and the ID of the set is returned in the response. If an AttachmentSetId is specified, the attachments are added to the specified set, if it exists.

    ", + "AddAttachmentsToSetResponse$attachmentSetId": "

    The ID of the attachment set. If an AttachmentSetId was not specified, a new attachment set is created, and the ID of the set is returned in the response. If an AttachmentSetId was specified, the attachments are added to the specified set, if it exists.

    ", + "AddCommunicationToCaseRequest$attachmentSetId": "

    The ID of a set of one or more attachments for the communication to add to the case. Create the set by calling AddAttachmentsToSet

    ", + "CreateCaseRequest$attachmentSetId": "

    The ID of a set of one or more attachments for the case. Create the set by using AddAttachmentsToSet.

    " + } + }, + "AttachmentSetIdNotFound": { + "base": "

    An attachment set with the specified ID could not be found.

    ", + "refs": { + } + }, + "AttachmentSetSizeLimitExceeded": { + "base": "

    A limit for the size of an attachment set has been exceeded. The limits are 3 attachments and 5 MB per attachment.

    ", + "refs": { + } + }, + "Attachments": { + "base": null, + "refs": { + "AddAttachmentsToSetRequest$attachments": "

    One or more attachments to add to the set. The limit is 3 attachments per set, and the size limit is 5 MB per attachment.

    " + } + }, + "BeforeTime": { + "base": null, + "refs": { + "DescribeCasesRequest$beforeTime": "

    The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.

    ", + "DescribeCommunicationsRequest$beforeTime": "

    The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.

    " + } + }, + "Boolean": { + "base": null, + "refs": { + "TrustedAdvisorCheckSummary$hasFlaggedResources": "

    Specifies whether the Trusted Advisor check has flagged resources.

    ", + "TrustedAdvisorResourceDetail$isSuppressed": "

    Specifies whether the AWS resource was ignored by Trusted Advisor because it was marked as suppressed by the user.

    " + } + }, + "CaseCreationLimitExceeded": { + "base": "

    The case creation limit for the account has been exceeded.

    ", + "refs": { + } + }, + "CaseDetails": { + "base": "

    A JSON-formatted object that contains the metadata for a support case. It is contained the response from a DescribeCases request. CaseDetails contains the following fields:

    1. CaseID. The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47.
    2. CategoryCode. The category of problem for the AWS Support case. Corresponds to the CategoryCode values returned by a call to DescribeServices.
    3. DisplayId. The identifier for the case on pages in the AWS Support Center.
    4. Language. The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.
    5. RecentCommunications. One or more Communication objects. Fields of these objects are Attachments, Body, CaseId, SubmittedBy, and TimeCreated.
    6. NextToken. A resumption point for pagination.
    7. ServiceCode. The identifier for the AWS service that corresponds to the service code defined in the call to DescribeServices.
    8. SeverityCode. The severity code assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels.
    9. Status. The status of the case in the AWS Support Center.
    10. Subject. The subject line of the case.
    11. SubmittedBy. The email address of the account that submitted the case.
    12. TimeCreated. The time the case was created, in ISO-8601 format.
    ", + "refs": { + "CaseList$member": null + } + }, + "CaseId": { + "base": null, + "refs": { + "AddCommunicationToCaseRequest$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "CaseDetails$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "CaseIdList$member": null, + "Communication$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "CreateCaseResponse$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "DescribeCommunicationsRequest$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    ", + "ResolveCaseRequest$caseId": "

    The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

    " + } + }, + "CaseIdList": { + "base": null, + "refs": { + "DescribeCasesRequest$caseIdList": "

    A list of ID numbers of the support cases you want returned. The maximum number of cases is 100.

    " + } + }, + "CaseIdNotFound": { + "base": "

    The requested CaseId could not be located.

    ", + "refs": { + } + }, + "CaseList": { + "base": null, + "refs": { + "DescribeCasesResponse$cases": "

    The details for the cases that match the request.

    " + } + }, + "CaseStatus": { + "base": null, + "refs": { + "ResolveCaseResponse$initialCaseStatus": "

    The status of the case when the ResolveCase request was sent.

    ", + "ResolveCaseResponse$finalCaseStatus": "

    The status of the case after the ResolveCase request was processed.

    " + } + }, + "Category": { + "base": "

    A JSON-formatted name/value pair that represents the category name and category code of the problem, selected from the DescribeServices response for each AWS service.

    ", + "refs": { + "CategoryList$member": null + } + }, + "CategoryCode": { + "base": null, + "refs": { + "CaseDetails$categoryCode": "

    The category of problem for the AWS Support case.

    ", + "Category$code": "

    The category code for the support case.

    ", + "CreateCaseRequest$categoryCode": "

    The category of problem for the AWS Support case.

    " + } + }, + "CategoryList": { + "base": null, + "refs": { + "Service$categories": "

    A list of categories that describe the type of support issue a case describes. Categories consist of a category name and a category code. Category names and codes are passed to AWS Support when you call CreateCase.

    " + } + }, + "CategoryName": { + "base": null, + "refs": { + "Category$name": "

    The category name for the support case.

    " + } + }, + "CcEmailAddress": { + "base": null, + "refs": { + "CcEmailAddressList$member": null + } + }, + "CcEmailAddressList": { + "base": null, + "refs": { + "AddCommunicationToCaseRequest$ccEmailAddresses": "

    The email addresses in the CC line of an email to be added to the support case.

    ", + "CaseDetails$ccEmailAddresses": "

    The email addresses that receive copies of communication about the case.

    ", + "CreateCaseRequest$ccEmailAddresses": "

    A list of email addresses that AWS Support copies on case correspondence.

    " + } + }, + "Communication": { + "base": "

    A communication associated with an AWS Support case. The communication consists of the case ID, the message body, attachment information, the account email address, and the date and time of the communication.

    ", + "refs": { + "CommunicationList$member": null + } + }, + "CommunicationBody": { + "base": null, + "refs": { + "AddCommunicationToCaseRequest$communicationBody": "

    The body of an email communication to add to the support case.

    ", + "Communication$body": "

    The text of the communication between the customer and AWS Support.

    ", + "CreateCaseRequest$communicationBody": "

    The communication body text when you create an AWS Support case by calling CreateCase.

    " + } + }, + "CommunicationList": { + "base": null, + "refs": { + "DescribeCommunicationsResponse$communications": "

    The communications for the case.

    ", + "RecentCaseCommunications$communications": "

    The five most recent communications associated with the case.

    " + } + }, + "CreateCaseRequest": { + "base": null, + "refs": { + } + }, + "CreateCaseResponse": { + "base": "

    The AWS Support case ID returned by a successful completion of the CreateCase operation.

    ", + "refs": { + } + }, + "Data": { + "base": null, + "refs": { + "Attachment$data": "

    The content of the attachment file.

    " + } + }, + "DescribeAttachmentLimitExceeded": { + "base": "

    The limit for the number of DescribeAttachment requests in a short period of time has been exceeded.

    ", + "refs": { + } + }, + "DescribeAttachmentRequest": { + "base": null, + "refs": { + } + }, + "DescribeAttachmentResponse": { + "base": "

    The content and file name of the attachment returned by the DescribeAttachment operation.

    ", + "refs": { + } + }, + "DescribeCasesRequest": { + "base": null, + "refs": { + } + }, + "DescribeCasesResponse": { + "base": "

    Returns an array of CaseDetails objects and a NextToken that defines a point for pagination in the result set.

    ", + "refs": { + } + }, + "DescribeCommunicationsRequest": { + "base": null, + "refs": { + } + }, + "DescribeCommunicationsResponse": { + "base": "

    The communications returned by the DescribeCommunications operation.

    ", + "refs": { + } + }, + "DescribeServicesRequest": { + "base": null, + "refs": { + } + }, + "DescribeServicesResponse": { + "base": "

    The list of AWS services returned by the DescribeServices operation.

    ", + "refs": { + } + }, + "DescribeSeverityLevelsRequest": { + "base": null, + "refs": { + } + }, + "DescribeSeverityLevelsResponse": { + "base": "

    The list of severity levels returned by the DescribeSeverityLevels operation.

    ", + "refs": { + } + }, + "DescribeTrustedAdvisorCheckRefreshStatusesRequest": { + "base": null, + "refs": { + } + }, + "DescribeTrustedAdvisorCheckRefreshStatusesResponse": { + "base": "

    The statuses of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckRefreshStatuses operation.

    ", + "refs": { + } + }, + "DescribeTrustedAdvisorCheckResultRequest": { + "base": null, + "refs": { + } + }, + "DescribeTrustedAdvisorCheckResultResponse": { + "base": "

    The result of the Trusted Advisor check returned by the DescribeTrustedAdvisorCheckResult operation.

    ", + "refs": { + } + }, + "DescribeTrustedAdvisorCheckSummariesRequest": { + "base": null, + "refs": { + } + }, + "DescribeTrustedAdvisorCheckSummariesResponse": { + "base": "

    The summaries of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckSummaries operation.

    ", + "refs": { + } + }, + "DescribeTrustedAdvisorChecksRequest": { + "base": null, + "refs": { + } + }, + "DescribeTrustedAdvisorChecksResponse": { + "base": "

    Information about the Trusted Advisor checks returned by the DescribeTrustedAdvisorChecks operation.

    ", + "refs": { + } + }, + "DisplayId": { + "base": null, + "refs": { + "CaseDetails$displayId": "

    The ID displayed for the case in the AWS Support Center. This is a numeric string.

    ", + "DescribeCasesRequest$displayId": "

    The ID displayed for a case in the AWS Support Center user interface.

    " + } + }, + "Double": { + "base": null, + "refs": { + "TrustedAdvisorCostOptimizingSummary$estimatedMonthlySavings": "

    The estimated monthly savings that might be realized if the recommended actions are taken.

    ", + "TrustedAdvisorCostOptimizingSummary$estimatedPercentMonthlySavings": "

    The estimated percentage of savings that might be realized if the recommended actions are taken.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "AttachmentIdNotFound$message": "

    An attachment with the specified ID could not be found.

    ", + "AttachmentLimitExceeded$message": "

    The limit for the number of attachment sets created in a short period of time has been exceeded.

    ", + "AttachmentSetExpired$message": "

    The expiration time of the attachment set has passed. The set expires 1 hour after it is created.

    ", + "AttachmentSetIdNotFound$message": "

    An attachment set with the specified ID could not be found.

    ", + "AttachmentSetSizeLimitExceeded$message": "

    A limit for the size of an attachment set has been exceeded. The limits are 3 attachments and 5 MB per attachment.

    ", + "CaseCreationLimitExceeded$message": "

    An error message that indicates that you have exceeded the number of cases you can have open.

    ", + "CaseIdNotFound$message": "

    The requested CaseId could not be located.

    ", + "DescribeAttachmentLimitExceeded$message": "

    The limit for the number of DescribeAttachment requests in a short period of time has been exceeded.

    ", + "InternalServerError$message": "

    An internal server error occurred.

    " + } + }, + "ExpiryTime": { + "base": null, + "refs": { + "AddAttachmentsToSetResponse$expiryTime": "

    The time and date when the attachment set expires.

    " + } + }, + "FileName": { + "base": null, + "refs": { + "Attachment$fileName": "

    The name of the attachment file.

    ", + "AttachmentDetails$fileName": "

    The file name of the attachment.

    " + } + }, + "IncludeCommunications": { + "base": null, + "refs": { + "DescribeCasesRequest$includeCommunications": "

    Specifies whether communications should be included in the DescribeCases results. The default is true.

    " + } + }, + "IncludeResolvedCases": { + "base": null, + "refs": { + "DescribeCasesRequest$includeResolvedCases": "

    Specifies whether resolved support cases should be included in the DescribeCases results. The default is false.

    " + } + }, + "InternalServerError": { + "base": "

    An internal server error occurred.

    ", + "refs": { + } + }, + "IssueType": { + "base": null, + "refs": { + "CreateCaseRequest$issueType": "

    The type of issue for the case. You can specify either \"customer-service\" or \"technical.\" If you do not indicate a value, the default is \"technical.\"

    " + } + }, + "Language": { + "base": null, + "refs": { + "CaseDetails$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "CreateCaseRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "DescribeCasesRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "DescribeServicesRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "DescribeSeverityLevelsRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    " + } + }, + "Long": { + "base": null, + "refs": { + "TrustedAdvisorCheckRefreshStatus$millisUntilNextRefreshable": "

    The amount of time, in milliseconds, until the Trusted Advisor check is eligible for refresh.

    ", + "TrustedAdvisorResourcesSummary$resourcesProcessed": "

    The number of AWS resources that were analyzed by the Trusted Advisor check.

    ", + "TrustedAdvisorResourcesSummary$resourcesFlagged": "

    The number of AWS resources that were flagged (listed) by the Trusted Advisor check.

    ", + "TrustedAdvisorResourcesSummary$resourcesIgnored": "

    The number of AWS resources ignored by Trusted Advisor because information was unavailable.

    ", + "TrustedAdvisorResourcesSummary$resourcesSuppressed": "

    The number of AWS resources ignored by Trusted Advisor because they were marked as suppressed by the user.

    " + } + }, + "MaxResults": { + "base": null, + "refs": { + "DescribeCasesRequest$maxResults": "

    The maximum number of results to return before paginating.

    ", + "DescribeCommunicationsRequest$maxResults": "

    The maximum number of results to return before paginating.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "DescribeCasesRequest$nextToken": "

    A resumption point for pagination.

    ", + "DescribeCasesResponse$nextToken": "

    A resumption point for pagination.

    ", + "DescribeCommunicationsRequest$nextToken": "

    A resumption point for pagination.

    ", + "DescribeCommunicationsResponse$nextToken": "

    A resumption point for pagination.

    ", + "RecentCaseCommunications$nextToken": "

    A resumption point for pagination.

    " + } + }, + "RecentCaseCommunications": { + "base": "

    The five most recent communications associated with the case.

    ", + "refs": { + "CaseDetails$recentCommunications": "

    The five most recent communications between you and AWS Support Center, including the IDs of any attachments to the communications. Also includes a nextToken that you can use to retrieve earlier communications.

    " + } + }, + "RefreshTrustedAdvisorCheckRequest": { + "base": null, + "refs": { + } + }, + "RefreshTrustedAdvisorCheckResponse": { + "base": "

    The current refresh status of a Trusted Advisor check.

    ", + "refs": { + } + }, + "ResolveCaseRequest": { + "base": null, + "refs": { + } + }, + "ResolveCaseResponse": { + "base": "

    The status of the case returned by the ResolveCase operation.

    ", + "refs": { + } + }, + "Result": { + "base": null, + "refs": { + "AddCommunicationToCaseResponse$result": "

    True if AddCommunicationToCase succeeds. Otherwise, returns an error.

    " + } + }, + "Service": { + "base": "

    Information about an AWS service returned by the DescribeServices operation.

    ", + "refs": { + "ServiceList$member": null + } + }, + "ServiceCode": { + "base": null, + "refs": { + "CaseDetails$serviceCode": "

    The code for the AWS service returned by the call to DescribeServices.

    ", + "CreateCaseRequest$serviceCode": "

    The code for the AWS service returned by the call to DescribeServices.

    ", + "Service$code": "

    The code for an AWS service returned by the DescribeServices response. The Name element contains the corresponding friendly name.

    ", + "ServiceCodeList$member": null + } + }, + "ServiceCodeList": { + "base": null, + "refs": { + "DescribeServicesRequest$serviceCodeList": "

    A JSON-formatted list of service codes available for AWS services.

    " + } + }, + "ServiceList": { + "base": null, + "refs": { + "DescribeServicesResponse$services": "

    A JSON-formatted list of AWS services.

    " + } + }, + "ServiceName": { + "base": null, + "refs": { + "Service$name": "

    The friendly name for an AWS service. The Code element contains the corresponding code.

    " + } + }, + "SeverityCode": { + "base": null, + "refs": { + "CaseDetails$severityCode": "

    The code for the severity level returned by the call to DescribeSeverityLevels.

    ", + "CreateCaseRequest$severityCode": "

    The code for the severity level returned by the call to DescribeSeverityLevels.

    The availability of severity levels depends on each customer's support subscription. In other words, your subscription may not necessarily require the urgent level of response time.

    " + } + }, + "SeverityLevel": { + "base": "

    A code and name pair that represent a severity level that can be applied to a support case.

    ", + "refs": { + "SeverityLevelsList$member": null + } + }, + "SeverityLevelCode": { + "base": null, + "refs": { + "SeverityLevel$code": "

    One of four values: \"low,\" \"medium,\" \"high,\" and \"urgent\". These values correspond to response times returned to the caller in SeverityLevel.name.

    " + } + }, + "SeverityLevelName": { + "base": null, + "refs": { + "SeverityLevel$name": "

    The name of the severity level that corresponds to the severity level code.

    " + } + }, + "SeverityLevelsList": { + "base": null, + "refs": { + "DescribeSeverityLevelsResponse$severityLevels": "

    The available severity levels for the support case. Available severity levels are defined by your service level agreement with AWS.

    " + } + }, + "Status": { + "base": null, + "refs": { + "CaseDetails$status": "

    The status of the case.

    " + } + }, + "String": { + "base": null, + "refs": { + "DescribeTrustedAdvisorCheckResultRequest$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "DescribeTrustedAdvisorCheckResultRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "DescribeTrustedAdvisorChecksRequest$language": "

    The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

    ", + "RefreshTrustedAdvisorCheckRequest$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "StringList$member": null, + "TrustedAdvisorCheckDescription$id": "

    The unique identifier for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckDescription$name": "

    The display name for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckDescription$description": "

    The description of the Trusted Advisor check, which includes the alert criteria and recommended actions (contains HTML markup).

    ", + "TrustedAdvisorCheckDescription$category": "

    The category of the Trusted Advisor check.

    ", + "TrustedAdvisorCheckRefreshStatus$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckRefreshStatus$status": "

    The status of the Trusted Advisor check for which a refresh has been requested: \"none\", \"enqueued\", \"processing\", \"success\", or \"abandoned\".

    ", + "TrustedAdvisorCheckResult$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckResult$timestamp": "

    The time of the last refresh of the check.

    ", + "TrustedAdvisorCheckResult$status": "

    The alert status of the check: \"ok\" (green), \"warning\" (yellow), \"error\" (red), or \"not_available\".

    ", + "TrustedAdvisorCheckSummary$checkId": "

    The unique identifier for the Trusted Advisor check.

    ", + "TrustedAdvisorCheckSummary$timestamp": "

    The time of the last refresh of the check.

    ", + "TrustedAdvisorCheckSummary$status": "

    The alert status of the check: \"ok\" (green), \"warning\" (yellow), \"error\" (red), or \"not_available\".

    ", + "TrustedAdvisorResourceDetail$status": "

    The status code for the resource identified in the Trusted Advisor check.

    ", + "TrustedAdvisorResourceDetail$region": "

    The AWS region in which the identified resource is located.

    ", + "TrustedAdvisorResourceDetail$resourceId": "

    The unique identifier for the identified resource.

    " + } + }, + "StringList": { + "base": null, + "refs": { + "DescribeTrustedAdvisorCheckRefreshStatusesRequest$checkIds": "

    The IDs of the Trusted Advisor checks.

    ", + "DescribeTrustedAdvisorCheckSummariesRequest$checkIds": "

    The IDs of the Trusted Advisor checks.

    ", + "TrustedAdvisorCheckDescription$metadata": "

    The column headings for the data returned by the Trusted Advisor check. The order of the headings corresponds to the order of the data in the Metadata element of the TrustedAdvisorResourceDetail for the check. Metadata contains all the data that is shown in the Excel download, even in those cases where the UI shows just summary data.

    ", + "TrustedAdvisorResourceDetail$metadata": "

    Additional information about the identified resource. The exact metadata and its order can be obtained by inspecting the TrustedAdvisorCheckDescription object returned by the call to DescribeTrustedAdvisorChecks. Metadata contains all the data that is shown in the Excel download, even in those cases where the UI shows just summary data.

    " + } + }, + "Subject": { + "base": null, + "refs": { + "CaseDetails$subject": "

    The subject line for the case in the AWS Support Center.

    ", + "CreateCaseRequest$subject": "

    The title of the AWS Support case.

    " + } + }, + "SubmittedBy": { + "base": null, + "refs": { + "CaseDetails$submittedBy": "

    The email address of the account that submitted the case.

    ", + "Communication$submittedBy": "

    The email address of the account that submitted the AWS Support case.

    " + } + }, + "TimeCreated": { + "base": null, + "refs": { + "CaseDetails$timeCreated": "

    The time that the case was case created in the AWS Support Center.

    ", + "Communication$timeCreated": "

    The time the communication was created.

    " + } + }, + "TrustedAdvisorCategorySpecificSummary": { + "base": "

    The container for summary information that relates to the category of the Trusted Advisor check.

    ", + "refs": { + "TrustedAdvisorCheckResult$categorySpecificSummary": "

    Summary information that relates to the category of the check. Cost Optimizing is the only category that is currently supported.

    ", + "TrustedAdvisorCheckSummary$categorySpecificSummary": "

    Summary information that relates to the category of the check. Cost Optimizing is the only category that is currently supported.

    " + } + }, + "TrustedAdvisorCheckDescription": { + "base": "

    The description and metadata for a Trusted Advisor check.

    ", + "refs": { + "TrustedAdvisorCheckList$member": null + } + }, + "TrustedAdvisorCheckList": { + "base": null, + "refs": { + "DescribeTrustedAdvisorChecksResponse$checks": "

    Information about all available Trusted Advisor checks.

    " + } + }, + "TrustedAdvisorCheckRefreshStatus": { + "base": "

    The refresh status of a Trusted Advisor check.

    ", + "refs": { + "RefreshTrustedAdvisorCheckResponse$status": "

    The current refresh status for a check, including the amount of time until the check is eligible for refresh.

    ", + "TrustedAdvisorCheckRefreshStatusList$member": null + } + }, + "TrustedAdvisorCheckRefreshStatusList": { + "base": null, + "refs": { + "DescribeTrustedAdvisorCheckRefreshStatusesResponse$statuses": "

    The refresh status of the specified Trusted Advisor checks.

    " + } + }, + "TrustedAdvisorCheckResult": { + "base": "

    The results of a Trusted Advisor check returned by DescribeTrustedAdvisorCheckResult.

    ", + "refs": { + "DescribeTrustedAdvisorCheckResultResponse$result": "

    The detailed results of the Trusted Advisor check.

    " + } + }, + "TrustedAdvisorCheckSummary": { + "base": "

    A summary of a Trusted Advisor check result, including the alert status, last refresh, and number of resources examined.

    ", + "refs": { + "TrustedAdvisorCheckSummaryList$member": null + } + }, + "TrustedAdvisorCheckSummaryList": { + "base": null, + "refs": { + "DescribeTrustedAdvisorCheckSummariesResponse$summaries": "

    The summary information for the requested Trusted Advisor checks.

    " + } + }, + "TrustedAdvisorCostOptimizingSummary": { + "base": "

    The estimated cost savings that might be realized if the recommended actions are taken.

    ", + "refs": { + "TrustedAdvisorCategorySpecificSummary$costOptimizing": "

    The summary information about cost savings for a Trusted Advisor check that is in the Cost Optimizing category.

    " + } + }, + "TrustedAdvisorResourceDetail": { + "base": "

    Contains information about a resource identified by a Trusted Advisor check.

    ", + "refs": { + "TrustedAdvisorResourceDetailList$member": null + } + }, + "TrustedAdvisorResourceDetailList": { + "base": null, + "refs": { + "TrustedAdvisorCheckResult$flaggedResources": "

    The details about each resource listed in the check result.

    " + } + }, + "TrustedAdvisorResourcesSummary": { + "base": "

    Details about AWS resources that were analyzed in a call to Trusted Advisor DescribeTrustedAdvisorCheckSummaries.

    ", + "refs": { + "TrustedAdvisorCheckResult$resourcesSummary": null, + "TrustedAdvisorCheckSummary$resourcesSummary": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/support/2013-04-15/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,25 @@ +{ + "pagination": { + "DescribeCases": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "cases" + }, + "DescribeCommunications": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "communications" + }, + "DescribeServices": { + "result_key": "services" + }, + "DescribeTrustedAdvisorCheckRefreshStatuses": { + "result_key": "statuses" + }, + "DescribeTrustedAdvisorCheckSummaries": { + "result_key": "summaries" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2838 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-01-25", + "endpointPrefix":"swf", + "jsonVersion":"1.0", + 
"serviceAbbreviation":"Amazon SWF", + "serviceFullName":"Amazon Simple Workflow Service", + "signatureVersion":"v4", + "targetPrefix":"SimpleWorkflowService", + "timestampFormat":"unixTimestamp", + "protocol":"json" + }, + "operations":{ + "CountClosedWorkflowExecutions":{ + "name":"CountClosedWorkflowExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CountClosedWorkflowExecutionsInput"}, + "output":{"shape":"WorkflowExecutionCount"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "CountOpenWorkflowExecutions":{ + "name":"CountOpenWorkflowExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CountOpenWorkflowExecutionsInput"}, + "output":{"shape":"WorkflowExecutionCount"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "CountPendingActivityTasks":{ + "name":"CountPendingActivityTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CountPendingActivityTasksInput"}, + "output":{"shape":"PendingTaskCount"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "CountPendingDecisionTasks":{ + "name":"CountPendingDecisionTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CountPendingDecisionTasksInput"}, + "output":{"shape":"PendingTaskCount"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DeprecateActivityType":{ + "name":"DeprecateActivityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeprecateActivityTypeInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + 
"shape":"TypeDeprecatedFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DeprecateDomain":{ + "name":"DeprecateDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeprecateDomainInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"DomainDeprecatedFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DeprecateWorkflowType":{ + "name":"DeprecateWorkflowType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeprecateWorkflowTypeInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"TypeDeprecatedFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DescribeActivityType":{ + "name":"DescribeActivityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeActivityTypeInput"}, + "output":{"shape":"ActivityTypeDetail"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DescribeDomain":{ + "name":"DescribeDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDomainInput"}, + "output":{"shape":"DomainDetail"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DescribeWorkflowExecution":{ + "name":"DescribeWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkflowExecutionInput"}, + "output":{"shape":"WorkflowExecutionDetail"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "DescribeWorkflowType":{ + "name":"DescribeWorkflowType", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkflowTypeInput"}, + "output":{"shape":"WorkflowTypeDetail"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "GetWorkflowExecutionHistory":{ + "name":"GetWorkflowExecutionHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetWorkflowExecutionHistoryInput"}, + "output":{"shape":"History"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "ListActivityTypes":{ + "name":"ListActivityTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListActivityTypesInput"}, + "output":{"shape":"ActivityTypeInfos"}, + "errors":[ + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"UnknownResourceFault", + "exception":true + } + ] + }, + "ListClosedWorkflowExecutions":{ + "name":"ListClosedWorkflowExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListClosedWorkflowExecutionsInput"}, + "output":{"shape":"WorkflowExecutionInfos"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDomainsInput"}, + "output":{"shape":"DomainInfos"}, + "errors":[ + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "ListOpenWorkflowExecutions":{ + "name":"ListOpenWorkflowExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOpenWorkflowExecutionsInput"}, + "output":{"shape":"WorkflowExecutionInfos"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + 
"shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "ListWorkflowTypes":{ + "name":"ListWorkflowTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWorkflowTypesInput"}, + "output":{"shape":"WorkflowTypeInfos"}, + "errors":[ + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"UnknownResourceFault", + "exception":true + } + ] + }, + "PollForActivityTask":{ + "name":"PollForActivityTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForActivityTaskInput"}, + "output":{"shape":"ActivityTask"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + } + ] + }, + "PollForDecisionTask":{ + "name":"PollForDecisionTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollForDecisionTaskInput"}, + "output":{"shape":"DecisionTask"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + } + ] + }, + "RecordActivityTaskHeartbeat":{ + "name":"RecordActivityTaskHeartbeat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RecordActivityTaskHeartbeatInput"}, + "output":{"shape":"ActivityTaskStatus"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RegisterActivityType":{ + "name":"RegisterActivityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterActivityTypeInput"}, + "errors":[ + { + "shape":"TypeAlreadyExistsFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + }, + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + 
"shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RegisterDomain":{ + "name":"RegisterDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterDomainInput"}, + "errors":[ + { + "shape":"DomainAlreadyExistsFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RegisterWorkflowType":{ + "name":"RegisterWorkflowType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterWorkflowTypeInput"}, + "errors":[ + { + "shape":"TypeAlreadyExistsFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + }, + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RequestCancelWorkflowExecution":{ + "name":"RequestCancelWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RequestCancelWorkflowExecutionInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RespondActivityTaskCanceled":{ + "name":"RespondActivityTaskCanceled", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RespondActivityTaskCanceledInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RespondActivityTaskCompleted":{ + "name":"RespondActivityTaskCompleted", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RespondActivityTaskCompletedInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RespondActivityTaskFailed":{ + "name":"RespondActivityTaskFailed", + "http":{ + "method":"POST", + 
"requestUri":"/" + }, + "input":{"shape":"RespondActivityTaskFailedInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "RespondDecisionTaskCompleted":{ + "name":"RespondDecisionTaskCompleted", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RespondDecisionTaskCompletedInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "SignalWorkflowExecution":{ + "name":"SignalWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SignalWorkflowExecutionInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + }, + "StartWorkflowExecution":{ + "name":"StartWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartWorkflowExecutionInput"}, + "output":{"shape":"Run"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"TypeDeprecatedFault", + "exception":true + }, + { + "shape":"WorkflowExecutionAlreadyStartedFault", + "exception":true + }, + { + "shape":"LimitExceededFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + }, + { + "shape":"DefaultUndefinedFault", + "exception":true + } + ] + }, + "TerminateWorkflowExecution":{ + "name":"TerminateWorkflowExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateWorkflowExecutionInput"}, + "errors":[ + { + "shape":"UnknownResourceFault", + "exception":true + }, + { + "shape":"OperationNotPermittedFault", + "exception":true + } + ] + } + }, + "shapes":{ + "ActivityId":{ + "type":"string", + "min":1, + "max":256 + }, + "ActivityTask":{ + "type":"structure", + "required":[ + "taskToken", + "activityId", + 
"startedEventId", + "workflowExecution", + "activityType" + ], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "activityId":{"shape":"ActivityId"}, + "startedEventId":{"shape":"EventId"}, + "workflowExecution":{"shape":"WorkflowExecution"}, + "activityType":{"shape":"ActivityType"}, + "input":{"shape":"Data"} + } + }, + "ActivityTaskCancelRequestedEventAttributes":{ + "type":"structure", + "required":[ + "decisionTaskCompletedEventId", + "activityId" + ], + "members":{ + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "activityId":{"shape":"ActivityId"} + } + }, + "ActivityTaskCanceledEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "details":{"shape":"Data"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "latestCancelRequestedEventId":{"shape":"EventId"} + } + }, + "ActivityTaskCompletedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "result":{"shape":"Data"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ActivityTaskFailedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ActivityTaskScheduledEventAttributes":{ + "type":"structure", + "required":[ + "activityType", + "activityId", + "taskList", + "decisionTaskCompletedEventId" + ], + "members":{ + "activityType":{"shape":"ActivityType"}, + "activityId":{"shape":"ActivityId"}, + "input":{"shape":"Data"}, + "control":{"shape":"Data"}, + "scheduleToStartTimeout":{"shape":"DurationInSecondsOptional"}, + "scheduleToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + 
"taskPriority":{"shape":"TaskPriority"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "heartbeatTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "ActivityTaskStartedEventAttributes":{ + "type":"structure", + "required":["scheduledEventId"], + "members":{ + "identity":{"shape":"Identity"}, + "scheduledEventId":{"shape":"EventId"} + } + }, + "ActivityTaskStatus":{ + "type":"structure", + "required":["cancelRequested"], + "members":{ + "cancelRequested":{"shape":"Canceled"} + } + }, + "ActivityTaskTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "timeoutType", + "scheduledEventId", + "startedEventId" + ], + "members":{ + "timeoutType":{"shape":"ActivityTaskTimeoutType"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "details":{"shape":"LimitedData"} + } + }, + "ActivityTaskTimeoutType":{ + "type":"string", + "enum":[ + "START_TO_CLOSE", + "SCHEDULE_TO_START", + "SCHEDULE_TO_CLOSE", + "HEARTBEAT" + ] + }, + "ActivityType":{ + "type":"structure", + "required":[ + "name", + "version" + ], + "members":{ + "name":{"shape":"Name"}, + "version":{"shape":"Version"} + } + }, + "ActivityTypeConfiguration":{ + "type":"structure", + "members":{ + "defaultTaskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskHeartbeatTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskList":{"shape":"TaskList"}, + "defaultTaskPriority":{"shape":"TaskPriority"}, + "defaultTaskScheduleToStartTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskScheduleToCloseTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "ActivityTypeDetail":{ + "type":"structure", + "required":[ + "typeInfo", + "configuration" + ], + "members":{ + "typeInfo":{"shape":"ActivityTypeInfo"}, + "configuration":{"shape":"ActivityTypeConfiguration"} + } + }, + "ActivityTypeInfo":{ + "type":"structure", + "required":[ + "activityType", + "status", + "creationDate" + ], + "members":{ + 
"activityType":{"shape":"ActivityType"}, + "status":{"shape":"RegistrationStatus"}, + "description":{"shape":"Description"}, + "creationDate":{"shape":"Timestamp"}, + "deprecationDate":{"shape":"Timestamp"} + } + }, + "ActivityTypeInfoList":{ + "type":"list", + "member":{"shape":"ActivityTypeInfo"} + }, + "ActivityTypeInfos":{ + "type":"structure", + "required":["typeInfos"], + "members":{ + "typeInfos":{"shape":"ActivityTypeInfoList"}, + "nextPageToken":{"shape":"PageToken"} + } + }, + "Arn":{ + "type":"string", + "min":1, + "max":1224 + }, + "CancelTimerDecisionAttributes":{ + "type":"structure", + "required":["timerId"], + "members":{ + "timerId":{"shape":"TimerId"} + } + }, + "CancelTimerFailedCause":{ + "type":"string", + "enum":[ + "TIMER_ID_UNKNOWN", + "OPERATION_NOT_PERMITTED" + ] + }, + "CancelTimerFailedEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "cause":{"shape":"CancelTimerFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "CancelWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "members":{ + "details":{"shape":"Data"} + } + }, + "CancelWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNHANDLED_DECISION", + "OPERATION_NOT_PERMITTED" + ] + }, + "CancelWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "cause":{"shape":"CancelWorkflowExecutionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "Canceled":{"type":"boolean"}, + "CauseMessage":{ + "type":"string", + "max":1728 + }, + "ChildPolicy":{ + "type":"string", + "enum":[ + "TERMINATE", + "REQUEST_CANCEL", + "ABANDON" + ] + }, + "ChildWorkflowExecutionCanceledEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId", + "startedEventId" + ], + 
"members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "details":{"shape":"Data"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionCompletedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "result":{"shape":"Data"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionStartedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "initiatedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionTerminatedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "ChildWorkflowExecutionTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "workflowType", + "timeoutType", + "initiatedEventId", + "startedEventId" + ], + "members":{ + 
"workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "timeoutType":{"shape":"WorkflowExecutionTimeoutType"}, + "initiatedEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "CloseStatus":{ + "type":"string", + "enum":[ + "COMPLETED", + "FAILED", + "CANCELED", + "TERMINATED", + "CONTINUED_AS_NEW", + "TIMED_OUT" + ] + }, + "CloseStatusFilter":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{"shape":"CloseStatus"} + } + }, + "CompleteWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "members":{ + "result":{"shape":"Data"} + } + }, + "CompleteWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNHANDLED_DECISION", + "OPERATION_NOT_PERMITTED" + ] + }, + "CompleteWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "cause":{"shape":"CompleteWorkflowExecutionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "ContinueAsNewWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "members":{ + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "tagList":{"shape":"TagList"}, + "workflowTypeVersion":{"shape":"Version"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "ContinueAsNewWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNHANDLED_DECISION", + "WORKFLOW_TYPE_DEPRECATED", + "WORKFLOW_TYPE_DOES_NOT_EXIST", + "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_TASK_LIST_UNDEFINED", + "DEFAULT_CHILD_POLICY_UNDEFINED", + "CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED", + "OPERATION_NOT_PERMITTED" + ] + }, + 
"ContinueAsNewWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "cause":{"shape":"ContinueAsNewWorkflowExecutionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "Count":{ + "type":"integer", + "min":0 + }, + "CountClosedWorkflowExecutionsInput":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{"shape":"DomainName"}, + "startTimeFilter":{"shape":"ExecutionTimeFilter"}, + "closeTimeFilter":{"shape":"ExecutionTimeFilter"}, + "executionFilter":{"shape":"WorkflowExecutionFilter"}, + "typeFilter":{"shape":"WorkflowTypeFilter"}, + "tagFilter":{"shape":"TagFilter"}, + "closeStatusFilter":{"shape":"CloseStatusFilter"} + } + }, + "CountOpenWorkflowExecutionsInput":{ + "type":"structure", + "required":[ + "domain", + "startTimeFilter" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "startTimeFilter":{"shape":"ExecutionTimeFilter"}, + "typeFilter":{"shape":"WorkflowTypeFilter"}, + "tagFilter":{"shape":"TagFilter"}, + "executionFilter":{"shape":"WorkflowExecutionFilter"} + } + }, + "CountPendingActivityTasksInput":{ + "type":"structure", + "required":[ + "domain", + "taskList" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "taskList":{"shape":"TaskList"} + } + }, + "CountPendingDecisionTasksInput":{ + "type":"structure", + "required":[ + "domain", + "taskList" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "taskList":{"shape":"TaskList"} + } + }, + "Data":{ + "type":"string", + "max":32768 + }, + "Decision":{ + "type":"structure", + "required":["decisionType"], + "members":{ + "decisionType":{"shape":"DecisionType"}, + "scheduleActivityTaskDecisionAttributes":{"shape":"ScheduleActivityTaskDecisionAttributes"}, + "requestCancelActivityTaskDecisionAttributes":{"shape":"RequestCancelActivityTaskDecisionAttributes"}, + 
"completeWorkflowExecutionDecisionAttributes":{"shape":"CompleteWorkflowExecutionDecisionAttributes"}, + "failWorkflowExecutionDecisionAttributes":{"shape":"FailWorkflowExecutionDecisionAttributes"}, + "cancelWorkflowExecutionDecisionAttributes":{"shape":"CancelWorkflowExecutionDecisionAttributes"}, + "continueAsNewWorkflowExecutionDecisionAttributes":{"shape":"ContinueAsNewWorkflowExecutionDecisionAttributes"}, + "recordMarkerDecisionAttributes":{"shape":"RecordMarkerDecisionAttributes"}, + "startTimerDecisionAttributes":{"shape":"StartTimerDecisionAttributes"}, + "cancelTimerDecisionAttributes":{"shape":"CancelTimerDecisionAttributes"}, + "signalExternalWorkflowExecutionDecisionAttributes":{"shape":"SignalExternalWorkflowExecutionDecisionAttributes"}, + "requestCancelExternalWorkflowExecutionDecisionAttributes":{"shape":"RequestCancelExternalWorkflowExecutionDecisionAttributes"}, + "startChildWorkflowExecutionDecisionAttributes":{"shape":"StartChildWorkflowExecutionDecisionAttributes"}, + "scheduleLambdaFunctionDecisionAttributes":{"shape":"ScheduleLambdaFunctionDecisionAttributes"} + } + }, + "DecisionList":{ + "type":"list", + "member":{"shape":"Decision"} + }, + "DecisionTask":{ + "type":"structure", + "required":[ + "taskToken", + "startedEventId", + "workflowExecution", + "workflowType", + "events" + ], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "startedEventId":{"shape":"EventId"}, + "workflowExecution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "events":{"shape":"HistoryEventList"}, + "nextPageToken":{"shape":"PageToken"}, + "previousStartedEventId":{"shape":"EventId"} + } + }, + "DecisionTaskCompletedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "executionContext":{"shape":"Data"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "DecisionTaskScheduledEventAttributes":{ + "type":"structure", + 
"required":["taskList"], + "members":{ + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "DecisionTaskStartedEventAttributes":{ + "type":"structure", + "required":["scheduledEventId"], + "members":{ + "identity":{"shape":"Identity"}, + "scheduledEventId":{"shape":"EventId"} + } + }, + "DecisionTaskTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "timeoutType", + "scheduledEventId", + "startedEventId" + ], + "members":{ + "timeoutType":{"shape":"DecisionTaskTimeoutType"}, + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "DecisionTaskTimeoutType":{ + "type":"string", + "enum":["START_TO_CLOSE"] + }, + "DecisionType":{ + "type":"string", + "enum":[ + "ScheduleActivityTask", + "RequestCancelActivityTask", + "CompleteWorkflowExecution", + "FailWorkflowExecution", + "CancelWorkflowExecution", + "ContinueAsNewWorkflowExecution", + "RecordMarker", + "StartTimer", + "CancelTimer", + "SignalExternalWorkflowExecution", + "RequestCancelExternalWorkflowExecution", + "StartChildWorkflowExecution", + "ScheduleLambdaFunction" + ] + }, + "DefaultUndefinedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DeprecateActivityTypeInput":{ + "type":"structure", + "required":[ + "domain", + "activityType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "activityType":{"shape":"ActivityType"} + } + }, + "DeprecateDomainInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"DomainName"} + } + }, + "DeprecateWorkflowTypeInput":{ + "type":"structure", + "required":[ + "domain", + "workflowType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowType":{"shape":"WorkflowType"} + } + }, + "DescribeActivityTypeInput":{ + "type":"structure", + "required":[ + "domain", + "activityType" + ], + "members":{ + 
"domain":{"shape":"DomainName"}, + "activityType":{"shape":"ActivityType"} + } + }, + "DescribeDomainInput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"DomainName"} + } + }, + "DescribeWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "execution" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "execution":{"shape":"WorkflowExecution"} + } + }, + "DescribeWorkflowTypeInput":{ + "type":"structure", + "required":[ + "domain", + "workflowType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowType":{"shape":"WorkflowType"} + } + }, + "Description":{ + "type":"string", + "max":1024 + }, + "DomainAlreadyExistsFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DomainConfiguration":{ + "type":"structure", + "required":["workflowExecutionRetentionPeriodInDays"], + "members":{ + "workflowExecutionRetentionPeriodInDays":{"shape":"DurationInDays"} + } + }, + "DomainDeprecatedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "DomainDetail":{ + "type":"structure", + "required":[ + "domainInfo", + "configuration" + ], + "members":{ + "domainInfo":{"shape":"DomainInfo"}, + "configuration":{"shape":"DomainConfiguration"} + } + }, + "DomainInfo":{ + "type":"structure", + "required":[ + "name", + "status" + ], + "members":{ + "name":{"shape":"DomainName"}, + "status":{"shape":"RegistrationStatus"}, + "description":{"shape":"Description"} + } + }, + "DomainInfoList":{ + "type":"list", + "member":{"shape":"DomainInfo"} + }, + "DomainInfos":{ + "type":"structure", + "required":["domainInfos"], + "members":{ + "domainInfos":{"shape":"DomainInfoList"}, + "nextPageToken":{"shape":"PageToken"} + } + }, + "DomainName":{ + "type":"string", + "min":1, + "max":256 + }, + "DurationInDays":{ + "type":"string", + "min":1, + "max":8 + }, + "DurationInSeconds":{ + "type":"string", + "min":1, 
+ "max":8 + }, + "DurationInSecondsOptional":{ + "type":"string", + "max":8 + }, + "ErrorMessage":{"type":"string"}, + "EventId":{"type":"long"}, + "EventType":{ + "type":"string", + "enum":[ + "WorkflowExecutionStarted", + "WorkflowExecutionCancelRequested", + "WorkflowExecutionCompleted", + "CompleteWorkflowExecutionFailed", + "WorkflowExecutionFailed", + "FailWorkflowExecutionFailed", + "WorkflowExecutionTimedOut", + "WorkflowExecutionCanceled", + "CancelWorkflowExecutionFailed", + "WorkflowExecutionContinuedAsNew", + "ContinueAsNewWorkflowExecutionFailed", + "WorkflowExecutionTerminated", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + "DecisionTaskTimedOut", + "ActivityTaskScheduled", + "ScheduleActivityTaskFailed", + "ActivityTaskStarted", + "ActivityTaskCompleted", + "ActivityTaskFailed", + "ActivityTaskTimedOut", + "ActivityTaskCanceled", + "ActivityTaskCancelRequested", + "RequestCancelActivityTaskFailed", + "WorkflowExecutionSignaled", + "MarkerRecorded", + "RecordMarkerFailed", + "TimerStarted", + "StartTimerFailed", + "TimerFired", + "TimerCanceled", + "CancelTimerFailed", + "StartChildWorkflowExecutionInitiated", + "StartChildWorkflowExecutionFailed", + "ChildWorkflowExecutionStarted", + "ChildWorkflowExecutionCompleted", + "ChildWorkflowExecutionFailed", + "ChildWorkflowExecutionTimedOut", + "ChildWorkflowExecutionCanceled", + "ChildWorkflowExecutionTerminated", + "SignalExternalWorkflowExecutionInitiated", + "SignalExternalWorkflowExecutionFailed", + "ExternalWorkflowExecutionSignaled", + "RequestCancelExternalWorkflowExecutionInitiated", + "RequestCancelExternalWorkflowExecutionFailed", + "ExternalWorkflowExecutionCancelRequested", + "LambdaFunctionScheduled", + "LambdaFunctionStarted", + "LambdaFunctionCompleted", + "LambdaFunctionFailed", + "LambdaFunctionTimedOut", + "ScheduleLambdaFunctionFailed", + "StartLambdaFunctionFailed" + ] + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "OPEN", + "CLOSED" + ] + 
}, + "ExecutionTimeFilter":{ + "type":"structure", + "required":["oldestDate"], + "members":{ + "oldestDate":{"shape":"Timestamp"}, + "latestDate":{"shape":"Timestamp"} + } + }, + "ExternalWorkflowExecutionCancelRequestedEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "initiatedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "initiatedEventId":{"shape":"EventId"} + } + }, + "ExternalWorkflowExecutionSignaledEventAttributes":{ + "type":"structure", + "required":[ + "workflowExecution", + "initiatedEventId" + ], + "members":{ + "workflowExecution":{"shape":"WorkflowExecution"}, + "initiatedEventId":{"shape":"EventId"} + } + }, + "FailWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "members":{ + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"} + } + }, + "FailWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNHANDLED_DECISION", + "OPERATION_NOT_PERMITTED" + ] + }, + "FailWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "cause":{"shape":"FailWorkflowExecutionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "FailureReason":{ + "type":"string", + "max":256 + }, + "FunctionId":{ + "type":"string", + "min":1, + "max":256 + }, + "FunctionInput":{ + "type":"string", + "min":1, + "max":32768 + }, + "FunctionName":{ + "type":"string", + "min":1, + "max":64 + }, + "GetWorkflowExecutionHistoryInput":{ + "type":"structure", + "required":[ + "domain", + "execution" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "execution":{"shape":"WorkflowExecution"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "History":{ + "type":"structure", + "required":["events"], + "members":{ + "events":{"shape":"HistoryEventList"}, + 
"nextPageToken":{"shape":"PageToken"} + } + }, + "HistoryEvent":{ + "type":"structure", + "required":[ + "eventTimestamp", + "eventType", + "eventId" + ], + "members":{ + "eventTimestamp":{"shape":"Timestamp"}, + "eventType":{"shape":"EventType"}, + "eventId":{"shape":"EventId"}, + "workflowExecutionStartedEventAttributes":{"shape":"WorkflowExecutionStartedEventAttributes"}, + "workflowExecutionCompletedEventAttributes":{"shape":"WorkflowExecutionCompletedEventAttributes"}, + "completeWorkflowExecutionFailedEventAttributes":{"shape":"CompleteWorkflowExecutionFailedEventAttributes"}, + "workflowExecutionFailedEventAttributes":{"shape":"WorkflowExecutionFailedEventAttributes"}, + "failWorkflowExecutionFailedEventAttributes":{"shape":"FailWorkflowExecutionFailedEventAttributes"}, + "workflowExecutionTimedOutEventAttributes":{"shape":"WorkflowExecutionTimedOutEventAttributes"}, + "workflowExecutionCanceledEventAttributes":{"shape":"WorkflowExecutionCanceledEventAttributes"}, + "cancelWorkflowExecutionFailedEventAttributes":{"shape":"CancelWorkflowExecutionFailedEventAttributes"}, + "workflowExecutionContinuedAsNewEventAttributes":{"shape":"WorkflowExecutionContinuedAsNewEventAttributes"}, + "continueAsNewWorkflowExecutionFailedEventAttributes":{"shape":"ContinueAsNewWorkflowExecutionFailedEventAttributes"}, + "workflowExecutionTerminatedEventAttributes":{"shape":"WorkflowExecutionTerminatedEventAttributes"}, + "workflowExecutionCancelRequestedEventAttributes":{"shape":"WorkflowExecutionCancelRequestedEventAttributes"}, + "decisionTaskScheduledEventAttributes":{"shape":"DecisionTaskScheduledEventAttributes"}, + "decisionTaskStartedEventAttributes":{"shape":"DecisionTaskStartedEventAttributes"}, + "decisionTaskCompletedEventAttributes":{"shape":"DecisionTaskCompletedEventAttributes"}, + "decisionTaskTimedOutEventAttributes":{"shape":"DecisionTaskTimedOutEventAttributes"}, + "activityTaskScheduledEventAttributes":{"shape":"ActivityTaskScheduledEventAttributes"}, + 
"activityTaskStartedEventAttributes":{"shape":"ActivityTaskStartedEventAttributes"}, + "activityTaskCompletedEventAttributes":{"shape":"ActivityTaskCompletedEventAttributes"}, + "activityTaskFailedEventAttributes":{"shape":"ActivityTaskFailedEventAttributes"}, + "activityTaskTimedOutEventAttributes":{"shape":"ActivityTaskTimedOutEventAttributes"}, + "activityTaskCanceledEventAttributes":{"shape":"ActivityTaskCanceledEventAttributes"}, + "activityTaskCancelRequestedEventAttributes":{"shape":"ActivityTaskCancelRequestedEventAttributes"}, + "workflowExecutionSignaledEventAttributes":{"shape":"WorkflowExecutionSignaledEventAttributes"}, + "markerRecordedEventAttributes":{"shape":"MarkerRecordedEventAttributes"}, + "recordMarkerFailedEventAttributes":{"shape":"RecordMarkerFailedEventAttributes"}, + "timerStartedEventAttributes":{"shape":"TimerStartedEventAttributes"}, + "timerFiredEventAttributes":{"shape":"TimerFiredEventAttributes"}, + "timerCanceledEventAttributes":{"shape":"TimerCanceledEventAttributes"}, + "startChildWorkflowExecutionInitiatedEventAttributes":{"shape":"StartChildWorkflowExecutionInitiatedEventAttributes"}, + "childWorkflowExecutionStartedEventAttributes":{"shape":"ChildWorkflowExecutionStartedEventAttributes"}, + "childWorkflowExecutionCompletedEventAttributes":{"shape":"ChildWorkflowExecutionCompletedEventAttributes"}, + "childWorkflowExecutionFailedEventAttributes":{"shape":"ChildWorkflowExecutionFailedEventAttributes"}, + "childWorkflowExecutionTimedOutEventAttributes":{"shape":"ChildWorkflowExecutionTimedOutEventAttributes"}, + "childWorkflowExecutionCanceledEventAttributes":{"shape":"ChildWorkflowExecutionCanceledEventAttributes"}, + "childWorkflowExecutionTerminatedEventAttributes":{"shape":"ChildWorkflowExecutionTerminatedEventAttributes"}, + "signalExternalWorkflowExecutionInitiatedEventAttributes":{"shape":"SignalExternalWorkflowExecutionInitiatedEventAttributes"}, + 
"externalWorkflowExecutionSignaledEventAttributes":{"shape":"ExternalWorkflowExecutionSignaledEventAttributes"}, + "signalExternalWorkflowExecutionFailedEventAttributes":{"shape":"SignalExternalWorkflowExecutionFailedEventAttributes"}, + "externalWorkflowExecutionCancelRequestedEventAttributes":{"shape":"ExternalWorkflowExecutionCancelRequestedEventAttributes"}, + "requestCancelExternalWorkflowExecutionInitiatedEventAttributes":{"shape":"RequestCancelExternalWorkflowExecutionInitiatedEventAttributes"}, + "requestCancelExternalWorkflowExecutionFailedEventAttributes":{"shape":"RequestCancelExternalWorkflowExecutionFailedEventAttributes"}, + "scheduleActivityTaskFailedEventAttributes":{"shape":"ScheduleActivityTaskFailedEventAttributes"}, + "requestCancelActivityTaskFailedEventAttributes":{"shape":"RequestCancelActivityTaskFailedEventAttributes"}, + "startTimerFailedEventAttributes":{"shape":"StartTimerFailedEventAttributes"}, + "cancelTimerFailedEventAttributes":{"shape":"CancelTimerFailedEventAttributes"}, + "startChildWorkflowExecutionFailedEventAttributes":{"shape":"StartChildWorkflowExecutionFailedEventAttributes"}, + "lambdaFunctionScheduledEventAttributes":{"shape":"LambdaFunctionScheduledEventAttributes"}, + "lambdaFunctionStartedEventAttributes":{"shape":"LambdaFunctionStartedEventAttributes"}, + "lambdaFunctionCompletedEventAttributes":{"shape":"LambdaFunctionCompletedEventAttributes"}, + "lambdaFunctionFailedEventAttributes":{"shape":"LambdaFunctionFailedEventAttributes"}, + "lambdaFunctionTimedOutEventAttributes":{"shape":"LambdaFunctionTimedOutEventAttributes"}, + "scheduleLambdaFunctionFailedEventAttributes":{"shape":"ScheduleLambdaFunctionFailedEventAttributes"}, + "startLambdaFunctionFailedEventAttributes":{"shape":"StartLambdaFunctionFailedEventAttributes"} + } + }, + "HistoryEventList":{ + "type":"list", + "member":{"shape":"HistoryEvent"} + }, + "Identity":{ + "type":"string", + "max":256 + }, + "LambdaFunctionCompletedEventAttributes":{ + 
"type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "result":{"shape":"Data"} + } + }, + "LambdaFunctionFailedEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"} + } + }, + "LambdaFunctionScheduledEventAttributes":{ + "type":"structure", + "required":[ + "id", + "name", + "decisionTaskCompletedEventId" + ], + "members":{ + "id":{"shape":"FunctionId"}, + "name":{"shape":"FunctionName"}, + "input":{"shape":"FunctionInput"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "LambdaFunctionStartedEventAttributes":{ + "type":"structure", + "required":["scheduledEventId"], + "members":{ + "scheduledEventId":{"shape":"EventId"} + } + }, + "LambdaFunctionTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "scheduledEventId", + "startedEventId" + ], + "members":{ + "scheduledEventId":{"shape":"EventId"}, + "startedEventId":{"shape":"EventId"}, + "timeoutType":{"shape":"LambdaFunctionTimeoutType"} + } + }, + "LambdaFunctionTimeoutType":{ + "type":"string", + "enum":["START_TO_CLOSE"] + }, + "LimitExceededFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "LimitedData":{ + "type":"string", + "max":2048 + }, + "ListActivityTypesInput":{ + "type":"structure", + "required":[ + "domain", + "registrationStatus" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "name":{"shape":"Name"}, + "registrationStatus":{"shape":"RegistrationStatus"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + 
"ListClosedWorkflowExecutionsInput":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{"shape":"DomainName"}, + "startTimeFilter":{"shape":"ExecutionTimeFilter"}, + "closeTimeFilter":{"shape":"ExecutionTimeFilter"}, + "executionFilter":{"shape":"WorkflowExecutionFilter"}, + "closeStatusFilter":{"shape":"CloseStatusFilter"}, + "typeFilter":{"shape":"WorkflowTypeFilter"}, + "tagFilter":{"shape":"TagFilter"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "ListDomainsInput":{ + "type":"structure", + "required":["registrationStatus"], + "members":{ + "nextPageToken":{"shape":"PageToken"}, + "registrationStatus":{"shape":"RegistrationStatus"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "ListOpenWorkflowExecutionsInput":{ + "type":"structure", + "required":[ + "domain", + "startTimeFilter" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "startTimeFilter":{"shape":"ExecutionTimeFilter"}, + "typeFilter":{"shape":"WorkflowTypeFilter"}, + "tagFilter":{"shape":"TagFilter"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"}, + "executionFilter":{"shape":"WorkflowExecutionFilter"} + } + }, + "ListWorkflowTypesInput":{ + "type":"structure", + "required":[ + "domain", + "registrationStatus" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "name":{"shape":"Name"}, + "registrationStatus":{"shape":"RegistrationStatus"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "MarkerName":{ + "type":"string", + "min":1, + "max":256 + }, + "MarkerRecordedEventAttributes":{ + "type":"structure", + "required":[ + "markerName", + "decisionTaskCompletedEventId" + ], + "members":{ + "markerName":{"shape":"MarkerName"}, + "details":{"shape":"Data"}, + 
"decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "Name":{ + "type":"string", + "min":1, + "max":256 + }, + "OpenDecisionTasksCount":{ + "type":"integer", + "min":0, + "max":1 + }, + "OperationNotPermittedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "PageSize":{ + "type":"integer", + "min":0, + "max":1000 + }, + "PageToken":{ + "type":"string", + "max":2048 + }, + "PendingTaskCount":{ + "type":"structure", + "required":["count"], + "members":{ + "count":{"shape":"Count"}, + "truncated":{"shape":"Truncated"} + } + }, + "PollForActivityTaskInput":{ + "type":"structure", + "required":[ + "domain", + "taskList" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "taskList":{"shape":"TaskList"}, + "identity":{"shape":"Identity"} + } + }, + "PollForDecisionTaskInput":{ + "type":"structure", + "required":[ + "domain", + "taskList" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "taskList":{"shape":"TaskList"}, + "identity":{"shape":"Identity"}, + "nextPageToken":{"shape":"PageToken"}, + "maximumPageSize":{"shape":"PageSize"}, + "reverseOrder":{"shape":"ReverseOrder"} + } + }, + "RecordActivityTaskHeartbeatInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "details":{"shape":"LimitedData"} + } + }, + "RecordMarkerDecisionAttributes":{ + "type":"structure", + "required":["markerName"], + "members":{ + "markerName":{"shape":"MarkerName"}, + "details":{"shape":"Data"} + } + }, + "RecordMarkerFailedCause":{ + "type":"string", + "enum":["OPERATION_NOT_PERMITTED"] + }, + "RecordMarkerFailedEventAttributes":{ + "type":"structure", + "required":[ + "markerName", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "markerName":{"shape":"MarkerName"}, + "cause":{"shape":"RecordMarkerFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "RegisterActivityTypeInput":{ + "type":"structure", + 
"required":[ + "domain", + "name", + "version" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "name":{"shape":"Name"}, + "version":{"shape":"Version"}, + "description":{"shape":"Description"}, + "defaultTaskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskHeartbeatTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskList":{"shape":"TaskList"}, + "defaultTaskPriority":{"shape":"TaskPriority"}, + "defaultTaskScheduleToStartTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskScheduleToCloseTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "RegisterDomainInput":{ + "type":"structure", + "required":[ + "name", + "workflowExecutionRetentionPeriodInDays" + ], + "members":{ + "name":{"shape":"DomainName"}, + "description":{"shape":"Description"}, + "workflowExecutionRetentionPeriodInDays":{"shape":"DurationInDays"} + } + }, + "RegisterWorkflowTypeInput":{ + "type":"structure", + "required":[ + "domain", + "name", + "version" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "name":{"shape":"Name"}, + "version":{"shape":"Version"}, + "description":{"shape":"Description"}, + "defaultTaskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultExecutionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskList":{"shape":"TaskList"}, + "defaultTaskPriority":{"shape":"TaskPriority"}, + "defaultChildPolicy":{"shape":"ChildPolicy"}, + "defaultLambdaRole":{"shape":"Arn"} + } + }, + "RegistrationStatus":{ + "type":"string", + "enum":[ + "REGISTERED", + "DEPRECATED" + ] + }, + "RequestCancelActivityTaskDecisionAttributes":{ + "type":"structure", + "required":["activityId"], + "members":{ + "activityId":{"shape":"ActivityId"} + } + }, + "RequestCancelActivityTaskFailedCause":{ + "type":"string", + "enum":[ + "ACTIVITY_ID_UNKNOWN", + "OPERATION_NOT_PERMITTED" + ] + }, + "RequestCancelActivityTaskFailedEventAttributes":{ + "type":"structure", + "required":[ + "activityId", + "cause", + 
"decisionTaskCompletedEventId" + ], + "members":{ + "activityId":{"shape":"ActivityId"}, + "cause":{"shape":"RequestCancelActivityTaskFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "RequestCancelExternalWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "required":["workflowId"], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "control":{"shape":"Data"} + } + }, + "RequestCancelExternalWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION", + "REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED", + "OPERATION_NOT_PERMITTED" + ] + }, + "RequestCancelExternalWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "cause", + "initiatedEventId", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "cause":{"shape":"RequestCancelExternalWorkflowExecutionFailedCause"}, + "initiatedEventId":{"shape":"EventId"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "RequestCancelWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "workflowId" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"} + } + }, + "RespondActivityTaskCanceledInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "details":{"shape":"Data"} + } + }, + "RespondActivityTaskCompletedInput":{ + "type":"structure", + 
"required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "result":{"shape":"Data"} + } + }, + "RespondActivityTaskFailedInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"} + } + }, + "RespondDecisionTaskCompletedInput":{ + "type":"structure", + "required":["taskToken"], + "members":{ + "taskToken":{"shape":"TaskToken"}, + "decisions":{"shape":"DecisionList"}, + "executionContext":{"shape":"Data"} + } + }, + "ReverseOrder":{"type":"boolean"}, + "Run":{ + "type":"structure", + "members":{ + "runId":{"shape":"RunId"} + } + }, + "RunId":{ + "type":"string", + "min":1, + "max":64 + }, + "RunIdOptional":{ + "type":"string", + "max":64 + }, + "ScheduleActivityTaskDecisionAttributes":{ + "type":"structure", + "required":[ + "activityType", + "activityId" + ], + "members":{ + "activityType":{"shape":"ActivityType"}, + "activityId":{"shape":"ActivityId"}, + "control":{"shape":"Data"}, + "input":{"shape":"Data"}, + "scheduleToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "scheduleToStartTimeout":{"shape":"DurationInSecondsOptional"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "heartbeatTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "ScheduleActivityTaskFailedCause":{ + "type":"string", + "enum":[ + "ACTIVITY_TYPE_DEPRECATED", + "ACTIVITY_TYPE_DOES_NOT_EXIST", + "ACTIVITY_ID_ALREADY_IN_USE", + "OPEN_ACTIVITIES_LIMIT_EXCEEDED", + "ACTIVITY_CREATION_RATE_EXCEEDED", + "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_TASK_LIST_UNDEFINED", + "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED", + "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED", + "OPERATION_NOT_PERMITTED" + ] + }, + "ScheduleActivityTaskFailedEventAttributes":{ + "type":"structure", + "required":[ + "activityType", 
+ "activityId", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "activityType":{"shape":"ActivityType"}, + "activityId":{"shape":"ActivityId"}, + "cause":{"shape":"ScheduleActivityTaskFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "ScheduleLambdaFunctionDecisionAttributes":{ + "type":"structure", + "required":[ + "id", + "name" + ], + "members":{ + "id":{"shape":"FunctionId"}, + "name":{"shape":"FunctionName"}, + "input":{"shape":"FunctionInput"}, + "startToCloseTimeout":{"shape":"DurationInSecondsOptional"} + } + }, + "ScheduleLambdaFunctionFailedCause":{ + "type":"string", + "enum":[ + "ID_ALREADY_IN_USE", + "OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED", + "LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED", + "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" + ] + }, + "ScheduleLambdaFunctionFailedEventAttributes":{ + "type":"structure", + "required":[ + "id", + "name", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "id":{"shape":"FunctionId"}, + "name":{"shape":"FunctionName"}, + "cause":{"shape":"ScheduleLambdaFunctionFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "SignalExternalWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "signalName" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "signalName":{"shape":"SignalName"}, + "input":{"shape":"Data"}, + "control":{"shape":"Data"} + } + }, + "SignalExternalWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION", + "SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED", + "OPERATION_NOT_PERMITTED" + ] + }, + "SignalExternalWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "cause", + "initiatedEventId", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + 
"cause":{"shape":"SignalExternalWorkflowExecutionFailedCause"}, + "initiatedEventId":{"shape":"EventId"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "SignalExternalWorkflowExecutionInitiatedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "signalName", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "signalName":{"shape":"SignalName"}, + "input":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "SignalName":{ + "type":"string", + "min":1, + "max":256 + }, + "SignalWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "workflowId", + "signalName" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "signalName":{"shape":"SignalName"}, + "input":{"shape":"Data"} + } + }, + "StartChildWorkflowExecutionDecisionAttributes":{ + "type":"structure", + "required":[ + "workflowType", + "workflowId" + ], + "members":{ + "workflowType":{"shape":"WorkflowType"}, + "workflowId":{"shape":"WorkflowId"}, + "control":{"shape":"Data"}, + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "tagList":{"shape":"TagList"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "StartChildWorkflowExecutionFailedCause":{ + "type":"string", + "enum":[ + "WORKFLOW_TYPE_DOES_NOT_EXIST", + "WORKFLOW_TYPE_DEPRECATED", + "OPEN_CHILDREN_LIMIT_EXCEEDED", + "OPEN_WORKFLOWS_LIMIT_EXCEEDED", + "CHILD_CREATION_RATE_EXCEEDED", + "WORKFLOW_ALREADY_RUNNING", + "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_TASK_LIST_UNDEFINED", + 
"DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED", + "DEFAULT_CHILD_POLICY_UNDEFINED", + "OPERATION_NOT_PERMITTED" + ] + }, + "StartChildWorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":[ + "workflowType", + "cause", + "workflowId", + "initiatedEventId", + "decisionTaskCompletedEventId" + ], + "members":{ + "workflowType":{"shape":"WorkflowType"}, + "cause":{"shape":"StartChildWorkflowExecutionFailedCause"}, + "workflowId":{"shape":"WorkflowId"}, + "initiatedEventId":{"shape":"EventId"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "control":{"shape":"Data"} + } + }, + "StartChildWorkflowExecutionInitiatedEventAttributes":{ + "type":"structure", + "required":[ + "workflowId", + "workflowType", + "taskList", + "decisionTaskCompletedEventId", + "childPolicy" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "workflowType":{"shape":"WorkflowType"}, + "control":{"shape":"Data"}, + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "childPolicy":{"shape":"ChildPolicy"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "tagList":{"shape":"TagList"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "StartLambdaFunctionFailedCause":{ + "type":"string", + "enum":["ASSUME_ROLE_FAILED"] + }, + "StartLambdaFunctionFailedEventAttributes":{ + "type":"structure", + "members":{ + "scheduledEventId":{"shape":"EventId"}, + "cause":{"shape":"StartLambdaFunctionFailedCause"}, + "message":{"shape":"CauseMessage"} + } + }, + "StartTimerDecisionAttributes":{ + "type":"structure", + "required":[ + "timerId", + "startToFireTimeout" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "control":{"shape":"Data"}, + "startToFireTimeout":{"shape":"DurationInSeconds"} + } + }, + "StartTimerFailedCause":{ + "type":"string", + "enum":[ + 
"TIMER_ID_ALREADY_IN_USE", + "OPEN_TIMERS_LIMIT_EXCEEDED", + "TIMER_CREATION_RATE_EXCEEDED", + "OPERATION_NOT_PERMITTED" + ] + }, + "StartTimerFailedEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "cause", + "decisionTaskCompletedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "cause":{"shape":"StartTimerFailedCause"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "StartWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "workflowId", + "workflowType" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowId":{"shape":"WorkflowId"}, + "workflowType":{"shape":"WorkflowType"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "tagList":{"shape":"TagList"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "Tag":{ + "type":"string", + "min":1, + "max":256 + }, + "TagFilter":{ + "type":"structure", + "required":["tag"], + "members":{ + "tag":{"shape":"Tag"} + } + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":5 + }, + "TaskList":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"Name"} + } + }, + "TaskPriority":{ + "type":"string", + "max":11 + }, + "TaskToken":{ + "type":"string", + "min":1, + "max":1024 + }, + "TerminateReason":{ + "type":"string", + "max":256 + }, + "TerminateWorkflowExecutionInput":{ + "type":"structure", + "required":[ + "domain", + "workflowId" + ], + "members":{ + "domain":{"shape":"DomainName"}, + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunIdOptional"}, + "reason":{"shape":"TerminateReason"}, + "details":{"shape":"Data"}, + "childPolicy":{"shape":"ChildPolicy"} + } + }, + "TimerCanceledEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + 
"startedEventId", + "decisionTaskCompletedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "startedEventId":{"shape":"EventId"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "TimerFiredEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "startedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "startedEventId":{"shape":"EventId"} + } + }, + "TimerId":{ + "type":"string", + "min":1, + "max":256 + }, + "TimerStartedEventAttributes":{ + "type":"structure", + "required":[ + "timerId", + "startToFireTimeout", + "decisionTaskCompletedEventId" + ], + "members":{ + "timerId":{"shape":"TimerId"}, + "control":{"shape":"Data"}, + "startToFireTimeout":{"shape":"DurationInSeconds"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "Timestamp":{"type":"timestamp"}, + "Truncated":{"type":"boolean"}, + "TypeAlreadyExistsFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "TypeDeprecatedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "UnknownResourceFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "Version":{ + "type":"string", + "min":1, + "max":64 + }, + "VersionOptional":{ + "type":"string", + "max":64 + }, + "WorkflowExecution":{ + "type":"structure", + "required":[ + "workflowId", + "runId" + ], + "members":{ + "workflowId":{"shape":"WorkflowId"}, + "runId":{"shape":"RunId"} + } + }, + "WorkflowExecutionAlreadyStartedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "WorkflowExecutionCancelRequestedCause":{ + "type":"string", + "enum":["CHILD_POLICY_APPLIED"] + }, + "WorkflowExecutionCancelRequestedEventAttributes":{ + "type":"structure", + "members":{ + "externalWorkflowExecution":{"shape":"WorkflowExecution"}, + 
"externalInitiatedEventId":{"shape":"EventId"}, + "cause":{"shape":"WorkflowExecutionCancelRequestedCause"} + } + }, + "WorkflowExecutionCanceledEventAttributes":{ + "type":"structure", + "required":["decisionTaskCompletedEventId"], + "members":{ + "details":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "WorkflowExecutionCompletedEventAttributes":{ + "type":"structure", + "required":["decisionTaskCompletedEventId"], + "members":{ + "result":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "WorkflowExecutionConfiguration":{ + "type":"structure", + "required":[ + "taskStartToCloseTimeout", + "executionStartToCloseTimeout", + "taskList", + "childPolicy" + ], + "members":{ + "taskStartToCloseTimeout":{"shape":"DurationInSeconds"}, + "executionStartToCloseTimeout":{"shape":"DurationInSeconds"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "childPolicy":{"shape":"ChildPolicy"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "WorkflowExecutionContinuedAsNewEventAttributes":{ + "type":"structure", + "required":[ + "decisionTaskCompletedEventId", + "newExecutionRunId", + "taskList", + "childPolicy", + "workflowType" + ], + "members":{ + "input":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"}, + "newExecutionRunId":{"shape":"RunId"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskList":{"shape":"TaskList"}, + "taskPriority":{"shape":"TaskPriority"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "tagList":{"shape":"TagList"}, + "workflowType":{"shape":"WorkflowType"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "WorkflowExecutionCount":{ + "type":"structure", + "required":["count"], + "members":{ + "count":{"shape":"Count"}, + "truncated":{"shape":"Truncated"} + } + }, + "WorkflowExecutionDetail":{ + "type":"structure", + "required":[ + "executionInfo", + 
"executionConfiguration", + "openCounts" + ], + "members":{ + "executionInfo":{"shape":"WorkflowExecutionInfo"}, + "executionConfiguration":{"shape":"WorkflowExecutionConfiguration"}, + "openCounts":{"shape":"WorkflowExecutionOpenCounts"}, + "latestActivityTaskTimestamp":{"shape":"Timestamp"}, + "latestExecutionContext":{"shape":"Data"} + } + }, + "WorkflowExecutionFailedEventAttributes":{ + "type":"structure", + "required":["decisionTaskCompletedEventId"], + "members":{ + "reason":{"shape":"FailureReason"}, + "details":{"shape":"Data"}, + "decisionTaskCompletedEventId":{"shape":"EventId"} + } + }, + "WorkflowExecutionFilter":{ + "type":"structure", + "required":["workflowId"], + "members":{ + "workflowId":{"shape":"WorkflowId"} + } + }, + "WorkflowExecutionInfo":{ + "type":"structure", + "required":[ + "execution", + "workflowType", + "startTimestamp", + "executionStatus" + ], + "members":{ + "execution":{"shape":"WorkflowExecution"}, + "workflowType":{"shape":"WorkflowType"}, + "startTimestamp":{"shape":"Timestamp"}, + "closeTimestamp":{"shape":"Timestamp"}, + "executionStatus":{"shape":"ExecutionStatus"}, + "closeStatus":{"shape":"CloseStatus"}, + "parent":{"shape":"WorkflowExecution"}, + "tagList":{"shape":"TagList"}, + "cancelRequested":{"shape":"Canceled"} + } + }, + "WorkflowExecutionInfoList":{ + "type":"list", + "member":{"shape":"WorkflowExecutionInfo"} + }, + "WorkflowExecutionInfos":{ + "type":"structure", + "required":["executionInfos"], + "members":{ + "executionInfos":{"shape":"WorkflowExecutionInfoList"}, + "nextPageToken":{"shape":"PageToken"} + } + }, + "WorkflowExecutionOpenCounts":{ + "type":"structure", + "required":[ + "openActivityTasks", + "openDecisionTasks", + "openTimers", + "openChildWorkflowExecutions" + ], + "members":{ + "openActivityTasks":{"shape":"Count"}, + "openDecisionTasks":{"shape":"OpenDecisionTasksCount"}, + "openTimers":{"shape":"Count"}, + "openChildWorkflowExecutions":{"shape":"Count"}, + 
"openLambdaFunctions":{"shape":"Count"} + } + }, + "WorkflowExecutionSignaledEventAttributes":{ + "type":"structure", + "required":["signalName"], + "members":{ + "signalName":{"shape":"SignalName"}, + "input":{"shape":"Data"}, + "externalWorkflowExecution":{"shape":"WorkflowExecution"}, + "externalInitiatedEventId":{"shape":"EventId"} + } + }, + "WorkflowExecutionStartedEventAttributes":{ + "type":"structure", + "required":[ + "childPolicy", + "taskList", + "workflowType" + ], + "members":{ + "input":{"shape":"Data"}, + "executionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "taskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "childPolicy":{"shape":"ChildPolicy"}, + "taskList":{"shape":"TaskList"}, + "workflowType":{"shape":"WorkflowType"}, + "tagList":{"shape":"TagList"}, + "taskPriority":{"shape":"TaskPriority"}, + "continuedExecutionRunId":{"shape":"RunIdOptional"}, + "parentWorkflowExecution":{"shape":"WorkflowExecution"}, + "parentInitiatedEventId":{"shape":"EventId"}, + "lambdaRole":{"shape":"Arn"} + } + }, + "WorkflowExecutionTerminatedCause":{ + "type":"string", + "enum":[ + "CHILD_POLICY_APPLIED", + "EVENT_LIMIT_EXCEEDED", + "OPERATOR_INITIATED" + ] + }, + "WorkflowExecutionTerminatedEventAttributes":{ + "type":"structure", + "required":["childPolicy"], + "members":{ + "reason":{"shape":"TerminateReason"}, + "details":{"shape":"Data"}, + "childPolicy":{"shape":"ChildPolicy"}, + "cause":{"shape":"WorkflowExecutionTerminatedCause"} + } + }, + "WorkflowExecutionTimedOutEventAttributes":{ + "type":"structure", + "required":[ + "timeoutType", + "childPolicy" + ], + "members":{ + "timeoutType":{"shape":"WorkflowExecutionTimeoutType"}, + "childPolicy":{"shape":"ChildPolicy"} + } + }, + "WorkflowExecutionTimeoutType":{ + "type":"string", + "enum":["START_TO_CLOSE"] + }, + "WorkflowId":{ + "type":"string", + "min":1, + "max":256 + }, + "WorkflowType":{ + "type":"structure", + "required":[ + "name", + "version" + ], + "members":{ + 
"name":{"shape":"Name"}, + "version":{"shape":"Version"} + } + }, + "WorkflowTypeConfiguration":{ + "type":"structure", + "members":{ + "defaultTaskStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultExecutionStartToCloseTimeout":{"shape":"DurationInSecondsOptional"}, + "defaultTaskList":{"shape":"TaskList"}, + "defaultTaskPriority":{"shape":"TaskPriority"}, + "defaultChildPolicy":{"shape":"ChildPolicy"}, + "defaultLambdaRole":{"shape":"Arn"} + } + }, + "WorkflowTypeDetail":{ + "type":"structure", + "required":[ + "typeInfo", + "configuration" + ], + "members":{ + "typeInfo":{"shape":"WorkflowTypeInfo"}, + "configuration":{"shape":"WorkflowTypeConfiguration"} + } + }, + "WorkflowTypeFilter":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"Name"}, + "version":{"shape":"VersionOptional"} + } + }, + "WorkflowTypeInfo":{ + "type":"structure", + "required":[ + "workflowType", + "status", + "creationDate" + ], + "members":{ + "workflowType":{"shape":"WorkflowType"}, + "status":{"shape":"RegistrationStatus"}, + "description":{"shape":"Description"}, + "creationDate":{"shape":"Timestamp"}, + "deprecationDate":{"shape":"Timestamp"} + } + }, + "WorkflowTypeInfoList":{ + "type":"list", + "member":{"shape":"WorkflowTypeInfo"} + }, + "WorkflowTypeInfos":{ + "type":"structure", + "required":["typeInfos"], + "members":{ + "typeInfos":{"shape":"WorkflowTypeInfoList"}, + "nextPageToken":{"shape":"PageToken"} + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/docs-2.json 
2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1695 @@ +{ + "version": "2.0", + "operations": { + "CountClosedWorkflowExecutions": "

    Returns the number of closed workflow executions within the given domain that meet the specified filtering criteria.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.
      • typeFilter.name: String constraint. The key is swf:typeFilter.name.
      • typeFilter.version: String constraint. The key is swf:typeFilter.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "CountOpenWorkflowExecutions": "

    Returns the number of open workflow executions within the given domain that meet the specified filtering criteria.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.
      • typeFilter.name: String constraint. The key is swf:typeFilter.name.
      • typeFilter.version: String constraint. The key is swf:typeFilter.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "CountPendingActivityTasks": "

    Returns the estimated number of activity tasks in the specified task list. The count returned is an approximation and is not guaranteed to be exact. If you specify a task list that no activity task was ever scheduled in, then 0 will be returned.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "CountPendingDecisionTasks": "

    Returns the estimated number of decision tasks in the specified task list. The count returned is an approximation and is not guaranteed to be exact. If you specify a task list that no decision task was ever scheduled in, then 0 will be returned.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DeprecateActivityType": "

    Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated will continue to run.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • activityType.name: String constraint. The key is swf:activityType.name.
      • activityType.version: String constraint. The key is swf:activityType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DeprecateDomain": "

    Deprecates the specified domain. After a domain has been deprecated it cannot be used to create new workflow executions or register new types. However, you can still use visibility actions on this domain. Deprecating a domain also deprecates all activity and workflow types registered in the domain. Executions that were started before the domain was deprecated will continue to run.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DeprecateWorkflowType": "

    Deprecates the specified workflow type. After a workflow type has been deprecated, you cannot create new executions of that type. Executions that were started before the type was deprecated will continue to run. A deprecated workflow type may still be used when calling visibility actions.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • workflowType.name: String constraint. The key is swf:workflowType.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DescribeActivityType": "

    Returns information about the specified activity type. This includes configuration settings provided when the type was registered and other general information about the type.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • activityType.name: String constraint. The key is swf:activityType.name.
      • activityType.version: String constraint. The key is swf:activityType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DescribeDomain": "

    Returns information about the specified domain, including description and status.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DescribeWorkflowExecution": "

    Returns information about the specified workflow execution including its type and some statistics.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "DescribeWorkflowType": "

    Returns information about the specified workflow type. This includes configuration settings specified when the type was registered and other information such as creation date, current status, and so on.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • workflowType.name: String constraint. The key is swf:workflowType.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "GetWorkflowExecutionHistory": "

    Returns the history of the specified workflow execution. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "ListActivityTypes": "

    Returns information about all activities registered in the specified domain that match the specified name and registration status. The result includes information like creation date, current status of the activity, etc. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "ListClosedWorkflowExecutions": "

    Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.
      • typeFilter.name: String constraint. The key is swf:typeFilter.name.
      • typeFilter.version: String constraint. The key is swf:typeFilter.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "ListDomains": "

    Returns the list of domains registered in the account. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains. The element must be set to arn:aws:swf::AccountID:domain/*, where AccountID is the account ID, with no dashes.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "ListOpenWorkflowExecutions": "

    Returns a list of open workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.

    This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagFilter.tag: String constraint. The key is swf:tagFilter.tag.
      • typeFilter.name: String constraint. The key is swf:typeFilter.name.
      • typeFilter.version: String constraint. The key is swf:typeFilter.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "ListWorkflowTypes": "

    Returns information about workflow types in the specified domain. The results may be split into multiple pages that can be retrieved by making the call repeatedly.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "PollForActivityTask": "

    Used by workers to get an ActivityTask from the specified activity taskList. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll will return an empty result. An empty result, in this context, means that an ActivityTask is returned, but that the value of taskToken is an empty string. If a task is returned, the worker should use its type to identify and process it correctly.

    Workers should set their client side socket timeout to at least 70 seconds (10 seconds higher than the maximum time service may hold the poll request).

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "PollForDecisionTask": "

    Used by deciders to get a DecisionTask from the specified decision taskList. A decision task may be returned for any open workflow execution that is using the specified task list. The task includes a paginated view of the history of the workflow execution. The decider should use the workflow type and the history to determine how to properly handle the task.

    This action initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. If no decision task is available in the specified task list before the timeout of 60 seconds expires, an empty result is returned. An empty result, in this context, means that a DecisionTask is returned, but that the value of taskToken is an empty string.

    Deciders should set their client-side socket timeout to at least 70 seconds (10 seconds higher than the timeout). Because the number of workflow history events for a single workflow execution might be very large, the result returned might be split up across a number of pages. To retrieve subsequent pages, make additional calls to PollForDecisionTask using the nextPageToken returned by the initial call. Note that you do not call GetWorkflowExecutionHistory with this nextPageToken. Instead, call PollForDecisionTask again.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the taskList.name parameter by using a Condition element with the swf:taskList.name key to allow the action to access only certain task lists.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RecordActivityTaskHeartbeat": "

    Used by activity workers to report to the service that the ActivityTask represented by the specified taskToken is still making progress. The worker can also (optionally) specify details of the progress, for example percent complete, using the details parameter. This action can also be used by the worker as a mechanism to check if cancellation is being requested for the activity task. If a cancellation is being attempted for the specified task, then the boolean cancelRequested flag returned by the service is set to true.

    This action resets the taskHeartbeatTimeout clock. The taskHeartbeatTimeout is specified in RegisterActivityType.

    This action does not in itself create an event in the workflow execution history. However, if the task times out, the workflow execution history will contain an ActivityTaskTimedOut event that contains the information from the last heartbeat generated by the activity worker.

    The taskStartToCloseTimeout of an activity type is the maximum duration of an activity task, regardless of the number of RecordActivityTaskHeartbeat requests received. The taskStartToCloseTimeout is also specified in RegisterActivityType. This operation is only useful for long-lived activities to report liveliness of the task and to determine if a cancellation is being attempted. If the cancelRequested flag returns true, a cancellation is being attempted. If the worker can cancel the activity, it should respond with RespondActivityTaskCanceled. Otherwise, it should ignore the cancellation request.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RegisterActivityType": "

    Registers a new activity type along with its configuration settings in the specified domain.

    A TypeAlreadyExists fault is returned if the type already exists in the domain. You cannot change any configuration settings of the type after its registration, and it must be registered as a new version.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name.
      • name: String constraint. The key is swf:name.
      • version: String constraint. The key is swf:version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RegisterDomain": "

    Registers a new domain.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • You cannot use an IAM policy to control domain access for this action. The name of the domain being registered is available as the resource of this action.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RegisterWorkflowType": "

    Registers a new workflow type and its configuration settings in the specified domain.

    The retention period for the workflow history is set by the RegisterDomain action.

    If the type already exists, then a TypeAlreadyExists fault is returned. You cannot change the configuration settings of a workflow type once it is registered and it must be registered as a new version.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name.
      • name: String constraint. The key is swf:name.
      • version: String constraint. The key is swf:version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RequestCancelWorkflowExecution": "

    Records a WorkflowExecutionCancelRequested event in the currently running workflow execution identified by the given domain, workflowId, and runId. This logically requests the cancellation of the workflow execution as a whole. It is up to the decider to take appropriate actions when it receives an execution history with this event.

    If the runId is not specified, the WorkflowExecutionCancelRequested event is recorded in the history of the current open workflow execution with the specified workflowId in the domain. Because this action allows the workflow to properly clean up and gracefully close, it should be used instead of TerminateWorkflowExecution when possible.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RespondActivityTaskCanceled": "

    Used by workers to tell the service that the ActivityTask identified by the taskToken was successfully canceled. Additional details can be optionally provided using the details argument.

    These details (if provided) appear in the ActivityTaskCanceled event added to the workflow history.

    Only use this operation if the canceled flag of a RecordActivityTaskHeartbeat request returns true and if the activity can be safely undone or abandoned.

    A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RespondActivityTaskCompleted": "

    Used by workers to tell the service that the ActivityTask identified by the taskToken completed successfully with a result (if provided). The result appears in the ActivityTaskCompleted event in the workflow history.

    If the requested task does not complete successfully, use RespondActivityTaskFailed instead. If the worker finds that the task is canceled through the canceled flag returned by RecordActivityTaskHeartbeat, it should cancel the task, clean up and then call RespondActivityTaskCanceled.

    A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RespondActivityTaskFailed": "

    Used by workers to tell the service that the ActivityTask identified by the taskToken has failed with reason (if specified). The reason and details appear in the ActivityTaskFailed event added to the workflow history.

    A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed out.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "RespondDecisionTaskCompleted": "

    Used by deciders to tell the service that the DecisionTask identified by the taskToken has successfully completed. The decisions argument specifies the list of decisions made while processing the task.

    A DecisionTaskCompleted event is added to the workflow history. The executionContext specified is attached to the event in the workflow execution history.

    Access Control

    If an IAM policy grants permission to use RespondDecisionTaskCompleted, it can express permissions for the list of decisions in the decisions parameter. Each of the decisions has one or more parameters, much like a regular API call. To allow for policies to be as readable as possible, you can express permissions on decisions as if they were actual API calls, including applying conditions to some parameters. For more information, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "SignalWorkflowExecution": "

    Records a WorkflowExecutionSignaled event in the workflow execution history and creates a decision task for the workflow execution identified by the given domain, workflowId and runId. The event is recorded with the specified user defined signalName and input (if provided).

    If a runId is not specified, then the WorkflowExecutionSignaled event is recorded in the history of the current open workflow with the matching workflowId in the domain. If the specified workflow execution is not open, this method fails with UnknownResource.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "StartWorkflowExecution": "

    Starts an execution of the workflow type in the specified domain using the provided workflowId and input data.

    This action returns the newly started workflow execution.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagList.member.0: The key is swf:tagList.member.0.
      • tagList.member.1: The key is swf:tagList.member.1.
      • tagList.member.2: The key is swf:tagList.member.2.
      • tagList.member.3: The key is swf:tagList.member.3.
      • tagList.member.4: The key is swf:tagList.member.4.
      • taskList: String constraint. The key is swf:taskList.name.
      • workflowType.name: String constraint. The key is swf:workflowType.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "TerminateWorkflowExecution": "

    Records a WorkflowExecutionTerminated event and forces closure of the workflow execution identified by the given domain, runId, and workflowId. The child policy, registered with the workflow type or specified when starting this execution, is applied to any open child workflow executions of this workflow execution.

    If the identified workflow execution was in progress, it is terminated immediately. If a runId is not specified, then the WorkflowExecutionTerminated event is recorded in the history of the current open workflow with the matching workflowId in the domain. You should consider using RequestCancelWorkflowExecution action instead because it allows the workflow to gracefully close while TerminateWorkflowExecution does not.

    Access Control

    You can use IAM policies to control this action's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    " + }, + "service": "Amazon Simple Workflow Service

    The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build applications that use Amazon's cloud to coordinate work across distributed components. In Amazon SWF, a task represents a logical unit of work that is performed by a component of your workflow. Coordinating tasks in a workflow involves managing intertask dependencies, scheduling, and concurrency in accordance with the logical flow of the application.

    Amazon SWF gives you full control over implementing tasks and coordinating them without worrying about underlying complexities such as tracking their progress and maintaining their state.

    This documentation serves as reference only. For a broader overview of the Amazon SWF programming model, see the Amazon SWF Developer Guide.

    ", + "shapes": { + "ActivityId": { + "base": null, + "refs": { + "ActivityTask$activityId": "

    The unique ID of the task.

    ", + "ActivityTaskCancelRequestedEventAttributes$activityId": "

    The unique ID of the task.

    ", + "ActivityTaskScheduledEventAttributes$activityId": "

    The unique ID of the activity task.

    ", + "RequestCancelActivityTaskDecisionAttributes$activityId": "

    The activityId of the activity task to be canceled.

    ", + "RequestCancelActivityTaskFailedEventAttributes$activityId": "

    The activityId provided in the RequestCancelActivityTask decision that failed.

    ", + "ScheduleActivityTaskDecisionAttributes$activityId": "

    Required. The activityId of the activity task.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "ScheduleActivityTaskFailedEventAttributes$activityId": "

    The activityId provided in the ScheduleActivityTask decision that failed.

    " + } + }, + "ActivityTask": { + "base": "

    Unit of work sent to an activity worker.

    ", + "refs": { + } + }, + "ActivityTaskCancelRequestedEventAttributes": { + "base": "

    Provides details of the ActivityTaskCancelRequested event.

    ", + "refs": { + "HistoryEvent$activityTaskCancelRequestedEventAttributes": "

    If the event is of type ActivityTaskCancelRequested then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskCanceledEventAttributes": { + "base": "

    Provides details of the ActivityTaskCanceled event.

    ", + "refs": { + "HistoryEvent$activityTaskCanceledEventAttributes": "

    If the event is of type ActivityTaskCanceled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskCompletedEventAttributes": { + "base": "

    Provides details of the ActivityTaskCompleted event.

    ", + "refs": { + "HistoryEvent$activityTaskCompletedEventAttributes": "

    If the event is of type ActivityTaskCompleted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskFailedEventAttributes": { + "base": "

    Provides details of the ActivityTaskFailed event.

    ", + "refs": { + "HistoryEvent$activityTaskFailedEventAttributes": "

    If the event is of type ActivityTaskFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskScheduledEventAttributes": { + "base": "

    Provides details of the ActivityTaskScheduled event.

    ", + "refs": { + "HistoryEvent$activityTaskScheduledEventAttributes": "

    If the event is of type ActivityTaskScheduled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskStartedEventAttributes": { + "base": "

    Provides details of the ActivityTaskStarted event.

    ", + "refs": { + "HistoryEvent$activityTaskStartedEventAttributes": "

    If the event is of type ActivityTaskStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskStatus": { + "base": "

    Status information about an activity task.

    ", + "refs": { + } + }, + "ActivityTaskTimedOutEventAttributes": { + "base": "

    Provides details of the ActivityTaskTimedOut event.

    ", + "refs": { + "HistoryEvent$activityTaskTimedOutEventAttributes": "

    If the event is of type ActivityTaskTimedOut then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ActivityTaskTimeoutType": { + "base": null, + "refs": { + "ActivityTaskTimedOutEventAttributes$timeoutType": "

    The type of the timeout that caused this event.

    " + } + }, + "ActivityType": { + "base": "

    Represents an activity type.

    ", + "refs": { + "ActivityTask$activityType": "

    The type of this activity task.

    ", + "ActivityTaskScheduledEventAttributes$activityType": "

    The type of the activity task.

    ", + "ActivityTypeInfo$activityType": "

    The ActivityType type structure representing the activity type.

    ", + "DeprecateActivityTypeInput$activityType": "

    The activity type to deprecate.

    ", + "DescribeActivityTypeInput$activityType": "

    The activity type to get information about. Activity types are identified by the name and version that were supplied when the activity was registered.

    ", + "ScheduleActivityTaskDecisionAttributes$activityType": "

    Required. The type of the activity task to schedule.

    ", + "ScheduleActivityTaskFailedEventAttributes$activityType": "

    The activity type provided in the ScheduleActivityTask decision that failed.

    " + } + }, + "ActivityTypeConfiguration": { + "base": "

    Configuration settings registered with the activity type.

    ", + "refs": { + "ActivityTypeDetail$configuration": "

    The configuration settings registered with the activity type.

    " + } + }, + "ActivityTypeDetail": { + "base": "

    Detailed information about an activity type.

    ", + "refs": { + } + }, + "ActivityTypeInfo": { + "base": "

    Detailed information about an activity type.

    ", + "refs": { + "ActivityTypeDetail$typeInfo": "

    General information about the activity type.

    The status of activity type (returned in the ActivityTypeInfo structure) can be one of the following.

    • REGISTERED: The type is registered and available. Workers supporting this type should be running.
    • DEPRECATED: The type was deprecated using DeprecateActivityType, but is still in use. You should keep workers supporting this type running. You cannot create new tasks of this type.
    ", + "ActivityTypeInfoList$member": null + } + }, + "ActivityTypeInfoList": { + "base": null, + "refs": { + "ActivityTypeInfos$typeInfos": "

    List of activity type information.

    " + } + }, + "ActivityTypeInfos": { + "base": "

    Contains a paginated list of activity type information structures.

    ", + "refs": { + } + }, + "Arn": { + "base": null, + "refs": { + "ContinueAsNewWorkflowExecutionDecisionAttributes$lambdaRole": "

    The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions.

    In order for this workflow execution to invoke AWS Lambda functions, an appropriate IAM role must be specified either as a default for the workflow type or through this field.", + "RegisterWorkflowTypeInput$defaultLambdaRole": "

    The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions.

    This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision.

    ", + "StartChildWorkflowExecutionDecisionAttributes$lambdaRole": "

    The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions.

    In order for this workflow execution to invoke AWS Lambda functions, an appropriate IAM role must be specified either as a default for the workflow type or through this field.", + "StartChildWorkflowExecutionInitiatedEventAttributes$lambdaRole": "

    The IAM role attached to this workflow execution to use when invoking AWS Lambda functions.

    ", + "StartWorkflowExecutionInput$lambdaRole": "

    The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions.

    In order for this workflow execution to invoke AWS Lambda functions, an appropriate IAM role must be specified either as a default for the workflow type or through this field.", + "WorkflowExecutionConfiguration$lambdaRole": "

    The IAM role used by this workflow execution when invoking AWS Lambda functions.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$lambdaRole": "

    The IAM role attached to this workflow execution to use when invoking AWS Lambda functions.

    ", + "WorkflowExecutionStartedEventAttributes$lambdaRole": "

    The IAM role attached to this workflow execution to use when invoking AWS Lambda functions.

    ", + "WorkflowTypeConfiguration$defaultLambdaRole": "

    The default IAM role to use when a workflow execution invokes a AWS Lambda function.

    " + } + }, + "CancelTimerDecisionAttributes": { + "base": "

    Provides details of the CancelTimer decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$cancelTimerDecisionAttributes": "

    Provides details of the CancelTimer decision. It is not set for other decision types.

    " + } + }, + "CancelTimerFailedCause": { + "base": null, + "refs": { + "CancelTimerFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "CancelTimerFailedEventAttributes": { + "base": "

    Provides details of the CancelTimerFailed event.

    ", + "refs": { + "HistoryEvent$cancelTimerFailedEventAttributes": "

    If the event is of type CancelTimerFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "CancelWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the CancelWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$cancelWorkflowExecutionDecisionAttributes": "

    Provides details of the CancelWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "CancelWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "CancelWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "CancelWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the CancelWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$cancelWorkflowExecutionFailedEventAttributes": "

    If the event is of type CancelWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "Canceled": { + "base": null, + "refs": { + "ActivityTaskStatus$cancelRequested": "

    Set to true if cancellation of the task is requested.

    ", + "WorkflowExecutionInfo$cancelRequested": "

    Set to true if a cancellation is requested for this workflow execution.

    " + } + }, + "CauseMessage": { + "base": null, + "refs": { + "StartLambdaFunctionFailedEventAttributes$message": "

    The error message (if any).

    " + } + }, + "ChildPolicy": { + "base": null, + "refs": { + "ContinueAsNewWorkflowExecutionDecisionAttributes$childPolicy": "

    If set, specifies the policy to use for the child workflow executions of the new execution if it is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using RegisterWorkflowType.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault will be returned.", + "RegisterWorkflowTypeInput$defaultChildPolicy": "

    If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "StartChildWorkflowExecutionDecisionAttributes$childPolicy": "

    Optional. If set, specifies the policy to use for the child workflow executions if the workflow execution being started is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using RegisterWorkflowType.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault will be returned.", + "StartChildWorkflowExecutionInitiatedEventAttributes$childPolicy": "

    The policy to use for the child workflow executions if this execution gets terminated by explicitly calling the TerminateWorkflowExecution action or due to an expired timeout.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "StartWorkflowExecutionInput$childPolicy": "

    If set, specifies the policy to use for the child workflow executions of this workflow execution if it is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using RegisterWorkflowType.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault will be returned.", + "TerminateWorkflowExecutionInput$childPolicy": "

    If set, specifies the policy to use for the child workflow executions of the workflow execution being terminated. This policy overrides the child policy specified for the workflow execution at registration time or when starting the execution.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault will be returned.", + "WorkflowExecutionConfiguration$childPolicy": "

    The policy to use for the child workflow executions if this workflow execution is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowExecutionContinuedAsNewEventAttributes$childPolicy": "

    The policy to use for the child workflow executions of the new execution if it is terminated by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowExecutionStartedEventAttributes$childPolicy": "

    The policy to use for the child workflow executions if this workflow execution is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowExecutionTerminatedEventAttributes$childPolicy": "

    The policy used for the child workflow executions of this workflow execution.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowExecutionTimedOutEventAttributes$childPolicy": "

    The policy used for the child workflow executions of this workflow execution.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    ", + "WorkflowTypeConfiguration$defaultChildPolicy": "

    Optional. The default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The supported child policies are:

    • TERMINATE: the child executions will be terminated.
    • REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
    • ABANDON: no action will be taken. The child executions will continue to run.
    " + } + }, + "ChildWorkflowExecutionCanceledEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionCanceled event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionCanceledEventAttributes": "

    If the event is of type ChildWorkflowExecutionCanceled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionCompletedEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionCompleted event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionCompletedEventAttributes": "

    If the event is of type ChildWorkflowExecutionCompleted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionFailedEventAttributes": "

    If the event is of type ChildWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionStartedEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionStarted event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionStartedEventAttributes": "

    If the event is of type ChildWorkflowExecutionStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionTerminatedEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionTerminated event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionTerminatedEventAttributes": "

    If the event is of type ChildWorkflowExecutionTerminated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ChildWorkflowExecutionTimedOutEventAttributes": { + "base": "

    Provides details of the ChildWorkflowExecutionTimedOut event.

    ", + "refs": { + "HistoryEvent$childWorkflowExecutionTimedOutEventAttributes": "

    If the event is of type ChildWorkflowExecutionTimedOut then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "CloseStatus": { + "base": null, + "refs": { + "CloseStatusFilter$status": "

    Required. The close status that must match the close status of an execution for it to meet the criteria of this filter.

    ", + "WorkflowExecutionInfo$closeStatus": "

    If the execution status is closed then this specifies how the execution was closed:

    • COMPLETED: the execution was successfully completed.
    • CANCELED: the execution was canceled. Cancellation allows the implementation to gracefully clean up before the execution is closed.
    • TERMINATED: the execution was force terminated.
    • FAILED: the execution failed to complete.
    • TIMED_OUT: the execution did not complete in the allotted time and was automatically timed out.
    • CONTINUED_AS_NEW: the execution is logically continued. This means the current execution was completed and a new execution was started to carry on the workflow.
    " + } + }, + "CloseStatusFilter": { + "base": "

    Used to filter the closed workflow executions in visibility APIs by their close status.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$closeStatusFilter": "

    If specified, only workflow executions that match this close status are counted. This filter has an effect only if executionStatus is specified as CLOSED.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListClosedWorkflowExecutionsInput$closeStatusFilter": "

    If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request." + } + }, + "CompleteWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the CompleteWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$completeWorkflowExecutionDecisionAttributes": "

    Provides details of the CompleteWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "CompleteWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "CompleteWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "CompleteWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the CompleteWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$completeWorkflowExecutionFailedEventAttributes": "

    If the event is of type CompleteWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ContinueAsNewWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the ContinueAsNewWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tag: Optional. A tag used to identify the workflow execution
      • taskList: String constraint. The key is swf:taskList.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$continueAsNewWorkflowExecutionDecisionAttributes": "

    Provides details of the ContinueAsNewWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "ContinueAsNewWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "ContinueAsNewWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "ContinueAsNewWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the ContinueAsNewWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$continueAsNewWorkflowExecutionFailedEventAttributes": "

    If the event is of type ContinueAsNewWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "Count": { + "base": null, + "refs": { + "PendingTaskCount$count": "

    The number of tasks in the task list.

    ", + "WorkflowExecutionCount$count": "

    The number of workflow executions.

    ", + "WorkflowExecutionOpenCounts$openActivityTasks": "

    The count of activity tasks whose status is OPEN.

    ", + "WorkflowExecutionOpenCounts$openTimers": "

    The count of timers started by this workflow execution that have not fired yet.

    ", + "WorkflowExecutionOpenCounts$openChildWorkflowExecutions": "

    The count of child workflow executions whose status is OPEN.

    ", + "WorkflowExecutionOpenCounts$openLambdaFunctions": "

    The count of AWS Lambda functions that are currently executing.

    " + } + }, + "CountClosedWorkflowExecutionsInput": { + "base": null, + "refs": { + } + }, + "CountOpenWorkflowExecutionsInput": { + "base": null, + "refs": { + } + }, + "CountPendingActivityTasksInput": { + "base": null, + "refs": { + } + }, + "CountPendingDecisionTasksInput": { + "base": null, + "refs": { + } + }, + "Data": { + "base": null, + "refs": { + "ActivityTask$input": "

    The inputs provided when the activity task was scheduled. The form of the input is user defined and should be meaningful to the activity implementation.

    ", + "ActivityTaskCanceledEventAttributes$details": "

    Details of the cancellation (if any).

    ", + "ActivityTaskCompletedEventAttributes$result": "

    The results of the activity task (if any).

    ", + "ActivityTaskFailedEventAttributes$details": "

    The details of the failure (if any).

    ", + "ActivityTaskScheduledEventAttributes$input": "

    The input provided to the activity task.

    ", + "ActivityTaskScheduledEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks. This data is not sent to the activity.

    ", + "CancelWorkflowExecutionDecisionAttributes$details": "

    Optional. details of the cancellation.

    ", + "ChildWorkflowExecutionCanceledEventAttributes$details": "

    Details of the cancellation (if provided).

    ", + "ChildWorkflowExecutionCompletedEventAttributes$result": "

    The result of the child workflow execution (if any).

    ", + "ChildWorkflowExecutionFailedEventAttributes$details": "

    The details of the failure (if provided).

    ", + "CompleteWorkflowExecutionDecisionAttributes$result": "

    The result of the workflow execution. The form of the result is implementation defined.

    ", + "ContinueAsNewWorkflowExecutionDecisionAttributes$input": "

    The input provided to the new workflow execution.

    ", + "DecisionTaskCompletedEventAttributes$executionContext": "

    User defined context for the workflow execution.

    ", + "FailWorkflowExecutionDecisionAttributes$details": "

    Optional. Details of the failure.

    ", + "LambdaFunctionCompletedEventAttributes$result": "

    The result of the function execution (if any).

    ", + "LambdaFunctionFailedEventAttributes$details": "

    The details of the failure (if any).

    ", + "MarkerRecordedEventAttributes$details": "

    Details of the marker (if any).

    ", + "RecordMarkerDecisionAttributes$details": "

    Optional. details of the marker.

    ", + "RequestCancelExternalWorkflowExecutionDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$control": null, + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks.

    ", + "RespondActivityTaskCanceledInput$details": "

    Optional. Information about the cancellation.

    ", + "RespondActivityTaskCompletedInput$result": "

    The result of the activity task. It is a free form string that is implementation specific.

    ", + "RespondActivityTaskFailedInput$details": "

    Optional. Detailed information about the failure.

    ", + "RespondDecisionTaskCompletedInput$executionContext": "

    User defined context to add to workflow execution.

    ", + "ScheduleActivityTaskDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks. This data is not sent to the activity.

    ", + "ScheduleActivityTaskDecisionAttributes$input": "

    The input provided to the activity task.

    ", + "SignalExternalWorkflowExecutionDecisionAttributes$input": "

    Optional. Input data to be provided with the signal. The target workflow execution will use the signal name and input data to process the signal.

    ", + "SignalExternalWorkflowExecutionDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent decision tasks.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$control": null, + "SignalExternalWorkflowExecutionInitiatedEventAttributes$input": "

    Input provided to the signal (if any).

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$control": "

    Optional. data attached to the event that can be used by the decider in subsequent decision tasks.

    ", + "SignalWorkflowExecutionInput$input": "

    Data to attach to the WorkflowExecutionSignaled event in the target workflow execution's history.

    ", + "StartChildWorkflowExecutionDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks. This data is not sent to the child workflow execution.

    ", + "StartChildWorkflowExecutionDecisionAttributes$input": "

    The input to be provided to the workflow execution.

    ", + "StartChildWorkflowExecutionFailedEventAttributes$control": null, + "StartChildWorkflowExecutionInitiatedEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent decision tasks. This data is not sent to the activity.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$input": "

    The inputs provided to the child workflow execution (if any).

    ", + "StartTimerDecisionAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks.

    ", + "StartWorkflowExecutionInput$input": "

    The input for the workflow execution. This is a free form string which should be meaningful to the workflow you are starting. This input is made available to the new workflow execution in the WorkflowExecutionStarted history event.

    ", + "TerminateWorkflowExecutionInput$details": "

    Optional. Details for terminating the workflow execution.

    ", + "TimerStartedEventAttributes$control": "

    Optional. Data attached to the event that can be used by the decider in subsequent workflow tasks.

    ", + "WorkflowExecutionCanceledEventAttributes$details": "

    Details for the cancellation (if any).

    ", + "WorkflowExecutionCompletedEventAttributes$result": "

    The result produced by the workflow execution upon successful completion.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$input": "

    The input provided to the new workflow execution.

    ", + "WorkflowExecutionDetail$latestExecutionContext": "

    The latest executionContext provided by the decider for this workflow execution. A decider can provide an executionContext (a free-form string) when closing a decision task using RespondDecisionTaskCompleted.

    ", + "WorkflowExecutionFailedEventAttributes$details": "

    The details of the failure (if any).

    ", + "WorkflowExecutionSignaledEventAttributes$input": "

    Inputs provided with the signal (if any). The decider can use the signal name and inputs to determine how to process the signal.

    ", + "WorkflowExecutionStartedEventAttributes$input": "

    The input provided to the workflow execution (if any).

    ", + "WorkflowExecutionTerminatedEventAttributes$details": "

    The details provided for the termination (if any).

    " + } + }, + "Decision": { + "base": "

    Specifies a decision made by the decider. A decision can be one of these types:

    • CancelTimer: cancels a previously started timer and records a TimerCanceled event in the history.
    • CancelWorkflowExecution: closes the workflow execution and records a WorkflowExecutionCanceled event in the history.
    • CompleteWorkflowExecution: closes the workflow execution and records a WorkflowExecutionCompleted event in the history .
    • ContinueAsNewWorkflowExecution: closes the workflow execution and starts a new workflow execution of the same type using the same workflow ID and a unique run ID. A WorkflowExecutionContinuedAsNew event is recorded in the history.
    • FailWorkflowExecution: closes the workflow execution and records a WorkflowExecutionFailed event in the history.
    • RecordMarker: records a MarkerRecorded event in the history. Markers can be used for adding custom information in the history for instance to let deciders know that they do not need to look at the history beyond the marker event.
    • RequestCancelActivityTask: attempts to cancel a previously scheduled activity task. If the activity task was scheduled but has not been assigned to a worker, then it will be canceled. If the activity task was already assigned to a worker, then the worker will be informed that cancellation has been requested in the response to RecordActivityTaskHeartbeat.
    • RequestCancelExternalWorkflowExecution: requests that a request be made to cancel the specified external workflow execution and records a RequestCancelExternalWorkflowExecutionInitiated event in the history.
    • ScheduleActivityTask: schedules an activity task.
    • ScheduleLambdaFunction: schedules an AWS Lambda function.
    • SignalExternalWorkflowExecution: requests a signal to be delivered to the specified external workflow execution and records a SignalExternalWorkflowExecutionInitiated event in the history.
    • StartChildWorkflowExecution: requests that a child workflow execution be started and records a StartChildWorkflowExecutionInitiated event in the history. The child workflow execution is a separate workflow execution with its own history.
    • StartTimer: starts a timer for this workflow execution and records a TimerStarted event in the history. This timer will fire after the specified delay and record a TimerFired event.

    Access Control

    If you grant permission to use RespondDecisionTaskCompleted, you can use IAM policies to express permissions for the list of decisions returned by this action as if they were members of the API. Treating decisions as a pseudo API maintains a uniform conceptual model and helps keep policies readable. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    Decision Failure

    Decisions can fail for several reasons

    • The ordering of decisions should follow a logical flow. Some decisions might not make sense in the current context of the workflow execution and will therefore fail.
    • A limit on your account was reached.
    • The decision lacks sufficient permissions.

    One of the following events might be added to the history to indicate an error. The event attribute's cause parameter indicates the cause. If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    • ScheduleActivityTaskFailed: a ScheduleActivityTask decision failed. This could happen if the activity type specified in the decision is not registered, is in a deprecated state, or the decision is not properly configured.
    • ScheduleLambdaFunctionFailed: a ScheduleLambdaFunction decision failed. This could happen if the AWS Lambda function specified in the decision does not exist, or the AWS Lambda service's limits are exceeded.
    • RequestCancelActivityTaskFailed: a RequestCancelActivityTask decision failed. This could happen if there is no open activity task with the specified activityId.
    • StartTimerFailed: a StartTimer decision failed. This could happen if there is another open timer with the same timerId.
    • CancelTimerFailed: a CancelTimer decision failed. This could happen if there is no open timer with the specified timerId.
    • StartChildWorkflowExecutionFailed: a StartChildWorkflowExecution decision failed. This could happen if the workflow type specified is not registered, is deprecated, or the decision is not properly configured.
    • SignalExternalWorkflowExecutionFailed: a SignalExternalWorkflowExecution decision failed. This could happen if the workflowID specified in the decision was incorrect.
    • RequestCancelExternalWorkflowExecutionFailed: a RequestCancelExternalWorkflowExecution decision failed. This could happen if the workflowID specified in the decision was incorrect.
    • CancelWorkflowExecutionFailed: a CancelWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.
    • CompleteWorkflowExecutionFailed: a CompleteWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.
    • ContinueAsNewWorkflowExecutionFailed: a ContinueAsNewWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution or the ContinueAsNewWorkflowExecution decision was not configured correctly.
    • FailWorkflowExecutionFailed: a FailWorkflowExecution decision failed. This could happen if there is an unhandled decision task pending in the workflow execution.

    The preceding error events might occur due to an error in the decider logic, which might put the workflow execution in an unstable state. The cause field in the event structure for the error event indicates the cause of the error.

    A workflow execution may be closed by the decider by returning one of the following decisions when completing a decision task: CompleteWorkflowExecution, FailWorkflowExecution, CancelWorkflowExecution and ContinueAsNewWorkflowExecution. An UnhandledDecision fault will be returned if a workflow closing decision is specified and a signal or activity event had been added to the history while the decision task was being performed by the decider. Unlike the above situations which are logic issues, this fault is always possible because of race conditions in a distributed system. The right action here is to call RespondDecisionTaskCompleted without any decisions. This would result in another decision task with these new events included in the history. The decider should handle the new events and may decide to close the workflow execution.

    How to code a decision

    You code a decision by first setting the decision type field to one of the above decision values, and then set the corresponding attributes field shown below:

    ", + "refs": { + "DecisionList$member": null + } + }, + "DecisionList": { + "base": null, + "refs": { + "RespondDecisionTaskCompletedInput$decisions": "

    The list of decisions (possibly empty) made by the decider while processing this decision task. See the docs for the decision structure for details.

    " + } + }, + "DecisionTask": { + "base": "

    A structure that represents a decision task. Decision tasks are sent to deciders in order for them to make decisions.

    ", + "refs": { + } + }, + "DecisionTaskCompletedEventAttributes": { + "base": "

    Provides details of the DecisionTaskCompleted event.

    ", + "refs": { + "HistoryEvent$decisionTaskCompletedEventAttributes": "

    If the event is of type DecisionTaskCompleted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "DecisionTaskScheduledEventAttributes": { + "base": "

    Provides details about the DecisionTaskScheduled event.

    ", + "refs": { + "HistoryEvent$decisionTaskScheduledEventAttributes": "

    If the event is of type DecisionTaskScheduled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "DecisionTaskStartedEventAttributes": { + "base": "

    Provides details of the DecisionTaskStarted event.

    ", + "refs": { + "HistoryEvent$decisionTaskStartedEventAttributes": "

    If the event is of type DecisionTaskStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "DecisionTaskTimedOutEventAttributes": { + "base": "

    Provides details of the DecisionTaskTimedOut event.

    ", + "refs": { + "HistoryEvent$decisionTaskTimedOutEventAttributes": "

    If the event is of type DecisionTaskTimedOut then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "DecisionTaskTimeoutType": { + "base": null, + "refs": { + "DecisionTaskTimedOutEventAttributes$timeoutType": "

    The type of timeout that expired before the decision task could be completed.

    " + } + }, + "DecisionType": { + "base": null, + "refs": { + "Decision$decisionType": "

    Specifies the type of the decision.

    " + } + }, + "DefaultUndefinedFault": { + "base": null, + "refs": { + } + }, + "DeprecateActivityTypeInput": { + "base": null, + "refs": { + } + }, + "DeprecateDomainInput": { + "base": null, + "refs": { + } + }, + "DeprecateWorkflowTypeInput": { + "base": null, + "refs": { + } + }, + "DescribeActivityTypeInput": { + "base": null, + "refs": { + } + }, + "DescribeDomainInput": { + "base": null, + "refs": { + } + }, + "DescribeWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "DescribeWorkflowTypeInput": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "ActivityTypeInfo$description": "

    The description of the activity type provided in RegisterActivityType.

    ", + "DomainInfo$description": "

    The description of the domain provided through RegisterDomain.

    ", + "RegisterActivityTypeInput$description": "

    A textual description of the activity type.

    ", + "RegisterDomainInput$description": "

    A text description of the domain.

    ", + "RegisterWorkflowTypeInput$description": "

    Textual description of the workflow type.

    ", + "WorkflowTypeInfo$description": "

    The description of the type registered through RegisterWorkflowType.

    " + } + }, + "DomainAlreadyExistsFault": { + "base": "

    Returned if the specified domain already exists. You will get this fault even if the existing domain is in deprecated status.

    ", + "refs": { + } + }, + "DomainConfiguration": { + "base": "

    Contains the configuration settings of a domain.

    ", + "refs": { + "DomainDetail$configuration": null + } + }, + "DomainDeprecatedFault": { + "base": "

    Returned when the specified domain has been deprecated.

    ", + "refs": { + } + }, + "DomainDetail": { + "base": "

    Contains details of a domain.

    ", + "refs": { + } + }, + "DomainInfo": { + "base": "

    Contains general information about a domain.

    ", + "refs": { + "DomainDetail$domainInfo": null, + "DomainInfoList$member": null + } + }, + "DomainInfoList": { + "base": null, + "refs": { + "DomainInfos$domainInfos": "

    A list of DomainInfo structures.

    " + } + }, + "DomainInfos": { + "base": "

    Contains a paginated collection of DomainInfo structures.

    ", + "refs": { + } + }, + "DomainName": { + "base": null, + "refs": { + "CountClosedWorkflowExecutionsInput$domain": "

    The name of the domain containing the workflow executions to count.

    ", + "CountOpenWorkflowExecutionsInput$domain": "

    The name of the domain containing the workflow executions to count.

    ", + "CountPendingActivityTasksInput$domain": "

    The name of the domain that contains the task list.

    ", + "CountPendingDecisionTasksInput$domain": "

    The name of the domain that contains the task list.

    ", + "DeprecateActivityTypeInput$domain": "

    The name of the domain in which the activity type is registered.

    ", + "DeprecateDomainInput$name": "

    The name of the domain to deprecate.

    ", + "DeprecateWorkflowTypeInput$domain": "

    The name of the domain in which the workflow type is registered.

    ", + "DescribeActivityTypeInput$domain": "

    The name of the domain in which the activity type is registered.

    ", + "DescribeDomainInput$name": "

    The name of the domain to describe.

    ", + "DescribeWorkflowExecutionInput$domain": "

    The name of the domain containing the workflow execution.

    ", + "DescribeWorkflowTypeInput$domain": "

    The name of the domain in which this workflow type is registered.

    ", + "DomainInfo$name": "

    The name of the domain. This name is unique within the account.

    ", + "GetWorkflowExecutionHistoryInput$domain": "

    The name of the domain containing the workflow execution.

    ", + "ListActivityTypesInput$domain": "

    The name of the domain in which the activity types have been registered.

    ", + "ListClosedWorkflowExecutionsInput$domain": "

    The name of the domain that contains the workflow executions to list.

    ", + "ListOpenWorkflowExecutionsInput$domain": "

    The name of the domain that contains the workflow executions to list.

    ", + "ListWorkflowTypesInput$domain": "

    The name of the domain in which the workflow types have been registered.

    ", + "PollForActivityTaskInput$domain": "

    The name of the domain that contains the task lists being polled.

    ", + "PollForDecisionTaskInput$domain": "

    The name of the domain containing the task lists to poll.

    ", + "RegisterActivityTypeInput$domain": "

    The name of the domain in which this activity is to be registered.

    ", + "RegisterDomainInput$name": "

    Name of the domain to register. The name must be unique in the region that the domain is registered in.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string quotarnquot.

    ", + "RegisterWorkflowTypeInput$domain": "

    The name of the domain in which to register the workflow type.

    ", + "RequestCancelWorkflowExecutionInput$domain": "

    The name of the domain containing the workflow execution to cancel.

    ", + "SignalWorkflowExecutionInput$domain": "

    The name of the domain containing the workflow execution to signal.

    ", + "StartWorkflowExecutionInput$domain": "

    The name of the domain in which the workflow execution is created.

    ", + "TerminateWorkflowExecutionInput$domain": "

    The domain of the workflow execution to terminate.

    " + } + }, + "DurationInDays": { + "base": null, + "refs": { + "DomainConfiguration$workflowExecutionRetentionPeriodInDays": "

    The retention period for workflow executions in this domain.

    ", + "RegisterDomainInput$workflowExecutionRetentionPeriodInDays": "

    The duration (in days) that records and histories of workflow executions on the domain should be kept by the service. After the retention period, the workflow execution is not available in the results of visibility calls.

    If you pass the value NONE or 0 (zero), then the workflow execution history will not be retained. As soon as the workflow execution completes, the execution record and its history are deleted.

    The maximum workflow execution retention period is 90 days. For more information about Amazon SWF service limits, see: Amazon SWF Service Limits in the Amazon SWF Developer Guide.

    " + } + }, + "DurationInSeconds": { + "base": null, + "refs": { + "StartTimerDecisionAttributes$startToFireTimeout": "

    Required. The duration to wait before firing the timer.

    The duration is specified in seconds; an integer greater than or equal to 0.

    ", + "TimerStartedEventAttributes$startToFireTimeout": "

    The duration of time after which the timer will fire.

    The duration is specified in seconds; an integer greater than or equal to 0.

    ", + "WorkflowExecutionConfiguration$taskStartToCloseTimeout": "

    The maximum duration allowed for decision tasks for this workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowExecutionConfiguration$executionStartToCloseTimeout": "

    The total duration for this workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    " + } + }, + "DurationInSecondsOptional": { + "base": null, + "refs": { + "ActivityTaskScheduledEventAttributes$scheduleToStartTimeout": "

    The maximum amount of time the activity task can wait to be assigned to a worker.

    ", + "ActivityTaskScheduledEventAttributes$scheduleToCloseTimeout": "

    The maximum amount of time for this activity task.

    ", + "ActivityTaskScheduledEventAttributes$startToCloseTimeout": "

    The maximum amount of time a worker may take to process the activity task.

    ", + "ActivityTaskScheduledEventAttributes$heartbeatTimeout": "

    The maximum time before which the worker processing this task must report progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, the activity task is automatically timed out. If the worker subsequently attempts to record a heartbeat or return a result, it will be ignored.

    ", + "ActivityTypeConfiguration$defaultTaskStartToCloseTimeout": "

    Optional. The default maximum duration for tasks of an activity type specified when registering the activity type. You can override this default when scheduling a task through the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ActivityTypeConfiguration$defaultTaskHeartbeatTimeout": "

    Optional. The default maximum time, in seconds, before which a worker processing a task must report progress by calling RecordActivityTaskHeartbeat.

    You can specify this value only when registering an activity type. The registered default value can be overridden when you schedule a task through the ScheduleActivityTask decision. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an UnknownResource fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ActivityTypeConfiguration$defaultTaskScheduleToStartTimeout": "

    Optional. The default maximum duration, specified when registering the activity type, that a task of an activity type can wait before being assigned to a worker. You can override this default when scheduling a task through the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ActivityTypeConfiguration$defaultTaskScheduleToCloseTimeout": "

    Optional. The default maximum duration, specified when registering the activity type, for tasks of this activity type. You can override this default when scheduling a task through the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ContinueAsNewWorkflowExecutionDecisionAttributes$executionStartToCloseTimeout": "

    If set, specifies the total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout specified when registering the workflow type.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    An execution start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this field. If neither this field is set nor a default execution start-to-close timeout was specified at registration time then a fault will be returned.", + "ContinueAsNewWorkflowExecutionDecisionAttributes$taskStartToCloseTimeout": "

    Specifies the maximum duration of decision tasks for the new workflow execution. This parameter overrides the defaultTaskStartToCloseTimout specified when registering the workflow type using RegisterWorkflowType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A task start-to-close timeout for the new workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault will be returned.", + "DecisionTaskScheduledEventAttributes$startToCloseTimeout": "

    The maximum duration for this decision task. The task is considered timed out if it does not completed within this duration.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "LambdaFunctionScheduledEventAttributes$startToCloseTimeout": "

    The maximum time, in seconds, that the AWS Lambda function can take to execute from start to close before it is marked as failed.

    ", + "RegisterActivityTypeInput$defaultTaskStartToCloseTimeout": "

    If set, specifies the default maximum duration that a worker can take to process tasks of this activity type. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterActivityTypeInput$defaultTaskHeartbeatTimeout": "

    If set, specifies the default maximum time before which a worker processing a task of this type must report progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, the activity task is automatically timed out. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an UnknownResource fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterActivityTypeInput$defaultTaskScheduleToStartTimeout": "

    If set, specifies the default maximum duration that a task of this activity type can wait before being assigned to a worker. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterActivityTypeInput$defaultTaskScheduleToCloseTimeout": "

    If set, specifies the default maximum duration for a task of this activity type. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterWorkflowTypeInput$defaultTaskStartToCloseTimeout": "

    If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "RegisterWorkflowTypeInput$defaultExecutionStartToCloseTimeout": "

    If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.

    The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of \"NONE\" for defaultExecutionStartToCloseTimeout; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out.

    ", + "ScheduleActivityTaskDecisionAttributes$scheduleToCloseTimeout": "

    The maximum duration for this activity task.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A schedule-to-close timeout for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default schedule-to-close timeout was specified at registration time then a fault will be returned.", + "ScheduleActivityTaskDecisionAttributes$scheduleToStartTimeout": "

    Optional. If set, specifies the maximum duration the activity task can wait to be assigned to a worker. This overrides the default schedule-to-start timeout specified when registering the activity type using RegisterActivityType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A schedule-to-start timeout for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default schedule-to-start timeout was specified at registration time then a fault will be returned.", + "ScheduleActivityTaskDecisionAttributes$startToCloseTimeout": "

    If set, specifies the maximum duration a worker may take to process this activity task. This overrides the default start-to-close timeout specified when registering the activity type using RegisterActivityType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A start-to-close timeout for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default start-to-close timeout was specified at registration time then a fault will be returned.", + "ScheduleActivityTaskDecisionAttributes$heartbeatTimeout": "

    If set, specifies the maximum time before which a worker processing a task of this type must report progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, the activity task is automatically timed out. If the worker subsequently attempts to record a heartbeat or returns a result, it will be ignored. This overrides the default heartbeat timeout specified when registering the activity type using RegisterActivityType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "ScheduleLambdaFunctionDecisionAttributes$startToCloseTimeout": "

    If set, specifies the maximum duration the function may take to execute.

    ", + "StartChildWorkflowExecutionDecisionAttributes$executionStartToCloseTimeout": "

    The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout specified when registering the workflow type.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    An execution start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default execution start-to-close timeout was specified at registration time then a fault will be returned.", + "StartChildWorkflowExecutionDecisionAttributes$taskStartToCloseTimeout": "

    Specifies the maximum duration of decision tasks for this workflow execution. This parameter overrides the defaultTaskStartToCloseTimout specified when registering the workflow type using RegisterWorkflowType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A task start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault will be returned.", + "StartChildWorkflowExecutionInitiatedEventAttributes$executionStartToCloseTimeout": "

    The maximum duration for the child workflow execution. If the workflow execution is not closed within this duration, it will be timed out and force terminated.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$taskStartToCloseTimeout": "

    The maximum duration allowed for the decision tasks for this workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "StartWorkflowExecutionInput$executionStartToCloseTimeout": "

    The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout specified when registering the workflow type.

    The duration is specified in seconds; an integer greater than or equal to 0. Exceeding this limit will cause the workflow execution to time out. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of \"NONE\" for this timeout; there is a one-year max limit on the time that a workflow execution can run.

    An execution start-to-close timeout must be specified either through this parameter or as a default when the workflow type is registered. If neither this parameter nor a default execution start-to-close timeout is specified, a fault is returned.", + "StartWorkflowExecutionInput$taskStartToCloseTimeout": "

    Specifies the maximum duration of decision tasks for this workflow execution. This parameter overrides the defaultTaskStartToCloseTimout specified when registering the workflow type using RegisterWorkflowType.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    A task start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault will be returned.", + "WorkflowExecutionContinuedAsNewEventAttributes$executionStartToCloseTimeout": "

    The total duration allowed for the new workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$taskStartToCloseTimeout": "

    The maximum duration of decision tasks for the new workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowExecutionStartedEventAttributes$executionStartToCloseTimeout": "

    The maximum duration for this workflow execution.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowExecutionStartedEventAttributes$taskStartToCloseTimeout": "

    The maximum duration of decision tasks for this workflow type.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowTypeConfiguration$defaultTaskStartToCloseTimeout": "

    Optional. The default maximum duration, specified when registering the workflow type, that a decision task for executions of this workflow type might take before returning completion or failure. If the task does not close in the specified time then the task is automatically timed out and rescheduled. If the decider eventually reports a completion or failure, it is ignored. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    ", + "WorkflowTypeConfiguration$defaultExecutionStartToCloseTimeout": "

    Optional. The default maximum duration, specified when registering the workflow type, for executions of this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    The duration is specified in seconds; an integer greater than or equal to 0. The value \"NONE\" can be used to specify unlimited duration.

    " + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "DefaultUndefinedFault$message": null, + "DomainAlreadyExistsFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "DomainDeprecatedFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "LimitExceededFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "OperationNotPermittedFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "TypeAlreadyExistsFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "TypeDeprecatedFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "UnknownResourceFault$message": "

    A description that may help with diagnosing the cause of the fault.

    ", + "WorkflowExecutionAlreadyStartedFault$message": "

    A description that may help with diagnosing the cause of the fault.

    " + } + }, + "EventId": { + "base": null, + "refs": { + "ActivityTask$startedEventId": "

    The ID of the ActivityTaskStarted event recorded in the history.

    ", + "ActivityTaskCancelRequestedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelActivityTask decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCanceledEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCanceledEventAttributes$startedEventId": "

    The ID of the ActivityTaskStarted event recorded when this activity task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCanceledEventAttributes$latestCancelRequestedEventId": "

    If set, contains the ID of the last ActivityTaskCancelRequested event recorded for this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCompletedEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskCompletedEventAttributes$startedEventId": "

    The ID of the ActivityTaskStarted event recorded when this activity task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskFailedEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskFailedEventAttributes$startedEventId": "

    The ID of the ActivityTaskStarted event recorded when this activity task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskScheduledEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision that resulted in the scheduling of this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskStartedEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskTimedOutEventAttributes$scheduledEventId": "

    The ID of the ActivityTaskScheduled event that was recorded when this activity task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ActivityTaskTimedOutEventAttributes$startedEventId": "

    The ID of the ActivityTaskStarted event recorded when this activity task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "CancelTimerFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelTimer decision to cancel this timer. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "CancelWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionCanceledEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionCanceledEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionCompletedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionCompletedEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionFailedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionFailedEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionStartedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionTerminatedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionTerminatedEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionTimedOutEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ChildWorkflowExecutionTimedOutEventAttributes$startedEventId": "

    The ID of the ChildWorkflowExecutionStarted event recorded when this child workflow execution was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "CompleteWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CompleteWorkflowExecution decision to complete this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ContinueAsNewWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the ContinueAsNewWorkflowExecution decision that started this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTask$startedEventId": "

    The ID of the DecisionTaskStarted event recorded in the history.

    ", + "DecisionTask$previousStartedEventId": "

    The ID of the DecisionTaskStarted event of the previous decision task of this workflow execution that was processed by the decider. This can be used to determine the events in the history new since the last decision task received by the decider.

    ", + "DecisionTaskCompletedEventAttributes$scheduledEventId": "

    The ID of the DecisionTaskScheduled event that was recorded when this decision task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTaskCompletedEventAttributes$startedEventId": "

    The ID of the DecisionTaskStarted event recorded when this decision task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTaskStartedEventAttributes$scheduledEventId": "

    The ID of the DecisionTaskScheduled event that was recorded when this decision task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTaskTimedOutEventAttributes$scheduledEventId": "

    The ID of the DecisionTaskScheduled event that was recorded when this decision task was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "DecisionTaskTimedOutEventAttributes$startedEventId": "

    The ID of the DecisionTaskStarted event recorded when this decision task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ExternalWorkflowExecutionCancelRequestedEventAttributes$initiatedEventId": "

    The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding to the RequestCancelExternalWorkflowExecution decision to cancel this external workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ExternalWorkflowExecutionSignaledEventAttributes$initiatedEventId": "

    The ID of the SignalExternalWorkflowExecutionInitiated event corresponding to the SignalExternalWorkflowExecution decision to request this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "FailWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the FailWorkflowExecution decision to fail this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "HistoryEvent$eventId": "

    The system generated ID of the event. This ID uniquely identifies the event with in the workflow execution history.

    ", + "LambdaFunctionCompletedEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionCompletedEventAttributes$startedEventId": "

    The ID of the LambdaFunctionStarted event recorded in the history.

    ", + "LambdaFunctionFailedEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionFailedEventAttributes$startedEventId": "

    The ID of the LambdaFunctionStarted event recorded in the history.

    ", + "LambdaFunctionScheduledEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event for the decision that resulted in the scheduling of this AWS Lambda function. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionStartedEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionTimedOutEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "LambdaFunctionTimedOutEventAttributes$startedEventId": "

    The ID of the LambdaFunctionStarted event recorded in the history.

    ", + "MarkerRecordedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RecordMarker decision that requested this marker. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RecordMarkerFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RecordMarkerFailed decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RequestCancelActivityTaskFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelActivityTask decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$initiatedEventId": "

    The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding to the RequestCancelExternalWorkflowExecution decision to cancel this external workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelExternalWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the RequestCancelExternalWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ScheduleActivityTaskFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision that resulted in the scheduling of this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "ScheduleLambdaFunctionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision that resulted in the scheduling of this AWS Lambda function. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$initiatedEventId": "

    The ID of the SignalExternalWorkflowExecutionInitiated event corresponding to the SignalExternalWorkflowExecution decision to request this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the SignalExternalWorkflowExecution decision for this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the SignalExternalWorkflowExecution decision for this signal. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "StartChildWorkflowExecutionFailedEventAttributes$initiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this child workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "StartChildWorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartChildWorkflowExecution decision to request this child workflow execution. This information can be useful for diagnosing problems by tracing back the cause of events.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartChildWorkflowExecution decision to request this child workflow execution. This information can be useful for diagnosing problems by tracing back the cause of events.

    ", + "StartLambdaFunctionFailedEventAttributes$scheduledEventId": "

    The ID of the LambdaFunctionScheduled event that was recorded when this AWS Lambda function was scheduled. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "StartTimerFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartTimer decision for this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "TimerCanceledEventAttributes$startedEventId": "

    The ID of the TimerStarted event that was recorded when this timer was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "TimerCanceledEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelTimer decision to cancel this timer. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "TimerFiredEventAttributes$startedEventId": "

    The ID of the TimerStarted event that was recorded when this timer was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "TimerStartedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the StartTimer decision for this activity task. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionCancelRequestedEventAttributes$externalInitiatedEventId": "

    The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding to the RequestCancelExternalWorkflowExecution decision to cancel this workflow execution. The source event with this ID can be found in the history of the source workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionCanceledEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CancelWorkflowExecution decision for this cancellation request. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionCompletedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the CompleteWorkflowExecution decision to complete this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the ContinueAsNewWorkflowExecution decision that started this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionFailedEventAttributes$decisionTaskCompletedEventId": "

    The ID of the DecisionTaskCompleted event corresponding to the decision task that resulted in the FailWorkflowExecution decision to fail this execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    ", + "WorkflowExecutionSignaledEventAttributes$externalInitiatedEventId": "

    The ID of the SignalExternalWorkflowExecutionInitiated event corresponding to the SignalExternalWorkflow decision to signal this workflow execution. The source event with this ID can be found in the history of the source workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event. This field is set only if the signal was initiated by another workflow execution.

    ", + "WorkflowExecutionStartedEventAttributes$parentInitiatedEventId": "

    The ID of the StartChildWorkflowExecutionInitiated event corresponding to the StartChildWorkflowExecution decision to start this workflow execution. The source event with this ID can be found in the history of the source workflow execution. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    " + } + }, + "EventType": { + "base": null, + "refs": { + "HistoryEvent$eventType": "

    The type of the history event.

    " + } + }, + "ExecutionStatus": { + "base": null, + "refs": { + "WorkflowExecutionInfo$executionStatus": "

    The current status of the execution.

    " + } + }, + "ExecutionTimeFilter": { + "base": "

    Used to filter the workflow executions in visibility APIs by various time-based rules. Each parameter, if specified, defines a rule that must be satisfied by each returned query result. The parameter values are in the Unix Time format. For example: \"oldestDate\": 1325376070.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$startTimeFilter": "

    If specified, only workflow executions that meet the start time criteria of the filter are counted.

    startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.", + "CountClosedWorkflowExecutionsInput$closeTimeFilter": "

    If specified, only workflow executions that meet the close time criteria of the filter are counted.

    startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.", + "CountOpenWorkflowExecutionsInput$startTimeFilter": "

    Specifies the start time criteria that workflow executions must meet in order to be counted.

    ", + "ListClosedWorkflowExecutionsInput$startTimeFilter": "

    If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.

    startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.", + "ListClosedWorkflowExecutionsInput$closeTimeFilter": "

    If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.

    startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both.", + "ListOpenWorkflowExecutionsInput$startTimeFilter": "

    Workflow executions are included in the returned results based on whether their start times are within the range specified by this filter.

    " + } + }, + "ExternalWorkflowExecutionCancelRequestedEventAttributes": { + "base": "

    Provides details of the ExternalWorkflowExecutionCancelRequested event.

    ", + "refs": { + "HistoryEvent$externalWorkflowExecutionCancelRequestedEventAttributes": "

    If the event is of type ExternalWorkflowExecutionCancelRequested then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ExternalWorkflowExecutionSignaledEventAttributes": { + "base": "

    Provides details of the ExternalWorkflowExecutionSignaled event.

    ", + "refs": { + "HistoryEvent$externalWorkflowExecutionSignaledEventAttributes": "

    If the event is of type ExternalWorkflowExecutionSignaled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "FailWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the FailWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$failWorkflowExecutionDecisionAttributes": "

    Provides details of the FailWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "FailWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "FailWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "FailWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the FailWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$failWorkflowExecutionFailedEventAttributes": "

    If the event is of type FailWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "FailureReason": { + "base": null, + "refs": { + "ActivityTaskFailedEventAttributes$reason": "

    The reason provided for the failure (if any).

    ", + "ChildWorkflowExecutionFailedEventAttributes$reason": "

    The reason for the failure (if provided).

    ", + "FailWorkflowExecutionDecisionAttributes$reason": "

    A descriptive reason for the failure that may help in diagnostics.

    ", + "LambdaFunctionFailedEventAttributes$reason": "

    The reason provided for the failure (if any).

    ", + "RespondActivityTaskFailedInput$reason": "

    Description of the error that may assist in diagnostics.

    ", + "WorkflowExecutionFailedEventAttributes$reason": "

    The descriptive reason provided for the failure (if any).

    " + } + }, + "FunctionId": { + "base": null, + "refs": { + "LambdaFunctionScheduledEventAttributes$id": "

    The unique Amazon SWF ID for the AWS Lambda task.

    ", + "ScheduleLambdaFunctionDecisionAttributes$id": "

    Required. The SWF id of the AWS Lambda task.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "ScheduleLambdaFunctionFailedEventAttributes$id": "

    The unique Amazon SWF ID of the AWS Lambda task.

    " + } + }, + "FunctionInput": { + "base": null, + "refs": { + "LambdaFunctionScheduledEventAttributes$input": "

    Input provided to the AWS Lambda function.

    ", + "ScheduleLambdaFunctionDecisionAttributes$input": "

    The input provided to the AWS Lambda function.

    " + } + }, + "FunctionName": { + "base": null, + "refs": { + "LambdaFunctionScheduledEventAttributes$name": "

    The name of the scheduled AWS Lambda function.

    ", + "ScheduleLambdaFunctionDecisionAttributes$name": "

    Required. The name of the AWS Lambda function to invoke.

    ", + "ScheduleLambdaFunctionFailedEventAttributes$name": "

    The name of the scheduled AWS Lambda function.

    " + } + }, + "GetWorkflowExecutionHistoryInput": { + "base": null, + "refs": { + } + }, + "History": { + "base": "

    Paginated representation of a workflow history for a workflow execution. This is the up to date, complete and authoritative record of the events related to all tasks and events in the life of the workflow execution.

    ", + "refs": { + } + }, + "HistoryEvent": { + "base": "

    Event within a workflow execution. A history event can be one of these types:

    • WorkflowExecutionStarted: The workflow execution was started.
    • WorkflowExecutionCompleted: The workflow execution was closed due to successful completion.
    • WorkflowExecutionFailed: The workflow execution closed due to a failure.
    • WorkflowExecutionTimedOut: The workflow execution was closed because a time out was exceeded.
    • WorkflowExecutionCanceled: The workflow execution was successfully canceled and closed.
    • WorkflowExecutionTerminated: The workflow execution was terminated.
    • WorkflowExecutionContinuedAsNew: The workflow execution was closed and a new execution of the same type was created with the same workflowId.
    • WorkflowExecutionCancelRequested: A request to cancel this workflow execution was made.
    • DecisionTaskScheduled: A decision task was scheduled for the workflow execution.
    • DecisionTaskStarted: The decision task was dispatched to a decider.
    • DecisionTaskCompleted: The decider successfully completed a decision task by calling RespondDecisionTaskCompleted.
    • DecisionTaskTimedOut: The decision task timed out.
    • ActivityTaskScheduled: An activity task was scheduled for execution.
    • ScheduleActivityTaskFailed: Failed to process ScheduleActivityTask decision. This happens when the decision is not configured properly, for example the activity type specified is not registered.
    • ActivityTaskStarted: The scheduled activity task was dispatched to a worker.
    • ActivityTaskCompleted: An activity worker successfully completed an activity task by calling RespondActivityTaskCompleted.
    • ActivityTaskFailed: An activity worker failed an activity task by calling RespondActivityTaskFailed.
    • ActivityTaskTimedOut: The activity task timed out.
    • ActivityTaskCanceled: The activity task was successfully canceled.
    • ActivityTaskCancelRequested: A RequestCancelActivityTask decision was received by the system.
    • RequestCancelActivityTaskFailed: Failed to process RequestCancelActivityTask decision. This happens when the decision is not configured properly.
    • WorkflowExecutionSignaled: An external signal was received for the workflow execution.
    • MarkerRecorded: A marker was recorded in the workflow history as the result of a RecordMarker decision.
    • TimerStarted: A timer was started for the workflow execution due to a StartTimer decision.
    • StartTimerFailed: Failed to process StartTimer decision. This happens when the decision is not configured properly, for example a timer already exists with the specified timer ID.
    • TimerFired: A timer, previously started for this workflow execution, fired.
    • TimerCanceled: A timer, previously started for this workflow execution, was successfully canceled.
    • CancelTimerFailed: Failed to process CancelTimer decision. This happens when the decision is not configured properly, for example no timer exists with the specified timer ID.
    • StartChildWorkflowExecutionInitiated: A request was made to start a child workflow execution.
    • StartChildWorkflowExecutionFailed: Failed to process StartChildWorkflowExecution decision. This happens when the decision is not configured properly, for example the workflow type specified is not registered.
    • ChildWorkflowExecutionStarted: A child workflow execution was successfully started.
    • ChildWorkflowExecutionCompleted: A child workflow execution, started by this workflow execution, completed successfully and was closed.
    • ChildWorkflowExecutionFailed: A child workflow execution, started by this workflow execution, failed to complete successfully and was closed.
    • ChildWorkflowExecutionTimedOut: A child workflow execution, started by this workflow execution, timed out and was closed.
    • ChildWorkflowExecutionCanceled: A child workflow execution, started by this workflow execution, was canceled and closed.
    • ChildWorkflowExecutionTerminated: A child workflow execution, started by this workflow execution, was terminated.
    • SignalExternalWorkflowExecutionInitiated: A request to signal an external workflow was made.
    • ExternalWorkflowExecutionSignaled: A signal, requested by this workflow execution, was successfully delivered to the target external workflow execution.
    • SignalExternalWorkflowExecutionFailed: The request to signal an external workflow execution failed.
    • RequestCancelExternalWorkflowExecutionInitiated: A request was made to request the cancellation of an external workflow execution.
    • ExternalWorkflowExecutionCancelRequested: Request to cancel an external workflow execution was successfully delivered to the target execution.
    • RequestCancelExternalWorkflowExecutionFailed: Request to cancel an external workflow execution failed.
    • LambdaFunctionScheduled: An AWS Lambda function was scheduled for execution.
    • LambdaFunctionStarted: The scheduled function was invoked in the AWS Lambda service.
    • LambdaFunctionCompleted: The AWS Lambda function successfully completed.
    • LambdaFunctionFailed: The AWS Lambda function execution failed.
    • LambdaFunctionTimedOut: The AWS Lambda function execution timed out.
    • ScheduleLambdaFunctionFailed: Failed to process ScheduleLambdaFunction decision. This happens when the workflow execution does not have the proper IAM role attached to invoke AWS Lambda functions.
    • StartLambdaFunctionFailed: Failed to invoke the scheduled function in the AWS Lambda service. This happens when the AWS Lambda service is not available in the current region, or received too many requests.
    ", + "refs": { + "HistoryEventList$member": null + } + }, + "HistoryEventList": { + "base": null, + "refs": { + "DecisionTask$events": "

    A paginated list of history events of the workflow execution. The decider uses this during the processing of the decision task.

    ", + "History$events": "

    The list of history events.

    " + } + }, + "Identity": { + "base": null, + "refs": { + "ActivityTaskStartedEventAttributes$identity": "

    Identity of the worker that was assigned this task. This aids diagnostics when problems arise. The form of this identity is user defined.

    ", + "DecisionTaskStartedEventAttributes$identity": "

    Identity of the decider making the request. This enables diagnostic tracing when problems arise. The form of this identity is user defined.

    ", + "PollForActivityTaskInput$identity": "

    Identity of the worker making the request, recorded in the ActivityTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.

    ", + "PollForDecisionTaskInput$identity": "

    Identity of the decider making the request, which is recorded in the DecisionTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.

    " + } + }, + "LambdaFunctionCompletedEventAttributes": { + "base": "

    Provides details for the LambdaFunctionCompleted event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionCompletedEventAttributes": null + } + }, + "LambdaFunctionFailedEventAttributes": { + "base": "

    Provides details for the LambdaFunctionFailed event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionFailedEventAttributes": null + } + }, + "LambdaFunctionScheduledEventAttributes": { + "base": "

    Provides details for the LambdaFunctionScheduled event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionScheduledEventAttributes": null + } + }, + "LambdaFunctionStartedEventAttributes": { + "base": "

    Provides details for the LambdaFunctionStarted event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionStartedEventAttributes": null + } + }, + "LambdaFunctionTimedOutEventAttributes": { + "base": "

    Provides details for the LambdaFunctionTimedOut event.

    ", + "refs": { + "HistoryEvent$lambdaFunctionTimedOutEventAttributes": null + } + }, + "LambdaFunctionTimeoutType": { + "base": null, + "refs": { + "LambdaFunctionTimedOutEventAttributes$timeoutType": "

    The type of the timeout that caused this event.

    " + } + }, + "LimitExceededFault": { + "base": "

    Returned by any operation if a system imposed limitation has been reached. To address this fault you should either clean up unused resources or increase the limit by contacting AWS.

    ", + "refs": { + } + }, + "LimitedData": { + "base": null, + "refs": { + "ActivityTaskTimedOutEventAttributes$details": "

    Contains the content of the details parameter for the last call made by the activity to RecordActivityTaskHeartbeat.

    ", + "RecordActivityTaskHeartbeatInput$details": "

    If specified, contains details about the progress of the task.

    " + } + }, + "ListActivityTypesInput": { + "base": null, + "refs": { + } + }, + "ListClosedWorkflowExecutionsInput": { + "base": null, + "refs": { + } + }, + "ListDomainsInput": { + "base": null, + "refs": { + } + }, + "ListOpenWorkflowExecutionsInput": { + "base": null, + "refs": { + } + }, + "ListWorkflowTypesInput": { + "base": null, + "refs": { + } + }, + "MarkerName": { + "base": null, + "refs": { + "MarkerRecordedEventAttributes$markerName": "

    The name of the marker.

    ", + "RecordMarkerDecisionAttributes$markerName": "

    Required. The name of the marker.

    ", + "RecordMarkerFailedEventAttributes$markerName": "

    The marker's name.

    " + } + }, + "MarkerRecordedEventAttributes": { + "base": "

    Provides details of the MarkerRecorded event.

    ", + "refs": { + "HistoryEvent$markerRecordedEventAttributes": "

    If the event is of type MarkerRecorded then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "Name": { + "base": null, + "refs": { + "ActivityType$name": "

    The name of this activity.

    The combination of activity type name and version must be unique within a domain.", + "ListActivityTypesInput$name": "

    If specified, only lists the activity types that have this name.

    ", + "ListWorkflowTypesInput$name": "

    If specified, lists the workflow type with this name.

    ", + "RegisterActivityTypeInput$name": "

    The name of the activity type within the domain.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "RegisterWorkflowTypeInput$name": "

    The name of the workflow type.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "TaskList$name": "

    The name of the task list.

    ", + "WorkflowType$name": "

    Required. The name of the workflow type.

    The combination of workflow type name and version must be unique within a domain.", + "WorkflowTypeFilter$name": "

    Required. Name of the workflow type.

    " + } + }, + "OpenDecisionTasksCount": { + "base": null, + "refs": { + "WorkflowExecutionOpenCounts$openDecisionTasks": "

    The count of decision tasks whose status is OPEN. A workflow execution can have at most one open decision task.

    " + } + }, + "OperationNotPermittedFault": { + "base": "

    Returned when the caller does not have sufficient permissions to invoke the action.

    ", + "refs": { + } + }, + "PageSize": { + "base": null, + "refs": { + "GetWorkflowExecutionHistoryInput$maximumPageSize": "

    The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListActivityTypesInput$maximumPageSize": "

    The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListClosedWorkflowExecutionsInput$maximumPageSize": "

    The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListDomainsInput$maximumPageSize": "

    The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListOpenWorkflowExecutionsInput$maximumPageSize": "

    The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "ListWorkflowTypesInput$maximumPageSize": "

    The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    ", + "PollForDecisionTaskInput$maximumPageSize": "

    The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum.

    This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.

    " + } + }, + "PageToken": { + "base": null, + "refs": { + "ActivityTypeInfos$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "DecisionTask$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "DomainInfos$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "GetWorkflowExecutionHistoryInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "History$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListActivityTypesInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListClosedWorkflowExecutionsInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListDomainsInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListOpenWorkflowExecutionsInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "ListWorkflowTypesInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "PollForDecisionTaskInput$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory to get the next page. You must call PollForDecisionTask again (with the nextPageToken) to retrieve the next page of history records. Calling PollForDecisionTask with a nextPageToken will not return a new decision task..", + "WorkflowExecutionInfos$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    ", + "WorkflowTypeInfos$nextPageToken": "

    If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken. Keep all other arguments unchanged.

    The configured maximumPageSize determines how many results can be returned in a single call.

    " + } + }, + "PendingTaskCount": { + "base": "

    Contains the count of tasks in a task list.

    ", + "refs": { + } + }, + "PollForActivityTaskInput": { + "base": null, + "refs": { + } + }, + "PollForDecisionTaskInput": { + "base": null, + "refs": { + } + }, + "RecordActivityTaskHeartbeatInput": { + "base": null, + "refs": { + } + }, + "RecordMarkerDecisionAttributes": { + "base": "

    Provides details of the RecordMarker decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$recordMarkerDecisionAttributes": "

    Provides details of the RecordMarker decision. It is not set for other decision types.

    " + } + }, + "RecordMarkerFailedCause": { + "base": null, + "refs": { + "RecordMarkerFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "RecordMarkerFailedEventAttributes": { + "base": "

    Provides details of the RecordMarkerFailed event.

    ", + "refs": { + "HistoryEvent$recordMarkerFailedEventAttributes": "

    If the event is of type DecisionTaskFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "RegisterActivityTypeInput": { + "base": null, + "refs": { + } + }, + "RegisterDomainInput": { + "base": null, + "refs": { + } + }, + "RegisterWorkflowTypeInput": { + "base": null, + "refs": { + } + }, + "RegistrationStatus": { + "base": null, + "refs": { + "ActivityTypeInfo$status": "

    The current status of the activity type.

    ", + "DomainInfo$status": "

    The status of the domain:

    • REGISTERED: The domain is properly registered and available. You can use this domain for registering types and creating new workflow executions.
    • DEPRECATED: The domain was deprecated using DeprecateDomain, but is still in use. You should not create new workflow executions in this domain.
    ", + "ListActivityTypesInput$registrationStatus": "

    Specifies the registration status of the activity types to list.

    ", + "ListDomainsInput$registrationStatus": "

    Specifies the registration status of the domains to list.

    ", + "ListWorkflowTypesInput$registrationStatus": "

    Specifies the registration status of the workflow types to list.

    ", + "WorkflowTypeInfo$status": "

    The current status of the workflow type.

    " + } + }, + "RequestCancelActivityTaskDecisionAttributes": { + "base": "

    Provides details of the RequestCancelActivityTask decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$requestCancelActivityTaskDecisionAttributes": "

    Provides details of the RequestCancelActivityTask decision. It is not set for other decision types.

    " + } + }, + "RequestCancelActivityTaskFailedCause": { + "base": null, + "refs": { + "RequestCancelActivityTaskFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "RequestCancelActivityTaskFailedEventAttributes": { + "base": "

    Provides details of the RequestCancelActivityTaskFailed event.

    ", + "refs": { + "HistoryEvent$requestCancelActivityTaskFailedEventAttributes": "

    If the event is of type RequestCancelActivityTaskFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "RequestCancelExternalWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the RequestCancelExternalWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$requestCancelExternalWorkflowExecutionDecisionAttributes": "

    Provides details of the RequestCancelExternalWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "RequestCancelExternalWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "RequestCancelExternalWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the RequestCancelExternalWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$requestCancelExternalWorkflowExecutionFailedEventAttributes": "

    If the event is of type RequestCancelExternalWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes": { + "base": "

    Provides details of the RequestCancelExternalWorkflowExecutionInitiated event.

    ", + "refs": { + "HistoryEvent$requestCancelExternalWorkflowExecutionInitiatedEventAttributes": "

    If the event is of type RequestCancelExternalWorkflowExecutionInitiated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "RequestCancelWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "RespondActivityTaskCanceledInput": { + "base": null, + "refs": { + } + }, + "RespondActivityTaskCompletedInput": { + "base": null, + "refs": { + } + }, + "RespondActivityTaskFailedInput": { + "base": null, + "refs": { + } + }, + "RespondDecisionTaskCompletedInput": { + "base": null, + "refs": { + } + }, + "ReverseOrder": { + "base": null, + "refs": { + "GetWorkflowExecutionHistoryInput$reverseOrder": "

    When set to true, returns the events in reverse order. By default the results are returned in ascending order of the eventTimeStamp of the events.

    ", + "ListActivityTypesInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by name of the activity types.

    ", + "ListClosedWorkflowExecutionsInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.

    ", + "ListDomainsInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by name of the domains.

    ", + "ListOpenWorkflowExecutionsInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default the results are returned in descending order of the start time of the executions.

    ", + "ListWorkflowTypesInput$reverseOrder": "

    When set to true, returns the results in reverse order. By default the results are returned in ascending alphabetical order of the name of the workflow types.

    ", + "PollForDecisionTaskInput$reverseOrder": "

    When set to true, returns the events in reverse order. By default the results are returned in ascending order of the eventTimestamp of the events.

    " + } + }, + "Run": { + "base": "

    Specifies the runId of a workflow execution.

    ", + "refs": { + } + }, + "RunId": { + "base": null, + "refs": { + "Run$runId": "

    The runId of a workflow execution. This ID is generated by the service and can be used to uniquely identify the workflow execution within a domain.

    ", + "WorkflowExecution$runId": "

    A system-generated unique identifier for the workflow execution.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$newExecutionRunId": "

    The runId of the new workflow execution.

    " + } + }, + "RunIdOptional": { + "base": null, + "refs": { + "RequestCancelExternalWorkflowExecutionDecisionAttributes$runId": "

    The runId of the external workflow execution to cancel.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$runId": "

    The runId of the external workflow execution.

    ", + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes$runId": "

    The runId of the external workflow execution to be canceled.

    ", + "RequestCancelWorkflowExecutionInput$runId": "

    The runId of the workflow execution to cancel.

    ", + "SignalExternalWorkflowExecutionDecisionAttributes$runId": "

    The runId of the workflow execution to be signaled.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$runId": "

    The runId of the external workflow execution that the signal was being delivered to.

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$runId": "

    The runId of the external workflow execution to send the signal to.

    ", + "SignalWorkflowExecutionInput$runId": "

    The runId of the workflow execution to signal.

    ", + "TerminateWorkflowExecutionInput$runId": "

    The runId of the workflow execution to terminate.

    ", + "WorkflowExecutionStartedEventAttributes$continuedExecutionRunId": "

    If this workflow execution was started due to a ContinueAsNewWorkflowExecution decision, then it contains the runId of the previous workflow execution that was closed and continued as this execution.

    " + } + }, + "ScheduleActivityTaskDecisionAttributes": { + "base": "

    Provides details of the ScheduleActivityTask decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • activityType.name: String constraint. The key is swf:activityType.name.
      • activityType.version: String constraint. The key is swf:activityType.version.
      • taskList: String constraint. The key is swf:taskList.name.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$scheduleActivityTaskDecisionAttributes": "

    Provides details of the ScheduleActivityTask decision. It is not set for other decision types.

    " + } + }, + "ScheduleActivityTaskFailedCause": { + "base": null, + "refs": { + "ScheduleActivityTaskFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "ScheduleActivityTaskFailedEventAttributes": { + "base": "

    Provides details of the ScheduleActivityTaskFailed event.

    ", + "refs": { + "HistoryEvent$scheduleActivityTaskFailedEventAttributes": "

    If the event is of type ScheduleActivityTaskFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "ScheduleLambdaFunctionDecisionAttributes": { + "base": "

    Provides details of the ScheduleLambdaFunction decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • activityType.name: String constraint. The key is swf:activityType.name.
      • activityType.version: String constraint. The key is swf:activityType.version.
      • taskList: String constraint. The key is swf:taskList.name.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$scheduleLambdaFunctionDecisionAttributes": null + } + }, + "ScheduleLambdaFunctionFailedCause": { + "base": null, + "refs": { + "ScheduleLambdaFunctionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "ScheduleLambdaFunctionFailedEventAttributes": { + "base": "

    Provides details for the ScheduleLambdaFunctionFailed event.

    ", + "refs": { + "HistoryEvent$scheduleLambdaFunctionFailedEventAttributes": null + } + }, + "SignalExternalWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the SignalExternalWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$signalExternalWorkflowExecutionDecisionAttributes": "

    Provides details of the SignalExternalWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "SignalExternalWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "SignalExternalWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "SignalExternalWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the SignalExternalWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$signalExternalWorkflowExecutionFailedEventAttributes": "

    If the event is of type SignalExternalWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "SignalExternalWorkflowExecutionInitiatedEventAttributes": { + "base": "

    Provides details of the SignalExternalWorkflowExecutionInitiated event.

    ", + "refs": { + "HistoryEvent$signalExternalWorkflowExecutionInitiatedEventAttributes": "

    If the event is of type SignalExternalWorkflowExecutionInitiated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "SignalName": { + "base": null, + "refs": { + "SignalExternalWorkflowExecutionDecisionAttributes$signalName": "

    Required. The name of the signal.The target workflow execution will use the signal name and input to process the signal.

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$signalName": "

    The name of the signal.

    ", + "SignalWorkflowExecutionInput$signalName": "

    The name of the signal. This name must be meaningful to the target workflow.

    ", + "WorkflowExecutionSignaledEventAttributes$signalName": "

    The name of the signal received. The decider can use the signal name and inputs to determine how to the process the signal.

    " + } + }, + "SignalWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "StartChildWorkflowExecutionDecisionAttributes": { + "base": "

    Provides details of the StartChildWorkflowExecution decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • Constrain the following parameters by using a Condition element with the appropriate keys.
      • tagList.member.N: The key is \"swf:tagList.N\" where N is the tag number from 0 to 4, inclusive.
      • taskList: String constraint. The key is swf:taskList.name.
      • workflowType.name: String constraint. The key is swf:workflowType.name.
      • workflowType.version: String constraint. The key is swf:workflowType.version.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$startChildWorkflowExecutionDecisionAttributes": "

    Provides details of the StartChildWorkflowExecution decision. It is not set for other decision types.

    " + } + }, + "StartChildWorkflowExecutionFailedCause": { + "base": null, + "refs": { + "StartChildWorkflowExecutionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "StartChildWorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the StartChildWorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$startChildWorkflowExecutionFailedEventAttributes": "

    If the event is of type StartChildWorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "StartChildWorkflowExecutionInitiatedEventAttributes": { + "base": "

    Provides details of the StartChildWorkflowExecutionInitiated event.

    ", + "refs": { + "HistoryEvent$startChildWorkflowExecutionInitiatedEventAttributes": "

    If the event is of type StartChildWorkflowExecutionInitiated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "StartLambdaFunctionFailedCause": { + "base": null, + "refs": { + "StartLambdaFunctionFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "StartLambdaFunctionFailedEventAttributes": { + "base": "

    Provides details for the StartLambdaFunctionFailed event.

    ", + "refs": { + "HistoryEvent$startLambdaFunctionFailedEventAttributes": null + } + }, + "StartTimerDecisionAttributes": { + "base": "

    Provides details of the StartTimer decision.

    Access Control

    You can use IAM policies to control this decision's access to Amazon SWF resources as follows:

    • Use a Resource element with the domain name to limit the action to only specified domains.
    • Use an Action element to allow or deny permission to call this action.
    • You cannot use an IAM policy to constrain this action's parameters.

    If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows.

    ", + "refs": { + "Decision$startTimerDecisionAttributes": "

    Provides details of the StartTimer decision. It is not set for other decision types.

    " + } + }, + "StartTimerFailedCause": { + "base": null, + "refs": { + "StartTimerFailedEventAttributes$cause": "

    The cause of the failure. This information is generated by the system and can be useful for diagnostic purposes.

    If cause is set to OPERATION_NOT_PERMITTED, the decision failed because it lacked sufficient permissions. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows." + } + }, + "StartTimerFailedEventAttributes": { + "base": "

    Provides details of the StartTimerFailed event.

    ", + "refs": { + "HistoryEvent$startTimerFailedEventAttributes": "

    If the event is of type StartTimerFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "StartWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "Tag": { + "base": null, + "refs": { + "TagFilter$tag": "

    Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria.

    ", + "TagList$member": null + } + }, + "TagFilter": { + "base": "

    Used to filter the workflow executions in visibility APIs based on a tag.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$tagFilter": "

    If specified, only executions that have a tag that matches the filter are counted.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "CountOpenWorkflowExecutionsInput$tagFilter": "

    If specified, only executions that have a tag that matches the filter are counted.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListClosedWorkflowExecutionsInput$tagFilter": "

    If specified, only executions that have the matching tag are listed.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListOpenWorkflowExecutionsInput$tagFilter": "

    If specified, only executions that have the matching tag are listed.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request." + } + }, + "TagList": { + "base": null, + "refs": { + "ContinueAsNewWorkflowExecutionDecisionAttributes$tagList": "

    The list of tags to associate with the new workflow execution. A maximum of 5 tags can be specified. You can list workflow executions with a specific tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and specifying a TagFilter.

    ", + "StartChildWorkflowExecutionDecisionAttributes$tagList": "

    The list of tags to associate with the child workflow execution. A maximum of 5 tags can be specified. You can list workflow executions with a specific tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and specifying a TagFilter.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$tagList": "

    The list of tags to associated with the child workflow execution.

    ", + "StartWorkflowExecutionInput$tagList": "

    The list of tags to associate with the workflow execution. You can specify a maximum of 5 tags. You can list workflow executions with a specific tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and specifying a TagFilter.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$tagList": "

    The list of tags associated with the new workflow execution.

    ", + "WorkflowExecutionInfo$tagList": "

    The list of tags associated with the workflow execution. Tags can be used to identify and list workflow executions of interest through the visibility APIs. A workflow execution can have a maximum of 5 tags.

    ", + "WorkflowExecutionStartedEventAttributes$tagList": "

    The list of tags associated with this workflow execution. An execution can have up to 5 tags.

    " + } + }, + "TaskList": { + "base": "

    Represents a task list.

    ", + "refs": { + "ActivityTaskScheduledEventAttributes$taskList": "

    The task list in which the activity task has been scheduled.

    ", + "ActivityTypeConfiguration$defaultTaskList": "

    Optional. The default task list specified for this activity type at registration. This default is used if a task list is not provided when a task is scheduled through the ScheduleActivityTask decision. You can override the default registered task list when scheduling a task through the ScheduleActivityTask decision.

    ", + "ContinueAsNewWorkflowExecutionDecisionAttributes$taskList": null, + "CountPendingActivityTasksInput$taskList": "

    The name of the task list.

    ", + "CountPendingDecisionTasksInput$taskList": "

    The name of the task list.

    ", + "DecisionTaskScheduledEventAttributes$taskList": "

    The name of the task list in which the decision task was scheduled.

    ", + "PollForActivityTaskInput$taskList": "

    Specifies the task list to poll for activity tasks.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "PollForDecisionTaskInput$taskList": "

    Specifies the task list to poll for decision tasks.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "RegisterActivityTypeInput$defaultTaskList": "

    If set, specifies the default task list to use for scheduling tasks of this activity type. This default task list is used if a task list is not provided when a task is scheduled through the ScheduleActivityTask decision.

    ", + "RegisterWorkflowTypeInput$defaultTaskList": "

    If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.

    ", + "ScheduleActivityTaskDecisionAttributes$taskList": "

    If set, specifies the name of the task list in which to schedule the activity task. If not specified, the defaultTaskList registered with the activity type will be used.

    A task list for this activity task must be specified either as a default for the activity type or through this field. If neither this field is set nor a default task list was specified at registration time then a fault will be returned.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "StartChildWorkflowExecutionDecisionAttributes$taskList": "

    The name of the task list to be used for decision tasks of the child workflow execution.

    A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault will be returned.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$taskList": "

    The name of the task list used for the decision tasks of the child workflow execution.

    ", + "StartWorkflowExecutionInput$taskList": "

    The task list to use for the decision tasks generated for this workflow execution. This overrides the defaultTaskList specified when registering the workflow type.

    A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault will be returned.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "WorkflowExecutionConfiguration$taskList": "

    The task list used for the decision tasks generated for this workflow execution.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$taskList": null, + "WorkflowExecutionStartedEventAttributes$taskList": "

    The name of the task list for scheduling the decision tasks for this workflow execution.

    ", + "WorkflowTypeConfiguration$defaultTaskList": "

    Optional. The default task list, specified when registering the workflow type, for decisions tasks scheduled for workflow executions of this type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    " + } + }, + "TaskPriority": { + "base": null, + "refs": { + "ActivityTaskScheduledEventAttributes$taskPriority": "

    Optional. The priority to assign to the scheduled activity task. If set, this will override any default priority value that was assigned when the activity type was registered.

    Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "ActivityTypeConfiguration$defaultTaskPriority": "

    Optional. The default task priority for tasks of this activity type, specified at registration. If not set, then \"0\" will be used as the default priority. This default can be overridden when scheduling an activity task.

    Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "ContinueAsNewWorkflowExecutionDecisionAttributes$taskPriority": "

    Optional. The task priority that, if set, specifies the priority for the decision tasks for this workflow execution. This overrides the defaultTaskPriority specified when registering the workflow type. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "DecisionTaskScheduledEventAttributes$taskPriority": "

    Optional. A task priority that, if set, specifies the priority for this decision task. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "RegisterActivityTypeInput$defaultTaskPriority": "

    The default task priority to assign to the activity type. If not assigned, then \"0\" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "RegisterWorkflowTypeInput$defaultTaskPriority": "

    The default task priority to assign to the workflow type. If not assigned, then \"0\" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "ScheduleActivityTaskDecisionAttributes$taskPriority": "

    Optional. If set, specifies the priority with which the activity task is to be assigned to a worker. This overrides the defaultTaskPriority specified when registering the activity type using RegisterActivityType. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "StartChildWorkflowExecutionDecisionAttributes$taskPriority": "

    Optional. A task priority that, if set, specifies the priority for a decision task of this workflow execution. This overrides the defaultTaskPriority specified when registering the workflow type. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$taskPriority": "

    Optional. The priority assigned for the decision tasks for this workflow execution. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "StartWorkflowExecutionInput$taskPriority": "

    The task priority to use for this workflow execution. This will override any default priority that was assigned when the workflow type was registered. If not set, then the default task priority for the workflow type will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "WorkflowExecutionConfiguration$taskPriority": "

    The priority assigned to decision tasks for this workflow execution. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$taskPriority": null, + "WorkflowExecutionStartedEventAttributes$taskPriority": null, + "WorkflowTypeConfiguration$defaultTaskPriority": "

    Optional. The default task priority, specified when registering the workflow type, for all decision tasks of this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.

    Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.

    For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide.

    " + } + }, + "TaskToken": { + "base": null, + "refs": { + "ActivityTask$taskToken": "

    The opaque string used as a handle on the task. This token is used by workers to communicate progress and response information back to the system about the task.

    ", + "DecisionTask$taskToken": "

    The opaque string used as a handle on the task. This token is used by workers to communicate progress and response information back to the system about the task.

    ", + "RecordActivityTaskHeartbeatInput$taskToken": "

    The taskToken of the ActivityTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results. ", + "RespondActivityTaskCanceledInput$taskToken": "

    The taskToken of the ActivityTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results.", + "RespondActivityTaskCompletedInput$taskToken": "

    The taskToken of the ActivityTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results.", + "RespondActivityTaskFailedInput$taskToken": "

    The taskToken of the ActivityTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results.", + "RespondDecisionTaskCompletedInput$taskToken": "

    The taskToken from the DecisionTask.

    taskToken is generated by the service and should be treated as an opaque value. If the task is passed to another process, its taskToken must also be passed. This enables it to provide its progress and respond with results." + } + }, + "TerminateReason": { + "base": null, + "refs": { + "TerminateWorkflowExecutionInput$reason": "

    Optional. A descriptive reason for terminating the workflow execution.

    ", + "WorkflowExecutionTerminatedEventAttributes$reason": "

    The reason provided for the termination (if any).

    " + } + }, + "TerminateWorkflowExecutionInput": { + "base": null, + "refs": { + } + }, + "TimerCanceledEventAttributes": { + "base": "

    Provides details of the TimerCanceled event.

    ", + "refs": { + "HistoryEvent$timerCanceledEventAttributes": "

    If the event is of type TimerCanceled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "TimerFiredEventAttributes": { + "base": "

    Provides details of the TimerFired event.

    ", + "refs": { + "HistoryEvent$timerFiredEventAttributes": "

    If the event is of type TimerFired then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "TimerId": { + "base": null, + "refs": { + "CancelTimerDecisionAttributes$timerId": "

    Required. The unique ID of the timer to cancel.

    ", + "CancelTimerFailedEventAttributes$timerId": "

    The timerId provided in the CancelTimer decision that failed.

    ", + "StartTimerDecisionAttributes$timerId": "

    Required. The unique ID of the timer.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "StartTimerFailedEventAttributes$timerId": "

    The timerId provided in the StartTimer decision that failed.

    ", + "TimerCanceledEventAttributes$timerId": "

    The unique ID of the timer that was canceled.

    ", + "TimerFiredEventAttributes$timerId": "

    The unique ID of the timer that fired.

    ", + "TimerStartedEventAttributes$timerId": "

    The unique ID of the timer that was started.

    " + } + }, + "TimerStartedEventAttributes": { + "base": "

    Provides details of the TimerStarted event.

    ", + "refs": { + "HistoryEvent$timerStartedEventAttributes": "

    If the event is of type TimerStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "ActivityTypeInfo$creationDate": "

    The date and time this activity type was created through RegisterActivityType.

    ", + "ActivityTypeInfo$deprecationDate": "

    If DEPRECATED, the date and time DeprecateActivityType was called.

    ", + "ExecutionTimeFilter$oldestDate": "

    Specifies the oldest start or close date and time to return.

    ", + "ExecutionTimeFilter$latestDate": "

    Specifies the latest start or close date and time to return.

    ", + "HistoryEvent$eventTimestamp": "

    The date and time when the event occurred.

    ", + "WorkflowExecutionDetail$latestActivityTaskTimestamp": "

    The time when the last activity task was scheduled for this workflow execution. You can use this information to determine if the workflow has not made progress for an unusually long period of time and might require a corrective action.

    ", + "WorkflowExecutionInfo$startTimestamp": "

    The time when the execution was started.

    ", + "WorkflowExecutionInfo$closeTimestamp": "

    The time when the workflow execution was closed. Set only if the execution status is CLOSED.

    ", + "WorkflowTypeInfo$creationDate": "

    The date when this type was registered.

    ", + "WorkflowTypeInfo$deprecationDate": "

    If the type is in deprecated state, then it is set to the date when the type was deprecated.

    " + } + }, + "Truncated": { + "base": null, + "refs": { + "PendingTaskCount$truncated": "

    If set to true, indicates that the actual count was more than the maximum supported by this API and the count returned is the truncated value.

    ", + "WorkflowExecutionCount$truncated": "

    If set to true, indicates that the actual count was more than the maximum supported by this API and the count returned is the truncated value.

    " + } + }, + "TypeAlreadyExistsFault": { + "base": "

    Returned if the type already exists in the specified domain. You will get this fault even if the existing type is in deprecated status. You can specify another version if the intent is to create a new distinct version of the type.

    ", + "refs": { + } + }, + "TypeDeprecatedFault": { + "base": "

    Returned when the specified activity or workflow type was already deprecated.

    ", + "refs": { + } + }, + "UnknownResourceFault": { + "base": "

    Returned when the named resource cannot be found within the scope of this operation (region or domain). This could happen if the named resource was never created or is no longer available for this operation.

    ", + "refs": { + } + }, + "Version": { + "base": null, + "refs": { + "ActivityType$version": "

    The version of this activity.

    The combination of activity type name and version must be unique within a domain.", + "ContinueAsNewWorkflowExecutionDecisionAttributes$workflowTypeVersion": null, + "RegisterActivityTypeInput$version": "

    The version of the activity type.

    The activity type consists of the name and version, the combination of which must be unique within the domain.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "RegisterWorkflowTypeInput$version": "

    The version of the workflow type.

    The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "WorkflowType$version": "

    Required. The version of the workflow type.

    The combination of workflow type name and version must be unique within a domain." + } + }, + "VersionOptional": { + "base": null, + "refs": { + "WorkflowTypeFilter$version": "

    Version of the workflow type.

    " + } + }, + "WorkflowExecution": { + "base": "

    Represents a workflow execution.

    ", + "refs": { + "ActivityTask$workflowExecution": "

    The workflow execution that started this activity task.

    ", + "ChildWorkflowExecutionCanceledEventAttributes$workflowExecution": "

    The child workflow execution that was canceled.

    ", + "ChildWorkflowExecutionCompletedEventAttributes$workflowExecution": "

    The child workflow execution that was completed.

    ", + "ChildWorkflowExecutionFailedEventAttributes$workflowExecution": "

    The child workflow execution that failed.

    ", + "ChildWorkflowExecutionStartedEventAttributes$workflowExecution": "

    The child workflow execution that was started.

    ", + "ChildWorkflowExecutionTerminatedEventAttributes$workflowExecution": "

    The child workflow execution that was terminated.

    ", + "ChildWorkflowExecutionTimedOutEventAttributes$workflowExecution": "

    The child workflow execution that timed out.

    ", + "DecisionTask$workflowExecution": "

    The workflow execution for which this decision task was created.

    ", + "DescribeWorkflowExecutionInput$execution": "

    The workflow execution to describe.

    ", + "ExternalWorkflowExecutionCancelRequestedEventAttributes$workflowExecution": "

    The external workflow execution to which the cancellation request was delivered.

    ", + "ExternalWorkflowExecutionSignaledEventAttributes$workflowExecution": "

    The external workflow execution that the signal was delivered to.

    ", + "GetWorkflowExecutionHistoryInput$execution": "

    Specifies the workflow execution for which to return the history.

    ", + "WorkflowExecutionCancelRequestedEventAttributes$externalWorkflowExecution": "

    The external workflow execution for which the cancellation was requested.

    ", + "WorkflowExecutionInfo$execution": "

    The workflow execution this information is about.

    ", + "WorkflowExecutionInfo$parent": "

    If this workflow execution is a child of another execution then contains the workflow execution that started this execution.

    ", + "WorkflowExecutionSignaledEventAttributes$externalWorkflowExecution": "

    The workflow execution that sent the signal. This is set only if the signal was sent by another workflow execution.

    ", + "WorkflowExecutionStartedEventAttributes$parentWorkflowExecution": "

    The source workflow execution that started this workflow execution. The member is not set if the workflow execution was not started by a workflow.

    " + } + }, + "WorkflowExecutionAlreadyStartedFault": { + "base": "

    Returned by StartWorkflowExecution when an open execution with the same workflowId is already running in the specified domain.

    ", + "refs": { + } + }, + "WorkflowExecutionCancelRequestedCause": { + "base": null, + "refs": { + "WorkflowExecutionCancelRequestedEventAttributes$cause": "

    If set, indicates that the request to cancel the workflow execution was automatically generated, and specifies the cause. This happens if the parent workflow execution times out or is terminated, and the child policy is set to cancel child executions.

    " + } + }, + "WorkflowExecutionCancelRequestedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionCancelRequested event.

    ", + "refs": { + "HistoryEvent$workflowExecutionCancelRequestedEventAttributes": "

    If the event is of type WorkflowExecutionCancelRequested then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionCanceledEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionCanceled event.

    ", + "refs": { + "HistoryEvent$workflowExecutionCanceledEventAttributes": "

    If the event is of type WorkflowExecutionCanceled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionCompletedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionCompleted event.

    ", + "refs": { + "HistoryEvent$workflowExecutionCompletedEventAttributes": "

    If the event is of type WorkflowExecutionCompleted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionConfiguration": { + "base": "

    The configuration settings for a workflow execution including timeout values, tasklist etc. These configuration settings are determined from the defaults specified when registering the workflow type and those specified when starting the workflow execution.

    ", + "refs": { + "WorkflowExecutionDetail$executionConfiguration": "

    The configuration settings for this workflow execution including timeout values, tasklist etc.

    " + } + }, + "WorkflowExecutionContinuedAsNewEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionContinuedAsNew event.

    ", + "refs": { + "HistoryEvent$workflowExecutionContinuedAsNewEventAttributes": "

    If the event is of type WorkflowExecutionContinuedAsNew then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionCount": { + "base": "

    Contains the count of workflow executions returned from CountOpenWorkflowExecutions or CountClosedWorkflowExecutions

    ", + "refs": { + } + }, + "WorkflowExecutionDetail": { + "base": "

    Contains details about a workflow execution.

    ", + "refs": { + } + }, + "WorkflowExecutionFailedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionFailed event.

    ", + "refs": { + "HistoryEvent$workflowExecutionFailedEventAttributes": "

    If the event is of type WorkflowExecutionFailed then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionFilter": { + "base": "

    Used to filter the workflow executions in visibility APIs by their workflowId.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$executionFilter": "

    If specified, only workflow executions matching the WorkflowId in the filter are counted.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "CountOpenWorkflowExecutionsInput$executionFilter": "

    If specified, only workflow executions matching the WorkflowId in the filter are counted.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListClosedWorkflowExecutionsInput$executionFilter": "

    If specified, only workflow executions matching the workflow ID specified in the filter are returned.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListOpenWorkflowExecutionsInput$executionFilter": "

    If specified, only workflow executions matching the workflow ID specified in the filter are returned.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request." + } + }, + "WorkflowExecutionInfo": { + "base": "

    Contains information about a workflow execution.

    ", + "refs": { + "WorkflowExecutionDetail$executionInfo": "

    Information about the workflow execution.

    ", + "WorkflowExecutionInfoList$member": null + } + }, + "WorkflowExecutionInfoList": { + "base": null, + "refs": { + "WorkflowExecutionInfos$executionInfos": "

    The list of workflow information structures.

    " + } + }, + "WorkflowExecutionInfos": { + "base": "

    Contains a paginated list of information about workflow executions.

    ", + "refs": { + } + }, + "WorkflowExecutionOpenCounts": { + "base": "

    Contains the counts of open tasks, child workflow executions and timers for a workflow execution.

    ", + "refs": { + "WorkflowExecutionDetail$openCounts": "

    The number of tasks for this workflow execution. This includes open and closed tasks of all types.

    " + } + }, + "WorkflowExecutionSignaledEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionSignaled event.

    ", + "refs": { + "HistoryEvent$workflowExecutionSignaledEventAttributes": "

    If the event is of type WorkflowExecutionSignaled then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionStartedEventAttributes": { + "base": "

    Provides details of WorkflowExecutionStarted event.

    ", + "refs": { + "HistoryEvent$workflowExecutionStartedEventAttributes": "

    If the event is of type WorkflowExecutionStarted then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionTerminatedCause": { + "base": null, + "refs": { + "WorkflowExecutionTerminatedEventAttributes$cause": "

    If set, indicates that the workflow execution was automatically terminated, and specifies the cause. This happens if the parent workflow execution times out or is terminated and the child policy is set to terminate child executions.

    " + } + }, + "WorkflowExecutionTerminatedEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionTerminated event.

    ", + "refs": { + "HistoryEvent$workflowExecutionTerminatedEventAttributes": "

    If the event is of type WorkflowExecutionTerminated then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionTimedOutEventAttributes": { + "base": "

    Provides details of the WorkflowExecutionTimedOut event.

    ", + "refs": { + "HistoryEvent$workflowExecutionTimedOutEventAttributes": "

    If the event is of type WorkflowExecutionTimedOut then this member is set and provides detailed information about the event. It is not set for other event types.

    " + } + }, + "WorkflowExecutionTimeoutType": { + "base": null, + "refs": { + "ChildWorkflowExecutionTimedOutEventAttributes$timeoutType": "

    The type of the timeout that caused the child workflow execution to time out.

    ", + "WorkflowExecutionTimedOutEventAttributes$timeoutType": "

    The type of timeout that caused this event.

    " + } + }, + "WorkflowId": { + "base": null, + "refs": { + "RequestCancelExternalWorkflowExecutionDecisionAttributes$workflowId": "

    Required. The workflowId of the external workflow execution to cancel.

    ", + "RequestCancelExternalWorkflowExecutionFailedEventAttributes$workflowId": "

    The workflowId of the external workflow to which the cancel request was to be delivered.

    ", + "RequestCancelExternalWorkflowExecutionInitiatedEventAttributes$workflowId": "

    The workflowId of the external workflow execution to be canceled.

    ", + "RequestCancelWorkflowExecutionInput$workflowId": "

    The workflowId of the workflow execution to cancel.

    ", + "SignalExternalWorkflowExecutionDecisionAttributes$workflowId": "

    Required. The workflowId of the workflow execution to be signaled.

    ", + "SignalExternalWorkflowExecutionFailedEventAttributes$workflowId": "

    The workflowId of the external workflow execution that the signal was being delivered to.

    ", + "SignalExternalWorkflowExecutionInitiatedEventAttributes$workflowId": "

    The workflowId of the external workflow execution.

    ", + "SignalWorkflowExecutionInput$workflowId": "

    The workflowId of the workflow execution to signal.

    ", + "StartChildWorkflowExecutionDecisionAttributes$workflowId": "

    Required. The workflowId of the workflow execution.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string \"arn\".

    ", + "StartChildWorkflowExecutionFailedEventAttributes$workflowId": "

    The workflowId of the child workflow execution.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$workflowId": "

    The workflowId of the child workflow execution.

    ", + "StartWorkflowExecutionInput$workflowId": "

    The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a restart of a previous execution. You cannot have two open workflow executions with the same workflowId at the same time.

    The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (\\u0000-\\u001f | \\u007f - \\u009f). Also, it must not contain the literal string quotarnquot.

    ", + "TerminateWorkflowExecutionInput$workflowId": "

    The workflowId of the workflow execution to terminate.

    ", + "WorkflowExecution$workflowId": "

    The user defined identifier associated with the workflow execution.

    ", + "WorkflowExecutionFilter$workflowId": "

    The workflowId to pass of match the criteria of this filter.

    " + } + }, + "WorkflowType": { + "base": "

    Represents a workflow type.

    ", + "refs": { + "ChildWorkflowExecutionCanceledEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionCompletedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionFailedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionStartedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionTerminatedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "ChildWorkflowExecutionTimedOutEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "DecisionTask$workflowType": "

    The type of the workflow execution for which this decision task was created.

    ", + "DeprecateWorkflowTypeInput$workflowType": "

    The workflow type to deprecate.

    ", + "DescribeWorkflowTypeInput$workflowType": "

    The workflow type to describe.

    ", + "StartChildWorkflowExecutionDecisionAttributes$workflowType": "

    Required. The type of the workflow execution to be started.

    ", + "StartChildWorkflowExecutionFailedEventAttributes$workflowType": "

    The workflow type provided in the StartChildWorkflowExecution decision that failed.

    ", + "StartChildWorkflowExecutionInitiatedEventAttributes$workflowType": "

    The type of the child workflow execution.

    ", + "StartWorkflowExecutionInput$workflowType": "

    The type of the workflow to start.

    ", + "WorkflowExecutionContinuedAsNewEventAttributes$workflowType": null, + "WorkflowExecutionInfo$workflowType": "

    The type of the workflow execution.

    ", + "WorkflowExecutionStartedEventAttributes$workflowType": "

    The workflow type of this execution.

    ", + "WorkflowTypeInfo$workflowType": "

    The workflow type this information is about.

    " + } + }, + "WorkflowTypeConfiguration": { + "base": "

    The configuration settings of a workflow type.

    ", + "refs": { + "WorkflowTypeDetail$configuration": "

    Configuration settings of the workflow type registered through RegisterWorkflowType

    " + } + }, + "WorkflowTypeDetail": { + "base": "

    Contains details about a workflow type.

    ", + "refs": { + } + }, + "WorkflowTypeFilter": { + "base": "

    Used to filter workflow execution query results by type. Each parameter, if specified, defines a rule that must be satisfied by each returned result.

    ", + "refs": { + "CountClosedWorkflowExecutionsInput$typeFilter": "

    If specified, indicates the type of the workflow executions to be counted.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "CountOpenWorkflowExecutionsInput$typeFilter": "

    Specifies the type of the workflow executions to be counted.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListClosedWorkflowExecutionsInput$typeFilter": "

    If specified, only executions of the type specified in the filter are returned.

    closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request.", + "ListOpenWorkflowExecutionsInput$typeFilter": "

    If specified, only executions of the type specified in the filter are returned.

    executionFilter, typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request." + } + }, + "WorkflowTypeInfo": { + "base": "

    Contains information about a workflow type.

    ", + "refs": { + "WorkflowTypeDetail$typeInfo": "

    General information about the workflow type.

    The status of the workflow type (returned in the WorkflowTypeInfo structure) can be one of the following.

    • REGISTERED: The type is registered and available. Workers supporting this type should be running.
    • DEPRECATED: The type was deprecated using DeprecateWorkflowType, but is still in use. You should keep workers supporting this type running. You cannot create new workflow executions of this type.
    ", + "WorkflowTypeInfoList$member": null + } + }, + "WorkflowTypeInfoList": { + "base": null, + "refs": { + "WorkflowTypeInfos$typeInfos": "

    The list of workflow type information.

    " + } + }, + "WorkflowTypeInfos": { + "base": "

    Contains a paginated list of information structures about workflow types.

    ", + "refs": { + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/swf/2012-01-25/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,46 @@ +{ + "pagination": { + "GetWorkflowExecutionHistory": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "events" + }, + "ListActivityTypes": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "typeInfos" + }, + "ListClosedWorkflowExecutions": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "executionInfos" + }, + "ListDomains": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "domainInfos" + }, + "ListOpenWorkflowExecutions": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "executionInfos" + }, + "ListWorkflowTypes": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "typeInfos" + }, + "PollForDecisionTask": { + "limit_key": "maximumPageSize", + "input_token": "nextPageToken", + "output_token": "nextPageToken", + "result_key": "events" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/api-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1738 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-08-24", + "endpointPrefix":"waf", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"WAF", + "serviceFullName":"AWS WAF", + "signatureVersion":"v4", + "targetPrefix":"AWSWAF_20150824" + }, + "operations":{ + "CreateByteMatchSet":{ + "name":"CreateByteMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateByteMatchSetRequest"}, + "output":{"shape":"CreateByteMatchSetResponse"}, + "errors":[ + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateIPSet":{ + "name":"CreateIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIPSetRequest"}, + "output":{"shape":"CreateIPSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateRule":{ + "name":"CreateRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRuleRequest"}, + "output":{"shape":"CreateRuleResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFDisallowedNameException"}, + 
{"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateSizeConstraintSet":{ + "name":"CreateSizeConstraintSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSizeConstraintSetRequest"}, + "output":{"shape":"CreateSizeConstraintSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateSqlInjectionMatchSet":{ + "name":"CreateSqlInjectionMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSqlInjectionMatchSetRequest"}, + "output":{"shape":"CreateSqlInjectionMatchSetResponse"}, + "errors":[ + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "CreateWebACL":{ + "name":"CreateWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWebACLRequest"}, + "output":{"shape":"CreateWebACLResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFDisallowedNameException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "DeleteByteMatchSet":{ + "name":"DeleteByteMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteByteMatchSetRequest"}, + "output":{"shape":"DeleteByteMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFStaleDataException"}, + 
{"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteIPSet":{ + "name":"DeleteIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIPSetRequest"}, + "output":{"shape":"DeleteIPSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteRule":{ + "name":"DeleteRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleRequest"}, + "output":{"shape":"DeleteRuleResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteSizeConstraintSet":{ + "name":"DeleteSizeConstraintSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSizeConstraintSetRequest"}, + "output":{"shape":"DeleteSizeConstraintSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteSqlInjectionMatchSet":{ + "name":"DeleteSqlInjectionMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSqlInjectionMatchSetRequest"}, + "output":{"shape":"DeleteSqlInjectionMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "DeleteWebACL":{ + "name":"DeleteWebACL", + 
"http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWebACLRequest"}, + "output":{"shape":"DeleteWebACLResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFNonEmptyEntityException"} + ] + }, + "GetByteMatchSet":{ + "name":"GetByteMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetByteMatchSetRequest"}, + "output":{"shape":"GetByteMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetChangeToken":{ + "name":"GetChangeToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetChangeTokenRequest"}, + "output":{"shape":"GetChangeTokenResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"} + ] + }, + "GetChangeTokenStatus":{ + "name":"GetChangeTokenStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetChangeTokenStatusRequest"}, + "output":{"shape":"GetChangeTokenStatusResponse"}, + "errors":[ + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInternalErrorException"} + ] + }, + "GetIPSet":{ + "name":"GetIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIPSetRequest"}, + "output":{"shape":"GetIPSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetRule":{ + "name":"GetRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRuleRequest"}, + "output":{"shape":"GetRuleResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetSampledRequests":{ + 
"name":"GetSampledRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSampledRequestsRequest"}, + "output":{"shape":"GetSampledRequestsResponse"}, + "errors":[ + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInternalErrorException"} + ] + }, + "GetSizeConstraintSet":{ + "name":"GetSizeConstraintSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSizeConstraintSetRequest"}, + "output":{"shape":"GetSizeConstraintSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetSqlInjectionMatchSet":{ + "name":"GetSqlInjectionMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSqlInjectionMatchSetRequest"}, + "output":{"shape":"GetSqlInjectionMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "GetWebACL":{ + "name":"GetWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetWebACLRequest"}, + "output":{"shape":"GetWebACLResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFNonexistentItemException"} + ] + }, + "ListByteMatchSets":{ + "name":"ListByteMatchSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListByteMatchSetsRequest"}, + "output":{"shape":"ListByteMatchSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListIPSets":{ + "name":"ListIPSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIPSetsRequest"}, + "output":{"shape":"ListIPSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListRules":{ + "name":"ListRules", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRulesRequest"}, + "output":{"shape":"ListRulesResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListSizeConstraintSets":{ + "name":"ListSizeConstraintSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSizeConstraintSetsRequest"}, + "output":{"shape":"ListSizeConstraintSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListSqlInjectionMatchSets":{ + "name":"ListSqlInjectionMatchSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSqlInjectionMatchSetsRequest"}, + "output":{"shape":"ListSqlInjectionMatchSetsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "ListWebACLs":{ + "name":"ListWebACLs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWebACLsRequest"}, + "output":{"shape":"ListWebACLsResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"} + ] + }, + "UpdateByteMatchSet":{ + "name":"UpdateByteMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateByteMatchSetRequest"}, + "output":{"shape":"UpdateByteMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateIPSet":{ + "name":"UpdateIPSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateIPSetRequest"}, + "output":{"shape":"UpdateIPSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + 
{"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateRule":{ + "name":"UpdateRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuleRequest"}, + "output":{"shape":"UpdateRuleResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateSizeConstraintSet":{ + "name":"UpdateSizeConstraintSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSizeConstraintSetRequest"}, + "output":{"shape":"UpdateSizeConstraintSetResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateSqlInjectionMatchSet":{ + "name":"UpdateSqlInjectionMatchSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSqlInjectionMatchSetRequest"}, + "output":{"shape":"UpdateSqlInjectionMatchSetResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + 
{"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFLimitsExceededException"} + ] + }, + "UpdateWebACL":{ + "name":"UpdateWebACL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateWebACLRequest"}, + "output":{"shape":"UpdateWebACLResponse"}, + "errors":[ + {"shape":"WAFStaleDataException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidAccountException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFNonexistentContainerException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFReferencedItemException"}, + {"shape":"WAFLimitsExceededException"} + ] + } + }, + "shapes":{ + "Action":{"type":"string"}, + "ActivatedRule":{ + "type":"structure", + "required":[ + "Priority", + "RuleId", + "Action" + ], + "members":{ + "Priority":{"shape":"RulePriority"}, + "RuleId":{"shape":"ResourceId"}, + "Action":{"shape":"WafAction"} + } + }, + "ActivatedRules":{ + "type":"list", + "member":{"shape":"ActivatedRule"} + }, + "ByteMatchSet":{ + "type":"structure", + "required":[ + "ByteMatchSetId", + "ByteMatchTuples" + ], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "ByteMatchTuples":{"shape":"ByteMatchTuples"} + } + }, + "ByteMatchSetSummaries":{ + "type":"list", + "member":{"shape":"ByteMatchSetSummary"} + }, + "ByteMatchSetSummary":{ + "type":"structure", + "required":[ + "ByteMatchSetId", + "Name" + ], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "ByteMatchSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "ByteMatchTuple" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "ByteMatchTuple":{"shape":"ByteMatchTuple"} + } + }, + "ByteMatchSetUpdates":{ + "type":"list", + "member":{"shape":"ByteMatchSetUpdate"} + }, + "ByteMatchTargetString":{"type":"blob"}, 
+ "ByteMatchTuple":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TargetString", + "TextTransformation", + "PositionalConstraint" + ], + "members":{ + "FieldToMatch":{"shape":"FieldToMatch"}, + "TargetString":{"shape":"ByteMatchTargetString"}, + "TextTransformation":{"shape":"TextTransformation"}, + "PositionalConstraint":{"shape":"PositionalConstraint"} + } + }, + "ByteMatchTuples":{ + "type":"list", + "member":{"shape":"ByteMatchTuple"} + }, + "ChangeAction":{ + "type":"string", + "enum":[ + "INSERT", + "DELETE" + ] + }, + "ChangeToken":{"type":"string"}, + "ChangeTokenStatus":{ + "type":"string", + "enum":[ + "PROVISIONED", + "PENDING", + "INSYNC" + ] + }, + "ComparisonOperator":{ + "type":"string", + "enum":[ + "EQ", + "NE", + "LE", + "LT", + "GE", + "GT" + ] + }, + "Country":{"type":"string"}, + "CreateByteMatchSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateByteMatchSetResponse":{ + "type":"structure", + "members":{ + "ByteMatchSet":{"shape":"ByteMatchSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateIPSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateIPSetResponse":{ + "type":"structure", + "members":{ + "IPSet":{"shape":"IPSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateRuleRequest":{ + "type":"structure", + "required":[ + "Name", + "MetricName", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "MetricName":{"shape":"MetricName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateRuleResponse":{ + "type":"structure", + "members":{ + "Rule":{"shape":"Rule"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateSizeConstraintSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], 
+ "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateSizeConstraintSetResponse":{ + "type":"structure", + "members":{ + "SizeConstraintSet":{"shape":"SizeConstraintSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateSqlInjectionMatchSetRequest":{ + "type":"structure", + "required":[ + "Name", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateSqlInjectionMatchSetResponse":{ + "type":"structure", + "members":{ + "SqlInjectionMatchSet":{"shape":"SqlInjectionMatchSet"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateWebACLRequest":{ + "type":"structure", + "required":[ + "Name", + "MetricName", + "DefaultAction", + "ChangeToken" + ], + "members":{ + "Name":{"shape":"ResourceName"}, + "MetricName":{"shape":"MetricName"}, + "DefaultAction":{"shape":"WafAction"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "CreateWebACLResponse":{ + "type":"structure", + "members":{ + "WebACL":{"shape":"WebACL"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteByteMatchSetRequest":{ + "type":"structure", + "required":[ + "ByteMatchSetId", + "ChangeToken" + ], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteByteMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteIPSetRequest":{ + "type":"structure", + "required":[ + "IPSetId", + "ChangeToken" + ], + "members":{ + "IPSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteIPSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteRuleRequest":{ + "type":"structure", + "required":[ + "RuleId", + "ChangeToken" + ], + "members":{ + "RuleId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteRuleResponse":{ + "type":"structure", + 
"members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteSizeConstraintSetRequest":{ + "type":"structure", + "required":[ + "SizeConstraintSetId", + "ChangeToken" + ], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteSizeConstraintSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteSqlInjectionMatchSetRequest":{ + "type":"structure", + "required":[ + "SqlInjectionMatchSetId", + "ChangeToken" + ], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteSqlInjectionMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteWebACLRequest":{ + "type":"structure", + "required":[ + "WebACLId", + "ChangeToken" + ], + "members":{ + "WebACLId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "DeleteWebACLResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "FieldToMatch":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{"shape":"MatchFieldType"}, + "Data":{"shape":"MatchFieldData"} + } + }, + "GetByteMatchSetRequest":{ + "type":"structure", + "required":["ByteMatchSetId"], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"} + } + }, + "GetByteMatchSetResponse":{ + "type":"structure", + "members":{ + "ByteMatchSet":{"shape":"ByteMatchSet"} + } + }, + "GetChangeTokenRequest":{ + "type":"structure", + "members":{ + } + }, + "GetChangeTokenResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "GetChangeTokenStatusRequest":{ + "type":"structure", + "required":["ChangeToken"], + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "GetChangeTokenStatusResponse":{ + "type":"structure", + "members":{ + "ChangeTokenStatus":{"shape":"ChangeTokenStatus"} + } + }, + 
"GetIPSetRequest":{ + "type":"structure", + "required":["IPSetId"], + "members":{ + "IPSetId":{"shape":"ResourceId"} + } + }, + "GetIPSetResponse":{ + "type":"structure", + "members":{ + "IPSet":{"shape":"IPSet"} + } + }, + "GetRuleRequest":{ + "type":"structure", + "required":["RuleId"], + "members":{ + "RuleId":{"shape":"ResourceId"} + } + }, + "GetRuleResponse":{ + "type":"structure", + "members":{ + "Rule":{"shape":"Rule"} + } + }, + "GetSampledRequestsRequest":{ + "type":"structure", + "required":[ + "WebAclId", + "RuleId", + "TimeWindow", + "MaxItems" + ], + "members":{ + "WebAclId":{"shape":"ResourceId"}, + "RuleId":{"shape":"ResourceId"}, + "TimeWindow":{"shape":"TimeWindow"}, + "MaxItems":{"shape":"ListMaxItems"} + } + }, + "GetSampledRequestsResponse":{ + "type":"structure", + "members":{ + "SampledRequests":{"shape":"SampledHTTPRequests"}, + "PopulationSize":{"shape":"PopulationSize"}, + "TimeWindow":{"shape":"TimeWindow"} + } + }, + "GetSizeConstraintSetRequest":{ + "type":"structure", + "required":["SizeConstraintSetId"], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"} + } + }, + "GetSizeConstraintSetResponse":{ + "type":"structure", + "members":{ + "SizeConstraintSet":{"shape":"SizeConstraintSet"} + } + }, + "GetSqlInjectionMatchSetRequest":{ + "type":"structure", + "required":["SqlInjectionMatchSetId"], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"} + } + }, + "GetSqlInjectionMatchSetResponse":{ + "type":"structure", + "members":{ + "SqlInjectionMatchSet":{"shape":"SqlInjectionMatchSet"} + } + }, + "GetWebACLRequest":{ + "type":"structure", + "required":["WebACLId"], + "members":{ + "WebACLId":{"shape":"ResourceId"} + } + }, + "GetWebACLResponse":{ + "type":"structure", + "members":{ + "WebACL":{"shape":"WebACL"} + } + }, + "HTTPHeader":{ + "type":"structure", + "members":{ + "Name":{"shape":"HeaderName"}, + "Value":{"shape":"HeaderValue"} + } + }, + "HTTPHeaders":{ + "type":"list", + "member":{"shape":"HTTPHeader"} + }, 
+ "HTTPMethod":{"type":"string"}, + "HTTPRequest":{ + "type":"structure", + "members":{ + "ClientIP":{"shape":"IPString"}, + "Country":{"shape":"Country"}, + "URI":{"shape":"URIString"}, + "Method":{"shape":"HTTPMethod"}, + "HTTPVersion":{"shape":"HTTPVersion"}, + "Headers":{"shape":"HTTPHeaders"} + } + }, + "HTTPVersion":{"type":"string"}, + "HeaderName":{"type":"string"}, + "HeaderValue":{"type":"string"}, + "IPSet":{ + "type":"structure", + "required":[ + "IPSetId", + "IPSetDescriptors" + ], + "members":{ + "IPSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "IPSetDescriptors":{"shape":"IPSetDescriptors"} + } + }, + "IPSetDescriptor":{ + "type":"structure", + "required":[ + "Type", + "Value" + ], + "members":{ + "Type":{"shape":"IPSetDescriptorType"}, + "Value":{"shape":"IPSetDescriptorValue"} + } + }, + "IPSetDescriptorType":{ + "type":"string", + "enum":["IPV4"] + }, + "IPSetDescriptorValue":{"type":"string"}, + "IPSetDescriptors":{ + "type":"list", + "member":{"shape":"IPSetDescriptor"} + }, + "IPSetSummaries":{ + "type":"list", + "member":{"shape":"IPSetSummary"} + }, + "IPSetSummary":{ + "type":"structure", + "required":[ + "IPSetId", + "Name" + ], + "members":{ + "IPSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "IPSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "IPSetDescriptor" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "IPSetDescriptor":{"shape":"IPSetDescriptor"} + } + }, + "IPSetUpdates":{ + "type":"list", + "member":{"shape":"IPSetUpdate"} + }, + "IPString":{"type":"string"}, + "ListByteMatchSetsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListByteMatchSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "ByteMatchSets":{"shape":"ByteMatchSetSummaries"} + } + }, + "ListIPSetsRequest":{ + "type":"structure", + 
"required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListIPSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "IPSets":{"shape":"IPSetSummaries"} + } + }, + "ListMaxItems":{ + "type":"long", + "max":100, + "min":1 + }, + "ListRulesRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListRulesResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Rules":{"shape":"RuleSummaries"} + } + }, + "ListSizeConstraintSetsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListSizeConstraintSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "SizeConstraintSets":{"shape":"SizeConstraintSetSummaries"} + } + }, + "ListSqlInjectionMatchSetsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListSqlInjectionMatchSetsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "SqlInjectionMatchSets":{"shape":"SqlInjectionMatchSetSummaries"} + } + }, + "ListWebACLsRequest":{ + "type":"structure", + "required":["Limit"], + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "Limit":{"shape":"PaginationLimit"} + } + }, + "ListWebACLsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{"shape":"NextMarker"}, + "WebACLs":{"shape":"WebACLSummaries"} + } + }, + "MatchFieldData":{"type":"string"}, + "MatchFieldType":{ + "type":"string", + "enum":[ + "URI", + "QUERY_STRING", + "HEADER", + "METHOD", + "BODY" + ] + }, + "MetricName":{"type":"string"}, + "Negated":{"type":"boolean"}, + "NextMarker":{ + "type":"string", + "min":1 + }, + 
"PaginationLimit":{ + "type":"integer", + "max":100, + "min":1 + }, + "ParameterExceptionField":{ + "type":"string", + "enum":[ + "CHANGE_ACTION", + "WAF_ACTION", + "PREDICATE_TYPE", + "IPSET_TYPE", + "BYTE_MATCH_FIELD_TYPE", + "SQL_INJECTION_MATCH_FIELD_TYPE", + "BYTE_MATCH_TEXT_TRANSFORMATION", + "BYTE_MATCH_POSITIONAL_CONSTRAINT", + "SIZE_CONSTRAINT_COMPARISON_OPERATOR" + ] + }, + "ParameterExceptionParameter":{ + "type":"string", + "min":1 + }, + "ParameterExceptionReason":{ + "type":"string", + "enum":[ + "INVALID_OPTION", + "ILLEGAL_COMBINATION" + ] + }, + "PopulationSize":{"type":"long"}, + "PositionalConstraint":{ + "type":"string", + "enum":[ + "EXACTLY", + "STARTS_WITH", + "ENDS_WITH", + "CONTAINS", + "CONTAINS_WORD" + ] + }, + "Predicate":{ + "type":"structure", + "required":[ + "Negated", + "Type", + "DataId" + ], + "members":{ + "Negated":{"shape":"Negated"}, + "Type":{"shape":"PredicateType"}, + "DataId":{"shape":"ResourceId"} + } + }, + "PredicateType":{ + "type":"string", + "enum":[ + "IPMatch", + "ByteMatch", + "SqlInjectionMatch", + "SizeConstraint" + ] + }, + "Predicates":{ + "type":"list", + "member":{"shape":"Predicate"} + }, + "ResourceId":{ + "type":"string", + "max":128, + "min":1 + }, + "ResourceName":{ + "type":"string", + "max":128, + "min":1 + }, + "Rule":{ + "type":"structure", + "required":[ + "RuleId", + "Predicates" + ], + "members":{ + "RuleId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "MetricName":{"shape":"MetricName"}, + "Predicates":{"shape":"Predicates"} + } + }, + "RulePriority":{"type":"integer"}, + "RuleSummaries":{ + "type":"list", + "member":{"shape":"RuleSummary"} + }, + "RuleSummary":{ + "type":"structure", + "required":[ + "RuleId", + "Name" + ], + "members":{ + "RuleId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "RuleUpdate":{ + "type":"structure", + "required":[ + "Action", + "Predicate" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + 
"Predicate":{"shape":"Predicate"} + } + }, + "RuleUpdates":{ + "type":"list", + "member":{"shape":"RuleUpdate"} + }, + "SampleWeight":{ + "type":"long", + "min":0 + }, + "SampledHTTPRequest":{ + "type":"structure", + "required":[ + "Request", + "Weight" + ], + "members":{ + "Request":{"shape":"HTTPRequest"}, + "Weight":{"shape":"SampleWeight"}, + "Timestamp":{"shape":"Timestamp"}, + "Action":{"shape":"Action"} + } + }, + "SampledHTTPRequests":{ + "type":"list", + "member":{"shape":"SampledHTTPRequest"} + }, + "Size":{ + "type":"long", + "min":0 + }, + "SizeConstraint":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TextTransformation", + "ComparisonOperator", + "Size" + ], + "members":{ + "FieldToMatch":{"shape":"FieldToMatch"}, + "TextTransformation":{"shape":"TextTransformation"}, + "ComparisonOperator":{"shape":"ComparisonOperator"}, + "Size":{"shape":"Size"} + } + }, + "SizeConstraintSet":{ + "type":"structure", + "required":[ + "SizeConstraintSetId", + "SizeConstraints" + ], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "SizeConstraints":{"shape":"SizeConstraints"} + } + }, + "SizeConstraintSetSummaries":{ + "type":"list", + "member":{"shape":"SizeConstraintSetSummary"} + }, + "SizeConstraintSetSummary":{ + "type":"structure", + "required":[ + "SizeConstraintSetId", + "Name" + ], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "SizeConstraintSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "SizeConstraint" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "SizeConstraint":{"shape":"SizeConstraint"} + } + }, + "SizeConstraintSetUpdates":{ + "type":"list", + "member":{"shape":"SizeConstraintSetUpdate"} + }, + "SizeConstraints":{ + "type":"list", + "member":{"shape":"SizeConstraint"} + }, + "SqlInjectionMatchSet":{ + "type":"structure", + "required":[ + "SqlInjectionMatchSetId", + "SqlInjectionMatchTuples" + ], + 
"members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "SqlInjectionMatchTuples":{"shape":"SqlInjectionMatchTuples"} + } + }, + "SqlInjectionMatchSetSummaries":{ + "type":"list", + "member":{"shape":"SqlInjectionMatchSetSummary"} + }, + "SqlInjectionMatchSetSummary":{ + "type":"structure", + "required":[ + "SqlInjectionMatchSetId", + "Name" + ], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "SqlInjectionMatchSetUpdate":{ + "type":"structure", + "required":[ + "Action", + "SqlInjectionMatchTuple" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "SqlInjectionMatchTuple":{"shape":"SqlInjectionMatchTuple"} + } + }, + "SqlInjectionMatchSetUpdates":{ + "type":"list", + "member":{"shape":"SqlInjectionMatchSetUpdate"} + }, + "SqlInjectionMatchTuple":{ + "type":"structure", + "required":[ + "FieldToMatch", + "TextTransformation" + ], + "members":{ + "FieldToMatch":{"shape":"FieldToMatch"}, + "TextTransformation":{"shape":"TextTransformation"} + } + }, + "SqlInjectionMatchTuples":{ + "type":"list", + "member":{"shape":"SqlInjectionMatchTuple"} + }, + "TextTransformation":{ + "type":"string", + "enum":[ + "NONE", + "COMPRESS_WHITE_SPACE", + "HTML_ENTITY_DECODE", + "LOWERCASE", + "CMD_LINE", + "URL_DECODE" + ] + }, + "TimeWindow":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime" + ], + "members":{ + "StartTime":{"shape":"Timestamp"}, + "EndTime":{"shape":"Timestamp"} + } + }, + "Timestamp":{"type":"timestamp"}, + "URIString":{"type":"string"}, + "UpdateByteMatchSetRequest":{ + "type":"structure", + "required":[ + "ByteMatchSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "ByteMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"ByteMatchSetUpdates"} + } + }, + "UpdateByteMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + 
"UpdateIPSetRequest":{ + "type":"structure", + "required":[ + "IPSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "IPSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"IPSetUpdates"} + } + }, + "UpdateIPSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateRuleRequest":{ + "type":"structure", + "required":[ + "RuleId", + "ChangeToken", + "Updates" + ], + "members":{ + "RuleId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"RuleUpdates"} + } + }, + "UpdateRuleResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateSizeConstraintSetRequest":{ + "type":"structure", + "required":[ + "SizeConstraintSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "SizeConstraintSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"SizeConstraintSetUpdates"} + } + }, + "UpdateSizeConstraintSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateSqlInjectionMatchSetRequest":{ + "type":"structure", + "required":[ + "SqlInjectionMatchSetId", + "ChangeToken", + "Updates" + ], + "members":{ + "SqlInjectionMatchSetId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"SqlInjectionMatchSetUpdates"} + } + }, + "UpdateSqlInjectionMatchSetResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "UpdateWebACLRequest":{ + "type":"structure", + "required":[ + "WebACLId", + "ChangeToken" + ], + "members":{ + "WebACLId":{"shape":"ResourceId"}, + "ChangeToken":{"shape":"ChangeToken"}, + "Updates":{"shape":"WebACLUpdates"}, + "DefaultAction":{"shape":"WafAction"} + } + }, + "UpdateWebACLResponse":{ + "type":"structure", + "members":{ + "ChangeToken":{"shape":"ChangeToken"} + } + }, + "WAFDisallowedNameException":{ + 
"type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFInternalErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true, + "fault":true + }, + "WAFInvalidAccountException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "WAFInvalidOperationException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFInvalidParameterException":{ + "type":"structure", + "members":{ + "field":{"shape":"ParameterExceptionField"}, + "parameter":{"shape":"ParameterExceptionParameter"}, + "reason":{"shape":"ParameterExceptionReason"} + }, + "exception":true + }, + "WAFLimitsExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFNonEmptyEntityException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFNonexistentContainerException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFNonexistentItemException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFReferencedItemException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WAFStaleDataException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "exception":true + }, + "WafAction":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{"shape":"WafActionType"} + } + }, + "WafActionType":{ + "type":"string", + "enum":[ + "BLOCK", + "ALLOW", + "COUNT" + ] + }, + "WebACL":{ + "type":"structure", + "required":[ + "WebACLId", + "DefaultAction", + "Rules" + ], + "members":{ + "WebACLId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "MetricName":{"shape":"MetricName"}, + 
"DefaultAction":{"shape":"WafAction"}, + "Rules":{"shape":"ActivatedRules"} + } + }, + "WebACLSummaries":{ + "type":"list", + "member":{"shape":"WebACLSummary"} + }, + "WebACLSummary":{ + "type":"structure", + "required":[ + "WebACLId", + "Name" + ], + "members":{ + "WebACLId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"} + } + }, + "WebACLUpdate":{ + "type":"structure", + "required":[ + "Action", + "ActivatedRule" + ], + "members":{ + "Action":{"shape":"ChangeAction"}, + "ActivatedRule":{"shape":"ActivatedRule"} + } + }, + "WebACLUpdates":{ + "type":"list", + "member":{"shape":"WebACLUpdate"} + }, + "errorMessage":{"type":"string"} + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/docs-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1089 @@ +{ + "version": "2.0", + "service": "

    This is the AWS WAF API Reference. This guide is for developers who need detailed information about the AWS WAF API actions, data types, and errors. For detailed information about AWS WAF features and an overview of how to use the AWS WAF API, see the AWS WAF Developer Guide.

    ", + "operations": { + "CreateByteMatchSet": "

    Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a ByteMatchSet that matches any requests with User-Agent headers that contain the string BadBot. You can then configure AWS WAF to reject those requests.

    To create and configure a ByteMatchSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateByteMatchSet request.
    2. Submit a CreateByteMatchSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.
    4. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateIPSet": "

    Creates an IPSet, which you use to specify which web requests you want to allow or block based on the IP addresses that the requests originate from. For example, if you're receiving a lot of requests from one or more individual IP addresses or one or more ranges of IP addresses and you want to block the requests, you can create an IPSet that contains those IP addresses and then configure AWS WAF to block the requests.

    To create and configure an IPSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateIPSet request.
    2. Submit a CreateIPSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.
    4. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateRule": "

    Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to block. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed or blocked. For example, suppose you add the following to a Rule:

    • An IPSet that matches the IP address 192.0.2.44/32
    • A ByteMatchSet that matches BadBot in the User-Agent header

    You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot.

    To create and configure a Rule, perform the following steps:

    1. Create and update the predicates that you want to include in the Rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.
    3. Submit a CreateRule request.
    4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.
    5. Submit an UpdateRule request to specify the predicates that you want to include in the Rule.
    6. Create and update a WebACL that contains the Rule. For more information, see CreateWebACL.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateSizeConstraintSet": "

    Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify the part of a web request that you want AWS WAF to check for length, such as the length of the User-Agent header or the length of the query string. For example, you can create a SizeConstraintSet that matches any requests that have a query string that is longer than 100 bytes. You can then configure AWS WAF to reject those requests.

    To create and configure a SizeConstraintSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSizeConstraintSet request.
    2. Submit a CreateSizeConstraintSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.
    4. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateSqlInjectionMatchSet": "

    Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests that contain snippets of SQL code in a specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

    To create and configure a SqlInjectionMatchSet, perform the following steps:

    1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSqlInjectionMatchSet request.
    2. Submit a CreateSqlInjectionMatchSet request.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.
    4. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests in which you want to allow, block, or count malicious SQL code.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "CreateWebACL": "

    Creates a WebACL, which contains the Rules that identify the CloudFront web requests that you want to allow, block, or count. AWS WAF evaluates Rules in order based on the value of Priority for each Rule.

    You also specify a default action, either ALLOW or BLOCK. If a web request doesn't match any of the Rules in a WebACL, AWS WAF responds to the request with the default action.

    To create and configure a WebACL, perform the following steps:

    1. Create and update the ByteMatchSet objects and other predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.
    2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateWebACL request.
    4. Submit a CreateWebACL request.
    5. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.
    6. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

    For more information about how to use the AWS WAF API, see the AWS WAF Developer Guide.

    ", + "DeleteByteMatchSet": "

    Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's still used in any Rules or if it still includes any ByteMatchTuple objects (any filters).

    If you just want to remove a ByteMatchSet from a Rule, use UpdateRule.

    To permanently delete a ByteMatchSet, perform the following steps:

    1. Update the ByteMatchSet to remove filters, if any. For more information, see UpdateByteMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteByteMatchSet request.
    3. Submit a DeleteByteMatchSet request.
    ", + "DeleteIPSet": "

    Permanently deletes an IPSet. You can't delete an IPSet if it's still used in any Rules or if it still includes any IP addresses.

    If you just want to remove an IPSet from a Rule, use UpdateRule.

    To permanently delete an IPSet from AWS WAF, perform the following steps:

    1. Update the IPSet to remove IP address ranges, if any. For more information, see UpdateIPSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteIPSet request.
    3. Submit a DeleteIPSet request.
    ", + "DeleteRule": "

    Permanently deletes a Rule. You can't delete a Rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

    If you just want to remove a Rule from a WebACL, use UpdateWebACL.

    To permanently delete a Rule from AWS WAF, perform the following steps:

    1. Update the Rule to remove predicates, if any. For more information, see UpdateRule.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRule request.
    3. Submit a DeleteRule request.
    ", + "DeleteSizeConstraintSet": "

    Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet if it's still used in any Rules or if it still includes any SizeConstraint objects (any filters).

    If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule.

    To permanently delete a SizeConstraintSet, perform the following steps:

    1. Update the SizeConstraintSet to remove filters, if any. For more information, see UpdateSizeConstraintSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request.
    3. Submit a DeleteSizeConstraintSet request.
    ", + "DeleteSqlInjectionMatchSet": "

    Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple objects.

    If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule.

    To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following steps:

    1. Update the SqlInjectionMatchSet to remove filters, if any. For more information, see UpdateSqlInjectionMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet request.
    3. Submit a DeleteSqlInjectionMatchSet request.
    ", + "DeleteWebACL": "

    Permanently deletes a WebACL. You can't delete a WebACL if it still contains any Rules.

    To delete a WebACL, perform the following steps:

    1. Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteWebACL request.
    3. Submit a DeleteWebACL request.
    ", + "GetByteMatchSet": "

    Returns the ByteMatchSet specified by ByteMatchSetId.

    ", + "GetChangeToken": "

    When you want to create, update, or delete AWS WAF objects, get a change token and include the change token in the create, update, or delete request. Change tokens ensure that your application doesn't submit conflicting requests to AWS WAF.

    Each create, update, or delete request must use a unique change token. If your application submits a GetChangeToken request and then submits a second GetChangeToken request before submitting a create, update, or delete request, the second GetChangeToken request returns the same value as the first GetChangeToken request.

    When you use a change token in a create, update, or delete request, the status of the change token changes to PENDING, which indicates that AWS WAF is propagating the change to all AWS WAF servers. Use GetChangeTokenStatus to determine the status of your change token.

    ", + "GetChangeTokenStatus": "

    Returns the status of a ChangeToken that you got by calling GetChangeToken. ChangeTokenStatus is one of the following values:

    • PROVISIONED: You requested the change token by calling GetChangeToken, but you haven't used it yet in a call to create, update, or delete an AWS WAF object.
    • PENDING: AWS WAF is propagating the create, update, or delete request to all AWS WAF servers.
    • IN_SYNC: Propagation is complete.
    ", + "GetIPSet": "

    Returns the IPSet that is specified by IPSetId.

    ", + "GetRule": "

    Returns the Rule that is specified by the RuleId that you included in the GetRule request.

    ", + "GetSampledRequests": "

    Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 100 requests, and you can specify any time range in the previous three hours.

    GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.

    ", + "GetSizeConstraintSet": "

    Returns the SizeConstraintSet specified by SizeConstraintSetId.

    ", + "GetSqlInjectionMatchSet": "

    Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId.

    ", + "GetWebACL": "

    Returns the WebACL that is specified by WebACLId.

    ", + "ListByteMatchSets": "

    Returns an array of ByteMatchSetSummary objects.

    ", + "ListIPSets": "

    Returns an array of IPSetSummary objects in the response.

    ", + "ListRules": "

    Returns an array of RuleSummary objects.

    ", + "ListSizeConstraintSets": "

    Returns an array of SizeConstraintSetSummary objects.

    ", + "ListSqlInjectionMatchSets": "

    Returns an array of SqlInjectionMatchSet objects.

    ", + "ListWebACLs": "

    Returns an array of WebACLSummary objects in the response.

    ", + "UpdateByteMatchSet": "

    Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

    • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.
    • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.
    • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.
    • Where to look, such as at the beginning or the end of a query string.
    • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

    For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

    To create and configure a ByteMatchSet, perform the following steps:

    1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.
    3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "UpdateIPSet": "

    Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

    • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.
    • The IP address version, IPv4.
    • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

    AWS WAF supports /8, /16, /24, and /32 IP address ranges. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

    You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

    To create and configure an IPSet, perform the following steps:

    1. Submit a CreateIPSet request.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.
    3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

    When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "UpdateRule": "

    Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to allow, block, or count. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed, blocked, or counted. For example, suppose you add the following to a Rule:

    • A ByteMatchSet that matches the value BadBot in the User-Agent header
    • An IPSet that matches the IP address 192.0.2.44

    You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, the User-Agent header in the request must contain the value BadBot and the request must originate from the IP address 192.0.2.44.

    To create and configure a Rule, perform the following steps:

    1. Create and update the predicates that you want to include in the Rule.
    2. Create the Rule. See CreateRule.
    3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.
    4. Submit an UpdateRule request to add predicates to the Rule.
    5. Create and update a WebACL that contains the Rule. See CreateWebACL.

    If you want to replace one ByteMatchSet or IPSet with another, you delete the existing one and add the new one.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "UpdateSizeConstraintSet": "

    Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

    • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.
    • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.
    • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.
    • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.
    • The length, in bytes, that you want AWS WAF to watch for in the selected part of the request. The length is computed after applying the transformation.

    For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

    To create and configure a SizeConstraintSet, perform the following steps:

    1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.
    3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "UpdateSqlInjectionMatchSet": "

    Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

    • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.
    • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header, the name of the header.
    • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

    You use SqlInjectionMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

    To create and configure a SqlInjectionMatchSet, perform the following steps:

    1. Submit a CreateSqlInjectionMatchSet request.
    2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.
    3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    ", + "UpdateWebACL": "

    Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies web requests that you want to allow, block, or count. When you update a WebACL, you specify the following values:

    • A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't match the criteria in any of the Rules in a WebACL.
    • The Rules that you want to add and/or delete. If you want to replace one Rule with another, you delete the existing Rule and add the new one.
    • For each Rule, whether you want AWS WAF to allow requests, block requests, or count requests that match the conditions in the Rule.
    • The order in which you want AWS WAF to evaluate the Rules in a WebACL. If you add more than one Rule to a WebACL, AWS WAF evaluates each request against the Rules in order based on the value of Priority. (The Rule that has the lowest value for Priority is evaluated first.) When a web request matches all of the predicates (such as ByteMatchSets and IPSets) in a Rule, AWS WAF immediately takes the corresponding action, allow or block, and doesn't evaluate the request against the remaining Rules in the WebACL, if any.
    • The CloudFront distribution that you want to associate with the WebACL.

    To create and configure a WebACL, perform the following steps:

    1. Create and update the predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.
    2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.
    3. Create a WebACL. See CreateWebACL.
    4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.
    5. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

    " + }, + "shapes": { + "Action": { + "base": null, + "refs": { + "SampledHTTPRequest$Action": "

    The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT.

    " + } + }, + "ActivatedRule": { + "base": "

    The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

    To specify whether to insert or delete a Rule, use the Action parameter in the WebACLUpdate data type.

    ", + "refs": { + "ActivatedRules$member": null, + "WebACLUpdate$ActivatedRule": null + } + }, + "ActivatedRules": { + "base": null, + "refs": { + "WebACL$Rules": "

    An array that contains the action for each Rule in a WebACL, the priority of the Rule, and the ID of the Rule.

    " + } + }, + "ByteMatchSet": { + "base": "

    In a GetByteMatchSet request, ByteMatchSet is a complex type that contains the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified when you updated the ByteMatchSet.

    A complex type that contains ByteMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

    ", + "refs": { + "CreateByteMatchSetResponse$ByteMatchSet": "

    A ByteMatchSet that contains no ByteMatchTuple objects.

    ", + "GetByteMatchSetResponse$ByteMatchSet": "

    Information about the ByteMatchSet that you specified in the GetByteMatchSet request. For more information, see the following topics:

    • ByteMatchSet: Contains ByteMatchSetId, ByteMatchTuples, and Name
    • ByteMatchTuples: Contains an array of ByteMatchTuple objects. Each ByteMatchTuple object contains FieldToMatch, PositionalConstraint, TargetString, and TextTransformation
    • FieldToMatch: Contains Data and Type
    " + } + }, + "ByteMatchSetSummaries": { + "base": null, + "refs": { + "ListByteMatchSetsResponse$ByteMatchSets": "

    An array of ByteMatchSetSummary objects.

    " + } + }, + "ByteMatchSetSummary": { + "base": "

    Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the Name and ByteMatchSetId for one ByteMatchSet.

    ", + "refs": { + "ByteMatchSetSummaries$member": null + } + }, + "ByteMatchSetUpdate": { + "base": "

    In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple.

    ", + "refs": { + "ByteMatchSetUpdates$member": null + } + }, + "ByteMatchSetUpdates": { + "base": null, + "refs": { + "UpdateByteMatchSetRequest$Updates": "

    An array of ByteMatchSetUpdate objects that you want to insert into or delete from a ByteMatchSet. For more information, see the applicable data types:

    " + } + }, + "ByteMatchTargetString": { + "base": null, + "refs": { + "ByteMatchTuple$TargetString": "

    The value that you want AWS WAF to search for. AWS WAF searches for the specified string in the part of web requests that you specified in FieldToMatch. The maximum length of the value is 50 bytes.

    Valid values depend on the values that you specified for FieldToMatch:

    • HEADER: The value that you want AWS WAF to search for in the request header that you specified in FieldToMatch, for example, the value of the User-Agent or Referer header.
    • METHOD: The HTTP method, which indicates the type of operation specified in the request. CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.
    • QUERY_STRING: The value that you want AWS WAF to search for in the query string, which is the part of a URL that appears after a ? character.
    • URI: The value that you want AWS WAF to search for in the part of a URL that identifies a resource, for example, /images/daily-ad.jpg.
    • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.

    If TargetString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

    If you're using the AWS WAF API

    Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.

    For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64 encoding and include the resulting value, QmFkQm90, in the value of TargetString.

    If you're using the AWS CLI or one of the AWS SDKs

    The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.

    " + } + }, + "ByteMatchTuple": { + "base": "

    The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

    ", + "refs": { + "ByteMatchSetUpdate$ByteMatchTuple": "

    Information about the part of a web request that you want AWS WAF to inspect and the value that you want AWS WAF to search for. If you specify DELETE for the value of Action, the ByteMatchTuple values must exactly match the values in the ByteMatchTuple that you want to delete from the ByteMatchSet.

    ", + "ByteMatchTuples$member": null + } + }, + "ByteMatchTuples": { + "base": null, + "refs": { + "ByteMatchSet$ByteMatchTuples": "

    Specifies the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

    " + } + }, + "ChangeAction": { + "base": null, + "refs": { + "ByteMatchSetUpdate$Action": "

    Specifies whether to insert or delete a ByteMatchTuple.

    ", + "IPSetUpdate$Action": "

    Specifies whether to insert or delete an IP address with UpdateIPSet.

    ", + "RuleUpdate$Action": "

    Specify INSERT to add a Predicate to a Rule. Use DELETE to remove a Predicate from a Rule.

    ", + "SizeConstraintSetUpdate$Action": "

    Specify INSERT to add a SizeConstraintSetUpdate to a SizeConstraintSet. Use DELETE to remove a SizeConstraintSetUpdate from a SizeConstraintSet.

    ", + "SqlInjectionMatchSetUpdate$Action": "

    Specify INSERT to add a SqlInjectionMatchSetUpdate to a SqlInjectionMatchSet. Use DELETE to remove a SqlInjectionMatchSetUpdate from a SqlInjectionMatchSet.

    ", + "WebACLUpdate$Action": "

    Specifies whether to insert a Rule into or delete a Rule from a WebACL.

    " + } + }, + "ChangeToken": { + "base": null, + "refs": { + "CreateByteMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateByteMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateIPSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateIPSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateRuleRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateRuleResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateSizeConstraintSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateSizeConstraintSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateSqlInjectionMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateSqlInjectionMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "CreateWebACLRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "CreateWebACLResponse$ChangeToken": "

    The ChangeToken that you used to submit the CreateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteByteMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteByteMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteIPSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteIPSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteRuleRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteRuleResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteSizeConstraintSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteSizeConstraintSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteSqlInjectionMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteSqlInjectionMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "DeleteWebACLRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "DeleteWebACLResponse$ChangeToken": "

    The ChangeToken that you used to submit the DeleteWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "GetChangeTokenResponse$ChangeToken": "

    The ChangeToken that you used in the request. Use this value in a GetChangeTokenStatus request to get the current status of the request.

    ", + "GetChangeTokenStatusRequest$ChangeToken": "

    The change token for which you want to get the status. This change token was previously returned in the GetChangeToken response.

    ", + "UpdateByteMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateByteMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateByteMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateIPSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateIPSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateIPSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateRuleRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateRuleResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateSizeConstraintSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateSizeConstraintSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateSqlInjectionMatchSetRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateSqlInjectionMatchSetResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    ", + "UpdateWebACLRequest$ChangeToken": "

    The value returned by the most recent call to GetChangeToken.

    ", + "UpdateWebACLResponse$ChangeToken": "

    The ChangeToken that you used to submit the UpdateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

    " + } + }, + "ChangeTokenStatus": { + "base": null, + "refs": { + "GetChangeTokenStatusResponse$ChangeTokenStatus": "

    The status of the change token.

    " + } + }, + "ComparisonOperator": { + "base": null, + "refs": { + "SizeConstraint$ComparisonOperator": "

    The type of comparison you want AWS WAF to perform. AWS WAF uses this in combination with the provided Size and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

    EQ: Used to test if the Size is equal to the size of the FieldToMatch

    NE: Used to test if the Size is not equal to the size of the FieldToMatch

    LE: Used to test if the Size is less than or equal to the size of the FieldToMatch

    LT: Used to test if the Size is strictly less than the size of the FieldToMatch

    GE: Used to test if the Size is greater than or equal to the size of the FieldToMatch

    GT: Used to test if the Size is strictly greater than the size of the FieldToMatch

    " + } + }, + "Country": { + "base": null, + "refs": { + "HTTPRequest$Country": "

    The two-letter country code for the country that the request originated from. For a current list of country codes, see the Wikipedia entry ISO 3166-1 alpha-2.

    " + } + }, + "CreateByteMatchSetRequest": { + "base": null, + "refs": { + } + }, + "CreateByteMatchSetResponse": { + "base": null, + "refs": { + } + }, + "CreateIPSetRequest": { + "base": null, + "refs": { + } + }, + "CreateIPSetResponse": { + "base": null, + "refs": { + } + }, + "CreateRuleRequest": { + "base": null, + "refs": { + } + }, + "CreateRuleResponse": { + "base": null, + "refs": { + } + }, + "CreateSizeConstraintSetRequest": { + "base": null, + "refs": { + } + }, + "CreateSizeConstraintSetResponse": { + "base": null, + "refs": { + } + }, + "CreateSqlInjectionMatchSetRequest": { + "base": "

    A request to create a SqlInjectionMatchSet.

    ", + "refs": { + } + }, + "CreateSqlInjectionMatchSetResponse": { + "base": "

    The response to a CreateSqlInjectionMatchSet request.

    ", + "refs": { + } + }, + "CreateWebACLRequest": { + "base": null, + "refs": { + } + }, + "CreateWebACLResponse": { + "base": null, + "refs": { + } + }, + "DeleteByteMatchSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteByteMatchSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteIPSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteIPSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteRuleRequest": { + "base": null, + "refs": { + } + }, + "DeleteRuleResponse": { + "base": null, + "refs": { + } + }, + "DeleteSizeConstraintSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteSizeConstraintSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteSqlInjectionMatchSetRequest": { + "base": "

    A request to delete a SqlInjectionMatchSet from AWS WAF.

    ", + "refs": { + } + }, + "DeleteSqlInjectionMatchSetResponse": { + "base": "

    The response to a request to delete a SqlInjectionMatchSet from AWS WAF.

    ", + "refs": { + } + }, + "DeleteWebACLRequest": { + "base": null, + "refs": { + } + }, + "DeleteWebACLResponse": { + "base": null, + "refs": { + } + }, + "FieldToMatch": { + "base": "

    Specifies where in a web request to look for TargetString.

    ", + "refs": { + "ByteMatchTuple$FieldToMatch": "

    The part of a web request that you want AWS WAF to search, such as a specified header or a query string. For more information, see FieldToMatch.

    ", + "SizeConstraint$FieldToMatch": null, + "SqlInjectionMatchTuple$FieldToMatch": null + } + }, + "GetByteMatchSetRequest": { + "base": null, + "refs": { + } + }, + "GetByteMatchSetResponse": { + "base": null, + "refs": { + } + }, + "GetChangeTokenRequest": { + "base": null, + "refs": { + } + }, + "GetChangeTokenResponse": { + "base": null, + "refs": { + } + }, + "GetChangeTokenStatusRequest": { + "base": null, + "refs": { + } + }, + "GetChangeTokenStatusResponse": { + "base": null, + "refs": { + } + }, + "GetIPSetRequest": { + "base": null, + "refs": { + } + }, + "GetIPSetResponse": { + "base": null, + "refs": { + } + }, + "GetRuleRequest": { + "base": null, + "refs": { + } + }, + "GetRuleResponse": { + "base": null, + "refs": { + } + }, + "GetSampledRequestsRequest": { + "base": null, + "refs": { + } + }, + "GetSampledRequestsResponse": { + "base": null, + "refs": { + } + }, + "GetSizeConstraintSetRequest": { + "base": null, + "refs": { + } + }, + "GetSizeConstraintSetResponse": { + "base": null, + "refs": { + } + }, + "GetSqlInjectionMatchSetRequest": { + "base": "

    A request to get a SqlInjectionMatchSet.

    ", + "refs": { + } + }, + "GetSqlInjectionMatchSetResponse": { + "base": "

    The response to a GetSqlInjectionMatchSet request.

    ", + "refs": { + } + }, + "GetWebACLRequest": { + "base": null, + "refs": { + } + }, + "GetWebACLResponse": { + "base": null, + "refs": { + } + }, + "HTTPHeader": { + "base": "

    The response from a GetSampledRequests request includes an HTTPHeader complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests that were returned by GetSampledRequests.

    ", + "refs": { + "HTTPHeaders$member": null + } + }, + "HTTPHeaders": { + "base": null, + "refs": { + "HTTPRequest$Headers": "

    A complex type that contains two values for each header in the sampled web request: the name of the header and the value of the header.

    " + } + }, + "HTTPMethod": { + "base": null, + "refs": { + "HTTPRequest$Method": "

    The HTTP method specified in the sampled web request. CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.

    " + } + }, + "HTTPRequest": { + "base": "

    The response from a GetSampledRequests request includes an HTTPRequest complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests that were returned by GetSampledRequests.

    ", + "refs": { + "SampledHTTPRequest$Request": "

    A complex type that contains detailed information about the request.

    " + } + }, + "HTTPVersion": { + "base": null, + "refs": { + "HTTPRequest$HTTPVersion": "

    The HTTP version specified in the sampled web request, for example, HTTP/1.1.

    " + } + }, + "HeaderName": { + "base": null, + "refs": { + "HTTPHeader$Name": "

    The name of one of the headers in the sampled web request.

    " + } + }, + "HeaderValue": { + "base": null, + "refs": { + "HTTPHeader$Value": "

    The value of one of the headers in the sampled web request.

    " + } + }, + "IPSet": { + "base": "

    Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify a /24, a /16, or a /8 CIDR. For more information about CIDR notation, perform an Internet search on cidr notation.

    ", + "refs": { + "CreateIPSetResponse$IPSet": "

    The IPSet returned in the CreateIPSet response.

    ", + "GetIPSetResponse$IPSet": "

    Information about the IPSet that you specified in the GetIPSet request. For more information, see the following topics:

    • IPSet: Contains IPSetDescriptors, IPSetId, and Name
    • IPSetDescriptors: Contains an array of IPSetDescriptor objects. Each IPSetDescriptor object contains Type and Value
    " + } + }, + "IPSetDescriptor": { + "base": "

    Specifies the IP address type (IPV4) and the IP address range (in CIDR format) that web requests originate from.

    ", + "refs": { + "IPSetDescriptors$member": null, + "IPSetUpdate$IPSetDescriptor": "

    The IP address type (IPV4) and the IP address range (in CIDR notation) that web requests originate from.

    " + } + }, + "IPSetDescriptorType": { + "base": null, + "refs": { + "IPSetDescriptor$Type": "

    Specify IPV4.

    " + } + }, + "IPSetDescriptorValue": { + "base": null, + "refs": { + "IPSetDescriptor$Value": "

    Specify an IPv4 address by using CIDR notation. For example:

    • To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32.
    • To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

    AWS WAF supports only /8, /16, /24, and /32 IP addresses.

    For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

    " + } + }, + "IPSetDescriptors": { + "base": null, + "refs": { + "IPSet$IPSetDescriptors": "

    The IP address type (IPV4) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution, this is the value of one of the following fields in CloudFront access logs:

    • c-ip, if the viewer did not use an HTTP proxy or a load balancer to send the request
    • x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer to send the request
    " + } + }, + "IPSetSummaries": { + "base": null, + "refs": { + "ListIPSetsResponse$IPSets": "

    An array of IPSetSummary objects.

    " + } + }, + "IPSetSummary": { + "base": "

    Contains the identifier and the name of the IPSet.

    ", + "refs": { + "IPSetSummaries$member": null + } + }, + "IPSetUpdate": { + "base": "

    Specifies the type of update to perform to an IPSet with UpdateIPSet.

    ", + "refs": { + "IPSetUpdates$member": null + } + }, + "IPSetUpdates": { + "base": null, + "refs": { + "UpdateIPSetRequest$Updates": "

    An array of IPSetUpdate objects that you want to insert into or delete from an IPSet. For more information, see the applicable data types:

    " + } + }, + "IPString": { + "base": null, + "refs": { + "HTTPRequest$ClientIP": "

    The IP address that the request originated from. If the WebACL is associated with a CloudFront distribution, this is the value of one of the following fields in CloudFront access logs:

    • c-ip, if the viewer did not use an HTTP proxy or a load balancer to send the request
    • x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer to send the request
    " + } + }, + "ListByteMatchSetsRequest": { + "base": null, + "refs": { + } + }, + "ListByteMatchSetsResponse": { + "base": null, + "refs": { + } + }, + "ListIPSetsRequest": { + "base": null, + "refs": { + } + }, + "ListIPSetsResponse": { + "base": null, + "refs": { + } + }, + "ListMaxItems": { + "base": null, + "refs": { + "GetSampledRequestsRequest$MaxItems": "

    The number of requests that you want AWS WAF to return from among the first 5,000 requests that your AWS resource received during the time range. If your resource received fewer requests than the value of MaxItems, GetSampledRequests returns information about all of them.

    " + } + }, + "ListRulesRequest": { + "base": null, + "refs": { + } + }, + "ListRulesResponse": { + "base": null, + "refs": { + } + }, + "ListSizeConstraintSetsRequest": { + "base": null, + "refs": { + } + }, + "ListSizeConstraintSetsResponse": { + "base": null, + "refs": { + } + }, + "ListSqlInjectionMatchSetsRequest": { + "base": "

    A request to list the SqlInjectionMatchSet objects created by the current AWS account.

    ", + "refs": { + } + }, + "ListSqlInjectionMatchSetsResponse": { + "base": "

    The response to a ListSqlInjectionMatchSets request.

    ", + "refs": { + } + }, + "ListWebACLsRequest": { + "base": null, + "refs": { + } + }, + "ListWebACLsResponse": { + "base": null, + "refs": { + } + }, + "MatchFieldData": { + "base": null, + "refs": { + "FieldToMatch$Data": "

    When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. If the value of Type is any other value, omit Data.

    The name of the header is not case sensitive.

    " + } + }, + "MatchFieldType": { + "base": null, + "refs": { + "FieldToMatch$Type": "

    The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:

    • HEADER: A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data.
    • METHOD: The HTTP method, which indicates the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.
    • QUERY_STRING: A query string, which is the part of a URL that appears after a ? character, if any.
    • URI: The part of a web request that identifies a resource, for example, /images/daily-ad.jpg.
    • BODY: The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet.
    " + } + }, + "MetricName": { + "base": null, + "refs": { + "CreateRuleRequest$MetricName": "

    A friendly name or description for the metrics for this Rule. The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. You can't change the name of the metric after you create the Rule.

    ", + "CreateWebACLRequest$MetricName": "

    A friendly name or description for the metrics for this WebACL. The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. You can't change MetricName after you create the WebACL.

    ", + "Rule$MetricName": null, + "WebACL$MetricName": null + } + }, + "Negated": { + "base": null, + "refs": { + "Predicate$Negated": "

    Set Negated to False if you want AWS WAF to allow, block, or count requests based on the settings in the specified ByteMatchSet, IPSet, or SqlInjectionMatchSet. For example, if an IPSet includes the IP address 192.0.2.44, AWS WAF will allow or block requests based on that IP address.

    Set Negated to True if you want AWS WAF to allow or block a request based on the negation of the settings in the ByteMatchSet, IPSet, or SqlInjectionMatchSet. For example, if an IPSet includes the IP address 192.0.2.44, AWS WAF will allow, block, or count requests based on all IP addresses except 192.0.2.44.

    " + } + }, + "NextMarker": { + "base": null, + "refs": { + "ListByteMatchSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more ByteMatchSets than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of ByteMatchSets. For the second and subsequent ListByteMatchSets requests, specify the value of NextMarker from the previous response to get information about another batch of ByteMatchSets.

    ", + "ListByteMatchSetsResponse$NextMarker": "

    If you have more ByteMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more ByteMatchSet objects, submit another ListByteMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListIPSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more IPSets than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of IPSets. For the second and subsequent ListIPSets requests, specify the value of NextMarker from the previous response to get information about another batch of IPSets.

    ", + "ListIPSetsResponse$NextMarker": "

    If you have more IPSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more IPSet objects, submit another ListIPSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListRulesRequest$NextMarker": "

    If you specify a value for Limit and you have more Rules than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of Rules. For the second and subsequent ListRules requests, specify the value of NextMarker from the previous response to get information about another batch of Rules.

    ", + "ListRulesResponse$NextMarker": "

    If you have more Rules than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more Rules, submit another ListRules request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListSizeConstraintSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more SizeConstraintSets than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of SizeConstraintSets. For the second and subsequent ListSizeConstraintSets requests, specify the value of NextMarker from the previous response to get information about another batch of SizeConstraintSets.

    ", + "ListSizeConstraintSetsResponse$NextMarker": "

    If you have more SizeConstraintSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SizeConstraintSet objects, submit another ListSizeConstraintSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListSqlInjectionMatchSetsRequest$NextMarker": "

    If you specify a value for Limit and you have more SqlInjectionMatchSet objects than the value of Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of SqlInjectionMatchSets. For the second and subsequent ListSqlInjectionMatchSets requests, specify the value of NextMarker from the previous response to get information about another batch of SqlInjectionMatchSets.

    ", + "ListSqlInjectionMatchSetsResponse$NextMarker": "

    If you have more SqlInjectionMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    ", + "ListWebACLsRequest$NextMarker": "

    If you specify a value for Limit and you have more WebACL objects than the number that you specify for Limit, AWS WAF returns a NextMarker value in the response that allows you to list another group of WebACL objects. For the second and subsequent ListWebACLs requests, specify the value of NextMarker from the previous response to get information about another batch of WebACL objects.

    ", + "ListWebACLsResponse$NextMarker": "

    If you have more WebACL objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more WebACL objects, submit another ListWebACLs request, and specify the NextMarker value from the response in the NextMarker value in the next request.

    " + } + }, + "PaginationLimit": { + "base": null, + "refs": { + "ListByteMatchSetsRequest$Limit": "

    Specifies the number of ByteMatchSet objects that you want AWS WAF to return for this request. If you have more ByteMatchSets objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of ByteMatchSet objects.

    ", + "ListIPSetsRequest$Limit": "

    Specifies the number of IPSet objects that you want AWS WAF to return for this request. If you have more IPSet objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of IPSet objects.

    ", + "ListRulesRequest$Limit": "

    Specifies the number of Rules that you want AWS WAF to return for this request. If you have more Rules than the number that you specify for Limit, the response includes a NextMarker value that you can use to get another batch of Rules.

    ", + "ListSizeConstraintSetsRequest$Limit": "

    Specifies the number of SizeConstraintSet objects that you want AWS WAF to return for this request. If you have more SizeConstraintSets objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of SizeConstraintSet objects.

    ", + "ListSqlInjectionMatchSetsRequest$Limit": "

    Specifies the number of SqlInjectionMatchSet objects that you want AWS WAF to return for this request. If you have more SqlInjectionMatchSet objects than the number you specify for Limit, the response includes a NextMarker value that you can use to get another batch of SqlInjectionMatchSet objects.

    ", + "ListWebACLsRequest$Limit": "

    Specifies the number of WebACL objects that you want AWS WAF to return for this request. If you have more WebACL objects than the number that you specify for Limit, the response includes a NextMarker value that you can use to get another batch of WebACL objects.

    " + } + }, + "ParameterExceptionField": { + "base": null, + "refs": { + "WAFInvalidParameterException$field": null + } + }, + "ParameterExceptionParameter": { + "base": null, + "refs": { + "WAFInvalidParameterException$parameter": null + } + }, + "ParameterExceptionReason": { + "base": null, + "refs": { + "WAFInvalidParameterException$reason": null + } + }, + "PopulationSize": { + "base": null, + "refs": { + "GetSampledRequestsResponse$PopulationSize": "

    The total number of requests from which GetSampledRequests got a sample of MaxItems requests. If PopulationSize is less than MaxItems, the sample includes every request that your AWS resource received during the specified time range.

    " + } + }, + "PositionalConstraint": { + "base": null, + "refs": { + "ByteMatchTuple$PositionalConstraint": "

    Within the portion of a web request that you want to search (for example, in the query string, if any), specify where you want AWS WAF to search. Valid values include the following:

    CONTAINS

    The specified part of the web request must include the value of TargetString, but the location doesn't matter.

    CONTAINS_WORD

    The specified part of the web request must include the value of TargetString, and TargetString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, which means one of the following:

    • TargetString exactly matches the value of the specified part of the web request, such as the value of a header.
    • TargetString is at the beginning of the specified part of the web request and is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot;.
    • TargetString is at the end of the specified part of the web request and is preceded by a character other than an alphanumeric character or underscore (_), for example, ;BadBot.
    • TargetString is in the middle of the specified part of the web request and is preceded and followed by characters other than alphanumeric characters or underscore (_), for example, -BadBot;.

    EXACTLY

    The value of the specified part of the web request must exactly match the value of TargetString.

    STARTS_WITH

    The value of TargetString must appear at the beginning of the specified part of the web request.

    ENDS_WITH

    The value of TargetString must appear at the end of the specified part of the web request.

    " + } + }, + "Predicate": { + "base": "

    Specifies the ByteMatchSet, IPSet, and SqlInjectionMatchSet objects that you want to add to a Rule and, for each object, indicates whether you want to negate the settings, for example, requests that do NOT originate from the IP address 192.0.2.44.

    ", + "refs": { + "Predicates$member": null, + "RuleUpdate$Predicate": "

    The ID of the Predicate (such as an IPSet) that you want to add to a Rule.

    " + } + }, + "PredicateType": { + "base": null, + "refs": { + "Predicate$Type": "

    The type of predicate in a Rule, such as ByteMatchSet or IPSet.

    " + } + }, + "Predicates": { + "base": null, + "refs": { + "Rule$Predicates": "

    The Predicates object contains one Predicate element for each ByteMatchSet, IPSet, or SqlInjectionMatchSet object that you want to include in a Rule.

    " + } + }, + "ResourceId": { + "base": null, + "refs": { + "ActivatedRule$RuleId": "

    The RuleId for a Rule. You use RuleId to get more information about a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule from AWS WAF (see DeleteRule).

    RuleId is returned by CreateRule and by ListRules.

    ", + "ByteMatchSet$ByteMatchSetId": "

    The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information about a ByteMatchSet (see GetByteMatchSet), update a ByteMatchSet (see UpdateByteMatchSet), insert a ByteMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete a ByteMatchSet from AWS WAF (see DeleteByteMatchSet).

    ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "ByteMatchSetSummary$ByteMatchSetId": "

    The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information about a ByteMatchSet, update a ByteMatchSet, remove a ByteMatchSet from a Rule, and delete a ByteMatchSet from AWS WAF.

    ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "DeleteByteMatchSetRequest$ByteMatchSetId": "

    The ByteMatchSetId of the ByteMatchSet that you want to delete. ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "DeleteIPSetRequest$IPSetId": "

    The IPSetId of the IPSet that you want to delete. IPSetId is returned by CreateIPSet and by ListIPSets.

    ", + "DeleteRuleRequest$RuleId": "

    The RuleId of the Rule that you want to delete. RuleId is returned by CreateRule and by ListRules.

    ", + "DeleteSizeConstraintSetRequest$SizeConstraintSetId": "

    The SizeConstraintSetId of the SizeConstraintSet that you want to delete. SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "DeleteSqlInjectionMatchSetRequest$SqlInjectionMatchSetId": "

    The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to delete. SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "DeleteWebACLRequest$WebACLId": "

    The WebACLId of the WebACL that you want to delete. WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "GetByteMatchSetRequest$ByteMatchSetId": "

    The ByteMatchSetId of the ByteMatchSet that you want to get. ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "GetIPSetRequest$IPSetId": "

    The IPSetId of the IPSet that you want to get. IPSetId is returned by CreateIPSet and by ListIPSets.

    ", + "GetRuleRequest$RuleId": "

    The RuleId of the Rule that you want to get. RuleId is returned by CreateRule and by ListRules.

    ", + "GetSampledRequestsRequest$WebAclId": "

    The WebACLId of the WebACL for which you want GetSampledRequests to return a sample of requests.

    ", + "GetSampledRequestsRequest$RuleId": "

    RuleId is one of two values:

    • The RuleId of the Rule for which you want GetSampledRequests to return a sample of requests.
    • Default_Action, which causes GetSampledRequests to return a sample of the requests that didn't match any of the rules in the specified WebACL.
    ", + "GetSizeConstraintSetRequest$SizeConstraintSetId": "

    The SizeConstraintSetId of the SizeConstraintSet that you want to get. SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "GetSqlInjectionMatchSetRequest$SqlInjectionMatchSetId": "

    The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to get. SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "GetWebACLRequest$WebACLId": "

    The WebACLId of the WebACL that you want to get. WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "IPSet$IPSetId": "

    The IPSetId for an IPSet. You use IPSetId to get information about an IPSet (see GetIPSet), update an IPSet (see UpdateIPSet), insert an IPSet into a Rule or delete one from a Rule (see UpdateRule), and delete an IPSet from AWS WAF (see DeleteIPSet).

    IPSetId is returned by CreateIPSet and by ListIPSets.

    ", + "IPSetSummary$IPSetId": "

    The IPSetId for an IPSet. You can use IPSetId in a GetIPSet request to get detailed information about an IPSet.

    ", + "Predicate$DataId": "

    A unique identifier for a predicate in a Rule, such as ByteMatchSetId or IPSetId. The ID is returned by the corresponding Create or List command.

    ", + "Rule$RuleId": "

    A unique identifier for a Rule. You use RuleId to get more information about a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule from AWS WAF (see DeleteRule).

    RuleId is returned by CreateRule and by ListRules.

    ", + "RuleSummary$RuleId": "

    A unique identifier for a Rule. You use RuleId to get more information about a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule from AWS WAF (see DeleteRule).

    RuleId is returned by CreateRule and by ListRules.

    ", + "SizeConstraintSet$SizeConstraintSetId": "

    A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId to get information about a SizeConstraintSet (see GetSizeConstraintSet), update a SizeConstraintSet (see UpdateSizeConstraintSet, insert a SizeConstraintSet into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet from AWS WAF (see DeleteSizeConstraintSet).

    SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "SizeConstraintSetSummary$SizeConstraintSetId": "

    A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId to get information about a SizeConstraintSet (see GetSizeConstraintSet), update a SizeConstraintSet (see UpdateSizeConstraintSet, insert a SizeConstraintSet into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet from AWS WAF (see DeleteSizeConstraintSet).

    SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "SqlInjectionMatchSet$SqlInjectionMatchSetId": "

    A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet).

    SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "SqlInjectionMatchSetSummary$SqlInjectionMatchSetId": "

    A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet, insert a SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet).

    SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "UpdateByteMatchSetRequest$ByteMatchSetId": "

    The ByteMatchSetId of the ByteMatchSet that you want to update. ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.

    ", + "UpdateIPSetRequest$IPSetId": "

    The IPSetId of the IPSet that you want to update. IPSetId is returned by CreateIPSet and by ListIPSets.

    ", + "UpdateRuleRequest$RuleId": "

    The RuleId of the Rule that you want to update. RuleId is returned by CreateRule and by ListRules.

    ", + "UpdateSizeConstraintSetRequest$SizeConstraintSetId": "

    The SizeConstraintSetId of the SizeConstraintSet that you want to update. SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets.

    ", + "UpdateSqlInjectionMatchSetRequest$SqlInjectionMatchSetId": "

    The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to update. SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.

    ", + "UpdateWebACLRequest$WebACLId": "

    The WebACLId of the WebACL that you want to update. WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "WebACL$WebACLId": "

    A unique identifier for a WebACL. You use WebACLId to get information about a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete a WebACL from AWS WAF (see DeleteWebACL).

    WebACLId is returned by CreateWebACL and by ListWebACLs.

    ", + "WebACLSummary$WebACLId": "

    A unique identifier for a WebACL. You use WebACLId to get information about a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete a WebACL from AWS WAF (see DeleteWebACL).

    WebACLId is returned by CreateWebACL and by ListWebACLs.

    " + } + }, + "ResourceName": { + "base": null, + "refs": { + "ByteMatchSet$Name": "

    A friendly name or description of the ByteMatchSet. You can't change Name after you create a ByteMatchSet.

    ", + "ByteMatchSetSummary$Name": "

    A friendly name or description of the ByteMatchSet. You can't change Name after you create a ByteMatchSet.

    ", + "CreateByteMatchSetRequest$Name": "

    A friendly name or description of the ByteMatchSet. You can't change Name after you create a ByteMatchSet.

    ", + "CreateIPSetRequest$Name": "

    A friendly name or description of the IPSet. You can't change Name after you create the IPSet.

    ", + "CreateRuleRequest$Name": "

    A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

    ", + "CreateSizeConstraintSetRequest$Name": "

    A friendly name or description of the SizeConstraintSet. You can't change Name after you create a SizeConstraintSet.

    ", + "CreateSqlInjectionMatchSetRequest$Name": "

    A friendly name or description for the SqlInjectionMatchSet that you're creating. You can't change Name after you create the SqlInjectionMatchSet.

    ", + "CreateWebACLRequest$Name": "

    A friendly name or description of the WebACL. You can't change Name after you create the WebACL.

    ", + "IPSet$Name": "

    A friendly name or description of the IPSet. You can't change the name of an IPSet after you create it.

    ", + "IPSetSummary$Name": "

    A friendly name or description of the IPSet. You can't change the name of an IPSet after you create it.

    ", + "Rule$Name": "

    The friendly name or description for the Rule. You can't change the name of a Rule after you create it.

    ", + "RuleSummary$Name": "

    A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

    ", + "SizeConstraintSet$Name": "

    The name, if any, of the SizeConstraintSet.

    ", + "SizeConstraintSetSummary$Name": "

    The name of the SizeConstraintSet, if any.

    ", + "SqlInjectionMatchSet$Name": "

    The name, if any, of the SqlInjectionMatchSet.

    ", + "SqlInjectionMatchSetSummary$Name": "

    The name of the SqlInjectionMatchSet, if any, specified by Id.

    ", + "WebACL$Name": "

    A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

    ", + "WebACLSummary$Name": "

    A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

    " + } + }, + "Rule": { + "base": "

    A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects that identify the web requests that you want to allow, block, or count. For example, you might create a Rule that includes the following predicates:

    • An IPSet that causes AWS WAF to search for web requests that originate from the IP address 192.0.2.44
    • A ByteMatchSet that causes AWS WAF to search for web requests for which the value of the User-Agent header is BadBot.

    To match the settings in this Rule, a request must originate from 192.0.2.44 AND include a User-Agent header for which the value is BadBot.

    ", + "refs": { + "CreateRuleResponse$Rule": "

    The Rule returned in the CreateRule response.

    ", + "GetRuleResponse$Rule": "

    Information about the Rule that you specified in the GetRule request. For more information, see the following topics:

    • Rule: Contains MetricName, Name, an array of Predicate objects, and RuleId
    • Predicate: Each Predicate object contains DataId, Negated, and Type
    " + } + }, + "RulePriority": { + "base": null, + "refs": { + "ActivatedRule$Priority": "

    Specifies the order in which the Rules in a WebACL are evaluated. Rules with a lower value for Priority are evaluated before Rules with a higher value. The value must be a unique integer. If you add multiple Rules to a WebACL, the values don't need to be consecutive.

    " + } + }, + "RuleSummaries": { + "base": null, + "refs": { + "ListRulesResponse$Rules": "

    An array of RuleSummary objects.

    " + } + }, + "RuleSummary": { + "base": "

    Contains the identifier and the friendly name or description of the Rule.

    ", + "refs": { + "RuleSummaries$member": null + } + }, + "RuleUpdate": { + "base": "

    Specifies a Predicate (such as an IPSet) and indicates whether you want to add it to a Rule or delete it from a Rule.

    ", + "refs": { + "RuleUpdates$member": null + } + }, + "RuleUpdates": { + "base": null, + "refs": { + "UpdateRuleRequest$Updates": "

    An array of RuleUpdate objects that you want to insert into or delete from a Rule. For more information, see the applicable data types:

    " + } + }, + "SampleWeight": { + "base": null, + "refs": { + "SampledHTTPRequest$Weight": "

    A value that indicates how one result in the response relates proportionally to other results in the response. A result that has a weight of 2 represents roughly twice as many CloudFront web requests as a result that has a weight of 1.

    " + } + }, + "SampledHTTPRequest": { + "base": "

    The response from a GetSampledRequests request includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains one SampledHTTPRequest object for each web request that is returned by GetSampledRequests.

    ", + "refs": { + "SampledHTTPRequests$member": null + } + }, + "SampledHTTPRequests": { + "base": null, + "refs": { + "GetSampledRequestsResponse$SampledRequests": "

    A complex type that contains detailed information about each of the requests in the sample.

    " + } + }, + "Size": { + "base": null, + "refs": { + "SizeConstraint$Size": "

    The size in bytes that you want AWS WAF to compare against the size of the specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

    Valid values for size are 0 - 21474836480 bytes (0 - 20 GB).

    If you specify URI for the value of Type, the / in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

    " + } + }, + "SizeConstraint": { + "base": "

    Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

    ", + "refs": { + "SizeConstraintSetUpdate$SizeConstraint": "

    Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

    ", + "SizeConstraints$member": null + } + }, + "SizeConstraintSet": { + "base": "

    A complex type that contains SizeConstraint objects, which specify the parts of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet contains more than one SizeConstraint object, a request only needs to match one constraint to be considered a match.

    ", + "refs": { + "CreateSizeConstraintSetResponse$SizeConstraintSet": "

    A SizeConstraintSet that contains no SizeConstraint objects.

    ", + "GetSizeConstraintSetResponse$SizeConstraintSet": "

    Information about the SizeConstraintSet that you specified in the GetSizeConstraintSet request. For more information, see the following topics:

    " + } + }, + "SizeConstraintSetSummaries": { + "base": null, + "refs": { + "ListSizeConstraintSetsResponse$SizeConstraintSets": "

    An array of SizeConstraintSetSummary objects.

    " + } + }, + "SizeConstraintSetSummary": { + "base": "

    The Id and Name of a SizeConstraintSet.

    ", + "refs": { + "SizeConstraintSetSummaries$member": null + } + }, + "SizeConstraintSetUpdate": { + "base": "

    Specifies the part of a web request that you want to inspect the size of and indicates whether you want to add the specification to a SizeConstraintSet or delete it from a SizeConstraintSet.

    ", + "refs": { + "SizeConstraintSetUpdates$member": null + } + }, + "SizeConstraintSetUpdates": { + "base": null, + "refs": { + "UpdateSizeConstraintSetRequest$Updates": "

    An array of SizeConstraintSetUpdate objects that you want to insert into or delete from a SizeConstraintSet. For more information, see the applicable data types:

    " + } + }, + "SizeConstraints": { + "base": null, + "refs": { + "SizeConstraintSet$SizeConstraints": "

    Specifies the parts of web requests that you want to inspect the size of.

    " + } + }, + "SqlInjectionMatchSet": { + "base": "

    A complex type that contains SqlInjectionMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple object, a request needs to include snippets of SQL code in only one of the specified parts of the request to be considered a match.

    ", + "refs": { + "CreateSqlInjectionMatchSetResponse$SqlInjectionMatchSet": "

    A SqlInjectionMatchSet.

    ", + "GetSqlInjectionMatchSetResponse$SqlInjectionMatchSet": "

    Information about the SqlInjectionMatchSet that you specified in the GetSqlInjectionMatchSet request. For more information, see the following topics:

    " + } + }, + "SqlInjectionMatchSetSummaries": { + "base": null, + "refs": { + "ListSqlInjectionMatchSetsResponse$SqlInjectionMatchSets": "

    An array of SqlInjectionMatchSetSummary objects.

    " + } + }, + "SqlInjectionMatchSetSummary": { + "base": "

    The Id and Name of a SqlInjectionMatchSet.

    ", + "refs": { + "SqlInjectionMatchSetSummaries$member": null + } + }, + "SqlInjectionMatchSetUpdate": { + "base": "

    Specifies the part of a web request that you want to inspect for snippets of malicious SQL code and indicates whether you want to add the specification to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet.

    ", + "refs": { + "SqlInjectionMatchSetUpdates$member": null + } + }, + "SqlInjectionMatchSetUpdates": { + "base": null, + "refs": { + "UpdateSqlInjectionMatchSetRequest$Updates": "

    An array of SqlInjectionMatchSetUpdate objects that you want to insert into or delete from a SqlInjectionMatchSet. For more information, see the applicable data types:

    " + } + }, + "SqlInjectionMatchTuple": { + "base": "

    Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

    ", + "refs": { + "SqlInjectionMatchSetUpdate$SqlInjectionMatchTuple": "

    Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

    ", + "SqlInjectionMatchTuples$member": null + } + }, + "SqlInjectionMatchTuples": { + "base": null, + "refs": { + "SqlInjectionMatchSet$SqlInjectionMatchTuples": "

    Specifies the parts of web requests that you want to inspect for snippets of malicious SQL code.

    " + } + }, + "TextTransformation": { + "base": null, + "refs": { + "ByteMatchTuple$TextTransformation": "

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on TargetString before inspecting a request for a match.

    CMD_LINE

    When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

    • Delete the following characters: \\ \" ' ^
    • Delete spaces before the following characters: / (
    • Replace the following characters with a space: , ;
    • Replace multiple spaces with one space
    • Convert uppercase letters (A-Z) to lowercase (a-z)

    COMPRESS_WHITE_SPACE

    Use this option to replace the following characters with a space character (decimal 32):

    • \\f, formfeed, decimal 12
    • \\t, tab, decimal 9
    • \\n, newline, decimal 10
    • \\r, carriage return, decimal 13
    • \\v, vertical tab, decimal 11
    • non-breaking space, decimal 160

    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

    HTML_ENTITY_DECODE

    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

    • Replaces (ampersand)quot; with \"
    • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160
    • Replaces (ampersand)lt; with a \"less than\" symbol
    • Replaces (ampersand)gt; with >
    • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters
    • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

    LOWERCASE

    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

    URL_DECODE

    Use this option to decode a URL-encoded value.

    NONE

    Specify NONE if you don't want to perform any text transformations.

    ", + "SizeConstraint$TextTransformation": "

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

    Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

    NONE

    Specify NONE if you don't want to perform any text transformations.

    CMD_LINE

    When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

    • Delete the following characters: \\ \" ' ^
    • Delete spaces before the following characters: / (
    • Replace the following characters with a space: , ;
    • Replace multiple spaces with one space
    • Convert uppercase letters (A-Z) to lowercase (a-z)

    COMPRESS_WHITE_SPACE

    Use this option to replace the following characters with a space character (decimal 32):

    • \\f, formfeed, decimal 12
    • \\t, tab, decimal 9
    • \\n, newline, decimal 10
    • \\r, carriage return, decimal 13
    • \\v, vertical tab, decimal 11
    • non-breaking space, decimal 160

    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

    HTML_ENTITY_DECODE

    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

    • Replaces (ampersand)quot; with \"
    • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160
    • Replaces (ampersand)lt; with a \"less than\" symbol
    • Replaces (ampersand)gt; with >
    • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters
    • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

    LOWERCASE

    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

    URL_DECODE

    Use this option to decode a URL-encoded value.

    ", + "SqlInjectionMatchTuple$TextTransformation": "

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

    CMD_LINE

    When you're concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

    • Delete the following characters: \\ \" ' ^
    • Delete spaces before the following characters: / (
    • Replace the following characters with a space: , ;
    • Replace multiple spaces with one space
    • Convert uppercase letters (A-Z) to lowercase (a-z)

    COMPRESS_WHITE_SPACE

    Use this option to replace the following characters with a space character (decimal 32):

    • \\f, formfeed, decimal 12
    • \\t, tab, decimal 9
    • \\n, newline, decimal 10
    • \\r, carriage return, decimal 13
    • \\v, vertical tab, decimal 11
    • non-breaking space, decimal 160

    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

    HTML_ENTITY_DECODE

    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

    • Replaces (ampersand)quot; with \"
    • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160
    • Replaces (ampersand)lt; with a \"less than\" symbol
    • Replaces (ampersand)gt; with >
    • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters
    • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

    LOWERCASE

    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

    URL_DECODE

    Use this option to decode a URL-encoded value.

    NONE

    Specify NONE if you don't want to perform any text transformations.

    " + } + }, + "TimeWindow": { + "base": "

    In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

    In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

    ", + "refs": { + "GetSampledRequestsRequest$TimeWindow": "

    The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. Specify the date and time in Unix time format (in seconds). You can specify any time range in the previous three hours.

    ", + "GetSampledRequestsResponse$TimeWindow": "

    Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "SampledHTTPRequest$Timestamp": "

    The time at which AWS WAF received the request from your AWS resource, in Unix time format (in seconds).

    ", + "TimeWindow$StartTime": "

    The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You can specify any time range in the previous three hours.

    ", + "TimeWindow$EndTime": "

    The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You can specify any time range in the previous three hours.

    " + } + }, + "URIString": { + "base": null, + "refs": { + "HTTPRequest$URI": "

    The part of a web request that identifies the resource, for example, /images/daily-ad.jpg.

    " + } + }, + "UpdateByteMatchSetRequest": { + "base": null, + "refs": { + } + }, + "UpdateByteMatchSetResponse": { + "base": null, + "refs": { + } + }, + "UpdateIPSetRequest": { + "base": null, + "refs": { + } + }, + "UpdateIPSetResponse": { + "base": null, + "refs": { + } + }, + "UpdateRuleRequest": { + "base": null, + "refs": { + } + }, + "UpdateRuleResponse": { + "base": null, + "refs": { + } + }, + "UpdateSizeConstraintSetRequest": { + "base": null, + "refs": { + } + }, + "UpdateSizeConstraintSetResponse": { + "base": null, + "refs": { + } + }, + "UpdateSqlInjectionMatchSetRequest": { + "base": "

    A request to update a SqlInjectionMatchSet.

    ", + "refs": { + } + }, + "UpdateSqlInjectionMatchSetResponse": { + "base": "

    The response to an UpdateSqlInjectionMatchSets request.

    ", + "refs": { + } + }, + "UpdateWebACLRequest": { + "base": null, + "refs": { + } + }, + "UpdateWebACLResponse": { + "base": null, + "refs": { + } + }, + "WAFDisallowedNameException": { + "base": "

    The name specified is invalid.

    ", + "refs": { + } + }, + "WAFInternalErrorException": { + "base": "

    The operation failed because of a system problem, even though the request was valid. Retry your request.

    ", + "refs": { + } + }, + "WAFInvalidAccountException": { + "base": "

    The operation failed because you tried to create, update, or delete an object by using an invalid account identifier.

    ", + "refs": { + } + }, + "WAFInvalidOperationException": { + "base": "

    The operation failed because there was nothing to do. For example:

    • You tried to remove a Rule from a WebACL, but the Rule isn't in the specified WebACL.
    • You tried to remove an IP address from an IPSet, but the IP address isn't in the specified IPSet.
    • You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple isn't in the specified WebACL.
    • You tried to add a Rule to a WebACL, but the Rule already exists in the specified WebACL.
    • You tried to add an IP address to an IPSet, but the IP address already exists in the specified IPSet.
    • You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple already exists in the specified WebACL.
    ", + "refs": { + } + }, + "WAFInvalidParameterException": { + "base": "

    The operation failed because AWS WAF didn't recognize a parameter in the request. For example:

    • You specified an invalid parameter name.
    • You specified an invalid value.
    • You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) using an action other than INSERT or DELETE.
    • You tried to create a WebACL with a DefaultAction Type other than ALLOW, BLOCK, or COUNT.
    • You tried to update a WebACL with a WafAction Type other than ALLOW, BLOCK, or COUNT.
    • You tried to update a ByteMatchSet with a FieldToMatch Type other than HEADER, QUERY_STRING, or URI.
    • You tried to update a ByteMatchSet with a Field of HEADER but no value for Data.
    ", + "refs": { + } + }, + "WAFLimitsExceededException": { + "base": "

    The operation exceeds a resource limit, for example, the maximum number of WebACL objects that you can create for an AWS account. For more information, see Limits in the AWS WAF Developer Guide.

    ", + "refs": { + } + }, + "WAFNonEmptyEntityException": { + "base": "

    The operation failed because you tried to delete an object that isn't empty. For example:

    • You tried to delete a WebACL that still contains one or more Rule objects.
    • You tried to delete a Rule that still contains one or more ByteMatchSet objects or other predicates.
    • You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple objects.
    • You tried to delete an IPSet that references one or more IP addresses.
    ", + "refs": { + } + }, + "WAFNonexistentContainerException": { + "base": "

    The operation failed because you tried to add an object to or delete an object from another object that doesn't exist. For example:

    • You tried to add a Rule to or delete a Rule from a WebACL that doesn't exist.
    • You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule that doesn't exist.
    • You tried to add an IP address to or delete an IP address from an IPSet that doesn't exist.
    • You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from a ByteMatchSet that doesn't exist.
    ", + "refs": { + } + }, + "WAFNonexistentItemException": { + "base": "

    The operation failed because the referenced object doesn't exist.

    ", + "refs": { + } + }, + "WAFReferencedItemException": { + "base": "

    The operation failed because you tried to delete an object that is still in use. For example:

    • You tried to delete a ByteMatchSet that is still referenced by a Rule.
    • You tried to delete a Rule that is still referenced by a WebACL.

    ", + "refs": { + } + }, + "WAFStaleDataException": { + "base": "

    The operation failed because you tried to create, update, or delete an object by using a change token that has already been used.

    ", + "refs": { + } + }, + "WafAction": { + "base": "

    For the action that is associated with a rule in a WebACL, specifies the action that you want AWS WAF to perform when a web request matches all of the conditions in a rule. For the default action in a WebACL, specifies the action that you want AWS WAF to take when a web request doesn't match all of the conditions in any of the rules in a WebACL.

    ", + "refs": { + "ActivatedRule$Action": "

    Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule. Valid values for Action include the following:

    • ALLOW: CloudFront responds with the requested object.
    • BLOCK: CloudFront responds with an HTTP 403 (Forbidden) status code.
    • COUNT: AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.
    ", + "CreateWebACLRequest$DefaultAction": "

    The action that you want AWS WAF to take when a request doesn't match the criteria specified in any of the Rule objects that are associated with the WebACL.

    ", + "UpdateWebACLRequest$DefaultAction": null, + "WebACL$DefaultAction": "

    The action to perform if none of the Rules contained in the WebACL match. The action is specified by the WafAction object.

    " + } + }, + "WafActionType": { + "base": null, + "refs": { + "WafAction$Type": "

    Specifies how you want AWS WAF to respond to requests that match the settings in a Rule. Valid settings include the following:

    • ALLOW: AWS WAF allows requests
    • BLOCK: AWS WAF blocks requests
    • COUNT: AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.
    " + } + }, + "WebACL": { + "base": "

    Contains the Rules that identify the requests that you want to allow, block, or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), and the action for each Rule that you add to a WebACL, for example, block requests from specified IP addresses or block requests from specified referrers. You also associate the WebACL with a CloudFront distribution to identify the requests that you want AWS WAF to filter. If you add more than one Rule to a WebACL, a request needs to match only one of the specifications to be allowed, blocked, or counted. For more information, see UpdateWebACL.

    ", + "refs": { + "CreateWebACLResponse$WebACL": "

    The WebACL returned in the CreateWebACL response.

    ", + "GetWebACLResponse$WebACL": "

    Information about the WebACL that you specified in the GetWebACL request. For more information, see the following topics:

    • WebACL: Contains DefaultAction, MetricName, Name, an array of Rule objects, and WebACLId
    • DefaultAction (Data type is WafAction): Contains Type
    • Rules: Contains an array of ActivatedRule objects, which contain Action, Priority, and RuleId
    • Action: Contains Type
    " + } + }, + "WebACLSummaries": { + "base": null, + "refs": { + "ListWebACLsResponse$WebACLs": "

    An array of WebACLSummary objects.

    " + } + }, + "WebACLSummary": { + "base": "

    Contains the identifier and the name or description of the WebACL.

    ", + "refs": { + "WebACLSummaries$member": null + } + }, + "WebACLUpdate": { + "base": "

    Specifies whether to insert a Rule into or delete a Rule from a WebACL.

    ", + "refs": { + "WebACLUpdates$member": null + } + }, + "WebACLUpdates": { + "base": null, + "refs": { + "UpdateWebACLRequest$Updates": "

    An array of updates to make to the WebACL.

    An array of WebACLUpdate objects that you want to insert into or delete from a WebACL. For more information, see the applicable data types:

    " + } + }, + "errorMessage": { + "base": null, + "refs": { + "WAFDisallowedNameException$message": null, + "WAFInternalErrorException$message": null, + "WAFInvalidOperationException$message": null, + "WAFLimitsExceededException$message": null, + "WAFNonEmptyEntityException$message": null, + "WAFNonexistentContainerException$message": null, + "WAFNonexistentItemException$message": null, + "WAFReferencedItemException$message": null, + "WAFStaleDataException$message": null + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/examples-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/examples-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/examples-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/waf/2015-08-24/examples-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/api-2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/api-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/api-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/api-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,528 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-04-08", + "endpointPrefix":"workspaces", + "jsonVersion":"1.1", + "serviceFullName":"Amazon WorkSpaces", + "signatureVersion":"v4", + "targetPrefix":"WorkspacesService", + "protocol":"json" + }, + "operations":{ + 
"CreateWorkspaces":{ + "name":"CreateWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWorkspacesRequest"}, + "output":{"shape":"CreateWorkspacesResult"}, + "errors":[ + { + "shape":"ResourceLimitExceededException", + "exception":true + } + ] + }, + "DescribeWorkspaceBundles":{ + "name":"DescribeWorkspaceBundles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspaceBundlesRequest"}, + "output":{"shape":"DescribeWorkspaceBundlesResult"}, + "errors":[ + { + "shape":"InvalidParameterValuesException", + "exception":true + } + ] + }, + "DescribeWorkspaceDirectories":{ + "name":"DescribeWorkspaceDirectories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspaceDirectoriesRequest"}, + "output":{"shape":"DescribeWorkspaceDirectoriesResult"}, + "errors":[ + { + "shape":"InvalidParameterValuesException", + "exception":true + } + ] + }, + "DescribeWorkspaces":{ + "name":"DescribeWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkspacesRequest"}, + "output":{"shape":"DescribeWorkspacesResult"}, + "errors":[ + { + "shape":"InvalidParameterValuesException", + "exception":true + }, + { + "shape":"ResourceUnavailableException", + "exception":true + } + ] + }, + "RebootWorkspaces":{ + "name":"RebootWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootWorkspacesRequest"}, + "output":{"shape":"RebootWorkspacesResult"} + }, + "RebuildWorkspaces":{ + "name":"RebuildWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebuildWorkspacesRequest"}, + "output":{"shape":"RebuildWorkspacesResult"} + }, + "TerminateWorkspaces":{ + "name":"TerminateWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TerminateWorkspacesRequest"}, + "output":{"shape":"TerminateWorkspacesResult"} + } + }, + "shapes":{ + "ARN":{ + 
"type":"string", + "pattern":"^arn:aws:[A-Za-z0-9][A-za-z0-9_/.-]{0,62}:[A-za-z0-9_/.-]{0,63}:[A-za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.-]{0,127}$" + }, + "Alias":{"type":"string"}, + "BooleanObject":{"type":"boolean"}, + "BundleId":{ + "type":"string", + "pattern":"^wsb-[0-9a-z]{8,63}$" + }, + "BundleIdList":{ + "type":"list", + "member":{"shape":"BundleId"}, + "min":1, + "max":25 + }, + "BundleList":{ + "type":"list", + "member":{"shape":"WorkspaceBundle"} + }, + "BundleOwner":{"type":"string"}, + "Compute":{ + "type":"string", + "enum":[ + "VALUE", + "STANDARD", + "PERFORMANCE" + ] + }, + "ComputeType":{ + "type":"structure", + "members":{ + "Name":{"shape":"Compute"} + } + }, + "ComputerName":{"type":"string"}, + "CreateWorkspacesRequest":{ + "type":"structure", + "required":["Workspaces"], + "members":{ + "Workspaces":{"shape":"WorkspaceRequestList"} + } + }, + "CreateWorkspacesResult":{ + "type":"structure", + "members":{ + "FailedRequests":{"shape":"FailedCreateWorkspaceRequests"}, + "PendingRequests":{"shape":"WorkspaceList"} + } + }, + "DefaultOu":{"type":"string"}, + "DefaultWorkspaceCreationProperties":{ + "type":"structure", + "members":{ + "EnableWorkDocs":{"shape":"BooleanObject"}, + "EnableInternetAccess":{"shape":"BooleanObject"}, + "DefaultOu":{"shape":"DefaultOu"}, + "CustomSecurityGroupId":{"shape":"SecurityGroupId"}, + "UserEnabledAsLocalAdministrator":{"shape":"BooleanObject"} + } + }, + "DescribeWorkspaceBundlesRequest":{ + "type":"structure", + "members":{ + "BundleIds":{"shape":"BundleIdList"}, + "Owner":{"shape":"BundleOwner"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspaceBundlesResult":{ + "type":"structure", + "members":{ + "Bundles":{"shape":"BundleList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspaceDirectoriesRequest":{ + "type":"structure", + "members":{ + "DirectoryIds":{"shape":"DirectoryIdList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + 
"DescribeWorkspaceDirectoriesResult":{ + "type":"structure", + "members":{ + "Directories":{"shape":"DirectoryList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspacesRequest":{ + "type":"structure", + "members":{ + "WorkspaceIds":{"shape":"WorkspaceIdList"}, + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "BundleId":{"shape":"BundleId"}, + "Limit":{"shape":"Limit"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "DescribeWorkspacesResult":{ + "type":"structure", + "members":{ + "Workspaces":{"shape":"WorkspaceList"}, + "NextToken":{"shape":"PaginationToken"} + } + }, + "Description":{"type":"string"}, + "DirectoryId":{ + "type":"string", + "pattern":"^d-[0-9a-f]{8,63}$" + }, + "DirectoryIdList":{ + "type":"list", + "member":{"shape":"DirectoryId"}, + "min":1, + "max":25 + }, + "DirectoryList":{ + "type":"list", + "member":{"shape":"WorkspaceDirectory"} + }, + "DirectoryName":{"type":"string"}, + "DnsIpAddresses":{ + "type":"list", + "member":{"shape":"IpAddress"} + }, + "ErrorType":{"type":"string"}, + "ExceptionMessage":{"type":"string"}, + "FailedCreateWorkspaceRequest":{ + "type":"structure", + "members":{ + "WorkspaceRequest":{"shape":"WorkspaceRequest"}, + "ErrorCode":{"shape":"ErrorType"}, + "ErrorMessage":{"shape":"Description"} + } + }, + "FailedCreateWorkspaceRequests":{ + "type":"list", + "member":{"shape":"FailedCreateWorkspaceRequest"} + }, + "FailedRebootWorkspaceRequests":{ + "type":"list", + "member":{"shape":"FailedWorkspaceChangeRequest"} + }, + "FailedRebuildWorkspaceRequests":{ + "type":"list", + "member":{"shape":"FailedWorkspaceChangeRequest"} + }, + "FailedTerminateWorkspaceRequests":{ + "type":"list", + "member":{"shape":"FailedWorkspaceChangeRequest"} + }, + "FailedWorkspaceChangeRequest":{ + "type":"structure", + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"}, + "ErrorCode":{"shape":"ErrorType"}, + "ErrorMessage":{"shape":"Description"} + } + }, + 
"InvalidParameterValuesException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "IpAddress":{"type":"string"}, + "Limit":{ + "type":"integer", + "min":1, + "max":25 + }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, + "PaginationToken":{ + "type":"string", + "min":1, + "max":63 + }, + "RebootRequest":{ + "type":"structure", + "required":["WorkspaceId"], + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"} + } + }, + "RebootWorkspaceRequests":{ + "type":"list", + "member":{"shape":"RebootRequest"}, + "min":1, + "max":25 + }, + "RebootWorkspacesRequest":{ + "type":"structure", + "required":["RebootWorkspaceRequests"], + "members":{ + "RebootWorkspaceRequests":{"shape":"RebootWorkspaceRequests"} + } + }, + "RebootWorkspacesResult":{ + "type":"structure", + "members":{ + "FailedRequests":{"shape":"FailedRebootWorkspaceRequests"} + } + }, + "RebuildRequest":{ + "type":"structure", + "required":["WorkspaceId"], + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"} + } + }, + "RebuildWorkspaceRequests":{ + "type":"list", + "member":{"shape":"RebuildRequest"}, + "min":1, + "max":1 + }, + "RebuildWorkspacesRequest":{ + "type":"structure", + "required":["RebuildWorkspaceRequests"], + "members":{ + "RebuildWorkspaceRequests":{"shape":"RebuildWorkspaceRequests"} + } + }, + "RebuildWorkspacesResult":{ + "type":"structure", + "members":{ + "FailedRequests":{"shape":"FailedRebuildWorkspaceRequests"} + } + }, + "RegistrationCode":{ + "type":"string", + "min":1, + "max":20 + }, + "ResourceLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "ResourceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"}, + "ResourceId":{"shape":"NonEmptyString"} + }, + "exception":true + }, + "SecurityGroupId":{ + "type":"string", + "pattern":"^(sg-[0-9a-f]{8})$" + }, + "SubnetId":{ + "type":"string", 
+ "pattern":"^(subnet-[0-9a-f]{8})$" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"} + }, + "TerminateRequest":{ + "type":"structure", + "required":["WorkspaceId"], + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"} + } + }, + "TerminateWorkspaceRequests":{ + "type":"list", + "member":{"shape":"TerminateRequest"}, + "min":1, + "max":25 + }, + "TerminateWorkspacesRequest":{ + "type":"structure", + "required":["TerminateWorkspaceRequests"], + "members":{ + "TerminateWorkspaceRequests":{"shape":"TerminateWorkspaceRequests"} + } + }, + "TerminateWorkspacesResult":{ + "type":"structure", + "members":{ + "FailedRequests":{"shape":"FailedTerminateWorkspaceRequests"} + } + }, + "UserName":{ + "type":"string", + "min":1, + "max":63 + }, + "UserStorage":{ + "type":"structure", + "members":{ + "Capacity":{"shape":"NonEmptyString"} + } + }, + "VolumeEncryptionKey":{"type":"string"}, + "Workspace":{ + "type":"structure", + "members":{ + "WorkspaceId":{"shape":"WorkspaceId"}, + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "IpAddress":{"shape":"IpAddress"}, + "State":{"shape":"WorkspaceState"}, + "BundleId":{"shape":"BundleId"}, + "SubnetId":{"shape":"SubnetId"}, + "ErrorMessage":{"shape":"Description"}, + "ErrorCode":{"shape":"WorkspaceErrorCode"}, + "ComputerName":{"shape":"ComputerName"}, + "VolumeEncryptionKey":{"shape":"VolumeEncryptionKey"}, + "UserVolumeEncryptionEnabled":{"shape":"BooleanObject"}, + "RootVolumeEncryptionEnabled":{"shape":"BooleanObject"} + } + }, + "WorkspaceBundle":{ + "type":"structure", + "members":{ + "BundleId":{"shape":"BundleId"}, + "Name":{"shape":"NonEmptyString"}, + "Owner":{"shape":"BundleOwner"}, + "Description":{"shape":"Description"}, + "UserStorage":{"shape":"UserStorage"}, + "ComputeType":{"shape":"ComputeType"} + } + }, + "WorkspaceDirectory":{ + "type":"structure", + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "Alias":{"shape":"Alias"}, + 
"DirectoryName":{"shape":"DirectoryName"}, + "RegistrationCode":{"shape":"RegistrationCode"}, + "SubnetIds":{"shape":"SubnetIds"}, + "DnsIpAddresses":{"shape":"DnsIpAddresses"}, + "CustomerUserName":{"shape":"UserName"}, + "IamRoleId":{"shape":"ARN"}, + "DirectoryType":{"shape":"WorkspaceDirectoryType"}, + "WorkspaceSecurityGroupId":{"shape":"SecurityGroupId"}, + "State":{"shape":"WorkspaceDirectoryState"}, + "WorkspaceCreationProperties":{"shape":"DefaultWorkspaceCreationProperties"} + } + }, + "WorkspaceDirectoryState":{ + "type":"string", + "enum":[ + "REGISTERING", + "REGISTERED", + "DEREGISTERING", + "DEREGISTERED", + "ERROR" + ] + }, + "WorkspaceDirectoryType":{ + "type":"string", + "enum":[ + "SIMPLE_AD", + "AD_CONNECTOR" + ] + }, + "WorkspaceErrorCode":{"type":"string"}, + "WorkspaceId":{ + "type":"string", + "pattern":"^ws-[0-9a-z]{8,63}$" + }, + "WorkspaceIdList":{ + "type":"list", + "member":{"shape":"WorkspaceId"}, + "min":1, + "max":25 + }, + "WorkspaceList":{ + "type":"list", + "member":{"shape":"Workspace"} + }, + "WorkspaceRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "UserName", + "BundleId" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "UserName":{"shape":"UserName"}, + "BundleId":{"shape":"BundleId"}, + "VolumeEncryptionKey":{"shape":"VolumeEncryptionKey"}, + "UserVolumeEncryptionEnabled":{"shape":"BooleanObject"}, + "RootVolumeEncryptionEnabled":{"shape":"BooleanObject"} + } + }, + "WorkspaceRequestList":{ + "type":"list", + "member":{"shape":"WorkspaceRequest"}, + "min":1, + "max":25 + }, + "WorkspaceState":{ + "type":"string", + "enum":[ + "PENDING", + "AVAILABLE", + "IMPAIRED", + "UNHEALTHY", + "REBOOTING", + "REBUILDING", + "TERMINATING", + "TERMINATED", + "SUSPENDED", + "ERROR" + ] + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/docs-2.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/docs-2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/docs-2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/docs-2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,474 @@ +{ + "version": "2.0", + "operations": { + "CreateWorkspaces": "

    Creates one or more WorkSpaces.

    This operation is asynchronous and returns before the WorkSpaces are created.

    ", + "DescribeWorkspaceBundles": "

    Obtains information about the WorkSpace bundles that are available to your account in the specified region.

    You can filter the results with either the BundleIds parameter, or the Owner parameter, but not both.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.

    ", + "DescribeWorkspaceDirectories": "

    Retrieves information about the AWS Directory Service directories in the region that are registered with Amazon WorkSpaces and are available to your account.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.

    ", + "DescribeWorkspaces": "

    Obtains information about the specified WorkSpaces.

    Only one of the filter parameters, such as BundleId, DirectoryId, or WorkspaceIds, can be specified at a time.

    This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the NextToken response member contains a token that you pass in the next call to this operation to retrieve the next set of items.

    ", + "RebootWorkspaces": "

    Reboots the specified WorkSpaces.

    To be able to reboot a WorkSpace, the WorkSpace must have a State of AVAILABLE, IMPAIRED, or INOPERABLE.

    This operation is asynchronous and will return before the WorkSpaces have rebooted.

    ", + "RebuildWorkspaces": "

    Rebuilds the specified WorkSpaces.

    Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. Rebuilding a WorkSpace causes the following to occur:

    • The system is restored to the image of the bundle that the WorkSpace is created from. Any applications that have been installed, or system settings that have been made since the WorkSpace was created will be lost.
    • The data drive (D drive) is re-created from the last automatic snapshot taken of the data drive. The current contents of the data drive are overwritten. Automatic snapshots of the data drive are taken every 12 hours, so the snapshot can be as much as 12 hours old.

    To be able to rebuild a WorkSpace, the WorkSpace must have a State of AVAILABLE or ERROR.

    This operation is asynchronous and will return before the WorkSpaces have been completely rebuilt.

    ", + "TerminateWorkspaces": "

    Terminates the specified WorkSpaces.

    Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is not maintained and will be destroyed. If you need to archive any user data, contact Amazon Web Services before terminating the WorkSpace.

    You can terminate a WorkSpace that is in any state except SUSPENDED.

    This operation is asynchronous and will return before the WorkSpaces have been completely terminated.

    " + }, + "service": "Amazon WorkSpaces Service

    This is the Amazon WorkSpaces API Reference. This guide provides detailed information about Amazon WorkSpaces operations, data types, parameters, and errors.

    ", + "shapes": { + "ARN": { + "base": null, + "refs": { + "WorkspaceDirectory$IamRoleId": "

    The identifier of the IAM role. This is the role that allows Amazon WorkSpaces to make calls to other services, such as Amazon EC2, on your behalf.

    " + } + }, + "Alias": { + "base": null, + "refs": { + "WorkspaceDirectory$Alias": "

    The directory alias.

    " + } + }, + "BooleanObject": { + "base": null, + "refs": { + "DefaultWorkspaceCreationProperties$EnableWorkDocs": "

    Specifies if the directory is enabled for Amazon WorkDocs.

    ", + "DefaultWorkspaceCreationProperties$EnableInternetAccess": "

    A public IP address will be attached to all WorkSpaces that are created or rebuilt.

    ", + "DefaultWorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "

    The WorkSpace user is an administrator on the WorkSpace.

    ", + "Workspace$UserVolumeEncryptionEnabled": "

    Specifies whether the data stored on the user volume, or D: drive, is encrypted.

    ", + "Workspace$RootVolumeEncryptionEnabled": "

    Specifies whether the data stored on the root volume, or C: drive, is encrypted.

    ", + "WorkspaceRequest$UserVolumeEncryptionEnabled": "

    Specifies whether the data stored on the user volume, or D: drive, is encrypted.

    ", + "WorkspaceRequest$RootVolumeEncryptionEnabled": "

    Specifies whether the data stored on the root volume, or C: drive, is encrypted.

    " + } + }, + "BundleId": { + "base": null, + "refs": { + "BundleIdList$member": null, + "DescribeWorkspacesRequest$BundleId": "

    The identifier of a bundle to obtain the WorkSpaces for. All WorkSpaces that are created from this bundle will be retrieved. This parameter cannot be combined with any other filter parameter.

    ", + "Workspace$BundleId": "

    The identifier of the bundle that the WorkSpace was created from.

    ", + "WorkspaceBundle$BundleId": "

    The bundle identifier.

    ", + "WorkspaceRequest$BundleId": "

    The identifier of the bundle to create the WorkSpace from. You can use the DescribeWorkspaceBundles operation to obtain a list of the bundles that are available.

    " + } + }, + "BundleIdList": { + "base": null, + "refs": { + "DescribeWorkspaceBundlesRequest$BundleIds": "

    An array of strings that contains the identifiers of the bundles to retrieve. This parameter cannot be combined with any other filter parameter.

    " + } + }, + "BundleList": { + "base": null, + "refs": { + "DescribeWorkspaceBundlesResult$Bundles": "

    An array of structures that contain information about the bundles.

    " + } + }, + "BundleOwner": { + "base": null, + "refs": { + "DescribeWorkspaceBundlesRequest$Owner": "

    The owner of the bundles to retrieve. This parameter cannot be combined with any other filter parameter.

    This contains one of the following values:

    • null - Retrieves the bundles that belong to the account making the call.
    • AMAZON - Retrieves the bundles that are provided by AWS.
    ", + "WorkspaceBundle$Owner": "

    The owner of the bundle. This contains the owner's account identifier, or AMAZON if the bundle is provided by AWS.

    " + } + }, + "Compute": { + "base": null, + "refs": { + "ComputeType$Name": "

    The name of the compute type for the bundle.

    " + } + }, + "ComputeType": { + "base": "

    Contains information about the compute type of a WorkSpace bundle.

    ", + "refs": { + "WorkspaceBundle$ComputeType": "

    A ComputeType object that specifies the compute type for the bundle.

    " + } + }, + "ComputerName": { + "base": null, + "refs": { + "Workspace$ComputerName": "

    The name of the WorkSpace as seen by the operating system.

    " + } + }, + "CreateWorkspacesRequest": { + "base": "

    Contains the inputs for the CreateWorkspaces operation.

    ", + "refs": { + } + }, + "CreateWorkspacesResult": { + "base": "

    Contains the result of the CreateWorkspaces operation.

    ", + "refs": { + } + }, + "DefaultOu": { + "base": null, + "refs": { + "DefaultWorkspaceCreationProperties$DefaultOu": "

    The organizational unit (OU) in the directory that the WorkSpace machine accounts are placed in.

    " + } + }, + "DefaultWorkspaceCreationProperties": { + "base": "

    Contains default WorkSpace creation information.

    ", + "refs": { + "WorkspaceDirectory$WorkspaceCreationProperties": "

    A structure that specifies the default creation properties for all WorkSpaces in the directory.

    " + } + }, + "DescribeWorkspaceBundlesRequest": { + "base": "

    Contains the inputs for the DescribeWorkspaceBundles operation.

    ", + "refs": { + } + }, + "DescribeWorkspaceBundlesResult": { + "base": "

    Contains the results of the DescribeWorkspaceBundles operation.

    ", + "refs": { + } + }, + "DescribeWorkspaceDirectoriesRequest": { + "base": "

    Contains the inputs for the DescribeWorkspaceDirectories operation.

    ", + "refs": { + } + }, + "DescribeWorkspaceDirectoriesResult": { + "base": "

    Contains the results of the DescribeWorkspaceDirectories operation.

    ", + "refs": { + } + }, + "DescribeWorkspacesRequest": { + "base": "

    Contains the inputs for the DescribeWorkspaces operation.

    ", + "refs": { + } + }, + "DescribeWorkspacesResult": { + "base": "

    Contains the results for the DescribeWorkspaces operation.

    ", + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "FailedCreateWorkspaceRequest$ErrorMessage": "

    The textual error message.

    ", + "FailedWorkspaceChangeRequest$ErrorMessage": "

    The textual error message.

    ", + "Workspace$ErrorMessage": "

    If the WorkSpace could not be created, this contains a textual error message that describes the failure.

    ", + "WorkspaceBundle$Description": "

    The bundle description.

    " + } + }, + "DirectoryId": { + "base": null, + "refs": { + "DescribeWorkspacesRequest$DirectoryId": "

    Specifies the directory identifier to which to limit the WorkSpaces. Optionally, you can specify a specific directory user with the UserName parameter. This parameter cannot be combined with any other filter parameter.

    ", + "DirectoryIdList$member": null, + "Workspace$DirectoryId": "

    The identifier of the AWS Directory Service directory that the WorkSpace belongs to.

    ", + "WorkspaceDirectory$DirectoryId": "

    The directory identifier.

    ", + "WorkspaceRequest$DirectoryId": "

    The identifier of the AWS Directory Service directory to create the WorkSpace in. You can use the DescribeWorkspaceDirectories operation to obtain a list of the directories that are available.

    " + } + }, + "DirectoryIdList": { + "base": null, + "refs": { + "DescribeWorkspaceDirectoriesRequest$DirectoryIds": "

    An array of strings that contains the directory identifiers to retrieve information for. If this member is null, all directories are retrieved.

    " + } + }, + "DirectoryList": { + "base": null, + "refs": { + "DescribeWorkspaceDirectoriesResult$Directories": "

    An array of structures that contain information about the directories.

    " + } + }, + "DirectoryName": { + "base": null, + "refs": { + "WorkspaceDirectory$DirectoryName": "

    The name of the directory.

    " + } + }, + "DnsIpAddresses": { + "base": null, + "refs": { + "WorkspaceDirectory$DnsIpAddresses": "

    An array of strings that contains the IP addresses of the DNS servers for the directory.

    " + } + }, + "ErrorType": { + "base": null, + "refs": { + "FailedCreateWorkspaceRequest$ErrorCode": "

    The error code.

    ", + "FailedWorkspaceChangeRequest$ErrorCode": "

    The error code.

    " + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "InvalidParameterValuesException$message": "

    The exception error message.

    ", + "ResourceLimitExceededException$message": "

    The exception error message.

    ", + "ResourceUnavailableException$message": "

    The exception error message.

    " + } + }, + "FailedCreateWorkspaceRequest": { + "base": "

    Contains information about a WorkSpace that could not be created.

    ", + "refs": { + "FailedCreateWorkspaceRequests$member": null + } + }, + "FailedCreateWorkspaceRequests": { + "base": null, + "refs": { + "CreateWorkspacesResult$FailedRequests": "

    An array of structures that represent the WorkSpaces that could not be created.

    " + } + }, + "FailedRebootWorkspaceRequests": { + "base": null, + "refs": { + "RebootWorkspacesResult$FailedRequests": "

    An array of structures that represent any WorkSpaces that could not be rebooted.

    " + } + }, + "FailedRebuildWorkspaceRequests": { + "base": null, + "refs": { + "RebuildWorkspacesResult$FailedRequests": "

    An array of structures that represent any WorkSpaces that could not be rebuilt.

    " + } + }, + "FailedTerminateWorkspaceRequests": { + "base": null, + "refs": { + "TerminateWorkspacesResult$FailedRequests": "

    An array of structures that represent any WorkSpaces that could not be terminated.

    " + } + }, + "FailedWorkspaceChangeRequest": { + "base": "

    Contains information about a WorkSpace that could not be rebooted (RebootWorkspaces), rebuilt (RebuildWorkspaces), or terminated (TerminateWorkspaces).

    ", + "refs": { + "FailedRebootWorkspaceRequests$member": null, + "FailedRebuildWorkspaceRequests$member": null, + "FailedTerminateWorkspaceRequests$member": null + } + }, + "InvalidParameterValuesException": { + "base": "

    One or more parameter values are not valid.

    ", + "refs": { + } + }, + "IpAddress": { + "base": null, + "refs": { + "DnsIpAddresses$member": null, + "Workspace$IpAddress": "

    The IP address of the WorkSpace.

    " + } + }, + "Limit": { + "base": null, + "refs": { + "DescribeWorkspacesRequest$Limit": "

    The maximum number of items to return.

    " + } + }, + "NonEmptyString": { + "base": null, + "refs": { + "ResourceUnavailableException$ResourceId": "

    The identifier of the resource that is not available.

    ", + "UserStorage$Capacity": "

    The amount of user storage for the bundle.

    ", + "WorkspaceBundle$Name": "

    The name of the bundle.

    " + } + }, + "PaginationToken": { + "base": null, + "refs": { + "DescribeWorkspaceBundlesRequest$NextToken": "

    The NextToken value from a previous call to this operation. Pass null if this is the first call.

    ", + "DescribeWorkspaceBundlesResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that timeframe.

    ", + "DescribeWorkspaceDirectoriesRequest$NextToken": "

    The NextToken value from a previous call to this operation. Pass null if this is the first call.

    ", + "DescribeWorkspaceDirectoriesResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that timeframe.

    ", + "DescribeWorkspacesRequest$NextToken": "

    The NextToken value from a previous call to this operation. Pass null if this is the first call.

    ", + "DescribeWorkspacesResult$NextToken": "

    If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to this operation to retrieve the next set of items. This token is valid for one day and must be used within that timeframe.

    " + } + }, + "RebootRequest": { + "base": "

    Contains information used with the RebootWorkspaces operation to reboot a WorkSpace.

    ", + "refs": { + "RebootWorkspaceRequests$member": null + } + }, + "RebootWorkspaceRequests": { + "base": null, + "refs": { + "RebootWorkspacesRequest$RebootWorkspaceRequests": "

    An array of structures that specify the WorkSpaces to reboot.

    " + } + }, + "RebootWorkspacesRequest": { + "base": "

    Contains the inputs for the RebootWorkspaces operation.

    ", + "refs": { + } + }, + "RebootWorkspacesResult": { + "base": "

    Contains the results of the RebootWorkspaces operation.

    ", + "refs": { + } + }, + "RebuildRequest": { + "base": "

    Contains information used with the RebuildWorkspaces operation to rebuild a WorkSpace.

    ", + "refs": { + "RebuildWorkspaceRequests$member": null + } + }, + "RebuildWorkspaceRequests": { + "base": null, + "refs": { + "RebuildWorkspacesRequest$RebuildWorkspaceRequests": "

    An array of structures that specify the WorkSpaces to rebuild.

    " + } + }, + "RebuildWorkspacesRequest": { + "base": "

    Contains the inputs for the RebuildWorkspaces operation.

    ", + "refs": { + } + }, + "RebuildWorkspacesResult": { + "base": "

    Contains the results of the RebuildWorkspaces operation.

    ", + "refs": { + } + }, + "RegistrationCode": { + "base": null, + "refs": { + "WorkspaceDirectory$RegistrationCode": "

    The registration code for the directory. This is the code that users enter in their Amazon WorkSpaces client application to connect to the directory.

    " + } + }, + "ResourceLimitExceededException": { + "base": "

    Your resource limits have been exceeded.

    ", + "refs": { + } + }, + "ResourceUnavailableException": { + "base": "

    The specified resource is not available.

    ", + "refs": { + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "DefaultWorkspaceCreationProperties$CustomSecurityGroupId": "

    The identifier of any custom security groups that are applied to the WorkSpaces when they are created.

    ", + "WorkspaceDirectory$WorkspaceSecurityGroupId": "

    The identifier of the security group that is assigned to new WorkSpaces.

    " + } + }, + "SubnetId": { + "base": null, + "refs": { + "SubnetIds$member": null, + "Workspace$SubnetId": "

    The identifier of the subnet that the WorkSpace is in.

    " + } + }, + "SubnetIds": { + "base": null, + "refs": { + "WorkspaceDirectory$SubnetIds": "

    An array of strings that contains the identifiers of the subnets used with the directory.

    " + } + }, + "TerminateRequest": { + "base": "

    Contains information used with the TerminateWorkspaces operation to terminate a WorkSpace.

    ", + "refs": { + "TerminateWorkspaceRequests$member": null + } + }, + "TerminateWorkspaceRequests": { + "base": null, + "refs": { + "TerminateWorkspacesRequest$TerminateWorkspaceRequests": "

    An array of structures that specify the WorkSpaces to terminate.

    " + } + }, + "TerminateWorkspacesRequest": { + "base": "

    Contains the inputs for the TerminateWorkspaces operation.

    ", + "refs": { + } + }, + "TerminateWorkspacesResult": { + "base": "

    Contains the results of the TerminateWorkspaces operation.

    ", + "refs": { + } + }, + "UserName": { + "base": null, + "refs": { + "DescribeWorkspacesRequest$UserName": "

    Used with the DirectoryId parameter to specify the directory user for which to obtain the WorkSpace.

    ", + "Workspace$UserName": "

    The user that the WorkSpace is assigned to.

    ", + "WorkspaceDirectory$CustomerUserName": "

    The user name for the service account.

    ", + "WorkspaceRequest$UserName": "

    The username that the WorkSpace is assigned to. This username must exist in the AWS Directory Service directory specified by the DirectoryId member.

    " + } + }, + "UserStorage": { + "base": "

    Contains information about the user storage for a WorkSpace bundle.

    ", + "refs": { + "WorkspaceBundle$UserStorage": "

    A UserStorage object that specifies the amount of user storage that the bundle contains.

    " + } + }, + "VolumeEncryptionKey": { + "base": null, + "refs": { + "Workspace$VolumeEncryptionKey": "

    The KMS key used to encrypt data stored on your WorkSpace.

    ", + "WorkspaceRequest$VolumeEncryptionKey": "

    The KMS key used to encrypt data stored on your WorkSpace.

    " + } + }, + "Workspace": { + "base": "

    Contains information about a WorkSpace.

    ", + "refs": { + "WorkspaceList$member": null + } + }, + "WorkspaceBundle": { + "base": "

    Contains information about a WorkSpace bundle.

    ", + "refs": { + "BundleList$member": null + } + }, + "WorkspaceDirectory": { + "base": "

    Contains information about an AWS Directory Service directory for use with Amazon WorkSpaces.

    ", + "refs": { + "DirectoryList$member": null + } + }, + "WorkspaceDirectoryState": { + "base": null, + "refs": { + "WorkspaceDirectory$State": "

    The state of the directory's registration with Amazon WorkSpaces

    " + } + }, + "WorkspaceDirectoryType": { + "base": null, + "refs": { + "WorkspaceDirectory$DirectoryType": "

    The directory type.

    " + } + }, + "WorkspaceErrorCode": { + "base": null, + "refs": { + "Workspace$ErrorCode": "

    If the WorkSpace could not be created, this contains the error code.

    " + } + }, + "WorkspaceId": { + "base": null, + "refs": { + "FailedWorkspaceChangeRequest$WorkspaceId": "

    The identifier of the WorkSpace.

    ", + "RebootRequest$WorkspaceId": "

    The identifier of the WorkSpace to reboot.

    ", + "RebuildRequest$WorkspaceId": "

    The identifier of the WorkSpace to rebuild.

    ", + "TerminateRequest$WorkspaceId": "

    The identifier of the WorkSpace to terminate.

    ", + "Workspace$WorkspaceId": "

    The identifier of the WorkSpace.

    ", + "WorkspaceIdList$member": null + } + }, + "WorkspaceIdList": { + "base": null, + "refs": { + "DescribeWorkspacesRequest$WorkspaceIds": "

    An array of strings that contain the identifiers of the WorkSpaces for which to retrieve information. This parameter cannot be combined with any other filter parameter.

    Because the CreateWorkspaces operation is asynchronous, the identifier returned by CreateWorkspaces is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information will be returned.

    " + } + }, + "WorkspaceList": { + "base": null, + "refs": { + "CreateWorkspacesResult$PendingRequests": "

    An array of structures that represent the WorkSpaces that were created.

    Because this operation is asynchronous, the identifier in WorkspaceId is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information will be returned.

    ", + "DescribeWorkspacesResult$Workspaces": "

    An array of structures that contain the information about the WorkSpaces.

    Because the CreateWorkspaces operation is asynchronous, some of this information may be incomplete for a newly-created WorkSpace.

    " + } + }, + "WorkspaceRequest": { + "base": "

    Contains information about a WorkSpace creation request.

    ", + "refs": { + "FailedCreateWorkspaceRequest$WorkspaceRequest": "

    A WorkspaceRequest object that contains the information about the WorkSpace that could not be created.

    ", + "WorkspaceRequestList$member": null + } + }, + "WorkspaceRequestList": { + "base": null, + "refs": { + "CreateWorkspacesRequest$Workspaces": "

    An array of structures that specify the WorkSpaces to create.

    " + } + }, + "WorkspaceState": { + "base": null, + "refs": { + "Workspace$State": "

    The operational state of the WorkSpace.

    " + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/paginators-1.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/paginators-1.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/apis/workspaces/2015-04-08/paginators-1.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,20 @@ +{ + "pagination": { + "DescribeWorkspaceBundles": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Bundles" + }, + "DescribeWorkspaceDirectories": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Directories" + }, + "DescribeWorkspaces": { + "limit_key": "Limit", + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Workspaces" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,352 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + "os" + "os/exec" + "regexp" + "sort" + "strconv" + "strings" + "text/template" + + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/model/api" + "github.com/aws/aws-sdk-go/private/util" +) + +type testSuite struct { + *api.API + Description string + Cases []testCase + 
title string +} + +type testCase struct { + TestSuite *testSuite + Given *api.Operation + Params interface{} `json:",omitempty"` + Data interface{} `json:"result,omitempty"` + InputTest testExpectation `json:"serialized"` + OutputTest testExpectation `json:"response"` +} + +type testExpectation struct { + Body string + URI string + Headers map[string]string + StatusCode uint `json:"status_code"` +} + +const preamble = ` +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} +` + +var reStripSpace = regexp.MustCompile(`\s(\w)`) + +var reImportRemoval = regexp.MustCompile(`(?s:import \((.+?)\))`) + +func removeImports(code string) string { + return reImportRemoval.ReplaceAllString(code, "") +} + +var extraImports = []string{ + "bytes", + "encoding/json", + "encoding/xml", + "io", + "io/ioutil", + "net/http", + "testing", + "time", + "net/url", + "", + "github.com/aws/aws-sdk-go/awstesting", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/private/protocol", + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "github.com/aws/aws-sdk-go/private/util", + "github.com/stretchr/testify/assert", +} + +func addImports(code string) string { + importNames := make([]string, len(extraImports)) + for i, n := range extraImports { + if n != "" { + importNames[i] = fmt.Sprintf("%q", n) + } + } + + str := reImportRemoval.ReplaceAllString(code, "import (\n"+strings.Join(importNames, "\n")+"$1\n)") + return str +} + +func (t *testSuite) TestSuite() string { + var buf bytes.Buffer + + t.title = reStripSpace.ReplaceAllStringFunc(t.Description, func(x string) string { + return strings.ToUpper(x[1:]) + }) + t.title = regexp.MustCompile(`\W`).ReplaceAllString(t.title, "") + + for idx, c := range t.Cases 
{ + c.TestSuite = t + buf.WriteString(c.TestCase(idx) + "\n") + } + return buf.String() +} + +var tplInputTestCase = template.Must(template.New("inputcase").Parse(` +func Test{{ .OpName }}(t *testing.T) { + sess := session.New() + svc := New{{ .TestCase.TestSuite.API.StructName }}(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := {{ .ParamsString }} + req, _ := svc.{{ .TestCase.Given.ExportedName }}Request(input) + r := req.HTTPRequest + + // build request + {{ .TestCase.TestSuite.API.ProtocolPackage }}.Build(req) + assert.NoError(t, req.Error) + + {{ if ne .TestCase.InputTest.Body "" }}// assert body + assert.NotNil(t, r.Body) + {{ .BodyAssertions }}{{ end }} + + {{ if ne .TestCase.InputTest.URI "" }}// assert URL + awstesting.AssertURL(t, "https://test{{ .TestCase.InputTest.URI }}", r.URL.String()){{ end }} + + // assert headers +{{ range $k, $v := .TestCase.InputTest.Headers }}assert.Equal(t, "{{ $v }}", r.Header.Get("{{ $k }}")) +{{ end }} +} +`)) + +type tplInputTestCaseData struct { + TestCase *testCase + OpName, ParamsString string +} + +func (t tplInputTestCaseData) BodyAssertions() string { + code := &bytes.Buffer{} + protocol := t.TestCase.TestSuite.API.Metadata.Protocol + + // Extract the body bytes + switch protocol { + case "rest-xml": + fmt.Fprintln(code, "body := util.SortXML(r.Body)") + default: + fmt.Fprintln(code, "body, _ := ioutil.ReadAll(r.Body)") + } + + // Generate the body verification code + expectedBody := util.Trim(t.TestCase.InputTest.Body) + switch protocol { + case "ec2", "query": + fmt.Fprintf(code, "awstesting.AssertQuery(t, `%s`, util.Trim(string(body)))", + expectedBody) + case "rest-xml": + if strings.HasPrefix(expectedBody, "<") { + fmt.Fprintf(code, "awstesting.AssertXML(t, `%s`, util.Trim(string(body)), %s{})", + expectedBody, t.TestCase.Given.InputRef.ShapeName) + } else { + fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))", + expectedBody) + } + case "json", "jsonrpc", "rest-json": + if 
strings.HasPrefix(expectedBody, "{") { + fmt.Fprintf(code, "awstesting.AssertJSON(t, `%s`, util.Trim(string(body)))", + expectedBody) + } else { + fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))", + expectedBody) + } + default: + fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))", + expectedBody) + } + + return code.String() +} + +var tplOutputTestCase = template.Must(template.New("outputcase").Parse(` +func Test{{ .OpName }}(t *testing.T) { + sess := session.New() + svc := New{{ .TestCase.TestSuite.API.StructName }}(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte({{ .Body }})) + req, out := svc.{{ .TestCase.Given.ExportedName }}Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + {{ range $k, $v := .TestCase.OutputTest.Headers }}req.HTTPResponse.Header.Set("{{ $k }}", "{{ $v }}") + {{ end }} + + // unmarshal response + {{ .TestCase.TestSuite.API.ProtocolPackage }}.UnmarshalMeta(req) + {{ .TestCase.TestSuite.API.ProtocolPackage }}.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + {{ .Assertions }} +} +`)) + +type tplOutputTestCaseData struct { + TestCase *testCase + Body, OpName, Assertions string +} + +func (i *testCase) TestCase(idx int) string { + var buf bytes.Buffer + + opName := i.TestSuite.API.StructName() + i.TestSuite.title + "Case" + strconv.Itoa(idx+1) + + if i.Params != nil { // input test + // query test should sort body as form encoded values + switch i.TestSuite.API.Metadata.Protocol { + case "query", "ec2": + m, _ := url.ParseQuery(i.InputTest.Body) + i.InputTest.Body = m.Encode() + case "rest-xml": + i.InputTest.Body = util.SortXML(bytes.NewReader([]byte(i.InputTest.Body))) + case "json", "rest-json": + i.InputTest.Body = strings.Replace(i.InputTest.Body, " ", "", -1) + } + + input := tplInputTestCaseData{ + 
TestCase: i, + OpName: strings.ToUpper(opName[0:1]) + opName[1:], + ParamsString: awstesting.ParamsStructFromJSON(i.Params, i.Given.InputRef.Shape, false), + } + + if err := tplInputTestCase.Execute(&buf, input); err != nil { + panic(err) + } + } else { + output := tplOutputTestCaseData{ + TestCase: i, + Body: fmt.Sprintf("%q", i.OutputTest.Body), + OpName: strings.ToUpper(opName[0:1]) + opName[1:], + Assertions: awstesting.GenerateAssertions(i.Data, i.Given.OutputRef.Shape, "out"), + } + + if err := tplOutputTestCase.Execute(&buf, output); err != nil { + panic(err) + } + } + + return buf.String() +} + +// generateTestSuite generates a protocol test suite for a given configuration +// JSON protocol test file. +func generateTestSuite(filename string) string { + inout := "Input" + if strings.Contains(filename, "output/") { + inout = "Output" + } + + var suites []testSuite + f, err := os.Open(filename) + if err != nil { + panic(err) + } + + err = json.NewDecoder(f).Decode(&suites) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + buf.WriteString("package " + suites[0].ProtocolPackage() + "_test\n\n") + + var innerBuf bytes.Buffer + innerBuf.WriteString("//\n// Tests begin here\n//\n\n\n") + + for i, suite := range suites { + svcPrefix := inout + "Service" + strconv.Itoa(i+1) + suite.API.Metadata.ServiceAbbreviation = svcPrefix + "ProtocolTest" + suite.API.Operations = map[string]*api.Operation{} + for idx, c := range suite.Cases { + c.Given.ExportedName = svcPrefix + "TestCaseOperation" + strconv.Itoa(idx+1) + suite.API.Operations[c.Given.ExportedName] = c.Given + } + + suite.API.NoInitMethods = true // don't generate init methods + suite.API.NoStringerMethods = true // don't generate stringer methods + suite.API.NoConstServiceNames = true // don't generate service names + suite.API.Setup() + suite.API.Metadata.EndpointPrefix = suite.API.PackageName() + + // Sort in order for deterministic test generation + names := make([]string, 0, len(suite.API.Shapes)) 
+ for n := range suite.API.Shapes { + names = append(names, n) + } + sort.Strings(names) + for _, name := range names { + s := suite.API.Shapes[name] + s.Rename(svcPrefix + "TestShape" + name) + } + + svcCode := addImports(suite.API.ServiceGoCode()) + if i == 0 { + importMatch := reImportRemoval.FindStringSubmatch(svcCode) + buf.WriteString(importMatch[0] + "\n\n") + buf.WriteString(preamble + "\n\n") + } + svcCode = removeImports(svcCode) + svcCode = strings.Replace(svcCode, "func New(", "func New"+suite.API.StructName()+"(", -1) + svcCode = strings.Replace(svcCode, "func newClient(", "func new"+suite.API.StructName()+"Client(", -1) + svcCode = strings.Replace(svcCode, "return newClient(", "return new"+suite.API.StructName()+"Client(", -1) + buf.WriteString(svcCode + "\n\n") + + apiCode := removeImports(suite.API.APIGoCode()) + apiCode = strings.Replace(apiCode, "var oprw sync.Mutex", "", -1) + apiCode = strings.Replace(apiCode, "oprw.Lock()", "", -1) + apiCode = strings.Replace(apiCode, "defer oprw.Unlock()", "", -1) + buf.WriteString(apiCode + "\n\n") + + innerBuf.WriteString(suite.TestSuite() + "\n") + } + + return buf.String() + innerBuf.String() +} + +func main() { + out := generateTestSuite(os.Args[1]) + if len(os.Args) == 3 { + f, err := os.Create(os.Args[2]) + defer f.Close() + if err != nil { + panic(err) + } + f.WriteString(util.GoFmt(out)) + f.Close() + + c := exec.Command("gofmt", "-s", "-w", os.Args[2]) + if err := c.Run(); err != nil { + panic(err) + } + } else { + fmt.Println(out) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/ec2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/ec2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/ec2.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/ec2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,422 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "val1", + "Bar": "val2" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Foo=val1&Bar=val2" + } + } + ] + }, + { + "description": "Structure with locationName and queryName applied to members", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType", + "locationName": "barLocationName" + }, + "Yuck": { + "shape": "StringType", + "locationName": "yuckLocationName", + "queryName": "yuckQueryName" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "val1", + "Bar": "val2", + "Yuck": "val3" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Foo=val1&BarLocationName=val2&yuckQueryName=val3" + } + } + ] + }, + { + "description": "Nested structure members", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "StructArg": { + "shape": "StructType", + "locationName": "Struct" + } + } + }, + "StructType": { + "type": "structure", + "members": { + "ScalarArg": { + "shape": "StringType", + "locationName": "Scalar" + } + } + }, 
+ "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "StructArg": { + "ScalarArg": "foo" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Struct.Scalar=foo" + } + } + ] + }, + { + "description": "List types", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "Strings" + } + }, + "Strings": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "foo", + "bar", + "baz" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz" + } + } + ] + }, + { + "description": "List with location name applied to member", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType", + "locationName": "ListMemberName" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType", + "LocationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c" + } + } + ] + }, + { + "description": "List with locationName and queryName", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType", + 
"locationName": "ListMemberName", + "queryName": "ListQueryName" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType", + "LocationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c" + } + } + ] + }, + { + "description": "Base64 encoded Blobs", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "BlobArg": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "BlobArg": "foo" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&BlobArg=Zm9v" + } + } + ] + }, + { + "description": "Timestamp values", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&TimeArg=2015-01-25T08%3A00%3A00Z" + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "ec2", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + 
"input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "Token=abc123" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "Token=00000000-0000-4000-8000-000000000000" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/json.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/json.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/json.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/json.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,541 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Name": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName", + "http": { + "method": "POST" + } + }, + "params": { + "Name": "myname" + }, + "serialized": { + "body": "{\"Name\": \"myname\"}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + } + ] + }, + { + "description": "Timestamp values", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + 
"type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "body": "{\"TimeArg\": 1422172800}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + } + ] + }, + { + "description": "Base64 encoded Blobs", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "BlobArg": { + "shape": "BlobType" + }, + "BlobMap": { + "shape": "BlobMapType" + } + } + }, + "BlobType": { + "type": "blob" + }, + "BlobMapType": { + "type": "map", + "key": {"shape": "StringType"}, + "value": {"shape": "BlobType"} + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "BlobArg": "foo" + }, + "serialized": { + "body": "{\"BlobArg\": \"Zm9v\"}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "BlobMap": { + "key1": "foo", + "key2": "bar" + } + }, + "serialized": { + "body": "{\"BlobMap\": {\"key1\": \"Zm9v\", \"key2\": \"YmFy\"}}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + } + ] + }, + { + "description": "Nested blobs", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListOfStructures" + 
} + } + }, + "ListOfStructures": { + "type": "list", + "member": { + "shape": "BlobType" + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListParam": ["foo", "bar"] + }, + "serialized": { + "body": "{\"ListParam\": [\"Zm9v\", \"YmFy\"]}", + "uri": "/", + "headers": {"X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1"} + } + } + ] + }, + { + "description": "Recursive shapes", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "RecursiveStruct": { + "shape": "RecursiveStructType" + } + } + }, + "RecursiveStructType": { + "type": "structure", + "members": { + "NoRecurse": { + "shape": "StringType" + }, + "RecursiveStruct": { + "shape": "RecursiveStructType" + }, + "RecursiveList": { + "shape": "RecursiveListType" + }, + "RecursiveMap": { + "shape": "RecursiveMapType" + } + } + }, + "RecursiveListType": { + "type": "list", + "member": { + "shape": "RecursiveStructType" + } + }, + "RecursiveMapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "RecursiveStructType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + }, + "serialized": { + 
"uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + } + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveStruct\": {\"RecursiveStruct\": {\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}}}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "NoRecurse": "bar" + } + ] + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveList\": [{\"NoRecurse\": \"foo\"}, {\"NoRecurse\": \"bar\"}]}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "RecursiveStruct": { + "NoRecurse": "bar" + } + } + ] + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveList\": [{\"NoRecurse\": \"foo\"}, {\"RecursiveStruct\": {\"NoRecurse\": \"bar\"}}]}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveMap": { + "foo": { + "NoRecurse": "foo" + }, + "bar": { + "NoRecurse": "bar" + 
} + } + } + }, + "serialized": { + "uri": "/", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "body": "{\"RecursiveStruct\": {\"RecursiveMap\": {\"foo\": {\"NoRecurse\": \"foo\"}, \"bar\": {\"NoRecurse\": \"bar\"}}}}" + } + } + ] + }, + { + "description": "Empty maps", + "metadata": { + "protocol": "json", + "jsonVersion": "1.1", + "targetPrefix": "com.amazonaws.foo" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName", + "http": { + "method": "POST" + } + }, + "params": { + "Map": {} + }, + "serialized": { + "body": "{\"Map\": {}}", + "headers": { + "X-Amz-Target": "com.amazonaws.foo.OperationName", + "Content-Type": "application/x-amz-json-1.1" + }, + "uri": "/" + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"Token\": \"abc123\"}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"Token\": 
\"00000000-0000-4000-8000-000000000000\"}" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,842 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType" + }, + "Baz": { + "shape": "BooleanType" + } + } + }, + "StringType": { + "type": "string" + }, + "BooleanType": { + "type": "boolean" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "val1", + "Bar": "val2" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Foo=val1&Bar=val2" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Baz": true + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Baz=true" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Baz": false + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Baz=false" + } + } + ] + }, + { + "description": "Nested structure members", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "StructArg": { + "shape": "StructType" + 
} + } + }, + "StructType": { + "type": "structure", + "members": { + "ScalarArg": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "StructArg": { + "ScalarArg": "foo" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&StructArg.ScalarArg=foo" + } + } + ] + }, + { + "description": "List types", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "Strings" + } + }, + "Strings": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "foo", + "bar", + "baz" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ListArg=" + } + } + ] + }, + { + "description": "Flattened list", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ScalarArg": { + "shape": "StringType" + }, + "ListArg": { + "shape": "ListType" + }, + "NamedListArg": { + "shape": "NamedListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType" + }, + "flattened": true + }, + "NamedListType": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "Foo" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + 
"given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ScalarArg": "foo", + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ScalarArg=foo&ListArg.1=a&ListArg.2=b&ListArg.3=c" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "NamedListArg": [ + "a" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&Foo.1=a" + } + } + ] + }, + { + "description": "Serialize flattened map type", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "MapArg": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "MapArg": { + "key1": "val1", + "key2": "val2" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&MapArg.1.key=key1&MapArg.1.value=val1&MapArg.2.key=key2&MapArg.2.value=val2" + } + } + ] + }, + { + "description": "Non flattened list with LocationName", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListArg": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": 
"Action=OperationName&Version=2014-01-01&ListArg.item.1=a&ListArg.item.2=b&ListArg.item.3=c" + } + } + ] + }, + { + "description": "Flattened list with LocationName", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ScalarArg": { + "shape": "StringType" + }, + "ListArg": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "ListArgLocation" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "ScalarArg": "foo", + "ListArg": [ + "a", + "b", + "c" + ] + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&ScalarArg=foo&ListArgLocation.1=a&ListArgLocation.2=b&ListArgLocation.3=c" + } + } + ] + }, + { + "description": "Serialize map type", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "MapArg": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "MapArg": { + "key1": "val1", + "key2": "val2" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2" + } + } + ] + }, + { + "description": "Serialize map type with locationName", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "MapArg": { + "shape": "StringMap" + } + } + }, + "StringMap": { + 
"type": "map", + "key": { + "shape": "StringType", + "locationName": "TheKey" + }, + "value": { + "shape": "StringType", + "locationName": "TheValue" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "MapArg": { + "key1": "val1", + "key2": "val2" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2" + } + } + ] + }, + { + "description": "Base64 encoded Blobs", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "BlobArg": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "BlobArg": "foo" + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&BlobArg=Zm9v" + } + } + ] + }, + { + "description": "Timestamp values", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&TimeArg=2015-01-25T08%3A00%3A00Z" + } + } + ] + }, + { + "description": "Recursive shapes", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "RecursiveStruct": { + "shape": "RecursiveStructType" + } + } + }, + "RecursiveStructType": { + "type": 
"structure", + "members": { + "NoRecurse": { + "shape": "StringType" + }, + "RecursiveStruct": { + "shape": "RecursiveStructType" + }, + "RecursiveList": { + "shape": "RecursiveListType" + }, + "RecursiveMap": { + "shape": "RecursiveMapType" + } + } + }, + "RecursiveListType": { + "type": "list", + "member": { + "shape": "RecursiveStructType" + } + }, + "RecursiveMapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "RecursiveStructType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.NoRecurse=foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveStruct.NoRecurse=foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + } + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "NoRecurse": "bar" + } + ] + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar" + } + }, + { + "given": { + 
"input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "RecursiveStruct": { + "NoRecurse": "bar" + } + } + ] + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveMap": { + "foo": { + "NoRecurse": "foo" + }, + "bar": { + "NoRecurse": "bar" + } + } + } + }, + "serialized": { + "uri": "/", + "body": "Action=OperationName&Version=2014-01-01&RecursiveStruct.RecursiveMap.entry.1.key=foo&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=foo&RecursiveStruct.RecursiveMap.entry.2.key=bar&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=bar" + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "query", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "Token=abc123" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "Token=00000000-0000-4000-8000-000000000000" + } + } + ] + } +] diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-json.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-json.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-json.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-json.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1115 @@ +[ + { + "description": "URI parameter only with no location name", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo" + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/foo", + "headers": {} + } + } + ] + }, + { + "description": "URI parameter only with location name", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType", + "location": "uri", + "locationName": "PipelineId" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Foo": "bar" + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/bar", + "headers": {} + } + } + ] + }, + { + "description": "String to string 
maps in querystring", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + }, + "QueryDoc": { + "shape": "MapStringStringType", + "location": "querystring" + } + } + }, + "MapStringStringType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "QueryDoc": { + "bar": "baz", + "fizz": "buzz" + } + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", + "headers": {} + } + } + ] + }, + { + "description": "String to string list maps in querystring", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + }, + "QueryDoc": { + "shape": "MapStringStringListType", + "location": "querystring" + } + } + }, + "MapStringStringListType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringListType" + } + }, + "StringListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "id", + "QueryDoc": { + "foo": ["bar", "baz"], + "fizz": ["buzz", "pop"] + } + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", + "headers": 
{} + } + } + ] + }, + { + "description": "URI parameter and querystring params", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri", + "locationName": "PipelineId" + }, + "Ascending": { + "shape": "StringType", + "location": "querystring", + "locationName": "Ascending" + }, + "PageToken": { + "shape": "StringType", + "location": "querystring", + "locationName": "PageToken" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "Ascending": "true", + "PageToken": "bar" + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", + "headers": {} + } + } + ] + }, + { + "description": "URI parameter, querystring params and JSON body", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri", + "locationName": "PipelineId" + }, + "Ascending": { + "shape": "StringType", + "location": "querystring", + "locationName": "Ascending" + }, + "PageToken": { + "shape": "StringType", + "location": "querystring", + "locationName": "PageToken" + }, + "Config": { + "shape": "StructType" + } + } + }, + "StringType": { + "type": "string" + }, + "StructType": { + "type": "structure", + "members": { + "A": { + "shape": "StringType" + }, + "B": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + 
"PipelineId": "foo", + "Ascending": "true", + "PageToken": "bar", + "Config": { + "A": "one", + "B": "two" + } + }, + "serialized": { + "body": "{\"Config\": {\"A\": \"one\", \"B\": \"two\"}}", + "uri": "/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", + "headers": {} + } + } + ] + }, + { + "description": "URI parameter, querystring params, headers and JSON body", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri", + "locationName": "PipelineId" + }, + "Ascending": { + "shape": "StringType", + "location": "querystring", + "locationName": "Ascending" + }, + "Checksum": { + "shape": "StringType", + "location": "header", + "locationName": "x-amz-checksum" + }, + "PageToken": { + "shape": "StringType", + "location": "querystring", + "locationName": "PageToken" + }, + "Config": { + "shape": "StructType" + } + } + }, + "StringType": { + "type": "string" + }, + "StructType": { + "type": "structure", + "members": { + "A": { + "shape": "StringType" + }, + "B": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "Ascending": "true", + "Checksum": "12345", + "PageToken": "bar", + "Config": { + "A": "one", + "B": "two" + } + }, + "serialized": { + "body": "{\"Config\": {\"A\": \"one\", \"B\": \"two\"}}", + "uri": "/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", + "headers": { + "x-amz-checksum": "12345" + } + } + } + ] + }, + { + "description": "Streaming payload", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "vaultName": { + "shape": "StringType", + "location": "uri", 
+ "locationName": "vaultName" + }, + "checksum": { + "shape": "StringType", + "location": "header", + "locationName": "x-amz-sha256-tree-hash" + }, + "body": { + "shape": "Stream" + } + }, + "required": [ + "vaultName" + ], + "payload": "body" + }, + "StringType": { + "type": "string" + }, + "Stream": { + "type": "blob", + "streaming": true + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/vaults/{vaultName}/archives" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "vaultName": "name", + "checksum": "foo", + "body": "contents" + }, + "serialized": { + "body": "contents", + "uri": "/2014-01-01/vaults/name/archives", + "headers": { + "x-amz-sha256-tree-hash": "foo" + } + } + } + ] + }, + { + "description": "String payload", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": "bar" + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + } + ] + }, + { + "description": "Blob payload", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": "bar" + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + 
}, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + } + ] + }, + { + "description": "Structure payload", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "locationName": "foo", + "type": "structure", + "members": { + "baz": { + "shape": "BazShape" + } + } + }, + "BazShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": { + "baz": "bar" + } + }, + "serialized": { + "method": "POST", + "body": "{\"baz\": \"bar\"}", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": {}, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + } + ] + }, + { + "description": "Omits null query params, but serializes empty strings", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "location":"querystring", + "locationName":"param-name", + "shape": "Foo" + } + } + }, + "Foo": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "name": "OperationName", + "http": { + "method": "POST", + "requestUri": "/path" + }, + "input": { "shape": "InputShape" } + }, + "params": { "foo": null }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path" + } + }, + { + "given": { + "name": "OperationName", + "http": { + "method": "POST", + "requestUri": "/path?abc=mno" + }, + "input": { "shape": "InputShape" } + }, + "params": { "foo": "" }, 
+ "serialized": { + "method": "POST", + "body": "", + "uri": "/path?abc=mno¶m-name=" + } + } + ] + }, + { + "description": "Recursive shapes", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "RecursiveStruct": { + "shape": "RecursiveStructType" + } + } + }, + "RecursiveStructType": { + "type": "structure", + "members": { + "NoRecurse": { + "shape": "StringType" + }, + "RecursiveStruct": { + "shape": "RecursiveStructType" + }, + "RecursiveList": { + "shape": "RecursiveListType" + }, + "RecursiveMap": { + "shape": "RecursiveMapType" + } + } + }, + "RecursiveListType": { + "type": "list", + "member": { + "shape": "RecursiveStructType" + } + }, + "RecursiveMapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "RecursiveStructType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + }, + "serialized": { + "uri": "/path" , + "headers": {}, + "body": "{\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + } + } + }, + "serialized": { + 
"uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveStruct\": {\"RecursiveStruct\": {\"RecursiveStruct\": {\"NoRecurse\": \"foo\"}}}}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "NoRecurse": "bar" + } + ] + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveList\": [{\"NoRecurse\": \"foo\"}, {\"NoRecurse\": \"bar\"}]}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "RecursiveStruct": { + "NoRecurse": "bar" + } + } + ] + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveList\": [{\"NoRecurse\": \"foo\"}, {\"RecursiveStruct\": {\"NoRecurse\": \"bar\"}}]}}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveMap": { + "foo": { + "NoRecurse": "foo" + }, + "bar": { + "NoRecurse": "bar" + } + } + } + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"RecursiveStruct\": {\"RecursiveMap\": {\"foo\": {\"NoRecurse\": \"foo\"}, \"bar\": {\"NoRecurse\": \"bar\"}}}}" + } + } + ] + }, + { + "description": "Timestamp values", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType" + }, + "TimeArgInHeader": { + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timearg" + } + } + }, + "TimestampType": { + 
"type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"TimeArg\": 1422172800}" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "TimeArgInHeader": 1422172800 + }, + "serialized": { + "uri": "/path", + "headers": {"x-amz-timearg": "Sun, 25 Jan 2015 08:00:00 GMT"}, + "body": "" + } + } + ] + }, + { + "description": "Named locations in JSON body", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArg": { + "shape": "TimestampType", + "locationName": "timestamp_location" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "TimeArg": 1422172800 + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"timestamp_location\": 1422172800}" + } + } + ] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "rest-json", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"Token\": \"abc123\"}" + } + }, + { + "given": { + "input": { + 
"shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "{\"Token\": \"00000000-0000-4000-8000-000000000000\"}" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-xml.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-xml.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-xml.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/input/rest-xml.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1587 @@ +[ + { + "description": "Basic XML serialization", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Name": { + "shape": "StringType" + }, + "Description": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "Name": "foo", + "Description": "bar" + }, + "serialized": { + "method": "POST", + "body": "foobar", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + }, + { + "given": { + "http": { + "method": "PUT", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "Name": "foo", + "Description": "bar" + }, + "serialized": { + "method": "PUT", + "body": 
"foobar", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + }, + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/hostedzone" + }, + "name": "OperationName" + }, + "params": {}, + "serialized": { + "method": "GET", + "body": "", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Serialize other scalar types", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "First": { + "shape": "BoolType" + }, + "Second": { + "shape": "BoolType" + }, + "Third": { + "shape": "FloatType" + }, + "Fourth": { + "shape": "IntegerType" + } + } + }, + "BoolType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "IntegerType": { + "type": "integer" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "First": true, + "Second": false, + "Third": 1.2, + "Fourth": 3 + }, + "serialized": { + "method": "POST", + "body": "truefalse1.23", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Nested structures", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "SubStructure": { + "shape": "SubStructure" + }, + "Description": { + "shape": "StringType" + } + } + }, + "SubStructure": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + 
"xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "SubStructure": { + "Foo": "a", + "Bar": "b" + }, + "Description": "baz" + }, + "serialized": { + "method": "POST", + "body": "abbaz", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "SubStructure": { + "Foo": "a", + "Bar": null + }, + "Description": "baz" + }, + "serialized": { + "method": "POST", + "body": "abaz", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Nested structures", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "SubStructure": { + "shape": "SubStructure" + }, + "Description": { + "shape": "StringType" + } + } + }, + "SubStructure": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + }, + "Bar": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "SubStructure": {}, + "Description": "baz" + }, + "serialized": { + "method": "POST", + "body": "baz", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Non flattened lists", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": 
"StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + "one", + "two", + "three" + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Non flattened lists with locationName", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape", + "locationName": "AlternateName" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "NotMember" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + "one", + "two", + "three" + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Flattened lists", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": 
"OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + "one", + "two", + "three" + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Flattened lists with locationName", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape", + "locationName": "item" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + }, + "flattened": true + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "ListParam": [ + "one", + "two", + "three" + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "List of structures", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "ListParam": { + "shape": "ListShape", + "locationName": "item" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "SingleFieldStruct" + }, + "flattened": true + }, + "StringType": { + "type": "string" + }, + "SingleFieldStruct": { + "type": "structure", + "members": { + "Element": { + "shape": "StringType", + "locationName": "value" + } + } + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": 
"OperationName" + }, + "params": { + "ListParam": [ + { + "Element": "one" + }, + { + "Element": "two" + }, + { + "Element": "three" + } + ] + }, + "serialized": { + "method": "POST", + "body": "onetwothree", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Blob and timestamp shapes", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "StructureParam": { + "shape": "StructureShape" + } + } + }, + "StructureShape": { + "type": "structure", + "members": { + "t": { + "shape": "TShape" + }, + "b": { + "shape": "BShape" + } + } + }, + "TShape": { + "type": "timestamp" + }, + "BShape": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/2014-01-01/hostedzone" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "StructureParam": { + "t": 1422172800, + "b": "foo" + } + }, + "serialized": { + "method": "POST", + "body": "2015-01-25T08:00:00ZZm9v", + "uri": "/2014-01-01/hostedzone", + "headers": {} + } + } + ] + }, + { + "description": "Header maps", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "type": "map", + "location": "headers", + "locationName": "x-foo-", + "key": { + "shape": "FooKeyValue" + }, + "value": { + "shape": "FooKeyValue" + } + }, + "FooKeyValue": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "name": "OperationName" + }, + "params": { + "foo": { + "a": "b", + "c": "d" + } + }, + "serialized": 
{ + "method": "POST", + "body": "", + "uri": "/", + "headers": { + "x-foo-a": "b", + "x-foo-c": "d" + } + } + } + ] + }, + { + "description": "String to string maps in querystring", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + }, + "QueryDoc": { + "shape": "MapStringStringType", + "location": "querystring" + } + } + }, + "MapStringStringType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "foo", + "QueryDoc": { + "bar": "baz", + "fizz": "buzz" + } + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", + "headers": {} + } + } + ] + }, + { + "description": "String to string list maps in querystring", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "PipelineId": { + "shape": "StringType", + "location": "uri" + }, + "QueryDoc": { + "shape": "MapStringStringListType", + "location": "querystring" + } + } + }, + "MapStringStringListType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringListType" + } + }, + "StringListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/2014-01-01/jobsByPipeline/{PipelineId}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "PipelineId": "id", + "QueryDoc": { + "foo": ["bar", 
"baz"], + "fizz": ["buzz", "pop"] + } + }, + "serialized": { + "body": "", + "uri": "/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", + "headers": {} + } + } + ] + }, + + { + "description": "String payload", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": "bar" + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + } + ] + }, + { + "description": "Blob payload", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": "bar" + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + } + ] + }, + { + "description": "Structure payload", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "shape": "FooShape" + } + } + }, + "FooShape": { + "locationName": "foo", + "type": "structure", + "members": { + "baz": { + "shape": "BazShape" + } + } + }, + "BazShape": { + "type": "string" + } + }, + 
"cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": { + "baz": "bar" + } + }, + "serialized": { + "method": "POST", + "body": "bar", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": {}, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": {} + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + }, + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "foo" + }, + "name": "OperationName" + }, + "params": { + "foo": null + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/" + } + } + ] + }, + { + "description": "XML Attribute", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Grant": { + "shape": "Grant" + } + } + }, + "Grant": { + "type": "structure", + "locationName": "Grant", + "members": { + "Grantee": { + "shape": "Grantee" + } + } + }, + "Grantee": { + "type": "structure", + "members": { + "Type": { + "shape": "Type", + "locationName": "xsi:type", + "xmlAttribute": true + }, + "EmailAddress": { + "shape": "StringType" + } + }, + "xmlNamespace": { + "prefix": "xsi", + "uri":"http://www.w3.org/2001/XMLSchema-instance" + } + }, + "Type": { + "type": "string" + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "InputShape", + "payload": "Grant" + }, 
+ "name": "OperationName" + }, + "params": { + "Grant": { + "Grantee": { + "EmailAddress": "foo@example.com", + "Type": "CanonicalUser" + } + } + }, + "serialized": { + "method": "POST", + "body": "foo@example.com", + "uri": "/" + } + } + ] + }, + { + "description": "Greedy keys", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Bucket": { + "shape": "BucketShape", + "location": "uri" + }, + "Key": { + "shape": "KeyShape", + "location": "uri" + } + } + }, + "BucketShape": { + "type": "string" + }, + "KeyShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "http": { + "method": "GET", + "requestUri": "/{Bucket}/{Key+}" + }, + "input": { + "shape": "InputShape" + }, + "name": "OperationName" + }, + "params": { + "Key": "testing /123", + "Bucket": "my/bucket" + }, + "serialized": { + "method": "GET", + "body": "", + "uri": "/my%2Fbucket/testing%20/123" + } + } + ] + }, + { + "description": "Omits null query params, but serializes empty strings", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "foo": { + "location":"querystring", + "locationName":"param-name", + "shape": "Foo" + } + } + }, + "Foo": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "name": "OperationName", + "http": { + "method": "POST", + "requestUri": "/path" + }, + "input": { "shape": "InputShape" } + }, + "params": { "foo": null }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path" + } + }, + { + "given": { + "name": "OperationName", + "http": { + "method": "POST", + "requestUri": "/path?abc=mno" + }, + "input": { "shape": "InputShape" } + }, + "params": { "foo": "" }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path?abc=mno¶m-name=" + } + } + ] + }, + { + "description": "Recursive shapes", + "metadata": { + "protocol": "rest-xml", + 
"apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "RecursiveStruct": { + "shape": "RecursiveStructType" + } + } + }, + "RecursiveStructType": { + "type": "structure", + "members": { + "NoRecurse": { + "shape": "StringType" + }, + "RecursiveStruct": { + "shape": "RecursiveStructType" + }, + "RecursiveList": { + "shape": "RecursiveListType" + }, + "RecursiveMap": { + "shape": "RecursiveMapType" + } + } + }, + "RecursiveListType": { + "type": "list", + "member": { + "shape": "RecursiveStructType" + } + }, + "RecursiveMapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "RecursiveStructType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + }, + "serialized": { + "uri": "/path", + "body": "foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + }, + "serialized": { + "uri": "/path", + "body": "foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "RecursiveStruct": { + "NoRecurse": "foo" + } + } + } + } + }, + "serialized": { + "uri": "/path", + "body": "foo" + } + }, + { + "given": { + "input": { + "shape": "InputShape", 
+ "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "NoRecurse": "bar" + } + ] + } + }, + "serialized": { + "uri": "/path", + "body": "foobar" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveList": [ + { + "NoRecurse": "foo" + }, + { + "RecursiveStruct": { + "NoRecurse": "bar" + } + } + ] + } + }, + "serialized": { + "uri": "/path", + "body": "foobar" + } + }, + { + "given": { + "input": { + "shape": "InputShape", + "locationName": "OperationRequest", + "xmlNamespace": {"uri": "https://foo/"} + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "RecursiveStruct": { + "RecursiveMap": { + "foo": { + "NoRecurse": "foo" + }, + "bar": { + "NoRecurse": "bar" + } + } + } + }, + "serialized": { + "uri": "/path", + "body": "foofoobarbar" + } + } + ] + }, + { + "description": "Timestamp in header", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "TimeArgInHeader": { + "shape": "TimestampType", + "location": "header", + "locationName": "x-amz-timearg" + } + } + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "TimeArgInHeader": 1422172800 + }, + "serialized": { + "method": "POST", + "body": "", + "uri": "/path", + "headers": {"x-amz-timearg": "Sun, 25 Jan 2015 08:00:00 GMT"} + } + } + 
] + }, + { + "description": "Idempotency token auto fill", + "metadata": { + "protocol": "rest-xml", + "apiVersion": "2014-01-01" + }, + "shapes": { + "InputShape": { + "type": "structure", + "members": { + "Token": { + "shape": "StringType", + "idempotencyToken": true + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + "Token": "abc123" + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "abc123" + } + }, + { + "given": { + "input": { + "shape": "InputShape" + }, + "http": { + "method": "POST", + "requestUri": "/path" + }, + "name": "OperationName" + }, + "params": { + }, + "serialized": { + "uri": "/path", + "headers": {}, + "body": "00000000-0000-4000-8000-000000000000" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/ec2.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/ec2.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/ec2.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/ec2.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,454 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType", + "locationName": "FooNum" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + } + } + }, + 
"StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "myname123falsetrue1.21.3200arequest-id" + } + } + ] + }, + { + "description": "Blob", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Blob": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Blob": "value" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "dmFsdWU=requestid" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "List with custom member name", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { 
+ "shape": "StringType", + "locationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "Flattened List", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType", + "flattened": true + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "Normal map", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StructureType" + } + }, + "StructureType": { + "type": "structure", + "members": { + "foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": { + "foo": "bar" + }, + "baz": { + "foo": "bam" + } + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Flattened map", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType", + "flattened": true + } + } + }, + "MapType": { + "type": "map", + "key": { + 
"shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Named map", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType", + "flattened": true + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType", + "locationName": "foo" + }, + "value": { + "shape": "StringType", + "locationName": "bar" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Empty string", + "metadata": { + "protocol": "ec2" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Foo": "" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "requestid" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/json.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/json.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/json.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/json.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,369 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + } + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}" + } + } + ] + }, + { + "description": "Blob members", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "BlobMember": { + "shape": "BlobType" + }, + "StructMember": { + "shape": "BlobContainer" + } + } + }, + "BlobType": { + "type": "blob" + }, + "BlobContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "BlobType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "BlobMember": "hi!", + 
"StructMember": { + "foo": "there!" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}" + } + } + ] + }, + { + "description": "Timestamp members", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "TimeMember": { + "shape": "TimeType" + }, + "StructMember": { + "shape": "TimeContainer" + } + } + }, + "TimeType": { + "type": "timestamp" + }, + "TimeContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "TimeType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "TimeMember": 1398796238, + "StructMember": { + "foo": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + }, + "ListMemberMap": { + "shape": "ListTypeMap" + }, + "ListMemberStruct": { + "shape": "ListTypeStruct" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "ListTypeMap": { + "type": "list", + "member": { + "shape": "MapType" + } + }, + "ListTypeStruct": { + "type": "list", + "member": { + "shape": "StructType" + } + }, + "StringType": { + "type": "string" + }, + "StructType": { + "type": "structure", + "members": { + } + }, + "MapType": { + "type": "map", + "key": { "shape": "StringType" }, + "value": { "shape": "StringType" } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["a", "b"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"ListMember\": [\"a\", \"b\"]}" + } + }, + { 
+ "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["a", null], + "ListMemberMap": [{}, null, null, {}], + "ListMemberStruct": [{}, null, null, {}] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"ListMember\": [\"a\", null], \"ListMemberMap\": [{}, null, null, {}], \"ListMemberStruct\": [{}, null, null, {}]}" + } + } + ] + }, + { + "description": "Maps", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "MapMember": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "NumberList" + } + }, + "StringType": { + "type": "string" + }, + "NumberList": { + "type": "list", + "member": { + "shape": "IntegerType" + } + }, + "IntegerType": { + "type": "integer" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "MapMember": { + "a": [1, 2], + "b": [3, 4] + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}" + } + } + ] + }, + { + "description": "Ignores extra data", + "metadata": { + "protocol": "json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "StrType": { + "shape": "StrType" + } + } + }, + "StrType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": {}, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"foo\": \"bar\"}" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/query.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/query.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/query.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/query.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,775 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType", + "locationName": "FooNum" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + }, + "Timestamp": { + "shape": "TimestampType" + } + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a", + "Timestamp": 1422172800 + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id" + } + } + ] + }, + { + "description": "Not all members in response", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType" + } + } + }, + "StringType": { + 
"type": "string" + }, + "IntegerType": { + "type": "integer" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "myname" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "mynamerequest-id" + } + } + ] + }, + { + "description": "Blob", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Blob": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Blob": "value" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "dmFsdWU=requestid" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "List with custom member name", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": 
"OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "Flattened List", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "flattened": true, + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123requestid" + } + } + ] + }, + { + "description": "Flattened single element list", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "flattened": true, + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abcrequestid" + } + } + ] + }, + { + "description": "List of structures", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "List": { + "shape": "ListOfStructs" + } + } + }, + "ListOfStructs": { + "type": "list", + "member": { + "shape": "StructureShape" + } + }, + "StructureShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringShape" + }, + "Bar": { + "shape": "StringShape" + }, + "Baz": { + "shape": "StringShape" + } + } + }, + "StringShape": { + "type": 
"string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "List": [{"Foo": "firstfoo", "Bar": "firstbar", "Baz": "firstbaz"}, {"Foo": "secondfoo", "Bar": "secondbar", "Baz": "secondbaz"}] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid" + } + } + ] + }, + { + "description": "Flattened list of structures", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "resultWrapper": "OperationNameResult", + "members": { + "List": { + "shape": "ListOfStructs" + } + } + }, + "ListOfStructs": { + "type": "list", + "flattened": true, + "member": { + "shape": "StructureShape" + } + }, + "StructureShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringShape" + }, + "Bar": { + "shape": "StringShape" + }, + "Baz": { + "shape": "StringShape" + } + } + }, + "StringShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "List": [{"Foo": "firstfoo", "Bar": "firstbar", "Baz": "firstbaz"}, {"Foo": "secondfoo", "Bar": "secondbar", "Baz": "secondbaz"}] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid" + } + } + ] + }, + { + "description": "Flattened list with location name", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "List": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "flattened": true, + "member": { + "shape": "StringShape", + "locationName": "NamedList" + } + }, + "StringShape": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": 
"OperationName" + }, + "result": { + "List": ["a", "b"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abrequestid" + } + } + ] + }, + { + "description": "Normal map", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StructType" + } + }, + "StringType": { + "type": "string" + }, + "StructType": { + "type": "structure", + "members": { + "foo": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": { + "foo": "bar" + }, + "baz": { + "foo": "bam" + } + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Flattened map", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap", + "flattened": true + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Flattened map in shape definition", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap", + "locationName": "Attribute" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType", + 
"locationName": "Name" + }, + "value": { + "shape": "StringType", + "locationName": "Value" + }, + "flattened": true, + "locationName": "Attribute" + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarrequestid" + } + } + ] + }, + { + "description": "Named map", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "flattened": true, + "key": { + "locationName": "foo", + "shape": "StringType" + }, + "value": { + "locationName": "bar", + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "resultWrapper": "OperationNameResult", + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbamrequestid" + } + } + ] + }, + { + "description": "Empty string", + "metadata": { + "protocol": "query" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Foo": "" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "requestid" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-json.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-json.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-json.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-json.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,608 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ImaHeader": { + "shape": "HeaderShape" + }, + "ImaHeaderLocation": { + "shape": "HeaderShape", + "locationName": "X-Foo" + }, + "Status": { + "shape": "StatusShape", + "location": "statusCode" + }, + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + } + } + }, + "HeaderShape": { + "type": "string", + "location": "header" + }, + "StatusShape": { + "type": "integer" + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ImaHeader": "test", + "ImaHeaderLocation": "abc", + "Status": 200, + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a" + }, + "response": { + "status_code": 200, + "headers": { + "ImaHeader": "test", + "X-Foo": "abc" + }, + "body": "{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, 
\"Long\": 200, \"Char\": \"a\"}" + } + } + ] + }, + { + "description": "Blob members", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "BlobMember": { + "shape": "BlobType" + }, + "StructMember": { + "shape": "BlobContainer" + } + } + }, + "BlobType": { + "type": "blob" + }, + "BlobContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "BlobType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "BlobMember": "hi!", + "StructMember": { + "foo": "there!" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}" + } + } + ] + }, + { + "description": "Timestamp members", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "TimeMember": { + "shape": "TimeType" + }, + "StructMember": { + "shape": "TimeContainer" + } + } + }, + "TimeType": { + "type": "timestamp" + }, + "TimeContainer": { + "type": "structure", + "members": { + "foo": { + "shape": "TimeType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "TimeMember": 1398796238, + "StructMember": { + "foo": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, 
+ "result": { + "ListMember": ["a", "b"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"ListMember\": [\"a\", \"b\"]}" + } + } + ] + }, + { + "description": "Lists with structure member", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListType" + } + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "SingleStruct" + } + }, + "StringType": { + "type": "string" + }, + "SingleStruct": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": [{"Foo": "a"}, {"Foo": "b"}] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"ListMember\": [{\"Foo\": \"a\"}, {\"Foo\": \"b\"}]}" + } + } + ] + }, + { + "description": "Maps", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "MapMember": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "ListType" + } + }, + "ListType": { + "type": "list", + "member": { + "shape": "IntegerType" + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "MapMember": { + "a": [1, 2], + "b": [3, 4] + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}" + } + } + ] + }, + { + "description": "Complex Map Values", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "MapMember": { + "shape": "MapType" + } + } + }, + "MapType": { + "type": "map", + "key": { + "shape": "StringType" 
+ }, + "value": { + "shape": "TimeType" + } + }, + "TimeType": { + "type": "timestamp" + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "MapMember": { + "a": 1398796238, + "b": 1398796238 + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"MapMember\": {\"a\": 1398796238, \"b\": 1398796238}}" + } + } + ] + }, + { + "description": "Ignores extra data", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "StrType": { + "shape": "StrType" + } + } + }, + "StrType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": {}, + "response": { + "status_code": 200, + "headers": {}, + "body": "{\"foo\": \"bar\"}" + } + } + ] + }, + { + "description": "Supports header maps", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "AllHeaders": { + "shape": "HeaderMap", + "location": "headers" + }, + "PrefixedHeaders": { + "shape": "HeaderMap", + "location": "headers", + "locationName": "X-" + } + } + }, + "HeaderMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "AllHeaders": { + "Content-Length": "10", + "X-Foo": "bar", + "X-Bam": "boo" + }, + "PrefixedHeaders": { + "Foo": "bar", + "Bam": "boo" + } + }, + "response": { + "status_code": 200, + "headers": { + "Content-Length": "10", + "X-Foo": "bar", + "X-Bam": "boo" + }, + "body": "{}" + } + } + ] + }, + { + "description": "JSON payload", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": 
"structure", + "payload": "Data", + "members": { + "Header": { + "shape": "StringType", + "location": "header", + "locationName": "X-Foo" + }, + "Data": { + "shape": "BodyStructure" + } + } + }, + "BodyStructure": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Header": "baz", + "Data": { + "Foo": "abc" + } + }, + "response": { + "status_code": 200, + "headers": { + "X-Foo": "baz" + }, + "body": "{\"Foo\": \"abc\"}" + } + } + ] + }, + { + "description": "Streaming payload", + "metadata": { + "protocol": "rest-json" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "payload": "Stream", + "members": { + "Stream": { + "shape": "Stream" + } + } + }, + "Stream": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Stream": "abc" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-xml.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-xml.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-xml.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/models/protocol_tests/output/rest-xml.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,720 @@ +[ + { + "description": "Scalar members", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ImaHeader": { + "shape": "HeaderShape" + }, + "ImaHeaderLocation": { + "shape": "HeaderShape", + 
"locationName": "X-Foo" + }, + "Str": { + "shape": "StringType" + }, + "Num": { + "shape": "IntegerType", + "locationName": "FooNum" + }, + "FalseBool": { + "shape": "BooleanType" + }, + "TrueBool": { + "shape": "BooleanType" + }, + "Float": { + "shape": "FloatType" + }, + "Double": { + "shape": "DoubleType" + }, + "Long": { + "shape": "LongType" + }, + "Char": { + "shape": "CharType" + }, + "Timestamp": { + "shape": "TimestampType" + } + } + }, + "StringType": { + "type": "string" + }, + "IntegerType": { + "type": "integer" + }, + "BooleanType": { + "type": "boolean" + }, + "FloatType": { + "type": "float" + }, + "DoubleType": { + "type": "double" + }, + "LongType": { + "type": "long" + }, + "CharType": { + "type": "character" + }, + "HeaderShape": { + "type": "string", + "location": "header" + }, + "StatusShape": { + "type": "integer", + "location": "statusCode" + }, + "TimestampType": { + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ImaHeader": "test", + "ImaHeaderLocation": "abc", + "Str": "myname", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a", + "Timestamp": 1422172800 + }, + "response": { + "status_code": 200, + "headers": { + "ImaHeader": "test", + "X-Foo": "abc" + }, + "body": "myname123falsetrue1.21.3200a2015-01-25T08:00:00Z" + } + }, + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ImaHeader": "test", + "ImaHeaderLocation": "abc", + "Str": "", + "Num": 123, + "FalseBool": false, + "TrueBool": true, + "Float": 1.2, + "Double": 1.3, + "Long": 200, + "Char": "a", + "Timestamp": 1422172800 + }, + "response": { + "status_code": 200, + "headers": { + "ImaHeader": "test", + "X-Foo": "abc" + }, + "body": "123falsetrue1.21.3200a2015-01-25T08:00:00Z" + } + } + ] + }, + { + "description": "Blob", + "metadata": { + "protocol": 
"rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Blob": { + "shape": "BlobType" + } + } + }, + "BlobType": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Blob": "value" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "dmFsdWU=" + } + } + ] + }, + { + "description": "Lists", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123" + } + } + ] + }, + { + "description": "List with custom member name", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "ListShape" + } + } + }, + "ListShape": { + "type": "list", + "member": { + "shape": "StringType", + "locationName": "item" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123" + } + } + ] + }, + { + "description": "Flattened List", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "ListMember": { + "shape": "StringList", + "flattened": true + } + } + }, + "StringList": { + "type": "list", + "member": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": 
{ + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "ListMember": ["abc", "123"] + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc123" + } + } + ] + }, + { + "description": "Normal map", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "SingleStructure" + } + }, + "SingleStructure": { + "type": "structure", + "members": { + "foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": { + "foo": "bar" + }, + "baz": { + "foo": "bam" + } + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbam" + } + } + ] + }, + { + "description": "Flattened map", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap", + "flattened": true + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType" + }, + "value": { + "shape": "StringType" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbam" + } + } + ] + }, + { + "description": "Named map", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Map": { + "shape": "StringMap" + } + } + }, + "StringMap": { + "type": "map", + "key": { + "shape": "StringType", + "locationName": "foo" + }, + "value": { + "shape": "StringType", + 
"locationName": "bar" + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Map": { + "qux": "bar", + "baz": "bam" + } + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "quxbarbazbam" + } + } + ] + }, + { + "description": "XML payload", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "payload": "Data", + "members": { + "Header": { + "shape": "StringType", + "location": "header", + "locationName": "X-Foo" + }, + "Data": { + "shape": "SingleStructure" + } + } + }, + "StringType": { + "type": "string" + }, + "SingleStructure": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Header": "baz", + "Data": { + "Foo": "abc" + } + }, + "response": { + "status_code": 200, + "headers": { + "X-Foo": "baz" + }, + "body": "abc" + } + } + ] + }, + { + "description": "Streaming payload", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "payload": "Stream", + "members": { + "Stream": { + "shape": "BlobStream" + } + } + }, + "BlobStream": { + "type": "blob" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Stream": "abc" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "abc" + } + } + ] + }, + { + "description": "Scalar members in headers", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Str": { + "locationName": "x-str", + "shape": "StringHeaderType" + }, + "Integer": { + "locationName": "x-int", + "shape": "IntegerHeaderType" + }, + "TrueBool": { + "locationName": "x-true-bool", + "shape": 
"BooleanHeaderType" + }, + "FalseBool": { + "locationName": "x-false-bool", + "shape": "BooleanHeaderType" + }, + "Float": { + "locationName": "x-float", + "shape": "FloatHeaderType" + }, + "Double": { + "locationName": "x-double", + "shape": "DoubleHeaderType" + }, + "Long": { + "locationName": "x-long", + "shape": "LongHeaderType" + }, + "Char": { + "locationName": "x-char", + "shape": "CharHeaderType" + }, + "Timestamp": { + "locationName": "x-timestamp", + "shape": "TimestampHeaderType" + } + } + }, + "StringHeaderType": { + "location": "header", + "type": "string" + }, + "IntegerHeaderType": { + "location": "header", + "type": "integer" + }, + "BooleanHeaderType": { + "location": "header", + "type": "boolean" + }, + "FloatHeaderType": { + "location": "header", + "type": "float" + }, + "DoubleHeaderType": { + "location": "header", + "type": "double" + }, + "LongHeaderType": { + "location": "header", + "type": "long" + }, + "CharHeaderType": { + "location": "header", + "type": "character" + }, + "TimestampHeaderType": { + "location": "header", + "type": "timestamp" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": "OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Str": "string", + "Integer": 1, + "TrueBool": true, + "FalseBool": false, + "Float": 1.5, + "Double": 1.5, + "Long": 100, + "Char": "a", + "Timestamp": 1422172800 + }, + "response": { + "status_code": 200, + "headers": { + "x-str": "string", + "x-int": "1", + "x-true-bool": "true", + "x-false-bool": "false", + "x-float": "1.5", + "x-double": "1.5", + "x-long": "100", + "x-char": "a", + "x-timestamp": "Sun, 25 Jan 2015 08:00:00 GMT" + }, + "body": "" + } + } + ] + }, + { + "description": "Empty string", + "metadata": { + "protocol": "rest-xml" + }, + "shapes": { + "OutputShape": { + "type": "structure", + "members": { + "Foo": { + "shape": "StringType" + } + } + }, + "StringType": { + "type": "string" + } + }, + "cases": [ + { + "given": { + "output": { + "shape": 
"OutputShape" + }, + "name": "OperationName" + }, + "result": { + "Foo": "" + }, + "response": { + "status_code": 200, + "headers": {}, + "body": "requestid" + } + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/NOTICE.txt aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/NOTICE.txt --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/NOTICE.txt 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/NOTICE.txt 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,65 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. +// If the endpoint is provided the scheme will be added if it is not present. 
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. +func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. 
+func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,92 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest", + "signingRegion": "us-east-1" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + 
"signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "us-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-west-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "sa-east-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com", + "signatureVersion": "v4" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,104 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + SigningRegion: "us-east-1", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "ap-northeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-northeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "eu-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "sa-east-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: 
"sts.us-gov-west-1.amazonaws.com", + }, + "us-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-west-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + }, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,41 @@ +package endpoints_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/private/endpoints" +) + +func TestGenericEndpoint(t *testing.T) { + name := "service" + region := "mock-region-1" + + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com", name, region), ep) + assert.Empty(t, sr) +} + +func TestGlobalEndpoints(t *testing.T) { + region := "mock-region-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "waf"} + + for _, name := range svcs { + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.amazonaws.com", name), ep) + assert.Equal(t, "us-east-1", sr) + } +} + +func TestServicesInCN(t *testing.T) { + region := "cn-north-1" + svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3", "waf"} + + for _, name := range svcs { + ep, sr := endpoints.EndpointForRegion(name, region, false) + assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com.cn", name, region), ep) + assert.Empty(t, sr) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/api.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,410 @@ +// Package api represents API abstractions for rendering service generated files. +package api + +import ( + "bytes" + "fmt" + "path" + "regexp" + "sort" + "strings" + "text/template" +) + +// An API defines a service API's definition. and logic to serialize the definition. +type API struct { + Metadata Metadata + Operations map[string]*Operation + Shapes map[string]*Shape + Waiters []Waiter + Documentation string + + // Set to true to avoid removing unused shapes + NoRemoveUnusedShapes bool + + // Set to true to avoid renaming to 'Input/Output' postfixed shapes + NoRenameToplevelShapes bool + + // Set to true to ignore service/request init methods (for testing) + NoInitMethods bool + + // Set to true to ignore String() and GoString methods (for generated tests) + NoStringerMethods bool + + // Set to true to not generate API service name constants + NoConstServiceNames bool + + SvcClientImportPath string + + initialized bool + imports map[string]bool + name string + path string +} + +// A Metadata is the metadata about an API's definition. +type Metadata struct { + APIVersion string + EndpointPrefix string + SigningName string + ServiceAbbreviation string + ServiceFullName string + SignatureVersion string + JSONVersion string + TargetPrefix string + Protocol string +} + +// PackageName name of the API package +func (a *API) PackageName() string { + return strings.ToLower(a.StructName()) +} + +// InterfacePackageName returns the package name for the interface. 
+func (a *API) InterfacePackageName() string { + return a.PackageName() + "iface" +} + +var nameRegex = regexp.MustCompile(`^Amazon|AWS\s*|\(.*|\s+|\W+`) + +// StructName returns the struct name for a given API. +func (a *API) StructName() string { + if a.name == "" { + name := a.Metadata.ServiceAbbreviation + if name == "" { + name = a.Metadata.ServiceFullName + } + + name = nameRegex.ReplaceAllString(name, "") + switch name { + case "ElasticLoadBalancing": + a.name = "ELB" + case "Config": + a.name = "ConfigService" + default: + a.name = name + } + } + return a.name +} + +// UseInitMethods returns if the service's init method should be rendered. +func (a *API) UseInitMethods() bool { + return !a.NoInitMethods +} + +// NiceName returns the human friendly API name. +func (a *API) NiceName() string { + if a.Metadata.ServiceAbbreviation != "" { + return a.Metadata.ServiceAbbreviation + } + return a.Metadata.ServiceFullName +} + +// ProtocolPackage returns the package name of the protocol this API uses. +func (a *API) ProtocolPackage() string { + switch a.Metadata.Protocol { + case "json": + return "jsonrpc" + case "ec2": + return "ec2query" + default: + return strings.Replace(a.Metadata.Protocol, "-", "", -1) + } +} + +// OperationNames returns a slice of API operations supported. +func (a *API) OperationNames() []string { + i, names := 0, make([]string, len(a.Operations)) + for n := range a.Operations { + names[i] = n + i++ + } + sort.Strings(names) + return names +} + +// OperationList returns a slice of API operation pointers +func (a *API) OperationList() []*Operation { + list := make([]*Operation, len(a.Operations)) + for i, n := range a.OperationNames() { + list[i] = a.Operations[n] + } + return list +} + +// OperationHasOutputPlaceholder returns if any of the API operation input +// or output shapes are place holders. 
+func (a *API) OperationHasOutputPlaceholder() bool { + for _, op := range a.Operations { + if op.OutputRef.Shape.Placeholder { + return true + } + } + return false +} + +// ShapeNames returns a slice of names for each shape used by the API. +func (a *API) ShapeNames() []string { + i, names := 0, make([]string, len(a.Shapes)) + for n := range a.Shapes { + names[i] = n + i++ + } + sort.Strings(names) + return names +} + +// ShapeList returns a slice of shape pointers used by the API. +func (a *API) ShapeList() []*Shape { + list := make([]*Shape, len(a.Shapes)) + for i, n := range a.ShapeNames() { + list[i] = a.Shapes[n] + } + return list +} + +// resetImports resets the import map to default values. +func (a *API) resetImports() { + a.imports = map[string]bool{ + "github.com/aws/aws-sdk-go/aws": true, + } +} + +// importsGoCode returns the generated Go import code. +func (a *API) importsGoCode() string { + if len(a.imports) == 0 { + return "" + } + + corePkgs, extPkgs := []string{}, []string{} + for i := range a.imports { + if strings.Contains(i, ".") { + extPkgs = append(extPkgs, i) + } else { + corePkgs = append(corePkgs, i) + } + } + sort.Strings(corePkgs) + sort.Strings(extPkgs) + + code := "import (\n" + for _, i := range corePkgs { + code += fmt.Sprintf("\t%q\n", i) + } + if len(corePkgs) > 0 { + code += "\n" + } + for _, i := range extPkgs { + code += fmt.Sprintf("\t%q\n", i) + } + code += ")\n\n" + return code +} + +// A tplAPI is the top level template for the API +var tplAPI = template.Must(template.New("api").Parse(` +{{ range $_, $o := .OperationList }} +{{ $o.GoCode }} + +{{ end }} + +{{ range $_, $s := .ShapeList }} +{{ if and $s.IsInternal (eq $s.Type "structure") }}{{ $s.GoCode }}{{ end }} + +{{ end }} + +{{ range $_, $s := .ShapeList }} +{{ if $s.IsEnum }}{{ $s.GoCode }}{{ end }} + +{{ end }} +`)) + +// APIGoCode renders the API in Go code. 
Returning it as a string +func (a *API) APIGoCode() string { + a.resetImports() + delete(a.imports, "github.com/aws/aws-sdk-go/aws") + a.imports["github.com/aws/aws-sdk-go/aws/awsutil"] = true + a.imports["github.com/aws/aws-sdk-go/aws/request"] = true + if a.OperationHasOutputPlaceholder() { + a.imports["github.com/aws/aws-sdk-go/private/protocol/"+a.ProtocolPackage()] = true + a.imports["github.com/aws/aws-sdk-go/private/protocol"] = true + } + var buf bytes.Buffer + err := tplAPI.Execute(&buf, a) + if err != nil { + panic(err) + } + + code := a.importsGoCode() + strings.TrimSpace(buf.String()) + return code +} + +// A tplService defines the template for the service generated code. +var tplService = template.Must(template.New("service").Parse(` +{{ .Documentation }}//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type {{ .StructName }} struct { + *client.Client +} + +{{ if .UseInitMethods }}// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) +{{ end }} + +{{ if not .NoConstServiceNames }} +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "{{ .Metadata.EndpointPrefix }}" +{{ end }} + +// New creates a new instance of the {{ .StructName }} client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a {{ .StructName }} client from just a session. 
+// svc := {{ .PackageName }}.New(mySession) +// +// // Create a {{ .StructName }} client with additional configuration +// svc := {{ .PackageName }}.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *{{ .StructName }} { + c := p.ClientConfig({{ if .NoConstServiceNames }}"{{ .Metadata.EndpointPrefix }}"{{ else }}ServiceName{{ end }}, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *{{ .StructName }} { + svc := &{{ .StructName }}{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: {{ if .NoConstServiceNames }}"{{ .Metadata.EndpointPrefix }}"{{ else }}ServiceName{{ end }}, {{ if ne .Metadata.SigningName "" }} + SigningName: "{{ .Metadata.SigningName }}",{{ end }} + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "{{ .Metadata.APIVersion }}", +{{ if eq .Metadata.Protocol "json" }}JSONVersion: "{{ .Metadata.JSONVersion }}", + TargetPrefix: "{{ .Metadata.TargetPrefix }}", +{{ end }} + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack({{if eq .Metadata.SignatureVersion "v2"}}v2{{else}}v4{{end}}.Sign) + {{if eq .Metadata.SignatureVersion "v2"}}svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + {{end}}svc.Handlers.Build.PushBackNamed({{ .ProtocolPackage }}.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed({{ .ProtocolPackage }}.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed({{ .ProtocolPackage }}.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed({{ .ProtocolPackage }}.UnmarshalErrorHandler) + + {{ if .UseInitMethods }}// Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + {{ end }} + + return svc +} + +// newRequest creates a new request for a {{ 
.StructName }} operation and runs any +// custom request initialization. +func (c *{{ .StructName }}) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + {{ if .UseInitMethods }}// Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + {{ end }} + + return req +} +`)) + +// ServiceGoCode renders service go code. Returning it as a string. +func (a *API) ServiceGoCode() string { + a.resetImports() + a.imports["github.com/aws/aws-sdk-go/aws/client"] = true + a.imports["github.com/aws/aws-sdk-go/aws/client/metadata"] = true + a.imports["github.com/aws/aws-sdk-go/aws/request"] = true + if a.Metadata.SignatureVersion == "v2" { + a.imports["github.com/aws/aws-sdk-go/private/signer/v2"] = true + a.imports["github.com/aws/aws-sdk-go/aws/corehandlers"] = true + } else { + a.imports["github.com/aws/aws-sdk-go/private/signer/v4"] = true + } + a.imports["github.com/aws/aws-sdk-go/private/protocol/"+a.ProtocolPackage()] = true + + var buf bytes.Buffer + err := tplService.Execute(&buf, a) + if err != nil { + panic(err) + } + + code := a.importsGoCode() + buf.String() + return code +} + +// ExampleGoCode renders service example code. Returning it as a string. +func (a *API) ExampleGoCode() string { + exs := []string{} + for _, o := range a.OperationList() { + exs = append(exs, o.Example()) + } + + code := fmt.Sprintf("import (\n%q\n%q\n%q\n\n%q\n%q\n%q\n)\n\n"+ + "var _ time.Duration\nvar _ bytes.Buffer\n\n%s", + "bytes", + "fmt", + "time", + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/session", + path.Join(a.SvcClientImportPath, a.PackageName()), + strings.Join(exs, "\n\n"), + ) + return code +} + +// A tplInterface defines the template for the service interface type. +var tplInterface = template.Must(template.New("interface").Parse(` +// {{ .StructName }}API is the interface type for {{ .PackageName }}.{{ .StructName }}. 
+type {{ .StructName }}API interface { + {{ range $_, $o := .OperationList }} + {{ $o.InterfaceSignature }} + {{ end }} +} + +var _ {{ .StructName }}API = (*{{ .PackageName }}.{{ .StructName }})(nil) +`)) + +// InterfaceGoCode returns the go code for the service's API operations as an +// interface{}. Assumes that the interface is being created in a different +// package than the service API's package. +func (a *API) InterfaceGoCode() string { + a.resetImports() + a.imports = map[string]bool{ + "github.com/aws/aws-sdk-go/aws/request": true, + path.Join(a.SvcClientImportPath, a.PackageName()): true, + } + + var buf bytes.Buffer + err := tplInterface.Execute(&buf, a) + + if err != nil { + panic(err) + } + + code := a.importsGoCode() + strings.TrimSpace(buf.String()) + return code +} + +// NewAPIGoCodeWithPkgName returns a string of instantiating the API prefixed +// with its package name. Takes a string depicting the Config. +func (a *API) NewAPIGoCodeWithPkgName(cfg string) string { + return fmt.Sprintf("%s.New(%s)", a.PackageName(), cfg) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/api_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/api_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/api_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,42 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStructNameWithFullName(t *testing.T) { + a := API{ + Metadata: Metadata{ + ServiceFullName: "Amazon Service Name-100", + }, + } + assert.Equal(t, a.StructName(), "ServiceName100") +} + +func TestStructNameWithAbbreviation(t *testing.T) { + a := API{ + Metadata: Metadata{ + ServiceFullName: "AWS Service Name-100", 
+ ServiceAbbreviation: "AWS SN100", + }, + } + assert.Equal(t, a.StructName(), "SN100") +} + +func TestStructNameForExceptions(t *testing.T) { + a := API{ + Metadata: Metadata{ + ServiceFullName: "Elastic Load Balancing", + }, + } + assert.Equal(t, a.StructName(), "ELB") + + a = API{ + Metadata: Metadata{ + ServiceFullName: "AWS Config", + }, + } + assert.Equal(t, a.StructName(), "ConfigService") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,75 @@ +package api + +import ( + "path/filepath" + "strings" +) + +// customizationPasses Executes customization logic for the API by package name. +func (a *API) customizationPasses() { + var svcCustomizations = map[string]func(*API){ + "s3": s3Customizations, + "cloudfront": cloudfrontCustomizations, + "dynamodbstreams": dynamodbstreamsCustomizations, + } + + if fn := svcCustomizations[a.PackageName()]; fn != nil { + fn(a) + } +} + +// s3Customizations customizes the API generation to replace values specific to S3. +func s3Customizations(a *API) { + var strExpires *Shape + + for name, s := range a.Shapes { + // Remove ContentMD5 members + if _, ok := s.MemberRefs["ContentMD5"]; ok { + delete(s.MemberRefs, "ContentMD5") + } + + // Expires should be a string not time.Time since the format is not + // enforced by S3, and any value can be set to this field outside of the SDK. 
+ if strings.HasSuffix(name, "Output") { + if ref, ok := s.MemberRefs["Expires"]; ok { + if strExpires == nil { + newShape := *ref.Shape + strExpires = &newShape + strExpires.Type = "string" + strExpires.refs = []*ShapeRef{} + } + ref.Shape.removeRef(ref) + ref.Shape = strExpires + ref.Shape.refs = append(ref.Shape.refs, &s.MemberRef) + } + } + } +} + +// cloudfrontCustomizations customized the API generation to replace values +// specific to CloudFront. +func cloudfrontCustomizations(a *API) { + // MaxItems members should always be integers + for _, s := range a.Shapes { + if ref, ok := s.MemberRefs["MaxItems"]; ok { + ref.ShapeName = "Integer" + ref.Shape = a.Shapes["Integer"] + } + } +} + +// dynamodbstreamsCustomizations references any duplicate shapes from DynamoDB +func dynamodbstreamsCustomizations(a *API) { + p := strings.Replace(a.path, "streams.dynamodb", "dynamodb", -1) + file := filepath.Join(p, "api-2.json") + + dbAPI := API{} + dbAPI.Attach(file) + dbAPI.Setup() + + for n := range a.Shapes { + if _, ok := dbAPI.Shapes[n]; ok { + a.Shapes[n].resolvePkg = "github.com/aws/aws-sdk-go/service/dynamodb" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/docstring.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/docstring.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/docstring.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/docstring.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,156 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "html" + "os" + "regexp" + "strings" +) + +type apiDocumentation struct { + *API + Operations map[string]string + Service string + Shapes map[string]shapeDocumentation +} + +type shapeDocumentation struct { + Base string + Refs map[string]string +} + +// 
AttachDocs attaches documentation from a JSON filename. +func (a *API) AttachDocs(filename string) { + d := apiDocumentation{API: a} + + f, err := os.Open(filename) + defer f.Close() + if err != nil { + panic(err) + } + err = json.NewDecoder(f).Decode(&d) + if err != nil { + panic(err) + } + + d.setup() + +} + +func (d *apiDocumentation) setup() { + d.API.Documentation = docstring(d.Service) + if d.Service == "" { + d.API.Documentation = + fmt.Sprintf("// %s is a client for %s.\n", d.API.StructName(), d.API.NiceName()) + } + + for op, doc := range d.Operations { + d.API.Operations[op].Documentation = docstring(doc) + } + + for shape, info := range d.Shapes { + if sh := d.API.Shapes[shape]; sh != nil { + sh.Documentation = docstring(info.Base) + } + + for ref, doc := range info.Refs { + if doc == "" { + continue + } + + parts := strings.Split(ref, "$") + if sh := d.API.Shapes[parts[0]]; sh != nil { + if m := sh.MemberRefs[parts[1]]; m != nil { + m.Documentation = docstring(doc) + } + } + } + } +} + +var reNewline = regexp.MustCompile(`\r?\n`) +var reMultiSpace = regexp.MustCompile(`\s+`) +var reComments = regexp.MustCompile(``) +var reFullname = regexp.MustCompile(`\s*.+?<\/fullname?>\s*`) +var reExamples = regexp.MustCompile(`.+?<\/examples?>`) +var rePara = regexp.MustCompile(`<(?:p|h\d)>(.+?)`) +var reLink = regexp.MustCompile(`(.+?)`) +var reTag = regexp.MustCompile(`<.+?>`) +var reEndNL = regexp.MustCompile(`\n+$`) + +// docstring rewrites a string to insert godocs formatting. 
+func docstring(doc string) string { + doc = reNewline.ReplaceAllString(doc, "") + doc = reMultiSpace.ReplaceAllString(doc, " ") + doc = reComments.ReplaceAllString(doc, "") + doc = reFullname.ReplaceAllString(doc, "") + doc = reExamples.ReplaceAllString(doc, "") + doc = rePara.ReplaceAllString(doc, "$1\n\n") + doc = reLink.ReplaceAllString(doc, "$2 ($1)") + doc = reTag.ReplaceAllString(doc, "$1") + doc = reEndNL.ReplaceAllString(doc, "") + doc = strings.TrimSpace(doc) + if doc == "" { + return "\n" + } + + doc = html.UnescapeString(doc) + doc = wrap(doc, 72) + + return commentify(doc) +} + +// commentify converts a string to a Go comment +func commentify(doc string) string { + lines := strings.Split(doc, "\n") + out := []string{} + for i, line := range lines { + if i > 0 && line == "" && lines[i-1] == "" { + continue + } + out = append(out, "// "+line) + } + + return strings.Join(out, "\n") + "\n" +} + +// wrap returns a rewritten version of text to have line breaks +// at approximately length characters. Line breaks will only be +// inserted into whitespace. 
+func wrap(text string, length int) string { + var buf bytes.Buffer + var last rune + var lastNL bool + var col int + + for _, c := range text { + switch c { + case '\r': // ignore this + continue // and also don't track `last` + case '\n': // ignore this too, but reset col + if col >= length || last == '\n' { + buf.WriteString("\n\n") + } + col = 0 + case ' ', '\t': // opportunity to split + if col >= length { + buf.WriteByte('\n') + col = 0 + } else { + if !lastNL { + buf.WriteRune(c) + } + col++ // count column + } + default: + buf.WriteRune(c) + col++ + } + lastNL = c == '\n' + last = c + } + return buf.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +package api + +import "strings" + +// ExportableName a name which is exportable as a value or name in Go code +func (a *API) ExportableName(name string) string { + if name == "" { + return name + } + + return strings.ToUpper(name[0:1]) + name[1:] +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/load.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/load.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/load.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/load.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,64 @@ +package api + +import ( + 
"encoding/json" + "os" + "path/filepath" +) + +// Load takes a set of files for each filetype and returns an API pointer. +// The API will be initialized once all files have been loaded and parsed. +// +// Will panic if any failure opening the definition JSON files, or there +// are unrecognized exported names. +func Load(api, docs, paginators, waiters string) *API { + a := API{} + a.Attach(api) + a.Attach(docs) + a.Attach(paginators) + a.Attach(waiters) + a.Setup() + return &a +} + +// Attach opens a file by name, and unmarshal its JSON data. +// Will proceed to setup the API if not already done so. +func (a *API) Attach(filename string) { + a.path = filepath.Dir(filename) + f, err := os.Open(filename) + defer f.Close() + if err != nil { + panic(err) + } + json.NewDecoder(f).Decode(a) +} + +// AttachString will unmarshal a raw JSON string, and setup the +// API if not already done so. +func (a *API) AttachString(str string) { + json.Unmarshal([]byte(str), a) + + if !a.initialized { + a.Setup() + } +} + +// Setup initializes the API. 
+func (a *API) Setup() { + a.writeShapeNames() + a.resolveReferences() + a.fixStutterNames() + a.renameExportable() + if !a.NoRenameToplevelShapes { + a.renameToplevelShapes() + } + a.updateTopLevelShapeReferences() + a.createInputOutputShapes() + a.customizationPasses() + + if !a.NoRemoveUnusedShapes { + a.removeUnusedShapes() + } + + a.initialized = true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/load_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/load_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/load_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/load_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,30 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestResolvedReferences(t *testing.T) { + json := `{ + "operations": { + "OperationName": { + "input": { "shape": "TestName" } + } + }, + "shapes": { + "TestName": { + "type": "structure", + "members": { + "memberName1": { "shape": "OtherTest" }, + "memberName2": { "shape": "OtherTest" } + } + }, + "OtherTest": { "type": "string" } + } + }` + a := API{} + a.AttachString(json) + assert.Equal(t, len(a.Shapes["OtherTest"].refs), 2) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/operation.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/operation.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/operation.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/operation.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,310 @@ +package api + +import ( + "bytes" 
+ "fmt" + "regexp" + "sort" + "strings" + "text/template" +) + +// An Operation defines a specific API Operation. +type Operation struct { + API *API `json:"-"` + ExportedName string + Name string + Documentation string + HTTP HTTPInfo + InputRef ShapeRef `json:"input"` + OutputRef ShapeRef `json:"output"` + Paginator *Paginator +} + +// A HTTPInfo defines the method of HTTP request for the Operation. +type HTTPInfo struct { + Method string + RequestURI string + ResponseCode uint +} + +// HasInput returns if the Operation accepts an input paramater +func (o *Operation) HasInput() bool { + return o.InputRef.ShapeName != "" +} + +// HasOutput returns if the Operation accepts an output parameter +func (o *Operation) HasOutput() bool { + return o.OutputRef.ShapeName != "" +} + +// tplOperation defines a template for rendering an API Operation +var tplOperation = template.Must(template.New("operation").Parse(` +const op{{ .ExportedName }} = "{{ .Name }}" + +// {{ .ExportedName }}Request generates a request for the {{ .ExportedName }} operation. 
+func (c *{{ .API.StructName }}) {{ .ExportedName }}Request(` + + `input {{ .InputRef.GoType }}) (req *request.Request, output {{ .OutputRef.GoType }}) { + op := &request.Operation{ + Name: op{{ .ExportedName }}, + {{ if ne .HTTP.Method "" }}HTTPMethod: "{{ .HTTP.Method }}", + {{ end }}{{ if ne .HTTP.RequestURI "" }}HTTPPath: "{{ .HTTP.RequestURI }}", + {{ end }}{{ if .Paginator }}Paginator: &request.Paginator{ + InputTokens: {{ .Paginator.InputTokensString }}, + OutputTokens: {{ .Paginator.OutputTokensString }}, + LimitToken: "{{ .Paginator.LimitKey }}", + TruncationToken: "{{ .Paginator.MoreResults }}", + }, + {{ end }} + } + + if input == nil { + input = &{{ .InputRef.GoTypeElem }}{} + } + + req = c.newRequest(op, input, output){{ if eq .OutputRef.Shape.Placeholder true }} + req.Handlers.Unmarshal.Remove({{ .API.ProtocolPackage }}.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler){{ end }} + output = &{{ .OutputRef.GoTypeElem }}{} + req.Data = output + return +} + +{{ .Documentation }}func (c *{{ .API.StructName }}) {{ .ExportedName }}(` + + `input {{ .InputRef.GoType }}) ({{ .OutputRef.GoType }}, error) { + req, out := c.{{ .ExportedName }}Request(input) + err := req.Send() + return out, err +} + +{{ if .Paginator }} +func (c *{{ .API.StructName }}) {{ .ExportedName }}Pages(` + + `input {{ .InputRef.GoType }}, fn func(p {{ .OutputRef.GoType }}, lastPage bool) (shouldContinue bool)) error { + page, _ := c.{{ .ExportedName }}Request(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.({{ .OutputRef.GoType }}), lastPage) + }) +} +{{ end }} +`)) + +// GoCode returns a string of rendered GoCode for this Operation +func (o *Operation) GoCode() string { + var buf bytes.Buffer + err := tplOperation.Execute(&buf, o) + if err != nil { + panic(err) + } + + return strings.TrimSpace(buf.String()) +} + +// 
tplInfSig defines the template for rendering an Operation's signature within an Interface definition. +var tplInfSig = template.Must(template.New("opsig").Parse(` +{{ .ExportedName }}Request({{ .InputRef.GoTypeWithPkgName }}) (*request.Request, {{ .OutputRef.GoTypeWithPkgName }}) + +{{ .ExportedName }}({{ .InputRef.GoTypeWithPkgName }}) ({{ .OutputRef.GoTypeWithPkgName }}, error) +{{ if .Paginator }} +{{ .ExportedName }}Pages({{ .InputRef.GoTypeWithPkgName }}, func({{ .OutputRef.GoTypeWithPkgName }}, bool) bool) error{{ end }} +`)) + +// InterfaceSignature returns a string representing the Operation's interface{} +// functional signature. +func (o *Operation) InterfaceSignature() string { + var buf bytes.Buffer + err := tplInfSig.Execute(&buf, o) + if err != nil { + panic(err) + } + + return strings.TrimSpace(buf.String()) +} + +// tplExample defines the template for rendering an Operation example +var tplExample = template.Must(template.New("operationExample").Parse(` +func Example{{ .API.StructName }}_{{ .ExportedName }}() { + svc := {{ .API.PackageName }}.New(session.New()) + + {{ .ExampleInput }} + resp, err := svc.{{ .ExportedName }}(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} +`)) + +// Example returns a string of the rendered Go code for the Operation +func (o *Operation) Example() string { + var buf bytes.Buffer + err := tplExample.Execute(&buf, o) + if err != nil { + panic(err) + } + + return strings.TrimSpace(buf.String()) +} + +// ExampleInput return a string of the rendered Go code for an example's input parameters +func (o *Operation) ExampleInput() string { + if len(o.InputRef.Shape.MemberRefs) == 0 { + return fmt.Sprintf("var params *%s.%s", + o.API.PackageName(), o.InputRef.GoTypeElem()) + } + e := example{o, map[string]int{}} + return "params := " + e.traverseAny(o.InputRef.Shape, false, false) +} + +// A example provides +type example struct { + *Operation + visited map[string]int +} + +// traverseAny returns rendered Go code for the shape. +func (e *example) traverseAny(s *Shape, required, payload bool) string { + str := "" + e.visited[s.ShapeName]++ + + switch s.Type { + case "structure": + str = e.traverseStruct(s, required, payload) + case "list": + str = e.traverseList(s, required, payload) + case "map": + str = e.traverseMap(s, required, payload) + default: + str = e.traverseScalar(s, required, payload) + } + + e.visited[s.ShapeName]-- + + return str +} + +var reType = regexp.MustCompile(`\b([A-Z])`) + +// traverseStruct returns rendered Go code for a structure type shape. +func (e *example) traverseStruct(s *Shape, required, payload bool) string { + var buf bytes.Buffer + buf.WriteString("&" + s.API.PackageName() + "." 
+ s.GoTypeElem() + "{") + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + req := make([]string, len(s.Required)) + copy(req, s.Required) + sort.Strings(req) + + if e.visited[s.ShapeName] < 2 { + for _, n := range req { + m := s.MemberRefs[n].Shape + p := n == s.Payload && (s.MemberRefs[n].Streaming || m.Streaming) + buf.WriteString(n + ": " + e.traverseAny(m, true, p) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + } + + for _, n := range s.MemberNames() { + if s.IsRequired(n) { + continue + } + m := s.MemberRefs[n].Shape + p := n == s.Payload && (s.MemberRefs[n].Streaming || m.Streaming) + buf.WriteString(n + ": " + e.traverseAny(m, false, p) + ",\n") + } + } else { + buf.WriteString("// Recursive values...\n") + } + + buf.WriteString("}") + return buf.String() +} + +// traverseMap returns rendered Go code for a map type shape. +func (e *example) traverseMap(s *Shape, required, payload bool) string { + var buf bytes.Buffer + t := reType.ReplaceAllString(s.GoTypeElem(), s.API.PackageName()+".$1") + buf.WriteString(t + "{") + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + if e.visited[s.ShapeName] < 2 { + m := s.ValueRef.Shape + buf.WriteString("\"Key\": " + e.traverseAny(m, true, false) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n// More values...\n") + } else { + buf.WriteString("// Recursive values...\n") + } + buf.WriteString("}") + + return buf.String() +} + +// traverseList returns rendered Go code for a list type shape. 
+func (e *example) traverseList(s *Shape, required, payload bool) string { + var buf bytes.Buffer + t := reType.ReplaceAllString(s.GoTypeElem(), s.API.PackageName()+".$1") + buf.WriteString(t + "{") + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + if e.visited[s.ShapeName] < 2 { + m := s.MemberRef.Shape + buf.WriteString(e.traverseAny(m, true, false) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n// More values...\n") + } else { + buf.WriteString("// Recursive values...\n") + } + buf.WriteString("}") + + return buf.String() +} + +// traverseScalar returns an AWS Type string representation initialized to a value. +// Will panic if s is an unsupported shape type. +func (e *example) traverseScalar(s *Shape, required, payload bool) string { + str := "" + switch s.Type { + case "integer", "long": + str = `aws.Int64(1)` + case "float", "double": + str = `aws.Float64(1.0)` + case "string", "character": + str = `aws.String("` + s.ShapeName + `")` + case "blob": + if payload { + str = `bytes.NewReader([]byte("PAYLOAD"))` + } else { + str = `[]byte("PAYLOAD")` + } + case "boolean": + str = `aws.Bool(true)` + case "timestamp": + str = `aws.Time(time.Now())` + default: + panic("unsupported shape " + s.Type) + } + + return str +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/pagination.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/pagination.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/pagination.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/pagination.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,89 @@ +package api + +import ( + "encoding/json" + "fmt" + "os" +) + +// Paginator keeps track of 
pagination configuration for an API operation. +type Paginator struct { + InputTokens interface{} `json:"input_token"` + OutputTokens interface{} `json:"output_token"` + LimitKey string `json:"limit_key"` + MoreResults string `json:"more_results"` +} + +// InputTokensString returns output tokens formatted as a list +func (p *Paginator) InputTokensString() string { + str := p.InputTokens.([]string) + return fmt.Sprintf("%#v", str) +} + +// OutputTokensString returns output tokens formatted as a list +func (p *Paginator) OutputTokensString() string { + str := p.OutputTokens.([]string) + return fmt.Sprintf("%#v", str) +} + +// used for unmarshaling from the paginators JSON file +type paginationDefinitions struct { + *API + Pagination map[string]Paginator +} + +// AttachPaginators attaches pagination configuration from filename to the API. +func (a *API) AttachPaginators(filename string) { + p := paginationDefinitions{API: a} + + f, err := os.Open(filename) + defer f.Close() + if err != nil { + panic(err) + } + err = json.NewDecoder(f).Decode(&p) + if err != nil { + panic(err) + } + + p.setup() +} + +// setup runs post-processing on the paginator configuration. 
+func (p *paginationDefinitions) setup() { + for n, e := range p.Pagination { + if e.InputTokens == nil || e.OutputTokens == nil { + continue + } + paginator := e + + switch t := paginator.InputTokens.(type) { + case string: + paginator.InputTokens = []string{t} + case []interface{}: + toks := []string{} + for _, e := range t { + s := e.(string) + toks = append(toks, s) + } + paginator.InputTokens = toks + } + switch t := paginator.OutputTokens.(type) { + case string: + paginator.OutputTokens = []string{t} + case []interface{}: + toks := []string{} + for _, e := range t { + s := e.(string) + toks = append(toks, s) + } + paginator.OutputTokens = toks + } + + if o, ok := p.Operations[n]; ok { + o.Paginator = &paginator + } else { + panic("unknown operation for paginator " + n) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/passes.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/passes.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/passes.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/passes.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,255 @@ +package api + +import ( + "fmt" + "regexp" + "strings" +) + +// updateTopLevelShapeReferences moves resultWrapper, locationName, and +// xmlNamespace traits from toplevel shape references to the toplevel +// shapes for easier code generation +func (a *API) updateTopLevelShapeReferences() { + for _, o := range a.Operations { + // these are for REST-XML services + if o.InputRef.LocationName != "" { + o.InputRef.Shape.LocationName = o.InputRef.LocationName + } + if o.InputRef.Location != "" { + o.InputRef.Shape.Location = o.InputRef.Location + } + if o.InputRef.Payload != "" { + o.InputRef.Shape.Payload = o.InputRef.Payload + } + if o.InputRef.XMLNamespace.Prefix != 
"" { + o.InputRef.Shape.XMLNamespace.Prefix = o.InputRef.XMLNamespace.Prefix + } + if o.InputRef.XMLNamespace.URI != "" { + o.InputRef.Shape.XMLNamespace.URI = o.InputRef.XMLNamespace.URI + } + } + +} + +// writeShapeNames sets each shape's API and shape name values. Binding the +// shape to its parent API. +func (a *API) writeShapeNames() { + for n, s := range a.Shapes { + s.API = a + s.ShapeName = n + } +} + +func (a *API) resolveReferences() { + resolver := referenceResolver{API: a, visited: map[*ShapeRef]bool{}} + + for _, s := range a.Shapes { + resolver.resolveShape(s) + } + + for _, o := range a.Operations { + o.API = a // resolve parent reference + + resolver.resolveReference(&o.InputRef) + resolver.resolveReference(&o.OutputRef) + } +} + +// A referenceResolver provides a way to resolve shape references to +// shape definitions. +type referenceResolver struct { + *API + visited map[*ShapeRef]bool +} + +// resolveReference updates a shape reference to reference the API and +// its shape definition. All other nested references are also resolved. +func (r *referenceResolver) resolveReference(ref *ShapeRef) { + if ref.ShapeName == "" { + return + } + + if shape, ok := r.API.Shapes[ref.ShapeName]; ok { + ref.API = r.API // resolve reference back to API + ref.Shape = shape // resolve shape reference + + if r.visited[ref] { + return + } + r.visited[ref] = true + + shape.refs = append(shape.refs, ref) // register the ref + + // resolve shape's references, if it has any + r.resolveShape(shape) + } +} + +// resolveShape resolves a shape's Member Key Value, and nested member +// shape references. +func (r *referenceResolver) resolveShape(shape *Shape) { + r.resolveReference(&shape.MemberRef) + r.resolveReference(&shape.KeyRef) + r.resolveReference(&shape.ValueRef) + for _, m := range shape.MemberRefs { + r.resolveReference(m) + } +} + +// renameToplevelShapes renames all top level shapes of an API to their +// exportable variant. 
The shapes are also updated to include notations +// if they are Input or Outputs. +func (a *API) renameToplevelShapes() { + for _, v := range a.Operations { + if v.HasInput() { + name := v.ExportedName + "Input" + switch n := len(v.InputRef.Shape.refs); { + case n == 1 && a.Shapes[name] == nil: + v.InputRef.Shape.Rename(name) + } + } + if v.HasOutput() { + name := v.ExportedName + "Output" + switch n := len(v.OutputRef.Shape.refs); { + case n == 1 && a.Shapes[name] == nil: + v.OutputRef.Shape.Rename(name) + } + } + v.InputRef.Payload = a.ExportableName(v.InputRef.Payload) + v.OutputRef.Payload = a.ExportableName(v.OutputRef.Payload) + } +} + +// fixStutterNames fixes all name struttering based on Go naming conventions. +// "Stuttering" is when the prefix of a structure or function matches the +// package name (case insensitive). +func (a *API) fixStutterNames() { + str, end := a.StructName(), "" + if len(str) > 1 { + l := len(str) - 1 + str, end = str[0:l], str[l:] + } + re := regexp.MustCompile(fmt.Sprintf(`\A(?i:%s)%s`, str, end)) + + for name, op := range a.Operations { + newName := re.ReplaceAllString(name, "") + if newName != name { + delete(a.Operations, name) + a.Operations[newName] = op + } + op.ExportedName = newName + } + + for k, s := range a.Shapes { + newName := re.ReplaceAllString(k, "") + if newName != s.ShapeName { + s.Rename(newName) + } + } +} + +// renameExportable renames all operation names to be exportable names. +// All nested Shape names are also updated to the exportable variant. +func (a *API) renameExportable() { + for name, op := range a.Operations { + newName := a.ExportableName(name) + if newName != name { + delete(a.Operations, name) + a.Operations[newName] = op + } + op.ExportedName = newName + } + + for k, s := range a.Shapes { + // FIXME SNS has lower and uppercased shape names with the same name, + // except the lowercased variant is used exclusively for string and + // other primitive types. 
Renaming both would cause a collision. + // We work around this by only renaming the structure shapes. + if s.Type == "string" { + continue + } + + for mName, member := range s.MemberRefs { + newName := a.ExportableName(mName) + if newName != mName { + delete(s.MemberRefs, mName) + s.MemberRefs[newName] = member + + // also apply locationName trait so we keep the old one + // but only if there's no locationName trait on ref or shape + if member.LocationName == "" && member.Shape.LocationName == "" { + member.LocationName = mName + } + } + + if newName == "_" { + panic("Shape " + s.ShapeName + " uses reserved member name '_'") + } + } + + newName := a.ExportableName(k) + if newName != s.ShapeName { + s.Rename(newName) + } + + s.Payload = a.ExportableName(s.Payload) + + // fix required trait names + for i, n := range s.Required { + s.Required[i] = a.ExportableName(n) + } + } + + for _, s := range a.Shapes { + // fix enum names + if s.IsEnum() { + s.EnumConsts = make([]string, len(s.Enum)) + for i := range s.Enum { + shape := s.ShapeName + shape = strings.ToUpper(shape[0:1]) + shape[1:] + s.EnumConsts[i] = shape + s.EnumName(i) + } + } + } +} + +// createInputOutputShapes creates toplevel input/output shapes if they +// have not been defined in the API. This normalizes all APIs to always +// have an input and output structure in the signature. +func (a *API) createInputOutputShapes() { + for _, op := range a.Operations { + if !op.HasInput() { + setAsPlacholderShape(&op.InputRef, op.ExportedName+"Input", a) + } + if !op.HasOutput() { + setAsPlacholderShape(&op.OutputRef, op.ExportedName+"Output", a) + } + } +} + +func setAsPlacholderShape(tgtShapeRef *ShapeRef, name string, a *API) { + shape := a.makeIOShape(name) + shape.Placeholder = true + *tgtShapeRef = ShapeRef{API: a, ShapeName: shape.ShapeName, Shape: shape} + shape.refs = append(shape.refs, tgtShapeRef) +} + +// makeIOShape returns a pointer to a new Shape initialized by the name provided. 
+func (a *API) makeIOShape(name string) *Shape { + shape := &Shape{ + API: a, ShapeName: name, Type: "structure", + MemberRefs: map[string]*ShapeRef{}, + } + a.Shapes[name] = shape + return shape +} + +// removeUnusedShapes removes shapes from the API which are not referenced by any +// other shape in the API. +func (a *API) removeUnusedShapes() { + for n, s := range a.Shapes { + if len(s.refs) == 0 { + delete(a.Shapes, n) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/shape.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/shape.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/shape.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/shape.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,441 @@ +package api + +import ( + "bytes" + "fmt" + "path" + "regexp" + "sort" + "strings" + "text/template" + + "github.com/aws/aws-sdk-go/private/util" +) + +// A ShapeRef defines the usage of a shape within the API. 
+type ShapeRef struct { + API *API `json:"-"` + Shape *Shape `json:"-"` + Documentation string + ShapeName string `json:"shape"` + Location string + LocationName string + QueryName string + Flattened bool + Streaming bool + XMLAttribute bool + XMLNamespace XMLInfo + Payload string + IdempotencyToken bool `json:"idempotencyToken"` +} + +// A XMLInfo defines URL and prefix for Shapes when rendered as XML +type XMLInfo struct { + Prefix string + URI string +} + +// A Shape defines the definition of a shape type +type Shape struct { + API *API `json:"-"` + ShapeName string + Documentation string + MemberRefs map[string]*ShapeRef `json:"members"` + MemberRef ShapeRef `json:"member"` + KeyRef ShapeRef `json:"key"` + ValueRef ShapeRef `json:"value"` + Required []string + Payload string + Type string + Exception bool + Enum []string + EnumConsts []string + Flattened bool + Streaming bool + Location string + LocationName string + IdempotencyToken bool `json:"idempotencyToken"` + XMLNamespace XMLInfo + Min int // optional Minimum length (string, list) or value (number) + Max int // optional Minimum length (string, list) or value (number) + + refs []*ShapeRef // References to this shape + resolvePkg string // use this package in the goType() if present + + // Defines if the shape is a placeholder and should not be used directly + Placeholder bool +} + +// Rename changes the name of the Shape to newName. Also updates +// the associated API's reference to use newName. +func (s *Shape) Rename(newName string) { + for _, r := range s.refs { + r.ShapeName = newName + } + + delete(s.API.Shapes, s.ShapeName) + s.API.Shapes[newName] = s + s.ShapeName = newName +} + +// MemberNames returns a slice of struct member names. 
+func (s *Shape) MemberNames() []string { + i, names := 0, make([]string, len(s.MemberRefs)) + for n := range s.MemberRefs { + names[i] = n + i++ + } + sort.Strings(names) + return names +} + +// GoTypeWithPkgName returns a shape's type as a string with the package name in +// . format. Package naming only applies to structures. +func (s *Shape) GoTypeWithPkgName() string { + return goType(s, true) +} + +// GoType returns a shape's Go type +func (s *Shape) GoType() string { + return goType(s, false) +} + +// GoType returns a shape ref's Go type. +func (ref *ShapeRef) GoType() string { + if ref.Shape == nil { + panic(fmt.Errorf("missing shape definition on reference for %#v", ref)) + } + + return ref.Shape.GoType() +} + +// GoTypeWithPkgName returns a shape's type as a string with the package name in +// . format. Package naming only applies to structures. +func (ref *ShapeRef) GoTypeWithPkgName() string { + if ref.Shape == nil { + panic(fmt.Errorf("missing shape definition on reference for %#v", ref)) + } + + return ref.Shape.GoTypeWithPkgName() +} + +// Returns a string version of the Shape's type. 
+// If withPkgName is true, the package name will be added as a prefix +func goType(s *Shape, withPkgName bool) string { + switch s.Type { + case "structure": + if withPkgName || s.resolvePkg != "" { + pkg := s.resolvePkg + if pkg != "" { + s.API.imports[pkg] = true + pkg = path.Base(pkg) + } else { + pkg = s.API.PackageName() + } + return fmt.Sprintf("*%s.%s", pkg, s.ShapeName) + } + return "*" + s.ShapeName + case "map": + return "map[string]" + s.ValueRef.GoType() + case "list": + return "[]" + s.MemberRef.GoType() + case "boolean": + return "*bool" + case "string", "character": + return "*string" + case "blob": + return "[]byte" + case "integer", "long": + return "*int64" + case "float", "double": + return "*float64" + case "timestamp": + s.API.imports["time"] = true + return "*time.Time" + default: + panic("Unsupported shape type: " + s.Type) + } +} + +// GoTypeElem returns the Go type for the Shape. If the shape type is a pointer just +// the type will be returned minus the pointer *. +func (s *Shape) GoTypeElem() string { + t := s.GoType() + if strings.HasPrefix(t, "*") { + return t[1:] + } + return t +} + +// GoTypeElem returns the Go type for the Shape. If the shape type is a pointer just +// the type will be returned minus the pointer *. +func (ref *ShapeRef) GoTypeElem() string { + if ref.Shape == nil { + panic(fmt.Errorf("missing shape definition on reference for %#v", ref)) + } + + return ref.Shape.GoTypeElem() +} + +// ShapeTag is a struct tag that will be applied to a shape's generated code +type ShapeTag struct { + Key, Val string +} + +// String returns the string representation of the shape tag +func (s ShapeTag) String() string { + return fmt.Sprintf(`%s:"%s"`, s.Key, s.Val) +} + +// ShapeTags is a collection of shape tags and provides serialization of the +// tags in an ordered list. +type ShapeTags []ShapeTag + +// Join returns an ordered serialization of the shape tags with the provided +// seperator. 
+func (s ShapeTags) Join(sep string) string { + o := &bytes.Buffer{} + for i, t := range s { + o.WriteString(t.String()) + if i < len(s)-1 { + o.WriteString(sep) + } + } + + return o.String() +} + +// String is an alias for Join with the empty space seperator. +func (s ShapeTags) String() string { + return s.Join(" ") +} + +// GoTags returns the rendered tags string for the ShapeRef +func (ref *ShapeRef) GoTags(toplevel bool, isRequired bool) string { + tags := ShapeTags{} + + if ref.Location != "" { + tags = append(tags, ShapeTag{"location", ref.Location}) + } else if ref.Shape.Location != "" { + tags = append(tags, ShapeTag{"location", ref.Shape.Location}) + } + + if ref.LocationName != "" { + tags = append(tags, ShapeTag{"locationName", ref.LocationName}) + } else if ref.Shape.LocationName != "" { + tags = append(tags, ShapeTag{"locationName", ref.Shape.LocationName}) + } + + if ref.QueryName != "" { + tags = append(tags, ShapeTag{"queryName", ref.QueryName}) + } + if ref.Shape.MemberRef.LocationName != "" { + tags = append(tags, ShapeTag{"locationNameList", ref.Shape.MemberRef.LocationName}) + } + if ref.Shape.KeyRef.LocationName != "" { + tags = append(tags, ShapeTag{"locationNameKey", ref.Shape.KeyRef.LocationName}) + } + if ref.Shape.ValueRef.LocationName != "" { + tags = append(tags, ShapeTag{"locationNameValue", ref.Shape.ValueRef.LocationName}) + } + if ref.Shape.Min > 0 { + tags = append(tags, ShapeTag{"min", fmt.Sprintf("%d", ref.Shape.Min)}) + } + // All shapes have a type + tags = append(tags, ShapeTag{"type", ref.Shape.Type}) + + // embed the timestamp type for easier lookups + if ref.Shape.Type == "timestamp" { + t := ShapeTag{Key: "timestampFormat"} + if ref.Location == "header" { + t.Val = "rfc822" + } else { + switch ref.API.Metadata.Protocol { + case "json", "rest-json": + t.Val = "unix" + case "rest-xml", "ec2", "query": + t.Val = "iso8601" + } + } + tags = append(tags, t) + } + + if ref.Shape.Flattened || ref.Flattened { + tags = append(tags, 
ShapeTag{"flattened", "true"}) + } + if ref.XMLAttribute { + tags = append(tags, ShapeTag{"xmlAttribute", "true"}) + } + if isRequired { + tags = append(tags, ShapeTag{"required", "true"}) + } + if ref.Shape.IsEnum() { + tags = append(tags, ShapeTag{"enum", ref.ShapeName}) + } + + if toplevel { + if ref.Shape.Payload != "" { + tags = append(tags, ShapeTag{"payload", ref.Shape.Payload}) + } + if ref.XMLNamespace.Prefix != "" { + tags = append(tags, ShapeTag{"xmlPrefix", ref.XMLNamespace.Prefix}) + } else if ref.Shape.XMLNamespace.Prefix != "" { + tags = append(tags, ShapeTag{"xmlPrefix", ref.Shape.XMLNamespace.Prefix}) + } + if ref.XMLNamespace.URI != "" { + tags = append(tags, ShapeTag{"xmlURI", ref.XMLNamespace.URI}) + } else if ref.Shape.XMLNamespace.URI != "" { + tags = append(tags, ShapeTag{"xmlURI", ref.Shape.XMLNamespace.URI}) + } + } + + if ref.IdempotencyToken || ref.Shape.IdempotencyToken { + tags = append(tags, ShapeTag{"idempotencyToken", "true"}) + } + + return fmt.Sprintf("`%s`", tags) +} + +// Docstring returns the godocs formated documentation +func (ref *ShapeRef) Docstring() string { + if ref.Documentation != "" { + return ref.Documentation + } + return ref.Shape.Docstring() +} + +// Docstring returns the godocs formated documentation +func (s *Shape) Docstring() string { + return s.Documentation +} + +var goCodeStringerTmpl = template.Must(template.New("goCodeStringerTmpl").Parse(` +// String returns the string representation +func (s {{ .ShapeName }}) String() string { + return awsutil.Prettify(s) +} +// GoString returns the string representation +func (s {{ .ShapeName }}) GoString() string { + return s.String() +} +`)) + +func (s *Shape) goCodeStringers() string { + w := bytes.Buffer{} + if err := goCodeStringerTmpl.Execute(&w, s); err != nil { + panic(fmt.Sprintln("Unexpected error executing goCodeStringers template", err)) + } + + return w.String() +} + +var enumStrip = regexp.MustCompile(`[^a-zA-Z0-9_:\./-]`) +var enumDelims = 
regexp.MustCompile(`[-_:\./]+`) +var enumCamelCase = regexp.MustCompile(`([a-z])([A-Z])`) + +// EnumName returns the Nth enum in the shapes Enum list +func (s *Shape) EnumName(n int) string { + enum := s.Enum[n] + enum = enumStrip.ReplaceAllLiteralString(enum, "") + enum = enumCamelCase.ReplaceAllString(enum, "$1-$2") + parts := enumDelims.Split(enum, -1) + for i, v := range parts { + v = strings.ToLower(v) + parts[i] = "" + if len(v) > 0 { + parts[i] = strings.ToUpper(v[0:1]) + } + if len(v) > 1 { + parts[i] += v[1:] + } + } + enum = strings.Join(parts, "") + enum = strings.ToUpper(enum[0:1]) + enum[1:] + return enum +} + +// GoCode returns the rendered Go code for the Shape. +func (s *Shape) GoCode() string { + code := s.Docstring() + if !s.IsEnum() { + code += "type " + s.ShapeName + " " + } + + switch { + case s.Type == "structure": + ref := &ShapeRef{ShapeName: s.ShapeName, API: s.API, Shape: s} + + code += "struct {\n" + code += "_ struct{} " + ref.GoTags(true, false) + "\n\n" + for _, n := range s.MemberNames() { + m := s.MemberRefs[n] + code += m.Docstring() + if (m.Streaming || m.Shape.Streaming) && s.Payload == n { + rtype := "io.ReadSeeker" + if len(s.refs) > 1 { + rtype = "aws.ReaderSeekCloser" + } else if strings.HasSuffix(s.ShapeName, "Output") { + rtype = "io.ReadCloser" + } + + s.API.imports["io"] = true + code += n + " " + rtype + " " + m.GoTags(false, s.IsRequired(n)) + "\n\n" + } else { + code += n + " " + m.GoType() + " " + m.GoTags(false, s.IsRequired(n)) + "\n\n" + } + } + code += "}" + + if !s.API.NoStringerMethods { + code += s.goCodeStringers() + } + case s.IsEnum(): + code += "const (\n" + for n, e := range s.Enum { + code += fmt.Sprintf("\t// @enum %s\n\t%s = %q\n", + s.ShapeName, s.EnumConsts[n], e) + } + code += ")" + default: + panic("Cannot generate toplevel shape for " + s.Type) + } + + return util.GoFmt(code) +} + +// IsEnum returns whether this shape is an enum list +func (s *Shape) IsEnum() bool { + return s.Type == "string" && 
len(s.Enum) > 0 +} + +// IsRequired returns if member is a required field. +func (s *Shape) IsRequired(member string) bool { + for _, n := range s.Required { + if n == member { + return true + } + } + return false +} + +// IsInternal returns whether the shape was defined in this package +func (s *Shape) IsInternal() bool { + return s.resolvePkg == "" +} + +// removeRef removes a shape reference from the list of references this +// shape is used in. +func (s *Shape) removeRef(ref *ShapeRef) { + r := s.refs + for i := 0; i < len(r); i++ { + if r[i] == ref { + j := i + 1 + copy(r[i:], r[j:]) + for k, n := len(r)-j+i, len(r); k < n; k++ { + r[k] = nil // free up the end of the list + } // for k + s.refs = r[:len(r)-j+i] + break + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/shapetag_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/shapetag_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/shapetag_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/shapetag_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,23 @@ +package api_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/private/model/api" + "github.com/stretchr/testify/assert" +) + +func TestShapeTagJoin(t *testing.T) { + s := api.ShapeTags{ + {Key: "location", Val: "query"}, + {Key: "locationName", Val: "abc"}, + {Key: "type", Val: "string"}, + } + + expected := `location:"query" locationName:"abc" type:"string"` + + o := s.Join(" ") + o2 := s.String() + assert.Equal(t, expected, o) + assert.Equal(t, expected, o2) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/waiters.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/api/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,133 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "sort" + "text/template" +) + +// A Waiter is an individual waiter definition. +type Waiter struct { + Name string + Delay int + MaxAttempts int + OperationName string `json:"operation"` + Operation *Operation + Acceptors []WaitAcceptor +} + +// A WaitAcceptor is an individual wait acceptor definition. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// WaitersGoCode generates and returns Go code for each of the waiters of +// this API. +func (a *API) WaitersGoCode() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "import (\n\t%q\n)", + "github.com/aws/aws-sdk-go/private/waiter") + + for _, w := range a.Waiters { + buf.WriteString(w.GoCode()) + } + return buf.String() +} + +// used for unmarshaling from the waiter JSON file +type waiterDefinitions struct { + *API + Waiters map[string]Waiter +} + +// AttachWaiters reads a file of waiter definitions, and adds those to the API. +// Will panic if an error occurs. 
+func (a *API) AttachWaiters(filename string) { + p := waiterDefinitions{API: a} + + f, err := os.Open(filename) + defer f.Close() + if err != nil { + panic(err) + } + err = json.NewDecoder(f).Decode(&p) + if err != nil { + panic(err) + } + + p.setup() +} + +func (p *waiterDefinitions) setup() { + p.API.Waiters = []Waiter{} + i, keys := 0, make([]string, len(p.Waiters)) + for k := range p.Waiters { + keys[i] = k + i++ + } + sort.Strings(keys) + + for _, n := range keys { + e := p.Waiters[n] + n = p.ExportableName(n) + e.Name = n + e.OperationName = p.ExportableName(e.OperationName) + e.Operation = p.API.Operations[e.OperationName] + if e.Operation == nil { + panic("unknown operation " + e.OperationName + " for waiter " + n) + } + p.API.Waiters = append(p.API.Waiters, e) + } +} + +// ExpectedString returns the string that was expected by the WaitAcceptor +func (a *WaitAcceptor) ExpectedString() string { + switch a.Expected.(type) { + case string: + return fmt.Sprintf("%q", a.Expected) + default: + return fmt.Sprintf("%v", a.Expected) + } +} + +var tplWaiter = template.Must(template.New("waiter").Parse(` +func (c *{{ .Operation.API.StructName }}) WaitUntil{{ .Name }}(input {{ .Operation.InputRef.GoType }}) error { + waiterCfg := waiter.Config{ + Operation: "{{ .OperationName }}", + Delay: {{ .Delay }}, + MaxAttempts: {{ .MaxAttempts }}, + Acceptors: []waiter.WaitAcceptor{ + {{ range $_, $a := .Acceptors }}waiter.WaitAcceptor{ + State: "{{ .State }}", + Matcher: "{{ .Matcher }}", + Argument: "{{ .Argument }}", + Expected: {{ .ExpectedString }}, + }, + {{ end }} + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} +`)) + +// GoCode returns the generated Go code for an individual waiter. 
+func (w *Waiter) GoCode() string { + var buf bytes.Buffer + if err := tplWaiter.Execute(&buf, w); err != nil { + panic(err) + } + + return buf.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/api-info/api-info.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/api-info/api-info.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/api-info/api-info.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/api-info/api-info.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,27 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "sort" + + "github.com/aws/aws-sdk-go/private/model/api" +) + +func main() { + dir, _ := os.Open(filepath.Join("models", "apis")) + names, _ := dir.Readdirnames(0) + for _, name := range names { + m, _ := filepath.Glob(filepath.Join("models", "apis", name, "*", "api-2.json")) + if len(m) == 0 { + continue + } + + sort.Strings(m) + f := m[len(m)-1] + a := api.API{} + a.Attach(f) + fmt.Printf("%s\t%s\n", a.Metadata.ServiceFullName, a.Metadata.APIVersion) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/gen-api/main.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/gen-api/main.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/gen-api/main.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/gen-api/main.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,254 @@ +// Command aws-gen-gocli parses a JSON description of an AWS API and generates a +// Go file containing a client for the API. 
+// +// aws-gen-gocli apis/s3/2006-03-03/api-2.json +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime/debug" + "sort" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/private/model/api" + "github.com/aws/aws-sdk-go/private/util" +) + +type generateInfo struct { + *api.API + PackageDir string +} + +var excludeServices = map[string]struct{}{ + "importexport": {}, +} + +// newGenerateInfo initializes the service API's folder structure for a specific service. +// If the SERVICES environment variable is set, and this service is not apart of the list +// this service will be skipped. +func newGenerateInfo(modelFile, svcPath, svcImportPath string) *generateInfo { + g := &generateInfo{API: &api.API{SvcClientImportPath: svcImportPath}} + g.API.Attach(modelFile) + + if _, ok := excludeServices[g.API.PackageName()]; ok { + return nil + } + + paginatorsFile := strings.Replace(modelFile, "api-2.json", "paginators-1.json", -1) + if _, err := os.Stat(paginatorsFile); err == nil { + g.API.AttachPaginators(paginatorsFile) + } else if !os.IsNotExist(err) { + fmt.Println("api-2.json error:", err) + } + + docsFile := strings.Replace(modelFile, "api-2.json", "docs-2.json", -1) + if _, err := os.Stat(docsFile); err == nil { + g.API.AttachDocs(docsFile) + } else { + fmt.Println("docs-2.json error:", err) + } + + waitersFile := strings.Replace(modelFile, "api-2.json", "waiters-2.json", -1) + if _, err := os.Stat(waitersFile); err == nil { + g.API.AttachWaiters(waitersFile) + } else if !os.IsNotExist(err) { + fmt.Println("waiters-2.json error:", err) + } + + g.API.Setup() + + if svc := os.Getenv("SERVICES"); svc != "" { + svcs := strings.Split(svc, ",") + + included := false + for _, s := range svcs { + if s == g.API.PackageName() { + included = true + break + } + } + if !included { + // skip this non-included service + return nil + } + } + + // ensure the directory exists + pkgDir := filepath.Join(svcPath, g.API.PackageName()) + 
os.MkdirAll(pkgDir, 0775) + os.MkdirAll(filepath.Join(pkgDir, g.API.InterfacePackageName()), 0775) + + g.PackageDir = pkgDir + + return g +} + +// Generates service api, examples, and interface from api json definition files. +// +// Flags: +// -path alternative service path to write generated files to for each service. +// +// Env: +// SERVICES comma separated list of services to generate. +func main() { + var svcPath, sessionPath, svcImportPath string + flag.StringVar(&svcPath, "path", "service", "directory to generate service clients in") + flag.StringVar(&sessionPath, "sessionPath", filepath.Join("aws", "session"), "generate session service client factories") + flag.StringVar(&svcImportPath, "svc-import-path", "github.com/aws/aws-sdk-go/service", "namespace to generate service client Go code import path under") + flag.Parse() + + files := []string{} + for i := 0; i < flag.NArg(); i++ { + file := flag.Arg(i) + if strings.Contains(file, "*") { + paths, _ := filepath.Glob(file) + files = append(files, paths...) + } else { + files = append(files, file) + } + } + + for svcName := range excludeServices { + if strings.Contains(os.Getenv("SERVICES"), svcName) { + fmt.Printf("Service %s is not supported\n", svcName) + os.Exit(1) + } + } + + sort.Strings(files) + + // Remove old API versions from list + m := map[string]bool{} + for i := range files { + idx := len(files) - 1 - i + parts := strings.Split(files[idx], string(filepath.Separator)) + svc := parts[len(parts)-3] // service name is 2nd-to-last component + + if m[svc] { + files[idx] = "" // wipe this one out if we already saw the service + } + m[svc] = true + } + + wg := sync.WaitGroup{} + for i := range files { + filename := files[i] + if filename == "" { // empty file + continue + } + + genInfo := newGenerateInfo(filename, svcPath, svcImportPath) + if genInfo == nil { + continue + } + if _, ok := excludeServices[genInfo.API.PackageName()]; ok { + // Skip services not yet supported. 
+ continue + } + + wg.Add(1) + go func(g *generateInfo, filename string) { + defer wg.Done() + writeServiceFiles(g, filename) + }(genInfo, filename) + } + + wg.Wait() +} + +func writeServiceFiles(g *generateInfo, filename string) { + defer func() { + if r := recover(); r != nil { + fmt.Fprintf(os.Stderr, "Error generating %s\n%s\n%s\n", + filename, r, debug.Stack()) + } + }() + + fmt.Printf("Generating %s (%s)...\n", + g.API.PackageName(), g.API.Metadata.APIVersion) + + // write api.go and service.go files + Must(writeAPIFile(g)) + Must(writeExamplesFile(g)) + Must(writeServiceFile(g)) + Must(writeInterfaceFile(g)) + Must(writeWaitersFile(g)) +} + +// Must will panic if the error passed in is not nil. +func Must(err error) { + if err != nil { + panic(err) + } +} + +const codeLayout = `// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +%s +package %s + +%s +` + +func writeGoFile(file string, layout string, args ...interface{}) error { + return ioutil.WriteFile(file, []byte(util.GoFmt(fmt.Sprintf(layout, args...))), 0664) +} + +// writeExamplesFile writes out the service example file. +func writeExamplesFile(g *generateInfo) error { + return writeGoFile(filepath.Join(g.PackageDir, "examples_test.go"), + codeLayout, + "", + g.API.PackageName()+"_test", + g.API.ExampleGoCode(), + ) +} + +// writeServiceFile writes out the service initialization file. +func writeServiceFile(g *generateInfo) error { + return writeGoFile(filepath.Join(g.PackageDir, "service.go"), + codeLayout, + "", + g.API.PackageName(), + g.API.ServiceGoCode(), + ) +} + +// writeInterfaceFile writes out the service interface file. 
+func writeInterfaceFile(g *generateInfo) error { + return writeGoFile(filepath.Join(g.PackageDir, g.API.InterfacePackageName(), "interface.go"), + codeLayout, + fmt.Sprintf("\n// Package %s provides an interface for the %s.", + g.API.InterfacePackageName(), g.API.Metadata.ServiceFullName), + g.API.InterfacePackageName(), + g.API.InterfaceGoCode(), + ) +} + +func writeWaitersFile(g *generateInfo) error { + if len(g.API.Waiters) == 0 { + return nil + } + + return writeGoFile(filepath.Join(g.PackageDir, "waiters.go"), + codeLayout, + "", + g.API.PackageName(), + g.API.WaitersGoCode(), + ) +} + +// writeAPIFile writes out the service api file. +func writeAPIFile(g *generateInfo) error { + return writeGoFile(filepath.Join(g.PackageDir, "api.go"), + codeLayout, + fmt.Sprintf("\n// Package %s provides a client for %s.", + g.API.PackageName(), g.API.Metadata.ServiceFullName), + g.API.PackageName(), + g.API.APIGoCode(), + ) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/gen-endpoints/main.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/gen-endpoints/main.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/gen-endpoints/main.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/cli/gen-endpoints/main.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,47 @@ +// Command aws-gen-goendpoints parses a JSON description of the AWS endpoint +// discovery logic and generates a Go file which returns an endpoint. 
+// +// aws-gen-goendpoints apis/_endpoints.json aws/endpoints_map.go +package main + +import ( + "encoding/json" + "os" + + "github.com/aws/aws-sdk-go/private/model" +) + +// Generates the endpoints from json description +// +// CLI Args: +// [0] This file's execution path +// [1] The definition file to use +// [2] The output file to generate +func main() { + in, err := os.Open(os.Args[1]) + if err != nil { + panic(err) + } + defer in.Close() + + var endpoints struct { + Version int + Endpoints map[string]struct { + Endpoint string + SigningRegion string + } + } + if err = json.NewDecoder(in).Decode(&endpoints); err != nil { + panic(err) + } + + out, err := os.Create(os.Args[2]) + if err != nil { + panic(err) + } + defer out.Close() + + if err := model.GenerateEndpoints(endpoints, out); err != nil { + panic(err) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/endpoints.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/endpoints.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/endpoints.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/model/endpoints.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,57 @@ +package model + +import ( + "bytes" + "go/format" + "io" + "text/template" +) + +// GenerateEndpoints writes a Go file to the given writer. +func GenerateEndpoints(endpoints interface{}, w io.Writer) error { + tmpl, err := template.New("endpoints").Parse(t) + if err != nil { + return err + } + + out := bytes.NewBuffer(nil) + if err = tmpl.Execute(out, endpoints); err != nil { + return err + } + + b, err := format.Source(bytes.TrimSpace(out.Bytes())) + if err != nil { + return err + } + + _, err = io.Copy(w, bytes.NewReader(b)) + return err +} + +const t = ` +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: {{ .Version }}, + Endpoints: map[string]endpointEntry{ + {{ range $key, $entry := .Endpoints }}"{{ $key }}": endpointEntry{ + Endpoint: "{{ $entry.Endpoint }}", + {{ if ne $entry.SigningRegion "" }}SigningRegion: "{{ $entry.SigningRegion }}", + {{ end }} + }, + {{ end }} + }, +} +` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_bench_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,85 @@ +// +build bench + +package ec2query_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func BenchmarkEC2QueryBuild_Complex_ec2AuthorizeSecurityGroupEgress(b *testing.B) { + params := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... 
+ }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + + benchEC2QueryBuild(b, "AuthorizeSecurityGroupEgress", params) +} + +func BenchmarkEC2QueryBuild_Simple_ec2AttachNetworkInterface(b *testing.B) { + params := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(1), // Required + InstanceId: aws.String("String"), // Required + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + + benchEC2QueryBuild(b, "AttachNetworkInterface", params) +} + +func benchEC2QueryBuild(b *testing.B, opName string, params interface{}) { + svc := awstesting.NewClient() + svc.ServiceName = "ec2" + svc.APIVersion = "2015-04-15" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{ + Name: opName, + HTTPMethod: "POST", + HTTPPath: "/", + }, params, nil) + ec2query.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,35 @@ +// Package ec2query provides serialisation of AWS EC2 requests and responses. 
+package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building ec2query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.ec2query.Build", Fn: Build} + +// Build builds a request for the EC2 protocol. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, true); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err) + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1167 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" 
+ "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. 
+// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bar *string `locationName:"barLocationName" type:"string"` + + Foo *string `type:"string"` + + Yuck *string `locationName:"yuckLocationName" queryName:"yuckQueryName" type:"string"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. 
+// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + StructArg *InputService3TestShapeStructType `locationName:"Struct" type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeStructType struct { + _ struct{} `type:"structure"` + + ScalarArg *string `locationName:"Scalar" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. 
+// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. 
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `type:"list"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. 
+// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationName:"ListMemberName" locationNameList:"item" type:"list"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. 
+// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationName:"ListMemberName" queryName:"ListQueryName" locationNameList:"item" type:"list"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. 
+// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. 
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + BlobArg []byte `type:"blob"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. 
+// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. 
+// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. 
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputShape) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService9TestCaseOperation2 = "OperationName" + +// InputService9TestCaseOperation2Request generates a request for the InputService9TestCaseOperation2 operation. 
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation2Request(input *InputService9TestShapeInputShape) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService9TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation2(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation2Output, error) { + req, out := c.InputService9TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService9TestShapeInputService9TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService9TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, 
`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedToMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + Yuck: aws.String("val3"), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputService3TestCaseOperation1Input{ + StructArg: &InputService3TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestListTypesCase1(t *testing.T) { + sess := session.New() + svc := 
NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := 
svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + 
+ // assert headers + +} + +func TestInputService9ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Token=abc123`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputShape{} + req, _ := svc.InputService9TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + ec2query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Token=00000000-0000-4000-8000-000000000000`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,63 @@ 
+package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling ec2query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling ec2query protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError} + +// Unmarshal unmarshals a response body for the EC2 protocol. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals response headers for the EC2 protocol. +func UnmarshalMeta(r *request.Request) { + // TODO implement unmarshaling of request IDs +} + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Response"` + Code string `xml:"Errors>Error>Code"` + Message string `xml:"Errors>Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalError unmarshals a response error for the EC2 protocol. 
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1060 @@ +package ec2query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() 
{ + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. 
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. 
+// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. 
+// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. 
+// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. 
+// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService6TestShapeStructureType `type:"map"` +} + +type OutputService6TestShapeStructureType struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. 
+// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. 
+// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. 
+// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. 
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200arequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, 
int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService3ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + 
ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := 
svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService9ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + ec2query.UnmarshalMeta(req) + ec2query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. 
+func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,106 @@ +package protocol_test + +import ( + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/stretchr/testify/assert" +) + +func TestCanSetIdempotencyToken(t *testing.T) { + cases := []struct { + CanSet bool + Case interface{} + }{ + { + true, + struct { + Field *string `idempotencyToken:"true"` + }{}, + }, + { + true, + struct { + Field string `idempotencyToken:"true"` + }{}, + }, + { + false, + struct { + Field *string `idempotencyToken:"true"` + }{Field: new(string)}, + }, + { + false, + struct { + Field string `idempotencyToken:"true"` + 
}{Field: "value"}, + }, + { + false, + struct { + Field *int `idempotencyToken:"true"` + }{}, + }, + { + false, + struct { + Field *string + }{}, + }, + } + + for i, c := range cases { + v := reflect.Indirect(reflect.ValueOf(c.Case)) + ty := v.Type() + canSet := protocol.CanSetIdempotencyToken(v.Field(0), ty.Field(0)) + assert.Equal(t, c.CanSet, canSet, "Expect case %d can set to match", i) + } +} + +func TestSetIdempotencyToken(t *testing.T) { + cases := []struct { + Case interface{} + }{ + { + &struct { + Field *string `idempotencyToken:"true"` + }{}, + }, + { + &struct { + Field string `idempotencyToken:"true"` + }{}, + }, + { + &struct { + Field *string `idempotencyToken:"true"` + }{Field: new(string)}, + }, + { + &struct { + Field string `idempotencyToken:"true"` + }{Field: ""}, + }, + } + + for i, c := range cases { + v := reflect.Indirect(reflect.ValueOf(c.Case)) + + protocol.SetIdempotencyToken(v.Field(0)) + assert.NotEmpty(t, v.Field(0).Interface(), "Expect case %d to be set", i) + } +} + +func TestUUIDVersion4(t *testing.T) { + uuid := protocol.UUIDVersion4(make([]byte, 16)) + assert.Equal(t, `00000000-0000-4000-8000-000000000000`, uuid) + + b := make([]byte, 16) + for i := 0; i < len(b); i++ { + b[i] = 1 + } + uuid = protocol.UUIDVersion4(b) + assert.Equal(t, `01010101-0101-4101-8101-010101010101`, uuid) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,251 @@ +// Package jsonutil provides JSON serialisation of AWS requests and responses. 
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(value, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // 
ignore non-body elements + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + fmt.Fprintf(buf, "%q:", name) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + var sv sortedValues = value.MapKeys() + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + fmt.Fprintf(buf, "%q:", k) + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + switch value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + buf.WriteString(strconv.FormatBool(value.Bool())) + case reflect.Int64: + buf.WriteString(strconv.FormatInt(value.Int(), 10)) + case reflect.Float64: + buf.WriteString(strconv.FormatFloat(value.Float(), 'f', 
-1, 64)) + default: + switch value.Type() { + case timeType: + converted := value.Interface().(time.Time) + buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) + case byteSliceType: + if !value.IsNil() { + converted := value.Interface().([]byte) + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for _, r := range s { + if r == '"' { + buf.WriteString(`\"`) + } else if r == '\\' { + buf.WriteString(`\\`) + } else if r == '\b' { + buf.WriteString(`\b`) + } else if r == '\f' { + buf.WriteString(`\f`) + } else if r == '\r' { + buf.WriteString(`\r`) + } else if r == '\t' { + buf.WriteString(`\t`) + } else if r == '\n' { + buf.WriteString(`\n`) + } else if r < 32 { + fmt.Fprintf(buf, "\\u%0.4x", r) + } else { + buf.WriteRune(r) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. 
+func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,100 @@ +package jsonutil_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/stretchr/testify/assert" +) + +func S(s string) *string { + return &s +} + +func D(s int64) *int64 { + return &s +} + +func F(s float64) *float64 { + return &s +} + +func T(s time.Time) *time.Time { + return &s +} + +type J struct { + S *string + SS []string + D *int64 + F *float64 + T *time.Time +} + +var jsonTests = []struct { + in interface{} + out string + err string +}{ + { + J{}, + `{}`, + ``, + }, + { + J{ + S: S("str"), + SS: []string{"A", "B", "C"}, + D: D(123), + F: F(4.56), + T: T(time.Unix(987, 0)), + }, + `{"S":"str","SS":["A","B","C"],"D":123,"F":4.56,"T":987}`, + ``, + }, + { + J{ + S: S(`"''"`), + }, + `{"S":"\"''\""}`, + ``, + }, + { + J{ + S: S("\x00føø\u00FF\n\\\"\r\t\b\f"), + }, + `{"S":"\u0000føøÿ\n\\\"\r\t\b\f"}`, + ``, + }, +} + +func TestBuildJSON(t *testing.T) { + for _, test := range jsonTests { + out, err := jsonutil.BuildJSON(test.in) + if test.err != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), test.err) + } else { + assert.NoError(t, err) + assert.Equal(t, string(out), test.out) + } + } +} + +func BenchmarkBuildJSON(b *testing.B) { + for i := 0; i < b.N; i++ 
{ + for _, test := range jsonTests { + jsonutil.BuildJSON(test.in) + } + } +} + +func BenchmarkStdlibJSON(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, test := range jsonTests { + json.Marshal(test.in) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,213 @@ +package jsonutil + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "reflect" + "time" +) + +// UnmarshalJSON reads a stream and unmarshals the results in object v. 
+func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + b, err := ioutil.ReadAll(stream) + if err != nil { + return err + } + + if len(b) == 0 { + return nil + } + + if err := json.Unmarshal(b, &out); err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore 
unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + errf := func() error { + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + default: + return errf() + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case 
*float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return errf() + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return errf() + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_bench_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,71 @@ +// +build bench + +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" +) + +func BenchmarkJSONRPCBuild_Simple_dynamodbPutItem(b *testing.B) { + svc := awstesting.NewClient() + + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "Operation"}, params, nil) + jsonrpc.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkJSONUtilBuild_Simple_dynamodbPutItem(b *testing.B) { + svc := awstesting.NewClient() + + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "Operation"}, params, 
nil) + _, err := jsonutil.BuildJSON(r.Params) + if err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func BenchmarkEncodingJSONMarshal_Simple_dynamodbPutItem(b *testing.B) { + params := getDynamodbPutItemParams(b) + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func getDynamodbPutItemParams(b *testing.B) *dynamodb.PutItemInput { + av, err := dynamodbattribute.ConvertToMap(struct { + Key string + Data string + }{Key: "MyKey", Data: "MyData"}) + if err != nil { + b.Fatal("benchPutItem, expect no ConvertToMap errors", err) + } + return &dynamodb.PutItemInput{ + Item: av, + TableName: aws.String("tablename"), + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1342 @@ +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + 
"github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. 
+// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. 
+// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + _ struct{} `type:"structure"` + + BlobArg []byte `type:"blob"` + + BlobMap map[string][]byte `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. 
+// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. 
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListParam [][]byte `type:"list"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. 
+// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation2 = "OperationName" + +// InputService5TestCaseOperation2Request generates a request for the InputService5TestCaseOperation2 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation2Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation2, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation2(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation2Output, error) { + req, out := c.InputService5TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation3 = "OperationName" + +// InputService5TestCaseOperation3Request generates a request for the InputService5TestCaseOperation3 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation3Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation3, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation3(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation3Output, error) { + req, out := c.InputService5TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation4 = "OperationName" + +// InputService5TestCaseOperation4Request generates a request for the InputService5TestCaseOperation4 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation4Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation4, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation4(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation4Output, error) { + req, out := c.InputService5TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation5 = "OperationName" + +// InputService5TestCaseOperation5Request generates a request for the InputService5TestCaseOperation5 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation5Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation5, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation5(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation5Output, error) { + req, out := c.InputService5TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService5TestCaseOperation6 = "OperationName" + +// InputService5TestCaseOperation6Request generates a request for the InputService5TestCaseOperation6 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation6Request(input *InputService5TestShapeInputShape) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation6, + } + + if input == nil { + input = &InputService5TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation6(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation6Output, error) { + req, out := c.InputService5TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputService5TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService5TestShapeInputShape struct { + _ struct{} `type:"structure"` + + RecursiveStruct *InputService5TestShapeRecursiveStructType `type:"structure"` +} + +type InputService5TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService5TestShapeRecursiveStructType `type:"list"` + + RecursiveMap 
map[string]*InputService5TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService5TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.foo", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "POST", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. 
+// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. 
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService7TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService7TestCaseOperation2 = "OperationName" + +// InputService7TestCaseOperation2Request generates a request for the InputService7TestCaseOperation2 operation. 
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation2Request(input *InputService7TestShapeInputShape) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService7TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation2(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation2Output, error) { + req, out := c.InputService7TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService7TestShapeInputService7TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService7TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + Name: aws.String("myname"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Name":"myname"}`, util.Trim(string(body))) + + // assert URL + 
awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService2ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService3ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"BlobArg":"Zm9v"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + 
+func TestInputService3ProtocolTestBase64EncodedBlobsCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + BlobMap: map[string][]byte{ + "key1": []byte("foo"), + "key2": []byte("bar"), + }, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"BlobMap":{"key1":"Zm9v","key2":"YmFy"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService4ProtocolTestNestedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + ListParam: [][]byte{ + []byte("foo"), + []byte("bar"), + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"ListParam":["Zm9v","YmFy"]}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, 
&aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService5TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: 
aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveList: []*InputService5TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, 
"application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveList: []*InputService5TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService5ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputShape{ + RecursiveStruct: &InputService5TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService5TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService5TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + 
assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService6ProtocolTestEmptyMapsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + Map: map[string]*string{}, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Map":{}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) + assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + +} + +func TestInputService7ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", 
r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputShape{} + req, _ := svc.InputService7TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + jsonrpc.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,111 @@ +// Package jsonrpc provides JSON RPC utilities for serialisation of AWS +// requests and responses. 
+package jsonrpc + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError} + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." 
+ req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + if req.ClientInfo.JSONVersion != "" { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service. +func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. +func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. +func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", req.HTTPResponse.Status, nil), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,816 @@ +package jsonrpc_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. 
+// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. 
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. 
+// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeBlobContainer struct { + _ struct{} `type:"structure"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + BlobMember []byte `type:"blob"` + + StructMember *OutputService2TestShapeBlobContainer `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. 
+// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StructMember *OutputService3TestShapeTimeContainer `type:"structure"` + + TimeMember *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type OutputService3TestShapeTimeContainer struct { + _ struct{} `type:"structure"` + + Foo *time.Time `locationName:"foo" type:"timestamp" timestampFormat:"unix"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. 
+// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opOutputService4TestCaseOperation2 = "OperationName" + +// OutputService4TestCaseOperation2Request generates a request for the OutputService4TestCaseOperation2 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation2Request(input *OutputService4TestShapeOutputService4TestCaseOperation2Input) (req *request.Request, output *OutputService4TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation2, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation2Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation2(input *OutputService4TestShapeOutputService4TestCaseOperation2Input) (*OutputService4TestShapeOutputShape, error) { + req, out := c.OutputService4TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation2Input struct { + _ struct{} 
`type:"structure"` +} + +type OutputService4TestShapeOutputShape struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` + + ListMemberMap []map[string]*string `type:"list"` + + ListMemberStruct []*OutputService4TestShapeStructType `type:"list"` +} + +type OutputService4TestShapeStructType struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string][]*int64 `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + JSONVersion: "", + TargetPrefix: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StrType *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, 
*out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "hi!", string(out.BlobMember)) + assert.Equal(t, "there!", string(out.StructMember.Foo)) + +} + +func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String()) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + 
svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", \"b\"]}")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Equal(t, "b", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListsCase2(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", null], \"ListMemberMap\": [{}, null, null, {}], \"ListMemberStruct\": [{}, null, null, {}]}")) + req, out := svc.OutputService4TestCaseOperation2Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Nil(t, out.ListMember[1]) + assert.Nil(t, out.ListMemberMap[1]) + assert.Nil(t, out.ListMemberMap[2]) + assert.Nil(t, out.ListMemberStruct[1]) + assert.Nil(t, out.ListMemberStruct[2]) + +} + +func TestOutputService5ProtocolTestMapsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: 
ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, int64(1), *out.MapMember["a"][0]) + assert.Equal(t, int64(2), *out.MapMember["a"][1]) + assert.Equal(t, int64(3), *out.MapMember["b"][0]) + assert.Equal(t, int64(4), *out.MapMember["b"][1]) + +} + +func TestOutputService6ProtocolTestIgnoresExtraDataCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"foo\": \"bar\"}")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + jsonrpc.UnmarshalMeta(req) + jsonrpc.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,36 @@ +// Package query provides serialisation of AWS query requests, and responses. 
+package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2196 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + 
"github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation2 = "OperationName" + +// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation2, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) { + req, out := c.InputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation3 = "OperationName" + +// InputService1TestCaseOperation3Request generates a request for the InputService1TestCaseOperation3 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation3, + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) { + req, out := c.InputService1TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *bool `type:"boolean"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. 
+// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + StructArg *InputService2TestShapeStructType `type:"structure"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService2TestShapeStructType struct { + _ struct{} `type:"structure"` + + ScalarArg *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. 
+// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + _ struct{} `type:"structure"` + + ListArg []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. 
+// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. 
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService4TestCaseOperation2 = "OperationName" + +// InputService4TestCaseOperation2Request generates a request for the InputService4TestCaseOperation2 operation. 
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation2Request(input *InputService4TestShapeInputShape) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation2, + } + + if input == nil { + input = &InputService4TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation2(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation2Output, error) { + req, out := c.InputService4TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService4TestShapeInputService4TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService4TestShapeInputShape struct { + _ struct{} `type:"structure"` + + ListArg []*string `type:"list" flattened:"true"` + + NamedListArg []*string `locationNameList:"Foo" type:"list" flattened:"true"` + + ScalarArg *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. 
+// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + MapArg map[string]*string `type:"map" flattened:"true"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. 
+// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationNameList:"item" type:"list"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. 
+// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. 
+func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + ListArg []*string `locationNameList:"ListArgLocation" type:"list" flattened:"true"` + + ScalarArg *string `type:"string"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. 
+// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + MapArg map[string]*string `type:"map"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. 
+// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. 
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + MapArg map[string]*string `locationNameKey:"TheKey" locationNameValue:"TheValue" type:"map"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. 
+// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) + return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation. 
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + } + + if input == nil { + input = &InputService10TestShapeInputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + BlobArg []byte `type:"blob"` +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. 
+// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation. 
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + } + + if input == nil { + input = &InputService11TestShapeInputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeInputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService12ProtocolTest client from just a session. 
+// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation2 = "OperationName" + +// InputService12TestCaseOperation2Request generates a request for the InputService12TestCaseOperation2 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation2, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { + req, out := c.InputService12TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation3 = "OperationName" + +// InputService12TestCaseOperation3Request generates a request for the InputService12TestCaseOperation3 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation3Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation3, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation3(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation3Output, error) { + req, out := c.InputService12TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation4 = "OperationName" + +// InputService12TestCaseOperation4Request generates a request for the InputService12TestCaseOperation4 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation4Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation4, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation4(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation4Output, error) { + req, out := c.InputService12TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation5 = "OperationName" + +// InputService12TestCaseOperation5Request generates a request for the InputService12TestCaseOperation5 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation5Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation5, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation5(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation5Output, error) { + req, out := c.InputService12TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation6 = "OperationName" + +// InputService12TestCaseOperation6Request generates a request for the InputService12TestCaseOperation6 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation6Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation6, + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation6(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation6Output, error) { + req, out := c.InputService12TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputShape struct { + _ struct{} `type:"structure"` + + RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` +} + +type InputService12TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList []*InputService12TestShapeRecursiveStructType 
`type:"list"` + + RecursiveMap map[string]*InputService12TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. +// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation2 = "OperationName" + +// InputService13TestCaseOperation2Request generates a request for the InputService13TestCaseOperation2 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { + req, out := c.InputService13TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Bar: aws.String("val2"), + Foo: aws.String("val1"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, 
`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestScalarMembersCase2(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Baz: aws.Bool(true), + } + req, _ := svc.InputService1TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Baz=true&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestScalarMembersCase3(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Baz: aws.Bool(false), + } + req, _ := svc.InputService1TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Baz=false&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + StructArg: &InputService2TestShapeStructType{ + ScalarArg: aws.String("foo"), + }, + } + req, _ := 
svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + ListArg: []*string{ + aws.String("foo"), + aws.String("bar"), + aws.String("baz"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestListTypesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + ListArg: []*string{}, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg=&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func 
TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputShape{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputShape{ + NamedListArg: []*string{ + aws.String("a"), + }, + } + req, _ := svc.InputService4TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&Foo.1=a&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestSerializeFlattenedMapTypeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := 
svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.1.key=key1&MapArg.1.value=val1&MapArg.2.key=key2&MapArg.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestNonFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&ListArg.item.1=a&ListArg.item.2=b&ListArg.item.3=c&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + ListArg: []*string{ + aws.String("a"), + aws.String("b"), + aws.String("c"), + }, + ScalarArg: aws.String("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + 
awstesting.AssertQuery(t, `Action=OperationName&ListArgLocation.1=a&ListArgLocation.2=b&ListArgLocation.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestSerializeMapTypeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + MapArg: map[string]*string{ + "key1": aws.String("val1"), + "key2": aws.String("val2"), + }, + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`, 
util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService10TestShapeInputService10TestCaseOperation1Input{ + BlobArg: []byte("foo"), + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService11TestShapeInputService11TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: 
aws.String("foo"), + }, + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService12TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := 
svc.InputService12TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveList: []*InputService12TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveList: []*InputService12TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + 
NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService12TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService12TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=foo&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=foo&RecursiveStruct.RecursiveMap.entry.2.key=bar&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{ + 
Token: aws.String("abc123"), + } + req, _ := svc.InputService13TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Token=abc123`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{} + req, _ := svc.InputService13TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Token=00000000-0000-4000-8000-000000000000`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. 
The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + 
name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." 
+ kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,40 @@ +package query + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name 
`xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) + } else { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,35 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// 
UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1750 @@ +package query_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + 
"github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. 
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. 
+// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Num *int64 `type:"integer"` + + Str *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. 
+// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. 
+// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. 
+// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. 
+// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. 
+// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*OutputService8TestShapeStructureShape `type:"list"` +} + +type OutputService8TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. 
+// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. 
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*OutputService9TestShapeStructureShape `type:"list" flattened:"true"` +} + +type OutputService9TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Baz *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. 
+// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) + return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. 
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + List []*string `locationNameList:"NamedList" type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. 
+// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) + return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. 
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService11TestShapeStructType `type:"map"` +} + +type OutputService11TestShapeStructType struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService12ProtocolTest client from just a session. 
+// svc := outputservice12protocoltest.New(mySession) +// +// // Create a OutputService12ProtocolTest client with additional configuration +// svc := outputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest { + c := p.ClientConfig("outputservice12protocoltest", cfgs...) + return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest { + svc := &OutputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService12TestCaseOperation1 = "OperationName" + +// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. 
+func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService12TestCaseOperation1, + } + + if input == nil { + input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService13ProtocolTest client from just a session. 
+// svc := outputservice13protocoltest.New(mySession) +// +// // Create a OutputService13ProtocolTest client with additional configuration +// svc := outputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService13ProtocolTest { + c := p.ClientConfig("outputservice13protocoltest", cfgs...) + return newOutputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService13ProtocolTest { + svc := &OutputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService13TestCaseOperation1 = "OperationName" + +// OutputService13TestCaseOperation1Request generates a request for the OutputService13TestCaseOperation1 operation. 
+func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *request.Request, output *OutputService13TestShapeOutputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService13TestCaseOperation1, + } + + if input == nil { + input = &OutputService13TestShapeOutputService13TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService13TestShapeOutputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (*OutputService13TestShapeOutputService13TestCaseOperation1Output, error) { + req, out := c.OutputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService13TestShapeOutputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService14ProtocolTest client from just a session. 
+// svc := outputservice14protocoltest.New(mySession) +// +// // Create a OutputService14ProtocolTest client with additional configuration +// svc := outputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService14ProtocolTest { + c := p.ClientConfig("outputservice14protocoltest", cfgs...) + return newOutputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService14ProtocolTest { + svc := &OutputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService14TestCaseOperation1 = "OperationName" + +// OutputService14TestCaseOperation1Request generates a request for the OutputService14TestCaseOperation1 operation. 
+func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *request.Request, output *OutputService14TestShapeOutputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService14TestCaseOperation1, + } + + if input == nil { + input = &OutputService14TestShapeOutputService14TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService14TestShapeOutputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (*OutputService14TestShapeOutputService14TestCaseOperation1Output, error) { + req, out := c.OutputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService14TestShapeOutputService14TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService15ProtocolTest client from just a session. 
+// svc := outputservice15protocoltest.New(mySession) +// +// // Create a OutputService15ProtocolTest client with additional configuration +// svc := outputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService15ProtocolTest { + c := p.ClientConfig("outputservice15protocoltest", cfgs...) + return newOutputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService15ProtocolTest { + svc := &OutputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService15ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService15TestCaseOperation1 = "OperationName" + +// OutputService15TestCaseOperation1Request generates a request for the OutputService15TestCaseOperation1 operation. 
+func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1Request(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (req *request.Request, output *OutputService15TestShapeOutputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService15TestCaseOperation1, + } + + if input == nil { + input = &OutputService15TestShapeOutputService15TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService15TestShapeOutputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService15ProtocolTest) OutputService15TestCaseOperation1(input *OutputService15TestShapeOutputService15TestCaseOperation1Input) (*OutputService15TestShapeOutputService15TestCaseOperation1Output, error) { + req, out := c.OutputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService15TestShapeOutputService15TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService15TestShapeOutputService15TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, 
*out.Float) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("mynamerequest-id")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "myname", *out.Str) + +} + +func TestOutputService3ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=requestid")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: 
http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123requestid")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := 
bytes.NewReader([]byte("abcrequestid")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + +} + +func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + 
assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "firstbar", *out.List[0].Bar) + assert.Equal(t, "firstbaz", *out.List[0].Baz) + assert.Equal(t, "firstfoo", *out.List[0].Foo) + assert.Equal(t, "secondbar", *out.List[1].Bar) + assert.Equal(t, "secondbaz", *out.List[1].Baz) + assert.Equal(t, "secondfoo", *out.List[1].Foo) + +} + +func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abrequestid")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.List[0]) + assert.Equal(t, "b", *out.List[1]) + +} + +func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService12ProtocolTest(sess, 
&aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService12TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarrequestid")) + req, out := svc.OutputService13TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) + req, out := svc.OutputService14TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func 
TestOutputService15ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService15TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + query.UnmarshalMeta(req) + query.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,257 @@ +// Package rest provides RESTful serialization of AWS requests and responses. 
+package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + 
if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + 
u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + buf.WriteByte('%') + buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16))) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = 
strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,193 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. 
+func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), 
v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + 
return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,356 @@ +// +build bench + +package restjson_test + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/service/elastictranscoder" +) + +func BenchmarkRESTJSONBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil) + restjson.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkRESTBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + for i 
:= 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil) + rest.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkEncodingJSONMarshal_Complex_elastictranscoderCreateJobInput(b *testing.B) { + params := restjsonBuildParms + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func BenchmarkRESTJSONBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil) + restjson.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkRESTBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + svc := awstesting.NewClient() + svc.ServiceName = "elastictranscoder" + svc.APIVersion = "2012-09-25" + + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil) + rest.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +func BenchmarkEncodingJSONMarshal_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := 
json.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +var restjsonBuildParms = &elastictranscoder.CreateJobInput{ + Input: &elastictranscoder.JobInput{ // Required + AspectRatio: aws.String("AspectRatio"), + Container: aws.String("JobContainer"), + DetectedProperties: &elastictranscoder.DetectedProperties{ + DurationMillis: aws.Int64(1), + FileSize: aws.Int64(1), + FrameRate: aws.String("FloatString"), + Height: aws.Int64(1), + Width: aws.Int64(1), + }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + FrameRate: aws.String("FrameRate"), + Interlaced: aws.String("Interlaced"), + Key: aws.String("Key"), + Resolution: aws.String("Resolution"), + }, + PipelineId: aws.String("Id"), // Required + Output: &elastictranscoder.CreateJobOutput{ + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... 
+ }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... 
+ }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + OutputKeyPrefix: aws.String("Key"), + Outputs: []*elastictranscoder.CreateJobOutput{ + { // Required + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... 
+ }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... 
+ }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + // More values... + }, + Playlists: []*elastictranscoder.CreateJobPlaylist{ + { // Required + Format: aws.String("PlaylistFormat"), + HlsContentProtection: &elastictranscoder.HlsContentProtection{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + KeyStoragePolicy: aws.String("KeyStoragePolicy"), + LicenseAcquisitionUrl: aws.String("ZeroTo512String"), + Method: aws.String("HlsContentProtectionMethod"), + }, + Name: aws.String("Filename"), + OutputKeys: []*string{ + aws.String("Key"), // Required + // More values... 
+ }, + PlayReadyDrm: &elastictranscoder.PlayReadyDrm{ + Format: aws.String("PlayReadyDrmFormatString"), + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("NonEmptyBase64EncodedString"), + KeyId: aws.String("KeyIdGuid"), + KeyMd5: aws.String("NonEmptyBase64EncodedString"), + LicenseAcquisitionUrl: aws.String("OneTo512String"), + }, + }, + // More values... + }, + UserMetadata: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2571 @@ +package restjson_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ 
= aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. 
+// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
+func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Foo *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. 
+// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService3TestShapeInputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string]*string `location:"querystring" type:"map"` +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. 
+// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. 
+func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string][]*string `location:"querystring" type:"map"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. 
+// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. 
+func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
+func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + Config *InputService6TestShapeStructType `type:"structure"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService6TestShapeStructType struct { + _ struct{} `type:"structure"` + + A *string `type:"string"` + + B *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + Checksum *string `location:"header" locationName:"x-amz-checksum" type:"string"` + + Config *InputService7TestShapeStructType `type:"structure"` + + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string"` +} + +type 
InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService7TestShapeStructType struct { + _ struct{} `type:"structure"` + + A *string `type:"string"` + + B *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. 
+func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/vaults/{vaultName}/archives", + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadSeeker `locationName:"body" type:"blob"` + + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a InputService9ProtocolTest client from just a session. +// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. 
+func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *string `locationName:"foo" type:"string"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. 
+// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) + return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation. 
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputShape) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService10TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputShape) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService10TestCaseOperation2 = "OperationName" + +// InputService10TestCaseOperation2Request generates a request for the InputService10TestCaseOperation2 operation. 
+func (c *InputService10ProtocolTest) InputService10TestCaseOperation2Request(input *InputService10TestShapeInputShape) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService10TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService10TestShapeInputService10TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation2(input *InputService10TestShapeInputShape) (*InputService10TestShapeInputService10TestCaseOperation2Output, error) { + req, out := c.InputService10TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService10TestShapeInputService10TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService10TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. 
+// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation. 
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputShape) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService11TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputShape) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService11TestCaseOperation2 = "OperationName" + +// InputService11TestCaseOperation2Request generates a request for the InputService11TestCaseOperation2 operation. 
+func (c *InputService11ProtocolTest) InputService11TestCaseOperation2Request(input *InputService11TestShapeInputShape) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService11TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService11TestShapeInputService11TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation2(input *InputService11TestShapeInputShape) (*InputService11TestShapeInputService11TestCaseOperation2Output, error) { + req, out := c.InputService11TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeFooShape struct { + _ struct{} `locationName:"foo" type:"structure"` + + Baz *string `locationName:"baz" type:"string"` +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService11TestShapeInputService11TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService11TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *InputService11TestShapeFooShape `locationName:"foo" type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a InputService12ProtocolTest client from just a session. +// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService12TestCaseOperation2 = "OperationName" + +// InputService12TestCaseOperation2Request generates a request for the InputService12TestCaseOperation2 operation. 
+func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputShape) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path?abc=mno", + } + + if input == nil { + input = &InputService12TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { + req, out := c.InputService12TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputService12TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService12TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Foo *string `location:"querystring" locationName:"param-name" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. 
+// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation2 = "OperationName" + +// InputService13TestCaseOperation2Request generates a request for the InputService13TestCaseOperation2 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { + req, out := c.InputService13TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation3 = "OperationName" + +// InputService13TestCaseOperation3Request generates a request for the InputService13TestCaseOperation3 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation3Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation3(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation3Output, error) { + req, out := c.InputService13TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation4 = "OperationName" + +// InputService13TestCaseOperation4Request generates a request for the InputService13TestCaseOperation4 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation4Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation4(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation4Output, error) { + req, out := c.InputService13TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation5 = "OperationName" + +// InputService13TestCaseOperation5Request generates a request for the InputService13TestCaseOperation5 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation5Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation5, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation5(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation5Output, error) { + req, out := c.InputService13TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService13TestCaseOperation6 = "OperationName" + +// InputService13TestCaseOperation6Request generates a request for the InputService13TestCaseOperation6 operation. 
+func (c *InputService13ProtocolTest) InputService13TestCaseOperation6Request(input *InputService13TestShapeInputShape) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation6, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService13TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation6(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation6Output, error) { + req, out := c.InputService13TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputService13TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService13TestShapeInputShape struct { + _ struct{} `type:"structure"` + + RecursiveStruct *InputService13TestShapeRecursiveStructType `type:"structure"` +} + +type InputService13TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse *string `type:"string"` + + RecursiveList 
[]*InputService13TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService13TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService13TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService14ProtocolTest client from just a session. +// svc := inputservice14protocoltest.New(mySession) +// +// // Create a InputService14ProtocolTest client with additional configuration +// svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest { + c := p.ClientConfig("inputservice14protocoltest", cfgs...) + return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService14ProtocolTest { + svc := &InputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService14ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService14TestCaseOperation1 = "OperationName" + +// InputService14TestCaseOperation1Request generates a request for the InputService14TestCaseOperation1 operation. 
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputShape) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService14TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService14TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService14TestShapeInputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { + req, out := c.InputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService14TestCaseOperation2 = "OperationName" + +// InputService14TestCaseOperation2Request generates a request for the InputService14TestCaseOperation2 operation. 
+func (c *InputService14ProtocolTest) InputService14TestCaseOperation2Request(input *InputService14TestShapeInputShape) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService14TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService14TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService14TestShapeInputService14TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation2(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) { + req, out := c.InputService14TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService14TestShapeInputService14TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService14TestShapeInputService14TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService14TestShapeInputShape struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"unix"` + + TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a InputService15ProtocolTest client from just a session. +// svc := inputservice15protocoltest.New(mySession) +// +// // Create a InputService15ProtocolTest client with additional configuration +// svc := inputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService15ProtocolTest { + c := p.ClientConfig("inputservice15protocoltest", cfgs...) + return newInputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService15ProtocolTest { + svc := &InputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService15ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService15TestCaseOperation1 = "OperationName" + +// InputService15TestCaseOperation1Request generates a request for the InputService15TestCaseOperation1 operation. 
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputService15TestCaseOperation1Input) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService15TestShapeInputService15TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputService15TestCaseOperation1Input) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { + req, out := c.InputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService15TestShapeInputService15TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `locationName:"timestamp_location" type:"timestamp" timestampFormat:"unix"` +} + +type InputService15TestShapeInputService15TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService16ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService16ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService16ProtocolTest client from just a session. 
+// svc := inputservice16protocoltest.New(mySession) +// +// // Create a InputService16ProtocolTest client with additional configuration +// svc := inputservice16protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService16ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService16ProtocolTest { + c := p.ClientConfig("inputservice16protocoltest", cfgs...) + return newInputService16ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService16ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService16ProtocolTest { + svc := &InputService16ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice16protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService16ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService16TestCaseOperation1 = "OperationName" + +// InputService16TestCaseOperation1Request generates a request for the InputService16TestCaseOperation1 operation. 
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) { + req, out := c.InputService16TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation2 = "OperationName" + +// InputService16TestCaseOperation2Request generates a request for the InputService16TestCaseOperation2 operation. 
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation2Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation2(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation2Output, error) { + req, out := c.InputService16TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService16TestShapeInputService16TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputService16TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestURIParameterOnlyWithNoLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputService1TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo", r.URL.String()) + + // assert headers + 
+} + +func TestInputService2ProtocolTestURIParameterOnlyWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/bar", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestStringToStringMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputService3TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + QueryDoc: map[string]*string{ + "bar": aws.String("baz"), + "fizz": aws.String("buzz"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String()) + + // assert headers + +} + +func TestInputService4ProtocolTestStringToStringListMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + PipelineId: aws.String("id"), + QueryDoc: map[string][]*string{ + "fizz": { + aws.String("buzz"), + aws.String("pop"), + }, + "foo": { + aws.String("bar"), + aws.String("baz"), + }, + }, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + 
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestURIParameterAndQuerystringParamsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + Ascending: aws.String("true"), + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestURIParameterQuerystringParamsAndJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + Ascending: aws.String("true"), + Config: &InputService6TestShapeStructType{ + A: aws.String("one"), + B: aws.String("two"), + }, + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestURIParameterQuerystringParamsHeadersAndJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := 
NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + Ascending: aws.String("true"), + Checksum: aws.String("12345"), + Config: &InputService7TestShapeStructType{ + A: aws.String("one"), + B: aws.String("two"), + }, + PageToken: aws.String("bar"), + PipelineId: aws.String("foo"), + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) + + // assert headers + assert.Equal(t, "12345", r.Header.Get("x-amz-checksum")) + +} + +func TestInputService8ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + Body: aws.ReadSeekCloser(bytes.NewBufferString("contents")), + Checksum: aws.String("foo"), + VaultName: aws.String("name"), + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `contents`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/vaults/name/archives", r.URL.String()) + + // assert headers + assert.Equal(t, "foo", r.Header.Get("x-amz-sha256-tree-hash")) + +} + +func TestInputService9ProtocolTestStringPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: 
aws.String("https://test")}) + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBlobPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService10TestShapeInputShape{ + Foo: []byte("bar"), + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBlobPayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService10TestShapeInputShape{} + req, _ := svc.InputService10TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestStructurePayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService11TestShapeInputShape{ + Foo: &InputService11TestShapeFooShape{ + Baz: aws.String("bar"), 
+ }, + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"baz":"bar"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestStructurePayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService11TestShapeInputShape{} + req, _ := svc.InputService11TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{} + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService12ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputShape{ + Foo: aws.String(""), + } + req, _ := svc.InputService12TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, 
"https://test/path?abc=mno¶m-name=", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService13TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService13TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{ + 
RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService13TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveList: []*InputService13TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService13TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := 
&InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveList: []*InputService13TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService13TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputShape{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService13TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService13TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService14ProtocolTestTimestampValuesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: 
aws.String("https://test")}) + input := &InputService14TestShapeInputShape{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService14TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService14ProtocolTestTimestampValuesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService14TestShapeInputShape{ + TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService14TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) + +} + +func TestInputService15ProtocolTestNamedLocationsInJSONBodyCase1(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService15TestShapeInputService15TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService15TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"timestamp_location":1422172800}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func 
TestInputService16ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := svc.InputService16TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{} + req, _ := svc.InputService16TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restjson.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,91 @@ +// Package restjson 
provides RESTful JSON serialisation of AWS +// requests and responses. +package restjson + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restjson.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restjson protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restjson.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restjson protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request for the REST JSON protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. 
+func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST JSON protocol. +func UnmarshalError(r *request.Request) { + code := r.HTTPResponse.Header.Get("X-Amzn-Errortype") + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading REST JSON error response", err) + return + } + if len(bodyBytes) == 0 { + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + r.Error = awserr.New("SerializationError", "failed decoding REST JSON error response", err) + return + } + + if code == "" { + code = jsonErr.Code + } + + code = strings.SplitN(code, ":", 2)[0] + r.Error = awserr.NewRequestFailure( + awserr.New(code, jsonErr.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1325 @@ +package restjson_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + 
"github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. 
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + ImaHeader *string `location:"header" type:"string"` + + ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"` + + Long *int64 `type:"long"` + + Num *int64 `type:"integer"` + + Status *int64 `location:"statusCode" type:"integer"` + + Str *string `type:"string"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeBlobContainer struct { + _ struct{} `type:"structure"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + BlobMember []byte `type:"blob"` + + StructMember *OutputService2TestShapeBlobContainer `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. +// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StructMember *OutputService3TestShapeTimeContainer `type:"structure"` + + TimeMember *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +type OutputService3TestShapeTimeContainer struct { + _ struct{} `type:"structure"` + + Foo *time.Time `locationName:"foo" type:"timestamp" timestampFormat:"unix"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. +// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. +// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*OutputService5TestShapeSingleStruct `type:"list"` +} + +type OutputService5TestShapeSingleStruct struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string][]*int64 `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. 
+// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + MapMember map[string]*time.Time `type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. 
+// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + StrType *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. 
+// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. 
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + AllHeaders map[string]*string `location:"headers" type:"map"` + + PrefixedHeaders map[string]*string `location:"headers" locationName:"X-" type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. 
+// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) + return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. 
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeBodyStructure struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Data"` + + Data *OutputService10TestShapeBodyStructure `type:"structure"` + + Header *string `location:"header" locationName:"X-Foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. 
+// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) + return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. 
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Stream"` + + Stream []byte `type:"blob"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // 
assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, int64(200), *out.Status) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "hi!", string(out.BlobMember)) + assert.Equal(t, "there!", string(out.StructMember.Foo)) + +} + +func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + 
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String()) + +} + +func TestOutputService4ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", \"b\"]}")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0]) + assert.Equal(t, "b", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestListsWithStructureMemberCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"ListMember\": [{\"Foo\": \"a\"}, {\"Foo\": \"b\"}]}")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.ListMember[0].Foo) + assert.Equal(t, "b", *out.ListMember[1].Foo) + +} + +func TestOutputService6ProtocolTestMapsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}")) + req, out := 
svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, int64(1), *out.MapMember["a"][0]) + assert.Equal(t, int64(2), *out.MapMember["a"][1]) + assert.Equal(t, int64(3), *out.MapMember["b"][0]) + assert.Equal(t, int64(4), *out.MapMember["b"][1]) + +} + +func TestOutputService7ProtocolTestComplexMapValuesCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": 1398796238, \"b\": 1398796238}}")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["a"].String()) + assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["b"].String()) + +} + +func TestOutputService8ProtocolTestIgnoresExtraDataCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"foo\": \"bar\"}")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // 
assert response + assert.NotNil(t, out) // ensure out variable is used + +} + +func TestOutputService9ProtocolTestSupportsHeaderMapsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{}")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("Content-Length", "10") + req.HTTPResponse.Header.Set("X-Bam", "boo") + req.HTTPResponse.Header.Set("X-Foo", "bar") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "10", *out.AllHeaders["Content-Length"]) + assert.Equal(t, "boo", *out.AllHeaders["X-Bam"]) + assert.Equal(t, "bar", *out.AllHeaders["X-Foo"]) + assert.Equal(t, "boo", *out.PrefixedHeaders["Bam"]) + assert.Equal(t, "bar", *out.PrefixedHeaders["Foo"]) + +} + +func TestOutputService10ProtocolTestJSONPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("{\"Foo\": \"abc\"}")) + req, out := svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("X-Foo", "baz") + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.Data.Foo) + assert.Equal(t, "baz", *out.Header) + +} + +func TestOutputService11ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := 
NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restjson.UnmarshalMeta(req) + restjson.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", string(out.Stream)) + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,246 @@ +// +build bench + +package restxml_test + +import ( + "testing" + + "bytes" + "encoding/xml" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/service/cloudfront" +) + +func BenchmarkRESTXMLBuild_Complex_cloudfrontCreateDistribution(b *testing.B) { + params := restxmlBuildCreateDistroParms + + op := &request.Operation{ + Name: "CreateDistribution", + HTTPMethod: "POST", + HTTPPath: "/2015-04-17/distribution/{DistributionId}/invalidation", + } + + benchRESTXMLBuild(b, op, params) +} + +func BenchmarkRESTXMLBuild_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) { + params := &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: 
aws.String("string"), + } + op := &request.Operation{ + Name: "DeleteStreamingDistribution", + HTTPMethod: "DELETE", + HTTPPath: "/2015-04-17/streaming-distribution/{Id}", + } + benchRESTXMLBuild(b, op, params) +} + +func BenchmarkEncodingXMLMarshal_Simple_cloudfrontDeleteStreamingDistribution(b *testing.B) { + params := &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + + for i := 0; i < b.N; i++ { + buf := &bytes.Buffer{} + encoder := xml.NewEncoder(buf) + if err := encoder.Encode(params); err != nil { + b.Fatal("Unexpected error", err) + } + } +} + +func benchRESTXMLBuild(b *testing.B, op *request.Operation, params interface{}) { + svc := awstesting.NewClient() + svc.ServiceName = "cloudfront" + svc.APIVersion = "2015-04-17" + + for i := 0; i < b.N; i++ { + r := svc.NewRequest(op, params, nil) + restxml.Build(r) + if r.Error != nil { + b.Fatal("Unexpected error", r.Error) + } + } +} + +var restxmlBuildCreateDistroParms = &cloudfront.CreateDistributionInput{ + DistributionConfig: &cloudfront.DistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + }, + MinTTL: aws.Int64(1), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + Enabled: aws.Bool(true), // Required + Origins: &cloudfront.Origins{ // Required + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.Origin{ + { // Required + DomainName: aws.String("string"), // Required + Id: aws.String("string"), // Required + CustomOriginConfig: &cloudfront.CustomOriginConfig{ + HTTPPort: aws.Int64(1), // Required + HTTPSPort: aws.Int64(1), // Required + OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required + }, + OriginPath: aws.String("string"), + S3OriginConfig: &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String("string"), // Required + }, + }, + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + CacheBehaviors: &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CacheBehavior{ + { // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + PathPattern: aws.String("string"), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + // More values... + }, + }, + CustomErrorResponses: &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CustomErrorResponse{ + { // Required + ErrorCode: aws.Int64(1), // Required + ErrorCachingMinTTL: aws.Int64(1), + ResponseCode: aws.String("string"), + ResponsePagePath: aws.String("string"), + }, + // More values... 
+ }, + }, + DefaultRootObject: aws.String("string"), + Logging: &cloudfront.LoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + IncludeCookies: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + Restrictions: &cloudfront.Restrictions{ + GeoRestriction: &cloudfront.GeoRestriction{ // Required + Quantity: aws.Int64(1), // Required + RestrictionType: aws.String("GeoRestrictionType"), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + ViewerCertificate: &cloudfront.ViewerCertificate{ + CloudFrontDefaultCertificate: aws.Bool(true), + IAMCertificateId: aws.String("string"), + MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), + SSLSupportMethod: aws.String("SSLSupportMethod"), + }, + }, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3548 @@ +package restxml_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + 
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService1ProtocolTest client from just a session. +// svc := inputservice1protocoltest.New(mySession) +// +// // Create a InputService1ProtocolTest client with additional configuration +// svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { + c := p.ClientConfig("inputservice1protocoltest", cfgs...) + return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService1ProtocolTest { + svc := &InputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService1TestCaseOperation1 = "OperationName" + +// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { + req, out := c.InputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation2 = "OperationName" + +// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation2, + HTTPMethod: "PUT", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) { + req, out := c.InputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService1TestCaseOperation3 = "OperationName" + +// InputService1TestCaseOperation3Request generates a request for the InputService1TestCaseOperation3 operation. 
+func (c *InputService1ProtocolTest) InputService1TestCaseOperation3Request(input *InputService1TestShapeInputService1TestCaseOperation3Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService1TestCaseOperation3, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService1TestShapeInputService1TestCaseOperation3Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService1TestShapeInputService1TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService1ProtocolTest) InputService1TestCaseOperation3(input *InputService1TestShapeInputService1TestCaseOperation3Input) (*InputService1TestShapeInputService1TestCaseOperation3Output, error) { + req, out := c.InputService1TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +type InputService1TestShapeInputService1TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Input struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputService1TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService1TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + Name *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService2ProtocolTest client from just a session. +// svc := inputservice2protocoltest.New(mySession) +// +// // Create a InputService2ProtocolTest client with additional configuration +// svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { + c := p.ClientConfig("inputservice2protocoltest", cfgs...) + return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService2ProtocolTest { + svc := &InputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService2ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService2TestCaseOperation1 = "OperationName" + +// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService2TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService2TestShapeInputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService2TestShapeInputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { + req, out := c.InputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService2TestShapeInputService2TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + First *bool `type:"boolean"` + + Fourth *int64 `type:"integer"` + + Second *bool `type:"boolean"` + + Third *float64 `type:"float"` +} + +type InputService2TestShapeInputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService3ProtocolTest client from just a session. +// svc := inputservice3protocoltest.New(mySession) +// +// // Create a InputService3ProtocolTest client with additional configuration +// svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest { + c := p.ClientConfig("inputservice3protocoltest", cfgs...) + return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService3ProtocolTest { + svc := &InputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService3ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService3TestCaseOperation1 = "OperationName" + +// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { + req, out := c.InputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService3TestCaseOperation2 = "OperationName" + +// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation. 
+func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService3TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService3TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService3TestShapeInputService3TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { + req, out := c.InputService3TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService3TestShapeInputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputService3TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService3TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + SubStructure *InputService3TestShapeSubStructure `type:"structure"` +} + +type InputService3TestShapeSubStructure struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService4ProtocolTest client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService4ProtocolTest client from just a session. +// svc := inputservice4protocoltest.New(mySession) +// +// // Create a InputService4ProtocolTest client with additional configuration +// svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest { + c := p.ClientConfig("inputservice4protocoltest", cfgs...) + return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService4ProtocolTest { + svc := &InputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService4ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService4TestCaseOperation1 = "OperationName" + +// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService4TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService4TestShapeInputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService4TestShapeInputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { + req, out := c.InputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService4TestShapeInputService4TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Description *string `type:"string"` + + SubStructure *InputService4TestShapeSubStructure `type:"structure"` +} + +type InputService4TestShapeInputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService4TestShapeSubStructure struct { + _ struct{} `type:"structure"` + + Bar *string `type:"string"` + + Foo *string `type:"string"` +} + +//The service client's operations are 
safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService5ProtocolTest client from just a session. +// svc := inputservice5protocoltest.New(mySession) +// +// // Create a InputService5ProtocolTest client with additional configuration +// svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest { + c := p.ClientConfig("inputservice5protocoltest", cfgs...) + return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService5ProtocolTest { + svc := &InputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService5ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService5TestCaseOperation1 = "OperationName" + +// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService5TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService5TestShapeInputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService5TestShapeInputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { + req, out := c.InputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService5TestShapeInputService5TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `type:"list"` +} + +type InputService5TestShapeInputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService6ProtocolTest client from just a session. +// svc := inputservice6protocoltest.New(mySession) +// +// // Create a InputService6ProtocolTest client with additional configuration +// svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest { + c := p.ClientConfig("inputservice6protocoltest", cfgs...) + return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService6ProtocolTest { + svc := &InputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService6ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService6TestCaseOperation1 = "OperationName" + +// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService6TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService6TestShapeInputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService6TestShapeInputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { + req, out := c.InputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService6TestShapeInputService6TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `locationName:"AlternateName" locationNameList:"NotMember" type:"list"` +} + +type InputService6TestShapeInputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService7ProtocolTest client from just a session. +// svc := inputservice7protocoltest.New(mySession) +// +// // Create a InputService7ProtocolTest client with additional configuration +// svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest { + c := p.ClientConfig("inputservice7protocoltest", cfgs...) + return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService7ProtocolTest { + svc := &InputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService7ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService7TestCaseOperation1 = "OperationName" + +// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService7TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService7TestShapeInputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService7TestShapeInputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { + req, out := c.InputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService7TestShapeInputService7TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `type:"list" flattened:"true"` +} + +type InputService7TestShapeInputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService8ProtocolTest client from just a session. +// svc := inputservice8protocoltest.New(mySession) +// +// // Create a InputService8ProtocolTest client with additional configuration +// svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest { + c := p.ClientConfig("inputservice8protocoltest", cfgs...) + return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService8ProtocolTest { + svc := &InputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService8ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService8TestCaseOperation1 = "OperationName" + +// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService8TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService8TestShapeInputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService8TestShapeInputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { + req, out := c.InputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService8TestShapeInputService8TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*string `locationName:"item" type:"list" flattened:"true"` +} + +type InputService8TestShapeInputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService9ProtocolTest client from just a session. +// svc := inputservice9protocoltest.New(mySession) +// +// // Create a InputService9ProtocolTest client with additional configuration +// svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest { + c := p.ClientConfig("inputservice9protocoltest", cfgs...) + return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService9ProtocolTest { + svc := &InputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService9ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService9TestCaseOperation1 = "OperationName" + +// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService9TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService9TestShapeInputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService9TestShapeInputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { + req, out := c.InputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService9TestShapeInputService9TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + ListParam []*InputService9TestShapeSingleFieldStruct `locationName:"item" type:"list" flattened:"true"` +} + +type InputService9TestShapeInputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService9TestShapeSingleFieldStruct struct { + _ struct{} `type:"structure"` + + Element *string `locationName:"value" type:"string"` +} + +//The service client's operations are safe to 
be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService10ProtocolTest client from just a session. +// svc := inputservice10protocoltest.New(mySession) +// +// // Create a InputService10ProtocolTest client with additional configuration +// svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest { + c := p.ClientConfig("inputservice10protocoltest", cfgs...) + return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService10ProtocolTest { + svc := &InputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService10ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService10TestCaseOperation1 = "OperationName" + +// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation. +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService10TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/2014-01-01/hostedzone", + } + + if input == nil { + input = &InputService10TestShapeInputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService10TestShapeInputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { + req, out := c.InputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService10TestShapeInputService10TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + StructureParam *InputService10TestShapeStructureShape `type:"structure"` +} + +type InputService10TestShapeInputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService10TestShapeStructureShape struct { + _ struct{} `type:"structure"` + + B []byte `locationName:"b" type:"blob"` + + T *time.Time `locationName:"t" type:"timestamp" 
timestampFormat:"iso8601"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService11ProtocolTest client from just a session. +// svc := inputservice11protocoltest.New(mySession) +// +// // Create a InputService11ProtocolTest client with additional configuration +// svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { + c := p.ClientConfig("inputservice11protocoltest", cfgs...) + return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService11ProtocolTest { + svc := &InputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService11ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService11TestCaseOperation1 = "OperationName" + +// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation. +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService11TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService11TestShapeInputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService11TestShapeInputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { + req, out := c.InputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService11TestShapeInputService11TestCaseOperation1Input struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + Foo map[string]*string `location:"headers" locationName:"x-foo-" type:"map"` +} + +type InputService11TestShapeInputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService12ProtocolTest client from just a session. +// svc := inputservice12protocoltest.New(mySession) +// +// // Create a InputService12ProtocolTest client with additional configuration +// svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { + c := p.ClientConfig("inputservice12protocoltest", cfgs...) + return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService12ProtocolTest { + svc := &InputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService12ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService12TestCaseOperation1 = "OperationName" + +// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation. +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputService12TestCaseOperation1Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService12TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService12TestShapeInputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService12TestShapeInputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputService12TestCaseOperation1Input) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { + req, out := c.InputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService12TestShapeInputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string]*string `location:"querystring" type:"map"` +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. +// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputService13TestCaseOperation1Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", + } + + if input == nil { + input = &InputService13TestShapeInputService13TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputService13TestCaseOperation1Input) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService13TestShapeInputService13TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + PipelineId *string `location:"uri" type:"string"` + + QueryDoc map[string][]*string `location:"querystring" type:"map"` +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService14ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService14ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService14ProtocolTest client from just a session. +// svc := inputservice14protocoltest.New(mySession) +// +// // Create a InputService14ProtocolTest client with additional configuration +// svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest { + c := p.ClientConfig("inputservice14protocoltest", cfgs...) + return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService14ProtocolTest { + svc := &InputService14ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice14protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService14ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService14TestCaseOperation1 = "OperationName" + +// InputService14TestCaseOperation1Request generates a request for the InputService14TestCaseOperation1 operation. +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputService14TestCaseOperation1Input) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService14TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService14TestShapeInputService14TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService14TestShapeInputService14TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputService14TestCaseOperation1Input) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { + req, out := c.InputService14TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService14TestShapeInputService14TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *string `locationName:"foo" type:"string"` +} + +type InputService14TestShapeInputService14TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService15ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService15ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService15ProtocolTest client from just a session. +// svc := inputservice15protocoltest.New(mySession) +// +// // Create a InputService15ProtocolTest client with additional configuration +// svc := inputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService15ProtocolTest { + c := p.ClientConfig("inputservice15protocoltest", cfgs...) + return newInputService15ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService15ProtocolTest { + svc := &InputService15ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice15protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService15ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService15TestCaseOperation1 = "OperationName" + +// InputService15TestCaseOperation1Request generates a request for the InputService15TestCaseOperation1 operation. +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { + req, out := c.InputService15TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService15TestCaseOperation2 = "OperationName" + +// InputService15TestCaseOperation2Request generates a request for the InputService15TestCaseOperation2 operation. 
+func (c *InputService15ProtocolTest) InputService15TestCaseOperation2Request(input *InputService15TestShapeInputShape) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService15TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService15TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService15TestShapeInputService15TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService15ProtocolTest) InputService15TestCaseOperation2(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation2Output, error) { + req, out := c.InputService15TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService15TestShapeInputService15TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService15TestShapeInputService15TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService15TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo []byte `locationName:"foo" type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService16ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService16ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService16ProtocolTest client from just a session. 
+// svc := inputservice16protocoltest.New(mySession) +// +// // Create a InputService16ProtocolTest client with additional configuration +// svc := inputservice16protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService16ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService16ProtocolTest { + c := p.ClientConfig("inputservice16protocoltest", cfgs...) + return newInputService16ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService16ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService16ProtocolTest { + svc := &InputService16ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice16protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService16ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService16TestCaseOperation1 = "OperationName" + +// InputService16TestCaseOperation1Request generates a request for the InputService16TestCaseOperation1 operation. 
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) { + req, out := c.InputService16TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation2 = "OperationName" + +// InputService16TestCaseOperation2Request generates a request for the InputService16TestCaseOperation2 operation. 
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation2Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation2(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation2Output, error) { + req, out := c.InputService16TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation3 = "OperationName" + +// InputService16TestCaseOperation3Request generates a request for the InputService16TestCaseOperation3 operation. 
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation3Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation3(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation3Output, error) { + req, out := c.InputService16TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService16TestCaseOperation4 = "OperationName" + +// InputService16TestCaseOperation4Request generates a request for the InputService16TestCaseOperation4 operation. 
+func (c *InputService16ProtocolTest) InputService16TestCaseOperation4Request(input *InputService16TestShapeInputShape) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService16TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService16TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService16TestShapeInputService16TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService16ProtocolTest) InputService16TestCaseOperation4(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation4Output, error) { + req, out := c.InputService16TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +type InputService16TestShapeFooShape struct { + _ struct{} `locationName:"foo" type:"structure"` + + Baz *string `locationName:"baz" type:"string"` +} + +type InputService16TestShapeInputService16TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputService16TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputService16TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputService16TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService16TestShapeInputShape struct { + _ struct{} `type:"structure" payload:"Foo"` + + Foo *InputService16TestShapeFooShape `locationName:"foo" type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type InputService17ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService17ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService17ProtocolTest client from just a session. +// svc := inputservice17protocoltest.New(mySession) +// +// // Create a InputService17ProtocolTest client with additional configuration +// svc := inputservice17protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService17ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService17ProtocolTest { + c := p.ClientConfig("inputservice17protocoltest", cfgs...) + return newInputService17ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService17ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService17ProtocolTest { + svc := &InputService17ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice17protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService17ProtocolTest operation and runs any +// custom request initialization. 
+func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService17TestCaseOperation1 = "OperationName" + +// InputService17TestCaseOperation1Request generates a request for the InputService17TestCaseOperation1 operation. +func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputService17TestCaseOperation1Input) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService17TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InputService17TestShapeInputService17TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService17TestShapeInputService17TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputService17TestCaseOperation1Input) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) { + req, out := c.InputService17TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService17TestShapeGrant struct { + _ struct{} `locationName:"Grant" type:"structure"` + + Grantee *InputService17TestShapeGrantee `type:"structure"` +} + +type InputService17TestShapeGrantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + EmailAddress *string `type:"string"` + + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true"` +} + +type InputService17TestShapeInputService17TestCaseOperation1Input struct { + _ struct{} `type:"structure" payload:"Grant"` + + Grant 
*InputService17TestShapeGrant `locationName:"Grant" type:"structure"` +} + +type InputService17TestShapeInputService17TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService18ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService18ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService18ProtocolTest client from just a session. +// svc := inputservice18protocoltest.New(mySession) +// +// // Create a InputService18ProtocolTest client with additional configuration +// svc := inputservice18protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService18ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService18ProtocolTest { + c := p.ClientConfig("inputservice18protocoltest", cfgs...) + return newInputService18ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService18ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService18ProtocolTest { + svc := &InputService18ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice18protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService18ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService18TestCaseOperation1 = "OperationName" + +// InputService18TestCaseOperation1Request generates a request for the InputService18TestCaseOperation1 operation. 
+func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputService18TestCaseOperation1Input) (req *request.Request, output *InputService18TestShapeInputService18TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService18TestCaseOperation1, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &InputService18TestShapeInputService18TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService18TestShapeInputService18TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputService18TestCaseOperation1Input) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) { + req, out := c.InputService18TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService18TestShapeInputService18TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" type:"string"` + + Key *string `location:"uri" type:"string"` +} + +type InputService18TestShapeInputService18TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService19ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService19ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService19ProtocolTest client from just a session. 
+// svc := inputservice19protocoltest.New(mySession) +// +// // Create a InputService19ProtocolTest client with additional configuration +// svc := inputservice19protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService19ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService19ProtocolTest { + c := p.ClientConfig("inputservice19protocoltest", cfgs...) + return newInputService19ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService19ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService19ProtocolTest { + svc := &InputService19ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice19protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService19ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService19ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService19TestCaseOperation1 = "OperationName" + +// InputService19TestCaseOperation1Request generates a request for the InputService19TestCaseOperation1 operation. 
+func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputShape) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService19TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService19TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService19TestShapeInputService19TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) { + req, out := c.InputService19TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService19TestCaseOperation2 = "OperationName" + +// InputService19TestCaseOperation2Request generates a request for the InputService19TestCaseOperation2 operation. 
+func (c *InputService19ProtocolTest) InputService19TestCaseOperation2Request(input *InputService19TestShapeInputShape) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService19TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path?abc=mno", + } + + if input == nil { + input = &InputService19TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService19TestShapeInputService19TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService19ProtocolTest) InputService19TestCaseOperation2(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation2Output, error) { + req, out := c.InputService19TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService19TestShapeInputService19TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService19TestShapeInputService19TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService19TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Foo *string `location:"querystring" locationName:"param-name" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService20ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService20ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService20ProtocolTest client from just a session. 
+// svc := inputservice20protocoltest.New(mySession) +// +// // Create a InputService20ProtocolTest client with additional configuration +// svc := inputservice20protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService20ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService20ProtocolTest { + c := p.ClientConfig("inputservice20protocoltest", cfgs...) + return newInputService20ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService20ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService20ProtocolTest { + svc := &InputService20ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice20protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService20ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService20ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService20TestCaseOperation1 = "OperationName" + +// InputService20TestCaseOperation1Request generates a request for the InputService20TestCaseOperation1 operation. 
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation1Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService20TestShapeInputService20TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation1(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation1Output, error) { + req, out := c.InputService20TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation2 = "OperationName" + +// InputService20TestCaseOperation2Request generates a request for the InputService20TestCaseOperation2 operation. 
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation2Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService20TestShapeInputService20TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation2(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation2Output, error) { + req, out := c.InputService20TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation3 = "OperationName" + +// InputService20TestCaseOperation3Request generates a request for the InputService20TestCaseOperation3 operation. 
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation3Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation3Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation3, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService20TestShapeInputService20TestCaseOperation3Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation3(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation3Output, error) { + req, out := c.InputService20TestCaseOperation3Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation4 = "OperationName" + +// InputService20TestCaseOperation4Request generates a request for the InputService20TestCaseOperation4 operation. 
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation4Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation4Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation4, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService20TestShapeInputService20TestCaseOperation4Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation4(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation4Output, error) { + req, out := c.InputService20TestCaseOperation4Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation5 = "OperationName" + +// InputService20TestCaseOperation5Request generates a request for the InputService20TestCaseOperation5 operation. 
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation5Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation5Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation5, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService20TestShapeInputService20TestCaseOperation5Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation5(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation5Output, error) { + req, out := c.InputService20TestCaseOperation5Request(input) + err := req.Send() + return out, err +} + +const opInputService20TestCaseOperation6 = "OperationName" + +// InputService20TestCaseOperation6Request generates a request for the InputService20TestCaseOperation6 operation. 
+func (c *InputService20ProtocolTest) InputService20TestCaseOperation6Request(input *InputService20TestShapeInputShape) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService20TestCaseOperation6, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService20TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService20TestShapeInputService20TestCaseOperation6Output{} + req.Data = output + return +} + +func (c *InputService20ProtocolTest) InputService20TestCaseOperation6(input *InputService20TestShapeInputShape) (*InputService20TestShapeInputService20TestCaseOperation6Output, error) { + req, out := c.InputService20TestCaseOperation6Request(input) + err := req.Send() + return out, err +} + +type InputService20TestShapeInputService20TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation3Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation4Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation5Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputService20TestCaseOperation6Output struct { + _ struct{} `type:"structure"` +} + +type InputService20TestShapeInputShape struct { + _ struct{} `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` + + RecursiveStruct *InputService20TestShapeRecursiveStructType `type:"structure"` +} + +type InputService20TestShapeRecursiveStructType struct { + _ struct{} `type:"structure"` + + NoRecurse 
*string `type:"string"` + + RecursiveList []*InputService20TestShapeRecursiveStructType `type:"list"` + + RecursiveMap map[string]*InputService20TestShapeRecursiveStructType `type:"map"` + + RecursiveStruct *InputService20TestShapeRecursiveStructType `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService21ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService21ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService21ProtocolTest client from just a session. +// svc := inputservice21protocoltest.New(mySession) +// +// // Create a InputService21ProtocolTest client with additional configuration +// svc := inputservice21protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService21ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService21ProtocolTest { + c := p.ClientConfig("inputservice21protocoltest", cfgs...) + return newInputService21ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newInputService21ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService21ProtocolTest { + svc := &InputService21ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice21protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService21ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService21ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService21TestCaseOperation1 = "OperationName" + +// InputService21TestCaseOperation1Request generates a request for the InputService21TestCaseOperation1 operation. 
+func (c *InputService21ProtocolTest) InputService21TestCaseOperation1Request(input *InputService21TestShapeInputService21TestCaseOperation1Input) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService21TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService21TestShapeInputService21TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService21TestShapeInputService21TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService21ProtocolTest) InputService21TestCaseOperation1(input *InputService21TestShapeInputService21TestCaseOperation1Input) (*InputService21TestShapeInputService21TestCaseOperation1Output, error) { + req, out := c.InputService21TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type InputService21TestShapeInputService21TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"` +} + +type InputService21TestShapeInputService21TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type InputService22ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService22ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService22ProtocolTest client from just a session. 
+// svc := inputservice22protocoltest.New(mySession) +// +// // Create a InputService22ProtocolTest client with additional configuration +// svc := inputservice22protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService22ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService22ProtocolTest { + c := p.ClientConfig("inputservice22protocoltest", cfgs...) + return newInputService22ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService22ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *InputService22ProtocolTest { + svc := &InputService22ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice22protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService22ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService22ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService22TestCaseOperation1 = "OperationName" + +// InputService22TestCaseOperation1Request generates a request for the InputService22TestCaseOperation1 operation. 
+func (c *InputService22ProtocolTest) InputService22TestCaseOperation1Request(input *InputService22TestShapeInputShape) (req *request.Request, output *InputService22TestShapeInputService22TestCaseOperation1Output) { + op := &request.Operation{ + Name: opInputService22TestCaseOperation1, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService22TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService22TestShapeInputService22TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *InputService22ProtocolTest) InputService22TestCaseOperation1(input *InputService22TestShapeInputShape) (*InputService22TestShapeInputService22TestCaseOperation1Output, error) { + req, out := c.InputService22TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opInputService22TestCaseOperation2 = "OperationName" + +// InputService22TestCaseOperation2Request generates a request for the InputService22TestCaseOperation2 operation. 
+func (c *InputService22ProtocolTest) InputService22TestCaseOperation2Request(input *InputService22TestShapeInputShape) (req *request.Request, output *InputService22TestShapeInputService22TestCaseOperation2Output) { + op := &request.Operation{ + Name: opInputService22TestCaseOperation2, + HTTPMethod: "POST", + HTTPPath: "/path", + } + + if input == nil { + input = &InputService22TestShapeInputShape{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &InputService22TestShapeInputService22TestCaseOperation2Output{} + req.Data = output + return +} + +func (c *InputService22ProtocolTest) InputService22TestCaseOperation2(input *InputService22TestShapeInputShape) (*InputService22TestShapeInputService22TestCaseOperation2Output, error) { + req, out := c.InputService22TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type InputService22TestShapeInputService22TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +type InputService22TestShapeInputService22TestCaseOperation2Output struct { + _ struct{} `type:"structure"` +} + +type InputService22TestShapeInputShape struct { + _ struct{} `type:"structure"` + + Token *string `type:"string" idempotencyToken:"true"` +} + +// +// Tests begin here +// + +func TestInputService1ProtocolTestBasicXMLSerializationCase1(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Description: aws.String("bar"), + Name: aws.String("foo"), + } + req, _ := svc.InputService1TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `barfoo`, util.Trim(string(body)), 
InputService1TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestBasicXMLSerializationCase2(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputShape{ + Description: aws.String("bar"), + Name: aws.String("foo"), + } + req, _ := svc.InputService1TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `barfoo`, util.Trim(string(body)), InputService1TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService1ProtocolTestBasicXMLSerializationCase3(t *testing.T) { + sess := session.New() + svc := NewInputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService1TestShapeInputService1TestCaseOperation3Input{} + req, _ := svc.InputService1TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService2ProtocolTestSerializeOtherScalarTypesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService2TestShapeInputService2TestCaseOperation1Input{ + First: aws.Bool(true), + Fourth: aws.Int64(3), + Second: aws.Bool(false), + Third: aws.Float64(1.2), + } + req, _ := svc.InputService2TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + 
assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `true3false1.2`, util.Trim(string(body)), InputService2TestShapeInputService2TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + Description: aws.String("baz"), + SubStructure: &InputService3TestShapeSubStructure{ + Bar: aws.String("b"), + Foo: aws.String("a"), + }, + } + req, _ := svc.InputService3TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `bazba`, util.Trim(string(body)), InputService3TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService3ProtocolTestNestedStructuresCase2(t *testing.T) { + sess := session.New() + svc := NewInputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService3TestShapeInputShape{ + Description: aws.String("baz"), + SubStructure: &InputService3TestShapeSubStructure{ + Foo: aws.String("a"), + }, + } + req, _ := svc.InputService3TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `baza`, util.Trim(string(body)), InputService3TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // 
assert headers + +} + +func TestInputService4ProtocolTestNestedStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService4TestShapeInputService4TestCaseOperation1Input{ + Description: aws.String("baz"), + SubStructure: &InputService4TestShapeSubStructure{}, + } + req, _ := svc.InputService4TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `baz`, util.Trim(string(body)), InputService4TestShapeInputService4TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService5ProtocolTestNonFlattenedListsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService5TestShapeInputService5TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService5TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService5TestShapeInputService5TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService6ProtocolTestNonFlattenedListsWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService6TestShapeInputService6TestCaseOperation1Input{ + 
ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService6TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService6TestShapeInputService6TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService7ProtocolTestFlattenedListsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService7TestShapeInputService7TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService7TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService7TestShapeInputService7TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService8ProtocolTestFlattenedListsWithLocationNameCase1(t *testing.T) { + sess := session.New() + svc := NewInputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService8TestShapeInputService8TestCaseOperation1Input{ + ListParam: []*string{ + aws.String("one"), + aws.String("two"), + aws.String("three"), + }, + } + req, _ := svc.InputService8TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + 
assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService8TestShapeInputService8TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService9ProtocolTestListOfStructuresCase1(t *testing.T) { + sess := session.New() + svc := NewInputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService9TestShapeInputService9TestCaseOperation1Input{ + ListParam: []*InputService9TestShapeSingleFieldStruct{ + { + Element: aws.String("one"), + }, + { + Element: aws.String("two"), + }, + { + Element: aws.String("three"), + }, + }, + } + req, _ := svc.InputService9TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `onetwothree`, util.Trim(string(body)), InputService9TestShapeInputService9TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService10ProtocolTestBlobAndTimestampShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService10TestShapeInputService10TestCaseOperation1Input{ + StructureParam: &InputService10TestShapeStructureShape{ + B: []byte("foo"), + T: aws.Time(time.Unix(1422172800, 0)), + }, + } + req, _ := svc.InputService10TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `Zm9v2015-01-25T08:00:00Z`, util.Trim(string(body)), 
InputService10TestShapeInputService10TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String()) + + // assert headers + +} + +func TestInputService11ProtocolTestHeaderMapsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService11TestShapeInputService11TestCaseOperation1Input{ + Foo: map[string]*string{ + "a": aws.String("b"), + "c": aws.String("d"), + }, + } + req, _ := svc.InputService11TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + assert.Equal(t, "b", r.Header.Get("x-foo-a")) + assert.Equal(t, "d", r.Header.Get("x-foo-c")) + +} + +func TestInputService12ProtocolTestStringToStringMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService12TestShapeInputService12TestCaseOperation1Input{ + PipelineId: aws.String("foo"), + QueryDoc: map[string]*string{ + "bar": aws.String("baz"), + "fizz": aws.String("buzz"), + }, + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestStringToStringListMapsInQuerystringCase1(t *testing.T) { + sess := session.New() + svc := NewInputService13ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputService13TestCaseOperation1Input{ + PipelineId: aws.String("id"), + QueryDoc: map[string][]*string{ + "fizz": { + 
aws.String("buzz"), + aws.String("pop"), + }, + "foo": { + aws.String("bar"), + aws.String("baz"), + }, + }, + } + req, _ := svc.InputService13TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String()) + + // assert headers + +} + +func TestInputService14ProtocolTestStringPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService14ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService14TestShapeInputService14TestCaseOperation1Input{ + Foo: aws.String("bar"), + } + req, _ := svc.InputService14TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestBlobPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService15TestShapeInputShape{ + Foo: []byte("bar"), + } + req, _ := svc.InputService15TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + assert.Equal(t, `bar`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService15ProtocolTestBlobPayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService15ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := 
&InputService15TestShapeInputShape{} + req, _ := svc.InputService15TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestStructurePayloadCase1(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{ + Foo: &InputService16TestShapeFooShape{ + Baz: aws.String("bar"), + }, + } + req, _ := svc.InputService16TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `bar`, util.Trim(string(body)), InputService16TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestStructurePayloadCase2(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{} + req, _ := svc.InputService16TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestStructurePayloadCase3(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{ + Foo: &InputService16TestShapeFooShape{}, + } + req, _ := svc.InputService16TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, 
req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, ``, util.Trim(string(body)), InputService16TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService16ProtocolTestStructurePayloadCase4(t *testing.T) { + sess := session.New() + svc := NewInputService16ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService16TestShapeInputShape{} + req, _ := svc.InputService16TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService17ProtocolTestXMLAttributeCase1(t *testing.T) { + sess := session.New() + svc := NewInputService17ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService17TestShapeInputService17TestCaseOperation1Input{ + Grant: &InputService17TestShapeGrant{ + Grantee: &InputService17TestShapeGrantee{ + EmailAddress: aws.String("foo@example.com"), + Type: aws.String("CanonicalUser"), + }, + }, + } + req, _ := svc.InputService17TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo@example.com`, util.Trim(string(body)), InputService17TestShapeInputService17TestCaseOperation1Input{}) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService18ProtocolTestGreedyKeysCase1(t *testing.T) { + sess := session.New() + svc := NewInputService18ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService18TestShapeInputService18TestCaseOperation1Input{ + Bucket: 
aws.String("my/bucket"), + Key: aws.String("testing /123"), + } + req, _ := svc.InputService18TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String()) + + // assert headers + +} + +func TestInputService19ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) { + sess := session.New() + svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService19TestShapeInputShape{} + req, _ := svc.InputService19TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService19ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) { + sess := session.New() + svc := NewInputService19ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService19TestShapeInputShape{ + Foo: aws.String(""), + } + req, _ := svc.InputService19TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path?abc=mno&param-name=", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase1(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + } + req, _ := svc.InputService20TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body 
+ assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase2(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + } + req, _ := svc.InputService20TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase3(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + NoRecurse: aws.String("foo"), + }, + }, + }, + }, + } + req, _ := svc.InputService20TestCaseOperation3Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foo`, util.Trim(string(body)), 
InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase4(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveList: []*InputService20TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + NoRecurse: aws.String("bar"), + }, + }, + }, + } + req, _ := svc.InputService20TestCaseOperation4Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foobar`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase5(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveList: []*InputService20TestShapeRecursiveStructType{ + { + NoRecurse: aws.String("foo"), + }, + { + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + NoRecurse: aws.String("bar"), + }, + }, + }, + }, + } + req, _ := svc.InputService20TestCaseOperation5Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foobar`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", 
r.URL.String()) + + // assert headers + +} + +func TestInputService20ProtocolTestRecursiveShapesCase6(t *testing.T) { + sess := session.New() + svc := NewInputService20ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService20TestShapeInputShape{ + RecursiveStruct: &InputService20TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService20TestShapeRecursiveStructType{ + "bar": { + NoRecurse: aws.String("bar"), + }, + "foo": { + NoRecurse: aws.String("foo"), + }, + }, + }, + } + req, _ := svc.InputService20TestCaseOperation6Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `foofoobarbar`, util.Trim(string(body)), InputService20TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService21ProtocolTestTimestampInHeaderCase1(t *testing.T) { + sess := session.New() + svc := NewInputService21ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService21TestShapeInputService21TestCaseOperation1Input{ + TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService21TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) + +} + +func TestInputService22ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + sess := session.New() + svc := NewInputService22ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService22TestShapeInputShape{ + Token: aws.String("abc123"), + } + req, _ := 
svc.InputService22TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `abc123`, util.Trim(string(body)), InputService22TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} + +func TestInputService22ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + sess := session.New() + svc := NewInputService22ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService22TestShapeInputShape{} + req, _ := svc.InputService22TestCaseOperation2Request(input) + r := req.HTTPRequest + + // build request + restxml.Build(req) + assert.NoError(t, req.Error) + + // assert body + assert.NotNil(t, r.Body) + body := util.SortXML(r.Body) + awstesting.AssertXML(t, `00000000-0000-4000-8000-000000000000`, util.Trim(string(body)), InputService22TestShapeInputShape{}) + + // assert URL + awstesting.AssertURL(t, "https://test/path", r.URL.String()) + + // assert headers + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialisation of AWS +// requests and responses. 
+package restxml + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. 
+func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1502 @@ +package restxml_test + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "net/http" + "net/url" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/private/signer/v4" + "github.com/aws/aws-sdk-go/private/util" + "github.com/stretchr/testify/assert" +) + +var _ bytes.Buffer // 
always import bytes +var _ http.Request +var _ json.Marshaler +var _ time.Time +var _ xmlutil.XMLNode +var _ xml.Attr +var _ = ioutil.Discard +var _ = util.Trim("") +var _ = url.Values{} +var _ = io.EOF +var _ = aws.String + +func init() { + protocol.RandReader = &awstesting.ZeroReader{} +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService1ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService1ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService1ProtocolTest client from just a session. +// svc := outputservice1protocoltest.New(mySession) +// +// // Create a OutputService1ProtocolTest client with additional configuration +// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest { + c := p.ClientConfig("outputservice1protocoltest", cfgs...) + return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest { + svc := &OutputService1ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice1protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService1TestCaseOperation1 = "OperationName" + +// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. 
+func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation1, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) { + req, out := c.OutputService1TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +const opOutputService1TestCaseOperation2 = "OperationName" + +// OutputService1TestCaseOperation2Request generates a request for the OutputService1TestCaseOperation2 operation. +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2Request(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (req *request.Request, output *OutputService1TestShapeOutputShape) { + op := &request.Operation{ + Name: opOutputService1TestCaseOperation2, + } + + if input == nil { + input = &OutputService1TestShapeOutputService1TestCaseOperation2Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService1TestShapeOutputShape{} + req.Data = output + return +} + +func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (*OutputService1TestShapeOutputShape, error) { + req, out := c.OutputService1TestCaseOperation2Request(input) + err := req.Send() + return out, err +} + +type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService1TestShapeOutputService1TestCaseOperation2Input struct { + _ struct{} 
`type:"structure"` +} + +type OutputService1TestShapeOutputShape struct { + _ struct{} `type:"structure"` + + Char *string `type:"character"` + + Double *float64 `type:"double"` + + FalseBool *bool `type:"boolean"` + + Float *float64 `type:"float"` + + ImaHeader *string `location:"header" type:"string"` + + ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"` + + Long *int64 `type:"long"` + + Num *int64 `locationName:"FooNum" type:"integer"` + + Str *string `type:"string"` + + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService2ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService2ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService2ProtocolTest client from just a session. +// svc := outputservice2protocoltest.New(mySession) +// +// // Create a OutputService2ProtocolTest client with additional configuration +// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest { + c := p.ClientConfig("outputservice2protocoltest", cfgs...) + return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest { + svc := &OutputService2ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice2protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService2TestCaseOperation1 = "OperationName" + +// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
+func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService2TestCaseOperation1, + } + + if input == nil { + input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) { + req, out := c.OutputService2TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService2TestShapeOutputService2TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Blob []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService3ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService3ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService3ProtocolTest client from just a session. 
+// svc := outputservice3protocoltest.New(mySession) +// +// // Create a OutputService3ProtocolTest client with additional configuration +// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest { + c := p.ClientConfig("outputservice3protocoltest", cfgs...) + return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest { + svc := &OutputService3ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice3protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService3TestCaseOperation1 = "OperationName" + +// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
+func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService3TestCaseOperation1, + } + + if input == nil { + input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) { + req, out := c.OutputService3TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService3TestShapeOutputService3TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService4ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService4ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService4ProtocolTest client from just a session. 
+// svc := outputservice4protocoltest.New(mySession) +// +// // Create a OutputService4ProtocolTest client with additional configuration +// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest { + c := p.ClientConfig("outputservice4protocoltest", cfgs...) + return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest { + svc := &OutputService4ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice4protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService4TestCaseOperation1 = "OperationName" + +// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
+func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputService4TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService4TestCaseOperation1, + } + + if input == nil { + input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService4TestShapeOutputService4TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputService4TestCaseOperation1Output, error) { + req, out := c.OutputService4TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService4TestShapeOutputService4TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `locationNameList:"item" type:"list"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService5ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService5ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService5ProtocolTest client from just a session. 
+// svc := outputservice5protocoltest.New(mySession) +// +// // Create a OutputService5ProtocolTest client with additional configuration +// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest { + c := p.ClientConfig("outputservice5protocoltest", cfgs...) + return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest { + svc := &OutputService5ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice5protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService5TestCaseOperation1 = "OperationName" + +// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
+func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService5TestCaseOperation1, + } + + if input == nil { + input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) { + req, out := c.OutputService5TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService5TestShapeOutputService5TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + ListMember []*string `type:"list" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService6ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService6ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService6ProtocolTest client from just a session. 
+// svc := outputservice6protocoltest.New(mySession) +// +// // Create a OutputService6ProtocolTest client with additional configuration +// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest { + c := p.ClientConfig("outputservice6protocoltest", cfgs...) + return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest { + svc := &OutputService6ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice6protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService6TestCaseOperation1 = "OperationName" + +// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
+func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService6TestCaseOperation1, + } + + if input == nil { + input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) { + req, out := c.OutputService6TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService6TestShapeOutputService6TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*OutputService6TestShapeSingleStructure `type:"map"` +} + +type OutputService6TestShapeSingleStructure struct { + _ struct{} `type:"structure"` + + Foo *string `locationName:"foo" type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService7ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService7ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService7ProtocolTest client from just a session. 
+// svc := outputservice7protocoltest.New(mySession) +// +// // Create a OutputService7ProtocolTest client with additional configuration +// svc := outputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService7ProtocolTest { + c := p.ClientConfig("outputservice7protocoltest", cfgs...) + return newOutputService7ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService7ProtocolTest { + svc := &OutputService7ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice7protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService7TestCaseOperation1 = "OperationName" + +// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
+func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *request.Request, output *OutputService7TestShapeOutputService7TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService7TestCaseOperation1, + } + + if input == nil { + input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService7TestShapeOutputService7TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputService7TestCaseOperation1Output, error) { + req, out := c.OutputService7TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService7TestShapeOutputService7TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `type:"map" flattened:"true"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService8ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService8ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService8ProtocolTest client from just a session. 
+// svc := outputservice8protocoltest.New(mySession) +// +// // Create a OutputService8ProtocolTest client with additional configuration +// svc := outputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService8ProtocolTest { + c := p.ClientConfig("outputservice8protocoltest", cfgs...) + return newOutputService8ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService8ProtocolTest { + svc := &OutputService8ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice8protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService8TestCaseOperation1 = "OperationName" + +// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
+func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *request.Request, output *OutputService8TestShapeOutputService8TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService8TestCaseOperation1, + } + + if input == nil { + input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService8TestShapeOutputService8TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputService8TestCaseOperation1Output, error) { + req, out := c.OutputService8TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService8TestShapeOutputService8TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService9ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService9ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService9ProtocolTest client from just a session. 
+// svc := outputservice9protocoltest.New(mySession) +// +// // Create a OutputService9ProtocolTest client with additional configuration +// svc := outputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService9ProtocolTest { + c := p.ClientConfig("outputservice9protocoltest", cfgs...) + return newOutputService9ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService9ProtocolTest { + svc := &OutputService9ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice9protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService9TestCaseOperation1 = "OperationName" + +// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. 
+func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *request.Request, output *OutputService9TestShapeOutputService9TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService9TestCaseOperation1, + } + + if input == nil { + input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService9TestShapeOutputService9TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputService9TestCaseOperation1Output, error) { + req, out := c.OutputService9TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService9TestShapeOutputService9TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Data"` + + Data *OutputService9TestShapeSingleStructure `type:"structure"` + + Header *string `location:"header" locationName:"X-Foo" type:"string"` +} + +type OutputService9TestShapeSingleStructure struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService10ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService10ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService10ProtocolTest client from just a session. 
+// svc := outputservice10protocoltest.New(mySession) +// +// // Create a OutputService10ProtocolTest client with additional configuration +// svc := outputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService10ProtocolTest { + c := p.ClientConfig("outputservice10protocoltest", cfgs...) + return newOutputService10ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService10ProtocolTest { + svc := &OutputService10ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice10protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService10TestCaseOperation1 = "OperationName" + +// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. 
+func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *request.Request, output *OutputService10TestShapeOutputService10TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService10TestCaseOperation1, + } + + if input == nil { + input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService10TestShapeOutputService10TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputService10TestCaseOperation1Output, error) { + req, out := c.OutputService10TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService10TestShapeOutputService10TestCaseOperation1Output struct { + _ struct{} `type:"structure" payload:"Stream"` + + Stream []byte `type:"blob"` +} + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OutputService11ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService11ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService11ProtocolTest client from just a session. 
+// svc := outputservice11protocoltest.New(mySession) +// +// // Create a OutputService11ProtocolTest client with additional configuration +// svc := outputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService11ProtocolTest { + c := p.ClientConfig("outputservice11protocoltest", cfgs...) + return newOutputService11ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService11ProtocolTest { + svc := &OutputService11ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice11protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any +// custom request initialization. +func (c *OutputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService11TestCaseOperation1 = "OperationName" + +// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. 
+func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *request.Request, output *OutputService11TestShapeOutputService11TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService11TestCaseOperation1, + } + + if input == nil { + input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService11TestShapeOutputService11TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputService11TestCaseOperation1Output, error) { + req, out := c.OutputService11TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService11TestShapeOutputService11TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Char *string `location:"header" locationName:"x-char" type:"character"` + + Double *float64 `location:"header" locationName:"x-double" type:"double"` + + FalseBool *bool `location:"header" locationName:"x-false-bool" type:"boolean"` + + Float *float64 `location:"header" locationName:"x-float" type:"float"` + + Integer *int64 `location:"header" locationName:"x-int" type:"integer"` + + Long *int64 `location:"header" locationName:"x-long" type:"long"` + + Str *string `location:"header" locationName:"x-str" type:"string"` + + Timestamp *time.Time `location:"header" locationName:"x-timestamp" type:"timestamp" timestampFormat:"iso8601"` + + TrueBool *bool `location:"header" locationName:"x-true-bool" type:"boolean"` +} + +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type OutputService12ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the OutputService12ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OutputService12ProtocolTest client from just a session. +// svc := outputservice12protocoltest.New(mySession) +// +// // Create a OutputService12ProtocolTest client with additional configuration +// svc := outputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewOutputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService12ProtocolTest { + c := p.ClientConfig("outputservice12protocoltest", cfgs...) + return newOutputService12ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newOutputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService12ProtocolTest { + svc := &OutputService12ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "outputservice12protocoltest", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any +// custom request initialization. 
+func (c *OutputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opOutputService12TestCaseOperation1 = "OperationName" + +// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *request.Request, output *OutputService12TestShapeOutputService12TestCaseOperation1Output) { + op := &request.Operation{ + Name: opOutputService12TestCaseOperation1, + } + + if input == nil { + input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} + } + + req = c.newRequest(op, input, output) + output = &OutputService12TestShapeOutputService12TestCaseOperation1Output{} + req.Data = output + return +} + +func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputService12TestCaseOperation1Output, error) { + req, out := c.OutputService12TestCaseOperation1Request(input) + err := req.Send() + return out, err +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` +} + +type OutputService12TestShapeOutputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` + + Foo *string `type:"string"` +} + +// +// Tests begin here +// + +func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Z")) + req, out := svc.OutputService1TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: 
http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "myname", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService1ProtocolTestScalarMembersCase2(t *testing.T) { + sess := session.New() + svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("123falsetrue1.21.3200a2015-01-25T08:00:00Z")) + req, out := svc.OutputService1TestCaseOperation2Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("ImaHeader", "test") + req.HTTPResponse.Header.Set("X-Foo", "abc") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.3, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.2, *out.Float) + assert.Equal(t, "test", *out.ImaHeader) + assert.Equal(t, "abc", *out.ImaHeaderLocation) + assert.Equal(t, int64(200), *out.Long) + assert.Equal(t, int64(123), *out.Num) + assert.Equal(t, "", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + 
assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("dmFsdWU=")) + req, out := svc.OutputService2TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "value", string(out.Blob)) + +} + +func TestOutputService3ProtocolTestListsCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService3TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService4TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // 
ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc123")) + req, out := svc.OutputService5TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.ListMember[0]) + assert.Equal(t, "123", *out.ListMember[1]) + +} + +func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService6TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"].Foo) + assert.Equal(t, "bar", *out.Map["qux"].Foo) + +} + +func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService7ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService7TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal 
response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService8ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("quxbarbazbam")) + req, out := svc.OutputService8TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "bam", *out.Map["baz"]) + assert.Equal(t, "bar", *out.Map["qux"]) + +} + +func TestOutputService9ProtocolTestXMLPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService9ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := svc.OutputService9TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("X-Foo", "baz") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", *out.Data.Foo) + assert.Equal(t, "baz", *out.Header) + +} + +func TestOutputService10ProtocolTestStreamingPayloadCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService10ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("abc")) + req, out := 
svc.OutputService10TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "abc", string(out.Stream)) + +} + +func TestOutputService11ProtocolTestScalarMembersInHeadersCase1(t *testing.T) { + sess := session.New() + svc := NewOutputService11ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("")) + req, out := svc.OutputService11TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + req.HTTPResponse.Header.Set("x-char", "a") + req.HTTPResponse.Header.Set("x-double", "1.5") + req.HTTPResponse.Header.Set("x-false-bool", "false") + req.HTTPResponse.Header.Set("x-float", "1.5") + req.HTTPResponse.Header.Set("x-int", "1") + req.HTTPResponse.Header.Set("x-long", "100") + req.HTTPResponse.Header.Set("x-str", "string") + req.HTTPResponse.Header.Set("x-timestamp", "Sun, 25 Jan 2015 08:00:00 GMT") + req.HTTPResponse.Header.Set("x-true-bool", "true") + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "a", *out.Char) + assert.Equal(t, 1.5, *out.Double) + assert.Equal(t, false, *out.FalseBool) + assert.Equal(t, 1.5, *out.Float) + assert.Equal(t, int64(1), *out.Integer) + assert.Equal(t, int64(100), *out.Long) + assert.Equal(t, "string", *out.Str) + assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) + assert.Equal(t, true, *out.TrueBool) + +} + +func TestOutputService12ProtocolTestEmptyStringCase1(t *testing.T) { + sess := session.New() + svc 
:= NewOutputService12ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")}) + + buf := bytes.NewReader([]byte("requestid")) + req, out := svc.OutputService12TestCaseOperation1Request(nil) + req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} + + // set headers + + // unmarshal response + restxml.UnmarshalMeta(req) + restxml.Unmarshal(req) + assert.NoError(t, req.Error) + + // assert response + assert.NotNil(t, out) // ensure out variable is used + assert.Equal(t, "", *out.Foo) + +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. 
+func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,40 @@ +package protocol_test + +import ( + "net/http" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/stretchr/testify/assert" +) + +type mockCloser struct { + *strings.Reader + Closed bool +} + +func (m *mockCloser) Close() error { + m.Closed = true + return nil +} + +func TestUnmarshalDrainBody(t *testing.T) { + b := &mockCloser{Reader: strings.NewReader("example body")} + r := &request.Request{HTTPResponse: &http.Response{ + Body: b, + }} + + protocol.UnmarshalDiscardBody(r) + assert.NoError(t, r.Error) + assert.Equal(t, 0, b.Len()) + assert.True(t, b.Closed) +} + +func TestUnmarshalDrainBodyNoBody(t *testing.T) { + r := &request.Request{HTTPResponse: &http.Response{}} + + protocol.UnmarshalDiscardBody(r) + assert.NoError(t, r.Error) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,293 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. 
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// types are converted to XMLNodes also. +func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } 
+ + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. 
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. 
V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. 
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/README.md 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +## AWS SDK for Go Private packages ## +`private` is a collection of packages used internally by the SDK, and is subject to have breaking changes. This package is not `internal` so that if you really need to use its functionality, and understand breaking changes will be made, you are able to. + +These packages will be refactored in the future so that the API generator and model parsers are exposed cleanly on their own. Making it easier for you to generate your own code based on the API models. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v2/v2.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v2/v2.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v2/v2.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v2/v2.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,174 @@ +package v2 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +var ( + errInvalidMethod = errors.New("v2 signer only handles HTTP POST") +) + +const ( + signatureVersion = "2" + signatureMethod = "HmacSHA256" + timeFormat = "2006-01-02T15:04:05Z" +) + +type signer struct { + // Values that must be populated from the request + Request *http.Request + Time time.Time + Credentials *credentials.Credentials + Debug aws.LogLevelType + Logger aws.Logger + + Query url.Values + stringToSign string + signature string +} + +// Sign requests with signature version 2. 
+// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + if req.HTTPRequest.Method != "POST" && req.HTTPRequest.Method != "GET" { + // The V2 signer only supports GET and POST + req.Error = errInvalidMethod + return + } + + v2 := signer{ + Request: req.HTTPRequest, + Time: req.Time, + Credentials: req.Config.Credentials, + Debug: req.Config.LogLevel.Value(), + Logger: req.Config.Logger, + } + + req.Error = v2.Sign() + + if req.Error != nil { + return + } + + if req.HTTPRequest.Method == "POST" { + // Set the body of the request based on the modified query parameters + req.SetStringBody(v2.Query.Encode()) + + // Now that the body has changed, remove any Content-Length header, + // because it will be incorrect + req.HTTPRequest.ContentLength = 0 + req.HTTPRequest.Header.Del("Content-Length") + } else { + req.HTTPRequest.URL.RawQuery = v2.Query.Encode() + } +} + +func (v2 *signer) Sign() error { + credValue, err := v2.Credentials.Get() + if err != nil { + return err + } + + if v2.Request.Method == "POST" { + // Parse the HTTP request to obtain the query parameters that will + // be used to build the string to sign. Note that because the HTTP + // request will need to be modified, the PostForm and Form properties + // are reset to nil after parsing. 
+ v2.Request.ParseForm() + v2.Query = v2.Request.PostForm + v2.Request.PostForm = nil + v2.Request.Form = nil + } else { + v2.Query = v2.Request.URL.Query() + } + + // Set new query parameters + v2.Query.Set("AWSAccessKeyId", credValue.AccessKeyID) + v2.Query.Set("SignatureVersion", signatureVersion) + v2.Query.Set("SignatureMethod", signatureMethod) + v2.Query.Set("Timestamp", v2.Time.UTC().Format(timeFormat)) + if credValue.SessionToken != "" { + v2.Query.Set("SecurityToken", credValue.SessionToken) + } + + // in case this is a retry, ensure no signature present + v2.Query.Del("Signature") + + method := v2.Request.Method + host := v2.Request.URL.Host + path := v2.Request.URL.Path + if path == "" { + path = "/" + } + + // obtain all of the query keys and sort them + queryKeys := make([]string, 0, len(v2.Query)) + for key := range v2.Query { + queryKeys = append(queryKeys, key) + } + sort.Strings(queryKeys) + + // build URL-encoded query keys and values + queryKeysAndValues := make([]string, len(queryKeys)) + for i, key := range queryKeys { + k := strings.Replace(url.QueryEscape(key), "+", "%20", -1) + v := strings.Replace(url.QueryEscape(v2.Query.Get(key)), "+", "%20", -1) + queryKeysAndValues[i] = k + "=" + v + } + + // join into one query string + query := strings.Join(queryKeysAndValues, "&") + + // build the canonical string for the V2 signature + v2.stringToSign = strings.Join([]string{ + method, + host, + path, + query, + }, "\n") + + hash := hmac.New(sha256.New, []byte(credValue.SecretAccessKey)) + hash.Write([]byte(v2.stringToSign)) + v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + v2.Query.Set("Signature", v2.signature) + + if v2.Debug.Matches(aws.LogDebugWithSigning) { + v2.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ STRING TO SIGN ]-------------------------------- +%s +---[ SIGNATURE ]------------------------------------- +%s +-----------------------------------------------------` + 
+func (v2 *signer) logSigningInfo() { + msg := fmt.Sprintf(logSignInfoMsg, v2.stringToSign, v2.Query.Get("Signature")) + v2.Logger.Log(msg) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v2/v2_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v2/v2_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v2/v2_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v2/v2_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,195 @@ +package v2 + +import ( + "bytes" + "net/http" + "net/url" + "os" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/stretchr/testify/assert" +) + +type signerBuilder struct { + ServiceName string + Region string + SignTime time.Time + Query url.Values + Method string + SessionToken string +} + +func (sb signerBuilder) BuildSigner() signer { + endpoint := "https://" + sb.ServiceName + "." 
+ sb.Region + ".amazonaws.com" + var req *http.Request + if sb.Method == "POST" { + body := []byte(sb.Query.Encode()) + reader := bytes.NewReader(body) + req, _ = http.NewRequest(sb.Method, endpoint, reader) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + req.Header.Add("Content-Length", string(len(body))) + } else { + req, _ = http.NewRequest(sb.Method, endpoint, nil) + req.URL.RawQuery = sb.Query.Encode() + } + + sig := signer{ + Request: req, + Time: sb.SignTime, + Credentials: credentials.NewStaticCredentials( + "AKID", + "SECRET", + sb.SessionToken), + } + + if os.Getenv("DEBUG") != "" { + sig.Debug = aws.LogDebug + sig.Logger = aws.NewDefaultLogger() + } + + return sig +} + +func TestSignRequestWithAndWithoutSession(t *testing.T) { + assert := assert.New(t) + + // have to create more than once, so use a function + newQuery := func() url.Values { + query := make(url.Values) + query.Add("Action", "CreateDomain") + query.Add("DomainName", "TestDomain-1437033376") + query.Add("Version", "2009-04-15") + return query + } + + // create request without a SecurityToken (session) in the credentials + + query := newQuery() + timestamp := time.Date(2015, 7, 16, 7, 56, 16, 0, time.UTC) + builder := signerBuilder{ + Method: "POST", + ServiceName: "sdb", + Region: "ap-southeast-2", + SignTime: timestamp, + Query: query, + } + + signer := builder.BuildSigner() + + err := signer.Sign() + assert.NoError(err) + assert.Equal("tm4dX8Ks7pzFSVHz7qHdoJVXKRLuC4gWz9eti60d8ks=", signer.signature) + assert.Equal(8, len(signer.Query)) + assert.Equal("AKID", signer.Query.Get("AWSAccessKeyId")) + assert.Equal("2015-07-16T07:56:16Z", signer.Query.Get("Timestamp")) + assert.Equal("HmacSHA256", signer.Query.Get("SignatureMethod")) + assert.Equal("2", signer.Query.Get("SignatureVersion")) + assert.Equal("tm4dX8Ks7pzFSVHz7qHdoJVXKRLuC4gWz9eti60d8ks=", signer.Query.Get("Signature")) + assert.Equal("CreateDomain", signer.Query.Get("Action")) + 
assert.Equal("TestDomain-1437033376", signer.Query.Get("DomainName")) + assert.Equal("2009-04-15", signer.Query.Get("Version")) + + // should not have a SecurityToken parameter + _, ok := signer.Query["SecurityToken"] + assert.False(ok) + + // now sign again, this time with a security token (session) + + query = newQuery() + builder.SessionToken = "SESSION" + signer = builder.BuildSigner() + + err = signer.Sign() + assert.NoError(err) + assert.Equal("Ch6qv3rzXB1SLqY2vFhsgA1WQ9rnQIE2WJCigOvAJwI=", signer.signature) + assert.Equal(9, len(signer.Query)) // expect one more parameter + assert.Equal("Ch6qv3rzXB1SLqY2vFhsgA1WQ9rnQIE2WJCigOvAJwI=", signer.Query.Get("Signature")) + assert.Equal("SESSION", signer.Query.Get("SecurityToken")) +} + +func TestMoreComplexSignRequest(t *testing.T) { + assert := assert.New(t) + query := make(url.Values) + query.Add("Action", "PutAttributes") + query.Add("DomainName", "TestDomain-1437041569") + query.Add("Version", "2009-04-15") + query.Add("Attribute.2.Name", "Attr2") + query.Add("Attribute.2.Value", "Value2") + query.Add("Attribute.2.Replace", "true") + query.Add("Attribute.1.Name", "Attr1-%\\+ %") + query.Add("Attribute.1.Value", " \tValue1 +!@#$%^&*(){}[]\"';:?/.>,<\x12\x00") + query.Add("Attribute.1.Replace", "true") + query.Add("ItemName", "Item 1") + + timestamp := time.Date(2015, 7, 16, 10, 12, 51, 0, time.UTC) + builder := signerBuilder{ + Method: "POST", + ServiceName: "sdb", + Region: "ap-southeast-2", + SignTime: timestamp, + Query: query, + SessionToken: "SESSION", + } + + signer := builder.BuildSigner() + + err := signer.Sign() + assert.NoError(err) + assert.Equal("WNdE62UJKLKoA6XncVY/9RDbrKmcVMdQPQOTAs8SgwQ=", signer.signature) +} + +func TestGet(t *testing.T) { + assert := assert.New(t) + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("ap-southeast-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "OpName", + 
HTTPMethod: "GET", + HTTPPath: "/", + }, + nil, + nil, + ) + + r.Build() + assert.Equal("GET", r.HTTPRequest.Method) + assert.Equal("", r.HTTPRequest.URL.Query().Get("Signature")) + + Sign(r) + assert.NoError(r.Error) + t.Logf("Signature: %s", r.HTTPRequest.URL.Query().Get("Signature")) + assert.NotEqual("", r.HTTPRequest.URL.Query().Get("Signature")) +} + +func TestAnonymousCredentials(t *testing.T) { + assert := assert.New(t) + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.AnonymousCredentials, + Region: aws.String("ap-southeast-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "PutAttributes", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.Build() + + Sign(r) + + req := r.HTTPRequest + req.ParseForm() + + assert.Empty(req.PostForm.Get("Signature")) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/functional_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,77 @@ +package v4_test + +import ( + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +func TestPresignHandler(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + ContentDisposition: aws.String("a+b c$d"), + ACL: aws.String("public-read"), + }) + req.Time = time.Unix(0, 0) + urlstr, err := req.Presign(5 * time.Minute) + + assert.NoError(t, err) + + 
expectedDate := "19700101T000000Z" + expectedHeaders := "content-disposition;host;x-amz-acl" + expectedSig := "2d76a414208c0eac2a23ef9c834db9635ecd5a0fbb447a00ad191f82d854f55b" + expectedCred := "AKID/19700101/mock-region/s3/aws4_request" + + u, _ := url.Parse(urlstr) + urlQ := u.Query() + assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date")) + assert.Equal(t, "300", urlQ.Get("X-Amz-Expires")) + + assert.NotContains(t, urlstr, "+") // + encoded as %20 +} + +func TestPresignRequest(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + ContentDisposition: aws.String("a+b c$d"), + ACL: aws.String("public-read"), + }) + req.Time = time.Unix(0, 0) + urlstr, headers, err := req.PresignRequest(5 * time.Minute) + + assert.NoError(t, err) + + expectedDate := "19700101T000000Z" + expectedHeaders := "content-disposition;host;x-amz-acl" + expectedSig := "2d76a414208c0eac2a23ef9c834db9635ecd5a0fbb447a00ad191f82d854f55b" + expectedCred := "AKID/19700101/mock-region/s3/aws4_request" + expectedHeaderMap := http.Header{ + "x-amz-acl": []string{"public-read"}, + "content-disposition": []string{"a+b c$d"}, + } + + u, _ := url.Parse(urlstr) + urlQ := u.Query() + assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date")) + assert.Equal(t, expectedHeaderMap, headers) + assert.Equal(t, "300", urlQ.Get("X-Amz-Expires")) + + assert.NotContains(t, urlstr, "+") // + encoded as %20 +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } 
+ return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,57 @@ +package v4 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRuleCheckWhitelist(t *testing.T) { + w := whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + } + + assert.True(t, w.IsValid("Cache-Control")) + assert.False(t, w.IsValid("Cache-")) +} + +func TestRuleCheckBlacklist(t *testing.T) { + b := blacklist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + } + + assert.False(t, b.IsValid("Cache-Control")) + assert.True(t, b.IsValid("Cache-")) +} + +func TestRuleCheckPattern(t *testing.T) { + p := patterns{"X-Amz-Meta-"} + + assert.True(t, p.IsValid("X-Amz-Meta-")) + assert.True(t, p.IsValid("X-Amz-Meta-Star")) + assert.False(t, p.IsValid("Cache-")) +} + +func TestRuleComplexWhitelist(t *testing.T) { + w := rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, + } + + r := rules{ + inclusiveRules{patterns{"X-Amz-"}, blacklist{w}}, + } + + assert.True(t, r.IsValid("X-Amz-Blah")) + assert.False(t, r.IsValid("X-Amz-Meta-")) + assert.False(t, r.IsValid("X-Amz-Meta-Star")) + assert.False(t, r.IsValid("Cache-Control")) 
+} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,438 @@ +// Package v4 implements signing for AWS V4 signer +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Content-Length": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. 
+var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. 
+var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + CredValues credentials.Value + Credentials *credentials.Credentials + Query url.Values + Body io.ReadSeeker + Debug aws.LogLevelType + Logger aws.Logger + + isPresign bool + formattedTime string + formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string + notHoist bool + signedHeaderVals http.Header +} + +// Sign requests with signature version 4. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + s := signer{ + Request: req.HTTPRequest, + Time: req.Time, + ExpireTime: req.ExpireTime, + Query: req.HTTPRequest.URL.Query(), + Body: req.Body, + ServiceName: name, + Region: region, + Credentials: req.Config.Credentials, + Debug: req.Config.LogLevel.Value(), + Logger: req.Config.Logger, + notHoist: req.NotHoist, + } + + req.Error = s.sign() + req.SignedHeaderVals = s.signedHeaderVals +} + +func (v4 *signer) sign() error { + if v4.ExpireTime != 0 { + v4.isPresign = true + } + + if v4.isRequestSigned() { + if !v4.Credentials.IsExpired() { + // If the request is already signed, and the credentials have not + // expired yet ignore the signing request. + return nil + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + if v4.isPresign { + v4.removePresign() + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. 
+ v4.Request.URL.RawQuery = v4.Query.Encode() + } + } + + var err error + v4.CredValues, err = v4.Credentials.Get() + if err != nil { + return err + } + + if v4.isPresign { + v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if v4.CredValues.SessionToken != "" { + v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } else { + v4.Query.Del("X-Amz-Security-Token") + } + } else if v4.CredValues.SessionToken != "" { + v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } + + v4.build() + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *signer) logSigningInfo() { + signedURLMsg := "" + if v4.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (v4 *signer) build() { + + v4.buildTime() // no depends + v4.buildCredentialString() // no depends + + unsignedHeaders := v4.Request.Header + if v4.isPresign { + if !v4.notHoist { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + v4.Query[k] = urlValues[k] + } + } + } + + v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + v4.buildCanonicalString() // depends on canon headers / signed headers + v4.buildStringToSign() // depends on canon string + v4.buildSignature() // depends on string to sign + + if v4.isPresign { + v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + 
v4.CredValues.AccessKeyID + "/" + v4.credentialString, + "SignedHeaders=" + v4.signedHeaders, + "Signature=" + v4.signature, + } + v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (v4 *signer) buildTime() { + v4.formattedTime = v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + headers = append(headers, lowerCaseKey) + + if v4.signedHeaderVals == nil { + v4.signedHeaderVals = make(http.Header) + } + v4.signedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k 
+ ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if v4.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.CredValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (v4 *signer) isRequestSigned() bool { + if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { + return true + } + if 
v4.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (v4 *signer) removePresign() { + v4.Query.Del("X-Amz-Algorithm") + v4.Query.Del("X-Amz-Signature") + v4.Query.Del("X-Amz-Security-Token") + v4.Query.Del("X-Amz-Date") + v4.Query.Del("X-Amz-Expires") + v4.Query.Del("X-Amz-Credential") + v4.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,255 @@ +package v4 + +import ( + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" +) + +func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer { + endpoint := "https://" + serviceName + "." 
+ region + ".amazonaws.com" + reader := strings.NewReader(body) + req, _ := http.NewRequest("POST", endpoint, reader) + req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()" + req.Header.Add("X-Amz-Target", "prefix.Operation") + req.Header.Add("Content-Type", "application/x-amz-json-1.0") + req.Header.Add("Content-Length", string(len(body))) + req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)") + + return signer{ + Request: req, + Time: signTime, + ExpireTime: expireTime, + Query: req.URL.Query(), + Body: reader, + ServiceName: serviceName, + Region: region, + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + } +} + +func removeWS(text string) string { + text = strings.Replace(text, " ", "", -1) + text = strings.Replace(text, "\n", "", -1) + text = strings.Replace(text, "\t", "", -1) + return text +} + +func assertEqual(t *testing.T, expected, given string) { + if removeWS(expected) != removeWS(given) { + t.Errorf("\nExpected: %s\nGiven: %s", expected, given) + } +} + +func TestPresignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}") + signer.sign() + + expectedDate := "19700101T000000Z" + expectedHeaders := "content-type;host;x-amz-meta-other-header" + expectedSig := "4fe8944ddd3e83a32bc874955e734e5a349116bfce2d4f43171e0f7572b842f6" + expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request" + expectedTarget := "prefix.Operation" + + q := signer.Request.URL.Query() + assert.Equal(t, expectedSig, q.Get("X-Amz-Signature")) + assert.Equal(t, expectedCred, q.Get("X-Amz-Credential")) + assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) + assert.Empty(t, q.Get("X-Amz-Meta-Other-Header")) + assert.Equal(t, expectedTarget, q.Get("X-Amz-Target")) +} + +func TestSignRequest(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}") + signer.sign() + + 
expectedDate := "19700101T000000Z" + expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=5d3983fb3de907bdc2f3a6951d968e510f0252a8358c038f7680aa02374eeb67" + + q := signer.Request.Header + assert.Equal(t, expectedSig, q.Get("Authorization")) + assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) +} + +func TestSignEmptyBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "") + signer.Body = nil + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash) +} + +func TestSignBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) +} + +func TestSignSeekedBody(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello") + signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello" + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) + + start, _ := signer.Body.Seek(0, 1) + assert.Equal(t, int64(3), start) +} + +func TestPresignEmptyBodyS3(t *testing.T) { + signer := buildSigner("s3", "us-east-1", time.Now(), 5*time.Minute, "hello") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + assert.Equal(t, "UNSIGNED-PAYLOAD", hash) +} + +func TestSignPrecomputedBodyChecksum(t *testing.T) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") + signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED") + signer.sign() + hash := signer.Request.Header.Get("X-Amz-Content-Sha256") + 
assert.Equal(t, "PRECOMPUTED", hash) +} + +func TestAnonymousCredentials(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{Credentials: credentials.AnonymousCredentials}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + Sign(r) + + urlQ := r.HTTPRequest.URL.Query() + assert.Empty(t, urlQ.Get("X-Amz-Signature")) + assert.Empty(t, urlQ.Get("X-Amz-Credential")) + assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders")) + assert.Empty(t, urlQ.Get("X-Amz-Date")) + + hQ := r.HTTPRequest.Header + assert.Empty(t, hQ.Get("Authorization")) + assert.Empty(t, hQ.Get("X-Amz-Date")) +} + +func TestIgnoreResignRequestWithValidCreds(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + + Sign(r) + sig := r.HTTPRequest.Header.Get("Authorization") + + Sign(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestIgnorePreResignRequestWithValidCreds(t *testing.T) { + svc := awstesting.NewClient(&aws.Config{ + Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), + Region: aws.String("us-west-2"), + }) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + Sign(r) + sig := r.HTTPRequest.Header.Get("X-Amz-Signature") + + Sign(r) + assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature")) +} + +func TestResignRequestExpiredCreds(t *testing.T) { + creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") + svc := awstesting.NewClient(&aws.Config{Credentials: creds}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + 
}, + nil, + nil, + ) + Sign(r) + querySig := r.HTTPRequest.Header.Get("Authorization") + + creds.Expire() + + Sign(r) + assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization")) +} + +func TestPreResignRequestExpiredCreds(t *testing.T) { + provider := &credentials.StaticProvider{Value: credentials.Value{ + AccessKeyID: "AKID", + SecretAccessKey: "SECRET", + SessionToken: "SESSION", + }} + creds := credentials.NewCredentials(provider) + svc := awstesting.NewClient(&aws.Config{Credentials: creds}) + r := svc.NewRequest( + &request.Operation{ + Name: "BatchGetItem", + HTTPMethod: "POST", + HTTPPath: "/", + }, + nil, + nil, + ) + r.ExpireTime = time.Minute * 10 + + Sign(r) + querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature") + + creds.Expire() + r.Time = time.Now().Add(time.Hour * 48) + + Sign(r) + assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature")) +} + +func BenchmarkPresignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} + +func BenchmarkSignRequest(b *testing.B) { + signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}") + for i := 0; i < b.N; i++ { + signer.sign() + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/util/util.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/util/util.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/util/util.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/util/util.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,109 @@ +package util + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "reflect" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// GoFmt returns the Go formated string of the 
input. +// +// Panics if the format fails. +func GoFmt(buf string) string { + formatted, err := format.Source([]byte(buf)) + if err != nil { + panic(fmt.Errorf("%s\nOriginal code:\n%s", err.Error(), buf)) + } + return string(formatted) +} + +var reTrim = regexp.MustCompile(`\s{2,}`) + +// Trim removes all leading and trailing white space. +// +// All consecutive spaces will be reduced to a single space. +func Trim(s string) string { + return strings.TrimSpace(reTrim.ReplaceAllString(s, " ")) +} + +// Capitalize capitalizes the first character of the string. +func Capitalize(s string) string { + if len(s) == 1 { + return strings.ToUpper(s) + } + return strings.ToUpper(s[0:1]) + s[1:] +} + +// SortXML sorts the reader's XML elements +func SortXML(r io.Reader) string { + var buf bytes.Buffer + d := xml.NewDecoder(r) + root, _ := xmlutil.XMLToStruct(d, nil) + e := xml.NewEncoder(&buf) + xmlutil.StructToXML(e, root, true) + return buf.String() +} + +// PrettyPrint generates a human readable representation of the value v. +// All values of v are recursively found and pretty printed also. 
+func PrettyPrint(v interface{}) string { + value := reflect.ValueOf(v) + switch value.Kind() { + case reflect.Struct: + str := fullName(value.Type()) + "{\n" + for i := 0; i < value.NumField(); i++ { + l := string(value.Type().Field(i).Name[0]) + if strings.ToUpper(l) == l { + str += value.Type().Field(i).Name + ": " + str += PrettyPrint(value.Field(i).Interface()) + str += ",\n" + } + } + str += "}" + return str + case reflect.Map: + str := "map[" + fullName(value.Type().Key()) + "]" + fullName(value.Type().Elem()) + "{\n" + for _, k := range value.MapKeys() { + str += "\"" + k.String() + "\": " + str += PrettyPrint(value.MapIndex(k).Interface()) + str += ",\n" + } + str += "}" + return str + case reflect.Ptr: + if e := value.Elem(); e.IsValid() { + return "&" + PrettyPrint(e.Interface()) + } + return "nil" + case reflect.Slice: + str := "[]" + fullName(value.Type().Elem()) + "{\n" + for i := 0; i < value.Len(); i++ { + str += PrettyPrint(value.Index(i).Interface()) + str += ",\n" + } + str += "}" + return str + default: + return fmt.Sprintf("%#v", v) + } +} + +func pkgName(t reflect.Type) string { + pkg := t.PkgPath() + c := strings.Split(pkg, "/") + return c[len(c)-1] +} + +func fullName(t reflect.Type) string { + if pkg := pkgName(t); pkg != "" { + return pkg + "." 
+ t.Name() + } + return t.Name() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,134 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. 
+func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + + err := req.Send() + for _, a := range w.Acceptors { + result := false + var vals []interface{} + switch a.Matcher { + case "pathAll", "path": + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case "pathAny": + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case "error": + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + case "pathList": + // ignored matcher + default: + logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", + w.Config.Operation, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ continue + } + + switch a.State { + case "success": + // waiter completed + return nil + case "failure": + // Waiter failure state triggered + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), err) + case "retry": + // clear the error and retry the operation + err = nil + default: + logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", + w.Config.Operation, a.State) + } + } + if err != nil { + return err + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} + +func logf(client reflect.Value, msg string, args ...interface{}) { + cfgVal := client.FieldByName("Config") + if !cfgVal.IsValid() { + return + } + if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { + cfg.Logger.Log(fmt.Sprintf(msg, args...)) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/private/waiter/waiter_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,401 @@ +package waiter_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/private/waiter" +) + +type mockClient struct { + *client.Client +} +type MockInput struct{} +type MockOutput struct { + States []*MockState +} +type MockState struct { + State *string 
+} + +func (c *mockClient) MockRequest(input *MockInput) (*request.Request, *MockOutput) { + op := &request.Operation{ + Name: "Mock", + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MockInput{} + } + + output := &MockOutput{} + req := c.NewRequest(op, input, output) + req.Data = output + return req, output +} + +func TestWaiterPathAll(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterPath(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + 
svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "States[].State", + Expected: "running", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterFailure(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("pending")}, + }, + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("stopping")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + 
}) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "States[].State", + Expected: "stopping", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait().(awserr.Error) + assert.Error(t, err) + assert.Equal(t, "ResourceNotReady", err.Code()) + assert.Equal(t, "failed waiting for successful resource state", err.Message()) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterError(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.UnmarshalError.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + resps := []*MockOutput{ + { // Request 1 + States: []*MockState{ + {State: aws.String("pending")}, + {State: aws.String("pending")}, + }, + }, + { // Request 2, error case + }, + { // Request 3 + States: []*MockState{ + {State: aws.String("running")}, + {State: aws.String("running")}, + }, + }, + } + + numBuiltReq := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + numBuiltReq++ + }) + svc.Handlers.Send.PushBack(func(r *request.Request) { + code := 200 + if reqNum == 1 { + code = 400 + } + r.HTTPResponse = &http.Response{ + StatusCode: code, + Status: http.StatusText(code), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + }) + svc.Handlers.Unmarshal.PushBack(func(r *request.Request) { + if reqNum >= len(resps) { + assert.Fail(t, "too 
many polling requests made") + return + } + r.Data = resps[reqNum] + reqNum++ + }) + svc.Handlers.UnmarshalMeta.PushBack(func(r *request.Request) { + if reqNum == 1 { + r.Error = awserr.New("MockException", "mock exception message", nil) + // If there was an error unmarshal error will be called instead of unmarshal + // need to increment count here also + reqNum++ + } + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "States[].State", + Expected: "running", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "MockException", + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, numBuiltReq) + assert.Equal(t, 3, reqNum) +} + +func TestWaiterStatus(t *testing.T) { + svc := &mockClient{Client: awstesting.NewClient(&aws.Config{ + Region: aws.String("mock-region"), + })} + svc.Handlers.Send.Clear() // mock sending + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.ValidateResponse.Clear() + + reqNum := 0 + svc.Handlers.Build.PushBack(func(r *request.Request) { + reqNum++ + }) + svc.Handlers.Send.PushBack(func(r *request.Request) { + code := 200 + if reqNum == 3 { + code = 404 + r.Error = awserr.New("NotFound", "Not Found", nil) + } + r.HTTPResponse = &http.Response{ + StatusCode: code, + Status: http.StatusText(code), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + }) + + waiterCfg := waiter.Config{ + Operation: "Mock", + Delay: 0, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + w := waiter.Waiter{ + Client: svc, + Input: &MockInput{}, + Config: waiterCfg, + } + + err := w.Wait() + assert.NoError(t, err) + assert.Equal(t, 3, reqNum) +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/README.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +# AWS SDK for Go + +[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api) +[![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go) +[![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) + +aws-sdk-go is the official AWS SDK for the Go programming language. + +Checkout our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK. + +## Installing + +If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag you can use the following to get the SDK as the SDK's runtime dependencies are vendored in the `vendor` folder. + + $ go get -u github.com/aws/aws-sdk-go + +Otherwise you'll need to tell Go to get the SDK and all of its dependencies. + + $ go get -u github.com/aws/aws-sdk-go/... + +## Configuring Credentials + +Before using the SDK, ensure that you've configured credentials. 
The best +way to configure credentials on a development machine is to use the +`~/.aws/credentials` file, which might look like: + +``` +[default] +aws_access_key_id = AKID1234567890 +aws_secret_access_key = MY-SECRET-KEY +``` + +You can learn more about the credentials file from this +[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs). + +Alternatively, you can set the following environment variables: + +``` +AWS_ACCESS_KEY_ID=AKID1234567890 +AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY +``` + +### AWS CLI config file (`~/aws/config`) +The AWS SDK for Go does not support the AWS CLI's config file. The SDK will not use any contents from this file. The SDK only supports the shared credentials file (`~/aws/credentials`). #384 tracks this feature request discussion. + +## Using the Go SDK + +To use a service in the SDK, create a service variable by calling the `New()` +function. Once you have a service client, you can call API operations which each +return response data and a possible error. 
+ +To list a set of instance IDs from EC2, you could run: + +```go +package main + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func main() { + // Create an EC2 service object in the "us-west-2" region + // Note that you can also configure your region globally by + // exporting the AWS_REGION environment variable + svc := ec2.New(session.New(), &aws.Config{Region: aws.String("us-west-2")}) + + // Call the DescribeInstances Operation + resp, err := svc.DescribeInstances(nil) + if err != nil { + panic(err) + } + + // resp has all of the response data, pull out instance IDs: + fmt.Println("> Number of reservation sets: ", len(resp.Reservations)) + for idx, res := range resp.Reservations { + fmt.Println(" > Number of instances: ", len(res.Instances)) + for _, inst := range resp.Reservations[idx].Instances { + fmt.Println(" - Instance ID: ", *inst.InstanceId) + } + } +} +``` + +You can find more information and operations in our +[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE.txt and NOTICE.txt for more information. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/sdk.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/sdk.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/sdk.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/sdk.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,7 @@ +// Package sdk is the official AWS SDK for the Go programming language. +// +// See our Developer Guide for information for on getting started and using +// the SDK. 
+// +// https://github.com/aws/aws-sdk-go/wiki +package sdk diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/acmiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/acmiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/acmiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/acmiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,40 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package acmiface provides an interface for the AWS Certificate Manager. +package acmiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/acm" +) + +// ACMAPI is the interface type for acm.ACM. +type ACMAPI interface { + DeleteCertificateRequest(*acm.DeleteCertificateInput) (*request.Request, *acm.DeleteCertificateOutput) + + DeleteCertificate(*acm.DeleteCertificateInput) (*acm.DeleteCertificateOutput, error) + + DescribeCertificateRequest(*acm.DescribeCertificateInput) (*request.Request, *acm.DescribeCertificateOutput) + + DescribeCertificate(*acm.DescribeCertificateInput) (*acm.DescribeCertificateOutput, error) + + GetCertificateRequest(*acm.GetCertificateInput) (*request.Request, *acm.GetCertificateOutput) + + GetCertificate(*acm.GetCertificateInput) (*acm.GetCertificateOutput, error) + + ListCertificatesRequest(*acm.ListCertificatesInput) (*request.Request, *acm.ListCertificatesOutput) + + ListCertificates(*acm.ListCertificatesInput) (*acm.ListCertificatesOutput, error) + + ListCertificatesPages(*acm.ListCertificatesInput, func(*acm.ListCertificatesOutput, bool) bool) error + + RequestCertificateRequest(*acm.RequestCertificateInput) (*request.Request, *acm.RequestCertificateOutput) + + RequestCertificate(*acm.RequestCertificateInput) 
(*acm.RequestCertificateOutput, error) + + ResendValidationEmailRequest(*acm.ResendValidationEmailInput) (*request.Request, *acm.ResendValidationEmailOutput) + + ResendValidationEmail(*acm.ResendValidationEmailInput) (*acm.ResendValidationEmailOutput, error) +} + +var _ ACMAPI = (*acm.ACM)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,739 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package acm provides a client for AWS Certificate Manager. +package acm + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opDeleteCertificate = "DeleteCertificate" + +// DeleteCertificateRequest generates a request for the DeleteCertificate operation. +func (c *ACM) DeleteCertificateRequest(input *DeleteCertificateInput) (req *request.Request, output *DeleteCertificateOutput) { + op := &request.Operation{ + Name: opDeleteCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCertificateOutput{} + req.Data = output + return +} + +// Deletes an ACM Certificate and its associated private key. 
If this action +// succeeds, the certificate no longer appears in the list of ACM Certificates +// that can be displayed by calling the ListCertificates action or be retrieved +// by calling the GetCertificate action. The certificate will not be available +// for use by other AWS services. +// +// You cannot delete an ACM Certificate that is being used by another AWS service. +// To delete a certificate that is in use, the certificate association must +// first be removed. +func (c *ACM) DeleteCertificate(input *DeleteCertificateInput) (*DeleteCertificateOutput, error) { + req, out := c.DeleteCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCertificate = "DescribeCertificate" + +// DescribeCertificateRequest generates a request for the DescribeCertificate operation. +func (c *ACM) DescribeCertificateRequest(input *DescribeCertificateInput) (req *request.Request, output *DescribeCertificateOutput) { + op := &request.Operation{ + Name: opDescribeCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCertificateOutput{} + req.Data = output + return +} + +// Returns a list of the fields contained in the specified ACM Certificate. +// For example, this action returns the certificate status, a flag that indicates +// whether the certificate is associated with any other AWS service, and the +// date at which the certificate request was created. The ACM Certificate is +// specified on input by its Amazon Resource Name (ARN). +func (c *ACM) DescribeCertificate(input *DescribeCertificateInput) (*DescribeCertificateOutput, error) { + req, out := c.DescribeCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetCertificate = "GetCertificate" + +// GetCertificateRequest generates a request for the GetCertificate operation. 
+func (c *ACM) GetCertificateRequest(input *GetCertificateInput) (req *request.Request, output *GetCertificateOutput) { + op := &request.Operation{ + Name: opGetCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCertificateOutput{} + req.Data = output + return +} + +// Retrieves an ACM Certificate and certificate chain for the certificate specified +// by an ARN. The chain is an ordered list of certificates that contains the +// root certificate, intermediate certificates of subordinate CAs, and the ACM +// Certificate. The certificate and certificate chain are base64 encoded. If +// you want to decode the certificate chain to see the individual certificate +// fields, you can use OpenSSL. +// +// Currently, ACM Certificates can be used only with Elastic Load Balancing +// and Amazon CloudFront. +func (c *ACM) GetCertificate(input *GetCertificateInput) (*GetCertificateOutput, error) { + req, out := c.GetCertificateRequest(input) + err := req.Send() + return out, err +} + +const opListCertificates = "ListCertificates" + +// ListCertificatesRequest generates a request for the ListCertificates operation. +func (c *ACM) ListCertificatesRequest(input *ListCertificatesInput) (req *request.Request, output *ListCertificatesOutput) { + op := &request.Operation{ + Name: opListCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCertificatesOutput{} + req.Data = output + return +} + +// Retrieves a list of the ACM Certificate ARNs, and the domain name for each +// ARN, owned by the calling account. 
You can filter the list based on the CertificateStatuses +// parameter, and you can display up to MaxItems certificates at one time. If +// you have more than MaxItems certificates, use the NextToken marker from the +// response object in your next call to the ListCertificates action to retrieve +// the next set of certificate ARNs. +func (c *ACM) ListCertificates(input *ListCertificatesInput) (*ListCertificatesOutput, error) { + req, out := c.ListCertificatesRequest(input) + err := req.Send() + return out, err +} + +func (c *ACM) ListCertificatesPages(input *ListCertificatesInput, fn func(p *ListCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListCertificatesOutput), lastPage) + }) +} + +const opRequestCertificate = "RequestCertificate" + +// RequestCertificateRequest generates a request for the RequestCertificate operation. +func (c *ACM) RequestCertificateRequest(input *RequestCertificateInput) (req *request.Request, output *RequestCertificateOutput) { + op := &request.Operation{ + Name: opRequestCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestCertificateOutput{} + req.Data = output + return +} + +// Requests an ACM Certificate for use with other AWS services. To request an +// ACM Certificate, you must specify the fully qualified domain name (FQDN) +// for your site. You can also specify additional FQDNs if users can reach your +// site by using other names. For each domain name you specify, email is sent +// to the domain owner to request approval to issue the certificate. After receiving +// approval from the domain owner, the ACM Certificate is issued. 
For more information, +// see the AWS Certificate Manager User Guide (http://docs.aws.amazon.com/acm/latest/userguide/overview.html). +func (c *ACM) RequestCertificate(input *RequestCertificateInput) (*RequestCertificateOutput, error) { + req, out := c.RequestCertificateRequest(input) + err := req.Send() + return out, err +} + +const opResendValidationEmail = "ResendValidationEmail" + +// ResendValidationEmailRequest generates a request for the ResendValidationEmail operation. +func (c *ACM) ResendValidationEmailRequest(input *ResendValidationEmailInput) (req *request.Request, output *ResendValidationEmailOutput) { + op := &request.Operation{ + Name: opResendValidationEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResendValidationEmailInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResendValidationEmailOutput{} + req.Data = output + return +} + +// Resends the email that requests domain ownership validation. The domain owner +// or an authorized representative must approve the ACM Certificate before it +// can be issued. The certificate can be approved by clicking a link in the +// mail to navigate to the Amazon certificate approval website and then clicking +// I Approve. However, the validation email can be blocked by spam filters. +// Therefore, if you do not receive the original mail, you can request that +// the mail be resent within 72 hours of requesting the ACM Certificate. If +// more than 72 hours have elapsed since your original request or since your +// last attempt to resend validation mail, you must request a new certificate. 
+func (c *ACM) ResendValidationEmail(input *ResendValidationEmailInput) (*ResendValidationEmailOutput, error) { + req, out := c.ResendValidationEmailRequest(input) + err := req.Send() + return out, err +} + +// This structure is returned in the response object of the DescribeCertificate +// action. +type CertificateDetail struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the certificate. This is of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string"` + + // Time at which the certificate was requested. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Fully qualified domain name (FQDN), such as www.example.com or example.com, + // for the certificate. + DomainName *string `min:"1" type:"string"` + + // References a DomainValidation structure that contains the domain name in + // the certificate and the email address that can be used for validation. + DomainValidationOptions []*DomainValidation `min:"1" type:"list"` + + // List that identifies ARNs that are using the certificate. A single ACM Certificate + // can be used by multiple AWS resources. + InUseBy []*string `type:"list"` + + // Time at which the certificate was issued. + IssuedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The X.500 distinguished name of the CA that issued and signed the certificate. + Issuer *string `type:"string"` + + // Asymmetric algorithm used to generate the public and private key pair. Currently + // the only supported value is RSA_2048. + KeyAlgorithm *string `type:"string" enum:"KeyAlgorithm"` + + // Time after which the certificate is not valid. 
+ NotAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Time before which the certificate is not valid. + NotBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A RevocationReason enumeration value that indicates why the certificate was + // revoked. This value exists only if the certificate has been revoked. This + // can be one of the following vales: UNSPECIFIED KEY_COMPROMISE CA_COMPROMISE + // AFFILIATION_CHANGED SUPERCEDED CESSATION_OF_OPERATION CERTIFICATE_HOLD REMOVE_FROM_CRL + // PRIVILEGE_WITHDRAWN A_A_COMPROMISE + RevocationReason *string `type:"string" enum:"RevocationReason"` + + // The time, if any, at which the certificate was revoked. This value exists + // only if the certificate has been revoked. + RevokedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // String that contains the serial number of the certificate. + Serial *string `type:"string"` + + // Algorithm used to generate a signature. Currently the only supported value + // is SHA256WITHRSA. + SignatureAlgorithm *string `type:"string"` + + // A CertificateStatus enumeration value that can contain one of the following: + // PENDING_VALIDATION ISSUED INACTIVE EXPIRED REVOKED FAILED VALIDATION_TIMED_OUT + Status *string `type:"string" enum:"CertificateStatus"` + + // The X.500 distinguished name of the entity associated with the public key + // contained in the certificate. + Subject *string `type:"string"` + + // One or more domain names (subject alternative names) included in the certificate + // request. After the certificate is issued, this list includes the domain names + // bound to the public key contained in the certificate. The subject alternative + // names include the canonical domain name (CN) of the certificate and additional + // domain names that can be used to connect to the website. 
+ SubjectAlternativeNames []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s CertificateDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateDetail) GoString() string { + return s.String() +} + +// This structure is returned in the response object of ListCertificates action. +type CertificateSummary struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the certificate. This is of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string"` + + // Fully qualified domain name (FQDN), such as www.example.com or example.com, + // for the certificate. + DomainName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CertificateSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateSummary) GoString() string { + return s.String() +} + +type DeleteCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the ACM Certificate to be deleted. This must + // be of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). 
+ CertificateArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCertificateInput) GoString() string { + return s.String() +} + +type DeleteCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCertificateOutput) GoString() string { + return s.String() +} + +type DescribeCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains an ACM Certificate ARN. The ARN must be of the form: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificateInput) GoString() string { + return s.String() +} + +type DescribeCertificateOutput struct { + _ struct{} `type:"structure"` + + // Contains a CertificateDetail structure that lists the fields of an ACM Certificate. 
+ Certificate *CertificateDetail `type:"structure"` +} + +// String returns the string representation +func (s DescribeCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificateOutput) GoString() string { + return s.String() +} + +// Structure that contains the domain name, the base validation domain to which +// validation email is sent, and the email addresses used to validate the domain +// identity. +type DomainValidation struct { + _ struct{} `type:"structure"` + + // Fully Qualified Domain Name (FQDN) of the form www.example.com or example.com + DomainName *string `min:"1" type:"string" required:"true"` + + // The base validation domain that acts as the suffix of the email addresses + // that are used to send the emails. + ValidationDomain *string `min:"1" type:"string"` + + // A list of contact address for the domain registrant. + ValidationEmails []*string `type:"list"` +} + +// String returns the string representation +func (s DomainValidation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainValidation) GoString() string { + return s.String() +} + +// This structure is used in the request object of the RequestCertificate action. +type DomainValidationOption struct { + _ struct{} `type:"structure"` + + // Fully Qualified Domain Name (FQDN) of the certificate being requested. + DomainName *string `min:"1" type:"string" required:"true"` + + // The domain to which validation email is sent. This is the base validation + // domain that will act as the suffix of the email addresses. This must be the + // same as the DomainName value or a superdomain of the DomainName value. 
For + // example, if you requested a certificate for site.subdomain.example.com and + // specify a ValidationDomain of subdomain.example.com, ACM sends email to the + // domain registrant, technical contact, and administrative contact in WHOIS + // for the base domain and the and the following five addresses: admin@subdomain.example.com + // administrator@subdomain.example.com hostmaster@subdomain.example.com postmaster@subdomain.example.com + // webmaster@subdomain.example.com + ValidationDomain *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DomainValidationOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainValidationOption) GoString() string { + return s.String() +} + +type GetCertificateInput struct { + _ struct{} `type:"structure"` + + // String that contains a certificate ARN in the following format: + // + // arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + CertificateArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCertificateInput) GoString() string { + return s.String() +} + +type GetCertificateOutput struct { + _ struct{} `type:"structure"` + + // String that contains the ACM Certificate represented by the ARN specified + // at input. + Certificate *string `min:"1" type:"string"` + + // The certificate chain that contains the root certificate issued by the certificate + // authority (CA). 
+ CertificateChain *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCertificateOutput) GoString() string { + return s.String() +} + +type ListCertificatesInput struct { + _ struct{} `type:"structure"` + + // Identifies the statuses of the ACM Certificates for which you want to retrieve + // the ARNs. This can be one or more of the following values: PENDING_VALIDATION + // ISSUED INACTIVE EXPIRED VALIDATION_TIMED_OUT REVOKED FAILED + CertificateStatuses []*string `type:"list"` + + // Specify this parameter when paginating results to indicate the maximum number + // of ACM Certificates that you want to display for each response. If there + // are additional certificates beyond the maximum you specify, use the NextToken + // value in your next call to the ListCertificates action. + MaxItems *int64 `min:"1" type:"integer"` + + // String that contains an opaque marker of the next ACM Certificate ARN to + // be displayed. Use this parameter when paginating results, and only in a subsequent + // request after you've received a response where the results have been truncated. + // Set it to an empty string the first time you call this action, and set it + // to the value of the NextToken element you receive in the response object + // for subsequent calls. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCertificatesInput) GoString() string { + return s.String() +} + +type ListCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the certificate ARNs. 
+ CertificateSummaryList []*CertificateSummary `type:"list"` + + // If the list has been truncated, this value is present and should be used + // for the NextToken input parameter on your next call to ListCertificates. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCertificatesOutput) GoString() string { + return s.String() +} + +type RequestCertificateInput struct { + _ struct{} `type:"structure"` + + // Fully qualified domain name (FQDN), such as www.example.com, of the site + // you want to secure with an ACM Certificate. Use an asterisk (*) to create + // a wildcard certificate that protects several sites in the same domain. For + // example, *.example.com protects www.example.com, site.example.com, and images.example.com. + DomainName *string `min:"1" type:"string" required:"true"` + + // The base validation domain that will act as the suffix of the email addresses + // that are used to send the emails. This must be the same as the Domain value + // or a superdomain of the Domain value. For example, if you requested a certificate + // for www.example.com and specify DomainValidationOptions of example.com, ACM + // sends email to the domain registrant, technical contact, and administrative + // contact in WHOIS and the following five addresses: admin@example.com administrator@example.com + // hostmaster@example.com postmaster@example.com webmaster@example.com + DomainValidationOptions []*DomainValidationOption `min:"1" type:"list"` + + // Customer chosen string that can be used to distinguish between calls to RequestCertificate. + // Idempotency tokens time out after one hour. 
Therefore, if you call RequestCertificate + // multiple times with the same idempotency token within one hour, ACM recognizes + // that you are requesting only one certificate and will issue only one. If + // you change the idempotency token for each call, ACM recognizes that you are + // requesting multiple certificates. + IdempotencyToken *string `min:"1" type:"string"` + + // Additional FQDNs to be included in the Subject Alternative Name extension + // of the ACM Certificate. For example, add the name www.example.net to a certificate + // for which the DomainName field is www.example.com if users can reach your + // site by using either name. + SubjectAlternativeNames []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s RequestCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCertificateInput) GoString() string { + return s.String() +} + +type RequestCertificateOutput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the issued certificate. This must be of the + // form: + // + // arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 + CertificateArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s RequestCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCertificateOutput) GoString() string { + return s.String() +} + +type ResendValidationEmailInput struct { + _ struct{} `type:"structure"` + + // String that contains the ARN of the requested certificate. The certificate + // ARN is generated and returned by RequestCertificate as soon as the request + // is made. By default, using this parameter causes email to be sent to all + // top-level domains you specified in the certificate request. 
+ // + // The ARN must be of the form: + // + // arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012 + CertificateArn *string `min:"20" type:"string" required:"true"` + + // The Fully Qualified Domain Name (FQDN) of the certificate that needs to be + // validated. + Domain *string `min:"1" type:"string" required:"true"` + + // The base validation domain that will act as the suffix of the email addresses + // that are used to send the emails. This must be the same as the Domain value + // or a superdomain of the Domain value. For example, if you requested a certificate + // for site.subdomain.example.com and specify a ValidationDomain of subdomain.example.com, + // ACM sends email to the domain registrant, technical contact, and administrative + // contact in WHOIS and the following five addresses: admin@subdomain.example.com + // administrator@subdomain.example.com hostmaster@subdomain.example.com postmaster@subdomain.example.com + // webmaster@subdomain.example.com + ValidationDomain *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResendValidationEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResendValidationEmailInput) GoString() string { + return s.String() +} + +type ResendValidationEmailOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResendValidationEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResendValidationEmailOutput) GoString() string { + return s.String() +} + +const ( + // @enum CertificateStatus + CertificateStatusPendingValidation = "PENDING_VALIDATION" + // @enum CertificateStatus + CertificateStatusIssued = "ISSUED" + // @enum CertificateStatus + CertificateStatusInactive = "INACTIVE" + // @enum CertificateStatus + CertificateStatusExpired = "EXPIRED" + // 
@enum CertificateStatus + CertificateStatusValidationTimedOut = "VALIDATION_TIMED_OUT" + // @enum CertificateStatus + CertificateStatusRevoked = "REVOKED" + // @enum CertificateStatus + CertificateStatusFailed = "FAILED" +) + +const ( + // @enum KeyAlgorithm + KeyAlgorithmRsa2048 = "RSA_2048" + // @enum KeyAlgorithm + KeyAlgorithmEcPrime256v1 = "EC_prime256v1" +) + +const ( + // @enum RevocationReason + RevocationReasonUnspecified = "UNSPECIFIED" + // @enum RevocationReason + RevocationReasonKeyCompromise = "KEY_COMPROMISE" + // @enum RevocationReason + RevocationReasonCaCompromise = "CA_COMPROMISE" + // @enum RevocationReason + RevocationReasonAffiliationChanged = "AFFILIATION_CHANGED" + // @enum RevocationReason + RevocationReasonSuperceded = "SUPERCEDED" + // @enum RevocationReason + RevocationReasonCessationOfOperation = "CESSATION_OF_OPERATION" + // @enum RevocationReason + RevocationReasonCertificateHold = "CERTIFICATE_HOLD" + // @enum RevocationReason + RevocationReasonRemoveFromCrl = "REMOVE_FROM_CRL" + // @enum RevocationReason + RevocationReasonPrivilegeWithdrawn = "PRIVILEGE_WITHDRAWN" + // @enum RevocationReason + RevocationReasonAACompromise = "A_A_COMPROMISE" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,149 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package acm_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/acm" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleACM_DeleteCertificate() { + svc := acm.New(session.New()) + + params := &acm.DeleteCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + } + resp, err := svc.DeleteCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_DescribeCertificate() { + svc := acm.New(session.New()) + + params := &acm.DescribeCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + } + resp, err := svc.DescribeCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_GetCertificate() { + svc := acm.New(session.New()) + + params := &acm.GetCertificateInput{ + CertificateArn: aws.String("Arn"), // Required + } + resp, err := svc.GetCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_ListCertificates() { + svc := acm.New(session.New()) + + params := &acm.ListCertificatesInput{ + CertificateStatuses: []*string{ + aws.String("CertificateStatus"), // Required + // More values... + }, + MaxItems: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_RequestCertificate() { + svc := acm.New(session.New()) + + params := &acm.RequestCertificateInput{ + DomainName: aws.String("DomainNameString"), // Required + DomainValidationOptions: []*acm.DomainValidationOption{ + { // Required + DomainName: aws.String("DomainNameString"), // Required + ValidationDomain: aws.String("DomainNameString"), // Required + }, + // More values... + }, + IdempotencyToken: aws.String("IdempotencyToken"), + SubjectAlternativeNames: []*string{ + aws.String("DomainNameString"), // Required + // More values... + }, + } + resp, err := svc.RequestCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleACM_ResendValidationEmail() { + svc := acm.New(session.New()) + + params := &acm.ResendValidationEmailInput{ + CertificateArn: aws.String("Arn"), // Required + Domain: aws.String("DomainNameString"), // Required + ValidationDomain: aws.String("DomainNameString"), // Required + } + resp, err := svc.ResendValidationEmail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/acm/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,94 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package acm + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Welcome to the AWS Certificate Manager (ACM) CLI Command Reference. This +// guide provides descriptions, syntax, and usage examples for each ACM CLI +// command. You can use AWS Certificate Manager to request ACM Certificates +// for your AWS-based websites and applications. For general information about +// using ACM and for more information about using the console, see the AWS Certificate +// Manager User Guide (url-acm-ug;acm-overview.html). For more information about +// using the ACM API, see the AWS Certificate Manager API Reference (http://docs.aws.amazon.com/acm/latest/APIReference/Welcome.html). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type ACM struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. 
+const ServiceName = "acm" + +// New creates a new instance of the ACM client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ACM client from just a session. +// svc := acm.New(mySession) +// +// // Create a ACM client with additional configuration +// svc := acm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ACM { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ACM { + svc := &ACM{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-12-08", + JSONVersion: "1.1", + TargetPrefix: "CertificateManager", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ACM operation and runs any +// custom request initialization. 
+func (c *ACM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/apigatewayiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,298 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package apigatewayiface provides an interface for the Amazon API Gateway. +package apigatewayiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/apigateway" +) + +// APIGatewayAPI is the interface type for apigateway.APIGateway. 
+type APIGatewayAPI interface { + CreateApiKeyRequest(*apigateway.CreateApiKeyInput) (*request.Request, *apigateway.ApiKey) + + CreateApiKey(*apigateway.CreateApiKeyInput) (*apigateway.ApiKey, error) + + CreateBasePathMappingRequest(*apigateway.CreateBasePathMappingInput) (*request.Request, *apigateway.BasePathMapping) + + CreateBasePathMapping(*apigateway.CreateBasePathMappingInput) (*apigateway.BasePathMapping, error) + + CreateDeploymentRequest(*apigateway.CreateDeploymentInput) (*request.Request, *apigateway.Deployment) + + CreateDeployment(*apigateway.CreateDeploymentInput) (*apigateway.Deployment, error) + + CreateDomainNameRequest(*apigateway.CreateDomainNameInput) (*request.Request, *apigateway.DomainName) + + CreateDomainName(*apigateway.CreateDomainNameInput) (*apigateway.DomainName, error) + + CreateModelRequest(*apigateway.CreateModelInput) (*request.Request, *apigateway.Model) + + CreateModel(*apigateway.CreateModelInput) (*apigateway.Model, error) + + CreateResourceRequest(*apigateway.CreateResourceInput) (*request.Request, *apigateway.Resource) + + CreateResource(*apigateway.CreateResourceInput) (*apigateway.Resource, error) + + CreateRestApiRequest(*apigateway.CreateRestApiInput) (*request.Request, *apigateway.RestApi) + + CreateRestApi(*apigateway.CreateRestApiInput) (*apigateway.RestApi, error) + + CreateStageRequest(*apigateway.CreateStageInput) (*request.Request, *apigateway.Stage) + + CreateStage(*apigateway.CreateStageInput) (*apigateway.Stage, error) + + DeleteApiKeyRequest(*apigateway.DeleteApiKeyInput) (*request.Request, *apigateway.DeleteApiKeyOutput) + + DeleteApiKey(*apigateway.DeleteApiKeyInput) (*apigateway.DeleteApiKeyOutput, error) + + DeleteBasePathMappingRequest(*apigateway.DeleteBasePathMappingInput) (*request.Request, *apigateway.DeleteBasePathMappingOutput) + + DeleteBasePathMapping(*apigateway.DeleteBasePathMappingInput) (*apigateway.DeleteBasePathMappingOutput, error) + + 
DeleteClientCertificateRequest(*apigateway.DeleteClientCertificateInput) (*request.Request, *apigateway.DeleteClientCertificateOutput) + + DeleteClientCertificate(*apigateway.DeleteClientCertificateInput) (*apigateway.DeleteClientCertificateOutput, error) + + DeleteDeploymentRequest(*apigateway.DeleteDeploymentInput) (*request.Request, *apigateway.DeleteDeploymentOutput) + + DeleteDeployment(*apigateway.DeleteDeploymentInput) (*apigateway.DeleteDeploymentOutput, error) + + DeleteDomainNameRequest(*apigateway.DeleteDomainNameInput) (*request.Request, *apigateway.DeleteDomainNameOutput) + + DeleteDomainName(*apigateway.DeleteDomainNameInput) (*apigateway.DeleteDomainNameOutput, error) + + DeleteIntegrationRequest(*apigateway.DeleteIntegrationInput) (*request.Request, *apigateway.DeleteIntegrationOutput) + + DeleteIntegration(*apigateway.DeleteIntegrationInput) (*apigateway.DeleteIntegrationOutput, error) + + DeleteIntegrationResponseRequest(*apigateway.DeleteIntegrationResponseInput) (*request.Request, *apigateway.DeleteIntegrationResponseOutput) + + DeleteIntegrationResponse(*apigateway.DeleteIntegrationResponseInput) (*apigateway.DeleteIntegrationResponseOutput, error) + + DeleteMethodRequest(*apigateway.DeleteMethodInput) (*request.Request, *apigateway.DeleteMethodOutput) + + DeleteMethod(*apigateway.DeleteMethodInput) (*apigateway.DeleteMethodOutput, error) + + DeleteMethodResponseRequest(*apigateway.DeleteMethodResponseInput) (*request.Request, *apigateway.DeleteMethodResponseOutput) + + DeleteMethodResponse(*apigateway.DeleteMethodResponseInput) (*apigateway.DeleteMethodResponseOutput, error) + + DeleteModelRequest(*apigateway.DeleteModelInput) (*request.Request, *apigateway.DeleteModelOutput) + + DeleteModel(*apigateway.DeleteModelInput) (*apigateway.DeleteModelOutput, error) + + DeleteResourceRequest(*apigateway.DeleteResourceInput) (*request.Request, *apigateway.DeleteResourceOutput) + + DeleteResource(*apigateway.DeleteResourceInput) 
(*apigateway.DeleteResourceOutput, error) + + DeleteRestApiRequest(*apigateway.DeleteRestApiInput) (*request.Request, *apigateway.DeleteRestApiOutput) + + DeleteRestApi(*apigateway.DeleteRestApiInput) (*apigateway.DeleteRestApiOutput, error) + + DeleteStageRequest(*apigateway.DeleteStageInput) (*request.Request, *apigateway.DeleteStageOutput) + + DeleteStage(*apigateway.DeleteStageInput) (*apigateway.DeleteStageOutput, error) + + FlushStageCacheRequest(*apigateway.FlushStageCacheInput) (*request.Request, *apigateway.FlushStageCacheOutput) + + FlushStageCache(*apigateway.FlushStageCacheInput) (*apigateway.FlushStageCacheOutput, error) + + GenerateClientCertificateRequest(*apigateway.GenerateClientCertificateInput) (*request.Request, *apigateway.ClientCertificate) + + GenerateClientCertificate(*apigateway.GenerateClientCertificateInput) (*apigateway.ClientCertificate, error) + + GetAccountRequest(*apigateway.GetAccountInput) (*request.Request, *apigateway.Account) + + GetAccount(*apigateway.GetAccountInput) (*apigateway.Account, error) + + GetApiKeyRequest(*apigateway.GetApiKeyInput) (*request.Request, *apigateway.ApiKey) + + GetApiKey(*apigateway.GetApiKeyInput) (*apigateway.ApiKey, error) + + GetApiKeysRequest(*apigateway.GetApiKeysInput) (*request.Request, *apigateway.GetApiKeysOutput) + + GetApiKeys(*apigateway.GetApiKeysInput) (*apigateway.GetApiKeysOutput, error) + + GetApiKeysPages(*apigateway.GetApiKeysInput, func(*apigateway.GetApiKeysOutput, bool) bool) error + + GetBasePathMappingRequest(*apigateway.GetBasePathMappingInput) (*request.Request, *apigateway.BasePathMapping) + + GetBasePathMapping(*apigateway.GetBasePathMappingInput) (*apigateway.BasePathMapping, error) + + GetBasePathMappingsRequest(*apigateway.GetBasePathMappingsInput) (*request.Request, *apigateway.GetBasePathMappingsOutput) + + GetBasePathMappings(*apigateway.GetBasePathMappingsInput) (*apigateway.GetBasePathMappingsOutput, error) + + 
GetBasePathMappingsPages(*apigateway.GetBasePathMappingsInput, func(*apigateway.GetBasePathMappingsOutput, bool) bool) error + + GetClientCertificateRequest(*apigateway.GetClientCertificateInput) (*request.Request, *apigateway.ClientCertificate) + + GetClientCertificate(*apigateway.GetClientCertificateInput) (*apigateway.ClientCertificate, error) + + GetClientCertificatesRequest(*apigateway.GetClientCertificatesInput) (*request.Request, *apigateway.GetClientCertificatesOutput) + + GetClientCertificates(*apigateway.GetClientCertificatesInput) (*apigateway.GetClientCertificatesOutput, error) + + GetClientCertificatesPages(*apigateway.GetClientCertificatesInput, func(*apigateway.GetClientCertificatesOutput, bool) bool) error + + GetDeploymentRequest(*apigateway.GetDeploymentInput) (*request.Request, *apigateway.Deployment) + + GetDeployment(*apigateway.GetDeploymentInput) (*apigateway.Deployment, error) + + GetDeploymentsRequest(*apigateway.GetDeploymentsInput) (*request.Request, *apigateway.GetDeploymentsOutput) + + GetDeployments(*apigateway.GetDeploymentsInput) (*apigateway.GetDeploymentsOutput, error) + + GetDeploymentsPages(*apigateway.GetDeploymentsInput, func(*apigateway.GetDeploymentsOutput, bool) bool) error + + GetDomainNameRequest(*apigateway.GetDomainNameInput) (*request.Request, *apigateway.DomainName) + + GetDomainName(*apigateway.GetDomainNameInput) (*apigateway.DomainName, error) + + GetDomainNamesRequest(*apigateway.GetDomainNamesInput) (*request.Request, *apigateway.GetDomainNamesOutput) + + GetDomainNames(*apigateway.GetDomainNamesInput) (*apigateway.GetDomainNamesOutput, error) + + GetDomainNamesPages(*apigateway.GetDomainNamesInput, func(*apigateway.GetDomainNamesOutput, bool) bool) error + + GetIntegrationRequest(*apigateway.GetIntegrationInput) (*request.Request, *apigateway.Integration) + + GetIntegration(*apigateway.GetIntegrationInput) (*apigateway.Integration, error) + + GetIntegrationResponseRequest(*apigateway.GetIntegrationResponseInput) 
(*request.Request, *apigateway.IntegrationResponse) + + GetIntegrationResponse(*apigateway.GetIntegrationResponseInput) (*apigateway.IntegrationResponse, error) + + GetMethodRequest(*apigateway.GetMethodInput) (*request.Request, *apigateway.Method) + + GetMethod(*apigateway.GetMethodInput) (*apigateway.Method, error) + + GetMethodResponseRequest(*apigateway.GetMethodResponseInput) (*request.Request, *apigateway.MethodResponse) + + GetMethodResponse(*apigateway.GetMethodResponseInput) (*apigateway.MethodResponse, error) + + GetModelRequest(*apigateway.GetModelInput) (*request.Request, *apigateway.Model) + + GetModel(*apigateway.GetModelInput) (*apigateway.Model, error) + + GetModelTemplateRequest(*apigateway.GetModelTemplateInput) (*request.Request, *apigateway.GetModelTemplateOutput) + + GetModelTemplate(*apigateway.GetModelTemplateInput) (*apigateway.GetModelTemplateOutput, error) + + GetModelsRequest(*apigateway.GetModelsInput) (*request.Request, *apigateway.GetModelsOutput) + + GetModels(*apigateway.GetModelsInput) (*apigateway.GetModelsOutput, error) + + GetModelsPages(*apigateway.GetModelsInput, func(*apigateway.GetModelsOutput, bool) bool) error + + GetResourceRequest(*apigateway.GetResourceInput) (*request.Request, *apigateway.Resource) + + GetResource(*apigateway.GetResourceInput) (*apigateway.Resource, error) + + GetResourcesRequest(*apigateway.GetResourcesInput) (*request.Request, *apigateway.GetResourcesOutput) + + GetResources(*apigateway.GetResourcesInput) (*apigateway.GetResourcesOutput, error) + + GetResourcesPages(*apigateway.GetResourcesInput, func(*apigateway.GetResourcesOutput, bool) bool) error + + GetRestApiRequest(*apigateway.GetRestApiInput) (*request.Request, *apigateway.RestApi) + + GetRestApi(*apigateway.GetRestApiInput) (*apigateway.RestApi, error) + + GetRestApisRequest(*apigateway.GetRestApisInput) (*request.Request, *apigateway.GetRestApisOutput) + + GetRestApis(*apigateway.GetRestApisInput) (*apigateway.GetRestApisOutput, error) + + 
GetRestApisPages(*apigateway.GetRestApisInput, func(*apigateway.GetRestApisOutput, bool) bool) error + + GetSdkRequest(*apigateway.GetSdkInput) (*request.Request, *apigateway.GetSdkOutput) + + GetSdk(*apigateway.GetSdkInput) (*apigateway.GetSdkOutput, error) + + GetStageRequest(*apigateway.GetStageInput) (*request.Request, *apigateway.Stage) + + GetStage(*apigateway.GetStageInput) (*apigateway.Stage, error) + + GetStagesRequest(*apigateway.GetStagesInput) (*request.Request, *apigateway.GetStagesOutput) + + GetStages(*apigateway.GetStagesInput) (*apigateway.GetStagesOutput, error) + + PutIntegrationRequest(*apigateway.PutIntegrationInput) (*request.Request, *apigateway.Integration) + + PutIntegration(*apigateway.PutIntegrationInput) (*apigateway.Integration, error) + + PutIntegrationResponseRequest(*apigateway.PutIntegrationResponseInput) (*request.Request, *apigateway.IntegrationResponse) + + PutIntegrationResponse(*apigateway.PutIntegrationResponseInput) (*apigateway.IntegrationResponse, error) + + PutMethodRequest(*apigateway.PutMethodInput) (*request.Request, *apigateway.Method) + + PutMethod(*apigateway.PutMethodInput) (*apigateway.Method, error) + + PutMethodResponseRequest(*apigateway.PutMethodResponseInput) (*request.Request, *apigateway.MethodResponse) + + PutMethodResponse(*apigateway.PutMethodResponseInput) (*apigateway.MethodResponse, error) + + TestInvokeMethodRequest(*apigateway.TestInvokeMethodInput) (*request.Request, *apigateway.TestInvokeMethodOutput) + + TestInvokeMethod(*apigateway.TestInvokeMethodInput) (*apigateway.TestInvokeMethodOutput, error) + + UpdateAccountRequest(*apigateway.UpdateAccountInput) (*request.Request, *apigateway.Account) + + UpdateAccount(*apigateway.UpdateAccountInput) (*apigateway.Account, error) + + UpdateApiKeyRequest(*apigateway.UpdateApiKeyInput) (*request.Request, *apigateway.ApiKey) + + UpdateApiKey(*apigateway.UpdateApiKeyInput) (*apigateway.ApiKey, error) + + 
UpdateBasePathMappingRequest(*apigateway.UpdateBasePathMappingInput) (*request.Request, *apigateway.BasePathMapping) + + UpdateBasePathMapping(*apigateway.UpdateBasePathMappingInput) (*apigateway.BasePathMapping, error) + + UpdateClientCertificateRequest(*apigateway.UpdateClientCertificateInput) (*request.Request, *apigateway.ClientCertificate) + + UpdateClientCertificate(*apigateway.UpdateClientCertificateInput) (*apigateway.ClientCertificate, error) + + UpdateDeploymentRequest(*apigateway.UpdateDeploymentInput) (*request.Request, *apigateway.Deployment) + + UpdateDeployment(*apigateway.UpdateDeploymentInput) (*apigateway.Deployment, error) + + UpdateDomainNameRequest(*apigateway.UpdateDomainNameInput) (*request.Request, *apigateway.DomainName) + + UpdateDomainName(*apigateway.UpdateDomainNameInput) (*apigateway.DomainName, error) + + UpdateIntegrationRequest(*apigateway.UpdateIntegrationInput) (*request.Request, *apigateway.Integration) + + UpdateIntegration(*apigateway.UpdateIntegrationInput) (*apigateway.Integration, error) + + UpdateIntegrationResponseRequest(*apigateway.UpdateIntegrationResponseInput) (*request.Request, *apigateway.IntegrationResponse) + + UpdateIntegrationResponse(*apigateway.UpdateIntegrationResponseInput) (*apigateway.IntegrationResponse, error) + + UpdateMethodRequest(*apigateway.UpdateMethodInput) (*request.Request, *apigateway.Method) + + UpdateMethod(*apigateway.UpdateMethodInput) (*apigateway.Method, error) + + UpdateMethodResponseRequest(*apigateway.UpdateMethodResponseInput) (*request.Request, *apigateway.MethodResponse) + + UpdateMethodResponse(*apigateway.UpdateMethodResponseInput) (*apigateway.MethodResponse, error) + + UpdateModelRequest(*apigateway.UpdateModelInput) (*request.Request, *apigateway.Model) + + UpdateModel(*apigateway.UpdateModelInput) (*apigateway.Model, error) + + UpdateResourceRequest(*apigateway.UpdateResourceInput) (*request.Request, *apigateway.Resource) + + UpdateResource(*apigateway.UpdateResourceInput) 
(*apigateway.Resource, error) + + UpdateRestApiRequest(*apigateway.UpdateRestApiInput) (*request.Request, *apigateway.RestApi) + + UpdateRestApi(*apigateway.UpdateRestApiInput) (*apigateway.RestApi, error) + + UpdateStageRequest(*apigateway.UpdateStageInput) (*request.Request, *apigateway.Stage) + + UpdateStage(*apigateway.UpdateStageInput) (*apigateway.Stage, error) +} + +var _ APIGatewayAPI = (*apigateway.APIGateway)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4802 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package apigateway provides a client for Amazon API Gateway. +package apigateway + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opCreateApiKey = "CreateApiKey" + +// CreateApiKeyRequest generates a request for the CreateApiKey operation. 
+func (c *APIGateway) CreateApiKeyRequest(input *CreateApiKeyInput) (req *request.Request, output *ApiKey) { + op := &request.Operation{ + Name: opCreateApiKey, + HTTPMethod: "POST", + HTTPPath: "/apikeys", + } + + if input == nil { + input = &CreateApiKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &ApiKey{} + req.Data = output + return +} + +func (c *APIGateway) CreateApiKey(input *CreateApiKeyInput) (*ApiKey, error) { + req, out := c.CreateApiKeyRequest(input) + err := req.Send() + return out, err +} + +const opCreateBasePathMapping = "CreateBasePathMapping" + +// CreateBasePathMappingRequest generates a request for the CreateBasePathMapping operation. +func (c *APIGateway) CreateBasePathMappingRequest(input *CreateBasePathMappingInput) (req *request.Request, output *BasePathMapping) { + op := &request.Operation{ + Name: opCreateBasePathMapping, + HTTPMethod: "POST", + HTTPPath: "/domainnames/{domain_name}/basepathmappings", + } + + if input == nil { + input = &CreateBasePathMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &BasePathMapping{} + req.Data = output + return +} + +// Creates a new BasePathMapping resource. +func (c *APIGateway) CreateBasePathMapping(input *CreateBasePathMappingInput) (*BasePathMapping, error) { + req, out := c.CreateBasePathMappingRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a request for the CreateDeployment operation. 
+func (c *APIGateway) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *Deployment) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/deployments", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &Deployment{} + req.Data = output + return +} + +// Creates a Deployment resource, which makes a specified RestApi callable over +// the internet. +func (c *APIGateway) CreateDeployment(input *CreateDeploymentInput) (*Deployment, error) { + req, out := c.CreateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opCreateDomainName = "CreateDomainName" + +// CreateDomainNameRequest generates a request for the CreateDomainName operation. +func (c *APIGateway) CreateDomainNameRequest(input *CreateDomainNameInput) (req *request.Request, output *DomainName) { + op := &request.Operation{ + Name: opCreateDomainName, + HTTPMethod: "POST", + HTTPPath: "/domainnames", + } + + if input == nil { + input = &CreateDomainNameInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainName{} + req.Data = output + return +} + +// Creates a new domain name. +func (c *APIGateway) CreateDomainName(input *CreateDomainNameInput) (*DomainName, error) { + req, out := c.CreateDomainNameRequest(input) + err := req.Send() + return out, err +} + +const opCreateModel = "CreateModel" + +// CreateModelRequest generates a request for the CreateModel operation. +func (c *APIGateway) CreateModelRequest(input *CreateModelInput) (req *request.Request, output *Model) { + op := &request.Operation{ + Name: opCreateModel, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/models", + } + + if input == nil { + input = &CreateModelInput{} + } + + req = c.newRequest(op, input, output) + output = &Model{} + req.Data = output + return +} + +// Adds a new Model resource to an existing RestApi resource. 
+func (c *APIGateway) CreateModel(input *CreateModelInput) (*Model, error) { + req, out := c.CreateModelRequest(input) + err := req.Send() + return out, err +} + +const opCreateResource = "CreateResource" + +// CreateResourceRequest generates a request for the CreateResource operation. +func (c *APIGateway) CreateResourceRequest(input *CreateResourceInput) (req *request.Request, output *Resource) { + op := &request.Operation{ + Name: opCreateResource, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/resources/{parent_id}", + } + + if input == nil { + input = &CreateResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &Resource{} + req.Data = output + return +} + +// Creates a Resource resource. +func (c *APIGateway) CreateResource(input *CreateResourceInput) (*Resource, error) { + req, out := c.CreateResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateRestApi = "CreateRestApi" + +// CreateRestApiRequest generates a request for the CreateRestApi operation. +func (c *APIGateway) CreateRestApiRequest(input *CreateRestApiInput) (req *request.Request, output *RestApi) { + op := &request.Operation{ + Name: opCreateRestApi, + HTTPMethod: "POST", + HTTPPath: "/restapis", + } + + if input == nil { + input = &CreateRestApiInput{} + } + + req = c.newRequest(op, input, output) + output = &RestApi{} + req.Data = output + return +} + +// Creates a new RestApi resource. +func (c *APIGateway) CreateRestApi(input *CreateRestApiInput) (*RestApi, error) { + req, out := c.CreateRestApiRequest(input) + err := req.Send() + return out, err +} + +const opCreateStage = "CreateStage" + +// CreateStageRequest generates a request for the CreateStage operation. 
+func (c *APIGateway) CreateStageRequest(input *CreateStageInput) (req *request.Request, output *Stage) { + op := &request.Operation{ + Name: opCreateStage, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/stages", + } + + if input == nil { + input = &CreateStageInput{} + } + + req = c.newRequest(op, input, output) + output = &Stage{} + req.Data = output + return +} + +// Creates a Stage resource. +func (c *APIGateway) CreateStage(input *CreateStageInput) (*Stage, error) { + req, out := c.CreateStageRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApiKey = "DeleteApiKey" + +// DeleteApiKeyRequest generates a request for the DeleteApiKey operation. +func (c *APIGateway) DeleteApiKeyRequest(input *DeleteApiKeyInput) (req *request.Request, output *DeleteApiKeyOutput) { + op := &request.Operation{ + Name: opDeleteApiKey, + HTTPMethod: "DELETE", + HTTPPath: "/apikeys/{api_Key}", + } + + if input == nil { + input = &DeleteApiKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteApiKeyOutput{} + req.Data = output + return +} + +// Deletes the ApiKey resource. +func (c *APIGateway) DeleteApiKey(input *DeleteApiKeyInput) (*DeleteApiKeyOutput, error) { + req, out := c.DeleteApiKeyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBasePathMapping = "DeleteBasePathMapping" + +// DeleteBasePathMappingRequest generates a request for the DeleteBasePathMapping operation. 
+func (c *APIGateway) DeleteBasePathMappingRequest(input *DeleteBasePathMappingInput) (req *request.Request, output *DeleteBasePathMappingOutput) { + op := &request.Operation{ + Name: opDeleteBasePathMapping, + HTTPMethod: "DELETE", + HTTPPath: "/domainnames/{domain_name}/basepathmappings/{base_path}", + } + + if input == nil { + input = &DeleteBasePathMappingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBasePathMappingOutput{} + req.Data = output + return +} + +// Deletes the BasePathMapping resource. +func (c *APIGateway) DeleteBasePathMapping(input *DeleteBasePathMappingInput) (*DeleteBasePathMappingOutput, error) { + req, out := c.DeleteBasePathMappingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteClientCertificate = "DeleteClientCertificate" + +// DeleteClientCertificateRequest generates a request for the DeleteClientCertificate operation. 
+func (c *APIGateway) DeleteClientCertificateRequest(input *DeleteClientCertificateInput) (req *request.Request, output *DeleteClientCertificateOutput) { + op := &request.Operation{ + Name: opDeleteClientCertificate, + HTTPMethod: "DELETE", + HTTPPath: "/clientcertificates/{clientcertificate_id}", + } + + if input == nil { + input = &DeleteClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteClientCertificateOutput{} + req.Data = output + return +} + +func (c *APIGateway) DeleteClientCertificate(input *DeleteClientCertificateInput) (*DeleteClientCertificateOutput, error) { + req, out := c.DeleteClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeployment = "DeleteDeployment" + +// DeleteDeploymentRequest generates a request for the DeleteDeployment operation. +func (c *APIGateway) DeleteDeploymentRequest(input *DeleteDeploymentInput) (req *request.Request, output *DeleteDeploymentOutput) { + op := &request.Operation{ + Name: opDeleteDeployment, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/deployments/{deployment_id}", + } + + if input == nil { + input = &DeleteDeploymentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDeploymentOutput{} + req.Data = output + return +} + +// Deletes a Deployment resource. Deleting a deployment will only succeed if +// there are no Stage resources associated with it. 
+func (c *APIGateway) DeleteDeployment(input *DeleteDeploymentInput) (*DeleteDeploymentOutput, error) { + req, out := c.DeleteDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDomainName = "DeleteDomainName" + +// DeleteDomainNameRequest generates a request for the DeleteDomainName operation. +func (c *APIGateway) DeleteDomainNameRequest(input *DeleteDomainNameInput) (req *request.Request, output *DeleteDomainNameOutput) { + op := &request.Operation{ + Name: opDeleteDomainName, + HTTPMethod: "DELETE", + HTTPPath: "/domainnames/{domain_name}", + } + + if input == nil { + input = &DeleteDomainNameInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDomainNameOutput{} + req.Data = output + return +} + +// Deletes the DomainName resource. +func (c *APIGateway) DeleteDomainName(input *DeleteDomainNameInput) (*DeleteDomainNameOutput, error) { + req, out := c.DeleteDomainNameRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIntegration = "DeleteIntegration" + +// DeleteIntegrationRequest generates a request for the DeleteIntegration operation. +func (c *APIGateway) DeleteIntegrationRequest(input *DeleteIntegrationInput) (req *request.Request, output *DeleteIntegrationOutput) { + op := &request.Operation{ + Name: opDeleteIntegration, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + } + + if input == nil { + input = &DeleteIntegrationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteIntegrationOutput{} + req.Data = output + return +} + +// Represents a delete integration. 
+func (c *APIGateway) DeleteIntegration(input *DeleteIntegrationInput) (*DeleteIntegrationOutput, error) { + req, out := c.DeleteIntegrationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIntegrationResponse = "DeleteIntegrationResponse" + +// DeleteIntegrationResponseRequest generates a request for the DeleteIntegrationResponse operation. +func (c *APIGateway) DeleteIntegrationResponseRequest(input *DeleteIntegrationResponseInput) (req *request.Request, output *DeleteIntegrationResponseOutput) { + op := &request.Operation{ + Name: opDeleteIntegrationResponse, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + } + + if input == nil { + input = &DeleteIntegrationResponseInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteIntegrationResponseOutput{} + req.Data = output + return +} + +// Represents a delete integration response. +func (c *APIGateway) DeleteIntegrationResponse(input *DeleteIntegrationResponseInput) (*DeleteIntegrationResponseOutput, error) { + req, out := c.DeleteIntegrationResponseRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMethod = "DeleteMethod" + +// DeleteMethodRequest generates a request for the DeleteMethod operation. 
+func (c *APIGateway) DeleteMethodRequest(input *DeleteMethodInput) (req *request.Request, output *DeleteMethodOutput) { + op := &request.Operation{ + Name: opDeleteMethod, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &DeleteMethodInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMethodOutput{} + req.Data = output + return +} + +// Deletes an existing Method resource. +func (c *APIGateway) DeleteMethod(input *DeleteMethodInput) (*DeleteMethodOutput, error) { + req, out := c.DeleteMethodRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMethodResponse = "DeleteMethodResponse" + +// DeleteMethodResponseRequest generates a request for the DeleteMethodResponse operation. +func (c *APIGateway) DeleteMethodResponseRequest(input *DeleteMethodResponseInput) (req *request.Request, output *DeleteMethodResponseOutput) { + op := &request.Operation{ + Name: opDeleteMethodResponse, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + } + + if input == nil { + input = &DeleteMethodResponseInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMethodResponseOutput{} + req.Data = output + return +} + +// Deletes an existing MethodResponse resource. +func (c *APIGateway) DeleteMethodResponse(input *DeleteMethodResponseInput) (*DeleteMethodResponseOutput, error) { + req, out := c.DeleteMethodResponseRequest(input) + err := req.Send() + return out, err +} + +const opDeleteModel = "DeleteModel" + +// DeleteModelRequest generates a request for the DeleteModel operation. 
+func (c *APIGateway) DeleteModelRequest(input *DeleteModelInput) (req *request.Request, output *DeleteModelOutput) { + op := &request.Operation{ + Name: opDeleteModel, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/models/{model_name}", + } + + if input == nil { + input = &DeleteModelInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteModelOutput{} + req.Data = output + return +} + +// Deletes a model. +func (c *APIGateway) DeleteModel(input *DeleteModelInput) (*DeleteModelOutput, error) { + req, out := c.DeleteModelRequest(input) + err := req.Send() + return out, err +} + +const opDeleteResource = "DeleteResource" + +// DeleteResourceRequest generates a request for the DeleteResource operation. +func (c *APIGateway) DeleteResourceRequest(input *DeleteResourceInput) (req *request.Request, output *DeleteResourceOutput) { + op := &request.Operation{ + Name: opDeleteResource, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}", + } + + if input == nil { + input = &DeleteResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteResourceOutput{} + req.Data = output + return +} + +// Deletes a Resource resource. +func (c *APIGateway) DeleteResource(input *DeleteResourceInput) (*DeleteResourceOutput, error) { + req, out := c.DeleteResourceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRestApi = "DeleteRestApi" + +// DeleteRestApiRequest generates a request for the DeleteRestApi operation. 
+func (c *APIGateway) DeleteRestApiRequest(input *DeleteRestApiInput) (req *request.Request, output *DeleteRestApiOutput) { + op := &request.Operation{ + Name: opDeleteRestApi, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}", + } + + if input == nil { + input = &DeleteRestApiInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRestApiOutput{} + req.Data = output + return +} + +// Deletes the specified API. +func (c *APIGateway) DeleteRestApi(input *DeleteRestApiInput) (*DeleteRestApiOutput, error) { + req, out := c.DeleteRestApiRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStage = "DeleteStage" + +// DeleteStageRequest generates a request for the DeleteStage operation. +func (c *APIGateway) DeleteStageRequest(input *DeleteStageInput) (req *request.Request, output *DeleteStageOutput) { + op := &request.Operation{ + Name: opDeleteStage, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}", + } + + if input == nil { + input = &DeleteStageInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStageOutput{} + req.Data = output + return +} + +// Deletes a Stage resource. +func (c *APIGateway) DeleteStage(input *DeleteStageInput) (*DeleteStageOutput, error) { + req, out := c.DeleteStageRequest(input) + err := req.Send() + return out, err +} + +const opFlushStageCache = "FlushStageCache" + +// FlushStageCacheRequest generates a request for the FlushStageCache operation. 
+func (c *APIGateway) FlushStageCacheRequest(input *FlushStageCacheInput) (req *request.Request, output *FlushStageCacheOutput) { + op := &request.Operation{ + Name: opFlushStageCache, + HTTPMethod: "DELETE", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}/cache/data", + } + + if input == nil { + input = &FlushStageCacheInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &FlushStageCacheOutput{} + req.Data = output + return +} + +// Flushes a stage's cache. +func (c *APIGateway) FlushStageCache(input *FlushStageCacheInput) (*FlushStageCacheOutput, error) { + req, out := c.FlushStageCacheRequest(input) + err := req.Send() + return out, err +} + +const opGenerateClientCertificate = "GenerateClientCertificate" + +// GenerateClientCertificateRequest generates a request for the GenerateClientCertificate operation. +func (c *APIGateway) GenerateClientCertificateRequest(input *GenerateClientCertificateInput) (req *request.Request, output *ClientCertificate) { + op := &request.Operation{ + Name: opGenerateClientCertificate, + HTTPMethod: "POST", + HTTPPath: "/clientcertificates", + } + + if input == nil { + input = &GenerateClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &ClientCertificate{} + req.Data = output + return +} + +func (c *APIGateway) GenerateClientCertificate(input *GenerateClientCertificateInput) (*ClientCertificate, error) { + req, out := c.GenerateClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetAccount = "GetAccount" + +// GetAccountRequest generates a request for the GetAccount operation. 
+func (c *APIGateway) GetAccountRequest(input *GetAccountInput) (req *request.Request, output *Account) { + op := &request.Operation{ + Name: opGetAccount, + HTTPMethod: "GET", + HTTPPath: "/account", + } + + if input == nil { + input = &GetAccountInput{} + } + + req = c.newRequest(op, input, output) + output = &Account{} + req.Data = output + return +} + +// Gets information about the current Account resource. +func (c *APIGateway) GetAccount(input *GetAccountInput) (*Account, error) { + req, out := c.GetAccountRequest(input) + err := req.Send() + return out, err +} + +const opGetApiKey = "GetApiKey" + +// GetApiKeyRequest generates a request for the GetApiKey operation. +func (c *APIGateway) GetApiKeyRequest(input *GetApiKeyInput) (req *request.Request, output *ApiKey) { + op := &request.Operation{ + Name: opGetApiKey, + HTTPMethod: "GET", + HTTPPath: "/apikeys/{api_Key}", + } + + if input == nil { + input = &GetApiKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &ApiKey{} + req.Data = output + return +} + +// Gets information about the current ApiKey resource. +func (c *APIGateway) GetApiKey(input *GetApiKeyInput) (*ApiKey, error) { + req, out := c.GetApiKeyRequest(input) + err := req.Send() + return out, err +} + +const opGetApiKeys = "GetApiKeys" + +// GetApiKeysRequest generates a request for the GetApiKeys operation. +func (c *APIGateway) GetApiKeysRequest(input *GetApiKeysInput) (req *request.Request, output *GetApiKeysOutput) { + op := &request.Operation{ + Name: opGetApiKeys, + HTTPMethod: "GET", + HTTPPath: "/apikeys", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetApiKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &GetApiKeysOutput{} + req.Data = output + return +} + +// Gets information about the current ApiKeys resource. 
+func (c *APIGateway) GetApiKeys(input *GetApiKeysInput) (*GetApiKeysOutput, error) { + req, out := c.GetApiKeysRequest(input) + err := req.Send() + return out, err +} + +func (c *APIGateway) GetApiKeysPages(input *GetApiKeysInput, fn func(p *GetApiKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetApiKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetApiKeysOutput), lastPage) + }) +} + +const opGetBasePathMapping = "GetBasePathMapping" + +// GetBasePathMappingRequest generates a request for the GetBasePathMapping operation. +func (c *APIGateway) GetBasePathMappingRequest(input *GetBasePathMappingInput) (req *request.Request, output *BasePathMapping) { + op := &request.Operation{ + Name: opGetBasePathMapping, + HTTPMethod: "GET", + HTTPPath: "/domainnames/{domain_name}/basepathmappings/{base_path}", + } + + if input == nil { + input = &GetBasePathMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &BasePathMapping{} + req.Data = output + return +} + +// Describe a BasePathMapping resource. +func (c *APIGateway) GetBasePathMapping(input *GetBasePathMappingInput) (*BasePathMapping, error) { + req, out := c.GetBasePathMappingRequest(input) + err := req.Send() + return out, err +} + +const opGetBasePathMappings = "GetBasePathMappings" + +// GetBasePathMappingsRequest generates a request for the GetBasePathMappings operation. 
+func (c *APIGateway) GetBasePathMappingsRequest(input *GetBasePathMappingsInput) (req *request.Request, output *GetBasePathMappingsOutput) { + op := &request.Operation{ + Name: opGetBasePathMappings, + HTTPMethod: "GET", + HTTPPath: "/domainnames/{domain_name}/basepathmappings", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetBasePathMappingsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBasePathMappingsOutput{} + req.Data = output + return +} + +// Represents a collection of BasePathMapping resources. +func (c *APIGateway) GetBasePathMappings(input *GetBasePathMappingsInput) (*GetBasePathMappingsOutput, error) { + req, out := c.GetBasePathMappingsRequest(input) + err := req.Send() + return out, err +} + +func (c *APIGateway) GetBasePathMappingsPages(input *GetBasePathMappingsInput, fn func(p *GetBasePathMappingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetBasePathMappingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetBasePathMappingsOutput), lastPage) + }) +} + +const opGetClientCertificate = "GetClientCertificate" + +// GetClientCertificateRequest generates a request for the GetClientCertificate operation. 
+func (c *APIGateway) GetClientCertificateRequest(input *GetClientCertificateInput) (req *request.Request, output *ClientCertificate) { + op := &request.Operation{ + Name: opGetClientCertificate, + HTTPMethod: "GET", + HTTPPath: "/clientcertificates/{clientcertificate_id}", + } + + if input == nil { + input = &GetClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &ClientCertificate{} + req.Data = output + return +} + +func (c *APIGateway) GetClientCertificate(input *GetClientCertificateInput) (*ClientCertificate, error) { + req, out := c.GetClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetClientCertificates = "GetClientCertificates" + +// GetClientCertificatesRequest generates a request for the GetClientCertificates operation. +func (c *APIGateway) GetClientCertificatesRequest(input *GetClientCertificatesInput) (req *request.Request, output *GetClientCertificatesOutput) { + op := &request.Operation{ + Name: opGetClientCertificates, + HTTPMethod: "GET", + HTTPPath: "/clientcertificates", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetClientCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetClientCertificatesOutput{} + req.Data = output + return +} + +func (c *APIGateway) GetClientCertificates(input *GetClientCertificatesInput) (*GetClientCertificatesOutput, error) { + req, out := c.GetClientCertificatesRequest(input) + err := req.Send() + return out, err +} + +func (c *APIGateway) GetClientCertificatesPages(input *GetClientCertificatesInput, fn func(p *GetClientCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetClientCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) 
bool { + return fn(p.(*GetClientCertificatesOutput), lastPage) + }) +} + +const opGetDeployment = "GetDeployment" + +// GetDeploymentRequest generates a request for the GetDeployment operation. +func (c *APIGateway) GetDeploymentRequest(input *GetDeploymentInput) (req *request.Request, output *Deployment) { + op := &request.Operation{ + Name: opGetDeployment, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/deployments/{deployment_id}", + } + + if input == nil { + input = &GetDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &Deployment{} + req.Data = output + return +} + +// Gets information about a Deployment resource. +func (c *APIGateway) GetDeployment(input *GetDeploymentInput) (*Deployment, error) { + req, out := c.GetDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opGetDeployments = "GetDeployments" + +// GetDeploymentsRequest generates a request for the GetDeployments operation. +func (c *APIGateway) GetDeploymentsRequest(input *GetDeploymentsInput) (req *request.Request, output *GetDeploymentsOutput) { + op := &request.Operation{ + Name: opGetDeployments, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/deployments", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentsOutput{} + req.Data = output + return +} + +// Gets information about a Deployments collection. 
+func (c *APIGateway) GetDeployments(input *GetDeploymentsInput) (*GetDeploymentsOutput, error) { + req, out := c.GetDeploymentsRequest(input) + err := req.Send() + return out, err +} + +func (c *APIGateway) GetDeploymentsPages(input *GetDeploymentsInput, fn func(p *GetDeploymentsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetDeploymentsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetDeploymentsOutput), lastPage) + }) +} + +const opGetDomainName = "GetDomainName" + +// GetDomainNameRequest generates a request for the GetDomainName operation. +func (c *APIGateway) GetDomainNameRequest(input *GetDomainNameInput) (req *request.Request, output *DomainName) { + op := &request.Operation{ + Name: opGetDomainName, + HTTPMethod: "GET", + HTTPPath: "/domainnames/{domain_name}", + } + + if input == nil { + input = &GetDomainNameInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainName{} + req.Data = output + return +} + +// Represents a domain name that is contained in a simpler, more intuitive URL +// that can be called. +func (c *APIGateway) GetDomainName(input *GetDomainNameInput) (*DomainName, error) { + req, out := c.GetDomainNameRequest(input) + err := req.Send() + return out, err +} + +const opGetDomainNames = "GetDomainNames" + +// GetDomainNamesRequest generates a request for the GetDomainNames operation. 
+func (c *APIGateway) GetDomainNamesRequest(input *GetDomainNamesInput) (req *request.Request, output *GetDomainNamesOutput) { + op := &request.Operation{ + Name: opGetDomainNames, + HTTPMethod: "GET", + HTTPPath: "/domainnames", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetDomainNamesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDomainNamesOutput{} + req.Data = output + return +} + +// Represents a collection of DomainName resources. +func (c *APIGateway) GetDomainNames(input *GetDomainNamesInput) (*GetDomainNamesOutput, error) { + req, out := c.GetDomainNamesRequest(input) + err := req.Send() + return out, err +} + +func (c *APIGateway) GetDomainNamesPages(input *GetDomainNamesInput, fn func(p *GetDomainNamesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetDomainNamesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetDomainNamesOutput), lastPage) + }) +} + +const opGetIntegration = "GetIntegration" + +// GetIntegrationRequest generates a request for the GetIntegration operation. +func (c *APIGateway) GetIntegrationRequest(input *GetIntegrationInput) (req *request.Request, output *Integration) { + op := &request.Operation{ + Name: opGetIntegration, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + } + + if input == nil { + input = &GetIntegrationInput{} + } + + req = c.newRequest(op, input, output) + output = &Integration{} + req.Data = output + return +} + +// Represents a get integration. 
+func (c *APIGateway) GetIntegration(input *GetIntegrationInput) (*Integration, error) { + req, out := c.GetIntegrationRequest(input) + err := req.Send() + return out, err +} + +const opGetIntegrationResponse = "GetIntegrationResponse" + +// GetIntegrationResponseRequest generates a request for the GetIntegrationResponse operation. +func (c *APIGateway) GetIntegrationResponseRequest(input *GetIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) { + op := &request.Operation{ + Name: opGetIntegrationResponse, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + } + + if input == nil { + input = &GetIntegrationResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &IntegrationResponse{} + req.Data = output + return +} + +// Represents a get integration response. +func (c *APIGateway) GetIntegrationResponse(input *GetIntegrationResponseInput) (*IntegrationResponse, error) { + req, out := c.GetIntegrationResponseRequest(input) + err := req.Send() + return out, err +} + +const opGetMethod = "GetMethod" + +// GetMethodRequest generates a request for the GetMethod operation. +func (c *APIGateway) GetMethodRequest(input *GetMethodInput) (req *request.Request, output *Method) { + op := &request.Operation{ + Name: opGetMethod, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &GetMethodInput{} + } + + req = c.newRequest(op, input, output) + output = &Method{} + req.Data = output + return +} + +// Describe an existing Method resource. +func (c *APIGateway) GetMethod(input *GetMethodInput) (*Method, error) { + req, out := c.GetMethodRequest(input) + err := req.Send() + return out, err +} + +const opGetMethodResponse = "GetMethodResponse" + +// GetMethodResponseRequest generates a request for the GetMethodResponse operation. 
+func (c *APIGateway) GetMethodResponseRequest(input *GetMethodResponseInput) (req *request.Request, output *MethodResponse) { + op := &request.Operation{ + Name: opGetMethodResponse, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + } + + if input == nil { + input = &GetMethodResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &MethodResponse{} + req.Data = output + return +} + +// Describes a MethodResponse resource. +func (c *APIGateway) GetMethodResponse(input *GetMethodResponseInput) (*MethodResponse, error) { + req, out := c.GetMethodResponseRequest(input) + err := req.Send() + return out, err +} + +const opGetModel = "GetModel" + +// GetModelRequest generates a request for the GetModel operation. +func (c *APIGateway) GetModelRequest(input *GetModelInput) (req *request.Request, output *Model) { + op := &request.Operation{ + Name: opGetModel, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/models/{model_name}", + } + + if input == nil { + input = &GetModelInput{} + } + + req = c.newRequest(op, input, output) + output = &Model{} + req.Data = output + return +} + +// Describes an existing model defined for a RestApi resource. +func (c *APIGateway) GetModel(input *GetModelInput) (*Model, error) { + req, out := c.GetModelRequest(input) + err := req.Send() + return out, err +} + +const opGetModelTemplate = "GetModelTemplate" + +// GetModelTemplateRequest generates a request for the GetModelTemplate operation. 
+func (c *APIGateway) GetModelTemplateRequest(input *GetModelTemplateInput) (req *request.Request, output *GetModelTemplateOutput) { + op := &request.Operation{ + Name: opGetModelTemplate, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/models/{model_name}/default_template", + } + + if input == nil { + input = &GetModelTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetModelTemplateOutput{} + req.Data = output + return +} + +// Generates a sample mapping template that can be used to transform a payload +// into the structure of a model. +func (c *APIGateway) GetModelTemplate(input *GetModelTemplateInput) (*GetModelTemplateOutput, error) { + req, out := c.GetModelTemplateRequest(input) + err := req.Send() + return out, err +} + +const opGetModels = "GetModels" + +// GetModelsRequest generates a request for the GetModels operation. +func (c *APIGateway) GetModelsRequest(input *GetModelsInput) (req *request.Request, output *GetModelsOutput) { + op := &request.Operation{ + Name: opGetModels, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/models", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetModelsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetModelsOutput{} + req.Data = output + return +} + +// Describes existing Models defined for a RestApi resource. 
+func (c *APIGateway) GetModels(input *GetModelsInput) (*GetModelsOutput, error) { + req, out := c.GetModelsRequest(input) + err := req.Send() + return out, err +} + +func (c *APIGateway) GetModelsPages(input *GetModelsInput, fn func(p *GetModelsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetModelsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetModelsOutput), lastPage) + }) +} + +const opGetResource = "GetResource" + +// GetResourceRequest generates a request for the GetResource operation. +func (c *APIGateway) GetResourceRequest(input *GetResourceInput) (req *request.Request, output *Resource) { + op := &request.Operation{ + Name: opGetResource, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}", + } + + if input == nil { + input = &GetResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &Resource{} + req.Data = output + return +} + +// Lists information about a resource. +func (c *APIGateway) GetResource(input *GetResourceInput) (*Resource, error) { + req, out := c.GetResourceRequest(input) + err := req.Send() + return out, err +} + +const opGetResources = "GetResources" + +// GetResourcesRequest generates a request for the GetResources operation. 
+func (c *APIGateway) GetResourcesRequest(input *GetResourcesInput) (req *request.Request, output *GetResourcesOutput) { + op := &request.Operation{ + Name: opGetResources, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/resources", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetResourcesOutput{} + req.Data = output + return +} + +// Lists information about a collection of Resource resources. +func (c *APIGateway) GetResources(input *GetResourcesInput) (*GetResourcesOutput, error) { + req, out := c.GetResourcesRequest(input) + err := req.Send() + return out, err +} + +func (c *APIGateway) GetResourcesPages(input *GetResourcesInput, fn func(p *GetResourcesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetResourcesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetResourcesOutput), lastPage) + }) +} + +const opGetRestApi = "GetRestApi" + +// GetRestApiRequest generates a request for the GetRestApi operation. +func (c *APIGateway) GetRestApiRequest(input *GetRestApiInput) (req *request.Request, output *RestApi) { + op := &request.Operation{ + Name: opGetRestApi, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}", + } + + if input == nil { + input = &GetRestApiInput{} + } + + req = c.newRequest(op, input, output) + output = &RestApi{} + req.Data = output + return +} + +// Lists the RestApi resource in the collection. 
+func (c *APIGateway) GetRestApi(input *GetRestApiInput) (*RestApi, error) { + req, out := c.GetRestApiRequest(input) + err := req.Send() + return out, err +} + +const opGetRestApis = "GetRestApis" + +// GetRestApisRequest generates a request for the GetRestApis operation. +func (c *APIGateway) GetRestApisRequest(input *GetRestApisInput) (req *request.Request, output *GetRestApisOutput) { + op := &request.Operation{ + Name: opGetRestApis, + HTTPMethod: "GET", + HTTPPath: "/restapis", + Paginator: &request.Paginator{ + InputTokens: []string{"position"}, + OutputTokens: []string{"position"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetRestApisInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRestApisOutput{} + req.Data = output + return +} + +// Lists the RestApis resources for your collection. +func (c *APIGateway) GetRestApis(input *GetRestApisInput) (*GetRestApisOutput, error) { + req, out := c.GetRestApisRequest(input) + err := req.Send() + return out, err +} + +func (c *APIGateway) GetRestApisPages(input *GetRestApisInput, fn func(p *GetRestApisOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetRestApisRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetRestApisOutput), lastPage) + }) +} + +const opGetSdk = "GetSdk" + +// GetSdkRequest generates a request for the GetSdk operation. 
+func (c *APIGateway) GetSdkRequest(input *GetSdkInput) (req *request.Request, output *GetSdkOutput) { + op := &request.Operation{ + Name: opGetSdk, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}/sdks/{sdk_type}", + } + + if input == nil { + input = &GetSdkInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSdkOutput{} + req.Data = output + return +} + +func (c *APIGateway) GetSdk(input *GetSdkInput) (*GetSdkOutput, error) { + req, out := c.GetSdkRequest(input) + err := req.Send() + return out, err +} + +const opGetStage = "GetStage" + +// GetStageRequest generates a request for the GetStage operation. +func (c *APIGateway) GetStageRequest(input *GetStageInput) (req *request.Request, output *Stage) { + op := &request.Operation{ + Name: opGetStage, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}", + } + + if input == nil { + input = &GetStageInput{} + } + + req = c.newRequest(op, input, output) + output = &Stage{} + req.Data = output + return +} + +// Gets information about a Stage resource. +func (c *APIGateway) GetStage(input *GetStageInput) (*Stage, error) { + req, out := c.GetStageRequest(input) + err := req.Send() + return out, err +} + +const opGetStages = "GetStages" + +// GetStagesRequest generates a request for the GetStages operation. +func (c *APIGateway) GetStagesRequest(input *GetStagesInput) (req *request.Request, output *GetStagesOutput) { + op := &request.Operation{ + Name: opGetStages, + HTTPMethod: "GET", + HTTPPath: "/restapis/{restapi_id}/stages", + } + + if input == nil { + input = &GetStagesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStagesOutput{} + req.Data = output + return +} + +// Gets information about one or more Stage resources. 
+func (c *APIGateway) GetStages(input *GetStagesInput) (*GetStagesOutput, error) { + req, out := c.GetStagesRequest(input) + err := req.Send() + return out, err +} + +const opPutIntegration = "PutIntegration" + +// PutIntegrationRequest generates a request for the PutIntegration operation. +func (c *APIGateway) PutIntegrationRequest(input *PutIntegrationInput) (req *request.Request, output *Integration) { + op := &request.Operation{ + Name: opPutIntegration, + HTTPMethod: "PUT", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + } + + if input == nil { + input = &PutIntegrationInput{} + } + + req = c.newRequest(op, input, output) + output = &Integration{} + req.Data = output + return +} + +// Represents a put integration. +func (c *APIGateway) PutIntegration(input *PutIntegrationInput) (*Integration, error) { + req, out := c.PutIntegrationRequest(input) + err := req.Send() + return out, err +} + +const opPutIntegrationResponse = "PutIntegrationResponse" + +// PutIntegrationResponseRequest generates a request for the PutIntegrationResponse operation. +func (c *APIGateway) PutIntegrationResponseRequest(input *PutIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) { + op := &request.Operation{ + Name: opPutIntegrationResponse, + HTTPMethod: "PUT", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + } + + if input == nil { + input = &PutIntegrationResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &IntegrationResponse{} + req.Data = output + return +} + +// Represents a put integration. +func (c *APIGateway) PutIntegrationResponse(input *PutIntegrationResponseInput) (*IntegrationResponse, error) { + req, out := c.PutIntegrationResponseRequest(input) + err := req.Send() + return out, err +} + +const opPutMethod = "PutMethod" + +// PutMethodRequest generates a request for the PutMethod operation. 
+func (c *APIGateway) PutMethodRequest(input *PutMethodInput) (req *request.Request, output *Method) { + op := &request.Operation{ + Name: opPutMethod, + HTTPMethod: "PUT", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &PutMethodInput{} + } + + req = c.newRequest(op, input, output) + output = &Method{} + req.Data = output + return +} + +// Add a method to an existing Resource resource. +func (c *APIGateway) PutMethod(input *PutMethodInput) (*Method, error) { + req, out := c.PutMethodRequest(input) + err := req.Send() + return out, err +} + +const opPutMethodResponse = "PutMethodResponse" + +// PutMethodResponseRequest generates a request for the PutMethodResponse operation. +func (c *APIGateway) PutMethodResponseRequest(input *PutMethodResponseInput) (req *request.Request, output *MethodResponse) { + op := &request.Operation{ + Name: opPutMethodResponse, + HTTPMethod: "PUT", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + } + + if input == nil { + input = &PutMethodResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &MethodResponse{} + req.Data = output + return +} + +// Adds a MethodResponse to an existing Method resource. +func (c *APIGateway) PutMethodResponse(input *PutMethodResponseInput) (*MethodResponse, error) { + req, out := c.PutMethodResponseRequest(input) + err := req.Send() + return out, err +} + +const opTestInvokeMethod = "TestInvokeMethod" + +// TestInvokeMethodRequest generates a request for the TestInvokeMethod operation. 
+func (c *APIGateway) TestInvokeMethodRequest(input *TestInvokeMethodInput) (req *request.Request, output *TestInvokeMethodOutput) { + op := &request.Operation{ + Name: opTestInvokeMethod, + HTTPMethod: "POST", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &TestInvokeMethodInput{} + } + + req = c.newRequest(op, input, output) + output = &TestInvokeMethodOutput{} + req.Data = output + return +} + +func (c *APIGateway) TestInvokeMethod(input *TestInvokeMethodInput) (*TestInvokeMethodOutput, error) { + req, out := c.TestInvokeMethodRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAccount = "UpdateAccount" + +// UpdateAccountRequest generates a request for the UpdateAccount operation. +func (c *APIGateway) UpdateAccountRequest(input *UpdateAccountInput) (req *request.Request, output *Account) { + op := &request.Operation{ + Name: opUpdateAccount, + HTTPMethod: "PATCH", + HTTPPath: "/account", + } + + if input == nil { + input = &UpdateAccountInput{} + } + + req = c.newRequest(op, input, output) + output = &Account{} + req.Data = output + return +} + +// Changes information about the current Account resource. +func (c *APIGateway) UpdateAccount(input *UpdateAccountInput) (*Account, error) { + req, out := c.UpdateAccountRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApiKey = "UpdateApiKey" + +// UpdateApiKeyRequest generates a request for the UpdateApiKey operation. +func (c *APIGateway) UpdateApiKeyRequest(input *UpdateApiKeyInput) (req *request.Request, output *ApiKey) { + op := &request.Operation{ + Name: opUpdateApiKey, + HTTPMethod: "PATCH", + HTTPPath: "/apikeys/{api_Key}", + } + + if input == nil { + input = &UpdateApiKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &ApiKey{} + req.Data = output + return +} + +// Changes information about an ApiKey resource. 
+func (c *APIGateway) UpdateApiKey(input *UpdateApiKeyInput) (*ApiKey, error) { + req, out := c.UpdateApiKeyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateBasePathMapping = "UpdateBasePathMapping" + +// UpdateBasePathMappingRequest generates a request for the UpdateBasePathMapping operation. +func (c *APIGateway) UpdateBasePathMappingRequest(input *UpdateBasePathMappingInput) (req *request.Request, output *BasePathMapping) { + op := &request.Operation{ + Name: opUpdateBasePathMapping, + HTTPMethod: "PATCH", + HTTPPath: "/domainnames/{domain_name}/basepathmappings/{base_path}", + } + + if input == nil { + input = &UpdateBasePathMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &BasePathMapping{} + req.Data = output + return +} + +// Changes information about the BasePathMapping resource. +func (c *APIGateway) UpdateBasePathMapping(input *UpdateBasePathMappingInput) (*BasePathMapping, error) { + req, out := c.UpdateBasePathMappingRequest(input) + err := req.Send() + return out, err +} + +const opUpdateClientCertificate = "UpdateClientCertificate" + +// UpdateClientCertificateRequest generates a request for the UpdateClientCertificate operation. 
+func (c *APIGateway) UpdateClientCertificateRequest(input *UpdateClientCertificateInput) (req *request.Request, output *ClientCertificate) { + op := &request.Operation{ + Name: opUpdateClientCertificate, + HTTPMethod: "PATCH", + HTTPPath: "/clientcertificates/{clientcertificate_id}", + } + + if input == nil { + input = &UpdateClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &ClientCertificate{} + req.Data = output + return +} + +func (c *APIGateway) UpdateClientCertificate(input *UpdateClientCertificateInput) (*ClientCertificate, error) { + req, out := c.UpdateClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDeployment = "UpdateDeployment" + +// UpdateDeploymentRequest generates a request for the UpdateDeployment operation. +func (c *APIGateway) UpdateDeploymentRequest(input *UpdateDeploymentInput) (req *request.Request, output *Deployment) { + op := &request.Operation{ + Name: opUpdateDeployment, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/deployments/{deployment_id}", + } + + if input == nil { + input = &UpdateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &Deployment{} + req.Data = output + return +} + +// Changes information about a Deployment resource. +func (c *APIGateway) UpdateDeployment(input *UpdateDeploymentInput) (*Deployment, error) { + req, out := c.UpdateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDomainName = "UpdateDomainName" + +// UpdateDomainNameRequest generates a request for the UpdateDomainName operation. 
+func (c *APIGateway) UpdateDomainNameRequest(input *UpdateDomainNameInput) (req *request.Request, output *DomainName) { + op := &request.Operation{ + Name: opUpdateDomainName, + HTTPMethod: "PATCH", + HTTPPath: "/domainnames/{domain_name}", + } + + if input == nil { + input = &UpdateDomainNameInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainName{} + req.Data = output + return +} + +// Changes information about the DomainName resource. +func (c *APIGateway) UpdateDomainName(input *UpdateDomainNameInput) (*DomainName, error) { + req, out := c.UpdateDomainNameRequest(input) + err := req.Send() + return out, err +} + +const opUpdateIntegration = "UpdateIntegration" + +// UpdateIntegrationRequest generates a request for the UpdateIntegration operation. +func (c *APIGateway) UpdateIntegrationRequest(input *UpdateIntegrationInput) (req *request.Request, output *Integration) { + op := &request.Operation{ + Name: opUpdateIntegration, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration", + } + + if input == nil { + input = &UpdateIntegrationInput{} + } + + req = c.newRequest(op, input, output) + output = &Integration{} + req.Data = output + return +} + +// Represents an update integration. +func (c *APIGateway) UpdateIntegration(input *UpdateIntegrationInput) (*Integration, error) { + req, out := c.UpdateIntegrationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateIntegrationResponse = "UpdateIntegrationResponse" + +// UpdateIntegrationResponseRequest generates a request for the UpdateIntegrationResponse operation. 
+func (c *APIGateway) UpdateIntegrationResponseRequest(input *UpdateIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) { + op := &request.Operation{ + Name: opUpdateIntegrationResponse, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/integration/responses/{status_code}", + } + + if input == nil { + input = &UpdateIntegrationResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &IntegrationResponse{} + req.Data = output + return +} + +// Represents an update integration response. +func (c *APIGateway) UpdateIntegrationResponse(input *UpdateIntegrationResponseInput) (*IntegrationResponse, error) { + req, out := c.UpdateIntegrationResponseRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMethod = "UpdateMethod" + +// UpdateMethodRequest generates a request for the UpdateMethod operation. +func (c *APIGateway) UpdateMethodRequest(input *UpdateMethodInput) (req *request.Request, output *Method) { + op := &request.Operation{ + Name: opUpdateMethod, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}", + } + + if input == nil { + input = &UpdateMethodInput{} + } + + req = c.newRequest(op, input, output) + output = &Method{} + req.Data = output + return +} + +// Updates an existing Method resource. +func (c *APIGateway) UpdateMethod(input *UpdateMethodInput) (*Method, error) { + req, out := c.UpdateMethodRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMethodResponse = "UpdateMethodResponse" + +// UpdateMethodResponseRequest generates a request for the UpdateMethodResponse operation. 
+func (c *APIGateway) UpdateMethodResponseRequest(input *UpdateMethodResponseInput) (req *request.Request, output *MethodResponse) { + op := &request.Operation{ + Name: opUpdateMethodResponse, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}/methods/{http_method}/responses/{status_code}", + } + + if input == nil { + input = &UpdateMethodResponseInput{} + } + + req = c.newRequest(op, input, output) + output = &MethodResponse{} + req.Data = output + return +} + +// Updates an existing MethodResponse resource. +func (c *APIGateway) UpdateMethodResponse(input *UpdateMethodResponseInput) (*MethodResponse, error) { + req, out := c.UpdateMethodResponseRequest(input) + err := req.Send() + return out, err +} + +const opUpdateModel = "UpdateModel" + +// UpdateModelRequest generates a request for the UpdateModel operation. +func (c *APIGateway) UpdateModelRequest(input *UpdateModelInput) (req *request.Request, output *Model) { + op := &request.Operation{ + Name: opUpdateModel, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/models/{model_name}", + } + + if input == nil { + input = &UpdateModelInput{} + } + + req = c.newRequest(op, input, output) + output = &Model{} + req.Data = output + return +} + +// Changes information about a model. +func (c *APIGateway) UpdateModel(input *UpdateModelInput) (*Model, error) { + req, out := c.UpdateModelRequest(input) + err := req.Send() + return out, err +} + +const opUpdateResource = "UpdateResource" + +// UpdateResourceRequest generates a request for the UpdateResource operation. 
+func (c *APIGateway) UpdateResourceRequest(input *UpdateResourceInput) (req *request.Request, output *Resource) { + op := &request.Operation{ + Name: opUpdateResource, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/resources/{resource_id}", + } + + if input == nil { + input = &UpdateResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &Resource{} + req.Data = output + return +} + +// Changes information about a Resource resource. +func (c *APIGateway) UpdateResource(input *UpdateResourceInput) (*Resource, error) { + req, out := c.UpdateResourceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRestApi = "UpdateRestApi" + +// UpdateRestApiRequest generates a request for the UpdateRestApi operation. +func (c *APIGateway) UpdateRestApiRequest(input *UpdateRestApiInput) (req *request.Request, output *RestApi) { + op := &request.Operation{ + Name: opUpdateRestApi, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}", + } + + if input == nil { + input = &UpdateRestApiInput{} + } + + req = c.newRequest(op, input, output) + output = &RestApi{} + req.Data = output + return +} + +// Changes information about the specified API. +func (c *APIGateway) UpdateRestApi(input *UpdateRestApiInput) (*RestApi, error) { + req, out := c.UpdateRestApiRequest(input) + err := req.Send() + return out, err +} + +const opUpdateStage = "UpdateStage" + +// UpdateStageRequest generates a request for the UpdateStage operation. +func (c *APIGateway) UpdateStageRequest(input *UpdateStageInput) (req *request.Request, output *Stage) { + op := &request.Operation{ + Name: opUpdateStage, + HTTPMethod: "PATCH", + HTTPPath: "/restapis/{restapi_id}/stages/{stage_name}", + } + + if input == nil { + input = &UpdateStageInput{} + } + + req = c.newRequest(op, input, output) + output = &Stage{} + req.Data = output + return +} + +// Changes information about a Stage resource. 
+func (c *APIGateway) UpdateStage(input *UpdateStageInput) (*Stage, error) { + req, out := c.UpdateStageRequest(input) + err := req.Send() + return out, err +} + +// Represents an AWS account that is associated with Amazon API Gateway. +type Account struct { + _ struct{} `type:"structure"` + + // Specifies the Amazon resource name (ARN) of an Amazon CloudWatch role for + // the current Account resource. + CloudwatchRoleArn *string `locationName:"cloudwatchRoleArn" type:"string"` + + // Specifies the application programming interface (API) throttle settings for + // the current Account resource. + ThrottleSettings *ThrottleSettings `locationName:"throttleSettings" type:"structure"` +} + +// String returns the string representation +func (s Account) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Account) GoString() string { + return s.String() +} + +// A resource that can be distributed to callers for executing Method resources +// that require an API key. API keys can be mapped to any Stage on any RestApi, +// which indicates that the callers with the API key can make requests to that +// stage. +type ApiKey struct { + _ struct{} `type:"structure"` + + // The date when the API Key was created, in ISO 8601 format. + CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` + + // The description of the API Key. + Description *string `locationName:"description" type:"string"` + + // Specifies whether the API Key can be used by callers. + Enabled *bool `locationName:"enabled" type:"boolean"` + + // The identifier of the API Key. + Id *string `locationName:"id" type:"string"` + + // When the API Key was last updated, in ISO 8601 format. + LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` + + // The name of the API Key. 
+ Name *string `locationName:"name" type:"string"` + + // A list of Stage resources that are associated with the ApiKey resource. + StageKeys []*string `locationName:"stageKeys" type:"list"` +} + +// String returns the string representation +func (s ApiKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApiKey) GoString() string { + return s.String() +} + +// Represents the base path that callers of the API that must provide as part +// of the URL after the domain name. +type BasePathMapping struct { + _ struct{} `type:"structure"` + + // The base path name that callers of the API must provide as part of the URL + // after the domain name. + BasePath *string `locationName:"basePath" type:"string"` + + // The name of the API. + RestApiId *string `locationName:"restApiId" type:"string"` + + // The name of the API's stage. + Stage *string `locationName:"stage" type:"string"` +} + +// String returns the string representation +func (s BasePathMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BasePathMapping) GoString() string { + return s.String() +} + +type ClientCertificate struct { + _ struct{} `type:"structure"` + + ClientCertificateId *string `locationName:"clientCertificateId" type:"string"` + + CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` + + Description *string `locationName:"description" type:"string"` + + ExpirationDate *time.Time `locationName:"expirationDate" type:"timestamp" timestampFormat:"unix"` + + PemEncodedCertificate *string `locationName:"pemEncodedCertificate" type:"string"` +} + +// String returns the string representation +func (s ClientCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientCertificate) GoString() string { + return s.String() +} + +type CreateApiKeyInput struct { + _ struct{} 
`type:"structure"` + + // The description of the ApiKey. + Description *string `locationName:"description" type:"string"` + + // Specifies whether the ApiKey can be used by callers. + Enabled *bool `locationName:"enabled" type:"boolean"` + + // The name of the ApiKey. + Name *string `locationName:"name" type:"string"` + + // Specifies whether the ApiKey can be used by callers. + StageKeys []*StageKey `locationName:"stageKeys" type:"list"` +} + +// String returns the string representation +func (s CreateApiKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApiKeyInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to create a new BasePathMapping resource. +type CreateBasePathMappingInput struct { + _ struct{} `type:"structure"` + + // The base path name that callers of the API must provide as part of the URL + // after the domain name. This value must be unique for all of the mappings + // across a single API. Leave this blank if you do not want callers to specify + // a base path name after the domain name. + BasePath *string `locationName:"basePath" type:"string"` + + // The domain name of the BasePathMapping resource to create. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` + + // The name of the API that you want to apply this mapping to. + RestApiId *string `locationName:"restApiId" type:"string" required:"true"` + + // The name of the API's stage that you want to use for this mapping. Leave + // this blank if you do not want callers to explicitly specify the stage name + // after any base path name. 
+ Stage *string `locationName:"stage" type:"string"` +} + +// String returns the string representation +func (s CreateBasePathMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBasePathMappingInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to create a Deployment resource. +type CreateDeploymentInput struct { + _ struct{} `type:"structure"` + + // Enables a cache cluster for the Stage resource specified in the input. + CacheClusterEnabled *bool `locationName:"cacheClusterEnabled" type:"boolean"` + + // Specifies the cache cluster size for the Stage resource specified in the + // input, if a cache cluster is enabled. + CacheClusterSize *string `locationName:"cacheClusterSize" type:"string" enum:"CacheClusterSize"` + + // The description for the Deployment resource to create. + Description *string `locationName:"description" type:"string"` + + // The RestApi resource identifier for the Deployment resource to create. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The description of the Stage resource for the Deployment resource to create. + StageDescription *string `locationName:"stageDescription" type:"string"` + + // The name of the Stage resource for the Deployment resource to create. + StageName *string `locationName:"stageName" type:"string" required:"true"` + + // A map that defines the stage variables for the Stage resource that is associated + // with the new deployment. 
Variable names can have alphabetic characters, and + // the values must match [A-Za-z0-9-._~:/?#&=,]+ + Variables map[string]*string `locationName:"variables" type:"map"` +} + +// String returns the string representation +func (s CreateDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentInput) GoString() string { + return s.String() +} + +// A request to create a new domain name. +type CreateDomainNameInput struct { + _ struct{} `type:"structure"` + + // The body of the server certificate provided by your certificate authority. + CertificateBody *string `locationName:"certificateBody" type:"string" required:"true"` + + // The intermediate certificates and optionally the root certificate, one after + // the other without any blank lines. If you include the root certificate, your + // certificate chain must start with intermediate certificates and end with + // the root certificate. Use the intermediate certificates that were provided + // by your certificate authority. Do not include any intermediaries that are + // not in the chain of trust path. + CertificateChain *string `locationName:"certificateChain" type:"string" required:"true"` + + // The name of the certificate. + CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + + // Your certificate's private key. + CertificatePrivateKey *string `locationName:"certificatePrivateKey" type:"string" required:"true"` + + // The name of the DomainName resource. + DomainName *string `locationName:"domainName" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDomainNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainNameInput) GoString() string { + return s.String() +} + +// Request to add a new Model to an existing RestApi resource. 
+type CreateModelInput struct { + _ struct{} `type:"structure"` + + // The content-type for the model. + ContentType *string `locationName:"contentType" type:"string" required:"true"` + + // The description of the model. + Description *string `locationName:"description" type:"string"` + + // The name of the model. + Name *string `locationName:"name" type:"string" required:"true"` + + // The RestApi identifier under which the Model will be created. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The schema for the model. For application/json models, this should be JSON-schema + // draft v4 model. + Schema *string `locationName:"schema" type:"string"` +} + +// String returns the string representation +func (s CreateModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateModelInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to create a Resource resource. +type CreateResourceInput struct { + _ struct{} `type:"structure"` + + // The parent resource's identifier. + ParentId *string `location:"uri" locationName:"parent_id" type:"string" required:"true"` + + // The last path segment for this resource. + PathPart *string `locationName:"pathPart" type:"string" required:"true"` + + // The identifier of the RestApi for the resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResourceInput) GoString() string { + return s.String() +} + +// Request to add a new RestApi resource to your collection. +type CreateRestApiInput struct { + _ struct{} `type:"structure"` + + // The name of the RestApi that you want to clone from. 
+ CloneFrom *string `locationName:"cloneFrom" type:"string"` + + // The description of the RestApi. + Description *string `locationName:"description" type:"string"` + + // The name of the RestApi. + Name *string `locationName:"name" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRestApiInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to create a Stage resource. +type CreateStageInput struct { + _ struct{} `type:"structure"` + + // Whether cache clustering is enabled for the stage. + CacheClusterEnabled *bool `locationName:"cacheClusterEnabled" type:"boolean"` + + // The stage's cache cluster size. + CacheClusterSize *string `locationName:"cacheClusterSize" type:"string" enum:"CacheClusterSize"` + + // The identifier of the Deployment resource for the Stage resource. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` + + // The description of the Stage resource. + Description *string `locationName:"description" type:"string"` + + // The identifier of the RestApi resource for the Stage resource to create. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name for the Stage resource. + StageName *string `locationName:"stageName" type:"string" required:"true"` + + // A map that defines the stage variables for the new Stage resource. 
Variable + // names can have alphabetic characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+ + Variables map[string]*string `locationName:"variables" type:"map"` +} + +// String returns the string representation +func (s CreateStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStageInput) GoString() string { + return s.String() +} + +// A request to delete the ApiKey resource. +type DeleteApiKeyInput struct { + _ struct{} `type:"structure"` + + // The identifier of the ApiKey resource to be deleted. + ApiKey *string `location:"uri" locationName:"api_Key" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApiKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApiKeyInput) GoString() string { + return s.String() +} + +type DeleteApiKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApiKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApiKeyOutput) GoString() string { + return s.String() +} + +// A request to delete the BasePathMapping resource. +type DeleteBasePathMappingInput struct { + _ struct{} `type:"structure"` + + // The base path name of the BasePathMapping resource to delete. + BasePath *string `location:"uri" locationName:"base_path" type:"string" required:"true"` + + // The domain name of the BasePathMapping resource to delete. 
+ DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBasePathMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBasePathMappingInput) GoString() string { + return s.String() +} + +type DeleteBasePathMappingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBasePathMappingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBasePathMappingOutput) GoString() string { + return s.String() +} + +type DeleteClientCertificateInput struct { + _ struct{} `type:"structure"` + + ClientCertificateId *string `location:"uri" locationName:"clientcertificate_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClientCertificateInput) GoString() string { + return s.String() +} + +type DeleteClientCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteClientCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClientCertificateOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to delete a Deployment resource. +type DeleteDeploymentInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Deployment resource to delete. + DeploymentId *string `location:"uri" locationName:"deployment_id" type:"string" required:"true"` + + // The identifier of the RestApi resource for the Deployment resource to delete. 
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentInput) GoString() string { + return s.String() +} + +type DeleteDeploymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentOutput) GoString() string { + return s.String() +} + +// A request to delete the DomainName resource. +type DeleteDomainNameInput struct { + _ struct{} `type:"structure"` + + // The name of the DomainName resource to be deleted. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDomainNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainNameInput) GoString() string { + return s.String() +} + +type DeleteDomainNameOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDomainNameOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainNameOutput) GoString() string { + return s.String() +} + +// Represents a delete integration request. +type DeleteIntegrationInput struct { + _ struct{} `type:"structure"` + + // Specifies a delete integration request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a delete integration request's resource identifier. 
+ ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies a delete integration request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIntegrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIntegrationInput) GoString() string { + return s.String() +} + +type DeleteIntegrationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIntegrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIntegrationOutput) GoString() string { + return s.String() +} + +// Represents a delete integration response request. +type DeleteIntegrationResponseInput struct { + _ struct{} `type:"structure"` + + // Specifies a delete integration response request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a delete integration response request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies a delete integration response request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies a delete integration response request's status code. 
+ StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIntegrationResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIntegrationResponseInput) GoString() string { + return s.String() +} + +type DeleteIntegrationResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIntegrationResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIntegrationResponseOutput) GoString() string { + return s.String() +} + +// Request to delete an existing Method resource. +type DeleteMethodInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb that identifies the Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMethodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMethodInput) GoString() string { + return s.String() +} + +type DeleteMethodOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMethodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMethodOutput) GoString() string { + return s.String() +} + +// A request to delete an existing MethodResponse resource. 
+type DeleteMethodResponseInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb identifier for the parent Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the MethodResponse resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the MethodResponse resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The status code identifier for the MethodResponse resource. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMethodResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMethodResponseInput) GoString() string { + return s.String() +} + +type DeleteMethodResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMethodResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMethodResponseOutput) GoString() string { + return s.String() +} + +// Request to delete an existing model in an existing RestApi resource. +type DeleteModelInput struct { + _ struct{} `type:"structure"` + + // The name of the model to delete. + ModelName *string `location:"uri" locationName:"model_name" type:"string" required:"true"` + + // The RestApi under which the model will be deleted. 
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteModelInput) GoString() string { + return s.String() +} + +type DeleteModelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteModelOutput) GoString() string { + return s.String() +} + +// Request to delete a Resource. +type DeleteResourceInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Resource resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Resource resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourceInput) GoString() string { + return s.String() +} + +type DeleteResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourceOutput) GoString() string { + return s.String() +} + +// Request to delete the specified API from your collection. +type DeleteRestApiInput struct { + _ struct{} `type:"structure"` + + // The ID of the RestApi you want to delete. 
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRestApiInput) GoString() string { + return s.String() +} + +type DeleteRestApiOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRestApiOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRestApiOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to delete a Stage resource. +type DeleteStageInput struct { + _ struct{} `type:"structure"` + + // The identifier of the RestApi resource for the Stage resource to delete. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name of the Stage resource to delete. + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStageInput) GoString() string { + return s.String() +} + +type DeleteStageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStageOutput) GoString() string { + return s.String() +} + +// An immutable representation of a RestApi resource that can be called by users +// using Stages. A deployment must be associated with a Stage for it to be callable +// over the Internet. 
+type Deployment struct { + _ struct{} `type:"structure"` + + // Gets a summary of the RestApi at the date and time that the deployment resource + // was created. + ApiSummary map[string]map[string]*MethodSnapshot `locationName:"apiSummary" type:"map"` + + // The date and time that the deployment resource was created. + CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` + + // The description for the deployment resource. + Description *string `locationName:"description" type:"string"` + + // The identifier for the deployment resource. + Id *string `locationName:"id" type:"string"` +} + +// String returns the string representation +func (s Deployment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Deployment) GoString() string { + return s.String() +} + +// Represents a domain name that is contained in a simpler, more intuitive URL +// that can be called. +type DomainName struct { + _ struct{} `type:"structure"` + + // The name of the certificate. + CertificateName *string `locationName:"certificateName" type:"string"` + + // The date when the certificate was uploaded, in ISO 8601 format. + CertificateUploadDate *time.Time `locationName:"certificateUploadDate" type:"timestamp" timestampFormat:"unix"` + + // The domain name of the Amazon CloudFront distribution. For more information, + // see the Amazon CloudFront documentation. + DistributionDomainName *string `locationName:"distributionDomainName" type:"string"` + + // The name of the DomainName resource. + DomainName *string `locationName:"domainName" type:"string"` +} + +// String returns the string representation +func (s DomainName) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainName) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to flush a stage's cache. 
+type FlushStageCacheInput struct { + _ struct{} `type:"structure"` + + // The API identifier of the stage to flush its cache. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name of the stage to flush its cache. + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s FlushStageCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlushStageCacheInput) GoString() string { + return s.String() +} + +type FlushStageCacheOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s FlushStageCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlushStageCacheOutput) GoString() string { + return s.String() +} + +type GenerateClientCertificateInput struct { + _ struct{} `type:"structure"` + + Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s GenerateClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateClientCertificateInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to get information about the current Account +// resource. +type GetAccountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountInput) GoString() string { + return s.String() +} + +// A request to get information about the current ApiKey resource. +type GetApiKeyInput struct { + _ struct{} `type:"structure"` + + // The identifier of the ApiKey resource. 
+ ApiKey *string `location:"uri" locationName:"api_Key" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetApiKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApiKeyInput) GoString() string { + return s.String() +} + +// A request to get information about the current ApiKeys resource. +type GetApiKeysInput struct { + _ struct{} `type:"structure"` + + // The maximum number of ApiKeys to get information about. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current ApiKeys resource to get information about. + Position *string `location:"querystring" locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetApiKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApiKeysInput) GoString() string { + return s.String() +} + +// Represents a collection of ApiKey resources. +type GetApiKeysOutput struct { + _ struct{} `type:"structure"` + + // The current page of any ApiKey resources in the collection of ApiKey resources. + Items []*ApiKey `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetApiKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApiKeysOutput) GoString() string { + return s.String() +} + +// Request to describe a BasePathMapping resource. +type GetBasePathMappingInput struct { + _ struct{} `type:"structure"` + + // The base path name that callers of the API must provide as part of the URL + // after the domain name. This value must be unique for all of the mappings + // across a single API. Leave this blank if you do not want callers to specify + // any base path name after the domain name. 
+ BasePath *string `location:"uri" locationName:"base_path" type:"string" required:"true"` + + // The domain name of the BasePathMapping resource to be described. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBasePathMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBasePathMappingInput) GoString() string { + return s.String() +} + +// A request to get information about a collection of BasePathMapping resources. +type GetBasePathMappingsInput struct { + _ struct{} `type:"structure"` + + // The domain name of a BasePathMapping resource. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` + + // The maximum number of BasePathMapping resources in the collection to get + // information about. The default limit is 25. It should be an integer between + // 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current BasePathMapping resource in the collection to + // get information about. + Position *string `location:"querystring" locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetBasePathMappingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBasePathMappingsInput) GoString() string { + return s.String() +} + +// Represents a collection of BasePathMapping resources. +type GetBasePathMappingsOutput struct { + _ struct{} `type:"structure"` + + // The current page of any BasePathMapping resources in the collection of base + // path mapping resources. 
+ Items []*BasePathMapping `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetBasePathMappingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBasePathMappingsOutput) GoString() string { + return s.String() +} + +type GetClientCertificateInput struct { + _ struct{} `type:"structure"` + + ClientCertificateId *string `location:"uri" locationName:"clientcertificate_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClientCertificateInput) GoString() string { + return s.String() +} + +type GetClientCertificatesInput struct { + _ struct{} `type:"structure"` + + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + Position *string `location:"querystring" locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetClientCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClientCertificatesInput) GoString() string { + return s.String() +} + +type GetClientCertificatesOutput struct { + _ struct{} `type:"structure"` + + Items []*ClientCertificate `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetClientCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetClientCertificatesOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to get information about a Deployment resource. 
+type GetDeploymentInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Deployment resource to get information about. + DeploymentId *string `location:"uri" locationName:"deployment_id" type:"string" required:"true"` + + // The identifier of the RestApi resource for the Deployment resource to get + // information about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to get information about a Deployments collection. +type GetDeploymentsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of Deployment resources in the collection to get information + // about. The default limit is 25. It should be an integer between 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current Deployment resource in the collection to get + // information about. + Position *string `location:"querystring" locationName:"position" type:"string"` + + // The identifier of the RestApi resource for the collection of Deployment resources + // to get information about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentsInput) GoString() string { + return s.String() +} + +// Represents a collection resource that contains zero or more references to +// your existing deployments, and links that guide you on ways to interact with +// your collection. The collection offers a paginated view of the contained +// deployments. 
+type GetDeploymentsOutput struct { + _ struct{} `type:"structure"` + + // The current page of any Deployment resources in the collection of deployment + // resources. + Items []*Deployment `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetDeploymentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentsOutput) GoString() string { + return s.String() +} + +// Request to get the name of a DomainName resource. +type GetDomainNameInput struct { + _ struct{} `type:"structure"` + + // The name of the DomainName resource. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDomainNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainNameInput) GoString() string { + return s.String() +} + +// Request to describe a collection of DomainName resources. +type GetDomainNamesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of DomainName resources in the collection to get information + // about. The default limit is 25. It should be an integer between 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current domain names to get information about. + Position *string `location:"querystring" locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetDomainNamesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainNamesInput) GoString() string { + return s.String() +} + +// Represents a collection of DomainName resources. 
+type GetDomainNamesOutput struct { + _ struct{} `type:"structure"` + + // The current page of any DomainName resources in the collection of DomainName + // resources. + Items []*DomainName `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetDomainNamesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainNamesOutput) GoString() string { + return s.String() +} + +// Represents a get integration request. +type GetIntegrationInput struct { + _ struct{} `type:"structure"` + + // Specifies a get integration request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a get integration request's resource identifier + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies a get integration request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetIntegrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIntegrationInput) GoString() string { + return s.String() +} + +// Represents a get integration response request. +type GetIntegrationResponseInput struct { + _ struct{} `type:"structure"` + + // Specifies a get integration response request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a get integration response request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies a get integration response request's API identifier. 
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies a get integration response request's status code. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetIntegrationResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIntegrationResponseInput) GoString() string { + return s.String() +} + +// Request to describe an existing Method resource. +type GetMethodInput struct { + _ struct{} `type:"structure"` + + // Specifies the put method request's HTTP method type. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMethodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMethodInput) GoString() string { + return s.String() +} + +// Request to describe a MethodResponse resource. +type GetMethodResponseInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb identifier for the parent Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the MethodResponse resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the MethodResponse resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The status code identifier for the MethodResponse resource. 
+ StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMethodResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMethodResponseInput) GoString() string { + return s.String() +} + +// Request to list information about a model in an existing RestApi resource. +type GetModelInput struct { + _ struct{} `type:"structure"` + + // Resolves all external model references and returns a flattened model schema. + Flatten *bool `location:"querystring" locationName:"flatten" type:"boolean"` + + // The name of the model as an identifier. + ModelName *string `location:"uri" locationName:"model_name" type:"string" required:"true"` + + // The RestApi identifier under which the Model exists. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelInput) GoString() string { + return s.String() +} + +// Request to generate a sample mapping template used to transform the payload. +type GetModelTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the model for which to generate a template. + ModelName *string `location:"uri" locationName:"model_name" type:"string" required:"true"` + + // The ID of the RestApi under which the model exists. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetModelTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelTemplateInput) GoString() string { + return s.String() +} + +// Represents a mapping template used to transform a payload. 
+type GetModelTemplateOutput struct { + _ struct{} `type:"structure"` + + // The Apache Velocity Template Language (VTL) template content used for the + // template resource. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s GetModelTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelTemplateOutput) GoString() string { + return s.String() +} + +// Request to list existing Models defined for a RestApi resource. +type GetModelsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of models in the collection to get information about. + // The default limit is 25. It should be an integer between 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the next set of results in the Models resource to get information + // about. + Position *string `location:"querystring" locationName:"position" type:"string"` + + // The RestApi identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetModelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelsInput) GoString() string { + return s.String() +} + +// Represents a collection of Model resources. +type GetModelsOutput struct { + _ struct{} `type:"structure"` + + // Gets the current Model resource in the collection. + Items []*Model `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetModelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetModelsOutput) GoString() string { + return s.String() +} + +// Request to list information about a resource. 
+type GetResourceInput struct { + _ struct{} `type:"structure"` + + // The identifier for the Resource resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourceInput) GoString() string { + return s.String() +} + +// Request to list information about a collection of resources. +type GetResourcesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of Resource resources in the collection to get information + // about. The default limit is 25. It should be an integer between 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the next set of results in the current Resources resource + // to get information about. + Position *string `location:"querystring" locationName:"position" type:"string"` + + // The RestApi identifier for the Resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourcesInput) GoString() string { + return s.String() +} + +// Represents a collection of Resource resources. +type GetResourcesOutput struct { + _ struct{} `type:"structure"` + + // Gets the current Resource resource in the collection. 
+ Items []*Resource `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourcesOutput) GoString() string { + return s.String() +} + +// Request to list an existing RestApi defined for your collection. +type GetRestApiInput struct { + _ struct{} `type:"structure"` + + // The identifier of the RestApi resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRestApiInput) GoString() string { + return s.String() +} + +// Request to list existing RestApis defined for your collection. +type GetRestApisInput struct { + _ struct{} `type:"structure"` + + // The maximum number of RestApi resources in the collection to get information + // about. The default limit is 25. It should be an integer between 1 - 500. + Limit *int64 `location:"querystring" locationName:"limit" type:"integer"` + + // The position of the current RestApis resource in the collection to get information + // about. + Position *string `location:"querystring" locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetRestApisInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRestApisInput) GoString() string { + return s.String() +} + +// Contains references to your APIs and links that guide you in ways to interact +// with your collection. A collection offers a paginated view of your APIs. +type GetRestApisOutput struct { + _ struct{} `type:"structure"` + + // An array of links to the current page of RestApi resources. 
+ Items []*RestApi `locationName:"item" type:"list"` + + Position *string `locationName:"position" type:"string"` +} + +// String returns the string representation +func (s GetRestApisOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRestApisOutput) GoString() string { + return s.String() +} + +type GetSdkInput struct { + _ struct{} `type:"structure"` + + Parameters map[string]*string `location:"querystring" locationName:"parameters" type:"map"` + + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + SdkType *string `location:"uri" locationName:"sdk_type" type:"string" required:"true"` + + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSdkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSdkInput) GoString() string { + return s.String() +} + +type GetSdkOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body []byte `locationName:"body" type:"blob"` + + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` +} + +// String returns the string representation +func (s GetSdkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSdkOutput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to get information about a Stage resource. +type GetStageInput struct { + _ struct{} `type:"structure"` + + // The identifier of the RestApi resource for the Stage resource to get information + // about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name of the Stage resource to get information about. 
+ StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStageInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to get information about one or more Stage resources. +type GetStagesInput struct { + _ struct{} `type:"structure"` + + // The stages' deployment identifiers. + DeploymentId *string `location:"querystring" locationName:"deploymentId" type:"string"` + + // The stages' API identifiers. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetStagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStagesInput) GoString() string { + return s.String() +} + +// A list of Stage resource that are associated with the ApiKey resource. +type GetStagesOutput struct { + _ struct{} `type:"structure"` + + // An individual Stage resource. + Item []*Stage `locationName:"item" type:"list"` +} + +// String returns the string representation +func (s GetStagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStagesOutput) GoString() string { + return s.String() +} + +// Represents a HTTP, AWS, or Mock integration. +type Integration struct { + _ struct{} `type:"structure"` + + // Specifies the integration's cache key parameters. + CacheKeyParameters []*string `locationName:"cacheKeyParameters" type:"list"` + + // Specifies the integration's cache namespace. + CacheNamespace *string `locationName:"cacheNamespace" type:"string"` + + // Specifies the credentials required for the integration, if any. For AWS integrations, + // three options are available. 
To specify an IAM Role for Amazon API Gateway + // to assume, use the role's Amazon Resource Name (ARN). To require that the + // caller's identity be passed through from the request, specify the string + // arn:aws:iam::\*:user/\*. To use resource-based permissions on supported AWS + // services, specify null. + Credentials *string `locationName:"credentials" type:"string"` + + // Specifies the integration's HTTP method type. + HttpMethod *string `locationName:"httpMethod" type:"string"` + + // Specifies the integration's responses. + IntegrationResponses map[string]*IntegrationResponse `locationName:"integrationResponses" type:"map"` + + // Represents requests parameters that are sent with the backend request. Request + // parameters are represented as a key/value map, with a destination as the + // key and a source as the value. A source must match an existing method request + // parameter, or a static value. Static values must be enclosed with single + // quotes, and be pre-encoded based on their destination in the request. The + // destination must match the pattern integration.request.{location}.{name}, + // where location is either querystring, path, or header. name must be a valid, + // unique parameter name. + RequestParameters map[string]*string `locationName:"requestParameters" type:"map"` + + // Specifies the integration's request templates. + RequestTemplates map[string]*string `locationName:"requestTemplates" type:"map"` + + // Specifies the integration's type. + Type *string `locationName:"type" type:"string" enum:"IntegrationType"` + + // Specifies the integration's Uniform Resource Identifier (URI). For HTTP integrations, + // the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 + // specification. For AWS integrations, the URI should be of the form arn:aws:apigateway:{region}:{service}:{path|action}/{service_api}. + // Region and service are used to determine the right endpoint. 
For AWS services + // that use the Action= query string parameter, service_api should be a valid + // action for the desired service. For RESTful AWS service APIs, path is used + // to indicate that the remaining substring in the URI should be treated as + // the path to the resource, including the initial /. + Uri *string `locationName:"uri" type:"string"` +} + +// String returns the string representation +func (s Integration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Integration) GoString() string { + return s.String() +} + +// Represents an integration response. The status code must map to an existing +// MethodResponse, and parameters and templates can be used to transform the +// backend response. +type IntegrationResponse struct { + _ struct{} `type:"structure"` + + // Represents response parameters that can be read from the backend response. + // Response parameters are represented as a key/value map, with a destination + // as the key and a source as the value. A destination must match an existing + // response parameter in the Method. The source can be a header from the backend + // response, or a static value. Static values are specified using enclosing + // single quotes, and backend response headers can be read using the pattern + // integration.response.header.{name}. + ResponseParameters map[string]*string `locationName:"responseParameters" type:"map"` + + // Specifies the templates used to transform the integration response body. + // Response templates are represented as a key/value map, with a content-type + // as the key and a template as the value. + ResponseTemplates map[string]*string `locationName:"responseTemplates" type:"map"` + + // Specifies the regular expression (regex) pattern used to choose an integration + // response based on the response from the backend. If the backend is an AWS + // Lambda function, the AWS Lambda function error header is matched. 
For all + // other HTTP and AWS backends, the HTTP status code is matched. + SelectionPattern *string `locationName:"selectionPattern" type:"string"` + + // Specifies the status code that is used to map the integration response to + // an existing MethodResponse. + StatusCode *string `locationName:"statusCode" type:"string"` +} + +// String returns the string representation +func (s IntegrationResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntegrationResponse) GoString() string { + return s.String() +} + +// Represents a method. +type Method struct { + _ struct{} `type:"structure"` + + // Specifies whether the method requires a valid ApiKey. + ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` + + // The method's authorization type. + AuthorizationType *string `locationName:"authorizationType" type:"string"` + + // The HTTP method. + HttpMethod *string `locationName:"httpMethod" type:"string"` + + // The method's integration. + MethodIntegration *Integration `locationName:"methodIntegration" type:"structure"` + + // Represents available responses that can be sent to the caller. Method responses + // are represented as a key/value map, with an HTTP status code as the key and + // a MethodResponse as the value. The status codes are available for the Integration + // responses to map to. + MethodResponses map[string]*MethodResponse `locationName:"methodResponses" type:"map"` + + // Specifies the Model resources used for the request's content type. Request + // models are represented as a key/value map, with a content type as the key + // and a Model name as the value. + RequestModels map[string]*string `locationName:"requestModels" type:"map"` + + // Represents request parameters that can be accepted by Amazon API Gateway. + // Request parameters are represented as a key/value map, with a source as the + // key and a Boolean flag as the value. 
The Boolean flag is used to specify + // whether the parameter is required. A source must match the pattern method.request.{location}.{name}, + // where location is either querystring, path, or header. name is a valid, unique + // parameter name. Sources specified here are available to the integration for + // mapping to integration request parameters or templates. + RequestParameters map[string]*bool `locationName:"requestParameters" type:"map"` +} + +// String returns the string representation +func (s Method) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Method) GoString() string { + return s.String() +} + +// Represents a method response. Amazon API Gateway sends back the status code +// to the caller as the HTTP status code. Parameters and models can be used +// to transform the response from the method's integration. +type MethodResponse struct { + _ struct{} `type:"structure"` + + // Specifies the Model resources used for the response's content-type. Response + // models are represented as a key/value map, with a content-type as the key + // and a Model name as the value. + ResponseModels map[string]*string `locationName:"responseModels" type:"map"` + + // Represents response parameters that can be sent back to the caller by Amazon + // API Gateway. Response parameters are represented as a key/value map, with + // a destination as the key and a boolean flag as the value, which is used to + // specify whether the parameter is required. A destination must match the pattern + // method.response.header.{name}, where name is a valid, unique header name. + // Destinations specified here are available to the integration for mapping + // from integration response parameters. + ResponseParameters map[string]*bool `locationName:"responseParameters" type:"map"` + + // The method response's status code. 
+ StatusCode *string `locationName:"statusCode" type:"string"` +} + +// String returns the string representation +func (s MethodResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MethodResponse) GoString() string { + return s.String() +} + +// Specifies the method setting properties. +type MethodSetting struct { + _ struct{} `type:"structure"` + + // Specifies whether the cached responses are encrypted. The PATCH path for + // this setting is /{method_setting_key}/caching/dataEncrypted, and the value + // is a Boolean. + CacheDataEncrypted *bool `locationName:"cacheDataEncrypted" type:"boolean"` + + // Specifies the time to live (TTL) in seconds, for cached responses. The higher + // a the TTL, the longer the response will be cached. The PATCH path for this + // setting is /{method_setting_key}/caching/ttlInSeconds, and the value is an + // integer. + CacheTtlInSeconds *int64 `locationName:"cacheTtlInSeconds" type:"integer"` + + // Specifies whether responses should be cached and returned for requests. A + // cache cluster must be enabled on the stage for responses to be cached. The + // PATCH path for this setting is /{method_setting_key}/caching/enabled, and + // the value is a Boolean. + CachingEnabled *bool `locationName:"cachingEnabled" type:"boolean"` + + // Specifies the whether data trace logging is enabled for this method, which + // effects the log entries pushed to Amazon CloudWatch Logs. The PATCH path + // for this setting is /{method_setting_key}/logging/dataTrace, and the value + // is a Boolean. + DataTraceEnabled *bool `locationName:"dataTraceEnabled" type:"boolean"` + + // Specifies the logging level for this method, which effects the log entries + // pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, + // and the available levels are OFF, ERROR, and INFO. 
+ LoggingLevel *string `locationName:"loggingLevel" type:"string"` + + // Specifies whether Amazon CloudWatch metrics are enabled for this method. + // The PATCH path for this setting is /{method_setting_key}/metrics/enabled, + // and the value is a Boolean. + MetricsEnabled *bool `locationName:"metricsEnabled" type:"boolean"` + + // Specifies the throttling burst limit. The PATCH path for this setting is + // /{method_setting_key}/throttling/burstLimit, and the value is an integer. + ThrottlingBurstLimit *int64 `locationName:"throttlingBurstLimit" type:"integer"` + + // Specifies the throttling rate limit. The PATCH path for this setting is /{method_setting_key}/throttling/rateLimit, + // and the value is a double. + ThrottlingRateLimit *float64 `locationName:"throttlingRateLimit" type:"double"` +} + +// String returns the string representation +func (s MethodSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MethodSetting) GoString() string { + return s.String() +} + +// Represents a summary of a Method resource, given a particular date and time. +type MethodSnapshot struct { + _ struct{} `type:"structure"` + + // Specifies whether the method requires a valid ApiKey. + ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` + + // Specifies the type of authorization used for the method. + AuthorizationType *string `locationName:"authorizationType" type:"string"` +} + +// String returns the string representation +func (s MethodSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MethodSnapshot) GoString() string { + return s.String() +} + +// Represents the structure of a request or response payload for a method. +type Model struct { + _ struct{} `type:"structure"` + + // The content-type for the model. + ContentType *string `locationName:"contentType" type:"string"` + + // The description of the model. 
+ Description *string `locationName:"description" type:"string"` + + // The identifier for the model resource. + Id *string `locationName:"id" type:"string"` + + // The name of the model. + Name *string `locationName:"name" type:"string"` + + // The schema for the model. For application/json models, this should be JSON-schema + // draft v4 model. + Schema *string `locationName:"schema" type:"string"` +} + +// String returns the string representation +func (s Model) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Model) GoString() string { + return s.String() +} + +// A single patch operation to apply to the specified resource. Please refer +// to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how +// each operation is used. +type PatchOperation struct { + _ struct{} `type:"structure"` + + // The "move" and "copy" operation object MUST contain a "from" member, which + // is a string containing a JSON Pointer value that references the location + // in the target document to move the value from. + From *string `locationName:"from" type:"string"` + + // A patch operation whose value indicates the operation to perform. Its value + // MUST be one of "add", "remove", "replace", "move", "copy", or "test"; other + // values are errors. + Op *string `locationName:"op" type:"string" enum:"op"` + + // Operation objects MUST have exactly one "path" member. That member's value + // is a string containing a `JSON-Pointer` value that references a location + // within the target document (the "target location") where the operation is + // performed. + Path *string `locationName:"path" type:"string"` + + // The actual value content. 
+ Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s PatchOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchOperation) GoString() string { + return s.String() +} + +// Represents a put integration request. +type PutIntegrationInput struct { + _ struct{} `type:"structure"` + + // Specifies a put integration input's cache key parameters. + CacheKeyParameters []*string `locationName:"cacheKeyParameters" type:"list"` + + // Specifies a put integration input's cache namespace. + CacheNamespace *string `locationName:"cacheNamespace" type:"string"` + + // Specifies whether credentials are required for a put integration. + Credentials *string `locationName:"credentials" type:"string"` + + // Specifies a put integration request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a put integration HTTP method. + IntegrationHttpMethod *string `locationName:"httpMethod" type:"string"` + + // Represents request parameters that are sent with the backend request. Request + // parameters are represented as a key/value map, with a destination as the + // key and a source as the value. A source must match an existing method request + // parameter, or a static value. Static values must be enclosed with single + // quotes, and be pre-encoded based on their destination in the request. The + // destination must match the pattern integration.request.{location}.{name}, + // where location is either querystring, path, or header. name must be a valid, + // unique parameter name. + RequestParameters map[string]*string `locationName:"requestParameters" type:"map"` + + // Specifies the templates used to transform the method request body. Request + // templates are represented as a key/value map, with a content-type as the + // key and a template as the value. 
+ RequestTemplates map[string]*string `locationName:"requestTemplates" type:"map"` + + // Specifies a put integration request's resource ID. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies a put integration request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies a put integration input's type. + Type *string `locationName:"type" type:"string" required:"true" enum:"IntegrationType"` + + // Specifies a put integration input's Uniform Resource Identifier (URI). + Uri *string `locationName:"uri" type:"string"` +} + +// String returns the string representation +func (s PutIntegrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIntegrationInput) GoString() string { + return s.String() +} + +// Represents a put integration response request. +type PutIntegrationResponseInput struct { + _ struct{} `type:"structure"` + + // Specifies a put integration response request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies a put integration response request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Represents response parameters that can be read from the backend response. + // Response parameters are represented as a key/value map, with a destination + // as the key and a source as the value. A destination must match an existing + // response parameter in the Method. The source can be a header from the backend + // response, or a static value. Static values are specified using enclosing + // single quotes, and backend response headers can be read using the pattern + // integration.response.header.{name}. 
+ ResponseParameters map[string]*string `locationName:"responseParameters" type:"map"` + + // Specifies a put integration response's templates. + ResponseTemplates map[string]*string `locationName:"responseTemplates" type:"map"` + + // Specifies a put integration response request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies the selection pattern of a put integration response. + SelectionPattern *string `locationName:"selectionPattern" type:"string"` + + // Specifies the status code that is used to map the integration response to + // an existing MethodResponse. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutIntegrationResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIntegrationResponseInput) GoString() string { + return s.String() +} + +// Request to add a method to an existing Resource resource. +type PutMethodInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the method required a valid ApiKey. + ApiKeyRequired *bool `locationName:"apiKeyRequired" type:"boolean"` + + // Specifies the type of authorization used for the method. + AuthorizationType *string `locationName:"authorizationType" type:"string" required:"true"` + + // Specifies the put method request's HTTP method type. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // Specifies the Model resources used for the request's content type. Request + // models are represented as a key/value map, with a content type as the key + // and a Model name as the value. + RequestModels map[string]*string `locationName:"requestModels" type:"map"` + + // Represents requests parameters that are sent with the backend request. 
Request + // parameters are represented as a key/value map, with a destination as the + // key and a source as the value. A source must match an existing method request + // parameter, or a static value. Static values must be enclosed with single + // quotes, and be pre-encoded based on their destination in the request. The + // destination must match the pattern integration.request.{location}.{name}, + // where location is either querystring, path, or header. name must be a valid, + // unique parameter name. + RequestParameters map[string]*bool `locationName:"requestParameters" type:"map"` + + // The Resource identifier for the new Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the new Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutMethodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMethodInput) GoString() string { + return s.String() +} + +// Request to add a MethodResponse to an existing Method resource. +type PutMethodResponseInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb that identifies the Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // The Resource identifier for the Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies the Model resources used for the response's content type. Response + // models are represented as a key/value map, with a content type as the key + // and a Model name as the value. + ResponseModels map[string]*string `locationName:"responseModels" type:"map"` + + // Represents response parameters that can be sent back to the caller by Amazon + // API Gateway. 
Response parameters are represented as a key/value map, with + // a destination as the key and a Boolean flag as the value. The Boolean flag + // is used to specify whether the parameter is required. A destination must + // match the pattern method.response.header.{name}, where name is a valid, unique + // header name. Destinations specified here are available to the integration + // for mapping from integration response parameters. + ResponseParameters map[string]*bool `locationName:"responseParameters" type:"map"` + + // The RestApi identifier for the Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The method response's status code. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutMethodResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMethodResponseInput) GoString() string { + return s.String() +} + +// Represents a resource. +type Resource struct { + _ struct{} `type:"structure"` + + // The resource's identifier. + Id *string `locationName:"id" type:"string"` + + // The parent resource's identifier. + ParentId *string `locationName:"parentId" type:"string"` + + // The full path for this resource. + Path *string `locationName:"path" type:"string"` + + // The last path segment for this resource. + PathPart *string `locationName:"pathPart" type:"string"` + + // Map of methods for this resource, which is included only if requested using + // the embed option. + ResourceMethods map[string]*Method `locationName:"resourceMethods" type:"map"` +} + +// String returns the string representation +func (s Resource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resource) GoString() string { + return s.String() +} + +// Represents a REST API. 
+type RestApi struct { + _ struct{} `type:"structure"` + + // The date when the API was created, in ISO 8601 format. + CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` + + // The API's description. + Description *string `locationName:"description" type:"string"` + + // The API's identifier. This identifier is unique across all of your APIs in + // Amazon API Gateway. + Id *string `locationName:"id" type:"string"` + + // The API's name. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s RestApi) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestApi) GoString() string { + return s.String() +} + +// Represents a unique identifier for a version of a deployed RestApi that is +// callable by users. +type Stage struct { + _ struct{} `type:"structure"` + + // Specifies whether a cache cluster is enabled for the stage. + CacheClusterEnabled *bool `locationName:"cacheClusterEnabled" type:"boolean"` + + // The size of the cache cluster for the stage, if enabled. + CacheClusterSize *string `locationName:"cacheClusterSize" type:"string" enum:"CacheClusterSize"` + + // The status of the cache cluster for the stage, if enabled. + CacheClusterStatus *string `locationName:"cacheClusterStatus" type:"string" enum:"CacheClusterStatus"` + + ClientCertificateId *string `locationName:"clientCertificateId" type:"string"` + + // The date and time that the stage was created, in ISO 8601 format. + CreatedDate *time.Time `locationName:"createdDate" type:"timestamp" timestampFormat:"unix"` + + // The identifier of the Deployment that the stage points to. + DeploymentId *string `locationName:"deploymentId" type:"string"` + + // The stage's description. + Description *string `locationName:"description" type:"string"` + + // The date and time that information about the stage was last updated, in ISO + // 8601 format. 
+ LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp" timestampFormat:"unix"` + + // A map that defines the method settings for a Stage resource. Keys are defined + // as {resource_path}/{http_method} for an individual method override, or \*/\* + // for the settings applied to all methods in the stage. + MethodSettings map[string]*MethodSetting `locationName:"methodSettings" type:"map"` + + // The name of the stage is the first path segment in the Uniform Resource Identifier + // (URI) of a call to Amazon API Gateway. + StageName *string `locationName:"stageName" type:"string"` + + // A map that defines the stage variables for a Stage resource. Variable names + // can have alphabetic characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+ + Variables map[string]*string `locationName:"variables" type:"map"` +} + +// String returns the string representation +func (s Stage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stage) GoString() string { + return s.String() +} + +// A reference to a unique stage identified in the format {restApiId}/{stage}. +type StageKey struct { + _ struct{} `type:"structure"` + + // A list of Stage resources that are associated with the ApiKey resource. + RestApiId *string `locationName:"restApiId" type:"string"` + + // The stage name in the RestApi that the stage key references. 
+ StageName *string `locationName:"stageName" type:"string"` +} + +// String returns the string representation +func (s StageKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageKey) GoString() string { + return s.String() +} + +type TestInvokeMethodInput struct { + _ struct{} `type:"structure"` + + Body *string `locationName:"body" type:"string"` + + ClientCertificateId *string `locationName:"clientCertificateId" type:"string"` + + Headers map[string]*string `locationName:"headers" type:"map"` + + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + PathWithQueryString *string `locationName:"pathWithQueryString" type:"string"` + + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + StageVariables map[string]*string `locationName:"stageVariables" type:"map"` +} + +// String returns the string representation +func (s TestInvokeMethodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestInvokeMethodInput) GoString() string { + return s.String() +} + +// Represents the response of the test invoke request in HTTP method. +type TestInvokeMethodOutput struct { + _ struct{} `type:"structure"` + + // The body of HTTP response. + Body *string `locationName:"body" type:"string"` + + // The headers of HTTP response. + Headers map[string]*string `locationName:"headers" type:"map"` + + // The execution latency of the test invoke request. + Latency *int64 `locationName:"latency" type:"long"` + + // The Amazon API Gateway execution log for the test invoke request. + Log *string `locationName:"log" type:"string"` + + // The HTTP status code. 
+ Status *int64 `locationName:"status" type:"integer"` +} + +// String returns the string representation +func (s TestInvokeMethodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestInvokeMethodOutput) GoString() string { + return s.String() +} + +// Returns the throttle settings. +type ThrottleSettings struct { + _ struct{} `type:"structure"` + + // Returns the burstLimit when ThrottleSettings is called. + BurstLimit *int64 `locationName:"burstLimit" type:"integer"` + + // Returns the rateLimit when ThrottleSettings is called. + RateLimit *float64 `locationName:"rateLimit" type:"double"` +} + +// String returns the string representation +func (s ThrottleSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThrottleSettings) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to change information about the current Account +// resource. +type UpdateAccountInput struct { + _ struct{} `type:"structure"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` +} + +// String returns the string representation +func (s UpdateAccountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountInput) GoString() string { + return s.String() +} + +// A request to change information about an ApiKey resource. +type UpdateApiKeyInput struct { + _ struct{} `type:"structure"` + + // The identifier of the ApiKey resource to be updated. + ApiKey *string `location:"uri" locationName:"api_Key" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. 
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` +} + +// String returns the string representation +func (s UpdateApiKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApiKeyInput) GoString() string { + return s.String() +} + +// A request to change information about the BasePathMapping resource. +type UpdateBasePathMappingInput struct { + _ struct{} `type:"structure"` + + // The base path of the BasePathMapping resource to change. + BasePath *string `location:"uri" locationName:"base_path" type:"string" required:"true"` + + // The domain name of the BasePathMapping resource to change. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` +} + +// String returns the string representation +func (s UpdateBasePathMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBasePathMappingInput) GoString() string { + return s.String() +} + +type UpdateClientCertificateInput struct { + _ struct{} `type:"structure"` + + ClientCertificateId *string `location:"uri" locationName:"clientcertificate_id" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. 
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` +} + +// String returns the string representation +func (s UpdateClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClientCertificateInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to change information about a Deployment resource. +type UpdateDeploymentInput struct { + _ struct{} `type:"structure"` + + // The replacment identifier for the Deployment resource to change information + // about. + DeploymentId *string `location:"uri" locationName:"deployment_id" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The replacement identifier of the RestApi resource for the Deployment resource + // to change information about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDeploymentInput) GoString() string { + return s.String() +} + +// A request to change information about the DomainName resource. +type UpdateDomainNameInput struct { + _ struct{} `type:"structure"` + + // The name of the DomainName resource to be changed. + DomainName *string `location:"uri" locationName:"domain_name" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. 
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` +} + +// String returns the string representation +func (s UpdateDomainNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainNameInput) GoString() string { + return s.String() +} + +// Represents an update integration request. +type UpdateIntegrationInput struct { + _ struct{} `type:"structure"` + + // Represents an update integration request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // Represents an update integration request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Represents an update integration request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateIntegrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateIntegrationInput) GoString() string { + return s.String() +} + +// Represents an update integration response request. +type UpdateIntegrationResponseInput struct { + _ struct{} `type:"structure"` + + // Specifies an update integration response request's HTTP method. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. 
+ PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // Specifies an update integration response request's resource identifier. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // Specifies an update integration response request's API identifier. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // Specifies an update integration response request's status code. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateIntegrationResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateIntegrationResponseInput) GoString() string { + return s.String() +} + +// Request to update an existing Method resource. +type UpdateMethodInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb that identifies the Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The Resource identifier for the Method resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Method resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateMethodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMethodInput) GoString() string { + return s.String() +} + +// A request to update an existing MethodResponse resource. 
+type UpdateMethodResponseInput struct { + _ struct{} `type:"structure"` + + // The HTTP verb identifier for the parent Method resource. + HttpMethod *string `location:"uri" locationName:"http_method" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The Resource identifier for the MethodResponse resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the MethodResponse resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The status code identifier for the MethodResponse resource. + StatusCode *string `location:"uri" locationName:"status_code" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateMethodResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMethodResponseInput) GoString() string { + return s.String() +} + +// Request to update an existing model in an existing RestApi resource. +type UpdateModelInput struct { + _ struct{} `type:"structure"` + + // The name of the model to update. + ModelName *string `location:"uri" locationName:"model_name" type:"string" required:"true"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The RestApi identifier under which the model exists. 
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateModelInput) GoString() string { + return s.String() +} + +// Request to change information about a Resource resource. +type UpdateResourceInput struct { + _ struct{} `type:"structure"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The identifier of the Resource resource. + ResourceId *string `location:"uri" locationName:"resource_id" type:"string" required:"true"` + + // The RestApi identifier for the Resource resource. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateResourceInput) GoString() string { + return s.String() +} + +// Request to update an existing RestApi resource in your collection. +type UpdateRestApiInput struct { + _ struct{} `type:"structure"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The ID of the RestApi you want to update. 
+ RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRestApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRestApiInput) GoString() string { + return s.String() +} + +// Requests Amazon API Gateway to change information about a Stage resource. +type UpdateStageInput struct { + _ struct{} `type:"structure"` + + // A list of operations describing the updates to apply to the specified resource. + // The patches are applied in the order specified in the list. + PatchOperations []*PatchOperation `locationName:"patchOperations" type:"list"` + + // The identifier of the RestApi resource for the Stage resource to change information + // about. + RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` + + // The name of the Stage resource to change information about. + StageName *string `location:"uri" locationName:"stage_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStageInput) GoString() string { + return s.String() +} + +// Returns the size of the CacheCluster. +const ( + // @enum CacheClusterSize + CacheClusterSize05 = "0.5" + // @enum CacheClusterSize + CacheClusterSize16 = "1.6" + // @enum CacheClusterSize + CacheClusterSize61 = "6.1" + // @enum CacheClusterSize + CacheClusterSize135 = "13.5" + // @enum CacheClusterSize + CacheClusterSize284 = "28.4" + // @enum CacheClusterSize + CacheClusterSize582 = "58.2" + // @enum CacheClusterSize + CacheClusterSize118 = "118" + // @enum CacheClusterSize + CacheClusterSize237 = "237" +) + +// Returns the status of the CacheCluster. 
+const ( + // @enum CacheClusterStatus + CacheClusterStatusCreateInProgress = "CREATE_IN_PROGRESS" + // @enum CacheClusterStatus + CacheClusterStatusAvailable = "AVAILABLE" + // @enum CacheClusterStatus + CacheClusterStatusDeleteInProgress = "DELETE_IN_PROGRESS" + // @enum CacheClusterStatus + CacheClusterStatusNotAvailable = "NOT_AVAILABLE" + // @enum CacheClusterStatus + CacheClusterStatusFlushInProgress = "FLUSH_IN_PROGRESS" +) + +// The integration type. Possible values are HTTP, AWS, or Mock. +const ( + // @enum IntegrationType + IntegrationTypeHttp = "HTTP" + // @enum IntegrationType + IntegrationTypeAws = "AWS" + // @enum IntegrationType + IntegrationTypeMock = "MOCK" +) + +const ( + // @enum op + OpAdd = "add" + // @enum op + OpRemove = "remove" + // @enum op + OpReplace = "replace" + // @enum op + OpMove = "move" + // @enum op + OpCopy = "copy" + // @enum op + OpTest = "test" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/customization.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/customization.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/customization.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/customization.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,14 @@ +package apigateway + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = func(c *client.Client) { + c.Handlers.Build.PushBack(func(r *request.Request) { + r.HTTPRequest.Header.Add("Accept", "application/json") + }) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/examples_test.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1588 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package apigateway_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/apigateway" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleAPIGateway_CreateApiKey() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateApiKeyInput{ + Description: aws.String("String"), + Enabled: aws.Bool(true), + Name: aws.String("String"), + StageKeys: []*apigateway.StageKey{ + { // Required + RestApiId: aws.String("String"), + StageName: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateApiKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateBasePathMapping() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateBasePathMappingInput{ + DomainName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + BasePath: aws.String("String"), + Stage: aws.String("String"), + } + resp, err := svc.CreateBasePathMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_CreateDeployment() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateDeploymentInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + CacheClusterEnabled: aws.Bool(true), + CacheClusterSize: aws.String("CacheClusterSize"), + Description: aws.String("String"), + StageDescription: aws.String("String"), + Variables: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateDomainName() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateDomainNameInput{ + CertificateBody: aws.String("String"), // Required + CertificateChain: aws.String("String"), // Required + CertificateName: aws.String("String"), // Required + CertificatePrivateKey: aws.String("String"), // Required + DomainName: aws.String("String"), // Required + } + resp, err := svc.CreateDomainName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateModel() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateModelInput{ + ContentType: aws.String("String"), // Required + Name: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Description: aws.String("String"), + Schema: aws.String("String"), + } + resp, err := svc.CreateModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateResource() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateResourceInput{ + ParentId: aws.String("String"), // Required + PathPart: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.CreateResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateRestApiInput{ + Name: aws.String("String"), // Required + CloneFrom: aws.String("String"), + Description: aws.String("String"), + } + resp, err := svc.CreateRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_CreateStage() { + svc := apigateway.New(session.New()) + + params := &apigateway.CreateStageInput{ + DeploymentId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + CacheClusterEnabled: aws.Bool(true), + CacheClusterSize: aws.String("CacheClusterSize"), + Description: aws.String("String"), + Variables: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateStage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteApiKey() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteApiKeyInput{ + ApiKey: aws.String("String"), // Required + } + resp, err := svc.DeleteApiKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteBasePathMapping() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteBasePathMappingInput{ + BasePath: aws.String("String"), // Required + DomainName: aws.String("String"), // Required + } + resp, err := svc.DeleteBasePathMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteClientCertificate() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteClientCertificateInput{ + ClientCertificateId: aws.String("String"), // Required + } + resp, err := svc.DeleteClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteDeployment() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteDeploymentInput{ + DeploymentId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteDomainName() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteDomainNameInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.DeleteDomainName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteIntegration() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteIntegrationInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteIntegration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteIntegrationResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteIntegrationResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + } + resp, err := svc.DeleteIntegrationResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteMethodInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteMethodResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteMethodResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + } + resp, err := svc.DeleteMethodResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteModel() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteModelInput{ + ModelName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteResource() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteResourceInput{ + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteRestApiInput{ + RestApiId: aws.String("String"), // Required + } + resp, err := svc.DeleteRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_DeleteStage() { + svc := apigateway.New(session.New()) + + params := &apigateway.DeleteStageInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + } + resp, err := svc.DeleteStage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_FlushStageCache() { + svc := apigateway.New(session.New()) + + params := &apigateway.FlushStageCacheInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + } + resp, err := svc.FlushStageCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GenerateClientCertificate() { + svc := apigateway.New(session.New()) + + params := &apigateway.GenerateClientCertificateInput{ + Description: aws.String("String"), + } + resp, err := svc.GenerateClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetAccount() { + svc := apigateway.New(session.New()) + + var params *apigateway.GetAccountInput + resp, err := svc.GetAccount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetApiKey() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetApiKeyInput{ + ApiKey: aws.String("String"), // Required + } + resp, err := svc.GetApiKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetApiKeys() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetApiKeysInput{ + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetApiKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetBasePathMapping() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetBasePathMappingInput{ + BasePath: aws.String("String"), // Required + DomainName: aws.String("String"), // Required + } + resp, err := svc.GetBasePathMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetBasePathMappings() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetBasePathMappingsInput{ + DomainName: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetBasePathMappings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetClientCertificate() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetClientCertificateInput{ + ClientCertificateId: aws.String("String"), // Required + } + resp, err := svc.GetClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetClientCertificates() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetClientCertificatesInput{ + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetClientCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetDeployment() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetDeploymentInput{ + DeploymentId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetDeployments() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetDeploymentsInput{ + RestApiId: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetDomainName() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetDomainNameInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.GetDomainName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetDomainNames() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetDomainNamesInput{ + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetDomainNames(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetIntegration() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetIntegrationInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetIntegration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetIntegrationResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetIntegrationResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + } + resp, err := svc.GetIntegrationResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetMethodInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetMethodResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetMethodResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + } + resp, err := svc.GetMethodResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetModel() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetModelInput{ + ModelName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Flatten: aws.Bool(true), + } + resp, err := svc.GetModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetModelTemplate() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetModelTemplateInput{ + ModelName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetModelTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetModels() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetModelsInput{ + RestApiId: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetModels(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetResource() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetResourceInput{ + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetResources() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetResourcesInput{ + RestApiId: aws.String("String"), // Required + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetRestApiInput{ + RestApiId: aws.String("String"), // Required + } + resp, err := svc.GetRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetRestApis() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetRestApisInput{ + Limit: aws.Int64(1), + Position: aws.String("String"), + } + resp, err := svc.GetRestApis(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_GetSdk() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetSdkInput{ + RestApiId: aws.String("String"), // Required + SdkType: aws.String("String"), // Required + StageName: aws.String("String"), // Required + Parameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.GetSdk(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetStage() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetStageInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + } + resp, err := svc.GetStage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_GetStages() { + svc := apigateway.New(session.New()) + + params := &apigateway.GetStagesInput{ + RestApiId: aws.String("String"), // Required + DeploymentId: aws.String("String"), + } + resp, err := svc.GetStages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_PutIntegration() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutIntegrationInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Type: aws.String("IntegrationType"), // Required + CacheKeyParameters: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + CacheNamespace: aws.String("String"), + Credentials: aws.String("String"), + IntegrationHttpMethod: aws.String("String"), + RequestParameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + RequestTemplates: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + Uri: aws.String("String"), + } + resp, err := svc.PutIntegration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_PutIntegrationResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutIntegrationResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + ResponseParameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ResponseTemplates: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + SelectionPattern: aws.String("String"), + } + resp, err := svc.PutIntegrationResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_PutMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutMethodInput{ + AuthorizationType: aws.String("String"), // Required + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + ApiKeyRequired: aws.Bool(true), + RequestModels: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + RequestParameters: map[string]*bool{ + "Key": aws.Bool(true), // Required + // More values... + }, + } + resp, err := svc.PutMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_PutMethodResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.PutMethodResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + ResponseModels: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ResponseParameters: map[string]*bool{ + "Key": aws.Bool(true), // Required + // More values... + }, + } + resp, err := svc.PutMethodResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_TestInvokeMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.TestInvokeMethodInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + Body: aws.String("String"), + ClientCertificateId: aws.String("String"), + Headers: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + PathWithQueryString: aws.String("String"), + StageVariables: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.TestInvokeMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateAccount() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateAccountInput{ + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateAccount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateApiKey() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateApiKeyInput{ + ApiKey: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateApiKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateBasePathMapping() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateBasePathMappingInput{ + BasePath: aws.String("String"), // Required + DomainName: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateBasePathMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateClientCertificate() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateClientCertificateInput{ + ClientCertificateId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateDeployment() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateDeploymentInput{ + DeploymentId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateDomainName() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateDomainNameInput{ + DomainName: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + } + resp, err := svc.UpdateDomainName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateIntegration() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateIntegrationInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateIntegration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateIntegrationResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateIntegrationResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateIntegrationResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateMethod() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateMethodInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateMethod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateMethodResponse() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateMethodResponseInput{ + HttpMethod: aws.String("String"), // Required + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + StatusCode: aws.String("StatusCode"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateMethodResponse(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateModel() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateModelInput{ + ModelName: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateResource() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateResourceInput{ + ResourceId: aws.String("String"), // Required + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateRestApi() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateRestApiInput{ + RestApiId: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateRestApi(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAPIGateway_UpdateStage() { + svc := apigateway.New(session.New()) + + params := &apigateway.UpdateStageInput{ + RestApiId: aws.String("String"), // Required + StageName: aws.String("String"), // Required + PatchOperations: []*apigateway.PatchOperation{ + { // Required + From: aws.String("String"), + Op: aws.String("op"), + Path: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateStage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/apigateway/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package apigateway + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon API Gateway helps developers deliver robust, secure and scalable mobile +// and web application backends. 
Amazon API Gateway allows developers to securely +// connect mobile and web applications to APIs that run on AWS Lambda, Amazon +// EC2, or other publicly addressable web services that are hosted outside of +// AWS. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type APIGateway struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "apigateway" + +// New creates a new instance of the APIGateway client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a APIGateway client from just a session. +// svc := apigateway.New(mySession) +// +// // Create a APIGateway client with additional configuration +// svc := apigateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *APIGateway { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *APIGateway { + svc := &APIGateway{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-07-09", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a APIGateway operation and runs any +// custom request initialization. +func (c *APIGateway) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5150 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package autoscaling provides a client for Auto Scaling. 
+package autoscaling + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAttachInstances = "AttachInstances" + +// AttachInstancesRequest generates a request for the AttachInstances operation. +func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req *request.Request, output *AttachInstancesOutput) { + op := &request.Operation{ + Name: opAttachInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachInstancesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachInstancesOutput{} + req.Data = output + return +} + +// Attaches one or more EC2 instances to the specified Auto Scaling group. +// +// When you attach instances, Auto Scaling increases the desired capacity of +// the group by the number of instances being attached. If the number of instances +// being attached plus the desired capacity of the group exceeds the maximum +// size of the group, the operation fails. +// +// For more information, see Attach EC2 Instances to Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-instance-asg.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) AttachInstances(input *AttachInstancesInput) (*AttachInstancesOutput, error) { + req, out := c.AttachInstancesRequest(input) + err := req.Send() + return out, err +} + +const opAttachLoadBalancers = "AttachLoadBalancers" + +// AttachLoadBalancersRequest generates a request for the AttachLoadBalancers operation. 
+func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput) (req *request.Request, output *AttachLoadBalancersOutput) { + op := &request.Operation{ + Name: opAttachLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachLoadBalancersOutput{} + req.Data = output + return +} + +// Attaches one or more load balancers to the specified Auto Scaling group. +// +// To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. +// To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers. +// +// For more information, see Attach a Load Balancer to Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-load-balancer-asg.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) AttachLoadBalancers(input *AttachLoadBalancersInput) (*AttachLoadBalancersOutput, error) { + req, out := c.AttachLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opCompleteLifecycleAction = "CompleteLifecycleAction" + +// CompleteLifecycleActionRequest generates a request for the CompleteLifecycleAction operation. +func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleActionInput) (req *request.Request, output *CompleteLifecycleActionOutput) { + op := &request.Operation{ + Name: opCompleteLifecycleAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteLifecycleActionInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteLifecycleActionOutput{} + req.Data = output + return +} + +// Completes the lifecycle action for the associated token initiated under the +// given lifecycle hook with the specified result. 
+// +// This operation is a part of the basic sequence for adding a lifecycle hook +// to an Auto Scaling group: +// +// Create a notification target. A target can be either an Amazon SQS queue +// or an Amazon SNS topic. Create an IAM role. This role allows Auto Scaling +// to publish lifecycle notifications to the designated SQS queue or SNS topic. +// Create the lifecycle hook. You can create a hook that acts when instances +// launch or when instances terminate. If necessary, record the lifecycle action +// heartbeat to keep the instance in a pending state. Complete the lifecycle +// action. For more information, see Auto Scaling Pending State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingPendingState.html) +// and Auto Scaling Terminating State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingTerminatingState.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) CompleteLifecycleAction(input *CompleteLifecycleActionInput) (*CompleteLifecycleActionOutput, error) { + req, out := c.CompleteLifecycleActionRequest(input) + err := req.Send() + return out, err +} + +const opCreateAutoScalingGroup = "CreateAutoScalingGroup" + +// CreateAutoScalingGroupRequest generates a request for the CreateAutoScalingGroup operation. +func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGroupInput) (req *request.Request, output *CreateAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opCreateAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateAutoScalingGroupOutput{} + req.Data = output + return +} + +// Creates an Auto Scaling group with the specified name and attributes. 
+// +// If you exceed your maximum limit of Auto Scaling groups, which by default +// is 20 per region, the call fails. For information about viewing and updating +// this limit, see DescribeAccountLimits. +// +// For more information, see Auto Scaling Groups (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroup.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) CreateAutoScalingGroup(input *CreateAutoScalingGroupInput) (*CreateAutoScalingGroupOutput, error) { + req, out := c.CreateAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateLaunchConfiguration = "CreateLaunchConfiguration" + +// CreateLaunchConfigurationRequest generates a request for the CreateLaunchConfiguration operation. +func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfigurationInput) (req *request.Request, output *CreateLaunchConfigurationOutput) { + op := &request.Operation{ + Name: opCreateLaunchConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLaunchConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateLaunchConfigurationOutput{} + req.Data = output + return +} + +// Creates a launch configuration. +// +// If you exceed your maximum limit of launch configurations, which by default +// is 100 per region, the call fails. For information about viewing and updating +// this limit, see DescribeAccountLimits. +// +// For more information, see Launch Configurations (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/LaunchConfiguration.html) +// in the Auto Scaling Developer Guide. 
+func (c *AutoScaling) CreateLaunchConfiguration(input *CreateLaunchConfigurationInput) (*CreateLaunchConfigurationOutput, error) { + req, out := c.CreateLaunchConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opCreateOrUpdateTags = "CreateOrUpdateTags" + +// CreateOrUpdateTagsRequest generates a request for the CreateOrUpdateTags operation. +func (c *AutoScaling) CreateOrUpdateTagsRequest(input *CreateOrUpdateTagsInput) (req *request.Request, output *CreateOrUpdateTagsOutput) { + op := &request.Operation{ + Name: opCreateOrUpdateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOrUpdateTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateOrUpdateTagsOutput{} + req.Data = output + return +} + +// Creates or updates tags for the specified Auto Scaling group. +// +// A tag is defined by its resource ID, resource type, key, value, and propagate +// flag. The value and the propagate flag are optional parameters. The only +// supported resource type is auto-scaling-group, and the resource ID must be +// the name of the group. The PropagateAtLaunch flag determines whether the +// tag is added to instances launched in the group. Valid values are true or +// false. +// +// When you specify a tag with a key that already exists, the operation overwrites +// the previous tag definition, and you do not get an error message. +// +// For more information, see Tagging Auto Scaling Groups and Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASTagging.html) +// in the Auto Scaling Developer Guide. 
+func (c *AutoScaling) CreateOrUpdateTags(input *CreateOrUpdateTagsInput) (*CreateOrUpdateTagsOutput, error) { + req, out := c.CreateOrUpdateTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAutoScalingGroup = "DeleteAutoScalingGroup" + +// DeleteAutoScalingGroupRequest generates a request for the DeleteAutoScalingGroup operation. +func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGroupInput) (req *request.Request, output *DeleteAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opDeleteAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAutoScalingGroupOutput{} + req.Data = output + return +} + +// Deletes the specified Auto Scaling group. +// +// If the group has instances or scaling activities in progress, you must specify +// the option to force the deletion in order for it to succeed. +// +// If the group has policies, deleting the group deletes the policies, the +// underlying alarm actions, and any alarm that no longer has an associated +// action. +// +// To remove instances from the Auto Scaling group before deleting it, call +// DetachInstances with the list of instances and the option to decrement the +// desired capacity so that Auto Scaling does not launch replacement instances. +// +// To terminate all instances before deleting the Auto Scaling group, call +// UpdateAutoScalingGroup and set the minimum size and desired capacity of the +// Auto Scaling group to zero. 
+func (c *AutoScaling) DeleteAutoScalingGroup(input *DeleteAutoScalingGroupInput) (*DeleteAutoScalingGroupOutput, error) { + req, out := c.DeleteAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLaunchConfiguration = "DeleteLaunchConfiguration" + +// DeleteLaunchConfigurationRequest generates a request for the DeleteLaunchConfiguration operation. +func (c *AutoScaling) DeleteLaunchConfigurationRequest(input *DeleteLaunchConfigurationInput) (req *request.Request, output *DeleteLaunchConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteLaunchConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLaunchConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLaunchConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified launch configuration. +// +// The launch configuration must not be attached to an Auto Scaling group. +// When this call completes, the launch configuration is no longer available +// for use. +func (c *AutoScaling) DeleteLaunchConfiguration(input *DeleteLaunchConfigurationInput) (*DeleteLaunchConfigurationOutput, error) { + req, out := c.DeleteLaunchConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLifecycleHook = "DeleteLifecycleHook" + +// DeleteLifecycleHookRequest generates a request for the DeleteLifecycleHook operation. 
+func (c *AutoScaling) DeleteLifecycleHookRequest(input *DeleteLifecycleHookInput) (req *request.Request, output *DeleteLifecycleHookOutput) { + op := &request.Operation{ + Name: opDeleteLifecycleHook, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLifecycleHookInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLifecycleHookOutput{} + req.Data = output + return +} + +// Deletes the specified lifecycle hook. +// +// If there are any outstanding lifecycle actions, they are completed first +// (ABANDON for launching instances, CONTINUE for terminating instances). +func (c *AutoScaling) DeleteLifecycleHook(input *DeleteLifecycleHookInput) (*DeleteLifecycleHookOutput, error) { + req, out := c.DeleteLifecycleHookRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNotificationConfiguration = "DeleteNotificationConfiguration" + +// DeleteNotificationConfigurationRequest generates a request for the DeleteNotificationConfiguration operation. +func (c *AutoScaling) DeleteNotificationConfigurationRequest(input *DeleteNotificationConfigurationInput) (req *request.Request, output *DeleteNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteNotificationConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteNotificationConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified notification. 
+func (c *AutoScaling) DeleteNotificationConfiguration(input *DeleteNotificationConfigurationInput) (*DeleteNotificationConfigurationOutput, error) { + req, out := c.DeleteNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a request for the DeletePolicy operation. +func (c *AutoScaling) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified Auto Scaling policy. +// +// Deleting a policy deletes the underlying alarm action, but does not delete +// the alarm, even if it no longer has an associated action. +func (c *AutoScaling) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteScheduledAction = "DeleteScheduledAction" + +// DeleteScheduledActionRequest generates a request for the DeleteScheduledAction operation. 
+func (c *AutoScaling) DeleteScheduledActionRequest(input *DeleteScheduledActionInput) (req *request.Request, output *DeleteScheduledActionOutput) { + op := &request.Operation{ + Name: opDeleteScheduledAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteScheduledActionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteScheduledActionOutput{} + req.Data = output + return +} + +// Deletes the specified scheduled action. +func (c *AutoScaling) DeleteScheduledAction(input *DeleteScheduledActionInput) (*DeleteScheduledActionOutput, error) { + req, out := c.DeleteScheduledActionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a request for the DeleteTags operation. +func (c *AutoScaling) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified tags. +func (c *AutoScaling) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountLimits = "DescribeAccountLimits" + +// DescribeAccountLimitsRequest generates a request for the DescribeAccountLimits operation. 
+func (c *AutoScaling) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { + op := &request.Operation{ + Name: opDescribeAccountLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountLimitsOutput{} + req.Data = output + return +} + +// Describes the current Auto Scaling resource limits for your AWS account. +// +// For information about requesting an increase in these limits, see AWS Service +// Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) +// in the Amazon Web Services General Reference. +func (c *AutoScaling) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAdjustmentTypes = "DescribeAdjustmentTypes" + +// DescribeAdjustmentTypesRequest generates a request for the DescribeAdjustmentTypes operation. +func (c *AutoScaling) DescribeAdjustmentTypesRequest(input *DescribeAdjustmentTypesInput) (req *request.Request, output *DescribeAdjustmentTypesOutput) { + op := &request.Operation{ + Name: opDescribeAdjustmentTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAdjustmentTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAdjustmentTypesOutput{} + req.Data = output + return +} + +// Describes the policy adjustment types for use with PutScalingPolicy. 
+func (c *AutoScaling) DescribeAdjustmentTypes(input *DescribeAdjustmentTypesInput) (*DescribeAdjustmentTypesOutput, error) { + req, out := c.DescribeAdjustmentTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAutoScalingGroups = "DescribeAutoScalingGroups" + +// DescribeAutoScalingGroupsRequest generates a request for the DescribeAutoScalingGroups operation. +func (c *AutoScaling) DescribeAutoScalingGroupsRequest(input *DescribeAutoScalingGroupsInput) (req *request.Request, output *DescribeAutoScalingGroupsOutput) { + op := &request.Operation{ + Name: opDescribeAutoScalingGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAutoScalingGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAutoScalingGroupsOutput{} + req.Data = output + return +} + +// Describes one or more Auto Scaling groups. If a list of names is not provided, +// the call describes all Auto Scaling groups. 
+func (c *AutoScaling) DescribeAutoScalingGroups(input *DescribeAutoScalingGroupsInput) (*DescribeAutoScalingGroupsOutput, error) { + req, out := c.DescribeAutoScalingGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *AutoScaling) DescribeAutoScalingGroupsPages(input *DescribeAutoScalingGroupsInput, fn func(p *DescribeAutoScalingGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAutoScalingGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAutoScalingGroupsOutput), lastPage) + }) +} + +const opDescribeAutoScalingInstances = "DescribeAutoScalingInstances" + +// DescribeAutoScalingInstancesRequest generates a request for the DescribeAutoScalingInstances operation. +func (c *AutoScaling) DescribeAutoScalingInstancesRequest(input *DescribeAutoScalingInstancesInput) (req *request.Request, output *DescribeAutoScalingInstancesOutput) { + op := &request.Operation{ + Name: opDescribeAutoScalingInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAutoScalingInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAutoScalingInstancesOutput{} + req.Data = output + return +} + +// Describes one or more Auto Scaling instances. If a list is not provided, +// the call describes all instances. 
func (c *AutoScaling) DescribeAutoScalingInstances(input *DescribeAutoScalingInstancesInput) (*DescribeAutoScalingInstancesOutput, error) {
	req, out := c.DescribeAutoScalingInstancesRequest(input)
	err := req.Send()
	return out, err
}

// DescribeAutoScalingInstancesPages walks all result pages, passing each to fn.
func (c *AutoScaling) DescribeAutoScalingInstancesPages(input *DescribeAutoScalingInstancesInput, fn func(p *DescribeAutoScalingInstancesOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.DescribeAutoScalingInstancesRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*DescribeAutoScalingInstancesOutput), lastPage)
	})
}

const opDescribeAutoScalingNotificationTypes = "DescribeAutoScalingNotificationTypes"

// DescribeAutoScalingNotificationTypesRequest generates a request for the DescribeAutoScalingNotificationTypes operation.
func (c *AutoScaling) DescribeAutoScalingNotificationTypesRequest(input *DescribeAutoScalingNotificationTypesInput) (req *request.Request, output *DescribeAutoScalingNotificationTypesOutput) {
	// Non-paginated operation: no Paginator metadata.
	op := &request.Operation{
		Name:       opDescribeAutoScalingNotificationTypes,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeAutoScalingNotificationTypesInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeAutoScalingNotificationTypesOutput{}
	req.Data = output
	return
}

// Describes the notification types that are supported by Auto Scaling.
func (c *AutoScaling) DescribeAutoScalingNotificationTypes(input *DescribeAutoScalingNotificationTypesInput) (*DescribeAutoScalingNotificationTypesOutput, error) {
	req, out := c.DescribeAutoScalingNotificationTypesRequest(input)
	err := req.Send()
	return out, err
}

const opDescribeLaunchConfigurations = "DescribeLaunchConfigurations"

// DescribeLaunchConfigurationsRequest generates a request for the DescribeLaunchConfigurations operation.
func (c *AutoScaling) DescribeLaunchConfigurationsRequest(input *DescribeLaunchConfigurationsInput) (req *request.Request, output *DescribeLaunchConfigurationsOutput) {
	op := &request.Operation{
		Name:       opDescribeLaunchConfigurations,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxRecords",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &DescribeLaunchConfigurationsInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeLaunchConfigurationsOutput{}
	req.Data = output
	return
}

// Describes one or more launch configurations. If you omit the list of names,
// then the call describes all launch configurations.
func (c *AutoScaling) DescribeLaunchConfigurations(input *DescribeLaunchConfigurationsInput) (*DescribeLaunchConfigurationsOutput, error) {
	req, out := c.DescribeLaunchConfigurationsRequest(input)
	err := req.Send()
	return out, err
}

// DescribeLaunchConfigurationsPages walks all result pages, passing each to fn.
func (c *AutoScaling) DescribeLaunchConfigurationsPages(input *DescribeLaunchConfigurationsInput, fn func(p *DescribeLaunchConfigurationsOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.DescribeLaunchConfigurationsRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*DescribeLaunchConfigurationsOutput), lastPage)
	})
}

const opDescribeLifecycleHookTypes = "DescribeLifecycleHookTypes"

// DescribeLifecycleHookTypesRequest generates a request for the DescribeLifecycleHookTypes operation.
func (c *AutoScaling) DescribeLifecycleHookTypesRequest(input *DescribeLifecycleHookTypesInput) (req *request.Request, output *DescribeLifecycleHookTypesOutput) {
	op := &request.Operation{
		Name:       opDescribeLifecycleHookTypes,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeLifecycleHookTypesInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeLifecycleHookTypesOutput{}
	req.Data = output
	return
}

// Describes the available types of lifecycle hooks.
func (c *AutoScaling) DescribeLifecycleHookTypes(input *DescribeLifecycleHookTypesInput) (*DescribeLifecycleHookTypesOutput, error) {
	req, out := c.DescribeLifecycleHookTypesRequest(input)
	err := req.Send()
	return out, err
}

const opDescribeLifecycleHooks = "DescribeLifecycleHooks"

// DescribeLifecycleHooksRequest generates a request for the DescribeLifecycleHooks operation.
func (c *AutoScaling) DescribeLifecycleHooksRequest(input *DescribeLifecycleHooksInput) (req *request.Request, output *DescribeLifecycleHooksOutput) {
	op := &request.Operation{
		Name:       opDescribeLifecycleHooks,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeLifecycleHooksInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeLifecycleHooksOutput{}
	req.Data = output
	return
}

// Describes the lifecycle hooks for the specified Auto Scaling group.
func (c *AutoScaling) DescribeLifecycleHooks(input *DescribeLifecycleHooksInput) (*DescribeLifecycleHooksOutput, error) {
	req, out := c.DescribeLifecycleHooksRequest(input)
	err := req.Send()
	return out, err
}

const opDescribeLoadBalancers = "DescribeLoadBalancers"

// DescribeLoadBalancersRequest generates a request for the DescribeLoadBalancers operation.
func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) {
	op := &request.Operation{
		Name:       opDescribeLoadBalancers,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeLoadBalancersInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeLoadBalancersOutput{}
	req.Data = output
	return
}

// Describes the load balancers for the specified Auto Scaling group.
func (c *AutoScaling) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) {
	req, out := c.DescribeLoadBalancersRequest(input)
	err := req.Send()
	return out, err
}

const opDescribeMetricCollectionTypes = "DescribeMetricCollectionTypes"

// DescribeMetricCollectionTypesRequest generates a request for the DescribeMetricCollectionTypes operation.
func (c *AutoScaling) DescribeMetricCollectionTypesRequest(input *DescribeMetricCollectionTypesInput) (req *request.Request, output *DescribeMetricCollectionTypesOutput) {
	op := &request.Operation{
		Name:       opDescribeMetricCollectionTypes,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeMetricCollectionTypesInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeMetricCollectionTypesOutput{}
	req.Data = output
	return
}

// Describes the available CloudWatch metrics for Auto Scaling.
//
// Note that the GroupStandbyInstances metric is not returned by default. You
// must explicitly request this metric when calling EnableMetricsCollection.
func (c *AutoScaling) DescribeMetricCollectionTypes(input *DescribeMetricCollectionTypesInput) (*DescribeMetricCollectionTypesOutput, error) {
	req, out := c.DescribeMetricCollectionTypesRequest(input)
	err := req.Send()
	return out, err
}

const opDescribeNotificationConfigurations = "DescribeNotificationConfigurations"

// DescribeNotificationConfigurationsRequest generates a request for the DescribeNotificationConfigurations operation.
func (c *AutoScaling) DescribeNotificationConfigurationsRequest(input *DescribeNotificationConfigurationsInput) (req *request.Request, output *DescribeNotificationConfigurationsOutput) {
	op := &request.Operation{
		Name:       opDescribeNotificationConfigurations,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		// Paginated: NextToken threads pages, MaxRecords caps page size.
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxRecords",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &DescribeNotificationConfigurationsInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeNotificationConfigurationsOutput{}
	req.Data = output
	return
}

// Describes the notification actions associated with the specified Auto Scaling
// group.
func (c *AutoScaling) DescribeNotificationConfigurations(input *DescribeNotificationConfigurationsInput) (*DescribeNotificationConfigurationsOutput, error) {
	req, out := c.DescribeNotificationConfigurationsRequest(input)
	err := req.Send()
	return out, err
}

// DescribeNotificationConfigurationsPages walks all result pages, passing each to fn.
func (c *AutoScaling) DescribeNotificationConfigurationsPages(input *DescribeNotificationConfigurationsInput, fn func(p *DescribeNotificationConfigurationsOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.DescribeNotificationConfigurationsRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*DescribeNotificationConfigurationsOutput), lastPage)
	})
}

const opDescribePolicies = "DescribePolicies"

// DescribePoliciesRequest generates a request for the DescribePolicies operation.
func (c *AutoScaling) DescribePoliciesRequest(input *DescribePoliciesInput) (req *request.Request, output *DescribePoliciesOutput) {
	op := &request.Operation{
		Name:       opDescribePolicies,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxRecords",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &DescribePoliciesInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribePoliciesOutput{}
	req.Data = output
	return
}

// Describes the policies for the specified Auto Scaling group.
func (c *AutoScaling) DescribePolicies(input *DescribePoliciesInput) (*DescribePoliciesOutput, error) {
	req, out := c.DescribePoliciesRequest(input)
	err := req.Send()
	return out, err
}

// DescribePoliciesPages walks all result pages, passing each to fn.
func (c *AutoScaling) DescribePoliciesPages(input *DescribePoliciesInput, fn func(p *DescribePoliciesOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.DescribePoliciesRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*DescribePoliciesOutput), lastPage)
	})
}

const opDescribeScalingActivities = "DescribeScalingActivities"

// DescribeScalingActivitiesRequest generates a request for the DescribeScalingActivities operation.
func (c *AutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingActivitiesInput) (req *request.Request, output *DescribeScalingActivitiesOutput) {
	op := &request.Operation{
		Name:       opDescribeScalingActivities,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxRecords",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &DescribeScalingActivitiesInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeScalingActivitiesOutput{}
	req.Data = output
	return
}

// Describes one or more scaling activities for the specified Auto Scaling group.
// If you omit the ActivityIds, the call returns all activities from the past
// six weeks. Activities are sorted by the start time. Activities still in progress
// appear first on the list.
func (c *AutoScaling) DescribeScalingActivities(input *DescribeScalingActivitiesInput) (*DescribeScalingActivitiesOutput, error) {
	req, out := c.DescribeScalingActivitiesRequest(input)
	err := req.Send()
	return out, err
}

// DescribeScalingActivitiesPages walks all result pages, passing each to fn.
func (c *AutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActivitiesInput, fn func(p *DescribeScalingActivitiesOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.DescribeScalingActivitiesRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*DescribeScalingActivitiesOutput), lastPage)
	})
}

const opDescribeScalingProcessTypes = "DescribeScalingProcessTypes"

// DescribeScalingProcessTypesRequest generates a request for the DescribeScalingProcessTypes operation.
func (c *AutoScaling) DescribeScalingProcessTypesRequest(input *DescribeScalingProcessTypesInput) (req *request.Request, output *DescribeScalingProcessTypesOutput) {
	op := &request.Operation{
		Name:       opDescribeScalingProcessTypes,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeScalingProcessTypesInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeScalingProcessTypesOutput{}
	req.Data = output
	return
}

// Describes the scaling process types for use with ResumeProcesses and SuspendProcesses.
func (c *AutoScaling) DescribeScalingProcessTypes(input *DescribeScalingProcessTypesInput) (*DescribeScalingProcessTypesOutput, error) {
	req, out := c.DescribeScalingProcessTypesRequest(input)
	err := req.Send()
	return out, err
}

const opDescribeScheduledActions = "DescribeScheduledActions"

// DescribeScheduledActionsRequest generates a request for the DescribeScheduledActions operation.
func (c *AutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledActionsInput) (req *request.Request, output *DescribeScheduledActionsOutput) {
	op := &request.Operation{
		Name:       opDescribeScheduledActions,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxRecords",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &DescribeScheduledActionsInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeScheduledActionsOutput{}
	req.Data = output
	return
}

// Describes the actions scheduled for your Auto Scaling group that haven't
// run. To describe the actions that have already run, use DescribeScalingActivities.
func (c *AutoScaling) DescribeScheduledActions(input *DescribeScheduledActionsInput) (*DescribeScheduledActionsOutput, error) {
	req, out := c.DescribeScheduledActionsRequest(input)
	err := req.Send()
	return out, err
}

// DescribeScheduledActionsPages walks all result pages, passing each to fn.
func (c *AutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(p *DescribeScheduledActionsOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.DescribeScheduledActionsRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*DescribeScheduledActionsOutput), lastPage)
	})
}

const opDescribeTags = "DescribeTags"

// DescribeTagsRequest generates a request for the DescribeTags operation.
func (c *AutoScaling) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) {
	op := &request.Operation{
		Name:       opDescribeTags,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxRecords",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &DescribeTagsInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeTagsOutput{}
	req.Data = output
	return
}

// Describes the specified tags.
//
// You can use filters to limit the results. For example, you can query for
// the tags for a specific Auto Scaling group. You can specify multiple values
// for a filter. A tag must match at least one of the specified values for it
// to be included in the results.
//
// You can also specify multiple filters. The result includes information for
// a particular tag only if it matches all the filters. If there's no match,
// no special message is returned.
func (c *AutoScaling) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) {
	req, out := c.DescribeTagsRequest(input)
	err := req.Send()
	return out, err
}

// DescribeTagsPages walks all result pages, passing each to fn.
func (c *AutoScaling) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.DescribeTagsRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*DescribeTagsOutput), lastPage)
	})
}

const opDescribeTerminationPolicyTypes = "DescribeTerminationPolicyTypes"

// DescribeTerminationPolicyTypesRequest generates a request for the DescribeTerminationPolicyTypes operation.
func (c *AutoScaling) DescribeTerminationPolicyTypesRequest(input *DescribeTerminationPolicyTypesInput) (req *request.Request, output *DescribeTerminationPolicyTypesOutput) {
	op := &request.Operation{
		Name:       opDescribeTerminationPolicyTypes,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeTerminationPolicyTypesInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeTerminationPolicyTypesOutput{}
	req.Data = output
	return
}

// Describes the termination policies supported by Auto Scaling.
func (c *AutoScaling) DescribeTerminationPolicyTypes(input *DescribeTerminationPolicyTypesInput) (*DescribeTerminationPolicyTypesOutput, error) {
	req, out := c.DescribeTerminationPolicyTypesRequest(input)
	err := req.Send()
	return out, err
}

const opDetachInstances = "DetachInstances"

// DetachInstancesRequest generates a request for the DetachInstances operation.
func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req *request.Request, output *DetachInstancesOutput) {
	op := &request.Operation{
		Name:       opDetachInstances,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DetachInstancesInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DetachInstancesOutput{}
	req.Data = output
	return
}

// Removes one or more instances from the specified Auto Scaling group.
//
// After the instances are detached, you can manage them independently from
// the rest of the Auto Scaling group.
//
// If you do not specify the option to decrement the desired capacity, Auto
// Scaling launches instances to replace the ones that are detached.
//
// For more information, see Detach EC2 Instances from Your Auto Scaling Group
// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/detach-instance-asg.html)
// in the Auto Scaling Developer Guide.
func (c *AutoScaling) DetachInstances(input *DetachInstancesInput) (*DetachInstancesOutput, error) {
	req, out := c.DetachInstancesRequest(input)
	err := req.Send()
	return out, err
}

const opDetachLoadBalancers = "DetachLoadBalancers"

// DetachLoadBalancersRequest generates a request for the DetachLoadBalancers operation.
func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput) (req *request.Request, output *DetachLoadBalancersOutput) {
	op := &request.Operation{
		Name:       opDetachLoadBalancers,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DetachLoadBalancersInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DetachLoadBalancersOutput{}
	req.Data = output
	return
}

// Removes one or more load balancers from the specified Auto Scaling group.
//
// When you detach a load balancer, it enters the Removing state while deregistering
// the instances in the group. When all instances are deregistered, then you
// can no longer describe the load balancer using DescribeLoadBalancers. Note
// that the instances remain running.
func (c *AutoScaling) DetachLoadBalancers(input *DetachLoadBalancersInput) (*DetachLoadBalancersOutput, error) {
	req, out := c.DetachLoadBalancersRequest(input)
	err := req.Send()
	return out, err
}

const opDisableMetricsCollection = "DisableMetricsCollection"

// DisableMetricsCollectionRequest generates a request for the DisableMetricsCollection operation.
func (c *AutoScaling) DisableMetricsCollectionRequest(input *DisableMetricsCollectionInput) (req *request.Request, output *DisableMetricsCollectionOutput) {
	op := &request.Operation{
		Name:       opDisableMetricsCollection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DisableMetricsCollectionInput{}
	}

	req = c.newRequest(op, input, output)
	// Replace the default query unmarshaler with a handler that discards the
	// response body (this operation's output carries no fields to decode).
	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	output = &DisableMetricsCollectionOutput{}
	req.Data = output
	return
}

// Disables monitoring of the specified metrics for the specified Auto Scaling
// group.
func (c *AutoScaling) DisableMetricsCollection(input *DisableMetricsCollectionInput) (*DisableMetricsCollectionOutput, error) {
	req, out := c.DisableMetricsCollectionRequest(input)
	err := req.Send()
	return out, err
}

const opEnableMetricsCollection = "EnableMetricsCollection"

// EnableMetricsCollectionRequest generates a request for the EnableMetricsCollection operation.
func (c *AutoScaling) EnableMetricsCollectionRequest(input *EnableMetricsCollectionInput) (req *request.Request, output *EnableMetricsCollectionOutput) {
	op := &request.Operation{
		Name:       opEnableMetricsCollection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &EnableMetricsCollectionInput{}
	}

	req = c.newRequest(op, input, output)
	// Discard the response body instead of query-unmarshaling it.
	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	output = &EnableMetricsCollectionOutput{}
	req.Data = output
	return
}

// Enables monitoring of the specified metrics for the specified Auto Scaling
// group.
//
// You can only enable metrics collection if InstanceMonitoring in the launch
// configuration for the group is set to True.
func (c *AutoScaling) EnableMetricsCollection(input *EnableMetricsCollectionInput) (*EnableMetricsCollectionOutput, error) {
	req, out := c.EnableMetricsCollectionRequest(input)
	err := req.Send()
	return out, err
}

const opEnterStandby = "EnterStandby"

// EnterStandbyRequest generates a request for the EnterStandby operation.
func (c *AutoScaling) EnterStandbyRequest(input *EnterStandbyInput) (req *request.Request, output *EnterStandbyOutput) {
	op := &request.Operation{
		Name:       opEnterStandby,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &EnterStandbyInput{}
	}

	req = c.newRequest(op, input, output)
	output = &EnterStandbyOutput{}
	req.Data = output
	return
}

// Moves the specified instances into Standby mode.
//
// For more information, see Auto Scaling InService State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingInServiceState.html)
// in the Auto Scaling Developer Guide.
func (c *AutoScaling) EnterStandby(input *EnterStandbyInput) (*EnterStandbyOutput, error) {
	req, out := c.EnterStandbyRequest(input)
	err := req.Send()
	return out, err
}

const opExecutePolicy = "ExecutePolicy"

// ExecutePolicyRequest generates a request for the ExecutePolicy operation.
func (c *AutoScaling) ExecutePolicyRequest(input *ExecutePolicyInput) (req *request.Request, output *ExecutePolicyOutput) {
	op := &request.Operation{
		Name:       opExecutePolicy,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ExecutePolicyInput{}
	}

	req = c.newRequest(op, input, output)
	// Discard the response body instead of query-unmarshaling it.
	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	output = &ExecutePolicyOutput{}
	req.Data = output
	return
}

// Executes the specified policy.
func (c *AutoScaling) ExecutePolicy(input *ExecutePolicyInput) (*ExecutePolicyOutput, error) {
	req, out := c.ExecutePolicyRequest(input)
	err := req.Send()
	return out, err
}

const opExitStandby = "ExitStandby"

// ExitStandbyRequest generates a request for the ExitStandby operation.
func (c *AutoScaling) ExitStandbyRequest(input *ExitStandbyInput) (req *request.Request, output *ExitStandbyOutput) {
	op := &request.Operation{
		Name:       opExitStandby,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ExitStandbyInput{}
	}

	req = c.newRequest(op, input, output)
	output = &ExitStandbyOutput{}
	req.Data = output
	return
}

// Moves the specified instances out of Standby mode.
//
// For more information, see Auto Scaling InService State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingInServiceState.html)
// in the Auto Scaling Developer Guide.
func (c *AutoScaling) ExitStandby(input *ExitStandbyInput) (*ExitStandbyOutput, error) {
	req, out := c.ExitStandbyRequest(input)
	err := req.Send()
	return out, err
}

const opPutLifecycleHook = "PutLifecycleHook"

// PutLifecycleHookRequest generates a request for the PutLifecycleHook operation.
func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req *request.Request, output *PutLifecycleHookOutput) {
	op := &request.Operation{
		Name:       opPutLifecycleHook,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PutLifecycleHookInput{}
	}

	req = c.newRequest(op, input, output)
	output = &PutLifecycleHookOutput{}
	req.Data = output
	return
}

// Creates or updates a lifecycle hook for the specified Auto Scaling Group.
//
// A lifecycle hook tells Auto Scaling that you want to perform an action on
// an instance that is not actively in service; for example, either when the
// instance launches or before the instance terminates.
//
// This operation is a part of the basic sequence for adding a lifecycle hook
// to an Auto Scaling group:
//
// Create a notification target. A target can be either an Amazon SQS queue
// or an Amazon SNS topic. Create an IAM role. This role allows Auto Scaling
// to publish lifecycle notifications to the designated SQS queue or SNS topic.
// Create the lifecycle hook. You can create a hook that acts when instances
// launch or when instances terminate. If necessary, record the lifecycle action
// heartbeat to keep the instance in a pending state. Complete the lifecycle
// action. For more information, see Auto Scaling Pending State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingPendingState.html)
// and Auto Scaling Terminating State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingTerminatingState.html)
// in the Auto Scaling Developer Guide.
//
// If you exceed your maximum limit of lifecycle hooks, which by default is
// 50 per region, the call fails. For information about updating this limit,
// see AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html)
// in the Amazon Web Services General Reference.
func (c *AutoScaling) PutLifecycleHook(input *PutLifecycleHookInput) (*PutLifecycleHookOutput, error) {
	req, out := c.PutLifecycleHookRequest(input)
	err := req.Send()
	return out, err
}

const opPutNotificationConfiguration = "PutNotificationConfiguration"

// PutNotificationConfigurationRequest generates a request for the PutNotificationConfiguration operation.
func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotificationConfigurationInput) (req *request.Request, output *PutNotificationConfigurationOutput) {
	op := &request.Operation{
		Name:       opPutNotificationConfiguration,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PutNotificationConfigurationInput{}
	}

	req = c.newRequest(op, input, output)
	// Discard the response body instead of query-unmarshaling it.
	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	output = &PutNotificationConfigurationOutput{}
	req.Data = output
	return
}

// Configures an Auto Scaling group to send notifications when specified events
// take place. Subscribers to this topic can have messages for events delivered
// to an endpoint such as a web server or email address.
//
// For more information see Getting Notifications When Your Auto Scaling Group
// Changes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASGettingNotifications.html)
// in the Auto Scaling Developer Guide.
//
// This configuration overwrites an existing configuration.
func (c *AutoScaling) PutNotificationConfiguration(input *PutNotificationConfigurationInput) (*PutNotificationConfigurationOutput, error) {
	req, out := c.PutNotificationConfigurationRequest(input)
	err := req.Send()
	return out, err
}

const opPutScalingPolicy = "PutScalingPolicy"

// PutScalingPolicyRequest generates a request for the PutScalingPolicy operation.
func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) {
	op := &request.Operation{
		Name:       opPutScalingPolicy,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PutScalingPolicyInput{}
	}

	req = c.newRequest(op, input, output)
	output = &PutScalingPolicyOutput{}
	req.Data = output
	return
}

// Creates or updates a policy for an Auto Scaling group.
// To update an existing
// policy, use the existing policy name and set the parameters you want to change.
// Any existing parameter not changed in an update to an existing policy is
// not changed in this update request.
//
// If you exceed your maximum limit of step adjustments, which by default is
// 20 per region, the call fails. For information about updating this limit,
// see AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html)
// in the Amazon Web Services General Reference.
func (c *AutoScaling) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) {
	req, out := c.PutScalingPolicyRequest(input)
	err := req.Send()
	return out, err
}

const opPutScheduledUpdateGroupAction = "PutScheduledUpdateGroupAction"

// PutScheduledUpdateGroupActionRequest generates a request for the PutScheduledUpdateGroupAction operation.
func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUpdateGroupActionInput) (req *request.Request, output *PutScheduledUpdateGroupActionOutput) {
	op := &request.Operation{
		Name:       opPutScheduledUpdateGroupAction,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PutScheduledUpdateGroupActionInput{}
	}

	req = c.newRequest(op, input, output)
	// Discard the response body instead of query-unmarshaling it.
	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	output = &PutScheduledUpdateGroupActionOutput{}
	req.Data = output
	return
}

// Creates or updates a scheduled scaling action for an Auto Scaling group.
// When updating a scheduled scaling action, if you leave a parameter unspecified,
// the corresponding value remains unchanged in the affected Auto Scaling group.
//
// For more information, see Scheduled Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/schedule_time.html)
// in the Auto Scaling Developer Guide.
func (c *AutoScaling) PutScheduledUpdateGroupAction(input *PutScheduledUpdateGroupActionInput) (*PutScheduledUpdateGroupActionOutput, error) {
	req, out := c.PutScheduledUpdateGroupActionRequest(input)
	err := req.Send()
	return out, err
}

const opRecordLifecycleActionHeartbeat = "RecordLifecycleActionHeartbeat"

// RecordLifecycleActionHeartbeatRequest generates a request for the RecordLifecycleActionHeartbeat operation.
func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecycleActionHeartbeatInput) (req *request.Request, output *RecordLifecycleActionHeartbeatOutput) {
	op := &request.Operation{
		Name:       opRecordLifecycleActionHeartbeat,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &RecordLifecycleActionHeartbeatInput{}
	}

	req = c.newRequest(op, input, output)
	output = &RecordLifecycleActionHeartbeatOutput{}
	req.Data = output
	return
}

// Records a heartbeat for the lifecycle action associated with a specific token.
// This extends the timeout by the length of time defined by the HeartbeatTimeout
// parameter of PutLifecycleHook.
//
// This operation is a part of the basic sequence for adding a lifecycle hook
// to an Auto Scaling group:
//
// Create a notification target. A target can be either an Amazon SQS queue
// or an Amazon SNS topic. Create an IAM role. This role allows Auto Scaling
// to publish lifecycle notifications to the designated SQS queue or SNS topic.
// Create the lifecycle hook. You can create a hook that acts when instances
// launch or when instances terminate. If necessary, record the lifecycle action
// heartbeat to keep the instance in a pending state. Complete the lifecycle
// action.
For more information, see Auto Scaling Pending State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingPendingState.html) +// and Auto Scaling Terminating State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingTerminatingState.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) RecordLifecycleActionHeartbeat(input *RecordLifecycleActionHeartbeatInput) (*RecordLifecycleActionHeartbeatOutput, error) { + req, out := c.RecordLifecycleActionHeartbeatRequest(input) + err := req.Send() + return out, err +} + +const opResumeProcesses = "ResumeProcesses" + +// ResumeProcessesRequest generates a request for the ResumeProcesses operation. +func (c *AutoScaling) ResumeProcessesRequest(input *ScalingProcessQuery) (req *request.Request, output *ResumeProcessesOutput) { + op := &request.Operation{ + Name: opResumeProcesses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScalingProcessQuery{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResumeProcessesOutput{} + req.Data = output + return +} + +// Resumes the specified suspended Auto Scaling processes for the specified +// Auto Scaling group. To resume specific processes, use the ScalingProcesses +// parameter. To resume all processes, omit the ScalingProcesses parameter. +// For more information, see Suspend and Resume Auto Scaling Processes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) ResumeProcesses(input *ScalingProcessQuery) (*ResumeProcessesOutput, error) { + req, out := c.ResumeProcessesRequest(input) + err := req.Send() + return out, err +} + +const opSetDesiredCapacity = "SetDesiredCapacity" + +// SetDesiredCapacityRequest generates a request for the SetDesiredCapacity operation. 
+func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) (req *request.Request, output *SetDesiredCapacityOutput) { + op := &request.Operation{ + Name: opSetDesiredCapacity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetDesiredCapacityInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetDesiredCapacityOutput{} + req.Data = output + return +} + +// Sets the size of the specified Auto Scaling group. +// +// For more information about desired capacity, see What Is Auto Scaling? (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/WhatIsAutoScaling.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SetDesiredCapacity(input *SetDesiredCapacityInput) (*SetDesiredCapacityOutput, error) { + req, out := c.SetDesiredCapacityRequest(input) + err := req.Send() + return out, err +} + +const opSetInstanceHealth = "SetInstanceHealth" + +// SetInstanceHealthRequest generates a request for the SetInstanceHealth operation. +func (c *AutoScaling) SetInstanceHealthRequest(input *SetInstanceHealthInput) (req *request.Request, output *SetInstanceHealthOutput) { + op := &request.Operation{ + Name: opSetInstanceHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetInstanceHealthInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetInstanceHealthOutput{} + req.Data = output + return +} + +// Sets the health status of the specified instance. +// +// For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) +// in the Auto Scaling Developer Guide. 
+func (c *AutoScaling) SetInstanceHealth(input *SetInstanceHealthInput) (*SetInstanceHealthOutput, error) { + req, out := c.SetInstanceHealthRequest(input) + err := req.Send() + return out, err +} + +const opSetInstanceProtection = "SetInstanceProtection" + +// SetInstanceProtectionRequest generates a request for the SetInstanceProtection operation. +func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionInput) (req *request.Request, output *SetInstanceProtectionOutput) { + op := &request.Operation{ + Name: opSetInstanceProtection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetInstanceProtectionInput{} + } + + req = c.newRequest(op, input, output) + output = &SetInstanceProtectionOutput{} + req.Data = output + return +} + +// Updates the instance protection settings of the specified instances. +// +// For more information, see Instance Protection (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingBehavior.InstanceTermination.html#instance-protection) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SetInstanceProtection(input *SetInstanceProtectionInput) (*SetInstanceProtectionOutput, error) { + req, out := c.SetInstanceProtectionRequest(input) + err := req.Send() + return out, err +} + +const opSuspendProcesses = "SuspendProcesses" + +// SuspendProcessesRequest generates a request for the SuspendProcesses operation. 
+func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req *request.Request, output *SuspendProcessesOutput) { + op := &request.Operation{ + Name: opSuspendProcesses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScalingProcessQuery{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SuspendProcessesOutput{} + req.Data = output + return +} + +// Suspends the specified Auto Scaling processes for the specified Auto Scaling +// group. To suspend specific processes, use the ScalingProcesses parameter. +// To suspend all processes, omit the ScalingProcesses parameter. +// +// Note that if you suspend either the Launch or Terminate process types, it +// can prevent other process types from functioning properly. +// +// To resume processes that have been suspended, use ResumeProcesses. +// +// For more information, see Suspend and Resume Auto Scaling Processes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html) +// in the Auto Scaling Developer Guide. +func (c *AutoScaling) SuspendProcesses(input *ScalingProcessQuery) (*SuspendProcessesOutput, error) { + req, out := c.SuspendProcessesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateInstanceInAutoScalingGroup = "TerminateInstanceInAutoScalingGroup" + +// TerminateInstanceInAutoScalingGroupRequest generates a request for the TerminateInstanceInAutoScalingGroup operation. 
+func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *TerminateInstanceInAutoScalingGroupInput) (req *request.Request, output *TerminateInstanceInAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opTerminateInstanceInAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateInstanceInAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateInstanceInAutoScalingGroupOutput{} + req.Data = output + return +} + +// Terminates the specified instance and optionally adjusts the desired group +// size. +// +// This call simply makes a termination request. The instance is not terminated +// immediately. +func (c *AutoScaling) TerminateInstanceInAutoScalingGroup(input *TerminateInstanceInAutoScalingGroupInput) (*TerminateInstanceInAutoScalingGroupOutput, error) { + req, out := c.TerminateInstanceInAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAutoScalingGroup = "UpdateAutoScalingGroup" + +// UpdateAutoScalingGroupRequest generates a request for the UpdateAutoScalingGroup operation. +func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGroupInput) (req *request.Request, output *UpdateAutoScalingGroupOutput) { + op := &request.Operation{ + Name: opUpdateAutoScalingGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAutoScalingGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAutoScalingGroupOutput{} + req.Data = output + return +} + +// Updates the configuration for the specified Auto Scaling group. +// +// To update an Auto Scaling group with a launch configuration with InstanceMonitoring +// set to False, you must first disable the collection of group metrics. Otherwise, +// you will get an error. 
If you have previously enabled the collection of group +// metrics, you can disable it using DisableMetricsCollection. +// +// The new settings are registered upon the completion of this call. Any launch +// configuration settings take effect on any triggers after this call returns. +// Scaling activities that are currently in progress aren't affected. +// +// Note the following: +// +// If you specify a new value for MinSize without specifying a value for +// DesiredCapacity, and the new MinSize is larger than the current size of the +// group, we implicitly call SetDesiredCapacity to set the size of the group +// to the new value of MinSize. +// +// If you specify a new value for MaxSize without specifying a value for +// DesiredCapacity, and the new MaxSize is smaller than the current size of +// the group, we implicitly call SetDesiredCapacity to set the size of the group +// to the new value of MaxSize. +// +// All other optional parameters are left unchanged if not specified. +func (c *AutoScaling) UpdateAutoScalingGroup(input *UpdateAutoScalingGroupInput) (*UpdateAutoScalingGroupOutput, error) { + req, out := c.UpdateAutoScalingGroupRequest(input) + err := req.Send() + return out, err +} + +// Describes scaling activity, which is a long-running process that represents +// a change to your Auto Scaling group, such as changing its size or replacing +// an instance. +type Activity struct { + _ struct{} `type:"structure"` + + // The ID of the activity. + ActivityId *string `type:"string" required:"true"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The reason the activity began. + Cause *string `min:"1" type:"string" required:"true"` + + // A friendly, more verbose description of the activity. + Description *string `type:"string"` + + // The details about the activity. + Details *string `type:"string"` + + // The end time of the activity. 
+ EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A value between 0 and 100 that indicates the progress of the activity. + Progress *int64 `type:"integer"` + + // The start time of the activity. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The current status of the activity. + StatusCode *string `type:"string" required:"true" enum:"ScalingActivityStatusCode"` + + // A friendly, more verbose description of the activity status. + StatusMessage *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Activity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Activity) GoString() string { + return s.String() +} + +// Describes a policy adjustment type. +// +// For more information, see Dynamic Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html) +// in the Auto Scaling Developer Guide. +type AdjustmentType struct { + _ struct{} `type:"structure"` + + // The policy adjustment type. The valid values are ChangeInCapacity, ExactCapacity, + // and PercentChangeInCapacity. + AdjustmentType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AdjustmentType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdjustmentType) GoString() string { + return s.String() +} + +// Describes an alarm. +type Alarm struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the alarm. + AlarmARN *string `min:"1" type:"string"` + + // The name of the alarm. 
+ AlarmName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Alarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alarm) GoString() string { + return s.String() +} + +type AttachInstancesInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more EC2 instance IDs. + InstanceIds []*string `type:"list"` +} + +// String returns the string representation +func (s AttachInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstancesInput) GoString() string { + return s.String() +} + +type AttachInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstancesOutput) GoString() string { + return s.String() +} + +type AttachLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One or more load balancer names. + LoadBalancerNames []*string `type:"list"` +} + +// String returns the string representation +func (s AttachLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancersInput) GoString() string { + return s.String() +} + +type AttachLoadBalancersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancersOutput) GoString() string { + return s.String() +} + +// Describes a block device mapping. 
+type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh). + DeviceName *string `min:"1" type:"string" required:"true"` + + // The information about the Amazon EBS volume. + Ebs *Ebs `type:"structure"` + + // Suppresses a device mapping. + // + // If this parameter is true for the root device, the instance might fail the + // EC2 health check. Auto Scaling launches a replacement instance if the instance + // fails the health check. + NoDevice *bool `type:"boolean"` + + // The name of the virtual device (for example, ephemeral0). + VirtualName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +type CompleteLifecycleActionInput struct { + _ struct{} `type:"structure"` + + // The name of the group for the lifecycle hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The action for the group to take. This parameter can be either CONTINUE or + // ABANDON. + LifecycleActionResult *string `type:"string" required:"true"` + + // A universally unique identifier (UUID) that identifies a specific lifecycle + // action associated with an instance. Auto Scaling sends this token to the + // notification target you specified when you created the lifecycle hook. + LifecycleActionToken *string `min:"36" type:"string" required:"true"` + + // The name of the lifecycle hook. 
+ LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLifecycleActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLifecycleActionInput) GoString() string { + return s.String() +} + +type CompleteLifecycleActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CompleteLifecycleActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLifecycleActionOutput) GoString() string { + return s.String() +} + +type CreateAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group. This name must be unique within the scope of your + // AWS account. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. This parameter is optional + // if you specify subnets using the VPCZoneIdentifier parameter. + AvailabilityZones []*string `min:"1" type:"list"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. The default is 300. + // + // For more information, see Understanding Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + DefaultCooldown *int64 `type:"integer"` + + // The number of EC2 instances that should be running in the group. This number + // must be greater than or equal to the minimum size of the group and less than + // or equal to the maximum size of the group. + DesiredCapacity *int64 `type:"integer"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. During this + // time, any health check failures for the instance are ignored. 
The default + // is 300. + // + // This parameter is required if you are adding an ELB health check. + // + // For more information, see Health Checks for Auto Scaling Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. The valid values are EC2 and ELB. + // + // By default, health checks use Amazon EC2 instance status checks to determine + // the health of an instance. For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckType *string `min:"1" type:"string"` + + // The ID of the EC2 instance used to create a launch configuration for the + // group. Alternatively, use the LaunchConfigurationName parameter to specify + // a launch configuration instead of an EC2 instance. + // + // When you specify an ID of an instance, Auto Scaling creates a new launch + // configuration and associates it with the group. This launch configuration + // derives its attributes from the specified instance, with the exception of + // the block device mapping. + // + // For more information, see Create an Auto Scaling Group from an EC2 Instance + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/create-asg-from-instance.html) + // in the Auto Scaling Developer Guide. + InstanceId *string `min:"1" type:"string"` + + // The name of the launch configuration. Alternatively, use the InstanceId parameter + // to specify an EC2 instance instead of a launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // One or more load balancers. + // + // For more information, see Load Balance Your Auto Scaling Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SetUpASLBApp.html) + // in the Auto Scaling Developer Guide. 
+ LoadBalancerNames []*string `type:"list"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer" required:"true"` + + // The minimum size of the group. + MinSize *int64 `type:"integer" required:"true"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlacementGroup *string `min:"1" type:"string"` + + // The tag to be created or updated. Each tag should be defined by its resource + // type, resource ID, key, value, and a propagate flag. Valid values: key=value, + // value=value, propagate=true or false. Value and propagate are optional parameters. + // + // For more information, see Tagging Auto Scaling Groups and Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASTagging.html) + // in the Auto Scaling Developer Guide. + Tags []*Tag `type:"list"` + + // One or more termination policies used to select the instance to terminate. + // These policies are executed in the order that they are listed. + // + // For more information, see Choosing a Termination Policy for Your Auto Scaling + // Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/us-termination-policy.html) + // in the Auto Scaling Developer Guide. + TerminationPolicies []*string `type:"list"` + + // A comma-separated list of subnet identifiers for your virtual private cloud + // (VPC). + // + // If you specify subnets and Availability Zones with this call, ensure that + // the subnets' Availability Zones match the Availability Zones specified. 
+ // + // For more information, see Auto Scaling and Amazon Virtual Private Cloud + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/autoscalingsubnets.html) + // in the Auto Scaling Developer Guide. + VPCZoneIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAutoScalingGroupInput) GoString() string { + return s.String() +} + +type CreateAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type CreateLaunchConfigurationInput struct { + _ struct{} `type:"structure"` + + // Used for groups that launch instances into a virtual private cloud (VPC). + // Specifies whether to assign a public IP address to each instance. For more + // information, see Auto Scaling and Amazon Virtual Private Cloud (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/autoscalingsubnets.html) + // in the Auto Scaling Developer Guide. + // + // If you specify a value for this parameter, be sure to specify at least one + // subnet using the VPCZoneIdentifier parameter when you create your group. + // + // Default: If the instance is launched into a default subnet, the default + // is true. If the instance is launched into a nondefault subnet, the default + // is false. For more information, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) + // in the Amazon Elastic Compute Cloud User Guide. + AssociatePublicIpAddress *bool `type:"boolean"` + + // One or more mappings that specify how block devices are exposed to the instance. 
+ // For more information, see Block Device Mapping (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) + // in the Amazon Elastic Compute Cloud User Guide. + BlockDeviceMappings []*BlockDeviceMapping `type:"list"` + + // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. + // This parameter is supported only if you are launching EC2-Classic instances. + // For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClassicLinkVPCId *string `min:"1" type:"string"` + + // The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. + // This parameter is required if ClassicLinkVPCId is specified, and is not supported + // otherwise. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClassicLinkVPCSecurityGroups []*string `type:"list"` + + // Indicates whether the instance is optimized for Amazon EBS I/O. By default, + // the instance is not optimized for EBS I/O. The optimization provides dedicated + // throughput to Amazon EBS and an optimized configuration stack to provide + // optimal I/O performance. This optimization is not available with all instance + // types. Additional usage charges apply. For more information, see Amazon EBS-Optimized + // Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) + // in the Amazon Elastic Compute Cloud User Guide. + EbsOptimized *bool `type:"boolean"` + + // The name or the Amazon Resource Name (ARN) of the instance profile associated + // with the IAM role for the instance. + // + // EC2 instances launched with an IAM role will automatically have AWS security + // credentials available. 
You can use IAM roles with Auto Scaling to automatically + // enable applications running on your EC2 instances to securely access other + // AWS resources. For more information, see Launch Auto Scaling Instances with + // an IAM Role (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/us-iam-role.html) + // in the Auto Scaling Developer Guide. + IamInstanceProfile *string `min:"1" type:"string"` + + // The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. + // For more information, see Finding an AMI (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) + // in the Amazon Elastic Compute Cloud User Guide. + ImageId *string `min:"1" type:"string"` + + // The ID of the EC2 instance to use to create the launch configuration. + // + // The new launch configuration derives attributes from the instance, with + // the exception of the block device mapping. + // + // To create a launch configuration with a block device mapping or override + // any other instance attributes, specify them as part of the same request. + // + // For more information, see Create a Launch Configuration Using an EC2 Instance + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/create-lc-with-instanceID.html) + // in the Auto Scaling Developer Guide. + InstanceId *string `min:"1" type:"string"` + + // Enables detailed monitoring if it is disabled. Detailed monitoring is enabled + // by default. + // + // When detailed monitoring is enabled, Amazon CloudWatch generates metrics + // every minute and your account is charged a fee. When you disable detailed + // monitoring, by specifying False, CloudWatch generates metrics every 5 minutes. + // For more information, see Monitor Your Auto Scaling Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-instance-monitoring.html) + // in the Auto Scaling Developer Guide. 
+ InstanceMonitoring *InstanceMonitoring `type:"structure"` + + // The instance type of the EC2 instance. For information about available instance + // types, see Available Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `min:"1" type:"string"` + + // The ID of the kernel associated with the AMI. + KernelId *string `min:"1" type:"string"` + + // The name of the key pair. For more information, see Amazon EC2 Key Pairs + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in + // the Amazon Elastic Compute Cloud User Guide. + KeyName *string `min:"1" type:"string"` + + // The name of the launch configuration. This name must be unique within the + // scope of your AWS account. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + + // The tenancy of the instance. An instance with a tenancy of dedicated runs + // on single-tenant hardware and can only be launched into a VPC. + // + // You must set the value of this parameter to dedicated if want to launch + // Dedicated Instances into a shared tenancy VPC (VPC with instance placement + // tenancy attribute set to default). + // + // If you specify a value for this parameter, be sure to specify at least one + // subnet using the VPCZoneIdentifier parameter when you create your group. + // + // For more information, see Auto Scaling and Amazon Virtual Private Cloud + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/autoscalingsubnets.html) + // in the Auto Scaling Developer Guide. + // + // Valid values: default | dedicated + PlacementTenancy *string `min:"1" type:"string"` + + // The ID of the RAM disk associated with the AMI. + RamdiskId *string `min:"1" type:"string"` + + // One or more security groups with which to associate the instances. 
+ // + // If your instances are launched in EC2-Classic, you can either specify security + // group names or the security group IDs. For more information about security + // groups for EC2-Classic, see Amazon EC2 Security Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // If your instances are launched into a VPC, specify security group IDs. For + // more information, see Security Groups for Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) + // in the Amazon Virtual Private Cloud User Guide. + SecurityGroups []*string `type:"list"` + + // The maximum hourly price to be paid for any Spot Instance launched to fulfill + // the request. Spot Instances are launched when the price you specify exceeds + // the current Spot market price. For more information, see Launch Spot Instances + // in Your Auto Scaling Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US-SpotInstances.html) + // in the Auto Scaling Developer Guide. + SpotPrice *string `min:"1" type:"string"` + + // The user data to make available to the launched EC2 instances. For more information, + // see Instance Metadata and User Data (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // At this time, launch configurations don't support compressed (zipped) user + // data files. 
+ UserData *string `type:"string"` +} + +// String returns the string representation +func (s CreateLaunchConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLaunchConfigurationInput) GoString() string { + return s.String() +} + +type CreateLaunchConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLaunchConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLaunchConfigurationOutput) GoString() string { + return s.String() +} + +type CreateOrUpdateTagsInput struct { + _ struct{} `type:"structure"` + + // One or more tags. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateOrUpdateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOrUpdateTagsInput) GoString() string { + return s.String() +} + +type CreateOrUpdateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateOrUpdateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOrUpdateTagsOutput) GoString() string { + return s.String() +} + +type DeleteAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to delete. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Specifies that the group will be deleted along with all instances associated + // with the group, without waiting for all instances to be terminated. This + // parameter also deletes any lifecycle actions associated with the group. 
+ ForceDelete *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAutoScalingGroupInput) GoString() string { + return s.String() +} + +type DeleteAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type DeleteLaunchConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the launch configuration. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLaunchConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchConfigurationInput) GoString() string { + return s.String() +} + +type DeleteLaunchConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLaunchConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteLifecycleHookInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group for the lifecycle hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The name of the lifecycle hook. 
+ LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLifecycleHookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecycleHookInput) GoString() string { + return s.String() +} + +type DeleteLifecycleHookOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLifecycleHookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecycleHookOutput) GoString() string { + return s.String() +} + +type DeleteNotificationConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic. + TopicARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotificationConfigurationInput) GoString() string { + return s.String() +} + +type DeleteNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The name or Amazon Resource Name (ARN) of the policy. 
+ PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeleteScheduledActionInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The name of the action to delete. + ScheduledActionName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteScheduledActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScheduledActionInput) GoString() string { + return s.String() +} + +type DeleteScheduledActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteScheduledActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScheduledActionOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // Each tag should be defined by its resource type, resource ID, key, value, + // and a propagate flag. Valid values are: Resource type = auto-scaling-group, + // Resource ID = AutoScalingGroupName, key=value, value=value, propagate=true + // or false. 
+ Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DescribeAccountLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsInput) GoString() string { + return s.String() +} + +type DescribeAccountLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum number of groups allowed for your AWS account. The default limit + // is 20 per region. + MaxNumberOfAutoScalingGroups *int64 `type:"integer"` + + // The maximum number of launch configurations allowed for your AWS account. + // The default limit is 100 per region. + MaxNumberOfLaunchConfigurations *int64 `type:"integer"` + + // The current number of groups for your AWS account. + NumberOfAutoScalingGroups *int64 `type:"integer"` + + // The current number of launch configurations for your AWS account. 
+ NumberOfLaunchConfigurations *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeAccountLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsOutput) GoString() string { + return s.String() +} + +type DescribeAdjustmentTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAdjustmentTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAdjustmentTypesInput) GoString() string { + return s.String() +} + +type DescribeAdjustmentTypesOutput struct { + _ struct{} `type:"structure"` + + // The policy adjustment types. + AdjustmentTypes []*AdjustmentType `type:"list"` +} + +// String returns the string representation +func (s DescribeAdjustmentTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAdjustmentTypesOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingGroupsInput struct { + _ struct{} `type:"structure"` + + // The group names. + AutoScalingGroupNames []*string `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingGroupsInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingGroupsOutput struct { + _ struct{} `type:"structure"` + + // The groups. + AutoScalingGroups []*Group `type:"list" required:"true"` + + // The token to use when requesting the next set of items. 
If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingGroupsOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingInstancesInput struct { + _ struct{} `type:"structure"` + + // One or more Auto Scaling instances to describe, up to 50 instances. If you + // omit this parameter, all Auto Scaling instances are described. If you specify + // an ID that does not exist, it is ignored with no error. + InstanceIds []*string `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingInstancesInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingInstancesOutput struct { + _ struct{} `type:"structure"` + + // The instances. + AutoScalingInstances []*InstanceDetails `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutoScalingInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingInstancesOutput) GoString() string { + return s.String() +} + +type DescribeAutoScalingNotificationTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAutoScalingNotificationTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingNotificationTypesInput) GoString() string { + return s.String() +} + +type DescribeAutoScalingNotificationTypesOutput struct { + _ struct{} `type:"structure"` + + // One or more of the following notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCH + // + // autoscaling:EC2_INSTANCE_LAUNCH_ERROR + // + // autoscaling:EC2_INSTANCE_TERMINATE + // + // autoscaling:EC2_INSTANCE_TERMINATE_ERROR + // + // autoscaling:TEST_NOTIFICATION + AutoScalingNotificationTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeAutoScalingNotificationTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutoScalingNotificationTypesOutput) GoString() string { + return s.String() +} + +type DescribeLaunchConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The launch configuration names. + LaunchConfigurationNames []*string `type:"list"` + + // The maximum number of items to return with this call. The default is 100. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchConfigurationsInput) GoString() string { + return s.String() +} + +type DescribeLaunchConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The launch configurations. + LaunchConfigurations []*LaunchConfiguration `type:"list" required:"true"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribeLifecycleHookTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLifecycleHookTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHookTypesInput) GoString() string { + return s.String() +} + +type DescribeLifecycleHookTypesOutput struct { + _ struct{} `type:"structure"` + + // One or more of the following notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCHING + // + // autoscaling:EC2_INSTANCE_TERMINATING + LifecycleHookTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHookTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHookTypesOutput) GoString() string { + return s.String() +} + +type DescribeLifecycleHooksInput struct { + _ struct{} `type:"structure"` + + // The name of the group. 
+ AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The names of one or more lifecycle hooks. + LifecycleHookNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHooksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHooksInput) GoString() string { + return s.String() +} + +type DescribeLifecycleHooksOutput struct { + _ struct{} `type:"structure"` + + // The lifecycle hooks for the specified group. + LifecycleHooks []*LifecycleHook `type:"list"` +} + +// String returns the string representation +func (s DescribeLifecycleHooksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLifecycleHooksOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // The load balancers. + LoadBalancers []*LoadBalancerState `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersOutput) GoString() string { + return s.String() +} + +type DescribeMetricCollectionTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeMetricCollectionTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricCollectionTypesInput) GoString() string { + return s.String() +} + +type DescribeMetricCollectionTypesOutput struct { + _ struct{} `type:"structure"` + + // The granularities for the metrics. + Granularities []*MetricGranularityType `type:"list"` + + // One or more metrics. + Metrics []*MetricCollectionType `type:"list"` +} + +// String returns the string representation +func (s DescribeMetricCollectionTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricCollectionTypesOutput) GoString() string { + return s.String() +} + +type DescribeNotificationConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupNames []*string `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNotificationConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotificationConfigurationsInput) GoString() string { + return s.String() +} + +type DescribeNotificationConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // The notification configurations. + NotificationConfigurations []*NotificationConfiguration `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeNotificationConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotificationConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribePoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The maximum number of items to be returned with each call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // One or more policy names or policy ARNs to be described. If you omit this + // list, all policy names are described. If an group name is provided, the results + // are limited to that group. This list is limited to 50 items. If you specify + // an unknown policy name, it is ignored with no error. + PolicyNames []*string `type:"list"` + + // One or more policy types. Valid values are SimpleScaling and StepScaling. 
+ PolicyTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePoliciesInput) GoString() string { + return s.String() +} + +type DescribePoliciesOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // The scaling policies. + ScalingPolicies []*ScalingPolicy `type:"list"` +} + +// String returns the string representation +func (s DescribePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePoliciesOutput) GoString() string { + return s.String() +} + +type DescribeScalingActivitiesInput struct { + _ struct{} `type:"structure"` + + // The activity IDs of the desired scaling activities. If this list is omitted, + // all activities are described. If the AutoScalingGroupName parameter is provided, + // the results are limited to that group. The list of requested activities cannot + // contain more than 50 items. If unknown activities are requested, they are + // ignored with no error. + ActivityIds []*string `type:"list"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeScalingActivitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingActivitiesInput) GoString() string { + return s.String() +} + +type DescribeScalingActivitiesOutput struct { + _ struct{} `type:"structure"` + + // The scaling activities. + Activities []*Activity `type:"list" required:"true"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeScalingActivitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingActivitiesOutput) GoString() string { + return s.String() +} + +type DescribeScalingProcessTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeScalingProcessTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingProcessTypesInput) GoString() string { + return s.String() +} + +type DescribeScalingProcessTypesOutput struct { + _ struct{} `type:"structure"` + + // The names of the process types. + Processes []*ProcessType `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingProcessTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingProcessTypesOutput) GoString() string { + return s.String() +} + +type DescribeScheduledActionsInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The latest scheduled start time to return. 
If scheduled action names are + // provided, this parameter is ignored. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // Describes one or more scheduled actions. If you omit this list, the call + // describes all scheduled actions. If you specify an unknown scheduled action + // it is ignored with no error. + // + // You can describe up to a maximum of 50 instances with a single call. If + // there are more items to return, the call returns a token. To get the next + // set of items, repeat the call with the returned token in the NextToken parameter. + ScheduledActionNames []*string `type:"list"` + + // The earliest scheduled start time to return. If scheduled action names are + // provided, this parameter is ignored. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeScheduledActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledActionsInput) GoString() string { + return s.String() +} + +type DescribeScheduledActionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // The scheduled actions. 
+ ScheduledUpdateGroupActions []*ScheduledUpdateGroupAction `type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledActionsOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // A filter used to scope the tags to return. + Filters []*Filter `type:"list"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // The tags. 
+ Tags []*TagDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DescribeTerminationPolicyTypesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeTerminationPolicyTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTerminationPolicyTypesInput) GoString() string { + return s.String() +} + +type DescribeTerminationPolicyTypesOutput struct { + _ struct{} `type:"structure"` + + // The termination policies supported by Auto Scaling (OldestInstance, OldestLaunchConfiguration, + // NewestInstance, ClosestToNextInstanceHour, and Default). + TerminationPolicyTypes []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTerminationPolicyTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTerminationPolicyTypesOutput) GoString() string { + return s.String() +} + +type DetachInstancesInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. + InstanceIds []*string `type:"list"` + + // If True, the Auto Scaling group decrements the desired capacity value by + // the number of instances detached. 
+ ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s DetachInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInstancesInput) GoString() string { + return s.String() +} + +type DetachInstancesOutput struct { + _ struct{} `type:"structure"` + + // The activities related to detaching the instances from the Auto Scaling group. + Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s DetachInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInstancesOutput) GoString() string { + return s.String() +} + +type DetachLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One or more load balancer names. + LoadBalancerNames []*string `type:"list"` +} + +// String returns the string representation +func (s DetachLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancersInput) GoString() string { + return s.String() +} + +type DetachLoadBalancersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancersOutput) GoString() string { + return s.String() +} + +type DisableMetricsCollectionInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more of the following metrics. If you omit this parameter, all metrics + // are disabled. 
+ // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metrics []*string `type:"list"` +} + +// String returns the string representation +func (s DisableMetricsCollectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableMetricsCollectionInput) GoString() string { + return s.String() +} + +type DisableMetricsCollectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableMetricsCollectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableMetricsCollectionOutput) GoString() string { + return s.String() +} + +// Describes an Amazon EBS volume. +type Ebs struct { + _ struct{} `type:"structure"` + + // Indicates whether to delete the volume on instance termination. + // + // Default: true + DeleteOnTermination *bool `type:"boolean"` + + // Indicates whether the volume should be encrypted. Encrypted EBS volumes must + // be attached to instances that support Amazon EBS encryption. Volumes that + // are created from encrypted snapshots are automatically encrypted. There is + // no way to create an encrypted volume from an unencrypted snapshot or an unencrypted + // volume from an encrypted snapshot. For more information, see Amazon EBS Encryption + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in + // the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `type:"boolean"` + + // For Provisioned IOPS (SSD) volumes only. The number of I/O operations per + // second (IOPS) to provision for the volume. + // + // Default: None + Iops *int64 `min:"100" type:"integer"` + + // The ID of the snapshot. 
+ SnapshotId *string `min:"1" type:"string"` + + // The volume size, in gigabytes. + // + // Valid values: If the volume type is io1, the minimum size of the volume + // is 10 GiB. If you specify SnapshotId and VolumeSize, VolumeSize must be equal + // to or larger than the size of the snapshot. + // + // Default: If you create a volume from a snapshot and you don't specify a + // volume size, the default is the size of the snapshot. + // + // Required: Required when the volume type is io1. + VolumeSize *int64 `min:"1" type:"integer"` + + // The volume type. + // + // Valid values: standard | io1 | gp2 + // + // Default: standard + VolumeType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Ebs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ebs) GoString() string { + return s.String() +} + +type EnableMetricsCollectionInput struct { + _ struct{} `type:"structure"` + + // The name or ARN of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The granularity to associate with the metrics to collect. The only valid + // value is 1Minute. + Granularity *string `min:"1" type:"string" required:"true"` + + // One or more of the following metrics. If you omit this parameter, all metrics + // are enabled. + // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + // + // Note that the GroupStandbyInstances metric is not enabled by default. You + // must explicitly request this metric. 
+ Metrics []*string `type:"list"` +} + +// String returns the string representation +func (s EnableMetricsCollectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMetricsCollectionInput) GoString() string { + return s.String() +} + +type EnableMetricsCollectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableMetricsCollectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMetricsCollectionOutput) GoString() string { + return s.String() +} + +// Describes an enabled metric. +type EnabledMetric struct { + _ struct{} `type:"structure"` + + // The granularity of the metric. The only valid value is 1Minute. + Granularity *string `min:"1" type:"string"` + + // One of the following metrics: + // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metric *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EnabledMetric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnabledMetric) GoString() string { + return s.String() +} + +type EnterStandbyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instances to move into Standby mode. You must specify at least + // one instance ID. + InstanceIds []*string `type:"list"` + + // Specifies whether the instances moved to Standby mode count as part of the + // Auto Scaling group's desired capacity. 
If set, the desired capacity for the + // Auto Scaling group decrements by the number of instances moved to Standby + // mode. + ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s EnterStandbyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnterStandbyInput) GoString() string { + return s.String() +} + +type EnterStandbyOutput struct { + _ struct{} `type:"structure"` + + // The activities related to moving instances into Standby mode. + Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s EnterStandbyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnterStandbyOutput) GoString() string { + return s.String() +} + +type ExecutePolicyInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The breach threshold for the alarm. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. + BreachThreshold *float64 `type:"double"` + + // If this parameter is true, Auto Scaling waits for the cooldown period to + // complete before executing the policy. Otherwise, Auto Scaling executes the + // policy without waiting for the cooldown period to complete. + // + // This parameter is not supported if the policy type is StepScaling. + // + // For more information, see Understanding Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + HonorCooldown *bool `type:"boolean"` + + // The metric value to compare to BreachThreshold. This enables you to execute + // a policy of type StepScaling and determine which step adjustment to use. 
+ // For example, if the breach threshold is 50 and you want to use a step adjustment + // with a lower bound of 0 and an upper bound of 10, you can set the metric + // value to 59. + // + // If you specify a metric value that doesn't correspond to a step adjustment + // for the policy, the call returns an error. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. + MetricValue *float64 `type:"double"` + + // The name or ARN of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ExecutePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutePolicyInput) GoString() string { + return s.String() +} + +type ExecutePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ExecutePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutePolicyOutput) GoString() string { + return s.String() +} + +type ExitStandbyInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. You must specify at least one instance ID. + InstanceIds []*string `type:"list"` +} + +// String returns the string representation +func (s ExitStandbyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExitStandbyInput) GoString() string { + return s.String() +} + +type ExitStandbyOutput struct { + _ struct{} `type:"structure"` + + // The activities related to moving instances out of Standby mode. 
+ Activities []*Activity `type:"list"` +} + +// String returns the string representation +func (s ExitStandbyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExitStandbyOutput) GoString() string { + return s.String() +} + +// Describes a filter. +type Filter struct { + _ struct{} `type:"structure"` + + // The name of the filter. The valid values are: "auto-scaling-group", "key", + // "value", and "propagate-at-launch". + Name *string `type:"string"` + + // The value of the filter. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Describes an Auto Scaling group. +type Group struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the group. + AutoScalingGroupARN *string `min:"1" type:"string"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. + AvailabilityZones []*string `min:"1" type:"list" required:"true"` + + // The date and time the group was created. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. + DefaultCooldown *int64 `type:"integer" required:"true"` + + // The desired size of the group. + DesiredCapacity *int64 `type:"integer" required:"true"` + + // The metrics enabled for the group. + EnabledMetrics []*EnabledMetric `type:"list"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. 
The valid values are EC2 and ELB. + HealthCheckType *string `min:"1" type:"string" required:"true"` + + // The EC2 instances associated with the group. + Instances []*Instance `type:"list"` + + // The name of the associated launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // One or more load balancers associated with the group. + LoadBalancerNames []*string `type:"list"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer" required:"true"` + + // The minimum size of the group. + MinSize *int64 `type:"integer" required:"true"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlacementGroup *string `min:"1" type:"string"` + + // The current state of the group when DeleteAutoScalingGroup is in progress. + Status *string `min:"1" type:"string"` + + // The suspended processes associated with the group. + SuspendedProcesses []*SuspendedProcess `type:"list"` + + // The tags for the group. + Tags []*TagDescription `type:"list"` + + // The termination policies for the group. + TerminationPolicies []*string `type:"list"` + + // One or more subnet IDs, if applicable, separated by commas. + // + // If you specify VPCZoneIdentifier and AvailabilityZones, ensure that the + // Availability Zones of the subnets match the values for AvailabilityZones. 
+ VPCZoneIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Group) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Group) GoString() string { + return s.String() +} + +// Describes an EC2 instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the instance is running. + AvailabilityZone *string `min:"1" type:"string" required:"true"` + + // The health status of the instance. + HealthStatus *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // The launch configuration associated with the instance. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + + // A description of the current lifecycle state. Note that the Quarantined state + // is not used. + LifecycleState *string `type:"string" required:"true" enum:"LifecycleState"` + + // Indicates whether the instance is protected from termination by Auto Scaling + // when scaling in. + ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Describes an EC2 instance associated with an Auto Scaling group. +type InstanceDetails struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group associated with the instance. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The Availability Zone for the instance. + AvailabilityZone *string `min:"1" type:"string" required:"true"` + + // The health status of this instance. "Healthy" means that the instance is + // healthy and should remain in service. 
"Unhealthy" means that the instance + // is unhealthy and Auto Scaling should terminate and replace it. + HealthStatus *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // The launch configuration associated with the instance. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + + // The lifecycle state for the instance. For more information, see Auto Scaling + // Instance States (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html#AutoScalingStates) + // in the Auto Scaling Developer Guide. + LifecycleState *string `min:"1" type:"string" required:"true"` + + // Indicates whether the instance is protected from termination by Auto Scaling + // when scaling in. + ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s InstanceDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceDetails) GoString() string { + return s.String() +} + +// Describes whether instance monitoring is enabled. +type InstanceMonitoring struct { + _ struct{} `type:"structure"` + + // If True, instance monitoring is enabled. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s InstanceMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceMonitoring) GoString() string { + return s.String() +} + +// Describes a launch configuration. +type LaunchConfiguration struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] Indicates whether to assign a public IP address to each instance. + AssociatePublicIpAddress *bool `type:"boolean"` + + // A block device mapping, which specifies the block devices for the instance. 
+ BlockDeviceMappings []*BlockDeviceMapping `type:"list"` + + // The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. + // This parameter can only be used if you are launching EC2-Classic instances. + // For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClassicLinkVPCId *string `min:"1" type:"string"` + + // The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId. + // This parameter is required if ClassicLinkVPCId is specified, and cannot be + // used otherwise. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClassicLinkVPCSecurityGroups []*string `type:"list"` + + // The creation date and time for the launch configuration. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Controls whether the instance is optimized for EBS I/O (true) or not (false). + EbsOptimized *bool `type:"boolean"` + + // The name or Amazon Resource Name (ARN) of the instance profile associated + // with the IAM role for the instance. + IamInstanceProfile *string `min:"1" type:"string"` + + // The ID of the Amazon Machine Image (AMI). + ImageId *string `min:"1" type:"string" required:"true"` + + // Controls whether instances in this group are launched with detailed monitoring. + InstanceMonitoring *InstanceMonitoring `type:"structure"` + + // The instance type for the instances. + InstanceType *string `min:"1" type:"string" required:"true"` + + // The ID of the kernel associated with the AMI. + KernelId *string `min:"1" type:"string"` + + // The name of the key pair. + KeyName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the launch configuration. 
+ LaunchConfigurationARN *string `min:"1" type:"string"` + + // The name of the launch configuration. + LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + + // The tenancy of the instance, either default or dedicated. An instance with + // dedicated tenancy runs in an isolated, single-tenant hardware and can only + // be launched into a VPC. + PlacementTenancy *string `min:"1" type:"string"` + + // The ID of the RAM disk associated with the AMI. + RamdiskId *string `min:"1" type:"string"` + + // The security groups to associate with the instances. + SecurityGroups []*string `type:"list"` + + // The price to bid when launching Spot Instances. + SpotPrice *string `min:"1" type:"string"` + + // The user data available to the instances. + UserData *string `type:"string"` +} + +// String returns the string representation +func (s LaunchConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchConfiguration) GoString() string { + return s.String() +} + +// Describes a lifecycle hook, which tells Auto Scaling that you want to perform +// an action when an instance launches or terminates. When you have a lifecycle +// hook in place, the Auto Scaling group will either: +// +// Pause the instance after it launches, but before it is put into service +// Pause the instance as it terminates, but before it is fully terminated For +// more information, see Auto Scaling Pending State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingPendingState.html) +// and Auto Scaling Terminating State (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingTerminatingState.html) +// in the Auto Scaling Developer Guide. +type LifecycleHook struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group for the lifecycle hook. 
+ AutoScalingGroupName *string `min:"1" type:"string"` + + // Defines the action the Auto Scaling group should take when the lifecycle + // hook timeout elapses or if an unexpected failure occurs. The valid values + // are CONTINUE and ABANDON. The default value is CONTINUE. + DefaultResult *string `type:"string"` + + // The maximum time, in seconds, that an instance can remain in a Pending:Wait + // or Terminating:Wait state. The default is 172800 seconds (48 hours). + GlobalTimeout *int64 `type:"integer"` + + // The maximum time, in seconds, that can elapse before the lifecycle hook times + // out. The default is 3600 seconds (1 hour). When the lifecycle hook times + // out, Auto Scaling performs the action defined in the DefaultResult parameter. + // You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. + HeartbeatTimeout *int64 `type:"integer"` + + // The name of the lifecycle hook. + LifecycleHookName *string `min:"1" type:"string"` + + // The state of the EC2 instance to which you want to attach the lifecycle hook. + // For a list of lifecycle hook types, see DescribeLifecycleHookTypes. + LifecycleTransition *string `type:"string"` + + // Additional information that you want to include any time Auto Scaling sends + // a message to the notification target. + NotificationMetadata *string `min:"1" type:"string"` + + // The ARN of the notification target that Auto Scaling uses to notify you when + // an instance is in the transition state for the lifecycle hook. This ARN target + // can be either an SQS queue or an SNS topic. 
The notification message sent + // to the target includes the following: + // + // Lifecycle action token User account ID Name of the Auto Scaling group Lifecycle + // hook name EC2 instance ID Lifecycle transition Notification metadata + NotificationTargetARN *string `min:"1" type:"string"` + + // The ARN of the IAM role that allows the Auto Scaling group to publish to + // the specified notification target. + RoleARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LifecycleHook) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleHook) GoString() string { + return s.String() +} + +// Describes the state of a load balancer. +type LoadBalancerState struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `min:"1" type:"string"` + + // One of the following load balancer states: + // + // Adding - The instances in the group are being registered with the load + // balancer. + // + // Added - All instances in the group are registered with the load balancer. + // + // InService - At least one instance in the group passed an ELB health check. + // + // Removing - The instances are being deregistered from the load balancer. + // If connection draining is enabled, Elastic Load Balancing waits for in-flight + // requests to complete before deregistering the instances. + State *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LoadBalancerState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerState) GoString() string { + return s.String() +} + +// Describes a metric. 
+type MetricCollectionType struct { + _ struct{} `type:"structure"` + + // One of the following metrics: + // + // GroupMinSize + // + // GroupMaxSize + // + // GroupDesiredCapacity + // + // GroupInServiceInstances + // + // GroupPendingInstances + // + // GroupStandbyInstances + // + // GroupTerminatingInstances + // + // GroupTotalInstances + Metric *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MetricCollectionType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricCollectionType) GoString() string { + return s.String() +} + +// Describes a granularity of a metric. +type MetricGranularityType struct { + _ struct{} `type:"structure"` + + // The granularity. The only valid value is 1Minute. + Granularity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MetricGranularityType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricGranularityType) GoString() string { + return s.String() +} + +// Describes a notification. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // One of the following event notification types: + // + // autoscaling:EC2_INSTANCE_LAUNCH + // + // autoscaling:EC2_INSTANCE_LAUNCH_ERROR + // + // autoscaling:EC2_INSTANCE_TERMINATE + // + // autoscaling:EC2_INSTANCE_TERMINATE_ERROR + // + // autoscaling:TEST_NOTIFICATION + NotificationType *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic. 
+ TopicARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Describes a process type. +// +// For more information, see Auto Scaling Processes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html#process-types) +// in the Auto Scaling Developer Guide. +type ProcessType struct { + _ struct{} `type:"structure"` + + // One of the following processes: + // + // Launch + // + // Terminate + // + // AddToLoadBalancer + // + // AlarmNotification + // + // AZRebalance + // + // HealthCheck + // + // ReplaceUnhealthy + // + // ScheduledActions + ProcessName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ProcessType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProcessType) GoString() string { + return s.String() +} + +type PutLifecycleHookInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group to which you want to assign the lifecycle + // hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Defines the action the Auto Scaling group should take when the lifecycle + // hook timeout elapses or if an unexpected failure occurs. The value for this + // parameter can be either CONTINUE or ABANDON. The default value for this parameter + // is ABANDON. + DefaultResult *string `type:"string"` + + // The amount of time, in seconds, that can elapse before the lifecycle hook + // times out. When the lifecycle hook times out, Auto Scaling performs the action + // defined in the DefaultResult parameter. You can prevent the lifecycle hook + // from timing out by calling RecordLifecycleActionHeartbeat. 
The default is + // 3600 seconds (1 hour). + HeartbeatTimeout *int64 `type:"integer"` + + // The name of the lifecycle hook. + LifecycleHookName *string `min:"1" type:"string" required:"true"` + + // The instance state to which you want to attach the lifecycle hook. For a + // list of lifecycle hook types, see DescribeLifecycleHookTypes. + // + // This parameter is required for new lifecycle hooks, but optional when updating + // existing hooks. + LifecycleTransition *string `type:"string"` + + // Contains additional information that you want to include any time Auto Scaling + // sends a message to the notification target. + NotificationMetadata *string `min:"1" type:"string"` + + // The ARN of the notification target that Auto Scaling will use to notify you + // when an instance is in the transition state for the lifecycle hook. This + // ARN target can be either an SQS queue or an SNS topic. + // + // This parameter is required for new lifecycle hooks, but optional when updating + // existing hooks. + // + // The notification message sent to the target will include: + // + // LifecycleActionToken. The Lifecycle action token. AccountId. The user account + // ID. AutoScalingGroupName. The name of the Auto Scaling group. LifecycleHookName. + // The lifecycle hook name. EC2InstanceId. The EC2 instance ID. LifecycleTransition. + // The lifecycle transition. NotificationMetadata. The notification metadata. + // This operation uses the JSON format when sending notifications to an Amazon + // SQS queue, and an email key/value pair format when sending notifications + // to an Amazon SNS topic. + // + // When you call this operation, a test message is sent to the notification + // target. This test message contains an additional key/value pair: Event:autoscaling:TEST_NOTIFICATION. + NotificationTargetARN *string `min:"1" type:"string"` + + // The ARN of the IAM role that allows the Auto Scaling group to publish to + // the specified notification target. 
+ // + // This parameter is required for new lifecycle hooks, but optional when updating + // existing hooks. + RoleARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutLifecycleHookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecycleHookInput) GoString() string { + return s.String() +} + +type PutLifecycleHookOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutLifecycleHookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecycleHookOutput) GoString() string { + return s.String() +} + +type PutNotificationConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The type of event that will cause the notification to be sent. For details + // about notification types supported by Auto Scaling, see DescribeAutoScalingNotificationTypes. + NotificationTypes []*string `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic. 
+ TopicARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutNotificationConfigurationInput) GoString() string { + return s.String() +} + +type PutNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // The adjustment type. Valid values are ChangeInCapacity, ExactCapacity, and + // PercentChangeInCapacity. + // + // For more information, see Dynamic Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html) + // in the Auto Scaling Developer Guide. + AdjustmentType *string `min:"1" type:"string" required:"true"` + + // The name or ARN of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The amount of time, in seconds, after a scaling activity completes and before + // the next scaling activity can start. If this parameter is not specified, + // the default cooldown period for the group applies. + // + // This parameter is not supported unless the policy type is SimpleScaling. + // + // For more information, see Understanding Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + Cooldown *int64 `type:"integer"` + + // The estimated time, in seconds, until a newly launched instance can contribute + // to the CloudWatch metrics. The default is to use the value specified for + // the default cooldown period for the group. 
+ // + // This parameter is not supported if the policy type is SimpleScaling. + EstimatedInstanceWarmup *int64 `type:"integer"` + + // The aggregation type for the CloudWatch metrics. Valid values are Minimum, + // Maximum, and Average. If the aggregation type is null, the value is treated + // as Average. + // + // This parameter is not supported if the policy type is SimpleScaling. + MetricAggregationType *string `min:"1" type:"string"` + + // The minimum number of instances to scale. If the value of AdjustmentType + // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity + // of the Auto Scaling group by at least this many instances. Otherwise, the + // error is ValidationError. + MinAdjustmentMagnitude *int64 `type:"integer"` + + // Available for backward compatibility. Use MinAdjustmentMagnitude instead. + MinAdjustmentStep *int64 `type:"integer"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The policy type. Valid values are SimpleScaling and StepScaling. If the policy + // type is null, the value is treated as SimpleScaling. + PolicyType *string `min:"1" type:"string"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. + // + // This parameter is required if the policy type is SimpleScaling and not supported + // otherwise. + ScalingAdjustment *int64 `type:"integer"` + + // A set of adjustments that enable you to scale based on the size of the alarm + // breach. + // + // This parameter is required if the policy type is StepScaling and not supported + // otherwise. 
+ StepAdjustments []*StepAdjustment `type:"list"` +} + +// String returns the string representation +func (s PutScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyInput) GoString() string { + return s.String() +} + +type PutScalingPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the policy. + PolicyARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyOutput) GoString() string { + return s.String() +} + +type PutScheduledUpdateGroupActionInput struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The number of EC2 instances that should be running in the group. + DesiredCapacity *int64 `type:"integer"` + + // The time for this action to end. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The maximum size for the Auto Scaling group. + MaxSize *int64 `type:"integer"` + + // The minimum size for the Auto Scaling group. + MinSize *int64 `type:"integer"` + + // The time when recurring future actions will start. Start time is specified + // by the user following the Unix cron syntax format. For more information, + // see Cron (http://en.wikipedia.org/wiki/Cron) in Wikipedia. + // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action will start and stop. + Recurrence *string `min:"1" type:"string"` + + // The name of this scaling action. 
+ ScheduledActionName *string `min:"1" type:"string" required:"true"` + + // The time for this action to start, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT + // only (for example, 2014-06-01T00:00:00Z). + // + // If you try to schedule your action in the past, Auto Scaling returns an + // error message. + // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action starts and stops. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // This parameter is deprecated; use StartTime instead. + // + // The time for this action to start. If both Time and StartTime are specified, + // their values must be identical. + Time *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s PutScheduledUpdateGroupActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScheduledUpdateGroupActionInput) GoString() string { + return s.String() +} + +type PutScheduledUpdateGroupActionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutScheduledUpdateGroupActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScheduledUpdateGroupActionOutput) GoString() string { + return s.String() +} + +type RecordLifecycleActionHeartbeatInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group for the hook. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // A token that uniquely identifies a specific lifecycle action associated with + // an instance. Auto Scaling sends this token to the notification target you + // specified when you created the lifecycle hook. + LifecycleActionToken *string `min:"36" type:"string" required:"true"` + + // The name of the lifecycle hook. 
+ LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RecordLifecycleActionHeartbeatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordLifecycleActionHeartbeatInput) GoString() string { + return s.String() +} + +type RecordLifecycleActionHeartbeatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RecordLifecycleActionHeartbeatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordLifecycleActionHeartbeatOutput) GoString() string { + return s.String() +} + +type ResumeProcessesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResumeProcessesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeProcessesOutput) GoString() string { + return s.String() +} + +// Describes a scaling policy. +type ScalingPolicy struct { + _ struct{} `type:"structure"` + + // The adjustment type, which specifies how ScalingAdjustment is interpreted. + // Valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + AdjustmentType *string `min:"1" type:"string"` + + // The CloudWatch alarms related to the policy. + Alarms []*Alarm `type:"list"` + + // The name of the Auto Scaling group associated with this scaling policy. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The amount of time, in seconds, after a scaling activity completes before + // any further trigger-related scaling activities can start. + Cooldown *int64 `type:"integer"` + + // The estimated time, in seconds, until a newly launched instance can contribute + // to the CloudWatch metrics. + EstimatedInstanceWarmup *int64 `type:"integer"` + + // The aggregation type for the CloudWatch metrics. 
Valid values are Minimum, + // Maximum, and Average. + MetricAggregationType *string `min:"1" type:"string"` + + // The minimum number of instances to scale. If the value of AdjustmentType + // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity + // of the Auto Scaling group by at least this many instances. Otherwise, the + // error is ValidationError. + MinAdjustmentMagnitude *int64 `type:"integer"` + + // Available for backward compatibility. Use MinAdjustmentMagnitude instead. + MinAdjustmentStep *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the policy. + PolicyARN *string `min:"1" type:"string"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // The policy type. Valid values are SimpleScaling and StepScaling. + PolicyType *string `min:"1" type:"string"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. + ScalingAdjustment *int64 `type:"integer"` + + // A set of adjustments that enable you to scale based on the size of the alarm + // breach. + StepAdjustments []*StepAdjustment `type:"list"` +} + +// String returns the string representation +func (s ScalingPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPolicy) GoString() string { + return s.String() +} + +type ScalingProcessQuery struct { + _ struct{} `type:"structure"` + + // The name or Amazon Resource Name (ARN) of the Auto Scaling group. 
+ AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more of the following processes: + // + // Launch + // + // Terminate + // + // HealthCheck + // + // ReplaceUnhealthy + // + // AZRebalance + // + // AlarmNotification + // + // ScheduledActions + // + // AddToLoadBalancer + ScalingProcesses []*string `type:"list"` +} + +// String returns the string representation +func (s ScalingProcessQuery) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingProcessQuery) GoString() string { + return s.String() +} + +// Describes a scheduled update to an Auto Scaling group. +type ScheduledUpdateGroupAction struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The number of instances you prefer to maintain in the group. + DesiredCapacity *int64 `type:"integer"` + + // The date and time that the action is scheduled to end. This date and time + // can be up to one month in the future. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The maximum size of the group. + MaxSize *int64 `type:"integer"` + + // The minimum size of the group. + MinSize *int64 `type:"integer"` + + // The recurring schedule for the action. + Recurrence *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the scheduled action. + ScheduledActionARN *string `min:"1" type:"string"` + + // The name of the scheduled action. + ScheduledActionName *string `min:"1" type:"string"` + + // The date and time that the action is scheduled to begin. This date and time + // can be up to one month in the future. + // + // When StartTime and EndTime are specified with Recurrence, they form the + // boundaries of when the recurring action will start and stop. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // This parameter is deprecated; use StartTime instead. 
+ Time *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ScheduledUpdateGroupAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledUpdateGroupAction) GoString() string { + return s.String() +} + +type SetDesiredCapacityInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The number of EC2 instances that should be running in the Auto Scaling group. + DesiredCapacity *int64 `type:"integer" required:"true"` + + // By default, SetDesiredCapacity overrides any cooldown period associated with + // the Auto Scaling group. Specify True to make Auto Scaling to wait for the + // cool-down period associated with the Auto Scaling group to complete before + // initiating a scaling activity to set your Auto Scaling group to its new capacity. + HonorCooldown *bool `type:"boolean"` +} + +// String returns the string representation +func (s SetDesiredCapacityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDesiredCapacityInput) GoString() string { + return s.String() +} + +type SetDesiredCapacityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDesiredCapacityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDesiredCapacityOutput) GoString() string { + return s.String() +} + +type SetInstanceHealthInput struct { + _ struct{} `type:"structure"` + + // The health status of the instance. Set to Healthy if you want the instance + // to remain in service. Set to Unhealthy if you want the instance to be out + // of service. Auto Scaling will terminate and replace the unhealthy instance. 
+ HealthStatus *string `min:"1" type:"string" required:"true"` + + // The ID of the EC2 instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod + // specified for the group, by default, this call will respect the grace period. + // Set this to False, if you do not want the call to respect the grace period + // associated with the group. + // + // For more information, see the HealthCheckGracePeriod parameter description + // for CreateAutoScalingGroup. + ShouldRespectGracePeriod *bool `type:"boolean"` +} + +// String returns the string representation +func (s SetInstanceHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceHealthInput) GoString() string { + return s.String() +} + +type SetInstanceHealthOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetInstanceHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceHealthOutput) GoString() string { + return s.String() +} + +type SetInstanceProtectionInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance IDs. + InstanceIds []*string `type:"list" required:"true"` + + // Indicates whether the instance is protected from termination by Auto Scaling + // when scaling in. 
+ ProtectedFromScaleIn *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s SetInstanceProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceProtectionInput) GoString() string { + return s.String() +} + +type SetInstanceProtectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetInstanceProtectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetInstanceProtectionOutput) GoString() string { + return s.String() +} + +// Describes an adjustment based on the difference between the value of the +// aggregated CloudWatch metric and the breach threshold that you've defined +// for the alarm. +// +// For the following examples, suppose that you have an alarm with a breach +// threshold of 50: +// +// If you want the adjustment to be triggered when the metric is greater +// than or equal to 50 and less than 60, specify a lower bound of 0 and an upper +// bound of 10. +// +// If you want the adjustment to be triggered when the metric is greater +// than 40 and less than or equal to 50, specify a lower bound of -10 and an +// upper bound of 0. +// +// There are a few rules for the step adjustments for your step policy: +// +// The ranges of your step adjustments can't overlap or have a gap. +// +// At most one step adjustment can have a null lower bound. If one step adjustment +// has a negative lower bound, then there must be a step adjustment with a null +// lower bound. +// +// At most one step adjustment can have a null upper bound. If one step adjustment +// has a positive upper bound, then there must be a step adjustment with a null +// upper bound. +// +// The upper and lower bound can't be null in the same step adjustment. 
+type StepAdjustment struct { + _ struct{} `type:"structure"` + + // The lower bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the lower bound + // is inclusive (the metric must be greater than or equal to the threshold plus + // the lower bound). Otherwise, it is exclusive (the metric must be greater + // than the threshold plus the lower bound). A null value indicates negative + // infinity. + MetricIntervalLowerBound *float64 `type:"double"` + + // The upper bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the upper bound + // is exclusive (the metric must be less than the threshold plus the upper bound). + // Otherwise, it is inclusive (the metric must be less than or equal to the + // threshold plus the upper bound). A null value indicates positive infinity. + // + // The upper bound must be greater than the lower bound. + MetricIntervalUpperBound *float64 `type:"double"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. + ScalingAdjustment *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s StepAdjustment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepAdjustment) GoString() string { + return s.String() +} + +type SuspendProcessesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SuspendProcessesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendProcessesOutput) GoString() string { + return s.String() +} + +// Describes an Auto Scaling process that has been suspended. For more information, +// see ProcessType. 
+type SuspendedProcess struct { + _ struct{} `type:"structure"` + + // The name of the suspended process. + ProcessName *string `min:"1" type:"string"` + + // The reason that the process was suspended. + SuspensionReason *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SuspendedProcess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendedProcess) GoString() string { + return s.String() +} + +// Describes a tag for an Auto Scaling group. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `min:"1" type:"string" required:"true"` + + // Determines whether the tag is added to new instances as they are launched + // in the group. + PropagateAtLaunch *bool `type:"boolean"` + + // The name of the group. + ResourceId *string `type:"string"` + + // The type of resource. The only supported value is auto-scaling-group. + ResourceType *string `type:"string"` + + // The tag value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Describes a tag for an Auto Scaling group. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `min:"1" type:"string"` + + // Determines whether the tag is added to new instances as they are launched + // in the group. + PropagateAtLaunch *bool `type:"boolean"` + + // The name of the group. + ResourceId *string `type:"string"` + + // The type of resource. The only supported value is auto-scaling-group. + ResourceType *string `type:"string"` + + // The tag value. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +type TerminateInstanceInAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The ID of the EC2 instance. + InstanceId *string `min:"1" type:"string" required:"true"` + + // If true, terminating the instance also decrements the size of the Auto Scaling + // group. + ShouldDecrementDesiredCapacity *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s TerminateInstanceInAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstanceInAutoScalingGroupInput) GoString() string { + return s.String() +} + +type TerminateInstanceInAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` + + // A scaling activity. + Activity *Activity `type:"structure"` +} + +// String returns the string representation +func (s TerminateInstanceInAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstanceInAutoScalingGroupOutput) GoString() string { + return s.String() +} + +type UpdateAutoScalingGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more Availability Zones for the group. + AvailabilityZones []*string `min:"1" type:"list"` + + // The amount of time, in seconds, after a scaling activity completes before + // another scaling activity can start. The default is 300. 
+ // + // For more information, see Understanding Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) + // in the Auto Scaling Developer Guide. + DefaultCooldown *int64 `type:"integer"` + + // The number of EC2 instances that should be running in the Auto Scaling group. + // This number must be greater than or equal to the minimum size of the group + // and less than or equal to the maximum size of the group. + DesiredCapacity *int64 `type:"integer"` + + // The amount of time, in seconds, that Auto Scaling waits before checking the + // health status of an EC2 instance that has come into service. The default + // is 300. + // + // For more information, see Health Checks For Auto Scaling Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) + // in the Auto Scaling Developer Guide. + HealthCheckGracePeriod *int64 `type:"integer"` + + // The service to use for the health checks. The valid values are EC2 and ELB. + HealthCheckType *string `min:"1" type:"string"` + + // The name of the launch configuration. + LaunchConfigurationName *string `min:"1" type:"string"` + + // The maximum size of the Auto Scaling group. + MaxSize *int64 `type:"integer"` + + // The minimum size of the Auto Scaling group. + MinSize *int64 `type:"integer"` + + // Indicates whether newly launched instances are protected from termination + // by Auto Scaling when scaling in. + NewInstancesProtectedFromScaleIn *bool `type:"boolean"` + + // The name of the placement group into which you'll launch your instances, + // if any. For more information, see Placement Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlacementGroup *string `min:"1" type:"string"` + + // A standalone termination policy or a list of termination policies used to + // select the instance to terminate. 
The policies are executed in the order + // that they are listed. + // + // For more information, see Choosing a Termination Policy for Your Auto Scaling + // Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/us-termination-policy.html) + // in the Auto Scaling Developer Guide. + TerminationPolicies []*string `type:"list"` + + // The ID of the subnet, if you are launching into a VPC. You can specify several + // subnets in a comma-separated list. + // + // When you specify VPCZoneIdentifier with AvailabilityZones, ensure that the + // subnets' Availability Zones match the values you specify for AvailabilityZones. + // + // For more information, see Auto Scaling and Amazon Virtual Private Cloud + // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/autoscalingsubnets.html) + // in the Auto Scaling Developer Guide. + VPCZoneIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateAutoScalingGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAutoScalingGroupInput) GoString() string { + return s.String() +} + +type UpdateAutoScalingGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAutoScalingGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAutoScalingGroupOutput) GoString() string { + return s.String() +} + +const ( + // @enum LifecycleState + LifecycleStatePending = "Pending" + // @enum LifecycleState + LifecycleStatePendingWait = "Pending:Wait" + // @enum LifecycleState + LifecycleStatePendingProceed = "Pending:Proceed" + // @enum LifecycleState + LifecycleStateQuarantined = "Quarantined" + // @enum LifecycleState + LifecycleStateInService = "InService" + // @enum LifecycleState + LifecycleStateTerminating = "Terminating" + // @enum LifecycleState + 
LifecycleStateTerminatingWait = "Terminating:Wait" + // @enum LifecycleState + LifecycleStateTerminatingProceed = "Terminating:Proceed" + // @enum LifecycleState + LifecycleStateTerminated = "Terminated" + // @enum LifecycleState + LifecycleStateDetaching = "Detaching" + // @enum LifecycleState + LifecycleStateDetached = "Detached" + // @enum LifecycleState + LifecycleStateEnteringStandby = "EnteringStandby" + // @enum LifecycleState + LifecycleStateStandby = "Standby" +) + +const ( + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodePendingSpotBidPlacement = "PendingSpotBidPlacement" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForSpotInstanceRequestId = "WaitingForSpotInstanceRequestId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForSpotInstanceId = "WaitingForSpotInstanceId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForInstanceId = "WaitingForInstanceId" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodePreInService = "PreInService" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeInProgress = "InProgress" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForElbconnectionDraining = "WaitingForELBConnectionDraining" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeMidLifecycleAction = "MidLifecycleAction" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeWaitingForInstanceWarmup = "WaitingForInstanceWarmup" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeSuccessful = "Successful" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeFailed = "Failed" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeCancelled = "Cancelled" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,226 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package autoscalingiface provides an interface for the Auto Scaling. +package autoscalingiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/autoscaling" +) + +// AutoScalingAPI is the interface type for autoscaling.AutoScaling. +type AutoScalingAPI interface { + AttachInstancesRequest(*autoscaling.AttachInstancesInput) (*request.Request, *autoscaling.AttachInstancesOutput) + + AttachInstances(*autoscaling.AttachInstancesInput) (*autoscaling.AttachInstancesOutput, error) + + AttachLoadBalancersRequest(*autoscaling.AttachLoadBalancersInput) (*request.Request, *autoscaling.AttachLoadBalancersOutput) + + AttachLoadBalancers(*autoscaling.AttachLoadBalancersInput) (*autoscaling.AttachLoadBalancersOutput, error) + + CompleteLifecycleActionRequest(*autoscaling.CompleteLifecycleActionInput) (*request.Request, *autoscaling.CompleteLifecycleActionOutput) + + CompleteLifecycleAction(*autoscaling.CompleteLifecycleActionInput) (*autoscaling.CompleteLifecycleActionOutput, error) + + CreateAutoScalingGroupRequest(*autoscaling.CreateAutoScalingGroupInput) (*request.Request, *autoscaling.CreateAutoScalingGroupOutput) + + CreateAutoScalingGroup(*autoscaling.CreateAutoScalingGroupInput) (*autoscaling.CreateAutoScalingGroupOutput, error) + + CreateLaunchConfigurationRequest(*autoscaling.CreateLaunchConfigurationInput) (*request.Request, *autoscaling.CreateLaunchConfigurationOutput) + + 
CreateLaunchConfiguration(*autoscaling.CreateLaunchConfigurationInput) (*autoscaling.CreateLaunchConfigurationOutput, error) + + CreateOrUpdateTagsRequest(*autoscaling.CreateOrUpdateTagsInput) (*request.Request, *autoscaling.CreateOrUpdateTagsOutput) + + CreateOrUpdateTags(*autoscaling.CreateOrUpdateTagsInput) (*autoscaling.CreateOrUpdateTagsOutput, error) + + DeleteAutoScalingGroupRequest(*autoscaling.DeleteAutoScalingGroupInput) (*request.Request, *autoscaling.DeleteAutoScalingGroupOutput) + + DeleteAutoScalingGroup(*autoscaling.DeleteAutoScalingGroupInput) (*autoscaling.DeleteAutoScalingGroupOutput, error) + + DeleteLaunchConfigurationRequest(*autoscaling.DeleteLaunchConfigurationInput) (*request.Request, *autoscaling.DeleteLaunchConfigurationOutput) + + DeleteLaunchConfiguration(*autoscaling.DeleteLaunchConfigurationInput) (*autoscaling.DeleteLaunchConfigurationOutput, error) + + DeleteLifecycleHookRequest(*autoscaling.DeleteLifecycleHookInput) (*request.Request, *autoscaling.DeleteLifecycleHookOutput) + + DeleteLifecycleHook(*autoscaling.DeleteLifecycleHookInput) (*autoscaling.DeleteLifecycleHookOutput, error) + + DeleteNotificationConfigurationRequest(*autoscaling.DeleteNotificationConfigurationInput) (*request.Request, *autoscaling.DeleteNotificationConfigurationOutput) + + DeleteNotificationConfiguration(*autoscaling.DeleteNotificationConfigurationInput) (*autoscaling.DeleteNotificationConfigurationOutput, error) + + DeletePolicyRequest(*autoscaling.DeletePolicyInput) (*request.Request, *autoscaling.DeletePolicyOutput) + + DeletePolicy(*autoscaling.DeletePolicyInput) (*autoscaling.DeletePolicyOutput, error) + + DeleteScheduledActionRequest(*autoscaling.DeleteScheduledActionInput) (*request.Request, *autoscaling.DeleteScheduledActionOutput) + + DeleteScheduledAction(*autoscaling.DeleteScheduledActionInput) (*autoscaling.DeleteScheduledActionOutput, error) + + DeleteTagsRequest(*autoscaling.DeleteTagsInput) (*request.Request, *autoscaling.DeleteTagsOutput) + 
+ DeleteTags(*autoscaling.DeleteTagsInput) (*autoscaling.DeleteTagsOutput, error) + + DescribeAccountLimitsRequest(*autoscaling.DescribeAccountLimitsInput) (*request.Request, *autoscaling.DescribeAccountLimitsOutput) + + DescribeAccountLimits(*autoscaling.DescribeAccountLimitsInput) (*autoscaling.DescribeAccountLimitsOutput, error) + + DescribeAdjustmentTypesRequest(*autoscaling.DescribeAdjustmentTypesInput) (*request.Request, *autoscaling.DescribeAdjustmentTypesOutput) + + DescribeAdjustmentTypes(*autoscaling.DescribeAdjustmentTypesInput) (*autoscaling.DescribeAdjustmentTypesOutput, error) + + DescribeAutoScalingGroupsRequest(*autoscaling.DescribeAutoScalingGroupsInput) (*request.Request, *autoscaling.DescribeAutoScalingGroupsOutput) + + DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) + + DescribeAutoScalingGroupsPages(*autoscaling.DescribeAutoScalingGroupsInput, func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) error + + DescribeAutoScalingInstancesRequest(*autoscaling.DescribeAutoScalingInstancesInput) (*request.Request, *autoscaling.DescribeAutoScalingInstancesOutput) + + DescribeAutoScalingInstances(*autoscaling.DescribeAutoScalingInstancesInput) (*autoscaling.DescribeAutoScalingInstancesOutput, error) + + DescribeAutoScalingInstancesPages(*autoscaling.DescribeAutoScalingInstancesInput, func(*autoscaling.DescribeAutoScalingInstancesOutput, bool) bool) error + + DescribeAutoScalingNotificationTypesRequest(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*request.Request, *autoscaling.DescribeAutoScalingNotificationTypesOutput) + + DescribeAutoScalingNotificationTypes(*autoscaling.DescribeAutoScalingNotificationTypesInput) (*autoscaling.DescribeAutoScalingNotificationTypesOutput, error) + + DescribeLaunchConfigurationsRequest(*autoscaling.DescribeLaunchConfigurationsInput) (*request.Request, *autoscaling.DescribeLaunchConfigurationsOutput) + + 
DescribeLaunchConfigurations(*autoscaling.DescribeLaunchConfigurationsInput) (*autoscaling.DescribeLaunchConfigurationsOutput, error) + + DescribeLaunchConfigurationsPages(*autoscaling.DescribeLaunchConfigurationsInput, func(*autoscaling.DescribeLaunchConfigurationsOutput, bool) bool) error + + DescribeLifecycleHookTypesRequest(*autoscaling.DescribeLifecycleHookTypesInput) (*request.Request, *autoscaling.DescribeLifecycleHookTypesOutput) + + DescribeLifecycleHookTypes(*autoscaling.DescribeLifecycleHookTypesInput) (*autoscaling.DescribeLifecycleHookTypesOutput, error) + + DescribeLifecycleHooksRequest(*autoscaling.DescribeLifecycleHooksInput) (*request.Request, *autoscaling.DescribeLifecycleHooksOutput) + + DescribeLifecycleHooks(*autoscaling.DescribeLifecycleHooksInput) (*autoscaling.DescribeLifecycleHooksOutput, error) + + DescribeLoadBalancersRequest(*autoscaling.DescribeLoadBalancersInput) (*request.Request, *autoscaling.DescribeLoadBalancersOutput) + + DescribeLoadBalancers(*autoscaling.DescribeLoadBalancersInput) (*autoscaling.DescribeLoadBalancersOutput, error) + + DescribeMetricCollectionTypesRequest(*autoscaling.DescribeMetricCollectionTypesInput) (*request.Request, *autoscaling.DescribeMetricCollectionTypesOutput) + + DescribeMetricCollectionTypes(*autoscaling.DescribeMetricCollectionTypesInput) (*autoscaling.DescribeMetricCollectionTypesOutput, error) + + DescribeNotificationConfigurationsRequest(*autoscaling.DescribeNotificationConfigurationsInput) (*request.Request, *autoscaling.DescribeNotificationConfigurationsOutput) + + DescribeNotificationConfigurations(*autoscaling.DescribeNotificationConfigurationsInput) (*autoscaling.DescribeNotificationConfigurationsOutput, error) + + DescribeNotificationConfigurationsPages(*autoscaling.DescribeNotificationConfigurationsInput, func(*autoscaling.DescribeNotificationConfigurationsOutput, bool) bool) error + + DescribePoliciesRequest(*autoscaling.DescribePoliciesInput) (*request.Request, 
*autoscaling.DescribePoliciesOutput) + + DescribePolicies(*autoscaling.DescribePoliciesInput) (*autoscaling.DescribePoliciesOutput, error) + + DescribePoliciesPages(*autoscaling.DescribePoliciesInput, func(*autoscaling.DescribePoliciesOutput, bool) bool) error + + DescribeScalingActivitiesRequest(*autoscaling.DescribeScalingActivitiesInput) (*request.Request, *autoscaling.DescribeScalingActivitiesOutput) + + DescribeScalingActivities(*autoscaling.DescribeScalingActivitiesInput) (*autoscaling.DescribeScalingActivitiesOutput, error) + + DescribeScalingActivitiesPages(*autoscaling.DescribeScalingActivitiesInput, func(*autoscaling.DescribeScalingActivitiesOutput, bool) bool) error + + DescribeScalingProcessTypesRequest(*autoscaling.DescribeScalingProcessTypesInput) (*request.Request, *autoscaling.DescribeScalingProcessTypesOutput) + + DescribeScalingProcessTypes(*autoscaling.DescribeScalingProcessTypesInput) (*autoscaling.DescribeScalingProcessTypesOutput, error) + + DescribeScheduledActionsRequest(*autoscaling.DescribeScheduledActionsInput) (*request.Request, *autoscaling.DescribeScheduledActionsOutput) + + DescribeScheduledActions(*autoscaling.DescribeScheduledActionsInput) (*autoscaling.DescribeScheduledActionsOutput, error) + + DescribeScheduledActionsPages(*autoscaling.DescribeScheduledActionsInput, func(*autoscaling.DescribeScheduledActionsOutput, bool) bool) error + + DescribeTagsRequest(*autoscaling.DescribeTagsInput) (*request.Request, *autoscaling.DescribeTagsOutput) + + DescribeTags(*autoscaling.DescribeTagsInput) (*autoscaling.DescribeTagsOutput, error) + + DescribeTagsPages(*autoscaling.DescribeTagsInput, func(*autoscaling.DescribeTagsOutput, bool) bool) error + + DescribeTerminationPolicyTypesRequest(*autoscaling.DescribeTerminationPolicyTypesInput) (*request.Request, *autoscaling.DescribeTerminationPolicyTypesOutput) + + DescribeTerminationPolicyTypes(*autoscaling.DescribeTerminationPolicyTypesInput) (*autoscaling.DescribeTerminationPolicyTypesOutput, 
error) + + DetachInstancesRequest(*autoscaling.DetachInstancesInput) (*request.Request, *autoscaling.DetachInstancesOutput) + + DetachInstances(*autoscaling.DetachInstancesInput) (*autoscaling.DetachInstancesOutput, error) + + DetachLoadBalancersRequest(*autoscaling.DetachLoadBalancersInput) (*request.Request, *autoscaling.DetachLoadBalancersOutput) + + DetachLoadBalancers(*autoscaling.DetachLoadBalancersInput) (*autoscaling.DetachLoadBalancersOutput, error) + + DisableMetricsCollectionRequest(*autoscaling.DisableMetricsCollectionInput) (*request.Request, *autoscaling.DisableMetricsCollectionOutput) + + DisableMetricsCollection(*autoscaling.DisableMetricsCollectionInput) (*autoscaling.DisableMetricsCollectionOutput, error) + + EnableMetricsCollectionRequest(*autoscaling.EnableMetricsCollectionInput) (*request.Request, *autoscaling.EnableMetricsCollectionOutput) + + EnableMetricsCollection(*autoscaling.EnableMetricsCollectionInput) (*autoscaling.EnableMetricsCollectionOutput, error) + + EnterStandbyRequest(*autoscaling.EnterStandbyInput) (*request.Request, *autoscaling.EnterStandbyOutput) + + EnterStandby(*autoscaling.EnterStandbyInput) (*autoscaling.EnterStandbyOutput, error) + + ExecutePolicyRequest(*autoscaling.ExecutePolicyInput) (*request.Request, *autoscaling.ExecutePolicyOutput) + + ExecutePolicy(*autoscaling.ExecutePolicyInput) (*autoscaling.ExecutePolicyOutput, error) + + ExitStandbyRequest(*autoscaling.ExitStandbyInput) (*request.Request, *autoscaling.ExitStandbyOutput) + + ExitStandby(*autoscaling.ExitStandbyInput) (*autoscaling.ExitStandbyOutput, error) + + PutLifecycleHookRequest(*autoscaling.PutLifecycleHookInput) (*request.Request, *autoscaling.PutLifecycleHookOutput) + + PutLifecycleHook(*autoscaling.PutLifecycleHookInput) (*autoscaling.PutLifecycleHookOutput, error) + + PutNotificationConfigurationRequest(*autoscaling.PutNotificationConfigurationInput) (*request.Request, *autoscaling.PutNotificationConfigurationOutput) + + 
PutNotificationConfiguration(*autoscaling.PutNotificationConfigurationInput) (*autoscaling.PutNotificationConfigurationOutput, error) + + PutScalingPolicyRequest(*autoscaling.PutScalingPolicyInput) (*request.Request, *autoscaling.PutScalingPolicyOutput) + + PutScalingPolicy(*autoscaling.PutScalingPolicyInput) (*autoscaling.PutScalingPolicyOutput, error) + + PutScheduledUpdateGroupActionRequest(*autoscaling.PutScheduledUpdateGroupActionInput) (*request.Request, *autoscaling.PutScheduledUpdateGroupActionOutput) + + PutScheduledUpdateGroupAction(*autoscaling.PutScheduledUpdateGroupActionInput) (*autoscaling.PutScheduledUpdateGroupActionOutput, error) + + RecordLifecycleActionHeartbeatRequest(*autoscaling.RecordLifecycleActionHeartbeatInput) (*request.Request, *autoscaling.RecordLifecycleActionHeartbeatOutput) + + RecordLifecycleActionHeartbeat(*autoscaling.RecordLifecycleActionHeartbeatInput) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) + + ResumeProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, *autoscaling.ResumeProcessesOutput) + + ResumeProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.ResumeProcessesOutput, error) + + SetDesiredCapacityRequest(*autoscaling.SetDesiredCapacityInput) (*request.Request, *autoscaling.SetDesiredCapacityOutput) + + SetDesiredCapacity(*autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) + + SetInstanceHealthRequest(*autoscaling.SetInstanceHealthInput) (*request.Request, *autoscaling.SetInstanceHealthOutput) + + SetInstanceHealth(*autoscaling.SetInstanceHealthInput) (*autoscaling.SetInstanceHealthOutput, error) + + SetInstanceProtectionRequest(*autoscaling.SetInstanceProtectionInput) (*request.Request, *autoscaling.SetInstanceProtectionOutput) + + SetInstanceProtection(*autoscaling.SetInstanceProtectionInput) (*autoscaling.SetInstanceProtectionOutput, error) + + SuspendProcessesRequest(*autoscaling.ScalingProcessQuery) (*request.Request, 
*autoscaling.SuspendProcessesOutput) + + SuspendProcesses(*autoscaling.ScalingProcessQuery) (*autoscaling.SuspendProcessesOutput, error) + + TerminateInstanceInAutoScalingGroupRequest(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*request.Request, *autoscaling.TerminateInstanceInAutoScalingGroupOutput) + + TerminateInstanceInAutoScalingGroup(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) + + UpdateAutoScalingGroupRequest(*autoscaling.UpdateAutoScalingGroupInput) (*request.Request, *autoscaling.UpdateAutoScalingGroupOutput) + + UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) +} + +var _ AutoScalingAPI = (*autoscaling.AutoScaling)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1207 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package autoscaling_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/autoscaling" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleAutoScaling_AttachInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.AttachInstancesInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... 
+ }, + } + resp, err := svc.AttachInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_AttachLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.AttachLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.AttachLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CompleteLifecycleAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CompleteLifecycleActionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleActionResult: aws.String("LifecycleActionResult"), // Required + LifecycleActionToken: aws.String("LifecycleActionToken"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + } + resp, err := svc.CompleteLifecycleAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String("XmlStringMaxLen255"), // Required + MaxSize: aws.Int64(1), // Required + MinSize: aws.Int64(1), // Required + AvailabilityZones: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... 
+ }, + DefaultCooldown: aws.Int64(1), + DesiredCapacity: aws.Int64(1), + HealthCheckGracePeriod: aws.Int64(1), + HealthCheckType: aws.String("XmlStringMaxLen32"), + InstanceId: aws.String("XmlStringMaxLen19"), + LaunchConfigurationName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + NewInstancesProtectedFromScaleIn: aws.Bool(true), + PlacementGroup: aws.String("XmlStringMaxLen255"), + Tags: []*autoscaling.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TerminationPolicies: []*string{ + aws.String("XmlStringMaxLen1600"), // Required + // More values... + }, + VPCZoneIdentifier: aws.String("XmlStringMaxLen255"), + } + resp, err := svc.CreateAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateLaunchConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateLaunchConfigurationInput{ + LaunchConfigurationName: aws.String("XmlStringMaxLen255"), // Required + AssociatePublicIpAddress: aws.Bool(true), + BlockDeviceMappings: []*autoscaling.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("XmlStringMaxLen255"), // Required + Ebs: &autoscaling.Ebs{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("XmlStringMaxLen255"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("BlockDeviceEbsVolumeType"), + }, + NoDevice: aws.Bool(true), + VirtualName: aws.String("XmlStringMaxLen255"), + }, + // More values... 
+ }, + ClassicLinkVPCId: aws.String("XmlStringMaxLen255"), + ClassicLinkVPCSecurityGroups: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: aws.String("XmlStringMaxLen1600"), + ImageId: aws.String("XmlStringMaxLen255"), + InstanceId: aws.String("XmlStringMaxLen19"), + InstanceMonitoring: &autoscaling.InstanceMonitoring{ + Enabled: aws.Bool(true), + }, + InstanceType: aws.String("XmlStringMaxLen255"), + KernelId: aws.String("XmlStringMaxLen255"), + KeyName: aws.String("XmlStringMaxLen255"), + PlacementTenancy: aws.String("XmlStringMaxLen64"), + RamdiskId: aws.String("XmlStringMaxLen255"), + SecurityGroups: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + SpotPrice: aws.String("SpotPrice"), + UserData: aws.String("XmlStringUserData"), + } + resp, err := svc.CreateLaunchConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_CreateOrUpdateTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.CreateOrUpdateTagsInput{ + Tags: []*autoscaling.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.CreateOrUpdateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteAutoScalingGroupInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ForceDelete: aws.Bool(true), + } + resp, err := svc.DeleteAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteLaunchConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteLaunchConfigurationInput{ + LaunchConfigurationName: aws.String("ResourceName"), // Required + } + resp, err := svc.DeleteLaunchConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteLifecycleHook() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteLifecycleHookInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + } + resp, err := svc.DeleteLifecycleHook(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteNotificationConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteNotificationConfigurationInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + TopicARN: aws.String("ResourceName"), // Required + } + resp, err := svc.DeleteNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeletePolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeletePolicyInput{ + PolicyName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + } + resp, err := svc.DeletePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteScheduledAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteScheduledActionInput{ + ScheduledActionName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + } + resp, err := svc.DeleteScheduledAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DeleteTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DeleteTagsInput{ + Tags: []*autoscaling.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + PropagateAtLaunch: aws.Bool(true), + ResourceId: aws.String("XmlString"), + ResourceType: aws.String("XmlString"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAccountLimits() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAccountLimitsInput + resp, err := svc.DescribeAccountLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAdjustmentTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAdjustmentTypesInput + resp, err := svc.DescribeAdjustmentTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingGroups() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: []*string{ + aws.String("ResourceName"), // Required + // More values... 
+ }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeAutoScalingGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeAutoScalingInstancesInput{ + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeAutoScalingInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeAutoScalingNotificationTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeAutoScalingNotificationTypesInput + resp, err := svc.DescribeAutoScalingNotificationTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLaunchConfigurations() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLaunchConfigurationsInput{ + LaunchConfigurationNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeLaunchConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLifecycleHookTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeLifecycleHookTypesInput + resp, err := svc.DescribeLifecycleHookTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLifecycleHooks() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLifecycleHooksInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookNames: []*string{ + aws.String("AsciiStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.DescribeLifecycleHooks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeMetricCollectionTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeMetricCollectionTypesInput + resp, err := svc.DescribeMetricCollectionTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeNotificationConfigurations() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeNotificationConfigurationsInput{ + AutoScalingGroupNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeNotificationConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribePolicies() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribePoliciesInput{ + AutoScalingGroupName: aws.String("ResourceName"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + PolicyNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + PolicyTypes: []*string{ + aws.String("XmlStringMaxLen64"), // Required + // More values... + }, + } + resp, err := svc.DescribePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScalingActivities() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeScalingActivitiesInput{ + ActivityIds: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + AutoScalingGroupName: aws.String("ResourceName"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeScalingActivities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScalingProcessTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeScalingProcessTypesInput + resp, err := svc.DescribeScalingProcessTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeScheduledActions() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeScheduledActionsInput{ + AutoScalingGroupName: aws.String("ResourceName"), + EndTime: aws.Time(time.Now()), + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + ScheduledActionNames: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeScheduledActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeTags() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DescribeTagsInput{ + Filters: []*autoscaling.Filter{ + { // Required + Name: aws.String("XmlString"), + Values: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + }, + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("XmlString"), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_DescribeTerminationPolicyTypes() { + svc := autoscaling.New(session.New()) + + var params *autoscaling.DescribeTerminationPolicyTypesInput + resp, err := svc.DescribeTerminationPolicyTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DetachInstances() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DetachInstancesInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ShouldDecrementDesiredCapacity: aws.Bool(true), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.DetachInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DetachLoadBalancers() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DetachLoadBalancersInput{ + AutoScalingGroupName: aws.String("ResourceName"), + LoadBalancerNames: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.DetachLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_DisableMetricsCollection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.DisableMetricsCollectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + Metrics: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... 
+ }, + } + resp, err := svc.DisableMetricsCollection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_EnableMetricsCollection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.EnableMetricsCollectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + Granularity: aws.String("XmlStringMaxLen255"), // Required + Metrics: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.EnableMetricsCollection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_EnterStandby() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.EnterStandbyInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ShouldDecrementDesiredCapacity: aws.Bool(true), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.EnterStandby(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_ExecutePolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ExecutePolicyInput{ + PolicyName: aws.String("ResourceName"), // Required + AutoScalingGroupName: aws.String("ResourceName"), + BreachThreshold: aws.Float64(1.0), + HonorCooldown: aws.Bool(true), + MetricValue: aws.Float64(1.0), + } + resp, err := svc.ExecutePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_ExitStandby() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ExitStandbyInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + } + resp, err := svc.ExitStandby(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_PutLifecycleHook() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutLifecycleHookInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + DefaultResult: aws.String("LifecycleActionResult"), + HeartbeatTimeout: aws.Int64(1), + LifecycleTransition: aws.String("LifecycleTransition"), + NotificationMetadata: aws.String("XmlStringMaxLen1023"), + NotificationTargetARN: aws.String("ResourceName"), + RoleARN: aws.String("ResourceName"), + } + resp, err := svc.PutLifecycleHook(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_PutNotificationConfiguration() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutNotificationConfigurationInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + NotificationTypes: []*string{ // Required + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + TopicARN: aws.String("ResourceName"), // Required + } + resp, err := svc.PutNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_PutScalingPolicy() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutScalingPolicyInput{ + AdjustmentType: aws.String("XmlStringMaxLen255"), // Required + AutoScalingGroupName: aws.String("ResourceName"), // Required + PolicyName: aws.String("XmlStringMaxLen255"), // Required + Cooldown: aws.Int64(1), + EstimatedInstanceWarmup: aws.Int64(1), + MetricAggregationType: aws.String("XmlStringMaxLen32"), + MinAdjustmentMagnitude: aws.Int64(1), + MinAdjustmentStep: aws.Int64(1), + PolicyType: aws.String("XmlStringMaxLen64"), + ScalingAdjustment: aws.Int64(1), + StepAdjustments: []*autoscaling.StepAdjustment{ + { // Required + ScalingAdjustment: aws.Int64(1), // Required + MetricIntervalLowerBound: aws.Float64(1.0), + MetricIntervalUpperBound: aws.Float64(1.0), + }, + // More values... + }, + } + resp, err := svc.PutScalingPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_PutScheduledUpdateGroupAction() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.PutScheduledUpdateGroupActionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScheduledActionName: aws.String("XmlStringMaxLen255"), // Required + DesiredCapacity: aws.Int64(1), + EndTime: aws.Time(time.Now()), + MaxSize: aws.Int64(1), + MinSize: aws.Int64(1), + Recurrence: aws.String("XmlStringMaxLen255"), + StartTime: aws.Time(time.Now()), + Time: aws.Time(time.Now()), + } + resp, err := svc.PutScheduledUpdateGroupAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_RecordLifecycleActionHeartbeat() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.RecordLifecycleActionHeartbeatInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + LifecycleActionToken: aws.String("LifecycleActionToken"), // Required + LifecycleHookName: aws.String("AsciiStringMaxLen255"), // Required + } + resp, err := svc.RecordLifecycleActionHeartbeat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_ResumeProcesses() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScalingProcesses: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.ResumeProcesses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetDesiredCapacity() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetDesiredCapacityInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + DesiredCapacity: aws.Int64(1), // Required + HonorCooldown: aws.Bool(true), + } + resp, err := svc.SetDesiredCapacity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetInstanceHealth() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetInstanceHealthInput{ + HealthStatus: aws.String("XmlStringMaxLen32"), // Required + InstanceId: aws.String("XmlStringMaxLen19"), // Required + ShouldRespectGracePeriod: aws.Bool(true), + } + resp, err := svc.SetInstanceHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_SetInstanceProtection() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.SetInstanceProtectionInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + InstanceIds: []*string{ // Required + aws.String("XmlStringMaxLen19"), // Required + // More values... + }, + ProtectedFromScaleIn: aws.Bool(true), // Required + } + resp, err := svc.SetInstanceProtection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleAutoScaling_SuspendProcesses() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + ScalingProcesses: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + } + resp, err := svc.SuspendProcesses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_TerminateInstanceInAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.TerminateInstanceInAutoScalingGroupInput{ + InstanceId: aws.String("XmlStringMaxLen19"), // Required + ShouldDecrementDesiredCapacity: aws.Bool(true), // Required + } + resp, err := svc.TerminateInstanceInAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleAutoScaling_UpdateAutoScalingGroup() { + svc := autoscaling.New(session.New()) + + params := &autoscaling.UpdateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String("ResourceName"), // Required + AvailabilityZones: []*string{ + aws.String("XmlStringMaxLen255"), // Required + // More values... + }, + DefaultCooldown: aws.Int64(1), + DesiredCapacity: aws.Int64(1), + HealthCheckGracePeriod: aws.Int64(1), + HealthCheckType: aws.String("XmlStringMaxLen32"), + LaunchConfigurationName: aws.String("ResourceName"), + MaxSize: aws.Int64(1), + MinSize: aws.Int64(1), + NewInstancesProtectedFromScaleIn: aws.Bool(true), + PlacementGroup: aws.String("XmlStringMaxLen255"), + TerminationPolicies: []*string{ + aws.String("XmlStringMaxLen1600"), // Required + // More values... 
+ }, + VPCZoneIdentifier: aws.String("XmlStringMaxLen255"), + } + resp, err := svc.UpdateAutoScalingGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/autoscaling/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,88 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package autoscaling + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Auto Scaling is designed to automatically launch or terminate EC2 instances +// based on user-defined policies, schedules, and health checks. Use this service +// in conjunction with the Amazon CloudWatch and Elastic Load Balancing services. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type AutoScaling struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. 
+const ServiceName = "autoscaling" + +// New creates a new instance of the AutoScaling client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a AutoScaling client from just a session. +// svc := autoscaling.New(mySession) +// +// // Create a AutoScaling client with additional configuration +// svc := autoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScaling { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *AutoScaling { + svc := &AutoScaling{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2011-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a AutoScaling operation and runs any +// custom request initialization. 
+func (c *AutoScaling) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2304 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudformation provides a client for AWS CloudFormation. +package cloudformation + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opCancelUpdateStack = "CancelUpdateStack" + +// CancelUpdateStackRequest generates a request for the CancelUpdateStack operation. +func (c *CloudFormation) CancelUpdateStackRequest(input *CancelUpdateStackInput) (req *request.Request, output *CancelUpdateStackOutput) { + op := &request.Operation{ + Name: opCancelUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelUpdateStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelUpdateStackOutput{} + req.Data = output + return +} + +// Cancels an update on the specified stack. 
If the call completes successfully, +// the stack rolls back the update and reverts to the previous stack configuration. +// +// You can cancel only stacks that are in the UPDATE_IN_PROGRESS state. +func (c *CloudFormation) CancelUpdateStack(input *CancelUpdateStackInput) (*CancelUpdateStackOutput, error) { + req, out := c.CancelUpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opContinueUpdateRollback = "ContinueUpdateRollback" + +// ContinueUpdateRollbackRequest generates a request for the ContinueUpdateRollback operation. +func (c *CloudFormation) ContinueUpdateRollbackRequest(input *ContinueUpdateRollbackInput) (req *request.Request, output *ContinueUpdateRollbackOutput) { + op := &request.Operation{ + Name: opContinueUpdateRollback, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ContinueUpdateRollbackInput{} + } + + req = c.newRequest(op, input, output) + output = &ContinueUpdateRollbackOutput{} + req.Data = output + return +} + +// For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, continues +// rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause +// of the failure, you can manually fix the error (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-update-rollback-failed) +// and continue the rollback. By continuing the rollback, you can return your +// stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), return the +// stack to its original settings, and then try to update the stack again. +// +// A stack goes into the UPDATE_ROLLBACK_FAILED state when AWS CloudFormation +// cannot roll back all changes after a failed stack update. For example, you +// might have a stack that is rolling back to an old database instance that +// was deleted outside of AWS CloudFormation. 
Because AWS CloudFormation doesn't +// know the database was deleted, it assumes that the database instance still +// exists and attempts to roll back to it, causing the update rollback to fail. +func (c *CloudFormation) ContinueUpdateRollback(input *ContinueUpdateRollbackInput) (*ContinueUpdateRollbackOutput, error) { + req, out := c.ContinueUpdateRollbackRequest(input) + err := req.Send() + return out, err +} + +const opCreateStack = "CreateStack" + +// CreateStackRequest generates a request for the CreateStack operation. +func (c *CloudFormation) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) { + op := &request.Operation{ + Name: opCreateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStackOutput{} + req.Data = output + return +} + +// Creates a stack as specified in the template. After the call completes successfully, +// the stack creation starts. You can check the status of the stack via the +// DescribeStacks API. +func (c *CloudFormation) CreateStack(input *CreateStackInput) (*CreateStackOutput, error) { + req, out := c.CreateStackRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStack = "DeleteStack" + +// DeleteStackRequest generates a request for the DeleteStack operation. +func (c *CloudFormation) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { + op := &request.Operation{ + Name: opDeleteStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStackOutput{} + req.Data = output + return +} + +// Deletes a specified stack. Once the call completes successfully, stack deletion +// starts. 
Deleted stacks do not show up in the DescribeStacks API if the deletion +// has been completed successfully. +func (c *CloudFormation) DeleteStack(input *DeleteStackInput) (*DeleteStackOutput, error) { + req, out := c.DeleteStackRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountLimits = "DescribeAccountLimits" + +// DescribeAccountLimitsRequest generates a request for the DescribeAccountLimits operation. +func (c *CloudFormation) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { + op := &request.Operation{ + Name: opDescribeAccountLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountLimitsOutput{} + req.Data = output + return +} + +// Retrieves your account's AWS CloudFormation limits, such as the maximum number +// of stacks that you can create in your account. +func (c *CloudFormation) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackEvents = "DescribeStackEvents" + +// DescribeStackEventsRequest generates a request for the DescribeStackEvents operation. 
+func (c *CloudFormation) DescribeStackEventsRequest(input *DescribeStackEventsInput) (req *request.Request, output *DescribeStackEventsOutput) { + op := &request.Operation{ + Name: opDescribeStackEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeStackEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackEventsOutput{} + req.Data = output + return +} + +// Returns all stack related events for a specified stack. For more information +// about a stack's event history, go to Stacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-stack.html) +// in the AWS CloudFormation User Guide. +// +// You can list events for stacks that have failed to create or have been deleted +// by specifying the unique stack identifier (stack ID). +func (c *CloudFormation) DescribeStackEvents(input *DescribeStackEventsInput) (*DescribeStackEventsOutput, error) { + req, out := c.DescribeStackEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFormation) DescribeStackEventsPages(input *DescribeStackEventsInput, fn func(p *DescribeStackEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeStackEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeStackEventsOutput), lastPage) + }) +} + +const opDescribeStackResource = "DescribeStackResource" + +// DescribeStackResourceRequest generates a request for the DescribeStackResource operation. 
+func (c *CloudFormation) DescribeStackResourceRequest(input *DescribeStackResourceInput) (req *request.Request, output *DescribeStackResourceOutput) { + op := &request.Operation{ + Name: opDescribeStackResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackResourceOutput{} + req.Data = output + return +} + +// Returns a description of the specified resource in the specified stack. +// +// For deleted stacks, DescribeStackResource returns resource information for +// up to 90 days after the stack has been deleted. +func (c *CloudFormation) DescribeStackResource(input *DescribeStackResourceInput) (*DescribeStackResourceOutput, error) { + req, out := c.DescribeStackResourceRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackResources = "DescribeStackResources" + +// DescribeStackResourcesRequest generates a request for the DescribeStackResources operation. +func (c *CloudFormation) DescribeStackResourcesRequest(input *DescribeStackResourcesInput) (req *request.Request, output *DescribeStackResourcesOutput) { + op := &request.Operation{ + Name: opDescribeStackResources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackResourcesOutput{} + req.Data = output + return +} + +// Returns AWS resource descriptions for running and deleted stacks. If StackName +// is specified, all the associated resources that are part of the stack are +// returned. If PhysicalResourceId is specified, the associated resources of +// the stack that the resource belongs to are returned. +// +// Only the first 100 resources will be returned. If your stack has more resources +// than this, you should use ListStackResources instead. 
For deleted stacks, +// DescribeStackResources returns resource information for up to 90 days after +// the stack has been deleted. +// +// You must specify either StackName or PhysicalResourceId, but not both. In +// addition, you can specify LogicalResourceId to filter the returned result. +// For more information about resources, the LogicalResourceId and PhysicalResourceId, +// go to the AWS CloudFormation User Guide (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/). +// +// A ValidationError is returned if you specify both StackName and PhysicalResourceId +// in the same request. +func (c *CloudFormation) DescribeStackResources(input *DescribeStackResourcesInput) (*DescribeStackResourcesOutput, error) { + req, out := c.DescribeStackResourcesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStacks = "DescribeStacks" + +// DescribeStacksRequest generates a request for the DescribeStacks operation. +func (c *CloudFormation) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) { + op := &request.Operation{ + Name: opDescribeStacks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStacksOutput{} + req.Data = output + return +} + +// Returns the description for the specified stack; if no stack name was specified, +// then it returns the description for all the stacks created. 
+func (c *CloudFormation) DescribeStacks(input *DescribeStacksInput) (*DescribeStacksOutput, error) { + req, out := c.DescribeStacksRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFormation) DescribeStacksPages(input *DescribeStacksInput, fn func(p *DescribeStacksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeStacksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeStacksOutput), lastPage) + }) +} + +const opEstimateTemplateCost = "EstimateTemplateCost" + +// EstimateTemplateCostRequest generates a request for the EstimateTemplateCost operation. +func (c *CloudFormation) EstimateTemplateCostRequest(input *EstimateTemplateCostInput) (req *request.Request, output *EstimateTemplateCostOutput) { + op := &request.Operation{ + Name: opEstimateTemplateCost, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EstimateTemplateCostInput{} + } + + req = c.newRequest(op, input, output) + output = &EstimateTemplateCostOutput{} + req.Data = output + return +} + +// Returns the estimated monthly cost of a template. The return value is an +// AWS Simple Monthly Calculator URL with a query string that describes the +// resources required to run the template. +func (c *CloudFormation) EstimateTemplateCost(input *EstimateTemplateCostInput) (*EstimateTemplateCostOutput, error) { + req, out := c.EstimateTemplateCostRequest(input) + err := req.Send() + return out, err +} + +const opGetStackPolicy = "GetStackPolicy" + +// GetStackPolicyRequest generates a request for the GetStackPolicy operation. 
+func (c *CloudFormation) GetStackPolicyRequest(input *GetStackPolicyInput) (req *request.Request, output *GetStackPolicyOutput) { + op := &request.Operation{ + Name: opGetStackPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetStackPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStackPolicyOutput{} + req.Data = output + return +} + +// Returns the stack policy for a specified stack. If a stack doesn't have a +// policy, a null value is returned. +func (c *CloudFormation) GetStackPolicy(input *GetStackPolicyInput) (*GetStackPolicyOutput, error) { + req, out := c.GetStackPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetTemplate = "GetTemplate" + +// GetTemplateRequest generates a request for the GetTemplate operation. +func (c *CloudFormation) GetTemplateRequest(input *GetTemplateInput) (req *request.Request, output *GetTemplateOutput) { + op := &request.Operation{ + Name: opGetTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTemplateOutput{} + req.Data = output + return +} + +// Returns the template body for a specified stack. You can get the template +// for running or deleted stacks. +// +// For deleted stacks, GetTemplate returns the template for up to 90 days after +// the stack has been deleted. +// +// If the template does not exist, a ValidationError is returned. +func (c *CloudFormation) GetTemplate(input *GetTemplateInput) (*GetTemplateOutput, error) { + req, out := c.GetTemplateRequest(input) + err := req.Send() + return out, err +} + +const opGetTemplateSummary = "GetTemplateSummary" + +// GetTemplateSummaryRequest generates a request for the GetTemplateSummary operation. 
+func (c *CloudFormation) GetTemplateSummaryRequest(input *GetTemplateSummaryInput) (req *request.Request, output *GetTemplateSummaryOutput) { + op := &request.Operation{ + Name: opGetTemplateSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTemplateSummaryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTemplateSummaryOutput{} + req.Data = output + return +} + +// Returns information about a new or existing template. The GetTemplateSummary +// action is useful for viewing parameter information, such as default parameter +// values and parameter types, before you create or update a stack. +// +// You can use the GetTemplateSummary action when you submit a template, or +// you can get template information for a running or deleted stack. +// +// For deleted stacks, GetTemplateSummary returns the template information +// for up to 90 days after the stack has been deleted. If the template does +// not exist, a ValidationError is returned. +func (c *CloudFormation) GetTemplateSummary(input *GetTemplateSummaryInput) (*GetTemplateSummaryOutput, error) { + req, out := c.GetTemplateSummaryRequest(input) + err := req.Send() + return out, err +} + +const opListStackResources = "ListStackResources" + +// ListStackResourcesRequest generates a request for the ListStackResources operation. +func (c *CloudFormation) ListStackResourcesRequest(input *ListStackResourcesInput) (req *request.Request, output *ListStackResourcesOutput) { + op := &request.Operation{ + Name: opListStackResources, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListStackResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStackResourcesOutput{} + req.Data = output + return +} + +// Returns descriptions of all resources of the specified stack. 
+// +// For deleted stacks, ListStackResources returns resource information for +// up to 90 days after the stack has been deleted. +func (c *CloudFormation) ListStackResources(input *ListStackResourcesInput) (*ListStackResourcesOutput, error) { + req, out := c.ListStackResourcesRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFormation) ListStackResourcesPages(input *ListStackResourcesInput, fn func(p *ListStackResourcesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStackResourcesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStackResourcesOutput), lastPage) + }) +} + +const opListStacks = "ListStacks" + +// ListStacksRequest generates a request for the ListStacks operation. +func (c *CloudFormation) ListStacksRequest(input *ListStacksInput) (req *request.Request, output *ListStacksOutput) { + op := &request.Operation{ + Name: opListStacks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStacksOutput{} + req.Data = output + return +} + +// Returns the summary information for stacks whose status matches the specified +// StackStatusFilter. Summary information for stacks that have been deleted +// is kept for 90 days after the stack is deleted. If no StackStatusFilter is +// specified, summary information for all stacks is returned (including existing +// stacks and stacks that have been deleted). 
+func (c *CloudFormation) ListStacks(input *ListStacksInput) (*ListStacksOutput, error) { + req, out := c.ListStacksRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFormation) ListStacksPages(input *ListStacksInput, fn func(p *ListStacksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStacksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStacksOutput), lastPage) + }) +} + +const opSetStackPolicy = "SetStackPolicy" + +// SetStackPolicyRequest generates a request for the SetStackPolicy operation. +func (c *CloudFormation) SetStackPolicyRequest(input *SetStackPolicyInput) (req *request.Request, output *SetStackPolicyOutput) { + op := &request.Operation{ + Name: opSetStackPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetStackPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetStackPolicyOutput{} + req.Data = output + return +} + +// Sets a stack policy for a specified stack. +func (c *CloudFormation) SetStackPolicy(input *SetStackPolicyInput) (*SetStackPolicyOutput, error) { + req, out := c.SetStackPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSignalResource = "SignalResource" + +// SignalResourceRequest generates a request for the SignalResource operation. 
+func (c *CloudFormation) SignalResourceRequest(input *SignalResourceInput) (req *request.Request, output *SignalResourceOutput) { + op := &request.Operation{ + Name: opSignalResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SignalResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SignalResourceOutput{} + req.Data = output + return +} + +// Sends a signal to the specified resource with a success or failure status. +// You can use the SignalResource API in conjunction with a creation policy +// or update policy. AWS CloudFormation doesn't proceed with a stack creation +// or update until resources receive the required number of signals or the timeout +// period is exceeded. The SignalResource API is useful in cases where you want +// to send signals from anywhere other than an Amazon EC2 instance. +func (c *CloudFormation) SignalResource(input *SignalResourceInput) (*SignalResourceOutput, error) { + req, out := c.SignalResourceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateStack = "UpdateStack" + +// UpdateStackRequest generates a request for the UpdateStack operation. +func (c *CloudFormation) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { + op := &request.Operation{ + Name: opUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateStackOutput{} + req.Data = output + return +} + +// Updates a stack as specified in the template. After the call completes successfully, +// the stack update starts. You can check the status of the stack via the DescribeStacks +// action. +// +// To get a copy of the template for an existing stack, you can use the GetTemplate +// action. 
+// +// Tags that were associated with this stack during creation time will still +// be associated with the stack after an UpdateStack operation. +// +// For more information about creating an update template, updating a stack, +// and monitoring the progress of the update, see Updating a Stack (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html). +func (c *CloudFormation) UpdateStack(input *UpdateStackInput) (*UpdateStackOutput, error) { + req, out := c.UpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opValidateTemplate = "ValidateTemplate" + +// ValidateTemplateRequest generates a request for the ValidateTemplate operation. +func (c *CloudFormation) ValidateTemplateRequest(input *ValidateTemplateInput) (req *request.Request, output *ValidateTemplateOutput) { + op := &request.Operation{ + Name: opValidateTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidateTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &ValidateTemplateOutput{} + req.Data = output + return +} + +// Validates a specified template. +func (c *CloudFormation) ValidateTemplate(input *ValidateTemplateInput) (*ValidateTemplateOutput, error) { + req, out := c.ValidateTemplateRequest(input) + err := req.Send() + return out, err +} + +// The AccountLimit data type. +type AccountLimit struct { + _ struct{} `type:"structure"` + + // The name of the account limit. Currently, the only account limit is StackLimit. + Name *string `type:"string"` + + // The value that is associated with the account limit name. + Value *int64 `type:"integer"` +} + +// String returns the string representation +func (s AccountLimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountLimit) GoString() string { + return s.String() +} + +// The input for the CancelUpdateStack action. 
type CancelUpdateStackInput struct {
	// Marker field carrying the wire-type tag for the protocol marshaler
	// (presumably how aws-sdk-go tags structure shapes — see the query protocol).
	_ struct{} `type:"structure"`

	// The name or the unique stack ID that is associated with the stack.
	StackName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s CancelUpdateStackInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CancelUpdateStackInput) GoString() string {
	return s.String()
}

// The output for a CancelUpdateStack action; it carries no fields.
type CancelUpdateStackOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s CancelUpdateStackOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CancelUpdateStackOutput) GoString() string {
	return s.String()
}

// The input for the ContinueUpdateRollback action.
type ContinueUpdateRollbackInput struct {
	_ struct{} `type:"structure"`

	// The name or the unique ID of the stack that you want to continue rolling
	// back.
	StackName *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s ContinueUpdateRollbackInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ContinueUpdateRollbackInput) GoString() string {
	return s.String()
}

// The output for a ContinueUpdateRollback action.
type ContinueUpdateRollbackOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ContinueUpdateRollbackOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ContinueUpdateRollbackOutput) GoString() string {
	return s.String()
}

// The input for CreateStack action.
type CreateStackInput struct {
	_ struct{} `type:"structure"`

	// A list of capabilities that you must specify before AWS CloudFormation can
	// create or update certain stacks. Some stack templates might include resources
	// that can affect permissions in your AWS account. For those stacks, you must
	// explicitly acknowledge their capabilities by specifying this parameter.
	//
	// Currently, the only valid value is CAPABILITY_IAM, which is required for
	// the following resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html),
	// AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html),
	// AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html),
	// AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html),
	// AWS::IAM::Role (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html),
	// AWS::IAM::User (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html),
	// and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html).
	// If your stack template contains these resources, we recommend that you review
	// any permissions associated with them. If you don't specify this parameter,
	// this action returns an InsufficientCapabilities error.
	Capabilities []*string `type:"list"`

	// Set to true to disable rollback of the stack if stack creation failed. You
	// can specify either DisableRollback or OnFailure, but not both.
	//
	// Default: false
	DisableRollback *bool `type:"boolean"`

	// The Simple Notification Service (SNS) topic ARNs to publish stack related
	// events. You can find your SNS topic ARNs using the SNS console (http://console.aws.amazon.com/sns)
	// or your Command Line Interface (CLI).
	NotificationARNs []*string `type:"list"`

	// Determines what action will be taken if stack creation fails. This must be
	// one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure
	// or DisableRollback, but not both.
	//
	// Default: ROLLBACK
	OnFailure *string `type:"string" enum:"OnFailure"`

	// A list of Parameter structures that specify input parameters for the stack.
	// For more information, see the Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html)
	// data type.
	Parameters []*Parameter `type:"list"`

	// The template resource types that you have permissions to work with for this
	// create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.
	// Use the following syntax to describe template resource types: AWS::* (for
	// all AWS resource), Custom::* (for all custom resources), Custom::logical_ID
	// (for a specific custom resource), AWS::service_name::* (for all resources
	// of a particular AWS service), and AWS::service_name::resource_logical_ID
	// (for a specific AWS resource).
	//
	// If the list of resource types doesn't include a resource that you're creating,
	// the stack creation fails. By default, AWS CloudFormation grants permissions
	// to all resource types. AWS Identity and Access Management (IAM) uses this
	// parameter for AWS CloudFormation-specific condition keys in IAM policies.
	// For more information, see Controlling Access with AWS Identity and Access
	// Management (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html).
	ResourceTypes []*string `type:"list"`

	// The name that is associated with the stack. The name must be unique in the
	// region in which you are creating the stack.
	//
	// A stack name can contain only alphanumeric characters (case sensitive) and
	// hyphens. It must start with an alphabetic character and cannot be longer
	// than 128 characters.
	StackName *string `type:"string" required:"true"`

	// Structure containing the stack policy body. For more information, go to
	// Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html)
	// in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody
	// or the StackPolicyURL parameter, but not both.
	StackPolicyBody *string `min:"1" type:"string"`

	// Location of a file containing the stack policy. The URL must point to a policy
	// (max size: 16KB) located in an S3 bucket in the same region as the stack.
	// You can specify either the StackPolicyBody or the StackPolicyURL parameter,
	// but not both.
	StackPolicyURL *string `min:"1" type:"string"`

	// Key-value pairs to associate with this stack. AWS CloudFormation also propagates
	// these tags to the resources created in the stack. A maximum number of 10
	// tags can be specified.
	Tags []*Tag `type:"list"`

	// Structure containing the template body with a minimum length of 1 byte and
	// a maximum length of 51,200 bytes. For more information, go to Template Anatomy
	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
	// in the AWS CloudFormation User Guide.
	//
	// Conditional: You must specify either the TemplateBody or the TemplateURL
	// parameter, but not both.
	TemplateBody *string `min:"1" type:"string"`

	// Location of file containing the template body. The URL must point to a template
	// (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more
	// information, go to the Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
	// in the AWS CloudFormation User Guide.
	//
	// Conditional: You must specify either the TemplateBody or the TemplateURL
	// parameter, but not both.
	TemplateURL *string `min:"1" type:"string"`

	// The amount of time that can pass before the stack status becomes CREATE_FAILED;
	// if DisableRollback is not set or is set to false, the stack will be rolled
	// back.
	TimeoutInMinutes *int64 `min:"1" type:"integer"`
}

// String returns the string representation
func (s CreateStackInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateStackInput) GoString() string {
	return s.String()
}

// The output for a CreateStack action.
type CreateStackOutput struct {
	_ struct{} `type:"structure"`

	// Unique identifier of the stack.
	StackId *string `type:"string"`
}

// String returns the string representation
func (s CreateStackOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateStackOutput) GoString() string {
	return s.String()
}

// The input for DeleteStack action.
type DeleteStackInput struct {
	_ struct{} `type:"structure"`

	// The name or the unique stack ID that is associated with the stack.
	StackName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteStackInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteStackInput) GoString() string {
	return s.String()
}

// The output for a DeleteStack action; it carries no fields.
type DeleteStackOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s DeleteStackOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteStackOutput) GoString() string {
	return s.String()
}

// The input for the DescribeAccountLimits action.
type DescribeAccountLimitsInput struct {
	_ struct{} `type:"structure"`

	// A string that identifies the next page of limits that you want to retrieve.
	NextToken *string `min:"1" type:"string"`
}

// String returns the string representation
func (s DescribeAccountLimitsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeAccountLimitsInput) GoString() string {
	return s.String()
}

// The output for the DescribeAccountLimits action.
type DescribeAccountLimitsOutput struct {
	_ struct{} `type:"structure"`

	// An account limit structure that contain a list of AWS CloudFormation account
	// limits and their values.
	AccountLimits []*AccountLimit `type:"list"`

	// If the output exceeds 1 MB in size, a string that identifies the next page
	// of limits. If no additional page exists, this value is null.
	NextToken *string `min:"1" type:"string"`
}

// String returns the string representation
func (s DescribeAccountLimitsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeAccountLimitsOutput) GoString() string {
	return s.String()
}

// The input for DescribeStackEvents action.
type DescribeStackEventsInput struct {
	_ struct{} `type:"structure"`

	// A string that identifies the next page of events that you want to retrieve.
	NextToken *string `min:"1" type:"string"`

	// The name or the unique stack ID that is associated with the stack, which
	// are not always interchangeable:
	//
	// Running stacks: You can specify either the stack's name or its unique stack
	// ID. Deleted stacks: You must specify the unique stack ID. Default: There
	// is no default value.
	StackName *string `type:"string"`
}

// String returns the string representation
func (s DescribeStackEventsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStackEventsInput) GoString() string {
	return s.String()
}

// The output for a DescribeStackEvents action.
type DescribeStackEventsOutput struct {
	_ struct{} `type:"structure"`

	// If the output exceeds 1 MB in size, a string that identifies the next page
	// of events. If no additional page exists, this value is null.
	NextToken *string `min:"1" type:"string"`

	// A list of StackEvents structures.
	StackEvents []*StackEvent `type:"list"`
}

// String returns the string representation
func (s DescribeStackEventsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStackEventsOutput) GoString() string {
	return s.String()
}

// The input for DescribeStackResource action.
type DescribeStackResourceInput struct {
	_ struct{} `type:"structure"`

	// The logical name of the resource as specified in the template.
	//
	// Default: There is no default value.
	LogicalResourceId *string `type:"string" required:"true"`

	// The name or the unique stack ID that is associated with the stack, which
	// are not always interchangeable:
	//
	// Running stacks: You can specify either the stack's name or its unique stack
	// ID. Deleted stacks: You must specify the unique stack ID. Default: There
	// is no default value.
	StackName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s DescribeStackResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStackResourceInput) GoString() string {
	return s.String()
}

// The output for a DescribeStackResource action.
type DescribeStackResourceOutput struct {
	_ struct{} `type:"structure"`

	// A StackResourceDetail structure containing the description of the specified
	// resource in the specified stack.
	StackResourceDetail *StackResourceDetail `type:"structure"`
}

// String returns the string representation
func (s DescribeStackResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStackResourceOutput) GoString() string {
	return s.String()
}

// The input for DescribeStackResources action.
type DescribeStackResourcesInput struct {
	_ struct{} `type:"structure"`

	// The logical name of the resource as specified in the template.
	//
	// Default: There is no default value.
	LogicalResourceId *string `type:"string"`

	// The name or unique identifier that corresponds to a physical instance ID
	// of a resource supported by AWS CloudFormation.
	//
	// For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId
	// corresponds to the InstanceId. You can pass the EC2 InstanceId to DescribeStackResources
	// to find which stack the instance belongs to and what other resources are
	// part of the stack.
	//
	// Required: Conditional. If you do not specify PhysicalResourceId, you must
	// specify StackName.
	//
	// Default: There is no default value.
	PhysicalResourceId *string `type:"string"`

	// The name or the unique stack ID that is associated with the stack, which
	// are not always interchangeable:
	//
	// Running stacks: You can specify either the stack's name or its unique stack
	// ID. Deleted stacks: You must specify the unique stack ID. Default: There
	// is no default value.
	//
	// Required: Conditional. If you do not specify StackName, you must specify
	// PhysicalResourceId.
	StackName *string `type:"string"`
}

// String returns the string representation
func (s DescribeStackResourcesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStackResourcesInput) GoString() string {
	return s.String()
}

// The output for a DescribeStackResources action.
type DescribeStackResourcesOutput struct {
	_ struct{} `type:"structure"`

	// A list of StackResource structures.
	StackResources []*StackResource `type:"list"`
}

// String returns the string representation
func (s DescribeStackResourcesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStackResourcesOutput) GoString() string {
	return s.String()
}

// The input for DescribeStacks action.
type DescribeStacksInput struct {
	_ struct{} `type:"structure"`

	// A string that identifies the next page of stacks that you want to retrieve.
	NextToken *string `min:"1" type:"string"`

	// The name or the unique stack ID that is associated with the stack, which
	// are not always interchangeable:
	//
	// Running stacks: You can specify either the stack's name or its unique stack
	// ID. Deleted stacks: You must specify the unique stack ID. Default: There
	// is no default value.
	StackName *string `type:"string"`
}

// String returns the string representation
func (s DescribeStacksInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStacksInput) GoString() string {
	return s.String()
}

// The output for a DescribeStacks action.
type DescribeStacksOutput struct {
	_ struct{} `type:"structure"`

	// If the output exceeds 1 MB in size, a string that identifies the next page
	// of stacks. If no additional page exists, this value is null.
	NextToken *string `min:"1" type:"string"`

	// A list of stack structures.
	Stacks []*Stack `type:"list"`
}

// String returns the string representation
func (s DescribeStacksOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStacksOutput) GoString() string {
	return s.String()
}

// The input for an EstimateTemplateCost action.
type EstimateTemplateCostInput struct {
	_ struct{} `type:"structure"`

	// A list of Parameter structures that specify input parameters.
	Parameters []*Parameter `type:"list"`

	// Structure containing the template body with a minimum length of 1 byte and
	// a maximum length of 51,200 bytes. (For more information, go to Template Anatomy
	// (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
	// in the AWS CloudFormation User Guide.)
	//
	// Conditional: You must pass TemplateBody or TemplateURL. If both are passed,
	// only TemplateBody is used.
	TemplateBody *string `min:"1" type:"string"`

	// Location of file containing the template body. The URL must point to a template
	// that is located in an Amazon S3 bucket. For more information, go to Template
	// Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
	// in the AWS CloudFormation User Guide.
	//
	// Conditional: You must pass TemplateURL or TemplateBody. If both are passed,
	// only TemplateBody is used.
	TemplateURL *string `min:"1" type:"string"`
}

// String returns the string representation
func (s EstimateTemplateCostInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s EstimateTemplateCostInput) GoString() string {
	return s.String()
}

// The output for a EstimateTemplateCost action.
type EstimateTemplateCostOutput struct {
	_ struct{} `type:"structure"`

	// An AWS Simple Monthly Calculator URL with a query string that describes the
	// resources required to run the template.
	Url *string `type:"string"`
}

// String returns the string representation
func (s EstimateTemplateCostOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s EstimateTemplateCostOutput) GoString() string {
	return s.String()
}

// The input for the GetStackPolicy action.
type GetStackPolicyInput struct {
	_ struct{} `type:"structure"`

	// The name or unique stack ID that is associated with the stack whose policy
	// you want to get.
	StackName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s GetStackPolicyInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetStackPolicyInput) GoString() string {
	return s.String()
}

// The output for the GetStackPolicy action.
type GetStackPolicyOutput struct {
	_ struct{} `type:"structure"`

	// Structure containing the stack policy body. (For more information, go to
	// Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html)
	// in the AWS CloudFormation User Guide.)
	StackPolicyBody *string `min:"1" type:"string"`
}

// String returns the string representation
func (s GetStackPolicyOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetStackPolicyOutput) GoString() string {
	return s.String()
}

// The input for a GetTemplate action.
type GetTemplateInput struct {
	_ struct{} `type:"structure"`

	// The name or the unique stack ID that is associated with the stack, which
	// are not always interchangeable:
	//
	// Running stacks: You can specify either the stack's name or its unique stack
	// ID. Deleted stacks: You must specify the unique stack ID. Default: There
	// is no default value.
	StackName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s GetTemplateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetTemplateInput) GoString() string {
	return s.String()
}

// The output for GetTemplate action.
type GetTemplateOutput struct {
	_ struct{} `type:"structure"`

	// Structure containing the template body. (For more information, go to Template
	// Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
	// in the AWS CloudFormation User Guide.)
	TemplateBody *string `min:"1" type:"string"`
}

// String returns the string representation
func (s GetTemplateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetTemplateOutput) GoString() string {
	return s.String()
}

// The input for the GetTemplateSummary action.
type GetTemplateSummaryInput struct {
	_ struct{} `type:"structure"`

	// The name or the stack ID that is associated with the stack, which are not
	// always interchangeable. For running stacks, you can specify either the stack's
	// name or its unique stack ID. For deleted stack, you must specify the unique
	// stack ID.
	//
	// Conditional: You must specify only one of the following parameters: StackName,
	// TemplateBody, or TemplateURL.
	StackName *string `min:"1" type:"string"`

	// Structure containing the template body with a minimum length of 1 byte and
	// a maximum length of 51,200 bytes. For more information about templates, see
	// Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
	// in the AWS CloudFormation User Guide.
	//
	// Conditional: You must specify only one of the following parameters: StackName,
	// TemplateBody, or TemplateURL.
	TemplateBody *string `min:"1" type:"string"`

	// Location of file containing the template body. The URL must point to a template
	// (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more
	// information about templates, see Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
	// in the AWS CloudFormation User Guide.
	//
	// Conditional: You must specify only one of the following parameters: StackName,
	// TemplateBody, or TemplateURL.
	TemplateURL *string `min:"1" type:"string"`
}

// String returns the string representation
func (s GetTemplateSummaryInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetTemplateSummaryInput) GoString() string {
	return s.String()
}

// The output for the GetTemplateSummary action.
type GetTemplateSummaryOutput struct {
	_ struct{} `type:"structure"`

	// The capabilities found within the template. Currently, AWS CloudFormation
	// supports only the CAPABILITY_IAM capability. If your template contains IAM
	// resources, you must specify the CAPABILITY_IAM value for this parameter when
	// you use the CreateStack or UpdateStack actions with your template; otherwise,
	// those actions return an InsufficientCapabilities error.
	Capabilities []*string `type:"list"`

	// The list of resources that generated the values in the Capabilities response
	// element.
	CapabilitiesReason *string `type:"string"`

	// The value that is defined in the Description property of the template.
	Description *string `type:"string"`

	// The value that is defined for the Metadata property of the template.
	Metadata *string `type:"string"`

	// A list of parameter declarations that describe various properties for each
	// parameter.
	Parameters []*ParameterDeclaration `type:"list"`

	// A list of all the template resource types that are defined in the template,
	// such as AWS::EC2::Instance, AWS::Dynamo::Table, and Custom::MyCustomInstance.
	ResourceTypes []*string `type:"list"`

	// The AWS template format version, which identifies the capabilities of the
	// template.
	Version *string `type:"string"`
}

// String returns the string representation
func (s GetTemplateSummaryOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetTemplateSummaryOutput) GoString() string {
	return s.String()
}

// The input for the ListStackResource action.
type ListStackResourcesInput struct {
	_ struct{} `type:"structure"`

	// A string that identifies the next page of stack resources that you want to
	// retrieve.
	NextToken *string `min:"1" type:"string"`

	// The name or the unique stack ID that is associated with the stack, which
	// are not always interchangeable:
	//
	// Running stacks: You can specify either the stack's name or its unique stack
	// ID. Deleted stacks: You must specify the unique stack ID. Default: There
	// is no default value.
	StackName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s ListStackResourcesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListStackResourcesInput) GoString() string {
	return s.String()
}

// The output for a ListStackResources action.
type ListStackResourcesOutput struct {
	_ struct{} `type:"structure"`

	// If the output exceeds 1 MB in size, a string that identifies the next page
	// of stack resources. If no additional page exists, this value is null.
	NextToken *string `min:"1" type:"string"`

	// A list of StackResourceSummary structures.
	StackResourceSummaries []*StackResourceSummary `type:"list"`
}

// String returns the string representation
func (s ListStackResourcesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListStackResourcesOutput) GoString() string {
	return s.String()
}

// The input for ListStacks action.
type ListStacksInput struct {
	_ struct{} `type:"structure"`

	// A string that identifies the next page of stacks that you want to retrieve.
	NextToken *string `min:"1" type:"string"`

	// Stack status to use as a filter. Specify one or more stack status codes to
	// list only stacks with the specified status codes. For a complete list of
	// stack status codes, see the StackStatus parameter of the Stack data type.
	StackStatusFilter []*string `type:"list"`
}

// String returns the string representation
func (s ListStacksInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListStacksInput) GoString() string {
	return s.String()
}

// The output for ListStacks action.
type ListStacksOutput struct {
	_ struct{} `type:"structure"`

	// If the output exceeds 1 MB in size, a string that identifies the next page
	// of stacks. If no additional page exists, this value is null.
	NextToken *string `min:"1" type:"string"`

	// A list of StackSummary structures containing information about the specified
	// stacks.
	StackSummaries []*StackSummary `type:"list"`
}

// String returns the string representation
func (s ListStacksOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListStacksOutput) GoString() string {
	return s.String()
}

// The Output data type.
type Output struct {
	_ struct{} `type:"structure"`

	// User defined description associated with the output.
	Description *string `type:"string"`

	// The key associated with the output.
	OutputKey *string `type:"string"`

	// The value associated with the output.
	OutputValue *string `type:"string"`
}

// String returns the string representation
func (s Output) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Output) GoString() string {
	return s.String()
}

// The Parameter data type.
type Parameter struct {
	_ struct{} `type:"structure"`

	// The key associated with the parameter. If you don't specify a key and value
	// for a particular parameter, AWS CloudFormation uses the default value that
	// is specified in your template.
	ParameterKey *string `type:"string"`

	// The value associated with the parameter.
	ParameterValue *string `type:"string"`

	// During a stack update, use the existing parameter value that the stack is
	// using for a given parameter key. If you specify true, do not specify a parameter
	// value.
	UsePreviousValue *bool `type:"boolean"`
}

// String returns the string representation
func (s Parameter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Parameter) GoString() string {
	return s.String()
}

// A set of criteria that AWS CloudFormation uses to validate parameter values.
// Although other constraints might be defined in the stack template, AWS CloudFormation
// returns only the AllowedValues property.
type ParameterConstraints struct {
	_ struct{} `type:"structure"`

	// A list of values that are permitted for a parameter.
	AllowedValues []*string `type:"list"`
}

// String returns the string representation
func (s ParameterConstraints) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ParameterConstraints) GoString() string {
	return s.String()
}

// The ParameterDeclaration data type.
+type ParameterDeclaration struct { + _ struct{} `type:"structure"` + + // The default value of the parameter. + DefaultValue *string `type:"string"` + + // The description that is associate with the parameter. + Description *string `type:"string"` + + // Flag that indicates whether the parameter value is shown as plain text in + // logs and in the AWS Management Console. + NoEcho *bool `type:"boolean"` + + // The criteria that AWS CloudFormation uses to validate parameter values. + ParameterConstraints *ParameterConstraints `type:"structure"` + + // The name that is associated with the parameter. + ParameterKey *string `type:"string"` + + // The type of parameter. + ParameterType *string `type:"string"` +} + +// String returns the string representation +func (s ParameterDeclaration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterDeclaration) GoString() string { + return s.String() +} + +// The input for the SetStackPolicy action. +type SetStackPolicyInput struct { + _ struct{} `type:"structure"` + + // The name or unique stack ID that you want to associate a policy with. + StackName *string `type:"string" required:"true"` + + // Structure containing the stack policy body. For more information, go to + // Prevent Updates to Stack Resources (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) + // in the AWS CloudFormation User Guide. You can specify either the StackPolicyBody + // or the StackPolicyURL parameter, but not both. + StackPolicyBody *string `min:"1" type:"string"` + + // Location of a file containing the stack policy. The URL must point to a policy + // (max size: 16KB) located in an S3 bucket in the same region as the stack. + // You can specify either the StackPolicyBody or the StackPolicyURL parameter, + // but not both. 
+ StackPolicyURL *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SetStackPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetStackPolicyInput) GoString() string { + return s.String() +} + +type SetStackPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetStackPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetStackPolicyOutput) GoString() string { + return s.String() +} + +// The input for the SignalResource action. +type SignalResourceInput struct { + _ struct{} `type:"structure"` + + // The logical ID of the resource that you want to signal. The logical ID is + // the name of the resource that given in the template. + LogicalResourceId *string `type:"string" required:"true"` + + // The stack name or unique stack ID that includes the resource that you want + // to signal. + StackName *string `min:"1" type:"string" required:"true"` + + // The status of the signal, which is either success or failure. A failure signal + // causes AWS CloudFormation to immediately fail the stack creation or update. + Status *string `type:"string" required:"true" enum:"ResourceSignalStatus"` + + // A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling + // groups, specify the instance ID that you are signaling as the unique ID. + // If you send multiple signals to a single resource (such as signaling a wait + // condition), each signal requires a different unique ID. 
+ UniqueId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SignalResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalResourceInput) GoString() string { + return s.String() +} + +type SignalResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SignalResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalResourceOutput) GoString() string { + return s.String() +} + +// The Stack data type. +type Stack struct { + _ struct{} `type:"structure"` + + // The capabilities allowed in the stack. + Capabilities []*string `type:"list"` + + // The time at which the stack was created. + CreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // A user-defined description associated with the stack. + Description *string `type:"string"` + + // Boolean to enable or disable rollback on stack creation failures: + // + // true: disable rollback false: enable rollback + DisableRollback *bool `type:"boolean"` + + // The time the stack was last updated. This field will only be returned if + // the stack has been updated at least once. + LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // SNS topic ARNs to which stack related events are published. + NotificationARNs []*string `type:"list"` + + // A list of output structures. + Outputs []*Output `type:"list"` + + // A list of Parameter structures. + Parameters []*Parameter `type:"list"` + + // Unique identifier of the stack. + StackId *string `type:"string"` + + // The name associated with the stack. + StackName *string `type:"string" required:"true"` + + // Current status of the stack. 
+ StackStatus *string `type:"string" required:"true" enum:"StackStatus"` + + // Success/failure message associated with the stack status. + StackStatusReason *string `type:"string"` + + // A list of Tags that specify cost allocation information for the stack. + Tags []*Tag `type:"list"` + + // The amount of time within which stack creation should complete. + TimeoutInMinutes *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s Stack) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stack) GoString() string { + return s.String() +} + +// The StackEvent data type. +type StackEvent struct { + _ struct{} `type:"structure"` + + // The unique ID of this event. + EventId *string `type:"string" required:"true"` + + // The logical name of the resource specified in the template. + LogicalResourceId *string `type:"string"` + + // The name or unique identifier associated with the physical instance of the + // resource. + PhysicalResourceId *string `type:"string"` + + // BLOB of the properties used to create the resource. + ResourceProperties *string `type:"string"` + + // Current status of the resource. + ResourceStatus *string `type:"string" enum:"ResourceStatus"` + + // Success/failure message associated with the resource. + ResourceStatusReason *string `type:"string"` + + // Type of resource. (For more information, go to AWS Resource Types Reference + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the AWS CloudFormation User Guide.) + ResourceType *string `type:"string"` + + // The unique ID name of the instance of the stack. + StackId *string `type:"string" required:"true"` + + // The name associated with a stack. + StackName *string `type:"string" required:"true"` + + // Time the status was updated. 
+ Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s StackEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackEvent) GoString() string { + return s.String() +} + +// The StackResource data type. +type StackResource struct { + _ struct{} `type:"structure"` + + // User defined description associated with the resource. + Description *string `type:"string"` + + // The logical name of the resource specified in the template. + LogicalResourceId *string `type:"string" required:"true"` + + // The name or unique identifier that corresponds to a physical instance ID + // of a resource supported by AWS CloudFormation. + PhysicalResourceId *string `type:"string"` + + // Current status of the resource. + ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"` + + // Success/failure message associated with the resource. + ResourceStatusReason *string `type:"string"` + + // Type of resource. (For more information, go to AWS Resource Types Reference + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the AWS CloudFormation User Guide.) + ResourceType *string `type:"string" required:"true"` + + // Unique identifier of the stack. + StackId *string `type:"string"` + + // The name associated with the stack. + StackName *string `type:"string"` + + // Time the status was updated. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s StackResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackResource) GoString() string { + return s.String() +} + +// Contains detailed information about the specified stack resource. 
+type StackResourceDetail struct { + _ struct{} `type:"structure"` + + // User defined description associated with the resource. + Description *string `type:"string"` + + // Time the status was updated. + LastUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The logical name of the resource specified in the template. + LogicalResourceId *string `type:"string" required:"true"` + + // The JSON format content of the Metadata attribute declared for the resource. + // For more information, see Metadata Attribute (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html) + // in the AWS CloudFormation User Guide. + Metadata *string `type:"string"` + + // The name or unique identifier that corresponds to a physical instance ID + // of a resource supported by AWS CloudFormation. + PhysicalResourceId *string `type:"string"` + + // Current status of the resource. + ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"` + + // Success/failure message associated with the resource. + ResourceStatusReason *string `type:"string"` + + // Type of resource. ((For more information, go to AWS Resource Types Reference + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the AWS CloudFormation User Guide.) + ResourceType *string `type:"string" required:"true"` + + // Unique identifier of the stack. + StackId *string `type:"string"` + + // The name associated with the stack. + StackName *string `type:"string"` +} + +// String returns the string representation +func (s StackResourceDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackResourceDetail) GoString() string { + return s.String() +} + +// Contains high-level information about the specified stack resource. 
+type StackResourceSummary struct { + _ struct{} `type:"structure"` + + // Time the status was updated. + LastUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The logical name of the resource specified in the template. + LogicalResourceId *string `type:"string" required:"true"` + + // The name or unique identifier that corresponds to a physical instance ID + // of the resource. + PhysicalResourceId *string `type:"string"` + + // Current status of the resource. + ResourceStatus *string `type:"string" required:"true" enum:"ResourceStatus"` + + // Success/failure message associated with the resource. + ResourceStatusReason *string `type:"string"` + + // Type of resource. (For more information, go to AWS Resource Types Reference + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the AWS CloudFormation User Guide.) + ResourceType *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StackResourceSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackResourceSummary) GoString() string { + return s.String() +} + +// The StackSummary Data Type +type StackSummary struct { + _ struct{} `type:"structure"` + + // The time the stack was created. + CreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The time the stack was deleted. + DeletionTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The time the stack was last updated. This field will only be returned if + // the stack has been updated at least once. + LastUpdatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Unique stack identifier. + StackId *string `type:"string"` + + // The name associated with the stack. + StackName *string `type:"string" required:"true"` + + // The current status of the stack. 
+ StackStatus *string `type:"string" required:"true" enum:"StackStatus"` + + // Success/Failure message associated with the stack status. + StackStatusReason *string `type:"string"` + + // The template description of the template used to create the stack. + TemplateDescription *string `type:"string"` +} + +// String returns the string representation +func (s StackSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackSummary) GoString() string { + return s.String() +} + +// The Tag type is used by CreateStack in the Tags parameter. It allows you +// to specify a key-value pair that can be used to store information related +// to cost allocation for an AWS CloudFormation stack. +type Tag struct { + _ struct{} `type:"structure"` + + // Required. A string used to identify this tag. You can specify a maximum of + // 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have + // the reserved prefix: aws:. + Key *string `type:"string"` + + // Required. A string containing the value for this tag. You can specify a maximum + // of 256 characters for a tag value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// The TemplateParameter data type. +type TemplateParameter struct { + _ struct{} `type:"structure"` + + // The default value associated with the parameter. + DefaultValue *string `type:"string"` + + // User defined description associated with the parameter. + Description *string `type:"string"` + + // Flag indicating whether the parameter should be displayed as plain text in + // logs and UIs. + NoEcho *bool `type:"boolean"` + + // The name associated with the parameter. 
+ ParameterKey *string `type:"string"` +} + +// String returns the string representation +func (s TemplateParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TemplateParameter) GoString() string { + return s.String() +} + +// The input for UpdateStack action. +type UpdateStackInput struct { + _ struct{} `type:"structure"` + + // A list of capabilities that you must specify before AWS CloudFormation can + // create or update certain stacks. Some stack templates might include resources + // that can affect permissions in your AWS account. For those stacks, you must + // explicitly acknowledge their capabilities by specifying this parameter. Currently, + // the only valid value is CAPABILITY_IAM, which is required for the following + // resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), + // AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html), + // AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html), + // AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html), + // AWS::IAM::Role (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html), + // AWS::IAM::User (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html), + // and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html). + // If your stack template contains these resources, we recommend that you review + // any permissions associated with them. If you don't specify this parameter, + // this action returns an InsufficientCapabilities error. 
+ Capabilities []*string `type:"list"` + + // Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that + // AWS CloudFormation associates with the stack. Specify an empty list to remove + // all notification topics. + NotificationARNs []*string `type:"list"` + + // A list of Parameter structures that specify input parameters for the stack. + // For more information, see the Parameter (http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_Parameter.html) + // data type. + Parameters []*Parameter `type:"list"` + + // The template resource types that you have permissions to work with for this + // update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. + // + // If the list of resource types doesn't include a resource that you're updating, + // the stack update fails. By default, AWS CloudFormation grants permissions + // to all resource types. AWS Identity and Access Management (IAM) uses this + // parameter for AWS CloudFormation-specific condition keys in IAM policies. + // For more information, see Controlling Access with AWS Identity and Access + // Management (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html). + ResourceTypes []*string `type:"list"` + + // The name or unique stack ID of the stack to update. + StackName *string `type:"string" required:"true"` + + // Structure containing a new stack policy body. You can specify either the + // StackPolicyBody or the StackPolicyURL parameter, but not both. + // + // You might update the stack policy, for example, in order to protect a new + // resource that you created during a stack update. If you do not specify a + // stack policy, the current policy that is associated with the stack is unchanged. + StackPolicyBody *string `min:"1" type:"string"` + + // Structure containing the temporary overriding stack policy body. 
You can + // specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL + // parameter, but not both. + // + // If you want to update protected resources, specify a temporary overriding + // stack policy during this update. If you do not specify a stack policy, the + // current policy that is associated with the stack will be used. + StackPolicyDuringUpdateBody *string `min:"1" type:"string"` + + // Location of a file containing the temporary overriding stack policy. The + // URL must point to a policy (max size: 16KB) located in an S3 bucket in the + // same region as the stack. You can specify either the StackPolicyDuringUpdateBody + // or the StackPolicyDuringUpdateURL parameter, but not both. + // + // If you want to update protected resources, specify a temporary overriding + // stack policy during this update. If you do not specify a stack policy, the + // current policy that is associated with the stack will be used. + StackPolicyDuringUpdateURL *string `min:"1" type:"string"` + + // Location of a file containing the updated stack policy. The URL must point + // to a policy (max size: 16KB) located in an S3 bucket in the same region as + // the stack. You can specify either the StackPolicyBody or the StackPolicyURL + // parameter, but not both. + // + // You might update the stack policy, for example, in order to protect a new + // resource that you created during a stack update. If you do not specify a + // stack policy, the current policy that is associated with the stack is unchanged. + StackPolicyURL *string `min:"1" type:"string"` + + // Structure containing the template body with a minimum length of 1 byte and + // a maximum length of 51,200 bytes. (For more information, go to Template Anatomy + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide.) 
+ // + // Conditional: You must specify either the TemplateBody or the TemplateURL + // parameter, but not both. + TemplateBody *string `min:"1" type:"string"` + + // Location of file containing the template body. The URL must point to a template + // that is located in an Amazon S3 bucket. For more information, go to Template + // Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must specify either the TemplateBody or the TemplateURL + // parameter, but not both. + TemplateURL *string `min:"1" type:"string"` + + // Reuse the existing template that is associated with the stack that you are + // updating. + UsePreviousTemplate *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackInput) GoString() string { + return s.String() +} + +// The output for a UpdateStack action. +type UpdateStackOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s UpdateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackOutput) GoString() string { + return s.String() +} + +// The input for ValidateTemplate action. +type ValidateTemplateInput struct { + _ struct{} `type:"structure"` + + // Structure containing the template body with a minimum length of 1 byte and + // a maximum length of 51,200 bytes. For more information, go to Template Anatomy + // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must pass TemplateURL or TemplateBody. If both are passed, + // only TemplateBody is used. 
+ TemplateBody *string `min:"1" type:"string"` + + // Location of file containing the template body. The URL must point to a template + // (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more + // information, go to Template Anatomy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) + // in the AWS CloudFormation User Guide. + // + // Conditional: You must pass TemplateURL or TemplateBody. If both are passed, + // only TemplateBody is used. + TemplateURL *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ValidateTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateTemplateInput) GoString() string { + return s.String() +} + +// The output for ValidateTemplate action. +type ValidateTemplateOutput struct { + _ struct{} `type:"structure"` + + // The capabilities found within the template. Currently, AWS CloudFormation + // supports only the CAPABILITY_IAM capability. If your template contains IAM + // resources, you must specify the CAPABILITY_IAM value for this parameter when + // you use the CreateStack or UpdateStack actions with your template; otherwise, + // those actions return an InsufficientCapabilities error. + Capabilities []*string `type:"list"` + + // The list of resources that generated the values in the Capabilities response + // element. + CapabilitiesReason *string `type:"string"` + + // The description found within the template. + Description *string `type:"string"` + + // A list of TemplateParameter structures. 
+ Parameters []*TemplateParameter `type:"list"` +} + +// String returns the string representation +func (s ValidateTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateTemplateOutput) GoString() string { + return s.String() +} + +const ( + // @enum Capability + CapabilityCapabilityIam = "CAPABILITY_IAM" +) + +const ( + // @enum OnFailure + OnFailureDoNothing = "DO_NOTHING" + // @enum OnFailure + OnFailureRollback = "ROLLBACK" + // @enum OnFailure + OnFailureDelete = "DELETE" +) + +const ( + // @enum ResourceSignalStatus + ResourceSignalStatusSuccess = "SUCCESS" + // @enum ResourceSignalStatus + ResourceSignalStatusFailure = "FAILURE" +) + +const ( + // @enum ResourceStatus + ResourceStatusCreateInProgress = "CREATE_IN_PROGRESS" + // @enum ResourceStatus + ResourceStatusCreateFailed = "CREATE_FAILED" + // @enum ResourceStatus + ResourceStatusCreateComplete = "CREATE_COMPLETE" + // @enum ResourceStatus + ResourceStatusDeleteInProgress = "DELETE_IN_PROGRESS" + // @enum ResourceStatus + ResourceStatusDeleteFailed = "DELETE_FAILED" + // @enum ResourceStatus + ResourceStatusDeleteComplete = "DELETE_COMPLETE" + // @enum ResourceStatus + ResourceStatusDeleteSkipped = "DELETE_SKIPPED" + // @enum ResourceStatus + ResourceStatusUpdateInProgress = "UPDATE_IN_PROGRESS" + // @enum ResourceStatus + ResourceStatusUpdateFailed = "UPDATE_FAILED" + // @enum ResourceStatus + ResourceStatusUpdateComplete = "UPDATE_COMPLETE" +) + +const ( + // @enum StackStatus + StackStatusCreateInProgress = "CREATE_IN_PROGRESS" + // @enum StackStatus + StackStatusCreateFailed = "CREATE_FAILED" + // @enum StackStatus + StackStatusCreateComplete = "CREATE_COMPLETE" + // @enum StackStatus + StackStatusRollbackInProgress = "ROLLBACK_IN_PROGRESS" + // @enum StackStatus + StackStatusRollbackFailed = "ROLLBACK_FAILED" + // @enum StackStatus + StackStatusRollbackComplete = "ROLLBACK_COMPLETE" + // @enum StackStatus + 
StackStatusDeleteInProgress = "DELETE_IN_PROGRESS" + // @enum StackStatus + StackStatusDeleteFailed = "DELETE_FAILED" + // @enum StackStatus + StackStatusDeleteComplete = "DELETE_COMPLETE" + // @enum StackStatus + StackStatusUpdateInProgress = "UPDATE_IN_PROGRESS" + // @enum StackStatus + StackStatusUpdateCompleteCleanupInProgress = "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS" + // @enum StackStatus + StackStatusUpdateComplete = "UPDATE_COMPLETE" + // @enum StackStatus + StackStatusUpdateRollbackInProgress = "UPDATE_ROLLBACK_IN_PROGRESS" + // @enum StackStatus + StackStatusUpdateRollbackFailed = "UPDATE_ROLLBACK_FAILED" + // @enum StackStatus + StackStatusUpdateRollbackCompleteCleanupInProgress = "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS" + // @enum StackStatus + StackStatusUpdateRollbackComplete = "UPDATE_ROLLBACK_COMPLETE" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,98 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudformationiface provides an interface for the AWS CloudFormation. +package cloudformationiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudformation" +) + +// CloudFormationAPI is the interface type for cloudformation.CloudFormation. 
+type CloudFormationAPI interface { + CancelUpdateStackRequest(*cloudformation.CancelUpdateStackInput) (*request.Request, *cloudformation.CancelUpdateStackOutput) + + CancelUpdateStack(*cloudformation.CancelUpdateStackInput) (*cloudformation.CancelUpdateStackOutput, error) + + ContinueUpdateRollbackRequest(*cloudformation.ContinueUpdateRollbackInput) (*request.Request, *cloudformation.ContinueUpdateRollbackOutput) + + ContinueUpdateRollback(*cloudformation.ContinueUpdateRollbackInput) (*cloudformation.ContinueUpdateRollbackOutput, error) + + CreateStackRequest(*cloudformation.CreateStackInput) (*request.Request, *cloudformation.CreateStackOutput) + + CreateStack(*cloudformation.CreateStackInput) (*cloudformation.CreateStackOutput, error) + + DeleteStackRequest(*cloudformation.DeleteStackInput) (*request.Request, *cloudformation.DeleteStackOutput) + + DeleteStack(*cloudformation.DeleteStackInput) (*cloudformation.DeleteStackOutput, error) + + DescribeAccountLimitsRequest(*cloudformation.DescribeAccountLimitsInput) (*request.Request, *cloudformation.DescribeAccountLimitsOutput) + + DescribeAccountLimits(*cloudformation.DescribeAccountLimitsInput) (*cloudformation.DescribeAccountLimitsOutput, error) + + DescribeStackEventsRequest(*cloudformation.DescribeStackEventsInput) (*request.Request, *cloudformation.DescribeStackEventsOutput) + + DescribeStackEvents(*cloudformation.DescribeStackEventsInput) (*cloudformation.DescribeStackEventsOutput, error) + + DescribeStackEventsPages(*cloudformation.DescribeStackEventsInput, func(*cloudformation.DescribeStackEventsOutput, bool) bool) error + + DescribeStackResourceRequest(*cloudformation.DescribeStackResourceInput) (*request.Request, *cloudformation.DescribeStackResourceOutput) + + DescribeStackResource(*cloudformation.DescribeStackResourceInput) (*cloudformation.DescribeStackResourceOutput, error) + + DescribeStackResourcesRequest(*cloudformation.DescribeStackResourcesInput) (*request.Request, 
*cloudformation.DescribeStackResourcesOutput) + + DescribeStackResources(*cloudformation.DescribeStackResourcesInput) (*cloudformation.DescribeStackResourcesOutput, error) + + DescribeStacksRequest(*cloudformation.DescribeStacksInput) (*request.Request, *cloudformation.DescribeStacksOutput) + + DescribeStacks(*cloudformation.DescribeStacksInput) (*cloudformation.DescribeStacksOutput, error) + + DescribeStacksPages(*cloudformation.DescribeStacksInput, func(*cloudformation.DescribeStacksOutput, bool) bool) error + + EstimateTemplateCostRequest(*cloudformation.EstimateTemplateCostInput) (*request.Request, *cloudformation.EstimateTemplateCostOutput) + + EstimateTemplateCost(*cloudformation.EstimateTemplateCostInput) (*cloudformation.EstimateTemplateCostOutput, error) + + GetStackPolicyRequest(*cloudformation.GetStackPolicyInput) (*request.Request, *cloudformation.GetStackPolicyOutput) + + GetStackPolicy(*cloudformation.GetStackPolicyInput) (*cloudformation.GetStackPolicyOutput, error) + + GetTemplateRequest(*cloudformation.GetTemplateInput) (*request.Request, *cloudformation.GetTemplateOutput) + + GetTemplate(*cloudformation.GetTemplateInput) (*cloudformation.GetTemplateOutput, error) + + GetTemplateSummaryRequest(*cloudformation.GetTemplateSummaryInput) (*request.Request, *cloudformation.GetTemplateSummaryOutput) + + GetTemplateSummary(*cloudformation.GetTemplateSummaryInput) (*cloudformation.GetTemplateSummaryOutput, error) + + ListStackResourcesRequest(*cloudformation.ListStackResourcesInput) (*request.Request, *cloudformation.ListStackResourcesOutput) + + ListStackResources(*cloudformation.ListStackResourcesInput) (*cloudformation.ListStackResourcesOutput, error) + + ListStackResourcesPages(*cloudformation.ListStackResourcesInput, func(*cloudformation.ListStackResourcesOutput, bool) bool) error + + ListStacksRequest(*cloudformation.ListStacksInput) (*request.Request, *cloudformation.ListStacksOutput) + + ListStacks(*cloudformation.ListStacksInput) 
(*cloudformation.ListStacksOutput, error) + + ListStacksPages(*cloudformation.ListStacksInput, func(*cloudformation.ListStacksOutput, bool) bool) error + + SetStackPolicyRequest(*cloudformation.SetStackPolicyInput) (*request.Request, *cloudformation.SetStackPolicyOutput) + + SetStackPolicy(*cloudformation.SetStackPolicyInput) (*cloudformation.SetStackPolicyOutput, error) + + SignalResourceRequest(*cloudformation.SignalResourceInput) (*request.Request, *cloudformation.SignalResourceOutput) + + SignalResource(*cloudformation.SignalResourceInput) (*cloudformation.SignalResourceOutput, error) + + UpdateStackRequest(*cloudformation.UpdateStackInput) (*request.Request, *cloudformation.UpdateStackOutput) + + UpdateStack(*cloudformation.UpdateStackInput) (*cloudformation.UpdateStackOutput, error) + + ValidateTemplateRequest(*cloudformation.ValidateTemplateInput) (*request.Request, *cloudformation.ValidateTemplateOutput) + + ValidateTemplate(*cloudformation.ValidateTemplateInput) (*cloudformation.ValidateTemplateOutput, error) +} + +var _ CloudFormationAPI = (*cloudformation.CloudFormation)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,465 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudformation_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudformation" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudFormation_CancelUpdateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.CancelUpdateStackInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.CancelUpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ContinueUpdateRollback() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ContinueUpdateRollbackInput{ + StackName: aws.String("StackNameOrId"), // Required + } + resp, err := svc.ContinueUpdateRollback(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_CreateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.CreateStackInput{ + StackName: aws.String("StackName"), // Required + Capabilities: []*string{ + aws.String("Capability"), // Required + // More values... + }, + DisableRollback: aws.Bool(true), + NotificationARNs: []*string{ + aws.String("NotificationARN"), // Required + // More values... + }, + OnFailure: aws.String("OnFailure"), + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... 
+ }, + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyURL: aws.String("StackPolicyURL"), + Tags: []*cloudformation.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + TimeoutInMinutes: aws.Int64(1), + } + resp, err := svc.CreateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DeleteStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DeleteStackInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.DeleteStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeAccountLimits() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeAccountLimitsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeAccountLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackEvents() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackEventsInput{ + NextToken: aws.String("NextToken"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStackEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackResource() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackResourceInput{ + LogicalResourceId: aws.String("LogicalResourceId"), // Required + StackName: aws.String("StackName"), // Required + } + resp, err := svc.DescribeStackResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStackResources() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStackResourcesInput{ + LogicalResourceId: aws.String("LogicalResourceId"), + PhysicalResourceId: aws.String("PhysicalResourceId"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStackResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_DescribeStacks() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.DescribeStacksInput{ + NextToken: aws.String("NextToken"), + StackName: aws.String("StackName"), + } + resp, err := svc.DescribeStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFormation_EstimateTemplateCost() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.EstimateTemplateCostInput{ + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.EstimateTemplateCost(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_GetStackPolicy() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetStackPolicyInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.GetStackPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_GetTemplate() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetTemplateInput{ + StackName: aws.String("StackName"), // Required + } + resp, err := svc.GetTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFormation_GetTemplateSummary() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.GetTemplateSummaryInput{ + StackName: aws.String("StackNameOrId"), + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.GetTemplateSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ListStackResources() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ListStackResourcesInput{ + StackName: aws.String("StackName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListStackResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ListStacks() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ListStacksInput{ + NextToken: aws.String("NextToken"), + StackStatusFilter: []*string{ + aws.String("StackStatus"), // Required + // More values... + }, + } + resp, err := svc.ListStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFormation_SetStackPolicy() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.SetStackPolicyInput{ + StackName: aws.String("StackName"), // Required + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyURL: aws.String("StackPolicyURL"), + } + resp, err := svc.SetStackPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_SignalResource() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.SignalResourceInput{ + LogicalResourceId: aws.String("LogicalResourceId"), // Required + StackName: aws.String("StackNameOrId"), // Required + Status: aws.String("ResourceSignalStatus"), // Required + UniqueId: aws.String("ResourceSignalUniqueId"), // Required + } + resp, err := svc.SignalResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_UpdateStack() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.UpdateStackInput{ + StackName: aws.String("StackName"), // Required + Capabilities: []*string{ + aws.String("Capability"), // Required + // More values... + }, + NotificationARNs: []*string{ + aws.String("NotificationARN"), // Required + // More values... + }, + Parameters: []*cloudformation.Parameter{ + { // Required + ParameterKey: aws.String("ParameterKey"), + ParameterValue: aws.String("ParameterValue"), + UsePreviousValue: aws.Bool(true), + }, + // More values... + }, + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... 
+ }, + StackPolicyBody: aws.String("StackPolicyBody"), + StackPolicyDuringUpdateBody: aws.String("StackPolicyDuringUpdateBody"), + StackPolicyDuringUpdateURL: aws.String("StackPolicyDuringUpdateURL"), + StackPolicyURL: aws.String("StackPolicyURL"), + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + UsePreviousTemplate: aws.Bool(true), + } + resp, err := svc.UpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFormation_ValidateTemplate() { + svc := cloudformation.New(session.New()) + + params := &cloudformation.ValidateTemplateInput{ + TemplateBody: aws.String("TemplateBody"), + TemplateURL: aws.String("TemplateURL"), + } + resp, err := svc.ValidateTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,103 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudformation + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS CloudFormation enables you to create and manage AWS infrastructure deployments +// predictably and repeatedly. AWS CloudFormation helps you leverage AWS products +// such as Amazon EC2, EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable, +// highly scalable, cost effective applications without worrying about creating +// and configuring the underlying AWS infrastructure. +// +// With AWS CloudFormation, you declare all of your resources and dependencies +// in a template file. The template defines a collection of resources as a single +// unit called a stack. AWS CloudFormation creates and deletes all member resources +// of the stack together and manages all dependencies between the resources +// for you. +// +// For more information about this product, go to the CloudFormation Product +// Page (http://aws.amazon.com/cloudformation/). +// +// Amazon CloudFormation makes use of other AWS products. If you need additional +// technical information about a specific AWS product, you can find the product's +// technical documentation at http://docs.aws.amazon.com/documentation/ (http://docs.aws.amazon.com/documentation/). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudFormation struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. 
+const ServiceName = "cloudformation" + +// New creates a new instance of the CloudFormation client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudFormation client from just a session. +// svc := cloudformation.New(mySession) +// +// // Create a CloudFormation client with additional configuration +// svc := cloudformation.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFormation { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudFormation { + svc := &CloudFormation{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-05-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudFormation operation and runs any +// custom request initialization. 
+func (c *CloudFormation) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudformation + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *CloudFormation) WaitUntilStackCreateComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 50, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "CREATE_FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFormation) WaitUntilStackDeleteComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_COMPLETE", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ValidationError", + }, + { + State: "failure", + Matcher: 
"pathAny", + Argument: "Stacks[].StackStatus", + Expected: "DELETE_FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFormation) WaitUntilStackUpdateComplete(input *DescribeStacksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStacks", + Delay: 30, + MaxAttempts: 5, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_COMPLETE", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Stacks[].StackStatus", + Expected: "UPDATE_FAILED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3456 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudfront provides a client for Amazon CloudFront. +package cloudfront + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2016_01_28" + +// CreateCloudFrontOriginAccessIdentityRequest generates a request for the CreateCloudFrontOriginAccessIdentity operation. 
+func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *CreateCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opCreateCloudFrontOriginAccessIdentity, + HTTPMethod: "POST", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront", + } + + if input == nil { + input = &CreateCloudFrontOriginAccessIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCloudFrontOriginAccessIdentityOutput{} + req.Data = output + return +} + +// Create a new origin access identity. +func (c *CloudFront) CreateCloudFrontOriginAccessIdentity(input *CreateCloudFrontOriginAccessIdentityInput) (*CreateCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.CreateCloudFrontOriginAccessIdentityRequest(input) + err := req.Send() + return out, err +} + +const opCreateDistribution = "CreateDistribution2016_01_28" + +// CreateDistributionRequest generates a request for the CreateDistribution operation. +func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) (req *request.Request, output *CreateDistributionOutput) { + op := &request.Operation{ + Name: opCreateDistribution, + HTTPMethod: "POST", + HTTPPath: "/2016-01-28/distribution", + } + + if input == nil { + input = &CreateDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDistributionOutput{} + req.Data = output + return +} + +// Create a new distribution. +func (c *CloudFront) CreateDistribution(input *CreateDistributionInput) (*CreateDistributionOutput, error) { + req, out := c.CreateDistributionRequest(input) + err := req.Send() + return out, err +} + +const opCreateInvalidation = "CreateInvalidation2016_01_28" + +// CreateInvalidationRequest generates a request for the CreateInvalidation operation. 
+func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) (req *request.Request, output *CreateInvalidationOutput) { + op := &request.Operation{ + Name: opCreateInvalidation, + HTTPMethod: "POST", + HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation", + } + + if input == nil { + input = &CreateInvalidationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInvalidationOutput{} + req.Data = output + return +} + +// Create a new invalidation. +func (c *CloudFront) CreateInvalidation(input *CreateInvalidationInput) (*CreateInvalidationOutput, error) { + req, out := c.CreateInvalidationRequest(input) + err := req.Send() + return out, err +} + +const opCreateStreamingDistribution = "CreateStreamingDistribution2016_01_28" + +// CreateStreamingDistributionRequest generates a request for the CreateStreamingDistribution operation. +func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDistributionInput) (req *request.Request, output *CreateStreamingDistributionOutput) { + op := &request.Operation{ + Name: opCreateStreamingDistribution, + HTTPMethod: "POST", + HTTPPath: "/2016-01-28/streaming-distribution", + } + + if input == nil { + input = &CreateStreamingDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStreamingDistributionOutput{} + req.Data = output + return +} + +// Create a new streaming distribution. +func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistributionInput) (*CreateStreamingDistributionOutput, error) { + req, out := c.CreateStreamingDistributionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2016_01_28" + +// DeleteCloudFrontOriginAccessIdentityRequest generates a request for the DeleteCloudFrontOriginAccessIdentity operation. 
+func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCloudFrontOriginAccessIdentityInput) (req *request.Request, output *DeleteCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opDeleteCloudFrontOriginAccessIdentity, + HTTPMethod: "DELETE", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}", + } + + if input == nil { + input = &DeleteCloudFrontOriginAccessIdentityInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCloudFrontOriginAccessIdentityOutput{} + req.Data = output + return +} + +// Delete an origin access identity. +func (c *CloudFront) DeleteCloudFrontOriginAccessIdentity(input *DeleteCloudFrontOriginAccessIdentityInput) (*DeleteCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.DeleteCloudFrontOriginAccessIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDistribution = "DeleteDistribution2016_01_28" + +// DeleteDistributionRequest generates a request for the DeleteDistribution operation. +func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) { + op := &request.Operation{ + Name: opDeleteDistribution, + HTTPMethod: "DELETE", + HTTPPath: "/2016-01-28/distribution/{Id}", + } + + if input == nil { + input = &DeleteDistributionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDistributionOutput{} + req.Data = output + return +} + +// Delete a distribution. 
+func (c *CloudFront) DeleteDistribution(input *DeleteDistributionInput) (*DeleteDistributionOutput, error) { + req, out := c.DeleteDistributionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStreamingDistribution = "DeleteStreamingDistribution2016_01_28" + +// DeleteStreamingDistributionRequest generates a request for the DeleteStreamingDistribution operation. +func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDistributionInput) (req *request.Request, output *DeleteStreamingDistributionOutput) { + op := &request.Operation{ + Name: opDeleteStreamingDistribution, + HTTPMethod: "DELETE", + HTTPPath: "/2016-01-28/streaming-distribution/{Id}", + } + + if input == nil { + input = &DeleteStreamingDistributionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStreamingDistributionOutput{} + req.Data = output + return +} + +// Delete a streaming distribution. +func (c *CloudFront) DeleteStreamingDistribution(input *DeleteStreamingDistributionInput) (*DeleteStreamingDistributionOutput, error) { + req, out := c.DeleteStreamingDistributionRequest(input) + err := req.Send() + return out, err +} + +const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2016_01_28" + +// GetCloudFrontOriginAccessIdentityRequest generates a request for the GetCloudFrontOriginAccessIdentity operation. 
+func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFrontOriginAccessIdentityInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opGetCloudFrontOriginAccessIdentity, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}", + } + + if input == nil { + input = &GetCloudFrontOriginAccessIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCloudFrontOriginAccessIdentityOutput{} + req.Data = output + return +} + +// Get the information about an origin access identity. +func (c *CloudFront) GetCloudFrontOriginAccessIdentity(input *GetCloudFrontOriginAccessIdentityInput) (*GetCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.GetCloudFrontOriginAccessIdentityRequest(input) + err := req.Send() + return out, err +} + +const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2016_01_28" + +// GetCloudFrontOriginAccessIdentityConfigRequest generates a request for the GetCloudFrontOriginAccessIdentityConfig operation. +func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCloudFrontOriginAccessIdentityConfigInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityConfigOutput) { + op := &request.Operation{ + Name: opGetCloudFrontOriginAccessIdentityConfig, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}/config", + } + + if input == nil { + input = &GetCloudFrontOriginAccessIdentityConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCloudFrontOriginAccessIdentityConfigOutput{} + req.Data = output + return +} + +// Get the configuration information about an origin access identity. 
+func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfig(input *GetCloudFrontOriginAccessIdentityConfigInput) (*GetCloudFrontOriginAccessIdentityConfigOutput, error) { + req, out := c.GetCloudFrontOriginAccessIdentityConfigRequest(input) + err := req.Send() + return out, err +} + +const opGetDistribution = "GetDistribution2016_01_28" + +// GetDistributionRequest generates a request for the GetDistribution operation. +func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *request.Request, output *GetDistributionOutput) { + op := &request.Operation{ + Name: opGetDistribution, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution/{Id}", + } + + if input == nil { + input = &GetDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDistributionOutput{} + req.Data = output + return +} + +// Get the information about a distribution. +func (c *CloudFront) GetDistribution(input *GetDistributionInput) (*GetDistributionOutput, error) { + req, out := c.GetDistributionRequest(input) + err := req.Send() + return out, err +} + +const opGetDistributionConfig = "GetDistributionConfig2016_01_28" + +// GetDistributionConfigRequest generates a request for the GetDistributionConfig operation. +func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigInput) (req *request.Request, output *GetDistributionConfigOutput) { + op := &request.Operation{ + Name: opGetDistributionConfig, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution/{Id}/config", + } + + if input == nil { + input = &GetDistributionConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDistributionConfigOutput{} + req.Data = output + return +} + +// Get the configuration information about a distribution. 
+func (c *CloudFront) GetDistributionConfig(input *GetDistributionConfigInput) (*GetDistributionConfigOutput, error) { + req, out := c.GetDistributionConfigRequest(input) + err := req.Send() + return out, err +} + +const opGetInvalidation = "GetInvalidation2016_01_28" + +// GetInvalidationRequest generates a request for the GetInvalidation operation. +func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *request.Request, output *GetInvalidationOutput) { + op := &request.Operation{ + Name: opGetInvalidation, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation/{Id}", + } + + if input == nil { + input = &GetInvalidationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetInvalidationOutput{} + req.Data = output + return +} + +// Get the information about an invalidation. +func (c *CloudFront) GetInvalidation(input *GetInvalidationInput) (*GetInvalidationOutput, error) { + req, out := c.GetInvalidationRequest(input) + err := req.Send() + return out, err +} + +const opGetStreamingDistribution = "GetStreamingDistribution2016_01_28" + +// GetStreamingDistributionRequest generates a request for the GetStreamingDistribution operation. +func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistributionInput) (req *request.Request, output *GetStreamingDistributionOutput) { + op := &request.Operation{ + Name: opGetStreamingDistribution, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/streaming-distribution/{Id}", + } + + if input == nil { + input = &GetStreamingDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStreamingDistributionOutput{} + req.Data = output + return +} + +// Get the information about a streaming distribution. 
+func (c *CloudFront) GetStreamingDistribution(input *GetStreamingDistributionInput) (*GetStreamingDistributionOutput, error) { + req, out := c.GetStreamingDistributionRequest(input) + err := req.Send() + return out, err +} + +const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2016_01_28" + +// GetStreamingDistributionConfigRequest generates a request for the GetStreamingDistributionConfig operation. +func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDistributionConfigInput) (req *request.Request, output *GetStreamingDistributionConfigOutput) { + op := &request.Operation{ + Name: opGetStreamingDistributionConfig, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/streaming-distribution/{Id}/config", + } + + if input == nil { + input = &GetStreamingDistributionConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetStreamingDistributionConfigOutput{} + req.Data = output + return +} + +// Get the configuration information about a streaming distribution. +func (c *CloudFront) GetStreamingDistributionConfig(input *GetStreamingDistributionConfigInput) (*GetStreamingDistributionConfigOutput, error) { + req, out := c.GetStreamingDistributionConfigRequest(input) + err := req.Send() + return out, err +} + +const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2016_01_28" + +// ListCloudFrontOriginAccessIdentitiesRequest generates a request for the ListCloudFrontOriginAccessIdentities operation. 
+func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListCloudFrontOriginAccessIdentitiesInput) (req *request.Request, output *ListCloudFrontOriginAccessIdentitiesOutput) { + op := &request.Operation{ + Name: opListCloudFrontOriginAccessIdentities, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"CloudFrontOriginAccessIdentityList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "CloudFrontOriginAccessIdentityList.IsTruncated", + }, + } + + if input == nil { + input = &ListCloudFrontOriginAccessIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCloudFrontOriginAccessIdentitiesOutput{} + req.Data = output + return +} + +// List origin access identities. +func (c *CloudFront) ListCloudFrontOriginAccessIdentities(input *ListCloudFrontOriginAccessIdentitiesInput) (*ListCloudFrontOriginAccessIdentitiesOutput, error) { + req, out := c.ListCloudFrontOriginAccessIdentitiesRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudFrontOriginAccessIdentitiesInput, fn func(p *ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListCloudFrontOriginAccessIdentitiesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListCloudFrontOriginAccessIdentitiesOutput), lastPage) + }) +} + +const opListDistributions = "ListDistributions2016_01_28" + +// ListDistributionsRequest generates a request for the ListDistributions operation. 
+func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (req *request.Request, output *ListDistributionsOutput) { + op := &request.Operation{ + Name: opListDistributions, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"DistributionList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "DistributionList.IsTruncated", + }, + } + + if input == nil { + input = &ListDistributionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDistributionsOutput{} + req.Data = output + return +} + +// List distributions. +func (c *CloudFront) ListDistributions(input *ListDistributionsInput) (*ListDistributionsOutput, error) { + req, out := c.ListDistributionsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn func(p *ListDistributionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDistributionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDistributionsOutput), lastPage) + }) +} + +const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2016_01_28" + +// ListDistributionsByWebACLIdRequest generates a request for the ListDistributionsByWebACLId operation. 
+func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributionsByWebACLIdInput) (req *request.Request, output *ListDistributionsByWebACLIdOutput) { + op := &request.Operation{ + Name: opListDistributionsByWebACLId, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distributionsByWebACLId/{WebACLId}", + } + + if input == nil { + input = &ListDistributionsByWebACLIdInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDistributionsByWebACLIdOutput{} + req.Data = output + return +} + +// List the distributions that are associated with a specified AWS WAF web ACL. +func (c *CloudFront) ListDistributionsByWebACLId(input *ListDistributionsByWebACLIdInput) (*ListDistributionsByWebACLIdOutput, error) { + req, out := c.ListDistributionsByWebACLIdRequest(input) + err := req.Send() + return out, err +} + +const opListInvalidations = "ListInvalidations2016_01_28" + +// ListInvalidationsRequest generates a request for the ListInvalidations operation. +func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (req *request.Request, output *ListInvalidationsOutput) { + op := &request.Operation{ + Name: opListInvalidations, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"InvalidationList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "InvalidationList.IsTruncated", + }, + } + + if input == nil { + input = &ListInvalidationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInvalidationsOutput{} + req.Data = output + return +} + +// List invalidation batches. 
+func (c *CloudFront) ListInvalidations(input *ListInvalidationsInput) (*ListInvalidationsOutput, error) { + req, out := c.ListInvalidationsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn func(p *ListInvalidationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInvalidationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInvalidationsOutput), lastPage) + }) +} + +const opListStreamingDistributions = "ListStreamingDistributions2016_01_28" + +// ListStreamingDistributionsRequest generates a request for the ListStreamingDistributions operation. +func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistributionsInput) (req *request.Request, output *ListStreamingDistributionsOutput) { + op := &request.Operation{ + Name: opListStreamingDistributions, + HTTPMethod: "GET", + HTTPPath: "/2016-01-28/streaming-distribution", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"StreamingDistributionList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "StreamingDistributionList.IsTruncated", + }, + } + + if input == nil { + input = &ListStreamingDistributionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStreamingDistributionsOutput{} + req.Data = output + return +} + +// List streaming distributions. 
+func (c *CloudFront) ListStreamingDistributions(input *ListStreamingDistributionsInput) (*ListStreamingDistributionsOutput, error) { + req, out := c.ListStreamingDistributionsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistributionsInput, fn func(p *ListStreamingDistributionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStreamingDistributionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStreamingDistributionsOutput), lastPage) + }) +} + +const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2016_01_28" + +// UpdateCloudFrontOriginAccessIdentityRequest generates a request for the UpdateCloudFrontOriginAccessIdentity operation. +func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *UpdateCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opUpdateCloudFrontOriginAccessIdentity, + HTTPMethod: "PUT", + HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}/config", + } + + if input == nil { + input = &UpdateCloudFrontOriginAccessIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateCloudFrontOriginAccessIdentityOutput{} + req.Data = output + return +} + +// Update an origin access identity. +func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFrontOriginAccessIdentityInput) (*UpdateCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.UpdateCloudFrontOriginAccessIdentityRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDistribution = "UpdateDistribution2016_01_28" + +// UpdateDistributionRequest generates a request for the UpdateDistribution operation. 
+func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { + op := &request.Operation{ + Name: opUpdateDistribution, + HTTPMethod: "PUT", + HTTPPath: "/2016-01-28/distribution/{Id}/config", + } + + if input == nil { + input = &UpdateDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDistributionOutput{} + req.Data = output + return +} + +// Update a distribution. +func (c *CloudFront) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) { + req, out := c.UpdateDistributionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateStreamingDistribution = "UpdateStreamingDistribution2016_01_28" + +// UpdateStreamingDistributionRequest generates a request for the UpdateStreamingDistribution operation. +func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDistributionInput) (req *request.Request, output *UpdateStreamingDistributionOutput) { + op := &request.Operation{ + Name: opUpdateStreamingDistribution, + HTTPMethod: "PUT", + HTTPPath: "/2016-01-28/streaming-distribution/{Id}/config", + } + + if input == nil { + input = &UpdateStreamingDistributionInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateStreamingDistributionOutput{} + req.Data = output + return +} + +// Update a streaming distribution. +func (c *CloudFront) UpdateStreamingDistribution(input *UpdateStreamingDistributionInput) (*UpdateStreamingDistributionOutput, error) { + req, out := c.UpdateStreamingDistributionRequest(input) + err := req.Send() + return out, err +} + +// A complex type that lists the AWS accounts, if any, that you included in +// the TrustedSigners complex type for the default cache behavior or for any +// of the other cache behaviors for this distribution. These are accounts that +// you want to allow to create signed URLs for private content. 
+type ActiveTrustedSigners struct { + _ struct{} `type:"structure"` + + // Each active trusted signer. + Enabled *bool `type:"boolean" required:"true"` + + // A complex type that contains one Signer complex type for each unique trusted + // signer that is specified in the TrustedSigners complex type, including trusted + // signers in the default cache behavior and in all of the other cache behaviors. + Items []*Signer `locationNameList:"Signer" type:"list"` + + // The number of unique trusted signers included in all cache behaviors. For + // example, if three cache behaviors all list the same three AWS accounts, the + // value of Quantity for ActiveTrustedSigners will be 3. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ActiveTrustedSigners) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveTrustedSigners) GoString() string { + return s.String() +} + +// A complex type that contains information about CNAMEs (alternate domain names), +// if any, for this distribution. +type Aliases struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains CNAME elements, if any, for this distribution. + // If Quantity is 0, you can omit Items. + Items []*string `locationNameList:"CNAME" type:"list"` + + // The number of CNAMEs, if any, for this distribution. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Aliases) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Aliases) GoString() string { + return s.String() +} + +// A complex type that controls which HTTP methods CloudFront processes and +// forwards to your Amazon S3 bucket or your custom origin. There are three +// choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards +// only GET, HEAD and OPTIONS requests. 
- CloudFront forwards GET, HEAD, OPTIONS, +// PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you +// may need to restrict access to your Amazon S3 bucket or to your custom origin +// so users can't perform operations that you don't want them to. For example, +// you may not want users to have permission to delete objects from your origin. +type AllowedMethods struct { + _ struct{} `type:"structure"` + + // A complex type that controls whether CloudFront caches the response to requests + // using the specified HTTP methods. There are two choices: - CloudFront caches + // responses to GET and HEAD requests. - CloudFront caches responses to GET, + // HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, + // you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers + // and Origin headers for the responses to be cached correctly. + CachedMethods *CachedMethods `type:"structure"` + + // A complex type that contains the HTTP methods that you want CloudFront to + // process and forward to your origin. + Items []*string `locationNameList:"Method" type:"list" required:"true"` + + // The number of HTTP methods that you want CloudFront to forward to your origin. + // Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD and OPTIONS + // requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests). + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s AllowedMethods) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllowedMethods) GoString() string { + return s.String() +} + +// A complex type that describes how CloudFront processes requests. 
You can +// create up to 10 cache behaviors.You must create at least as many cache behaviors +// (including the default cache behavior) as you have origins if you want CloudFront +// to distribute objects from all of the origins. Each cache behavior specifies +// the one origin from which you want CloudFront to get objects. If you have +// two origins and only the default cache behavior, the default cache behavior +// will cause CloudFront to get objects from one of the origins, but the other +// origin will never be used. If you don't want to specify any cache behaviors, +// include only an empty CacheBehaviors element. Don't include an empty CacheBehavior +// element, or CloudFront returns a MalformedXML error. To delete all cache +// behaviors in an existing distribution, update the distribution configuration +// and include only an empty CacheBehaviors element. To add, change, or remove +// one or more cache behaviors, update the distribution configuration and specify +// all of the cache behaviors that you want to include in the updated distribution. +type CacheBehavior struct { + _ struct{} `type:"structure"` + + // A complex type that controls which HTTP methods CloudFront processes and + // forwards to your Amazon S3 bucket or your custom origin. There are three + // choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards + // only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, + // PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you + // may need to restrict access to your Amazon S3 bucket or to your custom origin + // so users can't perform operations that you don't want them to. For example, + // you may not want users to have permission to delete objects from your origin. + AllowedMethods *AllowedMethods `type:"structure"` + + // Whether you want CloudFront to automatically compress content for web requests + // that include Accept-Encoding: gzip in the request header. 
If so, specify + // true; if not, specify false. CloudFront compresses files larger than 1000 + // bytes and less than 1 megabyte for both Amazon S3 and custom origins. When + // a CloudFront edge location is unusually busy, some files might not be compressed. + // The value of the Content-Type header must be on the list of file types that + // CloudFront will compress. For the current list, see Serving Compressed Content + // (http://docs.aws.amazon.com/console/cloudfront/compressed-content) in the + // Amazon CloudFront Developer Guide. If you configure CloudFront to compress + // content, CloudFront removes the ETag response header from the objects that + // it compresses. The ETag header indicates that the version in a CloudFront + // edge cache is identical to the version on the origin server, but after compression + // the two versions are no longer identical. As a result, for compressed objects, + // CloudFront can't use the ETag header to determine whether an expired object + // in the CloudFront edge cache is still the latest version. + Compress *bool `type:"boolean"` + + // If you don't configure your origin to add a Cache-Control max-age directive + // or an Expires header, DefaultTTL is the default amount of time (in seconds) + // that an object is in a CloudFront cache before CloudFront forwards another + // request to your origin to determine whether the object has been updated. + // The value that you specify applies only when your origin does not add HTTP + // headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires + // to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 + // years). + DefaultTTL *int64 `type:"long"` + + // A complex type that specifies how CloudFront handles query strings, cookies + // and headers. 
+ ForwardedValues *ForwardedValues `type:"structure" required:"true"` + + // The maximum amount of time (in seconds) that an object is in a CloudFront + // cache before CloudFront forwards another request to your origin to determine + // whether the object has been updated. The value that you specify applies only + // when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control + // s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 + // seconds (100 years). + MaxTTL *int64 `type:"long"` + + // The minimum amount of time that you want objects to stay in CloudFront caches + // before CloudFront queries your origin to see whether the object has been + // updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years). + MinTTL *int64 `type:"long" required:"true"` + + // The pattern (for example, images/*.jpg) that specifies which requests you + // want this cache behavior to apply to. When CloudFront receives an end-user + // request, the requested path is compared with path patterns in the order in + // which cache behaviors are listed in the distribution. The path pattern for + // the default cache behavior is * and cannot be changed. If the request for + // an object does not match the path pattern for any cache behaviors, CloudFront + // applies the behavior in the default cache behavior. + PathPattern *string `type:"string" required:"true"` + + // Indicates whether you want to distribute media files in Microsoft Smooth + // Streaming format using the origin that is associated with this cache behavior. + // If so, specify true; if not, specify false. + SmoothStreaming *bool `type:"boolean"` + + // The value of ID for the origin that you want CloudFront to route requests + // to when a request matches the path pattern either for a cache behavior or + // for the default cache behavior. 
+ TargetOriginId *string `type:"string" required:"true"` + + // A complex type that specifies the AWS accounts, if any, that you want to + // allow to create signed URLs for private content. If you want to require signed + // URLs in requests for objects in the target origin that match the PathPattern + // for this cache behavior, specify true for Enabled, and specify the applicable + // values for Quantity and Items. For more information, go to Using a Signed + // URL to Serve Private Content in the Amazon CloudFront Developer Guide. If + // you don't want to require signed URLs in requests for objects that match + // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To + // add, change, or remove one or more trusted signers, change Enabled to true + // (if it's currently false), change Quantity as applicable, and specify all + // of the trusted signers that you want to include in the updated distribution. + TrustedSigners *TrustedSigners `type:"structure" required:"true"` + + // Use this element to specify the protocol that users can use to access the + // files in the origin specified by TargetOriginId when a request matches the + // path pattern in PathPattern. If you want CloudFront to allow end users to + // use any available protocol, specify allow-all. If you want CloudFront to + // require HTTPS, specify https. If you want CloudFront to respond to an HTTP + // request with an HTTP status code of 301 (Moved Permanently) and the HTTPS + // URL, specify redirect-to-https. The viewer then resubmits the request using + // the HTTPS URL. + ViewerProtocolPolicy *string `type:"string" required:"true" enum:"ViewerProtocolPolicy"` +} + +// String returns the string representation +func (s CacheBehavior) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheBehavior) GoString() string { + return s.String() +} + +// A complex type that contains zero or more CacheBehavior elements. 
+type CacheBehaviors struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains cache behaviors for this distribution. + // If Quantity is 0, you can omit Items. + Items []*CacheBehavior `locationNameList:"CacheBehavior" type:"list"` + + // The number of cache behaviors for this distribution. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CacheBehaviors) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheBehaviors) GoString() string { + return s.String() +} + +// A complex type that controls whether CloudFront caches the response to requests +// using the specified HTTP methods. There are two choices: - CloudFront caches +// responses to GET and HEAD requests. - CloudFront caches responses to GET, +// HEAD, and OPTIONS requests. If you pick the second choice for your S3 Origin, +// you may need to forward Access-Control-Request-Method, Access-Control-Request-Headers +// and Origin headers for the responses to be cached correctly. +type CachedMethods struct { + _ struct{} `type:"structure"` + + // A complex type that contains the HTTP methods that you want CloudFront to + // cache responses to. + Items []*string `locationNameList:"Method" type:"list" required:"true"` + + // The number of HTTP methods for which you want CloudFront to cache responses. + // Valid values are 2 (for caching responses to GET and HEAD requests) and 3 + // (for caching responses to GET, HEAD, and OPTIONS requests). 
+ Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CachedMethods) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachedMethods) GoString() string { + return s.String() +} + +// A complex type that specifies the whitelisted cookies, if any, that you want +// CloudFront to forward to your origin that is associated with this cache behavior. +type CookieNames struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains whitelisted cookies for this cache + // behavior. If Quantity is 0, you can omit Items. + Items []*string `locationNameList:"Name" type:"list"` + + // The number of whitelisted cookies for this cache behavior. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CookieNames) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CookieNames) GoString() string { + return s.String() +} + +// A complex type that specifies the cookie preferences associated with this +// cache behavior. +type CookiePreference struct { + _ struct{} `type:"structure"` + + // Use this element to specify whether you want CloudFront to forward cookies + // to the origin that is associated with this cache behavior. You can specify + // all, none or whitelist. If you choose All, CloudFront forwards all cookies + // regardless of how many your application uses. + Forward *string `type:"string" required:"true" enum:"ItemSelection"` + + // A complex type that specifies the whitelisted cookies, if any, that you want + // CloudFront to forward to your origin that is associated with this cache behavior. 
+ WhitelistedNames *CookieNames `type:"structure"` +} + +// String returns the string representation +func (s CookiePreference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CookiePreference) GoString() string { + return s.String() +} + +// The request to create a new origin access identity. +type CreateCloudFrontOriginAccessIdentityInput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + + // The origin access identity's configuration information. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type CreateCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` + + // The origin access identity's information. + CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` + + // The current version of the origin access identity created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new origin access identity just created. For + // example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A. 
+ Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// The request to create a new distribution. +type CreateDistributionInput struct { + _ struct{} `type:"structure" payload:"DistributionConfig"` + + // The distribution's configuration information. + DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type CreateDistributionOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new distribution resource just created. For + // example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionOutput) GoString() string { + return s.String() +} + +// The request to create an invalidation. +type CreateInvalidationInput struct { + _ struct{} `type:"structure" payload:"InvalidationBatch"` + + // The distribution's id. 
+ DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + + // The batch information for the invalidation. + InvalidationBatch *InvalidationBatch `locationName:"InvalidationBatch" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateInvalidationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInvalidationInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type CreateInvalidationOutput struct { + _ struct{} `type:"structure" payload:"Invalidation"` + + // The invalidation's information. + Invalidation *Invalidation `type:"structure"` + + // The fully qualified URI of the distribution and invalidation batch request, + // including the Invalidation ID. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateInvalidationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInvalidationOutput) GoString() string { + return s.String() +} + +// The request to create a new streaming distribution. +type CreateStreamingDistributionInput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + + // The streaming distribution's configuration information. + StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. 
+type CreateStreamingDistributionOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the streaming distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new streaming distribution resource just created. + // For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8. + Location *string `location:"header" locationName:"Location" type:"string"` + + // The streaming distribution's information. + StreamingDistribution *StreamingDistribution `type:"structure"` +} + +// String returns the string representation +func (s CreateStreamingDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionOutput) GoString() string { + return s.String() +} + +// A complex type that describes how you'd prefer CloudFront to respond to requests +// that result in either a 4xx or 5xx response. You can control whether a custom +// error page should be displayed, what the desired response code should be +// for this error page and how long should the error response be cached by CloudFront. +// If you don't want to specify any custom error responses, include only an +// empty CustomErrorResponses element. To delete all custom error responses +// in an existing distribution, update the distribution configuration and include +// only an empty CustomErrorResponses element. To add, change, or remove one +// or more custom error responses, update the distribution configuration and +// specify all of the custom error responses that you want to include in the +// updated distribution. +type CustomErrorResponse struct { + _ struct{} `type:"structure"` + + // The minimum amount of time you want HTTP error codes to stay in CloudFront + // caches before CloudFront queries your origin to see whether the object has + // been updated. 
You can specify a value from 0 to 31,536,000. + ErrorCachingMinTTL *int64 `type:"long"` + + // The 4xx or 5xx HTTP status code that you want to customize. For a list of + // HTTP status codes that you can customize, see CloudFront documentation. + ErrorCode *int64 `type:"integer" required:"true"` + + // The HTTP status code that you want CloudFront to return with the custom error + // page to the viewer. For a list of HTTP status codes that you can replace, + // see CloudFront Documentation. + ResponseCode *string `type:"string"` + + // The path of the custom error page (for example, /custom_404.html). The path + // is relative to the distribution and must begin with a slash (/). If the path + // includes any non-ASCII characters or unsafe characters as defined in RFC + // 1783 (http://www.ietf.org/rfc/rfc1738.txt), URL encode those characters. + // Do not URL encode any other characters in the path, or CloudFront will not + // return the custom error page to the viewer. + ResponsePagePath *string `type:"string"` +} + +// String returns the string representation +func (s CustomErrorResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomErrorResponse) GoString() string { + return s.String() +} + +// A complex type that contains zero or more CustomErrorResponse elements. +type CustomErrorResponses struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains custom error responses for this distribution. + // If Quantity is 0, you can omit Items. + Items []*CustomErrorResponse `locationNameList:"CustomErrorResponse" type:"list"` + + // The number of custom error responses for this distribution. 
+ Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CustomErrorResponses) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomErrorResponses) GoString() string { + return s.String() +} + +// A complex type that contains the list of Custom Headers for each origin. +type CustomHeaders struct { + _ struct{} `type:"structure"` + + // A complex type that contains the custom headers for this Origin. + Items []*OriginCustomHeader `locationNameList:"OriginCustomHeader" type:"list"` + + // The number of custom headers for this origin. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CustomHeaders) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomHeaders) GoString() string { + return s.String() +} + +// A customer origin. +type CustomOriginConfig struct { + _ struct{} `type:"structure"` + + // The HTTP port the custom origin listens on. + HTTPPort *int64 `type:"integer" required:"true"` + + // The HTTPS port the custom origin listens on. + HTTPSPort *int64 `type:"integer" required:"true"` + + // The origin protocol policy to apply to your origin. + OriginProtocolPolicy *string `type:"string" required:"true" enum:"OriginProtocolPolicy"` + + // The SSL/TLS protocols that you want CloudFront to use when communicating + // with your origin over HTTPS. 
+ OriginSslProtocols *OriginSslProtocols `type:"structure"` +} + +// String returns the string representation +func (s CustomOriginConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomOriginConfig) GoString() string { + return s.String() +} + +// A complex type that describes the default cache behavior if you do not specify +// a CacheBehavior element or if files don't match any of the values of PathPattern +// in CacheBehavior elements.You must create exactly one default cache behavior. +type DefaultCacheBehavior struct { + _ struct{} `type:"structure"` + + // A complex type that controls which HTTP methods CloudFront processes and + // forwards to your Amazon S3 bucket or your custom origin. There are three + // choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards + // only GET, HEAD and OPTIONS requests. - CloudFront forwards GET, HEAD, OPTIONS, + // PUT, PATCH, POST, and DELETE requests. If you pick the third choice, you + // may need to restrict access to your Amazon S3 bucket or to your custom origin + // so users can't perform operations that you don't want them to. For example, + // you may not want users to have permission to delete objects from your origin. + AllowedMethods *AllowedMethods `type:"structure"` + + // Whether you want CloudFront to automatically compress content for web requests + // that include Accept-Encoding: gzip in the request header. If so, specify + // true; if not, specify false. CloudFront compresses files larger than 1000 + // bytes and less than 1 megabyte for both Amazon S3 and custom origins. When + // a CloudFront edge location is unusually busy, some files might not be compressed. + // The value of the Content-Type header must be on the list of file types that + // CloudFront will compress. 
For the current list, see Serving Compressed Content + // (http://docs.aws.amazon.com/console/cloudfront/compressed-content) in the + // Amazon CloudFront Developer Guide. If you configure CloudFront to compress + // content, CloudFront removes the ETag response header from the objects that + // it compresses. The ETag header indicates that the version in a CloudFront + // edge cache is identical to the version on the origin server, but after compression + // the two versions are no longer identical. As a result, for compressed objects, + // CloudFront can't use the ETag header to determine whether an expired object + // in the CloudFront edge cache is still the latest version. + Compress *bool `type:"boolean"` + + // If you don't configure your origin to add a Cache-Control max-age directive + // or an Expires header, DefaultTTL is the default amount of time (in seconds) + // that an object is in a CloudFront cache before CloudFront forwards another + // request to your origin to determine whether the object has been updated. + // The value that you specify applies only when your origin does not add HTTP + // headers such as Cache-Control max-age, Cache-Control s-maxage, and Expires + // to objects. You can specify a value from 0 to 3,153,600,000 seconds (100 + // years). + DefaultTTL *int64 `type:"long"` + + // A complex type that specifies how CloudFront handles query strings, cookies + // and headers. + ForwardedValues *ForwardedValues `type:"structure" required:"true"` + + // The maximum amount of time (in seconds) that an object is in a CloudFront + // cache before CloudFront forwards another request to your origin to determine + // whether the object has been updated. The value that you specify applies only + // when your origin adds HTTP headers such as Cache-Control max-age, Cache-Control + // s-maxage, and Expires to objects. You can specify a value from 0 to 3,153,600,000 + // seconds (100 years). 
+ MaxTTL *int64 `type:"long"` + + // The minimum amount of time that you want objects to stay in CloudFront caches + // before CloudFront queries your origin to see whether the object has been + // updated.You can specify a value from 0 to 3,153,600,000 seconds (100 years). + MinTTL *int64 `type:"long" required:"true"` + + // Indicates whether you want to distribute media files in Microsoft Smooth + // Streaming format using the origin that is associated with this cache behavior. + // If so, specify true; if not, specify false. + SmoothStreaming *bool `type:"boolean"` + + // The value of ID for the origin that you want CloudFront to route requests + // to when a request matches the path pattern either for a cache behavior or + // for the default cache behavior. + TargetOriginId *string `type:"string" required:"true"` + + // A complex type that specifies the AWS accounts, if any, that you want to + // allow to create signed URLs for private content. If you want to require signed + // URLs in requests for objects in the target origin that match the PathPattern + // for this cache behavior, specify true for Enabled, and specify the applicable + // values for Quantity and Items. For more information, go to Using a Signed + // URL to Serve Private Content in the Amazon CloudFront Developer Guide. If + // you don't want to require signed URLs in requests for objects that match + // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To + // add, change, or remove one or more trusted signers, change Enabled to true + // (if it's currently false), change Quantity as applicable, and specify all + // of the trusted signers that you want to include in the updated distribution. + TrustedSigners *TrustedSigners `type:"structure" required:"true"` + + // Use this element to specify the protocol that users can use to access the + // files in the origin specified by TargetOriginId when a request matches the + // path pattern in PathPattern. 
If you want CloudFront to allow end users to + // use any available protocol, specify allow-all. If you want CloudFront to + // require HTTPS, specify https. If you want CloudFront to respond to an HTTP + // request with an HTTP status code of 301 (Moved Permanently) and the HTTPS + // URL, specify redirect-to-https. The viewer then resubmits the request using + // the HTTPS URL. + ViewerProtocolPolicy *string `type:"string" required:"true" enum:"ViewerProtocolPolicy"` +} + +// String returns the string representation +func (s DefaultCacheBehavior) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultCacheBehavior) GoString() string { + return s.String() +} + +// The request to delete a origin access identity. +type DeleteCloudFrontOriginAccessIdentityInput struct { + _ struct{} `type:"structure"` + + // The origin access identity's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received from a previous GET or PUT request. + // For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s DeleteCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() +} + +type DeleteCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// The request to delete a distribution. 
+type DeleteDistributionInput struct { + _ struct{} `type:"structure"` + + // The distribution id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when you disabled the distribution. + // For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s DeleteDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDistributionInput) GoString() string { + return s.String() +} + +type DeleteDistributionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDistributionOutput) GoString() string { + return s.String() +} + +// The request to delete a streaming distribution. +type DeleteStreamingDistributionInput struct { + _ struct{} `type:"structure"` + + // The distribution id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when you disabled the streaming + // distribution. For example: E2QWRUHAPOMQZL. 
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s DeleteStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamingDistributionInput) GoString() string { + return s.String() +} + +type DeleteStreamingDistributionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStreamingDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamingDistributionOutput) GoString() string { + return s.String() +} + +// A distribution. +type Distribution struct { + _ struct{} `type:"structure"` + + // CloudFront automatically adds this element to the response only if you've + // set up the distribution to serve private content with signed URLs. The element + // lists the key pair IDs that CloudFront is aware of for each trusted signer. + // The Signer child element lists the AWS account number of the trusted signer + // (or an empty Self element if the signer is you). The Signer element also + // includes the IDs of any active key pairs associated with the trusted signer's + // AWS account. If no KeyPairId element appears for a Signer, that signer can't + // create working signed URLs. + ActiveTrustedSigners *ActiveTrustedSigners `type:"structure" required:"true"` + + // The current configuration information for the distribution. + DistributionConfig *DistributionConfig `type:"structure" required:"true"` + + // The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net. + DomainName *string `type:"string" required:"true"` + + // The identifier for the distribution. For example: EDFDVBD632BHDS5. + Id *string `type:"string" required:"true"` + + // The number of invalidation batches currently in progress. 
+ InProgressInvalidationBatches *int64 `type:"integer" required:"true"` + + // The date and time the distribution was last modified. + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // This response element indicates the current status of the distribution. When + // the status is Deployed, the distribution's information is fully propagated + // throughout the Amazon CloudFront system. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Distribution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Distribution) GoString() string { + return s.String() +} + +// A distribution Configuration. +type DistributionConfig struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about CNAMEs (alternate domain names), + // if any, for this distribution. + Aliases *Aliases `type:"structure"` + + // A complex type that contains zero or more CacheBehavior elements. + CacheBehaviors *CacheBehaviors `type:"structure"` + + // A unique number that ensures the request can't be replayed. If the CallerReference + // is new (no matter the content of the DistributionConfig object), a new distribution + // is created. If the CallerReference is a value you already sent in a previous + // request to create a distribution, and the content of the DistributionConfig + // is identical to the original request (ignoring white space), the response + // includes the same information returned to the original request. If the CallerReference + // is a value you already sent in a previous request to create a distribution + // but the content of the DistributionConfig is different from the original + // request, CloudFront returns a DistributionAlreadyExists error. + CallerReference *string `type:"string" required:"true"` + + // Any comments you want to include about the distribution. 
+ Comment *string `type:"string" required:"true"` + + // A complex type that contains zero or more CustomErrorResponse elements. + CustomErrorResponses *CustomErrorResponses `type:"structure"` + + // A complex type that describes the default cache behavior if you do not specify + // a CacheBehavior element or if files don't match any of the values of PathPattern + // in CacheBehavior elements.You must create exactly one default cache behavior. + DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"` + + // The object that you want CloudFront to return (for example, index.html) when + // an end user requests the root URL for your distribution (http://www.example.com) + // instead of an object in your distribution (http://www.example.com/index.html). + // Specifying a default root object avoids exposing the contents of your distribution. + // If you don't want to specify a default root object when you create a distribution, + // include an empty DefaultRootObject element. To delete the default root object + // from an existing distribution, update the distribution configuration and + // include an empty DefaultRootObject element. To replace the default root object, + // update the distribution configuration and specify the new object. + DefaultRootObject *string `type:"string"` + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `type:"boolean" required:"true"` + + // A complex type that controls whether access logs are written for the distribution. + Logging *LoggingConfig `type:"structure"` + + // A complex type that contains information about origins for this distribution. + Origins *Origins `type:"structure" required:"true"` + + // A complex type that contains information about price class for this distribution. + PriceClass *string `type:"string" enum:"PriceClass"` + + // A complex type that identifies ways in which you want to restrict distribution + // of your content. 
+ Restrictions *Restrictions `type:"structure"` + + // A complex type that contains information about viewer certificates for this + // distribution. + ViewerCertificate *ViewerCertificate `type:"structure"` + + // (Optional) If you're using AWS WAF to filter CloudFront requests, the Id + // of the AWS WAF web ACL that is associated with the distribution. + WebACLId *string `type:"string"` +} + +// String returns the string representation +func (s DistributionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DistributionConfig) GoString() string { + return s.String() +} + +// A distribution list. +type DistributionList struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether more distributions remain to be listed. If + // your results were truncated, you can make a follow-up pagination request + // using the Marker request parameter to retrieve more distributions in the + // list. + IsTruncated *bool `type:"boolean" required:"true"` + + // A complex type that contains one DistributionSummary element for each distribution + // that was created by the current AWS account. + Items []*DistributionSummary `locationNameList:"DistributionSummary" type:"list"` + + // The value you provided for the Marker request parameter. + Marker *string `type:"string" required:"true"` + + // The value you provided for the MaxItems request parameter. + MaxItems *int64 `type:"integer" required:"true"` + + // If IsTruncated is true, this element is present and contains the value you + // can use for the Marker request parameter to continue listing your distributions + // where they left off. + NextMarker *string `type:"string"` + + // The number of distributions that were created by the current AWS account. 
+ Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s DistributionList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DistributionList) GoString() string { + return s.String() +} + +// A summary of the information for an Amazon CloudFront distribution. +type DistributionSummary struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about CNAMEs (alternate domain names), + // if any, for this distribution. + Aliases *Aliases `type:"structure" required:"true"` + + // A complex type that contains zero or more CacheBehavior elements. + CacheBehaviors *CacheBehaviors `type:"structure" required:"true"` + + // The comment originally specified when this distribution was created. + Comment *string `type:"string" required:"true"` + + // A complex type that contains zero or more CustomErrorResponses elements. + CustomErrorResponses *CustomErrorResponses `type:"structure" required:"true"` + + // A complex type that describes the default cache behavior if you do not specify + // a CacheBehavior element or if files don't match any of the values of PathPattern + // in CacheBehavior elements.You must create exactly one default cache behavior. + DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"` + + // The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net. + DomainName *string `type:"string" required:"true"` + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `type:"boolean" required:"true"` + + // The identifier for the distribution. For example: EDFDVBD632BHDS5. + Id *string `type:"string" required:"true"` + + // The date and time the distribution was last modified. 
+ LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // A complex type that contains information about origins for this distribution. + Origins *Origins `type:"structure" required:"true"` + + PriceClass *string `type:"string" required:"true" enum:"PriceClass"` + + // A complex type that identifies ways in which you want to restrict distribution + // of your content. + Restrictions *Restrictions `type:"structure" required:"true"` + + // This response element indicates the current status of the distribution. When + // the status is Deployed, the distribution's information is fully propagated + // throughout the Amazon CloudFront system. + Status *string `type:"string" required:"true"` + + // A complex type that contains information about viewer certificates for this + // distribution. + ViewerCertificate *ViewerCertificate `type:"structure" required:"true"` + + // The Web ACL Id (if any) associated with the distribution. + WebACLId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DistributionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DistributionSummary) GoString() string { + return s.String() +} + +// A complex type that specifies how CloudFront handles query strings, cookies +// and headers. +type ForwardedValues struct { + _ struct{} `type:"structure"` + + // A complex type that specifies how CloudFront handles cookies. + Cookies *CookiePreference `type:"structure" required:"true"` + + // A complex type that specifies the Headers, if any, that you want CloudFront + // to vary upon for this cache behavior. + Headers *Headers `type:"structure"` + + // Indicates whether you want CloudFront to forward query strings to the origin + // that is associated with this cache behavior. If so, specify true; if not, + // specify false. 
+ QueryString *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ForwardedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ForwardedValues) GoString() string { + return s.String() +} + +// A complex type that controls the countries in which your content is distributed. +// For more information about geo restriction, go to Customizing Error Responses +// in the Amazon CloudFront Developer Guide. CloudFront determines the location +// of your users using MaxMind GeoIP databases. For information about the accuracy +// of these databases, see How accurate are your GeoIP databases? on the MaxMind +// website. +type GeoRestriction struct { + _ struct{} `type:"structure"` + + // A complex type that contains a Location element for each country in which + // you want CloudFront either to distribute your content (whitelist) or not + // distribute your content (blacklist). The Location element is a two-letter, + // uppercase country code for a country that you want to include in your blacklist + // or whitelist. Include one Location element for each country. CloudFront and + // MaxMind both use ISO 3166 country codes. For the current list of countries + // and the corresponding codes, see ISO 3166-1-alpha-2 code on the International + // Organization for Standardization website. You can also refer to the country + // list in the CloudFront console, which includes both country names and codes. + Items []*string `locationNameList:"Location" type:"list"` + + // When geo restriction is enabled, this is the number of countries in your + // whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, + // and you can omit Items. 
+ Quantity *int64 `type:"integer" required:"true"` + + // The method that you want to use to restrict distribution of your content + // by country: - none: No geo restriction is enabled, meaning access to content + // is not restricted by client geo location. - blacklist: The Location elements + // specify the countries in which you do not want CloudFront to distribute your + // content. - whitelist: The Location elements specify the countries in which + // you want CloudFront to distribute your content. + RestrictionType *string `type:"string" required:"true" enum:"GeoRestrictionType"` +} + +// String returns the string representation +func (s GeoRestriction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoRestriction) GoString() string { + return s.String() +} + +// The request to get an origin access identity's configuration. +type GetCloudFrontOriginAccessIdentityConfigInput struct { + _ struct{} `type:"structure"` + + // The identity's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityConfigInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type GetCloudFrontOriginAccessIdentityConfigOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + + // The origin access identity's configuration information. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. 
+ ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityConfigOutput) GoString() string { + return s.String() +} + +// The request to get an origin access identity's information. +type GetCloudFrontOriginAccessIdentityInput struct { + _ struct{} `type:"structure"` + + // The identity's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type GetCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` + + // The origin access identity's information. + CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` + + // The current version of the origin access identity's information. For example: + // E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// The request to get a distribution configuration. +type GetDistributionConfigInput struct { + _ struct{} `type:"structure"` + + // The distribution's id. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDistributionConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDistributionConfigInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type GetDistributionConfigOutput struct { + _ struct{} `type:"structure" payload:"DistributionConfig"` + + // The distribution's configuration information. + DistributionConfig *DistributionConfig `type:"structure"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s GetDistributionConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDistributionConfigOutput) GoString() string { + return s.String() +} + +// The request to get a distribution's information. +type GetDistributionInput struct { + _ struct{} `type:"structure"` + + // The distribution's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDistributionInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type GetDistributionOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the distribution's information. For example: E2QWRUHAPOMQZL. 
+ ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s GetDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDistributionOutput) GoString() string { + return s.String() +} + +// The request to get an invalidation's information. +type GetInvalidationInput struct { + _ struct{} `type:"structure"` + + // The distribution's id. + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + + // The invalidation's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetInvalidationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInvalidationInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type GetInvalidationOutput struct { + _ struct{} `type:"structure" payload:"Invalidation"` + + // The invalidation's information. + Invalidation *Invalidation `type:"structure"` +} + +// String returns the string representation +func (s GetInvalidationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInvalidationOutput) GoString() string { + return s.String() +} + +// To request to get a streaming distribution configuration. +type GetStreamingDistributionConfigInput struct { + _ struct{} `type:"structure"` + + // The streaming distribution's id. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetStreamingDistributionConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStreamingDistributionConfigInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type GetStreamingDistributionConfigOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The streaming distribution's configuration information. + StreamingDistributionConfig *StreamingDistributionConfig `type:"structure"` +} + +// String returns the string representation +func (s GetStreamingDistributionConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStreamingDistributionConfigOutput) GoString() string { + return s.String() +} + +// The request to get a streaming distribution's information. +type GetStreamingDistributionInput struct { + _ struct{} `type:"structure"` + + // The streaming distribution's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStreamingDistributionInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type GetStreamingDistributionOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the streaming distribution's information. For example: + // E2QWRUHAPOMQZL. 
+ ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The streaming distribution's information. + StreamingDistribution *StreamingDistribution `type:"structure"` +} + +// String returns the string representation +func (s GetStreamingDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStreamingDistributionOutput) GoString() string { + return s.String() +} + +// A complex type that specifies the headers that you want CloudFront to forward +// to the origin for this cache behavior. For the headers that you specify, +// CloudFront also caches separate versions of a given object based on the header +// values in viewer requests; this is known as varying on headers. For example, +// suppose viewer requests for logo.jpg contain a custom Product header that +// has a value of either Acme or Apex, and you configure CloudFront to vary +// on the Product header. CloudFront forwards the Product header to the origin +// and caches the response from the origin once for each header value. +type Headers struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains a Name element for each header that + // you want CloudFront to forward to the origin and to vary on for this cache + // behavior. If Quantity is 0, omit Items. + Items []*string `locationNameList:"Name" type:"list"` + + // The number of different headers that you want CloudFront to forward to the + // origin and to vary on for this cache behavior. The maximum number of headers + // that you can specify by name is 10. If you want CloudFront to forward all + // headers to the origin and vary on all of them, specify 1 for Quantity and + // * for Name. If you don't want CloudFront to forward any additional headers + // to the origin or to vary on any headers, specify 0 for Quantity and omit + // Items. 
+ Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Headers) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Headers) GoString() string { + return s.String() +} + +// An invalidation. +type Invalidation struct { + _ struct{} `type:"structure"` + + // The date and time the invalidation request was first made. + CreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The identifier for the invalidation request. For example: IDFDVBD632BHDS5. + Id *string `type:"string" required:"true"` + + // The current invalidation information for the batch request. + InvalidationBatch *InvalidationBatch `type:"structure" required:"true"` + + // The status of the invalidation request. When the invalidation batch is finished, + // the status is Completed. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Invalidation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Invalidation) GoString() string { + return s.String() +} + +// An invalidation batch. +type InvalidationBatch struct { + _ struct{} `type:"structure"` + + // A unique name that ensures the request can't be replayed. If the CallerReference + // is new (no matter the content of the Path object), a new distribution is + // created. If the CallerReference is a value you already sent in a previous + // request to create an invalidation batch, and the content of each Path element + // is identical to the original request, the response includes the same information + // returned to the original request. If the CallerReference is a value you already + // sent in a previous request to create a distribution but the content of any + // Path is different from the original request, CloudFront returns an InvalidationBatchAlreadyExists + // error. 
+	CallerReference *string `type:"string" required:"true"`
+
+	// The path of the object to invalidate. The path is relative to the distribution
+	// and must begin with a slash (/). You must enclose each invalidation object
+	// with the Path element tags. If the path includes non-ASCII characters or
+	// unsafe characters as defined in RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt),
+	// URL encode those characters. Do not URL encode any other characters in the
+	// path, or CloudFront will not invalidate the old version of the updated object.
+	Paths *Paths `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InvalidationBatch) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidationBatch) GoString() string {
+	return s.String()
+}
+
+// An invalidation list.
+type InvalidationList struct {
+	_ struct{} `type:"structure"`
+
+	// A flag that indicates whether more invalidation batch requests remain to
+	// be listed. If your results were truncated, you can make a follow-up pagination
+	// request using the Marker request parameter to retrieve more invalidation
+	// batches in the list.
+	IsTruncated *bool `type:"boolean" required:"true"`
+
+	// A complex type that contains one InvalidationSummary element for each invalidation
+	// batch that was created by the current AWS account.
+	Items []*InvalidationSummary `locationNameList:"InvalidationSummary" type:"list"`
+
+	// The value you provided for the Marker request parameter.
+	Marker *string `type:"string" required:"true"`
+
+	// The value you provided for the MaxItems request parameter.
+	MaxItems *int64 `type:"integer" required:"true"`
+
+	// If IsTruncated is true, this element is present and contains the value you
+	// can use for the Marker request parameter to continue listing your invalidation
+	// batches where they left off.
+ NextMarker *string `type:"string"` + + // The number of invalidation batches that were created by the current AWS account. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s InvalidationList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidationList) GoString() string { + return s.String() +} + +// Summary of an invalidation request. +type InvalidationSummary struct { + _ struct{} `type:"structure"` + + CreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The unique ID for an invalidation request. + Id *string `type:"string" required:"true"` + + // The status of an invalidation request. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InvalidationSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidationSummary) GoString() string { + return s.String() +} + +// A complex type that lists the active CloudFront key pairs, if any, that are +// associated with AwsAccountNumber. +type KeyPairIds struct { + _ struct{} `type:"structure"` + + // A complex type that lists the active CloudFront key pairs, if any, that are + // associated with AwsAccountNumber. + Items []*string `locationNameList:"KeyPairId" type:"list"` + + // The number of active CloudFront key pairs for AwsAccountNumber. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s KeyPairIds) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyPairIds) GoString() string { + return s.String() +} + +// The request to list origin access identities. 
+type ListCloudFrontOriginAccessIdentitiesInput struct { + _ struct{} `type:"structure"` + + // Use this when paginating results to indicate where to begin in your list + // of origin access identities. The results include identities in the list that + // occur after the marker. To get the next page of results, set the Marker to + // the value of the NextMarker from the current page's response (which is also + // the ID of the last identity on that page). + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of origin access identities you want in the response body. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` +} + +// String returns the string representation +func (s ListCloudFrontOriginAccessIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCloudFrontOriginAccessIdentitiesInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type ListCloudFrontOriginAccessIdentitiesOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityList"` + + // The CloudFrontOriginAccessIdentityList type. + CloudFrontOriginAccessIdentityList *OriginAccessIdentityList `type:"structure"` +} + +// String returns the string representation +func (s ListCloudFrontOriginAccessIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCloudFrontOriginAccessIdentitiesOutput) GoString() string { + return s.String() +} + +// The request to list distributions that are associated with a specified AWS +// WAF web ACL. +type ListDistributionsByWebACLIdInput struct { + _ struct{} `type:"structure"` + + // Use Marker and MaxItems to control pagination of results. If you have more + // than MaxItems distributions that satisfy the request, the response includes + // a NextMarker element. 
To get the next page of results, submit another request. + // For the value of Marker, specify the value of NextMarker from the last response. + // (For the first request, omit Marker.) + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of distributions that you want CloudFront to return in + // the response body. The maximum and default values are both 100. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + + // The Id of the AWS WAF web ACL for which you want to list the associated distributions. + // If you specify "null" for the Id, the request returns a list of the distributions + // that aren't associated with a web ACL. + WebACLId *string `location:"uri" locationName:"WebACLId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListDistributionsByWebACLIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDistributionsByWebACLIdInput) GoString() string { + return s.String() +} + +// The response to a request to list the distributions that are associated with +// a specified AWS WAF web ACL. +type ListDistributionsByWebACLIdOutput struct { + _ struct{} `type:"structure" payload:"DistributionList"` + + // The DistributionList type. + DistributionList *DistributionList `type:"structure"` +} + +// String returns the string representation +func (s ListDistributionsByWebACLIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDistributionsByWebACLIdOutput) GoString() string { + return s.String() +} + +// The request to list your distributions. +type ListDistributionsInput struct { + _ struct{} `type:"structure"` + + // Use Marker and MaxItems to control pagination of results. If you have more + // than MaxItems distributions that satisfy the request, the response includes + // a NextMarker element. 
To get the next page of results, submit another request. + // For the value of Marker, specify the value of NextMarker from the last response. + // (For the first request, omit Marker.) + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of distributions that you want CloudFront to return in + // the response body. The maximum and default values are both 100. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` +} + +// String returns the string representation +func (s ListDistributionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDistributionsInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type ListDistributionsOutput struct { + _ struct{} `type:"structure" payload:"DistributionList"` + + // The DistributionList type. + DistributionList *DistributionList `type:"structure"` +} + +// String returns the string representation +func (s ListDistributionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDistributionsOutput) GoString() string { + return s.String() +} + +// The request to list invalidations. +type ListInvalidationsInput struct { + _ struct{} `type:"structure"` + + // The distribution's id. + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + + // Use this parameter when paginating results to indicate where to begin in + // your list of invalidation batches. Because the results are returned in decreasing + // order from most recent to oldest, the most recent results are on the first + // page, the second page will contain earlier results, and so on. To get the + // next page of results, set the Marker to the value of the NextMarker from + // the current page's response. 
This value is the same as the ID of the last + // invalidation batch on that page. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of invalidation batches you want in the response body. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` +} + +// String returns the string representation +func (s ListInvalidationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInvalidationsInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type ListInvalidationsOutput struct { + _ struct{} `type:"structure" payload:"InvalidationList"` + + // Information about invalidation batches. + InvalidationList *InvalidationList `type:"structure"` +} + +// String returns the string representation +func (s ListInvalidationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInvalidationsOutput) GoString() string { + return s.String() +} + +// The request to list your streaming distributions. +type ListStreamingDistributionsInput struct { + _ struct{} `type:"structure"` + + // Use this when paginating results to indicate where to begin in your list + // of streaming distributions. The results include distributions in the list + // that occur after the marker. To get the next page of results, set the Marker + // to the value of the NextMarker from the current page's response (which is + // also the ID of the last distribution on that page). + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of streaming distributions you want in the response body. 
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` +} + +// String returns the string representation +func (s ListStreamingDistributionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamingDistributionsInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type ListStreamingDistributionsOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionList"` + + // The StreamingDistributionList type. + StreamingDistributionList *StreamingDistributionList `type:"structure"` +} + +// String returns the string representation +func (s ListStreamingDistributionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamingDistributionsOutput) GoString() string { + return s.String() +} + +// A complex type that controls whether access logs are written for the distribution. +type LoggingConfig struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com. + Bucket *string `type:"string" required:"true"` + + // Specifies whether you want CloudFront to save access logs to an Amazon S3 + // bucket. If you do not want to enable logging when you create a distribution + // or if you want to disable logging for an existing distribution, specify false + // for Enabled, and specify empty Bucket and Prefix elements. If you specify + // false for Enabled but you specify values for Bucket, prefix and IncludeCookies, + // the values are automatically deleted. + Enabled *bool `type:"boolean" required:"true"` + + // Specifies whether you want CloudFront to include cookies in access logs, + // specify true for IncludeCookies. 
If you choose to include cookies in logs, + // CloudFront logs all cookies regardless of how you configure the cache behaviors + // for this distribution. If you do not want to include cookies when you create + // a distribution or if you want to disable include cookies for an existing + // distribution, specify false for IncludeCookies. + IncludeCookies *bool `type:"boolean" required:"true"` + + // An optional string that you want CloudFront to prefix to the access log filenames + // for this distribution, for example, myprefix/. If you want to enable logging, + // but you do not want to specify a prefix, you still must include an empty + // Prefix element in the Logging element. + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingConfig) GoString() string { + return s.String() +} + +// A complex type that describes the Amazon S3 bucket or the HTTP server (for +// example, a web server) from which CloudFront gets your files.You must create +// at least one origin. +type Origin struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the custom headers associated + // with this Origin. + CustomHeaders *CustomHeaders `type:"structure"` + + // A complex type that contains information about a custom origin. If the origin + // is an Amazon S3 bucket, use the S3OriginConfig element instead. + CustomOriginConfig *CustomOriginConfig `type:"structure"` + + // Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want + // CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. + // Custom origins: The DNS domain name for the HTTP server from which you want + // CloudFront to get objects for this origin, for example, www.example.com. 
+ DomainName *string `type:"string" required:"true"` + + // A unique identifier for the origin. The value of Id must be unique within + // the distribution. You use the value of Id when you create a cache behavior. + // The Id identifies the origin that CloudFront routes a request to when the + // request matches the path pattern for that cache behavior. + Id *string `type:"string" required:"true"` + + // An optional element that causes CloudFront to request your content from a + // directory in your Amazon S3 bucket or your custom origin. When you include + // the OriginPath element, specify the directory name, beginning with a /. CloudFront + // appends the directory name to the value of DomainName. + OriginPath *string `type:"string"` + + // A complex type that contains information about the Amazon S3 origin. If the + // origin is a custom origin, use the CustomOriginConfig element instead. + S3OriginConfig *S3OriginConfig `type:"structure"` +} + +// String returns the string representation +func (s Origin) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Origin) GoString() string { + return s.String() +} + +// CloudFront origin access identity. +type OriginAccessIdentity struct { + _ struct{} `type:"structure"` + + // The current configuration information for the identity. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` + + // The ID for the origin access identity. For example: E74FTE3AJFJ256A. + Id *string `type:"string" required:"true"` + + // The Amazon S3 canonical user ID for the origin access identity, which you + // use when giving the origin access identity read permission to an object in + // Amazon S3. 
+ S3CanonicalUserId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OriginAccessIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginAccessIdentity) GoString() string { + return s.String() +} + +// Origin access identity configuration. +type OriginAccessIdentityConfig struct { + _ struct{} `type:"structure"` + + // A unique number that ensures the request can't be replayed. If the CallerReference + // is new (no matter the content of the CloudFrontOriginAccessIdentityConfig + // object), a new origin access identity is created. If the CallerReference + // is a value you already sent in a previous request to create an identity, + // and the content of the CloudFrontOriginAccessIdentityConfig is identical + // to the original request (ignoring white space), the response includes the + // same information returned to the original request. If the CallerReference + // is a value you already sent in a previous request to create an identity but + // the content of the CloudFrontOriginAccessIdentityConfig is different from + // the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists + // error. + CallerReference *string `type:"string" required:"true"` + + // Any comments you want to include about the origin access identity. + Comment *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OriginAccessIdentityConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginAccessIdentityConfig) GoString() string { + return s.String() +} + +// The CloudFrontOriginAccessIdentityList type. +type OriginAccessIdentityList struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether more origin access identities remain to be + // listed. 
If your results were truncated, you can make a follow-up pagination + // request using the Marker request parameter to retrieve more items in the + // list. + IsTruncated *bool `type:"boolean" required:"true"` + + // A complex type that contains one CloudFrontOriginAccessIdentitySummary element + // for each origin access identity that was created by the current AWS account. + Items []*OriginAccessIdentitySummary `locationNameList:"CloudFrontOriginAccessIdentitySummary" type:"list"` + + // The value you provided for the Marker request parameter. + Marker *string `type:"string" required:"true"` + + // The value you provided for the MaxItems request parameter. + MaxItems *int64 `type:"integer" required:"true"` + + // If IsTruncated is true, this element is present and contains the value you + // can use for the Marker request parameter to continue listing your origin + // access identities where they left off. + NextMarker *string `type:"string"` + + // The number of CloudFront origin access identities that were created by the + // current AWS account. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s OriginAccessIdentityList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginAccessIdentityList) GoString() string { + return s.String() +} + +// Summary of the information about a CloudFront origin access identity. +type OriginAccessIdentitySummary struct { + _ struct{} `type:"structure"` + + // The comment for this origin access identity, as originally specified when + // created. + Comment *string `type:"string" required:"true"` + + // The ID for the origin access identity. For example: E74FTE3AJFJ256A. + Id *string `type:"string" required:"true"` + + // The Amazon S3 canonical user ID for the origin access identity, which you + // use when giving the origin access identity read permission to an object in + // Amazon S3. 
+ S3CanonicalUserId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OriginAccessIdentitySummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginAccessIdentitySummary) GoString() string { + return s.String() +} + +// A complex type that contains information related to a Header +type OriginCustomHeader struct { + _ struct{} `type:"structure"` + + // The header's name. + HeaderName *string `type:"string" required:"true"` + + // The header's value. + HeaderValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OriginCustomHeader) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginCustomHeader) GoString() string { + return s.String() +} + +// A complex type that contains the list of SSL/TLS protocols that you want +// CloudFront to use when communicating with your origin over HTTPS. +type OriginSslProtocols struct { + _ struct{} `type:"structure"` + + // A complex type that contains one SslProtocol element for each SSL/TLS protocol + // that you want to allow CloudFront to use when establishing an HTTPS connection + // with this origin. + Items []*string `locationNameList:"SslProtocol" type:"list" required:"true"` + + // The number of SSL/TLS protocols that you want to allow CloudFront to use + // when establishing an HTTPS connection with this origin. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s OriginSslProtocols) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginSslProtocols) GoString() string { + return s.String() +} + +// A complex type that contains information about origins for this distribution. 
+type Origins struct { + _ struct{} `type:"structure"` + + // A complex type that contains origins for this distribution. + Items []*Origin `locationNameList:"Origin" min:"1" type:"list"` + + // The number of origins for this distribution. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Origins) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Origins) GoString() string { + return s.String() +} + +// A complex type that contains information about the objects that you want +// to invalidate. +type Paths struct { + _ struct{} `type:"structure"` + + // A complex type that contains a list of the objects that you want to invalidate. + Items []*string `locationNameList:"Path" type:"list"` + + // The number of objects that you want to invalidate. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Paths) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Paths) GoString() string { + return s.String() +} + +// A complex type that identifies ways in which you want to restrict distribution +// of your content. +type Restrictions struct { + _ struct{} `type:"structure"` + + // A complex type that controls the countries in which your content is distributed. + // For more information about geo restriction, go to Customizing Error Responses + // in the Amazon CloudFront Developer Guide. CloudFront determines the location + // of your users using MaxMind GeoIP databases. For information about the accuracy + // of these databases, see How accurate are your GeoIP databases? on the MaxMind + // website. 
+ GeoRestriction *GeoRestriction `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Restrictions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Restrictions) GoString() string { + return s.String() +} + +// A complex type that contains information about the Amazon S3 bucket from +// which you want CloudFront to get your media files for distribution. +type S3Origin struct { + _ struct{} `type:"structure"` + + // The DNS name of the S3 origin. + DomainName *string `type:"string" required:"true"` + + // Your S3 origin's origin access identity. + OriginAccessIdentity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s S3Origin) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Origin) GoString() string { + return s.String() +} + +// A complex type that contains information about the Amazon S3 origin. If the +// origin is a custom origin, use the CustomOriginConfig element instead. +type S3OriginConfig struct { + _ struct{} `type:"structure"` + + // The CloudFront origin access identity to associate with the origin. Use an + // origin access identity to configure the origin so that end users can only + // access objects in an Amazon S3 bucket through CloudFront. If you want end + // users to be able to access objects using either the CloudFront URL or the + // Amazon S3 URL, specify an empty OriginAccessIdentity element. To delete the + // origin access identity from an existing distribution, update the distribution + // configuration and include an empty OriginAccessIdentity element. To replace + // the origin access identity, update the distribution configuration and specify + // the new origin access identity. 
Use the format origin-access-identity/cloudfront/Id + // where Id is the value that CloudFront returned in the Id element when you + // created the origin access identity. + OriginAccessIdentity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s S3OriginConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3OriginConfig) GoString() string { + return s.String() +} + +// A complex type that lists the AWS accounts that were included in the TrustedSigners +// complex type, as well as their active CloudFront key pair IDs, if any. +type Signer struct { + _ struct{} `type:"structure"` + + // Specifies an AWS account that can create signed URLs. Values: self, which + // indicates that the AWS account that was used to create the distribution can + // created signed URLs, or an AWS account number. Omit the dashes in the account + // number. + AwsAccountNumber *string `type:"string"` + + // A complex type that lists the active CloudFront key pairs, if any, that are + // associated with AwsAccountNumber. + KeyPairIds *KeyPairIds `type:"structure"` +} + +// String returns the string representation +func (s Signer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Signer) GoString() string { + return s.String() +} + +// A streaming distribution. +type StreamingDistribution struct { + _ struct{} `type:"structure"` + + // CloudFront automatically adds this element to the response only if you've + // set up the distribution to serve private content with signed URLs. The element + // lists the key pair IDs that CloudFront is aware of for each trusted signer. + // The Signer child element lists the AWS account number of the trusted signer + // (or an empty Self element if the signer is you). The Signer element also + // includes the IDs of any active key pairs associated with the trusted signer's + // AWS account. 
If no KeyPairId element appears for a Signer, that signer can't + // create working signed URLs. + ActiveTrustedSigners *ActiveTrustedSigners `type:"structure" required:"true"` + + // The domain name corresponding to the streaming distribution. For example: + // s5c39gqb8ow64r.cloudfront.net. + DomainName *string `type:"string" required:"true"` + + // The identifier for the streaming distribution. For example: EGTXBD79H29TRA8. + Id *string `type:"string" required:"true"` + + // The date and time the distribution was last modified. + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The current status of the streaming distribution. When the status is Deployed, + // the distribution's information is fully propagated throughout the Amazon + // CloudFront system. + Status *string `type:"string" required:"true"` + + // The current configuration information for the streaming distribution. + StreamingDistributionConfig *StreamingDistributionConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s StreamingDistribution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingDistribution) GoString() string { + return s.String() +} + +// The configuration for the streaming distribution. +type StreamingDistributionConfig struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about CNAMEs (alternate domain names), + // if any, for this streaming distribution. + Aliases *Aliases `type:"structure"` + + // A unique number that ensures the request can't be replayed. If the CallerReference + // is new (no matter the content of the StreamingDistributionConfig object), + // a new streaming distribution is created. 
If the CallerReference is a value + // you already sent in a previous request to create a streaming distribution, + // and the content of the StreamingDistributionConfig is identical to the original + // request (ignoring white space), the response includes the same information + // returned to the original request. If the CallerReference is a value you already + // sent in a previous request to create a streaming distribution but the content + // of the StreamingDistributionConfig is different from the original request, + // CloudFront returns a DistributionAlreadyExists error. + CallerReference *string `type:"string" required:"true"` + + // Any comments you want to include about the streaming distribution. + Comment *string `type:"string" required:"true"` + + // Whether the streaming distribution is enabled to accept end user requests + // for content. + Enabled *bool `type:"boolean" required:"true"` + + // A complex type that controls whether access logs are written for the streaming + // distribution. + Logging *StreamingLoggingConfig `type:"structure"` + + // A complex type that contains information about price class for this streaming + // distribution. + PriceClass *string `type:"string" enum:"PriceClass"` + + // A complex type that contains information about the Amazon S3 bucket from + // which you want CloudFront to get your media files for distribution. + S3Origin *S3Origin `type:"structure" required:"true"` + + // A complex type that specifies the AWS accounts, if any, that you want to + // allow to create signed URLs for private content. If you want to require signed + // URLs in requests for objects in the target origin that match the PathPattern + // for this cache behavior, specify true for Enabled, and specify the applicable + // values for Quantity and Items. For more information, go to Using a Signed + // URL to Serve Private Content in the Amazon CloudFront Developer Guide. 
If + // you don't want to require signed URLs in requests for objects that match + // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To + // add, change, or remove one or more trusted signers, change Enabled to true + // (if it's currently false), change Quantity as applicable, and specify all + // of the trusted signers that you want to include in the updated distribution. + TrustedSigners *TrustedSigners `type:"structure" required:"true"` +} + +// String returns the string representation +func (s StreamingDistributionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingDistributionConfig) GoString() string { + return s.String() +} + +// A streaming distribution list. +type StreamingDistributionList struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether more streaming distributions remain to be listed. + // If your results were truncated, you can make a follow-up pagination request + // using the Marker request parameter to retrieve more distributions in the + // list. + IsTruncated *bool `type:"boolean" required:"true"` + + // A complex type that contains one StreamingDistributionSummary element for + // each distribution that was created by the current AWS account. + Items []*StreamingDistributionSummary `locationNameList:"StreamingDistributionSummary" type:"list"` + + // The value you provided for the Marker request parameter. + Marker *string `type:"string" required:"true"` + + // The value you provided for the MaxItems request parameter. + MaxItems *int64 `type:"integer" required:"true"` + + // If IsTruncated is true, this element is present and contains the value you + // can use for the Marker request parameter to continue listing your streaming + // distributions where they left off. + NextMarker *string `type:"string"` + + // The number of streaming distributions that were created by the current AWS + // account. 
+ Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s StreamingDistributionList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingDistributionList) GoString() string { + return s.String() +} + +// A summary of the information for an Amazon CloudFront streaming distribution. +type StreamingDistributionSummary struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about CNAMEs (alternate domain names), + // if any, for this streaming distribution. + Aliases *Aliases `type:"structure" required:"true"` + + // The comment originally specified when this distribution was created. + Comment *string `type:"string" required:"true"` + + // The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net. + DomainName *string `type:"string" required:"true"` + + // Whether the distribution is enabled to accept end user requests for content. + Enabled *bool `type:"boolean" required:"true"` + + // The identifier for the distribution. For example: EDFDVBD632BHDS5. + Id *string `type:"string" required:"true"` + + // The date and time the distribution was last modified. + LastModifiedTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + PriceClass *string `type:"string" required:"true" enum:"PriceClass"` + + // A complex type that contains information about the Amazon S3 bucket from + // which you want CloudFront to get your media files for distribution. + S3Origin *S3Origin `type:"structure" required:"true"` + + // Indicates the current status of the distribution. When the status is Deployed, + // the distribution's information is fully propagated throughout the Amazon + // CloudFront system. + Status *string `type:"string" required:"true"` + + // A complex type that specifies the AWS accounts, if any, that you want to + // allow to create signed URLs for private content. 
If you want to require signed + // URLs in requests for objects in the target origin that match the PathPattern + // for this cache behavior, specify true for Enabled, and specify the applicable + // values for Quantity and Items. For more information, go to Using a Signed + // URL to Serve Private Content in the Amazon CloudFront Developer Guide. If + // you don't want to require signed URLs in requests for objects that match + // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To + // add, change, or remove one or more trusted signers, change Enabled to true + // (if it's currently false), change Quantity as applicable, and specify all + // of the trusted signers that you want to include in the updated distribution. + TrustedSigners *TrustedSigners `type:"structure" required:"true"` +} + +// String returns the string representation +func (s StreamingDistributionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingDistributionSummary) GoString() string { + return s.String() +} + +// A complex type that controls whether access logs are written for this streaming +// distribution. +type StreamingLoggingConfig struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com. + Bucket *string `type:"string" required:"true"` + + // Specifies whether you want CloudFront to save access logs to an Amazon S3 + // bucket. If you do not want to enable logging when you create a streaming + // distribution or if you want to disable logging for an existing streaming + // distribution, specify false for Enabled, and specify empty Bucket and Prefix + // elements. If you specify false for Enabled but you specify values for Bucket + // and Prefix, the values are automatically deleted. 
+ Enabled *bool `type:"boolean" required:"true"` + + // An optional string that you want CloudFront to prefix to the access log filenames + // for this streaming distribution, for example, myprefix/. If you want to enable + // logging, but you do not want to specify a prefix, you still must include + // an empty Prefix element in the Logging element. + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StreamingLoggingConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingLoggingConfig) GoString() string { + return s.String() +} + +// A complex type that specifies the AWS accounts, if any, that you want to +// allow to create signed URLs for private content. If you want to require signed +// URLs in requests for objects in the target origin that match the PathPattern +// for this cache behavior, specify true for Enabled, and specify the applicable +// values for Quantity and Items. For more information, go to Using a Signed +// URL to Serve Private Content in the Amazon CloudFront Developer Guide. If +// you don't want to require signed URLs in requests for objects that match +// PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. To +// add, change, or remove one or more trusted signers, change Enabled to true +// (if it's currently false), change Quantity as applicable, and specify all +// of the trusted signers that you want to include in the updated distribution. +type TrustedSigners struct { + _ struct{} `type:"structure"` + + // Specifies whether you want to require end users to use signed URLs to access + // the files specified by PathPattern and TargetOriginId. + Enabled *bool `type:"boolean" required:"true"` + + // Optional: A complex type that contains trusted signers for this cache behavior. + // If Quantity is 0, you can omit Items. 
+ Items []*string `locationNameList:"AwsAccountNumber" type:"list"` + + // The number of trusted signers for this cache behavior. + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s TrustedSigners) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedSigners) GoString() string { + return s.String() +} + +// The request to update an origin access identity. +type UpdateCloudFrontOriginAccessIdentityInput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + + // The identity's configuration information. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true"` + + // The identity's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when retrieving the identity's + // configuration. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s UpdateCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type UpdateCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` + + // The origin access identity's information. + CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. 
+ ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s UpdateCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// The request to update a distribution. +type UpdateDistributionInput struct { + _ struct{} `type:"structure" payload:"DistributionConfig"` + + // The distribution's configuration information. + DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true"` + + // The distribution's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when retrieving the distribution's + // configuration. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s UpdateDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDistributionInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type UpdateDistributionOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s UpdateDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDistributionOutput) GoString() string { + return s.String() +} + +// The request to update a streaming distribution. 
+type UpdateStreamingDistributionInput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + + // The streaming distribution's id. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received when retrieving the streaming distribution's + // configuration. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // The streaming distribution's configuration information. + StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStreamingDistributionInput) GoString() string { + return s.String() +} + +// The returned result of the corresponding request. +type UpdateStreamingDistributionOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The streaming distribution's information. + StreamingDistribution *StreamingDistribution `type:"structure"` +} + +// String returns the string representation +func (s UpdateStreamingDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStreamingDistributionOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about viewer certificates for this +// distribution. 
+type ViewerCertificate struct { + _ struct{} `type:"structure"` + + // If you want viewers to use HTTPS to request your objects and you're using + // an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), + // you can use your own IAM or ACM certificate. For ACM, set to the ACM certificate + // ARN. For IAM, set to the IAM certificate identifier. + Certificate *string `type:"string"` + + // If you want viewers to use HTTPS to request your objects and you're using + // the CloudFront domain name of your distribution in your object URLs (for + // example, https://d111111abcdef8.cloudfront.net/logo.jpg), set to "cloudfront". + // If you want viewers to use HTTPS to request your objects and you're using + // an alternate domain name in your object URLs (for example, https://example.com/logo.jpg), + // you can use your own IAM or ACM certificate. To use an ACM certificate, set + // to "acm" and update the Certificate to the ACM certificate ARN. To use an + // IAM certificate, set to "iam" and update the Certificate to the IAM certificate + // identifier. + CertificateSource *string `type:"string" enum:"CertificateSource"` + + // Note: this field is deprecated. Please use "cloudfront" as CertificateSource + // and omit specifying a Certificate. If you want viewers to use HTTPS to request + // your objects and you're using the CloudFront domain name of your distribution + // in your object URLs (for example, https://d111111abcdef8.cloudfront.net/logo.jpg), + // set to true. Omit this value if you are setting an IAMCertificateId. + CloudFrontDefaultCertificate *bool `type:"boolean"` + + // Note: this field is deprecated. Please use "iam" as CertificateSource and + // specify the IAM certificate Id as the Certificate. 
If you want viewers to + // use HTTPS to request your objects and you're using an alternate domain name + // in your object URLs (for example, https://example.com/logo.jpg), specify + // the IAM certificate identifier of the custom viewer certificate for this + // distribution. Specify either this value or CloudFrontDefaultCertificate. + IAMCertificateId *string `type:"string"` + + // Specify the minimum version of the SSL protocol that you want CloudFront + // to use, SSLv3 or TLSv1, for HTTPS connections. CloudFront will serve your + // objects only to browsers or devices that support at least the SSL version + // that you specify. The TLSv1 protocol is more secure, so we recommend that + // you specify SSLv3 only if your users are using browsers or devices that don't + // support TLSv1. If you're using a custom certificate (if you specify a value + // for IAMCertificateId) and if you're using dedicated IP (if you specify vip + // for SSLSupportMethod), you can choose SSLv3 or TLSv1 as the MinimumProtocolVersion. + // If you're using a custom certificate (if you specify a value for IAMCertificateId) + // and if you're using SNI (if you specify sni-only for SSLSupportMethod), you + // must specify TLSv1 for MinimumProtocolVersion. + MinimumProtocolVersion *string `type:"string" enum:"MinimumProtocolVersion"` + + // If you specify a value for IAMCertificateId, you must also specify how you + // want CloudFront to serve HTTPS requests. Valid values are vip and sni-only. + // If you specify vip, CloudFront uses dedicated IP addresses for your content + // and can respond to HTTPS requests from any viewer. However, you must request + // permission to use this feature, and you incur additional monthly charges. + // If you specify sni-only, CloudFront can only respond to HTTPS requests from + // viewers that support Server Name Indication (SNI). All modern browsers support + // SNI, but some browsers still in use don't support SNI. 
Do not specify a value + // for SSLSupportMethod if you specified true for CloudFrontDefaultCertificate. + SSLSupportMethod *string `type:"string" enum:"SSLSupportMethod"` +} + +// String returns the string representation +func (s ViewerCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ViewerCertificate) GoString() string { + return s.String() +} + +const ( + // @enum CertificateSource + CertificateSourceCloudfront = "cloudfront" + // @enum CertificateSource + CertificateSourceIam = "iam" + // @enum CertificateSource + CertificateSourceAcm = "acm" +) + +const ( + // @enum GeoRestrictionType + GeoRestrictionTypeBlacklist = "blacklist" + // @enum GeoRestrictionType + GeoRestrictionTypeWhitelist = "whitelist" + // @enum GeoRestrictionType + GeoRestrictionTypeNone = "none" +) + +const ( + // @enum ItemSelection + ItemSelectionNone = "none" + // @enum ItemSelection + ItemSelectionWhitelist = "whitelist" + // @enum ItemSelection + ItemSelectionAll = "all" +) + +const ( + // @enum Method + MethodGet = "GET" + // @enum Method + MethodHead = "HEAD" + // @enum Method + MethodPost = "POST" + // @enum Method + MethodPut = "PUT" + // @enum Method + MethodPatch = "PATCH" + // @enum Method + MethodOptions = "OPTIONS" + // @enum Method + MethodDelete = "DELETE" +) + +const ( + // @enum MinimumProtocolVersion + MinimumProtocolVersionSslv3 = "SSLv3" + // @enum MinimumProtocolVersion + MinimumProtocolVersionTlsv1 = "TLSv1" +) + +const ( + // @enum OriginProtocolPolicy + OriginProtocolPolicyHttpOnly = "http-only" + // @enum OriginProtocolPolicy + OriginProtocolPolicyMatchViewer = "match-viewer" + // @enum OriginProtocolPolicy + OriginProtocolPolicyHttpsOnly = "https-only" +) + +const ( + // @enum PriceClass + PriceClassPriceClass100 = "PriceClass_100" + // @enum PriceClass + PriceClassPriceClass200 = "PriceClass_200" + // @enum PriceClass + PriceClassPriceClassAll = "PriceClass_All" +) + +const ( + // @enum 
SSLSupportMethod + SSLSupportMethodSniOnly = "sni-only" + // @enum SSLSupportMethod + SSLSupportMethodVip = "vip" +) + +const ( + // @enum SslProtocol + SslProtocolSslv3 = "SSLv3" + // @enum SslProtocol + SslProtocolTlsv1 = "TLSv1" + // @enum SslProtocol + SslProtocolTlsv11 = "TLSv1.1" + // @enum SslProtocol + SslProtocolTlsv12 = "TLSv1.2" +) + +const ( + // @enum ViewerProtocolPolicy + ViewerProtocolPolicyAllowAll = "allow-all" + // @enum ViewerProtocolPolicy + ViewerProtocolPolicyHttpsOnly = "https-only" + // @enum ViewerProtocolPolicy + ViewerProtocolPolicyRedirectToHttps = "redirect-to-https" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/cloudfrontiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,110 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudfrontiface provides an interface for the Amazon CloudFront. +package cloudfrontiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudfront" +) + +// CloudFrontAPI is the interface type for cloudfront.CloudFront. 
+type CloudFrontAPI interface { + CreateCloudFrontOriginAccessIdentityRequest(*cloudfront.CreateCloudFrontOriginAccessIdentityInput) (*request.Request, *cloudfront.CreateCloudFrontOriginAccessIdentityOutput) + + CreateCloudFrontOriginAccessIdentity(*cloudfront.CreateCloudFrontOriginAccessIdentityInput) (*cloudfront.CreateCloudFrontOriginAccessIdentityOutput, error) + + CreateDistributionRequest(*cloudfront.CreateDistributionInput) (*request.Request, *cloudfront.CreateDistributionOutput) + + CreateDistribution(*cloudfront.CreateDistributionInput) (*cloudfront.CreateDistributionOutput, error) + + CreateInvalidationRequest(*cloudfront.CreateInvalidationInput) (*request.Request, *cloudfront.CreateInvalidationOutput) + + CreateInvalidation(*cloudfront.CreateInvalidationInput) (*cloudfront.CreateInvalidationOutput, error) + + CreateStreamingDistributionRequest(*cloudfront.CreateStreamingDistributionInput) (*request.Request, *cloudfront.CreateStreamingDistributionOutput) + + CreateStreamingDistribution(*cloudfront.CreateStreamingDistributionInput) (*cloudfront.CreateStreamingDistributionOutput, error) + + DeleteCloudFrontOriginAccessIdentityRequest(*cloudfront.DeleteCloudFrontOriginAccessIdentityInput) (*request.Request, *cloudfront.DeleteCloudFrontOriginAccessIdentityOutput) + + DeleteCloudFrontOriginAccessIdentity(*cloudfront.DeleteCloudFrontOriginAccessIdentityInput) (*cloudfront.DeleteCloudFrontOriginAccessIdentityOutput, error) + + DeleteDistributionRequest(*cloudfront.DeleteDistributionInput) (*request.Request, *cloudfront.DeleteDistributionOutput) + + DeleteDistribution(*cloudfront.DeleteDistributionInput) (*cloudfront.DeleteDistributionOutput, error) + + DeleteStreamingDistributionRequest(*cloudfront.DeleteStreamingDistributionInput) (*request.Request, *cloudfront.DeleteStreamingDistributionOutput) + + DeleteStreamingDistribution(*cloudfront.DeleteStreamingDistributionInput) (*cloudfront.DeleteStreamingDistributionOutput, error) + + 
GetCloudFrontOriginAccessIdentityRequest(*cloudfront.GetCloudFrontOriginAccessIdentityInput) (*request.Request, *cloudfront.GetCloudFrontOriginAccessIdentityOutput) + + GetCloudFrontOriginAccessIdentity(*cloudfront.GetCloudFrontOriginAccessIdentityInput) (*cloudfront.GetCloudFrontOriginAccessIdentityOutput, error) + + GetCloudFrontOriginAccessIdentityConfigRequest(*cloudfront.GetCloudFrontOriginAccessIdentityConfigInput) (*request.Request, *cloudfront.GetCloudFrontOriginAccessIdentityConfigOutput) + + GetCloudFrontOriginAccessIdentityConfig(*cloudfront.GetCloudFrontOriginAccessIdentityConfigInput) (*cloudfront.GetCloudFrontOriginAccessIdentityConfigOutput, error) + + GetDistributionRequest(*cloudfront.GetDistributionInput) (*request.Request, *cloudfront.GetDistributionOutput) + + GetDistribution(*cloudfront.GetDistributionInput) (*cloudfront.GetDistributionOutput, error) + + GetDistributionConfigRequest(*cloudfront.GetDistributionConfigInput) (*request.Request, *cloudfront.GetDistributionConfigOutput) + + GetDistributionConfig(*cloudfront.GetDistributionConfigInput) (*cloudfront.GetDistributionConfigOutput, error) + + GetInvalidationRequest(*cloudfront.GetInvalidationInput) (*request.Request, *cloudfront.GetInvalidationOutput) + + GetInvalidation(*cloudfront.GetInvalidationInput) (*cloudfront.GetInvalidationOutput, error) + + GetStreamingDistributionRequest(*cloudfront.GetStreamingDistributionInput) (*request.Request, *cloudfront.GetStreamingDistributionOutput) + + GetStreamingDistribution(*cloudfront.GetStreamingDistributionInput) (*cloudfront.GetStreamingDistributionOutput, error) + + GetStreamingDistributionConfigRequest(*cloudfront.GetStreamingDistributionConfigInput) (*request.Request, *cloudfront.GetStreamingDistributionConfigOutput) + + GetStreamingDistributionConfig(*cloudfront.GetStreamingDistributionConfigInput) (*cloudfront.GetStreamingDistributionConfigOutput, error) + + 
ListCloudFrontOriginAccessIdentitiesRequest(*cloudfront.ListCloudFrontOriginAccessIdentitiesInput) (*request.Request, *cloudfront.ListCloudFrontOriginAccessIdentitiesOutput) + + ListCloudFrontOriginAccessIdentities(*cloudfront.ListCloudFrontOriginAccessIdentitiesInput) (*cloudfront.ListCloudFrontOriginAccessIdentitiesOutput, error) + + ListCloudFrontOriginAccessIdentitiesPages(*cloudfront.ListCloudFrontOriginAccessIdentitiesInput, func(*cloudfront.ListCloudFrontOriginAccessIdentitiesOutput, bool) bool) error + + ListDistributionsRequest(*cloudfront.ListDistributionsInput) (*request.Request, *cloudfront.ListDistributionsOutput) + + ListDistributions(*cloudfront.ListDistributionsInput) (*cloudfront.ListDistributionsOutput, error) + + ListDistributionsPages(*cloudfront.ListDistributionsInput, func(*cloudfront.ListDistributionsOutput, bool) bool) error + + ListDistributionsByWebACLIdRequest(*cloudfront.ListDistributionsByWebACLIdInput) (*request.Request, *cloudfront.ListDistributionsByWebACLIdOutput) + + ListDistributionsByWebACLId(*cloudfront.ListDistributionsByWebACLIdInput) (*cloudfront.ListDistributionsByWebACLIdOutput, error) + + ListInvalidationsRequest(*cloudfront.ListInvalidationsInput) (*request.Request, *cloudfront.ListInvalidationsOutput) + + ListInvalidations(*cloudfront.ListInvalidationsInput) (*cloudfront.ListInvalidationsOutput, error) + + ListInvalidationsPages(*cloudfront.ListInvalidationsInput, func(*cloudfront.ListInvalidationsOutput, bool) bool) error + + ListStreamingDistributionsRequest(*cloudfront.ListStreamingDistributionsInput) (*request.Request, *cloudfront.ListStreamingDistributionsOutput) + + ListStreamingDistributions(*cloudfront.ListStreamingDistributionsInput) (*cloudfront.ListStreamingDistributionsOutput, error) + + ListStreamingDistributionsPages(*cloudfront.ListStreamingDistributionsInput, func(*cloudfront.ListStreamingDistributionsOutput, bool) bool) error + + 
UpdateCloudFrontOriginAccessIdentityRequest(*cloudfront.UpdateCloudFrontOriginAccessIdentityInput) (*request.Request, *cloudfront.UpdateCloudFrontOriginAccessIdentityOutput) + + UpdateCloudFrontOriginAccessIdentity(*cloudfront.UpdateCloudFrontOriginAccessIdentityInput) (*cloudfront.UpdateCloudFrontOriginAccessIdentityOutput, error) + + UpdateDistributionRequest(*cloudfront.UpdateDistributionInput) (*request.Request, *cloudfront.UpdateDistributionOutput) + + UpdateDistribution(*cloudfront.UpdateDistributionInput) (*cloudfront.UpdateDistributionOutput, error) + + UpdateStreamingDistributionRequest(*cloudfront.UpdateStreamingDistributionInput) (*request.Request, *cloudfront.UpdateStreamingDistributionOutput) + + UpdateStreamingDistribution(*cloudfront.UpdateStreamingDistributionInput) (*cloudfront.UpdateStreamingDistributionOutput, error) +} + +var _ CloudFrontAPI = (*cloudfront.CloudFront)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,915 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudfront_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudfront" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudFront_CreateCloudFrontOriginAccessIdentity() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.CreateCloudFrontOriginAccessIdentityInput{ + CloudFrontOriginAccessIdentityConfig: &cloudfront.OriginAccessIdentityConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + }, + } + resp, err := svc.CreateCloudFrontOriginAccessIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_CreateDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.CreateDistributionInput{ + DistributionConfig: &cloudfront.DistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + }, + MinTTL: aws.Int64(1), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + Compress: aws.Bool(true), + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + Enabled: aws.Bool(true), // Required + Origins: &cloudfront.Origins{ // Required + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.Origin{ + { // Required + DomainName: aws.String("string"), // Required + Id: aws.String("string"), // Required + CustomHeaders: &cloudfront.CustomHeaders{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.OriginCustomHeader{ + { // Required + HeaderName: aws.String("string"), // Required + HeaderValue: aws.String("string"), // Required + }, + // More values... + }, + }, + CustomOriginConfig: &cloudfront.CustomOriginConfig{ + HTTPPort: aws.Int64(1), // Required + HTTPSPort: aws.Int64(1), // Required + OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required + OriginSslProtocols: &cloudfront.OriginSslProtocols{ + Items: []*string{ // Required + aws.String("SslProtocol"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + OriginPath: aws.String("string"), + S3OriginConfig: &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String("string"), // Required + }, + }, + // More values... 
+ }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + CacheBehaviors: &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CacheBehavior{ + { // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + PathPattern: aws.String("string"), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + Compress: aws.Bool(true), + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + // More values... 
+ }, + }, + CustomErrorResponses: &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CustomErrorResponse{ + { // Required + ErrorCode: aws.Int64(1), // Required + ErrorCachingMinTTL: aws.Int64(1), + ResponseCode: aws.String("string"), + ResponsePagePath: aws.String("string"), + }, + // More values... + }, + }, + DefaultRootObject: aws.String("string"), + Logging: &cloudfront.LoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + IncludeCookies: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + Restrictions: &cloudfront.Restrictions{ + GeoRestriction: &cloudfront.GeoRestriction{ // Required + Quantity: aws.Int64(1), // Required + RestrictionType: aws.String("GeoRestrictionType"), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + ViewerCertificate: &cloudfront.ViewerCertificate{ + Certificate: aws.String("string"), + CertificateSource: aws.String("CertificateSource"), + CloudFrontDefaultCertificate: aws.Bool(true), + IAMCertificateId: aws.String("string"), + MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), + SSLSupportMethod: aws.String("SSLSupportMethod"), + }, + WebACLId: aws.String("string"), + }, + } + resp, err := svc.CreateDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFront_CreateInvalidation() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.CreateInvalidationInput{ + DistributionId: aws.String("string"), // Required + InvalidationBatch: &cloudfront.InvalidationBatch{ // Required + CallerReference: aws.String("string"), // Required + Paths: &cloudfront.Paths{ // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + } + resp, err := svc.CreateInvalidation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_CreateStreamingDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.CreateStreamingDistributionInput{ + StreamingDistributionConfig: &cloudfront.StreamingDistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + S3Origin: &cloudfront.S3Origin{ // Required + DomainName: aws.String("string"), // Required + OriginAccessIdentity: aws.String("string"), // Required + }, + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + Logging: &cloudfront.StreamingLoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + }, + } + resp, err := svc.CreateStreamingDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_DeleteCloudFrontOriginAccessIdentity() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.DeleteCloudFrontOriginAccessIdentityInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.DeleteCloudFrontOriginAccessIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_DeleteDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.DeleteDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.DeleteDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_DeleteStreamingDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.DeleteStreamingDistributionInput{ + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.DeleteStreamingDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetCloudFrontOriginAccessIdentity() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetCloudFrontOriginAccessIdentityInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetCloudFrontOriginAccessIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetCloudFrontOriginAccessIdentityConfig() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetCloudFrontOriginAccessIdentityConfigInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetCloudFrontOriginAccessIdentityConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetDistributionInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetDistributionConfig() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetDistributionConfigInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetDistributionConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFront_GetInvalidation() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetInvalidationInput{ + DistributionId: aws.String("string"), // Required + Id: aws.String("string"), // Required + } + resp, err := svc.GetInvalidation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetStreamingDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetStreamingDistributionInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetStreamingDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_GetStreamingDistributionConfig() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.GetStreamingDistributionConfigInput{ + Id: aws.String("string"), // Required + } + resp, err := svc.GetStreamingDistributionConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_ListCloudFrontOriginAccessIdentities() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListCloudFrontOriginAccessIdentitiesInput{ + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListCloudFrontOriginAccessIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFront_ListDistributions() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListDistributionsInput{ + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListDistributions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_ListDistributionsByWebACLId() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListDistributionsByWebACLIdInput{ + WebACLId: aws.String("string"), // Required + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListDistributionsByWebACLId(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_ListInvalidations() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListInvalidationsInput{ + DistributionId: aws.String("string"), // Required + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListInvalidations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_ListStreamingDistributions() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.ListStreamingDistributionsInput{ + Marker: aws.String("string"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListStreamingDistributions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFront_UpdateCloudFrontOriginAccessIdentity() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.UpdateCloudFrontOriginAccessIdentityInput{ + CloudFrontOriginAccessIdentityConfig: &cloudfront.OriginAccessIdentityConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + }, + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.UpdateCloudFrontOriginAccessIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudFront_UpdateDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.UpdateDistributionInput{ + DistributionConfig: &cloudfront.DistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{ // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + Compress: aws.Bool(true), + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + Enabled: aws.Bool(true), // Required + Origins: &cloudfront.Origins{ // Required + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.Origin{ + { // Required + DomainName: aws.String("string"), // Required + Id: aws.String("string"), // Required + CustomHeaders: &cloudfront.CustomHeaders{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.OriginCustomHeader{ + { // Required + HeaderName: aws.String("string"), // Required + HeaderValue: aws.String("string"), // Required + }, + // More values... + }, + }, + CustomOriginConfig: &cloudfront.CustomOriginConfig{ + HTTPPort: aws.Int64(1), // Required + HTTPSPort: aws.Int64(1), // Required + OriginProtocolPolicy: aws.String("OriginProtocolPolicy"), // Required + OriginSslProtocols: &cloudfront.OriginSslProtocols{ + Items: []*string{ // Required + aws.String("SslProtocol"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + OriginPath: aws.String("string"), + S3OriginConfig: &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String("string"), // Required + }, + }, + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... 
+ }, + }, + CacheBehaviors: &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CacheBehavior{ + { // Required + ForwardedValues: &cloudfront.ForwardedValues{ // Required + Cookies: &cloudfront.CookiePreference{ // Required + Forward: aws.String("ItemSelection"), // Required + WhitelistedNames: &cloudfront.CookieNames{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + QueryString: aws.Bool(true), // Required + Headers: &cloudfront.Headers{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + MinTTL: aws.Int64(1), // Required + PathPattern: aws.String("string"), // Required + TargetOriginId: aws.String("string"), // Required + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + ViewerProtocolPolicy: aws.String("ViewerProtocolPolicy"), // Required + AllowedMethods: &cloudfront.AllowedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + CachedMethods: &cloudfront.CachedMethods{ + Items: []*string{ // Required + aws.String("Method"), // Required + // More values... + }, + Quantity: aws.Int64(1), // Required + }, + }, + Compress: aws.Bool(true), + DefaultTTL: aws.Int64(1), + MaxTTL: aws.Int64(1), + SmoothStreaming: aws.Bool(true), + }, + // More values... + }, + }, + CustomErrorResponses: &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(1), // Required + Items: []*cloudfront.CustomErrorResponse{ + { // Required + ErrorCode: aws.Int64(1), // Required + ErrorCachingMinTTL: aws.Int64(1), + ResponseCode: aws.String("string"), + ResponsePagePath: aws.String("string"), + }, + // More values... 
+ }, + }, + DefaultRootObject: aws.String("string"), + Logging: &cloudfront.LoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + IncludeCookies: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + Restrictions: &cloudfront.Restrictions{ + GeoRestriction: &cloudfront.GeoRestriction{ // Required + Quantity: aws.Int64(1), // Required + RestrictionType: aws.String("GeoRestrictionType"), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + ViewerCertificate: &cloudfront.ViewerCertificate{ + Certificate: aws.String("string"), + CertificateSource: aws.String("CertificateSource"), + CloudFrontDefaultCertificate: aws.Bool(true), + IAMCertificateId: aws.String("string"), + MinimumProtocolVersion: aws.String("MinimumProtocolVersion"), + SSLSupportMethod: aws.String("SSLSupportMethod"), + }, + WebACLId: aws.String("string"), + }, + Id: aws.String("string"), // Required + IfMatch: aws.String("string"), + } + resp, err := svc.UpdateDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudFront_UpdateStreamingDistribution() { + svc := cloudfront.New(session.New()) + + params := &cloudfront.UpdateStreamingDistributionInput{ + Id: aws.String("string"), // Required + StreamingDistributionConfig: &cloudfront.StreamingDistributionConfig{ // Required + CallerReference: aws.String("string"), // Required + Comment: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + S3Origin: &cloudfront.S3Origin{ // Required + DomainName: aws.String("string"), // Required + OriginAccessIdentity: aws.String("string"), // Required + }, + TrustedSigners: &cloudfront.TrustedSigners{ // Required + Enabled: aws.Bool(true), // Required + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + Aliases: &cloudfront.Aliases{ + Quantity: aws.Int64(1), // Required + Items: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + Logging: &cloudfront.StreamingLoggingConfig{ + Bucket: aws.String("string"), // Required + Enabled: aws.Bool(true), // Required + Prefix: aws.String("string"), // Required + }, + PriceClass: aws.String("PriceClass"), + }, + IfMatch: aws.String("string"), + } + resp, err := svc.UpdateStreamingDistribution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudfront + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// CloudFront is a client for CloudFront. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudFront struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cloudfront" + +// New creates a new instance of the CloudFront client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudFront client from just a session. 
+// svc := cloudfront.New(mySession) +// +// // Create a CloudFront client with additional configuration +// svc := cloudfront.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFront { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudFront { + svc := &CloudFront{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-01-28", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudFront operation and runs any +// custom request initialization. 
+func (c *CloudFront) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,210 @@ +package sign + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + "time" +) + +// An AWSEpochTime wraps a time value providing JSON serialization needed for +// AWS Policy epoch time fields. +type AWSEpochTime struct { + time.Time +} + +// NewAWSEpochTime returns a new AWSEpochTime pointer wrapping the Go time provided. +func NewAWSEpochTime(t time.Time) *AWSEpochTime { + return &AWSEpochTime{t} +} + +// MarshalJSON serializes the epoch time as AWS Profile epoch time. +func (t AWSEpochTime) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`{"AWS:EpochTime":%d}`, t.UTC().Unix())), nil +} + +// An IPAddress wraps an IPAddress source IP providing JSON serialization information +type IPAddress struct { + SourceIP string `json:"AWS:SourceIp"` +} + +// A Condition defines the restrictions for how a signed URL can be used. +type Condition struct { + // Optional IP address mask the signed URL must be requested from. + IPAddress *IPAddress `json:"IpAddress,omitempty"` + + // Optional date that the signed URL cannot be used until. 
It is invalid + // to make requests with the signed URL prior to this date. + DateGreaterThan *AWSEpochTime `json:",omitempty"` + + // Required date that the signed URL will expire. A DateLessThan is required + // sign cloud front URLs + DateLessThan *AWSEpochTime `json:",omitempty"` +} + +// A Statement is a collection of conditions for resources +type Statement struct { + // The Web or RTMP resource the URL will be signed for + Resource string + + // The set of conditions for this resource + Condition Condition +} + +// A Policy defines the resources that a signed will be signed for. +// +// See the following page for more information on how policies are constructed. +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement +type Policy struct { + // List of resource and condition statements. + // Signed URLs should only provide a single statement. + Statements []Statement `json:"Statement"` +} + +// Override for testing to mock out usage of crypto/rand.Reader +var randReader = rand.Reader + +// Sign will sign a policy using an RSA private key. It will return a base 64 +// encoded signature and policy if no error is encountered. 
+// +// The signature and policy should be added to the signed URL following the +// guidelines in: +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-signed-urls.html +func (p *Policy) Sign(privKey *rsa.PrivateKey) (b64Signature, b64Policy []byte, err error) { + if err = p.Validate(); err != nil { + return nil, nil, err + } + + // Build and escape the policy + b64Policy, jsonPolicy, err := encodePolicy(p) + if err != nil { + return nil, nil, err + } + awsEscapeEncoded(b64Policy) + + // Build and escape the signature + b64Signature, err = signEncodedPolicy(randReader, jsonPolicy, privKey) + if err != nil { + return nil, nil, err + } + awsEscapeEncoded(b64Signature) + + return b64Signature, b64Policy, nil +} + +// Validate verifies that the policy is valid and usable, and returns an +// error if there is a problem. +func (p *Policy) Validate() error { + if len(p.Statements) == 0 { + return fmt.Errorf("at least one policy statement is required") + } + for i, s := range p.Statements { + if s.Resource == "" { + return fmt.Errorf("statement at index %d does not have a resource", i) + } + } + + return nil +} + +// CreateResource constructs, validates, and returns a resource URL string. An +// error will be returned if unable to create the resource string. +func CreateResource(scheme, u string) (string, error) { + scheme = strings.ToLower(scheme) + + if scheme == "http" || scheme == "https" { + return u, nil + } + + if scheme == "rtmp" { + parsed, err := url.Parse(u) + if err != nil { + return "", fmt.Errorf("unable to parse rtmp URL, err: %s", err) + } + + rtmpURL := strings.TrimLeft(parsed.Path, "/") + if parsed.RawQuery != "" { + rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) + } + + return rtmpURL, nil + } + + return "", fmt.Errorf("invalid URL scheme must be http, https, or rtmp. Provided: %s", scheme) +} + +// NewCannedPolicy returns a new Canned Policy constructed using the resource +// and expires time. 
This can be used to generate the basic model for a Policy +// that can be then augmented with additional conditions. +// +// See the following page for more information on how policies are constructed. +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement +func NewCannedPolicy(resource string, expires time.Time) *Policy { + return &Policy{ + Statements: []Statement{ + { + Resource: resource, + Condition: Condition{ + DateLessThan: NewAWSEpochTime(expires), + }, + }, + }, + } +} + +// encodePolicy encodes the Policy as JSON and also base 64 encodes it. +func encodePolicy(p *Policy) (b64Policy, jsonPolicy []byte, err error) { + jsonPolicy, err = json.Marshal(p) + if err != nil { + return nil, nil, fmt.Errorf("failed to encode policy, %s", err.Error()) + } + + // Remove leading and trailing white space, JSON encoding will note include + // whitespace within the encoding. + jsonPolicy = bytes.TrimSpace(jsonPolicy) + + b64Policy = make([]byte, base64.StdEncoding.EncodedLen(len(jsonPolicy))) + base64.StdEncoding.Encode(b64Policy, jsonPolicy) + return b64Policy, jsonPolicy, nil +} + +// signEncodedPolicy will sign and base 64 encode the JSON encoded policy. 
+func signEncodedPolicy(randReader io.Reader, jsonPolicy []byte, privKey *rsa.PrivateKey) ([]byte, error) { + hash := sha1.New() + if _, err := bytes.NewReader(jsonPolicy).WriteTo(hash); err != nil { + return nil, fmt.Errorf("failed to calculate signing hash, %s", err.Error()) + } + + sig, err := rsa.SignPKCS1v15(randReader, privKey, crypto.SHA1, hash.Sum(nil)) + if err != nil { + return nil, fmt.Errorf("failed to sign policy, %s", err.Error()) + } + + b64Sig := make([]byte, base64.StdEncoding.EncodedLen(len(sig))) + base64.StdEncoding.Encode(b64Sig, sig) + return b64Sig, nil +} + +// special characters to be replaced with awsEscapeEncoded +var invalidEncodedChar = map[byte]byte{ + '+': '-', + '=': '_', + '/': '~', +} + +// awsEscapeEncoded will replace base64 encoding's special characters to be URL safe. +func awsEscapeEncoded(b []byte) { + for i, v := range b { + if r, ok := invalidEncodedChar[v]; ok { + b[i] = r + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,139 @@ +package sign + +import ( + "bytes" + "crypto" + "crypto/rsa" + "crypto/sha1" + "encoding/base64" + "fmt" + "math/rand" + "strings" + "testing" + "time" +) + +func TestEpochTimeMarshal(t *testing.T) { + v := AWSEpochTime{time.Now()} + b, err := v.MarshalJSON() + if err != nil { + t.Fatalf("Unexpected error, %#v", err) + } + + expected := fmt.Sprintf(`{"AWS:EpochTime":%d}`, v.UTC().Unix()) + if string(b) != expected { + t.Errorf("Expected marshaled time to match, expect: %s, actual: 
%s", + expected, string(b)) + } +} + +var testCreateResource = []struct { + scheme, u string + expect string + errPrefix string +}{ + { + "https", "https://example.com/a?b=1", + "https://example.com/a?b=1", "", + }, + { + "http", "http*://example.com/a?b=1", + "http*://example.com/a?b=1", "", + }, + { + "rtmp", "https://example.com/a?b=1", + "a?b=1", "", + }, + { + "ftp", "ftp://example.com/a?b=1", + "", "invalid URL scheme", + }, +} + +func TestCreateResource(t *testing.T) { + for i, v := range testCreateResource { + r, err := CreateResource(v.scheme, v.u) + if err != nil { + if v.errPrefix == "" { + t.Errorf("%d, Unexpected error %s", i, err.Error()) + continue + } + if !strings.HasPrefix(err.Error(), v.errPrefix) { + t.Errorf("%d, Expected to find prefix\nexpect: %s\nactual: %s", i, v.errPrefix, err.Error()) + continue + } + } else if v.errPrefix != "" { + t.Errorf("%d, Expected error %s", i, v.errPrefix) + continue + } + + if v.expect != r { + t.Errorf("%d, Expected to find prefix\nexpect: %s\nactual: %s", i, v.expect, r) + } + } +} + +var testTime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + +const expectedJSONPolicy = `{"Statement":[{"Resource":"https://example.com/a","Condition":{"DateLessThan":{"AWS:EpochTime":1257894000}}}]}` +const expectedB64Policy = `eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9hIiwiQ29uZGl0aW9uIjp7IkRhdGVMZXNzVGhhbiI6eyJBV1M6RXBvY2hUaW1lIjoxMjU3ODk0MDAwfX19XX0=` + +func TestEncodePolicy(t *testing.T) { + p := NewCannedPolicy("https://example.com/a", testTime) + + b64Policy, jsonPolicy, err := encodePolicy(p) + if err != nil { + t.Fatalf("Unexpected error, %#v", err) + } + + if string(jsonPolicy) != expectedJSONPolicy { + t.Errorf("Expected json encoding to match, \nexpect: %s\nactual: %s\n", expectedJSONPolicy, jsonPolicy) + } + + if string(b64Policy) != expectedB64Policy { + t.Errorf("Expected b64 encoding to match, \nexpect: %s\nactual: %s\n", expectedB64Policy, b64Policy) + } +} + +func 
TestSignEncodedPolicy(t *testing.T) { + p := NewCannedPolicy("https://example.com/a", testTime) + _, jsonPolicy, err := encodePolicy(p) + if err != nil { + t.Fatalf("Unexpected policy encode error, %#v", err) + } + + r := newRandomReader(rand.New(rand.NewSource(1))) + + privKey, err := rsa.GenerateKey(r, 1024) + if err != nil { + t.Fatalf("Unexpected priv key error, %#v", err) + } + + b64Signature, err := signEncodedPolicy(r, jsonPolicy, privKey) + if err != nil { + t.Fatalf("Unexpected policy sign error, %#v", err) + } + + hash := sha1.New() + if _, err = bytes.NewReader(jsonPolicy).WriteTo(hash); err != nil { + t.Fatalf("Unexpected hash error, %#v", err) + } + + decodedSig, err := base64.StdEncoding.DecodeString(string(b64Signature)) + if err != nil { + t.Fatalf("Unexpected base64 decode signature, %#v", err) + } + + if err := rsa.VerifyPKCS1v15(&privKey.PublicKey, crypto.SHA1, hash.Sum(nil), decodedSig); err != nil { + t.Fatalf("Unable to verify signature, %#v", err) + } +} + +func TestAWSEscape(t *testing.T) { + expect := "a-b_c~" + actual := []byte("a+b=c/") + awsEscapeEncoded(actual) + if string(actual) != expect { + t.Errorf("expect: %s, actual: %s", expect, string(actual)) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,68 @@ +package sign + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "os" +) + +// LoadPEMPrivKeyFile reads a PEM encoded RSA private key from the file name. 
+// A new RSA private key will be returned if no error. +func LoadPEMPrivKeyFile(name string) (*rsa.PrivateKey, error) { + file, err := os.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + + return LoadPEMPrivKey(file) +} + +// LoadPEMPrivKey reads a PEM encoded RSA private key from the io.Reader. +// A new RSA private key will be returned if no error. +func LoadPEMPrivKey(reader io.Reader) (*rsa.PrivateKey, error) { + block, err := loadPem(reader) + if err != nil { + return nil, err + } + + return x509.ParsePKCS1PrivateKey(block.Bytes) +} + +// LoadEncryptedPEMPrivKey decrypts the PEM encoded private key using the +// password provided returning a RSA private key. If the PEM data is invalid, +// or unable to decrypt an error will be returned. +func LoadEncryptedPEMPrivKey(reader io.Reader, password []byte) (*rsa.PrivateKey, error) { + block, err := loadPem(reader) + if err != nil { + return nil, err + } + + decryptedBlock, err := x509.DecryptPEMBlock(block, password) + if err != nil { + return nil, err + } + + return x509.ParsePKCS1PrivateKey(decryptedBlock) +} + +func loadPem(reader io.Reader) (*pem.Block, error) { + b, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(b) + if block == nil { + // pem.Decode will set block to nil if there is no PEM data in the input + // the second parameter will contain the provided bytes that failed + // to be decoded. 
+ return nil, fmt.Errorf("no valid PEM data provided") + } + + return block, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +package sign + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "io" + "math/rand" + "strings" + "testing" +) + +func generatePEM(randReader io.Reader, password []byte) (buf *bytes.Buffer, err error) { + k, err := rsa.GenerateKey(randReader, 1024) + if err != nil { + return nil, err + } + + derBytes := x509.MarshalPKCS1PrivateKey(k) + + var block *pem.Block + if password != nil { + block, err = x509.EncryptPEMBlock(randReader, "RSA PRIVATE KEY", derBytes, password, x509.PEMCipherAES128) + } else { + block = &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: derBytes, + } + } + + buf = &bytes.Buffer{} + err = pem.Encode(buf, block) + return buf, err +} + +func TestLoadPemPrivKey(t *testing.T) { + reader, err := generatePEM(newRandomReader(rand.New(rand.NewSource(1))), nil) + if err != nil { + t.Errorf("Unexpected pem generation err %s", err.Error()) + } + + privKey, err := LoadPEMPrivKey(reader) + if err != nil { + t.Errorf("Unexpected key load error, %s", err.Error()) + } + if privKey == nil { + t.Errorf("Expected valid privKey, but got nil") + } +} + +func TestLoadPemPrivKeyInvalidPEM(t *testing.T) { + reader := strings.NewReader("invalid PEM data") + privKey, err := LoadPEMPrivKey(reader) + + if err == nil { + t.Errorf("Expected error invalid PEM data error") + } + if privKey != nil { + 
t.Errorf("Expected nil privKey but got %#v", privKey) + } +} + +func TestLoadEncryptedPEMPrivKey(t *testing.T) { + reader, err := generatePEM(newRandomReader(rand.New(rand.NewSource(1))), []byte("password")) + if err != nil { + t.Errorf("Unexpected pem generation err %s", err.Error()) + } + + privKey, err := LoadEncryptedPEMPrivKey(reader, []byte("password")) + + if err != nil { + t.Errorf("Unexpected key load error, %s", err.Error()) + } + if privKey == nil { + t.Errorf("Expected valid privKey, but got nil") + } +} + +func TestLoadEncryptedPEMPrivKeyWrongPassword(t *testing.T) { + reader, err := generatePEM(newRandomReader(rand.New(rand.NewSource(1))), []byte("password")) + privKey, err := LoadEncryptedPEMPrivKey(reader, []byte("wrong password")) + + if err == nil { + t.Errorf("Expected error invalid PEM data error") + } + if privKey != nil { + t.Errorf("Expected nil privKey but got %#v", privKey) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,30 @@ +package sign + +import ( + "bytes" + "encoding/binary" + "math/rand" +) + +// A randomReader wraps a math/rand.Rand within an reader so that it can used +// as a predictable testing replacement for crypto/rand.Reader +type randomReader struct { + b *bytes.Buffer + r *rand.Rand +} + +// newRandomReader returns a new instance of the random reader +func newRandomReader(r *rand.Rand) *randomReader { + return &randomReader{b: &bytes.Buffer{}, r: r} +} + +// Read will read random bytes from up to the 
length of b. +func (m *randomReader) Read(b []byte) (int, error) { + for i := 0; i < len(b); { + binary.Write(m.b, binary.LittleEndian, m.r.Int63()) + n, _ := m.b.Read(b[i:]) + i += n + } + + return len(b), nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,205 @@ +// Package sign provides utilities to generate signed URLs for Amazon CloudFront. +// +// More information about signed URLs and their structure can be found at: +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html +// +// To sign a URL create a URLSigner with your private key and credential pair key ID. +// Once you have a URLSigner instance you can call Sign or SignWithPolicy to +// sign the URLs. +// +// Example: +// +// // Sign URL to be valid for 1 hour from now. +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour)) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +package sign + +import ( + "crypto/rsa" + "fmt" + "net/url" + "strings" + "time" +) + +// An URLSigner provides URL signing utilities to sign URLs for Amazon CloudFront +// resources. Using a private key and Credential Key Pair key ID the URLSigner +// only needs to be created once per Credential Key Pair key ID and private key. +// +// The signer is safe to use concurrently. 
+type URLSigner struct { + keyID string + privKey *rsa.PrivateKey +} + +// NewURLSigner constructs and returns a new URLSigner to be used to for signing +// Amazon CloudFront URL resources with. +func NewURLSigner(keyID string, privKey *rsa.PrivateKey) *URLSigner { + return &URLSigner{ + keyID: keyID, + privKey: privKey, + } +} + +// Sign will sign a single URL to expire at the time of expires sign using the +// Amazon CloudFront default Canned Policy. The URL will be signed with the +// private key and Credential Key Pair Key ID previously provided to URLSigner. +// +// This is the default method of signing Amazon CloudFront URLs. If extra policy +// conditions are need other than URL expiry use SignWithPolicy instead. +// +// Example: +// +// // Sign URL to be valid for 1 hour from now. +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour)) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +func (s URLSigner) Sign(url string, expires time.Time) (string, error) { + scheme, cleanedURL, err := cleanURLScheme(url) + if err != nil { + return "", err + } + + resource, err := CreateResource(scheme, url) + if err != nil { + return "", err + } + + return signURL(scheme, cleanedURL, s.keyID, NewCannedPolicy(resource, expires), false, s.privKey) +} + +// SignWithPolicy will sign a URL with the Policy provided. The URL will be +// signed with the private key and Credential Key Pair Key ID previously provided to URLSigner. +// +// Use this signing method if you are looking to sign a URL with more than just +// the URL's expiry time, or reusing Policies between multiple URL signings. +// If only the expiry time is needed you can use Sign and provide just the +// URL's expiry time. A minimum of at least one policy statement is required for a signed URL. 
+// +// Note: It is not safe to use Polices between multiple signers concurrently +// +// Example: +// +// // Sign URL to be valid for 30 minutes from now, expires one hour from now, and +// // restricted to the 192.0.2.0/24 IP address range. +// policy := &sign.Policy{ +// Statements: []Statement{ +// { +// Resource: rawURL, +// Condition: Condition{ +// // Optional IP source address range +// IPAddress: &IPAddress{SourceIP: "192.0.2.0/24"}, +// // Optional date URL is not valid until +// DateGreaterThan: &AWSEpochTime{time.Now().Add(30 * time.Minute)}, +// // Required date the URL will expire after +// DateLessThan: &AWSEpochTime{time.Now().Add(1 * time.Hour)}, +// } +// } +// } +// } +// +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.SignWithPolicy(rawURL, policy) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +func (s URLSigner) SignWithPolicy(url string, p *Policy) (string, error) { + scheme, cleanedURL, err := cleanURLScheme(url) + if err != nil { + return "", err + } + + return signURL(scheme, cleanedURL, s.keyID, p, true, s.privKey) +} + +func signURL(scheme, url, keyID string, p *Policy, customPolicy bool, privKey *rsa.PrivateKey) (string, error) { + // Validation URL elements + if err := validateURL(url); err != nil { + return "", err + } + + b64Signature, b64Policy, err := p.Sign(privKey) + if err != nil { + return "", err + } + + // build and return signed URL + builtURL := buildSignedURL(url, keyID, p, customPolicy, b64Policy, b64Signature) + if scheme == "rtmp" { + return buildRTMPURL(builtURL) + } + + return builtURL, nil +} + +func buildSignedURL(baseURL, keyID string, p *Policy, customPolicy bool, b64Policy, b64Signature []byte) string { + pred := "?" 
+ if strings.Contains(baseURL, "?") { + pred = "&" + } + signedURL := baseURL + pred + + if customPolicy { + signedURL += "Policy=" + string(b64Policy) + } else { + signedURL += fmt.Sprintf("Expires=%d", p.Statements[0].Condition.DateLessThan.UTC().Unix()) + } + signedURL += fmt.Sprintf("&Signature=%s&Key-Pair-Id=%s", string(b64Signature), keyID) + + return signedURL +} + +func buildRTMPURL(u string) (string, error) { + parsed, err := url.Parse(u) + if err != nil { + return "", fmt.Errorf("unable to parse rtmp signed URL, err: %s", err) + } + + rtmpURL := strings.TrimLeft(parsed.Path, "/") + if parsed.RawQuery != "" { + rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) + } + + return rtmpURL, nil +} + +func cleanURLScheme(u string) (scheme, cleanedURL string, err error) { + parts := strings.SplitN(u, "://", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid URL, missing scheme and domain/path") + } + scheme = strings.Replace(parts[0], "*", "", 1) + cleanedURL = fmt.Sprintf("%s://%s", scheme, parts[1]) + + return strings.ToLower(scheme), cleanedURL, nil +} + +var illegalQueryParms = []string{"Expires", "Policy", "Signature", "Key-Pair-Id"} + +func validateURL(u string) error { + parsed, err := url.Parse(u) + if err != nil { + return fmt.Errorf("unable to parse URL, err: %s", err.Error()) + } + + if parsed.Scheme == "" { + return fmt.Errorf("URL missing valid scheme, %s", u) + } + + q := parsed.Query() + for _, p := range illegalQueryParms { + if _, ok := q[p]; ok { + return fmt.Errorf("%s cannot be a query parameter for a signed URL", p) + } + } + + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url_test.go 1970-01-01 
00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,145 @@ +package sign + +import ( + "crypto/rsa" + "math/rand" + "strings" + "testing" + "time" +) + +var testSignTime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + +var testSignURL = []struct { + u string + p *Policy + t time.Time + customPolicy bool + expectErr bool + out string +}{ + { + "http://example.com/a", NewCannedPolicy("http://example.com/a", testSignTime), time.Time{}, true, false, + "http://example.com/a?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cDovL2V4YW1wbGUuY29tL2EiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjEyNTc4OTQwMDB9fX1dfQ__&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "http://example.com/a", nil, testSignTime, false, false, + "http://example.com/a?Expires=1257894000&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "http://example.com/a", &Policy{}, time.Time{}, true, true, + "http://example.com/a?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cDovL2V4YW1wbGUuY29tL2EiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjEyNTc4OTQwMDB9fX1dfQ__&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "http://example.com/a", NewCannedPolicy("", testSignTime), time.Time{}, true, true, + 
"http://example.com/a?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cDovL2V4YW1wbGUuY29tL2EiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjEyNTc4OTQwMDB9fX1dfQ__&Signature=Y6qvWOZNl99uNPMGprvrKXEmXpLWJ-xXKVHL~nmF0BR1jPb2XA2jor0MUYKBE4ViTkWZZ1dz46zSFMsEEfw~n6-SVYXZ2QHBBTkSAoxGtH6dH33Ph9pz~f9Wy7aYXq~9I-Ah0E6yC~BMiQuXe5qAOucuMPorKgPfC0dvLMw2EF0_&Key-Pair-Id=KeyID", + }, + { + "rtmp://example.com/a", nil, testSignTime, false, false, + "a?Expires=1257894000&Signature=Ds9NbpGwIcDKG1iZDyjfPXp0ZFYSIzfvGzJj-x28XlXfrarHrJbTOQj3bec~aAyb8NAqghBYRdKF9~RdjNrdyxyiequo-SCjFgFHnRNIk0FiqH0fVt2NO63f0X8-Kbur9cPtJoHR9Jzk0I1CQnECqhL6A0OgPhijTfKUITocmzA_&Key-Pair-Id=KeyID", + }, + { + "rtmp://example.com/a", NewCannedPolicy("a", testSignTime), time.Time{}, true, false, + "a?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiYSIsIkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTI1Nzg5NDAwMH19fV19&Signature=Ds9NbpGwIcDKG1iZDyjfPXp0ZFYSIzfvGzJj-x28XlXfrarHrJbTOQj3bec~aAyb8NAqghBYRdKF9~RdjNrdyxyiequo-SCjFgFHnRNIk0FiqH0fVt2NO63f0X8-Kbur9cPtJoHR9Jzk0I1CQnECqhL6A0OgPhijTfKUITocmzA_&Key-Pair-Id=KeyID", + }, +} + +// TODO Sign URL HTTP +// TODO Sign URL RMTP +func TestSignURL(t *testing.T) { + origRandReader := randReader + randReader = newRandomReader(rand.New(rand.NewSource(1))) + defer func() { + randReader = origRandReader + }() + + privKey, err := rsa.GenerateKey(randReader, 1024) + if err != nil { + t.Fatalf("Unexpected priv key error, %#v", err) + } + + s := NewURLSigner("KeyID", privKey) + + for i, v := range testSignURL { + var u string + var err error + + if v.customPolicy { + u, err = s.SignWithPolicy(v.u, v.p) + } else { + u, err = s.Sign(v.u, v.t) + } + + if err != nil { + if v.expectErr { + continue + } + t.Errorf("%d, Unexpected error, %s", i, err.Error()) + continue + } else if v.expectErr { + t.Errorf("%d Expected error, but got none", i) + continue + } + + if u != v.out { + t.Errorf("%d, Unexpected URL\nexpect: %s\nactual: %s\n", i, v.out, u) + } + } + +} + 
+var testBuildSignedURL = []struct { + u, keyID string + p *Policy + customPolicy bool + b64Policy, b64Sig []byte + out string +}{ + { + "https://example.com/a?b=1", "KeyID", NewCannedPolicy("", testSignTime), true, []byte("b64Policy"), []byte("b64Sig"), + "https://example.com/a?b=1&Policy=b64Policy&Signature=b64Sig&Key-Pair-Id=KeyID", + }, + { + "https://example.com/a", "KeyID", NewCannedPolicy("", testSignTime), true, []byte("b64Policy"), []byte("b64Sig"), + "https://example.com/a?Policy=b64Policy&Signature=b64Sig&Key-Pair-Id=KeyID", + }, + { + "https://example.com/a?b=1", "KeyID", NewCannedPolicy("https://example.com/a?b=1", testSignTime), false, []byte("b64Policy"), []byte("b64Sig"), + "https://example.com/a?b=1&Expires=1257894000&Signature=b64Sig&Key-Pair-Id=KeyID", + }, +} + +func TestBuildSignedURL(t *testing.T) { + for i, v := range testBuildSignedURL { + u := buildSignedURL(v.u, v.keyID, v.p, v.customPolicy, v.b64Policy, v.b64Sig) + if u != v.out { + t.Errorf("%d, Unexpected URL\nexpect: %s\nactual: %s\n", i, v.out, u) + } + } +} + +var testValidURL = []struct { + in, errPrefix string +}{ + {"https://example.com/a?b=1&else=b", ""}, + {"https://example.com/a?b=1&Policy=something&else=b", "Policy"}, + {"https://example.com/a?b=1&Signature=something&else=b", "Signature"}, + {"https://example.com/a?b=1&Key-Pair-Id=something&else=b", "Key-Pair-Id"}, + {"http?://example.com/a?b=1", "URL missing valid scheme"}, +} + +func TestValidateURL(t *testing.T) { + for i, v := range testValidURL { + err := validateURL(v.in) + if err != nil { + if v.errPrefix == "" { + t.Errorf("%d, Unexpected error %s", i, err.Error()) + } + if !strings.HasPrefix(err.Error(), v.errPrefix) { + t.Errorf("%d, Expected to find prefix\nexpect: %s\nactual: %s", i, v.errPrefix, err.Error()) + } + } else if v.errPrefix != "" { + t.Errorf("%d, Expected error %s", i, v.errPrefix) + } + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,76 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudfront + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *CloudFront) WaitUntilDistributionDeployed(input *GetDistributionInput) error { + waiterCfg := waiter.Config{ + Operation: "GetDistribution", + Delay: 60, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Distribution.Status", + Expected: "Deployed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFront) WaitUntilInvalidationCompleted(input *GetInvalidationInput) error { + waiterCfg := waiter.Config{ + Operation: "GetInvalidation", + Delay: 20, + MaxAttempts: 30, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Invalidation.Status", + Expected: "Completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *CloudFront) WaitUntilStreamingDistributionDeployed(input *GetStreamingDistributionInput) error { + waiterCfg := waiter.Config{ + Operation: "GetStreamingDistribution", + Delay: 60, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "StreamingDistribution.Status", + Expected: "Deployed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return 
w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1343 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudhsm provides a client for Amazon CloudHSM. +package cloudhsm + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateHapg = "CreateHapg" + +// CreateHapgRequest generates a request for the CreateHapg operation. +func (c *CloudHSM) CreateHapgRequest(input *CreateHapgInput) (req *request.Request, output *CreateHapgOutput) { + op := &request.Operation{ + Name: opCreateHapg, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHapgInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHapgOutput{} + req.Data = output + return +} + +// Creates a high-availability partition group. A high-availability partition +// group is a group of partitions that spans multiple physical HSMs. +func (c *CloudHSM) CreateHapg(input *CreateHapgInput) (*CreateHapgOutput, error) { + req, out := c.CreateHapgRequest(input) + err := req.Send() + return out, err +} + +const opCreateHsm = "CreateHsm" + +// CreateHsmRequest generates a request for the CreateHsm operation. 
+func (c *CloudHSM) CreateHsmRequest(input *CreateHsmInput) (req *request.Request, output *CreateHsmOutput) { + op := &request.Operation{ + Name: opCreateHsm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHsmInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHsmOutput{} + req.Data = output + return +} + +// Creates an uninitialized HSM instance. +// +// There is an upfront fee charged for each HSM instance that you create with +// the CreateHsm operation. If you accidentally provision an HSM and want to +// request a refund, delete the instance using the DeleteHsm operation, go to +// the AWS Support Center (https://console.aws.amazon.com/support/home#/), create +// a new case, and select Account and Billing Support. +// +// It can take up to 20 minutes to create and provision an HSM. You can monitor +// the status of the HSM with the DescribeHsm operation. The HSM is ready to +// be initialized when the status changes to RUNNING. +func (c *CloudHSM) CreateHsm(input *CreateHsmInput) (*CreateHsmOutput, error) { + req, out := c.CreateHsmRequest(input) + err := req.Send() + return out, err +} + +const opCreateLunaClient = "CreateLunaClient" + +// CreateLunaClientRequest generates a request for the CreateLunaClient operation. +func (c *CloudHSM) CreateLunaClientRequest(input *CreateLunaClientInput) (req *request.Request, output *CreateLunaClientOutput) { + op := &request.Operation{ + Name: opCreateLunaClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLunaClientInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLunaClientOutput{} + req.Data = output + return +} + +// Creates an HSM client. 
+func (c *CloudHSM) CreateLunaClient(input *CreateLunaClientInput) (*CreateLunaClientOutput, error) { + req, out := c.CreateLunaClientRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHapg = "DeleteHapg" + +// DeleteHapgRequest generates a request for the DeleteHapg operation. +func (c *CloudHSM) DeleteHapgRequest(input *DeleteHapgInput) (req *request.Request, output *DeleteHapgOutput) { + op := &request.Operation{ + Name: opDeleteHapg, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHapgInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHapgOutput{} + req.Data = output + return +} + +// Deletes a high-availability partition group. +func (c *CloudHSM) DeleteHapg(input *DeleteHapgInput) (*DeleteHapgOutput, error) { + req, out := c.DeleteHapgRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHsm = "DeleteHsm" + +// DeleteHsmRequest generates a request for the DeleteHsm operation. +func (c *CloudHSM) DeleteHsmRequest(input *DeleteHsmInput) (req *request.Request, output *DeleteHsmOutput) { + op := &request.Operation{ + Name: opDeleteHsm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHsmInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHsmOutput{} + req.Data = output + return +} + +// Deletes an HSM. After completion, this operation cannot be undone and your +// key material cannot be recovered. +func (c *CloudHSM) DeleteHsm(input *DeleteHsmInput) (*DeleteHsmOutput, error) { + req, out := c.DeleteHsmRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLunaClient = "DeleteLunaClient" + +// DeleteLunaClientRequest generates a request for the DeleteLunaClient operation. 
+func (c *CloudHSM) DeleteLunaClientRequest(input *DeleteLunaClientInput) (req *request.Request, output *DeleteLunaClientOutput) { + op := &request.Operation{ + Name: opDeleteLunaClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLunaClientInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLunaClientOutput{} + req.Data = output + return +} + +// Deletes a client. +func (c *CloudHSM) DeleteLunaClient(input *DeleteLunaClientInput) (*DeleteLunaClientOutput, error) { + req, out := c.DeleteLunaClientRequest(input) + err := req.Send() + return out, err +} + +const opDescribeHapg = "DescribeHapg" + +// DescribeHapgRequest generates a request for the DescribeHapg operation. +func (c *CloudHSM) DescribeHapgRequest(input *DescribeHapgInput) (req *request.Request, output *DescribeHapgOutput) { + op := &request.Operation{ + Name: opDescribeHapg, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeHapgInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHapgOutput{} + req.Data = output + return +} + +// Retrieves information about a high-availability partition group. +func (c *CloudHSM) DescribeHapg(input *DescribeHapgInput) (*DescribeHapgOutput, error) { + req, out := c.DescribeHapgRequest(input) + err := req.Send() + return out, err +} + +const opDescribeHsm = "DescribeHsm" + +// DescribeHsmRequest generates a request for the DescribeHsm operation. +func (c *CloudHSM) DescribeHsmRequest(input *DescribeHsmInput) (req *request.Request, output *DescribeHsmOutput) { + op := &request.Operation{ + Name: opDescribeHsm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeHsmInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHsmOutput{} + req.Data = output + return +} + +// Retrieves information about an HSM. You can identify the HSM by its ARN or +// its serial number. 
+func (c *CloudHSM) DescribeHsm(input *DescribeHsmInput) (*DescribeHsmOutput, error) { + req, out := c.DescribeHsmRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLunaClient = "DescribeLunaClient" + +// DescribeLunaClientRequest generates a request for the DescribeLunaClient operation. +func (c *CloudHSM) DescribeLunaClientRequest(input *DescribeLunaClientInput) (req *request.Request, output *DescribeLunaClientOutput) { + op := &request.Operation{ + Name: opDescribeLunaClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLunaClientInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLunaClientOutput{} + req.Data = output + return +} + +// Retrieves information about an HSM client. +func (c *CloudHSM) DescribeLunaClient(input *DescribeLunaClientInput) (*DescribeLunaClientOutput, error) { + req, out := c.DescribeLunaClientRequest(input) + err := req.Send() + return out, err +} + +const opGetConfig = "GetConfig" + +// GetConfigRequest generates a request for the GetConfig operation. +func (c *CloudHSM) GetConfigRequest(input *GetConfigInput) (req *request.Request, output *GetConfigOutput) { + op := &request.Operation{ + Name: opGetConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetConfigOutput{} + req.Data = output + return +} + +// Gets the configuration files necessary to connect to all high availability +// partition groups the client is associated with. +func (c *CloudHSM) GetConfig(input *GetConfigInput) (*GetConfigOutput, error) { + req, out := c.GetConfigRequest(input) + err := req.Send() + return out, err +} + +const opListAvailableZones = "ListAvailableZones" + +// ListAvailableZonesRequest generates a request for the ListAvailableZones operation. 
+func (c *CloudHSM) ListAvailableZonesRequest(input *ListAvailableZonesInput) (req *request.Request, output *ListAvailableZonesOutput) { + op := &request.Operation{ + Name: opListAvailableZones, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAvailableZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAvailableZonesOutput{} + req.Data = output + return +} + +// Lists the Availability Zones that have available AWS CloudHSM capacity. +func (c *CloudHSM) ListAvailableZones(input *ListAvailableZonesInput) (*ListAvailableZonesOutput, error) { + req, out := c.ListAvailableZonesRequest(input) + err := req.Send() + return out, err +} + +const opListHapgs = "ListHapgs" + +// ListHapgsRequest generates a request for the ListHapgs operation. +func (c *CloudHSM) ListHapgsRequest(input *ListHapgsInput) (req *request.Request, output *ListHapgsOutput) { + op := &request.Operation{ + Name: opListHapgs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListHapgsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHapgsOutput{} + req.Data = output + return +} + +// Lists the high-availability partition groups for the account. +// +// This operation supports pagination with the use of the NextToken member. +// If more results are available, the NextToken member of the response contains +// a token that you pass in the next call to ListHapgs to retrieve the next +// set of items. +func (c *CloudHSM) ListHapgs(input *ListHapgsInput) (*ListHapgsOutput, error) { + req, out := c.ListHapgsRequest(input) + err := req.Send() + return out, err +} + +const opListHsms = "ListHsms" + +// ListHsmsRequest generates a request for the ListHsms operation. 
+func (c *CloudHSM) ListHsmsRequest(input *ListHsmsInput) (req *request.Request, output *ListHsmsOutput) { + op := &request.Operation{ + Name: opListHsms, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListHsmsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHsmsOutput{} + req.Data = output + return +} + +// Retrieves the identifiers of all of the HSMs provisioned for the current +// customer. +// +// This operation supports pagination with the use of the NextToken member. +// If more results are available, the NextToken member of the response contains +// a token that you pass in the next call to ListHsms to retrieve the next set +// of items. +func (c *CloudHSM) ListHsms(input *ListHsmsInput) (*ListHsmsOutput, error) { + req, out := c.ListHsmsRequest(input) + err := req.Send() + return out, err +} + +const opListLunaClients = "ListLunaClients" + +// ListLunaClientsRequest generates a request for the ListLunaClients operation. +func (c *CloudHSM) ListLunaClientsRequest(input *ListLunaClientsInput) (req *request.Request, output *ListLunaClientsOutput) { + op := &request.Operation{ + Name: opListLunaClients, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListLunaClientsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListLunaClientsOutput{} + req.Data = output + return +} + +// Lists all of the clients. +// +// This operation supports pagination with the use of the NextToken member. +// If more results are available, the NextToken member of the response contains +// a token that you pass in the next call to ListLunaClients to retrieve the +// next set of items. +func (c *CloudHSM) ListLunaClients(input *ListLunaClientsInput) (*ListLunaClientsOutput, error) { + req, out := c.ListLunaClientsRequest(input) + err := req.Send() + return out, err +} + +const opModifyHapg = "ModifyHapg" + +// ModifyHapgRequest generates a request for the ModifyHapg operation. 
+func (c *CloudHSM) ModifyHapgRequest(input *ModifyHapgInput) (req *request.Request, output *ModifyHapgOutput) { + op := &request.Operation{ + Name: opModifyHapg, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyHapgInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyHapgOutput{} + req.Data = output + return +} + +// Modifies an existing high-availability partition group. +func (c *CloudHSM) ModifyHapg(input *ModifyHapgInput) (*ModifyHapgOutput, error) { + req, out := c.ModifyHapgRequest(input) + err := req.Send() + return out, err +} + +const opModifyHsm = "ModifyHsm" + +// ModifyHsmRequest generates a request for the ModifyHsm operation. +func (c *CloudHSM) ModifyHsmRequest(input *ModifyHsmInput) (req *request.Request, output *ModifyHsmOutput) { + op := &request.Operation{ + Name: opModifyHsm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyHsmInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyHsmOutput{} + req.Data = output + return +} + +// Modifies an HSM. +// +// This operation can result in the HSM being offline for up to 15 minutes +// while the AWS CloudHSM service is reconfigured. If you are modifying a production +// HSM, you should ensure that your AWS CloudHSM service is configured for high +// availability, and consider executing this operation during a maintenance +// window. +func (c *CloudHSM) ModifyHsm(input *ModifyHsmInput) (*ModifyHsmOutput, error) { + req, out := c.ModifyHsmRequest(input) + err := req.Send() + return out, err +} + +const opModifyLunaClient = "ModifyLunaClient" + +// ModifyLunaClientRequest generates a request for the ModifyLunaClient operation. 
+func (c *CloudHSM) ModifyLunaClientRequest(input *ModifyLunaClientInput) (req *request.Request, output *ModifyLunaClientOutput) { + op := &request.Operation{ + Name: opModifyLunaClient, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyLunaClientInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyLunaClientOutput{} + req.Data = output + return +} + +// Modifies the certificate used by the client. +// +// This action can potentially start a workflow to install the new certificate +// on the client's HSMs. +func (c *CloudHSM) ModifyLunaClient(input *ModifyLunaClientInput) (*ModifyLunaClientOutput, error) { + req, out := c.ModifyLunaClientRequest(input) + err := req.Send() + return out, err +} + +// Contains the inputs for the CreateHapgRequest action. +type CreateHapgInput struct { + _ struct{} `type:"structure"` + + // The label of the new high-availability partition group. + Label *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateHapgInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHapgInput) GoString() string { + return s.String() +} + +// Contains the output of the CreateHAPartitionGroup action. +type CreateHapgOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group. + HapgArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateHapgOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHapgOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateHsm operation. +type CreateHsmInput struct { + _ struct{} `locationName:"CreateHsmRequest" type:"structure"` + + // A user-defined token to ensure idempotence. Subsequent calls to this operation + // with the same token will be ignored. 
+ ClientToken *string `locationName:"ClientToken" type:"string"` + + // The IP address to assign to the HSM's ENI. + // + // If an IP address is not specified, an IP address will be randomly chosen + // from the CIDR range of the subnet. + EniIp *string `locationName:"EniIp" type:"string"` + + // The external ID from IamRoleArn, if present. + ExternalId *string `locationName:"ExternalId" type:"string"` + + // The ARN of an IAM role to enable the AWS CloudHSM service to allocate an + // ENI on your behalf. + IamRoleArn *string `locationName:"IamRoleArn" type:"string" required:"true"` + + // The SSH public key to install on the HSM. + SshKey *string `locationName:"SshKey" type:"string" required:"true"` + + // The identifier of the subnet in your VPC in which to place the HSM. + SubnetId *string `locationName:"SubnetId" type:"string" required:"true"` + + // Specifies the type of subscription for the HSM. + // + // PRODUCTION - The HSM is being used in a production environment. TRIAL + // - The HSM is being used in a product trial. + SubscriptionType *string `locationName:"SubscriptionType" type:"string" required:"true" enum:"SubscriptionType"` + + // The IP address for the syslog monitoring server. The AWS CloudHSM service + // only supports one syslog monitoring server. + SyslogIp *string `locationName:"SyslogIp" type:"string"` +} + +// String returns the string representation +func (s CreateHsmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmInput) GoString() string { + return s.String() +} + +// Contains the output of the CreateHsm operation. +type CreateHsmOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the HSM. 
+ HsmArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateHsmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateLunaClient action. +type CreateLunaClientInput struct { + _ struct{} `type:"structure"` + + // The contents of a Base64-Encoded X.509 v3 certificate to be installed on + // the HSMs used by this client. + Certificate *string `min:"600" type:"string" required:"true"` + + // The label for the client. + Label *string `type:"string"` +} + +// String returns the string representation +func (s CreateLunaClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLunaClientInput) GoString() string { + return s.String() +} + +// Contains the output of the CreateLunaClient action. +type CreateLunaClientOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the client. + ClientArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateLunaClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLunaClientOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DeleteHapg action. +type DeleteHapgInput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group to delete. + HapgArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHapgInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHapgInput) GoString() string { + return s.String() +} + +// Contains the output of the DeleteHapg action. +type DeleteHapgOutput struct { + _ struct{} `type:"structure"` + + // The status of the action. 
+ Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHapgOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHapgOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DeleteHsm operation. +type DeleteHsmInput struct { + _ struct{} `locationName:"DeleteHsmRequest" type:"structure"` + + // The ARN of the HSM to delete. + HsmArn *string `locationName:"HsmArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHsmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHsmInput) GoString() string { + return s.String() +} + +// Contains the output of the DeleteHsm operation. +type DeleteHsmOutput struct { + _ struct{} `type:"structure"` + + // The status of the operation. + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHsmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHsmOutput) GoString() string { + return s.String() +} + +type DeleteLunaClientInput struct { + _ struct{} `type:"structure"` + + // The ARN of the client to delete. + ClientArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLunaClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLunaClientInput) GoString() string { + return s.String() +} + +type DeleteLunaClientOutput struct { + _ struct{} `type:"structure"` + + // The status of the action. 
+ Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLunaClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLunaClientOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeHapg action. +type DescribeHapgInput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group to describe. + HapgArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeHapgInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHapgInput) GoString() string { + return s.String() +} + +// Contains the output of the DescribeHapg action. +type DescribeHapgOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group. + HapgArn *string `type:"string"` + + // The serial number of the high-availability partition group. + HapgSerial *string `type:"string"` + + // Contains a list of ARNs that identify the HSMs. + HsmsLastActionFailed []*string `type:"list"` + + // Contains a list of ARNs that identify the HSMs. + HsmsPendingDeletion []*string `type:"list"` + + // Contains a list of ARNs that identify the HSMs. + HsmsPendingRegistration []*string `type:"list"` + + // The label for the high-availability partition group. + Label *string `type:"string"` + + // The date and time the high-availability partition group was last modified. + LastModifiedTimestamp *string `type:"string"` + + // The list of partition serial numbers that belong to the high-availability + // partition group. + PartitionSerialList []*string `type:"list"` + + // The state of the high-availability partition group. 
+ State *string `type:"string" enum:"CloudHsmObjectState"` +} + +// String returns the string representation +func (s DescribeHapgOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHapgOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeHsm operation. +type DescribeHsmInput struct { + _ struct{} `type:"structure"` + + // The ARN of the HSM. Either the HsmArn or the SerialNumber parameter must + // be specified. + HsmArn *string `type:"string"` + + // The serial number of the HSM. Either the HsmArn or the HsmSerialNumber parameter + // must be specified. + HsmSerialNumber *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHsmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmInput) GoString() string { + return s.String() +} + +// Contains the output of the DescribeHsm operation. +type DescribeHsmOutput struct { + _ struct{} `type:"structure"` + + // The Availability Zone that the HSM is in. + AvailabilityZone *string `type:"string"` + + // The identifier of the elastic network interface (ENI) attached to the HSM. + EniId *string `type:"string"` + + // The IP address assigned to the HSM's ENI. + EniIp *string `type:"string"` + + // The ARN of the HSM. + HsmArn *string `type:"string"` + + // The HSM model type. + HsmType *string `type:"string"` + + // The ARN of the IAM role assigned to the HSM. + IamRoleArn *string `type:"string"` + + // The list of partitions on the HSM. + Partitions []*string `type:"list"` + + // The serial number of the HSM. + SerialNumber *string `type:"string"` + + // The date and time that the server certificate was last updated. + ServerCertLastUpdated *string `type:"string"` + + // The URI of the certificate server. + ServerCertUri *string `type:"string"` + + // The HSM software version. 
+ SoftwareVersion *string `type:"string"` + + // The date and time that the SSH key was last updated. + SshKeyLastUpdated *string `type:"string"` + + // The public SSH key. + SshPublicKey *string `type:"string"` + + // The status of the HSM. + Status *string `type:"string" enum:"HsmStatus"` + + // Contains additional information about the status of the HSM. + StatusDetails *string `type:"string"` + + // The identifier of the subnet that the HSM is in. + SubnetId *string `type:"string"` + + // The subscription end date. + SubscriptionEndDate *string `type:"string"` + + // The subscription start date. + SubscriptionStartDate *string `type:"string"` + + // Specifies the type of subscription for the HSM. + // + // PRODUCTION - The HSM is being used in a production environment. TRIAL + // - The HSM is being used in a product trial. + SubscriptionType *string `type:"string" enum:"SubscriptionType"` + + // The name of the HSM vendor. + VendorName *string `type:"string"` + + // The identifier of the VPC that the HSM is in. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHsmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmOutput) GoString() string { + return s.String() +} + +type DescribeLunaClientInput struct { + _ struct{} `type:"structure"` + + // The certificate fingerprint. + CertificateFingerprint *string `type:"string"` + + // The ARN of the client. + ClientArn *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLunaClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLunaClientInput) GoString() string { + return s.String() +} + +type DescribeLunaClientOutput struct { + _ struct{} `type:"structure"` + + // The certificate installed on the HSMs used by this client. 
+ Certificate *string `min:"600" type:"string"` + + // The certificate fingerprint. + CertificateFingerprint *string `type:"string"` + + // The ARN of the client. + ClientArn *string `type:"string"` + + // The label of the client. + Label *string `type:"string"` + + // The date and time the client was last modified. + LastModifiedTimestamp *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLunaClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLunaClientOutput) GoString() string { + return s.String() +} + +type GetConfigInput struct { + _ struct{} `type:"structure"` + + // The ARN of the client. + ClientArn *string `type:"string" required:"true"` + + // The client version. + ClientVersion *string `type:"string" required:"true" enum:"ClientVersion"` + + // A list of ARNs that identify the high-availability partition groups that + // are associated with the client. + HapgList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConfigInput) GoString() string { + return s.String() +} + +type GetConfigOutput struct { + _ struct{} `type:"structure"` + + // The certificate file containing the server.pem files of the HSMs. + ConfigCred *string `type:"string"` + + // The chrystoki.conf configuration file. + ConfigFile *string `type:"string"` + + // The type of credentials. + ConfigType *string `type:"string"` +} + +// String returns the string representation +func (s GetConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConfigOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the ListAvailableZones action. 
+type ListAvailableZonesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListAvailableZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAvailableZonesInput) GoString() string { + return s.String() +} + +type ListAvailableZonesOutput struct { + _ struct{} `type:"structure"` + + // The list of Availability Zones that have available AWS CloudHSM capacity. + AZList []*string `type:"list"` +} + +// String returns the string representation +func (s ListAvailableZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAvailableZonesOutput) GoString() string { + return s.String() +} + +type ListHapgsInput struct { + _ struct{} `type:"structure"` + + // The NextToken value from a previous call to ListHapgs. Pass null if this + // is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHapgsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHapgsInput) GoString() string { + return s.String() +} + +type ListHapgsOutput struct { + _ struct{} `type:"structure"` + + // The list of high-availability partition groups. + HapgList []*string `type:"list" required:"true"` + + // If not null, more results are available. Pass this value to ListHapgs to + // retrieve the next set of items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHapgsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHapgsOutput) GoString() string { + return s.String() +} + +type ListHsmsInput struct { + _ struct{} `type:"structure"` + + // The NextToken value from a previous call to ListHsms. Pass null if this is + // the first call. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHsmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHsmsInput) GoString() string { + return s.String() +} + +// Contains the output of the ListHsms operation. +type ListHsmsOutput struct { + _ struct{} `type:"structure"` + + // The list of ARNs that identify the HSMs. + HsmList []*string `type:"list"` + + // If not null, more results are available. Pass this value to ListHsms to retrieve + // the next set of items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHsmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHsmsOutput) GoString() string { + return s.String() +} + +type ListLunaClientsInput struct { + _ struct{} `type:"structure"` + + // The NextToken value from a previous call to ListLunaClients. Pass null if + // this is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListLunaClientsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLunaClientsInput) GoString() string { + return s.String() +} + +type ListLunaClientsOutput struct { + _ struct{} `type:"structure"` + + // The list of clients. + ClientList []*string `type:"list" required:"true"` + + // If not null, more results are available. Pass this to ListLunaClients to + // retrieve the next set of items. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListLunaClientsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLunaClientsOutput) GoString() string { + return s.String() +} + +type ModifyHapgInput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group to modify. + HapgArn *string `type:"string" required:"true"` + + // The new label for the high-availability partition group. + Label *string `type:"string"` + + // The list of partition serial numbers to make members of the high-availability + // partition group. + PartitionSerialList []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyHapgInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHapgInput) GoString() string { + return s.String() +} + +type ModifyHapgOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the high-availability partition group. + HapgArn *string `type:"string"` +} + +// String returns the string representation +func (s ModifyHapgOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHapgOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the ModifyHsm operation. +type ModifyHsmInput struct { + _ struct{} `locationName:"ModifyHsmRequest" type:"structure"` + + // The new IP address for the elastic network interface (ENI) attached to the + // HSM. + // + // If the HSM is moved to a different subnet, and an IP address is not specified, + // an IP address will be randomly chosen from the CIDR range of the new subnet. + EniIp *string `locationName:"EniIp" type:"string"` + + // The new external ID. + ExternalId *string `locationName:"ExternalId" type:"string"` + + // The ARN of the HSM to modify. 
+ HsmArn *string `locationName:"HsmArn" type:"string" required:"true"` + + // The new IAM role ARN. + IamRoleArn *string `locationName:"IamRoleArn" type:"string"` + + // The new identifier of the subnet that the HSM is in. The new subnet must + // be in the same Availability Zone as the current subnet. + SubnetId *string `locationName:"SubnetId" type:"string"` + + // The new IP address for the syslog monitoring server. The AWS CloudHSM service + // only supports one syslog monitoring server. + SyslogIp *string `locationName:"SyslogIp" type:"string"` +} + +// String returns the string representation +func (s ModifyHsmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHsmInput) GoString() string { + return s.String() +} + +// Contains the output of the ModifyHsm operation. +type ModifyHsmOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the HSM. + HsmArn *string `type:"string"` +} + +// String returns the string representation +func (s ModifyHsmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHsmOutput) GoString() string { + return s.String() +} + +type ModifyLunaClientInput struct { + _ struct{} `type:"structure"` + + // The new certificate for the client. + Certificate *string `min:"600" type:"string" required:"true"` + + // The ARN of the client. + ClientArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyLunaClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLunaClientInput) GoString() string { + return s.String() +} + +type ModifyLunaClientOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the client. 
+ ClientArn *string `type:"string"` +} + +// String returns the string representation +func (s ModifyLunaClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLunaClientOutput) GoString() string { + return s.String() +} + +const ( + // @enum ClientVersion + ClientVersion51 = "5.1" + // @enum ClientVersion + ClientVersion53 = "5.3" +) + +const ( + // @enum CloudHsmObjectState + CloudHsmObjectStateReady = "READY" + // @enum CloudHsmObjectState + CloudHsmObjectStateUpdating = "UPDATING" + // @enum CloudHsmObjectState + CloudHsmObjectStateDegraded = "DEGRADED" +) + +const ( + // @enum HsmStatus + HsmStatusPending = "PENDING" + // @enum HsmStatus + HsmStatusRunning = "RUNNING" + // @enum HsmStatus + HsmStatusUpdating = "UPDATING" + // @enum HsmStatus + HsmStatusSuspended = "SUSPENDED" + // @enum HsmStatus + HsmStatusTerminating = "TERMINATING" + // @enum HsmStatus + HsmStatusTerminated = "TERMINATED" + // @enum HsmStatus + HsmStatusDegraded = "DEGRADED" +) + +// Specifies the type of subscription for the HSM. +// +// PRODUCTION - The HSM is being used in a production environment. TRIAL +// - The HSM is being used in a product trial. +const ( + // @enum SubscriptionType + SubscriptionTypeProduction = "PRODUCTION" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/cloudhsmiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,82 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +// Package cloudhsmiface provides an interface for the Amazon CloudHSM. +package cloudhsmiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudhsm" +) + +// CloudHSMAPI is the interface type for cloudhsm.CloudHSM. +type CloudHSMAPI interface { + CreateHapgRequest(*cloudhsm.CreateHapgInput) (*request.Request, *cloudhsm.CreateHapgOutput) + + CreateHapg(*cloudhsm.CreateHapgInput) (*cloudhsm.CreateHapgOutput, error) + + CreateHsmRequest(*cloudhsm.CreateHsmInput) (*request.Request, *cloudhsm.CreateHsmOutput) + + CreateHsm(*cloudhsm.CreateHsmInput) (*cloudhsm.CreateHsmOutput, error) + + CreateLunaClientRequest(*cloudhsm.CreateLunaClientInput) (*request.Request, *cloudhsm.CreateLunaClientOutput) + + CreateLunaClient(*cloudhsm.CreateLunaClientInput) (*cloudhsm.CreateLunaClientOutput, error) + + DeleteHapgRequest(*cloudhsm.DeleteHapgInput) (*request.Request, *cloudhsm.DeleteHapgOutput) + + DeleteHapg(*cloudhsm.DeleteHapgInput) (*cloudhsm.DeleteHapgOutput, error) + + DeleteHsmRequest(*cloudhsm.DeleteHsmInput) (*request.Request, *cloudhsm.DeleteHsmOutput) + + DeleteHsm(*cloudhsm.DeleteHsmInput) (*cloudhsm.DeleteHsmOutput, error) + + DeleteLunaClientRequest(*cloudhsm.DeleteLunaClientInput) (*request.Request, *cloudhsm.DeleteLunaClientOutput) + + DeleteLunaClient(*cloudhsm.DeleteLunaClientInput) (*cloudhsm.DeleteLunaClientOutput, error) + + DescribeHapgRequest(*cloudhsm.DescribeHapgInput) (*request.Request, *cloudhsm.DescribeHapgOutput) + + DescribeHapg(*cloudhsm.DescribeHapgInput) (*cloudhsm.DescribeHapgOutput, error) + + DescribeHsmRequest(*cloudhsm.DescribeHsmInput) (*request.Request, *cloudhsm.DescribeHsmOutput) + + DescribeHsm(*cloudhsm.DescribeHsmInput) (*cloudhsm.DescribeHsmOutput, error) + + DescribeLunaClientRequest(*cloudhsm.DescribeLunaClientInput) (*request.Request, *cloudhsm.DescribeLunaClientOutput) + + DescribeLunaClient(*cloudhsm.DescribeLunaClientInput) (*cloudhsm.DescribeLunaClientOutput, error) + + 
GetConfigRequest(*cloudhsm.GetConfigInput) (*request.Request, *cloudhsm.GetConfigOutput) + + GetConfig(*cloudhsm.GetConfigInput) (*cloudhsm.GetConfigOutput, error) + + ListAvailableZonesRequest(*cloudhsm.ListAvailableZonesInput) (*request.Request, *cloudhsm.ListAvailableZonesOutput) + + ListAvailableZones(*cloudhsm.ListAvailableZonesInput) (*cloudhsm.ListAvailableZonesOutput, error) + + ListHapgsRequest(*cloudhsm.ListHapgsInput) (*request.Request, *cloudhsm.ListHapgsOutput) + + ListHapgs(*cloudhsm.ListHapgsInput) (*cloudhsm.ListHapgsOutput, error) + + ListHsmsRequest(*cloudhsm.ListHsmsInput) (*request.Request, *cloudhsm.ListHsmsOutput) + + ListHsms(*cloudhsm.ListHsmsInput) (*cloudhsm.ListHsmsOutput, error) + + ListLunaClientsRequest(*cloudhsm.ListLunaClientsInput) (*request.Request, *cloudhsm.ListLunaClientsOutput) + + ListLunaClients(*cloudhsm.ListLunaClientsInput) (*cloudhsm.ListLunaClientsOutput, error) + + ModifyHapgRequest(*cloudhsm.ModifyHapgInput) (*request.Request, *cloudhsm.ModifyHapgOutput) + + ModifyHapg(*cloudhsm.ModifyHapgInput) (*cloudhsm.ModifyHapgOutput, error) + + ModifyHsmRequest(*cloudhsm.ModifyHsmInput) (*request.Request, *cloudhsm.ModifyHsmOutput) + + ModifyHsm(*cloudhsm.ModifyHsmInput) (*cloudhsm.ModifyHsmOutput, error) + + ModifyLunaClientRequest(*cloudhsm.ModifyLunaClientInput) (*request.Request, *cloudhsm.ModifyLunaClientOutput) + + ModifyLunaClient(*cloudhsm.ModifyLunaClientInput) (*cloudhsm.ModifyLunaClientOutput, error) +} + +var _ CloudHSMAPI = (*cloudhsm.CloudHSM)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,363 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudhsm_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudhsm" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudHSM_CreateHapg() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.CreateHapgInput{ + Label: aws.String("Label"), // Required + } + resp, err := svc.CreateHapg(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_CreateHsm() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.CreateHsmInput{ + IamRoleArn: aws.String("IamRoleArn"), // Required + SshKey: aws.String("SshKey"), // Required + SubnetId: aws.String("SubnetId"), // Required + SubscriptionType: aws.String("SubscriptionType"), // Required + ClientToken: aws.String("ClientToken"), + EniIp: aws.String("IpAddress"), + ExternalId: aws.String("ExternalId"), + SyslogIp: aws.String("IpAddress"), + } + resp, err := svc.CreateHsm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_CreateLunaClient() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.CreateLunaClientInput{ + Certificate: aws.String("Certificate"), // Required + Label: aws.String("ClientLabel"), + } + resp, err := svc.CreateLunaClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DeleteHapg() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DeleteHapgInput{ + HapgArn: aws.String("HapgArn"), // Required + } + resp, err := svc.DeleteHapg(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DeleteHsm() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DeleteHsmInput{ + HsmArn: aws.String("HsmArn"), // Required + } + resp, err := svc.DeleteHsm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DeleteLunaClient() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DeleteLunaClientInput{ + ClientArn: aws.String("ClientArn"), // Required + } + resp, err := svc.DeleteLunaClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DescribeHapg() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DescribeHapgInput{ + HapgArn: aws.String("HapgArn"), // Required + } + resp, err := svc.DescribeHapg(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudHSM_DescribeHsm() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DescribeHsmInput{ + HsmArn: aws.String("HsmArn"), + HsmSerialNumber: aws.String("HsmSerialNumber"), + } + resp, err := svc.DescribeHsm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_DescribeLunaClient() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.DescribeLunaClientInput{ + CertificateFingerprint: aws.String("CertificateFingerprint"), + ClientArn: aws.String("ClientArn"), + } + resp, err := svc.DescribeLunaClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_GetConfig() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.GetConfigInput{ + ClientArn: aws.String("ClientArn"), // Required + ClientVersion: aws.String("ClientVersion"), // Required + HapgList: []*string{ // Required + aws.String("HapgArn"), // Required + // More values... + }, + } + resp, err := svc.GetConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ListAvailableZones() { + svc := cloudhsm.New(session.New()) + + var params *cloudhsm.ListAvailableZonesInput + resp, err := svc.ListAvailableZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudHSM_ListHapgs() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ListHapgsInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListHapgs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ListHsms() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ListHsmsInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListHsms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ListLunaClients() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ListLunaClientsInput{ + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListLunaClients(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ModifyHapg() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ModifyHapgInput{ + HapgArn: aws.String("HapgArn"), // Required + Label: aws.String("Label"), + PartitionSerialList: []*string{ + aws.String("PartitionSerial"), // Required + // More values... + }, + } + resp, err := svc.ModifyHapg(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudHSM_ModifyHsm() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ModifyHsmInput{ + HsmArn: aws.String("HsmArn"), // Required + EniIp: aws.String("IpAddress"), + ExternalId: aws.String("ExternalId"), + IamRoleArn: aws.String("IamRoleArn"), + SubnetId: aws.String("SubnetId"), + SyslogIp: aws.String("IpAddress"), + } + resp, err := svc.ModifyHsm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudHSM_ModifyLunaClient() { + svc := cloudhsm.New(session.New()) + + params := &cloudhsm.ModifyLunaClientInput{ + Certificate: aws.String("Certificate"), // Required + ClientArn: aws.String("ClientArn"), // Required + } + resp, err := svc.ModifyLunaClient(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudhsm/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,87 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudhsm + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudHSM struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cloudhsm" + +// New creates a new instance of the CloudHSM client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudHSM client from just a session. +// svc := cloudhsm.New(mySession) +// +// // Create a CloudHSM client with additional configuration +// svc := cloudhsm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudHSM { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudHSM { + svc := &CloudHSM{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-05-30", + JSONVersion: "1.1", + TargetPrefix: "CloudHsmFrontendService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudHSM operation and runs any +// custom request initialization. +func (c *CloudHSM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2974 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudsearch provides a client for Amazon CloudSearch. 
+package cloudsearch + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBuildSuggesters = "BuildSuggesters" + +// BuildSuggestersRequest generates a request for the BuildSuggesters operation. +func (c *CloudSearch) BuildSuggestersRequest(input *BuildSuggestersInput) (req *request.Request, output *BuildSuggestersOutput) { + op := &request.Operation{ + Name: opBuildSuggesters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BuildSuggestersInput{} + } + + req = c.newRequest(op, input, output) + output = &BuildSuggestersOutput{} + req.Data = output + return +} + +// Indexes the search suggestions. For more information, see Configuring Suggesters +// (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html#configuring-suggesters) +// in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) BuildSuggesters(input *BuildSuggestersInput) (*BuildSuggestersOutput, error) { + req, out := c.BuildSuggestersRequest(input) + err := req.Send() + return out, err +} + +const opCreateDomain = "CreateDomain" + +// CreateDomainRequest generates a request for the CreateDomain operation. +func (c *CloudSearch) CreateDomainRequest(input *CreateDomainInput) (req *request.Request, output *CreateDomainOutput) { + op := &request.Operation{ + Name: opCreateDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDomainOutput{} + req.Data = output + return +} + +// Creates a new search domain. For more information, see Creating a Search +// Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/creating-domains.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. 
+func (c *CloudSearch) CreateDomain(input *CreateDomainInput) (*CreateDomainOutput, error) { + req, out := c.CreateDomainRequest(input) + err := req.Send() + return out, err +} + +const opDefineAnalysisScheme = "DefineAnalysisScheme" + +// DefineAnalysisSchemeRequest generates a request for the DefineAnalysisScheme operation. +func (c *CloudSearch) DefineAnalysisSchemeRequest(input *DefineAnalysisSchemeInput) (req *request.Request, output *DefineAnalysisSchemeOutput) { + op := &request.Operation{ + Name: opDefineAnalysisScheme, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DefineAnalysisSchemeInput{} + } + + req = c.newRequest(op, input, output) + output = &DefineAnalysisSchemeOutput{} + req.Data = output + return +} + +// Configures an analysis scheme that can be applied to a text or text-array +// field to define language-specific text processing options. For more information, +// see Configuring Analysis Schemes (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DefineAnalysisScheme(input *DefineAnalysisSchemeInput) (*DefineAnalysisSchemeOutput, error) { + req, out := c.DefineAnalysisSchemeRequest(input) + err := req.Send() + return out, err +} + +const opDefineExpression = "DefineExpression" + +// DefineExpressionRequest generates a request for the DefineExpression operation. +func (c *CloudSearch) DefineExpressionRequest(input *DefineExpressionInput) (req *request.Request, output *DefineExpressionOutput) { + op := &request.Operation{ + Name: opDefineExpression, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DefineExpressionInput{} + } + + req = c.newRequest(op, input, output) + output = &DefineExpressionOutput{} + req.Data = output + return +} + +// Configures an Expression for the search domain. Used to create new expressions +// and modify existing ones. 
If the expression exists, the new configuration +// replaces the old one. For more information, see Configuring Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DefineExpression(input *DefineExpressionInput) (*DefineExpressionOutput, error) { + req, out := c.DefineExpressionRequest(input) + err := req.Send() + return out, err +} + +const opDefineIndexField = "DefineIndexField" + +// DefineIndexFieldRequest generates a request for the DefineIndexField operation. +func (c *CloudSearch) DefineIndexFieldRequest(input *DefineIndexFieldInput) (req *request.Request, output *DefineIndexFieldOutput) { + op := &request.Operation{ + Name: opDefineIndexField, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DefineIndexFieldInput{} + } + + req = c.newRequest(op, input, output) + output = &DefineIndexFieldOutput{} + req.Data = output + return +} + +// Configures an IndexField for the search domain. Used to create new fields +// and modify existing ones. You must specify the name of the domain you are +// configuring and an index field configuration. The index field configuration +// specifies a unique name, the index field type, and the options you want to +// configure for the field. The options you can specify depend on the IndexFieldType. +// If the field exists, the new configuration replaces the old one. For more +// information, see Configuring Index Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. 
+func (c *CloudSearch) DefineIndexField(input *DefineIndexFieldInput) (*DefineIndexFieldOutput, error) { + req, out := c.DefineIndexFieldRequest(input) + err := req.Send() + return out, err +} + +const opDefineSuggester = "DefineSuggester" + +// DefineSuggesterRequest generates a request for the DefineSuggester operation. +func (c *CloudSearch) DefineSuggesterRequest(input *DefineSuggesterInput) (req *request.Request, output *DefineSuggesterOutput) { + op := &request.Operation{ + Name: opDefineSuggester, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DefineSuggesterInput{} + } + + req = c.newRequest(op, input, output) + output = &DefineSuggesterOutput{} + req.Data = output + return +} + +// Configures a suggester for a domain. A suggester enables you to display possible +// matches before users finish typing their queries. When you configure a suggester, +// you must specify the name of the text field you want to search for possible +// matches and a unique name for the suggester. For more information, see Getting +// Search Suggestions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DefineSuggester(input *DefineSuggesterInput) (*DefineSuggesterOutput, error) { + req, out := c.DefineSuggesterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAnalysisScheme = "DeleteAnalysisScheme" + +// DeleteAnalysisSchemeRequest generates a request for the DeleteAnalysisScheme operation. 
+func (c *CloudSearch) DeleteAnalysisSchemeRequest(input *DeleteAnalysisSchemeInput) (req *request.Request, output *DeleteAnalysisSchemeOutput) { + op := &request.Operation{ + Name: opDeleteAnalysisScheme, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAnalysisSchemeInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAnalysisSchemeOutput{} + req.Data = output + return +} + +// Deletes an analysis scheme. For more information, see Configuring Analysis +// Schemes (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DeleteAnalysisScheme(input *DeleteAnalysisSchemeInput) (*DeleteAnalysisSchemeOutput, error) { + req, out := c.DeleteAnalysisSchemeRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDomain = "DeleteDomain" + +// DeleteDomainRequest generates a request for the DeleteDomain operation. +func (c *CloudSearch) DeleteDomainRequest(input *DeleteDomainInput) (req *request.Request, output *DeleteDomainOutput) { + op := &request.Operation{ + Name: opDeleteDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDomainOutput{} + req.Data = output + return +} + +// Permanently deletes a search domain and all of its data. Once a domain has +// been deleted, it cannot be recovered. For more information, see Deleting +// a Search Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/deleting-domains.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. 
+func (c *CloudSearch) DeleteDomain(input *DeleteDomainInput) (*DeleteDomainOutput, error) { + req, out := c.DeleteDomainRequest(input) + err := req.Send() + return out, err +} + +const opDeleteExpression = "DeleteExpression" + +// DeleteExpressionRequest generates a request for the DeleteExpression operation. +func (c *CloudSearch) DeleteExpressionRequest(input *DeleteExpressionInput) (req *request.Request, output *DeleteExpressionOutput) { + op := &request.Operation{ + Name: opDeleteExpression, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteExpressionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteExpressionOutput{} + req.Data = output + return +} + +// Removes an Expression from the search domain. For more information, see Configuring +// Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DeleteExpression(input *DeleteExpressionInput) (*DeleteExpressionOutput, error) { + req, out := c.DeleteExpressionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIndexField = "DeleteIndexField" + +// DeleteIndexFieldRequest generates a request for the DeleteIndexField operation. +func (c *CloudSearch) DeleteIndexFieldRequest(input *DeleteIndexFieldInput) (req *request.Request, output *DeleteIndexFieldOutput) { + op := &request.Operation{ + Name: opDeleteIndexField, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIndexFieldInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIndexFieldOutput{} + req.Data = output + return +} + +// Removes an IndexField from the search domain. For more information, see Configuring +// Index Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. 
+func (c *CloudSearch) DeleteIndexField(input *DeleteIndexFieldInput) (*DeleteIndexFieldOutput, error) { + req, out := c.DeleteIndexFieldRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSuggester = "DeleteSuggester" + +// DeleteSuggesterRequest generates a request for the DeleteSuggester operation. +func (c *CloudSearch) DeleteSuggesterRequest(input *DeleteSuggesterInput) (req *request.Request, output *DeleteSuggesterOutput) { + op := &request.Operation{ + Name: opDeleteSuggester, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSuggesterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSuggesterOutput{} + req.Data = output + return +} + +// Deletes a suggester. For more information, see Getting Search Suggestions +// (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DeleteSuggester(input *DeleteSuggesterInput) (*DeleteSuggesterOutput, error) { + req, out := c.DeleteSuggesterRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAnalysisSchemes = "DescribeAnalysisSchemes" + +// DescribeAnalysisSchemesRequest generates a request for the DescribeAnalysisSchemes operation. +func (c *CloudSearch) DescribeAnalysisSchemesRequest(input *DescribeAnalysisSchemesInput) (req *request.Request, output *DescribeAnalysisSchemesOutput) { + op := &request.Operation{ + Name: opDescribeAnalysisSchemes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAnalysisSchemesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAnalysisSchemesOutput{} + req.Data = output + return +} + +// Gets the analysis schemes configured for a domain. An analysis scheme defines +// language-specific text processing options for a text field. Can be limited +// to specific analysis schemes by name. 
By default, shows all analysis schemes +// and includes any pending changes to the configuration. Set the Deployed option +// to true to show the active configuration and exclude pending changes. For +// more information, see Configuring Analysis Schemes (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeAnalysisSchemes(input *DescribeAnalysisSchemesInput) (*DescribeAnalysisSchemesOutput, error) { + req, out := c.DescribeAnalysisSchemesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAvailabilityOptions = "DescribeAvailabilityOptions" + +// DescribeAvailabilityOptionsRequest generates a request for the DescribeAvailabilityOptions operation. +func (c *CloudSearch) DescribeAvailabilityOptionsRequest(input *DescribeAvailabilityOptionsInput) (req *request.Request, output *DescribeAvailabilityOptionsOutput) { + op := &request.Operation{ + Name: opDescribeAvailabilityOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAvailabilityOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAvailabilityOptionsOutput{} + req.Data = output + return +} + +// Gets the availability options configured for a domain. By default, shows +// the configuration with any pending changes. Set the Deployed option to true +// to show the active configuration and exclude pending changes. For more information, +// see Configuring Availability Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-availability-options.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. 
+func (c *CloudSearch) DescribeAvailabilityOptions(input *DescribeAvailabilityOptionsInput) (*DescribeAvailabilityOptionsOutput, error) { + req, out := c.DescribeAvailabilityOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDomains = "DescribeDomains" + +// DescribeDomainsRequest generates a request for the DescribeDomains operation. +func (c *CloudSearch) DescribeDomainsRequest(input *DescribeDomainsInput) (req *request.Request, output *DescribeDomainsOutput) { + op := &request.Operation{ + Name: opDescribeDomains, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDomainsOutput{} + req.Data = output + return +} + +// Gets information about the search domains owned by this account. Can be limited +// to specific domains. Shows all domains by default. To get the number of searchable +// documents in a domain, use the console or submit a matchall request to your +// domain's search endpoint: q=matchall&q.parser=structured&size=0. +// For more information, see Getting Information about a Search Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-domain-info.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeDomains(input *DescribeDomainsInput) (*DescribeDomainsOutput, error) { + req, out := c.DescribeDomainsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeExpressions = "DescribeExpressions" + +// DescribeExpressionsRequest generates a request for the DescribeExpressions operation. 
+func (c *CloudSearch) DescribeExpressionsRequest(input *DescribeExpressionsInput) (req *request.Request, output *DescribeExpressionsOutput) { + op := &request.Operation{ + Name: opDescribeExpressions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExpressionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExpressionsOutput{} + req.Data = output + return +} + +// Gets the expressions configured for the search domain. Can be limited to +// specific expressions by name. By default, shows all expressions and includes +// any pending changes to the configuration. Set the Deployed option to true +// to show the active configuration and exclude pending changes. For more information, +// see Configuring Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeExpressions(input *DescribeExpressionsInput) (*DescribeExpressionsOutput, error) { + req, out := c.DescribeExpressionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIndexFields = "DescribeIndexFields" + +// DescribeIndexFieldsRequest generates a request for the DescribeIndexFields operation. +func (c *CloudSearch) DescribeIndexFieldsRequest(input *DescribeIndexFieldsInput) (req *request.Request, output *DescribeIndexFieldsOutput) { + op := &request.Operation{ + Name: opDescribeIndexFields, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIndexFieldsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIndexFieldsOutput{} + req.Data = output + return +} + +// Gets information about the index fields configured for the search domain. +// Can be limited to specific fields by name. By default, shows all fields and +// includes any pending changes to the configuration. 
Set the Deployed option +// to true to show the active configuration and exclude pending changes. For +// more information, see Getting Domain Information (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-domain-info.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeIndexFields(input *DescribeIndexFieldsInput) (*DescribeIndexFieldsOutput, error) { + req, out := c.DescribeIndexFieldsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScalingParameters = "DescribeScalingParameters" + +// DescribeScalingParametersRequest generates a request for the DescribeScalingParameters operation. +func (c *CloudSearch) DescribeScalingParametersRequest(input *DescribeScalingParametersInput) (req *request.Request, output *DescribeScalingParametersOutput) { + op := &request.Operation{ + Name: opDescribeScalingParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScalingParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScalingParametersOutput{} + req.Data = output + return +} + +// Gets the scaling parameters configured for a domain. A domain's scaling parameters +// specify the desired search instance type and replication count. For more +// information, see Configuring Scaling Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-scaling-options.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeScalingParameters(input *DescribeScalingParametersInput) (*DescribeScalingParametersOutput, error) { + req, out := c.DescribeScalingParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeServiceAccessPolicies = "DescribeServiceAccessPolicies" + +// DescribeServiceAccessPoliciesRequest generates a request for the DescribeServiceAccessPolicies operation. 
+func (c *CloudSearch) DescribeServiceAccessPoliciesRequest(input *DescribeServiceAccessPoliciesInput) (req *request.Request, output *DescribeServiceAccessPoliciesOutput) { + op := &request.Operation{ + Name: opDescribeServiceAccessPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServiceAccessPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeServiceAccessPoliciesOutput{} + req.Data = output + return +} + +// Gets information about the access policies that control access to the domain's +// document and search endpoints. By default, shows the configuration with any +// pending changes. Set the Deployed option to true to show the active configuration +// and exclude pending changes. For more information, see Configuring Access +// for a Search Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeServiceAccessPolicies(input *DescribeServiceAccessPoliciesInput) (*DescribeServiceAccessPoliciesOutput, error) { + req, out := c.DescribeServiceAccessPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSuggesters = "DescribeSuggesters" + +// DescribeSuggestersRequest generates a request for the DescribeSuggesters operation. +func (c *CloudSearch) DescribeSuggestersRequest(input *DescribeSuggestersInput) (req *request.Request, output *DescribeSuggestersOutput) { + op := &request.Operation{ + Name: opDescribeSuggesters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSuggestersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSuggestersOutput{} + req.Data = output + return +} + +// Gets the suggesters configured for a domain. A suggester enables you to display +// possible matches before users finish typing their queries. Can be limited +// to specific suggesters by name. 
By default, shows all suggesters and includes +// any pending changes to the configuration. Set the Deployed option to true +// to show the active configuration and exclude pending changes. For more information, +// see Getting Search Suggestions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) DescribeSuggesters(input *DescribeSuggestersInput) (*DescribeSuggestersOutput, error) { + req, out := c.DescribeSuggestersRequest(input) + err := req.Send() + return out, err +} + +const opIndexDocuments = "IndexDocuments" + +// IndexDocumentsRequest generates a request for the IndexDocuments operation. +func (c *CloudSearch) IndexDocumentsRequest(input *IndexDocumentsInput) (req *request.Request, output *IndexDocumentsOutput) { + op := &request.Operation{ + Name: opIndexDocuments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &IndexDocumentsInput{} + } + + req = c.newRequest(op, input, output) + output = &IndexDocumentsOutput{} + req.Data = output + return +} + +// Tells the search domain to start indexing its documents using the latest +// indexing options. This operation must be invoked to activate options whose +// OptionStatus is RequiresIndexDocuments. +func (c *CloudSearch) IndexDocuments(input *IndexDocumentsInput) (*IndexDocumentsOutput, error) { + req, out := c.IndexDocumentsRequest(input) + err := req.Send() + return out, err +} + +const opListDomainNames = "ListDomainNames" + +// ListDomainNamesRequest generates a request for the ListDomainNames operation. 
+func (c *CloudSearch) ListDomainNamesRequest(input *ListDomainNamesInput) (req *request.Request, output *ListDomainNamesOutput) { + op := &request.Operation{ + Name: opListDomainNames, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDomainNamesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainNamesOutput{} + req.Data = output + return +} + +// Lists all search domains owned by an account. +func (c *CloudSearch) ListDomainNames(input *ListDomainNamesInput) (*ListDomainNamesOutput, error) { + req, out := c.ListDomainNamesRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAvailabilityOptions = "UpdateAvailabilityOptions" + +// UpdateAvailabilityOptionsRequest generates a request for the UpdateAvailabilityOptions operation. +func (c *CloudSearch) UpdateAvailabilityOptionsRequest(input *UpdateAvailabilityOptionsInput) (req *request.Request, output *UpdateAvailabilityOptionsOutput) { + op := &request.Operation{ + Name: opUpdateAvailabilityOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAvailabilityOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAvailabilityOptionsOutput{} + req.Data = output + return +} + +// Configures the availability options for a domain. Enabling the Multi-AZ option +// expands an Amazon CloudSearch domain to an additional Availability Zone in +// the same Region to increase fault tolerance in the event of a service disruption. +// Changes to the Multi-AZ option can take about half an hour to become active. +// For more information, see Configuring Availability Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-availability-options.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. 
+func (c *CloudSearch) UpdateAvailabilityOptions(input *UpdateAvailabilityOptionsInput) (*UpdateAvailabilityOptionsOutput, error) { + req, out := c.UpdateAvailabilityOptionsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateScalingParameters = "UpdateScalingParameters" + +// UpdateScalingParametersRequest generates a request for the UpdateScalingParameters operation. +func (c *CloudSearch) UpdateScalingParametersRequest(input *UpdateScalingParametersInput) (req *request.Request, output *UpdateScalingParametersOutput) { + op := &request.Operation{ + Name: opUpdateScalingParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateScalingParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateScalingParametersOutput{} + req.Data = output + return +} + +// Configures scaling parameters for a domain. A domain's scaling parameters +// specify the desired search instance type and replication count. Amazon CloudSearch +// will still automatically scale your domain based on the volume of data and +// traffic, but not below the desired instance type and replication count. If +// the Multi-AZ option is enabled, these values control the resources used per +// Availability Zone. For more information, see Configuring Scaling Options +// (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-scaling-options.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +func (c *CloudSearch) UpdateScalingParameters(input *UpdateScalingParametersInput) (*UpdateScalingParametersOutput, error) { + req, out := c.UpdateScalingParametersRequest(input) + err := req.Send() + return out, err +} + +const opUpdateServiceAccessPolicies = "UpdateServiceAccessPolicies" + +// UpdateServiceAccessPoliciesRequest generates a request for the UpdateServiceAccessPolicies operation. 
+func (c *CloudSearch) UpdateServiceAccessPoliciesRequest(input *UpdateServiceAccessPoliciesInput) (req *request.Request, output *UpdateServiceAccessPoliciesOutput) { + op := &request.Operation{ + Name: opUpdateServiceAccessPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServiceAccessPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateServiceAccessPoliciesOutput{} + req.Data = output + return +} + +// Configures the access rules that control access to the domain's document +// and search endpoints. For more information, see Configuring Access for an +// Amazon CloudSearch Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html" +// target="_blank). +func (c *CloudSearch) UpdateServiceAccessPolicies(input *UpdateServiceAccessPoliciesInput) (*UpdateServiceAccessPoliciesOutput, error) { + req, out := c.UpdateServiceAccessPoliciesRequest(input) + err := req.Send() + return out, err +} + +// The configured access rules for the domain's document and search endpoints, +// and the current status of those rules. +type AccessPoliciesStatus struct { + _ struct{} `type:"structure"` + + // Access rules for a domain's document or search service endpoints. For more + // information, see Configuring Access for a Search Domain (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html" + // target="_blank) in the Amazon CloudSearch Developer Guide. The maximum size + // of a policy document is 100 KB. + Options *string `type:"string" required:"true"` + + // The status of domain configuration option. 
+ Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AccessPoliciesStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessPoliciesStatus) GoString() string { + return s.String() +} + +// Synonyms, stopwords, and stemming options for an analysis scheme. Includes +// tokenization dictionary for Japanese. +type AnalysisOptions struct { + _ struct{} `type:"structure"` + + // The level of algorithmic stemming to perform: none, minimal, light, or full. + // The available levels vary depending on the language. For more information, + // see Language Specific Text Processing Settings (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/text-processing.html#text-processing-settings" + // target="_blank) in the Amazon CloudSearch Developer Guide + AlgorithmicStemming *string `type:"string" enum:"AlgorithmicStemming"` + + // A JSON array that contains a collection of terms, tokens, readings and part + // of speech for Japanese Tokenizaiton. The Japanese tokenization dictionary + // enables you to override the default tokenization for selected terms. This + // is only valid for Japanese language fields. + JapaneseTokenizationDictionary *string `type:"string"` + + // A JSON object that contains a collection of string:value pairs that each + // map a term to its stem. For example, {"term1": "stem1", "term2": "stem2", + // "term3": "stem3"}. The stemming dictionary is applied in addition to any + // algorithmic stemming. This enables you to override the results of the algorithmic + // stemming to correct specific cases of overstemming or understemming. The + // maximum size of a stemming dictionary is 500 KB. + StemmingDictionary *string `type:"string"` + + // A JSON array of terms to ignore during indexing and searching. For example, + // ["a", "an", "the", "of"]. 
The stopwords dictionary must explicitly list each + // word you want to ignore. Wildcards and regular expressions are not supported. + Stopwords *string `type:"string"` + + // A JSON object that defines synonym groups and aliases. A synonym group is + // an array of arrays, where each sub-array is a group of terms where each term + // in the group is considered a synonym of every other term in the group. The + // aliases value is an object that contains a collection of string:value pairs + // where the string specifies a term and the array of values specifies each + // of the aliases for that term. An alias is considered a synonym of the specified + // term, but the term is not considered a synonym of the alias. For more information + // about specifying synonyms, see Synonyms (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-analysis-schemes.html#synonyms) + // in the Amazon CloudSearch Developer Guide. + Synonyms *string `type:"string"` +} + +// String returns the string representation +func (s AnalysisOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalysisOptions) GoString() string { + return s.String() +} + +// Configuration information for an analysis scheme. Each analysis scheme has +// a unique name and specifies the language of the text to be processed. The +// following options can be configured for an analysis scheme: Synonyms, Stopwords, +// StemmingDictionary, JapaneseTokenizationDictionary and AlgorithmicStemming. +type AnalysisScheme struct { + _ struct{} `type:"structure"` + + // Synonyms, stopwords, and stemming options for an analysis scheme. Includes + // tokenization dictionary for Japanese. + AnalysisOptions *AnalysisOptions `type:"structure"` + + // An IETF RFC 4646 (http://tools.ietf.org/html/rfc4646" target="_blank) language + // code or mul for multiple languages. 
+ AnalysisSchemeLanguage *string `type:"string" required:"true" enum:"AnalysisSchemeLanguage"` + + // Names must begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). + AnalysisSchemeName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AnalysisScheme) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalysisScheme) GoString() string { + return s.String() +} + +// The status and configuration of an AnalysisScheme. +type AnalysisSchemeStatus struct { + _ struct{} `type:"structure"` + + // Configuration information for an analysis scheme. Each analysis scheme has + // a unique name and specifies the language of the text to be processed. The + // following options can be configured for an analysis scheme: Synonyms, Stopwords, + // StemmingDictionary, JapaneseTokenizationDictionary and AlgorithmicStemming. + Options *AnalysisScheme `type:"structure" required:"true"` + + // The status of domain configuration option. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalysisSchemeStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalysisSchemeStatus) GoString() string { + return s.String() +} + +// The status and configuration of the domain's availability options. +type AvailabilityOptionsStatus struct { + _ struct{} `type:"structure"` + + // The availability options configured for the domain. + Options *bool `type:"boolean" required:"true"` + + // The status of domain configuration option. 
+ Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AvailabilityOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityOptionsStatus) GoString() string { + return s.String() +} + +// Container for the parameters to the BuildSuggester operation. Specifies the +// name of the domain you want to update. +type BuildSuggestersInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s BuildSuggestersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildSuggestersInput) GoString() string { + return s.String() +} + +// The result of a BuildSuggester request. Contains a list of the fields used +// for suggestions. +type BuildSuggestersOutput struct { + _ struct{} `type:"structure"` + + // A list of field names. + FieldNames []*string `type:"list"` +} + +// String returns the string representation +func (s BuildSuggestersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildSuggestersOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the CreateDomain operation. Specifies a name +// for the new search domain. +type CreateDomainInput struct { + _ struct{} `type:"structure"` + + // A name for the domain you are creating. Allowed characters are a-z (lower-case + // letters), 0-9, and hyphen (-). 
Domain names must start with a letter or number + // and be at least 3 and no more than 28 characters long. + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainInput) GoString() string { + return s.String() +} + +// The result of a CreateDomainRequest. Contains the status of a newly created +// domain. +type CreateDomainOutput struct { + _ struct{} `type:"structure"` + + // The current status of the search domain. + DomainStatus *DomainStatus `type:"structure"` +} + +// String returns the string representation +func (s CreateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainOutput) GoString() string { + return s.String() +} + +// Options for a field that contains an array of dates. Present if IndexFieldType +// specifies the field is of type date-array. All options are enabled by default. +type DateArrayOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s DateArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DateArrayOptions) GoString() string { + return s.String() +} + +// Options for a date field. 
Dates and times are specified in UTC (Coordinated +// Universal Time) according to IETF RFC3339: yyyy-mm-ddT00:00:00Z. Present +// if IndexFieldType specifies the field is of type date. All options are enabled +// by default. +type DateOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. + // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. Multiple wildcards, and wildcards embedded within a + // string are not supported. + // + // The name score is reserved and cannot be used as a field name. To reference + // a document's ID, you can use the name _id. 
+ SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DateOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DateOptions) GoString() string { + return s.String() +} + +// Container for the parameters to the DefineAnalysisScheme operation. Specifies +// the name of the domain you want to update and the analysis scheme configuration. +type DefineAnalysisSchemeInput struct { + _ struct{} `type:"structure"` + + // Configuration information for an analysis scheme. Each analysis scheme has + // a unique name and specifies the language of the text to be processed. The + // following options can be configured for an analysis scheme: Synonyms, Stopwords, + // StemmingDictionary, JapaneseTokenizationDictionary and AlgorithmicStemming. + AnalysisScheme *AnalysisScheme `type:"structure" required:"true"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DefineAnalysisSchemeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineAnalysisSchemeInput) GoString() string { + return s.String() +} + +// The result of a DefineAnalysisScheme request. Contains the status of the +// newly-configured analysis scheme. +type DefineAnalysisSchemeOutput struct { + _ struct{} `type:"structure"` + + // The status and configuration of an AnalysisScheme. 
+ AnalysisScheme *AnalysisSchemeStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineAnalysisSchemeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineAnalysisSchemeOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DefineExpression operation. Specifies +// the name of the domain you want to update and the expression you want to +// configure. +type DefineExpressionInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // A named expression that can be evaluated at search time. Can be used to sort + // the search results, define other expressions, or return computed information + // in the search results. + Expression *Expression `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineExpressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineExpressionInput) GoString() string { + return s.String() +} + +// The result of a DefineExpression request. Contains the status of the newly-configured +// expression. +type DefineExpressionOutput struct { + _ struct{} `type:"structure"` + + // The value of an Expression and its current status. 
+ Expression *ExpressionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineExpressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineExpressionOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DefineIndexField operation. Specifies +// the name of the domain you want to update and the index field configuration. +type DefineIndexFieldInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // The index field and field options you want to configure. + IndexField *IndexField `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineIndexFieldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineIndexFieldInput) GoString() string { + return s.String() +} + +// The result of a DefineIndexField request. Contains the status of the newly-configured +// index field. +type DefineIndexFieldOutput struct { + _ struct{} `type:"structure"` + + // The value of an IndexField and its current status. + IndexField *IndexFieldStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineIndexFieldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineIndexFieldOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DefineSuggester operation. Specifies +// the name of the domain you want to update and the suggester configuration. 
+type DefineSuggesterInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // Configuration information for a search suggester. Each suggester has a unique + // name and specifies the text field you want to use for suggestions. The following + // options can be configured for a suggester: FuzzyMatching, SortExpression. + Suggester *Suggester `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineSuggesterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineSuggesterInput) GoString() string { + return s.String() +} + +// The result of a DefineSuggester request. Contains the status of the newly-configured +// suggester. +type DefineSuggesterOutput struct { + _ struct{} `type:"structure"` + + // The value of a Suggester and its current status. + Suggester *SuggesterStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DefineSuggesterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefineSuggesterOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteAnalysisScheme operation. Specifies +// the name of the domain you want to update and the analysis scheme you want +// to delete. +type DeleteAnalysisSchemeInput struct { + _ struct{} `type:"structure"` + + // The name of the analysis scheme you want to delete. + AnalysisSchemeName *string `min:"1" type:"string" required:"true"` + + // A string that represents the name of a domain. 
Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAnalysisSchemeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAnalysisSchemeInput) GoString() string { + return s.String() +} + +// The result of a DeleteAnalysisScheme request. Contains the status of the +// deleted analysis scheme. +type DeleteAnalysisSchemeOutput struct { + _ struct{} `type:"structure"` + + // The status of the analysis scheme being deleted. + AnalysisScheme *AnalysisSchemeStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteAnalysisSchemeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAnalysisSchemeOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteDomain operation. Specifies the +// name of the domain you want to delete. +type DeleteDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain you want to permanently delete. + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainInput) GoString() string { + return s.String() +} + +// The result of a DeleteDomain request. Contains the status of a newly deleted +// domain, or no status if the domain has already been completely deleted. +type DeleteDomainOutput struct { + _ struct{} `type:"structure"` + + // The current status of the search domain. 
+ DomainStatus *DomainStatus `type:"structure"` +} + +// String returns the string representation +func (s DeleteDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteExpression operation. Specifies +// the name of the domain you want to update and the name of the expression +// you want to delete. +type DeleteExpressionInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // The name of the Expression to delete. + ExpressionName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteExpressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteExpressionInput) GoString() string { + return s.String() +} + +// The result of a DeleteExpression request. Specifies the expression being +// deleted. +type DeleteExpressionOutput struct { + _ struct{} `type:"structure"` + + // The status of the expression being deleted. + Expression *ExpressionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteExpressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteExpressionOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteIndexField operation. Specifies +// the name of the domain you want to update and the name of the index field +// you want to delete. 
+type DeleteIndexFieldInput struct {
+ _ struct{} `type:"structure"`
+
+ // A string that represents the name of a domain. Domain names are unique across
+ // the domains owned by an account within an AWS region. Domain names start
+ // with a letter or number and can contain the following characters: a-z (lowercase),
+ // 0-9, and - (hyphen).
+ DomainName *string `min:"3" type:"string" required:"true"`
+
+ // The name of the index field you want to remove from the domain's indexing
+ // options.
+ IndexFieldName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteIndexFieldInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteIndexFieldInput) GoString() string {
+ return s.String()
+}
+
+// The result of a DeleteIndexField request.
+type DeleteIndexFieldOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The status of the index field being deleted.
+ IndexField *IndexFieldStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteIndexFieldOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteIndexFieldOutput) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the DeleteSuggester operation. Specifies
+// the name of the domain you want to update and name of the suggester you want
+// to delete.
+type DeleteSuggesterInput struct {
+ _ struct{} `type:"structure"`
+
+ // A string that represents the name of a domain. Domain names are unique across
+ // the domains owned by an account within an AWS region. Domain names start
+ // with a letter or number and can contain the following characters: a-z (lowercase),
+ // 0-9, and - (hyphen).
+ DomainName *string `min:"3" type:"string" required:"true"`
+
+ // Specifies the name of the suggester you want to delete. 
+ SuggesterName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSuggesterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSuggesterInput) GoString() string { + return s.String() +} + +// The result of a DeleteSuggester request. Contains the status of the deleted +// suggester. +type DeleteSuggesterOutput struct { + _ struct{} `type:"structure"` + + // The status of the suggester being deleted. + Suggester *SuggesterStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteSuggesterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSuggesterOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeAnalysisSchemes operation. Specifies +// the name of the domain you want to describe. To limit the response to particular +// analysis schemes, specify the names of the analysis schemes you want to describe. +// To show the active configuration and exclude any pending changes, set the +// Deployed option to true. +type DescribeAnalysisSchemesInput struct { + _ struct{} `type:"structure"` + + // The analysis schemes you want to describe. + AnalysisSchemeNames []*string `type:"list"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAnalysisSchemesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAnalysisSchemesInput) GoString() string { + return s.String() +} + +// The result of a DescribeAnalysisSchemes request. 
Contains the analysis schemes +// configured for the domain specified in the request. +type DescribeAnalysisSchemesOutput struct { + _ struct{} `type:"structure"` + + // The analysis scheme descriptions. + AnalysisSchemes []*AnalysisSchemeStatus `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeAnalysisSchemesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAnalysisSchemesOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeAvailabilityOptions operation. +// Specifies the name of the domain you want to describe. To show the active +// configuration and exclude any pending changes, set the Deployed option to +// true. +type DescribeAvailabilityOptionsInput struct { + _ struct{} `type:"structure"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAvailabilityOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityOptionsInput) GoString() string { + return s.String() +} + +// The result of a DescribeAvailabilityOptions request. Indicates whether or +// not the Multi-AZ option is enabled for the domain specified in the request. +type DescribeAvailabilityOptionsOutput struct { + _ struct{} `type:"structure"` + + // The availability options configured for the domain. Indicates whether Multi-AZ + // is enabled for the domain. 
+ AvailabilityOptions *AvailabilityOptionsStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeAvailabilityOptionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAvailabilityOptionsOutput) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the DescribeDomains operation. By default
+// shows the status of all domains. To restrict the response to particular domains,
+// specify the names of the domains you want to describe.
+type DescribeDomainsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The names of the domains you want to include in the response.
+ DomainNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeDomainsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDomainsInput) GoString() string {
+ return s.String()
+}
+
+// The result of a DescribeDomains request. Contains the status of the domains
+// specified in the request or all domains owned by the account.
+type DescribeDomainsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list that contains the status of each requested domain.
+ DomainStatusList []*DomainStatus `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeDomainsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDomainsOutput) GoString() string {
+ return s.String()
+}
+
+// Container for the parameters to the DescribeExpressions operation. Specifies
+// the name of the domain you want to describe. To restrict the response to
+// particular expressions, specify the names of the expressions you want to
+// describe. To show the active configuration and exclude any pending changes,
+// set the Deployed option to true. 
+type DescribeExpressionsInput struct { + _ struct{} `type:"structure"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. + DomainName *string `min:"3" type:"string" required:"true"` + + // Limits the DescribeExpressions response to the specified expressions. If + // not specified, all expressions are shown. + ExpressionNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeExpressionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExpressionsInput) GoString() string { + return s.String() +} + +// The result of a DescribeExpressions request. Contains the expressions configured +// for the domain specified in the request. +type DescribeExpressionsOutput struct { + _ struct{} `type:"structure"` + + // The expressions configured for the domain. + Expressions []*ExpressionStatus `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeExpressionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExpressionsOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeIndexFields operation. Specifies +// the name of the domain you want to describe. To restrict the response to +// particular index fields, specify the names of the index fields you want to +// describe. To show the active configuration and exclude any pending changes, +// set the Deployed option to true. +type DescribeIndexFieldsInput struct { + _ struct{} `type:"structure"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. 
+ DomainName *string `min:"3" type:"string" required:"true"` + + // A list of the index fields you want to describe. If not specified, information + // is returned for all configured index fields. + FieldNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeIndexFieldsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIndexFieldsInput) GoString() string { + return s.String() +} + +// The result of a DescribeIndexFields request. Contains the index fields configured +// for the domain specified in the request. +type DescribeIndexFieldsOutput struct { + _ struct{} `type:"structure"` + + // The index fields configured for the domain. + IndexFields []*IndexFieldStatus `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeIndexFieldsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIndexFieldsOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeScalingParameters operation. +// Specifies the name of the domain you want to describe. +type DescribeScalingParametersInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeScalingParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingParametersInput) GoString() string { + return s.String() +} + +// The result of a DescribeScalingParameters request. 
Contains the scaling parameters +// configured for the domain specified in the request. +type DescribeScalingParametersOutput struct { + _ struct{} `type:"structure"` + + // The status and configuration of a search domain's scaling parameters. + ScalingParameters *ScalingParametersStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeScalingParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingParametersOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeServiceAccessPolicies operation. +// Specifies the name of the domain you want to describe. To show the active +// configuration and exclude any pending changes, set the Deployed option to +// true. +type DescribeServiceAccessPoliciesInput struct { + _ struct{} `type:"structure"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeServiceAccessPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServiceAccessPoliciesInput) GoString() string { + return s.String() +} + +// The result of a DescribeServiceAccessPolicies request. +type DescribeServiceAccessPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The access rules configured for the domain specified in the request. 
+ AccessPolicies *AccessPoliciesStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeServiceAccessPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServiceAccessPoliciesOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeSuggester operation. Specifies +// the name of the domain you want to describe. To restrict the response to +// particular suggesters, specify the names of the suggesters you want to describe. +// To show the active configuration and exclude any pending changes, set the +// Deployed option to true. +type DescribeSuggestersInput struct { + _ struct{} `type:"structure"` + + // Whether to display the deployed configuration (true) or include any pending + // changes (false). Defaults to false. + Deployed *bool `type:"boolean"` + + // The name of the domain you want to describe. + DomainName *string `min:"3" type:"string" required:"true"` + + // The suggesters you want to describe. + SuggesterNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeSuggestersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSuggestersInput) GoString() string { + return s.String() +} + +// The result of a DescribeSuggesters request. +type DescribeSuggestersOutput struct { + _ struct{} `type:"structure"` + + // The suggesters configured for the domain specified in the request. + Suggesters []*SuggesterStatus `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeSuggestersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSuggestersOutput) GoString() string { + return s.String() +} + +// Options for a search suggester. 
+type DocumentSuggesterOptions struct { + _ struct{} `type:"structure"` + + // The level of fuzziness allowed when suggesting matches for a string: none, + // low, or high. With none, the specified string is treated as an exact prefix. + // With low, suggestions must differ from the specified string by no more than + // one character. With high, suggestions can differ by up to two characters. + // The default is none. + FuzzyMatching *string `type:"string" enum:"SuggesterFuzzyMatching"` + + // An expression that computes a score for each suggestion to control how they + // are sorted. The scores are rounded to the nearest integer, with a floor of + // 0 and a ceiling of 2^31-1. A document's relevance score is not computed for + // suggestions, so sort expressions cannot reference the _score value. To sort + // suggestions using a numeric field or existing expression, simply specify + // the name of the field or expression. If no expression is configured for the + // suggester, the suggestions are sorted with the closest matches listed first. + SortExpression *string `type:"string"` + + // The name of the index field you want to use for suggestions. + SourceField *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DocumentSuggesterOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentSuggesterOptions) GoString() string { + return s.String() +} + +// The current status of the search domain. +type DomainStatus struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the search domain. See Identifiers for + // IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html" + // target="_blank) in Using AWS Identity and Access Management for more information. + ARN *string `type:"string"` + + // True if the search domain is created. 
It can take several minutes to initialize + // a domain when CreateDomain is called. Newly created search domains are returned + // from DescribeDomains with a false value for Created until domain creation + // is complete. + Created *bool `type:"boolean"` + + // True if the search domain has been deleted. The system must clean up resources + // dedicated to the search domain when DeleteDomain is called. Newly deleted + // search domains are returned from DescribeDomains with a true value for IsDeleted + // for several minutes until resource cleanup is complete. + Deleted *bool `type:"boolean"` + + // The service endpoint for updating documents in a search domain. + DocService *ServiceEndpoint `type:"structure"` + + // An internally generated unique identifier for a domain. + DomainId *string `min:"1" type:"string" required:"true"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + Limits *Limits `type:"structure"` + + // True if processing is being done to activate the current domain configuration. + Processing *bool `type:"boolean"` + + // True if IndexDocuments needs to be called to activate the current domain + // configuration. + RequiresIndexDocuments *bool `type:"boolean" required:"true"` + + // The number of search instances that are available to process search requests. + SearchInstanceCount *int64 `min:"1" type:"integer"` + + // The instance type that is being used to process search requests. + SearchInstanceType *string `type:"string"` + + // The number of partitions across which the search index is spread. + SearchPartitionCount *int64 `min:"1" type:"integer"` + + // The service endpoint for requesting search results from a search domain. 
+ SearchService *ServiceEndpoint `type:"structure"` +} + +// String returns the string representation +func (s DomainStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainStatus) GoString() string { + return s.String() +} + +// Options for a field that contains an array of double-precision 64-bit floating +// point values. Present if IndexFieldType specifies the field is of type double-array. +// All options are enabled by default. +type DoubleArrayOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *float64 `type:"double"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s DoubleArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DoubleArrayOptions) GoString() string { + return s.String() +} + +// Options for a double-precision 64-bit floating point field. Present if IndexFieldType +// specifies the field is of type double. All options are enabled by default. +type DoubleOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + // This can be important if you are using the field in an expression and that + // field is not present in every document. + DefaultValue *float64 `type:"double"` + + // Whether facet information can be returned for the field. 
+ FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // The name of the source field to map to the field. + SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DoubleOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DoubleOptions) GoString() string { + return s.String() +} + +// A named expression that can be evaluated at search time. Can be used to sort +// the search results, define other expressions, or return computed information +// in the search results. +type Expression struct { + _ struct{} `type:"structure"` + + // Names must begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). + ExpressionName *string `min:"1" type:"string" required:"true"` + + // The expression to evaluate for sorting while processing a search request. + // The Expression syntax is based on JavaScript expressions. For more information, + // see Configuring Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html" + // target="_blank) in the Amazon CloudSearch Developer Guide. + ExpressionValue *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Expression) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Expression) GoString() string { + return s.String() +} + +// The value of an Expression and its current status. 
+type ExpressionStatus struct { + _ struct{} `type:"structure"` + + // The expression that is evaluated for sorting while processing a search request. + Options *Expression `type:"structure" required:"true"` + + // The status of domain configuration option. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExpressionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpressionStatus) GoString() string { + return s.String() +} + +// Container for the parameters to the IndexDocuments operation. Specifies the +// name of the domain you want to re-index. +type IndexDocumentsInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocumentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocumentsInput) GoString() string { + return s.String() +} + +// The result of an IndexDocuments request. Contains the status of the indexing +// operation, including the fields being indexed. +type IndexDocumentsOutput struct { + _ struct{} `type:"structure"` + + // The names of the fields that are currently being indexed. 
+ FieldNames []*string `type:"list"` +} + +// String returns the string representation +func (s IndexDocumentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocumentsOutput) GoString() string { + return s.String() +} + +// Configuration information for a field in the index, including its name, type, +// and options. The supported options depend on the IndexFieldType. +type IndexField struct { + _ struct{} `type:"structure"` + + // Options for a field that contains an array of dates. Present if IndexFieldType + // specifies the field is of type date-array. All options are enabled by default. + DateArrayOptions *DateArrayOptions `type:"structure"` + + // Options for a date field. Dates and times are specified in UTC (Coordinated + // Universal Time) according to IETF RFC3339: yyyy-mm-ddT00:00:00Z. Present + // if IndexFieldType specifies the field is of type date. All options are enabled + // by default. + DateOptions *DateOptions `type:"structure"` + + // Options for a field that contains an array of double-precision 64-bit floating + // point values. Present if IndexFieldType specifies the field is of type double-array. + // All options are enabled by default. + DoubleArrayOptions *DoubleArrayOptions `type:"structure"` + + // Options for a double-precision 64-bit floating point field. Present if IndexFieldType + // specifies the field is of type double. All options are enabled by default. + DoubleOptions *DoubleOptions `type:"structure"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. 
+ // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. Multiple wildcards, and wildcards embedded within a + // string are not supported. + // + // The name score is reserved and cannot be used as a field name. To reference + // a document's ID, you can use the name _id. + IndexFieldName *string `min:"1" type:"string" required:"true"` + + // The type of field. The valid options for a field depend on the field type. + // For more information about the supported field types, see Configuring Index + // Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html" + // target="_blank) in the Amazon CloudSearch Developer Guide. + IndexFieldType *string `type:"string" required:"true" enum:"IndexFieldType"` + + // Options for a field that contains an array of 64-bit signed integers. Present + // if IndexFieldType specifies the field is of type int-array. All options are + // enabled by default. + IntArrayOptions *IntArrayOptions `type:"structure"` + + // Options for a 64-bit signed integer field. Present if IndexFieldType specifies + // the field is of type int. All options are enabled by default. + IntOptions *IntOptions `type:"structure"` + + // Options for a latlon field. A latlon field contains a location stored as + // a latitude and longitude value pair. Present if IndexFieldType specifies + // the field is of type latlon. All options are enabled by default. + LatLonOptions *LatLonOptions `type:"structure"` + + // Options for a field that contains an array of literal strings. Present if + // IndexFieldType specifies the field is of type literal-array. All options + // are enabled by default. + LiteralArrayOptions *LiteralArrayOptions `type:"structure"` + + // Options for literal field. 
Present if IndexFieldType specifies the field + // is of type literal. All options are enabled by default. + LiteralOptions *LiteralOptions `type:"structure"` + + // Options for a field that contains an array of text strings. Present if IndexFieldType + // specifies the field is of type text-array. A text-array field is always searchable. + // All options are enabled by default. + TextArrayOptions *TextArrayOptions `type:"structure"` + + // Options for text field. Present if IndexFieldType specifies the field is + // of type text. A text field is always searchable. All options are enabled + // by default. + TextOptions *TextOptions `type:"structure"` +} + +// String returns the string representation +func (s IndexField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexField) GoString() string { + return s.String() +} + +// The value of an IndexField and its current status. +type IndexFieldStatus struct { + _ struct{} `type:"structure"` + + // Configuration information for a field in the index, including its name, type, + // and options. The supported options depend on the IndexFieldType. + Options *IndexField `type:"structure" required:"true"` + + // The status of domain configuration option. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s IndexFieldStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexFieldStatus) GoString() string { + return s.String() +} + +// Options for a field that contains an array of 64-bit signed integers. Present +// if IndexFieldType specifies the field is of type int-array. All options are +// enabled by default. +type IntArrayOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. 
+ DefaultValue *int64 `type:"long"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s IntArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntArrayOptions) GoString() string { + return s.String() +} + +// Options for a 64-bit signed integer field. Present if IndexFieldType specifies +// the field is of type int. All options are enabled by default. +type IntOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + // This can be important if you are using the field in an expression and that + // field is not present in every document. + DefaultValue *int64 `type:"long"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // The name of the source field to map to the field. + SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s IntOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntOptions) GoString() string { + return s.String() +} + +// Options for a latlon field. 
A latlon field contains a location stored as +// a latitude and longitude value pair. Present if IndexFieldType specifies +// the field is of type latlon. All options are enabled by default. +type LatLonOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. + // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. Multiple wildcards, and wildcards embedded within a + // string are not supported. + // + // The name score is reserved and cannot be used as a field name. To reference + // a document's ID, you can use the name _id. 
+ SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LatLonOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LatLonOptions) GoString() string { + return s.String() +} + +type Limits struct { + _ struct{} `type:"structure"` + + MaximumPartitionCount *int64 `min:"1" type:"integer" required:"true"` + + MaximumReplicationCount *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s Limits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Limits) GoString() string { + return s.String() +} + +type ListDomainNamesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListDomainNamesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainNamesInput) GoString() string { + return s.String() +} + +// The result of a ListDomainNames request. Contains a list of the domains owned +// by an account. +type ListDomainNamesOutput struct { + _ struct{} `type:"structure"` + + // The names of the search domains owned by an account. + DomainNames map[string]*string `type:"map"` +} + +// String returns the string representation +func (s ListDomainNamesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainNamesOutput) GoString() string { + return s.String() +} + +// Options for a field that contains an array of literal strings. Present if +// IndexFieldType specifies the field is of type literal-array. All options +// are enabled by default. +type LiteralArrayOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. 
+ DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s LiteralArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LiteralArrayOptions) GoString() string { + return s.String() +} + +// Options for literal field. Present if IndexFieldType specifies the field +// is of type literal. All options are enabled by default. +type LiteralOptions struct { + _ struct{} `type:"structure"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether facet information can be returned for the field. + FacetEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // Whether the contents of the field are searchable. + SearchEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. + // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). 
Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. Multiple wildcards, and wildcards embedded within a + // string are not supported. + // + // The name score is reserved and cannot be used as a field name. To reference + // a document's ID, you can use the name _id. + SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LiteralOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LiteralOptions) GoString() string { + return s.String() +} + +// The status of domain configuration option. +type OptionStatus struct { + _ struct{} `type:"structure"` + + // A timestamp for when this option was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Indicates that the option will be deleted once processing is complete. + PendingDeletion *bool `type:"boolean"` + + // The state of processing a change to an option. Possible values: + // + // RequiresIndexDocuments: the option's latest value will not be deployed + // until IndexDocuments has been called and indexing is complete. Processing: + // the option's latest value is in the process of being activated. Active: + // the option's latest value is completely deployed. FailedToValidate: the + // option value is not compatible with the domain's data and cannot be used + // to index the data. You must either modify the option value or update or remove + // the incompatible documents. + State *string `type:"string" required:"true" enum:"OptionState"` + + // A timestamp for when this option was last updated. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // A unique integer that indicates when this option was last updated. 
+ UpdateVersion *int64 `type:"integer"` +} + +// String returns the string representation +func (s OptionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionStatus) GoString() string { + return s.String() +} + +// The desired instance type and desired number of replicas of each index partition. +type ScalingParameters struct { + _ struct{} `type:"structure"` + + // The instance type that you want to preconfigure for your domain. For example, + // search.m1.small. + DesiredInstanceType *string `type:"string" enum:"PartitionInstanceType"` + + // The number of partitions you want to preconfigure for your domain. Only valid + // when you select m2.2xlarge as the desired instance type. + DesiredPartitionCount *int64 `type:"integer"` + + // The number of replicas you want to preconfigure for each index partition. + DesiredReplicationCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ScalingParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingParameters) GoString() string { + return s.String() +} + +// The status and configuration of a search domain's scaling parameters. +type ScalingParametersStatus struct { + _ struct{} `type:"structure"` + + // The desired instance type and desired number of replicas of each index partition. + Options *ScalingParameters `type:"structure" required:"true"` + + // The status of domain configuration option. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ScalingParametersStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingParametersStatus) GoString() string { + return s.String() +} + +// The endpoint to which service requests can be submitted. 
+type ServiceEndpoint struct { + _ struct{} `type:"structure"` + + // The endpoint to which service requests can be submitted. For example, search-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.cloudsearch.amazonaws.com + // or doc-imdb-movies-oopcnjfn6ugofer3zx5iadxxca.eu-west-1.cloudsearch.amazonaws.com. + Endpoint *string `type:"string"` +} + +// String returns the string representation +func (s ServiceEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceEndpoint) GoString() string { + return s.String() +} + +// Configuration information for a search suggester. Each suggester has a unique +// name and specifies the text field you want to use for suggestions. The following +// options can be configured for a suggester: FuzzyMatching, SortExpression. +type Suggester struct { + _ struct{} `type:"structure"` + + // Options for a search suggester. + DocumentSuggesterOptions *DocumentSuggesterOptions `type:"structure" required:"true"` + + // Names must begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). + SuggesterName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Suggester) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Suggester) GoString() string { + return s.String() +} + +// The value of a Suggester and its current status. +type SuggesterStatus struct { + _ struct{} `type:"structure"` + + // Configuration information for a search suggester. Each suggester has a unique + // name and specifies the text field you want to use for suggestions. The following + // options can be configured for a suggester: FuzzyMatching, SortExpression. + Options *Suggester `type:"structure" required:"true"` + + // The status of domain configuration option. 
+ Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SuggesterStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggesterStatus) GoString() string { + return s.String() +} + +// Options for a field that contains an array of text strings. Present if IndexFieldType +// specifies the field is of type text-array. A text-array field is always searchable. +// All options are enabled by default. +type TextArrayOptions struct { + _ struct{} `type:"structure"` + + // The name of an analysis scheme for a text-array field. + AnalysisScheme *string `type:"string"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether highlights can be returned for the field. + HighlightEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. + ReturnEnabled *bool `type:"boolean"` + + // A list of source fields to map to the field. + SourceFields *string `type:"string"` +} + +// String returns the string representation +func (s TextArrayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TextArrayOptions) GoString() string { + return s.String() +} + +// Options for text field. Present if IndexFieldType specifies the field is +// of type text. A text field is always searchable. All options are enabled +// by default. +type TextOptions struct { + _ struct{} `type:"structure"` + + // The name of an analysis scheme for a text field. + AnalysisScheme *string `type:"string"` + + // A value to use for the field if the field isn't specified for a document. + DefaultValue *string `type:"string"` + + // Whether highlights can be returned for the field. + HighlightEnabled *bool `type:"boolean"` + + // Whether the contents of the field can be returned in the search results. 
+ ReturnEnabled *bool `type:"boolean"` + + // Whether the field can be used to sort the search results. + SortEnabled *bool `type:"boolean"` + + // A string that represents the name of an index field. CloudSearch supports + // regular index fields as well as dynamic fields. A dynamic field's name defines + // a pattern that begins or ends with a wildcard. Any document fields that don't + // map to a regular index field but do match a dynamic field's pattern are configured + // with the dynamic field's indexing options. + // + // Regular field names begin with a letter and can contain the following characters: + // a-z (lowercase), 0-9, and _ (underscore). Dynamic field names must begin + // or end with a wildcard (*). The wildcard can also be the only character in + // a dynamic field name. Multiple wildcards, and wildcards embedded within a + // string are not supported. + // + // The name score is reserved and cannot be used as a field name. To reference + // a document's ID, you can use the name _id. + SourceField *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TextOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TextOptions) GoString() string { + return s.String() +} + +// Container for the parameters to the UpdateAvailabilityOptions operation. +// Specifies the name of the domain you want to update and the Multi-AZ availability +// option. +type UpdateAvailabilityOptionsInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). 
+ DomainName *string `min:"3" type:"string" required:"true"` + + // You expand an existing search domain to a second Availability Zone by setting + // the Multi-AZ option to true. Similarly, you can turn off the Multi-AZ option + // to downgrade the domain to a single Availability Zone by setting the Multi-AZ + // option to false. + MultiAZ *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s UpdateAvailabilityOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAvailabilityOptionsInput) GoString() string { + return s.String() +} + +// The result of a UpdateAvailabilityOptions request. Contains the status of +// the domain's availability options. +type UpdateAvailabilityOptionsOutput struct { + _ struct{} `type:"structure"` + + // The newly-configured availability options. Indicates whether Multi-AZ is + // enabled for the domain. + AvailabilityOptions *AvailabilityOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s UpdateAvailabilityOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAvailabilityOptionsOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the UpdateScalingParameters operation. Specifies +// the name of the domain you want to update and the scaling parameters you +// want to configure. +type UpdateScalingParametersInput struct { + _ struct{} `type:"structure"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // The desired instance type and desired number of replicas of each index partition. 
+ ScalingParameters *ScalingParameters `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateScalingParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateScalingParametersInput) GoString() string { + return s.String() +} + +// The result of a UpdateScalingParameters request. Contains the status of the +// newly-configured scaling parameters. +type UpdateScalingParametersOutput struct { + _ struct{} `type:"structure"` + + // The status and configuration of a search domain's scaling parameters. + ScalingParameters *ScalingParametersStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateScalingParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateScalingParametersOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the UpdateServiceAccessPolicies operation. +// Specifies the name of the domain you want to update and the access rules +// you want to configure. +type UpdateServiceAccessPoliciesInput struct { + _ struct{} `type:"structure"` + + // The access rules you want to configure. These rules replace any existing + // rules. + AccessPolicies *string `type:"string" required:"true"` + + // A string that represents the name of a domain. Domain names are unique across + // the domains owned by an account within an AWS region. Domain names start + // with a letter or number and can contain the following characters: a-z (lowercase), + // 0-9, and - (hyphen). 
+ DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateServiceAccessPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceAccessPoliciesInput) GoString() string { + return s.String() +} + +// The result of an UpdateServiceAccessPolicies request. Contains the new access +// policies. +type UpdateServiceAccessPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The access rules configured for the domain. + AccessPolicies *AccessPoliciesStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateServiceAccessPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceAccessPoliciesOutput) GoString() string { + return s.String() +} + +const ( + // @enum AlgorithmicStemming + AlgorithmicStemmingNone = "none" + // @enum AlgorithmicStemming + AlgorithmicStemmingMinimal = "minimal" + // @enum AlgorithmicStemming + AlgorithmicStemmingLight = "light" + // @enum AlgorithmicStemming + AlgorithmicStemmingFull = "full" +) + +// An IETF RFC 4646 (http://tools.ietf.org/html/rfc4646" target="_blank) language +// code or mul for multiple languages. 
+const ( + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageAr = "ar" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageBg = "bg" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageCa = "ca" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageCs = "cs" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageDa = "da" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageDe = "de" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageEl = "el" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageEn = "en" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageEs = "es" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageEu = "eu" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageFa = "fa" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageFi = "fi" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageFr = "fr" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageGa = "ga" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageGl = "gl" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageHe = "he" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageHi = "hi" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageHu = "hu" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageHy = "hy" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageId = "id" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageIt = "it" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageJa = "ja" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageKo = "ko" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageLv = "lv" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageMul = "mul" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageNl = "nl" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageNo = "no" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguagePt = "pt" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageRo = "ro" + // @enum 
AnalysisSchemeLanguage + AnalysisSchemeLanguageRu = "ru" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageSv = "sv" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageTh = "th" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageTr = "tr" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageZhHans = "zh-Hans" + // @enum AnalysisSchemeLanguage + AnalysisSchemeLanguageZhHant = "zh-Hant" +) + +// The type of field. The valid options for a field depend on the field type. +// For more information about the supported field types, see Configuring Index +// Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html" +// target="_blank) in the Amazon CloudSearch Developer Guide. +const ( + // @enum IndexFieldType + IndexFieldTypeInt = "int" + // @enum IndexFieldType + IndexFieldTypeDouble = "double" + // @enum IndexFieldType + IndexFieldTypeLiteral = "literal" + // @enum IndexFieldType + IndexFieldTypeText = "text" + // @enum IndexFieldType + IndexFieldTypeDate = "date" + // @enum IndexFieldType + IndexFieldTypeLatlon = "latlon" + // @enum IndexFieldType + IndexFieldTypeIntArray = "int-array" + // @enum IndexFieldType + IndexFieldTypeDoubleArray = "double-array" + // @enum IndexFieldType + IndexFieldTypeLiteralArray = "literal-array" + // @enum IndexFieldType + IndexFieldTypeTextArray = "text-array" + // @enum IndexFieldType + IndexFieldTypeDateArray = "date-array" +) + +// The state of processing a change to an option. One of: +// +// RequiresIndexDocuments: The option's latest value will not be deployed +// until IndexDocuments has been called and indexing is complete. Processing: +// The option's latest value is in the process of being activated. Active: The +// option's latest value is fully deployed. FailedToValidate: The option value +// is not compatible with the domain's data and cannot be used to index the +// data. 
You must either modify the option value or update or remove the incompatible +// documents. +const ( + // @enum OptionState + OptionStateRequiresIndexDocuments = "RequiresIndexDocuments" + // @enum OptionState + OptionStateProcessing = "Processing" + // @enum OptionState + OptionStateActive = "Active" + // @enum OptionState + OptionStateFailedToValidate = "FailedToValidate" +) + +// The instance type (such as search.m1.small) on which an index partition is +// hosted. +const ( + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM1Small = "search.m1.small" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM1Large = "search.m1.large" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM2Xlarge = "search.m2.xlarge" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM22xlarge = "search.m2.2xlarge" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM3Medium = "search.m3.medium" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM3Large = "search.m3.large" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM3Xlarge = "search.m3.xlarge" + // @enum PartitionInstanceType + PartitionInstanceTypeSearchM32xlarge = "search.m3.2xlarge" +) + +const ( + // @enum SuggesterFuzzyMatching + SuggesterFuzzyMatchingNone = "none" + // @enum SuggesterFuzzyMatching + SuggesterFuzzyMatchingLow = "low" + // @enum SuggesterFuzzyMatching + SuggesterFuzzyMatchingHigh = "high" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/cloudsearchiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,110 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudsearchiface provides an interface for the Amazon CloudSearch. +package cloudsearchiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudsearch" +) + +// CloudSearchAPI is the interface type for cloudsearch.CloudSearch. +type CloudSearchAPI interface { + BuildSuggestersRequest(*cloudsearch.BuildSuggestersInput) (*request.Request, *cloudsearch.BuildSuggestersOutput) + + BuildSuggesters(*cloudsearch.BuildSuggestersInput) (*cloudsearch.BuildSuggestersOutput, error) + + CreateDomainRequest(*cloudsearch.CreateDomainInput) (*request.Request, *cloudsearch.CreateDomainOutput) + + CreateDomain(*cloudsearch.CreateDomainInput) (*cloudsearch.CreateDomainOutput, error) + + DefineAnalysisSchemeRequest(*cloudsearch.DefineAnalysisSchemeInput) (*request.Request, *cloudsearch.DefineAnalysisSchemeOutput) + + DefineAnalysisScheme(*cloudsearch.DefineAnalysisSchemeInput) (*cloudsearch.DefineAnalysisSchemeOutput, error) + + DefineExpressionRequest(*cloudsearch.DefineExpressionInput) (*request.Request, *cloudsearch.DefineExpressionOutput) + + DefineExpression(*cloudsearch.DefineExpressionInput) (*cloudsearch.DefineExpressionOutput, error) + + DefineIndexFieldRequest(*cloudsearch.DefineIndexFieldInput) (*request.Request, *cloudsearch.DefineIndexFieldOutput) + + DefineIndexField(*cloudsearch.DefineIndexFieldInput) (*cloudsearch.DefineIndexFieldOutput, error) + + DefineSuggesterRequest(*cloudsearch.DefineSuggesterInput) (*request.Request, *cloudsearch.DefineSuggesterOutput) + + DefineSuggester(*cloudsearch.DefineSuggesterInput) (*cloudsearch.DefineSuggesterOutput, error) + + DeleteAnalysisSchemeRequest(*cloudsearch.DeleteAnalysisSchemeInput) (*request.Request, 
*cloudsearch.DeleteAnalysisSchemeOutput) + + DeleteAnalysisScheme(*cloudsearch.DeleteAnalysisSchemeInput) (*cloudsearch.DeleteAnalysisSchemeOutput, error) + + DeleteDomainRequest(*cloudsearch.DeleteDomainInput) (*request.Request, *cloudsearch.DeleteDomainOutput) + + DeleteDomain(*cloudsearch.DeleteDomainInput) (*cloudsearch.DeleteDomainOutput, error) + + DeleteExpressionRequest(*cloudsearch.DeleteExpressionInput) (*request.Request, *cloudsearch.DeleteExpressionOutput) + + DeleteExpression(*cloudsearch.DeleteExpressionInput) (*cloudsearch.DeleteExpressionOutput, error) + + DeleteIndexFieldRequest(*cloudsearch.DeleteIndexFieldInput) (*request.Request, *cloudsearch.DeleteIndexFieldOutput) + + DeleteIndexField(*cloudsearch.DeleteIndexFieldInput) (*cloudsearch.DeleteIndexFieldOutput, error) + + DeleteSuggesterRequest(*cloudsearch.DeleteSuggesterInput) (*request.Request, *cloudsearch.DeleteSuggesterOutput) + + DeleteSuggester(*cloudsearch.DeleteSuggesterInput) (*cloudsearch.DeleteSuggesterOutput, error) + + DescribeAnalysisSchemesRequest(*cloudsearch.DescribeAnalysisSchemesInput) (*request.Request, *cloudsearch.DescribeAnalysisSchemesOutput) + + DescribeAnalysisSchemes(*cloudsearch.DescribeAnalysisSchemesInput) (*cloudsearch.DescribeAnalysisSchemesOutput, error) + + DescribeAvailabilityOptionsRequest(*cloudsearch.DescribeAvailabilityOptionsInput) (*request.Request, *cloudsearch.DescribeAvailabilityOptionsOutput) + + DescribeAvailabilityOptions(*cloudsearch.DescribeAvailabilityOptionsInput) (*cloudsearch.DescribeAvailabilityOptionsOutput, error) + + DescribeDomainsRequest(*cloudsearch.DescribeDomainsInput) (*request.Request, *cloudsearch.DescribeDomainsOutput) + + DescribeDomains(*cloudsearch.DescribeDomainsInput) (*cloudsearch.DescribeDomainsOutput, error) + + DescribeExpressionsRequest(*cloudsearch.DescribeExpressionsInput) (*request.Request, *cloudsearch.DescribeExpressionsOutput) + + DescribeExpressions(*cloudsearch.DescribeExpressionsInput) 
(*cloudsearch.DescribeExpressionsOutput, error) + + DescribeIndexFieldsRequest(*cloudsearch.DescribeIndexFieldsInput) (*request.Request, *cloudsearch.DescribeIndexFieldsOutput) + + DescribeIndexFields(*cloudsearch.DescribeIndexFieldsInput) (*cloudsearch.DescribeIndexFieldsOutput, error) + + DescribeScalingParametersRequest(*cloudsearch.DescribeScalingParametersInput) (*request.Request, *cloudsearch.DescribeScalingParametersOutput) + + DescribeScalingParameters(*cloudsearch.DescribeScalingParametersInput) (*cloudsearch.DescribeScalingParametersOutput, error) + + DescribeServiceAccessPoliciesRequest(*cloudsearch.DescribeServiceAccessPoliciesInput) (*request.Request, *cloudsearch.DescribeServiceAccessPoliciesOutput) + + DescribeServiceAccessPolicies(*cloudsearch.DescribeServiceAccessPoliciesInput) (*cloudsearch.DescribeServiceAccessPoliciesOutput, error) + + DescribeSuggestersRequest(*cloudsearch.DescribeSuggestersInput) (*request.Request, *cloudsearch.DescribeSuggestersOutput) + + DescribeSuggesters(*cloudsearch.DescribeSuggestersInput) (*cloudsearch.DescribeSuggestersOutput, error) + + IndexDocumentsRequest(*cloudsearch.IndexDocumentsInput) (*request.Request, *cloudsearch.IndexDocumentsOutput) + + IndexDocuments(*cloudsearch.IndexDocumentsInput) (*cloudsearch.IndexDocumentsOutput, error) + + ListDomainNamesRequest(*cloudsearch.ListDomainNamesInput) (*request.Request, *cloudsearch.ListDomainNamesOutput) + + ListDomainNames(*cloudsearch.ListDomainNamesInput) (*cloudsearch.ListDomainNamesOutput, error) + + UpdateAvailabilityOptionsRequest(*cloudsearch.UpdateAvailabilityOptionsInput) (*request.Request, *cloudsearch.UpdateAvailabilityOptionsOutput) + + UpdateAvailabilityOptions(*cloudsearch.UpdateAvailabilityOptionsInput) (*cloudsearch.UpdateAvailabilityOptionsOutput, error) + + UpdateScalingParametersRequest(*cloudsearch.UpdateScalingParametersInput) (*request.Request, *cloudsearch.UpdateScalingParametersOutput) + + 
UpdateScalingParameters(*cloudsearch.UpdateScalingParametersInput) (*cloudsearch.UpdateScalingParametersOutput, error) + + UpdateServiceAccessPoliciesRequest(*cloudsearch.UpdateServiceAccessPoliciesInput) (*request.Request, *cloudsearch.UpdateServiceAccessPoliciesOutput) + + UpdateServiceAccessPolicies(*cloudsearch.UpdateServiceAccessPoliciesInput) (*cloudsearch.UpdateServiceAccessPoliciesOutput, error) +} + +var _ CloudSearchAPI = (*cloudsearch.CloudSearch)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,616 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudsearch_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudsearch" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudSearch_BuildSuggesters() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.BuildSuggestersInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.BuildSuggesters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_CreateDomain() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.CreateDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.CreateDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DefineAnalysisScheme() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DefineAnalysisSchemeInput{ + AnalysisScheme: &cloudsearch.AnalysisScheme{ // Required + AnalysisSchemeLanguage: aws.String("AnalysisSchemeLanguage"), // Required + AnalysisSchemeName: aws.String("StandardName"), // Required + AnalysisOptions: &cloudsearch.AnalysisOptions{ + AlgorithmicStemming: aws.String("AlgorithmicStemming"), + JapaneseTokenizationDictionary: aws.String("String"), + StemmingDictionary: aws.String("String"), + Stopwords: aws.String("String"), + Synonyms: aws.String("String"), + }, + }, + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DefineAnalysisScheme(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DefineExpression() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DefineExpressionInput{ + DomainName: aws.String("DomainName"), // Required + Expression: &cloudsearch.Expression{ // Required + ExpressionName: aws.String("StandardName"), // Required + ExpressionValue: aws.String("ExpressionValue"), // Required + }, + } + resp, err := svc.DefineExpression(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DefineIndexField() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DefineIndexFieldInput{ + DomainName: aws.String("DomainName"), // Required + IndexField: &cloudsearch.IndexField{ // Required + IndexFieldName: aws.String("DynamicFieldName"), // Required + IndexFieldType: aws.String("IndexFieldType"), // Required + DateArrayOptions: &cloudsearch.DateArrayOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + DateOptions: &cloudsearch.DateOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + DoubleArrayOptions: &cloudsearch.DoubleArrayOptions{ + DefaultValue: aws.Float64(1.0), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + DoubleOptions: &cloudsearch.DoubleOptions{ + DefaultValue: aws.Float64(1.0), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + IntArrayOptions: &cloudsearch.IntArrayOptions{ + DefaultValue: aws.Int64(1), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + IntOptions: &cloudsearch.IntOptions{ + DefaultValue: aws.Int64(1), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + LatLonOptions: &cloudsearch.LatLonOptions{ + DefaultValue: 
aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + LiteralArrayOptions: &cloudsearch.LiteralArrayOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + LiteralOptions: &cloudsearch.LiteralOptions{ + DefaultValue: aws.String("FieldValue"), + FacetEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SearchEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + TextArrayOptions: &cloudsearch.TextArrayOptions{ + AnalysisScheme: aws.String("Word"), + DefaultValue: aws.String("FieldValue"), + HighlightEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SourceFields: aws.String("FieldNameCommaList"), + }, + TextOptions: &cloudsearch.TextOptions{ + AnalysisScheme: aws.String("Word"), + DefaultValue: aws.String("FieldValue"), + HighlightEnabled: aws.Bool(true), + ReturnEnabled: aws.Bool(true), + SortEnabled: aws.Bool(true), + SourceField: aws.String("FieldName"), + }, + }, + } + resp, err := svc.DefineIndexField(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_DefineSuggester() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DefineSuggesterInput{ + DomainName: aws.String("DomainName"), // Required + Suggester: &cloudsearch.Suggester{ // Required + DocumentSuggesterOptions: &cloudsearch.DocumentSuggesterOptions{ // Required + SourceField: aws.String("FieldName"), // Required + FuzzyMatching: aws.String("SuggesterFuzzyMatching"), + SortExpression: aws.String("String"), + }, + SuggesterName: aws.String("StandardName"), // Required + }, + } + resp, err := svc.DefineSuggester(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteAnalysisScheme() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteAnalysisSchemeInput{ + AnalysisSchemeName: aws.String("StandardName"), // Required + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DeleteAnalysisScheme(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteDomain() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DeleteDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteExpression() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteExpressionInput{ + DomainName: aws.String("DomainName"), // Required + ExpressionName: aws.String("StandardName"), // Required + } + resp, err := svc.DeleteExpression(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteIndexField() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteIndexFieldInput{ + DomainName: aws.String("DomainName"), // Required + IndexFieldName: aws.String("DynamicFieldName"), // Required + } + resp, err := svc.DeleteIndexField(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DeleteSuggester() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DeleteSuggesterInput{ + DomainName: aws.String("DomainName"), // Required + SuggesterName: aws.String("StandardName"), // Required + } + resp, err := svc.DeleteSuggester(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeAnalysisSchemes() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeAnalysisSchemesInput{ + DomainName: aws.String("DomainName"), // Required + AnalysisSchemeNames: []*string{ + aws.String("StandardName"), // Required + // More values... 
+ }, + Deployed: aws.Bool(true), + } + resp, err := svc.DescribeAnalysisSchemes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeAvailabilityOptions() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeAvailabilityOptionsInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + } + resp, err := svc.DescribeAvailabilityOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeDomains() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeDomainsInput{ + DomainNames: []*string{ + aws.String("DomainName"), // Required + // More values... + }, + } + resp, err := svc.DescribeDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeExpressions() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeExpressionsInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + ExpressionNames: []*string{ + aws.String("StandardName"), // Required + // More values... + }, + } + resp, err := svc.DescribeExpressions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeIndexFields() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeIndexFieldsInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + FieldNames: []*string{ + aws.String("DynamicFieldName"), // Required + // More values... + }, + } + resp, err := svc.DescribeIndexFields(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeScalingParameters() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeScalingParametersInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeScalingParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeServiceAccessPolicies() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeServiceAccessPoliciesInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + } + resp, err := svc.DescribeServiceAccessPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_DescribeSuggesters() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.DescribeSuggestersInput{ + DomainName: aws.String("DomainName"), // Required + Deployed: aws.Bool(true), + SuggesterNames: []*string{ + aws.String("StandardName"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeSuggesters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_IndexDocuments() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.IndexDocumentsInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.IndexDocuments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_ListDomainNames() { + svc := cloudsearch.New(session.New()) + + var params *cloudsearch.ListDomainNamesInput + resp, err := svc.ListDomainNames(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_UpdateAvailabilityOptions() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.UpdateAvailabilityOptionsInput{ + DomainName: aws.String("DomainName"), // Required + MultiAZ: aws.Bool(true), // Required + } + resp, err := svc.UpdateAvailabilityOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearch_UpdateScalingParameters() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.UpdateScalingParametersInput{ + DomainName: aws.String("DomainName"), // Required + ScalingParameters: &cloudsearch.ScalingParameters{ // Required + DesiredInstanceType: aws.String("PartitionInstanceType"), + DesiredPartitionCount: aws.Int64(1), + DesiredReplicationCount: aws.Int64(1), + }, + } + resp, err := svc.UpdateScalingParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearch_UpdateServiceAccessPolicies() { + svc := cloudsearch.New(session.New()) + + params := &cloudsearch.UpdateServiceAccessPoliciesInput{ + AccessPolicies: aws.String("PolicyDocument"), // Required + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.UpdateServiceAccessPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearch/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,94 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudsearch + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// You use the Amazon CloudSearch configuration service to create, configure, +// and manage search domains. Configuration service requests are submitted using +// the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted +// via HTTP GET or POST with a query parameter named Action. +// +// The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. +// For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported +// regions and endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#cloudsearch_region" +// target="_blank). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudSearch struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cloudsearch" + +// New creates a new instance of the CloudSearch client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudSearch client from just a session. 
+// svc := cloudsearch.New(mySession) +// +// // Create a CloudSearch client with additional configuration +// svc := cloudsearch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudSearch { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudSearch { + svc := &CloudSearch{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudSearch operation and runs any +// custom request initialization. 
+func (c *CloudSearch) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,784 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudsearchdomain provides a client for Amazon CloudSearch Domain. +package cloudsearchdomain + +import ( + "io" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opSearch = "Search" + +// SearchRequest generates a request for the Search operation. +func (c *CloudSearchDomain) SearchRequest(input *SearchInput) (req *request.Request, output *SearchOutput) { + op := &request.Operation{ + Name: opSearch, + HTTPMethod: "GET", + HTTPPath: "/2013-01-01/search?format=sdk&pretty=true", + } + + if input == nil { + input = &SearchInput{} + } + + req = c.newRequest(op, input, output) + output = &SearchOutput{} + req.Data = output + return +} + +// Retrieves a list of documents that match the specified search criteria. How +// you specify the search criteria depends on which query parser you use. Amazon +// CloudSearch supports four query parsers: +// +// simple: search all text and text-array fields for the specified string. +// Search for phrases, individual terms, and prefixes. 
structured: search +// specific fields, construct compound queries using Boolean operators, and +// use advanced features such as term boosting and proximity searching. lucene: +// specify search criteria using the Apache Lucene query parser syntax. dismax: +// specify search criteria using the simplified subset of the Apache Lucene +// query parser syntax defined by the DisMax query parser. For more information, +// see Searching Your Data (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html) +// in the Amazon CloudSearch Developer Guide. +// +// The endpoint for submitting Search requests is domain-specific. You submit +// search requests to a domain's search endpoint. To get the search endpoint +// for your domain, use the Amazon CloudSearch configuration service DescribeDomains +// action. A domain's endpoints are also displayed on the domain dashboard in +// the Amazon CloudSearch console. +func (c *CloudSearchDomain) Search(input *SearchInput) (*SearchOutput, error) { + req, out := c.SearchRequest(input) + err := req.Send() + return out, err +} + +const opSuggest = "Suggest" + +// SuggestRequest generates a request for the Suggest operation. +func (c *CloudSearchDomain) SuggestRequest(input *SuggestInput) (req *request.Request, output *SuggestOutput) { + op := &request.Operation{ + Name: opSuggest, + HTTPMethod: "GET", + HTTPPath: "/2013-01-01/suggest?format=sdk&pretty=true", + } + + if input == nil { + input = &SuggestInput{} + } + + req = c.newRequest(op, input, output) + output = &SuggestOutput{} + req.Data = output + return +} + +// Retrieves autocomplete suggestions for a partial query string. You can use +// suggestions enable you to display likely matches before users finish typing. +// In Amazon CloudSearch, suggestions are based on the contents of a particular +// text field. 
When you request suggestions, Amazon CloudSearch finds all of +// the documents whose values in the suggester field start with the specified +// query string. The beginning of the field must match the query string to be +// considered a match. +// +// For more information about configuring suggesters and retrieving suggestions, +// see Getting Suggestions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/getting-suggestions.html) +// in the Amazon CloudSearch Developer Guide. +// +// The endpoint for submitting Suggest requests is domain-specific. You submit +// suggest requests to a domain's search endpoint. To get the search endpoint +// for your domain, use the Amazon CloudSearch configuration service DescribeDomains +// action. A domain's endpoints are also displayed on the domain dashboard in +// the Amazon CloudSearch console. +func (c *CloudSearchDomain) Suggest(input *SuggestInput) (*SuggestOutput, error) { + req, out := c.SuggestRequest(input) + err := req.Send() + return out, err +} + +const opUploadDocuments = "UploadDocuments" + +// UploadDocumentsRequest generates a request for the UploadDocuments operation. +func (c *CloudSearchDomain) UploadDocumentsRequest(input *UploadDocumentsInput) (req *request.Request, output *UploadDocumentsOutput) { + op := &request.Operation{ + Name: opUploadDocuments, + HTTPMethod: "POST", + HTTPPath: "/2013-01-01/documents/batch?format=sdk", + } + + if input == nil { + input = &UploadDocumentsInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadDocumentsOutput{} + req.Data = output + return +} + +// Posts a batch of documents to a search domain for indexing. A document batch +// is a collection of add and delete operations that represent the documents +// you want to add, update, or delete from your domain. Batches can be described +// in either JSON or XML. Each item that you want Amazon CloudSearch to return +// as a search result (such as a product) is represented as a document. 
Every +// document has a unique ID and one or more fields that contain the data that +// you want to search and return in results. Individual documents cannot contain +// more than 1 MB of data. The entire batch cannot exceed 5 MB. To get the best +// possible upload performance, group add and delete operations in batches that +// are close the 5 MB limit. Submitting a large volume of single-document batches +// can overload a domain's document service. +// +// The endpoint for submitting UploadDocuments requests is domain-specific. +// To get the document endpoint for your domain, use the Amazon CloudSearch +// configuration service DescribeDomains action. A domain's endpoints are also +// displayed on the domain dashboard in the Amazon CloudSearch console. +// +// For more information about formatting your data for Amazon CloudSearch, +// see Preparing Your Data (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html) +// in the Amazon CloudSearch Developer Guide. For more information about uploading +// data for indexing, see Uploading Data (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/uploading-data.html) +// in the Amazon CloudSearch Developer Guide. +func (c *CloudSearchDomain) UploadDocuments(input *UploadDocumentsInput) (*UploadDocumentsOutput, error) { + req, out := c.UploadDocumentsRequest(input) + err := req.Send() + return out, err +} + +// A container for facet information. +type Bucket struct { + _ struct{} `type:"structure"` + + // The number of hits that contain the facet value in the specified facet field. + Count *int64 `locationName:"count" type:"long"` + + // The facet value being counted. 
+ Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// A container for the calculated facet values and counts. +type BucketInfo struct { + _ struct{} `type:"structure"` + + // A list of the calculated facet values and counts. + Buckets []*Bucket `locationName:"buckets" type:"list"` +} + +// String returns the string representation +func (s BucketInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketInfo) GoString() string { + return s.String() +} + +// A warning returned by the document service when an issue is discovered while +// processing an upload request. +type DocumentServiceWarning struct { + _ struct{} `type:"structure"` + + // The description for a warning returned by the document service. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DocumentServiceWarning) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentServiceWarning) GoString() string { + return s.String() +} + +// Information about a document that matches the search request. +type Hit struct { + _ struct{} `type:"structure"` + + // The expressions returned from a document that matches the search request. + Exprs map[string]*string `locationName:"exprs" type:"map"` + + // The fields returned from a document that matches the search request. + Fields map[string][]*string `locationName:"fields" type:"map"` + + // The highlights returned from a document that matches the search request. + Highlights map[string]*string `locationName:"highlights" type:"map"` + + // The document ID of a document that matches the search request. 
+ Id *string `locationName:"id" type:"string"` +} + +// String returns the string representation +func (s Hit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Hit) GoString() string { + return s.String() +} + +// The collection of documents that match the search request. +type Hits struct { + _ struct{} `type:"structure"` + + // A cursor that can be used to retrieve the next set of matching documents + // when you want to page through a large result set. + Cursor *string `locationName:"cursor" type:"string"` + + // The total number of documents that match the search request. + Found *int64 `locationName:"found" type:"long"` + + // A document that matches the search request. + Hit []*Hit `locationName:"hit" type:"list"` + + // The index of the first matching document. + Start *int64 `locationName:"start" type:"long"` +} + +// String returns the string representation +func (s Hits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Hits) GoString() string { + return s.String() +} + +// Container for the parameters to the Search request. +type SearchInput struct { + _ struct{} `type:"structure"` + + // Retrieves a cursor value you can use to page through large result sets. Use + // the size parameter to control the number of hits to include in each response. + // You can specify either the cursor or start parameter in a request; they are + // mutually exclusive. To get the first cursor, set the cursor value to initial. + // In subsequent requests, specify the cursor value returned in the hits section + // of the response. + // + // For more information, see Paginating Results (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html) + // in the Amazon CloudSearch Developer Guide. 
+ Cursor *string `location:"querystring" locationName:"cursor" type:"string"` + + // Defines one or more numeric expressions that can be used to sort results + // or specify search or filter criteria. You can also specify expressions as + // return fields. + // + // You specify the expressions in JSON using the form {"EXPRESSIONNAME":"EXPRESSION"}. + // You can define and use multiple expressions in a search request. For example: + // + // {"expression1":"_score*rating", "expression2":"(1/rank)*year"} + // + // For information about the variables, operators, and functions you can use + // in expressions, see Writing Expressions (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-expressions.html#writing-expressions) + // in the Amazon CloudSearch Developer Guide. + Expr *string `location:"querystring" locationName:"expr" type:"string"` + + // Specifies one or more fields for which to get facet information, and options + // that control how the facet information is returned. Each specified field + // must be facet-enabled in the domain configuration. The fields and options + // are specified in JSON using the form {"FIELD":{"OPTION":VALUE,"OPTION:"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}. + // + // You can specify the following faceting options: + // + // buckets specifies an array of the facet values or ranges to count. Ranges + // are specified using the same syntax that you use to search for a range of + // values. For more information, see Searching for a Range of Values (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-ranges.html) + // in the Amazon CloudSearch Developer Guide. Buckets are returned in the order + // they are specified in the request. The sort and size options are not valid + // if you specify buckets. + // + // size specifies the maximum number of facets to include in the results. + // By default, Amazon CloudSearch returns counts for the top 10. 
The size parameter + // is only valid when you specify the sort option; it cannot be used in conjunction + // with buckets. + // + // sort specifies how you want to sort the facets in the results: bucket + // or count. Specify bucket to sort alphabetically or numerically by facet value + // (in ascending order). Specify count to sort by the facet counts computed + // for each facet value (in descending order). To retrieve facet counts for + // particular values or ranges of values, use the buckets option instead of + // sort. + // + // If no facet options are specified, facet counts are computed for all field + // values, the facets are sorted by facet count, and the top 10 facets are returned + // in the results. + // + // To count particular buckets of values, use the buckets option. For example, + // the following request uses the buckets option to calculate and return facet + // counts by decade. + // + // {"year":{"buckets":["[1970,1979]","[1980,1989]","[1990,1999]","[2000,2009]","[2010,}"]}} + // + // To sort facets by facet count, use the count option. For example, the following + // request sets the sort option to count to sort the facet values by facet count, + // with the facet values that have the most matching documents listed first. + // Setting the size option to 3 returns only the top three facet values. + // + // {"year":{"sort":"count","size":3}} + // + // To sort the facets by value, use the bucket option. For example, the following + // request sets the sort option to bucket to sort the facet values numerically + // by year, with earliest year listed first. + // + // {"year":{"sort":"bucket"}} + // + // For more information, see Getting and Using Facet Information (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/faceting.html) + // in the Amazon CloudSearch Developer Guide. 
+ Facet *string `location:"querystring" locationName:"facet" type:"string"` + + // Specifies a structured query that filters the results of a search without + // affecting how the results are scored and sorted. You use filterQuery in conjunction + // with the query parameter to filter the documents that match the constraints + // specified in the query parameter. Specifying a filter controls only which + // matching documents are included in the results, it has no effect on how they + // are scored and sorted. The filterQuery parameter supports the full structured + // query syntax. + // + // For more information about using filters, see Filtering Matching Documents + // (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/filtering-results.html) + // in the Amazon CloudSearch Developer Guide. + FilterQuery *string `location:"querystring" locationName:"fq" type:"string"` + + // Retrieves highlights for matches in the specified text or text-array fields. + // Each specified field must be highlight enabled in the domain configuration. + // The fields and options are specified in JSON using the form {"FIELD":{"OPTION":VALUE,"OPTION:"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}. + // + // You can specify the following highlight options: + // + // format: specifies the format of the data in the text field: text or html. + // When data is returned as HTML, all non-alphanumeric characters are encoded. + // The default is html. max_phrases: specifies the maximum number of occurrences + // of the search term(s) you want to highlight. By default, the first occurrence + // is highlighted. pre_tag: specifies the string to prepend to an occurrence + // of a search term. The default for HTML highlights is <em>. The default + // for text highlights is *. post_tag: specifies the string to append to an + // occurrence of a search term. The default for HTML highlights is </em>. + // The default for text highlights is *. 
If no highlight options are specified + // for a field, the returned field text is treated as HTML and the first match + // is highlighted with emphasis tags: <em>search-term</em>. + // + // For example, the following request retrieves highlights for the actors and + // title fields. + // + // { "actors": {}, "title": {"format": "text","max_phrases": 2,"pre_tag": + // "","post_tag": ""} } + Highlight *string `location:"querystring" locationName:"highlight" type:"string"` + + // Enables partial results to be returned if one or more index partitions are + // unavailable. When your search index is partitioned across multiple search + // instances, by default Amazon CloudSearch only returns results if every partition + // can be queried. This means that the failure of a single search instance can + // result in 5xx (internal server) errors. When you enable partial results, + // Amazon CloudSearch returns whatever results are available and includes the + // percentage of documents searched in the search results (percent-searched). + // This enables you to more gracefully degrade your users' search experience. + // For example, rather than displaying no results, you could display the partial + // results and a message indicating that the results might be incomplete due + // to a temporary system outage. + Partial *bool `location:"querystring" locationName:"partial" type:"boolean"` + + // Specifies the search criteria for the request. How you specify the search + // criteria depends on the query parser used for the request and the parser + // options specified in the queryOptions parameter. By default, the simple query + // parser is used to process requests. To use the structured, lucene, or dismax + // query parser, you must also specify the queryParser parameter. 
+ // + // For more information about specifying search criteria, see Searching Your + // Data (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html) + // in the Amazon CloudSearch Developer Guide. + Query *string `location:"querystring" locationName:"q" type:"string" required:"true"` + + // Configures options for the query parser specified in the queryParser parameter. + // You specify the options in JSON using the following form {"OPTION1":"VALUE1","OPTION2":VALUE2"..."OPTIONN":"VALUEN"}. + // + // The options you can configure vary according to which parser you use: + // + // defaultOperator: The default operator used to combine individual terms + // in the search string. For example: defaultOperator: 'or'. For the dismax + // parser, you specify a percentage that represents the percentage of terms + // in the search string (rounded down) that must match, rather than a default + // operator. A value of 0% is the equivalent to OR, and a value of 100% is equivalent + // to AND. The percentage must be specified as a value in the range 0-100 followed + // by the percent (%) symbol. For example, defaultOperator: 50%. Valid values: + // and, or, a percentage in the range 0%-100% (dismax). Default: and (simple, + // structured, lucene) or 100 (dismax). Valid for: simple, structured, lucene, + // and dismax. fields: An array of the fields to search when no fields are + // specified in a search. If no fields are specified in a search and this option + // is not specified, all text and text-array fields are searched. You can specify + // a weight for each field to control the relative importance of each field + // when Amazon CloudSearch calculates relevance scores. To specify a field weight, + // append a caret (^) symbol and the weight to the field name. For example, + // to boost the importance of the title field over the description field you + // could specify: "fields":["title^5","description"]. 
Valid values: The name + // of any configured field and an optional numeric value greater than zero. + // Default: All text and text-array fields. Valid for: simple, structured, lucene, + // and dismax. operators: An array of the operators or special characters you + // want to disable for the simple query parser. If you disable the and, or, + // or not operators, the corresponding operators (+, |, -) have no special meaning + // and are dropped from the search string. Similarly, disabling prefix disables + // the wildcard operator (*) and disabling phrase disables the ability to search + // for phrases by enclosing phrases in double quotes. Disabling precedence disables + // the ability to control order of precedence using parentheses. Disabling near + // disables the ability to use the ~ operator to perform a sloppy phrase search. + // Disabling the fuzzy operator disables the ability to use the ~ operator to + // perform a fuzzy search. escape disables the ability to use a backslash (\) + // to escape special characters within the search string. Disabling whitespace + // is an advanced option that prevents the parser from tokenizing on whitespace, + // which can be useful for Vietnamese. (It prevents Vietnamese words from being + // split incorrectly.) For example, you could disable all operators other than + // the phrase operator to support just simple term and phrase queries: "operators":["and","not","or", + // "prefix"]. Valid values: and, escape, fuzzy, near, not, or, phrase, precedence, + // prefix, whitespace. Default: All operators and special characters are enabled. + // Valid for: simple. phraseFields: An array of the text or text-array fields + // you want to use for phrase searches. When the terms in the search string + // appear in close proximity within a field, the field scores higher. You can + // specify a weight for each field to boost that score. 
The phraseSlop option + // controls how much the matches can deviate from the search string and still + // be boosted. To specify a field weight, append a caret (^) symbol and the + // weight to the field name. For example, to boost phrase matches in the title + // field over the abstract field, you could specify: "phraseFields":["title^3", + // "plot"] Valid values: The name of any text or text-array field and an optional + // numeric value greater than zero. Default: No fields. If you don't specify + // any fields with phraseFields, proximity scoring is disabled even if phraseSlop + // is specified. Valid for: dismax. phraseSlop: An integer value that specifies + // how much matches can deviate from the search phrase and still be boosted + // according to the weights specified in the phraseFields option; for example, + // phraseSlop: 2. You must also specify phraseFields to enable proximity scoring. + // Valid values: positive integers. Default: 0. Valid for: dismax. explicitPhraseSlop: + // An integer value that specifies how much a match can deviate from the search + // phrase when the phrase is enclosed in double quotes in the search string. + // (Phrases that exceed this proximity distance are not considered a match.) + // For example, to specify a slop of three for dismax phrase queries, you would + // specify "explicitPhraseSlop":3. Valid values: positive integers. Default: + // 0. Valid for: dismax. tieBreaker: When a term in the search string is found + // in a document's field, a score is calculated for that field based on how + // common the word is in that field compared to other documents. If the term + // occurs in multiple fields within a document, by default only the highest + // scoring field contributes to the document's overall score. You can specify + // a tieBreaker value to enable the matches in lower-scoring fields to contribute + // to the document's score. 
That way, if two documents have the same max field + // score for a particular term, the score for the document that has matches + // in more fields will be higher. The formula for calculating the score with + // a tieBreaker is (max field score) + (tieBreaker) * (sum of the scores for + // the rest of the matching fields). Set tieBreaker to 0 to disregard all but + // the highest scoring field (pure max): "tieBreaker":0. Set to 1 to sum the + // scores from all fields (pure sum): "tieBreaker":1. Valid values: 0.0 to 1.0. + // Default: 0.0. Valid for: dismax. + QueryOptions *string `location:"querystring" locationName:"q.options" type:"string"` + + // Specifies which query parser to use to process the request. If queryParser + // is not specified, Amazon CloudSearch uses the simple query parser. + // + // Amazon CloudSearch supports four query parsers: + // + // simple: perform simple searches of text and text-array fields. By default, + // the simple query parser searches all text and text-array fields. You can + // specify which fields to search by with the queryOptions parameter. If you + // prefix a search term with a plus sign (+) documents must contain the term + // to be considered a match. (This is the default, unless you configure the + // default operator with the queryOptions parameter.) You can use the - (NOT), + // | (OR), and * (wildcard) operators to exclude particular terms, find results + // that match any of the specified terms, or search for a prefix. To search + // for a phrase rather than individual terms, enclose the phrase in double quotes. + // For more information, see Searching for Text (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-text.html) + // in the Amazon CloudSearch Developer Guide. structured: perform advanced + // searches by combining multiple expressions to define the search criteria. 
+ // You can also search within particular fields, search for values and ranges + // of values, and use advanced options such as term boosting, matchall, and + // near. For more information, see Constructing Compound Queries (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-compound-queries.html) + // in the Amazon CloudSearch Developer Guide. lucene: search using the Apache + // Lucene query parser syntax. For more information, see Apache Lucene Query + // Parser Syntax (http://lucene.apache.org/core/4_6_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#package_description). + // dismax: search using the simplified subset of the Apache Lucene query parser + // syntax defined by the DisMax query parser. For more information, see DisMax + // Query Parser Syntax (http://wiki.apache.org/solr/DisMaxQParserPlugin#Query_Syntax). + QueryParser *string `location:"querystring" locationName:"q.parser" type:"string" enum:"QueryParser"` + + // Specifies the field and expression values to include in the response. Multiple + // fields or expressions are specified as a comma-separated list. By default, + // a search response includes all return enabled fields (_all_fields). To return + // only the document IDs for the matching documents, specify _no_fields. To + // retrieve the relevance score calculated for each document, specify _score. + Return *string `location:"querystring" locationName:"return" type:"string"` + + // Specifies the maximum number of search hits to include in the response. + Size *int64 `location:"querystring" locationName:"size" type:"long"` + + // Specifies the fields or custom expressions to use to sort the search results. + // Multiple fields or expressions are specified as a comma-separated list. You + // must specify the sort direction (asc or desc) for each field; for example, + // year desc,title asc. To use a field to sort results, the field must be sort-enabled + // in the domain configuration. 
Array type fields cannot be used for sorting. + // If no sort parameter is specified, results are sorted by their default relevance + // scores in descending order: _score desc. You can also sort by document ID + // (_id asc) and version (_version desc). + // + // For more information, see Sorting Results (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/sorting-results.html) + // in the Amazon CloudSearch Developer Guide. + Sort *string `location:"querystring" locationName:"sort" type:"string"` + + // Specifies the offset of the first search hit you want to return. Note that + // the result set is zero-based; the first result is at index 0. You can specify + // either the start or cursor parameter in a request, they are mutually exclusive. + // + // For more information, see Paginating Results (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/paginating-results.html) + // in the Amazon CloudSearch Developer Guide. + Start *int64 `location:"querystring" locationName:"start" type:"long"` +} + +// String returns the string representation +func (s SearchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchInput) GoString() string { + return s.String() +} + +// The result of a Search request. Contains the documents that match the specified +// search criteria and any requested fields, highlights, and facet information. +type SearchOutput struct { + _ struct{} `type:"structure"` + + // The requested facet information. + Facets map[string]*BucketInfo `locationName:"facets" type:"map"` + + // The documents that match the search criteria. + Hits *Hits `locationName:"hits" type:"structure"` + + // The status information returned for the search request. 
+ Status *SearchStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s SearchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchOutput) GoString() string { + return s.String() +} + +// Contains the resource id (rid) and the time it took to process the request +// (timems). +type SearchStatus struct { + _ struct{} `type:"structure"` + + // The encrypted resource ID for the request. + Rid *string `locationName:"rid" type:"string"` + + // How long it took to process the request, in milliseconds. + Timems *int64 `locationName:"timems" type:"long"` +} + +// String returns the string representation +func (s SearchStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchStatus) GoString() string { + return s.String() +} + +// Container for the parameters to the Suggest request. +type SuggestInput struct { + _ struct{} `type:"structure"` + + // Specifies the string for which you want to get suggestions. + Query *string `location:"querystring" locationName:"q" type:"string" required:"true"` + + // Specifies the maximum number of suggestions to return. + Size *int64 `location:"querystring" locationName:"size" type:"long"` + + // Specifies the name of the suggester to use to find suggested matches. + Suggester *string `location:"querystring" locationName:"suggester" type:"string" required:"true"` +} + +// String returns the string representation +func (s SuggestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestInput) GoString() string { + return s.String() +} + +// Container for the suggestion information returned in a SuggestResponse. +type SuggestModel struct { + _ struct{} `type:"structure"` + + // The number of documents that were found to match the query string. 
+ Found *int64 `locationName:"found" type:"long"` + + // The query string specified in the suggest request. + Query *string `locationName:"query" type:"string"` + + // The documents that match the query string. + Suggestions []*SuggestionMatch `locationName:"suggestions" type:"list"` +} + +// String returns the string representation +func (s SuggestModel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestModel) GoString() string { + return s.String() +} + +// Contains the response to a Suggest request. +type SuggestOutput struct { + _ struct{} `type:"structure"` + + // The status of a SuggestRequest. Contains the resource ID (rid) and how long + // it took to process the request (timems). + Status *SuggestStatus `locationName:"status" type:"structure"` + + // Container for the matching search suggestion information. + Suggest *SuggestModel `locationName:"suggest" type:"structure"` +} + +// String returns the string representation +func (s SuggestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestOutput) GoString() string { + return s.String() +} + +// Contains the resource id (rid) and the time it took to process the request +// (timems). +type SuggestStatus struct { + _ struct{} `type:"structure"` + + // The encrypted resource ID for the request. + Rid *string `locationName:"rid" type:"string"` + + // How long it took to process the request, in milliseconds. + Timems *int64 `locationName:"timems" type:"long"` +} + +// String returns the string representation +func (s SuggestStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestStatus) GoString() string { + return s.String() +} + +// An autocomplete suggestion that matches the query string specified in a SuggestRequest. 
+type SuggestionMatch struct { + _ struct{} `type:"structure"` + + // The document ID of the suggested document. + Id *string `locationName:"id" type:"string"` + + // The relevance score of a suggested match. + Score *int64 `locationName:"score" type:"long"` + + // The string that matches the query string specified in the SuggestRequest. + Suggestion *string `locationName:"suggestion" type:"string"` +} + +// String returns the string representation +func (s SuggestionMatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuggestionMatch) GoString() string { + return s.String() +} + +// Container for the parameters to the UploadDocuments request. +type UploadDocumentsInput struct { + _ struct{} `type:"structure" payload:"Documents"` + + // The format of the batch you are uploading. Amazon CloudSearch supports two + // document batch formats: + // + // application/json application/xml + ContentType *string `location:"header" locationName:"Content-Type" type:"string" required:"true" enum:"ContentType"` + + // A batch of documents formatted in JSON or HTML. + Documents io.ReadSeeker `locationName:"documents" type:"blob" required:"true"` +} + +// String returns the string representation +func (s UploadDocumentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadDocumentsInput) GoString() string { + return s.String() +} + +// Contains the response to an UploadDocuments request. +type UploadDocumentsOutput struct { + _ struct{} `type:"structure"` + + // The number of documents that were added to the search domain. + Adds *int64 `locationName:"adds" type:"long"` + + // The number of documents that were deleted from the search domain. + Deletes *int64 `locationName:"deletes" type:"long"` + + // The status of an UploadDocumentsRequest. 
+ Status *string `locationName:"status" type:"string"` + + // Any warnings returned by the document service about the documents being uploaded. + Warnings []*DocumentServiceWarning `locationName:"warnings" type:"list"` +} + +// String returns the string representation +func (s UploadDocumentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadDocumentsOutput) GoString() string { + return s.String() +} + +const ( + // @enum ContentType + ContentTypeApplicationJson = "application/json" + // @enum ContentType + ContentTypeApplicationXml = "application/xml" +) + +const ( + // @enum QueryParser + QueryParserSimple = "simple" + // @enum QueryParser + QueryParserStructured = "structured" + // @enum QueryParser + QueryParserLucene = "lucene" + // @enum QueryParser + QueryParserDismax = "dismax" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/cloudsearchdomainiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,26 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudsearchdomainiface provides an interface for the Amazon CloudSearch Domain. +package cloudsearchdomainiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudsearchdomain" +) + +// CloudSearchDomainAPI is the interface type for cloudsearchdomain.CloudSearchDomain. 
+type CloudSearchDomainAPI interface { + SearchRequest(*cloudsearchdomain.SearchInput) (*request.Request, *cloudsearchdomain.SearchOutput) + + Search(*cloudsearchdomain.SearchInput) (*cloudsearchdomain.SearchOutput, error) + + SuggestRequest(*cloudsearchdomain.SuggestInput) (*request.Request, *cloudsearchdomain.SuggestOutput) + + Suggest(*cloudsearchdomain.SuggestInput) (*cloudsearchdomain.SuggestOutput, error) + + UploadDocumentsRequest(*cloudsearchdomain.UploadDocumentsInput) (*request.Request, *cloudsearchdomain.UploadDocumentsOutput) + + UploadDocuments(*cloudsearchdomain.UploadDocumentsInput) (*cloudsearchdomain.UploadDocumentsOutput, error) +} + +var _ CloudSearchDomainAPI = (*cloudsearchdomain.CloudSearchDomain)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,50 @@ +package cloudsearchdomain_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/cloudsearchdomain" +) + +func TestRequireEndpointIfRegionProvided(t *testing.T) { + svc := cloudsearchdomain.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), + DisableParamValidation: aws.Bool(true), + }) + req, _ := svc.SearchRequest(nil) + err := req.Build() + + assert.Equal(t, "", svc.Endpoint) + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingEndpoint, err) +} + +func TestRequireEndpointIfNoRegionProvided(t 
*testing.T) { + svc := cloudsearchdomain.New(unit.Session, &aws.Config{ + Region: aws.String(""), + DisableParamValidation: aws.Bool(true), + }) + req, _ := svc.SearchRequest(nil) + err := req.Build() + + assert.Equal(t, "", svc.Endpoint) + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingEndpoint, err) +} + +func TestRequireEndpointUsed(t *testing.T) { + svc := cloudsearchdomain.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), + DisableParamValidation: aws.Bool(true), + Endpoint: aws.String("https://endpoint"), + }) + req, _ := svc.SearchRequest(nil) + err := req.Build() + + assert.Equal(t, "https://endpoint", svc.Endpoint) + assert.NoError(t, err) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,88 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudsearchdomain_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudsearchdomain" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudSearchDomain_Search() { + svc := cloudsearchdomain.New(session.New()) + + params := &cloudsearchdomain.SearchInput{ + Query: aws.String("Query"), // Required + Cursor: aws.String("Cursor"), + Expr: aws.String("Expr"), + Facet: aws.String("Facet"), + FilterQuery: aws.String("FilterQuery"), + Highlight: aws.String("Highlight"), + Partial: aws.Bool(true), + QueryOptions: aws.String("QueryOptions"), + QueryParser: aws.String("QueryParser"), + Return: aws.String("Return"), + Size: aws.Int64(1), + Sort: aws.String("Sort"), + Start: aws.Int64(1), + } + resp, err := svc.Search(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudSearchDomain_Suggest() { + svc := cloudsearchdomain.New(session.New()) + + params := &cloudsearchdomain.SuggestInput{ + Query: aws.String("Query"), // Required + Suggester: aws.String("Suggester"), // Required + Size: aws.Int64(1), + } + resp, err := svc.Suggest(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudSearchDomain_UploadDocuments() { + svc := cloudsearchdomain.New(session.New()) + + params := &cloudsearchdomain.UploadDocumentsInput{ + ContentType: aws.String("ContentType"), // Required + Documents: bytes.NewReader([]byte("PAYLOAD")), // Required + } + resp, err := svc.UploadDocuments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudsearchdomain/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudsearchdomain + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// You use the AmazonCloudSearch2013 API to upload documents to a search domain +// and search those documents. +// +// The endpoints for submitting UploadDocuments, Search, and Suggest requests +// are domain-specific. To get the endpoints for your domain, use the Amazon +// CloudSearch configuration service DescribeDomains action. The domain endpoints +// are also displayed on the domain dashboard in the Amazon CloudSearch console. +// You submit suggest requests to the search endpoint. 
+// +// For more information, see the Amazon CloudSearch Developer Guide (http://docs.aws.amazon.com/cloudsearch/latest/developerguide). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudSearchDomain struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cloudsearchdomain" + +// New creates a new instance of the CloudSearchDomain client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudSearchDomain client from just a session. +// svc := cloudsearchdomain.New(mySession) +// +// // Create a CloudSearchDomain client with additional configuration +// svc := cloudsearchdomain.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudSearchDomain { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudSearchDomain { + svc := &CloudSearchDomain{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "cloudsearch", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudSearchDomain operation and runs any +// custom request initialization. +func (c *CloudSearchDomain) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1409 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudtrail provides a client for AWS CloudTrail. 
+package cloudtrail + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a request for the AddTags operation. +func (c *CloudTrail) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds one or more tags to a trail, up to a limit of 10. Tags must be unique +// per trail. Overwrites an existing tag's value when a new value is specified +// for an existing tag key. If you specify a key without a value, the tag will +// be created with the specified key and a value of null. You can tag a trail +// that applies to all regions only from the region in which the trail was created +// (that is, from its home region). +func (c *CloudTrail) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrail = "CreateTrail" + +// CreateTrailRequest generates a request for the CreateTrail operation. +func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.Request, output *CreateTrailOutput) { + op := &request.Operation{ + Name: opCreateTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrailOutput{} + req.Data = output + return +} + +// Creates a trail that specifies the settings for delivery of log data to an +// Amazon S3 bucket. A maximum of five trails can exist in a region, irrespective +// of the region in which they were created. 
+func (c *CloudTrail) CreateTrail(input *CreateTrailInput) (*CreateTrailOutput, error) { + req, out := c.CreateTrailRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrail = "DeleteTrail" + +// DeleteTrailRequest generates a request for the DeleteTrail operation. +func (c *CloudTrail) DeleteTrailRequest(input *DeleteTrailInput) (req *request.Request, output *DeleteTrailOutput) { + op := &request.Operation{ + Name: opDeleteTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrailOutput{} + req.Data = output + return +} + +// Deletes a trail. This operation must be called from the region in which the +// trail was created. DeleteTrail cannot be called on the shadow trails (replicated +// trails in other regions) of a trail that is enabled in all regions. +func (c *CloudTrail) DeleteTrail(input *DeleteTrailInput) (*DeleteTrailOutput, error) { + req, out := c.DeleteTrailRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrails = "DescribeTrails" + +// DescribeTrailsRequest generates a request for the DescribeTrails operation. +func (c *CloudTrail) DescribeTrailsRequest(input *DescribeTrailsInput) (req *request.Request, output *DescribeTrailsOutput) { + op := &request.Operation{ + Name: opDescribeTrails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrailsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrailsOutput{} + req.Data = output + return +} + +// Retrieves settings for the trail associated with the current region for your +// account. +func (c *CloudTrail) DescribeTrails(input *DescribeTrailsInput) (*DescribeTrailsOutput, error) { + req, out := c.DescribeTrailsRequest(input) + err := req.Send() + return out, err +} + +const opGetTrailStatus = "GetTrailStatus" + +// GetTrailStatusRequest generates a request for the GetTrailStatus operation. 
+func (c *CloudTrail) GetTrailStatusRequest(input *GetTrailStatusInput) (req *request.Request, output *GetTrailStatusOutput) { + op := &request.Operation{ + Name: opGetTrailStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTrailStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrailStatusOutput{} + req.Data = output + return +} + +// Returns a JSON-formatted list of information about the specified trail. Fields +// include information on delivery errors, Amazon SNS and Amazon S3 errors, +// and start and stop logging times for each trail. This operation returns trail +// status from a single region. To return trail status from all regions, you +// must call the operation on each region. +func (c *CloudTrail) GetTrailStatus(input *GetTrailStatusInput) (*GetTrailStatusOutput, error) { + req, out := c.GetTrailStatusRequest(input) + err := req.Send() + return out, err +} + +const opListPublicKeys = "ListPublicKeys" + +// ListPublicKeysRequest generates a request for the ListPublicKeys operation. +func (c *CloudTrail) ListPublicKeysRequest(input *ListPublicKeysInput) (req *request.Request, output *ListPublicKeysOutput) { + op := &request.Operation{ + Name: opListPublicKeys, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPublicKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPublicKeysOutput{} + req.Data = output + return +} + +// Returns all public keys whose private keys were used to sign the digest files +// within the specified time range. The public key is needed to validate digest +// files that were signed with its corresponding private key. +// +// CloudTrail uses different private/public key pairs per region. Each digest +// file is signed with a private key unique to its region. Therefore, when you +// validate a digest file from a particular region, you must look in the same +// region for its corresponding public key. 
+func (c *CloudTrail) ListPublicKeys(input *ListPublicKeysInput) (*ListPublicKeysOutput, error) { + req, out := c.ListPublicKeysRequest(input) + err := req.Send() + return out, err +} + +const opListTags = "ListTags" + +// ListTagsRequest generates a request for the ListTags operation. +func (c *CloudTrail) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { + op := &request.Operation{ + Name: opListTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsOutput{} + req.Data = output + return +} + +// Lists the tags for the specified trail or trails in the current region. +// +// Lists the tags for the trail in the current region. +func (c *CloudTrail) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + err := req.Send() + return out, err +} + +const opLookupEvents = "LookupEvents" + +// LookupEventsRequest generates a request for the LookupEvents operation. +func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request.Request, output *LookupEventsOutput) { + op := &request.Operation{ + Name: opLookupEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &LookupEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &LookupEventsOutput{} + req.Data = output + return +} + +// Looks up API activity events captured by CloudTrail that create, update, +// or delete resources in your account. Events for a region can be looked up +// for the times in which you had CloudTrail turned on in that region during +// the last seven days. Lookup supports five different attributes: time range +// (defined by a start time and end time), user name, event name, resource type, +// and resource name. All attributes are optional. 
The maximum number of attributes +// that can be specified in any one lookup request are time range and one other +// attribute. The default number of results returned is 10, with a maximum of +// 50 possible. The response includes a token that you can use to get the next +// page of results. +// +// The rate of lookup requests is limited to one per second per account. If +// this limit is exceeded, a throttling error occurs. Events that occurred +// during the selected time range will not be available for lookup if CloudTrail +// logging was not enabled when the events occurred. +func (c *CloudTrail) LookupEvents(input *LookupEventsInput) (*LookupEventsOutput, error) { + req, out := c.LookupEventsRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a request for the RemoveTags operation. +func (c *CloudTrail) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes the specified tags from a trail. +func (c *CloudTrail) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + err := req.Send() + return out, err +} + +const opStartLogging = "StartLogging" + +// StartLoggingRequest generates a request for the StartLogging operation. 
+func (c *CloudTrail) StartLoggingRequest(input *StartLoggingInput) (req *request.Request, output *StartLoggingOutput) { + op := &request.Operation{ + Name: opStartLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &StartLoggingOutput{} + req.Data = output + return +} + +// Starts the recording of AWS API calls and log file delivery for a trail. +// For a trail that is enabled in all regions, this operation must be called +// from the region in which the trail was created. This operation cannot be +// called on the shadow trails (replicated trails in other regions) of a trail +// that is enabled in all regions. +func (c *CloudTrail) StartLogging(input *StartLoggingInput) (*StartLoggingOutput, error) { + req, out := c.StartLoggingRequest(input) + err := req.Send() + return out, err +} + +const opStopLogging = "StopLogging" + +// StopLoggingRequest generates a request for the StopLogging operation. +func (c *CloudTrail) StopLoggingRequest(input *StopLoggingInput) (req *request.Request, output *StopLoggingOutput) { + op := &request.Operation{ + Name: opStopLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &StopLoggingOutput{} + req.Data = output + return +} + +// Suspends the recording of AWS API calls and log file delivery for the specified +// trail. Under most circumstances, there is no need to use this action. You +// can update a trail without stopping it first. This action is the only way +// to stop recording. For a trail enabled in all regions, this operation must +// be called from the region in which the trail was created, or an InvalidHomeRegionException +// will occur. This operation cannot be called on the shadow trails (replicated +// trails in other regions) of a trail enabled in all regions. 
+func (c *CloudTrail) StopLogging(input *StopLoggingInput) (*StopLoggingOutput, error) { + req, out := c.StopLoggingRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrail = "UpdateTrail" + +// UpdateTrailRequest generates a request for the UpdateTrail operation. +func (c *CloudTrail) UpdateTrailRequest(input *UpdateTrailInput) (req *request.Request, output *UpdateTrailOutput) { + op := &request.Operation{ + Name: opUpdateTrail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTrailInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrailOutput{} + req.Data = output + return +} + +// Updates the settings that specify delivery of log files. Changes to a trail +// do not require stopping the CloudTrail service. Use this action to designate +// an existing bucket for log delivery. If the existing bucket has previously +// been a target for CloudTrail log files, an IAM policy exists for the bucket. +// UpdateTrail must be called from the region in which the trail was created; +// otherwise, an InvalidHomeRegionException is thrown. +func (c *CloudTrail) UpdateTrail(input *UpdateTrailInput) (*UpdateTrailOutput, error) { + req, out := c.UpdateTrailRequest(input) + err := req.Send() + return out, err +} + +// Specifies the tags to add to a trail. +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the trail to which one or more tags will be added. The + // format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + ResourceId *string `type:"string" required:"true"` + + // Contains a list of CloudTrail tags, up to a limit of 10. 
+ TagsList []*Tag `type:"list"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// Specifies the settings for each trail. +type CreateTrailInput struct { + _ struct{} `type:"structure"` + + // Specifies a log group name using an Amazon Resource Name (ARN), a unique + // identifier that represents the log group to which CloudTrail logs will be + // delivered. Not required unless you specify CloudWatchLogsRoleArn. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // Specifies the role for the CloudWatch Logs endpoint to assume to write to + // a user's log group. + CloudWatchLogsRoleArn *string `type:"string"` + + // Specifies whether log file integrity validation is enabled. The default is + // false. + // + // When you disable log file integrity validation, the chain of digest files + // is broken after one hour. CloudTrail will not create digest files for log + // files that were delivered during a period in which log file integrity validation + // was disabled. For example, if you enable log file integrity validation at + // noon on January 1, disable it at noon on January 2, and re-enable it at noon + // on January 10, digest files will not be created for the log files delivered + // from noon on January 2 to noon on January 10. The same applies whenever you + // stop CloudTrail logging or delete a trail. 
+ EnableLogFileValidation *bool `type:"boolean"` + + // Specifies whether the trail is publishing events from global services such + // as IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail is created in the current region or in all regions. + // The default is false. + IsMultiRegionTrail *bool `type:"boolean"` + + // Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. + // The value can be a an alias name prefixed by "alias/", a fully specified + // ARN to an alias, a fully specified ARN to a key, or a globally unique identifier. + // + // Examples: + // + // alias/MyAliasName arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // 12345678-1234-1234-1234-123456789012 + KmsKeyId *string `type:"string"` + + // Specifies the name of the trail. The name must meet the following requirements: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-) Start with a letter or number, and end with a letter or + // number Be between 3 and 128 characters Have no adjacent periods, underscores + // or dashes. Names like my-_namespace and my--namespace are invalid. Not be + // in IP address format (for example, 192.168.5.4) + Name *string `type:"string" required:"true"` + + // Specifies the name of the Amazon S3 bucket designated for publishing log + // files. See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html). + S3BucketName *string `type:"string" required:"true"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). 
+ // The maximum length is 200 characters. + S3KeyPrefix *string `type:"string"` + + // Specifies the name of the Amazon SNS topic defined for notification of log + // file delivery. The maximum length is 256 characters. + SnsTopicName *string `type:"string"` +} + +// String returns the string representation +func (s CreateTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrailInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type CreateTrailOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail + // logs will be delivered. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // Specifies the role for the CloudWatch Logs endpoint to assume to write to + // a user's log group. + CloudWatchLogsRoleArn *string `type:"string"` + + // Specifies whether the trail is publishing events from global services such + // as IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail exists in one region or in all regions. + IsMultiRegionTrail *bool `type:"boolean"` + + // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. + // The value is a fully specified ARN to a KMS key in the format: + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + KmsKeyId *string `type:"string"` + + // Specifies whether log file integrity validation is enabled. + LogFileValidationEnabled *bool `type:"boolean"` + + // Specifies the name of the trail. + Name *string `type:"string"` + + // Specifies the name of the Amazon S3 bucket designated for publishing log + // files. + S3BucketName *string `type:"string"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. 
For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + S3KeyPrefix *string `type:"string"` + + // Specifies the name of the Amazon SNS topic defined for notification of log + // file delivery. + SnsTopicName *string `type:"string"` + + // Specifies the ARN of the trail that was created. + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s CreateTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrailOutput) GoString() string { + return s.String() +} + +// The request that specifies the name of a trail to delete. +type DeleteTrailInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail to be deleted. The + // format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrailInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type DeleteTrailOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrailOutput) GoString() string { + return s.String() +} + +// Returns information about the trail. +type DescribeTrailsInput struct { + _ struct{} `type:"structure"` + + // Specifies whether to include shadow trails in the response. A shadow trail + // is the replication in a region of a trail that was created in a different + // region. The default is true. 
+ IncludeShadowTrails *bool `locationName:"includeShadowTrails" type:"boolean"` + + // Specifies a list of trail names, trail ARNs, or both, of the trails to describe. + // The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // If an empty list is specified, information for the trail in the current region + // is returned. + // + // If an empty list is specified and IncludeShadowTrails is false, then information + // for all trails in the current region is returned. If an empty list is specified + // and IncludeShadowTrails is null or true, then information for all trails + // in the current region and any associated shadow trails in other regions is + // returned. If one or more trail names are specified, information is returned + // only if the names match the names of trails belonging only to the current + // region. To return information about a trail in another region, you must specify + // its trail ARN. + TrailNameList []*string `locationName:"trailNameList" type:"list"` +} + +// String returns the string representation +func (s DescribeTrailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrailsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type DescribeTrailsOutput struct { + _ struct{} `type:"structure"` + + // The list of trail objects. + TrailList []*Trail `locationName:"trailList" type:"list"` +} + +// String returns the string representation +func (s DescribeTrailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrailsOutput) GoString() string { + return s.String() +} + +// Contains information about an event that was returned by a lookup request. +// The result includes a representation of a CloudTrail event. 
+type Event struct { + _ struct{} `type:"structure"` + + // A JSON string that contains a representation of the event returned. + CloudTrailEvent *string `type:"string"` + + // The CloudTrail ID of the event returned. + EventId *string `type:"string"` + + // The name of the event returned. + EventName *string `type:"string"` + + // The date and time of the event returned. + EventTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A list of resources referenced by the event returned. + Resources []*Resource `type:"list"` + + // A user name or role name of the requester that called the API in the event + // returned. + Username *string `type:"string"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// The name of a trail about which you want the current status. +type GetTrailStatusInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail for which you are requesting + // status. To get the status of a shadow trail (a replication of the trail in + // another region), you must specify its ARN. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTrailStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrailStatusInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type GetTrailStatusOutput struct { + _ struct{} `type:"structure"` + + // Whether the CloudTrail is currently logging AWS API calls. 
+ IsLogging *bool `type:"boolean"` + + // Displays any CloudWatch Logs error that CloudTrail encountered when attempting + // to deliver logs to CloudWatch Logs. + LatestCloudWatchLogsDeliveryError *string `type:"string"` + + // Displays the most recent date and time when CloudTrail delivered logs to + // CloudWatch Logs. + LatestCloudWatchLogsDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // This field is deprecated. + LatestDeliveryAttemptSucceeded *string `type:"string"` + + // This field is deprecated. + LatestDeliveryAttemptTime *string `type:"string"` + + // Displays any Amazon S3 error that CloudTrail encountered when attempting + // to deliver log files to the designated bucket. For more information see the + // topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) + // in the Amazon S3 API Reference. + // + // This error occurs only when there is a problem with the destination S3 bucket + // and will not occur for timeouts. To resolve the issue, create a new bucket + // and call UpdateTrail to specify the new bucket, or fix the existing objects + // so that CloudTrail can again write to the bucket. + LatestDeliveryError *string `type:"string"` + + // Specifies the date and time that CloudTrail last delivered log files to an + // account's Amazon S3 bucket. + LatestDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Displays any Amazon S3 error that CloudTrail encountered when attempting + // to deliver a digest file to the designated bucket. For more information see + // the topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) + // in the Amazon S3 API Reference. + // + // This error occurs only when there is a problem with the destination S3 bucket + // and will not occur for timeouts. 
To resolve the issue, create a new bucket + // and call UpdateTrail to specify the new bucket, or fix the existing objects + // so that CloudTrail can again write to the bucket. + LatestDigestDeliveryError *string `type:"string"` + + // Specifies the date and time that CloudTrail last delivered a digest file + // to an account's Amazon S3 bucket. + LatestDigestDeliveryTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // This field is deprecated. + LatestNotificationAttemptSucceeded *string `type:"string"` + + // This field is deprecated. + LatestNotificationAttemptTime *string `type:"string"` + + // Displays any Amazon SNS error that CloudTrail encountered when attempting + // to send a notification. For more information about Amazon SNS errors, see + // the Amazon SNS Developer Guide (http://docs.aws.amazon.com/sns/latest/dg/welcome.html). + LatestNotificationError *string `type:"string"` + + // Specifies the date and time of the most recent Amazon SNS notification that + // CloudTrail has written a new log file to an account's Amazon S3 bucket. + LatestNotificationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Specifies the most recent date and time when CloudTrail started recording + // API calls for an AWS account. + StartLoggingTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Specifies the most recent date and time when CloudTrail stopped recording + // API calls for an AWS account. + StopLoggingTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // This field is deprecated. + TimeLoggingStarted *string `type:"string"` + + // This field is deprecated. + TimeLoggingStopped *string `type:"string"` +} + +// String returns the string representation +func (s GetTrailStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrailStatusOutput) GoString() string { + return s.String() +} + +// Requests the public keys for a specified time range. 
+type ListPublicKeysInput struct { + _ struct{} `type:"structure"` + + // Optionally specifies, in UTC, the end of the time range to look up public + // keys for CloudTrail digest files. If not specified, the current time is used. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Optionally specifies, in UTC, the start of the time range to look up public + // keys for CloudTrail digest files. If not specified, the current time is used, + // and the current public key is returned. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ListPublicKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublicKeysInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type ListPublicKeysOutput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Contains an array of PublicKey objects. + // + // The returned public keys may have validity time ranges that overlap. + PublicKeyList []*PublicKey `type:"list"` +} + +// String returns the string representation +func (s ListPublicKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublicKeysOutput) GoString() string { + return s.String() +} + +// Specifies a list of trail tags to return. +type ListTagsInput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // Specifies a list of trail ARNs whose tags will be listed. The list has a + // limit of 20 ARNs. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. 
+ ResourceIdList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type ListTagsOutput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // A list of resource tags. + ResourceTagList []*ResourceTag `type:"list"` +} + +// String returns the string representation +func (s ListTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsOutput) GoString() string { + return s.String() +} + +// Specifies an attribute and value that filter the events returned. +type LookupAttribute struct { + _ struct{} `type:"structure"` + + // Specifies an attribute on which to filter the events returned. + AttributeKey *string `type:"string" required:"true" enum:"LookupAttributeKey"` + + // Specifies a value for the specified AttributeKey. + AttributeValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LookupAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupAttribute) GoString() string { + return s.String() +} + +// Contains a request for LookupEvents. +type LookupEventsInput struct { + _ struct{} `type:"structure"` + + // Specifies that only events that occur before or at the specified time are + // returned. If the specified end time is before the specified start time, an + // error is returned. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Contains a list of lookup attributes. Currently the list can contain only + // one item. 
+	LookupAttributes []*LookupAttribute `type:"list"`
+
+	// The number of events to return. Possible values are 1 through 50. The default
+	// is 10.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// The token to use to get the next page of results after a previous API call.
+	// This token must be passed in with the same parameters that were specified
+	// in the original call. For example, if the original call specified an
+	// AttributeKey of 'Username' with a value of 'root', the call with NextToken
+	// should include those same parameters.
+	NextToken *string `type:"string"`
+
+	// Specifies that only events that occur after or at the specified time are
+	// returned. If the specified start time is after the specified end time, an
+	// error is returned.
+	StartTime *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s LookupEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LookupEventsInput) GoString() string {
+	return s.String()
+}
+
+// Contains a response to a LookupEvents action.
+type LookupEventsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of events returned based on the lookup attributes specified and the
+	// CloudTrail event. The events list is sorted by time. The most recent event
+	// is listed first.
+	Events []*Event `type:"list"`
+
+	// The token to use to get the next page of results after a previous API call.
+	// If the token does not appear, there are no more results to return. The token
+	// must be passed in with the same parameters as the previous call. For example,
+	// if the original call specified an AttributeKey of 'Username' with a value
+	// of 'root', the call with NextToken should include those same parameters.
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s LookupEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupEventsOutput) GoString() string { + return s.String() +} + +// Contains information about a returned public key. +type PublicKey struct { + _ struct{} `type:"structure"` + + // The fingerprint of the public key. + Fingerprint *string `type:"string"` + + // The ending time of validity of the public key. + ValidityEndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The starting time of validity of the public key. + ValidityStartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The DER encoded public key value in PKCS#1 format. + Value []byte `type:"blob"` +} + +// String returns the string representation +func (s PublicKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicKey) GoString() string { + return s.String() +} + +// Specifies the tags to remove from a trail. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the trail from which tags should be removed. The format + // of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + ResourceId *string `type:"string" required:"true"` + + // Specifies a list of tags to be removed. + TagsList []*Tag `type:"list"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. 
+type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Specifies the type and name of a resource referenced by an event. +type Resource struct { + _ struct{} `type:"structure"` + + // The name of the resource referenced by the event returned. These are user-created + // names whose values will depend on the environment. For example, the resource + // name might be "auto-scaling-test-group" for an Auto Scaling Group or "i-1234567" + // for an EC2 Instance. + ResourceName *string `type:"string"` + + // The type of a resource referenced by the event returned. When the resource + // type cannot be determined, null is returned. Some examples of resource types + // are: Instance for EC2, Trail for CloudTrail, DBInstance for RDS, and AccessKey + // for IAM. For a list of resource types supported for event lookup, see Resource + // Types Supported for Event Lookup (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/lookup_supported_resourcetypes.html). + ResourceType *string `type:"string"` +} + +// String returns the string representation +func (s Resource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resource) GoString() string { + return s.String() +} + +// A resource tag. +type ResourceTag struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the resource. + ResourceId *string `type:"string"` + + // A list of tags. 
+ TagsList []*Tag `type:"list"` +} + +// String returns the string representation +func (s ResourceTag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceTag) GoString() string { + return s.String() +} + +// The request to CloudTrail to start logging AWS API calls for an account. +type StartLoggingInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail for which CloudTrail + // logs AWS API calls. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLoggingInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type StartLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLoggingOutput) GoString() string { + return s.String() +} + +// Passes the request to CloudTrail to stop logging AWS API calls for the specified +// account. +type StopLoggingInput struct { + _ struct{} `type:"structure"` + + // Specifies the name or the CloudTrail ARN of the trail for which CloudTrail + // will stop logging AWS API calls. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. 
+	Name *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopLoggingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopLoggingInput) GoString() string {
+	return s.String()
+}
+
+// Returns the objects or data listed below if successful. Otherwise, returns
+// an error.
+type StopLoggingOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s StopLoggingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopLoggingOutput) GoString() string {
+	return s.String()
+}
+
+// A custom key-value pair associated with a resource such as a CloudTrail trail.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// The key in a key-value pair. The key must be no longer than 128 Unicode
+	// characters. The key must be unique for the resource to which it applies.
+	Key *string `type:"string" required:"true"`
+
+	// The value in a key-value pair of a tag. The value must be no longer than
+	// 256 Unicode characters.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+// The settings for a trail.
+type Trail struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies an Amazon Resource Name (ARN), a unique identifier that represents
+	// the log group to which CloudTrail logs will be delivered.
+	CloudWatchLogsLogGroupArn *string `type:"string"`
+
+	// Specifies the role for the CloudWatch Logs endpoint to assume to write to
+	// a user's log group.
+	CloudWatchLogsRoleArn *string `type:"string"`
+
+	// The region in which the trail was created.
+ HomeRegion *string `type:"string"` + + // Set to True to include AWS API calls from AWS global services such as IAM. + // Otherwise, False. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail belongs only to one region or exists in all regions. + IsMultiRegionTrail *bool `type:"boolean"` + + // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. + // The value is a fully specified ARN to a KMS key in the format: + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + KmsKeyId *string `type:"string"` + + // Specifies whether log file validation is enabled. + LogFileValidationEnabled *bool `type:"boolean"` + + // Name of the trail set by calling CreateTrail. The maximum length is 128 characters. + Name *string `type:"string"` + + // Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. + // See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html). + S3BucketName *string `type:"string"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html).The + // maximum length is 200 characters. + S3KeyPrefix *string `type:"string"` + + // Name of the existing Amazon SNS topic that CloudTrail uses to notify the + // account owner when new CloudTrail log files have been delivered. The maximum + // length is 256 characters. + SnsTopicName *string `type:"string"` + + // The Amazon Resource Name of the trail. The TrailARN format is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. 
+ TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s Trail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Trail) GoString() string { + return s.String() +} + +// Specifies settings to update for the trail. +type UpdateTrailInput struct { + _ struct{} `type:"structure"` + + // Specifies a log group name using an Amazon Resource Name (ARN), a unique + // identifier that represents the log group to which CloudTrail logs will be + // delivered. Not required unless you specify CloudWatchLogsRoleArn. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // Specifies the role for the CloudWatch Logs endpoint to assume to write to + // a user's log group. + CloudWatchLogsRoleArn *string `type:"string"` + + // Specifies whether log file validation is enabled. The default is false. + // + // When you disable log file integrity validation, the chain of digest files + // is broken after one hour. CloudTrail will not create digest files for log + // files that were delivered during a period in which log file integrity validation + // was disabled. For example, if you enable log file integrity validation at + // noon on January 1, disable it at noon on January 2, and re-enable it at noon + // on January 10, digest files will not be created for the log files delivered + // from noon on January 2 to noon on January 10. The same applies whenever you + // stop CloudTrail logging or delete a trail. + EnableLogFileValidation *bool `type:"boolean"` + + // Specifies whether the trail is publishing events from global services such + // as IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail applies only to the current region or to all + // regions. The default is false. 
If the trail exists only in the current region
+	// and this value is set to true, shadow trails (replications of the trail)
+	// will be created in the other regions. If the trail exists in all regions
+	// and this value is set to false, the trail will remain in the region where
+	// it was created, and its shadow trails in other regions will be deleted.
+	IsMultiRegionTrail *bool `type:"boolean"`
+
+	// Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail.
+	// The value can be an alias name prefixed by "alias/", a fully specified
+	// ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
+	//
+	// Examples:
+	//
+	//  alias/MyAliasName arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
+	// arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
+	// 12345678-1234-1234-1234-123456789012
+	KmsKeyId *string `type:"string"`
+
+	// Specifies the name of the trail or trail ARN. If Name is a trail name, the
+	// string must meet the following requirements:
+	//
+	//  Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores
+	// (_), or dashes (-) Start with a letter or number, and end with a letter or
+	// number Be between 3 and 128 characters Have no adjacent periods, underscores
+	// or dashes. Names like my-_namespace and my--namespace are invalid. Not be
+	// in IP address format (for example, 192.168.5.4) If Name is a trail ARN,
+	// it must be in the format arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail.
+	Name *string `type:"string" required:"true"`
+
+	// Specifies the name of the Amazon S3 bucket designated for publishing log
+	// files. See Amazon S3 Bucket Naming Requirements (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html).
+	S3BucketName *string `type:"string"`
+
+	// Specifies the Amazon S3 key prefix that comes after the name of the bucket
+	// you have designated for log file delivery.
For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + // The maximum length is 200 characters. + S3KeyPrefix *string `type:"string"` + + // Specifies the name of the Amazon SNS topic defined for notification of log + // file delivery. The maximum length is 256 characters. + SnsTopicName *string `type:"string"` +} + +// String returns the string representation +func (s UpdateTrailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrailInput) GoString() string { + return s.String() +} + +// Returns the objects or data listed below if successful. Otherwise, returns +// an error. +type UpdateTrailOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail + // logs will be delivered. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // Specifies the role for the CloudWatch Logs endpoint to assume to write to + // a user's log group. + CloudWatchLogsRoleArn *string `type:"string"` + + // Specifies whether the trail is publishing events from global services such + // as IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Specifies whether the trail exists in one region or in all regions. + IsMultiRegionTrail *bool `type:"boolean"` + + // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. + // The value is a fully specified ARN to a KMS key in the format: + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + KmsKeyId *string `type:"string"` + + // Specifies whether log file integrity validation is enabled. + LogFileValidationEnabled *bool `type:"boolean"` + + // Specifies the name of the trail. + Name *string `type:"string"` + + // Specifies the name of the Amazon S3 bucket designated for publishing log + // files. 
+ S3BucketName *string `type:"string"` + + // Specifies the Amazon S3 key prefix that comes after the name of the bucket + // you have designated for log file delivery. For more information, see Finding + // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). + S3KeyPrefix *string `type:"string"` + + // Specifies the name of the Amazon SNS topic defined for notification of log + // file delivery. + SnsTopicName *string `type:"string"` + + // Specifies the ARN of the trail that was updated. + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s UpdateTrailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrailOutput) GoString() string { + return s.String() +} + +const ( + // @enum LookupAttributeKey + LookupAttributeKeyEventId = "EventId" + // @enum LookupAttributeKey + LookupAttributeKeyEventName = "EventName" + // @enum LookupAttributeKey + LookupAttributeKeyUsername = "Username" + // @enum LookupAttributeKey + LookupAttributeKeyResourceType = "ResourceType" + // @enum LookupAttributeKey + LookupAttributeKeyResourceName = "ResourceName" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/cloudtrailiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,62 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudtrailiface provides an interface for the AWS CloudTrail. 
+package cloudtrailiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudtrail" +) + +// CloudTrailAPI is the interface type for cloudtrail.CloudTrail. +type CloudTrailAPI interface { + AddTagsRequest(*cloudtrail.AddTagsInput) (*request.Request, *cloudtrail.AddTagsOutput) + + AddTags(*cloudtrail.AddTagsInput) (*cloudtrail.AddTagsOutput, error) + + CreateTrailRequest(*cloudtrail.CreateTrailInput) (*request.Request, *cloudtrail.CreateTrailOutput) + + CreateTrail(*cloudtrail.CreateTrailInput) (*cloudtrail.CreateTrailOutput, error) + + DeleteTrailRequest(*cloudtrail.DeleteTrailInput) (*request.Request, *cloudtrail.DeleteTrailOutput) + + DeleteTrail(*cloudtrail.DeleteTrailInput) (*cloudtrail.DeleteTrailOutput, error) + + DescribeTrailsRequest(*cloudtrail.DescribeTrailsInput) (*request.Request, *cloudtrail.DescribeTrailsOutput) + + DescribeTrails(*cloudtrail.DescribeTrailsInput) (*cloudtrail.DescribeTrailsOutput, error) + + GetTrailStatusRequest(*cloudtrail.GetTrailStatusInput) (*request.Request, *cloudtrail.GetTrailStatusOutput) + + GetTrailStatus(*cloudtrail.GetTrailStatusInput) (*cloudtrail.GetTrailStatusOutput, error) + + ListPublicKeysRequest(*cloudtrail.ListPublicKeysInput) (*request.Request, *cloudtrail.ListPublicKeysOutput) + + ListPublicKeys(*cloudtrail.ListPublicKeysInput) (*cloudtrail.ListPublicKeysOutput, error) + + ListTagsRequest(*cloudtrail.ListTagsInput) (*request.Request, *cloudtrail.ListTagsOutput) + + ListTags(*cloudtrail.ListTagsInput) (*cloudtrail.ListTagsOutput, error) + + LookupEventsRequest(*cloudtrail.LookupEventsInput) (*request.Request, *cloudtrail.LookupEventsOutput) + + LookupEvents(*cloudtrail.LookupEventsInput) (*cloudtrail.LookupEventsOutput, error) + + RemoveTagsRequest(*cloudtrail.RemoveTagsInput) (*request.Request, *cloudtrail.RemoveTagsOutput) + + RemoveTags(*cloudtrail.RemoveTagsInput) (*cloudtrail.RemoveTagsOutput, error) + + StartLoggingRequest(*cloudtrail.StartLoggingInput) 
(*request.Request, *cloudtrail.StartLoggingOutput) + + StartLogging(*cloudtrail.StartLoggingInput) (*cloudtrail.StartLoggingOutput, error) + + StopLoggingRequest(*cloudtrail.StopLoggingInput) (*request.Request, *cloudtrail.StopLoggingOutput) + + StopLogging(*cloudtrail.StopLoggingInput) (*cloudtrail.StopLoggingOutput, error) + + UpdateTrailRequest(*cloudtrail.UpdateTrailInput) (*request.Request, *cloudtrail.UpdateTrailOutput) + + UpdateTrail(*cloudtrail.UpdateTrailInput) (*cloudtrail.UpdateTrailOutput, error) +} + +var _ CloudTrailAPI = (*cloudtrail.CloudTrail)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,296 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudtrail_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudtrail" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudTrail_AddTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.AddTagsInput{ + ResourceId: aws.String("String"), // Required + TagsList: []*cloudtrail.Tag{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudTrail_CreateTrail() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.CreateTrailInput{ + Name: aws.String("String"), // Required + S3BucketName: aws.String("String"), // Required + CloudWatchLogsLogGroupArn: aws.String("String"), + CloudWatchLogsRoleArn: aws.String("String"), + EnableLogFileValidation: aws.Bool(true), + IncludeGlobalServiceEvents: aws.Bool(true), + IsMultiRegionTrail: aws.Bool(true), + KmsKeyId: aws.String("String"), + S3KeyPrefix: aws.String("String"), + SnsTopicName: aws.String("String"), + } + resp, err := svc.CreateTrail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_DeleteTrail() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.DeleteTrailInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.DeleteTrail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_DescribeTrails() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.DescribeTrailsInput{ + IncludeShadowTrails: aws.Bool(true), + TrailNameList: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudTrail_GetTrailStatus() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.GetTrailStatusInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.GetTrailStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_ListPublicKeys() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.ListPublicKeysInput{ + EndTime: aws.Time(time.Now()), + NextToken: aws.String("String"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.ListPublicKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_ListTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.ListTagsInput{ + ResourceIdList: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.ListTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_LookupEvents() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.LookupEventsInput{ + EndTime: aws.Time(time.Now()), + LookupAttributes: []*cloudtrail.LookupAttribute{ + { // Required + AttributeKey: aws.String("LookupAttributeKey"), // Required + AttributeValue: aws.String("String"), // Required + }, + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.LookupEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_RemoveTags() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.RemoveTagsInput{ + ResourceId: aws.String("String"), // Required + TagsList: []*cloudtrail.Tag{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_StartLogging() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.StartLoggingInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.StartLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudTrail_StopLogging() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.StopLoggingInput{ + Name: aws.String("String"), // Required + } + resp, err := svc.StopLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudTrail_UpdateTrail() { + svc := cloudtrail.New(session.New()) + + params := &cloudtrail.UpdateTrailInput{ + Name: aws.String("String"), // Required + CloudWatchLogsLogGroupArn: aws.String("String"), + CloudWatchLogsRoleArn: aws.String("String"), + EnableLogFileValidation: aws.Bool(true), + IncludeGlobalServiceEvents: aws.Bool(true), + IsMultiRegionTrail: aws.Bool(true), + KmsKeyId: aws.String("String"), + S3BucketName: aws.String("String"), + S3KeyPrefix: aws.String("String"), + SnsTopicName: aws.String("String"), + } + resp, err := svc.UpdateTrail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudtrail/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,106 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudtrail + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the CloudTrail API Reference. It provides descriptions of actions, +// data types, common parameters, and common errors for CloudTrail. 
+// +// CloudTrail is a web service that records AWS API calls for your AWS account +// and delivers log files to an Amazon S3 bucket. The recorded information includes +// the identity of the user, the start time of the AWS API call, the source +// IP address, the request parameters, and the response elements returned by +// the service. +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to AWSCloudTrail. For example, the SDKs +// take care of cryptographically signing requests, managing errors, and retrying +// requests automatically. For information about the AWS SDKs, including how +// to download and install them, see the Tools for Amazon Web Services page +// (http://aws.amazon.com/tools/). See the CloudTrail User Guide for information +// about the data that is included with each AWS API call listed in the log +// files. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudTrail struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cloudtrail" + +// New creates a new instance of the CloudTrail client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudTrail client from just a session. 
+// svc := cloudtrail.New(mySession) +// +// // Create a CloudTrail client with additional configuration +// svc := cloudtrail.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudTrail { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudTrail { + svc := &CloudTrail{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-11-01", + JSONVersion: "1.1", + TargetPrefix: "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudTrail operation and runs any +// custom request initialization. 
+func (c *CloudTrail) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1375 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatch provides a client for Amazon CloudWatch. +package cloudwatch + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opDeleteAlarms = "DeleteAlarms" + +// DeleteAlarmsRequest generates a request for the DeleteAlarms operation. +func (c *CloudWatch) DeleteAlarmsRequest(input *DeleteAlarmsInput) (req *request.Request, output *DeleteAlarmsOutput) { + op := &request.Operation{ + Name: opDeleteAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAlarmsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAlarmsOutput{} + req.Data = output + return +} + +// Deletes all specified alarms. In the event of an error, no alarms are deleted. 
+func (c *CloudWatch) DeleteAlarms(input *DeleteAlarmsInput) (*DeleteAlarmsOutput, error) { + req, out := c.DeleteAlarmsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAlarmHistory = "DescribeAlarmHistory" + +// DescribeAlarmHistoryRequest generates a request for the DescribeAlarmHistory operation. +func (c *CloudWatch) DescribeAlarmHistoryRequest(input *DescribeAlarmHistoryInput) (req *request.Request, output *DescribeAlarmHistoryOutput) { + op := &request.Operation{ + Name: opDescribeAlarmHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmHistoryOutput{} + req.Data = output + return +} + +// Retrieves history for the specified alarm. Filter alarms by date range or +// item type. If an alarm name is not specified, Amazon CloudWatch returns histories +// for all of the owner's alarms. +func (c *CloudWatch) DescribeAlarmHistory(input *DescribeAlarmHistoryInput) (*DescribeAlarmHistoryOutput, error) { + req, out := c.DescribeAlarmHistoryRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) DescribeAlarmHistoryPages(input *DescribeAlarmHistoryInput, fn func(p *DescribeAlarmHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAlarmHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAlarmHistoryOutput), lastPage) + }) +} + +const opDescribeAlarms = "DescribeAlarms" + +// DescribeAlarmsRequest generates a request for the DescribeAlarms operation. 
+func (c *CloudWatch) DescribeAlarmsRequest(input *DescribeAlarmsInput) (req *request.Request, output *DescribeAlarmsOutput) { + op := &request.Operation{ + Name: opDescribeAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmsOutput{} + req.Data = output + return +} + +// Retrieves alarms with the specified names. If no name is specified, all alarms +// for the user are returned. Alarms can be retrieved by using only a prefix +// for the alarm name, the alarm state, or a prefix for any action. +func (c *CloudWatch) DescribeAlarms(input *DescribeAlarmsInput) (*DescribeAlarmsOutput, error) { + req, out := c.DescribeAlarmsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) DescribeAlarmsPages(input *DescribeAlarmsInput, fn func(p *DescribeAlarmsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeAlarmsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeAlarmsOutput), lastPage) + }) +} + +const opDescribeAlarmsForMetric = "DescribeAlarmsForMetric" + +// DescribeAlarmsForMetricRequest generates a request for the DescribeAlarmsForMetric operation. 
+func (c *CloudWatch) DescribeAlarmsForMetricRequest(input *DescribeAlarmsForMetricInput) (req *request.Request, output *DescribeAlarmsForMetricOutput) { + op := &request.Operation{ + Name: opDescribeAlarmsForMetric, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAlarmsForMetricInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAlarmsForMetricOutput{} + req.Data = output + return +} + +// Retrieves all alarms for a single metric. Specify a statistic, period, or +// unit to filter the set of alarms further. +func (c *CloudWatch) DescribeAlarmsForMetric(input *DescribeAlarmsForMetricInput) (*DescribeAlarmsForMetricOutput, error) { + req, out := c.DescribeAlarmsForMetricRequest(input) + err := req.Send() + return out, err +} + +const opDisableAlarmActions = "DisableAlarmActions" + +// DisableAlarmActionsRequest generates a request for the DisableAlarmActions operation. +func (c *CloudWatch) DisableAlarmActionsRequest(input *DisableAlarmActionsInput) (req *request.Request, output *DisableAlarmActionsOutput) { + op := &request.Operation{ + Name: opDisableAlarmActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAlarmActionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableAlarmActionsOutput{} + req.Data = output + return +} + +// Disables actions for the specified alarms. When an alarm's actions are disabled +// the alarm's state may change, but none of the alarm's actions will execute. 
+func (c *CloudWatch) DisableAlarmActions(input *DisableAlarmActionsInput) (*DisableAlarmActionsOutput, error) { + req, out := c.DisableAlarmActionsRequest(input) + err := req.Send() + return out, err +} + +const opEnableAlarmActions = "EnableAlarmActions" + +// EnableAlarmActionsRequest generates a request for the EnableAlarmActions operation. +func (c *CloudWatch) EnableAlarmActionsRequest(input *EnableAlarmActionsInput) (req *request.Request, output *EnableAlarmActionsOutput) { + op := &request.Operation{ + Name: opEnableAlarmActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAlarmActionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableAlarmActionsOutput{} + req.Data = output + return +} + +// Enables actions for the specified alarms. +func (c *CloudWatch) EnableAlarmActions(input *EnableAlarmActionsInput) (*EnableAlarmActionsOutput, error) { + req, out := c.EnableAlarmActionsRequest(input) + err := req.Send() + return out, err +} + +const opGetMetricStatistics = "GetMetricStatistics" + +// GetMetricStatisticsRequest generates a request for the GetMetricStatistics operation. +func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput) (req *request.Request, output *GetMetricStatisticsOutput) { + op := &request.Operation{ + Name: opGetMetricStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMetricStatisticsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetMetricStatisticsOutput{} + req.Data = output + return +} + +// Gets statistics for the specified metric. +// +// The maximum number of data points returned from a single GetMetricStatistics +// request is 1,440, wereas the maximum number of data points that can be queried +// is 50,850. 
If you make a request that generates more than 1,440 data points, +// Amazon CloudWatch returns an error. In such a case, you can alter the request +// by narrowing the specified time range or increasing the specified period. +// Alternatively, you can make multiple requests across adjacent time ranges. +// +// Amazon CloudWatch aggregates data points based on the length of the period +// that you specify. For example, if you request statistics with a one-minute +// granularity, Amazon CloudWatch aggregates data points with time stamps that +// fall within the same one-minute period. In such a case, the data points queried +// can greatly outnumber the data points returned. +// +// The following examples show various statistics allowed by the data point +// query maximum of 50,850 when you call GetMetricStatistics on Amazon EC2 instances +// with detailed (one-minute) monitoring enabled: +// +// Statistics for up to 400 instances for a span of one hour Statistics for +// up to 35 instances over a span of 24 hours Statistics for up to 2 instances +// over a span of 2 weeks For information about the namespace, metric names, +// and dimensions that other Amazon Web Services products use to send metrics +// to Cloudwatch, go to Amazon CloudWatch Metrics, Namespaces, and Dimensions +// Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) +// in the Amazon CloudWatch Developer Guide. +func (c *CloudWatch) GetMetricStatistics(input *GetMetricStatisticsInput) (*GetMetricStatisticsOutput, error) { + req, out := c.GetMetricStatisticsRequest(input) + err := req.Send() + return out, err +} + +const opListMetrics = "ListMetrics" + +// ListMetricsRequest generates a request for the ListMetrics operation. 
+func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.Request, output *ListMetricsOutput) { + op := &request.Operation{ + Name: opListMetrics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMetricsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMetricsOutput{} + req.Data = output + return +} + +// Returns a list of valid metrics stored for the AWS account owner. Returned +// metrics can be used with GetMetricStatistics to obtain statistical data for +// a given metric. +func (c *CloudWatch) ListMetrics(input *ListMetricsInput) (*ListMetricsOutput, error) { + req, out := c.ListMetricsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatch) ListMetricsPages(input *ListMetricsInput, fn func(p *ListMetricsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMetricsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMetricsOutput), lastPage) + }) +} + +const opPutMetricAlarm = "PutMetricAlarm" + +// PutMetricAlarmRequest generates a request for the PutMetricAlarm operation. 
+func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *request.Request, output *PutMetricAlarmOutput) { + op := &request.Operation{ + Name: opPutMetricAlarm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricAlarmInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutMetricAlarmOutput{} + req.Data = output + return +} + +// Creates or updates an alarm and associates it with the specified Amazon CloudWatch +// metric. Optionally, this operation can associate one or more Amazon Simple +// Notification Service resources with the alarm. +// +// When this operation creates an alarm, the alarm state is immediately set +// to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. +// Any actions associated with the StateValue is then executed. +func (c *CloudWatch) PutMetricAlarm(input *PutMetricAlarmInput) (*PutMetricAlarmOutput, error) { + req, out := c.PutMetricAlarmRequest(input) + err := req.Send() + return out, err +} + +const opPutMetricData = "PutMetricData" + +// PutMetricDataRequest generates a request for the PutMetricData operation. +func (c *CloudWatch) PutMetricDataRequest(input *PutMetricDataInput) (req *request.Request, output *PutMetricDataOutput) { + op := &request.Operation{ + Name: opPutMetricData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricDataInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutMetricDataOutput{} + req.Data = output + return +} + +// Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch associates +// the data points with the specified metric. 
If the specified metric does not +// exist, Amazon CloudWatch creates the metric. It can take up to fifteen minutes +// for a new metric to appear in calls to the ListMetrics action. +// +// The size of a PutMetricData request is limited to 8 KB for HTTP GET requests +// and 40 KB for HTTP POST requests. +// +// Although the Value parameter accepts numbers of type Double, Amazon CloudWatch +// truncates values with very large exponents. Values with base-10 exponents +// greater than 126 (1 x 10^126) are truncated. Likewise, values with base-10 +// exponents less than -130 (1 x 10^-130) are also truncated. Data that is +// timestamped 24 hours or more in the past may take in excess of 48 hours to +// become available from submission time using GetMetricStatistics. +func (c *CloudWatch) PutMetricData(input *PutMetricDataInput) (*PutMetricDataOutput, error) { + req, out := c.PutMetricDataRequest(input) + err := req.Send() + return out, err +} + +const opSetAlarmState = "SetAlarmState" + +// SetAlarmStateRequest generates a request for the SetAlarmState operation. +func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *request.Request, output *SetAlarmStateOutput) { + op := &request.Operation{ + Name: opSetAlarmState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetAlarmStateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetAlarmStateOutput{} + req.Data = output + return +} + +// Temporarily sets the state of an alarm. When the updated StateValue differs +// from the previous value, the action configured for the appropriate state +// is invoked. This is not a permanent change. The next periodic alarm check +// (in about a minute) will set the alarm to its actual state. 
+func (c *CloudWatch) SetAlarmState(input *SetAlarmStateInput) (*SetAlarmStateOutput, error) { + req, out := c.SetAlarmStateRequest(input) + err := req.Send() + return out, err +} + +// The AlarmHistoryItem data type contains descriptive information about the +// history of a specific alarm. If you call DescribeAlarmHistory, Amazon CloudWatch +// returns this data type as part of the DescribeAlarmHistoryResult data type. +type AlarmHistoryItem struct { + _ struct{} `type:"structure"` + + // The descriptive name for the alarm. + AlarmName *string `min:"1" type:"string"` + + // Machine-readable data about the alarm in JSON format. + HistoryData *string `min:"1" type:"string"` + + // The type of alarm history item. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // A human-readable summary of the alarm history. + HistorySummary *string `min:"1" type:"string"` + + // The time stamp for the alarm history item. Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s AlarmHistoryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AlarmHistoryItem) GoString() string { + return s.String() +} + +// The Datapoint data type encapsulates the statistical data that Amazon CloudWatch +// computes from metric data. +type Datapoint struct { + _ struct{} `type:"structure"` + + // The average of metric values that correspond to the datapoint. + Average *float64 `type:"double"` + + // The maximum of the metric value used for the datapoint. 
+ Maximum *float64 `type:"double"` + + // The minimum metric value used for the datapoint. + Minimum *float64 `type:"double"` + + // The number of metric values that contributed to the aggregate value of this + // datapoint. + SampleCount *float64 `type:"double"` + + // The sum of metric values used for the datapoint. + Sum *float64 `type:"double"` + + // The time stamp used for the datapoint. Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The standard unit used for the datapoint. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s Datapoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Datapoint) GoString() string { + return s.String() +} + +type DeleteAlarmsInput struct { + _ struct{} `type:"structure"` + + // A list of alarms to be deleted. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAlarmsInput) GoString() string { + return s.String() +} + +type DeleteAlarmsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAlarmsOutput) GoString() string { + return s.String() +} + +type DescribeAlarmHistoryInput struct { + _ struct{} `type:"structure"` + + // The name of the alarm. 
+ AlarmName *string `min:"1" type:"string"` + + // The ending date to retrieve alarm history. + EndDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The type of alarm histories to retrieve. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // The maximum number of alarm history records to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + // The starting date to retrieve alarm history. + StartDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeAlarmHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmHistoryInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarmHistory action. +type DescribeAlarmHistoryOutput struct { + _ struct{} `type:"structure"` + + // A list of alarm histories in JSON format. + AlarmHistoryItems []*AlarmHistoryItem `type:"list"` + + // A string that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAlarmHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmHistoryOutput) GoString() string { + return s.String() +} + +type DescribeAlarmsForMetricInput struct { + _ struct{} `type:"structure"` + + // The list of dimensions associated with the metric. + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric. + Namespace *string `min:"1" type:"string" required:"true"` + + // The period in seconds over which the statistic is applied. 
+ Period *int64 `min:"60" type:"integer"` + + // The statistic for the metric. + Statistic *string `type:"string" enum:"Statistic"` + + // The unit for the metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s DescribeAlarmsForMetricInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsForMetricInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarmsForMetric action. +type DescribeAlarmsForMetricOutput struct { + _ struct{} `type:"structure"` + + // A list of information for each alarm with the specified metric. + MetricAlarms []*MetricAlarm `type:"list"` +} + +// String returns the string representation +func (s DescribeAlarmsForMetricOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsForMetricOutput) GoString() string { + return s.String() +} + +type DescribeAlarmsInput struct { + _ struct{} `type:"structure"` + + // The action name prefix. + ActionPrefix *string `min:"1" type:"string"` + + // The alarm name prefix. AlarmNames cannot be specified if this parameter is + // specified. + AlarmNamePrefix *string `min:"1" type:"string"` + + // A list of alarm names to retrieve information for. + AlarmNames []*string `type:"list"` + + // The maximum number of alarm descriptions to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + // The state value to be used in matching alarms. 
+ StateValue *string `type:"string" enum:"StateValue"` +} + +// String returns the string representation +func (s DescribeAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsInput) GoString() string { + return s.String() +} + +// The output for the DescribeAlarms action. +type DescribeAlarmsOutput struct { + _ struct{} `type:"structure"` + + // A list of information for the specified alarms. + MetricAlarms []*MetricAlarm `type:"list"` + + // A string that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAlarmsOutput) GoString() string { + return s.String() +} + +// The Dimension data type further expands on the identity of a metric using +// a Name, Value pair. +// +// For examples that use one or more dimensions, see PutMetricData. +type Dimension struct { + _ struct{} `type:"structure"` + + // The name of the dimension. + Name *string `min:"1" type:"string" required:"true"` + + // The value representing the dimension measurement + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Dimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dimension) GoString() string { + return s.String() +} + +// The DimensionFilter data type is used to filter ListMetrics results. +type DimensionFilter struct { + _ struct{} `type:"structure"` + + // The dimension name to be matched. + Name *string `min:"1" type:"string" required:"true"` + + // The value of the dimension to be matched. 
+ Value *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DimensionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DimensionFilter) GoString() string { + return s.String() +} + +type DisableAlarmActionsInput struct { + _ struct{} `type:"structure"` + + // The names of the alarms to disable actions for. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DisableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAlarmActionsInput) GoString() string { + return s.String() +} + +type DisableAlarmActionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAlarmActionsOutput) GoString() string { + return s.String() +} + +type EnableAlarmActionsInput struct { + _ struct{} `type:"structure"` + + // The names of the alarms to enable actions for. + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s EnableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAlarmActionsInput) GoString() string { + return s.String() +} + +type EnableAlarmActionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAlarmActionsOutput) GoString() string { + return s.String() +} + +type GetMetricStatisticsInput struct { + _ struct{} `type:"structure"` + + // A list of dimensions describing qualities of the metric. 
+ Dimensions []*Dimension `type:"list"` + + // The time stamp to use for determining the last datapoint to return. The value + // specified is exclusive; results will include datapoints up to the time stamp + // specified. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the metric, with or without spaces. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric, with or without spaces. + Namespace *string `min:"1" type:"string" required:"true"` + + // The granularity, in seconds, of the returned datapoints. Period must be at + // least 60 seconds and must be a multiple of 60. The default value is 60. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The time stamp to use for determining the first datapoint to return. The + // value specified is inclusive; results include datapoints with the time stamp + // specified. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The metric statistics to return. For information about specific statistics + // returned by GetMetricStatistics, go to Statistics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) + // in the Amazon CloudWatch Developer Guide. + // + // Valid Values: Average | Sum | SampleCount | Maximum | Minimum + Statistics []*string `min:"1" type:"list" required:"true"` + + // The unit for the metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s GetMetricStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricStatisticsInput) GoString() string { + return s.String() +} + +// The output for the GetMetricStatistics action. +type GetMetricStatisticsOutput struct { + _ struct{} `type:"structure"` + + // The datapoints for the specified metric. 
+ Datapoints []*Datapoint `type:"list"` + + // A label describing the specified metric. + Label *string `type:"string"` +} + +// String returns the string representation +func (s GetMetricStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricStatisticsOutput) GoString() string { + return s.String() +} + +type ListMetricsInput struct { + _ struct{} `type:"structure"` + + // A list of dimensions to filter against. + Dimensions []*DimensionFilter `type:"list"` + + // The name of the metric to filter against. + MetricName *string `min:"1" type:"string"` + + // The namespace to filter against. + Namespace *string `min:"1" type:"string"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListMetricsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMetricsInput) GoString() string { + return s.String() +} + +// The output for the ListMetrics action. +type ListMetricsOutput struct { + _ struct{} `type:"structure"` + + // A list of metrics used to generate statistics for an AWS account. + Metrics []*Metric `type:"list"` + + // A string that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListMetricsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMetricsOutput) GoString() string { + return s.String() +} + +// The Metric data type contains information about a specific metric. If you +// call ListMetrics, Amazon CloudWatch returns information contained by this +// data type. +// +// The example in the Examples section publishes two metrics named buffers +// and latency. Both metrics are in the examples namespace. 
Both metrics have +// two dimensions, InstanceID and InstanceType. +type Metric struct { + _ struct{} `type:"structure"` + + // A list of dimensions associated with the metric. + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string"` + + // The namespace of the metric. + Namespace *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Metric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metric) GoString() string { + return s.String() +} + +// The MetricAlarm data type represents an alarm. You can use PutMetricAlarm +// to create or update an alarm. +type MetricAlarm struct { + _ struct{} `type:"structure"` + + // Indicates whether actions should be executed during any changes to the alarm's + // state. + ActionsEnabled *bool `type:"boolean"` + + // The list of actions to execute when this alarm transitions into an ALARM + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only actions supported are publishing to an Amazon + // SNS topic and triggering an Auto Scaling policy. + AlarmActions []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the alarm. + AlarmArn *string `min:"1" type:"string"` + + // The time stamp of the last update to the alarm configuration. Amazon CloudWatch + // uses Coordinated Universal Time (UTC) when returning time stamps, which do + // not accommodate seasonal adjustments such as daylight savings time. For more + // information, see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + AlarmConfigurationUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The description for the alarm. + AlarmDescription *string `type:"string"` + + // The name of the alarm. 
+ AlarmName *string `min:"1" type:"string"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. The specified Statistic value is used as the first operand. + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // The list of dimensions associated with the alarm's associated metric. + Dimensions []*Dimension `type:"list"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `min:"1" type:"integer"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only actions supported are publishing to an Amazon + // SNS topic or triggering an Auto Scaling policy. + // + // The current WSDL lists this attribute as UnknownActions. + InsufficientDataActions []*string `type:"list"` + + // The name of the alarm's metric. + MetricName *string `min:"1" type:"string"` + + // The namespace of alarm's associated metric. + Namespace *string `min:"1" type:"string"` + + // The list of actions to execute when this alarm transitions into an OK state + // from any other state. Each action is specified as an Amazon Resource Number + // (ARN). Currently the only actions supported are publishing to an Amazon SNS + // topic and triggering an Auto Scaling policy. + OKActions []*string `type:"list"` + + // The period in seconds over which the statistic is applied. + Period *int64 `min:"60" type:"integer"` + + // A human-readable explanation for the alarm's state. + StateReason *string `type:"string"` + + // An explanation for the alarm's state in machine-readable JSON format + StateReasonData *string `type:"string"` + + // The time stamp of the last update to the alarm's state. 
Amazon CloudWatch + // uses Coordinated Universal Time (UTC) when returning time stamps, which do + // not accommodate seasonal adjustments such as daylight savings time. For more + // information, see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + StateUpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state value for the alarm. + StateValue *string `type:"string" enum:"StateValue"` + + // The statistic to apply to the alarm's associated metric. + Statistic *string `type:"string" enum:"Statistic"` + + // The value against which the specified statistic is compared. + Threshold *float64 `type:"double"` + + // The unit of the alarm's associated metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s MetricAlarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricAlarm) GoString() string { + return s.String() +} + +// The MetricDatum data type encapsulates the information sent with PutMetricData +// to either create a new metric or add new values to be aggregated into an +// existing metric. +type MetricDatum struct { + _ struct{} `type:"structure"` + + // A list of dimensions associated with the metric. Note, when using the Dimensions + // value in a query, you need to append .member.N to it (e.g., Dimensions.member.N). + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // A set of statistical values describing the metric. + StatisticValues *StatisticSet `type:"structure"` + + // The time stamp used for the metric. If not specified, the default value is + // set to the time the metric data was received. 
Amazon CloudWatch uses Coordinated + // Universal Time (UTC) when returning time stamps, which do not accommodate + // seasonal adjustments such as daylight savings time. For more information, + // see Time stamps (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#about_timestamp) + // in the Amazon CloudWatch Developer Guide. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The unit of the metric. + Unit *string `type:"string" enum:"StandardUnit"` + + // The value for the metric. + // + // Although the Value parameter accepts numbers of type Double, Amazon CloudWatch + // truncates values with very large exponents. Values with base-10 exponents + // greater than 126 (1 x 10^126) are truncated. Likewise, values with base-10 + // exponents less than -130 (1 x 10^-130) are also truncated. + Value *float64 `type:"double"` +} + +// String returns the string representation +func (s MetricDatum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricDatum) GoString() string { + return s.String() +} + +type PutMetricAlarmInput struct { + _ struct{} `type:"structure"` + + // Indicates whether or not actions should be executed during any changes to + // the alarm's state. + ActionsEnabled *bool `type:"boolean"` + + // The list of actions to execute when this alarm transitions into an ALARM + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only action supported is publishing to an Amazon + // SNS topic or an Amazon Auto Scaling policy. + AlarmActions []*string `type:"list"` + + // The description for the alarm. + AlarmDescription *string `type:"string"` + + // The descriptive name for the alarm. 
This name must be unique within the user's + // AWS account + AlarmName *string `min:"1" type:"string" required:"true"` + + // The arithmetic operation to use when comparing the specified Statistic and + // Threshold. The specified Statistic value is used as the first operand. + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // The dimensions for the alarm's associated metric. + Dimensions []*Dimension `type:"list"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` + + // The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Number (ARN). Currently the only action supported is publishing to an Amazon + // SNS topic or an Amazon Auto Scaling policy. + InsufficientDataActions []*string `type:"list"` + + // The name for the alarm's associated metric. + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace for the alarm's associated metric. + Namespace *string `min:"1" type:"string" required:"true"` + + // The list of actions to execute when this alarm transitions into an OK state + // from any other state. Each action is specified as an Amazon Resource Number + // (ARN). Currently the only action supported is publishing to an Amazon SNS + // topic or an Amazon Auto Scaling policy. + OKActions []*string `type:"list"` + + // The period in seconds over which the specified statistic is applied. + Period *int64 `min:"60" type:"integer" required:"true"` + + // The statistic to apply to the alarm's associated metric. + Statistic *string `type:"string" required:"true" enum:"Statistic"` + + // The value against which the specified statistic is compared. + Threshold *float64 `type:"double" required:"true"` + + // The unit for the alarm's associated metric. 
+ Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation +func (s PutMetricAlarmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricAlarmInput) GoString() string { + return s.String() +} + +type PutMetricAlarmOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricAlarmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricAlarmOutput) GoString() string { + return s.String() +} + +type PutMetricDataInput struct { + _ struct{} `type:"structure"` + + // A list of data describing the metric. + MetricData []*MetricDatum `type:"list" required:"true"` + + // The namespace for the metric data. + Namespace *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutMetricDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricDataInput) GoString() string { + return s.String() +} + +type PutMetricDataOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricDataOutput) GoString() string { + return s.String() +} + +type SetAlarmStateInput struct { + _ struct{} `type:"structure"` + + // The descriptive name for the alarm. This name must be unique within the user's + // AWS account. The maximum length is 255 characters. 
+ AlarmName *string `min:"1" type:"string" required:"true"` + + // The reason that this alarm is set to this specific state (in human-readable + // text format) + StateReason *string `type:"string" required:"true"` + + // The reason that this alarm is set to this specific state (in machine-readable + // JSON format) + StateReasonData *string `type:"string"` + + // The value of the state. + StateValue *string `type:"string" required:"true" enum:"StateValue"` +} + +// String returns the string representation +func (s SetAlarmStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetAlarmStateInput) GoString() string { + return s.String() +} + +type SetAlarmStateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetAlarmStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetAlarmStateOutput) GoString() string { + return s.String() +} + +// The StatisticSet data type describes the StatisticValues component of MetricDatum, +// and represents a set of statistics that describes a specific metric. +type StatisticSet struct { + _ struct{} `type:"structure"` + + // The maximum value of the sample set. + Maximum *float64 `type:"double" required:"true"` + + // The minimum value of the sample set. + Minimum *float64 `type:"double" required:"true"` + + // The number of samples used for the statistic set. + SampleCount *float64 `type:"double" required:"true"` + + // The sum of values for the sample set. 
+ Sum *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s StatisticSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatisticSet) GoString() string { + return s.String() +} + +const ( + // @enum ComparisonOperator + ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + // @enum ComparisonOperator + ComparisonOperatorGreaterThanThreshold = "GreaterThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanThreshold = "LessThanThreshold" + // @enum ComparisonOperator + ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" +) + +const ( + // @enum HistoryItemType + HistoryItemTypeConfigurationUpdate = "ConfigurationUpdate" + // @enum HistoryItemType + HistoryItemTypeStateUpdate = "StateUpdate" + // @enum HistoryItemType + HistoryItemTypeAction = "Action" +) + +const ( + // @enum StandardUnit + StandardUnitSeconds = "Seconds" + // @enum StandardUnit + StandardUnitMicroseconds = "Microseconds" + // @enum StandardUnit + StandardUnitMilliseconds = "Milliseconds" + // @enum StandardUnit + StandardUnitBytes = "Bytes" + // @enum StandardUnit + StandardUnitKilobytes = "Kilobytes" + // @enum StandardUnit + StandardUnitMegabytes = "Megabytes" + // @enum StandardUnit + StandardUnitGigabytes = "Gigabytes" + // @enum StandardUnit + StandardUnitTerabytes = "Terabytes" + // @enum StandardUnit + StandardUnitBits = "Bits" + // @enum StandardUnit + StandardUnitKilobits = "Kilobits" + // @enum StandardUnit + StandardUnitMegabits = "Megabits" + // @enum StandardUnit + StandardUnitGigabits = "Gigabits" + // @enum StandardUnit + StandardUnitTerabits = "Terabits" + // @enum StandardUnit + StandardUnitPercent = "Percent" + // @enum StandardUnit + StandardUnitCount = "Count" + // @enum StandardUnit + StandardUnitBytesSecond = "Bytes/Second" + // @enum StandardUnit + StandardUnitKilobytesSecond = "Kilobytes/Second" + 
// @enum StandardUnit + StandardUnitMegabytesSecond = "Megabytes/Second" + // @enum StandardUnit + StandardUnitGigabytesSecond = "Gigabytes/Second" + // @enum StandardUnit + StandardUnitTerabytesSecond = "Terabytes/Second" + // @enum StandardUnit + StandardUnitBitsSecond = "Bits/Second" + // @enum StandardUnit + StandardUnitKilobitsSecond = "Kilobits/Second" + // @enum StandardUnit + StandardUnitMegabitsSecond = "Megabits/Second" + // @enum StandardUnit + StandardUnitGigabitsSecond = "Gigabits/Second" + // @enum StandardUnit + StandardUnitTerabitsSecond = "Terabits/Second" + // @enum StandardUnit + StandardUnitCountSecond = "Count/Second" + // @enum StandardUnit + StandardUnitNone = "None" +) + +const ( + // @enum StateValue + StateValueOk = "OK" + // @enum StateValue + StateValueAlarm = "ALARM" + // @enum StateValue + StateValueInsufficientData = "INSUFFICIENT_DATA" +) + +const ( + // @enum Statistic + StatisticSampleCount = "SampleCount" + // @enum Statistic + StatisticAverage = "Average" + // @enum Statistic + StatisticSum = "Sum" + // @enum Statistic + StatisticMinimum = "Minimum" + // @enum Statistic + StatisticMaximum = "Maximum" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,64 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchiface provides an interface for the Amazon CloudWatch. 
+package cloudwatchiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +// CloudWatchAPI is the interface type for cloudwatch.CloudWatch. +type CloudWatchAPI interface { + DeleteAlarmsRequest(*cloudwatch.DeleteAlarmsInput) (*request.Request, *cloudwatch.DeleteAlarmsOutput) + + DeleteAlarms(*cloudwatch.DeleteAlarmsInput) (*cloudwatch.DeleteAlarmsOutput, error) + + DescribeAlarmHistoryRequest(*cloudwatch.DescribeAlarmHistoryInput) (*request.Request, *cloudwatch.DescribeAlarmHistoryOutput) + + DescribeAlarmHistory(*cloudwatch.DescribeAlarmHistoryInput) (*cloudwatch.DescribeAlarmHistoryOutput, error) + + DescribeAlarmHistoryPages(*cloudwatch.DescribeAlarmHistoryInput, func(*cloudwatch.DescribeAlarmHistoryOutput, bool) bool) error + + DescribeAlarmsRequest(*cloudwatch.DescribeAlarmsInput) (*request.Request, *cloudwatch.DescribeAlarmsOutput) + + DescribeAlarms(*cloudwatch.DescribeAlarmsInput) (*cloudwatch.DescribeAlarmsOutput, error) + + DescribeAlarmsPages(*cloudwatch.DescribeAlarmsInput, func(*cloudwatch.DescribeAlarmsOutput, bool) bool) error + + DescribeAlarmsForMetricRequest(*cloudwatch.DescribeAlarmsForMetricInput) (*request.Request, *cloudwatch.DescribeAlarmsForMetricOutput) + + DescribeAlarmsForMetric(*cloudwatch.DescribeAlarmsForMetricInput) (*cloudwatch.DescribeAlarmsForMetricOutput, error) + + DisableAlarmActionsRequest(*cloudwatch.DisableAlarmActionsInput) (*request.Request, *cloudwatch.DisableAlarmActionsOutput) + + DisableAlarmActions(*cloudwatch.DisableAlarmActionsInput) (*cloudwatch.DisableAlarmActionsOutput, error) + + EnableAlarmActionsRequest(*cloudwatch.EnableAlarmActionsInput) (*request.Request, *cloudwatch.EnableAlarmActionsOutput) + + EnableAlarmActions(*cloudwatch.EnableAlarmActionsInput) (*cloudwatch.EnableAlarmActionsOutput, error) + + GetMetricStatisticsRequest(*cloudwatch.GetMetricStatisticsInput) (*request.Request, *cloudwatch.GetMetricStatisticsOutput) + + 
GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) + + ListMetricsRequest(*cloudwatch.ListMetricsInput) (*request.Request, *cloudwatch.ListMetricsOutput) + + ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) + + ListMetricsPages(*cloudwatch.ListMetricsInput, func(*cloudwatch.ListMetricsOutput, bool) bool) error + + PutMetricAlarmRequest(*cloudwatch.PutMetricAlarmInput) (*request.Request, *cloudwatch.PutMetricAlarmOutput) + + PutMetricAlarm(*cloudwatch.PutMetricAlarmInput) (*cloudwatch.PutMetricAlarmOutput, error) + + PutMetricDataRequest(*cloudwatch.PutMetricDataInput) (*request.Request, *cloudwatch.PutMetricDataOutput) + + PutMetricData(*cloudwatch.PutMetricDataInput) (*cloudwatch.PutMetricDataOutput, error) + + SetAlarmStateRequest(*cloudwatch.SetAlarmStateInput) (*request.Request, *cloudwatch.SetAlarmStateOutput) + + SetAlarmState(*cloudwatch.SetAlarmStateInput) (*cloudwatch.SetAlarmStateOutput, error) +} + +var _ CloudWatchAPI = (*cloudwatch.CloudWatch)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,337 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudwatch_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatch_DeleteAlarms() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DeleteAlarmsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.DeleteAlarms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarmHistory() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmHistoryInput{ + AlarmName: aws.String("AlarmName"), + EndDate: aws.Time(time.Now()), + HistoryItemType: aws.String("HistoryItemType"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartDate: aws.Time(time.Now()), + } + resp, err := svc.DescribeAlarmHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarms() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmsInput{ + ActionPrefix: aws.String("ActionPrefix"), + AlarmNamePrefix: aws.String("AlarmNamePrefix"), + AlarmNames: []*string{ + aws.String("AlarmName"), // Required + // More values... + }, + MaxRecords: aws.Int64(1), + NextToken: aws.String("NextToken"), + StateValue: aws.String("StateValue"), + } + resp, err := svc.DescribeAlarms(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DescribeAlarmsForMetric() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DescribeAlarmsForMetricInput{ + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + Period: aws.Int64(1), + Statistic: aws.String("Statistic"), + Unit: aws.String("StandardUnit"), + } + resp, err := svc.DescribeAlarmsForMetric(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_DisableAlarmActions() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.DisableAlarmActionsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.DisableAlarmActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_EnableAlarmActions() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.EnableAlarmActionsInput{ + AlarmNames: []*string{ // Required + aws.String("AlarmName"), // Required + // More values... + }, + } + resp, err := svc.EnableAlarmActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatch_GetMetricStatistics() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.GetMetricStatisticsInput{ + EndTime: aws.Time(time.Now()), // Required + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Period: aws.Int64(1), // Required + StartTime: aws.Time(time.Now()), // Required + Statistics: []*string{ // Required + aws.String("Statistic"), // Required + // More values... + }, + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + Unit: aws.String("StandardUnit"), + } + resp, err := svc.GetMetricStatistics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_ListMetrics() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.ListMetricsInput{ + Dimensions: []*cloudwatch.DimensionFilter{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), + }, + // More values... + }, + MetricName: aws.String("MetricName"), + Namespace: aws.String("Namespace"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListMetrics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatch_PutMetricAlarm() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.PutMetricAlarmInput{ + AlarmName: aws.String("AlarmName"), // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + EvaluationPeriods: aws.Int64(1), // Required + MetricName: aws.String("MetricName"), // Required + Namespace: aws.String("Namespace"), // Required + Period: aws.Int64(1), // Required + Statistic: aws.String("Statistic"), // Required + Threshold: aws.Float64(1.0), // Required + ActionsEnabled: aws.Bool(true), + AlarmActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + AlarmDescription: aws.String("AlarmDescription"), + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... + }, + InsufficientDataActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + OKActions: []*string{ + aws.String("ResourceName"), // Required + // More values... + }, + Unit: aws.String("StandardUnit"), + } + resp, err := svc.PutMetricAlarm(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_PutMetricData() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.PutMetricDataInput{ + MetricData: []*cloudwatch.MetricDatum{ // Required + { // Required + MetricName: aws.String("MetricName"), // Required + Dimensions: []*cloudwatch.Dimension{ + { // Required + Name: aws.String("DimensionName"), // Required + Value: aws.String("DimensionValue"), // Required + }, + // More values... 
+ }, + StatisticValues: &cloudwatch.StatisticSet{ + Maximum: aws.Float64(1.0), // Required + Minimum: aws.Float64(1.0), // Required + SampleCount: aws.Float64(1.0), // Required + Sum: aws.Float64(1.0), // Required + }, + Timestamp: aws.Time(time.Now()), + Unit: aws.String("StandardUnit"), + Value: aws.Float64(1.0), + }, + // More values... + }, + Namespace: aws.String("Namespace"), // Required + } + resp, err := svc.PutMetricData(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatch_SetAlarmState() { + svc := cloudwatch.New(session.New()) + + params := &cloudwatch.SetAlarmStateInput{ + AlarmName: aws.String("AlarmName"), // Required + StateReason: aws.String("StateReason"), // Required + StateValue: aws.String("StateValue"), // Required + StateReasonData: aws.String("StateReasonData"), + } + resp, err := svc.SetAlarmState(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatch/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,125 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the Amazon CloudWatch API Reference. This guide provides detailed +// information about Amazon CloudWatch actions, data types, parameters, and +// errors. For detailed information about Amazon CloudWatch features and their +// associated API calls, go to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide). +// +// Amazon CloudWatch is a web service that enables you to publish, monitor, +// and manage various metrics, as well as configure alarm actions based on data +// from metrics. For more information about this product go to http://aws.amazon.com/cloudwatch +// (http://aws.amazon.com/cloudwatch). +// +// For information about the namespace, metric names, and dimensions that +// other Amazon Web Services products use to send metrics to Cloudwatch, go +// to Amazon CloudWatch Metrics, Namespaces, and Dimensions Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) +// in the Amazon CloudWatch Developer Guide. +// +// Use the following links to get started using the Amazon CloudWatch API Reference: +// +// Actions (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Operations.html): +// An alphabetical list of all Amazon CloudWatch actions. Data Types (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Types.html): +// An alphabetical list of all Amazon CloudWatch data types. Common Parameters +// (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CommonParameters.html): +// Parameters that all Query actions can use. 
Common Errors (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CommonErrors.html): +// Client and server errors that all actions can return. Regions and Endpoints +// (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized +// regions and endpoints for all AWS products. WSDL Location (http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl): +// http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl In addition +// to using the Amazon CloudWatch API, you can also use the following SDKs and +// third-party libraries to access Amazon CloudWatch programmatically. +// +// AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/) +// AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/) +// AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/) +// AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/) +// Developers in the AWS developer community also provide their own libraries, +// which you can find at the following AWS developer centers: +// +// AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer +// Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/) +// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET +// Developer Center (http://aws.amazon.com/net/) +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudWatch struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "monitoring" + +// New creates a new instance of the CloudWatch client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudWatch client from just a session. +// svc := cloudwatch.New(mySession) +// +// // Create a CloudWatch client with additional configuration +// svc := cloudwatch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatch { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatch { + svc := &CloudWatch{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-08-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatch operation and runs any +// custom request initialization. 
+func (c *CloudWatch) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1102 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchevents provides a client for Amazon CloudWatch Events. +package cloudwatchevents + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opDeleteRule = "DeleteRule" + +// DeleteRuleRequest generates a request for the DeleteRule operation. +func (c *CloudWatchEvents) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { + op := &request.Operation{ + Name: opDeleteRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRuleOutput{} + req.Data = output + return +} + +// Deletes a rule. You must remove all targets from a rule using RemoveTargets +// before you can delete the rule. 
+// +// Note: When you make a change with this action, incoming events might still +// continue to match to the deleted rule. Please allow a short period of time +// for changes to take effect. +func (c *CloudWatchEvents) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRule = "DescribeRule" + +// DescribeRuleRequest generates a request for the DescribeRule operation. +func (c *CloudWatchEvents) DescribeRuleRequest(input *DescribeRuleInput) (req *request.Request, output *DescribeRuleOutput) { + op := &request.Operation{ + Name: opDescribeRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRuleOutput{} + req.Data = output + return +} + +// Describes the details of the specified rule. +func (c *CloudWatchEvents) DescribeRule(input *DescribeRuleInput) (*DescribeRuleOutput, error) { + req, out := c.DescribeRuleRequest(input) + err := req.Send() + return out, err +} + +const opDisableRule = "DisableRule" + +// DisableRuleRequest generates a request for the DisableRule operation. +func (c *CloudWatchEvents) DisableRuleRequest(input *DisableRuleInput) (req *request.Request, output *DisableRuleOutput) { + op := &request.Operation{ + Name: opDisableRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableRuleOutput{} + req.Data = output + return +} + +// Disables a rule. A disabled rule won't match any events, and won't self-trigger +// if it has a schedule expression. +// +// Note: When you make a change with this action, incoming events might still +// continue to match to the disabled rule. 
Please allow a short period of time +// for changes to take effect. +func (c *CloudWatchEvents) DisableRule(input *DisableRuleInput) (*DisableRuleOutput, error) { + req, out := c.DisableRuleRequest(input) + err := req.Send() + return out, err +} + +const opEnableRule = "EnableRule" + +// EnableRuleRequest generates a request for the EnableRule operation. +func (c *CloudWatchEvents) EnableRuleRequest(input *EnableRuleInput) (req *request.Request, output *EnableRuleOutput) { + op := &request.Operation{ + Name: opEnableRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableRuleOutput{} + req.Data = output + return +} + +// Enables a rule. If the rule does not exist, the operation fails. +// +// Note: When you make a change with this action, incoming events might not +// immediately start matching to a newly enabled rule. Please allow a short +// period of time for changes to take effect. +func (c *CloudWatchEvents) EnableRule(input *EnableRuleInput) (*EnableRuleOutput, error) { + req, out := c.EnableRuleRequest(input) + err := req.Send() + return out, err +} + +const opListRuleNamesByTarget = "ListRuleNamesByTarget" + +// ListRuleNamesByTargetRequest generates a request for the ListRuleNamesByTarget operation. +func (c *CloudWatchEvents) ListRuleNamesByTargetRequest(input *ListRuleNamesByTargetInput) (req *request.Request, output *ListRuleNamesByTargetOutput) { + op := &request.Operation{ + Name: opListRuleNamesByTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRuleNamesByTargetInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRuleNamesByTargetOutput{} + req.Data = output + return +} + +// Lists the names of the rules that the given target is put to. 
Using this +// action, you can find out which of the rules in Amazon CloudWatch Events can +// invoke a specific target in your account. If you have more rules in your +// account than the given limit, the results will be paginated. In that case, +// use the next token returned in the response and repeat the ListRulesByTarget +// action until the NextToken in the response is returned as null. +func (c *CloudWatchEvents) ListRuleNamesByTarget(input *ListRuleNamesByTargetInput) (*ListRuleNamesByTargetOutput, error) { + req, out := c.ListRuleNamesByTargetRequest(input) + err := req.Send() + return out, err +} + +const opListRules = "ListRules" + +// ListRulesRequest generates a request for the ListRules operation. +func (c *CloudWatchEvents) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) { + op := &request.Operation{ + Name: opListRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRulesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRulesOutput{} + req.Data = output + return +} + +// Lists the Amazon CloudWatch Events rules in your account. You can either +// list all the rules or you can provide a prefix to match to the rule names. +// If you have more rules in your account than the given limit, the results +// will be paginated. In that case, use the next token returned in the response +// and repeat the ListRules action until the NextToken in the response is returned +// as null. +func (c *CloudWatchEvents) ListRules(input *ListRulesInput) (*ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + err := req.Send() + return out, err +} + +const opListTargetsByRule = "ListTargetsByRule" + +// ListTargetsByRuleRequest generates a request for the ListTargetsByRule operation. 
+func (c *CloudWatchEvents) ListTargetsByRuleRequest(input *ListTargetsByRuleInput) (req *request.Request, output *ListTargetsByRuleOutput) { + op := &request.Operation{ + Name: opListTargetsByRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTargetsByRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTargetsByRuleOutput{} + req.Data = output + return +} + +// Lists of targets assigned to the rule. +func (c *CloudWatchEvents) ListTargetsByRule(input *ListTargetsByRuleInput) (*ListTargetsByRuleOutput, error) { + req, out := c.ListTargetsByRuleRequest(input) + err := req.Send() + return out, err +} + +const opPutEvents = "PutEvents" + +// PutEventsRequest generates a request for the PutEvents operation. +func (c *CloudWatchEvents) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) { + op := &request.Operation{ + Name: opPutEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &PutEventsOutput{} + req.Data = output + return +} + +// Sends custom events to Amazon CloudWatch Events so that they can be matched +// to rules. +func (c *CloudWatchEvents) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { + req, out := c.PutEventsRequest(input) + err := req.Send() + return out, err +} + +const opPutRule = "PutRule" + +// PutRuleRequest generates a request for the PutRule operation. +func (c *CloudWatchEvents) PutRuleRequest(input *PutRuleInput) (req *request.Request, output *PutRuleOutput) { + op := &request.Operation{ + Name: opPutRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRuleOutput{} + req.Data = output + return +} + +// Creates or updates a rule. Rules are enabled by default, or based on value +// of the State parameter. 
You can disable a rule using DisableRule. +// +// Note: When you make a change with this action, incoming events might not +// immediately start matching to new or updated rules. Please allow a short +// period of time for changes to take effect. +// +// A rule must contain at least an EventPattern or ScheduleExpression. Rules +// with EventPatterns are triggered when a matching event is observed. Rules +// with ScheduleExpressions self-trigger based on the given schedule. A rule +// can have both an EventPattern and a ScheduleExpression, in which case the +// rule will trigger on matching events as well as on a schedule. +// +// Note: Most services in AWS treat : or / as the same character in Amazon +// Resource Names (ARNs). However, CloudWatch Events uses an exact match in +// event patterns and rules. Be sure to use the correct ARN characters when +// creating event patterns so that they match the ARN syntax in the event you +// want to match. +func (c *CloudWatchEvents) PutRule(input *PutRuleInput) (*PutRuleOutput, error) { + req, out := c.PutRuleRequest(input) + err := req.Send() + return out, err +} + +const opPutTargets = "PutTargets" + +// PutTargetsRequest generates a request for the PutTargets operation. +func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *request.Request, output *PutTargetsOutput) { + op := &request.Operation{ + Name: opPutTargets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutTargetsInput{} + } + + req = c.newRequest(op, input, output) + output = &PutTargetsOutput{} + req.Data = output + return +} + +// Adds target(s) to a rule. Updates the target(s) if they are already associated +// with the role. In other words, if there is already a target with the given +// target ID, then the target associated with that ID is updated. +// +// Note: When you make a change with this action, when the associated rule +// triggers, new or updated targets might not be immediately invoked. 
Please +// allow a short period of time for changes to take effect. +func (c *CloudWatchEvents) PutTargets(input *PutTargetsInput) (*PutTargetsOutput, error) { + req, out := c.PutTargetsRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTargets = "RemoveTargets" + +// RemoveTargetsRequest generates a request for the RemoveTargets operation. +func (c *CloudWatchEvents) RemoveTargetsRequest(input *RemoveTargetsInput) (req *request.Request, output *RemoveTargetsOutput) { + op := &request.Operation{ + Name: opRemoveTargets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTargetsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTargetsOutput{} + req.Data = output + return +} + +// Removes target(s) from a rule so that when the rule is triggered, those targets +// will no longer be invoked. +// +// Note: When you make a change with this action, when the associated rule +// triggers, removed targets might still continue to be invoked. Please allow +// a short period of time for changes to take effect. +func (c *CloudWatchEvents) RemoveTargets(input *RemoveTargetsInput) (*RemoveTargetsOutput, error) { + req, out := c.RemoveTargetsRequest(input) + err := req.Send() + return out, err +} + +const opTestEventPattern = "TestEventPattern" + +// TestEventPatternRequest generates a request for the TestEventPattern operation. +func (c *CloudWatchEvents) TestEventPatternRequest(input *TestEventPatternInput) (req *request.Request, output *TestEventPatternOutput) { + op := &request.Operation{ + Name: opTestEventPattern, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestEventPatternInput{} + } + + req = c.newRequest(op, input, output) + output = &TestEventPatternOutput{} + req.Data = output + return +} + +// Tests whether an event pattern matches the provided event. +// +// Note: Most services in AWS treat : or / as the same character in Amazon +// Resource Names (ARNs). 
However, CloudWatch Events uses an exact match in +// event patterns and rules. Be sure to use the correct ARN characters when +// creating event patterns so that they match the ARN syntax in the event you +// want to match. +func (c *CloudWatchEvents) TestEventPattern(input *TestEventPatternInput) (*TestEventPatternOutput, error) { + req, out := c.TestEventPatternRequest(input) + err := req.Send() + return out, err +} + +// Container for the parameters to the DeleteRule operation. +type DeleteRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule to be deleted. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleInput) GoString() string { + return s.String() +} + +type DeleteRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeRule operation. +type DescribeRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule you want to describe details for. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRuleInput) GoString() string { + return s.String() +} + +// The result of the DescribeRule operation. +type DescribeRuleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the rule. + Arn *string `min:"1" type:"string"` + + // The rule's description. 
+ Description *string `type:"string"` + + // The event pattern. + EventPattern *string `type:"string"` + + // The rule's name. + Name *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM role associated with the rule. + RoleArn *string `min:"1" type:"string"` + + // The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)". + ScheduleExpression *string `type:"string"` + + // Specifies whether the rule is enabled or disabled. + State *string `type:"string" enum:"RuleState"` +} + +// String returns the string representation +func (s DescribeRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DisableRule operation. +type DisableRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule you want to disable. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableRuleInput) GoString() string { + return s.String() +} + +type DisableRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the EnableRule operation. +type EnableRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule that you want to enable. 
+ Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableRuleInput) GoString() string { + return s.String() +} + +type EnableRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ListRuleNamesByTarget operation. +type ListRuleNamesByTargetInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + Limit *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the target resource that you want to list + // the rules for. + TargetArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRuleNamesByTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRuleNamesByTargetInput) GoString() string { + return s.String() +} + +// The result of the ListRuleNamesByTarget operation. +type ListRuleNamesByTargetOutput struct { + _ struct{} `type:"structure"` + + // Indicates that there are additional results to retrieve. + NextToken *string `min:"1" type:"string"` + + // List of rules names that can invoke the given target. 
+ RuleNames []*string `type:"list"` +} + +// String returns the string representation +func (s ListRuleNamesByTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRuleNamesByTargetOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ListRules operation. +type ListRulesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + Limit *int64 `min:"1" type:"integer"` + + // The prefix matching the rule name. + NamePrefix *string `min:"1" type:"string"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesInput) GoString() string { + return s.String() +} + +// The result of the ListRules operation. +type ListRulesOutput struct { + _ struct{} `type:"structure"` + + // Indicates that there are additional results to retrieve. + NextToken *string `min:"1" type:"string"` + + // List of rules matching the specified criteria. + Rules []*Rule `type:"list"` +} + +// String returns the string representation +func (s ListRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ListTargetsByRule operation. +type ListTargetsByRuleInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + Limit *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `min:"1" type:"string"` + + // The name of the rule whose targets you want to list. 
+ Rule *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTargetsByRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTargetsByRuleInput) GoString() string { + return s.String() +} + +// The result of the ListTargetsByRule operation. +type ListTargetsByRuleOutput struct { + _ struct{} `type:"structure"` + + // Indicates that there are additional results to retrieve. + NextToken *string `min:"1" type:"string"` + + // Lists the targets assigned to the rule. + Targets []*Target `type:"list"` +} + +// String returns the string representation +func (s ListTargetsByRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTargetsByRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the PutEvents operation. +type PutEventsInput struct { + _ struct{} `type:"structure"` + + // The entry that defines an event in your system. You can specify several parameters + // for the entry such as the source and type of the event, resources associated + // with the event, and so on. + Entries []*PutEventsRequestEntry `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsInput) GoString() string { + return s.String() +} + +// The result of the PutEvents operation. +type PutEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of successfully and unsuccessfully ingested events results. If the + // ingestion was successful, the entry will have the event ID in it. If not, + // then the ErrorCode and ErrorMessage can be used to identify the problem with + // the entry. + Entries []*PutEventsResultEntry `type:"list"` + + // The number of failed entries. 
+ FailedEntryCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s PutEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsOutput) GoString() string { + return s.String() +} + +// Contains information about the event to be used in the PutEvents action. +type PutEventsRequestEntry struct { + _ struct{} `type:"structure"` + + // In the JSON sense, an object containing fields, which may also contain nested + // sub-objects. No constraints are imposed on its contents. + Detail *string `type:"string"` + + // Free-form string used to decide what fields to expect in the event detail. + DetailType *string `type:"string"` + + // AWS resources, identified by Amazon Resource Name (ARN), which the event + // primarily concerns. Any number, including zero, may be present. + Resources []*string `type:"list"` + + // The source of the event. + Source *string `type:"string"` + + // Timestamp of event, per RFC3339 (https://www.rfc-editor.org/rfc/rfc3339.txt). + // If no timestamp is provided, the timestamp of the PutEvents call will be + // used. + Time *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s PutEventsRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsRequestEntry) GoString() string { + return s.String() +} + +// A PutEventsResult contains a list of PutEventsResultEntry. +type PutEventsResultEntry struct { + _ struct{} `type:"structure"` + + // The error code representing why the event submission failed on this entry. + ErrorCode *string `type:"string"` + + // The error message explaining why the event submission failed on this entry. + ErrorMessage *string `type:"string"` + + // The ID of the event submitted to Amazon CloudWatch Events. 
+ EventId *string `type:"string"` +} + +// String returns the string representation +func (s PutEventsResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsResultEntry) GoString() string { + return s.String() +} + +// Container for the parameters to the PutRule operation. +type PutRuleInput struct { + _ struct{} `type:"structure"` + + // A description of the rule. + Description *string `type:"string"` + + // The event pattern. + EventPattern *string `type:"string"` + + // The name of the rule that you are creating or updating. + Name *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role associated with the rule. + RoleArn *string `min:"1" type:"string"` + + // The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)". + ScheduleExpression *string `type:"string"` + + // Indicates whether the rule is enabled or disabled. + State *string `type:"string" enum:"RuleState"` +} + +// String returns the string representation +func (s PutRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRuleInput) GoString() string { + return s.String() +} + +// The result of the PutRule operation. +type PutRuleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the rule. + RuleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRuleOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the PutTargets operation. +type PutTargetsInput struct { + _ struct{} `type:"structure"` + + // The name of the rule you want to add targets to. 
+ Rule *string `min:"1" type:"string" required:"true"` + + // List of targets you want to update or add to the rule. + Targets []*Target `type:"list" required:"true"` +} + +// String returns the string representation +func (s PutTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutTargetsInput) GoString() string { + return s.String() +} + +// The result of the PutTargets operation. +type PutTargetsOutput struct { + _ struct{} `type:"structure"` + + // An array of failed target entries. + FailedEntries []*PutTargetsResultEntry `type:"list"` + + // The number of failed entries. + FailedEntryCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s PutTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutTargetsOutput) GoString() string { + return s.String() +} + +// A PutTargetsResult contains a list of PutTargetsResultEntry. +type PutTargetsResultEntry struct { + _ struct{} `type:"structure"` + + // The error code representing why the target submission failed on this entry. + ErrorCode *string `type:"string"` + + // The error message explaining why the target submission failed on this entry. + ErrorMessage *string `type:"string"` + + // The ID of the target submitted to Amazon CloudWatch Events. + TargetId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutTargetsResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutTargetsResultEntry) GoString() string { + return s.String() +} + +// Container for the parameters to the RemoveTargets operation. +type RemoveTargetsInput struct { + _ struct{} `type:"structure"` + + // The list of target IDs to remove from the rule. + Ids []*string `min:"1" type:"list" required:"true"` + + // The name of the rule you want to remove targets from. 
+ Rule *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTargetsInput) GoString() string { + return s.String() +} + +// The result of the RemoveTargets operation. +type RemoveTargetsOutput struct { + _ struct{} `type:"structure"` + + // An array of failed target entries. + FailedEntries []*RemoveTargetsResultEntry `type:"list"` + + // The number of failed entries. + FailedEntryCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s RemoveTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTargetsOutput) GoString() string { + return s.String() +} + +// The ID of the target requested to be removed from the rule by Amazon CloudWatch +// Events. +type RemoveTargetsResultEntry struct { + _ struct{} `type:"structure"` + + // The error code representing why the target removal failed on this entry. + ErrorCode *string `type:"string"` + + // The error message explaining why the target removal failed on this entry. + ErrorMessage *string `type:"string"` + + // The ID of the target requested to be removed by Amazon CloudWatch Events. + TargetId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RemoveTargetsResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTargetsResultEntry) GoString() string { + return s.String() +} + +// Contains information about a rule in Amazon CloudWatch Events. A ListRulesResult +// contains a list of Rules. +type Rule struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the rule. + Arn *string `min:"1" type:"string"` + + // The description of the rule. 
+ Description *string `type:"string"` + + // The event pattern of the rule. + EventPattern *string `type:"string"` + + // The rule's name. + Name *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) associated with the role that is used for + // target invocation. + RoleArn *string `min:"1" type:"string"` + + // The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)". + ScheduleExpression *string `type:"string"` + + // The rule's state. + State *string `type:"string" enum:"RuleState"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Targets are the resources that can be invoked when a rule is triggered. For +// example, AWS Lambda functions, Amazon Kinesis streams, and built-in targets. +// +// Input and InputPath are mutually-exclusive and optional parameters of a +// target. When a rule is triggered due to a matched event, if for a target: +// +// Neither Input nor InputPath is specified, then the entire event is passed +// to the target in JSON form. InputPath is specified in the form of JSONPath +// (e.g. $.detail), then only the part of the event specified in the path is +// passed to the target (e.g. only the detail part of the event is passed). +// Input is specified in the form of a valid JSON, then the matched event +// is overridden with this constant. +type Target struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated of the target. + Arn *string `min:"1" type:"string" required:"true"` + + // The unique target assignment ID. + Id *string `min:"1" type:"string" required:"true"` + + // Valid JSON text passed to the target. For more information about JSON text, + // see The JavaScript Object Notation (JSON) Data Interchange Format (http://www.rfc-editor.org/rfc/rfc7159.txt). 
+ Input *string `type:"string"` + + // The value of the JSONPath that is used for extracting part of the matched + // event when passing it to the target. For more information about JSON paths, + // see JSONPath (http://goessner.net/articles/JsonPath/). + InputPath *string `type:"string"` +} + +// String returns the string representation +func (s Target) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Target) GoString() string { + return s.String() +} + +// Container for the parameters to the TestEventPattern operation. +type TestEventPatternInput struct { + _ struct{} `type:"structure"` + + // The event in the JSON format to test against the event pattern. + Event *string `type:"string" required:"true"` + + // The event pattern you want to test. + EventPattern *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TestEventPatternInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestEventPatternInput) GoString() string { + return s.String() +} + +// The result of the TestEventPattern operation. +type TestEventPatternOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the event matches the event pattern. 
+ Result *bool `type:"boolean"` +} + +// String returns the string representation +func (s TestEventPatternOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestEventPatternOutput) GoString() string { + return s.String() +} + +const ( + // @enum RuleState + RuleStateEnabled = "ENABLED" + // @enum RuleState + RuleStateDisabled = "DISABLED" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/cloudwatcheventsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/cloudwatcheventsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/cloudwatcheventsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/cloudwatcheventsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,62 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatcheventsiface provides an interface for the Amazon CloudWatch Events. +package cloudwatcheventsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchevents" +) + +// CloudWatchEventsAPI is the interface type for cloudwatchevents.CloudWatchEvents. 
+type CloudWatchEventsAPI interface { + DeleteRuleRequest(*cloudwatchevents.DeleteRuleInput) (*request.Request, *cloudwatchevents.DeleteRuleOutput) + + DeleteRule(*cloudwatchevents.DeleteRuleInput) (*cloudwatchevents.DeleteRuleOutput, error) + + DescribeRuleRequest(*cloudwatchevents.DescribeRuleInput) (*request.Request, *cloudwatchevents.DescribeRuleOutput) + + DescribeRule(*cloudwatchevents.DescribeRuleInput) (*cloudwatchevents.DescribeRuleOutput, error) + + DisableRuleRequest(*cloudwatchevents.DisableRuleInput) (*request.Request, *cloudwatchevents.DisableRuleOutput) + + DisableRule(*cloudwatchevents.DisableRuleInput) (*cloudwatchevents.DisableRuleOutput, error) + + EnableRuleRequest(*cloudwatchevents.EnableRuleInput) (*request.Request, *cloudwatchevents.EnableRuleOutput) + + EnableRule(*cloudwatchevents.EnableRuleInput) (*cloudwatchevents.EnableRuleOutput, error) + + ListRuleNamesByTargetRequest(*cloudwatchevents.ListRuleNamesByTargetInput) (*request.Request, *cloudwatchevents.ListRuleNamesByTargetOutput) + + ListRuleNamesByTarget(*cloudwatchevents.ListRuleNamesByTargetInput) (*cloudwatchevents.ListRuleNamesByTargetOutput, error) + + ListRulesRequest(*cloudwatchevents.ListRulesInput) (*request.Request, *cloudwatchevents.ListRulesOutput) + + ListRules(*cloudwatchevents.ListRulesInput) (*cloudwatchevents.ListRulesOutput, error) + + ListTargetsByRuleRequest(*cloudwatchevents.ListTargetsByRuleInput) (*request.Request, *cloudwatchevents.ListTargetsByRuleOutput) + + ListTargetsByRule(*cloudwatchevents.ListTargetsByRuleInput) (*cloudwatchevents.ListTargetsByRuleOutput, error) + + PutEventsRequest(*cloudwatchevents.PutEventsInput) (*request.Request, *cloudwatchevents.PutEventsOutput) + + PutEvents(*cloudwatchevents.PutEventsInput) (*cloudwatchevents.PutEventsOutput, error) + + PutRuleRequest(*cloudwatchevents.PutRuleInput) (*request.Request, *cloudwatchevents.PutRuleOutput) + + PutRule(*cloudwatchevents.PutRuleInput) (*cloudwatchevents.PutRuleOutput, error) + + 
PutTargetsRequest(*cloudwatchevents.PutTargetsInput) (*request.Request, *cloudwatchevents.PutTargetsOutput) + + PutTargets(*cloudwatchevents.PutTargetsInput) (*cloudwatchevents.PutTargetsOutput, error) + + RemoveTargetsRequest(*cloudwatchevents.RemoveTargetsInput) (*request.Request, *cloudwatchevents.RemoveTargetsOutput) + + RemoveTargets(*cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) + + TestEventPatternRequest(*cloudwatchevents.TestEventPatternInput) (*request.Request, *cloudwatchevents.TestEventPatternOutput) + + TestEventPattern(*cloudwatchevents.TestEventPatternInput) (*cloudwatchevents.TestEventPatternOutput, error) +} + +var _ CloudWatchEventsAPI = (*cloudwatchevents.CloudWatchEvents)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,281 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatchevents_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchevents" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatchEvents_DeleteRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.DeleteRuleInput{ + Name: aws.String("RuleName"), // Required + } + resp, err := svc.DeleteRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_DescribeRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.DescribeRuleInput{ + Name: aws.String("RuleName"), // Required + } + resp, err := svc.DescribeRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_DisableRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.DisableRuleInput{ + Name: aws.String("RuleName"), // Required + } + resp, err := svc.DisableRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_EnableRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.EnableRuleInput{ + Name: aws.String("RuleName"), // Required + } + resp, err := svc.EnableRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_ListRuleNamesByTarget() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.ListRuleNamesByTargetInput{ + TargetArn: aws.String("TargetArn"), // Required + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListRuleNamesByTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchEvents_ListRules() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.ListRulesInput{ + Limit: aws.Int64(1), + NamePrefix: aws.String("RuleName"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListRules(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_ListTargetsByRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.ListTargetsByRuleInput{ + Rule: aws.String("RuleName"), // Required + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListTargetsByRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_PutEvents() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.PutEventsInput{ + Entries: []*cloudwatchevents.PutEventsRequestEntry{ // Required + { // Required + Detail: aws.String("String"), + DetailType: aws.String("String"), + Resources: []*string{ + aws.String("EventResource"), // Required + // More values... + }, + Source: aws.String("String"), + Time: aws.Time(time.Now()), + }, + // More values... + }, + } + resp, err := svc.PutEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchEvents_PutRule() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.PutRuleInput{ + Name: aws.String("RuleName"), // Required + Description: aws.String("RuleDescription"), + EventPattern: aws.String("EventPattern"), + RoleArn: aws.String("RoleArn"), + ScheduleExpression: aws.String("ScheduleExpression"), + State: aws.String("RuleState"), + } + resp, err := svc.PutRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_PutTargets() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.PutTargetsInput{ + Rule: aws.String("RuleName"), // Required + Targets: []*cloudwatchevents.Target{ // Required + { // Required + Arn: aws.String("TargetArn"), // Required + Id: aws.String("TargetId"), // Required + Input: aws.String("TargetInput"), + InputPath: aws.String("TargetInputPath"), + }, + // More values... + }, + } + resp, err := svc.PutTargets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchEvents_RemoveTargets() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.RemoveTargetsInput{ + Ids: []*string{ // Required + aws.String("TargetId"), // Required + // More values... + }, + Rule: aws.String("RuleName"), // Required + } + resp, err := svc.RemoveTargets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchEvents_TestEventPattern() { + svc := cloudwatchevents.New(session.New()) + + params := &cloudwatchevents.TestEventPatternInput{ + Event: aws.String("String"), // Required + EventPattern: aws.String("EventPattern"), // Required + } + resp, err := svc.TestEventPattern(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,101 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatchevents + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon CloudWatch Events helps you to respond to state changes in your AWS +// resources. When your resources change state they automatically send events +// into an event stream. You can create rules that match selected events in +// the stream and route them to targets to take action. You can also use rules +// to take action on a pre-determined schedule. 
For example, you can configure +// rules to: +// +// Automatically invoke an AWS Lambda function to update DNS entries when +// an event notifies you that Amazon EC2 instance enters the running state. +// Direct specific API records from CloudTrail to an Amazon Kinesis stream for +// detailed analysis of potential security or availability risks. Periodically +// invoke a built-in target to create a snapshot of an Amazon EBS volume. +// For more information about Amazon CloudWatch Events features, see the Amazon +// CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudWatchEvents struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "events" + +// New creates a new instance of the CloudWatchEvents client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudWatchEvents client from just a session. +// svc := cloudwatchevents.New(mySession) +// +// // Create a CloudWatchEvents client with additional configuration +// svc := cloudwatchevents.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchEvents { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchEvents { + svc := &CloudWatchEvents{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-02-03", + JSONVersion: "1.1", + TargetPrefix: "AWSEvents", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatchEvents operation and runs any +// custom request initialization. +func (c *CloudWatchEvents) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2499 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchlogs provides a client for Amazon CloudWatch Logs. 
+package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a request for the CancelExportTask operation. +func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelExportTaskOutput{} + req.Data = output + return +} + +// Cancels an export task if it is in PENDING or RUNNING state. +func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateExportTask = "CreateExportTask" + +// CreateExportTaskRequest generates a request for the CreateExportTask operation. +func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { + op := &request.Operation{ + Name: opCreateExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateExportTaskOutput{} + req.Data = output + return +} + +// Creates an ExportTask which allows you to efficiently export data from a +// Log Group to your Amazon S3 bucket. +// +// This is an asynchronous call. If all the required information is provided, +// this API will initiate an export task and respond with the task Id. 
Once +// started, DescribeExportTasks can be used to get the status of an export task. +// +// You can export logs from multiple log groups or multiple time ranges to +// the same Amazon S3 bucket. To separate out log data for each export task, +// you can specify a prefix that will be used as the Amazon S3 key prefix for +// all exported objects. +func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { + req, out := c.CreateExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateLogGroup = "CreateLogGroup" + +// CreateLogGroupRequest generates a request for the CreateLogGroup operation. +func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { + op := &request.Operation{ + Name: opCreateLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateLogGroupOutput{} + req.Data = output + return +} + +// Creates a new log group with the specified name. The name of the log group +// must be unique within a region for an AWS account. You can create up to 500 +// log groups per account. +// +// You must use the following guidelines when naming a log group: Log group +// names can be between 1 and 512 characters long. Allowed characters are a-z, +// A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). +func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { + req, out := c.CreateLogGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateLogStream = "CreateLogStream" + +// CreateLogStreamRequest generates a request for the CreateLogStream operation. 
+func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { + op := &request.Operation{ + Name: opCreateLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateLogStreamOutput{} + req.Data = output + return +} + +// Creates a new log stream in the specified log group. The name of the log +// stream must be unique within the log group. There is no limit on the number +// of log streams that can exist in a log group. +// +// You must use the following guidelines when naming a log stream: Log stream +// names can be between 1 and 512 characters long. The ':' colon character is +// not allowed. +func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { + req, out := c.CreateLogStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDestination = "DeleteDestination" + +// DeleteDestinationRequest generates a request for the DeleteDestination operation. +func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { + op := &request.Operation{ + Name: opDeleteDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDestinationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDestinationOutput{} + req.Data = output + return +} + +// Deletes the destination with the specified name and eventually disables all +// the subscription filters that publish to it. 
This will not delete the physical +// resource encapsulated by the destination. +func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) { + req, out := c.DeleteDestinationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLogGroup = "DeleteLogGroup" + +// DeleteLogGroupRequest generates a request for the DeleteLogGroup operation. +func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { + op := &request.Operation{ + Name: opDeleteLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLogGroupOutput{} + req.Data = output + return +} + +// Deletes the log group with the specified name and permanently deletes all +// the archived log events associated with it. +func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) { + req, out := c.DeleteLogGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLogStream = "DeleteLogStream" + +// DeleteLogStreamRequest generates a request for the DeleteLogStream operation. 
+func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { + op := &request.Operation{ + Name: opDeleteLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLogStreamOutput{} + req.Data = output + return +} + +// Deletes a log stream and permanently deletes all the archived log events +// associated with it. +func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) { + req, out := c.DeleteLogStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMetricFilter = "DeleteMetricFilter" + +// DeleteMetricFilterRequest generates a request for the DeleteMetricFilter operation. +func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { + op := &request.Operation{ + Name: opDeleteMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMetricFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMetricFilterOutput{} + req.Data = output + return +} + +// Deletes a metric filter associated with the specified log group. +func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) { + req, out := c.DeleteMetricFilterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRetentionPolicy = "DeleteRetentionPolicy" + +// DeleteRetentionPolicyRequest generates a request for the DeleteRetentionPolicy operation. 
+func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRetentionPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRetentionPolicyOutput{} + req.Data = output + return +} + +// Deletes the retention policy of the specified log group. Log events would +// not expire if they belong to log groups without a retention policy. +func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" + +// DeleteSubscriptionFilterRequest generates a request for the DeleteSubscriptionFilter operation. +func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opDeleteSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubscriptionFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSubscriptionFilterOutput{} + req.Data = output + return +} + +// Deletes a subscription filter associated with the specified log group. 
+func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { + req, out := c.DeleteSubscriptionFilterRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDestinations = "DescribeDestinations" + +// DescribeDestinationsRequest generates a request for the DescribeDestinations operation. +func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { + op := &request.Operation{ + Name: opDescribeDestinations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDestinationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDestinationsOutput{} + req.Data = output + return +} + +// Returns all the destinations that are associated with the AWS account making +// the request. The list returned in the response is ASCII-sorted by destination +// name. +// +// By default, this operation returns up to 50 destinations. If there are +// more destinations to list, the response would contain a nextToken value in +// the response body. You can also limit the number of destinations returned +// in the response by specifying the limit parameter in the request. 
+func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { + req, out := c.DescribeDestinationsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDestinationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDestinationsOutput), lastPage) + }) +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. +func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportTasksOutput{} + req.Data = output + return +} + +// Returns all the export tasks that are associated with the AWS account making +// the request. The export tasks can be filtered based on TaskId or TaskStatus. +// +// By default, this operation returns up to 50 export tasks that satisfy the +// specified filters. If there are more export tasks to list, the response would +// contain a nextToken value in the response body. You can also limit the number +// of export tasks returned in the response by specifying the limit parameter +// in the request. 
+func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLogGroups = "DescribeLogGroups" + +// DescribeLogGroupsRequest generates a request for the DescribeLogGroups operation. +func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { + op := &request.Operation{ + Name: opDescribeLogGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLogGroupsOutput{} + req.Data = output + return +} + +// Returns all the log groups that are associated with the AWS account making +// the request. The list returned in the response is ASCII-sorted by log group +// name. +// +// By default, this operation returns up to 50 log groups. If there are more +// log groups to list, the response would contain a nextToken value in the response +// body. You can also limit the number of log groups returned in the response +// by specifying the limit parameter in the request. 
+func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { + req, out := c.DescribeLogGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeLogGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeLogGroupsOutput), lastPage) + }) +} + +const opDescribeLogStreams = "DescribeLogStreams" + +// DescribeLogStreamsRequest generates a request for the DescribeLogStreams operation. +func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { + op := &request.Operation{ + Name: opDescribeLogStreams, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLogStreamsOutput{} + req.Data = output + return +} + +// Returns all the log streams that are associated with the specified log group. +// The list returned in the response is ASCII-sorted by log stream name. +// +// By default, this operation returns up to 50 log streams. If there are more +// log streams to list, the response would contain a nextToken value in the +// response body. You can also limit the number of log streams returned in the +// response by specifying the limit parameter in the request. This operation +// has a limit of five transactions per second, after which transactions are +// throttled. 
+func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) { + req, out := c.DescribeLogStreamsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeLogStreamsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeLogStreamsOutput), lastPage) + }) +} + +const opDescribeMetricFilters = "DescribeMetricFilters" + +// DescribeMetricFiltersRequest generates a request for the DescribeMetricFilters operation. +func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) { + op := &request.Operation{ + Name: opDescribeMetricFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMetricFiltersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMetricFiltersOutput{} + req.Data = output + return +} + +// Returns all the metrics filters associated with the specified log group. +// The list returned in the response is ASCII-sorted by filter name. +// +// By default, this operation returns up to 50 metric filters. If there are +// more metric filters to list, the response would contain a nextToken value +// in the response body. You can also limit the number of metric filters returned +// in the response by specifying the limit parameter in the request. 
+func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) { + req, out := c.DescribeMetricFiltersRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeMetricFiltersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeMetricFiltersOutput), lastPage) + }) +} + +const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" + +// DescribeSubscriptionFiltersRequest generates a request for the DescribeSubscriptionFilters operation. +func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { + op := &request.Operation{ + Name: opDescribeSubscriptionFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSubscriptionFiltersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSubscriptionFiltersOutput{} + req.Data = output + return +} + +// Returns all the subscription filters associated with the specified log group. +// The list returned in the response is ASCII-sorted by filter name. +// +// By default, this operation returns up to 50 subscription filters. If there +// are more subscription filters to list, the response would contain a nextToken +// value in the response body. You can also limit the number of subscription +// filters returned in the response by specifying the limit parameter in the +// request. 
+func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) { + req, out := c.DescribeSubscriptionFiltersRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSubscriptionFiltersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSubscriptionFiltersOutput), lastPage) + }) +} + +const opFilterLogEvents = "FilterLogEvents" + +// FilterLogEventsRequest generates a request for the FilterLogEvents operation. +func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) { + op := &request.Operation{ + Name: opFilterLogEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &FilterLogEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &FilterLogEventsOutput{} + req.Data = output + return +} + +// Retrieves log events, optionally filtered by a filter pattern from the specified +// log group. You can provide an optional time range to filter the results on +// the event timestamp. You can limit the streams searched to an explicit list +// of logStreamNames. +// +// By default, this operation returns as much matching log events as can fit +// in a response size of 1MB, up to 10,000 log events, or all the events found +// within a time-bounded scan window. 
If the response includes a nextToken, +// then there is more data to search, and the search can be resumed with a new +// request providing the nextToken. The response will contain a list of searchedLogStreams +// that contains information about which streams were searched in the request +// and whether they have been searched completely or require further pagination. +// The limit parameter in the request. can be used to specify the maximum number +// of events to return in a page. +func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) { + req, out := c.FilterLogEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.FilterLogEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*FilterLogEventsOutput), lastPage) + }) +} + +const opGetLogEvents = "GetLogEvents" + +// GetLogEventsRequest generates a request for the GetLogEvents operation. +func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) { + op := &request.Operation{ + Name: opGetLogEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextForwardToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetLogEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetLogEventsOutput{} + req.Data = output + return +} + +// Retrieves log events from the specified log stream. You can provide an optional +// time range to filter the results on the event timestamp. 
+// +// By default, this operation returns as much log events as can fit in a response +// size of 1MB, up to 10,000 log events. The response will always include a +// nextForwardToken and a nextBackwardToken in the response body. You can use +// any of these tokens in subsequent GetLogEvents requests to paginate through +// events in either forward or backward direction. You can also limit the number +// of log events returned in the response by specifying the limit parameter +// in the request. +func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) { + req, out := c.GetLogEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetLogEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetLogEventsOutput), lastPage) + }) +} + +const opPutDestination = "PutDestination" + +// PutDestinationRequest generates a request for the PutDestination operation. +func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) { + op := &request.Operation{ + Name: opPutDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDestinationInput{} + } + + req = c.newRequest(op, input, output) + output = &PutDestinationOutput{} + req.Data = output + return +} + +// Creates or updates a Destination. A destination encapsulates a physical resource +// (such as a Kinesis stream) and allows you to subscribe to a real-time stream +// of log events of a different account, ingested through PutLogEvents requests. +// Currently, the only supported physical resource is a Amazon Kinesis stream +// belonging to the same account as the destination. 
+// +// A destination controls what is written to its Amazon Kinesis stream through +// an access policy. By default, PutDestination does not set any access policy +// with the destination, which means a cross-account user will not be able to +// call PutSubscriptionFilter against this destination. To enable that, the +// destination owner must call PutDestinationPolicy after PutDestination. +func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) { + req, out := c.PutDestinationRequest(input) + err := req.Send() + return out, err +} + +const opPutDestinationPolicy = "PutDestinationPolicy" + +// PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation. +func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) { + op := &request.Operation{ + Name: opPutDestinationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDestinationPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutDestinationPolicyOutput{} + req.Data = output + return +} + +// Creates or updates an access policy associated with an existing Destination. +// An access policy is an IAM policy document (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) +// that is used to authorize claims to register a subscription filter against +// a given destination. +func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) { + req, out := c.PutDestinationPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutLogEvents = "PutLogEvents" + +// PutLogEventsRequest generates a request for the PutLogEvents operation. 
+func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) {
+ op := &request.Operation{
+ Name: opPutLogEvents,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutLogEventsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutLogEventsOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads a batch of log events to the specified log stream.
+//
+// Every PutLogEvents request must include the sequenceToken obtained from
+// the response of the previous request. An upload in a newly created log stream
+// does not require a sequenceToken.
+//
+// The batch of events must satisfy the following constraints: The maximum
+// batch size is 1,048,576 bytes, and this size is calculated as the sum of
+// all event messages in UTF-8, plus 26 bytes for each log event. None of the
+// log events in the batch can be more than 2 hours in the future. None of the
+// log events in the batch can be older than 14 days or the retention period
+// of the log group. The log events in the batch must be in chronological order
+// by their timestamp. The maximum number of log events in a batch is 10,000.
+func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) {
+ req, out := c.PutLogEventsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutMetricFilter = "PutMetricFilter"
+
+// PutMetricFilterRequest generates a request for the PutMetricFilter operation. 
+func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) { + op := &request.Operation{ + Name: opPutMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutMetricFilterOutput{} + req.Data = output + return +} + +// Creates or updates a metric filter and associates it with the specified log +// group. Metric filters allow you to configure rules to extract metric data +// from log events ingested through PutLogEvents requests. +// +// The maximum number of metric filters that can be associated with a log +// group is 100. +func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) { + req, out := c.PutMetricFilterRequest(input) + err := req.Send() + return out, err +} + +const opPutRetentionPolicy = "PutRetentionPolicy" + +// PutRetentionPolicyRequest generates a request for the PutRetentionPolicy operation. +func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { + op := &request.Operation{ + Name: opPutRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRetentionPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutRetentionPolicyOutput{} + req.Data = output + return +} + +// Sets the retention of the specified log group. A retention policy allows +// you to configure the number of days you want to retain log events in the +// specified log group. 
+func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) {
+ req, out := c.PutRetentionPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutSubscriptionFilter = "PutSubscriptionFilter"
+
+// PutSubscriptionFilterRequest generates a request for the PutSubscriptionFilter operation.
+func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) {
+ op := &request.Operation{
+ Name: opPutSubscriptionFilter,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutSubscriptionFilterInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutSubscriptionFilterOutput{}
+ req.Data = output
+ return
+}
+
+// Creates or updates a subscription filter and associates it with the specified
+// log group. Subscription filters allow you to subscribe to a real-time stream
+// of log events ingested through PutLogEvents requests and have them delivered
+// to a specific destination. Currently, the supported destinations are: An
+// Amazon Kinesis stream belonging to the same account as the subscription filter,
+// for same-account delivery. A logical destination (used via an ARN of Destination)
+// belonging to a different account, for cross-account delivery.
+//
+// Currently there can only be one subscription filter associated with a log
+// group.
+func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) {
+ req, out := c.PutSubscriptionFilterRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opTestMetricFilter = "TestMetricFilter"
+
+// TestMetricFilterRequest generates a request for the TestMetricFilter operation. 
+func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) { + op := &request.Operation{ + Name: opTestMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestMetricFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &TestMetricFilterOutput{} + req.Data = output + return +} + +// Tests the filter pattern of a metric filter against a sample of log event +// messages. You can use this operation to validate the correctness of a metric +// filter pattern. +func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) { + req, out := c.TestMetricFilterRequest(input) + err := req.Send() + return out, err +} + +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // Id of the export task to cancel. + TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +type CreateExportTaskInput struct { + _ struct{} `type:"structure"` + + // Name of Amazon S3 bucket to which the log data will be exported. + // + // NOTE: Only buckets in the same AWS region are supported + Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` + + // Prefix that will be used as the start of Amazon S3 key for every object exported. 
+ // If not specified, this defaults to 'exportedlogs'. + DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. It indicates the start time of the range for the request. Events + // with a timestamp prior to this time will not be exported. + From *int64 `locationName:"from" type:"long" required:"true"` + + // The name of the log group to export. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Will only export log streams that match the provided logStreamNamePrefix. + // If you don't specify a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // The name of the export task. + TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. It indicates the end time of the range for the request. Events + // with a timestamp later than this time will not be exported. + To *int64 `locationName:"to" type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskInput) GoString() string { + return s.String() +} + +type CreateExportTaskOutput struct { + _ struct{} `type:"structure"` + + // Id of the export task that got created. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskOutput) GoString() string { + return s.String() +} + +type CreateLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to create. 
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogGroupInput) GoString() string { + return s.String() +} + +type CreateLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogGroupOutput) GoString() string { + return s.String() +} + +type CreateLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group under which the log stream is to be created. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to create. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogStreamInput) GoString() string { + return s.String() +} + +type CreateLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of destination to delete. 
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationInput) GoString() string { + return s.String() +} + +type DeleteDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationOutput) GoString() string { + return s.String() +} + +type DeleteLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupInput) GoString() string { + return s.String() +} + +type DeleteLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupOutput) GoString() string { + return s.String() +} + +type DeleteLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group under which the log stream to delete belongs. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to delete. 
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamInput) GoString() string { + return s.String() +} + +type DeleteLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteMetricFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the metric filter to delete. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group that is associated with the metric filter to delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterInput) GoString() string { + return s.String() +} + +type DeleteMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterOutput) GoString() string { + return s.String() +} + +type DeleteRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group that is associated with the retention policy to + // delete. 
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyInput) GoString() string { + return s.String() +} + +type DeleteRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyOutput) GoString() string { + return s.String() +} + +type DeleteSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the subscription filter to delete. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group that is associated with the subscription filter + // to delete. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterInput) GoString() string { + return s.String() +} + +type DeleteSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterOutput) GoString() string { + return s.String() +} + +type DescribeDestinationsInput struct { + _ struct{} `type:"structure"` + + // Will only return destinations that match the provided destinationNamePrefix. + // If you don't specify a value, no prefix is applied. 
+ DestinationNamePrefix *string `min:"1" type:"string"` + + // The maximum number of results to return. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDestinationsInput) GoString() string { + return s.String() +} + +type DescribeDestinationsOutput struct { + _ struct{} `type:"structure"` + + Destinations []*Destination `locationName:"destinations" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDestinationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDestinationsOutput) GoString() string { + return s.String() +} + +type DescribeExportTasksInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeExportTasks + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // All export tasks that matches the specified status code will be returned. 
+ // This can return zero or more export tasks. + StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"` + + // Export task that matches the specified task Id will be returned. This can + // result in zero or one export task. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeExportTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksInput) GoString() string { + return s.String() +} + +type DescribeExportTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of export tasks. + ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeExportTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksOutput) GoString() string { + return s.String() +} + +type DescribeLogGroupsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // Will only return log groups that match the provided logGroupNamePrefix. If + // you don't specify a value, no prefix filter is applied. + LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeLogGroups + // request. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogGroupsInput) GoString() string { + return s.String() +} + +type DescribeLogGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of log groups. + LogGroups []*LogGroup `locationName:"logGroups" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogGroupsOutput) GoString() string { + return s.String() +} + +type DescribeLogStreamsInput struct { + _ struct{} `type:"structure"` + + // If set to true, results are returned in descending order. If you don't specify + // a value or set it to false, results are returned in ascending order. + Descending *bool `locationName:"descending" type:"boolean"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which log streams are to be listed. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Will only return log streams that match the provided logStreamNamePrefix. + // If you don't specify a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. 
+ // It must be a value obtained from the response of the previous DescribeLogStreams + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Specifies what to order the returned log streams by. Valid arguments are + // 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results + // are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot + // also contain a logStreamNamePrefix. + OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` +} + +// String returns the string representation +func (s DescribeLogStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsInput) GoString() string { + return s.String() +} + +type DescribeLogStreamsOutput struct { + _ struct{} `type:"structure"` + + // A list of log streams. + LogStreams []*LogStream `locationName:"logStreams" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsOutput) GoString() string { + return s.String() +} + +type DescribeMetricFiltersInput struct { + _ struct{} `type:"structure"` + + // Will only return metric filters that match the provided filterNamePrefix. + // If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. 
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which metric filters are to be listed. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeMetricFilters + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMetricFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricFiltersInput) GoString() string { + return s.String() +} + +type DescribeMetricFiltersOutput struct { + _ struct{} `type:"structure"` + + MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMetricFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricFiltersOutput) GoString() string { + return s.String() +} + +type DescribeSubscriptionFiltersInput struct { + _ struct{} `type:"structure"` + + // Will only return subscription filters that match the provided filterNamePrefix. + // If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of results to return. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which subscription filters are to be listed. 
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeSubscriptionFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscriptionFiltersInput) GoString() string { + return s.String() +} + +type DescribeSubscriptionFiltersOutput struct { + _ struct{} `type:"structure"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"` +} + +// String returns the string representation +func (s DescribeSubscriptionFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscriptionFiltersOutput) GoString() string { + return s.String() +} + +// A cross account destination that is the recipient of subscription log events. +type Destination struct { + _ struct{} `type:"structure"` + + // An IAM policy document that governs which AWS accounts can create subscription + // filters against this destination. + AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"` + + // ARN of this destination. + Arn *string `locationName:"arn" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC specifying when this destination was created. 
+ CreationTime *int64 `locationName:"creationTime" type:"long"` + + // Name of the destination. + DestinationName *string `locationName:"destinationName" min:"1" type:"string"` + + // A role for impersonation for delivering log events to the target. + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` + + // ARN of the physical target where the log events will be delivered (eg. ARN + // of a Kinesis stream). + TargetArn *string `locationName:"targetArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Represents an export task. +type ExportTask struct { + _ struct{} `type:"structure"` + + // Name of Amazon S3 bucket to which the log data was exported. + Destination *string `locationName:"destination" min:"1" type:"string"` + + // Prefix that was used as the start of Amazon S3 key for every object exported. + DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // Execution info about the export task. + ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. Events with a timestamp prior to this time are not exported. + From *int64 `locationName:"from" type:"long"` + + // The name of the log group from which logs data was exported. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Status of the export task. + Status *ExportTaskStatus `locationName:"status" type:"structure"` + + // Id of the export task. + TaskId *string `locationName:"taskId" min:"1" type:"string"` + + // The name of the export task. 
+ TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. Events with a timestamp later than this time are not exported. + To *int64 `locationName:"to" type:"long"` +} + +// String returns the string representation +func (s ExportTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTask) GoString() string { + return s.String() +} + +// Represents the status of an export task. +type ExportTaskExecutionInfo struct { + _ struct{} `type:"structure"` + + // A point in time when the export task got completed. + CompletionTime *int64 `locationName:"completionTime" type:"long"` + + // A point in time when the export task got created. + CreationTime *int64 `locationName:"creationTime" type:"long"` +} + +// String returns the string representation +func (s ExportTaskExecutionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskExecutionInfo) GoString() string { + return s.String() +} + +// Represents the status of an export task. +type ExportTaskStatus struct { + _ struct{} `type:"structure"` + + // Status code of the export task. + Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"` + + // Status message related to the code. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ExportTaskStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskStatus) GoString() string { + return s.String() +} + +type FilterLogEventsInput struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. If provided, events with a timestamp later than this time are + // not returned. 
+ EndTime *int64 `locationName:"endTime" type:"long"` + + // A valid CloudWatch Logs filter pattern to use for filtering the response. + // If not provided, all the events are matched. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // If provided, the API will make a best effort to provide responses that contain + // events from multiple log streams within the log group interleaved in a single + // response. If not provided, all the matched log events in the first log stream + // will be searched first, then those in the next log stream, etc. + Interleaved *bool `locationName:"interleaved" type:"boolean"` + + // The maximum number of events to return in a page of results. Default is 10,000 + // events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group to query. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Optional list of log stream names within the specified log group to search. + // Defaults to all the log streams in the log group. + LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` + + // A pagination token obtained from a FilterLogEvents response to continue paginating + // the FilterLogEvents results. This token is omitted from the response when + // there are no other events to display. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. If provided, events with a timestamp prior to this time are + // not returned. 
+ StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s FilterLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsInput) GoString() string { + return s.String() +} + +type FilterLogEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of FilteredLogEvent objects representing the matched events from the + // request. + Events []*FilteredLogEvent `locationName:"events" type:"list"` + + // A pagination token obtained from a FilterLogEvents response to continue paginating + // the FilterLogEvents results. This token is omitted from the response when + // there are no other events to display. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // A list of SearchedLogStream objects indicating which log streams have been + // searched in this request and whether each has been searched completely or + // still has more to be paginated. + SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` +} + +// String returns the string representation +func (s FilterLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsOutput) GoString() string { + return s.String() +} + +// Represents a matched event from a FilterLogEvents request. +type FilteredLogEvent struct { + _ struct{} `type:"structure"` + + // A unique identifier for this event. + EventId *string `locationName:"eventId" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The name of the log stream this event belongs to. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The data contained in the log event. 
+ Message *string `locationName:"message" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation +func (s FilteredLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilteredLogEvent) GoString() string { + return s.String() +} + +type GetLogEventsInput struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + EndTime *int64 `locationName:"endTime" type:"long"` + + // The maximum number of log events returned in the response. If you don't specify + // a value, the request would return as many log events as can fit in a response + // size of 1MB, up to 10,000 log events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group to query. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to query. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the nextForwardToken or nextBackwardToken + // fields in the response of the previous GetLogEvents request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // If set to true, the earliest log events would be returned first. The default + // is false (the latest log events are returned first). + StartFromHead *bool `locationName:"startFromHead" type:"boolean"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. 
+ StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s GetLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogEventsInput) GoString() string { + return s.String() +} + +type GetLogEventsOutput struct { + _ struct{} `type:"structure"` + + Events []*OutputLogEvent `locationName:"events" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogEventsOutput) GoString() string { + return s.String() +} + +// A log event is a record of some activity that was recorded by the application +// or resource being monitored. The log event record that Amazon CloudWatch +// Logs understands contains two properties: the timestamp of when the event +// occurred, and the raw event message. +type InputLogEvent struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" min:"1" type:"string" required:"true"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. 
+	Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s InputLogEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputLogEvent) GoString() string {
+	return s.String()
+}
+
+type LogGroup struct {
+	_ struct{} `type:"structure"`
+
+	Arn *string `locationName:"arn" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+	// The number of metric filters associated with the log group.
+	MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"`
+
+	// Specifies the number of days you want to retain log events in the specified
+	// log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180,
+	// 365, 400, 545, 731, 1827, 3653.
+	RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"`
+
+	StoredBytes *int64 `locationName:"storedBytes" type:"long"`
+}
+
+// String returns the string representation
+func (s LogGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogGroup) GoString() string {
+	return s.String()
+}
+
+// A log stream is a sequence of log events from a single emitter of logs.
+type LogStream struct {
+	_ struct{} `type:"structure"`
+
+	Arn *string `locationName:"arn" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+ FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"` + + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + StoredBytes *int64 `locationName:"storedBytes" type:"long"` + + // A string token used for making PutLogEvents requests. A sequenceToken can + // only be used once, and PutLogEvents requests must include the sequenceToken + // obtained from the response of the previous request. + UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s LogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogStream) GoString() string { + return s.String() +} + +// Metric filters can be used to express how Amazon CloudWatch Logs would extract +// metric observations from ingested log events and transform them to metric +// data in a CloudWatch metric. +type MetricFilter struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // A name for a metric or subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how Amazon CloudWatch Logs should interpret the + // data in each log event. For example, a log event may contain timestamps, + // IP addresses, strings, and so on. You use the filter pattern to specify what + // to look for in the log event message. 
+ FilterPattern *string `locationName:"filterPattern" type:"string"` + + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` +} + +// String returns the string representation +func (s MetricFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilter) GoString() string { + return s.String() +} + +type MetricFilterMatchRecord struct { + _ struct{} `type:"structure"` + + EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` + + EventNumber *int64 `locationName:"eventNumber" type:"long"` + + ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"` +} + +// String returns the string representation +func (s MetricFilterMatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilterMatchRecord) GoString() string { + return s.String() +} + +type MetricTransformation struct { + _ struct{} `type:"structure"` + + // The name of the CloudWatch metric to which the monitored log information + // should be published. For example, you may publish to a metric called ErrorCount. + MetricName *string `locationName:"metricName" type:"string" required:"true"` + + // The destination namespace of the new CloudWatch metric. + MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` + + // What to publish to the metric. For example, if you're counting the occurrences + // of a particular term like "Error", the value will be "1" for each occurrence. + // If you're counting the bytes transferred the published value will be the + // value in the log event. 
+	MetricValue *string `locationName:"metricValue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s MetricTransformation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricTransformation) GoString() string {
+	return s.String()
+}
+
+type OutputLogEvent struct {
+	_ struct{} `type:"structure"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	IngestionTime *int64 `locationName:"ingestionTime" type:"long"`
+
+	Message *string `locationName:"message" min:"1" type:"string"`
+
+	// A point in time expressed as the number of milliseconds since Jan 1, 1970
+	// 00:00:00 UTC.
+	Timestamp *int64 `locationName:"timestamp" type:"long"`
+}
+
+// String returns the string representation
+func (s OutputLogEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OutputLogEvent) GoString() string {
+	return s.String()
+}
+
+type PutDestinationInput struct {
+	_ struct{} `type:"structure"`
+
+	// A name for the destination.
+	DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
+
+	// The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to
+	// do Amazon Kinesis PutRecord requests on the destination stream.
+	RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"`
+
+	// The ARN of an Amazon Kinesis stream to deliver matching log events to.
+ TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationInput) GoString() string { + return s.String() +} + +type PutDestinationOutput struct { + _ struct{} `type:"structure"` + + // A cross account destination that is the recipient of subscription log events. + Destination *Destination `locationName:"destination" type:"structure"` +} + +// String returns the string representation +func (s PutDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationOutput) GoString() string { + return s.String() +} + +type PutDestinationPolicyInput struct { + _ struct{} `type:"structure"` + + // An IAM policy document that authorizes cross-account users to deliver their + // log events to associated destination. + AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"` + + // A name for an existing destination. + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutDestinationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationPolicyInput) GoString() string { + return s.String() +} + +type PutDestinationPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDestinationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationPolicyOutput) GoString() string { + return s.String() +} + +type PutLogEventsInput struct { + _ struct{} `type:"structure"` + + // A list of log events belonging to a log stream. 
+ LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` + + // The name of the log group to put log events to. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to put log events to. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // A string token that must be obtained from the response of the previous PutLogEvents + // request. + SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsInput) GoString() string { + return s.String() +} + +type PutLogEventsOutput struct { + _ struct{} `type:"structure"` + + // A string token used for making PutLogEvents requests. A sequenceToken can + // only be used once, and PutLogEvents requests must include the sequenceToken + // obtained from the response of the previous request. + NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` + + RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` +} + +// String returns the string representation +func (s PutLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsOutput) GoString() string { + return s.String() +} + +type PutMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A name for the metric filter. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A valid CloudWatch Logs filter pattern for extracting metric data out of + // ingested log events. + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group to associate the metric filter with. 
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A collection of information needed to define how metric data gets emitted. + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterInput) GoString() string { + return s.String() +} + +type PutMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterOutput) GoString() string { + return s.String() +} + +type PutRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to associate the retention policy with. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Specifies the number of days you want to retain log events in the specified + // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, + // 365, 400, 545, 731, 1827, 3653. 
+ RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"` +} + +// String returns the string representation +func (s PutRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyInput) GoString() string { + return s.String() +} + +type PutRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyOutput) GoString() string { + return s.String() +} + +type PutSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The ARN of the destination to deliver matching log events to. Currently, + // the supported destinations are: A Amazon Kinesis stream belonging to the + // same account as the subscription filter, for same-account delivery. A logical + // destination (used via an ARN of Destination) belonging to a different account, + // for cross-account delivery. + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` + + // A name for the subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A valid CloudWatch Logs filter pattern for subscribing to a filtered stream + // of log events. + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group to associate the subscription filter with. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to + // deliver ingested log events to the destination stream. You don't need to + // provide the ARN when you are working with a logical destination (used via + // an ARN of Destination) for cross-account delivery. 
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutSubscriptionFilterInput) GoString() string { + return s.String() +} + +type PutSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutSubscriptionFilterOutput) GoString() string { + return s.String() +} + +type RejectedLogEventsInfo struct { + _ struct{} `type:"structure"` + + ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` + + TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` + + TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` +} + +// String returns the string representation +func (s RejectedLogEventsInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectedLogEventsInfo) GoString() string { + return s.String() +} + +// An object indicating the search status of a log stream in a FilterLogEvents +// request. +type SearchedLogStream struct { + _ struct{} `type:"structure"` + + // The name of the log stream. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // Indicates whether all the events in this log stream were searched or more + // data exists to search by paginating further. 
+ SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"` +} + +// String returns the string representation +func (s SearchedLogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchedLogStream) GoString() string { + return s.String() +} + +type SubscriptionFilter struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` + + // A name for a metric or subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how Amazon CloudWatch Logs should interpret the + // data in each log event. For example, a log event may contain timestamps, + // IP addresses, strings, and so on. You use the filter pattern to specify what + // to look for in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s SubscriptionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscriptionFilter) GoString() string { + return s.String() +} + +type TestMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A symbolic description of how Amazon CloudWatch Logs should interpret the + // data in each log event. For example, a log event may contain timestamps, + // IP addresses, strings, and so on. You use the filter pattern to specify what + // to look for in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // A list of log event messages to test. 
+ LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TestMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestMetricFilterInput) GoString() string { + return s.String() +} + +type TestMetricFilterOutput struct { + _ struct{} `type:"structure"` + + Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"` +} + +// String returns the string representation +func (s TestMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestMetricFilterOutput) GoString() string { + return s.String() +} + +const ( + // @enum ExportTaskStatusCode + ExportTaskStatusCodeCancelled = "CANCELLED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeCompleted = "COMPLETED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeFailed = "FAILED" + // @enum ExportTaskStatusCode + ExportTaskStatusCodePending = "PENDING" + // @enum ExportTaskStatusCode + ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" + // @enum ExportTaskStatusCode + ExportTaskStatusCodeRunning = "RUNNING" +) + +const ( + // @enum OrderBy + OrderByLogStreamName = "LogStreamName" + // @enum OrderBy + OrderByLastEventTime = "LastEventTime" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1,128 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchlogsiface provides an interface for the Amazon CloudWatch Logs. +package cloudwatchlogsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +// CloudWatchLogsAPI is the interface type for cloudwatchlogs.CloudWatchLogs. +type CloudWatchLogsAPI interface { + CancelExportTaskRequest(*cloudwatchlogs.CancelExportTaskInput) (*request.Request, *cloudwatchlogs.CancelExportTaskOutput) + + CancelExportTask(*cloudwatchlogs.CancelExportTaskInput) (*cloudwatchlogs.CancelExportTaskOutput, error) + + CreateExportTaskRequest(*cloudwatchlogs.CreateExportTaskInput) (*request.Request, *cloudwatchlogs.CreateExportTaskOutput) + + CreateExportTask(*cloudwatchlogs.CreateExportTaskInput) (*cloudwatchlogs.CreateExportTaskOutput, error) + + CreateLogGroupRequest(*cloudwatchlogs.CreateLogGroupInput) (*request.Request, *cloudwatchlogs.CreateLogGroupOutput) + + CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) + + CreateLogStreamRequest(*cloudwatchlogs.CreateLogStreamInput) (*request.Request, *cloudwatchlogs.CreateLogStreamOutput) + + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + + DeleteDestinationRequest(*cloudwatchlogs.DeleteDestinationInput) (*request.Request, *cloudwatchlogs.DeleteDestinationOutput) + + DeleteDestination(*cloudwatchlogs.DeleteDestinationInput) (*cloudwatchlogs.DeleteDestinationOutput, error) + + DeleteLogGroupRequest(*cloudwatchlogs.DeleteLogGroupInput) (*request.Request, *cloudwatchlogs.DeleteLogGroupOutput) + + DeleteLogGroup(*cloudwatchlogs.DeleteLogGroupInput) (*cloudwatchlogs.DeleteLogGroupOutput, error) + + DeleteLogStreamRequest(*cloudwatchlogs.DeleteLogStreamInput) (*request.Request, *cloudwatchlogs.DeleteLogStreamOutput) + + DeleteLogStream(*cloudwatchlogs.DeleteLogStreamInput) 
(*cloudwatchlogs.DeleteLogStreamOutput, error) + + DeleteMetricFilterRequest(*cloudwatchlogs.DeleteMetricFilterInput) (*request.Request, *cloudwatchlogs.DeleteMetricFilterOutput) + + DeleteMetricFilter(*cloudwatchlogs.DeleteMetricFilterInput) (*cloudwatchlogs.DeleteMetricFilterOutput, error) + + DeleteRetentionPolicyRequest(*cloudwatchlogs.DeleteRetentionPolicyInput) (*request.Request, *cloudwatchlogs.DeleteRetentionPolicyOutput) + + DeleteRetentionPolicy(*cloudwatchlogs.DeleteRetentionPolicyInput) (*cloudwatchlogs.DeleteRetentionPolicyOutput, error) + + DeleteSubscriptionFilterRequest(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*request.Request, *cloudwatchlogs.DeleteSubscriptionFilterOutput) + + DeleteSubscriptionFilter(*cloudwatchlogs.DeleteSubscriptionFilterInput) (*cloudwatchlogs.DeleteSubscriptionFilterOutput, error) + + DescribeDestinationsRequest(*cloudwatchlogs.DescribeDestinationsInput) (*request.Request, *cloudwatchlogs.DescribeDestinationsOutput) + + DescribeDestinations(*cloudwatchlogs.DescribeDestinationsInput) (*cloudwatchlogs.DescribeDestinationsOutput, error) + + DescribeDestinationsPages(*cloudwatchlogs.DescribeDestinationsInput, func(*cloudwatchlogs.DescribeDestinationsOutput, bool) bool) error + + DescribeExportTasksRequest(*cloudwatchlogs.DescribeExportTasksInput) (*request.Request, *cloudwatchlogs.DescribeExportTasksOutput) + + DescribeExportTasks(*cloudwatchlogs.DescribeExportTasksInput) (*cloudwatchlogs.DescribeExportTasksOutput, error) + + DescribeLogGroupsRequest(*cloudwatchlogs.DescribeLogGroupsInput) (*request.Request, *cloudwatchlogs.DescribeLogGroupsOutput) + + DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + + DescribeLogGroupsPages(*cloudwatchlogs.DescribeLogGroupsInput, func(*cloudwatchlogs.DescribeLogGroupsOutput, bool) bool) error + + DescribeLogStreamsRequest(*cloudwatchlogs.DescribeLogStreamsInput) (*request.Request, *cloudwatchlogs.DescribeLogStreamsOutput) + + 
DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + + DescribeLogStreamsPages(*cloudwatchlogs.DescribeLogStreamsInput, func(*cloudwatchlogs.DescribeLogStreamsOutput, bool) bool) error + + DescribeMetricFiltersRequest(*cloudwatchlogs.DescribeMetricFiltersInput) (*request.Request, *cloudwatchlogs.DescribeMetricFiltersOutput) + + DescribeMetricFilters(*cloudwatchlogs.DescribeMetricFiltersInput) (*cloudwatchlogs.DescribeMetricFiltersOutput, error) + + DescribeMetricFiltersPages(*cloudwatchlogs.DescribeMetricFiltersInput, func(*cloudwatchlogs.DescribeMetricFiltersOutput, bool) bool) error + + DescribeSubscriptionFiltersRequest(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*request.Request, *cloudwatchlogs.DescribeSubscriptionFiltersOutput) + + DescribeSubscriptionFilters(*cloudwatchlogs.DescribeSubscriptionFiltersInput) (*cloudwatchlogs.DescribeSubscriptionFiltersOutput, error) + + DescribeSubscriptionFiltersPages(*cloudwatchlogs.DescribeSubscriptionFiltersInput, func(*cloudwatchlogs.DescribeSubscriptionFiltersOutput, bool) bool) error + + FilterLogEventsRequest(*cloudwatchlogs.FilterLogEventsInput) (*request.Request, *cloudwatchlogs.FilterLogEventsOutput) + + FilterLogEvents(*cloudwatchlogs.FilterLogEventsInput) (*cloudwatchlogs.FilterLogEventsOutput, error) + + FilterLogEventsPages(*cloudwatchlogs.FilterLogEventsInput, func(*cloudwatchlogs.FilterLogEventsOutput, bool) bool) error + + GetLogEventsRequest(*cloudwatchlogs.GetLogEventsInput) (*request.Request, *cloudwatchlogs.GetLogEventsOutput) + + GetLogEvents(*cloudwatchlogs.GetLogEventsInput) (*cloudwatchlogs.GetLogEventsOutput, error) + + GetLogEventsPages(*cloudwatchlogs.GetLogEventsInput, func(*cloudwatchlogs.GetLogEventsOutput, bool) bool) error + + PutDestinationRequest(*cloudwatchlogs.PutDestinationInput) (*request.Request, *cloudwatchlogs.PutDestinationOutput) + + PutDestination(*cloudwatchlogs.PutDestinationInput) 
(*cloudwatchlogs.PutDestinationOutput, error) + + PutDestinationPolicyRequest(*cloudwatchlogs.PutDestinationPolicyInput) (*request.Request, *cloudwatchlogs.PutDestinationPolicyOutput) + + PutDestinationPolicy(*cloudwatchlogs.PutDestinationPolicyInput) (*cloudwatchlogs.PutDestinationPolicyOutput, error) + + PutLogEventsRequest(*cloudwatchlogs.PutLogEventsInput) (*request.Request, *cloudwatchlogs.PutLogEventsOutput) + + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) + + PutMetricFilterRequest(*cloudwatchlogs.PutMetricFilterInput) (*request.Request, *cloudwatchlogs.PutMetricFilterOutput) + + PutMetricFilter(*cloudwatchlogs.PutMetricFilterInput) (*cloudwatchlogs.PutMetricFilterOutput, error) + + PutRetentionPolicyRequest(*cloudwatchlogs.PutRetentionPolicyInput) (*request.Request, *cloudwatchlogs.PutRetentionPolicyOutput) + + PutRetentionPolicy(*cloudwatchlogs.PutRetentionPolicyInput) (*cloudwatchlogs.PutRetentionPolicyOutput, error) + + PutSubscriptionFilterRequest(*cloudwatchlogs.PutSubscriptionFilterInput) (*request.Request, *cloudwatchlogs.PutSubscriptionFilterOutput) + + PutSubscriptionFilter(*cloudwatchlogs.PutSubscriptionFilterInput) (*cloudwatchlogs.PutSubscriptionFilterOutput, error) + + TestMetricFilterRequest(*cloudwatchlogs.TestMetricFilterInput) (*request.Request, *cloudwatchlogs.TestMetricFilterOutput) + + TestMetricFilter(*cloudwatchlogs.TestMetricFilterInput) (*cloudwatchlogs.TestMetricFilterOutput, error) +} + +var _ CloudWatchLogsAPI = (*cloudwatchlogs.CloudWatchLogs)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,566 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatchlogs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCloudWatchLogs_CancelExportTask() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CancelExportTaskInput{ + TaskId: aws.String("ExportTaskId"), // Required + } + resp, err := svc.CancelExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateExportTask() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateExportTaskInput{ + Destination: aws.String("ExportDestinationBucket"), // Required + From: aws.Int64(1), // Required + LogGroupName: aws.String("LogGroupName"), // Required + To: aws.Int64(1), // Required + DestinationPrefix: aws.String("ExportDestinationPrefix"), + LogStreamNamePrefix: aws.String("LogStreamName"), + TaskName: aws.String("ExportTaskName"), + } + resp, err := svc.CreateExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateLogGroup() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.CreateLogGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_CreateLogStream() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + } + resp, err := svc.CreateLogStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteDestination() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteDestinationInput{ + DestinationName: aws.String("DestinationName"), // Required + } + resp, err := svc.DeleteDestination(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteLogGroup() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteLogGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteLogStream() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteLogStreamInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + } + resp, err := svc.DeleteLogStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteMetricFilterInput{ + FilterName: aws.String("FilterName"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteRetentionPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteRetentionPolicyInput{ + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteRetentionPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DeleteSubscriptionFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DeleteSubscriptionFilterInput{ + FilterName: aws.String("FilterName"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + } + resp, err := svc.DeleteSubscriptionFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeDestinations() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeDestinationsInput{ + DestinationNamePrefix: aws.String("DestinationName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeDestinations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeExportTasks() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeExportTasksInput{ + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + StatusCode: aws.String("ExportTaskStatusCode"), + TaskId: aws.String("ExportTaskId"), + } + resp, err := svc.DescribeExportTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeLogGroups() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeLogGroupsInput{ + Limit: aws.Int64(1), + LogGroupNamePrefix: aws.String("LogGroupName"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeLogGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeLogStreams() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + Descending: aws.Bool(true), + Limit: aws.Int64(1), + LogStreamNamePrefix: aws.String("LogStreamName"), + NextToken: aws.String("NextToken"), + OrderBy: aws.String("OrderBy"), + } + resp, err := svc.DescribeLogStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeMetricFilters() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeMetricFiltersInput{ + LogGroupName: aws.String("LogGroupName"), // Required + FilterNamePrefix: aws.String("FilterName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeMetricFilters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_DescribeSubscriptionFilters() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.DescribeSubscriptionFiltersInput{ + LogGroupName: aws.String("LogGroupName"), // Required + FilterNamePrefix: aws.String("FilterName"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeSubscriptionFilters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_FilterLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.FilterLogEventsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + EndTime: aws.Int64(1), + FilterPattern: aws.String("FilterPattern"), + Interleaved: aws.Bool(true), + Limit: aws.Int64(1), + LogStreamNames: []*string{ + aws.String("LogStreamName"), // Required + // More values... + }, + NextToken: aws.String("NextToken"), + StartTime: aws.Int64(1), + } + resp, err := svc.FilterLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_GetLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.GetLogEventsInput{ + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + EndTime: aws.Int64(1), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + StartFromHead: aws.Bool(true), + StartTime: aws.Int64(1), + } + resp, err := svc.GetLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutDestination() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutDestinationInput{ + DestinationName: aws.String("DestinationName"), // Required + RoleArn: aws.String("RoleArn"), // Required + TargetArn: aws.String("TargetArn"), // Required + } + resp, err := svc.PutDestination(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutDestinationPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutDestinationPolicyInput{ + AccessPolicy: aws.String("AccessPolicy"), // Required + DestinationName: aws.String("DestinationName"), // Required + } + resp, err := svc.PutDestinationPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutLogEvents() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: []*cloudwatchlogs.InputLogEvent{ // Required + { // Required + Message: aws.String("EventMessage"), // Required + Timestamp: aws.Int64(1), // Required + }, + // More values... + }, + LogGroupName: aws.String("LogGroupName"), // Required + LogStreamName: aws.String("LogStreamName"), // Required + SequenceToken: aws.String("SequenceToken"), + } + resp, err := svc.PutLogEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutMetricFilterInput{ + FilterName: aws.String("FilterName"), // Required + FilterPattern: aws.String("FilterPattern"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + MetricTransformations: []*cloudwatchlogs.MetricTransformation{ // Required + { // Required + MetricName: aws.String("MetricName"), // Required + MetricNamespace: aws.String("MetricNamespace"), // Required + MetricValue: aws.String("MetricValue"), // Required + }, + // More values... 
+ }, + } + resp, err := svc.PutMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutRetentionPolicy() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String("LogGroupName"), // Required + RetentionInDays: aws.Int64(1), // Required + } + resp, err := svc.PutRetentionPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_PutSubscriptionFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.PutSubscriptionFilterInput{ + DestinationArn: aws.String("DestinationArn"), // Required + FilterName: aws.String("FilterName"), // Required + FilterPattern: aws.String("FilterPattern"), // Required + LogGroupName: aws.String("LogGroupName"), // Required + RoleArn: aws.String("RoleArn"), + } + resp, err := svc.PutSubscriptionFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCloudWatchLogs_TestMetricFilter() { + svc := cloudwatchlogs.New(session.New()) + + params := &cloudwatchlogs.TestMetricFilterInput{ + FilterPattern: aws.String("FilterPattern"), // Required + LogEventMessages: []*string{ // Required + aws.String("EventMessage"), // Required + // More values... + }, + } + resp, err := svc.TestMetricFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,119 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the Amazon CloudWatch Logs API Reference. Amazon CloudWatch Logs +// enables you to monitor, store, and access your system, application, and custom +// log files. This guide provides detailed information about Amazon CloudWatch +// Logs actions, data types, parameters, and errors. For detailed information +// about Amazon CloudWatch Logs features and their associated API calls, go +// to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide). +// +// Use the following links to get started using the Amazon CloudWatch Logs +// API Reference: +// +// Actions (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Operations.html): +// An alphabetical list of all Amazon CloudWatch Logs actions. Data Types (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Types.html): +// An alphabetical list of all Amazon CloudWatch Logs data types. 
Common Parameters +// (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonParameters.html): +// Parameters that all Query actions can use. Common Errors (http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/CommonErrors.html): +// Client and server errors that all actions can return. Regions and Endpoints +// (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized +// regions and endpoints for all AWS products. In addition to using the Amazon +// CloudWatch Logs API, you can also use the following SDKs and third-party +// libraries to access Amazon CloudWatch Logs programmatically. +// +// AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/) +// AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/) +// AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/) +// AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/) +// Developers in the AWS developer community also provide their own libraries, +// which you can find at the following AWS developer centers: +// +// AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer +// Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/) +// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET +// Developer Center (http://aws.amazon.com/net/) +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CloudWatchLogs struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "logs" + +// New creates a new instance of the CloudWatchLogs client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CloudWatchLogs client from just a session. +// svc := cloudwatchlogs.New(mySession) +// +// // Create a CloudWatchLogs client with additional configuration +// svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchLogs { + svc := &CloudWatchLogs{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-03-28", + JSONVersion: "1.1", + TargetPrefix: "Logs_20140328", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatchLogs operation and runs any +// custom request initialization. 
+func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,890 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codecommit provides a client for AWS CodeCommit. +package codecommit + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opBatchGetRepositories = "BatchGetRepositories" + +// BatchGetRepositoriesRequest generates a request for the BatchGetRepositories operation. +func (c *CodeCommit) BatchGetRepositoriesRequest(input *BatchGetRepositoriesInput) (req *request.Request, output *BatchGetRepositoriesOutput) { + op := &request.Operation{ + Name: opBatchGetRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetRepositoriesOutput{} + req.Data = output + return +} + +// Gets information about one or more repositories. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. 
Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. +func (c *CodeCommit) BatchGetRepositories(input *BatchGetRepositoriesInput) (*BatchGetRepositoriesOutput, error) { + req, out := c.BatchGetRepositoriesRequest(input) + err := req.Send() + return out, err +} + +const opCreateBranch = "CreateBranch" + +// CreateBranchRequest generates a request for the CreateBranch operation. +func (c *CodeCommit) CreateBranchRequest(input *CreateBranchInput) (req *request.Request, output *CreateBranchOutput) { + op := &request.Operation{ + Name: opCreateBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBranchInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateBranchOutput{} + req.Data = output + return +} + +// Creates a new branch in a repository and points the branch to a commit. +// +// Calling the create branch operation does not set a repository's default +// branch. To do this, call the update default branch operation. +func (c *CodeCommit) CreateBranch(input *CreateBranchInput) (*CreateBranchOutput, error) { + req, out := c.CreateBranchRequest(input) + err := req.Send() + return out, err +} + +const opCreateRepository = "CreateRepository" + +// CreateRepositoryRequest generates a request for the CreateRepository operation. 
+func (c *CodeCommit) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { + op := &request.Operation{ + Name: opCreateRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRepositoryOutput{} + req.Data = output + return +} + +// Creates a new, empty repository. +func (c *CodeCommit) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { + req, out := c.CreateRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRepository = "DeleteRepository" + +// DeleteRepositoryRequest generates a request for the DeleteRepository operation. +func (c *CodeCommit) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { + op := &request.Operation{ + Name: opDeleteRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryOutput{} + req.Data = output + return +} + +// Deletes a repository. If a specified repository was already deleted, a null +// repository ID will be returned. +// +// Deleting a repository also deletes all associated objects and metadata. +// After a repository is deleted, all future push calls to the deleted repository +// will fail. +func (c *CodeCommit) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opGetBranch = "GetBranch" + +// GetBranchRequest generates a request for the GetBranch operation. 
+func (c *CodeCommit) GetBranchRequest(input *GetBranchInput) (req *request.Request, output *GetBranchOutput) { + op := &request.Operation{ + Name: opGetBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBranchInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBranchOutput{} + req.Data = output + return +} + +// Retrieves information about a repository branch, including its name and the +// last commit ID. +func (c *CodeCommit) GetBranch(input *GetBranchInput) (*GetBranchOutput, error) { + req, out := c.GetBranchRequest(input) + err := req.Send() + return out, err +} + +const opGetRepository = "GetRepository" + +// GetRepositoryRequest generates a request for the GetRepository operation. +func (c *CodeCommit) GetRepositoryRequest(input *GetRepositoryInput) (req *request.Request, output *GetRepositoryOutput) { + op := &request.Operation{ + Name: opGetRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRepositoryOutput{} + req.Data = output + return +} + +// Gets information about a repository. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. +func (c *CodeCommit) GetRepository(input *GetRepositoryInput) (*GetRepositoryOutput, error) { + req, out := c.GetRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opListBranches = "ListBranches" + +// ListBranchesRequest generates a request for the ListBranches operation. 
+func (c *CodeCommit) ListBranchesRequest(input *ListBranchesInput) (req *request.Request, output *ListBranchesOutput) { + op := &request.Operation{ + Name: opListBranches, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListBranchesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBranchesOutput{} + req.Data = output + return +} + +// Gets information about one or more branches in a repository. +func (c *CodeCommit) ListBranches(input *ListBranchesInput) (*ListBranchesOutput, error) { + req, out := c.ListBranchesRequest(input) + err := req.Send() + return out, err +} + +const opListRepositories = "ListRepositories" + +// ListRepositoriesRequest generates a request for the ListRepositories operation. +func (c *CodeCommit) ListRepositoriesRequest(input *ListRepositoriesInput) (req *request.Request, output *ListRepositoriesOutput) { + op := &request.Operation{ + Name: opListRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRepositoriesOutput{} + req.Data = output + return +} + +// Gets information about one or more repositories. +func (c *CodeCommit) ListRepositories(input *ListRepositoriesInput) (*ListRepositoriesOutput, error) { + req, out := c.ListRepositoriesRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDefaultBranch = "UpdateDefaultBranch" + +// UpdateDefaultBranchRequest generates a request for the UpdateDefaultBranch operation. 
+func (c *CodeCommit) UpdateDefaultBranchRequest(input *UpdateDefaultBranchInput) (req *request.Request, output *UpdateDefaultBranchOutput) { + op := &request.Operation{ + Name: opUpdateDefaultBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDefaultBranchInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateDefaultBranchOutput{} + req.Data = output + return +} + +// Sets or changes the default branch name for the specified repository. +// +// If you use this operation to change the default branch name to the current +// default branch name, a success message is returned even though the default +// branch did not change. +func (c *CodeCommit) UpdateDefaultBranch(input *UpdateDefaultBranchInput) (*UpdateDefaultBranchOutput, error) { + req, out := c.UpdateDefaultBranchRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRepositoryDescription = "UpdateRepositoryDescription" + +// UpdateRepositoryDescriptionRequest generates a request for the UpdateRepositoryDescription operation. +func (c *CodeCommit) UpdateRepositoryDescriptionRequest(input *UpdateRepositoryDescriptionInput) (req *request.Request, output *UpdateRepositoryDescriptionOutput) { + op := &request.Operation{ + Name: opUpdateRepositoryDescription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRepositoryDescriptionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateRepositoryDescriptionOutput{} + req.Data = output + return +} + +// Sets or changes the comment or description for a repository. +// +// The description field for a repository accepts all HTML characters and all +// valid Unicode characters. 
Applications that do not HTML-encode the description +// and display it in a web page could expose users to potentially malicious +// code. Make sure that you HTML-encode the description field in any application +// that uses this API to display the repository description on a web page. +func (c *CodeCommit) UpdateRepositoryDescription(input *UpdateRepositoryDescriptionInput) (*UpdateRepositoryDescriptionOutput, error) { + req, out := c.UpdateRepositoryDescriptionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRepositoryName = "UpdateRepositoryName" + +// UpdateRepositoryNameRequest generates a request for the UpdateRepositoryName operation. +func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInput) (req *request.Request, output *UpdateRepositoryNameOutput) { + op := &request.Operation{ + Name: opUpdateRepositoryName, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRepositoryNameInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateRepositoryNameOutput{} + req.Data = output + return +} + +// Renames a repository. +func (c *CodeCommit) UpdateRepositoryName(input *UpdateRepositoryNameInput) (*UpdateRepositoryNameOutput, error) { + req, out := c.UpdateRepositoryNameRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of a batch get repositories operation. +type BatchGetRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The names of the repositories to get information about. 
+ RepositoryNames []*string `locationName:"repositoryNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetRepositoriesInput) GoString() string { + return s.String() +} + +// Represents the output of a batch get repositories operation. +type BatchGetRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of repositories returned by the batch get repositories operation. + Repositories []*RepositoryMetadata `locationName:"repositories" type:"list"` + + // Returns a list of repository names for which information could not be found. + RepositoriesNotFound []*string `locationName:"repositoriesNotFound" type:"list"` +} + +// String returns the string representation +func (s BatchGetRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetRepositoriesOutput) GoString() string { + return s.String() +} + +// Returns information about a branch. +type BranchInfo struct { + _ struct{} `type:"structure"` + + // The name of the branch. + BranchName *string `locationName:"branchName" min:"1" type:"string"` + + // The ID of the last commit made to the branch. + CommitId *string `locationName:"commitId" type:"string"` +} + +// String returns the string representation +func (s BranchInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BranchInfo) GoString() string { + return s.String() +} + +// Represents the input of a create branch operation. +type CreateBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the new branch to create. + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // The ID of the commit to point the new branch to. 
+ // + // If this commit ID is not specified, the new branch will point to the commit + // that is pointed to by the repository's default branch. + CommitId *string `locationName:"commitId" type:"string" required:"true"` + + // The name of the repository in which you want to create the new branch. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchInput) GoString() string { + return s.String() +} + +type CreateBranchOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBranchOutput) GoString() string { + return s.String() +} + +// Represents the input of a create repository operation. +type CreateRepositoryInput struct { + _ struct{} `type:"structure"` + + // A comment or description about the new repository. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The name of the new repository to be created. + // + // The repository name must be unique across the calling AWS account. In addition, + // repository names are restricted to alphanumeric characters. The suffix ".git" + // is prohibited. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryInput) GoString() string { + return s.String() +} + +// Represents the output of a create repository operation. 
+type CreateRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the newly created repository. + RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` +} + +// String returns the string representation +func (s CreateRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete repository operation. +type DeleteRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name of the repository to delete. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryInput) GoString() string { + return s.String() +} + +// Represents the output of a delete repository operation. +type DeleteRepositoryOutput struct { + _ struct{} `type:"structure"` + + // The ID of the repository that was deleted. + RepositoryId *string `locationName:"repositoryId" type:"string"` +} + +// String returns the string representation +func (s DeleteRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryOutput) GoString() string { + return s.String() +} + +// Represents the input of a get branch operation. +type GetBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the branch for which you want to retrieve information. + BranchName *string `locationName:"branchName" min:"1" type:"string"` + + // Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9), + // ".", "_", and "-". Additionally, the suffix ".git" is prohibited in a repository + // name. 
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBranchInput) GoString() string { + return s.String() +} + +// Represents the output of a get branch operation. +type GetBranchOutput struct { + _ struct{} `type:"structure"` + + // The name of the branch. + Branch *BranchInfo `locationName:"branch" type:"structure"` +} + +// String returns the string representation +func (s GetBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBranchOutput) GoString() string { + return s.String() +} + +// Represents the input of a get repository operation. +type GetRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name of the repository to get information about. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryInput) GoString() string { + return s.String() +} + +// Represents the output of a get repository operation. +type GetRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the repository. + RepositoryMetadata *RepositoryMetadata `locationName:"repositoryMetadata" type:"structure"` +} + +// String returns the string representation +func (s GetRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryOutput) GoString() string { + return s.String() +} + +// Represents the input of a list branches operation. 
+type ListBranchesInput struct { + _ struct{} `type:"structure"` + + // An enumeration token that allows the operation to batch the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the repository that contains the branches. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListBranchesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBranchesInput) GoString() string { + return s.String() +} + +// Represents the output of a list branches operation. +type ListBranchesOutput struct { + _ struct{} `type:"structure"` + + // The list of branch names. + Branches []*string `locationName:"branches" type:"list"` + + // An enumeration token that returns the batch of the results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListBranchesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBranchesOutput) GoString() string { + return s.String() +} + +// Represents the input of a list repositories operation. +type ListRepositoriesInput struct { + _ struct{} `type:"structure"` + + // An enumeration token that allows the operation to batch the results of the + // operation. Batch sizes are 1,000 for list repository operations. When the + // client sends the token back to AWS CodeCommit, another page of 1,000 records + // is retrieved. + NextToken *string `locationName:"nextToken" type:"string"` + + // The order in which to sort the results of a list repositories operation. + Order *string `locationName:"order" type:"string" enum:"OrderEnum"` + + // The criteria used to sort the results of a list repositories operation. 
+ SortBy *string `locationName:"sortBy" type:"string" enum:"SortByEnum"` +} + +// String returns the string representation +func (s ListRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRepositoriesInput) GoString() string { + return s.String() +} + +// Represents the output of a list repositories operation. +type ListRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // An enumeration token that allows the operation to batch the results of the + // operation. Batch sizes are 1,000 for list repository operations. When the + // client sends the token back to AWS CodeCommit, another page of 1,000 records + // is retrieved. + NextToken *string `locationName:"nextToken" type:"string"` + + // Lists the repositories called by the list repositories operation. + Repositories []*RepositoryNameIdPair `locationName:"repositories" type:"list"` +} + +// String returns the string representation +func (s ListRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRepositoriesOutput) GoString() string { + return s.String() +} + +// Information about a repository. +type RepositoryMetadata struct { + _ struct{} `type:"structure"` + + // The ID of the AWS account associated with the repository. + AccountId *string `locationName:"accountId" type:"string"` + + // The Amazon Resource Name (ARN) of the repository. + Arn *string `type:"string"` + + // The URL to use for cloning the repository over HTTPS. + CloneUrlHttp *string `locationName:"cloneUrlHttp" type:"string"` + + // The URL to use for cloning the repository over SSH. + CloneUrlSsh *string `locationName:"cloneUrlSsh" type:"string"` + + // The date and time the repository was created, in timestamp format. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The repository's default branch name. 
+ DefaultBranch *string `locationName:"defaultBranch" min:"1" type:"string"` + + // The date and time the repository was last modified, in timestamp format. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp" timestampFormat:"unix"` + + // A comment or description about the repository. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The ID of the repository. + RepositoryId *string `locationName:"repositoryId" type:"string"` + + // The repository's name. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s RepositoryMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryMetadata) GoString() string { + return s.String() +} + +// Information about a repository name and ID. +type RepositoryNameIdPair struct { + _ struct{} `type:"structure"` + + // The ID associated with the repository name. + RepositoryId *string `locationName:"repositoryId" type:"string"` + + // Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9), + // ".", "_", and "-". Additionally, the suffix ".git" is prohibited in a repository + // name. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s RepositoryNameIdPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryNameIdPair) GoString() string { + return s.String() +} + +// Represents the input of an update default branch operation. +type UpdateDefaultBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the branch to set as the default. + DefaultBranchName *string `locationName:"defaultBranchName" min:"1" type:"string" required:"true"` + + // The name of the repository to set or change the default branch for. 
+ RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDefaultBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDefaultBranchInput) GoString() string { + return s.String() +} + +type UpdateDefaultBranchOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDefaultBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDefaultBranchOutput) GoString() string { + return s.String() +} + +// Represents the input of an update repository description operation. +type UpdateRepositoryDescriptionInput struct { + _ struct{} `type:"structure"` + + // The new comment or description for the specified repository. + RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` + + // The name of the repository to set or change the comment or description for. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRepositoryDescriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRepositoryDescriptionInput) GoString() string { + return s.String() +} + +type UpdateRepositoryDescriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRepositoryDescriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRepositoryDescriptionOutput) GoString() string { + return s.String() +} + +// Represents the input of an update repository description operation. 
+type UpdateRepositoryNameInput struct { + _ struct{} `type:"structure"` + + // Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9), + // ".", "_", and "-". Additionally, the suffix ".git" is prohibited in a repository + // name. + NewName *string `locationName:"newName" min:"1" type:"string" required:"true"` + + // Repository name is restricted to alphanumeric characters (a-z, A-Z, 0-9), + // ".", "_", and "-". Additionally, the suffix ".git" is prohibited in a repository + // name. + OldName *string `locationName:"oldName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRepositoryNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRepositoryNameInput) GoString() string { + return s.String() +} + +type UpdateRepositoryNameOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRepositoryNameOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRepositoryNameOutput) GoString() string { + return s.String() +} + +const ( + // @enum OrderEnum + OrderEnumAscending = "ascending" + // @enum OrderEnum + OrderEnumDescending = "descending" +) + +const ( + // @enum SortByEnum + SortByEnumRepositoryName = "repositoryName" + // @enum SortByEnum + SortByEnumLastModifiedDate = "lastModifiedDate" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/codecommitiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codecommitiface provides an interface for the AWS CodeCommit. +package codecommitiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/codecommit" +) + +// CodeCommitAPI is the interface type for codecommit.CodeCommit. +type CodeCommitAPI interface { + BatchGetRepositoriesRequest(*codecommit.BatchGetRepositoriesInput) (*request.Request, *codecommit.BatchGetRepositoriesOutput) + + BatchGetRepositories(*codecommit.BatchGetRepositoriesInput) (*codecommit.BatchGetRepositoriesOutput, error) + + CreateBranchRequest(*codecommit.CreateBranchInput) (*request.Request, *codecommit.CreateBranchOutput) + + CreateBranch(*codecommit.CreateBranchInput) (*codecommit.CreateBranchOutput, error) + + CreateRepositoryRequest(*codecommit.CreateRepositoryInput) (*request.Request, *codecommit.CreateRepositoryOutput) + + CreateRepository(*codecommit.CreateRepositoryInput) (*codecommit.CreateRepositoryOutput, error) + + DeleteRepositoryRequest(*codecommit.DeleteRepositoryInput) (*request.Request, *codecommit.DeleteRepositoryOutput) + + DeleteRepository(*codecommit.DeleteRepositoryInput) (*codecommit.DeleteRepositoryOutput, error) + + GetBranchRequest(*codecommit.GetBranchInput) (*request.Request, *codecommit.GetBranchOutput) + + GetBranch(*codecommit.GetBranchInput) (*codecommit.GetBranchOutput, error) + + GetRepositoryRequest(*codecommit.GetRepositoryInput) (*request.Request, *codecommit.GetRepositoryOutput) + + GetRepository(*codecommit.GetRepositoryInput) (*codecommit.GetRepositoryOutput, error) + + ListBranchesRequest(*codecommit.ListBranchesInput) (*request.Request, *codecommit.ListBranchesOutput) + + ListBranches(*codecommit.ListBranchesInput) (*codecommit.ListBranchesOutput, error) + + 
ListRepositoriesRequest(*codecommit.ListRepositoriesInput) (*request.Request, *codecommit.ListRepositoriesOutput) + + ListRepositories(*codecommit.ListRepositoriesInput) (*codecommit.ListRepositoriesOutput, error) + + UpdateDefaultBranchRequest(*codecommit.UpdateDefaultBranchInput) (*request.Request, *codecommit.UpdateDefaultBranchOutput) + + UpdateDefaultBranch(*codecommit.UpdateDefaultBranchInput) (*codecommit.UpdateDefaultBranchOutput, error) + + UpdateRepositoryDescriptionRequest(*codecommit.UpdateRepositoryDescriptionInput) (*request.Request, *codecommit.UpdateRepositoryDescriptionOutput) + + UpdateRepositoryDescription(*codecommit.UpdateRepositoryDescriptionInput) (*codecommit.UpdateRepositoryDescriptionOutput, error) + + UpdateRepositoryNameRequest(*codecommit.UpdateRepositoryNameInput) (*request.Request, *codecommit.UpdateRepositoryNameOutput) + + UpdateRepositoryName(*codecommit.UpdateRepositoryNameInput) (*codecommit.UpdateRepositoryNameOutput, error) +} + +var _ CodeCommitAPI = (*codecommit.CodeCommit)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,238 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package codecommit_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codecommit" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCodeCommit_BatchGetRepositories() { + svc := codecommit.New(session.New()) + + params := &codecommit.BatchGetRepositoriesInput{ + RepositoryNames: []*string{ // Required + aws.String("RepositoryName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_CreateBranch() { + svc := codecommit.New(session.New()) + + params := &codecommit.CreateBranchInput{ + BranchName: aws.String("BranchName"), // Required + CommitId: aws.String("CommitId"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.CreateBranch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_CreateRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.CreateRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RepositoryDescription: aws.String("RepositoryDescription"), + } + resp, err := svc.CreateRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeCommit_DeleteRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.DeleteRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.DeleteRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_GetBranch() { + svc := codecommit.New(session.New()) + + params := &codecommit.GetBranchInput{ + BranchName: aws.String("BranchName"), + RepositoryName: aws.String("RepositoryName"), + } + resp, err := svc.GetBranch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_GetRepository() { + svc := codecommit.New(session.New()) + + params := &codecommit.GetRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.GetRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_ListBranches() { + svc := codecommit.New(session.New()) + + params := &codecommit.ListBranchesInput{ + RepositoryName: aws.String("RepositoryName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListBranches(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeCommit_ListRepositories() { + svc := codecommit.New(session.New()) + + params := &codecommit.ListRepositoriesInput{ + NextToken: aws.String("NextToken"), + Order: aws.String("OrderEnum"), + SortBy: aws.String("SortByEnum"), + } + resp, err := svc.ListRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_UpdateDefaultBranch() { + svc := codecommit.New(session.New()) + + params := &codecommit.UpdateDefaultBranchInput{ + DefaultBranchName: aws.String("BranchName"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.UpdateDefaultBranch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeCommit_UpdateRepositoryDescription() { + svc := codecommit.New(session.New()) + + params := &codecommit.UpdateRepositoryDescriptionInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RepositoryDescription: aws.String("RepositoryDescription"), + } + resp, err := svc.UpdateRepositoryDescription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeCommit_UpdateRepositoryName() { + svc := codecommit.New(session.New()) + + params := &codecommit.UpdateRepositoryNameInput{ + NewName: aws.String("RepositoryName"), // Required + OldName: aws.String("RepositoryName"), // Required + } + resp, err := svc.UpdateRepositoryName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codecommit/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,94 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codecommit + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the AWS CodeCommit API Reference. This reference provides descriptions +// of the AWS CodeCommit API. +// +// You can use the AWS CodeCommit API to work with the following objects: +// +// Repositories Branches Commits For information about how to use AWS CodeCommit, +// see the AWS CodeCommit User Guide. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type CodeCommit struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "codecommit" + +// New creates a new instance of the CodeCommit client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CodeCommit client from just a session. +// svc := codecommit.New(mySession) +// +// // Create a CodeCommit client with additional configuration +// svc := codecommit.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeCommit { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CodeCommit { + svc := &CodeCommit{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-13", + JSONVersion: "1.1", + TargetPrefix: "CodeCommit_20150413", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CodeCommit operation and runs any +// custom request initialization. +func (c *CodeCommit) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3281 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codedeploy provides a client for AWS CodeDeploy. 
+package codedeploy + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddTagsToOnPremisesInstances = "AddTagsToOnPremisesInstances" + +// AddTagsToOnPremisesInstancesRequest generates a request for the AddTagsToOnPremisesInstances operation. +func (c *CodeDeploy) AddTagsToOnPremisesInstancesRequest(input *AddTagsToOnPremisesInstancesInput) (req *request.Request, output *AddTagsToOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opAddTagsToOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Adds tags to on-premises instances. +func (c *CodeDeploy) AddTagsToOnPremisesInstances(input *AddTagsToOnPremisesInstancesInput) (*AddTagsToOnPremisesInstancesOutput, error) { + req, out := c.AddTagsToOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetApplications = "BatchGetApplications" + +// BatchGetApplicationsRequest generates a request for the BatchGetApplications operation. +func (c *CodeDeploy) BatchGetApplicationsRequest(input *BatchGetApplicationsInput) (req *request.Request, output *BatchGetApplicationsOutput) { + op := &request.Operation{ + Name: opBatchGetApplications, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetApplicationsOutput{} + req.Data = output + return +} + +// Gets information about one or more applications. 
+func (c *CodeDeploy) BatchGetApplications(input *BatchGetApplicationsInput) (*BatchGetApplicationsOutput, error) { + req, out := c.BatchGetApplicationsRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetDeployments = "BatchGetDeployments" + +// BatchGetDeploymentsRequest generates a request for the BatchGetDeployments operation. +func (c *CodeDeploy) BatchGetDeploymentsRequest(input *BatchGetDeploymentsInput) (req *request.Request, output *BatchGetDeploymentsOutput) { + op := &request.Operation{ + Name: opBatchGetDeployments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetDeploymentsOutput{} + req.Data = output + return +} + +// Gets information about one or more deployments. +func (c *CodeDeploy) BatchGetDeployments(input *BatchGetDeploymentsInput) (*BatchGetDeploymentsOutput, error) { + req, out := c.BatchGetDeploymentsRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetOnPremisesInstances = "BatchGetOnPremisesInstances" + +// BatchGetOnPremisesInstancesRequest generates a request for the BatchGetOnPremisesInstances operation. +func (c *CodeDeploy) BatchGetOnPremisesInstancesRequest(input *BatchGetOnPremisesInstancesInput) (req *request.Request, output *BatchGetOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opBatchGetOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Gets information about one or more on-premises instances. 
+func (c *CodeDeploy) BatchGetOnPremisesInstances(input *BatchGetOnPremisesInstancesInput) (*BatchGetOnPremisesInstancesOutput, error) { + req, out := c.BatchGetOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opCreateApplication = "CreateApplication" + +// CreateApplicationRequest generates a request for the CreateApplication operation. +func (c *CodeDeploy) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *CreateApplicationOutput) { + op := &request.Operation{ + Name: opCreateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateApplicationOutput{} + req.Data = output + return +} + +// Creates a new application. +func (c *CodeDeploy) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { + req, out := c.CreateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a request for the CreateDeployment operation. +func (c *CodeDeploy) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentOutput{} + req.Data = output + return +} + +// Deploys an application revision through the specified deployment group. +func (c *CodeDeploy) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { + req, out := c.CreateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeploymentConfig = "CreateDeploymentConfig" + +// CreateDeploymentConfigRequest generates a request for the CreateDeploymentConfig operation. 
+func (c *CodeDeploy) CreateDeploymentConfigRequest(input *CreateDeploymentConfigInput) (req *request.Request, output *CreateDeploymentConfigOutput) { + op := &request.Operation{ + Name: opCreateDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentConfigOutput{} + req.Data = output + return +} + +// Creates a new deployment configuration. +func (c *CodeDeploy) CreateDeploymentConfig(input *CreateDeploymentConfigInput) (*CreateDeploymentConfigOutput, error) { + req, out := c.CreateDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeploymentGroup = "CreateDeploymentGroup" + +// CreateDeploymentGroupRequest generates a request for the CreateDeploymentGroup operation. +func (c *CodeDeploy) CreateDeploymentGroupRequest(input *CreateDeploymentGroupInput) (req *request.Request, output *CreateDeploymentGroupOutput) { + op := &request.Operation{ + Name: opCreateDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentGroupOutput{} + req.Data = output + return +} + +// Creates a new deployment group for application revisions to be deployed to. +func (c *CodeDeploy) CreateDeploymentGroup(input *CreateDeploymentGroupInput) (*CreateDeploymentGroupOutput, error) { + req, out := c.CreateDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApplication = "DeleteApplication" + +// DeleteApplicationRequest generates a request for the DeleteApplication operation. 
+func (c *CodeDeploy) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { + op := &request.Operation{ + Name: opDeleteApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteApplicationOutput{} + req.Data = output + return +} + +// Deletes an application. +func (c *CodeDeploy) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { + req, out := c.DeleteApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeploymentConfig = "DeleteDeploymentConfig" + +// DeleteDeploymentConfigRequest generates a request for the DeleteDeploymentConfig operation. +func (c *CodeDeploy) DeleteDeploymentConfigRequest(input *DeleteDeploymentConfigInput) (req *request.Request, output *DeleteDeploymentConfigOutput) { + op := &request.Operation{ + Name: opDeleteDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDeploymentConfigOutput{} + req.Data = output + return +} + +// Deletes a deployment configuration. +// +// A deployment configuration cannot be deleted if it is currently in use. +// Also, predefined configurations cannot be deleted. 
+func (c *CodeDeploy) DeleteDeploymentConfig(input *DeleteDeploymentConfigInput) (*DeleteDeploymentConfigOutput, error) { + req, out := c.DeleteDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeploymentGroup = "DeleteDeploymentGroup" + +// DeleteDeploymentGroupRequest generates a request for the DeleteDeploymentGroup operation. +func (c *CodeDeploy) DeleteDeploymentGroupRequest(input *DeleteDeploymentGroupInput) (req *request.Request, output *DeleteDeploymentGroupOutput) { + op := &request.Operation{ + Name: opDeleteDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDeploymentGroupOutput{} + req.Data = output + return +} + +// Deletes a deployment group. +func (c *CodeDeploy) DeleteDeploymentGroup(input *DeleteDeploymentGroupInput) (*DeleteDeploymentGroupOutput, error) { + req, out := c.DeleteDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterOnPremisesInstance = "DeregisterOnPremisesInstance" + +// DeregisterOnPremisesInstanceRequest generates a request for the DeregisterOnPremisesInstance operation. +func (c *CodeDeploy) DeregisterOnPremisesInstanceRequest(input *DeregisterOnPremisesInstanceInput) (req *request.Request, output *DeregisterOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Deregisters an on-premises instance. 
+func (c *CodeDeploy) DeregisterOnPremisesInstance(input *DeregisterOnPremisesInstanceInput) (*DeregisterOnPremisesInstanceOutput, error) { + req, out := c.DeregisterOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetApplication = "GetApplication" + +// GetApplicationRequest generates a request for the GetApplication operation. +func (c *CodeDeploy) GetApplicationRequest(input *GetApplicationInput) (req *request.Request, output *GetApplicationOutput) { + op := &request.Operation{ + Name: opGetApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetApplicationOutput{} + req.Data = output + return +} + +// Gets information about an application. +func (c *CodeDeploy) GetApplication(input *GetApplicationInput) (*GetApplicationOutput, error) { + req, out := c.GetApplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetApplicationRevision = "GetApplicationRevision" + +// GetApplicationRevisionRequest generates a request for the GetApplicationRevision operation. +func (c *CodeDeploy) GetApplicationRevisionRequest(input *GetApplicationRevisionInput) (req *request.Request, output *GetApplicationRevisionOutput) { + op := &request.Operation{ + Name: opGetApplicationRevision, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetApplicationRevisionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetApplicationRevisionOutput{} + req.Data = output + return +} + +// Gets information about an application revision. +func (c *CodeDeploy) GetApplicationRevision(input *GetApplicationRevisionInput) (*GetApplicationRevisionOutput, error) { + req, out := c.GetApplicationRevisionRequest(input) + err := req.Send() + return out, err +} + +const opGetDeployment = "GetDeployment" + +// GetDeploymentRequest generates a request for the GetDeployment operation. 
+func (c *CodeDeploy) GetDeploymentRequest(input *GetDeploymentInput) (req *request.Request, output *GetDeploymentOutput) { + op := &request.Operation{ + Name: opGetDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentOutput{} + req.Data = output + return +} + +// Gets information about a deployment. +func (c *CodeDeploy) GetDeployment(input *GetDeploymentInput) (*GetDeploymentOutput, error) { + req, out := c.GetDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentConfig = "GetDeploymentConfig" + +// GetDeploymentConfigRequest generates a request for the GetDeploymentConfig operation. +func (c *CodeDeploy) GetDeploymentConfigRequest(input *GetDeploymentConfigInput) (req *request.Request, output *GetDeploymentConfigOutput) { + op := &request.Operation{ + Name: opGetDeploymentConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentConfigOutput{} + req.Data = output + return +} + +// Gets information about a deployment configuration. +func (c *CodeDeploy) GetDeploymentConfig(input *GetDeploymentConfigInput) (*GetDeploymentConfigOutput, error) { + req, out := c.GetDeploymentConfigRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentGroup = "GetDeploymentGroup" + +// GetDeploymentGroupRequest generates a request for the GetDeploymentGroup operation. 
+func (c *CodeDeploy) GetDeploymentGroupRequest(input *GetDeploymentGroupInput) (req *request.Request, output *GetDeploymentGroupOutput) { + op := &request.Operation{ + Name: opGetDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentGroupOutput{} + req.Data = output + return +} + +// Gets information about a deployment group. +func (c *CodeDeploy) GetDeploymentGroup(input *GetDeploymentGroupInput) (*GetDeploymentGroupOutput, error) { + req, out := c.GetDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +const opGetDeploymentInstance = "GetDeploymentInstance" + +// GetDeploymentInstanceRequest generates a request for the GetDeploymentInstance operation. +func (c *CodeDeploy) GetDeploymentInstanceRequest(input *GetDeploymentInstanceInput) (req *request.Request, output *GetDeploymentInstanceOutput) { + op := &request.Operation{ + Name: opGetDeploymentInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeploymentInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeploymentInstanceOutput{} + req.Data = output + return +} + +// Gets information about an instance as part of a deployment. +func (c *CodeDeploy) GetDeploymentInstance(input *GetDeploymentInstanceInput) (*GetDeploymentInstanceOutput, error) { + req, out := c.GetDeploymentInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetOnPremisesInstance = "GetOnPremisesInstance" + +// GetOnPremisesInstanceRequest generates a request for the GetOnPremisesInstance operation. 
+func (c *CodeDeploy) GetOnPremisesInstanceRequest(input *GetOnPremisesInstanceInput) (req *request.Request, output *GetOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opGetOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Gets information about an on-premises instance. +func (c *CodeDeploy) GetOnPremisesInstance(input *GetOnPremisesInstanceInput) (*GetOnPremisesInstanceOutput, error) { + req, out := c.GetOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opListApplicationRevisions = "ListApplicationRevisions" + +// ListApplicationRevisionsRequest generates a request for the ListApplicationRevisions operation. +func (c *CodeDeploy) ListApplicationRevisionsRequest(input *ListApplicationRevisionsInput) (req *request.Request, output *ListApplicationRevisionsOutput) { + op := &request.Operation{ + Name: opListApplicationRevisions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApplicationRevisionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListApplicationRevisionsOutput{} + req.Data = output + return +} + +// Lists information about revisions for an application. 
+func (c *CodeDeploy) ListApplicationRevisions(input *ListApplicationRevisionsInput) (*ListApplicationRevisionsOutput, error) { + req, out := c.ListApplicationRevisionsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListApplicationRevisionsPages(input *ListApplicationRevisionsInput, fn func(p *ListApplicationRevisionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListApplicationRevisionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListApplicationRevisionsOutput), lastPage) + }) +} + +const opListApplications = "ListApplications" + +// ListApplicationsRequest generates a request for the ListApplications operation. +func (c *CodeDeploy) ListApplicationsRequest(input *ListApplicationsInput) (req *request.Request, output *ListApplicationsOutput) { + op := &request.Operation{ + Name: opListApplications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListApplicationsOutput{} + req.Data = output + return +} + +// Lists the applications registered with the applicable IAM user or AWS account. 
+func (c *CodeDeploy) ListApplications(input *ListApplicationsInput) (*ListApplicationsOutput, error) { + req, out := c.ListApplicationsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListApplicationsPages(input *ListApplicationsInput, fn func(p *ListApplicationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListApplicationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListApplicationsOutput), lastPage) + }) +} + +const opListDeploymentConfigs = "ListDeploymentConfigs" + +// ListDeploymentConfigsRequest generates a request for the ListDeploymentConfigs operation. +func (c *CodeDeploy) ListDeploymentConfigsRequest(input *ListDeploymentConfigsInput) (req *request.Request, output *ListDeploymentConfigsOutput) { + op := &request.Operation{ + Name: opListDeploymentConfigs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentConfigsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentConfigsOutput{} + req.Data = output + return +} + +// Lists the deployment configurations with the applicable IAM user or AWS account. 
+func (c *CodeDeploy) ListDeploymentConfigs(input *ListDeploymentConfigsInput) (*ListDeploymentConfigsOutput, error) { + req, out := c.ListDeploymentConfigsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListDeploymentConfigsPages(input *ListDeploymentConfigsInput, fn func(p *ListDeploymentConfigsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentConfigsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentConfigsOutput), lastPage) + }) +} + +const opListDeploymentGroups = "ListDeploymentGroups" + +// ListDeploymentGroupsRequest generates a request for the ListDeploymentGroups operation. +func (c *CodeDeploy) ListDeploymentGroupsRequest(input *ListDeploymentGroupsInput) (req *request.Request, output *ListDeploymentGroupsOutput) { + op := &request.Operation{ + Name: opListDeploymentGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentGroupsOutput{} + req.Data = output + return +} + +// Lists the deployment groups for an application registered with the applicable +// IAM user or AWS account. 
+func (c *CodeDeploy) ListDeploymentGroups(input *ListDeploymentGroupsInput) (*ListDeploymentGroupsOutput, error) { + req, out := c.ListDeploymentGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListDeploymentGroupsPages(input *ListDeploymentGroupsInput, fn func(p *ListDeploymentGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentGroupsOutput), lastPage) + }) +} + +const opListDeploymentInstances = "ListDeploymentInstances" + +// ListDeploymentInstancesRequest generates a request for the ListDeploymentInstances operation. +func (c *CodeDeploy) ListDeploymentInstancesRequest(input *ListDeploymentInstancesInput) (req *request.Request, output *ListDeploymentInstancesOutput) { + op := &request.Operation{ + Name: opListDeploymentInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentInstancesOutput{} + req.Data = output + return +} + +// Lists the instances for a deployment associated with the applicable IAM user +// or AWS account. 
+func (c *CodeDeploy) ListDeploymentInstances(input *ListDeploymentInstancesInput) (*ListDeploymentInstancesOutput, error) { + req, out := c.ListDeploymentInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListDeploymentInstancesPages(input *ListDeploymentInstancesInput, fn func(p *ListDeploymentInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentInstancesOutput), lastPage) + }) +} + +const opListDeployments = "ListDeployments" + +// ListDeploymentsRequest generates a request for the ListDeployments operation. +func (c *CodeDeploy) ListDeploymentsRequest(input *ListDeploymentsInput) (req *request.Request, output *ListDeploymentsOutput) { + op := &request.Operation{ + Name: opListDeployments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeploymentsOutput{} + req.Data = output + return +} + +// Lists the deployments within a deployment group for an application registered +// with the applicable IAM user or AWS account. 
+func (c *CodeDeploy) ListDeployments(input *ListDeploymentsInput) (*ListDeploymentsOutput, error) { + req, out := c.ListDeploymentsRequest(input) + err := req.Send() + return out, err +} + +func (c *CodeDeploy) ListDeploymentsPages(input *ListDeploymentsInput, fn func(p *ListDeploymentsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDeploymentsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDeploymentsOutput), lastPage) + }) +} + +const opListOnPremisesInstances = "ListOnPremisesInstances" + +// ListOnPremisesInstancesRequest generates a request for the ListOnPremisesInstances operation. +func (c *CodeDeploy) ListOnPremisesInstancesRequest(input *ListOnPremisesInstancesInput) (req *request.Request, output *ListOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opListOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Gets a list of one or more on-premises instance names. +// +// Unless otherwise specified, both registered and deregistered on-premises +// instance names will be listed. To list only registered or deregistered on-premises +// instance names, use the registration status parameter. +func (c *CodeDeploy) ListOnPremisesInstances(input *ListOnPremisesInstancesInput) (*ListOnPremisesInstancesOutput, error) { + req, out := c.ListOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterApplicationRevision = "RegisterApplicationRevision" + +// RegisterApplicationRevisionRequest generates a request for the RegisterApplicationRevision operation. 
+func (c *CodeDeploy) RegisterApplicationRevisionRequest(input *RegisterApplicationRevisionInput) (req *request.Request, output *RegisterApplicationRevisionOutput) { + op := &request.Operation{ + Name: opRegisterApplicationRevision, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterApplicationRevisionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterApplicationRevisionOutput{} + req.Data = output + return +} + +// Registers with AWS CodeDeploy a revision for the specified application. +func (c *CodeDeploy) RegisterApplicationRevision(input *RegisterApplicationRevisionInput) (*RegisterApplicationRevisionOutput, error) { + req, out := c.RegisterApplicationRevisionRequest(input) + err := req.Send() + return out, err +} + +const opRegisterOnPremisesInstance = "RegisterOnPremisesInstance" + +// RegisterOnPremisesInstanceRequest generates a request for the RegisterOnPremisesInstance operation. +func (c *CodeDeploy) RegisterOnPremisesInstanceRequest(input *RegisterOnPremisesInstanceInput) (req *request.Request, output *RegisterOnPremisesInstanceOutput) { + op := &request.Operation{ + Name: opRegisterOnPremisesInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterOnPremisesInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterOnPremisesInstanceOutput{} + req.Data = output + return +} + +// Registers an on-premises instance. 
+func (c *CodeDeploy) RegisterOnPremisesInstance(input *RegisterOnPremisesInstanceInput) (*RegisterOnPremisesInstanceOutput, error) { + req, out := c.RegisterOnPremisesInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromOnPremisesInstances = "RemoveTagsFromOnPremisesInstances" + +// RemoveTagsFromOnPremisesInstancesRequest generates a request for the RemoveTagsFromOnPremisesInstances operation. +func (c *CodeDeploy) RemoveTagsFromOnPremisesInstancesRequest(input *RemoveTagsFromOnPremisesInstancesInput) (req *request.Request, output *RemoveTagsFromOnPremisesInstancesOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromOnPremisesInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromOnPremisesInstancesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromOnPremisesInstancesOutput{} + req.Data = output + return +} + +// Removes one or more tags from one or more on-premises instances. +func (c *CodeDeploy) RemoveTagsFromOnPremisesInstances(input *RemoveTagsFromOnPremisesInstancesInput) (*RemoveTagsFromOnPremisesInstancesOutput, error) { + req, out := c.RemoveTagsFromOnPremisesInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStopDeployment = "StopDeployment" + +// StopDeploymentRequest generates a request for the StopDeployment operation. +func (c *CodeDeploy) StopDeploymentRequest(input *StopDeploymentInput) (req *request.Request, output *StopDeploymentOutput) { + op := &request.Operation{ + Name: opStopDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &StopDeploymentOutput{} + req.Data = output + return +} + +// Attempts to stop an ongoing deployment. 
+func (c *CodeDeploy) StopDeployment(input *StopDeploymentInput) (*StopDeploymentOutput, error) { + req, out := c.StopDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApplication = "UpdateApplication" + +// UpdateApplicationRequest generates a request for the UpdateApplication operation. +func (c *CodeDeploy) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *UpdateApplicationOutput) { + op := &request.Operation{ + Name: opUpdateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateApplicationOutput{} + req.Data = output + return +} + +// Changes an existing application's name. +func (c *CodeDeploy) UpdateApplication(input *UpdateApplicationInput) (*UpdateApplicationOutput, error) { + req, out := c.UpdateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDeploymentGroup = "UpdateDeploymentGroup" + +// UpdateDeploymentGroupRequest generates a request for the UpdateDeploymentGroup operation. +func (c *CodeDeploy) UpdateDeploymentGroupRequest(input *UpdateDeploymentGroupInput) (req *request.Request, output *UpdateDeploymentGroupOutput) { + op := &request.Operation{ + Name: opUpdateDeploymentGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDeploymentGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDeploymentGroupOutput{} + req.Data = output + return +} + +// Changes information about an existing deployment group. 
+func (c *CodeDeploy) UpdateDeploymentGroup(input *UpdateDeploymentGroupInput) (*UpdateDeploymentGroupOutput, error) { + req, out := c.UpdateDeploymentGroupRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of an adds tags to on-premises instance operation. +type AddTagsToOnPremisesInstancesInput struct { + _ struct{} `type:"structure"` + + // The names of the on-premises instances to add tags to. + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + + // The tag key-value pairs to add to the on-premises instances. + // + // Keys and values are both required. Keys cannot be nulls or empty strings. + // Value-only tags are not allowed. + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToOnPremisesInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToOnPremisesInstancesInput) GoString() string { + return s.String() +} + +type AddTagsToOnPremisesInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToOnPremisesInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToOnPremisesInstancesOutput) GoString() string { + return s.String() +} + +// Information about an application. +type ApplicationInfo struct { + _ struct{} `type:"structure"` + + // The application ID. + ApplicationId *string `locationName:"applicationId" type:"string"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // The time that the application was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` + + // True if the user has authenticated with GitHub for the specified application; + // otherwise, false. 
+ LinkedToGitHub *bool `locationName:"linkedToGitHub" type:"boolean"` +} + +// String returns the string representation +func (s ApplicationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationInfo) GoString() string { + return s.String() +} + +// Information about an Auto Scaling group. +type AutoScalingGroup struct { + _ struct{} `type:"structure"` + + // An Auto Scaling lifecycle event hook name. + Hook *string `locationName:"hook" type:"string"` + + // The Auto Scaling group name. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s AutoScalingGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingGroup) GoString() string { + return s.String() +} + +// Represents the input of a batch get applications operation. +type BatchGetApplicationsInput struct { + _ struct{} `type:"structure"` + + // A list of application names, with multiple application names separated by + // spaces. + ApplicationNames []*string `locationName:"applicationNames" type:"list"` +} + +// String returns the string representation +func (s BatchGetApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetApplicationsInput) GoString() string { + return s.String() +} + +// Represents the output of a batch get applications operation. +type BatchGetApplicationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the applications. 
+ ApplicationsInfo []*ApplicationInfo `locationName:"applicationsInfo" type:"list"` +} + +// String returns the string representation +func (s BatchGetApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetApplicationsOutput) GoString() string { + return s.String() +} + +// Represents the input of a batch get deployments operation. +type BatchGetDeploymentsInput struct { + _ struct{} `type:"structure"` + + // A list of deployment IDs, with multiple deployment IDs separated by spaces. + DeploymentIds []*string `locationName:"deploymentIds" type:"list"` +} + +// String returns the string representation +func (s BatchGetDeploymentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetDeploymentsInput) GoString() string { + return s.String() +} + +// Represents the output of a batch get deployments operation. +type BatchGetDeploymentsOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployments. + DeploymentsInfo []*DeploymentInfo `locationName:"deploymentsInfo" type:"list"` +} + +// String returns the string representation +func (s BatchGetDeploymentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetDeploymentsOutput) GoString() string { + return s.String() +} + +// Represents the input of a batch get on-premises instances operation. +type BatchGetOnPremisesInstancesInput struct { + _ struct{} `type:"structure"` + + // The names of the on-premises instances to get information about. 
+ InstanceNames []*string `locationName:"instanceNames" type:"list"` +} + +// String returns the string representation +func (s BatchGetOnPremisesInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetOnPremisesInstancesInput) GoString() string { + return s.String() +} + +// Represents the output of a batch get on-premises instances operation. +type BatchGetOnPremisesInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the on-premises instances. + InstanceInfos []*InstanceInfo `locationName:"instanceInfos" type:"list"` +} + +// String returns the string representation +func (s BatchGetOnPremisesInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetOnPremisesInstancesOutput) GoString() string { + return s.String() +} + +// Represents the input of a create application operation. +type CreateApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of the application. This name must be unique with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationInput) GoString() string { + return s.String() +} + +// Represents the output of a create application operation. +type CreateApplicationOutput struct { + _ struct{} `type:"structure"` + + // A unique application ID. 
+ ApplicationId *string `locationName:"applicationId" type:"string"` +} + +// String returns the string representation +func (s CreateApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationOutput) GoString() string { + return s.String() +} + +// Represents the input of a create deployment configuration operation. +type CreateDeploymentConfigInput struct { + _ struct{} `type:"structure"` + + // The name of the deployment configuration to create. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"` + + // The minimum number of healthy instances that should be available at any time + // during the deployment. There are two parameters expected in the input: type + // and value. + // + // The type parameter takes either of the following values: + // + // HOST_COUNT: The value parameter represents the minimum number of healthy + // instances, as an absolute value. FLEET_PERCENT: The value parameter represents + // the minimum number of healthy instances, as a percentage of the total number + // of instances in the deployment. If you specify FLEET_PERCENT, then at the + // start of the deployment AWS CodeDeploy converts the percentage to the equivalent + // number of instances and rounds fractional instances up. The value parameter + // takes an integer. + // + // For example, to set a minimum of 95% healthy instances, specify a type of + // FLEET_PERCENT and a value of 95. + MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"` +} + +// String returns the string representation +func (s CreateDeploymentConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentConfigInput) GoString() string { + return s.String() +} + +// Represents the output of a create deployment configuration operation. 
+type CreateDeploymentConfigOutput struct { + _ struct{} `type:"structure"` + + // A unique deployment configuration ID. + DeploymentConfigId *string `locationName:"deploymentConfigId" type:"string"` +} + +// String returns the string representation +func (s CreateDeploymentConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentConfigOutput) GoString() string { + return s.String() +} + +// Represents the input of a create deployment group operation. +type CreateDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // A list of associated Auto Scaling groups. + AutoScalingGroups []*string `locationName:"autoScalingGroups" type:"list"` + + // If specified, the deployment configuration name must be one of the predefined + // values, or it can be a custom deployment configuration: + // + // CodeDeployDefault.AllAtOnce deploys an application revision to up to all + // of the instances at once. The overall deployment succeeds if the application + // revision deploys to at least one of the instances. The overall deployment + // fails after the application revision fails to deploy to all of the instances. + // For example, for 9 instances, deploy to up to all 9 instances at once. The + // overall deployment succeeds if any of the 9 instances is successfully deployed + // to, and it fails if all 9 instances fail to be deployed to. CodeDeployDefault.HalfAtATime + // deploys to up to half of the instances at a time (with fractions rounded + // down). The overall deployment succeeds if the application revision deploys + // to at least half of the instances (with fractions rounded up); otherwise, + // the deployment fails. 
For example, for 9 instances, deploy to up to 4 instances + // at a time. The overall deployment succeeds if 5 or more instances are successfully + // deployed to; otherwise, the deployment fails. Note that the deployment may + // successfully deploy to some instances, even if the overall deployment fails. + // CodeDeployDefault.OneAtATime deploys the application revision to only one + // of the instances at a time. The overall deployment succeeds if the application + // revision deploys to all of the instances. The overall deployment fails after + // the application revision first fails to deploy to any one instances. For + // example, for 9 instances, deploy to one instance at a time. The overall deployment + // succeeds if all 9 instances are successfully deployed to, and it fails if + // any of one of the 9 instances fail to be deployed to. Note that the deployment + // may successfully deploy to some instances, even if the overall deployment + // fails. This is the default deployment configuration if a configuration isn't + // specified for either the deployment or the deployment group. To create a + // custom deployment configuration, call the create deployment configuration + // operation. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // The name of an existing deployment group for the specified application. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"` + + // The Amazon EC2 tags to filter on. + Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"` + + // The on-premises instance tags to filter on. + OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"` + + // A service role ARN that allows AWS CodeDeploy to act on the user's behalf + // when interacting with AWS services. 
+ ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDeploymentGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentGroupInput) GoString() string { + return s.String() +} + +// Represents the output of a create deployment group operation. +type CreateDeploymentGroupOutput struct { + _ struct{} `type:"structure"` + + // A unique deployment group ID. + DeploymentGroupId *string `locationName:"deploymentGroupId" type:"string"` +} + +// String returns the string representation +func (s CreateDeploymentGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a create deployment operation. +type CreateDeploymentInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The name of an existing deployment configuration associated with the applicable + // IAM user or AWS account. + // + // If not specified, the value configured in the deployment group will be used + // as the default. If the deployment group does not have a deployment configuration + // associated with it, then CodeDeployDefault.OneAtATime will be used by default. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // The deployment group's name. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"` + + // A comment about the deployment. 
+ Description *string `locationName:"description" type:"string"` + + // If set to true, then if the deployment causes the ApplicationStop deployment + // lifecycle event to fail to a specific instance, the deployment will not be + // considered to have failed to that instance at that point and will continue + // on to the BeforeInstall deployment lifecycle event. + // + // If set to false or not specified, then if the deployment causes the ApplicationStop + // deployment lifecycle event to fail to a specific instance, the deployment + // will stop to that instance, and the deployment to that instance will be considered + // to have failed. + IgnoreApplicationStopFailures *bool `locationName:"ignoreApplicationStopFailures" type:"boolean"` + + // The type of revision to deploy, along with information about the revision's + // location. + Revision *RevisionLocation `locationName:"revision" type:"structure"` +} + +// String returns the string representation +func (s CreateDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentInput) GoString() string { + return s.String() +} + +// Represents the output of a create deployment operation. +type CreateDeploymentOutput struct { + _ struct{} `type:"structure"` + + // A unique deployment ID. + DeploymentId *string `locationName:"deploymentId" type:"string"` +} + +// String returns the string representation +func (s CreateDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete application operation. +type DeleteApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. 
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationInput) GoString() string { + return s.String() +} + +type DeleteApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete deployment configuration operation. +type DeleteDeploymentConfigInput struct { + _ struct{} `type:"structure"` + + // The name of an existing deployment configuration associated with the applicable + // IAM user or AWS account. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeploymentConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentConfigInput) GoString() string { + return s.String() +} + +type DeleteDeploymentConfigOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDeploymentConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentConfigOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete deployment group operation. +type DeleteDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. 
+ ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The name of an existing deployment group for the specified application. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeploymentGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentGroupInput) GoString() string { + return s.String() +} + +// Represents the output of a delete deployment group operation. +type DeleteDeploymentGroupOutput struct { + _ struct{} `type:"structure"` + + // If the output contains no data, and the corresponding deployment group contained + // at least one Auto Scaling group, AWS CodeDeploy successfully removed all + // corresponding Auto Scaling lifecycle event hooks from the Amazon EC2 instances + // in the Auto Scaling. If the output does contain data, AWS CodeDeploy could + // not remove some Auto Scaling lifecycle event hooks from the Amazon EC2 instances + // in the Auto Scaling group. + HooksNotCleanedUp []*AutoScalingGroup `locationName:"hooksNotCleanedUp" type:"list"` +} + +// String returns the string representation +func (s DeleteDeploymentGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeploymentGroupOutput) GoString() string { + return s.String() +} + +// Information about a deployment configuration. +type DeploymentConfigInfo struct { + _ struct{} `type:"structure"` + + // The time that the deployment configuration was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` + + // The deployment configuration ID. + DeploymentConfigId *string `locationName:"deploymentConfigId" type:"string"` + + // The deployment configuration name. 
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // Information about the number or percentage of minimum healthy instances. + MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"` +} + +// String returns the string representation +func (s DeploymentConfigInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentConfigInfo) GoString() string { + return s.String() +} + +// Information about a deployment group. +type DeploymentGroupInfo struct { + _ struct{} `type:"structure"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // A list of associated Auto Scaling groups. + AutoScalingGroups []*AutoScalingGroup `locationName:"autoScalingGroups" type:"list"` + + // The deployment configuration name. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // The deployment group ID. + DeploymentGroupId *string `locationName:"deploymentGroupId" type:"string"` + + // The deployment group name. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"` + + // The Amazon EC2 tags to filter on. + Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"` + + // The on-premises instance tags to filter on. + OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"` + + // A service role ARN. + ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` + + // Information about the deployment group's target revision, including the revision's + // type and its location. 
+ TargetRevision *RevisionLocation `locationName:"targetRevision" type:"structure"` +} + +// String returns the string representation +func (s DeploymentGroupInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentGroupInfo) GoString() string { + return s.String() +} + +// Information about a deployment. +type DeploymentInfo struct { + _ struct{} `type:"structure"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // A timestamp indicating when the deployment was completed. + CompleteTime *time.Time `locationName:"completeTime" type:"timestamp" timestampFormat:"unix"` + + // A timestamp indicating when the deployment was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"unix"` + + // How the deployment was created: + // + // user: A user created the deployment. autoscaling: Auto Scaling created + // the deployment. + Creator *string `locationName:"creator" type:"string" enum:"DeploymentCreator"` + + // The deployment configuration name. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // The deployment group name. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"` + + // The deployment ID. + DeploymentId *string `locationName:"deploymentId" type:"string"` + + // A summary of the deployment status of the instances in the deployment. + DeploymentOverview *DeploymentOverview `locationName:"deploymentOverview" type:"structure"` + + // A comment about the deployment. + Description *string `locationName:"description" type:"string"` + + // Information about any error associated with this deployment. 
+ ErrorInformation *ErrorInformation `locationName:"errorInformation" type:"structure"` + + // If true, then if the deployment causes the ApplicationStop deployment lifecycle + // event to fail to a specific instance, the deployment will not be considered + // to have failed to that instance at that point and will continue on to the + // BeforeInstall deployment lifecycle event. + // + // If false or not specified, then if the deployment causes the ApplicationStop + // deployment lifecycle event to fail to a specific instance, the deployment + // will stop to that instance, and the deployment to that instance will be considered + // to have failed. + IgnoreApplicationStopFailures *bool `locationName:"ignoreApplicationStopFailures" type:"boolean"` + + // Information about the location of application artifacts that are stored and + // the service to retrieve them from. + Revision *RevisionLocation `locationName:"revision" type:"structure"` + + // A timestamp indicating when the deployment began deploying to the deployment + // group. + // + // Note that in some cases, the reported value of the start time may be later + // than the complete time. This is due to differences in the clock settings + // of various back-end servers that participate in the overall deployment process. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unix"` + + // The current state of the deployment as a whole. + Status *string `locationName:"status" type:"string" enum:"DeploymentStatus"` +} + +// String returns the string representation +func (s DeploymentInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentInfo) GoString() string { + return s.String() +} + +// Information about the deployment status of the instances in the deployment. +type DeploymentOverview struct { + _ struct{} `type:"structure"` + + // The number of instances that have failed in the deployment. 
+ Failed *int64 `type:"long"` + + // The number of instances that are in progress in the deployment. + InProgress *int64 `type:"long"` + + // The number of instances that are pending in the deployment. + Pending *int64 `type:"long"` + + // The number of instances that have been skipped in the deployment. + Skipped *int64 `type:"long"` + + // The number of instances that have succeeded in the deployment. + Succeeded *int64 `type:"long"` +} + +// String returns the string representation +func (s DeploymentOverview) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentOverview) GoString() string { + return s.String() +} + +// Represents the input of a deregister on-premises instance operation. +type DeregisterOnPremisesInstanceInput struct { + _ struct{} `type:"structure"` + + // The name of the on-premises instance to deregister. + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterOnPremisesInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterOnPremisesInstanceInput) GoString() string { + return s.String() +} + +type DeregisterOnPremisesInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterOnPremisesInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterOnPremisesInstanceOutput) GoString() string { + return s.String() +} + +// Diagnostic information about executable scripts that are part of a deployment. +type Diagnostics struct { + _ struct{} `type:"structure"` + + // The associated error code: + // + // Success: The specified script ran. ScriptMissing: The specified script + // was not found in the specified location. 
ScriptNotExecutable: The specified + // script is not a recognized executable file type. ScriptTimedOut: The specified + // script did not finish running in the specified time period. ScriptFailed: + // The specified script failed to run as expected. UnknownError: The specified + // script did not run for an unknown reason. + ErrorCode *string `locationName:"errorCode" type:"string" enum:"LifecycleErrorCode"` + + // The last portion of the associated diagnostic log. + LogTail *string `locationName:"logTail" type:"string"` + + // The message associated with the error. + Message *string `locationName:"message" type:"string"` + + // The name of the script. + ScriptName *string `locationName:"scriptName" type:"string"` +} + +// String returns the string representation +func (s Diagnostics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Diagnostics) GoString() string { + return s.String() +} + +// Information about a tag filter. +type EC2TagFilter struct { + _ struct{} `type:"structure"` + + // The tag filter key. + Key *string `type:"string"` + + // The tag filter type: + // + // KEY_ONLY: Key only. VALUE_ONLY: Value only. KEY_AND_VALUE: Key and value. + Type *string `type:"string" enum:"EC2TagFilterType"` + + // The tag filter value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s EC2TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2TagFilter) GoString() string { + return s.String() +} + +// Information about a deployment error. +type ErrorInformation struct { + _ struct{} `type:"structure"` + + // The error code: + // + // APPLICATION_MISSING: The application was missing. Note that this error + // code will most likely be raised if the application is deleted after the deployment + // is created but before it starts. DEPLOYMENT_GROUP_MISSING: The deployment + // group was missing. 
Note that this error code will most likely be raised if + // the deployment group is deleted after the deployment is created but before + // it starts. HEALTH_CONSTRAINTS: The deployment failed on too many instances + // to be able to successfully deploy within the specified instance health constraints. + // HEALTH_CONSTRAINTS_INVALID: The revision can never successfully deploy within + // the instance health constraints as specified. IAM_ROLE_MISSING: The service + // role cannot be accessed. IAM_ROLE_PERMISSIONS: The service role does not + // have the correct permissions. INTERNAL_ERROR: There was an internal error. + // NO_EC2_SUBSCRIPTION: The calling account is not subscribed to the Amazon + // EC2 service. NO_INSTANCES: No instances were specified, or no instances can + // be found. OVER_MAX_INSTANCES: The maximum number of instances was exceeded. + // THROTTLED: The operation was throttled because the calling account exceeded + // the throttling limits of one or more AWS services. TIMEOUT: The deployment + // has timed out. REVISION_MISSING: The revision ID was missing. Note that this + // error code will most likely be raised if the revision is deleted after the + // deployment is created but before it starts. + Code *string `locationName:"code" type:"string" enum:"ErrorCode"` + + // An accompanying error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ErrorInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorInformation) GoString() string { + return s.String() +} + +// Information about an application revision. +type GenericRevisionInfo struct { + _ struct{} `type:"structure"` + + // A list of deployment groups that use this revision. + DeploymentGroups []*string `locationName:"deploymentGroups" type:"list"` + + // A comment about the revision. 
+ Description *string `locationName:"description" type:"string"` + + // When the revision was first used by AWS CodeDeploy. + FirstUsedTime *time.Time `locationName:"firstUsedTime" type:"timestamp" timestampFormat:"unix"` + + // When the revision was last used by AWS CodeDeploy. + LastUsedTime *time.Time `locationName:"lastUsedTime" type:"timestamp" timestampFormat:"unix"` + + // When the revision was registered with AWS CodeDeploy. + RegisterTime *time.Time `locationName:"registerTime" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s GenericRevisionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenericRevisionInfo) GoString() string { + return s.String() +} + +// Represents the input of a get application operation. +type GetApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationInput) GoString() string { + return s.String() +} + +// Represents the output of a get application operation. +type GetApplicationOutput struct { + _ struct{} `type:"structure"` + + // Information about the application. + Application *ApplicationInfo `locationName:"application" type:"structure"` +} + +// String returns the string representation +func (s GetApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationOutput) GoString() string { + return s.String() +} + +// Represents the input of a get application revision operation. 
+type GetApplicationRevisionInput struct { + _ struct{} `type:"structure"` + + // The name of the application that corresponds to the revision. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // Information about the application revision to get, including the revision's + // type and its location. + Revision *RevisionLocation `locationName:"revision" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetApplicationRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationRevisionInput) GoString() string { + return s.String() +} + +// Represents the output of a get application revision operation. +type GetApplicationRevisionOutput struct { + _ struct{} `type:"structure"` + + // The name of the application that corresponds to the revision. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // Additional information about the revision, including the revision's type + // and its location. + Revision *RevisionLocation `locationName:"revision" type:"structure"` + + // General information about the revision. + RevisionInfo *GenericRevisionInfo `locationName:"revisionInfo" type:"structure"` +} + +// String returns the string representation +func (s GetApplicationRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApplicationRevisionOutput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment configuration operation. +type GetDeploymentConfigInput struct { + _ struct{} `type:"structure"` + + // The name of an existing deployment configuration associated with the applicable + // IAM user or AWS account. 
+ DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentConfigInput) GoString() string { + return s.String() +} + +// Represents the output of a get deployment configuration operation. +type GetDeploymentConfigOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployment configuration. + DeploymentConfigInfo *DeploymentConfigInfo `locationName:"deploymentConfigInfo" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentConfigOutput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment group operation. +type GetDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The name of an existing deployment group for the specified application. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentGroupInput) GoString() string { + return s.String() +} + +// Represents the output of a get deployment group operation. +type GetDeploymentGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployment group. 
+ DeploymentGroupInfo *DeploymentGroupInfo `locationName:"deploymentGroupInfo" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment operation. +type GetDeploymentInput struct { + _ struct{} `type:"structure"` + + // An existing deployment ID associated with the applicable IAM user or AWS + // account. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentInput) GoString() string { + return s.String() +} + +// Represents the input of a get deployment instance operation. +type GetDeploymentInstanceInput struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` + + // The unique ID of an instance in the deployment's deployment group. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeploymentInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentInstanceInput) GoString() string { + return s.String() +} + +// Represents the output of a get deployment instance operation. +type GetDeploymentInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the instance. 
+ InstanceSummary *InstanceSummary `locationName:"instanceSummary" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentInstanceOutput) GoString() string { + return s.String() +} + +// Represents the output of a get deployment operation. +type GetDeploymentOutput struct { + _ struct{} `type:"structure"` + + // Information about the deployment. + DeploymentInfo *DeploymentInfo `locationName:"deploymentInfo" type:"structure"` +} + +// String returns the string representation +func (s GetDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeploymentOutput) GoString() string { + return s.String() +} + +// Represents the input of a get on-premises instance operation. +type GetOnPremisesInstanceInput struct { + _ struct{} `type:"structure"` + + // The name of the on-premises instance to get information about + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOnPremisesInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOnPremisesInstanceInput) GoString() string { + return s.String() +} + +// Represents the output of a get on-premises instance operation. +type GetOnPremisesInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the on-premises instance. 
+ InstanceInfo *InstanceInfo `locationName:"instanceInfo" type:"structure"` +} + +// String returns the string representation +func (s GetOnPremisesInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOnPremisesInstanceOutput) GoString() string { + return s.String() +} + +// Information about the location of application artifacts that are stored in +// GitHub. +type GitHubLocation struct { + _ struct{} `type:"structure"` + + // The SHA1 commit ID of the GitHub commit that references the that represents + // the bundled artifacts for the application revision. + CommitId *string `locationName:"commitId" type:"string"` + + // The GitHub account and repository pair that stores a reference to the commit + // that represents the bundled artifacts for the application revision. + // + // Specified as account/repository. + Repository *string `locationName:"repository" type:"string"` +} + +// String returns the string representation +func (s GitHubLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GitHubLocation) GoString() string { + return s.String() +} + +// Information about an on-premises instance. +type InstanceInfo struct { + _ struct{} `type:"structure"` + + // If the on-premises instance was deregistered, the time that the on-premises + // instance was deregistered. + DeregisterTime *time.Time `locationName:"deregisterTime" type:"timestamp" timestampFormat:"unix"` + + // The IAM user ARN associated with the on-premises instance. + IamUserArn *string `locationName:"iamUserArn" type:"string"` + + // The ARN of the on-premises instance. + InstanceArn *string `locationName:"instanceArn" type:"string"` + + // The name of the on-premises instance. + InstanceName *string `locationName:"instanceName" type:"string"` + + // The time that the on-premises instance was registered. 
+ RegisterTime *time.Time `locationName:"registerTime" type:"timestamp" timestampFormat:"unix"` + + // The tags that are currently associated with the on-premises instance. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s InstanceInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceInfo) GoString() string { + return s.String() +} + +// Information about an instance in a deployment. +type InstanceSummary struct { + _ struct{} `type:"structure"` + + // The deployment ID. + DeploymentId *string `locationName:"deploymentId" type:"string"` + + // The instance ID. + InstanceId *string `locationName:"instanceId" type:"string"` + + // A timestamp indicating when the instance information was last updated. + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" timestampFormat:"unix"` + + // A list of lifecycle events for this instance. + LifecycleEvents []*LifecycleEvent `locationName:"lifecycleEvents" type:"list"` + + // The deployment status for this instance: + // + // Pending: The deployment is pending for this instance. In Progress: The + // deployment is in progress for this instance. Succeeded: The deployment has + // succeeded for this instance. Failed: The deployment has failed for this instance. + // Skipped: The deployment has been skipped for this instance. Unknown: The + // deployment status is unknown for this instance. + Status *string `locationName:"status" type:"string" enum:"InstanceStatus"` +} + +// String returns the string representation +func (s InstanceSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceSummary) GoString() string { + return s.String() +} + +// Information about a deployment lifecycle event. +type LifecycleEvent struct { + _ struct{} `type:"structure"` + + // Diagnostic information about the deployment lifecycle event. 
+ Diagnostics *Diagnostics `locationName:"diagnostics" type:"structure"` + + // A timestamp indicating when the deployment lifecycle event ended. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"unix"` + + // The deployment lifecycle event name, such as ApplicationStop, BeforeInstall, + // AfterInstall, ApplicationStart, or ValidateService. + LifecycleEventName *string `locationName:"lifecycleEventName" type:"string"` + + // A timestamp indicating when the deployment lifecycle event started. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unix"` + + // The deployment lifecycle event status: + // + // Pending: The deployment lifecycle event is pending. InProgress: The deployment + // lifecycle event is in progress. Succeeded: The deployment lifecycle event + // has succeeded. Failed: The deployment lifecycle event has failed. Skipped: + // The deployment lifecycle event has been skipped. Unknown: The deployment + // lifecycle event is unknown. + Status *string `locationName:"status" type:"string" enum:"LifecycleEventStatus"` +} + +// String returns the string representation +func (s LifecycleEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleEvent) GoString() string { + return s.String() +} + +// Represents the input of a list application revisions operation. +type ListApplicationRevisionsInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // Whether to list revisions based on whether the revision is the target revision + // of an deployment group: + // + // include: List revisions that are target revisions of a deployment group. + // exclude: Do not list revisions that are target revisions of a deployment + // group. 
ignore: List all revisions, regardless of whether they are target + // revisions of a deployment group. + Deployed *string `locationName:"deployed" type:"string" enum:"ListStateFilterAction"` + + // An identifier that was returned from the previous list application revisions + // call, which can be used to return the next set of applications in the list. + NextToken *string `locationName:"nextToken" type:"string"` + + // A specific Amazon S3 bucket name to limit the search for revisions. + // + // If set to null, then all of the user's buckets will be searched. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // A specific key prefix for the set of Amazon S3 objects to limit the search + // for revisions. + S3KeyPrefix *string `locationName:"s3KeyPrefix" type:"string"` + + // The column name to sort the list results by: + // + // registerTime: Sort the list results by when the revisions were registered + // with AWS CodeDeploy. firstUsedTime: Sort the list results by when the revisions + // were first used by in a deployment. lastUsedTime: Sort the list results by + // when the revisions were last used in a deployment. If not specified or set + // to null, the results will be returned in an arbitrary order. + SortBy *string `locationName:"sortBy" type:"string" enum:"ApplicationRevisionSortBy"` + + // The order to sort the list results by: + // + // ascending: Sort the list of results in ascending order. descending: Sort + // the list of results in descending order. If not specified, the results will + // be sorted in ascending order. + // + // If set to null, the results will be sorted in an arbitrary order. 
+ SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s ListApplicationRevisionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationRevisionsInput) GoString() string { + return s.String() +} + +// Represents the output of a list application revisions operation. +type ListApplicationRevisionsOutput struct { + _ struct{} `type:"structure"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // application revisions call to return the next set of application revisions + // in the list. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of revision locations that contain the matching revisions. + Revisions []*RevisionLocation `locationName:"revisions" type:"list"` +} + +// String returns the string representation +func (s ListApplicationRevisionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationRevisionsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list applications operation. +type ListApplicationsInput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous list applications call, + // which can be used to return the next set of applications in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationsInput) GoString() string { + return s.String() +} + +// Represents the output of a list applications operation. 
+type ListApplicationsOutput struct { + _ struct{} `type:"structure"` + + // A list of application names. + Applications []*string `locationName:"applications" type:"list"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // applications call to return the next set of applications in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list deployment configurations operation. +type ListDeploymentConfigsInput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous list deployment configurations + // call, which can be used to return the next set of deployment configurations + // in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentConfigsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentConfigsInput) GoString() string { + return s.String() +} + +// Represents the output of a list deployment configurations operation. +type ListDeploymentConfigsOutput struct { + _ struct{} `type:"structure"` + + // A list of deployment configurations, including the built-in configurations + // such as CodeDeployDefault.OneAtATime. 
+ DeploymentConfigsList []*string `locationName:"deploymentConfigsList" type:"list"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // deployment configurations call to return the next set of deployment configurations + // in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentConfigsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentConfigsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list deployment groups operation. +type ListDeploymentGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // An identifier that was returned from the previous list deployment groups + // call, which can be used to return the next set of deployment groups in the + // list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a list deployment groups operation. +type ListDeploymentGroupsOutput struct { + _ struct{} `type:"structure"` + + // The application name. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // A list of corresponding deployment group names. 
+ DeploymentGroups []*string `locationName:"deploymentGroups" type:"list"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // deployment groups call to return the next set of deployment groups in the + // list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list deployment instances operation. +type ListDeploymentInstancesInput struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. + DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` + + // A subset of instances to list, by status: + // + // Pending: Include in the resulting list those instances with pending deployments. + // InProgress: Include in the resulting list those instances with in-progress + // deployments. Succeeded: Include in the resulting list those instances with + // succeeded deployments. Failed: Include in the resulting list those instances + // with failed deployments. Skipped: Include in the resulting list those instances + // with skipped deployments. Unknown: Include in the resulting list those instances + // with deployments in an unknown state. + InstanceStatusFilter []*string `locationName:"instanceStatusFilter" type:"list"` + + // An identifier that was returned from the previous list deployment instances + // call, which can be used to return the next set of deployment instances in + // the list. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentInstancesInput) GoString() string { + return s.String() +} + +// Represents the output of a list deployment instances operation. +type ListDeploymentInstancesOutput struct { + _ struct{} `type:"structure"` + + // A list of instances IDs. + InstancesList []*string `locationName:"instancesList" type:"list"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // deployment instances call to return the next set of deployment instances + // in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentInstancesOutput) GoString() string { + return s.String() +} + +// Represents the input of a list deployments operation. +type ListDeploymentsInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // A deployment creation start- and end-time range for returning a subset of + // the list of deployments. + CreateTimeRange *TimeRange `locationName:"createTimeRange" type:"structure"` + + // The name of an existing deployment group for the specified application. + DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"` + + // A subset of deployments to list, by status: + // + // Created: Include in the resulting list created deployments. 
Queued: Include + // in the resulting list queued deployments. In Progress: Include in the resulting + // list in-progress deployments. Succeeded: Include in the resulting list succeeded + // deployments. Failed: Include in the resulting list failed deployments. Aborted: + // Include in the resulting list aborted deployments. + IncludeOnlyStatuses []*string `locationName:"includeOnlyStatuses" type:"list"` + + // An identifier that was returned from the previous list deployments call, + // which can be used to return the next set of deployments in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentsInput) GoString() string { + return s.String() +} + +// Represents the output of a list deployments operation. +type ListDeploymentsOutput struct { + _ struct{} `type:"structure"` + + // A list of deployment IDs. + Deployments []*string `locationName:"deployments" type:"list"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // deployments call to return the next set of deployments in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDeploymentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeploymentsOutput) GoString() string { + return s.String() +} + +// Represents the input of a list on-premises instances operation. +// +// . +type ListOnPremisesInstancesInput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous list on-premises instances + // call, which can be used to return the next set of on-premises instances in + // the list. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The on-premises instances registration status: + // + // Deregistered: Include in the resulting list deregistered on-premises instances. + // Registered: Include in the resulting list registered on-premises instances. + RegistrationStatus *string `locationName:"registrationStatus" type:"string" enum:"RegistrationStatus"` + + // The on-premises instance tags that will be used to restrict the corresponding + // on-premises instance names that are returned. + TagFilters []*TagFilter `locationName:"tagFilters" type:"list"` +} + +// String returns the string representation +func (s ListOnPremisesInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOnPremisesInstancesInput) GoString() string { + return s.String() +} + +// Represents the output of list on-premises instances operation. +type ListOnPremisesInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of matching on-premises instance names. + InstanceNames []*string `locationName:"instanceNames" type:"list"` + + // If the amount of information that is returned is significantly large, an + // identifier will also be returned, which can be used in a subsequent list + // on-premises instances call to return the next set of on-premises instances + // in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListOnPremisesInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOnPremisesInstancesOutput) GoString() string { + return s.String() +} + +// Information about minimum healthy instances. +type MinimumHealthyHosts struct { + _ struct{} `type:"structure"` + + // The minimum healthy instances type: + // + // HOST_COUNT: The minimum number of healthy instances, as an absolute value. 
+ // FLEET_PERCENT: The minimum number of healthy instances, as a percentage of + // the total number of instances in the deployment. For example, for 9 instances, + // if a HOST_COUNT of 6 is specified, deploy to up to 3 instances at a time. + // The deployment succeeds if 6 or more instances are successfully deployed + // to; otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified, + // deploy to up to 5 instances at a time. The deployment succeeds if 4 or more + // instances are successfully deployed to; otherwise, the deployment fails. + // + // In a call to the get deployment configuration operation, CodeDeployDefault.OneAtATime + // will return a minimum healthy instances type of MOST_CONCURRENCY and a value + // of 1. This means a deployment to only one instances at a time. (You cannot + // set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) + Type *string `locationName:"type" type:"string" enum:"MinimumHealthyHostsType"` + + // The minimum healthy instances value. + Value *int64 `locationName:"value" type:"integer"` +} + +// String returns the string representation +func (s MinimumHealthyHosts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MinimumHealthyHosts) GoString() string { + return s.String() +} + +// Represents the input of a register application revision operation. +type RegisterApplicationRevisionInput struct { + _ struct{} `type:"structure"` + + // The name of an existing AWS CodeDeploy application associated with the applicable + // IAM user or AWS account. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // A comment about the revision. + Description *string `locationName:"description" type:"string"` + + // Information about the application revision to register, including the revision's + // type and its location. 
+ Revision *RevisionLocation `locationName:"revision" type:"structure" required:"true"` +} + +// String returns the string representation +func (s RegisterApplicationRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterApplicationRevisionInput) GoString() string { + return s.String() +} + +type RegisterApplicationRevisionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterApplicationRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterApplicationRevisionOutput) GoString() string { + return s.String() +} + +// Represents the input of register on-premises instance operation. +type RegisterOnPremisesInstanceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM user to associate with the on-premises instance. + IamUserArn *string `locationName:"iamUserArn" type:"string" required:"true"` + + // The name of the on-premises instance to register. + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterOnPremisesInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterOnPremisesInstanceInput) GoString() string { + return s.String() +} + +type RegisterOnPremisesInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterOnPremisesInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterOnPremisesInstanceOutput) GoString() string { + return s.String() +} + +// Represents the input of a remove tags from on-premises instances operation. 
+type RemoveTagsFromOnPremisesInstancesInput struct { + _ struct{} `type:"structure"` + + // The names of the on-premises instances to remove tags from. + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + + // The tag key-value pairs to remove from the on-premises instances. + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromOnPremisesInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromOnPremisesInstancesInput) GoString() string { + return s.String() +} + +type RemoveTagsFromOnPremisesInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromOnPremisesInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromOnPremisesInstancesOutput) GoString() string { + return s.String() +} + +// Information about an application revision's location. +type RevisionLocation struct { + _ struct{} `type:"structure"` + + // Information about the location of application artifacts that are stored in + // GitHub. + GitHubLocation *GitHubLocation `locationName:"gitHubLocation" type:"structure"` + + // The application revision's type: + // + // S3: An application revision stored in Amazon S3. GitHub: An application + // revision stored in GitHub. + RevisionType *string `locationName:"revisionType" type:"string" enum:"RevisionLocationType"` + + // Information about the location of application artifacts that are stored in + // Amazon S3. 
+ S3Location *S3Location `locationName:"s3Location" type:"structure"` +} + +// String returns the string representation +func (s RevisionLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevisionLocation) GoString() string { + return s.String() +} + +// Information about the location of application artifacts that are stored in +// Amazon S3. +type S3Location struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket where the application revision is stored. + Bucket *string `locationName:"bucket" type:"string"` + + // The file type of the application revision. Must be one of the following: + // + // tar: A tar archive file. tgz: A compressed tar archive file. zip: A zip + // archive file. + BundleType *string `locationName:"bundleType" type:"string" enum:"BundleType"` + + // The ETag of the Amazon S3 object that represents the bundled artifacts for + // the application revision. + // + // If the ETag is not specified as an input parameter, ETag validation of the + // object will be skipped. + ETag *string `locationName:"eTag" type:"string"` + + // The name of the Amazon S3 object that represents the bundled artifacts for + // the application revision. + Key *string `locationName:"key" type:"string"` + + // A specific version of the Amazon S3 object that represents the bundled artifacts + // for the application revision. + // + // If the version is not specified, the system will use the most recent version + // by default. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s S3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Location) GoString() string { + return s.String() +} + +// Represents the input of a stop deployment operation. +type StopDeploymentInput struct { + _ struct{} `type:"structure"` + + // The unique ID of a deployment. 
+ DeploymentId *string `locationName:"deploymentId" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDeploymentInput) GoString() string { + return s.String() +} + +// Represents the output of a stop deployment operation. +type StopDeploymentOutput struct { + _ struct{} `type:"structure"` + + // The status of the stop deployment operation: + // + // Pending: The stop operation is pending. Succeeded: The stop operation succeeded. + Status *string `locationName:"status" type:"string" enum:"StopStatus"` + + // An accompanying status message. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s StopDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDeploymentOutput) GoString() string { + return s.String() +} + +// Information about a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag's key. + Key *string `type:"string"` + + // The tag's value. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Information about an on-premises instance tag filter. +type TagFilter struct { + _ struct{} `type:"structure"` + + // The on-premises instance tag filter key. + Key *string `type:"string"` + + // The on-premises instance tag filter type: + // + // KEY_ONLY: Key only. VALUE_ONLY: Value only. KEY_AND_VALUE: Key and value. + Type *string `type:"string" enum:"TagFilterType"` + + // The on-premises instance tag filter value. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagFilter) GoString() string { + return s.String() +} + +// Information about a time range. +type TimeRange struct { + _ struct{} `type:"structure"` + + // The time range's end time. + // + // Specify null to leave the time range's end time open-ended. + End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"unix"` + + // The time range's start time. + // + // Specify null to leave the time range's start time open-ended. + Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s TimeRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeRange) GoString() string { + return s.String() +} + +// Represents the input of an update application operation. +type UpdateApplicationInput struct { + _ struct{} `type:"structure"` + + // The current name of the application that you want to change. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string"` + + // The new name that you want to change the application to. 
+ NewApplicationName *string `locationName:"newApplicationName" min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationInput) GoString() string { + return s.String() +} + +type UpdateApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationOutput) GoString() string { + return s.String() +} + +// Represents the input of an update deployment group operation. +type UpdateDeploymentGroupInput struct { + _ struct{} `type:"structure"` + + // The application name corresponding to the deployment group to update. + ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` + + // The replacement list of Auto Scaling groups to be included in the deployment + // group, if you want to change them. + AutoScalingGroups []*string `locationName:"autoScalingGroups" type:"list"` + + // The current name of the existing deployment group. + CurrentDeploymentGroupName *string `locationName:"currentDeploymentGroupName" min:"1" type:"string" required:"true"` + + // The replacement deployment configuration name to use, if you want to change + // it. + DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` + + // The replacement set of Amazon EC2 tags to filter on, if you want to change + // them. + Ec2TagFilters []*EC2TagFilter `locationName:"ec2TagFilters" type:"list"` + + // The new name of the deployment group, if you want to change it. + NewDeploymentGroupName *string `locationName:"newDeploymentGroupName" min:"1" type:"string"` + + // The replacement set of on-premises instance tags for filter on, if you want + // to change them. 
+ OnPremisesInstanceTagFilters []*TagFilter `locationName:"onPremisesInstanceTagFilters" type:"list"` + + // A replacement service role's ARN, if you want to change it. + ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` +} + +// String returns the string representation +func (s UpdateDeploymentGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDeploymentGroupInput) GoString() string { + return s.String() +} + +// Represents the output of an update deployment group operation. +type UpdateDeploymentGroupOutput struct { + _ struct{} `type:"structure"` + + // If the output contains no data, and the corresponding deployment group contained + // at least one Auto Scaling group, AWS CodeDeploy successfully removed all + // corresponding Auto Scaling lifecycle event hooks from the AWS account. If + // the output does contain data, AWS CodeDeploy could not remove some Auto Scaling + // lifecycle event hooks from the AWS account. 
+ HooksNotCleanedUp []*AutoScalingGroup `locationName:"hooksNotCleanedUp" type:"list"` +} + +// String returns the string representation +func (s UpdateDeploymentGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDeploymentGroupOutput) GoString() string { + return s.String() +} + +const ( + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByRegisterTime = "registerTime" + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByFirstUsedTime = "firstUsedTime" + // @enum ApplicationRevisionSortBy + ApplicationRevisionSortByLastUsedTime = "lastUsedTime" +) + +const ( + // @enum BundleType + BundleTypeTar = "tar" + // @enum BundleType + BundleTypeTgz = "tgz" + // @enum BundleType + BundleTypeZip = "zip" +) + +const ( + // @enum DeploymentCreator + DeploymentCreatorUser = "user" + // @enum DeploymentCreator + DeploymentCreatorAutoscaling = "autoscaling" +) + +const ( + // @enum DeploymentStatus + DeploymentStatusCreated = "Created" + // @enum DeploymentStatus + DeploymentStatusQueued = "Queued" + // @enum DeploymentStatus + DeploymentStatusInProgress = "InProgress" + // @enum DeploymentStatus + DeploymentStatusSucceeded = "Succeeded" + // @enum DeploymentStatus + DeploymentStatusFailed = "Failed" + // @enum DeploymentStatus + DeploymentStatusStopped = "Stopped" +) + +const ( + // @enum EC2TagFilterType + EC2TagFilterTypeKeyOnly = "KEY_ONLY" + // @enum EC2TagFilterType + EC2TagFilterTypeValueOnly = "VALUE_ONLY" + // @enum EC2TagFilterType + EC2TagFilterTypeKeyAndValue = "KEY_AND_VALUE" +) + +const ( + // @enum ErrorCode + ErrorCodeDeploymentGroupMissing = "DEPLOYMENT_GROUP_MISSING" + // @enum ErrorCode + ErrorCodeApplicationMissing = "APPLICATION_MISSING" + // @enum ErrorCode + ErrorCodeRevisionMissing = "REVISION_MISSING" + // @enum ErrorCode + ErrorCodeIamRoleMissing = "IAM_ROLE_MISSING" + // @enum ErrorCode + ErrorCodeIamRolePermissions = "IAM_ROLE_PERMISSIONS" + // @enum 
ErrorCode + ErrorCodeNoEc2Subscription = "NO_EC2_SUBSCRIPTION" + // @enum ErrorCode + ErrorCodeOverMaxInstances = "OVER_MAX_INSTANCES" + // @enum ErrorCode + ErrorCodeNoInstances = "NO_INSTANCES" + // @enum ErrorCode + ErrorCodeTimeout = "TIMEOUT" + // @enum ErrorCode + ErrorCodeHealthConstraintsInvalid = "HEALTH_CONSTRAINTS_INVALID" + // @enum ErrorCode + ErrorCodeHealthConstraints = "HEALTH_CONSTRAINTS" + // @enum ErrorCode + ErrorCodeInternalError = "INTERNAL_ERROR" + // @enum ErrorCode + ErrorCodeThrottled = "THROTTLED" +) + +const ( + // @enum InstanceStatus + InstanceStatusPending = "Pending" + // @enum InstanceStatus + InstanceStatusInProgress = "InProgress" + // @enum InstanceStatus + InstanceStatusSucceeded = "Succeeded" + // @enum InstanceStatus + InstanceStatusFailed = "Failed" + // @enum InstanceStatus + InstanceStatusSkipped = "Skipped" + // @enum InstanceStatus + InstanceStatusUnknown = "Unknown" +) + +const ( + // @enum LifecycleErrorCode + LifecycleErrorCodeSuccess = "Success" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptMissing = "ScriptMissing" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptNotExecutable = "ScriptNotExecutable" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptTimedOut = "ScriptTimedOut" + // @enum LifecycleErrorCode + LifecycleErrorCodeScriptFailed = "ScriptFailed" + // @enum LifecycleErrorCode + LifecycleErrorCodeUnknownError = "UnknownError" +) + +const ( + // @enum LifecycleEventStatus + LifecycleEventStatusPending = "Pending" + // @enum LifecycleEventStatus + LifecycleEventStatusInProgress = "InProgress" + // @enum LifecycleEventStatus + LifecycleEventStatusSucceeded = "Succeeded" + // @enum LifecycleEventStatus + LifecycleEventStatusFailed = "Failed" + // @enum LifecycleEventStatus + LifecycleEventStatusSkipped = "Skipped" + // @enum LifecycleEventStatus + LifecycleEventStatusUnknown = "Unknown" +) + +const ( + // @enum ListStateFilterAction + ListStateFilterActionInclude = "include" + // @enum 
ListStateFilterAction + ListStateFilterActionExclude = "exclude" + // @enum ListStateFilterAction + ListStateFilterActionIgnore = "ignore" +) + +const ( + // @enum MinimumHealthyHostsType + MinimumHealthyHostsTypeHostCount = "HOST_COUNT" + // @enum MinimumHealthyHostsType + MinimumHealthyHostsTypeFleetPercent = "FLEET_PERCENT" +) + +const ( + // @enum RegistrationStatus + RegistrationStatusRegistered = "Registered" + // @enum RegistrationStatus + RegistrationStatusDeregistered = "Deregistered" +) + +const ( + // @enum RevisionLocationType + RevisionLocationTypeS3 = "S3" + // @enum RevisionLocationType + RevisionLocationTypeGitHub = "GitHub" +) + +const ( + // @enum SortOrder + SortOrderAscending = "ascending" + // @enum SortOrder + SortOrderDescending = "descending" +) + +const ( + // @enum StopStatus + StopStatusPending = "Pending" + // @enum StopStatus + StopStatusSucceeded = "Succeeded" +) + +const ( + // @enum TagFilterType + TagFilterTypeKeyOnly = "KEY_ONLY" + // @enum TagFilterType + TagFilterTypeValueOnly = "VALUE_ONLY" + // @enum TagFilterType + TagFilterTypeKeyAndValue = "KEY_AND_VALUE" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/codedeployiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,154 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codedeployiface provides an interface for the AWS CodeDeploy. 
+package codedeployiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +// CodeDeployAPI is the interface type for codedeploy.CodeDeploy. +type CodeDeployAPI interface { + AddTagsToOnPremisesInstancesRequest(*codedeploy.AddTagsToOnPremisesInstancesInput) (*request.Request, *codedeploy.AddTagsToOnPremisesInstancesOutput) + + AddTagsToOnPremisesInstances(*codedeploy.AddTagsToOnPremisesInstancesInput) (*codedeploy.AddTagsToOnPremisesInstancesOutput, error) + + BatchGetApplicationsRequest(*codedeploy.BatchGetApplicationsInput) (*request.Request, *codedeploy.BatchGetApplicationsOutput) + + BatchGetApplications(*codedeploy.BatchGetApplicationsInput) (*codedeploy.BatchGetApplicationsOutput, error) + + BatchGetDeploymentsRequest(*codedeploy.BatchGetDeploymentsInput) (*request.Request, *codedeploy.BatchGetDeploymentsOutput) + + BatchGetDeployments(*codedeploy.BatchGetDeploymentsInput) (*codedeploy.BatchGetDeploymentsOutput, error) + + BatchGetOnPremisesInstancesRequest(*codedeploy.BatchGetOnPremisesInstancesInput) (*request.Request, *codedeploy.BatchGetOnPremisesInstancesOutput) + + BatchGetOnPremisesInstances(*codedeploy.BatchGetOnPremisesInstancesInput) (*codedeploy.BatchGetOnPremisesInstancesOutput, error) + + CreateApplicationRequest(*codedeploy.CreateApplicationInput) (*request.Request, *codedeploy.CreateApplicationOutput) + + CreateApplication(*codedeploy.CreateApplicationInput) (*codedeploy.CreateApplicationOutput, error) + + CreateDeploymentRequest(*codedeploy.CreateDeploymentInput) (*request.Request, *codedeploy.CreateDeploymentOutput) + + CreateDeployment(*codedeploy.CreateDeploymentInput) (*codedeploy.CreateDeploymentOutput, error) + + CreateDeploymentConfigRequest(*codedeploy.CreateDeploymentConfigInput) (*request.Request, *codedeploy.CreateDeploymentConfigOutput) + + CreateDeploymentConfig(*codedeploy.CreateDeploymentConfigInput) (*codedeploy.CreateDeploymentConfigOutput, error) + + 
CreateDeploymentGroupRequest(*codedeploy.CreateDeploymentGroupInput) (*request.Request, *codedeploy.CreateDeploymentGroupOutput) + + CreateDeploymentGroup(*codedeploy.CreateDeploymentGroupInput) (*codedeploy.CreateDeploymentGroupOutput, error) + + DeleteApplicationRequest(*codedeploy.DeleteApplicationInput) (*request.Request, *codedeploy.DeleteApplicationOutput) + + DeleteApplication(*codedeploy.DeleteApplicationInput) (*codedeploy.DeleteApplicationOutput, error) + + DeleteDeploymentConfigRequest(*codedeploy.DeleteDeploymentConfigInput) (*request.Request, *codedeploy.DeleteDeploymentConfigOutput) + + DeleteDeploymentConfig(*codedeploy.DeleteDeploymentConfigInput) (*codedeploy.DeleteDeploymentConfigOutput, error) + + DeleteDeploymentGroupRequest(*codedeploy.DeleteDeploymentGroupInput) (*request.Request, *codedeploy.DeleteDeploymentGroupOutput) + + DeleteDeploymentGroup(*codedeploy.DeleteDeploymentGroupInput) (*codedeploy.DeleteDeploymentGroupOutput, error) + + DeregisterOnPremisesInstanceRequest(*codedeploy.DeregisterOnPremisesInstanceInput) (*request.Request, *codedeploy.DeregisterOnPremisesInstanceOutput) + + DeregisterOnPremisesInstance(*codedeploy.DeregisterOnPremisesInstanceInput) (*codedeploy.DeregisterOnPremisesInstanceOutput, error) + + GetApplicationRequest(*codedeploy.GetApplicationInput) (*request.Request, *codedeploy.GetApplicationOutput) + + GetApplication(*codedeploy.GetApplicationInput) (*codedeploy.GetApplicationOutput, error) + + GetApplicationRevisionRequest(*codedeploy.GetApplicationRevisionInput) (*request.Request, *codedeploy.GetApplicationRevisionOutput) + + GetApplicationRevision(*codedeploy.GetApplicationRevisionInput) (*codedeploy.GetApplicationRevisionOutput, error) + + GetDeploymentRequest(*codedeploy.GetDeploymentInput) (*request.Request, *codedeploy.GetDeploymentOutput) + + GetDeployment(*codedeploy.GetDeploymentInput) (*codedeploy.GetDeploymentOutput, error) + + GetDeploymentConfigRequest(*codedeploy.GetDeploymentConfigInput) 
(*request.Request, *codedeploy.GetDeploymentConfigOutput) + + GetDeploymentConfig(*codedeploy.GetDeploymentConfigInput) (*codedeploy.GetDeploymentConfigOutput, error) + + GetDeploymentGroupRequest(*codedeploy.GetDeploymentGroupInput) (*request.Request, *codedeploy.GetDeploymentGroupOutput) + + GetDeploymentGroup(*codedeploy.GetDeploymentGroupInput) (*codedeploy.GetDeploymentGroupOutput, error) + + GetDeploymentInstanceRequest(*codedeploy.GetDeploymentInstanceInput) (*request.Request, *codedeploy.GetDeploymentInstanceOutput) + + GetDeploymentInstance(*codedeploy.GetDeploymentInstanceInput) (*codedeploy.GetDeploymentInstanceOutput, error) + + GetOnPremisesInstanceRequest(*codedeploy.GetOnPremisesInstanceInput) (*request.Request, *codedeploy.GetOnPremisesInstanceOutput) + + GetOnPremisesInstance(*codedeploy.GetOnPremisesInstanceInput) (*codedeploy.GetOnPremisesInstanceOutput, error) + + ListApplicationRevisionsRequest(*codedeploy.ListApplicationRevisionsInput) (*request.Request, *codedeploy.ListApplicationRevisionsOutput) + + ListApplicationRevisions(*codedeploy.ListApplicationRevisionsInput) (*codedeploy.ListApplicationRevisionsOutput, error) + + ListApplicationRevisionsPages(*codedeploy.ListApplicationRevisionsInput, func(*codedeploy.ListApplicationRevisionsOutput, bool) bool) error + + ListApplicationsRequest(*codedeploy.ListApplicationsInput) (*request.Request, *codedeploy.ListApplicationsOutput) + + ListApplications(*codedeploy.ListApplicationsInput) (*codedeploy.ListApplicationsOutput, error) + + ListApplicationsPages(*codedeploy.ListApplicationsInput, func(*codedeploy.ListApplicationsOutput, bool) bool) error + + ListDeploymentConfigsRequest(*codedeploy.ListDeploymentConfigsInput) (*request.Request, *codedeploy.ListDeploymentConfigsOutput) + + ListDeploymentConfigs(*codedeploy.ListDeploymentConfigsInput) (*codedeploy.ListDeploymentConfigsOutput, error) + + ListDeploymentConfigsPages(*codedeploy.ListDeploymentConfigsInput, 
func(*codedeploy.ListDeploymentConfigsOutput, bool) bool) error + + ListDeploymentGroupsRequest(*codedeploy.ListDeploymentGroupsInput) (*request.Request, *codedeploy.ListDeploymentGroupsOutput) + + ListDeploymentGroups(*codedeploy.ListDeploymentGroupsInput) (*codedeploy.ListDeploymentGroupsOutput, error) + + ListDeploymentGroupsPages(*codedeploy.ListDeploymentGroupsInput, func(*codedeploy.ListDeploymentGroupsOutput, bool) bool) error + + ListDeploymentInstancesRequest(*codedeploy.ListDeploymentInstancesInput) (*request.Request, *codedeploy.ListDeploymentInstancesOutput) + + ListDeploymentInstances(*codedeploy.ListDeploymentInstancesInput) (*codedeploy.ListDeploymentInstancesOutput, error) + + ListDeploymentInstancesPages(*codedeploy.ListDeploymentInstancesInput, func(*codedeploy.ListDeploymentInstancesOutput, bool) bool) error + + ListDeploymentsRequest(*codedeploy.ListDeploymentsInput) (*request.Request, *codedeploy.ListDeploymentsOutput) + + ListDeployments(*codedeploy.ListDeploymentsInput) (*codedeploy.ListDeploymentsOutput, error) + + ListDeploymentsPages(*codedeploy.ListDeploymentsInput, func(*codedeploy.ListDeploymentsOutput, bool) bool) error + + ListOnPremisesInstancesRequest(*codedeploy.ListOnPremisesInstancesInput) (*request.Request, *codedeploy.ListOnPremisesInstancesOutput) + + ListOnPremisesInstances(*codedeploy.ListOnPremisesInstancesInput) (*codedeploy.ListOnPremisesInstancesOutput, error) + + RegisterApplicationRevisionRequest(*codedeploy.RegisterApplicationRevisionInput) (*request.Request, *codedeploy.RegisterApplicationRevisionOutput) + + RegisterApplicationRevision(*codedeploy.RegisterApplicationRevisionInput) (*codedeploy.RegisterApplicationRevisionOutput, error) + + RegisterOnPremisesInstanceRequest(*codedeploy.RegisterOnPremisesInstanceInput) (*request.Request, *codedeploy.RegisterOnPremisesInstanceOutput) + + RegisterOnPremisesInstance(*codedeploy.RegisterOnPremisesInstanceInput) (*codedeploy.RegisterOnPremisesInstanceOutput, error) + + 
RemoveTagsFromOnPremisesInstancesRequest(*codedeploy.RemoveTagsFromOnPremisesInstancesInput) (*request.Request, *codedeploy.RemoveTagsFromOnPremisesInstancesOutput) + + RemoveTagsFromOnPremisesInstances(*codedeploy.RemoveTagsFromOnPremisesInstancesInput) (*codedeploy.RemoveTagsFromOnPremisesInstancesOutput, error) + + StopDeploymentRequest(*codedeploy.StopDeploymentInput) (*request.Request, *codedeploy.StopDeploymentOutput) + + StopDeployment(*codedeploy.StopDeploymentInput) (*codedeploy.StopDeploymentOutput, error) + + UpdateApplicationRequest(*codedeploy.UpdateApplicationInput) (*request.Request, *codedeploy.UpdateApplicationOutput) + + UpdateApplication(*codedeploy.UpdateApplicationInput) (*codedeploy.UpdateApplicationOutput, error) + + UpdateDeploymentGroupRequest(*codedeploy.UpdateDeploymentGroupInput) (*request.Request, *codedeploy.UpdateDeploymentGroupOutput) + + UpdateDeploymentGroup(*codedeploy.UpdateDeploymentGroupInput) (*codedeploy.UpdateDeploymentGroupOutput, error) +} + +var _ CodeDeployAPI = (*codedeploy.CodeDeploy)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,787 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package codedeploy_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCodeDeploy_AddTagsToOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.AddTagsToOnPremisesInstancesInput{ + InstanceNames: []*string{ // Required + aws.String("InstanceName"), // Required + // More values... + }, + Tags: []*codedeploy.Tag{ // Required + { // Required + Key: aws.String("Key"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetApplications() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetApplicationsInput{ + ApplicationNames: []*string{ + aws.String("ApplicationName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetDeployments() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetDeploymentsInput{ + DeploymentIds: []*string{ + aws.String("DeploymentId"), // Required + // More values... + }, + } + resp, err := svc.BatchGetDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_BatchGetOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.BatchGetOnPremisesInstancesInput{ + InstanceNames: []*string{ + aws.String("InstanceName"), // Required + // More values... + }, + } + resp, err := svc.BatchGetOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.CreateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentConfigName: aws.String("DeploymentConfigName"), + DeploymentGroupName: aws.String("DeploymentGroupName"), + Description: aws.String("Description"), + IgnoreApplicationStopFailures: aws.Bool(true), + Revision: &codedeploy.RevisionLocation{ + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + } + resp, err := svc.CreateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // 
Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + MinimumHealthyHosts: &codedeploy.MinimumHealthyHosts{ + Type: aws.String("MinimumHealthyHostsType"), + Value: aws.Int64(1), + }, + } + resp, err := svc.CreateDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_CreateDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.CreateDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + ServiceRoleArn: aws.String("Role"), // Required + AutoScalingGroups: []*string{ + aws.String("AutoScalingGroupName"), // Required + // More values... + }, + DeploymentConfigName: aws.String("DeploymentConfigName"), + Ec2TagFilters: []*codedeploy.EC2TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("EC2TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + OnPremisesInstanceTagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.CreateDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.DeleteApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + } + resp, err := svc.DeleteDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeleteDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeleteDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + } + resp, err := svc.DeleteDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_DeregisterOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.DeregisterOnPremisesInstanceInput{ + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.DeregisterOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_GetApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + } + resp, err := svc.GetApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetApplicationRevision() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetApplicationRevisionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Revision: &codedeploy.RevisionLocation{ // Required + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + } + resp, err := svc.GetApplicationRevision(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentInput{ + DeploymentId: aws.String("DeploymentId"), // Required + } + resp, err := svc.GetDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentConfig() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentConfigInput{ + DeploymentConfigName: aws.String("DeploymentConfigName"), // Required + } + resp, err := svc.GetDeploymentConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + DeploymentGroupName: aws.String("DeploymentGroupName"), // Required + } + resp, err := svc.GetDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetDeploymentInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetDeploymentInstanceInput{ + DeploymentId: aws.String("DeploymentId"), // Required + InstanceId: aws.String("InstanceId"), // Required + } + resp, err := svc.GetDeploymentInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_GetOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.GetOnPremisesInstanceInput{ + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.GetOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListApplicationRevisions() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListApplicationRevisionsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Deployed: aws.String("ListStateFilterAction"), + NextToken: aws.String("NextToken"), + S3Bucket: aws.String("S3Bucket"), + S3KeyPrefix: aws.String("S3Key"), + SortBy: aws.String("ApplicationRevisionSortBy"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.ListApplicationRevisions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListApplications() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListApplicationsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentConfigs() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentConfigsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentConfigs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentGroups() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentGroupsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeploymentInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentInstancesInput{ + DeploymentId: aws.String("DeploymentId"), // Required + InstanceStatusFilter: []*string{ + aws.String("InstanceStatus"), // Required + // More values... + }, + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeploymentInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_ListDeployments() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListDeploymentsInput{ + ApplicationName: aws.String("ApplicationName"), + CreateTimeRange: &codedeploy.TimeRange{ + End: aws.Time(time.Now()), + Start: aws.Time(time.Now()), + }, + DeploymentGroupName: aws.String("DeploymentGroupName"), + IncludeOnlyStatuses: []*string{ + aws.String("DeploymentStatus"), // Required + // More values... + }, + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_ListOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.ListOnPremisesInstancesInput{ + NextToken: aws.String("NextToken"), + RegistrationStatus: aws.String("RegistrationStatus"), + TagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.ListOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_RegisterApplicationRevision() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RegisterApplicationRevisionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Revision: &codedeploy.RevisionLocation{ // Required + GitHubLocation: &codedeploy.GitHubLocation{ + CommitId: aws.String("CommitId"), + Repository: aws.String("Repository"), + }, + RevisionType: aws.String("RevisionLocationType"), + S3Location: &codedeploy.S3Location{ + Bucket: aws.String("S3Bucket"), + BundleType: aws.String("BundleType"), + ETag: aws.String("ETag"), + Key: aws.String("S3Key"), + Version: aws.String("VersionId"), + }, + }, + Description: aws.String("Description"), + } + resp, err := svc.RegisterApplicationRevision(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_RegisterOnPremisesInstance() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RegisterOnPremisesInstanceInput{ + IamUserArn: aws.String("IamUserArn"), // Required + InstanceName: aws.String("InstanceName"), // Required + } + resp, err := svc.RegisterOnPremisesInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_RemoveTagsFromOnPremisesInstances() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.RemoveTagsFromOnPremisesInstancesInput{ + InstanceNames: []*string{ // Required + aws.String("InstanceName"), // Required + // More values... + }, + Tags: []*codedeploy.Tag{ // Required + { // Required + Key: aws.String("Key"), + Value: aws.String("Value"), + }, + // More values... + }, + } + resp, err := svc.RemoveTagsFromOnPremisesInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_StopDeployment() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.StopDeploymentInput{ + DeploymentId: aws.String("DeploymentId"), // Required + } + resp, err := svc.StopDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodeDeploy_UpdateApplication() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.UpdateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), + NewApplicationName: aws.String("ApplicationName"), + } + resp, err := svc.UpdateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodeDeploy_UpdateDeploymentGroup() { + svc := codedeploy.New(session.New()) + + params := &codedeploy.UpdateDeploymentGroupInput{ + ApplicationName: aws.String("ApplicationName"), // Required + CurrentDeploymentGroupName: aws.String("DeploymentGroupName"), // Required + AutoScalingGroups: []*string{ + aws.String("AutoScalingGroupName"), // Required + // More values... + }, + DeploymentConfigName: aws.String("DeploymentConfigName"), + Ec2TagFilters: []*codedeploy.EC2TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("EC2TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + NewDeploymentGroupName: aws.String("DeploymentGroupName"), + OnPremisesInstanceTagFilters: []*codedeploy.TagFilter{ + { // Required + Key: aws.String("Key"), + Type: aws.String("TagFilterType"), + Value: aws.String("Value"), + }, + // More values... + }, + ServiceRoleArn: aws.String("Role"), + } + resp, err := svc.UpdateDeploymentGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codedeploy/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,136 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codedeploy + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Overview This is the AWS CodeDeploy API Reference. This guide provides descriptions +// of the AWS CodeDeploy APIs. For additional information, see the AWS CodeDeploy +// User Guide (http://docs.aws.amazon.com/codedeploy/latest/userguide). +// +// Using the APIs You can use the AWS CodeDeploy APIs to work with the following +// items: +// +// Applications are unique identifiers that AWS CodeDeploy uses to ensure +// that the correct combinations of revisions, deployment configurations, and +// deployment groups are being referenced during deployments. +// +// You can use the AWS CodeDeploy APIs to create, delete, get, list, and update +// applications. +// +// Deployment configurations are sets of deployment rules and deployment +// success and failure conditions that AWS CodeDeploy uses during deployments. +// +// You can use the AWS CodeDeploy APIs to create, delete, get, and list deployment +// configurations. +// +// Deployment groups are groups of instances to which application revisions +// can be deployed. 
+// +// You can use the AWS CodeDeploy APIs to create, delete, get, list, and update +// deployment groups. +// +// Instances represent Amazon EC2 instances to which application revisions +// are deployed. Instances are identified by their Amazon EC2 tags or Auto Scaling +// group names. Instances belong to deployment groups. +// +// You can use the AWS CodeDeploy APIs to get and list instances. +// +// Deployments represent the process of deploying revisions to instances. +// +// You can use the AWS CodeDeploy APIs to create, get, list, and stop deployments. +// +// Application revisions are archive files that are stored in Amazon S3 buckets +// or GitHub repositories. These revisions contain source content (such as source +// code, web pages, executable files, any deployment scripts, and similar) along +// with an Application Specification file (AppSpec file). (The AppSpec file +// is unique to AWS CodeDeploy; it defines a series of deployment actions that +// you want AWS CodeDeploy to execute.) An application revision is uniquely +// identified by its Amazon S3 object key and its ETag, version, or both (for +// application revisions that are stored in Amazon S3 buckets) or by its repository +// name and commit ID (for applications revisions that are stored in GitHub +// repositories). Application revisions are deployed through deployment groups. +// +// You can use the AWS CodeDeploy APIs to get, list, and register application +// revisions. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CodeDeploy struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. 
+const ServiceName = "codedeploy" + +// New creates a new instance of the CodeDeploy client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CodeDeploy client from just a session. +// svc := codedeploy.New(mySession) +// +// // Create a CodeDeploy client with additional configuration +// svc := codedeploy.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeDeploy { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CodeDeploy { + svc := &CodeDeploy{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-10-06", + JSONVersion: "1.1", + TargetPrefix: "CodeDeploy_20141006", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CodeDeploy operation and runs any +// custom request initialization. 
+func (c *CodeDeploy) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2797 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codepipeline provides a client for AWS CodePipeline. +package codepipeline + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAcknowledgeJob = "AcknowledgeJob" + +// AcknowledgeJobRequest generates a request for the AcknowledgeJob operation. +func (c *CodePipeline) AcknowledgeJobRequest(input *AcknowledgeJobInput) (req *request.Request, output *AcknowledgeJobOutput) { + op := &request.Operation{ + Name: opAcknowledgeJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcknowledgeJobInput{} + } + + req = c.newRequest(op, input, output) + output = &AcknowledgeJobOutput{} + req.Data = output + return +} + +// Returns information about a specified job and whether that job has been received +// by the job worker. Only used for custom actions. 
+func (c *CodePipeline) AcknowledgeJob(input *AcknowledgeJobInput) (*AcknowledgeJobOutput, error) { + req, out := c.AcknowledgeJobRequest(input) + err := req.Send() + return out, err +} + +const opAcknowledgeThirdPartyJob = "AcknowledgeThirdPartyJob" + +// AcknowledgeThirdPartyJobRequest generates a request for the AcknowledgeThirdPartyJob operation. +func (c *CodePipeline) AcknowledgeThirdPartyJobRequest(input *AcknowledgeThirdPartyJobInput) (req *request.Request, output *AcknowledgeThirdPartyJobOutput) { + op := &request.Operation{ + Name: opAcknowledgeThirdPartyJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcknowledgeThirdPartyJobInput{} + } + + req = c.newRequest(op, input, output) + output = &AcknowledgeThirdPartyJobOutput{} + req.Data = output + return +} + +// Confirms a job worker has received the specified job. Only used for partner +// actions. +func (c *CodePipeline) AcknowledgeThirdPartyJob(input *AcknowledgeThirdPartyJobInput) (*AcknowledgeThirdPartyJobOutput, error) { + req, out := c.AcknowledgeThirdPartyJobRequest(input) + err := req.Send() + return out, err +} + +const opCreateCustomActionType = "CreateCustomActionType" + +// CreateCustomActionTypeRequest generates a request for the CreateCustomActionType operation. +func (c *CodePipeline) CreateCustomActionTypeRequest(input *CreateCustomActionTypeInput) (req *request.Request, output *CreateCustomActionTypeOutput) { + op := &request.Operation{ + Name: opCreateCustomActionType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomActionTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCustomActionTypeOutput{} + req.Data = output + return +} + +// Creates a new custom action that can be used in all pipelines associated +// with the AWS account. Only used for custom actions. 
+func (c *CodePipeline) CreateCustomActionType(input *CreateCustomActionTypeInput) (*CreateCustomActionTypeOutput, error) { + req, out := c.CreateCustomActionTypeRequest(input) + err := req.Send() + return out, err +} + +const opCreatePipeline = "CreatePipeline" + +// CreatePipelineRequest generates a request for the CreatePipeline operation. +func (c *CodePipeline) CreatePipelineRequest(input *CreatePipelineInput) (req *request.Request, output *CreatePipelineOutput) { + op := &request.Operation{ + Name: opCreatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePipelineOutput{} + req.Data = output + return +} + +// Creates a pipeline. +func (c *CodePipeline) CreatePipeline(input *CreatePipelineInput) (*CreatePipelineOutput, error) { + req, out := c.CreatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCustomActionType = "DeleteCustomActionType" + +// DeleteCustomActionTypeRequest generates a request for the DeleteCustomActionType operation. +func (c *CodePipeline) DeleteCustomActionTypeRequest(input *DeleteCustomActionTypeInput) (req *request.Request, output *DeleteCustomActionTypeOutput) { + op := &request.Operation{ + Name: opDeleteCustomActionType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomActionTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCustomActionTypeOutput{} + req.Data = output + return +} + +// Marks a custom action as deleted. PollForJobs for the custom action will +// fail after the action is marked for deletion. Only used for custom actions. +// +// You cannot recreate a custom action after it has been deleted unless you +// increase the version number of the action. 
+func (c *CodePipeline) DeleteCustomActionType(input *DeleteCustomActionTypeInput) (*DeleteCustomActionTypeOutput, error) { + req, out := c.DeleteCustomActionTypeRequest(input) + err := req.Send() + return out, err +} + +const opDeletePipeline = "DeletePipeline" + +// DeletePipelineRequest generates a request for the DeletePipeline operation. +func (c *CodePipeline) DeletePipelineRequest(input *DeletePipelineInput) (req *request.Request, output *DeletePipelineOutput) { + op := &request.Operation{ + Name: opDeletePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePipelineInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePipelineOutput{} + req.Data = output + return +} + +// Deletes the specified pipeline. +func (c *CodePipeline) DeletePipeline(input *DeletePipelineInput) (*DeletePipelineOutput, error) { + req, out := c.DeletePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDisableStageTransition = "DisableStageTransition" + +// DisableStageTransitionRequest generates a request for the DisableStageTransition operation. +func (c *CodePipeline) DisableStageTransitionRequest(input *DisableStageTransitionInput) (req *request.Request, output *DisableStageTransitionOutput) { + op := &request.Operation{ + Name: opDisableStageTransition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableStageTransitionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableStageTransitionOutput{} + req.Data = output + return +} + +// Prevents artifacts in a pipeline from transitioning to the next stage in +// the pipeline. 
+func (c *CodePipeline) DisableStageTransition(input *DisableStageTransitionInput) (*DisableStageTransitionOutput, error) { + req, out := c.DisableStageTransitionRequest(input) + err := req.Send() + return out, err +} + +const opEnableStageTransition = "EnableStageTransition" + +// EnableStageTransitionRequest generates a request for the EnableStageTransition operation. +func (c *CodePipeline) EnableStageTransitionRequest(input *EnableStageTransitionInput) (req *request.Request, output *EnableStageTransitionOutput) { + op := &request.Operation{ + Name: opEnableStageTransition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableStageTransitionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableStageTransitionOutput{} + req.Data = output + return +} + +// Enables artifacts in a pipeline to transition to a stage in a pipeline. +func (c *CodePipeline) EnableStageTransition(input *EnableStageTransitionInput) (*EnableStageTransitionOutput, error) { + req, out := c.EnableStageTransitionRequest(input) + err := req.Send() + return out, err +} + +const opGetJobDetails = "GetJobDetails" + +// GetJobDetailsRequest generates a request for the GetJobDetails operation. +func (c *CodePipeline) GetJobDetailsRequest(input *GetJobDetailsInput) (req *request.Request, output *GetJobDetailsOutput) { + op := &request.Operation{ + Name: opGetJobDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetJobDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetJobDetailsOutput{} + req.Data = output + return +} + +// Returns information about a job. Only used for custom actions. 
+// +// When this API is called, AWS CodePipeline returns temporary credentials +// for the Amazon S3 bucket used to store artifacts for the pipeline, if the +// action requires access to that Amazon S3 bucket for input or output artifacts. +// Additionally, this API returns any secret values defined for the action. +func (c *CodePipeline) GetJobDetails(input *GetJobDetailsInput) (*GetJobDetailsOutput, error) { + req, out := c.GetJobDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetPipeline = "GetPipeline" + +// GetPipelineRequest generates a request for the GetPipeline operation. +func (c *CodePipeline) GetPipelineRequest(input *GetPipelineInput) (req *request.Request, output *GetPipelineOutput) { + op := &request.Operation{ + Name: opGetPipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPipelineOutput{} + req.Data = output + return +} + +// Returns the metadata, structure, stages, and actions of a pipeline. Can be +// used to return the entire structure of a pipeline in JSON format, which can +// then be modified and used to update the pipeline structure with UpdatePipeline. +func (c *CodePipeline) GetPipeline(input *GetPipelineInput) (*GetPipelineOutput, error) { + req, out := c.GetPipelineRequest(input) + err := req.Send() + return out, err +} + +const opGetPipelineState = "GetPipelineState" + +// GetPipelineStateRequest generates a request for the GetPipelineState operation. 
+func (c *CodePipeline) GetPipelineStateRequest(input *GetPipelineStateInput) (req *request.Request, output *GetPipelineStateOutput) { + op := &request.Operation{ + Name: opGetPipelineState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPipelineStateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPipelineStateOutput{} + req.Data = output + return +} + +// Returns information about the state of a pipeline, including the stages, +// actions, and details about the last run of the pipeline. +func (c *CodePipeline) GetPipelineState(input *GetPipelineStateInput) (*GetPipelineStateOutput, error) { + req, out := c.GetPipelineStateRequest(input) + err := req.Send() + return out, err +} + +const opGetThirdPartyJobDetails = "GetThirdPartyJobDetails" + +// GetThirdPartyJobDetailsRequest generates a request for the GetThirdPartyJobDetails operation. +func (c *CodePipeline) GetThirdPartyJobDetailsRequest(input *GetThirdPartyJobDetailsInput) (req *request.Request, output *GetThirdPartyJobDetailsOutput) { + op := &request.Operation{ + Name: opGetThirdPartyJobDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetThirdPartyJobDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetThirdPartyJobDetailsOutput{} + req.Data = output + return +} + +// Requests the details of a job for a third party action. Only used for partner +// actions. +// +// When this API is called, AWS CodePipeline returns temporary credentials +// for the Amazon S3 bucket used to store artifacts for the pipeline, if the +// action requires access to that Amazon S3 bucket for input or output artifacts. +// Additionally, this API returns any secret values defined for the action. 
+func (c *CodePipeline) GetThirdPartyJobDetails(input *GetThirdPartyJobDetailsInput) (*GetThirdPartyJobDetailsOutput, error) { + req, out := c.GetThirdPartyJobDetailsRequest(input) + err := req.Send() + return out, err +} + +const opListActionTypes = "ListActionTypes" + +// ListActionTypesRequest generates a request for the ListActionTypes operation. +func (c *CodePipeline) ListActionTypesRequest(input *ListActionTypesInput) (req *request.Request, output *ListActionTypesOutput) { + op := &request.Operation{ + Name: opListActionTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListActionTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListActionTypesOutput{} + req.Data = output + return +} + +// Gets a summary of all AWS CodePipeline action types associated with your +// account. +func (c *CodePipeline) ListActionTypes(input *ListActionTypesInput) (*ListActionTypesOutput, error) { + req, out := c.ListActionTypesRequest(input) + err := req.Send() + return out, err +} + +const opListPipelines = "ListPipelines" + +// ListPipelinesRequest generates a request for the ListPipelines operation. +func (c *CodePipeline) ListPipelinesRequest(input *ListPipelinesInput) (req *request.Request, output *ListPipelinesOutput) { + op := &request.Operation{ + Name: opListPipelines, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPipelinesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPipelinesOutput{} + req.Data = output + return +} + +// Gets a summary of all of the pipelines associated with your account. +func (c *CodePipeline) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) { + req, out := c.ListPipelinesRequest(input) + err := req.Send() + return out, err +} + +const opPollForJobs = "PollForJobs" + +// PollForJobsRequest generates a request for the PollForJobs operation. 
+func (c *CodePipeline) PollForJobsRequest(input *PollForJobsInput) (req *request.Request, output *PollForJobsOutput) { + op := &request.Operation{ + Name: opPollForJobs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PollForJobsInput{} + } + + req = c.newRequest(op, input, output) + output = &PollForJobsOutput{} + req.Data = output + return +} + +// Returns information about any jobs for AWS CodePipeline to act upon. +// +// When this API is called, AWS CodePipeline returns temporary credentials +// for the Amazon S3 bucket used to store artifacts for the pipeline, if the +// action requires access to that Amazon S3 bucket for input or output artifacts. +// Additionally, this API returns any secret values defined for the action. +func (c *CodePipeline) PollForJobs(input *PollForJobsInput) (*PollForJobsOutput, error) { + req, out := c.PollForJobsRequest(input) + err := req.Send() + return out, err +} + +const opPollForThirdPartyJobs = "PollForThirdPartyJobs" + +// PollForThirdPartyJobsRequest generates a request for the PollForThirdPartyJobs operation. +func (c *CodePipeline) PollForThirdPartyJobsRequest(input *PollForThirdPartyJobsInput) (req *request.Request, output *PollForThirdPartyJobsOutput) { + op := &request.Operation{ + Name: opPollForThirdPartyJobs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PollForThirdPartyJobsInput{} + } + + req = c.newRequest(op, input, output) + output = &PollForThirdPartyJobsOutput{} + req.Data = output + return +} + +// Determines whether there are any third party jobs for a job worker to act +// on. Only used for partner actions. +// +// When this API is called, AWS CodePipeline returns temporary credentials +// for the Amazon S3 bucket used to store artifacts for the pipeline, if the +// action requires access to that Amazon S3 bucket for input or output artifacts. 
+func (c *CodePipeline) PollForThirdPartyJobs(input *PollForThirdPartyJobsInput) (*PollForThirdPartyJobsOutput, error) { + req, out := c.PollForThirdPartyJobsRequest(input) + err := req.Send() + return out, err +} + +const opPutActionRevision = "PutActionRevision" + +// PutActionRevisionRequest generates a request for the PutActionRevision operation. +func (c *CodePipeline) PutActionRevisionRequest(input *PutActionRevisionInput) (req *request.Request, output *PutActionRevisionOutput) { + op := &request.Operation{ + Name: opPutActionRevision, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutActionRevisionInput{} + } + + req = c.newRequest(op, input, output) + output = &PutActionRevisionOutput{} + req.Data = output + return +} + +// Provides information to AWS CodePipeline about new revisions to a source. +func (c *CodePipeline) PutActionRevision(input *PutActionRevisionInput) (*PutActionRevisionOutput, error) { + req, out := c.PutActionRevisionRequest(input) + err := req.Send() + return out, err +} + +const opPutJobFailureResult = "PutJobFailureResult" + +// PutJobFailureResultRequest generates a request for the PutJobFailureResult operation. +func (c *CodePipeline) PutJobFailureResultRequest(input *PutJobFailureResultInput) (req *request.Request, output *PutJobFailureResultOutput) { + op := &request.Operation{ + Name: opPutJobFailureResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutJobFailureResultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutJobFailureResultOutput{} + req.Data = output + return +} + +// Represents the failure of a job as returned to the pipeline by a job worker. +// Only used for custom actions. 
+func (c *CodePipeline) PutJobFailureResult(input *PutJobFailureResultInput) (*PutJobFailureResultOutput, error) { + req, out := c.PutJobFailureResultRequest(input) + err := req.Send() + return out, err +} + +const opPutJobSuccessResult = "PutJobSuccessResult" + +// PutJobSuccessResultRequest generates a request for the PutJobSuccessResult operation. +func (c *CodePipeline) PutJobSuccessResultRequest(input *PutJobSuccessResultInput) (req *request.Request, output *PutJobSuccessResultOutput) { + op := &request.Operation{ + Name: opPutJobSuccessResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutJobSuccessResultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutJobSuccessResultOutput{} + req.Data = output + return +} + +// Represents the success of a job as returned to the pipeline by a job worker. +// Only used for custom actions. +func (c *CodePipeline) PutJobSuccessResult(input *PutJobSuccessResultInput) (*PutJobSuccessResultOutput, error) { + req, out := c.PutJobSuccessResultRequest(input) + err := req.Send() + return out, err +} + +const opPutThirdPartyJobFailureResult = "PutThirdPartyJobFailureResult" + +// PutThirdPartyJobFailureResultRequest generates a request for the PutThirdPartyJobFailureResult operation. 
+func (c *CodePipeline) PutThirdPartyJobFailureResultRequest(input *PutThirdPartyJobFailureResultInput) (req *request.Request, output *PutThirdPartyJobFailureResultOutput) { + op := &request.Operation{ + Name: opPutThirdPartyJobFailureResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutThirdPartyJobFailureResultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutThirdPartyJobFailureResultOutput{} + req.Data = output + return +} + +// Represents the failure of a third party job as returned to the pipeline by +// a job worker. Only used for partner actions. +func (c *CodePipeline) PutThirdPartyJobFailureResult(input *PutThirdPartyJobFailureResultInput) (*PutThirdPartyJobFailureResultOutput, error) { + req, out := c.PutThirdPartyJobFailureResultRequest(input) + err := req.Send() + return out, err +} + +const opPutThirdPartyJobSuccessResult = "PutThirdPartyJobSuccessResult" + +// PutThirdPartyJobSuccessResultRequest generates a request for the PutThirdPartyJobSuccessResult operation. +func (c *CodePipeline) PutThirdPartyJobSuccessResultRequest(input *PutThirdPartyJobSuccessResultInput) (req *request.Request, output *PutThirdPartyJobSuccessResultOutput) { + op := &request.Operation{ + Name: opPutThirdPartyJobSuccessResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutThirdPartyJobSuccessResultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutThirdPartyJobSuccessResultOutput{} + req.Data = output + return +} + +// Represents the success of a third party job as returned to the pipeline by +// a job worker. Only used for partner actions. 
+func (c *CodePipeline) PutThirdPartyJobSuccessResult(input *PutThirdPartyJobSuccessResultInput) (*PutThirdPartyJobSuccessResultOutput, error) { + req, out := c.PutThirdPartyJobSuccessResultRequest(input) + err := req.Send() + return out, err +} + +const opStartPipelineExecution = "StartPipelineExecution" + +// StartPipelineExecutionRequest generates a request for the StartPipelineExecution operation. +func (c *CodePipeline) StartPipelineExecutionRequest(input *StartPipelineExecutionInput) (req *request.Request, output *StartPipelineExecutionOutput) { + op := &request.Operation{ + Name: opStartPipelineExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartPipelineExecutionInput{} + } + + req = c.newRequest(op, input, output) + output = &StartPipelineExecutionOutput{} + req.Data = output + return +} + +// Starts the specified pipeline. Specifically, it begins processing the latest +// commit to the source location specified as part of the pipeline. +func (c *CodePipeline) StartPipelineExecution(input *StartPipelineExecutionInput) (*StartPipelineExecutionOutput, error) { + req, out := c.StartPipelineExecutionRequest(input) + err := req.Send() + return out, err +} + +const opUpdatePipeline = "UpdatePipeline" + +// UpdatePipelineRequest generates a request for the UpdatePipeline operation. +func (c *CodePipeline) UpdatePipelineRequest(input *UpdatePipelineInput) (req *request.Request, output *UpdatePipelineOutput) { + op := &request.Operation{ + Name: opUpdatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdatePipelineOutput{} + req.Data = output + return +} + +// Updates a specified pipeline with edits or changes to its structure. Use +// a JSON file with the pipeline structure in conjunction with UpdatePipeline +// to provide the full structure of the pipeline. 
Updating the pipeline increases +// the version number of the pipeline by 1. +func (c *CodePipeline) UpdatePipeline(input *UpdatePipelineInput) (*UpdatePipelineOutput, error) { + req, out := c.UpdatePipelineRequest(input) + err := req.Send() + return out, err +} + +// Represents an AWS session credentials object. These credentials are temporary +// credentials that are issued by AWS Secure Token Service (STS). They can be +// used to access input and output artifacts in the Amazon S3 bucket used to +// store artifact for the pipeline in AWS CodePipeline. +type AWSSessionCredentials struct { + _ struct{} `type:"structure"` + + // The access key for the session. + AccessKeyId *string `locationName:"accessKeyId" type:"string" required:"true"` + + // The secret access key for the session. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" required:"true"` + + // The token for the session. + SessionToken *string `locationName:"sessionToken" type:"string" required:"true"` +} + +// String returns the string representation +func (s AWSSessionCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AWSSessionCredentials) GoString() string { + return s.String() +} + +// Represents the input of an acknowledge job action. +type AcknowledgeJobInput struct { + _ struct{} `type:"structure"` + + // The unique system-generated ID of the job for which you want to confirm receipt. + JobId *string `locationName:"jobId" type:"string" required:"true"` + + // A system-generated random number that AWS CodePipeline uses to ensure that + // the job is being worked on by only one job worker. This number must be returned + // in the response. 
+ Nonce *string `locationName:"nonce" type:"string" required:"true"` +} + +// String returns the string representation +func (s AcknowledgeJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcknowledgeJobInput) GoString() string { + return s.String() +} + +// Represents the output of an acknowledge job action. +type AcknowledgeJobOutput struct { + _ struct{} `type:"structure"` + + // Whether the job worker has received the specified job. + Status *string `locationName:"status" type:"string" enum:"JobStatus"` +} + +// String returns the string representation +func (s AcknowledgeJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcknowledgeJobOutput) GoString() string { + return s.String() +} + +// Represents the input of an acknowledge third party job action. +type AcknowledgeThirdPartyJobInput struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The unique system-generated ID of the job. + JobId *string `locationName:"jobId" min:"1" type:"string" required:"true"` + + // A system-generated random number that AWS CodePipeline uses to ensure that + // the job is being worked on by only one job worker. This number must be returned + // in the response. + Nonce *string `locationName:"nonce" type:"string" required:"true"` +} + +// String returns the string representation +func (s AcknowledgeThirdPartyJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcknowledgeThirdPartyJobInput) GoString() string { + return s.String() +} + +// Represents the output of an acknowledge third party job action. 
+type AcknowledgeThirdPartyJobOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The status information for the third party job, if any.
+	Status *string `locationName:"status" type:"string" enum:"JobStatus"`
+}
+
+// String returns the string representation
+func (s AcknowledgeThirdPartyJobOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AcknowledgeThirdPartyJobOutput) GoString() string {
+	return s.String()
+}
+
+// Represents information about an action configuration.
+type ActionConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The configuration data for the action.
+	Configuration map[string]*string `locationName:"configuration" type:"map"`
+}
+
+// String returns the string representation
+func (s ActionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActionConfiguration) GoString() string {
+	return s.String()
+}
+
+// Represents information about an action configuration property.
+type ActionConfigurationProperty struct {
+	_ struct{} `type:"structure"`
+
+	// The description of the action configuration property that will be displayed
+	// to users.
+	Description *string `locationName:"description" min:"1" type:"string"`
+
+	// Whether the configuration property is a key.
+	Key *bool `locationName:"key" type:"boolean" required:"true"`
+
+	// The name of the action configuration property.
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// Indicates that the property will be used in conjunction with PollForJobs.
+	// When creating a custom action, an action can have up to one queryable property.
+	// If it has one, that property must be both required and not secret.
+	//
+	// If you create a pipeline with a custom action type, and that custom action
+	// contains a queryable property, the value for that configuration property
+	// is subject to additional restrictions.
The value must be less than or equal + // to twenty (20) characters. The value can contain only alphanumeric characters, + // underscores, and hyphens. + Queryable *bool `locationName:"queryable" type:"boolean"` + + // Whether the configuration property is a required value. + Required *bool `locationName:"required" type:"boolean" required:"true"` + + // Whether the configuration property is secret. Secrets are hidden from all + // calls except for GetJobDetails, GetThirdPartyJobDetails, PollForJobs, and + // PollForThirdPartyJobs. + // + // When updating a pipeline, passing * * * * * without changing any other values + // of the action will preserve the prior value of the secret. + Secret *bool `locationName:"secret" type:"boolean" required:"true"` + + // The type of the configuration property. + Type *string `locationName:"type" type:"string" enum:"ActionConfigurationPropertyType"` +} + +// String returns the string representation +func (s ActionConfigurationProperty) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionConfigurationProperty) GoString() string { + return s.String() +} + +// Represents the context of an action within the stage of a pipeline to a job +// worker. +type ActionContext struct { + _ struct{} `type:"structure"` + + // The name of the action within the context of a job. + Name *string `locationName:"name" min:"1" type:"string"` +} + +// String returns the string representation +func (s ActionContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionContext) GoString() string { + return s.String() +} + +// Represents information about an action declaration. +type ActionDeclaration struct { + _ struct{} `type:"structure"` + + // The configuration information for the action type. + ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure" required:"true"` + + // The action declaration's configuration. 
+ Configuration map[string]*string `locationName:"configuration" type:"map"` + + // The name or ID of the artifact consumed by the action, such as a test or + // build artifact. + InputArtifacts []*InputArtifact `locationName:"inputArtifacts" type:"list"` + + // The action declaration's name. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The name or ID of the result of the action declaration, such as a test or + // build artifact. + OutputArtifacts []*OutputArtifact `locationName:"outputArtifacts" type:"list"` + + // The ARN of the IAM service role that will perform the declared action. This + // is assumed through the roleArn for the pipeline. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The order in which actions are run. + RunOrder *int64 `locationName:"runOrder" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ActionDeclaration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionDeclaration) GoString() string { + return s.String() +} + +// Represents information about how an action runs. +type ActionExecution struct { + _ struct{} `type:"structure"` + + // The details of an error returned by a URL external to AWS. + ErrorDetails *ErrorDetails `locationName:"errorDetails" type:"structure"` + + // The external ID of the run of the action. + ExternalExecutionId *string `locationName:"externalExecutionId" type:"string"` + + // The URL of a resource external to AWS that will be used when running the + // action, for example an external repository URL. + ExternalExecutionUrl *string `locationName:"externalExecutionUrl" min:"1" type:"string"` + + // The last status change of the action. + LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp" timestampFormat:"unix"` + + // A percentage of completeness of the action as it runs. 
+ PercentComplete *int64 `locationName:"percentComplete" type:"integer"` + + // The status of the action, or for a completed action, the last status of the + // action. + Status *string `locationName:"status" type:"string" enum:"ActionExecutionStatus"` + + // A summary of the run of the action. + Summary *string `locationName:"summary" type:"string"` +} + +// String returns the string representation +func (s ActionExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionExecution) GoString() string { + return s.String() +} + +// Represents information about the version (or revision) of an action. +type ActionRevision struct { + _ struct{} `type:"structure"` + + // The date and time when the most recent version of the action was created, + // in timestamp format. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The unique identifier of the change that set the state to this revision, + // for example a deployment ID or timestamp. + RevisionChangeId *string `locationName:"revisionChangeId" type:"string"` + + // The system-generated unique ID that identifies the revision number of the + // action. + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ActionRevision) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionRevision) GoString() string { + return s.String() +} + +// Represents information about the state of an action. +type ActionState struct { + _ struct{} `type:"structure"` + + // The name of the action. + ActionName *string `locationName:"actionName" min:"1" type:"string"` + + // Represents information about the version (or revision) of an action. 
+ CurrentRevision *ActionRevision `locationName:"currentRevision" type:"structure"` + + // A URL link for more information about the state of the action, such as a + // deployment group details page. + EntityUrl *string `locationName:"entityUrl" min:"1" type:"string"` + + // Represents information about how an action runs. + LatestExecution *ActionExecution `locationName:"latestExecution" type:"structure"` + + // A URL link for more information about the revision, such as a commit details + // page. + RevisionUrl *string `locationName:"revisionUrl" min:"1" type:"string"` +} + +// String returns the string representation +func (s ActionState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionState) GoString() string { + return s.String() +} + +// Returns information about the details of an action type. +type ActionType struct { + _ struct{} `type:"structure"` + + // The configuration properties for the action type. + ActionConfigurationProperties []*ActionConfigurationProperty `locationName:"actionConfigurationProperties" type:"list"` + + // Represents information about an action type. + Id *ActionTypeId `locationName:"id" type:"structure" required:"true"` + + // The details of the input artifact for the action, such as its commit ID. + InputArtifactDetails *ArtifactDetails `locationName:"inputArtifactDetails" type:"structure" required:"true"` + + // The details of the output artifact of the action, such as its commit ID. + OutputArtifactDetails *ArtifactDetails `locationName:"outputArtifactDetails" type:"structure" required:"true"` + + // The settings for the action type. 
+ Settings *ActionTypeSettings `locationName:"settings" type:"structure"` +} + +// String returns the string representation +func (s ActionType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionType) GoString() string { + return s.String() +} + +// Represents information about an action type. +type ActionTypeId struct { + _ struct{} `type:"structure"` + + // A category defines what kind of action can be taken in the stage, and constrains + // the provider type for the action. Valid categories are limited to one of + // the values below. + Category *string `locationName:"category" type:"string" required:"true" enum:"ActionCategory"` + + // The creator of the action being called. + Owner *string `locationName:"owner" type:"string" required:"true" enum:"ActionOwner"` + + // The provider of the service being called by the action. Valid providers are + // determined by the action category. For example, an action in the Deploy category + // type might have a provider of AWS CodeDeploy, which would be specified as + // CodeDeploy. + Provider *string `locationName:"provider" min:"1" type:"string" required:"true"` + + // A string that identifies the action type. + Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ActionTypeId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionTypeId) GoString() string { + return s.String() +} + +// Returns information about the settings for an action type. +type ActionTypeSettings struct { + _ struct{} `type:"structure"` + + // The URL returned to the AWS CodePipeline console that provides a deep link + // to the resources of the external system, such as the configuration page for + // an AWS CodeDeploy deployment group. This link is provided as part of the + // action display within the pipeline. 
+ EntityUrlTemplate *string `locationName:"entityUrlTemplate" min:"1" type:"string"` + + // The URL returned to the AWS CodePipeline console that contains a link to + // the top-level landing page for the external system, such as console page + // for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS + // CodePipeline console and provides a link to the execution entity of the external + // action. + ExecutionUrlTemplate *string `locationName:"executionUrlTemplate" min:"1" type:"string"` + + // The URL returned to the AWS CodePipeline console that contains a link to + // the page where customers can update or change the configuration of the external + // action. + RevisionUrlTemplate *string `locationName:"revisionUrlTemplate" min:"1" type:"string"` + + // The URL of a sign-up page where users can sign up for an external service + // and perform initial configuration of the action provided by that service. + ThirdPartyConfigurationUrl *string `locationName:"thirdPartyConfigurationUrl" min:"1" type:"string"` +} + +// String returns the string representation +func (s ActionTypeSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionTypeSettings) GoString() string { + return s.String() +} + +// Represents information about an artifact that will be worked upon by actions +// in the pipeline. +type Artifact struct { + _ struct{} `type:"structure"` + + // The location of an artifact. + Location *ArtifactLocation `locationName:"location" type:"structure"` + + // The artifact's name. + Name *string `locationName:"name" min:"1" type:"string"` + + // The artifact's revision ID. Depending on the type of object, this could be + // a commit ID (GitHub) or a revision ID (Amazon S3). 
+ Revision *string `locationName:"revision" type:"string"` +} + +// String returns the string representation +func (s Artifact) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Artifact) GoString() string { + return s.String() +} + +// Returns information about the details of an artifact. +type ArtifactDetails struct { + _ struct{} `type:"structure"` + + // The maximum number of artifacts allowed for the action type. + MaximumCount *int64 `locationName:"maximumCount" type:"integer" required:"true"` + + // The minimum number of artifacts allowed for the action type. + MinimumCount *int64 `locationName:"minimumCount" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ArtifactDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ArtifactDetails) GoString() string { + return s.String() +} + +// Represents information about the location of an artifact. +type ArtifactLocation struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that contains the artifact. + S3Location *S3ArtifactLocation `locationName:"s3Location" type:"structure"` + + // The type of artifact in the location. + Type *string `locationName:"type" type:"string" enum:"ArtifactLocationType"` +} + +// String returns the string representation +func (s ArtifactLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ArtifactLocation) GoString() string { + return s.String() +} + +// The Amazon S3 location where artifacts are stored for the pipeline. If this +// Amazon S3 bucket is created manually, it must meet the requirements for AWS +// CodePipeline. For more information, see the Concepts. +type ArtifactStore struct { + _ struct{} `type:"structure"` + + // The AWS Key Management Service (AWS KMS) key used to encrypt the data in + // the artifact store. 
If this is undefined, the default key for Amazon S3 is + // used. + EncryptionKey *EncryptionKey `locationName:"encryptionKey" type:"structure"` + + // The location for storing the artifacts for a pipeline, such as an S3 bucket + // or folder. + Location *string `locationName:"location" min:"3" type:"string" required:"true"` + + // The type of the artifact store, such as S3. + Type *string `locationName:"type" type:"string" required:"true" enum:"ArtifactStoreType"` +} + +// String returns the string representation +func (s ArtifactStore) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ArtifactStore) GoString() string { + return s.String() +} + +// Represents information about a gate declaration. +type BlockerDeclaration struct { + _ struct{} `type:"structure"` + + // The name of the gate declaration. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The type of the gate declaration. + Type *string `locationName:"type" type:"string" required:"true" enum:"BlockerType"` +} + +// String returns the string representation +func (s BlockerDeclaration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockerDeclaration) GoString() string { + return s.String() +} + +// Represents the input of a create custom action operation. +type CreateCustomActionTypeInput struct { + _ struct{} `type:"structure"` + + // The category of the custom action, such as a source action or a build action. + Category *string `locationName:"category" type:"string" required:"true" enum:"ActionCategory"` + + // The configuration properties for the custom action. + ConfigurationProperties []*ActionConfigurationProperty `locationName:"configurationProperties" type:"list"` + + // Returns information about the details of an artifact. 
+ InputArtifactDetails *ArtifactDetails `locationName:"inputArtifactDetails" type:"structure" required:"true"` + + // Returns information about the details of an artifact. + OutputArtifactDetails *ArtifactDetails `locationName:"outputArtifactDetails" type:"structure" required:"true"` + + // The provider of the service used in the custom action, such as AWS CodeDeploy. + Provider *string `locationName:"provider" min:"1" type:"string" required:"true"` + + // Returns information about the settings for an action type. + Settings *ActionTypeSettings `locationName:"settings" type:"structure"` + + // The version number of the custom action. + // + // A newly-created custom action is always assigned a version number of 1. + // This is required. + Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCustomActionTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomActionTypeInput) GoString() string { + return s.String() +} + +// Represents the output of a create custom action operation. +type CreateCustomActionTypeOutput struct { + _ struct{} `type:"structure"` + + // Returns information about the details of an action type. + ActionType *ActionType `locationName:"actionType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateCustomActionTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomActionTypeOutput) GoString() string { + return s.String() +} + +// Represents the input of a create pipeline action. +type CreatePipelineInput struct { + _ struct{} `type:"structure"` + + // Represents the structure of actions and stages to be performed in the pipeline. 
+ Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineInput) GoString() string { + return s.String() +} + +// Represents the output of a create pipeline action. +type CreatePipelineOutput struct { + _ struct{} `type:"structure"` + + // Represents the structure of actions and stages to be performed in the pipeline. + Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure"` +} + +// String returns the string representation +func (s CreatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineOutput) GoString() string { + return s.String() +} + +// Represents information about a current revision. +type CurrentRevision struct { + _ struct{} `type:"structure"` + + // The change identifier for the current revision. + ChangeIdentifier *string `locationName:"changeIdentifier" type:"string" required:"true"` + + // The revision ID of the current version of an artifact. + Revision *string `locationName:"revision" type:"string" required:"true"` +} + +// String returns the string representation +func (s CurrentRevision) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CurrentRevision) GoString() string { + return s.String() +} + +// Represents the input of a delete custom action operation. The custom action +// will be marked as deleted. +type DeleteCustomActionTypeInput struct { + _ struct{} `type:"structure"` + + // The category of the custom action that you want to delete, such as source + // or deploy. + Category *string `locationName:"category" type:"string" required:"true" enum:"ActionCategory"` + + // The provider of the service used in the custom action, such as AWS CodeDeploy. 
+ Provider *string `locationName:"provider" min:"1" type:"string" required:"true"` + + // The version of the custom action to delete. + Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCustomActionTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomActionTypeInput) GoString() string { + return s.String() +} + +type DeleteCustomActionTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomActionTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomActionTypeOutput) GoString() string { + return s.String() +} + +// Represents the input of a delete pipeline action. +type DeletePipelineInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline to be deleted. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineInput) GoString() string { + return s.String() +} + +type DeletePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineOutput) GoString() string { + return s.String() +} + +// Represents the input of a disable stage transition input action. +type DisableStageTransitionInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline in which you want to disable the flow of artifacts + // from one stage to another. 
+ PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // The reason given to the user why a stage is disabled, such as waiting for + // manual approval or manual tests. This message is displayed in the pipeline + // console UI. + Reason *string `locationName:"reason" min:"1" type:"string" required:"true"` + + // The name of the stage where you want to disable the inbound or outbound transition + // of artifacts. + StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` + + // Specifies whether artifacts will be prevented from transitioning into the + // stage and being processed by the actions in that stage (inbound), or prevented + // from transitioning from the stage after they have been processed by the actions + // in that stage (outbound). + TransitionType *string `locationName:"transitionType" type:"string" required:"true" enum:"StageTransitionType"` +} + +// String returns the string representation +func (s DisableStageTransitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableStageTransitionInput) GoString() string { + return s.String() +} + +type DisableStageTransitionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableStageTransitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableStageTransitionOutput) GoString() string { + return s.String() +} + +// Represents the input of an enable stage transition action. +type EnableStageTransitionInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline in which you want to enable the flow of artifacts + // from one stage to another. 
+ PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // The name of the stage where you want to enable the transition of artifacts, + // either into the stage (inbound) or from that stage to the next stage (outbound). + StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` + + // Specifies whether artifacts will be allowed to enter the stage and be processed + // by the actions in that stage (inbound) or whether already-processed artifacts + // will be allowed to transition to the next stage (outbound). + TransitionType *string `locationName:"transitionType" type:"string" required:"true" enum:"StageTransitionType"` +} + +// String returns the string representation +func (s EnableStageTransitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableStageTransitionInput) GoString() string { + return s.String() +} + +type EnableStageTransitionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableStageTransitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableStageTransitionOutput) GoString() string { + return s.String() +} + +// Represents information about the AWS Key Management Service (AWS KMS) key +// used to encrypt data in the artifact store. +type EncryptionKey struct { + _ struct{} `type:"structure"` + + // The ID of the AWS KMS key. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` + + // The type of AWS KMS key, such as a customer master key. 
+ Type *string `locationName:"type" type:"string" required:"true" enum:"EncryptionKeyType"` +} + +// String returns the string representation +func (s EncryptionKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionKey) GoString() string { + return s.String() +} + +// Represents information about an error in AWS CodePipeline. +type ErrorDetails struct { + _ struct{} `type:"structure"` + + // The system ID or error number code of the error. + Code *string `locationName:"code" type:"string"` + + // The text of the error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ErrorDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDetails) GoString() string { + return s.String() +} + +// The details of the actions taken and results produced on an artifact as it +// passes through stages in the pipeline. +type ExecutionDetails struct { + _ struct{} `type:"structure"` + + // The system-generated unique ID of this action used to identify this job worker + // in any external systems, such as AWS CodeDeploy. + ExternalExecutionId *string `locationName:"externalExecutionId" type:"string"` + + // The percentage of work completed on the action, represented on a scale of + // zero to one hundred percent. + PercentComplete *int64 `locationName:"percentComplete" type:"integer"` + + // The summary of the current status of the actions. + Summary *string `locationName:"summary" type:"string"` +} + +// String returns the string representation +func (s ExecutionDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutionDetails) GoString() string { + return s.String() +} + +// Represents information about failure details. 
+type FailureDetails struct { + _ struct{} `type:"structure"` + + // The external ID of the run of the action that failed. + ExternalExecutionId *string `locationName:"externalExecutionId" type:"string"` + + // The message about the failure. + Message *string `locationName:"message" type:"string" required:"true"` + + // The type of the failure. + Type *string `locationName:"type" type:"string" required:"true" enum:"FailureType"` +} + +// String returns the string representation +func (s FailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailureDetails) GoString() string { + return s.String() +} + +// Represents the input of a get job details action. +type GetJobDetailsInput struct { + _ struct{} `type:"structure"` + + // The unique system-generated ID for the job. + JobId *string `locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetJobDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobDetailsInput) GoString() string { + return s.String() +} + +// Represents the output of a get job details action. +type GetJobDetailsOutput struct { + _ struct{} `type:"structure"` + + // The details of the job. + // + // If AWSSessionCredentials is used, a long-running job can call GetJobDetails + // again to obtain new credentials. + JobDetails *JobDetails `locationName:"jobDetails" type:"structure"` +} + +// String returns the string representation +func (s GetJobDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobDetailsOutput) GoString() string { + return s.String() +} + +// Represents the input of a get pipeline action. +type GetPipelineInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline for which you want to get information. 
Pipeline + // names must be unique under an Amazon Web Services (AWS) user account. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The version number of the pipeline. If you do not specify a version, defaults + // to the most current version. + Version *int64 `locationName:"version" min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetPipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineInput) GoString() string { + return s.String() +} + +// Represents the output of a get pipeline action. +type GetPipelineOutput struct { + _ struct{} `type:"structure"` + + // Represents the structure of actions and stages to be performed in the pipeline. + Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure"` +} + +// String returns the string representation +func (s GetPipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineOutput) GoString() string { + return s.String() +} + +// Represents the input of a get pipeline state action. +type GetPipelineStateInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline about which you want to get information. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPipelineStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineStateInput) GoString() string { + return s.String() +} + +// Represents the output of a get pipeline state action. +type GetPipelineStateOutput struct { + _ struct{} `type:"structure"` + + // The date and time the pipeline was created, in timestamp format. 
+ Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // The name of the pipeline for which you want to get the state. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string"` + + // The version number of the pipeline. + // + // A newly-created pipeline is always assigned a version number of 1. + PipelineVersion *int64 `locationName:"pipelineVersion" min:"1" type:"integer"` + + // A list of the pipeline stage output information, including stage name, state, + // most recent run details, whether the stage is disabled, and other data. + StageStates []*StageState `locationName:"stageStates" type:"list"` + + // The date and time the pipeline was last updated, in timestamp format. + Updated *time.Time `locationName:"updated" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s GetPipelineStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineStateOutput) GoString() string { + return s.String() +} + +// Represents the input of a get third party job details action. +type GetThirdPartyJobDetailsInput struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The unique system-generated ID used for identifying the job. + JobId *string `locationName:"jobId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetThirdPartyJobDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThirdPartyJobDetailsInput) GoString() string { + return s.String() +} + +// Represents the output of a get third party job details action. 
+type GetThirdPartyJobDetailsOutput struct { + _ struct{} `type:"structure"` + + // The details of the job, including any protected values defined for the job. + JobDetails *ThirdPartyJobDetails `locationName:"jobDetails" type:"structure"` +} + +// String returns the string representation +func (s GetThirdPartyJobDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThirdPartyJobDetailsOutput) GoString() string { + return s.String() +} + +// Represents information about an artifact to be worked on, such as a test +// or build artifact. +type InputArtifact struct { + _ struct{} `type:"structure"` + + // The name of the artifact to be worked on, for example, "My App". + // + // The input artifact of an action must exactly match the output artifact declared + // in a preceding action, but the input artifact does not have to be the next + // action in strict sequence from the action that provided the output artifact. + // Actions in parallel can declare different output artifacts, which are in + // turn consumed by different following actions. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s InputArtifact) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputArtifact) GoString() string { + return s.String() +} + +// Represents information about a job. +type Job struct { + _ struct{} `type:"structure"` + + // The ID of the AWS account to use when performing the job. + AccountId *string `locationName:"accountId" type:"string"` + + // Additional data about a job. + Data *JobData `locationName:"data" type:"structure"` + + // The unique system-generated ID of the job. + Id *string `locationName:"id" type:"string"` + + // A system-generated random number that AWS CodePipeline uses to ensure that + // the job is being worked on by only one job worker. 
This number must be returned + // in the response. + Nonce *string `locationName:"nonce" type:"string"` +} + +// String returns the string representation +func (s Job) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Job) GoString() string { + return s.String() +} + +// Represents additional information about a job required for a job worker to +// complete the job. +type JobData struct { + _ struct{} `type:"structure"` + + // Represents information about an action configuration. + ActionConfiguration *ActionConfiguration `locationName:"actionConfiguration" type:"structure"` + + // Represents information about an action type. + ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure"` + + // Represents an AWS session credentials object. These credentials are temporary + // credentials that are issued by AWS Secure Token Service (STS). They can be + // used to access input and output artifacts in the Amazon S3 bucket used to + // store artifact for the pipeline in AWS CodePipeline. + ArtifactCredentials *AWSSessionCredentials `locationName:"artifactCredentials" type:"structure"` + + // A system-generated token, such as a AWS CodeDeploy deployment ID, that a + // job requires in order to continue the job asynchronously. + ContinuationToken *string `locationName:"continuationToken" type:"string"` + + // Represents information about the AWS Key Management Service (AWS KMS) key + // used to encrypt data in the artifact store. + EncryptionKey *EncryptionKey `locationName:"encryptionKey" type:"structure"` + + // The artifact supplied to the job. + InputArtifacts []*Artifact `locationName:"inputArtifacts" type:"list"` + + // The output of the job. + OutputArtifacts []*Artifact `locationName:"outputArtifacts" type:"list"` + + // Represents information about a pipeline to a job worker. 
+ PipelineContext *PipelineContext `locationName:"pipelineContext" type:"structure"` +} + +// String returns the string representation +func (s JobData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobData) GoString() string { + return s.String() +} + +// Represents information about the details of a job. +type JobDetails struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the job. + AccountId *string `locationName:"accountId" type:"string"` + + // Represents additional information about a job required for a job worker to + // complete the job. + Data *JobData `locationName:"data" type:"structure"` + + // The unique system-generated ID of the job. + Id *string `locationName:"id" type:"string"` +} + +// String returns the string representation +func (s JobDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobDetails) GoString() string { + return s.String() +} + +// Represents the input of a list action types action. +type ListActionTypesInput struct { + _ struct{} `type:"structure"` + + // Filters the list of action types to those created by a specified entity. + ActionOwnerFilter *string `locationName:"actionOwnerFilter" type:"string" enum:"ActionOwner"` + + // An identifier that was returned from the previous list action types call, + // which can be used to return the next set of action types in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListActionTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListActionTypesInput) GoString() string { + return s.String() +} + +// Represents the output of a list action types action. +type ListActionTypesOutput struct { + _ struct{} `type:"structure"` + + // Provides details of the action types. 
+ ActionTypes []*ActionType `locationName:"actionTypes" type:"list" required:"true"` + + // If the amount of returned information is significantly large, an identifier + // is also returned which can be used in a subsequent list action types call + // to return the next set of action types in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListActionTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListActionTypesOutput) GoString() string { + return s.String() +} + +// Represents the input of a list pipelines action. +type ListPipelinesInput struct { + _ struct{} `type:"structure"` + + // An identifier that was returned from the previous list pipelines call, which + // can be used to return the next set of pipelines in the list. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListPipelinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesInput) GoString() string { + return s.String() +} + +// Represents the output of a list pipelines action. +type ListPipelinesOutput struct { + _ struct{} `type:"structure"` + + // If the amount of returned information is significantly large, an identifier + // is also returned which can be used in a subsequent list pipelines call to + // return the next set of pipelines in the list. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of pipelines. 
+ Pipelines []*PipelineSummary `locationName:"pipelines" type:"list"` +} + +// String returns the string representation +func (s ListPipelinesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesOutput) GoString() string { + return s.String() +} + +// Represents information about the output of an action. +type OutputArtifact struct { + _ struct{} `type:"structure"` + + // The name of the output of an artifact, such as "My App". + // + // The input artifact of an action must exactly match the output artifact declared + // in a preceding action, but the input artifact does not have to be the next + // action in strict sequence from the action that provided the output artifact. + // Actions in parallel can declare different output artifacts, which are in + // turn consumed by different following actions. + // + // Output artifact names must be unique within a pipeline. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s OutputArtifact) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputArtifact) GoString() string { + return s.String() +} + +// Represents information about a pipeline to a job worker. +type PipelineContext struct { + _ struct{} `type:"structure"` + + // Represents the context of an action within the stage of a pipeline to a job + // worker. + Action *ActionContext `locationName:"action" type:"structure"` + + // The name of the pipeline. This is a user-specified value. Pipeline names + // must be unique across all pipeline names under an Amazon Web Services account. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string"` + + // The stage of the pipeline. 
+ Stage *StageContext `locationName:"stage" type:"structure"` +} + +// String returns the string representation +func (s PipelineContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineContext) GoString() string { + return s.String() +} + +// Represents the structure of actions and stages to be performed in the pipeline. +type PipelineDeclaration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 location where artifacts are stored for the pipeline. If this + // Amazon S3 bucket is created manually, it must meet the requirements for AWS + // CodePipeline. For more information, see the Concepts. + ArtifactStore *ArtifactStore `locationName:"artifactStore" type:"structure" required:"true"` + + // The name of the action to be performed. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform + // actions with no actionRoleArn, or to use to assume roles for actions with + // an actionRoleArn. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The stage in which to perform the action. + Stages []*StageDeclaration `locationName:"stages" type:"list" required:"true"` + + // The version number of the pipeline. A new pipeline always has a version number + // of 1. This number is automatically incremented when a pipeline is updated. + Version *int64 `locationName:"version" min:"1" type:"integer"` +} + +// String returns the string representation +func (s PipelineDeclaration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineDeclaration) GoString() string { + return s.String() +} + +// Returns a summary of a pipeline. +type PipelineSummary struct { + _ struct{} `type:"structure"` + + // The date and time the pipeline was created, in timestamp format. 
+ Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // The name of the pipeline. + Name *string `locationName:"name" min:"1" type:"string"` + + // The date and time of the last update to the pipeline, in timestamp format. + Updated *time.Time `locationName:"updated" type:"timestamp" timestampFormat:"unix"` + + // The version number of the pipeline. + Version *int64 `locationName:"version" min:"1" type:"integer"` +} + +// String returns the string representation +func (s PipelineSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineSummary) GoString() string { + return s.String() +} + +// Represents the input of a poll for jobs action. +type PollForJobsInput struct { + _ struct{} `type:"structure"` + + // Represents information about an action type. + ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure" required:"true"` + + // The maximum number of jobs to return in a poll for jobs call. + MaxBatchSize *int64 `locationName:"maxBatchSize" min:"1" type:"integer"` + + // A map of property names and values. For an action type with no queryable + // properties, this value must be null or an empty map. For an action type with + // a queryable property, you must supply that property as a key in the map. + // Only jobs whose action configuration matches the mapped value will be returned. + QueryParam map[string]*string `locationName:"queryParam" type:"map"` +} + +// String returns the string representation +func (s PollForJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForJobsInput) GoString() string { + return s.String() +} + +// Represents the output of a poll for jobs action. +type PollForJobsOutput struct { + _ struct{} `type:"structure"` + + // Information about the jobs to take action on. 
+ Jobs []*Job `locationName:"jobs" type:"list"` +} + +// String returns the string representation +func (s PollForJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForJobsOutput) GoString() string { + return s.String() +} + +// Represents the input of a poll for third party jobs action. +type PollForThirdPartyJobsInput struct { + _ struct{} `type:"structure"` + + // Represents information about an action type. + ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure" required:"true"` + + // The maximum number of jobs to return in a poll for jobs call. + MaxBatchSize *int64 `locationName:"maxBatchSize" min:"1" type:"integer"` +} + +// String returns the string representation +func (s PollForThirdPartyJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForThirdPartyJobsInput) GoString() string { + return s.String() +} + +// Represents the output of a poll for third party jobs action. +type PollForThirdPartyJobsOutput struct { + _ struct{} `type:"structure"` + + // Information about the jobs to take action on. + Jobs []*ThirdPartyJob `locationName:"jobs" type:"list"` +} + +// String returns the string representation +func (s PollForThirdPartyJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForThirdPartyJobsOutput) GoString() string { + return s.String() +} + +// Represents the input of a put action revision action. +type PutActionRevisionInput struct { + _ struct{} `type:"structure"` + + // The name of the action that will process the revision. + ActionName *string `locationName:"actionName" min:"1" type:"string" required:"true"` + + // Represents information about the version (or revision) of an action. 
+ ActionRevision *ActionRevision `locationName:"actionRevision" type:"structure" required:"true"` + + // The name of the pipeline that will start processing the revision to the source. + PipelineName *string `locationName:"pipelineName" min:"1" type:"string" required:"true"` + + // The name of the stage that contains the action that will act upon the revision. + StageName *string `locationName:"stageName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutActionRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutActionRevisionInput) GoString() string { + return s.String() +} + +// Represents the output of a put action revision action. +type PutActionRevisionOutput struct { + _ struct{} `type:"structure"` + + // The new revision number or ID for the revision after the action completes. + NewRevision *bool `locationName:"newRevision" type:"boolean"` + + // The ID of the current workflow state of the pipeline. + PipelineExecutionId *string `locationName:"pipelineExecutionId" type:"string"` +} + +// String returns the string representation +func (s PutActionRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutActionRevisionOutput) GoString() string { + return s.String() +} + +// Represents the input of a put job failure result action. +type PutJobFailureResultInput struct { + _ struct{} `type:"structure"` + + // The details about the failure of a job. + FailureDetails *FailureDetails `locationName:"failureDetails" type:"structure" required:"true"` + + // The unique system-generated ID of the job that failed. This is the same ID + // returned from PollForJobs. 
+ JobId *string `locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutJobFailureResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutJobFailureResultInput) GoString() string { + return s.String() +} + +type PutJobFailureResultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutJobFailureResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutJobFailureResultOutput) GoString() string { + return s.String() +} + +// Represents the input of a put job success result action. +type PutJobSuccessResultInput struct { + _ struct{} `type:"structure"` + + // A system-generated token, such as a AWS CodeDeploy deployment ID, that the + // successful job used to complete a job asynchronously. + ContinuationToken *string `locationName:"continuationToken" type:"string"` + + // The ID of the current revision of the artifact successfully worked upon by + // the job. + CurrentRevision *CurrentRevision `locationName:"currentRevision" type:"structure"` + + // The execution details of the successful job, such as the actions taken by + // the job worker. + ExecutionDetails *ExecutionDetails `locationName:"executionDetails" type:"structure"` + + // The unique system-generated ID of the job that succeeded. This is the same + // ID returned from PollForJobs. 
+ JobId *string `locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutJobSuccessResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutJobSuccessResultInput) GoString() string { + return s.String() +} + +type PutJobSuccessResultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutJobSuccessResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutJobSuccessResultOutput) GoString() string { + return s.String() +} + +// Represents the input of a third party job failure result action. +type PutThirdPartyJobFailureResultInput struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // Represents information about failure details. + FailureDetails *FailureDetails `locationName:"failureDetails" type:"structure" required:"true"` + + // The ID of the job that failed. This is the same ID returned from PollForThirdPartyJobs. 
+ JobId *string `locationName:"jobId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutThirdPartyJobFailureResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutThirdPartyJobFailureResultInput) GoString() string { + return s.String() +} + +type PutThirdPartyJobFailureResultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutThirdPartyJobFailureResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutThirdPartyJobFailureResultOutput) GoString() string { + return s.String() +} + +// Represents the input of a put third party job success result action. +type PutThirdPartyJobSuccessResultInput struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // A system-generated token, such as a AWS CodeDeploy deployment ID, that a + // job uses in order to continue the job asynchronously. + ContinuationToken *string `locationName:"continuationToken" type:"string"` + + // Represents information about a current revision. + CurrentRevision *CurrentRevision `locationName:"currentRevision" type:"structure"` + + // The details of the actions taken and results produced on an artifact as it + // passes through stages in the pipeline. + ExecutionDetails *ExecutionDetails `locationName:"executionDetails" type:"structure"` + + // The ID of the job that successfully completed. This is the same ID returned + // from PollForThirdPartyJobs. 
+ JobId *string `locationName:"jobId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutThirdPartyJobSuccessResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutThirdPartyJobSuccessResultInput) GoString() string { + return s.String() +} + +type PutThirdPartyJobSuccessResultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutThirdPartyJobSuccessResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutThirdPartyJobSuccessResultOutput) GoString() string { + return s.String() +} + +// The location of the Amazon S3 bucket that contains a revision. +type S3ArtifactLocation struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket. + BucketName *string `locationName:"bucketName" type:"string" required:"true"` + + // The key of the object in the Amazon S3 bucket, which uniquely identifies + // the object in the bucket. + ObjectKey *string `locationName:"objectKey" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3ArtifactLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3ArtifactLocation) GoString() string { + return s.String() +} + +// Represents information about a stage to a job worker. +type StageContext struct { + _ struct{} `type:"structure"` + + // The name of the stage. + Name *string `locationName:"name" min:"1" type:"string"` +} + +// String returns the string representation +func (s StageContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageContext) GoString() string { + return s.String() +} + +// Represents information about a stage and its definition. 
+type StageDeclaration struct { + _ struct{} `type:"structure"` + + // The actions included in a stage. + Actions []*ActionDeclaration `locationName:"actions" type:"list" required:"true"` + + // The gates included in a stage. + Blockers []*BlockerDeclaration `locationName:"blockers" type:"list"` + + // The name of the stage. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StageDeclaration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageDeclaration) GoString() string { + return s.String() +} + +// Represents information about the state of the stage. +type StageState struct { + _ struct{} `type:"structure"` + + // The state of the stage. + ActionStates []*ActionState `locationName:"actionStates" type:"list"` + + // The state of the inbound transition, which is either enabled or disabled. + InboundTransitionState *TransitionState `locationName:"inboundTransitionState" type:"structure"` + + // The name of the stage. + StageName *string `locationName:"stageName" min:"1" type:"string"` +} + +// String returns the string representation +func (s StageState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageState) GoString() string { + return s.String() +} + +// Represents the input of a start pipeline execution action. +type StartPipelineExecutionInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline to start. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartPipelineExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartPipelineExecutionInput) GoString() string { + return s.String() +} + +// Represents the output of a start pipeline execution action. 
+type StartPipelineExecutionOutput struct { + _ struct{} `type:"structure"` + + // The unique system-generated ID of the pipeline that was started. + PipelineExecutionId *string `locationName:"pipelineExecutionId" type:"string"` +} + +// String returns the string representation +func (s StartPipelineExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartPipelineExecutionOutput) GoString() string { + return s.String() +} + +// A response to a PollForThirdPartyJobs request returned by AWS CodePipeline +// when there is a job to be worked upon by a partner action. +type ThirdPartyJob struct { + _ struct{} `type:"structure"` + + // The clientToken portion of the clientId and clientToken pair used to verify + // that the calling entity is allowed access to the job and its details. + ClientId *string `locationName:"clientId" type:"string"` + + // The identifier used to identify the job in AWS CodePipeline. + JobId *string `locationName:"jobId" type:"string"` +} + +// String returns the string representation +func (s ThirdPartyJob) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThirdPartyJob) GoString() string { + return s.String() +} + +// Represents information about the job data for a partner action. +type ThirdPartyJobData struct { + _ struct{} `type:"structure"` + + // Represents information about an action configuration. + ActionConfiguration *ActionConfiguration `locationName:"actionConfiguration" type:"structure"` + + // Represents information about an action type. + ActionTypeId *ActionTypeId `locationName:"actionTypeId" type:"structure"` + + // Represents an AWS session credentials object. These credentials are temporary + // credentials that are issued by AWS Secure Token Service (STS). 
They can be + // used to access input and output artifacts in the Amazon S3 bucket used to + // store artifact for the pipeline in AWS CodePipeline. + ArtifactCredentials *AWSSessionCredentials `locationName:"artifactCredentials" type:"structure"` + + // A system-generated token, such as a AWS CodeDeploy deployment ID, that a + // job requires in order to continue the job asynchronously. + ContinuationToken *string `locationName:"continuationToken" type:"string"` + + // The AWS Key Management Service (AWS KMS) key used to encrypt and decrypt + // data in the artifact store for the pipeline. + EncryptionKey *EncryptionKey `locationName:"encryptionKey" type:"structure"` + + // The name of the artifact that will be worked upon by the action, if any. + // This name might be system-generated, such as "MyApp", or might be defined + // by the user when the action is created. The input artifact name must match + // the name of an output artifact generated by an action in an earlier action + // or stage of the pipeline. + InputArtifacts []*Artifact `locationName:"inputArtifacts" type:"list"` + + // The name of the artifact that will be the result of the action, if any. This + // name might be system-generated, such as "MyBuiltApp", or might be defined + // by the user when the action is created. + OutputArtifacts []*Artifact `locationName:"outputArtifacts" type:"list"` + + // Represents information about a pipeline to a job worker. + PipelineContext *PipelineContext `locationName:"pipelineContext" type:"structure"` +} + +// String returns the string representation +func (s ThirdPartyJobData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThirdPartyJobData) GoString() string { + return s.String() +} + +// The details of a job sent in response to a GetThirdPartyJobDetails request. +type ThirdPartyJobDetails struct { + _ struct{} `type:"structure"` + + // The data to be returned by the third party job worker. 
+ Data *ThirdPartyJobData `locationName:"data" type:"structure"` + + // The identifier used to identify the job details in AWS CodePipeline. + Id *string `locationName:"id" min:"1" type:"string"` + + // A system-generated random number that AWS CodePipeline uses to ensure that + // the job is being worked on by only one job worker. This number must be returned + // in the response. + Nonce *string `locationName:"nonce" type:"string"` +} + +// String returns the string representation +func (s ThirdPartyJobDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThirdPartyJobDetails) GoString() string { + return s.String() +} + +// Represents information about the state of transitions between one stage and +// another stage. +type TransitionState struct { + _ struct{} `type:"structure"` + + // The user-specified reason why the transition between two stages of a pipeline + // was disabled. + DisabledReason *string `locationName:"disabledReason" min:"1" type:"string"` + + // Whether the transition between stages is enabled (true) or disabled (false). + Enabled *bool `locationName:"enabled" type:"boolean"` + + // The timestamp when the transition state was last changed. + LastChangedAt *time.Time `locationName:"lastChangedAt" type:"timestamp" timestampFormat:"unix"` + + // The ID of the user who last changed the transition state. + LastChangedBy *string `locationName:"lastChangedBy" type:"string"` +} + +// String returns the string representation +func (s TransitionState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitionState) GoString() string { + return s.String() +} + +// Represents the input of an update pipeline action. +type UpdatePipelineInput struct { + _ struct{} `type:"structure"` + + // The name of the pipeline to be updated. 
+ Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineInput) GoString() string { + return s.String() +} + +// Represents the output of an update pipeline action. +type UpdatePipelineOutput struct { + _ struct{} `type:"structure"` + + // The structure of the updated pipeline. + Pipeline *PipelineDeclaration `locationName:"pipeline" type:"structure"` +} + +// String returns the string representation +func (s UpdatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineOutput) GoString() string { + return s.String() +} + +const ( + // @enum ActionCategory + ActionCategorySource = "Source" + // @enum ActionCategory + ActionCategoryBuild = "Build" + // @enum ActionCategory + ActionCategoryDeploy = "Deploy" + // @enum ActionCategory + ActionCategoryTest = "Test" + // @enum ActionCategory + ActionCategoryInvoke = "Invoke" +) + +const ( + // @enum ActionConfigurationPropertyType + ActionConfigurationPropertyTypeString = "String" + // @enum ActionConfigurationPropertyType + ActionConfigurationPropertyTypeNumber = "Number" + // @enum ActionConfigurationPropertyType + ActionConfigurationPropertyTypeBoolean = "Boolean" +) + +const ( + // @enum ActionExecutionStatus + ActionExecutionStatusInProgress = "InProgress" + // @enum ActionExecutionStatus + ActionExecutionStatusSucceeded = "Succeeded" + // @enum ActionExecutionStatus + ActionExecutionStatusFailed = "Failed" +) + +const ( + // @enum ActionOwner + ActionOwnerAws = "AWS" + // @enum ActionOwner + ActionOwnerThirdParty = "ThirdParty" + // @enum ActionOwner + ActionOwnerCustom = "Custom" +) + +const ( + // @enum ArtifactLocationType + ArtifactLocationTypeS3 = "S3" +) + +const ( + // @enum ArtifactStoreType + 
ArtifactStoreTypeS3 = "S3" +) + +const ( + // @enum BlockerType + BlockerTypeSchedule = "Schedule" +) + +const ( + // @enum EncryptionKeyType + EncryptionKeyTypeKms = "KMS" +) + +const ( + // @enum FailureType + FailureTypeJobFailed = "JobFailed" + // @enum FailureType + FailureTypeConfigurationError = "ConfigurationError" + // @enum FailureType + FailureTypePermissionError = "PermissionError" + // @enum FailureType + FailureTypeRevisionOutOfSync = "RevisionOutOfSync" + // @enum FailureType + FailureTypeRevisionUnavailable = "RevisionUnavailable" + // @enum FailureType + FailureTypeSystemUnavailable = "SystemUnavailable" +) + +const ( + // @enum JobStatus + JobStatusCreated = "Created" + // @enum JobStatus + JobStatusQueued = "Queued" + // @enum JobStatus + JobStatusDispatched = "Dispatched" + // @enum JobStatus + JobStatusInProgress = "InProgress" + // @enum JobStatus + JobStatusTimedOut = "TimedOut" + // @enum JobStatus + JobStatusSucceeded = "Succeeded" + // @enum JobStatus + JobStatusFailed = "Failed" +) + +const ( + // @enum StageTransitionType + StageTransitionTypeInbound = "Inbound" + // @enum StageTransitionType + StageTransitionTypeOutbound = "Outbound" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/codepipelineiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,106 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package codepipelineiface provides an interface for the AWS CodePipeline. 
+package codepipelineiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/codepipeline" +) + +// CodePipelineAPI is the interface type for codepipeline.CodePipeline. +type CodePipelineAPI interface { + AcknowledgeJobRequest(*codepipeline.AcknowledgeJobInput) (*request.Request, *codepipeline.AcknowledgeJobOutput) + + AcknowledgeJob(*codepipeline.AcknowledgeJobInput) (*codepipeline.AcknowledgeJobOutput, error) + + AcknowledgeThirdPartyJobRequest(*codepipeline.AcknowledgeThirdPartyJobInput) (*request.Request, *codepipeline.AcknowledgeThirdPartyJobOutput) + + AcknowledgeThirdPartyJob(*codepipeline.AcknowledgeThirdPartyJobInput) (*codepipeline.AcknowledgeThirdPartyJobOutput, error) + + CreateCustomActionTypeRequest(*codepipeline.CreateCustomActionTypeInput) (*request.Request, *codepipeline.CreateCustomActionTypeOutput) + + CreateCustomActionType(*codepipeline.CreateCustomActionTypeInput) (*codepipeline.CreateCustomActionTypeOutput, error) + + CreatePipelineRequest(*codepipeline.CreatePipelineInput) (*request.Request, *codepipeline.CreatePipelineOutput) + + CreatePipeline(*codepipeline.CreatePipelineInput) (*codepipeline.CreatePipelineOutput, error) + + DeleteCustomActionTypeRequest(*codepipeline.DeleteCustomActionTypeInput) (*request.Request, *codepipeline.DeleteCustomActionTypeOutput) + + DeleteCustomActionType(*codepipeline.DeleteCustomActionTypeInput) (*codepipeline.DeleteCustomActionTypeOutput, error) + + DeletePipelineRequest(*codepipeline.DeletePipelineInput) (*request.Request, *codepipeline.DeletePipelineOutput) + + DeletePipeline(*codepipeline.DeletePipelineInput) (*codepipeline.DeletePipelineOutput, error) + + DisableStageTransitionRequest(*codepipeline.DisableStageTransitionInput) (*request.Request, *codepipeline.DisableStageTransitionOutput) + + DisableStageTransition(*codepipeline.DisableStageTransitionInput) (*codepipeline.DisableStageTransitionOutput, error) + + 
EnableStageTransitionRequest(*codepipeline.EnableStageTransitionInput) (*request.Request, *codepipeline.EnableStageTransitionOutput) + + EnableStageTransition(*codepipeline.EnableStageTransitionInput) (*codepipeline.EnableStageTransitionOutput, error) + + GetJobDetailsRequest(*codepipeline.GetJobDetailsInput) (*request.Request, *codepipeline.GetJobDetailsOutput) + + GetJobDetails(*codepipeline.GetJobDetailsInput) (*codepipeline.GetJobDetailsOutput, error) + + GetPipelineRequest(*codepipeline.GetPipelineInput) (*request.Request, *codepipeline.GetPipelineOutput) + + GetPipeline(*codepipeline.GetPipelineInput) (*codepipeline.GetPipelineOutput, error) + + GetPipelineStateRequest(*codepipeline.GetPipelineStateInput) (*request.Request, *codepipeline.GetPipelineStateOutput) + + GetPipelineState(*codepipeline.GetPipelineStateInput) (*codepipeline.GetPipelineStateOutput, error) + + GetThirdPartyJobDetailsRequest(*codepipeline.GetThirdPartyJobDetailsInput) (*request.Request, *codepipeline.GetThirdPartyJobDetailsOutput) + + GetThirdPartyJobDetails(*codepipeline.GetThirdPartyJobDetailsInput) (*codepipeline.GetThirdPartyJobDetailsOutput, error) + + ListActionTypesRequest(*codepipeline.ListActionTypesInput) (*request.Request, *codepipeline.ListActionTypesOutput) + + ListActionTypes(*codepipeline.ListActionTypesInput) (*codepipeline.ListActionTypesOutput, error) + + ListPipelinesRequest(*codepipeline.ListPipelinesInput) (*request.Request, *codepipeline.ListPipelinesOutput) + + ListPipelines(*codepipeline.ListPipelinesInput) (*codepipeline.ListPipelinesOutput, error) + + PollForJobsRequest(*codepipeline.PollForJobsInput) (*request.Request, *codepipeline.PollForJobsOutput) + + PollForJobs(*codepipeline.PollForJobsInput) (*codepipeline.PollForJobsOutput, error) + + PollForThirdPartyJobsRequest(*codepipeline.PollForThirdPartyJobsInput) (*request.Request, *codepipeline.PollForThirdPartyJobsOutput) + + PollForThirdPartyJobs(*codepipeline.PollForThirdPartyJobsInput) 
(*codepipeline.PollForThirdPartyJobsOutput, error) + + PutActionRevisionRequest(*codepipeline.PutActionRevisionInput) (*request.Request, *codepipeline.PutActionRevisionOutput) + + PutActionRevision(*codepipeline.PutActionRevisionInput) (*codepipeline.PutActionRevisionOutput, error) + + PutJobFailureResultRequest(*codepipeline.PutJobFailureResultInput) (*request.Request, *codepipeline.PutJobFailureResultOutput) + + PutJobFailureResult(*codepipeline.PutJobFailureResultInput) (*codepipeline.PutJobFailureResultOutput, error) + + PutJobSuccessResultRequest(*codepipeline.PutJobSuccessResultInput) (*request.Request, *codepipeline.PutJobSuccessResultOutput) + + PutJobSuccessResult(*codepipeline.PutJobSuccessResultInput) (*codepipeline.PutJobSuccessResultOutput, error) + + PutThirdPartyJobFailureResultRequest(*codepipeline.PutThirdPartyJobFailureResultInput) (*request.Request, *codepipeline.PutThirdPartyJobFailureResultOutput) + + PutThirdPartyJobFailureResult(*codepipeline.PutThirdPartyJobFailureResultInput) (*codepipeline.PutThirdPartyJobFailureResultOutput, error) + + PutThirdPartyJobSuccessResultRequest(*codepipeline.PutThirdPartyJobSuccessResultInput) (*request.Request, *codepipeline.PutThirdPartyJobSuccessResultOutput) + + PutThirdPartyJobSuccessResult(*codepipeline.PutThirdPartyJobSuccessResultInput) (*codepipeline.PutThirdPartyJobSuccessResultOutput, error) + + StartPipelineExecutionRequest(*codepipeline.StartPipelineExecutionInput) (*request.Request, *codepipeline.StartPipelineExecutionOutput) + + StartPipelineExecution(*codepipeline.StartPipelineExecutionInput) (*codepipeline.StartPipelineExecutionOutput, error) + + UpdatePipelineRequest(*codepipeline.UpdatePipelineInput) (*request.Request, *codepipeline.UpdatePipelineOutput) + + UpdatePipeline(*codepipeline.UpdatePipelineInput) (*codepipeline.UpdatePipelineOutput, error) +} + +var _ CodePipelineAPI = (*codepipeline.CodePipeline)(nil) diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,659 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codepipeline_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/codepipeline" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCodePipeline_AcknowledgeJob() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.AcknowledgeJobInput{ + JobId: aws.String("JobId"), // Required + Nonce: aws.String("Nonce"), // Required + } + resp, err := svc.AcknowledgeJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_AcknowledgeThirdPartyJob() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.AcknowledgeThirdPartyJobInput{ + ClientToken: aws.String("ClientToken"), // Required + JobId: aws.String("ThirdPartyJobId"), // Required + Nonce: aws.String("Nonce"), // Required + } + resp, err := svc.AcknowledgeThirdPartyJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_CreateCustomActionType() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.CreateCustomActionTypeInput{ + Category: aws.String("ActionCategory"), // Required + InputArtifactDetails: &codepipeline.ArtifactDetails{ // Required + MaximumCount: aws.Int64(1), // Required + MinimumCount: aws.Int64(1), // Required + }, + OutputArtifactDetails: &codepipeline.ArtifactDetails{ // Required + MaximumCount: aws.Int64(1), // Required + MinimumCount: aws.Int64(1), // Required + }, + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + ConfigurationProperties: []*codepipeline.ActionConfigurationProperty{ + { // Required + Key: aws.Bool(true), // Required + Name: aws.String("ActionConfigurationKey"), // Required + Required: aws.Bool(true), // Required + Secret: aws.Bool(true), // Required + Description: aws.String("Description"), + Queryable: aws.Bool(true), + Type: aws.String("ActionConfigurationPropertyType"), + }, + // More values... + }, + Settings: &codepipeline.ActionTypeSettings{ + EntityUrlTemplate: aws.String("UrlTemplate"), + ExecutionUrlTemplate: aws.String("UrlTemplate"), + RevisionUrlTemplate: aws.String("UrlTemplate"), + ThirdPartyConfigurationUrl: aws.String("Url"), + }, + } + resp, err := svc.CreateCustomActionType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_CreatePipeline() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.CreatePipelineInput{ + Pipeline: &codepipeline.PipelineDeclaration{ // Required + ArtifactStore: &codepipeline.ArtifactStore{ // Required + Location: aws.String("ArtifactStoreLocation"), // Required + Type: aws.String("ArtifactStoreType"), // Required + EncryptionKey: &codepipeline.EncryptionKey{ + Id: aws.String("EncryptionKeyId"), // Required + Type: aws.String("EncryptionKeyType"), // Required + }, + }, + Name: aws.String("PipelineName"), // Required + RoleArn: aws.String("RoleArn"), // Required + Stages: []*codepipeline.StageDeclaration{ // Required + { // Required + Actions: []*codepipeline.ActionDeclaration{ // Required + { // Required + ActionTypeId: &codepipeline.ActionTypeId{ // Required + Category: aws.String("ActionCategory"), // Required + Owner: aws.String("ActionOwner"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + }, + Name: aws.String("ActionName"), // Required + Configuration: map[string]*string{ + "Key": aws.String("ActionConfigurationValue"), // Required + // More values... + }, + InputArtifacts: []*codepipeline.InputArtifact{ + { // Required + Name: aws.String("ArtifactName"), // Required + }, + // More values... + }, + OutputArtifacts: []*codepipeline.OutputArtifact{ + { // Required + Name: aws.String("ArtifactName"), // Required + }, + // More values... + }, + RoleArn: aws.String("RoleArn"), + RunOrder: aws.Int64(1), + }, + // More values... + }, + Name: aws.String("StageName"), // Required + Blockers: []*codepipeline.BlockerDeclaration{ + { // Required + Name: aws.String("BlockerName"), // Required + Type: aws.String("BlockerType"), // Required + }, + // More values... + }, + }, + // More values... 
+ }, + Version: aws.Int64(1), + }, + } + resp, err := svc.CreatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_DeleteCustomActionType() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.DeleteCustomActionTypeInput{ + Category: aws.String("ActionCategory"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + } + resp, err := svc.DeleteCustomActionType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_DeletePipeline() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.DeletePipelineInput{ + Name: aws.String("PipelineName"), // Required + } + resp, err := svc.DeletePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_DisableStageTransition() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.DisableStageTransitionInput{ + PipelineName: aws.String("PipelineName"), // Required + Reason: aws.String("DisabledReason"), // Required + StageName: aws.String("StageName"), // Required + TransitionType: aws.String("StageTransitionType"), // Required + } + resp, err := svc.DisableStageTransition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_EnableStageTransition() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.EnableStageTransitionInput{ + PipelineName: aws.String("PipelineName"), // Required + StageName: aws.String("StageName"), // Required + TransitionType: aws.String("StageTransitionType"), // Required + } + resp, err := svc.EnableStageTransition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_GetJobDetails() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.GetJobDetailsInput{ + JobId: aws.String("JobId"), // Required + } + resp, err := svc.GetJobDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_GetPipeline() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.GetPipelineInput{ + Name: aws.String("PipelineName"), // Required + Version: aws.Int64(1), + } + resp, err := svc.GetPipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_GetPipelineState() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.GetPipelineStateInput{ + Name: aws.String("PipelineName"), // Required + } + resp, err := svc.GetPipelineState(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_GetThirdPartyJobDetails() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.GetThirdPartyJobDetailsInput{ + ClientToken: aws.String("ClientToken"), // Required + JobId: aws.String("ThirdPartyJobId"), // Required + } + resp, err := svc.GetThirdPartyJobDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_ListActionTypes() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.ListActionTypesInput{ + ActionOwnerFilter: aws.String("ActionOwner"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListActionTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_ListPipelines() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.ListPipelinesInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListPipelines(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_PollForJobs() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PollForJobsInput{ + ActionTypeId: &codepipeline.ActionTypeId{ // Required + Category: aws.String("ActionCategory"), // Required + Owner: aws.String("ActionOwner"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + }, + MaxBatchSize: aws.Int64(1), + QueryParam: map[string]*string{ + "Key": aws.String("ActionConfigurationQueryableValue"), // Required + // More values... + }, + } + resp, err := svc.PollForJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PollForThirdPartyJobs() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PollForThirdPartyJobsInput{ + ActionTypeId: &codepipeline.ActionTypeId{ // Required + Category: aws.String("ActionCategory"), // Required + Owner: aws.String("ActionOwner"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + }, + MaxBatchSize: aws.Int64(1), + } + resp, err := svc.PollForThirdPartyJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_PutActionRevision() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutActionRevisionInput{ + ActionName: aws.String("ActionName"), // Required + ActionRevision: &codepipeline.ActionRevision{ // Required + Created: aws.Time(time.Now()), // Required + RevisionId: aws.String("RevisionId"), // Required + RevisionChangeId: aws.String("RevisionChangeId"), + }, + PipelineName: aws.String("PipelineName"), // Required + StageName: aws.String("StageName"), // Required + } + resp, err := svc.PutActionRevision(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PutJobFailureResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutJobFailureResultInput{ + FailureDetails: &codepipeline.FailureDetails{ // Required + Message: aws.String("Message"), // Required + Type: aws.String("FailureType"), // Required + ExternalExecutionId: aws.String("ExecutionId"), + }, + JobId: aws.String("JobId"), // Required + } + resp, err := svc.PutJobFailureResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_PutJobSuccessResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutJobSuccessResultInput{ + JobId: aws.String("JobId"), // Required + ContinuationToken: aws.String("ContinuationToken"), + CurrentRevision: &codepipeline.CurrentRevision{ + ChangeIdentifier: aws.String("RevisionChangeIdentifier"), // Required + Revision: aws.String("Revision"), // Required + }, + ExecutionDetails: &codepipeline.ExecutionDetails{ + ExternalExecutionId: aws.String("ExecutionId"), + PercentComplete: aws.Int64(1), + Summary: aws.String("ExecutionSummary"), + }, + } + resp, err := svc.PutJobSuccessResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_PutThirdPartyJobFailureResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutThirdPartyJobFailureResultInput{ + ClientToken: aws.String("ClientToken"), // Required + FailureDetails: &codepipeline.FailureDetails{ // Required + Message: aws.String("Message"), // Required + Type: aws.String("FailureType"), // Required + ExternalExecutionId: aws.String("ExecutionId"), + }, + JobId: aws.String("ThirdPartyJobId"), // Required + } + resp, err := svc.PutThirdPartyJobFailureResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_PutThirdPartyJobSuccessResult() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.PutThirdPartyJobSuccessResultInput{ + ClientToken: aws.String("ClientToken"), // Required + JobId: aws.String("ThirdPartyJobId"), // Required + ContinuationToken: aws.String("ContinuationToken"), + CurrentRevision: &codepipeline.CurrentRevision{ + ChangeIdentifier: aws.String("RevisionChangeIdentifier"), // Required + Revision: aws.String("Revision"), // Required + }, + ExecutionDetails: &codepipeline.ExecutionDetails{ + ExternalExecutionId: aws.String("ExecutionId"), + PercentComplete: aws.Int64(1), + Summary: aws.String("ExecutionSummary"), + }, + } + resp, err := svc.PutThirdPartyJobSuccessResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCodePipeline_StartPipelineExecution() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.StartPipelineExecutionInput{ + Name: aws.String("PipelineName"), // Required + } + resp, err := svc.StartPipelineExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCodePipeline_UpdatePipeline() { + svc := codepipeline.New(session.New()) + + params := &codepipeline.UpdatePipelineInput{ + Pipeline: &codepipeline.PipelineDeclaration{ // Required + ArtifactStore: &codepipeline.ArtifactStore{ // Required + Location: aws.String("ArtifactStoreLocation"), // Required + Type: aws.String("ArtifactStoreType"), // Required + EncryptionKey: &codepipeline.EncryptionKey{ + Id: aws.String("EncryptionKeyId"), // Required + Type: aws.String("EncryptionKeyType"), // Required + }, + }, + Name: aws.String("PipelineName"), // Required + RoleArn: aws.String("RoleArn"), // Required + Stages: []*codepipeline.StageDeclaration{ // Required + { // Required + Actions: []*codepipeline.ActionDeclaration{ // Required + { // Required + ActionTypeId: &codepipeline.ActionTypeId{ // Required + Category: aws.String("ActionCategory"), // Required + Owner: aws.String("ActionOwner"), // Required + Provider: aws.String("ActionProvider"), // Required + Version: aws.String("Version"), // Required + }, + Name: aws.String("ActionName"), // Required + Configuration: map[string]*string{ + "Key": aws.String("ActionConfigurationValue"), // Required + // More values... + }, + InputArtifacts: []*codepipeline.InputArtifact{ + { // Required + Name: aws.String("ArtifactName"), // Required + }, + // More values... + }, + OutputArtifacts: []*codepipeline.OutputArtifact{ + { // Required + Name: aws.String("ArtifactName"), // Required + }, + // More values... + }, + RoleArn: aws.String("RoleArn"), + RunOrder: aws.Int64(1), + }, + // More values... + }, + Name: aws.String("StageName"), // Required + Blockers: []*codepipeline.BlockerDeclaration{ + { // Required + Name: aws.String("BlockerName"), // Required + Type: aws.String("BlockerType"), // Required + }, + // More values... + }, + }, + // More values... 
+ }, + Version: aws.Int64(1), + }, + } + resp, err := svc.UpdatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/codepipeline/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,159 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package codepipeline + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Overview This is the AWS CodePipeline API Reference. This guide provides +// descriptions of the actions and data types for AWS CodePipeline. Some functionality +// for your pipeline is only configurable through the API. For additional information, +// see the AWS CodePipeline User Guide (http://docs.aws.amazon.com/pipelines/latest/userguide/welcome.html). +// +// You can use the AWS CodePipeline API to work with pipelines, stages, actions, +// gates, and transitions, as described below. +// +// Pipelines are models of automated release processes. Each pipeline is uniquely +// named, and consists of actions, gates, and stages. +// +// You can work with pipelines by calling: CreatePipeline, which creates a +// uniquely-named pipeline. 
DeletePipeline, which deletes the specified pipeline. +// GetPipeline, which returns information about a pipeline structure. GetPipelineState, +// which returns information about the current state of the stages and actions +// of a pipeline. ListPipelines, which gets a summary of all of the pipelines +// associated with your account. StartPipelineExecution, which runs the +// most recent revision of an artifact through the pipeline. UpdatePipeline, +// which updates a pipeline with edits or changes to the structure of the pipeline. +// Pipelines include stages, which are logical groupings of gates +// and actions. Each stage contains one or more actions that must complete before +// the next stage begins. A stage will result in success or failure. If a stage +// fails, then the pipeline stops at that stage and will remain stopped until +// either a new version of an artifact appears in the source location, or a +// user takes action to re-run the most recent artifact through the pipeline. +// You can call GetPipelineState, which displays the status of a pipeline, including +// the status of stages in the pipeline, or GetPipeline, which returns the entire +// structure of the pipeline, including the stages of that pipeline. For more +// information about the structure of stages and actions, also refer to the +// AWS CodePipeline Pipeline Structure Reference. +// +// Pipeline stages include actions, which are categorized into categories +// such as source or build actions performed within a stage of a pipeline. For +// example, you can use a source action to import artifacts into a pipeline +// from a source such as Amazon S3. Like stages, you do not work with actions +// directly in most cases, but you do define and interact with actions when +// working with pipeline operations such as CreatePipeline and GetPipelineState. 
+// +// Pipelines also include transitions, which allow the transition of artifacts +// from one stage to the next in a pipeline after the actions in one stage complete. +// +// You can work with transitions by calling: +// +// DisableStageTransition, which prevents artifacts from transitioning to +// the next stage in a pipeline. EnableStageTransition, which enables transition +// of artifacts between stages in a pipeline. Using the API to integrate with +// AWS CodePipeline +// +// For third-party integrators or developers who want to create their own integrations +// with AWS CodePipeline, the expected sequence varies from the standard API +// user. In order to integrate with AWS CodePipeline, developers will need to +// work with the following items: +// +// Jobs, which are instances of an action. For example, a job for a source +// action might import a revision of an artifact from a source. You can work +// with jobs by calling: +// +// AcknowledgeJob, which confirms whether a job worker has received the specified +// job, GetJobDetails, which returns the details of a job, PollForJobs, which +// determines whether there are any jobs to act upon, PutJobFailureResult, +// which provides details of a job failure, and PutJobSuccessResult, which provides +// details of a job success. Third party jobs, which are instances of an action +// created by a partner action and integrated into AWS CodePipeline. Partner +// actions are created by members of the AWS Partner Network. You can work with +// third party jobs by calling: +// +// AcknowledgeThirdPartyJob, which confirms whether a job worker has received +// the specified job, GetThirdPartyJobDetails, which requests the details of +// a job for a partner action, PollForThirdPartyJobs, which determines whether +// there are any jobs to act upon, PutThirdPartyJobFailureResult, which provides +// details of a job failure, and PutThirdPartyJobSuccessResult, which provides +// details of a job success. 
+//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CodePipeline struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "codepipeline" + +// New creates a new instance of the CodePipeline client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CodePipeline client from just a session. +// svc := codepipeline.New(mySession) +// +// // Create a CodePipeline client with additional configuration +// svc := codepipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodePipeline { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CodePipeline { + svc := &CodePipeline{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-07-09", + JSONVersion: "1.1", + TargetPrefix: "CodePipeline_20150709", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CodePipeline operation and runs any +// custom request initialization. +func (c *CodePipeline) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1432 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cognitoidentity provides a client for Amazon Cognito Identity. 
+package cognitoidentity + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateIdentityPool = "CreateIdentityPool" + +// CreateIdentityPoolRequest generates a request for the CreateIdentityPool operation. +func (c *CognitoIdentity) CreateIdentityPoolRequest(input *CreateIdentityPoolInput) (req *request.Request, output *IdentityPool) { + op := &request.Operation{ + Name: opCreateIdentityPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateIdentityPoolInput{} + } + + req = c.newRequest(op, input, output) + output = &IdentityPool{} + req.Data = output + return +} + +// Creates a new identity pool. The identity pool is a store of user identity +// information that is specific to your AWS account. The limit on identity pools +// is 60 per account. You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) CreateIdentityPool(input *CreateIdentityPoolInput) (*IdentityPool, error) { + req, out := c.CreateIdentityPoolRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIdentities = "DeleteIdentities" + +// DeleteIdentitiesRequest generates a request for the DeleteIdentities operation. +func (c *CognitoIdentity) DeleteIdentitiesRequest(input *DeleteIdentitiesInput) (req *request.Request, output *DeleteIdentitiesOutput) { + op := &request.Operation{ + Name: opDeleteIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIdentitiesOutput{} + req.Data = output + return +} + +// Deletes identities from an identity pool. You can specify a list of 1-60 +// identities that you want to delete. +// +// You must use AWS Developer credentials to call this API. 
+func (c *CognitoIdentity) DeleteIdentities(input *DeleteIdentitiesInput) (*DeleteIdentitiesOutput, error) { + req, out := c.DeleteIdentitiesRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIdentityPool = "DeleteIdentityPool" + +// DeleteIdentityPoolRequest generates a request for the DeleteIdentityPool operation. +func (c *CognitoIdentity) DeleteIdentityPoolRequest(input *DeleteIdentityPoolInput) (req *request.Request, output *DeleteIdentityPoolOutput) { + op := &request.Operation{ + Name: opDeleteIdentityPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityPoolInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteIdentityPoolOutput{} + req.Data = output + return +} + +// Deletes a user pool. Once a pool is deleted, users will not be able to authenticate +// with the pool. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) DeleteIdentityPool(input *DeleteIdentityPoolInput) (*DeleteIdentityPoolOutput, error) { + req, out := c.DeleteIdentityPoolRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdentity = "DescribeIdentity" + +// DescribeIdentityRequest generates a request for the DescribeIdentity operation. +func (c *CognitoIdentity) DescribeIdentityRequest(input *DescribeIdentityInput) (req *request.Request, output *IdentityDescription) { + op := &request.Operation{ + Name: opDescribeIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &IdentityDescription{} + req.Data = output + return +} + +// Returns metadata related to the given identity, including when the identity +// was created and any associated linked logins. 
+// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) DescribeIdentity(input *DescribeIdentityInput) (*IdentityDescription, error) { + req, out := c.DescribeIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdentityPool = "DescribeIdentityPool" + +// DescribeIdentityPoolRequest generates a request for the DescribeIdentityPool operation. +func (c *CognitoIdentity) DescribeIdentityPoolRequest(input *DescribeIdentityPoolInput) (req *request.Request, output *IdentityPool) { + op := &request.Operation{ + Name: opDescribeIdentityPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdentityPoolInput{} + } + + req = c.newRequest(op, input, output) + output = &IdentityPool{} + req.Data = output + return +} + +// Gets details about a particular identity pool, including the pool name, ID +// description, creation date, and current number of users. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) DescribeIdentityPool(input *DescribeIdentityPoolInput) (*IdentityPool, error) { + req, out := c.DescribeIdentityPoolRequest(input) + err := req.Send() + return out, err +} + +const opGetCredentialsForIdentity = "GetCredentialsForIdentity" + +// GetCredentialsForIdentityRequest generates a request for the GetCredentialsForIdentity operation. +func (c *CognitoIdentity) GetCredentialsForIdentityRequest(input *GetCredentialsForIdentityInput) (req *request.Request, output *GetCredentialsForIdentityOutput) { + op := &request.Operation{ + Name: opGetCredentialsForIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCredentialsForIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCredentialsForIdentityOutput{} + req.Data = output + return +} + +// Returns credentials for the provided identity ID. Any provided logins +// will be validated against supported login providers. 
If the token is for +// cognito-identity.amazonaws.com, it will be passed through to AWS Security +// Token Service with the appropriate role for the token. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) GetCredentialsForIdentity(input *GetCredentialsForIdentityInput) (*GetCredentialsForIdentityOutput, error) { + req, out := c.GetCredentialsForIdentityRequest(input) + err := req.Send() + return out, err +} + +const opGetId = "GetId" + +// GetIdRequest generates a request for the GetId operation. +func (c *CognitoIdentity) GetIdRequest(input *GetIdInput) (req *request.Request, output *GetIdOutput) { + op := &request.Operation{ + Name: opGetId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdOutput{} + req.Data = output + return +} + +// Generates (or retrieves) a Cognito ID. Supplying multiple logins will create +// an implicit linked account. +// +// token+";"+tokenSecret. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) GetId(input *GetIdInput) (*GetIdOutput, error) { + req, out := c.GetIdRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityPoolRoles = "GetIdentityPoolRoles" + +// GetIdentityPoolRolesRequest generates a request for the GetIdentityPoolRoles operation. +func (c *CognitoIdentity) GetIdentityPoolRolesRequest(input *GetIdentityPoolRolesInput) (req *request.Request, output *GetIdentityPoolRolesOutput) { + op := &request.Operation{ + Name: opGetIdentityPoolRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityPoolRolesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityPoolRolesOutput{} + req.Data = output + return +} + +// Gets the roles for an identity pool. +// +// You must use AWS Developer credentials to call this API. 
+func (c *CognitoIdentity) GetIdentityPoolRoles(input *GetIdentityPoolRolesInput) (*GetIdentityPoolRolesOutput, error) { + req, out := c.GetIdentityPoolRolesRequest(input) + err := req.Send() + return out, err +} + +const opGetOpenIdToken = "GetOpenIdToken" + +// GetOpenIdTokenRequest generates a request for the GetOpenIdToken operation. +func (c *CognitoIdentity) GetOpenIdTokenRequest(input *GetOpenIdTokenInput) (req *request.Request, output *GetOpenIdTokenOutput) { + op := &request.Operation{ + Name: opGetOpenIdToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIdTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOpenIdTokenOutput{} + req.Data = output + return +} + +// Gets an OpenID token, using a known Cognito ID. This known Cognito ID is +// returned by GetId. You can optionally add additional logins for the identity. +// Supplying multiple logins creates an implicit link. +// +// The OpenId token is valid for 15 minutes. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) GetOpenIdToken(input *GetOpenIdTokenInput) (*GetOpenIdTokenOutput, error) { + req, out := c.GetOpenIdTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetOpenIdTokenForDeveloperIdentity = "GetOpenIdTokenForDeveloperIdentity" + +// GetOpenIdTokenForDeveloperIdentityRequest generates a request for the GetOpenIdTokenForDeveloperIdentity operation. 
+func (c *CognitoIdentity) GetOpenIdTokenForDeveloperIdentityRequest(input *GetOpenIdTokenForDeveloperIdentityInput) (req *request.Request, output *GetOpenIdTokenForDeveloperIdentityOutput) { + op := &request.Operation{ + Name: opGetOpenIdTokenForDeveloperIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIdTokenForDeveloperIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOpenIdTokenForDeveloperIdentityOutput{} + req.Data = output + return +} + +// Registers (or retrieves) a Cognito IdentityId and an OpenID Connect token +// for a user authenticated by your backend authentication process. Supplying +// multiple logins will create an implicit linked account. You can only specify +// one developer provider as part of the Logins map, which is linked to the +// identity pool. The developer provider is the "domain" by which Cognito will +// refer to your users. +// +// You can use GetOpenIdTokenForDeveloperIdentity to create a new identity +// and to link new logins (that is, user credentials issued by a public provider +// or developer provider) to an existing identity. When you want to create a +// new identity, the IdentityId should be null. When you want to associate a +// new login with an existing authenticated/unauthenticated identity, you can +// do so by providing the existing IdentityId. This API will create the identity +// in the specified IdentityPoolId. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) GetOpenIdTokenForDeveloperIdentity(input *GetOpenIdTokenForDeveloperIdentityInput) (*GetOpenIdTokenForDeveloperIdentityOutput, error) { + req, out := c.GetOpenIdTokenForDeveloperIdentityRequest(input) + err := req.Send() + return out, err +} + +const opListIdentities = "ListIdentities" + +// ListIdentitiesRequest generates a request for the ListIdentities operation. 
+func (c *CognitoIdentity) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Request, output *ListIdentitiesOutput) { + op := &request.Operation{ + Name: opListIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentitiesOutput{} + req.Data = output + return +} + +// Lists the identities in a pool. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + err := req.Send() + return out, err +} + +const opListIdentityPools = "ListIdentityPools" + +// ListIdentityPoolsRequest generates a request for the ListIdentityPools operation. +func (c *CognitoIdentity) ListIdentityPoolsRequest(input *ListIdentityPoolsInput) (req *request.Request, output *ListIdentityPoolsOutput) { + op := &request.Operation{ + Name: opListIdentityPools, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentityPoolsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentityPoolsOutput{} + req.Data = output + return +} + +// Lists all of the Cognito identity pools registered for your account. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) ListIdentityPools(input *ListIdentityPoolsInput) (*ListIdentityPoolsOutput, error) { + req, out := c.ListIdentityPoolsRequest(input) + err := req.Send() + return out, err +} + +const opLookupDeveloperIdentity = "LookupDeveloperIdentity" + +// LookupDeveloperIdentityRequest generates a request for the LookupDeveloperIdentity operation. 
+func (c *CognitoIdentity) LookupDeveloperIdentityRequest(input *LookupDeveloperIdentityInput) (req *request.Request, output *LookupDeveloperIdentityOutput) { + op := &request.Operation{ + Name: opLookupDeveloperIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &LookupDeveloperIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &LookupDeveloperIdentityOutput{} + req.Data = output + return +} + +// Retrieves the IdentityID associated with a DeveloperUserIdentifier or the +// list of DeveloperUserIdentifiers associated with an IdentityId for an existing +// identity. Either IdentityID or DeveloperUserIdentifier must not be null. +// If you supply only one of these values, the other value will be searched +// in the database and returned as a part of the response. If you supply both, +// DeveloperUserIdentifier will be matched against IdentityID. If the values +// are verified against the database, the response returns both values and is +// the same as the request. Otherwise a ResourceConflictException is thrown. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) LookupDeveloperIdentity(input *LookupDeveloperIdentityInput) (*LookupDeveloperIdentityOutput, error) { + req, out := c.LookupDeveloperIdentityRequest(input) + err := req.Send() + return out, err +} + +const opMergeDeveloperIdentities = "MergeDeveloperIdentities" + +// MergeDeveloperIdentitiesRequest generates a request for the MergeDeveloperIdentities operation. 
+func (c *CognitoIdentity) MergeDeveloperIdentitiesRequest(input *MergeDeveloperIdentitiesInput) (req *request.Request, output *MergeDeveloperIdentitiesOutput) { + op := &request.Operation{ + Name: opMergeDeveloperIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MergeDeveloperIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &MergeDeveloperIdentitiesOutput{} + req.Data = output + return +} + +// Merges two users having different IdentityIds, existing in the same identity +// pool, and identified by the same developer provider. You can use this action +// to request that discrete users be merged and identified as a single user +// in the Cognito environment. Cognito associates the given source user (SourceUserIdentifier) +// with the IdentityId of the DestinationUserIdentifier. Only developer-authenticated +// users can be merged. If the users to be merged are associated with the same +// public provider, but as two different users, an exception will be thrown. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) MergeDeveloperIdentities(input *MergeDeveloperIdentitiesInput) (*MergeDeveloperIdentitiesOutput, error) { + req, out := c.MergeDeveloperIdentitiesRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityPoolRoles = "SetIdentityPoolRoles" + +// SetIdentityPoolRolesRequest generates a request for the SetIdentityPoolRoles operation. 
+func (c *CognitoIdentity) SetIdentityPoolRolesRequest(input *SetIdentityPoolRolesInput) (req *request.Request, output *SetIdentityPoolRolesOutput) { + op := &request.Operation{ + Name: opSetIdentityPoolRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityPoolRolesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetIdentityPoolRolesOutput{} + req.Data = output + return +} + +// Sets the roles for an identity pool. These roles are used when making calls +// to GetCredentialsForIdentity action. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) SetIdentityPoolRoles(input *SetIdentityPoolRolesInput) (*SetIdentityPoolRolesOutput, error) { + req, out := c.SetIdentityPoolRolesRequest(input) + err := req.Send() + return out, err +} + +const opUnlinkDeveloperIdentity = "UnlinkDeveloperIdentity" + +// UnlinkDeveloperIdentityRequest generates a request for the UnlinkDeveloperIdentity operation. +func (c *CognitoIdentity) UnlinkDeveloperIdentityRequest(input *UnlinkDeveloperIdentityInput) (req *request.Request, output *UnlinkDeveloperIdentityOutput) { + op := &request.Operation{ + Name: opUnlinkDeveloperIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnlinkDeveloperIdentityInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnlinkDeveloperIdentityOutput{} + req.Data = output + return +} + +// Unlinks a DeveloperUserIdentifier from an existing identity. Unlinked developer +// users will be considered new identities next time they are seen. 
If, for +// a given Cognito identity, you remove all federated identities as well as +// the developer user identifier, the Cognito identity becomes inaccessible. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) UnlinkDeveloperIdentity(input *UnlinkDeveloperIdentityInput) (*UnlinkDeveloperIdentityOutput, error) { + req, out := c.UnlinkDeveloperIdentityRequest(input) + err := req.Send() + return out, err +} + +const opUnlinkIdentity = "UnlinkIdentity" + +// UnlinkIdentityRequest generates a request for the UnlinkIdentity operation. +func (c *CognitoIdentity) UnlinkIdentityRequest(input *UnlinkIdentityInput) (req *request.Request, output *UnlinkIdentityOutput) { + op := &request.Operation{ + Name: opUnlinkIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnlinkIdentityInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnlinkIdentityOutput{} + req.Data = output + return +} + +// Unlinks a federated identity from an existing account. Unlinked logins will +// be considered new identities next time they are seen. Removing the last linked +// login will make this identity inaccessible. +// +// This is a public API. You do not need any credentials to call this API. +func (c *CognitoIdentity) UnlinkIdentity(input *UnlinkIdentityInput) (*UnlinkIdentityOutput, error) { + req, out := c.UnlinkIdentityRequest(input) + err := req.Send() + return out, err +} + +const opUpdateIdentityPool = "UpdateIdentityPool" + +// UpdateIdentityPoolRequest generates a request for the UpdateIdentityPool operation. 
+func (c *CognitoIdentity) UpdateIdentityPoolRequest(input *IdentityPool) (req *request.Request, output *IdentityPool) { + op := &request.Operation{ + Name: opUpdateIdentityPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &IdentityPool{} + } + + req = c.newRequest(op, input, output) + output = &IdentityPool{} + req.Data = output + return +} + +// Updates a user pool. +// +// You must use AWS Developer credentials to call this API. +func (c *CognitoIdentity) UpdateIdentityPool(input *IdentityPool) (*IdentityPool, error) { + req, out := c.UpdateIdentityPoolRequest(input) + err := req.Send() + return out, err +} + +// Input to the CreateIdentityPool action. +type CreateIdentityPoolInput struct { + _ struct{} `type:"structure"` + + // TRUE if the identity pool supports unauthenticated logins. + AllowUnauthenticatedIdentities *bool `type:"boolean" required:"true"` + + // The "domain" by which Cognito will refer to your users. This name acts as + // a placeholder that allows your backend and the Cognito service to communicate + // about the developer provider. For the DeveloperProviderName, you can use + // letters as well as period (.), underscore (_), and dash (-). + // + // Once you have set a developer provider name, you cannot change it. Please + // take care in setting this parameter. + DeveloperProviderName *string `min:"1" type:"string"` + + // A string that you provide. + IdentityPoolName *string `min:"1" type:"string" required:"true"` + + // A list of OpendID Connect provider ARNs. + OpenIdConnectProviderARNs []*string `type:"list"` + + // Optional key:value pairs mapping provider names to provider app IDs. 
+ SupportedLoginProviders map[string]*string `type:"map"` +} + +// String returns the string representation +func (s CreateIdentityPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIdentityPoolInput) GoString() string { + return s.String() +} + +// Credentials for the the provided identity ID. +type Credentials struct { + _ struct{} `type:"structure"` + + // The Access Key portion of the credentials. + AccessKeyId *string `type:"string"` + + // The date at which these credentials will expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Secret Access Key portion of the credentials + SecretKey *string `type:"string"` + + // The Session Token portion of the credentials + SessionToken *string `type:"string"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// Input to the DeleteIdentities action. +type DeleteIdentitiesInput struct { + _ struct{} `type:"structure"` + + // A list of 1-60 identities that you want to delete. + IdentityIdsToDelete []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentitiesInput) GoString() string { + return s.String() +} + +// Returned in response to a successful DeleteIdentities operation. +type DeleteIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // An array of UnprocessedIdentityId objects, each of which contains an ErrorCode + // and IdentityId. 
+ UnprocessedIdentityIds []*UnprocessedIdentityId `type:"list"` +} + +// String returns the string representation +func (s DeleteIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentitiesOutput) GoString() string { + return s.String() +} + +// Input to the DeleteIdentityPool action. +type DeleteIdentityPoolInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPoolInput) GoString() string { + return s.String() +} + +type DeleteIdentityPoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPoolOutput) GoString() string { + return s.String() +} + +// Input to the DescribeIdentity action. +type DescribeIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityInput) GoString() string { + return s.String() +} + +// Input to the DescribeIdentityPool action. +type DescribeIdentityPoolInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. 
+ IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityPoolInput) GoString() string { + return s.String() +} + +// Input to the GetCredentialsForIdentity action. +type GetCredentialsForIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + Logins map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetCredentialsForIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialsForIdentityInput) GoString() string { + return s.String() +} + +// Returned in response to a successful GetCredentialsForIdentity operation. +type GetCredentialsForIdentityOutput struct { + _ struct{} `type:"structure"` + + // Credentials for the the provided identity ID. + Credentials *Credentials `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetCredentialsForIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialsForIdentityOutput) GoString() string { + return s.String() +} + +// Input to the GetId action. +type GetIdInput struct { + _ struct{} `type:"structure"` + + // A standard AWS account ID (9+ digits). + AccountId *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. 
+ // + // The available provider names for Logins are as follows: Facebook: graph.facebook.com + // Google: accounts.google.com Amazon: www.amazon.com Twitter: www.twitter.com + // Digits: www.digits.com + Logins map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdInput) GoString() string { + return s.String() +} + +// Returned in response to a GetId request. +type GetIdOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdOutput) GoString() string { + return s.String() +} + +// Input to the GetIdentityPoolRoles action. +type GetIdentityPoolRolesInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoolRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoolRolesInput) GoString() string { + return s.String() +} + +// Returned in response to a successful GetIdentityPoolRoles operation. +type GetIdentityPoolRolesOutput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string"` + + // The map of roles associated with this pool. Currently only authenticated + // and unauthenticated roles are supported. 
+ Roles map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetIdentityPoolRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoolRolesOutput) GoString() string { + return s.String() +} + +// Input to the GetOpenIdTokenForDeveloperIdentity action. +type GetOpenIdTokenForDeveloperIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + // Each name-value pair represents a user from a public provider or developer + // provider. If the user is from a developer provider, the name-value pair will + // follow the syntax "developer_provider_name": "developer_user_identifier". + // The developer provider is the "domain" by which Cognito will refer to your + // users; you provided this domain while creating/updating the identity pool. + // The developer user identifier is an identifier from your backend that uniquely + // identifies a user. When you create an identity pool, you can specify the + // supported logins. + Logins map[string]*string `type:"map" required:"true"` + + // The expiration time of the token, in seconds. You can specify a custom expiration + // time for the token so that you can cache it. If you don't provide an expiration + // time, the token is valid for 15 minutes. You can exchange the token with + // Amazon STS for temporary AWS credentials, which are valid for a maximum of + // one hour. The maximum token duration you can set is 24 hours. 
You should + // take care in setting the expiration time for a token, as there are significant + // security implications: an attacker could use a leaked token to access your + // AWS resources for the token's duration. + TokenDuration *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityInput) GoString() string { + return s.String() +} + +// Returned in response to a successful GetOpenIdTokenForDeveloperIdentity request. +type GetOpenIdTokenForDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // An OpenID token. + Token *string `type:"string"` +} + +// String returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenForDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// Input to the GetOpenIdToken action. +type GetOpenIdTokenInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + // When using graph.facebook.com and www.amazon.com, supply the access_token + // returned from the provider's authflow. For accounts.google.com or any other + // OpenId Connect provider, always include the id_token. 
+ Logins map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetOpenIdTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenInput) GoString() string { + return s.String() +} + +// Returned in response to a successful GetOpenIdToken request. +type GetOpenIdTokenOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. Note that the IdentityId returned + // may not match the one passed on input. + IdentityId *string `min:"1" type:"string"` + + // An OpenID token, valid for 15 minutes. + Token *string `type:"string"` +} + +// String returns the string representation +func (s GetOpenIdTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIdTokenOutput) GoString() string { + return s.String() +} + +// A description of the identity. +type IdentityDescription struct { + _ struct{} `type:"structure"` + + // Date on which the identity was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // Date on which the identity was last modified. + LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A set of optional name-value pairs that map provider names to provider tokens. + Logins []*string `type:"list"` +} + +// String returns the string representation +func (s IdentityDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityDescription) GoString() string { + return s.String() +} + +// An object representing a Cognito identity pool. +type IdentityPool struct { + _ struct{} `type:"structure"` + + // TRUE if the identity pool supports unauthenticated logins. 
+ AllowUnauthenticatedIdentities *bool `type:"boolean" required:"true"` + + // The "domain" by which Cognito will refer to your users. + DeveloperProviderName *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // A string that you provide. + IdentityPoolName *string `min:"1" type:"string" required:"true"` + + // A list of OpendID Connect provider ARNs. + OpenIdConnectProviderARNs []*string `type:"list"` + + // Optional key:value pairs mapping provider names to provider app IDs. + SupportedLoginProviders map[string]*string `type:"map"` +} + +// String returns the string representation +func (s IdentityPool) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityPool) GoString() string { + return s.String() +} + +// A description of the identity pool. +type IdentityPoolShortDescription struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string"` + + // A string that you provide. + IdentityPoolName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s IdentityPoolShortDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityPoolShortDescription) GoString() string { + return s.String() +} + +// Input to the ListIdentities action. +type ListIdentitiesInput struct { + _ struct{} `type:"structure"` + + // An optional boolean parameter that allows you to hide disabled identities. + // If omitted, the ListIdentities API will include disabled identities in the + // response. + HideDisabled *bool `type:"boolean"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // The maximum number of identities to return. 
+ MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // A pagination token. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesInput) GoString() string { + return s.String() +} + +// The response to a ListIdentities request. +type ListIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // An object containing a set of identities and associated mappings. + Identities []*IdentityDescription `type:"list"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string"` + + // A pagination token. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesOutput) GoString() string { + return s.String() +} + +// Input to the ListIdentityPools action. +type ListIdentityPoolsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of identities to return. + MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // A pagination token. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIdentityPoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoolsInput) GoString() string { + return s.String() +} + +// The result of a successful ListIdentityPools action. +type ListIdentityPoolsOutput struct { + _ struct{} `type:"structure"` + + // The identity pools returned by the ListIdentityPools action. + IdentityPools []*IdentityPoolShortDescription `type:"list"` + + // A pagination token. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIdentityPoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoolsOutput) GoString() string { + return s.String() +} + +// Input to the LookupDeveloperIdentityInput action. +type LookupDeveloperIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique ID used by your backend authentication process to identify a user. + // Typically, a developer identity provider would issue many developer user + // identifiers, in keeping with the number of users. + DeveloperUserIdentifier *string `min:"1" type:"string"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // The maximum number of identities to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A pagination token. The first call you make will have NextToken set to null. + // After that the service will return NextToken values as needed. For example, + // let's say you make a request with MaxResults set to 10, and there are 20 + // matches in the database. The service will return a pagination token as a + // part of the response. This token can be used to call the API again and get + // results starting from the 11th match. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LookupDeveloperIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupDeveloperIdentityInput) GoString() string { + return s.String() +} + +// Returned in response to a successful LookupDeveloperIdentity action. 
+type LookupDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` + + // This is the list of developer user identifiers associated with an identity + // ID. Cognito supports the association of multiple developer user identifiers + // with an identity ID. + DeveloperUserIdentifierList []*string `type:"list"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` + + // A pagination token. The first call you make will have NextToken set to null. + // After that the service will return NextToken values as needed. For example, + // let's say you make a request with MaxResults set to 10, and there are 20 + // matches in the database. The service will return a pagination token as a + // part of the response. This token can be used to call the API again and get + // results starting from the 11th match. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LookupDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LookupDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// Input to the MergeDeveloperIdentities action. +type MergeDeveloperIdentitiesInput struct { + _ struct{} `type:"structure"` + + // User identifier for the destination user. The value should be a DeveloperUserIdentifier. + DestinationUserIdentifier *string `min:"1" type:"string" required:"true"` + + // The "domain" by which Cognito will refer to your users. This is a (pseudo) + // domain name that you provide while creating an identity pool. This name acts + // as a placeholder that allows your backend and the Cognito service to communicate + // about the developer provider. For the DeveloperProviderName, you can use + // letters as well as period (.), underscore (_), and dash (-). + DeveloperProviderName *string `min:"1" type:"string" required:"true"` + + // An identity pool ID in the format REGION:GUID. 
+ IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // User identifier for the source user. The value should be a DeveloperUserIdentifier. + SourceUserIdentifier *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MergeDeveloperIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeDeveloperIdentitiesInput) GoString() string { + return s.String() +} + +// Returned in response to a successful MergeDeveloperIdentities action. +type MergeDeveloperIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MergeDeveloperIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeDeveloperIdentitiesOutput) GoString() string { + return s.String() +} + +// Input to the SetIdentityPoolRoles action. +type SetIdentityPoolRolesInput struct { + _ struct{} `type:"structure"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` + + // The map of roles associated with this pool. For a given role, the key will + // be either "authenticated" or "unauthenticated" and the value will be the + // Role ARN. 
+ Roles map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s SetIdentityPoolRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityPoolRolesInput) GoString() string { + return s.String() +} + +type SetIdentityPoolRolesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityPoolRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityPoolRolesOutput) GoString() string { + return s.String() +} + +// Input to the UnlinkDeveloperIdentity action. +type UnlinkDeveloperIdentityInput struct { + _ struct{} `type:"structure"` + + // The "domain" by which Cognito will refer to your users. + DeveloperProviderName *string `min:"1" type:"string" required:"true"` + + // A unique ID used by your backend authentication process to identify a user. + DeveloperUserIdentifier *string `min:"1" type:"string" required:"true"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` + + // An identity pool ID in the format REGION:GUID. + IdentityPoolId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnlinkDeveloperIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkDeveloperIdentityInput) GoString() string { + return s.String() +} + +type UnlinkDeveloperIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnlinkDeveloperIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkDeveloperIdentityOutput) GoString() string { + return s.String() +} + +// Input to the UnlinkIdentity action. 
+type UnlinkIdentityInput struct { + _ struct{} `type:"structure"` + + // A unique identifier in the format REGION:GUID. + IdentityId *string `min:"1" type:"string" required:"true"` + + // A set of optional name-value pairs that map provider names to provider tokens. + Logins map[string]*string `type:"map" required:"true"` + + // Provider names to unlink from this identity. + LoginsToRemove []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UnlinkIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkIdentityInput) GoString() string { + return s.String() +} + +type UnlinkIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnlinkIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnlinkIdentityOutput) GoString() string { + return s.String() +} + +// An array of UnprocessedIdentityId objects, each of which contains an ErrorCode +// and IdentityId. +type UnprocessedIdentityId struct { + _ struct{} `type:"structure"` + + // The error code indicating the type of error that occurred. + ErrorCode *string `type:"string" enum:"ErrorCode"` + + // A unique identifier in the format REGION:GUID. 
+ IdentityId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UnprocessedIdentityId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnprocessedIdentityId) GoString() string { + return s.String() +} + +const ( + // @enum ErrorCode + ErrorCodeAccessDenied = "AccessDenied" + // @enum ErrorCode + ErrorCodeInternalServerError = "InternalServerError" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/cognitoidentityiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cognitoidentityiface provides an interface for the Amazon Cognito Identity. +package cognitoidentityiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cognitoidentity" +) + +// CognitoIdentityAPI is the interface type for cognitoidentity.CognitoIdentity. 
+type CognitoIdentityAPI interface { + CreateIdentityPoolRequest(*cognitoidentity.CreateIdentityPoolInput) (*request.Request, *cognitoidentity.IdentityPool) + + CreateIdentityPool(*cognitoidentity.CreateIdentityPoolInput) (*cognitoidentity.IdentityPool, error) + + DeleteIdentitiesRequest(*cognitoidentity.DeleteIdentitiesInput) (*request.Request, *cognitoidentity.DeleteIdentitiesOutput) + + DeleteIdentities(*cognitoidentity.DeleteIdentitiesInput) (*cognitoidentity.DeleteIdentitiesOutput, error) + + DeleteIdentityPoolRequest(*cognitoidentity.DeleteIdentityPoolInput) (*request.Request, *cognitoidentity.DeleteIdentityPoolOutput) + + DeleteIdentityPool(*cognitoidentity.DeleteIdentityPoolInput) (*cognitoidentity.DeleteIdentityPoolOutput, error) + + DescribeIdentityRequest(*cognitoidentity.DescribeIdentityInput) (*request.Request, *cognitoidentity.IdentityDescription) + + DescribeIdentity(*cognitoidentity.DescribeIdentityInput) (*cognitoidentity.IdentityDescription, error) + + DescribeIdentityPoolRequest(*cognitoidentity.DescribeIdentityPoolInput) (*request.Request, *cognitoidentity.IdentityPool) + + DescribeIdentityPool(*cognitoidentity.DescribeIdentityPoolInput) (*cognitoidentity.IdentityPool, error) + + GetCredentialsForIdentityRequest(*cognitoidentity.GetCredentialsForIdentityInput) (*request.Request, *cognitoidentity.GetCredentialsForIdentityOutput) + + GetCredentialsForIdentity(*cognitoidentity.GetCredentialsForIdentityInput) (*cognitoidentity.GetCredentialsForIdentityOutput, error) + + GetIdRequest(*cognitoidentity.GetIdInput) (*request.Request, *cognitoidentity.GetIdOutput) + + GetId(*cognitoidentity.GetIdInput) (*cognitoidentity.GetIdOutput, error) + + GetIdentityPoolRolesRequest(*cognitoidentity.GetIdentityPoolRolesInput) (*request.Request, *cognitoidentity.GetIdentityPoolRolesOutput) + + GetIdentityPoolRoles(*cognitoidentity.GetIdentityPoolRolesInput) (*cognitoidentity.GetIdentityPoolRolesOutput, error) + + 
GetOpenIdTokenRequest(*cognitoidentity.GetOpenIdTokenInput) (*request.Request, *cognitoidentity.GetOpenIdTokenOutput) + + GetOpenIdToken(*cognitoidentity.GetOpenIdTokenInput) (*cognitoidentity.GetOpenIdTokenOutput, error) + + GetOpenIdTokenForDeveloperIdentityRequest(*cognitoidentity.GetOpenIdTokenForDeveloperIdentityInput) (*request.Request, *cognitoidentity.GetOpenIdTokenForDeveloperIdentityOutput) + + GetOpenIdTokenForDeveloperIdentity(*cognitoidentity.GetOpenIdTokenForDeveloperIdentityInput) (*cognitoidentity.GetOpenIdTokenForDeveloperIdentityOutput, error) + + ListIdentitiesRequest(*cognitoidentity.ListIdentitiesInput) (*request.Request, *cognitoidentity.ListIdentitiesOutput) + + ListIdentities(*cognitoidentity.ListIdentitiesInput) (*cognitoidentity.ListIdentitiesOutput, error) + + ListIdentityPoolsRequest(*cognitoidentity.ListIdentityPoolsInput) (*request.Request, *cognitoidentity.ListIdentityPoolsOutput) + + ListIdentityPools(*cognitoidentity.ListIdentityPoolsInput) (*cognitoidentity.ListIdentityPoolsOutput, error) + + LookupDeveloperIdentityRequest(*cognitoidentity.LookupDeveloperIdentityInput) (*request.Request, *cognitoidentity.LookupDeveloperIdentityOutput) + + LookupDeveloperIdentity(*cognitoidentity.LookupDeveloperIdentityInput) (*cognitoidentity.LookupDeveloperIdentityOutput, error) + + MergeDeveloperIdentitiesRequest(*cognitoidentity.MergeDeveloperIdentitiesInput) (*request.Request, *cognitoidentity.MergeDeveloperIdentitiesOutput) + + MergeDeveloperIdentities(*cognitoidentity.MergeDeveloperIdentitiesInput) (*cognitoidentity.MergeDeveloperIdentitiesOutput, error) + + SetIdentityPoolRolesRequest(*cognitoidentity.SetIdentityPoolRolesInput) (*request.Request, *cognitoidentity.SetIdentityPoolRolesOutput) + + SetIdentityPoolRoles(*cognitoidentity.SetIdentityPoolRolesInput) (*cognitoidentity.SetIdentityPoolRolesOutput, error) + + UnlinkDeveloperIdentityRequest(*cognitoidentity.UnlinkDeveloperIdentityInput) (*request.Request, 
*cognitoidentity.UnlinkDeveloperIdentityOutput) + + UnlinkDeveloperIdentity(*cognitoidentity.UnlinkDeveloperIdentityInput) (*cognitoidentity.UnlinkDeveloperIdentityOutput, error) + + UnlinkIdentityRequest(*cognitoidentity.UnlinkIdentityInput) (*request.Request, *cognitoidentity.UnlinkIdentityOutput) + + UnlinkIdentity(*cognitoidentity.UnlinkIdentityInput) (*cognitoidentity.UnlinkIdentityOutput, error) + + UpdateIdentityPoolRequest(*cognitoidentity.IdentityPool) (*request.Request, *cognitoidentity.IdentityPool) + + UpdateIdentityPool(*cognitoidentity.IdentityPool) (*cognitoidentity.IdentityPool, error) +} + +var _ CognitoIdentityAPI = (*cognitoidentity.CognitoIdentity)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +package cognitoidentity + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opGetOpenIdToken, opGetId, opGetCredentialsForIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,42 @@ +package cognitoidentity_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/stretchr/testify/assert" +) + +var svc = cognitoidentity.New(unit.Session) + +func TestUnsignedRequest_GetID(t *testing.T) { + req, _ := svc.GetIdRequest(&cognitoidentity.GetIdInput{ + IdentityPoolId: aws.String("IdentityPoolId"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} + +func TestUnsignedRequest_GetOpenIDToken(t *testing.T) { + req, _ := svc.GetOpenIdTokenRequest(&cognitoidentity.GetOpenIdTokenInput{ + IdentityId: aws.String("IdentityId"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} + +func TestUnsignedRequest_GetCredentialsForIdentity(t *testing.T) { + req, _ := svc.GetCredentialsForIdentityRequest(&cognitoidentity.GetCredentialsForIdentityInput{ + IdentityId: aws.String("IdentityId"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,427 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cognitoidentity_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cognitoidentity" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCognitoIdentity_CreateIdentityPool() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.CreateIdentityPoolInput{ + AllowUnauthenticatedIdentities: aws.Bool(true), // Required + IdentityPoolName: aws.String("IdentityPoolName"), // Required + DeveloperProviderName: aws.String("DeveloperProviderName"), + OpenIdConnectProviderARNs: []*string{ + aws.String("ARNString"), // Required + // More values... + }, + SupportedLoginProviders: map[string]*string{ + "Key": aws.String("IdentityProviderId"), // Required + // More values... + }, + } + resp, err := svc.CreateIdentityPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_DeleteIdentities() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.DeleteIdentitiesInput{ + IdentityIdsToDelete: []*string{ // Required + aws.String("IdentityId"), // Required + // More values... + }, + } + resp, err := svc.DeleteIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_DeleteIdentityPool() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.DeleteIdentityPoolInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DeleteIdentityPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_DescribeIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.DescribeIdentityInput{ + IdentityId: aws.String("IdentityId"), // Required + } + resp, err := svc.DescribeIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_DescribeIdentityPool() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.DescribeIdentityPoolInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DescribeIdentityPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetCredentialsForIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetCredentialsForIdentityInput{ + IdentityId: aws.String("IdentityId"), // Required + Logins: map[string]*string{ + "Key": aws.String("IdentityProviderToken"), // Required + // More values... + }, + } + resp, err := svc.GetCredentialsForIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetId() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetIdInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + AccountId: aws.String("AccountId"), + Logins: map[string]*string{ + "Key": aws.String("IdentityProviderToken"), // Required + // More values... 
+ }, + } + resp, err := svc.GetId(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetIdentityPoolRoles() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetIdentityPoolRolesInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.GetIdentityPoolRoles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetOpenIdToken() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetOpenIdTokenInput{ + IdentityId: aws.String("IdentityId"), // Required + Logins: map[string]*string{ + "Key": aws.String("IdentityProviderToken"), // Required + // More values... + }, + } + resp, err := svc.GetOpenIdToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_GetOpenIdTokenForDeveloperIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.GetOpenIdTokenForDeveloperIdentityInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + Logins: map[string]*string{ // Required + "Key": aws.String("IdentityProviderToken"), // Required + // More values... + }, + IdentityId: aws.String("IdentityId"), + TokenDuration: aws.Int64(1), + } + resp, err := svc.GetOpenIdTokenForDeveloperIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_ListIdentities() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.ListIdentitiesInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + MaxResults: aws.Int64(1), // Required + HideDisabled: aws.Bool(true), + NextToken: aws.String("PaginationKey"), + } + resp, err := svc.ListIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_ListIdentityPools() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.ListIdentityPoolsInput{ + MaxResults: aws.Int64(1), // Required + NextToken: aws.String("PaginationKey"), + } + resp, err := svc.ListIdentityPools(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_LookupDeveloperIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.LookupDeveloperIdentityInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + DeveloperUserIdentifier: aws.String("DeveloperUserIdentifier"), + IdentityId: aws.String("IdentityId"), + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationKey"), + } + resp, err := svc.LookupDeveloperIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoIdentity_MergeDeveloperIdentities() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.MergeDeveloperIdentitiesInput{ + DestinationUserIdentifier: aws.String("DeveloperUserIdentifier"), // Required + DeveloperProviderName: aws.String("DeveloperProviderName"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + SourceUserIdentifier: aws.String("DeveloperUserIdentifier"), // Required + } + resp, err := svc.MergeDeveloperIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_SetIdentityPoolRoles() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.SetIdentityPoolRolesInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + Roles: map[string]*string{ // Required + "Key": aws.String("ARNString"), // Required + // More values... + }, + } + resp, err := svc.SetIdentityPoolRoles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_UnlinkDeveloperIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.UnlinkDeveloperIdentityInput{ + DeveloperProviderName: aws.String("DeveloperProviderName"), // Required + DeveloperUserIdentifier: aws.String("DeveloperUserIdentifier"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.UnlinkDeveloperIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_UnlinkIdentity() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.UnlinkIdentityInput{ + IdentityId: aws.String("IdentityId"), // Required + Logins: map[string]*string{ // Required + "Key": aws.String("IdentityProviderToken"), // Required + // More values... + }, + LoginsToRemove: []*string{ // Required + aws.String("IdentityProviderName"), // Required + // More values... + }, + } + resp, err := svc.UnlinkIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoIdentity_UpdateIdentityPool() { + svc := cognitoidentity.New(session.New()) + + params := &cognitoidentity.IdentityPool{ + AllowUnauthenticatedIdentities: aws.Bool(true), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + IdentityPoolName: aws.String("IdentityPoolName"), // Required + DeveloperProviderName: aws.String("DeveloperProviderName"), + OpenIdConnectProviderARNs: []*string{ + aws.String("ARNString"), // Required + // More values... + }, + SupportedLoginProviders: map[string]*string{ + "Key": aws.String("IdentityProviderId"), // Required + // More values... + }, + } + resp, err := svc.UpdateIdentityPool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,119 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cognitoidentity + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Cognito is a web service that delivers scoped temporary credentials +// to mobile devices and other untrusted environments. Amazon Cognito uniquely +// identifies a device and supplies the user with a consistent identity over +// the lifetime of an application. +// +// Using Amazon Cognito, you can enable authentication with one or more third-party +// identity providers (Facebook, Google, or Login with Amazon), and you can +// also choose to support unauthenticated access from your app. Cognito delivers +// a unique identifier for each user and acts as an OpenID token provider trusted +// by AWS Security Token Service (STS) to access temporary, limited-privilege +// AWS credentials. +// +// To provide end-user credentials, first make an unsigned call to GetId. If +// the end user is authenticated with one of the supported identity providers, +// set the Logins map with the identity provider token. GetId returns a unique +// identifier for the user. +// +// Next, make an unsigned call to GetCredentialsForIdentity. 
This call expects +// the same Logins map as the GetId call, as well as the IdentityID originally +// returned by GetId. Assuming your identity pool has been configured via the +// SetIdentityPoolRoles operation, GetCredentialsForIdentity will return AWS +// credentials for your use. If your pool has not been configured with SetIdentityPoolRoles, +// or if you want to follow legacy flow, make an unsigned call to GetOpenIdToken, +// which returns the OpenID token necessary to call STS and retrieve AWS credentials. +// This call expects the same Logins map as the GetId call, as well as the IdentityID +// originally returned by GetId. The token returned by GetOpenIdToken can be +// passed to the STS operation AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html) +// to retrieve AWS credentials. +// +// If you want to use Amazon Cognito in an Android, iOS, or Unity application, +// you will probably want to make API calls via the AWS Mobile SDK. To learn +// more, see the AWS Mobile SDK Developer Guide (http://docs.aws.amazon.com/mobile/index.html). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CognitoIdentity struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cognito-identity" + +// New creates a new instance of the CognitoIdentity client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CognitoIdentity client from just a session. 
+// svc := cognitoidentity.New(mySession) +// +// // Create a CognitoIdentity client with additional configuration +// svc := cognitoidentity.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentity { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CognitoIdentity { + svc := &CognitoIdentity{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-06-30", + JSONVersion: "1.1", + TargetPrefix: "AWSCognitoIdentityService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CognitoIdentity operation and runs any +// custom request initialization. 
+func (c *CognitoIdentity) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1634 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cognitosync provides a client for Amazon Cognito Sync. +package cognitosync + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opBulkPublish = "BulkPublish" + +// BulkPublishRequest generates a request for the BulkPublish operation. +func (c *CognitoSync) BulkPublishRequest(input *BulkPublishInput) (req *request.Request, output *BulkPublishOutput) { + op := &request.Operation{ + Name: opBulkPublish, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/bulkpublish", + } + + if input == nil { + input = &BulkPublishInput{} + } + + req = c.newRequest(op, input, output) + output = &BulkPublishOutput{} + req.Data = output + return +} + +// Initiates a bulk publish of all existing datasets for an Identity Pool to +// the configured stream. Customers are limited to one successful bulk publish +// per 24 hours. Bulk publish is an asynchronous request, customers can see +// the status of the request via the GetBulkPublishDetails operation. 
+// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) BulkPublish(input *BulkPublishInput) (*BulkPublishOutput, error) { + req, out := c.BulkPublishRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDataset = "DeleteDataset" + +// DeleteDatasetRequest generates a request for the DeleteDataset operation. +func (c *CognitoSync) DeleteDatasetRequest(input *DeleteDatasetInput) (req *request.Request, output *DeleteDatasetOutput) { + op := &request.Operation{ + Name: opDeleteDataset, + HTTPMethod: "DELETE", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + } + + if input == nil { + input = &DeleteDatasetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDatasetOutput{} + req.Data = output + return +} + +// Deletes the specific dataset. The dataset will be deleted permanently, and +// the action can't be undone. Datasets that this dataset was merged with will +// no longer report the merge. Any subsequent operation on this dataset will +// result in a ResourceNotFoundException. +// +// This API can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. +func (c *CognitoSync) DeleteDataset(input *DeleteDatasetInput) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDataset = "DescribeDataset" + +// DescribeDatasetRequest generates a request for the DescribeDataset operation. 
+func (c *CognitoSync) DescribeDatasetRequest(input *DescribeDatasetInput) (req *request.Request, output *DescribeDatasetOutput) { + op := &request.Operation{ + Name: opDescribeDataset, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + } + + if input == nil { + input = &DescribeDatasetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDatasetOutput{} + req.Data = output + return +} + +// Gets meta data about a dataset by identity and dataset name. With Amazon +// Cognito Sync, each identity has access only to its own data. Thus, the credentials +// used to make this API call need to have access to the identity data. +// +// This API can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. You should use Cognito Identity credentials +// to make this API call. +func (c *CognitoSync) DescribeDataset(input *DescribeDatasetInput) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdentityPoolUsage = "DescribeIdentityPoolUsage" + +// DescribeIdentityPoolUsageRequest generates a request for the DescribeIdentityPoolUsage operation. +func (c *CognitoSync) DescribeIdentityPoolUsageRequest(input *DescribeIdentityPoolUsageInput) (req *request.Request, output *DescribeIdentityPoolUsageOutput) { + op := &request.Operation{ + Name: opDescribeIdentityPoolUsage, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}", + } + + if input == nil { + input = &DescribeIdentityPoolUsageInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIdentityPoolUsageOutput{} + req.Data = output + return +} + +// Gets usage details (for example, data storage) about a particular identity +// pool. +// +// This API can only be called with developer credentials. 
You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) DescribeIdentityPoolUsage(input *DescribeIdentityPoolUsageInput) (*DescribeIdentityPoolUsageOutput, error) { + req, out := c.DescribeIdentityPoolUsageRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdentityUsage = "DescribeIdentityUsage" + +// DescribeIdentityUsageRequest generates a request for the DescribeIdentityUsage operation. +func (c *CognitoSync) DescribeIdentityUsageRequest(input *DescribeIdentityUsageInput) (req *request.Request, output *DescribeIdentityUsageOutput) { + op := &request.Operation{ + Name: opDescribeIdentityUsage, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}", + } + + if input == nil { + input = &DescribeIdentityUsageInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIdentityUsageOutput{} + req.Data = output + return +} + +// Gets usage information for an identity, including number of datasets and +// data usage. +// +// This API can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. +func (c *CognitoSync) DescribeIdentityUsage(input *DescribeIdentityUsageInput) (*DescribeIdentityUsageOutput, error) { + req, out := c.DescribeIdentityUsageRequest(input) + err := req.Send() + return out, err +} + +const opGetBulkPublishDetails = "GetBulkPublishDetails" + +// GetBulkPublishDetailsRequest generates a request for the GetBulkPublishDetails operation. 
+func (c *CognitoSync) GetBulkPublishDetailsRequest(input *GetBulkPublishDetailsInput) (req *request.Request, output *GetBulkPublishDetailsOutput) { + op := &request.Operation{ + Name: opGetBulkPublishDetails, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/getBulkPublishDetails", + } + + if input == nil { + input = &GetBulkPublishDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBulkPublishDetailsOutput{} + req.Data = output + return +} + +// Get the status of the last BulkPublish operation for an identity pool. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) GetBulkPublishDetails(input *GetBulkPublishDetailsInput) (*GetBulkPublishDetailsOutput, error) { + req, out := c.GetBulkPublishDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetCognitoEvents = "GetCognitoEvents" + +// GetCognitoEventsRequest generates a request for the GetCognitoEvents operation. +func (c *CognitoSync) GetCognitoEventsRequest(input *GetCognitoEventsInput) (req *request.Request, output *GetCognitoEventsOutput) { + op := &request.Operation{ + Name: opGetCognitoEvents, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/events", + } + + if input == nil { + input = &GetCognitoEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCognitoEventsOutput{} + req.Data = output + return +} + +// Gets the events and the corresponding Lambda functions associated with an +// identity pool. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. 
+func (c *CognitoSync) GetCognitoEvents(input *GetCognitoEventsInput) (*GetCognitoEventsOutput, error) { + req, out := c.GetCognitoEventsRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityPoolConfiguration = "GetIdentityPoolConfiguration" + +// GetIdentityPoolConfigurationRequest generates a request for the GetIdentityPoolConfiguration operation. +func (c *CognitoSync) GetIdentityPoolConfigurationRequest(input *GetIdentityPoolConfigurationInput) (req *request.Request, output *GetIdentityPoolConfigurationOutput) { + op := &request.Operation{ + Name: opGetIdentityPoolConfiguration, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/configuration", + } + + if input == nil { + input = &GetIdentityPoolConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityPoolConfigurationOutput{} + req.Data = output + return +} + +// Gets the configuration settings of an identity pool. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) GetIdentityPoolConfiguration(input *GetIdentityPoolConfigurationInput) (*GetIdentityPoolConfigurationOutput, error) { + req, out := c.GetIdentityPoolConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opListDatasets = "ListDatasets" + +// ListDatasetsRequest generates a request for the ListDatasets operation. +func (c *CognitoSync) ListDatasetsRequest(input *ListDatasetsInput) (req *request.Request, output *ListDatasetsOutput) { + op := &request.Operation{ + Name: opListDatasets, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets", + } + + if input == nil { + input = &ListDatasetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDatasetsOutput{} + req.Data = output + return +} + +// Lists datasets for an identity. 
With Amazon Cognito Sync, each identity has +// access only to its own data. Thus, the credentials used to make this API +// call need to have access to the identity data. +// +// ListDatasets can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. You should use the Cognito Identity +// credentials to make this API call. +func (c *CognitoSync) ListDatasets(input *ListDatasetsInput) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + err := req.Send() + return out, err +} + +const opListIdentityPoolUsage = "ListIdentityPoolUsage" + +// ListIdentityPoolUsageRequest generates a request for the ListIdentityPoolUsage operation. +func (c *CognitoSync) ListIdentityPoolUsageRequest(input *ListIdentityPoolUsageInput) (req *request.Request, output *ListIdentityPoolUsageOutput) { + op := &request.Operation{ + Name: opListIdentityPoolUsage, + HTTPMethod: "GET", + HTTPPath: "/identitypools", + } + + if input == nil { + input = &ListIdentityPoolUsageInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentityPoolUsageOutput{} + req.Data = output + return +} + +// Gets a list of identity pools registered with Cognito. +// +// ListIdentityPoolUsage can only be called with developer credentials. You +// cannot make this API call with the temporary user credentials provided by +// Cognito Identity. +func (c *CognitoSync) ListIdentityPoolUsage(input *ListIdentityPoolUsageInput) (*ListIdentityPoolUsageOutput, error) { + req, out := c.ListIdentityPoolUsageRequest(input) + err := req.Send() + return out, err +} + +const opListRecords = "ListRecords" + +// ListRecordsRequest generates a request for the ListRecords operation. 
+func (c *CognitoSync) ListRecordsRequest(input *ListRecordsInput) (req *request.Request, output *ListRecordsOutput) { + op := &request.Operation{ + Name: opListRecords, + HTTPMethod: "GET", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/records", + } + + if input == nil { + input = &ListRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRecordsOutput{} + req.Data = output + return +} + +// Gets paginated records, optionally changed after a particular sync count +// for a dataset and identity. With Amazon Cognito Sync, each identity has access +// only to its own data. Thus, the credentials used to make this API call need +// to have access to the identity data. +// +// ListRecords can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. You should use Cognito Identity credentials +// to make this API call. +func (c *CognitoSync) ListRecords(input *ListRecordsInput) (*ListRecordsOutput, error) { + req, out := c.ListRecordsRequest(input) + err := req.Send() + return out, err +} + +const opRegisterDevice = "RegisterDevice" + +// RegisterDeviceRequest generates a request for the RegisterDevice operation. +func (c *CognitoSync) RegisterDeviceRequest(input *RegisterDeviceInput) (req *request.Request, output *RegisterDeviceOutput) { + op := &request.Operation{ + Name: opRegisterDevice, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/identity/{IdentityId}/device", + } + + if input == nil { + input = &RegisterDeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterDeviceOutput{} + req.Data = output + return +} + +// Registers a device to receive push sync notifications. +// +// This API can only be called with temporary credentials provided by Cognito +// Identity. You cannot call this API with developer credentials. 
+func (c *CognitoSync) RegisterDevice(input *RegisterDeviceInput) (*RegisterDeviceOutput, error) { + req, out := c.RegisterDeviceRequest(input) + err := req.Send() + return out, err +} + +const opSetCognitoEvents = "SetCognitoEvents" + +// SetCognitoEventsRequest generates a request for the SetCognitoEvents operation. +func (c *CognitoSync) SetCognitoEventsRequest(input *SetCognitoEventsInput) (req *request.Request, output *SetCognitoEventsOutput) { + op := &request.Operation{ + Name: opSetCognitoEvents, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/events", + } + + if input == nil { + input = &SetCognitoEventsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetCognitoEventsOutput{} + req.Data = output + return +} + +// Sets the AWS Lambda function for a given event type for an identity pool. +// This request only updates the key/value pair specified. Other key/values +// pairs are not updated. To remove a key value pair, pass a empty value for +// the particular key. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) SetCognitoEvents(input *SetCognitoEventsInput) (*SetCognitoEventsOutput, error) { + req, out := c.SetCognitoEventsRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityPoolConfiguration = "SetIdentityPoolConfiguration" + +// SetIdentityPoolConfigurationRequest generates a request for the SetIdentityPoolConfiguration operation. 
+func (c *CognitoSync) SetIdentityPoolConfigurationRequest(input *SetIdentityPoolConfigurationInput) (req *request.Request, output *SetIdentityPoolConfigurationOutput) { + op := &request.Operation{ + Name: opSetIdentityPoolConfiguration, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/configuration", + } + + if input == nil { + input = &SetIdentityPoolConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityPoolConfigurationOutput{} + req.Data = output + return +} + +// Sets the necessary configuration for push sync. +// +// This API can only be called with developer credentials. You cannot call +// this API with the temporary user credentials provided by Cognito Identity. +func (c *CognitoSync) SetIdentityPoolConfiguration(input *SetIdentityPoolConfigurationInput) (*SetIdentityPoolConfigurationOutput, error) { + req, out := c.SetIdentityPoolConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opSubscribeToDataset = "SubscribeToDataset" + +// SubscribeToDatasetRequest generates a request for the SubscribeToDataset operation. +func (c *CognitoSync) SubscribeToDatasetRequest(input *SubscribeToDatasetInput) (req *request.Request, output *SubscribeToDatasetOutput) { + op := &request.Operation{ + Name: opSubscribeToDataset, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/subscriptions/{DeviceId}", + } + + if input == nil { + input = &SubscribeToDatasetInput{} + } + + req = c.newRequest(op, input, output) + output = &SubscribeToDatasetOutput{} + req.Data = output + return +} + +// Subscribes to receive notifications when a dataset is modified by another +// device. +// +// This API can only be called with temporary credentials provided by Cognito +// Identity. You cannot call this API with developer credentials. 
+func (c *CognitoSync) SubscribeToDataset(input *SubscribeToDatasetInput) (*SubscribeToDatasetOutput, error) { + req, out := c.SubscribeToDatasetRequest(input) + err := req.Send() + return out, err +} + +const opUnsubscribeFromDataset = "UnsubscribeFromDataset" + +// UnsubscribeFromDatasetRequest generates a request for the UnsubscribeFromDataset operation. +func (c *CognitoSync) UnsubscribeFromDatasetRequest(input *UnsubscribeFromDatasetInput) (req *request.Request, output *UnsubscribeFromDatasetOutput) { + op := &request.Operation{ + Name: opUnsubscribeFromDataset, + HTTPMethod: "DELETE", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}/subscriptions/{DeviceId}", + } + + if input == nil { + input = &UnsubscribeFromDatasetInput{} + } + + req = c.newRequest(op, input, output) + output = &UnsubscribeFromDatasetOutput{} + req.Data = output + return +} + +// Unsubscribes from receiving notifications when a dataset is modified by another +// device. +// +// This API can only be called with temporary credentials provided by Cognito +// Identity. You cannot call this API with developer credentials. +func (c *CognitoSync) UnsubscribeFromDataset(input *UnsubscribeFromDatasetInput) (*UnsubscribeFromDatasetOutput, error) { + req, out := c.UnsubscribeFromDatasetRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRecords = "UpdateRecords" + +// UpdateRecordsRequest generates a request for the UpdateRecords operation. 
+func (c *CognitoSync) UpdateRecordsRequest(input *UpdateRecordsInput) (req *request.Request, output *UpdateRecordsOutput) { + op := &request.Operation{ + Name: opUpdateRecords, + HTTPMethod: "POST", + HTTPPath: "/identitypools/{IdentityPoolId}/identities/{IdentityId}/datasets/{DatasetName}", + } + + if input == nil { + input = &UpdateRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRecordsOutput{} + req.Data = output + return +} + +// Posts updates to records and adds and deletes records for a dataset and user. +// +// The sync count in the record patch is your last known sync count for that +// record. The server will reject an UpdateRecords request with a ResourceConflictException +// if you try to patch a record with a new value but a stale sync count. +// +// For example, if the sync count on the server is 5 for a key called highScore +// and you try and submit a new highScore with sync count of 4, the request +// will be rejected. To obtain the current sync count for a record, call ListRecords. +// On a successful update of the record, the response returns the new sync count +// for that record. You should present that sync count the next time you try +// to update that same record. When the record does not exist, specify the sync +// count as 0. +// +// This API can be called with temporary user credentials provided by Cognito +// Identity or with developer credentials. +func (c *CognitoSync) UpdateRecords(input *UpdateRecordsInput) (*UpdateRecordsOutput, error) { + req, out := c.UpdateRecordsRequest(input) + err := req.Send() + return out, err +} + +// The input for the BulkPublish operation. +type BulkPublishInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. 
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s BulkPublishInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BulkPublishInput) GoString() string { + return s.String() +} + +// The output for the BulkPublish operation. +type BulkPublishOutput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s BulkPublishOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BulkPublishOutput) GoString() string { + return s.String() +} + +// Configuration options for configure Cognito streams. +type CognitoStreams struct { + _ struct{} `type:"structure"` + + // The ARN of the role Amazon Cognito can assume in order to publish to the + // stream. This role must grant access to Amazon Cognito (cognito-sync) to invoke + // PutRecord on your Cognito stream. + RoleArn *string `min:"20" type:"string"` + + // The name of the Cognito stream to receive updates. This stream must be in + // the developers account and in the same region as the identity pool. + StreamName *string `min:"1" type:"string"` + + // Status of the Cognito streams. Valid values are: ENABLED - Streaming of updates + // to identity pool is enabled. + // + // DISABLED - Streaming of updates to identity pool is disabled. Bulk publish + // will also fail if StreamingStatus is DISABLED. 
+ StreamingStatus *string `type:"string" enum:"StreamingStatus"` +} + +// String returns the string representation +func (s CognitoStreams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CognitoStreams) GoString() string { + return s.String() +} + +// A collection of data for an identity pool. An identity pool can have multiple +// datasets. A dataset is per identity and can be general or associated with +// a particular entity in an application (like a saved game). Datasets are automatically +// created if they don't exist. Data is synced by dataset, and a dataset can +// hold up to 1MB of key-value pairs. +type Dataset struct { + _ struct{} `type:"structure"` + + // Date on which the dataset was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Total size in bytes of the records in this dataset. + DataStorage *int64 `type:"long"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `min:"1" type:"string"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `min:"1" type:"string"` + + // The device that made the last change to this dataset. + LastModifiedBy *string `type:"string"` + + // Date when the dataset was last modified. + LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Number of records in this dataset. + NumRecords *int64 `type:"long"` +} + +// String returns the string representation +func (s Dataset) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dataset) GoString() string { + return s.String() +} + +// A request to delete the specific dataset. 
+type DeleteDatasetInput struct { + _ struct{} `type:"structure"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetInput) GoString() string { + return s.String() +} + +// Response to a successful DeleteDataset request. +type DeleteDatasetOutput struct { + _ struct{} `type:"structure"` + + // A collection of data for an identity pool. An identity pool can have multiple + // datasets. A dataset is per identity and can be general or associated with + // a particular entity in an application (like a saved game). Datasets are automatically + // created if they don't exist. Data is synced by dataset, and a dataset can + // hold up to 1MB of key-value pairs. + Dataset *Dataset `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetOutput) GoString() string { + return s.String() +} + +// A request for meta data about a dataset (creation date, number of records, +// size) by owner and dataset name. 
+type DescribeDatasetInput struct { + _ struct{} `type:"structure"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetInput) GoString() string { + return s.String() +} + +// Response to a successful DescribeDataset request. +type DescribeDatasetOutput struct { + _ struct{} `type:"structure"` + + // Meta data for a collection of data for an identity. An identity can have + // multiple datasets. A dataset can be general or associated with a particular + // entity in an application (like a saved game). Datasets are automatically + // created if they don't exist. Data is synced by dataset, and a dataset can + // hold up to 1MB of key-value pairs. + Dataset *Dataset `type:"structure"` +} + +// String returns the string representation +func (s DescribeDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetOutput) GoString() string { + return s.String() +} + +// A request for usage information about the identity pool. 
+type DescribeIdentityPoolUsageInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityPoolUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityPoolUsageInput) GoString() string { + return s.String() +} + +// Response to a successful DescribeIdentityPoolUsage request. +type DescribeIdentityPoolUsageOutput struct { + _ struct{} `type:"structure"` + + // Information about the usage of the identity pool. + IdentityPoolUsage *IdentityPoolUsage `type:"structure"` +} + +// String returns the string representation +func (s DescribeIdentityPoolUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityPoolUsageOutput) GoString() string { + return s.String() +} + +// A request for information about the usage of an identity pool. +type DescribeIdentityUsageInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. 
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIdentityUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityUsageInput) GoString() string { + return s.String() +} + +// The response to a successful DescribeIdentityUsage request. +type DescribeIdentityUsageOutput struct { + _ struct{} `type:"structure"` + + // Usage information for the identity. + IdentityUsage *IdentityUsage `type:"structure"` +} + +// String returns the string representation +func (s DescribeIdentityUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityUsageOutput) GoString() string { + return s.String() +} + +// The input for the GetBulkPublishDetails operation. +type GetBulkPublishDetailsInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBulkPublishDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBulkPublishDetailsInput) GoString() string { + return s.String() +} + +// The output for the GetBulkPublishDetails operation. +type GetBulkPublishDetailsOutput struct { + _ struct{} `type:"structure"` + + // If BulkPublishStatus is SUCCEEDED, the time the last bulk publish operation + // completed. + BulkPublishCompleteTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date/time at which the last bulk publish was initiated. 
+ BulkPublishStartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Status of the last bulk publish operation, valid values are: NOT_STARTED + // - No bulk publish has been requested for this identity pool + // + // IN_PROGRESS - Data is being published to the configured stream + // + // SUCCEEDED - All data for the identity pool has been published to the configured + // stream + // + // FAILED - Some portion of the data has failed to publish, check FailureMessage + // for the cause. + BulkPublishStatus *string `type:"string" enum:"BulkPublishStatus"` + + // If BulkPublishStatus is FAILED this field will contain the error message + // that caused the bulk publish to fail. + FailureMessage *string `type:"string"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetBulkPublishDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBulkPublishDetailsOutput) GoString() string { + return s.String() +} + +// A request for a list of the configured Cognito Events +type GetCognitoEventsInput struct { + _ struct{} `type:"structure"` + + // The Cognito Identity Pool ID for the request + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCognitoEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCognitoEventsInput) GoString() string { + return s.String() +} + +// The response from the GetCognitoEvents request +type GetCognitoEventsOutput struct { + _ struct{} `type:"structure"` + + // The Cognito Events returned from the GetCognitoEvents request + Events map[string]*string `type:"map"` +} 
+ +// String returns the string representation +func (s GetCognitoEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCognitoEventsOutput) GoString() string { + return s.String() +} + +// The input for the GetIdentityPoolConfiguration operation. +type GetIdentityPoolConfigurationInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. This is the ID of the pool for which to return + // a configuration. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoolConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoolConfigurationInput) GoString() string { + return s.String() +} + +// The output for the GetIdentityPoolConfiguration operation. +type GetIdentityPoolConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Options to apply to this identity pool for Amazon Cognito streams. + CognitoStreams *CognitoStreams `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. + IdentityPoolId *string `min:"1" type:"string"` + + // Options to apply to this identity pool for push synchronization. + PushSync *PushSync `type:"structure"` +} + +// String returns the string representation +func (s GetIdentityPoolConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoolConfigurationOutput) GoString() string { + return s.String() +} + +// Usage information for the identity pool. +type IdentityPoolUsage struct { + _ struct{} `type:"structure"` + + // Data storage information for the identity pool. 
+ DataStorage *int64 `type:"long"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `min:"1" type:"string"` + + // Date on which the identity pool was last modified. + LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Number of sync sessions for the identity pool. + SyncSessionsCount *int64 `type:"long"` +} + +// String returns the string representation +func (s IdentityPoolUsage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityPoolUsage) GoString() string { + return s.String() +} + +// Usage information for the identity. +type IdentityUsage struct { + _ struct{} `type:"structure"` + + // Total data storage for this identity. + DataStorage *int64 `type:"long"` + + // Number of datasets for the identity. + DatasetCount *int64 `type:"integer"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `min:"1" type:"string"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `min:"1" type:"string"` + + // Date on which the identity was last modified. + LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s IdentityUsage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityUsage) GoString() string { + return s.String() +} + +// Request for a list of datasets for an identity. 
+type ListDatasetsInput struct { + _ struct{} `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` + + // The maximum number of results to be returned. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // A pagination token for obtaining the next page of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDatasetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsInput) GoString() string { + return s.String() +} + +// Returned for a successful ListDatasets request. +type ListDatasetsOutput struct { + _ struct{} `type:"structure"` + + // Number of datasets returned. + Count *int64 `type:"integer"` + + // A set of datasets. + Datasets []*Dataset `type:"list"` + + // A pagination token for obtaining the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDatasetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsOutput) GoString() string { + return s.String() +} + +// A request for usage information on an identity pool. +type ListIdentityPoolUsageInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to be returned. 
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // A pagination token for obtaining the next page of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListIdentityPoolUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoolUsageInput) GoString() string { + return s.String() +} + +// Returned for a successful ListIdentityPoolUsage request. +type ListIdentityPoolUsageOutput struct { + _ struct{} `type:"structure"` + + // Total number of identities for the identity pool. + Count *int64 `type:"integer"` + + // Usage information for the identity pools. + IdentityPoolUsages []*IdentityPoolUsage `type:"list"` + + // The maximum number of results to be returned. + MaxResults *int64 `type:"integer"` + + // A pagination token for obtaining the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentityPoolUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoolUsageOutput) GoString() string { + return s.String() +} + +// A request for a list of records. +type ListRecordsInput struct { + _ struct{} `type:"structure"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. 
+ IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` + + // The last server sync count for this record. + LastSyncCount *int64 `location:"querystring" locationName:"lastSyncCount" type:"long"` + + // The maximum number of results to be returned. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // A pagination token for obtaining the next page of results. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // A token containing a session ID, identity ID, and expiration. + SyncSessionToken *string `location:"querystring" locationName:"syncSessionToken" type:"string"` +} + +// String returns the string representation +func (s ListRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRecordsInput) GoString() string { + return s.String() +} + +// Returned for a successful ListRecordsRequest. +type ListRecordsOutput struct { + _ struct{} `type:"structure"` + + // Total number of records. + Count *int64 `type:"integer"` + + // A boolean value specifying whether to delete the dataset locally. + DatasetDeletedAfterRequestedSyncCount *bool `type:"boolean"` + + // Indicates whether the dataset exists. + DatasetExists *bool `type:"boolean"` + + // Server sync count for this dataset. + DatasetSyncCount *int64 `type:"long"` + + // The user/device that made the last change to this record. + LastModifiedBy *string `type:"string"` + + // Names of merged datasets. + MergedDatasetNames []*string `type:"list"` + + // A pagination token for obtaining the next page of results. 
+ NextToken *string `type:"string"` + + // A list of all records. + Records []*Record `type:"list"` + + // A token containing a session ID, identity ID, and expiration. + SyncSessionToken *string `type:"string"` +} + +// String returns the string representation +func (s ListRecordsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRecordsOutput) GoString() string { + return s.String() +} + +// Configuration options to be applied to the identity pool. +type PushSync struct { + _ struct{} `type:"structure"` + + // List of SNS platform application ARNs that could be used by clients. + ApplicationArns []*string `type:"list"` + + // A role configured to allow Cognito to call SNS on behalf of the developer. + RoleArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s PushSync) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PushSync) GoString() string { + return s.String() +} + +// The basic data structure of a dataset. +type Record struct { + _ struct{} `type:"structure"` + + // The last modified date of the client device. + DeviceLastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The key for the record. + Key *string `min:"1" type:"string"` + + // The user/device that made the last change to this record. + LastModifiedBy *string `type:"string"` + + // The date on which the record was last modified. + LastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The server sync count for this record. + SyncCount *int64 `type:"long"` + + // The value for the record. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Record) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Record) GoString() string { + return s.String() +} + +// An update operation for a record. 
+type RecordPatch struct { + _ struct{} `type:"structure"` + + // The last modified date of the client device. + DeviceLastModifiedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The key associated with the record patch. + Key *string `min:"1" type:"string" required:"true"` + + // An operation, either replace or remove. + Op *string `type:"string" required:"true" enum:"Operation"` + + // Last known server sync count for this record. Set to 0 if unknown. + SyncCount *int64 `type:"long" required:"true"` + + // The value associated with the record patch. + Value *string `type:"string"` +} + +// String returns the string representation +func (s RecordPatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordPatch) GoString() string { + return s.String() +} + +// A request to RegisterDevice. +type RegisterDeviceInput struct { + _ struct{} `type:"structure"` + + // The unique ID for this identity. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. Here, the ID of the pool that the identity belongs + // to. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` + + // The SNS platform type (e.g. GCM, SDM, APNS, APNS_SANDBOX). + Platform *string `type:"string" required:"true" enum:"Platform"` + + // The push token. + Token *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterDeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDeviceInput) GoString() string { + return s.String() +} + +// Response to a RegisterDevice request. 
+type RegisterDeviceOutput struct { + _ struct{} `type:"structure"` + + // The unique ID generated for this device by Cognito. + DeviceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RegisterDeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDeviceOutput) GoString() string { + return s.String() +} + +// A request to configure Cognito Events" +// +// " +type SetCognitoEventsInput struct { + _ struct{} `type:"structure"` + + // The events to configure + Events map[string]*string `type:"map" required:"true"` + + // The Cognito Identity Pool to use when configuring Cognito Events + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetCognitoEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetCognitoEventsInput) GoString() string { + return s.String() +} + +type SetCognitoEventsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetCognitoEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetCognitoEventsOutput) GoString() string { + return s.String() +} + +// The input for the SetIdentityPoolConfiguration operation. +type SetIdentityPoolConfigurationInput struct { + _ struct{} `type:"structure"` + + // Options to apply to this identity pool for Amazon Cognito streams. + CognitoStreams *CognitoStreams `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. This is the ID of the pool to modify. 
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` + + // Options to apply to this identity pool for push synchronization. + PushSync *PushSync `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityPoolConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityPoolConfigurationInput) GoString() string { + return s.String() +} + +// The output for the SetIdentityPoolConfiguration operation +type SetIdentityPoolConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Options to apply to this identity pool for Amazon Cognito streams. + CognitoStreams *CognitoStreams `type:"structure"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. + IdentityPoolId *string `min:"1" type:"string"` + + // Options to apply to this identity pool for push synchronization. + PushSync *PushSync `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityPoolConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityPoolConfigurationOutput) GoString() string { + return s.String() +} + +// A request to SubscribeToDatasetRequest. +type SubscribeToDatasetInput struct { + _ struct{} `type:"structure"` + + // The name of the dataset to subcribe to. + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // The unique ID generated for this device by Cognito. + DeviceId *string `location:"uri" locationName:"DeviceId" min:"1" type:"string" required:"true"` + + // Unique ID for this identity. 
+ IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. The ID of the pool to which the identity belongs. + IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SubscribeToDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeToDatasetInput) GoString() string { + return s.String() +} + +// Response to a SubscribeToDataset request. +type SubscribeToDatasetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SubscribeToDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeToDatasetOutput) GoString() string { + return s.String() +} + +// A request to UnsubscribeFromDataset. +type UnsubscribeFromDatasetInput struct { + _ struct{} `type:"structure"` + + // The name of the dataset from which to unsubcribe. + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // The unique ID generated for this device by Cognito. + DeviceId *string `location:"uri" locationName:"DeviceId" min:"1" type:"string" required:"true"` + + // Unique ID for this identity. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. The ID of the pool to which this identity belongs. 
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsubscribeFromDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeFromDatasetInput) GoString() string { + return s.String() +} + +// Response to an UnsubscribeFromDataset request. +type UnsubscribeFromDatasetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnsubscribeFromDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeFromDatasetOutput) GoString() string { + return s.String() +} + +// A request to post updates to records or add and delete records for a dataset +// and user. +type UpdateRecordsInput struct { + _ struct{} `type:"structure"` + + // Intended to supply a device ID that will populate the lastModifiedBy field + // referenced in other methods. The ClientContext field is not yet implemented. + ClientContext *string `location:"header" locationName:"x-amz-Client-Context" type:"string"` + + // A string of up to 128 characters. Allowed characters are a-z, A-Z, 0-9, '_' + // (underscore), '-' (dash), and '.' (dot). + DatasetName *string `location:"uri" locationName:"DatasetName" min:"1" type:"string" required:"true"` + + // The unique ID generated for this device by Cognito. + DeviceId *string `min:"1" type:"string"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. + IdentityId *string `location:"uri" locationName:"IdentityId" min:"1" type:"string" required:"true"` + + // A name-spaced GUID (for example, us-east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) + // created by Amazon Cognito. GUID generation is unique within a region. 
+ IdentityPoolId *string `location:"uri" locationName:"IdentityPoolId" min:"1" type:"string" required:"true"` + + // A list of patch operations. + RecordPatches []*RecordPatch `type:"list"` + + // The SyncSessionToken returned by a previous call to ListRecords for this + // dataset and identity. + SyncSessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRecordsInput) GoString() string { + return s.String() +} + +// Returned for a successful UpdateRecordsRequest. +type UpdateRecordsOutput struct { + _ struct{} `type:"structure"` + + // A list of records that have been updated. + Records []*Record `type:"list"` +} + +// String returns the string representation +func (s UpdateRecordsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRecordsOutput) GoString() string { + return s.String() +} + +const ( + // @enum BulkPublishStatus + BulkPublishStatusNotStarted = "NOT_STARTED" + // @enum BulkPublishStatus + BulkPublishStatusInProgress = "IN_PROGRESS" + // @enum BulkPublishStatus + BulkPublishStatusFailed = "FAILED" + // @enum BulkPublishStatus + BulkPublishStatusSucceeded = "SUCCEEDED" +) + +const ( + // @enum Operation + OperationReplace = "replace" + // @enum Operation + OperationRemove = "remove" +) + +const ( + // @enum Platform + PlatformApns = "APNS" + // @enum Platform + PlatformApnsSandbox = "APNS_SANDBOX" + // @enum Platform + PlatformGcm = "GCM" + // @enum Platform + PlatformAdm = "ADM" +) + +const ( + // @enum StreamingStatus + StreamingStatusEnabled = "ENABLED" + // @enum StreamingStatus + StreamingStatusDisabled = "DISABLED" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface/interface.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/cognitosynciface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,82 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cognitosynciface provides an interface for the Amazon Cognito Sync. +package cognitosynciface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cognitosync" +) + +// CognitoSyncAPI is the interface type for cognitosync.CognitoSync. +type CognitoSyncAPI interface { + BulkPublishRequest(*cognitosync.BulkPublishInput) (*request.Request, *cognitosync.BulkPublishOutput) + + BulkPublish(*cognitosync.BulkPublishInput) (*cognitosync.BulkPublishOutput, error) + + DeleteDatasetRequest(*cognitosync.DeleteDatasetInput) (*request.Request, *cognitosync.DeleteDatasetOutput) + + DeleteDataset(*cognitosync.DeleteDatasetInput) (*cognitosync.DeleteDatasetOutput, error) + + DescribeDatasetRequest(*cognitosync.DescribeDatasetInput) (*request.Request, *cognitosync.DescribeDatasetOutput) + + DescribeDataset(*cognitosync.DescribeDatasetInput) (*cognitosync.DescribeDatasetOutput, error) + + DescribeIdentityPoolUsageRequest(*cognitosync.DescribeIdentityPoolUsageInput) (*request.Request, *cognitosync.DescribeIdentityPoolUsageOutput) + + DescribeIdentityPoolUsage(*cognitosync.DescribeIdentityPoolUsageInput) (*cognitosync.DescribeIdentityPoolUsageOutput, error) + + DescribeIdentityUsageRequest(*cognitosync.DescribeIdentityUsageInput) (*request.Request, *cognitosync.DescribeIdentityUsageOutput) + + DescribeIdentityUsage(*cognitosync.DescribeIdentityUsageInput) (*cognitosync.DescribeIdentityUsageOutput, error) + + 
GetBulkPublishDetailsRequest(*cognitosync.GetBulkPublishDetailsInput) (*request.Request, *cognitosync.GetBulkPublishDetailsOutput) + + GetBulkPublishDetails(*cognitosync.GetBulkPublishDetailsInput) (*cognitosync.GetBulkPublishDetailsOutput, error) + + GetCognitoEventsRequest(*cognitosync.GetCognitoEventsInput) (*request.Request, *cognitosync.GetCognitoEventsOutput) + + GetCognitoEvents(*cognitosync.GetCognitoEventsInput) (*cognitosync.GetCognitoEventsOutput, error) + + GetIdentityPoolConfigurationRequest(*cognitosync.GetIdentityPoolConfigurationInput) (*request.Request, *cognitosync.GetIdentityPoolConfigurationOutput) + + GetIdentityPoolConfiguration(*cognitosync.GetIdentityPoolConfigurationInput) (*cognitosync.GetIdentityPoolConfigurationOutput, error) + + ListDatasetsRequest(*cognitosync.ListDatasetsInput) (*request.Request, *cognitosync.ListDatasetsOutput) + + ListDatasets(*cognitosync.ListDatasetsInput) (*cognitosync.ListDatasetsOutput, error) + + ListIdentityPoolUsageRequest(*cognitosync.ListIdentityPoolUsageInput) (*request.Request, *cognitosync.ListIdentityPoolUsageOutput) + + ListIdentityPoolUsage(*cognitosync.ListIdentityPoolUsageInput) (*cognitosync.ListIdentityPoolUsageOutput, error) + + ListRecordsRequest(*cognitosync.ListRecordsInput) (*request.Request, *cognitosync.ListRecordsOutput) + + ListRecords(*cognitosync.ListRecordsInput) (*cognitosync.ListRecordsOutput, error) + + RegisterDeviceRequest(*cognitosync.RegisterDeviceInput) (*request.Request, *cognitosync.RegisterDeviceOutput) + + RegisterDevice(*cognitosync.RegisterDeviceInput) (*cognitosync.RegisterDeviceOutput, error) + + SetCognitoEventsRequest(*cognitosync.SetCognitoEventsInput) (*request.Request, *cognitosync.SetCognitoEventsOutput) + + SetCognitoEvents(*cognitosync.SetCognitoEventsInput) (*cognitosync.SetCognitoEventsOutput, error) + + SetIdentityPoolConfigurationRequest(*cognitosync.SetIdentityPoolConfigurationInput) (*request.Request, *cognitosync.SetIdentityPoolConfigurationOutput) + + 
SetIdentityPoolConfiguration(*cognitosync.SetIdentityPoolConfigurationInput) (*cognitosync.SetIdentityPoolConfigurationOutput, error) + + SubscribeToDatasetRequest(*cognitosync.SubscribeToDatasetInput) (*request.Request, *cognitosync.SubscribeToDatasetOutput) + + SubscribeToDataset(*cognitosync.SubscribeToDatasetInput) (*cognitosync.SubscribeToDatasetOutput, error) + + UnsubscribeFromDatasetRequest(*cognitosync.UnsubscribeFromDatasetInput) (*request.Request, *cognitosync.UnsubscribeFromDatasetOutput) + + UnsubscribeFromDataset(*cognitosync.UnsubscribeFromDatasetInput) (*cognitosync.UnsubscribeFromDatasetOutput, error) + + UpdateRecordsRequest(*cognitosync.UpdateRecordsInput) (*request.Request, *cognitosync.UpdateRecordsOutput) + + UpdateRecords(*cognitosync.UpdateRecordsInput) (*cognitosync.UpdateRecordsOutput, error) +} + +var _ CognitoSyncAPI = (*cognitosync.CognitoSync)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,394 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package cognitosync_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cognitosync" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleCognitoSync_BulkPublish() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.BulkPublishInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.BulkPublish(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_DeleteDataset() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.DeleteDatasetInput{ + DatasetName: aws.String("DatasetName"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DeleteDataset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_DescribeDataset() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.DescribeDatasetInput{ + DatasetName: aws.String("DatasetName"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DescribeDataset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoSync_DescribeIdentityPoolUsage() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.DescribeIdentityPoolUsageInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DescribeIdentityPoolUsage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_DescribeIdentityUsage() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.DescribeIdentityUsageInput{ + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.DescribeIdentityUsage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_GetBulkPublishDetails() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.GetBulkPublishDetailsInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.GetBulkPublishDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_GetCognitoEvents() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.GetCognitoEventsInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.GetCognitoEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoSync_GetIdentityPoolConfiguration() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.GetIdentityPoolConfigurationInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.GetIdentityPoolConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_ListDatasets() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.ListDatasetsInput{ + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListDatasets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_ListIdentityPoolUsage() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.ListIdentityPoolUsageInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListIdentityPoolUsage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoSync_ListRecords() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.ListRecordsInput{ + DatasetName: aws.String("DatasetName"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + LastSyncCount: aws.Int64(1), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + SyncSessionToken: aws.String("SyncSessionToken"), + } + resp, err := svc.ListRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_RegisterDevice() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.RegisterDeviceInput{ + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + Platform: aws.String("Platform"), // Required + Token: aws.String("PushToken"), // Required + } + resp, err := svc.RegisterDevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_SetCognitoEvents() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.SetCognitoEventsInput{ + Events: map[string]*string{ // Required + "Key": aws.String("LambdaFunctionArn"), // Required + // More values... + }, + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.SetCognitoEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoSync_SetIdentityPoolConfiguration() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.SetIdentityPoolConfigurationInput{ + IdentityPoolId: aws.String("IdentityPoolId"), // Required + CognitoStreams: &cognitosync.CognitoStreams{ + RoleArn: aws.String("AssumeRoleArn"), + StreamName: aws.String("StreamName"), + StreamingStatus: aws.String("StreamingStatus"), + }, + PushSync: &cognitosync.PushSync{ + ApplicationArns: []*string{ + aws.String("ApplicationArn"), // Required + // More values... + }, + RoleArn: aws.String("AssumeRoleArn"), + }, + } + resp, err := svc.SetIdentityPoolConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_SubscribeToDataset() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.SubscribeToDatasetInput{ + DatasetName: aws.String("DatasetName"), // Required + DeviceId: aws.String("DeviceId"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.SubscribeToDataset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleCognitoSync_UnsubscribeFromDataset() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.UnsubscribeFromDatasetInput{ + DatasetName: aws.String("DatasetName"), // Required + DeviceId: aws.String("DeviceId"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + } + resp, err := svc.UnsubscribeFromDataset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleCognitoSync_UpdateRecords() { + svc := cognitosync.New(session.New()) + + params := &cognitosync.UpdateRecordsInput{ + DatasetName: aws.String("DatasetName"), // Required + IdentityId: aws.String("IdentityId"), // Required + IdentityPoolId: aws.String("IdentityPoolId"), // Required + SyncSessionToken: aws.String("SyncSessionToken"), // Required + ClientContext: aws.String("ClientContext"), + DeviceId: aws.String("DeviceId"), + RecordPatches: []*cognitosync.RecordPatch{ + { // Required + Key: aws.String("RecordKey"), // Required + Op: aws.String("Operation"), // Required + SyncCount: aws.Int64(1), // Required + DeviceLastModifiedDate: aws.Time(time.Now()), + Value: aws.String("RecordValue"), + }, + // More values... + }, + } + resp, err := svc.UpdateRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/cognitosync/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,103 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cognitosync + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Cognito Sync provides an AWS service and client library that enable +// cross-device syncing of application-related user data. High-level client +// libraries are available for both iOS and Android. You can use these libraries +// to persist data locally so that it's available even if the device is offline. +// Developer credentials don't need to be stored on the mobile device to access +// the service. You can use Amazon Cognito to obtain a normalized user ID and +// credentials. User data is persisted in a dataset that can store up to 1 MB +// of key-value pairs, and you can have up to 20 datasets per user identity. +// +// With Amazon Cognito Sync, the data stored for each identity is accessible +// only to credentials assigned to that identity. In order to use the Cognito +// Sync service, you need to make API calls using credentials retrieved with +// Amazon Cognito Identity service (http://docs.aws.amazon.com/cognitoidentity/latest/APIReference/Welcome.html). 
+// +// If you want to use Cognito Sync in an Android or iOS application, you will +// probably want to make API calls via the AWS Mobile SDK. To learn more, see +// the Developer Guide for Android (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-sync.html) +// and the Developer Guide for iOS (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-sync.html). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type CognitoSync struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "cognito-sync" + +// New creates a new instance of the CognitoSync client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a CognitoSync client from just a session. +// svc := cognitosync.New(mySession) +// +// // Create a CognitoSync client with additional configuration +// svc := cognitosync.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoSync { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CognitoSync { + svc := &CognitoSync{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-06-30", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CognitoSync operation and runs any +// custom request initialization. +func (c *CognitoSync) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2832 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package configservice provides a client for AWS Config. 
+package configservice + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opDeleteConfigRule = "DeleteConfigRule" + +// DeleteConfigRuleRequest generates a request for the DeleteConfigRule operation. +func (c *ConfigService) DeleteConfigRuleRequest(input *DeleteConfigRuleInput) (req *request.Request, output *DeleteConfigRuleOutput) { + op := &request.Operation{ + Name: opDeleteConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteConfigRuleOutput{} + req.Data = output + return +} + +// Deletes the specified AWS Config rule and all of its evaluation results. +// +// AWS Config sets the state of a rule to DELETING until the deletion is complete. +// You cannot update a rule while it is in this state. If you make a PutConfigRule +// request for the rule, you will receive a ResourceInUseException. +// +// You can check the state of a rule by using the DescribeConfigRules request. +func (c *ConfigService) DeleteConfigRule(input *DeleteConfigRuleInput) (*DeleteConfigRuleOutput, error) { + req, out := c.DeleteConfigRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeliveryChannel = "DeleteDeliveryChannel" + +// DeleteDeliveryChannelRequest generates a request for the DeleteDeliveryChannel operation. 
+func (c *ConfigService) DeleteDeliveryChannelRequest(input *DeleteDeliveryChannelInput) (req *request.Request, output *DeleteDeliveryChannelOutput) { + op := &request.Operation{ + Name: opDeleteDeliveryChannel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeliveryChannelInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDeliveryChannelOutput{} + req.Data = output + return +} + +// Deletes the specified delivery channel. +// +// The delivery channel cannot be deleted if it is the only delivery channel +// and the configuration recorder is still running. To delete the delivery channel, +// stop the running configuration recorder using the StopConfigurationRecorder +// action. +func (c *ConfigService) DeleteDeliveryChannel(input *DeleteDeliveryChannelInput) (*DeleteDeliveryChannelOutput, error) { + req, out := c.DeleteDeliveryChannelRequest(input) + err := req.Send() + return out, err +} + +const opDeliverConfigSnapshot = "DeliverConfigSnapshot" + +// DeliverConfigSnapshotRequest generates a request for the DeliverConfigSnapshot operation. +func (c *ConfigService) DeliverConfigSnapshotRequest(input *DeliverConfigSnapshotInput) (req *request.Request, output *DeliverConfigSnapshotOutput) { + op := &request.Operation{ + Name: opDeliverConfigSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeliverConfigSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeliverConfigSnapshotOutput{} + req.Data = output + return +} + +// Schedules delivery of a configuration snapshot to the Amazon S3 bucket in +// the specified delivery channel. After the delivery has started, AWS Config +// sends following notifications using an Amazon SNS topic that you have specified. +// +// Notification of starting the delivery. 
Notification of delivery completed, +// if the delivery was successfully completed. Notification of delivery failure, +// if the delivery failed to complete. +func (c *ConfigService) DeliverConfigSnapshot(input *DeliverConfigSnapshotInput) (*DeliverConfigSnapshotOutput, error) { + req, out := c.DeliverConfigSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDescribeComplianceByConfigRule = "DescribeComplianceByConfigRule" + +// DescribeComplianceByConfigRuleRequest generates a request for the DescribeComplianceByConfigRule operation. +func (c *ConfigService) DescribeComplianceByConfigRuleRequest(input *DescribeComplianceByConfigRuleInput) (req *request.Request, output *DescribeComplianceByConfigRuleOutput) { + op := &request.Operation{ + Name: opDescribeComplianceByConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeComplianceByConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeComplianceByConfigRuleOutput{} + req.Data = output + return +} + +// Indicates whether the specified AWS Config rules are compliant. If a rule +// is noncompliant, this action returns the number of AWS resources that do +// not comply with the rule. +// +// A rule is compliant if all of the evaluated resources comply with it, and +// it is noncompliant if any of these resources do not comply. +// +// If AWS Config has no current evaluation results for the rule, it returns +// InsufficientData. This result might indicate one of the following conditions: +// AWS Config has never invoked an evaluation for the rule. To check whether +// it has, use the DescribeConfigRuleEvaluationStatus action to get the LastSuccessfulInvocationTime +// and LastFailedInvocationTime. The rule's AWS Lambda function is failing to +// send evaluation results to AWS Config. Verify that the role that you assigned +// to your configuration recorder includes the config:PutEvaluations permission. 
+// If the rule is a customer managed rule, verify that the AWS Lambda execution +// role includes the config:PutEvaluations permission. The rule's AWS Lambda +// function has returned NOT_APPLICABLE for all evaluation results. This can +// occur if the resources were deleted or removed from the rule's scope. +func (c *ConfigService) DescribeComplianceByConfigRule(input *DescribeComplianceByConfigRuleInput) (*DescribeComplianceByConfigRuleOutput, error) { + req, out := c.DescribeComplianceByConfigRuleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeComplianceByResource = "DescribeComplianceByResource" + +// DescribeComplianceByResourceRequest generates a request for the DescribeComplianceByResource operation. +func (c *ConfigService) DescribeComplianceByResourceRequest(input *DescribeComplianceByResourceInput) (req *request.Request, output *DescribeComplianceByResourceOutput) { + op := &request.Operation{ + Name: opDescribeComplianceByResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeComplianceByResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeComplianceByResourceOutput{} + req.Data = output + return +} + +// Indicates whether the specified AWS resources are compliant. If a resource +// is noncompliant, this action returns the number of AWS Config rules that +// the resource does not comply with. +// +// A resource is compliant if it complies with all the AWS Config rules that +// evaluate it. It is noncompliant if it does not comply with one or more of +// these rules. +// +// If AWS Config has no current evaluation results for the resource, it returns +// InsufficientData. This result might indicate one of the following conditions +// about the rules that evaluate the resource: AWS Config has never invoked +// an evaluation for the rule. 
To check whether it has, use the DescribeConfigRuleEvaluationStatus +// action to get the LastSuccessfulInvocationTime and LastFailedInvocationTime. +// The rule's AWS Lambda function is failing to send evaluation results to AWS +// Config. Verify that the role that you assigned to your configuration recorder +// includes the config:PutEvaluations permission. If the rule is a customer +// managed rule, verify that the AWS Lambda execution role includes the config:PutEvaluations +// permission. The rule's AWS Lambda function has returned NOT_APPLICABLE for +// all evaluation results. This can occur if the resources were deleted or removed +// from the rule's scope. +func (c *ConfigService) DescribeComplianceByResource(input *DescribeComplianceByResourceInput) (*DescribeComplianceByResourceOutput, error) { + req, out := c.DescribeComplianceByResourceRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigRuleEvaluationStatus = "DescribeConfigRuleEvaluationStatus" + +// DescribeConfigRuleEvaluationStatusRequest generates a request for the DescribeConfigRuleEvaluationStatus operation. +func (c *ConfigService) DescribeConfigRuleEvaluationStatusRequest(input *DescribeConfigRuleEvaluationStatusInput) (req *request.Request, output *DescribeConfigRuleEvaluationStatusOutput) { + op := &request.Operation{ + Name: opDescribeConfigRuleEvaluationStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigRuleEvaluationStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigRuleEvaluationStatusOutput{} + req.Data = output + return +} + +// Returns status information for each of your AWS managed Config rules. The +// status includes information such as the last time AWS Config invoked the +// rule, the last time AWS Config failed to invoke the rule, and the related +// error for the last failure. 
+func (c *ConfigService) DescribeConfigRuleEvaluationStatus(input *DescribeConfigRuleEvaluationStatusInput) (*DescribeConfigRuleEvaluationStatusOutput, error) { + req, out := c.DescribeConfigRuleEvaluationStatusRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigRules = "DescribeConfigRules" + +// DescribeConfigRulesRequest generates a request for the DescribeConfigRules operation. +func (c *ConfigService) DescribeConfigRulesRequest(input *DescribeConfigRulesInput) (req *request.Request, output *DescribeConfigRulesOutput) { + op := &request.Operation{ + Name: opDescribeConfigRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigRulesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigRulesOutput{} + req.Data = output + return +} + +// Returns details about your AWS Config rules. +func (c *ConfigService) DescribeConfigRules(input *DescribeConfigRulesInput) (*DescribeConfigRulesOutput, error) { + req, out := c.DescribeConfigRulesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigurationRecorderStatus = "DescribeConfigurationRecorderStatus" + +// DescribeConfigurationRecorderStatusRequest generates a request for the DescribeConfigurationRecorderStatus operation. +func (c *ConfigService) DescribeConfigurationRecorderStatusRequest(input *DescribeConfigurationRecorderStatusInput) (req *request.Request, output *DescribeConfigurationRecorderStatusOutput) { + op := &request.Operation{ + Name: opDescribeConfigurationRecorderStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigurationRecorderStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigurationRecorderStatusOutput{} + req.Data = output + return +} + +// Returns the current status of the specified configuration recorder. 
If a +// configuration recorder is not specified, this action returns the status of +// all configuration recorder associated with the account. +// +// Currently, you can specify only one configuration recorder per account. +func (c *ConfigService) DescribeConfigurationRecorderStatus(input *DescribeConfigurationRecorderStatusInput) (*DescribeConfigurationRecorderStatusOutput, error) { + req, out := c.DescribeConfigurationRecorderStatusRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigurationRecorders = "DescribeConfigurationRecorders" + +// DescribeConfigurationRecordersRequest generates a request for the DescribeConfigurationRecorders operation. +func (c *ConfigService) DescribeConfigurationRecordersRequest(input *DescribeConfigurationRecordersInput) (req *request.Request, output *DescribeConfigurationRecordersOutput) { + op := &request.Operation{ + Name: opDescribeConfigurationRecorders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigurationRecordersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigurationRecordersOutput{} + req.Data = output + return +} + +// Returns the name of one or more specified configuration recorders. If the +// recorder name is not specified, this action returns the names of all the +// configuration recorders associated with the account. +// +// Currently, you can specify only one configuration recorder per account. +func (c *ConfigService) DescribeConfigurationRecorders(input *DescribeConfigurationRecordersInput) (*DescribeConfigurationRecordersOutput, error) { + req, out := c.DescribeConfigurationRecordersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDeliveryChannelStatus = "DescribeDeliveryChannelStatus" + +// DescribeDeliveryChannelStatusRequest generates a request for the DescribeDeliveryChannelStatus operation. 
+func (c *ConfigService) DescribeDeliveryChannelStatusRequest(input *DescribeDeliveryChannelStatusInput) (req *request.Request, output *DescribeDeliveryChannelStatusOutput) { + op := &request.Operation{ + Name: opDescribeDeliveryChannelStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeliveryChannelStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeliveryChannelStatusOutput{} + req.Data = output + return +} + +// Returns the current status of the specified delivery channel. If a delivery +// channel is not specified, this action returns the current status of all delivery +// channels associated with the account. +// +// Currently, you can specify only one delivery channel per account. +func (c *ConfigService) DescribeDeliveryChannelStatus(input *DescribeDeliveryChannelStatusInput) (*DescribeDeliveryChannelStatusOutput, error) { + req, out := c.DescribeDeliveryChannelStatusRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDeliveryChannels = "DescribeDeliveryChannels" + +// DescribeDeliveryChannelsRequest generates a request for the DescribeDeliveryChannels operation. +func (c *ConfigService) DescribeDeliveryChannelsRequest(input *DescribeDeliveryChannelsInput) (req *request.Request, output *DescribeDeliveryChannelsOutput) { + op := &request.Operation{ + Name: opDescribeDeliveryChannels, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeliveryChannelsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeliveryChannelsOutput{} + req.Data = output + return +} + +// Returns details about the specified delivery channel. If a delivery channel +// is not specified, this action returns the details of all delivery channels +// associated with the account. +// +// Currently, you can specify only one delivery channel per account. 
+func (c *ConfigService) DescribeDeliveryChannels(input *DescribeDeliveryChannelsInput) (*DescribeDeliveryChannelsOutput, error) { + req, out := c.DescribeDeliveryChannelsRequest(input) + err := req.Send() + return out, err +} + +const opGetComplianceDetailsByConfigRule = "GetComplianceDetailsByConfigRule" + +// GetComplianceDetailsByConfigRuleRequest generates a request for the GetComplianceDetailsByConfigRule operation. +func (c *ConfigService) GetComplianceDetailsByConfigRuleRequest(input *GetComplianceDetailsByConfigRuleInput) (req *request.Request, output *GetComplianceDetailsByConfigRuleOutput) { + op := &request.Operation{ + Name: opGetComplianceDetailsByConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetComplianceDetailsByConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetComplianceDetailsByConfigRuleOutput{} + req.Data = output + return +} + +// Returns the evaluation results for the specified AWS Config rule. The results +// indicate which AWS resources were evaluated by the rule, when each resource +// was last evaluated, and whether each resource complies with the rule. +func (c *ConfigService) GetComplianceDetailsByConfigRule(input *GetComplianceDetailsByConfigRuleInput) (*GetComplianceDetailsByConfigRuleOutput, error) { + req, out := c.GetComplianceDetailsByConfigRuleRequest(input) + err := req.Send() + return out, err +} + +const opGetComplianceDetailsByResource = "GetComplianceDetailsByResource" + +// GetComplianceDetailsByResourceRequest generates a request for the GetComplianceDetailsByResource operation. 
+func (c *ConfigService) GetComplianceDetailsByResourceRequest(input *GetComplianceDetailsByResourceInput) (req *request.Request, output *GetComplianceDetailsByResourceOutput) { + op := &request.Operation{ + Name: opGetComplianceDetailsByResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetComplianceDetailsByResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetComplianceDetailsByResourceOutput{} + req.Data = output + return +} + +// Returns the evaluation results for the specified AWS resource. The results +// indicate which AWS Config rules were used to evaluate the resource, when +// each rule was last used, and whether the resource complies with each rule. +func (c *ConfigService) GetComplianceDetailsByResource(input *GetComplianceDetailsByResourceInput) (*GetComplianceDetailsByResourceOutput, error) { + req, out := c.GetComplianceDetailsByResourceRequest(input) + err := req.Send() + return out, err +} + +const opGetComplianceSummaryByConfigRule = "GetComplianceSummaryByConfigRule" + +// GetComplianceSummaryByConfigRuleRequest generates a request for the GetComplianceSummaryByConfigRule operation. +func (c *ConfigService) GetComplianceSummaryByConfigRuleRequest(input *GetComplianceSummaryByConfigRuleInput) (req *request.Request, output *GetComplianceSummaryByConfigRuleOutput) { + op := &request.Operation{ + Name: opGetComplianceSummaryByConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetComplianceSummaryByConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetComplianceSummaryByConfigRuleOutput{} + req.Data = output + return +} + +// Returns the number of AWS Config rules that are compliant and noncompliant, +// up to a maximum of 25 for each. 
+func (c *ConfigService) GetComplianceSummaryByConfigRule(input *GetComplianceSummaryByConfigRuleInput) (*GetComplianceSummaryByConfigRuleOutput, error) { + req, out := c.GetComplianceSummaryByConfigRuleRequest(input) + err := req.Send() + return out, err +} + +const opGetComplianceSummaryByResourceType = "GetComplianceSummaryByResourceType" + +// GetComplianceSummaryByResourceTypeRequest generates a request for the GetComplianceSummaryByResourceType operation. +func (c *ConfigService) GetComplianceSummaryByResourceTypeRequest(input *GetComplianceSummaryByResourceTypeInput) (req *request.Request, output *GetComplianceSummaryByResourceTypeOutput) { + op := &request.Operation{ + Name: opGetComplianceSummaryByResourceType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetComplianceSummaryByResourceTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &GetComplianceSummaryByResourceTypeOutput{} + req.Data = output + return +} + +// Returns the number of resources that are compliant and the number that are +// noncompliant. You can specify one or more resource types to get these numbers +// for each resource type. The maximum number returned is 100. +func (c *ConfigService) GetComplianceSummaryByResourceType(input *GetComplianceSummaryByResourceTypeInput) (*GetComplianceSummaryByResourceTypeOutput, error) { + req, out := c.GetComplianceSummaryByResourceTypeRequest(input) + err := req.Send() + return out, err +} + +const opGetResourceConfigHistory = "GetResourceConfigHistory" + +// GetResourceConfigHistoryRequest generates a request for the GetResourceConfigHistory operation. 
+func (c *ConfigService) GetResourceConfigHistoryRequest(input *GetResourceConfigHistoryInput) (req *request.Request, output *GetResourceConfigHistoryOutput) { + op := &request.Operation{ + Name: opGetResourceConfigHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetResourceConfigHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetResourceConfigHistoryOutput{} + req.Data = output + return +} + +// Returns a list of configuration items for the specified resource. The list +// contains details about each state of the resource during the specified time +// interval. +// +// The response is paginated, and by default, AWS Config returns a limit of +// 10 configuration items per page. You can customize this number with the limit +// parameter. The response includes a nextToken string, and to get the next +// page of results, run the request again and enter this string for the nextToken +// parameter. +// +// Each call to the API is limited to span a duration of seven days. It is +// likely that the number of records returned is smaller than the specified +// limit. In such cases, you can make another call, using the nextToken. 
+func (c *ConfigService) GetResourceConfigHistory(input *GetResourceConfigHistoryInput) (*GetResourceConfigHistoryOutput, error) { + req, out := c.GetResourceConfigHistoryRequest(input) + err := req.Send() + return out, err +} + +func (c *ConfigService) GetResourceConfigHistoryPages(input *GetResourceConfigHistoryInput, fn func(p *GetResourceConfigHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetResourceConfigHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetResourceConfigHistoryOutput), lastPage) + }) +} + +const opListDiscoveredResources = "ListDiscoveredResources" + +// ListDiscoveredResourcesRequest generates a request for the ListDiscoveredResources operation. +func (c *ConfigService) ListDiscoveredResourcesRequest(input *ListDiscoveredResourcesInput) (req *request.Request, output *ListDiscoveredResourcesOutput) { + op := &request.Operation{ + Name: opListDiscoveredResources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDiscoveredResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDiscoveredResourcesOutput{} + req.Data = output + return +} + +// Accepts a resource type and returns a list of resource identifiers for the +// resources of that type. A resource identifier includes the resource type, +// ID, and (if available) the custom resource name. The results consist of resources +// that AWS Config has discovered, including those that AWS Config is not currently +// recording. You can narrow the results to include only resources that have +// specific resource IDs or a resource name. +// +// You can specify either resource IDs or a resource name but not both in the +// same request. The response is paginated, and by default AWS Config lists +// 100 resource identifiers on each page. 
You can customize this number with +// the limit parameter. The response includes a nextToken string, and to get +// the next page of results, run the request again and enter this string for +// the nextToken parameter. +func (c *ConfigService) ListDiscoveredResources(input *ListDiscoveredResourcesInput) (*ListDiscoveredResourcesOutput, error) { + req, out := c.ListDiscoveredResourcesRequest(input) + err := req.Send() + return out, err +} + +const opPutConfigRule = "PutConfigRule" + +// PutConfigRuleRequest generates a request for the PutConfigRule operation. +func (c *ConfigService) PutConfigRuleRequest(input *PutConfigRuleInput) (req *request.Request, output *PutConfigRuleOutput) { + op := &request.Operation{ + Name: opPutConfigRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutConfigRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutConfigRuleOutput{} + req.Data = output + return +} + +// Adds or updates an AWS Config rule for evaluating whether your AWS resources +// comply with your desired configurations. +// +// You can use this action for customer managed Config rules and AWS managed +// Config rules. A customer managed Config rule is a custom rule that you develop +// and maintain. An AWS managed Config rule is a customizable, predefined rule +// that is provided by AWS Config. +// +// If you are adding a new customer managed Config rule, you must first create +// the AWS Lambda function that the rule invokes to evaluate your resources. +// When you use the PutConfigRule action to add the rule to AWS Config, you +// must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the +// function. Specify the ARN for the SourceIdentifier key. This key is part +// of the Source object, which is part of the ConfigRule object. 
+// +// If you are adding a new AWS managed Config rule, specify the rule's identifier +// for the SourceIdentifier key. To reference AWS managed Config rule identifiers, +// see Using AWS Managed Config Rules (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). +// +// For any new rule that you add, specify the ConfigRuleName in the ConfigRule +// object. Do not specify the ConfigRuleArn or the ConfigRuleId. These values +// are generated by AWS Config for new rules. +// +// If you are updating a rule that you have added previously, specify the rule's +// ConfigRuleName, ConfigRuleId, or ConfigRuleArn in the ConfigRule data type +// that you use in this request. +// +// The maximum number of rules that AWS Config supports is 25. +// +// For more information about developing and using AWS Config rules, see Evaluating +// AWS Resource Configurations with AWS Config (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) +// in the AWS Config Developer Guide. +func (c *ConfigService) PutConfigRule(input *PutConfigRuleInput) (*PutConfigRuleOutput, error) { + req, out := c.PutConfigRuleRequest(input) + err := req.Send() + return out, err +} + +const opPutConfigurationRecorder = "PutConfigurationRecorder" + +// PutConfigurationRecorderRequest generates a request for the PutConfigurationRecorder operation. 
+func (c *ConfigService) PutConfigurationRecorderRequest(input *PutConfigurationRecorderInput) (req *request.Request, output *PutConfigurationRecorderOutput) { + op := &request.Operation{ + Name: opPutConfigurationRecorder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutConfigurationRecorderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutConfigurationRecorderOutput{} + req.Data = output + return +} + +// Creates a new configuration recorder to record the selected resource configurations. +// +// You can use this action to change the role roleARN and/or the recordingGroup +// of an existing recorder. To change the role, call the action on the existing +// configuration recorder and specify a role. +// +// Currently, you can specify only one configuration recorder per account. +// +// If ConfigurationRecorder does not have the recordingGroup parameter specified, +// the default is to record all supported resource types. +func (c *ConfigService) PutConfigurationRecorder(input *PutConfigurationRecorderInput) (*PutConfigurationRecorderOutput, error) { + req, out := c.PutConfigurationRecorderRequest(input) + err := req.Send() + return out, err +} + +const opPutDeliveryChannel = "PutDeliveryChannel" + +// PutDeliveryChannelRequest generates a request for the PutDeliveryChannel operation. 
+func (c *ConfigService) PutDeliveryChannelRequest(input *PutDeliveryChannelInput) (req *request.Request, output *PutDeliveryChannelOutput) { + op := &request.Operation{ + Name: opPutDeliveryChannel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDeliveryChannelInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutDeliveryChannelOutput{} + req.Data = output + return +} + +// Creates a new delivery channel object to deliver the configuration information +// to an Amazon S3 bucket, and to an Amazon SNS topic. +// +// You can use this action to change the Amazon S3 bucket or an Amazon SNS +// topic of the existing delivery channel. To change the Amazon S3 bucket or +// an Amazon SNS topic, call this action and specify the changed values for +// the S3 bucket and the SNS topic. If you specify a different value for either +// the S3 bucket or the SNS topic, this action will keep the existing value +// for the parameter that is not changed. +// +// Currently, you can specify only one delivery channel per account. +func (c *ConfigService) PutDeliveryChannel(input *PutDeliveryChannelInput) (*PutDeliveryChannelOutput, error) { + req, out := c.PutDeliveryChannelRequest(input) + err := req.Send() + return out, err +} + +const opPutEvaluations = "PutEvaluations" + +// PutEvaluationsRequest generates a request for the PutEvaluations operation. 
+func (c *ConfigService) PutEvaluationsRequest(input *PutEvaluationsInput) (req *request.Request, output *PutEvaluationsOutput) { + op := &request.Operation{ + Name: opPutEvaluations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutEvaluationsInput{} + } + + req = c.newRequest(op, input, output) + output = &PutEvaluationsOutput{} + req.Data = output + return +} + +// Used by an AWS Lambda function to deliver evaluation results to AWS Config. +// This action is required in every AWS Lambda function that is invoked by an +// AWS Config rule. +func (c *ConfigService) PutEvaluations(input *PutEvaluationsInput) (*PutEvaluationsOutput, error) { + req, out := c.PutEvaluationsRequest(input) + err := req.Send() + return out, err +} + +const opStartConfigurationRecorder = "StartConfigurationRecorder" + +// StartConfigurationRecorderRequest generates a request for the StartConfigurationRecorder operation. +func (c *ConfigService) StartConfigurationRecorderRequest(input *StartConfigurationRecorderInput) (req *request.Request, output *StartConfigurationRecorderOutput) { + op := &request.Operation{ + Name: opStartConfigurationRecorder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartConfigurationRecorderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StartConfigurationRecorderOutput{} + req.Data = output + return +} + +// Starts recording configurations of the AWS resources you have selected to +// record in your AWS account. +// +// You must have created at least one delivery channel to successfully start +// the configuration recorder. 
+func (c *ConfigService) StartConfigurationRecorder(input *StartConfigurationRecorderInput) (*StartConfigurationRecorderOutput, error) { + req, out := c.StartConfigurationRecorderRequest(input) + err := req.Send() + return out, err +} + +const opStopConfigurationRecorder = "StopConfigurationRecorder" + +// StopConfigurationRecorderRequest generates a request for the StopConfigurationRecorder operation. +func (c *ConfigService) StopConfigurationRecorderRequest(input *StopConfigurationRecorderInput) (req *request.Request, output *StopConfigurationRecorderOutput) { + op := &request.Operation{ + Name: opStopConfigurationRecorder, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopConfigurationRecorderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StopConfigurationRecorderOutput{} + req.Data = output + return +} + +// Stops recording configurations of the AWS resources you have selected to +// record in your AWS account. +func (c *ConfigService) StopConfigurationRecorder(input *StopConfigurationRecorderInput) (*StopConfigurationRecorderOutput, error) { + req, out := c.StopConfigurationRecorderRequest(input) + err := req.Send() + return out, err +} + +// Indicates whether an AWS resource or AWS Config rule is compliant and provides +// the number of contributors that affect the compliance. +type Compliance struct { + _ struct{} `type:"structure"` + + // The number of AWS resources or AWS Config rules that cause a result of NON_COMPLIANT, + // up to a maximum of 25. + ComplianceContributorCount *ComplianceContributorCount `type:"structure"` + + // Indicates whether an AWS resource or AWS Config rule is compliant. 
+ // + // A resource is compliant if it complies with all of the AWS Config rules + // that evaluate it, and it is noncompliant if it does not comply with one or + // more of these rules. + // + // A rule is compliant if all of the resources that the rule evaluates comply + // with it, and it is noncompliant if any of these resources do not comply. + ComplianceType *string `type:"string" enum:"ComplianceType"` +} + +// String returns the string representation +func (s Compliance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Compliance) GoString() string { + return s.String() +} + +// Indicates whether an AWS Config rule is compliant. A rule is compliant if +// all of the resources that the rule evaluated comply with it, and it is noncompliant +// if any of these resources do not comply. +type ComplianceByConfigRule struct { + _ struct{} `type:"structure"` + + // Indicates whether the AWS Config rule is compliant. + Compliance *Compliance `type:"structure"` + + // The name of the AWS Config rule. + ConfigRuleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ComplianceByConfigRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceByConfigRule) GoString() string { + return s.String() +} + +// Indicates whether an AWS resource that is evaluated according to one or more +// AWS Config rules is compliant. A resource is compliant if it complies with +// all of the rules that evaluate it, and it is noncompliant if it does not +// comply with one or more of these rules. +type ComplianceByResource struct { + _ struct{} `type:"structure"` + + // Indicates whether the AWS resource complies with all of the AWS Config rules + // that evaluated it. + Compliance *Compliance `type:"structure"` + + // The ID of the AWS resource that was evaluated. 
+ ResourceId *string `min:"1" type:"string"` + + // The type of the AWS resource that was evaluated. + ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ComplianceByResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceByResource) GoString() string { + return s.String() +} + +// The number of AWS resources or AWS Config rules responsible for the current +// compliance of the item, up to a maximum number. +type ComplianceContributorCount struct { + _ struct{} `type:"structure"` + + // Indicates whether the maximum count is reached. + CapExceeded *bool `type:"boolean"` + + // The number of AWS resources or AWS Config rules responsible for the current + // compliance of the item. + CappedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ComplianceContributorCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceContributorCount) GoString() string { + return s.String() +} + +// The number of AWS Config rules or AWS resources that are compliant and noncompliant, +// up to a maximum. +type ComplianceSummary struct { + _ struct{} `type:"structure"` + + // The time that AWS Config created the compliance summary. + ComplianceSummaryTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The number of AWS Config rules or AWS resources that are compliant, up to + // a maximum of 25 for rules and 100 for resources. + CompliantResourceCount *ComplianceContributorCount `type:"structure"` + + // The number of AWS Config rules or AWS resources that are noncompliant, up + // to a maximum of 25 for rules and 100 for resources. 
+ NonCompliantResourceCount *ComplianceContributorCount `type:"structure"` +} + +// String returns the string representation +func (s ComplianceSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceSummary) GoString() string { + return s.String() +} + +// The number of AWS resources of a specific type that are compliant or noncompliant, +// up to a maximum of 100 for each compliance. +type ComplianceSummaryByResourceType struct { + _ struct{} `type:"structure"` + + // The number of AWS resources that are compliant or noncompliant, up to a maximum + // of 100 for each compliance. + ComplianceSummary *ComplianceSummary `type:"structure"` + + // The type of AWS resource. + ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ComplianceSummaryByResourceType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceSummaryByResourceType) GoString() string { + return s.String() +} + +// A list that contains the status of the delivery of either the snapshot or +// the configuration history to the specified Amazon S3 bucket. +type ConfigExportDeliveryInfo struct { + _ struct{} `type:"structure"` + + // The time of the last attempted delivery. + LastAttemptTime *time.Time `locationName:"lastAttemptTime" type:"timestamp" timestampFormat:"unix"` + + // The error code from the last attempted delivery. + LastErrorCode *string `locationName:"lastErrorCode" type:"string"` + + // The error message from the last attempted delivery. + LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` + + // Status of the last attempted delivery. + LastStatus *string `locationName:"lastStatus" type:"string" enum:"DeliveryStatus"` + + // The time of the last successful delivery. 
+ LastSuccessfulTime *time.Time `locationName:"lastSuccessfulTime" type:"timestamp" timestampFormat:"unix"` + + // The time that the next delivery occurs. + NextDeliveryTime *time.Time `locationName:"nextDeliveryTime" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ConfigExportDeliveryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigExportDeliveryInfo) GoString() string { + return s.String() +} + +// An AWS Lambda function that evaluates configuration items to assess whether +// your AWS resources comply with your desired configurations. This function +// can run when AWS Config detects a configuration change or delivers a configuration +// snapshot. This function can evaluate any resource in the recording group. +// To define which of these are evaluated, specify a value for the Scope key. +// +// For more information about developing and using AWS Config rules, see Evaluating +// AWS Resource Configurations with AWS Config (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) +// in the AWS Config Developer Guide. +type ConfigRule struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Config rule. + ConfigRuleArn *string `type:"string"` + + // The ID of the AWS Config rule. + ConfigRuleId *string `type:"string"` + + // The name that you assign to the AWS Config rule. The name is required if + // you are adding a new rule. + ConfigRuleName *string `min:"1" type:"string"` + + // Indicates whether the AWS Config rule is active or currently being deleted + // by AWS Config. + // + // AWS Config sets the state of a rule to DELETING temporarily after you use + // the DeleteConfigRule request to delete the rule. After AWS Config finishes + // deleting a rule, the rule and all of its evaluations are erased and no longer + // available. 
+ // + // You cannot add a rule to AWS Config that has the state set to DELETING. + // If you want to delete a rule, you must use the DeleteConfigRule request. + ConfigRuleState *string `type:"string" enum:"ConfigRuleState"` + + // The description that you provide for the AWS Config rule. + Description *string `type:"string"` + + // A string in JSON format that is passed to the AWS Config rule Lambda function. + InputParameters *string `min:"1" type:"string"` + + // The maximum frequency at which the AWS Config rule runs evaluations. + // + // If your rule is periodic, meaning it runs an evaluation when AWS Config + // delivers a configuration snapshot, then it cannot run evaluations more frequently + // than AWS Config delivers the snapshots. For periodic rules, set the value + // of the MaximumExecutionFrequency key to be equal to or greater than the value + // of the deliveryFrequency key, which is part of ConfigSnapshotDeliveryProperties. + // To update the frequency with which AWS Config delivers your snapshots, use + // the PutDeliveryChannel action. + MaximumExecutionFrequency *string `type:"string" enum:"MaximumExecutionFrequency"` + + // Defines which resources the AWS Config rule evaluates. The scope can include + // one or more resource types, a combination of a tag key and value, or a combination + // of one resource type and one or more resource IDs. Specify a scope to constrain + // the resources that are evaluated. If you do not specify a scope, the AWS + // Config Rule evaluates all resources in the recording group. + Scope *Scope `type:"structure"` + + // Provides the rule owner (AWS or customer), the rule identifier, and the events + // that cause the function to evaluate your AWS resources. 
+ Source *Source `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ConfigRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigRule) GoString() string { + return s.String() +} + +// Status information for your AWS managed Config rules. The status includes +// information such as the last time the rule ran, the last time it failed, +// and the related error for the last failure. +// +// This action does not return status information about customer managed Config +// rules. +type ConfigRuleEvaluationStatus struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Config rule. + ConfigRuleArn *string `type:"string"` + + // The ID of the AWS Config rule. + ConfigRuleId *string `type:"string"` + + // The name of the AWS Config rule. + ConfigRuleName *string `min:"1" type:"string"` + + // The time that you first activated the AWS Config rule. + FirstActivatedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Indicates whether AWS Config has evaluated your resources against the rule + // at least once. + // + // true - AWS Config has evaluated your AWS resources against the rule at + // least once. false - AWS Config has not once finished evaluating your AWS + // resources against the rule. + FirstEvaluationStarted *bool `type:"boolean"` + + // The error code that AWS Config returned when the rule last failed. + LastErrorCode *string `type:"string"` + + // The error message that AWS Config returned when the rule last failed. + LastErrorMessage *string `type:"string"` + + // The time that AWS Config last failed to evaluate your AWS resources against + // the rule. + LastFailedEvaluationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The time that AWS Config last failed to invoke the AWS Config rule to evaluate + // your AWS resources. 
+ LastFailedInvocationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The time that AWS Config last successfully evaluated your AWS resources against + // the rule. + LastSuccessfulEvaluationTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The time that AWS Config last successfully invoked the AWS Config rule to + // evaluate your AWS resources. + LastSuccessfulInvocationTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ConfigRuleEvaluationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigRuleEvaluationStatus) GoString() string { + return s.String() +} + +// Options for how AWS Config delivers configuration snapshots to the Amazon +// S3 bucket in your delivery channel. +type ConfigSnapshotDeliveryProperties struct { + _ struct{} `type:"structure"` + + // The frequency with which a AWS Config recurringly delivers configuration + // snapshots. + DeliveryFrequency *string `locationName:"deliveryFrequency" type:"string" enum:"MaximumExecutionFrequency"` +} + +// String returns the string representation +func (s ConfigSnapshotDeliveryProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigSnapshotDeliveryProperties) GoString() string { + return s.String() +} + +// A list that contains the status of the delivery of the configuration stream +// notification to the Amazon SNS topic. +type ConfigStreamDeliveryInfo struct { + _ struct{} `type:"structure"` + + // The error code from the last attempted delivery. + LastErrorCode *string `locationName:"lastErrorCode" type:"string"` + + // The error message from the last attempted delivery. + LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` + + // Status of the last attempted delivery. 
+ // + // Note Providing an SNS topic on a DeliveryChannel (http://docs.aws.amazon.com/config/latest/APIReference/API_DeliveryChannel.html) + // for AWS Config is optional. If the SNS delivery is turned off, the last status + // will be Not_Applicable. + LastStatus *string `locationName:"lastStatus" type:"string" enum:"DeliveryStatus"` + + // The time from the last status change. + LastStatusChangeTime *time.Time `locationName:"lastStatusChangeTime" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ConfigStreamDeliveryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigStreamDeliveryInfo) GoString() string { + return s.String() +} + +// A list that contains detailed configurations of a specified resource. +// +// Currently, the list does not contain information about non-AWS components +// (for example, applications on your Amazon EC2 instances). +type ConfigurationItem struct { + _ struct{} `type:"structure"` + + // The 12 digit AWS account ID associated with the resource. + AccountId *string `locationName:"accountId" type:"string"` + + // The Amazon Resource Name (ARN) of the resource. + Arn *string `locationName:"arn" type:"string"` + + // The Availability Zone associated with the resource. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The region where the resource resides. + AwsRegion *string `locationName:"awsRegion" type:"string"` + + // The description of the resource configuration. + Configuration *string `locationName:"configuration" type:"string"` + + // The time when the configuration recording was initiated. + ConfigurationItemCaptureTime *time.Time `locationName:"configurationItemCaptureTime" type:"timestamp" timestampFormat:"unix"` + + // Unique MD5 hash that represents the configuration item's state. 
+ // + // You can use MD5 hash to compare the states of two or more configuration + // items that are associated with the same resource. + ConfigurationItemMD5Hash *string `locationName:"configurationItemMD5Hash" type:"string"` + + // The configuration item status. + ConfigurationItemStatus *string `locationName:"configurationItemStatus" type:"string" enum:"ConfigurationItemStatus"` + + // An identifier that indicates the ordering of the configuration items of a + // resource. + ConfigurationStateId *string `locationName:"configurationStateId" type:"string"` + + // A list of CloudTrail event IDs. + // + // A populated field indicates that the current configuration was initiated + // by the events recorded in the CloudTrail log. For more information about + // CloudTrail, see What is AWS CloudTrail? (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). + // + // An empty field indicates that the current configuration was not initiated + // by any event. + RelatedEvents []*string `locationName:"relatedEvents" type:"list"` + + // A list of related AWS resources. + Relationships []*Relationship `locationName:"relationships" type:"list"` + + // The time stamp when the resource was created. + ResourceCreationTime *time.Time `locationName:"resourceCreationTime" type:"timestamp" timestampFormat:"unix"` + + // The ID of the resource (for example., sg-xxxxxx). + ResourceId *string `locationName:"resourceId" type:"string"` + + // The custom name of the resource, if available. + ResourceName *string `locationName:"resourceName" type:"string"` + + // The type of AWS resource. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // A mapping of key value tags associated with the resource. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The version number of the resource configuration. 
+ Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s ConfigurationItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationItem) GoString() string { + return s.String() +} + +// An object that represents the recording of configuration changes of an AWS +// resource. +type ConfigurationRecorder struct { + _ struct{} `type:"structure"` + + // The name of the recorder. By default, AWS Config automatically assigns the + // name "default" when creating the configuration recorder. You cannot change + // the assigned name. + Name *string `locationName:"name" min:"1" type:"string"` + + // Specifies the types of AWS resource for which AWS Config records configuration + // changes. + RecordingGroup *RecordingGroup `locationName:"recordingGroup" type:"structure"` + + // Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources + // associated with the account. + RoleARN *string `locationName:"roleARN" type:"string"` +} + +// String returns the string representation +func (s ConfigurationRecorder) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationRecorder) GoString() string { + return s.String() +} + +// The current status of the configuration recorder. +type ConfigurationRecorderStatus struct { + _ struct{} `type:"structure"` + + // The error code indicating that the recording failed. + LastErrorCode *string `locationName:"lastErrorCode" type:"string"` + + // The message indicating that the recording failed due to an error. + LastErrorMessage *string `locationName:"lastErrorMessage" type:"string"` + + // The time the recorder was last started. + LastStartTime *time.Time `locationName:"lastStartTime" type:"timestamp" timestampFormat:"unix"` + + // The last (previous) status of the recorder. 
+ LastStatus *string `locationName:"lastStatus" type:"string" enum:"RecorderStatus"` + + // The time when the status was last changed. + LastStatusChangeTime *time.Time `locationName:"lastStatusChangeTime" type:"timestamp" timestampFormat:"unix"` + + // The time the recorder was last stopped. + LastStopTime *time.Time `locationName:"lastStopTime" type:"timestamp" timestampFormat:"unix"` + + // The name of the configuration recorder. + Name *string `locationName:"name" type:"string"` + + // Specifies whether the recorder is currently recording or not. + Recording *bool `locationName:"recording" type:"boolean"` +} + +// String returns the string representation +func (s ConfigurationRecorderStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationRecorderStatus) GoString() string { + return s.String() +} + +type DeleteConfigRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the AWS Config rule that you want to delete. + ConfigRuleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigRuleInput) GoString() string { + return s.String() +} + +type DeleteConfigRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigRuleOutput) GoString() string { + return s.String() +} + +// The input for the DeleteDeliveryChannel action. The action accepts the following +// data in JSON format. +type DeleteDeliveryChannelInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery channel to delete. 
+ DeliveryChannelName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeliveryChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeliveryChannelInput) GoString() string { + return s.String() +} + +type DeleteDeliveryChannelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDeliveryChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeliveryChannelOutput) GoString() string { + return s.String() +} + +// The input for the DeliverConfigSnapshot action. +type DeliverConfigSnapshotInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery channel through which the snapshot is delivered. + DeliveryChannelName *string `locationName:"deliveryChannelName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeliverConfigSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliverConfigSnapshotInput) GoString() string { + return s.String() +} + +// The output for the DeliverConfigSnapshot action in JSON format. +type DeliverConfigSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The ID of the snapshot that is being created. + ConfigSnapshotId *string `locationName:"configSnapshotId" type:"string"` +} + +// String returns the string representation +func (s DeliverConfigSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliverConfigSnapshotOutput) GoString() string { + return s.String() +} + +// A logical container used for storing the configuration changes of an AWS +// resource. 
+type DeliveryChannel struct { + _ struct{} `type:"structure"` + + // Options for how AWS Config delivers configuration snapshots to the Amazon + // S3 bucket in your delivery channel. + ConfigSnapshotDeliveryProperties *ConfigSnapshotDeliveryProperties `locationName:"configSnapshotDeliveryProperties" type:"structure"` + + // The name of the delivery channel. By default, AWS Config automatically assigns + // the name "default" when creating the delivery channel. You cannot change + // the assigned name. + Name *string `locationName:"name" min:"1" type:"string"` + + // The name of the Amazon S3 bucket used to store configuration history for + // the delivery channel. + S3BucketName *string `locationName:"s3BucketName" type:"string"` + + // The prefix for the specified Amazon S3 bucket. + S3KeyPrefix *string `locationName:"s3KeyPrefix" type:"string"` + + // The Amazon Resource Name (ARN) of the SNS topic that AWS Config delivers + // notifications to. + SnsTopicARN *string `locationName:"snsTopicARN" type:"string"` +} + +// String returns the string representation +func (s DeliveryChannel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliveryChannel) GoString() string { + return s.String() +} + +// The status of a specified delivery channel. +// +// Valid values: Success | Failure +type DeliveryChannelStatus struct { + _ struct{} `type:"structure"` + + // A list that contains the status of the delivery of the configuration history + // to the specified Amazon S3 bucket. + ConfigHistoryDeliveryInfo *ConfigExportDeliveryInfo `locationName:"configHistoryDeliveryInfo" type:"structure"` + + // A list containing the status of the delivery of the snapshot to the specified + // Amazon S3 bucket. 
+ ConfigSnapshotDeliveryInfo *ConfigExportDeliveryInfo `locationName:"configSnapshotDeliveryInfo" type:"structure"` + + // A list containing the status of the delivery of the configuration stream + // notification to the specified Amazon SNS topic. + ConfigStreamDeliveryInfo *ConfigStreamDeliveryInfo `locationName:"configStreamDeliveryInfo" type:"structure"` + + // The name of the delivery channel. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s DeliveryChannelStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliveryChannelStatus) GoString() string { + return s.String() +} + +type DescribeComplianceByConfigRuleInput struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. The valid values are Compliant and NonCompliant. + ComplianceTypes []*string `type:"list"` + + // Specify one or more AWS Config rule names to filter the results by rule. + ConfigRuleNames []*string `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeComplianceByConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComplianceByConfigRuleInput) GoString() string { + return s.String() +} + +type DescribeComplianceByConfigRuleOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether each of the specified AWS Config rules is compliant. + ComplianceByConfigRules []*ComplianceByConfigRule `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeComplianceByConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComplianceByConfigRuleOutput) GoString() string { + return s.String() +} + +type DescribeComplianceByResourceInput struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. The valid values are Compliant and NonCompliant. + ComplianceTypes []*string `type:"list"` + + // The maximum number of evaluation results returned on each page. The default + // is 10. You cannot specify a limit greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The ID of the AWS resource for which you want compliance information. You + // can specify only one resource ID. If you specify a resource ID, you must + // also specify a type for ResourceType. + ResourceId *string `min:"1" type:"string"` + + // The types of AWS resources for which you want compliance information; for + // example, AWS::EC2::Instance. For this action, you can specify that the resource + // type is an AWS account by specifying AWS::::Account. + ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeComplianceByResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComplianceByResourceInput) GoString() string { + return s.String() +} + +type DescribeComplianceByResourceOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified AWS resource complies with all of the AWS + // Config rules that evaluate it. 
+ ComplianceByResources []*ComplianceByResource `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeComplianceByResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeComplianceByResourceOutput) GoString() string { + return s.String() +} + +type DescribeConfigRuleEvaluationStatusInput struct { + _ struct{} `type:"structure"` + + // The name of the AWS managed Config rules for which you want status information. + // If you do not specify any names, AWS Config returns status information for + // all AWS managed Config rules that you use. + ConfigRuleNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeConfigRuleEvaluationStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigRuleEvaluationStatusInput) GoString() string { + return s.String() +} + +type DescribeConfigRuleEvaluationStatusOutput struct { + _ struct{} `type:"structure"` + + // Status information about your AWS managed Config rules. + ConfigRulesEvaluationStatus []*ConfigRuleEvaluationStatus `type:"list"` +} + +// String returns the string representation +func (s DescribeConfigRuleEvaluationStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigRuleEvaluationStatusOutput) GoString() string { + return s.String() +} + +type DescribeConfigRulesInput struct { + _ struct{} `type:"structure"` + + // The names of the AWS Config rules for which you want details. If you do not + // specify any names, AWS Config returns details for all your rules. 
+ ConfigRuleNames []*string `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConfigRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigRulesInput) GoString() string { + return s.String() +} + +type DescribeConfigRulesOutput struct { + _ struct{} `type:"structure"` + + // The details about your AWS Config rules. + ConfigRules []*ConfigRule `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConfigRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigRulesOutput) GoString() string { + return s.String() +} + +// The input for the DescribeConfigurationRecorderStatus action. +type DescribeConfigurationRecorderStatusInput struct { + _ struct{} `type:"structure"` + + // The name(s) of the configuration recorder. If the name is not specified, + // the action returns the current status of all the configuration recorders + // associated with the account. + ConfigurationRecorderNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeConfigurationRecorderStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationRecorderStatusInput) GoString() string { + return s.String() +} + +// The output for the DescribeConfigurationRecorderStatus action in JSON format. +type DescribeConfigurationRecorderStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains status of the specified recorders. 
+ ConfigurationRecordersStatus []*ConfigurationRecorderStatus `type:"list"` +} + +// String returns the string representation +func (s DescribeConfigurationRecorderStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationRecorderStatusOutput) GoString() string { + return s.String() +} + +// The input for the DescribeConfigurationRecorders action. +type DescribeConfigurationRecordersInput struct { + _ struct{} `type:"structure"` + + // A list of configuration recorder names. + ConfigurationRecorderNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeConfigurationRecordersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationRecordersInput) GoString() string { + return s.String() +} + +// The output for the DescribeConfigurationRecorders action. +type DescribeConfigurationRecordersOutput struct { + _ struct{} `type:"structure"` + + // A list that contains the descriptions of the specified configuration recorders. + ConfigurationRecorders []*ConfigurationRecorder `type:"list"` +} + +// String returns the string representation +func (s DescribeConfigurationRecordersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationRecordersOutput) GoString() string { + return s.String() +} + +// The input for the DeliveryChannelStatus action. +type DescribeDeliveryChannelStatusInput struct { + _ struct{} `type:"structure"` + + // A list of delivery channel names. 
+ DeliveryChannelNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeDeliveryChannelStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryChannelStatusInput) GoString() string { + return s.String() +} + +// The output for the DescribeDeliveryChannelStatus action. +type DescribeDeliveryChannelStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains the status of a specified delivery channel. + DeliveryChannelsStatus []*DeliveryChannelStatus `type:"list"` +} + +// String returns the string representation +func (s DescribeDeliveryChannelStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryChannelStatusOutput) GoString() string { + return s.String() +} + +// The input for the DescribeDeliveryChannels action. +type DescribeDeliveryChannelsInput struct { + _ struct{} `type:"structure"` + + // A list of delivery channel names. + DeliveryChannelNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeDeliveryChannelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryChannelsInput) GoString() string { + return s.String() +} + +// The output for the DescribeDeliveryChannels action. +type DescribeDeliveryChannelsOutput struct { + _ struct{} `type:"structure"` + + // A list that contains the descriptions of the specified delivery channel. 
+ DeliveryChannels []*DeliveryChannel `type:"list"` +} + +// String returns the string representation +func (s DescribeDeliveryChannelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryChannelsOutput) GoString() string { + return s.String() +} + +// Identifies an AWS resource and indicates whether it complies with the AWS +// Config rule that it was evaluated against. +type Evaluation struct { + _ struct{} `type:"structure"` + + // Supplementary information about how the evaluation determined the compliance. + Annotation *string `min:"1" type:"string"` + + // The ID of the AWS resource that was evaluated. + ComplianceResourceId *string `min:"1" type:"string" required:"true"` + + // The type of AWS resource that was evaluated. + ComplianceResourceType *string `min:"1" type:"string" required:"true"` + + // Indicates whether the AWS resource complies with the AWS Config rule that + // it was evaluated against. + ComplianceType *string `type:"string" required:"true" enum:"ComplianceType"` + + // The time of the event in AWS Config that triggered the evaluation. For event-based + // evaluations, the time indicates when AWS Config created the configuration + // item that triggered the evaluation. For periodic evaluations, the time indicates + // when AWS Config delivered the configuration snapshot that triggered the evaluation. + OrderingTimestamp *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s Evaluation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Evaluation) GoString() string { + return s.String() +} + +// The details of an AWS Config evaluation. Provides the AWS resource that was +// evaluated, the compliance of the resource, related timestamps, and supplementary +// information. 
+type EvaluationResult struct { + _ struct{} `type:"structure"` + + // Supplementary information about how the evaluation determined the compliance. + Annotation *string `min:"1" type:"string"` + + // Indicates whether the AWS resource complies with the AWS Config rule that + // evaluated it. + ComplianceType *string `type:"string" enum:"ComplianceType"` + + // The time when the AWS Config rule evaluated the AWS resource. + ConfigRuleInvokedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Uniquely identifies the evaluation result. + EvaluationResultIdentifier *EvaluationResultIdentifier `type:"structure"` + + // The time when AWS Config recorded the evaluation result. + ResultRecordedTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // An encrypted token that associates an evaluation with an AWS Config rule. + // The token identifies the rule, the AWS resource being evaluated, and the + // event that triggered the evaluation. + ResultToken *string `type:"string"` +} + +// String returns the string representation +func (s EvaluationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResult) GoString() string { + return s.String() +} + +// Uniquely identifies an evaluation result. +type EvaluationResultIdentifier struct { + _ struct{} `type:"structure"` + + // Identifies an AWS Config rule used to evaluate an AWS resource, and provides + // the type and ID of the evaluated resource. + EvaluationResultQualifier *EvaluationResultQualifier `type:"structure"` + + // The time of the event that triggered the evaluation of your AWS resources. + // The time can indicate when AWS Config delivered a configuration item change + // notification, or it can indicate when AWS Config delivered the configuration + // snapshot, depending on which event triggered the evaluation. 
+ OrderingTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s EvaluationResultIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResultIdentifier) GoString() string { + return s.String() +} + +// Identifies an AWS Config rule that evaluated an AWS resource, and provides +// the type and ID of the resource that the rule evaluated. +type EvaluationResultQualifier struct { + _ struct{} `type:"structure"` + + // The name of the AWS Config rule that was used in the evaluation. + ConfigRuleName *string `min:"1" type:"string"` + + // The ID of the evaluated AWS resource. + ResourceId *string `min:"1" type:"string"` + + // The type of AWS resource that was evaluated. + ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EvaluationResultQualifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResultQualifier) GoString() string { + return s.String() +} + +type GetComplianceDetailsByConfigRuleInput struct { + _ struct{} `type:"structure"` + + // Specify to filter the results by compliance. The valid values are Compliant, + // NonCompliant, and NotApplicable. + ComplianceTypes []*string `type:"list"` + + // The name of the AWS Config rule for which you want compliance information. + ConfigRuleName *string `min:"1" type:"string" required:"true"` + + // The maximum number of evaluation results returned on each page. The default + // is 10. You cannot specify a limit greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetComplianceDetailsByConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceDetailsByConfigRuleInput) GoString() string { + return s.String() +} + +type GetComplianceDetailsByConfigRuleOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the AWS resource complies with the specified AWS Config + // rule. + EvaluationResults []*EvaluationResult `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetComplianceDetailsByConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceDetailsByConfigRuleOutput) GoString() string { + return s.String() +} + +type GetComplianceDetailsByResourceInput struct { + _ struct{} `type:"structure"` + + // Specify to filter the results by compliance. The valid values are Compliant, + // NonCompliant, and NotApplicable. + ComplianceTypes []*string `type:"list"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The ID of the AWS resource for which you want compliance information. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The type of the AWS resource for which you want compliance information. 
+ ResourceType *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetComplianceDetailsByResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceDetailsByResourceInput) GoString() string { + return s.String() +} + +type GetComplianceDetailsByResourceOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified AWS resource complies each AWS Config rule. + EvaluationResults []*EvaluationResult `type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetComplianceDetailsByResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceDetailsByResourceOutput) GoString() string { + return s.String() +} + +type GetComplianceSummaryByConfigRuleInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetComplianceSummaryByConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceSummaryByConfigRuleInput) GoString() string { + return s.String() +} + +type GetComplianceSummaryByConfigRuleOutput struct { + _ struct{} `type:"structure"` + + // The number of AWS Config rules that are compliant and the number that are + // noncompliant, up to a maximum of 25 for each. 
+ ComplianceSummary *ComplianceSummary `type:"structure"` +} + +// String returns the string representation +func (s GetComplianceSummaryByConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceSummaryByConfigRuleOutput) GoString() string { + return s.String() +} + +type GetComplianceSummaryByResourceTypeInput struct { + _ struct{} `type:"structure"` + + // Specify one or more resource types to get the number of resources that are + // compliant and the number that are noncompliant for each resource type. + // + // For this request, you can specify an AWS resource type such as AWS::EC2::Instance, + // and you can specify that the resource type is an AWS account by specifying + // AWS::::Account. + ResourceTypes []*string `type:"list"` +} + +// String returns the string representation +func (s GetComplianceSummaryByResourceTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceSummaryByResourceTypeInput) GoString() string { + return s.String() +} + +type GetComplianceSummaryByResourceTypeOutput struct { + _ struct{} `type:"structure"` + + // The number of resources that are compliant and the number that are noncompliant. + // If one or more resource types were provided with the request, the numbers + // are returned for each resource type. The maximum number returned is 100. + ComplianceSummariesByResourceType []*ComplianceSummaryByResourceType `type:"list"` +} + +// String returns the string representation +func (s GetComplianceSummaryByResourceTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetComplianceSummaryByResourceTypeOutput) GoString() string { + return s.String() +} + +// The input for the GetResourceConfigHistory action. 
+type GetResourceConfigHistoryInput struct { + _ struct{} `type:"structure"` + + // The chronological order for configuration items listed. By default the results + // are listed in reverse chronological order. + ChronologicalOrder *string `locationName:"chronologicalOrder" type:"string" enum:"ChronologicalOrder"` + + // The time stamp that indicates an earlier time. If not specified, the action + // returns paginated results that contain configuration items that start from + // when the first configuration item was recorded. + EarlierTime *time.Time `locationName:"earlierTime" type:"timestamp" timestampFormat:"unix"` + + // The time stamp that indicates a later time. If not specified, current time + // is taken. + LaterTime *time.Time `locationName:"laterTime" type:"timestamp" timestampFormat:"unix"` + + // The maximum number of configuration items returned on each page. The default + // is 10. You cannot specify a limit greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `locationName:"limit" type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the resource (for example., sg-xxxxxx). + ResourceId *string `locationName:"resourceId" type:"string" required:"true"` + + // The resource type. + ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` +} + +// String returns the string representation +func (s GetResourceConfigHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourceConfigHistoryInput) GoString() string { + return s.String() +} + +// The output for the GetResourceConfigHistory action. 
+type GetResourceConfigHistoryOutput struct { + _ struct{} `type:"structure"` + + // A list that contains the configuration history of one or more resources. + ConfigurationItems []*ConfigurationItem `locationName:"configurationItems" type:"list"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetResourceConfigHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourceConfigHistoryOutput) GoString() string { + return s.String() +} + +type ListDiscoveredResourcesInput struct { + _ struct{} `type:"structure"` + + // Specifies whether AWS Config includes deleted resources in the results. By + // default, deleted resources are not included. + IncludeDeletedResources *bool `locationName:"includeDeletedResources" type:"boolean"` + + // The maximum number of resource identifiers returned on each page. The default + // is 100. You cannot specify a limit greater than 100. If you specify 0, AWS + // Config uses the default. + Limit *int64 `locationName:"limit" type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `locationName:"nextToken" type:"string"` + + // The IDs of only those resources that you want AWS Config to list in the response. + // If you do not specify this parameter, AWS Config lists all resources of the + // specified type that it has discovered. + ResourceIds []*string `locationName:"resourceIds" type:"list"` + + // The custom name of only those resources that you want AWS Config to list + // in the response. If you do not specify this parameter, AWS Config lists all + // resources of the specified type that it has discovered. 
+ ResourceName *string `locationName:"resourceName" type:"string"` + + // The type of resources that you want AWS Config to list in the response. + ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` +} + +// String returns the string representation +func (s ListDiscoveredResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDiscoveredResourcesInput) GoString() string { + return s.String() +} + +type ListDiscoveredResourcesOutput struct { + _ struct{} `type:"structure"` + + // The string that you use in a subsequent request to get the next page of results + // in a paginated response. + NextToken *string `locationName:"nextToken" type:"string"` + + // The details that identify a resource that is discovered by AWS Config, including + // the resource type, ID, and (if available) the custom resource name. + ResourceIdentifiers []*ResourceIdentifier `locationName:"resourceIdentifiers" type:"list"` +} + +// String returns the string representation +func (s ListDiscoveredResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDiscoveredResourcesOutput) GoString() string { + return s.String() +} + +type PutConfigRuleInput struct { + _ struct{} `type:"structure"` + + // An AWS Lambda function that evaluates configuration items to assess whether + // your AWS resources comply with your desired configurations. This function + // can run when AWS Config detects a configuration change or delivers a configuration + // snapshot. This function can evaluate any resource in the recording group. + // To define which of these are evaluated, specify a value for the Scope key. 
+ // + // For more information about developing and using AWS Config rules, see Evaluating + // AWS Resource Configurations with AWS Config (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config.html) + // in the AWS Config Developer Guide. + ConfigRule *ConfigRule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutConfigRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigRuleInput) GoString() string { + return s.String() +} + +type PutConfigRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigRuleOutput) GoString() string { + return s.String() +} + +// The input for the PutConfigurationRecorder action. +type PutConfigurationRecorderInput struct { + _ struct{} `type:"structure"` + + // The configuration recorder object that records each configuration change + // made to the resources. + ConfigurationRecorder *ConfigurationRecorder `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutConfigurationRecorderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigurationRecorderInput) GoString() string { + return s.String() +} + +type PutConfigurationRecorderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigurationRecorderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutConfigurationRecorderOutput) GoString() string { + return s.String() +} + +// The input for the PutDeliveryChannel action. 
+type PutDeliveryChannelInput struct { + _ struct{} `type:"structure"` + + // The configuration delivery channel object that delivers the configuration + // information to an Amazon S3 bucket, and to an Amazon SNS topic. + DeliveryChannel *DeliveryChannel `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutDeliveryChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDeliveryChannelInput) GoString() string { + return s.String() +} + +type PutDeliveryChannelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDeliveryChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDeliveryChannelOutput) GoString() string { + return s.String() +} + +type PutEvaluationsInput struct { + _ struct{} `type:"structure"` + + // The assessments that the AWS Lambda function performs. Each evaluation identifies + // an AWS resource and indicates whether it complies with the AWS Config rule + // that invokes the AWS Lambda function. + Evaluations []*Evaluation `type:"list"` + + // An encrypted token that associates an evaluation with an AWS Config rule. + // Identifies the rule and the event that triggered the evaluation + ResultToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutEvaluationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEvaluationsInput) GoString() string { + return s.String() +} + +type PutEvaluationsOutput struct { + _ struct{} `type:"structure"` + + // Requests that failed because of a client or server error. 
+ FailedEvaluations []*Evaluation `type:"list"` +} + +// String returns the string representation +func (s PutEvaluationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEvaluationsOutput) GoString() string { + return s.String() +} + +// Specifies the types of AWS resource for which AWS Config records configuration +// changes. +// +// In the recording group, you specify whether all supported types or specific +// types of resources are recorded. +// +// By default, AWS Config records configuration changes for all supported types +// of regional resources that AWS Config discovers in the region in which it +// is running. Regional resources are tied to a region and can be used only +// in that region. Examples of regional resources are EC2 instances and EBS +// volumes. +// +// You can also have AWS Config record configuration changes for supported +// types of global resources. Global resources are not tied to an individual +// region and can be used in all regions. +// +// The configuration details for any global resource are the same in all regions. +// If you customize AWS Config in multiple regions to record global resources, +// it will create multiple configuration items each time a global resource changes: +// one configuration item for each region. These configuration items will contain +// identical data. To prevent duplicate configuration items, you should consider +// customizing AWS Config in only one region to record global resources, unless +// you want the configuration items to be available in multiple regions. If +// you don't want AWS Config to record all resources, you can specify which +// types of resources it will record with the resourceTypes parameter. +// +// For a list of supported resource types, see Supported resource types (http://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources). 
+// +// For more information, see Selecting Which Resources AWS Config Records (http://docs.aws.amazon.com/config/latest/developerguide/select-resources.html). +type RecordingGroup struct { + _ struct{} `type:"structure"` + + // Specifies whether AWS Config records configuration changes for every supported + // type of regional resource. + // + // If you set this option to true, when AWS Config adds support for a new type + // of regional resource, it automatically starts recording resources of that + // type. + // + // If you set this option to true, you cannot enumerate a list of resourceTypes. + AllSupported *bool `locationName:"allSupported" type:"boolean"` + + // Specifies whether AWS Config includes all supported types of global resources + // with the resources that it records. + // + // Before you can set this option to true, you must set the allSupported option + // to true. + // + // If you set this option to true, when AWS Config adds support for a new type + // of global resource, it automatically starts recording resources of that type. + IncludeGlobalResourceTypes *bool `locationName:"includeGlobalResourceTypes" type:"boolean"` + + // A comma-separated list that specifies the types of AWS resources for which + // AWS Config records configuration changes (for example, AWS::EC2::Instance + // or AWS::CloudTrail::Trail). + // + // Before you can set this option to true, you must set the allSupported option + // to false. + // + // If you set this option to true, when AWS Config adds support for a new type + // of resource, it will not record resources of that type unless you manually + // add that type to your recording group. + // + // For a list of valid resourceTypes values, see the resourceType Value column + // in Supported AWS Resource Types (http://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources). 
+ ResourceTypes []*string `locationName:"resourceTypes" type:"list"` +} + +// String returns the string representation +func (s RecordingGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordingGroup) GoString() string { + return s.String() +} + +// The relationship of the related resource to the main resource. +type Relationship struct { + _ struct{} `type:"structure"` + + // The type of relationship with the related resource. + RelationshipName *string `locationName:"relationshipName" type:"string"` + + // The ID of the related resource (for example, sg-xxxxxx). + ResourceId *string `locationName:"resourceId" type:"string"` + + // The custom name of the related resource, if available. + ResourceName *string `locationName:"resourceName" type:"string"` + + // The resource type of the related resource. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` +} + +// String returns the string representation +func (s Relationship) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Relationship) GoString() string { + return s.String() +} + +// The details that identify a resource that is discovered by AWS Config, including +// the resource type, ID, and (if available) the custom resource name. +type ResourceIdentifier struct { + _ struct{} `type:"structure"` + + // The time that the resource was deleted. + ResourceDeletionTime *time.Time `locationName:"resourceDeletionTime" type:"timestamp" timestampFormat:"unix"` + + // The ID of the resource (for example., sg-xxxxxx). + ResourceId *string `locationName:"resourceId" type:"string"` + + // The custom name of the resource (if available). + ResourceName *string `locationName:"resourceName" type:"string"` + + // The type of resource. 
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` +} + +// String returns the string representation +func (s ResourceIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceIdentifier) GoString() string { + return s.String() +} + +// Defines which resources AWS Config evaluates against a rule. The scope can +// include one or more resource types, a combination of a tag key and value, +// or a combination of one resource type and one or more resource IDs. Specify +// a scope to constrain the resources to be evaluated. If you do not specify +// a scope, all resources in your recording group are evaluated against the +// rule. +type Scope struct { + _ struct{} `type:"structure"` + + // The IDs of only those AWS resources that you want AWS Config to evaluate + // against the rule. If you specify a resource ID, you must specify one resource + // type for ComplianceResourceTypes. + ComplianceResourceId *string `min:"1" type:"string"` + + // The resource types of only those AWS resources that you want AWS Config to + // evaluate against the rule. You can specify only one type if you also specify + // resource IDs for ComplianceResourceId. + ComplianceResourceTypes []*string `type:"list"` + + // The tag key that is applied to only those AWS resources that you want AWS + // Config to evaluate against the rule. + TagKey *string `min:"1" type:"string"` + + // The tag value applied to only those AWS resources that you want AWS Config + // to evaluate against the rule. If you specify a value for TagValue, you must + // also specify a value for TagKey. 
+ TagValue *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Scope) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Scope) GoString() string { + return s.String() +} + +// Provides the AWS Config rule owner (AWS or customer), the rule identifier, +// and the events that trigger the evaluation of your AWS resources. +type Source struct { + _ struct{} `type:"structure"` + + // Indicates whether AWS or the customer owns and manages the AWS Config rule. + Owner *string `type:"string" enum:"Owner"` + + // Provides the source and type of the event that causes AWS Config to evaluate + // your AWS resources. + SourceDetails []*SourceDetail `type:"list"` + + // For AWS managed Config rules, a pre-defined identifier from a list. To reference + // the list, see Using AWS Managed Config Rules (http://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). + // + // For customer managed Config rules, the identifier is the Amazon Resource + // Name (ARN) of the rule's AWS Lambda function. + SourceIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Source) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Source) GoString() string { + return s.String() +} + +// Provides the source and type of the event that triggers AWS Config to evaluate +// your AWS resources against a rule. +type SourceDetail struct { + _ struct{} `type:"structure"` + + // The source of the event, such as an AWS service, that triggers AWS Config + // to evaluate your AWS resources. + EventSource *string `type:"string" enum:"EventSource"` + + // The type of SNS message that triggers AWS Config to run an evaluation. 
For + // evaluations that are initiated when AWS Config delivers a configuration item + // change notification, you must use ConfigurationItemChangeNotification. For + // evaluations that are initiated when AWS Config delivers a configuration snapshot, + // you must use ConfigurationSnapshotDeliveryCompleted. + MessageType *string `type:"string" enum:"MessageType"` +} + +// String returns the string representation +func (s SourceDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceDetail) GoString() string { + return s.String() +} + +// The input for the StartConfigurationRecorder action. +type StartConfigurationRecorderInput struct { + _ struct{} `type:"structure"` + + // The name of the recorder object that records each configuration change made + // to the resources. + ConfigurationRecorderName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartConfigurationRecorderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartConfigurationRecorderInput) GoString() string { + return s.String() +} + +type StartConfigurationRecorderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartConfigurationRecorderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartConfigurationRecorderOutput) GoString() string { + return s.String() +} + +// The input for the StopConfigurationRecorder action. +type StopConfigurationRecorderInput struct { + _ struct{} `type:"structure"` + + // The name of the recorder object that records each configuration change made + // to the resources. 
+ ConfigurationRecorderName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopConfigurationRecorderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopConfigurationRecorderInput) GoString() string { + return s.String() +} + +type StopConfigurationRecorderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopConfigurationRecorderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopConfigurationRecorderOutput) GoString() string { + return s.String() +} + +const ( + // @enum ChronologicalOrder + ChronologicalOrderReverse = "Reverse" + // @enum ChronologicalOrder + ChronologicalOrderForward = "Forward" +) + +const ( + // @enum ComplianceType + ComplianceTypeCompliant = "COMPLIANT" + // @enum ComplianceType + ComplianceTypeNonCompliant = "NON_COMPLIANT" + // @enum ComplianceType + ComplianceTypeNotApplicable = "NOT_APPLICABLE" + // @enum ComplianceType + ComplianceTypeInsufficientData = "INSUFFICIENT_DATA" +) + +const ( + // @enum ConfigRuleState + ConfigRuleStateActive = "ACTIVE" + // @enum ConfigRuleState + ConfigRuleStateDeleting = "DELETING" +) + +const ( + // @enum ConfigurationItemStatus + ConfigurationItemStatusOk = "Ok" + // @enum ConfigurationItemStatus + ConfigurationItemStatusFailed = "Failed" + // @enum ConfigurationItemStatus + ConfigurationItemStatusDiscovered = "Discovered" + // @enum ConfigurationItemStatus + ConfigurationItemStatusDeleted = "Deleted" +) + +const ( + // @enum DeliveryStatus + DeliveryStatusSuccess = "Success" + // @enum DeliveryStatus + DeliveryStatusFailure = "Failure" + // @enum DeliveryStatus + DeliveryStatusNotApplicable = "Not_Applicable" +) + +const ( + // @enum EventSource + EventSourceAwsConfig = "aws.config" +) + +const ( + // @enum MaximumExecutionFrequency + 
MaximumExecutionFrequencyOneHour = "One_Hour" + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencyThreeHours = "Three_Hours" + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencySixHours = "Six_Hours" + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencyTwelveHours = "Twelve_Hours" + // @enum MaximumExecutionFrequency + MaximumExecutionFrequencyTwentyFourHours = "TwentyFour_Hours" +) + +const ( + // @enum MessageType + MessageTypeConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" + // @enum MessageType + MessageTypeConfigurationSnapshotDeliveryCompleted = "ConfigurationSnapshotDeliveryCompleted" +) + +const ( + // @enum Owner + OwnerCustomLambda = "CUSTOM_LAMBDA" + // @enum Owner + OwnerAws = "AWS" +) + +const ( + // @enum RecorderStatus + RecorderStatusPending = "Pending" + // @enum RecorderStatus + RecorderStatusSuccess = "Success" + // @enum RecorderStatus + RecorderStatusFailure = "Failure" +) + +const ( + // @enum ResourceType + ResourceTypeAwsEc2CustomerGateway = "AWS::EC2::CustomerGateway" + // @enum ResourceType + ResourceTypeAwsEc2Eip = "AWS::EC2::EIP" + // @enum ResourceType + ResourceTypeAwsEc2Host = "AWS::EC2::Host" + // @enum ResourceType + ResourceTypeAwsEc2Instance = "AWS::EC2::Instance" + // @enum ResourceType + ResourceTypeAwsEc2InternetGateway = "AWS::EC2::InternetGateway" + // @enum ResourceType + ResourceTypeAwsEc2NetworkAcl = "AWS::EC2::NetworkAcl" + // @enum ResourceType + ResourceTypeAwsEc2NetworkInterface = "AWS::EC2::NetworkInterface" + // @enum ResourceType + ResourceTypeAwsEc2RouteTable = "AWS::EC2::RouteTable" + // @enum ResourceType + ResourceTypeAwsEc2SecurityGroup = "AWS::EC2::SecurityGroup" + // @enum ResourceType + ResourceTypeAwsEc2Subnet = "AWS::EC2::Subnet" + // @enum ResourceType + ResourceTypeAwsCloudTrailTrail = "AWS::CloudTrail::Trail" + // @enum ResourceType + ResourceTypeAwsEc2Volume = "AWS::EC2::Volume" + // @enum ResourceType + ResourceTypeAwsEc2Vpc = 
"AWS::EC2::VPC" + // @enum ResourceType + ResourceTypeAwsEc2Vpnconnection = "AWS::EC2::VPNConnection" + // @enum ResourceType + ResourceTypeAwsEc2Vpngateway = "AWS::EC2::VPNGateway" + // @enum ResourceType + ResourceTypeAwsIamGroup = "AWS::IAM::Group" + // @enum ResourceType + ResourceTypeAwsIamPolicy = "AWS::IAM::Policy" + // @enum ResourceType + ResourceTypeAwsIamRole = "AWS::IAM::Role" + // @enum ResourceType + ResourceTypeAwsIamUser = "AWS::IAM::User" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/configserviceiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/configserviceiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/configserviceiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/configserviceiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,108 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package configserviceiface provides an interface for the AWS Config. +package configserviceiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/configservice" +) + +// ConfigServiceAPI is the interface type for configservice.ConfigService. 
+type ConfigServiceAPI interface { + DeleteConfigRuleRequest(*configservice.DeleteConfigRuleInput) (*request.Request, *configservice.DeleteConfigRuleOutput) + + DeleteConfigRule(*configservice.DeleteConfigRuleInput) (*configservice.DeleteConfigRuleOutput, error) + + DeleteDeliveryChannelRequest(*configservice.DeleteDeliveryChannelInput) (*request.Request, *configservice.DeleteDeliveryChannelOutput) + + DeleteDeliveryChannel(*configservice.DeleteDeliveryChannelInput) (*configservice.DeleteDeliveryChannelOutput, error) + + DeliverConfigSnapshotRequest(*configservice.DeliverConfigSnapshotInput) (*request.Request, *configservice.DeliverConfigSnapshotOutput) + + DeliverConfigSnapshot(*configservice.DeliverConfigSnapshotInput) (*configservice.DeliverConfigSnapshotOutput, error) + + DescribeComplianceByConfigRuleRequest(*configservice.DescribeComplianceByConfigRuleInput) (*request.Request, *configservice.DescribeComplianceByConfigRuleOutput) + + DescribeComplianceByConfigRule(*configservice.DescribeComplianceByConfigRuleInput) (*configservice.DescribeComplianceByConfigRuleOutput, error) + + DescribeComplianceByResourceRequest(*configservice.DescribeComplianceByResourceInput) (*request.Request, *configservice.DescribeComplianceByResourceOutput) + + DescribeComplianceByResource(*configservice.DescribeComplianceByResourceInput) (*configservice.DescribeComplianceByResourceOutput, error) + + DescribeConfigRuleEvaluationStatusRequest(*configservice.DescribeConfigRuleEvaluationStatusInput) (*request.Request, *configservice.DescribeConfigRuleEvaluationStatusOutput) + + DescribeConfigRuleEvaluationStatus(*configservice.DescribeConfigRuleEvaluationStatusInput) (*configservice.DescribeConfigRuleEvaluationStatusOutput, error) + + DescribeConfigRulesRequest(*configservice.DescribeConfigRulesInput) (*request.Request, *configservice.DescribeConfigRulesOutput) + + DescribeConfigRules(*configservice.DescribeConfigRulesInput) (*configservice.DescribeConfigRulesOutput, error) + + 
DescribeConfigurationRecorderStatusRequest(*configservice.DescribeConfigurationRecorderStatusInput) (*request.Request, *configservice.DescribeConfigurationRecorderStatusOutput) + + DescribeConfigurationRecorderStatus(*configservice.DescribeConfigurationRecorderStatusInput) (*configservice.DescribeConfigurationRecorderStatusOutput, error) + + DescribeConfigurationRecordersRequest(*configservice.DescribeConfigurationRecordersInput) (*request.Request, *configservice.DescribeConfigurationRecordersOutput) + + DescribeConfigurationRecorders(*configservice.DescribeConfigurationRecordersInput) (*configservice.DescribeConfigurationRecordersOutput, error) + + DescribeDeliveryChannelStatusRequest(*configservice.DescribeDeliveryChannelStatusInput) (*request.Request, *configservice.DescribeDeliveryChannelStatusOutput) + + DescribeDeliveryChannelStatus(*configservice.DescribeDeliveryChannelStatusInput) (*configservice.DescribeDeliveryChannelStatusOutput, error) + + DescribeDeliveryChannelsRequest(*configservice.DescribeDeliveryChannelsInput) (*request.Request, *configservice.DescribeDeliveryChannelsOutput) + + DescribeDeliveryChannels(*configservice.DescribeDeliveryChannelsInput) (*configservice.DescribeDeliveryChannelsOutput, error) + + GetComplianceDetailsByConfigRuleRequest(*configservice.GetComplianceDetailsByConfigRuleInput) (*request.Request, *configservice.GetComplianceDetailsByConfigRuleOutput) + + GetComplianceDetailsByConfigRule(*configservice.GetComplianceDetailsByConfigRuleInput) (*configservice.GetComplianceDetailsByConfigRuleOutput, error) + + GetComplianceDetailsByResourceRequest(*configservice.GetComplianceDetailsByResourceInput) (*request.Request, *configservice.GetComplianceDetailsByResourceOutput) + + GetComplianceDetailsByResource(*configservice.GetComplianceDetailsByResourceInput) (*configservice.GetComplianceDetailsByResourceOutput, error) + + GetComplianceSummaryByConfigRuleRequest(*configservice.GetComplianceSummaryByConfigRuleInput) (*request.Request, 
*configservice.GetComplianceSummaryByConfigRuleOutput) + + GetComplianceSummaryByConfigRule(*configservice.GetComplianceSummaryByConfigRuleInput) (*configservice.GetComplianceSummaryByConfigRuleOutput, error) + + GetComplianceSummaryByResourceTypeRequest(*configservice.GetComplianceSummaryByResourceTypeInput) (*request.Request, *configservice.GetComplianceSummaryByResourceTypeOutput) + + GetComplianceSummaryByResourceType(*configservice.GetComplianceSummaryByResourceTypeInput) (*configservice.GetComplianceSummaryByResourceTypeOutput, error) + + GetResourceConfigHistoryRequest(*configservice.GetResourceConfigHistoryInput) (*request.Request, *configservice.GetResourceConfigHistoryOutput) + + GetResourceConfigHistory(*configservice.GetResourceConfigHistoryInput) (*configservice.GetResourceConfigHistoryOutput, error) + + GetResourceConfigHistoryPages(*configservice.GetResourceConfigHistoryInput, func(*configservice.GetResourceConfigHistoryOutput, bool) bool) error + + ListDiscoveredResourcesRequest(*configservice.ListDiscoveredResourcesInput) (*request.Request, *configservice.ListDiscoveredResourcesOutput) + + ListDiscoveredResources(*configservice.ListDiscoveredResourcesInput) (*configservice.ListDiscoveredResourcesOutput, error) + + PutConfigRuleRequest(*configservice.PutConfigRuleInput) (*request.Request, *configservice.PutConfigRuleOutput) + + PutConfigRule(*configservice.PutConfigRuleInput) (*configservice.PutConfigRuleOutput, error) + + PutConfigurationRecorderRequest(*configservice.PutConfigurationRecorderInput) (*request.Request, *configservice.PutConfigurationRecorderOutput) + + PutConfigurationRecorder(*configservice.PutConfigurationRecorderInput) (*configservice.PutConfigurationRecorderOutput, error) + + PutDeliveryChannelRequest(*configservice.PutDeliveryChannelInput) (*request.Request, *configservice.PutDeliveryChannelOutput) + + PutDeliveryChannel(*configservice.PutDeliveryChannelInput) (*configservice.PutDeliveryChannelOutput, error) + + 
PutEvaluationsRequest(*configservice.PutEvaluationsInput) (*request.Request, *configservice.PutEvaluationsOutput) + + PutEvaluations(*configservice.PutEvaluationsInput) (*configservice.PutEvaluationsOutput, error) + + StartConfigurationRecorderRequest(*configservice.StartConfigurationRecorderInput) (*request.Request, *configservice.StartConfigurationRecorderOutput) + + StartConfigurationRecorder(*configservice.StartConfigurationRecorderInput) (*configservice.StartConfigurationRecorderOutput, error) + + StopConfigurationRecorderRequest(*configservice.StopConfigurationRecorderInput) (*request.Request, *configservice.StopConfigurationRecorderOutput) + + StopConfigurationRecorder(*configservice.StopConfigurationRecorderInput) (*configservice.StopConfigurationRecorderOutput, error) +} + +var _ ConfigServiceAPI = (*configservice.ConfigService)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,571 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package configservice_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/configservice" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleConfigService_DeleteConfigRule() { + svc := configservice.New(session.New()) + + params := &configservice.DeleteConfigRuleInput{ + ConfigRuleName: aws.String("StringWithCharLimit64"), // Required + } + resp, err := svc.DeleteConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DeleteDeliveryChannel() { + svc := configservice.New(session.New()) + + params := &configservice.DeleteDeliveryChannelInput{ + DeliveryChannelName: aws.String("ChannelName"), // Required + } + resp, err := svc.DeleteDeliveryChannel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DeliverConfigSnapshot() { + svc := configservice.New(session.New()) + + params := &configservice.DeliverConfigSnapshotInput{ + DeliveryChannelName: aws.String("ChannelName"), // Required + } + resp, err := svc.DeliverConfigSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeComplianceByConfigRule() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeComplianceByConfigRuleInput{ + ComplianceTypes: []*string{ + aws.String("ComplianceType"), // Required + // More values... 
+ }, + ConfigRuleNames: []*string{ + aws.String("StringWithCharLimit64"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.DescribeComplianceByConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeComplianceByResource() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeComplianceByResourceInput{ + ComplianceTypes: []*string{ + aws.String("ComplianceType"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + ResourceId: aws.String("StringWithCharLimit256"), + ResourceType: aws.String("StringWithCharLimit256"), + } + resp, err := svc.DescribeComplianceByResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeConfigRuleEvaluationStatus() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeConfigRuleEvaluationStatusInput{ + ConfigRuleNames: []*string{ + aws.String("StringWithCharLimit64"), // Required + // More values... + }, + } + resp, err := svc.DescribeConfigRuleEvaluationStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeConfigRules() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeConfigRulesInput{ + ConfigRuleNames: []*string{ + aws.String("StringWithCharLimit64"), // Required + // More values... 
+ }, + NextToken: aws.String("String"), + } + resp, err := svc.DescribeConfigRules(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeConfigurationRecorderStatus() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeConfigurationRecorderStatusInput{ + ConfigurationRecorderNames: []*string{ + aws.String("RecorderName"), // Required + // More values... + }, + } + resp, err := svc.DescribeConfigurationRecorderStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeConfigurationRecorders() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeConfigurationRecordersInput{ + ConfigurationRecorderNames: []*string{ + aws.String("RecorderName"), // Required + // More values... + }, + } + resp, err := svc.DescribeConfigurationRecorders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_DescribeDeliveryChannelStatus() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeDeliveryChannelStatusInput{ + DeliveryChannelNames: []*string{ + aws.String("ChannelName"), // Required + // More values... + }, + } + resp, err := svc.DescribeDeliveryChannelStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleConfigService_DescribeDeliveryChannels() { + svc := configservice.New(session.New()) + + params := &configservice.DescribeDeliveryChannelsInput{ + DeliveryChannelNames: []*string{ + aws.String("ChannelName"), // Required + // More values... + }, + } + resp, err := svc.DescribeDeliveryChannels(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetComplianceDetailsByConfigRule() { + svc := configservice.New(session.New()) + + params := &configservice.GetComplianceDetailsByConfigRuleInput{ + ConfigRuleName: aws.String("StringWithCharLimit64"), // Required + ComplianceTypes: []*string{ + aws.String("ComplianceType"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.GetComplianceDetailsByConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetComplianceDetailsByResource() { + svc := configservice.New(session.New()) + + params := &configservice.GetComplianceDetailsByResourceInput{ + ResourceId: aws.String("StringWithCharLimit256"), // Required + ResourceType: aws.String("StringWithCharLimit256"), // Required + ComplianceTypes: []*string{ + aws.String("ComplianceType"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.GetComplianceDetailsByResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleConfigService_GetComplianceSummaryByConfigRule() { + svc := configservice.New(session.New()) + + var params *configservice.GetComplianceSummaryByConfigRuleInput + resp, err := svc.GetComplianceSummaryByConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetComplianceSummaryByResourceType() { + svc := configservice.New(session.New()) + + params := &configservice.GetComplianceSummaryByResourceTypeInput{ + ResourceTypes: []*string{ + aws.String("StringWithCharLimit256"), // Required + // More values... + }, + } + resp, err := svc.GetComplianceSummaryByResourceType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_GetResourceConfigHistory() { + svc := configservice.New(session.New()) + + params := &configservice.GetResourceConfigHistoryInput{ + ResourceId: aws.String("ResourceId"), // Required + ResourceType: aws.String("ResourceType"), // Required + ChronologicalOrder: aws.String("ChronologicalOrder"), + EarlierTime: aws.Time(time.Now()), + LaterTime: aws.Time(time.Now()), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.GetResourceConfigHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleConfigService_ListDiscoveredResources() { + svc := configservice.New(session.New()) + + params := &configservice.ListDiscoveredResourcesInput{ + ResourceType: aws.String("ResourceType"), // Required + IncludeDeletedResources: aws.Bool(true), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + ResourceIds: []*string{ + aws.String("ResourceId"), // Required + // More values... + }, + ResourceName: aws.String("ResourceName"), + } + resp, err := svc.ListDiscoveredResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_PutConfigRule() { + svc := configservice.New(session.New()) + + params := &configservice.PutConfigRuleInput{ + ConfigRule: &configservice.ConfigRule{ // Required + Source: &configservice.Source{ // Required + Owner: aws.String("Owner"), + SourceDetails: []*configservice.SourceDetail{ + { // Required + EventSource: aws.String("EventSource"), + MessageType: aws.String("MessageType"), + }, + // More values... + }, + SourceIdentifier: aws.String("StringWithCharLimit256"), + }, + ConfigRuleArn: aws.String("String"), + ConfigRuleId: aws.String("String"), + ConfigRuleName: aws.String("StringWithCharLimit64"), + ConfigRuleState: aws.String("ConfigRuleState"), + Description: aws.String("EmptiableStringWithCharLimit256"), + InputParameters: aws.String("StringWithCharLimit256"), + MaximumExecutionFrequency: aws.String("MaximumExecutionFrequency"), + Scope: &configservice.Scope{ + ComplianceResourceId: aws.String("StringWithCharLimit256"), + ComplianceResourceTypes: []*string{ + aws.String("StringWithCharLimit256"), // Required + // More values... 
+ }, + TagKey: aws.String("StringWithCharLimit128"), + TagValue: aws.String("StringWithCharLimit256"), + }, + }, + } + resp, err := svc.PutConfigRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_PutConfigurationRecorder() { + svc := configservice.New(session.New()) + + params := &configservice.PutConfigurationRecorderInput{ + ConfigurationRecorder: &configservice.ConfigurationRecorder{ // Required + Name: aws.String("RecorderName"), + RecordingGroup: &configservice.RecordingGroup{ + AllSupported: aws.Bool(true), + IncludeGlobalResourceTypes: aws.Bool(true), + ResourceTypes: []*string{ + aws.String("ResourceType"), // Required + // More values... + }, + }, + RoleARN: aws.String("String"), + }, + } + resp, err := svc.PutConfigurationRecorder(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_PutDeliveryChannel() { + svc := configservice.New(session.New()) + + params := &configservice.PutDeliveryChannelInput{ + DeliveryChannel: &configservice.DeliveryChannel{ // Required + ConfigSnapshotDeliveryProperties: &configservice.ConfigSnapshotDeliveryProperties{ + DeliveryFrequency: aws.String("MaximumExecutionFrequency"), + }, + Name: aws.String("ChannelName"), + S3BucketName: aws.String("String"), + S3KeyPrefix: aws.String("String"), + SnsTopicARN: aws.String("String"), + }, + } + resp, err := svc.PutDeliveryChannel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleConfigService_PutEvaluations() { + svc := configservice.New(session.New()) + + params := &configservice.PutEvaluationsInput{ + ResultToken: aws.String("String"), // Required + Evaluations: []*configservice.Evaluation{ + { // Required + ComplianceResourceId: aws.String("StringWithCharLimit256"), // Required + ComplianceResourceType: aws.String("StringWithCharLimit256"), // Required + ComplianceType: aws.String("ComplianceType"), // Required + OrderingTimestamp: aws.Time(time.Now()), // Required + Annotation: aws.String("StringWithCharLimit256"), + }, + // More values... + }, + } + resp, err := svc.PutEvaluations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_StartConfigurationRecorder() { + svc := configservice.New(session.New()) + + params := &configservice.StartConfigurationRecorderInput{ + ConfigurationRecorderName: aws.String("RecorderName"), // Required + } + resp, err := svc.StartConfigurationRecorder(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleConfigService_StopConfigurationRecorder() { + svc := configservice.New(session.New()) + + params := &configservice.StopConfigurationRecorderInput{ + ConfigurationRecorderName: aws.String("RecorderName"), // Required + } + resp, err := svc.StopConfigurationRecorder(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/configservice/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,111 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package configservice + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS Config provides a way to keep track of the configurations of all the +// AWS resources associated with your AWS account. You can use AWS Config to +// get the current and historical configurations of each AWS resource and also +// to get information about the relationship between the resources. An AWS resource +// can be an Amazon Compute Cloud (Amazon EC2) instance, an Elastic Block Store +// (EBS) volume, an Elastic network Interface (ENI), or a security group. For +// a complete list of resources currently supported by AWS Config, see Supported +// AWS Resources (http://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources). +// +// You can access and manage AWS Config through the AWS Management Console, +// the AWS Command Line Interface (AWS CLI), the AWS Config API, or the AWS +// SDKs for AWS Config +// +// This reference guide contains documentation for the AWS Config API and the +// AWS CLI commands that you can use to manage AWS Config. 
+// +// The AWS Config API uses the Signature Version 4 protocol for signing requests. +// For more information about how to sign a request with this protocol, see +// Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// For detailed information about AWS Config features and their associated +// actions or commands, as well as how to work with AWS Management Console, +// see What Is AWS Config? (http://docs.aws.amazon.com/config/latest/developerguide/WhatIsConfig.html) +// in the AWS Config Developer Guide. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type ConfigService struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "config" + +// New creates a new instance of the ConfigService client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ConfigService client from just a session. +// svc := configservice.New(mySession) +// +// // Create a ConfigService client with additional configuration +// svc := configservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ConfigService { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ConfigService { + svc := &ConfigService{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-11-12", + JSONVersion: "1.1", + TargetPrefix: "StarlingDoveService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ConfigService operation and runs any +// custom request initialization. +func (c *ConfigService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1955 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package datapipeline provides a client for AWS Data Pipeline. 
+package datapipeline + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opActivatePipeline = "ActivatePipeline" + +// ActivatePipelineRequest generates a request for the ActivatePipeline operation. +func (c *DataPipeline) ActivatePipelineRequest(input *ActivatePipelineInput) (req *request.Request, output *ActivatePipelineOutput) { + op := &request.Operation{ + Name: opActivatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ActivatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &ActivatePipelineOutput{} + req.Data = output + return +} + +// Validates the specified pipeline and starts processing pipeline tasks. If +// the pipeline does not pass validation, activation fails. +// +// If you need to pause the pipeline to investigate an issue with a component, +// such as a data source or script, call DeactivatePipeline. +// +// To activate a finished pipeline, modify the end date for the pipeline and +// then activate it. +func (c *DataPipeline) ActivatePipeline(input *ActivatePipelineInput) (*ActivatePipelineOutput, error) { + req, out := c.ActivatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opAddTags = "AddTags" + +// AddTagsRequest generates a request for the AddTags operation. +func (c *DataPipeline) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds or modifies tags for the specified pipeline. 
+func (c *DataPipeline) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreatePipeline = "CreatePipeline" + +// CreatePipelineRequest generates a request for the CreatePipeline operation. +func (c *DataPipeline) CreatePipelineRequest(input *CreatePipelineInput) (req *request.Request, output *CreatePipelineOutput) { + op := &request.Operation{ + Name: opCreatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePipelineOutput{} + req.Data = output + return +} + +// Creates a new, empty pipeline. Use PutPipelineDefinition to populate the +// pipeline. +func (c *DataPipeline) CreatePipeline(input *CreatePipelineInput) (*CreatePipelineOutput, error) { + req, out := c.CreatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDeactivatePipeline = "DeactivatePipeline" + +// DeactivatePipelineRequest generates a request for the DeactivatePipeline operation. +func (c *DataPipeline) DeactivatePipelineRequest(input *DeactivatePipelineInput) (req *request.Request, output *DeactivatePipelineOutput) { + op := &request.Operation{ + Name: opDeactivatePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeactivatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &DeactivatePipelineOutput{} + req.Data = output + return +} + +// Deactivates the specified running pipeline. The pipeline is set to the DEACTIVATING +// state until the deactivation process completes. +// +// To resume a deactivated pipeline, use ActivatePipeline. By default, the +// pipeline resumes from the last completed execution. Optionally, you can specify +// the date and time to resume the pipeline. 
+func (c *DataPipeline) DeactivatePipeline(input *DeactivatePipelineInput) (*DeactivatePipelineOutput, error) { + req, out := c.DeactivatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDeletePipeline = "DeletePipeline" + +// DeletePipelineRequest generates a request for the DeletePipeline operation. +func (c *DataPipeline) DeletePipelineRequest(input *DeletePipelineInput) (req *request.Request, output *DeletePipelineOutput) { + op := &request.Operation{ + Name: opDeletePipeline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePipelineInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePipelineOutput{} + req.Data = output + return +} + +// Deletes a pipeline, its pipeline definition, and its run history. AWS Data +// Pipeline attempts to cancel instances associated with the pipeline that are +// currently being processed by task runners. +// +// Deleting a pipeline cannot be undone. You cannot query or restore a deleted +// pipeline. To temporarily pause a pipeline instead of deleting it, call SetStatus +// with the status set to PAUSE on individual components. Components that are +// paused by SetStatus can be resumed. +func (c *DataPipeline) DeletePipeline(input *DeletePipelineInput) (*DeletePipelineOutput, error) { + req, out := c.DeletePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDescribeObjects = "DescribeObjects" + +// DescribeObjectsRequest generates a request for the DescribeObjects operation. 
+func (c *DataPipeline) DescribeObjectsRequest(input *DescribeObjectsInput) (req *request.Request, output *DescribeObjectsOutput) { + op := &request.Operation{ + Name: opDescribeObjects, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"marker"}, + LimitToken: "", + TruncationToken: "hasMoreResults", + }, + } + + if input == nil { + input = &DescribeObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeObjectsOutput{} + req.Data = output + return +} + +// Gets the object definitions for a set of objects associated with the pipeline. +// Object definitions are composed of a set of fields that define the properties +// of the object. +func (c *DataPipeline) DescribeObjects(input *DescribeObjectsInput) (*DescribeObjectsOutput, error) { + req, out := c.DescribeObjectsRequest(input) + err := req.Send() + return out, err +} + +func (c *DataPipeline) DescribeObjectsPages(input *DescribeObjectsInput, fn func(p *DescribeObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeObjectsOutput), lastPage) + }) +} + +const opDescribePipelines = "DescribePipelines" + +// DescribePipelinesRequest generates a request for the DescribePipelines operation. +func (c *DataPipeline) DescribePipelinesRequest(input *DescribePipelinesInput) (req *request.Request, output *DescribePipelinesOutput) { + op := &request.Operation{ + Name: opDescribePipelines, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePipelinesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePipelinesOutput{} + req.Data = output + return +} + +// Retrieves metadata about one or more pipelines. 
The information retrieved +// includes the name of the pipeline, the pipeline identifier, its current state, +// and the user account that owns the pipeline. Using account credentials, you +// can retrieve metadata about pipelines that you or your IAM users have created. +// If you are using an IAM user account, you can retrieve metadata about only +// those pipelines for which you have read permissions. +// +// To retrieve the full pipeline definition instead of metadata about the pipeline, +// call GetPipelineDefinition. +func (c *DataPipeline) DescribePipelines(input *DescribePipelinesInput) (*DescribePipelinesOutput, error) { + req, out := c.DescribePipelinesRequest(input) + err := req.Send() + return out, err +} + +const opEvaluateExpression = "EvaluateExpression" + +// EvaluateExpressionRequest generates a request for the EvaluateExpression operation. +func (c *DataPipeline) EvaluateExpressionRequest(input *EvaluateExpressionInput) (req *request.Request, output *EvaluateExpressionOutput) { + op := &request.Operation{ + Name: opEvaluateExpression, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EvaluateExpressionInput{} + } + + req = c.newRequest(op, input, output) + output = &EvaluateExpressionOutput{} + req.Data = output + return +} + +// Task runners call EvaluateExpression to evaluate a string in the context +// of the specified object. For example, a task runner can evaluate SQL queries +// stored in Amazon S3. +func (c *DataPipeline) EvaluateExpression(input *EvaluateExpressionInput) (*EvaluateExpressionOutput, error) { + req, out := c.EvaluateExpressionRequest(input) + err := req.Send() + return out, err +} + +const opGetPipelineDefinition = "GetPipelineDefinition" + +// GetPipelineDefinitionRequest generates a request for the GetPipelineDefinition operation. 
+func (c *DataPipeline) GetPipelineDefinitionRequest(input *GetPipelineDefinitionInput) (req *request.Request, output *GetPipelineDefinitionOutput) { + op := &request.Operation{ + Name: opGetPipelineDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPipelineDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPipelineDefinitionOutput{} + req.Data = output + return +} + +// Gets the definition of the specified pipeline. You can call GetPipelineDefinition +// to retrieve the pipeline definition that you provided using PutPipelineDefinition. +func (c *DataPipeline) GetPipelineDefinition(input *GetPipelineDefinitionInput) (*GetPipelineDefinitionOutput, error) { + req, out := c.GetPipelineDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opListPipelines = "ListPipelines" + +// ListPipelinesRequest generates a request for the ListPipelines operation. +func (c *DataPipeline) ListPipelinesRequest(input *ListPipelinesInput) (req *request.Request, output *ListPipelinesOutput) { + op := &request.Operation{ + Name: opListPipelines, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"marker"}, + LimitToken: "", + TruncationToken: "hasMoreResults", + }, + } + + if input == nil { + input = &ListPipelinesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPipelinesOutput{} + req.Data = output + return +} + +// Lists the pipeline identifiers for all active pipelines that you have permission +// to access. 
+func (c *DataPipeline) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) { + req, out := c.ListPipelinesRequest(input) + err := req.Send() + return out, err +} + +func (c *DataPipeline) ListPipelinesPages(input *ListPipelinesInput, fn func(p *ListPipelinesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPipelinesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPipelinesOutput), lastPage) + }) +} + +const opPollForTask = "PollForTask" + +// PollForTaskRequest generates a request for the PollForTask operation. +func (c *DataPipeline) PollForTaskRequest(input *PollForTaskInput) (req *request.Request, output *PollForTaskOutput) { + op := &request.Operation{ + Name: opPollForTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PollForTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &PollForTaskOutput{} + req.Data = output + return +} + +// Task runners call PollForTask to receive a task to perform from AWS Data +// Pipeline. The task runner specifies which tasks it can perform by setting +// a value for the workerGroup parameter. The task returned can come from any +// of the pipelines that match the workerGroup value passed in by the task runner +// and that was launched using the IAM user credentials specified by the task +// runner. +// +// If tasks are ready in the work queue, PollForTask returns a response immediately. +// If no tasks are available in the queue, PollForTask uses long-polling and +// holds on to a poll connection for up to 90 seconds, during which time the +// first newly scheduled task is handed to the task runner. To accommodate this, +// set the socket timeout in your task runner to 90 seconds. 
The task runner +// should not call PollForTask again on the same workerGroup until it receives +// a response, and this can take up to 90 seconds. +func (c *DataPipeline) PollForTask(input *PollForTaskInput) (*PollForTaskOutput, error) { + req, out := c.PollForTaskRequest(input) + err := req.Send() + return out, err +} + +const opPutPipelineDefinition = "PutPipelineDefinition" + +// PutPipelineDefinitionRequest generates a request for the PutPipelineDefinition operation. +func (c *DataPipeline) PutPipelineDefinitionRequest(input *PutPipelineDefinitionInput) (req *request.Request, output *PutPipelineDefinitionOutput) { + op := &request.Operation{ + Name: opPutPipelineDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutPipelineDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &PutPipelineDefinitionOutput{} + req.Data = output + return +} + +// Adds tasks, schedules, and preconditions to the specified pipeline. You can +// use PutPipelineDefinition to populate a new pipeline. +// +// PutPipelineDefinition also validates the configuration as it adds it to +// the pipeline. Changes to the pipeline are saved unless one of the following +// three validation errors exists in the pipeline. +// +// An object is missing a name or identifier field. A string or reference +// field is empty. The number of objects in the pipeline exceeds the maximum +// allowed objects. The pipeline is in a FINISHED state. Pipeline object definitions +// are passed to the PutPipelineDefinition action and returned by the GetPipelineDefinition +// action. +func (c *DataPipeline) PutPipelineDefinition(input *PutPipelineDefinitionInput) (*PutPipelineDefinitionOutput, error) { + req, out := c.PutPipelineDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opQueryObjects = "QueryObjects" + +// QueryObjectsRequest generates a request for the QueryObjects operation. 
+func (c *DataPipeline) QueryObjectsRequest(input *QueryObjectsInput) (req *request.Request, output *QueryObjectsOutput) { + op := &request.Operation{ + Name: opQueryObjects, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"marker"}, + LimitToken: "limit", + TruncationToken: "hasMoreResults", + }, + } + + if input == nil { + input = &QueryObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &QueryObjectsOutput{} + req.Data = output + return +} + +// Queries the specified pipeline for the names of objects that match the specified +// set of conditions. +func (c *DataPipeline) QueryObjects(input *QueryObjectsInput) (*QueryObjectsOutput, error) { + req, out := c.QueryObjectsRequest(input) + err := req.Send() + return out, err +} + +func (c *DataPipeline) QueryObjectsPages(input *QueryObjectsInput, fn func(p *QueryObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.QueryObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*QueryObjectsOutput), lastPage) + }) +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a request for the RemoveTags operation. +func (c *DataPipeline) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes existing tags from the specified pipeline. 
+func (c *DataPipeline) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + err := req.Send() + return out, err +} + +const opReportTaskProgress = "ReportTaskProgress" + +// ReportTaskProgressRequest generates a request for the ReportTaskProgress operation. +func (c *DataPipeline) ReportTaskProgressRequest(input *ReportTaskProgressInput) (req *request.Request, output *ReportTaskProgressOutput) { + op := &request.Operation{ + Name: opReportTaskProgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReportTaskProgressInput{} + } + + req = c.newRequest(op, input, output) + output = &ReportTaskProgressOutput{} + req.Data = output + return +} + +// Task runners call ReportTaskProgress when assigned a task to acknowledge +// that it has the task. If the web service does not receive this acknowledgement +// within 2 minutes, it assigns the task in a subsequent PollForTask call. After +// this initial acknowledgement, the task runner only needs to report progress +// every 15 minutes to maintain its ownership of the task. You can change this +// reporting time from 15 minutes by specifying a reportProgressTimeout field +// in your pipeline. +// +// If a task runner does not report its status after 5 minutes, AWS Data Pipeline +// assumes that the task runner is unable to process the task and reassigns +// the task in a subsequent response to PollForTask. Task runners should call +// ReportTaskProgress every 60 seconds. +func (c *DataPipeline) ReportTaskProgress(input *ReportTaskProgressInput) (*ReportTaskProgressOutput, error) { + req, out := c.ReportTaskProgressRequest(input) + err := req.Send() + return out, err +} + +const opReportTaskRunnerHeartbeat = "ReportTaskRunnerHeartbeat" + +// ReportTaskRunnerHeartbeatRequest generates a request for the ReportTaskRunnerHeartbeat operation. 
+func (c *DataPipeline) ReportTaskRunnerHeartbeatRequest(input *ReportTaskRunnerHeartbeatInput) (req *request.Request, output *ReportTaskRunnerHeartbeatOutput) { + op := &request.Operation{ + Name: opReportTaskRunnerHeartbeat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReportTaskRunnerHeartbeatInput{} + } + + req = c.newRequest(op, input, output) + output = &ReportTaskRunnerHeartbeatOutput{} + req.Data = output + return +} + +// Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate +// that they are operational. If the AWS Data Pipeline Task Runner is launched +// on a resource managed by AWS Data Pipeline, the web service can use this +// call to detect when the task runner application has failed and restart a +// new instance. +func (c *DataPipeline) ReportTaskRunnerHeartbeat(input *ReportTaskRunnerHeartbeatInput) (*ReportTaskRunnerHeartbeatOutput, error) { + req, out := c.ReportTaskRunnerHeartbeatRequest(input) + err := req.Send() + return out, err +} + +const opSetStatus = "SetStatus" + +// SetStatusRequest generates a request for the SetStatus operation. +func (c *DataPipeline) SetStatusRequest(input *SetStatusInput) (req *request.Request, output *SetStatusOutput) { + op := &request.Operation{ + Name: opSetStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetStatusInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetStatusOutput{} + req.Data = output + return +} + +// Requests that the status of the specified physical or logical pipeline objects +// be updated in the specified pipeline. This update might not occur immediately, +// but is eventually consistent. The status that can be set depends on the type +// of object (for example, DataNode or Activity). 
You cannot perform this operation +// on FINISHED pipelines and attempting to do so returns InvalidRequestException. +func (c *DataPipeline) SetStatus(input *SetStatusInput) (*SetStatusOutput, error) { + req, out := c.SetStatusRequest(input) + err := req.Send() + return out, err +} + +const opSetTaskStatus = "SetTaskStatus" + +// SetTaskStatusRequest generates a request for the SetTaskStatus operation. +func (c *DataPipeline) SetTaskStatusRequest(input *SetTaskStatusInput) (req *request.Request, output *SetTaskStatusOutput) { + op := &request.Operation{ + Name: opSetTaskStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTaskStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &SetTaskStatusOutput{} + req.Data = output + return +} + +// Task runners call SetTaskStatus to notify AWS Data Pipeline that a task is +// completed and provide information about the final status. A task runner makes +// this call regardless of whether the task was successful. A task runner does +// not need to call SetTaskStatus for tasks that are canceled by the web service +// during a call to ReportTaskProgress. +func (c *DataPipeline) SetTaskStatus(input *SetTaskStatusInput) (*SetTaskStatusOutput, error) { + req, out := c.SetTaskStatusRequest(input) + err := req.Send() + return out, err +} + +const opValidatePipelineDefinition = "ValidatePipelineDefinition" + +// ValidatePipelineDefinitionRequest generates a request for the ValidatePipelineDefinition operation. 
+func (c *DataPipeline) ValidatePipelineDefinitionRequest(input *ValidatePipelineDefinitionInput) (req *request.Request, output *ValidatePipelineDefinitionOutput) { + op := &request.Operation{ + Name: opValidatePipelineDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidatePipelineDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &ValidatePipelineDefinitionOutput{} + req.Data = output + return +} + +// Validates the specified pipeline definition to ensure that it is well formed +// and can be run without error. +func (c *DataPipeline) ValidatePipelineDefinition(input *ValidatePipelineDefinitionInput) (*ValidatePipelineDefinitionOutput, error) { + req, out := c.ValidatePipelineDefinitionRequest(input) + err := req.Send() + return out, err +} + +// Contains the parameters for ActivatePipeline. +type ActivatePipelineInput struct { + _ struct{} `type:"structure"` + + // A list of parameter values to pass to the pipeline at activation. + ParameterValues []*ParameterValue `locationName:"parameterValues" type:"list"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The date and time to resume the pipeline. By default, the pipeline resumes + // from the last completed execution. + StartTimestamp *time.Time `locationName:"startTimestamp" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ActivatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivatePipelineInput) GoString() string { + return s.String() +} + +// Contains the output of ActivatePipeline. 
+type ActivatePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ActivatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivatePipelineOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AddTags. +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The tags to add, as key/value pairs. + Tags []*Tag `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// Contains the output of AddTags. +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreatePipeline. +type CreatePipelineInput struct { + _ struct{} `type:"structure"` + + // The description for the pipeline. + Description *string `locationName:"description" type:"string"` + + // The name for the pipeline. You can use the same name for multiple pipelines + // associated with your AWS account, because AWS Data Pipeline assigns each + // pipeline a unique pipeline identifier. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // A list of tags to associate with the pipeline at creation. Tags let you control + // access to pipelines. 
For more information, see Controlling User Access to + // Pipelines (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) + // in the AWS Data Pipeline Developer Guide. + Tags []*Tag `locationName:"tags" type:"list"` + + // A unique identifier. This identifier is not the same as the pipeline identifier + // assigned by AWS Data Pipeline. You are responsible for defining the format + // and ensuring the uniqueness of this identifier. You use this parameter to + // ensure idempotency during repeated calls to CreatePipeline. For example, + // if the first call to CreatePipeline does not succeed, you can pass in the + // same unique identifier and pipeline name combination on a subsequent call + // to CreatePipeline. CreatePipeline ensures that if a pipeline already exists + // with the same name and unique identifier, a new pipeline is not created. + // Instead, you'll receive the pipeline identifier from the previous attempt. + // The uniqueness of the name and unique identifier combination is scoped to + // the AWS account or IAM user credentials. + UniqueId *string `locationName:"uniqueId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineInput) GoString() string { + return s.String() +} + +// Contains the output of CreatePipeline. +type CreatePipelineOutput struct { + _ struct{} `type:"structure"` + + // The ID that AWS Data Pipeline assigns the newly created pipeline. For example, + // df-06372391ZG65EXAMPLE. 
+ PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeactivatePipeline. +type DeactivatePipelineInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to cancel any running objects. The default is true, which + // sets the state of any running objects to CANCELED. If this value is false, + // the pipeline is deactivated after all running objects finish. + CancelActive *bool `locationName:"cancelActive" type:"boolean"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeactivatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivatePipelineInput) GoString() string { + return s.String() +} + +// Contains the output of DeactivatePipeline. +type DeactivatePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeactivatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivatePipelineOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeletePipeline. +type DeletePipelineInput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline. 
+ PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineInput) GoString() string { + return s.String() +} + +type DeletePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeObjects. +type DescribeObjectsInput struct { + _ struct{} `type:"structure"` + + // Indicates whether any expressions in the object should be evaluated when + // the object descriptions are returned. + EvaluateExpressions *bool `locationName:"evaluateExpressions" type:"boolean"` + + // The starting point for the results to be returned. For the first call, this + // value should be empty. As long as there are more results, continue to call + // DescribeObjects with the marker value from the previous call to retrieve + // the next set of results. + Marker *string `locationName:"marker" type:"string"` + + // The IDs of the pipeline objects that contain the definitions to be described. + // You can pass as many as 25 identifiers in a single call to DescribeObjects. + ObjectIds []*string `locationName:"objectIds" type:"list" required:"true"` + + // The ID of the pipeline that contains the object definitions. 
+ PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeObjectsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeObjects. +type DescribeObjectsOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there are more results to return. + HasMoreResults *bool `locationName:"hasMoreResults" type:"boolean"` + + // The starting point for the next page of results. To view the next page of + // results, call DescribeObjects again with this marker value. If the value + // is null, there are no more results. + Marker *string `locationName:"marker" type:"string"` + + // An array of object definitions. + PipelineObjects []*PipelineObject `locationName:"pipelineObjects" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeObjectsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribePipelines. +type DescribePipelinesInput struct { + _ struct{} `type:"structure"` + + // The IDs of the pipelines to describe. You can pass as many as 25 identifiers + // in a single call. To obtain pipeline IDs, call ListPipelines. + PipelineIds []*string `locationName:"pipelineIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribePipelinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePipelinesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribePipelines. 
+type DescribePipelinesOutput struct { + _ struct{} `type:"structure"` + + // An array of descriptions for the specified pipelines. + PipelineDescriptionList []*PipelineDescription `locationName:"pipelineDescriptionList" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribePipelinesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePipelinesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for EvaluateExpression. +type EvaluateExpressionInput struct { + _ struct{} `type:"structure"` + + // The expression to evaluate. + Expression *string `locationName:"expression" type:"string" required:"true"` + + // The ID of the object. + ObjectId *string `locationName:"objectId" min:"1" type:"string" required:"true"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EvaluateExpressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluateExpressionInput) GoString() string { + return s.String() +} + +// Contains the output of EvaluateExpression. +type EvaluateExpressionOutput struct { + _ struct{} `type:"structure"` + + // The evaluated expression. + EvaluatedExpression *string `locationName:"evaluatedExpression" type:"string" required:"true"` +} + +// String returns the string representation +func (s EvaluateExpressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluateExpressionOutput) GoString() string { + return s.String() +} + +// A key-value pair that describes a property of a pipeline object. The value +// is specified as either a string value (StringValue) or a reference to another +// object (RefValue) but not as both. 
+type Field struct { + _ struct{} `type:"structure"` + + // The field identifier. + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The field value, expressed as the identifier of another object. + RefValue *string `locationName:"refValue" min:"1" type:"string"` + + // The field value, expressed as a String. + StringValue *string `locationName:"stringValue" type:"string"` +} + +// String returns the string representation +func (s Field) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Field) GoString() string { + return s.String() +} + +// Contains the parameters for GetPipelineDefinition. +type GetPipelineDefinitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The version of the pipeline definition to retrieve. Set this parameter to + // latest (default) to use the last definition saved to the pipeline or active + // to use the last definition that was activated. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s GetPipelineDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineDefinitionInput) GoString() string { + return s.String() +} + +// Contains the output of GetPipelineDefinition. +type GetPipelineDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The parameter objects used in the pipeline definition. + ParameterObjects []*ParameterObject `locationName:"parameterObjects" type:"list"` + + // The parameter values used in the pipeline definition. + ParameterValues []*ParameterValue `locationName:"parameterValues" type:"list"` + + // The objects defined in the pipeline. 
+ PipelineObjects []*PipelineObject `locationName:"pipelineObjects" type:"list"` +} + +// String returns the string representation +func (s GetPipelineDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPipelineDefinitionOutput) GoString() string { + return s.String() +} + +// Identity information for the EC2 instance that is hosting the task runner. +// You can get this value by calling a metadata URI from the EC2 instance. For +// more information, see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html) +// in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves +// that your task runner is running on an EC2 instance, and ensures the proper +// AWS Data Pipeline service charges are applied to your pipeline. +type InstanceIdentity struct { + _ struct{} `type:"structure"` + + // A description of an EC2 instance that is generated when the instance is launched + // and exposed to the instance via the instance metadata service in the form + // of a JSON representation of an object. + Document *string `locationName:"document" type:"string"` + + // A signature which can be used to verify the accuracy and authenticity of + // the information provided in the instance identity document. + Signature *string `locationName:"signature" type:"string"` +} + +// String returns the string representation +func (s InstanceIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceIdentity) GoString() string { + return s.String() +} + +// Contains the parameters for ListPipelines. +type ListPipelinesInput struct { + _ struct{} `type:"structure"` + + // The starting point for the results to be returned. For the first call, this + // value should be empty. 
As long as there are more results, continue to call + // ListPipelines with the marker value from the previous call to retrieve the + // next set of results. + Marker *string `locationName:"marker" type:"string"` +} + +// String returns the string representation +func (s ListPipelinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesInput) GoString() string { + return s.String() +} + +// Contains the output of ListPipelines. +type ListPipelinesOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there are more results that can be obtained by a subsequent + // call. + HasMoreResults *bool `locationName:"hasMoreResults" type:"boolean"` + + // The starting point for the next page of results. To view the next page of + // results, call ListPipelinesOutput again with this marker value. If the value + // is null, there are no more results. + Marker *string `locationName:"marker" type:"string"` + + // The pipeline identifiers. If you require additional information about the + // pipelines, you can use these identifiers to call DescribePipelines and GetPipelineDefinition. + PipelineIdList []*PipelineIdName `locationName:"pipelineIdList" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListPipelinesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesOutput) GoString() string { + return s.String() +} + +// Contains a logical operation for comparing the value of a field with a specified +// value. +type Operator struct { + _ struct{} `type:"structure"` + + // The logical operation to be performed: equal (EQ), equal reference (REF_EQ), + // less than or equal (LE), greater than or equal (GE), or between (BETWEEN). + // Equal reference (REF_EQ) can be used only with reference fields. The other + // comparison types can be used only with String fields. 
The comparison types + // you can use apply only to certain object fields, as detailed below. + // + // The comparison operators EQ and REF_EQ act on the following fields: + // + // name @sphere parent @componentParent @instanceParent @status @scheduledStartTime + // @scheduledEndTime @actualStartTime @actualEndTime The comparison operators + // GE, LE, and BETWEEN act on the following fields: + // + // @scheduledStartTime @scheduledEndTime @actualStartTime @actualEndTime + // Note that fields beginning with the at sign (@) are read-only and set by + // the web service. When you name fields, you should choose names containing + // only alpha-numeric values, as symbols may be reserved by AWS Data Pipeline. + // User-defined fields that you add to a pipeline should prefix their name with + // the string "my". + Type *string `locationName:"type" type:"string" enum:"OperatorType"` + + // The value that the actual field value will be compared with. + Values []*string `locationName:"values" type:"list"` +} + +// String returns the string representation +func (s Operator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Operator) GoString() string { + return s.String() +} + +// The attributes allowed or specified with a parameter object. +type ParameterAttribute struct { + _ struct{} `type:"structure"` + + // The field identifier. + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The field value, expressed as a String. + StringValue *string `locationName:"stringValue" type:"string" required:"true"` +} + +// String returns the string representation +func (s ParameterAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterAttribute) GoString() string { + return s.String() +} + +// Contains information about a parameter object. 
+type ParameterObject struct { + _ struct{} `type:"structure"` + + // The attributes of the parameter object. + Attributes []*ParameterAttribute `locationName:"attributes" type:"list" required:"true"` + + // The ID of the parameter object. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ParameterObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterObject) GoString() string { + return s.String() +} + +// A value or list of parameter values. +type ParameterValue struct { + _ struct{} `type:"structure"` + + // The ID of the parameter value. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` + + // The field value, expressed as a String. + StringValue *string `locationName:"stringValue" type:"string" required:"true"` +} + +// String returns the string representation +func (s ParameterValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterValue) GoString() string { + return s.String() +} + +// Contains pipeline metadata. +type PipelineDescription struct { + _ struct{} `type:"structure"` + + // Description of the pipeline. + Description *string `locationName:"description" type:"string"` + + // A list of read-only fields that contain metadata about the pipeline: @userId, + // @accountId, and @pipelineState. + Fields []*Field `locationName:"fields" type:"list" required:"true"` + + // The name of the pipeline. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The pipeline identifier that was assigned by AWS Data Pipeline. This is a + // string of the form df-297EG78HU43EEXAMPLE. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // A list of tags to associated with a pipeline. Tags let you control access + // to pipelines. 
For more information, see Controlling User Access to Pipelines + // (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) + // in the AWS Data Pipeline Developer Guide. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s PipelineDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineDescription) GoString() string { + return s.String() +} + +// Contains the name and identifier of a pipeline. +type PipelineIdName struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline that was assigned by AWS Data Pipeline. This is a + // string of the form df-297EG78HU43EEXAMPLE. + Id *string `locationName:"id" min:"1" type:"string"` + + // The name of the pipeline. + Name *string `locationName:"name" min:"1" type:"string"` +} + +// String returns the string representation +func (s PipelineIdName) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineIdName) GoString() string { + return s.String() +} + +// Contains information about a pipeline object. This can be a logical, physical, +// or physical attempt pipeline object. The complete set of components of a +// pipeline defines the pipeline. +type PipelineObject struct { + _ struct{} `type:"structure"` + + // Key-value pairs that define the properties of the object. + Fields []*Field `locationName:"fields" type:"list" required:"true"` + + // The ID of the object. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` + + // The name of the object. 
+ Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PipelineObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineObject) GoString() string { + return s.String() +} + +// Contains the parameters for PollForTask. +type PollForTaskInput struct { + _ struct{} `type:"structure"` + + // The public DNS name of the calling task runner. + Hostname *string `locationName:"hostname" min:"1" type:"string"` + + // Identity information for the EC2 instance that is hosting the task runner. + // You can get this value from the instance using http://169.254.169.254/latest/meta-data/instance-id. + // For more information, see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html) + // in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves + // that your task runner is running on an EC2 instance, and ensures the proper + // AWS Data Pipeline service charges are applied to your pipeline. + InstanceIdentity *InstanceIdentity `locationName:"instanceIdentity" type:"structure"` + + // The type of task the task runner is configured to accept and process. The + // worker group is set as a field on objects in the pipeline when they are created. + // You can only specify a single value for workerGroup in the call to PollForTask. + // There are no wildcard values permitted in workerGroup; the string must be + // an exact, case-sensitive, match. + WorkerGroup *string `locationName:"workerGroup" type:"string" required:"true"` +} + +// String returns the string representation +func (s PollForTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForTaskInput) GoString() string { + return s.String() +} + +// Contains the output of PollForTask. 
+type PollForTaskOutput struct { + _ struct{} `type:"structure"` + + // The information needed to complete the task that is being assigned to the + // task runner. One of the fields returned in this object is taskId, which contains + // an identifier for the task being assigned. The calling task runner uses taskId + // in subsequent calls to ReportTaskProgress and SetTaskStatus. + TaskObject *TaskObject `locationName:"taskObject" type:"structure"` +} + +// String returns the string representation +func (s PollForTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForTaskOutput) GoString() string { + return s.String() +} + +// Contains the parameters for PutPipelineDefinition. +type PutPipelineDefinitionInput struct { + _ struct{} `type:"structure"` + + // The parameter objects used with the pipeline. + ParameterObjects []*ParameterObject `locationName:"parameterObjects" type:"list"` + + // The parameter values used with the pipeline. + ParameterValues []*ParameterValue `locationName:"parameterValues" type:"list"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The objects that define the pipeline. These objects overwrite the existing + // pipeline definition. + PipelineObjects []*PipelineObject `locationName:"pipelineObjects" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutPipelineDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPipelineDefinitionInput) GoString() string { + return s.String() +} + +// Contains the output of PutPipelineDefinition. 
+type PutPipelineDefinitionOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there were validation errors, and the pipeline definition + // is stored but cannot be activated until you correct the pipeline and call + // PutPipelineDefinition to commit the corrected pipeline. + Errored *bool `locationName:"errored" type:"boolean" required:"true"` + + // The validation errors that are associated with the objects defined in pipelineObjects. + ValidationErrors []*ValidationError `locationName:"validationErrors" type:"list"` + + // The validation warnings that are associated with the objects defined in pipelineObjects. + ValidationWarnings []*ValidationWarning `locationName:"validationWarnings" type:"list"` +} + +// String returns the string representation +func (s PutPipelineDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPipelineDefinitionOutput) GoString() string { + return s.String() +} + +// Defines the query to run against an object. +type Query struct { + _ struct{} `type:"structure"` + + // List of selectors that define the query. An object must satisfy all of the + // selectors to match the query. + Selectors []*Selector `locationName:"selectors" type:"list"` +} + +// String returns the string representation +func (s Query) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Query) GoString() string { + return s.String() +} + +// Contains the parameters for QueryObjects. +type QueryObjectsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of object names that QueryObjects will return in a single + // call. The default value is 100. + Limit *int64 `locationName:"limit" type:"integer"` + + // The starting point for the results to be returned. For the first call, this + // value should be empty. 
As long as there are more results, continue to call + // QueryObjects with the marker value from the previous call to retrieve the + // next set of results. + Marker *string `locationName:"marker" type:"string"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The query that defines the objects to be returned. The Query object can contain + // a maximum of ten selectors. The conditions in the query are limited to top-level + // String fields in the object. These filters can be applied to components, + // instances, and attempts. + Query *Query `locationName:"query" type:"structure"` + + // Indicates whether the query applies to components or instances. The possible + // values are: COMPONENT, INSTANCE, and ATTEMPT. + Sphere *string `locationName:"sphere" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueryObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryObjectsInput) GoString() string { + return s.String() +} + +// Contains the output of QueryObjects. +type QueryObjectsOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there are more results that can be obtained by a subsequent + // call. + HasMoreResults *bool `locationName:"hasMoreResults" type:"boolean"` + + // The identifiers that match the query selectors. + Ids []*string `locationName:"ids" type:"list"` + + // The starting point for the next page of results. To view the next page of + // results, call QueryObjects again with this marker value. If the value is + // null, there are no more results. 
+ Marker *string `locationName:"marker" type:"string"` +} + +// String returns the string representation +func (s QueryObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryObjectsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RemoveTags. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The keys of the tags to remove. + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Contains the output of RemoveTags. +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReportTaskProgress. +type ReportTaskProgressInput struct { + _ struct{} `type:"structure"` + + // Key-value pairs that define the properties of the ReportTaskProgressInput + // object. + Fields []*Field `locationName:"fields" type:"list"` + + // The ID of the task assigned to the task runner. This value is provided in + // the response for PollForTask. 
+ TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReportTaskProgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportTaskProgressInput) GoString() string { + return s.String() +} + +// Contains the output of ReportTaskProgress. +type ReportTaskProgressOutput struct { + _ struct{} `type:"structure"` + + // If true, the calling task runner should cancel processing of the task. The + // task runner does not need to call SetTaskStatus for canceled tasks. + Canceled *bool `locationName:"canceled" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ReportTaskProgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportTaskProgressOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ReportTaskRunnerHeartbeat. +type ReportTaskRunnerHeartbeatInput struct { + _ struct{} `type:"structure"` + + // The public DNS name of the task runner. + Hostname *string `locationName:"hostname" min:"1" type:"string"` + + // The ID of the task runner. This value should be unique across your AWS account. + // In the case of AWS Data Pipeline Task Runner launched on a resource managed + // by AWS Data Pipeline, the web service provides a unique identifier when it + // launches the application. If you have written a custom task runner, you should + // assign a unique identifier for the task runner. + TaskrunnerId *string `locationName:"taskrunnerId" min:"1" type:"string" required:"true"` + + // The type of task the task runner is configured to accept and process. The + // worker group is set as a field on objects in the pipeline when they are created. + // You can only specify a single value for workerGroup. 
There are no wildcard + // values permitted in workerGroup; the string must be an exact, case-sensitive, + // match. + WorkerGroup *string `locationName:"workerGroup" type:"string"` +} + +// String returns the string representation +func (s ReportTaskRunnerHeartbeatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportTaskRunnerHeartbeatInput) GoString() string { + return s.String() +} + +// Contains the output of ReportTaskRunnerHeartbeat. +type ReportTaskRunnerHeartbeatOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the calling task runner should terminate. + Terminate *bool `locationName:"terminate" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ReportTaskRunnerHeartbeatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportTaskRunnerHeartbeatOutput) GoString() string { + return s.String() +} + +// A comparision that is used to determine whether a query should return this +// object. +type Selector struct { + _ struct{} `type:"structure"` + + // The name of the field that the operator will be applied to. The field name + // is the "key" portion of the field definition in the pipeline definition syntax + // that is used by the AWS Data Pipeline API. If the field is not set on the + // object, the condition fails. + FieldName *string `locationName:"fieldName" type:"string"` + + // Contains a logical operation for comparing the value of a field with a specified + // value. + Operator *Operator `locationName:"operator" type:"structure"` +} + +// String returns the string representation +func (s Selector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Selector) GoString() string { + return s.String() +} + +// Contains the parameters for SetStatus. 
+type SetStatusInput struct { + _ struct{} `type:"structure"` + + // The IDs of the objects. The corresponding objects can be either physical + // or components, but not a mix of both types. + ObjectIds []*string `locationName:"objectIds" type:"list" required:"true"` + + // The ID of the pipeline that contains the objects. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The status to be set on all the objects specified in objectIds. For components, + // use PAUSE or RESUME. For instances, use TRY_CANCEL, RERUN, or MARK_FINISHED. + Status *string `locationName:"status" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetStatusInput) GoString() string { + return s.String() +} + +type SetStatusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetStatusOutput) GoString() string { + return s.String() +} + +// Contains the parameters for SetTaskStatus. +type SetTaskStatusInput struct { + _ struct{} `type:"structure"` + + // If an error occurred during the task, this value specifies the error code. + // This value is set on the physical attempt object. It is used to display error + // information to the user. It should not start with string "Service_" which + // is reserved by the system. + ErrorId *string `locationName:"errorId" type:"string"` + + // If an error occurred during the task, this value specifies a text description + // of the error. This value is set on the physical attempt object. It is used + // to display error information to the user. The web service does not parse + // this value. 
+ ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // If an error occurred during the task, this value specifies the stack trace + // associated with the error. This value is set on the physical attempt object. + // It is used to display error information to the user. The web service does + // not parse this value. + ErrorStackTrace *string `locationName:"errorStackTrace" type:"string"` + + // The ID of the task assigned to the task runner. This value is provided in + // the response for PollForTask. + TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` + + // If FINISHED, the task successfully completed. If FAILED, the task ended unsuccessfully. + // Preconditions use false. + TaskStatus *string `locationName:"taskStatus" type:"string" required:"true" enum:"TaskStatus"` +} + +// String returns the string representation +func (s SetTaskStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTaskStatusInput) GoString() string { + return s.String() +} + +// Contains the output of SetTaskStatus. +type SetTaskStatusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTaskStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTaskStatusOutput) GoString() string { + return s.String() +} + +// Tags are key/value pairs defined by a user and associated with a pipeline +// to control access. AWS Data Pipeline allows you to associate ten tags per +// pipeline. For more information, see Controlling User Access to Pipelines +// (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) +// in the AWS Data Pipeline Developer Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key name of a tag defined by a user. 
For more information, see Controlling + // User Access to Pipelines (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) + // in the AWS Data Pipeline Developer Guide. + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The optional value portion of a tag defined by a user. For more information, + // see Controlling User Access to Pipelines (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html) + // in the AWS Data Pipeline Developer Guide. + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Contains information about a pipeline task that is assigned to a task runner. +type TaskObject struct { + _ struct{} `type:"structure"` + + // The ID of the pipeline task attempt object. AWS Data Pipeline uses this value + // to track how many times a task is attempted. + AttemptId *string `locationName:"attemptId" min:"1" type:"string"` + + // Connection information for the location where the task runner will publish + // the output of the task. + Objects map[string]*PipelineObject `locationName:"objects" type:"map"` + + // The ID of the pipeline that provided the task. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string"` + + // An internal identifier for the task. This ID is passed to the SetTaskStatus + // and ReportTaskProgress actions. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s TaskObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskObject) GoString() string { + return s.String() +} + +// Contains the parameters for ValidatePipelineDefinition. 
+type ValidatePipelineDefinitionInput struct { + _ struct{} `type:"structure"` + + // The parameter objects used with the pipeline. + ParameterObjects []*ParameterObject `locationName:"parameterObjects" type:"list"` + + // The parameter values used with the pipeline. + ParameterValues []*ParameterValue `locationName:"parameterValues" type:"list"` + + // The ID of the pipeline. + PipelineId *string `locationName:"pipelineId" min:"1" type:"string" required:"true"` + + // The objects that define the pipeline changes to validate against the pipeline. + PipelineObjects []*PipelineObject `locationName:"pipelineObjects" type:"list" required:"true"` +} + +// String returns the string representation +func (s ValidatePipelineDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidatePipelineDefinitionInput) GoString() string { + return s.String() +} + +// Contains the output of ValidatePipelineDefinition. +type ValidatePipelineDefinitionOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there were validation errors. + Errored *bool `locationName:"errored" type:"boolean" required:"true"` + + // Any validation errors that were found. + ValidationErrors []*ValidationError `locationName:"validationErrors" type:"list"` + + // Any validation warnings that were found. + ValidationWarnings []*ValidationWarning `locationName:"validationWarnings" type:"list"` +} + +// String returns the string representation +func (s ValidatePipelineDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidatePipelineDefinitionOutput) GoString() string { + return s.String() +} + +// Defines a validation error. Validation errors prevent pipeline activation. +// The set of validation errors that can be returned are defined by AWS Data +// Pipeline. 
+type ValidationError struct { + _ struct{} `type:"structure"` + + // A description of the validation error. + Errors []*string `locationName:"errors" type:"list"` + + // The identifier of the object that contains the validation error. + Id *string `locationName:"id" min:"1" type:"string"` +} + +// String returns the string representation +func (s ValidationError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationError) GoString() string { + return s.String() +} + +// Defines a validation warning. Validation warnings do not prevent pipeline +// activation. The set of validation warnings that can be returned are defined +// by AWS Data Pipeline. +type ValidationWarning struct { + _ struct{} `type:"structure"` + + // The identifier of the object that contains the validation warning. + Id *string `locationName:"id" min:"1" type:"string"` + + // A description of the validation warning. + Warnings []*string `locationName:"warnings" type:"list"` +} + +// String returns the string representation +func (s ValidationWarning) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationWarning) GoString() string { + return s.String() +} + +const ( + // @enum OperatorType + OperatorTypeEq = "EQ" + // @enum OperatorType + OperatorTypeRefEq = "REF_EQ" + // @enum OperatorType + OperatorTypeLe = "LE" + // @enum OperatorType + OperatorTypeGe = "GE" + // @enum OperatorType + OperatorTypeBetween = "BETWEEN" +) + +const ( + // @enum TaskStatus + TaskStatusFinished = "FINISHED" + // @enum TaskStatus + TaskStatusFailed = "FAILED" + // @enum TaskStatus + TaskStatusFalse = "FALSE" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface/interface.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package datapipelineiface provides an interface for the AWS Data Pipeline. +package datapipelineiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/datapipeline" +) + +// DataPipelineAPI is the interface type for datapipeline.DataPipeline. +type DataPipelineAPI interface { + ActivatePipelineRequest(*datapipeline.ActivatePipelineInput) (*request.Request, *datapipeline.ActivatePipelineOutput) + + ActivatePipeline(*datapipeline.ActivatePipelineInput) (*datapipeline.ActivatePipelineOutput, error) + + AddTagsRequest(*datapipeline.AddTagsInput) (*request.Request, *datapipeline.AddTagsOutput) + + AddTags(*datapipeline.AddTagsInput) (*datapipeline.AddTagsOutput, error) + + CreatePipelineRequest(*datapipeline.CreatePipelineInput) (*request.Request, *datapipeline.CreatePipelineOutput) + + CreatePipeline(*datapipeline.CreatePipelineInput) (*datapipeline.CreatePipelineOutput, error) + + DeactivatePipelineRequest(*datapipeline.DeactivatePipelineInput) (*request.Request, *datapipeline.DeactivatePipelineOutput) + + DeactivatePipeline(*datapipeline.DeactivatePipelineInput) (*datapipeline.DeactivatePipelineOutput, error) + + DeletePipelineRequest(*datapipeline.DeletePipelineInput) (*request.Request, *datapipeline.DeletePipelineOutput) + + DeletePipeline(*datapipeline.DeletePipelineInput) (*datapipeline.DeletePipelineOutput, error) + + DescribeObjectsRequest(*datapipeline.DescribeObjectsInput) (*request.Request, *datapipeline.DescribeObjectsOutput) + + DescribeObjects(*datapipeline.DescribeObjectsInput) 
(*datapipeline.DescribeObjectsOutput, error) + + DescribeObjectsPages(*datapipeline.DescribeObjectsInput, func(*datapipeline.DescribeObjectsOutput, bool) bool) error + + DescribePipelinesRequest(*datapipeline.DescribePipelinesInput) (*request.Request, *datapipeline.DescribePipelinesOutput) + + DescribePipelines(*datapipeline.DescribePipelinesInput) (*datapipeline.DescribePipelinesOutput, error) + + EvaluateExpressionRequest(*datapipeline.EvaluateExpressionInput) (*request.Request, *datapipeline.EvaluateExpressionOutput) + + EvaluateExpression(*datapipeline.EvaluateExpressionInput) (*datapipeline.EvaluateExpressionOutput, error) + + GetPipelineDefinitionRequest(*datapipeline.GetPipelineDefinitionInput) (*request.Request, *datapipeline.GetPipelineDefinitionOutput) + + GetPipelineDefinition(*datapipeline.GetPipelineDefinitionInput) (*datapipeline.GetPipelineDefinitionOutput, error) + + ListPipelinesRequest(*datapipeline.ListPipelinesInput) (*request.Request, *datapipeline.ListPipelinesOutput) + + ListPipelines(*datapipeline.ListPipelinesInput) (*datapipeline.ListPipelinesOutput, error) + + ListPipelinesPages(*datapipeline.ListPipelinesInput, func(*datapipeline.ListPipelinesOutput, bool) bool) error + + PollForTaskRequest(*datapipeline.PollForTaskInput) (*request.Request, *datapipeline.PollForTaskOutput) + + PollForTask(*datapipeline.PollForTaskInput) (*datapipeline.PollForTaskOutput, error) + + PutPipelineDefinitionRequest(*datapipeline.PutPipelineDefinitionInput) (*request.Request, *datapipeline.PutPipelineDefinitionOutput) + + PutPipelineDefinition(*datapipeline.PutPipelineDefinitionInput) (*datapipeline.PutPipelineDefinitionOutput, error) + + QueryObjectsRequest(*datapipeline.QueryObjectsInput) (*request.Request, *datapipeline.QueryObjectsOutput) + + QueryObjects(*datapipeline.QueryObjectsInput) (*datapipeline.QueryObjectsOutput, error) + + QueryObjectsPages(*datapipeline.QueryObjectsInput, func(*datapipeline.QueryObjectsOutput, bool) bool) error + + 
RemoveTagsRequest(*datapipeline.RemoveTagsInput) (*request.Request, *datapipeline.RemoveTagsOutput) + + RemoveTags(*datapipeline.RemoveTagsInput) (*datapipeline.RemoveTagsOutput, error) + + ReportTaskProgressRequest(*datapipeline.ReportTaskProgressInput) (*request.Request, *datapipeline.ReportTaskProgressOutput) + + ReportTaskProgress(*datapipeline.ReportTaskProgressInput) (*datapipeline.ReportTaskProgressOutput, error) + + ReportTaskRunnerHeartbeatRequest(*datapipeline.ReportTaskRunnerHeartbeatInput) (*request.Request, *datapipeline.ReportTaskRunnerHeartbeatOutput) + + ReportTaskRunnerHeartbeat(*datapipeline.ReportTaskRunnerHeartbeatInput) (*datapipeline.ReportTaskRunnerHeartbeatOutput, error) + + SetStatusRequest(*datapipeline.SetStatusInput) (*request.Request, *datapipeline.SetStatusOutput) + + SetStatus(*datapipeline.SetStatusInput) (*datapipeline.SetStatusOutput, error) + + SetTaskStatusRequest(*datapipeline.SetTaskStatusInput) (*request.Request, *datapipeline.SetTaskStatusOutput) + + SetTaskStatus(*datapipeline.SetTaskStatusInput) (*datapipeline.SetTaskStatusOutput, error) + + ValidatePipelineDefinitionRequest(*datapipeline.ValidatePipelineDefinitionInput) (*request.Request, *datapipeline.ValidatePipelineDefinitionOutput) + + ValidatePipelineDefinition(*datapipeline.ValidatePipelineDefinitionInput) (*datapipeline.ValidatePipelineDefinitionOutput, error) +} + +var _ DataPipelineAPI = (*datapipeline.DataPipeline)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1,530 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package datapipeline_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/datapipeline" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDataPipeline_ActivatePipeline() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ActivatePipelineInput{ + PipelineId: aws.String("id"), // Required + ParameterValues: []*datapipeline.ParameterValue{ + { // Required + Id: aws.String("fieldNameString"), // Required + StringValue: aws.String("fieldStringValue"), // Required + }, + // More values... + }, + StartTimestamp: aws.Time(time.Now()), + } + resp, err := svc.ActivatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_AddTags() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.AddTagsInput{ + PipelineId: aws.String("id"), // Required + Tags: []*datapipeline.Tag{ // Required + { // Required + Key: aws.String("tagKey"), // Required + Value: aws.String("tagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDataPipeline_CreatePipeline() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.CreatePipelineInput{ + Name: aws.String("id"), // Required + UniqueId: aws.String("id"), // Required + Description: aws.String("string"), + Tags: []*datapipeline.Tag{ + { // Required + Key: aws.String("tagKey"), // Required + Value: aws.String("tagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.CreatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_DeactivatePipeline() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.DeactivatePipelineInput{ + PipelineId: aws.String("id"), // Required + CancelActive: aws.Bool(true), + } + resp, err := svc.DeactivatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_DeletePipeline() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.DeletePipelineInput{ + PipelineId: aws.String("id"), // Required + } + resp, err := svc.DeletePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_DescribeObjects() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.DescribeObjectsInput{ + ObjectIds: []*string{ // Required + aws.String("id"), // Required + // More values... 
+ }, + PipelineId: aws.String("id"), // Required + EvaluateExpressions: aws.Bool(true), + Marker: aws.String("string"), + } + resp, err := svc.DescribeObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_DescribePipelines() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.DescribePipelinesInput{ + PipelineIds: []*string{ // Required + aws.String("id"), // Required + // More values... + }, + } + resp, err := svc.DescribePipelines(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_EvaluateExpression() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.EvaluateExpressionInput{ + Expression: aws.String("longString"), // Required + ObjectId: aws.String("id"), // Required + PipelineId: aws.String("id"), // Required + } + resp, err := svc.EvaluateExpression(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_GetPipelineDefinition() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.GetPipelineDefinitionInput{ + PipelineId: aws.String("id"), // Required + Version: aws.String("string"), + } + resp, err := svc.GetPipelineDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDataPipeline_ListPipelines() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ListPipelinesInput{ + Marker: aws.String("string"), + } + resp, err := svc.ListPipelines(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_PollForTask() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.PollForTaskInput{ + WorkerGroup: aws.String("string"), // Required + Hostname: aws.String("id"), + InstanceIdentity: &datapipeline.InstanceIdentity{ + Document: aws.String("string"), + Signature: aws.String("string"), + }, + } + resp, err := svc.PollForTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_PutPipelineDefinition() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.PutPipelineDefinitionInput{ + PipelineId: aws.String("id"), // Required + PipelineObjects: []*datapipeline.PipelineObject{ // Required + { // Required + Fields: []*datapipeline.Field{ // Required + { // Required + Key: aws.String("fieldNameString"), // Required + RefValue: aws.String("fieldNameString"), + StringValue: aws.String("fieldStringValue"), + }, + // More values... + }, + Id: aws.String("id"), // Required + Name: aws.String("id"), // Required + }, + // More values... + }, + ParameterObjects: []*datapipeline.ParameterObject{ + { // Required + Attributes: []*datapipeline.ParameterAttribute{ // Required + { // Required + Key: aws.String("attributeNameString"), // Required + StringValue: aws.String("attributeValueString"), // Required + }, + // More values... 
+ }, + Id: aws.String("fieldNameString"), // Required + }, + // More values... + }, + ParameterValues: []*datapipeline.ParameterValue{ + { // Required + Id: aws.String("fieldNameString"), // Required + StringValue: aws.String("fieldStringValue"), // Required + }, + // More values... + }, + } + resp, err := svc.PutPipelineDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_QueryObjects() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.QueryObjectsInput{ + PipelineId: aws.String("id"), // Required + Sphere: aws.String("string"), // Required + Limit: aws.Int64(1), + Marker: aws.String("string"), + Query: &datapipeline.Query{ + Selectors: []*datapipeline.Selector{ + { // Required + FieldName: aws.String("string"), + Operator: &datapipeline.Operator{ + Type: aws.String("OperatorType"), + Values: []*string{ + aws.String("string"), // Required + // More values... + }, + }, + }, + // More values... + }, + }, + } + resp, err := svc.QueryObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_RemoveTags() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.RemoveTagsInput{ + PipelineId: aws.String("id"), // Required + TagKeys: []*string{ // Required + aws.String("string"), // Required + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDataPipeline_ReportTaskProgress() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ReportTaskProgressInput{ + TaskId: aws.String("taskId"), // Required + Fields: []*datapipeline.Field{ + { // Required + Key: aws.String("fieldNameString"), // Required + RefValue: aws.String("fieldNameString"), + StringValue: aws.String("fieldStringValue"), + }, + // More values... + }, + } + resp, err := svc.ReportTaskProgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_ReportTaskRunnerHeartbeat() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ReportTaskRunnerHeartbeatInput{ + TaskrunnerId: aws.String("id"), // Required + Hostname: aws.String("id"), + WorkerGroup: aws.String("string"), + } + resp, err := svc.ReportTaskRunnerHeartbeat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_SetStatus() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.SetStatusInput{ + ObjectIds: []*string{ // Required + aws.String("id"), // Required + // More values... + }, + PipelineId: aws.String("id"), // Required + Status: aws.String("string"), // Required + } + resp, err := svc.SetStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDataPipeline_SetTaskStatus() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.SetTaskStatusInput{ + TaskId: aws.String("taskId"), // Required + TaskStatus: aws.String("TaskStatus"), // Required + ErrorId: aws.String("string"), + ErrorMessage: aws.String("errorMessage"), + ErrorStackTrace: aws.String("string"), + } + resp, err := svc.SetTaskStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDataPipeline_ValidatePipelineDefinition() { + svc := datapipeline.New(session.New()) + + params := &datapipeline.ValidatePipelineDefinitionInput{ + PipelineId: aws.String("id"), // Required + PipelineObjects: []*datapipeline.PipelineObject{ // Required + { // Required + Fields: []*datapipeline.Field{ // Required + { // Required + Key: aws.String("fieldNameString"), // Required + RefValue: aws.String("fieldNameString"), + StringValue: aws.String("fieldStringValue"), + }, + // More values... + }, + Id: aws.String("id"), // Required + Name: aws.String("id"), // Required + }, + // More values... + }, + ParameterObjects: []*datapipeline.ParameterObject{ + { // Required + Attributes: []*datapipeline.ParameterAttribute{ // Required + { // Required + Key: aws.String("attributeNameString"), // Required + StringValue: aws.String("attributeValueString"), // Required + }, + // More values... + }, + Id: aws.String("fieldNameString"), // Required + }, + // More values... + }, + ParameterValues: []*datapipeline.ParameterValue{ + { // Required + Id: aws.String("fieldNameString"), // Required + StringValue: aws.String("fieldStringValue"), // Required + }, + // More values... 
+ }, + } + resp, err := svc.ValidatePipelineDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/datapipeline/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,109 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package datapipeline + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS Data Pipeline configures and manages a data-driven workflow called a +// pipeline. AWS Data Pipeline handles the details of scheduling and ensuring +// that data dependencies are met so that your application can focus on processing +// the data. +// +// AWS Data Pipeline provides a JAR implementation of a task runner called +// AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides logic +// for common data management scenarios, such as performing database queries +// and running data analysis using Amazon Elastic MapReduce (Amazon EMR). You +// can use AWS Data Pipeline Task Runner as your task runner, or you can write +// your own task runner to provide custom data management. +// +// AWS Data Pipeline implements two main sets of functionality. 
Use the first +// set to create a pipeline and define data sources, schedules, dependencies, +// and the transforms to be performed on the data. Use the second set in your +// task runner application to receive the next task ready for processing. The +// logic for performing the task, such as querying the data, running data analysis, +// or converting the data from one format to another, is contained within the +// task runner. The task runner performs the task assigned to it by the web +// service, reporting progress to the web service as it does so. When the task +// is done, the task runner reports the final success or failure of the task +// to the web service. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type DataPipeline struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "datapipeline" + +// New creates a new instance of the DataPipeline client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DataPipeline client from just a session. +// svc := datapipeline.New(mySession) +// +// // Create a DataPipeline client with additional configuration +// svc := datapipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DataPipeline { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DataPipeline { + svc := &DataPipeline{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-10-29", + JSONVersion: "1.1", + TargetPrefix: "DataPipeline", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DataPipeline operation and runs any +// custom request initialization. +func (c *DataPipeline) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3880 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package devicefarm provides a client for AWS Device Farm. 
+package devicefarm + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateDevicePool = "CreateDevicePool" + +// CreateDevicePoolRequest generates a request for the CreateDevicePool operation. +func (c *DeviceFarm) CreateDevicePoolRequest(input *CreateDevicePoolInput) (req *request.Request, output *CreateDevicePoolOutput) { + op := &request.Operation{ + Name: opCreateDevicePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDevicePoolInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDevicePoolOutput{} + req.Data = output + return +} + +// Creates a device pool. +func (c *DeviceFarm) CreateDevicePool(input *CreateDevicePoolInput) (*CreateDevicePoolOutput, error) { + req, out := c.CreateDevicePoolRequest(input) + err := req.Send() + return out, err +} + +const opCreateProject = "CreateProject" + +// CreateProjectRequest generates a request for the CreateProject operation. +func (c *DeviceFarm) CreateProjectRequest(input *CreateProjectInput) (req *request.Request, output *CreateProjectOutput) { + op := &request.Operation{ + Name: opCreateProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateProjectInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateProjectOutput{} + req.Data = output + return +} + +// Creates a new project. +func (c *DeviceFarm) CreateProject(input *CreateProjectInput) (*CreateProjectOutput, error) { + req, out := c.CreateProjectRequest(input) + err := req.Send() + return out, err +} + +const opCreateUpload = "CreateUpload" + +// CreateUploadRequest generates a request for the CreateUpload operation. 
+func (c *DeviceFarm) CreateUploadRequest(input *CreateUploadInput) (req *request.Request, output *CreateUploadOutput) { + op := &request.Operation{ + Name: opCreateUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUploadOutput{} + req.Data = output + return +} + +// Uploads an app or test scripts. +func (c *DeviceFarm) CreateUpload(input *CreateUploadInput) (*CreateUploadOutput, error) { + req, out := c.CreateUploadRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDevicePool = "DeleteDevicePool" + +// DeleteDevicePoolRequest generates a request for the DeleteDevicePool operation. +func (c *DeviceFarm) DeleteDevicePoolRequest(input *DeleteDevicePoolInput) (req *request.Request, output *DeleteDevicePoolOutput) { + op := &request.Operation{ + Name: opDeleteDevicePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDevicePoolInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDevicePoolOutput{} + req.Data = output + return +} + +// Deletes a device pool given the pool ARN. Does not allow deletion of curated +// pools owned by the system. +func (c *DeviceFarm) DeleteDevicePool(input *DeleteDevicePoolInput) (*DeleteDevicePoolOutput, error) { + req, out := c.DeleteDevicePoolRequest(input) + err := req.Send() + return out, err +} + +const opDeleteProject = "DeleteProject" + +// DeleteProjectRequest generates a request for the DeleteProject operation. 
+func (c *DeviceFarm) DeleteProjectRequest(input *DeleteProjectInput) (req *request.Request, output *DeleteProjectOutput) { + op := &request.Operation{ + Name: opDeleteProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteProjectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteProjectOutput{} + req.Data = output + return +} + +// Deletes an AWS Device Farm project, given the project ARN. +// +// Note Deleting this resource does not stop an in-progress run. +func (c *DeviceFarm) DeleteProject(input *DeleteProjectInput) (*DeleteProjectOutput, error) { + req, out := c.DeleteProjectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRun = "DeleteRun" + +// DeleteRunRequest generates a request for the DeleteRun operation. +func (c *DeviceFarm) DeleteRunRequest(input *DeleteRunInput) (req *request.Request, output *DeleteRunOutput) { + op := &request.Operation{ + Name: opDeleteRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRunInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRunOutput{} + req.Data = output + return +} + +// Deletes the run, given the run ARN. +// +// Note Deleting this resource does not stop an in-progress run. +func (c *DeviceFarm) DeleteRun(input *DeleteRunInput) (*DeleteRunOutput, error) { + req, out := c.DeleteRunRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUpload = "DeleteUpload" + +// DeleteUploadRequest generates a request for the DeleteUpload operation. +func (c *DeviceFarm) DeleteUploadRequest(input *DeleteUploadInput) (req *request.Request, output *DeleteUploadOutput) { + op := &request.Operation{ + Name: opDeleteUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteUploadOutput{} + req.Data = output + return +} + +// Deletes an upload given the upload ARN. 
+func (c *DeviceFarm) DeleteUpload(input *DeleteUploadInput) (*DeleteUploadOutput, error) { + req, out := c.DeleteUploadRequest(input) + err := req.Send() + return out, err +} + +const opGetAccountSettings = "GetAccountSettings" + +// GetAccountSettingsRequest generates a request for the GetAccountSettings operation. +func (c *DeviceFarm) GetAccountSettingsRequest(input *GetAccountSettingsInput) (req *request.Request, output *GetAccountSettingsOutput) { + op := &request.Operation{ + Name: opGetAccountSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountSettingsOutput{} + req.Data = output + return +} + +// Returns the number of unmetered iOS and/or unmetered Android devices that +// have been purchased by the account. +func (c *DeviceFarm) GetAccountSettings(input *GetAccountSettingsInput) (*GetAccountSettingsOutput, error) { + req, out := c.GetAccountSettingsRequest(input) + err := req.Send() + return out, err +} + +const opGetDevice = "GetDevice" + +// GetDeviceRequest generates a request for the GetDevice operation. +func (c *DeviceFarm) GetDeviceRequest(input *GetDeviceInput) (req *request.Request, output *GetDeviceOutput) { + op := &request.Operation{ + Name: opGetDevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDeviceOutput{} + req.Data = output + return +} + +// Gets information about a unique device type. +func (c *DeviceFarm) GetDevice(input *GetDeviceInput) (*GetDeviceOutput, error) { + req, out := c.GetDeviceRequest(input) + err := req.Send() + return out, err +} + +const opGetDevicePool = "GetDevicePool" + +// GetDevicePoolRequest generates a request for the GetDevicePool operation. 
+func (c *DeviceFarm) GetDevicePoolRequest(input *GetDevicePoolInput) (req *request.Request, output *GetDevicePoolOutput) { + op := &request.Operation{ + Name: opGetDevicePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDevicePoolInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDevicePoolOutput{} + req.Data = output + return +} + +// Gets information about a device pool. +func (c *DeviceFarm) GetDevicePool(input *GetDevicePoolInput) (*GetDevicePoolOutput, error) { + req, out := c.GetDevicePoolRequest(input) + err := req.Send() + return out, err +} + +const opGetDevicePoolCompatibility = "GetDevicePoolCompatibility" + +// GetDevicePoolCompatibilityRequest generates a request for the GetDevicePoolCompatibility operation. +func (c *DeviceFarm) GetDevicePoolCompatibilityRequest(input *GetDevicePoolCompatibilityInput) (req *request.Request, output *GetDevicePoolCompatibilityOutput) { + op := &request.Operation{ + Name: opGetDevicePoolCompatibility, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDevicePoolCompatibilityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDevicePoolCompatibilityOutput{} + req.Data = output + return +} + +// Gets information about compatibility with a device pool. +func (c *DeviceFarm) GetDevicePoolCompatibility(input *GetDevicePoolCompatibilityInput) (*GetDevicePoolCompatibilityOutput, error) { + req, out := c.GetDevicePoolCompatibilityRequest(input) + err := req.Send() + return out, err +} + +const opGetJob = "GetJob" + +// GetJobRequest generates a request for the GetJob operation. 
+func (c *DeviceFarm) GetJobRequest(input *GetJobInput) (req *request.Request, output *GetJobOutput) { + op := &request.Operation{ + Name: opGetJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetJobInput{} + } + + req = c.newRequest(op, input, output) + output = &GetJobOutput{} + req.Data = output + return +} + +// Gets information about a job. +func (c *DeviceFarm) GetJob(input *GetJobInput) (*GetJobOutput, error) { + req, out := c.GetJobRequest(input) + err := req.Send() + return out, err +} + +const opGetProject = "GetProject" + +// GetProjectRequest generates a request for the GetProject operation. +func (c *DeviceFarm) GetProjectRequest(input *GetProjectInput) (req *request.Request, output *GetProjectOutput) { + op := &request.Operation{ + Name: opGetProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetProjectInput{} + } + + req = c.newRequest(op, input, output) + output = &GetProjectOutput{} + req.Data = output + return +} + +// Gets information about a project. +func (c *DeviceFarm) GetProject(input *GetProjectInput) (*GetProjectOutput, error) { + req, out := c.GetProjectRequest(input) + err := req.Send() + return out, err +} + +const opGetRun = "GetRun" + +// GetRunRequest generates a request for the GetRun operation. +func (c *DeviceFarm) GetRunRequest(input *GetRunInput) (req *request.Request, output *GetRunOutput) { + op := &request.Operation{ + Name: opGetRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRunInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRunOutput{} + req.Data = output + return +} + +// Gets information about a run. +func (c *DeviceFarm) GetRun(input *GetRunInput) (*GetRunOutput, error) { + req, out := c.GetRunRequest(input) + err := req.Send() + return out, err +} + +const opGetSuite = "GetSuite" + +// GetSuiteRequest generates a request for the GetSuite operation. 
+func (c *DeviceFarm) GetSuiteRequest(input *GetSuiteInput) (req *request.Request, output *GetSuiteOutput) { + op := &request.Operation{ + Name: opGetSuite, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSuiteInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSuiteOutput{} + req.Data = output + return +} + +// Gets information about a suite. +func (c *DeviceFarm) GetSuite(input *GetSuiteInput) (*GetSuiteOutput, error) { + req, out := c.GetSuiteRequest(input) + err := req.Send() + return out, err +} + +const opGetTest = "GetTest" + +// GetTestRequest generates a request for the GetTest operation. +func (c *DeviceFarm) GetTestRequest(input *GetTestInput) (req *request.Request, output *GetTestOutput) { + op := &request.Operation{ + Name: opGetTest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTestInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTestOutput{} + req.Data = output + return +} + +// Gets information about a test. +func (c *DeviceFarm) GetTest(input *GetTestInput) (*GetTestOutput, error) { + req, out := c.GetTestRequest(input) + err := req.Send() + return out, err +} + +const opGetUpload = "GetUpload" + +// GetUploadRequest generates a request for the GetUpload operation. +func (c *DeviceFarm) GetUploadRequest(input *GetUploadInput) (req *request.Request, output *GetUploadOutput) { + op := &request.Operation{ + Name: opGetUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &GetUploadOutput{} + req.Data = output + return +} + +// Gets information about an upload. +func (c *DeviceFarm) GetUpload(input *GetUploadInput) (*GetUploadOutput, error) { + req, out := c.GetUploadRequest(input) + err := req.Send() + return out, err +} + +const opListArtifacts = "ListArtifacts" + +// ListArtifactsRequest generates a request for the ListArtifacts operation. 
+func (c *DeviceFarm) ListArtifactsRequest(input *ListArtifactsInput) (req *request.Request, output *ListArtifactsOutput) { + op := &request.Operation{ + Name: opListArtifacts, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListArtifactsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListArtifactsOutput{} + req.Data = output + return +} + +// Gets information about artifacts. +func (c *DeviceFarm) ListArtifacts(input *ListArtifactsInput) (*ListArtifactsOutput, error) { + req, out := c.ListArtifactsRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListArtifactsPages(input *ListArtifactsInput, fn func(p *ListArtifactsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListArtifactsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListArtifactsOutput), lastPage) + }) +} + +const opListDevicePools = "ListDevicePools" + +// ListDevicePoolsRequest generates a request for the ListDevicePools operation. +func (c *DeviceFarm) ListDevicePoolsRequest(input *ListDevicePoolsInput) (req *request.Request, output *ListDevicePoolsOutput) { + op := &request.Operation{ + Name: opListDevicePools, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDevicePoolsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDevicePoolsOutput{} + req.Data = output + return +} + +// Gets information about device pools. 
+func (c *DeviceFarm) ListDevicePools(input *ListDevicePoolsInput) (*ListDevicePoolsOutput, error) { + req, out := c.ListDevicePoolsRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListDevicePoolsPages(input *ListDevicePoolsInput, fn func(p *ListDevicePoolsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDevicePoolsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDevicePoolsOutput), lastPage) + }) +} + +const opListDevices = "ListDevices" + +// ListDevicesRequest generates a request for the ListDevices operation. +func (c *DeviceFarm) ListDevicesRequest(input *ListDevicesInput) (req *request.Request, output *ListDevicesOutput) { + op := &request.Operation{ + Name: opListDevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDevicesOutput{} + req.Data = output + return +} + +// Gets information about unique device types. +func (c *DeviceFarm) ListDevices(input *ListDevicesInput) (*ListDevicesOutput, error) { + req, out := c.ListDevicesRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListDevicesPages(input *ListDevicesInput, fn func(p *ListDevicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDevicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDevicesOutput), lastPage) + }) +} + +const opListJobs = "ListJobs" + +// ListJobsRequest generates a request for the ListJobs operation. 
+func (c *DeviceFarm) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { + op := &request.Operation{ + Name: opListJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListJobsOutput{} + req.Data = output + return +} + +// Gets information about jobs. +func (c *DeviceFarm) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListJobsPages(input *ListJobsInput, fn func(p *ListJobsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListJobsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListJobsOutput), lastPage) + }) +} + +const opListProjects = "ListProjects" + +// ListProjectsRequest generates a request for the ListProjects operation. +func (c *DeviceFarm) ListProjectsRequest(input *ListProjectsInput) (req *request.Request, output *ListProjectsOutput) { + op := &request.Operation{ + Name: opListProjects, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListProjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListProjectsOutput{} + req.Data = output + return +} + +// Gets information about projects. 
+func (c *DeviceFarm) ListProjects(input *ListProjectsInput) (*ListProjectsOutput, error) { + req, out := c.ListProjectsRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListProjectsPages(input *ListProjectsInput, fn func(p *ListProjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListProjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListProjectsOutput), lastPage) + }) +} + +const opListRuns = "ListRuns" + +// ListRunsRequest generates a request for the ListRuns operation. +func (c *DeviceFarm) ListRunsRequest(input *ListRunsInput) (req *request.Request, output *ListRunsOutput) { + op := &request.Operation{ + Name: opListRuns, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRunsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRunsOutput{} + req.Data = output + return +} + +// Gets information about runs. +func (c *DeviceFarm) ListRuns(input *ListRunsInput) (*ListRunsOutput, error) { + req, out := c.ListRunsRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListRunsPages(input *ListRunsInput, fn func(p *ListRunsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRunsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRunsOutput), lastPage) + }) +} + +const opListSamples = "ListSamples" + +// ListSamplesRequest generates a request for the ListSamples operation. 
+func (c *DeviceFarm) ListSamplesRequest(input *ListSamplesInput) (req *request.Request, output *ListSamplesOutput) { + op := &request.Operation{ + Name: opListSamples, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSamplesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSamplesOutput{} + req.Data = output + return +} + +// Gets information about samples. +func (c *DeviceFarm) ListSamples(input *ListSamplesInput) (*ListSamplesOutput, error) { + req, out := c.ListSamplesRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListSamplesPages(input *ListSamplesInput, fn func(p *ListSamplesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSamplesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSamplesOutput), lastPage) + }) +} + +const opListSuites = "ListSuites" + +// ListSuitesRequest generates a request for the ListSuites operation. +func (c *DeviceFarm) ListSuitesRequest(input *ListSuitesInput) (req *request.Request, output *ListSuitesOutput) { + op := &request.Operation{ + Name: opListSuites, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSuitesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSuitesOutput{} + req.Data = output + return +} + +// Gets information about suites. 
+func (c *DeviceFarm) ListSuites(input *ListSuitesInput) (*ListSuitesOutput, error) { + req, out := c.ListSuitesRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListSuitesPages(input *ListSuitesInput, fn func(p *ListSuitesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSuitesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSuitesOutput), lastPage) + }) +} + +const opListTests = "ListTests" + +// ListTestsRequest generates a request for the ListTests operation. +func (c *DeviceFarm) ListTestsRequest(input *ListTestsInput) (req *request.Request, output *ListTestsOutput) { + op := &request.Operation{ + Name: opListTests, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTestsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTestsOutput{} + req.Data = output + return +} + +// Gets information about tests. +func (c *DeviceFarm) ListTests(input *ListTestsInput) (*ListTestsOutput, error) { + req, out := c.ListTestsRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListTestsPages(input *ListTestsInput, fn func(p *ListTestsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTestsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTestsOutput), lastPage) + }) +} + +const opListUniqueProblems = "ListUniqueProblems" + +// ListUniqueProblemsRequest generates a request for the ListUniqueProblems operation. 
+func (c *DeviceFarm) ListUniqueProblemsRequest(input *ListUniqueProblemsInput) (req *request.Request, output *ListUniqueProblemsOutput) { + op := &request.Operation{ + Name: opListUniqueProblems, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListUniqueProblemsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUniqueProblemsOutput{} + req.Data = output + return +} + +// Gets information about unique problems. +func (c *DeviceFarm) ListUniqueProblems(input *ListUniqueProblemsInput) (*ListUniqueProblemsOutput, error) { + req, out := c.ListUniqueProblemsRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListUniqueProblemsPages(input *ListUniqueProblemsInput, fn func(p *ListUniqueProblemsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListUniqueProblemsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListUniqueProblemsOutput), lastPage) + }) +} + +const opListUploads = "ListUploads" + +// ListUploadsRequest generates a request for the ListUploads operation. +func (c *DeviceFarm) ListUploadsRequest(input *ListUploadsInput) (req *request.Request, output *ListUploadsOutput) { + op := &request.Operation{ + Name: opListUploads, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUploadsOutput{} + req.Data = output + return +} + +// Gets information about uploads. 
+func (c *DeviceFarm) ListUploads(input *ListUploadsInput) (*ListUploadsOutput, error) { + req, out := c.ListUploadsRequest(input) + err := req.Send() + return out, err +} + +func (c *DeviceFarm) ListUploadsPages(input *ListUploadsInput, fn func(p *ListUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListUploadsOutput), lastPage) + }) +} + +const opScheduleRun = "ScheduleRun" + +// ScheduleRunRequest generates a request for the ScheduleRun operation. +func (c *DeviceFarm) ScheduleRunRequest(input *ScheduleRunInput) (req *request.Request, output *ScheduleRunOutput) { + op := &request.Operation{ + Name: opScheduleRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScheduleRunInput{} + } + + req = c.newRequest(op, input, output) + output = &ScheduleRunOutput{} + req.Data = output + return +} + +// Schedules a run. +func (c *DeviceFarm) ScheduleRun(input *ScheduleRunInput) (*ScheduleRunOutput, error) { + req, out := c.ScheduleRunRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDevicePool = "UpdateDevicePool" + +// UpdateDevicePoolRequest generates a request for the UpdateDevicePool operation. +func (c *DeviceFarm) UpdateDevicePoolRequest(input *UpdateDevicePoolInput) (req *request.Request, output *UpdateDevicePoolOutput) { + op := &request.Operation{ + Name: opUpdateDevicePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDevicePoolInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDevicePoolOutput{} + req.Data = output + return +} + +// Modifies the name, description, and rules in a device pool given the attributes +// and the pool ARN. Rule updates are all-or-nothing, meaning they can only +// be updated as a whole (or not at all). 
+func (c *DeviceFarm) UpdateDevicePool(input *UpdateDevicePoolInput) (*UpdateDevicePoolOutput, error) { + req, out := c.UpdateDevicePoolRequest(input) + err := req.Send() + return out, err +} + +const opUpdateProject = "UpdateProject" + +// UpdateProjectRequest generates a request for the UpdateProject operation. +func (c *DeviceFarm) UpdateProjectRequest(input *UpdateProjectInput) (req *request.Request, output *UpdateProjectOutput) { + op := &request.Operation{ + Name: opUpdateProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateProjectInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateProjectOutput{} + req.Data = output + return +} + +// Modifies the specified project name, given the project ARN and a new name. +func (c *DeviceFarm) UpdateProject(input *UpdateProjectInput) (*UpdateProjectOutput, error) { + req, out := c.UpdateProjectRequest(input) + err := req.Send() + return out, err +} + +// A container for account-level settings within AWS Device Farm. +type AccountSettings struct { + _ struct{} `type:"structure"` + + // The AWS account number specified in the AccountSettings container. + AwsAccountNumber *string `locationName:"awsAccountNumber" min:"2" type:"string"` + + // Returns the unmetered devices you have purchased. + UnmeteredDevices map[string]*int64 `locationName:"unmeteredDevices" type:"map"` +} + +// String returns the string representation +func (s AccountSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountSettings) GoString() string { + return s.String() +} + +// Represents the output of a test. Examples of artifacts include logs and screenshots. +type Artifact struct { + _ struct{} `type:"structure"` + + // The artifact's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The artifact's file extension. + Extension *string `locationName:"extension" type:"string"` + + // The artifact's name. 
+ Name *string `locationName:"name" type:"string"` + + // The artifact's type. + // + // Allowed values include the following: + // + // APPIUM_JAVA_OUTPUT: The Appium Java output type. + // + // APPIUM_JAVA_XML_OUTPUT: The Appium Java XML output type. + // + // APPIUM_PYTHON_OUTPUT: The Appium Python output type. + // + // APPIUM_PYTHON_XML_OUTPUT: The Appium Python XML output type. + // + // APPIUM_SERVER_OUTPUT: The Appium server output type. + // + // AUTOMATION_OUTPUT: The automation output type. + // + // CALABASH_JSON_OUTPUT: The Calabash JSON output type. + // + // CALABASH_JAVA_XML_OUTPUT: The Calabash Java XML output type. + // + // CALABASH_PRETTY_OUTPUT: The Calabash pretty output type. + // + // CALABASH_STANDARD_OUTPUT: The Calabash standard output type. + // + // DEVICE_LOG: The device log type. + // + // EXERCISER_MONKEY_OUTPUT: For Android, the artifact (log) generated by an + // Android fuzz test. + // + // INSTRUMENTATION_OUTPUT: The instrumentation type. + // + // MESSAGE_LOG: The message log type. + // + // RESULT_LOG: The result log type. + // + // SCREENSHOT: The screenshot type. + // + // SERVICE_LOG: The service log type. + // + // UNKNOWN: An unknown type. + Type *string `locationName:"type" type:"string" enum:"ArtifactType"` + + // The pre-signed Amazon S3 URL that can be used with a corresponding GET request + // to download the artifact's file. + Url *string `locationName:"url" type:"string"` +} + +// String returns the string representation +func (s Artifact) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Artifact) GoString() string { + return s.String() +} + +// Represents the amount of CPU that an app is using on a physical device. +// +// Note that this does not represent system-wide CPU usage. +type CPU struct { + _ struct{} `type:"structure"` + + // The CPU's architecture, for example x86 or ARM. 
+ Architecture *string `locationName:"architecture" type:"string"` + + // The clock speed of the device's CPU, expressed in hertz (Hz). For example, + // a 1.2 GHz CPU is expressed as 1200000000. + Clock *float64 `locationName:"clock" type:"double"` + + // The CPU's frequency. + Frequency *string `locationName:"frequency" type:"string"` +} + +// String returns the string representation +func (s CPU) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CPU) GoString() string { + return s.String() +} + +// Represents entity counters. +type Counters struct { + _ struct{} `type:"structure"` + + // The number of errored entities. + Errored *int64 `locationName:"errored" type:"integer"` + + // The number of failed entities. + Failed *int64 `locationName:"failed" type:"integer"` + + // The number of passed entities. + Passed *int64 `locationName:"passed" type:"integer"` + + // The number of skipped entities. + Skipped *int64 `locationName:"skipped" type:"integer"` + + // The number of stopped entities. + Stopped *int64 `locationName:"stopped" type:"integer"` + + // The total number of entities. + Total *int64 `locationName:"total" type:"integer"` + + // The number of warned entities. + Warned *int64 `locationName:"warned" type:"integer"` +} + +// String returns the string representation +func (s Counters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Counters) GoString() string { + return s.String() +} + +// Represents a request to the create device pool operation. +type CreateDevicePoolInput struct { + _ struct{} `type:"structure"` + + // The device pool's description. + Description *string `locationName:"description" type:"string"` + + // The device pool's name. + Name *string `locationName:"name" type:"string" required:"true"` + + // The ARN of the project for the device pool. 
+ ProjectArn *string `locationName:"projectArn" min:"32" type:"string" required:"true"` + + // The device pool's rules. + Rules []*Rule `locationName:"rules" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateDevicePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDevicePoolInput) GoString() string { + return s.String() +} + +// Represents the result of a create device pool request. +type CreateDevicePoolOutput struct { + _ struct{} `type:"structure"` + + // The newly created device pool. + DevicePool *DevicePool `locationName:"devicePool" type:"structure"` +} + +// String returns the string representation +func (s CreateDevicePoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDevicePoolOutput) GoString() string { + return s.String() +} + +// Represents a request to the create project operation. +type CreateProjectInput struct { + _ struct{} `type:"structure"` + + // The project's name. + Name *string `locationName:"name" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateProjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateProjectInput) GoString() string { + return s.String() +} + +// Represents the result of a create project request. +type CreateProjectOutput struct { + _ struct{} `type:"structure"` + + // The newly created project. + Project *Project `locationName:"project" type:"structure"` +} + +// String returns the string representation +func (s CreateProjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateProjectOutput) GoString() string { + return s.String() +} + +// Represents a request to the create upload operation. 
+type CreateUploadInput struct { + _ struct{} `type:"structure"` + + // The upload's content type (for example, "application/octet-stream"). + ContentType *string `locationName:"contentType" type:"string"` + + // The upload's file name. + Name *string `locationName:"name" type:"string" required:"true"` + + // The ARN of the project for the upload. + ProjectArn *string `locationName:"projectArn" min:"32" type:"string" required:"true"` + + // The upload's upload type. + // + // Must be one of the following values: + // + // ANDROID_APP: An Android upload. + // + // IOS_APP: An iOS upload. + // + // EXTERNAL_DATA: An external data upload. + // + // APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload. + // + // APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload. + // + // APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload. + // + // CALABASH_TEST_PACKAGE: A Calabash test package upload. + // + // INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload. + // + // UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload. + // + // XCTEST_TEST_PACKAGE: An XCode test package upload. + // + // APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload. + // + // APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package + // upload. + // + // APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload. + // + // Note If you call CreateUpload with WEB_APP specified, AWS Device Farm throws + // an ArgumentException error. + Type *string `locationName:"type" type:"string" required:"true" enum:"UploadType"` +} + +// String returns the string representation +func (s CreateUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUploadInput) GoString() string { + return s.String() +} + +// Represents the result of a create upload request. 
+type CreateUploadOutput struct { + _ struct{} `type:"structure"` + + // The newly created upload. + Upload *Upload `locationName:"upload" type:"structure"` +} + +// String returns the string representation +func (s CreateUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUploadOutput) GoString() string { + return s.String() +} + +// Represents a request to the delete device pool operation. +type DeleteDevicePoolInput struct { + _ struct{} `type:"structure"` + + // Represents the Amazon Resource Name (ARN) of the Device Farm device pool + // you wish to delete. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDevicePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDevicePoolInput) GoString() string { + return s.String() +} + +// Represents the result of a delete device pool request. +type DeleteDevicePoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDevicePoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDevicePoolOutput) GoString() string { + return s.String() +} + +// Represents a request to the delete project operation. +type DeleteProjectInput struct { + _ struct{} `type:"structure"` + + // Represents the Amazon Resource Name (ARN) of the Device Farm project you + // wish to delete. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteProjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteProjectInput) GoString() string { + return s.String() +} + +// Represents the result of a delete project request. 
+type DeleteProjectOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteProjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteProjectOutput) GoString() string { + return s.String() +} + +// Represents a request to the delete run operation. +type DeleteRunInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the run you wish to delete. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRunInput) GoString() string { + return s.String() +} + +// Represents the result of a delete run request. +type DeleteRunOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRunOutput) GoString() string { + return s.String() +} + +// Represents a request to the delete upload operation. +type DeleteUploadInput struct { + _ struct{} `type:"structure"` + + // Represents the Amazon Resource Name (ARN) of the Device Farm upload you wish + // to delete. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUploadInput) GoString() string { + return s.String() +} + +// Represents the result of a delete upload request. 
+type DeleteUploadOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUploadOutput) GoString() string { + return s.String() +} + +// Represents a device type that an app is tested against. +type Device struct { + _ struct{} `type:"structure"` + + // The device's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The device's carrier. + Carrier *string `locationName:"carrier" type:"string"` + + // Information about the device's CPU. + Cpu *CPU `locationName:"cpu" type:"structure"` + + // The device's form factor. + // + // Allowed values include: + // + // PHONE: The phone form factor. + // + // TABLET: The tablet form factor. + FormFactor *string `locationName:"formFactor" type:"string" enum:"DeviceFormFactor"` + + // The device's heap size, expressed in bytes. + HeapSize *int64 `locationName:"heapSize" type:"long"` + + // The device's image name. + Image *string `locationName:"image" type:"string"` + + // The device's manufacturer name. + Manufacturer *string `locationName:"manufacturer" type:"string"` + + // The device's total memory size, expressed in bytes. + Memory *int64 `locationName:"memory" type:"long"` + + // The device's model name. + Model *string `locationName:"model" type:"string"` + + // The device's display name. + Name *string `locationName:"name" type:"string"` + + // The device's operating system type. + Os *string `locationName:"os" type:"string"` + + // The device's platform. + // + // Allowed values include: + // + // ANDROID: The Android platform. + // + // IOS: The iOS platform. + Platform *string `locationName:"platform" type:"string" enum:"DevicePlatform"` + + // The device's radio. + Radio *string `locationName:"radio" type:"string"` + + // Represents the screen resolution of a device in height and width, expressed + // in pixels. 
+ Resolution *Resolution `locationName:"resolution" type:"structure"` +} + +// String returns the string representation +func (s Device) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Device) GoString() string { + return s.String() +} + +// Represents the total (metered or unmetered) minutes used by the resource +// to run tests. Contains the sum of minutes consumed by all children. +type DeviceMinutes struct { + _ struct{} `type:"structure"` + + // When specified, represents only the sum of metered minutes used by the resource + // to run tests. + Metered *float64 `locationName:"metered" type:"double"` + + // When specified, represents the total minutes used by the resource to run + // tests. + Total *float64 `locationName:"total" type:"double"` + + // When specified, represents only the sum of unmetered minutes used by the + // resource to run tests. + Unmetered *float64 `locationName:"unmetered" type:"double"` +} + +// String returns the string representation +func (s DeviceMinutes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeviceMinutes) GoString() string { + return s.String() +} + +// Represents a collection of device types. +type DevicePool struct { + _ struct{} `type:"structure"` + + // The device pool's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The device pool's description. + Description *string `locationName:"description" type:"string"` + + // The device pool's name. + Name *string `locationName:"name" type:"string"` + + // Information about the device pool's rules. + Rules []*Rule `locationName:"rules" type:"list"` + + // The device pool's type. + // + // Allowed values include: + // + // CURATED: A device pool that is created and managed by AWS Device Farm. + // + // PRIVATE: A device pool that is created and managed by the device pool developer. 
+ Type *string `locationName:"type" type:"string" enum:"DevicePoolType"` +} + +// String returns the string representation +func (s DevicePool) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DevicePool) GoString() string { + return s.String() +} + +// Represents a device pool compatibility result. +type DevicePoolCompatibilityResult struct { + _ struct{} `type:"structure"` + + // Whether the result was compatible with the device pool. + Compatible *bool `locationName:"compatible" type:"boolean"` + + // Represents a device type that an app is tested against. + Device *Device `locationName:"device" type:"structure"` + + // Information about the compatibility. + IncompatibilityMessages []*IncompatibilityMessage `locationName:"incompatibilityMessages" type:"list"` +} + +// String returns the string representation +func (s DevicePoolCompatibilityResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DevicePoolCompatibilityResult) GoString() string { + return s.String() +} + +type GetAccountSettingsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSettingsInput) GoString() string { + return s.String() +} + +type GetAccountSettingsOutput struct { + _ struct{} `type:"structure"` + + // A container for account-level settings within AWS Device Farm. + AccountSettings *AccountSettings `locationName:"accountSettings" type:"structure"` +} + +// String returns the string representation +func (s GetAccountSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSettingsOutput) GoString() string { + return s.String() +} + +// Represents a request to the get device request. 
+type GetDeviceInput struct { + _ struct{} `type:"structure"` + + // The device type's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeviceInput) GoString() string { + return s.String() +} + +// Represents the result of a get device request. +type GetDeviceOutput struct { + _ struct{} `type:"structure"` + + // Represents a device type that an app is tested against. + Device *Device `locationName:"device" type:"structure"` +} + +// String returns the string representation +func (s GetDeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeviceOutput) GoString() string { + return s.String() +} + +// Represents a request to the get device pool compatibility operation. +type GetDevicePoolCompatibilityInput struct { + _ struct{} `type:"structure"` + + // The ARN of the app that is associated with the specified device pool. + AppArn *string `locationName:"appArn" min:"32" type:"string"` + + // The device pool's ARN. + DevicePoolArn *string `locationName:"devicePoolArn" min:"32" type:"string" required:"true"` + + // The test type for the specified device pool. + // + // Allowed values include the following: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. + // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. 
+ // + // XCTEST: The XCode test type. + // + // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps. + // + // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps. + // + // APPIUM_WEB_PYTHON: The Appium Python type for Web apps. + TestType *string `locationName:"testType" type:"string" enum:"TestType"` +} + +// String returns the string representation +func (s GetDevicePoolCompatibilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDevicePoolCompatibilityInput) GoString() string { + return s.String() +} + +// Represents the result of describe device pool compatibility request. +type GetDevicePoolCompatibilityOutput struct { + _ struct{} `type:"structure"` + + // Information about compatible devices. + CompatibleDevices []*DevicePoolCompatibilityResult `locationName:"compatibleDevices" type:"list"` + + // Information about incompatible devices. + IncompatibleDevices []*DevicePoolCompatibilityResult `locationName:"incompatibleDevices" type:"list"` +} + +// String returns the string representation +func (s GetDevicePoolCompatibilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDevicePoolCompatibilityOutput) GoString() string { + return s.String() +} + +// Represents a request to the get device pool operation. +type GetDevicePoolInput struct { + _ struct{} `type:"structure"` + + // The device pool's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDevicePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDevicePoolInput) GoString() string { + return s.String() +} + +// Represents the result of a get device pool request. +type GetDevicePoolOutput struct { + _ struct{} `type:"structure"` + + // Represents a collection of device types. 
+ DevicePool *DevicePool `locationName:"devicePool" type:"structure"` +} + +// String returns the string representation +func (s GetDevicePoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDevicePoolOutput) GoString() string { + return s.String() +} + +// Represents a request to the get job operation. +type GetJobInput struct { + _ struct{} `type:"structure"` + + // The job's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobInput) GoString() string { + return s.String() +} + +// Represents the result of a get job request. +type GetJobOutput struct { + _ struct{} `type:"structure"` + + // Represents a device. + Job *Job `locationName:"job" type:"structure"` +} + +// String returns the string representation +func (s GetJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobOutput) GoString() string { + return s.String() +} + +// Represents a request to the get project operation. +type GetProjectInput struct { + _ struct{} `type:"structure"` + + // The project's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetProjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetProjectInput) GoString() string { + return s.String() +} + +// Represents the result of a get project request. +type GetProjectOutput struct { + _ struct{} `type:"structure"` + + // Represents an operating-system neutral workspace for running and managing + // tests. 
+ Project *Project `locationName:"project" type:"structure"` +} + +// String returns the string representation +func (s GetProjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetProjectOutput) GoString() string { + return s.String() +} + +// Represents a request to the get run operation. +type GetRunInput struct { + _ struct{} `type:"structure"` + + // The run's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRunInput) GoString() string { + return s.String() +} + +// Represents the result of a get run request. +type GetRunOutput struct { + _ struct{} `type:"structure"` + + // Represents an app on a set of devices with a specific test and configuration. + Run *Run `locationName:"run" type:"structure"` +} + +// String returns the string representation +func (s GetRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRunOutput) GoString() string { + return s.String() +} + +// Represents a request to the get suite operation. +type GetSuiteInput struct { + _ struct{} `type:"structure"` + + // The suite's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSuiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSuiteInput) GoString() string { + return s.String() +} + +// Represents the result of a get suite request. +type GetSuiteOutput struct { + _ struct{} `type:"structure"` + + // Represents a collection of one or more tests. 
+ Suite *Suite `locationName:"suite" type:"structure"` +} + +// String returns the string representation +func (s GetSuiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSuiteOutput) GoString() string { + return s.String() +} + +// Represents a request to the get test operation. +type GetTestInput struct { + _ struct{} `type:"structure"` + + // The test's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTestInput) GoString() string { + return s.String() +} + +// Represents the result of a get test request. +type GetTestOutput struct { + _ struct{} `type:"structure"` + + // Represents a condition that is evaluated. + Test *Test `locationName:"test" type:"structure"` +} + +// String returns the string representation +func (s GetTestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTestOutput) GoString() string { + return s.String() +} + +// Represents a request to the get upload operation. +type GetUploadInput struct { + _ struct{} `type:"structure"` + + // The upload's ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUploadInput) GoString() string { + return s.String() +} + +// Represents the result of a get upload request. +type GetUploadOutput struct { + _ struct{} `type:"structure"` + + // An app or a set of one or more tests to upload or that have been uploaded. 
+ Upload *Upload `locationName:"upload" type:"structure"` +} + +// String returns the string representation +func (s GetUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUploadOutput) GoString() string { + return s.String() +} + +// Represents information about incompatibility. +type IncompatibilityMessage struct { + _ struct{} `type:"structure"` + + // A message about the incompatibility. + Message *string `locationName:"message" type:"string"` + + // The type of incompatibility. + // + // Allowed values include: + // + // ARN: The ARN. + // + // FORM_FACTOR: The form factor (for example, phone or tablet). + // + // MANUFACTURER: The manufacturer. + // + // PLATFORM: The platform (for example, Android or iOS). + Type *string `locationName:"type" type:"string" enum:"DeviceAttribute"` +} + +// String returns the string representation +func (s IncompatibilityMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncompatibilityMessage) GoString() string { + return s.String() +} + +// Represents a device. +type Job struct { + _ struct{} `type:"structure"` + + // The job's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The job's result counters. + Counters *Counters `locationName:"counters" type:"structure"` + + // When the job was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // Represents a device type that an app is tested against. + Device *Device `locationName:"device" type:"structure"` + + // Represents the total (metered or unmetered) minutes used by the job. + DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"` + + // A message about the job's result. + Message *string `locationName:"message" type:"string"` + + // The job's name. + Name *string `locationName:"name" type:"string"` + + // The job's result. 
+ // + // Allowed values include: + // + // ERRORED: An error condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // STOPPED: A stopped condition. + // + // PASSED: A passing condition. + // + // PENDING: A pending condition. + // + // WARNED: A warning condition. + Result *string `locationName:"result" type:"string" enum:"ExecutionResult"` + + // The job's start time. + Started *time.Time `locationName:"started" type:"timestamp" timestampFormat:"unix"` + + // The job's status. + // + // Allowed values include: + // + // COMPLETED: A completed status. + // + // PENDING: A pending status. + // + // PROCESSING: A processing status. + // + // RUNNING: A running status. + // + // SCHEDULING: A scheduling status. + Status *string `locationName:"status" type:"string" enum:"ExecutionStatus"` + + // The job's stop time. + Stopped *time.Time `locationName:"stopped" type:"timestamp" timestampFormat:"unix"` + + // The job's type. + // + // Allowed values include the following: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. + // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. + // + // XCTEST: The XCode test type. + // + // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps. + // + // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps. + // + // APPIUM_WEB_PYTHON: The Appium Python type for Web apps. 
+ Type *string `locationName:"type" type:"string" enum:"TestType"` +} + +// String returns the string representation +func (s Job) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Job) GoString() string { + return s.String() +} + +// Represents a request to the list artifacts operation. +type ListArtifactsInput struct { + _ struct{} `type:"structure"` + + // The Run, Job, Suite, or Test ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // The artifacts' type. + // + // Allowed values include: + // + // FILE: The artifacts are files. LOG: The artifacts are logs. SCREENSHOT: + // The artifacts are screenshots. + Type *string `locationName:"type" type:"string" required:"true" enum:"ArtifactCategory"` +} + +// String returns the string representation +func (s ListArtifactsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListArtifactsInput) GoString() string { + return s.String() +} + +// Represents the result of a list artifacts operation. +type ListArtifactsOutput struct { + _ struct{} `type:"structure"` + + // Information about the artifacts. + Artifacts []*Artifact `locationName:"artifacts" type:"list"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. 
+ NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListArtifactsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListArtifactsOutput) GoString() string { + return s.String() +} + +// Represents the result of a list device pools request. +type ListDevicePoolsInput struct { + _ struct{} `type:"structure"` + + // The project ARN. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // The device pools' type. + // + // Allowed values include: + // + // CURATED: A device pool that is created and managed by AWS Device Farm. + // + // PRIVATE: A device pool that is created and managed by the device pool developer. + Type *string `locationName:"type" type:"string" enum:"DevicePoolType"` +} + +// String returns the string representation +func (s ListDevicePoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDevicePoolsInput) GoString() string { + return s.String() +} + +// Represents the result of a list device pools request. +type ListDevicePoolsOutput struct { + _ struct{} `type:"structure"` + + // Information about the device pools. + DevicePools []*DevicePool `locationName:"devicePools" type:"list"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. 
+ NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListDevicePoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDevicePoolsOutput) GoString() string { + return s.String() +} + +// Represents the result of a list devices request. +type ListDevicesInput struct { + _ struct{} `type:"structure"` + + // The device types' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListDevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDevicesInput) GoString() string { + return s.String() +} + +// Represents the result of a list devices operation. +type ListDevicesOutput struct { + _ struct{} `type:"structure"` + + // Information about the devices. + Devices []*Device `locationName:"devices" type:"list"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListDevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDevicesOutput) GoString() string { + return s.String() +} + +// Represents a request to the list jobs operation. +type ListJobsInput struct { + _ struct{} `type:"structure"` + + // The jobs' ARNs. 
+ Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsInput) GoString() string { + return s.String() +} + +// Represents the result of a list jobs request. +type ListJobsOutput struct { + _ struct{} `type:"structure"` + + // Information about the jobs. + Jobs []*Job `locationName:"jobs" type:"list"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list projects operation. +type ListProjectsInput struct { + _ struct{} `type:"structure"` + + // The projects' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListProjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProjectsInput) GoString() string { + return s.String() +} + +// Represents the result of a list projects request. 
+type ListProjectsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the projects. + Projects []*Project `locationName:"projects" type:"list"` +} + +// String returns the string representation +func (s ListProjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProjectsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list runs operation. +type ListRunsInput struct { + _ struct{} `type:"structure"` + + // The runs' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListRunsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRunsInput) GoString() string { + return s.String() +} + +// Represents the result of a list runs request. +type ListRunsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the runs. 
+ Runs []*Run `locationName:"runs" type:"list"` +} + +// String returns the string representation +func (s ListRunsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRunsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list samples operation. +type ListSamplesInput struct { + _ struct{} `type:"structure"` + + // The samples' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListSamplesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSamplesInput) GoString() string { + return s.String() +} + +// Represents the result of a list samples request. +type ListSamplesOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the samples. + Samples []*Sample `locationName:"samples" type:"list"` +} + +// String returns the string representation +func (s ListSamplesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSamplesOutput) GoString() string { + return s.String() +} + +// Represents a request to the list suites operation. +type ListSuitesInput struct { + _ struct{} `type:"structure"` + + // The suites' ARNs. 
+ Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListSuitesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSuitesInput) GoString() string { + return s.String() +} + +// Represents the result of a list suites request. +type ListSuitesOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the suites. + Suites []*Suite `locationName:"suites" type:"list"` +} + +// String returns the string representation +func (s ListSuitesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSuitesOutput) GoString() string { + return s.String() +} + +// Represents a request to the list tests operation. +type ListTestsInput struct { + _ struct{} `type:"structure"` + + // The tests' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. 
+ NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListTestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTestsInput) GoString() string { + return s.String() +} + +// Represents the result of a list tests request. +type ListTestsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the tests. + Tests []*Test `locationName:"tests" type:"list"` +} + +// String returns the string representation +func (s ListTestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTestsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list unique problems operation. +type ListUniqueProblemsInput struct { + _ struct{} `type:"structure"` + + // The unique problems' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListUniqueProblemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUniqueProblemsInput) GoString() string { + return s.String() +} + +// Represents the result of a list unique problems request. 
+type ListUniqueProblemsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the unique problems. + // + // Allowed values include: + // + // ERRORED: An error condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // STOPPED: A stopped condition. + // + // PASSED: A passing condition. + // + // PENDING: A pending condition. + // + // WARNED: A warning condition. + UniqueProblems map[string][]*UniqueProblem `locationName:"uniqueProblems" type:"map"` +} + +// String returns the string representation +func (s ListUniqueProblemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUniqueProblemsOutput) GoString() string { + return s.String() +} + +// Represents a request to the list uploads operation. +type ListUploadsInput struct { + _ struct{} `type:"structure"` + + // The uploads' ARNs. + Arn *string `locationName:"arn" min:"32" type:"string" required:"true"` + + // An identifier that was returned from the previous call to this operation, + // which can be used to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUploadsInput) GoString() string { + return s.String() +} + +// Represents the result of a list uploads request. 
+type ListUploadsOutput struct { + _ struct{} `type:"structure"` + + // If the number of items that are returned is significantly large, this is + // an identifier that is also returned, which can be used in a subsequent call + // to this operation to return the next set of items in the list. + NextToken *string `locationName:"nextToken" min:"4" type:"string"` + + // Information about the uploads. + Uploads []*Upload `locationName:"uploads" type:"list"` +} + +// String returns the string representation +func (s ListUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUploadsOutput) GoString() string { + return s.String() +} + +// Represents a latitude and longitude pair, expressed in geographic coordinate +// system degrees (for example 47.6204, -122.3491). +// +// Elevation is currently not supported. +type Location struct { + _ struct{} `type:"structure"` + + // The latitude. + Latitude *float64 `locationName:"latitude" type:"double" required:"true"` + + // The longitude. + Longitude *float64 `locationName:"longitude" type:"double" required:"true"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// Represents a specific warning or failure. +type Problem struct { + _ struct{} `type:"structure"` + + // Information about the associated device. + Device *Device `locationName:"device" type:"structure"` + + // Information about the associated job. + Job *ProblemDetail `locationName:"job" type:"structure"` + + // A message about the problem's result. + Message *string `locationName:"message" type:"string"` + + // The problem's result. + // + // Allowed values include: + // + // ERRORED: An error condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. 
+ // + // STOPPED: A stopped condition. + // + // PASSED: A passing condition. + // + // PENDING: A pending condition. + // + // WARNED: A warning condition. + Result *string `locationName:"result" type:"string" enum:"ExecutionResult"` + + // Information about the associated run. + Run *ProblemDetail `locationName:"run" type:"structure"` + + // Information about the associated suite. + Suite *ProblemDetail `locationName:"suite" type:"structure"` + + // Information about the associated test. + Test *ProblemDetail `locationName:"test" type:"structure"` +} + +// String returns the string representation +func (s Problem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Problem) GoString() string { + return s.String() +} + +// Information about a problem detail. +type ProblemDetail struct { + _ struct{} `type:"structure"` + + // The problem detail's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The problem detail's name. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s ProblemDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProblemDetail) GoString() string { + return s.String() +} + +// Represents an operating-system neutral workspace for running and managing +// tests. +type Project struct { + _ struct{} `type:"structure"` + + // The project's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // When the project was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // The project's name. 
+ Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Project) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Project) GoString() string { + return s.String() +} + +// Represents the set of radios and their states on a device. Examples of radios +// include Wi-Fi, GPS, Bluetooth, and NFC. +type Radios struct { + _ struct{} `type:"structure"` + + // True if Bluetooth is enabled at the beginning of the test; otherwise, false. + Bluetooth *bool `locationName:"bluetooth" type:"boolean"` + + // True if GPS is enabled at the beginning of the test; otherwise, false. + Gps *bool `locationName:"gps" type:"boolean"` + + // True if NFC is enabled at the beginning of the test; otherwise, false. + Nfc *bool `locationName:"nfc" type:"boolean"` + + // True if Wi-Fi is enabled at the beginning of the test; otherwise, false. + Wifi *bool `locationName:"wifi" type:"boolean"` +} + +// String returns the string representation +func (s Radios) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Radios) GoString() string { + return s.String() +} + +// Represents the screen resolution of a device in height and width, expressed +// in pixels. +type Resolution struct { + _ struct{} `type:"structure"` + + // The screen resolution's height, expressed in pixels. + Height *int64 `locationName:"height" type:"integer"` + + // The screen resolution's width, expressed in pixels. + Width *int64 `locationName:"width" type:"integer"` +} + +// String returns the string representation +func (s Resolution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resolution) GoString() string { + return s.String() +} + +// Represents a condition for a device pool. +type Rule struct { + _ struct{} `type:"structure"` + + // The rule's attribute. 
+ // + // Allowed values include: + // + // ARN: The ARN. + // + // FORM_FACTOR: The form factor (for example, phone or tablet). + // + // MANUFACTURER: The manufacturer. + // + // PLATFORM: The platform (for example, Android or iOS). + Attribute *string `locationName:"attribute" type:"string" enum:"DeviceAttribute"` + + // The rule's operator. + // + // EQUALS: The equals operator. + // + // GREATER_THAN: The greater-than operator. + // + // IN: The in operator. + // + // LESS_THAN: The less-than operator. + // + // NOT_IN: The not-in operator. + Operator *string `locationName:"operator" type:"string" enum:"RuleOperator"` + + // The rule's value. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Represents an app on a set of devices with a specific test and configuration. +type Run struct { + _ struct{} `type:"structure"` + + // The run's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // Specifies the billing method for a test run: metered or unmetered. If the + // parameter is not specified, the default value is unmetered. + BillingMethod *string `locationName:"billingMethod" type:"string" enum:"BillingMethod"` + + // The total number of completed jobs. + CompletedJobs *int64 `locationName:"completedJobs" type:"integer"` + + // The run's result counters. + Counters *Counters `locationName:"counters" type:"structure"` + + // When the run was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // Represents the total (metered or unmetered) minutes used by the test run. + DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"` + + // A message about the run's result. + Message *string `locationName:"message" type:"string"` + + // The run's name. 
+ Name *string `locationName:"name" type:"string"` + + // The run's platform. + // + // Allowed values include: + // + // ANDROID: The Android platform. + // + // IOS: The iOS platform. + Platform *string `locationName:"platform" type:"string" enum:"DevicePlatform"` + + // The run's result. + // + // Allowed values include: + // + // ERRORED: An error condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // STOPPED: A stopped condition. + // + // PASSED: A passing condition. + // + // PENDING: A pending condition. + // + // WARNED: A warning condition. + Result *string `locationName:"result" type:"string" enum:"ExecutionResult"` + + // The run's start time. + Started *time.Time `locationName:"started" type:"timestamp" timestampFormat:"unix"` + + // The run's status. + // + // Allowed values include: + // + // COMPLETED: A completed status. + // + // PENDING: A pending status. + // + // PROCESSING: A processing status. + // + // RUNNING: A running status. + // + // SCHEDULING: A scheduling status. + Status *string `locationName:"status" type:"string" enum:"ExecutionStatus"` + + // The run's stop time. + Stopped *time.Time `locationName:"stopped" type:"timestamp" timestampFormat:"unix"` + + // The total number of jobs for the run. + TotalJobs *int64 `locationName:"totalJobs" type:"integer"` + + // The run's type. + // + // Must be one of the following values: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. + // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. 
+ //
+ // XCTEST: The XCode test type.
+ //
+ // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.
+ //
+ // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.
+ //
+ // APPIUM_WEB_PYTHON: The Appium Python type for Web apps.
+ Type *string `locationName:"type" type:"string" enum:"TestType"`
+}
+
+// String returns the string representation
+func (s Run) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Run) GoString() string {
+ return s.String()
+}
+
+// Represents a sample of performance data.
+type Sample struct {
+ _ struct{} `type:"structure"`
+
+ // The sample's ARN.
+ Arn *string `locationName:"arn" min:"32" type:"string"`
+
+ // The sample's type.
+ //
+ // Must be one of the following values:
+ //
+ // CPU: A CPU sample type. This is expressed as the app processing CPU time
+ // (including child processes) as reported by process, as a percentage.
+ //
+ // MEMORY: A memory usage sample type. This is expressed as the total proportional
+ // set size of an app process, in kilobytes.
+ //
+ // NATIVE_AVG_DRAWTIME
+ //
+ // NATIVE_FPS
+ //
+ // NATIVE_FRAMES
+ //
+ // NATIVE_MAX_DRAWTIME
+ //
+ // NATIVE_MIN_DRAWTIME
+ //
+ // OPENGL_AVG_DRAWTIME
+ //
+ // OPENGL_FPS
+ //
+ // OPENGL_FRAMES
+ //
+ // OPENGL_MAX_DRAWTIME
+ //
+ // OPENGL_MIN_DRAWTIME
+ //
+ // RX
+ //
+ // RX_RATE: The total number of bytes per second (TCP and UDP) that are received,
+ // by app process.
+ //
+ // THREADS: A threads sample type. This is expressed as the total number of
+ // threads per app process.
+ //
+ // TX
+ //
+ // TX_RATE: The total number of bytes per second (TCP and UDP) that are sent,
+ // by app process.
+ Type *string `locationName:"type" type:"string" enum:"SampleType"`
+
+ // The pre-signed Amazon S3 URL that can be used with a corresponding GET request
+ // to download the sample's file.
+ Url *string `locationName:"url" type:"string"` +} + +// String returns the string representation +func (s Sample) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Sample) GoString() string { + return s.String() +} + +// Represents the settings for a run. Includes things like location, radio states, +// auxiliary apps, and network profiles. +type ScheduleRunConfiguration struct { + _ struct{} `type:"structure"` + + // A list of auxiliary apps for the run. + AuxiliaryApps []*string `locationName:"auxiliaryApps" type:"list"` + + // Specifies the billing method for a test run: metered or unmetered. If the + // parameter is not specified, the default value is unmetered. + BillingMethod *string `locationName:"billingMethod" type:"string" enum:"BillingMethod"` + + // The ARN of the extra data for the run. The extra data is a .zip file that + // AWS Device Farm will extract to external data for Android or the app's sandbox + // for iOS. + ExtraDataPackageArn *string `locationName:"extraDataPackageArn" min:"32" type:"string"` + + // Information about the locale that is used for the run. + Locale *string `locationName:"locale" type:"string"` + + // Information about the location that is used for the run. + Location *Location `locationName:"location" type:"structure"` + + // Reserved for internal use. + NetworkProfileArn *string `locationName:"networkProfileArn" min:"32" type:"string"` + + // Information about the radio states for the run. + Radios *Radios `locationName:"radios" type:"structure"` +} + +// String returns the string representation +func (s ScheduleRunConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleRunConfiguration) GoString() string { + return s.String() +} + +// Represents a request to the schedule run operation. 
+type ScheduleRunInput struct { + _ struct{} `type:"structure"` + + // The ARN of the app to schedule a run. + AppArn *string `locationName:"appArn" min:"32" type:"string"` + + // Information about the settings for the run to be scheduled. + Configuration *ScheduleRunConfiguration `locationName:"configuration" type:"structure"` + + // The ARN of the device pool for the run to be scheduled. + DevicePoolArn *string `locationName:"devicePoolArn" min:"32" type:"string" required:"true"` + + // The name for the run to be scheduled. + Name *string `locationName:"name" type:"string"` + + // The ARN of the project for the run to be scheduled. + ProjectArn *string `locationName:"projectArn" min:"32" type:"string" required:"true"` + + // Information about the test for the run to be scheduled. + Test *ScheduleRunTest `locationName:"test" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ScheduleRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleRunInput) GoString() string { + return s.String() +} + +// Represents the result of a schedule run request. +type ScheduleRunOutput struct { + _ struct{} `type:"structure"` + + // Information about the scheduled run. + Run *Run `locationName:"run" type:"structure"` +} + +// String returns the string representation +func (s ScheduleRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleRunOutput) GoString() string { + return s.String() +} + +// Represents additional test settings. +type ScheduleRunTest struct { + _ struct{} `type:"structure"` + + // The test's filter. + Filter *string `locationName:"filter" type:"string"` + + // The test's parameters, such as test framework parameters and fixture settings. + Parameters map[string]*string `locationName:"parameters" type:"map"` + + // The ARN of the uploaded test that will be run. 
+ TestPackageArn *string `locationName:"testPackageArn" min:"32" type:"string"` + + // The test's type. + // + // Must be one of the following values: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. + // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. + // + // XCTEST: The XCode test type. + // + // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps. + // + // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps. + // + // APPIUM_WEB_PYTHON: The Appium Python type for Web apps. + Type *string `locationName:"type" type:"string" required:"true" enum:"TestType"` +} + +// String returns the string representation +func (s ScheduleRunTest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleRunTest) GoString() string { + return s.String() +} + +// Represents a collection of one or more tests. +type Suite struct { + _ struct{} `type:"structure"` + + // The suite's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The suite's result counters. + Counters *Counters `locationName:"counters" type:"structure"` + + // When the suite was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // Represents the total (metered or unmetered) minutes used by the test suite. + DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"` + + // A message about the suite's result. + Message *string `locationName:"message" type:"string"` + + // The suite's name. 
+ Name *string `locationName:"name" type:"string"` + + // The suite's result. + // + // Allowed values include: + // + // ERRORED: An error condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // STOPPED: A stopped condition. + // + // PASSED: A passing condition. + // + // PENDING: A pending condition. + // + // WARNED: A warning condition. + Result *string `locationName:"result" type:"string" enum:"ExecutionResult"` + + // The suite's start time. + Started *time.Time `locationName:"started" type:"timestamp" timestampFormat:"unix"` + + // The suite's status. + // + // Allowed values include: + // + // COMPLETED: A completed status. + // + // PENDING: A pending status. + // + // PROCESSING: A processing status. + // + // RUNNING: A running status. + // + // SCHEDULING: A scheduling status. + Status *string `locationName:"status" type:"string" enum:"ExecutionStatus"` + + // The suite's stop time. + Stopped *time.Time `locationName:"stopped" type:"timestamp" timestampFormat:"unix"` + + // The suite's type. + // + // Must be one of the following values: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. + // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. + // + // XCTEST: The XCode test type. + // + // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps. + // + // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps. + // + // APPIUM_WEB_PYTHON: The Appium Python type for Web apps. 
+ Type *string `locationName:"type" type:"string" enum:"TestType"` +} + +// String returns the string representation +func (s Suite) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Suite) GoString() string { + return s.String() +} + +// Represents a condition that is evaluated. +type Test struct { + _ struct{} `type:"structure"` + + // The test's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The test's result counters. + Counters *Counters `locationName:"counters" type:"structure"` + + // When the test was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // Represents the total (metered or unmetered) minutes used by the test. + DeviceMinutes *DeviceMinutes `locationName:"deviceMinutes" type:"structure"` + + // A message about the test's result. + Message *string `locationName:"message" type:"string"` + + // The test's name. + Name *string `locationName:"name" type:"string"` + + // The test's result. + // + // Allowed values include: + // + // ERRORED: An error condition. + // + // FAILED: A failed condition. + // + // SKIPPED: A skipped condition. + // + // STOPPED: A stopped condition. + // + // PASSED: A passing condition. + // + // PENDING: A pending condition. + // + // WARNED: A warning condition. + Result *string `locationName:"result" type:"string" enum:"ExecutionResult"` + + // The test's start time. + Started *time.Time `locationName:"started" type:"timestamp" timestampFormat:"unix"` + + // The test's status. + // + // Allowed values include: + // + // COMPLETED: A completed status. + // + // PENDING: A pending status. + // + // PROCESSING: A processing status. + // + // RUNNING: A running status. + // + // SCHEDULING: A scheduling status. + Status *string `locationName:"status" type:"string" enum:"ExecutionStatus"` + + // The test's stop time. 
+ Stopped *time.Time `locationName:"stopped" type:"timestamp" timestampFormat:"unix"` + + // The test's type. + // + // Must be one of the following values: + // + // BUILTIN_FUZZ: The built-in fuzz type. + // + // BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android + // app, interacting with it and capturing screenshots at the same time. + // + // APPIUM_JAVA_JUNIT: The Appium Java JUnit type. + // + // APPIUM_JAVA_TESTNG: The Appium Java TestNG type. + // + // APPIUM_PYTHON: The Appium Python type. + // + // CALABASH: The Calabash type. + // + // INSTRUMENTATION: The Instrumentation type. + // + // UIAUTOMATION: The uiautomation type. + // + // UIAUTOMATOR: The uiautomator type. + // + // XCTEST: The XCode test type. + // + // APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps. + // + // APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps. + // + // APPIUM_WEB_PYTHON: The Appium Python type for Web apps. + Type *string `locationName:"type" type:"string" enum:"TestType"` +} + +// String returns the string representation +func (s Test) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Test) GoString() string { + return s.String() +} + +// A collection of one or more problems, grouped by their result. +type UniqueProblem struct { + _ struct{} `type:"structure"` + + // A message about the unique problems' result. + Message *string `locationName:"message" type:"string"` + + // Information about the problems. + Problems []*Problem `locationName:"problems" type:"list"` +} + +// String returns the string representation +func (s UniqueProblem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UniqueProblem) GoString() string { + return s.String() +} + +// Represents a request to the update device pool operation. 
+type UpdateDevicePoolInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the Device Farm device pool you wish to
+ // update.
+ Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+
+ // A description of the device pool you wish to update.
+ Description *string `locationName:"description" type:"string"`
+
+ // A string representing the name of the device pool you wish to update.
+ Name *string `locationName:"name" type:"string"`
+
+ // Represents the rules you wish to modify for the device pool. Updating rules
+ // is optional; however, if you choose to update rules for your request, the
+ // update will replace the existing rules.
+ Rules []*Rule `locationName:"rules" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateDevicePoolInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDevicePoolInput) GoString() string {
+ return s.String()
+}
+
+// Represents the result of an update device pool request.
+type UpdateDevicePoolOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents a collection of device types.
+ DevicePool *DevicePool `locationName:"devicePool" type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateDevicePoolOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateDevicePoolOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a request to the update project operation.
+type UpdateProjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the project whose name you wish to update.
+ Arn *string `locationName:"arn" min:"32" type:"string" required:"true"`
+
+ // A string representing the new name of the project that you are updating.
+ Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s UpdateProjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateProjectInput) GoString() string { + return s.String() +} + +// Represents the result of an update project request. +type UpdateProjectOutput struct { + _ struct{} `type:"structure"` + + // Represents an operating-system neutral workspace for running and managing + // tests. + Project *Project `locationName:"project" type:"structure"` +} + +// String returns the string representation +func (s UpdateProjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateProjectOutput) GoString() string { + return s.String() +} + +// An app or a set of one or more tests to upload or that have been uploaded. +type Upload struct { + _ struct{} `type:"structure"` + + // The upload's ARN. + Arn *string `locationName:"arn" min:"32" type:"string"` + + // The upload's content type (for example, "application/octet-stream"). + ContentType *string `locationName:"contentType" type:"string"` + + // When the upload was created. + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"unix"` + + // A message about the upload's result. + Message *string `locationName:"message" type:"string"` + + // The upload's metadata. For example, for Android, this contains information + // that is parsed from the manifest and is displayed in the AWS Device Farm + // console after the associated app is uploaded. + Metadata *string `locationName:"metadata" type:"string"` + + // The upload's file name. + Name *string `locationName:"name" type:"string"` + + // The upload's status. + // + // Must be one of the following values: + // + // FAILED: A failed status. + // + // INITIALIZED: An initialized status. + // + // PROCESSING: A processing status. 
+ // + // SUCCEEDED: A succeeded status. + Status *string `locationName:"status" type:"string" enum:"UploadStatus"` + + // The upload's type. + // + // Must be one of the following values: + // + // ANDROID_APP: An Android upload. + // + // IOS_APP: An iOS upload. + // + // EXTERNAL_DATA: An external data upload. + // + // APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload. + // + // APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload. + // + // APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload. + // + // CALABASH_TEST_PACKAGE: A Calabash test package upload. + // + // INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload. + // + // UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload. + // + // XCTEST_TEST_PACKAGE: An XCode test package upload. + // + // APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload. + // + // APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package + // upload. + // + // APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload. + Type *string `locationName:"type" type:"string" enum:"UploadType"` + + // The pre-signed Amazon S3 URL that was used to store a file through a corresponding + // PUT request. 
+ Url *string `locationName:"url" type:"string"` +} + +// String returns the string representation +func (s Upload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Upload) GoString() string { + return s.String() +} + +const ( + // @enum ArtifactCategory + ArtifactCategoryScreenshot = "SCREENSHOT" + // @enum ArtifactCategory + ArtifactCategoryFile = "FILE" + // @enum ArtifactCategory + ArtifactCategoryLog = "LOG" +) + +const ( + // @enum ArtifactType + ArtifactTypeUnknown = "UNKNOWN" + // @enum ArtifactType + ArtifactTypeScreenshot = "SCREENSHOT" + // @enum ArtifactType + ArtifactTypeDeviceLog = "DEVICE_LOG" + // @enum ArtifactType + ArtifactTypeMessageLog = "MESSAGE_LOG" + // @enum ArtifactType + ArtifactTypeResultLog = "RESULT_LOG" + // @enum ArtifactType + ArtifactTypeServiceLog = "SERVICE_LOG" + // @enum ArtifactType + ArtifactTypeWebkitLog = "WEBKIT_LOG" + // @enum ArtifactType + ArtifactTypeInstrumentationOutput = "INSTRUMENTATION_OUTPUT" + // @enum ArtifactType + ArtifactTypeExerciserMonkeyOutput = "EXERCISER_MONKEY_OUTPUT" + // @enum ArtifactType + ArtifactTypeCalabashJsonOutput = "CALABASH_JSON_OUTPUT" + // @enum ArtifactType + ArtifactTypeCalabashPrettyOutput = "CALABASH_PRETTY_OUTPUT" + // @enum ArtifactType + ArtifactTypeCalabashStandardOutput = "CALABASH_STANDARD_OUTPUT" + // @enum ArtifactType + ArtifactTypeCalabashJavaXmlOutput = "CALABASH_JAVA_XML_OUTPUT" + // @enum ArtifactType + ArtifactTypeAutomationOutput = "AUTOMATION_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumServerOutput = "APPIUM_SERVER_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumJavaOutput = "APPIUM_JAVA_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumJavaXmlOutput = "APPIUM_JAVA_XML_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumPythonOutput = "APPIUM_PYTHON_OUTPUT" + // @enum ArtifactType + ArtifactTypeAppiumPythonXmlOutput = "APPIUM_PYTHON_XML_OUTPUT" + // @enum ArtifactType + 
ArtifactTypeExplorerEventLog = "EXPLORER_EVENT_LOG" + // @enum ArtifactType + ArtifactTypeExplorerSummaryLog = "EXPLORER_SUMMARY_LOG" + // @enum ArtifactType + ArtifactTypeApplicationCrashReport = "APPLICATION_CRASH_REPORT" +) + +const ( + // @enum BillingMethod + BillingMethodMetered = "METERED" + // @enum BillingMethod + BillingMethodUnmetered = "UNMETERED" +) + +const ( + // @enum DeviceAttribute + DeviceAttributeArn = "ARN" + // @enum DeviceAttribute + DeviceAttributePlatform = "PLATFORM" + // @enum DeviceAttribute + DeviceAttributeFormFactor = "FORM_FACTOR" + // @enum DeviceAttribute + DeviceAttributeManufacturer = "MANUFACTURER" +) + +const ( + // @enum DeviceFormFactor + DeviceFormFactorPhone = "PHONE" + // @enum DeviceFormFactor + DeviceFormFactorTablet = "TABLET" +) + +const ( + // @enum DevicePlatform + DevicePlatformAndroid = "ANDROID" + // @enum DevicePlatform + DevicePlatformIos = "IOS" +) + +const ( + // @enum DevicePoolType + DevicePoolTypeCurated = "CURATED" + // @enum DevicePoolType + DevicePoolTypePrivate = "PRIVATE" +) + +const ( + // @enum ExecutionResult + ExecutionResultPending = "PENDING" + // @enum ExecutionResult + ExecutionResultPassed = "PASSED" + // @enum ExecutionResult + ExecutionResultWarned = "WARNED" + // @enum ExecutionResult + ExecutionResultFailed = "FAILED" + // @enum ExecutionResult + ExecutionResultSkipped = "SKIPPED" + // @enum ExecutionResult + ExecutionResultErrored = "ERRORED" + // @enum ExecutionResult + ExecutionResultStopped = "STOPPED" +) + +const ( + // @enum ExecutionStatus + ExecutionStatusPending = "PENDING" + // @enum ExecutionStatus + ExecutionStatusProcessing = "PROCESSING" + // @enum ExecutionStatus + ExecutionStatusScheduling = "SCHEDULING" + // @enum ExecutionStatus + ExecutionStatusRunning = "RUNNING" + // @enum ExecutionStatus + ExecutionStatusCompleted = "COMPLETED" +) + +const ( + // @enum RuleOperator + RuleOperatorEquals = "EQUALS" + // @enum RuleOperator + RuleOperatorLessThan = "LESS_THAN" + // @enum 
RuleOperator + RuleOperatorGreaterThan = "GREATER_THAN" + // @enum RuleOperator + RuleOperatorIn = "IN" + // @enum RuleOperator + RuleOperatorNotIn = "NOT_IN" +) + +const ( + // @enum SampleType + SampleTypeCpu = "CPU" + // @enum SampleType + SampleTypeMemory = "MEMORY" + // @enum SampleType + SampleTypeThreads = "THREADS" + // @enum SampleType + SampleTypeRxRate = "RX_RATE" + // @enum SampleType + SampleTypeTxRate = "TX_RATE" + // @enum SampleType + SampleTypeRx = "RX" + // @enum SampleType + SampleTypeTx = "TX" + // @enum SampleType + SampleTypeNativeFrames = "NATIVE_FRAMES" + // @enum SampleType + SampleTypeNativeFps = "NATIVE_FPS" + // @enum SampleType + SampleTypeNativeMinDrawtime = "NATIVE_MIN_DRAWTIME" + // @enum SampleType + SampleTypeNativeAvgDrawtime = "NATIVE_AVG_DRAWTIME" + // @enum SampleType + SampleTypeNativeMaxDrawtime = "NATIVE_MAX_DRAWTIME" + // @enum SampleType + SampleTypeOpenglFrames = "OPENGL_FRAMES" + // @enum SampleType + SampleTypeOpenglFps = "OPENGL_FPS" + // @enum SampleType + SampleTypeOpenglMinDrawtime = "OPENGL_MIN_DRAWTIME" + // @enum SampleType + SampleTypeOpenglAvgDrawtime = "OPENGL_AVG_DRAWTIME" + // @enum SampleType + SampleTypeOpenglMaxDrawtime = "OPENGL_MAX_DRAWTIME" +) + +const ( + // @enum TestType + TestTypeBuiltinFuzz = "BUILTIN_FUZZ" + // @enum TestType + TestTypeBuiltinExplorer = "BUILTIN_EXPLORER" + // @enum TestType + TestTypeAppiumJavaJunit = "APPIUM_JAVA_JUNIT" + // @enum TestType + TestTypeAppiumJavaTestng = "APPIUM_JAVA_TESTNG" + // @enum TestType + TestTypeAppiumPython = "APPIUM_PYTHON" + // @enum TestType + TestTypeAppiumWebJavaJunit = "APPIUM_WEB_JAVA_JUNIT" + // @enum TestType + TestTypeAppiumWebJavaTestng = "APPIUM_WEB_JAVA_TESTNG" + // @enum TestType + TestTypeAppiumWebPython = "APPIUM_WEB_PYTHON" + // @enum TestType + TestTypeCalabash = "CALABASH" + // @enum TestType + TestTypeInstrumentation = "INSTRUMENTATION" + // @enum TestType + TestTypeUiautomation = "UIAUTOMATION" + // @enum TestType + 
TestTypeUiautomator = "UIAUTOMATOR" + // @enum TestType + TestTypeXctest = "XCTEST" +) + +const ( + // @enum UploadStatus + UploadStatusInitialized = "INITIALIZED" + // @enum UploadStatus + UploadStatusProcessing = "PROCESSING" + // @enum UploadStatus + UploadStatusSucceeded = "SUCCEEDED" + // @enum UploadStatus + UploadStatusFailed = "FAILED" +) + +const ( + // @enum UploadType + UploadTypeAndroidApp = "ANDROID_APP" + // @enum UploadType + UploadTypeIosApp = "IOS_APP" + // @enum UploadType + UploadTypeWebApp = "WEB_APP" + // @enum UploadType + UploadTypeExternalData = "EXTERNAL_DATA" + // @enum UploadType + UploadTypeAppiumJavaJunitTestPackage = "APPIUM_JAVA_JUNIT_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumJavaTestngTestPackage = "APPIUM_JAVA_TESTNG_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumPythonTestPackage = "APPIUM_PYTHON_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumWebJavaJunitTestPackage = "APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumWebJavaTestngTestPackage = "APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE" + // @enum UploadType + UploadTypeAppiumWebPythonTestPackage = "APPIUM_WEB_PYTHON_TEST_PACKAGE" + // @enum UploadType + UploadTypeCalabashTestPackage = "CALABASH_TEST_PACKAGE" + // @enum UploadType + UploadTypeInstrumentationTestPackage = "INSTRUMENTATION_TEST_PACKAGE" + // @enum UploadType + UploadTypeUiautomationTestPackage = "UIAUTOMATION_TEST_PACKAGE" + // @enum UploadType + UploadTypeUiautomatorTestPackage = "UIAUTOMATOR_TEST_PACKAGE" + // @enum UploadType + UploadTypeXctestTestPackage = "XCTEST_TEST_PACKAGE" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface/interface.go 
1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/devicefarmiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,160 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package devicefarmiface provides an interface for the AWS Device Farm. +package devicefarmiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/devicefarm" +) + +// DeviceFarmAPI is the interface type for devicefarm.DeviceFarm. +type DeviceFarmAPI interface { + CreateDevicePoolRequest(*devicefarm.CreateDevicePoolInput) (*request.Request, *devicefarm.CreateDevicePoolOutput) + + CreateDevicePool(*devicefarm.CreateDevicePoolInput) (*devicefarm.CreateDevicePoolOutput, error) + + CreateProjectRequest(*devicefarm.CreateProjectInput) (*request.Request, *devicefarm.CreateProjectOutput) + + CreateProject(*devicefarm.CreateProjectInput) (*devicefarm.CreateProjectOutput, error) + + CreateUploadRequest(*devicefarm.CreateUploadInput) (*request.Request, *devicefarm.CreateUploadOutput) + + CreateUpload(*devicefarm.CreateUploadInput) (*devicefarm.CreateUploadOutput, error) + + DeleteDevicePoolRequest(*devicefarm.DeleteDevicePoolInput) (*request.Request, *devicefarm.DeleteDevicePoolOutput) + + DeleteDevicePool(*devicefarm.DeleteDevicePoolInput) (*devicefarm.DeleteDevicePoolOutput, error) + + DeleteProjectRequest(*devicefarm.DeleteProjectInput) (*request.Request, *devicefarm.DeleteProjectOutput) + + DeleteProject(*devicefarm.DeleteProjectInput) (*devicefarm.DeleteProjectOutput, error) + + DeleteRunRequest(*devicefarm.DeleteRunInput) (*request.Request, *devicefarm.DeleteRunOutput) + + DeleteRun(*devicefarm.DeleteRunInput) (*devicefarm.DeleteRunOutput, error) + + DeleteUploadRequest(*devicefarm.DeleteUploadInput) (*request.Request, *devicefarm.DeleteUploadOutput) + + DeleteUpload(*devicefarm.DeleteUploadInput) (*devicefarm.DeleteUploadOutput, error) + + 
GetAccountSettingsRequest(*devicefarm.GetAccountSettingsInput) (*request.Request, *devicefarm.GetAccountSettingsOutput) + + GetAccountSettings(*devicefarm.GetAccountSettingsInput) (*devicefarm.GetAccountSettingsOutput, error) + + GetDeviceRequest(*devicefarm.GetDeviceInput) (*request.Request, *devicefarm.GetDeviceOutput) + + GetDevice(*devicefarm.GetDeviceInput) (*devicefarm.GetDeviceOutput, error) + + GetDevicePoolRequest(*devicefarm.GetDevicePoolInput) (*request.Request, *devicefarm.GetDevicePoolOutput) + + GetDevicePool(*devicefarm.GetDevicePoolInput) (*devicefarm.GetDevicePoolOutput, error) + + GetDevicePoolCompatibilityRequest(*devicefarm.GetDevicePoolCompatibilityInput) (*request.Request, *devicefarm.GetDevicePoolCompatibilityOutput) + + GetDevicePoolCompatibility(*devicefarm.GetDevicePoolCompatibilityInput) (*devicefarm.GetDevicePoolCompatibilityOutput, error) + + GetJobRequest(*devicefarm.GetJobInput) (*request.Request, *devicefarm.GetJobOutput) + + GetJob(*devicefarm.GetJobInput) (*devicefarm.GetJobOutput, error) + + GetProjectRequest(*devicefarm.GetProjectInput) (*request.Request, *devicefarm.GetProjectOutput) + + GetProject(*devicefarm.GetProjectInput) (*devicefarm.GetProjectOutput, error) + + GetRunRequest(*devicefarm.GetRunInput) (*request.Request, *devicefarm.GetRunOutput) + + GetRun(*devicefarm.GetRunInput) (*devicefarm.GetRunOutput, error) + + GetSuiteRequest(*devicefarm.GetSuiteInput) (*request.Request, *devicefarm.GetSuiteOutput) + + GetSuite(*devicefarm.GetSuiteInput) (*devicefarm.GetSuiteOutput, error) + + GetTestRequest(*devicefarm.GetTestInput) (*request.Request, *devicefarm.GetTestOutput) + + GetTest(*devicefarm.GetTestInput) (*devicefarm.GetTestOutput, error) + + GetUploadRequest(*devicefarm.GetUploadInput) (*request.Request, *devicefarm.GetUploadOutput) + + GetUpload(*devicefarm.GetUploadInput) (*devicefarm.GetUploadOutput, error) + + ListArtifactsRequest(*devicefarm.ListArtifactsInput) (*request.Request, *devicefarm.ListArtifactsOutput) + 
+ ListArtifacts(*devicefarm.ListArtifactsInput) (*devicefarm.ListArtifactsOutput, error) + + ListArtifactsPages(*devicefarm.ListArtifactsInput, func(*devicefarm.ListArtifactsOutput, bool) bool) error + + ListDevicePoolsRequest(*devicefarm.ListDevicePoolsInput) (*request.Request, *devicefarm.ListDevicePoolsOutput) + + ListDevicePools(*devicefarm.ListDevicePoolsInput) (*devicefarm.ListDevicePoolsOutput, error) + + ListDevicePoolsPages(*devicefarm.ListDevicePoolsInput, func(*devicefarm.ListDevicePoolsOutput, bool) bool) error + + ListDevicesRequest(*devicefarm.ListDevicesInput) (*request.Request, *devicefarm.ListDevicesOutput) + + ListDevices(*devicefarm.ListDevicesInput) (*devicefarm.ListDevicesOutput, error) + + ListDevicesPages(*devicefarm.ListDevicesInput, func(*devicefarm.ListDevicesOutput, bool) bool) error + + ListJobsRequest(*devicefarm.ListJobsInput) (*request.Request, *devicefarm.ListJobsOutput) + + ListJobs(*devicefarm.ListJobsInput) (*devicefarm.ListJobsOutput, error) + + ListJobsPages(*devicefarm.ListJobsInput, func(*devicefarm.ListJobsOutput, bool) bool) error + + ListProjectsRequest(*devicefarm.ListProjectsInput) (*request.Request, *devicefarm.ListProjectsOutput) + + ListProjects(*devicefarm.ListProjectsInput) (*devicefarm.ListProjectsOutput, error) + + ListProjectsPages(*devicefarm.ListProjectsInput, func(*devicefarm.ListProjectsOutput, bool) bool) error + + ListRunsRequest(*devicefarm.ListRunsInput) (*request.Request, *devicefarm.ListRunsOutput) + + ListRuns(*devicefarm.ListRunsInput) (*devicefarm.ListRunsOutput, error) + + ListRunsPages(*devicefarm.ListRunsInput, func(*devicefarm.ListRunsOutput, bool) bool) error + + ListSamplesRequest(*devicefarm.ListSamplesInput) (*request.Request, *devicefarm.ListSamplesOutput) + + ListSamples(*devicefarm.ListSamplesInput) (*devicefarm.ListSamplesOutput, error) + + ListSamplesPages(*devicefarm.ListSamplesInput, func(*devicefarm.ListSamplesOutput, bool) bool) error + + ListSuitesRequest(*devicefarm.ListSuitesInput) 
(*request.Request, *devicefarm.ListSuitesOutput) + + ListSuites(*devicefarm.ListSuitesInput) (*devicefarm.ListSuitesOutput, error) + + ListSuitesPages(*devicefarm.ListSuitesInput, func(*devicefarm.ListSuitesOutput, bool) bool) error + + ListTestsRequest(*devicefarm.ListTestsInput) (*request.Request, *devicefarm.ListTestsOutput) + + ListTests(*devicefarm.ListTestsInput) (*devicefarm.ListTestsOutput, error) + + ListTestsPages(*devicefarm.ListTestsInput, func(*devicefarm.ListTestsOutput, bool) bool) error + + ListUniqueProblemsRequest(*devicefarm.ListUniqueProblemsInput) (*request.Request, *devicefarm.ListUniqueProblemsOutput) + + ListUniqueProblems(*devicefarm.ListUniqueProblemsInput) (*devicefarm.ListUniqueProblemsOutput, error) + + ListUniqueProblemsPages(*devicefarm.ListUniqueProblemsInput, func(*devicefarm.ListUniqueProblemsOutput, bool) bool) error + + ListUploadsRequest(*devicefarm.ListUploadsInput) (*request.Request, *devicefarm.ListUploadsOutput) + + ListUploads(*devicefarm.ListUploadsInput) (*devicefarm.ListUploadsOutput, error) + + ListUploadsPages(*devicefarm.ListUploadsInput, func(*devicefarm.ListUploadsOutput, bool) bool) error + + ScheduleRunRequest(*devicefarm.ScheduleRunInput) (*request.Request, *devicefarm.ScheduleRunOutput) + + ScheduleRun(*devicefarm.ScheduleRunInput) (*devicefarm.ScheduleRunOutput, error) + + UpdateDevicePoolRequest(*devicefarm.UpdateDevicePoolInput) (*request.Request, *devicefarm.UpdateDevicePoolOutput) + + UpdateDevicePool(*devicefarm.UpdateDevicePoolInput) (*devicefarm.UpdateDevicePoolOutput, error) + + UpdateProjectRequest(*devicefarm.UpdateProjectInput) (*request.Request, *devicefarm.UpdateProjectOutput) + + UpdateProject(*devicefarm.UpdateProjectInput) (*devicefarm.UpdateProjectOutput, error) +} + +var _ DeviceFarmAPI = (*devicefarm.DeviceFarm)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/examples_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,674 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package devicefarm_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/devicefarm" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDeviceFarm_CreateDevicePool() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.CreateDevicePoolInput{ + Name: aws.String("Name"), // Required + ProjectArn: aws.String("AmazonResourceName"), // Required + Rules: []*devicefarm.Rule{ // Required + { // Required + Attribute: aws.String("DeviceAttribute"), + Operator: aws.String("RuleOperator"), + Value: aws.String("String"), + }, + // More values... + }, + Description: aws.String("Message"), + } + resp, err := svc.CreateDevicePool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_CreateProject() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.CreateProjectInput{ + Name: aws.String("Name"), // Required + } + resp, err := svc.CreateProject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_CreateUpload() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.CreateUploadInput{ + Name: aws.String("Name"), // Required + ProjectArn: aws.String("AmazonResourceName"), // Required + Type: aws.String("UploadType"), // Required + ContentType: aws.String("ContentType"), + } + resp, err := svc.CreateUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_DeleteDevicePool() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.DeleteDevicePoolInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.DeleteDevicePool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_DeleteProject() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.DeleteProjectInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.DeleteProject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_DeleteRun() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.DeleteRunInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.DeleteRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_DeleteUpload() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.DeleteUploadInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.DeleteUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetAccountSettings() { + svc := devicefarm.New(session.New()) + + var params *devicefarm.GetAccountSettingsInput + resp, err := svc.GetAccountSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetDevice() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetDeviceInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetDevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetDevicePool() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetDevicePoolInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetDevicePool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_GetDevicePoolCompatibility() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetDevicePoolCompatibilityInput{ + DevicePoolArn: aws.String("AmazonResourceName"), // Required + AppArn: aws.String("AmazonResourceName"), + TestType: aws.String("TestType"), + } + resp, err := svc.GetDevicePoolCompatibility(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetJob() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetJobInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetProject() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetProjectInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetProject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetRun() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetRunInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_GetSuite() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetSuiteInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetSuite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetTest() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetTestInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetTest(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_GetUpload() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.GetUploadInput{ + Arn: aws.String("AmazonResourceName"), // Required + } + resp, err := svc.GetUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListArtifacts() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListArtifactsInput{ + Arn: aws.String("AmazonResourceName"), // Required + Type: aws.String("ArtifactCategory"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListArtifacts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_ListDevicePools() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListDevicePoolsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + Type: aws.String("DevicePoolType"), + } + resp, err := svc.ListDevicePools(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListDevices() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListDevicesInput{ + Arn: aws.String("AmazonResourceName"), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListDevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListJobs() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListJobsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListProjects() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListProjectsInput{ + Arn: aws.String("AmazonResourceName"), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListProjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_ListRuns() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListRunsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListRuns(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListSamples() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListSamplesInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListSamples(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListSuites() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListSuitesInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListSuites(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListTests() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListTestsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListTests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDeviceFarm_ListUniqueProblems() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListUniqueProblemsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListUniqueProblems(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ListUploads() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ListUploadsInput{ + Arn: aws.String("AmazonResourceName"), // Required + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListUploads(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_ScheduleRun() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.ScheduleRunInput{ + DevicePoolArn: aws.String("AmazonResourceName"), // Required + ProjectArn: aws.String("AmazonResourceName"), // Required + Test: &devicefarm.ScheduleRunTest{ // Required + Type: aws.String("TestType"), // Required + Filter: aws.String("Filter"), + Parameters: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + TestPackageArn: aws.String("AmazonResourceName"), + }, + AppArn: aws.String("AmazonResourceName"), + Configuration: &devicefarm.ScheduleRunConfiguration{ + AuxiliaryApps: []*string{ + aws.String("AmazonResourceName"), // Required + // More values... 
+ }, + BillingMethod: aws.String("BillingMethod"), + ExtraDataPackageArn: aws.String("AmazonResourceName"), + Locale: aws.String("String"), + Location: &devicefarm.Location{ + Latitude: aws.Float64(1.0), // Required + Longitude: aws.Float64(1.0), // Required + }, + NetworkProfileArn: aws.String("AmazonResourceName"), + Radios: &devicefarm.Radios{ + Bluetooth: aws.Bool(true), + Gps: aws.Bool(true), + Nfc: aws.Bool(true), + Wifi: aws.Bool(true), + }, + }, + Name: aws.String("Name"), + } + resp, err := svc.ScheduleRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_UpdateDevicePool() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.UpdateDevicePoolInput{ + Arn: aws.String("AmazonResourceName"), // Required + Description: aws.String("Message"), + Name: aws.String("Name"), + Rules: []*devicefarm.Rule{ + { // Required + Attribute: aws.String("DeviceAttribute"), + Operator: aws.String("RuleOperator"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateDevicePool(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDeviceFarm_UpdateProject() { + svc := devicefarm.New(session.New()) + + params := &devicefarm.UpdateProjectInput{ + Arn: aws.String("AmazonResourceName"), // Required + Name: aws.String("Name"), + } + resp, err := svc.UpdateProject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/devicefarm/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package devicefarm + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS Device Farm is a service that enables mobile app developers to test Android, +// iOS, and Fire OS apps on physical phones, tablets, and other devices in the +// cloud. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type DeviceFarm struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "devicefarm" + +// New creates a new instance of the DeviceFarm client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DeviceFarm client from just a session. 
+// svc := devicefarm.New(mySession) +// +// // Create a DeviceFarm client with additional configuration +// svc := devicefarm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DeviceFarm { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DeviceFarm { + svc := &DeviceFarm{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-06-23", + JSONVersion: "1.1", + TargetPrefix: "DeviceFarm_20150623", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DeviceFarm operation and runs any +// custom request initialization. 
+func (c *DeviceFarm) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1952 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directconnect provides a client for AWS Direct Connect. +package directconnect + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAllocateConnectionOnInterconnect = "AllocateConnectionOnInterconnect" + +// AllocateConnectionOnInterconnectRequest generates a request for the AllocateConnectionOnInterconnect operation. +func (c *DirectConnect) AllocateConnectionOnInterconnectRequest(input *AllocateConnectionOnInterconnectInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opAllocateConnectionOnInterconnect, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateConnectionOnInterconnectInput{} + } + + req = c.newRequest(op, input, output) + output = &Connection{} + req.Data = output + return +} + +// Creates a hosted connection on an interconnect. +// +// Allocates a VLAN number and a specified amount of bandwidth for use by a +// hosted connection on the given interconnect. 
+func (c *DirectConnect) AllocateConnectionOnInterconnect(input *AllocateConnectionOnInterconnectInput) (*Connection, error) { + req, out := c.AllocateConnectionOnInterconnectRequest(input) + err := req.Send() + return out, err +} + +const opAllocatePrivateVirtualInterface = "AllocatePrivateVirtualInterface" + +// AllocatePrivateVirtualInterfaceRequest generates a request for the AllocatePrivateVirtualInterface operation. +func (c *DirectConnect) AllocatePrivateVirtualInterfaceRequest(input *AllocatePrivateVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opAllocatePrivateVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocatePrivateVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &VirtualInterface{} + req.Data = output + return +} + +// Provisions a private virtual interface to be owned by a different customer. +// +// The owner of a connection calls this function to provision a private virtual +// interface which will be owned by another AWS customer. +// +// Virtual interfaces created using this function must be confirmed by the +// virtual interface owner by calling ConfirmPrivateVirtualInterface. Until +// this step has been completed, the virtual interface will be in 'Confirming' +// state, and will not be available for handling traffic. +func (c *DirectConnect) AllocatePrivateVirtualInterface(input *AllocatePrivateVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.AllocatePrivateVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opAllocatePublicVirtualInterface = "AllocatePublicVirtualInterface" + +// AllocatePublicVirtualInterfaceRequest generates a request for the AllocatePublicVirtualInterface operation. 
+func (c *DirectConnect) AllocatePublicVirtualInterfaceRequest(input *AllocatePublicVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opAllocatePublicVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocatePublicVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &VirtualInterface{} + req.Data = output + return +} + +// Provisions a public virtual interface to be owned by a different customer. +// +// The owner of a connection calls this function to provision a public virtual +// interface which will be owned by another AWS customer. +// +// Virtual interfaces created using this function must be confirmed by the +// virtual interface owner by calling ConfirmPublicVirtualInterface. Until this +// step has been completed, the virtual interface will be in 'Confirming' state, +// and will not be available for handling traffic. +func (c *DirectConnect) AllocatePublicVirtualInterface(input *AllocatePublicVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.AllocatePublicVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opConfirmConnection = "ConfirmConnection" + +// ConfirmConnectionRequest generates a request for the ConfirmConnection operation. +func (c *DirectConnect) ConfirmConnectionRequest(input *ConfirmConnectionInput) (req *request.Request, output *ConfirmConnectionOutput) { + op := &request.Operation{ + Name: opConfirmConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmConnectionOutput{} + req.Data = output + return +} + +// Confirm the creation of a hosted connection on an interconnect. 
+// +// Upon creation, the hosted connection is initially in the 'Ordering' state, +// and will remain in this state until the owner calls ConfirmConnection to +// confirm creation of the hosted connection. +func (c *DirectConnect) ConfirmConnection(input *ConfirmConnectionInput) (*ConfirmConnectionOutput, error) { + req, out := c.ConfirmConnectionRequest(input) + err := req.Send() + return out, err +} + +const opConfirmPrivateVirtualInterface = "ConfirmPrivateVirtualInterface" + +// ConfirmPrivateVirtualInterfaceRequest generates a request for the ConfirmPrivateVirtualInterface operation. +func (c *DirectConnect) ConfirmPrivateVirtualInterfaceRequest(input *ConfirmPrivateVirtualInterfaceInput) (req *request.Request, output *ConfirmPrivateVirtualInterfaceOutput) { + op := &request.Operation{ + Name: opConfirmPrivateVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmPrivateVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmPrivateVirtualInterfaceOutput{} + req.Data = output + return +} + +// Accept ownership of a private virtual interface created by another customer. +// +// After the virtual interface owner calls this function, the virtual interface +// will be created and attached to the given virtual private gateway, and will +// be available for handling traffic. +func (c *DirectConnect) ConfirmPrivateVirtualInterface(input *ConfirmPrivateVirtualInterfaceInput) (*ConfirmPrivateVirtualInterfaceOutput, error) { + req, out := c.ConfirmPrivateVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opConfirmPublicVirtualInterface = "ConfirmPublicVirtualInterface" + +// ConfirmPublicVirtualInterfaceRequest generates a request for the ConfirmPublicVirtualInterface operation. 
+func (c *DirectConnect) ConfirmPublicVirtualInterfaceRequest(input *ConfirmPublicVirtualInterfaceInput) (req *request.Request, output *ConfirmPublicVirtualInterfaceOutput) { + op := &request.Operation{ + Name: opConfirmPublicVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmPublicVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmPublicVirtualInterfaceOutput{} + req.Data = output + return +} + +// Accept ownership of a public virtual interface created by another customer. +// +// After the virtual interface owner calls this function, the specified virtual +// interface will be created and made available for handling traffic. +func (c *DirectConnect) ConfirmPublicVirtualInterface(input *ConfirmPublicVirtualInterfaceInput) (*ConfirmPublicVirtualInterfaceOutput, error) { + req, out := c.ConfirmPublicVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opCreateConnection = "CreateConnection" + +// CreateConnectionRequest generates a request for the CreateConnection operation. +func (c *DirectConnect) CreateConnectionRequest(input *CreateConnectionInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opCreateConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &Connection{} + req.Data = output + return +} + +// Creates a new connection between the customer network and a specific AWS +// Direct Connect location. +// +// A connection links your internal network to an AWS Direct Connect location +// over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end +// of the cable is connected to your router, the other to an AWS Direct Connect +// router. An AWS Direct Connect location provides access to Amazon Web Services +// in the region it is associated with. 
You can establish connections with AWS +// Direct Connect locations in multiple regions, but a connection in one region +// does not provide connectivity to other regions. +func (c *DirectConnect) CreateConnection(input *CreateConnectionInput) (*Connection, error) { + req, out := c.CreateConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateInterconnect = "CreateInterconnect" + +// CreateInterconnectRequest generates a request for the CreateInterconnect operation. +func (c *DirectConnect) CreateInterconnectRequest(input *CreateInterconnectInput) (req *request.Request, output *Interconnect) { + op := &request.Operation{ + Name: opCreateInterconnect, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInterconnectInput{} + } + + req = c.newRequest(op, input, output) + output = &Interconnect{} + req.Data = output + return +} + +// Creates a new interconnect between a AWS Direct Connect partner's network +// and a specific AWS Direct Connect location. +// +// An interconnect is a connection which is capable of hosting other connections. +// The AWS Direct Connect partner can use an interconnect to provide sub-1Gbps +// AWS Direct Connect service to tier 2 customers who do not have their own +// connections. Like a standard connection, an interconnect links the AWS Direct +// Connect partner's network to an AWS Direct Connect location over a standard +// 1 Gbps or 10 Gbps Ethernet fiber-optic cable. One end is connected to the +// partner's router, the other to an AWS Direct Connect router. +// +// For each end customer, the AWS Direct Connect partner provisions a connection +// on their interconnect by calling AllocateConnectionOnInterconnect. The end +// customer can then connect to AWS resources by creating a virtual interface +// on their connection, using the VLAN assigned to them by the AWS Direct Connect +// partner. 
+func (c *DirectConnect) CreateInterconnect(input *CreateInterconnectInput) (*Interconnect, error) { + req, out := c.CreateInterconnectRequest(input) + err := req.Send() + return out, err +} + +const opCreatePrivateVirtualInterface = "CreatePrivateVirtualInterface" + +// CreatePrivateVirtualInterfaceRequest generates a request for the CreatePrivateVirtualInterface operation. +func (c *DirectConnect) CreatePrivateVirtualInterfaceRequest(input *CreatePrivateVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opCreatePrivateVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePrivateVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &VirtualInterface{} + req.Data = output + return +} + +// Creates a new private virtual interface. A virtual interface is the VLAN +// that transports AWS Direct Connect traffic. A private virtual interface supports +// sending traffic to a single virtual private cloud (VPC). +func (c *DirectConnect) CreatePrivateVirtualInterface(input *CreatePrivateVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.CreatePrivateVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opCreatePublicVirtualInterface = "CreatePublicVirtualInterface" + +// CreatePublicVirtualInterfaceRequest generates a request for the CreatePublicVirtualInterface operation. +func (c *DirectConnect) CreatePublicVirtualInterfaceRequest(input *CreatePublicVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opCreatePublicVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePublicVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &VirtualInterface{} + req.Data = output + return +} + +// Creates a new public virtual interface. 
A virtual interface is the VLAN that +// transports AWS Direct Connect traffic. A public virtual interface supports +// sending traffic to public services of AWS such as Amazon Simple Storage Service +// (Amazon S3). +func (c *DirectConnect) CreatePublicVirtualInterface(input *CreatePublicVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.CreatePublicVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteConnection = "DeleteConnection" + +// DeleteConnectionRequest generates a request for the DeleteConnection operation. +func (c *DirectConnect) DeleteConnectionRequest(input *DeleteConnectionInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opDeleteConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &Connection{} + req.Data = output + return +} + +// Deletes the connection. +// +// Deleting a connection only stops the AWS Direct Connect port hour and data +// transfer charges. You need to cancel separately with the providers any services +// or charges for cross-connects or network circuits that connect you to the +// AWS Direct Connect location. +func (c *DirectConnect) DeleteConnection(input *DeleteConnectionInput) (*Connection, error) { + req, out := c.DeleteConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInterconnect = "DeleteInterconnect" + +// DeleteInterconnectRequest generates a request for the DeleteInterconnect operation. 
+func (c *DirectConnect) DeleteInterconnectRequest(input *DeleteInterconnectInput) (req *request.Request, output *DeleteInterconnectOutput) { + op := &request.Operation{ + Name: opDeleteInterconnect, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInterconnectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteInterconnectOutput{} + req.Data = output + return +} + +// Deletes the specified interconnect. +func (c *DirectConnect) DeleteInterconnect(input *DeleteInterconnectInput) (*DeleteInterconnectOutput, error) { + req, out := c.DeleteInterconnectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVirtualInterface = "DeleteVirtualInterface" + +// DeleteVirtualInterfaceRequest generates a request for the DeleteVirtualInterface operation. +func (c *DirectConnect) DeleteVirtualInterfaceRequest(input *DeleteVirtualInterfaceInput) (req *request.Request, output *DeleteVirtualInterfaceOutput) { + op := &request.Operation{ + Name: opDeleteVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVirtualInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVirtualInterfaceOutput{} + req.Data = output + return +} + +// Deletes a virtual interface. +func (c *DirectConnect) DeleteVirtualInterface(input *DeleteVirtualInterfaceInput) (*DeleteVirtualInterfaceOutput, error) { + req, out := c.DeleteVirtualInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConnections = "DescribeConnections" + +// DescribeConnectionsRequest generates a request for the DescribeConnections operation. 
+func (c *DirectConnect) DescribeConnectionsRequest(input *DescribeConnectionsInput) (req *request.Request, output *Connections) { + op := &request.Operation{ + Name: opDescribeConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &Connections{} + req.Data = output + return +} + +// Displays all connections in this region. +// +// If a connection ID is provided, the call returns only that particular connection. +func (c *DirectConnect) DescribeConnections(input *DescribeConnectionsInput) (*Connections, error) { + req, out := c.DescribeConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConnectionsOnInterconnect = "DescribeConnectionsOnInterconnect" + +// DescribeConnectionsOnInterconnectRequest generates a request for the DescribeConnectionsOnInterconnect operation. +func (c *DirectConnect) DescribeConnectionsOnInterconnectRequest(input *DescribeConnectionsOnInterconnectInput) (req *request.Request, output *Connections) { + op := &request.Operation{ + Name: opDescribeConnectionsOnInterconnect, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConnectionsOnInterconnectInput{} + } + + req = c.newRequest(op, input, output) + output = &Connections{} + req.Data = output + return +} + +// Return a list of connections that have been provisioned on the given interconnect. +func (c *DirectConnect) DescribeConnectionsOnInterconnect(input *DescribeConnectionsOnInterconnectInput) (*Connections, error) { + req, out := c.DescribeConnectionsOnInterconnectRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInterconnects = "DescribeInterconnects" + +// DescribeInterconnectsRequest generates a request for the DescribeInterconnects operation. 
+func (c *DirectConnect) DescribeInterconnectsRequest(input *DescribeInterconnectsInput) (req *request.Request, output *DescribeInterconnectsOutput) { + op := &request.Operation{ + Name: opDescribeInterconnects, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInterconnectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInterconnectsOutput{} + req.Data = output + return +} + +// Returns a list of interconnects owned by the AWS account. +// +// If an interconnect ID is provided, it will only return this particular interconnect. +func (c *DirectConnect) DescribeInterconnects(input *DescribeInterconnectsInput) (*DescribeInterconnectsOutput, error) { + req, out := c.DescribeInterconnectsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLocations = "DescribeLocations" + +// DescribeLocationsRequest generates a request for the DescribeLocations operation. +func (c *DirectConnect) DescribeLocationsRequest(input *DescribeLocationsInput) (req *request.Request, output *DescribeLocationsOutput) { + op := &request.Operation{ + Name: opDescribeLocations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLocationsOutput{} + req.Data = output + return +} + +// Returns the list of AWS Direct Connect locations in the current AWS region. +// These are the locations that may be selected when calling CreateConnection +// or CreateInterconnect. +func (c *DirectConnect) DescribeLocations(input *DescribeLocationsInput) (*DescribeLocationsOutput, error) { + req, out := c.DescribeLocationsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVirtualGateways = "DescribeVirtualGateways" + +// DescribeVirtualGatewaysRequest generates a request for the DescribeVirtualGateways operation. 
+func (c *DirectConnect) DescribeVirtualGatewaysRequest(input *DescribeVirtualGatewaysInput) (req *request.Request, output *DescribeVirtualGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeVirtualGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVirtualGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVirtualGatewaysOutput{} + req.Data = output + return +} + +// Returns a list of virtual private gateways owned by the AWS account. +// +// You can create one or more AWS Direct Connect private virtual interfaces +// linking to a virtual private gateway. A virtual private gateway can be managed +// via Amazon Virtual Private Cloud (VPC) console or the EC2 CreateVpnGateway +// (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html) +// action. +func (c *DirectConnect) DescribeVirtualGateways(input *DescribeVirtualGatewaysInput) (*DescribeVirtualGatewaysOutput, error) { + req, out := c.DescribeVirtualGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVirtualInterfaces = "DescribeVirtualInterfaces" + +// DescribeVirtualInterfacesRequest generates a request for the DescribeVirtualInterfaces operation. +func (c *DirectConnect) DescribeVirtualInterfacesRequest(input *DescribeVirtualInterfacesInput) (req *request.Request, output *DescribeVirtualInterfacesOutput) { + op := &request.Operation{ + Name: opDescribeVirtualInterfaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVirtualInterfacesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVirtualInterfacesOutput{} + req.Data = output + return +} + +// Displays all virtual interfaces for an AWS account. Virtual interfaces deleted +// fewer than 15 minutes before DescribeVirtualInterfaces is called are also +// returned. 
If a connection ID is included then only virtual interfaces associated +// with this connection will be returned. If a virtual interface ID is included +// then only a single virtual interface will be returned. +// +// A virtual interface (VLAN) transmits the traffic between the AWS Direct +// Connect location and the customer. +// +// If a connection ID is provided, only virtual interfaces provisioned on the +// specified connection will be returned. If a virtual interface ID is provided, +// only this particular virtual interface will be returned. +func (c *DirectConnect) DescribeVirtualInterfaces(input *DescribeVirtualInterfacesInput) (*DescribeVirtualInterfacesOutput, error) { + req, out := c.DescribeVirtualInterfacesRequest(input) + err := req.Send() + return out, err +} + +// Container for the parameters to the AllocateConnectionOnInterconnect operation. +type AllocateConnectionOnInterconnectInput struct { + _ struct{} `type:"structure"` + + // Bandwidth of the connection. + // + // Example: "500Mbps" + // + // Default: None + Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"` + + // Name of the provisioned connection. + // + // Example: "500M Connection to AWS" + // + // Default: None + ConnectionName *string `locationName:"connectionName" type:"string" required:"true"` + + // ID of the interconnect on which the connection will be provisioned. + // + // Example: dxcon-456abc78 + // + // Default: None + InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"` + + // Numeric account Id of the customer for whom the connection will be provisioned. + // + // Example: 123443215678 + // + // Default: None + OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` + + // The dedicated VLAN provisioned to the connection. 
+ // + // Example: 101 + // + // Default: None + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AllocateConnectionOnInterconnectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateConnectionOnInterconnectInput) GoString() string { + return s.String() +} + +// Container for the parameters to the AllocatePrivateVirtualInterface operation. +type AllocatePrivateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The connection ID on which the private virtual interface is provisioned. + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the private virtual interface to be provisioned. + // + // Default: None + NewPrivateVirtualInterfaceAllocation *NewPrivateVirtualInterfaceAllocation `locationName:"newPrivateVirtualInterfaceAllocation" type:"structure" required:"true"` + + // The AWS account that will own the new private virtual interface. + // + // Default: None + OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` +} + +// String returns the string representation +func (s AllocatePrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocatePrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Container for the parameters to the AllocatePublicVirtualInterface operation. +type AllocatePublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The connection ID on which the public virtual interface is provisioned. + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the public virtual interface to be provisioned. 
+ // + // Default: None + NewPublicVirtualInterfaceAllocation *NewPublicVirtualInterfaceAllocation `locationName:"newPublicVirtualInterfaceAllocation" type:"structure" required:"true"` + + // The AWS account that will own the new public virtual interface. + // + // Default: None + OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` +} + +// String returns the string representation +func (s AllocatePublicVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocatePublicVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Container for the parameters to the ConfirmConnection operation. +type ConfirmConnectionInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmConnectionInput) GoString() string { + return s.String() +} + +// The response received when ConfirmConnection is called. +type ConfirmConnectionOutput struct { + _ struct{} `type:"structure"` + + // State of the connection. Ordering: The initial state of a hosted connection + // provisioned on an interconnect. The connection stays in the ordering state + // until the owner of the hosted connection confirms or declines the connection + // order. Requested: The initial state of a standard connection. The connection + // stays in the requested state until the Letter of Authorization (LOA) is sent + // to the customer. Pending: The connection has been approved, and is being + // initialized. Available: The network link is up, and the connection is ready + // for use. Down: The network link is down. 
Deleting: The connection is in the + // process of being deleted. Deleted: The connection has been deleted. Rejected: + // A hosted connection in the 'Ordering' state will enter the 'Rejected' state + // if it is deleted by the end customer. + ConnectionState *string `locationName:"connectionState" type:"string" enum:"ConnectionState"` +} + +// String returns the string representation +func (s ConfirmConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmConnectionOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ConfirmPrivateVirtualInterface operation. +type ConfirmPrivateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the virtual private gateway that will be attached to the virtual interface. + // + // A virtual private gateway can be managed via the Amazon Virtual Private + // Cloud (VPC) console or the EC2 CreateVpnGateway (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html) + // action. + // + // Default: None + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string" required:"true"` + + // ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmPrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// The response received when ConfirmPrivateVirtualInterface is called. +type ConfirmPrivateVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. Confirming: The creation of the virtual + // interface is pending confirmation from the virtual interface owner. 
If the + // owner of the virtual interface is different from the owner of the connection + // on which it is provisioned, then the virtual interface will remain in this + // state until it is confirmed by the virtual interface owner. Verifying: This + // state only applies to public virtual interfaces. Each public virtual interface + // needs validation before the virtual interface can be created. Pending: A + // virtual interface is in this state from the time that it is created until + // the virtual interface is ready to forward traffic. Available: A virtual interface + // that is able to forward traffic. Down: A virtual interface that is BGP down. + // Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. Deleted: A + // virtual interface that cannot forward traffic. Rejected: The virtual interface + // owner has declined creation of the virtual interface. If a virtual interface + // in the 'Confirming' state is deleted by the virtual interface owner, the + // virtual interface will enter the 'Rejected' state. + VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` +} + +// String returns the string representation +func (s ConfirmPrivateVirtualInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPrivateVirtualInterfaceOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ConfirmPublicVirtualInterface operation. +type ConfirmPublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the virtual interface. 
+ // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmPublicVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPublicVirtualInterfaceInput) GoString() string { + return s.String() +} + +// The response received when ConfirmPublicVirtualInterface is called. +type ConfirmPublicVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. Confirming: The creation of the virtual + // interface is pending confirmation from the virtual interface owner. If the + // owner of the virtual interface is different from the owner of the connection + // on which it is provisioned, then the virtual interface will remain in this + // state until it is confirmed by the virtual interface owner. Verifying: This + // state only applies to public virtual interfaces. Each public virtual interface + // needs validation before the virtual interface can be created. Pending: A + // virtual interface is in this state from the time that it is created until + // the virtual interface is ready to forward traffic. Available: A virtual interface + // that is able to forward traffic. Down: A virtual interface that is BGP down. + // Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. Deleted: A + // virtual interface that cannot forward traffic. Rejected: The virtual interface + // owner has declined creation of the virtual interface. If a virtual interface + // in the 'Confirming' state is deleted by the virtual interface owner, the + // virtual interface will enter the 'Rejected' state. 
+ VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` +} + +// String returns the string representation +func (s ConfirmPublicVirtualInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPublicVirtualInterfaceOutput) GoString() string { + return s.String() +} + +// A connection represents the physical network connection between the AWS Direct +// Connect location and the customer. +type Connection struct { + _ struct{} `type:"structure"` + + // Bandwidth of the connection. + // + // Example: 1Gbps (for regular connections), or 500Mbps (for hosted connections) + // + // Default: None + Bandwidth *string `locationName:"bandwidth" type:"string"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // The name of the connection. + // + // Example: "My Connection to AWS" + // + // Default: None + ConnectionName *string `locationName:"connectionName" type:"string"` + + // State of the connection. Ordering: The initial state of a hosted connection + // provisioned on an interconnect. The connection stays in the ordering state + // until the owner of the hosted connection confirms or declines the connection + // order. Requested: The initial state of a standard connection. The connection + // stays in the requested state until the Letter of Authorization (LOA) is sent + // to the customer. Pending: The connection has been approved, and is being + // initialized. Available: The network link is up, and the connection is ready + // for use. Down: The network link is down. Deleting: The connection is in the + // process of being deleted. Deleted: The connection has been deleted. Rejected: + // A hosted connection in the 'Ordering' state will enter the 'Rejected' state + // if it is deleted by the end customer. 
+ ConnectionState *string `locationName:"connectionState" type:"string" enum:"ConnectionState"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string"` + + OwnerAccount *string `locationName:"ownerAccount" type:"string"` + + PartnerName *string `locationName:"partnerName" type:"string"` + + // The AWS region where the connection is located. + // + // Example: us-east-1 + // + // Default: None + Region *string `locationName:"region" type:"string"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer"` +} + +// String returns the string representation +func (s Connection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Connection) GoString() string { + return s.String() +} + +// A structure containing a list of connections. +type Connections struct { + _ struct{} `type:"structure"` + + // A list of connections. + Connections []*Connection `locationName:"connections" type:"list"` +} + +// String returns the string representation +func (s Connections) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Connections) GoString() string { + return s.String() +} + +// Container for the parameters to the CreateConnection operation. +type CreateConnectionInput struct { + _ struct{} `type:"structure"` + + // Bandwidth of the connection. + // + // Example: 1Gbps + // + // Default: None + Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"` + + // The name of the connection. + // + // Example: "My Connection to AWS" + // + // Default: None + ConnectionName *string `locationName:"connectionName" type:"string" required:"true"` + + // Where the connection is located. 
+ // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConnectionInput) GoString() string { + return s.String() +} + +// Container for the parameters to the CreateInterconnect operation. +type CreateInterconnectInput struct { + _ struct{} `type:"structure"` + + // The port bandwidth + // + // Example: 1Gbps + // + // Default: None + // + // Available values: 1Gbps,10Gbps + Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"` + + // The name of the interconnect. + // + // Example: "1G Interconnect to AWS" + // + // Default: None + InterconnectName *string `locationName:"interconnectName" type:"string" required:"true"` + + // Where the interconnect is located + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateInterconnectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInterconnectInput) GoString() string { + return s.String() +} + +// Container for the parameters to the CreatePrivateVirtualInterface operation. +type CreatePrivateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the private virtual interface to be created. 
+ // + // Default: None + NewPrivateVirtualInterface *NewPrivateVirtualInterface `locationName:"newPrivateVirtualInterface" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Container for the parameters to the CreatePublicVirtualInterface operation. +type CreatePublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the public virtual interface to be created. + // + // Default: None + NewPublicVirtualInterface *NewPublicVirtualInterface `locationName:"newPublicVirtualInterface" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePublicVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePublicVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteConnection operation. +type DeleteConnectionInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConnectionInput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteInterconnect operation. 
+type DeleteInterconnectInput struct { + _ struct{} `type:"structure"` + + // The ID of the interconnect. + // + // Example: dxcon-abc123 + InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInterconnectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInterconnectInput) GoString() string { + return s.String() +} + +// The response received when DeleteInterconnect is called. +type DeleteInterconnectOutput struct { + _ struct{} `type:"structure"` + + // State of the interconnect. Requested: The initial state of an interconnect. + // The interconnect stays in the requested state until the Letter of Authorization + // (LOA) is sent to the customer. Pending: The interconnect has been approved, + // and is being initialized. Available: The network link is up, and the interconnect + // is ready for use. Down: The network link is down. Deleting: The interconnect + // is in the process of being deleted. Deleted: The interconnect has been deleted. + InterconnectState *string `locationName:"interconnectState" type:"string" enum:"InterconnectState"` +} + +// String returns the string representation +func (s DeleteInterconnectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInterconnectOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteVirtualInterface operation. +type DeleteVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the virtual interface. 
+ // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualInterfaceInput) GoString() string { + return s.String() +} + +// The response received when DeleteVirtualInterface is called. +type DeleteVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. Confirming: The creation of the virtual + // interface is pending confirmation from the virtual interface owner. If the + // owner of the virtual interface is different from the owner of the connection + // on which it is provisioned, then the virtual interface will remain in this + // state until it is confirmed by the virtual interface owner. Verifying: This + // state only applies to public virtual interfaces. Each public virtual interface + // needs validation before the virtual interface can be created. Pending: A + // virtual interface is in this state from the time that it is created until + // the virtual interface is ready to forward traffic. Available: A virtual interface + // that is able to forward traffic. Down: A virtual interface that is BGP down. + // Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. Deleted: A + // virtual interface that cannot forward traffic. Rejected: The virtual interface + // owner has declined creation of the virtual interface. If a virtual interface + // in the 'Confirming' state is deleted by the virtual interface owner, the + // virtual interface will enter the 'Rejected' state. 
+ VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` +} + +// String returns the string representation +func (s DeleteVirtualInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualInterfaceOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeConnections operation. +type DescribeConnectionsInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` +} + +// String returns the string representation +func (s DescribeConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionsInput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeConnectionsOnInterconnect operation. +type DescribeConnectionsOnInterconnectInput struct { + _ struct{} `type:"structure"` + + // ID of the interconnect on which a list of connection is provisioned. + // + // Example: dxcon-abc123 + // + // Default: None + InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeConnectionsOnInterconnectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionsOnInterconnectInput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeInterconnects operation. +type DescribeInterconnectsInput struct { + _ struct{} `type:"structure"` + + // The ID of the interconnect. 
+ // + // Example: dxcon-abc123 + InterconnectId *string `locationName:"interconnectId" type:"string"` +} + +// String returns the string representation +func (s DescribeInterconnectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectsInput) GoString() string { + return s.String() +} + +// A structure containing a list of interconnects. +type DescribeInterconnectsOutput struct { + _ struct{} `type:"structure"` + + // A list of interconnects. + Interconnects []*Interconnect `locationName:"interconnects" type:"list"` +} + +// String returns the string representation +func (s DescribeInterconnectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectsOutput) GoString() string { + return s.String() +} + +type DescribeLocationsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationsInput) GoString() string { + return s.String() +} + +// A location is a network facility where AWS Direct Connect routers are available +// to be connected. Generally, these are colocation hubs where many network +// providers have equipment, and where cross connects can be delivered. Locations +// include a name and facility code, and must be provided when creating a connection. +type DescribeLocationsOutput struct { + _ struct{} `type:"structure"` + + // A list of colocation hubs where network providers have equipment. Most regions + // have multiple locations available. 
+ Locations []*Location `locationName:"locations" type:"list"` +} + +// String returns the string representation +func (s DescribeLocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationsOutput) GoString() string { + return s.String() +} + +type DescribeVirtualGatewaysInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeVirtualGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualGatewaysInput) GoString() string { + return s.String() +} + +// A structure containing a list of virtual private gateways. +type DescribeVirtualGatewaysOutput struct { + _ struct{} `type:"structure"` + + // A list of virtual private gateways. + VirtualGateways []*VirtualGateway `locationName:"virtualGateways" type:"list"` +} + +// String returns the string representation +func (s DescribeVirtualGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualGatewaysOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeVirtualInterfaces operation. +type DescribeVirtualInterfacesInput struct { + _ struct{} `type:"structure"` + + // ID of the connection. + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // ID of the virtual interface. 
+ // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` +} + +// String returns the string representation +func (s DescribeVirtualInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualInterfacesInput) GoString() string { + return s.String() +} + +// A structure containing a list of virtual interfaces. +type DescribeVirtualInterfacesOutput struct { + _ struct{} `type:"structure"` + + // A list of virtual interfaces. + VirtualInterfaces []*VirtualInterface `locationName:"virtualInterfaces" type:"list"` +} + +// String returns the string representation +func (s DescribeVirtualInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualInterfacesOutput) GoString() string { + return s.String() +} + +// An interconnect is a connection that can host other connections. +// +// Like a standard AWS Direct Connect connection, an interconnect represents +// the physical connection between an AWS Direct Connect partner's network and +// a specific Direct Connect location. An AWS Direct Connect partner who owns +// an interconnect can provision hosted connections on the interconnect for +// their end customers, thereby providing the end customers with connectivity +// to AWS services. +// +// The resources of the interconnect, including bandwidth and VLAN numbers, +// are shared by all of the hosted connections on the interconnect, and the +// owner of the interconnect determines how these resources are assigned. +type Interconnect struct { + _ struct{} `type:"structure"` + + // Bandwidth of the connection. + // + // Example: 1Gbps + // + // Default: None + Bandwidth *string `locationName:"bandwidth" type:"string"` + + // The ID of the interconnect. 
+ // + // Example: dxcon-abc123 + InterconnectId *string `locationName:"interconnectId" type:"string"` + + // The name of the interconnect. + // + // Example: "1G Interconnect to AWS" + InterconnectName *string `locationName:"interconnectName" type:"string"` + + // State of the interconnect. Requested: The initial state of an interconnect. + // The interconnect stays in the requested state until the Letter of Authorization + // (LOA) is sent to the customer. Pending: The interconnect has been approved, + // and is being initialized. Available: The network link is up, and the interconnect + // is ready for use. Down: The network link is down. Deleting: The interconnect + // is in the process of being deleted. Deleted: The interconnect has been deleted. + InterconnectState *string `locationName:"interconnectState" type:"string" enum:"InterconnectState"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string"` + + // The AWS region where the connection is located. + // + // Example: us-east-1 + // + // Default: None + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation +func (s Interconnect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Interconnect) GoString() string { + return s.String() +} + +// An AWS Direct Connect location where connections and interconnects can be +// requested. +type Location struct { + _ struct{} `type:"structure"` + + // The code used to indicate the AWS Direct Connect location. + LocationCode *string `locationName:"locationCode" type:"string"` + + // The name of the AWS Direct Connect location. The name includes the colocation + // partner name and the physical site of the lit building. 
+ LocationName *string `locationName:"locationName" type:"string"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// A structure containing information about a new private virtual interface. +type NewPrivateVirtualInterface struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string" required:"true"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. 
+ // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPrivateVirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPrivateVirtualInterface) GoString() string { + return s.String() +} + +// A structure containing information about a private virtual interface that +// will be provisioned on a connection. +type NewPrivateVirtualInterfaceAllocation struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPrivateVirtualInterfaceAllocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPrivateVirtualInterfaceAllocation) GoString() string { + return s.String() +} + +// A structure containing information about a new public virtual interface. +type NewPublicVirtualInterface struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. 
+ // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string" required:"true"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string" required:"true"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list" required:"true"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPublicVirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPublicVirtualInterface) GoString() string { + return s.String() +} + +// A structure containing information about a public virtual interface that +// will be provisioned on a connection. +type NewPublicVirtualInterfaceAllocation struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string" required:"true"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // Authentication key for BGP configuration. 
+ // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string" required:"true"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list" required:"true"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPublicVirtualInterfaceAllocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPublicVirtualInterfaceAllocation) GoString() string { + return s.String() +} + +// A route filter prefix that the customer can advertise through Border Gateway +// Protocol (BGP) over a public virtual interface. +type RouteFilterPrefix struct { + _ struct{} `type:"structure"` + + // CIDR notation for the advertised route. Multiple routes are separated by + // commas. + // + // Example: 10.10.10.0/24,10.10.11.0/24 + Cidr *string `locationName:"cidr" type:"string"` +} + +// String returns the string representation +func (s RouteFilterPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteFilterPrefix) GoString() string { + return s.String() +} + +// You can create one or more AWS Direct Connect private virtual interfaces +// linking to your virtual private gateway. 
+// +// Virtual private gateways can be managed using the Amazon Virtual Private +// Cloud (Amazon VPC) console or the Amazon EC2 CreateVpnGateway action (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html). +type VirtualGateway struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` + + // State of the virtual private gateway. Pending: This is the initial state + // after calling CreateVpnGateway. Available: Ready for use by a private virtual + // interface. Deleting: This is the initial state after calling DeleteVpnGateway. + // Deleted: In this state, a private virtual interface is unable to send traffic + // over this gateway. + VirtualGatewayState *string `locationName:"virtualGatewayState" type:"string"` +} + +// String returns the string representation +func (s VirtualGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualGateway) GoString() string { + return s.String() +} + +// A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect +// location and the customer. +type VirtualInterface struct { + _ struct{} `type:"structure"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // Autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer"` + + // Authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // ID of the connection. 
+ // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // Information for generating the customer router configuration. + CustomerRouterConfig *string `locationName:"customerRouterConfig" type:"string"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string"` + + OwnerAccount *string `locationName:"ownerAccount" type:"string"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` + + // ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` + + // State of the virtual interface. Confirming: The creation of the virtual + // interface is pending confirmation from the virtual interface owner. If the + // owner of the virtual interface is different from the owner of the connection + // on which it is provisioned, then the virtual interface will remain in this + // state until it is confirmed by the virtual interface owner. Verifying: This + // state only applies to public virtual interfaces. Each public virtual interface + // needs validation before the virtual interface can be created. 
Pending: A + // virtual interface is in this state from the time that it is created until + // the virtual interface is ready to forward traffic. Available: A virtual interface + // that is able to forward traffic. Down: A virtual interface that is BGP down. + // Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. Deleted: A + // virtual interface that cannot forward traffic. Rejected: The virtual interface + // owner has declined creation of the virtual interface. If a virtual interface + // in the 'Confirming' state is deleted by the virtual interface owner, the + // virtual interface will enter the 'Rejected' state. + VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` + + // The type of virtual interface. + // + // Example: private (Amazon VPC) or public (Amazon S3, Amazon DynamoDB, and + // so on.) + VirtualInterfaceType *string `locationName:"virtualInterfaceType" type:"string"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer"` +} + +// String returns the string representation +func (s VirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualInterface) GoString() string { + return s.String() +} + +// State of the connection. Ordering: The initial state of a hosted connection +// provisioned on an interconnect. The connection stays in the ordering state +// until the owner of the hosted connection confirms or declines the connection +// order. Requested: The initial state of a standard connection. The connection +// stays in the requested state until the Letter of Authorization (LOA) is sent +// to the customer. Pending: The connection has been approved, and is being +// initialized. Available: The network link is up, and the connection is ready +// for use. Down: The network link is down. 
Deleting: The connection is in the +// process of being deleted. Deleted: The connection has been deleted. Rejected: +// A hosted connection in the 'Ordering' state will enter the 'Rejected' state +// if it is deleted by the end customer. +const ( + // @enum ConnectionState + ConnectionStateOrdering = "ordering" + // @enum ConnectionState + ConnectionStateRequested = "requested" + // @enum ConnectionState + ConnectionStatePending = "pending" + // @enum ConnectionState + ConnectionStateAvailable = "available" + // @enum ConnectionState + ConnectionStateDown = "down" + // @enum ConnectionState + ConnectionStateDeleting = "deleting" + // @enum ConnectionState + ConnectionStateDeleted = "deleted" + // @enum ConnectionState + ConnectionStateRejected = "rejected" +) + +// State of the interconnect. Requested: The initial state of an interconnect. +// The interconnect stays in the requested state until the Letter of Authorization +// (LOA) is sent to the customer. Pending: The interconnect has been approved, +// and is being initialized. Available: The network link is up, and the interconnect +// is ready for use. Down: The network link is down. Deleting: The interconnect +// is in the process of being deleted. Deleted: The interconnect has been deleted. +const ( + // @enum InterconnectState + InterconnectStateRequested = "requested" + // @enum InterconnectState + InterconnectStatePending = "pending" + // @enum InterconnectState + InterconnectStateAvailable = "available" + // @enum InterconnectState + InterconnectStateDown = "down" + // @enum InterconnectState + InterconnectStateDeleting = "deleting" + // @enum InterconnectState + InterconnectStateDeleted = "deleted" +) + +// State of the virtual interface. Confirming: The creation of the virtual +// interface is pending confirmation from the virtual interface owner. 
If the +// owner of the virtual interface is different from the owner of the connection +// on which it is provisioned, then the virtual interface will remain in this +// state until it is confirmed by the virtual interface owner. Verifying: This +// state only applies to public virtual interfaces. Each public virtual interface +// needs validation before the virtual interface can be created. Pending: A +// virtual interface is in this state from the time that it is created until +// the virtual interface is ready to forward traffic. Available: A virtual interface +// that is able to forward traffic. Down: A virtual interface that is BGP down. +// Deleting: A virtual interface is in this state immediately after calling +// DeleteVirtualInterface until it can no longer forward traffic. Deleted: A +// virtual interface that cannot forward traffic. Rejected: The virtual interface +// owner has declined creation of the virtual interface. If a virtual interface +// in the 'Confirming' state is deleted by the virtual interface owner, the +// virtual interface will enter the 'Rejected' state. 
+const ( + // @enum VirtualInterfaceState + VirtualInterfaceStateConfirming = "confirming" + // @enum VirtualInterfaceState + VirtualInterfaceStateVerifying = "verifying" + // @enum VirtualInterfaceState + VirtualInterfaceStatePending = "pending" + // @enum VirtualInterfaceState + VirtualInterfaceStateAvailable = "available" + // @enum VirtualInterfaceState + VirtualInterfaceStateDeleting = "deleting" + // @enum VirtualInterfaceState + VirtualInterfaceStateDeleted = "deleted" + // @enum VirtualInterfaceState + VirtualInterfaceStateRejected = "rejected" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/directconnectiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/directconnectiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/directconnectiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/directconnectiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directconnectiface provides an interface for the AWS Direct Connect. +package directconnectiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/directconnect" +) + +// DirectConnectAPI is the interface type for directconnect.DirectConnect. 
+type DirectConnectAPI interface { + AllocateConnectionOnInterconnectRequest(*directconnect.AllocateConnectionOnInterconnectInput) (*request.Request, *directconnect.Connection) + + AllocateConnectionOnInterconnect(*directconnect.AllocateConnectionOnInterconnectInput) (*directconnect.Connection, error) + + AllocatePrivateVirtualInterfaceRequest(*directconnect.AllocatePrivateVirtualInterfaceInput) (*request.Request, *directconnect.VirtualInterface) + + AllocatePrivateVirtualInterface(*directconnect.AllocatePrivateVirtualInterfaceInput) (*directconnect.VirtualInterface, error) + + AllocatePublicVirtualInterfaceRequest(*directconnect.AllocatePublicVirtualInterfaceInput) (*request.Request, *directconnect.VirtualInterface) + + AllocatePublicVirtualInterface(*directconnect.AllocatePublicVirtualInterfaceInput) (*directconnect.VirtualInterface, error) + + ConfirmConnectionRequest(*directconnect.ConfirmConnectionInput) (*request.Request, *directconnect.ConfirmConnectionOutput) + + ConfirmConnection(*directconnect.ConfirmConnectionInput) (*directconnect.ConfirmConnectionOutput, error) + + ConfirmPrivateVirtualInterfaceRequest(*directconnect.ConfirmPrivateVirtualInterfaceInput) (*request.Request, *directconnect.ConfirmPrivateVirtualInterfaceOutput) + + ConfirmPrivateVirtualInterface(*directconnect.ConfirmPrivateVirtualInterfaceInput) (*directconnect.ConfirmPrivateVirtualInterfaceOutput, error) + + ConfirmPublicVirtualInterfaceRequest(*directconnect.ConfirmPublicVirtualInterfaceInput) (*request.Request, *directconnect.ConfirmPublicVirtualInterfaceOutput) + + ConfirmPublicVirtualInterface(*directconnect.ConfirmPublicVirtualInterfaceInput) (*directconnect.ConfirmPublicVirtualInterfaceOutput, error) + + CreateConnectionRequest(*directconnect.CreateConnectionInput) (*request.Request, *directconnect.Connection) + + CreateConnection(*directconnect.CreateConnectionInput) (*directconnect.Connection, error) + + CreateInterconnectRequest(*directconnect.CreateInterconnectInput) 
(*request.Request, *directconnect.Interconnect) + + CreateInterconnect(*directconnect.CreateInterconnectInput) (*directconnect.Interconnect, error) + + CreatePrivateVirtualInterfaceRequest(*directconnect.CreatePrivateVirtualInterfaceInput) (*request.Request, *directconnect.VirtualInterface) + + CreatePrivateVirtualInterface(*directconnect.CreatePrivateVirtualInterfaceInput) (*directconnect.VirtualInterface, error) + + CreatePublicVirtualInterfaceRequest(*directconnect.CreatePublicVirtualInterfaceInput) (*request.Request, *directconnect.VirtualInterface) + + CreatePublicVirtualInterface(*directconnect.CreatePublicVirtualInterfaceInput) (*directconnect.VirtualInterface, error) + + DeleteConnectionRequest(*directconnect.DeleteConnectionInput) (*request.Request, *directconnect.Connection) + + DeleteConnection(*directconnect.DeleteConnectionInput) (*directconnect.Connection, error) + + DeleteInterconnectRequest(*directconnect.DeleteInterconnectInput) (*request.Request, *directconnect.DeleteInterconnectOutput) + + DeleteInterconnect(*directconnect.DeleteInterconnectInput) (*directconnect.DeleteInterconnectOutput, error) + + DeleteVirtualInterfaceRequest(*directconnect.DeleteVirtualInterfaceInput) (*request.Request, *directconnect.DeleteVirtualInterfaceOutput) + + DeleteVirtualInterface(*directconnect.DeleteVirtualInterfaceInput) (*directconnect.DeleteVirtualInterfaceOutput, error) + + DescribeConnectionsRequest(*directconnect.DescribeConnectionsInput) (*request.Request, *directconnect.Connections) + + DescribeConnections(*directconnect.DescribeConnectionsInput) (*directconnect.Connections, error) + + DescribeConnectionsOnInterconnectRequest(*directconnect.DescribeConnectionsOnInterconnectInput) (*request.Request, *directconnect.Connections) + + DescribeConnectionsOnInterconnect(*directconnect.DescribeConnectionsOnInterconnectInput) (*directconnect.Connections, error) + + DescribeInterconnectsRequest(*directconnect.DescribeInterconnectsInput) (*request.Request, 
*directconnect.DescribeInterconnectsOutput) + + DescribeInterconnects(*directconnect.DescribeInterconnectsInput) (*directconnect.DescribeInterconnectsOutput, error) + + DescribeLocationsRequest(*directconnect.DescribeLocationsInput) (*request.Request, *directconnect.DescribeLocationsOutput) + + DescribeLocations(*directconnect.DescribeLocationsInput) (*directconnect.DescribeLocationsOutput, error) + + DescribeVirtualGatewaysRequest(*directconnect.DescribeVirtualGatewaysInput) (*request.Request, *directconnect.DescribeVirtualGatewaysOutput) + + DescribeVirtualGateways(*directconnect.DescribeVirtualGatewaysInput) (*directconnect.DescribeVirtualGatewaysOutput, error) + + DescribeVirtualInterfacesRequest(*directconnect.DescribeVirtualInterfacesInput) (*request.Request, *directconnect.DescribeVirtualInterfacesOutput) + + DescribeVirtualInterfaces(*directconnect.DescribeVirtualInterfacesInput) (*directconnect.DescribeVirtualInterfacesOutput, error) +} + +var _ DirectConnectAPI = (*directconnect.DirectConnect)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,430 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package directconnect_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/directconnect" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDirectConnect_AllocateConnectionOnInterconnect() { + svc := directconnect.New(session.New()) + + params := &directconnect.AllocateConnectionOnInterconnectInput{ + Bandwidth: aws.String("Bandwidth"), // Required + ConnectionName: aws.String("ConnectionName"), // Required + InterconnectId: aws.String("InterconnectId"), // Required + OwnerAccount: aws.String("OwnerAccount"), // Required + Vlan: aws.Int64(1), // Required + } + resp, err := svc.AllocateConnectionOnInterconnect(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_AllocatePrivateVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.AllocatePrivateVirtualInterfaceInput{ + ConnectionId: aws.String("ConnectionId"), // Required + NewPrivateVirtualInterfaceAllocation: &directconnect.NewPrivateVirtualInterfaceAllocation{ // Required + Asn: aws.Int64(1), // Required + VirtualInterfaceName: aws.String("VirtualInterfaceName"), // Required + Vlan: aws.Int64(1), // Required + AmazonAddress: aws.String("AmazonAddress"), + AuthKey: aws.String("BGPAuthKey"), + CustomerAddress: aws.String("CustomerAddress"), + }, + OwnerAccount: aws.String("OwnerAccount"), // Required + } + resp, err := svc.AllocatePrivateVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectConnect_AllocatePublicVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.AllocatePublicVirtualInterfaceInput{ + ConnectionId: aws.String("ConnectionId"), // Required + NewPublicVirtualInterfaceAllocation: &directconnect.NewPublicVirtualInterfaceAllocation{ // Required + AmazonAddress: aws.String("AmazonAddress"), // Required + Asn: aws.Int64(1), // Required + CustomerAddress: aws.String("CustomerAddress"), // Required + RouteFilterPrefixes: []*directconnect.RouteFilterPrefix{ // Required + { // Required + Cidr: aws.String("CIDR"), + }, + // More values... + }, + VirtualInterfaceName: aws.String("VirtualInterfaceName"), // Required + Vlan: aws.Int64(1), // Required + AuthKey: aws.String("BGPAuthKey"), + }, + OwnerAccount: aws.String("OwnerAccount"), // Required + } + resp, err := svc.AllocatePublicVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_ConfirmConnection() { + svc := directconnect.New(session.New()) + + params := &directconnect.ConfirmConnectionInput{ + ConnectionId: aws.String("ConnectionId"), // Required + } + resp, err := svc.ConfirmConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectConnect_ConfirmPrivateVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.ConfirmPrivateVirtualInterfaceInput{ + VirtualGatewayId: aws.String("VirtualGatewayId"), // Required + VirtualInterfaceId: aws.String("VirtualInterfaceId"), // Required + } + resp, err := svc.ConfirmPrivateVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_ConfirmPublicVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.ConfirmPublicVirtualInterfaceInput{ + VirtualInterfaceId: aws.String("VirtualInterfaceId"), // Required + } + resp, err := svc.ConfirmPublicVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_CreateConnection() { + svc := directconnect.New(session.New()) + + params := &directconnect.CreateConnectionInput{ + Bandwidth: aws.String("Bandwidth"), // Required + ConnectionName: aws.String("ConnectionName"), // Required + Location: aws.String("LocationCode"), // Required + } + resp, err := svc.CreateConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectConnect_CreateInterconnect() { + svc := directconnect.New(session.New()) + + params := &directconnect.CreateInterconnectInput{ + Bandwidth: aws.String("Bandwidth"), // Required + InterconnectName: aws.String("InterconnectName"), // Required + Location: aws.String("LocationCode"), // Required + } + resp, err := svc.CreateInterconnect(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_CreatePrivateVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.CreatePrivateVirtualInterfaceInput{ + ConnectionId: aws.String("ConnectionId"), // Required + NewPrivateVirtualInterface: &directconnect.NewPrivateVirtualInterface{ // Required + Asn: aws.Int64(1), // Required + VirtualGatewayId: aws.String("VirtualGatewayId"), // Required + VirtualInterfaceName: aws.String("VirtualInterfaceName"), // Required + Vlan: aws.Int64(1), // Required + AmazonAddress: aws.String("AmazonAddress"), + AuthKey: aws.String("BGPAuthKey"), + CustomerAddress: aws.String("CustomerAddress"), + }, + } + resp, err := svc.CreatePrivateVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectConnect_CreatePublicVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.CreatePublicVirtualInterfaceInput{ + ConnectionId: aws.String("ConnectionId"), // Required + NewPublicVirtualInterface: &directconnect.NewPublicVirtualInterface{ // Required + AmazonAddress: aws.String("AmazonAddress"), // Required + Asn: aws.Int64(1), // Required + CustomerAddress: aws.String("CustomerAddress"), // Required + RouteFilterPrefixes: []*directconnect.RouteFilterPrefix{ // Required + { // Required + Cidr: aws.String("CIDR"), + }, + // More values... + }, + VirtualInterfaceName: aws.String("VirtualInterfaceName"), // Required + Vlan: aws.Int64(1), // Required + AuthKey: aws.String("BGPAuthKey"), + }, + } + resp, err := svc.CreatePublicVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DeleteConnection() { + svc := directconnect.New(session.New()) + + params := &directconnect.DeleteConnectionInput{ + ConnectionId: aws.String("ConnectionId"), // Required + } + resp, err := svc.DeleteConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DeleteInterconnect() { + svc := directconnect.New(session.New()) + + params := &directconnect.DeleteInterconnectInput{ + InterconnectId: aws.String("InterconnectId"), // Required + } + resp, err := svc.DeleteInterconnect(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectConnect_DeleteVirtualInterface() { + svc := directconnect.New(session.New()) + + params := &directconnect.DeleteVirtualInterfaceInput{ + VirtualInterfaceId: aws.String("VirtualInterfaceId"), // Required + } + resp, err := svc.DeleteVirtualInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeConnections() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeConnectionsInput{ + ConnectionId: aws.String("ConnectionId"), + } + resp, err := svc.DescribeConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeConnectionsOnInterconnect() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeConnectionsOnInterconnectInput{ + InterconnectId: aws.String("InterconnectId"), // Required + } + resp, err := svc.DescribeConnectionsOnInterconnect(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeInterconnects() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeInterconnectsInput{ + InterconnectId: aws.String("InterconnectId"), + } + resp, err := svc.DescribeInterconnects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeLocations() { + svc := directconnect.New(session.New()) + + var params *directconnect.DescribeLocationsInput + resp, err := svc.DescribeLocations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeVirtualGateways() { + svc := directconnect.New(session.New()) + + var params *directconnect.DescribeVirtualGatewaysInput + resp, err := svc.DescribeVirtualGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectConnect_DescribeVirtualInterfaces() { + svc := directconnect.New(session.New()) + + params := &directconnect.DescribeVirtualInterfacesInput{ + ConnectionId: aws.String("ConnectionId"), + VirtualInterfaceId: aws.String("VirtualInterfaceId"), + } + resp, err := svc.DescribeVirtualInterfaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directconnect/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,104 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package directconnect + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS Direct Connect makes it easy to establish a dedicated network connection +// from your premises to Amazon Web Services (AWS). Using AWS Direct Connect, +// you can establish private connectivity between AWS and your data center, +// office, or colocation environment, which in many cases can reduce your network +// costs, increase bandwidth throughput, and provide a more consistent network +// experience than Internet-based connections. +// +// The AWS Direct Connect API Reference provides descriptions, syntax, and +// usage examples for each of the actions and data types for AWS Direct Connect. +// Use the following links to get started using the AWS Direct Connect API Reference: +// +// Actions (http://docs.aws.amazon.com/directconnect/latest/APIReference/API_Operations.html): +// An alphabetical list of all AWS Direct Connect actions. Data Types (http://docs.aws.amazon.com/directconnect/latest/APIReference/API_Types.html): +// An alphabetical list of all AWS Direct Connect data types. Common Query Parameters +// (http://docs.aws.amazon.com/directconnect/latest/APIReference/CommonParameters.html): +// Parameters that all Query actions can use. Common Errors (http://docs.aws.amazon.com/directconnect/latest/APIReference/CommonErrors.html): +// Client and server errors that all actions can return. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type DirectConnect struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "directconnect" + +// New creates a new instance of the DirectConnect client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DirectConnect client from just a session. +// svc := directconnect.New(mySession) +// +// // Create a DirectConnect client with additional configuration +// svc := directconnect.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectConnect { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DirectConnect { + svc := &DirectConnect{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-10-25", + JSONVersion: "1.1", + TargetPrefix: "OvertureService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DirectConnect operation and runs any +// custom request initialization. +func (c *DirectConnect) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2179 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directoryservice provides a client for AWS Directory Service. 
+package directoryservice + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opConnectDirectory = "ConnectDirectory" + +// ConnectDirectoryRequest generates a request for the ConnectDirectory operation. +func (c *DirectoryService) ConnectDirectoryRequest(input *ConnectDirectoryInput) (req *request.Request, output *ConnectDirectoryOutput) { + op := &request.Operation{ + Name: opConnectDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConnectDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &ConnectDirectoryOutput{} + req.Data = output + return +} + +// Creates an AD Connector to connect to an on-premises directory. +func (c *DirectoryService) ConnectDirectory(input *ConnectDirectoryInput) (*ConnectDirectoryOutput, error) { + req, out := c.ConnectDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opCreateAlias = "CreateAlias" + +// CreateAliasRequest generates a request for the CreateAlias operation. +func (c *DirectoryService) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { + op := &request.Operation{ + Name: opCreateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAliasOutput{} + req.Data = output + return +} + +// Creates an alias for a directory and assigns the alias to the directory. +// The alias is used to construct the access URL for the directory, such as +// http://alias.awsapps.com. +// +// After an alias has been created, it cannot be deleted or reused, so this +// operation should only be used when absolutely necessary. 
+func (c *DirectoryService) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { + req, out := c.CreateAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateComputer = "CreateComputer" + +// CreateComputerRequest generates a request for the CreateComputer operation. +func (c *DirectoryService) CreateComputerRequest(input *CreateComputerInput) (req *request.Request, output *CreateComputerOutput) { + op := &request.Operation{ + Name: opCreateComputer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateComputerInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateComputerOutput{} + req.Data = output + return +} + +// Creates a computer account in the specified directory, and joins the computer +// to the directory. +func (c *DirectoryService) CreateComputer(input *CreateComputerInput) (*CreateComputerOutput, error) { + req, out := c.CreateComputerRequest(input) + err := req.Send() + return out, err +} + +const opCreateDirectory = "CreateDirectory" + +// CreateDirectoryRequest generates a request for the CreateDirectory operation. +func (c *DirectoryService) CreateDirectoryRequest(input *CreateDirectoryInput) (req *request.Request, output *CreateDirectoryOutput) { + op := &request.Operation{ + Name: opCreateDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDirectoryOutput{} + req.Data = output + return +} + +// Creates a Simple AD directory. +func (c *DirectoryService) CreateDirectory(input *CreateDirectoryInput) (*CreateDirectoryOutput, error) { + req, out := c.CreateDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opCreateMicrosoftAD = "CreateMicrosoftAD" + +// CreateMicrosoftADRequest generates a request for the CreateMicrosoftAD operation. 
+func (c *DirectoryService) CreateMicrosoftADRequest(input *CreateMicrosoftADInput) (req *request.Request, output *CreateMicrosoftADOutput) { + op := &request.Operation{ + Name: opCreateMicrosoftAD, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateMicrosoftADInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMicrosoftADOutput{} + req.Data = output + return +} + +// Creates a Microsoft AD in the AWS cloud. +func (c *DirectoryService) CreateMicrosoftAD(input *CreateMicrosoftADInput) (*CreateMicrosoftADOutput, error) { + req, out := c.CreateMicrosoftADRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +func (c *DirectoryService) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a Simple AD directory. +// +// You cannot take snapshots of AD Connector directories. +func (c *DirectoryService) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrust = "CreateTrust" + +// CreateTrustRequest generates a request for the CreateTrust operation. 
+func (c *DirectoryService) CreateTrustRequest(input *CreateTrustInput) (req *request.Request, output *CreateTrustOutput) { + op := &request.Operation{ + Name: opCreateTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrustOutput{} + req.Data = output + return +} + +// AWS Directory Service for Microsoft Active Directory allows you to configure +// trust relationships. For example, you can establish a trust between your +// Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active +// Directory. This would allow you to provide users and groups access to resources +// in either domain, with a single set of credentials. +// +// This action initiates the creation of the AWS side of a trust relationship +// between a Microsoft AD in the AWS cloud and an external domain. +func (c *DirectoryService) CreateTrust(input *CreateTrustInput) (*CreateTrustOutput, error) { + req, out := c.CreateTrustRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDirectory = "DeleteDirectory" + +// DeleteDirectoryRequest generates a request for the DeleteDirectory operation. +func (c *DirectoryService) DeleteDirectoryRequest(input *DeleteDirectoryInput) (req *request.Request, output *DeleteDirectoryOutput) { + op := &request.Operation{ + Name: opDeleteDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDirectoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDirectoryOutput{} + req.Data = output + return +} + +// Deletes an AWS Directory Service directory. +func (c *DirectoryService) DeleteDirectory(input *DeleteDirectoryInput) (*DeleteDirectoryOutput, error) { + req, out := c.DeleteDirectoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. 
+func (c *DirectoryService) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// Deletes a directory snapshot. +func (c *DirectoryService) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrust = "DeleteTrust" + +// DeleteTrustRequest generates a request for the DeleteTrust operation. +func (c *DirectoryService) DeleteTrustRequest(input *DeleteTrustInput) (req *request.Request, output *DeleteTrustOutput) { + op := &request.Operation{ + Name: opDeleteTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrustOutput{} + req.Data = output + return +} + +// Deletes an existing trust relationship between your Microsoft AD in the AWS +// cloud and an external domain. +func (c *DirectoryService) DeleteTrust(input *DeleteTrustInput) (*DeleteTrustOutput, error) { + req, out := c.DeleteTrustRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDirectories = "DescribeDirectories" + +// DescribeDirectoriesRequest generates a request for the DescribeDirectories operation. 
+func (c *DirectoryService) DescribeDirectoriesRequest(input *DescribeDirectoriesInput) (req *request.Request, output *DescribeDirectoriesOutput) { + op := &request.Operation{ + Name: opDescribeDirectories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDirectoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDirectoriesOutput{} + req.Data = output + return +} + +// Obtains information about the directories that belong to this account. +// +// You can retrieve information about specific directories by passing the directory +// identifiers in the DirectoryIds parameter. Otherwise, all directories that +// belong to the current account are returned. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken +// member contains a token that you pass in the next call to DescribeDirectories +// to retrieve the next set of items. +// +// You can also specify a maximum number of return results with the Limit parameter. +func (c *DirectoryService) DescribeDirectories(input *DescribeDirectoriesInput) (*DescribeDirectoriesOutput, error) { + req, out := c.DescribeDirectoriesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. +func (c *DirectoryService) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// Obtains information about the directory snapshots that belong to this account. 
+// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the DescribeSnapshots.NextToken +// member contains a token that you pass in the next call to DescribeSnapshots +// to retrieve the next set of items. +// +// You can also specify a maximum number of return results with the Limit parameter. +func (c *DirectoryService) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrusts = "DescribeTrusts" + +// DescribeTrustsRequest generates a request for the DescribeTrusts operation. +func (c *DirectoryService) DescribeTrustsRequest(input *DescribeTrustsInput) (req *request.Request, output *DescribeTrustsOutput) { + op := &request.Operation{ + Name: opDescribeTrusts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrustsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrustsOutput{} + req.Data = output + return +} + +// Obtains information about the trust relationships for this account. +// +// If no input parameters are provided, such as DirectoryId or TrustIds, this +// request describes all the trust relationships belonging to the account. +func (c *DirectoryService) DescribeTrusts(input *DescribeTrustsInput) (*DescribeTrustsOutput, error) { + req, out := c.DescribeTrustsRequest(input) + err := req.Send() + return out, err +} + +const opDisableRadius = "DisableRadius" + +// DisableRadiusRequest generates a request for the DisableRadius operation. 
+func (c *DirectoryService) DisableRadiusRequest(input *DisableRadiusInput) (req *request.Request, output *DisableRadiusOutput) { + op := &request.Operation{ + Name: opDisableRadius, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableRadiusInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableRadiusOutput{} + req.Data = output + return +} + +// Disables multi-factor authentication (MFA) with the Remote Authentication +// Dial In User Service (RADIUS) server for an AD Connector directory. +func (c *DirectoryService) DisableRadius(input *DisableRadiusInput) (*DisableRadiusOutput, error) { + req, out := c.DisableRadiusRequest(input) + err := req.Send() + return out, err +} + +const opDisableSso = "DisableSso" + +// DisableSsoRequest generates a request for the DisableSso operation. +func (c *DirectoryService) DisableSsoRequest(input *DisableSsoInput) (req *request.Request, output *DisableSsoOutput) { + op := &request.Operation{ + Name: opDisableSso, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableSsoInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableSsoOutput{} + req.Data = output + return +} + +// Disables single-sign on for a directory. +func (c *DirectoryService) DisableSso(input *DisableSsoInput) (*DisableSsoOutput, error) { + req, out := c.DisableSsoRequest(input) + err := req.Send() + return out, err +} + +const opEnableRadius = "EnableRadius" + +// EnableRadiusRequest generates a request for the EnableRadius operation. 
+func (c *DirectoryService) EnableRadiusRequest(input *EnableRadiusInput) (req *request.Request, output *EnableRadiusOutput) { + op := &request.Operation{ + Name: opEnableRadius, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableRadiusInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableRadiusOutput{} + req.Data = output + return +} + +// Enables multi-factor authentication (MFA) with the Remote Authentication +// Dial In User Service (RADIUS) server for an AD Connector directory. +func (c *DirectoryService) EnableRadius(input *EnableRadiusInput) (*EnableRadiusOutput, error) { + req, out := c.EnableRadiusRequest(input) + err := req.Send() + return out, err +} + +const opEnableSso = "EnableSso" + +// EnableSsoRequest generates a request for the EnableSso operation. +func (c *DirectoryService) EnableSsoRequest(input *EnableSsoInput) (req *request.Request, output *EnableSsoOutput) { + op := &request.Operation{ + Name: opEnableSso, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableSsoInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableSsoOutput{} + req.Data = output + return +} + +// Enables single-sign on for a directory. +func (c *DirectoryService) EnableSso(input *EnableSsoInput) (*EnableSsoOutput, error) { + req, out := c.EnableSsoRequest(input) + err := req.Send() + return out, err +} + +const opGetDirectoryLimits = "GetDirectoryLimits" + +// GetDirectoryLimitsRequest generates a request for the GetDirectoryLimits operation. 
+func (c *DirectoryService) GetDirectoryLimitsRequest(input *GetDirectoryLimitsInput) (req *request.Request, output *GetDirectoryLimitsOutput) { + op := &request.Operation{ + Name: opGetDirectoryLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDirectoryLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDirectoryLimitsOutput{} + req.Data = output + return +} + +// Obtains directory limit information for the current region. +func (c *DirectoryService) GetDirectoryLimits(input *GetDirectoryLimitsInput) (*GetDirectoryLimitsOutput, error) { + req, out := c.GetDirectoryLimitsRequest(input) + err := req.Send() + return out, err +} + +const opGetSnapshotLimits = "GetSnapshotLimits" + +// GetSnapshotLimitsRequest generates a request for the GetSnapshotLimits operation. +func (c *DirectoryService) GetSnapshotLimitsRequest(input *GetSnapshotLimitsInput) (req *request.Request, output *GetSnapshotLimitsOutput) { + op := &request.Operation{ + Name: opGetSnapshotLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSnapshotLimitsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSnapshotLimitsOutput{} + req.Data = output + return +} + +// Obtains the manual snapshot limits for a directory. +func (c *DirectoryService) GetSnapshotLimits(input *GetSnapshotLimitsInput) (*GetSnapshotLimitsOutput, error) { + req, out := c.GetSnapshotLimitsRequest(input) + err := req.Send() + return out, err +} + +const opRestoreFromSnapshot = "RestoreFromSnapshot" + +// RestoreFromSnapshotRequest generates a request for the RestoreFromSnapshot operation. 
+func (c *DirectoryService) RestoreFromSnapshotRequest(input *RestoreFromSnapshotInput) (req *request.Request, output *RestoreFromSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreFromSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreFromSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreFromSnapshotOutput{} + req.Data = output + return +} + +// Restores a directory using an existing directory snapshot. +// +// When you restore a directory from a snapshot, any changes made to the directory +// after the snapshot date are overwritten. +// +// This action returns as soon as the restore operation is initiated. You can +// monitor the progress of the restore operation by calling the DescribeDirectories +// operation with the directory identifier. When the DirectoryDescription.Stage +// value changes to Active, the restore operation is complete. +func (c *DirectoryService) RestoreFromSnapshot(input *RestoreFromSnapshotInput) (*RestoreFromSnapshotOutput, error) { + req, out := c.RestoreFromSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRadius = "UpdateRadius" + +// UpdateRadiusRequest generates a request for the UpdateRadius operation. +func (c *DirectoryService) UpdateRadiusRequest(input *UpdateRadiusInput) (req *request.Request, output *UpdateRadiusOutput) { + op := &request.Operation{ + Name: opUpdateRadius, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRadiusInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRadiusOutput{} + req.Data = output + return +} + +// Updates the Remote Authentication Dial In User Service (RADIUS) server information +// for an AD Connector directory. 
+func (c *DirectoryService) UpdateRadius(input *UpdateRadiusInput) (*UpdateRadiusOutput, error) { + req, out := c.UpdateRadiusRequest(input) + err := req.Send() + return out, err +} + +const opVerifyTrust = "VerifyTrust" + +// VerifyTrustRequest generates a request for the VerifyTrust operation. +func (c *DirectoryService) VerifyTrustRequest(input *VerifyTrustInput) (req *request.Request, output *VerifyTrustOutput) { + op := &request.Operation{ + Name: opVerifyTrust, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyTrustInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyTrustOutput{} + req.Data = output + return +} + +// AWS Directory Service for Microsoft Active Directory allows you to configure +// and verify trust relationships. +// +// This action verifies a trust relationship between your Microsoft AD in the +// AWS cloud and an external domain. +func (c *DirectoryService) VerifyTrust(input *VerifyTrustInput) (*VerifyTrustOutput, error) { + req, out := c.VerifyTrustRequest(input) + err := req.Send() + return out, err +} + +// Represents a named directory attribute. +type Attribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Name *string `min:"1" type:"string"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// Contains information about a computer account in a directory. +type Computer struct { + _ struct{} `type:"structure"` + + // An array of Attribute objects containing the LDAP attributes that belong + // to the computer account. + ComputerAttributes []*Attribute `type:"list"` + + // The identifier of the computer. + ComputerId *string `min:"1" type:"string"` + + // The computer name. 
+ ComputerName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Computer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Computer) GoString() string { + return s.String() +} + +// Contains the inputs for the ConnectDirectory operation. +type ConnectDirectoryInput struct { + _ struct{} `type:"structure"` + + // A DirectoryConnectSettings object that contains additional information for + // the operation. + ConnectSettings *DirectoryConnectSettings `type:"structure" required:"true"` + + // A textual description for the directory. + Description *string `type:"string"` + + // The fully-qualified name of the on-premises directory, such as corp.example.com. + Name *string `type:"string" required:"true"` + + // The password for the on-premises user account. + Password *string `min:"1" type:"string" required:"true"` + + // The NetBIOS name of the on-premises directory, such as CORP. + ShortName *string `type:"string"` + + // The size of the directory. + Size *string `type:"string" required:"true" enum:"DirectorySize"` +} + +// String returns the string representation +func (s ConnectDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectDirectoryInput) GoString() string { + return s.String() +} + +// Contains the results of the ConnectDirectory operation. +type ConnectDirectoryOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the new directory. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s ConnectDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectDirectoryOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateAlias operation. 
+type CreateAliasInput struct { + _ struct{} `type:"structure"` + + // The requested alias. + // + // The alias must be unique amongst all aliases in AWS. This operation throws + // an EntityAlreadyExistsException error if the alias already exists. + Alias *string `min:"1" type:"string" required:"true"` + + // The identifier of the directory for which to create the alias. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasInput) GoString() string { + return s.String() +} + +// Contains the results of the CreateAlias operation. +type CreateAliasOutput struct { + _ struct{} `type:"structure"` + + // The alias for the directory. + Alias *string `min:"1" type:"string"` + + // The identifier of the directory. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateComputer operation. +type CreateComputerInput struct { + _ struct{} `type:"structure"` + + // An array of Attribute objects that contain any LDAP attributes to apply to + // the computer account. + ComputerAttributes []*Attribute `type:"list"` + + // The name of the computer account. + ComputerName *string `min:"1" type:"string" required:"true"` + + // The identifier of the directory in which to create the computer account. + DirectoryId *string `type:"string" required:"true"` + + // The fully-qualified distinguished name of the organizational unit to place + // the computer account in. + OrganizationalUnitDistinguishedName *string `min:"1" type:"string"` + + // A one-time password that is used to join the computer to the directory. 
You + // should generate a random, strong password to use for this parameter. + Password *string `min:"8" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateComputerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateComputerInput) GoString() string { + return s.String() +} + +// Contains the results for the CreateComputer operation. +type CreateComputerOutput struct { + _ struct{} `type:"structure"` + + // A Computer object that represents the computer account. + Computer *Computer `type:"structure"` +} + +// String returns the string representation +func (s CreateComputerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateComputerOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateDirectory operation. +type CreateDirectoryInput struct { + _ struct{} `type:"structure"` + + // A textual description for the directory. + Description *string `type:"string"` + + // The fully qualified name for the directory, such as corp.example.com. + Name *string `type:"string" required:"true"` + + // The password for the directory administrator. The directory creation process + // creates a directory administrator account with the username Administrator + // and this password. + Password *string `type:"string" required:"true"` + + // The short name of the directory, such as CORP. + ShortName *string `type:"string"` + + // The size of the directory. + Size *string `type:"string" required:"true" enum:"DirectorySize"` + + // A DirectoryVpcSettings object that contains additional information for the + // operation. 
+ VpcSettings *DirectoryVpcSettings `type:"structure"` +} + +// String returns the string representation +func (s CreateDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryInput) GoString() string { + return s.String() +} + +// Contains the results of the CreateDirectory operation. +type CreateDirectoryOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory that was created. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDirectoryOutput) GoString() string { + return s.String() +} + +// Creates a Microsoft AD in the AWS cloud. +type CreateMicrosoftADInput struct { + _ struct{} `type:"structure"` + + // A textual description for the directory. This label will appear on the AWS + // console Directory Details page after the directory is created. + Description *string `type:"string"` + + // The fully qualified domain name for the directory, such as corp.example.com. + // This name will resolve inside your VPC only. It does not need to be publicly + // resolvable. + Name *string `type:"string" required:"true"` + + // The password for the default administrative user named Admin. + Password *string `type:"string" required:"true"` + + // The NetBIOS name for your domain. A short identifier for your domain, such + // as CORP. If you don't specify a NetBIOS name, it will default to the first + // part of your directory DNS. For example, CORP for the directory DNS corp.example.com. + ShortName *string `type:"string"` + + // Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation. 
+ VpcSettings *DirectoryVpcSettings `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateMicrosoftADInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMicrosoftADInput) GoString() string { + return s.String() +} + +type CreateMicrosoftADOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory that was created. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMicrosoftADOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMicrosoftADOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateSnapshot operation. +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory to take a snapshot of. + DirectoryId *string `type:"string" required:"true"` + + // The descriptive name to apply to the snapshot. + Name *string `type:"string"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// Contains the results of the CreateSnapshot operation. +type CreateSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the snapshot that was created. + SnapshotId *string `type:"string"` +} + +// String returns the string representation +func (s CreateSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotOutput) GoString() string { + return s.String() +} + +// AWS Directory Service for Microsoft Active Directory allows you to configure +// trust relationships. 
For example, you can establish a trust between your
+// Microsoft AD in the AWS cloud, and your existing on-premises Microsoft Active
+// Directory. This would allow you to provide users and groups access to resources
+// in either domain, with a single set of credentials.
+//
+// This action initiates the creation of the AWS side of a trust relationship
+// between a Microsoft AD in the AWS cloud and an external domain.
+type CreateTrustInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Directory ID of the Microsoft AD in the AWS cloud for which to establish
+	// the trust relationship.
+	DirectoryId *string `type:"string" required:"true"`
+
+	// The Fully Qualified Domain Name (FQDN) of the external domain for which to
+	// create the trust relationship.
+	RemoteDomainName *string `type:"string" required:"true"`
+
+	// The direction of the trust relationship.
+	TrustDirection *string `type:"string" required:"true" enum:"TrustDirection"`
+
+	// The trust password. This must be the same password that was used when creating
+	// the trust relationship on the external domain.
+	TrustPassword *string `min:"1" type:"string" required:"true"`
+
+	// The trust relationship type.
+	TrustType *string `type:"string" enum:"TrustType"`
+}
+
+// String returns the string representation
+func (s CreateTrustInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrustInput) GoString() string {
+	return s.String()
+}
+
+type CreateTrustOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier for the trust relationship that was created.
+	TrustId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateTrustOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTrustOutput) GoString() string {
+	return s.String()
+}
+
+// Contains the inputs for the DeleteDirectory operation.
+type DeleteDirectoryInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory to delete. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDirectoryInput) GoString() string { + return s.String() +} + +// Contains the results of the DeleteDirectory operation. +type DeleteDirectoryOutput struct { + _ struct{} `type:"structure"` + + // The directory identifier. + DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDirectoryOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DeleteSnapshot operation. +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory snapshot to be deleted. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +// Contains the results of the DeleteSnapshot operation. +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory snapshot that was deleted. + SnapshotId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Deletes the local side of an existing trust relationship between the Microsoft +// AD in the AWS cloud and the external domain. 
+type DeleteTrustInput struct { + _ struct{} `type:"structure"` + + // The Trust ID of the trust relationship to be deleted. + TrustId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrustInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrustInput) GoString() string { + return s.String() +} + +type DeleteTrustOutput struct { + _ struct{} `type:"structure"` + + // The Trust ID of the trust relationship that was deleted. + TrustId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteTrustOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrustOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeDirectories operation. +type DescribeDirectoriesInput struct { + _ struct{} `type:"structure"` + + // A list of identifiers of the directories for which to obtain the information. + // If this member is null, all directories that belong to the current account + // are returned. + // + // An empty list results in an InvalidParameterException being thrown. + DirectoryIds []*string `type:"list"` + + // The maximum number of items to return. If this value is zero, the maximum + // number of items is specified by the limitations of the operation. + Limit *int64 `type:"integer"` + + // The DescribeDirectoriesResult.NextToken value from a previous call to DescribeDirectories. + // Pass null if this is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoriesInput) GoString() string { + return s.String() +} + +// Contains the results of the DescribeDirectories operation. 
+type DescribeDirectoriesOutput struct { + _ struct{} `type:"structure"` + + // The list of DirectoryDescription objects that were retrieved. + // + // It is possible that this list contains less than the number of items specified + // in the Limit member of the request. This occurs if there are less than the + // requested number of items left to retrieve, or if the limitations of the + // operation have been exceeded. + DirectoryDescriptions []*DirectoryDescription `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to DescribeDirectories to retrieve the next + // set of items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDirectoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDirectoriesOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeSnapshots operation. +type DescribeSnapshotsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to retrieve snapshot information. + DirectoryId *string `type:"string"` + + // The maximum number of objects to return. + Limit *int64 `type:"integer"` + + // The DescribeSnapshotsResult.NextToken value from a previous call to DescribeSnapshots. + // Pass null if this is the first call. + NextToken *string `type:"string"` + + // A list of identifiers of the snapshots to obtain the information for. If + // this member is null or empty, all snapshots are returned using the Limit + // and NextToken members. 
+	SnapshotIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsInput) GoString() string {
+	return s.String()
+}
+
+// Contains the results of the DescribeSnapshots operation.
+type DescribeSnapshotsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If not null, more results are available. Pass this value in the NextToken
+	// member of a subsequent call to DescribeSnapshots.
+	NextToken *string `type:"string"`
+
+	// The list of Snapshot objects that were retrieved.
+	//
+	// It is possible that this list contains less than the number of items specified
+	// in the Limit member of the request. This occurs if there are less than the
+	// requested number of items left to retrieve, or if the limitations of the
+	// operation have been exceeded.
+	Snapshots []*Snapshot `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotsOutput) GoString() string {
+	return s.String()
+}
+
+// Describes the trust relationships for a particular Microsoft AD in the AWS
+// cloud. If no input parameters are provided, such as directory ID or trust
+// ID, this request describes all the trust relationships.
+type DescribeTrustsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Directory ID of the AWS directory that is a part of the requested trust
+	// relationship.
+	DirectoryId *string `type:"string"`
+
+	// The maximum number of objects to return.
+	Limit *int64 `type:"integer"`
+
+	// The DescribeTrustsResult.NextToken value from a previous call to DescribeTrusts.
+	// Pass null if this is the first call.
+ NextToken *string `type:"string"` + + // A list of identifiers of the trust relationships for which to obtain the + // information. If this member is null, all trust relationships that belong + // to the current account are returned. + // + // An empty list results in an InvalidParameterException being thrown. + TrustIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTrustsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustsInput) GoString() string { + return s.String() +} + +type DescribeTrustsOutput struct { + _ struct{} `type:"structure"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to DescribeTrusts to retrieve the next set + // of items. + NextToken *string `type:"string"` + + // The list of Trust objects that were retrieved. + // + // It is possible that this list contains less than the number of items specified + // in the Limit member of the request. This occurs if there are less than the + // requested number of items left to retrieve, or if the limitations of the + // operation have been exceeded. + Trusts []*Trust `type:"list"` +} + +// String returns the string representation +func (s DescribeTrustsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustsOutput) GoString() string { + return s.String() +} + +// Contains information for the ConnectDirectory operation when an AD Connector +// directory is being created. +type DirectoryConnectSettings struct { + _ struct{} `type:"structure"` + + // A list of one or more IP addresses of DNS servers or domain controllers in + // the on-premises directory. + CustomerDnsIps []*string `type:"list" required:"true"` + + // The username of an account in the on-premises directory that is used to connect + // to the directory. 
This account must have the following privileges: + // + // Read users and groups Create computer objects Join computers to the domain + CustomerUserName *string `min:"1" type:"string" required:"true"` + + // A list of subnet identifiers in the VPC in which the AD Connector is created. + SubnetIds []*string `type:"list" required:"true"` + + // The identifier of the VPC in which the AD Connector is created. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DirectoryConnectSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryConnectSettings) GoString() string { + return s.String() +} + +// Contains information about an AD Connector directory. +type DirectoryConnectSettingsDescription struct { + _ struct{} `type:"structure"` + + // A list of the Availability Zones that the directory is in. + AvailabilityZones []*string `type:"list"` + + // The IP addresses of the AD Connector servers. + ConnectIps []*string `type:"list"` + + // The username of the service account in the on-premises directory. + CustomerUserName *string `min:"1" type:"string"` + + // The security group identifier for the AD Connector directory. + SecurityGroupId *string `type:"string"` + + // A list of subnet identifiers in the VPC that the AD connector is in. + SubnetIds []*string `type:"list"` + + // The identifier of the VPC that the AD Connector is in. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DirectoryConnectSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryConnectSettingsDescription) GoString() string { + return s.String() +} + +// Contains information about an AWS Directory Service directory. +type DirectoryDescription struct { + _ struct{} `type:"structure"` + + // The access URL for the directory, such as http://alias.awsapps.com. 
If no + // alias has been created for the directory, alias is the directory identifier, + // such as d-XXXXXXXXXX. + AccessUrl *string `min:"1" type:"string"` + + // The alias for the directory. If no alias has been created for the directory, + // the alias is the directory identifier, such as d-XXXXXXXXXX. + Alias *string `min:"1" type:"string"` + + // A DirectoryConnectSettingsDescription object that contains additional information + // about an AD Connector directory. This member is only present if the directory + // is an AD Connector directory. + ConnectSettings *DirectoryConnectSettingsDescription `type:"structure"` + + // The textual description for the directory. + Description *string `type:"string"` + + // The directory identifier. + DirectoryId *string `type:"string"` + + // The IP addresses of the DNS servers for the directory. For a Simple AD or + // Microsoft AD directory, these are the IP addresses of the Simple AD or Microsoft + // AD directory servers. For an AD Connector directory, these are the IP addresses + // of the DNS servers or domain controllers in the on-premises directory to + // which the AD Connector is connected. + DnsIpAddrs []*string `type:"list"` + + // Specifies when the directory was created. + LaunchTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The fully-qualified name of the directory. + Name *string `type:"string"` + + // A RadiusSettings object that contains information about the RADIUS server + // configured for this directory. + RadiusSettings *RadiusSettings `type:"structure"` + + // The status of the RADIUS MFA server connection. + RadiusStatus *string `type:"string" enum:"RadiusStatus"` + + // The short name of the directory. + ShortName *string `type:"string"` + + // The directory size. + Size *string `type:"string" enum:"DirectorySize"` + + // Indicates if single-sign on is enabled for the directory. For more information, + // see EnableSso and DisableSso. 
+ SsoEnabled *bool `type:"boolean"` + + // The current stage of the directory. + Stage *string `type:"string" enum:"DirectoryStage"` + + // The date and time that the stage was last updated. + StageLastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Additional information about the directory stage. + StageReason *string `type:"string"` + + // The directory size. + Type *string `type:"string" enum:"DirectoryType"` + + // A DirectoryVpcSettingsDescription object that contains additional information + // about a directory. This member is only present if the directory is a Simple + // AD or Managed AD directory. + VpcSettings *DirectoryVpcSettingsDescription `type:"structure"` +} + +// String returns the string representation +func (s DirectoryDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryDescription) GoString() string { + return s.String() +} + +// Contains directory limit information for a region. +type DirectoryLimits struct { + _ struct{} `type:"structure"` + + // The current number of cloud directories in the region. + CloudOnlyDirectoriesCurrentCount *int64 `type:"integer"` + + // The maximum number of cloud directories allowed in the region. + CloudOnlyDirectoriesLimit *int64 `type:"integer"` + + // Indicates if the cloud directory limit has been reached. + CloudOnlyDirectoriesLimitReached *bool `type:"boolean"` + + // The current number of Microsoft AD directories in the region. + CloudOnlyMicrosoftADCurrentCount *int64 `type:"integer"` + + // The maximum number of Microsoft AD directories allowed in the region. + CloudOnlyMicrosoftADLimit *int64 `type:"integer"` + + // Indicates if the Microsoft AD directory limit has been reached. + CloudOnlyMicrosoftADLimitReached *bool `type:"boolean"` + + // The current number of connected directories in the region. 
+ ConnectedDirectoriesCurrentCount *int64 `type:"integer"` + + // The maximum number of connected directories allowed in the region. + ConnectedDirectoriesLimit *int64 `type:"integer"` + + // Indicates if the connected directory limit has been reached. + ConnectedDirectoriesLimitReached *bool `type:"boolean"` +} + +// String returns the string representation +func (s DirectoryLimits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryLimits) GoString() string { + return s.String() +} + +// Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation. +type DirectoryVpcSettings struct { + _ struct{} `type:"structure"` + + // The identifiers of the subnets for the directory servers. The two subnets + // must be in different Availability Zones. AWS Directory Service creates a + // directory server and a DNS server in each of these subnets. + SubnetIds []*string `type:"list" required:"true"` + + // The identifier of the VPC in which to create the directory. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DirectoryVpcSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryVpcSettings) GoString() string { + return s.String() +} + +// Contains information about the directory. +type DirectoryVpcSettingsDescription struct { + _ struct{} `type:"structure"` + + // The list of Availability Zones that the directory is in. + AvailabilityZones []*string `type:"list"` + + // The security group identifier for the directory. If the directory was created + // before 8/1/2014, this is the identifier of the directory members security + // group that was created when the directory was created. If the directory was + // created after this date, this value is null. + SecurityGroupId *string `type:"string"` + + // The identifiers of the subnets for the directory servers. 
+ SubnetIds []*string `type:"list"` + + // The identifier of the VPC that the directory is in. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DirectoryVpcSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryVpcSettingsDescription) GoString() string { + return s.String() +} + +// Contains the inputs for the DisableRadius operation. +type DisableRadiusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to disable MFA. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableRadiusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableRadiusInput) GoString() string { + return s.String() +} + +// Contains the results of the DisableRadius operation. +type DisableRadiusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableRadiusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableRadiusOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DisableSso operation. +type DisableSsoInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to disable single-sign on. + DirectoryId *string `type:"string" required:"true"` + + // The password of an alternate account to use to disable single-sign on. This + // is only used for AD Connector directories. For more information, see the + // UserName parameter. + Password *string `min:"1" type:"string"` + + // The username of an alternate account to use to disable single-sign on. This + // is only used for AD Connector directories. This account must have privileges + // to remove a service principal name. 
+ // + // If the AD Connector service account does not have privileges to remove a + // service principal name, you can specify an alternate account with the UserName + // and Password parameters. These credentials are only used to disable single + // sign-on and are not stored by the service. The AD Connector service account + // is not changed. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DisableSsoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableSsoInput) GoString() string { + return s.String() +} + +// Contains the results of the DisableSso operation. +type DisableSsoOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableSsoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableSsoOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the EnableRadius operation. +type EnableRadiusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to enable MFA. + DirectoryId *string `type:"string" required:"true"` + + // A RadiusSettings object that contains information about the RADIUS server. + RadiusSettings *RadiusSettings `type:"structure" required:"true"` +} + +// String returns the string representation +func (s EnableRadiusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableRadiusInput) GoString() string { + return s.String() +} + +// Contains the results of the EnableRadius operation. 
+type EnableRadiusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableRadiusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableRadiusOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the EnableSso operation. +type EnableSsoInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to enable single-sign on. + DirectoryId *string `type:"string" required:"true"` + + // The password of an alternate account to use to enable single-sign on. This + // is only used for AD Connector directories. For more information, see the + // UserName parameter. + Password *string `min:"1" type:"string"` + + // The username of an alternate account to use to enable single-sign on. This + // is only used for AD Connector directories. This account must have privileges + // to add a service principal name. + // + // If the AD Connector service account does not have privileges to add a service + // principal name, you can specify an alternate account with the UserName and + // Password parameters. These credentials are only used to enable single sign-on + // and are not stored by the service. The AD Connector service account is not + // changed. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EnableSsoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSsoInput) GoString() string { + return s.String() +} + +// Contains the results of the EnableSso operation. 
+type EnableSsoOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableSsoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSsoOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the GetDirectoryLimits operation. +type GetDirectoryLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetDirectoryLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDirectoryLimitsInput) GoString() string { + return s.String() +} + +// Contains the results of the GetDirectoryLimits operation. +type GetDirectoryLimitsOutput struct { + _ struct{} `type:"structure"` + + // A DirectoryLimits object that contains the directory limits for the current + // region. + DirectoryLimits *DirectoryLimits `type:"structure"` +} + +// String returns the string representation +func (s GetDirectoryLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDirectoryLimitsOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the GetSnapshotLimits operation. +type GetSnapshotLimitsInput struct { + _ struct{} `type:"structure"` + + // Contains the identifier of the directory to obtain the limits for. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSnapshotLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSnapshotLimitsInput) GoString() string { + return s.String() +} + +// Contains the results of the GetSnapshotLimits operation. 
+type GetSnapshotLimitsOutput struct { + _ struct{} `type:"structure"` + + // A SnapshotLimits object that contains the manual snapshot limits for the + // specified directory. + SnapshotLimits *SnapshotLimits `type:"structure"` +} + +// String returns the string representation +func (s GetSnapshotLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSnapshotLimitsOutput) GoString() string { + return s.String() +} + +// Contains information about a Remote Authentication Dial In User Service (RADIUS) +// server. +type RadiusSettings struct { + _ struct{} `type:"structure"` + + // The protocol specified for your RADIUS endpoints. + AuthenticationProtocol *string `type:"string" enum:"RadiusAuthenticationProtocol"` + + // Not currently used. + DisplayLabel *string `min:"1" type:"string"` + + // The port that your RADIUS server is using for communications. Your on-premises + // network must allow inbound traffic over this port from the AWS Directory + // Service servers. + RadiusPort *int64 `min:"1025" type:"integer"` + + // The maximum number of times that communication with the RADIUS server is + // attempted. + RadiusRetries *int64 `type:"integer"` + + // An array of strings that contains the IP addresses of the RADIUS server endpoints, + // or the IP addresses of your RADIUS server load balancer. + RadiusServers []*string `type:"list"` + + // The amount of time, in seconds, to wait for the RADIUS server to respond. + RadiusTimeout *int64 `min:"1" type:"integer"` + + // The shared secret code that was specified when your RADIUS endpoints were + // created. + SharedSecret *string `min:"8" type:"string"` + + // Not currently used. 
+ UseSameUsername *bool `type:"boolean"` +} + +// String returns the string representation +func (s RadiusSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RadiusSettings) GoString() string { + return s.String() +} + +// An object representing the inputs for the RestoreFromSnapshot operation. +type RestoreFromSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the snapshot to restore from. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RestoreFromSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromSnapshotInput) GoString() string { + return s.String() +} + +// Contains the results of the RestoreFromSnapshot operation. +type RestoreFromSnapshotOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RestoreFromSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromSnapshotOutput) GoString() string { + return s.String() +} + +// Describes a directory snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // The directory identifier. + DirectoryId *string `type:"string"` + + // The descriptive name of the snapshot. + Name *string `type:"string"` + + // The snapshot identifier. + SnapshotId *string `type:"string"` + + // The date and time that the snapshot was taken. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The snapshot status. + Status *string `type:"string" enum:"SnapshotStatus"` + + // The snapshot type. 
+ Type *string `type:"string" enum:"SnapshotType"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// Contains manual snapshot limit information for a directory. +type SnapshotLimits struct { + _ struct{} `type:"structure"` + + // The current number of manual snapshots of the directory. + ManualSnapshotsCurrentCount *int64 `type:"integer"` + + // The maximum number of manual snapshots allowed. + ManualSnapshotsLimit *int64 `type:"integer"` + + // Indicates if the manual snapshot limit has been reached. + ManualSnapshotsLimitReached *bool `type:"boolean"` +} + +// String returns the string representation +func (s SnapshotLimits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotLimits) GoString() string { + return s.String() +} + +// Describes a trust relationship between an Microsoft AD in the AWS cloud and +// an external domain. +type Trust struct { + _ struct{} `type:"structure"` + + // The date and time that the trust relationship was created. + CreatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Directory ID of the AWS directory involved in the trust relationship. + DirectoryId *string `type:"string"` + + // The date and time that the trust relationship was last updated. + LastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Fully Qualified Domain Name (FQDN) of the external domain involved in + // the trust relationship. + RemoteDomainName *string `type:"string"` + + // The date and time that the TrustState was last updated. + StateLastUpdatedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The trust relationship direction. + TrustDirection *string `type:"string" enum:"TrustDirection"` + + // The unique ID of the trust relationship. 
+ TrustId *string `type:"string"` + + // The trust relationship state. + TrustState *string `type:"string" enum:"TrustState"` + + // The trust relationship type. + TrustType *string `type:"string" enum:"TrustType"` +} + +// String returns the string representation +func (s Trust) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Trust) GoString() string { + return s.String() +} + +// Contains the inputs for the UpdateRadius operation. +type UpdateRadiusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory for which to update the RADIUS server information. + DirectoryId *string `type:"string" required:"true"` + + // A RadiusSettings object that contains information about the RADIUS server. + RadiusSettings *RadiusSettings `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateRadiusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRadiusInput) GoString() string { + return s.String() +} + +// Contains the results of the UpdateRadius operation. +type UpdateRadiusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRadiusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRadiusOutput) GoString() string { + return s.String() +} + +// Initiates the verification of an existing trust relationship between a Microsoft +// AD in the AWS cloud and an external domain. +type VerifyTrustInput struct { + _ struct{} `type:"structure"` + + // The unique Trust ID of the trust relationship to verify. 
+ TrustId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyTrustInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyTrustInput) GoString() string { + return s.String() +} + +type VerifyTrustOutput struct { + _ struct{} `type:"structure"` + + // The unique Trust ID of the trust relationship that was verified. + TrustId *string `type:"string"` +} + +// String returns the string representation +func (s VerifyTrustOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyTrustOutput) GoString() string { + return s.String() +} + +const ( + // @enum DirectorySize + DirectorySizeSmall = "Small" + // @enum DirectorySize + DirectorySizeLarge = "Large" +) + +const ( + // @enum DirectoryStage + DirectoryStageRequested = "Requested" + // @enum DirectoryStage + DirectoryStageCreating = "Creating" + // @enum DirectoryStage + DirectoryStageCreated = "Created" + // @enum DirectoryStage + DirectoryStageActive = "Active" + // @enum DirectoryStage + DirectoryStageInoperable = "Inoperable" + // @enum DirectoryStage + DirectoryStageImpaired = "Impaired" + // @enum DirectoryStage + DirectoryStageRestoring = "Restoring" + // @enum DirectoryStage + DirectoryStageRestoreFailed = "RestoreFailed" + // @enum DirectoryStage + DirectoryStageDeleting = "Deleting" + // @enum DirectoryStage + DirectoryStageDeleted = "Deleted" + // @enum DirectoryStage + DirectoryStageFailed = "Failed" +) + +const ( + // @enum DirectoryType + DirectoryTypeSimpleAd = "SimpleAD" + // @enum DirectoryType + DirectoryTypeAdconnector = "ADConnector" + // @enum DirectoryType + DirectoryTypeMicrosoftAd = "MicrosoftAD" +) + +const ( + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolPap = "PAP" + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolChap = "CHAP" + // @enum RadiusAuthenticationProtocol + 
RadiusAuthenticationProtocolMsChapv1 = "MS-CHAPv1" + // @enum RadiusAuthenticationProtocol + RadiusAuthenticationProtocolMsChapv2 = "MS-CHAPv2" +) + +const ( + // @enum RadiusStatus + RadiusStatusCreating = "Creating" + // @enum RadiusStatus + RadiusStatusCompleted = "Completed" + // @enum RadiusStatus + RadiusStatusFailed = "Failed" +) + +const ( + // @enum SnapshotStatus + SnapshotStatusCreating = "Creating" + // @enum SnapshotStatus + SnapshotStatusCompleted = "Completed" + // @enum SnapshotStatus + SnapshotStatusFailed = "Failed" +) + +const ( + // @enum SnapshotType + SnapshotTypeAuto = "Auto" + // @enum SnapshotType + SnapshotTypeManual = "Manual" +) + +const ( + // @enum TrustDirection + TrustDirectionOneWayOutgoing = "One-Way: Outgoing" + // @enum TrustDirection + TrustDirectionOneWayIncoming = "One-Way: Incoming" + // @enum TrustDirection + TrustDirectionTwoWay = "Two-Way" +) + +const ( + // @enum TrustState + TrustStateCreating = "Creating" + // @enum TrustState + TrustStateCreated = "Created" + // @enum TrustState + TrustStateVerifying = "Verifying" + // @enum TrustState + TrustStateVerifyFailed = "VerifyFailed" + // @enum TrustState + TrustStateVerified = "Verified" + // @enum TrustState + TrustStateDeleting = "Deleting" + // @enum TrustState + TrustStateDeleted = "Deleted" + // @enum TrustState + TrustStateFailed = "Failed" +) + +const ( + // @enum TrustType + TrustTypeForest = "Forest" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,102 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package directoryserviceiface provides an interface for the AWS Directory Service. +package directoryserviceiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/directoryservice" +) + +// DirectoryServiceAPI is the interface type for directoryservice.DirectoryService. +type DirectoryServiceAPI interface { + ConnectDirectoryRequest(*directoryservice.ConnectDirectoryInput) (*request.Request, *directoryservice.ConnectDirectoryOutput) + + ConnectDirectory(*directoryservice.ConnectDirectoryInput) (*directoryservice.ConnectDirectoryOutput, error) + + CreateAliasRequest(*directoryservice.CreateAliasInput) (*request.Request, *directoryservice.CreateAliasOutput) + + CreateAlias(*directoryservice.CreateAliasInput) (*directoryservice.CreateAliasOutput, error) + + CreateComputerRequest(*directoryservice.CreateComputerInput) (*request.Request, *directoryservice.CreateComputerOutput) + + CreateComputer(*directoryservice.CreateComputerInput) (*directoryservice.CreateComputerOutput, error) + + CreateDirectoryRequest(*directoryservice.CreateDirectoryInput) (*request.Request, *directoryservice.CreateDirectoryOutput) + + CreateDirectory(*directoryservice.CreateDirectoryInput) (*directoryservice.CreateDirectoryOutput, error) + + CreateMicrosoftADRequest(*directoryservice.CreateMicrosoftADInput) (*request.Request, *directoryservice.CreateMicrosoftADOutput) + + CreateMicrosoftAD(*directoryservice.CreateMicrosoftADInput) (*directoryservice.CreateMicrosoftADOutput, error) + + CreateSnapshotRequest(*directoryservice.CreateSnapshotInput) (*request.Request, *directoryservice.CreateSnapshotOutput) + + CreateSnapshot(*directoryservice.CreateSnapshotInput) (*directoryservice.CreateSnapshotOutput, error) 
+ + CreateTrustRequest(*directoryservice.CreateTrustInput) (*request.Request, *directoryservice.CreateTrustOutput) + + CreateTrust(*directoryservice.CreateTrustInput) (*directoryservice.CreateTrustOutput, error) + + DeleteDirectoryRequest(*directoryservice.DeleteDirectoryInput) (*request.Request, *directoryservice.DeleteDirectoryOutput) + + DeleteDirectory(*directoryservice.DeleteDirectoryInput) (*directoryservice.DeleteDirectoryOutput, error) + + DeleteSnapshotRequest(*directoryservice.DeleteSnapshotInput) (*request.Request, *directoryservice.DeleteSnapshotOutput) + + DeleteSnapshot(*directoryservice.DeleteSnapshotInput) (*directoryservice.DeleteSnapshotOutput, error) + + DeleteTrustRequest(*directoryservice.DeleteTrustInput) (*request.Request, *directoryservice.DeleteTrustOutput) + + DeleteTrust(*directoryservice.DeleteTrustInput) (*directoryservice.DeleteTrustOutput, error) + + DescribeDirectoriesRequest(*directoryservice.DescribeDirectoriesInput) (*request.Request, *directoryservice.DescribeDirectoriesOutput) + + DescribeDirectories(*directoryservice.DescribeDirectoriesInput) (*directoryservice.DescribeDirectoriesOutput, error) + + DescribeSnapshotsRequest(*directoryservice.DescribeSnapshotsInput) (*request.Request, *directoryservice.DescribeSnapshotsOutput) + + DescribeSnapshots(*directoryservice.DescribeSnapshotsInput) (*directoryservice.DescribeSnapshotsOutput, error) + + DescribeTrustsRequest(*directoryservice.DescribeTrustsInput) (*request.Request, *directoryservice.DescribeTrustsOutput) + + DescribeTrusts(*directoryservice.DescribeTrustsInput) (*directoryservice.DescribeTrustsOutput, error) + + DisableRadiusRequest(*directoryservice.DisableRadiusInput) (*request.Request, *directoryservice.DisableRadiusOutput) + + DisableRadius(*directoryservice.DisableRadiusInput) (*directoryservice.DisableRadiusOutput, error) + + DisableSsoRequest(*directoryservice.DisableSsoInput) (*request.Request, *directoryservice.DisableSsoOutput) + + 
DisableSso(*directoryservice.DisableSsoInput) (*directoryservice.DisableSsoOutput, error) + + EnableRadiusRequest(*directoryservice.EnableRadiusInput) (*request.Request, *directoryservice.EnableRadiusOutput) + + EnableRadius(*directoryservice.EnableRadiusInput) (*directoryservice.EnableRadiusOutput, error) + + EnableSsoRequest(*directoryservice.EnableSsoInput) (*request.Request, *directoryservice.EnableSsoOutput) + + EnableSso(*directoryservice.EnableSsoInput) (*directoryservice.EnableSsoOutput, error) + + GetDirectoryLimitsRequest(*directoryservice.GetDirectoryLimitsInput) (*request.Request, *directoryservice.GetDirectoryLimitsOutput) + + GetDirectoryLimits(*directoryservice.GetDirectoryLimitsInput) (*directoryservice.GetDirectoryLimitsOutput, error) + + GetSnapshotLimitsRequest(*directoryservice.GetSnapshotLimitsInput) (*request.Request, *directoryservice.GetSnapshotLimitsOutput) + + GetSnapshotLimits(*directoryservice.GetSnapshotLimitsInput) (*directoryservice.GetSnapshotLimitsOutput, error) + + RestoreFromSnapshotRequest(*directoryservice.RestoreFromSnapshotInput) (*request.Request, *directoryservice.RestoreFromSnapshotOutput) + + RestoreFromSnapshot(*directoryservice.RestoreFromSnapshotInput) (*directoryservice.RestoreFromSnapshotOutput, error) + + UpdateRadiusRequest(*directoryservice.UpdateRadiusInput) (*request.Request, *directoryservice.UpdateRadiusOutput) + + UpdateRadius(*directoryservice.UpdateRadiusInput) (*directoryservice.UpdateRadiusOutput, error) + + VerifyTrustRequest(*directoryservice.VerifyTrustInput) (*request.Request, *directoryservice.VerifyTrustOutput) + + VerifyTrust(*directoryservice.VerifyTrustInput) (*directoryservice.VerifyTrustOutput, error) +} + +var _ DirectoryServiceAPI = (*directoryservice.DirectoryService)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,532 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package directoryservice_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/directoryservice" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDirectoryService_ConnectDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.ConnectDirectoryInput{ + ConnectSettings: &directoryservice.DirectoryConnectSettings{ // Required + CustomerDnsIps: []*string{ // Required + aws.String("IpAddr"), // Required + // More values... + }, + CustomerUserName: aws.String("UserName"), // Required + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + VpcId: aws.String("VpcId"), // Required + }, + Name: aws.String("DirectoryName"), // Required + Password: aws.String("ConnectPassword"), // Required + Size: aws.String("DirectorySize"), // Required + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + } + resp, err := svc.ConnectDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_CreateAlias() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateAliasInput{ + Alias: aws.String("AliasName"), // Required + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateComputer() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateComputerInput{ + ComputerName: aws.String("ComputerName"), // Required + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ComputerPassword"), // Required + ComputerAttributes: []*directoryservice.Attribute{ + { // Required + Name: aws.String("AttributeName"), + Value: aws.String("AttributeValue"), + }, + // More values... + }, + OrganizationalUnitDistinguishedName: aws.String("OrganizationalUnitDN"), + } + resp, err := svc.CreateComputer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateDirectoryInput{ + Name: aws.String("DirectoryName"), // Required + Password: aws.String("Password"), // Required + Size: aws.String("DirectorySize"), // Required + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + VpcSettings: &directoryservice.DirectoryVpcSettings{ + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... 
+ }, + VpcId: aws.String("VpcId"), // Required + }, + } + resp, err := svc.CreateDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateMicrosoftAD() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateMicrosoftADInput{ + Name: aws.String("DirectoryName"), // Required + Password: aws.String("Password"), // Required + VpcSettings: &directoryservice.DirectoryVpcSettings{ // Required + SubnetIds: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + VpcId: aws.String("VpcId"), // Required + }, + Description: aws.String("Description"), + ShortName: aws.String("DirectoryShortName"), + } + resp, err := svc.CreateMicrosoftAD(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_CreateSnapshot() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateSnapshotInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Name: aws.String("SnapshotName"), + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_CreateTrust() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.CreateTrustInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RemoteDomainName: aws.String("RemoteDomainName"), // Required + TrustDirection: aws.String("TrustDirection"), // Required + TrustPassword: aws.String("TrustPassword"), // Required + TrustType: aws.String("TrustType"), + } + resp, err := svc.CreateTrust(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteDirectory() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteDirectoryInput{ + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.DeleteDirectory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteSnapshot() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteSnapshotInput{ + SnapshotId: aws.String("SnapshotId"), // Required + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DeleteTrust() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DeleteTrustInput{ + TrustId: aws.String("TrustId"), // Required + } + resp, err := svc.DeleteTrust(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeDirectories() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeDirectoriesInput{ + DirectoryIds: []*string{ + aws.String("DirectoryId"), // Required + // More values... + }, + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeDirectories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeSnapshots() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeSnapshotsInput{ + DirectoryId: aws.String("DirectoryId"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + SnapshotIds: []*string{ + aws.String("SnapshotId"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DescribeTrusts() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DescribeTrustsInput{ + DirectoryId: aws.String("DirectoryId"), + Limit: aws.Int64(1), + NextToken: aws.String("NextToken"), + TrustIds: []*string{ + aws.String("TrustId"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrusts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_DisableRadius() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DisableRadiusInput{ + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.DisableRadius(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_DisableSso() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.DisableSsoInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ConnectPassword"), + UserName: aws.String("UserName"), + } + resp, err := svc.DisableSso(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_EnableRadius() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.EnableRadiusInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RadiusSettings: &directoryservice.RadiusSettings{ // Required + AuthenticationProtocol: aws.String("RadiusAuthenticationProtocol"), + DisplayLabel: aws.String("RadiusDisplayLabel"), + RadiusPort: aws.Int64(1), + RadiusRetries: aws.Int64(1), + RadiusServers: []*string{ + aws.String("Server"), // Required + // More values... + }, + RadiusTimeout: aws.Int64(1), + SharedSecret: aws.String("RadiusSharedSecret"), + UseSameUsername: aws.Bool(true), + }, + } + resp, err := svc.EnableRadius(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_EnableSso() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.EnableSsoInput{ + DirectoryId: aws.String("DirectoryId"), // Required + Password: aws.String("ConnectPassword"), + UserName: aws.String("UserName"), + } + resp, err := svc.EnableSso(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_GetDirectoryLimits() { + svc := directoryservice.New(session.New()) + + var params *directoryservice.GetDirectoryLimitsInput + resp, err := svc.GetDirectoryLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_GetSnapshotLimits() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.GetSnapshotLimitsInput{ + DirectoryId: aws.String("DirectoryId"), // Required + } + resp, err := svc.GetSnapshotLimits(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_RestoreFromSnapshot() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.RestoreFromSnapshotInput{ + SnapshotId: aws.String("SnapshotId"), // Required + } + resp, err := svc.RestoreFromSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDirectoryService_UpdateRadius() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.UpdateRadiusInput{ + DirectoryId: aws.String("DirectoryId"), // Required + RadiusSettings: &directoryservice.RadiusSettings{ // Required + AuthenticationProtocol: aws.String("RadiusAuthenticationProtocol"), + DisplayLabel: aws.String("RadiusDisplayLabel"), + RadiusPort: aws.Int64(1), + RadiusRetries: aws.Int64(1), + RadiusServers: []*string{ + aws.String("Server"), // Required + // More values... + }, + RadiusTimeout: aws.Int64(1), + SharedSecret: aws.String("RadiusSharedSecret"), + UseSameUsername: aws.Bool(true), + }, + } + resp, err := svc.UpdateRadius(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDirectoryService_VerifyTrust() { + svc := directoryservice.New(session.New()) + + params := &directoryservice.VerifyTrustInput{ + TrustId: aws.String("TrustId"), // Required + } + resp, err := svc.VerifyTrust(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/directoryservice/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package directoryservice + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the AWS Directory Service API Reference. This guide provides detailed +// information about AWS Directory Service operations, data types, parameters, +// and errors. +// The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type DirectoryService struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ds" + +// New creates a new instance of the DirectoryService client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DirectoryService client from just a session. +// svc := directoryservice.New(mySession) +// +// // Create a DirectoryService client with additional configuration +// svc := directoryservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectoryService { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DirectoryService { + svc := &DirectoryService{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-16", + JSONVersion: "1.1", + TargetPrefix: "DirectoryService_20150416", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DirectoryService operation and runs any +// custom request initialization. +func (c *DirectoryService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5387 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodb provides a client for Amazon DynamoDB. 
+package dynamodb + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBatchGetItem = "BatchGetItem" + +// BatchGetItemRequest generates a request for the BatchGetItem operation. +func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) { + op := &request.Operation{ + Name: opBatchGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"RequestItems"}, + OutputTokens: []string{"UnprocessedKeys"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &BatchGetItemInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetItemOutput{} + req.Data = output + return +} + +// The BatchGetItem operation returns the attributes of one or more items from +// one or more tables. You identify requested items by primary key. +// +// A single operation can retrieve up to 16 MB of data, which can contain as +// many as 100 items. BatchGetItem will return a partial result if the response +// size limit is exceeded, the table's provisioned throughput is exceeded, or +// an internal processing failure occurs. If a partial result is returned, the +// operation returns a value for UnprocessedKeys. You can use this value to +// retry the operation starting with the next item to get. +// +// If you request more than 100 items BatchGetItem will return a ValidationException +// with the message "Too many items requested for the BatchGetItem call". +// +// For example, if you ask to retrieve 100 items, but each individual item +// is 300 KB in size, the system returns 52 items (so as not to exceed the 16 +// MB limit). It also returns an appropriate UnprocessedKeys value so you can +// get the next page of results. If desired, your application can include its +// own logic to assemble the pages of results into one data set. 
+// +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. +// If at least one of the items is successfully processed, then BatchGetItem +// completes successfully, while returning the keys of the unread items in UnprocessedKeys. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// By default, BatchGetItem performs eventually consistent reads on every +// table in the request. If you want strongly consistent reads instead, you +// can set ConsistentRead to true for any or all tables. +// +// In order to minimize response latency, BatchGetItem retrieves items in parallel. +// +// When designing your application, keep in mind that DynamoDB does not return +// attributes in any particular order. To help parse the response by item, include +// the primary key values for the items in your request in the AttributesToGet +// parameter. +// +// If a requested item does not exist, it is not returned in the result. Requests +// for nonexistent items consume the minimum read capacity units according to +// the type of read. 
For more information, see Capacity Units Calculations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) +// in the Amazon DynamoDB Developer Guide. +func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) { + req, out := c.BatchGetItemRequest(input) + err := req.Send() + return out, err +} + +func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(p *BatchGetItemOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.BatchGetItemRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*BatchGetItemOutput), lastPage) + }) +} + +const opBatchWriteItem = "BatchWriteItem" + +// BatchWriteItemRequest generates a request for the BatchWriteItem operation. +func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) { + op := &request.Operation{ + Name: opBatchWriteItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchWriteItemInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchWriteItemOutput{} + req.Data = output + return +} + +// The BatchWriteItem operation puts or deletes multiple items in one or more +// tables. A single call to BatchWriteItem can write up to 16 MB of data, which +// can comprise as many as 25 put or delete requests. Individual items to be +// written can be as large as 400 KB. +// +// BatchWriteItem cannot update items. To update items, use the UpdateItem +// API. +// +// The individual PutItem and DeleteItem operations specified in BatchWriteItem +// are atomic; however BatchWriteItem as a whole is not. 
If any requested operations +// fail because the table's provisioned throughput is exceeded or an internal +// processing failure occurs, the failed operations are returned in the UnprocessedItems +// response parameter. You can investigate and optionally resend the requests. +// Typically, you would call BatchWriteItem in a loop. Each iteration would +// check for unprocessed items and submit a new BatchWriteItem request with +// those unprocessed items until all items have been processed. +// +// Note that if none of the items can be processed due to insufficient provisioned +// throughput on all of the tables in the request, then BatchWriteItem will +// return a ProvisionedThroughputExceededException. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// With BatchWriteItem, you can efficiently write or delete large amounts +// of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another +// database into DynamoDB. In order to improve performance with these large-scale +// operations, BatchWriteItem does not behave in the same way as individual +// PutItem and DeleteItem calls would. For example, you cannot specify conditions +// on individual put and delete requests, and BatchWriteItem does not return +// deleted items in the response. 
+// +// If you use a programming language that supports concurrency, you can use +// threads to write items in parallel. Your application must include the necessary +// logic to manage the threads. With languages that don't support threading, +// you must update or delete the specified items one at a time. In both situations, +// BatchWriteItem provides an alternative where the API performs the specified +// put and delete operations in parallel, giving you the power of the thread +// pool approach without having to introduce complexity into your application. +// +// Parallel processing reduces latency, but each specified put and delete request +// consumes the same number of write capacity units whether it is processed +// in parallel or not. Delete operations on nonexistent items consume one write +// capacity unit. +// +// If one or more of the following is true, DynamoDB rejects the entire batch +// write operation: +// +// One or more tables specified in the BatchWriteItem request does not exist. +// +// Primary key attributes specified on an item in the request do not match +// those in the corresponding table's primary key schema. +// +// You try to perform multiple operations on the same item in the same BatchWriteItem +// request. For example, you cannot put and delete the same item in the same +// BatchWriteItem request. +// +// There are more than 25 requests in the batch. +// +// Any individual item in a batch exceeds 400 KB. +// +// The total request size exceeds 16 MB. +func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) { + req, out := c.BatchWriteItemRequest(input) + err := req.Send() + return out, err +} + +const opCreateTable = "CreateTable" + +// CreateTableRequest generates a request for the CreateTable operation. 
+func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) { + op := &request.Operation{ + Name: opCreateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTableInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTableOutput{} + req.Data = output + return +} + +// The CreateTable operation adds a new table to your account. In an AWS account, +// table names must be unique within each region. That is, you can have two +// tables with same name if you create the tables in different regions. +// +// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, +// DynamoDB immediately returns a response with a TableStatus of CREATING. After +// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform +// read and write operations only on an ACTIVE table. +// +// You can optionally define secondary indexes on the new table, as part of +// the CreateTable operation. If you want to create multiple tables with secondary +// indexes on them, you must create the tables sequentially. Only one table +// with secondary indexes can be in the CREATING state at any given time. +// +// You can use the DescribeTable API to check the table status. +func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) { + req, out := c.CreateTableRequest(input) + err := req.Send() + return out, err +} + +const opDeleteItem = "DeleteItem" + +// DeleteItemRequest generates a request for the DeleteItem operation. 
+func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) { + op := &request.Operation{ + Name: opDeleteItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteItemInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteItemOutput{} + req.Data = output + return +} + +// Deletes a single item in a table by primary key. You can perform a conditional +// delete operation that deletes the item if it exists, or if it has an expected +// attribute value. +// +// In addition to deleting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// +// Unless you specify conditions, the DeleteItem is an idempotent operation; +// running it multiple times on the same item or attribute does not result in +// an error response. +// +// Conditional deletes are useful for deleting items only if specific conditions +// are met. If those conditions are met, DynamoDB performs the delete. Otherwise, +// the item is not deleted. +func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) { + req, out := c.DeleteItemRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTable = "DeleteTable" + +// DeleteTableRequest generates a request for the DeleteTable operation. +func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) { + op := &request.Operation{ + Name: opDeleteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTableOutput{} + req.Data = output + return +} + +// The DeleteTable operation deletes a table and all of its items. After a DeleteTable +// request, the specified table is in the DELETING state until DynamoDB completes +// the deletion. If the table is in the ACTIVE state, you can delete it. 
If +// a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. +// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. +// If table is already in the DELETING state, no error is returned. +// +// DynamoDB might continue to accept data read and write operations, such +// as GetItem and PutItem, on a table in the DELETING state until the table +// deletion is complete. +// +// When you delete a table, any indexes on that table are also deleted. +// +// If you have DynamoDB Streams enabled on the table, then the corresponding +// stream on that table goes into the DISABLED state, and the stream is automatically +// deleted after 24 hours. +// +// Use the DescribeTable API to check the status of the table. +func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) { + req, out := c.DeleteTableRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTable = "DescribeTable" + +// DescribeTableRequest generates a request for the DescribeTable operation. +func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { + op := &request.Operation{ + Name: opDescribeTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTableOutput{} + req.Data = output + return +} + +// Returns information about the table, including the current status of the +// table, when it was created, the primary key schema, and any indexes on the +// table. +// +// If you issue a DescribeTable request immediately after a CreateTable request, +// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable +// uses an eventually consistent query, and the metadata for your table might +// not be available at that moment. Wait for a few seconds, and then try the +// DescribeTable request again. 
+func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + err := req.Send() + return out, err +} + +const opGetItem = "GetItem" + +// GetItemRequest generates a request for the GetItem operation. +func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { + op := &request.Operation{ + Name: opGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetItemInput{} + } + + req = c.newRequest(op, input, output) + output = &GetItemOutput{} + req.Data = output + return +} + +// The GetItem operation returns a set of attributes for the item with the given +// primary key. If there is no matching item, GetItem does not return any data. +// +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true. Although +// a strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. +func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + err := req.Send() + return out, err +} + +const opListTables = "ListTables" + +// ListTablesRequest generates a request for the ListTables operation. +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { + op := &request.Operation{ + Name: opListTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartTableName"}, + OutputTokens: []string{"LastEvaluatedTableName"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTablesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTablesOutput{} + req.Data = output + return +} + +// Returns an array of table names associated with the current account and endpoint. 
+// The output from ListTables is paginated, with each page returning a maximum +// of 100 table names. +func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + err := req.Send() + return out, err +} + +func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(p *ListTablesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTablesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTablesOutput), lastPage) + }) +} + +const opPutItem = "PutItem" + +// PutItemRequest generates a request for the PutItem operation. +func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { + op := &request.Operation{ + Name: opPutItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutItemInput{} + } + + req = c.newRequest(op, input, output) + output = &PutItemOutput{} + req.Data = output + return +} + +// Creates a new item, or replaces an old item with a new item. If an item that +// has the same primary key as the new item already exists in the specified +// table, the new item completely replaces the existing item. You can perform +// a conditional put operation (add a new item if one with the specified primary +// key doesn't exist), or replace an existing item if it has certain attribute +// values. +// +// In addition to putting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// +// When you add an item, the primary key attribute(s) are the only required +// attributes. Attribute values cannot be null. String and Binary type attributes +// must have lengths greater than zero. Set type attributes cannot be empty. +// Requests with empty values will be rejected with a ValidationException exception. 
+// +// You can request that PutItem return either a copy of the original item (before +// the update) or a copy of the updated item (after the update). For more information, +// see the ReturnValues description below. +// +// To prevent a new item from replacing an existing item, use a conditional +// put operation with ComparisonOperator set to NULL for the primary key attribute, +// or attributes. +// +// For more information about using this API, see Working with Items (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// in the Amazon DynamoDB Developer Guide. +func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + err := req.Send() + return out, err +} + +const opQuery = "Query" + +// QueryRequest generates a request for the Query operation. +func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { + op := &request.Operation{ + Name: opQuery, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &QueryInput{} + } + + req = c.newRequest(op, input, output) + output = &QueryOutput{} + req.Data = output + return +} + +// A Query operation uses the primary key of a table or a secondary index to +// directly access items from that table or index. +// +// Use the KeyConditionExpression parameter to provide a specific hash key +// value. The Query operation will return all of the items from the table or +// index with that hash key value. You can optionally narrow the scope of the +// Query operation by specifying a range key value and a comparison operator +// in KeyConditionExpression. You can use the ScanIndexForward parameter to +// get results in forward or reverse order, by range key or by index key. 
+// +// Queries that do not return results consume the minimum number of read capacity +// units for that type of read operation. +// +// If the total number of items meeting the query criteria exceeds the result +// set size limit of 1 MB, the query stops and results are returned to the user +// with the LastEvaluatedKey element to continue the query in a subsequent operation. +// Unlike a Scan operation, a Query operation never returns both an empty result +// set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if the +// results exceed 1 MB, or if you have used the Limit parameter. +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the ConsistentRead +// parameter to true and obtain a strongly consistent result. Global secondary +// indexes support eventually consistent reads only, so do not specify ConsistentRead +// when querying a global secondary index. +func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { + req, out := c.QueryRequest(input) + err := req.Send() + return out, err +} + +func (c *DynamoDB) QueryPages(input *QueryInput, fn func(p *QueryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.QueryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*QueryOutput), lastPage) + }) +} + +const opScan = "Scan" + +// ScanRequest generates a request for the Scan operation. 
+func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) {
+	op := &request.Operation{
+		Name:       opScan,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"ExclusiveStartKey"},
+			OutputTokens:    []string{"LastEvaluatedKey"},
+			LimitToken:      "Limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &ScanInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ScanOutput{}
+	req.Data = output
+	return
+}
+
+// The Scan operation returns one or more items and item attributes by accessing
+// every item in a table or a secondary index. To have DynamoDB return fewer
+// items, you can provide a ScanFilter operation.
+//
+// If the total number of scanned items exceeds the maximum data set size limit
+// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey
+// value to continue the scan in a subsequent operation. The results also include
+// the number of items exceeding the limit. A scan can result in no table data
+// meeting the filter criteria.
+//
+// By default, Scan operations proceed sequentially; however, for faster performance
+// on a large table or secondary index, applications can request a parallel
+// Scan operation by providing the Segment and TotalSegments parameters. For
+// more information, see Parallel Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan)
+// in the Amazon DynamoDB Developer Guide.
+//
+// By default, Scan uses eventually consistent reads when accessing the data
+// in the table or local secondary index. However, you can use strongly consistent
+// reads instead by setting the ConsistentRead parameter to true.
+func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { + req, out := c.ScanRequest(input) + err := req.Send() + return out, err +} + +func (c *DynamoDB) ScanPages(input *ScanInput, fn func(p *ScanOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ScanRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ScanOutput), lastPage) + }) +} + +const opUpdateItem = "UpdateItem" + +// UpdateItemRequest generates a request for the UpdateItem operation. +func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { + op := &request.Operation{ + Name: opUpdateItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateItemInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateItemOutput{} + req.Data = output + return +} + +// Edits an existing item's attributes, or adds a new item to the table if it +// does not already exist. You can put, delete, or add attribute values. You +// can also perform a conditional update on an existing item (insert a new attribute +// name-value pair if it doesn't exist, or replace an existing name-value pair +// if it has certain expected attribute values). If conditions are specified +// and the item does not exist, then the operation fails and a new item is not +// created. +// +// You can also return the item's attribute values in the same UpdateItem operation +// using the ReturnValues parameter. +func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTable = "UpdateTable" + +// UpdateTableRequest generates a request for the UpdateTable operation. 
+func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { + op := &request.Operation{ + Name: opUpdateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTableOutput{} + req.Data = output + return +} + +// Modifies the provisioned throughput settings, global secondary indexes, or +// DynamoDB Streams settings for a given table. +// +// You can only perform one of the following operations at once: +// +// Modify the provisioned throughput settings of the table. +// +// Enable or disable Streams on the table. +// +// Remove a global secondary index from the table. +// +// Create a new global secondary index on the table. Once the index begins +// backfilling, you can use UpdateTable to perform other operations. +// +// UpdateTable is an asynchronous operation; while it is executing, the table +// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot +// issue another UpdateTable request. When the table returns to the ACTIVE state, +// the UpdateTable operation is complete. +func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + err := req.Send() + return out, err +} + +// Represents an attribute for describing the key schema for the table and indexes. +type AttributeDefinition struct { + _ struct{} `type:"structure"` + + // A name for the attribute. + AttributeName *string `min:"1" type:"string" required:"true"` + + // The data type for the attribute. 
+ AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"` +} + +// String returns the string representation +func (s AttributeDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeDefinition) GoString() string { + return s.String() +} + +// Represents the data for an attribute. You can set one, and only one, of the +// elements. +// +// Each attribute in an item is a name-value pair. An attribute can be single-valued +// or multi-valued set. For example, a book item can have title and authors +// attributes. Each book has one title but can have many authors. The multi-valued +// attribute is a set; duplicate values are not allowed. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // A Binary data type. + B []byte `type:"blob"` + + // A Boolean data type. + BOOL *bool `type:"boolean"` + + // A Binary Set data type. + BS [][]byte `type:"list"` + + // A List of attribute values. + L []*AttributeValue `type:"list"` + + // A Map of attribute values. + M map[string]*AttributeValue `type:"map"` + + // A Number data type. + N *string `type:"string"` + + // A Number Set data type. + NS []*string `type:"list"` + + // A Null data type. + NULL *bool `type:"boolean"` + + // A String data type. + S *string `type:"string"` + + // A String Set data type. + SS []*string `type:"list"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +// For the UpdateItem operation, represents the attributes to be modified, the +// action to perform on each, and the new value for each. +// +// You cannot use UpdateItem to update any primary key attributes. Instead, +// you will need to delete the item, and then use PutItem to create a new item +// with new attributes. 
+// +// Attribute values cannot be null; string and binary type attributes must +// have lengths greater than zero; and set type attributes must not be empty. +// Requests with empty values will be rejected with a ValidationException exception. +type AttributeValueUpdate struct { + _ struct{} `type:"structure"` + + // Specifies how to perform the update. Valid values are PUT (default), DELETE, + // and ADD. The behavior depends on whether the specified primary key already + // exists in the table. + // + // If an item with the specified Key is found in the table: + // + // PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // DELETE - If no value is specified, the attribute and its value are removed + // from the item. The data type of the specified value must match the existing + // value's data type. + // + // If a set of values is specified, then those values are subtracted from the + // old set. For example, if the attribute value was the set [a,b,c] and the + // DELETE action specified [a,c], then the final attribute value would be [b]. + // Specifying an empty set is an error. + // + // ADD - If the attribute does not already exist, then the attribute and + // its values are added to the item. If the attribute does exist, then the behavior + // of ADD depends on the data type of the attribute: + // + // If the existing attribute is a number, and if Value is also a number, + // then the Value is mathematically added to the existing attribute. If Value + // is a negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that + // doesn't exist before the update, DynamoDB uses 0 as the initial value. + // + // In addition, if you use ADD to update an existing item, and intend to increment + // or decrement an attribute value which does not yet exist, DynamoDB uses 0 + // as the initial value. 
For example, suppose that the item you want to update + // does not yet have an attribute named itemcount, but you decide to ADD the + // number 3 to this attribute anyway, even though it currently does not exist. + // DynamoDB will create the itemcount attribute, set its initial value to 0, + // and finally add 3 to it. The result will be a new itemcount attribute in + // the item, with a value of 3. + // + // If the existing data type is a set, and if the Value is also a set, then + // the Value is added to the existing set. (This is a set operation, not mathematical + // addition.) For example, if the attribute value was the set [1,2], and the + // ADD action specified [3], then the final attribute value would be [1,2,3]. + // An error occurs if an Add action is specified for a set attribute and the + // attribute type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. The + // same holds true for number sets and binary sets. + // + // This action is only valid for an existing attribute whose data type is + // number or is a set. Do not use ADD for any other data types. + // + // If no item with the specified Key is found: + // + // PUT - DynamoDB creates a new item with the specified primary key, and + // then adds the attribute. + // + // DELETE - Nothing happens; there is no attribute to delete. + // + // ADD - DynamoDB creates an item with the supplied primary key and number + // (or set of numbers) for the attribute value. The only data types allowed + // are number and number set; no other data types can be specified. + Action *string `type:"string" enum:"AttributeAction"` + + // Represents the data for an attribute. You can set one, and only one, of the + // elements. + // + // Each attribute in an item is a name-value pair. An attribute can be single-valued + // or multi-valued set. 
For example, a book item can have title and authors + // attributes. Each book has one title but can have many authors. The multi-valued + // attribute is a set; duplicate values are not allowed. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s AttributeValueUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValueUpdate) GoString() string { + return s.String() +} + +// Represents the input of a BatchGetItem operation. +type BatchGetItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a map that describes + // one or more items to retrieve from that table. Each table name can be used + // only once per BatchGetItem request. + // + // Each element in the map of items to retrieve consists of the following: + // + // ConsistentRead - If true, a strongly consistent read is used; if false + // (the default), an eventually consistent read is used. + // + // ExpressionAttributeNames - One or more substitution tokens for attribute + // names in the ProjectionExpression parameter. The following are some use cases + // for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. 
(For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // Keys - An array of primary key attribute values that define specific items + // in the table. For each primary key, you must provide all of the key attributes. + // For example, with a hash type primary key, you only need to provide the hash + // attribute. For a hash-and-range type primary key, you must provide both the + // hash attribute and the range attribute. + // + // ProjectionExpression - A string that identifies one or more attributes + // to retrieve from the table. These attributes can include scalars, sets, or + // elements of a JSON document. The attributes in the expression must be separated + // by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // AttributesToGet - + // + // This is a legacy parameter, for backward compatibility. 
New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names are + // provided, then all attributes will be returned. If any of the requested attributes + // are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. + RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` +} + +// String returns the string representation +func (s BatchGetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetItemInput) GoString() string { + return s.String() +} + +// Represents the output of a BatchGetItem operation. 
+type BatchGetItemOutput struct { + _ struct{} `type:"structure"` + + // The read capacity units consumed by the operation. + // + // Each element consists of: + // + // TableName - The table that consumed the provisioned throughput. + // + // CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A map of table name to a list of items. Each object in Responses consists + // of a table name, along with a map of attribute data consisting of the data + // type and attribute value. + Responses map[string][]map[string]*AttributeValue `type:"map"` + + // A map of tables and their respective keys that were not processed with the + // current response. The UnprocessedKeys value is in the same form as RequestItems, + // so the value can be provided directly to a subsequent BatchGetItem operation. + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // + // Keys - An array of primary key attribute values that define specific items + // in the table. + // + // AttributesToGet - One or more attributes to be retrieved from the table + // or index. By default, all attributes are returned. If a requested attribute + // is not found, it does not appear in the result. + // + // ConsistentRead - The consistency of a read operation. If set to true, + // then a strongly consistent read is used; otherwise, an eventually consistent + // read is used. + // + // If there are no unprocessed keys remaining, the response contains an empty + // UnprocessedKeys map. + UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"` +} + +// String returns the string representation +func (s BatchGetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetItemOutput) GoString() string { + return s.String() +} + +// Represents the input of a BatchWriteItem operation. 
+type BatchWriteItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a list of operations + // to be performed (DeleteRequest or PutRequest). Each element in the map consists + // of the following: + // + // DeleteRequest - Perform a DeleteItem operation on the specified item. + // The item to be deleted is identified by a Key subelement: + // + // Key - A map of primary key attribute values that uniquely identify the + // ! item. Each entry in this map consists of an attribute name and an attribute + // value. For each primary key, you must provide all of the key attributes. + // For example, with a hash type primary key, you only need to provide the hash + // attribute. For a hash-and-range type primary key, you must provide both the + // hash attribute and the range attribute. + // + // PutRequest - Perform a PutItem operation on the specified item. The + // item to be put is identified by an Item subelement: + // + // Item - A map of attributes and their values. Each entry in this map consists + // of an attribute name and an attribute value. Attribute values must not be + // null; string and binary type attributes must have lengths greater than zero; + // and set type attributes must not be empty. Requests that contain empty values + // will be rejected with a ValidationException exception. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. 
+ // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` +} + +// String returns the string representation +func (s BatchWriteItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchWriteItemInput) GoString() string { + return s.String() +} + +// Represents the output of a BatchWriteItem operation. +type BatchWriteItemOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the operation. + // + // Each element consists of: + // + // TableName - The table that consumed the provisioned throughput. + // + // CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A list of tables that were processed by BatchWriteItem and, for each table, + // information about any item collections that were affected by individual DeleteItem + // or PutItem operations. + // + // Each entry consists of the following subelements: + // + // ItemCollectionKey - The hash key value of the item collection. This is + // the same as the hash key of the item. + // + // SizeEstimateRange - An estimate of item collection size, expressed in + // GB. 
This is a two-element array containing a lower bound and an upper bound + // for the estimate. The estimate includes the size of all the items in the + // table, plus the size of all attributes projected into all of the local secondary + // indexes on the table. Use this estimate to measure whether a local secondary + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` + + // A map of tables and requests against those tables that were not processed. + // The UnprocessedItems value is in the same form as RequestItems, so you can + // provide this value directly to a subsequent BatchGetItem operation. For more + // information, see RequestItems in the Request Parameters section. + // + // Each UnprocessedItems entry consists of a table name and, for that table, + // a list of operations to perform (DeleteRequest or PutRequest). + // + // DeleteRequest - Perform a DeleteItem operation on the specified item. + // The item to be deleted is identified by a Key subelement: + // + // Key - A map of primary key attribute values that uniquely identify the + // item. Each entry in this map consists of an attribute name and an attribute + // value. + // + // PutRequest - Perform a PutItem operation on the specified item. The + // item to be put is identified by an Item subelement: + // + // Item - A map of attributes and their values. Each entry in this map consists + // of an attribute name and an attribute value. Attribute values must not be + // null; string and binary type attributes must have lengths greater than zero; + // and set type attributes must not be empty. Requests that contain empty values + // will be rejected with a ValidationException exception. 
+ // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // + // If there are no unprocessed items remaining, the response contains an + // empty UnprocessedItems map. + UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"` +} + +// String returns the string representation +func (s BatchWriteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchWriteItemOutput) GoString() string { + return s.String() +} + +// Represents the amount of provisioned throughput capacity consumed on a table +// or an index. +type Capacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed on a table or an index. + CapacityUnits *float64 `type:"double"` +} + +// String returns the string representation +func (s Capacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Capacity) GoString() string { + return s.String() +} + +// Represents the selection criteria for a Query or Scan operation: +// +// For a Query operation, Condition is used for specifying the KeyConditions +// to use when querying a table or an index. For KeyConditions, only the following +// comparison operators are supported: +// +// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN +// +// Condition is also used in a QueryFilter, which evaluates the query results +// and returns only the desired values. +// +// For a Scan operation, Condition is used in a ScanFilter, which evaluates +// the scan results and returns only the desired values. +type Condition struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. 
+ // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes. For example, equals, greater than, + // less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). 
If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. 
NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. 
+ // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see Legacy + // Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. 
+ ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// The capacity units consumed by an operation. The data returned includes the +// total provisioned throughput consumed, along with statistics for the table +// and any indexes involved in the operation. ConsumedCapacity is only returned +// if the request asked for it. For more information, see Provisioned Throughput +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) +// in the Amazon DynamoDB Developer Guide. +type ConsumedCapacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed by the operation. + CapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on each global index affected by the operation. + GlobalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on each local index affected by the operation. + LocalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on the table affected by the operation. + Table *Capacity `type:"structure"` + + // The name of the table that was affected by the operation. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s ConsumedCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConsumedCapacity) GoString() string { + return s.String() +} + +// Represents a new global secondary index to be added to an existing table. +type CreateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be created. 
+ IndexName *string `min:"3" type:"string" required:"true"` + + // The key schema for the global secondary index. + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Represents the input of a CreateTable operation. +type CreateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"` + + // One or more global secondary indexes (the maximum is five) to be created + // on the table. Each global secondary index in the array includes the following: + // + // IndexName - The name of the global secondary index. Must be unique only + // for this table. + // + // KeySchema - Specifies the key schema for the global secondary index. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. 
These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + // + // ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units. + GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` + + // Specifies the attributes that make up the primary key for a table or an index. + // The attributes in KeySchema must also be defined in the AttributeDefinitions + // array. For more information, see Data Model (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) + // in the Amazon DynamoDB Developer Guide. + // + // Each KeySchemaElement in the array is composed of: + // + // AttributeName - The name of this key attribute. + // + // KeyType - Determines whether the key attribute is HASH or RANGE. + // + // For a primary key that consists of a hash attribute, you must provide + // exactly one element with a KeyType of HASH. 
+ // + // For a primary key that consists of hash and range attributes, you must provide + // exactly two elements, in this order: The first element must have a KeyType + // of HASH, and the second element must have a KeyType of RANGE. + // + // For more information, see Specifying the Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) + // in the Amazon DynamoDB Developer Guide. + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // One or more local secondary indexes (the maximum is five) to be created on + // the table. Each index is scoped to a given hash key value. There is a 10 + // GB size limit per hash key; otherwise, the size of a local secondary index + // is unconstrained. + // + // Each local secondary index in the array includes the following: + // + // IndexName - The name of the local secondary index. Must be unique only + // for this table. + // + // KeySchema - Specifies the key schema for the local secondary index. The + // key schema must begin with the same hash key attribute as the table. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. 
The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` + + // The settings for DynamoDB Streams on the table. These settings consist of: + // + // StreamEnabled - Indicates whether Streams is to be enabled (true) or disabled + // (false). + // + // StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the table's stream. Valid values + // for StreamViewType are: + // + // KEYS_ONLY - Only the key attributes of the modified item are written to + // the stream. + // + // NEW_IMAGE - The entire item, as it appears after it was modified, is written + // to the stream. + // + // OLD_IMAGE - The entire item, as it appeared before it was modified, is written + // to the stream. + // + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are + // written to the stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The name of the table to create. 
+ TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTableInput) GoString() string { + return s.String() +} + +// Represents the output of a CreateTable operation. +type CreateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s CreateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTableOutput) GoString() string { + return s.String() +} + +// Represents a global secondary index to be deleted from an existing table. +type DeleteGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be deleted. + IndexName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Represents the input of a DeleteItem operation. +type DeleteItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional DeleteItem + // to succeed. + // + // An expression can contain any of the following: + // + // Functions: attribute_exists | attribute_not_exists | attribute_type | + // contains | begins_with | size + // + // These function names are case-sensitive. 
+ // + // Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + // + // ConditionExpression replaces the legacy ConditionalOperator and Expected + // parameters. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A logical operator to apply to the conditions in the Expected map: + // + // AND - If all of the conditions evaluate to true, then the entire map evaluates + // to true. + // + // OR - If at least one of the conditions evaluate to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A map of attribute/condition pairs. Expected provides a conditional block + // for the DeleteItem operation. + // + // Each element of Expected consists of an attribute name, a comparison operator, + // and one or more values. DynamoDB compares the attribute with the value(s) + // you supplied, using the comparison operator. 
For each Expected element, the + // result of the evaluation is either true or false. + // + // If you specify more than one element in the Expected map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // If the Expected map evaluates to true, then the conditional operation succeeds; + // otherwise, it fails. + // + // Expected contains the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters. + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. 
If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. 
+ // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. 
+ // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). 
A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see + // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + // + // For backward compatibility with previous DynamoDB releases, the following + // parameters can be used instead of AttributeValueList and ComparisonOperator: + // + // Value - A value for DynamoDB to compare with an attribute. + // + // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // attempting the conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the condition evaluates + // to true; otherwise the condition evaluate to false. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the condition evaluates to true. If the value is found, despite + // the assumption that it does not exist, the condition evaluates to false. + // + // Note that the default value for Exists is true. + // + // The Value and Exists parameters are incompatible with AttributeValueList + // and ComparisonOperator. Note that if you use both sets of parameters at once, + // DynamoDB will return a ValidationException exception. + // + // This parameter does not support attributes of type List or Map. 
+ Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to delete. + // + // For the primary key, you must provide all of the attributes. For example, + // with a hash type primary key, you only need to provide the hash attribute. + // For a hash-and-range type primary key, you must provide both the hash attribute + // and the range attribute. + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. 
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // before they were deleted. For DeleteItem, the valid values are: + // + // NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // ALL_OLD - The content of the old item is returned. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table from which to delete the item. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteItemInput) GoString() string { + return s.String() +} + +// Represents the output of a DeleteItem operation. +type DeleteItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute names to AttributeValue objects, representing the item + // as it appeared before the DeleteItem operation. This map appears in the response + // only if ReturnValues was specified as ALL_OLD in the request. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. 
For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the operation. + // ItemCollectionMetrics is only returned if the request asked for it. If the + // table does not have any local secondary indexes, this information is not + // returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // ItemCollectionKey - The hash key value of the item collection. This is + // the same as the hash key of the item. + // + // SizeEstimateRange - An estimate of item collection size, in gigabytes. This + // value is a two-element array containing a lower bound and an upper bound + // for the estimate. The estimate includes the size of all the items in the + // table, plus the size of all attributes projected into all of the local secondary + // indexes on that table. Use this estimate to measure whether a local secondary + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s DeleteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteItemOutput) GoString() string { + return s.String() +} + +// Represents a request to perform a DeleteItem operation on an item. +type DeleteRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to delete. All of the table's primary key attributes must be + // specified, and their data types must match those of the table's key schema. 
+ Key map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation +func (s DeleteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRequest) GoString() string { + return s.String() +} + +// Represents the input of a DeleteTable operation. +type DeleteTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to delete. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTableInput) GoString() string { + return s.String() +} + +// Represents the output of a DeleteTable operation. +type DeleteTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s DeleteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTableOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeTable operation. +type DescribeTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to describe. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeTable operation. +type DescribeTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. 
+ Table *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableOutput) GoString() string { + return s.String() +} + +// Represents a condition to be compared with an attribute value. This condition +// can be used with DeleteItem, PutItem or UpdateItem operations; if the comparison +// evaluates to true, the operation succeeds; if not, the operation fails. You +// can use ExpectedAttributeValue in one of two different ways: +// +// Use AttributeValueList to specify one or more values to compare against +// an attribute. Use ComparisonOperator to specify how you want to perform the +// comparison. If the comparison evaluates to true, then the conditional operation +// succeeds. +// +// Use Value to specify a value that DynamoDB will compare against an attribute. +// If the values match, then ExpectedAttributeValue evaluates to true and the +// conditional operation succeeds. Optionally, you can also set Exists to false, +// indicating that you do not expect to find the attribute value in the table. +// In this case, the conditional operation succeeds only if the comparison evaluates +// to false. +// +// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. +// Note that if you use both sets of parameters at once, DynamoDB will return +// a ValidationException exception. +type ExpectedAttributeValue struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. 
For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes in the AttributeValueList. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. 
+ // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. 
This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. 
If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the operation succeeds. 
+ // If it is not found, the operation fails with a ConditionalCheckFailedException. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the operation succeeds. If the value is found, despite the assumption + // that it does not exist, the operation fails with a ConditionalCheckFailedException. + // + // The default setting for Exists is true. If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true, + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // + // Exists is true but there is no Value to check. (You expect a value to + // exist, but don't specify what that value is.) + // + // Exists is false but you also provide a Value. (You cannot expect an attribute + // to have a value, while also expecting it not to exist.) + Exists *bool `type:"boolean"` + + // Represents the data for an attribute. You can set one, and only one, of the + // elements. + // + // Each attribute in an item is a name-value pair. An attribute can be single-valued + // or multi-valued set. For example, a book item can have title and authors + // attributes. Each book has one title but can have many authors. The multi-valued + // attribute is a set; duplicate values are not allowed. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s ExpectedAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpectedAttributeValue) GoString() string { + return s.String() +} + +// Represents the input of a GetItem operation. +type GetItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. 
Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names are + // provided, then all attributes will be returned. If any of the requested attributes + // are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. + AttributesToGet []*string `min:"1" type:"list"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). 
To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, + // with a hash type primary key, you only need to provide the hash attribute. + // For a hash-and-range type primary key, you must provide both the hash attribute + // and the range attribute. + Key map[string]*AttributeValue `type:"map" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. 
+ ProjectionExpression *string `type:"string"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The name of the table containing the requested item. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetItemInput) GoString() string { + return s.String() +} + +// Represents the output of a GetItem operation. +type GetItemOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // A map of attribute names to AttributeValue objects, as specified by AttributesToGet. 
+ Item map[string]*AttributeValue `type:"map"` +} + +// String returns the string representation +func (s GetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetItemOutput) GoString() string { + return s.String() +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types (HASH or RANGE). + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GlobalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndex) GoString() string { + return s.String() +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether the index is currently backfilling. 
Backfilling is the + // process of reading items from the table and determining whether they can + // be added to the index. (Not all items will qualify: For example, a hash key + // attribute cannot have any duplicates.) If an item can be added to the index, + // DynamoDB will do so. After all items have been processed, the backfilling + // operation is complete and Backfilling is false. + // + // For indexes that were created during a CreateTable operation, the Backfilling + // attribute does not appear in the DescribeTable output. + Backfilling *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The current state of the global secondary index: + // + // CREATING - The index is being created. + // + // UPDATING - The index is being updated. + // + // DELETING - The index is being deleted. + // + // ACTIVE - The index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete key schema for the global secondary index, consisting of one + // or more pairs of attribute names and key types (HASH or RANGE). + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. 
+ Projection *Projection `type:"structure"` + + // Represents the provisioned throughput settings for the table, consisting + // of read and write capacity units, along with data about increases and decreases. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` +} + +// String returns the string representation +func (s GlobalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// Represents one of the following: +// +// A new global secondary index to be added to an existing table. +// +// New provisioned throughput parameters for an existing global secondary index. +// +// An existing global secondary index to be removed from an existing table. +type GlobalSecondaryIndexUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a global secondary index on an existing + // table: + // + // IndexName + // + // KeySchema + // + // AttributeDefinitions + // + // Projection + // + // ProvisionedThroughput + Create *CreateGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index to be removed. + Delete *DeleteGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index, along with new provisioned + // throughput settings to be applied to that index. + Update *UpdateGlobalSecondaryIndexAction `type:"structure"` +} + +// String returns the string representation +func (s GlobalSecondaryIndexUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndexUpdate) GoString() string { + return s.String() +} + +// Information about item collections, if any, that were affected by the operation. +// ItemCollectionMetrics is only returned if the request asked for it. 
If the +// table does not have any local secondary indexes, this information is not +// returned in the response. +type ItemCollectionMetrics struct { + _ struct{} `type:"structure"` + + // The hash key value of the item collection. This value is the same as the + // hash key of the item. + ItemCollectionKey map[string]*AttributeValue `type:"map"` + + // An estimate of item collection size, in gigabytes. This value is a two-element + // array containing a lower bound and an upper bound for the estimate. The estimate + // includes the size of all the items in the table, plus the size of all attributes + // projected into all of the local secondary indexes on that table. Use this + // estimate to measure whether a local secondary index is approaching its size + // limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + SizeEstimateRangeGB []*float64 `type:"list"` +} + +// String returns the string representation +func (s ItemCollectionMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ItemCollectionMetrics) GoString() string { + return s.String() +} + +// Represents a single element of a key schema. A key schema specifies the attributes +// that make up the primary key of a table, or the key attributes of an index. +// +// A KeySchemaElement represents exactly one attribute of the primary key. +// For example, a hash type primary key would be represented by one KeySchemaElement. +// A hash-and-range type primary key would require one KeySchemaElement for +// the hash attribute, and another KeySchemaElement for the range attribute. +type KeySchemaElement struct { + _ struct{} `type:"structure"` + + // The name of a key attribute. + AttributeName *string `min:"1" type:"string" required:"true"` + + // The attribute data, consisting of the data type and the attribute value itself. 
+ KeyType *string `type:"string" required:"true" enum:"KeyType"` +} + +// String returns the string representation +func (s KeySchemaElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeySchemaElement) GoString() string { + return s.String() +} + +// Represents a set of primary keys and, for each key, the attributes to retrieve +// from the table. +// +// For each primary key, you must provide all of the key attributes. For example, +// with a hash type primary key, you only need to provide the hash attribute. +// For a hash-and-range type primary key, you must provide both the hash attribute +// and the range attribute. +type KeysAndAttributes struct { + _ struct{} `type:"structure"` + + // One or more attributes to retrieve from the table or index. If no attribute + // names are specified then all attributes will be returned. If any of the specified + // attributes are not found, they will not appear in the result. + AttributesToGet []*string `min:"1" type:"list"` + + // The consistency of a read operation. If set to true, then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. 
(For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // The primary key attribute values that define the items and the attributes + // associated with the items. + Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the ProjectionExpression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. 
+ ProjectionExpression *string `type:"string"` +} + +// String returns the string representation +func (s KeysAndAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeysAndAttributes) GoString() string { + return s.String() +} + +// Represents the input of a ListTables operation. +type ListTablesInput struct { + _ struct{} `type:"structure"` + + // The first table name that this operation will evaluate. Use the value that + // was returned for LastEvaluatedTableName in a previous operation, so that + // you can obtain the next page of results. + ExclusiveStartTableName *string `min:"3" type:"string"` + + // A maximum number of table names to return. If this parameter is not specified, + // the limit is 100. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTablesInput) GoString() string { + return s.String() +} + +// Represents the output of a ListTables operation. +type ListTablesOutput struct { + _ struct{} `type:"structure"` + + // The name of the last table in the current page of results. Use this value + // as the ExclusiveStartTableName in a new request to obtain the next page of + // results, until all the table names are returned. + // + // If you do not receive a LastEvaluatedTableName value in the response, this + // means that there are no more table names to be retrieved. + LastEvaluatedTableName *string `min:"3" type:"string"` + + // The names of the tables associated with the current account at the current + // endpoint. The maximum size of this array is 100. + // + // If LastEvaluatedTableName also appears in the output, you can use this value + // as the ExclusiveStartTableName parameter in a subsequent ListTables request + // and obtain the next page of results. 
+ TableNames []*string `type:"list"` +} + +// String returns the string representation +func (s ListTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTablesOutput) GoString() string { + return s.String() +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the local secondary index. The name must be unique among all + // other indexes on this table. + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for the local secondary index, consisting of one + // or more pairs of attribute names and key types (HASH or RANGE). + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure" required:"true"` +} + +// String returns the string representation +func (s LocalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalSecondaryIndex) GoString() string { + return s.String() +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // Represents the name of the local secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The number of items in the specified index. 
DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete index key schema, which consists of one or more pairs of attribute + // names and key types (HASH or RANGE). + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + Projection *Projection `type:"structure"` +} + +// String returns the string representation +func (s LocalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// Represents attributes that are copied (projected) from the table into an +// index. These are in addition to the primary key attributes and index key +// attributes, which are automatically projected. +type Projection struct { + _ struct{} `type:"structure"` + + // Represents the non-key attribute names which will be projected into the index. + // + // For local secondary indexes, the total count of NonKeyAttributes summed + // across all of the local secondary indexes, must not exceed 20. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + NonKeyAttributes []*string `min:"1" type:"list"` + + // The set of attributes that are projected into the index: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. 
+ ProjectionType *string `type:"string" enum:"ProjectionType"` +} + +// String returns the string representation +func (s Projection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Projection) GoString() string { + return s.String() +} + +// Represents the provisioned throughput settings for a specified table or index. +// The settings can be modified using the UpdateTable operation. +// +// For current minimum and maximum provisioned throughput values, see Limits +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// in the Amazon DynamoDB Developer Guide. +type ProvisionedThroughput struct { + _ struct{} `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. For more information, see Specifying Read and Write + // Requirements (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation +func (s ProvisionedThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedThroughput) GoString() string { + return s.String() +} + +// Represents the provisioned throughput settings for the table, consisting +// of read and write capacity units, along with data about increases and decreases. 
+type ProvisionedThroughputDescription struct { + _ struct{} `type:"structure"` + + // The date and time of the last provisioned throughput decrease for this table. + LastDecreaseDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time of the last provisioned throughput increase for this table. + LastIncreaseDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The number of provisioned throughput decreases for this table during this + // UTC calendar day. For current maximums on provisioned throughput decreases, + // see Limits (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + NumberOfDecreasesToday *int64 `min:"1" type:"long"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. Eventually consistent reads require + // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits + // per second provides 100 eventually consistent ReadCapacityUnits per second. + ReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + WriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s ProvisionedThroughputDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedThroughputDescription) GoString() string { + return s.String() +} + +// Represents the input of a PutItem operation. +type PutItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional PutItem operation + // to succeed. 
+ // + // An expression can contain any of the following: + // + // Functions: attribute_exists | attribute_not_exists | attribute_type | + // contains | begins_with | size + // + // These function names are case-sensitive. + // + // Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + // + // ConditionExpression replaces the legacy ConditionalOperator and Expected + // parameters. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A logical operator to apply to the conditions in the Expected map: + // + // AND - If all of the conditions evaluate to true, then the entire map evaluates + // to true. + // + // OR - If at least one of the conditions evaluate to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A map of attribute/condition pairs. Expected provides a conditional block + // for the PutItem operation. 
+ // + // This parameter does not support attributes of type List or Map. + // + // Each element of Expected consists of an attribute name, a comparison operator, + // and one or more values. DynamoDB compares the attribute with the value(s) + // you supplied, using the comparison operator. For each Expected element, the + // result of the evaluation is either true or false. + // + // If you specify more than one element in the Expected map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // If the Expected map evaluates to true, then the conditional operation succeeds; + // otherwise, it fails. + // + // Expected contains the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters. + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. 
EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). 
If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. 
If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. 
+ // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see + // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + // + // For backward compatibility with previous DynamoDB releases, the following + // parameters can be used instead of AttributeValueList and ComparisonOperator: + // + // Value - A value for DynamoDB to compare with an attribute. + // + // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // attempting the conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the condition evaluates + // to true; otherwise the condition evaluate to false. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the condition evaluates to true. If the value is found, despite + // the assumption that it does not exist, the condition evaluates to false. + // + // Note that the default value for Exists is true. + // + // The Value and Exists parameters are incompatible with AttributeValueList + // and ComparisonOperator. Note that if you use both sets of parameters at once, + // DynamoDB will return a ValidationException exception. 
+ Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute name/value pairs, one for each attribute. Only the primary + // key attributes are required; you can optionally provide other attribute name-value + // pairs for the item. + // + // You must provide all of the attributes for the primary key. For example, + // with a hash type primary key, you only need to provide the hash attribute. + // For a hash-and-range type primary key, you must provide both the hash attribute + // and the range attribute. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // + // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // in the Amazon DynamoDB Developer Guide. + // + // Each element in the Item map is an AttributeValue object. 
+ Item map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // before they were updated with the PutItem request. For PutItem, the valid + // values are: + // + // NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // ALL_OLD - If PutItem overwrote an attribute name-value pair, then the + // content of the old item is returned. + // + // Other "Valid Values" are not relevant to PutItem. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table to contain the item. 
+ TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutItemInput) GoString() string { + return s.String() +} + +// Represents the output of a PutItem operation. +type PutItemOutput struct { + _ struct{} `type:"structure"` + + // The attribute values as they appeared before the PutItem operation, but only + // if ReturnValues is specified as ALL_OLD in the request. Each element consists + // of an attribute name and an attribute value. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the operation. + // ItemCollectionMetrics is only returned if the request asked for it. If the + // table does not have any local secondary indexes, this information is not + // returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // ItemCollectionKey - The hash key value of the item collection. This is + // the same as the hash key of the item. + // + // SizeEstimateRange - An estimate of item collection size, in gigabytes. This + // value is a two-element array containing a lower bound and an upper bound + // for the estimate. 
The estimate includes the size of all the items in the + // table, plus the size of all attributes projected into all of the local secondary + // indexes on that table. Use this estimate to measure whether a local secondary + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s PutItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutItemOutput) GoString() string { + return s.String() +} + +// Represents a request to perform a PutItem operation on an item. +type PutRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of an item to be processed by PutItem. All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. If any attributes are present in the item which are part of an index + // key schema for the table, their types must match the index key schema. + Item map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation +func (s PutRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRequest) GoString() string { + return s.String() +} + +// Represents the input of a Query operation. +type QueryInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. 
+ // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names are + // provided, then all attributes will be returned. If any of the requested attributes + // are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. + // + // You cannot use both AttributesToGet and Select together in a Query request, + // unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent + // to specifying AttributesToGet without any value for Select.) + // + // If you query a local secondary index and request only attributes that are + // projected into that index, the operation will read only the index and not + // the table. If any of the requested attributes are not projected into the + // local secondary index, DynamoDB will fetch each of these attributes from + // the parent table. This extra fetching incurs additional throughput cost and + // latency. + // + // If you query a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A logical operator to apply to the conditions in a QueryFilter map: + // + // AND - If all of the conditions evaluate to true, then the entire map evaluates + // to true. 
+ // + // OR - If at least one of the conditions evaluate to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + // + // Strongly consistent reads are not supported on global secondary indexes. + // If you query a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No + // set data types are allowed. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. 
(For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Query operation, + // but before the data is returned to you. 
Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // in the Amazon DynamoDB Developer Guide. + // + // FilterExpression replaces the legacy QueryFilter and ConditionalOperator + // parameters. + FilterExpression *string `type:"string"` + + // The name of an index to query. This index can be any local secondary index + // or global secondary index on the table. Note that if you use the IndexName + // parameter, you must also provide TableName. + IndexName *string `min:"3" type:"string"` + + // The condition that specifies the key value(s) for items to be retrieved by + // the Query action. + // + // The condition must perform an equality test on a single hash key value. + // The condition can also perform one of several comparison tests on a single + // range key value. Query can use KeyConditionExpression to retrieve one item + // with a given hash and range key value, or several items that have the same + // hash key value but different range key values. + // + // The hash key equality test is required, and must be specified in the following + // format: + // + // hashAttributeName = :hashval + // + // If you also want to provide a range key condition, it must be combined using + // AND with the hash key condition. Following is an example, using the = comparison + // operator for the range key: + // + // hashAttributeName = :hashval AND rangeAttributeName = :rangeval + // + // Valid comparisons for the range key condition are as follows: + // + // rangeAttributeName = :rangeval - true if the range key is equal to :rangeval. + // + // rangeAttributeName < :rangeval - true if the range key is less than :rangeval. 
+ // + // rangeAttributeName <= :rangeval - true if the range key is less than or + // equal to :rangeval. + // + // rangeAttributeName > :rangeval - true if the range key is greater than + // :rangeval. + // + // rangeAttributeName >= :rangeval - true if the range key is greater than + // or equal to :rangeval. + // + // rangeAttributeName BETWEEN :rangeval1 AND :rangeval2 - true if the range + // key is greater than or equal to :rangeval1, and less than or equal to :rangeval2. + // + // begins_with (rangeAttributeName, :rangeval) - true if the range key begins + // with a particular operand. (You cannot use this function with a range key + // that is of type Number.) Note that the function name begins_with is case-sensitive. + // + // Use the ExpressionAttributeValues parameter to replace tokens such as + // :hashval and :rangeval with actual values at runtime. + // + // You can optionally use the ExpressionAttributeNames parameter to replace + // the names of the hash and range attributes with placeholder tokens. This + // option might be necessary if an attribute name conflicts with a DynamoDB + // reserved word. For example, the following KeyConditionExpression parameter + // causes an error because Size is a reserved word: + // + // Size = :myval To work around this, define a placeholder (such a #S) + // to represent the attribute name Size. KeyConditionExpression then is as follows: + // + // #S = :myval For a list of reserved words, see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide. + // + // For more information on ExpressionAttributeNames and ExpressionAttributeValues, + // see Using Placeholders for Attribute Names and Values (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html) + // in the Amazon DynamoDB Developer Guide. + // + // KeyConditionExpression replaces the legacy KeyConditions parameter. 
+ KeyConditionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use KeyConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // The selection criteria for the query. For a query on a table, you can have + // conditions only on the table primary key attributes. You must provide the + // hash key attribute name and value as an EQ condition. You can optionally + // provide a second condition, referring to the range key attribute. + // + // If you don't provide a range key condition, all of the items that match + // the hash key will be retrieved. If a FilterExpression or QueryFilter is present, + // it will be applied after the items are retrieved. + // + // For a query on an index, you can have conditions only on the index key attributes. + // You must provide the index hash attribute name and value as an EQ condition. + // You can optionally provide a second condition, referring to the index key + // range attribute. + // + // Each KeyConditions element consists of an attribute name to compare, along + // with the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. 
+ // + // ComparisonOperator - A comparator for evaluating attributes, for example, + // equals, greater than, less than, and so on. + // + // For KeyConditions, only the following comparison operators are supported: + // + // EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN + // + // The following are descriptions of these comparison operators. + // + // EQ : Equal. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one specified in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. 
+ // + // GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see + // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + KeyConditions map[string]*Condition `type:"map"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). 
If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed data set + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Query and Scan in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. + ProjectionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A condition that evaluates the query results after the items are read and + // returns only the desired values. + // + // This parameter does not support attributes of type List or Map. 
+ // + // A QueryFilter is applied after the items have already been read; the process + // of filtering does not consume any additional read capacity units. + // + // If you provide more than one condition in the QueryFilter map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // Note that QueryFilter does not allow key attributes. You cannot define a + // filter condition on a hash key or range key. + // + // Each QueryFilter element consists of an attribute name to compare, along + // with the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the operator specified + // in ComparisonOperator. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + // + // ComparisonOperator - A comparator for evaluating attributes. For example, + // equals, greater than, less than, etc. 
+ // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // For complete descriptions of all comparison operators, see the Condition + // (http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html) + // data type. + QueryFilter map[string]*Condition `type:"map"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Specifies the order in which to return the query results - either ascending + // (true) or descending (false). + // + // Items with the same hash key are stored in sorted order by range key .If + // the range key data type is Number, the results are stored in numeric order. + // For type String, the results are returned in order of ASCII character code + // values. For type Binary, DynamoDB treats each byte of the binary data as + // unsigned. + // + // If ScanIndexForward is true, DynamoDB returns the results in order, by range + // key. This is the default behavior. + // + // If ScanIndexForward is false, DynamoDB sorts the results in descending order + // by range key, and then returns the results to the client. 
+ ScanIndexForward *bool `type:"boolean"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. + // + // ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. If you query a local secondary index, then for each matching + // item in the index DynamoDB will fetch the entire item from the parent table. + // If the index is configured to project all item attributes, then all of the + // data can be obtained from the local secondary index, and no fetching is required. + // + // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is configured + // to project all attributes, this return value is equivalent to specifying + // ALL_ATTRIBUTES. + // + // COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. + // This return value is equivalent to specifying AttributesToGet without specifying + // any value for Select. + // + // If you query a local secondary index and request only attributes that are + // projected into that index, the operation will read only the index and not + // the table. If any of the requested attributes are not projected into the + // local secondary index, DynamoDB will fetch each of these attributes from + // the parent table. This extra fetching incurs additional throughput cost and + // latency. + // + // If you query a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. 
+ // + // If neither Select nor AttributesToGet are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and AttributesToGet together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying AttributesToGet without any value + // for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryInput) GoString() string { + return s.String() +} + +// Represents the output of a Query operation. +type QueryOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you used a QueryFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before> the filter was applied. + // + // If you did not use a filter in the request, then Count and ScannedCount + // are the same. 
+ Count *int64 `type:"integer"` + + // An array of item attributes that match the query criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any QueryFilter is applied. A high + // ScannedCount value with few, or no, Count results indicates an inefficient + // Query operation. For more information, see Count and ScannedCount (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s QueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryOutput) GoString() string { + return s.String() +} + +// Represents the input of a Scan operation. +type ScanInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ProjectionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. 
+ // + // This parameter allows you to retrieve attributes of type List or Map; however, + // it cannot retrieve individual elements within a List or a Map. + // + // The names of one or more attributes to retrieve. If no attribute names are + // provided, then all attributes will be returned. If any of the requested attributes + // are not found, they will not appear in the result. + // + // Note that AttributesToGet has no effect on provisioned throughput consumption. + // DynamoDB determines capacity units consumed based on item size, not on the + // amount of data that is returned to an application. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A logical operator to apply to the conditions in a ScanFilter map: + // + // AND - If all of the conditions evaluate to true, then the entire map evaluates + // to true. + // + // OR - If at least one of the conditions evaluate to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // A Boolean value that determines the read consistency model during the scan: + // + // If ConsistentRead is false, then Scan will use eventually consistent reads. + // The data returned from Scan might not contain the results of other recently + // completed write operations (PutItem, UpdateItem or DeleteItem). The Scan + // response might include some stale data. + // + // If ConsistentRead is true, then Scan will use strongly consistent reads. 
+ // All of the write operations that completed before the Scan began are guaranteed + // to be contained in the Scan response. + // + // The default setting for ConsistentRead is false, meaning that eventually + // consistent reads will be used. + // + // Strongly consistent reads are not supported on global secondary indexes. + // If you scan a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No + // set data types are allowed. + // + // In a parallel scan, a Scan request that includes ExclusiveStartKey must + // specify the same segment whose previous Scan returned the corresponding value + // of LastEvaluatedKey. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). 
To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Scan operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. 
+ // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // in the Amazon DynamoDB Developer Guide. + // + // FilterExpression replaces the legacy ScanFilter and ConditionalOperator + // parameters. + FilterExpression *string `type:"string"` + + // The name of a secondary index to scan. This index can be any local secondary + // index or global secondary index. Note that if you use the IndexName parameter, + // you must also provide TableName. + IndexName *string `min:"3" type:"string"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed data set + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Query and Scan in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the specified + // table or index. These attributes can include scalars, sets, or elements of + // a JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. 
+ // + // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProjectionExpression replaces the legacy AttributesToGet parameter. + ProjectionExpression *string `type:"string"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use FilterExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // A condition that evaluates the scan results and returns only the desired + // values. + // + // This parameter does not support attributes of type List or Map. + // + // If you specify more than one condition in the ScanFilter map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) 
+ // + // Each ScanFilter element consists of an attribute name to compare, along + // with the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the operator specified + // in ComparisonOperator . + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + // + // ComparisonOperator - A comparator for evaluating attributes. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // For complete descriptions of all comparison operators, see Condition (http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html). + ScanFilter map[string]*Condition `type:"map"` + + // For a parallel Scan request, Segment identifies an individual segment to + // be scanned by an application worker. + // + // Segment IDs are zero-based, so the first segment is always 0. For example, + // if you want to use four application threads to scan a table or an index, + // then the first thread specifies a Segment value of 0, the second thread specifies + // 1, and so on. 
+ // + // The value of LastEvaluatedKey returned from a parallel Scan request must + // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan + // operation. + // + // The value for Segment must be greater than or equal to 0, and less than + // the value provided for TotalSegments. + // + // If you provide Segment, you must also provide TotalSegments. + Segment *int64 `type:"integer"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, or the count of matching items. + // + // ALL_ATTRIBUTES - Returns all of the item attributes. + // + // COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. + // This return value is equivalent to specifying AttributesToGet without specifying + // any value for Select. + // + // If neither Select nor AttributesToGet are specified, DynamoDB defaults + // to ALL_ATTRIBUTES. You cannot use both AttributesToGet and Select together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying AttributesToGet without any value + // for Select.) + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items; or, if you provide + // IndexName, the name of the table to which that index belongs. + TableName *string `min:"3" type:"string" required:"true"` + + // For a parallel Scan request, TotalSegments represents the total number of + // segments into which the Scan operation will be divided. The value of TotalSegments + // corresponds to the number of application workers that will perform the parallel + // scan. For example, if you want to use four application threads to scan a + // table or an index, specify a TotalSegments value of 4. 
+ // + // The value for TotalSegments must be greater than or equal to 1, and less + // than or equal to 1000000. If you specify a TotalSegments value of 1, the + // Scan operation will be sequential rather than parallel. + // + // If you specify TotalSegments, you must also specify Segment. + TotalSegments *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanInput) GoString() string { + return s.String() +} + +// Represents the output of a Scan operation. +type ScanOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you set ScanFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as ScannedCount. + Count *int64 `type:"integer"` + + // An array of item attributes that match the scan criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. 
+ // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount + // value with few, or no, Count results indicates an inefficient Scan operation. + // For more information, see Count and ScannedCount (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanOutput) GoString() string { + return s.String() +} + +// Represents the DynamoDB Streams configuration for a table in DynamoDB. +type StreamSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether DynamoDB Streams is enabled (true) or disabled (false) + // on the table. + StreamEnabled *bool `type:"boolean"` + + // The DynamoDB Streams settings for the table. These settings consist of: + // + // StreamEnabled - Indicates whether DynamoDB Streams is enabled (true) or + // disabled (false) on the table. + // + // StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the stream for this table. Valid + // values for StreamViewType are: + // + // KEYS_ONLY - Only the key attributes of the modified item are written to + // the stream. 
+ // + // NEW_IMAGE - The entire item, as it appears after it was modified, is written + // to the stream. + // + // OLD_IMAGE - The entire item, as it appeared before it was modified, is written + // to the stream. + // + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are + // written to the stream. + StreamViewType *string `type:"string" enum:"StreamViewType"` +} + +// String returns the string representation +func (s StreamSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamSpecification) GoString() string { + return s.String() +} + +// Represents the properties of a table. +type TableDescription struct { + _ struct{} `type:"structure"` + + // An array of AttributeDefinition objects. Each of these objects describes + // one attribute in the table and index key schema. + // + // Each AttributeDefinition object in this array is composed of: + // + // AttributeName - The name of the attribute. + // + // AttributeType - The data type for the attribute. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) + // format. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The global secondary indexes, if any, on the table. Each index is scoped + // to a given hash key value. Each element is composed of: + // + // Backfilling - If true, then the index is currently in the backfilling + // phase. Backfilling occurs only when a new global secondary index is added + // to the table; it is the process by which DynamoDB populates the new index + // with data from the table. (This attribute does not appear for indexes that + // were created during a CreateTable operation.) + // + // IndexName - The name of the global secondary index. + // + // IndexSizeBytes - The total size of the global secondary index, in bytes. 
+ // DynamoDB updates this value approximately every six hours. Recent changes + // might not be reflected in this value. + // + // IndexStatus - The current status of the global secondary index: + // + // CREATING - The index is being created. + // + // UPDATING - The index is being updated. + // + // DELETING - The index is being deleted. + // + // ACTIVE - The index is ready for use. + // + // ItemCount - The number of items in the global secondary index. DynamoDB + // updates this value approximately every six hours. Recent changes might not + // be reflected in this value. + // + // KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The key + // schema must begin with the same hash key attribute as the table. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. 
+ // + // ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units, along + // with data about increases and decreases. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"` + + // The number of items in the specified table. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The primary key structure for the table. Each KeySchemaElement consists of: + // + // AttributeName - The name of the attribute. + // + // KeyType - The key type for the attribute. Can be either HASH or RANGE. + // + // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // in the Amazon DynamoDB Developer Guide. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) that uniquely identifies the latest stream + // for this table. + LatestStreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // the AWS customer ID. + // + // the table name. + // + // the StreamLabel. + LatestStreamLabel *string `type:"string"` + + // Represents one or more local secondary indexes on the table. Each index is + // scoped to a given hash key value. Tables with one or more local secondary + // indexes are subject to an item collection size limit, where the amount of + // data within a given item collection cannot exceed 10 GB. 
Each element is + // composed of: + // + // IndexName - The name of the local secondary index. + // + // KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The key + // schema must begin with the same hash key attribute as the table. + // + // Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: + // + // ProjectionType - One of the following: + // + // KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes are in NonKeyAttributes. + // + // ALL - All of the table attributes are projected into the index. + // + // NonKeyAttributes - A list of one or more non-key attribute names that + // are projected into the secondary index. The total count of attributes provided + // in NonKeyAttributes, summed across all of the secondary indexes, must not + // exceed 20. If you project the same attribute into two different indexes, + // this counts as two distinct attributes when determining the total. + // + // IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB + // updates this value approximately every six hours. Recent changes might not + // be reflected in this value. + // + // ItemCount - Represents the number of items in the index. DynamoDB updates + // this value approximately every six hours. Recent changes might not be reflected + // in this value. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. 
+ LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"` + + // The provisioned throughput settings for the table, consisting of read and + // write capacity units, along with data about increases and decreases. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` + + // The current DynamoDB Streams configuration for the table. + StreamSpecification *StreamSpecification `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the table. + TableArn *string `type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` + + // The total size of the specified table, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + TableSizeBytes *int64 `type:"long"` + + // The current state of the table: + // + // CREATING - The table is being created. + // + // UPDATING - The table is being updated. + // + // DELETING - The table is being deleted. + // + // ACTIVE - The table is ready for use. + TableStatus *string `type:"string" enum:"TableStatus"` +} + +// String returns the string representation +func (s TableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableDescription) GoString() string { + return s.String() +} + +// Represents the new provisioned throughput settings to be applied to a global +// secondary index. +type UpdateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be updated. + IndexName *string `min:"3" type:"string" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. 
+ // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Represents the input of an UpdateItem operation. +type UpdateItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use UpdateExpression instead. Do not combine legacy parameters and + // expression parameters in a single API call; otherwise, DynamoDB will return + // a ValidationException exception. + // + // This parameter can be used for modifying top-level attributes; however, + // it does not support individual list or map elements. + // + // The names of attributes to be modified, the action to perform on each, + // and the new value for each. If you are updating an attribute that is an index + // key attribute for any indexes on that table, the attribute type must match + // the index key type defined in the AttributesDefinition of the table description. + // You can use UpdateItem to update any nonkey attributes. + // + // Attribute values cannot be null. String and Binary type attributes must + // have lengths greater than zero. Set type attributes must not be empty. Requests + // with empty values will be rejected with a ValidationException exception. + // + // Each AttributeUpdates element consists of an attribute name to modify, along + // with the following: + // + // Value - The new value, if applicable, for this attribute. + // + // Action - A value that specifies how to perform the update. 
This action + // is only valid for an existing attribute whose data type is Number or is a + // set; do not use ADD for other data types. + // + // If an item with the specified primary key is found in the table, the following + // values perform the following actions: + // + // PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // DELETE - Removes the attribute and its value, if no value is specified + // for DELETE. The data type of the specified value must match the existing + // value's data type. + // + // If a set of values is specified, then those values are subtracted from the + // old set. For example, if the attribute value was the set [a,b,c] and the + // DELETE action specifies [a,c], then the final attribute value is [b]. Specifying + // an empty set is an error. + // + // ADD - Adds the specified value to the item, if the attribute does not + // already exist. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: + // + // If the existing attribute is a number, and if Value is also a number, + // then Value is mathematically added to the existing attribute. If Value is + // a negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that + // doesn't exist before the update, DynamoDB uses 0 as the initial value. + // + // Similarly, if you use ADD for an existing item to increment or decrement + // an attribute value that doesn't exist before the update, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // doesn't have an attribute named itemcount, but you decide to ADD the number + // 3 to this attribute anyway. DynamoDB will create the itemcount attribute, + // set its initial value to 0, and finally add 3 to it. The result will be a + // new itemcount attribute, with a value of 3. 
+ // + // If the existing data type is a set, and if Value is also a set, then + // Value is appended to the existing set. For example, if the attribute value + // is the set [1,2], and the ADD action specified [3], then the final attribute + // value is [1,2,3]. An error occurs if an ADD action is specified for a set + // attribute and the attribute type specified does not match the existing set + // type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, Value must also be a set of strings. + // + // If no item with the specified key is found in the table, the following + // values perform the following actions: + // + // PUT - Causes DynamoDB to create a new item with the specified primary + // key, and then adds the attribute. + // + // DELETE - Nothing happens, because attributes cannot be deleted from a + // nonexistent item. The operation succeeds, but DynamoDB does not create a + // new item. + // + // ADD - Causes DynamoDB to create an item with the supplied primary key + // and number (or set of numbers) for the attribute value. The only data types + // allowed are Number and Number Set. + // + // If you provide any attributes that are part of an index key, then the + // data types for those attributes must match those of the schema in the table's + // attribute definition. + AttributeUpdates map[string]*AttributeValueUpdate `type:"map"` + + // A condition that must be satisfied in order for a conditional update to succeed. + // + // An expression can contain any of the following: + // + // Functions: attribute_exists | attribute_not_exists | attribute_type | + // contains | begins_with | size + // + // These function names are case-sensitive. 
+ // + // Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + // + // ConditionExpression replaces the legacy ConditionalOperator and Expected + // parameters. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A logical operator to apply to the conditions in the Expected map: + // + // AND - If all of the conditions evaluate to true, then the entire map evaluates + // to true. + // + // OR - If at least one of the conditions evaluate to true, then the entire + // map evaluates to true. + // + // If you omit ConditionalOperator, then AND is the default. + // + // The operation will succeed only if the entire map evaluates to true. + // + // This parameter does not support attributes of type List or Map. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter, for backward compatibility. New applications + // should use ConditionExpression instead. Do not combine legacy parameters + // and expression parameters in a single API call; otherwise, DynamoDB will + // return a ValidationException exception. + // + // A map of attribute/condition pairs. Expected provides a conditional block + // for the UpdateItem operation. + // + // Each element of Expected consists of an attribute name, a comparison operator, + // and one or more values. DynamoDB compares the attribute with the value(s) + // you supplied, using the comparison operator. 
For each Expected element, the + // result of the evaluation is either true or false. + // + // If you specify more than one element in the Expected map, then by default + // all of the conditions must evaluate to true. In other words, the conditions + // are ANDed together. (You can use the ConditionalOperator parameter to OR + // the conditions instead. If you do this, then at least one of the conditions + // must evaluate to true, rather than all of them.) + // + // If the Expected map evaluates to true, then the conditional operation succeeds; + // otherwise, it fails. + // + // Expected contains the following: + // + // AttributeValueList - One or more values to evaluate against the supplied + // attribute. The number of values in the list depends on the ComparisonOperator + // being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters. + // + // For type Binary, DynamoDB treats each byte of the binary data as unsigned + // when it compares binary values. + // + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // When performing the comparison, DynamoDB uses strongly consistent reads. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // | BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. 
If an item contains + // an AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // NE : Not equal. NE is supported for all datatypes, including lists and + // maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not equal {"NS":["6", "2", "1"]}. + // + // LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // GT : Greater than. 
+ // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, + // the result is a Boolean true. This result is because the attribute "a" exists; + // its data type is not relevant to the NOT_NULL comparison operator. + // + // NULL : The attribute does not exist. NULL is supported for all datatypes, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. + // If the data type of attribute "a" is null, and you evaluate it using NULL, + // the result is a Boolean false. This is because the attribute "a" exists; + // its data type is not relevant to the NULL comparison operator. + // + // CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator looks + // for a subsequence of the target that matches the input. If the target attribute + // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates + // to true if it finds an exact match with any member of the set. 
+ // + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison + // is a String, then the operator checks for the absence of a substring match. + // If the target attribute of the comparison is Binary, then the operator checks + // for the absence of a subsequence of the target that matches the input. If + // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then + // the operator evaluates to true if it does not find an exact match with any + // member of the set. + // + // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", + // "a" can be a list; however, "b" cannot be a set, a map, or a list. + // + // BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or + // Binary (not a Number or a set type). The target attribute of the comparison + // must be of type String or Binary (not a Number or a set type). + // + // IN : Checks for matching elements within two sets. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary (not a set type). These attributes are compared + // against an existing set type attribute of an item. If any elements of the + // input set are present in the item attribute, the expression evaluates to + // true. + // + // BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same + // type, either String, Number, or Binary (not a set type). 
A target attribute + // matches if the target value is greater than, or equal to, the first element + // and less than, or equal to, the second element. If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not compare to {"N":"6"}. Also, + // {"N":"6"} does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see + // Legacy Conditional Parameters (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + // + // For backward compatibility with previous DynamoDB releases, the following + // parameters can be used instead of AttributeValueList and ComparisonOperator: + // + // Value - A value for DynamoDB to compare with an attribute. + // + // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // attempting the conditional operation: + // + // If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the condition evaluates + // to true; otherwise the condition evaluate to false. + // + // If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the condition evaluates to true. If the value is found, despite + // the assumption that it does not exist, the condition evaluates to false. + // + // Note that the default value for Exists is true. + // + // The Value and Exists parameters are incompatible with AttributeValueList + // and ComparisonOperator. Note that if you use both sets of parameters at once, + // DynamoDB will return a ValidationException exception. + // + // This parameter does not support attributes of type List or Map. 
+ Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. + // For example, consider the following attribute name: + // + // Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot + // be used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // #P = :val + // + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be updated. Each element consists of an attribute + // name and a value for that attribute. + // + // For the primary key, you must provide all of the attributes. For example, + // with a hash type primary key, you only need to provide the hash attribute. + // For a hash-and-range type primary key, you must provide both the hash attribute + // and the range attribute. + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem, do not access + // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity + // information for table(s). + // + // TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // NONE - No ConsumedCapacity details are included in the response. 
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // either before or after they were updated. For UpdateItem, the valid values + // are: + // + // NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // ALL_OLD - If UpdateItem overwrote an attribute name-value pair, then the + // content of the old item is returned. + // + // UPDATED_OLD - The old versions of only the updated attributes are returned. + // + // ALL_NEW - All of the attributes of the new version of the item are returned. + // + // UPDATED_NEW - The new versions of only the updated attributes are returned. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table containing the item to update. + TableName *string `min:"3" type:"string" required:"true"` + + // An expression that defines one or more attributes to be updated, the action + // to be performed on them, and new value(s) for them. + // + // The following action values are available for UpdateExpression. + // + // SET - Adds one or more attributes and values to an item. If any of these + // attribute already exist, they are replaced by the new values. You can also + // use SET to add or subtract from an attribute that is of type Number. 
For + // example: SET myNum = myNum + :val + // + // SET supports the following functions: + // + // if_not_exists (path, operand) - if the item does not contain an attribute + // at the specified path, then if_not_exists evaluates to operand; otherwise, + // it evaluates to path. You can use this function to avoid overwriting an attribute + // that may already be present in the item. + // + // list_append (operand, operand) - evaluates to a list with a new element + // added to it. You can append the new element to the start or the end of the + // list by reversing the order of the operands. + // + // These function names are case-sensitive. + // + // REMOVE - Removes one or more attributes from an item. + // + // ADD - Adds the specified value to the item, if the attribute does not + // already exist. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: + // + // If the existing attribute is a number, and if Value is also a number, + // then Value is mathematically added to the existing attribute. If Value is + // a negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that + // doesn't exist before the update, DynamoDB uses 0 as the initial value. + // + // Similarly, if you use ADD for an existing item to increment or decrement + // an attribute value that doesn't exist before the update, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // doesn't have an attribute named itemcount, but you decide to ADD the number + // 3 to this attribute anyway. DynamoDB will create the itemcount attribute, + // set its initial value to 0, and finally add 3 to it. The result will be a + // new itemcount attribute in the item, with a value of 3. + // + // If the existing data type is a set and if Value is also a set, then Value + // is added to the existing set. 
For example, if the attribute value is the + // set [1,2], and the ADD action specified [3], then the final attribute value + // is [1,2,3]. An error occurs if an ADD action is specified for a set attribute + // and the attribute type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. + // + // The ADD action only supports Number and set data types. In addition, ADD + // can only be used on top-level attributes, not nested attributes. + // + // DELETE - Deletes an element from a set. + // + // If a set of values is specified, then those values are subtracted from the + // old set. For example, if the attribute value was the set [a,b,c] and the + // DELETE action specifies [a,c], then the final attribute value is [b]. Specifying + // an empty set is an error. + // + // The DELETE action only supports set data types. In addition, DELETE can + // only be used on top-level attributes, not nested attributes. + // + // You can have many actions in a single expression, such as the following: + // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 + // + // For more information on update expressions, see Modifying Items and Attributes + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) + // in the Amazon DynamoDB Developer Guide. + // + // UpdateExpression replaces the legacy AttributeUpdates parameter. + UpdateExpression *string `type:"string"` +} + +// String returns the string representation +func (s UpdateItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateItemInput) GoString() string { + return s.String() +} + +// Represents the output of an UpdateItem operation. 
+type UpdateItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute values as they appeared before the UpdateItem operation. + // This map only appears if ReturnValues was specified as something other than + // NONE in the request. Each element represents one attribute. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned + // if the request asked for it. For more information, see Provisioned Throughput + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the operation. + // ItemCollectionMetrics is only returned if the request asked for it. If the + // table does not have any local secondary indexes, this information is not + // returned in the response. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s UpdateItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateItemOutput) GoString() string { + return s.String() +} + +// Represents the input of an UpdateTable operation. +type UpdateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // If you are adding a new global secondary index to the table, AttributeDefinitions + // must include the key element(s) of the new index. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // An array of one or more global secondary indexes for the table. 
For each + // index in the array, you can request one action: + // + // Create - add a new global secondary index to the table. + // + // Update - modify the provisioned throughput settings of an existing global + // secondary index. + // + // Delete - remove a global secondary index from the table. + // + // For more information, see Managing Global Secondary Indexes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) + // in the Amazon DynamoDB Developer Guide. + GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // Represents the DynamoDB Streams configuration for the table. + // + // You will receive a ResourceInUseException if you attempt to enable a stream + // on a table that already has a stream, or if you attempt to disable a stream + // on a table which does not have a stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The name of the table to be updated. + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTableInput) GoString() string { + return s.String() +} + +// Represents the output of an UpdateTable operation. +type UpdateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. 
+ TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s UpdateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTableOutput) GoString() string { + return s.String() +} + +// Represents an operation to perform - either DeleteItem or PutItem. You can +// only request one of these operations, not both, in a single WriteRequest. +// If you do need to perform both of these operations, you will need to provide +// two separate WriteRequest objects. +type WriteRequest struct { + _ struct{} `type:"structure"` + + // A request to perform a DeleteItem operation. + DeleteRequest *DeleteRequest `type:"structure"` + + // A request to perform a PutItem operation. + PutRequest *PutRequest `type:"structure"` +} + +// String returns the string representation +func (s WriteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteRequest) GoString() string { + return s.String() +} + +const ( + // @enum AttributeAction + AttributeActionAdd = "ADD" + // @enum AttributeAction + AttributeActionPut = "PUT" + // @enum AttributeAction + AttributeActionDelete = "DELETE" +) + +const ( + // @enum ComparisonOperator + ComparisonOperatorEq = "EQ" + // @enum ComparisonOperator + ComparisonOperatorNe = "NE" + // @enum ComparisonOperator + ComparisonOperatorIn = "IN" + // @enum ComparisonOperator + ComparisonOperatorLe = "LE" + // @enum ComparisonOperator + ComparisonOperatorLt = "LT" + // @enum ComparisonOperator + ComparisonOperatorGe = "GE" + // @enum ComparisonOperator + ComparisonOperatorGt = "GT" + // @enum ComparisonOperator + ComparisonOperatorBetween = "BETWEEN" + // @enum ComparisonOperator + ComparisonOperatorNotNull = "NOT_NULL" + // @enum ComparisonOperator + ComparisonOperatorNull = "NULL" + // @enum ComparisonOperator + ComparisonOperatorContains = "CONTAINS" + // @enum 
ComparisonOperator + ComparisonOperatorNotContains = "NOT_CONTAINS" + // @enum ComparisonOperator + ComparisonOperatorBeginsWith = "BEGINS_WITH" +) + +const ( + // @enum ConditionalOperator + ConditionalOperatorAnd = "AND" + // @enum ConditionalOperator + ConditionalOperatorOr = "OR" +) + +const ( + // @enum IndexStatus + IndexStatusCreating = "CREATING" + // @enum IndexStatus + IndexStatusUpdating = "UPDATING" + // @enum IndexStatus + IndexStatusDeleting = "DELETING" + // @enum IndexStatus + IndexStatusActive = "ACTIVE" +) + +const ( + // @enum KeyType + KeyTypeHash = "HASH" + // @enum KeyType + KeyTypeRange = "RANGE" +) + +const ( + // @enum ProjectionType + ProjectionTypeAll = "ALL" + // @enum ProjectionType + ProjectionTypeKeysOnly = "KEYS_ONLY" + // @enum ProjectionType + ProjectionTypeInclude = "INCLUDE" +) + +// Determines the level of detail about provisioned throughput consumption that +// is returned in the response: +// +// INDEXES - The response includes the aggregate ConsumedCapacity for the +// operation, together with ConsumedCapacity for each table and secondary index +// that was accessed. +// +// Note that some operations, such as GetItem and BatchGetItem, do not access +// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity +// information for table(s). +// +// TOTAL - The response includes only the aggregate ConsumedCapacity for the +// operation. +// +// NONE - No ConsumedCapacity details are included in the response. 
+const ( + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityIndexes = "INDEXES" + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityTotal = "TOTAL" + // @enum ReturnConsumedCapacity + ReturnConsumedCapacityNone = "NONE" +) + +const ( + // @enum ReturnItemCollectionMetrics + ReturnItemCollectionMetricsSize = "SIZE" + // @enum ReturnItemCollectionMetrics + ReturnItemCollectionMetricsNone = "NONE" +) + +const ( + // @enum ReturnValue + ReturnValueNone = "NONE" + // @enum ReturnValue + ReturnValueAllOld = "ALL_OLD" + // @enum ReturnValue + ReturnValueUpdatedOld = "UPDATED_OLD" + // @enum ReturnValue + ReturnValueAllNew = "ALL_NEW" + // @enum ReturnValue + ReturnValueUpdatedNew = "UPDATED_NEW" +) + +const ( + // @enum ScalarAttributeType + ScalarAttributeTypeS = "S" + // @enum ScalarAttributeType + ScalarAttributeTypeN = "N" + // @enum ScalarAttributeType + ScalarAttributeTypeB = "B" +) + +const ( + // @enum Select + SelectAllAttributes = "ALL_ATTRIBUTES" + // @enum Select + SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES" + // @enum Select + SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES" + // @enum Select + SelectCount = "COUNT" +) + +const ( + // @enum StreamViewType + StreamViewTypeNewImage = "NEW_IMAGE" + // @enum StreamViewType + StreamViewTypeOldImage = "OLD_IMAGE" + // @enum StreamViewType + StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" + // @enum StreamViewType + StreamViewTypeKeysOnly = "KEYS_ONLY" +) + +const ( + // @enum TableStatus + TableStatusCreating = "CREATING" + // @enum TableStatus + TableStatusUpdating = "UPDATING" + // @enum TableStatus + TableStatusDeleting = "DELETING" + // @enum TableStatus + TableStatusActive = "ACTIVE" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,98 @@ +package dynamodb + +import ( + "bytes" + "hash/crc32" + "io" + "io/ioutil" + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +type retryer struct { + client.DefaultRetryer +} + +func (d retryer) RetryRules(r *request.Request) time.Duration { + delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50 + return delay * time.Millisecond +} + +func init() { + initClient = func(c *client.Client) { + r := retryer{} + if c.Config.MaxRetries == nil || aws.IntValue(c.Config.MaxRetries) == aws.UseServiceDefaultRetries { + r.NumMaxRetries = 10 + } else { + r.NumMaxRetries = *c.Config.MaxRetries + } + c.Retryer = r + + c.Handlers.Build.PushBack(disableCompression) + c.Handlers.Unmarshal.PushFront(validateCRC32) + } +} + +func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) { + if length < 0 { + length = 0 + } + buf := bytes.NewBuffer(make([]byte, 0, length)) + + if _, err = buf.ReadFrom(b); err != nil { + return nil, err + } + if err = b.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func disableCompression(r *request.Request) { + r.HTTPRequest.Header.Set("Accept-Encoding", "identity") +} + +func validateCRC32(r *request.Request) { + if r.Error != nil { + return // already have an error, no need to verify CRC + } + + // Checksum validation is off, skip + if aws.BoolValue(r.Config.DisableComputeChecksums) { + return + } + + // Try to get CRC from response + header := r.HTTPResponse.Header.Get("X-Amz-Crc32") + if header == "" { + return // No header, skip + } + + expected, 
err := strconv.ParseUint(header, 10, 32) + if err != nil { + return // Could not determine CRC value, skip + } + + buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength) + if err != nil { // failed to read the response body, skip + return + } + + // Reset body for subsequent reads + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) + + // Compute the CRC checksum + crc := crc32.ChecksumIEEE(buf.Bytes()) + + if crc != uint32(expected) { + // CRC does not match, set a retryable error + r.Retryable = aws.Bool(true) + r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,106 @@ +package dynamodb_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +var db *dynamodb.DynamoDB + +func TestMain(m *testing.M) { + db = dynamodb.New(unit.Session, &aws.Config{ + MaxRetries: aws.Int(2), + }) + db.Handlers.Send.Clear() // mock sending + + os.Exit(m.Run()) +} + +func mockCRCResponse(svc *dynamodb.DynamoDB, status int, body, crc string) (req *request.Request) { + header := http.Header{} + header.Set("x-amz-crc32", crc) + + req, _ = svc.ListTablesRequest(nil) + 
req.Handlers.Send.PushBack(func(*request.Request) { + req.HTTPResponse = &http.Response{ + ContentLength: int64(len(body)), + StatusCode: status, + Body: ioutil.NopCloser(bytes.NewReader([]byte(body))), + Header: header, + } + }) + req.Send() + return +} + +func TestDefaultRetryRules(t *testing.T) { + d := dynamodb.New(unit.Session, &aws.Config{MaxRetries: aws.Int(-1)}) + assert.Equal(t, d.MaxRetries(), 10) +} + +func TestCustomRetryRules(t *testing.T) { + d := dynamodb.New(unit.Session, &aws.Config{MaxRetries: aws.Int(2)}) + assert.Equal(t, d.MaxRetries(), 2) +} + +func TestValidateCRC32NoHeaderSkip(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "") + assert.NoError(t, req.Error) +} + +func TestValidateCRC32InvalidHeaderSkip(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "ABC") + assert.NoError(t, req.Error) +} + +func TestValidateCRC32AlreadyErrorSkip(t *testing.T) { + req := mockCRCResponse(db, 400, "{}", "1234") + assert.Error(t, req.Error) + + assert.NotEqual(t, "CRC32CheckFailed", req.Error.(awserr.Error).Code()) +} + +func TestValidateCRC32IsValid(t *testing.T) { + req := mockCRCResponse(db, 200, `{"TableNames":["A"]}`, "3090163698") + assert.NoError(t, req.Error) + + // CRC check does not affect output parsing + out := req.Data.(*dynamodb.ListTablesOutput) + assert.Equal(t, "A", *out.TableNames[0]) +} + +func TestValidateCRC32DoesNotMatch(t *testing.T) { + req := mockCRCResponse(db, 200, "{}", "1234") + assert.Error(t, req.Error) + + assert.Equal(t, "CRC32CheckFailed", req.Error.(awserr.Error).Code()) + assert.Equal(t, 2, req.RetryCount) +} + +func TestValidateCRC32DoesNotMatchNoComputeChecksum(t *testing.T) { + svc := dynamodb.New(unit.Session, &aws.Config{ + MaxRetries: aws.Int(2), + DisableComputeChecksums: aws.Bool(true), + }) + svc.Handlers.Send.Clear() // mock sending + + req := mockCRCResponse(svc, 200, `{"TableNames":["A"]}`, "1234") + assert.NoError(t, req.Error) + + assert.Equal(t, 0, int(req.RetryCount)) + + // CRC check 
disabled. Does not affect output parsing + out := req.Data.(*dynamodb.ListTablesOutput) + assert.Equal(t, "A", *out.TableNames[0]) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,473 @@ +// Package dynamodbattribute provides conversion utilities from dynamodb.AttributeValue +// to concrete Go types and structures. These conversion utilities allow you to +// convert a Struct, Slice, Map, or Scalar value to or from dynamodb.AttributeValue. +// These are most useful to serialize concrete types to dynamodb.AttributeValue for +// requests or unmarshalling the dynamodb.AttributeValue into a well known typed form. +// +// Converting []byte fields to dynamodb.AttributeValue are only currently supported +// if the input is a map[string]interface{} type. []byte within typed structs are not +// converted correctly and are converted into base64 strings. This is a known bug, +// and will be fixed in a later release. +// +// Convert concrete type to dynamodb.AttributeValue: See (ExampleConvertTo) +// +// type Record struct { +// MyField string +// Letters []string +// A2Num map[string]int +// } +// +// ... 
+// +// r := Record{ +// MyField: "dynamodbattribute.ConvertToX example", +// Letters: []string{"a", "b", "c", "d"}, +// A2Num: map[string]int{"a": 1, "b": 2, "c": 3}, +// } +// av, err := dynamodbattribute.ConvertTo(r) +// fmt.Println(av, err) +// +// Convert dynamodb.AttributeValue to Concrete type: See (ExampleConvertFrom) +// +// r2 := Record{} +// err = dynamodbattribute.ConvertFrom(av, &r2) +// fmt.Println(err, reflect.DeepEqual(r, r2)) +// +// Use Conversion utilities with DynamoDB.PutItem: See () +// +// svc := dynamodb.New(nil) +// item, err := dynamodbattribute.ConvertToMap(r) +// if err != nil { +// fmt.Println("Failed to convert", err) +// return +// } +// result, err := svc.PutItem(&dynamodb.PutItemInput{ +// Item: item, +// TableName: aws.String("exampleTable"), +// }) +package dynamodbattribute + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +// ConvertToMap accepts a map[string]interface{} or struct and converts it to a +// map[string]*dynamodb.AttributeValue. +// +// If in contains any structs, it is first JSON encoded/decoded it to convert it +// to a map[string]interface{}, so `json` struct tags are respected. 
+func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + if in == nil { + return nil, awserr.New("SerializationError", + "in must be a map[string]interface{} or struct, got ", nil) + } + + v := reflect.ValueOf(in) + if v.Kind() != reflect.Struct && !(v.Kind() == reflect.Map && v.Type().Key().Kind() == reflect.String) { + return nil, awserr.New("SerializationError", + fmt.Sprintf("in must be a map[string]interface{} or struct, got %s", + v.Type().String()), + nil) + } + + if isTyped(reflect.TypeOf(in)) { + var out map[string]interface{} + in = convertToUntyped(in, out) + } + + item = make(map[string]*dynamodb.AttributeValue) + for k, v := range in.(map[string]interface{}) { + item[k] = convertTo(v) + } + + return item, nil +} + +// ConvertFromMap accepts a map[string]*dynamodb.AttributeValue and converts it to a +// map[string]interface{} or struct. +// +// If v points to a struct, the result is first converted it to a +// map[string]interface{}, then JSON encoded/decoded it to convert to a struct, +// so `json` struct tags are respected. 
+func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s", + rv.Type()), + nil) + } + if rv.Elem().Kind() != reflect.Struct && !(rv.Elem().Kind() == reflect.Map && rv.Elem().Type().Key().Kind() == reflect.String) { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s", + rv.Type()), + nil) + } + + m := make(map[string]interface{}) + for k, v := range item { + m[k] = convertFrom(v) + } + + if isTyped(reflect.TypeOf(v)) { + err = convertToTyped(m, v) + } else { + rv.Elem().Set(reflect.ValueOf(m)) + } + + return err +} + +// ConvertToList accepts an array or slice and converts it to a +// []*dynamodb.AttributeValue. +// +// If in contains any structs, it is first JSON encoded/decoded it to convert it +// to a []interface{}, so `json` struct tags are respected. 
+func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + if in == nil { + return nil, awserr.New("SerializationError", + "in must be an array or slice, got ", + nil) + } + + v := reflect.ValueOf(in) + if v.Kind() != reflect.Array && v.Kind() != reflect.Slice { + return nil, awserr.New("SerializationError", + fmt.Sprintf("in must be an array or slice, got %s", + v.Type().String()), + nil) + } + + if isTyped(reflect.TypeOf(in)) { + var out []interface{} + in = convertToUntyped(in, out) + } + + item = make([]*dynamodb.AttributeValue, 0, len(in.([]interface{}))) + for _, v := range in.([]interface{}) { + item = append(item, convertTo(v)) + } + + return item, nil +} + +// ConvertFromList accepts a []*dynamodb.AttributeValue and converts it to an array or +// slice. +// +// If v contains any structs, the result is first converted it to a +// []interface{}, then JSON encoded/decoded it to convert to a typed array or +// slice, so `json` struct tags are respected. 
+func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s", + rv.Type()), + nil) + } + if rv.Elem().Kind() != reflect.Array && rv.Elem().Kind() != reflect.Slice { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s", + rv.Type()), + nil) + } + + l := make([]interface{}, 0, len(item)) + for _, v := range item { + l = append(l, convertFrom(v)) + } + + if isTyped(reflect.TypeOf(v)) { + err = convertToTyped(l, v) + } else { + rv.Elem().Set(reflect.ValueOf(l)) + } + + return err +} + +// ConvertTo accepts any interface{} and converts it to a *dynamodb.AttributeValue. +// +// If in contains any structs, it is first JSON encoded/decoded it to convert it +// to a interface{}, so `json` struct tags are respected. +func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + if in != nil && isTyped(reflect.TypeOf(in)) { + var out interface{} + in = convertToUntyped(in, out) + } + + item = convertTo(in) + return item, nil +} + +// ConvertFrom accepts a *dynamodb.AttributeValue and converts it to any interface{}. +// +// If v contains any structs, the result is first converted it to a interface{}, +// then JSON encoded/decoded it to convert to a struct, so `json` struct tags +// are respected. 
+func ConvertFrom(item *dynamodb.AttributeValue, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok { + err = e + } else if s, ok := r.(string); ok { + err = fmt.Errorf(s) + } else { + err = r.(error) + } + item = nil + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s", + rv.Type()), + nil) + } + if rv.Elem().Kind() != reflect.Interface && rv.Elem().Kind() != reflect.Struct { + return awserr.New("SerializationError", + fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s", + rv.Type()), + nil) + } + + res := convertFrom(item) + + if isTyped(reflect.TypeOf(v)) { + err = convertToTyped(res, v) + } else if res != nil { + rv.Elem().Set(reflect.ValueOf(res)) + } + + return err +} + +func isTyped(v reflect.Type) bool { + switch v.Kind() { + case reflect.Struct: + return true + case reflect.Array, reflect.Slice: + if isTyped(v.Elem()) { + return true + } + case reflect.Map: + if isTyped(v.Key()) { + return true + } + if isTyped(v.Elem()) { + return true + } + case reflect.Ptr: + return isTyped(v.Elem()) + } + return false +} + +func convertToUntyped(in, out interface{}) interface{} { + b, err := json.Marshal(in) + if err != nil { + panic(err) + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + err = decoder.Decode(&out) + if err != nil { + panic(err) + } + + return out +} + +func convertToTyped(in, out interface{}) error { + b, err := json.Marshal(in) + if err != nil { + return err + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + return decoder.Decode(&out) +} + +func convertTo(in interface{}) *dynamodb.AttributeValue { + a := &dynamodb.AttributeValue{} + + if in == nil { + a.NULL = new(bool) + *a.NULL = true + return a + } + + if m, ok := in.(map[string]interface{}); ok { + a.M = 
make(map[string]*dynamodb.AttributeValue) + for k, v := range m { + a.M[k] = convertTo(v) + } + return a + } + + v := reflect.ValueOf(in) + switch v.Kind() { + case reflect.Bool: + a.BOOL = new(bool) + *a.BOOL = v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a.N = new(string) + *a.N = strconv.FormatInt(v.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + a.N = new(string) + *a.N = strconv.FormatUint(v.Uint(), 10) + case reflect.Float32, reflect.Float64: + a.N = new(string) + *a.N = strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.String: + if n, ok := in.(json.Number); ok { + a.N = new(string) + *a.N = n.String() + } else { + a.S = new(string) + *a.S = v.String() + } + case reflect.Slice: + switch v.Type() { + case reflect.TypeOf(([]byte)(nil)): + a.B = v.Bytes() + default: + a.L = make([]*dynamodb.AttributeValue, v.Len()) + for i := 0; i < v.Len(); i++ { + a.L[i] = convertTo(v.Index(i).Interface()) + } + } + default: + panic(fmt.Sprintf("the type %s is not supported", v.Type().String())) + } + + return a +} + +func convertFrom(a *dynamodb.AttributeValue) interface{} { + if a.S != nil { + return *a.S + } + + if a.N != nil { + // Number is tricky b/c we don't know which numeric type to use. Here we + // simply try the different types from most to least restrictive. 
+ if n, err := strconv.ParseInt(*a.N, 10, 64); err == nil { + return int(n) + } + if n, err := strconv.ParseUint(*a.N, 10, 64); err == nil { + return uint(n) + } + n, err := strconv.ParseFloat(*a.N, 64) + if err != nil { + panic(err) + } + return n + } + + if a.BOOL != nil { + return *a.BOOL + } + + if a.NULL != nil { + return nil + } + + if a.M != nil { + m := make(map[string]interface{}) + for k, v := range a.M { + m[k] = convertFrom(v) + } + return m + } + + if a.L != nil { + l := make([]interface{}, len(a.L)) + for index, v := range a.L { + l[index] = convertFrom(v) + } + return l + } + + if a.B != nil { + return a.B + } + + panic(fmt.Sprintf("%#v is not a supported dynamodb.AttributeValue", a)) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,492 @@ +package dynamodbattribute + +import ( + "math" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +type mySimpleStruct struct { + String string + Int int + Uint uint + Float32 float32 + Float64 float64 + Bool bool + Null *interface{} +} + +type myComplexStruct struct { + Simple []mySimpleStruct +} + +type converterTestInput struct { + input interface{} + expected interface{} + err awserr.Error + inputType string // "enum" of types +} + +var trueValue = true +var falseValue = false + +var converterScalarInputs 
= []converterTestInput{ + { + input: nil, + expected: &dynamodb.AttributeValue{NULL: &trueValue}, + }, + { + input: "some string", + expected: &dynamodb.AttributeValue{S: aws.String("some string")}, + }, + { + input: true, + expected: &dynamodb.AttributeValue{BOOL: &trueValue}, + }, + { + input: false, + expected: &dynamodb.AttributeValue{BOOL: &falseValue}, + }, + { + input: 3.14, + expected: &dynamodb.AttributeValue{N: aws.String("3.14")}, + }, + { + input: math.MaxFloat32, + expected: &dynamodb.AttributeValue{N: aws.String("340282346638528860000000000000000000000")}, + }, + { + input: math.MaxFloat64, + expected: &dynamodb.AttributeValue{N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")}, + }, + { + input: 12, + expected: &dynamodb.AttributeValue{N: aws.String("12")}, + }, + { + input: mySimpleStruct{}, + expected: &dynamodb.AttributeValue{ + M: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("0")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("0")}, + }, + }, + inputType: "mySimpleStruct", + }, +} + +var converterMapTestInputs = []converterTestInput{ + // Scalar tests + { + input: nil, + err: awserr.New("SerializationError", "in must be a map[string]interface{} or struct, got ", nil), + }, + { + input: map[string]interface{}{"string": "some string"}, + expected: map[string]*dynamodb.AttributeValue{"string": {S: aws.String("some string")}}, + }, + { + input: map[string]interface{}{"bool": true}, + expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &trueValue}}, + }, + { + input: map[string]interface{}{"bool": false}, + 
expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &falseValue}}, + }, + { + input: map[string]interface{}{"null": nil}, + expected: map[string]*dynamodb.AttributeValue{"null": {NULL: &trueValue}}, + }, + { + input: map[string]interface{}{"float": 3.14}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("3.14")}}, + }, + { + input: map[string]interface{}{"float": math.MaxFloat32}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("340282346638528860000000000000000000000")}}, + }, + { + input: map[string]interface{}{"float": math.MaxFloat64}, + expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")}}, + }, + { + input: map[string]interface{}{"int": int(12)}, + expected: map[string]*dynamodb.AttributeValue{"int": {N: aws.String("12")}}, + }, + { + input: map[string]interface{}{"byte": []byte{48, 49}}, + expected: map[string]*dynamodb.AttributeValue{"byte": {B: []byte{48, 49}}}, + }, + // List + { + input: map[string]interface{}{"list": []interface{}{"a string", 12, 3.14, true, nil, false}}, + expected: map[string]*dynamodb.AttributeValue{ + "list": { + L: []*dynamodb.AttributeValue{ + {S: aws.String("a string")}, + {N: aws.String("12")}, + {N: aws.String("3.14")}, + {BOOL: &trueValue}, + {NULL: &trueValue}, + {BOOL: &falseValue}, + }, + }, + }, + }, + // Map + { + input: map[string]interface{}{"map": map[string]interface{}{"nestedint": 12}}, + expected: map[string]*dynamodb.AttributeValue{ + "map": { + M: map[string]*dynamodb.AttributeValue{ + "nestedint": { + N: aws.String("12"), + }, + }, + }, + }, + }, + // Structs + { + input: mySimpleStruct{}, + expected: 
map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("0")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("0")}, + }, + inputType: "mySimpleStruct", + }, + { + input: myComplexStruct{}, + expected: map[string]*dynamodb.AttributeValue{ + "Simple": {NULL: &trueValue}, + }, + inputType: "myComplexStruct", + }, + { + input: myComplexStruct{Simple: []mySimpleStruct{{Int: -2}, {Uint: 5}}}, + expected: map[string]*dynamodb.AttributeValue{ + "Simple": { + L: []*dynamodb.AttributeValue{ + { + M: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("-2")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("0")}, + }, + }, + { + M: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: aws.String("0")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("5")}, + }, + }, + }, + }, + }, + inputType: "myComplexStruct", + }, +} + +var converterListTestInputs = []converterTestInput{ + { + input: nil, + err: awserr.New("SerializationError", "in must be an array or slice, got ", nil), + }, + { + input: []interface{}{}, + expected: []*dynamodb.AttributeValue{}, + }, + { + input: []interface{}{"a string", 12, 3.14, true, nil, false}, + expected: []*dynamodb.AttributeValue{ + {S: aws.String("a string")}, + {N: aws.String("12")}, + {N: aws.String("3.14")}, + {BOOL: &trueValue}, + {NULL: &trueValue}, + {BOOL: &falseValue}, + }, + }, + { + input: []mySimpleStruct{{}}, + expected: []*dynamodb.AttributeValue{ + { + M: map[string]*dynamodb.AttributeValue{ + "Bool": {BOOL: &falseValue}, + "Float32": {N: aws.String("0")}, + "Float64": {N: aws.String("0")}, + "Int": {N: 
aws.String("0")}, + "Null": {NULL: &trueValue}, + "String": {S: aws.String("")}, + "Uint": {N: aws.String("0")}, + }, + }, + }, + inputType: "mySimpleStruct", + }, +} + +func TestConvertTo(t *testing.T) { + for _, test := range converterScalarInputs { + testConvertTo(t, test) + } +} + +func testConvertTo(t *testing.T, test converterTestInput) { + actual, err := ConvertTo(test.input) + if test.err != nil { + if err == nil { + t.Errorf("ConvertTo with input %#v retured %#v, expected error `%s`", test.input, actual, test.err) + } else if err.Error() != test.err.Error() { + t.Errorf("ConvertTo with input %#v retured error `%s`, expected error `%s`", test.input, err, test.err) + } + } else { + if err != nil { + t.Errorf("ConvertTo with input %#v retured error `%s`", test.input, err) + } + compareObjects(t, test.expected, actual) + } +} + +func TestConvertFrom(t *testing.T) { + // Using the same inputs from TestConvertTo, test the reverse mapping. + for _, test := range converterScalarInputs { + if test.expected != nil { + testConvertFrom(t, test) + } + } +} + +func testConvertFrom(t *testing.T, test converterTestInput) { + switch test.inputType { + case "mySimpleStruct": + var actual mySimpleStruct + if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFrom with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + case "myComplexStruct": + var actual myComplexStruct + if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFrom with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + default: + var actual interface{} + if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFrom with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + } +} + +func TestConvertFromError(t *testing.T) { + // 
Test that we get an error using ConvertFrom to convert to a map. + var actual map[string]interface{} + expected := awserr.New("SerializationError", `v must be a non-nil pointer to an interface{} or struct, got *map[string]interface {}`, nil).Error() + if err := ConvertFrom(nil, &actual); err == nil { + t.Errorf("ConvertFrom with input %#v returned no error, expected error `%s`", nil, expected) + } else if err.Error() != expected { + t.Errorf("ConvertFrom with input %#v returned error `%s`, expected error `%s`", nil, err, expected) + } + + // Test that we get an error using ConvertFrom to convert to a list. + var actual2 []interface{} + expected = awserr.New("SerializationError", `v must be a non-nil pointer to an interface{} or struct, got *[]interface {}`, nil).Error() + if err := ConvertFrom(nil, &actual2); err == nil { + t.Errorf("ConvertFrom with input %#v returned no error, expected error `%s`", nil, expected) + } else if err.Error() != expected { + t.Errorf("ConvertFrom with input %#v returned error `%s`, expected error `%s`", nil, err, expected) + } +} + +func TestConvertToMap(t *testing.T) { + for _, test := range converterMapTestInputs { + testConvertToMap(t, test) + } +} + +func testConvertToMap(t *testing.T, test converterTestInput) { + actual, err := ConvertToMap(test.input) + if test.err != nil { + if err == nil { + t.Errorf("ConvertToMap with input %#v retured %#v, expected error `%s`", test.input, actual, test.err) + } else if err.Error() != test.err.Error() { + t.Errorf("ConvertToMap with input %#v retured error `%s`, expected error `%s`", test.input, err, test.err) + } + } else { + if err != nil { + t.Errorf("ConvertToMap with input %#v retured error `%s`", test.input, err) + } + compareObjects(t, test.expected, actual) + } +} + +func TestConvertFromMap(t *testing.T) { + // Using the same inputs from TestConvertToMap, test the reverse mapping. 
+ for _, test := range converterMapTestInputs { + if test.expected != nil { + testConvertFromMap(t, test) + } + } +} + +func testConvertFromMap(t *testing.T, test converterTestInput) { + switch test.inputType { + case "mySimpleStruct": + var actual mySimpleStruct + if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFromMap with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + case "myComplexStruct": + var actual myComplexStruct + if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFromMap with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + default: + var actual map[string]interface{} + if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFromMap with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + } +} + +func TestConvertFromMapError(t *testing.T) { + // Test that we get an error using ConvertFromMap to convert to an interface{}. + var actual interface{} + expected := awserr.New("SerializationError", `v must be a non-nil pointer to a map[string]interface{} or struct, got *interface {}`, nil).Error() + if err := ConvertFromMap(nil, &actual); err == nil { + t.Errorf("ConvertFromMap with input %#v returned no error, expected error `%s`", nil, expected) + } else if err.Error() != expected { + t.Errorf("ConvertFromMap with input %#v returned error `%s`, expected error `%s`", nil, err, expected) + } + + // Test that we get an error using ConvertFromMap to convert to a slice. 
+ var actual2 []interface{} + expected = awserr.New("SerializationError", `v must be a non-nil pointer to a map[string]interface{} or struct, got *[]interface {}`, nil).Error() + if err := ConvertFromMap(nil, &actual2); err == nil { + t.Errorf("ConvertFromMap with input %#v returned no error, expected error `%s`", nil, expected) + } else if err.Error() != expected { + t.Errorf("ConvertFromMap with input %#v returned error `%s`, expected error `%s`", nil, err, expected) + } +} + +func TestConvertToList(t *testing.T) { + for _, test := range converterListTestInputs { + testConvertToList(t, test) + } +} + +func testConvertToList(t *testing.T, test converterTestInput) { + actual, err := ConvertToList(test.input) + if test.err != nil { + if err == nil { + t.Errorf("ConvertToList with input %#v retured %#v, expected error `%s`", test.input, actual, test.err) + } else if err.Error() != test.err.Error() { + t.Errorf("ConvertToList with input %#v retured error `%s`, expected error `%s`", test.input, err, test.err) + } + } else { + if err != nil { + t.Errorf("ConvertToList with input %#v retured error `%s`", test.input, err) + } + compareObjects(t, test.expected, actual) + } +} + +func TestConvertFromList(t *testing.T) { + // Using the same inputs from TestConvertToList, test the reverse mapping. 
+ for _, test := range converterListTestInputs { + if test.expected != nil { + testConvertFromList(t, test) + } + } +} + +func testConvertFromList(t *testing.T, test converterTestInput) { + switch test.inputType { + case "mySimpleStruct": + var actual []mySimpleStruct + if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFromList with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + case "myComplexStruct": + var actual []myComplexStruct + if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFromList with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + default: + var actual []interface{} + if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil { + t.Errorf("ConvertFromList with input %#v retured error `%s`", test.expected, err) + } + compareObjects(t, test.input, actual) + } +} + +func TestConvertFromListError(t *testing.T) { + // Test that we get an error using ConvertFromList to convert to a map. + var actual map[string]interface{} + expected := awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *map[string]interface {}`, nil).Error() + if err := ConvertFromList(nil, &actual); err == nil { + t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected) + } else if err.Error() != expected { + t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected) + } + + // Test that we get an error using ConvertFromList to convert to a struct. 
+ var actual2 myComplexStruct + expected = awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *dynamodbattribute.myComplexStruct`, nil).Error() + if err := ConvertFromList(nil, &actual2); err == nil { + t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected) + } else if err.Error() != expected { + t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected) + } + + // Test that we get an error using ConvertFromList to convert to an interface{}. + var actual3 interface{} + expected = awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *interface {}`, nil).Error() + if err := ConvertFromList(nil, &actual3); err == nil { + t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected) + } else if err.Error() != expected { + t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected) + } +} + +func compareObjects(t *testing.T, expected interface{}, actual interface{}) { + if !reflect.DeepEqual(expected, actual) { + t.Errorf("\nExpected %s:\n%s\nActual %s:\n%s\n", + reflect.ValueOf(expected).Kind(), + awsutil.Prettify(expected), + reflect.ValueOf(actual).Kind(), + awsutil.Prettify(actual)) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,80 @@ +package dynamodbattribute_test 
+ +import ( + "fmt" + "reflect" + + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" +) + +func ExampleConvertTo() { + type Record struct { + MyField string + Letters []string + Numbers []int + } + + r := Record{ + MyField: "MyFieldValue", + Letters: []string{"a", "b", "c", "d"}, + Numbers: []int{1, 2, 3}, + } + av, err := dynamodbattribute.ConvertTo(r) + fmt.Println("err", err) + fmt.Println("MyField", av.M["MyField"]) + fmt.Println("Letters", av.M["Letters"]) + fmt.Println("Numbers", av.M["Numbers"]) + + // Output: + // err + // MyField { + // S: "MyFieldValue" + // } + // Letters { + // L: [ + // { + // S: "a" + // }, + // { + // S: "b" + // }, + // { + // S: "c" + // }, + // { + // S: "d" + // } + // ] + // } + // Numbers { + // L: [{ + // N: "1" + // },{ + // N: "2" + // },{ + // N: "3" + // }] + // } +} + +func ExampleConvertFrom() { + type Record struct { + MyField string + Letters []string + A2Num map[string]int + } + + r := Record{ + MyField: "MyFieldValue", + Letters: []string{"a", "b", "c", "d"}, + A2Num: map[string]int{"a": 1, "b": 2, "c": 3}, + } + av, err := dynamodbattribute.ConvertTo(r) + + r2 := Record{} + err = dynamodbattribute.ConvertFrom(av, &r2) + fmt.Println(err, reflect.DeepEqual(r, r2)) + + // Output: + // true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,74 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodbiface provides an interface for the Amazon DynamoDB. 
+package dynamodbiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +// DynamoDBAPI is the interface type for dynamodb.DynamoDB. +type DynamoDBAPI interface { + BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput) + + BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) + + BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error + + BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput) + + BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) + + CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput) + + CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) + + DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput) + + DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error) + + DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput) + + DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error) + + DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput) + + DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) + + GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) + + GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) + + ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput) + + ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) + + ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error + + PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput) + + PutItem(*dynamodb.PutItemInput) 
(*dynamodb.PutItemOutput, error) + + QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput) + + Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error) + + QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error + + ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput) + + Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error) + + ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error + + UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput) + + UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) + + UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput) + + UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error) +} + +var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1336 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package dynamodb_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDynamoDB_BatchGetItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.BatchGetItemInput{ + RequestItems: map[string]*dynamodb.KeysAndAttributes{ // Required + "Key": { // Required + Keys: []map[string]*dynamodb.AttributeValue{ // Required + { // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + // More values... + }, + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConsistentRead: aws.Bool(true), + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ProjectionExpression: aws.String("ProjectionExpression"), + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + } + resp, err := svc.BatchGetItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDynamoDB_BatchWriteItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.BatchWriteItemInput{ + RequestItems: map[string][]*dynamodb.WriteRequest{ // Required + "Key": { // Required + { // Required + DeleteRequest: &dynamodb.DeleteRequest{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + PutRequest: &dynamodb.PutRequest{ + Item: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + }, + // More values... + }, + // More values... 
+ }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + } + resp, err := svc.BatchWriteItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_CreateTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.CreateTableInput{ + AttributeDefinitions: []*dynamodb.AttributeDefinition{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + AttributeType: aws.String("ScalarAttributeType"), // Required + }, + // More values... + }, + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + TableName: aws.String("TableName"), // Required + GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{ + { // Required + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... + }, + ProjectionType: aws.String("ProjectionType"), + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + // More values... 
+ }, + LocalSecondaryIndexes: []*dynamodb.LocalSecondaryIndex{ + { // Required + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... + }, + ProjectionType: aws.String("ProjectionType"), + }, + }, + // More values... + }, + StreamSpecification: &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(true), + StreamViewType: aws.String("StreamViewType"), + }, + } + resp, err := svc.CreateTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DeleteItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... 
+ }, + TableName: aws.String("TableName"), // Required + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... 
+ }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + } + resp, err := svc.DeleteItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DeleteTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DeleteTableInput{ + TableName: aws.String("TableName"), // Required + } + resp, err := svc.DeleteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_DescribeTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.DescribeTableInput{ + TableName: aws.String("TableName"), // Required + } + resp, err := svc.DescribeTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_GetItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.GetItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConsistentRead: aws.Bool(true), + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ProjectionExpression: aws.String("ProjectionExpression"), + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + } + resp, err := svc.GetItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_ListTables() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.ListTablesInput{ + ExclusiveStartTableName: aws.String("TableName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListTables(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_PutItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.PutItemInput{ + Item: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... 
+ }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... 
+ }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + } + resp, err := svc.PutItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_Query() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.QueryInput{ + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConditionalOperator: aws.String("ConditionalOperator"), + ConsistentRead: aws.Bool(true), + ExclusiveStartKey: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... 
+ }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + FilterExpression: aws.String("ConditionExpression"), + IndexName: aws.String("IndexName"), + KeyConditionExpression: aws.String("KeyExpression"), + KeyConditions: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + // More values... + }, + Limit: aws.Int64(1), + ProjectionExpression: aws.String("ProjectionExpression"), + QueryFilter: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... 
+ }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ScanIndexForward: aws.Bool(true), + Select: aws.String("Select"), + } + resp, err := svc.Query(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_Scan() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.ScanInput{ + TableName: aws.String("TableName"), // Required + AttributesToGet: []*string{ + aws.String("AttributeName"), // Required + // More values... + }, + ConditionalOperator: aws.String("ConditionalOperator"), + ConsistentRead: aws.Bool(true), + ExclusiveStartKey: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... 
+ }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + FilterExpression: aws.String("ConditionExpression"), + IndexName: aws.String("IndexName"), + Limit: aws.Int64(1), + ProjectionExpression: aws.String("ProjectionExpression"), + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ScanFilter: map[string]*dynamodb.Condition{ + "Key": { // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... 
+ }, + }, + // More values... + }, + Segment: aws.Int64(1), + Select: aws.String("Select"), + TotalSegments: aws.Int64(1), + } + resp, err := svc.Scan(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_UpdateItem() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.UpdateItemInput{ + Key: map[string]*dynamodb.AttributeValue{ // Required + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + TableName: aws.String("TableName"), // Required + AttributeUpdates: map[string]*dynamodb.AttributeValueUpdate{ + "Key": { // Required + Action: aws.String("AttributeAction"), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... 
+ }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... + }, + ConditionExpression: aws.String("ConditionExpression"), + ConditionalOperator: aws.String("ConditionalOperator"), + Expected: map[string]*dynamodb.ExpectedAttributeValue{ + "Key": { // Required + AttributeValueList: []*dynamodb.AttributeValue{ + { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ComparisonOperator: aws.String("ComparisonOperator"), + Exists: aws.Bool(true), + Value: &dynamodb.AttributeValue{ + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + }, + // More values... 
+ }, + ExpressionAttributeNames: map[string]*string{ + "Key": aws.String("AttributeName"), // Required + // More values... + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + B: []byte("PAYLOAD"), + BOOL: aws.Bool(true), + BS: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + L: []*dynamodb.AttributeValue{ + { // Required + // Recursive values... + }, + // More values... + }, + M: map[string]*dynamodb.AttributeValue{ + "Key": { // Required + // Recursive values... + }, + // More values... + }, + N: aws.String("NumberAttributeValue"), + NS: []*string{ + aws.String("NumberAttributeValue"), // Required + // More values... + }, + NULL: aws.Bool(true), + S: aws.String("StringAttributeValue"), + SS: []*string{ + aws.String("StringAttributeValue"), // Required + // More values... + }, + }, + // More values... + }, + ReturnConsumedCapacity: aws.String("ReturnConsumedCapacity"), + ReturnItemCollectionMetrics: aws.String("ReturnItemCollectionMetrics"), + ReturnValues: aws.String("ReturnValue"), + UpdateExpression: aws.String("UpdateExpression"), + } + resp, err := svc.UpdateItem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDB_UpdateTable() { + svc := dynamodb.New(session.New()) + + params := &dynamodb.UpdateTableInput{ + TableName: aws.String("TableName"), // Required + AttributeDefinitions: []*dynamodb.AttributeDefinition{ + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + AttributeType: aws.String("ScalarAttributeType"), // Required + }, + // More values... 
+ }, + GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{ + { // Required + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + KeySchema: []*dynamodb.KeySchemaElement{ // Required + { // Required + AttributeName: aws.String("KeySchemaAttributeName"), // Required + KeyType: aws.String("KeyType"), // Required + }, + // More values... + }, + Projection: &dynamodb.Projection{ // Required + NonKeyAttributes: []*string{ + aws.String("NonKeyAttributeName"), // Required + // More values... + }, + ProjectionType: aws.String("ProjectionType"), + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + }, + Update: &dynamodb.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String("IndexName"), // Required + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ // Required + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + }, + }, + // More values... + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(1), // Required + WriteCapacityUnits: aws.Int64(1), // Required + }, + StreamSpecification: &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(true), + StreamViewType: aws.String("StreamViewType"), + }, + } + resp, err := svc.UpdateTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,190 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Overview +// +// This is the Amazon DynamoDB API Reference. This guide provides descriptions +// and samples of the low-level DynamoDB API. For information about DynamoDB +// application development, see the Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/). +// +// Instead of making the requests to the low-level DynamoDB API directly from +// your application, we recommend that you use the AWS Software Development +// Kits (SDKs). The easy-to-use libraries in the AWS SDKs make it unnecessary +// to call the low-level DynamoDB API directly from your application. The libraries +// take care of request authentication, serialization, and connection management. +// For more information, see Using the AWS SDKs with DynamoDB (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/UsingAWSSDK.html) +// in the Amazon DynamoDB Developer Guide. +// +// If you decide to code against the low-level DynamoDB API directly, you will +// need to write the necessary code to authenticate your requests. 
For more +// information on signing your requests, see Using the DynamoDB API (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/API.html) +// in the Amazon DynamoDB Developer Guide. +// +// The following are short descriptions of each low-level API action, organized +// by function. +// +// Managing Tables +// +// CreateTable - Creates a table with user-specified provisioned throughput +// settings. You must designate one attribute as the hash primary key for the +// table; you can optionally designate a second attribute as the range primary +// key. DynamoDB creates indexes on these key attributes for fast data access. +// Optionally, you can create one or more secondary indexes, which provide fast +// data access using non-key attributes. +// +// DescribeTable - Returns metadata for a table, such as table size, status, +// and index information. +// +// UpdateTable - Modifies the provisioned throughput settings for a table. +// Optionally, you can modify the provisioned throughput settings for global +// secondary indexes on the table. +// +// ListTables - Returns a list of all tables associated with the current +// AWS account and endpoint. +// +// DeleteTable - Deletes a table and all of its indexes. +// +// For conceptual information about managing tables, see Working with Tables +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html) +// in the Amazon DynamoDB Developer Guide. +// +// Reading Data +// +// GetItem - Returns a set of attributes for the item that has a given primary +// key. By default, GetItem performs an eventually consistent read; however, +// applications can request a strongly consistent read instead. +// +// BatchGetItem - Performs multiple GetItem requests for data items using +// their primary keys, from one table or multiple tables. The response from +// BatchGetItem has a size limit of 16 MB and returns a maximum of 100 items. 
+// Both eventually consistent and strongly consistent reads can be used. +// +// Query - Returns one or more items from a table or a secondary index. You +// must provide a specific hash key value. You can narrow the scope of the query +// using comparison operators against a range key value, or on the index key. +// Query supports either eventual or strong consistency. A single response has +// a size limit of 1 MB. +// +// Scan - Reads every item in a table; the result set is eventually consistent. +// You can limit the number of items returned by filtering the data attributes, +// using conditional expressions. Scan can be used to enable ad-hoc querying +// of a table against non-key attributes; however, since this is a full table +// scan without using an index, Scan should not be used for any application +// query use case that requires predictable performance. +// +// For conceptual information about reading data, see Working with Items +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// and Query and Scan Operations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) +// in the Amazon DynamoDB Developer Guide. +// +// Modifying Data +// +// PutItem - Creates a new item, or replaces an existing item with a new +// item (including all the attributes). By default, if an item in the table +// already exists with the same primary key, the new item completely replaces +// the existing item. You can use conditional operators to replace an item only +// if its attribute values match certain conditions, or to insert a new item +// only if that item doesn't already exist. +// +// UpdateItem - Modifies the attributes of an existing item. You can also +// use conditional operators to perform an update only if the item's attribute +// values match certain conditions. +// +// DeleteItem - Deletes an item in a table by primary key. 
You can use conditional +// operators to perform a delete an item only if the item's attribute values +// match certain conditions. +// +// BatchWriteItem - Performs multiple PutItem and DeleteItem requests across +// multiple tables in a single request. A failure of any request(s) in the batch +// will not cause the entire BatchWriteItem operation to fail. Supports batches +// of up to 25 items to put or delete, with a maximum total request size of +// 16 MB. +// +// For conceptual information about modifying data, see Working with Items +// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// and Query and Scan Operations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) +// in the Amazon DynamoDB Developer Guide. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type DynamoDB struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "dynamodb" + +// New creates a new instance of the DynamoDB client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DynamoDB client from just a session. +// svc := dynamodb.New(mySession) +// +// // Create a DynamoDB client with additional configuration +// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DynamoDB { + svc := &DynamoDB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-08-10", + JSONVersion: "1.0", + TargetPrefix: "DynamoDB_20120810", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DynamoDB operation and runs any +// custom request initialization. +func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,59 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTable", + Delay: 20, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Table.TableStatus", + Expected: "ACTIVE", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTable", + Delay: 20, + MaxAttempts: 25, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,656 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodbstreams provides a client for Amazon DynamoDB Streams. 
+package dynamodbstreams
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+const opDescribeStream = "DescribeStream"
+
+// DescribeStreamRequest generates a request for the DescribeStream operation.
+func (c *DynamoDBStreams) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) {
+	op := &request.Operation{
+		Name:       opDescribeStream,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeStreamInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeStreamOutput{}
+	req.Data = output
+	return
+}
+
+// Returns information about a stream, including the current status of the stream,
+// its Amazon Resource Name (ARN), the composition of its shards, and its corresponding
+// DynamoDB table.
+//
+// You can call DescribeStream at a maximum rate of 10 times per second.
+//
+// Each shard in the stream has a SequenceNumberRange associated with it. If
+// the SequenceNumberRange has a StartingSequenceNumber but no EndingSequenceNumber,
+// then the shard is still open (able to receive more stream records). If both
+// StartingSequenceNumber and EndingSequenceNumber are present, then that shard
+// is closed and can no longer receive more data.
+func (c *DynamoDBStreams) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOutput, error) {
+	req, out := c.DescribeStreamRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opGetRecords = "GetRecords"
+
+// GetRecordsRequest generates a request for the GetRecords operation. 
+func (c *DynamoDBStreams) GetRecordsRequest(input *GetRecordsInput) (req *request.Request, output *GetRecordsOutput) { + op := &request.Operation{ + Name: opGetRecords, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRecordsOutput{} + req.Data = output + return +} + +// Retrieves the stream records from a given shard. +// +// Specify a shard iterator using the ShardIterator parameter. The shard iterator +// specifies the position in the shard from which you want to start reading +// stream records sequentially. If there are no stream records available in +// the portion of the shard that the iterator points to, GetRecords returns +// an empty list. Note that it might take multiple calls to get to a portion +// of the shard that contains stream records. +// +// GetRecords can retrieve a maximum of 1 MB of data or 2000 stream records, +// whichever comes first. +func (c *DynamoDBStreams) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) { + req, out := c.GetRecordsRequest(input) + err := req.Send() + return out, err +} + +const opGetShardIterator = "GetShardIterator" + +// GetShardIteratorRequest generates a request for the GetShardIterator operation. +func (c *DynamoDBStreams) GetShardIteratorRequest(input *GetShardIteratorInput) (req *request.Request, output *GetShardIteratorOutput) { + op := &request.Operation{ + Name: opGetShardIterator, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetShardIteratorInput{} + } + + req = c.newRequest(op, input, output) + output = &GetShardIteratorOutput{} + req.Data = output + return +} + +// Returns a shard iterator. A shard iterator provides information about how +// to retrieve the stream records from within a shard. Use the shard iterator +// in a subsequent GetRecords request to read the stream records from the shard. 
+// +// A shard iterator expires 15 minutes after it is returned to the requester. +func (c *DynamoDBStreams) GetShardIterator(input *GetShardIteratorInput) (*GetShardIteratorOutput, error) { + req, out := c.GetShardIteratorRequest(input) + err := req.Send() + return out, err +} + +const opListStreams = "ListStreams" + +// ListStreamsRequest generates a request for the ListStreams operation. +func (c *DynamoDBStreams) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) { + op := &request.Operation{ + Name: opListStreams, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStreamsOutput{} + req.Data = output + return +} + +// Returns an array of stream ARNs associated with the current account and endpoint. +// If the TableName parameter is present, then ListStreams will return only +// the streams ARNs for that table. +// +// You can call ListStreams at a maximum rate of 5 times per second. +func (c *DynamoDBStreams) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, error) { + req, out := c.ListStreamsRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of a DescribeStream operation. +type DescribeStreamInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the first item that this operation will evaluate. Use the + // value that was returned for LastEvaluatedShardId in the previous operation. + ExclusiveStartShardId *string `min:"28" type:"string"` + + // The maximum number of shard objects to return. The upper limit is 100. + Limit *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) for the stream. 
+ StreamArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeStream operation. +type DescribeStreamOutput struct { + _ struct{} `type:"structure"` + + // A complete description of the stream, including its creation date and time, + // the DynamoDB table associated with the stream, the shard IDs within the stream, + // and the beginning and ending sequence numbers of stream records within the + // shards. + StreamDescription *StreamDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamOutput) GoString() string { + return s.String() +} + +// Represents the input of a GetRecords operation. +type GetRecordsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of records to return from the shard. The upper limit is + // 1000. + Limit *int64 `min:"1" type:"integer"` + + // A shard iterator that was retrieved from a previous GetShardIterator operation. + // This iterator can be used to access the stream records in this shard. + ShardIterator *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRecordsInput) GoString() string { + return s.String() +} + +// Represents the output of a GetRecords operation. +type GetRecordsOutput struct { + _ struct{} `type:"structure"` + + // The next position in the shard from which to start sequentially reading stream + // records. 
If set to null, the shard has been closed and the requested iterator + // will not return any more data. + NextShardIterator *string `min:"1" type:"string"` + + // The stream records from the shard, which were retrieved using the shard iterator. + Records []*Record `type:"list"` +} + +// String returns the string representation +func (s GetRecordsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRecordsOutput) GoString() string { + return s.String() +} + +// Represents the input of a GetShardIterator operation. +type GetShardIteratorInput struct { + _ struct{} `type:"structure"` + + // The sequence number of a stream record in the shard from which to start reading. + SequenceNumber *string `min:"21" type:"string"` + + // The identifier of the shard. The iterator will be returned for this shard + // ID. + ShardId *string `min:"28" type:"string" required:"true"` + + // Determines how the shard iterator is used to start reading stream records + // from the shard: + // + // AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by + // a specific sequence number. + // + // AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted + // by a specific sequence number. + // + // TRIM_HORIZON - Start reading at the last (untrimmed) stream record, which + // is the oldest record in the shard. In DynamoDB Streams, there is a 24 hour + // limit on data retention. Stream records whose age exceeds this limit are + // subject to removal (trimming) from the stream. + // + // LATEST - Start reading just after the most recent stream record in the + // shard, so that you always read the most recent data in the shard. + ShardIteratorType *string `type:"string" required:"true" enum:"ShardIteratorType"` + + // The Amazon Resource Name (ARN) for the stream. 
+ StreamArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetShardIteratorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetShardIteratorInput) GoString() string { + return s.String() +} + +// Represents the output of a GetShardIterator operation. +type GetShardIteratorOutput struct { + _ struct{} `type:"structure"` + + // The position in the shard from which to start reading stream records sequentially. + // A shard iterator specifies this position using the sequence number of a stream + // record in a shard. + ShardIterator *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetShardIteratorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetShardIteratorOutput) GoString() string { + return s.String() +} + +// Represents the input of a ListStreams operation. +type ListStreamsInput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the first item that this operation will + // evaluate. Use the value that was returned for LastEvaluatedStreamArn in the + // previous operation. + ExclusiveStartStreamArn *string `min:"37" type:"string"` + + // The maximum number of streams to return. The upper limit is 100. + Limit *int64 `min:"1" type:"integer"` + + // If this parameter is provided, then only the streams associated with this + // table name are returned. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s ListStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsInput) GoString() string { + return s.String() +} + +// Represents the output of a ListStreams operation. 
+type ListStreamsOutput struct { + _ struct{} `type:"structure"` + + // The stream ARN of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedStreamArn is empty, then the "last page" of results has + // been processed and there is no more data to be retrieved. + // + // If LastEvaluatedStreamArn is not empty, it does not necessarily mean that + // there is more data in the result set. The only way to know when you have + // reached the end of the result set is when LastEvaluatedStreamArn is empty. + LastEvaluatedStreamArn *string `min:"37" type:"string"` + + // A list of stream descriptors associated with the current account and endpoint. + Streams []*Stream `type:"list"` +} + +// String returns the string representation +func (s ListStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsOutput) GoString() string { + return s.String() +} + +// A description of a unique event within a stream. +type Record struct { + _ struct{} `type:"structure"` + + // The region in which the GetRecords request was received. + AwsRegion *string `locationName:"awsRegion" type:"string"` + + // The main body of the stream record, containing all of the DynamoDB-specific + // fields. + Dynamodb *StreamRecord `locationName:"dynamodb" type:"structure"` + + // A globally unique identifier for the event that was recorded in this stream + // record. + EventID *string `locationName:"eventID" type:"string"` + + // The type of data modification that was performed on the DynamoDB table: + // + // INSERT - a new item was added to the table. + // + // MODIFY - one or more of the item's attributes were updated. 
+ // + // REMOVE - the item was deleted from the table + EventName *string `locationName:"eventName" type:"string" enum:"OperationType"` + + // The AWS service from which the stream record originated. For DynamoDB Streams, + // this is aws:dynamodb. + EventSource *string `locationName:"eventSource" type:"string"` + + // The version number of the stream record format. Currently, this is 1.0. + EventVersion *string `locationName:"eventVersion" type:"string"` +} + +// String returns the string representation +func (s Record) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Record) GoString() string { + return s.String() +} + +// The beginning and ending sequence numbers for the stream records contained +// within a shard. +type SequenceNumberRange struct { + _ struct{} `type:"structure"` + + // The last sequence number. + EndingSequenceNumber *string `min:"21" type:"string"` + + // The first sequence number. + StartingSequenceNumber *string `min:"21" type:"string"` +} + +// String returns the string representation +func (s SequenceNumberRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SequenceNumberRange) GoString() string { + return s.String() +} + +// A uniquely identified group of stream records within a stream. +type Shard struct { + _ struct{} `type:"structure"` + + // The shard ID of the current shard's parent. + ParentShardId *string `min:"28" type:"string"` + + // The range of possible sequence numbers for the shard. + SequenceNumberRange *SequenceNumberRange `type:"structure"` + + // The system-generated identifier for this shard. 
+ ShardId *string `min:"28" type:"string"` +} + +// String returns the string representation +func (s Shard) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Shard) GoString() string { + return s.String() +} + +// Represents all of the data describing a particular stream. +type Stream struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the stream. + StreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // the AWS customer ID. + // + // the table name + // + // the StreamLabel + StreamLabel *string `type:"string"` + + // The DynamoDB table with which the stream is associated. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s Stream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stream) GoString() string { + return s.String() +} + +// Represents all of the data describing a particular stream. +type StreamDescription struct { + _ struct{} `type:"structure"` + + // The date and time when the request to create this stream was issued. + CreationRequestDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The key attribute(s) of the stream's DynamoDB table. + KeySchema []*dynamodb.KeySchemaElement `min:"1" type:"list"` + + // The shard ID of the item where the operation stopped, inclusive of the previous + // result set. Use this value to start a new operation, excluding this value + // in the new request. 
+ // + // If LastEvaluatedShardId is empty, then the "last page" of results has been + // processed and there is currently no more data to be retrieved. + // + // If LastEvaluatedShardId is not empty, it does not necessarily mean that + // there is more data in the result set. The only way to know when you have + // reached the end of the result set is when LastEvaluatedShardId is empty. + LastEvaluatedShardId *string `min:"28" type:"string"` + + // The shards that comprise the stream. + Shards []*Shard `type:"list"` + + // The Amazon Resource Name (ARN) for the stream. + StreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // the AWS customer ID. + // + // the table name + // + // the StreamLabel + StreamLabel *string `type:"string"` + + // Indicates the current status of the stream: + // + // ENABLING - Streams is currently being enabled on the DynamoDB table. + // + // ENABLED - the stream is enabled. + // + // DISABLING - Streams is currently being disabled on the DynamoDB table. + // + // DISABLED - the stream is disabled. + StreamStatus *string `type:"string" enum:"StreamStatus"` + + // Indicates the format of the records within this stream: + // + // KEYS_ONLY - only the key attributes of items that were modified in the + // DynamoDB table. + // + // NEW_IMAGE - entire item from the table, as it appeared after they were modified. + // + // OLD_IMAGE - entire item from the table, as it appeared before they were + // modified. + // + // NEW_AND_OLD_IMAGES - both the new and the old images of the items from the + // table. 
+ StreamViewType *string `type:"string" enum:"StreamViewType"` + + // The DynamoDB table with which the stream is associated. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s StreamDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamDescription) GoString() string { + return s.String() +} + +// A description of a single data modification that was performed on an item +// in a DynamoDB table. +type StreamRecord struct { + _ struct{} `type:"structure"` + + // The primary key attribute(s) for the DynamoDB item that was modified. + Keys map[string]*dynamodb.AttributeValue `type:"map"` + + // The item in the DynamoDB table as it appeared after it was modified. + NewImage map[string]*dynamodb.AttributeValue `type:"map"` + + // The item in the DynamoDB table as it appeared before it was modified. + OldImage map[string]*dynamodb.AttributeValue `type:"map"` + + // The sequence number of the stream record. + SequenceNumber *string `min:"21" type:"string"` + + // The size of the stream record, in bytes. + SizeBytes *int64 `min:"1" type:"long"` + + // The type of data from the modified DynamoDB item that was captured in this + // stream record: + // + // KEYS_ONLY - only the key attributes of the modified item. + // + // NEW_IMAGE - the entire item, as it appears after it was modified. + // + // OLD_IMAGE - the entire item, as it appeared before it was modified. + // + // NEW_AND_OLD_IMAGES — both the new and the old item images of the item. 
+ StreamViewType *string `type:"string" enum:"StreamViewType"` +} + +// String returns the string representation +func (s StreamRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamRecord) GoString() string { + return s.String() +} + +const ( + // @enum KeyType + KeyTypeHash = "HASH" + // @enum KeyType + KeyTypeRange = "RANGE" +) + +const ( + // @enum OperationType + OperationTypeInsert = "INSERT" + // @enum OperationType + OperationTypeModify = "MODIFY" + // @enum OperationType + OperationTypeRemove = "REMOVE" +) + +const ( + // @enum ShardIteratorType + ShardIteratorTypeTrimHorizon = "TRIM_HORIZON" + // @enum ShardIteratorType + ShardIteratorTypeLatest = "LATEST" + // @enum ShardIteratorType + ShardIteratorTypeAtSequenceNumber = "AT_SEQUENCE_NUMBER" + // @enum ShardIteratorType + ShardIteratorTypeAfterSequenceNumber = "AFTER_SEQUENCE_NUMBER" +) + +const ( + // @enum StreamStatus + StreamStatusEnabling = "ENABLING" + // @enum StreamStatus + StreamStatusEnabled = "ENABLED" + // @enum StreamStatus + StreamStatusDisabling = "DISABLING" + // @enum StreamStatus + StreamStatusDisabled = "DISABLED" +) + +const ( + // @enum StreamViewType + StreamViewTypeNewImage = "NEW_IMAGE" + // @enum StreamViewType + StreamViewTypeOldImage = "OLD_IMAGE" + // @enum StreamViewType + StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" + // @enum StreamViewType + StreamViewTypeKeysOnly = "KEYS_ONLY" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package dynamodbstreamsiface provides an interface for the Amazon DynamoDB Streams. +package dynamodbstreamsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/dynamodbstreams" +) + +// DynamoDBStreamsAPI is the interface type for dynamodbstreams.DynamoDBStreams. +type DynamoDBStreamsAPI interface { + DescribeStreamRequest(*dynamodbstreams.DescribeStreamInput) (*request.Request, *dynamodbstreams.DescribeStreamOutput) + + DescribeStream(*dynamodbstreams.DescribeStreamInput) (*dynamodbstreams.DescribeStreamOutput, error) + + GetRecordsRequest(*dynamodbstreams.GetRecordsInput) (*request.Request, *dynamodbstreams.GetRecordsOutput) + + GetRecords(*dynamodbstreams.GetRecordsInput) (*dynamodbstreams.GetRecordsOutput, error) + + GetShardIteratorRequest(*dynamodbstreams.GetShardIteratorInput) (*request.Request, *dynamodbstreams.GetShardIteratorOutput) + + GetShardIterator(*dynamodbstreams.GetShardIteratorInput) (*dynamodbstreams.GetShardIteratorOutput, error) + + ListStreamsRequest(*dynamodbstreams.ListStreamsInput) (*request.Request, *dynamodbstreams.ListStreamsOutput) + + ListStreams(*dynamodbstreams.ListStreamsInput) (*dynamodbstreams.ListStreamsOutput, error) +} + +var _ DynamoDBStreamsAPI = (*dynamodbstreams.DynamoDBStreams)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package dynamodbstreams_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodbstreams" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleDynamoDBStreams_DescribeStream() { + svc := dynamodbstreams.New(session.New()) + + params := &dynamodbstreams.DescribeStreamInput{ + StreamArn: aws.String("StreamArn"), // Required + ExclusiveStartShardId: aws.String("ShardId"), + Limit: aws.Int64(1), + } + resp, err := svc.DescribeStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDBStreams_GetRecords() { + svc := dynamodbstreams.New(session.New()) + + params := &dynamodbstreams.GetRecordsInput{ + ShardIterator: aws.String("ShardIterator"), // Required + Limit: aws.Int64(1), + } + resp, err := svc.GetRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleDynamoDBStreams_GetShardIterator() { + svc := dynamodbstreams.New(session.New()) + + params := &dynamodbstreams.GetShardIteratorInput{ + ShardId: aws.String("ShardId"), // Required + ShardIteratorType: aws.String("ShardIteratorType"), // Required + StreamArn: aws.String("StreamArn"), // Required + SequenceNumber: aws.String("SequenceNumber"), + } + resp, err := svc.GetShardIterator(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleDynamoDBStreams_ListStreams() { + svc := dynamodbstreams.New(session.New()) + + params := &dynamodbstreams.ListStreamsInput{ + ExclusiveStartStreamArn: aws.String("StreamArn"), + Limit: aws.Int64(1), + TableName: aws.String("TableName"), + } + resp, err := svc.ListStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/dynamodbstreams/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,112 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package dynamodbstreams + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the Amazon DynamoDB Streams API Reference. This guide describes the +// low-level API actions for accessing streams and processing stream records. +// For information about application development with DynamoDB Streams, see +// the Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide//Streams.html). +// +// Note that this document is intended for use with the following DynamoDB +// documentation: +// +// Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/) +// +// Amazon DynamoDB API Reference (http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/) +// +// The following are short descriptions of each low-level DynamoDB Streams +// API action, organized by function. +// +// DescribeStream - Returns detailed information about a particular stream. +// +// GetRecords - Retrieves the stream records from within a shard. +// +// GetShardIterator - Returns information on how to retrieve the streams +// record from a shard with a given shard ID. +// +// ListStreams - Returns a list of all the streams associated with the current +// AWS account and endpoint. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type DynamoDBStreams struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. 
+const ServiceName = "streams.dynamodb" + +// New creates a new instance of the DynamoDBStreams client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DynamoDBStreams client from just a session. +// svc := dynamodbstreams.New(mySession) +// +// // Create a DynamoDBStreams client with additional configuration +// svc := dynamodbstreams.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDBStreams { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *DynamoDBStreams { + svc := &DynamoDBStreams{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "dynamodb", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-08-10", + JSONVersion: "1.0", + TargetPrefix: "DynamoDBStreams_20120810", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DynamoDBStreams operation and runs any +// custom request initialization. 
+func (c *DynamoDBStreams) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,25097 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ec2 provides a client for Amazon Elastic Compute Cloud. +package ec2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" +) + +const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" + +// AcceptVpcPeeringConnectionRequest generates a request for the AcceptVpcPeeringConnection operation. +func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectionInput) (req *request.Request, output *AcceptVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opAcceptVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &AcceptVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Accept a VPC peering connection request. To accept a request, the VPC peering +// connection must be in the pending-acceptance state, and you must be the owner +// of the peer VPC. 
Use the DescribeVpcPeeringConnections request to view your +// outstanding VPC peering connection requests. +func (c *EC2) AcceptVpcPeeringConnection(input *AcceptVpcPeeringConnectionInput) (*AcceptVpcPeeringConnectionOutput, error) { + req, out := c.AcceptVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opAllocateAddress = "AllocateAddress" + +// AllocateAddressRequest generates a request for the AllocateAddress operation. +func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request.Request, output *AllocateAddressOutput) { + op := &request.Operation{ + Name: opAllocateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &AllocateAddressOutput{} + req.Data = output + return +} + +// Acquires an Elastic IP address. +// +// An Elastic IP address is for use either in the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) AllocateAddress(input *AllocateAddressInput) (*AllocateAddressOutput, error) { + req, out := c.AllocateAddressRequest(input) + err := req.Send() + return out, err +} + +const opAllocateHosts = "AllocateHosts" + +// AllocateHostsRequest generates a request for the AllocateHosts operation. +func (c *EC2) AllocateHostsRequest(input *AllocateHostsInput) (req *request.Request, output *AllocateHostsOutput) { + op := &request.Operation{ + Name: opAllocateHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &AllocateHostsOutput{} + req.Data = output + return +} + +// Allocates a Dedicated host to your account. 
At minimum you need to specify +// the instance size type, Availability Zone, and quantity of hosts you want +// to allocate. +func (c *EC2) AllocateHosts(input *AllocateHostsInput) (*AllocateHostsOutput, error) { + req, out := c.AllocateHostsRequest(input) + err := req.Send() + return out, err +} + +const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" + +// AssignPrivateIpAddressesRequest generates a request for the AssignPrivateIpAddresses operation. +func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInput) (req *request.Request, output *AssignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opAssignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignPrivateIpAddressesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssignPrivateIpAddressesOutput{} + req.Data = output + return +} + +// Assigns one or more secondary private IP addresses to the specified network +// interface. You can specify one or more specific secondary IP addresses, or +// you can specify the number of secondary IP addresses to be automatically +// assigned within the subnet's CIDR block range. The number of secondary IP +// addresses that you can assign to an instance varies by instance type. For +// information about instance types, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) +// in the Amazon Elastic Compute Cloud User Guide. For more information about +// Elastic IP addresses, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// AssignPrivateIpAddresses is available only in EC2-VPC. 
+func (c *EC2) AssignPrivateIpAddresses(input *AssignPrivateIpAddressesInput) (*AssignPrivateIpAddressesOutput, error) { + req, out := c.AssignPrivateIpAddressesRequest(input) + err := req.Send() + return out, err +} + +const opAssociateAddress = "AssociateAddress" + +// AssociateAddressRequest generates a request for the AssociateAddress operation. +func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *request.Request, output *AssociateAddressOutput) { + op := &request.Operation{ + Name: opAssociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateAddressInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateAddressOutput{} + req.Data = output + return +} + +// Associates an Elastic IP address with an instance or a network interface. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address +// is already associated with a different instance, it is disassociated from +// that instance and associated with the specified instance. +// +// [VPC in an EC2-Classic account] If you don't specify a private IP address, +// the Elastic IP address is associated with the primary IP address. If the +// Elastic IP address is already associated with a different instance or a network +// interface, you get an error unless you allow reassociation. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. 
+func (c *EC2) AssociateAddress(input *AssociateAddressInput) (*AssociateAddressOutput, error) { + req, out := c.AssociateAddressRequest(input) + err := req.Send() + return out, err +} + +const opAssociateDhcpOptions = "AssociateDhcpOptions" + +// AssociateDhcpOptionsRequest generates a request for the AssociateDhcpOptions operation. +func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req *request.Request, output *AssociateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opAssociateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssociateDhcpOptionsOutput{} + req.Data = output + return +} + +// Associates a set of DHCP options (that you've previously created) with the +// specified VPC, or associates no DHCP options with the VPC. +// +// After you associate the options with the VPC, any existing instances and +// all new instances that you launch in that VPC use the options. You don't +// need to restart or relaunch the instances. They automatically pick up the +// changes within a few hours, depending on how frequently the instance renews +// its DHCP lease. You can explicitly renew the lease using the operating system +// on the instance. +// +// For more information, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AssociateDhcpOptions(input *AssociateDhcpOptionsInput) (*AssociateDhcpOptionsOutput, error) { + req, out := c.AssociateDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opAssociateRouteTable = "AssociateRouteTable" + +// AssociateRouteTableRequest generates a request for the AssociateRouteTable operation. 
+func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req *request.Request, output *AssociateRouteTableOutput) { + op := &request.Operation{ + Name: opAssociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateRouteTableOutput{} + req.Data = output + return +} + +// Associates a subnet with a route table. The subnet and route table must be +// in the same VPC. This association causes traffic originating from the subnet +// to be routed according to the routes in the route table. The action returns +// an association ID, which you need in order to disassociate the route table +// from the subnet later. A route table can be associated with multiple subnets. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AssociateRouteTable(input *AssociateRouteTableInput) (*AssociateRouteTableOutput, error) { + req, out := c.AssociateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opAttachClassicLinkVpc = "AttachClassicLinkVpc" + +// AttachClassicLinkVpcRequest generates a request for the AttachClassicLinkVpc operation. +func (c *EC2) AttachClassicLinkVpcRequest(input *AttachClassicLinkVpcInput) (req *request.Request, output *AttachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opAttachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachClassicLinkVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachClassicLinkVpcOutput{} + req.Data = output + return +} + +// Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or +// more of the VPC's security groups. You cannot link an EC2-Classic instance +// to more than one VPC at a time. 
You can only link an instance that's in the +// running state. An instance is automatically unlinked from a VPC when it's +// stopped - you can link it to the VPC again when you restart it. +// +// After you've linked an instance, you cannot change the VPC security groups +// that are associated with it. To change the security groups, you must first +// unlink the instance, and then link it again. +// +// Linking your instance to a VPC is sometimes referred to as attaching your +// instance. +func (c *EC2) AttachClassicLinkVpc(input *AttachClassicLinkVpcInput) (*AttachClassicLinkVpcOutput, error) { + req, out := c.AttachClassicLinkVpcRequest(input) + err := req.Send() + return out, err +} + +const opAttachInternetGateway = "AttachInternetGateway" + +// AttachInternetGatewayRequest generates a request for the AttachInternetGateway operation. +func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (req *request.Request, output *AttachInternetGatewayOutput) { + op := &request.Operation{ + Name: opAttachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachInternetGatewayOutput{} + req.Data = output + return +} + +// Attaches an Internet gateway to a VPC, enabling connectivity between the +// Internet and the VPC. For more information about your VPC and Internet gateway, +// see the Amazon Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). 
+func (c *EC2) AttachInternetGateway(input *AttachInternetGatewayInput) (*AttachInternetGatewayOutput, error) { + req, out := c.AttachInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAttachNetworkInterface = "AttachNetworkInterface" + +// AttachNetworkInterfaceRequest generates a request for the AttachNetworkInterface operation. +func (c *EC2) AttachNetworkInterfaceRequest(input *AttachNetworkInterfaceInput) (req *request.Request, output *AttachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opAttachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachNetworkInterfaceOutput{} + req.Data = output + return +} + +// Attaches a network interface to an instance. +func (c *EC2) AttachNetworkInterface(input *AttachNetworkInterfaceInput) (*AttachNetworkInterfaceOutput, error) { + req, out := c.AttachNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opAttachVolume = "AttachVolume" + +// AttachVolumeRequest generates a request for the AttachVolume operation. +func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opAttachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &VolumeAttachment{} + req.Data = output + return +} + +// Attaches an EBS volume to a running or stopped instance and exposes it to +// the instance with the specified device name. +// +// Encrypted EBS volumes may only be attached to instances that support Amazon +// EBS encryption. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// For a list of supported device names, see Attaching an EBS Volume to an +// Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html). +// Any device names that aren't reserved for instance store volumes can be used +// for EBS volumes. For more information, see Amazon EC2 Instance Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If a volume has an AWS Marketplace product code: +// +// The volume can be attached only to a stopped instance. AWS Marketplace +// product codes are copied from the volume to the instance. You must be subscribed +// to the product. The instance type and operating system of the instance must +// support the product. For example, you can't detach a volume from a Windows +// instance and attach it to a Linux instance. For an overview of the AWS Marketplace, +// see Introducing AWS Marketplace (https://aws.amazon.com/marketplace/help/200900000). +// +// For more information about EBS volumes, see Attaching Amazon EBS Volumes +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) AttachVolume(input *AttachVolumeInput) (*VolumeAttachment, error) { + req, out := c.AttachVolumeRequest(input) + err := req.Send() + return out, err +} + +const opAttachVpnGateway = "AttachVpnGateway" + +// AttachVpnGatewayRequest generates a request for the AttachVpnGateway operation. +func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *request.Request, output *AttachVpnGatewayOutput) { + op := &request.Operation{ + Name: opAttachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachVpnGatewayOutput{} + req.Data = output + return +} + +// Attaches a virtual private gateway to a VPC. 
For more information, see Adding +// a Hardware Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) AttachVpnGateway(input *AttachVpnGatewayInput) (*AttachVpnGatewayOutput, error) { + req, out := c.AttachVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" + +// AuthorizeSecurityGroupEgressRequest generates a request for the AuthorizeSecurityGroupEgress operation. +func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupEgressInput) (req *request.Request, output *AuthorizeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupEgressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AuthorizeSecurityGroupEgressOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Adds one or more egress rules to a security group for use +// with a VPC. Specifically, this action permits instances to send traffic to +// one or more destination CIDR IP address ranges, or to one or more destination +// security groups for the same VPC. This action doesn't apply to security groups +// for use in EC2-Classic. For more information, see Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can have up to 50 rules per security group (covering both ingress and +// egress rules). +// +// Each rule consists of the protocol (for example, TCP), plus either a CIDR +// range or a source group. 
For the TCP and UDP protocols, you must also specify +// the destination port or port range. For the ICMP protocol, you must also +// specify the ICMP type and code. You can use -1 for the type or code to mean +// all types or all codes. +// +// Rule changes are propagated to affected instances as quickly as possible. +// However, a small delay might occur. +func (c *EC2) AuthorizeSecurityGroupEgress(input *AuthorizeSecurityGroupEgressInput) (*AuthorizeSecurityGroupEgressOutput, error) { + req, out := c.AuthorizeSecurityGroupEgressRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" + +// AuthorizeSecurityGroupIngressRequest generates a request for the AuthorizeSecurityGroupIngress operation. +func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroupIngressInput) (req *request.Request, output *AuthorizeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AuthorizeSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Adds one or more ingress rules to a security group. +// +// EC2-Classic: You can have up to 100 rules per group. +// +// EC2-VPC: You can have up to 50 rules per group (covering both ingress and +// egress rules). +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. 
+// +// [EC2-Classic] This action gives one or more CIDR IP address ranges permission +// to access a security group in your account, or gives one or more security +// groups (called the source groups) permission to access a security group for +// your account. A source group can be for your own AWS account, or another. +// +// [EC2-VPC] This action gives one or more CIDR IP address ranges permission +// to access a security group in your VPC, or gives one or more other security +// groups (called the source groups) permission to access a security group for +// your VPC. The security groups must all be for the same VPC. +func (c *EC2) AuthorizeSecurityGroupIngress(input *AuthorizeSecurityGroupIngressInput) (*AuthorizeSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opBundleInstance = "BundleInstance" + +// BundleInstanceRequest generates a request for the BundleInstance operation. +func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Request, output *BundleInstanceOutput) { + op := &request.Operation{ + Name: opBundleInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BundleInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &BundleInstanceOutput{} + req.Data = output + return +} + +// Bundles an Amazon instance store-backed Windows instance. +// +// During bundling, only the root device volume (C:\) is bundled. Data on other +// instance store volumes is not preserved. +// +// This action is not applicable for Linux/Unix instances or Windows instances +// that are backed by Amazon EBS. +// +// For more information, see Creating an Instance Store-Backed Windows AMI +// (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_InstanceStoreBacked_WinAMI.html). 
+func (c *EC2) BundleInstance(input *BundleInstanceInput) (*BundleInstanceOutput, error) { + req, out := c.BundleInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCancelBundleTask = "CancelBundleTask" + +// CancelBundleTaskRequest generates a request for the CancelBundleTask operation. +func (c *EC2) CancelBundleTaskRequest(input *CancelBundleTaskInput) (req *request.Request, output *CancelBundleTaskOutput) { + op := &request.Operation{ + Name: opCancelBundleTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelBundleTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelBundleTaskOutput{} + req.Data = output + return +} + +// Cancels a bundling operation for an instance store-backed Windows instance. +func (c *EC2) CancelBundleTask(input *CancelBundleTaskInput) (*CancelBundleTaskOutput, error) { + req, out := c.CancelBundleTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelConversionTask = "CancelConversionTask" + +// CancelConversionTaskRequest generates a request for the CancelConversionTask operation. +func (c *EC2) CancelConversionTaskRequest(input *CancelConversionTaskInput) (req *request.Request, output *CancelConversionTaskOutput) { + op := &request.Operation{ + Name: opCancelConversionTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelConversionTaskInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelConversionTaskOutput{} + req.Data = output + return +} + +// Cancels an active conversion task. The task can be the import of an instance +// or volume. The action removes all artifacts of the conversion, including +// a partially uploaded volume or instance. 
If the conversion is complete or +// is in the process of transferring the final disk image, the command fails +// and returns an exception. +// +// For more information, see Using the Command Line Tools to Import Your Virtual +// Machine to Amazon EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CancelConversionTask(input *CancelConversionTaskInput) (*CancelConversionTaskOutput, error) { + req, out := c.CancelConversionTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a request for the CancelExportTask operation. +func (c *EC2) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelExportTaskOutput{} + req.Data = output + return +} + +// Cancels an active export task. The request removes all artifacts of the export, +// including any partially-created Amazon S3 objects. If the export task is +// complete or is in the process of transferring the final disk image, the command +// fails and returns an error. +func (c *EC2) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelImportTask = "CancelImportTask" + +// CancelImportTaskRequest generates a request for the CancelImportTask operation. 
+func (c *EC2) CancelImportTaskRequest(input *CancelImportTaskInput) (req *request.Request, output *CancelImportTaskOutput) { + op := &request.Operation{ + Name: opCancelImportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelImportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelImportTaskOutput{} + req.Data = output + return +} + +// Cancels an in-process import virtual machine or import snapshot task. +func (c *EC2) CancelImportTask(input *CancelImportTaskInput) (*CancelImportTaskOutput, error) { + req, out := c.CancelImportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCancelReservedInstancesListing = "CancelReservedInstancesListing" + +// CancelReservedInstancesListingRequest generates a request for the CancelReservedInstancesListing operation. +func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstancesListingInput) (req *request.Request, output *CancelReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCancelReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelReservedInstancesListingInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelReservedInstancesListingOutput{} + req.Data = output + return +} + +// Cancels the specified Reserved Instance listing in the Reserved Instance +// Marketplace. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) CancelReservedInstancesListing(input *CancelReservedInstancesListingInput) (*CancelReservedInstancesListingOutput, error) { + req, out := c.CancelReservedInstancesListingRequest(input) + err := req.Send() + return out, err +} + +const opCancelSpotFleetRequests = "CancelSpotFleetRequests" + +// CancelSpotFleetRequestsRequest generates a request for the CancelSpotFleetRequests operation. +func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput) (req *request.Request, output *CancelSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotFleetRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelSpotFleetRequestsOutput{} + req.Data = output + return +} + +// Cancels the specified Spot fleet requests. +// +// After you cancel a Spot fleet request, the Spot fleet launches no new Spot +// instances. You must specify whether the Spot fleet should also terminate +// its Spot instances. If you terminate the instances, the Spot fleet request +// enters the cancelled_terminating state. Otherwise, the Spot fleet request +// enters the cancelled_running state and the instances continue to run until +// they are interrupted or you terminate them manually. +func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*CancelSpotFleetRequestsOutput, error) { + req, out := c.CancelSpotFleetRequestsRequest(input) + err := req.Send() + return out, err +} + +const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" + +// CancelSpotInstanceRequestsRequest generates a request for the CancelSpotInstanceRequests operation. 
+func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequestsInput) (req *request.Request, output *CancelSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotInstanceRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelSpotInstanceRequestsOutput{} + req.Data = output + return +} + +// Cancels one or more Spot instance requests. Spot instances are instances +// that Amazon EC2 starts on your behalf when the bid price that you specify +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price +// based on available Spot instance capacity and current Spot instance requests. +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Canceling a Spot instance request does not terminate running Spot instances +// associated with the request. +func (c *EC2) CancelSpotInstanceRequests(input *CancelSpotInstanceRequestsInput) (*CancelSpotInstanceRequestsOutput, error) { + req, out := c.CancelSpotInstanceRequestsRequest(input) + err := req.Send() + return out, err +} + +const opConfirmProductInstance = "ConfirmProductInstance" + +// ConfirmProductInstanceRequest generates a request for the ConfirmProductInstance operation. +func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) (req *request.Request, output *ConfirmProductInstanceOutput) { + op := &request.Operation{ + Name: opConfirmProductInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmProductInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmProductInstanceOutput{} + req.Data = output + return +} + +// Determines whether a product code is associated with an instance. 
This action +// can only be used by the owner of the product code. It is useful when a product +// code owner needs to verify whether another user's instance is eligible for +// support. +func (c *EC2) ConfirmProductInstance(input *ConfirmProductInstanceInput) (*ConfirmProductInstanceOutput, error) { + req, out := c.ConfirmProductInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCopyImage = "CopyImage" + +// CopyImageRequest generates a request for the CopyImage operation. +func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, output *CopyImageOutput) { + op := &request.Operation{ + Name: opCopyImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyImageInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyImageOutput{} + req.Data = output + return +} + +// Initiates the copy of an AMI from the specified source region to the current +// region. You specify the destination region by using its endpoint when making +// the request. +// +// For more information, see Copying AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + err := req.Send() + return out, err +} + +const opCopySnapshot = "CopySnapshot" + +// CopySnapshotRequest generates a request for the CopySnapshot operation. +func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { + op := &request.Operation{ + Name: opCopySnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopySnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopySnapshotOutput{} + req.Data = output + return +} + +// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon +// S3. 
You can copy the snapshot within the same region or from one region to +// another. You can use the snapshot to create EBS volumes or Amazon Machine +// Images (AMIs). The snapshot is copied to the regional endpoint that you send +// the HTTP request to. +// +// Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted +// snapshots remain unencrypted, unless the Encrypted flag is specified during +// the snapshot copy operation. By default, encrypted snapshot copies use the +// default AWS Key Management Service (AWS KMS) customer master key (CMK); however, +// you can specify a non-default CMK with the KmsKeyId parameter. +// +// For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateCustomerGateway = "CreateCustomerGateway" + +// CreateCustomerGatewayRequest generates a request for the CreateCustomerGateway operation. +func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (req *request.Request, output *CreateCustomerGatewayOutput) { + op := &request.Operation{ + Name: opCreateCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomerGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCustomerGatewayOutput{} + req.Data = output + return +} + +// Provides information to AWS about your VPN customer gateway device. The customer +// gateway is the appliance at your end of the VPN connection. (The device on +// the AWS side of the VPN connection is the virtual private gateway.) You must +// provide the Internet-routable IP address of the customer gateway's external +// interface. 
The IP address must be static and may be behind a device performing +// network address translation (NAT). +// +// For devices that use Border Gateway Protocol (BGP), you can also provide +// the device's BGP Autonomous System Number (ASN). You can use an existing +// ASN assigned to your network. If you don't have an ASN already, you can use +// a private ASN (in the 64512 - 65534 range). +// +// Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with +// the exception of 7224, which is reserved in the us-east-1 region, and 9059, +// which is reserved in the eu-west-1 region. +// +// For more information about VPN customer gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You cannot create more than one customer gateway with the same VPN type, +// IP address, and BGP ASN parameter values. If you run an identical request +// more than one time, the first request creates the customer gateway, and subsequent +// requests return information about the existing customer gateway. The subsequent +// requests do not create new customer gateway resources. +func (c *EC2) CreateCustomerGateway(input *CreateCustomerGatewayInput) (*CreateCustomerGatewayOutput, error) { + req, out := c.CreateCustomerGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateDhcpOptions = "CreateDhcpOptions" + +// CreateDhcpOptionsRequest generates a request for the CreateDhcpOptions operation. 
+func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *request.Request, output *CreateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opCreateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDhcpOptionsOutput{} + req.Data = output + return +} + +// Creates a set of DHCP options for your VPC. After creating the set, you must +// associate it with the VPC, causing all existing and new instances that you +// launch in the VPC to use this set of DHCP options. The following are the +// individual DHCP options you can specify. For more information about the options, +// see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). +// +// domain-name-servers - The IP addresses of up to four domain name servers, +// or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. +// If specifying more than one domain name server, specify the IP addresses +// in a single parameter, separated by commas. domain-name - If you're using +// AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS +// in another region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). +// Otherwise, specify a domain name (for example, MyCompany.com). Important: +// Some Linux operating systems accept multiple domain names separated by spaces. +// However, Windows and other Linux operating systems treat the value as a single +// domain, which results in unexpected behavior. If your DHCP options set is +// associated with a VPC that has instances with multiple operating systems, +// specify only one domain name. ntp-servers - The IP addresses of up to four +// Network Time Protocol (NTP) servers. netbios-name-servers - The IP addresses +// of up to four NetBIOS name servers. netbios-node-type - The NetBIOS node +// type (1, 2, 4, or 8). 
We recommend that you specify 2 (broadcast and multicast +// are not currently supported). For more information about these node types, +// see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). Your VPC automatically +// starts out with a set of DHCP options that includes only a DNS server that +// we provide (AmazonProvidedDNS). If you create a set of options, and if your +// VPC has an Internet gateway, make sure to set the domain-name-servers option +// either to AmazonProvidedDNS or to a domain name server of your choice. For +// more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateDhcpOptions(input *CreateDhcpOptionsInput) (*CreateDhcpOptionsOutput, error) { + req, out := c.CreateDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opCreateFlowLogs = "CreateFlowLogs" + +// CreateFlowLogsRequest generates a request for the CreateFlowLogs operation. +func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Request, output *CreateFlowLogsOutput) { + op := &request.Operation{ + Name: opCreateFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateFlowLogsOutput{} + req.Data = output + return +} + +// Creates one or more flow logs to capture IP traffic for a specific network +// interface, subnet, or VPC. Flow logs are delivered to a specified log group +// in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request, +// a log stream is created in CloudWatch Logs for each network interface in +// the subnet or VPC. Log streams can include information about accepted and +// rejected traffic to a network interface. You can view the data in your log +// streams using Amazon CloudWatch Logs. 
+// +// In your request, you must also specify an IAM role that has permission to +// publish logs to CloudWatch Logs. +func (c *EC2) CreateFlowLogs(input *CreateFlowLogsInput) (*CreateFlowLogsOutput, error) { + req, out := c.CreateFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opCreateImage = "CreateImage" + +// CreateImageRequest generates a request for the CreateImage operation. +func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, output *CreateImageOutput) { + op := &request.Operation{ + Name: opCreateImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateImageInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateImageOutput{} + req.Data = output + return +} + +// Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that +// is either running or stopped. +// +// If you customized your instance with instance store volumes or EBS volumes +// in addition to the root device volume, the new AMI contains block device +// mapping information for those volumes. When you launch an instance from this +// new AMI, the instance automatically launches with those additional volumes. +// +// For more information, see Creating Amazon EBS-Backed Linux AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) { + req, out := c.CreateImageRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstanceExportTask = "CreateInstanceExportTask" + +// CreateInstanceExportTaskRequest generates a request for the CreateInstanceExportTask operation. 
+func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInput) (req *request.Request, output *CreateInstanceExportTaskOutput) { + op := &request.Operation{ + Name: opCreateInstanceExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceExportTaskOutput{} + req.Data = output + return +} + +// Exports a running or stopped instance to an S3 bucket. +// +// For information about the supported operating systems, image formats, and +// known limitations for the types of instances you can export, see Exporting +// EC2 Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ExportingEC2Instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateInstanceExportTask(input *CreateInstanceExportTaskInput) (*CreateInstanceExportTaskOutput, error) { + req, out := c.CreateInstanceExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateInternetGateway = "CreateInternetGateway" + +// CreateInternetGatewayRequest generates a request for the CreateInternetGateway operation. +func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (req *request.Request, output *CreateInternetGatewayOutput) { + op := &request.Operation{ + Name: opCreateInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInternetGatewayOutput{} + req.Data = output + return +} + +// Creates an Internet gateway for use with a VPC. After creating the Internet +// gateway, you attach it to a VPC using AttachInternetGateway. +// +// For more information about your VPC and Internet gateway, see the Amazon +// Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/). 
+func (c *EC2) CreateInternetGateway(input *CreateInternetGatewayInput) (*CreateInternetGatewayOutput, error) { + req, out := c.CreateInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateKeyPair = "CreateKeyPair" + +// CreateKeyPairRequest generates a request for the CreateKeyPair operation. +func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Request, output *CreateKeyPairOutput) { + op := &request.Operation{ + Name: opCreateKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateKeyPairOutput{} + req.Data = output + return +} + +// Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores +// the public key and displays the private key for you to save to a file. The +// private key is returned as an unencrypted PEM encoded PKCS#8 private key. +// If a key with the specified name already exists, Amazon EC2 returns an error. +// +// You can have up to five thousand key pairs per region. +// +// The key pair returned to you is available only in the region in which you +// create it. To create a key pair that is available in all regions, use ImportKeyPair. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, error) { + req, out := c.CreateKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opCreateNatGateway = "CreateNatGateway" + +// CreateNatGatewayRequest generates a request for the CreateNatGateway operation. 
+func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *request.Request, output *CreateNatGatewayOutput) { + op := &request.Operation{ + Name: opCreateNatGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNatGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNatGatewayOutput{} + req.Data = output + return +} + +// Creates a NAT gateway in the specified subnet. A NAT gateway can be used +// to enable instances in a private subnet to connect to the Internet. This +// action creates a network interface in the specified subnet with a private +// IP address from the IP address range of the subnet. For more information, +// see NAT Gateways (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateNatGateway(input *CreateNatGatewayInput) (*CreateNatGatewayOutput, error) { + req, out := c.CreateNatGatewayRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkAcl = "CreateNetworkAcl" + +// CreateNetworkAclRequest generates a request for the CreateNetworkAcl operation. +func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *request.Request, output *CreateNetworkAclOutput) { + op := &request.Operation{ + Name: opCreateNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkAclInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNetworkAclOutput{} + req.Data = output + return +} + +// Creates a network ACL in a VPC. Network ACLs provide an optional layer of +// security (in addition to security groups) for the instances in your VPC. +// +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. 
+func (c *EC2) CreateNetworkAcl(input *CreateNetworkAclInput) (*CreateNetworkAclOutput, error) { + req, out := c.CreateNetworkAclRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkAclEntry = "CreateNetworkAclEntry" + +// CreateNetworkAclEntryRequest generates a request for the CreateNetworkAclEntry operation. +func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (req *request.Request, output *CreateNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opCreateNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateNetworkAclEntryOutput{} + req.Data = output + return +} + +// Creates an entry (a rule) in a network ACL with the specified rule number. +// Each network ACL has a set of numbered ingress rules and a separate set of +// numbered egress rules. When determining whether a packet should be allowed +// in or out of a subnet associated with the ACL, we process the entries in +// the ACL according to the rule numbers, in ascending order. Each network ACL +// has a set of ingress rules and a separate set of egress rules. +// +// We recommend that you leave room between the rule numbers (for example, +// 100, 110, 120, ...), and not number them one right after the other (for example, +// 101, 102, 103, ...). This makes it easier to add a rule between existing +// ones without having to renumber the rules. +// +// After you add an entry, you can't modify it; you must either replace it, +// or create an entry and delete the old one. +// +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. 
+func (c *EC2) CreateNetworkAclEntry(input *CreateNetworkAclEntryInput) (*CreateNetworkAclEntryOutput, error) { + req, out := c.CreateNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opCreateNetworkInterface = "CreateNetworkInterface" + +// CreateNetworkInterfaceRequest generates a request for the CreateNetworkInterface operation. +func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) (req *request.Request, output *CreateNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opCreateNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateNetworkInterfaceOutput{} + req.Data = output + return +} + +// Creates a network interface in the specified subnet. +// +// For more information about network interfaces, see Elastic Network Interfaces +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) in the +// Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateNetworkInterface(input *CreateNetworkInterfaceInput) (*CreateNetworkInterfaceOutput, error) { + req, out := c.CreateNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlacementGroup = "CreatePlacementGroup" + +// CreatePlacementGroupRequest generates a request for the CreatePlacementGroup operation. 
+func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req *request.Request, output *CreatePlacementGroupOutput) { + op := &request.Operation{ + Name: opCreatePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlacementGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreatePlacementGroupOutput{} + req.Data = output + return +} + +// Creates a placement group that you launch cluster instances into. You must +// give the group a name that's unique within the scope of your account. +// +// For more information about placement groups and cluster instances, see Cluster +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreatePlacementGroup(input *CreatePlacementGroupInput) (*CreatePlacementGroupOutput, error) { + req, out := c.CreatePlacementGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateReservedInstancesListing = "CreateReservedInstancesListing" + +// CreateReservedInstancesListingRequest generates a request for the CreateReservedInstancesListing operation. +func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstancesListingInput) (req *request.Request, output *CreateReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCreateReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReservedInstancesListingInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReservedInstancesListingOutput{} + req.Data = output + return +} + +// Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved +// Instance Marketplace. You can submit one Reserved Instance listing at a time. 
+// To get a list of your Reserved Instances, you can use the DescribeReservedInstances +// operation. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// To sell your Reserved Instances, you must first register as a seller in +// the Reserved Instance Marketplace. After completing the registration process, +// you can create a Reserved Instance Marketplace listing of some or all of +// your Reserved Instances, and specify the upfront price to receive for them. +// Your Reserved Instance listings then become available for purchase. To view +// the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings +// operation. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateReservedInstancesListing(input *CreateReservedInstancesListingInput) (*CreateReservedInstancesListingOutput, error) { + req, out := c.CreateReservedInstancesListingRequest(input) + err := req.Send() + return out, err +} + +const opCreateRoute = "CreateRoute" + +// CreateRouteRequest generates a request for the CreateRoute operation. +func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, output *CreateRouteOutput) { + op := &request.Operation{ + Name: opCreateRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRouteOutput{} + req.Data = output + return +} + +// Creates a route in a route table within a VPC. 
+// +// You must specify one of the following targets: Internet gateway or virtual +// private gateway, NAT instance, NAT gateway, VPC peering connection, or network +// interface. +// +// When determining how to route traffic, we use the route with the most specific +// match. For example, let's say the traffic is destined for 192.0.2.3, and +// the route table includes the following two routes: +// +// 192.0.2.0/24 (goes to some target A) +// +// 192.0.2.0/28 (goes to some target B) +// +// Both routes apply to the traffic destined for 192.0.2.3. However, the +// second route in the list covers a smaller number of IP addresses and is therefore +// more specific, so we use that route to determine where to target the traffic. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateRoute(input *CreateRouteInput) (*CreateRouteOutput, error) { + req, out := c.CreateRouteRequest(input) + err := req.Send() + return out, err +} + +const opCreateRouteTable = "CreateRouteTable" + +// CreateRouteTableRequest generates a request for the CreateRouteTable operation. +func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *request.Request, output *CreateRouteTableOutput) { + op := &request.Operation{ + Name: opCreateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRouteTableOutput{} + req.Data = output + return +} + +// Creates a route table for the specified VPC. After you create a route table, +// you can add routes and associate the table with a subnet. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. 
+func (c *EC2) CreateRouteTable(input *CreateRouteTableInput) (*CreateRouteTableOutput, error) { + req, out := c.CreateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opCreateSecurityGroup = "CreateSecurityGroup" + +// CreateSecurityGroupRequest generates a request for the CreateSecurityGroup operation. +func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req *request.Request, output *CreateSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a security group. +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. For more information, see Amazon EC2 Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// EC2-Classic: You can have up to 500 security groups. +// +// EC2-VPC: You can create up to 500 security groups per VPC. +// +// When you create a security group, you specify a friendly name of your choice. +// You can have a security group for use in EC2-Classic with the same name as +// a security group for use in a VPC. However, you can't have two security groups +// for use in EC2-Classic with the same name or two security groups for use +// in a VPC with the same name. +// +// You have a default security group for use in EC2-Classic and a default security +// group for use in your VPC. If you don't specify a security group when you +// launch an instance, the instance is launched into the appropriate default +// security group. 
A default security group includes a default rule that grants +// instances unrestricted network access to each other. +// +// You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, +// AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress. +func (c *EC2) CreateSecurityGroup(input *CreateSecurityGroupInput) (*CreateSecurityGroupOutput, error) { + req, out := c.CreateSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *Snapshot) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &Snapshot{} + req.Data = output + return +} + +// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use +// snapshots for backups, to make copies of EBS volumes, and to save data before +// shutting down an instance. +// +// When a snapshot is created, any AWS Marketplace product codes that are associated +// with the source volume are propagated to the snapshot. +// +// You can take a snapshot of an attached volume that is in use. However, snapshots +// only capture data that has been written to your EBS volume at the time the +// snapshot command is issued; this may exclude any data that has been cached +// by any applications or the operating system. If you can pause any file systems +// on the volume long enough to take a snapshot, your snapshot should be complete. +// However, if you cannot pause all file writes to the volume, you should unmount +// the volume from within the instance, issue the snapshot command, and then +// remount the volume to ensure a consistent and complete snapshot. 
You may +// remount and use your volume while the snapshot status is pending. +// +// To create a snapshot for EBS volumes that serve as root devices, you should +// stop the instance before taking the snapshot. +// +// Snapshots that are taken from encrypted volumes are automatically encrypted. +// Volumes that are created from encrypted snapshots are also automatically +// encrypted. Your encrypted volumes and any associated snapshots always remain +// protected. +// +// For more information, see Amazon Elastic Block Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) +// and Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" + +// CreateSpotDatafeedSubscriptionRequest generates a request for the CreateSpotDatafeedSubscription operation. +func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSubscriptionInput) (req *request.Request, output *CreateSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Creates a data feed for Spot instances, enabling you to view Spot instance +// usage logs. You can create one data feed per AWS account. For more information, +// see Spot Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) CreateSpotDatafeedSubscription(input *CreateSpotDatafeedSubscriptionInput) (*CreateSpotDatafeedSubscriptionOutput, error) { + req, out := c.CreateSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreateSubnet = "CreateSubnet" + +// CreateSubnetRequest generates a request for the CreateSubnet operation. +func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Request, output *CreateSubnetOutput) { + op := &request.Operation{ + Name: opCreateSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSubnetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSubnetOutput{} + req.Data = output + return +} + +// Creates a subnet in an existing VPC. +// +// When you create each subnet, you provide the VPC ID and the CIDR block you +// want for the subnet. After you create a subnet, you can't change its CIDR +// block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming +// you want only a single subnet in the VPC), or a subset of the VPC's CIDR +// block. If you create more than one subnet in a VPC, the subnets' CIDR blocks +// must not overlap. The smallest subnet (and VPC) you can create uses a /28 +// netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP +// addresses). +// +// AWS reserves both the first four and the last IP address in each subnet's +// CIDR block. They're not available for use. +// +// If you add more than one subnet to a VPC, they're set up in a star topology +// with a logical router in the middle. +// +// If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP +// address doesn't change if you stop and restart the instance (unlike a similar +// instance launched outside a VPC, which gets a new IP address when restarted). +// It's therefore possible to have a subnet with no running instances (they're +// all stopped), but no remaining IP addresses available. 
+// +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateSubnet(input *CreateSubnetInput) (*CreateSubnetOutput, error) { + req, out := c.CreateSubnetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a request for the CreateTags operation. +func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Adds or overwrites one or more tags for the specified Amazon EC2 resource +// or resources. Each resource can have a maximum of 10 tags. Each tag consists +// of a key and optional value. Tag keys must be unique per resource. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. For more information about +// creating IAM policies that control users' access to resources based on tags, +// see Supported Resource-Level Permissions for Amazon EC2 API Actions (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-iam-actions-resources.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateVolume = "CreateVolume" + +// CreateVolumeRequest generates a request for the CreateVolume operation. 
+func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Request, output *Volume) { + op := &request.Operation{ + Name: opCreateVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &Volume{} + req.Data = output + return +} + +// Creates an EBS volume that can be attached to an instance in the same Availability +// Zone. The volume is created in the regional endpoint that you send the HTTP +// request to. For more information see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). +// +// You can create a new empty volume or restore a volume from an EBS snapshot. +// Any AWS Marketplace product codes from the snapshot are propagated to the +// volume. +// +// You can create encrypted volumes with the Encrypted parameter. Encrypted +// volumes may only be attached to instances that support Amazon EBS encryption. +// Volumes that are created from encrypted snapshots are also automatically +// encrypted. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information, see Creating or Restoring an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) CreateVolume(input *CreateVolumeInput) (*Volume, error) { + req, out := c.CreateVolumeRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpc = "CreateVpc" + +// CreateVpcRequest generates a request for the CreateVpc operation. 
+func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, output *CreateVpcOutput) { + op := &request.Operation{ + Name: opCreateVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcOutput{} + req.Data = output + return +} + +// Creates a VPC with the specified CIDR block. +// +// The smallest VPC you can create uses a /28 netmask (16 IP addresses), and +// the largest uses a /16 netmask (65,536 IP addresses). To help you decide +// how big to make your VPC, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// By default, each instance you launch in the VPC has the default DHCP options, +// which includes only a default DNS server that we provide (AmazonProvidedDNS). +// For more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) { + req, out := c.CreateVpcRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpcEndpoint = "CreateVpcEndpoint" + +// CreateVpcEndpointRequest generates a request for the CreateVpcEndpoint operation. +func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *request.Request, output *CreateVpcEndpointOutput) { + op := &request.Operation{ + Name: opCreateVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcEndpointOutput{} + req.Data = output + return +} + +// Creates a VPC endpoint for a specified AWS service. An endpoint enables you +// to create a private connection between your VPC and another AWS service in +// your account. 
You can specify an endpoint policy to attach to the endpoint +// that will control access to the service from your VPC. You can also specify +// the VPC route tables that use the endpoint. +// +// Currently, only endpoints to Amazon S3 are supported. +func (c *EC2) CreateVpcEndpoint(input *CreateVpcEndpointInput) (*CreateVpcEndpointOutput, error) { + req, out := c.CreateVpcEndpointRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" + +// CreateVpcPeeringConnectionRequest generates a request for the CreateVpcPeeringConnection operation. +func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Requests a VPC peering connection between two VPCs: a requester VPC that +// you own and a peer VPC with which to create the connection. The peer VPC +// can belong to another AWS account. The requester VPC and peer VPC cannot +// have overlapping CIDR blocks. +// +// The owner of the peer VPC must accept the peering request to activate the +// peering connection. The VPC peering connection request expires after 7 days, +// after which it cannot be accepted or rejected. +// +// A CreateVpcPeeringConnection request between VPCs with overlapping CIDR +// blocks results in the VPC peering connection having a status of failed. 
+func (c *EC2) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) { + req, out := c.CreateVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnConnection = "CreateVpnConnection" + +// CreateVpnConnectionRequest generates a request for the CreateVpnConnection operation. +func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req *request.Request, output *CreateVpnConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnConnectionOutput{} + req.Data = output + return +} + +// Creates a VPN connection between an existing virtual private gateway and +// a VPN customer gateway. The only supported connection type is ipsec.1. +// +// The response includes information that you need to give to your network +// administrator to configure your customer gateway. +// +// We strongly recommend that you use HTTPS when calling this operation because +// the response contains sensitive cryptographic information for configuring +// your customer gateway. +// +// If you decide to shut down your VPN connection for any reason and later +// create a new VPN connection, you must reconfigure your customer gateway with +// the new information returned from this call. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. 
+func (c *EC2) CreateVpnConnection(input *CreateVpnConnectionInput) (*CreateVpnConnectionOutput, error) { + req, out := c.CreateVpnConnectionRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" + +// CreateVpnConnectionRouteRequest generates a request for the CreateVpnConnectionRoute operation. +func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInput) (req *request.Request, output *CreateVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opCreateVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionRouteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateVpnConnectionRouteOutput{} + req.Data = output + return +} + +// Creates a static route associated with a VPN connection between an existing +// virtual private gateway and a VPN customer gateway. The static route allows +// traffic to be routed from the virtual private gateway to the VPN customer +// gateway. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpnConnectionRoute(input *CreateVpnConnectionRouteInput) (*CreateVpnConnectionRouteOutput, error) { + req, out := c.CreateVpnConnectionRouteRequest(input) + err := req.Send() + return out, err +} + +const opCreateVpnGateway = "CreateVpnGateway" + +// CreateVpnGatewayRequest generates a request for the CreateVpnGateway operation. 
+func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *request.Request, output *CreateVpnGatewayOutput) { + op := &request.Operation{ + Name: opCreateVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVpnGatewayOutput{} + req.Data = output + return +} + +// Creates a virtual private gateway. A virtual private gateway is the endpoint +// on the VPC side of your VPN connection. You can create a virtual private +// gateway before creating the VPC itself. +// +// For more information about virtual private gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) CreateVpnGateway(input *CreateVpnGatewayInput) (*CreateVpnGatewayOutput, error) { + req, out := c.CreateVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCustomerGateway = "DeleteCustomerGateway" + +// DeleteCustomerGatewayRequest generates a request for the DeleteCustomerGateway operation. +func (c *EC2) DeleteCustomerGatewayRequest(input *DeleteCustomerGatewayInput) (req *request.Request, output *DeleteCustomerGatewayOutput) { + op := &request.Operation{ + Name: opDeleteCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomerGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCustomerGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified customer gateway. You must delete the VPN connection +// before you can delete the customer gateway. 
+func (c *EC2) DeleteCustomerGateway(input *DeleteCustomerGatewayInput) (*DeleteCustomerGatewayOutput, error) { + req, out := c.DeleteCustomerGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDhcpOptions = "DeleteDhcpOptions" + +// DeleteDhcpOptionsRequest generates a request for the DeleteDhcpOptions operation. +func (c *EC2) DeleteDhcpOptionsRequest(input *DeleteDhcpOptionsInput) (req *request.Request, output *DeleteDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDeleteDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDhcpOptionsOutput{} + req.Data = output + return +} + +// Deletes the specified set of DHCP options. You must disassociate the set +// of DHCP options before you can delete it. You can disassociate the set of +// DHCP options by associating either a new set of options or the default set +// of options with the VPC. +func (c *EC2) DeleteDhcpOptions(input *DeleteDhcpOptionsInput) (*DeleteDhcpOptionsOutput, error) { + req, out := c.DeleteDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteFlowLogs = "DeleteFlowLogs" + +// DeleteFlowLogsRequest generates a request for the DeleteFlowLogs operation. +func (c *EC2) DeleteFlowLogsRequest(input *DeleteFlowLogsInput) (req *request.Request, output *DeleteFlowLogsOutput) { + op := &request.Operation{ + Name: opDeleteFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteFlowLogsOutput{} + req.Data = output + return +} + +// Deletes one or more flow logs. 
+func (c *EC2) DeleteFlowLogs(input *DeleteFlowLogsInput) (*DeleteFlowLogsOutput, error) { + req, out := c.DeleteFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInternetGateway = "DeleteInternetGateway" + +// DeleteInternetGatewayRequest generates a request for the DeleteInternetGateway operation. +func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (req *request.Request, output *DeleteInternetGatewayOutput) { + op := &request.Operation{ + Name: opDeleteInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteInternetGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified Internet gateway. You must detach the Internet gateway +// from the VPC before you can delete it. +func (c *EC2) DeleteInternetGateway(input *DeleteInternetGatewayInput) (*DeleteInternetGatewayOutput, error) { + req, out := c.DeleteInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteKeyPair = "DeleteKeyPair" + +// DeleteKeyPairRequest generates a request for the DeleteKeyPair operation. +func (c *EC2) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Request, output *DeleteKeyPairOutput) { + op := &request.Operation{ + Name: opDeleteKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteKeyPairInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteKeyPairOutput{} + req.Data = output + return +} + +// Deletes the specified key pair, by removing the public key from Amazon EC2. 
+func (c *EC2) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, error) { + req, out := c.DeleteKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNatGateway = "DeleteNatGateway" + +// DeleteNatGatewayRequest generates a request for the DeleteNatGateway operation. +func (c *EC2) DeleteNatGatewayRequest(input *DeleteNatGatewayInput) (req *request.Request, output *DeleteNatGatewayOutput) { + op := &request.Operation{ + Name: opDeleteNatGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNatGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteNatGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its +// Elastic IP address, but does not release the address from your account. Deleting +// a NAT gateway does not delete any NAT gateway routes in your route tables. +func (c *EC2) DeleteNatGateway(input *DeleteNatGatewayInput) (*DeleteNatGatewayOutput, error) { + req, out := c.DeleteNatGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkAcl = "DeleteNetworkAcl" + +// DeleteNetworkAclRequest generates a request for the DeleteNetworkAcl operation. +func (c *EC2) DeleteNetworkAclRequest(input *DeleteNetworkAclInput) (req *request.Request, output *DeleteNetworkAclOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteNetworkAclOutput{} + req.Data = output + return +} + +// Deletes the specified network ACL. You can't delete the ACL if it's associated +// with any subnets. You can't delete the default network ACL. 
+func (c *EC2) DeleteNetworkAcl(input *DeleteNetworkAclInput) (*DeleteNetworkAclOutput, error) { + req, out := c.DeleteNetworkAclRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" + +// DeleteNetworkAclEntryRequest generates a request for the DeleteNetworkAclEntry operation. +func (c *EC2) DeleteNetworkAclEntryRequest(input *DeleteNetworkAclEntryInput) (req *request.Request, output *DeleteNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteNetworkAclEntryOutput{} + req.Data = output + return +} + +// Deletes the specified ingress or egress entry (rule) from the specified network +// ACL. +func (c *EC2) DeleteNetworkAclEntry(input *DeleteNetworkAclEntryInput) (*DeleteNetworkAclEntryOutput, error) { + req, out := c.DeleteNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteNetworkInterface = "DeleteNetworkInterface" + +// DeleteNetworkInterfaceRequest generates a request for the DeleteNetworkInterface operation. +func (c *EC2) DeleteNetworkInterfaceRequest(input *DeleteNetworkInterfaceInput) (req *request.Request, output *DeleteNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDeleteNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteNetworkInterfaceOutput{} + req.Data = output + return +} + +// Deletes the specified network interface. 
You must detach the network interface +// before you can delete it. +func (c *EC2) DeleteNetworkInterface(input *DeleteNetworkInterfaceInput) (*DeleteNetworkInterfaceOutput, error) { + req, out := c.DeleteNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDeletePlacementGroup = "DeletePlacementGroup" + +// DeletePlacementGroupRequest generates a request for the DeletePlacementGroup operation. +func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req *request.Request, output *DeletePlacementGroupOutput) { + op := &request.Operation{ + Name: opDeletePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePlacementGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePlacementGroupOutput{} + req.Data = output + return +} + +// Deletes the specified placement group. You must terminate all instances in +// the placement group before you can delete the placement group. For more information +// about placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeletePlacementGroup(input *DeletePlacementGroupInput) (*DeletePlacementGroupOutput, error) { + req, out := c.DeletePlacementGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRoute = "DeleteRoute" + +// DeleteRouteRequest generates a request for the DeleteRoute operation. 
+func (c *EC2) DeleteRouteRequest(input *DeleteRouteInput) (req *request.Request, output *DeleteRouteOutput) { + op := &request.Operation{ + Name: opDeleteRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRouteOutput{} + req.Data = output + return +} + +// Deletes the specified route from the specified route table. +func (c *EC2) DeleteRoute(input *DeleteRouteInput) (*DeleteRouteOutput, error) { + req, out := c.DeleteRouteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRouteTable = "DeleteRouteTable" + +// DeleteRouteTableRequest generates a request for the DeleteRouteTable operation. +func (c *EC2) DeleteRouteTableRequest(input *DeleteRouteTableInput) (req *request.Request, output *DeleteRouteTableOutput) { + op := &request.Operation{ + Name: opDeleteRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteTableInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRouteTableOutput{} + req.Data = output + return +} + +// Deletes the specified route table. You must disassociate the route table +// from any subnets before you can delete it. You can't delete the main route +// table. +func (c *EC2) DeleteRouteTable(input *DeleteRouteTableInput) (*DeleteRouteTableOutput, error) { + req, out := c.DeleteRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSecurityGroup = "DeleteSecurityGroup" + +// DeleteSecurityGroupRequest generates a request for the DeleteSecurityGroup operation. 
+func (c *EC2) DeleteSecurityGroupRequest(input *DeleteSecurityGroupInput) (req *request.Request, output *DeleteSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSecurityGroupOutput{} + req.Data = output + return +} + +// Deletes a security group. +// +// If you attempt to delete a security group that is associated with an instance, +// or is referenced by another security group, the operation fails with InvalidGroup.InUse +// in EC2-Classic or DependencyViolation in EC2-VPC. +func (c *EC2) DeleteSecurityGroup(input *DeleteSecurityGroupInput) (*DeleteSecurityGroupOutput, error) { + req, out := c.DeleteSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. +func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// Deletes the specified snapshot. +// +// When you make periodic snapshots of a volume, the snapshots are incremental, +// and only the blocks on the device that have changed since your last snapshot +// are saved in the new snapshot. When you delete a snapshot, only the data +// not needed for any other snapshot is removed. 
So regardless of which prior +// snapshots have been deleted, all active snapshots will have access to all +// the information needed to restore the volume. +// +// You cannot delete a snapshot of the root device of an EBS volume used by +// a registered AMI. You must first de-register the AMI before you can delete +// the snapshot. +// +// For more information, see Deleting an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" + +// DeleteSpotDatafeedSubscriptionRequest generates a request for the DeleteSpotDatafeedSubscription operation. +func (c *EC2) DeleteSpotDatafeedSubscriptionRequest(input *DeleteSpotDatafeedSubscriptionInput) (req *request.Request, output *DeleteSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Deletes the data feed for Spot instances. +func (c *EC2) DeleteSpotDatafeedSubscription(input *DeleteSpotDatafeedSubscriptionInput) (*DeleteSpotDatafeedSubscriptionOutput, error) { + req, out := c.DeleteSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSubnet = "DeleteSubnet" + +// DeleteSubnetRequest generates a request for the DeleteSubnet operation. 
+func (c *EC2) DeleteSubnetRequest(input *DeleteSubnetInput) (req *request.Request, output *DeleteSubnetOutput) { + op := &request.Operation{ + Name: opDeleteSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubnetInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSubnetOutput{} + req.Data = output + return +} + +// Deletes the specified subnet. You must terminate all running instances in +// the subnet before you can delete the subnet. +func (c *EC2) DeleteSubnet(input *DeleteSubnetInput) (*DeleteSubnetOutput, error) { + req, out := c.DeleteSubnetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a request for the DeleteTags operation. +func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified set of tags from the specified set of resources. This +// call is designed to follow a DescribeTags request. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVolume = "DeleteVolume" + +// DeleteVolumeRequest generates a request for the DeleteVolume operation. +func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { + op := &request.Operation{ + Name: opDeleteVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVolumeOutput{} + req.Data = output + return +} + +// Deletes the specified EBS volume. The volume must be in the available state +// (not attached to an instance). +// +// The volume may remain in the deleting state for several minutes. +// +// For more information, see Deleting an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) { + req, out := c.DeleteVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpc = "DeleteVpc" + +// DeleteVpcRequest generates a request for the DeleteVpc operation. +func (c *EC2) DeleteVpcRequest(input *DeleteVpcInput) (req *request.Request, output *DeleteVpcOutput) { + op := &request.Operation{ + Name: opDeleteVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVpcOutput{} + req.Data = output + return +} + +// Deletes the specified VPC. 
You must detach or delete all gateways and resources +// that are associated with the VPC before you can delete it. For example, you +// must terminate all instances running in the VPC, delete all security groups +// associated with the VPC (except the default one), delete all route tables +// associated with the VPC (except the default one), and so on. +func (c *EC2) DeleteVpc(input *DeleteVpcInput) (*DeleteVpcOutput, error) { + req, out := c.DeleteVpcRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpcEndpoints = "DeleteVpcEndpoints" + +// DeleteVpcEndpointsRequest generates a request for the DeleteVpcEndpoints operation. +func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *request.Request, output *DeleteVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDeleteVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcEndpointsOutput{} + req.Data = output + return +} + +// Deletes one or more specified VPC endpoints. Deleting the endpoint also deletes +// the endpoint routes in the route tables that were associated with the endpoint. +func (c *EC2) DeleteVpcEndpoints(input *DeleteVpcEndpointsInput) (*DeleteVpcEndpointsOutput, error) { + req, out := c.DeleteVpcEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" + +// DeleteVpcPeeringConnectionRequest generates a request for the DeleteVpcPeeringConnection operation. 
+func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Deletes a VPC peering connection. Either the owner of the requester VPC or +// the owner of the peer VPC can delete the VPC peering connection if it's in +// the active state. The owner of the requester VPC can delete a VPC peering +// connection in the pending-acceptance state. +func (c *EC2) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnConnection = "DeleteVpnConnection" + +// DeleteVpnConnectionRequest generates a request for the DeleteVpnConnection operation. +func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req *request.Request, output *DeleteVpnConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVpnConnectionOutput{} + req.Data = output + return +} + +// Deletes the specified VPN connection. +// +// If you're deleting the VPC and its associated components, we recommend that +// you detach the virtual private gateway from the VPC and delete the VPC before +// deleting the VPN connection. 
If you believe that the tunnel credentials for +// your VPN connection have been compromised, you can delete the VPN connection +// and create a new one that has new keys, without needing to delete the VPC +// or virtual private gateway. If you create a new VPN connection, you must +// reconfigure the customer gateway using the new configuration information +// returned with the new VPN connection ID. +func (c *EC2) DeleteVpnConnection(input *DeleteVpnConnectionInput) (*DeleteVpnConnectionOutput, error) { + req, out := c.DeleteVpnConnectionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" + +// DeleteVpnConnectionRouteRequest generates a request for the DeleteVpnConnectionRoute operation. +func (c *EC2) DeleteVpnConnectionRouteRequest(input *DeleteVpnConnectionRouteInput) (req *request.Request, output *DeleteVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionRouteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVpnConnectionRouteOutput{} + req.Data = output + return +} + +// Deletes the specified static route associated with a VPN connection between +// an existing virtual private gateway and a VPN customer gateway. The static +// route allows traffic to be routed from the virtual private gateway to the +// VPN customer gateway. +func (c *EC2) DeleteVpnConnectionRoute(input *DeleteVpnConnectionRouteInput) (*DeleteVpnConnectionRouteOutput, error) { + req, out := c.DeleteVpnConnectionRouteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVpnGateway = "DeleteVpnGateway" + +// DeleteVpnGatewayRequest generates a request for the DeleteVpnGateway operation. 
+func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *request.Request, output *DeleteVpnGatewayOutput) { + op := &request.Operation{ + Name: opDeleteVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVpnGatewayOutput{} + req.Data = output + return +} + +// Deletes the specified virtual private gateway. We recommend that before you +// delete a virtual private gateway, you detach it from the VPC and delete the +// VPN connection. Note that you don't need to delete the virtual private gateway +// if you plan to delete and recreate the VPN connection between your VPC and +// your network. +func (c *EC2) DeleteVpnGateway(input *DeleteVpnGatewayInput) (*DeleteVpnGatewayOutput, error) { + req, out := c.DeleteVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterImage = "DeregisterImage" + +// DeregisterImageRequest generates a request for the DeregisterImage operation. +func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request.Request, output *DeregisterImageOutput) { + op := &request.Operation{ + Name: opDeregisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterImageInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterImageOutput{} + req.Data = output + return +} + +// Deregisters the specified AMI. After you deregister an AMI, it can't be used +// to launch new instances. +// +// This command does not delete the AMI. 
+func (c *EC2) DeregisterImage(input *DeregisterImageInput) (*DeregisterImageOutput, error) { + req, out := c.DeregisterImageRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a request for the DescribeAccountAttributes operation. +func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountAttributesOutput{} + req.Data = output + return +} + +// Describes attributes of your AWS account. The following are the supported +// account attributes: +// +// supported-platforms: Indicates whether your account can launch instances +// into EC2-Classic and EC2-VPC, or only into EC2-VPC. +// +// default-vpc: The ID of the default VPC for your account, or none. +// +// max-instances: The maximum number of On-Demand instances that you can +// run. +// +// vpc-max-security-groups-per-interface: The maximum number of security +// groups that you can assign to a network interface. +// +// max-elastic-ips: The maximum number of Elastic IP addresses that you can +// allocate for use with EC2-Classic. +// +// vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you +// can allocate for use with EC2-VPC. +func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAddresses = "DescribeAddresses" + +// DescribeAddressesRequest generates a request for the DescribeAddresses operation. 
+func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *request.Request, output *DescribeAddressesOutput) { + op := &request.Operation{ + Name: opDescribeAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAddressesOutput{} + req.Data = output + return +} + +// Describes one or more of your Elastic IP addresses. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddressesOutput, error) { + req, out := c.DescribeAddressesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAvailabilityZones = "DescribeAvailabilityZones" + +// DescribeAvailabilityZonesRequest generates a request for the DescribeAvailabilityZones operation. +func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesInput) (req *request.Request, output *DescribeAvailabilityZonesOutput) { + op := &request.Operation{ + Name: opDescribeAvailabilityZones, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAvailabilityZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAvailabilityZonesOutput{} + req.Data = output + return +} + +// Describes one or more of the Availability Zones that are available to you. +// The results include zones only for the region you're currently using. If +// there is an event impacting an Availability Zone, you can use this request +// to view the state and any provided message for that Availability Zone. 
+// +// For more information, see Regions and Availability Zones (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeAvailabilityZones(input *DescribeAvailabilityZonesInput) (*DescribeAvailabilityZonesOutput, error) { + req, out := c.DescribeAvailabilityZonesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeBundleTasks = "DescribeBundleTasks" + +// DescribeBundleTasksRequest generates a request for the DescribeBundleTasks operation. +func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req *request.Request, output *DescribeBundleTasksOutput) { + op := &request.Operation{ + Name: opDescribeBundleTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBundleTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeBundleTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your bundling tasks. +// +// Completed bundle tasks are listed for only a limited time. If your bundle +// task is no longer in the list, you can still register an AMI from it. Just +// use RegisterImage with the Amazon S3 bucket name and image manifest name +// you provided to the bundle task. +func (c *EC2) DescribeBundleTasks(input *DescribeBundleTasksInput) (*DescribeBundleTasksOutput, error) { + req, out := c.DescribeBundleTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" + +// DescribeClassicLinkInstancesRequest generates a request for the DescribeClassicLinkInstances operation. 
+func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInstancesInput) (req *request.Request, output *DescribeClassicLinkInstancesOutput) { + op := &request.Operation{ + Name: opDescribeClassicLinkInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClassicLinkInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClassicLinkInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your linked EC2-Classic instances. This request +// only returns information about EC2-Classic instances linked to a VPC through +// ClassicLink; you cannot use this request to return information about other +// instances. +func (c *EC2) DescribeClassicLinkInstances(input *DescribeClassicLinkInstancesInput) (*DescribeClassicLinkInstancesOutput, error) { + req, out := c.DescribeClassicLinkInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConversionTasks = "DescribeConversionTasks" + +// DescribeConversionTasksRequest generates a request for the DescribeConversionTasks operation. +func (c *EC2) DescribeConversionTasksRequest(input *DescribeConversionTasksInput) (req *request.Request, output *DescribeConversionTasksOutput) { + op := &request.Operation{ + Name: opDescribeConversionTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConversionTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConversionTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your conversion tasks. For more information, see +// Using the Command Line Tools to Import Your Virtual Machine to Amazon EC2 +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DescribeConversionTasks(input *DescribeConversionTasksInput) (*DescribeConversionTasksOutput, error) { + req, out := c.DescribeConversionTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCustomerGateways = "DescribeCustomerGateways" + +// DescribeCustomerGatewaysRequest generates a request for the DescribeCustomerGateways operation. +func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInput) (req *request.Request, output *DescribeCustomerGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeCustomerGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCustomerGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCustomerGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your VPN customer gateways. +// +// For more information about VPN customer gateways, see Adding a Hardware +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeCustomerGateways(input *DescribeCustomerGatewaysInput) (*DescribeCustomerGatewaysOutput, error) { + req, out := c.DescribeCustomerGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDhcpOptions = "DescribeDhcpOptions" + +// DescribeDhcpOptionsRequest generates a request for the DescribeDhcpOptions operation. +func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req *request.Request, output *DescribeDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDescribeDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDhcpOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDhcpOptionsOutput{} + req.Data = output + return +} + +// Describes one or more of your DHCP options sets. 
+// +// For more information about DHCP options sets, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeDhcpOptions(input *DescribeDhcpOptionsInput) (*DescribeDhcpOptionsOutput, error) { + req, out := c.DescribeDhcpOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. +func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportTasksOutput{} + req.Data = output + return +} + +// Describes one or more of your export tasks. +func (c *EC2) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFlowLogs = "DescribeFlowLogs" + +// DescribeFlowLogsRequest generates a request for the DescribeFlowLogs operation. +func (c *EC2) DescribeFlowLogsRequest(input *DescribeFlowLogsInput) (req *request.Request, output *DescribeFlowLogsOutput) { + op := &request.Operation{ + Name: opDescribeFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFlowLogsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFlowLogsOutput{} + req.Data = output + return +} + +// Describes one or more flow logs. To view the information in your flow logs +// (the log streams for the network interfaces), you must use the CloudWatch +// Logs console or the CloudWatch Logs API. 
+func (c *EC2) DescribeFlowLogs(input *DescribeFlowLogsInput) (*DescribeFlowLogsOutput, error) { + req, out := c.DescribeFlowLogsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeHosts = "DescribeHosts" + +// DescribeHostsRequest generates a request for the DescribeHosts operation. +func (c *EC2) DescribeHostsRequest(input *DescribeHostsInput) (req *request.Request, output *DescribeHostsOutput) { + op := &request.Operation{ + Name: opDescribeHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHostsOutput{} + req.Data = output + return +} + +// Describes one or more of your Dedicated hosts. +// +// The results describe only the Dedicated hosts in the region you're currently +// using. All listed instances consume capacity on your Dedicated host. Dedicated +// hosts that have recently been released will be listed with the state released. +func (c *EC2) DescribeHosts(input *DescribeHostsInput) (*DescribeHostsOutput, error) { + req, out := c.DescribeHostsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeIdFormat = "DescribeIdFormat" + +// DescribeIdFormatRequest generates a request for the DescribeIdFormat operation. +func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *request.Request, output *DescribeIdFormatOutput) { + op := &request.Operation{ + Name: opDescribeIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdFormatInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIdFormatOutput{} + req.Data = output + return +} + +// Describes the ID format settings for your resources on a per-region basis, +// for example, to view which resource types are enabled for longer IDs. 
This +// request only returns information about resource types whose ID formats can +// be modified; it does not return information about other resource types. +// +// The following resource types support longer IDs: instance | reservation. +// +// These settings apply to the IAM user who makes the request; they do not +// apply to the entire AWS account. By default, an IAM user defaults to the +// same settings as the root user, unless they explicitly override the settings +// by running the ModifyIdFormat command. Resources created with longer IDs +// are visible to all IAM users, regardless of these settings and provided that +// they have permission to use the relevant Describe command for the resource +// type. +func (c *EC2) DescribeIdFormat(input *DescribeIdFormatInput) (*DescribeIdFormatOutput, error) { + req, out := c.DescribeIdFormatRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImageAttribute = "DescribeImageAttribute" + +// DescribeImageAttributeRequest generates a request for the DescribeImageAttribute operation. +func (c *EC2) DescribeImageAttributeRequest(input *DescribeImageAttributeInput) (req *request.Request, output *DescribeImageAttributeOutput) { + op := &request.Operation{ + Name: opDescribeImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeImageAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified AMI. You can specify only +// one attribute at a time. +func (c *EC2) DescribeImageAttribute(input *DescribeImageAttributeInput) (*DescribeImageAttributeOutput, error) { + req, out := c.DescribeImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImages = "DescribeImages" + +// DescribeImagesRequest generates a request for the DescribeImages operation. 
+func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeImages,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeImagesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeImagesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes one or more of the images (AMIs, AKIs, and ARIs) available to you.
+// Images available to you include public images, private images that you own,
+// and private images owned by other AWS accounts but for which you have explicit
+// launch permissions.
+//
+// Deregistered images are included in the returned results for an unspecified
+// interval after deregistration.
+func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) {
+	req, out := c.DescribeImagesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeImportImageTasks = "DescribeImportImageTasks"
+
+// DescribeImportImageTasksRequest generates a request for the DescribeImportImageTasks operation.
+func (c *EC2) DescribeImportImageTasksRequest(input *DescribeImportImageTasksInput) (req *request.Request, output *DescribeImportImageTasksOutput) {
+	op := &request.Operation{
+		Name:       opDescribeImportImageTasks,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeImportImageTasksInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeImportImageTasksOutput{}
+	req.Data = output
+	return
+}
+
+// Displays details about import virtual machine or import snapshot tasks
+// that are already created.
+func (c *EC2) DescribeImportImageTasks(input *DescribeImportImageTasksInput) (*DescribeImportImageTasksOutput, error) { + req, out := c.DescribeImportImageTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks" + +// DescribeImportSnapshotTasksRequest generates a request for the DescribeImportSnapshotTasks operation. +func (c *EC2) DescribeImportSnapshotTasksRequest(input *DescribeImportSnapshotTasksInput) (req *request.Request, output *DescribeImportSnapshotTasksOutput) { + op := &request.Operation{ + Name: opDescribeImportSnapshotTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImportSnapshotTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeImportSnapshotTasksOutput{} + req.Data = output + return +} + +// Describes your import snapshot tasks. +func (c *EC2) DescribeImportSnapshotTasks(input *DescribeImportSnapshotTasksInput) (*DescribeImportSnapshotTasksOutput, error) { + req, out := c.DescribeImportSnapshotTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceAttribute = "DescribeInstanceAttribute" + +// DescribeInstanceAttributeRequest generates a request for the DescribeInstanceAttribute operation. +func (c *EC2) DescribeInstanceAttributeRequest(input *DescribeInstanceAttributeInput) (req *request.Request, output *DescribeInstanceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified instance. You can specify +// only one attribute at a time. 
Valid attribute values are: instanceType | +// kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior +// | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | +// groupSet | ebsOptimized | sriovNetSupport +func (c *EC2) DescribeInstanceAttribute(input *DescribeInstanceAttributeInput) (*DescribeInstanceAttributeOutput, error) { + req, out := c.DescribeInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceStatus = "DescribeInstanceStatus" + +// DescribeInstanceStatusRequest generates a request for the DescribeInstanceStatus operation. +func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) (req *request.Request, output *DescribeInstanceStatusOutput) { + op := &request.Operation{ + Name: opDescribeInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceStatusOutput{} + req.Data = output + return +} + +// Describes the status of one or more instances. +// +// Instance status includes the following components: +// +// Status checks - Amazon EC2 performs status checks on running EC2 instances +// to identify hardware and software issues. For more information, see Status +// Checks for Your Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) +// and Troubleshooting Instances with Failed Status Checks (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, +// or terminate) for your instances related to hardware issues, software updates, +// or system maintenance. For more information, see Scheduled Events for Your +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Instance state - You can manage your instances from the moment you launch +// them through their termination. For more information, see Instance Lifecycle +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*DescribeInstanceStatusOutput, error) { + req, out := c.DescribeInstanceStatusRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(p *DescribeInstanceStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeInstanceStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeInstanceStatusOutput), lastPage) + }) +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a request for the DescribeInstances operation. 
+func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your instances. +// +// If you specify one or more instance IDs, Amazon EC2 returns information +// for those instances. If you do not specify instance IDs, Amazon EC2 returns +// information for all relevant instances. If you specify an instance ID that +// is not valid, an error is returned. If you specify an instance that you do +// not own, it is not included in the returned results. +// +// Recently terminated instances might appear in the returned results. This +// interval is usually less than one hour. +func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(p *DescribeInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeInstancesOutput), lastPage) + }) +} + +const opDescribeInternetGateways = "DescribeInternetGateways" + +// DescribeInternetGatewaysRequest generates a request for the DescribeInternetGateways operation. 
+func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInput) (req *request.Request, output *DescribeInternetGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeInternetGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInternetGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInternetGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your Internet gateways. +func (c *EC2) DescribeInternetGateways(input *DescribeInternetGatewaysInput) (*DescribeInternetGatewaysOutput, error) { + req, out := c.DescribeInternetGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeKeyPairs = "DescribeKeyPairs" + +// DescribeKeyPairsRequest generates a request for the DescribeKeyPairs operation. +func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *request.Request, output *DescribeKeyPairsOutput) { + op := &request.Operation{ + Name: opDescribeKeyPairs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKeyPairsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeKeyPairsOutput{} + req.Data = output + return +} + +// Describes one or more of your key pairs. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeKeyPairs(input *DescribeKeyPairsInput) (*DescribeKeyPairsOutput, error) { + req, out := c.DescribeKeyPairsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMovingAddresses = "DescribeMovingAddresses" + +// DescribeMovingAddressesRequest generates a request for the DescribeMovingAddresses operation. 
+func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput) (req *request.Request, output *DescribeMovingAddressesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeMovingAddresses,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeMovingAddressesInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeMovingAddressesOutput{}
+	req.Data = output
+	return
+}
+
+// Describes your Elastic IP addresses that are being moved to the EC2-VPC platform,
+// or that are being restored to the EC2-Classic platform. This request does
+// not return information about any other Elastic IP addresses in your account.
+func (c *EC2) DescribeMovingAddresses(input *DescribeMovingAddressesInput) (*DescribeMovingAddressesOutput, error) {
+	req, out := c.DescribeMovingAddressesRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeNatGateways = "DescribeNatGateways"
+
+// DescribeNatGatewaysRequest generates a request for the DescribeNatGateways operation.
+func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req *request.Request, output *DescribeNatGatewaysOutput) {
+	op := &request.Operation{
+		Name:       opDescribeNatGateways,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeNatGatewaysInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DescribeNatGatewaysOutput{}
+	req.Data = output
+	return
+}
+
+// Describes one or more of your NAT gateways.
+func (c *EC2) DescribeNatGateways(input *DescribeNatGatewaysInput) (*DescribeNatGatewaysOutput, error) {
+	req, out := c.DescribeNatGatewaysRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDescribeNetworkAcls = "DescribeNetworkAcls"
+
+// DescribeNetworkAclsRequest generates a request for the DescribeNetworkAcls operation.
+func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req *request.Request, output *DescribeNetworkAclsOutput) { + op := &request.Operation{ + Name: opDescribeNetworkAcls, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkAclsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkAclsOutput{} + req.Data = output + return +} + +// Describes one or more of your network ACLs. +// +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeNetworkAcls(input *DescribeNetworkAclsInput) (*DescribeNetworkAclsOutput, error) { + req, out := c.DescribeNetworkAclsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute" + +// DescribeNetworkInterfaceAttributeRequest generates a request for the DescribeNetworkInterfaceAttribute operation. +func (c *EC2) DescribeNetworkInterfaceAttributeRequest(input *DescribeNetworkInterfaceAttributeInput) (req *request.Request, output *DescribeNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Describes a network interface attribute. You can specify only one attribute +// at a time. 
+func (c *EC2) DescribeNetworkInterfaceAttribute(input *DescribeNetworkInterfaceAttributeInput) (*DescribeNetworkInterfaceAttributeOutput, error) { + req, out := c.DescribeNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" + +// DescribeNetworkInterfacesRequest generates a request for the DescribeNetworkInterfaces operation. +func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesInput) (req *request.Request, output *DescribeNetworkInterfacesOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfacesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeNetworkInterfacesOutput{} + req.Data = output + return +} + +// Describes one or more of your network interfaces. +func (c *EC2) DescribeNetworkInterfaces(input *DescribeNetworkInterfacesInput) (*DescribeNetworkInterfacesOutput, error) { + req, out := c.DescribeNetworkInterfacesRequest(input) + err := req.Send() + return out, err +} + +const opDescribePlacementGroups = "DescribePlacementGroups" + +// DescribePlacementGroupsRequest generates a request for the DescribePlacementGroups operation. +func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput) (req *request.Request, output *DescribePlacementGroupsOutput) { + op := &request.Operation{ + Name: opDescribePlacementGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePlacementGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePlacementGroupsOutput{} + req.Data = output + return +} + +// Describes one or more of your placement groups. 
For more information about +// placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribePlacementGroups(input *DescribePlacementGroupsInput) (*DescribePlacementGroupsOutput, error) { + req, out := c.DescribePlacementGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribePrefixLists = "DescribePrefixLists" + +// DescribePrefixListsRequest generates a request for the DescribePrefixLists operation. +func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req *request.Request, output *DescribePrefixListsOutput) { + op := &request.Operation{ + Name: opDescribePrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePrefixListsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePrefixListsOutput{} + req.Data = output + return +} + +// Describes available AWS services in a prefix list format, which includes +// the prefix list name and prefix list ID of the service and the IP address +// range for the service. A prefix list ID is required for creating an outbound +// security group rule that allows traffic from a VPC to access an AWS service +// through a VPC endpoint. +func (c *EC2) DescribePrefixLists(input *DescribePrefixListsInput) (*DescribePrefixListsOutput, error) { + req, out := c.DescribePrefixListsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRegions = "DescribeRegions" + +// DescribeRegionsRequest generates a request for the DescribeRegions operation. 
+func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request.Request, output *DescribeRegionsOutput) { + op := &request.Operation{ + Name: opDescribeRegions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRegionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRegionsOutput{} + req.Data = output + return +} + +// Describes one or more regions that are currently available to you. +// +// For a list of the regions supported by Amazon EC2, see Regions and Endpoints +// (http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). +func (c *EC2) DescribeRegions(input *DescribeRegionsInput) (*DescribeRegionsOutput, error) { + req, out := c.DescribeRegionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstances = "DescribeReservedInstances" + +// DescribeReservedInstancesRequest generates a request for the DescribeReservedInstances operation. +func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesInput) (req *request.Request, output *DescribeReservedInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of the Reserved Instances that you purchased. +// +// For more information about Reserved Instances, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) (*DescribeReservedInstancesOutput, error) { + req, out := c.DescribeReservedInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" + +// DescribeReservedInstancesListingsRequest generates a request for the DescribeReservedInstancesListings operation. +func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedInstancesListingsInput) (req *request.Request, output *DescribeReservedInstancesListingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesListings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesListingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesListingsOutput{} + req.Data = output + return +} + +// Describes your account's Reserved Instance listings in the Reserved Instance +// Marketplace. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// As a seller, you choose to list some or all of your Reserved Instances, +// and you specify the upfront price to receive for them. Your Reserved Instances +// are then listed in the Reserved Instance Marketplace and are available for +// purchase. +// +// As a buyer, you specify the configuration of the Reserved Instance to purchase, +// and the Marketplace matches what you're searching for with what's available. +// The Marketplace first sells the lowest priced Reserved Instances to you, +// and continues to sell available Reserved Instance listings to you until your +// demand is met. 
You are charged based on the total price of all of the listings +// that you purchase. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstancesListings(input *DescribeReservedInstancesListingsInput) (*DescribeReservedInstancesListingsOutput, error) { + req, out := c.DescribeReservedInstancesListingsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModifications" + +// DescribeReservedInstancesModificationsRequest generates a request for the DescribeReservedInstancesModifications operation. +func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReservedInstancesModificationsInput) (req *request.Request, output *DescribeReservedInstancesModificationsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesModifications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesModificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesModificationsOutput{} + req.Data = output + return +} + +// Describes the modifications made to your Reserved Instances. If no parameter +// is specified, information about all your Reserved Instances modification +// requests is returned. If a modification ID is specified, only information +// about the specific modification is returned. +// +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInstancesModificationsInput) (*DescribeReservedInstancesModificationsOutput, error) { + req, out := c.DescribeReservedInstancesModificationsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(p *DescribeReservedInstancesModificationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedInstancesModificationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedInstancesModificationsOutput), lastPage) + }) +} + +const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings" + +// DescribeReservedInstancesOfferingsRequest generates a request for the DescribeReservedInstancesOfferings operation. +func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedInstancesOfferingsInput) (req *request.Request, output *DescribeReservedInstancesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedInstancesOfferingsOutput{} + req.Data = output + return +} + +// Describes Reserved Instance offerings that are available for purchase. With +// Reserved Instances, you purchase the right to launch instances for a period +// of time. 
During that time period, you do not receive insufficient capacity +// errors, and you pay a lower usage rate than the rate charged for On-Demand +// instances for the actual time used. +// +// If you have listed your own Reserved Instances for sale in the Reserved +// Instance Marketplace, they will be excluded from these results. This is to +// ensure that you do not purchase your own Reserved Instances. +// +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstancesOfferingsInput) (*DescribeReservedInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedInstancesOfferingsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(p *DescribeReservedInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedInstancesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedInstancesOfferingsOutput), lastPage) + }) +} + +const opDescribeRouteTables = "DescribeRouteTables" + +// DescribeRouteTablesRequest generates a request for the DescribeRouteTables operation. +func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req *request.Request, output *DescribeRouteTablesOutput) { + op := &request.Operation{ + Name: opDescribeRouteTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRouteTablesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRouteTablesOutput{} + req.Data = output + return +} + +// Describes one or more of your route tables. 
+// +// Each subnet in your VPC must be associated with a route table. If a subnet +// is not explicitly associated with any route table, it is implicitly associated +// with the main route table. This command does not return the subnet ID for +// implicit associations. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRouteTablesOutput, error) { + req, out := c.DescribeRouteTablesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvailability" + +// DescribeScheduledInstanceAvailabilityRequest generates a request for the DescribeScheduledInstanceAvailability operation. +func (c *EC2) DescribeScheduledInstanceAvailabilityRequest(input *DescribeScheduledInstanceAvailabilityInput) (req *request.Request, output *DescribeScheduledInstanceAvailabilityOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstanceAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScheduledInstanceAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledInstanceAvailabilityOutput{} + req.Data = output + return +} + +// Finds available schedules that meet the specified criteria. +// +// You can search for an available schedule no more than 3 months in advance. +// You must meet the minimum required duration of 1,200 hours per year. For +// example, the minimum daily schedule is 4 hours, the minimum weekly schedule +// is 24 hours, and the minimum monthly schedule is 100 hours. +// +// After you find a schedule that meets your needs, call PurchaseScheduledInstances +// to purchase Scheduled Instances with that schedule. 
+func (c *EC2) DescribeScheduledInstanceAvailability(input *DescribeScheduledInstanceAvailabilityInput) (*DescribeScheduledInstanceAvailabilityOutput, error) { + req, out := c.DescribeScheduledInstanceAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledInstances = "DescribeScheduledInstances" + +// DescribeScheduledInstancesRequest generates a request for the DescribeScheduledInstances operation. +func (c *EC2) DescribeScheduledInstancesRequest(input *DescribeScheduledInstancesInput) (req *request.Request, output *DescribeScheduledInstancesOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your Scheduled Instances. +func (c *EC2) DescribeScheduledInstances(input *DescribeScheduledInstancesInput) (*DescribeScheduledInstancesOutput, error) { + req, out := c.DescribeScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSecurityGroups = "DescribeSecurityGroups" + +// DescribeSecurityGroupsRequest generates a request for the DescribeSecurityGroups operation. +func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput) (req *request.Request, output *DescribeSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSecurityGroupsOutput{} + req.Data = output + return +} + +// Describes one or more of your security groups. +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. 
For more information, see Amazon EC2 Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeSecurityGroups(input *DescribeSecurityGroupsInput) (*DescribeSecurityGroupsOutput, error) { + req, out := c.DescribeSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" + +// DescribeSnapshotAttributeRequest generates a request for the DescribeSnapshotAttribute operation. +func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeInput) (req *request.Request, output *DescribeSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified snapshot. You can specify +// only one attribute at a time. +// +// For more information about EBS snapshots, see Amazon EBS Snapshots in the +// Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) (*DescribeSnapshotAttributeOutput, error) { + req, out := c.DescribeSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. 
+func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// Describes one or more of the EBS snapshots available to you. Available snapshots +// include public snapshots available for any AWS account to launch, private +// snapshots that you own, and private snapshots owned by another AWS account +// but for which you've been given explicit create volume permissions. +// +// The create volume permissions fall into the following categories: +// +// public: The owner of the snapshot granted create volume permissions for +// the snapshot to the all group. All AWS accounts have create volume permissions +// for these snapshots. explicit: The owner of the snapshot granted create volume +// permissions to a specific AWS account. implicit: An AWS account has implicit +// create volume permissions for all snapshots it owns. The list of snapshots +// returned can be modified by specifying snapshot IDs, snapshot owners, or +// AWS accounts with create volume permissions. If no options are specified, +// Amazon EC2 returns all snapshots for which you have create volume permissions. +// +// If you specify one or more snapshot IDs, only snapshots that have the specified +// IDs are returned. If you specify an invalid snapshot ID, an error is returned. +// If you specify a snapshot ID for which you do not have access, it is not +// included in the returned results. 
+// +// If you specify one or more snapshot owners, only snapshots from the specified +// owners and for which you have access are returned. The results can include +// the AWS account IDs of the specified owners, amazon for snapshots owned by +// Amazon, or self for snapshots that you own. +// +// If you specify a list of restorable users, only snapshots with create snapshot +// permissions for those users are returned. You can specify AWS account IDs +// (if you own the snapshots), self for snapshots for which you own or have +// explicit permissions, or all for public snapshots. +// +// If you are describing a long list of snapshots, you can paginate the output +// to make the list more manageable. The MaxResults parameter sets the maximum +// number of results returned in a single page. If the list of results exceeds +// your MaxResults value, then that number of results is returned along with +// a NextToken value that can be passed to a subsequent DescribeSnapshots request +// to retrieve the remaining results. +// +// For more information about EBS snapshots, see Amazon EBS Snapshots in the +// Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSnapshotsOutput), lastPage) + }) +} + +const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription" + +// DescribeSpotDatafeedSubscriptionRequest generates a request for the DescribeSpotDatafeedSubscription operation. 
+func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafeedSubscriptionInput) (req *request.Request, output *DescribeSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDescribeSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotDatafeedSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotDatafeedSubscriptionOutput{} + req.Data = output + return +} + +// Describes the data feed for Spot instances. For more information, see Spot +// Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeSpotDatafeedSubscription(input *DescribeSpotDatafeedSubscriptionInput) (*DescribeSpotDatafeedSubscriptionOutput, error) { + req, out := c.DescribeSpotDatafeedSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" + +// DescribeSpotFleetInstancesRequest generates a request for the DescribeSpotFleetInstances operation. +func (c *EC2) DescribeSpotFleetInstancesRequest(input *DescribeSpotFleetInstancesInput) (req *request.Request, output *DescribeSpotFleetInstancesOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetInstancesOutput{} + req.Data = output + return +} + +// Describes the running instances for the specified Spot fleet. 
+func (c *EC2) DescribeSpotFleetInstances(input *DescribeSpotFleetInstancesInput) (*DescribeSpotFleetInstancesOutput, error) { + req, out := c.DescribeSpotFleetInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" + +// DescribeSpotFleetRequestHistoryRequest generates a request for the DescribeSpotFleetRequestHistory operation. +func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetRequestHistoryInput) (req *request.Request, output *DescribeSpotFleetRequestHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequestHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetRequestHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetRequestHistoryOutput{} + req.Data = output + return +} + +// Describes the events for the specified Spot fleet request during the specified +// time. +// +// Spot fleet events are delayed by up to 30 seconds before they can be described. +// This ensures that you can query by the last evaluated time and not miss a +// recorded event. +func (c *EC2) DescribeSpotFleetRequestHistory(input *DescribeSpotFleetRequestHistoryInput) (*DescribeSpotFleetRequestHistoryOutput, error) { + req, out := c.DescribeSpotFleetRequestHistoryRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" + +// DescribeSpotFleetRequestsRequest generates a request for the DescribeSpotFleetRequests operation. 
+func (c *EC2) DescribeSpotFleetRequestsRequest(input *DescribeSpotFleetRequestsInput) (req *request.Request, output *DescribeSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotFleetRequestsOutput{} + req.Data = output + return +} + +// Describes your Spot fleet requests. +func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) (*DescribeSpotFleetRequestsOutput, error) { + req, out := c.DescribeSpotFleetRequestsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" + +// DescribeSpotInstanceRequestsRequest generates a request for the DescribeSpotInstanceRequests operation. +func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceRequestsInput) (req *request.Request, output *DescribeSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotInstanceRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotInstanceRequestsOutput{} + req.Data = output + return +} + +// Describes the Spot instance requests that belong to your account. Spot instances +// are instances that Amazon EC2 launches when the bid price that you specify +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price +// based on available Spot instance capacity and current Spot instance requests. +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can use DescribeSpotInstanceRequests to find a running Spot instance +// by examining the response. 
If the status of the Spot instance is fulfilled, +// the instance ID appears in the response and contains the identifier of the +// instance. Alternatively, you can use DescribeInstances with a filter to look +// for instances where the instance lifecycle is spot. +func (c *EC2) DescribeSpotInstanceRequests(input *DescribeSpotInstanceRequestsInput) (*DescribeSpotInstanceRequestsOutput, error) { + req, out := c.DescribeSpotInstanceRequestsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" + +// DescribeSpotPriceHistoryRequest generates a request for the DescribeSpotPriceHistory operation. +func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInput) (req *request.Request, output *DescribeSpotPriceHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotPriceHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSpotPriceHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSpotPriceHistoryOutput{} + req.Data = output + return +} + +// Describes the Spot price history. The prices returned are listed in chronological +// order, from the oldest to the most recent, for up to the past 90 days. For +// more information, see Spot Instance Pricing History (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// When you specify a start and end time, this operation returns the prices +// of the instance types within the time range that you specified and the time +// when the price changed. The price is valid within the time period that you +// specified; the response merely indicates the last time that the price changed. 
+func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*DescribeSpotPriceHistoryOutput, error) { + req, out := c.DescribeSpotPriceHistoryRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(p *DescribeSpotPriceHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSpotPriceHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSpotPriceHistoryOutput), lastPage) + }) +} + +const opDescribeSubnets = "DescribeSubnets" + +// DescribeSubnetsRequest generates a request for the DescribeSubnets operation. +func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request.Request, output *DescribeSubnetsOutput) { + op := &request.Operation{ + Name: opDescribeSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSubnetsOutput{} + req.Data = output + return +} + +// Describes one or more of your subnets. +// +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeSubnets(input *DescribeSubnetsInput) (*DescribeSubnetsOutput, error) { + req, out := c.DescribeSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a request for the DescribeTags operation. 
+func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes one or more of the tags for your EC2 resources. +// +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTagsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTagsOutput), lastPage) + }) +} + +const opDescribeVolumeAttribute = "DescribeVolumeAttribute" + +// DescribeVolumeAttributeRequest generates a request for the DescribeVolumeAttribute operation. 
+func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVolumeAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumeAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified volume. You can specify +// only one attribute at a time. +// +// For more information about EBS volumes, see Amazon EBS Volumes in the Amazon +// Elastic Compute Cloud User Guide. +func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) { + req, out := c.DescribeVolumeAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVolumeStatus = "DescribeVolumeStatus" + +// DescribeVolumeStatusRequest generates a request for the DescribeVolumeStatus operation. +func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req *request.Request, output *DescribeVolumeStatusOutput) { + op := &request.Operation{ + Name: opDescribeVolumeStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumeStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumeStatusOutput{} + req.Data = output + return +} + +// Describes the status of the specified volumes. Volume status provides the +// result of the checks performed on your volumes to determine events that can +// impair the performance of your volumes. The performance of a volume can be +// affected if an issue occurs on the volume's underlying host. 
If the volume's +// underlying host experiences a power outage or system issue, after the system +// is restored, there could be data inconsistencies on the volume. Volume events +// notify you if this occurs. Volume actions notify you if any action needs +// to be taken in response to the event. +// +// The DescribeVolumeStatus operation provides the following information about +// the specified volumes: +// +// Status: Reflects the current status of the volume. The possible values are +// ok, impaired , warning, or insufficient-data. If all checks pass, the overall +// status of the volume is ok. If the check fails, the overall status is impaired. +// If the status is insufficient-data, then the checks may still be taking place +// on your volume at the time. We recommend that you retry the request. For +// more information on volume status, see Monitoring the Status of Your Volumes +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html). +// +// Events: Reflect the cause of a volume status and may require you to take +// action. For example, if your volume returns an impaired status, then the +// volume event might be potential-data-inconsistency. This means that your +// volume has been affected by an issue with the underlying host, has all I/O +// operations disabled, and may have inconsistent data. +// +// Actions: Reflect the actions you may have to take in response to an event. +// For example, if the status of the volume is impaired and the volume event +// shows potential-data-inconsistency, then the action shows enable-volume-io. +// This means that you may want to enable the I/O operations for the volume +// by calling the EnableVolumeIO action and then check the volume for data consistency. +// +// Volume status is based on the volume status checks, and does not reflect +// the volume state. 
Therefore, volume status does not indicate volumes in the +// error state (for example, when a volume is incapable of accepting I/O.) +func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeVolumeStatusOutput, error) { + req, out := c.DescribeVolumeStatusRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(p *DescribeVolumeStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVolumeStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVolumeStatusOutput), lastPage) + }) +} + +const opDescribeVolumes = "DescribeVolumes" + +// DescribeVolumesRequest generates a request for the DescribeVolumes operation. +func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { + op := &request.Operation{ + Name: opDescribeVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumesOutput{} + req.Data = output + return +} + +// Describes the specified EBS volumes. +// +// If you are describing a long list of volumes, you can paginate the output +// to make the list more manageable. The MaxResults parameter sets the maximum +// number of results returned in a single page. If the list of results exceeds +// your MaxResults value, then that number of results is returned along with +// a NextToken value that can be passed to a subsequent DescribeVolumes request +// to retrieve the remaining results. 
+// +// For more information about EBS volumes, see Amazon EBS Volumes in the Amazon +// Elastic Compute Cloud User Guide. +func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + err := req.Send() + return out, err +} + +func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(p *DescribeVolumesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVolumesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVolumesOutput), lastPage) + }) +} + +const opDescribeVpcAttribute = "DescribeVpcAttribute" + +// DescribeVpcAttributeRequest generates a request for the DescribeVpcAttribute operation. +func (c *EC2) DescribeVpcAttributeRequest(input *DescribeVpcAttributeInput) (req *request.Request, output *DescribeVpcAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcAttributeOutput{} + req.Data = output + return +} + +// Describes the specified attribute of the specified VPC. You can specify only +// one attribute at a time. +func (c *EC2) DescribeVpcAttribute(input *DescribeVpcAttributeInput) (*DescribeVpcAttributeOutput, error) { + req, out := c.DescribeVpcAttributeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcClassicLink = "DescribeVpcClassicLink" + +// DescribeVpcClassicLinkRequest generates a request for the DescribeVpcClassicLink operation. 
+func (c *EC2) DescribeVpcClassicLinkRequest(input *DescribeVpcClassicLinkInput) (req *request.Request, output *DescribeVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcClassicLinkOutput{} + req.Data = output + return +} + +// Describes the ClassicLink status of one or more VPCs. +func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*DescribeVpcClassicLinkOutput, error) { + req, out := c.DescribeVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" + +// DescribeVpcClassicLinkDnsSupportRequest generates a request for the DescribeVpcClassicLinkDnsSupport operation. +func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicLinkDnsSupportInput) (req *request.Request, output *DescribeVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Describes the ClassicLink DNS support status of one or more VPCs. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DescribeVpcClassicLinkDnsSupport(input *DescribeVpcClassicLinkDnsSupportInput) (*DescribeVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" + +// DescribeVpcEndpointServicesRequest generates a request for the DescribeVpcEndpointServices operation. +func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServicesInput) (req *request.Request, output *DescribeVpcEndpointServicesOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcEndpointServicesOutput{} + req.Data = output + return +} + +// Describes all supported AWS services that can be specified when creating +// a VPC endpoint. +func (c *EC2) DescribeVpcEndpointServices(input *DescribeVpcEndpointServicesInput) (*DescribeVpcEndpointServicesOutput, error) { + req, out := c.DescribeVpcEndpointServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcEndpoints = "DescribeVpcEndpoints" + +// DescribeVpcEndpointsRequest generates a request for the DescribeVpcEndpoints operation. +func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req *request.Request, output *DescribeVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcEndpointsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPC endpoints. 
+func (c *EC2) DescribeVpcEndpoints(input *DescribeVpcEndpointsInput) (*DescribeVpcEndpointsOutput, error) { + req, out := c.DescribeVpcEndpointsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" + +// DescribeVpcPeeringConnectionsRequest generates a request for the DescribeVpcPeeringConnections operation. +func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpcPeeringConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcPeeringConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcPeeringConnectionsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPC peering connections. +func (c *EC2) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) { + req, out := c.DescribeVpcPeeringConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpcs = "DescribeVpcs" + +// DescribeVpcsRequest generates a request for the DescribeVpcs operation. +func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Request, output *DescribeVpcsOutput) { + op := &request.Operation{ + Name: opDescribeVpcs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPCs. 
+func (c *EC2) DescribeVpcs(input *DescribeVpcsInput) (*DescribeVpcsOutput, error) { + req, out := c.DescribeVpcsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpnConnections = "DescribeVpnConnections" + +// DescribeVpnConnectionsRequest generates a request for the DescribeVpnConnections operation. +func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) (req *request.Request, output *DescribeVpnConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpnConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnConnectionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpnConnectionsOutput{} + req.Data = output + return +} + +// Describes one or more of your VPN connections. +// +// For more information about VPN connections, see Adding a Hardware Virtual +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeVpnConnections(input *DescribeVpnConnectionsInput) (*DescribeVpnConnectionsOutput, error) { + req, out := c.DescribeVpnConnectionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVpnGateways = "DescribeVpnGateways" + +// DescribeVpnGatewaysRequest generates a request for the DescribeVpnGateways operation. +func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req *request.Request, output *DescribeVpnGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeVpnGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpnGatewaysOutput{} + req.Data = output + return +} + +// Describes one or more of your virtual private gateways. 
+// +// For more information about virtual private gateways, see Adding an IPsec +// Hardware VPN to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DescribeVpnGateways(input *DescribeVpnGatewaysInput) (*DescribeVpnGatewaysOutput, error) { + req, out := c.DescribeVpnGatewaysRequest(input) + err := req.Send() + return out, err +} + +const opDetachClassicLinkVpc = "DetachClassicLinkVpc" + +// DetachClassicLinkVpcRequest generates a request for the DetachClassicLinkVpc operation. +func (c *EC2) DetachClassicLinkVpcRequest(input *DetachClassicLinkVpcInput) (req *request.Request, output *DetachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opDetachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachClassicLinkVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachClassicLinkVpcOutput{} + req.Data = output + return +} + +// Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance +// has been unlinked, the VPC security groups are no longer associated with +// it. An instance is automatically unlinked from a VPC when it's stopped. +func (c *EC2) DetachClassicLinkVpc(input *DetachClassicLinkVpcInput) (*DetachClassicLinkVpcOutput, error) { + req, out := c.DetachClassicLinkVpcRequest(input) + err := req.Send() + return out, err +} + +const opDetachInternetGateway = "DetachInternetGateway" + +// DetachInternetGatewayRequest generates a request for the DetachInternetGateway operation. 
+func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (req *request.Request, output *DetachInternetGatewayOutput) { + op := &request.Operation{ + Name: opDetachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachInternetGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachInternetGatewayOutput{} + req.Data = output + return +} + +// Detaches an Internet gateway from a VPC, disabling connectivity between the +// Internet and the VPC. The VPC must not contain any running instances with +// Elastic IP addresses. +func (c *EC2) DetachInternetGateway(input *DetachInternetGatewayInput) (*DetachInternetGatewayOutput, error) { + req, out := c.DetachInternetGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDetachNetworkInterface = "DetachNetworkInterface" + +// DetachNetworkInterfaceRequest generates a request for the DetachNetworkInterface operation. +func (c *EC2) DetachNetworkInterfaceRequest(input *DetachNetworkInterfaceInput) (req *request.Request, output *DetachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDetachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachNetworkInterfaceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachNetworkInterfaceOutput{} + req.Data = output + return +} + +// Detaches a network interface from an instance. 
+func (c *EC2) DetachNetworkInterface(input *DetachNetworkInterfaceInput) (*DetachNetworkInterfaceOutput, error) { + req, out := c.DetachNetworkInterfaceRequest(input) + err := req.Send() + return out, err +} + +const opDetachVolume = "DetachVolume" + +// DetachVolumeRequest generates a request for the DetachVolume operation. +func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opDetachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &VolumeAttachment{} + req.Data = output + return +} + +// Detaches an EBS volume from an instance. Make sure to unmount any file systems +// on the device within your operating system before detaching the volume. Failure +// to do so results in the volume being stuck in a busy state while detaching. +// +// If an Amazon EBS volume is the root device of an instance, it can't be detached +// while the instance is running. To detach the root volume, stop the instance +// first. +// +// When a volume with an AWS Marketplace product code is detached from an instance, +// the product code is no longer associated with the instance. +// +// For more information, see Detaching an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DetachVolume(input *DetachVolumeInput) (*VolumeAttachment, error) { + req, out := c.DetachVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDetachVpnGateway = "DetachVpnGateway" + +// DetachVpnGatewayRequest generates a request for the DetachVpnGateway operation. 
+func (c *EC2) DetachVpnGatewayRequest(input *DetachVpnGatewayInput) (req *request.Request, output *DetachVpnGatewayOutput) { + op := &request.Operation{ + Name: opDetachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVpnGatewayInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachVpnGatewayOutput{} + req.Data = output + return +} + +// Detaches a virtual private gateway from a VPC. You do this if you're planning +// to turn off the VPC and not use it anymore. You can confirm a virtual private +// gateway has been completely detached from a VPC by describing the virtual +// private gateway (any attachments to the virtual private gateway are also +// described). +// +// You must wait for the attachment's state to switch to detached before you +// can delete the VPC or attach a different VPC to the virtual private gateway. +func (c *EC2) DetachVpnGateway(input *DetachVpnGatewayInput) (*DetachVpnGatewayOutput, error) { + req, out := c.DetachVpnGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" + +// DisableVgwRoutePropagationRequest generates a request for the DisableVgwRoutePropagation operation. 
+func (c *EC2) DisableVgwRoutePropagationRequest(input *DisableVgwRoutePropagationInput) (req *request.Request, output *DisableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opDisableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVgwRoutePropagationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableVgwRoutePropagationOutput{} + req.Data = output + return +} + +// Disables a virtual private gateway (VGW) from propagating routes to a specified +// route table of a VPC. +func (c *EC2) DisableVgwRoutePropagation(input *DisableVgwRoutePropagationInput) (*DisableVgwRoutePropagationOutput, error) { + req, out := c.DisableVgwRoutePropagationRequest(input) + err := req.Send() + return out, err +} + +const opDisableVpcClassicLink = "DisableVpcClassicLink" + +// DisableVpcClassicLinkRequest generates a request for the DisableVpcClassicLink operation. +func (c *EC2) DisableVpcClassicLinkRequest(input *DisableVpcClassicLinkInput) (req *request.Request, output *DisableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVpcClassicLinkOutput{} + req.Data = output + return +} + +// Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC +// that has EC2-Classic instances linked to it. 
+func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*DisableVpcClassicLinkOutput, error) { + req, out := c.DisableVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" + +// DisableVpcClassicLinkDnsSupportRequest generates a request for the DisableVpcClassicLinkDnsSupport operation. +func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLinkDnsSupportInput) (req *request.Request, output *DisableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve +// to public IP addresses when addressed between a linked EC2-Classic instance +// and instances in the VPC to which it's linked. For more information about +// ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) DisableVpcClassicLinkDnsSupport(input *DisableVpcClassicLinkDnsSupportInput) (*DisableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DisableVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateAddress = "DisassociateAddress" + +// DisassociateAddressRequest generates a request for the DisassociateAddress operation. 
+func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req *request.Request, output *DisassociateAddressOutput) { + op := &request.Operation{ + Name: opDisassociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisassociateAddressOutput{} + req.Data = output + return +} + +// Disassociates an Elastic IP address from the instance or network interface +// it's associated with. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// This is an idempotent operation. If you perform the operation more than +// once, Amazon EC2 doesn't return an error. +func (c *EC2) DisassociateAddress(input *DisassociateAddressInput) (*DisassociateAddressOutput, error) { + req, out := c.DisassociateAddressRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateRouteTable = "DisassociateRouteTable" + +// DisassociateRouteTableRequest generates a request for the DisassociateRouteTable operation. 
+func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) (req *request.Request, output *DisassociateRouteTableOutput) { + op := &request.Operation{ + Name: opDisassociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateRouteTableInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisassociateRouteTableOutput{} + req.Data = output + return +} + +// Disassociates a subnet from a route table. +// +// After you perform this action, the subnet no longer uses the routes in the +// route table. Instead, it uses the routes in the VPC's main route table. For +// more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) DisassociateRouteTable(input *DisassociateRouteTableInput) (*DisassociateRouteTableOutput, error) { + req, out := c.DisassociateRouteTableRequest(input) + err := req.Send() + return out, err +} + +const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" + +// EnableVgwRoutePropagationRequest generates a request for the EnableVgwRoutePropagation operation. 
+func (c *EC2) EnableVgwRoutePropagationRequest(input *EnableVgwRoutePropagationInput) (req *request.Request, output *EnableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opEnableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVgwRoutePropagationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableVgwRoutePropagationOutput{} + req.Data = output + return +} + +// Enables a virtual private gateway (VGW) to propagate routes to the specified +// route table of a VPC. +func (c *EC2) EnableVgwRoutePropagation(input *EnableVgwRoutePropagationInput) (*EnableVgwRoutePropagationOutput, error) { + req, out := c.EnableVgwRoutePropagationRequest(input) + err := req.Send() + return out, err +} + +const opEnableVolumeIO = "EnableVolumeIO" + +// EnableVolumeIORequest generates a request for the EnableVolumeIO operation. +func (c *EC2) EnableVolumeIORequest(input *EnableVolumeIOInput) (req *request.Request, output *EnableVolumeIOOutput) { + op := &request.Operation{ + Name: opEnableVolumeIO, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVolumeIOInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableVolumeIOOutput{} + req.Data = output + return +} + +// Enables I/O operations for a volume that had I/O operations disabled because +// the data on the volume was potentially inconsistent. 
+func (c *EC2) EnableVolumeIO(input *EnableVolumeIOInput) (*EnableVolumeIOOutput, error) { + req, out := c.EnableVolumeIORequest(input) + err := req.Send() + return out, err +} + +const opEnableVpcClassicLink = "EnableVpcClassicLink" + +// EnableVpcClassicLinkRequest generates a request for the EnableVpcClassicLink operation. +func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req *request.Request, output *EnableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVpcClassicLinkOutput{} + req.Data = output + return +} + +// Enables a VPC for ClassicLink. You can then link EC2-Classic instances to +// your ClassicLink-enabled VPC to allow communication over private IP addresses. +// You cannot enable your VPC for ClassicLink if any of your VPC's route tables +// have existing routes for address ranges within the 10.0.0.0/8 IP address +// range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 +// IP address ranges. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpcClassicLinkOutput, error) { + req, out := c.EnableVpcClassicLinkRequest(input) + err := req.Send() + return out, err +} + +const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" + +// EnableVpcClassicLinkDnsSupportRequest generates a request for the EnableVpcClassicLinkDnsSupport operation. 
+func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkDnsSupportInput) (req *request.Request, output *EnableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) EnableVpcClassicLinkDnsSupport(input *EnableVpcClassicLinkDnsSupportInput) (*EnableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.EnableVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + +const opGetConsoleOutput = "GetConsoleOutput" + +// GetConsoleOutputRequest generates a request for the GetConsoleOutput operation. +func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *request.Request, output *GetConsoleOutputOutput) { + op := &request.Operation{ + Name: opGetConsoleOutput, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConsoleOutputInput{} + } + + req = c.newRequest(op, input, output) + output = &GetConsoleOutputOutput{} + req.Data = output + return +} + +// Gets the console output for the specified instance. +// +// Instances do not have a physical monitor through which you can view their +// console output. 
They also lack physical controls that allow you to power +// up, reboot, or shut them down. To allow these actions, we provide them through +// the Amazon EC2 API and command line interface. +// +// Instance console output is buffered and posted shortly after instance boot, +// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output +// which is available for at least one hour after the most recent post. +// +// For Linux instances, the instance console output displays the exact console +// output that would normally be displayed on a physical monitor attached to +// a computer. This output is buffered because the instance produces it and +// then posts it to a store where the instance's owner can retrieve it. +// +// For Windows instances, the instance console output includes output from +// the EC2Config service. +func (c *EC2) GetConsoleOutput(input *GetConsoleOutputInput) (*GetConsoleOutputOutput, error) { + req, out := c.GetConsoleOutputRequest(input) + err := req.Send() + return out, err +} + +const opGetPasswordData = "GetPasswordData" + +// GetPasswordDataRequest generates a request for the GetPasswordData operation. +func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request.Request, output *GetPasswordDataOutput) { + op := &request.Operation{ + Name: opGetPasswordData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPasswordDataInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPasswordDataOutput{} + req.Data = output + return +} + +// Retrieves the encrypted administrator password for an instance running Windows. +// +// The Windows password is generated at boot if the EC2Config service plugin, +// Ec2SetPassword, is enabled. This usually only happens the first time an AMI +// is launched, and then Ec2SetPassword is automatically disabled. The password +// is not generated for rebundled AMIs unless Ec2SetPassword is enabled before +// bundling. 
+// +// The password is encrypted using the key pair that you specified when you +// launched the instance. You must provide the corresponding key pair file. +// +// Password generation and encryption takes a few moments. We recommend that +// you wait up to 15 minutes after launching an instance before trying to retrieve +// the generated password. +func (c *EC2) GetPasswordData(input *GetPasswordDataInput) (*GetPasswordDataOutput, error) { + req, out := c.GetPasswordDataRequest(input) + err := req.Send() + return out, err +} + +const opImportImage = "ImportImage" + +// ImportImageRequest generates a request for the ImportImage operation. +func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, output *ImportImageOutput) { + op := &request.Operation{ + Name: opImportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportImageInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportImageOutput{} + req.Data = output + return +} + +// Import single or multi-volume disk images or EBS snapshots into an Amazon +// Machine Image (AMI). +func (c *EC2) ImportImage(input *ImportImageInput) (*ImportImageOutput, error) { + req, out := c.ImportImageRequest(input) + err := req.Send() + return out, err +} + +const opImportInstance = "ImportInstance" + +// ImportInstanceRequest generates a request for the ImportInstance operation. +func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Request, output *ImportInstanceOutput) { + op := &request.Operation{ + Name: opImportInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportInstanceOutput{} + req.Data = output + return +} + +// Creates an import instance task using metadata from the specified disk image. +// ImportInstance only supports single-volume VMs. To import multi-volume VMs, +// use ImportImage. 
After importing the image, you then upload it using the +// ec2-import-volume command in the EC2 command line tools. For more information, +// see Using the Command Line Tools to Import Your Virtual Machine to Amazon +// EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportInstance(input *ImportInstanceInput) (*ImportInstanceOutput, error) { + req, out := c.ImportInstanceRequest(input) + err := req.Send() + return out, err +} + +const opImportKeyPair = "ImportKeyPair" + +// ImportKeyPairRequest generates a request for the ImportKeyPair operation. +func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) { + op := &request.Operation{ + Name: opImportKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportKeyPairInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportKeyPairOutput{} + req.Data = output + return +} + +// Imports the public key from an RSA key pair that you created with a third-party +// tool. Compare this with CreateKeyPair, in which AWS creates the key pair +// and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, +// you create the key pair and give AWS just the public key. The private key +// is never transferred between you and AWS. +// +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) + err := req.Send() + return out, err +} + +const opImportSnapshot = "ImportSnapshot" + +// ImportSnapshotRequest generates a request for the ImportSnapshot operation. 
+func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Request, output *ImportSnapshotOutput) { + op := &request.Operation{ + Name: opImportSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportSnapshotOutput{} + req.Data = output + return +} + +// Imports a disk into an EBS snapshot. +func (c *EC2) ImportSnapshot(input *ImportSnapshotInput) (*ImportSnapshotOutput, error) { + req, out := c.ImportSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opImportVolume = "ImportVolume" + +// ImportVolumeRequest generates a request for the ImportVolume operation. +func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Request, output *ImportVolumeOutput) { + op := &request.Operation{ + Name: opImportVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportVolumeOutput{} + req.Data = output + return +} + +// Creates an import volume task using metadata from the specified disk image. +// After importing the image, you then upload it using the ec2-import-volume +// command in the Amazon EC2 command-line interface (CLI) tools. For more information, +// see Using the Command Line Tools to Import Your Virtual Machine to Amazon +// EC2 (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UploadingYourInstancesandVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ImportVolume(input *ImportVolumeInput) (*ImportVolumeOutput, error) { + req, out := c.ImportVolumeRequest(input) + err := req.Send() + return out, err +} + +const opModifyHosts = "ModifyHosts" + +// ModifyHostsRequest generates a request for the ModifyHosts operation. 
+func (c *EC2) ModifyHostsRequest(input *ModifyHostsInput) (req *request.Request, output *ModifyHostsOutput) { + op := &request.Operation{ + Name: opModifyHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyHostsOutput{} + req.Data = output + return +} + +// Modify the auto-placement setting of a Dedicated host. When auto-placement +// is enabled, AWS will place instances that you launch with a tenancy of host, +// but without targeting a specific host ID, onto any available Dedicated host +// in your account which has auto-placement enabled. When auto-placement is +// disabled, you need to provide a host ID if you want the instance to launch +// onto a specific host. If no host ID is provided, the instance will be launched +// onto a suitable host which has auto-placement enabled. +func (c *EC2) ModifyHosts(input *ModifyHostsInput) (*ModifyHostsOutput, error) { + req, out := c.ModifyHostsRequest(input) + err := req.Send() + return out, err +} + +const opModifyIdFormat = "ModifyIdFormat" + +// ModifyIdFormatRequest generates a request for the ModifyIdFormat operation. +func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Request, output *ModifyIdFormatOutput) { + op := &request.Operation{ + Name: opModifyIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyIdFormatInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyIdFormatOutput{} + req.Data = output + return +} + +// Modifies the ID format for the specified resource on a per-region basis. +// You can specify that resources should receive longer IDs (17-character IDs) +// when they are created. The following resource types support longer IDs: instance +// | reservation. 
+// +// This setting applies to the IAM user who makes the request; it does not +// apply to the entire AWS account. By default, an IAM user defaults to the +// same settings as the root user, unless they explicitly override the settings +// by running this request. Resources created with longer IDs are visible to +// all IAM users, regardless of these settings and provided that they have permission +// to use the relevant Describe command for the resource type. +func (c *EC2) ModifyIdFormat(input *ModifyIdFormatInput) (*ModifyIdFormatOutput, error) { + req, out := c.ModifyIdFormatRequest(input) + err := req.Send() + return out, err +} + +const opModifyImageAttribute = "ModifyImageAttribute" + +// ModifyImageAttributeRequest generates a request for the ModifyImageAttribute operation. +func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req *request.Request, output *ModifyImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyImageAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified AMI. You can specify only +// one attribute at a time. +// +// AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace +// product code cannot be made public. +func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) { + req, out := c.ModifyImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyInstanceAttribute = "ModifyInstanceAttribute" + +// ModifyInstanceAttributeRequest generates a request for the ModifyInstanceAttribute operation. 
+func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput) (req *request.Request, output *ModifyInstanceAttributeOutput) { + op := &request.Operation{ + Name: opModifyInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyInstanceAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified instance. You can specify +// only one attribute at a time. +// +// To modify some attributes, the instance must be stopped. For more information, +// see Modifying Attributes of a Stopped Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifyInstanceAttribute(input *ModifyInstanceAttributeInput) (*ModifyInstanceAttributeOutput, error) { + req, out := c.ModifyInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyInstancePlacement = "ModifyInstancePlacement" + +// ModifyInstancePlacementRequest generates a request for the ModifyInstancePlacement operation. +func (c *EC2) ModifyInstancePlacementRequest(input *ModifyInstancePlacementInput) (req *request.Request, output *ModifyInstancePlacementOutput) { + op := &request.Operation{ + Name: opModifyInstancePlacement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstancePlacementInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyInstancePlacementOutput{} + req.Data = output + return +} + +// Set the instance affinity value for a specific stopped instance and modify +// the instance tenancy setting. +// +// Instance affinity is disabled by default. 
When instance affinity is host +// and it is not associated with a specific Dedicated host, the next time it +// is launched it will automatically be associated with the host it lands on. +// This relationship will persist if the instance is stopped/started, or rebooted. +// +// You can modify the host ID associated with a stopped instance. If a stopped +// instance has a new host ID association, the instance will target that host +// when restarted. +// +// You can modify the tenancy of a stopped instance with a tenancy of host +// or dedicated. +// +// Affinity, hostID, and tenancy are not required parameters, but at least +// one of them must be specified in the request. Affinity and tenancy can be +// modified in the same request, but tenancy can only be modified on instances +// that are stopped. +func (c *EC2) ModifyInstancePlacement(input *ModifyInstancePlacementInput) (*ModifyInstancePlacementOutput, error) { + req, out := c.ModifyInstancePlacementRequest(input) + err := req.Send() + return out, err +} + +const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" + +// ModifyNetworkInterfaceAttributeRequest generates a request for the ModifyNetworkInterfaceAttribute operation. +func (c *EC2) ModifyNetworkInterfaceAttributeRequest(input *ModifyNetworkInterfaceAttributeInput) (req *request.Request, output *ModifyNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opModifyNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified network interface attribute. You can specify only +// one attribute at a time. 
+func (c *EC2) ModifyNetworkInterfaceAttribute(input *ModifyNetworkInterfaceAttributeInput) (*ModifyNetworkInterfaceAttributeOutput, error) { + req, out := c.ModifyNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyReservedInstances = "ModifyReservedInstances" + +// ModifyReservedInstancesRequest generates a request for the ModifyReservedInstances operation. +func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput) (req *request.Request, output *ModifyReservedInstancesOutput) { + op := &request.Operation{ + Name: opModifyReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReservedInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReservedInstancesOutput{} + req.Data = output + return +} + +// Modifies the Availability Zone, instance count, instance type, or network +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, +// network platform, and instance type. +// +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ModifyReservedInstances(input *ModifyReservedInstancesInput) (*ModifyReservedInstancesOutput, error) { + req, out := c.ModifyReservedInstancesRequest(input) + err := req.Send() + return out, err +} + +const opModifySnapshotAttribute = "ModifySnapshotAttribute" + +// ModifySnapshotAttributeRequest generates a request for the ModifySnapshotAttribute operation. 
+func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput) (req *request.Request, output *ModifySnapshotAttributeOutput) { + op := &request.Operation{ + Name: opModifySnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifySnapshotAttributeOutput{} + req.Data = output + return +} + +// Adds or removes permission settings for the specified snapshot. You may add +// or remove specified AWS account IDs from a snapshot's list of create volume +// permissions, but you cannot do both in a single API call. If you need to +// both add and remove account IDs for a snapshot, you must use multiple API +// calls. +// +// For more information on modifying snapshot permissions, see Sharing Snapshots +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Snapshots with AWS Marketplace product codes cannot be made public. +func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*ModifySnapshotAttributeOutput, error) { + req, out := c.ModifySnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifySpotFleetRequest = "ModifySpotFleetRequest" + +// ModifySpotFleetRequestRequest generates a request for the ModifySpotFleetRequest operation. 
+func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) (req *request.Request, output *ModifySpotFleetRequestOutput) { + op := &request.Operation{ + Name: opModifySpotFleetRequest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySpotFleetRequestInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySpotFleetRequestOutput{} + req.Data = output + return +} + +// Modifies the specified Spot fleet request. +// +// While the Spot fleet request is being modified, it is in the modifying state. +// +// To scale up your Spot fleet, increase its target capacity. The Spot fleet +// launches the additional Spot instances according to the allocation strategy +// for the Spot fleet request. If the allocation strategy is lowestPrice, the +// Spot fleet launches instances using the Spot pool with the lowest price. +// If the allocation strategy is diversified, the Spot fleet distributes the +// instances across the Spot pools. +// +// To scale down your Spot fleet, decrease its target capacity. First, the +// Spot fleet cancels any open bids that exceed the new target capacity. You +// can request that the Spot fleet terminate Spot instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the Spot fleet terminates the instances with the highest +// price per unit. If the allocation strategy is diversified, the Spot fleet +// terminates instances across the Spot pools. Alternatively, you can request +// that the Spot fleet keep the fleet at its current size, but not replace any +// Spot instances that are interrupted or that you terminate manually. 
+func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*ModifySpotFleetRequestOutput, error) { + req, out := c.ModifySpotFleetRequestRequest(input) + err := req.Send() + return out, err +} + +const opModifySubnetAttribute = "ModifySubnetAttribute" + +// ModifySubnetAttributeRequest generates a request for the ModifySubnetAttribute operation. +func (c *EC2) ModifySubnetAttributeRequest(input *ModifySubnetAttributeInput) (req *request.Request, output *ModifySubnetAttributeOutput) { + op := &request.Operation{ + Name: opModifySubnetAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySubnetAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifySubnetAttributeOutput{} + req.Data = output + return +} + +// Modifies a subnet attribute. +func (c *EC2) ModifySubnetAttribute(input *ModifySubnetAttributeInput) (*ModifySubnetAttributeOutput, error) { + req, out := c.ModifySubnetAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVolumeAttribute = "ModifyVolumeAttribute" + +// ModifyVolumeAttributeRequest generates a request for the ModifyVolumeAttribute operation. +func (c *EC2) ModifyVolumeAttributeRequest(input *ModifyVolumeAttributeInput) (req *request.Request, output *ModifyVolumeAttributeOutput) { + op := &request.Operation{ + Name: opModifyVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVolumeAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyVolumeAttributeOutput{} + req.Data = output + return +} + +// Modifies a volume attribute. 
+// +// By default, all I/O operations for the volume are suspended when the data +// on the volume is determined to be potentially inconsistent, to prevent undetectable, +// latent data corruption. The I/O access to the volume can be resumed by first +// enabling I/O access and then checking the data consistency on your volume. +// +// You can change the default behavior to resume I/O operations. We recommend +// that you change this only for boot volumes or for volumes that are stateless +// or disposable. +func (c *EC2) ModifyVolumeAttribute(input *ModifyVolumeAttributeInput) (*ModifyVolumeAttributeOutput, error) { + req, out := c.ModifyVolumeAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcAttribute = "ModifyVpcAttribute" + +// ModifyVpcAttributeRequest generates a request for the ModifyVpcAttribute operation. +func (c *EC2) ModifyVpcAttributeRequest(input *ModifyVpcAttributeInput) (req *request.Request, output *ModifyVpcAttributeOutput) { + op := &request.Operation{ + Name: opModifyVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyVpcAttributeOutput{} + req.Data = output + return +} + +// Modifies the specified attribute of the specified VPC. +func (c *EC2) ModifyVpcAttribute(input *ModifyVpcAttributeInput) (*ModifyVpcAttributeOutput, error) { + req, out := c.ModifyVpcAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyVpcEndpoint = "ModifyVpcEndpoint" + +// ModifyVpcEndpointRequest generates a request for the ModifyVpcEndpoint operation. 
+func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *request.Request, output *ModifyVpcEndpointOutput) { + op := &request.Operation{ + Name: opModifyVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyVpcEndpointOutput{} + req.Data = output + return +} + +// Modifies attributes of a specified VPC endpoint. You can modify the policy +// associated with the endpoint, and you can add and remove route tables associated +// with the endpoint. +func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpointOutput, error) { + req, out := c.ModifyVpcEndpointRequest(input) + err := req.Send() + return out, err +} + +const opMonitorInstances = "MonitorInstances" + +// MonitorInstancesRequest generates a request for the MonitorInstances operation. +func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *request.Request, output *MonitorInstancesOutput) { + op := &request.Operation{ + Name: opMonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MonitorInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &MonitorInstancesOutput{} + req.Data = output + return +} + +// Enables monitoring for a running instance. For more information about monitoring +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) MonitorInstances(input *MonitorInstancesInput) (*MonitorInstancesOutput, error) { + req, out := c.MonitorInstancesRequest(input) + err := req.Send() + return out, err +} + +const opMoveAddressToVpc = "MoveAddressToVpc" + +// MoveAddressToVpcRequest generates a request for the MoveAddressToVpc operation. 
+func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *request.Request, output *MoveAddressToVpcOutput) { + op := &request.Operation{ + Name: opMoveAddressToVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MoveAddressToVpcInput{} + } + + req = c.newRequest(op, input, output) + output = &MoveAddressToVpcOutput{} + req.Data = output + return +} + +// Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC +// platform. The Elastic IP address must be allocated to your account for more +// than 24 hours, and it must not be associated with an instance. After the +// Elastic IP address is moved, it is no longer available for use in the EC2-Classic +// platform, unless you move it back using the RestoreAddressToClassic request. +// You cannot move an Elastic IP address that's allocated for use in the EC2-VPC +// platform to the EC2-Classic platform. You cannot migrate an Elastic IP address +// that's associated with a reverse DNS record. Contact AWS account and billing +// support to remove the reverse DNS record. +func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) { + req, out := c.MoveAddressToVpcRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" + +// PurchaseReservedInstancesOfferingRequest generates a request for the PurchaseReservedInstancesOffering operation. 
+func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedInstancesOfferingInput) (req *request.Request, output *PurchaseReservedInstancesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedInstancesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedInstancesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedInstancesOfferingOutput{} + req.Data = output + return +} + +// Purchases a Reserved Instance for use with your account. With Reserved Instances, +// you obtain a capacity reservation for a certain instance configuration over +// a specified period of time and pay a lower hourly rate compared to On-Demand +// instance pricing. +// +// Use DescribeReservedInstancesOfferings to get a list of Reserved Instance +// offerings that match your specifications. After you've purchased a Reserved +// Instance, you can check for your new Reserved Instance with DescribeReservedInstances. +// +// For more information, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// and Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstancesOfferingInput) (*PurchaseReservedInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedInstancesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseScheduledInstances = "PurchaseScheduledInstances" + +// PurchaseScheduledInstancesRequest generates a request for the PurchaseScheduledInstances operation. 
+func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstancesInput) (req *request.Request, output *PurchaseScheduledInstancesOutput) { + op := &request.Operation{ + Name: opPurchaseScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseScheduledInstancesOutput{} + req.Data = output + return +} + +// Purchases one or more Scheduled Instances with the specified schedule. +// +// Scheduled Instances enable you to purchase Amazon EC2 compute capacity by +// the hour for a one-year term. Before you can purchase a Scheduled Instance, +// you must call DescribeScheduledInstanceAvailability to check for available +// schedules and obtain a purchase token. +func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) (*PurchaseScheduledInstancesOutput, error) { + req, out := c.PurchaseScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRebootInstances = "RebootInstances" + +// RebootInstancesRequest generates a request for the RebootInstances operation. +func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.Request, output *RebootInstancesOutput) { + op := &request.Operation{ + Name: opRebootInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootInstancesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RebootInstancesOutput{} + req.Data = output + return +} + +// Requests a reboot of one or more instances. This operation is asynchronous; +// it only queues a request to reboot the specified instances. The operation +// succeeds if the instances are valid and belong to you. Requests to reboot +// terminated instances are ignored. 
+// +// If a Linux/Unix instance does not cleanly shut down within four minutes, +// Amazon EC2 performs a hard reboot. +// +// For more information about troubleshooting, see Getting Console Output and +// Rebooting Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RebootInstances(input *RebootInstancesInput) (*RebootInstancesOutput, error) { + req, out := c.RebootInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterImage = "RegisterImage" + +// RegisterImageRequest generates a request for the RegisterImage operation. +func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Request, output *RegisterImageOutput) { + op := &request.Operation{ + Name: opRegisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterImageInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterImageOutput{} + req.Data = output + return +} + +// Registers an AMI. When you're creating an AMI, this is the final step you +// must complete before you can launch an instance from the AMI. For more information +// about creating AMIs, see Creating Your Own AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For Amazon EBS-backed instances, CreateImage creates and registers the AMI +// in a single request, so you don't have to register the AMI yourself. +// +// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI +// from a snapshot of a root device volume. For more information, see Launching +// an Instance from a Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_LaunchingInstanceFromSnapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE +// Linux Enterprise Server (SLES), use the EC2 billingProduct code associated +// with an AMI to verify subscription status for package updates. Creating an +// AMI from an EBS snapshot does not maintain this billing code, and subsequent +// instances launched from such an AMI will not be able to connect to package +// update infrastructure. +// +// Similarly, although you can create a Windows AMI from a snapshot, you can't +// successfully launch an instance from the AMI. +// +// To create Windows AMIs or to create AMIs for Linux operating systems that +// must retain AMI billing codes to work properly, see CreateImage. +// +// If needed, you can deregister an AMI at any time. Any modifications you +// make to an AMI backed by an instance store volume invalidates its registration. +// If you make changes to an image, deregister the previous image and register +// the new image. +// +// You can't register an image where a secondary (non-root) snapshot has AWS +// Marketplace product codes. +func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, error) { + req, out := c.RegisterImageRequest(input) + err := req.Send() + return out, err +} + +const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" + +// RejectVpcPeeringConnectionRequest generates a request for the RejectVpcPeeringConnection operation. +func (c *EC2) RejectVpcPeeringConnectionRequest(input *RejectVpcPeeringConnectionInput) (req *request.Request, output *RejectVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opRejectVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RejectVpcPeeringConnectionInput{} + } + + req = c.newRequest(op, input, output) + output = &RejectVpcPeeringConnectionOutput{} + req.Data = output + return +} + +// Rejects a VPC peering connection request. 
The VPC peering connection must +// be in the pending-acceptance state. Use the DescribeVpcPeeringConnections +// request to view your outstanding VPC peering connection requests. To delete +// an active VPC peering connection, or to delete a VPC peering connection request +// that you initiated, use DeleteVpcPeeringConnection. +func (c *EC2) RejectVpcPeeringConnection(input *RejectVpcPeeringConnectionInput) (*RejectVpcPeeringConnectionOutput, error) { + req, out := c.RejectVpcPeeringConnectionRequest(input) + err := req.Send() + return out, err +} + +const opReleaseAddress = "ReleaseAddress" + +// ReleaseAddressRequest generates a request for the ReleaseAddress operation. +func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Request, output *ReleaseAddressOutput) { + op := &request.Operation{ + Name: opReleaseAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReleaseAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReleaseAddressOutput{} + req.Data = output + return +} + +// Releases the specified Elastic IP address. +// +// After releasing an Elastic IP address, it is released to the IP address +// pool and might be unavailable to you. Be sure to update your DNS records +// and any servers or devices that communicate with the address. If you attempt +// to release an Elastic IP address that you already released, you'll get an +// AuthFailure error if the address is already allocated to another AWS account. +// +// [EC2-Classic, default VPC] Releasing an Elastic IP address automatically +// disassociates it from any instance that it's associated with. To disassociate +// an Elastic IP address without releasing it, use DisassociateAddress. 
+// +// [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic +// IP address before you try to release it. Otherwise, Amazon EC2 returns an +// error (InvalidIPAddress.InUse). +func (c *EC2) ReleaseAddress(input *ReleaseAddressInput) (*ReleaseAddressOutput, error) { + req, out := c.ReleaseAddressRequest(input) + err := req.Send() + return out, err +} + +const opReleaseHosts = "ReleaseHosts" + +// ReleaseHostsRequest generates a request for the ReleaseHosts operation. +func (c *EC2) ReleaseHostsRequest(input *ReleaseHostsInput) (req *request.Request, output *ReleaseHostsOutput) { + op := &request.Operation{ + Name: opReleaseHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReleaseHostsInput{} + } + + req = c.newRequest(op, input, output) + output = &ReleaseHostsOutput{} + req.Data = output + return +} + +// When you no longer want to use a Dedicated host it can be released. On-Demand +// billing is stopped and the host goes into released state. The host ID of +// Dedicated hosts that have been released can no longer be specified in another +// request, e.g., ModifyHosts. You must stop or terminate all instances on a +// host before it can be released. +// +// When Dedicated hosts are released, it make take some time for them to stop +// counting toward your limit and you may receive capacity errors when trying +// to allocate new Dedicated hosts. Try waiting a few minutes, and then try +// again. +// +// Released hosts will still appear in a DescribeHosts response. +func (c *EC2) ReleaseHosts(input *ReleaseHostsInput) (*ReleaseHostsOutput, error) { + req, out := c.ReleaseHostsRequest(input) + err := req.Send() + return out, err +} + +const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation" + +// ReplaceNetworkAclAssociationRequest generates a request for the ReplaceNetworkAclAssociation operation. 
+func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssociationInput) (req *request.Request, output *ReplaceNetworkAclAssociationOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceNetworkAclAssociationOutput{} + req.Data = output + return +} + +// Changes which network ACL a subnet is associated with. By default when you +// create a subnet, it's automatically associated with the default network ACL. +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceNetworkAclAssociation(input *ReplaceNetworkAclAssociationInput) (*ReplaceNetworkAclAssociationOutput, error) { + req, out := c.ReplaceNetworkAclAssociationRequest(input) + err := req.Send() + return out, err +} + +const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" + +// ReplaceNetworkAclEntryRequest generates a request for the ReplaceNetworkAclEntry operation. +func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) (req *request.Request, output *ReplaceNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclEntryInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReplaceNetworkAclEntryOutput{} + req.Data = output + return +} + +// Replaces an entry (rule) in a network ACL. 
For more information about network +// ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceNetworkAclEntry(input *ReplaceNetworkAclEntryInput) (*ReplaceNetworkAclEntryOutput, error) { + req, out := c.ReplaceNetworkAclEntryRequest(input) + err := req.Send() + return out, err +} + +const opReplaceRoute = "ReplaceRoute" + +// ReplaceRouteRequest generates a request for the ReplaceRoute operation. +func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Request, output *ReplaceRouteOutput) { + op := &request.Operation{ + Name: opReplaceRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReplaceRouteOutput{} + req.Data = output + return +} + +// Replaces an existing route within a route table in a VPC. You must provide +// only one of the following: Internet gateway or virtual private gateway, NAT +// instance, NAT gateway, VPC peering connection, or network interface. +// +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ReplaceRoute(input *ReplaceRouteInput) (*ReplaceRouteOutput, error) { + req, out := c.ReplaceRouteRequest(input) + err := req.Send() + return out, err +} + +const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" + +// ReplaceRouteTableAssociationRequest generates a request for the ReplaceRouteTableAssociation operation. 
+func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssociationInput) (req *request.Request, output *ReplaceRouteTableAssociationOutput) { + op := &request.Operation{ + Name: opReplaceRouteTableAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteTableAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &ReplaceRouteTableAssociationOutput{} + req.Data = output + return +} + +// Changes the route table associated with a given subnet in a VPC. After the +// operation completes, the subnet uses the routes in the new route table it's +// associated with. For more information about route tables, see Route Tables +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can also use ReplaceRouteTableAssociation to change which table is the +// main route table in the VPC. You just specify the main route table's association +// ID and the route table to be the new main route table. +func (c *EC2) ReplaceRouteTableAssociation(input *ReplaceRouteTableAssociationInput) (*ReplaceRouteTableAssociationOutput, error) { + req, out := c.ReplaceRouteTableAssociationRequest(input) + err := req.Send() + return out, err +} + +const opReportInstanceStatus = "ReportInstanceStatus" + +// ReportInstanceStatusRequest generates a request for the ReportInstanceStatus operation. 
+func (c *EC2) ReportInstanceStatusRequest(input *ReportInstanceStatusInput) (req *request.Request, output *ReportInstanceStatusOutput) { + op := &request.Operation{ + Name: opReportInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReportInstanceStatusInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReportInstanceStatusOutput{} + req.Data = output + return +} + +// Submits feedback about the status of an instance. The instance must be in +// the running state. If your experience with the instance differs from the +// instance status returned by DescribeInstanceStatus, use ReportInstanceStatus +// to report your experience with the instance. Amazon EC2 collects this information +// to improve the accuracy of status checks. +// +// Use of this action does not change the value returned by DescribeInstanceStatus. +func (c *EC2) ReportInstanceStatus(input *ReportInstanceStatusInput) (*ReportInstanceStatusOutput, error) { + req, out := c.ReportInstanceStatusRequest(input) + err := req.Send() + return out, err +} + +const opRequestSpotFleet = "RequestSpotFleet" + +// RequestSpotFleetRequest generates a request for the RequestSpotFleet operation. +func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *request.Request, output *RequestSpotFleetOutput) { + op := &request.Operation{ + Name: opRequestSpotFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotFleetInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestSpotFleetOutput{} + req.Data = output + return +} + +// Creates a Spot fleet request. +// +// You can submit a single request that includes multiple launch specifications +// that vary by instance type, AMI, Availability Zone, or subnet. 
+// +// By default, the Spot fleet requests Spot instances in the Spot pool where +// the price per unit is the lowest. Each launch specification can include its +// own instance weighting that reflects the value of the instance type to your +// application workload. +// +// Alternatively, you can specify that the Spot fleet distribute the target +// capacity across the Spot pools included in its launch specifications. By +// ensuring that the Spot instances in your Spot fleet are in different Spot +// pools, you can improve the availability of your fleet. +// +// For more information, see Spot Fleet Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RequestSpotFleet(input *RequestSpotFleetInput) (*RequestSpotFleetOutput, error) { + req, out := c.RequestSpotFleetRequest(input) + err := req.Send() + return out, err +} + +const opRequestSpotInstances = "RequestSpotInstances" + +// RequestSpotInstancesRequest generates a request for the RequestSpotInstances operation. +func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req *request.Request, output *RequestSpotInstancesOutput) { + op := &request.Operation{ + Name: opRequestSpotInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RequestSpotInstancesOutput{} + req.Data = output + return +} + +// Creates a Spot instance request. Spot instances are instances that Amazon +// EC2 launches when the bid price that you specify exceeds the current Spot +// price. Amazon EC2 periodically sets the Spot price based on available Spot +// Instance capacity and current Spot instance requests. For more information, +// see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) RequestSpotInstances(input *RequestSpotInstancesInput) (*RequestSpotInstancesOutput, error) { + req, out := c.RequestSpotInstancesRequest(input) + err := req.Send() + return out, err +} + +const opResetImageAttribute = "ResetImageAttribute" + +// ResetImageAttributeRequest generates a request for the ResetImageAttribute operation. +func (c *EC2) ResetImageAttributeRequest(input *ResetImageAttributeInput) (req *request.Request, output *ResetImageAttributeOutput) { + op := &request.Operation{ + Name: opResetImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetImageAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResetImageAttributeOutput{} + req.Data = output + return +} + +// Resets an attribute of an AMI to its default value. +// +// The productCodes attribute can't be reset. +func (c *EC2) ResetImageAttribute(input *ResetImageAttributeInput) (*ResetImageAttributeOutput, error) { + req, out := c.ResetImageAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetInstanceAttribute = "ResetInstanceAttribute" + +// ResetInstanceAttributeRequest generates a request for the ResetInstanceAttribute operation. +func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) (req *request.Request, output *ResetInstanceAttributeOutput) { + op := &request.Operation{ + Name: opResetInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetInstanceAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResetInstanceAttributeOutput{} + req.Data = output + return +} + +// Resets an attribute of an instance to its default value. 
To reset the kernel +// or ramdisk, the instance must be in a stopped state. To reset the SourceDestCheck, +// the instance can be either running or stopped. +// +// The SourceDestCheck attribute controls whether source/destination checking +// is enabled. The default value is true, which means checking is enabled. This +// value must be false for a NAT instance to perform NAT. For more information, +// see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) +// in the Amazon Virtual Private Cloud User Guide. +func (c *EC2) ResetInstanceAttribute(input *ResetInstanceAttributeInput) (*ResetInstanceAttributeOutput, error) { + req, out := c.ResetInstanceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" + +// ResetNetworkInterfaceAttributeRequest generates a request for the ResetNetworkInterfaceAttribute operation. +func (c *EC2) ResetNetworkInterfaceAttributeRequest(input *ResetNetworkInterfaceAttributeInput) (req *request.Request, output *ResetNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opResetNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetNetworkInterfaceAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResetNetworkInterfaceAttributeOutput{} + req.Data = output + return +} + +// Resets a network interface attribute. You can specify only one attribute +// at a time. 
+func (c *EC2) ResetNetworkInterfaceAttribute(input *ResetNetworkInterfaceAttributeInput) (*ResetNetworkInterfaceAttributeOutput, error) { + req, out := c.ResetNetworkInterfaceAttributeRequest(input) + err := req.Send() + return out, err +} + +const opResetSnapshotAttribute = "ResetSnapshotAttribute" + +// ResetSnapshotAttributeRequest generates a request for the ResetSnapshotAttribute operation. +func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) (req *request.Request, output *ResetSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opResetSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResetSnapshotAttributeOutput{} + req.Data = output + return +} + +// Resets permission settings for the specified snapshot. +// +// For more information on modifying snapshot permissions, see Sharing Snapshots +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) ResetSnapshotAttribute(input *ResetSnapshotAttributeInput) (*ResetSnapshotAttributeOutput, error) { + req, out := c.ResetSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opRestoreAddressToClassic = "RestoreAddressToClassic" + +// RestoreAddressToClassicRequest generates a request for the RestoreAddressToClassic operation. 
+func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput) (req *request.Request, output *RestoreAddressToClassicOutput) { + op := &request.Operation{ + Name: opRestoreAddressToClassic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreAddressToClassicInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreAddressToClassicOutput{} + req.Data = output + return +} + +// Restores an Elastic IP address that was previously moved to the EC2-VPC platform +// back to the EC2-Classic platform. You cannot move an Elastic IP address that +// was originally allocated for use in EC2-VPC. The Elastic IP address must +// not be associated with an instance or network interface. You cannot restore +// an Elastic IP address that's associated with a reverse DNS record. Contact +// AWS account and billing support to remove the reverse DNS record. +func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) { + req, out := c.RestoreAddressToClassicRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" + +// RevokeSecurityGroupEgressRequest generates a request for the RevokeSecurityGroupEgress operation. +func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressInput) (req *request.Request, output *RevokeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupEgressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RevokeSecurityGroupEgressOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Removes one or more egress rules from a security group for +// EC2-VPC. 
This action doesn't apply to security groups for use in EC2-Classic. +// The values that you specify in the revoke request (for example, ports) must +// match the existing rule's values for the rule to be revoked. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +func (c *EC2) RevokeSecurityGroupEgress(input *RevokeSecurityGroupEgressInput) (*RevokeSecurityGroupEgressOutput, error) { + req, out := c.RevokeSecurityGroupEgressRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" + +// RevokeSecurityGroupIngressRequest generates a request for the RevokeSecurityGroupIngress operation. +func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngressInput) (req *request.Request, output *RevokeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RevokeSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Removes one or more ingress rules from a security group. The values that +// you specify in the revoke request (for example, ports) must match the existing +// rule's values for the rule to be removed. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. 
For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +func (c *EC2) RevokeSecurityGroupIngress(input *RevokeSecurityGroupIngressInput) (*RevokeSecurityGroupIngressOutput, error) { + req, out := c.RevokeSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opRunInstances = "RunInstances" + +// RunInstancesRequest generates a request for the RunInstances operation. +func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Request, output *Reservation) { + op := &request.Operation{ + Name: opRunInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &Reservation{} + req.Data = output + return +} + +// Launches the specified number of instances using an AMI for which you have +// permissions. +// +// When you launch an instance, it enters the pending state. After the instance +// is ready for you, it enters the running state. To check the state of your +// instance, call DescribeInstances. +// +// If you don't specify a security group when launching an instance, Amazon +// EC2 uses the default security group. For more information, see Security Groups +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// [EC2-VPC only accounts] If you don't specify a subnet in the request, we +// choose a default subnet from your default VPC for you. +// +// [EC2-Classic accounts] If you're launching into EC2-Classic and you don't +// specify an Availability Zone, we choose one for you. +// +// Linux instances have access to the public key of the key pair at boot. 
You +// can use this key to provide secure access to the instance. Amazon EC2 public +// images use this feature to provide secure access without passwords. For more +// information, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can provide optional user data when launching an instance. For more +// information, see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If any of the AMIs have a product code attached for which the user has not +// subscribed, RunInstances fails. +// +// T2 instance types can only be launched into a VPC. If you do not have a +// default VPC, or if you do not specify a subnet ID in the request, RunInstances +// fails. +// +// For more information about troubleshooting, see What To Do If An Instance +// Immediately Terminates (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), +// and Troubleshooting Connecting to Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) { + req, out := c.RunInstancesRequest(input) + err := req.Send() + return out, err +} + +const opRunScheduledInstances = "RunScheduledInstances" + +// RunScheduledInstancesRequest generates a request for the RunScheduledInstances operation. 
+func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (req *request.Request, output *RunScheduledInstancesOutput) { + op := &request.Operation{ + Name: opRunScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RunScheduledInstancesOutput{} + req.Data = output + return +} + +// Launches the specified Scheduled Instances. +// +// Before you can launch a Scheduled Instance, you must purchase it and obtain +// an identifier using PurchaseScheduledInstances. +// +// You must launch a Scheduled Instance during its scheduled time period. You +// can't stop or reboot a Scheduled Instance, but you can terminate it as needed. +// If you terminate a Scheduled Instance before the current scheduled time period +// ends, you can launch it again after a few minutes. +func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunScheduledInstancesOutput, error) { + req, out := c.RunScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStartInstances = "StartInstances" + +// StartInstancesRequest generates a request for the StartInstances operation. +func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Request, output *StartInstancesOutput) { + op := &request.Operation{ + Name: opStartInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &StartInstancesOutput{} + req.Data = output + return +} + +// Starts an Amazon EBS-backed AMI that you've previously stopped. +// +// Instances that use Amazon EBS volumes as their root devices can be quickly +// stopped and started. When an instance is stopped, the compute resources are +// released and you are not billed for hourly instance usage. 
However, your +// root partition Amazon EBS volume remains, continues to persist your data, +// and you are charged for Amazon EBS volume usage. You can restart your instance +// at any time. Each time you transition an instance from stopped to started, +// Amazon EC2 charges a full instance hour, even if transitions happen multiple +// times within a single hour. +// +// Before stopping an instance, make sure it is in a state from which it can +// be restarted. Stopping an instance does not preserve data stored in RAM. +// +// Performing this operation on an instance that uses an instance store as +// its root device returns an error. +// +// For more information, see Stopping Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) StartInstances(input *StartInstancesInput) (*StartInstancesOutput, error) { + req, out := c.StartInstancesRequest(input) + err := req.Send() + return out, err +} + +const opStopInstances = "StopInstances" + +// StopInstancesRequest generates a request for the StopInstances operation. +func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Request, output *StopInstancesOutput) { + op := &request.Operation{ + Name: opStopInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &StopInstancesOutput{} + req.Data = output + return +} + +// Stops an Amazon EBS-backed instance. Each time you transition an instance +// from stopped to started, Amazon EC2 charges a full instance hour, even if +// transitions happen multiple times within a single hour. +// +// You can't start or stop Spot instances. +// +// Instances that use Amazon EBS volumes as their root devices can be quickly +// stopped and started. When an instance is stopped, the compute resources are +// released and you are not billed for hourly instance usage. 
However, your +// root partition Amazon EBS volume remains, continues to persist your data, +// and you are charged for Amazon EBS volume usage. You can restart your instance +// at any time. +// +// Before stopping an instance, make sure it is in a state from which it can +// be restarted. Stopping an instance does not preserve data stored in RAM. +// +// Performing this operation on an instance that uses an instance store as +// its root device returns an error. +// +// You can stop, start, and terminate EBS-backed instances. You can only terminate +// instance store-backed instances. What happens to an instance differs if you +// stop it or terminate it. For example, when you stop an instance, the root +// device and any other devices attached to the instance persist. When you terminate +// an instance, the root device and any other devices attached during the instance +// launch are automatically deleted. For more information about the differences +// between stopping and terminating instances, see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information about troubleshooting, see Troubleshooting Stopping +// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, error) { + req, out := c.StopInstancesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateInstances = "TerminateInstances" + +// TerminateInstancesRequest generates a request for the TerminateInstances operation. 
+func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *request.Request, output *TerminateInstancesOutput) { + op := &request.Operation{ + Name: opTerminateInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateInstancesOutput{} + req.Data = output + return +} + +// Shuts down one or more instances. This operation is idempotent; if you terminate +// an instance more than once, each call succeeds. +// +// Terminated instances remain visible after termination (for approximately +// one hour). +// +// By default, Amazon EC2 deletes all EBS volumes that were attached when the +// instance launched. Volumes attached after instance launch continue running. +// +// You can stop, start, and terminate EBS-backed instances. You can only terminate +// instance store-backed instances. What happens to an instance differs if you +// stop it or terminate it. For example, when you stop an instance, the root +// device and any other devices attached to the instance persist. When you terminate +// an instance, any attached EBS volumes with the DeleteOnTermination block +// device mapping parameter set to true are automatically deleted. For more +// information about the differences between stopping and terminating instances, +// see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information about troubleshooting, see Troubleshooting Terminating +// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) TerminateInstances(input *TerminateInstancesInput) (*TerminateInstancesOutput, error) { + req, out := c.TerminateInstancesRequest(input) + err := req.Send() + return out, err +} + +const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" + +// UnassignPrivateIpAddressesRequest generates a request for the UnassignPrivateIpAddresses operation. +func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddressesInput) (req *request.Request, output *UnassignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opUnassignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignPrivateIpAddressesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnassignPrivateIpAddressesOutput{} + req.Data = output + return +} + +// Unassigns one or more secondary private IP addresses from a network interface. +func (c *EC2) UnassignPrivateIpAddresses(input *UnassignPrivateIpAddressesInput) (*UnassignPrivateIpAddressesOutput, error) { + req, out := c.UnassignPrivateIpAddressesRequest(input) + err := req.Send() + return out, err +} + +const opUnmonitorInstances = "UnmonitorInstances" + +// UnmonitorInstancesRequest generates a request for the UnmonitorInstances operation. +func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *request.Request, output *UnmonitorInstancesOutput) { + op := &request.Operation{ + Name: opUnmonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnmonitorInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &UnmonitorInstancesOutput{} + req.Data = output + return +} + +// Disables monitoring for a running instance. 
For more information about monitoring +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +func (c *EC2) UnmonitorInstances(input *UnmonitorInstancesInput) (*UnmonitorInstancesOutput, error) { + req, out := c.UnmonitorInstancesRequest(input) + err := req.Send() + return out, err +} + +type AcceptVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type AcceptVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connection. + VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// Describes an account attribute. +type AccountAttribute struct { + _ struct{} `type:"structure"` + + // The name of the account attribute. 
+ AttributeName *string `locationName:"attributeName" type:"string"` + + // One or more values for the account attribute. + AttributeValues []*AccountAttributeValue `locationName:"attributeValueSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AccountAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttribute) GoString() string { + return s.String() +} + +// Describes a value of an account attribute. +type AccountAttributeValue struct { + _ struct{} `type:"structure"` + + // The value of the attribute. + AttributeValue *string `locationName:"attributeValue" type:"string"` +} + +// String returns the string representation +func (s AccountAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttributeValue) GoString() string { + return s.String() +} + +// Describes a running instance in a Spot fleet. +type ActiveInstance struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` +} + +// String returns the string representation +func (s ActiveInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveInstance) GoString() string { + return s.String() +} + +// Describes an Elastic IP address. +type Address struct { + _ struct{} `type:"structure"` + + // The ID representing the allocation of the address for use with EC2-VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The ID representing the association of the address with an instance in a + // VPC. 
+ AssociationId *string `locationName:"associationId" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The ID of the instance that the address is associated with (if any). + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that owns the network interface. + NetworkInterfaceOwnerId *string `locationName:"networkInterfaceOwnerId" type:"string"` + + // The private IP address associated with the Elastic IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s Address) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Address) GoString() string { + return s.String() +} + +type AllocateAddressInput struct { + _ struct{} `type:"structure"` + + // Set to vpc to allocate the address for use with instances in a VPC. + // + // Default: The address is for use with instances in EC2-Classic. + Domain *string `type:"string" enum:"DomainType"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s AllocateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressInput) GoString() string { + return s.String() +} + +type AllocateAddressOutput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic + // IP address for use with instances in a VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s AllocateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressOutput) GoString() string { + return s.String() +} + +type AllocateHostsInput struct { + _ struct{} `type:"structure"` + + // This is enabled by default. This property allows instances to be automatically + // placed onto available Dedicated hosts, when you are launching instances without + // specifying a host ID. + // + // Default: Enabled + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` + + // The Availability Zone for the Dedicated hosts. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. 
+ ClientToken *string `locationName:"clientToken" type:"string"` + + // Specify the instance type that you want your Dedicated hosts to be configured + // for. When you specify the instance type, that is the only instance type that + // you can launch onto that host. + InstanceType *string `locationName:"instanceType" type:"string" required:"true"` + + // The number of Dedicated hosts you want to allocate to your account with these + // parameters. + Quantity *int64 `locationName:"quantity" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AllocateHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostsInput) GoString() string { + return s.String() +} + +type AllocateHostsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the allocated Dedicated host. This is used when you want to launch + // an instance onto a specific host. + HostIds []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AllocateHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostsOutput) GoString() string { + return s.String() +} + +type AssignPrivateIpAddressesInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to allow an IP address that is already assigned to another + // network interface or instance to be reassigned to the specified network interface. + AllowReassignment *bool `locationName:"allowReassignment" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // One or more IP addresses to be assigned as a secondary private IP address + // to the network interface. You can't specify this parameter when also specifying + // a number of secondary IP addresses. 
+ // + // If you don't specify an IP address, Amazon EC2 automatically selects an + // IP address within the subnet range. + PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list"` + + // The number of secondary IP addresses to assign to the network interface. + // You can't specify this parameter when also specifying private IP addresses. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +type AssignPrivateIpAddressesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +type AssociateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The allocation ID. This is required for EC2-VPC. + AllocationId *string `type:"string"` + + // [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic + // IP address that is already associated with an instance or network interface + // to be reassociated with the specified instance or network interface. Otherwise, + // the operation fails. In a VPC in an EC2-VPC-only account, reassociation is + // automatic, therefore you can specify false to ensure the operation fails + // if the Elastic IP address is already associated with another resource. 
+ AllowReassociation *bool `locationName:"allowReassociation" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you + // can specify either the instance ID or the network interface ID, but not both. + // The operation fails if you specify an instance ID unless exactly one network + // interface is attached. + InstanceId *string `type:"string"` + + // [EC2-VPC] The ID of the network interface. If the instance has more than + // one network interface, you must specify a network interface ID. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // [EC2-VPC] The primary or secondary private IP address to associate with the + // Elastic IP address. If no private IP address is specified, the Elastic IP + // address is associated with the primary private IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. This is required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s AssociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressInput) GoString() string { + return s.String() +} + +type AssociateAddressOutput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The ID that represents the association of the Elastic IP address + // with an instance. 
+ AssociationId *string `locationName:"associationId" type:"string"` +} + +// String returns the string representation +func (s AssociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressOutput) GoString() string { + return s.String() +} + +type AssociateDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DHCP options set, or default to associate no DHCP options with + // the VPC. + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsInput) GoString() string { + return s.String() +} + +type AssociateDhcpOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsOutput) GoString() string { + return s.String() +} + +type AssociateRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. 
+ RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableInput) GoString() string { + return s.String() +} + +type AssociateRouteTableOutput struct { + _ struct{} `type:"structure"` + + // The route table association ID (needed to disassociate the route table). + AssociationId *string `locationName:"associationId" type:"string"` +} + +// String returns the string representation +func (s AssociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableOutput) GoString() string { + return s.String() +} + +type AttachClassicLinkVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of one or more of the VPC's security groups. You cannot specify security + // groups from a different VPC. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"groupId" type:"list" required:"true"` + + // The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of a ClassicLink-enabled VPC. 
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcInput) GoString() string { + return s.String() +} + +type AttachClassicLinkVpcOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +type AttachInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayInput) GoString() string { + return s.String() +} + +type AttachInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayOutput) GoString() string { + return s.String() +} + +type AttachNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // The index of the device for the network interface attachment. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceInput) GoString() string { + return s.String() +} + +type AttachNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` + + // The ID of the network interface attachment. 
+ AttachmentId *string `locationName:"attachmentId" type:"string"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type AttachVolumeInput struct { + _ struct{} `type:"structure"` + + // The device name to expose to the instance (for example, /dev/sdh or xvdh). + Device *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The ID of the EBS volume. The volume and instance must be within the same + // Availability Zone. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVolumeInput) GoString() string { + return s.String() +} + +type AttachVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. 
+ VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayInput) GoString() string { + return s.String() +} + +type AttachVpnGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the attachment. + VpcAttachment *VpcAttachment `locationName:"attachment" type:"structure"` +} + +// String returns the string representation +func (s AttachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayOutput) GoString() string { + return s.String() +} + +// The value to use when a resource attribute accepts a Boolean value. +type AttributeBooleanValue struct { + _ struct{} `type:"structure"` + + // Valid values are true or false. + Value *bool `locationName:"value" type:"boolean"` +} + +// String returns the string representation +func (s AttributeBooleanValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeBooleanValue) GoString() string { + return s.String() +} + +// The value to use for a resource attribute. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // Valid values are case-sensitive and vary by action. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupEgressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. We recommend that you specify the CIDR range in + // a set of IP permissions instead. 
+ CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // We recommend that you specify the port range in a set of IP permissions instead. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // A set of IP permissions. You can't specify a destination security group and + // a CIDR IP address range. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // The IP protocol name or number. We recommend that you specify the protocol + // in a set of IP permissions instead. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The name of a destination security group. To authorize outbound access to + // a destination security group, we recommend that you use a set of IP permissions + // instead. + SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` + + // The AWS account number for a destination security group. To authorize outbound + // access to a destination security group, we recommend that you use a set of + // IP permissions instead. + SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP type number. + // We recommend that you specify the port range in a set of IP permissions instead. 
+ ToPort *int64 `locationName:"toPort" type:"integer"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupEgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupEgressInput) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupEgressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupEgressOutput) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `type:"integer"` + + // The ID of the security group. Required for a nondefault VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. + GroupName *string `type:"string"` + + // A set of IP permissions. Can be used to specify multiple rules in a single + // command. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). 
+ // (VPC only) Use -1 to specify all. + IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. For EC2-VPC, the source security group must be + // in the same VPC. + SourceSecurityGroupName *string `type:"string"` + + // [EC2-Classic, default VPC] The AWS account number for the source security + // group. For EC2-VPC, the source security group must be in the same VPC. You + // can't specify this parameter in combination with the following parameters: + // the CIDR IP address range, the IP protocol, the start of the port range, + // and the end of the port range. Creates rules that grant full ICMP, UDP, and + // TCP access. To create a rule with a specific IP protocol and port range, + // use a set of IP permissions instead. + SourceSecurityGroupOwnerId *string `type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. + ToPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes an Availability Zone. 
+type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // Any messages about the Availability Zone. + Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"` + + // The name of the region. + RegionName *string `locationName:"regionName" type:"string"` + + // The state of the Availability Zone. + State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"` + + // The name of the Availability Zone. + ZoneName *string `locationName:"zoneName" type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// Describes a message about an Availability Zone. +type AvailabilityZoneMessage struct { + _ struct{} `type:"structure"` + + // The message about the Availability Zone. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AvailabilityZoneMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZoneMessage) GoString() string { + return s.String() +} + +// The capacity information for instances launched onto the Dedicated host. +type AvailableCapacity struct { + _ struct{} `type:"structure"` + + // The total number of instances that the Dedicated host supports. + AvailableInstanceCapacity []*InstanceCapacity `locationName:"availableInstanceCapacity" locationNameList:"item" type:"list"` + + // The number of vCPUs available on the Dedicated host. 
+ AvailableVCpus *int64 `locationName:"availableVCpus" type:"integer"` +} + +// String returns the string representation +func (s AvailableCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailableCapacity) GoString() string { + return s.String() +} + +type BlobAttributeValue struct { + _ struct{} `type:"structure"` + + Value []byte `locationName:"value" type:"blob"` +} + +// String returns the string representation +func (s BlobAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlobAttributeValue) GoString() string { + return s.String() +} + +// Describes a block device mapping. +type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `locationName:"deviceName" type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsBlockDevice `locationName:"ebs" type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `locationName:"noDevice" type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. An instance type with 2 available instance store volumes + // can specify mappings for ephemeral0 and ephemeral1.The number of available + // instance store volumes depends on the instance type. After you connect to + // the instance, you must mount the volume. + // + // Constraints: For M3 instances, you must specify instance store volumes in + // the block device mapping for the instance. When you launch an M3 instance, + // we ignore any instance store volumes specified in the block device mapping + // for the AMI. 
+ VirtualName *string `locationName:"virtualName" type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +type BundleInstanceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to bundle. + // + // Type: String + // + // Default: None + // + // Required: Yes + InstanceId *string `type:"string" required:"true"` + + // The bucket in which to store the AMI. You can specify a bucket that you already + // own or a new bucket that Amazon EC2 creates on your behalf. If you specify + // a bucket that belongs to someone else, Amazon EC2 returns an error. + Storage *Storage `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BundleInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleInstanceInput) GoString() string { + return s.String() +} + +type BundleInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the bundle task. + BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` +} + +// String returns the string representation +func (s BundleInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleInstanceOutput) GoString() string { + return s.String() +} + +// Describes a bundle task. +type BundleTask struct { + _ struct{} `type:"structure"` + + // The ID of the bundle task. 
+ BundleId *string `locationName:"bundleId" type:"string"` + + // If the task fails, a description of the error. + BundleTaskError *BundleTaskError `locationName:"error" type:"structure"` + + // The ID of the instance associated with this bundle task. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The level of task completion, as a percent (for example, 20%). + Progress *string `locationName:"progress" type:"string"` + + // The time this task started. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The state of the task. + State *string `locationName:"state" type:"string" enum:"BundleTaskState"` + + // The Amazon S3 storage locations. + Storage *Storage `locationName:"storage" type:"structure"` + + // The time of the most recent update for the task. + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s BundleTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTask) GoString() string { + return s.String() +} + +// Describes an error for BundleInstance. +type BundleTaskError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BundleTaskError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTaskError) GoString() string { + return s.String() +} + +type CancelBundleTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the bundle task. + BundleId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CancelBundleTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskInput) GoString() string { + return s.String() +} + +type CancelBundleTaskOutput struct { + _ struct{} `type:"structure"` + + // Information about the bundle task. + BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` +} + +// String returns the string representation +func (s CancelBundleTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskOutput) GoString() string { + return s.String() +} + +type CancelConversionTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the conversion task. + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The reason for canceling the conversion task. 
+ ReasonMessage *string `locationName:"reasonMessage" type:"string"` +} + +// String returns the string representation +func (s CancelConversionTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelConversionTaskInput) GoString() string { + return s.String() +} + +type CancelConversionTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelConversionTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelConversionTaskOutput) GoString() string { + return s.String() +} + +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the export task. This is the ID returned by CreateInstanceExportTask. + ExportTaskId *string `locationName:"exportTaskId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +type CancelImportTaskInput struct { + _ struct{} `type:"structure"` + + // The reason for canceling the task. + CancelReason *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // The ID of the import image or import snapshot task to be canceled. + ImportTaskId *string `type:"string"` +} + +// String returns the string representation +func (s CancelImportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelImportTaskInput) GoString() string { + return s.String() +} + +type CancelImportTaskOutput struct { + _ struct{} `type:"structure"` + + // The ID of the task being canceled. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The current state of the task being canceled. + PreviousState *string `locationName:"previousState" type:"string"` + + // The current state of the task being canceled. + State *string `locationName:"state" type:"string"` +} + +// String returns the string representation +func (s CancelImportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelImportTaskOutput) GoString() string { + return s.String() +} + +type CancelReservedInstancesListingInput struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance listing. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingInput) GoString() string { + return s.String() +} + +type CancelReservedInstancesListingOutput struct { + _ struct{} `type:"structure"` + + // The Reserved Instance listing. 
+ ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet error. +type CancelSpotFleetRequestsError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" required:"true" enum:"CancelBatchErrorCode"` + + // The description for the error code. + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsError) GoString() string { + return s.String() +} + +// Describes a Spot fleet request that was not successfully canceled. +type CancelSpotFleetRequestsErrorItem struct { + _ struct{} `type:"structure"` + + // The error. + Error *CancelSpotFleetRequestsError `locationName:"error" type:"structure" required:"true"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsErrorItem) GoString() string { + return s.String() +} + +// Contains the parameters for CancelSpotFleetRequests. +type CancelSpotFleetRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the Spot fleet requests. + SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"` + + // Indicates whether to terminate instances for a Spot fleet request if it is + // canceled successfully. + TerminateInstances *bool `locationName:"terminateInstances" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of CancelSpotFleetRequests. +type CancelSpotFleetRequestsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Spot fleet requests that are successfully canceled. + SuccessfulFleetRequests []*CancelSpotFleetRequestsSuccessItem `locationName:"successfulFleetRequestSet" locationNameList:"item" type:"list"` + + // Information about the Spot fleet requests that are not successfully canceled. + UnsuccessfulFleetRequests []*CancelSpotFleetRequestsErrorItem `locationName:"unsuccessfulFleetRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet request that was successfully canceled. +type CancelSpotFleetRequestsSuccessItem struct { + _ struct{} `type:"structure"` + + // The current state of the Spot fleet request. 
+ CurrentSpotFleetRequestState *string `locationName:"currentSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + // The previous state of the Spot fleet request. + PreviousSpotFleetRequestState *string `locationName:"previousSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) GoString() string { + return s.String() +} + +// Contains the parameters for CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more Spot instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list" required:"true"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot instance requests. 
+ CancelledSpotInstanceRequests []*CancelledSpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// Describes a request to cancel a Spot instance. +type CancelledSpotInstanceRequest struct { + _ struct{} `type:"structure"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The state of the Spot instance request. + State *string `locationName:"state" type:"string" enum:"CancelSpotInstanceRequestState"` +} + +// String returns the string representation +func (s CancelledSpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelledSpotInstanceRequest) GoString() string { + return s.String() +} + +// Describes the ClassicLink DNS support status of a VPC. +type ClassicLinkDnsSupport struct { + _ struct{} `type:"structure"` + + // Indicates whether ClassicLink DNS support is enabled for the VPC. + ClassicLinkDnsSupported *bool `locationName:"classicLinkDnsSupported" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkDnsSupport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkDnsSupport) GoString() string { + return s.String() +} + +// Describes a linked EC2-Classic instance. +type ClassicLinkInstance struct { + _ struct{} `type:"structure"` + + // A list of security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. 
+ InstanceId *string `locationName:"instanceId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkInstance) GoString() string { + return s.String() +} + +// Describes the client-specific data. +type ClientData struct { + _ struct{} `type:"structure"` + + // A user-defined comment about the disk upload. + Comment *string `type:"string"` + + // The time that the disk upload ends. + UploadEnd *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The size of the uploaded disk image, in GiB. + UploadSize *float64 `type:"double"` + + // The time that the disk upload starts. + UploadStart *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ClientData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientData) GoString() string { + return s.String() +} + +type ConfirmProductInstanceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The product code. This must be a product code that you own. 
+ ProductCode *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmProductInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceInput) GoString() string { + return s.String() +} + +type ConfirmProductInstanceOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID of the instance owner. This is only present if the product + // code is attached to the instance. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The return value of the request. Returns true if the specified product code + // is owned by the requester and associated with the specified instance. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ConfirmProductInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceOutput) GoString() string { + return s.String() +} + +// Describes a conversion task. +type ConversionTask struct { + _ struct{} `type:"structure"` + + // The ID of the conversion task. + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // The time when the task expires. If the upload isn't complete before the expiration + // time, we automatically cancel the task. + ExpirationTime *string `locationName:"expirationTime" type:"string"` + + // If the task is for importing an instance, this contains information about + // the import instance task. + ImportInstance *ImportInstanceTaskDetails `locationName:"importInstance" type:"structure"` + + // If the task is for importing a volume, this contains information about the + // import volume task. + ImportVolume *ImportVolumeTaskDetails `locationName:"importVolume" type:"structure"` + + // The state of the conversion task. 
+ State *string `locationName:"state" type:"string" required:"true" enum:"ConversionTaskState"` + + // The status message related to the conversion task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the task. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ConversionTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConversionTask) GoString() string { + return s.String() +} + +type CopyImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `type:"string"` + + // A description for the new AMI in the destination region. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the destination snapshots of the copied image should be + // encrypted. The default CMK for EBS is used unless a non-default AWS Key Management + // Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see + // Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. 
+ Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when + // encrypting the snapshots of an image during a copy operation. This parameter + // is only required if you want to use a non-default CMK; if this parameter + // is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms + // namespace, followed by the region of the CMK, the AWS account ID of the CMK + // owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // The specified CMK must exist in the region that the snapshot is being copied + // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The name of the new AMI in the destination region. + Name *string `type:"string" required:"true"` + + // The ID of the AMI to copy. + SourceImageId *string `type:"string" required:"true"` + + // The name of the region that contains the AMI to copy. + SourceRegion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageInput) GoString() string { + return s.String() +} + +type CopyImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AMI. + ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s CopyImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageOutput) GoString() string { + return s.String() +} + +type CopySnapshotInput struct { + _ struct{} `type:"structure"` + + // A description for the EBS snapshot. 
+ Description *string `type:"string"` + + // The destination region to use in the PresignedUrl parameter of a snapshot + // copy operation. This parameter is only valid for specifying the destination + // region in a PresignedUrl parameter, where it is required. + // + // CopySnapshot sends the snapshot copy to the regional endpoint that you + // send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS + // CLI, this is specified with the --region parameter or the default region + // in your AWS configuration file). + DestinationRegion *string `locationName:"destinationRegion" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the destination snapshot should be encrypted. There is + // no way to create an unencrypted snapshot copy from an encrypted snapshot; + // however, you can encrypt a copy of an unencrypted snapshot with this flag. + // The default CMK for EBS is used unless a non-default AWS Key Management Service + // (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon + // EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when + // creating the snapshot copy. This parameter is only required if you want to + // use a non-default CMK; if this parameter is not specified, the default CMK + // for EBS is used. The ARN contains the arn:aws:kms namespace, followed by + // the region of the CMK, the AWS account ID of the CMK owner, the key namespace, + // and then the CMK ID. 
For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // The specified CMK must exist in the region that the snapshot is being copied + // to. If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The pre-signed URL that facilitates copying an encrypted snapshot. This parameter + // is only required when copying an encrypted snapshot with the Amazon EC2 Query + // API; it is available as an optional parameter in all other cases. The PresignedUrl + // should use the snapshot source endpoint, the CopySnapshot action, and include + // the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The + // PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots + // are stored in Amazon S3, the signing algorithm for this parameter uses the + // same logic that is described in Authenticating Requests by Using Query Parameters + // (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + // in the Amazon Simple Storage Service API Reference. An invalid or improperly + // signed PresignedUrl will cause the copy operation to fail asynchronously, + // and the snapshot will move to an error state. + PresignedUrl *string `locationName:"presignedUrl" type:"string"` + + // The ID of the region that contains the snapshot to be copied. + SourceRegion *string `type:"string" required:"true"` + + // The ID of the EBS snapshot to copy. + SourceSnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopySnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotInput) GoString() string { + return s.String() +} + +type CopySnapshotOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new snapshot. 
+ SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s CopySnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotOutput) GoString() string { + return s.String() +} + +type CreateCustomerGatewayInput struct { + _ struct{} `type:"structure"` + + // For devices that support BGP, the customer gateway's BGP ASN. + // + // Default: 65000 + BgpAsn *int64 `type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Internet-routable IP address for the customer gateway's outside interface. + // The address must be static. + PublicIp *string `locationName:"IpAddress" type:"string" required:"true"` + + // The type of VPN connection that this customer gateway supports (ipsec.1). + Type *string `type:"string" required:"true" enum:"GatewayType"` +} + +// String returns the string representation +func (s CreateCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayInput) GoString() string { + return s.String() +} + +type CreateCustomerGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the customer gateway. 
+ CustomerGateway *CustomerGateway `locationName:"customerGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayOutput) GoString() string { + return s.String() +} + +type CreateDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // A DHCP configuration option. + DhcpConfigurations []*NewDhcpConfiguration `locationName:"dhcpConfiguration" locationNameList:"item" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CreateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsInput) GoString() string { + return s.String() +} + +type CreateDhcpOptionsOutput struct { + _ struct{} `type:"structure"` + + // A set of DHCP options. + DhcpOptions *DhcpOptions `locationName:"dhcpOptions" type:"structure"` +} + +// String returns the string representation +func (s CreateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsOutput) GoString() string { + return s.String() +} + +type CreateFlowLogsInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). 
+ ClientToken *string `type:"string"` + + // The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs + // log group. + DeliverLogsPermissionArn *string `type:"string" required:"true"` + + // The name of the CloudWatch log group. + LogGroupName *string `type:"string" required:"true"` + + // One or more subnet, network interface, or VPC IDs. + ResourceIds []*string `locationName:"ResourceId" locationNameList:"item" type:"list" required:"true"` + + // The type of resource on which to create the flow log. + ResourceType *string `type:"string" required:"true" enum:"FlowLogsResourceType"` + + // The type of traffic to log. + TrafficType *string `type:"string" required:"true" enum:"TrafficType"` +} + +// String returns the string representation +func (s CreateFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsInput) GoString() string { + return s.String() +} + +type CreateFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the flow logs. + FlowLogIds []*string `locationName:"flowLogIdSet" locationNameList:"item" type:"list"` + + // Information about the flow logs that could not be created successfully. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsOutput) GoString() string { + return s.String() +} + +type CreateImageInput struct { + _ struct{} `type:"structure"` + + // Information about one or more block device mappings. 
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // A description for the new image. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // A name for the new image. + // + // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets + // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), + // at-signs (@), or underscores(_) + Name *string `locationName:"name" type:"string" required:"true"` + + // By default, this parameter is set to false, which means Amazon EC2 attempts + // to shut down the instance cleanly before image creation and then reboots + // the instance. When the parameter is set to true, Amazon EC2 doesn't shut + // down the instance before creating the image. When this option is used, file + // system integrity on the created image can't be guaranteed. + NoReboot *bool `locationName:"noReboot" type:"boolean"` +} + +// String returns the string representation +func (s CreateImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageInput) GoString() string { + return s.String() +} + +type CreateImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AMI. 
+ ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s CreateImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageOutput) GoString() string { + return s.String() +} + +type CreateInstanceExportTaskInput struct { + _ struct{} `type:"structure"` + + // A description for the conversion task or the resource being exported. The + // maximum length is 255 bytes. + Description *string `locationName:"description" type:"string"` + + // The format and location for an instance export task. + ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The target virtualization environment. + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskInput) GoString() string { + return s.String() +} + +type CreateInstanceExportTaskOutput struct { + _ struct{} `type:"structure"` + + // Information about the instance export task. + ExportTask *ExportTask `locationName:"exportTask" type:"structure"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskOutput) GoString() string { + return s.String() +} + +type CreateInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CreateInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayInput) GoString() string { + return s.String() +} + +type CreateInternetGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the Internet gateway. + InternetGateway *InternetGateway `locationName:"internetGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayOutput) GoString() string { + return s.String() +} + +type CreateKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + // + // Constraints: Up to 255 ASCII characters + KeyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairInput) GoString() string { + return s.String() +} + +// Describes a key pair. +type CreateKeyPairOutput struct { + _ struct{} `type:"structure"` + + // The SHA-1 digest of the DER encoded private key. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // An unencrypted PEM encoded RSA private key. 
+ KeyMaterial *string `locationName:"keyMaterial" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` +} + +// String returns the string representation +func (s CreateKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairOutput) GoString() string { + return s.String() +} + +type CreateNatGatewayInput struct { + _ struct{} `type:"structure"` + + // The allocation ID of an Elastic IP address to associate with the NAT gateway. + // If the Elastic IP address is associated with another resource, you must first + // disassociate it. + AllocationId *string `type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraint: Maximum 64 ASCII characters. + ClientToken *string `type:"string"` + + // The subnet in which to create the NAT gateway. + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNatGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNatGatewayInput) GoString() string { + return s.String() +} + +type CreateNatGatewayOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier to ensure the idempotency of the request. + // Only returned if a client token was provided in the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the NAT gateway. 
+ NatGateway *NatGateway `locationName:"natGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateNatGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNatGatewayOutput) GoString() string { + return s.String() +} + +type CreateNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). + CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether this is an egress rule (rule is applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP type and code. Required if specifying ICMP for the + // protocol. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol. A value of -1 means all protocols. + Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number for the entry (for example, 100). ACL entries are processed + // in ascending order by rule number. 
+ // + // Constraints: Positive integer from 1 to 32766 + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryInput) GoString() string { + return s.String() +} + +type CreateNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type CreateNetworkAclInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclInput) GoString() string { + return s.String() +} + +type CreateNetworkAclOutput struct { + _ struct{} `type:"structure"` + + // Information about the network ACL. 
+ NetworkAcl *NetworkAcl `locationName:"networkAcl" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclOutput) GoString() string { + return s.String() +} + +type CreateNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // A description for the network interface. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The primary private IP address of the network interface. If you don't specify + // an IP address, Amazon EC2 selects one for you from the subnet range. If you + // specify an IP address, you cannot indicate any IP addresses specified in + // privateIpAddresses as primary (only one IP address can be designated as primary). + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // One or more private IP addresses. + PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddresses" locationNameList:"item" type:"list"` + + // The number of secondary private IP addresses to assign to a network interface. + // When you specify a number of secondary IP addresses, Amazon EC2 selects these + // IP addresses within the subnet range. You can't specify this option and specify + // more than one private IP address using privateIpAddresses. + // + // The number of IP addresses you can assign to a network interface varies + // by instance type. 
For more information, see Private IP Addresses Per ENI + // Per Instance Type (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) + // in the Amazon Elastic Compute Cloud User Guide. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + // The ID of the subnet to associate with the network interface. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceInput) GoString() string { + return s.String() +} + +type CreateNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` + + // Information about the network interface. + NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type CreatePlacementGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A name for the placement group. + // + // Constraints: Up to 255 ASCII characters + GroupName *string `locationName:"groupName" type:"string" required:"true"` + + // The placement strategy. 
+ Strategy *string `locationName:"strategy" type:"string" required:"true" enum:"PlacementStrategy"` +} + +// String returns the string representation +func (s CreatePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupInput) GoString() string { + return s.String() +} + +type CreatePlacementGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreatePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupOutput) GoString() string { + return s.String() +} + +type CreateReservedInstancesListingInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of your + // listings. This helps avoid duplicate listings. For more information, see + // Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The number of instances that are a part of a Reserved Instance account to + // be listed in the Reserved Instance Marketplace. This number should be less + // than or equal to the instance count associated with the Reserved Instance + // ID specified in this call. + InstanceCount *int64 `locationName:"instanceCount" type:"integer" required:"true"` + + // A list specifying the price of the Reserved Instance for each month remaining + // in the Reserved Instance term. + PriceSchedules []*PriceScheduleSpecification `locationName:"priceSchedules" locationNameList:"item" type:"list" required:"true"` + + // The ID of the active Reserved Instance. 
+ ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingInput) GoString() string { + return s.String() +} + +type CreateReservedInstancesListingOutput struct { + _ struct{} `type:"structure"` + + // Information about the Reserved Instance listing. + ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingOutput) GoString() string { + return s.String() +} + +type CreateRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR address block used for the destination match. Routing decisions + // are based on the most specific match. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of an Internet gateway or virtual private gateway attached to your + // VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. The operation fails if you specify + // an instance ID unless exactly one network interface is attached. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of a NAT gateway. 
+ NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table for the route. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s CreateRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteInput) GoString() string { + return s.String() +} + +type CreateRouteOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s CreateRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteOutput) GoString() string { + return s.String() +} + +type CreateRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableInput) GoString() string { + return s.String() +} + +type CreateRouteTableOutput struct { + _ struct{} `type:"structure"` + + // Information about the route table. 
+ RouteTable *RouteTable `locationName:"routeTable" type:"structure"` +} + +// String returns the string representation +func (s CreateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableOutput) GoString() string { + return s.String() +} + +type CreateSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the security group. This is informational only. + // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + Description *string `locationName:"GroupDescription" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the security group. + // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + GroupName *string `type:"string" required:"true"` + + // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CreateSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupInput) GoString() string { + return s.String() +} + +type CreateSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // The ID of the security group. 
+ GroupId *string `locationName:"groupId" type:"string"` +} + +// String returns the string representation +func (s CreateSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupOutput) GoString() string { + return s.String() +} + +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // A description for the snapshot. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateSpotDatafeedSubscription. +type CreateSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket in which to store the Spot instance data feed. + Bucket *string `locationName:"bucket" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A prefix for the data feed file names. 
+ Prefix *string `locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// Contains the output of CreateSpotDatafeedSubscription. +type CreateSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The Spot instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +type CreateSubnetInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the subnet. + // + // Default: AWS selects one for you. If you create more than one subnet in + // your VPC, we may not necessarily select a different zone for each subnet. + AvailabilityZone *string `type:"string"` + + // The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. 
+ VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetInput) GoString() string { + return s.String() +} + +type CreateSubnetOutput struct { + _ struct{} `type:"structure"` + + // Information about the subnet. + Subnet *Subnet `locationName:"subnet" type:"structure"` +} + +// String returns the string representation +func (s CreateSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetOutput) GoString() string { + return s.String() +} + +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more resources to tag. For example, ami-1a2b3c4d. + Resources []*string `locationName:"ResourceId" type:"list" required:"true"` + + // One or more tags. The value parameter is required, but if you don't want + // the tag to have a value, specify the parameter with no value, and we set + // the value to an empty string. 
+ Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +type CreateVolumeInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to create the volume. Use DescribeAvailabilityZones + // to list the Availability Zones that are currently available to you. + AvailabilityZone *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes + // may only be attached to instances that support Amazon EBS encryption. Volumes + // that are created from encrypted snapshots are automatically encrypted. There + // is no way to create an encrypted volume from an unencrypted snapshot or vice + // versa. If your AMI uses encrypted volumes, you can only launch it on supported + // instance types. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // Only valid for Provisioned IOPS (SSD) volumes. 
The number of I/O operations + // per second (IOPS) to provision for the volume, with a maximum ratio of 30 + // IOPS/GiB. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes + Iops *int64 `type:"integer"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use when creating the encrypted volume. This parameter is only + // required if you want to use a non-default CMK; if this parameter is not specified, + // the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace, + // followed by the region of the CMK, the AWS account ID of the CMK owner, the + // key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // If a KmsKeyId is specified, the Encrypted flag must also be set. + KmsKeyId *string `type:"string"` + + // The size of the volume, in GiBs. + // + // Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 + // for io1 volumes. If you specify a snapshot, the volume size must be equal + // to or larger than the snapshot size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + Size *int64 `type:"integer"` + + // The snapshot from which to create the volume. + SnapshotId *string `type:"string"` + + // The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for + // Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes. + // + // Default: standard + VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s CreateVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumeInput) GoString() string { + return s.String() +} + +// Describes the user or group to be added or removed from the permissions for +// a volume. 
+type CreateVolumePermission struct { + _ struct{} `type:"structure"` + + // The specific group that is to be added or removed from a volume's list of + // create volume permissions. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The specific AWS account ID that is to be added or removed from a volume's + // list of create volume permissions. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s CreateVolumePermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumePermission) GoString() string { + return s.String() +} + +// Describes modifications to the permissions for a volume. +type CreateVolumePermissionModifications struct { + _ struct{} `type:"structure"` + + // Adds a specific AWS account ID or group to a volume's list of create volume + // permissions. + Add []*CreateVolumePermission `locationNameList:"item" type:"list"` + + // Removes a specific AWS account ID or group from a volume's list of create + // volume permissions. + Remove []*CreateVolumePermission `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateVolumePermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVolumePermissionModifications) GoString() string { + return s.String() +} + +type CreateVpcEndpointInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // A policy to attach to the endpoint that controls access to the service. The + // policy must be in valid JSON format. If this parameter is not specified, + // we attach a default policy that allows full access to the service. + PolicyDocument *string `type:"string"` + + // One or more route table IDs. + RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` + + // The AWS service name, in the form com.amazonaws.region.service. To get a + // list of available services, use the DescribeVpcEndpointServices request. + ServiceName *string `type:"string" required:"true"` + + // The ID of the VPC in which the endpoint will be used. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpcEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointInput) GoString() string { + return s.String() +} + +type CreateVpcEndpointOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the endpoint. + VpcEndpoint *VpcEndpoint `locationName:"vpcEndpoint" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointOutput) GoString() string { + return s.String() +} + +type CreateVpcInput struct { + _ struct{} `type:"structure"` + + // The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. 
+ CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The supported tenancy options for instances launched into the VPC. A value + // of default means that instances can be launched with any tenancy; a value + // of dedicated means all instances launched into the VPC are launched as dedicated + // tenancy instances regardless of the tenancy assigned to the instance at launch. + // Dedicated tenancy instances run on single-tenant hardware. + // + // Important: The host value cannot be used with this parameter. Use the default + // or dedicated values only. + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` +} + +// String returns the string representation +func (s CreateVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcInput) GoString() string { + return s.String() +} + +type CreateVpcOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC. + Vpc *Vpc `locationName:"vpc" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcOutput) GoString() string { + return s.String() +} + +type CreateVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The AWS account ID of the owner of the peer VPC. + // + // Default: Your AWS account ID + PeerOwnerId *string `locationName:"peerOwnerId" type:"string"` + + // The ID of the VPC with which you are creating the VPC peering connection. + PeerVpcId *string `locationName:"peerVpcId" type:"string"` + + // The ID of the requester VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s CreateVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type CreateVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connection. + VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +type CreateVpnConnectionInput struct { + _ struct{} `type:"structure"` + + // The ID of the customer gateway. + CustomerGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the VPN connection requires static routes. If you are creating + // a VPN connection for a device that does not support BGP, you must specify + // true. 
+ // + // Default: false + Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"` + + // The type of VPN connection (ipsec.1). + Type *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionInput) GoString() string { + return s.String() +} + +type CreateVpnConnectionOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionOutput) GoString() string { + return s.String() +} + +type CreateVpnConnectionRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer network. + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. 
+ VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteInput) GoString() string { + return s.String() +} + +type CreateVpnConnectionRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +type CreateVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the virtual private gateway. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of VPN connection this virtual private gateway supports. + Type *string `type:"string" required:"true" enum:"GatewayType"` +} + +// String returns the string representation +func (s CreateVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayInput) GoString() string { + return s.String() +} + +type CreateVpnGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the virtual private gateway. 
+ VpnGateway *VpnGateway `locationName:"vpnGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a customer gateway. +type CustomerGateway struct { + _ struct{} `type:"structure"` + + // The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number + // (ASN). + BgpAsn *string `locationName:"bgpAsn" type:"string"` + + // The ID of the customer gateway. + CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The Internet-routable IP address of the customer gateway's outside interface. + IpAddress *string `locationName:"ipAddress" type:"string"` + + // The current state of the customer gateway (pending | available | deleting + // | deleted). + State *string `locationName:"state" type:"string"` + + // Any tags assigned to the customer gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the customer gateway supports (ipsec.1). + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s CustomerGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerGateway) GoString() string { + return s.String() +} + +type DeleteCustomerGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the customer gateway. + CustomerGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayInput) GoString() string { + return s.String() +} + +type DeleteCustomerGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayOutput) GoString() string { + return s.String() +} + +type DeleteDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DHCP options set. + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsInput) GoString() string { + return s.String() +} + +type DeleteDhcpOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsOutput) GoString() string { + return s.String() +} + +type DeleteFlowLogsInput struct { + _ struct{} `type:"structure"` + + // One or more flow log IDs. 
+ FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsInput) GoString() string { + return s.String() +} + +type DeleteFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Information about the flow logs that could not be deleted successfully. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsOutput) GoString() string { + return s.String() +} + +type DeleteInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. 
+ InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayInput) GoString() string { + return s.String() +} + +type DeleteInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayOutput) GoString() string { + return s.String() +} + +type DeleteKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the key pair. + KeyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairInput) GoString() string { + return s.String() +} + +type DeleteKeyPairOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairOutput) GoString() string { + return s.String() +} + +type DeleteNatGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the NAT gateway. 
+ NatGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNatGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNatGatewayInput) GoString() string { + return s.String() +} + +type DeleteNatGatewayOutput struct { + _ struct{} `type:"structure"` + + // The ID of the NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` +} + +// String returns the string representation +func (s DeleteNatGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNatGatewayOutput) GoString() string { + return s.String() +} + +type DeleteNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the rule is an egress rule. + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // The rule number of the entry to delete. 
+ RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryInput) GoString() string { + return s.String() +} + +type DeleteNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type DeleteNetworkAclInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network ACL. 
+ NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclInput) GoString() string { + return s.String() +} + +type DeleteNetworkAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclOutput) GoString() string { + return s.String() +} + +type DeleteNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. 
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceInput) GoString() string { + return s.String() +} + +type DeleteNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type DeletePlacementGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the placement group. + GroupName *string `locationName:"groupName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupInput) GoString() string { + return s.String() +} + +type DeletePlacementGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupOutput) GoString() string { + return s.String() +} + +type DeleteRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR range for the route. 
The value you specify must match the CIDR for + // the route exactly. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteInput) GoString() string { + return s.String() +} + +type DeleteRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteOutput) GoString() string { + return s.String() +} + +type DeleteRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. 
+ RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableInput) GoString() string { + return s.String() +} + +type DeleteRouteTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableOutput) GoString() string { + return s.String() +} + +type DeleteSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the security group. Required for a nondefault VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. You can specify + // either the security group name or the security group ID. 
+ GroupName *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupOutput) GoString() string { + return s.String() +} + +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteSpotDatafeedSubscription. +type DeleteSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +type DeleteSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteSubnetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the subnet. 
+ SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetInput) GoString() string { + return s.String() +} + +type DeleteSubnetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the resource. For example, ami-1a2b3c4d. You can specify more than + // one resource ID. + Resources []*string `locationName:"resourceId" type:"list" required:"true"` + + // One or more tags to delete. If you omit the value parameter, we delete the + // tag regardless of its value. If you specify this parameter with an empty + // string as the value, we delete the key only if its value is an empty string. 
+ Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DeleteVolumeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeInput) GoString() string { + return s.String() +} + +type DeleteVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeOutput) GoString() string { + return s.String() +} + +type DeleteVpcEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsInput) GoString() string { + return s.String() +} + +type DeleteVpcEndpointsOutput struct { + _ struct{} `type:"structure"` + + // Information about the endpoints that were not successfully deleted. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsOutput) GoString() string { + return s.String() +} + +type DeleteVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. 
+ VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcInput) GoString() string { + return s.String() +} + +type DeleteVpcOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcOutput) GoString() string { + return s.String() +} + +type DeleteVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type DeleteVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPN connection. + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionInput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionOutput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer network. + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. 
+ VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteInput) GoString() string { + return s.String() +} + +type DeleteVpnConnectionRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +type DeleteVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayInput) GoString() string { + return s.String() +} + +type DeleteVpnGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayOutput) GoString() string { + return s.String() +} + +type DeregisterImageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageInput) GoString() string { + return s.String() +} + +type DeregisterImageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageOutput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` + + // One or more account attribute names. + AttributeNames []*string `locationName:"attributeName" locationNameList:"attributeName" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more account attributes. 
+ AccountAttributes []*AccountAttribute `locationName:"accountAttributeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +type DescribeAddressesInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] One or more allocation IDs. + // + // Default: Describes all your Elastic IP addresses. + AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // allocation-id - [EC2-VPC] The allocation ID for the address. + // + // association-id - [EC2-VPC] The association ID for the address. + // + // domain - Indicates whether the address is for use in EC2-Classic (standard) + // or in a VPC (vpc). + // + // instance-id - The ID of the instance the address is associated with, if + // any. + // + // network-interface-id - [EC2-VPC] The ID of the network interface that + // the address is associated with, if any. + // + // network-interface-owner-id - The AWS account ID of the owner. + // + // private-ip-address - [EC2-VPC] The private IP address associated with + // the Elastic IP address. + // + // public-ip - The Elastic IP address. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // [EC2-Classic] One or more Elastic IP addresses. + // + // Default: Describes all your Elastic IP addresses. 
+ PublicIps []*string `locationName:"PublicIp" locationNameList:"PublicIp" type:"list"` +} + +// String returns the string representation +func (s DescribeAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesInput) GoString() string { + return s.String() +} + +type DescribeAddressesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more Elastic IP addresses. + Addresses []*Address `locationName:"addressesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesOutput) GoString() string { + return s.String() +} + +type DescribeAvailabilityZonesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // message - Information about the Availability Zone. + // + // region-name - The name of the region for the Availability Zone (for example, + // us-east-1). + // + // state - The state of the Availability Zone (available | information | + // impaired | unavailable). + // + // zone-name - The name of the Availability Zone (for example, us-east-1a). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The names of one or more Availability Zones. 
+ ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"` +} + +// String returns the string representation +func (s DescribeAvailabilityZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityZonesInput) GoString() string { + return s.String() +} + +type DescribeAvailabilityZonesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more Availability Zones. + AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAvailabilityZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityZonesOutput) GoString() string { + return s.String() +} + +type DescribeBundleTasksInput struct { + _ struct{} `type:"structure"` + + // One or more bundle task IDs. + // + // Default: Describes all your bundle tasks. + BundleIds []*string `locationName:"BundleId" locationNameList:"BundleId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // bundle-id - The ID of the bundle task. + // + // error-code - If the task failed, the error code returned. + // + // error-message - If the task failed, the error message returned. + // + // instance-id - The ID of the instance. + // + // progress - The level of task completion, as a percentage (for example, + // 20%). + // + // s3-bucket - The Amazon S3 bucket to store the AMI. + // + // s3-prefix - The beginning of the AMI name. 
+ // + // start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z). + // + // state - The state of the task (pending | waiting-for-shutdown | bundling + // | storing | cancelling | complete | failed). + // + // update-time - The time of the most recent update for the task. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeBundleTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksInput) GoString() string { + return s.String() +} + +type DescribeBundleTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more bundle tasks. + BundleTasks []*BundleTask `locationName:"bundleInstanceTasksSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeBundleTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksOutput) GoString() string { + return s.String() +} + +type DescribeClassicLinkInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // group-id - The ID of a VPC security group that's associated with the instance. + // + // instance-id - The ID of the instance. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC that the instance is linked to. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesInput) GoString() string { + return s.String() +} + +type DescribeClassicLinkInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more linked EC2-Classic instances. 
+ Instances []*ClassicLinkInstance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesOutput) GoString() string { + return s.String() +} + +type DescribeConversionTasksInput struct { + _ struct{} `type:"structure"` + + // One or more conversion task IDs. + ConversionTaskIds []*string `locationName:"conversionTaskId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeConversionTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksInput) GoString() string { + return s.String() +} + +type DescribeConversionTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion tasks. 
+ ConversionTasks []*ConversionTask `locationName:"conversionTasks" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeConversionTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksOutput) GoString() string { + return s.String() +} + +type DescribeCustomerGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more customer gateway IDs. + // + // Default: Describes all your customer gateways. + CustomerGatewayIds []*string `locationName:"CustomerGatewayId" locationNameList:"CustomerGatewayId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous + // System Number (ASN). + // + // customer-gateway-id - The ID of the customer gateway. + // + // ip-address - The IP address of the customer gateway's Internet-routable + // external interface. + // + // state - The state of the customer gateway (pending | available | deleting + // | deleted). + // + // type - The type of customer gateway. Currently, the only supported type + // is ipsec.1. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). 
If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysInput) GoString() string { + return s.String() +} + +type DescribeCustomerGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more customer gateways. + CustomerGateways []*CustomerGateway `locationName:"customerGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysOutput) GoString() string { + return s.String() +} + +type DescribeDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The IDs of one or more DHCP options sets. + // + // Default: Describes all your DHCP options sets. + DhcpOptionsIds []*string `locationName:"DhcpOptionsId" locationNameList:"DhcpOptionsId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // dhcp-options-id - The ID of a set of DHCP options. + // + // key - The key for one of the options (for example, domain-name). + // + // value - The value for one of the options. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. 
+ // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDhcpOptionsInput) GoString() string { + return s.String() +} + +type DescribeDhcpOptionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more DHCP options sets. + DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDhcpOptionsOutput) GoString() string { + return s.String() +} + +type DescribeExportTasksInput struct { + _ struct{} `type:"structure"` + + // One or more export task IDs. 
+ ExportTaskIds []*string `locationName:"exportTaskId" locationNameList:"ExportTaskId" type:"list"` +} + +// String returns the string representation +func (s DescribeExportTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksInput) GoString() string { + return s.String() +} + +type DescribeExportTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the export tasks. + ExportTasks []*ExportTask `locationName:"exportTaskSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeExportTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksOutput) GoString() string { + return s.String() +} + +type DescribeFlowLogsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // deliver-log-status - The status of the logs delivery (SUCCESS | FAILED). + // + // flow-log-id - The ID of the flow log. + // + // log-group-name - The name of the log group. + // + // resource-id - The ID of the VPC, subnet, or network interface. + // + // traffic-type - The type of traffic (ACCEPT | REJECT | ALL) + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // One or more flow log IDs. + FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // NextToken value. This value can be between 5 and 1000; if MaxResults is given + // a value larger than 1000, only 1000 results are returned. You cannot specify + // this parameter and the flow log IDs parameter in the same request. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowLogsInput) GoString() string { + return s.String() +} + +type DescribeFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Information about the flow logs. + FlowLogs []*FlowLog `locationName:"flowLogSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowLogsOutput) GoString() string { + return s.String() +} + +type DescribeHostsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // instance-type - The instance type size that the Dedicated host is configured + // to support. + // + // auto-placement - Whether auto-placement is enabled or disabled (on | off). + // + // host-reservation-id - The ID of the reservation associated with this host. + // + // client-token - The idempotency token you provided when you launched the + // instance + // + // state- The allocation state of the Dedicated host (available | under-assessment + // | permanent-failure | released | released-permanent-failure). + // + // availability-zone - The Availability Zone of the host. + Filter []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The IDs of the Dedicated hosts. The IDs are used for targeted instance launches. + HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list"` + + // The maximum number of results to return for the request in a single page. 
+ // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500; if maxResults is given + // a larger value than 500, you will receive an error. You cannot specify this + // parameter and the host IDs parameter in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostsInput) GoString() string { + return s.String() +} + +type DescribeHostsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Dedicated hosts. + Hosts []*Host `locationName:"hostSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostsOutput) GoString() string { + return s.String() +} + +type DescribeIdFormatInput struct { + _ struct{} `type:"structure"` + + // The type of resource. + Resource *string `type:"string"` +} + +// String returns the string representation +func (s DescribeIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdFormatInput) GoString() string { + return s.String() +} + +type DescribeIdFormatOutput struct { + _ struct{} `type:"structure"` + + // Information about the ID format for the resource. 
+ Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdFormatOutput) GoString() string { + return s.String() +} + +type DescribeImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The AMI attribute. + // + // Note: Depending on your account privileges, the blockDeviceMapping attribute + // may return a Client.AuthFailure error. If this happens, use DescribeImages + // to get information about the block device mapping for the AMI. + Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeInput) GoString() string { + return s.String() +} + +// Describes an image attribute. +type DescribeImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // A description for the AMI. + Description *AttributeValue `locationName:"description" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The kernel ID. 
+ KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // One or more launch permissions. + LaunchPermissions []*LaunchPermission `locationName:"launchPermission" locationNameList:"item" type:"list"` + + // One or more product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. + RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // The value to use for a resource attribute. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` +} + +// String returns the string representation +func (s DescribeImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeOutput) GoString() string { + return s.String() +} + +type DescribeImagesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Scopes the images by users with explicit launch permissions. Specify an AWS + // account ID, self (the sender of the request), or all (public AMIs). + ExecutableUsers []*string `locationName:"ExecutableBy" locationNameList:"ExecutableBy" type:"list"` + + // One or more filters. + // + // architecture - The image architecture (i386 | x86_64). + // + // block-device-mapping.delete-on-termination - A Boolean value that indicates + // whether the Amazon EBS volume is deleted on instance termination. + // + // block-device-mapping.device-name - The device name for the EBS volume + // (for example, /dev/sdh). + // + // block-device-mapping.snapshot-id - The ID of the snapshot used for the + // EBS volume. 
+ // + // block-device-mapping.volume-size - The volume size of the EBS volume, + // in GiB. + // + // block-device-mapping.volume-type - The volume type of the EBS volume (gp2 + // | standard | io1). + // + // description - The description of the image (provided during image creation). + // + // hypervisor - The hypervisor type (ovm | xen). + // + // image-id - The ID of the image. + // + // image-type - The image type (machine | kernel | ramdisk). + // + // is-public - A Boolean that indicates whether the image is public. + // + // kernel-id - The kernel ID. + // + // manifest-location - The location of the image manifest. + // + // name - The name of the AMI (provided during image creation). + // + // owner-alias - The AWS account alias (for example, amazon). + // + // owner-id - The AWS account ID of the image owner. + // + // platform - The platform. To only list Windows-based AMIs, use windows. + // + // product-code - The product code. + // + // product-code.type - The type of the product code (devpay | marketplace). + // + // ramdisk-id - The RAM disk ID. + // + // root-device-name - The name of the root device volume (for example, /dev/sda1). + // + // root-device-type - The type of the root device volume (ebs | instance-store). + // + // state - The state of the image (available | pending | failed). + // + // state-reason-code - The reason code for the state change. + // + // state-reason-message - The message for the state change. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). 
If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // virtualization-type - The virtualization type (paravirtual | hvm). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more image IDs. + // + // Default: Describes all images available to you. + ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"` + + // Filters the images by the owner. Specify an AWS account ID, amazon (owner + // is Amazon), aws-marketplace (owner is AWS Marketplace), self (owner is the + // sender of the request). Omitting this option returns all images for which + // you have launch permissions, regardless of ownership. + Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` +} + +// String returns the string representation +func (s DescribeImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesInput) GoString() string { + return s.String() +} + +type DescribeImagesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more images. + Images []*Image `locationName:"imagesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesOutput) GoString() string { + return s.String() +} + +type DescribeImportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // One or more filters. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import image task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single request. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeImportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksInput) GoString() string { + return s.String() +} + +type DescribeImportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of zero or more import image tasks that are currently active or were + // completed or canceled in the previous 7 days. + ImportImageTasks []*ImportImageTask `locationName:"importImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksOutput) GoString() string { + return s.String() +} + +type DescribeImportSnapshotTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. 
+ Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import snapshot task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single request. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksInput) GoString() string { + return s.String() +} + +type DescribeImportSnapshotTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of zero or more import snapshot tasks that are currently active or + // were completed or canceled in the previous 7 days. + ImportSnapshotTasks []*ImportSnapshotTask `locationName:"importSnapshotTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksOutput) GoString() string { + return s.String() +} + +type DescribeInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The instance attribute. + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeInput) GoString() string { + return s.String() +} + +// Describes an instance attribute. +type DescribeInstanceAttributeOutput struct { + _ struct{} `type:"structure"` + + // The block device mapping of the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // If the value is true, you can't terminate the instance through the Amazon + // EC2 console, CLI, or API; otherwise, you can. + DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` + + // Indicates whether the instance is optimized for EBS I/O. + EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + + // The security groups associated with the instance. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // The instance type. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // The kernel ID. + KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. 
+ RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // The name of the root device (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` + + // The value to use for a resource attribute. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // The Base64-encoded MIME user data. + UserData *AttributeValue `locationName:"userData" type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeOutput) GoString() string { + return s.String() +} + +type DescribeInstanceStatusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone of the instance. + // + // event.code - The code for the scheduled event (instance-reboot | system-reboot + // | system-maintenance | instance-retirement | instance-stop). + // + // event.description - A description of the event. + // + // event.not-after - The latest end time for the scheduled event (for example, + // 2014-09-15T17:15:20.000Z). 
+ // + // event.not-before - The earliest start time for the scheduled event (for + // example, 2014-09-15T17:15:20.000Z). + // + // instance-state-code - The code for the instance state, as a 16-bit unsigned + // integer. The high byte is an opaque internal value and should be ignored. + // The low byte is set based on the state represented. The valid values are + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // instance-state-name - The state of the instance (pending | running | shutting-down + // | terminated | stopping | stopped). + // + // instance-status.reachability - Filters on instance status where the name + // is reachability (passed | failed | initializing | insufficient-data). + // + // instance-status.status - The status of the instance (ok | impaired | initializing + // | insufficient-data | not-applicable). + // + // system-status.reachability - Filters on system status where the name is + // reachability (passed | failed | initializing | insufficient-data). + // + // system-status.status - The system status of the instance (ok | impaired + // | initializing | insufficient-data | not-applicable). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // When true, includes the health status for all instances. When false, includes + // the health status for running instances only. + // + // Default: false + IncludeAllInstances *bool `locationName:"includeAllInstances" type:"boolean"` + + // One or more instance IDs. + // + // Default: Describes all your instances. + // + // Constraints: Maximum 100 explicitly specified instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. 
This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusInput) GoString() string { + return s.String() +} + +type DescribeInstanceStatusOutput struct { + _ struct{} `type:"structure"` + + // One or more instance status descriptions. + InstanceStatuses []*InstanceStatus `locationName:"instanceStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusOutput) GoString() string { + return s.String() +} + +type DescribeInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // affinity - The affinity setting for an instance running on a Dedicated + // host (default | host). + // + // architecture - The instance architecture (i386 | x86_64). + // + // availability-zone - The Availability Zone of the instance. 
+ // + // block-device-mapping.attach-time - The attach time for an EBS volume mapped + // to the instance, for example, 2010-09-15T17:15:20.000Z. + // + // block-device-mapping.delete-on-termination - A Boolean that indicates + // whether the EBS volume is deleted on instance termination. + // + // block-device-mapping.device-name - The device name for the EBS volume + // (for example, /dev/sdh or xvdh). + // + // block-device-mapping.status - The status for the EBS volume (attaching + // | attached | detaching | detached). + // + // block-device-mapping.volume-id - The volume ID of the EBS volume. + // + // client-token - The idempotency token you provided when you launched the + // instance. + // + // dns-name - The public DNS name of the instance. + // + // group-id - The ID of the security group for the instance. EC2-Classic + // only. + // + // group-name - The name of the security group for the instance. EC2-Classic + // only. + // + // host-Id - The ID of the Dedicated host on which the instance is running, + // if applicable. + // + // hypervisor - The hypervisor type of the instance (ovm | xen). + // + // iam-instance-profile.arn - The instance profile associated with the instance. + // Specified as an ARN. + // + // image-id - The ID of the image used to launch the instance. + // + // instance-id - The ID of the instance. + // + // instance-lifecycle - Indicates whether this is a Spot Instance (spot). + // + // instance-state-code - The state of the instance, as a 16-bit unsigned + // integer. The high byte is an opaque internal value and should be ignored. + // The low byte is set based on the state represented. The valid values are: + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // instance-state-name - The state of the instance (pending | running | shutting-down + // | terminated | stopping | stopped). + // + // instance-type - The type of instance (for example, t2.micro). 
+ // + // instance.group-id - The ID of the security group for the instance. + // + // instance.group-name - The name of the security group for the instance. + // + // ip-address - The public IP address of the instance. + // + // kernel-id - The kernel ID. + // + // key-name - The name of the key pair used when the instance was launched. + // + // launch-index - When launching multiple instances, this is the index for + // the instance in the launch group (for example, 0, 1, 2, and so on). + // + // launch-time - The time when the instance was launched. + // + // monitoring-state - Indicates whether monitoring is enabled for the instance + // (disabled | enabled). + // + // owner-id - The AWS account ID of the instance owner. + // + // placement-group-name - The name of the placement group for the instance. + // + // platform - The platform. Use windows if you have Windows instances; otherwise, + // leave blank. + // + // private-dns-name - The private DNS name of the instance. + // + // private-ip-address - The private IP address of the instance. + // + // product-code - The product code associated with the AMI used to launch + // the instance. + // + // product-code.type - The type of product code (devpay | marketplace). + // + // ramdisk-id - The RAM disk ID. + // + // reason - The reason for the current state of the instance (for example, + // shows "User Initiated [date]" when you stop or terminate the instance). Similar + // to the state-reason-code filter. + // + // requester-id - The ID of the entity that launched the instance on your + // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // + // reservation-id - The ID of the instance's reservation. A reservation ID + // is created any time you launch an instance. A reservation ID has a one-to-one + // relationship with an instance launch request, but can be associated with + // more than one instance if you launch multiple instances using the same launch + // request. 
For example, if you launch one instance, you'll get one reservation + // ID. If you launch ten instances using the same launch request, you'll also + // get one reservation ID. + // + // root-device-name - The name of the root device for the instance (for example, + // /dev/sda1 or /dev/xvda). + // + // root-device-type - The type of root device that the instance uses (ebs + // | instance-store). + // + // source-dest-check - Indicates whether the instance performs source/destination + // checking. A value of true means that checking is enabled, and false means + // checking is disabled. The value must be false for the instance to perform + // network address translation (NAT) in your VPC. + // + // spot-instance-request-id - The ID of the Spot instance request. + // + // state-reason-code - The reason code for the state change. + // + // state-reason-message - A message that describes the state change. + // + // subnet-id - The ID of the subnet for the instance. + // + // tag:key=value - The key/value combination of a tag assigned to the resource, + // where tag:key is the tag's key. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // tenancy - The tenancy of an instance (dedicated | default | host). + // + // virtualization-type - The virtualization type of the instance (paravirtual + // | hvm). + // + // vpc-id - The ID of the VPC that the instance is running in. 
+ // + // network-interface.description - The description of the network interface. + // + // network-interface.subnet-id - The ID of the subnet for the network interface. + // + // network-interface.vpc-id - The ID of the VPC for the network interface. + // + // network-interface.network-interface-id - The ID of the network interface. + // + // network-interface.owner-id - The ID of the owner of the network interface. + // + // network-interface.availability-zone - The Availability Zone for the network + // interface. + // + // network-interface.requester-id - The requester ID for the network interface. + // + // network-interface.requester-managed - Indicates whether the network interface + // is being managed by AWS. + // + // network-interface.status - The status of the network interface (available) + // | in-use). + // + // network-interface.mac-address - The MAC address of the network interface. + // + // network-interface.private-dns-name - The private DNS name of the network + // interface. + // + // network-interface.source-dest-check - Whether the network interface performs + // source/destination checking. A value of true means checking is enabled, and + // false means checking is disabled. The value must be false for the network + // interface to perform network address translation (NAT) in your VPC. + // + // network-interface.group-id - The ID of a security group associated with + // the network interface. + // + // network-interface.group-name - The name of a security group associated + // with the network interface. + // + // network-interface.attachment.attachment-id - The ID of the interface attachment. + // + // network-interface.attachment.instance-id - The ID of the instance to which + // the network interface is attached. + // + // network-interface.attachment.instance-owner-id - The owner ID of the instance + // to which the network interface is attached. 
+ // + // network-interface.addresses.private-ip-address - The private IP address + // associated with the network interface. + // + // network-interface.attachment.device-index - The device index to which + // the network interface is attached. + // + // network-interface.attachment.status - The status of the attachment (attaching + // | attached | detaching | detached). + // + // network-interface.attachment.attach-time - The time that the network interface + // was attached to an instance. + // + // network-interface.attachment.delete-on-termination - Specifies whether + // the attachment is deleted when an instance is terminated. + // + // network-interface.addresses.primary - Specifies whether the IP address + // of the network interface is the primary private IP address. + // + // network-interface.addresses.association.public-ip - The ID of the association + // of an Elastic IP address with a network interface. + // + // network-interface.addresses.association.ip-owner-id - The owner ID of + // the private IP address associated with the network interface. + // + // association.public-ip - The address of the Elastic IP address bound to + // the network interface. + // + // association.ip-owner-id - The owner of the Elastic IP address associated + // with the network interface. + // + // association.allocation-id - The allocation ID returned when you allocated + // the Elastic IP address for your network interface. + // + // association.association-id - The association ID returned when the network + // interface was associated with an IP address. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. + // + // Default: Describes all your instances. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return for the request in a single page. 
+ // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. You cannot specify this parameter and the instance IDs parameter + // in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to request the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +type DescribeInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Zero or more reservations. + Reservations []*Reservation `locationName:"reservationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +type DescribeInternetGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.state - The current state of the attachment between the gateway + // and the VPC (available). 
Present only if a VPC is attached. + // + // attachment.vpc-id - The ID of an attached VPC. + // + // internet-gateway-id - The ID of the Internet gateway. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Internet gateway IDs. + // + // Default: Describes all your Internet gateways. + InternetGatewayIds []*string `locationName:"internetGatewayId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysInput) GoString() string { + return s.String() +} + +type DescribeInternetGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more Internet gateways. 
+ InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysOutput) GoString() string { + return s.String() +} + +type DescribeKeyPairsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // fingerprint - The fingerprint of the key pair. + // + // key-name - The name of the key pair. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more key pair names. + // + // Default: Describes all your key pairs. + KeyNames []*string `locationName:"KeyName" locationNameList:"KeyName" type:"list"` +} + +// String returns the string representation +func (s DescribeKeyPairsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsInput) GoString() string { + return s.String() +} + +type DescribeKeyPairsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more key pairs. 
+ KeyPairs []*KeyPairInfo `locationName:"keySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeKeyPairsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsOutput) GoString() string { + return s.String() +} + +type DescribeMovingAddressesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic). + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value outside of this range, an error is returned. + // + // Default: If no value is provided, the default is 1000. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more Elastic IP addresses. 
+ PublicIps []*string `locationName:"publicIp" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeMovingAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMovingAddressesInput) GoString() string { + return s.String() +} + +type DescribeMovingAddressesOutput struct { + _ struct{} `type:"structure"` + + // The status for each Elastic IP address. + MovingAddressStatuses []*MovingAddressStatus `locationName:"movingAddressStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeMovingAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMovingAddressesOutput) GoString() string { + return s.String() +} + +type DescribeNatGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // nat-gateway-id - The ID of the NAT gateway. + // + // state - The state of the NAT gateway (pending | failed | available | deleting + // | deleted). + // + // subnet-id - The ID of the subnet in which the NAT gateway resides. + // + // vpc-id - The ID of the VPC in which the NAT gateway resides. + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value specified is greater than 1000, we return only + // 1000 items. + MaxResults *int64 `type:"integer"` + + // One or more NAT gateway IDs. 
+ NatGatewayIds []*string `locationName:"NatGatewayId" locationNameList:"item" type:"list"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNatGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNatGatewaysInput) GoString() string { + return s.String() +} + +type DescribeNatGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about the NAT gateways. + NatGateways []*NatGateway `locationName:"natGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeNatGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNatGatewaysOutput) GoString() string { + return s.String() +} + +type DescribeNetworkAclsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // association.association-id - The ID of an association ID for the ACL. + // + // association.network-acl-id - The ID of the network ACL involved in the + // association. + // + // association.subnet-id - The ID of the subnet involved in the association. + // + // default - Indicates whether the ACL is the default network ACL for the + // VPC. + // + // entry.cidr - The CIDR range specified in the entry. 
+ // + // entry.egress - Indicates whether the entry applies to egress traffic. + // + // entry.icmp.code - The ICMP code specified in the entry, if any. + // + // entry.icmp.type - The ICMP type specified in the entry, if any. + // + // entry.port-range.from - The start of the port range specified in the entry. + // + // entry.port-range.to - The end of the port range specified in the entry. + // + // entry.protocol - The protocol specified in the entry (tcp | udp | icmp + // or a protocol number). + // + // entry.rule-action - Allows or denies the matching traffic (allow | deny). + // + // entry.rule-number - The number of an entry (in other words, rule) in the + // ACL's set of entries. + // + // network-acl-id - The ID of the network ACL. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the network ACL. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more network ACL IDs. + // + // Default: Describes all your network ACLs. 
+ NetworkAclIds []*string `locationName:"NetworkAclId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkAclsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsInput) GoString() string { + return s.String() +} + +type DescribeNetworkAclsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more network ACLs. + NetworkAcls []*NetworkAcl `locationName:"networkAclSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkAclsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsOutput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute of the network interface. + Attribute *string `locationName:"attribute" type:"string" enum:"NetworkInterfaceAttribute"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` + + // The attachment (if any) of the network interface. 
+ Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The description of the network interface. + Description *AttributeValue `locationName:"description" type:"structure"` + + // The security groups associated with the network interface. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // Indicates whether source/destination checking is enabled. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfacesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // addresses.private-ip-address - The private IP addresses associated with + // the network interface. + // + // addresses.primary - Whether the private IP address is the primary IP address + // associated with the network interface. + // + // addresses.association.public-ip - The association ID returned when the + // network interface was associated with the Elastic IP address. + // + // addresses.association.owner-id - The owner ID of the addresses associated + // with the network interface. 
+ // + // association.association-id - The association ID returned when the network + // interface was associated with an IP address. + // + // association.allocation-id - The allocation ID returned when you allocated + // the Elastic IP address for your network interface. + // + // association.ip-owner-id - The owner of the Elastic IP address associated + // with the network interface. + // + // association.public-ip - The address of the Elastic IP address bound to + // the network interface. + // + // association.public-dns-name - The public DNS name for the network interface. + // + // attachment.attachment-id - The ID of the interface attachment. + // + // attachment.attach.time - The time that the network interface was attached + // to an instance. + // + // attachment.delete-on-termination - Indicates whether the attachment is + // deleted when an instance is terminated. + // + // attachment.device-index - The device index to which the network interface + // is attached. + // + // attachment.instance-id - The ID of the instance to which the network interface + // is attached. + // + // attachment.instance-owner-id - The owner ID of the instance to which the + // network interface is attached. + // + // attachment.nat-gateway-id - The ID of the NAT gateway to which the network + // interface is attached. + // + // attachment.status - The status of the attachment (attaching | attached + // | detaching | detached). + // + // availability-zone - The Availability Zone of the network interface. + // + // description - The description of the network interface. + // + // group-id - The ID of a security group associated with the network interface. + // + // group-name - The name of a security group associated with the network + // interface. + // + // mac-address - The MAC address of the network interface. + // + // network-interface-id - The ID of the network interface. + // + // owner-id - The AWS account ID of the network interface owner. 
+ // + // private-ip-address - The private IP address or addresses of the network + // interface. + // + // private-dns-name - The private DNS name of the network interface. + // + // requester-id - The ID of the entity that launched the instance on your + // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // + // requester-managed - Indicates whether the network interface is being managed + // by an AWS service (for example, AWS Management Console, Auto Scaling, and + // so on). + // + // source-desk-check - Indicates whether the network interface performs source/destination + // checking. A value of true means checking is enabled, and false means checking + // is disabled. The value must be false for the network interface to perform + // network address translation (NAT) in your VPC. + // + // status - The status of the network interface. If the network interface + // is not attached to an instance, the status is available; if a network interface + // is attached to an instance the status is in-use. + // + // subnet-id - The ID of the subnet for the network interface. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the network interface. + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // One or more network interface IDs. 
+ // + // Default: Describes all your network interfaces. + NetworkInterfaceIds []*string `locationName:"NetworkInterfaceId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesInput) GoString() string { + return s.String() +} + +type DescribeNetworkInterfacesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more network interfaces. + NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesOutput) GoString() string { + return s.String() +} + +type DescribePlacementGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // group-name - The name of the placement group. + // + // state - The state of the placement group (pending | available | deleting + // | deleted). + // + // strategy - The strategy of the placement group (cluster). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more placement group names. + // + // Default: Describes all your placement groups, or only those otherwise specified. 
+ GroupNames []*string `locationName:"groupName" type:"list"` +} + +// String returns the string representation +func (s DescribePlacementGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsInput) GoString() string { + return s.String() +} + +type DescribePlacementGroupsOutput struct { + _ struct{} `type:"structure"` + + // One or more placement groups. + PlacementGroups []*PlacementGroup `locationName:"placementGroupSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePlacementGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsOutput) GoString() string { + return s.String() +} + +type DescribePrefixListsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // prefix-list-id: The ID of a prefix list. + // + // prefix-list-name: The name of a prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value specified is greater than 1000, we return only + // 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more prefix list IDs. 
+ PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsInput) GoString() string { + return s.String() +} + +type DescribePrefixListsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // All available prefix lists. + PrefixLists []*PrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsOutput) GoString() string { + return s.String() +} + +type DescribeRegionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com). + // + // region-name - The name of the region (for example, us-east-1). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The names of one or more regions. 
+ RegionNames []*string `locationName:"RegionName" locationNameList:"RegionName" type:"list"` +} + +// String returns the string representation +func (s DescribeRegionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsInput) GoString() string { + return s.String() +} + +type DescribeRegionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more regions. + Regions []*Region `locationName:"regionInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRegionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone where the Reserved Instance + // can be used. + // + // duration - The duration of the Reserved Instance (one year or three years), + // in seconds (31536000 | 94608000). + // + // end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z). + // + // fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // instance-type - The instance type that is covered by the reservation. + // + // product-description - The Reserved Instance product platform description. 
+ // Instances that include (Amazon VPC) in the product platform description will + // only be displayed to EC2-Classic account holders and are for use with Amazon + // VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon + // VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | + // Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows + // with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows + // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows + // with SQL Server Enterprise (Amazon VPC)). + // + // reserved-instances-id - The ID of the Reserved Instance. + // + // start - The time at which the Reserved Instance purchase request was placed + // (for example, 2014-08-07T11:54:42.000Z). + // + // state - The state of the Reserved Instance (payment-pending | active | + // payment-failed | retired). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The Reserved Instance offering type. If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. 
+ OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // One or more Reserved Instance IDs. + // + // Default: Describes all your Reserved Instances, or only those otherwise + // specified. + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesListingsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // reserved-instances-id - The ID of the Reserved Instances. + // + // reserved-instances-listing-id - The ID of the Reserved Instances listing. + // + // status - The status of the Reserved Instance listing (pending | active + // | cancelled | closed). + // + // status-message - The reason for the status. + Filters []*Filter `locationName:"filters" locationNameList:"Filter" type:"list"` + + // One or more Reserved Instance IDs. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // One or more Reserved Instance listing IDs. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesListingsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Reserved Instance listing. 
+ ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesModificationsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // client-token - The idempotency token for the modification request. + // + // create-date - The time when the modification request was created. + // + // effective-date - The time when the modification becomes effective. + // + // modification-result.reserved-instances-id - The ID for the Reserved Instances + // created as part of the modification request. This ID is only available when + // the status of the modification is fulfilled. + // + // modification-result.target-configuration.availability-zone - The Availability + // Zone for the new Reserved Instances. + // + // modification-result.target-configuration.instance-count - The number + // of new Reserved Instances. + // + // modification-result.target-configuration.instance-type - The instance + // type of the new Reserved Instances. + // + // modification-result.target-configuration.platform - The network platform + // of the new Reserved Instances (EC2-Classic | EC2-VPC). + // + // reserved-instances-id - The ID of the Reserved Instances modified. + // + // reserved-instances-modification-id - The ID of the modification request. + // + // status - The status of the Reserved Instances modification request (processing + // | fulfilled | failed). + // + // status-message - The reason for the status. + // + // update-date - The time when the modification request was last updated. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // IDs for the submitted modification request. + ReservedInstancesModificationIds []*string `locationName:"ReservedInstancesModificationId" locationNameList:"ReservedInstancesModificationId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesModificationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance modification information. + ReservedInstancesModifications []*ReservedInstancesModification `locationName:"reservedInstancesModificationsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone where the Reserved Instance + // can be used. + // + // duration - The duration of the Reserved Instance (for example, one year + // or three years), in seconds (31536000 | 94608000). + // + // fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // instance-type - The instance type that is covered by the reservation. + // + // marketplace - Set to true to show only Reserved Instance Marketplace offerings. + // When this filter is not used, which is the default behavior, all offerings + // from both AWS and the Reserved Instance Marketplace are listed. + // + // product-description - The Reserved Instance product platform description. + // Instances that include (Amazon VPC) in the product platform description will + // only be displayed to EC2-Classic account holders and are for use with Amazon + // VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon + // VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | + // Windows | Windows (Amazon VPC) | Windows with SQL Server Standard | Windows + // with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | Windows + // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows + // with SQL Server Enterprise (Amazon VPC)) + // + // reserved-instances-offering-id - The Reserved Instances offering ID. + // + // usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Include Reserved Instance Marketplace offerings in the response. + IncludeMarketplace *bool `type:"boolean"` + + // The tenancy of the instances covered by the reservation. 
A Reserved Instance + // with a tenancy of dedicated is applied to instances that run in a VPC on + // single-tenant hardware (i.e., Dedicated Instances). + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type that the reservation will cover (for example, m1.small). + // For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The maximum duration (in seconds) to filter when searching for offerings. + // + // Default: 94608000 (3 years) + MaxDuration *int64 `type:"long"` + + // The maximum number of instances to filter when searching for offerings. + // + // Default: 20 + MaxInstanceCount *int64 `type:"integer"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. The maximum is 100. + // + // Default: 100 + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The minimum duration (in seconds) to filter when searching for offerings. + // + // Default: 2592000 (1 month) + MinDuration *int64 `type:"long"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance offering type. If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. Instances that include + // (Amazon VPC) in the description are for use with Amazon VPC. 
+ ProductDescription *string `type:"string" enum:"RIProductDescription"` + + // One or more Reserved Instances offering IDs. + ReservedInstancesOfferingIds []*string `locationName:"ReservedInstancesOfferingId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsInput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOfferingsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of Reserved Instances offerings. + ReservedInstancesOfferings []*ReservedInstancesOffering `locationName:"reservedInstancesOfferingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // A list of Reserved Instances. 
+ ReservedInstances []*ReservedInstances `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOutput) GoString() string { + return s.String() +} + +type DescribeRouteTablesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // association.route-table-association-id - The ID of an association ID for + // the route table. + // + // association.route-table-id - The ID of the route table involved in the + // association. + // + // association.subnet-id - The ID of the subnet involved in the association. + // + // association.main - Indicates whether the route table is the main route + // table for the VPC (true | false). + // + // route-table-id - The ID of the route table. + // + // route.destination-cidr-block - The CIDR range specified in a route in + // the table. + // + // route.destination-prefix-list-id - The ID (prefix) of the AWS service + // specified in a route in the table. + // + // route.gateway-id - The ID of a gateway specified in a route in the table. + // + // route.instance-id - The ID of an instance specified in a route in the + // table. + // + // route.nat-gateway-id - The ID of a NAT gateway. + // + // route.origin - Describes how the route was created. 
CreateRouteTable indicates + // that the route was automatically created when the route table was created; + // CreateRoute indicates that the route was manually added to the route table; + // EnableVgwRoutePropagation indicates that the route was propagated by route + // propagation. + // + // route.state - The state of a route in the route table (active | blackhole). + // The blackhole state indicates that the route's target isn't available (for + // example, the specified gateway isn't attached to the VPC, the specified NAT + // instance has been terminated, and so on). + // + // route.vpc-peering-connection-id - The ID of a VPC peering connection specified + // in a route in the table. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the route table. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more route table IDs. + // + // Default: Describes all your route tables. 
+ RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRouteTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRouteTablesInput) GoString() string { + return s.String() +} + +type DescribeRouteTablesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more route tables. + RouteTables []*RouteTable `locationName:"routeTableSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRouteTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRouteTablesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeScheduledInstanceAvailability. +type DescribeScheduledInstanceAvailabilityInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone (for example, us-west-2a). + // + // instance-type - The instance type (for example, c4.large). + // + // network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The time period for the first schedule to start. + FirstSlotStartTimeRange *SlotDateTimeRangeRequest `type:"structure" required:"true"` + + // The maximum number of results to return in a single call. 
To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The maximum available duration, in hours. This value must be greater than + // MinSlotDurationInHours and less than 1,720. + MaxSlotDurationInHours *int64 `type:"integer"` + + // The minimum available duration, in hours. The minimum required duration is + // 1,200 hours per year. For example, the minimum daily schedule is 4 hours, + // the minimum weekly schedule is 24 hours, and the minimum monthly schedule + // is 100 hours. + MinSlotDurationInHours *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrenceRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeScheduledInstanceAvailability. +type DescribeScheduledInstanceAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the available Scheduled Instances. 
+ ScheduledInstanceAvailabilitySet []*ScheduledInstanceAvailability `locationName:"scheduledInstanceAvailabilitySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeScheduledInstances. +type DescribeScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone (for example, us-west-2a). + // + // instance-type - The instance type (for example, c4.large). + // + // network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // One or more Scheduled Instance IDs. + ScheduledInstanceIds []*string `locationName:"ScheduledInstanceId" locationNameList:"ScheduledInstanceId" type:"list"` + + // The time period for the first schedule to start. 
+ SlotStartTimeRange *SlotStartTimeRangeRequest `type:"structure"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeScheduledInstances. +type DescribeScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the Scheduled Instances. + ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesOutput) GoString() string { + return s.String() +} + +type DescribeSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. If using multiple filters for rules, the results include + // security groups for which any combination of rules - not necessarily a single + // rule - match all filters. + // + // description - The description of the security group. + // + // egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service + // to which the security group allows access. + // + // group-id - The ID of the security group. 
+ // + // group-name - The name of the security group. + // + // ip-permission.cidr - A CIDR range that has been granted permission. + // + // ip-permission.from-port - The start of port range for the TCP and UDP + // protocols, or an ICMP type number. + // + // ip-permission.group-id - The ID of a security group that has been granted + // permission. + // + // ip-permission.group-name - The name of a security group that has been + // granted permission. + // + // ip-permission.protocol - The IP protocol for the permission (tcp | udp + // | icmp or a protocol number). + // + // ip-permission.to-port - The end of port range for the TCP and UDP protocols, + // or an ICMP code. + // + // ip-permission.user-id - The ID of an AWS account that has been granted + // permission. + // + // owner-id - The AWS account ID of the owner of the security group. + // + // tag-key - The key of a tag assigned to the security group. + // + // tag-value - The value of a tag assigned to the security group. + // + // vpc-id - The ID of the VPC specified when the security group was created. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more security group IDs. Required for security groups in a nondefault + // VPC. + // + // Default: Describes all your security groups. + GroupIds []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` + + // [EC2-Classic and default VPC only] One or more security group names. You + // can specify either the security group name or the security group ID. For + // security groups in a nondefault VPC, use the group-name filter to describe + // security groups by name. + // + // Default: Describes all your security groups. 
+ GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsInput) GoString() string { + return s.String() +} + +type DescribeSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more security groups. + SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The snapshot attribute you would like to view. + Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeInput) GoString() string { + return s.String() +} + +type DescribeSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // A list of permissions for creating volumes from the snapshot. 
+ CreateVolumePermissions []*CreateVolumePermission `locationName:"createVolumePermission" locationNameList:"item" type:"list"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The ID of the EBS snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeOutput) GoString() string { + return s.String() +} + +type DescribeSnapshotsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // description - A description of the snapshot. + // + // owner-alias - The AWS account alias (for example, amazon) that owns the + // snapshot. + // + // owner-id - The ID of the AWS account that owns the snapshot. + // + // progress - The progress of the snapshot, as a percentage (for example, + // 80%). + // + // snapshot-id - The snapshot ID. + // + // start-time - The time stamp when the snapshot was initiated. + // + // status - The status of the snapshot (pending | completed | error). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // volume-id - The ID of the volume the snapshot is for. + // + // volume-size - The size of the volume, in GiB. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of snapshot results returned by DescribeSnapshots in paginated + // output. When this parameter is used, DescribeSnapshots only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeSnapshots + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. If this parameter is not used, then DescribeSnapshots returns + // all results. You cannot specify this parameter and the snapshot IDs parameter + // in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeSnapshots + // request where MaxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the NextToken value. This value is null when there are no more results + // to return. + NextToken *string `type:"string"` + + // Returns the snapshots owned by the specified owner. Multiple owners can be + // specified. 
+ OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` + + // One or more AWS accounts IDs that can create volumes from the snapshot. + RestorableByUserIds []*string `locationName:"RestorableBy" type:"list"` + + // One or more snapshot IDs. + // + // Default: Describes snapshots for which you have launch permissions. + SnapshotIds []*string `locationName:"SnapshotId" locationNameList:"SnapshotId" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsInput) GoString() string { + return s.String() +} + +type DescribeSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeSnapshots request. When + // the results of a DescribeSnapshots request exceed MaxResults, this value + // can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the snapshots. + Snapshots []*Snapshot `locationName:"snapshotSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotDatafeedSubscription. +type DescribeSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DescribeSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotDatafeedSubscription. +type DescribeSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The Spot instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetInstances. +type DescribeSpotFleetInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. 
+ SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetInstances. +type DescribeSpotFleetInstancesOutput struct { + _ struct{} `type:"structure"` + + // The running instances. Note that this list is refreshed periodically and + // might be out of date. + ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list" required:"true"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of events to describe. By default, all events are described. 
+ EventType *string `locationName:"eventType" type:"string" enum:"EventType"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the events in the history of the Spot fleet request. + HistoryRecords []*HistoryRecord `locationName:"historyRecordSet" locationNameList:"item" type:"list" required:"true"` + + // The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // All records up to this time were retrieved. + // + // If nextToken indicates that there are more results, this value is not present. + LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The IDs of the Spot fleet requests. 
+ SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the configuration of your Spot fleet. + SpotFleetRequestConfigs []*SpotFleetRequestConfig `locationName:"spotFleetRequestConfigSet" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotInstanceRequests. +type DescribeSpotInstanceRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availability-zone-group - The Availability Zone group. + // + // create-time - The time stamp when the Spot instance request was created. + // + // fault-code - The fault code related to the request. + // + // fault-message - The fault message related to the request. 
+ // + // instance-id - The ID of the instance that fulfilled the request. + // + // launch-group - The Spot instance launch group. + // + // launch.block-device-mapping.delete-on-termination - Indicates whether + // the Amazon EBS volume is deleted on instance termination. + // + // launch.block-device-mapping.device-name - The device name for the Amazon + // EBS volume (for example, /dev/sdh). + // + // launch.block-device-mapping.snapshot-id - The ID of the snapshot used + // for the Amazon EBS volume. + // + // launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, + // in GiB. + // + // launch.block-device-mapping.volume-type - The type of the Amazon EBS volume + // (gp2 | standard | io1). + // + // launch.group-id - The security group for the instance. + // + // launch.image-id - The ID of the AMI. + // + // launch.instance-type - The type of instance (for example, m3.medium). + // + // launch.kernel-id - The kernel ID. + // + // launch.key-name - The name of the key pair the instance launched with. + // + // launch.monitoring-enabled - Whether monitoring is enabled for the Spot + // instance. + // + // launch.ramdisk-id - The RAM disk ID. + // + // network-interface.network-interface-id - The ID of the network interface. + // + // network-interface.device-index - The index of the device for the network + // interface attachment on the instance. + // + // network-interface.subnet-id - The ID of the subnet for the instance. + // + // network-interface.description - A description of the network interface. + // + // network-interface.private-ip-address - The primary private IP address + // of the network interface. + // + // network-interface.delete-on-termination - Indicates whether the network + // interface is deleted when the instance is terminated. + // + // network-interface.group-id - The ID of the security group associated with + // the network interface. 
+ // + // network-interface.group-name - The name of the security group associated + // with the network interface. + // + // network-interface.addresses.primary - Indicates whether the IP address + // is the primary private IP address. + // + // product-description - The product description associated with the instance + // (Linux/UNIX | Windows). + // + // spot-instance-request-id - The Spot instance request ID. + // + // spot-price - The maximum hourly price for any Spot instance launched to + // fulfill the request. + // + // state - The state of the Spot instance request (open | active | closed + // | cancelled | failed). Spot bid status information can help you track your + // Amazon EC2 Spot instance requests. For more information, see Spot Bid Status + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // status-code - The short code describing the most recent evaluation of + // your Spot instance request. + // + // status-message - The message explaining the status of the Spot instance + // request. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // type - The type of Spot instance request (one-time | persistent). + // + // launched-availability-zone - The Availability Zone in which the bid is + // launched. 
+ // + // valid-from - The start date of the request. + // + // valid-until - The end date of the request. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Spot instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotInstanceRequests. +type DescribeSpotInstanceRequestsOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot instance requests. + SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryInput struct { + _ struct{} `type:"structure"` + + // Filters the results by the specified Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The date and time, up to the current date, from which to stop retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
+ EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // One or more filters. + // + // availability-zone - The Availability Zone for which prices should be returned. + // + // instance-type - The type of instance (for example, m3.medium). + // + // product-description - The product description for the Spot price (Linux/UNIX + // | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) + // | Windows (Amazon VPC)). + // + // spot-price - The Spot price. The value must match exactly (or use wildcards; + // greater than or less than comparison is not supported). + // + // timestamp - The timestamp of the Spot price history, in UTC format (for + // example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater + // than or less than comparison is not supported. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Filters the results by the specified instance types. + InstanceTypes []*string `locationName:"InstanceType" type:"list"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Filters the results by the specified basic product descriptions. + ProductDescriptions []*string `locationName:"ProductDescription" type:"list"` + + // The date and time, up to the past 90 days, from which to start retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
+ StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The historical Spot prices. + SpotPriceHistory []*SpotPrice `locationName:"spotPriceHistorySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryOutput) GoString() string { + return s.String() +} + +type DescribeSubnetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // availabilityZone - The Availability Zone for the subnet. You can also + // use availability-zone as the filter name. + // + // available-ip-address-count - The number of IP addresses in the subnet + // that are available. + // + // cidrBlock - The CIDR block of the subnet. The CIDR block you specify must + // exactly match the subnet's CIDR block for information to be returned for + // the subnet. You can also use cidr or cidr-block as the filter names. 
+ // + // defaultForAz - Indicates whether this is the default subnet for the Availability + // Zone. You can also use default-for-az as the filter name. + // + // state - The state of the subnet (pending | available). + // + // subnet-id - The ID of the subnet. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC for the subnet. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more subnet IDs. + // + // Default: Describes all your subnets. + SubnetIds []*string `locationName:"SubnetId" locationNameList:"SubnetId" type:"list"` +} + +// String returns the string representation +func (s DescribeSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetsInput) GoString() string { + return s.String() +} + +type DescribeSubnetsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more subnets. 
+ Subnets []*Subnet `locationName:"subnetSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetsOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // key - The tag key. + // + // resource-id - The resource ID. + // + // resource-type - The resource type (customer-gateway | dhcp-options | image + // | instance | internet-gateway | network-acl | network-interface | reserved-instances + // | route-table | security-group | snapshot | spot-instances-request | subnet + // | volume | vpc | vpn-connection | vpn-gateway). + // + // value - The tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return.. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of tags. + Tags []*TagDescription `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DescribeVolumeAttributeInput struct { + _ struct{} `type:"structure"` + + // The instance attribute. + Attribute *string `type:"string" enum:"VolumeAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVolumeAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeAttributeInput) GoString() string { + return s.String() +} + +type DescribeVolumeAttributeOutput struct { + _ struct{} `type:"structure"` + + // The state of autoEnableIO attribute. 
+ AutoEnableIO *AttributeBooleanValue `locationName:"autoEnableIO" type:"structure"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s DescribeVolumeAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeAttributeOutput) GoString() string { + return s.String() +} + +type DescribeVolumeStatusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // action.code - The action code for the event (for example, enable-volume-io). + // + // action.description - A description of the action. + // + // action.event-id - The event ID associated with the action. + // + // availability-zone - The Availability Zone of the instance. + // + // event.description - A description of the event. + // + // event.event-id - The event ID. + // + // event.event-type - The event type (for io-enabled: passed | failed; for + // io-performance: io-performance:degraded | io-performance:severely-degraded + // | io-performance:stalled). + // + // event.not-after - The latest end time for the event. + // + // event.not-before - The earliest start time for the event. + // + // volume-status.details-name - The cause for volume-status.status (io-enabled + // | io-performance). 
+ // + // volume-status.details-status - The status of volume-status.details-name + // (for io-enabled: passed | failed; for io-performance: normal | degraded | + // severely-degraded | stalled). + // + // volume-status.status - The status of the volume (ok | impaired | warning + // | insufficient-data). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumeStatus in + // paginated output. When this parameter is used, the request only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another request with + // the returned NextToken value. This value can be between 5 and 1000; if MaxResults + // is given a value larger than 1000, only 1000 results are returned. If this + // parameter is not used, then DescribeVolumeStatus returns all results. You + // cannot specify this parameter and the volume IDs parameter in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value to include in a future DescribeVolumeStatus request. + // When the results of the request exceed MaxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `type:"string"` + + // One or more volume IDs. + // + // Default: Describes all your volumes. + VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumeStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusInput) GoString() string { + return s.String() +} + +type DescribeVolumeStatusOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. 
This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of volumes. + VolumeStatuses []*VolumeStatusItem `locationName:"volumeStatusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumeStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusOutput) GoString() string { + return s.String() +} + +type DescribeVolumesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.attach-time - The time stamp when the attachment initiated. + // + // attachment.delete-on-termination - Whether the volume is deleted on instance + // termination. + // + // attachment.device - The device name that is exposed to the instance (for + // example, /dev/sda1). + // + // attachment.instance-id - The ID of the instance the volume is attached + // to. + // + // attachment.status - The attachment state (attaching | attached | detaching + // | detached). + // + // availability-zone - The Availability Zone in which the volume was created. + // + // create-time - The time stamp when the volume was created. + // + // encrypted - The encryption status of the volume. + // + // size - The size of the volume, in GiB. + // + // snapshot-id - The snapshot from which the volume was created. + // + // status - The status of the volume (creating | available | in-use | deleting + // | deleted | error). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. 
+ // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // volume-id - The volume ID. + // + // volume-type - The Amazon EBS volume type. This can be gp2 for General + // Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, or standard + // for Magnetic volumes. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumes in paginated + // output. When this parameter is used, DescribeVolumes only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeVolumes + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value larger than 1000, only 1000 results + // are returned. If this parameter is not used, then DescribeVolumes returns + // all results. You cannot specify this parameter and the volume IDs parameter + // in the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The NextToken value returned from a previous paginated DescribeVolumes request + // where MaxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // NextToken value. This value is null when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // One or more volume IDs. + VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesInput) GoString() string { + return s.String() +} + +type DescribeVolumesOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeVolumes request. When + // the results of a DescribeVolumes request exceed MaxResults, this value can + // be used to retrieve the next page of results. This value is null when there + // are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the volumes. + Volumes []*Volume `locationName:"volumeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesOutput) GoString() string { + return s.String() +} + +type DescribeVpcAttributeInput struct { + _ struct{} `type:"structure"` + + // The VPC attribute. + Attribute *string `type:"string" required:"true" enum:"VpcAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. 
+ VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeInput) GoString() string { + return s.String() +} + +type DescribeVpcAttributeOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // this attribute is true, instances in the VPC get DNS hostnames; otherwise, + // they do not. + EnableDnsHostnames *AttributeBooleanValue `locationName:"enableDnsHostnames" type:"structure"` + + // Indicates whether DNS resolution is enabled for the VPC. If this attribute + // is true, the Amazon DNS server resolves DNS hostnames for your instances + // to their corresponding IP addresses; otherwise, it does not. + EnableDnsSupport *AttributeBooleanValue `locationName:"enableDnsSupport" type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s DescribeVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeOutput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // One or more VPC IDs. 
+ VpcIds []*string `locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Information about the ClassicLink DNS support status of the VPCs. + Vpcs []*ClassicLinkDnsSupport `locationName:"vpcs" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true + // | false). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPCs for which you want to describe the ClassicLink status. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkInput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // The ClassicLink status of one or more VPCs. + Vpcs []*VpcClassicLink `locationName:"vpcSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkOutput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointServicesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of items to return for this request. 
The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesInput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointServicesOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of supported AWS services. + ServiceNames []*string `locationName:"serviceNameSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesOutput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // service-name: The name of the AWS service. + // + // vpc-id: The ID of the VPC in which the endpoint resides. + // + // vpc-endpoint-id: The ID of the endpoint. + // + // vpc-endpoint-state: The state of the endpoint. 
(pending | available | + // deleting | deleted) + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsInput) GoString() string { + return s.String() +} + +type DescribeVpcEndpointsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the endpoints. + VpcEndpoints []*VpcEndpoint `locationName:"vpcEndpointSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsOutput) GoString() string { + return s.String() +} + +type DescribeVpcPeeringConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // accepter-vpc-info.cidr-block - The CIDR block of the peer VPC. + // + // accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer + // VPC. + // + // accepter-vpc-info.vpc-id - The ID of the peer VPC. + // + // expiration-time - The expiration date and time for the VPC peering connection. + // + // requester-vpc-info.cidr-block - The CIDR block of the requester's VPC. + // + // requester-vpc-info.owner-id - The AWS account ID of the owner of the requester + // VPC. + // + // requester-vpc-info.vpc-id - The ID of the requester VPC. + // + // status-code - The status of the VPC peering connection (pending-acceptance + // | failed | expired | provisioning | active | deleted | rejected). + // + // status-message - A message that provides more information about the status + // of the VPC peering connection, if applicable. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-peering-connection-id - The ID of the VPC peering connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPC peering connection IDs. + // + // Default: Describes all your VPC peering connections. 
+ VpcPeeringConnectionIds []*string `locationName:"VpcPeeringConnectionId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsInput) GoString() string { + return s.String() +} + +type DescribeVpcPeeringConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connections. + VpcPeeringConnections []*VpcPeeringConnection `locationName:"vpcPeeringConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) GoString() string { + return s.String() +} + +type DescribeVpcsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // cidr - The CIDR block of the VPC. The CIDR block you specify must exactly + // match the VPC's CIDR block for information to be returned for the VPC. Must + // contain the slash followed by one or two digits (for example, /28). + // + // dhcp-options-id - The ID of a set of DHCP options. + // + // isDefault - Indicates whether the VPC is the default VPC. + // + // state - The state of the VPC (pending | available). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // vpc-id - The ID of the VPC. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPC IDs. + // + // Default: Describes all your VPCs. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsInput) GoString() string { + return s.String() +} + +type DescribeVpcsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more VPCs. + Vpcs []*Vpc `locationName:"vpcSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsOutput) GoString() string { + return s.String() +} + +type DescribeVpnConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // customer-gateway-configuration - The configuration information for the + // customer gateway. 
+ // + // customer-gateway-id - The ID of a customer gateway associated with the + // VPN connection. + // + // state - The state of the VPN connection (pending | available | deleting + // | deleted). + // + // option.static-routes-only - Indicates whether the connection has static + // routes only. Used for devices that do not support Border Gateway Protocol + // (BGP). + // + // route.destination-cidr-block - The destination CIDR block. This corresponds + // to the subnet used in a customer data center. + // + // bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP + // device. + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // type - The type of VPN connection. Currently the only supported type is + // ipsec.1. + // + // vpn-connection-id - The ID of the VPN connection. + // + // vpn-gateway-id - The ID of a virtual private gateway associated with the + // VPN connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPN connection IDs. + // + // Default: Describes your VPN connections. 
+ VpnConnectionIds []*string `locationName:"VpnConnectionId" locationNameList:"VpnConnectionId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsInput) GoString() string { + return s.String() +} + +type DescribeVpnConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more VPN connections. + VpnConnections []*VpnConnection `locationName:"vpnConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsOutput) GoString() string { + return s.String() +} + +type DescribeVpnGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // attachment.state - The current state of the attachment between the gateway + // and the VPC (attaching | attached | detaching | detached). + // + // attachment.vpc-id - The ID of an attached VPC. + // + // availability-zone - The Availability Zone for the virtual private gateway + // (if applicable). + // + // state - The state of the virtual private gateway (pending | available + // | deleting | deleted). + // + // tag:key=value - The key/value combination of a tag assigned to the resource. + // + // tag-key - The key of a tag assigned to the resource. This filter is independent + // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" + // and the filter "tag-value=X", you get any resources assigned both the tag + // key Purpose (regardless of what the tag's value is), and the tag value X + // (regardless of what the tag's key is). If you want to list only resources + // where Purpose is X, see the tag:key=value filter. + // + // tag-value - The value of a tag assigned to the resource. This filter is + // independent of the tag-key filter. + // + // type - The type of virtual private gateway. Currently the only supported + // type is ipsec.1. + // + // vpn-gateway-id - The ID of the virtual private gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more virtual private gateway IDs. + // + // Default: Describes all your virtual private gateways. + VpnGatewayIds []*string `locationName:"VpnGatewayId" locationNameList:"VpnGatewayId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysInput) GoString() string { + return s.String() +} + +type DescribeVpnGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more virtual private gateways. + VpnGateways []*VpnGateway `locationName:"vpnGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysOutput) GoString() string { + return s.String() +} + +type DetachClassicLinkVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to unlink from the VPC. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the VPC to which the instance is linked. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcInput) GoString() string { + return s.String() +} + +type DetachClassicLinkVpcOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +type DetachInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayInput) GoString() string { + return s.String() +} + +type DetachInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayOutput) GoString() string { + return s.String() +} + +type DetachNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment. + AttachmentId *string `locationName:"attachmentId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether to force a detachment. 
+ Force *bool `locationName:"force" type:"boolean"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceInput) GoString() string { + return s.String() +} + +type DetachNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type DetachVolumeInput struct { + _ struct{} `type:"structure"` + + // The device name. + Device *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces detachment if the previous detachment attempt did not occur cleanly + // (for example, logging into an instance, unmounting the volume, and detaching + // normally). This option can lead to data loss or a corrupted file system. + // Use this option only as a last resort to detach a volume from a failed instance. + // The instance won't have an opportunity to flush file system caches or file + // system metadata. If you use this option, you must perform file system check + // and repair procedures. + Force *bool `type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The ID of the volume. 
+ VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVolumeInput) GoString() string { + return s.String() +} + +type DetachVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayInput) GoString() string { + return s.String() +} + +type DetachVpnGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a DHCP configuration option. +type DhcpConfiguration struct { + _ struct{} `type:"structure"` + + // The name of a DHCP option. + Key *string `locationName:"key" type:"string"` + + // One or more values for the DHCP option. 
+ Values []*AttributeValue `locationName:"valueSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpConfiguration) GoString() string { + return s.String() +} + +// Describes a set of DHCP options. +type DhcpOptions struct { + _ struct{} `type:"structure"` + + // One or more DHCP options in the set. + DhcpConfigurations []*DhcpConfiguration `locationName:"dhcpConfigurationSet" locationNameList:"item" type:"list"` + + // The ID of the set of DHCP options. + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // Any tags assigned to the DHCP options set. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DhcpOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpOptions) GoString() string { + return s.String() +} + +type DisableVgwRoutePropagationInput struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway. + GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. 
+ RouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +type DisableVgwRoutePropagationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkInput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +type DisassociateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The association ID. Required for EC2-VPC. + AssociationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. 
+ PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s DisassociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressInput) GoString() string { + return s.String() +} + +type DisassociateAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressOutput) GoString() string { + return s.String() +} + +type DisassociateRouteTableInput struct { + _ struct{} `type:"structure"` + + // The association ID representing the current association between the route + // table and subnet. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DisassociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableInput) GoString() string { + return s.String() +} + +type DisassociateRouteTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableOutput) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImage struct { + _ struct{} `type:"structure"` + + // A description of the disk image. 
+ Description *string `type:"string"` + + // Information about the disk image. + Image *DiskImageDetail `type:"structure"` + + // Information about the volume. + Volume *VolumeDetail `type:"structure"` +} + +// String returns the string representation +func (s DiskImage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImage) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImageDescription struct { + _ struct{} `type:"structure"` + + // The checksum computed for the disk image. + Checksum *string `locationName:"checksum" type:"string"` + + // The disk image format. + Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3. For information + // about creating a presigned URL for an Amazon S3 object, read the "Query String + // Request Authentication Alternative" section of the Authenticating REST Requests + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` + + // The size of the disk image, in GiB. + Size *int64 `locationName:"size" type:"long" required:"true"` +} + +// String returns the string representation +func (s DiskImageDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDescription) GoString() string { + return s.String() +} + +// Describes a disk image. +type DiskImageDetail struct { + _ struct{} `type:"structure"` + + // The size of the disk image, in GiB. + Bytes *int64 `locationName:"bytes" type:"long" required:"true"` + + // The disk image format. 
+ Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3 and presented + // here as an Amazon S3 presigned URL. For information about creating a presigned + // URL for an Amazon S3 object, read the "Query String Request Authentication + // Alternative" section of the Authenticating REST Requests (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` +} + +// String returns the string representation +func (s DiskImageDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDetail) GoString() string { + return s.String() +} + +// Describes a disk image volume. +type DiskImageVolumeDescription struct { + _ struct{} `type:"structure"` + + // The volume identifier. + Id *string `locationName:"id" type:"string" required:"true"` + + // The size of the volume, in GiB. + Size *int64 `locationName:"size" type:"long"` +} + +// String returns the string representation +func (s DiskImageVolumeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageVolumeDescription) GoString() string { + return s.String() +} + +// Describes a block device for an EBS volume. +type EbsBlockDevice struct { + _ struct{} `type:"structure"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes + // may only be attached to instances that support Amazon EBS encryption. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. 
+ // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that + // are provisioned for the volume. For General Purpose (SSD) volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information on General Purpose (SSD) baseline + // performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and + // 3 to 10000 for General Purpose (SSD) volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create standard or gp2 volumes. + Iops *int64 `locationName:"iops" type:"integer"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The size of the volume, in GiB. + // + // Constraints: 1-1024 for standard volumes, 1-16384 for gp2 volumes, and 4-16384 + // for io1 volumes. If you specify a snapshot, the volume size must be equal + // to or larger than the snapshot size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` + + // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned + // IOPS (SSD) volumes, and standard for Magnetic volumes. + // + // Default: standard + VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s EbsBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsBlockDevice) GoString() string { + return s.String() +} + +// Describes a parameter used to set up an EBS volume in a block device mapping. 
+type EbsInstanceBlockDevice struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` + + // The ID of the EBS volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s EbsInstanceBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInstanceBlockDevice) GoString() string { + return s.String() +} + +type EbsInstanceBlockDeviceSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The ID of the EBS volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s EbsInstanceBlockDeviceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInstanceBlockDeviceSpecification) GoString() string { + return s.String() +} + +type EnableVgwRoutePropagationInput struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway. + GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. 
+ RouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +type EnableVgwRoutePropagationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +type EnableVolumeIOInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVolumeIOInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOInput) GoString() string { + return s.String() +} + +type EnableVolumeIOOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableVolumeIOOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOOutput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. 
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkInput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// Describes a Spot fleet event. +type EventInformation struct { + _ struct{} `type:"structure"` + + // The description of the event. + EventDescription *string `locationName:"eventDescription" type:"string"` + + // The event. + // + // The following are the error events. + // + // iamFleetRoleInvalid - The Spot fleet did not have the required permissions + // either to launch or terminate an instance. + // + // launchSpecTemporarilyBlacklisted - The configuration is not valid and + // several attempts to launch instances have failed. For more information, see + // the description of the event. + // + // spotFleetRequestConfigurationInvalid - The configuration is not valid. + // For more information, see the description of the event. + // + // spotInstanceCountLimitExceeded - You've reached the limit on the number + // of Spot instances that you can launch. + // + // The following are the fleetRequestChange events. + // + // active - The Spot fleet has been validated and Amazon EC2 is attempting + // to maintain the target number of running Spot instances. + // + // cancelled - The Spot fleet is canceled and has no running Spot instances. + // The Spot fleet will be deleted two days after its instances were terminated. + // + // cancelled_running - The Spot fleet is canceled and will not launch additional + // Spot instances, but its existing Spot instances continue to run until they + // are interrupted or terminated. + // + // cancelled_terminating - The Spot fleet is canceled and its Spot instances + // are terminating. + // + // expired - The Spot fleet request has expired. 
A subsequent event indicates + // that the instances were terminated, if the request was created with TerminateInstancesWithExpiration + // set. + // + // modify_in_progress - A request to modify the Spot fleet request was accepted + // and is in progress. + // + // modify_successful - The Spot fleet request was modified. + // + // price_update - The bid price for a launch configuration was adjusted because + // it was too high. This change is permanent. + // + // submitted - The Spot fleet request is being evaluated and Amazon EC2 is + // preparing to launch the target number of Spot instances. + // + // The following are the instanceChange events. + // + // launched - A bid was fulfilled and a new instance was launched. + // + // terminated - An instance was terminated by the user. + EventSubType *string `locationName:"eventSubType" type:"string"` + + // The ID of the instance. This information is available only for instanceChange + // events. + InstanceId *string `locationName:"instanceId" type:"string"` +} + +// String returns the string representation +func (s EventInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventInformation) GoString() string { + return s.String() +} + +// Describes an instance export task. +type ExportTask struct { + _ struct{} `type:"structure"` + + // A description of the resource being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export task. + ExportTaskId *string `locationName:"exportTaskId" type:"string"` + + // Information about the export task. + ExportToS3Task *ExportToS3Task `locationName:"exportToS3" type:"structure"` + + // Information about the instance to export. + InstanceExportDetails *InstanceExportDetails `locationName:"instanceExport" type:"structure"` + + // The state of the export task. 
+ State *string `locationName:"state" type:"string" enum:"ExportTaskState"` + + // The status message related to the export task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTask) GoString() string { + return s.String() +} + +// Describes the format and location for an instance export task. +type ExportToS3Task struct { + _ struct{} `type:"structure"` + + // The container format used to combine disk images with metadata (such as OVF). + // If absent, only the disk image is exported. + ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"` + + // The format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The S3 bucket for the destination image. The destination bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The encryption key for your S3 bucket. + S3Key *string `locationName:"s3Key" type:"string"` +} + +// String returns the string representation +func (s ExportToS3Task) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportToS3Task) GoString() string { + return s.String() +} + +// Describes an instance export task. +type ExportToS3TaskSpecification struct { + _ struct{} `type:"structure"` + + // The container format used to combine disk images with metadata (such as OVF). + // If absent, only the disk image is exported. + ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"` + + // The format for the exported image. 
+ DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The S3 bucket for the destination image. The destination bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The image is written to a single object in the S3 bucket at the S3 key s3prefix + // + exportTaskId + '.' + diskImageFormat. + S3Prefix *string `locationName:"s3Prefix" type:"string"` +} + +// String returns the string representation +func (s ExportToS3TaskSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportToS3TaskSpecification) GoString() string { + return s.String() +} + +// A filter name and value pair that is used to return a more specific list +// of results. Filters can be used to match a set of resources by various criteria, +// such as tags, attributes, or IDs. +type Filter struct { + _ struct{} `type:"structure"` + + // The name of the filter. Filter names are case-sensitive. + Name *string `type:"string"` + + // One or more filter values. Filter values are case-sensitive. + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Describes a flow log. +type FlowLog struct { + _ struct{} `type:"structure"` + + // The date and time the flow log was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + + // Information about the error that occurred. 
Rate limited indicates that CloudWatch + // logs throttling has been applied for one or more network interfaces, or that + // you've reached the limit on the number of CloudWatch Logs log groups that + // you can create. Access error indicates that the IAM role associated with + // the flow log does not have sufficient permissions to publish to CloudWatch + // Logs. Unknown error indicates an internal error. + DeliverLogsErrorMessage *string `locationName:"deliverLogsErrorMessage" type:"string"` + + // The ARN of the IAM role that posts logs to CloudWatch Logs. + DeliverLogsPermissionArn *string `locationName:"deliverLogsPermissionArn" type:"string"` + + // The status of the logs delivery (SUCCESS | FAILED). + DeliverLogsStatus *string `locationName:"deliverLogsStatus" type:"string"` + + // The flow log ID. + FlowLogId *string `locationName:"flowLogId" type:"string"` + + // The status of the flow log (ACTIVE). + FlowLogStatus *string `locationName:"flowLogStatus" type:"string"` + + // The name of the flow log group. + LogGroupName *string `locationName:"logGroupName" type:"string"` + + // The ID of the resource on which the flow log was created. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The type of traffic captured for the flow log. + TrafficType *string `locationName:"trafficType" type:"string" enum:"TrafficType"` +} + +// String returns the string representation +func (s FlowLog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlowLog) GoString() string { + return s.String() +} + +type GetConsoleOutputInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetConsoleOutputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputInput) GoString() string { + return s.String() +} + +type GetConsoleOutputOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The console output, Base64 encoded. If using a command line tool, the tools + // decode the output for you. + Output *string `locationName:"output" type:"string"` + + // The time the output was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetConsoleOutputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputOutput) GoString() string { + return s.String() +} + +type GetPasswordDataInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Windows instance. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPasswordDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataInput) GoString() string { + return s.String() +} + +type GetPasswordDataOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Windows instance. 
+ InstanceId *string `locationName:"instanceId" type:"string"` + + // The password of the instance. + PasswordData *string `locationName:"passwordData" type:"string"` + + // The time the data was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetPasswordDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataOutput) GoString() string { + return s.String() +} + +// Describes a security group. +type GroupIdentifier struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s GroupIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupIdentifier) GoString() string { + return s.String() +} + +// Describes an event in the history of the Spot fleet request. +type HistoryRecord struct { + _ struct{} `type:"structure"` + + // Information about the event. + EventInformation *EventInformation `locationName:"eventInformation" type:"structure" required:"true"` + + // The event type. + // + // error - Indicates an error with the Spot fleet request. + // + // fleetRequestChange - Indicates a change in the status or configuration + // of the Spot fleet request. + // + // instanceChange - Indicates that an instance was launched or terminated. + EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"` + + // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). 
+ Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s HistoryRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryRecord) GoString() string { + return s.String() +} + +// Describes the properties of the Dedicated host. +type Host struct { + _ struct{} `type:"structure"` + + // Whether auto-placement is on or off. + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` + + // The Availability Zone of the Dedicated host. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of new instances that can be launched onto the Dedicated host. + AvailableCapacity *AvailableCapacity `locationName:"availableCapacity" type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The ID of the Dedicated host. + HostId *string `locationName:"hostId" type:"string"` + + // The hardware specifications of the Dedicated host. + HostProperties *HostProperties `locationName:"hostProperties" type:"structure"` + + // The reservation ID of the Dedicated host. This returns a null response if + // the Dedicated host doesn't have an associated reservation. + HostReservationId *string `locationName:"hostReservationId" type:"string"` + + // The IDs and instance type that are currently running on the Dedicated host. + Instances []*HostInstance `locationName:"instances" locationNameList:"item" type:"list"` + + // The Dedicated host's state. 
+ State *string `locationName:"state" type:"string" enum:"AllocationState"` +} + +// String returns the string representation +func (s Host) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Host) GoString() string { + return s.String() +} + +type HostInstance struct { + _ struct{} `type:"structure"` + + // the IDs of instances that are running on the Dedicated host. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance type size (e.g., m3.medium) of the running instance. + InstanceType *string `locationName:"instanceType" type:"string"` +} + +// String returns the string representation +func (s HostInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostInstance) GoString() string { + return s.String() +} + +type HostProperties struct { + _ struct{} `type:"structure"` + + // The number of cores on the Dedicated host. + Cores *int64 `locationName:"cores" type:"integer"` + + // The instance type size that the Dedicated host supports (e.g., m3.medium). + InstanceType *string `locationName:"instanceType" type:"string"` + + // The number of sockets on the Dedicated host. + Sockets *int64 `locationName:"sockets" type:"integer"` + + // The number of vCPUs on the Dedicated host. + TotalVCpus *int64 `locationName:"totalVCpus" type:"integer"` +} + +// String returns the string representation +func (s HostProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostProperties) GoString() string { + return s.String() +} + +// Describes an IAM instance profile. +type IamInstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the instance profile. + Arn *string `locationName:"arn" type:"string"` + + // The ID of the instance profile. 
+	Id *string `locationName:"id" type:"string"`
+}
+
+// String returns the string representation
+func (s IamInstanceProfile) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IamInstanceProfile) GoString() string {
+	return s.String()
+}
+
+// Describes an IAM instance profile.
+type IamInstanceProfileSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the instance profile.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// The name of the instance profile.
+	Name *string `locationName:"name" type:"string"`
+}
+
+// String returns the string representation
+func (s IamInstanceProfileSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IamInstanceProfileSpecification) GoString() string {
+	return s.String()
+}
+
+// Describes the ICMP type and code.
+type IcmpTypeCode struct {
+	_ struct{} `type:"structure"`
+
+	// The ICMP code. A value of -1 means all codes for the specified ICMP type.
+	Code *int64 `locationName:"code" type:"integer"`
+
+	// The ICMP type. A value of -1 means all types.
+	Type *int64 `locationName:"type" type:"integer"`
+}
+
+// String returns the string representation
+func (s IcmpTypeCode) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IcmpTypeCode) GoString() string {
+	return s.String()
+}
+
+// Describes the ID format for a resource.
+type IdFormat struct {
+	_ struct{} `type:"structure"`
+
+	// The date in UTC at which you are permanently switched over to using longer
+	// IDs.
+	Deadline *time.Time `locationName:"deadline" type:"timestamp" timestampFormat:"iso8601"`
+
+	// The type of resource.
+	Resource *string `locationName:"resource" type:"string"`
+
+	// Indicates whether longer IDs (17-character IDs) are enabled for the resource.
+ UseLongIds *bool `locationName:"useLongIds" type:"boolean"` +} + +// String returns the string representation +func (s IdFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdFormat) GoString() string { + return s.String() +} + +// Describes an image. +type Image struct { + _ struct{} `type:"structure"` + + // The architecture of the image. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // Any block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // The date and time the image was created. + CreationDate *string `locationName:"creationDate" type:"string"` + + // The description of the AMI that was provided during image creation. + Description *string `locationName:"description" type:"string"` + + // The hypervisor type of the image. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The location of the AMI. + ImageLocation *string `locationName:"imageLocation" type:"string"` + + // The AWS account alias (for example, amazon, self) or the AWS account ID of + // the AMI owner. + ImageOwnerAlias *string `locationName:"imageOwnerAlias" type:"string"` + + // The type of image. + ImageType *string `locationName:"imageType" type:"string" enum:"ImageTypeValues"` + + // The kernel associated with the image, if any. Only applicable for machine + // images. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the AMI that was provided during image creation. + Name *string `locationName:"name" type:"string"` + + // The AWS account ID of the image owner. + OwnerId *string `locationName:"imageOwnerId" type:"string"` + + // The value is Windows for Windows AMIs; otherwise blank. 
+ Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // Any product codes associated with the AMI. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // Indicates whether the image has public launch permissions. The value is true + // if this image has public launch permissions or false if it has only implicit + // and explicit launch permissions. + Public *bool `locationName:"isPublic" type:"boolean"` + + // The RAM disk associated with the image, if any. Only applicable for machine + // images. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The device name of the root device (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The type of root device used by the AMI. The AMI can use an EBS volume or + // an instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // Specifies whether enhanced networking is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the AMI. If the state is available, the image is successfully + // registered and can be used to launch an instance. + State *string `locationName:"imageState" type:"string" enum:"ImageState"` + + // The reason for the state change. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // Any tags assigned to the image. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of virtualization of the AMI. 
+ VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +// Describes the disk container object for an import image task. +type ImageDiskContainer struct { + _ struct{} `type:"structure"` + + // The description of the disk image. + Description *string `type:"string"` + + // The block device mapping for the disk. + DeviceName *string `type:"string"` + + // The format of the disk image being imported. + // + // Valid values: RAW | VHD | VMDK | OVA + Format *string `type:"string"` + + // The ID of the EBS snapshot to be used for importing the snapshot. + SnapshotId *string `type:"string"` + + // The URL to the Amazon S3-based disk image being imported. The URL can either + // be a https URL (https://..) or an Amazon S3 URL (s3://..) + Url *string `type:"string"` + + // The S3 bucket for the disk image. + UserBucket *UserBucket `type:"structure"` +} + +// String returns the string representation +func (s ImageDiskContainer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageDiskContainer) GoString() string { + return s.String() +} + +type ImportImageInput struct { + _ struct{} `type:"structure"` + + // The architecture of the virtual machine. + // + // Valid values: i386 | x86_64 + Architecture *string `type:"string"` + + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // The token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // A description string for the import image task. + Description *string `type:"string"` + + // Information about the disk containers. 
+ DiskContainers []*ImageDiskContainer `locationName:"DiskContainer" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The target hypervisor platform. + // + // Valid values: xen + Hypervisor *string `type:"string"` + + // The license type to be used for the Amazon Machine Image (AMI) after importing. + // + // Note: You may only use BYOL if you have existing licenses with rights to + // use these licenses in a third party cloud like AWS. For more information, + // see VM Import/Export Prerequisites (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Valid values: AWS | BYOL + LicenseType *string `type:"string"` + + // The operating system of the virtual machine. + // + // Valid values: Windows | Linux + Platform *string `type:"string"` + + // The name of the role to use when not using the default role, 'vmimport'. + RoleName *string `type:"string"` +} + +// String returns the string representation +func (s ImportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageInput) GoString() string { + return s.String() +} + +type ImportImageOutput struct { + _ struct{} `type:"structure"` + + // The architecture of the virtual machine. + Architecture *string `locationName:"architecture" type:"string"` + + // A description of the import task. + Description *string `locationName:"description" type:"string"` + + // The target hypervisor of the import task. + Hypervisor *string `locationName:"hypervisor" type:"string"` + + // The ID of the Amazon Machine Image (AMI) created by the import task. 
+ ImageId *string `locationName:"imageId" type:"string"` + + // The task ID of the import image task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The license type of the virtual machine. + LicenseType *string `locationName:"licenseType" type:"string"` + + // The operating system of the virtual machine. + Platform *string `locationName:"platform" type:"string"` + + // The progress of the task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the snapshots. + SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"` + + // A brief status of the task. + Status *string `locationName:"status" type:"string"` + + // A detailed status message of the import task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ImportImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageOutput) GoString() string { + return s.String() +} + +// Describes an import image task. +type ImportImageTask struct { + _ struct{} `type:"structure"` + + // The architecture of the virtual machine. + // + // Valid values: i386 | x86_64 + Architecture *string `locationName:"architecture" type:"string"` + + // A description of the import task. + Description *string `locationName:"description" type:"string"` + + // The target hypervisor for the import task. + // + // Valid values: xen + Hypervisor *string `locationName:"hypervisor" type:"string"` + + // The ID of the Amazon Machine Image (AMI) of the imported virtual machine. + ImageId *string `locationName:"imageId" type:"string"` + + // The ID of the import image task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The license type of the virtual machine. 
+	LicenseType *string `locationName:"licenseType" type:"string"`
+
+	// The operating system of the virtual machine.
+	Platform *string `locationName:"platform" type:"string"`
+
+	// The percentage of progress of the import image task.
+	Progress *string `locationName:"progress" type:"string"`
+
+	// Information about the snapshots.
+	SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"`
+
+	// A brief status for the import image task.
+	Status *string `locationName:"status" type:"string"`
+
+	// A descriptive status message for the import image task.
+	StatusMessage *string `locationName:"statusMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s ImportImageTask) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImportImageTask) GoString() string {
+	return s.String()
+}
+
+type ImportInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// A description for the instance being imported.
+	Description *string `locationName:"description" type:"string"`
+
+	// The disk image.
+	DiskImages []*DiskImage `locationName:"diskImage" type:"list"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The launch specification.
+	LaunchSpecification *ImportInstanceLaunchSpecification `locationName:"launchSpecification" type:"structure"`
+
+	// The instance operating system.
+ Platform *string `locationName:"platform" type:"string" required:"true" enum:"PlatformValues"` +} + +// String returns the string representation +func (s ImportInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceInput) GoString() string { + return s.String() +} + +// Describes the launch specification for VM import. +type ImportInstanceLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The architecture of the instance. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // One or more security group IDs. + GroupIds []*string `locationName:"GroupId" locationNameList:"SecurityGroupId" type:"list"` + + // One or more security group names. + GroupNames []*string `locationName:"GroupName" locationNameList:"SecurityGroup" type:"list"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information about the instance types that you + // can import, see Before You Get Started (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether monitoring is enabled. + Monitoring *bool `locationName:"monitoring" type:"boolean"` + + // The placement information for the instance. + Placement *Placement `locationName:"placement" type:"structure"` + + // [EC2-VPC] An available IP address from the IP address range of the subnet. 
+ PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // [EC2-VPC] The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to be made available to the instance. + UserData *UserData `locationName:"userData" type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceLaunchSpecification) GoString() string { + return s.String() +} + +type ImportInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion task. + ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceOutput) GoString() string { + return s.String() +} + +// Describes an import instance task. +type ImportInstanceTaskDetails struct { + _ struct{} `type:"structure"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance operating system. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // One or more volumes. + Volumes []*ImportInstanceVolumeDetailItem `locationName:"volumes" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ImportInstanceTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceTaskDetails) GoString() string { + return s.String() +} + +// Describes an import volume task. 
+type ImportInstanceVolumeDetailItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the resulting instance will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + + // The status of the import of this particular disk image. + Status *string `locationName:"status" type:"string" required:"true"` + + // The status information or errors related to the disk image. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The volume. + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportInstanceVolumeDetailItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceVolumeDetailItem) GoString() string { + return s.String() +} + +type ImportKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + KeyName *string `locationName:"keyName" type:"string" required:"true"` + + // The public key. You must base64 encode the public key material before sending + // it to AWS. 
+ PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"` +} + +// String returns the string representation +func (s ImportKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairInput) GoString() string { + return s.String() +} + +type ImportKeyPairOutput struct { + _ struct{} `type:"structure"` + + // The MD5 public key fingerprint as specified in section 4 of RFC 4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The key pair name you provided. + KeyName *string `locationName:"keyName" type:"string"` +} + +// String returns the string representation +func (s ImportKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairOutput) GoString() string { + return s.String() +} + +type ImportSnapshotInput struct { + _ struct{} `type:"structure"` + + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // Token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // The description string for the import snapshot task. + Description *string `type:"string"` + + // Information about the disk container. + DiskContainer *SnapshotDiskContainer `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name of the role to use when not using the default role, 'vmimport'. 
+ RoleName *string `type:"string"` +} + +// String returns the string representation +func (s ImportSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotInput) GoString() string { + return s.String() +} + +type ImportSnapshotOutput struct { + _ struct{} `type:"structure"` + + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Information about the import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotOutput) GoString() string { + return s.String() +} + +// Describes an import snapshot task. +type ImportSnapshotTask struct { + _ struct{} `type:"structure"` + + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Describes an import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` +} + +// String returns the string representation +func (s ImportSnapshotTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotTask) GoString() string { + return s.String() +} + +type ImportVolumeInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the resulting EBS volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // A description of the volume. 
+ Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The disk image. + Image *DiskImageDetail `locationName:"image" type:"structure" required:"true"` + + // The volume size. + Volume *VolumeDetail `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeInput) GoString() string { + return s.String() +} + +type ImportVolumeOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion task. + ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` +} + +// String returns the string representation +func (s ImportVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeOutput) GoString() string { + return s.String() +} + +// Describes an import volume task. +type ImportVolumeTaskDetails struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the resulting volume will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"` + + // The description you provided when starting the import volume task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"` + + // The volume. 
+ Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportVolumeTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeTaskDetails) GoString() string { + return s.String() +} + +// Describes an instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The AMI launch index, which can be used to find this instance in the launch + // group. + AmiLaunchIndex *int64 `locationName:"amiLaunchIndex" type:"integer"` + + // The architecture of the image. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // Any block device mapping entries for the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // The idempotency token you provided when you launched the instance, if applicable. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The hypervisor type of the instance. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The IAM instance profile associated with the instance, if applicable. + IamInstanceProfile *IamInstanceProfile `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI used to launch the instance. + ImageId *string `locationName:"imageId" type:"string"` + + // The ID of the instance. 
+ InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether this is a Spot instance. + InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The kernel associated with this instance, if applicable. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair, if this instance was launched with an associated + // key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // The time the instance was launched. + LaunchTime *time.Time `locationName:"launchTime" type:"timestamp" timestampFormat:"iso8601"` + + // The monitoring information for the instance. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` + + // [EC2-VPC] One or more network interfaces for the instance. + NetworkInterfaces []*InstanceNetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The location where the instance launched, if applicable. + Placement *Placement `locationName:"placement" type:"structure"` + + // The value is Windows for Windows instances; otherwise blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // The private DNS name assigned to the instance. This DNS name can only be + // used inside the Amazon EC2 network. This name is not available until the + // instance enters the running state. For EC2-VPC, this name is only available + // if you've enabled DNS hostnames for your VPC. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address assigned to the instance. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The product codes attached to this instance, if applicable. 
+ ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The public DNS name assigned to the instance. This name is not available + // until the instance enters the running state. For EC2-VPC, this name is only + // available if you've enabled DNS hostnames for your VPC. + PublicDnsName *string `locationName:"dnsName" type:"string"` + + // The public IP address assigned to the instance, if applicable. + PublicIpAddress *string `locationName:"ipAddress" type:"string"` + + // The RAM disk associated with this instance, if applicable. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The root device name (for example, /dev/sda1 or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The root device type used by the AMI. The AMI can use an EBS volume or an + // instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // One or more security groups for the instance. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // Specifies whether to enable an instance launched in a VPC to perform NAT. + // This controls whether source/destination checking is enabled on the instance. + // A value of true means checking is enabled, and false means checking is disabled. + // The value must be false for the instance to perform NAT. For more information, + // see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // If the request is a Spot instance request, the ID of the request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // Specifies whether enhanced networking is enabled. 
+ SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the instance. + State *InstanceState `locationName:"instanceState" type:"structure"` + + // The reason for the most recent state transition. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // The reason for the most recent state transition. This might be an empty string. + StateTransitionReason *string `locationName:"reason" type:"string"` + + // [EC2-VPC] The ID of the subnet in which the instance is running. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The virtualization type of the instance. + VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` + + // [EC2-VPC] The ID of the VPC in which the instance is running. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Describes a block device mapping. +type InstanceBlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `locationName:"deviceName" type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsInstanceBlockDevice `locationName:"ebs" type:"structure"` +} + +// String returns the string representation +func (s InstanceBlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceBlockDeviceMapping) GoString() string { + return s.String() +} + +// Describes a block device mapping entry. 
+type InstanceBlockDeviceMappingSpecification struct { + _ struct{} `type:"structure"` + + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `locationName:"deviceName" type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsInstanceBlockDeviceSpecification `locationName:"ebs" type:"structure"` + + // suppress the specified device included in the block device mapping. + NoDevice *string `locationName:"noDevice" type:"string"` + + // The virtual device name. + VirtualName *string `locationName:"virtualName" type:"string"` +} + +// String returns the string representation +func (s InstanceBlockDeviceMappingSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceBlockDeviceMappingSpecification) GoString() string { + return s.String() +} + +// Information about the instance type that the Dedicated host supports. +type InstanceCapacity struct { + _ struct{} `type:"structure"` + + // The number of instances that can still be launched onto the Dedicated host. + AvailableCapacity *int64 `locationName:"availableCapacity" type:"integer"` + + // The instance type size supported by the Dedicated host. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The total number of instances that can be launched onto the Dedicated host. + TotalCapacity *int64 `locationName:"totalCapacity" type:"integer"` +} + +// String returns the string representation +func (s InstanceCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceCapacity) GoString() string { + return s.String() +} + +// Describes a Reserved Instance listing state. +type InstanceCount struct { + _ struct{} `type:"structure"` + + // The number of listed Reserved Instances in the state specified by the state. 
+ InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The states of the listed Reserved Instances. + State *string `locationName:"state" type:"string" enum:"ListingState"` +} + +// String returns the string representation +func (s InstanceCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceCount) GoString() string { + return s.String() +} + +// Describes an instance to export. +type InstanceExportDetails struct { + _ struct{} `type:"structure"` + + // The ID of the resource being exported. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The target virtualization environment. + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` +} + +// String returns the string representation +func (s InstanceExportDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceExportDetails) GoString() string { + return s.String() +} + +// Describes the monitoring information of the instance. +type InstanceMonitoring struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The monitoring information. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` +} + +// String returns the string representation +func (s InstanceMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceMonitoring) GoString() string { + return s.String() +} + +// Describes a network interface. +type InstanceNetworkInterface struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP associated with the network + // interface. + Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. 
+ Attachment *InstanceNetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The description. + Description *string `locationName:"description" type:"string"` + + // One or more security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The MAC address. + MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that created the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IP addresses associated with the network interface. + PrivateIpAddresses []*InstancePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // Indicates whether to validate network traffic to or from this network interface. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. + Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s InstanceNetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterface) GoString() string { + return s.String() +} + +// Describes association information for an Elastic IP address. +type InstanceNetworkInterfaceAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the owner of the Elastic IP address. 
+ IpOwnerId *string `locationName:"ipOwnerId" type:"string"` + + // The public DNS name. + PublicDnsName *string `locationName:"publicDnsName" type:"string"` + + // The public IP address or Elastic IP address bound to the network interface. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s InstanceNetworkInterfaceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterfaceAssociation) GoString() string { + return s.String() +} + +// Describes a network interface attachment. +type InstanceNetworkInterfaceAttachment struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The index of the device on the instance for the network interface attachment. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` +} + +// String returns the string representation +func (s InstanceNetworkInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterfaceAttachment) GoString() string { + return s.String() +} + +// Describes a network interface. +type InstanceNetworkInterfaceSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether to assign a public IP address to an instance you launch + // in a VPC. 
The public IP address can only be assigned to a network interface + // for eth0, and can only be assigned to a new network interface, not an existing + // one. You cannot specify more than one network interface in the request. If + // launching into a default subnet, the default value is true. + AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` + + // If set to true, the interface is deleted when the instance is terminated. + // You can specify true only if creating a new network interface when launching + // an instance. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The description of the network interface. Applies only if creating a network + // interface when launching an instance. + Description *string `locationName:"description" type:"string"` + + // The index of the device on the instance for the network interface attachment. + // If you are specifying a network interface in a RunInstances request, you + // must provide the device index. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The IDs of the security groups for the network interface. Applies only if + // creating a network interface when launching an instance. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The private IP address of the network interface. Applies only if creating + // a network interface when launching an instance. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // One or more private IP addresses to assign to the network interface. Only + // one private IP address can be designated as primary. 
+ PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddressesSet" queryName:"PrivateIpAddresses" locationNameList:"item" type:"list"` + + // The number of secondary private IP addresses. You can't specify this option + // and specify more than one private IP address using the private IP addresses + // option. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + // The ID of the subnet associated with the network string. Applies only if + // creating a network interface when launching an instance. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s InstanceNetworkInterfaceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceNetworkInterfaceSpecification) GoString() string { + return s.String() +} + +// Describes a private IP address. +type InstancePrivateIpAddress struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP address for the network interface. + Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // Indicates whether this IP address is the primary private IP address of the + // network interface. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address of the network interface. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` +} + +// String returns the string representation +func (s InstancePrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstancePrivateIpAddress) GoString() string { + return s.String() +} + +// Describes the current state of the instance. 
+type InstanceState struct { + _ struct{} `type:"structure"` + + // The low byte represents the state. The high byte is an opaque internal value + // and should be ignored. + // + // 0 : pending + // + // 16 : running + // + // 32 : shutting-down + // + // 48 : terminated + // + // 64 : stopping + // + // 80 : stopped + Code *int64 `locationName:"code" type:"integer"` + + // The current state of the instance. + Name *string `locationName:"name" type:"string" enum:"InstanceStateName"` +} + +// String returns the string representation +func (s InstanceState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceState) GoString() string { + return s.String() +} + +// Describes an instance state change. +type InstanceStateChange struct { + _ struct{} `type:"structure"` + + // The current state of the instance. + CurrentState *InstanceState `locationName:"currentState" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The previous state of the instance. + PreviousState *InstanceState `locationName:"previousState" type:"structure"` +} + +// String returns the string representation +func (s InstanceStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStateChange) GoString() string { + return s.String() +} + +// Describes the status of an instance. +type InstanceStatus struct { + _ struct{} `type:"structure"` + + // The Availability Zone of the instance. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Any scheduled events associated with the instance. + Events []*InstanceStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The intended state of the instance. 
DescribeInstanceStatus requires that + // an instance be in the running state. + InstanceState *InstanceState `locationName:"instanceState" type:"structure"` + + // Reports impaired functionality that stems from issues internal to the instance, + // such as impaired reachability. + InstanceStatus *InstanceStatusSummary `locationName:"instanceStatus" type:"structure"` + + // Reports impaired functionality that stems from issues related to the systems + // that support an instance, such as hardware failures and network connectivity + // problems. + SystemStatus *InstanceStatusSummary `locationName:"systemStatus" type:"structure"` +} + +// String returns the string representation +func (s InstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatus) GoString() string { + return s.String() +} + +// Describes the instance status. +type InstanceStatusDetails struct { + _ struct{} `type:"structure"` + + // The time when a status check failed. For an instance that was launched and + // impaired, this is the time when the instance was launched. + ImpairedSince *time.Time `locationName:"impairedSince" type:"timestamp" timestampFormat:"iso8601"` + + // The type of instance status. + Name *string `locationName:"name" type:"string" enum:"StatusName"` + + // The status. + Status *string `locationName:"status" type:"string" enum:"StatusType"` +} + +// String returns the string representation +func (s InstanceStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusDetails) GoString() string { + return s.String() +} + +// Describes a scheduled event for an instance. +type InstanceStatusEvent struct { + _ struct{} `type:"structure"` + + // The event code. + Code *string `locationName:"code" type:"string" enum:"EventCode"` + + // A description of the event. 
+ // + // After a scheduled event is completed, it can still be described for up to + // a week. If the event has been completed, this description starts with the + // following text: [Completed]. + Description *string `locationName:"description" type:"string"` + + // The latest scheduled end time for the event. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + + // The earliest scheduled start time for the event. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s InstanceStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusEvent) GoString() string { + return s.String() +} + +// Describes the status of an instance. +type InstanceStatusSummary struct { + _ struct{} `type:"structure"` + + // The system instance health or application instance health. + Details []*InstanceStatusDetails `locationName:"details" locationNameList:"item" type:"list"` + + // The status. + Status *string `locationName:"status" type:"string" enum:"SummaryStatus"` +} + +// String returns the string representation +func (s InstanceStatusSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusSummary) GoString() string { + return s.String() +} + +// Describes an Internet gateway. +type InternetGateway struct { + _ struct{} `type:"structure"` + + // Any VPCs attached to the Internet gateway. + Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The ID of the Internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string"` + + // Any tags assigned to the Internet gateway. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s InternetGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternetGateway) GoString() string { + return s.String() +} + +// Describes the attachment of a VPC to an Internet gateway. +type InternetGatewayAttachment struct { + _ struct{} `type:"structure"` + + // The current state of the attachment. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s InternetGatewayAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternetGatewayAttachment) GoString() string { + return s.String() +} + +// Describes a security group rule. +type IpPermission struct { + _ struct{} `type:"structure"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // A value of -1 indicates all ICMP types. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers + // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // + // [EC2-VPC only] When you authorize or revoke security group rules, you can + // use -1 to specify all. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // One or more IP ranges. + IpRanges []*IpRange `locationName:"ipRanges" locationNameList:"item" type:"list"` + + // (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups + // only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress + // request, this is the AWS service that you want to access through a VPC endpoint + // from instances associated with the security group. 
+ PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code. A value + // of -1 indicates all ICMP codes for the specified ICMP type. + ToPort *int64 `locationName:"toPort" type:"integer"` + + // One or more security group and AWS account ID pairs. + UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s IpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpPermission) GoString() string { + return s.String() +} + +// Describes an IP range. +type IpRange struct { + _ struct{} `type:"structure"` + + // The CIDR range. You can either specify a CIDR range or a source security + // group, not both. + CidrIp *string `locationName:"cidrIp" type:"string"` +} + +// String returns the string representation +func (s IpRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpRange) GoString() string { + return s.String() +} + +// Describes a key pair. +type KeyPairInfo struct { + _ struct{} `type:"structure"` + + // If you used CreateKeyPair to create the key pair, this is the SHA-1 digest + // of the DER encoded private key. If you used ImportKeyPair to provide AWS + // the public key, this is the MD5 public key fingerprint as specified in section + // 4 of RFC4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` +} + +// String returns the string representation +func (s KeyPairInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyPairInfo) GoString() string { + return s.String() +} + +// Describes a launch permission. 
+type LaunchPermission struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s LaunchPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPermission) GoString() string { + return s.String() +} + +// Describes a launch permission modification. +type LaunchPermissionModifications struct { + _ struct{} `type:"structure"` + + // The AWS account ID to add to the list of launch permissions for the AMI. + Add []*LaunchPermission `locationNameList:"item" type:"list"` + + // The AWS account ID to remove from the list of launch permissions for the + // AMI. + Remove []*LaunchPermission `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LaunchPermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPermissionModifications) GoString() string { + return s.String() +} + +// Describes the launch specification for an instance. +type LaunchSpecification struct { + _ struct{} `type:"structure"` + + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. 
+ // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Describes the monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information for the instance. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to make available to the instances. 
+ UserData *string `locationName:"userData" type:"string"` +} + +// String returns the string representation +func (s LaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchSpecification) GoString() string { + return s.String() +} + +type ModifyHostsInput struct { + _ struct{} `type:"structure"` + + // Specify whether to enable or disable auto-placement. + AutoPlacement *string `locationName:"autoPlacement" type:"string" required:"true" enum:"AutoPlacement"` + + // The host IDs of the Dedicated hosts you want to modify. + HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHostsInput) GoString() string { + return s.String() +} + +type ModifyHostsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated hosts that were successfully modified. + Successful []*string `locationName:"successful" locationNameList:"item" type:"list"` + + // The IDs of the Dedicated hosts that could not be modified. Check whether + // the setting you requested can be used. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ModifyHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHostsOutput) GoString() string { + return s.String() +} + +type ModifyIdFormatInput struct { + _ struct{} `type:"structure"` + + // The type of resource. + Resource *string `type:"string" required:"true"` + + // Indicate whether the resource should use longer IDs (17-character IDs). 
+ UseLongIds *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ModifyIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdFormatInput) GoString() string { + return s.String() +} + +type ModifyIdFormatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdFormatOutput) GoString() string { + return s.String() +} + +type ModifyImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute to modify. + Attribute *string `type:"string"` + + // A description for the AMI. + Description *AttributeValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` + + // A launch permission modification. + LaunchPermission *LaunchPermissionModifications `type:"structure"` + + // The operation type. + OperationType *string `type:"string" enum:"OperationType"` + + // One or more product codes. After you add a product code to an AMI, it can't + // be removed. This is only valid when modifying the productCodes attribute. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // One or more user groups. This is only valid when modifying the launchPermission + // attribute. + UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // One or more AWS account IDs. 
This is only valid when modifying the launchPermission + // attribute. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` + + // The value of the attribute being modified. This is only valid when modifying + // the description attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s ModifyImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeInput) GoString() string { + return s.String() +} + +type ModifyImageAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeOutput) GoString() string { + return s.String() +} + +type ModifyInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Attribute *string `locationName:"attribute" type:"string" enum:"InstanceAttributeName"` + + // Modifies the DeleteOnTermination attribute for volumes that are currently + // attached. The volume must be owned by the caller. If no value is specified + // for DeleteOnTermination, the default is true and the volume is deleted when + // the instance is terminated. + // + // To add instance store volumes to an Amazon EBS-backed instance, you must + // add them when you launch the instance. For more information, see Updating + // the Block Device Mapping when Launching an Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) + // in the Amazon Elastic Compute Cloud User Guide. 
+ BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // If the value is true, you can't terminate the instance using the Amazon EC2 + // console, CLI, or API; otherwise, you can. You cannot use this paramater for + // Spot Instances. + DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + + // [EC2-VPC] Changes the security groups of the instance. You must specify at + // least one security group, even if it's just the default security group for + // the VPC. You must specify the security group ID, not the security group name. + Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // Specifies whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // Changes the instance type to the specified value. 
For more information, see + // Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // Changes the instance's kernel to the specified value. We recommend that you + // use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). + Kernel *AttributeValue `locationName:"kernel" type:"structure"` + + // Changes the instance's RAM disk to the specified value. We recommend that + // you use PV-GRUB instead of kernels and RAM disks. For more information, see + // PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). + Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"` + + // Specifies whether source/destination checking is enabled. A value of true + // means that checking is enabled, and false means checking is disabled. This + // value must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `type:"structure"` + + // Set to simple to enable enhanced networking for the instance. + // + // There is no way to disable enhanced networking at this time. + // + // This option is supported only for HVM instances. Specifying this option + // with a PV instance can make it unreachable. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // Changes the instance's user data to the specified value. + UserData *BlobAttributeValue `locationName:"userData" type:"structure"` + + // A new value for the attribute. Use only with the kernel, ramdisk, userData, + // disableApiTermination, or instanceInitiatedShutdownBehavior attribute. 
+ Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ModifyInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceAttributeInput) GoString() string { + return s.String() +} + +type ModifyInstanceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceAttributeOutput) GoString() string { + return s.String() +} + +type ModifyInstancePlacementInput struct { + _ struct{} `type:"structure"` + + // The new affinity setting for the instance. + Affinity *string `locationName:"affinity" type:"string" enum:"Affinity"` + + // The ID of the Dedicated host that the instance will have affinity with. + HostId *string `locationName:"hostId" type:"string"` + + // The ID of the instance that you are modifying. + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The tenancy of the instance that you are modifying. + Tenancy *string `locationName:"tenancy" type:"string" enum:"HostTenancy"` +} + +// String returns the string representation +func (s ModifyInstancePlacementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstancePlacementInput) GoString() string { + return s.String() +} + +type ModifyInstancePlacementOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyInstancePlacementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstancePlacementOutput) GoString() string { + return s.String() +} + +type ModifyNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // Information about the interface attachment. If modifying the 'delete on termination' + // attribute, you must specify the ID of the interface attachment. + Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"` + + // A description for the network interface. + Description *AttributeValue `locationName:"description" type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Changes the security groups for the network interface. The new set of groups + // you specify replaces the current set. You must specify at least one group, + // even if it's just the default security group in the VPC. You must specify + // the ID of the security group, not the name. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. 
For more information, see + // NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +type ModifyNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +type ModifyReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive token you provide to ensure idempotency of your + // modification request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the Reserved Instances to modify. + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list" required:"true"` + + // The configuration settings for the Reserved Instances to modify. 
+ TargetConfigurations []*ReservedInstancesConfiguration `locationName:"ReservedInstancesConfigurationSetItemType" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReservedInstancesInput) GoString() string { + return s.String() +} + +type ModifyReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // The ID for the modification. + ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"` +} + +// String returns the string representation +func (s ModifyReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReservedInstancesOutput) GoString() string { + return s.String() +} + +type ModifySnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The snapshot attribute to modify. + // + // Only volume creation permissions may be modified at the customer level. + Attribute *string `type:"string" enum:"SnapshotAttributeName"` + + // A JSON representation of the snapshot attribute modification. + CreateVolumePermission *CreateVolumePermissionModifications `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The group to modify for the snapshot. + GroupNames []*string `locationName:"UserGroup" locationNameList:"GroupName" type:"list"` + + // The type of operation to perform to the attribute. + OperationType *string `type:"string" enum:"OperationType"` + + // The ID of the snapshot. 
+ SnapshotId *string `type:"string" required:"true"` + + // The account ID to modify for the snapshot. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` +} + +// String returns the string representation +func (s ModifySnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotAttributeInput) GoString() string { + return s.String() +} + +type ModifySnapshotAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifySnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ModifySpotFleetRequest. +type ModifySpotFleetRequestInput struct { + _ struct{} `type:"structure"` + + // Indicates whether running Spot instances should be terminated if the target + // capacity of the Spot fleet request is decreased below the current size of + // the Spot fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The size of the fleet. + TargetCapacity *int64 `locationName:"targetCapacity" type:"integer"` +} + +// String returns the string representation +func (s ModifySpotFleetRequestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySpotFleetRequestInput) GoString() string { + return s.String() +} + +// Contains the output of ModifySpotFleetRequest. +type ModifySpotFleetRequestOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifySpotFleetRequestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySpotFleetRequestOutput) GoString() string { + return s.String() +} + +type ModifySubnetAttributeInput struct { + _ struct{} `type:"structure"` + + // Specify true to indicate that instances launched into the specified subnet + // should be assigned public IP address. + MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifySubnetAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySubnetAttributeInput) GoString() string { + return s.String() +} + +type ModifySubnetAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifySubnetAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySubnetAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVolumeAttributeInput struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume should be auto-enabled for I/O operations. + AutoEnableIO *AttributeBooleanValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. 
+ VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVolumeAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeAttributeInput) GoString() string { + return s.String() +} + +type ModifyVolumeAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyVolumeAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVpcAttributeInput struct { + _ struct{} `type:"structure"` + + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // enabled, instances in the VPC get DNS hostnames; otherwise, they do not. + // + // You cannot modify the DNS resolution and DNS hostnames attributes in the + // same request. Use separate requests for each attribute. You can only enable + // DNS hostnames if you've enabled DNS support. + EnableDnsHostnames *AttributeBooleanValue `type:"structure"` + + // Indicates whether the DNS resolution is supported for the VPC. If enabled, + // queries to the Amazon provided DNS server at the 169.254.169.253 IP address, + // or the reserved IP address at the base of the VPC network range "plus two" + // will succeed. If disabled, the Amazon provided DNS service in the VPC that + // resolves public DNS hostnames to IP addresses is not enabled. + // + // You cannot modify the DNS resolution and DNS hostnames attributes in the + // same request. Use separate requests for each attribute. + EnableDnsSupport *AttributeBooleanValue `type:"structure"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeInput) GoString() string { + return s.String() +} + +type ModifyVpcAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVpcEndpointInput struct { + _ struct{} `type:"structure"` + + // One or more route tables IDs to associate with the endpoint. + AddRouteTableIds []*string `locationName:"AddRouteTableId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // A policy document to attach to the endpoint. The policy must be in valid + // JSON format. + PolicyDocument *string `type:"string"` + + // One or more route table IDs to disassociate from the endpoint. + RemoveRouteTableIds []*string `locationName:"RemoveRouteTableId" locationNameList:"item" type:"list"` + + // Specify true to reset the policy document to the default policy. The default + // policy allows access to the service. + ResetPolicy *bool `type:"boolean"` + + // The ID of the endpoint. 
+ VpcEndpointId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointInput) GoString() string { + return s.String() +} + +type ModifyVpcEndpointOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointOutput) GoString() string { + return s.String() +} + +type MonitorInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s MonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesInput) GoString() string { + return s.String() +} + +type MonitorInstancesOutput struct { + _ struct{} `type:"structure"` + + // Monitoring information for one or more instances. 
+ InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s MonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesOutput) GoString() string { + return s.String() +} + +// Describes the monitoring for the instance. +type Monitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled for the instance. + State *string `locationName:"state" type:"string" enum:"MonitoringState"` +} + +// String returns the string representation +func (s Monitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Monitoring) GoString() string { + return s.String() +} + +type MoveAddressToVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` +} + +// String returns the string representation +func (s MoveAddressToVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcInput) GoString() string { + return s.String() +} + +type MoveAddressToVpcOutput struct { + _ struct{} `type:"structure"` + + // The allocation ID for the Elastic IP address. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The status of the move of the IP address. 
+ Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s MoveAddressToVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcOutput) GoString() string { + return s.String() +} + +// Describes the status of a moving Elastic IP address. +type MovingAddressStatus struct { + _ struct{} `type:"structure"` + + // The status of the Elastic IP address that's being moved to the EC2-VPC platform, + // or restored to the EC2-Classic platform. + MoveStatus *string `locationName:"moveStatus" type:"string" enum:"MoveStatus"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s MovingAddressStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MovingAddressStatus) GoString() string { + return s.String() +} + +// Describes a NAT gateway. +type NatGateway struct { + _ struct{} `type:"structure"` + + // The date and time the NAT gateway was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // The date and time the NAT gateway was deleted, if applicable. + DeleteTime *time.Time `locationName:"deleteTime" type:"timestamp" timestampFormat:"iso8601"` + + // If the NAT gateway could not be created, specifies the error code for the + // failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound + // | Resource.AlreadyAssociated | InternalError) + FailureCode *string `locationName:"failureCode" type:"string"` + + // If the NAT gateway could not be created, specifies the error message for + // the failure, that corresponds to the error code. 
+ // + // For InsufficientFreeAddressesInSubnet: Subnet has insufficient free addresses + // to create this NAT gateway For Gateway.NotAttached: Network vpc-xxxxxxxx + // has no Internet gateway attached For InvalidAllocationID.NotFound: Elastic + // IP address eipalloc-xxxxxxxx could not be associated with this NAT gateway + // For Resource.AlreadyAssociated: Elastic IP address eipalloc-xxxxxxxx is already + // associated For InternalError: Network interface eni-xxxxxxxx, created and + // used internally by this NAT gateway is in an invalid state. Please try again. + FailureMessage *string `locationName:"failureMessage" type:"string"` + + // Information about the IP addresses and network interface associated with + // the NAT gateway. + NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"` + + // The ID of the NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The state of the NAT gateway. + State *string `locationName:"state" type:"string" enum:"NatGatewayState"` + + // The ID of the subnet in which the NAT gateway is located. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The ID of the VPC in which the NAT gateway is located. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NatGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NatGateway) GoString() string { + return s.String() +} + +// Describes the IP addresses and network interface associated with a NAT gateway. +type NatGatewayAddress struct { + _ struct{} `type:"structure"` + + // The allocation ID of the Elastic IP address that's associated with the NAT + // gateway. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The ID of the network interface associated with the NAT gateway. 
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The private IP address associated with the Elastic IP address. + PrivateIp *string `locationName:"privateIp" type:"string"` + + // The Elastic IP address associated with the NAT gateway. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s NatGatewayAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NatGatewayAddress) GoString() string { + return s.String() +} + +// Describes a network ACL. +type NetworkAcl struct { + _ struct{} `type:"structure"` + + // Any associations between the network ACL and one or more subnets + Associations []*NetworkAclAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // One or more entries (rules) in the network ACL. + Entries []*NetworkAclEntry `locationName:"entrySet" locationNameList:"item" type:"list"` + + // Indicates whether this is the default network ACL for the VPC. + IsDefault *bool `locationName:"default" type:"boolean"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // Any tags assigned to the network ACL. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC for the network ACL. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NetworkAcl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAcl) GoString() string { + return s.String() +} + +// Describes an association between a network ACL and a subnet. +type NetworkAclAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the association between a network ACL and a subnet. 
+ NetworkAclAssociationId *string `locationName:"networkAclAssociationId" type:"string"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s NetworkAclAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclAssociation) GoString() string { + return s.String() +} + +// Describes an entry in a network ACL. +type NetworkAclEntry struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether the rule is an egress rule (applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean"` + + // ICMP protocol: The ICMP type and code. + IcmpTypeCode *IcmpTypeCode `locationName:"icmpTypeCode" type:"structure"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol. A value of -1 means all protocols. + Protocol *string `locationName:"protocol" type:"string"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" enum:"RuleAction"` + + // The rule number for the entry. ACL entries are processed in ascending order + // by rule number. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer"` +} + +// String returns the string representation +func (s NetworkAclEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclEntry) GoString() string { + return s.String() +} + +// Describes a network interface. 
+type NetworkInterface struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP associated with the network + // interface. + Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. + Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A description. + Description *string `locationName:"description" type:"string"` + + // Any security groups for the network interface. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The type of interface. + InterfaceType *string `locationName:"interfaceType" type:"string" enum:"NetworkInterfaceType"` + + // The MAC address. + MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The AWS account ID of the owner of the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IP addresses associated with the network interface. + PrivateIpAddresses []*NetworkInterfacePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // The ID of the entity that launched the instance on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // Indicates whether the network interface is being managed by AWS. 
+ RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"` + + // Indicates whether traffic to or from the instance is validated. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. + Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the network interface. + TagSet []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterface) GoString() string { + return s.String() +} + +// Describes association information for an Elastic IP address. +type NetworkInterfaceAssociation struct { + _ struct{} `type:"structure"` + + // The allocation ID. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The association ID. + AssociationId *string `locationName:"associationId" type:"string"` + + // The ID of the Elastic IP address owner. + IpOwnerId *string `locationName:"ipOwnerId" type:"string"` + + // The public DNS name. + PublicDnsName *string `locationName:"publicDnsName" type:"string"` + + // The address of the Elastic IP address bound to the network interface. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfaceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAssociation) GoString() string { + return s.String() +} + +// Describes a network interface attachment. 
+type NetworkInterfaceAttachment struct { + _ struct{} `type:"structure"` + + // The timestamp indicating when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device index of the network interface attachment on the instance. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The AWS account ID of the owner of the instance. + InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` +} + +// String returns the string representation +func (s NetworkInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAttachment) GoString() string { + return s.String() +} + +// Describes an attachment change. +type NetworkInterfaceAttachmentChanges struct { + _ struct{} `type:"structure"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. 
+ DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` +} + +// String returns the string representation +func (s NetworkInterfaceAttachmentChanges) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAttachmentChanges) GoString() string { + return s.String() +} + +// Describes the private IP address of a network interface. +type NetworkInterfacePrivateIpAddress struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP address associated with the + // network interface. + Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // Indicates whether this IP address is the primary private IP address of the + // network interface. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfacePrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePrivateIpAddress) GoString() string { + return s.String() +} + +type NewDhcpConfiguration struct { + _ struct{} `type:"structure"` + + Key *string `locationName:"key" type:"string"` + + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s NewDhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewDhcpConfiguration) GoString() string { + return s.String() +} + +// Describes the placement for the instance. +type Placement struct { + _ struct{} `type:"structure"` + + // The affinity setting for the instance on the Dedicated host. 
This parameter
+	// is not supported for the ImportInstance command.
+	Affinity *string `locationName:"affinity" type:"string"`
+
+	// The Availability Zone of the instance.
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+	// The name of the placement group the instance is in (for cluster compute instances).
+	GroupName *string `locationName:"groupName" type:"string"`
+
+	// The ID of the Dedicated host on which the instance resides. This parameter
+	// is not supported for the ImportInstance command.
+	HostId *string `locationName:"hostId" type:"string"`
+
+	// The tenancy of the instance (if the instance is running in a VPC). An instance
+	// with a tenancy of dedicated runs on single-tenant hardware. The host tenancy
+	// is not supported for the ImportInstance command.
+	Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"`
+}
+
+// String returns the string representation
+func (s Placement) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Placement) GoString() string {
+	return s.String()
+}
+
+// Describes a placement group.
+type PlacementGroup struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the placement group.
+	GroupName *string `locationName:"groupName" type:"string"`
+
+	// The state of the placement group.
+	State *string `locationName:"state" type:"string" enum:"PlacementGroupState"`
+
+	// The placement strategy.
+	Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"`
+}
+
+// String returns the string representation
+func (s PlacementGroup) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PlacementGroup) GoString() string {
+	return s.String()
+}
+
+// Describes a range of ports.
+type PortRange struct {
+	_ struct{} `type:"structure"`
+
+	// The first port in the range.
+ From *int64 `locationName:"from" type:"integer"` + + // The last port in the range. + To *int64 `locationName:"to" type:"integer"` +} + +// String returns the string representation +func (s PortRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortRange) GoString() string { + return s.String() +} + +// Describes prefixes for AWS services. +type PrefixList struct { + _ struct{} `type:"structure"` + + // The IP address range of the AWS service. + Cidrs []*string `locationName:"cidrSet" locationNameList:"item" type:"list"` + + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The name of the prefix. + PrefixListName *string `locationName:"prefixListName" type:"string"` +} + +// String returns the string representation +func (s PrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixList) GoString() string { + return s.String() +} + +// The ID of the prefix. +type PrefixListId struct { + _ struct{} `type:"structure"` + + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` +} + +// String returns the string representation +func (s PrefixListId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListId) GoString() string { + return s.String() +} + +// Describes the price for a Reserved Instance. +type PriceSchedule struct { + _ struct{} `type:"structure"` + + // The current price schedule, as determined by the term remaining for the Reserved + // Instance in the listing. + // + // A specific price schedule is always in effect, but only one price schedule + // can be active at any time. Take, for example, a Reserved Instance listing + // that has five months remaining in its term. 
When you specify price schedules + // for five months and two months, this means that schedule 1, covering the + // first three months of the remaining term, will be active during months 5, + // 4, and 3. Then schedule 2, covering the last two months of the term, will + // be active for months 2 and 1. + Active *bool `locationName:"active" type:"boolean"` + + // The currency for transacting the Reserved Instance resale. At this time, + // the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The fixed price for the term. + Price *float64 `locationName:"price" type:"double"` + + // The number of months remaining in the reservation. For example, 2 is the + // second to the last month before the capacity reservation expires. + Term *int64 `locationName:"term" type:"long"` +} + +// String returns the string representation +func (s PriceSchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PriceSchedule) GoString() string { + return s.String() +} + +// Describes the price for a Reserved Instance. +type PriceScheduleSpecification struct { + _ struct{} `type:"structure"` + + // The currency for transacting the Reserved Instance resale. At this time, + // the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The fixed price for the term. + Price *float64 `locationName:"price" type:"double"` + + // The number of months remaining in the reservation. For example, 2 is the + // second to the last month before the capacity reservation expires. 
+ Term *int64 `locationName:"term" type:"long"` +} + +// String returns the string representation +func (s PriceScheduleSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PriceScheduleSpecification) GoString() string { + return s.String() +} + +// Describes a Reserved Instance offering. +type PricingDetail struct { + _ struct{} `type:"structure"` + + // The number of reservations available for the price. + Count *int64 `locationName:"count" type:"integer"` + + // The price per instance. + Price *float64 `locationName:"price" type:"double"` +} + +// String returns the string representation +func (s PricingDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PricingDetail) GoString() string { + return s.String() +} + +// Describes a secondary private IP address for a network interface. +type PrivateIpAddressSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether the private IP address is the primary private IP address. + // Only one IP address can be designated as primary. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private IP addresses. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string" required:"true"` +} + +// String returns the string representation +func (s PrivateIpAddressSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrivateIpAddressSpecification) GoString() string { + return s.String() +} + +// Describes a product code. +type ProductCode struct { + _ struct{} `type:"structure"` + + // The product code. + ProductCodeId *string `locationName:"productCode" type:"string"` + + // The type of product code. 
+ ProductCodeType *string `locationName:"type" type:"string" enum:"ProductCodeValues"` +} + +// String returns the string representation +func (s ProductCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductCode) GoString() string { + return s.String() +} + +// Describes a virtual private gateway propagating route. +type PropagatingVgw struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway (VGW). + GatewayId *string `locationName:"gatewayId" type:"string"` +} + +// String returns the string representation +func (s PropagatingVgw) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PropagatingVgw) GoString() string { + return s.String() +} + +// Describes a request to purchase Scheduled Instances. +type PurchaseRequest struct { + _ struct{} `type:"structure"` + + // The number of instances. + InstanceCount *int64 `type:"integer"` + + // The purchase token. + PurchaseToken *string `type:"string"` +} + +// String returns the string representation +func (s PurchaseRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseRequest) GoString() string { + return s.String() +} + +type PurchaseReservedInstancesOfferingInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The number of Reserved Instances to purchase. 
+ InstanceCount *int64 `type:"integer" required:"true"` + + // Specified for Reserved Instance Marketplace offerings to limit the total + // order and ensure that the Reserved Instances are not purchased at unexpected + // prices. + LimitPrice *ReservedInstanceLimitPrice `locationName:"limitPrice" type:"structure"` + + // The ID of the Reserved Instance offering to purchase. + ReservedInstancesOfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingInput) GoString() string { + return s.String() +} + +type PurchaseReservedInstancesOfferingOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the purchased Reserved Instances. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) GoString() string { + return s.String() +} + +// Contains the parameters for PurchaseScheduledInstances. +type PurchaseScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more purchase requests. 
+ PurchaseRequests []*PurchaseRequest `locationName:"PurchaseRequest" locationNameList:"PurchaseRequest" type:"list" required:"true"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of PurchaseScheduledInstances. +type PurchaseScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the Scheduled Instances. + ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesOutput) GoString() string { + return s.String() +} + +type RebootInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. 
+ InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebootInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesInput) GoString() string { + return s.String() +} + +type RebootInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RebootInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesOutput) GoString() string { + return s.String() +} + +// Describes a recurring charge. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount of the recurring charge. + Amount *float64 `locationName:"amount" type:"double"` + + // The frequency of the recurring charge. + Frequency *string `locationName:"frequency" type:"string" enum:"RecurringChargeFrequency"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Describes a region. +type Region struct { + _ struct{} `type:"structure"` + + // The region service endpoint. + Endpoint *string `locationName:"regionEndpoint" type:"string"` + + // The name of the region. + RegionName *string `locationName:"regionName" type:"string"` +} + +// String returns the string representation +func (s Region) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Region) GoString() string { + return s.String() +} + +type RegisterImageInput struct { + _ struct{} `type:"structure"` + + // The architecture of the AMI. + // + // Default: For Amazon EBS-backed AMIs, i386. 
For instance store-backed AMIs, + // the architecture specified in the manifest file. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // A description for your AMI. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The full path to your AMI manifest in Amazon S3 storage. + ImageLocation *string `type:"string"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // A name for your AMI. + // + // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets + // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), + // at-signs (@), or underscores(_) + Name *string `locationName:"name" type:"string" required:"true"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The name of the root device (for example, /dev/sda1, or /dev/xvda). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // Set to simple to enable enhanced networking for the AMI and any instances + // that you launch from the AMI. + // + // There is no way to disable enhanced networking at this time. + // + // This option is supported only for HVM AMIs. Specifying this option with + // a PV AMI can make instances launched from the AMI unreachable. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The type of virtualization. 
+ // + // Default: paravirtual + VirtualizationType *string `locationName:"virtualizationType" type:"string"` +} + +// String returns the string representation +func (s RegisterImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterImageInput) GoString() string { + return s.String() +} + +type RegisterImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the newly registered AMI. + ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s RegisterImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterImageOutput) GoString() string { + return s.String() +} + +type RejectVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +type RejectVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +type ReleaseAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The allocation ID. Required for EC2-VPC. + AllocationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s ReleaseAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressInput) GoString() string { + return s.String() +} + +type ReleaseAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReleaseAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressOutput) GoString() string { + return s.String() +} + +type ReleaseHostsInput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated hosts you want to release. 
+ HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ReleaseHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseHostsInput) GoString() string { + return s.String() +} + +type ReleaseHostsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated hosts that were successfully released. + Successful []*string `locationName:"successful" locationNameList:"item" type:"list"` + + // The IDs of the Dedicated hosts that could not be released, including an error + // message. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ReleaseHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseHostsOutput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclAssociationInput struct { + _ struct{} `type:"structure"` + + // The ID of the current association between the original network ACL and the + // subnet. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the new network ACL to associate with the subnet. 
+ NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceNetworkAclAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclAssociationInput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclAssociationOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new association. + NewAssociationId *string `locationName:"newAssociationId" type:"string"` +} + +// String returns the string representation +func (s ReplaceNetworkAclAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclAssociationOutput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // The network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether to replace the egress rule. + // + // Default: If no value is specified, we replace the ingress rule. + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for + // the protocol. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The ID of the ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. Required if + // specifying 6 (TCP) or 17 (UDP) for the protocol. 
+ PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The IP protocol. You can specify all or -1 to mean all protocols. + Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number of the entry to replace. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ReplaceNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclEntryInput) GoString() string { + return s.String() +} + +type ReplaceNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReplaceNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type ReplaceRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR address block used for the destination match. The value you provide + // must match the CIDR of an existing route in the table. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of an Internet gateway or virtual private gateway. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. 
+ InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s ReplaceRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteInput) GoString() string { + return s.String() +} + +type ReplaceRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReplaceRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteOutput) GoString() string { + return s.String() +} + +type ReplaceRouteTableAssociationInput struct { + _ struct{} `type:"structure"` + + // The association ID. + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the new route table to associate with the subnet. 
+ RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceRouteTableAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteTableAssociationInput) GoString() string { + return s.String() +} + +type ReplaceRouteTableAssociationOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new association. + NewAssociationId *string `locationName:"newAssociationId" type:"string"` +} + +// String returns the string representation +func (s ReplaceRouteTableAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceRouteTableAssociationOutput) GoString() string { + return s.String() +} + +type ReportInstanceStatusInput struct { + _ struct{} `type:"structure"` + + // Descriptive text about the health state of your instance. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The time at which the reported instance health state ended. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // One or more instances. + Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"` + + // One or more reason codes that describes the health state of your instance. + // + // instance-stuck-in-state: My instance is stuck in a state. + // + // unresponsive: My instance is unresponsive. + // + // not-accepting-credentials: My instance is not accepting my credentials. 
+ // + // password-not-available: A password is not available for my instance. + // + // performance-network: My instance is experiencing performance problems which + // I believe are network related. + // + // performance-instance-store: My instance is experiencing performance problems + // which I believe are related to the instance stores. + // + // performance-ebs-volume: My instance is experiencing performance problems + // which I believe are related to an EBS volume. + // + // performance-other: My instance is experiencing performance problems. + // + // other: [explain using the description parameter] + ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"` + + // The time at which the reported instance health state began. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The status of all instances listed. + Status *string `locationName:"status" type:"string" required:"true" enum:"ReportStatusType"` +} + +// String returns the string representation +func (s ReportInstanceStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportInstanceStatusInput) GoString() string { + return s.String() +} + +type ReportInstanceStatusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReportInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportInstanceStatusOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RequestSpotFleet. +type RequestSpotFleetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The configuration for the Spot fleet request. + SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s RequestSpotFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotFleetInput) GoString() string { + return s.String() +} + +// Contains the output of RequestSpotFleet. +type RequestSpotFleetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestSpotFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotFleetOutput) GoString() string { + return s.String() +} + +// Contains the parameters for RequestSpotInstances. +type RequestSpotInstancesInput struct { + _ struct{} `type:"structure"` + + // The user-specified name for a logical grouping of bids. + // + // When you specify an Availability Zone group in a Spot Instance request, + // all Spot instances in the request are launched in the same Availability Zone. + // Instance proximity is maintained with this parameter, but the choice of Availability + // Zone is not. The group applies only to bids for Spot Instances of the same + // instance type. Any additional Spot instance requests that are specified with + // the same Availability Zone group name are launched in that same Availability + // Zone, as long as at least one instance from the group is still active. 
+ // + // If there is no active instance running in the Availability Zone group that + // you specify for a new Spot instance request (all instances are terminated, + // the bid is expired, or the bid falls below current market), then Amazon EC2 + // launches the instance in any Availability Zone where the constraint can be + // met. Consequently, the subsequent set of Spot instances could be placed in + // a different zone from the original request, even if you specified the same + // Availability Zone group. + // + // Default: Instances are launched in any available Availability Zone. + AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The required duration for the Spot instances, in minutes. This value must + // be a multiple of 60 (60, 120, 180, 240, 300, or 360). + // + // The duration period starts as soon as your Spot instance receives its instance + // ID. At the end of the duration period, Amazon EC2 marks the Spot instance + // for termination and provides a Spot instance termination notice, which gives + // the instance a two-minute warning before it terminates. + // + // Note that you can't specify an Availability Zone group or a launch group + // if you specify a duration. + BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of Spot instances to launch. + // + // Default: 1 + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance launch group. Launch groups are Spot instances that launch together + // and terminate together. + // + // Default: Instances are launched and terminated individually + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Describes the launch specification for an instance. + LaunchSpecification *RequestSpotLaunchSpecification `type:"structure"` + + // The maximum hourly price (bid) for any Spot instance launched to fulfill + // the request. + SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"` + + // The Spot instance request type. + // + // Default: one-time + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request. If this is a one-time request, the request + // becomes active at this date and time and remains active until all instances + // launch, the request expires, or the request is canceled. If the request is + // persistent, the request becomes active at this date and time and remains + // active until it expires or is canceled. + // + // Default: The request is effective indefinitely. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date of the request. If this is a one-time request, the request remains + // active until all instances launch, the request is canceled, or this date + // is reached. If the request is persistent, it remains active until it is canceled + // or this date and time is reached. + // + // Default: The request is effective indefinitely. 
+ ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s RequestSpotInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of RequestSpotInstances. +type RequestSpotInstancesOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot instance requests. + SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RequestSpotInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotInstancesOutput) GoString() string { + return s.String() +} + +// Describes the launch specification for an instance. +type RequestSpotLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. 
+ InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Describes the monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"NetworkInterface" locationNameList:"item" type:"list"` + + // The placement information for the instance. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + + SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` + + // The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to make available to the instances. + UserData *string `locationName:"userData" type:"string"` +} + +// String returns the string representation +func (s RequestSpotLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotLaunchSpecification) GoString() string { + return s.String() +} + +// Describes a reservation. +type Reservation struct { + _ struct{} `type:"structure"` + + // One or more security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // One or more instances. + Instances []*Instance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The ID of the AWS account that owns the reservation. 
+ OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the requester that launched the instances on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // The ID of the reservation. + ReservationId *string `locationName:"reservationId" type:"string"` +} + +// String returns the string representation +func (s Reservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Reservation) GoString() string { + return s.String() +} + +// Describes the limit price of a Reserved Instance offering. +type ReservedInstanceLimitPrice struct { + _ struct{} `type:"structure"` + + // Used for Reserved Instance Marketplace offerings. Specifies the limit price + // on the total order (instanceCount * price). + Amount *float64 `locationName:"amount" type:"double"` + + // The currency in which the limitPrice amount is specified. At this time, the + // only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` +} + +// String returns the string representation +func (s ReservedInstanceLimitPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstanceLimitPrice) GoString() string { + return s.String() +} + +// Describes a Reserved Instance. +type ReservedInstances struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance. It's specified using ISO 4217 standard + // currency codes. At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. 
+ Duration *int64 `locationName:"duration" type:"long"` + + // The time when the Reserved Instance expires. + End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The number of reservations purchased. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The tenancy of the instance. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The date and time the Reserved Instance started. + Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"` + + // The state of the Reserved Instance purchase. + State *string `locationName:"state" type:"string" enum:"ReservedInstanceState"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The usage price of the Reserved Instance, per hour. 
+ UsagePrice *float64 `locationName:"usagePrice" type:"float"` +} + +// String returns the string representation +func (s ReservedInstances) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstances) GoString() string { + return s.String() +} + +// Describes the configuration settings for the modified Reserved Instances. +type ReservedInstancesConfiguration struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the modified Reserved Instances. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of modified Reserved Instances. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type for the modified Reserved Instances. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The network platform of the modified Reserved Instances, which is either + // EC2-Classic or EC2-VPC. + Platform *string `locationName:"platform" type:"string"` +} + +// String returns the string representation +func (s ReservedInstancesConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesConfiguration) GoString() string { + return s.String() +} + +// Describes the ID of a Reserved Instance. +type ReservedInstancesId struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s ReservedInstancesId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesId) GoString() string { + return s.String() +} + +// Describes a Reserved Instance listing. 
+type ReservedInstancesListing struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time the listing was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + + // The number of instances in this state. + InstanceCounts []*InstanceCount `locationName:"instanceCounts" locationNameList:"item" type:"list"` + + // The price of the Reserved Instance listing. + PriceSchedules []*PriceSchedule `locationName:"priceSchedules" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The ID of the Reserved Instance listing. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` + + // The status of the Reserved Instance listing. + Status *string `locationName:"status" type:"string" enum:"ListingStatus"` + + // The reason for the current status of the Reserved Instance listing. The response + // can be blank. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The last modified timestamp of the listing. + UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ReservedInstancesListing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesListing) GoString() string { + return s.String() +} + +// Describes a Reserved Instance modification. 
+type ReservedInstancesModification struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time when the modification request was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + + // The time for the modification to become effective. + EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp" timestampFormat:"iso8601"` + + // Contains target configurations along with their corresponding new Reserved + // Instance IDs. + ModificationResults []*ReservedInstancesModificationResult `locationName:"modificationResultSet" locationNameList:"item" type:"list"` + + // The IDs of one or more Reserved Instances. + ReservedInstancesIds []*ReservedInstancesId `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` + + // A unique ID for the Reserved Instance modification. + ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"` + + // The status of the Reserved Instances modification request. + Status *string `locationName:"status" type:"string"` + + // The reason for the status. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The time when the modification request was last updated. 
+ UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ReservedInstancesModification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModification) GoString() string { + return s.String() +} + +type ReservedInstancesModificationResult struct { + _ struct{} `type:"structure"` + + // The ID for the Reserved Instances that were created as part of the modification + // request. This field is only available when the modification is fulfilled. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The target Reserved Instances configurations supplied as part of the modification + // request. + TargetConfiguration *ReservedInstancesConfiguration `locationName:"targetConfiguration" type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesModificationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModificationResult) GoString() string { + return s.String() +} + +// Describes a Reserved Instance offering. +type ReservedInstancesOffering struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance offering you are purchasing. It's specified + // using ISO 4217 standard currency codes. At this time, the only supported + // currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. + Duration *int64 `locationName:"duration" type:"long"` + + // The purchase price of the Reserved Instance. 
+ FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The tenancy of the instance. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether the offering is available through the Reserved Instance + // Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, + // this is true. + Marketplace *bool `locationName:"marketplace" type:"boolean"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The pricing details of the Reserved Instance offering. + PricingDetails []*PricingDetail `locationName:"pricingDetailsSet" locationNameList:"item" type:"list"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance offering. + ReservedInstancesOfferingId *string `locationName:"reservedInstancesOfferingId" type:"string"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` +} + +// String returns the string representation +func (s ReservedInstancesOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesOffering) GoString() string { + return s.String() +} + +type ResetImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset (currently you can only reset the launch permission + // attribute). 
+ Attribute *string `type:"string" required:"true" enum:"ResetImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetImageAttributeInput) GoString() string { + return s.String() +} + +type ResetImageAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetImageAttributeOutput) GoString() string { + return s.String() +} + +type ResetInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset. + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. 
+ InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetInstanceAttributeInput) GoString() string { + return s.String() +} + +type ResetInstanceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetInstanceAttributeOutput) GoString() string { + return s.String() +} + +type ResetNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // The source/destination checking attribute. Resets the value to true. 
+ SourceDestCheck *string `locationName:"sourceDestCheck" type:"string"` +} + +// String returns the string representation +func (s ResetNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +type ResetNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +type ResetSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute to reset. Currently, only the attribute for permission to create + // volumes can be reset. + Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the snapshot. 
+ SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetSnapshotAttributeInput) GoString() string { + return s.String() +} + +type ResetSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetSnapshotAttributeOutput) GoString() string { + return s.String() +} + +type RestoreAddressToClassicInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` +} + +// String returns the string representation +func (s RestoreAddressToClassicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreAddressToClassicInput) GoString() string { + return s.String() +} + +type RestoreAddressToClassicOutput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` + + // The move status for the IP address. 
+ Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s RestoreAddressToClassicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreAddressToClassicOutput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupEgressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. We recommend that you specify the CIDR range in + // a set of IP permissions instead. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // We recommend that you specify the port range in a set of IP permissions instead. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // A set of IP permissions. You can't specify a destination security group and + // a CIDR IP address range. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // The IP protocol name or number. We recommend that you specify the protocol + // in a set of IP permissions instead. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The name of a destination security group. To revoke outbound access to a + // destination security group, we recommend that you use a set of IP permissions + // instead. + SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` + + // The AWS account number for a destination security group. 
To revoke outbound + // access to a destination security group, we recommend that you use a set of + // IP permissions instead. + SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP type number. + // We recommend that you specify the port range in a set of IP permissions instead. + ToPort *int64 `locationName:"toPort" type:"integer"` +} + +// String returns the string representation +func (s RevokeSecurityGroupEgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupEgressInput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupEgressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RevokeSecurityGroupEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupEgressOutput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `type:"integer"` + + // The ID of the security group. Required for a security group in a nondefault + // VPC. 
+ GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. + GroupName *string `type:"string"` + + // A set of IP permissions. You can't specify a source security group and a + // CIDR IP address range. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // Use -1 to specify all. + IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. For EC2-VPC, the source security group must be + // in the same VPC. + SourceSecurityGroupName *string `type:"string"` + + // [EC2-Classic, default VPC] The AWS account ID of the source security group. + // For EC2-VPC, the source security group must be in the same VPC. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the IP protocol, the start of the port range, and + // the end of the port range. To revoke a specific rule for an IP protocol and + // port range, use a set of IP permissions instead. + SourceSecurityGroupOwnerId *string `type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. 
+ ToPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s RevokeSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type RevokeSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RevokeSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes a route in a route table. +type Route struct { + _ struct{} `type:"structure"` + + // The CIDR block used for the destination match. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The prefix of the AWS service. + DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"` + + // The ID of a gateway attached to your VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The AWS account ID of the owner of the instance. + InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + + // The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // Describes how the route was created. + // + // CreateRouteTable indicates that route was automatically created when the + // route table was created. CreateRoute indicates that the route was manually + // added to the route table. EnableVgwRoutePropagation indicates that the route + // was propagated by route propagation. 
+ Origin *string `locationName:"origin" type:"string" enum:"RouteOrigin"` + + // The state of the route. The blackhole state indicates that the route's target + // isn't available (for example, the specified gateway isn't attached to the + // VPC, or the specified NAT instance has been terminated). + State *string `locationName:"state" type:"string" enum:"RouteState"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s Route) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Route) GoString() string { + return s.String() +} + +// Describes a route table. +type RouteTable struct { + _ struct{} `type:"structure"` + + // The associations between the route table and one or more subnets. + Associations []*RouteTableAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // Any virtual private gateway (VGW) propagating routes. + PropagatingVgws []*PropagatingVgw `locationName:"propagatingVgwSet" locationNameList:"item" type:"list"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The routes in the route table. + Routes []*Route `locationName:"routeSet" locationNameList:"item" type:"list"` + + // Any tags assigned to the route table. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s RouteTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTable) GoString() string { + return s.String() +} + +// Describes an association between a route table and a subnet. 
+type RouteTableAssociation struct { + _ struct{} `type:"structure"` + + // Indicates whether this is the main route table. + Main *bool `locationName:"main" type:"boolean"` + + // The ID of the association between a route table and a subnet. + RouteTableAssociationId *string `locationName:"routeTableAssociationId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The ID of the subnet. A subnet ID is not returned for an implicit association. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s RouteTableAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTableAssociation) GoString() string { + return s.String() +} + +type RunInstancesInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The block device mapping. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Maximum 64 ASCII characters + ClientToken *string `locationName:"clientToken" type:"string"` + + // If you set this parameter to true, you can't terminate the instance using + // the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this + // parameter to true and then later want to be able to terminate the instance, + // you must first change the value of the disableApiTermination attribute to + // false using ModifyInstanceAttribute. 
Alternatively, if you set InstanceInitiatedShutdownBehavior + // to terminate, you can terminate the instance by running the shutdown command + // from the instance. + // + // Default: false + DisableApiTermination *bool `locationName:"disableApiTermination" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI, which you can get by calling DescribeImages. + ImageId *string `type:"string" required:"true"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + // + // Default: stop + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: m1.small + InstanceType *string `type:"string" enum:"InstanceType"` + + // The ID of the kernel. 
+ // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For + // more information, see PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + KernelId *string `type:"string"` + + // The name of the key pair. You can create a key pair using CreateKeyPair or + // ImportKeyPair. + // + // If you do not specify a key pair, you can't connect to the instance unless + // you choose an AMI that is configured to allow users another way to log in. + KeyName *string `type:"string"` + + // The maximum number of instances to launch. If you specify more instances + // than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches + // the largest possible number of instances above MinCount. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. For more information about the default limits, and how to + // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + // in the Amazon EC2 General FAQ. + MaxCount *int64 `type:"integer" required:"true"` + + // The minimum number of instances to launch. If you specify a minimum that + // is more instances than Amazon EC2 can launch in the target Availability Zone, + // Amazon EC2 launches no instances. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. For more information about the default limits, and how to + // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + // in the Amazon EC2 General FAQ. + MinCount *int64 `type:"integer" required:"true"` + + // The monitoring for the instance. + Monitoring *RunInstancesMonitoringEnabled `type:"structure"` + + // One or more network interfaces. 
+ NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"` + + // The placement for the instance. + Placement *Placement `type:"structure"` + + // [EC2-VPC] The primary IP address. You must specify a value from the IP address + // range of the subnet. + // + // Only one private IP address can be designated as primary. Therefore, you + // can't specify this parameter if PrivateIpAddresses.n.Primary is set to true + // and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address. + // + // Default: We select an IP address from the IP address range of the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The ID of the RAM disk. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For + // more information, see PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + RamdiskId *string `type:"string"` + + // One or more security group IDs. You can create a security group using CreateSecurityGroup. + // + // Default: Amazon EC2 uses the default security group. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // [EC2-Classic, default VPC] One or more security group names. For a nondefault + // VPC, you must use security group IDs instead. + // + // Default: Amazon EC2 uses the default security group. + SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` + + // [EC2-VPC] The ID of the subnet to launch the instance into. + SubnetId *string `type:"string"` + + // Data to configure the instance, or a script to run during instance launch. 
+ // For more information, see Running Commands on Your Linux Instance at Launch + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) (Linux) + // and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) + // (Windows). For API calls, the text must be base64-encoded. Command line tools + // perform encoding for you. + UserData *string `type:"string"` +} + +// String returns the string representation +func (s RunInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesInput) GoString() string { + return s.String() +} + +// Describes the monitoring for the instance. +type RunInstancesMonitoringEnabled struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled for the instance. + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s RunInstancesMonitoringEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesMonitoringEnabled) GoString() string { + return s.String() +} + +// Contains the parameters for RunScheduledInstances. +type RunScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The number of instances. 
+ // + // Default: 1 + InstanceCount *int64 `type:"integer"` + + // The launch specification. + LaunchSpecification *ScheduledInstancesLaunchSpecification `type:"structure" required:"true"` + + // The Scheduled Instance ID. + ScheduledInstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RunScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of RunScheduledInstances. +type RunScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the newly launched instances. + InstanceIdSet []*string `locationName:"instanceIdSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RunScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunScheduledInstancesOutput) GoString() string { + return s.String() +} + +// Describes the storage parameters for S3 and S3 buckets for an instance store-backed +// AMI. +type S3Storage struct { + _ struct{} `type:"structure"` + + // The access key ID of the owner of the bucket. Before you specify a value + // for your access key ID, review and follow the guidance in Best Practices + // for Managing AWS Access Keys (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). + AWSAccessKeyId *string `type:"string"` + + // The bucket in which to store the AMI. You can specify a bucket that you already + // own or a new bucket that Amazon EC2 creates on your behalf. If you specify + // a bucket that belongs to someone else, Amazon EC2 returns an error. + Bucket *string `locationName:"bucket" type:"string"` + + // The beginning of the file name of the AMI. 
+ Prefix *string `locationName:"prefix" type:"string"` + + // A Base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission + // to upload items into Amazon S3 on your behalf. + UploadPolicy []byte `locationName:"uploadPolicy" type:"blob"` + + // The signature of the Base64 encoded JSON document. + UploadPolicySignature *string `locationName:"uploadPolicySignature" type:"string"` +} + +// String returns the string representation +func (s S3Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Storage) GoString() string { + return s.String() +} + +// Describes a Scheduled Instance. +type ScheduledInstance struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The date when the Scheduled Instance was purchased. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + + // The hourly price for a single instance. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The number of instances. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The network platform (EC2-Classic or EC2-VPC). + NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The time for the next schedule to start. + NextSlotStartTime *time.Time `locationName:"nextSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The time that the previous schedule ended or will end. + PreviousSlotEndTime *time.Time `locationName:"previousSlotEndTime" type:"timestamp" timestampFormat:"iso8601"` + + // The schedule recurrence. 
+ Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The Scheduled Instance ID. + ScheduledInstanceId *string `locationName:"scheduledInstanceId" type:"string"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The end date for the Scheduled Instance. + TermEndDate *time.Time `locationName:"termEndDate" type:"timestamp" timestampFormat:"iso8601"` + + // The start date for the Scheduled Instance. + TermStartDate *time.Time `locationName:"termStartDate" type:"timestamp" timestampFormat:"iso8601"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstance) GoString() string { + return s.String() +} + +// Describes a schedule that is available for your Scheduled Instances. +type ScheduledInstanceAvailability struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of available instances. + AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + + // The time period for the first schedule to start. + FirstSlotStartTime *time.Time `locationName:"firstSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + + // The hourly price for a single instance. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The instance type. You can specify one of the C3, C4, M4, or R3 instance + // types. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The maximum term. The only possible value is 365 days. 
+ MaxTermDurationInDays *int64 `locationName:"maxTermDurationInDays" type:"integer"` + + // The minimum term. The only possible value is 365 days. + MinTermDurationInDays *int64 `locationName:"minTermDurationInDays" type:"integer"` + + // The network platform (EC2-Classic or EC2-VPC). + NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The purchase token. This token expires in two hours. + PurchaseToken *string `locationName:"purchaseToken" type:"string"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstanceAvailability) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceAvailability) GoString() string { + return s.String() +} + +// Describes the recurring schedule for a Scheduled Instance. +type ScheduledInstanceRecurrence struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `locationName:"frequency" type:"string"` + + // The interval quantity. The interval unit depends on the value of frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `locationName:"interval" type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). 
+ OccurrenceDaySet []*int64 `locationName:"occurrenceDaySet" locationNameList:"item" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. + OccurrenceRelativeToEnd *bool `locationName:"occurrenceRelativeToEnd" type:"boolean"` + + // The unit for occurrenceDaySet (DayOfWeek or DayOfMonth). + OccurrenceUnit *string `locationName:"occurrenceUnit" type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrence) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrence) GoString() string { + return s.String() +} + +// Describes the recurring schedule for a Scheduled Instance. +type ScheduledInstanceRecurrenceRequest struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `type:"string"` + + // The interval quantity. The interval unit depends on the value of Frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). You can't specify this value with a daily schedule. If the occurrence + // is relative to the end of the month, you can specify only a single day. + OccurrenceDays []*int64 `locationName:"OccurrenceDay" locationNameList:"OccurenceDay" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. You can't specify this value with a daily schedule. + OccurrenceRelativeToEnd *bool `type:"boolean"` + + // The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required + // for a monthly schedule. You can't specify DayOfWeek with a weekly schedule. + // You can't specify this value with a daily schedule. 
+ OccurrenceUnit *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrenceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrenceRequest) GoString() string { + return s.String() +} + +// Describes a block device mapping for a Scheduled Instance. +type ScheduledInstancesBlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `type:"string"` + + // Parameters used to set up EBS volumes automatically when the instance is + // launched. + Ebs *ScheduledInstancesEbs `type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. An instance type with two available instance store volumes + // can specify mappings for ephemeral0 and ephemeral1.The number of available + // instance store volumes depends on the instance type. After you connect to + // the instance, you must mount the volume. + // + // Constraints: For M3 instances, you must specify instance store volumes in + // the block device mapping for the instance. When you launch an M3 instance, + // we ignore any instance store volumes specified in the block device mapping + // for the AMI. + VirtualName *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesBlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesBlockDeviceMapping) GoString() string { + return s.String() +} + +// Describes an EBS volume for a Scheduled Instance. 
+type ScheduledInstancesEbs struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `type:"boolean"` + + // Indicates whether the volume is encrypted. You can attached encrypted volumes + // only to instances that support them. + Encrypted *bool `type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that + // are provisioned for the volume. For General Purpose (SSD) volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information about General Purpose (SSD) + // baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and + // 3 to 10000 for General Purpose (SSD) volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create standard or gp2 volumes. + Iops *int64 `type:"integer"` + + // The ID of the snapshot. + SnapshotId *string `type:"string"` + + // The size of the volume, in GiB. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + VolumeSize *int64 `type:"integer"` + + // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned + // IOPS (SSD) volumes, and standard for Magnetic volumes. 
+ // + // Default: standard + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesEbs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesEbs) GoString() string { + return s.String() +} + +// Describes an IAM instance profile for a Scheduled Instance. +type ScheduledInstancesIamInstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). + Arn *string `type:"string"` + + // The name. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesIamInstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesIamInstanceProfile) GoString() string { + return s.String() +} + +// Describes the launch specification for a Scheduled Instance. +type ScheduledInstancesLaunchSpecification struct { + _ struct{} `type:"structure"` + + // One or more block device mapping entries. + BlockDeviceMappings []*ScheduledInstancesBlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *ScheduledInstancesIamInstanceProfile `type:"structure"` + + // The ID of the Amazon Machine Image (AMI). + ImageId *string `type:"string" required:"true"` + + // The instance type. + InstanceType *string `type:"string"` + + // The ID of the kernel. 
+ KernelId *string `type:"string"` + + // The name of the key pair. + KeyName *string `type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *ScheduledInstancesMonitoring `type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*ScheduledInstancesNetworkInterface `locationName:"NetworkInterface" locationNameList:"NetworkInterface" type:"list"` + + // The placement information. + Placement *ScheduledInstancesPlacement `type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `type:"string"` + + // The IDs of one or more security groups. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `type:"string"` + + // The base64-encoded MIME user data. + UserData *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesLaunchSpecification) GoString() string { + return s.String() +} + +// Describes whether monitoring is enabled for a Scheduled Instance. +type ScheduledInstancesMonitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ScheduledInstancesMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesMonitoring) GoString() string { + return s.String() +} + +// Describes a network interface for a Scheduled Instance. +type ScheduledInstancesNetworkInterface struct { + _ struct{} `type:"structure"` + + // Indicates whether to assign a public IP address to instances launched in + // a VPC. 
The public IP address can only be assigned to a network interface + // for eth0, and can only be assigned to a new network interface, not an existing + // one. You cannot specify more than one network interface in the request. If + // launching into a default subnet, the default value is true. + AssociatePublicIpAddress *bool `type:"boolean"` + + // Indicates whether to delete the interface when the instance is terminated. + DeleteOnTermination *bool `type:"boolean"` + + // The description. + Description *string `type:"string"` + + // The index of the device for the network interface attachment. + DeviceIndex *int64 `type:"integer"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"Group" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `type:"string"` + + // The private IP addresses. + PrivateIpAddressConfigs []*ScheduledInstancesPrivateIpAddressConfig `locationName:"PrivateIpAddressConfig" locationNameList:"PrivateIpAddressConfigSet" type:"list"` + + // The number of secondary private IP addresses. + SecondaryPrivateIpAddressCount *int64 `type:"integer"` + + // The ID of the subnet. + SubnetId *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesNetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesNetworkInterface) GoString() string { + return s.String() +} + +// Describes the placement for a Scheduled Instance. +type ScheduledInstancesPlacement struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `type:"string"` + + // The name of the placement group. 
+ GroupName *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesPlacement) GoString() string { + return s.String() +} + +// Describes a private IP address for a Scheduled Instance. +type ScheduledInstancesPrivateIpAddressConfig struct { + _ struct{} `type:"structure"` + + // Indicates whether this is a primary IP address. Otherwise, this is a secondary + // IP address. + Primary *bool `type:"boolean"` + + // The IP address. + PrivateIpAddress *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesPrivateIpAddressConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesPrivateIpAddressConfig) GoString() string { + return s.String() +} + +// Describes a security group +type SecurityGroup struct { + _ struct{} `type:"structure"` + + // A description of the security group. + Description *string `locationName:"groupDescription" type:"string"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` + + // One or more inbound rules associated with the security group. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // [EC2-VPC] One or more outbound rules associated with the security group. + IpPermissionsEgress []*IpPermission `locationName:"ipPermissionsEgress" locationNameList:"item" type:"list"` + + // The AWS account ID of the owner of the security group. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any tags assigned to the security group. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // [EC2-VPC] The ID of the VPC for the security group. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroup) GoString() string { + return s.String() +} + +// Describes the time period for a Scheduled Instance to start its first schedule. +// The time period must span less than one day. +type SlotDateTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. This + // value must be later than or equal to the earliest date and at most three + // months in the future. + LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s SlotDateTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotDateTimeRangeRequest) GoString() string { + return s.String() +} + +// Describes the time period for a Scheduled Instance to start its first schedule. +type SlotStartTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. 
+ LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SlotStartTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotStartTimeRangeRequest) GoString() string { + return s.String() +} + +// Describes a snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // The data encryption key identifier for the snapshot. This value is a unique + // identifier that corresponds to the data encryption key that was used to encrypt + // the original volume or snapshot copy. Because data encryption keys are inherited + // by volumes created from snapshots, and vice versa, if snapshots share the + // same data encryption key identifier, then they belong to the same volume/snapshot + // lineage. This parameter is only returned by the DescribeSnapshots API operation. + DataEncryptionKeyId *string `locationName:"dataEncryptionKeyId" type:"string"` + + // The description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to protect the volume encryption key for the parent + // volume. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The AWS account alias (for example, amazon, self) or AWS account ID that + // owns the snapshot. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The AWS account ID of the EBS snapshot owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The progress of the snapshot, as a percentage. + Progress *string `locationName:"progress" type:"string"` + + // The ID of the snapshot. Each snapshot receives a unique identifier when it + // is created. 
+ SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The time stamp when the snapshot was initiated. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // The snapshot state. + State *string `locationName:"status" type:"string" enum:"SnapshotState"` + + // Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy + // operation fails (for example, if the proper AWS Key Management Service (AWS + // KMS) permissions are not obtained) this field displays error state details + // to help you diagnose why the error occurred. This parameter is only returned + // by the DescribeSnapshots API operation. + StateMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the snapshot. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume that was used to create the snapshot. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The size of the volume, in GiB. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// Describes the snapshot created from the imported disk. +type SnapshotDetail struct { + _ struct{} `type:"structure"` + + // A description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // The block device mapping for the snapshot. + DeviceName *string `locationName:"deviceName" type:"string"` + + // The size of the disk in the snapshot, in GiB. + DiskImageSize *float64 `locationName:"diskImageSize" type:"double"` + + // The format of the disk image from which the snapshot is created. + Format *string `locationName:"format" type:"string"` + + // The percentage of progress for the task. 
+ Progress *string `locationName:"progress" type:"string"` + + // The snapshot ID of the disk being imported. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // A brief status of the snapshot creation. + Status *string `locationName:"status" type:"string"` + + // A detailed status message for the snapshot creation. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The URL used to access the disk image. + Url *string `locationName:"url" type:"string"` + + // Describes the S3 bucket for the disk image. + UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` +} + +// String returns the string representation +func (s SnapshotDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotDetail) GoString() string { + return s.String() +} + +// The disk container object for the import snapshot request. +type SnapshotDiskContainer struct { + _ struct{} `type:"structure"` + + // The description of the disk image being imported. + Description *string `type:"string"` + + // The format of the disk image being imported. + // + // Valid values: RAW | VHD | VMDK | OVA + Format *string `type:"string"` + + // The URL to the Amazon S3-based disk image being imported. It can either be + // a https URL (https://..) or an Amazon S3 URL (s3://..). + Url *string `type:"string"` + + // Describes the S3 bucket for the disk image. + UserBucket *UserBucket `type:"structure"` +} + +// String returns the string representation +func (s SnapshotDiskContainer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotDiskContainer) GoString() string { + return s.String() +} + +// Details about the import snapshot task. +type SnapshotTaskDetail struct { + _ struct{} `type:"structure"` + + // The description of the snapshot. 
+ Description *string `locationName:"description" type:"string"` + + // The size of the disk in the snapshot, in GiB. + DiskImageSize *float64 `locationName:"diskImageSize" type:"double"` + + // The format of the disk image from which the snapshot is created. + Format *string `locationName:"format" type:"string"` + + // The percentage of completion for the import snapshot task. + Progress *string `locationName:"progress" type:"string"` + + // The snapshot ID of the disk being imported. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // A brief status for the import snapshot task. + Status *string `locationName:"status" type:"string"` + + // A detailed status message for the import snapshot task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The URL of the disk image from which the snapshot is created. + Url *string `locationName:"url" type:"string"` + + // The S3 bucket for the disk image. + UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` +} + +// String returns the string representation +func (s SnapshotTaskDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotTaskDetail) GoString() string { + return s.String() +} + +// Describes the data feed for a Spot instance. +type SpotDatafeedSubscription struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket where the Spot instance data feed is located. + Bucket *string `locationName:"bucket" type:"string"` + + // The fault codes for the Spot instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The AWS account ID of the account. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The prefix that is prepended to data feed files. + Prefix *string `locationName:"prefix" type:"string"` + + // The state of the Spot instance data feed subscription. 
+ State *string `locationName:"state" type:"string" enum:"DatafeedSubscriptionState"` +} + +// String returns the string representation +func (s SpotDatafeedSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotDatafeedSubscription) GoString() string { + return s.String() +} + +// Describes the launch specification for one or more Spot instances. +type SpotFleetLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. 
+ NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The bid price per unit hour for the specified instance type. If this value + // is not specified, the default is the Spot bid price specified for the fleet. + // To determine the bid price per unit hour, divide the Spot bid price by the + // value of WeightedCapacity. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The ID of the subnet in which to launch the instances. To specify multiple + // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded MIME user data to make available to the instances. + UserData *string `locationName:"userData" type:"string"` + + // The number of units provided by the specified instance type. These are the + // same units that you chose to set the target capacity in terms (instances + // or a performance characteristic such as vCPUs, memory, or I/O). + // + // If the target capacity divided by this value is not a whole number, we round + // the number of instances to the next whole number. If this value is not specified, + // the default is 1. 
+ WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"` +} + +// String returns the string representation +func (s SpotFleetLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetLaunchSpecification) GoString() string { + return s.String() +} + +// Describes whether monitoring is enabled. +type SpotFleetMonitoring struct { + _ struct{} `type:"structure"` + + // Enables monitoring for the instance. + // + // Default: false + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s SpotFleetMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetMonitoring) GoString() string { + return s.String() +} + +// Describes a Spot fleet request. +type SpotFleetRequestConfig struct { + _ struct{} `type:"structure"` + + // The creation date and time of the request. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Information about the configuration of the Spot fleet request. + SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"` + + // The ID of the Spot fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The state of the Spot fleet request. + SpotFleetRequestState *string `locationName:"spotFleetRequestState" type:"string" required:"true" enum:"BatchState"` +} + +// String returns the string representation +func (s SpotFleetRequestConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfig) GoString() string { + return s.String() +} + +// Describes the configuration of a Spot fleet request. 
+type SpotFleetRequestConfigData struct { + _ struct{} `type:"structure"` + + // Indicates how to allocate the target capacity across the Spot pools specified + // by the Spot fleet request. The default is lowestPrice. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` + + // A unique, case-sensitive identifier you provide to ensure idempotency of + // your listings. This helps avoid duplicate listings. For more information, + // see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether running Spot instances should be terminated if the target + // capacity of the Spot fleet request is decreased below the current size of + // the Spot fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // Grants the Spot fleet permission to terminate Spot instances on your behalf + // when you cancel its Spot fleet request using CancelSpotFleetRequests or when + // the Spot fleet request expires, if you set terminateInstancesWithExpiration. + IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` + + // Information about the launch specifications for the Spot fleet request. + LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" min:"1" type:"list" required:"true"` + + // The bid price per unit hour. + SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"` + + // The number of units to request. You can choose to set the target capacity + // in terms of instances or a performance characteristic that is important to + // your application workload, such as vCPUs, memory, or I/O. 
+ TargetCapacity *int64 `locationName:"targetCapacity" type:"integer" required:"true"` + + // Indicates whether running Spot instances should be terminated when the Spot + // fleet request expires. + TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` + + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The default is to start fulfilling the request immediately. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // At this point, no new Spot instance requests are placed or enabled to fulfill + // the request. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotFleetRequestConfigData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfigData) GoString() string { + return s.String() +} + +// Describes a Spot instance request. +type SpotInstanceRequest struct { + _ struct{} `type:"structure"` + + // If you specified a duration and your Spot instance request was fulfilled, + // this is the fixed hourly price in effect for the Spot instance while it runs. + ActualBlockHourlyPrice *string `locationName:"actualBlockHourlyPrice" type:"string"` + + // The Availability Zone group. If you specify the same Availability Zone group + // for all Spot instance requests, all Spot instances are launched in the same + // Availability Zone. + AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The duration for the Spot instance, in minutes. 
+ BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // The date and time when the Spot instance request was created, in UTC format + // (for example, YYYY-MM-DDTHH:MM:SSZ). + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // The fault codes for the Spot instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The instance ID, if an instance has been launched to fulfill the Spot instance + // request. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance launch group. Launch groups are Spot instances that launch together + // and terminate together. + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Additional information for launching instances. + LaunchSpecification *LaunchSpecification `locationName:"launchSpecification" type:"structure"` + + // The Availability Zone in which the bid is launched. + LaunchedAvailabilityZone *string `locationName:"launchedAvailabilityZone" type:"string"` + + // The product description associated with the Spot instance. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The ID of the Spot instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The maximum hourly price (bid) for the Spot instance launched to fulfill + // the request. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The state of the Spot instance request. Spot bid status information can help + // you track your Spot instance requests. For more information, see Spot Bid + // Status (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon Elastic Compute Cloud User Guide. 
+ State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` + + // The status code and status message describing the Spot instance request. + Status *SpotInstanceStatus `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The Spot instance request type. + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The request becomes active at this date and time. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` + + // The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // If this is a one-time request, it remains active until all instances launch, + // the request is canceled, or this date is reached. If the request is persistent, + // it remains active until it is canceled or this date is reached. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceRequest) GoString() string { + return s.String() +} + +// Describes a Spot instance state change. +type SpotInstanceStateFault struct { + _ struct{} `type:"structure"` + + // The reason code for the Spot instance state change. + Code *string `locationName:"code" type:"string"` + + // The message for the Spot instance state change. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s SpotInstanceStateFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStateFault) GoString() string { + return s.String() +} + +// Describes the status of a Spot instance request. +type SpotInstanceStatus struct { + _ struct{} `type:"structure"` + + // The status code. For a list of status codes, see Spot Bid Status Codes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) + // in the Amazon Elastic Compute Cloud User Guide. + Code *string `locationName:"code" type:"string"` + + // The description for the status code. + Message *string `locationName:"message" type:"string"` + + // The date and time of the most recent status update, in UTC format (for example, + // YYYY-MM-DDTHH:MM:SSZ). + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotInstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStatus) GoString() string { + return s.String() +} + +// Describes Spot instance placement. +type SpotPlacement struct { + _ struct{} `type:"structure"` + + // The Availability Zones. To specify multiple Availability Zones, separate + // them using commas; for example, "us-west-2a, us-west-2b". + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The name of the placement group (for cluster instances). 
+ GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s SpotPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPlacement) GoString() string { + return s.String() +} + +// Describes the maximum hourly price (bid) for any Spot instance launched to +// fulfill the request. +type SpotPrice struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // A general description of the AMI. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The maximum price (bid) that you are willing to pay for a Spot instance. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SpotPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPrice) GoString() string { + return s.String() +} + +type StartInstancesInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. 
+ InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s StartInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesInput) GoString() string { + return s.String() +} + +type StartInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more started instances. + StartingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StartInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesOutput) GoString() string { + return s.String() +} + +// Describes a state change. +type StateReason struct { + _ struct{} `type:"structure"` + + // The reason code for the state change. + Code *string `locationName:"code" type:"string"` + + // The message for the state change. + // + // Server.SpotInstanceTermination: A Spot instance was terminated due to an + // increase in the market price. + // + // Server.InternalError: An internal error occurred during instance launch, + // resulting in termination. + // + // Server.InsufficientInstanceCapacity: There was insufficient instance capacity + // to satisfy the launch request. + // + // Client.InternalError: A client error caused the instance to terminate on + // launch. + // + // Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown + // -h command from the instance. + // + // Client.UserInitiatedShutdown: The instance was shut down using the Amazon + // EC2 API. + // + // Client.VolumeLimitExceeded: The limit on the number of EBS volumes or total + // storage was exceeded. Decrease usage or request an increase in your limits. 
+ // + // Client.InvalidSnapshot.NotFound: The specified snapshot was not found. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s StateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StateReason) GoString() string { + return s.String() +} + +type StopInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces the instances to stop. The instances do not have an opportunity to + // flush file system caches or file system metadata. If you use this option, + // you must perform file system check and repair procedures. This option is + // not recommended for Windows instances. + // + // Default: false + Force *bool `locationName:"force" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s StopInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesInput) GoString() string { + return s.String() +} + +type StopInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more stopped instances. 
+ StoppingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StopInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesOutput) GoString() string { + return s.String() +} + +// Describes the storage location for an instance store-backed AMI. +type Storage struct { + _ struct{} `type:"structure"` + + // An Amazon S3 storage location. + S3 *S3Storage `type:"structure"` +} + +// String returns the string representation +func (s Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Storage) GoString() string { + return s.String() +} + +// Describes a subnet. +type Subnet struct { + _ struct{} `type:"structure"` + + // The Availability Zone of the subnet. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of unused IP addresses in the subnet. Note that the IP addresses + // for any stopped instances are considered unavailable. + AvailableIpAddressCount *int64 `locationName:"availableIpAddressCount" type:"integer"` + + // The CIDR block assigned to the subnet. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether this is the default subnet for the Availability Zone. + DefaultForAz *bool `locationName:"defaultForAz" type:"boolean"` + + // Indicates whether instances launched in this subnet receive a public IP address. + MapPublicIpOnLaunch *bool `locationName:"mapPublicIpOnLaunch" type:"boolean"` + + // The current state of the subnet. + State *string `locationName:"state" type:"string" enum:"SubnetState"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the subnet. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC the subnet is in. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// Describes a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode + // characters. May not begin with aws: + Key *string `locationName:"key" type:"string"` + + // The value of the tag. + // + // Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode + // characters. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Describes a tag. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `locationName:"key" type:"string"` + + // The ID of the resource. For example, ami-1a2b3c4d. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tag value. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +type TerminateInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesInput) GoString() string { + return s.String() +} + +type TerminateInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more terminated instances. + TerminatingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s TerminateInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesOutput) GoString() string { + return s.String() +} + +type UnassignPrivateIpAddressesInput struct { + _ struct{} `type:"structure"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // The secondary private IP addresses to unassign from the network interface. + // You can specify this option multiple times to unassign more than one IP address. 
+ PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list" required:"true"` +} + +// String returns the string representation +func (s UnassignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +type UnassignPrivateIpAddressesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnassignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +type UnmonitorInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s UnmonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnmonitorInstancesInput) GoString() string { + return s.String() +} + +type UnmonitorInstancesOutput struct { + _ struct{} `type:"structure"` + + // Monitoring information for one or more instances. 
+ InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s UnmonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnmonitorInstancesOutput) GoString() string { + return s.String() +} + +// Information about items that were not successfully processed in a batch call. +type UnsuccessfulItem struct { + _ struct{} `type:"structure"` + + // Information about the error. + Error *UnsuccessfulItemError `locationName:"error" type:"structure" required:"true"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` +} + +// String returns the string representation +func (s UnsuccessfulItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsuccessfulItem) GoString() string { + return s.String() +} + +// Information about the error that occurred. For more information about errors, +// see Error Codes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). +type UnsuccessfulItemError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" required:"true"` + + // The error message accompanying the error code. + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsuccessfulItemError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsuccessfulItemError) GoString() string { + return s.String() +} + +// Describes the S3 bucket for the disk image. +type UserBucket struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket where the disk image is located. + S3Bucket *string `type:"string"` + + // The key for the disk image. 
+ S3Key *string `type:"string"` +} + +// String returns the string representation +func (s UserBucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserBucket) GoString() string { + return s.String() +} + +// Describes the S3 bucket for the disk image. +type UserBucketDetails struct { + _ struct{} `type:"structure"` + + // The S3 bucket from which the disk image was created. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The key from which the disk image was created. + S3Key *string `locationName:"s3Key" type:"string"` +} + +// String returns the string representation +func (s UserBucketDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserBucketDetails) GoString() string { + return s.String() +} + +// Describes the user data to be made available to an instance. +type UserData struct { + _ struct{} `type:"structure"` + + // The Base64-encoded MIME user data for the instance. + Data *string `locationName:"data" type:"string"` +} + +// String returns the string representation +func (s UserData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserData) GoString() string { + return s.String() +} + +// Describes a security group and AWS account ID pair. +type UserIdGroupPair struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. In a request, use this parameter for a security + // group in EC2-Classic or a default VPC only. For a security group in a nondefault + // VPC, use GroupId. + GroupName *string `locationName:"groupName" type:"string"` + + // The ID of an AWS account. EC2-Classic only. 
+ UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s UserIdGroupPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserIdGroupPair) GoString() string { + return s.String() +} + +// Describes telemetry for a VPN tunnel. +type VgwTelemetry struct { + _ struct{} `type:"structure"` + + // The number of accepted routes. + AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` + + // The date and time of the last change in status. + LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp" timestampFormat:"iso8601"` + + // The Internet-routable IP address of the virtual private gateway's outside + // interface. + OutsideIpAddress *string `locationName:"outsideIpAddress" type:"string"` + + // The status of the VPN tunnel. + Status *string `locationName:"status" type:"string" enum:"TelemetryStatus"` + + // If an error occurs, a description of the error. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s VgwTelemetry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VgwTelemetry) GoString() string { + return s.String() +} + +// Describes a volume. +type Volume struct { + _ struct{} `type:"structure"` + + // Information about the volume attachments. + Attachments []*VolumeAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The Availability Zone for the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time stamp when volume creation was initiated. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the volume will be encrypted. 
+ Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that + // are provisioned for the volume. For General Purpose (SSD) volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information on General Purpose (SSD) baseline + // performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and + // 3 to 10000 for General Purpose (SSD) volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create standard or gp2 volumes. + Iops *int64 `locationName:"iops" type:"integer"` + + // The full ARN of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to protect the volume encryption key for the volume. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The size of the volume, in GiBs. + Size *int64 `locationName:"size" type:"integer"` + + // The snapshot from which the volume was created, if applicable. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The volume state. + State *string `locationName:"status" type:"string" enum:"VolumeState"` + + // Any tags assigned to the volume. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume type. This can be gp2 for General Purpose (SSD) volumes, io1 for + // Provisioned IOPS (SSD) volumes, or standard for Magnetic volumes. 
+ VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Describes volume attachment details. +type VolumeAttachment struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device name. + Device *string `locationName:"device" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The attachment state of the volume. + State *string `locationName:"status" type:"string" enum:"VolumeAttachmentState"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s VolumeAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeAttachment) GoString() string { + return s.String() +} + +// Describes an EBS volume. +type VolumeDetail struct { + _ struct{} `type:"structure"` + + // The size of the volume, in GiB. + Size *int64 `locationName:"size" type:"long" required:"true"` +} + +// String returns the string representation +func (s VolumeDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeDetail) GoString() string { + return s.String() +} + +// Describes a volume status operation code. +type VolumeStatusAction struct { + _ struct{} `type:"structure"` + + // The code identifying the operation, for example, enable-volume-io. 
+ Code *string `locationName:"code" type:"string"` + + // A description of the operation. + Description *string `locationName:"description" type:"string"` + + // The ID of the event associated with this operation. + EventId *string `locationName:"eventId" type:"string"` + + // The event type associated with this operation. + EventType *string `locationName:"eventType" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusAction) GoString() string { + return s.String() +} + +// Describes a volume status. +type VolumeStatusDetails struct { + _ struct{} `type:"structure"` + + // The name of the volume status. + Name *string `locationName:"name" type:"string" enum:"VolumeStatusName"` + + // The intended status of the volume status. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusDetails) GoString() string { + return s.String() +} + +// Describes a volume status event. +type VolumeStatusEvent struct { + _ struct{} `type:"structure"` + + // A description of the event. + Description *string `locationName:"description" type:"string"` + + // The ID of this event. + EventId *string `locationName:"eventId" type:"string"` + + // The type of this event. + EventType *string `locationName:"eventType" type:"string"` + + // The latest end time of the event. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"` + + // The earliest start time of the event. 
+ NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s VolumeStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusEvent) GoString() string { + return s.String() +} + +// Describes the status of a volume. +type VolumeStatusInfo struct { + _ struct{} `type:"structure"` + + // The details of the volume status. + Details []*VolumeStatusDetails `locationName:"details" locationNameList:"item" type:"list"` + + // The status of the volume. + Status *string `locationName:"status" type:"string" enum:"VolumeStatusInfoStatus"` +} + +// String returns the string representation +func (s VolumeStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusInfo) GoString() string { + return s.String() +} + +// Describes the volume status. +type VolumeStatusItem struct { + _ struct{} `type:"structure"` + + // The details of the operation. + Actions []*VolumeStatusAction `locationName:"actionsSet" locationNameList:"item" type:"list"` + + // The Availability Zone of the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A list of events associated with the volume. + Events []*VolumeStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The volume ID. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume status. + VolumeStatus *VolumeStatusInfo `locationName:"volumeStatus" type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusItem) GoString() string { + return s.String() +} + +// Describes a VPC. +type Vpc struct { + _ struct{} `type:"structure"` + + // The CIDR block for the VPC. 
+ CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The ID of the set of DHCP options you've associated with the VPC (or default + // if the default options are associated with the VPC). + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // The allowed tenancy of instances launched into the VPC. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // Indicates whether the VPC is the default VPC. + IsDefault *bool `locationName:"isDefault" type:"boolean"` + + // The current state of the VPC. + State *string `locationName:"state" type:"string" enum:"VpcState"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Vpc) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vpc) GoString() string { + return s.String() +} + +// Describes an attachment between a virtual private gateway and a VPC. +type VpcAttachment struct { + _ struct{} `type:"structure"` + + // The current state of the attachment. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcAttachment) GoString() string { + return s.String() +} + +// Describes whether a VPC is enabled for ClassicLink. +type VpcClassicLink struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPC is enabled for ClassicLink. + ClassicLinkEnabled *bool `locationName:"classicLinkEnabled" type:"boolean"` + + // Any tags assigned to the VPC. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcClassicLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcClassicLink) GoString() string { + return s.String() +} + +// Describes a VPC endpoint. +type VpcEndpoint struct { + _ struct{} `type:"structure"` + + // The date and time the VPC endpoint was created. + CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp" timestampFormat:"iso8601"` + + // The policy document associated with the endpoint. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // One or more route tables associated with the endpoint. + RouteTableIds []*string `locationName:"routeTableIdSet" locationNameList:"item" type:"list"` + + // The name of the AWS service to which the endpoint is associated. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The state of the VPC endpoint. + State *string `locationName:"state" type:"string" enum:"State"` + + // The ID of the VPC endpoint. + VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"` + + // The ID of the VPC to which the endpoint is associated. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcEndpoint) GoString() string { + return s.String() +} + +// Describes a VPC peering connection. +type VpcPeeringConnection struct { + _ struct{} `type:"structure"` + + // The information of the peer VPC. + AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"` + + // The time that an unaccepted VPC peering connection will expire. 
+ ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"` + + // The information of the requester VPC. + RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"` + + // The status of the VPC peering connection. + Status *VpcPeeringConnectionStateReason `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnection) GoString() string { + return s.String() +} + +// Describes the status of a VPC peering connection. +type VpcPeeringConnectionStateReason struct { + _ struct{} `type:"structure"` + + // The status of the VPC peering connection. + Code *string `locationName:"code" type:"string" enum:"VpcPeeringConnectionStateReasonCode"` + + // A message that provides more information about the status, if applicable. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnectionStateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionStateReason) GoString() string { + return s.String() +} + +// Describes a VPC in a VPC peering connection. +type VpcPeeringConnectionVpcInfo struct { + _ struct{} `type:"structure"` + + // The CIDR block for the VPC. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The AWS account ID of the VPC owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcPeeringConnectionVpcInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcPeeringConnectionVpcInfo) GoString() string { + return s.String() +} + +// Describes a VPN connection. +type VpnConnection struct { + _ struct{} `type:"structure"` + + // The configuration information for the VPN connection's customer gateway (in + // the native XML format). This element is always present in the CreateVpnConnection + // response; however, it's present in the DescribeVpnConnections response only + // if the VPN connection is in the pending or available state. + CustomerGatewayConfiguration *string `locationName:"customerGatewayConfiguration" type:"string"` + + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The VPN connection options. + Options *VpnConnectionOptions `locationName:"options" type:"structure"` + + // The static routes associated with the VPN connection. + Routes []*VpnStaticRoute `locationName:"routes" locationNameList:"item" type:"list"` + + // The current state of the VPN connection. + State *string `locationName:"state" type:"string" enum:"VpnState"` + + // Any tags assigned to the VPN connection. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection. + Type *string `locationName:"type" type:"string" enum:"GatewayType"` + + // Information about the VPN tunnel. + VgwTelemetry []*VgwTelemetry `locationName:"vgwTelemetry" locationNameList:"item" type:"list"` + + // The ID of the VPN connection. + VpnConnectionId *string `locationName:"vpnConnectionId" type:"string"` + + // The ID of the virtual private gateway at the AWS side of the VPN connection. 
+ VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"` +} + +// String returns the string representation +func (s VpnConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnection) GoString() string { + return s.String() +} + +// Describes VPN connection options. +type VpnConnectionOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPN connection uses static routes only. Static routes + // must be used for devices that don't support BGP. + StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` +} + +// String returns the string representation +func (s VpnConnectionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnectionOptions) GoString() string { + return s.String() +} + +// Describes VPN connection options. +type VpnConnectionOptionsSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPN connection uses static routes only. Static routes + // must be used for devices that don't support BGP. + StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` +} + +// String returns the string representation +func (s VpnConnectionOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnConnectionOptionsSpecification) GoString() string { + return s.String() +} + +// Describes a virtual private gateway. +type VpnGateway struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the virtual private gateway was created, if applicable. + // This field may be empty or not returned. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The current state of the virtual private gateway. + State *string `locationName:"state" type:"string" enum:"VpnState"` + + // Any tags assigned to the virtual private gateway. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the virtual private gateway supports. + Type *string `locationName:"type" type:"string" enum:"GatewayType"` + + // Any VPCs attached to the virtual private gateway. + VpcAttachments []*VpcAttachment `locationName:"attachments" locationNameList:"item" type:"list"` + + // The ID of the virtual private gateway. + VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"` +} + +// String returns the string representation +func (s VpnGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnGateway) GoString() string { + return s.String() +} + +// Describes a static route for a VPN connection. +type VpnStaticRoute struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer data center. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // Indicates how the routes were provided. + Source *string `locationName:"source" type:"string" enum:"VpnStaticRouteSource"` + + // The current state of the static route. 
+ State *string `locationName:"state" type:"string" enum:"VpnState"` +} + +// String returns the string representation +func (s VpnStaticRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnStaticRoute) GoString() string { + return s.String() +} + +const ( + // @enum AccountAttributeName + AccountAttributeNameSupportedPlatforms = "supported-platforms" + // @enum AccountAttributeName + AccountAttributeNameDefaultVpc = "default-vpc" +) + +const ( + // @enum Affinity + AffinityDefault = "default" + // @enum Affinity + AffinityHost = "host" +) + +const ( + // @enum AllocationState + AllocationStateAvailable = "available" + // @enum AllocationState + AllocationStateUnderAssessment = "under-assessment" + // @enum AllocationState + AllocationStatePermanentFailure = "permanent-failure" + // @enum AllocationState + AllocationStateReleased = "released" + // @enum AllocationState + AllocationStateReleasedPermanentFailure = "released-permanent-failure" +) + +const ( + // @enum AllocationStrategy + AllocationStrategyLowestPrice = "lowestPrice" + // @enum AllocationStrategy + AllocationStrategyDiversified = "diversified" +) + +const ( + // @enum ArchitectureValues + ArchitectureValuesI386 = "i386" + // @enum ArchitectureValues + ArchitectureValuesX8664 = "x86_64" +) + +const ( + // @enum AttachmentStatus + AttachmentStatusAttaching = "attaching" + // @enum AttachmentStatus + AttachmentStatusAttached = "attached" + // @enum AttachmentStatus + AttachmentStatusDetaching = "detaching" + // @enum AttachmentStatus + AttachmentStatusDetached = "detached" +) + +const ( + // @enum AutoPlacement + AutoPlacementOn = "on" + // @enum AutoPlacement + AutoPlacementOff = "off" +) + +const ( + // @enum AvailabilityZoneState + AvailabilityZoneStateAvailable = "available" + // @enum AvailabilityZoneState + AvailabilityZoneStateInformation = "information" + // @enum AvailabilityZoneState + AvailabilityZoneStateImpaired = "impaired" + 
// @enum AvailabilityZoneState + AvailabilityZoneStateUnavailable = "unavailable" +) + +const ( + // @enum BatchState + BatchStateSubmitted = "submitted" + // @enum BatchState + BatchStateActive = "active" + // @enum BatchState + BatchStateCancelled = "cancelled" + // @enum BatchState + BatchStateFailed = "failed" + // @enum BatchState + BatchStateCancelledRunning = "cancelled_running" + // @enum BatchState + BatchStateCancelledTerminating = "cancelled_terminating" + // @enum BatchState + BatchStateModifying = "modifying" +) + +const ( + // @enum BundleTaskState + BundleTaskStatePending = "pending" + // @enum BundleTaskState + BundleTaskStateWaitingForShutdown = "waiting-for-shutdown" + // @enum BundleTaskState + BundleTaskStateBundling = "bundling" + // @enum BundleTaskState + BundleTaskStateStoring = "storing" + // @enum BundleTaskState + BundleTaskStateCancelling = "cancelling" + // @enum BundleTaskState + BundleTaskStateComplete = "complete" + // @enum BundleTaskState + BundleTaskStateFailed = "failed" +) + +const ( + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestIdMalformed = "fleetRequestIdMalformed" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeFleetRequestNotInCancellableState = "fleetRequestNotInCancellableState" + // @enum CancelBatchErrorCode + CancelBatchErrorCodeUnexpectedError = "unexpectedError" +) + +const ( + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateActive = "active" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateOpen = "open" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateClosed = "closed" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateCancelled = "cancelled" + // @enum CancelSpotInstanceRequestState + CancelSpotInstanceRequestStateCompleted = "completed" +) + +const ( + // @enum ContainerFormat + 
ContainerFormatOva = "ova" +) + +const ( + // @enum ConversionTaskState + ConversionTaskStateActive = "active" + // @enum ConversionTaskState + ConversionTaskStateCancelling = "cancelling" + // @enum ConversionTaskState + ConversionTaskStateCancelled = "cancelled" + // @enum ConversionTaskState + ConversionTaskStateCompleted = "completed" +) + +const ( + // @enum CurrencyCodeValues + CurrencyCodeValuesUsd = "USD" +) + +const ( + // @enum DatafeedSubscriptionState + DatafeedSubscriptionStateActive = "Active" + // @enum DatafeedSubscriptionState + DatafeedSubscriptionStateInactive = "Inactive" +) + +const ( + // @enum DeviceType + DeviceTypeEbs = "ebs" + // @enum DeviceType + DeviceTypeInstanceStore = "instance-store" +) + +const ( + // @enum DiskImageFormat + DiskImageFormatVmdk = "VMDK" + // @enum DiskImageFormat + DiskImageFormatRaw = "RAW" + // @enum DiskImageFormat + DiskImageFormatVhd = "VHD" +) + +const ( + // @enum DomainType + DomainTypeVpc = "vpc" + // @enum DomainType + DomainTypeStandard = "standard" +) + +const ( + // @enum EventCode + EventCodeInstanceReboot = "instance-reboot" + // @enum EventCode + EventCodeSystemReboot = "system-reboot" + // @enum EventCode + EventCodeSystemMaintenance = "system-maintenance" + // @enum EventCode + EventCodeInstanceRetirement = "instance-retirement" + // @enum EventCode + EventCodeInstanceStop = "instance-stop" +) + +const ( + // @enum EventType + EventTypeInstanceChange = "instanceChange" + // @enum EventType + EventTypeFleetRequestChange = "fleetRequestChange" + // @enum EventType + EventTypeError = "error" +) + +const ( + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyNoTermination = "noTermination" + // @enum ExcessCapacityTerminationPolicy + ExcessCapacityTerminationPolicyDefault = "default" +) + +const ( + // @enum ExportEnvironment + ExportEnvironmentCitrix = "citrix" + // @enum ExportEnvironment + ExportEnvironmentVmware = "vmware" + // @enum ExportEnvironment + 
ExportEnvironmentMicrosoft = "microsoft" +) + +const ( + // @enum ExportTaskState + ExportTaskStateActive = "active" + // @enum ExportTaskState + ExportTaskStateCancelling = "cancelling" + // @enum ExportTaskState + ExportTaskStateCancelled = "cancelled" + // @enum ExportTaskState + ExportTaskStateCompleted = "completed" +) + +const ( + // @enum FlowLogsResourceType + FlowLogsResourceTypeVpc = "VPC" + // @enum FlowLogsResourceType + FlowLogsResourceTypeSubnet = "Subnet" + // @enum FlowLogsResourceType + FlowLogsResourceTypeNetworkInterface = "NetworkInterface" +) + +const ( + // @enum GatewayType + GatewayTypeIpsec1 = "ipsec.1" +) + +const ( + // @enum HostTenancy + HostTenancyDedicated = "dedicated" + // @enum HostTenancy + HostTenancyHost = "host" +) + +const ( + // @enum HypervisorType + HypervisorTypeOvm = "ovm" + // @enum HypervisorType + HypervisorTypeXen = "xen" +) + +const ( + // @enum ImageAttributeName + ImageAttributeNameDescription = "description" + // @enum ImageAttributeName + ImageAttributeNameKernel = "kernel" + // @enum ImageAttributeName + ImageAttributeNameRamdisk = "ramdisk" + // @enum ImageAttributeName + ImageAttributeNameLaunchPermission = "launchPermission" + // @enum ImageAttributeName + ImageAttributeNameProductCodes = "productCodes" + // @enum ImageAttributeName + ImageAttributeNameBlockDeviceMapping = "blockDeviceMapping" + // @enum ImageAttributeName + ImageAttributeNameSriovNetSupport = "sriovNetSupport" +) + +const ( + // @enum ImageState + ImageStatePending = "pending" + // @enum ImageState + ImageStateAvailable = "available" + // @enum ImageState + ImageStateInvalid = "invalid" + // @enum ImageState + ImageStateDeregistered = "deregistered" + // @enum ImageState + ImageStateTransient = "transient" + // @enum ImageState + ImageStateFailed = "failed" + // @enum ImageState + ImageStateError = "error" +) + +const ( + // @enum ImageTypeValues + ImageTypeValuesMachine = "machine" + // @enum ImageTypeValues + ImageTypeValuesKernel = 
"kernel" + // @enum ImageTypeValues + ImageTypeValuesRamdisk = "ramdisk" +) + +const ( + // @enum InstanceAttributeName + InstanceAttributeNameInstanceType = "instanceType" + // @enum InstanceAttributeName + InstanceAttributeNameKernel = "kernel" + // @enum InstanceAttributeName + InstanceAttributeNameRamdisk = "ramdisk" + // @enum InstanceAttributeName + InstanceAttributeNameUserData = "userData" + // @enum InstanceAttributeName + InstanceAttributeNameDisableApiTermination = "disableApiTermination" + // @enum InstanceAttributeName + InstanceAttributeNameInstanceInitiatedShutdownBehavior = "instanceInitiatedShutdownBehavior" + // @enum InstanceAttributeName + InstanceAttributeNameRootDeviceName = "rootDeviceName" + // @enum InstanceAttributeName + InstanceAttributeNameBlockDeviceMapping = "blockDeviceMapping" + // @enum InstanceAttributeName + InstanceAttributeNameProductCodes = "productCodes" + // @enum InstanceAttributeName + InstanceAttributeNameSourceDestCheck = "sourceDestCheck" + // @enum InstanceAttributeName + InstanceAttributeNameGroupSet = "groupSet" + // @enum InstanceAttributeName + InstanceAttributeNameEbsOptimized = "ebsOptimized" + // @enum InstanceAttributeName + InstanceAttributeNameSriovNetSupport = "sriovNetSupport" +) + +const ( + // @enum InstanceLifecycleType + InstanceLifecycleTypeSpot = "spot" +) + +const ( + // @enum InstanceStateName + InstanceStateNamePending = "pending" + // @enum InstanceStateName + InstanceStateNameRunning = "running" + // @enum InstanceStateName + InstanceStateNameShuttingDown = "shutting-down" + // @enum InstanceStateName + InstanceStateNameTerminated = "terminated" + // @enum InstanceStateName + InstanceStateNameStopping = "stopping" + // @enum InstanceStateName + InstanceStateNameStopped = "stopped" +) + +const ( + // @enum InstanceType + InstanceTypeT1Micro = "t1.micro" + // @enum InstanceType + InstanceTypeM1Small = "m1.small" + // @enum InstanceType + InstanceTypeM1Medium = "m1.medium" + // @enum InstanceType + 
InstanceTypeM1Large = "m1.large" + // @enum InstanceType + InstanceTypeM1Xlarge = "m1.xlarge" + // @enum InstanceType + InstanceTypeM3Medium = "m3.medium" + // @enum InstanceType + InstanceTypeM3Large = "m3.large" + // @enum InstanceType + InstanceTypeM3Xlarge = "m3.xlarge" + // @enum InstanceType + InstanceTypeM32xlarge = "m3.2xlarge" + // @enum InstanceType + InstanceTypeM4Large = "m4.large" + // @enum InstanceType + InstanceTypeM4Xlarge = "m4.xlarge" + // @enum InstanceType + InstanceTypeM42xlarge = "m4.2xlarge" + // @enum InstanceType + InstanceTypeM44xlarge = "m4.4xlarge" + // @enum InstanceType + InstanceTypeM410xlarge = "m4.10xlarge" + // @enum InstanceType + InstanceTypeT2Nano = "t2.nano" + // @enum InstanceType + InstanceTypeT2Micro = "t2.micro" + // @enum InstanceType + InstanceTypeT2Small = "t2.small" + // @enum InstanceType + InstanceTypeT2Medium = "t2.medium" + // @enum InstanceType + InstanceTypeT2Large = "t2.large" + // @enum InstanceType + InstanceTypeM2Xlarge = "m2.xlarge" + // @enum InstanceType + InstanceTypeM22xlarge = "m2.2xlarge" + // @enum InstanceType + InstanceTypeM24xlarge = "m2.4xlarge" + // @enum InstanceType + InstanceTypeCr18xlarge = "cr1.8xlarge" + // @enum InstanceType + InstanceTypeI2Xlarge = "i2.xlarge" + // @enum InstanceType + InstanceTypeI22xlarge = "i2.2xlarge" + // @enum InstanceType + InstanceTypeI24xlarge = "i2.4xlarge" + // @enum InstanceType + InstanceTypeI28xlarge = "i2.8xlarge" + // @enum InstanceType + InstanceTypeHi14xlarge = "hi1.4xlarge" + // @enum InstanceType + InstanceTypeHs18xlarge = "hs1.8xlarge" + // @enum InstanceType + InstanceTypeC1Medium = "c1.medium" + // @enum InstanceType + InstanceTypeC1Xlarge = "c1.xlarge" + // @enum InstanceType + InstanceTypeC3Large = "c3.large" + // @enum InstanceType + InstanceTypeC3Xlarge = "c3.xlarge" + // @enum InstanceType + InstanceTypeC32xlarge = "c3.2xlarge" + // @enum InstanceType + InstanceTypeC34xlarge = "c3.4xlarge" + // @enum InstanceType + InstanceTypeC38xlarge = 
"c3.8xlarge" + // @enum InstanceType + InstanceTypeC4Large = "c4.large" + // @enum InstanceType + InstanceTypeC4Xlarge = "c4.xlarge" + // @enum InstanceType + InstanceTypeC42xlarge = "c4.2xlarge" + // @enum InstanceType + InstanceTypeC44xlarge = "c4.4xlarge" + // @enum InstanceType + InstanceTypeC48xlarge = "c4.8xlarge" + // @enum InstanceType + InstanceTypeCc14xlarge = "cc1.4xlarge" + // @enum InstanceType + InstanceTypeCc28xlarge = "cc2.8xlarge" + // @enum InstanceType + InstanceTypeG22xlarge = "g2.2xlarge" + // @enum InstanceType + InstanceTypeCg14xlarge = "cg1.4xlarge" + // @enum InstanceType + InstanceTypeR3Large = "r3.large" + // @enum InstanceType + InstanceTypeR3Xlarge = "r3.xlarge" + // @enum InstanceType + InstanceTypeR32xlarge = "r3.2xlarge" + // @enum InstanceType + InstanceTypeR34xlarge = "r3.4xlarge" + // @enum InstanceType + InstanceTypeR38xlarge = "r3.8xlarge" + // @enum InstanceType + InstanceTypeD2Xlarge = "d2.xlarge" + // @enum InstanceType + InstanceTypeD22xlarge = "d2.2xlarge" + // @enum InstanceType + InstanceTypeD24xlarge = "d2.4xlarge" + // @enum InstanceType + InstanceTypeD28xlarge = "d2.8xlarge" +) + +const ( + // @enum ListingState + ListingStateAvailable = "available" + // @enum ListingState + ListingStateSold = "sold" + // @enum ListingState + ListingStateCancelled = "cancelled" + // @enum ListingState + ListingStatePending = "pending" +) + +const ( + // @enum ListingStatus + ListingStatusActive = "active" + // @enum ListingStatus + ListingStatusPending = "pending" + // @enum ListingStatus + ListingStatusCancelled = "cancelled" + // @enum ListingStatus + ListingStatusClosed = "closed" +) + +const ( + // @enum MonitoringState + MonitoringStateDisabled = "disabled" + // @enum MonitoringState + MonitoringStateDisabling = "disabling" + // @enum MonitoringState + MonitoringStateEnabled = "enabled" + // @enum MonitoringState + MonitoringStatePending = "pending" +) + +const ( + // @enum MoveStatus + MoveStatusMovingToVpc = "movingToVpc" + // 
@enum MoveStatus + MoveStatusRestoringToClassic = "restoringToClassic" +) + +const ( + // @enum NatGatewayState + NatGatewayStatePending = "pending" + // @enum NatGatewayState + NatGatewayStateFailed = "failed" + // @enum NatGatewayState + NatGatewayStateAvailable = "available" + // @enum NatGatewayState + NatGatewayStateDeleting = "deleting" + // @enum NatGatewayState + NatGatewayStateDeleted = "deleted" +) + +const ( + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeDescription = "description" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeGroupSet = "groupSet" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeSourceDestCheck = "sourceDestCheck" + // @enum NetworkInterfaceAttribute + NetworkInterfaceAttributeAttachment = "attachment" +) + +const ( + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusAvailable = "available" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusAttaching = "attaching" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusInUse = "in-use" + // @enum NetworkInterfaceStatus + NetworkInterfaceStatusDetaching = "detaching" +) + +const ( + // @enum NetworkInterfaceType + NetworkInterfaceTypeInterface = "interface" + // @enum NetworkInterfaceType + NetworkInterfaceTypeNatGateway = "natGateway" +) + +const ( + // @enum OfferingTypeValues + OfferingTypeValuesHeavyUtilization = "Heavy Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesMediumUtilization = "Medium Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesLightUtilization = "Light Utilization" + // @enum OfferingTypeValues + OfferingTypeValuesNoUpfront = "No Upfront" + // @enum OfferingTypeValues + OfferingTypeValuesPartialUpfront = "Partial Upfront" + // @enum OfferingTypeValues + OfferingTypeValuesAllUpfront = "All Upfront" +) + +const ( + // @enum OperationType + OperationTypeAdd = "add" + // @enum OperationType + OperationTypeRemove = "remove" +) + +const ( + // @enum PermissionGroup + 
PermissionGroupAll = "all" +) + +const ( + // @enum PlacementGroupState + PlacementGroupStatePending = "pending" + // @enum PlacementGroupState + PlacementGroupStateAvailable = "available" + // @enum PlacementGroupState + PlacementGroupStateDeleting = "deleting" + // @enum PlacementGroupState + PlacementGroupStateDeleted = "deleted" +) + +const ( + // @enum PlacementStrategy + PlacementStrategyCluster = "cluster" +) + +const ( + // @enum PlatformValues + PlatformValuesWindows = "Windows" +) + +const ( + // @enum ProductCodeValues + ProductCodeValuesDevpay = "devpay" + // @enum ProductCodeValues + ProductCodeValuesMarketplace = "marketplace" +) + +const ( + // @enum RIProductDescription + RIProductDescriptionLinuxUnix = "Linux/UNIX" + // @enum RIProductDescription + RIProductDescriptionLinuxUnixamazonVpc = "Linux/UNIX (Amazon VPC)" + // @enum RIProductDescription + RIProductDescriptionWindows = "Windows" + // @enum RIProductDescription + RIProductDescriptionWindowsAmazonVpc = "Windows (Amazon VPC)" +) + +const ( + // @enum RecurringChargeFrequency + RecurringChargeFrequencyHourly = "Hourly" +) + +const ( + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesUnresponsive = "unresponsive" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesNotAcceptingCredentials = "not-accepting-credentials" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPasswordNotAvailable = "password-not-available" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceNetwork = "performance-network" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceInstanceStore = "performance-instance-store" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceEbsVolume = "performance-ebs-volume" + // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesPerformanceOther = "performance-other" 
+ // @enum ReportInstanceReasonCodes + ReportInstanceReasonCodesOther = "other" +) + +const ( + // @enum ReportStatusType + ReportStatusTypeOk = "ok" + // @enum ReportStatusType + ReportStatusTypeImpaired = "impaired" +) + +const ( + // @enum ReservedInstanceState + ReservedInstanceStatePaymentPending = "payment-pending" + // @enum ReservedInstanceState + ReservedInstanceStateActive = "active" + // @enum ReservedInstanceState + ReservedInstanceStatePaymentFailed = "payment-failed" + // @enum ReservedInstanceState + ReservedInstanceStateRetired = "retired" +) + +const ( + // @enum ResetImageAttributeName + ResetImageAttributeNameLaunchPermission = "launchPermission" +) + +const ( + // @enum ResourceType + ResourceTypeCustomerGateway = "customer-gateway" + // @enum ResourceType + ResourceTypeDhcpOptions = "dhcp-options" + // @enum ResourceType + ResourceTypeImage = "image" + // @enum ResourceType + ResourceTypeInstance = "instance" + // @enum ResourceType + ResourceTypeInternetGateway = "internet-gateway" + // @enum ResourceType + ResourceTypeNetworkAcl = "network-acl" + // @enum ResourceType + ResourceTypeNetworkInterface = "network-interface" + // @enum ResourceType + ResourceTypeReservedInstances = "reserved-instances" + // @enum ResourceType + ResourceTypeRouteTable = "route-table" + // @enum ResourceType + ResourceTypeSnapshot = "snapshot" + // @enum ResourceType + ResourceTypeSpotInstancesRequest = "spot-instances-request" + // @enum ResourceType + ResourceTypeSubnet = "subnet" + // @enum ResourceType + ResourceTypeSecurityGroup = "security-group" + // @enum ResourceType + ResourceTypeVolume = "volume" + // @enum ResourceType + ResourceTypeVpc = "vpc" + // @enum ResourceType + ResourceTypeVpnConnection = "vpn-connection" + // @enum ResourceType + ResourceTypeVpnGateway = "vpn-gateway" +) + +const ( + // @enum RouteOrigin + RouteOriginCreateRouteTable = "CreateRouteTable" + // @enum RouteOrigin + RouteOriginCreateRoute = "CreateRoute" + // @enum RouteOrigin + 
RouteOriginEnableVgwRoutePropagation = "EnableVgwRoutePropagation" +) + +const ( + // @enum RouteState + RouteStateActive = "active" + // @enum RouteState + RouteStateBlackhole = "blackhole" +) + +const ( + // @enum RuleAction + RuleActionAllow = "allow" + // @enum RuleAction + RuleActionDeny = "deny" +) + +const ( + // @enum ShutdownBehavior + ShutdownBehaviorStop = "stop" + // @enum ShutdownBehavior + ShutdownBehaviorTerminate = "terminate" +) + +const ( + // @enum SnapshotAttributeName + SnapshotAttributeNameProductCodes = "productCodes" + // @enum SnapshotAttributeName + SnapshotAttributeNameCreateVolumePermission = "createVolumePermission" +) + +const ( + // @enum SnapshotState + SnapshotStatePending = "pending" + // @enum SnapshotState + SnapshotStateCompleted = "completed" + // @enum SnapshotState + SnapshotStateError = "error" +) + +const ( + // @enum SpotInstanceState + SpotInstanceStateOpen = "open" + // @enum SpotInstanceState + SpotInstanceStateActive = "active" + // @enum SpotInstanceState + SpotInstanceStateClosed = "closed" + // @enum SpotInstanceState + SpotInstanceStateCancelled = "cancelled" + // @enum SpotInstanceState + SpotInstanceStateFailed = "failed" +) + +const ( + // @enum SpotInstanceType + SpotInstanceTypeOneTime = "one-time" + // @enum SpotInstanceType + SpotInstanceTypePersistent = "persistent" +) + +const ( + // @enum State + StatePending = "Pending" + // @enum State + StateAvailable = "Available" + // @enum State + StateDeleting = "Deleting" + // @enum State + StateDeleted = "Deleted" +) + +const ( + // @enum Status + StatusMoveInProgress = "MoveInProgress" + // @enum Status + StatusInVpc = "InVpc" + // @enum Status + StatusInClassic = "InClassic" +) + +const ( + // @enum StatusName + StatusNameReachability = "reachability" +) + +const ( + // @enum StatusType + StatusTypePassed = "passed" + // @enum StatusType + StatusTypeFailed = "failed" + // @enum StatusType + StatusTypeInsufficientData = "insufficient-data" + // @enum StatusType 
+ StatusTypeInitializing = "initializing" +) + +const ( + // @enum SubnetState + SubnetStatePending = "pending" + // @enum SubnetState + SubnetStateAvailable = "available" +) + +const ( + // @enum SummaryStatus + SummaryStatusOk = "ok" + // @enum SummaryStatus + SummaryStatusImpaired = "impaired" + // @enum SummaryStatus + SummaryStatusInsufficientData = "insufficient-data" + // @enum SummaryStatus + SummaryStatusNotApplicable = "not-applicable" + // @enum SummaryStatus + SummaryStatusInitializing = "initializing" +) + +const ( + // @enum TelemetryStatus + TelemetryStatusUp = "UP" + // @enum TelemetryStatus + TelemetryStatusDown = "DOWN" +) + +const ( + // @enum Tenancy + TenancyDefault = "default" + // @enum Tenancy + TenancyDedicated = "dedicated" + // @enum Tenancy + TenancyHost = "host" +) + +const ( + // @enum TrafficType + TrafficTypeAccept = "ACCEPT" + // @enum TrafficType + TrafficTypeReject = "REJECT" + // @enum TrafficType + TrafficTypeAll = "ALL" +) + +const ( + // @enum VirtualizationType + VirtualizationTypeHvm = "hvm" + // @enum VirtualizationType + VirtualizationTypeParavirtual = "paravirtual" +) + +const ( + // @enum VolumeAttachmentState + VolumeAttachmentStateAttaching = "attaching" + // @enum VolumeAttachmentState + VolumeAttachmentStateAttached = "attached" + // @enum VolumeAttachmentState + VolumeAttachmentStateDetaching = "detaching" + // @enum VolumeAttachmentState + VolumeAttachmentStateDetached = "detached" +) + +const ( + // @enum VolumeAttributeName + VolumeAttributeNameAutoEnableIo = "autoEnableIO" + // @enum VolumeAttributeName + VolumeAttributeNameProductCodes = "productCodes" +) + +const ( + // @enum VolumeState + VolumeStateCreating = "creating" + // @enum VolumeState + VolumeStateAvailable = "available" + // @enum VolumeState + VolumeStateInUse = "in-use" + // @enum VolumeState + VolumeStateDeleting = "deleting" + // @enum VolumeState + VolumeStateDeleted = "deleted" + // @enum VolumeState + VolumeStateError = "error" +) + +const ( 
+ // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusOk = "ok" + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusImpaired = "impaired" + // @enum VolumeStatusInfoStatus + VolumeStatusInfoStatusInsufficientData = "insufficient-data" +) + +const ( + // @enum VolumeStatusName + VolumeStatusNameIoEnabled = "io-enabled" + // @enum VolumeStatusName + VolumeStatusNameIoPerformance = "io-performance" +) + +const ( + // @enum VolumeType + VolumeTypeStandard = "standard" + // @enum VolumeType + VolumeTypeIo1 = "io1" + // @enum VolumeType + VolumeTypeGp2 = "gp2" +) + +const ( + // @enum VpcAttributeName + VpcAttributeNameEnableDnsSupport = "enableDnsSupport" + // @enum VpcAttributeName + VpcAttributeNameEnableDnsHostnames = "enableDnsHostnames" +) + +const ( + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeInitiatingRequest = "initiating-request" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodePendingAcceptance = "pending-acceptance" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeActive = "active" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeDeleted = "deleted" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeRejected = "rejected" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeFailed = "failed" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeExpired = "expired" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeProvisioning = "provisioning" + // @enum VpcPeeringConnectionStateReasonCode + VpcPeeringConnectionStateReasonCodeDeleting = "deleting" +) + +const ( + // @enum VpcState + VpcStatePending = "pending" + // @enum VpcState + VpcStateAvailable = "available" +) + +const ( + // @enum VpnState + VpnStatePending = "pending" + // @enum VpnState + VpnStateAvailable = "available" + // 
@enum VpnState + VpnStateDeleting = "deleting" + // @enum VpnState + VpnStateDeleted = "deleted" +) + +const ( + // @enum VpnStaticRouteSource + VpnStaticRouteSourceStatic = "Static" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,55 @@ +package ec2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +func init() { + initRequest = func(r *request.Request) { + if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter + r.Handlers.Build.PushFront(fillPresignedURL) + } + } +} + +func fillPresignedURL(r *request.Request) { + if !r.ParamsFilled() { + return + } + + origParams := r.Params.(*CopySnapshotInput) + + // Stop if PresignedURL/DestinationRegion is set + if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil { + return + } + + origParams.DestinationRegion = r.Config.Region + newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput) + + // Create a new request based on the existing request. We will use this to + // presign the CopySnapshot request against the source region. + cfg := r.Config.Copy(aws.NewConfig(). + WithEndpoint(""). 
+ WithRegion(aws.StringValue(origParams.SourceRegion))) + + clientInfo := r.ClientInfo + clientInfo.Endpoint, clientInfo.SigningRegion = endpoints.EndpointForRegion( + clientInfo.ServiceName, aws.StringValue(cfg.Region), aws.BoolValue(cfg.DisableSSL)) + + // Presign a CopySnapshot request with modified params + req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data) + url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough. + if err != nil { // bubble error back up to original request + r.Error = err + return + } + + // We have our URL, set it on params + origParams.PresignedUrl = &url +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,35 @@ +package ec2_test + +import ( + "io/ioutil" + "net/url" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/stretchr/testify/assert" +) + +func TestCopySnapshotPresignedURL(t *testing.T) { + svc := ec2.New(unit.Session, &aws.Config{Region: aws.String("us-west-2")}) + + assert.NotPanics(t, func() { + // Doesn't panic on nil input + req, _ := svc.CopySnapshotRequest(nil) + req.Sign() + }) + + req, _ := svc.CopySnapshotRequest(&ec2.CopySnapshotInput{ + SourceRegion: aws.String("us-west-1"), + SourceSnapshotId: aws.String("snap-id"), + }) + req.Sign() + + b, _ := ioutil.ReadAll(req.HTTPRequest.Body) + q, _ := url.ParseQuery(string(b)) + u, _ := url.QueryUnescape(q.Get("PresignedUrl")) + 
assert.Equal(t, "us-west-2", q.Get("DestinationRegion")) + assert.Equal(t, "us-west-1", q.Get("SourceRegion")) + assert.Regexp(t, `^https://ec2\.us-west-1\.amazonaws\.com/.+&DestinationRegion=us-west-2`, u) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,832 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ec2iface provides an interface for the Amazon Elastic Compute Cloud. +package ec2iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ec2" +) + +// EC2API is the interface type for ec2.EC2. 
+type EC2API interface { + AcceptVpcPeeringConnectionRequest(*ec2.AcceptVpcPeeringConnectionInput) (*request.Request, *ec2.AcceptVpcPeeringConnectionOutput) + + AcceptVpcPeeringConnection(*ec2.AcceptVpcPeeringConnectionInput) (*ec2.AcceptVpcPeeringConnectionOutput, error) + + AllocateAddressRequest(*ec2.AllocateAddressInput) (*request.Request, *ec2.AllocateAddressOutput) + + AllocateAddress(*ec2.AllocateAddressInput) (*ec2.AllocateAddressOutput, error) + + AllocateHostsRequest(*ec2.AllocateHostsInput) (*request.Request, *ec2.AllocateHostsOutput) + + AllocateHosts(*ec2.AllocateHostsInput) (*ec2.AllocateHostsOutput, error) + + AssignPrivateIpAddressesRequest(*ec2.AssignPrivateIpAddressesInput) (*request.Request, *ec2.AssignPrivateIpAddressesOutput) + + AssignPrivateIpAddresses(*ec2.AssignPrivateIpAddressesInput) (*ec2.AssignPrivateIpAddressesOutput, error) + + AssociateAddressRequest(*ec2.AssociateAddressInput) (*request.Request, *ec2.AssociateAddressOutput) + + AssociateAddress(*ec2.AssociateAddressInput) (*ec2.AssociateAddressOutput, error) + + AssociateDhcpOptionsRequest(*ec2.AssociateDhcpOptionsInput) (*request.Request, *ec2.AssociateDhcpOptionsOutput) + + AssociateDhcpOptions(*ec2.AssociateDhcpOptionsInput) (*ec2.AssociateDhcpOptionsOutput, error) + + AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput) + + AssociateRouteTable(*ec2.AssociateRouteTableInput) (*ec2.AssociateRouteTableOutput, error) + + AttachClassicLinkVpcRequest(*ec2.AttachClassicLinkVpcInput) (*request.Request, *ec2.AttachClassicLinkVpcOutput) + + AttachClassicLinkVpc(*ec2.AttachClassicLinkVpcInput) (*ec2.AttachClassicLinkVpcOutput, error) + + AttachInternetGatewayRequest(*ec2.AttachInternetGatewayInput) (*request.Request, *ec2.AttachInternetGatewayOutput) + + AttachInternetGateway(*ec2.AttachInternetGatewayInput) (*ec2.AttachInternetGatewayOutput, error) + + AttachNetworkInterfaceRequest(*ec2.AttachNetworkInterfaceInput) (*request.Request, 
*ec2.AttachNetworkInterfaceOutput) + + AttachNetworkInterface(*ec2.AttachNetworkInterfaceInput) (*ec2.AttachNetworkInterfaceOutput, error) + + AttachVolumeRequest(*ec2.AttachVolumeInput) (*request.Request, *ec2.VolumeAttachment) + + AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error) + + AttachVpnGatewayRequest(*ec2.AttachVpnGatewayInput) (*request.Request, *ec2.AttachVpnGatewayOutput) + + AttachVpnGateway(*ec2.AttachVpnGatewayInput) (*ec2.AttachVpnGatewayOutput, error) + + AuthorizeSecurityGroupEgressRequest(*ec2.AuthorizeSecurityGroupEgressInput) (*request.Request, *ec2.AuthorizeSecurityGroupEgressOutput) + + AuthorizeSecurityGroupEgress(*ec2.AuthorizeSecurityGroupEgressInput) (*ec2.AuthorizeSecurityGroupEgressOutput, error) + + AuthorizeSecurityGroupIngressRequest(*ec2.AuthorizeSecurityGroupIngressInput) (*request.Request, *ec2.AuthorizeSecurityGroupIngressOutput) + + AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) + + BundleInstanceRequest(*ec2.BundleInstanceInput) (*request.Request, *ec2.BundleInstanceOutput) + + BundleInstance(*ec2.BundleInstanceInput) (*ec2.BundleInstanceOutput, error) + + CancelBundleTaskRequest(*ec2.CancelBundleTaskInput) (*request.Request, *ec2.CancelBundleTaskOutput) + + CancelBundleTask(*ec2.CancelBundleTaskInput) (*ec2.CancelBundleTaskOutput, error) + + CancelConversionTaskRequest(*ec2.CancelConversionTaskInput) (*request.Request, *ec2.CancelConversionTaskOutput) + + CancelConversionTask(*ec2.CancelConversionTaskInput) (*ec2.CancelConversionTaskOutput, error) + + CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput) + + CancelExportTask(*ec2.CancelExportTaskInput) (*ec2.CancelExportTaskOutput, error) + + CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput) + + CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error) + + 
CancelReservedInstancesListingRequest(*ec2.CancelReservedInstancesListingInput) (*request.Request, *ec2.CancelReservedInstancesListingOutput) + + CancelReservedInstancesListing(*ec2.CancelReservedInstancesListingInput) (*ec2.CancelReservedInstancesListingOutput, error) + + CancelSpotFleetRequestsRequest(*ec2.CancelSpotFleetRequestsInput) (*request.Request, *ec2.CancelSpotFleetRequestsOutput) + + CancelSpotFleetRequests(*ec2.CancelSpotFleetRequestsInput) (*ec2.CancelSpotFleetRequestsOutput, error) + + CancelSpotInstanceRequestsRequest(*ec2.CancelSpotInstanceRequestsInput) (*request.Request, *ec2.CancelSpotInstanceRequestsOutput) + + CancelSpotInstanceRequests(*ec2.CancelSpotInstanceRequestsInput) (*ec2.CancelSpotInstanceRequestsOutput, error) + + ConfirmProductInstanceRequest(*ec2.ConfirmProductInstanceInput) (*request.Request, *ec2.ConfirmProductInstanceOutput) + + ConfirmProductInstance(*ec2.ConfirmProductInstanceInput) (*ec2.ConfirmProductInstanceOutput, error) + + CopyImageRequest(*ec2.CopyImageInput) (*request.Request, *ec2.CopyImageOutput) + + CopyImage(*ec2.CopyImageInput) (*ec2.CopyImageOutput, error) + + CopySnapshotRequest(*ec2.CopySnapshotInput) (*request.Request, *ec2.CopySnapshotOutput) + + CopySnapshot(*ec2.CopySnapshotInput) (*ec2.CopySnapshotOutput, error) + + CreateCustomerGatewayRequest(*ec2.CreateCustomerGatewayInput) (*request.Request, *ec2.CreateCustomerGatewayOutput) + + CreateCustomerGateway(*ec2.CreateCustomerGatewayInput) (*ec2.CreateCustomerGatewayOutput, error) + + CreateDhcpOptionsRequest(*ec2.CreateDhcpOptionsInput) (*request.Request, *ec2.CreateDhcpOptionsOutput) + + CreateDhcpOptions(*ec2.CreateDhcpOptionsInput) (*ec2.CreateDhcpOptionsOutput, error) + + CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput) + + CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error) + + CreateImageRequest(*ec2.CreateImageInput) (*request.Request, *ec2.CreateImageOutput) + + 
CreateImage(*ec2.CreateImageInput) (*ec2.CreateImageOutput, error) + + CreateInstanceExportTaskRequest(*ec2.CreateInstanceExportTaskInput) (*request.Request, *ec2.CreateInstanceExportTaskOutput) + + CreateInstanceExportTask(*ec2.CreateInstanceExportTaskInput) (*ec2.CreateInstanceExportTaskOutput, error) + + CreateInternetGatewayRequest(*ec2.CreateInternetGatewayInput) (*request.Request, *ec2.CreateInternetGatewayOutput) + + CreateInternetGateway(*ec2.CreateInternetGatewayInput) (*ec2.CreateInternetGatewayOutput, error) + + CreateKeyPairRequest(*ec2.CreateKeyPairInput) (*request.Request, *ec2.CreateKeyPairOutput) + + CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error) + + CreateNatGatewayRequest(*ec2.CreateNatGatewayInput) (*request.Request, *ec2.CreateNatGatewayOutput) + + CreateNatGateway(*ec2.CreateNatGatewayInput) (*ec2.CreateNatGatewayOutput, error) + + CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput) + + CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error) + + CreateNetworkAclEntryRequest(*ec2.CreateNetworkAclEntryInput) (*request.Request, *ec2.CreateNetworkAclEntryOutput) + + CreateNetworkAclEntry(*ec2.CreateNetworkAclEntryInput) (*ec2.CreateNetworkAclEntryOutput, error) + + CreateNetworkInterfaceRequest(*ec2.CreateNetworkInterfaceInput) (*request.Request, *ec2.CreateNetworkInterfaceOutput) + + CreateNetworkInterface(*ec2.CreateNetworkInterfaceInput) (*ec2.CreateNetworkInterfaceOutput, error) + + CreatePlacementGroupRequest(*ec2.CreatePlacementGroupInput) (*request.Request, *ec2.CreatePlacementGroupOutput) + + CreatePlacementGroup(*ec2.CreatePlacementGroupInput) (*ec2.CreatePlacementGroupOutput, error) + + CreateReservedInstancesListingRequest(*ec2.CreateReservedInstancesListingInput) (*request.Request, *ec2.CreateReservedInstancesListingOutput) + + CreateReservedInstancesListing(*ec2.CreateReservedInstancesListingInput) 
(*ec2.CreateReservedInstancesListingOutput, error) + + CreateRouteRequest(*ec2.CreateRouteInput) (*request.Request, *ec2.CreateRouteOutput) + + CreateRoute(*ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) + + CreateRouteTableRequest(*ec2.CreateRouteTableInput) (*request.Request, *ec2.CreateRouteTableOutput) + + CreateRouteTable(*ec2.CreateRouteTableInput) (*ec2.CreateRouteTableOutput, error) + + CreateSecurityGroupRequest(*ec2.CreateSecurityGroupInput) (*request.Request, *ec2.CreateSecurityGroupOutput) + + CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) + + CreateSnapshotRequest(*ec2.CreateSnapshotInput) (*request.Request, *ec2.Snapshot) + + CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error) + + CreateSpotDatafeedSubscriptionRequest(*ec2.CreateSpotDatafeedSubscriptionInput) (*request.Request, *ec2.CreateSpotDatafeedSubscriptionOutput) + + CreateSpotDatafeedSubscription(*ec2.CreateSpotDatafeedSubscriptionInput) (*ec2.CreateSpotDatafeedSubscriptionOutput, error) + + CreateSubnetRequest(*ec2.CreateSubnetInput) (*request.Request, *ec2.CreateSubnetOutput) + + CreateSubnet(*ec2.CreateSubnetInput) (*ec2.CreateSubnetOutput, error) + + CreateTagsRequest(*ec2.CreateTagsInput) (*request.Request, *ec2.CreateTagsOutput) + + CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) + + CreateVolumeRequest(*ec2.CreateVolumeInput) (*request.Request, *ec2.Volume) + + CreateVolume(*ec2.CreateVolumeInput) (*ec2.Volume, error) + + CreateVpcRequest(*ec2.CreateVpcInput) (*request.Request, *ec2.CreateVpcOutput) + + CreateVpc(*ec2.CreateVpcInput) (*ec2.CreateVpcOutput, error) + + CreateVpcEndpointRequest(*ec2.CreateVpcEndpointInput) (*request.Request, *ec2.CreateVpcEndpointOutput) + + CreateVpcEndpoint(*ec2.CreateVpcEndpointInput) (*ec2.CreateVpcEndpointOutput, error) + + CreateVpcPeeringConnectionRequest(*ec2.CreateVpcPeeringConnectionInput) (*request.Request, *ec2.CreateVpcPeeringConnectionOutput) + + 
CreateVpcPeeringConnection(*ec2.CreateVpcPeeringConnectionInput) (*ec2.CreateVpcPeeringConnectionOutput, error) + + CreateVpnConnectionRequest(*ec2.CreateVpnConnectionInput) (*request.Request, *ec2.CreateVpnConnectionOutput) + + CreateVpnConnection(*ec2.CreateVpnConnectionInput) (*ec2.CreateVpnConnectionOutput, error) + + CreateVpnConnectionRouteRequest(*ec2.CreateVpnConnectionRouteInput) (*request.Request, *ec2.CreateVpnConnectionRouteOutput) + + CreateVpnConnectionRoute(*ec2.CreateVpnConnectionRouteInput) (*ec2.CreateVpnConnectionRouteOutput, error) + + CreateVpnGatewayRequest(*ec2.CreateVpnGatewayInput) (*request.Request, *ec2.CreateVpnGatewayOutput) + + CreateVpnGateway(*ec2.CreateVpnGatewayInput) (*ec2.CreateVpnGatewayOutput, error) + + DeleteCustomerGatewayRequest(*ec2.DeleteCustomerGatewayInput) (*request.Request, *ec2.DeleteCustomerGatewayOutput) + + DeleteCustomerGateway(*ec2.DeleteCustomerGatewayInput) (*ec2.DeleteCustomerGatewayOutput, error) + + DeleteDhcpOptionsRequest(*ec2.DeleteDhcpOptionsInput) (*request.Request, *ec2.DeleteDhcpOptionsOutput) + + DeleteDhcpOptions(*ec2.DeleteDhcpOptionsInput) (*ec2.DeleteDhcpOptionsOutput, error) + + DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput) + + DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error) + + DeleteInternetGatewayRequest(*ec2.DeleteInternetGatewayInput) (*request.Request, *ec2.DeleteInternetGatewayOutput) + + DeleteInternetGateway(*ec2.DeleteInternetGatewayInput) (*ec2.DeleteInternetGatewayOutput, error) + + DeleteKeyPairRequest(*ec2.DeleteKeyPairInput) (*request.Request, *ec2.DeleteKeyPairOutput) + + DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error) + + DeleteNatGatewayRequest(*ec2.DeleteNatGatewayInput) (*request.Request, *ec2.DeleteNatGatewayOutput) + + DeleteNatGateway(*ec2.DeleteNatGatewayInput) (*ec2.DeleteNatGatewayOutput, error) + + DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, 
*ec2.DeleteNetworkAclOutput) + + DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error) + + DeleteNetworkAclEntryRequest(*ec2.DeleteNetworkAclEntryInput) (*request.Request, *ec2.DeleteNetworkAclEntryOutput) + + DeleteNetworkAclEntry(*ec2.DeleteNetworkAclEntryInput) (*ec2.DeleteNetworkAclEntryOutput, error) + + DeleteNetworkInterfaceRequest(*ec2.DeleteNetworkInterfaceInput) (*request.Request, *ec2.DeleteNetworkInterfaceOutput) + + DeleteNetworkInterface(*ec2.DeleteNetworkInterfaceInput) (*ec2.DeleteNetworkInterfaceOutput, error) + + DeletePlacementGroupRequest(*ec2.DeletePlacementGroupInput) (*request.Request, *ec2.DeletePlacementGroupOutput) + + DeletePlacementGroup(*ec2.DeletePlacementGroupInput) (*ec2.DeletePlacementGroupOutput, error) + + DeleteRouteRequest(*ec2.DeleteRouteInput) (*request.Request, *ec2.DeleteRouteOutput) + + DeleteRoute(*ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) + + DeleteRouteTableRequest(*ec2.DeleteRouteTableInput) (*request.Request, *ec2.DeleteRouteTableOutput) + + DeleteRouteTable(*ec2.DeleteRouteTableInput) (*ec2.DeleteRouteTableOutput, error) + + DeleteSecurityGroupRequest(*ec2.DeleteSecurityGroupInput) (*request.Request, *ec2.DeleteSecurityGroupOutput) + + DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) + + DeleteSnapshotRequest(*ec2.DeleteSnapshotInput) (*request.Request, *ec2.DeleteSnapshotOutput) + + DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error) + + DeleteSpotDatafeedSubscriptionRequest(*ec2.DeleteSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DeleteSpotDatafeedSubscriptionOutput) + + DeleteSpotDatafeedSubscription(*ec2.DeleteSpotDatafeedSubscriptionInput) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error) + + DeleteSubnetRequest(*ec2.DeleteSubnetInput) (*request.Request, *ec2.DeleteSubnetOutput) + + DeleteSubnet(*ec2.DeleteSubnetInput) (*ec2.DeleteSubnetOutput, error) + + DeleteTagsRequest(*ec2.DeleteTagsInput) 
(*request.Request, *ec2.DeleteTagsOutput) + + DeleteTags(*ec2.DeleteTagsInput) (*ec2.DeleteTagsOutput, error) + + DeleteVolumeRequest(*ec2.DeleteVolumeInput) (*request.Request, *ec2.DeleteVolumeOutput) + + DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) + + DeleteVpcRequest(*ec2.DeleteVpcInput) (*request.Request, *ec2.DeleteVpcOutput) + + DeleteVpc(*ec2.DeleteVpcInput) (*ec2.DeleteVpcOutput, error) + + DeleteVpcEndpointsRequest(*ec2.DeleteVpcEndpointsInput) (*request.Request, *ec2.DeleteVpcEndpointsOutput) + + DeleteVpcEndpoints(*ec2.DeleteVpcEndpointsInput) (*ec2.DeleteVpcEndpointsOutput, error) + + DeleteVpcPeeringConnectionRequest(*ec2.DeleteVpcPeeringConnectionInput) (*request.Request, *ec2.DeleteVpcPeeringConnectionOutput) + + DeleteVpcPeeringConnection(*ec2.DeleteVpcPeeringConnectionInput) (*ec2.DeleteVpcPeeringConnectionOutput, error) + + DeleteVpnConnectionRequest(*ec2.DeleteVpnConnectionInput) (*request.Request, *ec2.DeleteVpnConnectionOutput) + + DeleteVpnConnection(*ec2.DeleteVpnConnectionInput) (*ec2.DeleteVpnConnectionOutput, error) + + DeleteVpnConnectionRouteRequest(*ec2.DeleteVpnConnectionRouteInput) (*request.Request, *ec2.DeleteVpnConnectionRouteOutput) + + DeleteVpnConnectionRoute(*ec2.DeleteVpnConnectionRouteInput) (*ec2.DeleteVpnConnectionRouteOutput, error) + + DeleteVpnGatewayRequest(*ec2.DeleteVpnGatewayInput) (*request.Request, *ec2.DeleteVpnGatewayOutput) + + DeleteVpnGateway(*ec2.DeleteVpnGatewayInput) (*ec2.DeleteVpnGatewayOutput, error) + + DeregisterImageRequest(*ec2.DeregisterImageInput) (*request.Request, *ec2.DeregisterImageOutput) + + DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error) + + DescribeAccountAttributesRequest(*ec2.DescribeAccountAttributesInput) (*request.Request, *ec2.DescribeAccountAttributesOutput) + + DescribeAccountAttributes(*ec2.DescribeAccountAttributesInput) (*ec2.DescribeAccountAttributesOutput, error) + + DescribeAddressesRequest(*ec2.DescribeAddressesInput) 
(*request.Request, *ec2.DescribeAddressesOutput) + + DescribeAddresses(*ec2.DescribeAddressesInput) (*ec2.DescribeAddressesOutput, error) + + DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput) + + DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error) + + DescribeBundleTasksRequest(*ec2.DescribeBundleTasksInput) (*request.Request, *ec2.DescribeBundleTasksOutput) + + DescribeBundleTasks(*ec2.DescribeBundleTasksInput) (*ec2.DescribeBundleTasksOutput, error) + + DescribeClassicLinkInstancesRequest(*ec2.DescribeClassicLinkInstancesInput) (*request.Request, *ec2.DescribeClassicLinkInstancesOutput) + + DescribeClassicLinkInstances(*ec2.DescribeClassicLinkInstancesInput) (*ec2.DescribeClassicLinkInstancesOutput, error) + + DescribeConversionTasksRequest(*ec2.DescribeConversionTasksInput) (*request.Request, *ec2.DescribeConversionTasksOutput) + + DescribeConversionTasks(*ec2.DescribeConversionTasksInput) (*ec2.DescribeConversionTasksOutput, error) + + DescribeCustomerGatewaysRequest(*ec2.DescribeCustomerGatewaysInput) (*request.Request, *ec2.DescribeCustomerGatewaysOutput) + + DescribeCustomerGateways(*ec2.DescribeCustomerGatewaysInput) (*ec2.DescribeCustomerGatewaysOutput, error) + + DescribeDhcpOptionsRequest(*ec2.DescribeDhcpOptionsInput) (*request.Request, *ec2.DescribeDhcpOptionsOutput) + + DescribeDhcpOptions(*ec2.DescribeDhcpOptionsInput) (*ec2.DescribeDhcpOptionsOutput, error) + + DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput) + + DescribeExportTasks(*ec2.DescribeExportTasksInput) (*ec2.DescribeExportTasksOutput, error) + + DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput) + + DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error) + + DescribeHostsRequest(*ec2.DescribeHostsInput) (*request.Request, 
*ec2.DescribeHostsOutput) + + DescribeHosts(*ec2.DescribeHostsInput) (*ec2.DescribeHostsOutput, error) + + DescribeIdFormatRequest(*ec2.DescribeIdFormatInput) (*request.Request, *ec2.DescribeIdFormatOutput) + + DescribeIdFormat(*ec2.DescribeIdFormatInput) (*ec2.DescribeIdFormatOutput, error) + + DescribeImageAttributeRequest(*ec2.DescribeImageAttributeInput) (*request.Request, *ec2.DescribeImageAttributeOutput) + + DescribeImageAttribute(*ec2.DescribeImageAttributeInput) (*ec2.DescribeImageAttributeOutput, error) + + DescribeImagesRequest(*ec2.DescribeImagesInput) (*request.Request, *ec2.DescribeImagesOutput) + + DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error) + + DescribeImportImageTasksRequest(*ec2.DescribeImportImageTasksInput) (*request.Request, *ec2.DescribeImportImageTasksOutput) + + DescribeImportImageTasks(*ec2.DescribeImportImageTasksInput) (*ec2.DescribeImportImageTasksOutput, error) + + DescribeImportSnapshotTasksRequest(*ec2.DescribeImportSnapshotTasksInput) (*request.Request, *ec2.DescribeImportSnapshotTasksOutput) + + DescribeImportSnapshotTasks(*ec2.DescribeImportSnapshotTasksInput) (*ec2.DescribeImportSnapshotTasksOutput, error) + + DescribeInstanceAttributeRequest(*ec2.DescribeInstanceAttributeInput) (*request.Request, *ec2.DescribeInstanceAttributeOutput) + + DescribeInstanceAttribute(*ec2.DescribeInstanceAttributeInput) (*ec2.DescribeInstanceAttributeOutput, error) + + DescribeInstanceStatusRequest(*ec2.DescribeInstanceStatusInput) (*request.Request, *ec2.DescribeInstanceStatusOutput) + + DescribeInstanceStatus(*ec2.DescribeInstanceStatusInput) (*ec2.DescribeInstanceStatusOutput, error) + + DescribeInstanceStatusPages(*ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool) error + + DescribeInstancesRequest(*ec2.DescribeInstancesInput) (*request.Request, *ec2.DescribeInstancesOutput) + + DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) + + 
DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error + + DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput) + + DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error) + + DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput) + + DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error) + + DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput) + + DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error) + + DescribeNatGatewaysRequest(*ec2.DescribeNatGatewaysInput) (*request.Request, *ec2.DescribeNatGatewaysOutput) + + DescribeNatGateways(*ec2.DescribeNatGatewaysInput) (*ec2.DescribeNatGatewaysOutput, error) + + DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput) + + DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error) + + DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, *ec2.DescribeNetworkInterfaceAttributeOutput) + + DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error) + + DescribeNetworkInterfacesRequest(*ec2.DescribeNetworkInterfacesInput) (*request.Request, *ec2.DescribeNetworkInterfacesOutput) + + DescribeNetworkInterfaces(*ec2.DescribeNetworkInterfacesInput) (*ec2.DescribeNetworkInterfacesOutput, error) + + DescribePlacementGroupsRequest(*ec2.DescribePlacementGroupsInput) (*request.Request, *ec2.DescribePlacementGroupsOutput) + + DescribePlacementGroups(*ec2.DescribePlacementGroupsInput) (*ec2.DescribePlacementGroupsOutput, error) + + 
DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput) + + DescribePrefixLists(*ec2.DescribePrefixListsInput) (*ec2.DescribePrefixListsOutput, error) + + DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, *ec2.DescribeRegionsOutput) + + DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error) + + DescribeReservedInstancesRequest(*ec2.DescribeReservedInstancesInput) (*request.Request, *ec2.DescribeReservedInstancesOutput) + + DescribeReservedInstances(*ec2.DescribeReservedInstancesInput) (*ec2.DescribeReservedInstancesOutput, error) + + DescribeReservedInstancesListingsRequest(*ec2.DescribeReservedInstancesListingsInput) (*request.Request, *ec2.DescribeReservedInstancesListingsOutput) + + DescribeReservedInstancesListings(*ec2.DescribeReservedInstancesListingsInput) (*ec2.DescribeReservedInstancesListingsOutput, error) + + DescribeReservedInstancesModificationsRequest(*ec2.DescribeReservedInstancesModificationsInput) (*request.Request, *ec2.DescribeReservedInstancesModificationsOutput) + + DescribeReservedInstancesModifications(*ec2.DescribeReservedInstancesModificationsInput) (*ec2.DescribeReservedInstancesModificationsOutput, error) + + DescribeReservedInstancesModificationsPages(*ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool) error + + DescribeReservedInstancesOfferingsRequest(*ec2.DescribeReservedInstancesOfferingsInput) (*request.Request, *ec2.DescribeReservedInstancesOfferingsOutput) + + DescribeReservedInstancesOfferings(*ec2.DescribeReservedInstancesOfferingsInput) (*ec2.DescribeReservedInstancesOfferingsOutput, error) + + DescribeReservedInstancesOfferingsPages(*ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool) error + + DescribeRouteTablesRequest(*ec2.DescribeRouteTablesInput) (*request.Request, *ec2.DescribeRouteTablesOutput) + + 
DescribeRouteTables(*ec2.DescribeRouteTablesInput) (*ec2.DescribeRouteTablesOutput, error) + + DescribeScheduledInstanceAvailabilityRequest(*ec2.DescribeScheduledInstanceAvailabilityInput) (*request.Request, *ec2.DescribeScheduledInstanceAvailabilityOutput) + + DescribeScheduledInstanceAvailability(*ec2.DescribeScheduledInstanceAvailabilityInput) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error) + + DescribeScheduledInstancesRequest(*ec2.DescribeScheduledInstancesInput) (*request.Request, *ec2.DescribeScheduledInstancesOutput) + + DescribeScheduledInstances(*ec2.DescribeScheduledInstancesInput) (*ec2.DescribeScheduledInstancesOutput, error) + + DescribeSecurityGroupsRequest(*ec2.DescribeSecurityGroupsInput) (*request.Request, *ec2.DescribeSecurityGroupsOutput) + + DescribeSecurityGroups(*ec2.DescribeSecurityGroupsInput) (*ec2.DescribeSecurityGroupsOutput, error) + + DescribeSnapshotAttributeRequest(*ec2.DescribeSnapshotAttributeInput) (*request.Request, *ec2.DescribeSnapshotAttributeOutput) + + DescribeSnapshotAttribute(*ec2.DescribeSnapshotAttributeInput) (*ec2.DescribeSnapshotAttributeOutput, error) + + DescribeSnapshotsRequest(*ec2.DescribeSnapshotsInput) (*request.Request, *ec2.DescribeSnapshotsOutput) + + DescribeSnapshots(*ec2.DescribeSnapshotsInput) (*ec2.DescribeSnapshotsOutput, error) + + DescribeSnapshotsPages(*ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool) error + + DescribeSpotDatafeedSubscriptionRequest(*ec2.DescribeSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DescribeSpotDatafeedSubscriptionOutput) + + DescribeSpotDatafeedSubscription(*ec2.DescribeSpotDatafeedSubscriptionInput) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error) + + DescribeSpotFleetInstancesRequest(*ec2.DescribeSpotFleetInstancesInput) (*request.Request, *ec2.DescribeSpotFleetInstancesOutput) + + DescribeSpotFleetInstances(*ec2.DescribeSpotFleetInstancesInput) (*ec2.DescribeSpotFleetInstancesOutput, error) + + 
DescribeSpotFleetRequestHistoryRequest(*ec2.DescribeSpotFleetRequestHistoryInput) (*request.Request, *ec2.DescribeSpotFleetRequestHistoryOutput) + + DescribeSpotFleetRequestHistory(*ec2.DescribeSpotFleetRequestHistoryInput) (*ec2.DescribeSpotFleetRequestHistoryOutput, error) + + DescribeSpotFleetRequestsRequest(*ec2.DescribeSpotFleetRequestsInput) (*request.Request, *ec2.DescribeSpotFleetRequestsOutput) + + DescribeSpotFleetRequests(*ec2.DescribeSpotFleetRequestsInput) (*ec2.DescribeSpotFleetRequestsOutput, error) + + DescribeSpotInstanceRequestsRequest(*ec2.DescribeSpotInstanceRequestsInput) (*request.Request, *ec2.DescribeSpotInstanceRequestsOutput) + + DescribeSpotInstanceRequests(*ec2.DescribeSpotInstanceRequestsInput) (*ec2.DescribeSpotInstanceRequestsOutput, error) + + DescribeSpotPriceHistoryRequest(*ec2.DescribeSpotPriceHistoryInput) (*request.Request, *ec2.DescribeSpotPriceHistoryOutput) + + DescribeSpotPriceHistory(*ec2.DescribeSpotPriceHistoryInput) (*ec2.DescribeSpotPriceHistoryOutput, error) + + DescribeSpotPriceHistoryPages(*ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error + + DescribeSubnetsRequest(*ec2.DescribeSubnetsInput) (*request.Request, *ec2.DescribeSubnetsOutput) + + DescribeSubnets(*ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error) + + DescribeTagsRequest(*ec2.DescribeTagsInput) (*request.Request, *ec2.DescribeTagsOutput) + + DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) + + DescribeTagsPages(*ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool) error + + DescribeVolumeAttributeRequest(*ec2.DescribeVolumeAttributeInput) (*request.Request, *ec2.DescribeVolumeAttributeOutput) + + DescribeVolumeAttribute(*ec2.DescribeVolumeAttributeInput) (*ec2.DescribeVolumeAttributeOutput, error) + + DescribeVolumeStatusRequest(*ec2.DescribeVolumeStatusInput) (*request.Request, *ec2.DescribeVolumeStatusOutput) + + DescribeVolumeStatus(*ec2.DescribeVolumeStatusInput) 
(*ec2.DescribeVolumeStatusOutput, error) + + DescribeVolumeStatusPages(*ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool) error + + DescribeVolumesRequest(*ec2.DescribeVolumesInput) (*request.Request, *ec2.DescribeVolumesOutput) + + DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error) + + DescribeVolumesPages(*ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool) error + + DescribeVpcAttributeRequest(*ec2.DescribeVpcAttributeInput) (*request.Request, *ec2.DescribeVpcAttributeOutput) + + DescribeVpcAttribute(*ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error) + + DescribeVpcClassicLinkRequest(*ec2.DescribeVpcClassicLinkInput) (*request.Request, *ec2.DescribeVpcClassicLinkOutput) + + DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error) + + DescribeVpcClassicLinkDnsSupportRequest(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DescribeVpcClassicLinkDnsSupportOutput) + + DescribeVpcClassicLinkDnsSupport(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error) + + DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput) + + DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error) + + DescribeVpcEndpointsRequest(*ec2.DescribeVpcEndpointsInput) (*request.Request, *ec2.DescribeVpcEndpointsOutput) + + DescribeVpcEndpoints(*ec2.DescribeVpcEndpointsInput) (*ec2.DescribeVpcEndpointsOutput, error) + + DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput) + + DescribeVpcPeeringConnections(*ec2.DescribeVpcPeeringConnectionsInput) (*ec2.DescribeVpcPeeringConnectionsOutput, error) + + DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, 
*ec2.DescribeVpcsOutput) + + DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) + + DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput) + + DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error) + + DescribeVpnGatewaysRequest(*ec2.DescribeVpnGatewaysInput) (*request.Request, *ec2.DescribeVpnGatewaysOutput) + + DescribeVpnGateways(*ec2.DescribeVpnGatewaysInput) (*ec2.DescribeVpnGatewaysOutput, error) + + DetachClassicLinkVpcRequest(*ec2.DetachClassicLinkVpcInput) (*request.Request, *ec2.DetachClassicLinkVpcOutput) + + DetachClassicLinkVpc(*ec2.DetachClassicLinkVpcInput) (*ec2.DetachClassicLinkVpcOutput, error) + + DetachInternetGatewayRequest(*ec2.DetachInternetGatewayInput) (*request.Request, *ec2.DetachInternetGatewayOutput) + + DetachInternetGateway(*ec2.DetachInternetGatewayInput) (*ec2.DetachInternetGatewayOutput, error) + + DetachNetworkInterfaceRequest(*ec2.DetachNetworkInterfaceInput) (*request.Request, *ec2.DetachNetworkInterfaceOutput) + + DetachNetworkInterface(*ec2.DetachNetworkInterfaceInput) (*ec2.DetachNetworkInterfaceOutput, error) + + DetachVolumeRequest(*ec2.DetachVolumeInput) (*request.Request, *ec2.VolumeAttachment) + + DetachVolume(*ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) + + DetachVpnGatewayRequest(*ec2.DetachVpnGatewayInput) (*request.Request, *ec2.DetachVpnGatewayOutput) + + DetachVpnGateway(*ec2.DetachVpnGatewayInput) (*ec2.DetachVpnGatewayOutput, error) + + DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) (*request.Request, *ec2.DisableVgwRoutePropagationOutput) + + DisableVgwRoutePropagation(*ec2.DisableVgwRoutePropagationInput) (*ec2.DisableVgwRoutePropagationOutput, error) + + DisableVpcClassicLinkRequest(*ec2.DisableVpcClassicLinkInput) (*request.Request, *ec2.DisableVpcClassicLinkOutput) + + DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) 
(*ec2.DisableVpcClassicLinkOutput, error) + + DisableVpcClassicLinkDnsSupportRequest(*ec2.DisableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DisableVpcClassicLinkDnsSupportOutput) + + DisableVpcClassicLinkDnsSupport(*ec2.DisableVpcClassicLinkDnsSupportInput) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error) + + DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput) + + DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error) + + DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput) + + DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error) + + EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) (*request.Request, *ec2.EnableVgwRoutePropagationOutput) + + EnableVgwRoutePropagation(*ec2.EnableVgwRoutePropagationInput) (*ec2.EnableVgwRoutePropagationOutput, error) + + EnableVolumeIORequest(*ec2.EnableVolumeIOInput) (*request.Request, *ec2.EnableVolumeIOOutput) + + EnableVolumeIO(*ec2.EnableVolumeIOInput) (*ec2.EnableVolumeIOOutput, error) + + EnableVpcClassicLinkRequest(*ec2.EnableVpcClassicLinkInput) (*request.Request, *ec2.EnableVpcClassicLinkOutput) + + EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error) + + EnableVpcClassicLinkDnsSupportRequest(*ec2.EnableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.EnableVpcClassicLinkDnsSupportOutput) + + EnableVpcClassicLinkDnsSupport(*ec2.EnableVpcClassicLinkDnsSupportInput) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error) + + GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput) + + GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error) + + GetPasswordDataRequest(*ec2.GetPasswordDataInput) (*request.Request, *ec2.GetPasswordDataOutput) + + 
GetPasswordData(*ec2.GetPasswordDataInput) (*ec2.GetPasswordDataOutput, error) + + ImportImageRequest(*ec2.ImportImageInput) (*request.Request, *ec2.ImportImageOutput) + + ImportImage(*ec2.ImportImageInput) (*ec2.ImportImageOutput, error) + + ImportInstanceRequest(*ec2.ImportInstanceInput) (*request.Request, *ec2.ImportInstanceOutput) + + ImportInstance(*ec2.ImportInstanceInput) (*ec2.ImportInstanceOutput, error) + + ImportKeyPairRequest(*ec2.ImportKeyPairInput) (*request.Request, *ec2.ImportKeyPairOutput) + + ImportKeyPair(*ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error) + + ImportSnapshotRequest(*ec2.ImportSnapshotInput) (*request.Request, *ec2.ImportSnapshotOutput) + + ImportSnapshot(*ec2.ImportSnapshotInput) (*ec2.ImportSnapshotOutput, error) + + ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput) + + ImportVolume(*ec2.ImportVolumeInput) (*ec2.ImportVolumeOutput, error) + + ModifyHostsRequest(*ec2.ModifyHostsInput) (*request.Request, *ec2.ModifyHostsOutput) + + ModifyHosts(*ec2.ModifyHostsInput) (*ec2.ModifyHostsOutput, error) + + ModifyIdFormatRequest(*ec2.ModifyIdFormatInput) (*request.Request, *ec2.ModifyIdFormatOutput) + + ModifyIdFormat(*ec2.ModifyIdFormatInput) (*ec2.ModifyIdFormatOutput, error) + + ModifyImageAttributeRequest(*ec2.ModifyImageAttributeInput) (*request.Request, *ec2.ModifyImageAttributeOutput) + + ModifyImageAttribute(*ec2.ModifyImageAttributeInput) (*ec2.ModifyImageAttributeOutput, error) + + ModifyInstanceAttributeRequest(*ec2.ModifyInstanceAttributeInput) (*request.Request, *ec2.ModifyInstanceAttributeOutput) + + ModifyInstanceAttribute(*ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) + + ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) (*request.Request, *ec2.ModifyInstancePlacementOutput) + + ModifyInstancePlacement(*ec2.ModifyInstancePlacementInput) (*ec2.ModifyInstancePlacementOutput, error) + + 
ModifyNetworkInterfaceAttributeRequest(*ec2.ModifyNetworkInterfaceAttributeInput) (*request.Request, *ec2.ModifyNetworkInterfaceAttributeOutput) + + ModifyNetworkInterfaceAttribute(*ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error) + + ModifyReservedInstancesRequest(*ec2.ModifyReservedInstancesInput) (*request.Request, *ec2.ModifyReservedInstancesOutput) + + ModifyReservedInstances(*ec2.ModifyReservedInstancesInput) (*ec2.ModifyReservedInstancesOutput, error) + + ModifySnapshotAttributeRequest(*ec2.ModifySnapshotAttributeInput) (*request.Request, *ec2.ModifySnapshotAttributeOutput) + + ModifySnapshotAttribute(*ec2.ModifySnapshotAttributeInput) (*ec2.ModifySnapshotAttributeOutput, error) + + ModifySpotFleetRequestRequest(*ec2.ModifySpotFleetRequestInput) (*request.Request, *ec2.ModifySpotFleetRequestOutput) + + ModifySpotFleetRequest(*ec2.ModifySpotFleetRequestInput) (*ec2.ModifySpotFleetRequestOutput, error) + + ModifySubnetAttributeRequest(*ec2.ModifySubnetAttributeInput) (*request.Request, *ec2.ModifySubnetAttributeOutput) + + ModifySubnetAttribute(*ec2.ModifySubnetAttributeInput) (*ec2.ModifySubnetAttributeOutput, error) + + ModifyVolumeAttributeRequest(*ec2.ModifyVolumeAttributeInput) (*request.Request, *ec2.ModifyVolumeAttributeOutput) + + ModifyVolumeAttribute(*ec2.ModifyVolumeAttributeInput) (*ec2.ModifyVolumeAttributeOutput, error) + + ModifyVpcAttributeRequest(*ec2.ModifyVpcAttributeInput) (*request.Request, *ec2.ModifyVpcAttributeOutput) + + ModifyVpcAttribute(*ec2.ModifyVpcAttributeInput) (*ec2.ModifyVpcAttributeOutput, error) + + ModifyVpcEndpointRequest(*ec2.ModifyVpcEndpointInput) (*request.Request, *ec2.ModifyVpcEndpointOutput) + + ModifyVpcEndpoint(*ec2.ModifyVpcEndpointInput) (*ec2.ModifyVpcEndpointOutput, error) + + MonitorInstancesRequest(*ec2.MonitorInstancesInput) (*request.Request, *ec2.MonitorInstancesOutput) + + MonitorInstances(*ec2.MonitorInstancesInput) (*ec2.MonitorInstancesOutput, error) + + 
MoveAddressToVpcRequest(*ec2.MoveAddressToVpcInput) (*request.Request, *ec2.MoveAddressToVpcOutput) + + MoveAddressToVpc(*ec2.MoveAddressToVpcInput) (*ec2.MoveAddressToVpcOutput, error) + + PurchaseReservedInstancesOfferingRequest(*ec2.PurchaseReservedInstancesOfferingInput) (*request.Request, *ec2.PurchaseReservedInstancesOfferingOutput) + + PurchaseReservedInstancesOffering(*ec2.PurchaseReservedInstancesOfferingInput) (*ec2.PurchaseReservedInstancesOfferingOutput, error) + + PurchaseScheduledInstancesRequest(*ec2.PurchaseScheduledInstancesInput) (*request.Request, *ec2.PurchaseScheduledInstancesOutput) + + PurchaseScheduledInstances(*ec2.PurchaseScheduledInstancesInput) (*ec2.PurchaseScheduledInstancesOutput, error) + + RebootInstancesRequest(*ec2.RebootInstancesInput) (*request.Request, *ec2.RebootInstancesOutput) + + RebootInstances(*ec2.RebootInstancesInput) (*ec2.RebootInstancesOutput, error) + + RegisterImageRequest(*ec2.RegisterImageInput) (*request.Request, *ec2.RegisterImageOutput) + + RegisterImage(*ec2.RegisterImageInput) (*ec2.RegisterImageOutput, error) + + RejectVpcPeeringConnectionRequest(*ec2.RejectVpcPeeringConnectionInput) (*request.Request, *ec2.RejectVpcPeeringConnectionOutput) + + RejectVpcPeeringConnection(*ec2.RejectVpcPeeringConnectionInput) (*ec2.RejectVpcPeeringConnectionOutput, error) + + ReleaseAddressRequest(*ec2.ReleaseAddressInput) (*request.Request, *ec2.ReleaseAddressOutput) + + ReleaseAddress(*ec2.ReleaseAddressInput) (*ec2.ReleaseAddressOutput, error) + + ReleaseHostsRequest(*ec2.ReleaseHostsInput) (*request.Request, *ec2.ReleaseHostsOutput) + + ReleaseHosts(*ec2.ReleaseHostsInput) (*ec2.ReleaseHostsOutput, error) + + ReplaceNetworkAclAssociationRequest(*ec2.ReplaceNetworkAclAssociationInput) (*request.Request, *ec2.ReplaceNetworkAclAssociationOutput) + + ReplaceNetworkAclAssociation(*ec2.ReplaceNetworkAclAssociationInput) (*ec2.ReplaceNetworkAclAssociationOutput, error) + + 
ReplaceNetworkAclEntryRequest(*ec2.ReplaceNetworkAclEntryInput) (*request.Request, *ec2.ReplaceNetworkAclEntryOutput) + + ReplaceNetworkAclEntry(*ec2.ReplaceNetworkAclEntryInput) (*ec2.ReplaceNetworkAclEntryOutput, error) + + ReplaceRouteRequest(*ec2.ReplaceRouteInput) (*request.Request, *ec2.ReplaceRouteOutput) + + ReplaceRoute(*ec2.ReplaceRouteInput) (*ec2.ReplaceRouteOutput, error) + + ReplaceRouteTableAssociationRequest(*ec2.ReplaceRouteTableAssociationInput) (*request.Request, *ec2.ReplaceRouteTableAssociationOutput) + + ReplaceRouteTableAssociation(*ec2.ReplaceRouteTableAssociationInput) (*ec2.ReplaceRouteTableAssociationOutput, error) + + ReportInstanceStatusRequest(*ec2.ReportInstanceStatusInput) (*request.Request, *ec2.ReportInstanceStatusOutput) + + ReportInstanceStatus(*ec2.ReportInstanceStatusInput) (*ec2.ReportInstanceStatusOutput, error) + + RequestSpotFleetRequest(*ec2.RequestSpotFleetInput) (*request.Request, *ec2.RequestSpotFleetOutput) + + RequestSpotFleet(*ec2.RequestSpotFleetInput) (*ec2.RequestSpotFleetOutput, error) + + RequestSpotInstancesRequest(*ec2.RequestSpotInstancesInput) (*request.Request, *ec2.RequestSpotInstancesOutput) + + RequestSpotInstances(*ec2.RequestSpotInstancesInput) (*ec2.RequestSpotInstancesOutput, error) + + ResetImageAttributeRequest(*ec2.ResetImageAttributeInput) (*request.Request, *ec2.ResetImageAttributeOutput) + + ResetImageAttribute(*ec2.ResetImageAttributeInput) (*ec2.ResetImageAttributeOutput, error) + + ResetInstanceAttributeRequest(*ec2.ResetInstanceAttributeInput) (*request.Request, *ec2.ResetInstanceAttributeOutput) + + ResetInstanceAttribute(*ec2.ResetInstanceAttributeInput) (*ec2.ResetInstanceAttributeOutput, error) + + ResetNetworkInterfaceAttributeRequest(*ec2.ResetNetworkInterfaceAttributeInput) (*request.Request, *ec2.ResetNetworkInterfaceAttributeOutput) + + ResetNetworkInterfaceAttribute(*ec2.ResetNetworkInterfaceAttributeInput) (*ec2.ResetNetworkInterfaceAttributeOutput, error) + + 
ResetSnapshotAttributeRequest(*ec2.ResetSnapshotAttributeInput) (*request.Request, *ec2.ResetSnapshotAttributeOutput) + + ResetSnapshotAttribute(*ec2.ResetSnapshotAttributeInput) (*ec2.ResetSnapshotAttributeOutput, error) + + RestoreAddressToClassicRequest(*ec2.RestoreAddressToClassicInput) (*request.Request, *ec2.RestoreAddressToClassicOutput) + + RestoreAddressToClassic(*ec2.RestoreAddressToClassicInput) (*ec2.RestoreAddressToClassicOutput, error) + + RevokeSecurityGroupEgressRequest(*ec2.RevokeSecurityGroupEgressInput) (*request.Request, *ec2.RevokeSecurityGroupEgressOutput) + + RevokeSecurityGroupEgress(*ec2.RevokeSecurityGroupEgressInput) (*ec2.RevokeSecurityGroupEgressOutput, error) + + RevokeSecurityGroupIngressRequest(*ec2.RevokeSecurityGroupIngressInput) (*request.Request, *ec2.RevokeSecurityGroupIngressOutput) + + RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) + + RunInstancesRequest(*ec2.RunInstancesInput) (*request.Request, *ec2.Reservation) + + RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error) + + RunScheduledInstancesRequest(*ec2.RunScheduledInstancesInput) (*request.Request, *ec2.RunScheduledInstancesOutput) + + RunScheduledInstances(*ec2.RunScheduledInstancesInput) (*ec2.RunScheduledInstancesOutput, error) + + StartInstancesRequest(*ec2.StartInstancesInput) (*request.Request, *ec2.StartInstancesOutput) + + StartInstances(*ec2.StartInstancesInput) (*ec2.StartInstancesOutput, error) + + StopInstancesRequest(*ec2.StopInstancesInput) (*request.Request, *ec2.StopInstancesOutput) + + StopInstances(*ec2.StopInstancesInput) (*ec2.StopInstancesOutput, error) + + TerminateInstancesRequest(*ec2.TerminateInstancesInput) (*request.Request, *ec2.TerminateInstancesOutput) + + TerminateInstances(*ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error) + + UnassignPrivateIpAddressesRequest(*ec2.UnassignPrivateIpAddressesInput) (*request.Request, 
*ec2.UnassignPrivateIpAddressesOutput) + + UnassignPrivateIpAddresses(*ec2.UnassignPrivateIpAddressesInput) (*ec2.UnassignPrivateIpAddressesOutput, error) + + UnmonitorInstancesRequest(*ec2.UnmonitorInstancesInput) (*request.Request, *ec2.UnmonitorInstancesOutput) + + UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error) +} + +var _ EC2API = (*ec2.EC2)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5695 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ec2_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEC2_AcceptVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.AcceptVpcPeeringConnectionInput{ + DryRun: aws.Bool(true), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.AcceptVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AllocateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.AllocateAddressInput{ + Domain: aws.String("DomainType"), + DryRun: aws.Bool(true), + } + resp, err := svc.AllocateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AllocateHosts() { + svc := ec2.New(session.New()) + + params := &ec2.AllocateHostsInput{ + AvailabilityZone: aws.String("String"), // Required + InstanceType: aws.String("String"), // Required + Quantity: aws.Int64(1), // Required + AutoPlacement: aws.String("AutoPlacement"), + ClientToken: aws.String("String"), + } + resp, err := svc.AllocateHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssignPrivateIpAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.AssignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String("String"), // Required + AllowReassignment: aws.Bool(true), + PrivateIpAddresses: []*string{ + aws.String("String"), // Required + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + } + resp, err := svc.AssignPrivateIpAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AssociateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateAddressInput{ + AllocationId: aws.String("String"), + AllowReassociation: aws.Bool(true), + DryRun: aws.Bool(true), + InstanceId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PublicIp: aws.String("String"), + } + resp, err := svc.AssociateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateDhcpOptionsInput{ + DhcpOptionsId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AssociateDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AssociateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.AssociateRouteTableInput{ + RouteTableId: aws.String("String"), // Required + SubnetId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AssociateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachClassicLinkVpc() { + svc := ec2.New(session.New()) + + params := &ec2.AttachClassicLinkVpcInput{ + Groups: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + InstanceId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachClassicLinkVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(1), // Required + InstanceId: aws.String("String"), // Required + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AttachVolume() { + svc := ec2.New(session.New()) + + params := &ec2.AttachVolumeInput{ + Device: aws.String("String"), // Required + InstanceId: aws.String("String"), // Required + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AttachVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.AttachVpnGatewayInput{ + VpcId: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.AttachVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_AuthorizeSecurityGroupEgress() { + svc := ec2.New(session.New()) + + params := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.AuthorizeSecurityGroupEgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_AuthorizeSecurityGroupIngress() { + svc := ec2.New(session.New()) + + params := &ec2.AuthorizeSecurityGroupIngressInput{ + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.AuthorizeSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_BundleInstance() { + svc := ec2.New(session.New()) + + params := &ec2.BundleInstanceInput{ + InstanceId: aws.String("String"), // Required + Storage: &ec2.Storage{ // Required + S3: &ec2.S3Storage{ + AWSAccessKeyId: aws.String("String"), + Bucket: aws.String("String"), + Prefix: aws.String("String"), + UploadPolicy: []byte("PAYLOAD"), + UploadPolicySignature: aws.String("String"), + }, + }, + DryRun: aws.Bool(true), + } + resp, err := svc.BundleInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelBundleTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelBundleTaskInput{ + BundleId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CancelBundleTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelConversionTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelConversionTaskInput{ + ConversionTaskId: aws.String("String"), // Required + DryRun: aws.Bool(true), + ReasonMessage: aws.String("String"), + } + resp, err := svc.CancelConversionTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelExportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelExportTaskInput{ + ExportTaskId: aws.String("String"), // Required + } + resp, err := svc.CancelExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelImportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CancelImportTaskInput{ + CancelReason: aws.String("String"), + DryRun: aws.Bool(true), + ImportTaskId: aws.String("String"), + } + resp, err := svc.CancelImportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CancelReservedInstancesListing() { + svc := ec2.New(session.New()) + + params := &ec2.CancelReservedInstancesListingInput{ + ReservedInstancesListingId: aws.String("String"), // Required + } + resp, err := svc.CancelReservedInstancesListing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelSpotFleetRequests() { + svc := ec2.New(session.New()) + + params := &ec2.CancelSpotFleetRequestsInput{ + SpotFleetRequestIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TerminateInstances: aws.Bool(true), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CancelSpotFleetRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CancelSpotInstanceRequests() { + svc := ec2.New(session.New()) + + params := &ec2.CancelSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CancelSpotInstanceRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ConfirmProductInstance() { + svc := ec2.New(session.New()) + + params := &ec2.ConfirmProductInstanceInput{ + InstanceId: aws.String("String"), // Required + ProductCode: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ConfirmProductInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CopyImage() { + svc := ec2.New(session.New()) + + params := &ec2.CopyImageInput{ + Name: aws.String("String"), // Required + SourceImageId: aws.String("String"), // Required + SourceRegion: aws.String("String"), // Required + ClientToken: aws.String("String"), + Description: aws.String("String"), + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + KmsKeyId: aws.String("String"), + } + resp, err := svc.CopyImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CopySnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.CopySnapshotInput{ + SourceRegion: aws.String("String"), // Required + SourceSnapshotId: aws.String("String"), // Required + Description: aws.String("String"), + DestinationRegion: aws.String("String"), + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + KmsKeyId: aws.String("String"), + PresignedUrl: aws.String("String"), + } + resp, err := svc.CopySnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateCustomerGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateCustomerGatewayInput{ + BgpAsn: aws.Int64(1), // Required + PublicIp: aws.String("String"), // Required + Type: aws.String("GatewayType"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateCustomerGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.CreateDhcpOptionsInput{ + DhcpConfigurations: []*ec2.NewDhcpConfiguration{ // Required + { // Required + Key: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CreateDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.CreateFlowLogsInput{ + DeliverLogsPermissionArn: aws.String("String"), // Required + LogGroupName: aws.String("String"), // Required + ResourceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + ResourceType: aws.String("FlowLogsResourceType"), // Required + TrafficType: aws.String("TrafficType"), // Required + ClientToken: aws.String("String"), + } + resp, err := svc.CreateFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateImage() { + svc := ec2.New(session.New()) + + params := &ec2.CreateImageInput{ + InstanceId: aws.String("String"), // Required + Name: aws.String("String"), // Required + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + NoReboot: aws.Bool(true), + } + resp, err := svc.CreateImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateInstanceExportTask() { + svc := ec2.New(session.New()) + + params := &ec2.CreateInstanceExportTaskInput{ + InstanceId: aws.String("String"), // Required + Description: aws.String("String"), + ExportToS3Task: &ec2.ExportToS3TaskSpecification{ + ContainerFormat: aws.String("ContainerFormat"), + DiskImageFormat: aws.String("DiskImageFormat"), + S3Bucket: aws.String("String"), + S3Prefix: aws.String("String"), + }, + TargetEnvironment: aws.String("ExportEnvironment"), + } + resp, err := svc.CreateInstanceExportTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateInternetGatewayInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.CreateInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.CreateKeyPairInput{ + KeyName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNatGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNatGatewayInput{ + AllocationId: aws.String("String"), // Required + SubnetId: aws.String("String"), // Required + ClientToken: aws.String("String"), + } + resp, err := svc.CreateNatGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkAcl() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkAclInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateNetworkAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkAclEntryInput{ + CidrBlock: aws.String("String"), // Required + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + Protocol: aws.String("String"), // Required + RuleAction: aws.String("RuleAction"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + IcmpTypeCode: &ec2.IcmpTypeCode{ + Code: aws.Int64(1), + Type: aws.Int64(1), + }, + PortRange: &ec2.PortRange{ + From: aws.Int64(1), + To: aws.Int64(1), + }, + } + resp, err := svc.CreateNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.CreateNetworkInterfaceInput{ + SubnetId: aws.String("String"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + } + resp, err := svc.CreateNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreatePlacementGroup() { + svc := ec2.New(session.New()) + + params := &ec2.CreatePlacementGroupInput{ + GroupName: aws.String("String"), // Required + Strategy: aws.String("PlacementStrategy"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreatePlacementGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateReservedInstancesListing() { + svc := ec2.New(session.New()) + + params := &ec2.CreateReservedInstancesListingInput{ + ClientToken: aws.String("String"), // Required + InstanceCount: aws.Int64(1), // Required + PriceSchedules: []*ec2.PriceScheduleSpecification{ // Required + { // Required + CurrencyCode: aws.String("CurrencyCodeValues"), + Price: aws.Float64(1.0), + Term: aws.Int64(1), + }, + // More values... + }, + ReservedInstancesId: aws.String("String"), // Required + } + resp, err := svc.CreateReservedInstancesListing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateRoute() { + svc := ec2.New(session.New()) + + params := &ec2.CreateRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + GatewayId: aws.String("String"), + InstanceId: aws.String("String"), + NatGatewayId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.CreateRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.CreateRouteTableInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.CreateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSecurityGroup() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSecurityGroupInput{ + Description: aws.String("String"), // Required + GroupName: aws.String("String"), // Required + DryRun: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CreateSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSnapshotInput{ + VolumeId: aws.String("String"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSpotDatafeedSubscriptionInput{ + Bucket: aws.String("String"), // Required + DryRun: aws.Bool(true), + Prefix: aws.String("String"), + } + resp, err := svc.CreateSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateSubnet() { + svc := ec2.New(session.New()) + + params := &ec2.CreateSubnetInput{ + CidrBlock: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateSubnet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateTags() { + svc := ec2.New(session.New()) + + params := &ec2.CreateTagsInput{ + Resources: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*ec2.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVolume() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVolumeInput{ + AvailabilityZone: aws.String("String"), // Required + DryRun: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + KmsKeyId: aws.String("String"), + Size: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeType: aws.String("VolumeType"), + } + resp, err := svc.CreateVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateVpc() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcInput{ + CidrBlock: aws.String("String"), // Required + DryRun: aws.Bool(true), + InstanceTenancy: aws.String("Tenancy"), + } + resp, err := svc.CreateVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpcEndpoint() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcEndpointInput{ + ServiceName: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + PolicyDocument: aws.String("String"), + RouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateVpcEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpcPeeringConnectionInput{ + DryRun: aws.Bool(true), + PeerOwnerId: aws.String("String"), + PeerVpcId: aws.String("String"), + VpcId: aws.String("String"), + } + resp, err := svc.CreateVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_CreateVpnConnection() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnConnectionInput{ + CustomerGatewayId: aws.String("String"), // Required + Type: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + Options: &ec2.VpnConnectionOptionsSpecification{ + StaticRoutesOnly: aws.Bool(true), + }, + } + resp, err := svc.CreateVpnConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnConnectionRoute() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + VpnConnectionId: aws.String("String"), // Required + } + resp, err := svc.CreateVpnConnectionRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_CreateVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.CreateVpnGatewayInput{ + Type: aws.String("GatewayType"), // Required + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.CreateVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteCustomerGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteCustomerGatewayInput{ + CustomerGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteCustomerGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteDhcpOptionsInput{ + DhcpOptionsId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteFlowLogsInput{ + FlowLogIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DeleteFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteKeyPairInput{ + KeyName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNatGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNatGatewayInput{ + NatGatewayId: aws.String("String"), // Required + } + resp, err := svc.DeleteNatGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkAcl() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkAclInput{ + NetworkAclId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkAclEntryInput{ + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteNetworkInterfaceInput{ + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeletePlacementGroup() { + svc := ec2.New(session.New()) + + params := &ec2.DeletePlacementGroupInput{ + GroupName: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeletePlacementGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteRoute() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteRouteTableInput{ + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteSecurityGroup() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSecurityGroupInput{ + DryRun: aws.Bool(true), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + } + resp, err := svc.DeleteSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSnapshotInput{ + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSpotDatafeedSubscriptionInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteSubnet() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteSubnetInput{ + SubnetId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteSubnet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteTags() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteTagsInput{ + Resources: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Tags: []*ec2.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVolume() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVolumeInput{ + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpc() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpcEndpoints() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcEndpointsInput{ + VpcEndpointIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpcEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeleteVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnConnection() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnConnectionInput{ + VpnConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpnConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnConnectionRoute() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + VpnConnectionId: aws.String("String"), // Required + } + resp, err := svc.DeleteVpnConnectionRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DeleteVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DeleteVpnGatewayInput{ + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeleteVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DeregisterImage() { + svc := ec2.New(session.New()) + + params := &ec2.DeregisterImageInput{ + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DeregisterImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAccountAttributes() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAccountAttributesInput{ + AttributeNames: []*string{ + aws.String("AccountAttributeName"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeAccountAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAddressesInput{ + AllocationIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + PublicIps: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeAvailabilityZones() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeAvailabilityZonesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ZoneNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeAvailabilityZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeBundleTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeBundleTasksInput{ + BundleIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeBundleTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeClassicLinkInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeClassicLinkInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeClassicLinkInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeConversionTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeConversionTasksInput{ + ConversionTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeConversionTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeCustomerGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeCustomerGatewaysInput{ + CustomerGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeCustomerGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeDhcpOptions() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.DescribeDhcpOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeExportTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeExportTasksInput{ + ExportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeExportTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeFlowLogs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeFlowLogsInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + FlowLogIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeFlowLogs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeHosts() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeHostsInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + HostIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeIdFormat() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeIdFormatInput{ + Resource: aws.String("String"), + } + resp, err := svc.DescribeIdFormat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImageAttributeInput{ + Attribute: aws.String("ImageAttributeName"), // Required + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImages() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImagesInput{ + DryRun: aws.Bool(true), + ExecutableUsers: []*string{ + aws.String("String"), // Required + // More values... + }, + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImageIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Owners: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeImages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImportImageTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImportImageTasksInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeImportImageTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeImportSnapshotTasks() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeImportSnapshotTasksInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ImportTaskIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeImportSnapshotTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstanceAttributeInput{ + Attribute: aws.String("InstanceAttributeName"), // Required + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstanceStatus() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstanceStatusInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludeAllInstances: aws.Bool(true), + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeInstanceStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeInternetGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeInternetGatewaysInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InternetGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeInternetGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeKeyPairs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeKeyPairsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + KeyNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeKeyPairs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeMovingAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeMovingAddressesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + PublicIps: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeMovingAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNatGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNatGatewaysInput{ + Filter: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NatGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + NextToken: aws.String("String"), + } + resp, err := svc.DescribeNatGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkAcls() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkAclsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NetworkAclIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeNetworkAcls(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + Attribute: aws.String("NetworkInterfaceAttribute"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeNetworkInterfaces() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeNetworkInterfacesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NetworkInterfaceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeNetworkInterfaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribePlacementGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribePlacementGroupsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribePlacementGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribePrefixLists() { + svc := ec2.New(session.New()) + + params := &ec2.DescribePrefixListsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + PrefixListIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribePrefixLists(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeRegions() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeRegionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + RegionNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRegions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + OfferingType: aws.String("OfferingTypeValues"), + ReservedInstancesIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesListings() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesListingsInput{ + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ReservedInstancesId: aws.String("String"), + ReservedInstancesListingId: aws.String("String"), + } + resp, err := svc.DescribeReservedInstancesListings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesModifications() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesModificationsInput{ + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + NextToken: aws.String("String"), + ReservedInstancesModificationIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeReservedInstancesModifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeReservedInstancesOfferings() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeReservedInstancesOfferingsInput{ + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludeMarketplace: aws.Bool(true), + InstanceTenancy: aws.String("Tenancy"), + InstanceType: aws.String("InstanceType"), + MaxDuration: aws.Int64(1), + MaxInstanceCount: aws.Int64(1), + MaxResults: aws.Int64(1), + MinDuration: aws.Int64(1), + NextToken: aws.String("String"), + OfferingType: aws.String("OfferingTypeValues"), + ProductDescription: aws.String("RIProductDescription"), + ReservedInstancesOfferingIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeReservedInstancesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeRouteTables() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeRouteTablesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + RouteTableIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeRouteTables(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeScheduledInstanceAvailability() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeScheduledInstanceAvailabilityInput{ + FirstSlotStartTimeRange: &ec2.SlotDateTimeRangeRequest{ // Required + EarliestTime: aws.Time(time.Now()), // Required + LatestTime: aws.Time(time.Now()), // Required + }, + Recurrence: &ec2.ScheduledInstanceRecurrenceRequest{ // Required + Frequency: aws.String("String"), + Interval: aws.Int64(1), + OccurrenceDays: []*int64{ + aws.Int64(1), // Required + // More values... + }, + OccurrenceRelativeToEnd: aws.Bool(true), + OccurrenceUnit: aws.String("String"), + }, + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + MaxSlotDurationInHours: aws.Int64(1), + MinSlotDurationInHours: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeScheduledInstanceAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeScheduledInstancesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ScheduledInstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SlotStartTimeRange: &ec2.SlotStartTimeRangeRequest{ + EarliestTime: aws.Time(time.Now()), + LatestTime: aws.Time(time.Now()), + }, + } + resp, err := svc.DescribeScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSecurityGroups() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSecurityGroupsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + GroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSnapshotAttributeInput{ + Attribute: aws.String("SnapshotAttributeName"), // Required + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeSnapshots() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSnapshotsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + OwnerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + RestorableByUserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotDatafeedSubscription() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotDatafeedSubscriptionInput{ + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeSpotDatafeedSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetInstances() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetInstancesInput{ + SpotFleetRequestId: aws.String("String"), // Required + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeSpotFleetInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetRequestHistory() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetRequestHistoryInput{ + SpotFleetRequestId: aws.String("String"), // Required + StartTime: aws.Time(time.Now()), // Required + DryRun: aws.Bool(true), + EventType: aws.String("EventType"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeSpotFleetRequestHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotFleetRequests() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotFleetRequestsInput{ + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + SpotFleetRequestIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSpotFleetRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotInstanceRequests() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotInstanceRequestsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SpotInstanceRequestIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSpotInstanceRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DescribeSpotPriceHistory() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSpotPriceHistoryInput{ + AvailabilityZone: aws.String("String"), + DryRun: aws.Bool(true), + EndTime: aws.Time(time.Now()), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + InstanceTypes: []*string{ + aws.String("InstanceType"), // Required + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ProductDescriptions: []*string{ + aws.String("String"), // Required + // More values... + }, + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeSpotPriceHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeSubnets() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeSubnetsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + SubnetIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeTags() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeTagsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumeAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumeAttributeInput{ + VolumeId: aws.String("String"), // Required + Attribute: aws.String("VolumeAttributeName"), + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeVolumeAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumeStatus() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumeStatusInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumeStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVolumes() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVolumesInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcAttributeInput{ + Attribute: aws.String("VpcAttributeName"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DescribeVpcAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcClassicLinkInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcClassicLinkDnsSupportInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + VpcIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcEndpointServices() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcEndpointServicesInput{ + DryRun: aws.Bool(true), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.DescribeVpcEndpointServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcEndpoints() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcEndpointsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + VpcEndpointIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcEndpoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcPeeringConnections() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcPeeringConnectionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcPeeringConnectionIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeVpcPeeringConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpcs() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpcsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpcIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpcs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpnConnections() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpnConnectionsInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + VpnConnectionIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpnConnections(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DescribeVpnGateways() { + svc := ec2.New(session.New()) + + params := &ec2.DescribeVpnGatewaysInput{ + DryRun: aws.Bool(true), + Filters: []*ec2.Filter{ + { // Required + Name: aws.String("String"), + Values: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + VpnGatewayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVpnGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachClassicLinkVpc() { + svc := ec2.New(session.New()) + + params := &ec2.DetachClassicLinkVpcInput{ + InstanceId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachClassicLinkVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachInternetGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DetachInternetGatewayInput{ + InternetGatewayId: aws.String("String"), // Required + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachInternetGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachNetworkInterface() { + svc := ec2.New(session.New()) + + params := &ec2.DetachNetworkInterfaceInput{ + AttachmentId: aws.String("String"), // Required + DryRun: aws.Bool(true), + Force: aws.Bool(true), + } + resp, err := svc.DetachNetworkInterface(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DetachVolume() { + svc := ec2.New(session.New()) + + params := &ec2.DetachVolumeInput{ + VolumeId: aws.String("String"), // Required + Device: aws.String("String"), + DryRun: aws.Bool(true), + Force: aws.Bool(true), + InstanceId: aws.String("String"), + } + resp, err := svc.DetachVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DetachVpnGateway() { + svc := ec2.New(session.New()) + + params := &ec2.DetachVpnGatewayInput{ + VpcId: aws.String("String"), // Required + VpnGatewayId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DetachVpnGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVgwRoutePropagation() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVgwRoutePropagationInput{ + GatewayId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + } + resp, err := svc.DisableVgwRoutePropagation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisableVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVpcClassicLinkInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DisableVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_DisableVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.DisableVpcClassicLinkDnsSupportInput{ + VpcId: aws.String("String"), + } + resp, err := svc.DisableVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisassociateAddress() { + svc := ec2.New(session.New()) + + params := &ec2.DisassociateAddressInput{ + AssociationId: aws.String("String"), + DryRun: aws.Bool(true), + PublicIp: aws.String("String"), + } + resp, err := svc.DisassociateAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_DisassociateRouteTable() { + svc := ec2.New(session.New()) + + params := &ec2.DisassociateRouteTableInput{ + AssociationId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.DisassociateRouteTable(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVgwRoutePropagation() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVgwRoutePropagationInput{ + GatewayId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + } + resp, err := svc.EnableVgwRoutePropagation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_EnableVolumeIO() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVolumeIOInput{ + VolumeId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.EnableVolumeIO(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVpcClassicLink() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVpcClassicLinkInput{ + VpcId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.EnableVpcClassicLink(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_EnableVpcClassicLinkDnsSupport() { + svc := ec2.New(session.New()) + + params := &ec2.EnableVpcClassicLinkDnsSupportInput{ + VpcId: aws.String("String"), + } + resp, err := svc.EnableVpcClassicLinkDnsSupport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_GetConsoleOutput() { + svc := ec2.New(session.New()) + + params := &ec2.GetConsoleOutputInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.GetConsoleOutput(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_GetPasswordData() { + svc := ec2.New(session.New()) + + params := &ec2.GetPasswordDataInput{ + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.GetPasswordData(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportImage() { + svc := ec2.New(session.New()) + + params := &ec2.ImportImageInput{ + Architecture: aws.String("String"), + ClientData: &ec2.ClientData{ + Comment: aws.String("String"), + UploadEnd: aws.Time(time.Now()), + UploadSize: aws.Float64(1.0), + UploadStart: aws.Time(time.Now()), + }, + ClientToken: aws.String("String"), + Description: aws.String("String"), + DiskContainers: []*ec2.ImageDiskContainer{ + { // Required + Description: aws.String("String"), + DeviceName: aws.String("String"), + Format: aws.String("String"), + SnapshotId: aws.String("String"), + Url: aws.String("String"), + UserBucket: &ec2.UserBucket{ + S3Bucket: aws.String("String"), + S3Key: aws.String("String"), + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + Hypervisor: aws.String("String"), + LicenseType: aws.String("String"), + Platform: aws.String("String"), + RoleName: aws.String("String"), + } + resp, err := svc.ImportImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ImportInstance() { + svc := ec2.New(session.New()) + + params := &ec2.ImportInstanceInput{ + Platform: aws.String("PlatformValues"), // Required + Description: aws.String("String"), + DiskImages: []*ec2.DiskImage{ + { // Required + Description: aws.String("String"), + Image: &ec2.DiskImageDetail{ + Bytes: aws.Int64(1), // Required + Format: aws.String("DiskImageFormat"), // Required + ImportManifestUrl: aws.String("String"), // Required + }, + Volume: &ec2.VolumeDetail{ + Size: aws.Int64(1), // Required + }, + }, + // More values... + }, + DryRun: aws.Bool(true), + LaunchSpecification: &ec2.ImportInstanceLaunchSpecification{ + AdditionalInfo: aws.String("String"), + Architecture: aws.String("ArchitectureValues"), + GroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"), + InstanceType: aws.String("InstanceType"), + Monitoring: aws.Bool(true), + Placement: &ec2.Placement{ + Affinity: aws.String("String"), + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + HostId: aws.String("String"), + Tenancy: aws.String("Tenancy"), + }, + PrivateIpAddress: aws.String("String"), + SubnetId: aws.String("String"), + UserData: &ec2.UserData{ + Data: aws.String("String"), + }, + }, + } + resp, err := svc.ImportInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ImportKeyPair() { + svc := ec2.New(session.New()) + + params := &ec2.ImportKeyPairInput{ + KeyName: aws.String("String"), // Required + PublicKeyMaterial: []byte("PAYLOAD"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ImportKeyPair(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ImportSnapshot() { + svc := ec2.New(session.New()) + + params := &ec2.ImportSnapshotInput{ + ClientData: &ec2.ClientData{ + Comment: aws.String("String"), + UploadEnd: aws.Time(time.Now()), + UploadSize: aws.Float64(1.0), + UploadStart: aws.Time(time.Now()), + }, + ClientToken: aws.String("String"), + Description: aws.String("String"), + DiskContainer: &ec2.SnapshotDiskContainer{ + Description: aws.String("String"), + Format: aws.String("String"), + Url: aws.String("String"), + UserBucket: &ec2.UserBucket{ + S3Bucket: aws.String("String"), + S3Key: aws.String("String"), + }, + }, + DryRun: aws.Bool(true), + RoleName: aws.String("String"), + } + resp, err := svc.ImportSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ImportVolume() { + svc := ec2.New(session.New()) + + params := &ec2.ImportVolumeInput{ + AvailabilityZone: aws.String("String"), // Required + Image: &ec2.DiskImageDetail{ // Required + Bytes: aws.Int64(1), // Required + Format: aws.String("DiskImageFormat"), // Required + ImportManifestUrl: aws.String("String"), // Required + }, + Volume: &ec2.VolumeDetail{ // Required + Size: aws.Int64(1), // Required + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.ImportVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyHosts() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyHostsInput{ + AutoPlacement: aws.String("AutoPlacement"), // Required + HostIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyIdFormat() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyIdFormatInput{ + Resource: aws.String("String"), // Required + UseLongIds: aws.Bool(true), // Required + } + resp, err := svc.ModifyIdFormat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ModifyImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyImageAttributeInput{ + ImageId: aws.String("String"), // Required + Attribute: aws.String("String"), + Description: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + DryRun: aws.Bool(true), + LaunchPermission: &ec2.LaunchPermissionModifications{ + Add: []*ec2.LaunchPermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + Remove: []*ec2.LaunchPermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + OperationType: aws.String("OperationType"), + ProductCodes: []*string{ + aws.String("String"), // Required + // More values... + }, + UserGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + UserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Value: aws.String("String"), + } + resp, err := svc.ModifyImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String("String"), // Required + Attribute: aws.String("InstanceAttributeName"), + BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsInstanceBlockDeviceSpecification{ + DeleteOnTermination: aws.Bool(true), + VolumeId: aws.String("String"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... 
+ }, + DisableApiTermination: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + DryRun: aws.Bool(true), + EbsOptimized: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + InstanceInitiatedShutdownBehavior: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + InstanceType: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + Kernel: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + Ramdisk: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SriovNetSupport: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + UserData: &ec2.BlobAttributeValue{ + Value: []byte("PAYLOAD"), + }, + Value: aws.String("String"), + } + resp, err := svc.ModifyInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyInstancePlacement() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyInstancePlacementInput{ + InstanceId: aws.String("String"), // Required + Affinity: aws.String("Affinity"), + HostId: aws.String("String"), + Tenancy: aws.String("HostTenancy"), + } + resp, err := svc.ModifyInstancePlacement(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ModifyNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + Attachment: &ec2.NetworkInterfaceAttachmentChanges{ + AttachmentId: aws.String("String"), + DeleteOnTermination: aws.Bool(true), + }, + Description: &ec2.AttributeValue{ + Value: aws.String("String"), + }, + DryRun: aws.Bool(true), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifyNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyReservedInstances() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyReservedInstancesInput{ + ReservedInstancesIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TargetConfigurations: []*ec2.ReservedInstancesConfiguration{ // Required + { // Required + AvailabilityZone: aws.String("String"), + InstanceCount: aws.Int64(1), + InstanceType: aws.String("InstanceType"), + Platform: aws.String("String"), + }, + // More values... + }, + ClientToken: aws.String("String"), + } + resp, err := svc.ModifyReservedInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ModifySnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySnapshotAttributeInput{ + SnapshotId: aws.String("String"), // Required + Attribute: aws.String("SnapshotAttributeName"), + CreateVolumePermission: &ec2.CreateVolumePermissionModifications{ + Add: []*ec2.CreateVolumePermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + Remove: []*ec2.CreateVolumePermission{ + { // Required + Group: aws.String("PermissionGroup"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + DryRun: aws.Bool(true), + GroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + OperationType: aws.String("OperationType"), + UserIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifySnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifySpotFleetRequest() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySpotFleetRequestInput{ + SpotFleetRequestId: aws.String("String"), // Required + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + TargetCapacity: aws.Int64(1), + } + resp, err := svc.ModifySpotFleetRequest(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ModifySubnetAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifySubnetAttributeInput{ + SubnetId: aws.String("String"), // Required + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifySubnetAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVolumeAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVolumeAttributeInput{ + VolumeId: aws.String("String"), // Required + AutoEnableIO: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + DryRun: aws.Bool(true), + } + resp, err := svc.ModifyVolumeAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcAttributeInput{ + VpcId: aws.String("String"), // Required + EnableDnsHostnames: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + EnableDnsSupport: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + } + resp, err := svc.ModifyVpcAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ModifyVpcEndpoint() { + svc := ec2.New(session.New()) + + params := &ec2.ModifyVpcEndpointInput{ + VpcEndpointId: aws.String("String"), // Required + AddRouteTableIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + DryRun: aws.Bool(true), + PolicyDocument: aws.String("String"), + RemoveRouteTableIds: []*string{ + aws.String("String"), // Required + // More values... + }, + ResetPolicy: aws.Bool(true), + } + resp, err := svc.ModifyVpcEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_MonitorInstances() { + svc := ec2.New(session.New()) + + params := &ec2.MonitorInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.MonitorInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_MoveAddressToVpc() { + svc := ec2.New(session.New()) + + params := &ec2.MoveAddressToVpcInput{ + PublicIp: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.MoveAddressToVpc(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_PurchaseReservedInstancesOffering() { + svc := ec2.New(session.New()) + + params := &ec2.PurchaseReservedInstancesOfferingInput{ + InstanceCount: aws.Int64(1), // Required + ReservedInstancesOfferingId: aws.String("String"), // Required + DryRun: aws.Bool(true), + LimitPrice: &ec2.ReservedInstanceLimitPrice{ + Amount: aws.Float64(1.0), + CurrencyCode: aws.String("CurrencyCodeValues"), + }, + } + resp, err := svc.PurchaseReservedInstancesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_PurchaseScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.PurchaseScheduledInstancesInput{ + PurchaseRequests: []*ec2.PurchaseRequest{ // Required + { // Required + InstanceCount: aws.Int64(1), + PurchaseToken: aws.String("String"), + }, + // More values... + }, + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.PurchaseScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RebootInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RebootInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.RebootInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RegisterImage() { + svc := ec2.New(session.New()) + + params := &ec2.RegisterImageInput{ + Name: aws.String("String"), // Required + Architecture: aws.String("ArchitectureValues"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + DryRun: aws.Bool(true), + ImageLocation: aws.String("String"), + KernelId: aws.String("String"), + RamdiskId: aws.String("String"), + RootDeviceName: aws.String("String"), + SriovNetSupport: aws.String("String"), + VirtualizationType: aws.String("String"), + } + resp, err := svc.RegisterImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RejectVpcPeeringConnection() { + svc := ec2.New(session.New()) + + params := &ec2.RejectVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.RejectVpcPeeringConnection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ReleaseAddress() { + svc := ec2.New(session.New()) + + params := &ec2.ReleaseAddressInput{ + AllocationId: aws.String("String"), + DryRun: aws.Bool(true), + PublicIp: aws.String("String"), + } + resp, err := svc.ReleaseAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReleaseHosts() { + svc := ec2.New(session.New()) + + params := &ec2.ReleaseHostsInput{ + HostIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ReleaseHosts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceNetworkAclAssociation() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceNetworkAclAssociationInput{ + AssociationId: aws.String("String"), // Required + NetworkAclId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ReplaceNetworkAclAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ReplaceNetworkAclEntry() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceNetworkAclEntryInput{ + CidrBlock: aws.String("String"), // Required + Egress: aws.Bool(true), // Required + NetworkAclId: aws.String("String"), // Required + Protocol: aws.String("String"), // Required + RuleAction: aws.String("RuleAction"), // Required + RuleNumber: aws.Int64(1), // Required + DryRun: aws.Bool(true), + IcmpTypeCode: &ec2.IcmpTypeCode{ + Code: aws.Int64(1), + Type: aws.Int64(1), + }, + PortRange: &ec2.PortRange{ + From: aws.Int64(1), + To: aws.Int64(1), + }, + } + resp, err := svc.ReplaceNetworkAclEntry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReplaceRoute() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceRouteInput{ + DestinationCidrBlock: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + GatewayId: aws.String("String"), + InstanceId: aws.String("String"), + NatGatewayId: aws.String("String"), + NetworkInterfaceId: aws.String("String"), + VpcPeeringConnectionId: aws.String("String"), + } + resp, err := svc.ReplaceRoute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ReplaceRouteTableAssociation() { + svc := ec2.New(session.New()) + + params := &ec2.ReplaceRouteTableAssociationInput{ + AssociationId: aws.String("String"), // Required + RouteTableId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ReplaceRouteTableAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ReportInstanceStatus() { + svc := ec2.New(session.New()) + + params := &ec2.ReportInstanceStatusInput{ + Instances: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + ReasonCodes: []*string{ // Required + aws.String("ReportInstanceReasonCodes"), // Required + // More values... + }, + Status: aws.String("ReportStatusType"), // Required + Description: aws.String("String"), + DryRun: aws.Bool(true), + EndTime: aws.Time(time.Now()), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.ReportInstanceStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RequestSpotFleet() { + svc := ec2.New(session.New()) + + params := &ec2.RequestSpotFleetInput{ + SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{ // Required + IamFleetRole: aws.String("String"), // Required + LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{ // Required + { // Required + AddressingType: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + ImageId: aws.String("String"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.SpotFleetMonitoring{ + Enabled: aws.Bool(true), + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... 
+ }, + Placement: &ec2.SpotPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroups: []*ec2.GroupIdentifier{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + }, + // More values... + }, + SpotPrice: aws.String("String"), + SubnetId: aws.String("String"), + UserData: aws.String("String"), + WeightedCapacity: aws.Float64(1.0), + }, + // More values... + }, + SpotPrice: aws.String("String"), // Required + TargetCapacity: aws.Int64(1), // Required + AllocationStrategy: aws.String("AllocationStrategy"), + ClientToken: aws.String("String"), + ExcessCapacityTerminationPolicy: aws.String("ExcessCapacityTerminationPolicy"), + TerminateInstancesWithExpiration: aws.Bool(true), + ValidFrom: aws.Time(time.Now()), + ValidUntil: aws.Time(time.Now()), + }, + DryRun: aws.Bool(true), + } + resp, err := svc.RequestSpotFleet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RequestSpotInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RequestSpotInstancesInput{ + SpotPrice: aws.String("String"), // Required + AvailabilityZoneGroup: aws.String("String"), + BlockDurationMinutes: aws.Int64(1), + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + InstanceCount: aws.Int64(1), + LaunchGroup: aws.String("String"), + LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ + AddressingType: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + ImageId: aws.String("String"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(true), // Required + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... 
+ }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.SpotPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + }, + Type: aws.String("SpotInstanceType"), + ValidFrom: aws.Time(time.Now()), + ValidUntil: aws.Time(time.Now()), + } + resp, err := svc.RequestSpotInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetImageAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetImageAttributeInput{ + Attribute: aws.String("ResetImageAttributeName"), // Required + ImageId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetImageAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetInstanceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetInstanceAttributeInput{ + Attribute: aws.String("InstanceAttributeName"), // Required + InstanceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetInstanceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_ResetNetworkInterfaceAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String("String"), // Required + DryRun: aws.Bool(true), + SourceDestCheck: aws.String("String"), + } + resp, err := svc.ResetNetworkInterfaceAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_ResetSnapshotAttribute() { + svc := ec2.New(session.New()) + + params := &ec2.ResetSnapshotAttributeInput{ + Attribute: aws.String("SnapshotAttributeName"), // Required + SnapshotId: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.ResetSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RestoreAddressToClassic() { + svc := ec2.New(session.New()) + + params := &ec2.RestoreAddressToClassicInput{ + PublicIp: aws.String("String"), // Required + DryRun: aws.Bool(true), + } + resp, err := svc.RestoreAddressToClassic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_RevokeSecurityGroupEgress() { + svc := ec2.New(session.New()) + + params := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: aws.String("String"), // Required + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... + }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.RevokeSecurityGroupEgress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RevokeSecurityGroupIngress() { + svc := ec2.New(session.New()) + + params := &ec2.RevokeSecurityGroupIngressInput{ + CidrIp: aws.String("String"), + DryRun: aws.Bool(true), + FromPort: aws.Int64(1), + GroupId: aws.String("String"), + GroupName: aws.String("String"), + IpPermissions: []*ec2.IpPermission{ + { // Required + FromPort: aws.Int64(1), + IpProtocol: aws.String("String"), + IpRanges: []*ec2.IpRange{ + { // Required + CidrIp: aws.String("String"), + }, + // More values... + }, + PrefixListIds: []*ec2.PrefixListId{ + { // Required + PrefixListId: aws.String("String"), + }, + // More values... 
+ }, + ToPort: aws.Int64(1), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + { // Required + GroupId: aws.String("String"), + GroupName: aws.String("String"), + UserId: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + IpProtocol: aws.String("String"), + SourceSecurityGroupName: aws.String("String"), + SourceSecurityGroupOwnerId: aws.String("String"), + ToPort: aws.Int64(1), + } + resp, err := svc.RevokeSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RunInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RunInstancesInput{ + ImageId: aws.String("String"), // Required + MaxCount: aws.Int64(1), // Required + MinCount: aws.Int64(1), // Required + AdditionalInfo: aws.String("String"), + BlockDeviceMappings: []*ec2.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... 
+ }, + ClientToken: aws.String("String"), + DisableApiTermination: aws.Bool(true), + DryRun: aws.Bool(true), + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + InstanceInitiatedShutdownBehavior: aws.String("ShutdownBehavior"), + InstanceType: aws.String("InstanceType"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(true), // Required + }, + NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddresses: []*ec2.PrivateIpAddressSpecification{ + { // Required + PrivateIpAddress: aws.String("String"), // Required + Primary: aws.Bool(true), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... + }, + Placement: &ec2.Placement{ + Affinity: aws.String("String"), + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + HostId: aws.String("String"), + Tenancy: aws.String("Tenancy"), + }, + PrivateIpAddress: aws.String("String"), + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + } + resp, err := svc.RunInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_RunScheduledInstances() { + svc := ec2.New(session.New()) + + params := &ec2.RunScheduledInstancesInput{ + LaunchSpecification: &ec2.ScheduledInstancesLaunchSpecification{ // Required + ImageId: aws.String("String"), // Required + BlockDeviceMappings: []*ec2.ScheduledInstancesBlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &ec2.ScheduledInstancesEbs{ + DeleteOnTermination: aws.Bool(true), + Encrypted: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("String"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + IamInstanceProfile: &ec2.ScheduledInstancesIamInstanceProfile{ + Arn: aws.String("String"), + Name: aws.String("String"), + }, + InstanceType: aws.String("String"), + KernelId: aws.String("String"), + KeyName: aws.String("String"), + Monitoring: &ec2.ScheduledInstancesMonitoring{ + Enabled: aws.Bool(true), + }, + NetworkInterfaces: []*ec2.ScheduledInstancesNetworkInterface{ + { // Required + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + Description: aws.String("String"), + DeviceIndex: aws.Int64(1), + Groups: []*string{ + aws.String("String"), // Required + // More values... + }, + NetworkInterfaceId: aws.String("String"), + PrivateIpAddress: aws.String("String"), + PrivateIpAddressConfigs: []*ec2.ScheduledInstancesPrivateIpAddressConfig{ + { // Required + Primary: aws.Bool(true), + PrivateIpAddress: aws.String("String"), + }, + // More values... + }, + SecondaryPrivateIpAddressCount: aws.Int64(1), + SubnetId: aws.String("String"), + }, + // More values... 
+ }, + Placement: &ec2.ScheduledInstancesPlacement{ + AvailabilityZone: aws.String("String"), + GroupName: aws.String("String"), + }, + RamdiskId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SubnetId: aws.String("String"), + UserData: aws.String("String"), + }, + ScheduledInstanceId: aws.String("String"), // Required + ClientToken: aws.String("String"), + DryRun: aws.Bool(true), + InstanceCount: aws.Int64(1), + } + resp, err := svc.RunScheduledInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_StartInstances() { + svc := ec2.New(session.New()) + + params := &ec2.StartInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + AdditionalInfo: aws.String("String"), + DryRun: aws.Bool(true), + } + resp, err := svc.StartInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_StopInstances() { + svc := ec2.New(session.New()) + + params := &ec2.StopInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + Force: aws.Bool(true), + } + resp, err := svc.StopInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEC2_TerminateInstances() { + svc := ec2.New(session.New()) + + params := &ec2.TerminateInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.TerminateInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_UnassignPrivateIpAddresses() { + svc := ec2.New(session.New()) + + params := &ec2.UnassignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String("String"), // Required + PrivateIpAddresses: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.UnassignPrivateIpAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEC2_UnmonitorInstances() { + svc := ec2.New(session.New()) + + params := &ec2.UnmonitorInstancesInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + DryRun: aws.Bool(true), + } + resp, err := svc.UnmonitorInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ec2 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity +// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your +// need to invest in hardware up front, so you can develop and deploy applications +// faster. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type EC2 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ec2" + +// New creates a new instance of the EC2 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a EC2 client from just a session. 
+// svc := ec2.New(mySession) +// +// // Create a EC2 client with additional configuration +// svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EC2 { + svc := &EC2{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-10-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a EC2 operation and runs any +// custom request initialization. 
+func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ec2/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,761 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ec2 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeBundleTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "BundleTasks[].State", + Expected: "complete", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "BundleTasks[].State", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error { + waiterCfg 
:= waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "completed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ConversionTasks[].State", + Expected: "cancelling", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeConversionTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ConversionTasks[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCustomerGateways", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CustomerGateways[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CustomerGateways[].State", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CustomerGateways[].State", + Expected: "deleting", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeExportTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: 
"ExportTasks[].State", + Expected: "cancelled", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeExportTasks", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ExportTasks[].State", + Expected: "completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeImages", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Images[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Images[].State", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 5, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidInstanceIDNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "running", + }, + { + State: "failure", + 
Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "shutting-down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceStatus", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStatuses[].InstanceStatus.Status", + Expected: "ok", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: 
"failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeKeyPairs", + Delay: 5, + MaxAttempts: 6, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "length(KeyPairs[].KeyName) > `0`", + Expected: true, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidKeyPairNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeNetworkInterfaces", + Delay: 20, + MaxAttempts: 10, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "NetworkInterfaces[].Status", + Expected: "available", + }, + { + State: "failure", + Matcher: "error", + Argument: "", + Expected: "InvalidNetworkInterfaceIDNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error { + waiterCfg := waiter.Config{ + Operation: "GetPasswordData", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(PasswordData) > `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error { + waiterCfg := 
waiter.Config{ + Operation: "DescribeSnapshots", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Snapshots[].State", + Expected: "completed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSpotInstanceRequests", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "fulfilled", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "schedule-expired", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "canceled-before-fulfillment", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "bad-parameters", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "SpotInstanceRequests[].Status.Code", + Expected: "system-error", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeSubnets", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Subnets[].State", + Expected: "available", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceStatus", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ 
+ { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStatuses[].SystemStatus.Status", + Expected: "ok", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "deleted", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "InvalidVolumeNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVolumes", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Volumes[].State", + Expected: "in-use", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpcs", + Delay: 15, + MaxAttempts: 40, + Acceptors: 
[]waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Vpcs[].State", + Expected: "available", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpnConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VpnConnections[].State", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpnConnections", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VpnConnections[].State", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "VpnConnections[].State", + Expected: "pending", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1433 @@ +// THIS FILE IS AUTOMATICALLY 
GENERATED. DO NOT EDIT. + +// Package ecr provides a client for Amazon EC2 Container Registry. +package ecr + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability" + +// BatchCheckLayerAvailabilityRequest generates a request for the BatchCheckLayerAvailability operation. +func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabilityInput) (req *request.Request, output *BatchCheckLayerAvailabilityOutput) { + op := &request.Operation{ + Name: opBatchCheckLayerAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchCheckLayerAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchCheckLayerAvailabilityOutput{} + req.Data = output + return +} + +// Check the availability of multiple image layers in a specified registry and +// repository. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInput) (*BatchCheckLayerAvailabilityOutput, error) { + req, out := c.BatchCheckLayerAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opBatchDeleteImage = "BatchDeleteImage" + +// BatchDeleteImageRequest generates a request for the BatchDeleteImage operation. +func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *request.Request, output *BatchDeleteImageOutput) { + op := &request.Operation{ + Name: opBatchDeleteImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDeleteImageInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchDeleteImageOutput{} + req.Data = output + return +} + +// Deletes a list of specified images within a specified repository. 
Images +// are specified with either imageTag or imageDigest. +func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageOutput, error) { + req, out := c.BatchDeleteImageRequest(input) + err := req.Send() + return out, err +} + +const opBatchGetImage = "BatchGetImage" + +// BatchGetImageRequest generates a request for the BatchGetImage operation. +func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Request, output *BatchGetImageOutput) { + op := &request.Operation{ + Name: opBatchGetImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetImageInput{} + } + + req = c.newRequest(op, input, output) + output = &BatchGetImageOutput{} + req.Data = output + return +} + +// Gets detailed information for specified images within a specified repository. +// Images are specified with either imageTag or imageDigest. +func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) { + req, out := c.BatchGetImageRequest(input) + err := req.Send() + return out, err +} + +const opCompleteLayerUpload = "CompleteLayerUpload" + +// CompleteLayerUploadRequest generates a request for the CompleteLayerUpload operation. +func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *request.Request, output *CompleteLayerUploadOutput) { + op := &request.Operation{ + Name: opCompleteLayerUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteLayerUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteLayerUploadOutput{} + req.Data = output + return +} + +// Inform Amazon ECR that the image layer upload for a specified registry, repository +// name, and upload ID, has completed. You can optionally provide a sha256 digest +// of the image layer for data validation purposes. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. 
Use the docker CLI to pull, tag, and push images. +func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) { + req, out := c.CompleteLayerUploadRequest(input) + err := req.Send() + return out, err +} + +const opCreateRepository = "CreateRepository" + +// CreateRepositoryRequest generates a request for the CreateRepository operation. +func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { + op := &request.Operation{ + Name: opCreateRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRepositoryOutput{} + req.Data = output + return +} + +// Creates an image repository. +func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { + req, out := c.CreateRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRepository = "DeleteRepository" + +// DeleteRepositoryRequest generates a request for the DeleteRepository operation. +func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { + op := &request.Operation{ + Name: opDeleteRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryOutput{} + req.Data = output + return +} + +// Deletes an existing image repository. If a repository contains images, you +// must use the force option to delete it. +func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy" + +// DeleteRepositoryPolicyRequest generates a request for the DeleteRepositoryPolicy operation. 
+func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) (req *request.Request, output *DeleteRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRepositoryPolicyOutput{} + req.Data = output + return +} + +// Deletes the repository policy from a specified repository. +func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*DeleteRepositoryPolicyOutput, error) { + req, out := c.DeleteRepositoryPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRepositories = "DescribeRepositories" + +// DescribeRepositoriesRequest generates a request for the DescribeRepositories operation. +func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req *request.Request, output *DescribeRepositoriesOutput) { + op := &request.Operation{ + Name: opDescribeRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRepositoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRepositoriesOutput{} + req.Data = output + return +} + +// Describes image repositories in a registry. +func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeRepositoriesOutput, error) { + req, out := c.DescribeRepositoriesRequest(input) + err := req.Send() + return out, err +} + +const opGetAuthorizationToken = "GetAuthorizationToken" + +// GetAuthorizationTokenRequest generates a request for the GetAuthorizationToken operation. 
+func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) { + op := &request.Operation{ + Name: opGetAuthorizationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAuthorizationTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAuthorizationTokenOutput{} + req.Data = output + return +} + +// Retrieves a token that is valid for a specified registry for 12 hours. This +// command allows you to use the docker CLI to push and pull images with Amazon +// ECR. If you do not specify a registry, the default registry is assumed. +// +// The authorizationToken returned for each registry specified is a base64 +// encoded string that can be decoded and used in a docker login command to +// authenticate to a registry. The AWS CLI offers an aws ecr get-login command +// that simplifies the login process. +func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) { + req, out := c.GetAuthorizationTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer" + +// GetDownloadUrlForLayerRequest generates a request for the GetDownloadUrlForLayer operation. +func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) (req *request.Request, output *GetDownloadUrlForLayerOutput) { + op := &request.Operation{ + Name: opGetDownloadUrlForLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDownloadUrlForLayerInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDownloadUrlForLayerOutput{} + req.Data = output + return +} + +// Retrieves the pre-signed Amazon S3 download URL corresponding to an image +// layer. You can only get URLs for image layers that are referenced in an image. 
+// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) { + req, out := c.GetDownloadUrlForLayerRequest(input) + err := req.Send() + return out, err +} + +const opGetRepositoryPolicy = "GetRepositoryPolicy" + +// GetRepositoryPolicyRequest generates a request for the GetRepositoryPolicy operation. +func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req *request.Request, output *GetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opGetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRepositoryPolicyOutput{} + req.Data = output + return +} + +// Retrieves the repository policy for a specified repository. +func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetRepositoryPolicyOutput, error) { + req, out := c.GetRepositoryPolicyRequest(input) + err := req.Send() + return out, err +} + +const opInitiateLayerUpload = "InitiateLayerUpload" + +// InitiateLayerUploadRequest generates a request for the InitiateLayerUpload operation. +func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *request.Request, output *InitiateLayerUploadOutput) { + op := &request.Operation{ + Name: opInitiateLayerUpload, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &InitiateLayerUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &InitiateLayerUploadOutput{} + req.Data = output + return +} + +// Notify Amazon ECR that you intend to upload an image layer. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. 
+func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { + req, out := c.InitiateLayerUploadRequest(input) + err := req.Send() + return out, err +} + +const opListImages = "ListImages" + +// ListImagesRequest generates a request for the ListImages operation. +func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, output *ListImagesOutput) { + op := &request.Operation{ + Name: opListImages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListImagesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListImagesOutput{} + req.Data = output + return +} + +// Lists all the image IDs for a given repository. +func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) { + req, out := c.ListImagesRequest(input) + err := req.Send() + return out, err +} + +const opPutImage = "PutImage" + +// PutImageRequest generates a request for the PutImage operation. +func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, output *PutImageOutput) { + op := &request.Operation{ + Name: opPutImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutImageInput{} + } + + req = c.newRequest(op, input, output) + output = &PutImageOutput{} + req.Data = output + return +} + +// Creates or updates the image manifest associated with an image. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { + req, out := c.PutImageRequest(input) + err := req.Send() + return out, err +} + +const opSetRepositoryPolicy = "SetRepositoryPolicy" + +// SetRepositoryPolicyRequest generates a request for the SetRepositoryPolicy operation. 
+func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { + op := &request.Operation{ + Name: opSetRepositoryPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetRepositoryPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SetRepositoryPolicyOutput{} + req.Data = output + return +} + +// Applies a repository policy on a specified repository to control access permissions. +func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { + req, out := c.SetRepositoryPolicyRequest(input) + err := req.Send() + return out, err +} + +const opUploadLayerPart = "UploadLayerPart" + +// UploadLayerPartRequest generates a request for the UploadLayerPart operation. +func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { + op := &request.Operation{ + Name: opUploadLayerPart, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadLayerPartInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadLayerPartOutput{} + req.Data = output + return +} + +// Uploads an image layer part to Amazon ECR. +// +// This operation is used by the Amazon ECR proxy, and it is not intended +// for general use by customers. Use the docker CLI to pull, tag, and push images. +func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) { + req, out := c.UploadLayerPartRequest(input) + err := req.Send() + return out, err +} + +// An object representing authorization data for an Amazon ECR registry. +type AuthorizationData struct { + _ struct{} `type:"structure"` + + // A base64-encoded string that contains authorization data for the specified + // Amazon ECR registry. When the string is decoded, it is presented in the format + // user:password for private registry authentication using docker login. 
+ AuthorizationToken *string `locationName:"authorizationToken" type:"string"` + + // The Unix time in seconds and milliseconds when the authorization token expires. + // Authorization tokens are valid for 12 hours. + ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp" timestampFormat:"unix"` + + // The registry URL to use for this authorization token in a docker login command. + // The Amazon ECR registry URL format is https://aws_account_id.dkr.ecr.region.amazonaws.com. + // For example, https://012345678910.dkr.ecr.us-east-1.amazonaws.com. + ProxyEndpoint *string `locationName:"proxyEndpoint" type:"string"` +} + +// String returns the string representation +func (s AuthorizationData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizationData) GoString() string { + return s.String() +} + +type BatchCheckLayerAvailabilityInput struct { + _ struct{} `type:"structure"` + + // The digests of the image layers to check. + LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the image layers + // to check. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the image layers to check. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCheckLayerAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCheckLayerAvailabilityInput) GoString() string { + return s.String() +} + +type BatchCheckLayerAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. 
+ Failures []*LayerFailure `locationName:"failures" type:"list"` + + // A list of image layer objects corresponding to the image layer references + // in the request. + Layers []*Layer `locationName:"layers" type:"list"` +} + +// String returns the string representation +func (s BatchCheckLayerAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchCheckLayerAvailabilityOutput) GoString() string { + return s.String() +} + +// Deletes specified images within a specified repository. Images are specified +// with either the imageTag or imageDigest. +type BatchDeleteImageInput struct { + _ struct{} `type:"structure"` + + // A list of image ID references that correspond to images to delete. The format + // of the imageIds reference is imageTag=tag or imageDigest=digest. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the image to + // delete. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the image to delete. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteImageInput) GoString() string { + return s.String() +} + +type BatchDeleteImageOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*ImageFailure `locationName:"failures" type:"list"` + + // The image IDs of the deleted images. 
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchDeleteImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteImageOutput) GoString() string { + return s.String() +} + +type BatchGetImageInput struct { + _ struct{} `type:"structure"` + + // A list of image ID references that correspond to images to describe. The + // format of the imageIds reference is imageTag=tag or imageDigest=digest. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry that contains the images + // to describe. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository that contains the images to describe. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchGetImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetImageInput) GoString() string { + return s.String() +} + +type BatchGetImageOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*ImageFailure `locationName:"failures" type:"list"` + + // A list of image objects corresponding to the image references in the request. + Images []*Image `locationName:"images" type:"list"` +} + +// String returns the string representation +func (s BatchGetImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetImageOutput) GoString() string { + return s.String() +} + +type CompleteLayerUploadInput struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image layer. 
+ LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"` + + // The AWS account ID associated with the registry to which to upload layers. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to associate with the image layer. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The upload ID from a previous InitiateLayerUpload operation to associate + // with the image layer. + UploadId *string `locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLayerUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLayerUploadInput) GoString() string { + return s.String() +} + +type CompleteLayerUploadOutput struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image layer. + LayerDigest *string `locationName:"layerDigest" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the layer. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s CompleteLayerUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLayerUploadOutput) GoString() string { + return s.String() +} + +type CreateRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name to use for the repository. 
The repository name may be specified + // on its own (such as nginx-web-app) or it can be prepended with a namespace + // to group the repository into a category (such as project-a/nginx-web-app). + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryInput) GoString() string { + return s.String() +} + +type CreateRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Object representing a repository. + Repository *Repository `locationName:"repository" type:"structure"` +} + +// String returns the string representation +func (s CreateRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRepositoryOutput) GoString() string { + return s.String() +} + +type DeleteRepositoryInput struct { + _ struct{} `type:"structure"` + + // Force the deletion of the repository if it contains images. + Force *bool `locationName:"force" type:"boolean"` + + // The AWS account ID associated with the registry that contains the repository + // to delete. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to delete. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryInput) GoString() string { + return s.String() +} + +type DeleteRepositoryOutput struct { + _ struct{} `type:"structure"` + + // Object representing a repository. 
+ Repository *Repository `locationName:"repository" type:"structure"` +} + +// String returns the string representation +func (s DeleteRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryOutput) GoString() string { + return s.String() +} + +type DeleteRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository + // policy to delete. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the repository policy + // to delete. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyInput) GoString() string { + return s.String() +} + +type DeleteRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy that was deleted from the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRepositoryPolicyOutput) GoString() string { + return s.String() +} + +type DescribeRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of repository results returned by DescribeRepositories + // in paginated output. When this parameter is used, DescribeRepositories only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeRepositories request with the returned nextToken value. This + // value can be between 1 and 100. If this parameter is not used, then DescribeRepositories + // returns up to 100 results and a nextToken value, if applicable. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeRepositories + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repositories + // to be described. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // A list of repositories to describe. If this parameter is omitted, then all + // repositories in a registry are described. 
+ RepositoryNames []*string `locationName:"repositoryNames" min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRepositoriesInput) GoString() string { + return s.String() +} + +type DescribeRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future DescribeRepositories request. + // When the results of a DescribeRepositories request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of repository objects corresponding to valid repositories. + Repositories []*Repository `locationName:"repositories" type:"list"` +} + +// String returns the string representation +func (s DescribeRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRepositoriesOutput) GoString() string { + return s.String() +} + +type GetAuthorizationTokenInput struct { + _ struct{} `type:"structure"` + + // A list of AWS account IDs that are associated with the registries for which + // to get authorization tokens. If you do not specify a registry, the default + // registry is assumed. + RegistryIds []*string `locationName:"registryIds" min:"1" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenInput) GoString() string { + return s.String() +} + +type GetAuthorizationTokenOutput struct { + _ struct{} `type:"structure"` + + // A list of authorization token data objects that correspond to the registryIds + // values in the request. 
+ AuthorizationData []*AuthorizationData `locationName:"authorizationData" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenOutput) GoString() string { + return s.String() +} + +type GetDownloadUrlForLayerInput struct { + _ struct{} `type:"structure"` + + // The digest of the image layer to download. + LayerDigest *string `locationName:"layerDigest" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the image layer + // to download. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the image layer to download. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDownloadUrlForLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDownloadUrlForLayerInput) GoString() string { + return s.String() +} + +type GetDownloadUrlForLayerOutput struct { + _ struct{} `type:"structure"` + + // The pre-signed Amazon S3 download URL for the requested layer. + DownloadUrl *string `locationName:"downloadUrl" type:"string"` + + // The digest of the image layer to download. 
+ LayerDigest *string `locationName:"layerDigest" type:"string"` +} + +// String returns the string representation +func (s GetDownloadUrlForLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDownloadUrlForLayerOutput) GoString() string { + return s.String() +} + +type GetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository whose policy you want to retrieve. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyInput) GoString() string { + return s.String() +} + +type GetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text associated with the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s GetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// Object representing an image. +type Image struct { + _ struct{} `type:"structure"` + + // An object containing the image tag and image digest associated with an image. 
+ ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The image manifest associated with the image. + ImageManifest *string `locationName:"imageManifest" type:"string"` + + // The AWS account ID associated with the registry containing the image. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository associated with the image. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +type ImageFailure struct { + _ struct{} `type:"structure"` + + // The code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"ImageFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The image ID associated with the failure. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` +} + +// String returns the string representation +func (s ImageFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageFailure) GoString() string { + return s.String() +} + +type ImageIdentifier struct { + _ struct{} `type:"structure"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The tag used for the image. 
+ ImageTag *string `locationName:"imageTag" type:"string"` +} + +// String returns the string representation +func (s ImageIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageIdentifier) GoString() string { + return s.String() +} + +type InitiateLayerUploadInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that you intend to upload + // layers to. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that you intend to upload layers to. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateLayerUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateLayerUploadInput) GoString() string { + return s.String() +} + +type InitiateLayerUploadOutput struct { + _ struct{} `type:"structure"` + + // The size, in bytes, that Amazon ECR expects future layer part uploads to + // be. + PartSize *int64 `locationName:"partSize" type:"long"` + + // The upload ID for the layer upload. This parameter is passed to further UploadLayerPart + // and CompleteLayerUpload operations. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s InitiateLayerUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateLayerUploadOutput) GoString() string { + return s.String() +} + +type Layer struct { + _ struct{} `type:"structure"` + + // The availability status of the image layer. Valid values are AVAILABLE and + // UNAVAILABLE. 
+ LayerAvailability *string `locationName:"layerAvailability" type:"string" enum:"LayerAvailability"` + + // The sha256 digest of the image layer. + LayerDigest *string `locationName:"layerDigest" type:"string"` + + // The size, in bytes, of the image layer. + LayerSize *int64 `locationName:"layerSize" type:"long"` +} + +// String returns the string representation +func (s Layer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Layer) GoString() string { + return s.String() +} + +type LayerFailure struct { + _ struct{} `type:"structure"` + + // The failure code associated with the failure. + FailureCode *string `locationName:"failureCode" type:"string" enum:"LayerFailureCode"` + + // The reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The layer digest associated with the failure. + LayerDigest *string `locationName:"layerDigest" type:"string"` +} + +// String returns the string representation +func (s LayerFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LayerFailure) GoString() string { + return s.String() +} + +type ListImagesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of image results returned by ListImages in paginated output. + // When this parameter is used, ListImages only returns maxResults results in + // a single page along with a nextToken response element. The remaining results + // of the initial request can be seen by sending another ListImages request + // with the returned nextToken value. This value can be between 1 and 100. If + // this parameter is not used, then ListImages returns up to 100 results and + // a nextToken value, if applicable. 
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated ListImages request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // to list images in. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository whose image IDs are to be listed. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesInput) GoString() string { + return s.String() +} + +type ListImagesOutput struct { + _ struct{} `type:"structure"` + + // The list of image IDs for the requested repository. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The nextToken value to include in a future ListImages request. When the results + // of a ListImages request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesOutput) GoString() string { + return s.String() +} + +type PutImageInput struct { + _ struct{} `type:"structure"` + + // The image manifest corresponding to the image to be uploaded. + ImageManifest *string `locationName:"imageManifest" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to put the image. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to put the image. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageInput) GoString() string { + return s.String() +} + +type PutImageOutput struct { + _ struct{} `type:"structure"` + + // Details of the image uploaded. + Image *Image `locationName:"image" type:"structure"` +} + +// String returns the string representation +func (s PutImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageOutput) GoString() string { + return s.String() +} + +// Object representing a repository. +type Repository struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + RegistryId *string `locationName:"registryId" type:"string"` + + // The Amazon Resource Name (ARN) that identifies the repository. 
The ARN contains + // the arn:aws:ecr namespace, followed by the region of the repository, the + // AWS account ID of the repository owner, the repository namespace, and then + // the repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + RepositoryArn *string `locationName:"repositoryArn" type:"string"` + + // The name of the repository. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s Repository) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Repository) GoString() string { + return s.String() +} + +type SetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // If the policy you are attempting to set on a repository policy would prevent + // you from setting another policy in the future, you must force the SetRepositoryPolicy + // operation. This is intended to prevent accidental repository lock outs. + Force *bool `locationName:"force" type:"boolean"` + + // The JSON repository policy text to apply to the repository. + PolicyText *string `locationName:"policyText" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to receive the policy. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRepositoryPolicyInput) GoString() string { + return s.String() +} + +type SetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text applied to the repository. 
+ PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s SetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +type UploadLayerPartInput struct { + _ struct{} `type:"structure"` + + // The base64-encoded layer part payload. + LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"` + + // The integer value of the first byte of the layer part. + PartFirstByte *int64 `locationName:"partFirstByte" type:"long" required:"true"` + + // The integer value of the last byte of the layer part. + PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` + + // The AWS account ID associated with the registry that you are uploading layer + // parts to. If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that you are uploading layer parts to. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` + + // The upload ID from a previous InitiateLayerUpload operation to associate + // with the layer part upload. 
+ UploadId *string `locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadLayerPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadLayerPartInput) GoString() string { + return s.String() +} + +type UploadLayerPartOutput struct { + _ struct{} `type:"structure"` + + // The integer value of the last byte received in the request. + LastByteReceived *int64 `locationName:"lastByteReceived" type:"long"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The upload ID associated with the request. + UploadId *string `locationName:"uploadId" type:"string"` +} + +// String returns the string representation +func (s UploadLayerPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadLayerPartOutput) GoString() string { + return s.String() +} + +const ( + // @enum ImageFailureCode + ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" + // @enum ImageFailureCode + ImageFailureCodeInvalidImageTag = "InvalidImageTag" + // @enum ImageFailureCode + ImageFailureCodeImageTagDoesNotMatchDigest = "ImageTagDoesNotMatchDigest" + // @enum ImageFailureCode + ImageFailureCodeImageNotFound = "ImageNotFound" + // @enum ImageFailureCode + ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag" +) + +const ( + // @enum LayerAvailability + LayerAvailabilityAvailable = "AVAILABLE" + // @enum LayerAvailability + LayerAvailabilityUnavailable = "UNAVAILABLE" +) + +const ( + // @enum LayerFailureCode + LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest" + // @enum LayerFailureCode + LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" +) diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/ecriface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,78 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecriface provides an interface for the Amazon EC2 Container Registry. +package ecriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ecr" +) + +// ECRAPI is the interface type for ecr.ECR. +type ECRAPI interface { + BatchCheckLayerAvailabilityRequest(*ecr.BatchCheckLayerAvailabilityInput) (*request.Request, *ecr.BatchCheckLayerAvailabilityOutput) + + BatchCheckLayerAvailability(*ecr.BatchCheckLayerAvailabilityInput) (*ecr.BatchCheckLayerAvailabilityOutput, error) + + BatchDeleteImageRequest(*ecr.BatchDeleteImageInput) (*request.Request, *ecr.BatchDeleteImageOutput) + + BatchDeleteImage(*ecr.BatchDeleteImageInput) (*ecr.BatchDeleteImageOutput, error) + + BatchGetImageRequest(*ecr.BatchGetImageInput) (*request.Request, *ecr.BatchGetImageOutput) + + BatchGetImage(*ecr.BatchGetImageInput) (*ecr.BatchGetImageOutput, error) + + CompleteLayerUploadRequest(*ecr.CompleteLayerUploadInput) (*request.Request, *ecr.CompleteLayerUploadOutput) + + CompleteLayerUpload(*ecr.CompleteLayerUploadInput) (*ecr.CompleteLayerUploadOutput, error) + + CreateRepositoryRequest(*ecr.CreateRepositoryInput) (*request.Request, *ecr.CreateRepositoryOutput) + + CreateRepository(*ecr.CreateRepositoryInput) (*ecr.CreateRepositoryOutput, error) + + DeleteRepositoryRequest(*ecr.DeleteRepositoryInput) (*request.Request, *ecr.DeleteRepositoryOutput) + + 
DeleteRepository(*ecr.DeleteRepositoryInput) (*ecr.DeleteRepositoryOutput, error) + + DeleteRepositoryPolicyRequest(*ecr.DeleteRepositoryPolicyInput) (*request.Request, *ecr.DeleteRepositoryPolicyOutput) + + DeleteRepositoryPolicy(*ecr.DeleteRepositoryPolicyInput) (*ecr.DeleteRepositoryPolicyOutput, error) + + DescribeRepositoriesRequest(*ecr.DescribeRepositoriesInput) (*request.Request, *ecr.DescribeRepositoriesOutput) + + DescribeRepositories(*ecr.DescribeRepositoriesInput) (*ecr.DescribeRepositoriesOutput, error) + + GetAuthorizationTokenRequest(*ecr.GetAuthorizationTokenInput) (*request.Request, *ecr.GetAuthorizationTokenOutput) + + GetAuthorizationToken(*ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) + + GetDownloadUrlForLayerRequest(*ecr.GetDownloadUrlForLayerInput) (*request.Request, *ecr.GetDownloadUrlForLayerOutput) + + GetDownloadUrlForLayer(*ecr.GetDownloadUrlForLayerInput) (*ecr.GetDownloadUrlForLayerOutput, error) + + GetRepositoryPolicyRequest(*ecr.GetRepositoryPolicyInput) (*request.Request, *ecr.GetRepositoryPolicyOutput) + + GetRepositoryPolicy(*ecr.GetRepositoryPolicyInput) (*ecr.GetRepositoryPolicyOutput, error) + + InitiateLayerUploadRequest(*ecr.InitiateLayerUploadInput) (*request.Request, *ecr.InitiateLayerUploadOutput) + + InitiateLayerUpload(*ecr.InitiateLayerUploadInput) (*ecr.InitiateLayerUploadOutput, error) + + ListImagesRequest(*ecr.ListImagesInput) (*request.Request, *ecr.ListImagesOutput) + + ListImages(*ecr.ListImagesInput) (*ecr.ListImagesOutput, error) + + PutImageRequest(*ecr.PutImageInput) (*request.Request, *ecr.PutImageOutput) + + PutImage(*ecr.PutImageInput) (*ecr.PutImageOutput, error) + + SetRepositoryPolicyRequest(*ecr.SetRepositoryPolicyInput) (*request.Request, *ecr.SetRepositoryPolicyOutput) + + SetRepositoryPolicy(*ecr.SetRepositoryPolicyInput) (*ecr.SetRepositoryPolicyOutput, error) + + UploadLayerPartRequest(*ecr.UploadLayerPartInput) (*request.Request, *ecr.UploadLayerPartOutput) + + 
UploadLayerPart(*ecr.UploadLayerPartInput) (*ecr.UploadLayerPartOutput, error) +} + +var _ ECRAPI = (*ecr.ECR)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,376 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecr_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ecr" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleECR_BatchCheckLayerAvailability() { + svc := ecr.New(session.New()) + + params := &ecr.BatchCheckLayerAvailabilityInput{ + LayerDigests: []*string{ // Required + aws.String("BatchedOperationLayerDigest"), // Required + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchCheckLayerAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_BatchDeleteImage() { + svc := ecr.New(session.New()) + + params := &ecr.BatchDeleteImageInput{ + ImageIds: []*ecr.ImageIdentifier{ // Required + { // Required + ImageDigest: aws.String("ImageDigest"), + ImageTag: aws.String("ImageTag"), + }, + // More values... 
+ }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchDeleteImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_BatchGetImage() { + svc := ecr.New(session.New()) + + params := &ecr.BatchGetImageInput{ + ImageIds: []*ecr.ImageIdentifier{ // Required + { // Required + ImageDigest: aws.String("ImageDigest"), + ImageTag: aws.String("ImageTag"), + }, + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.BatchGetImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_CompleteLayerUpload() { + svc := ecr.New(session.New()) + + params := &ecr.CompleteLayerUploadInput{ + LayerDigests: []*string{ // Required + aws.String("LayerDigest"), // Required + // More values... + }, + RepositoryName: aws.String("RepositoryName"), // Required + UploadId: aws.String("UploadId"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.CompleteLayerUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_CreateRepository() { + svc := ecr.New(session.New()) + + params := &ecr.CreateRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + } + resp, err := svc.CreateRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_DeleteRepository() { + svc := ecr.New(session.New()) + + params := &ecr.DeleteRepositoryInput{ + RepositoryName: aws.String("RepositoryName"), // Required + Force: aws.Bool(true), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.DeleteRepository(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_DeleteRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.DeleteRepositoryPolicyInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.DeleteRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_DescribeRepositories() { + svc := ecr.New(session.New()) + + params := &ecr.DescribeRepositoriesInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + RegistryId: aws.String("RegistryId"), + RepositoryNames: []*string{ + aws.String("RepositoryName"), // Required + // More values... + }, + } + resp, err := svc.DescribeRepositories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetAuthorizationToken() { + svc := ecr.New(session.New()) + + params := &ecr.GetAuthorizationTokenInput{ + RegistryIds: []*string{ + aws.String("RegistryId"), // Required + // More values... 
+ }, + } + resp, err := svc.GetAuthorizationToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetDownloadUrlForLayer() { + svc := ecr.New(session.New()) + + params := &ecr.GetDownloadUrlForLayerInput{ + LayerDigest: aws.String("LayerDigest"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.GetDownloadUrlForLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_GetRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.GetRepositoryPolicyInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.GetRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_InitiateLayerUpload() { + svc := ecr.New(session.New()) + + params := &ecr.InitiateLayerUploadInput{ + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.InitiateLayerUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECR_ListImages() { + svc := ecr.New(session.New()) + + params := &ecr.ListImagesInput{ + RepositoryName: aws.String("RepositoryName"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.ListImages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_PutImage() { + svc := ecr.New(session.New()) + + params := &ecr.PutImageInput{ + ImageManifest: aws.String("ImageManifest"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.PutImage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECR_SetRepositoryPolicy() { + svc := ecr.New(session.New()) + + params := &ecr.SetRepositoryPolicyInput{ + PolicyText: aws.String("RepositoryPolicyText"), // Required + RepositoryName: aws.String("RepositoryName"), // Required + Force: aws.Bool(true), + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.SetRepositoryPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECR_UploadLayerPart() { + svc := ecr.New(session.New()) + + params := &ecr.UploadLayerPartInput{ + LayerPartBlob: []byte("PAYLOAD"), // Required + PartFirstByte: aws.Int64(1), // Required + PartLastByte: aws.Int64(1), // Required + RepositoryName: aws.String("RepositoryName"), // Required + UploadId: aws.String("UploadId"), // Required + RegistryId: aws.String("RegistryId"), + } + resp, err := svc.UploadLayerPart(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecr/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,93 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecr + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry +// service. Customers can use the familiar Docker CLI to push, pull, and manage +// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon +// ECR supports private Docker repositories with resource-based permissions +// using AWS IAM so that specific users or Amazon EC2 instances can access repositories +// and images. 
Developers can use the Docker CLI to author and manage images. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type ECR struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ecr" + +// New creates a new instance of the ECR client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ECR client from just a session. +// svc := ecr.New(mySession) +// +// // Create a ECR client with additional configuration +// svc := ecr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ECR { + svc := &ECR{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-09-21", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerRegistry_V20150921", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECR operation and runs any +// custom request initialization. +func (c *ECR) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3638 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecs provides a client for Amazon EC2 Container Service. 
+package ecs + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateCluster = "CreateCluster" + +// CreateClusterRequest generates a request for the CreateCluster operation. +func (c *ECS) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { + op := &request.Operation{ + Name: opCreateCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterOutput{} + req.Data = output + return +} + +// Creates a new Amazon ECS cluster. By default, your account receives a default +// cluster when you launch your first container instance. However, you can create +// your own cluster with a unique name with the CreateCluster action. +func (c *ECS) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) { + req, out := c.CreateClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateService = "CreateService" + +// CreateServiceRequest generates a request for the CreateService operation. +func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Request, output *CreateServiceOutput) { + op := &request.Operation{ + Name: opCreateService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateServiceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateServiceOutput{} + req.Data = output + return +} + +// Runs and maintains a desired number of tasks from a specified task definition. +// If the number of tasks running in a service drops below desiredCount, Amazon +// ECS spawns another instantiation of the task in the specified cluster. To +// update an existing service, see UpdateService. +// +// You can optionally specify a deployment configuration for your service. 
+// During a deployment (which is triggered by changing the task definition of +// a service with an UpdateService operation), the service scheduler uses the +// minimumHealthyPercent and maximumPercent parameters to determine the deployment +// strategy. +// +// If the minimumHealthyPercent is below 100%, the scheduler can ignore the +// desiredCount temporarily during a deployment. For example, if your service +// has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the +// scheduler to stop two existing tasks before starting two new tasks. Tasks +// for services that do not use a load balancer are considered healthy if they +// are in the RUNNING state; tasks for services that do use a load balancer +// are considered healthy if they are in the RUNNING state and the container +// instance it is hosted on is reported as healthy by the load balancer. The +// default value for minimumHealthyPercent is 50% in the console and 100% for +// the AWS CLI, the AWS SDKs, and the APIs. +// +// The maximumPercent parameter represents an upper limit on the number of +// running tasks during a deployment, which enables you to define the deployment +// batch size. For example, if your service has a desiredCount of four tasks, +// a maximumPercent value of 200% starts four new tasks before stopping the +// four older tasks (provided that the cluster resources required to do this +// are available). The default value for maximumPercent is 200%. +// +// When the service scheduler launches new tasks, it attempts to balance them +// across the Availability Zones in your cluster with the following logic: +// +// Determine which of the container instances in your cluster can support +// your service's task definition (for example, they have the required CPU, +// memory, ports, and container instance attributes). +// +// Sort the valid container instances by the fewest number of running tasks +// for this service in the same Availability Zone as the instance. 
For example, +// if zone A has one running service task and zones B and C each have zero, +// valid container instances in either zone B or C are considered optimal for +// placement. +// +// Place the new service task on a valid container instance in an optimal +// Availability Zone (based on the previous steps), favoring container instances +// with the fewest number of running tasks for this service. +func (c *ECS) CreateService(input *CreateServiceInput) (*CreateServiceOutput, error) { + req, out := c.CreateServiceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCluster = "DeleteCluster" + +// DeleteClusterRequest generates a request for the DeleteCluster operation. +func (c *ECS) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) { + op := &request.Operation{ + Name: opDeleteCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteClusterOutput{} + req.Data = output + return +} + +// Deletes the specified cluster. You must deregister all container instances +// from this cluster before you may delete it. You can list the container instances +// in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance. +func (c *ECS) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) { + req, out := c.DeleteClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteService = "DeleteService" + +// DeleteServiceRequest generates a request for the DeleteService operation. 
+func (c *ECS) DeleteServiceRequest(input *DeleteServiceInput) (req *request.Request, output *DeleteServiceOutput) { + op := &request.Operation{ + Name: opDeleteService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteServiceOutput{} + req.Data = output + return +} + +// Deletes a specified service within a cluster. You can delete a service if +// you have no running tasks in it and the desired task count is zero. If the +// service is actively maintaining tasks, you cannot delete it, and you must +// update the service to a desired task count of zero. For more information, +// see UpdateService. +// +// When you delete a service, if there are still running tasks that require +// cleanup, the service status moves from ACTIVE to DRAINING, and the service +// is no longer visible in the console or in ListServices API operations. After +// the tasks have stopped, then the service status moves from DRAINING to INACTIVE. +// Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices +// API operations; however, in the future, INACTIVE services may be cleaned +// up and purged from Amazon ECS record keeping, and DescribeServices API operations +// on those services will return a ServiceNotFoundException error. +func (c *ECS) DeleteService(input *DeleteServiceInput) (*DeleteServiceOutput, error) { + req, out := c.DeleteServiceRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterContainerInstance = "DeregisterContainerInstance" + +// DeregisterContainerInstanceRequest generates a request for the DeregisterContainerInstance operation. 
+func (c *ECS) DeregisterContainerInstanceRequest(input *DeregisterContainerInstanceInput) (req *request.Request, output *DeregisterContainerInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterContainerInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterContainerInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterContainerInstanceOutput{} + req.Data = output + return +} + +// Deregisters an Amazon ECS container instance from the specified cluster. +// This instance is no longer available to run tasks. +// +// If you intend to use the container instance for some other purpose after +// deregistration, you should stop all of the tasks running on the container +// instance before deregistration to avoid any orphaned tasks from consuming +// resources. +// +// Deregistering a container instance removes the instance from a cluster, +// but it does not terminate the EC2 instance; if you are finished using the +// instance, be sure to terminate it in the Amazon EC2 console to stop billing. +// +// When you terminate a container instance, it is automatically deregistered +// from your cluster. +func (c *ECS) DeregisterContainerInstance(input *DeregisterContainerInstanceInput) (*DeregisterContainerInstanceOutput, error) { + req, out := c.DeregisterContainerInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterTaskDefinition = "DeregisterTaskDefinition" + +// DeregisterTaskDefinitionRequest generates a request for the DeregisterTaskDefinition operation. 
+func (c *ECS) DeregisterTaskDefinitionRequest(input *DeregisterTaskDefinitionInput) (req *request.Request, output *DeregisterTaskDefinitionOutput) { + op := &request.Operation{ + Name: opDeregisterTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTaskDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterTaskDefinitionOutput{} + req.Data = output + return +} + +// Deregisters the specified task definition by family and revision. Upon deregistration, +// the task definition is marked as INACTIVE. Existing tasks and services that +// reference an INACTIVE task definition continue to run without disruption. +// Existing services that reference an INACTIVE task definition can still scale +// up or down by modifying the service's desired count. +// +// You cannot use an INACTIVE task definition to run new tasks or create new +// services, and you cannot update an existing service to reference an INACTIVE +// task definition (although there may be up to a 10 minute window following +// deregistration where these restrictions have not yet taken effect). +func (c *ECS) DeregisterTaskDefinition(input *DeregisterTaskDefinitionInput) (*DeregisterTaskDefinitionOutput, error) { + req, out := c.DeregisterTaskDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClusters = "DescribeClusters" + +// DescribeClustersRequest generates a request for the DescribeClusters operation. +func (c *ECS) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { + op := &request.Operation{ + Name: opDescribeClusters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClustersOutput{} + req.Data = output + return +} + +// Describes one or more of your clusters. 
+func (c *ECS) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) { + req, out := c.DescribeClustersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeContainerInstances = "DescribeContainerInstances" + +// DescribeContainerInstancesRequest generates a request for the DescribeContainerInstances operation. +func (c *ECS) DescribeContainerInstancesRequest(input *DescribeContainerInstancesInput) (req *request.Request, output *DescribeContainerInstancesOutput) { + op := &request.Operation{ + Name: opDescribeContainerInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContainerInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeContainerInstancesOutput{} + req.Data = output + return +} + +// Describes Amazon EC2 Container Service container instances. Returns metadata +// about registered and remaining resources on each container instance requested. +func (c *ECS) DescribeContainerInstances(input *DescribeContainerInstancesInput) (*DescribeContainerInstancesOutput, error) { + req, out := c.DescribeContainerInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeServices = "DescribeServices" + +// DescribeServicesRequest generates a request for the DescribeServices operation. +func (c *ECS) DescribeServicesRequest(input *DescribeServicesInput) (req *request.Request, output *DescribeServicesOutput) { + op := &request.Operation{ + Name: opDescribeServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeServicesOutput{} + req.Data = output + return +} + +// Describes the specified services running in your cluster. 
+func (c *ECS) DescribeServices(input *DescribeServicesInput) (*DescribeServicesOutput, error) { + req, out := c.DescribeServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTaskDefinition = "DescribeTaskDefinition" + +// DescribeTaskDefinitionRequest generates a request for the DescribeTaskDefinition operation. +func (c *ECS) DescribeTaskDefinitionRequest(input *DescribeTaskDefinitionInput) (req *request.Request, output *DescribeTaskDefinitionOutput) { + op := &request.Operation{ + Name: opDescribeTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTaskDefinitionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTaskDefinitionOutput{} + req.Data = output + return +} + +// Describes a task definition. You can specify a family and revision to find +// information about a specific task definition, or you can simply specify the +// family to find the latest ACTIVE revision in that family. +// +// You can only describe INACTIVE task definitions while an active task or +// service references them. +func (c *ECS) DescribeTaskDefinition(input *DescribeTaskDefinitionInput) (*DescribeTaskDefinitionOutput, error) { + req, out := c.DescribeTaskDefinitionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTasks = "DescribeTasks" + +// DescribeTasksRequest generates a request for the DescribeTasks operation. +func (c *ECS) DescribeTasksRequest(input *DescribeTasksInput) (req *request.Request, output *DescribeTasksOutput) { + op := &request.Operation{ + Name: opDescribeTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTasksOutput{} + req.Data = output + return +} + +// Describes a specified task or tasks. 
+func (c *ECS) DescribeTasks(input *DescribeTasksInput) (*DescribeTasksOutput, error) { + req, out := c.DescribeTasksRequest(input) + err := req.Send() + return out, err +} + +const opDiscoverPollEndpoint = "DiscoverPollEndpoint" + +// DiscoverPollEndpointRequest generates a request for the DiscoverPollEndpoint operation. +func (c *ECS) DiscoverPollEndpointRequest(input *DiscoverPollEndpointInput) (req *request.Request, output *DiscoverPollEndpointOutput) { + op := &request.Operation{ + Name: opDiscoverPollEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DiscoverPollEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DiscoverPollEndpointOutput{} + req.Data = output + return +} + +// This action is only used by the Amazon EC2 Container Service agent, and it +// is not intended for use outside of the agent. +// +// Returns an endpoint for the Amazon EC2 Container Service agent to poll for +// updates. +func (c *ECS) DiscoverPollEndpoint(input *DiscoverPollEndpointInput) (*DiscoverPollEndpointOutput, error) { + req, out := c.DiscoverPollEndpointRequest(input) + err := req.Send() + return out, err +} + +const opListClusters = "ListClusters" + +// ListClustersRequest generates a request for the ListClusters operation. +func (c *ECS) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { + op := &request.Operation{ + Name: opListClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListClustersOutput{} + req.Data = output + return +} + +// Returns a list of existing clusters. 
+func (c *ECS) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListClustersPages(input *ListClustersInput, fn func(p *ListClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListClustersOutput), lastPage) + }) +} + +const opListContainerInstances = "ListContainerInstances" + +// ListContainerInstancesRequest generates a request for the ListContainerInstances operation. +func (c *ECS) ListContainerInstancesRequest(input *ListContainerInstancesInput) (req *request.Request, output *ListContainerInstancesOutput) { + op := &request.Operation{ + Name: opListContainerInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListContainerInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListContainerInstancesOutput{} + req.Data = output + return +} + +// Returns a list of container instances in a specified cluster. 
+func (c *ECS) ListContainerInstances(input *ListContainerInstancesInput) (*ListContainerInstancesOutput, error) { + req, out := c.ListContainerInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListContainerInstancesPages(input *ListContainerInstancesInput, fn func(p *ListContainerInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListContainerInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListContainerInstancesOutput), lastPage) + }) +} + +const opListServices = "ListServices" + +// ListServicesRequest generates a request for the ListServices operation. +func (c *ECS) ListServicesRequest(input *ListServicesInput) (req *request.Request, output *ListServicesOutput) { + op := &request.Operation{ + Name: opListServices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListServicesOutput{} + req.Data = output + return +} + +// Lists the services that are running in a specified cluster. 
+func (c *ECS) ListServices(input *ListServicesInput) (*ListServicesOutput, error) { + req, out := c.ListServicesRequest(input) + err := req.Send() + return out, err +} + +func (c *ECS) ListServicesPages(input *ListServicesInput, fn func(p *ListServicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListServicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListServicesOutput), lastPage) + }) +} + +const opListTaskDefinitionFamilies = "ListTaskDefinitionFamilies" + +// ListTaskDefinitionFamiliesRequest generates a request for the ListTaskDefinitionFamilies operation. +func (c *ECS) ListTaskDefinitionFamiliesRequest(input *ListTaskDefinitionFamiliesInput) (req *request.Request, output *ListTaskDefinitionFamiliesOutput) { + op := &request.Operation{ + Name: opListTaskDefinitionFamilies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTaskDefinitionFamiliesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTaskDefinitionFamiliesOutput{} + req.Data = output + return +} + +// Returns a list of task definition families that are registered to your account +// (which may include task definition families that no longer have any ACTIVE +// task definitions). You can filter the results with the familyPrefix parameter. 
func (c *ECS) ListTaskDefinitionFamilies(input *ListTaskDefinitionFamiliesInput) (*ListTaskDefinitionFamiliesOutput, error) {
	// Build the request, send it synchronously, and return the decoded output.
	req, out := c.ListTaskDefinitionFamiliesRequest(input)
	err := req.Send()
	return out, err
}

// ListTaskDefinitionFamiliesPages iterates over the pages of the
// ListTaskDefinitionFamilies operation, calling fn once per page of results.
// Iteration continues while fn returns true (shouldContinue); lastPage tells
// fn whether the page it received is the final one.
func (c *ECS) ListTaskDefinitionFamiliesPages(input *ListTaskDefinitionFamiliesInput, fn func(p *ListTaskDefinitionFamiliesOutput, lastPage bool) (shouldContinue bool)) error {
	// The discarded second return value is the first page's output struct;
	// EachPage delivers each page's output to the callback instead.
	page, _ := c.ListTaskDefinitionFamiliesRequest(input)
	// Tag the request's User-Agent so paginated calls are identifiable.
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*ListTaskDefinitionFamiliesOutput), lastPage)
	})
}

const opListTaskDefinitions = "ListTaskDefinitions"

// ListTaskDefinitionsRequest generates a request for the ListTaskDefinitions operation.
func (c *ECS) ListTaskDefinitionsRequest(input *ListTaskDefinitionsInput) (req *request.Request, output *ListTaskDefinitionsOutput) {
	op := &request.Operation{
		Name:       opListTaskDefinitions,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		// Pagination metadata: the service's nextToken/maxResults fields
		// drive page-by-page iteration (see the *Pages helper below).
		Paginator: &request.Paginator{
			InputTokens:     []string{"nextToken"},
			OutputTokens:    []string{"nextToken"},
			LimitToken:      "maxResults",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &ListTaskDefinitionsInput{}
	}

	// NOTE(review): output is still nil when passed to newRequest; the real
	// output struct is created on the next line and attached via req.Data.
	req = c.newRequest(op, input, output)
	output = &ListTaskDefinitionsOutput{}
	req.Data = output
	return
}

// Returns a list of task definitions that are registered to your account. You
// can filter the results by family name with the familyPrefix parameter or
// by status with the status parameter.
func (c *ECS) ListTaskDefinitions(input *ListTaskDefinitionsInput) (*ListTaskDefinitionsOutput, error) {
	req, out := c.ListTaskDefinitionsRequest(input)
	err := req.Send()
	return out, err
}

// ListTaskDefinitionsPages iterates over the pages of the ListTaskDefinitions
// operation, calling fn once per page; iteration continues while fn returns true.
func (c *ECS) ListTaskDefinitionsPages(input *ListTaskDefinitionsInput, fn func(p *ListTaskDefinitionsOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.ListTaskDefinitionsRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*ListTaskDefinitionsOutput), lastPage)
	})
}

const opListTasks = "ListTasks"

// ListTasksRequest generates a request for the ListTasks operation.
func (c *ECS) ListTasksRequest(input *ListTasksInput) (req *request.Request, output *ListTasksOutput) {
	op := &request.Operation{
		Name:       opListTasks,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		Paginator: &request.Paginator{
			InputTokens:     []string{"nextToken"},
			OutputTokens:    []string{"nextToken"},
			LimitToken:      "maxResults",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &ListTasksInput{}
	}

	req = c.newRequest(op, input, output)
	output = &ListTasksOutput{}
	req.Data = output
	return
}

// Returns a list of tasks for a specified cluster. You can filter the results
// by family name, by a particular container instance, or by the desired status
// of the task with the family, containerInstance, and desiredStatus parameters.
func (c *ECS) ListTasks(input *ListTasksInput) (*ListTasksOutput, error) {
	req, out := c.ListTasksRequest(input)
	err := req.Send()
	return out, err
}

// ListTasksPages iterates over the pages of the ListTasks operation, calling
// fn once per page; iteration continues while fn returns true.
func (c *ECS) ListTasksPages(input *ListTasksInput, fn func(p *ListTasksOutput, lastPage bool) (shouldContinue bool)) error {
	page, _ := c.ListTasksRequest(input)
	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	return page.EachPage(func(p interface{}, lastPage bool) bool {
		return fn(p.(*ListTasksOutput), lastPage)
	})
}

const opRegisterContainerInstance = "RegisterContainerInstance"

// RegisterContainerInstanceRequest generates a request for the RegisterContainerInstance operation.
func (c *ECS) RegisterContainerInstanceRequest(input *RegisterContainerInstanceInput) (req *request.Request, output *RegisterContainerInstanceOutput) {
	op := &request.Operation{
		Name:       opRegisterContainerInstance,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &RegisterContainerInstanceInput{}
	}

	req = c.newRequest(op, input, output)
	output = &RegisterContainerInstanceOutput{}
	req.Data = output
	return
}

// This action is only used by the Amazon EC2 Container Service agent, and it
// is not intended for use outside of the agent.
//
// Registers an EC2 instance into the specified cluster. This instance becomes
// available to place containers on.
func (c *ECS) RegisterContainerInstance(input *RegisterContainerInstanceInput) (*RegisterContainerInstanceOutput, error) {
	req, out := c.RegisterContainerInstanceRequest(input)
	err := req.Send()
	return out, err
}

const opRegisterTaskDefinition = "RegisterTaskDefinition"

// RegisterTaskDefinitionRequest generates a request for the RegisterTaskDefinition operation.
func (c *ECS) RegisterTaskDefinitionRequest(input *RegisterTaskDefinitionInput) (req *request.Request, output *RegisterTaskDefinitionOutput) {
	op := &request.Operation{
		Name:       opRegisterTaskDefinition,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &RegisterTaskDefinitionInput{}
	}

	// NOTE(review): output is still nil when passed to newRequest; the real
	// output struct is created on the next line and attached via req.Data.
	req = c.newRequest(op, input, output)
	output = &RegisterTaskDefinitionOutput{}
	req.Data = output
	return
}

// Registers a new task definition from the supplied family and containerDefinitions.
// Optionally, you can add data volumes to your containers with the volumes
// parameter. For more information about task definition parameters and defaults,
// see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html)
// in the Amazon EC2 Container Service Developer Guide.
func (c *ECS) RegisterTaskDefinition(input *RegisterTaskDefinitionInput) (*RegisterTaskDefinitionOutput, error) {
	req, out := c.RegisterTaskDefinitionRequest(input)
	err := req.Send()
	return out, err
}

const opRunTask = "RunTask"

// RunTaskRequest generates a request for the RunTask operation.
func (c *ECS) RunTaskRequest(input *RunTaskInput) (req *request.Request, output *RunTaskOutput) {
	op := &request.Operation{
		Name:       opRunTask,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &RunTaskInput{}
	}

	req = c.newRequest(op, input, output)
	output = &RunTaskOutput{}
	req.Data = output
	return
}

// Start a task using random placement and the default Amazon ECS scheduler.
// To use your own scheduler or place a task on a specific container instance,
// use StartTask instead.
//
// The count parameter is limited to 10 tasks per call.
func (c *ECS) RunTask(input *RunTaskInput) (*RunTaskOutput, error) {
	req, out := c.RunTaskRequest(input)
	err := req.Send()
	return out, err
}

const opStartTask = "StartTask"

// StartTaskRequest generates a request for the StartTask operation.
func (c *ECS) StartTaskRequest(input *StartTaskInput) (req *request.Request, output *StartTaskOutput) {
	op := &request.Operation{
		Name:       opStartTask,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartTaskInput{}
	}

	req = c.newRequest(op, input, output)
	output = &StartTaskOutput{}
	req.Data = output
	return
}

// Starts a new task from the specified task definition on the specified container
// instance or instances. To use the default Amazon ECS scheduler to place your
// task, use RunTask instead.
//
// The list of container instances to start tasks on is limited to 10.
func (c *ECS) StartTask(input *StartTaskInput) (*StartTaskOutput, error) {
	req, out := c.StartTaskRequest(input)
	err := req.Send()
	return out, err
}

const opStopTask = "StopTask"

// StopTaskRequest generates a request for the StopTask operation.
func (c *ECS) StopTaskRequest(input *StopTaskInput) (req *request.Request, output *StopTaskOutput) {
	op := &request.Operation{
		Name:       opStopTask,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StopTaskInput{}
	}

	req = c.newRequest(op, input, output)
	output = &StopTaskOutput{}
	req.Data = output
	return
}

// Stops a running task.
//
// When StopTask is called on a task, the equivalent of docker stop is issued
// to the containers running in the task. This results in a SIGTERM and a 30-second
// timeout, after which SIGKILL is sent and the containers are forcibly stopped.
// If the container handles the SIGTERM gracefully and exits within 30 seconds
// from receiving it, no SIGKILL is sent.
func (c *ECS) StopTask(input *StopTaskInput) (*StopTaskOutput, error) {
	req, out := c.StopTaskRequest(input)
	err := req.Send()
	return out, err
}

const opSubmitContainerStateChange = "SubmitContainerStateChange"

// SubmitContainerStateChangeRequest generates a request for the SubmitContainerStateChange operation.
func (c *ECS) SubmitContainerStateChangeRequest(input *SubmitContainerStateChangeInput) (req *request.Request, output *SubmitContainerStateChangeOutput) {
	op := &request.Operation{
		Name:       opSubmitContainerStateChange,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &SubmitContainerStateChangeInput{}
	}

	req = c.newRequest(op, input, output)
	output = &SubmitContainerStateChangeOutput{}
	req.Data = output
	return
}

// This action is only used by the Amazon EC2 Container Service agent, and it
// is not intended for use outside of the agent.
//
// Sent to acknowledge that a container changed states.
func (c *ECS) SubmitContainerStateChange(input *SubmitContainerStateChangeInput) (*SubmitContainerStateChangeOutput, error) {
	req, out := c.SubmitContainerStateChangeRequest(input)
	err := req.Send()
	return out, err
}

const opSubmitTaskStateChange = "SubmitTaskStateChange"

// SubmitTaskStateChangeRequest generates a request for the SubmitTaskStateChange operation.
func (c *ECS) SubmitTaskStateChangeRequest(input *SubmitTaskStateChangeInput) (req *request.Request, output *SubmitTaskStateChangeOutput) {
	op := &request.Operation{
		Name:       opSubmitTaskStateChange,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &SubmitTaskStateChangeInput{}
	}

	req = c.newRequest(op, input, output)
	output = &SubmitTaskStateChangeOutput{}
	req.Data = output
	return
}

// This action is only used by the Amazon EC2 Container Service agent, and it
// is not intended for use outside of the agent.
//
// Sent to acknowledge that a task changed states.
func (c *ECS) SubmitTaskStateChange(input *SubmitTaskStateChangeInput) (*SubmitTaskStateChangeOutput, error) {
	// Build the request, send it synchronously, and return the decoded output.
	req, out := c.SubmitTaskStateChangeRequest(input)
	err := req.Send()
	return out, err
}

const opUpdateContainerAgent = "UpdateContainerAgent"

// UpdateContainerAgentRequest generates a request for the UpdateContainerAgent operation.
func (c *ECS) UpdateContainerAgentRequest(input *UpdateContainerAgentInput) (req *request.Request, output *UpdateContainerAgentOutput) {
	op := &request.Operation{
		Name:       opUpdateContainerAgent,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateContainerAgentInput{}
	}

	// NOTE(review): output is still nil when passed to newRequest; the real
	// output struct is created on the next line and attached via req.Data.
	req = c.newRequest(op, input, output)
	output = &UpdateContainerAgentOutput{}
	req.Data = output
	return
}

// Updates the Amazon ECS container agent on a specified container instance.
// Updating the Amazon ECS container agent does not interrupt running tasks
// or services on the container instance. The process for updating the agent
// differs depending on whether your container instance was launched with the
// Amazon ECS-optimized AMI or another operating system.
//
// UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux
// with the ecs-init service installed and running. For help updating the Amazon
// ECS container agent on other operating systems, see Manually Updating the
// Amazon ECS Container Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent)
// in the Amazon EC2 Container Service Developer Guide.
func (c *ECS) UpdateContainerAgent(input *UpdateContainerAgentInput) (*UpdateContainerAgentOutput, error) {
	req, out := c.UpdateContainerAgentRequest(input)
	err := req.Send()
	return out, err
}

const opUpdateService = "UpdateService"

// UpdateServiceRequest generates a request for the UpdateService operation.
func (c *ECS) UpdateServiceRequest(input *UpdateServiceInput) (req *request.Request, output *UpdateServiceOutput) {
	op := &request.Operation{
		Name:       opUpdateService,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateServiceInput{}
	}

	req = c.newRequest(op, input, output)
	output = &UpdateServiceOutput{}
	req.Data = output
	return
}

// Modifies the desired count, deployment configuration, or task definition
// used in a service.
//
// You can add to or subtract from the number of instantiations of a task definition
// in a service by specifying the cluster that the service is running in and
// a new desiredCount parameter.
//
// You can use UpdateService to modify your task definition and deploy a new
// version of your service.
//
// You can also update the deployment configuration of a service. When a deployment
// is triggered by updating the task definition of a service, the service scheduler
// uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent,
// to determine the deployment strategy.
//
// If the minimumHealthyPercent is below 100%, the scheduler can ignore the
// desiredCount temporarily during a deployment. For example, if your service
// has a desiredCount of four tasks, a minimumHealthyPercent of 50% allows the
// scheduler to stop two existing tasks before starting two new tasks. Tasks
// for services that do not use a load balancer are considered healthy if they
// are in the RUNNING state; tasks for services that do use a load balancer
// are considered healthy if they are in the RUNNING state and the container
// instance it is hosted on is reported as healthy by the load balancer.
//
// The maximumPercent parameter represents an upper limit on the number of
// running tasks during a deployment, which enables you to define the deployment
// batch size. For example, if your service has a desiredCount of four tasks,
// a maximumPercent value of 200% starts four new tasks before stopping the
// four older tasks (provided that the cluster resources required to do this
// are available).
//
// When UpdateService stops a task during a deployment, the equivalent of docker
// stop is issued to the containers running in the task. This results in a SIGTERM
// and a 30-second timeout, after which SIGKILL is sent and the containers are
// forcibly stopped. If the container handles the SIGTERM gracefully and exits
// within 30 seconds from receiving it, no SIGKILL is sent.
//
// When the service scheduler launches new tasks, it attempts to balance them
// across the Availability Zones in your cluster with the following logic:
//
// Determine which of the container instances in your cluster can support
// your service's task definition (for example, they have the required CPU,
// memory, ports, and container instance attributes).
//
// Sort the valid container instances by the fewest number of running tasks
// for this service in the same Availability Zone as the instance. For example,
// if zone A has one running service task and zones B and C each have zero,
// valid container instances in either zone B or C are considered optimal for
// placement.
//
// Place the new service task on a valid container instance in an optimal
// Availability Zone (based on the previous steps), favoring container instances
// with the fewest number of running tasks for this service.
func (c *ECS) UpdateService(input *UpdateServiceInput) (*UpdateServiceOutput, error) {
	req, out := c.UpdateServiceRequest(input)
	err := req.Send()
	return out, err
}

// The attributes applicable to a container instance when it is registered.
type Attribute struct {
	// Marker field; its tag identifies this type as an API "structure" to the
	// SDK's (un)marshalers.
	_ struct{} `type:"structure"`

	// The name of the container instance attribute.
	Name *string `locationName:"name" type:"string" required:"true"`

	// The value of the container instance attribute (at this time, the value here
	// is Null, but this could change in future revisions for expandability).
	Value *string `locationName:"value" type:"string"`
}

// String returns the string representation
func (s Attribute) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Attribute) GoString() string {
	return s.String()
}

// A regional grouping of one or more container instances on which you can run
// task requests. Each account receives a default cluster the first time you
// use the Amazon ECS service, but you may also create other clusters. Clusters
// may contain more than one instance type simultaneously.
type Cluster struct {
	_ struct{} `type:"structure"`

	// The number of services that are running on the cluster in an ACTIVE state.
	// You can view these services with ListServices.
	ActiveServicesCount *int64 `locationName:"activeServicesCount" type:"integer"`

	// The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains
	// the arn:aws:ecs namespace, followed by the region of the cluster, the AWS
	// account ID of the cluster owner, the cluster namespace, and then the cluster
	// name. For example, arn:aws:ecs:region:012345678910:cluster/test.
	ClusterArn *string `locationName:"clusterArn" type:"string"`

	// A user-generated string that you use to identify your cluster.
	ClusterName *string `locationName:"clusterName" type:"string"`

	// The number of tasks in the cluster that are in the PENDING state.
	PendingTasksCount *int64 `locationName:"pendingTasksCount" type:"integer"`

	// The number of container instances registered into the cluster.
	RegisteredContainerInstancesCount *int64 `locationName:"registeredContainerInstancesCount" type:"integer"`

	// The number of tasks in the cluster that are in the RUNNING state.
	RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"`

	// The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE
	// indicates that you can register container instances with the cluster and
	// the associated instances can accept tasks.
	Status *string `locationName:"status" type:"string"`
}

// String returns the string representation
func (s Cluster) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Cluster) GoString() string {
	return s.String()
}

// A Docker container that is part of a task.
type Container struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the container.
	ContainerArn *string `locationName:"containerArn" type:"string"`

	// The exit code returned from the container.
	ExitCode *int64 `locationName:"exitCode" type:"integer"`

	// The last known status of the container.
	LastStatus *string `locationName:"lastStatus" type:"string"`

	// The name of the container.
	Name *string `locationName:"name" type:"string"`

	// The network bindings associated with the container.
	NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"`

	// A short (255 max characters) human-readable string to provide additional
	// detail about a running or stopped container.
	Reason *string `locationName:"reason" type:"string"`

	// The Amazon Resource Name (ARN) of the task.
	TaskArn *string `locationName:"taskArn" type:"string"`
}

// String returns the string representation
func (s Container) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Container) GoString() string {
	return s.String()
}

// Container definitions are used in task definitions to describe the different
// containers that are launched as part of a task.
+type ContainerDefinition struct { + _ struct{} `type:"structure"` + + // The command that is passed to the container. This parameter maps to Cmd in + // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the COMMAND parameter to docker run (https://docs.docker.com/reference/commandline/run/). + // For more information, see https://docs.docker.com/reference/builder/#cmd + // (https://docs.docker.com/reference/builder/#cmd). + Command []*string `locationName:"command" type:"list"` + + // The number of cpu units reserved for the container. A container instance + // has 1,024 cpu units for every CPU core. This parameter specifies the minimum + // amount of CPU to reserve for a container, and containers share unallocated + // CPU units with other containers on the instance with the same ratio as their + // allocated amount. This parameter maps to CpuShares in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --cpu-shares option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // You can determine the number of CPU units that are available per EC2 instance + // type by multiplying the vCPUs listed for that instance type on the Amazon + // EC2 Instances (http://aws.amazon.com/ec2/instance-types/) detail page by + // 1,024. + // + // For example, if you run a single-container task on a single-core instance + // type with 512 CPU units specified for that container, and that is the only + // task running on the container instance, that container could use the full + // 1,024 CPU unit share at any given time. 
However, if you launched another + // copy of the same task on that container instance, each task would be guaranteed + // a minimum of 512 CPU units when needed, and each container could float to + // higher CPU usage if the other container was not using it, but if both tasks + // were 100% active all of the time, they would be limited to 512 CPU units. + // + // The Docker daemon on the container instance uses the CPU value to calculate + // the relative CPU share ratios for running containers. For more information, + // see CPU share constraint (https://docs.docker.com/reference/run/#cpu-share-constraint) + // in the Docker documentation. The minimum valid CPU share value that the Linux + // kernel allows is 2; however, the CPU parameter is not required, and you can + // use CPU values below 2 in your container definitions. For CPU values below + // 2 (including null), the behavior varies based on your Amazon ECS container + // agent version: + // + // Agent versions less than or equal to 1.1.0: Null and zero CPU values are + // passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU + // values of 1 are passed to Docker as 1, which the Linux kernel converts to + // 2 CPU shares. Agent versions greater than or equal to 1.2.0: Null, zero, + // and CPU values of 1 are passed to Docker as 2. + Cpu *int64 `locationName:"cpu" type:"integer"` + + // When this parameter is true, networking is disabled within the container. + // This parameter maps to NetworkDisabled in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/). + DisableNetworking *bool `locationName:"disableNetworking" type:"boolean"` + + // A list of DNS search domains that are presented to the container. 
This parameter + // maps to DnsSearch in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --dns-search option to docker run (https://docs.docker.com/reference/commandline/run/). + DnsSearchDomains []*string `locationName:"dnsSearchDomains" type:"list"` + + // A list of DNS servers that are presented to the container. This parameter + // maps to Dns in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --dns option to docker run (https://docs.docker.com/reference/commandline/run/). + DnsServers []*string `locationName:"dnsServers" type:"list"` + + // A key/value map of labels to add to the container. This parameter maps to + // Labels in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --label option to docker run (https://docs.docker.com/reference/commandline/run/). + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + DockerLabels map[string]*string `locationName:"dockerLabels" type:"map"` + + // A list of strings to provide custom labels for SELinux and AppArmor multi-level + // security systems. 
This parameter maps to SecurityOpt in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --security-opt option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // The Amazon ECS container agent running on a container instance must register + // with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment + // variables before containers placed on that instance can use these security + // options. For more information, see Amazon ECS Container Agent Configuration + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/developerguide/ecs-agent-config.html) + // in the Amazon EC2 Container Service Developer Guide. + DockerSecurityOptions []*string `locationName:"dockerSecurityOptions" type:"list"` + + // Early versions of the Amazon ECS container agent do not properly handle entryPoint + // parameters. If you have problems using entryPoint, update your container + // agent or enter your commands and arguments as command array items instead. + // + // The entry point that is passed to the container. This parameter maps to + // Entrypoint in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --entrypoint option to docker run (https://docs.docker.com/reference/commandline/run/). + // For more information, see https://docs.docker.com/reference/builder/#entrypoint + // (https://docs.docker.com/reference/builder/#entrypoint). + EntryPoint []*string `locationName:"entryPoint" type:"list"` + + // The environment variables to pass to a container. 
This parameter maps to + // Env in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --env option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // We do not recommend using plain text environment variables for sensitive + // information, such as credential data. + Environment []*KeyValuePair `locationName:"environment" type:"list"` + + // If the essential parameter of a container is marked as true, the failure + // of that container stops the task. If the essential parameter of a container + // is marked as false, then its failure does not affect the rest of the containers + // in a task. If this parameter is omitted, a container is assumed to be essential. + // + // All tasks must have at least one essential container. + Essential *bool `locationName:"essential" type:"boolean"` + + // A list of hostnames and IP address mappings to append to the /etc/hosts file + // on the container. This parameter maps to ExtraHosts in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --add-host option to docker run (https://docs.docker.com/reference/commandline/run/). + ExtraHosts []*HostEntry `locationName:"extraHosts" type:"list"` + + // The hostname to use for your container. This parameter maps to Hostname in + // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --hostname option to docker run (https://docs.docker.com/reference/commandline/run/). 
+ Hostname *string `locationName:"hostname" type:"string"` + + // The image used to start a container. This string is passed directly to the + // Docker daemon. Images in the Docker Hub registry are available by default. + // Other repositories are specified with repository-url/image:tag. Up to 255 + // letters (uppercase and lowercase), numbers, hyphens, underscores, colons, + // periods, forward slashes, and number signs are allowed. This parameter maps + // to Image in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the IMAGE parameter of docker run (https://docs.docker.com/reference/commandline/run/). + // + // Images in official repositories on Docker Hub use a single name (for example, + // ubuntu or mongo). Images in other repositories on Docker Hub are qualified + // with an organization name (for example, amazon/amazon-ecs-agent). Images + // in other online repositories are qualified further by a domain name (for + // example, quay.io/assemblyline/ubuntu). + Image *string `locationName:"image" type:"string"` + + // The link parameter allows containers to communicate with each other without + // the need for port mappings, using the name parameter and optionally, an alias + // for the link. This construct is analogous to name:alias in Docker links. + // Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores + // are allowed for each name and alias. For more information on linking Docker + // containers, see https://docs.docker.com/userguide/dockerlinks/ (https://docs.docker.com/userguide/dockerlinks/). 
+ // This parameter maps to Links in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --link option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // Containers that are collocated on a single container instance may be able + // to communicate with each other without requiring links or host port mappings. + // Network isolation is achieved on the container instance using security groups + // and VPC settings. + Links []*string `locationName:"links" type:"list"` + + // The log configuration specification for the container. This parameter maps + // to LogConfig in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --log-driver option to docker run (https://docs.docker.com/reference/commandline/run/). + // Valid log drivers are displayed in the LogConfiguration data type. This parameter + // requires version 1.18 of the Docker Remote API or greater on your container + // instance. To check the Docker Remote API version on your container instance, + // log into your container instance and run the following command: sudo docker + // version | grep "Server API version" + // + // The Amazon ECS container agent running on a container instance must register + // the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS + // environment variable before containers placed on that instance can use these + // log configuration options. For more information, see Amazon ECS Container + // Agent Configuration (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/developerguide/ecs-agent-config.html) + // in the Amazon EC2 Container Service Developer Guide. 
+ LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` + + // The number of MiB of memory to reserve for the container. You must specify + // a non-zero integer for this parameter; the Docker daemon reserves a minimum + // of 4 MiB of memory for a container, so you should not specify fewer than + // 4 MiB of memory for your containers. If your container attempts to exceed + // the memory allocated here, the container is killed. This parameter maps to + // Memory in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --memory option to docker run (https://docs.docker.com/reference/commandline/run/). + Memory *int64 `locationName:"memory" type:"integer"` + + // The mount points for data volumes in your container. This parameter maps + // to Volumes in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --volume option to docker run (https://docs.docker.com/reference/commandline/run/). + MountPoints []*MountPoint `locationName:"mountPoints" type:"list"` + + // The name of a container. If you are linking multiple containers together + // in a task definition, the name of one container can be entered in the links + // of another container to connect the containers. Up to 255 letters (uppercase + // and lowercase), numbers, hyphens, and underscores are allowed. This parameter + // maps to name in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --name option to docker run (https://docs.docker.com/reference/commandline/run/). 
+ Name *string `locationName:"name" type:"string"` + + // The list of port mappings for the container. Port mappings allow containers + // to access ports on the host container instance to send or receive traffic. + // This parameter maps to PortBindings in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --publish option to docker run (https://docs.docker.com/reference/commandline/run/). + // + // After a task reaches the RUNNING status, manual and automatic host and + // container port assignments are visible in the Network Bindings section of + // a container description of a selected task in the Amazon ECS console, or + // the networkBindings section DescribeTasks responses. + PortMappings []*PortMapping `locationName:"portMappings" type:"list"` + + // When this parameter is true, the container is given elevated privileges on + // the host container instance (similar to the root user). This parameter maps + // to Privileged in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --privileged option to docker run (https://docs.docker.com/reference/commandline/run/). + Privileged *bool `locationName:"privileged" type:"boolean"` + + // When this parameter is true, the container is given read-only access to its + // root file system. This parameter maps to ReadonlyRootfs in the Create a container + // (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --read-only option to docker run. 
+ ReadonlyRootFilesystem *bool `locationName:"readonlyRootFilesystem" type:"boolean"` + + // A list of ulimits to set in the container. This parameter maps to Ulimits + // in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --ulimit option to docker run (https://docs.docker.com/reference/commandline/run/). + // Valid naming values are displayed in the Ulimit data type. This parameter + // requires version 1.18 of the Docker Remote API or greater on your container + // instance. To check the Docker Remote API version on your container instance, + // log into your container instance and run the following command: sudo docker + // version | grep "Server API version" + Ulimits []*Ulimit `locationName:"ulimits" type:"list"` + + // The user name to use inside the container. This parameter maps to User in + // the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --user option to docker run (https://docs.docker.com/reference/commandline/run/). + User *string `locationName:"user" type:"string"` + + // Data volumes to mount from another container. This parameter maps to VolumesFrom + // in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --volumes-from option to docker run (https://docs.docker.com/reference/commandline/run/). + VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"` + + // The working directory in which to run commands inside the container. 
This + // parameter maps to WorkingDir in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) + // and the --workdir option to docker run (https://docs.docker.com/reference/commandline/run/). + WorkingDirectory *string `locationName:"workingDirectory" type:"string"` +} + +// String returns the string representation +func (s ContainerDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerDefinition) GoString() string { + return s.String() +} + +// An EC2 instance that is running the Amazon ECS agent and has been registered +// with a cluster. +type ContainerInstance struct { + _ struct{} `type:"structure"` + + // This parameter returns true if the agent is actually connected to Amazon + // ECS. Registered instances with an agent that may be unhealthy or stopped + // return false, and instances without a connected agent cannot accept placement + // requests. + AgentConnected *bool `locationName:"agentConnected" type:"boolean"` + + // The status of the most recent agent update. If an update has never been requested, + // this value is NULL. + AgentUpdateStatus *string `locationName:"agentUpdateStatus" type:"string" enum:"AgentUpdateStatus"` + + // The attributes set for the container instance by the Amazon ECS container + // agent at instance registration. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The Amazon Resource Name (ARN) of the container instance. The ARN contains + // the arn:aws:ecs namespace, followed by the region of the container instance, + // the AWS account ID of the container instance owner, the container-instance + // namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. 
+ ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The EC2 instance ID of the container instance. + Ec2InstanceId *string `locationName:"ec2InstanceId" type:"string"` + + // The number of tasks on the container instance that are in the PENDING status. + PendingTasksCount *int64 `locationName:"pendingTasksCount" type:"integer"` + + // The registered resources on the container instance that are in use by current + // tasks. + RegisteredResources []*Resource `locationName:"registeredResources" type:"list"` + + // The remaining resources of the container instance that are available for + // new tasks. + RemainingResources []*Resource `locationName:"remainingResources" type:"list"` + + // The number of tasks on the container instance that are in the RUNNING status. + RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"` + + // The status of the container instance. The valid values are ACTIVE or INACTIVE. + // ACTIVE indicates that the container instance can accept tasks. + Status *string `locationName:"status" type:"string"` + + // The version information for the Amazon ECS container agent and Docker daemon + // running on the container instance. + VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` +} + +// String returns the string representation +func (s ContainerInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerInstance) GoString() string { + return s.String() +} + +// The overrides that should be sent to a container. +type ContainerOverride struct { + _ struct{} `type:"structure"` + + // The command to send to the container that overrides the default command from + // the Docker image or the task definition. + Command []*string `locationName:"command" type:"list"` + + // The environment variables to send to the container. 
You can add new environment + // variables, which are added to the container at launch, or you can override + // the existing environment variables from the Docker image or the task definition. + Environment []*KeyValuePair `locationName:"environment" type:"list"` + + // The name of the container that receives the override. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s ContainerOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerOverride) GoString() string { + return s.String() +} + +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // The name of your cluster. If you do not specify a name for your cluster, + // you create a cluster named default. Up to 255 letters (uppercase and lowercase), + // numbers, hyphens, and underscores are allowed. + ClusterName *string `locationName:"clusterName" type:"string"` +} + +// String returns the string representation +func (s CreateClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterInput) GoString() string { + return s.String() +} + +type CreateClusterOutput struct { + _ struct{} `type:"structure"` + + // The full description of your new cluster. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s CreateClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterOutput) GoString() string { + return s.String() +} + +type CreateServiceInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. Up to 32 ASCII characters are allowed. 
+ ClientToken *string `locationName:"clientToken" type:"string"` + + // The short name or full Amazon Resource Name (ARN) of the cluster on which + // to run your service. If you do not specify a cluster, the default cluster + // is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` + + // The number of instantiations of the specified task definition to place and + // keep running on your cluster. + DesiredCount *int64 `locationName:"desiredCount" type:"integer" required:"true"` + + // A list of load balancer objects, containing the load balancer name, the container + // name (as it appears in a container definition), and the container port to + // access from the load balancer. + LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` + + // The name or full Amazon Resource Name (ARN) of the IAM role that allows your + // Amazon ECS container agent to make calls to your load balancer on your behalf. + // This parameter is only required if you are using a load balancer with your + // service. + Role *string `locationName:"role" type:"string"` + + // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. Service names must be unique within + // a cluster, but you can have similarly named services in multiple clusters + // within a region or across multiple regions. + ServiceName *string `locationName:"serviceName" type:"string" required:"true"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to run in your service. If a revision is not specified, + // the latest ACTIVE revision is used. 
+ TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceInput) GoString() string { + return s.String() +} + +type CreateServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of your service following the create call. + Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s CreateServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceOutput) GoString() string { + return s.String() +} + +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster to delete. + Cluster *string `locationName:"cluster" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterInput) GoString() string { + return s.String() +} + +type DeleteClusterOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deleted cluster. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +type DeleteServiceInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster that hosts the service to delete. If you do not specify + // a cluster, the default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The name of the service to delete. 
+ Service *string `locationName:"service" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceInput) GoString() string { + return s.String() +} + +type DeleteServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deleted service. + Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s DeleteServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceOutput) GoString() string { + return s.String() +} + +// The details of an Amazon ECS service deployment. +type Deployment struct { + _ struct{} `type:"structure"` + + // The Unix time in seconds and milliseconds when the service was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The most recent desired count of tasks that was specified for the service + // to deploy or maintain. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // The ID of the deployment. + Id *string `locationName:"id" type:"string"` + + // The number of tasks in the deployment that are in the PENDING status. + PendingCount *int64 `locationName:"pendingCount" type:"integer"` + + // The number of tasks in the deployment that are in the RUNNING status. + RunningCount *int64 `locationName:"runningCount" type:"integer"` + + // The status of the deployment. Valid values are PRIMARY (for the most recent + // deployment), ACTIVE (for previous deployments that still have tasks running, + // but are being replaced with the PRIMARY deployment), and INACTIVE (for deployments + // that have been completely replaced). 
+ Status *string `locationName:"status" type:"string"` + + // The most recent task definition that was specified for the service to use. + TaskDefinition *string `locationName:"taskDefinition" type:"string"` + + // The Unix time in seconds and milliseconds when the service was last updated. + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s Deployment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Deployment) GoString() string { + return s.String() +} + +// Optional deployment parameters that control how many tasks run during the +// deployment and the ordering of stopping and starting tasks. +type DeploymentConfiguration struct { + _ struct{} `type:"structure"` + + // The upper limit (as a percentage of the service's desiredCount) of the number + // of running tasks that can be running in a service during a deployment. The + // maximum number of tasks during a deployment is the desiredCount multiplied + // by the maximumPercent/100, rounded down to the nearest integer value. + MaximumPercent *int64 `locationName:"maximumPercent" type:"integer"` + + // The lower limit (as a percentage of the service's desiredCount) of the number + // of running tasks that must remain running and healthy in a service during + // a deployment. The minimum healthy tasks during a deployment is the desiredCount + // multiplied by the minimumHealthyPercent/100, rounded up to the nearest integer + // value. 
+ MinimumHealthyPercent *int64 `locationName:"minimumHealthyPercent" type:"integer"` +} + +// String returns the string representation +func (s DeploymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentConfiguration) GoString() string { + return s.String() +} + +type DeregisterContainerInstanceInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instance to deregister. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full Amazon Resource Name (ARN) of the container + // instance to deregister. The ARN contains the arn:aws:ecs namespace, followed + // by the region of the container instance, the AWS account ID of the container + // instance owner, the container-instance namespace, and then the container + // instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. + ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"` + + // Forces the deregistration of the container instance. If you have tasks running + // on the container instance when you deregister it with the force option, these + // tasks remain running and they continue to pass Elastic Load Balancing load + // balancer health checks until you terminate the instance or the tasks stop + // through some other means, but they are orphaned (no longer monitored or accounted + // for by Amazon ECS). If an orphaned task on your container instance is part + // of an Amazon ECS service, then the service scheduler starts another copy + // of that task, on a different container instance if possible. 
+ Force *bool `locationName:"force" type:"boolean"` +} + +// String returns the string representation +func (s DeregisterContainerInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterContainerInstanceInput) GoString() string { + return s.String() +} + +type DeregisterContainerInstanceOutput struct { + _ struct{} `type:"structure"` + + // An EC2 instance that is running the Amazon ECS agent and has been registered + // with a cluster. + ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"` +} + +// String returns the string representation +func (s DeregisterContainerInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterContainerInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to deregister. You must specify a revision. + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTaskDefinitionInput) GoString() string { + return s.String() +} + +type DeregisterTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deregistered task. 
+ TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s DeregisterTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTaskDefinitionOutput) GoString() string { + return s.String() +} + +type DescribeClustersInput struct { + _ struct{} `type:"structure"` + + // A space-separated list of cluster names or full cluster Amazon Resource Name + // (ARN) entries. If you do not specify a cluster, the default cluster is assumed. + Clusters []*string `locationName:"clusters" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersInput) GoString() string { + return s.String() +} + +type DescribeClustersOutput struct { + _ struct{} `type:"structure"` + + // The list of clusters. + Clusters []*Cluster `locationName:"clusters" type:"list"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersOutput) GoString() string { + return s.String() +} + +type DescribeContainerInstancesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instances to describe. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A space-separated list of container instance IDs or full Amazon Resource + // Name (ARN) entries. 
+ ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeContainerInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContainerInstancesInput) GoString() string { + return s.String() +} + +type DescribeContainerInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of container instances. + ContainerInstances []*ContainerInstance `locationName:"containerInstances" type:"list"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` +} + +// String returns the string representation +func (s DescribeContainerInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContainerInstancesOutput) GoString() string { + return s.String() +} + +type DescribeServicesInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster that hosts the service to describe. If you do not + // specify a cluster, the default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A list of services to describe. + Services []*string `locationName:"services" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesInput) GoString() string { + return s.String() +} + +type DescribeServicesOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // The list of services described. 
+ Services []*Service `locationName:"services" type:"list"` +} + +// String returns the string representation +func (s DescribeServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesOutput) GoString() string { + return s.String() +} + +type DescribeTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // The family for the latest ACTIVE revision, family and revision (family:revision) + // for a specific revision in the family, or full Amazon Resource Name (ARN) + // of the task definition to describe. + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTaskDefinitionInput) GoString() string { + return s.String() +} + +type DescribeTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full task definition description. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s DescribeTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTaskDefinitionOutput) GoString() string { + return s.String() +} + +type DescribeTasksInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the task to describe. If you do not specify a cluster, the default cluster + // is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A space-separated list of task IDs or full Amazon Resource Name (ARN) entries. 
+ Tasks []*string `locationName:"tasks" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTasksInput) GoString() string { + return s.String() +} + +type DescribeTasksOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // The list of tasks. + Tasks []*Task `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s DescribeTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTasksOutput) GoString() string { + return s.String() +} + +type DiscoverPollEndpointInput struct { + _ struct{} `type:"structure"` + + // The cluster that the container instance belongs to. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full Amazon Resource Name (ARN) of the container + // instance. The ARN contains the arn:aws:ecs namespace, followed by the region + // of the container instance, the AWS account ID of the container instance owner, + // the container-instance namespace, and then the container instance ID. For + // example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. + ContainerInstance *string `locationName:"containerInstance" type:"string"` +} + +// String returns the string representation +func (s DiscoverPollEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiscoverPollEndpointInput) GoString() string { + return s.String() +} + +type DiscoverPollEndpointOutput struct { + _ struct{} `type:"structure"` + + // The endpoint for the Amazon ECS agent to poll. 
+ Endpoint *string `locationName:"endpoint" type:"string"` + + // The telemetry endpoint for the Amazon ECS agent. + TelemetryEndpoint *string `locationName:"telemetryEndpoint" type:"string"` +} + +// String returns the string representation +func (s DiscoverPollEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiscoverPollEndpointOutput) GoString() string { + return s.String() +} + +// A failed resource. +type Failure struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the failed resource. + Arn *string `locationName:"arn" type:"string"` + + // The reason for the failure. + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s Failure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Failure) GoString() string { + return s.String() +} + +// Hostnames and IP address entries that are added to the /etc/hosts file of +// a container via the extraHosts parameter of its ContainerDefinition. +type HostEntry struct { + _ struct{} `type:"structure"` + + // The hostname to use in the /etc/hosts entry. + Hostname *string `locationName:"hostname" type:"string" required:"true"` + + // The IP address to use in the /etc/hosts entry. + IpAddress *string `locationName:"ipAddress" type:"string" required:"true"` +} + +// String returns the string representation +func (s HostEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostEntry) GoString() string { + return s.String() +} + +// Details on a container instance host volume. +type HostVolumeProperties struct { + _ struct{} `type:"structure"` + + // The path on the host container instance that is presented to the container. + // If this parameter is empty, then the Docker daemon has assigned a host path + // for you. 
If the host parameter contains a sourcePath file location, then + // the data volume persists at the specified location on the host container + // instance until you delete it manually. If the sourcePath value does not exist + // on the host container instance, the Docker daemon creates it. If the location + // does exist, the contents of the source path folder are exported. + SourcePath *string `locationName:"sourcePath" type:"string"` +} + +// String returns the string representation +func (s HostVolumeProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostVolumeProperties) GoString() string { + return s.String() +} + +// A key and value pair object. +type KeyValuePair struct { + _ struct{} `type:"structure"` + + // The name of the key value pair. For environment variables, this is the name + // of the environment variable. + Name *string `locationName:"name" type:"string"` + + // The value of the key value pair. For environment variables, this is the value + // of the environment variable. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s KeyValuePair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyValuePair) GoString() string { + return s.String() +} + +type ListClustersInput struct { + _ struct{} `type:"structure"` + + // The maximum number of cluster results returned by ListClusters in paginated + // output. When this parameter is used, ListClusters only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListClusters + // request with the returned nextToken value. This value can be between 1 and + // 100. If this parameter is not used, then ListClusters returns up to 100 results + // and a nextToken value if applicable. 
+ MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListClusters request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersInput) GoString() string { + return s.String() +} + +type ListClustersOutput struct { + _ struct{} `type:"structure"` + + // The list of full Amazon Resource Name (ARN) entries for each cluster associated + // with your account. + ClusterArns []*string `locationName:"clusterArns" type:"list"` + + // The nextToken value to include in a future ListClusters request. When the + // results of a ListClusters request exceed maxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersOutput) GoString() string { + return s.String() +} + +type ListContainerInstancesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instances to list. If you do not specify a cluster, the default + // cluster is assumed.. + Cluster *string `locationName:"cluster" type:"string"` + + // The maximum number of container instance results returned by ListContainerInstances + // in paginated output. 
When this parameter is used, ListContainerInstances + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListContainerInstances request with the returned nextToken value. + // This value can be between 1 and 100. If this parameter is not used, then + // ListContainerInstances returns up to 100 results and a nextToken value if + // applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListContainerInstances + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListContainerInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListContainerInstancesInput) GoString() string { + return s.String() +} + +type ListContainerInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of container instances with full Amazon Resource Name (ARN) entries + // for each container instance associated with the specified cluster. + ContainerInstanceArns []*string `locationName:"containerInstanceArns" type:"list"` + + // The nextToken value to include in a future ListContainerInstances request. + // When the results of a ListContainerInstances request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListContainerInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListContainerInstancesOutput) GoString() string { + return s.String() +} + +type ListServicesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the services to list. If you do not specify a cluster, the default cluster + // is assumed.. + Cluster *string `locationName:"cluster" type:"string"` + + // The maximum number of container instance results returned by ListServices + // in paginated output. When this parameter is used, ListServices only returns + // maxResults results in a single page along with a nextToken response element. + // The remaining results of the initial request can be seen by sending another + // ListServices request with the returned nextToken value. This value can be + // between 1 and 10. If this parameter is not used, then ListServices returns + // up to 10 results and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListServices request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServicesInput) GoString() string { + return s.String() +} + +type ListServicesOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListServices request. When the + // results of a ListServices request exceed maxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of full Amazon Resource Name (ARN) entries for each service associated + // with the specified cluster. + ServiceArns []*string `locationName:"serviceArns" type:"list"` +} + +// String returns the string representation +func (s ListServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServicesOutput) GoString() string { + return s.String() +} + +type ListTaskDefinitionFamiliesInput struct { + _ struct{} `type:"structure"` + + // The familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies. + // If you specify a familyPrefix, only task definition family names that begin + // with the familyPrefix string are returned. + FamilyPrefix *string `locationName:"familyPrefix" type:"string"` + + // The maximum number of task definition family results returned by ListTaskDefinitionFamilies + // in paginated output. When this parameter is used, ListTaskDefinitions only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListTaskDefinitionFamilies request with the returned nextToken value. + // This value can be between 1 and 100. 
If this parameter is not used, then + // ListTaskDefinitionFamilies returns up to 100 results and a nextToken value + // if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTaskDefinitionFamilies + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListTaskDefinitionFamiliesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionFamiliesInput) GoString() string { + return s.String() +} + +type ListTaskDefinitionFamiliesOutput struct { + _ struct{} `type:"structure"` + + // The list of task definition family names that match the ListTaskDefinitionFamilies + // request. + Families []*string `locationName:"families" type:"list"` + + // The nextToken value to include in a future ListTaskDefinitionFamilies request. + // When the results of a ListTaskDefinitionFamilies request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListTaskDefinitionFamiliesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionFamiliesOutput) GoString() string { + return s.String() +} + +type ListTaskDefinitionsInput struct { + _ struct{} `type:"structure"` + + // The full family name with which to filter the ListTaskDefinitions results. 
+ // Specifying a familyPrefix limits the listed task definitions to task definition + // revisions that belong to that family. + FamilyPrefix *string `locationName:"familyPrefix" type:"string"` + + // The maximum number of task definition results returned by ListTaskDefinitions + // in paginated output. When this parameter is used, ListTaskDefinitions only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListTaskDefinitions request with the returned nextToken value. This + // value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitions + // returns up to 100 results and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTaskDefinitions + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The order in which to sort the results. Valid values are ASC and DESC. By + // default (ASC), task definitions are listed lexicographically by family name + // and in ascending numerical order by revision so that the newest task definitions + // in a family are listed last. Setting this parameter to DESC reverses the + // sort order on family name and revision so that the newest task definitions + // in a family are listed first. + Sort *string `locationName:"sort" type:"string" enum:"SortOrder"` + + // The task definition status with which to filter the ListTaskDefinitions results. + // By default, only ACTIVE task definitions are listed. 
By setting this parameter + // to INACTIVE, you can view task definitions that are INACTIVE as long as an + // active task or service still references them. If you paginate the resulting + // output, be sure to keep the status value constant in each subsequent request. + Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"` +} + +// String returns the string representation +func (s ListTaskDefinitionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionsInput) GoString() string { + return s.String() +} + +type ListTaskDefinitionsOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListTaskDefinitions request. When + // the results of a ListTaskDefinitions request exceed maxResults, this value + // can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefinitions + // request. + TaskDefinitionArns []*string `locationName:"taskDefinitionArns" type:"list"` +} + +// String returns the string representation +func (s ListTaskDefinitionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionsOutput) GoString() string { + return s.String() +} + +type ListTasksInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the tasks to list. If you do not specify a cluster, the default cluster is + // assumed.. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full Amazon Resource Name (ARN) of the container + // instance with which to filter the ListTasks results. 
Specifying a containerInstance + // limits the results to tasks that belong to that container instance. + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + // The task status with which to filter the ListTasks results. Specifying a + // desiredStatus of STOPPED limits the results to tasks that are in the STOPPED + // status, which can be useful for debugging tasks that are not starting properly + // or have died or finished. The default status filter is RUNNING. + DesiredStatus *string `locationName:"desiredStatus" type:"string" enum:"DesiredStatus"` + + // The name of the family with which to filter the ListTasks results. Specifying + // a family limits the results to tasks that belong to that family. + Family *string `locationName:"family" type:"string"` + + // The maximum number of task results returned by ListTasks in paginated output. + // When this parameter is used, ListTasks only returns maxResults results in + // a single page along with a nextToken response element. The remaining results + // of the initial request can be seen by sending another ListTasks request with + // the returned nextToken value. This value can be between 1 and 100. If this + // parameter is not used, then ListTasks returns up to 100 results and a nextToken + // value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTasks request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the service with which to filter the ListTasks results. Specifying + // a serviceName limits the results to tasks that belong to that service. 
+ ServiceName *string `locationName:"serviceName" type:"string"` + + // The startedBy value with which to filter the task results. Specifying a startedBy + // value limits the results to tasks that were started with that value. + StartedBy *string `locationName:"startedBy" type:"string"` +} + +// String returns the string representation +func (s ListTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTasksInput) GoString() string { + return s.String() +} + +type ListTasksOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListTasks request. When the results + // of a ListTasks request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of task Amazon Resource Name (ARN) entries for the ListTasks request. + TaskArns []*string `locationName:"taskArns" type:"list"` +} + +// String returns the string representation +func (s ListTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTasksOutput) GoString() string { + return s.String() +} + +// Details on a load balancer that is used with a service. +type LoadBalancer struct { + _ struct{} `type:"structure"` + + // The name of the container (as it appears in a container definition) to associate + // with the load balancer. + ContainerName *string `locationName:"containerName" type:"string"` + + // The port on the container to associate with the load balancer. This port + // must correspond to a containerPort in the service's task definition. Your + // container instances must allow ingress traffic on the hostPort of the port + // mapping. + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The name of the load balancer. 
+ LoadBalancerName *string `locationName:"loadBalancerName" type:"string"` +} + +// String returns the string representation +func (s LoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancer) GoString() string { + return s.String() +} + +// Log configuration options to send to a custom log driver for the container. +type LogConfiguration struct { + _ struct{} `type:"structure"` + + // The log driver to use for the container. This parameter requires version + // 1.18 of the Docker Remote API or greater on your container instance. To check + // the Docker Remote API version on your container instance, log into your container + // instance and run the following command: sudo docker version | grep "Server + // API version" + LogDriver *string `locationName:"logDriver" type:"string" required:"true" enum:"LogDriver"` + + // The configuration options to send to the log driver. This parameter requires + // version 1.19 of the Docker Remote API or greater on your container instance. + // To check the Docker Remote API version on your container instance, log into + // your container instance and run the following command: sudo docker version + // | grep "Server API version" + Options map[string]*string `locationName:"options" type:"map"` +} + +// String returns the string representation +func (s LogConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogConfiguration) GoString() string { + return s.String() +} + +// Details on a volume mount point that is used in a container definition. +type MountPoint struct { + _ struct{} `type:"structure"` + + // The path on the container to mount the host volume at. + ContainerPath *string `locationName:"containerPath" type:"string"` + + // If this value is true, the container has read-only access to the volume. + // If this value is false, then the container can write to the volume. 
The default + // value is false. + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + // The name of the volume to mount. + SourceVolume *string `locationName:"sourceVolume" type:"string"` +} + +// String returns the string representation +func (s MountPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MountPoint) GoString() string { + return s.String() +} + +// Details on the network bindings between a container and its host container +// instance. After a task reaches the RUNNING status, manual and automatic host +// and container port assignments are visible in the networkBindings section +// of DescribeTasks API responses. +type NetworkBinding struct { + _ struct{} `type:"structure"` + + // The IP address that the container is bound to on the container instance. + BindIP *string `locationName:"bindIP" type:"string"` + + // The port number on the container that is be used with the network binding. + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The port number on the host that is used with the network binding. + HostPort *int64 `locationName:"hostPort" type:"integer"` + + // The protocol used for the network binding. + Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"` +} + +// String returns the string representation +func (s NetworkBinding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkBinding) GoString() string { + return s.String() +} + +// Port mappings allow containers to access ports on the host container instance +// to send or receive traffic. Port mappings are specified as part of the container +// definition. After a task reaches the RUNNING status, manual and automatic +// host and container port assignments are visible in the networkBindings section +// of DescribeTasks API responses. 
+type PortMapping struct { + _ struct{} `type:"structure"` + + // The port number on the container that is bound to the user-specified or automatically + // assigned host port. If you specify a container port and not a host port, + // your container automatically receives a host port in the ephemeral port range + // (for more information, see hostPort). + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The port number on the container instance to reserve for your container. + // You can specify a non-reserved host port for your container port mapping, + // or you can omit the hostPort (or set it to 0) while specifying a containerPort + // and your container automatically receives a port in the ephemeral port range + // for your container instance operating system and Docker version. + // + // The default ephemeral port range is 49153 to 65535, and this range is used + // for Docker versions prior to 1.6.0. For Docker version 1.6.0 and later, the + // Docker daemon tries to read the ephemeral port range from /proc/sys/net/ipv4/ip_local_port_range; + // if this kernel parameter is unavailable, the default ephemeral port range + // is used. You should not attempt to specify a host port in the ephemeral port + // range, because these are reserved for automatic assignment. In general, ports + // below 32768 are outside of the ephemeral port range. + // + // The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, + // and the Amazon ECS container agent port 51678. Any host port that was previously + // specified in a running task is also reserved while the task is running (after + // a task stops, the host port is released).The current reserved ports are displayed + // in the remainingResources of DescribeContainerInstances output, and a container + // instance may have up to 50 reserved ports at a time, including the default + // reserved ports (automatically assigned ports do not count toward this limit). 
+ HostPort *int64 `locationName:"hostPort" type:"integer"` + + // The protocol used for the port mapping. Valid values are tcp and udp. The + // default is tcp. + Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"` +} + +// String returns the string representation +func (s PortMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortMapping) GoString() string { + return s.String() +} + +type RegisterContainerInstanceInput struct { + _ struct{} `type:"structure"` + + // The container instance attributes that this container instance supports. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The short name or full Amazon Resource Name (ARN) of the cluster with which + // to register your container instance. If you do not specify a cluster, the + // default cluster is assumed.. + Cluster *string `locationName:"cluster" type:"string"` + + // The Amazon Resource Name (ARN) of the container instance (if it was previously + // registered). + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The instance identity document for the EC2 instance to register. This document + // can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/document/ + InstanceIdentityDocument *string `locationName:"instanceIdentityDocument" type:"string"` + + // The instance identity document signature for the EC2 instance to register. + // This signature can be found by running the following command from the instance: + // curl http://169.254.169.254/latest/dynamic/instance-identity/signature/ + InstanceIdentityDocumentSignature *string `locationName:"instanceIdentityDocumentSignature" type:"string"` + + // The resources available on the instance. 
+ TotalResources []*Resource `locationName:"totalResources" type:"list"` + + // The version information for the Amazon ECS container agent and Docker daemon + // running on the container instance. + VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` +} + +// String returns the string representation +func (s RegisterContainerInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterContainerInstanceInput) GoString() string { + return s.String() +} + +type RegisterContainerInstanceOutput struct { + _ struct{} `type:"structure"` + + // An EC2 instance that is running the Amazon ECS agent and has been registered + // with a cluster. + ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"` +} + +// String returns the string representation +func (s RegisterContainerInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterContainerInstanceOutput) GoString() string { + return s.String() +} + +type RegisterTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // A list of container definitions in JSON format that describe the different + // containers that make up your task. + ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list" required:"true"` + + // You must specify a family for a task definition, which allows you to track + // multiple versions of the same task definition. The family is used as a name + // for your task definition. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. + Family *string `locationName:"family" type:"string" required:"true"` + + // A list of volume definitions in JSON format that containers in your task + // may use. 
+ Volumes []*Volume `locationName:"volumes" type:"list"` +} + +// String returns the string representation +func (s RegisterTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTaskDefinitionInput) GoString() string { + return s.String() +} + +type RegisterTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full description of the registered task definition. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s RegisterTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTaskDefinitionOutput) GoString() string { + return s.String() +} + +// Describes the resources available for a container instance. +type Resource struct { + _ struct{} `type:"structure"` + + // When the doubleValue type is set, the value of the resource must be a double + // precision floating-point type. + DoubleValue *float64 `locationName:"doubleValue" type:"double"` + + // When the integerValue type is set, the value of the resource must be an integer. + IntegerValue *int64 `locationName:"integerValue" type:"integer"` + + // When the longValue type is set, the value of the resource must be an extended + // precision floating-point type. + LongValue *int64 `locationName:"longValue" type:"long"` + + // The name of the resource, such as CPU, MEMORY, PORTS, or a user-defined resource. + Name *string `locationName:"name" type:"string"` + + // When the stringSetValue type is set, the value of the resource must be a + // string type. + StringSetValue []*string `locationName:"stringSetValue" type:"list"` + + // The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET. 
+ Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s Resource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resource) GoString() string { + return s.String() +} + +type RunTaskInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster on which + // to run your task. If you do not specify a cluster, the default cluster is + // assumed.. + Cluster *string `locationName:"cluster" type:"string"` + + // The number of instantiations of the specified task to place on your cluster. + // + // The count parameter is limited to 10 tasks per call. + Count *int64 `locationName:"count" type:"integer"` + + // A list of container overrides in JSON format that specify the name of a container + // in the specified task definition and the overrides it should receive. You + // can override the default command for a container (that is specified in the + // task definition or Docker image) with a command override. You can also override + // existing environment variables (that are specified in the task definition + // or Docker image) on a container or add new environment variables to it with + // an environment override. + // + // A total of 8192 characters are allowed for overrides. This limit includes + // the JSON formatting characters of the override structure. + Overrides *TaskOverride `locationName:"overrides" type:"structure"` + + // An optional tag specified when a task is started. For example if you automatically + // trigger a task to run a batch process job, you could apply a unique identifier + // for that job to your task with the startedBy parameter. You can then identify + // which tasks belong to that job by filtering the results of a ListTasks call + // with the startedBy value. 
+ // + // If a task is started by an Amazon ECS service, then the startedBy parameter + // contains the deployment ID of the service that starts it. + StartedBy *string `locationName:"startedBy" type:"string"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to run. If a revision is not specified, the latest + // ACTIVE revision is used. + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s RunTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunTaskInput) GoString() string { + return s.String() +} + +type RunTaskOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // A full description of the tasks that were run. Each task that was successfully + // placed on your cluster are described here. + Tasks []*Task `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s RunTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunTaskOutput) GoString() string { + return s.String() +} + +// Details on a service within a cluster +type Service struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the of the cluster that hosts the service. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` + + // The current state of deployments for the service. 
+ Deployments []*Deployment `locationName:"deployments" type:"list"` + + // The desired number of instantiations of the task definition to keep running + // on the service. This value is specified when the service is created with + // CreateService, and it can be modified with UpdateService. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // The event stream for your service. A maximum of 100 of the latest events + // are displayed. + Events []*ServiceEvent `locationName:"events" type:"list"` + + // A list of load balancer objects, containing the load balancer name, the container + // name (as it appears in a container definition), and the container port to + // access from the load balancer. + LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` + + // The number of tasks in the cluster that are in the PENDING state. + PendingCount *int64 `locationName:"pendingCount" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM role associated with the service + // that allows the Amazon ECS container agent to register container instances + // with a load balancer. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The number of tasks in the cluster that are in the RUNNING state. + RunningCount *int64 `locationName:"runningCount" type:"integer"` + + // The Amazon Resource Name (ARN) that identifies the service. The ARN contains + // the arn:aws:ecs namespace, followed by the region of the service, the AWS + // account ID of the service owner, the service namespace, and then the service + // name. For example, arn:aws:ecs:region:012345678910:service/my-service. + ServiceArn *string `locationName:"serviceArn" type:"string"` + + // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. 
Service names must be unique within + // a cluster, but you can have similarly named services in multiple clusters + // within a region or across multiple regions. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The status of the service. The valid values are ACTIVE, DRAINING, or INACTIVE. + Status *string `locationName:"status" type:"string"` + + // The task definition to use for tasks in the service. This value is specified + // when the service is created with CreateService, and it can be modified with + // UpdateService. + TaskDefinition *string `locationName:"taskDefinition" type:"string"` +} + +// String returns the string representation +func (s Service) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Service) GoString() string { + return s.String() +} + +// Details on an event associated with a service. +type ServiceEvent struct { + _ struct{} `type:"structure"` + + // The Unix time in seconds and milliseconds when the event was triggered. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The ID string of the event. + Id *string `locationName:"id" type:"string"` + + // The event message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ServiceEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceEvent) GoString() string { + return s.String() +} + +type StartTaskInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster on which + // to start your task. If you do not specify a cluster, the default cluster + // is assumed.. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance IDs or full Amazon Resource Name (ARN) entries for + // the container instances on which you would like to place your task. 
+ // + // The list of container instances to start tasks on is limited to 10. + ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"` + + // A list of container overrides in JSON format that specify the name of a container + // in the specified task definition and the overrides it should receive. You + // can override the default command for a container (that is specified in the + // task definition or Docker image) with a command override. You can also override + // existing environment variables (that are specified in the task definition + // or Docker image) on a container or add new environment variables to it with + // an environment override. + // + // A total of 8192 characters are allowed for overrides. This limit includes + // the JSON formatting characters of the override structure. + Overrides *TaskOverride `locationName:"overrides" type:"structure"` + + // An optional tag specified when a task is started. For example if you automatically + // trigger a task to run a batch process job, you could apply a unique identifier + // for that job to your task with the startedBy parameter. You can then identify + // which tasks belong to that job by filtering the results of a ListTasks call + // with the startedBy value. + // + // If a task is started by an Amazon ECS service, then the startedBy parameter + // contains the deployment ID of the service that starts it. + StartedBy *string `locationName:"startedBy" type:"string"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to start. If a revision is not specified, the latest + // ACTIVE revision is used. 
+ TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartTaskInput) GoString() string { + return s.String() +} + +type StartTaskOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // A full description of the tasks that were started. Each task that was successfully + // placed on your container instances are described here. + Tasks []*Task `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s StartTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartTaskOutput) GoString() string { + return s.String() +} + +type StopTaskInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the task to stop. If you do not specify a cluster, the default cluster is + // assumed.. + Cluster *string `locationName:"cluster" type:"string"` + + // An optional message specified when a task is stopped. For example, if you + // are using a custom scheduler, you can use this parameter to specify the reason + // for stopping the task here, and the message will appear in subsequent DescribeTasks + // API operations on this task. Up to 255 characters are allowed in this message. + Reason *string `locationName:"reason" type:"string"` + + // The task ID or full Amazon Resource Name (ARN) entry of the task to stop. 
+ Task *string `locationName:"task" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopTaskInput) GoString() string { + return s.String() +} + +type StopTaskOutput struct { + _ struct{} `type:"structure"` + + // Details on a task in a cluster. + Task *Task `locationName:"task" type:"structure"` +} + +// String returns the string representation +func (s StopTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopTaskOutput) GoString() string { + return s.String() +} + +type SubmitContainerStateChangeInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container. + Cluster *string `locationName:"cluster" type:"string"` + + // The name of the container. + ContainerName *string `locationName:"containerName" type:"string"` + + // The exit code returned for the state change request. + ExitCode *int64 `locationName:"exitCode" type:"integer"` + + // The network bindings of the container. + NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"` + + // The reason for the state change request. + Reason *string `locationName:"reason" type:"string"` + + // The status of the state change request. + Status *string `locationName:"status" type:"string"` + + // The task ID or full Amazon Resource Name (ARN) of the task that hosts the + // container. 
+ Task *string `locationName:"task" type:"string"` +} + +// String returns the string representation +func (s SubmitContainerStateChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitContainerStateChangeInput) GoString() string { + return s.String() +} + +type SubmitContainerStateChangeOutput struct { + _ struct{} `type:"structure"` + + // Acknowledgement of the state change. + Acknowledgment *string `locationName:"acknowledgment" type:"string"` +} + +// String returns the string representation +func (s SubmitContainerStateChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitContainerStateChangeOutput) GoString() string { + return s.String() +} + +type SubmitTaskStateChangeInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the task. + Cluster *string `locationName:"cluster" type:"string"` + + // The reason for the state change request. + Reason *string `locationName:"reason" type:"string"` + + // The status of the state change request. + Status *string `locationName:"status" type:"string"` + + // The task ID or full Amazon Resource Name (ARN) of the task in the state change + // request. + Task *string `locationName:"task" type:"string"` +} + +// String returns the string representation +func (s SubmitTaskStateChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitTaskStateChangeInput) GoString() string { + return s.String() +} + +type SubmitTaskStateChangeOutput struct { + _ struct{} `type:"structure"` + + // Acknowledgement of the state change. 
+ Acknowledgment *string `locationName:"acknowledgment" type:"string"` +} + +// String returns the string representation +func (s SubmitTaskStateChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitTaskStateChangeOutput) GoString() string { + return s.String() +} + +// Details on a task in a cluster. +type Task struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the of the cluster that hosts the task. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the container instances that host the task. + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The containers associated with the task. + Containers []*Container `locationName:"containers" type:"list"` + + // The Unix time in seconds and milliseconds when the task was created (the + // task entered the PENDING state). + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The desired status of the task. + DesiredStatus *string `locationName:"desiredStatus" type:"string"` + + // The last known status of the task. + LastStatus *string `locationName:"lastStatus" type:"string"` + + // One or more container overrides. + Overrides *TaskOverride `locationName:"overrides" type:"structure"` + + // The Unix time in seconds and milliseconds when the task was started (the + // task transitioned from the PENDING state to the RUNNING state). + StartedAt *time.Time `locationName:"startedAt" type:"timestamp" timestampFormat:"unix"` + + // The tag specified when a task is started. If the task is started by an Amazon + // ECS service, then the startedBy parameter contains the deployment ID of the + // service that starts it. 
+ StartedBy *string `locationName:"startedBy" type:"string"` + + // The Unix time in seconds and milliseconds when the task was stopped (the + // task transitioned from the RUNNING state to the STOPPED state). + StoppedAt *time.Time `locationName:"stoppedAt" type:"timestamp" timestampFormat:"unix"` + + // The reason the task was stopped. + StoppedReason *string `locationName:"stoppedReason" type:"string"` + + // The Amazon Resource Name (ARN) of the task. + TaskArn *string `locationName:"taskArn" type:"string"` + + // The Amazon Resource Name (ARN) of the of the task definition that creates + // the task. + TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"` +} + +// String returns the string representation +func (s Task) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Task) GoString() string { + return s.String() +} + +// Details of a task definition. +type TaskDefinition struct { + _ struct{} `type:"structure"` + + // A list of container definitions in JSON format that describe the different + // containers that make up your task. For more information about container definition + // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) + // in the Amazon EC2 Container Service Developer Guide. + ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list"` + + // The family of your task definition, used as the definition name. + Family *string `locationName:"family" type:"string"` + + // The container instance attributes required by your task. + RequiresAttributes []*Attribute `locationName:"requiresAttributes" type:"list"` + + // The revision of the task in a particular family. The revision is a version + // number of a task definition in a family. 
When you register a task definition + // for the first time, the revision is 1; each time you register a new revision + // of a task definition in the same family, the revision value always increases + // by one (even if you have deregistered previous revisions in this family). + Revision *int64 `locationName:"revision" type:"integer"` + + // The status of the task definition. + Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"` + + // The full Amazon Resource Name (ARN) of the of the task definition. + TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"` + + // The list of volumes in a task. For more information about volume definition + // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) + // in the Amazon EC2 Container Service Developer Guide. + Volumes []*Volume `locationName:"volumes" type:"list"` +} + +// String returns the string representation +func (s TaskDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskDefinition) GoString() string { + return s.String() +} + +// The overrides associated with a task. +type TaskOverride struct { + _ struct{} `type:"structure"` + + // One or more container overrides sent to a task. + ContainerOverrides []*ContainerOverride `locationName:"containerOverrides" type:"list"` +} + +// String returns the string representation +func (s TaskOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskOverride) GoString() string { + return s.String() +} + +// The ulimit settings to pass to the container. +type Ulimit struct { + _ struct{} `type:"structure"` + + // The hard limit for the ulimit type. + HardLimit *int64 `locationName:"hardLimit" type:"integer" required:"true"` + + // The type of the ulimit. 
+ Name *string `locationName:"name" type:"string" required:"true" enum:"UlimitName"` + + // The soft limit for the ulimit type. + SoftLimit *int64 `locationName:"softLimit" type:"integer" required:"true"` +} + +// String returns the string representation +func (s Ulimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ulimit) GoString() string { + return s.String() +} + +type UpdateContainerAgentInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that your + // container instance is running on. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full Amazon Resource Name (ARN) entries for + // the container instance on which you would like to update the Amazon ECS container + // agent. + ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateContainerAgentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateContainerAgentInput) GoString() string { + return s.String() +} + +type UpdateContainerAgentOutput struct { + _ struct{} `type:"structure"` + + // An EC2 instance that is running the Amazon ECS agent and has been registered + // with a cluster. 
+ ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"` +} + +// String returns the string representation +func (s UpdateContainerAgentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateContainerAgentOutput) GoString() string { + return s.String() +} + +type UpdateServiceInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that your + // service is running on. If you do not specify a cluster, the default cluster + // is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. + DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` + + // The number of instantiations of the task to place and keep running in your + // service. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // The name of the service to update. + Service *string `locationName:"service" type:"string" required:"true"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to run in your service. If a revision is not specified, + // the latest ACTIVE revision is used. If you modify the task definition with + // UpdateService, Amazon ECS spawns a task with the new version of the task + // definition and then stops an old task after the new version is running. 
+ TaskDefinition *string `locationName:"taskDefinition" type:"string"` +} + +// String returns the string representation +func (s UpdateServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceInput) GoString() string { + return s.String() +} + +type UpdateServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of your service following the update call. + Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s UpdateServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceOutput) GoString() string { + return s.String() +} + +// The Docker and Amazon ECS container agent version information about a container +// instance. +type VersionInfo struct { + _ struct{} `type:"structure"` + + // The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent + // (https://github.com/aws/amazon-ecs-agent/commits/master) GitHub repository. + AgentHash *string `locationName:"agentHash" type:"string"` + + // The version number of the Amazon ECS container agent. + AgentVersion *string `locationName:"agentVersion" type:"string"` + + // The Docker version running on the container instance. + DockerVersion *string `locationName:"dockerVersion" type:"string"` +} + +// String returns the string representation +func (s VersionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersionInfo) GoString() string { + return s.String() +} + +// A data volume used in a task definition. +type Volume struct { + _ struct{} `type:"structure"` + + // The contents of the host parameter determine whether your data volume persists + // on the host container instance and where it is stored. 
If the host parameter + // is empty, then the Docker daemon assigns a host path for your data volume, + // but the data is not guaranteed to persist after the containers associated + // with it stop running. + Host *HostVolumeProperties `locationName:"host" type:"structure"` + + // The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. This name is referenced in the sourceVolume + // parameter of container definition mountPoints. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Details on a data volume from another container. +type VolumeFrom struct { + _ struct{} `type:"structure"` + + // If this value is true, the container has read-only access to the volume. + // If this value is false, then the container can write to the volume. The default + // value is false. + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + // The name of the container to mount volumes from. 
+ SourceContainer *string `locationName:"sourceContainer" type:"string"` +} + +// String returns the string representation +func (s VolumeFrom) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeFrom) GoString() string { + return s.String() +} + +const ( + // @enum AgentUpdateStatus + AgentUpdateStatusPending = "PENDING" + // @enum AgentUpdateStatus + AgentUpdateStatusStaging = "STAGING" + // @enum AgentUpdateStatus + AgentUpdateStatusStaged = "STAGED" + // @enum AgentUpdateStatus + AgentUpdateStatusUpdating = "UPDATING" + // @enum AgentUpdateStatus + AgentUpdateStatusUpdated = "UPDATED" + // @enum AgentUpdateStatus + AgentUpdateStatusFailed = "FAILED" +) + +const ( + // @enum DesiredStatus + DesiredStatusRunning = "RUNNING" + // @enum DesiredStatus + DesiredStatusPending = "PENDING" + // @enum DesiredStatus + DesiredStatusStopped = "STOPPED" +) + +const ( + // @enum LogDriver + LogDriverJsonFile = "json-file" + // @enum LogDriver + LogDriverSyslog = "syslog" + // @enum LogDriver + LogDriverJournald = "journald" + // @enum LogDriver + LogDriverGelf = "gelf" + // @enum LogDriver + LogDriverFluentd = "fluentd" +) + +const ( + // @enum SortOrder + SortOrderAsc = "ASC" + // @enum SortOrder + SortOrderDesc = "DESC" +) + +const ( + // @enum TaskDefinitionStatus + TaskDefinitionStatusActive = "ACTIVE" + // @enum TaskDefinitionStatus + TaskDefinitionStatusInactive = "INACTIVE" +) + +const ( + // @enum TransportProtocol + TransportProtocolTcp = "tcp" + // @enum TransportProtocol + TransportProtocolUdp = "udp" +) + +const ( + // @enum UlimitName + UlimitNameCore = "core" + // @enum UlimitName + UlimitNameCpu = "cpu" + // @enum UlimitName + UlimitNameData = "data" + // @enum UlimitName + UlimitNameFsize = "fsize" + // @enum UlimitName + UlimitNameLocks = "locks" + // @enum UlimitName + UlimitNameMemlock = "memlock" + // @enum UlimitName + UlimitNameMsgqueue = "msgqueue" + // @enum UlimitName + UlimitNameNice = 
"nice" + // @enum UlimitName + UlimitNameNofile = "nofile" + // @enum UlimitName + UlimitNameNproc = "nproc" + // @enum UlimitName + UlimitNameRss = "rss" + // @enum UlimitName + UlimitNameRtprio = "rtprio" + // @enum UlimitName + UlimitNameRttime = "rttime" + // @enum UlimitName + UlimitNameSigpending = "sigpending" + // @enum UlimitName + UlimitNameStack = "stack" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/ecsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,134 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ecsiface provides an interface for the Amazon EC2 Container Service. +package ecsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ecs" +) + +// ECSAPI is the interface type for ecs.ECS. 
+type ECSAPI interface { + CreateClusterRequest(*ecs.CreateClusterInput) (*request.Request, *ecs.CreateClusterOutput) + + CreateCluster(*ecs.CreateClusterInput) (*ecs.CreateClusterOutput, error) + + CreateServiceRequest(*ecs.CreateServiceInput) (*request.Request, *ecs.CreateServiceOutput) + + CreateService(*ecs.CreateServiceInput) (*ecs.CreateServiceOutput, error) + + DeleteClusterRequest(*ecs.DeleteClusterInput) (*request.Request, *ecs.DeleteClusterOutput) + + DeleteCluster(*ecs.DeleteClusterInput) (*ecs.DeleteClusterOutput, error) + + DeleteServiceRequest(*ecs.DeleteServiceInput) (*request.Request, *ecs.DeleteServiceOutput) + + DeleteService(*ecs.DeleteServiceInput) (*ecs.DeleteServiceOutput, error) + + DeregisterContainerInstanceRequest(*ecs.DeregisterContainerInstanceInput) (*request.Request, *ecs.DeregisterContainerInstanceOutput) + + DeregisterContainerInstance(*ecs.DeregisterContainerInstanceInput) (*ecs.DeregisterContainerInstanceOutput, error) + + DeregisterTaskDefinitionRequest(*ecs.DeregisterTaskDefinitionInput) (*request.Request, *ecs.DeregisterTaskDefinitionOutput) + + DeregisterTaskDefinition(*ecs.DeregisterTaskDefinitionInput) (*ecs.DeregisterTaskDefinitionOutput, error) + + DescribeClustersRequest(*ecs.DescribeClustersInput) (*request.Request, *ecs.DescribeClustersOutput) + + DescribeClusters(*ecs.DescribeClustersInput) (*ecs.DescribeClustersOutput, error) + + DescribeContainerInstancesRequest(*ecs.DescribeContainerInstancesInput) (*request.Request, *ecs.DescribeContainerInstancesOutput) + + DescribeContainerInstances(*ecs.DescribeContainerInstancesInput) (*ecs.DescribeContainerInstancesOutput, error) + + DescribeServicesRequest(*ecs.DescribeServicesInput) (*request.Request, *ecs.DescribeServicesOutput) + + DescribeServices(*ecs.DescribeServicesInput) (*ecs.DescribeServicesOutput, error) + + DescribeTaskDefinitionRequest(*ecs.DescribeTaskDefinitionInput) (*request.Request, *ecs.DescribeTaskDefinitionOutput) + + 
DescribeTaskDefinition(*ecs.DescribeTaskDefinitionInput) (*ecs.DescribeTaskDefinitionOutput, error) + + DescribeTasksRequest(*ecs.DescribeTasksInput) (*request.Request, *ecs.DescribeTasksOutput) + + DescribeTasks(*ecs.DescribeTasksInput) (*ecs.DescribeTasksOutput, error) + + DiscoverPollEndpointRequest(*ecs.DiscoverPollEndpointInput) (*request.Request, *ecs.DiscoverPollEndpointOutput) + + DiscoverPollEndpoint(*ecs.DiscoverPollEndpointInput) (*ecs.DiscoverPollEndpointOutput, error) + + ListClustersRequest(*ecs.ListClustersInput) (*request.Request, *ecs.ListClustersOutput) + + ListClusters(*ecs.ListClustersInput) (*ecs.ListClustersOutput, error) + + ListClustersPages(*ecs.ListClustersInput, func(*ecs.ListClustersOutput, bool) bool) error + + ListContainerInstancesRequest(*ecs.ListContainerInstancesInput) (*request.Request, *ecs.ListContainerInstancesOutput) + + ListContainerInstances(*ecs.ListContainerInstancesInput) (*ecs.ListContainerInstancesOutput, error) + + ListContainerInstancesPages(*ecs.ListContainerInstancesInput, func(*ecs.ListContainerInstancesOutput, bool) bool) error + + ListServicesRequest(*ecs.ListServicesInput) (*request.Request, *ecs.ListServicesOutput) + + ListServices(*ecs.ListServicesInput) (*ecs.ListServicesOutput, error) + + ListServicesPages(*ecs.ListServicesInput, func(*ecs.ListServicesOutput, bool) bool) error + + ListTaskDefinitionFamiliesRequest(*ecs.ListTaskDefinitionFamiliesInput) (*request.Request, *ecs.ListTaskDefinitionFamiliesOutput) + + ListTaskDefinitionFamilies(*ecs.ListTaskDefinitionFamiliesInput) (*ecs.ListTaskDefinitionFamiliesOutput, error) + + ListTaskDefinitionFamiliesPages(*ecs.ListTaskDefinitionFamiliesInput, func(*ecs.ListTaskDefinitionFamiliesOutput, bool) bool) error + + ListTaskDefinitionsRequest(*ecs.ListTaskDefinitionsInput) (*request.Request, *ecs.ListTaskDefinitionsOutput) + + ListTaskDefinitions(*ecs.ListTaskDefinitionsInput) (*ecs.ListTaskDefinitionsOutput, error) + + 
ListTaskDefinitionsPages(*ecs.ListTaskDefinitionsInput, func(*ecs.ListTaskDefinitionsOutput, bool) bool) error + + ListTasksRequest(*ecs.ListTasksInput) (*request.Request, *ecs.ListTasksOutput) + + ListTasks(*ecs.ListTasksInput) (*ecs.ListTasksOutput, error) + + ListTasksPages(*ecs.ListTasksInput, func(*ecs.ListTasksOutput, bool) bool) error + + RegisterContainerInstanceRequest(*ecs.RegisterContainerInstanceInput) (*request.Request, *ecs.RegisterContainerInstanceOutput) + + RegisterContainerInstance(*ecs.RegisterContainerInstanceInput) (*ecs.RegisterContainerInstanceOutput, error) + + RegisterTaskDefinitionRequest(*ecs.RegisterTaskDefinitionInput) (*request.Request, *ecs.RegisterTaskDefinitionOutput) + + RegisterTaskDefinition(*ecs.RegisterTaskDefinitionInput) (*ecs.RegisterTaskDefinitionOutput, error) + + RunTaskRequest(*ecs.RunTaskInput) (*request.Request, *ecs.RunTaskOutput) + + RunTask(*ecs.RunTaskInput) (*ecs.RunTaskOutput, error) + + StartTaskRequest(*ecs.StartTaskInput) (*request.Request, *ecs.StartTaskOutput) + + StartTask(*ecs.StartTaskInput) (*ecs.StartTaskOutput, error) + + StopTaskRequest(*ecs.StopTaskInput) (*request.Request, *ecs.StopTaskOutput) + + StopTask(*ecs.StopTaskInput) (*ecs.StopTaskOutput, error) + + SubmitContainerStateChangeRequest(*ecs.SubmitContainerStateChangeInput) (*request.Request, *ecs.SubmitContainerStateChangeOutput) + + SubmitContainerStateChange(*ecs.SubmitContainerStateChangeInput) (*ecs.SubmitContainerStateChangeOutput, error) + + SubmitTaskStateChangeRequest(*ecs.SubmitTaskStateChangeInput) (*request.Request, *ecs.SubmitTaskStateChangeOutput) + + SubmitTaskStateChange(*ecs.SubmitTaskStateChangeInput) (*ecs.SubmitTaskStateChangeOutput, error) + + UpdateContainerAgentRequest(*ecs.UpdateContainerAgentInput) (*request.Request, *ecs.UpdateContainerAgentOutput) + + UpdateContainerAgent(*ecs.UpdateContainerAgentInput) (*ecs.UpdateContainerAgentOutput, error) + + UpdateServiceRequest(*ecs.UpdateServiceInput) (*request.Request, 
*ecs.UpdateServiceOutput) + + UpdateService(*ecs.UpdateServiceInput) (*ecs.UpdateServiceOutput, error) +} + +var _ ECSAPI = (*ecs.ECS)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,791 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ecs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleECS_CreateCluster() { + svc := ecs.New(session.New()) + + params := &ecs.CreateClusterInput{ + ClusterName: aws.String("String"), + } + resp, err := svc.CreateCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_CreateService() { + svc := ecs.New(session.New()) + + params := &ecs.CreateServiceInput{ + DesiredCount: aws.Int64(1), // Required + ServiceName: aws.String("String"), // Required + TaskDefinition: aws.String("String"), // Required + ClientToken: aws.String("String"), + Cluster: aws.String("String"), + DeploymentConfiguration: &ecs.DeploymentConfiguration{ + MaximumPercent: aws.Int64(1), + MinimumHealthyPercent: aws.Int64(1), + }, + LoadBalancers: []*ecs.LoadBalancer{ + { // Required + ContainerName: aws.String("String"), + ContainerPort: aws.Int64(1), + LoadBalancerName: aws.String("String"), + }, + // More values... + }, + Role: aws.String("String"), + } + resp, err := svc.CreateService(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeleteCluster() { + svc := ecs.New(session.New()) + + params := &ecs.DeleteClusterInput{ + Cluster: aws.String("String"), // Required + } + resp, err := svc.DeleteCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeleteService() { + svc := ecs.New(session.New()) + + params := &ecs.DeleteServiceInput{ + Service: aws.String("String"), // Required + Cluster: aws.String("String"), + } + resp, err := svc.DeleteService(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_DeregisterContainerInstance() { + svc := ecs.New(session.New()) + + params := &ecs.DeregisterContainerInstanceInput{ + ContainerInstance: aws.String("String"), // Required + Cluster: aws.String("String"), + Force: aws.Bool(true), + } + resp, err := svc.DeregisterContainerInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DeregisterTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.DeregisterTaskDefinitionInput{ + TaskDefinition: aws.String("String"), // Required + } + resp, err := svc.DeregisterTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeClusters() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeClustersInput{ + Clusters: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeContainerInstances() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeContainerInstancesInput{ + ContainerInstances: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeContainerInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_DescribeServices() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeServicesInput{ + Services: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeTaskDefinitionInput{ + TaskDefinition: aws.String("String"), // Required + } + resp, err := svc.DescribeTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DescribeTasks() { + svc := ecs.New(session.New()) + + params := &ecs.DescribeTasksInput{ + Tasks: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Cluster: aws.String("String"), + } + resp, err := svc.DescribeTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_DiscoverPollEndpoint() { + svc := ecs.New(session.New()) + + params := &ecs.DiscoverPollEndpointInput{ + Cluster: aws.String("String"), + ContainerInstance: aws.String("String"), + } + resp, err := svc.DiscoverPollEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_ListClusters() { + svc := ecs.New(session.New()) + + params := &ecs.ListClustersInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListContainerInstances() { + svc := ecs.New(session.New()) + + params := &ecs.ListContainerInstancesInput{ + Cluster: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListContainerInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListServices() { + svc := ecs.New(session.New()) + + params := &ecs.ListServicesInput{ + Cluster: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListTaskDefinitionFamilies() { + svc := ecs.New(session.New()) + + params := &ecs.ListTaskDefinitionFamiliesInput{ + FamilyPrefix: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListTaskDefinitionFamilies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_ListTaskDefinitions() { + svc := ecs.New(session.New()) + + params := &ecs.ListTaskDefinitionsInput{ + FamilyPrefix: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + Sort: aws.String("SortOrder"), + Status: aws.String("TaskDefinitionStatus"), + } + resp, err := svc.ListTaskDefinitions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_ListTasks() { + svc := ecs.New(session.New()) + + params := &ecs.ListTasksInput{ + Cluster: aws.String("String"), + ContainerInstance: aws.String("String"), + DesiredStatus: aws.String("DesiredStatus"), + Family: aws.String("String"), + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + ServiceName: aws.String("String"), + StartedBy: aws.String("String"), + } + resp, err := svc.ListTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RegisterContainerInstance() { + svc := ecs.New(session.New()) + + params := &ecs.RegisterContainerInstanceInput{ + Attributes: []*ecs.Attribute{ + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + Cluster: aws.String("String"), + ContainerInstanceArn: aws.String("String"), + InstanceIdentityDocument: aws.String("String"), + InstanceIdentityDocumentSignature: aws.String("String"), + TotalResources: []*ecs.Resource{ + { // Required + DoubleValue: aws.Float64(1.0), + IntegerValue: aws.Int64(1), + LongValue: aws.Int64(1), + Name: aws.String("String"), + StringSetValue: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + Type: aws.String("String"), + }, + // More values... + }, + VersionInfo: &ecs.VersionInfo{ + AgentHash: aws.String("String"), + AgentVersion: aws.String("String"), + DockerVersion: aws.String("String"), + }, + } + resp, err := svc.RegisterContainerInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RegisterTaskDefinition() { + svc := ecs.New(session.New()) + + params := &ecs.RegisterTaskDefinitionInput{ + ContainerDefinitions: []*ecs.ContainerDefinition{ // Required + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Cpu: aws.Int64(1), + DisableNetworking: aws.Bool(true), + DnsSearchDomains: []*string{ + aws.String("String"), // Required + // More values... + }, + DnsServers: []*string{ + aws.String("String"), // Required + // More values... + }, + DockerLabels: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + DockerSecurityOptions: []*string{ + aws.String("String"), // Required + // More values... + }, + EntryPoint: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Essential: aws.Bool(true), + ExtraHosts: []*ecs.HostEntry{ + { // Required + Hostname: aws.String("String"), // Required + IpAddress: aws.String("String"), // Required + }, + // More values... + }, + Hostname: aws.String("String"), + Image: aws.String("String"), + Links: []*string{ + aws.String("String"), // Required + // More values... + }, + LogConfiguration: &ecs.LogConfiguration{ + LogDriver: aws.String("LogDriver"), // Required + Options: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + }, + Memory: aws.Int64(1), + MountPoints: []*ecs.MountPoint{ + { // Required + ContainerPath: aws.String("String"), + ReadOnly: aws.Bool(true), + SourceVolume: aws.String("String"), + }, + // More values... + }, + Name: aws.String("String"), + PortMappings: []*ecs.PortMapping{ + { // Required + ContainerPort: aws.Int64(1), + HostPort: aws.Int64(1), + Protocol: aws.String("TransportProtocol"), + }, + // More values... + }, + Privileged: aws.Bool(true), + ReadonlyRootFilesystem: aws.Bool(true), + Ulimits: []*ecs.Ulimit{ + { // Required + HardLimit: aws.Int64(1), // Required + Name: aws.String("UlimitName"), // Required + SoftLimit: aws.Int64(1), // Required + }, + // More values... + }, + User: aws.String("String"), + VolumesFrom: []*ecs.VolumeFrom{ + { // Required + ReadOnly: aws.Bool(true), + SourceContainer: aws.String("String"), + }, + // More values... + }, + WorkingDirectory: aws.String("String"), + }, + // More values... + }, + Family: aws.String("String"), // Required + Volumes: []*ecs.Volume{ + { // Required + Host: &ecs.HostVolumeProperties{ + SourcePath: aws.String("String"), + }, + Name: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.RegisterTaskDefinition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_RunTask() { + svc := ecs.New(session.New()) + + params := &ecs.RunTaskInput{ + TaskDefinition: aws.String("String"), // Required + Cluster: aws.String("String"), + Count: aws.Int64(1), + Overrides: &ecs.TaskOverride{ + ContainerOverrides: []*ecs.ContainerOverride{ + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + Name: aws.String("String"), + }, + // More values... + }, + }, + StartedBy: aws.String("String"), + } + resp, err := svc.RunTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_StartTask() { + svc := ecs.New(session.New()) + + params := &ecs.StartTaskInput{ + ContainerInstances: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + TaskDefinition: aws.String("String"), // Required + Cluster: aws.String("String"), + Overrides: &ecs.TaskOverride{ + ContainerOverrides: []*ecs.ContainerOverride{ + { // Required + Command: []*string{ + aws.String("String"), // Required + // More values... + }, + Environment: []*ecs.KeyValuePair{ + { // Required + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Name: aws.String("String"), + }, + // More values... + }, + }, + StartedBy: aws.String("String"), + } + resp, err := svc.StartTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_StopTask() { + svc := ecs.New(session.New()) + + params := &ecs.StopTaskInput{ + Task: aws.String("String"), // Required + Cluster: aws.String("String"), + Reason: aws.String("String"), + } + resp, err := svc.StopTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_SubmitContainerStateChange() { + svc := ecs.New(session.New()) + + params := &ecs.SubmitContainerStateChangeInput{ + Cluster: aws.String("String"), + ContainerName: aws.String("String"), + ExitCode: aws.Int64(1), + NetworkBindings: []*ecs.NetworkBinding{ + { // Required + BindIP: aws.String("String"), + ContainerPort: aws.Int64(1), + HostPort: aws.Int64(1), + Protocol: aws.String("TransportProtocol"), + }, + // More values... + }, + Reason: aws.String("String"), + Status: aws.String("String"), + Task: aws.String("String"), + } + resp, err := svc.SubmitContainerStateChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_SubmitTaskStateChange() { + svc := ecs.New(session.New()) + + params := &ecs.SubmitTaskStateChangeInput{ + Cluster: aws.String("String"), + Reason: aws.String("String"), + Status: aws.String("String"), + Task: aws.String("String"), + } + resp, err := svc.SubmitTaskStateChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleECS_UpdateContainerAgent() { + svc := ecs.New(session.New()) + + params := &ecs.UpdateContainerAgentInput{ + ContainerInstance: aws.String("String"), // Required + Cluster: aws.String("String"), + } + resp, err := svc.UpdateContainerAgent(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleECS_UpdateService() { + svc := ecs.New(session.New()) + + params := &ecs.UpdateServiceInput{ + Service: aws.String("String"), // Required + Cluster: aws.String("String"), + DeploymentConfiguration: &ecs.DeploymentConfiguration{ + MaximumPercent: aws.Int64(1), + MinimumHealthyPercent: aws.Int64(1), + }, + DesiredCount: aws.Int64(1), + TaskDefinition: aws.String("String"), + } + resp, err := svc.UpdateService(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,99 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ecs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast, container +// management service that makes it easy to run, stop, and manage Docker containers +// on a cluster of EC2 instances. 
Amazon ECS lets you launch and stop container-enabled +// applications with simple API calls, allows you to get the state of your cluster +// from a centralized service, and gives you access to many familiar Amazon +// EC2 features like security groups, Amazon EBS volumes, and IAM roles. +// +// You can use Amazon ECS to schedule the placement of containers across your +// cluster based on your resource needs, isolation policies, and availability +// requirements. Amazon EC2 Container Service eliminates the need for you to +// operate your own cluster management and configuration management systems +// or worry about scaling your management infrastructure. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type ECS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ecs" + +// New creates a new instance of the ECS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ECS client from just a session. +// svc := ecs.New(mySession) +// +// // Create a ECS client with additional configuration +// svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ECS { + svc := &ECS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-11-13", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerServiceV20141113", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECS operation and runs any +// custom request initialization. +func (c *ECS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ecs/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,135 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package ecs + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ECS) WaitUntilServicesInactive(input *DescribeServicesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeServices", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "success", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "INACTIVE", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ECS) WaitUntilServicesStable(input *DescribeServicesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeServices", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "DRAINING", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "services[].status", + Expected: "INACTIVE", + }, + { + State: "success", + Matcher: "path", + Argument: "services | [@[?length(deployments)!=`1`], @[?desiredCount!=runningCount]][] | length(@) == `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ECS) WaitUntilTasksRunning(input *DescribeTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTasks", + Delay: 6, + MaxAttempts: 100, + Acceptors: []waiter.WaitAcceptor{ + { + State: "failure", + Matcher: "pathAny", + Argument: "tasks[].lastStatus", + Expected: "STOPPED", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "failures[].reason", + Expected: "MISSING", + }, + { + State: "success", + Matcher: "pathAll", + Argument: "tasks[].lastStatus", + Expected: "RUNNING", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + 
Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ECS) WaitUntilTasksStopped(input *DescribeTasksInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeTasks", + Delay: 6, + MaxAttempts: 100, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "tasks[].lastStatus", + Expected: "STOPPED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1095 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package efs provides a client for Amazon Elastic File System. +package efs + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opCreateFileSystem = "CreateFileSystem" + +// CreateFileSystemRequest generates a request for the CreateFileSystem operation. +func (c *EFS) CreateFileSystemRequest(input *CreateFileSystemInput) (req *request.Request, output *FileSystemDescription) { + op := &request.Operation{ + Name: opCreateFileSystem, + HTTPMethod: "POST", + HTTPPath: "/2015-02-01/file-systems", + } + + if input == nil { + input = &CreateFileSystemInput{} + } + + req = c.newRequest(op, input, output) + output = &FileSystemDescription{} + req.Data = output + return +} + +// Creates a new, empty file system. 
The operation requires a creation token +// in the request that Amazon EFS uses to ensure idempotent creation (calling +// the operation with same creation token has no effect). If a file system does +// not currently exist that is owned by the caller's AWS account with the specified +// creation token, this operation does the following: +// +// Creates a new, empty file system. The file system will have an Amazon EFS +// assigned ID, and an initial lifecycle state "creating". Returns with the +// description of the created file system. Otherwise, this operation returns +// a FileSystemAlreadyExists error with the ID of the existing file system. +// +// For basic use cases, you can use a randomly generated UUID for the creation +// token. The idempotent operation allows you to retry a CreateFileSystem call +// without risk of creating an extra file system. This can happen when an initial +// call fails in a way that leaves it uncertain whether or not a file system +// was actually created. An example might be that a transport level timeout +// occurred or your connection was reset. As long as you use the same creation +// token, if the initial call had succeeded in creating a file system, the client +// can learn of its existence from the FileSystemAlreadyExists error. +// +// The CreateFileSystem call returns while the file system's lifecycle state +// is still "creating". You can check the file system creation status by calling +// the DescribeFileSystems API, which among other things returns the file system +// state. After the file system is fully created, Amazon EFS sets its lifecycle +// state to "available", at which point you can create one or more mount targets +// for the file system (CreateMountTarget) in your VPC. You mount your Amazon +// EFS file system on an EC2 instances in your VPC via the mount target. 
For +// more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html) +// +// This operation requires permission for the elasticfilesystem:CreateFileSystem +// action. +func (c *EFS) CreateFileSystem(input *CreateFileSystemInput) (*FileSystemDescription, error) { + req, out := c.CreateFileSystemRequest(input) + err := req.Send() + return out, err +} + +const opCreateMountTarget = "CreateMountTarget" + +// CreateMountTargetRequest generates a request for the CreateMountTarget operation. +func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *request.Request, output *MountTargetDescription) { + op := &request.Operation{ + Name: opCreateMountTarget, + HTTPMethod: "POST", + HTTPPath: "/2015-02-01/mount-targets", + } + + if input == nil { + input = &CreateMountTargetInput{} + } + + req = c.newRequest(op, input, output) + output = &MountTargetDescription{} + req.Data = output + return +} + +// Creates a mount target for a file system. You can then mount the file system +// on EC2 instances via the mount target. +// +// You can create one mount target in each Availability Zone in your VPC. All +// EC2 instances in a VPC within a given Availability Zone share a single mount +// target for a given file system. If you have multiple subnets in an Availability +// Zone, you create a mount target in one of the subnets. EC2 instances do not +// need to be in the same subnet as the mount target in order to access their +// file system. For more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html). +// +// In the request, you also specify a file system ID for which you are creating +// the mount target and the file system's lifecycle state must be "available" +// (see DescribeFileSystems). +// +// In the request, you also provide a subnet ID, which serves several purposes: +// +// It determines the VPC in which Amazon EFS creates the mount target. 
It +// determines the Availability Zone in which Amazon EFS creates the mount target. +// It determines the IP address range from which Amazon EFS selects the IP +// address of the mount target if you don't specify an IP address in the request. +// After creating the mount target, Amazon EFS returns a response that includes, +// a MountTargetId and an IpAddress. You use this IP address when mounting the +// file system in an EC2 instance. You can also use the mount target's DNS name +// when mounting the file system. The EC2 instance on which you mount the file +// system via the mount target can resolve the mount target's DNS name to its +// IP address. For more information, see How it Works: Implementation Overview +// (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html#how-it-works-implementation). +// +// Note that you can create mount targets for a file system in only one VPC, +// and there can be only one mount target per Availability Zone. That is, if +// the file system already has one or more mount targets created for it, the +// request to add another mount target must meet the following requirements: +// +// The subnet specified in the request must belong to the same VPC as the +// subnets of the existing mount targets. +// +// The subnet specified in the request must not be in the same Availability +// Zone as any of the subnets of the existing mount targets. If the request +// satisfies the requirements, Amazon EFS does the following: +// +// Creates a new mount target in the specified subnet. Also creates a new +// network interface in the subnet as follows: If the request provides an IpAddress, +// Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon +// EFS assigns a free address in the subnet (in the same way that the Amazon +// EC2 CreateNetworkInterface call does when a request does not specify a primary +// private IP address). 
If the request provides SecurityGroups, this network +// interface is associated with those security groups. Otherwise, it belongs +// to the default security group for the subnet's VPC. Assigns the description +// "Mount target fsmt-id for file system fs-id" where fsmt-id is the mount target +// ID, and fs-id is the FileSystemId. Sets the requesterManaged property of +// the network interface to "true", and the requesterId value to "EFS". Each +// Amazon EFS mount target has one corresponding requestor-managed EC2 network +// interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId +// field in the mount target's description to the network interface ID, and +// the IpAddress field to its address. If network interface creation fails, +// the entire CreateMountTarget operation fails. +// +// The CreateMountTarget call returns only after creating the network interface, +// but while the mount target state is still "creating". You can check the mount +// target creation status by calling the DescribeFileSystems API, which among +// other things returns the mount target state. We recommend you create a mount +// target in each of the Availability Zones. There are cost considerations for +// using a file system in an Availability Zone through a mount target created +// in another Availability Zone. For more information, go to Amazon EFS (http://aws.amazon.com/efs/) +// product detail page. In addition, by always using a mount target local to +// the instance's Availability Zone, you eliminate a partial failure scenario; +// if the Availability Zone in which your mount target is created goes down, +// then you won't be able to access your file system through that mount target. 
+// +// This operation requires permission for the following action on the file +// system: +// +// elasticfilesystem:CreateMountTarget This operation also requires permission +// for the following Amazon EC2 actions: +// +// ec2:DescribeSubnets ec2:DescribeNetworkInterfaces ec2:CreateNetworkInterface +func (c *EFS) CreateMountTarget(input *CreateMountTargetInput) (*MountTargetDescription, error) { + req, out := c.CreateMountTargetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a request for the CreateTags operation. +func (c *EFS) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/2015-02-01/create-tags/{FileSystemId}", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Creates or overwrites tags associated with a file system. Each tag is a key-value +// pair. If a tag key specified in the request already exists on the file system, +// this operation overwrites its value with the value provided in the request. +// If you add the "Name" tag to your file system, Amazon EFS returns it in the +// response to the DescribeFileSystems API. +// +// This operation requires permission for the elasticfilesystem:CreateTags +// action. +func (c *EFS) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteFileSystem = "DeleteFileSystem" + +// DeleteFileSystemRequest generates a request for the DeleteFileSystem operation. 
+func (c *EFS) DeleteFileSystemRequest(input *DeleteFileSystemInput) (req *request.Request, output *DeleteFileSystemOutput) { + op := &request.Operation{ + Name: opDeleteFileSystem, + HTTPMethod: "DELETE", + HTTPPath: "/2015-02-01/file-systems/{FileSystemId}", + } + + if input == nil { + input = &DeleteFileSystemInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteFileSystemOutput{} + req.Data = output + return +} + +// Deletes a file system, permanently severing access to its contents. Upon +// return, the file system no longer exists and you will not be able to access +// any contents of the deleted file system. +// +// You cannot delete a file system that is in use. That is, if the file system +// has any mount targets, you must first delete them. For more information, +// see DescribeMountTargets and DeleteMountTarget. +// +// The DeleteFileSystem call returns while the file system state is still "deleting". +// You can check the file system deletion status by calling the DescribeFileSystems +// API, which returns a list of file systems in your account. If you pass file +// system ID or creation token for the deleted file system, the DescribeFileSystems +// will return a 404 "FileSystemNotFound" error. This operation requires permission +// for the elasticfilesystem:DeleteFileSystem action. +func (c *EFS) DeleteFileSystem(input *DeleteFileSystemInput) (*DeleteFileSystemOutput, error) { + req, out := c.DeleteFileSystemRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMountTarget = "DeleteMountTarget" + +// DeleteMountTargetRequest generates a request for the DeleteMountTarget operation. 
+func (c *EFS) DeleteMountTargetRequest(input *DeleteMountTargetInput) (req *request.Request, output *DeleteMountTargetOutput) { + op := &request.Operation{ + Name: opDeleteMountTarget, + HTTPMethod: "DELETE", + HTTPPath: "/2015-02-01/mount-targets/{MountTargetId}", + } + + if input == nil { + input = &DeleteMountTargetInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMountTargetOutput{} + req.Data = output + return +} + +// Deletes the specified mount target. +// +// This operation forcibly breaks any mounts of the file system via the mount +// target being deleted, which might disrupt instances or applications using +// those mounts. To avoid applications getting cut off abruptly, you might consider +// unmounting any mounts of the mount target, if feasible. The operation also +// deletes the associated network interface. Uncommitted writes may be lost, +// but breaking a mount target using this operation does not corrupt the file +// system itself. The file system you created remains. You can mount an EC2 +// instance in your VPC using another mount target. +// +// This operation requires permission for the following action on the file +// system: +// +// elasticfilesystem:DeleteMountTarget The DeleteMountTarget call returns +// while the mount target state is still "deleting". You can check the mount +// target deletion by calling the DescribeMountTargets API, which returns a +// list of mount target descriptions for the given file system. 
The operation +// also requires permission for the following Amazon EC2 action on the mount +// target's network interface: +// +// ec2:DeleteNetworkInterface +func (c *EFS) DeleteMountTarget(input *DeleteMountTargetInput) (*DeleteMountTargetOutput, error) { + req, out := c.DeleteMountTargetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a request for the DeleteTags operation. +func (c *EFS) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/2015-02-01/delete-tags/{FileSystemId}", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes the specified tags from a file system. If the DeleteTags request +// includes a tag key that does not exist, Amazon EFS ignores it; it is not +// an error. For more information about tags and related restrictions, go to +// Tag Restrictions (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// in the AWS Billing and Cost Management User Guide. +// +// This operation requires permission for the elasticfilesystem:DeleteTags +// action. +func (c *EFS) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFileSystems = "DescribeFileSystems" + +// DescribeFileSystemsRequest generates a request for the DescribeFileSystems operation. 
+func (c *EFS) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req *request.Request, output *DescribeFileSystemsOutput) { + op := &request.Operation{ + Name: opDescribeFileSystems, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/file-systems", + } + + if input == nil { + input = &DescribeFileSystemsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFileSystemsOutput{} + req.Data = output + return +} + +// Returns the description of a specific Amazon EFS file system if either the +// file system CreationToken or the FileSystemId is provided; otherwise, returns +// descriptions of all file systems owned by the caller's AWS account in the +// AWS region of the endpoint that you're calling. +// +// When retrieving all file system descriptions, you can optionally specify +// the MaxItems parameter to limit the number of descriptions in a response. +// If more file system descriptions remain, Amazon EFS returns a NextMarker, +// an opaque token, in the response. In this case, you should send a subsequent +// request with the Marker request parameter set to the value of NextMarker. +// +// So to retrieve a list of your file system descriptions, the expected usage +// of this API is an iterative process of first calling DescribeFileSystems +// without the Marker and then continuing to call it with the Marker parameter +// set to the value of the NextMarker from the previous response until the response +// has no NextMarker. +// +// Note that the implementation may return fewer than MaxItems file system +// descriptions while still including a NextMarker value. +// +// The order of file systems returned in the response of one DescribeFileSystems +// call, and the order of file systems returned across the responses of a multi-call +// iteration, is unspecified. +// +// This operation requires permission for the elasticfilesystem:DescribeFileSystems +// action. 
+func (c *EFS) DescribeFileSystems(input *DescribeFileSystemsInput) (*DescribeFileSystemsOutput, error) { + req, out := c.DescribeFileSystemsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMountTargetSecurityGroups = "DescribeMountTargetSecurityGroups" + +// DescribeMountTargetSecurityGroupsRequest generates a request for the DescribeMountTargetSecurityGroups operation. +func (c *EFS) DescribeMountTargetSecurityGroupsRequest(input *DescribeMountTargetSecurityGroupsInput) (req *request.Request, output *DescribeMountTargetSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeMountTargetSecurityGroups, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/mount-targets/{MountTargetId}/security-groups", + } + + if input == nil { + input = &DescribeMountTargetSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMountTargetSecurityGroupsOutput{} + req.Data = output + return +} + +// Returns the security groups currently in effect for a mount target. This +// operation requires that the network interface of the mount target has been +// created and the life cycle state of the mount target is not "deleted". +// +// This operation requires permissions for the following actions: +// +// elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount +// target's file system. ec2:DescribeNetworkInterfaceAttribute action on the +// mount target's network interface. +func (c *EFS) DescribeMountTargetSecurityGroups(input *DescribeMountTargetSecurityGroupsInput) (*DescribeMountTargetSecurityGroupsOutput, error) { + req, out := c.DescribeMountTargetSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMountTargets = "DescribeMountTargets" + +// DescribeMountTargetsRequest generates a request for the DescribeMountTargets operation. 
+func (c *EFS) DescribeMountTargetsRequest(input *DescribeMountTargetsInput) (req *request.Request, output *DescribeMountTargetsOutput) { + op := &request.Operation{ + Name: opDescribeMountTargets, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/mount-targets", + } + + if input == nil { + input = &DescribeMountTargetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMountTargetsOutput{} + req.Data = output + return +} + +// Returns the descriptions of all the current mount targets, or a specific +// mount target, for a file system. When requesting all of the current mount +// targets, the order of mount targets returned in the response is unspecified. +// +// This operation requires permission for the elasticfilesystem:DescribeMountTargets +// action, on either the file system id that you specify in FileSystemId, or +// on the file system of the mount target that you specify in MountTargetId. +func (c *EFS) DescribeMountTargets(input *DescribeMountTargetsInput) (*DescribeMountTargetsOutput, error) { + req, out := c.DescribeMountTargetsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a request for the DescribeTags operation. +func (c *EFS) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/tags/{FileSystemId}/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Returns the tags associated with a file system. The order of tags returned +// in the response of one DescribeTags call, and the order of tags returned +// across the responses of a multi-call iteration (when using pagination), is +// unspecified. +// +// This operation requires permission for the elasticfilesystem:DescribeTags +// action. 
+func (c *EFS) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opModifyMountTargetSecurityGroups = "ModifyMountTargetSecurityGroups" + +// ModifyMountTargetSecurityGroupsRequest generates a request for the ModifyMountTargetSecurityGroups operation. +func (c *EFS) ModifyMountTargetSecurityGroupsRequest(input *ModifyMountTargetSecurityGroupsInput) (req *request.Request, output *ModifyMountTargetSecurityGroupsOutput) { + op := &request.Operation{ + Name: opModifyMountTargetSecurityGroups, + HTTPMethod: "PUT", + HTTPPath: "/2015-02-01/mount-targets/{MountTargetId}/security-groups", + } + + if input == nil { + input = &ModifyMountTargetSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyMountTargetSecurityGroupsOutput{} + req.Data = output + return +} + +// Modifies the set of security groups in effect for a mount target. +// +// When you create a mount target, Amazon EFS also creates a new network interface +// (see CreateMountTarget). This operation replaces the security groups in effect +// for the network interface associated with a mount target, with the SecurityGroups +// provided in the request. This operation requires that the network interface +// of the mount target has been created and the life cycle state of the mount +// target is not "deleted". +// +// The operation requires permissions for the following actions: +// +// elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount +// target's file system. ec2:ModifyNetworkInterfaceAttribute action on the +// mount target's network interface. 
+func (c *EFS) ModifyMountTargetSecurityGroups(input *ModifyMountTargetSecurityGroupsInput) (*ModifyMountTargetSecurityGroupsOutput, error) { + req, out := c.ModifyMountTargetSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +type CreateFileSystemInput struct { + _ struct{} `type:"structure"` + + // String of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent + // creation. + CreationToken *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateFileSystemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFileSystemInput) GoString() string { + return s.String() +} + +type CreateMountTargetInput struct { + _ struct{} `type:"structure"` + + // The ID of the file system for which to create the mount target. + FileSystemId *string `type:"string" required:"true"` + + // A valid IPv4 address within the address range of the specified subnet. + IpAddress *string `type:"string"` + + // Up to 5 VPC security group IDs, of the form "sg-xxxxxxxx". These must be + // for the same VPC as subnet specified. + SecurityGroups []*string `type:"list"` + + // The ID of the subnet to add the mount target in. + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateMountTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMountTargetInput) GoString() string { + return s.String() +} + +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // String. The ID of the file system whose tags you want to modify. This operation + // modifies only the tags and not the file system. + FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` + + // An array of Tag objects to add. Each Tag object is a key-value pair. 
+ Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +type DeleteFileSystemInput struct { + _ struct{} `type:"structure"` + + // The ID of the file system you want to delete. + FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFileSystemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemInput) GoString() string { + return s.String() +} + +type DeleteFileSystemOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteFileSystemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemOutput) GoString() string { + return s.String() +} + +type DeleteMountTargetInput struct { + _ struct{} `type:"structure"` + + // String. The ID of the mount target to delete. 
+ MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMountTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMountTargetInput) GoString() string { + return s.String() +} + +type DeleteMountTargetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMountTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMountTargetOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // String. The ID of the file system whose tags you want to delete. + FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` + + // A list of tag keys to delete. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DescribeFileSystemsInput struct { + _ struct{} `type:"structure"` + + // Optional string. Restricts the list to the file system with this creation + // token (you specify a creation token at the time of creating an Amazon EFS + // file system). + CreationToken *string `location:"querystring" locationName:"CreationToken" min:"1" type:"string"` + + // Optional string. 
File system ID whose description you want to retrieve. + FileSystemId *string `location:"querystring" locationName:"FileSystemId" type:"string"` + + // Optional string. Opaque pagination token returned from a previous DescribeFileSystems + // operation. If present, specifies to continue the list from where the returning + // call had left off. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional integer. Specifies the maximum number of file systems to return + // in the response. This parameter value must be greater than 0. The number + // of items Amazon EFS returns will be the minimum of the MaxItems parameter + // specified in the request and the service's internal maximum number of items + // per page. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeFileSystemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFileSystemsInput) GoString() string { + return s.String() +} + +type DescribeFileSystemsOutput struct { + _ struct{} `type:"structure"` + + // An array of file system descriptions. + FileSystems []*FileSystemDescription `type:"list"` + + // A string, present if provided by caller in the request. + Marker *string `type:"string"` + + // A string, present if there are more file systems than returned in the response. + // You can use the NextMarker in the subsequent request to fetch the descriptions. 
+ NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFileSystemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFileSystemsOutput) GoString() string { + return s.String() +} + +type DescribeMountTargetSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The ID of the mount target whose security groups you want to retrieve. + MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMountTargetSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMountTargetSecurityGroupsInput) GoString() string { + return s.String() +} + +type DescribeMountTargetSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // An array of security groups. + SecurityGroups []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeMountTargetSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMountTargetSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeMountTargetsInput struct { + _ struct{} `type:"structure"` + + // Optional. String. The ID of the file system whose mount targets you want + // to list. It must be included in your request if MountTargetId is not included. + FileSystemId *string `location:"querystring" locationName:"FileSystemId" type:"string"` + + // Optional. String. Opaque pagination token returned from a previous DescribeMountTargets + // operation. If present, it specifies to continue the list from where the previous + // returning call left off. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional. 
Maximum number of mount targets to return in the response. It must + // be an integer with a value greater than zero. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` + + // Optional. String. The ID of the mount target that you want to have described. + // It must be included in your request if FileSystemId is not included. + MountTargetId *string `location:"querystring" locationName:"MountTargetId" type:"string"` +} + +// String returns the string representation +func (s DescribeMountTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMountTargetsInput) GoString() string { + return s.String() +} + +type DescribeMountTargetsOutput struct { + _ struct{} `type:"structure"` + + // If the request included the Marker, the response returns that value in this + // field. + Marker *string `type:"string"` + + // Returns the file system's mount targets as an array of MountTargetDescription + // objects. + MountTargets []*MountTargetDescription `type:"list"` + + // If a value is present, there are more mount targets to return. In a subsequent + // request, you can provide Marker in your request with this value to retrieve + // the next set of mount targets. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeMountTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMountTargetsOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the file system whose tag set you want to retrieve. + FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` + + // Optional. String. Opaque pagination token returned from a previous DescribeTags + // operation. 
If present, it specifies to continue the list from where the previous + // call left off. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional. Maximum number of file system tags to return in the response. It + // must be an integer with a value greater than zero. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // If the request included a Marker, the response returns that value in this + // field. + Marker *string `type:"string"` + + // If a value is present, there are more tags to return. In a subsequent request, + // you can provide the value of NextMarker as the value of the Marker parameter + // in your next request to retrieve the next set of tags. + NextMarker *string `type:"string"` + + // Returns tags associated with the file system as an array of Tag objects. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +// This object provides description of a file system. +type FileSystemDescription struct { + _ struct{} `type:"structure"` + + // The time at which the file system was created, in seconds, since 1970-01-01T00:00:00Z. + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Opaque string specified in the request. + CreationToken *string `min:"1" type:"string" required:"true"` + + // The file system ID assigned by Amazon EFS. 
+ FileSystemId *string `type:"string" required:"true"` + + // A predefined string value that indicates the lifecycle phase of the file + // system. + LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"` + + // You can add tags to a file system (see CreateTags) including a "Name" tag. + // If the file system has a "Name" tag, Amazon EFS returns the value in this + // field. + Name *string `type:"string"` + + // The current number of mount targets (see CreateMountTarget) the file system + // has. + NumberOfMountTargets *int64 `type:"integer" required:"true"` + + // The AWS account that created the file system. If the file system was created + // by an IAM user, the parent account to which the user belongs is the owner. + OwnerId *string `type:"string" required:"true"` + + // This object provides the latest known metered size of data stored in the + // file system, in bytes, in its Value field, and the time at which that size + // was determined in its Timestamp field. The Timestamp value is the integer + // number of seconds since 1970-01-01T00:00:00Z. Note that the value does not + // represent the size of a consistent snapshot of the file system, but it is + // eventually consistent when there are no writes to the file system. That is, + // the value will represent actual size only if the file system is not modified + // for a period longer than a couple of hours. Otherwise, the value is not the + // exact size the file system was at any instant in time. 
+ SizeInBytes *FileSystemSize `type:"structure" required:"true"` +} + +// String returns the string representation +func (s FileSystemDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemDescription) GoString() string { + return s.String() +} + +// This object provides the latest known metered size, in bytes, of data stored +// in the file system, in its Value field, and the time at which that size was +// determined in its Timestamp field. Note that the value does not represent +// the size of a consistent snapshot of the file system, but it is eventually +// consistent when there are no writes to the file system. That is, the value +// will represent the actual size only if the file system is not modified for +// a period longer than a couple of hours. Otherwise, the value is not necessarily +// the exact size the file system was at any instant in time. +type FileSystemSize struct { + _ struct{} `type:"structure"` + + // The time at which the size of data, returned in the Value field, was determined. + // The value is the integer number of seconds since 1970-01-01T00:00:00Z. + Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The latest known metered size, in bytes, of data stored in the file system. + Value *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s FileSystemSize) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemSize) GoString() string { + return s.String() +} + +type ModifyMountTargetSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The ID of the mount target whose security groups you want to modify. + MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` + + // An array of up to five VPC security group IDs. 
+ SecurityGroups []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyMountTargetSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyMountTargetSecurityGroupsInput) GoString() string { + return s.String() +} + +type ModifyMountTargetSecurityGroupsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyMountTargetSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyMountTargetSecurityGroupsOutput) GoString() string { + return s.String() +} + +// This object provides description of a mount target. +type MountTargetDescription struct { + _ struct{} `type:"structure"` + + // The ID of the file system for which the mount target is intended. + FileSystemId *string `type:"string" required:"true"` + + // The address at which the file system may be mounted via the mount target. + IpAddress *string `type:"string"` + + // The lifecycle state the mount target is in. + LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"` + + // The system-assigned mount target ID. + MountTargetId *string `type:"string" required:"true"` + + // The ID of the network interface that Amazon EFS created when it created the + // mount target. + NetworkInterfaceId *string `type:"string"` + + // The AWS account ID that owns the resource. + OwnerId *string `type:"string"` + + // The ID of the subnet that the mount target is in. + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MountTargetDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MountTargetDescription) GoString() string { + return s.String() +} + +// A tag is a pair of key and value. 
The allowed characters in keys and values +// are letters, whitespace, and numbers, representable in UTF-8, and the characters +// '+', '-', '=', '.', '_', ':', and '/'. +type Tag struct { + _ struct{} `type:"structure"` + + // Tag key, a string. The key must not start with "aws:". + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag key. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +const ( + // @enum LifeCycleState + LifeCycleStateCreating = "creating" + // @enum LifeCycleState + LifeCycleStateAvailable = "available" + // @enum LifeCycleState + LifeCycleStateDeleting = "deleting" + // @enum LifeCycleState + LifeCycleStateDeleted = "deleted" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/efsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package efsiface provides an interface for the Amazon Elastic File System. +package efsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/efs" +) + +// EFSAPI is the interface type for efs.EFS. 
+type EFSAPI interface { + CreateFileSystemRequest(*efs.CreateFileSystemInput) (*request.Request, *efs.FileSystemDescription) + + CreateFileSystem(*efs.CreateFileSystemInput) (*efs.FileSystemDescription, error) + + CreateMountTargetRequest(*efs.CreateMountTargetInput) (*request.Request, *efs.MountTargetDescription) + + CreateMountTarget(*efs.CreateMountTargetInput) (*efs.MountTargetDescription, error) + + CreateTagsRequest(*efs.CreateTagsInput) (*request.Request, *efs.CreateTagsOutput) + + CreateTags(*efs.CreateTagsInput) (*efs.CreateTagsOutput, error) + + DeleteFileSystemRequest(*efs.DeleteFileSystemInput) (*request.Request, *efs.DeleteFileSystemOutput) + + DeleteFileSystem(*efs.DeleteFileSystemInput) (*efs.DeleteFileSystemOutput, error) + + DeleteMountTargetRequest(*efs.DeleteMountTargetInput) (*request.Request, *efs.DeleteMountTargetOutput) + + DeleteMountTarget(*efs.DeleteMountTargetInput) (*efs.DeleteMountTargetOutput, error) + + DeleteTagsRequest(*efs.DeleteTagsInput) (*request.Request, *efs.DeleteTagsOutput) + + DeleteTags(*efs.DeleteTagsInput) (*efs.DeleteTagsOutput, error) + + DescribeFileSystemsRequest(*efs.DescribeFileSystemsInput) (*request.Request, *efs.DescribeFileSystemsOutput) + + DescribeFileSystems(*efs.DescribeFileSystemsInput) (*efs.DescribeFileSystemsOutput, error) + + DescribeMountTargetSecurityGroupsRequest(*efs.DescribeMountTargetSecurityGroupsInput) (*request.Request, *efs.DescribeMountTargetSecurityGroupsOutput) + + DescribeMountTargetSecurityGroups(*efs.DescribeMountTargetSecurityGroupsInput) (*efs.DescribeMountTargetSecurityGroupsOutput, error) + + DescribeMountTargetsRequest(*efs.DescribeMountTargetsInput) (*request.Request, *efs.DescribeMountTargetsOutput) + + DescribeMountTargets(*efs.DescribeMountTargetsInput) (*efs.DescribeMountTargetsOutput, error) + + DescribeTagsRequest(*efs.DescribeTagsInput) (*request.Request, *efs.DescribeTagsOutput) + + DescribeTags(*efs.DescribeTagsInput) (*efs.DescribeTagsOutput, error) + + 
ModifyMountTargetSecurityGroupsRequest(*efs.ModifyMountTargetSecurityGroupsInput) (*request.Request, *efs.ModifyMountTargetSecurityGroupsOutput) + + ModifyMountTargetSecurityGroups(*efs.ModifyMountTargetSecurityGroupsInput) (*efs.ModifyMountTargetSecurityGroupsOutput, error) +} + +var _ EFSAPI = (*efs.EFS)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,254 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package efs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/efs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEFS_CreateFileSystem() { + svc := efs.New(session.New()) + + params := &efs.CreateFileSystemInput{ + CreationToken: aws.String("CreationToken"), // Required + } + resp, err := svc.CreateFileSystem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_CreateMountTarget() { + svc := efs.New(session.New()) + + params := &efs.CreateMountTargetInput{ + FileSystemId: aws.String("FileSystemId"), // Required + SubnetId: aws.String("SubnetId"), // Required + IpAddress: aws.String("IpAddress"), + SecurityGroups: []*string{ + aws.String("SecurityGroup"), // Required + // More values... 
+ }, + } + resp, err := svc.CreateMountTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_CreateTags() { + svc := efs.New(session.New()) + + params := &efs.CreateTagsInput{ + FileSystemId: aws.String("FileSystemId"), // Required + Tags: []*efs.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteFileSystem() { + svc := efs.New(session.New()) + + params := &efs.DeleteFileSystemInput{ + FileSystemId: aws.String("FileSystemId"), // Required + } + resp, err := svc.DeleteFileSystem(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteMountTarget() { + svc := efs.New(session.New()) + + params := &efs.DeleteMountTargetInput{ + MountTargetId: aws.String("MountTargetId"), // Required + } + resp, err := svc.DeleteMountTarget(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DeleteTags() { + svc := efs.New(session.New()) + + params := &efs.DeleteTagsInput{ + FileSystemId: aws.String("FileSystemId"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... 
+ }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DescribeFileSystems() { + svc := efs.New(session.New()) + + params := &efs.DescribeFileSystemsInput{ + CreationToken: aws.String("CreationToken"), + FileSystemId: aws.String("FileSystemId"), + Marker: aws.String("Marker"), + MaxItems: aws.Int64(1), + } + resp, err := svc.DescribeFileSystems(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DescribeMountTargetSecurityGroups() { + svc := efs.New(session.New()) + + params := &efs.DescribeMountTargetSecurityGroupsInput{ + MountTargetId: aws.String("MountTargetId"), // Required + } + resp, err := svc.DescribeMountTargetSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_DescribeMountTargets() { + svc := efs.New(session.New()) + + params := &efs.DescribeMountTargetsInput{ + FileSystemId: aws.String("FileSystemId"), + Marker: aws.String("Marker"), + MaxItems: aws.Int64(1), + MountTargetId: aws.String("MountTargetId"), + } + resp, err := svc.DescribeMountTargets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEFS_DescribeTags() { + svc := efs.New(session.New()) + + params := &efs.DescribeTagsInput{ + FileSystemId: aws.String("FileSystemId"), // Required + Marker: aws.String("Marker"), + MaxItems: aws.Int64(1), + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEFS_ModifyMountTargetSecurityGroups() { + svc := efs.New(session.New()) + + params := &efs.ModifyMountTargetSecurityGroupsInput{ + MountTargetId: aws.String("MountTargetId"), // Required + SecurityGroups: []*string{ + aws.String("SecurityGroup"), // Required + // More values... + }, + } + resp, err := svc.ModifyMountTargetSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/efs/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,85 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package efs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type EFS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "elasticfilesystem" + +// New creates a new instance of the EFS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a EFS client from just a session. +// svc := efs.New(mySession) +// +// // Create a EFS client with additional configuration +// svc := efs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EFS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EFS { + svc := &EFS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-02-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a EFS operation and runs any +// custom request initialization. +func (c *EFS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4899 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticache provides a client for Amazon ElastiCache. 
+package elasticache + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a request for the AddTagsToResource operation. +func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &TagListMessage{} + req.Data = output + return +} + +// The AddTagsToResource action adds up to 10 cost allocation tags to the named +// resource. A cost allocation tag is a key-value pair where the key and value +// are case-sensitive. Cost allocation tags can be used to categorize and track +// your AWS costs. +// +// When you apply tags to your ElastiCache resources, AWS generates a cost +// allocation report as a comma-separated value (CSV) file with your usage and +// costs aggregated by your tags. You can apply tags that represent business +// categories (such as cost centers, application names, or owners) to organize +// your costs across multiple services. For more information, see Using Cost +// Allocation Tags in Amazon ElastiCache (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Tagging.html). +func (c *ElastiCache) AddTagsToResource(input *AddTagsToResourceInput) (*TagListMessage, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeCacheSecurityGroupIngress = "AuthorizeCacheSecurityGroupIngress" + +// AuthorizeCacheSecurityGroupIngressRequest generates a request for the AuthorizeCacheSecurityGroupIngress operation. 
+func (c *ElastiCache) AuthorizeCacheSecurityGroupIngressRequest(input *AuthorizeCacheSecurityGroupIngressInput) (req *request.Request, output *AuthorizeCacheSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeCacheSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeCacheSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeCacheSecurityGroupIngressOutput{} + req.Data = output + return +} + +// The AuthorizeCacheSecurityGroupIngress action allows network ingress to a +// cache security group. Applications using ElastiCache must be running on Amazon +// EC2, and Amazon EC2 security groups are used as the authorization mechanism. +// +// You cannot authorize ingress from an Amazon EC2 security group in one region +// to an ElastiCache cluster in another region. +func (c *ElastiCache) AuthorizeCacheSecurityGroupIngress(input *AuthorizeCacheSecurityGroupIngressInput) (*AuthorizeCacheSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeCacheSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opCopySnapshot = "CopySnapshot" + +// CopySnapshotRequest generates a request for the CopySnapshot operation. +func (c *ElastiCache) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { + op := &request.Operation{ + Name: opCopySnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopySnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopySnapshotOutput{} + req.Data = output + return +} + +// The CopySnapshot action makes a copy of an existing snapshot. 
+func (c *ElastiCache) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheCluster = "CreateCacheCluster" + +// CreateCacheClusterRequest generates a request for the CreateCacheCluster operation. +func (c *ElastiCache) CreateCacheClusterRequest(input *CreateCacheClusterInput) (req *request.Request, output *CreateCacheClusterOutput) { + op := &request.Operation{ + Name: opCreateCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheClusterOutput{} + req.Data = output + return +} + +// The CreateCacheCluster action creates a cache cluster. All nodes in the cache +// cluster run the same protocol-compliant cache engine software, either Memcached +// or Redis. +func (c *ElastiCache) CreateCacheCluster(input *CreateCacheClusterInput) (*CreateCacheClusterOutput, error) { + req, out := c.CreateCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheParameterGroup = "CreateCacheParameterGroup" + +// CreateCacheParameterGroupRequest generates a request for the CreateCacheParameterGroup operation. +func (c *ElastiCache) CreateCacheParameterGroupRequest(input *CreateCacheParameterGroupInput) (req *request.Request, output *CreateCacheParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheParameterGroupOutput{} + req.Data = output + return +} + +// The CreateCacheParameterGroup action creates a new cache parameter group. +// A cache parameter group is a collection of parameters that you apply to all +// of the nodes in a cache cluster. 
+func (c *ElastiCache) CreateCacheParameterGroup(input *CreateCacheParameterGroupInput) (*CreateCacheParameterGroupOutput, error) { + req, out := c.CreateCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheSecurityGroup = "CreateCacheSecurityGroup" + +// CreateCacheSecurityGroupRequest generates a request for the CreateCacheSecurityGroup operation. +func (c *ElastiCache) CreateCacheSecurityGroupRequest(input *CreateCacheSecurityGroupInput) (req *request.Request, output *CreateCacheSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateCacheSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheSecurityGroupOutput{} + req.Data = output + return +} + +// The CreateCacheSecurityGroup action creates a new cache security group. Use +// a cache security group to control access to one or more cache clusters. +// +// Cache security groups are only used when you are creating a cache cluster +// outside of an Amazon Virtual Private Cloud (VPC). If you are creating a cache +// cluster inside of a VPC, use a cache subnet group instead. For more information, +// see CreateCacheSubnetGroup (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_CreateCacheSubnetGroup.html). +func (c *ElastiCache) CreateCacheSecurityGroup(input *CreateCacheSecurityGroupInput) (*CreateCacheSecurityGroupOutput, error) { + req, out := c.CreateCacheSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateCacheSubnetGroup = "CreateCacheSubnetGroup" + +// CreateCacheSubnetGroupRequest generates a request for the CreateCacheSubnetGroup operation. 
+func (c *ElastiCache) CreateCacheSubnetGroupRequest(input *CreateCacheSubnetGroupInput) (req *request.Request, output *CreateCacheSubnetGroupOutput) { + op := &request.Operation{ + Name: opCreateCacheSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCacheSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCacheSubnetGroupOutput{} + req.Data = output + return +} + +// The CreateCacheSubnetGroup action creates a new cache subnet group. +// +// Use this parameter only when you are creating a cluster in an Amazon Virtual +// Private Cloud (VPC). +func (c *ElastiCache) CreateCacheSubnetGroup(input *CreateCacheSubnetGroupInput) (*CreateCacheSubnetGroupOutput, error) { + req, out := c.CreateCacheSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateReplicationGroup = "CreateReplicationGroup" + +// CreateReplicationGroupRequest generates a request for the CreateReplicationGroup operation. +func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGroupInput) (req *request.Request, output *CreateReplicationGroupOutput) { + op := &request.Operation{ + Name: opCreateReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReplicationGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReplicationGroupOutput{} + req.Data = output + return +} + +// The CreateReplicationGroup action creates a replication group. A replication +// group is a collection of cache clusters, where one of the cache clusters +// is a read/write primary and the others are read-only replicas. Writes to +// the primary are automatically propagated to the replicas. +// +// When you create a replication group, you must specify an existing cache +// cluster that is in the primary role. 
When the replication group has been +// successfully created, you can add one or more read replica replicas to it, +// up to a total of five read replicas. +// +// Note: This action is valid only for Redis. +func (c *ElastiCache) CreateReplicationGroup(input *CreateReplicationGroupInput) (*CreateReplicationGroupOutput, error) { + req, out := c.CreateReplicationGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +func (c *ElastiCache) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotOutput{} + req.Data = output + return +} + +// The CreateSnapshot action creates a copy of an entire cache cluster at a +// specific moment in time. +func (c *ElastiCache) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheCluster = "DeleteCacheCluster" + +// DeleteCacheClusterRequest generates a request for the DeleteCacheCluster operation. +func (c *ElastiCache) DeleteCacheClusterRequest(input *DeleteCacheClusterInput) (req *request.Request, output *DeleteCacheClusterOutput) { + op := &request.Operation{ + Name: opDeleteCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteCacheClusterOutput{} + req.Data = output + return +} + +// The DeleteCacheCluster action deletes a previously provisioned cache cluster. +// DeleteCacheCluster deletes all associated cache nodes, node endpoints and +// the cache cluster itself. 
When you receive a successful response from this +// action, Amazon ElastiCache immediately begins deleting the cache cluster; +// you cannot cancel or revert this action. +// +// This API cannot be used to delete a cache cluster that is the last read +// replica of a replication group that has Multi-AZ mode enabled. +func (c *ElastiCache) DeleteCacheCluster(input *DeleteCacheClusterInput) (*DeleteCacheClusterOutput, error) { + req, out := c.DeleteCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheParameterGroup = "DeleteCacheParameterGroup" + +// DeleteCacheParameterGroupRequest generates a request for the DeleteCacheParameterGroup operation. +func (c *ElastiCache) DeleteCacheParameterGroupRequest(input *DeleteCacheParameterGroupInput) (req *request.Request, output *DeleteCacheParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCacheParameterGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheParameterGroup action deletes the specified cache parameter +// group. You cannot delete a cache parameter group if it is associated with +// any cache clusters. +func (c *ElastiCache) DeleteCacheParameterGroup(input *DeleteCacheParameterGroupInput) (*DeleteCacheParameterGroupOutput, error) { + req, out := c.DeleteCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheSecurityGroup = "DeleteCacheSecurityGroup" + +// DeleteCacheSecurityGroupRequest generates a request for the DeleteCacheSecurityGroup operation. 
+func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurityGroupInput) (req *request.Request, output *DeleteCacheSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCacheSecurityGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheSecurityGroup action deletes a cache security group. +// +// You cannot delete a cache security group if it is associated with any cache +// clusters. +func (c *ElastiCache) DeleteCacheSecurityGroup(input *DeleteCacheSecurityGroupInput) (*DeleteCacheSecurityGroupOutput, error) { + req, out := c.DeleteCacheSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCacheSubnetGroup = "DeleteCacheSubnetGroup" + +// DeleteCacheSubnetGroupRequest generates a request for the DeleteCacheSubnetGroup operation. +func (c *ElastiCache) DeleteCacheSubnetGroupRequest(input *DeleteCacheSubnetGroupInput) (req *request.Request, output *DeleteCacheSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteCacheSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCacheSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCacheSubnetGroupOutput{} + req.Data = output + return +} + +// The DeleteCacheSubnetGroup action deletes a cache subnet group. +// +// You cannot delete a cache subnet group if it is associated with any cache +// clusters. 
+func (c *ElastiCache) DeleteCacheSubnetGroup(input *DeleteCacheSubnetGroupInput) (*DeleteCacheSubnetGroupOutput, error) { + req, out := c.DeleteCacheSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReplicationGroup = "DeleteReplicationGroup" + +// DeleteReplicationGroupRequest generates a request for the DeleteReplicationGroup operation. +func (c *ElastiCache) DeleteReplicationGroupRequest(input *DeleteReplicationGroupInput) (req *request.Request, output *DeleteReplicationGroupOutput) { + op := &request.Operation{ + Name: opDeleteReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReplicationGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReplicationGroupOutput{} + req.Data = output + return +} + +// The DeleteReplicationGroup action deletes an existing replication group. +// By default, this action deletes the entire replication group, including the +// primary cluster and all of the read replicas. You can optionally delete only +// the read replicas, while retaining the primary cluster. +// +// When you receive a successful response from this action, Amazon ElastiCache +// immediately begins deleting the selected resources; you cannot cancel or +// revert this action. +func (c *ElastiCache) DeleteReplicationGroup(input *DeleteReplicationGroupInput) (*DeleteReplicationGroupOutput, error) { + req, out := c.DeleteReplicationGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. 
+func (c *ElastiCache) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotOutput{} + req.Data = output + return +} + +// The DeleteSnapshot action deletes an existing snapshot. When you receive +// a successful response from this action, ElastiCache immediately begins deleting +// the snapshot; you cannot cancel or revert this action. +func (c *ElastiCache) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCacheClusters = "DescribeCacheClusters" + +// DescribeCacheClustersRequest generates a request for the DescribeCacheClusters operation. +func (c *ElastiCache) DescribeCacheClustersRequest(input *DescribeCacheClustersInput) (req *request.Request, output *DescribeCacheClustersOutput) { + op := &request.Operation{ + Name: opDescribeCacheClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheClustersOutput{} + req.Data = output + return +} + +// The DescribeCacheClusters action returns information about all provisioned +// cache clusters if no cache cluster identifier is specified, or about a specific +// cache cluster if a cache cluster identifier is supplied. +// +// By default, abbreviated information about the cache clusters(s) will be +// returned. 
You can use the optional ShowDetails flag to retrieve detailed +// information about the cache nodes associated with the cache clusters. These +// details include the DNS address and port for the cache node endpoint. +// +// If the cluster is in the CREATING state, only cluster level information +// will be displayed until all of the nodes are successfully provisioned. +// +// If the cluster is in the DELETING state, only cluster level information +// will be displayed. +// +// If cache nodes are currently being added to the cache cluster, node endpoint +// information and creation time for the additional nodes will not be displayed +// until they are completely provisioned. When the cache cluster state is available, +// the cluster is ready for use. +// +// If cache nodes are currently being removed from the cache cluster, no endpoint +// information for the removed nodes is displayed. +func (c *ElastiCache) DescribeCacheClusters(input *DescribeCacheClustersInput) (*DescribeCacheClustersOutput, error) { + req, out := c.DescribeCacheClustersRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheClustersPages(input *DescribeCacheClustersInput, fn func(p *DescribeCacheClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheClustersOutput), lastPage) + }) +} + +const opDescribeCacheEngineVersions = "DescribeCacheEngineVersions" + +// DescribeCacheEngineVersionsRequest generates a request for the DescribeCacheEngineVersions operation. 
+func (c *ElastiCache) DescribeCacheEngineVersionsRequest(input *DescribeCacheEngineVersionsInput) (req *request.Request, output *DescribeCacheEngineVersionsOutput) { + op := &request.Operation{ + Name: opDescribeCacheEngineVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheEngineVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheEngineVersionsOutput{} + req.Data = output + return +} + +// The DescribeCacheEngineVersions action returns a list of the available cache +// engines and their versions. +func (c *ElastiCache) DescribeCacheEngineVersions(input *DescribeCacheEngineVersionsInput) (*DescribeCacheEngineVersionsOutput, error) { + req, out := c.DescribeCacheEngineVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheEngineVersionsPages(input *DescribeCacheEngineVersionsInput, fn func(p *DescribeCacheEngineVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheEngineVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheEngineVersionsOutput), lastPage) + }) +} + +const opDescribeCacheParameterGroups = "DescribeCacheParameterGroups" + +// DescribeCacheParameterGroupsRequest generates a request for the DescribeCacheParameterGroups operation. 
+func (c *ElastiCache) DescribeCacheParameterGroupsRequest(input *DescribeCacheParameterGroupsInput) (req *request.Request, output *DescribeCacheParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheParameterGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheParameterGroups action returns a list of cache parameter +// group descriptions. If a cache parameter group name is specified, the list +// will contain only the descriptions for that group. +func (c *ElastiCache) DescribeCacheParameterGroups(input *DescribeCacheParameterGroupsInput) (*DescribeCacheParameterGroupsOutput, error) { + req, out := c.DescribeCacheParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheParameterGroupsPages(input *DescribeCacheParameterGroupsInput, fn func(p *DescribeCacheParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheParameterGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheParameterGroupsOutput), lastPage) + }) +} + +const opDescribeCacheParameters = "DescribeCacheParameters" + +// DescribeCacheParametersRequest generates a request for the DescribeCacheParameters operation. 
+func (c *ElastiCache) DescribeCacheParametersRequest(input *DescribeCacheParametersInput) (req *request.Request, output *DescribeCacheParametersOutput) { + op := &request.Operation{ + Name: opDescribeCacheParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheParametersOutput{} + req.Data = output + return +} + +// The DescribeCacheParameters action returns the detailed parameter list for +// a particular cache parameter group. +func (c *ElastiCache) DescribeCacheParameters(input *DescribeCacheParametersInput) (*DescribeCacheParametersOutput, error) { + req, out := c.DescribeCacheParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheParametersPages(input *DescribeCacheParametersInput, fn func(p *DescribeCacheParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheParametersOutput), lastPage) + }) +} + +const opDescribeCacheSecurityGroups = "DescribeCacheSecurityGroups" + +// DescribeCacheSecurityGroupsRequest generates a request for the DescribeCacheSecurityGroups operation. 
+func (c *ElastiCache) DescribeCacheSecurityGroupsRequest(input *DescribeCacheSecurityGroupsInput) (req *request.Request, output *DescribeCacheSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheSecurityGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheSecurityGroups action returns a list of cache security group +// descriptions. If a cache security group name is specified, the list will +// contain only the description of that group. +func (c *ElastiCache) DescribeCacheSecurityGroups(input *DescribeCacheSecurityGroupsInput) (*DescribeCacheSecurityGroupsOutput, error) { + req, out := c.DescribeCacheSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheSecurityGroupsPages(input *DescribeCacheSecurityGroupsInput, fn func(p *DescribeCacheSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheSecurityGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheSecurityGroupsOutput), lastPage) + }) +} + +const opDescribeCacheSubnetGroups = "DescribeCacheSubnetGroups" + +// DescribeCacheSubnetGroupsRequest generates a request for the DescribeCacheSubnetGroups operation. 
+func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubnetGroupsInput) (req *request.Request, output *DescribeCacheSubnetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeCacheSubnetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCacheSubnetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheSubnetGroupsOutput{} + req.Data = output + return +} + +// The DescribeCacheSubnetGroups action returns a list of cache subnet group +// descriptions. If a subnet group name is specified, the list will contain +// only the description of that group. +func (c *ElastiCache) DescribeCacheSubnetGroups(input *DescribeCacheSubnetGroupsInput) (*DescribeCacheSubnetGroupsOutput, error) { + req, out := c.DescribeCacheSubnetGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeCacheSubnetGroupsPages(input *DescribeCacheSubnetGroupsInput, fn func(p *DescribeCacheSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCacheSubnetGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCacheSubnetGroupsOutput), lastPage) + }) +} + +const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" + +// DescribeEngineDefaultParametersRequest generates a request for the DescribeEngineDefaultParameters operation. 
+func (c *ElastiCache) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"EngineDefaults.Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEngineDefaultParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultParametersOutput{} + req.Data = output + return +} + +// The DescribeEngineDefaultParameters action returns the default engine and +// system parameter information for the specified cache engine. +func (c *ElastiCache) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) { + req, out := c.DescribeEngineDefaultParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(p *DescribeEngineDefaultParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEngineDefaultParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEngineDefaultParametersOutput), lastPage) + }) +} + +const opDescribeEvents = "DescribeEvents" + +// DescribeEventsRequest generates a request for the DescribeEvents operation. 
+func (c *ElastiCache) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { + op := &request.Operation{ + Name: opDescribeEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventsOutput{} + req.Data = output + return +} + +// The DescribeEvents action returns events related to cache clusters, cache +// security groups, and cache parameter groups. You can obtain events specific +// to a particular cache cluster, cache security group, or cache parameter group +// by providing the name as a parameter. +// +// By default, only the events occurring within the last hour are returned; +// however, you can retrieve up to 14 days' worth of events if necessary. +func (c *ElastiCache) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventsOutput), lastPage) + }) +} + +const opDescribeReplicationGroups = "DescribeReplicationGroups" + +// DescribeReplicationGroupsRequest generates a request for the DescribeReplicationGroups operation. 
+func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicationGroupsInput) (req *request.Request, output *DescribeReplicationGroupsOutput) { + op := &request.Operation{ + Name: opDescribeReplicationGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReplicationGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReplicationGroupsOutput{} + req.Data = output + return +} + +// The DescribeReplicationGroups action returns information about a particular +// replication group. If no identifier is specified, DescribeReplicationGroups +// returns information about all replication groups. +func (c *ElastiCache) DescribeReplicationGroups(input *DescribeReplicationGroupsInput) (*DescribeReplicationGroupsOutput, error) { + req, out := c.DescribeReplicationGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeReplicationGroupsPages(input *DescribeReplicationGroupsInput, fn func(p *DescribeReplicationGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReplicationGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReplicationGroupsOutput), lastPage) + }) +} + +const opDescribeReservedCacheNodes = "DescribeReservedCacheNodes" + +// DescribeReservedCacheNodesRequest generates a request for the DescribeReservedCacheNodes operation. 
+func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedCacheNodesInput) (req *request.Request, output *DescribeReservedCacheNodesOutput) { + op := &request.Operation{ + Name: opDescribeReservedCacheNodes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedCacheNodesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedCacheNodesOutput{} + req.Data = output + return +} + +// The DescribeReservedCacheNodes action returns information about reserved +// cache nodes for this account, or about a specified reserved cache node. +func (c *ElastiCache) DescribeReservedCacheNodes(input *DescribeReservedCacheNodesInput) (*DescribeReservedCacheNodesOutput, error) { + req, out := c.DescribeReservedCacheNodesRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeReservedCacheNodesPages(input *DescribeReservedCacheNodesInput, fn func(p *DescribeReservedCacheNodesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedCacheNodesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedCacheNodesOutput), lastPage) + }) +} + +const opDescribeReservedCacheNodesOfferings = "DescribeReservedCacheNodesOfferings" + +// DescribeReservedCacheNodesOfferingsRequest generates a request for the DescribeReservedCacheNodesOfferings operation. 
+func (c *ElastiCache) DescribeReservedCacheNodesOfferingsRequest(input *DescribeReservedCacheNodesOfferingsInput) (req *request.Request, output *DescribeReservedCacheNodesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedCacheNodesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedCacheNodesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedCacheNodesOfferingsOutput{} + req.Data = output + return +} + +// The DescribeReservedCacheNodesOfferings action lists available reserved cache +// node offerings. +func (c *ElastiCache) DescribeReservedCacheNodesOfferings(input *DescribeReservedCacheNodesOfferingsInput) (*DescribeReservedCacheNodesOfferingsOutput, error) { + req, out := c.DescribeReservedCacheNodesOfferingsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPages(input *DescribeReservedCacheNodesOfferingsInput, fn func(p *DescribeReservedCacheNodesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedCacheNodesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedCacheNodesOfferingsOutput), lastPage) + }) +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. 
+func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotsOutput{} + req.Data = output + return +} + +// The DescribeSnapshots action returns information about cache cluster snapshots. +// By default, DescribeSnapshots lists all of your snapshots; it can optionally +// describe a single snapshot, or just the snapshots associated with a particular +// cache cluster. +func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSnapshotsOutput), lastPage) + }) +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. 
+func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &TagListMessage{} + req.Data = output + return +} + +// The ListTagsForResource action lists all cost allocation tags currently on +// the named resource. A cost allocation tag is a key-value pair where the key +// is case-sensitive and the value is optional. Cost allocation tags can be +// used to categorize and track your AWS costs. +// +// You can have a maximum of 10 cost allocation tags on an ElastiCache resource. +// For more information, see Using Cost Allocation Tags in Amazon ElastiCache +// (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/BestPractices.html). +func (c *ElastiCache) ListTagsForResource(input *ListTagsForResourceInput) (*TagListMessage, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opModifyCacheCluster = "ModifyCacheCluster" + +// ModifyCacheClusterRequest generates a request for the ModifyCacheCluster operation. +func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) (req *request.Request, output *ModifyCacheClusterOutput) { + op := &request.Operation{ + Name: opModifyCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyCacheClusterOutput{} + req.Data = output + return +} + +// The ModifyCacheCluster action modifies the settings for a cache cluster. +// You can use this action to change one or more cluster configuration parameters +// by specifying the parameters and the new values. 
+func (c *ElastiCache) ModifyCacheCluster(input *ModifyCacheClusterInput) (*ModifyCacheClusterOutput, error) { + req, out := c.ModifyCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opModifyCacheParameterGroup = "ModifyCacheParameterGroup" + +// ModifyCacheParameterGroupRequest generates a request for the ModifyCacheParameterGroup operation. +func (c *ElastiCache) ModifyCacheParameterGroupRequest(input *ModifyCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CacheParameterGroupNameMessage{} + req.Data = output + return +} + +// The ModifyCacheParameterGroup action modifies the parameters of a cache parameter +// group. You can modify up to 20 parameters in a single request by submitting +// a list parameter name and value pairs. +func (c *ElastiCache) ModifyCacheParameterGroup(input *ModifyCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) { + req, out := c.ModifyCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyCacheSubnetGroup = "ModifyCacheSubnetGroup" + +// ModifyCacheSubnetGroupRequest generates a request for the ModifyCacheSubnetGroup operation. +func (c *ElastiCache) ModifyCacheSubnetGroupRequest(input *ModifyCacheSubnetGroupInput) (req *request.Request, output *ModifyCacheSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyCacheSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyCacheSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyCacheSubnetGroupOutput{} + req.Data = output + return +} + +// The ModifyCacheSubnetGroup action modifies an existing cache subnet group. 
+func (c *ElastiCache) ModifyCacheSubnetGroup(input *ModifyCacheSubnetGroupInput) (*ModifyCacheSubnetGroupOutput, error) { + req, out := c.ModifyCacheSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyReplicationGroup = "ModifyReplicationGroup" + +// ModifyReplicationGroupRequest generates a request for the ModifyReplicationGroup operation. +func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGroupInput) (req *request.Request, output *ModifyReplicationGroupOutput) { + op := &request.Operation{ + Name: opModifyReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReplicationGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyReplicationGroupOutput{} + req.Data = output + return +} + +// The ModifyReplicationGroup action modifies the settings for a replication +// group. +func (c *ElastiCache) ModifyReplicationGroup(input *ModifyReplicationGroupInput) (*ModifyReplicationGroupOutput, error) { + req, out := c.ModifyReplicationGroupRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedCacheNodesOffering = "PurchaseReservedCacheNodesOffering" + +// PurchaseReservedCacheNodesOfferingRequest generates a request for the PurchaseReservedCacheNodesOffering operation. +func (c *ElastiCache) PurchaseReservedCacheNodesOfferingRequest(input *PurchaseReservedCacheNodesOfferingInput) (req *request.Request, output *PurchaseReservedCacheNodesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedCacheNodesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedCacheNodesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedCacheNodesOfferingOutput{} + req.Data = output + return +} + +// The PurchaseReservedCacheNodesOffering action allows you to purchase a reserved +// cache node offering. 
+func (c *ElastiCache) PurchaseReservedCacheNodesOffering(input *PurchaseReservedCacheNodesOfferingInput) (*PurchaseReservedCacheNodesOfferingOutput, error) { + req, out := c.PurchaseReservedCacheNodesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootCacheCluster = "RebootCacheCluster" + +// RebootCacheClusterRequest generates a request for the RebootCacheCluster operation. +func (c *ElastiCache) RebootCacheClusterRequest(input *RebootCacheClusterInput) (req *request.Request, output *RebootCacheClusterOutput) { + op := &request.Operation{ + Name: opRebootCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootCacheClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootCacheClusterOutput{} + req.Data = output + return +} + +// The RebootCacheCluster action reboots some, or all, of the cache nodes within +// a provisioned cache cluster. This API will apply any modified cache parameter +// groups to the cache cluster. The reboot action takes place as soon as possible, +// and results in a momentary outage to the cache cluster. During the reboot, +// the cache cluster status is set to REBOOTING. +// +// The reboot causes the contents of the cache (for each cache node being rebooted) +// to be lost. +// +// When the reboot is complete, a cache cluster event is created. +func (c *ElastiCache) RebootCacheCluster(input *RebootCacheClusterInput) (*RebootCacheClusterOutput, error) { + req, out := c.RebootCacheClusterRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a request for the RemoveTagsFromResource operation. 
+func (c *ElastiCache) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &TagListMessage{} + req.Data = output + return +} + +// The RemoveTagsFromResource action removes the tags identified by the TagKeys +// list from the named resource. +func (c *ElastiCache) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*TagListMessage, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opResetCacheParameterGroup = "ResetCacheParameterGroup" + +// ResetCacheParameterGroupRequest generates a request for the ResetCacheParameterGroup operation. +func (c *ElastiCache) ResetCacheParameterGroupRequest(input *ResetCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetCacheParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CacheParameterGroupNameMessage{} + req.Data = output + return +} + +// The ResetCacheParameterGroup action modifies the parameters of a cache parameter +// group to the engine or system default value. You can reset specific parameters +// by submitting a list of parameter names. To reset the entire cache parameter +// group, specify the ResetAllParameters and CacheParameterGroupName parameters. 
+func (c *ElastiCache) ResetCacheParameterGroup(input *ResetCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) { + req, out := c.ResetCacheParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opRevokeCacheSecurityGroupIngress = "RevokeCacheSecurityGroupIngress" + +// RevokeCacheSecurityGroupIngressRequest generates a request for the RevokeCacheSecurityGroupIngress operation. +func (c *ElastiCache) RevokeCacheSecurityGroupIngressRequest(input *RevokeCacheSecurityGroupIngressInput) (req *request.Request, output *RevokeCacheSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeCacheSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeCacheSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeCacheSecurityGroupIngressOutput{} + req.Data = output + return +} + +// The RevokeCacheSecurityGroupIngress action revokes ingress from a cache security +// group. Use this action to disallow access from an Amazon EC2 security group +// that had been previously authorized. +func (c *ElastiCache) RevokeCacheSecurityGroupIngress(input *RevokeCacheSecurityGroupIngressInput) (*RevokeCacheSecurityGroupIngressOutput, error) { + req, out := c.RevokeCacheSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +// Represents the input of an AddTagsToResource action. +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The name of the resource to which the tags are to be added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster. + ResourceName *string `type:"string" required:"true"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. 
+ Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Represents the input of an AuthorizeCacheSecurityGroupIngress action. +type AuthorizeCacheSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The cache security group which will allow network ingress. + CacheSecurityGroupName *string `type:"string" required:"true"` + + // The Amazon EC2 security group to be authorized for ingress to the cache security + // group. + EC2SecurityGroupName *string `type:"string" required:"true"` + + // The AWS account number of the Amazon EC2 security group owner. Note that + // this is not the same thing as an AWS access key ID - you must provide a valid + // AWS account number for this parameter. + EC2SecurityGroupOwnerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AuthorizeCacheSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeCacheSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type AuthorizeCacheSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeCacheSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeCacheSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes an Availability Zone in 
which the cache cluster is launched. +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the Availability Zone. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// Contains all of the attributes of a specific cache cluster. +type CacheCluster struct { + _ struct{} `type:"structure"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The date and time when the cache cluster was created. + CacheClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The user-supplied identifier of the cache cluster. This identifier is a unique + // key that identifies a cache cluster. + CacheClusterId *string `type:"string"` + + // The current state of this cache cluster, one of the following values: available, + // creating, deleted, deleting, incompatible-network, modifying, rebooting cache + // cluster nodes, restore-failed, or snapshotting. + CacheClusterStatus *string `type:"string"` + + // The name of the compute and memory capacity node type for the cache cluster. + // + // Valid node types are as follows: + // + // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous + // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current + // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, + // cache.m2.4xlarge Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). 
+ // Redis backup/restore is not supported for t2 instances. Redis Append-only + // files (AOF) functionality is not supported for t1 or t2 instances. For a + // complete listing of cache node types and specifications, see Amazon ElastiCache + // Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // A list of cache nodes that are members of the cache cluster. + CacheNodes []*CacheNode `locationNameList:"CacheNode" type:"list"` + + // The status of the cache parameter group. + CacheParameterGroup *CacheParameterGroupStatus `type:"structure"` + + // A list of cache security group elements, composed of name and status sub-elements. + CacheSecurityGroups []*CacheSecurityGroupMembership `locationNameList:"CacheSecurityGroup" type:"list"` + + // The name of the cache subnet group associated with the cache cluster. + CacheSubnetGroupName *string `type:"string"` + + // The URL of the web page where you can download the latest ElastiCache client + // library. + ClientDownloadLandingPage *string `type:"string"` + + // Represents the information required for client programs to connect to a cache + // node. + ConfigurationEndpoint *Endpoint `type:"structure"` + + // The name of the cache engine (memcached or redis) to be used for this cache + // cluster. + Engine *string `type:"string"` + + // The version of the cache engine version that is used in this cache cluster. + EngineVersion *string `type:"string"` + + // Describes a notification topic and its status. 
Notification topics are used + // for publishing ElastiCache events to subscribers using Amazon Simple Notification + // Service (SNS). + NotificationConfiguration *NotificationConfiguration `type:"structure"` + + // The number of cache nodes in the cache cluster. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + NumCacheNodes *int64 `type:"integer"` + + // A group of settings that will be applied to the cache cluster in the future, + // or that are currently being applied. + PendingModifiedValues *PendingModifiedValues `type:"structure"` + + // The name of the Availability Zone in which the cache cluster is located or + // "Multiple" if the cache nodes are located in different Availability Zones. + PreferredAvailabilityZone *string `type:"string"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // The replication group to which this cache cluster belongs. If this field + // is empty, the cache cluster is not associated with any replication group. + ReplicationGroupId *string `type:"string"` + + // A list of VPC Security Groups associated with the cache cluster. + SecurityGroups []*SecurityGroupMembership `type:"list"` + + // The number of days for which ElastiCache will retain automatic cache cluster + // snapshots before deleting them. For example, if you set SnapshotRetentionLimit + // to 5, then a snapshot that was taken today will be retained for 5 days before + // being deleted. + // + // ImportantIf the value of SnapshotRetentionLimit is set to zero (0), backups + // are turned off. 
+ SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your cache cluster. + // + // Example: 05:00-09:00 + SnapshotWindow *string `type:"string"` +} + +// String returns the string representation +func (s CacheCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheCluster) GoString() string { + return s.String() +} + +// Provides all of the details about a particular cache engine version. +type CacheEngineVersion struct { + _ struct{} `type:"structure"` + + // The description of the cache engine. + CacheEngineDescription *string `type:"string"` + + // The description of the cache engine version. + CacheEngineVersionDescription *string `type:"string"` + + // The name of the cache parameter group family associated with this cache engine. + CacheParameterGroupFamily *string `type:"string"` + + // The name of the cache engine. + Engine *string `type:"string"` + + // The version number of the cache engine. + EngineVersion *string `type:"string"` +} + +// String returns the string representation +func (s CacheEngineVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheEngineVersion) GoString() string { + return s.String() +} + +// Represents an individual cache node within a cache cluster. Each cache node +// runs its own instance of the cluster's protocol-compliant caching software +// - either Memcached or Redis. 
+// +// Valid node types are as follows: +// +// General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, +// cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous +// generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, +// cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current +// generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, +// cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, +// cache.m2.4xlarge Notes: +// +// All t2 instances are created in an Amazon Virtual Private Cloud (VPC). +// Redis backup/restore is not supported for t2 instances. Redis Append-only +// files (AOF) functionality is not supported for t1 or t2 instances. For a +// complete listing of cache node types and specifications, see Amazon ElastiCache +// Product Features and Details (http://aws.amazon.com/elasticache/details) +// and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) +// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). +type CacheNode struct { + _ struct{} `type:"structure"` + + // The date and time when the cache node was created. + CacheNodeCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The cache node identifier. A node ID is a numeric identifier (0001, 0002, + // etc.). The combination of cluster ID and node ID uniquely identifies every + // cache node used in a customer's AWS account. + CacheNodeId *string `type:"string"` + + // The current state of this cache node. + CacheNodeStatus *string `type:"string"` + + // The Availability Zone where this node was created and now resides. 
+ CustomerAvailabilityZone *string `type:"string"` + + // The hostname for connecting to this cache node. + Endpoint *Endpoint `type:"structure"` + + // The status of the parameter group applied to this cache node. + ParameterGroupStatus *string `type:"string"` + + // The ID of the primary node to which this read replica node is synchronized. + // If this field is empty, then this node is not associated with a primary cache + // cluster. + SourceCacheNodeId *string `type:"string"` +} + +// String returns the string representation +func (s CacheNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNode) GoString() string { + return s.String() +} + +// A parameter that has a different value for each cache node type it is applied +// to. For example, in a Redis cache cluster, a cache.m1.large cache node type +// would have a larger maxmemory value than a cache.m1.small type. +type CacheNodeTypeSpecificParameter struct { + _ struct{} `type:"structure"` + + // The valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // A list of cache node types and their corresponding values for this parameter. + CacheNodeTypeSpecificValues []*CacheNodeTypeSpecificValue `locationNameList:"CacheNodeTypeSpecificValue" type:"list"` + + // The valid data type for the parameter. + DataType *string `type:"string"` + + // A description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. Some + // parameters have security or operational implications that prevent them from + // being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest cache engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The source of the parameter value. 
+ Source *string `type:"string"` +} + +// String returns the string representation +func (s CacheNodeTypeSpecificParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNodeTypeSpecificParameter) GoString() string { + return s.String() +} + +// A value that applies only to a certain cache node type. +type CacheNodeTypeSpecificValue struct { + _ struct{} `type:"structure"` + + // The cache node type for which this value applies. + CacheNodeType *string `type:"string"` + + // The value for the cache node type. + Value *string `type:"string"` +} + +// String returns the string representation +func (s CacheNodeTypeSpecificValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNodeTypeSpecificValue) GoString() string { + return s.String() +} + +// Represents the output of a CreateCacheParameterGroup action. +type CacheParameterGroup struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family that this cache parameter group + // is compatible with. + CacheParameterGroupFamily *string `type:"string"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` + + // The description for this cache parameter group. + Description *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroup) GoString() string { + return s.String() +} + +// Represents the output of one of the following actions: +// +// ModifyCacheParameterGroup ResetCacheParameterGroup +type CacheParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group. 
+ CacheParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroupNameMessage) GoString() string { + return s.String() +} + +// The status of the cache parameter group. +type CacheParameterGroupStatus struct { + _ struct{} `type:"structure"` + + // A list of the cache node IDs which need to be rebooted for parameter changes + // to be applied. A node ID is a numeric identifier (0001, 0002, etc.). + CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` + + // The status of parameter updates. + ParameterApplyStatus *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroupStatus) GoString() string { + return s.String() +} + +// Represents the output of one of the following actions: +// +// AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress +type CacheSecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the cache security group. + CacheSecurityGroupName *string `type:"string"` + + // The description of the cache security group. + Description *string `type:"string"` + + // A list of Amazon EC2 security groups that are associated with this cache + // security group. + EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"` + + // The AWS account ID of the cache security group owner. 
+ OwnerId *string `type:"string"` +} + +// String returns the string representation +func (s CacheSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheSecurityGroup) GoString() string { + return s.String() +} + +// Represents a cache cluster's status within a particular cache security group. +type CacheSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the cache security group. + CacheSecurityGroupName *string `type:"string"` + + // The membership status in the cache security group. The status changes when + // a cache security group is modified, or when the cache security groups assigned + // to a cache cluster are modified. + Status *string `type:"string"` +} + +// String returns the string representation +func (s CacheSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheSecurityGroupMembership) GoString() string { + return s.String() +} + +// Represents the output of one of the following actions: +// +// CreateCacheSubnetGroup ModifyCacheSubnetGroup +type CacheSubnetGroup struct { + _ struct{} `type:"structure"` + + // The description of the cache subnet group. + CacheSubnetGroupDescription *string `type:"string"` + + // The name of the cache subnet group. + CacheSubnetGroupName *string `type:"string"` + + // A list of subnets associated with the cache subnet group. + Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + + // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet + // group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CacheSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheSubnetGroup) GoString() string { + return s.String() +} + +// Represents the input of a CopySnapshotMessage action. 
+type CopySnapshotInput struct { + _ struct{} `type:"structure"` + + // The name of an existing snapshot from which to copy. + SourceSnapshotName *string `type:"string" required:"true"` + + // A name for the copied snapshot. + TargetSnapshotName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopySnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotInput) GoString() string { + return s.String() +} + +type CopySnapshotOutput struct { + _ struct{} `type:"structure"` + + // Represents a copy of an entire cache cluster as of the time when the snapshot + // was taken. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s CopySnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateCacheCluster action. +type CreateCacheClusterInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the nodes in this Memcached node group are created in a + // single Availability Zone or created across multiple Availability Zones in + // the cluster's region. + // + // This parameter is only supported for Memcached cache clusters. + // + // If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache + // assumes single-az mode. + AZMode *string `type:"string" enum:"AZMode"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The node group identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // A name must contain from 1 to 20 alphanumeric characters or hyphens. The + // first character must be a letter. A name cannot end with a hyphen or contain + // two consecutive hyphens. 
+ CacheClusterId *string `type:"string" required:"true"` + + // The compute and memory capacity of the nodes in the node group. + // + // Valid node types are as follows: + // + // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous + // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current + // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, + // cache.m2.4xlarge Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // Redis backup/restore is not supported for t2 instances. Redis Append-only + // files (AOF) functionality is not supported for t1 or t2 instances. For a + // complete listing of cache node types and specifications, see Amazon ElastiCache + // Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The name of the parameter group to associate with this cache cluster. If + // this argument is omitted, the default parameter group for the specified engine + // is used. + CacheParameterGroupName *string `type:"string"` + + // A list of security group names to associate with this cache cluster. + // + // Use this parameter only when you are creating a cache cluster outside of + // an Amazon Virtual Private Cloud (VPC). 
+ CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + + // The name of the subnet group to be used for the cache cluster. + // + // Use this parameter only when you are creating a cache cluster in an Amazon + // Virtual Private Cloud (VPC). + CacheSubnetGroupName *string `type:"string"` + + // The name of the cache engine to be used for this cache cluster. + // + // Valid values for this parameter are: + // + // memcached | redis + Engine *string `type:"string"` + + // The version number of the cache engine to be used for this cache cluster. + // To view the supported cache engine versions, use the DescribeCacheEngineVersions + // action. + EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic to which notifications will be sent. + // + // The Amazon SNS topic owner must be the same as the cache cluster owner. + NotificationTopicArn *string `type:"string"` + + // The initial number of cache nodes that the cache cluster will have. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + // + // If you need more than 20 nodes for your Memcached cluster, please fill out + // the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + // (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + NumCacheNodes *int64 `type:"integer"` + + // The port number on which each of the cache nodes will accept connections. + Port *int64 `type:"integer"` + + // The EC2 Availability Zone in which the cache cluster will be created. + // + // All nodes belonging to this Memcached cache cluster are placed in the preferred + // Availability Zone. If you want to create your nodes across multiple Availability + // Zones, use PreferredAvailabilityZones. + // + // Default: System chosen Availability Zone. 
+ PreferredAvailabilityZone *string `type:"string"` + + // A list of the Availability Zones in which cache nodes will be created. The + // order of the zones in the list is not important. + // + // This option is only supported on Memcached. + // + // If you are creating your cache cluster in an Amazon VPC (recommended) you + // can only locate nodes in Availability Zones that are associated with the + // subnets in the selected subnet group. + // + // The number of Availability Zones listed must equal the value of NumCacheNodes. + // + // If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + // instead, or repeat the Availability Zone multiple times in the list. + // + // Default: System chosen Availability Zones. + // + // Example: One Memcached node in each of three different Availability Zones: + // PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2b&PreferredAvailabilityZones.member.3=us-west-2c + // + // Example: All three Memcached nodes in one Availability Zone: PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2a&PreferredAvailabilityZones.member.3=us-west-2a + PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // The ID of the replication group to which this cache cluster should belong. 
+ // If this parameter is specified, the cache cluster will be added to the specified + // replication group as a read replica; otherwise, the cache cluster will be + // a standalone primary that is not part of any replication group. + // + // If the specified replication group is Multi-AZ enabled and the availability + // zone is not specified, the cache cluster will be created in availability + // zones that provide the best spread of read replicas across availability zones. + // + // Note: This parameter is only valid if the Engine parameter is redis. + ReplicationGroupId *string `type:"string"` + + // One or more VPC security groups associated with the cache cluster. + // + // Use this parameter only when you are creating a cache cluster in an Amazon + // Virtual Private Cloud (VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + // file will be used to populate the node group. The Amazon S3 object name in + // the ARN cannot contain any commas. + // + // Note: This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` + + // The name of a snapshot from which to restore data into the new node group. + // The snapshot status changes to restoring while the new node group is being + // created. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotName *string `type:"string"` + + // The number of days for which ElastiCache will retain automatic snapshots + // before deleting them. For example, if you set SnapshotRetentionLimit to 5, + // then a snapshot that was taken today will be retained for 5 days before being + // deleted. 
+ // + // Note: This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your node group. + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, then ElastiCache will automatically + // choose an appropriate time range. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `type:"string"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateCacheClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheClusterInput) GoString() string { + return s.String() +} + +type CreateCacheClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific cache cluster. + CacheCluster *CacheCluster `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheClusterOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateCacheParameterGroup action. +type CreateCacheParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family the cache parameter group can + // be used with. + // + // Valid values are: memcached1.4 | redis2.6 | redis2.8 + CacheParameterGroupFamily *string `type:"string" required:"true"` + + // A user-specified name for the cache parameter group. 
+ CacheParameterGroupName *string `type:"string" required:"true"` + + // A user-specified description for the cache parameter group. + Description *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCacheParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheParameterGroupInput) GoString() string { + return s.String() +} + +type CreateCacheParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a CreateCacheParameterGroup action. + CacheParameterGroup *CacheParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheParameterGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateCacheSecurityGroup action. +type CreateCacheSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // A name for the cache security group. This value is stored as a lowercase + // string. + // + // Constraints: Must contain no more than 255 alphanumeric characters. Cannot + // be the word "Default". + // + // Example: mysecuritygroup + CacheSecurityGroupName *string `type:"string" required:"true"` + + // A description for the cache security group. 
+ Description *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCacheSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSecurityGroupInput) GoString() string { + return s.String() +} + +type CreateCacheSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSecurityGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateCacheSubnetGroup action. +type CreateCacheSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the cache subnet group. + CacheSubnetGroupDescription *string `type:"string" required:"true"` + + // A name for the cache subnet group. This value is stored as a lowercase string. + // + // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + // + // Example: mysubnetgroup + CacheSubnetGroupName *string `type:"string" required:"true"` + + // A list of VPC subnet IDs for the cache subnet group. 
+ SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateCacheSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSubnetGroupInput) GoString() string { + return s.String() +} + +type CreateCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // CreateCacheSubnetGroup ModifyCacheSubnetGroup + CacheSubnetGroup *CacheSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSubnetGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateReplicationGroup action. +type CreateReplicationGroupInput struct { + _ struct{} `type:"structure"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // Specifies whether a read-only replica will be automatically promoted to read/write + // primary if the existing primary fails. + // + // If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ + // is disabled for this replication group. + // + // Default: false + // + // ElastiCache Multi-AZ replication groups is not supported on: + // + // Redis versions earlier than 2.8.6. T1 and T2 cache node types. + AutomaticFailoverEnabled *bool `type:"boolean"` + + // The compute and memory capacity of the nodes in the node group. 
+ // + // Valid node types are as follows: + // + // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous + // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current + // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, + // cache.m2.4xlarge Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // Redis backup/restore is not supported for t2 instances. Redis Append-only + // files (AOF) functionality is not supported for t1 or t2 instances. For a + // complete listing of cache node types and specifications, see Amazon ElastiCache + // Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The name of the parameter group to associate with this replication group. + // If this argument is omitted, the default cache parameter group for the specified + // engine is used. + CacheParameterGroupName *string `type:"string"` + + // A list of cache security group names to associate with this replication group. + CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + + // The name of the cache subnet group to be used for the replication group. 
+ CacheSubnetGroupName *string `type:"string"` + + // The name of the cache engine to be used for the cache clusters in this replication + // group. + // + // Default: redis + Engine *string `type:"string"` + + // The version number of the cache engine to be used for the cache clusters + // in this replication group. To view the supported cache engine versions, use + // the DescribeCacheEngineVersions action. + EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic to which notifications will be sent. + // + // The Amazon SNS topic owner must be the same as the cache cluster owner. + NotificationTopicArn *string `type:"string"` + + // The number of cache clusters this replication group will initially have. + // + // If Multi-AZ is enabled, the value of this parameter must be at least 2. + // + // The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas). + // If you need to exceed this limit, please fill out the ElastiCache Limit Increase + // Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request + // (http://aws.amazon.com/contact-us/elasticache-node-limit-request). + NumCacheClusters *int64 `type:"integer"` + + // The port number on which each member of the replication group will accept + // connections. + Port *int64 `type:"integer"` + + // A list of EC2 availability zones in which the replication group's cache clusters + // will be created. The order of the availability zones in the list is not important. + // + // If you are creating your replication group in an Amazon VPC (recommended), + // you can only locate cache clusters in availability zones associated with + // the subnets in the selected subnet group. The number of availability zones + // listed must equal the value of NumCacheClusters. + // + // Default: system chosen availability zones. + // + // Example: One Redis cache cluster in each of three availability zones. 
PreferredAvailabilityZones.member.1=us-west-2a + // PreferredAvailabilityZones.member.2=us-west-2c PreferredAvailabilityZones.member.3=us-west-2c + PreferredCacheClusterAZs []*string `locationNameList:"AvailabilityZone" type:"list"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // The identifier of the cache cluster that will serve as the primary for this + // replication group. This cache cluster must already exist and have a status + // of available. + // + // This parameter is not required if NumCacheClusters is specified. + PrimaryClusterId *string `type:"string"` + + // A user-created description for the replication group. + ReplicationGroupDescription *string `type:"string" required:"true"` + + // The replication group identifier. This parameter is stored as a lowercase + // string. + // + // Constraints: + // + // A name must contain from 1 to 20 alphanumeric characters or hyphens. The + // first character must be a letter. A name cannot end with a hyphen or contain + // two consecutive hyphens. + ReplicationGroupId *string `type:"string" required:"true"` + + // One or more Amazon VPC security groups associated with this replication group. + // + // Use this parameter only when you are creating a replication group in an + // Amazon Virtual Private Cloud (VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + // file will be used to populate the node group. 
The Amazon S3 object name in + // the ARN cannot contain any commas. + // + // Note: This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` + + // The name of a snapshot from which to restore data into the new node group. + // The snapshot status changes to restoring while the new node group is being + // created. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotName *string `type:"string"` + + // The number of days for which ElastiCache will retain automatic snapshots + // before deleting them. For example, if you set SnapshotRetentionLimit to 5, + // then a snapshot that was taken today will be retained for 5 days before being + // deleted. + // + // Note: This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your node group. + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, then ElastiCache will automatically + // choose an appropriate time range. + // + // Note: This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `type:"string"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationGroupInput) GoString() string { + return s.String() +} + +type CreateReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicationGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a CreateSnapshot action. +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of an existing cache cluster. The snapshot will be created + // from this cache cluster. + CacheClusterId *string `type:"string" required:"true"` + + // A name for the snapshot being created. + SnapshotName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +type CreateSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Represents a copy of an entire cache cluster as of the time when the snapshot + // was taken. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s CreateSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteCacheCluster action. 
+type DeleteCacheClusterInput struct { + _ struct{} `type:"structure"` + + // The cache cluster identifier for the cluster to be deleted. This parameter + // is not case sensitive. + CacheClusterId *string `type:"string" required:"true"` + + // The user-supplied name of a final cache cluster snapshot. This is the unique + // name that identifies the snapshot. ElastiCache creates the snapshot, and + // then deletes the cache cluster immediately afterward. + FinalSnapshotIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s DeleteCacheClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheClusterInput) GoString() string { + return s.String() +} + +type DeleteCacheClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific cache cluster. + CacheCluster *CacheCluster `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheClusterOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteCacheParameterGroup action. +type DeleteCacheParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group to delete. + // + // The specified cache security group must not be associated with any cache + // clusters. 
+ CacheParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCacheParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheParameterGroupInput) GoString() string { + return s.String() +} + +type DeleteCacheParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheParameterGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteCacheSecurityGroup action. +type DeleteCacheSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache security group to delete. + // + // You cannot delete the default security group. + CacheSecurityGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCacheSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteCacheSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSecurityGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteCacheSubnetGroup action. +type DeleteCacheSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache subnet group to delete. + // + // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. 
+ CacheSubnetGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCacheSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSubnetGroupInput) GoString() string { + return s.String() +} + +type DeleteCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSubnetGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteReplicationGroup action. +type DeleteReplicationGroupInput struct { + _ struct{} `type:"structure"` + + // The name of a final node group snapshot. ElastiCache creates the snapshot + // from the primary node in the cluster, rather than one of the replicas; this + // is to ensure that it captures the freshest data. After the final snapshot + // is taken, the cluster is immediately deleted. + FinalSnapshotIdentifier *string `type:"string"` + + // The identifier for the cluster to be deleted. This parameter is not case + // sensitive. + ReplicationGroupId *string `type:"string" required:"true"` + + // If set to true, all of the read replicas will be deleted, but the primary + // node will be retained. + RetainPrimaryCluster *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationGroupInput) GoString() string { + return s.String() +} + +type DeleteReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific replication group. 
+ ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s DeleteReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a DeleteSnapshot action. +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // The name of the snapshot to be deleted. + SnapshotName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Represents a copy of an entire cache cluster as of the time when the snapshot + // was taken. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheClusters action. +type DescribeCacheClustersInput struct { + _ struct{} `type:"structure"` + + // The user-supplied cluster identifier. If this parameter is specified, only + // information about that specific cache cluster is returned. This parameter + // isn't case sensitive. + CacheClusterId *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // An optional flag that can be included in the DescribeCacheCluster request + // to retrieve information about the individual cache nodes. + ShowCacheNodeInfo *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeCacheClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheClustersInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheClusters action. +type DescribeCacheClustersOutput struct { + _ struct{} `type:"structure"` + + // A list of cache clusters. Each item in the list contains detailed information + // about one cache cluster. + CacheClusters []*CacheCluster `locationNameList:"CacheCluster" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheClustersOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheEngineVersions action. +type DescribeCacheEngineVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cache parameter group family to return details for. 
+ // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + CacheParameterGroupFamily *string `type:"string"` + + // If true, specifies that only the default version of the specified engine + // or engine and major version combination is to be returned. + DefaultOnly *bool `type:"boolean"` + + // The cache engine to return. Valid values: memcached | redis + Engine *string `type:"string"` + + // The cache engine version to return. + // + // Example: 1.4.14 + EngineVersion *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheEngineVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheEngineVersionsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheEngineVersions action. +type DescribeCacheEngineVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache engine version details. Each element in the list contains + // detailed information about one cache engine version. + CacheEngineVersions []*CacheEngineVersion `locationNameList:"CacheEngineVersion" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheEngineVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheEngineVersionsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheParameterGroups action. +type DescribeCacheParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cache parameter group to return details for. + CacheParameterGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParameterGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheParameterGroups action. +type DescribeCacheParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache parameter groups. Each element in the list contains detailed + // information about one cache parameter group. + CacheParameterGroups []*CacheParameterGroup `locationNameList:"CacheParameterGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParameterGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheParameters action. +type DescribeCacheParametersInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cache parameter group to return details for. + CacheParameterGroupName *string `type:"string" required:"true"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The parameter types to return. + // + // Valid values: user | system | engine-default + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParametersInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheParameters action. +type DescribeCacheParametersOutput struct { + _ struct{} `type:"structure"` + + // A list of parameters specific to a particular cache node type. Each element + // in the list contains detailed information about one parameter. 
+ CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // A list of Parameter instances. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DescribeCacheParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheParametersOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheSecurityGroups action. +type DescribeCacheSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the cache security group to return details for. + CacheSecurityGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSecurityGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheSecurityGroups action. +type DescribeCacheSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache security groups. 
Each element in the list contains detailed + // information about one group. + CacheSecurityGroups []*CacheSecurityGroup `locationNameList:"CacheSecurityGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSecurityGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeCacheSubnetGroups action. +type DescribeCacheSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the cache subnet group to return details for. + CacheSubnetGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCacheSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSubnetGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeCacheSubnetGroups action. +type DescribeCacheSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of cache subnet groups. Each element in the list contains detailed + // information about one group. 
+ CacheSubnetGroups []*CacheSubnetGroup `locationNameList:"CacheSubnetGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCacheSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheSubnetGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeEngineDefaultParameters action. +type DescribeEngineDefaultParametersInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group family. Valid values are: memcached1.4 + // | redis2.6 | redis2.8 + CacheParameterGroupFamily *string `type:"string" required:"true"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersInput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultParametersOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a DescribeEngineDefaultParameters action. 
+ EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeEvents action. +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // The number of minutes' worth of events to retrieve. + Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier of the event source for which events will be returned. If + // not specified, then all sources are included in the response. + SourceIdentifier *string `type:"string"` + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + // + // Valid values are: cache-cluster | cache-parameter-group | cache-security-group + // | cache-subnet-group + SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. 
+ StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeEvents action. +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of events. Each element in the list contains detailed information + // about one event. + Events []*Event `locationNameList:"Event" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeReplicationGroups action. +type DescribeReplicationGroupsInput struct { + _ struct{} `type:"structure"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier for the replication group to be described. This parameter + // is not case sensitive. + // + // If you do not specify this parameter, information about all replication + // groups is returned. 
+ ReplicationGroupId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReplicationGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationGroupsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeReplicationGroups action. +type DescribeReplicationGroupsOutput struct { + _ struct{} `type:"structure"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // A list of replication groups. Each item in the list contains detailed information + // about one replication group. + ReplicationGroups []*ReplicationGroup `locationNameList:"ReplicationGroup" type:"list"` +} + +// String returns the string representation +func (s DescribeReplicationGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationGroupsOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeReservedCacheNodes action. +type DescribeReservedCacheNodesInput struct { + _ struct{} `type:"structure"` + + // The cache node type filter value. Use this parameter to show only those reservations + // matching the specified cache node type. + // + // Valid node types are as follows: + // + // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous + // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current + // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, + // cache.m2.4xlarge Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). 
+ // Redis backup/restore is not supported for t2 instances. Redis Append-only + // files (AOF) functionality is not supported for t1 or t2 instances. For a + // complete listing of cache node types and specifications, see Amazon ElastiCache + // Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The duration filter value, specified in years or seconds. Use this parameter + // to show only reservations for this duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The offering type filter value. Use this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" + OfferingType *string `type:"string"` + + // The product description filter value. Use this parameter to show only those + // reservations matching the specified product description. 
+ ProductDescription *string `type:"string"` + + // The reserved cache node identifier filter value. Use this parameter to show + // only the reservation that matches the specified reservation ID. + ReservedCacheNodeId *string `type:"string"` + + // The offering identifier filter value. Use this parameter to show only purchased + // reservations matching the specified offering identifier. + ReservedCacheNodesOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedCacheNodesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedCacheNodesInput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeReservedCacheNodesOfferings action. +type DescribeReservedCacheNodesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The cache node type filter value. Use this parameter to show only the available + // offerings matching the specified cache node type. + // + // Valid node types are as follows: + // + // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous + // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current + // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, + // cache.m2.4xlarge Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // Redis backup/restore is not supported for t2 instances. Redis Append-only + // files (AOF) functionality is not supported for t1 or t2 instances. 
For a + // complete listing of cache node types and specifications, see Amazon ElastiCache + // Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // Duration filter value, specified in years or seconds. Use this parameter + // to show only reservations for a given duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The offering type filter value. Use this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid Values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" + OfferingType *string `type:"string"` + + // The product description filter value. Use this parameter to show only the + // available offerings matching the specified product description. + ProductDescription *string `type:"string"` + + // The offering identifier filter value. 
Use this parameter to show only the + // available offering that matches the specified reservation identifier. + // + // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + ReservedCacheNodesOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedCacheNodesOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedCacheNodesOfferingsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeReservedCacheNodesOfferings action. +type DescribeReservedCacheNodesOfferingsOutput struct { + _ struct{} `type:"structure"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // A list of reserved cache node offerings. Each element in the list contains + // detailed information about one offering. + ReservedCacheNodesOfferings []*ReservedCacheNodesOffering `locationNameList:"ReservedCacheNodesOffering" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedCacheNodesOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedCacheNodesOfferingsOutput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeReservedCacheNodes action. +type DescribeReservedCacheNodesOutput struct { + _ struct{} `type:"structure"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // A list of reserved cache nodes. Each element in the list contains detailed + // information about one node. 
+ ReservedCacheNodes []*ReservedCacheNode `locationNameList:"ReservedCacheNode" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedCacheNodesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedCacheNodesOutput) GoString() string { + return s.String() +} + +// Represents the input of a DescribeSnapshotsMessage action. +type DescribeSnapshotsInput struct { + _ struct{} `type:"structure"` + + // A user-supplied cluster identifier. If this parameter is specified, only + // snapshots associated with that specific cache cluster will be described. + CacheClusterId *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 50 + // + // Constraints: minimum 20; maximum 50. + MaxRecords *int64 `type:"integer"` + + // A user-supplied name of the snapshot. If this parameter is specified, only + // this snapshot will be described. + SnapshotName *string `type:"string"` + + // If set to system, the output shows snapshots that were automatically created + // by ElastiCache. If set to user the output shows snapshots that were manually + // created. If omitted, the output shows both automatically and manually created + // snapshots. 
+ SnapshotSource *string `type:"string"` +} + +// String returns the string representation +func (s DescribeSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeSnapshots action. +type DescribeSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this action. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // A list of snapshots. Each item in the list contains detailed information + // about one snapshot. + Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsOutput) GoString() string { + return s.String() +} + +// Provides ownership and status information for an Amazon EC2 security group. +type EC2SecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the Amazon EC2 security group. + EC2SecurityGroupName *string `type:"string"` + + // The AWS account ID of the Amazon EC2 security group owner. + EC2SecurityGroupOwnerId *string `type:"string"` + + // The status of the Amazon EC2 security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s EC2SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2SecurityGroup) GoString() string { + return s.String() +} + +// Represents the information required for client programs to connect to a cache +// node. 
+type Endpoint struct { + _ struct{} `type:"structure"` + + // The DNS hostname of the cache node. + Address *string `type:"string"` + + // The port number that the cache engine is listening on. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Represents the output of a DescribeEngineDefaultParameters action. +type EngineDefaults struct { + _ struct{} `type:"structure"` + + // A list of parameters specific to a particular cache node type. Each element + // in the list contains detailed information about one parameter. + CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` + + // Specifies the name of the cache parameter group family to which the engine + // default parameters apply. + CacheParameterGroupFamily *string `type:"string"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // Contains a list of engine default parameters. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s EngineDefaults) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EngineDefaults) GoString() string { + return s.String() +} + +// Represents a single occurrence of something interesting within the system. +// Some examples of events are creating a cache cluster, adding or removing +// a cache node, or rebooting a node. +type Event struct { + _ struct{} `type:"structure"` + + // The date and time when the event occurred. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The text of the event. + Message *string `type:"string"` + + // The identifier for the source of the event. 
For example, if the event occurred + // at the cache cluster level, the identifier would be the name of the cache + // cluster. + SourceIdentifier *string `type:"string"` + + // Specifies the origin of this event - a cache cluster, a parameter group, + // a security group, etc. + SourceType *string `type:"string" enum:"SourceType"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// The input parameters for the ListTagsForResource action. +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The name of the resource for which you want the list of tags, for example + // arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster. + ResourceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Represents the input of a ModifyCacheCluster action. +type ModifyCacheClusterInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the new nodes in this Memcached cache cluster are all created + // in a single Availability Zone or created across multiple Availability Zones. + // + // Valid values: single-az | cross-az. + // + // This option is only supported for Memcached cache clusters. + // + // You cannot specify single-az if the Memcached cache cluster already has + // cache nodes in different Availability Zones. If cross-az is specified, existing + // Memcached nodes remain in their current Availability Zone. + // + // Only newly created nodes will be located in different Availability Zones. 
+	// For instructions on how to move existing Memcached nodes to different Availability
+	// Zones, see the Availability Zone Considerations section of Cache Node Considerations
+	// for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html).
+	AZMode *string `type:"string" enum:"AZMode"`
+
+	// If true, this parameter causes the modifications in this request and any
+	// pending modifications to be applied, asynchronously and as soon as possible,
+	// regardless of the PreferredMaintenanceWindow setting for the cache cluster.
+	//
+	// If false, then changes to the cache cluster are applied on the next maintenance
+	// reboot, or the next failure reboot, whichever occurs first.
+	//
+	// If you perform a ModifyCacheCluster before a pending modification is applied,
+	// the pending modification is replaced by the newer modification. Valid values:
+	// true | false
+	//
+	// Default: false
+	ApplyImmediately *bool `type:"boolean"`
+
+	// This parameter is currently disabled.
+	AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+	// The cache cluster identifier. This value is stored as a lowercase string.
+	CacheClusterId *string `type:"string" required:"true"`
+
+	// A list of cache node IDs to be removed. A node ID is a numeric identifier
+	// (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less
+	// than the existing number of cache nodes. The number of cache node IDs supplied
+	// in this parameter must match the difference between the existing number of
+	// cache nodes in the cluster or pending cache nodes, whichever is greater,
+	// and the value of NumCacheNodes in the request.
+	//
+	// For example: If you have 3 active cache nodes, 7 pending cache nodes, and
+	// the number of cache nodes in this ModifyCacheCluster call is 5, you must list
+	// 2 (7 - 5) cache node IDs to remove. 
+ CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"` + + // The name of the cache parameter group to apply to this cache cluster. This + // change is asynchronously applied as soon as possible for parameters when + // the ApplyImmediately parameter is specified as true for this request. + CacheParameterGroupName *string `type:"string"` + + // A list of cache security group names to authorize on this cache cluster. + // This change is asynchronously applied as soon as possible. + // + // This parameter can be used only with clusters that are created outside of + // an Amazon Virtual Private Cloud (VPC). + // + // Constraints: Must contain no more than 255 alphanumeric characters. Must + // not be "Default". + CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + + // The upgraded version of the cache engine to be run on the cache nodes. + EngineVersion *string `type:"string"` + + // The list of Availability Zones where the new Memcached cache nodes will be + // created. + // + // This parameter is only valid when NumCacheNodes in the request is greater + // than the sum of the number of active cache nodes and the number of cache + // nodes pending creation (which may be zero). The number of Availability Zones + // supplied in this list must match the cache nodes being added in this request. + // + // This option is only supported on Memcached clusters. + // + // Scenarios: Scenario 1: You have 3 active nodes and wish to add 2 nodes. + // Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones + // for the two new nodes. Scenario 2: You have 3 active nodes and 2 nodes pending + // creation (from the scenario 1 call) and want to add 1 more node. Specify + // NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone + // for the new node. Scenario 3: You want to cancel all pending actions. Specify + // NumCacheNodes=3 to cancel all pending actions. 
+ // + // The Availability Zone placement of nodes pending creation cannot be modified. + // If you wish to cancel any nodes pending creation, add 0 nodes by setting + // NumCacheNodes to the number of current nodes. + // + // If cross-az is specified, existing Memcached nodes remain in their current + // Availability Zone. Only newly created nodes can be located in different Availability + // Zones. For guidance on how to move existing Memcached nodes to different + // Availability Zones, see the Availability Zone Considerations section of Cache + // Node Considerations for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html). + // + // Impact of new add/remove requests upon pending requests + // + // Scenarios Pending action New Request Results Scenario-1 Delete Delete + // The new delete, pending or immediate, replaces the pending delete. Scenario-2 + // Delete Create The new create, pending or immediate, replaces the pending + // delete. Scenario-3 Create Delete The new delete, pending or immediate, + // replaces the pending create. Scenario-4 Create Create The new create is + // added to the pending create. Important:If the new create request is Apply + // Immediately - Yes, all creates are performed immediately. If the new create + // request is Apply Immediately - No, all creates are pending. Example: NewAvailabilityZones.member.1=us-west-2a&NewAvailabilityZones.member.2=us-west-2b&NewAvailabilityZones.member.3=us-west-2c + NewAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications + // will be sent. + // + // The Amazon SNS topic owner must be same as the cache cluster owner. + NotificationTopicArn *string `type:"string"` + + // The status of the Amazon SNS notification topic. Notifications are sent only + // if the status is active. 
+ // + // Valid values: active | inactive + NotificationTopicStatus *string `type:"string"` + + // The number of cache nodes that the cache cluster should have. If the value + // for NumCacheNodes is greater than the sum of the number of current cache + // nodes and the number of cache nodes pending creation (which may be zero), + // then more nodes will be added. If the value is less than the number of existing + // cache nodes, then nodes will be removed. If the value is equal to the number + // of current cache nodes, then any pending add or remove requests are canceled. + // + // If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter + // to provide the IDs of the specific cache nodes to remove. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + // + // Note:Adding or removing Memcached cache nodes can be applied immediately + // or as a pending action. See ApplyImmediately. A pending action to modify + // the number of cache nodes in a cluster during its maintenance window, whether + // by adding or removing nodes in accordance with the scale out architecture, + // is not queued. The customer's latest request to add or remove nodes to the + // cluster overrides any previous pending actions to modify the number of cache + // nodes in the cluster. For example, a request to remove 2 nodes would override + // a previous pending action to remove 3 nodes. Similarly, a request to add + // 2 nodes would override a previous pending action to remove 3 nodes and vice + // versa. As Memcached cache nodes may now be provisioned in different Availability + // Zones with flexible cache node placement, a request to add nodes does not + // automatically override a previous pending action to add nodes. The customer + // can modify the previous pending action to add more nodes or explicitly cancel + // the pending request and retry the new request. 
To cancel pending actions + // to modify the number of cache nodes in a cluster, use the ModifyCacheCluster + // request and set NumCacheNodes equal to the number of cache nodes currently + // in the cache cluster. + NumCacheNodes *int64 `type:"integer"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the VPC Security Groups associated with the cache cluster. + // + // This parameter can be used only with clusters that are created in an Amazon + // Virtual Private Cloud (VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // The number of days for which ElastiCache will retain automatic cache cluster + // snapshots before deleting them. For example, if you set SnapshotRetentionLimit + // to 5, then a snapshot that was taken today will be retained for 5 days before + // being deleted. + // + // ImportantIf the value of SnapshotRetentionLimit is set to zero (0), backups + // are turned off. + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of your cache cluster. + SnapshotWindow *string `type:"string"` +} + +// String returns the string representation +func (s ModifyCacheClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCacheClusterInput) GoString() string { + return s.String() +} + +type ModifyCacheClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific cache cluster. 
+ CacheCluster *CacheCluster `type:"structure"` +} + +// String returns the string representation +func (s ModifyCacheClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCacheClusterOutput) GoString() string { + return s.String() +} + +// Represents the input of a ModifyCacheParameterGroup action. +type ModifyCacheParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group to modify. + CacheParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names and values for the parameter update. You must + // supply at least one parameter name and value; subsequent arguments are optional. + // A maximum of 20 parameters may be modified per request. + ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyCacheParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCacheParameterGroupInput) GoString() string { + return s.String() +} + +// Represents the input of a ModifyCacheSubnetGroup action. +type ModifyCacheSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the cache subnet group. + CacheSubnetGroupDescription *string `type:"string"` + + // The name for the cache subnet group. This value is stored as a lowercase + // string. + // + // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + // + // Example: mysubnetgroup + CacheSubnetGroupName *string `type:"string" required:"true"` + + // The EC2 subnet IDs for the cache subnet group. 
+ SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list"` +} + +// String returns the string representation +func (s ModifyCacheSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCacheSubnetGroupInput) GoString() string { + return s.String() +} + +type ModifyCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // CreateCacheSubnetGroup ModifyCacheSubnetGroup + CacheSubnetGroup *CacheSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCacheSubnetGroupOutput) GoString() string { + return s.String() +} + +// Represents the input of a ModifyReplicationGroups action. +type ModifyReplicationGroupInput struct { + _ struct{} `type:"structure"` + + // If true, this parameter causes the modifications in this request and any + // pending modifications to be applied, asynchronously and as soon as possible, + // regardless of the PreferredMaintenanceWindow setting for the replication + // group. + // + // If false, then changes to the nodes in the replication group are applied + // on the next maintenance reboot, or the next failure reboot, whichever occurs + // first. + // + // Valid values: true | false + // + // Default: false + ApplyImmediately *bool `type:"boolean"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // Whether a read replica will be automatically promoted to read/write primary + // if the existing primary encounters a failure. + // + // Valid values: true | false + // + // ElastiCache Multi-AZ replication groups are not supported on: + // + // Redis versions earlier than 2.8.6. T1 and T2 cache node types. 
+ AutomaticFailoverEnabled *bool `type:"boolean"` + + // The name of the cache parameter group to apply to all of the clusters in + // this replication group. This change is asynchronously applied as soon as + // possible for parameters when the ApplyImmediately parameter is specified + // as true for this request. + CacheParameterGroupName *string `type:"string"` + + // A list of cache security group names to authorize for the clusters in this + // replication group. This change is asynchronously applied as soon as possible. + // + // This parameter can be used only with replication group containing cache + // clusters running outside of an Amazon Virtual Private Cloud (VPC). + // + // Constraints: Must contain no more than 255 alphanumeric characters. Must + // not be "Default". + CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + + // The upgraded version of the cache engine to be run on the cache clusters + // in the replication group. + EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications + // will be sent. + // + // The Amazon SNS topic owner must be same as the replication group owner. + NotificationTopicArn *string `type:"string"` + + // The status of the Amazon SNS notification topic for the replication group. + // Notifications are sent only if the status is active. + // + // Valid values: active | inactive + NotificationTopicStatus *string `type:"string"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Valid + // values for ddd are: + // + // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // If this parameter is specified, ElastiCache will promote each of the cache + // clusters in the specified replication group to the primary role. The nodes + // of all other cache clusters in the replication group will be read replicas. + PrimaryClusterId *string `type:"string"` + + // A description for the replication group. Maximum length is 255 characters. + ReplicationGroupDescription *string `type:"string"` + + // The identifier of the replication group to modify. + ReplicationGroupId *string `type:"string" required:"true"` + + // Specifies the VPC Security Groups associated with the cache clusters in the + // replication group. + // + // This parameter can be used only with replication group containing cache + // clusters running in an Amazon Virtual Private Cloud (VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` + + // The number of days for which ElastiCache will retain automatic node group + // snapshots before deleting them. For example, if you set SnapshotRetentionLimit + // to 5, then a snapshot that was taken today will be retained for 5 days before + // being deleted. + // + // ImportantIf the value of SnapshotRetentionLimit is set to zero (0), backups + // are turned off. + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache will begin taking + // a daily snapshot of the node group specified by SnapshottingClusterId. + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, then ElastiCache will automatically + // choose an appropriate time range. + SnapshotWindow *string `type:"string"` + + // The cache cluster ID that will be used as the daily snapshot source for the + // replication group. 
+ SnapshottingClusterId *string `type:"string"` +} + +// String returns the string representation +func (s ModifyReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationGroupInput) GoString() string { + return s.String() +} + +type ModifyReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationGroupOutput) GoString() string { + return s.String() +} + +// Represents a collection of cache nodes in a replication group. +type NodeGroup struct { + _ struct{} `type:"structure"` + + // The identifier for the node group. A replication group contains only one + // node group; therefore, the node group ID is 0001. + NodeGroupId *string `type:"string"` + + // A list containing information about individual nodes within the node group. + NodeGroupMembers []*NodeGroupMember `locationNameList:"NodeGroupMember" type:"list"` + + // Represents the information required for client programs to connect to a cache + // node. + PrimaryEndpoint *Endpoint `type:"structure"` + + // The current state of this replication group - creating, available, etc. + Status *string `type:"string"` +} + +// String returns the string representation +func (s NodeGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeGroup) GoString() string { + return s.String() +} + +// Represents a single node within a node group. +type NodeGroupMember struct { + _ struct{} `type:"structure"` + + // The ID of the cache cluster to which the node belongs. 
+ CacheClusterId *string `type:"string"` + + // The ID of the node within its cache cluster. A node ID is a numeric identifier + // (0001, 0002, etc.). + CacheNodeId *string `type:"string"` + + // The role that is currently assigned to the node - primary or replica. + CurrentRole *string `type:"string"` + + // The name of the Availability Zone in which the node is located. + PreferredAvailabilityZone *string `type:"string"` + + // Represents the information required for client programs to connect to a cache + // node. + ReadEndpoint *Endpoint `type:"structure"` +} + +// String returns the string representation +func (s NodeGroupMember) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeGroupMember) GoString() string { + return s.String() +} + +// Represents an individual cache node in a snapshot of a cache cluster. +type NodeSnapshot struct { + _ struct{} `type:"structure"` + + // The date and time when the cache node was created in the source cache cluster. + CacheNodeCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The cache node identifier for the node in the source cache cluster. + CacheNodeId *string `type:"string"` + + // The size of the cache on the source cache node. + CacheSize *string `type:"string"` + + // The date and time when the source node's metadata and cache data set was + // obtained for the snapshot. + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s NodeSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeSnapshot) GoString() string { + return s.String() +} + +// Describes a notification topic and its status. Notification topics are used +// for publishing ElastiCache events to subscribers using Amazon Simple Notification +// Service (SNS). 
+type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the topic. + TopicArn *string `type:"string"` + + // The current state of the topic. + TopicStatus *string `type:"string"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Describes an individual setting that controls some aspect of ElastiCache +// behavior. +type Parameter struct { + _ struct{} `type:"structure"` + + // The valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // The valid data type for the parameter. + DataType *string `type:"string"` + + // A description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. Some + // parameters have security or operational implications that prevent them from + // being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest cache engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The value of the parameter. + ParameterValue *string `type:"string"` + + // The source of the parameter. + Source *string `type:"string"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +// Describes a name-value pair that is used to update the value of a parameter. +type ParameterNameValue struct { + _ struct{} `type:"structure"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The value of the parameter. 
+ ParameterValue *string `type:"string"` +} + +// String returns the string representation +func (s ParameterNameValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterNameValue) GoString() string { + return s.String() +} + +// A group of settings that will be applied to the cache cluster in the future, +// or that are currently being applied. +type PendingModifiedValues struct { + _ struct{} `type:"structure"` + + // A list of cache node IDs that are being removed (or will be removed) from + // the cache cluster. A node ID is a numeric identifier (0001, 0002, etc.). + CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"` + + // The new cache engine version that the cache cluster will run. + EngineVersion *string `type:"string"` + + // The new number of cache nodes for the cache cluster. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + NumCacheNodes *int64 `type:"integer"` +} + +// String returns the string representation +func (s PendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingModifiedValues) GoString() string { + return s.String() +} + +// Represents the input of a PurchaseReservedCacheNodesOffering action. +type PurchaseReservedCacheNodesOfferingInput struct { + _ struct{} `type:"structure"` + + // The number of cache node instances to reserve. + // + // Default: 1 + CacheNodeCount *int64 `type:"integer"` + + // A customer-specified identifier to track this reservation. + // + // Example: myreservationID + ReservedCacheNodeId *string `type:"string"` + + // The ID of the reserved cache node offering to purchase. 
+ // + // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + ReservedCacheNodesOfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseReservedCacheNodesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedCacheNodesOfferingInput) GoString() string { + return s.String() +} + +type PurchaseReservedCacheNodesOfferingOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of a PurchaseReservedCacheNodesOffering action. + ReservedCacheNode *ReservedCacheNode `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedCacheNodesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedCacheNodesOfferingOutput) GoString() string { + return s.String() +} + +// Represents the input of a RebootCacheCluster action. +type RebootCacheClusterInput struct { + _ struct{} `type:"structure"` + + // The cache cluster identifier. This parameter is stored as a lowercase string. + CacheClusterId *string `type:"string" required:"true"` + + // A list of cache node IDs to reboot. A node ID is a numeric identifier (0001, + // 0002, etc.). To reboot an entire cache cluster, specify all of the cache + // node IDs. + CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebootCacheClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootCacheClusterInput) GoString() string { + return s.String() +} + +type RebootCacheClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific cache cluster. 
+ CacheCluster *CacheCluster `type:"structure"` +} + +// String returns the string representation +func (s RebootCacheClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootCacheClusterOutput) GoString() string { + return s.String() +} + +// Contains the specific price and frequency of a recurring charges for a reserved +// cache node, or for a reserved cache node offering. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The monetary amount of the recurring charge. + RecurringChargeAmount *float64 `type:"double"` + + // The frequency of the recurring charge. + RecurringChargeFrequency *string `type:"string"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Represents the input of a RemoveTagsFromResource action. +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The name of the ElastiCache resource from which you want the listed tags + // removed, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster. + ResourceName *string `type:"string" required:"true"` + + // A list of TagKeys identifying the tags you want removed from the named resource. + // For example, TagKeys.member.1=Region removes the cost allocation tag with + // the key name Region from the resource named by the ResourceName parameter. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// Contains all of the attributes of a specific replication group. 
+type ReplicationGroup struct { + _ struct{} `type:"structure"` + + // Indicates the status of Multi-AZ for this replication group. + // + // ElastiCache Multi-AZ replication groups are not supported on: + // + // Redis versions earlier than 2.8.6. T1 and T2 cache node types. + AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` + + // The description of the replication group. + Description *string `type:"string"` + + // The names of all the cache clusters that are part of this replication group. + MemberClusters []*string `locationNameList:"ClusterId" type:"list"` + + // A single element list with information about the nodes in the replication + // group. + NodeGroups []*NodeGroup `locationNameList:"NodeGroup" type:"list"` + + // A group of settings to be applied to the replication group, either immediately + // or during the next maintenance window. + PendingModifiedValues *ReplicationGroupPendingModifiedValues `type:"structure"` + + // The identifier for the replication group. + ReplicationGroupId *string `type:"string"` + + // The cache cluster ID that is used as the daily snapshot source for the replication + // group. + SnapshottingClusterId *string `type:"string"` + + // The current state of this replication group - creating, available, etc. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationGroup) GoString() string { + return s.String() +} + +// The settings to be applied to the replication group, either immediately or +// during the next maintenance window. +type ReplicationGroupPendingModifiedValues struct { + _ struct{} `type:"structure"` + + // Indicates the status of Multi-AZ for this replication group. + // + // ElastiCache Multi-AZ replication groups are not supported on: + // + // Redis versions earlier than 2.8.6. T1 and T2 cache node types. 
+ AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"` + + // The primary cluster ID which will be applied immediately (if --apply-immediately + // was specified), or during the next maintenance window. + PrimaryClusterId *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationGroupPendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationGroupPendingModifiedValues) GoString() string { + return s.String() +} + +// Represents the output of a PurchaseReservedCacheNodesOffering action. +type ReservedCacheNode struct { + _ struct{} `type:"structure"` + + // The number of cache nodes that have been reserved. + CacheNodeCount *int64 `type:"integer"` + + // The cache node type for the reserved cache nodes. + // + // Valid node types are as follows: + // + // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous + // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current + // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, + // cache.m2.4xlarge Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // Redis backup/restore is not supported for t2 instances. Redis Append-only + // files (AOF) functionality is not supported for t1 or t2 instances. 
For a + // complete listing of cache node types and specifications, see Amazon ElastiCache + // Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The duration of the reservation in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this reserved cache node. + FixedPrice *float64 `type:"double"` + + // The offering type of this reserved cache node. + OfferingType *string `type:"string"` + + // The description of the reserved cache node. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved cache node. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The unique identifier for the reservation. + ReservedCacheNodeId *string `type:"string"` + + // The offering identifier. + ReservedCacheNodesOfferingId *string `type:"string"` + + // The time the reservation started. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state of the reserved cache node. + State *string `type:"string"` + + // The hourly price charged for this reserved cache node. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedCacheNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedCacheNode) GoString() string { + return s.String() +} + +// Describes all of the attributes of a reserved cache node offering. 
+type ReservedCacheNodesOffering struct { + _ struct{} `type:"structure"` + + // The cache node type for the reserved cache node. + // + // Valid node types are as follows: + // + // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous + // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current + // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, + // cache.m2.4xlarge Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // Redis backup/restore is not supported for t2 instances. Redis Append-only + // files (AOF) functionality is not supported for t1 or t2 instances. For a + // complete listing of cache node types and specifications, see Amazon ElastiCache + // Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The duration of the offering. in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this offering. + FixedPrice *float64 `type:"double"` + + // The offering type. + OfferingType *string `type:"string"` + + // The cache engine used by the offering. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved cache node. 
+ RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // A unique identifier for the reserved cache node offering. + ReservedCacheNodesOfferingId *string `type:"string"` + + // The hourly price charged for this offering. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedCacheNodesOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedCacheNodesOffering) GoString() string { + return s.String() +} + +// Represents the input of a ResetCacheParameterGroup action. +type ResetCacheParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group to reset. + CacheParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names to be reset. If you are not resetting the entire + // cache parameter group, you must specify at least one parameter name. + ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list" required:"true"` + + // If true, all parameters in the cache parameter group will be reset to default + // values. If false, no such action occurs. + // + // Valid values: true | false + ResetAllParameters *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetCacheParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetCacheParameterGroupInput) GoString() string { + return s.String() +} + +// Represents the input of a RevokeCacheSecurityGroupIngress action. +type RevokeCacheSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The name of the cache security group to revoke ingress from. + CacheSecurityGroupName *string `type:"string" required:"true"` + + // The name of the Amazon EC2 security group to revoke access from. 
+ EC2SecurityGroupName *string `type:"string" required:"true"` + + // The AWS account number of the Amazon EC2 security group owner. Note that + // this is not the same thing as an AWS access key ID - you must provide a valid + // AWS account number for this parameter. + EC2SecurityGroupOwnerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RevokeCacheSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeCacheSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type RevokeCacheSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following actions: + // + // AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s RevokeCacheSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeCacheSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Represents a single cache security group and its status. +type SecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The identifier of the cache security group. + SecurityGroupId *string `type:"string"` + + // The status of the cache security group membership. The status changes whenever + // a cache security group is modified, or when the cache security groups assigned + // to a cache cluster are modified. 
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s SecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupMembership) GoString() string { + return s.String() +} + +// Represents a copy of an entire cache cluster as of the time when the snapshot +// was taken. +type Snapshot struct { + _ struct{} `type:"structure"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The date and time when the source cache cluster was created. + CacheClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The user-supplied identifier of the source cache cluster. + CacheClusterId *string `type:"string"` + + // The name of the compute and memory capacity node type for the source cache + // cluster. + // + // Valid node types are as follows: + // + // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous + // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current + // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, + // cache.m2.4xlarge Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // Redis backup/restore is not supported for t2 instances. Redis Append-only + // files (AOF) functionality is not supported for t1 or t2 instances. 
For a + // complete listing of cache node types and specifications, see Amazon ElastiCache + // Product Features and Details (http://aws.amazon.com/elasticache/details) + // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) + // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). + CacheNodeType *string `type:"string"` + + // The cache parameter group that is associated with the source cache cluster. + CacheParameterGroupName *string `type:"string"` + + // The name of the cache subnet group associated with the source cache cluster. + CacheSubnetGroupName *string `type:"string"` + + // The name of the cache engine (memcached or redis) used by the source cache + // cluster. + Engine *string `type:"string"` + + // The version of the cache engine version that is used by the source cache + // cluster. + EngineVersion *string `type:"string"` + + // A list of the cache nodes in the source cache cluster. + NodeSnapshots []*NodeSnapshot `locationNameList:"NodeSnapshot" type:"list"` + + // The number of cache nodes in the source cache cluster. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + NumCacheNodes *int64 `type:"integer"` + + // The port number used by each cache nodes in the source cache cluster. + Port *int64 `type:"integer"` + + // The name of the Availability Zone in which the source cache cluster is located. + PreferredAvailabilityZone *string `type:"string"` + + // Specifies the weekly time range during which maintenance on the cache cluster + // is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Valid + // values for ddd are: + // + // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + PreferredMaintenanceWindow *string `type:"string"` + + // The name of a snapshot. For an automatic snapshot, the name is system-generated; + // for a manual snapshot, this is the user-provided name. + SnapshotName *string `type:"string"` + + // For an automatic snapshot, the number of days for which ElastiCache will + // retain the snapshot before deleting it. + // + // For manual snapshots, this field reflects the SnapshotRetentionLimit for + // the source cache cluster when the snapshot was created. This field is otherwise + // ignored: Manual snapshots do not expire, and can only be deleted using the + // DeleteSnapshot action. + // + // ImportantIf the value of SnapshotRetentionLimit is set to zero (0), backups + // are turned off. + SnapshotRetentionLimit *int64 `type:"integer"` + + // Indicates whether the snapshot is from an automatic backup (automated) or + // was created manually (manual). + SnapshotSource *string `type:"string"` + + // The status of the snapshot. Valid values: creating | available | restoring + // | copying | deleting. + SnapshotStatus *string `type:"string"` + + // The daily time range during which ElastiCache takes daily snapshots of the + // source cache cluster. + SnapshotWindow *string `type:"string"` + + // The Amazon Resource Name (ARN) for the topic used by the source cache cluster + // for publishing notifications. + TopicArn *string `type:"string"` + + // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet + // group for the source cache cluster. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// Represents the subnet associated with a cache cluster. 
This parameter refers +// to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used +// with ElastiCache. +type Subnet struct { + _ struct{} `type:"structure"` + + // The Availability Zone associated with the subnet. + SubnetAvailabilityZone *AvailabilityZone `type:"structure"` + + // The unique identifier for the subnet. + SubnetIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// A cost allocation Tag that can be added to an ElastiCache cluster or replication +// group. Tags are composed of a Key/Value pair. A tag with a null Value is +// permitted. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for the tag. + Key *string `type:"string"` + + // The tag's value. May not be null. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Represents the output from the AddTagsToResource, ListTagsOnResource, and +// RemoveTagsFromResource actions. +type TagListMessage struct { + _ struct{} `type:"structure"` + + // A list of cost allocation tags as key-value pairs. 
+ TagList []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s TagListMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagListMessage) GoString() string { + return s.String() +} + +const ( + // @enum AZMode + AZModeSingleAz = "single-az" + // @enum AZMode + AZModeCrossAz = "cross-az" +) + +const ( + // @enum AutomaticFailoverStatus + AutomaticFailoverStatusEnabled = "enabled" + // @enum AutomaticFailoverStatus + AutomaticFailoverStatusDisabled = "disabled" + // @enum AutomaticFailoverStatus + AutomaticFailoverStatusEnabling = "enabling" + // @enum AutomaticFailoverStatus + AutomaticFailoverStatusDisabling = "disabling" +) + +const ( + // @enum PendingAutomaticFailoverStatus + PendingAutomaticFailoverStatusEnabled = "enabled" + // @enum PendingAutomaticFailoverStatus + PendingAutomaticFailoverStatusDisabled = "disabled" +) + +const ( + // @enum SourceType + SourceTypeCacheCluster = "cache-cluster" + // @enum SourceType + SourceTypeCacheParameterGroup = "cache-parameter-group" + // @enum SourceType + SourceTypeCacheSecurityGroup = "cache-security-group" + // @enum SourceType + SourceTypeCacheSubnetGroup = "cache-subnet-group" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,186 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +// Package elasticacheiface provides an interface for the Amazon ElastiCache. +package elasticacheiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/elasticache" +) + +// ElastiCacheAPI is the interface type for elasticache.ElastiCache. +type ElastiCacheAPI interface { + AddTagsToResourceRequest(*elasticache.AddTagsToResourceInput) (*request.Request, *elasticache.TagListMessage) + + AddTagsToResource(*elasticache.AddTagsToResourceInput) (*elasticache.TagListMessage, error) + + AuthorizeCacheSecurityGroupIngressRequest(*elasticache.AuthorizeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.AuthorizeCacheSecurityGroupIngressOutput) + + AuthorizeCacheSecurityGroupIngress(*elasticache.AuthorizeCacheSecurityGroupIngressInput) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error) + + CopySnapshotRequest(*elasticache.CopySnapshotInput) (*request.Request, *elasticache.CopySnapshotOutput) + + CopySnapshot(*elasticache.CopySnapshotInput) (*elasticache.CopySnapshotOutput, error) + + CreateCacheClusterRequest(*elasticache.CreateCacheClusterInput) (*request.Request, *elasticache.CreateCacheClusterOutput) + + CreateCacheCluster(*elasticache.CreateCacheClusterInput) (*elasticache.CreateCacheClusterOutput, error) + + CreateCacheParameterGroupRequest(*elasticache.CreateCacheParameterGroupInput) (*request.Request, *elasticache.CreateCacheParameterGroupOutput) + + CreateCacheParameterGroup(*elasticache.CreateCacheParameterGroupInput) (*elasticache.CreateCacheParameterGroupOutput, error) + + CreateCacheSecurityGroupRequest(*elasticache.CreateCacheSecurityGroupInput) (*request.Request, *elasticache.CreateCacheSecurityGroupOutput) + + CreateCacheSecurityGroup(*elasticache.CreateCacheSecurityGroupInput) (*elasticache.CreateCacheSecurityGroupOutput, error) + + CreateCacheSubnetGroupRequest(*elasticache.CreateCacheSubnetGroupInput) (*request.Request, *elasticache.CreateCacheSubnetGroupOutput) + + 
CreateCacheSubnetGroup(*elasticache.CreateCacheSubnetGroupInput) (*elasticache.CreateCacheSubnetGroupOutput, error) + + CreateReplicationGroupRequest(*elasticache.CreateReplicationGroupInput) (*request.Request, *elasticache.CreateReplicationGroupOutput) + + CreateReplicationGroup(*elasticache.CreateReplicationGroupInput) (*elasticache.CreateReplicationGroupOutput, error) + + CreateSnapshotRequest(*elasticache.CreateSnapshotInput) (*request.Request, *elasticache.CreateSnapshotOutput) + + CreateSnapshot(*elasticache.CreateSnapshotInput) (*elasticache.CreateSnapshotOutput, error) + + DeleteCacheClusterRequest(*elasticache.DeleteCacheClusterInput) (*request.Request, *elasticache.DeleteCacheClusterOutput) + + DeleteCacheCluster(*elasticache.DeleteCacheClusterInput) (*elasticache.DeleteCacheClusterOutput, error) + + DeleteCacheParameterGroupRequest(*elasticache.DeleteCacheParameterGroupInput) (*request.Request, *elasticache.DeleteCacheParameterGroupOutput) + + DeleteCacheParameterGroup(*elasticache.DeleteCacheParameterGroupInput) (*elasticache.DeleteCacheParameterGroupOutput, error) + + DeleteCacheSecurityGroupRequest(*elasticache.DeleteCacheSecurityGroupInput) (*request.Request, *elasticache.DeleteCacheSecurityGroupOutput) + + DeleteCacheSecurityGroup(*elasticache.DeleteCacheSecurityGroupInput) (*elasticache.DeleteCacheSecurityGroupOutput, error) + + DeleteCacheSubnetGroupRequest(*elasticache.DeleteCacheSubnetGroupInput) (*request.Request, *elasticache.DeleteCacheSubnetGroupOutput) + + DeleteCacheSubnetGroup(*elasticache.DeleteCacheSubnetGroupInput) (*elasticache.DeleteCacheSubnetGroupOutput, error) + + DeleteReplicationGroupRequest(*elasticache.DeleteReplicationGroupInput) (*request.Request, *elasticache.DeleteReplicationGroupOutput) + + DeleteReplicationGroup(*elasticache.DeleteReplicationGroupInput) (*elasticache.DeleteReplicationGroupOutput, error) + + DeleteSnapshotRequest(*elasticache.DeleteSnapshotInput) (*request.Request, *elasticache.DeleteSnapshotOutput) + + 
DeleteSnapshot(*elasticache.DeleteSnapshotInput) (*elasticache.DeleteSnapshotOutput, error) + + DescribeCacheClustersRequest(*elasticache.DescribeCacheClustersInput) (*request.Request, *elasticache.DescribeCacheClustersOutput) + + DescribeCacheClusters(*elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error) + + DescribeCacheClustersPages(*elasticache.DescribeCacheClustersInput, func(*elasticache.DescribeCacheClustersOutput, bool) bool) error + + DescribeCacheEngineVersionsRequest(*elasticache.DescribeCacheEngineVersionsInput) (*request.Request, *elasticache.DescribeCacheEngineVersionsOutput) + + DescribeCacheEngineVersions(*elasticache.DescribeCacheEngineVersionsInput) (*elasticache.DescribeCacheEngineVersionsOutput, error) + + DescribeCacheEngineVersionsPages(*elasticache.DescribeCacheEngineVersionsInput, func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool) error + + DescribeCacheParameterGroupsRequest(*elasticache.DescribeCacheParameterGroupsInput) (*request.Request, *elasticache.DescribeCacheParameterGroupsOutput) + + DescribeCacheParameterGroups(*elasticache.DescribeCacheParameterGroupsInput) (*elasticache.DescribeCacheParameterGroupsOutput, error) + + DescribeCacheParameterGroupsPages(*elasticache.DescribeCacheParameterGroupsInput, func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool) error + + DescribeCacheParametersRequest(*elasticache.DescribeCacheParametersInput) (*request.Request, *elasticache.DescribeCacheParametersOutput) + + DescribeCacheParameters(*elasticache.DescribeCacheParametersInput) (*elasticache.DescribeCacheParametersOutput, error) + + DescribeCacheParametersPages(*elasticache.DescribeCacheParametersInput, func(*elasticache.DescribeCacheParametersOutput, bool) bool) error + + DescribeCacheSecurityGroupsRequest(*elasticache.DescribeCacheSecurityGroupsInput) (*request.Request, *elasticache.DescribeCacheSecurityGroupsOutput) + + 
DescribeCacheSecurityGroups(*elasticache.DescribeCacheSecurityGroupsInput) (*elasticache.DescribeCacheSecurityGroupsOutput, error) + + DescribeCacheSecurityGroupsPages(*elasticache.DescribeCacheSecurityGroupsInput, func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool) error + + DescribeCacheSubnetGroupsRequest(*elasticache.DescribeCacheSubnetGroupsInput) (*request.Request, *elasticache.DescribeCacheSubnetGroupsOutput) + + DescribeCacheSubnetGroups(*elasticache.DescribeCacheSubnetGroupsInput) (*elasticache.DescribeCacheSubnetGroupsOutput, error) + + DescribeCacheSubnetGroupsPages(*elasticache.DescribeCacheSubnetGroupsInput, func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool) error + + DescribeEngineDefaultParametersRequest(*elasticache.DescribeEngineDefaultParametersInput) (*request.Request, *elasticache.DescribeEngineDefaultParametersOutput) + + DescribeEngineDefaultParameters(*elasticache.DescribeEngineDefaultParametersInput) (*elasticache.DescribeEngineDefaultParametersOutput, error) + + DescribeEngineDefaultParametersPages(*elasticache.DescribeEngineDefaultParametersInput, func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool) error + + DescribeEventsRequest(*elasticache.DescribeEventsInput) (*request.Request, *elasticache.DescribeEventsOutput) + + DescribeEvents(*elasticache.DescribeEventsInput) (*elasticache.DescribeEventsOutput, error) + + DescribeEventsPages(*elasticache.DescribeEventsInput, func(*elasticache.DescribeEventsOutput, bool) bool) error + + DescribeReplicationGroupsRequest(*elasticache.DescribeReplicationGroupsInput) (*request.Request, *elasticache.DescribeReplicationGroupsOutput) + + DescribeReplicationGroups(*elasticache.DescribeReplicationGroupsInput) (*elasticache.DescribeReplicationGroupsOutput, error) + + DescribeReplicationGroupsPages(*elasticache.DescribeReplicationGroupsInput, func(*elasticache.DescribeReplicationGroupsOutput, bool) bool) error + + 
DescribeReservedCacheNodesRequest(*elasticache.DescribeReservedCacheNodesInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOutput) + + DescribeReservedCacheNodes(*elasticache.DescribeReservedCacheNodesInput) (*elasticache.DescribeReservedCacheNodesOutput, error) + + DescribeReservedCacheNodesPages(*elasticache.DescribeReservedCacheNodesInput, func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool) error + + DescribeReservedCacheNodesOfferingsRequest(*elasticache.DescribeReservedCacheNodesOfferingsInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOfferingsOutput) + + DescribeReservedCacheNodesOfferings(*elasticache.DescribeReservedCacheNodesOfferingsInput) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error) + + DescribeReservedCacheNodesOfferingsPages(*elasticache.DescribeReservedCacheNodesOfferingsInput, func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool) error + + DescribeSnapshotsRequest(*elasticache.DescribeSnapshotsInput) (*request.Request, *elasticache.DescribeSnapshotsOutput) + + DescribeSnapshots(*elasticache.DescribeSnapshotsInput) (*elasticache.DescribeSnapshotsOutput, error) + + DescribeSnapshotsPages(*elasticache.DescribeSnapshotsInput, func(*elasticache.DescribeSnapshotsOutput, bool) bool) error + + ListTagsForResourceRequest(*elasticache.ListTagsForResourceInput) (*request.Request, *elasticache.TagListMessage) + + ListTagsForResource(*elasticache.ListTagsForResourceInput) (*elasticache.TagListMessage, error) + + ModifyCacheClusterRequest(*elasticache.ModifyCacheClusterInput) (*request.Request, *elasticache.ModifyCacheClusterOutput) + + ModifyCacheCluster(*elasticache.ModifyCacheClusterInput) (*elasticache.ModifyCacheClusterOutput, error) + + ModifyCacheParameterGroupRequest(*elasticache.ModifyCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) + + ModifyCacheParameterGroup(*elasticache.ModifyCacheParameterGroupInput) 
(*elasticache.CacheParameterGroupNameMessage, error) + + ModifyCacheSubnetGroupRequest(*elasticache.ModifyCacheSubnetGroupInput) (*request.Request, *elasticache.ModifyCacheSubnetGroupOutput) + + ModifyCacheSubnetGroup(*elasticache.ModifyCacheSubnetGroupInput) (*elasticache.ModifyCacheSubnetGroupOutput, error) + + ModifyReplicationGroupRequest(*elasticache.ModifyReplicationGroupInput) (*request.Request, *elasticache.ModifyReplicationGroupOutput) + + ModifyReplicationGroup(*elasticache.ModifyReplicationGroupInput) (*elasticache.ModifyReplicationGroupOutput, error) + + PurchaseReservedCacheNodesOfferingRequest(*elasticache.PurchaseReservedCacheNodesOfferingInput) (*request.Request, *elasticache.PurchaseReservedCacheNodesOfferingOutput) + + PurchaseReservedCacheNodesOffering(*elasticache.PurchaseReservedCacheNodesOfferingInput) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error) + + RebootCacheClusterRequest(*elasticache.RebootCacheClusterInput) (*request.Request, *elasticache.RebootCacheClusterOutput) + + RebootCacheCluster(*elasticache.RebootCacheClusterInput) (*elasticache.RebootCacheClusterOutput, error) + + RemoveTagsFromResourceRequest(*elasticache.RemoveTagsFromResourceInput) (*request.Request, *elasticache.TagListMessage) + + RemoveTagsFromResource(*elasticache.RemoveTagsFromResourceInput) (*elasticache.TagListMessage, error) + + ResetCacheParameterGroupRequest(*elasticache.ResetCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) + + ResetCacheParameterGroup(*elasticache.ResetCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) + + RevokeCacheSecurityGroupIngressRequest(*elasticache.RevokeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.RevokeCacheSecurityGroupIngressOutput) + + RevokeCacheSecurityGroupIngress(*elasticache.RevokeCacheSecurityGroupIngressInput) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error) +} + +var _ ElastiCacheAPI = 
(*elasticache.ElastiCache)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,943 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticache_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elasticache" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleElastiCache_AddTagsToResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.AddTagsToResourceInput{ + ResourceName: aws.String("String"), // Required + Tags: []*elasticache.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_AuthorizeCacheSecurityGroupIngress() { + svc := elasticache.New(session.New()) + + params := &elasticache.AuthorizeCacheSecurityGroupIngressInput{ + CacheSecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupOwnerId: aws.String("String"), // Required + } + resp, err := svc.AuthorizeCacheSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CopySnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.CopySnapshotInput{ + SourceSnapshotName: aws.String("String"), // Required + TargetSnapshotName: aws.String("String"), // Required + } + resp, err := svc.CopySnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + AZMode: aws.String("AZMode"), + AutoMinorVersionUpgrade: aws.Bool(true), + CacheNodeType: aws.String("String"), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NumCacheNodes: aws.Int64(1), + Port: aws.Int64(1), + PreferredAvailabilityZone: aws.String("String"), + PreferredAvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + PreferredMaintenanceWindow: aws.String("String"), + ReplicationGroupId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotArns: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotName: aws.String("String"), + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + Tags: []*elasticache.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheParameterGroupInput{ + CacheParameterGroupFamily: aws.String("String"), // Required + CacheParameterGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + } + resp, err := svc.CreateCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheSecurityGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + } + resp, err := svc.CreateCacheSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_CreateCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateCacheSubnetGroupInput{ + CacheSubnetGroupDescription: aws.String("String"), // Required + CacheSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateReplicationGroupInput{ + ReplicationGroupDescription: aws.String("String"), // Required + ReplicationGroupId: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + CacheNodeType: aws.String("String"), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NumCacheClusters: aws.Int64(1), + Port: aws.Int64(1), + PreferredCacheClusterAZs: []*string{ + aws.String("String"), // Required + // More values... + }, + PreferredMaintenanceWindow: aws.String("String"), + PrimaryClusterId: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotArns: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + SnapshotName: aws.String("String"), + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + Tags: []*elasticache.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_CreateSnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.CreateSnapshotInput{ + CacheClusterId: aws.String("String"), // Required + SnapshotName: aws.String("String"), // Required + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + FinalSnapshotIdentifier: aws.String("String"), + } + resp, err := svc.DeleteCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheParameterGroupInput{ + CacheParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheSecurityGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteReplicationGroupInput{ + ReplicationGroupId: aws.String("String"), // Required + FinalSnapshotIdentifier: aws.String("String"), + RetainPrimaryCluster: aws.Bool(true), + } + resp, err := svc.DeleteReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DeleteSnapshot() { + svc := elasticache.New(session.New()) + + params := &elasticache.DeleteSnapshotInput{ + SnapshotName: aws.String("String"), // Required + } + resp, err := svc.DeleteSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheClusters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheClustersInput{ + CacheClusterId: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ShowCacheNodeInfo: aws.Bool(true), + } + resp, err := svc.DescribeCacheClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheEngineVersions() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheEngineVersionsInput{ + CacheParameterGroupFamily: aws.String("String"), + DefaultOnly: aws.Bool(true), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheEngineVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheParameterGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheParameterGroupsInput{ + CacheParameterGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheParameters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheParametersInput{ + CacheParameterGroupName: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeCacheParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheSecurityGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheSecurityGroupsInput{ + CacheSecurityGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeCacheSubnetGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeCacheSubnetGroupsInput{ + CacheSubnetGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCacheSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DescribeEngineDefaultParameters() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeEngineDefaultParametersInput{ + CacheParameterGroupFamily: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeEvents() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReplicationGroups() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReplicationGroupsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReplicationGroupId: aws.String("String"), + } + resp, err := svc.DescribeReplicationGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReservedCacheNodes() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReservedCacheNodesInput{ + CacheNodeType: aws.String("String"), + Duration: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedCacheNodeId: aws.String("String"), + ReservedCacheNodesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedCacheNodes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeReservedCacheNodesOfferings() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeReservedCacheNodesOfferingsInput{ + CacheNodeType: aws.String("String"), + Duration: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedCacheNodesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedCacheNodesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_DescribeSnapshots() { + svc := elasticache.New(session.New()) + + params := &elasticache.DescribeSnapshotsInput{ + CacheClusterId: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotName: aws.String("String"), + SnapshotSource: aws.String("String"), + } + resp, err := svc.DescribeSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ListTagsForResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.ListTagsForResourceInput{ + ResourceName: aws.String("String"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + AZMode: aws.String("AZMode"), + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + CacheNodeIdsToRemove: []*string{ + aws.String("String"), // Required + // More values... + }, + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... + }, + EngineVersion: aws.String("String"), + NewAvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + NotificationTopicArn: aws.String("String"), + NotificationTopicStatus: aws.String("String"), + NumCacheNodes: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + } + resp, err := svc.ModifyCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheParameterGroupInput{ + CacheParameterGroupName: aws.String("String"), // Required + ParameterNameValues: []*elasticache.ParameterNameValue{ // Required + { // Required + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyCacheSubnetGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String("String"), // Required + CacheSubnetGroupDescription: aws.String("String"), + SubnetIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyCacheSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ModifyReplicationGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + CacheParameterGroupName: aws.String("String"), + CacheSecurityGroupNames: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + EngineVersion: aws.String("String"), + NotificationTopicArn: aws.String("String"), + NotificationTopicStatus: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PrimaryClusterId: aws.String("String"), + ReplicationGroupDescription: aws.String("String"), + SecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SnapshotRetentionLimit: aws.Int64(1), + SnapshotWindow: aws.String("String"), + SnapshottingClusterId: aws.String("String"), + } + resp, err := svc.ModifyReplicationGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_PurchaseReservedCacheNodesOffering() { + svc := elasticache.New(session.New()) + + params := &elasticache.PurchaseReservedCacheNodesOfferingInput{ + ReservedCacheNodesOfferingId: aws.String("String"), // Required + CacheNodeCount: aws.Int64(1), + ReservedCacheNodeId: aws.String("String"), + } + resp, err := svc.PurchaseReservedCacheNodesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_RebootCacheCluster() { + svc := elasticache.New(session.New()) + + params := &elasticache.RebootCacheClusterInput{ + CacheClusterId: aws.String("String"), // Required + CacheNodeIdsToReboot: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RebootCacheCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElastiCache_RemoveTagsFromResource() { + svc := elasticache.New(session.New()) + + params := &elasticache.RemoveTagsFromResourceInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_ResetCacheParameterGroup() { + svc := elasticache.New(session.New()) + + params := &elasticache.ResetCacheParameterGroupInput{ + CacheParameterGroupName: aws.String("String"), // Required + ParameterNameValues: []*elasticache.ParameterNameValue{ // Required + { // Required + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetCacheParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElastiCache_RevokeCacheSecurityGroupIngress() { + svc := elasticache.New(session.New()) + + params := &elasticache.RevokeCacheSecurityGroupIngressInput{ + CacheSecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupName: aws.String("String"), // Required + EC2SecurityGroupOwnerId: aws.String("String"), // Required + } + resp, err := svc.RevokeCacheSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticache + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon ElastiCache is a web service that makes it easier to set up, operate, +// and scale a distributed cache in the cloud. +// +// With ElastiCache, customers gain all of the benefits of a high-performance, +// in-memory cache with far less of the administrative burden of launching and +// managing a distributed cache. The service makes setup, scaling, and cluster +// failure handling much simpler than in a self-managed cache deployment. +// +// In addition, through integration with Amazon CloudWatch, customers get enhanced +// visibility into the key performance statistics associated with their cache +// and can receive alarms if a part of their cache runs hot. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type ElastiCache struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "elasticache" + +// New creates a new instance of the ElastiCache client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ElastiCache client from just a session. +// svc := elasticache.New(mySession) +// +// // Create a ElastiCache client with additional configuration +// svc := elasticache.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElastiCache { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElastiCache { + svc := &ElastiCache{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-02-02", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ElastiCache operation and runs any +// custom request initialization. 
+func (c *ElastiCache) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticache/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,183 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticache + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ElastiCache) WaitUntilCacheClusterAvailable(input *DescribeCacheClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCacheClusters", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "incompatible-network", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "restore-failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ElastiCache) WaitUntilCacheClusterDeleted(input 
*DescribeCacheClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCacheClusters", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "deleted", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "CacheClusterNotFound", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "creating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "incompatible-network", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "modifying", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "restore-failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "CacheClusters[].CacheClusterStatus", + Expected: "snapshotting", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ElastiCache) WaitUntilReplicationGroupAvailable(input *DescribeReplicationGroupsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeReplicationGroups", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ReplicationGroups[].Status", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ReplicationGroups[].Status", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ElastiCache) WaitUntilReplicationGroupDeleted(input *DescribeReplicationGroupsInput) error { + waiterCfg := 
waiter.Config{ + Operation: "DescribeReplicationGroups", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "ReplicationGroups[].Status", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "ReplicationGroups[].Status", + Expected: "available", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ReplicationGroupNotFoundFault", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3859 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticbeanstalk provides a client for AWS Elastic Beanstalk. +package elasticbeanstalk + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAbortEnvironmentUpdate = "AbortEnvironmentUpdate" + +// AbortEnvironmentUpdateRequest generates a request for the AbortEnvironmentUpdate operation. 
+func (c *ElasticBeanstalk) AbortEnvironmentUpdateRequest(input *AbortEnvironmentUpdateInput) (req *request.Request, output *AbortEnvironmentUpdateOutput) { + op := &request.Operation{ + Name: opAbortEnvironmentUpdate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AbortEnvironmentUpdateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AbortEnvironmentUpdateOutput{} + req.Data = output + return +} + +// Cancels in-progress environment configuration update or application version +// deployment. +func (c *ElasticBeanstalk) AbortEnvironmentUpdate(input *AbortEnvironmentUpdateInput) (*AbortEnvironmentUpdateOutput, error) { + req, out := c.AbortEnvironmentUpdateRequest(input) + err := req.Send() + return out, err +} + +const opCheckDNSAvailability = "CheckDNSAvailability" + +// CheckDNSAvailabilityRequest generates a request for the CheckDNSAvailability operation. +func (c *ElasticBeanstalk) CheckDNSAvailabilityRequest(input *CheckDNSAvailabilityInput) (req *request.Request, output *CheckDNSAvailabilityOutput) { + op := &request.Operation{ + Name: opCheckDNSAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CheckDNSAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &CheckDNSAvailabilityOutput{} + req.Data = output + return +} + +// Checks if the specified CNAME is available. +func (c *ElasticBeanstalk) CheckDNSAvailability(input *CheckDNSAvailabilityInput) (*CheckDNSAvailabilityOutput, error) { + req, out := c.CheckDNSAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opComposeEnvironments = "ComposeEnvironments" + +// ComposeEnvironmentsRequest generates a request for the ComposeEnvironments operation. 
+func (c *ElasticBeanstalk) ComposeEnvironmentsRequest(input *ComposeEnvironmentsInput) (req *request.Request, output *EnvironmentDescriptionsMessage) { + op := &request.Operation{ + Name: opComposeEnvironments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ComposeEnvironmentsInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescriptionsMessage{} + req.Data = output + return +} + +// Create or update a group of environments that each run a separate component +// of a single application. Takes a list of version labels that specify application +// source bundles for each of the environments to create or update. The name +// of each environment and other required information must be included in the +// source bundles in an environment manifest named env.yaml. See Compose Environments +// (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-mgmt-compose.html) +// for details. +func (c *ElasticBeanstalk) ComposeEnvironments(input *ComposeEnvironmentsInput) (*EnvironmentDescriptionsMessage, error) { + req, out := c.ComposeEnvironmentsRequest(input) + err := req.Send() + return out, err +} + +const opCreateApplication = "CreateApplication" + +// CreateApplicationRequest generates a request for the CreateApplication operation. +func (c *ElasticBeanstalk) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *ApplicationDescriptionMessage) { + op := &request.Operation{ + Name: opCreateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplicationDescriptionMessage{} + req.Data = output + return +} + +// Creates an application that has one configuration template named default +// and no application versions. 
+func (c *ElasticBeanstalk) CreateApplication(input *CreateApplicationInput) (*ApplicationDescriptionMessage, error) { + req, out := c.CreateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreateApplicationVersion = "CreateApplicationVersion" + +// CreateApplicationVersionRequest generates a request for the CreateApplicationVersion operation. +func (c *ElasticBeanstalk) CreateApplicationVersionRequest(input *CreateApplicationVersionInput) (req *request.Request, output *ApplicationVersionDescriptionMessage) { + op := &request.Operation{ + Name: opCreateApplicationVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApplicationVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplicationVersionDescriptionMessage{} + req.Data = output + return +} + +// Creates an application version for the specified application. +// +// Once you create an application version with a specified Amazon S3 bucket +// and key location, you cannot change that Amazon S3 location. If you change +// the Amazon S3 location, you receive an exception when you attempt to launch +// an environment from the application version. +func (c *ElasticBeanstalk) CreateApplicationVersion(input *CreateApplicationVersionInput) (*ApplicationVersionDescriptionMessage, error) { + req, out := c.CreateApplicationVersionRequest(input) + err := req.Send() + return out, err +} + +const opCreateConfigurationTemplate = "CreateConfigurationTemplate" + +// CreateConfigurationTemplateRequest generates a request for the CreateConfigurationTemplate operation. 
+func (c *ElasticBeanstalk) CreateConfigurationTemplateRequest(input *CreateConfigurationTemplateInput) (req *request.Request, output *ConfigurationSettingsDescription) { + op := &request.Operation{ + Name: opCreateConfigurationTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConfigurationTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfigurationSettingsDescription{} + req.Data = output + return +} + +// Creates a configuration template. Templates are associated with a specific +// application and are used to deploy different versions of the application +// with the same configuration settings. +// +// Related Topics +// +// DescribeConfigurationOptions DescribeConfigurationSettings ListAvailableSolutionStacks +func (c *ElasticBeanstalk) CreateConfigurationTemplate(input *CreateConfigurationTemplateInput) (*ConfigurationSettingsDescription, error) { + req, out := c.CreateConfigurationTemplateRequest(input) + err := req.Send() + return out, err +} + +const opCreateEnvironment = "CreateEnvironment" + +// CreateEnvironmentRequest generates a request for the CreateEnvironment operation. +func (c *ElasticBeanstalk) CreateEnvironmentRequest(input *CreateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { + op := &request.Operation{ + Name: opCreateEnvironment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEnvironmentInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescription{} + req.Data = output + return +} + +// Launches an environment for the specified application using the specified +// configuration. 
+func (c *ElasticBeanstalk) CreateEnvironment(input *CreateEnvironmentInput) (*EnvironmentDescription, error) { + req, out := c.CreateEnvironmentRequest(input) + err := req.Send() + return out, err +} + +const opCreateStorageLocation = "CreateStorageLocation" + +// CreateStorageLocationRequest generates a request for the CreateStorageLocation operation. +func (c *ElasticBeanstalk) CreateStorageLocationRequest(input *CreateStorageLocationInput) (req *request.Request, output *CreateStorageLocationOutput) { + op := &request.Operation{ + Name: opCreateStorageLocation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStorageLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStorageLocationOutput{} + req.Data = output + return +} + +// Creates the Amazon S3 storage location for the account. +// +// This location is used to store user log files. +func (c *ElasticBeanstalk) CreateStorageLocation(input *CreateStorageLocationInput) (*CreateStorageLocationOutput, error) { + req, out := c.CreateStorageLocationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApplication = "DeleteApplication" + +// DeleteApplicationRequest generates a request for the DeleteApplication operation. +func (c *ElasticBeanstalk) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { + op := &request.Operation{ + Name: opDeleteApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteApplicationOutput{} + req.Data = output + return +} + +// Deletes the specified application along with all associated versions and +// configurations. The application versions will not be deleted from your Amazon +// S3 bucket. 
+// +// You cannot delete an application that has a running environment. +func (c *ElasticBeanstalk) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { + req, out := c.DeleteApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApplicationVersion = "DeleteApplicationVersion" + +// DeleteApplicationVersionRequest generates a request for the DeleteApplicationVersion operation. +func (c *ElasticBeanstalk) DeleteApplicationVersionRequest(input *DeleteApplicationVersionInput) (req *request.Request, output *DeleteApplicationVersionOutput) { + op := &request.Operation{ + Name: opDeleteApplicationVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationVersionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteApplicationVersionOutput{} + req.Data = output + return +} + +// Deletes the specified version from the specified application. +// +// You cannot delete an application version that is associated with a running +// environment. +func (c *ElasticBeanstalk) DeleteApplicationVersion(input *DeleteApplicationVersionInput) (*DeleteApplicationVersionOutput, error) { + req, out := c.DeleteApplicationVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteConfigurationTemplate = "DeleteConfigurationTemplate" + +// DeleteConfigurationTemplateRequest generates a request for the DeleteConfigurationTemplate operation. 
+func (c *ElasticBeanstalk) DeleteConfigurationTemplateRequest(input *DeleteConfigurationTemplateInput) (req *request.Request, output *DeleteConfigurationTemplateOutput) { + op := &request.Operation{ + Name: opDeleteConfigurationTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConfigurationTemplateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteConfigurationTemplateOutput{} + req.Data = output + return +} + +// Deletes the specified configuration template. +// +// When you launch an environment using a configuration template, the environment +// gets a copy of the template. You can delete or modify the environment's copy +// of the template without affecting the running environment. +func (c *ElasticBeanstalk) DeleteConfigurationTemplate(input *DeleteConfigurationTemplateInput) (*DeleteConfigurationTemplateOutput, error) { + req, out := c.DeleteConfigurationTemplateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEnvironmentConfiguration = "DeleteEnvironmentConfiguration" + +// DeleteEnvironmentConfigurationRequest generates a request for the DeleteEnvironmentConfiguration operation. 
+func (c *ElasticBeanstalk) DeleteEnvironmentConfigurationRequest(input *DeleteEnvironmentConfigurationInput) (req *request.Request, output *DeleteEnvironmentConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteEnvironmentConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEnvironmentConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteEnvironmentConfigurationOutput{} + req.Data = output + return +} + +// Deletes the draft configuration associated with the running environment. +// +// Updating a running environment with any configuration changes creates a +// draft configuration set. You can get the draft configuration using DescribeConfigurationSettings +// while the update is in progress or if the update fails. The DeploymentStatus +// for the draft configuration indicates whether the deployment is in process +// or has failed. The draft configuration remains in existence until it is deleted +// with this action. +func (c *ElasticBeanstalk) DeleteEnvironmentConfiguration(input *DeleteEnvironmentConfigurationInput) (*DeleteEnvironmentConfigurationOutput, error) { + req, out := c.DeleteEnvironmentConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDescribeApplicationVersions = "DescribeApplicationVersions" + +// DescribeApplicationVersionsRequest generates a request for the DescribeApplicationVersions operation. 
+func (c *ElasticBeanstalk) DescribeApplicationVersionsRequest(input *DescribeApplicationVersionsInput) (req *request.Request, output *DescribeApplicationVersionsOutput) { + op := &request.Operation{ + Name: opDescribeApplicationVersions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeApplicationVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeApplicationVersionsOutput{} + req.Data = output + return +} + +// Retrieve a list of application versions stored in your AWS Elastic Beanstalk +// storage bucket. +func (c *ElasticBeanstalk) DescribeApplicationVersions(input *DescribeApplicationVersionsInput) (*DescribeApplicationVersionsOutput, error) { + req, out := c.DescribeApplicationVersionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeApplications = "DescribeApplications" + +// DescribeApplicationsRequest generates a request for the DescribeApplications operation. +func (c *ElasticBeanstalk) DescribeApplicationsRequest(input *DescribeApplicationsInput) (req *request.Request, output *DescribeApplicationsOutput) { + op := &request.Operation{ + Name: opDescribeApplications, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeApplicationsOutput{} + req.Data = output + return +} + +// Returns the descriptions of existing applications. +func (c *ElasticBeanstalk) DescribeApplications(input *DescribeApplicationsInput) (*DescribeApplicationsOutput, error) { + req, out := c.DescribeApplicationsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigurationOptions = "DescribeConfigurationOptions" + +// DescribeConfigurationOptionsRequest generates a request for the DescribeConfigurationOptions operation. 
+func (c *ElasticBeanstalk) DescribeConfigurationOptionsRequest(input *DescribeConfigurationOptionsInput) (req *request.Request, output *DescribeConfigurationOptionsOutput) { + op := &request.Operation{ + Name: opDescribeConfigurationOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigurationOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigurationOptionsOutput{} + req.Data = output + return +} + +// Describes the configuration options that are used in a particular configuration +// template or environment, or that a specified solution stack defines. The +// description includes the values the options, their default values, and an +// indication of the required action on a running environment if an option value +// is changed. +func (c *ElasticBeanstalk) DescribeConfigurationOptions(input *DescribeConfigurationOptionsInput) (*DescribeConfigurationOptionsOutput, error) { + req, out := c.DescribeConfigurationOptionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeConfigurationSettings = "DescribeConfigurationSettings" + +// DescribeConfigurationSettingsRequest generates a request for the DescribeConfigurationSettings operation. +func (c *ElasticBeanstalk) DescribeConfigurationSettingsRequest(input *DescribeConfigurationSettingsInput) (req *request.Request, output *DescribeConfigurationSettingsOutput) { + op := &request.Operation{ + Name: opDescribeConfigurationSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConfigurationSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeConfigurationSettingsOutput{} + req.Data = output + return +} + +// Returns a description of the settings for the specified configuration set, +// that is, either a configuration template or the configuration set associated +// with a running environment. 
+// +// When describing the settings for the configuration set associated with +// a running environment, it is possible to receive two sets of setting descriptions. +// One is the deployed configuration set, and the other is a draft configuration +// of an environment that is either in the process of deployment or that failed +// to deploy. +// +// Related Topics +// +// DeleteEnvironmentConfiguration +func (c *ElasticBeanstalk) DescribeConfigurationSettings(input *DescribeConfigurationSettingsInput) (*DescribeConfigurationSettingsOutput, error) { + req, out := c.DescribeConfigurationSettingsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEnvironmentHealth = "DescribeEnvironmentHealth" + +// DescribeEnvironmentHealthRequest generates a request for the DescribeEnvironmentHealth operation. +func (c *ElasticBeanstalk) DescribeEnvironmentHealthRequest(input *DescribeEnvironmentHealthInput) (req *request.Request, output *DescribeEnvironmentHealthOutput) { + op := &request.Operation{ + Name: opDescribeEnvironmentHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEnvironmentHealthInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEnvironmentHealthOutput{} + req.Data = output + return +} + +// Returns information about the overall health of the specified environment. +// The DescribeEnvironmentHealth operation is only available with AWS Elastic +// Beanstalk Enhanced Health. +func (c *ElasticBeanstalk) DescribeEnvironmentHealth(input *DescribeEnvironmentHealthInput) (*DescribeEnvironmentHealthOutput, error) { + req, out := c.DescribeEnvironmentHealthRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEnvironmentResources = "DescribeEnvironmentResources" + +// DescribeEnvironmentResourcesRequest generates a request for the DescribeEnvironmentResources operation. 
+func (c *ElasticBeanstalk) DescribeEnvironmentResourcesRequest(input *DescribeEnvironmentResourcesInput) (req *request.Request, output *DescribeEnvironmentResourcesOutput) { + op := &request.Operation{ + Name: opDescribeEnvironmentResources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEnvironmentResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEnvironmentResourcesOutput{} + req.Data = output + return +} + +// Returns AWS resources for this environment. +func (c *ElasticBeanstalk) DescribeEnvironmentResources(input *DescribeEnvironmentResourcesInput) (*DescribeEnvironmentResourcesOutput, error) { + req, out := c.DescribeEnvironmentResourcesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEnvironments = "DescribeEnvironments" + +// DescribeEnvironmentsRequest generates a request for the DescribeEnvironments operation. +func (c *ElasticBeanstalk) DescribeEnvironmentsRequest(input *DescribeEnvironmentsInput) (req *request.Request, output *EnvironmentDescriptionsMessage) { + op := &request.Operation{ + Name: opDescribeEnvironments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEnvironmentsInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescriptionsMessage{} + req.Data = output + return +} + +// Returns descriptions for existing environments. +func (c *ElasticBeanstalk) DescribeEnvironments(input *DescribeEnvironmentsInput) (*EnvironmentDescriptionsMessage, error) { + req, out := c.DescribeEnvironmentsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEvents = "DescribeEvents" + +// DescribeEventsRequest generates a request for the DescribeEvents operation. 
+func (c *ElasticBeanstalk) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { + op := &request.Operation{ + Name: opDescribeEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventsOutput{} + req.Data = output + return +} + +// Returns list of event descriptions matching criteria up to the last 6 weeks. +// +// This action returns the most recent 1,000 events from the specified NextToken. +func (c *ElasticBeanstalk) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElasticBeanstalk) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventsOutput), lastPage) + }) +} + +const opDescribeInstancesHealth = "DescribeInstancesHealth" + +// DescribeInstancesHealthRequest generates a request for the DescribeInstancesHealth operation. 
+func (c *ElasticBeanstalk) DescribeInstancesHealthRequest(input *DescribeInstancesHealthInput) (req *request.Request, output *DescribeInstancesHealthOutput) { + op := &request.Operation{ + Name: opDescribeInstancesHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstancesHealthInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesHealthOutput{} + req.Data = output + return +} + +// Returns more detailed information about the health of the specified instances +// (for example, CPU utilization, load average, and causes). The DescribeInstancesHealth +// operation is only available with AWS Elastic Beanstalk Enhanced Health. +func (c *ElasticBeanstalk) DescribeInstancesHealth(input *DescribeInstancesHealthInput) (*DescribeInstancesHealthOutput, error) { + req, out := c.DescribeInstancesHealthRequest(input) + err := req.Send() + return out, err +} + +const opListAvailableSolutionStacks = "ListAvailableSolutionStacks" + +// ListAvailableSolutionStacksRequest generates a request for the ListAvailableSolutionStacks operation. +func (c *ElasticBeanstalk) ListAvailableSolutionStacksRequest(input *ListAvailableSolutionStacksInput) (req *request.Request, output *ListAvailableSolutionStacksOutput) { + op := &request.Operation{ + Name: opListAvailableSolutionStacks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAvailableSolutionStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAvailableSolutionStacksOutput{} + req.Data = output + return +} + +// Returns a list of the available solution stack names. 
+func (c *ElasticBeanstalk) ListAvailableSolutionStacks(input *ListAvailableSolutionStacksInput) (*ListAvailableSolutionStacksOutput, error) { + req, out := c.ListAvailableSolutionStacksRequest(input) + err := req.Send() + return out, err +} + +const opRebuildEnvironment = "RebuildEnvironment" + +// RebuildEnvironmentRequest generates a request for the RebuildEnvironment operation. +func (c *ElasticBeanstalk) RebuildEnvironmentRequest(input *RebuildEnvironmentInput) (req *request.Request, output *RebuildEnvironmentOutput) { + op := &request.Operation{ + Name: opRebuildEnvironment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebuildEnvironmentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RebuildEnvironmentOutput{} + req.Data = output + return +} + +// Deletes and recreates all of the AWS resources (for example: the Auto Scaling +// group, load balancer, etc.) for a specified environment and forces a restart. +func (c *ElasticBeanstalk) RebuildEnvironment(input *RebuildEnvironmentInput) (*RebuildEnvironmentOutput, error) { + req, out := c.RebuildEnvironmentRequest(input) + err := req.Send() + return out, err +} + +const opRequestEnvironmentInfo = "RequestEnvironmentInfo" + +// RequestEnvironmentInfoRequest generates a request for the RequestEnvironmentInfo operation. 
+func (c *ElasticBeanstalk) RequestEnvironmentInfoRequest(input *RequestEnvironmentInfoInput) (req *request.Request, output *RequestEnvironmentInfoOutput) { + op := &request.Operation{ + Name: opRequestEnvironmentInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestEnvironmentInfoInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RequestEnvironmentInfoOutput{} + req.Data = output + return +} + +// Initiates a request to compile the specified type of information of the deployed +// environment. +// +// Setting the InfoType to tail compiles the last lines from the application +// server log files of every Amazon EC2 instance in your environment. +// +// Setting the InfoType to bundle compresses the application server log files +// for every Amazon EC2 instance into a .zip file. Legacy and .NET containers +// do not support bundle logs. +// +// Use RetrieveEnvironmentInfo to obtain the set of logs. +// +// Related Topics +// +// RetrieveEnvironmentInfo +func (c *ElasticBeanstalk) RequestEnvironmentInfo(input *RequestEnvironmentInfoInput) (*RequestEnvironmentInfoOutput, error) { + req, out := c.RequestEnvironmentInfoRequest(input) + err := req.Send() + return out, err +} + +const opRestartAppServer = "RestartAppServer" + +// RestartAppServerRequest generates a request for the RestartAppServer operation. 
+func (c *ElasticBeanstalk) RestartAppServerRequest(input *RestartAppServerInput) (req *request.Request, output *RestartAppServerOutput) { + op := &request.Operation{ + Name: opRestartAppServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestartAppServerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RestartAppServerOutput{} + req.Data = output + return +} + +// Causes the environment to restart the application container server running +// on each Amazon EC2 instance. +func (c *ElasticBeanstalk) RestartAppServer(input *RestartAppServerInput) (*RestartAppServerOutput, error) { + req, out := c.RestartAppServerRequest(input) + err := req.Send() + return out, err +} + +const opRetrieveEnvironmentInfo = "RetrieveEnvironmentInfo" + +// RetrieveEnvironmentInfoRequest generates a request for the RetrieveEnvironmentInfo operation. +func (c *ElasticBeanstalk) RetrieveEnvironmentInfoRequest(input *RetrieveEnvironmentInfoInput) (req *request.Request, output *RetrieveEnvironmentInfoOutput) { + op := &request.Operation{ + Name: opRetrieveEnvironmentInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetrieveEnvironmentInfoInput{} + } + + req = c.newRequest(op, input, output) + output = &RetrieveEnvironmentInfoOutput{} + req.Data = output + return +} + +// Retrieves the compiled information from a RequestEnvironmentInfo request. +// +// Related Topics +// +// RequestEnvironmentInfo +func (c *ElasticBeanstalk) RetrieveEnvironmentInfo(input *RetrieveEnvironmentInfoInput) (*RetrieveEnvironmentInfoOutput, error) { + req, out := c.RetrieveEnvironmentInfoRequest(input) + err := req.Send() + return out, err +} + +const opSwapEnvironmentCNAMEs = "SwapEnvironmentCNAMEs" + +// SwapEnvironmentCNAMEsRequest generates a request for the SwapEnvironmentCNAMEs operation. 
+func (c *ElasticBeanstalk) SwapEnvironmentCNAMEsRequest(input *SwapEnvironmentCNAMEsInput) (req *request.Request, output *SwapEnvironmentCNAMEsOutput) { + op := &request.Operation{ + Name: opSwapEnvironmentCNAMEs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SwapEnvironmentCNAMEsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SwapEnvironmentCNAMEsOutput{} + req.Data = output + return +} + +// Swaps the CNAMEs of two environments. +func (c *ElasticBeanstalk) SwapEnvironmentCNAMEs(input *SwapEnvironmentCNAMEsInput) (*SwapEnvironmentCNAMEsOutput, error) { + req, out := c.SwapEnvironmentCNAMEsRequest(input) + err := req.Send() + return out, err +} + +const opTerminateEnvironment = "TerminateEnvironment" + +// TerminateEnvironmentRequest generates a request for the TerminateEnvironment operation. +func (c *ElasticBeanstalk) TerminateEnvironmentRequest(input *TerminateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { + op := &request.Operation{ + Name: opTerminateEnvironment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateEnvironmentInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescription{} + req.Data = output + return +} + +// Terminates the specified environment. +func (c *ElasticBeanstalk) TerminateEnvironment(input *TerminateEnvironmentInput) (*EnvironmentDescription, error) { + req, out := c.TerminateEnvironmentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApplication = "UpdateApplication" + +// UpdateApplicationRequest generates a request for the UpdateApplication operation. 
+func (c *ElasticBeanstalk) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *ApplicationDescriptionMessage) { + op := &request.Operation{ + Name: opUpdateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplicationDescriptionMessage{} + req.Data = output + return +} + +// Updates the specified application to have the specified properties. +// +// If a property (for example, description) is not provided, the value remains +// unchanged. To clear these properties, specify an empty string. +func (c *ElasticBeanstalk) UpdateApplication(input *UpdateApplicationInput) (*ApplicationDescriptionMessage, error) { + req, out := c.UpdateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApplicationVersion = "UpdateApplicationVersion" + +// UpdateApplicationVersionRequest generates a request for the UpdateApplicationVersion operation. +func (c *ElasticBeanstalk) UpdateApplicationVersionRequest(input *UpdateApplicationVersionInput) (req *request.Request, output *ApplicationVersionDescriptionMessage) { + op := &request.Operation{ + Name: opUpdateApplicationVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplicationVersionDescriptionMessage{} + req.Data = output + return +} + +// Updates the specified application version to have the specified properties. +// +// If a property (for example, description) is not provided, the value remains +// unchanged. To clear properties, specify an empty string. 
+func (c *ElasticBeanstalk) UpdateApplicationVersion(input *UpdateApplicationVersionInput) (*ApplicationVersionDescriptionMessage, error) { + req, out := c.UpdateApplicationVersionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateConfigurationTemplate = "UpdateConfigurationTemplate" + +// UpdateConfigurationTemplateRequest generates a request for the UpdateConfigurationTemplate operation. +func (c *ElasticBeanstalk) UpdateConfigurationTemplateRequest(input *UpdateConfigurationTemplateInput) (req *request.Request, output *ConfigurationSettingsDescription) { + op := &request.Operation{ + Name: opUpdateConfigurationTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConfigurationTemplateInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfigurationSettingsDescription{} + req.Data = output + return +} + +// Updates the specified configuration template to have the specified properties +// or configuration option values. +// +// If a property (for example, ApplicationName) is not provided, its value +// remains unchanged. To clear such properties, specify an empty string. Related +// Topics +// +// DescribeConfigurationOptions +func (c *ElasticBeanstalk) UpdateConfigurationTemplate(input *UpdateConfigurationTemplateInput) (*ConfigurationSettingsDescription, error) { + req, out := c.UpdateConfigurationTemplateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateEnvironment = "UpdateEnvironment" + +// UpdateEnvironmentRequest generates a request for the UpdateEnvironment operation. 
+func (c *ElasticBeanstalk) UpdateEnvironmentRequest(input *UpdateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { + op := &request.Operation{ + Name: opUpdateEnvironment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateEnvironmentInput{} + } + + req = c.newRequest(op, input, output) + output = &EnvironmentDescription{} + req.Data = output + return +} + +// Updates the environment description, deploys a new application version, updates +// the configuration settings to an entirely new configuration template, or +// updates select configuration option values in the running environment. +// +// Attempting to update both the release and configuration is not allowed +// and AWS Elastic Beanstalk returns an InvalidParameterCombination error. +// +// When updating the configuration settings to a new template or individual +// settings, a draft configuration is created and DescribeConfigurationSettings +// for this environment returns two setting descriptions with different DeploymentStatus +// values. +func (c *ElasticBeanstalk) UpdateEnvironment(input *UpdateEnvironmentInput) (*EnvironmentDescription, error) { + req, out := c.UpdateEnvironmentRequest(input) + err := req.Send() + return out, err +} + +const opValidateConfigurationSettings = "ValidateConfigurationSettings" + +// ValidateConfigurationSettingsRequest generates a request for the ValidateConfigurationSettings operation. 
+func (c *ElasticBeanstalk) ValidateConfigurationSettingsRequest(input *ValidateConfigurationSettingsInput) (req *request.Request, output *ValidateConfigurationSettingsOutput) { + op := &request.Operation{ + Name: opValidateConfigurationSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidateConfigurationSettingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ValidateConfigurationSettingsOutput{} + req.Data = output + return +} + +// Takes a set of configuration settings and either a configuration template +// or environment, and determines whether those values are valid. +// +// This action returns a list of messages indicating any errors or warnings +// associated with the selection of option values. +func (c *ElasticBeanstalk) ValidateConfigurationSettings(input *ValidateConfigurationSettingsInput) (*ValidateConfigurationSettingsOutput, error) { + req, out := c.ValidateConfigurationSettingsRequest(input) + err := req.Send() + return out, err +} + +type AbortEnvironmentUpdateInput struct { + _ struct{} `type:"structure"` + + // This specifies the ID of the environment with the in-progress update that + // you want to cancel. + EnvironmentId *string `type:"string"` + + // This specifies the name of the environment with the in-progress update that + // you want to cancel. 
+ EnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s AbortEnvironmentUpdateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortEnvironmentUpdateInput) GoString() string { + return s.String() +} + +type AbortEnvironmentUpdateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AbortEnvironmentUpdateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortEnvironmentUpdateOutput) GoString() string { + return s.String() +} + +// Describes the properties of an application. +type ApplicationDescription struct { + _ struct{} `type:"structure"` + + // The name of the application. + ApplicationName *string `min:"1" type:"string"` + + // The names of the configuration templates associated with this application. + ConfigurationTemplates []*string `type:"list"` + + // The date when the application was created. + DateCreated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The date when the application was last modified. + DateUpdated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // User-defined description of the application. + Description *string `type:"string"` + + // The names of the versions for this application. + Versions []*string `type:"list"` +} + +// String returns the string representation +func (s ApplicationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationDescription) GoString() string { + return s.String() +} + +// Result message containing a single description of an application. +type ApplicationDescriptionMessage struct { + _ struct{} `type:"structure"` + + // The ApplicationDescription of the application. 
+	Application *ApplicationDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s ApplicationDescriptionMessage) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationDescriptionMessage) GoString() string {
+	return s.String()
+}
+
+// Represents the application metrics for a specified environment.
+type ApplicationMetrics struct {
+	_ struct{} `type:"structure"`
+
+	// The amount of time that the metrics cover (usually 10 seconds). For example,
+	// you might have 5 requests (request_count) within the most recent time slice
+	// of 10 seconds (duration).
+	Duration *int64 `type:"integer"`
+
+	// Represents the average latency for the slowest X percent of requests over
+	// the last 10 seconds. Latencies are in seconds with one millisecond resolution.
+	Latency *Latency `type:"structure"`
+
+	// Average number of requests handled by the web server per second over the
+	// last 10 seconds.
+	RequestCount *int64 `type:"integer"`
+
+	// Represents the percentage of requests over the last 10 seconds that resulted
+	// in each type of status code response.
+	StatusCodes *StatusCodes `type:"structure"`
+}
+
+// String returns the string representation
+func (s ApplicationMetrics) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ApplicationMetrics) GoString() string {
+	return s.String()
+}
+
+// Describes the properties of an application version.
+type ApplicationVersionDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the application associated with this release.
+	ApplicationName *string `min:"1" type:"string"`
+
+	// The creation date of the application version.
+	DateCreated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The last modified date of the application version.
+ DateUpdated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The description of this application version. + Description *string `type:"string"` + + // The location where the source bundle is located for this version. + SourceBundle *S3Location `type:"structure"` + + // The processing status of the application version. + Status *string `type:"string" enum:"ApplicationVersionStatus"` + + // A label uniquely identifying the version for the associated application. + VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ApplicationVersionDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationVersionDescription) GoString() string { + return s.String() +} + +// Result message wrapping a single description of an application version. +type ApplicationVersionDescriptionMessage struct { + _ struct{} `type:"structure"` + + // The ApplicationVersionDescription of the application version. + ApplicationVersion *ApplicationVersionDescription `type:"structure"` +} + +// String returns the string representation +func (s ApplicationVersionDescriptionMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationVersionDescriptionMessage) GoString() string { + return s.String() +} + +// Describes an Auto Scaling launch configuration. +type AutoScalingGroup struct { + _ struct{} `type:"structure"` + + // The name of the AutoScalingGroup . + Name *string `type:"string"` +} + +// String returns the string representation +func (s AutoScalingGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingGroup) GoString() string { + return s.String() +} + +// Represents CPU utilization information from the specified instance that belongs +// to the AWS Elastic Beanstalk environment. 
Use the instanceId property to +// specify the application instance for which you'd like to return data. +type CPUUtilization struct { + _ struct{} `type:"structure"` + + // Percentage of time that the CPU has spent in the I/O Wait state over the + // last 10 seconds. + IOWait *float64 `type:"double"` + + // Percentage of time that the CPU has spent in the IRQ state over the last + // 10 seconds. + IRQ *float64 `type:"double"` + + // Percentage of time that the CPU has spent in the Idle state over the last + // 10 seconds. + Idle *float64 `type:"double"` + + // Percentage of time that the CPU has spent in the Nice state over the last + // 10 seconds. + Nice *float64 `type:"double"` + + // Percentage of time that the CPU has spent in the SoftIRQ state over the last + // 10 seconds. + SoftIRQ *float64 `type:"double"` + + // Percentage of time that the CPU has spent in the System state over the last + // 10 seconds. + System *float64 `type:"double"` + + // Percentage of time that the CPU has spent in the User state over the last + // 10 seconds. + User *float64 `type:"double"` +} + +// String returns the string representation +func (s CPUUtilization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CPUUtilization) GoString() string { + return s.String() +} + +// Results message indicating whether a CNAME is available. +type CheckDNSAvailabilityInput struct { + _ struct{} `type:"structure"` + + // The prefix used when this CNAME is reserved. + CNAMEPrefix *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s CheckDNSAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckDNSAvailabilityInput) GoString() string { + return s.String() +} + +// Indicates if the specified CNAME is available. 
+type CheckDNSAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // Indicates if the specified CNAME is available: + // + // true : The CNAME is available. false : The CNAME is not available. + Available *bool `type:"boolean"` + + // The fully qualified CNAME to reserve when CreateEnvironment is called with + // the provided prefix. + FullyQualifiedCNAME *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CheckDNSAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckDNSAvailabilityOutput) GoString() string { + return s.String() +} + +type ComposeEnvironmentsInput struct { + _ struct{} `type:"structure"` + + // The name of the application to which the specified source bundles belong. + ApplicationName *string `min:"1" type:"string"` + + // The name of the group to which the target environments belong. Specify a + // group name only if the environment name defined in each target environment's + // manifest ends with a + (plus) character. See Environment Manifest (env.yaml) + // (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-mgmt-compose.html#environment-mgmt-compose-envyaml) + // for details. + GroupName *string `min:"1" type:"string"` + + // A list of version labels, specifying one or more application source bundles + // that belong to the target application. Each source bundle must include an + // environment manifest that specifies the name of the environment and the name + // of the solution stack to use, and optionally can specify environment links + // to create. 
+ VersionLabels []*string `type:"list"` +} + +// String returns the string representation +func (s ComposeEnvironmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComposeEnvironmentsInput) GoString() string { + return s.String() +} + +// Describes the possible values for a configuration option. +type ConfigurationOptionDescription struct { + _ struct{} `type:"structure"` + + // An indication of which action is required if the value for this configuration + // option changes: + // + // NoInterruption : There is no interruption to the environment or application + // availability. RestartEnvironment : The environment is entirely restarted, + // all AWS resources are deleted and recreated, and the environment is unavailable + // during the process. RestartApplicationServer : The environment is available + // the entire time. However, a short application outage occurs when the application + // servers on the running Amazon EC2 instances are restarted. + ChangeSeverity *string `type:"string"` + + // The default value for this configuration option. + DefaultValue *string `type:"string"` + + // If specified, the configuration option must be a string value no longer than + // this value. + MaxLength *int64 `type:"integer"` + + // If specified, the configuration option must be a numeric value less than + // this value. + MaxValue *int64 `type:"integer"` + + // If specified, the configuration option must be a numeric value greater than + // this value. + MinValue *int64 `type:"integer"` + + // The name of the configuration option. + Name *string `type:"string"` + + // A unique namespace identifying the option's associated AWS resource. + Namespace *string `type:"string"` + + // If specified, the configuration option must be a string value that satisfies + // this regular expression. 
+	Regex *OptionRestrictionRegex `type:"structure"`
+
+	// An indication of whether the user defined this configuration option:
+	//
+	//  true : This configuration option was defined by the user. It is a valid
+	// choice for specifying this as an Option to Remove when updating configuration
+	// settings.
+	//
+	//  false : This configuration was not defined by the user. Constraint:
+	// You can remove only UserDefined options from a configuration.
+	//
+	//  Valid Values: true | false
+	UserDefined *bool `type:"boolean"`
+
+	// If specified, values for the configuration option are selected from this
+	// list.
+	ValueOptions []*string `type:"list"`
+
+	// An indication of which type of values this option has and whether it is allowable
+	// to select one or more than one of the possible values:
+	//
+	//  Scalar : Values for this option are a single selection from the possible
+	// values, or an unformatted string, or numeric value governed by the MIN/MAX/Regex
+	// constraints. List : Values for this option are multiple selections from
+	// the possible values. Boolean : Values for this option are either true or
+	// false . Json : Values for this option are a JSON representation of a ConfigDocument.
+	ValueType *string `type:"string" enum:"ConfigurationOptionValueType"`
+}
+
+// String returns the string representation
+func (s ConfigurationOptionDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigurationOptionDescription) GoString() string {
+	return s.String()
+}
+
+// A specification identifying an individual configuration option along with
+// its current value. For a list of possible option values, go to Option Values
+// (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options.html)
+// in the AWS Elastic Beanstalk Developer Guide.
+type ConfigurationOptionSetting struct { + _ struct{} `type:"structure"` + + // A unique namespace identifying the option's associated AWS resource. + Namespace *string `type:"string"` + + // The name of the configuration option. + OptionName *string `type:"string"` + + // A unique resource name for a time-based scaling configuration option. + ResourceName *string `min:"1" type:"string"` + + // The current value for the configuration option. + Value *string `type:"string"` +} + +// String returns the string representation +func (s ConfigurationOptionSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationOptionSetting) GoString() string { + return s.String() +} + +// Describes the settings for a configuration set. +type ConfigurationSettingsDescription struct { + _ struct{} `type:"structure"` + + // The name of the application associated with this configuration set. + ApplicationName *string `min:"1" type:"string"` + + // The date (in UTC time) when this configuration set was created. + DateCreated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The date (in UTC time) when this configuration set was last modified. + DateUpdated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // If this configuration set is associated with an environment, the DeploymentStatus + // parameter indicates the deployment status of this configuration set: + // + // null: This configuration is not associated with a running environment. + // pending: This is a draft configuration that is not deployed to the associated + // environment but is in the process of deploying. deployed: This is the configuration + // that is currently deployed to the associated running environment. failed: + // This is a draft configuration that failed to successfully deploy. + DeploymentStatus *string `type:"string" enum:"ConfigurationDeploymentStatus"` + + // Describes this configuration set. 
+ Description *string `type:"string"` + + // If not null, the name of the environment for this configuration set. + EnvironmentName *string `min:"4" type:"string"` + + // A list of the configuration options and their values in this configuration + // set. + OptionSettings []*ConfigurationOptionSetting `type:"list"` + + // The name of the solution stack this configuration set uses. + SolutionStackName *string `type:"string"` + + // If not null, the name of the configuration template for this configuration + // set. + TemplateName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ConfigurationSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationSettingsDescription) GoString() string { + return s.String() +} + +type CreateApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of the application. + // + // Constraint: This name must be unique within your account. If the specified + // name already exists, the action returns an InvalidParameterValue error. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // Describes the application. + Description *string `type:"string"` +} + +// String returns the string representation +func (s CreateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationInput) GoString() string { + return s.String() +} + +type CreateApplicationVersionInput struct { + _ struct{} `type:"structure"` + + // The name of the application. If no application is found with this name, and + // AutoCreateApplication is false, returns an InvalidParameterValue error. 
+ ApplicationName *string `min:"1" type:"string" required:"true"` + + // Determines how the system behaves if the specified application for this version + // does not already exist: + // + // true : Automatically creates the specified application for this release + // if it does not already exist. false : Throws an InvalidParameterValue if + // the specified application for this release does not already exist. Default: + // false + // + // Valid Values: true | false + AutoCreateApplication *bool `type:"boolean"` + + // Describes this version. + Description *string `type:"string"` + + // Preprocesses and validates the environment manifest and configuration files + // in the source bundle. Validating configuration files can identify issues + // prior to deploying the application version to an environment. + Process *bool `type:"boolean"` + + // The Amazon S3 bucket and key that identify the location of the source bundle + // for this version. + // + // If data found at the Amazon S3 location exceeds the maximum allowed source + // bundle size, AWS Elastic Beanstalk returns an InvalidParameterValue error. + // The maximum size allowed is 512 MB. + // + // Default: If not specified, AWS Elastic Beanstalk uses a sample application. + // If only partially specified (for example, a bucket is provided but not the + // key) or if no data is found at the Amazon S3 location, AWS Elastic Beanstalk + // returns an InvalidParameterCombination error. + SourceBundle *S3Location `type:"structure"` + + // A label identifying this version. + // + // Constraint: Must be unique per application. If an application version already + // exists with this label for the specified application, AWS Elastic Beanstalk + // returns an InvalidParameterValue error. 
+ VersionLabel *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateApplicationVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationVersionInput) GoString() string { + return s.String() +} + +type CreateConfigurationTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the application to associate with this configuration template. + // If no application is found with this name, AWS Elastic Beanstalk returns + // an InvalidParameterValue error. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // Describes this configuration. + Description *string `type:"string"` + + // The ID of the environment used with this configuration template. + EnvironmentId *string `type:"string"` + + // If specified, AWS Elastic Beanstalk sets the specified configuration option + // to the requested value. The new value overrides the value obtained from the + // solution stack or the source configuration template. + OptionSettings []*ConfigurationOptionSetting `type:"list"` + + // The name of the solution stack used by this configuration. The solution stack + // specifies the operating system, architecture, and application server for + // a configuration template. It determines the set of configuration options + // as well as the possible and default values. + // + // Use ListAvailableSolutionStacks to obtain a list of available solution + // stacks. + // + // A solution stack name or a source configuration parameter must be specified, + // otherwise AWS Elastic Beanstalk returns an InvalidParameterValue error. + // + // If a solution stack name is not specified and the source configuration + // parameter is specified, AWS Elastic Beanstalk uses the same solution stack + // as the source configuration template. 
+ SolutionStackName *string `type:"string"` + + // If specified, AWS Elastic Beanstalk uses the configuration values from the + // specified configuration template to create a new configuration. + // + // Values specified in the OptionSettings parameter of this call overrides + // any values obtained from the SourceConfiguration. + // + // If no configuration template is found, returns an InvalidParameterValue + // error. + // + // Constraint: If both the solution stack name parameter and the source configuration + // parameters are specified, the solution stack of the source configuration + // template must match the specified solution stack name or else AWS Elastic + // Beanstalk returns an InvalidParameterCombination error. + SourceConfiguration *SourceConfiguration `type:"structure"` + + // The name of the configuration template. + // + // Constraint: This name must be unique per application. + // + // Default: If a configuration template already exists with this name, AWS + // Elastic Beanstalk returns an InvalidParameterValue error. + TemplateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateConfigurationTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConfigurationTemplateInput) GoString() string { + return s.String() +} + +type CreateEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The name of the application that contains the version to be deployed. + // + // If no application is found with this name, CreateEnvironment returns an + // InvalidParameterValue error. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // If specified, the environment attempts to use this value as the prefix for + // the CNAME. If not specified, the CNAME is generated automatically by appending + // a random alphanumeric string to the environment name. 
+ CNAMEPrefix *string `min:"4" type:"string"` + + // Describes this environment. + Description *string `type:"string"` + + // A unique name for the deployment environment. Used in the application URL. + // + // Constraint: Must be from 4 to 23 characters in length. The name can contain + // only letters, numbers, and hyphens. It cannot start or end with a hyphen. + // This name must be unique in your account. If the specified name already exists, + // AWS Elastic Beanstalk returns an InvalidParameterValue error. + // + // Default: If the CNAME parameter is not specified, the environment name becomes + // part of the CNAME, and therefore part of the visible URL for your application. + EnvironmentName *string `min:"4" type:"string"` + + // The name of the group to which the target environment belongs. Specify a + // group name only if the environment's name is specified in an environment + // manifest and not with the environment name parameter. See Environment Manifest + // (env.yaml) (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-mgmt-compose.html#environment-mgmt-compose-envyaml) + // for details. + GroupName *string `min:"1" type:"string"` + + // If specified, AWS Elastic Beanstalk sets the specified configuration options + // to the requested value in the configuration set for the new environment. + // These override the values obtained from the solution stack or the configuration + // template. + OptionSettings []*ConfigurationOptionSetting `type:"list"` + + // A list of custom user-defined configuration options to remove from the configuration + // set for this new environment. + OptionsToRemove []*OptionSpecification `type:"list"` + + // This is an alternative to specifying a template name. If specified, AWS Elastic + // Beanstalk sets the configuration values to the default values associated + // with the specified solution stack. + // + // Condition: You must specify either this or a TemplateName, but not both. 
+ // If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination + // error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter + // error. + SolutionStackName *string `type:"string"` + + // This specifies the tags applied to resources in the environment. + Tags []*Tag `type:"list"` + + // The name of the configuration template to use in deployment. If no configuration + // template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue + // error. + // + // Condition: You must specify either this parameter or a SolutionStackName, + // but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination + // error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter + // error. + TemplateName *string `min:"1" type:"string"` + + // This specifies the tier to use for creating this environment. + Tier *EnvironmentTier `type:"structure"` + + // The name of the application version to deploy. + // + // If the specified application has no associated application versions, AWS + // Elastic Beanstalk UpdateEnvironment returns an InvalidParameterValue error. + // + // Default: If not specified, AWS Elastic Beanstalk attempts to launch the + // sample application in the container. 
+ VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEnvironmentInput) GoString() string { + return s.String() +} + +type CreateStorageLocationInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateStorageLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStorageLocationInput) GoString() string { + return s.String() +} + +// Results of a CreateStorageLocationResult call. +type CreateStorageLocationOutput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket created. + S3Bucket *string `type:"string"` +} + +// String returns the string representation +func (s CreateStorageLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStorageLocationOutput) GoString() string { + return s.String() +} + +type DeleteApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of the application to delete. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // When set to true, running environments will be terminated before deleting + // the application. 
+ TerminateEnvByForce *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationInput) GoString() string { + return s.String() +} + +type DeleteApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationOutput) GoString() string { + return s.String() +} + +type DeleteApplicationVersionInput struct { + _ struct{} `type:"structure"` + + // The name of the application to delete releases from. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // Indicates whether to delete the associated source bundle from Amazon S3: + // + // true: An attempt is made to delete the associated Amazon S3 source bundle + // specified at time of creation. false: No action is taken on the Amazon + // S3 source bundle specified at time of creation. Valid Values: true | false + DeleteSourceBundle *bool `type:"boolean"` + + // The label of the version to delete. 
+ VersionLabel *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApplicationVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationVersionInput) GoString() string { + return s.String() +} + +type DeleteApplicationVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteApplicationVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationVersionOutput) GoString() string { + return s.String() +} + +type DeleteConfigurationTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the application to delete the configuration template from. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // The name of the configuration template to delete. + TemplateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationTemplateInput) GoString() string { + return s.String() +} + +type DeleteConfigurationTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigurationTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationTemplateOutput) GoString() string { + return s.String() +} + +type DeleteEnvironmentConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the application the environment is associated with. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // The name of the environment to delete the draft configuration from. 
+ EnvironmentName *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEnvironmentConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEnvironmentConfigurationInput) GoString() string { + return s.String() +} + +type DeleteEnvironmentConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEnvironmentConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEnvironmentConfigurationOutput) GoString() string { + return s.String() +} + +// Result message containing a list of configuration descriptions. +type DescribeApplicationVersionsInput struct { + _ struct{} `type:"structure"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // only include ones that are associated with the specified application. + ApplicationName *string `min:"1" type:"string"` + + // If specified, restricts the returned descriptions to only include ones that + // have the specified version labels. + VersionLabels []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeApplicationVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicationVersionsInput) GoString() string { + return s.String() +} + +// Result message wrapping a list of application version descriptions. +type DescribeApplicationVersionsOutput struct { + _ struct{} `type:"structure"` + + // List of ApplicationVersionDescription objects sorted by order of creation. 
+ ApplicationVersions []*ApplicationVersionDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeApplicationVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicationVersionsOutput) GoString() string { + return s.String() +} + +type DescribeApplicationsInput struct { + _ struct{} `type:"structure"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // only include those with the specified names. + ApplicationNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicationsInput) GoString() string { + return s.String() +} + +// Result message containing a list of application descriptions. +type DescribeApplicationsOutput struct { + _ struct{} `type:"structure"` + + // This parameter contains a list of ApplicationDescription. + Applications []*ApplicationDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicationsOutput) GoString() string { + return s.String() +} + +// Result message containig a list of application version descriptions. +type DescribeConfigurationOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the application associated with the configuration template or + // environment. Only needed if you want to describe the configuration options + // associated with either the configuration template or environment. + ApplicationName *string `min:"1" type:"string"` + + // The name of the environment whose configuration options you want to describe. 
+ EnvironmentName *string `min:"4" type:"string"` + + // If specified, restricts the descriptions to only the specified options. + Options []*OptionSpecification `type:"list"` + + // The name of the solution stack whose configuration options you want to describe. + SolutionStackName *string `type:"string"` + + // The name of the configuration template whose configuration options you want + // to describe. + TemplateName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeConfigurationOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationOptionsInput) GoString() string { + return s.String() +} + +// Describes the settings for a specified configuration set. +type DescribeConfigurationOptionsOutput struct { + _ struct{} `type:"structure"` + + // A list of ConfigurationOptionDescription. + Options []*ConfigurationOptionDescription `type:"list"` + + // The name of the solution stack these configuration options belong to. + SolutionStackName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConfigurationOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationOptionsOutput) GoString() string { + return s.String() +} + +// Result message containing all of the configuration settings for a specified +// solution stack or configuration template. +type DescribeConfigurationSettingsInput struct { + _ struct{} `type:"structure"` + + // The application for the environment or configuration template. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // The name of the environment to describe. + // + // Condition: You must specify either this or a TemplateName, but not both. + // If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination + // error. 
If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` + + // The name of the configuration template to describe. + // + // Conditional: You must specify either this parameter or an EnvironmentName, + // but not both. If you specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination + // error. If you do not specify either, AWS Elastic Beanstalk returns a MissingRequiredParameter + // error. + TemplateName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeConfigurationSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationSettingsInput) GoString() string { + return s.String() +} + +// The results from a request to change the configuration settings of an environment. +type DescribeConfigurationSettingsOutput struct { + _ struct{} `type:"structure"` + + // A list of ConfigurationSettingsDescription. + ConfigurationSettings []*ConfigurationSettingsDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeConfigurationSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConfigurationSettingsOutput) GoString() string { + return s.String() +} + +// See the example below to learn how to create a request body. +type DescribeEnvironmentHealthInput struct { + _ struct{} `type:"structure"` + + // Specifies the response elements you wish to receive. If no attribute names + // are specified, AWS Elastic Beanstalk only returns the name of the environment. + AttributeNames []*string `type:"list"` + + // Specifies the AWS Elastic Beanstalk environment ID. + // + // Condition: You must specify either this or an EnvironmentName, or both. 
+ // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // Specifies the AWS Elastic Beanstalk environment name. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s DescribeEnvironmentHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEnvironmentHealthInput) GoString() string { + return s.String() +} + +// See the example below for a sample response. +type DescribeEnvironmentHealthOutput struct { + _ struct{} `type:"structure"` + + // Represents the application metrics for a specified environment. + ApplicationMetrics *ApplicationMetrics `type:"structure"` + + // Returns potential causes for the reported status. + Causes []*string `type:"list"` + + // Returns the color indicator that tells you information about the health of + // the environment. For more information, see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). + Color *string `type:"string"` + + // The AWS Elastic Beanstalk environment name. + EnvironmentName *string `min:"4" type:"string"` + + // Contains the response body with information about the health of the environment. + HealthStatus *string `type:"string"` + + // Represents summary information about the health of an instance. For more + // information, see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). + InstancesHealth *InstanceHealthSummary `type:"structure"` + + // The date and time the information was last refreshed. 
+ RefreshedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Returns the health status value of the environment. For more information, + // see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). + Status *string `type:"string" enum:"EnvironmentHealth"` +} + +// String returns the string representation +func (s DescribeEnvironmentHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEnvironmentHealthOutput) GoString() string { + return s.String() +} + +type DescribeEnvironmentResourcesInput struct { + _ struct{} `type:"structure"` + + // The ID of the environment to retrieve AWS resource usage data. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment to retrieve AWS resource usage data. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s DescribeEnvironmentResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEnvironmentResourcesInput) GoString() string { + return s.String() +} + +// Result message containing a list of environment resource descriptions. +type DescribeEnvironmentResourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of EnvironmentResourceDescription. 
+ EnvironmentResources *EnvironmentResourceDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeEnvironmentResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEnvironmentResourcesOutput) GoString() string { + return s.String() +} + +type DescribeEnvironmentsInput struct { + _ struct{} `type:"structure"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // include only those that are associated with this application. + ApplicationName *string `min:"1" type:"string"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // include only those that have the specified IDs. + EnvironmentIds []*string `type:"list"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // include only those that have the specified names. + EnvironmentNames []*string `type:"list"` + + // Indicates whether to include deleted environments: + // + // true: Environments that have been deleted after IncludedDeletedBackTo are + // displayed. + // + // false: Do not include deleted environments. + IncludeDeleted *bool `type:"boolean"` + + // If specified when IncludeDeleted is set to true, then environments deleted + // after this date are displayed. + IncludedDeletedBackTo *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // include only those that are associated with this application version. 
+ VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeEnvironmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEnvironmentsInput) GoString() string { + return s.String() +} + +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // include only those associated with this application. + ApplicationName *string `min:"1" type:"string"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // those that occur up to, but not including, the EndTime. + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // those associated with this environment. + EnvironmentId *string `type:"string"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // those associated with this environment. + EnvironmentName *string `min:"4" type:"string"` + + // Specifies the maximum number of events that can be returned, beginning with + // the most recent event. + MaxRecords *int64 `min:"1" type:"integer"` + + // Pagination token. If specified, the events return the next batch of results. + NextToken *string `type:"string"` + + // If specified, AWS Elastic Beanstalk restricts the described events to include + // only those associated with this request ID. + RequestId *string `type:"string"` + + // If specified, limits the events returned from this call to include only those + // with the specified severity or higher. + Severity *string `type:"string" enum:"EventSeverity"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // those that occur on or after this time. 
+ StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // those that are associated with this environment configuration. + TemplateName *string `min:"1" type:"string"` + + // If specified, AWS Elastic Beanstalk restricts the returned descriptions to + // those associated with this application version. + VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +// Result message wrapping a list of event descriptions. +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of EventDescription. + Events []*EventDescription `type:"list"` + + // If returned, this indicates that there are more results to obtain. Use this + // token in the next DescribeEvents call to get the next batch of events. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +// See the example below to learn how to create a request body. +type DescribeInstancesHealthInput struct { + _ struct{} `type:"structure"` + + // Specifies the response elements you wish to receive. If no attribute names + // are specified, AWS Elastic Beanstalk only returns a list of instances. + AttributeNames []*string `type:"list"` + + // Specifies the AWS Elastic Beanstalk environment ID. + EnvironmentId *string `type:"string"` + + // Specifies the AWS Elastic Beanstalk environment name. + EnvironmentName *string `min:"4" type:"string"` + + // Specifies the next token of the request. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesHealthInput) GoString() string { + return s.String() +} + +// See the example below for a sample response. +type DescribeInstancesHealthOutput struct { + _ struct{} `type:"structure"` + + // Contains the response body with information about the health of the instance. + InstanceHealthList []*SingleInstanceHealth `type:"list"` + + // The next token. + NextToken *string `min:"1" type:"string"` + + // The date and time the information was last refreshed. + RefreshedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeInstancesHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesHealthOutput) GoString() string { + return s.String() +} + +// Describes the properties of an environment. +type EnvironmentDescription struct { + _ struct{} `type:"structure"` + + // Indicates if there is an in-progress environment configuration update or + // application version deployment that you can cancel. + // + // true: There is an update in progress. + // + // false: There are no updates currently in progress. + AbortableOperationInProgress *bool `type:"boolean"` + + // The name of the application associated with this environment. + ApplicationName *string `min:"1" type:"string"` + + // The URL to the CNAME for this environment. + CNAME *string `min:"1" type:"string"` + + // The creation date for this environment. + DateCreated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The last modified date for this environment. + DateUpdated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Describes this environment. 
+ Description *string `type:"string"` + + // For load-balanced, autoscaling environments, the URL to the LoadBalancer. + // For single-instance environments, the IP address of the instance. + EndpointURL *string `type:"string"` + + // The ID of this environment. + EnvironmentId *string `type:"string"` + + // A list of links to other environments in the same group. + EnvironmentLinks []*EnvironmentLink `type:"list"` + + // The name of this environment. + EnvironmentName *string `min:"4" type:"string"` + + // Describes the health status of the environment. AWS Elastic Beanstalk indicates + // the failure levels for a running environment: + // + // Red: Indicates the environment is not responsive. Occurs when three or + // more consecutive failures occur for an environment. Yellow: Indicates that + // something is wrong. Occurs when two consecutive failures occur for an environment. + // Green: Indicates the environment is healthy and fully functional. Grey: + // Default health for a new environment. The environment is not fully launched + // and health checks have not started or health checks are suspended during + // an UpdateEnvironment or RestartEnvironement request. Default: Grey + Health *string `type:"string" enum:"EnvironmentHealth"` + + // Returns the health status of the application running in your environment. + // For more information, see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). + HealthStatus *string `type:"string" enum:"EnvironmentHealthStatus"` + + // The description of the AWS resources used by this environment. + Resources *EnvironmentResourcesDescription `type:"structure"` + + // The name of the SolutionStack deployed with this environment. + SolutionStackName *string `type:"string"` + + // The current operational status of the environment: + // + // Launching: Environment is in the process of initial deployment. 
Updating: + // Environment is in the process of updating its configuration settings or application + // version. Ready: Environment is available to have an action performed on + // it, such as update or terminate. Terminating: Environment is in the shut-down + // process. Terminated: Environment is not running. + Status *string `type:"string" enum:"EnvironmentStatus"` + + // The name of the configuration template used to originally launch this environment. + TemplateName *string `min:"1" type:"string"` + + // Describes the current tier of this environment. + Tier *EnvironmentTier `type:"structure"` + + // The application version deployed in this environment. + VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EnvironmentDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentDescription) GoString() string { + return s.String() +} + +// Result message containing a list of environment descriptions. +type EnvironmentDescriptionsMessage struct { + _ struct{} `type:"structure"` + + // Returns an EnvironmentDescription list. + Environments []*EnvironmentDescription `type:"list"` +} + +// String returns the string representation +func (s EnvironmentDescriptionsMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentDescriptionsMessage) GoString() string { + return s.String() +} + +// The information retrieved from the Amazon EC2 instances. +type EnvironmentInfoDescription struct { + _ struct{} `type:"structure"` + + // The Amazon EC2 Instance ID for this information. + Ec2InstanceId *string `type:"string"` + + // The type of information retrieved. + InfoType *string `type:"string" enum:"EnvironmentInfoType"` + + // The retrieved information. + Message *string `type:"string"` + + // The time stamp when this information was retrieved. 
+ SampleTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s EnvironmentInfoDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentInfoDescription) GoString() string { + return s.String() +} + +// A link to another environment, defined in the environment's manifest. Links +// provide connection information in system properties that can be used to connect +// to another environment in the same group. See Environment Manifest (env.yaml) +// (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-mgmt-compose.html#environment-mgmt-compose-envyaml) +// for details. +type EnvironmentLink struct { + _ struct{} `type:"structure"` + + // The name of the linked environment (the dependency). + EnvironmentName *string `type:"string"` + + // The name of the link. + LinkName *string `type:"string"` +} + +// String returns the string representation +func (s EnvironmentLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentLink) GoString() string { + return s.String() +} + +// Describes the AWS resources in use by this environment. This data is live. +type EnvironmentResourceDescription struct { + _ struct{} `type:"structure"` + + // The AutoScalingGroups used by this environment. + AutoScalingGroups []*AutoScalingGroup `type:"list"` + + // The name of the environment. + EnvironmentName *string `min:"4" type:"string"` + + // The Amazon EC2 instances used by this environment. + Instances []*Instance `type:"list"` + + // The Auto Scaling launch configurations in use by this environment. + LaunchConfigurations []*LaunchConfiguration `type:"list"` + + // The LoadBalancers in use by this environment. + LoadBalancers []*LoadBalancer `type:"list"` + + // The queues used by this environment. 
+ Queues []*Queue `type:"list"` + + // The AutoScaling triggers in use by this environment. + Triggers []*Trigger `type:"list"` +} + +// String returns the string representation +func (s EnvironmentResourceDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentResourceDescription) GoString() string { + return s.String() +} + +// Describes the AWS resources in use by this environment. This data is not +// live data. +type EnvironmentResourcesDescription struct { + _ struct{} `type:"structure"` + + // Describes the LoadBalancer. + LoadBalancer *LoadBalancerDescription `type:"structure"` +} + +// String returns the string representation +func (s EnvironmentResourcesDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentResourcesDescription) GoString() string { + return s.String() +} + +// Describes the properties of an environment tier +type EnvironmentTier struct { + _ struct{} `type:"structure"` + + // The name of this environment tier. + Name *string `type:"string"` + + // The type of this environment tier. + Type *string `type:"string"` + + // The version of this environment tier. + Version *string `type:"string"` +} + +// String returns the string representation +func (s EnvironmentTier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentTier) GoString() string { + return s.String() +} + +// Describes an event. +type EventDescription struct { + _ struct{} `type:"structure"` + + // The application associated with the event. + ApplicationName *string `min:"1" type:"string"` + + // The name of the environment associated with this event. + EnvironmentName *string `min:"4" type:"string"` + + // The date when the event occurred. + EventDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The event message. 
+ Message *string `type:"string"` + + // The web service request ID for the activity of this event. + RequestId *string `type:"string"` + + // The severity level of this event. + Severity *string `type:"string" enum:"EventSeverity"` + + // The name of the configuration associated with this event. + TemplateName *string `min:"1" type:"string"` + + // The release label for the application version associated with this event. + VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EventDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventDescription) GoString() string { + return s.String() +} + +// The description of an Amazon EC2 instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The ID of the Amazon EC2 instance. + Id *string `type:"string"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Represents summary information about the health of an instance. For more +// information, see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). +type InstanceHealthSummary struct { + _ struct{} `type:"structure"` + + // Red. The health agent is reporting a high number of request failures or other + // issues for an instance or environment. + Degraded *int64 `type:"integer"` + + // Green. An operation is in progress on an instance. + Info *int64 `type:"integer"` + + // Grey. AWS Elastic Beanstalk and the health agent are reporting no data on + // an instance. + NoData *int64 `type:"integer"` + + // Green. An instance is passing health checks and the health agent is not reporting + // any problems. + Ok *int64 `type:"integer"` + + // Grey. 
An operation is in progress on an instance within the command timeout. + Pending *int64 `type:"integer"` + + // Red. The health agent is reporting a very high number of request failures + // or other issues for an instance or environment. + Severe *int64 `type:"integer"` + + // Grey. AWS Elastic Beanstalk and the health agent are reporting an insufficient + // amount of data on an instance. + Unknown *int64 `type:"integer"` + + // Yellow. The health agent is reporting a moderate number of request failures + // or other issues for an instance or environment. + Warning *int64 `type:"integer"` +} + +// String returns the string representation +func (s InstanceHealthSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceHealthSummary) GoString() string { + return s.String() +} + +// Represents the average latency for the slowest X percent of requests over +// the last 10 seconds. +type Latency struct { + _ struct{} `type:"structure"` + + // The average latency for the slowest 90 percent of requests over the last + // 10 seconds. + P10 *float64 `type:"double"` + + // The average latency for the slowest 50 percent of requests over the last + // 10 seconds. + P50 *float64 `type:"double"` + + // The average latency for the slowest 25 percent of requests over the last + // 10 seconds. + P75 *float64 `type:"double"` + + // The average latency for the slowest 15 percent of requests over the last + // 10 seconds. + P85 *float64 `type:"double"` + + // The average latency for the slowest 10 percent of requests over the last + // 10 seconds. + P90 *float64 `type:"double"` + + // The average latency for the slowest 5 percent of requests over the last 10 + // seconds. + P95 *float64 `type:"double"` + + // The average latency for the slowest 1 percent of requests over the last 10 + // seconds. 
+ P99 *float64 `type:"double"` + + // The average latency for the slowest 0.1 percent of requests over the last + // 10 seconds. + P999 *float64 `type:"double"` +} + +// String returns the string representation +func (s Latency) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Latency) GoString() string { + return s.String() +} + +// Describes an Auto Scaling launch configuration. +type LaunchConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the launch configuration. + Name *string `type:"string"` +} + +// String returns the string representation +func (s LaunchConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchConfiguration) GoString() string { + return s.String() +} + +type ListAvailableSolutionStacksInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListAvailableSolutionStacksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAvailableSolutionStacksInput) GoString() string { + return s.String() +} + +// A list of available AWS Elastic Beanstalk solution stacks. +type ListAvailableSolutionStacksOutput struct { + _ struct{} `type:"structure"` + + // A list of available solution stacks and their SolutionStackDescription. + SolutionStackDetails []*SolutionStackDescription `type:"list"` + + // A list of available solution stacks. + SolutionStacks []*string `type:"list"` +} + +// String returns the string representation +func (s ListAvailableSolutionStacksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAvailableSolutionStacksOutput) GoString() string { + return s.String() +} + +// Describes the properties of a Listener for the LoadBalancer. 
+type Listener struct { + _ struct{} `type:"structure"` + + // The port that is used by the Listener. + Port *int64 `type:"integer"` + + // The protocol that is used by the Listener. + Protocol *string `type:"string"` +} + +// String returns the string representation +func (s Listener) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Listener) GoString() string { + return s.String() +} + +// Describes a LoadBalancer. +type LoadBalancer struct { + _ struct{} `type:"structure"` + + // The name of the LoadBalancer. + Name *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancer) GoString() string { + return s.String() +} + +// Describes the details of a LoadBalancer. +type LoadBalancerDescription struct { + _ struct{} `type:"structure"` + + // The domain name of the LoadBalancer. + Domain *string `type:"string"` + + // A list of Listeners used by the LoadBalancer. + Listeners []*Listener `type:"list"` + + // The name of the LoadBalancer. + LoadBalancerName *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerDescription) GoString() string { + return s.String() +} + +// A regular expression representing a restriction on a string configuration +// option value. +type OptionRestrictionRegex struct { + _ struct{} `type:"structure"` + + // A unique name representing this regular expression. + Label *string `type:"string"` + + // The regular expression pattern that a string configuration option value with + // this restriction must match. 
+ Pattern *string `type:"string"` +} + +// String returns the string representation +func (s OptionRestrictionRegex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionRestrictionRegex) GoString() string { + return s.String() +} + +// A specification identifying an individual configuration option. +type OptionSpecification struct { + _ struct{} `type:"structure"` + + // A unique namespace identifying the option's associated AWS resource. + Namespace *string `type:"string"` + + // The name of the configuration option. + OptionName *string `type:"string"` + + // A unique resource name for a time-based scaling configuration option. + ResourceName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s OptionSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionSpecification) GoString() string { + return s.String() +} + +// Describes a queue. +type Queue struct { + _ struct{} `type:"structure"` + + // The name of the queue. + Name *string `type:"string"` + + // The URL of the queue. + URL *string `type:"string"` +} + +// String returns the string representation +func (s Queue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Queue) GoString() string { + return s.String() +} + +type RebuildEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The ID of the environment to rebuild. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment to rebuild. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. 
+ EnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s RebuildEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildEnvironmentInput) GoString() string { + return s.String() +} + +type RebuildEnvironmentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RebuildEnvironmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildEnvironmentOutput) GoString() string { + return s.String() +} + +type RequestEnvironmentInfoInput struct { + _ struct{} `type:"structure"` + + // The ID of the environment of the requested data. + // + // If no such environment is found, RequestEnvironmentInfo returns an InvalidParameterValue + // error. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment of the requested data. + // + // If no such environment is found, RequestEnvironmentInfo returns an InvalidParameterValue + // error. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` + + // The type of information to request. 
+ InfoType *string `type:"string" required:"true" enum:"EnvironmentInfoType"` +} + +// String returns the string representation +func (s RequestEnvironmentInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestEnvironmentInfoInput) GoString() string { + return s.String() +} + +type RequestEnvironmentInfoOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RequestEnvironmentInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestEnvironmentInfoOutput) GoString() string { + return s.String() +} + +type RestartAppServerInput struct { + _ struct{} `type:"structure"` + + // The ID of the environment to restart the server for. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment to restart the server for. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. 
+ EnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s RestartAppServerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestartAppServerInput) GoString() string { + return s.String() +} + +type RestartAppServerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RestartAppServerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestartAppServerOutput) GoString() string { + return s.String() +} + +type RetrieveEnvironmentInfoInput struct { + _ struct{} `type:"structure"` + + // The ID of the data's environment. + // + // If no such environment is found, returns an InvalidParameterValue error. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the data's environment. + // + // If no such environment is found, returns an InvalidParameterValue error. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` + + // The type of information to retrieve. + InfoType *string `type:"string" required:"true" enum:"EnvironmentInfoType"` +} + +// String returns the string representation +func (s RetrieveEnvironmentInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveEnvironmentInfoInput) GoString() string { + return s.String() +} + +// Result message containing a description of the requested environment info. 
+type RetrieveEnvironmentInfoOutput struct { + _ struct{} `type:"structure"` + + // The EnvironmentInfoDescription of the environment. + EnvironmentInfo []*EnvironmentInfoDescription `type:"list"` +} + +// String returns the string representation +func (s RetrieveEnvironmentInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveEnvironmentInfoOutput) GoString() string { + return s.String() +} + +// A specification of a location in Amazon S3. +type S3Location struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket where the data is located. + S3Bucket *string `type:"string"` + + // The Amazon S3 key where the data is located. + S3Key *string `type:"string"` +} + +// String returns the string representation +func (s S3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Location) GoString() string { + return s.String() +} + +// Represents health information from the specified instance that belongs to +// the AWS Elastic Beanstalk environment. Use the InstanceId property to specify +// the application instance for which you'd like to return data. +type SingleInstanceHealth struct { + _ struct{} `type:"structure"` + + // Represents the application metrics for a specified environment. + ApplicationMetrics *ApplicationMetrics `type:"structure"` + + // Represents the causes, which provide more information about the current health + // status. + Causes []*string `type:"list"` + + // Represents the color indicator that gives you information about the health + // of the EC2 instance. For more information, see Health Colors and Statuses + // (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). + Color *string `type:"string"` + + // Returns the health status of the specified instance. 
For more information, + // see Health Colors and Statuses (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). + HealthStatus *string `type:"string"` + + // The ID of the Amazon EC2 instance. + InstanceId *string `min:"1" type:"string"` + + // The time at which the EC2 instance was launched. + LaunchedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Represents CPU utilization and load average information for applications + // running in the specified environment. + System *SystemStatus `type:"structure"` +} + +// String returns the string representation +func (s SingleInstanceHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SingleInstanceHealth) GoString() string { + return s.String() +} + +// Describes the solution stack. +type SolutionStackDescription struct { + _ struct{} `type:"structure"` + + // The permitted file types allowed for a solution stack. + PermittedFileTypes []*string `type:"list"` + + // The name of the solution stack. + SolutionStackName *string `type:"string"` +} + +// String returns the string representation +func (s SolutionStackDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SolutionStackDescription) GoString() string { + return s.String() +} + +// A specification for an environment configuration +type SourceConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the application associated with the configuration. + ApplicationName *string `min:"1" type:"string"` + + // The name of the configuration template. 
+ TemplateName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SourceConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceConfiguration) GoString() string { + return s.String() +} + +// Represents the percentage of requests over the last 10 seconds that resulted +// in each type of status code response. For more information, see Status Code +// Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html). +type StatusCodes struct { + _ struct{} `type:"structure"` + + // The percentage of requests over the last 10 seconds that resulted in a 2xx + // (200, 201, etc.) status code. + Status2xx *int64 `type:"integer"` + + // The percentage of requests over the last 10 seconds that resulted in a 3xx + // (300, 301, etc.) status code. + Status3xx *int64 `type:"integer"` + + // The percentage of requests over the last 10 seconds that resulted in a 4xx + // (400, 401, etc.) status code. + Status4xx *int64 `type:"integer"` + + // The percentage of requests over the last 10 seconds that resulted in a 5xx + // (500, 501, etc.) status code. + Status5xx *int64 `type:"integer"` +} + +// String returns the string representation +func (s StatusCodes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusCodes) GoString() string { + return s.String() +} + +// Swaps the CNAMEs of two environments. +type SwapEnvironmentCNAMEsInput struct { + _ struct{} `type:"structure"` + + // The ID of the destination environment. + // + // Condition: You must specify at least the DestinationEnvironmentID or the + // DestinationEnvironmentName. You may also specify both. You must specify the + // SourceEnvironmentId with the DestinationEnvironmentId. + DestinationEnvironmentId *string `type:"string"` + + // The name of the destination environment. 
+ // + // Condition: You must specify at least the DestinationEnvironmentID or the + // DestinationEnvironmentName. You may also specify both. You must specify the + // SourceEnvironmentName with the DestinationEnvironmentName. + DestinationEnvironmentName *string `min:"4" type:"string"` + + // The ID of the source environment. + // + // Condition: You must specify at least the SourceEnvironmentID or the SourceEnvironmentName. + // You may also specify both. If you specify the SourceEnvironmentId, you must + // specify the DestinationEnvironmentId. + SourceEnvironmentId *string `type:"string"` + + // The name of the source environment. + // + // Condition: You must specify at least the SourceEnvironmentID or the SourceEnvironmentName. + // You may also specify both. If you specify the SourceEnvironmentName, you + // must specify the DestinationEnvironmentName. + SourceEnvironmentName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s SwapEnvironmentCNAMEsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SwapEnvironmentCNAMEsInput) GoString() string { + return s.String() +} + +type SwapEnvironmentCNAMEsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SwapEnvironmentCNAMEsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SwapEnvironmentCNAMEsOutput) GoString() string { + return s.String() +} + +// Represents CPU utilization and load average information for applications +// running in the specified environment. +type SystemStatus struct { + _ struct{} `type:"structure"` + + // Represents CPU utilization information from the specified instance that belongs + // to the AWS Elastic Beanstalk environment. Use the instanceId property to + // specify the application instance for which you'd like to return data. 
+ CPUUtilization *CPUUtilization `type:"structure"` + + // Load average in the last 1-minute and 5-minute periods. For more information, + // see Operating System Metrics (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-metrics.html#health-enhanced-metrics-os). + LoadAverage []*float64 `type:"list"` +} + +// String returns the string representation +func (s SystemStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SystemStatus) GoString() string { + return s.String() +} + +// Describes a tag applied to a resource in an environment. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + Key *string `min:"1" type:"string"` + + // The value of the tag. + Value *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +type TerminateEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The ID of the environment to terminate. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment to terminate. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` + + // Terminates the target environment even if another environment in the same + // group is dependent on it. 
+ ForceTerminate *bool `type:"boolean"` + + // Indicates whether the associated AWS resources should shut down when the + // environment is terminated: + // + // true: The specified environment as well as the associated AWS resources, + // such as Auto Scaling group and LoadBalancer, are terminated. false: AWS + // Elastic Beanstalk resource management is removed from the environment, but + // the AWS resources continue to operate. For more information, see the + // AWS Elastic Beanstalk User Guide. (http://docs.aws.amazon.com/elasticbeanstalk/latest/ug/) + // + // Default: true + // + // Valid Values: true | false + TerminateResources *bool `type:"boolean"` +} + +// String returns the string representation +func (s TerminateEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateEnvironmentInput) GoString() string { + return s.String() +} + +// Describes a trigger. +type Trigger struct { + _ struct{} `type:"structure"` + + // The name of the trigger. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Trigger) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Trigger) GoString() string { + return s.String() +} + +type UpdateApplicationInput struct { + _ struct{} `type:"structure"` + + // The name of the application to update. If no such application is found, UpdateApplication + // returns an InvalidParameterValue error. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // A new description for the application. + // + // Default: If not specified, AWS Elastic Beanstalk does not update the description. 
+ Description *string `type:"string"` +} + +// String returns the string representation +func (s UpdateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationInput) GoString() string { + return s.String() +} + +type UpdateApplicationVersionInput struct { + _ struct{} `type:"structure"` + + // The name of the application associated with this version. + // + // If no application is found with this name, UpdateApplication returns an + // InvalidParameterValue error. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // A new description for this release. + Description *string `type:"string"` + + // The name of the version to update. + // + // If no application version is found with this label, UpdateApplication returns + // an InvalidParameterValue error. + VersionLabel *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApplicationVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationVersionInput) GoString() string { + return s.String() +} + +// The result message containing the options for the specified solution stack. +type UpdateConfigurationTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the application associated with the configuration template to + // update. + // + // If no application is found with this name, UpdateConfigurationTemplate + // returns an InvalidParameterValue error. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // A new description for the configuration. + Description *string `type:"string"` + + // A list of configuration option settings to update with the new specified + // option value. + OptionSettings []*ConfigurationOptionSetting `type:"list"` + + // A list of configuration options to remove from the configuration set. 
+ // + // Constraint: You can remove only UserDefined configuration options. + OptionsToRemove []*OptionSpecification `type:"list"` + + // The name of the configuration template to update. + // + // If no configuration template is found with this name, UpdateConfigurationTemplate + // returns an InvalidParameterValue error. + TemplateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateConfigurationTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationTemplateInput) GoString() string { + return s.String() +} + +type UpdateEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The name of the application with which the environment is associated. + ApplicationName *string `min:"1" type:"string"` + + // If this parameter is specified, AWS Elastic Beanstalk updates the description + // of this environment. + Description *string `type:"string"` + + // The ID of the environment to update. + // + // If no environment with this ID exists, AWS Elastic Beanstalk returns an + // InvalidParameterValue error. + // + // Condition: You must specify either this or an EnvironmentName, or both. + // If you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentId *string `type:"string"` + + // The name of the environment to update. If no environment with this name exists, + // AWS Elastic Beanstalk returns an InvalidParameterValue error. + // + // Condition: You must specify either this or an EnvironmentId, or both. If + // you do not specify either, AWS Elastic Beanstalk returns MissingRequiredParameter + // error. + EnvironmentName *string `min:"4" type:"string"` + + // The name of the group to which the target environment belongs. 
Specify a + // group name only if the environment's name is specified in an environment + // manifest and not with the environment name or environment ID parameters. + // See Environment Manifest (env.yaml) (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-mgmt-compose.html#environment-mgmt-compose-envyaml) + // for details. + GroupName *string `min:"1" type:"string"` + + // If specified, AWS Elastic Beanstalk updates the configuration set associated + // with the running environment and sets the specified configuration options + // to the requested value. + OptionSettings []*ConfigurationOptionSetting `type:"list"` + + // A list of custom user-defined configuration options to remove from the configuration + // set for this environment. + OptionsToRemove []*OptionSpecification `type:"list"` + + // This specifies the platform version that the environment will run after the + // environment is updated. + SolutionStackName *string `type:"string"` + + // If this parameter is specified, AWS Elastic Beanstalk deploys this configuration + // template to the environment. If no such configuration template is found, + // AWS Elastic Beanstalk returns an InvalidParameterValue error. + TemplateName *string `min:"1" type:"string"` + + // This specifies the tier to use to update the environment. + // + // Condition: At this time, if you change the tier version, name, or type, + // AWS Elastic Beanstalk returns InvalidParameterValue error. + Tier *EnvironmentTier `type:"structure"` + + // If this parameter is specified, AWS Elastic Beanstalk deploys the named application + // version to the environment. If no such application version is found, returns + // an InvalidParameterValue error. 
+ VersionLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEnvironmentInput) GoString() string { + return s.String() +} + +// A list of validation messages for a specified configuration template. +type ValidateConfigurationSettingsInput struct { + _ struct{} `type:"structure"` + + // The name of the application that the configuration template or environment + // belongs to. + ApplicationName *string `min:"1" type:"string" required:"true"` + + // The name of the environment to validate the settings against. + // + // Condition: You cannot specify both this and a configuration template name. + EnvironmentName *string `min:"4" type:"string"` + + // A list of the options and desired values to evaluate. + OptionSettings []*ConfigurationOptionSetting `type:"list" required:"true"` + + // The name of the configuration template to validate the settings against. + // + // Condition: You cannot specify both this and an environment name. + TemplateName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ValidateConfigurationSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateConfigurationSettingsInput) GoString() string { + return s.String() +} + +// Provides a list of validation messages. +type ValidateConfigurationSettingsOutput struct { + _ struct{} `type:"structure"` + + // A list of ValidationMessage. 
+ Messages []*ValidationMessage `type:"list"` +} + +// String returns the string representation +func (s ValidateConfigurationSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateConfigurationSettingsOutput) GoString() string { + return s.String() +} + +// An error or warning for a desired configuration option value. +type ValidationMessage struct { + _ struct{} `type:"structure"` + + // A message describing the error or warning. + Message *string `type:"string"` + + Namespace *string `type:"string"` + + OptionName *string `type:"string"` + + // An indication of the severity of this message: + // + // error: This message indicates that this is not a valid setting for an + // option. warning: This message is providing information you should take + // into account. + Severity *string `type:"string" enum:"ValidationSeverity"` +} + +// String returns the string representation +func (s ValidationMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationMessage) GoString() string { + return s.String() +} + +const ( + // @enum ApplicationVersionStatus + ApplicationVersionStatusProcessed = "Processed" + // @enum ApplicationVersionStatus + ApplicationVersionStatusUnprocessed = "Unprocessed" + // @enum ApplicationVersionStatus + ApplicationVersionStatusFailed = "Failed" + // @enum ApplicationVersionStatus + ApplicationVersionStatusProcessing = "Processing" +) + +const ( + // @enum ConfigurationDeploymentStatus + ConfigurationDeploymentStatusDeployed = "deployed" + // @enum ConfigurationDeploymentStatus + ConfigurationDeploymentStatusPending = "pending" + // @enum ConfigurationDeploymentStatus + ConfigurationDeploymentStatusFailed = "failed" +) + +const ( + // @enum ConfigurationOptionValueType + ConfigurationOptionValueTypeScalar = "Scalar" + // @enum ConfigurationOptionValueType + ConfigurationOptionValueTypeList = "List" +) + 
+const ( + // @enum EnvironmentHealth + EnvironmentHealthGreen = "Green" + // @enum EnvironmentHealth + EnvironmentHealthYellow = "Yellow" + // @enum EnvironmentHealth + EnvironmentHealthRed = "Red" + // @enum EnvironmentHealth + EnvironmentHealthGrey = "Grey" +) + +const ( + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeStatus = "Status" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeColor = "Color" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeCauses = "Causes" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeApplicationMetrics = "ApplicationMetrics" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeInstancesHealth = "InstancesHealth" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeAll = "All" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeHealthStatus = "HealthStatus" + // @enum EnvironmentHealthAttribute + EnvironmentHealthAttributeRefreshedAt = "RefreshedAt" +) + +const ( + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusNoData = "NoData" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusUnknown = "Unknown" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusPending = "Pending" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusOk = "Ok" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusInfo = "Info" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusWarning = "Warning" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusDegraded = "Degraded" + // @enum EnvironmentHealthStatus + EnvironmentHealthStatusSevere = "Severe" +) + +const ( + // @enum EnvironmentInfoType + EnvironmentInfoTypeTail = "tail" + // @enum EnvironmentInfoType + EnvironmentInfoTypeBundle = "bundle" +) + +const ( + // @enum EnvironmentStatus + EnvironmentStatusLaunching = "Launching" + // @enum EnvironmentStatus + EnvironmentStatusUpdating = "Updating" + // @enum EnvironmentStatus + EnvironmentStatusReady 
= "Ready" + // @enum EnvironmentStatus + EnvironmentStatusTerminating = "Terminating" + // @enum EnvironmentStatus + EnvironmentStatusTerminated = "Terminated" +) + +const ( + // @enum EventSeverity + EventSeverityTrace = "TRACE" + // @enum EventSeverity + EventSeverityDebug = "DEBUG" + // @enum EventSeverity + EventSeverityInfo = "INFO" + // @enum EventSeverity + EventSeverityWarn = "WARN" + // @enum EventSeverity + EventSeverityError = "ERROR" + // @enum EventSeverity + EventSeverityFatal = "FATAL" +) + +const ( + // @enum InstancesHealthAttribute + InstancesHealthAttributeHealthStatus = "HealthStatus" + // @enum InstancesHealthAttribute + InstancesHealthAttributeColor = "Color" + // @enum InstancesHealthAttribute + InstancesHealthAttributeCauses = "Causes" + // @enum InstancesHealthAttribute + InstancesHealthAttributeApplicationMetrics = "ApplicationMetrics" + // @enum InstancesHealthAttribute + InstancesHealthAttributeRefreshedAt = "RefreshedAt" + // @enum InstancesHealthAttribute + InstancesHealthAttributeLaunchedAt = "LaunchedAt" + // @enum InstancesHealthAttribute + InstancesHealthAttributeSystem = "System" + // @enum InstancesHealthAttribute + InstancesHealthAttributeAll = "All" +) + +const ( + // @enum ValidationSeverity + ValidationSeverityError = "error" + // @enum ValidationSeverity + ValidationSeverityWarning = "warning" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/elasticbeanstalkiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1,148 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticbeanstalkiface provides an interface for the AWS Elastic Beanstalk. +package elasticbeanstalkiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" +) + +// ElasticBeanstalkAPI is the interface type for elasticbeanstalk.ElasticBeanstalk. +type ElasticBeanstalkAPI interface { + AbortEnvironmentUpdateRequest(*elasticbeanstalk.AbortEnvironmentUpdateInput) (*request.Request, *elasticbeanstalk.AbortEnvironmentUpdateOutput) + + AbortEnvironmentUpdate(*elasticbeanstalk.AbortEnvironmentUpdateInput) (*elasticbeanstalk.AbortEnvironmentUpdateOutput, error) + + CheckDNSAvailabilityRequest(*elasticbeanstalk.CheckDNSAvailabilityInput) (*request.Request, *elasticbeanstalk.CheckDNSAvailabilityOutput) + + CheckDNSAvailability(*elasticbeanstalk.CheckDNSAvailabilityInput) (*elasticbeanstalk.CheckDNSAvailabilityOutput, error) + + ComposeEnvironmentsRequest(*elasticbeanstalk.ComposeEnvironmentsInput) (*request.Request, *elasticbeanstalk.EnvironmentDescriptionsMessage) + + ComposeEnvironments(*elasticbeanstalk.ComposeEnvironmentsInput) (*elasticbeanstalk.EnvironmentDescriptionsMessage, error) + + CreateApplicationRequest(*elasticbeanstalk.CreateApplicationInput) (*request.Request, *elasticbeanstalk.ApplicationDescriptionMessage) + + CreateApplication(*elasticbeanstalk.CreateApplicationInput) (*elasticbeanstalk.ApplicationDescriptionMessage, error) + + CreateApplicationVersionRequest(*elasticbeanstalk.CreateApplicationVersionInput) (*request.Request, *elasticbeanstalk.ApplicationVersionDescriptionMessage) + + CreateApplicationVersion(*elasticbeanstalk.CreateApplicationVersionInput) (*elasticbeanstalk.ApplicationVersionDescriptionMessage, error) + + CreateConfigurationTemplateRequest(*elasticbeanstalk.CreateConfigurationTemplateInput) (*request.Request, *elasticbeanstalk.ConfigurationSettingsDescription) + + 
CreateConfigurationTemplate(*elasticbeanstalk.CreateConfigurationTemplateInput) (*elasticbeanstalk.ConfigurationSettingsDescription, error) + + CreateEnvironmentRequest(*elasticbeanstalk.CreateEnvironmentInput) (*request.Request, *elasticbeanstalk.EnvironmentDescription) + + CreateEnvironment(*elasticbeanstalk.CreateEnvironmentInput) (*elasticbeanstalk.EnvironmentDescription, error) + + CreateStorageLocationRequest(*elasticbeanstalk.CreateStorageLocationInput) (*request.Request, *elasticbeanstalk.CreateStorageLocationOutput) + + CreateStorageLocation(*elasticbeanstalk.CreateStorageLocationInput) (*elasticbeanstalk.CreateStorageLocationOutput, error) + + DeleteApplicationRequest(*elasticbeanstalk.DeleteApplicationInput) (*request.Request, *elasticbeanstalk.DeleteApplicationOutput) + + DeleteApplication(*elasticbeanstalk.DeleteApplicationInput) (*elasticbeanstalk.DeleteApplicationOutput, error) + + DeleteApplicationVersionRequest(*elasticbeanstalk.DeleteApplicationVersionInput) (*request.Request, *elasticbeanstalk.DeleteApplicationVersionOutput) + + DeleteApplicationVersion(*elasticbeanstalk.DeleteApplicationVersionInput) (*elasticbeanstalk.DeleteApplicationVersionOutput, error) + + DeleteConfigurationTemplateRequest(*elasticbeanstalk.DeleteConfigurationTemplateInput) (*request.Request, *elasticbeanstalk.DeleteConfigurationTemplateOutput) + + DeleteConfigurationTemplate(*elasticbeanstalk.DeleteConfigurationTemplateInput) (*elasticbeanstalk.DeleteConfigurationTemplateOutput, error) + + DeleteEnvironmentConfigurationRequest(*elasticbeanstalk.DeleteEnvironmentConfigurationInput) (*request.Request, *elasticbeanstalk.DeleteEnvironmentConfigurationOutput) + + DeleteEnvironmentConfiguration(*elasticbeanstalk.DeleteEnvironmentConfigurationInput) (*elasticbeanstalk.DeleteEnvironmentConfigurationOutput, error) + + DescribeApplicationVersionsRequest(*elasticbeanstalk.DescribeApplicationVersionsInput) (*request.Request, *elasticbeanstalk.DescribeApplicationVersionsOutput) + + 
DescribeApplicationVersions(*elasticbeanstalk.DescribeApplicationVersionsInput) (*elasticbeanstalk.DescribeApplicationVersionsOutput, error) + + DescribeApplicationsRequest(*elasticbeanstalk.DescribeApplicationsInput) (*request.Request, *elasticbeanstalk.DescribeApplicationsOutput) + + DescribeApplications(*elasticbeanstalk.DescribeApplicationsInput) (*elasticbeanstalk.DescribeApplicationsOutput, error) + + DescribeConfigurationOptionsRequest(*elasticbeanstalk.DescribeConfigurationOptionsInput) (*request.Request, *elasticbeanstalk.DescribeConfigurationOptionsOutput) + + DescribeConfigurationOptions(*elasticbeanstalk.DescribeConfigurationOptionsInput) (*elasticbeanstalk.DescribeConfigurationOptionsOutput, error) + + DescribeConfigurationSettingsRequest(*elasticbeanstalk.DescribeConfigurationSettingsInput) (*request.Request, *elasticbeanstalk.DescribeConfigurationSettingsOutput) + + DescribeConfigurationSettings(*elasticbeanstalk.DescribeConfigurationSettingsInput) (*elasticbeanstalk.DescribeConfigurationSettingsOutput, error) + + DescribeEnvironmentHealthRequest(*elasticbeanstalk.DescribeEnvironmentHealthInput) (*request.Request, *elasticbeanstalk.DescribeEnvironmentHealthOutput) + + DescribeEnvironmentHealth(*elasticbeanstalk.DescribeEnvironmentHealthInput) (*elasticbeanstalk.DescribeEnvironmentHealthOutput, error) + + DescribeEnvironmentResourcesRequest(*elasticbeanstalk.DescribeEnvironmentResourcesInput) (*request.Request, *elasticbeanstalk.DescribeEnvironmentResourcesOutput) + + DescribeEnvironmentResources(*elasticbeanstalk.DescribeEnvironmentResourcesInput) (*elasticbeanstalk.DescribeEnvironmentResourcesOutput, error) + + DescribeEnvironmentsRequest(*elasticbeanstalk.DescribeEnvironmentsInput) (*request.Request, *elasticbeanstalk.EnvironmentDescriptionsMessage) + + DescribeEnvironments(*elasticbeanstalk.DescribeEnvironmentsInput) (*elasticbeanstalk.EnvironmentDescriptionsMessage, error) + + DescribeEventsRequest(*elasticbeanstalk.DescribeEventsInput) 
(*request.Request, *elasticbeanstalk.DescribeEventsOutput) + + DescribeEvents(*elasticbeanstalk.DescribeEventsInput) (*elasticbeanstalk.DescribeEventsOutput, error) + + DescribeEventsPages(*elasticbeanstalk.DescribeEventsInput, func(*elasticbeanstalk.DescribeEventsOutput, bool) bool) error + + DescribeInstancesHealthRequest(*elasticbeanstalk.DescribeInstancesHealthInput) (*request.Request, *elasticbeanstalk.DescribeInstancesHealthOutput) + + DescribeInstancesHealth(*elasticbeanstalk.DescribeInstancesHealthInput) (*elasticbeanstalk.DescribeInstancesHealthOutput, error) + + ListAvailableSolutionStacksRequest(*elasticbeanstalk.ListAvailableSolutionStacksInput) (*request.Request, *elasticbeanstalk.ListAvailableSolutionStacksOutput) + + ListAvailableSolutionStacks(*elasticbeanstalk.ListAvailableSolutionStacksInput) (*elasticbeanstalk.ListAvailableSolutionStacksOutput, error) + + RebuildEnvironmentRequest(*elasticbeanstalk.RebuildEnvironmentInput) (*request.Request, *elasticbeanstalk.RebuildEnvironmentOutput) + + RebuildEnvironment(*elasticbeanstalk.RebuildEnvironmentInput) (*elasticbeanstalk.RebuildEnvironmentOutput, error) + + RequestEnvironmentInfoRequest(*elasticbeanstalk.RequestEnvironmentInfoInput) (*request.Request, *elasticbeanstalk.RequestEnvironmentInfoOutput) + + RequestEnvironmentInfo(*elasticbeanstalk.RequestEnvironmentInfoInput) (*elasticbeanstalk.RequestEnvironmentInfoOutput, error) + + RestartAppServerRequest(*elasticbeanstalk.RestartAppServerInput) (*request.Request, *elasticbeanstalk.RestartAppServerOutput) + + RestartAppServer(*elasticbeanstalk.RestartAppServerInput) (*elasticbeanstalk.RestartAppServerOutput, error) + + RetrieveEnvironmentInfoRequest(*elasticbeanstalk.RetrieveEnvironmentInfoInput) (*request.Request, *elasticbeanstalk.RetrieveEnvironmentInfoOutput) + + RetrieveEnvironmentInfo(*elasticbeanstalk.RetrieveEnvironmentInfoInput) (*elasticbeanstalk.RetrieveEnvironmentInfoOutput, error) + + 
SwapEnvironmentCNAMEsRequest(*elasticbeanstalk.SwapEnvironmentCNAMEsInput) (*request.Request, *elasticbeanstalk.SwapEnvironmentCNAMEsOutput) + + SwapEnvironmentCNAMEs(*elasticbeanstalk.SwapEnvironmentCNAMEsInput) (*elasticbeanstalk.SwapEnvironmentCNAMEsOutput, error) + + TerminateEnvironmentRequest(*elasticbeanstalk.TerminateEnvironmentInput) (*request.Request, *elasticbeanstalk.EnvironmentDescription) + + TerminateEnvironment(*elasticbeanstalk.TerminateEnvironmentInput) (*elasticbeanstalk.EnvironmentDescription, error) + + UpdateApplicationRequest(*elasticbeanstalk.UpdateApplicationInput) (*request.Request, *elasticbeanstalk.ApplicationDescriptionMessage) + + UpdateApplication(*elasticbeanstalk.UpdateApplicationInput) (*elasticbeanstalk.ApplicationDescriptionMessage, error) + + UpdateApplicationVersionRequest(*elasticbeanstalk.UpdateApplicationVersionInput) (*request.Request, *elasticbeanstalk.ApplicationVersionDescriptionMessage) + + UpdateApplicationVersion(*elasticbeanstalk.UpdateApplicationVersionInput) (*elasticbeanstalk.ApplicationVersionDescriptionMessage, error) + + UpdateConfigurationTemplateRequest(*elasticbeanstalk.UpdateConfigurationTemplateInput) (*request.Request, *elasticbeanstalk.ConfigurationSettingsDescription) + + UpdateConfigurationTemplate(*elasticbeanstalk.UpdateConfigurationTemplateInput) (*elasticbeanstalk.ConfigurationSettingsDescription, error) + + UpdateEnvironmentRequest(*elasticbeanstalk.UpdateEnvironmentInput) (*request.Request, *elasticbeanstalk.EnvironmentDescription) + + UpdateEnvironment(*elasticbeanstalk.UpdateEnvironmentInput) (*elasticbeanstalk.EnvironmentDescription, error) + + ValidateConfigurationSettingsRequest(*elasticbeanstalk.ValidateConfigurationSettingsInput) (*request.Request, *elasticbeanstalk.ValidateConfigurationSettingsOutput) + + ValidateConfigurationSettings(*elasticbeanstalk.ValidateConfigurationSettingsInput) (*elasticbeanstalk.ValidateConfigurationSettingsOutput, error) +} + +var _ ElasticBeanstalkAPI = 
(*elasticbeanstalk.ElasticBeanstalk)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,839 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticbeanstalk_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleElasticBeanstalk_AbortEnvironmentUpdate() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.AbortEnvironmentUpdateInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.AbortEnvironmentUpdate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CheckDNSAvailability() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CheckDNSAvailabilityInput{ + CNAMEPrefix: aws.String("DNSCnamePrefix"), // Required + } + resp, err := svc.CheckDNSAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_ComposeEnvironments() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.ComposeEnvironmentsInput{ + ApplicationName: aws.String("ApplicationName"), + GroupName: aws.String("GroupName"), + VersionLabels: []*string{ + aws.String("VersionLabel"), // Required + // More values... + }, + } + resp, err := svc.ComposeEnvironments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateApplication() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CreateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Description: aws.String("Description"), + } + resp, err := svc.CreateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateApplicationVersion() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CreateApplicationVersionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + VersionLabel: aws.String("VersionLabel"), // Required + AutoCreateApplication: aws.Bool(true), + Description: aws.String("Description"), + Process: aws.Bool(true), + SourceBundle: &elasticbeanstalk.S3Location{ + S3Bucket: aws.String("S3Bucket"), + S3Key: aws.String("S3Key"), + }, + } + resp, err := svc.CreateApplicationVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateConfigurationTemplate() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CreateConfigurationTemplateInput{ + ApplicationName: aws.String("ApplicationName"), // Required + TemplateName: aws.String("ConfigurationTemplateName"), // Required + Description: aws.String("Description"), + EnvironmentId: aws.String("EnvironmentId"), + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... + }, + SolutionStackName: aws.String("SolutionStackName"), + SourceConfiguration: &elasticbeanstalk.SourceConfiguration{ + ApplicationName: aws.String("ApplicationName"), + TemplateName: aws.String("ConfigurationTemplateName"), + }, + } + resp, err := svc.CreateConfigurationTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateEnvironment() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.CreateEnvironmentInput{ + ApplicationName: aws.String("ApplicationName"), // Required + CNAMEPrefix: aws.String("DNSCnamePrefix"), + Description: aws.String("Description"), + EnvironmentName: aws.String("EnvironmentName"), + GroupName: aws.String("GroupName"), + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... 
+ }, + OptionsToRemove: []*elasticbeanstalk.OptionSpecification{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + }, + // More values... + }, + SolutionStackName: aws.String("SolutionStackName"), + Tags: []*elasticbeanstalk.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + TemplateName: aws.String("ConfigurationTemplateName"), + Tier: &elasticbeanstalk.EnvironmentTier{ + Name: aws.String("String"), + Type: aws.String("String"), + Version: aws.String("String"), + }, + VersionLabel: aws.String("VersionLabel"), + } + resp, err := svc.CreateEnvironment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_CreateStorageLocation() { + svc := elasticbeanstalk.New(session.New()) + + var params *elasticbeanstalk.CreateStorageLocationInput + resp, err := svc.CreateStorageLocation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DeleteApplication() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DeleteApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + TerminateEnvByForce: aws.Bool(true), + } + resp, err := svc.DeleteApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DeleteApplicationVersion() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DeleteApplicationVersionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + VersionLabel: aws.String("VersionLabel"), // Required + DeleteSourceBundle: aws.Bool(true), + } + resp, err := svc.DeleteApplicationVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DeleteConfigurationTemplate() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DeleteConfigurationTemplateInput{ + ApplicationName: aws.String("ApplicationName"), // Required + TemplateName: aws.String("ConfigurationTemplateName"), // Required + } + resp, err := svc.DeleteConfigurationTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DeleteEnvironmentConfiguration() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DeleteEnvironmentConfigurationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + EnvironmentName: aws.String("EnvironmentName"), // Required + } + resp, err := svc.DeleteEnvironmentConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeApplicationVersions() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeApplicationVersionsInput{ + ApplicationName: aws.String("ApplicationName"), + VersionLabels: []*string{ + aws.String("VersionLabel"), // Required + // More values... + }, + } + resp, err := svc.DescribeApplicationVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeApplications() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeApplicationsInput{ + ApplicationNames: []*string{ + aws.String("ApplicationName"), // Required + // More values... + }, + } + resp, err := svc.DescribeApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeConfigurationOptions() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeConfigurationOptionsInput{ + ApplicationName: aws.String("ApplicationName"), + EnvironmentName: aws.String("EnvironmentName"), + Options: []*elasticbeanstalk.OptionSpecification{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + }, + // More values... + }, + SolutionStackName: aws.String("SolutionStackName"), + TemplateName: aws.String("ConfigurationTemplateName"), + } + resp, err := svc.DescribeConfigurationOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeConfigurationSettings() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeConfigurationSettingsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + EnvironmentName: aws.String("EnvironmentName"), + TemplateName: aws.String("ConfigurationTemplateName"), + } + resp, err := svc.DescribeConfigurationSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEnvironmentHealth() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEnvironmentHealthInput{ + AttributeNames: []*string{ + aws.String("EnvironmentHealthAttribute"), // Required + // More values... + }, + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.DescribeEnvironmentHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEnvironmentResources() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEnvironmentResourcesInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.DescribeEnvironmentResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEnvironments() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEnvironmentsInput{ + ApplicationName: aws.String("ApplicationName"), + EnvironmentIds: []*string{ + aws.String("EnvironmentId"), // Required + // More values... + }, + EnvironmentNames: []*string{ + aws.String("EnvironmentName"), // Required + // More values... + }, + IncludeDeleted: aws.Bool(true), + IncludedDeletedBackTo: aws.Time(time.Now()), + VersionLabel: aws.String("VersionLabel"), + } + resp, err := svc.DescribeEnvironments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeEvents() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeEventsInput{ + ApplicationName: aws.String("ApplicationName"), + EndTime: aws.Time(time.Now()), + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + MaxRecords: aws.Int64(1), + NextToken: aws.String("Token"), + RequestId: aws.String("RequestId"), + Severity: aws.String("EventSeverity"), + StartTime: aws.Time(time.Now()), + TemplateName: aws.String("ConfigurationTemplateName"), + VersionLabel: aws.String("VersionLabel"), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_DescribeInstancesHealth() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.DescribeInstancesHealthInput{ + AttributeNames: []*string{ + aws.String("InstancesHealthAttribute"), // Required + // More values... 
+ }, + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeInstancesHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_ListAvailableSolutionStacks() { + svc := elasticbeanstalk.New(session.New()) + + var params *elasticbeanstalk.ListAvailableSolutionStacksInput + resp, err := svc.ListAvailableSolutionStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_RebuildEnvironment() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.RebuildEnvironmentInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.RebuildEnvironment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_RequestEnvironmentInfo() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.RequestEnvironmentInfoInput{ + InfoType: aws.String("EnvironmentInfoType"), // Required + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.RequestEnvironmentInfo(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_RestartAppServer() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.RestartAppServerInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.RestartAppServer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_RetrieveEnvironmentInfo() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.RetrieveEnvironmentInfoInput{ + InfoType: aws.String("EnvironmentInfoType"), // Required + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.RetrieveEnvironmentInfo(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_SwapEnvironmentCNAMEs() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.SwapEnvironmentCNAMEsInput{ + DestinationEnvironmentId: aws.String("EnvironmentId"), + DestinationEnvironmentName: aws.String("EnvironmentName"), + SourceEnvironmentId: aws.String("EnvironmentId"), + SourceEnvironmentName: aws.String("EnvironmentName"), + } + resp, err := svc.SwapEnvironmentCNAMEs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_TerminateEnvironment() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.TerminateEnvironmentInput{ + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + ForceTerminate: aws.Bool(true), + TerminateResources: aws.Bool(true), + } + resp, err := svc.TerminateEnvironment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_UpdateApplication() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.UpdateApplicationInput{ + ApplicationName: aws.String("ApplicationName"), // Required + Description: aws.String("Description"), + } + resp, err := svc.UpdateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_UpdateApplicationVersion() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.UpdateApplicationVersionInput{ + ApplicationName: aws.String("ApplicationName"), // Required + VersionLabel: aws.String("VersionLabel"), // Required + Description: aws.String("Description"), + } + resp, err := svc.UpdateApplicationVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticBeanstalk_UpdateConfigurationTemplate() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.UpdateConfigurationTemplateInput{ + ApplicationName: aws.String("ApplicationName"), // Required + TemplateName: aws.String("ConfigurationTemplateName"), // Required + Description: aws.String("Description"), + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... + }, + OptionsToRemove: []*elasticbeanstalk.OptionSpecification{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + }, + // More values... + }, + } + resp, err := svc.UpdateConfigurationTemplate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_UpdateEnvironment() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.UpdateEnvironmentInput{ + ApplicationName: aws.String("ApplicationName"), + Description: aws.String("Description"), + EnvironmentId: aws.String("EnvironmentId"), + EnvironmentName: aws.String("EnvironmentName"), + GroupName: aws.String("GroupName"), + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... 
+ }, + OptionsToRemove: []*elasticbeanstalk.OptionSpecification{ + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + }, + // More values... + }, + SolutionStackName: aws.String("SolutionStackName"), + TemplateName: aws.String("ConfigurationTemplateName"), + Tier: &elasticbeanstalk.EnvironmentTier{ + Name: aws.String("String"), + Type: aws.String("String"), + Version: aws.String("String"), + }, + VersionLabel: aws.String("VersionLabel"), + } + resp, err := svc.UpdateEnvironment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticBeanstalk_ValidateConfigurationSettings() { + svc := elasticbeanstalk.New(session.New()) + + params := &elasticbeanstalk.ValidateConfigurationSettingsInput{ + ApplicationName: aws.String("ApplicationName"), // Required + OptionSettings: []*elasticbeanstalk.ConfigurationOptionSetting{ // Required + { // Required + Namespace: aws.String("OptionNamespace"), + OptionName: aws.String("ConfigurationOptionName"), + ResourceName: aws.String("ResourceName"), + Value: aws.String("ConfigurationOptionValue"), + }, + // More values... + }, + EnvironmentName: aws.String("EnvironmentName"), + TemplateName: aws.String("ConfigurationTemplateName"), + } + resp, err := svc.ValidateConfigurationSettings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,106 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticbeanstalk + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the AWS Elastic Beanstalk API Reference. This guide provides detailed +// information about AWS Elastic Beanstalk actions, data types, parameters, +// and errors. +// +// AWS Elastic Beanstalk is a tool that makes it easy for you to create, deploy, +// and manage scalable, fault-tolerant applications running on Amazon Web Services +// cloud resources. +// +// For more information about this product, go to the AWS Elastic Beanstalk +// (http://aws.amazon.com/elasticbeanstalk/) details page. The location of the +// latest AWS Elastic Beanstalk WSDL is http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl +// (http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl). +// To install the Software Development Kits (SDKs), Integrated Development Environment +// (IDE) Toolkits, and command line tools that enable you to access the API, +// go to Tools for Amazon Web Services (https://aws.amazon.com/tools/). 
+// +// Endpoints +// +// For a list of region-specific endpoints that AWS Elastic Beanstalk supports, +// go to Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region) +// in the Amazon Web Services Glossary. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type ElasticBeanstalk struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "elasticbeanstalk" + +// New creates a new instance of the ElasticBeanstalk client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ElasticBeanstalk client from just a session. +// svc := elasticbeanstalk.New(mySession) +// +// // Create a ElasticBeanstalk client with additional configuration +// svc := elasticbeanstalk.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticBeanstalk { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElasticBeanstalk { + svc := &ElasticBeanstalk{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-12-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ElasticBeanstalk operation and runs any +// custom request initialization. +func (c *ElasticBeanstalk) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1149 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticsearchservice provides a client for Amazon Elasticsearch Service. 
+package elasticsearchservice + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a request for the AddTags operation. +func (c *ElasticsearchService) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/tags", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Attaches tags to an existing Elasticsearch domain. Tags are a set of case-sensitive +// key value pairs. An Elasticsearch domain may have up to 10 tags. See Tagging +// Amazon Elasticsearch Service Domains for more information. (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-awsresorcetagging" +// target="_blank) +func (c *ElasticsearchService) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateElasticsearchDomain = "CreateElasticsearchDomain" + +// CreateElasticsearchDomainRequest generates a request for the CreateElasticsearchDomain operation. 
+func (c *ElasticsearchService) CreateElasticsearchDomainRequest(input *CreateElasticsearchDomainInput) (req *request.Request, output *CreateElasticsearchDomainOutput) { + op := &request.Operation{ + Name: opCreateElasticsearchDomain, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/domain", + } + + if input == nil { + input = &CreateElasticsearchDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateElasticsearchDomainOutput{} + req.Data = output + return +} + +// Creates a new Elasticsearch domain. For more information, see Creating Elasticsearch +// Domains (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains" +// target="_blank) in the Amazon Elasticsearch Service Developer Guide. +func (c *ElasticsearchService) CreateElasticsearchDomain(input *CreateElasticsearchDomainInput) (*CreateElasticsearchDomainOutput, error) { + req, out := c.CreateElasticsearchDomainRequest(input) + err := req.Send() + return out, err +} + +const opDeleteElasticsearchDomain = "DeleteElasticsearchDomain" + +// DeleteElasticsearchDomainRequest generates a request for the DeleteElasticsearchDomain operation. +func (c *ElasticsearchService) DeleteElasticsearchDomainRequest(input *DeleteElasticsearchDomainInput) (req *request.Request, output *DeleteElasticsearchDomainOutput) { + op := &request.Operation{ + Name: opDeleteElasticsearchDomain, + HTTPMethod: "DELETE", + HTTPPath: "/2015-01-01/es/domain/{DomainName}", + } + + if input == nil { + input = &DeleteElasticsearchDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteElasticsearchDomainOutput{} + req.Data = output + return +} + +// Permanently deletes the specified Elasticsearch domain and all of its data. +// Once a domain is deleted, it cannot be recovered. 
+func (c *ElasticsearchService) DeleteElasticsearchDomain(input *DeleteElasticsearchDomainInput) (*DeleteElasticsearchDomainOutput, error) { + req, out := c.DeleteElasticsearchDomainRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticsearchDomain = "DescribeElasticsearchDomain" + +// DescribeElasticsearchDomainRequest generates a request for the DescribeElasticsearchDomain operation. +func (c *ElasticsearchService) DescribeElasticsearchDomainRequest(input *DescribeElasticsearchDomainInput) (req *request.Request, output *DescribeElasticsearchDomainOutput) { + op := &request.Operation{ + Name: opDescribeElasticsearchDomain, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/domain/{DomainName}", + } + + if input == nil { + input = &DescribeElasticsearchDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticsearchDomainOutput{} + req.Data = output + return +} + +// Returns domain configuration information about the specified Elasticsearch +// domain, including the domain ID, domain endpoint, and domain ARN. +func (c *ElasticsearchService) DescribeElasticsearchDomain(input *DescribeElasticsearchDomainInput) (*DescribeElasticsearchDomainOutput, error) { + req, out := c.DescribeElasticsearchDomainRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticsearchDomainConfig = "DescribeElasticsearchDomainConfig" + +// DescribeElasticsearchDomainConfigRequest generates a request for the DescribeElasticsearchDomainConfig operation. 
+func (c *ElasticsearchService) DescribeElasticsearchDomainConfigRequest(input *DescribeElasticsearchDomainConfigInput) (req *request.Request, output *DescribeElasticsearchDomainConfigOutput) { + op := &request.Operation{ + Name: opDescribeElasticsearchDomainConfig, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/domain/{DomainName}/config", + } + + if input == nil { + input = &DescribeElasticsearchDomainConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticsearchDomainConfigOutput{} + req.Data = output + return +} + +// Provides cluster configuration information about the specified Elasticsearch +// domain, such as the state, creation date, update version, and update date +// for cluster options. +func (c *ElasticsearchService) DescribeElasticsearchDomainConfig(input *DescribeElasticsearchDomainConfigInput) (*DescribeElasticsearchDomainConfigOutput, error) { + req, out := c.DescribeElasticsearchDomainConfigRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticsearchDomains = "DescribeElasticsearchDomains" + +// DescribeElasticsearchDomainsRequest generates a request for the DescribeElasticsearchDomains operation. +func (c *ElasticsearchService) DescribeElasticsearchDomainsRequest(input *DescribeElasticsearchDomainsInput) (req *request.Request, output *DescribeElasticsearchDomainsOutput) { + op := &request.Operation{ + Name: opDescribeElasticsearchDomains, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/domain-info", + } + + if input == nil { + input = &DescribeElasticsearchDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticsearchDomainsOutput{} + req.Data = output + return +} + +// Returns domain configuration information about the specified Elasticsearch +// domains, including the domain ID, domain endpoint, and domain ARN. 
+func (c *ElasticsearchService) DescribeElasticsearchDomains(input *DescribeElasticsearchDomainsInput) (*DescribeElasticsearchDomainsOutput, error) { + req, out := c.DescribeElasticsearchDomainsRequest(input) + err := req.Send() + return out, err +} + +const opListDomainNames = "ListDomainNames" + +// ListDomainNamesRequest generates a request for the ListDomainNames operation. +func (c *ElasticsearchService) ListDomainNamesRequest(input *ListDomainNamesInput) (req *request.Request, output *ListDomainNamesOutput) { + op := &request.Operation{ + Name: opListDomainNames, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/domain", + } + + if input == nil { + input = &ListDomainNamesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainNamesOutput{} + req.Data = output + return +} + +// Returns the name of all Elasticsearch domains owned by the current user's +// account. +func (c *ElasticsearchService) ListDomainNames(input *ListDomainNamesInput) (*ListDomainNamesOutput, error) { + req, out := c.ListDomainNamesRequest(input) + err := req.Send() + return out, err +} + +const opListTags = "ListTags" + +// ListTagsRequest generates a request for the ListTags operation. +func (c *ElasticsearchService) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { + op := &request.Operation{ + Name: opListTags, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/tags/", + } + + if input == nil { + input = &ListTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsOutput{} + req.Data = output + return +} + +// Returns all tags for the given Elasticsearch domain. +func (c *ElasticsearchService) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a request for the RemoveTags operation. 
+func (c *ElasticsearchService) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/tags-removal", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes the specified set of tags from the specified Elasticsearch domain. +func (c *ElasticsearchService) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateElasticsearchDomainConfig = "UpdateElasticsearchDomainConfig" + +// UpdateElasticsearchDomainConfigRequest generates a request for the UpdateElasticsearchDomainConfig operation. +func (c *ElasticsearchService) UpdateElasticsearchDomainConfigRequest(input *UpdateElasticsearchDomainConfigInput) (req *request.Request, output *UpdateElasticsearchDomainConfigOutput) { + op := &request.Operation{ + Name: opUpdateElasticsearchDomainConfig, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/domain/{DomainName}/config", + } + + if input == nil { + input = &UpdateElasticsearchDomainConfigInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateElasticsearchDomainConfigOutput{} + req.Data = output + return +} + +// Modifies the cluster configuration of the specified Elasticsearch domain, +// setting as setting the instance type and the number of instances. 
+func (c *ElasticsearchService) UpdateElasticsearchDomainConfig(input *UpdateElasticsearchDomainConfigInput) (*UpdateElasticsearchDomainConfigOutput, error) { + req, out := c.UpdateElasticsearchDomainConfigRequest(input) + err := req.Send() + return out, err +} + +// The configured access rules for the domain's document and search endpoints, +// and the current status of those rules. +type AccessPoliciesStatus struct { + _ struct{} `type:"structure"` + + // The access policy configured for the Elasticsearch domain. Access policies + // may be resource-based, IP-based, or IAM-based. See Configuring Access Policies + // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-access-policies" + // target="_blank)for more information. + Options *string `type:"string" required:"true"` + + // The status of the access policy for the Elasticsearch domain. See OptionStatus + // for the status information that's included. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AccessPoliciesStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessPoliciesStatus) GoString() string { + return s.String() +} + +// Container for the parameters to the AddTags operation. Specify the tags that +// you want to attach to the Elasticsearch domain. +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // Specify the ARN for which you want to add the tags. + ARN *string `type:"string" required:"true"` + + // List of Tag that need to be added for the Elasticsearch domain. 
+ TagList []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// Status of the advanced options for the specified Elasticsearch domain. Currently, +// the following advanced options are available: +// +// Option to allow references to indices in an HTTP request body. Must be +// false when configuring access to individual sub-resources. By default, the +// value is true. See Configuration Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options" +// target="_blank) for more information. Option to specify the percentage of +// heap space that is allocated to field data. By default, this setting is unbounded. +// For more information, see Configuring Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options). +type AdvancedOptionsStatus struct { + _ struct{} `type:"structure"` + + // Specifies the status of advanced options for the specified Elasticsearch + // domain. + Options map[string]*string `type:"map" required:"true"` + + // Specifies the status of OptionStatus for advanced options for the specified + // Elasticsearch domain. 
+ Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AdvancedOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedOptionsStatus) GoString() string { + return s.String() +} + +type CreateElasticsearchDomainInput struct { + _ struct{} `type:"structure"` + + // IAM access policy as a JSON-formatted string. + AccessPolicies *string `type:"string"` + + // Option to allow references to indices in an HTTP request body. Must be false + // when configuring access to individual sub-resources. By default, the value + // is true. See Configuration Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options" + // target="_blank) for more information. + AdvancedOptions map[string]*string `type:"map"` + + // The name of the Elasticsearch domain that you are creating. Domain names + // are unique across the domains owned by an account within an AWS region. Domain + // names must start with a letter or number and can contain the following characters: + // a-z (lowercase), 0-9, and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // Options to enable, disable and specify the type and size of EBS storage volumes. + EBSOptions *EBSOptions `type:"structure"` + + // Configuration options for an Elasticsearch domain. Specifies the instance + // type and number of instances in the domain cluster. + ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"` + + // Option to set time, in UTC format, of the daily automated snapshot. Default + // value is 0 hours. 
+ SnapshotOptions *SnapshotOptions `type:"structure"` +} + +// String returns the string representation +func (s CreateElasticsearchDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateElasticsearchDomainInput) GoString() string { + return s.String() +} + +// The result of a CreateElasticsearchDomain operation. Contains the status +// of the newly created Elasticsearch domain. +type CreateElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` + + // The status of the newly created Elasticsearch domain. + DomainStatus *ElasticsearchDomainStatus `type:"structure"` +} + +// String returns the string representation +func (s CreateElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateElasticsearchDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DeleteElasticsearchDomain operation. +// Specifies the name of the Elasticsearch domain that you want to delete. +type DeleteElasticsearchDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the Elasticsearch domain that you want to permanently delete. + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteElasticsearchDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteElasticsearchDomainInput) GoString() string { + return s.String() +} + +// The result of a DeleteElasticsearchDomain request. Contains the status of +// the pending deletion, or no status if the domain and all of its resources +// have been deleted. +type DeleteElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` + + // The status of the Elasticsearch domain being deleted. 
+ DomainStatus *ElasticsearchDomainStatus `type:"structure"` +} + +// String returns the string representation +func (s DeleteElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteElasticsearchDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeElasticsearchDomainConfig operation. +// Specifies the domain name for which you want configuration information. +type DescribeElasticsearchDomainConfigInput struct { + _ struct{} `type:"structure"` + + // The Elasticsearch domain that you want to get information about. + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainConfigInput) GoString() string { + return s.String() +} + +// The result of a DescribeElasticsearchDomainConfig request. Contains the configuration +// information of the requested domain. +type DescribeElasticsearchDomainConfigOutput struct { + _ struct{} `type:"structure"` + + // The configuration information of the domain requested in the DescribeElasticsearchDomainConfig + // request. + DomainConfig *ElasticsearchDomainConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainConfigOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeElasticsearchDomain operation. +type DescribeElasticsearchDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the Elasticsearch domain for which you want information. 
+ DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainInput) GoString() string { + return s.String() +} + +// The result of a DescribeElasticsearchDomain request. Contains the status +// of the domain specified in the request. +type DescribeElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` + + // The current status of the Elasticsearch domain. + DomainStatus *ElasticsearchDomainStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the DescribeElasticsearchDomains operation. +// By default, the API returns the status of all Elasticsearch domains. +type DescribeElasticsearchDomainsInput struct { + _ struct{} `type:"structure"` + + // The Elasticsearch domains for which you want information. + DomainNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainsInput) GoString() string { + return s.String() +} + +// The result of a DescribeElasticsearchDomains request. Contains the status +// of the specified domains or all domains owned by the account. +type DescribeElasticsearchDomainsOutput struct { + _ struct{} `type:"structure"` + + // The status of the domains requested in the DescribeElasticsearchDomains request. 
+ DomainStatusList []*ElasticsearchDomainStatus `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeElasticsearchDomainsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticsearchDomainsOutput) GoString() string { + return s.String() +} + +type DomainInfo struct { + _ struct{} `type:"structure"` + + // Specifies the DomainName. + DomainName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s DomainInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainInfo) GoString() string { + return s.String() +} + +// Options to enable, disable, and specify the properties of EBS storage volumes. +// For more information, see Configuring EBS-based Storage (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs" +// target="_blank). +type EBSOptions struct { + _ struct{} `type:"structure"` + + // Specifies whether EBS-based storage is enabled. + EBSEnabled *bool `type:"boolean"` + + // Specifies the IOPD for a Provisioned IOPS EBS volume (SSD). + Iops *int64 `type:"integer"` + + // Integer to specify the size of an EBS volume. + VolumeSize *int64 `type:"integer"` + + // Specifies the volume type for EBS-based storage. + VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s EBSOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EBSOptions) GoString() string { + return s.String() +} + +// Status of the EBS options for the specified Elasticsearch domain. +type EBSOptionsStatus struct { + _ struct{} `type:"structure"` + + // Specifies the EBS options for the specified Elasticsearch domain. 
+ Options *EBSOptions `type:"structure" required:"true"` + + // Specifies the status of the EBS options for the specified Elasticsearch domain. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s EBSOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EBSOptionsStatus) GoString() string { + return s.String() +} + +// Specifies the configuration for the domain cluster, such as the type and +// number of instances. +type ElasticsearchClusterConfig struct { + _ struct{} `type:"structure"` + + // Total number of dedicated master nodes, active and on standby, for the cluster. + DedicatedMasterCount *int64 `type:"integer"` + + // A boolean value to indicate whether a dedicated master node is enabled. See + // About Dedicated Master Nodes (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-dedicatedmasternodes" + // target="_blank) for more information. + DedicatedMasterEnabled *bool `type:"boolean"` + + // The instance type for a dedicated master node. + DedicatedMasterType *string `type:"string" enum:"ESPartitionInstanceType"` + + // The number of instances in the specified domain cluster. + InstanceCount *int64 `type:"integer"` + + // The instance type for an Elasticsearch cluster. + InstanceType *string `type:"string" enum:"ESPartitionInstanceType"` + + // A boolean value to indicate whether zone awareness is enabled. See About + // Zone Awareness (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains.html#es-managedomains-zoneawareness" + // target="_blank) for more information. 
+ ZoneAwarenessEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ElasticsearchClusterConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchClusterConfig) GoString() string { + return s.String() +} + +// Specifies the configuration status for the specified Elasticsearch domain. +type ElasticsearchClusterConfigStatus struct { + _ struct{} `type:"structure"` + + // Specifies the cluster configuration for the specified Elasticsearch domain. + Options *ElasticsearchClusterConfig `type:"structure" required:"true"` + + // Specifies the status of the configuration for the specified Elasticsearch + // domain. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ElasticsearchClusterConfigStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchClusterConfigStatus) GoString() string { + return s.String() +} + +// The configuration of an Elasticsearch domain. +type ElasticsearchDomainConfig struct { + _ struct{} `type:"structure"` + + // IAM access policy as a JSON-formatted string. + AccessPolicies *AccessPoliciesStatus `type:"structure"` + + // Specifies the AdvancedOptions for the domain. See Configuring Advanced Options + // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options" + // target="_blank) for more information. + AdvancedOptions *AdvancedOptionsStatus `type:"structure"` + + // Specifies the EBSOptions for the Elasticsearch domain. + EBSOptions *EBSOptionsStatus `type:"structure"` + + // Specifies the ElasticsearchClusterConfig for the Elasticsearch domain. + ElasticsearchClusterConfig *ElasticsearchClusterConfigStatus `type:"structure"` + + // Specifies the SnapshotOptions for the Elasticsearch domain. 
+ SnapshotOptions *SnapshotOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s ElasticsearchDomainConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchDomainConfig) GoString() string { + return s.String() +} + +// The current status of an Elasticsearch domain. +type ElasticsearchDomainStatus struct { + _ struct{} `type:"structure"` + + // The Amazon resource name (ARN) of an Elasticsearch domain. See Identifiers + // for IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html?Using_Identifiers.html" + // target="_blank) in Using AWS Identity and Access Management for more information. + ARN *string `type:"string" required:"true"` + + // IAM access policy as a JSON-formatted string. + AccessPolicies *string `type:"string"` + + // Specifies the status of the AdvancedOptions + AdvancedOptions map[string]*string `type:"map"` + + // The domain creation status. True if the creation of an Elasticsearch domain + // is complete. False if domain creation is still in progress. + Created *bool `type:"boolean"` + + // The domain deletion status. True if a delete request has been received for + // the domain but resource cleanup is still in progress. False if the domain + // has not been deleted. Once domain deletion is complete, the status of the + // domain is no longer returned. + Deleted *bool `type:"boolean"` + + // The unique identifier for the specified Elasticsearch domain. + DomainId *string `min:"1" type:"string" required:"true"` + + // The name of an Elasticsearch domain. Domain names are unique across the domains + // owned by an account within an AWS region. Domain names start with a letter + // or number and can contain the following characters: a-z (lowercase), 0-9, + // and - (hyphen). + DomainName *string `min:"3" type:"string" required:"true"` + + // The EBSOptions for the specified domain. 
See Configuring EBS-based Storage + // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs" + // target="_blank) for more information. + EBSOptions *EBSOptions `type:"structure"` + + // The type and number of instances in the domain cluster. + ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure" required:"true"` + + // The Elasticsearch domain endpoint that you use to submit index and search + // requests. + Endpoint *string `type:"string"` + + // The status of the Elasticsearch domain configuration. True if Amazon Elasticsearch + // Service is processing configuration changes. False if the configuration is + // active. + Processing *bool `type:"boolean"` + + // Specifies the status of the SnapshotOptions + SnapshotOptions *SnapshotOptions `type:"structure"` +} + +// String returns the string representation +func (s ElasticsearchDomainStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchDomainStatus) GoString() string { + return s.String() +} + +type ListDomainNamesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListDomainNamesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainNamesInput) GoString() string { + return s.String() +} + +// The result of a ListDomainNames operation. Contains the names of all Elasticsearch +// domains owned by this account. +type ListDomainNamesOutput struct { + _ struct{} `type:"structure"` + + // List of Elasticsearch domain names. 
+ DomainNames []*DomainInfo `type:"list"` +} + +// String returns the string representation +func (s ListDomainNamesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainNamesOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the ListTags operation. Specify the ARN for +// the Elasticsearch domain to which the tags are attached that you want to +// view are attached. +type ListTagsInput struct { + _ struct{} `type:"structure"` + + // Specify the ARN for the Elasticsearch domain to which the tags are attached + // that you want to view. + ARN *string `location:"querystring" locationName:"arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsInput) GoString() string { + return s.String() +} + +// The result of a ListTags operation. Contains tags for all requested Elasticsearch +// domains. +type ListTagsOutput struct { + _ struct{} `type:"structure"` + + // List of Tag for the requested Elasticsearch domain. + TagList []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsOutput) GoString() string { + return s.String() +} + +// Provides the current status of the entity. +type OptionStatus struct { + _ struct{} `type:"structure"` + + // Timestamp which tells the creation date for the entity. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Indicates whether the Elasticsearch domain is being deleted. + PendingDeletion *bool `type:"boolean"` + + // Provides the OptionState for the Elasticsearch domain. 
+ State *string `type:"string" required:"true" enum:"OptionState"` + + // Timestamp which tells the last updated time for the entity. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Specifies the latest version for the entity. + UpdateVersion *int64 `type:"integer"` +} + +// String returns the string representation +func (s OptionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionStatus) GoString() string { + return s.String() +} + +// Container for the parameters to the RemoveTags operation. Specify the ARN +// for the Elasticsearch domain from which you want to remove the specified +// TagKey. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN for the Elasticsearch domain from which you want to delete + // the specified tags. + ARN *string `type:"string" required:"true"` + + // Specifies the TagKey list which you want to remove from the Elasticsearch + // domain. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Specifies the time, in UTC format, when the service takes a daily automated +// snapshot of the specified Elasticsearch domain. Default value is 0 hours. +type SnapshotOptions struct { + _ struct{} `type:"structure"` + + // Specifies the time, in UTC format, when the service takes a daily automated + // snapshot of the specified Elasticsearch domain. 
Default value is 0 hours. + AutomatedSnapshotStartHour *int64 `type:"integer"` +} + +// String returns the string representation +func (s SnapshotOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotOptions) GoString() string { + return s.String() +} + +// Status of a daily automated snapshot. +type SnapshotOptionsStatus struct { + _ struct{} `type:"structure"` + + // Specifies the daily snapshot options specified for the Elasticsearch domain. + Options *SnapshotOptions `type:"structure" required:"true"` + + // Specifies the status of a daily automated snapshot. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SnapshotOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotOptionsStatus) GoString() string { + return s.String() +} + +// Specifies a key value pair for a resource tag. +type Tag struct { + _ struct{} `type:"structure"` + + // Specifies the TagKey, the name of the tag. Tag keys must be unique for the + // Elasticsearch domain to which they are attached. + Key *string `min:"1" type:"string" required:"true"` + + // Specifies the TagValue, the value assigned to the corresponding tag key. + // Tag values can be null and do not have to be unique in a tag set. For example, + // you can have a key value pair in a tag set of project : Trinity and cost-center + // : Trinity + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Container for the parameters to the UpdateElasticsearchDomain operation. +// Specifies the type and number of instances in the domain cluster. 
+type UpdateElasticsearchDomainConfigInput struct { + _ struct{} `type:"structure"` + + // IAM access policy as a JSON-formatted string. + AccessPolicies *string `type:"string"` + + // Modifies the advanced option to allow references to indices in an HTTP request + // body. Must be false when configuring access to individual sub-resources. + // By default, the value is true. See Configuration Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options" + // target="_blank) for more information. + AdvancedOptions map[string]*string `type:"map"` + + // The name of the Elasticsearch domain that you are updating. + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` + + // Specify the type and size of the EBS volume that you want to use. + EBSOptions *EBSOptions `type:"structure"` + + // The type and number of instances to instantiate for the domain cluster. + ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"` + + // Option to set the time, in UTC format, for the daily automated snapshot. + // Default value is 0 hours. + SnapshotOptions *SnapshotOptions `type:"structure"` +} + +// String returns the string representation +func (s UpdateElasticsearchDomainConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticsearchDomainConfigInput) GoString() string { + return s.String() +} + +// The result of an UpdateElasticsearchDomain request. Contains the status of +// the Elasticsearch domain being updated. +type UpdateElasticsearchDomainConfigOutput struct { + _ struct{} `type:"structure"` + + // The status of the updated Elasticsearch domain. 
+ DomainConfig *ElasticsearchDomainConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateElasticsearchDomainConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticsearchDomainConfigOutput) GoString() string { + return s.String() +} + +const ( + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM3MediumElasticsearch = "m3.medium.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM3LargeElasticsearch = "m3.large.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM3XlargeElasticsearch = "m3.xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM32xlargeElasticsearch = "m3.2xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeT2MicroElasticsearch = "t2.micro.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeT2SmallElasticsearch = "t2.small.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeT2MediumElasticsearch = "t2.medium.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR3LargeElasticsearch = "r3.large.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR3XlargeElasticsearch = "r3.xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR32xlargeElasticsearch = "r3.2xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR34xlargeElasticsearch = "r3.4xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeR38xlargeElasticsearch = "r3.8xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeI2XlargeElasticsearch = "i2.xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeI22xlargeElasticsearch = "i2.2xlarge.elasticsearch" +) + +// The state of a requested change. 
One of the following: +// +// Processing: The request change is still in-process. Active: The request +// change is processed and deployed to the Elasticsearch domain. +const ( + // @enum OptionState + OptionStateRequiresIndexDocuments = "RequiresIndexDocuments" + // @enum OptionState + OptionStateProcessing = "Processing" + // @enum OptionState + OptionStateActive = "Active" +) + +// The type of EBS volume, standard, gp2, or io1. See Configuring EBS-based +// Storage (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs" +// target="_blank)for more information. +const ( + // @enum VolumeType + VolumeTypeStandard = "standard" + // @enum VolumeType + VolumeTypeGp2 = "gp2" + // @enum VolumeType + VolumeTypeIo1 = "io1" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/elasticsearchserviceiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,54 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elasticsearchserviceiface provides an interface for the Amazon Elasticsearch Service. +package elasticsearchserviceiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/elasticsearchservice" +) + +// ElasticsearchServiceAPI is the interface type for elasticsearchservice.ElasticsearchService. 
+type ElasticsearchServiceAPI interface { + AddTagsRequest(*elasticsearchservice.AddTagsInput) (*request.Request, *elasticsearchservice.AddTagsOutput) + + AddTags(*elasticsearchservice.AddTagsInput) (*elasticsearchservice.AddTagsOutput, error) + + CreateElasticsearchDomainRequest(*elasticsearchservice.CreateElasticsearchDomainInput) (*request.Request, *elasticsearchservice.CreateElasticsearchDomainOutput) + + CreateElasticsearchDomain(*elasticsearchservice.CreateElasticsearchDomainInput) (*elasticsearchservice.CreateElasticsearchDomainOutput, error) + + DeleteElasticsearchDomainRequest(*elasticsearchservice.DeleteElasticsearchDomainInput) (*request.Request, *elasticsearchservice.DeleteElasticsearchDomainOutput) + + DeleteElasticsearchDomain(*elasticsearchservice.DeleteElasticsearchDomainInput) (*elasticsearchservice.DeleteElasticsearchDomainOutput, error) + + DescribeElasticsearchDomainRequest(*elasticsearchservice.DescribeElasticsearchDomainInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainOutput) + + DescribeElasticsearchDomain(*elasticsearchservice.DescribeElasticsearchDomainInput) (*elasticsearchservice.DescribeElasticsearchDomainOutput, error) + + DescribeElasticsearchDomainConfigRequest(*elasticsearchservice.DescribeElasticsearchDomainConfigInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainConfigOutput) + + DescribeElasticsearchDomainConfig(*elasticsearchservice.DescribeElasticsearchDomainConfigInput) (*elasticsearchservice.DescribeElasticsearchDomainConfigOutput, error) + + DescribeElasticsearchDomainsRequest(*elasticsearchservice.DescribeElasticsearchDomainsInput) (*request.Request, *elasticsearchservice.DescribeElasticsearchDomainsOutput) + + DescribeElasticsearchDomains(*elasticsearchservice.DescribeElasticsearchDomainsInput) (*elasticsearchservice.DescribeElasticsearchDomainsOutput, error) + + ListDomainNamesRequest(*elasticsearchservice.ListDomainNamesInput) (*request.Request, 
*elasticsearchservice.ListDomainNamesOutput) + + ListDomainNames(*elasticsearchservice.ListDomainNamesInput) (*elasticsearchservice.ListDomainNamesOutput, error) + + ListTagsRequest(*elasticsearchservice.ListTagsInput) (*request.Request, *elasticsearchservice.ListTagsOutput) + + ListTags(*elasticsearchservice.ListTagsInput) (*elasticsearchservice.ListTagsOutput, error) + + RemoveTagsRequest(*elasticsearchservice.RemoveTagsInput) (*request.Request, *elasticsearchservice.RemoveTagsOutput) + + RemoveTags(*elasticsearchservice.RemoveTagsInput) (*elasticsearchservice.RemoveTagsOutput, error) + + UpdateElasticsearchDomainConfigRequest(*elasticsearchservice.UpdateElasticsearchDomainConfigInput) (*request.Request, *elasticsearchservice.UpdateElasticsearchDomainConfigOutput) + + UpdateElasticsearchDomainConfig(*elasticsearchservice.UpdateElasticsearchDomainConfigInput) (*elasticsearchservice.UpdateElasticsearchDomainConfigOutput, error) +} + +var _ ElasticsearchServiceAPI = (*elasticsearchservice.ElasticsearchService)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,262 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package elasticsearchservice_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elasticsearchservice" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleElasticsearchService_AddTags() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.AddTagsInput{ + ARN: aws.String("ARN"), // Required + TagList: []*elasticsearchservice.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_CreateElasticsearchDomain() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.CreateElasticsearchDomainInput{ + DomainName: aws.String("DomainName"), // Required + AccessPolicies: aws.String("PolicyDocument"), + AdvancedOptions: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + EBSOptions: &elasticsearchservice.EBSOptions{ + EBSEnabled: aws.Bool(true), + Iops: aws.Int64(1), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + ElasticsearchClusterConfig: &elasticsearchservice.ElasticsearchClusterConfig{ + DedicatedMasterCount: aws.Int64(1), + DedicatedMasterEnabled: aws.Bool(true), + DedicatedMasterType: aws.String("ESPartitionInstanceType"), + InstanceCount: aws.Int64(1), + InstanceType: aws.String("ESPartitionInstanceType"), + ZoneAwarenessEnabled: aws.Bool(true), + }, + SnapshotOptions: &elasticsearchservice.SnapshotOptions{ + AutomatedSnapshotStartHour: aws.Int64(1), + }, + } + resp, err := svc.CreateElasticsearchDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DeleteElasticsearchDomain() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DeleteElasticsearchDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DeleteElasticsearchDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomain() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeElasticsearchDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomainConfig() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainConfigInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeElasticsearchDomainConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_DescribeElasticsearchDomains() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.DescribeElasticsearchDomainsInput{ + DomainNames: []*string{ // Required + aws.String("DomainName"), // Required + // More values... + }, + } + resp, err := svc.DescribeElasticsearchDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_ListDomainNames() { + svc := elasticsearchservice.New(session.New()) + + var params *elasticsearchservice.ListDomainNamesInput + resp, err := svc.ListDomainNames(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_ListTags() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.ListTagsInput{ + ARN: aws.String("ARN"), // Required + } + resp, err := svc.ListTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticsearchService_RemoveTags() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.RemoveTagsInput{ + ARN: aws.String("ARN"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticsearchService_UpdateElasticsearchDomainConfig() { + svc := elasticsearchservice.New(session.New()) + + params := &elasticsearchservice.UpdateElasticsearchDomainConfigInput{ + DomainName: aws.String("DomainName"), // Required + AccessPolicies: aws.String("PolicyDocument"), + AdvancedOptions: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + EBSOptions: &elasticsearchservice.EBSOptions{ + EBSEnabled: aws.Bool(true), + Iops: aws.Int64(1), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + ElasticsearchClusterConfig: &elasticsearchservice.ElasticsearchClusterConfig{ + DedicatedMasterCount: aws.Int64(1), + DedicatedMasterEnabled: aws.Bool(true), + DedicatedMasterType: aws.String("ESPartitionInstanceType"), + InstanceCount: aws.Int64(1), + InstanceType: aws.String("ESPartitionInstanceType"), + ZoneAwarenessEnabled: aws.Bool(true), + }, + SnapshotOptions: &elasticsearchservice.SnapshotOptions{ + AutomatedSnapshotStartHour: aws.Int64(1), + }, + } + resp, err := svc.UpdateElasticsearchDomainConfig(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,92 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elasticsearchservice + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Use the Amazon Elasticsearch configuration API to create, configure, and +// manage Elasticsearch domains. +// +// The endpoint for configuration service requests is region-specific: es.region.amazonaws.com. +// For example, es.us-east-1.amazonaws.com. For a current list of supported +// regions and endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#cloudsearch_region" +// target="_blank). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type ElasticsearchService struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "es" + +// New creates a new instance of the ElasticsearchService client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ElasticsearchService client from just a session. +// svc := elasticsearchservice.New(mySession) +// +// // Create a ElasticsearchService client with additional configuration +// svc := elasticsearchservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticsearchService { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElasticsearchService { + svc := &ElasticsearchService{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ElasticsearchService operation and runs any +// custom request initialization. 
+func (c *ElasticsearchService) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3997 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elastictranscoder provides a client for Amazon Elastic Transcoder. +package elastictranscoder + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCancelJob = "CancelJob" + +// CancelJobRequest generates a request for the CancelJob operation. +func (c *ElasticTranscoder) CancelJobRequest(input *CancelJobInput) (req *request.Request, output *CancelJobOutput) { + op := &request.Operation{ + Name: opCancelJob, + HTTPMethod: "DELETE", + HTTPPath: "/2012-09-25/jobs/{Id}", + } + + if input == nil { + input = &CancelJobInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelJobOutput{} + req.Data = output + return +} + +// The CancelJob operation cancels an unfinished job. +// +// You can only cancel a job that has a status of Submitted. To prevent a pipeline +// from starting to process a job while you're getting the job identifier, use +// UpdatePipelineStatus to temporarily pause the pipeline. 
+func (c *ElasticTranscoder) CancelJob(input *CancelJobInput) (*CancelJobOutput, error) { + req, out := c.CancelJobRequest(input) + err := req.Send() + return out, err +} + +const opCreateJob = "CreateJob" + +// CreateJobRequest generates a request for the CreateJob operation. +func (c *ElasticTranscoder) CreateJobRequest(input *CreateJobInput) (req *request.Request, output *CreateJobResponse) { + op := &request.Operation{ + Name: opCreateJob, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/jobs", + } + + if input == nil { + input = &CreateJobInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateJobResponse{} + req.Data = output + return +} + +// When you create a job, Elastic Transcoder returns JSON data that includes +// the values that you specified plus information about the job that is created. +// +// If you have specified more than one output for your jobs (for example, one +// output for the Kindle Fire and another output for the Apple iPhone 4s), you +// currently must use the Elastic Transcoder API to list the jobs (as opposed +// to the AWS Console). +func (c *ElasticTranscoder) CreateJob(input *CreateJobInput) (*CreateJobResponse, error) { + req, out := c.CreateJobRequest(input) + err := req.Send() + return out, err +} + +const opCreatePipeline = "CreatePipeline" + +// CreatePipelineRequest generates a request for the CreatePipeline operation. +func (c *ElasticTranscoder) CreatePipelineRequest(input *CreatePipelineInput) (req *request.Request, output *CreatePipelineOutput) { + op := &request.Operation{ + Name: opCreatePipeline, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/pipelines", + } + + if input == nil { + input = &CreatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePipelineOutput{} + req.Data = output + return +} + +// The CreatePipeline operation creates a pipeline with settings that you specify. 
+func (c *ElasticTranscoder) CreatePipeline(input *CreatePipelineInput) (*CreatePipelineOutput, error) { + req, out := c.CreatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opCreatePreset = "CreatePreset" + +// CreatePresetRequest generates a request for the CreatePreset operation. +func (c *ElasticTranscoder) CreatePresetRequest(input *CreatePresetInput) (req *request.Request, output *CreatePresetOutput) { + op := &request.Operation{ + Name: opCreatePreset, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/presets", + } + + if input == nil { + input = &CreatePresetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePresetOutput{} + req.Data = output + return +} + +// The CreatePreset operation creates a preset with settings that you specify. +// +// Elastic Transcoder checks the CreatePreset settings to ensure that they +// meet Elastic Transcoder requirements and to determine whether they comply +// with H.264 standards. If your settings are not valid for Elastic Transcoder, +// Elastic Transcoder returns an HTTP 400 response (ValidationException) and +// does not create the preset. If the settings are valid for Elastic Transcoder +// but aren't strictly compliant with the H.264 standard, Elastic Transcoder +// creates the preset and returns a warning message in the response. This helps +// you determine whether your settings comply with the H.264 standard while +// giving you greater flexibility with respect to the video that Elastic Transcoder +// produces. Elastic Transcoder uses the H.264 video-compression format. For +// more information, see the International Telecommunication Union publication +// Recommendation ITU-T H.264: Advanced video coding for generic audiovisual +// services. 
+func (c *ElasticTranscoder) CreatePreset(input *CreatePresetInput) (*CreatePresetOutput, error) { + req, out := c.CreatePresetRequest(input) + err := req.Send() + return out, err +} + +const opDeletePipeline = "DeletePipeline" + +// DeletePipelineRequest generates a request for the DeletePipeline operation. +func (c *ElasticTranscoder) DeletePipelineRequest(input *DeletePipelineInput) (req *request.Request, output *DeletePipelineOutput) { + op := &request.Operation{ + Name: opDeletePipeline, + HTTPMethod: "DELETE", + HTTPPath: "/2012-09-25/pipelines/{Id}", + } + + if input == nil { + input = &DeletePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePipelineOutput{} + req.Data = output + return +} + +// The DeletePipeline operation removes a pipeline. +// +// You can only delete a pipeline that has never been used or that is not +// currently in use (doesn't contain any active jobs). If the pipeline is currently +// in use, DeletePipeline returns an error. +func (c *ElasticTranscoder) DeletePipeline(input *DeletePipelineInput) (*DeletePipelineOutput, error) { + req, out := c.DeletePipelineRequest(input) + err := req.Send() + return out, err +} + +const opDeletePreset = "DeletePreset" + +// DeletePresetRequest generates a request for the DeletePreset operation. +func (c *ElasticTranscoder) DeletePresetRequest(input *DeletePresetInput) (req *request.Request, output *DeletePresetOutput) { + op := &request.Operation{ + Name: opDeletePreset, + HTTPMethod: "DELETE", + HTTPPath: "/2012-09-25/presets/{Id}", + } + + if input == nil { + input = &DeletePresetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeletePresetOutput{} + req.Data = output + return +} + +// The DeletePreset operation removes a preset that you've added in an AWS region. +// +// You can't delete the default presets that are included with Elastic Transcoder. 
+func (c *ElasticTranscoder) DeletePreset(input *DeletePresetInput) (*DeletePresetOutput, error) { + req, out := c.DeletePresetRequest(input) + err := req.Send() + return out, err +} + +const opListJobsByPipeline = "ListJobsByPipeline" + +// ListJobsByPipelineRequest generates a request for the ListJobsByPipeline operation. +func (c *ElasticTranscoder) ListJobsByPipelineRequest(input *ListJobsByPipelineInput) (req *request.Request, output *ListJobsByPipelineOutput) { + op := &request.Operation{ + Name: opListJobsByPipeline, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/jobsByPipeline/{PipelineId}", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsByPipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &ListJobsByPipelineOutput{} + req.Data = output + return +} + +// The ListJobsByPipeline operation gets a list of the jobs currently in a pipeline. +// +// Elastic Transcoder returns all of the jobs currently in the specified pipeline. +// The response body contains one element for each job that satisfies the search +// criteria. +func (c *ElasticTranscoder) ListJobsByPipeline(input *ListJobsByPipelineInput) (*ListJobsByPipelineOutput, error) { + req, out := c.ListJobsByPipelineRequest(input) + err := req.Send() + return out, err +} + +func (c *ElasticTranscoder) ListJobsByPipelinePages(input *ListJobsByPipelineInput, fn func(p *ListJobsByPipelineOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListJobsByPipelineRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListJobsByPipelineOutput), lastPage) + }) +} + +const opListJobsByStatus = "ListJobsByStatus" + +// ListJobsByStatusRequest generates a request for the ListJobsByStatus operation. 
+func (c *ElasticTranscoder) ListJobsByStatusRequest(input *ListJobsByStatusInput) (req *request.Request, output *ListJobsByStatusOutput) { + op := &request.Operation{ + Name: opListJobsByStatus, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/jobsByStatus/{Status}", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsByStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &ListJobsByStatusOutput{} + req.Data = output + return +} + +// The ListJobsByStatus operation gets a list of jobs that have a specified +// status. The response body contains one element for each job that satisfies +// the search criteria. +func (c *ElasticTranscoder) ListJobsByStatus(input *ListJobsByStatusInput) (*ListJobsByStatusOutput, error) { + req, out := c.ListJobsByStatusRequest(input) + err := req.Send() + return out, err +} + +func (c *ElasticTranscoder) ListJobsByStatusPages(input *ListJobsByStatusInput, fn func(p *ListJobsByStatusOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListJobsByStatusRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListJobsByStatusOutput), lastPage) + }) +} + +const opListPipelines = "ListPipelines" + +// ListPipelinesRequest generates a request for the ListPipelines operation. 
+func (c *ElasticTranscoder) ListPipelinesRequest(input *ListPipelinesInput) (req *request.Request, output *ListPipelinesOutput) { + op := &request.Operation{ + Name: opListPipelines, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/pipelines", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPipelinesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPipelinesOutput{} + req.Data = output + return +} + +// The ListPipelines operation gets a list of the pipelines associated with +// the current AWS account. +func (c *ElasticTranscoder) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) { + req, out := c.ListPipelinesRequest(input) + err := req.Send() + return out, err +} + +func (c *ElasticTranscoder) ListPipelinesPages(input *ListPipelinesInput, fn func(p *ListPipelinesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPipelinesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPipelinesOutput), lastPage) + }) +} + +const opListPresets = "ListPresets" + +// ListPresetsRequest generates a request for the ListPresets operation. 
+func (c *ElasticTranscoder) ListPresetsRequest(input *ListPresetsInput) (req *request.Request, output *ListPresetsOutput) { + op := &request.Operation{ + Name: opListPresets, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/presets", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPresetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPresetsOutput{} + req.Data = output + return +} + +// The ListPresets operation gets a list of the default presets included with +// Elastic Transcoder and the presets that you've added in an AWS region. +func (c *ElasticTranscoder) ListPresets(input *ListPresetsInput) (*ListPresetsOutput, error) { + req, out := c.ListPresetsRequest(input) + err := req.Send() + return out, err +} + +func (c *ElasticTranscoder) ListPresetsPages(input *ListPresetsInput, fn func(p *ListPresetsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPresetsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPresetsOutput), lastPage) + }) +} + +const opReadJob = "ReadJob" + +// ReadJobRequest generates a request for the ReadJob operation. +func (c *ElasticTranscoder) ReadJobRequest(input *ReadJobInput) (req *request.Request, output *ReadJobOutput) { + op := &request.Operation{ + Name: opReadJob, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/jobs/{Id}", + } + + if input == nil { + input = &ReadJobInput{} + } + + req = c.newRequest(op, input, output) + output = &ReadJobOutput{} + req.Data = output + return +} + +// The ReadJob operation returns detailed information about a job. 
+func (c *ElasticTranscoder) ReadJob(input *ReadJobInput) (*ReadJobOutput, error) { + req, out := c.ReadJobRequest(input) + err := req.Send() + return out, err +} + +const opReadPipeline = "ReadPipeline" + +// ReadPipelineRequest generates a request for the ReadPipeline operation. +func (c *ElasticTranscoder) ReadPipelineRequest(input *ReadPipelineInput) (req *request.Request, output *ReadPipelineOutput) { + op := &request.Operation{ + Name: opReadPipeline, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/pipelines/{Id}", + } + + if input == nil { + input = &ReadPipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &ReadPipelineOutput{} + req.Data = output + return +} + +// The ReadPipeline operation gets detailed information about a pipeline. +func (c *ElasticTranscoder) ReadPipeline(input *ReadPipelineInput) (*ReadPipelineOutput, error) { + req, out := c.ReadPipelineRequest(input) + err := req.Send() + return out, err +} + +const opReadPreset = "ReadPreset" + +// ReadPresetRequest generates a request for the ReadPreset operation. +func (c *ElasticTranscoder) ReadPresetRequest(input *ReadPresetInput) (req *request.Request, output *ReadPresetOutput) { + op := &request.Operation{ + Name: opReadPreset, + HTTPMethod: "GET", + HTTPPath: "/2012-09-25/presets/{Id}", + } + + if input == nil { + input = &ReadPresetInput{} + } + + req = c.newRequest(op, input, output) + output = &ReadPresetOutput{} + req.Data = output + return +} + +// The ReadPreset operation gets detailed information about a preset. +func (c *ElasticTranscoder) ReadPreset(input *ReadPresetInput) (*ReadPresetOutput, error) { + req, out := c.ReadPresetRequest(input) + err := req.Send() + return out, err +} + +const opTestRole = "TestRole" + +// TestRoleRequest generates a request for the TestRole operation. 
+func (c *ElasticTranscoder) TestRoleRequest(input *TestRoleInput) (req *request.Request, output *TestRoleOutput) { + op := &request.Operation{ + Name: opTestRole, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/roleTests", + } + + if input == nil { + input = &TestRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &TestRoleOutput{} + req.Data = output + return +} + +// The TestRole operation tests the IAM role used to create the pipeline. +// +// The TestRole action lets you determine whether the IAM role you are using +// has sufficient permissions to let Elastic Transcoder perform tasks associated +// with the transcoding process. The action attempts to assume the specified +// IAM role, checks read access to the input and output buckets, and tries to +// send a test notification to Amazon SNS topics that you specify. +func (c *ElasticTranscoder) TestRole(input *TestRoleInput) (*TestRoleOutput, error) { + req, out := c.TestRoleRequest(input) + err := req.Send() + return out, err +} + +const opUpdatePipeline = "UpdatePipeline" + +// UpdatePipelineRequest generates a request for the UpdatePipeline operation. +func (c *ElasticTranscoder) UpdatePipelineRequest(input *UpdatePipelineInput) (req *request.Request, output *UpdatePipelineOutput) { + op := &request.Operation{ + Name: opUpdatePipeline, + HTTPMethod: "PUT", + HTTPPath: "/2012-09-25/pipelines/{Id}", + } + + if input == nil { + input = &UpdatePipelineInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdatePipelineOutput{} + req.Data = output + return +} + +// Use the UpdatePipeline operation to update settings for a pipeline. When +// you change pipeline settings, your changes take effect immediately. Jobs +// that you have already submitted and that Elastic Transcoder has not started +// to process are affected in addition to jobs that you submit after you change +// settings. 
+func (c *ElasticTranscoder) UpdatePipeline(input *UpdatePipelineInput) (*UpdatePipelineOutput, error) { + req, out := c.UpdatePipelineRequest(input) + err := req.Send() + return out, err +} + +const opUpdatePipelineNotifications = "UpdatePipelineNotifications" + +// UpdatePipelineNotificationsRequest generates a request for the UpdatePipelineNotifications operation. +func (c *ElasticTranscoder) UpdatePipelineNotificationsRequest(input *UpdatePipelineNotificationsInput) (req *request.Request, output *UpdatePipelineNotificationsOutput) { + op := &request.Operation{ + Name: opUpdatePipelineNotifications, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/pipelines/{Id}/notifications", + } + + if input == nil { + input = &UpdatePipelineNotificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdatePipelineNotificationsOutput{} + req.Data = output + return +} + +// With the UpdatePipelineNotifications operation, you can update Amazon Simple +// Notification Service (Amazon SNS) notifications for a pipeline. +// +// When you update notifications for a pipeline, Elastic Transcoder returns +// the values that you specified in the request. +func (c *ElasticTranscoder) UpdatePipelineNotifications(input *UpdatePipelineNotificationsInput) (*UpdatePipelineNotificationsOutput, error) { + req, out := c.UpdatePipelineNotificationsRequest(input) + err := req.Send() + return out, err +} + +const opUpdatePipelineStatus = "UpdatePipelineStatus" + +// UpdatePipelineStatusRequest generates a request for the UpdatePipelineStatus operation. 
+func (c *ElasticTranscoder) UpdatePipelineStatusRequest(input *UpdatePipelineStatusInput) (req *request.Request, output *UpdatePipelineStatusOutput) { + op := &request.Operation{ + Name: opUpdatePipelineStatus, + HTTPMethod: "POST", + HTTPPath: "/2012-09-25/pipelines/{Id}/status", + } + + if input == nil { + input = &UpdatePipelineStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdatePipelineStatusOutput{} + req.Data = output + return +} + +// The UpdatePipelineStatus operation pauses or reactivates a pipeline, so that +// the pipeline stops or restarts the processing of jobs. +// +// Changing the pipeline status is useful if you want to cancel one or more +// jobs. You can't cancel jobs after Elastic Transcoder has started processing +// them; if you pause the pipeline to which you submitted the jobs, you have +// more time to get the job IDs for the jobs that you want to cancel, and to +// send a CancelJob request. +func (c *ElasticTranscoder) UpdatePipelineStatus(input *UpdatePipelineStatusInput) (*UpdatePipelineStatusOutput, error) { + req, out := c.UpdatePipelineStatusRequest(input) + err := req.Send() + return out, err +} + +// The file to be used as album art. There can be multiple artworks associated +// with an audio file, to a maximum of 20. +// +// To remove artwork or leave the artwork empty, you can either set Artwork +// to null, or set the Merge Policy to "Replace" and use an empty Artwork array. +// +// To pass through existing artwork unchanged, set the Merge Policy to "Prepend", +// "Append", or "Fallback", and use an empty Artwork array. +type Artwork struct { + _ struct{} `type:"structure"` + + // The format of album art, if any. Valid formats are .jpg and .png. + AlbumArtFormat *string `type:"string"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your artwork. + Encryption *Encryption `type:"structure"` + + // The name of the file to be used as album art. 
To determine which Amazon S3 + // bucket contains the specified file, Elastic Transcoder checks the pipeline + // specified by PipelineId; the InputBucket object in that pipeline identifies + // the bucket. + // + // If the file name includes a prefix, for example, cooking/pie.jpg, include + // the prefix in the key. If the file isn't in the specified bucket, Elastic + // Transcoder returns an error. + InputKey *string `min:"1" type:"string"` + + // The maximum height of the output album art in pixels. If you specify auto, + // Elastic Transcoder uses 600 as the default value. If you specify a numeric + // value, enter an even integer between 32 and 3072, inclusive. + MaxHeight *string `type:"string"` + + // The maximum width of the output album art in pixels. If you specify auto, + // Elastic Transcoder uses 600 as the default value. If you specify a numeric + // value, enter an even integer between 32 and 4096, inclusive. + MaxWidth *string `type:"string"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder may add white bars + // to the top and bottom and/or left and right sides of the output album art + // to make the total size of the output art match the values that you specified + // for MaxWidth and MaxHeight. + PaddingPolicy *string `type:"string"` + + // Specify one of the following values to control scaling of the output album + // art: + // + // Fit: Elastic Transcoder scales the output art so it matches the value + // that you specified in either MaxWidth or MaxHeight without exceeding the + // other value. Fill: Elastic Transcoder scales the output art so it matches + // the value that you specified in either MaxWidth or MaxHeight and matches + // or exceeds the other value. Elastic Transcoder centers the output art and + // then crops it in the dimension (if any) that exceeds the maximum value. + // Stretch: Elastic Transcoder stretches the output art to match the values + // that you specified for MaxWidth and MaxHeight. 
If the relative proportions + // of the input art and the output art are different, the output art will be + // distorted. Keep: Elastic Transcoder does not scale the output art. If either + // dimension of the input art exceeds the values that you specified for MaxWidth + // and MaxHeight, Elastic Transcoder crops the output art. ShrinkToFit: Elastic + // Transcoder scales the output art down so that its dimensions match the values + // that you specified for at least one of MaxWidth and MaxHeight without exceeding + // either value. If you specify this option, Elastic Transcoder does not scale + // the art up. ShrinkToFill Elastic Transcoder scales the output art down so + // that its dimensions match the values that you specified for at least one + // of MaxWidth and MaxHeight without dropping below either value. If you specify + // this option, Elastic Transcoder does not scale the art up. + SizingPolicy *string `type:"string"` +} + +// String returns the string representation +func (s Artwork) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Artwork) GoString() string { + return s.String() +} + +// Options associated with your audio codec. +type AudioCodecOptions struct { + _ struct{} `type:"structure"` + + // You can only choose an audio bit depth when you specify flac or pcm for the + // value of Audio:Codec. + // + // The bit depth of a sample is how many bits of information are included in + // the audio samples. The higher the bit depth, the better the audio, but the + // larger the file. + // + // Valid values are 16 and 24. + // + // The most common bit depth is 24. + BitDepth *string `type:"string"` + + // You can only choose an audio bit order when you specify pcm for the value + // of Audio:Codec. + // + // The order the bits of a PCM sample are stored in. + // + // The supported value is LittleEndian. 
+ BitOrder *string `type:"string"` + + // You can only choose an audio profile when you specify AAC for the value of + // Audio:Codec. + // + // Specify the AAC profile for the output file. Elastic Transcoder supports + // the following profiles: + // + // auto: If you specify auto, Elastic Transcoder will select the profile + // based on the bit rate selected for the output file. AAC-LC: The most common + // AAC profile. Use for bit rates larger than 64 kbps. HE-AAC: Not supported + // on some older players and devices. Use for bit rates between 40 and 80 kbps. + // HE-AACv2: Not supported on some players and devices. Use for bit rates less + // than 48 kbps. All outputs in a Smooth playlist must have the same value + // for Profile. + // + // If you created any presets before AAC profiles were added, Elastic Transcoder + // automatically updated your presets to use AAC-LC. You can change the value + // as required. + Profile *string `type:"string"` + + // You can only choose whether an audio sample is signed when you specify pcm + // for the value of Audio:Codec. + // + // Whether audio samples are represented with negative and positive numbers + // (signed) or only positive numbers (unsigned). + // + // The supported value is Signed. + Signed *string `type:"string"` +} + +// String returns the string representation +func (s AudioCodecOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AudioCodecOptions) GoString() string { + return s.String() +} + +// Parameters required for transcoding audio. +type AudioParameters struct { + _ struct{} `type:"structure"` + + // The method of organizing audio channels and tracks. Use Audio:Channels to + // specify the number of channels in your output, and Audio:AudioPackingMode + // to specify the number of tracks and their relation to the channels. If you + // do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack. 
+ // + // The following values are valid: + // + // SingleTrack, OneChannelPerTrack, and OneChannelPerTrackWithMosTo8Tracks + // + // When you specify SingleTrack, Elastic Transcoder creates a single track + // for your output. The track can have up to eight channels. Use SingleTrack + // for all non-mxf containers. + // + // The outputs of SingleTrack for a specific channel value and inputs are as + // follows: + // + // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel + // with any input with audio: One track with one channel, downmixed if necessary + // 2 channels with one track with one channel: One track with two identical + // channels 2 or auto channels with two tracks with one channel each: One + // track with two channels 2 or auto channels with one track with two channels: + // One track with two channels 2 channels with one track with multiple channels: + // One track with two channels auto channels with one track with one channel: + // One track with one channel auto channels with one track with multiple channels: + // One track with multiple channels When you specify OneChannelPerTrack, Elastic + // Transcoder creates a new track for every channel in your output. Your output + // can have up to eight single-channel tracks. 
+ // + // The outputs of OneChannelPerTrack for a specific channel value and inputs + // are as follows: + // + // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel + // with any input with audio: One track with one channel, downmixed if necessary + // 2 channels with one track with one channel: Two tracks with one identical + // channel each 2 or auto channels with two tracks with one channel each: + // Two tracks with one channel each 2 or auto channels with one track with + // two channels: Two tracks with one channel each 2 channels with one track + // with multiple channels: Two tracks with one channel each auto channels + // with one track with one channel: One track with one channel auto channels + // with one track with multiple channels: Up to eight tracks with one channel + // each When you specify OneChannelPerTrackWithMosTo8Tracks, Elastic Transcoder + // creates eight single-channel tracks for your output. All tracks that do not + // contain audio data from an input channel are MOS, or Mit Out Sound, tracks. 
+ // + // The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel + // value and inputs are as follows: + // + // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel + // with any input with audio: One track with one channel, downmixed if necessary, + // plus six MOS tracks 2 channels with one track with one channel: Two tracks + // with one identical channel each, plus six MOS tracks 2 or auto channels + // with two tracks with one channel each: Two tracks with one channel each, + // plus six MOS tracks 2 or auto channels with one track with two channels: + // Two tracks with one channel each, plus six MOS tracks 2 channels with one + // track with multiple channels: Two tracks with one channel each, plus six + // MOS tracks auto channels with one track with one channel: One track with + // one channel, plus seven MOS tracks auto channels with one track with multiple + // channels: Up to eight tracks with one channel each, plus MOS tracks until + // there are eight tracks in all + AudioPackingMode *string `type:"string"` + + // The bit rate of the audio stream in the output file, in kilobits/second. + // Enter an integer between 64 and 320, inclusive. + BitRate *string `type:"string"` + + // The number of audio channels in the output file. The following values are + // valid: + // + // auto, 0, 1, 2 + // + // One channel carries the information played by a single speaker. For example, + // a stereo track with two channels sends one channel to the left speaker, and + // the other channel to the right speaker. The output channels are organized + // into tracks. If you want Elastic Transcoder to automatically detect the number + // of audio channels in the input file and use that value for the output file, + // select auto. 
+ // + // The output of a specific channel value and inputs are as follows: + // + // auto channel specified, with any input: Pass through up to eight input + // channels. 0 channels specified, with any input: Audio omitted from the output. + // 1 channel specified, with at least one input channel: Mono sound. 2 channels + // specified, with any input: Two identical mono channels or stereo. For more + // information about tracks, see Audio:AudioPackingMode. For more information + // about how Elastic Transcoder organizes channels and tracks, see Audio:AudioPackingMode. + Channels *string `type:"string"` + + // The audio codec for the output file. Valid values include aac, flac, mp2, + // mp3, pcm, and vorbis. + Codec *string `type:"string"` + + // If you specified AAC for Audio:Codec, this is the AAC compression profile + // to use. Valid values include: + // + // auto, AAC-LC, HE-AAC, HE-AACv2 + // + // If you specify auto, Elastic Transcoder chooses a profile based on the bit + // rate of the output file. + CodecOptions *AudioCodecOptions `type:"structure"` + + // The sample rate of the audio stream in the output file, in Hertz. Valid values + // include: + // + // auto, 22050, 32000, 44100, 48000, 96000 + // + // If you specify auto, Elastic Transcoder automatically detects the sample + // rate. + SampleRate *string `type:"string"` +} + +// String returns the string representation +func (s AudioParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AudioParameters) GoString() string { + return s.String() +} + +// The CancelJobRequest structure. +type CancelJobInput struct { + _ struct{} `type:"structure"` + + // The identifier of the job that you want to cancel. + // + // To get a list of the jobs (including their jobId) that have a status of + // Submitted, use the ListJobsByStatus API action. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelJobInput) GoString() string { + return s.String() +} + +// The response body contains a JSON object. If the job is successfully canceled, +// the value of Success is true. +type CancelJobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelJobOutput) GoString() string { + return s.String() +} + +// The file format of the output captions. If you leave this value blank, Elastic +// Transcoder returns an error. +type CaptionFormat struct { + _ struct{} `type:"structure"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your caption formats. + Encryption *Encryption `type:"structure"` + + // The format you specify determines whether Elastic Transcoder generates an + // embedded or sidecar caption for this output. + // + // Valid Embedded Caption Formats: + // + // for FLAC: None + // + // For MP3: None + // + // For MP4: mov-text + // + // For MPEG-TS: None + // + // For ogg: None + // + // For webm: None + // + // Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first + // div element only), scc, srt, and webvtt. If you want ttml or smpte-tt compatible + // captions, specify dfxp as your output format. + // + // For FMP4: dfxp + // + // Non-FMP4 outputs: All sidecar types + // + // fmp4 captions have an extension of .ismt + Format *string `type:"string"` + + // The prefix for caption filenames, in the form description-{language}, where: + // + // description is a description of the video. 
{language} is a literal value + // that Elastic Transcoder replaces with the two- or three-letter code for the + // language of the caption in the output file names. If you don't include {language} + // in the file name pattern, Elastic Transcoder automatically appends "{language}" + // to the value that you specify for the description. In addition, Elastic Transcoder + // automatically appends the count to the end of the segment files. + // + // For example, suppose you're transcoding into srt format. When you enter + // "Sydney-{language}-sunrise", and the language of the captions is English + // (en), the name of the first caption file will be Sydney-en-sunrise00000.srt. + Pattern *string `type:"string"` +} + +// String returns the string representation +func (s CaptionFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CaptionFormat) GoString() string { + return s.String() +} + +// A source file for the input sidecar captions used during the transcoding +// process. +type CaptionSource struct { + _ struct{} `type:"structure"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your caption sources. + Encryption *Encryption `type:"structure"` + + // The name of the sidecar caption file that you want Elastic Transcoder to + // include in the output file. + Key *string `min:"1" type:"string"` + + // The label of the caption shown in the player when choosing a language. We + // recommend that you put the caption language name here, in the language of + // the captions. + Label *string `min:"1" type:"string"` + + // A string that specifies the language of the caption. Specify this as one + // of: + // + // 2-character ISO 639-1 code + // + // 3-character ISO 639-2 code + // + // For more information on ISO language codes and language names, see the + // List of ISO 639-1 codes. 
+ Language *string `min:"1" type:"string"` + + // For clip generation or captions that do not start at the same time as the + // associated video file, the TimeOffset tells Elastic Transcoder how much of + // the video to encode before including captions. + // + // Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss. + TimeOffset *string `type:"string"` +} + +// String returns the string representation +func (s CaptionSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CaptionSource) GoString() string { + return s.String() +} + +// The captions to be created, if any. +type Captions struct { + _ struct{} `type:"structure"` + + // The array of file formats for the output captions. If you leave this value + // blank, Elastic Transcoder returns an error. + CaptionFormats []*CaptionFormat `type:"list"` + + // Source files for the input sidecar captions used during the transcoding process. + // To omit all sidecar captions, leave CaptionSources blank. + CaptionSources []*CaptionSource `type:"list"` + + // A policy that determines how Elastic Transcoder handles the existence of + // multiple captions. + // + // MergeOverride: Elastic Transcoder transcodes both embedded and sidecar + // captions into outputs. If captions for a language are embedded in the input + // file and also appear in a sidecar file, Elastic Transcoder uses the sidecar + // captions and ignores the embedded captions for that language. + // + // MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions + // into outputs. If captions for a language are embedded in the input file and + // also appear in a sidecar file, Elastic Transcoder uses the embedded captions + // and ignores the sidecar captions for that language. If CaptionSources is + // empty, Elastic Transcoder omits all sidecar captions from the output files. 
+ // + // Override: Elastic Transcoder transcodes only the sidecar captions that you + // specify in CaptionSources. + // + // MergePolicy cannot be null. + MergePolicy *string `type:"string"` +} + +// String returns the string representation +func (s Captions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Captions) GoString() string { + return s.String() +} + +// Settings for one clip in a composition. All jobs in a playlist must have +// the same clip settings. +type Clip struct { + _ struct{} `type:"structure"` + + // Settings that determine when a clip begins and how long it lasts. + TimeSpan *TimeSpan `type:"structure"` +} + +// String returns the string representation +func (s Clip) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Clip) GoString() string { + return s.String() +} + +// The CreateJobRequest structure. +type CreateJobInput struct { + _ struct{} `type:"structure"` + + // A section of the request body that provides information about the file that + // is being transcoded. + Input *JobInput `type:"structure" required:"true"` + + // The CreateJobOutput structure. + Output *CreateJobOutput `type:"structure"` + + // The value, if any, that you want Elastic Transcoder to prepend to the names + // of all files that this job creates, including output files, thumbnails, and + // playlists. + OutputKeyPrefix *string `min:"1" type:"string"` + + // A section of the request body that provides information about the transcoded + // (target) files. We recommend that you use the Outputs syntax instead of the + // Output syntax. + Outputs []*CreateJobOutput `type:"list"` + + // The Id of the pipeline that you want Elastic Transcoder to use for transcoding. 
+ // The pipeline determines several settings, including the Amazon S3 bucket + // from which Elastic Transcoder gets the files to transcode and the bucket + // into which Elastic Transcoder puts the transcoded files. + PipelineId *string `type:"string" required:"true"` + + // If you specify a preset in PresetId for which the value of Container is fmp4 + // (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the + // master playlists that you want Elastic Transcoder to create. + // + // The maximum number of master playlists in a job is 30. + Playlists []*CreateJobPlaylist `type:"list"` + + // User-defined metadata that you want to associate with an Elastic Transcoder + // job. You specify metadata in key/value pairs, and you can add up to 10 key/value + // pairs per job. Elastic Transcoder does not guarantee that key/value pairs + // will be returned in the same order in which you specify them. + UserMetadata map[string]*string `type:"map"` +} + +// String returns the string representation +func (s CreateJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobInput) GoString() string { + return s.String() +} + +// The CreateJobOutput structure. +type CreateJobOutput struct { + _ struct{} `type:"structure"` + + // Information about the album art that you want Elastic Transcoder to add to + // the file during transcoding. You can specify up to twenty album artworks + // for each output. Settings for each artwork must be defined in the job for + // the current output. + AlbumArt *JobAlbumArt `type:"structure"` + + // You can configure Elastic Transcoder to transcode captions, or subtitles, + // from one format to another. All captions must be in UTF-8. Elastic Transcoder + // supports two types of captions: + // + // Embedded: Embedded captions are included in the same file as the audio + // and video. 
Elastic Transcoder supports only one embedded caption per language, + // to a maximum of 300 embedded captions per file. + // + // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), + // CEA-708 (EIA-708, first non-empty channel only), and mov-text + // + // Valid outputs include: mov-text + // + // Elastic Transcoder supports a maximum of one embedded format per output. + // + // Sidecar: Sidecar captions are kept in a separate metadata file from the + // audio and video data. Sidecar captions require a player that is capable of + // understanding the relationship between the video file and the sidecar file. + // Elastic Transcoder supports only one sidecar caption per language, to a maximum + // of 20 sidecar captions per file. + // + // Valid input values include: dfxp (first div element only), ebu-tt, scc, + // smpt, srt, ttml (first div element only), and webvtt + // + // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // + // If you want ttml or smpte-tt compatible captions, specify dfxp as your + // output format. + // + // Elastic Transcoder does not support OCR (Optical Character Recognition), + // does not accept pictures as a valid input for captions, and is not available + // for audio-only transcoding. Elastic Transcoder does not preserve text formatting + // (for example, italics) during the transcoding process. + // + // To remove captions or leave the captions empty, set Captions to null. To + // pass through existing captions unchanged, set the MergePolicy to MergeRetain, + // and pass in a null CaptionSources array. + // + // For more information on embedded files, see the Subtitles Wikipedia page. + // + // For more information on sidecar files, see the Extensible Metadata Platform + // and Sidecar file Wikipedia pages. + Captions *Captions `type:"structure"` + + // You can create an output file that contains an excerpt from the input file. 
+ // This excerpt, called a clip, can come from the beginning, middle, or end + // of the file. The Composition object contains settings for the clips that + // make up an output file. For the current release, you can only specify settings + // for a single clip per output file. The Composition object cannot be null. + Composition []*Clip `type:"list"` + + // You can specify encryption settings for any output files that you want to + // use for a transcoding job. This includes the output file and any watermarks, + // thumbnails, album art, or captions that you want to use. You must specify + // encryption settings for each file individually. + Encryption *Encryption `type:"structure"` + + // The name to assign to the transcoded file. Elastic Transcoder saves the file + // in the Amazon S3 bucket specified by the OutputBucket object in the pipeline + // that is specified by the pipeline ID. If a file with the specified name already + // exists in the output bucket, the job fails. + Key *string `min:"1" type:"string"` + + // The Id of the preset to use for this job. The preset determines the audio, + // video, and thumbnail settings that Elastic Transcoder uses for transcoding. + PresetId *string `type:"string"` + + // The number of degrees clockwise by which you want Elastic Transcoder to rotate + // the output relative to the input. Enter one of the following values: auto, + // 0, 90, 180, 270. The value auto generally works only if the file that you're + // transcoding contains rotation metadata. + Rotate *string `type:"string"` + + // (Outputs in Fragmented MP4 or MPEG-TS format only.If you specify a preset + // in PresetId for which the value of Container is fmp4 (Fragmented MP4) or + // ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment + // in seconds. For HLSv3 format playlists, each media segment is stored in a + // separate .ts file. For HLSv4 and Smooth playlists, all media segments for + // an output are stored in a single file. 
Each segment is approximately the + // length of the SegmentDuration, though individual segments might be shorter + // or longer. + // + // The range of valid values is 1 to 60 seconds. If the duration of the video + // is not evenly divisible by SegmentDuration, the duration of the last segment + // is the remainder of total length/SegmentDuration. + // + // Elastic Transcoder creates an output-specific playlist for each output HLS + // output that you specify in OutputKeys. To add an output to the master playlist + // for this job, include it in the OutputKeys of the associated playlist. + SegmentDuration *string `type:"string"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your thumbnail. + ThumbnailEncryption *Encryption `type:"structure"` + + // Whether you want Elastic Transcoder to create thumbnails for your videos + // and, if so, how you want Elastic Transcoder to name the files. + // + // If you don't want Elastic Transcoder to create thumbnails, specify "". + // + // If you do want Elastic Transcoder to create thumbnails, specify the information + // that you want to include in the file name for each thumbnail. You can specify + // the following values in any sequence: + // + // {count} (Required): If you want to create thumbnails, you must include + // {count} in the ThumbnailPattern object. Wherever you specify {count}, Elastic + // Transcoder adds a five-digit sequence number (beginning with 00001) to thumbnail + // file names. The number indicates where a given thumbnail appears in the sequence + // of thumbnails for a transcoded file. + // + // If you specify a literal value and/or {resolution} but you omit {count}, + // Elastic Transcoder returns a validation error and does not create the job. + // Literal values (Optional): You can specify literal values anywhere in + // the ThumbnailPattern object. 
For example, you can include them as a file + // name prefix or as a delimiter between {resolution} and {count}. + // + // {resolution} (Optional): If you want Elastic Transcoder to include the + // resolution in the file name, include {resolution} in the ThumbnailPattern + // object. + // + // When creating thumbnails, Elastic Transcoder automatically saves the files + // in the format (.jpg or .png) that appears in the preset that you specified + // in the PresetID value of CreateJobOutput. Elastic Transcoder also appends + // the applicable file name extension. + ThumbnailPattern *string `type:"string"` + + // Information about the watermarks that you want Elastic Transcoder to add + // to the video during transcoding. You can specify up to four watermarks for + // each output. Settings for each watermark must be defined in the preset for + // the current output. + Watermarks []*JobWatermark `type:"list"` +} + +// String returns the string representation +func (s CreateJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobOutput) GoString() string { + return s.String() +} + +// Information about the master playlist. +type CreateJobPlaylist struct { + _ struct{} `type:"structure"` + + // The format of the output playlist. Valid formats include HLSv3, HLSv4, and + // Smooth. + Format *string `type:"string"` + + // The HLS content protection settings, if any, that you want Elastic Transcoder + // to apply to the output files associated with this playlist. + HlsContentProtection *HlsContentProtection `type:"structure"` + + // The name that you want Elastic Transcoder to assign to the master playlist, + // for example, nyc-vacation.m3u8. If the name includes a / character, the section + // of the name before the last / must be identical for all Name objects. If + // you create more than one master playlist, the values of all Name objects + // must be unique. 
+ // + // Note: Elastic Transcoder automatically appends the relevant file extension + // to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc + // for Smooth playlists). If you include a file extension in Name, the file + // name will have two extensions. + Name *string `min:"1" type:"string"` + + // For each output in this job that you want to include in a master playlist, + // the value of the Outputs:Key object. + // + // If your output is not HLS or does not have a segment duration set, the + // name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key: + // + // OutputKeyPrefixOutputs:Key + // + // If your output is HLSv3 and has a segment duration set, or is not included + // in a playlist, Elastic Transcoder creates an output playlist file with a + // file extension of .m3u8, and a series of .ts files that include a five-digit + // sequential counter beginning with 00000: + // + // OutputKeyPrefixOutputs:Key.m3u8 + // + // OutputKeyPrefixOutputs:Key00000.ts + // + // If your output is HLSv4, has a segment duration set, and is included in + // an HLSv4 playlist, Elastic Transcoder creates an output playlist file with + // a file extension of _v4.m3u8. If the output is video, Elastic Transcoder + // also creates an output file with an extension of _iframe.m3u8: + // + // OutputKeyPrefixOutputs:Key_v4.m3u8 + // + // OutputKeyPrefixOutputs:Key_iframe.m3u8 + // + // OutputKeyPrefixOutputs:Key.ts + // + // Elastic Transcoder automatically appends the relevant file extension to + // the file name. If you include a file extension in Output Key, the file name + // will have two extensions. + // + // If you include more than one output in a playlist, any segment duration + // settings, clip settings, or caption settings must be the same for all outputs + // in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, + // and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for + // all outputs. 
+ OutputKeys []*string `type:"list"` + + // The DRM settings, if any, that you want Elastic Transcoder to apply to the + // output files associated with this playlist. + PlayReadyDrm *PlayReadyDrm `type:"structure"` +} + +// String returns the string representation +func (s CreateJobPlaylist) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobPlaylist) GoString() string { + return s.String() +} + +// The CreateJobResponse structure. +type CreateJobResponse struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the job that + // is created. + Job *Job `type:"structure"` +} + +// String returns the string representation +func (s CreateJobResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobResponse) GoString() string { + return s.String() +} + +// The CreatePipelineRequest structure. +type CreatePipelineInput struct { + _ struct{} `type:"structure"` + + // The AWS Key Management Service (AWS KMS) key that you want to use with this + // pipeline. + // + // If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need + // to provide a key with your job because a default key, known as an AWS-KMS + // key, is created for you automatically. You need to provide an AWS-KMS key + // only if you want to use a non-default AWS-KMS key, or if you are using an + // Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM. + AwsKmsKeyArn *string `type:"string"` + + // The optional ContentConfig object specifies information about the Amazon + // S3 bucket in which you want Elastic Transcoder to save transcoded files and + // playlists: which bucket to use, which users you want to have access to the + // files, the type of access you want users to have, and the storage class that + // you want to assign to the files. 
+ // + // If you specify values for ContentConfig, you must also specify values for + // ThumbnailConfig. + // + // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket + // object. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions (Optional): The Permissions + // object specifies which users you want to have access to transcoded files + // and the type of access you want them to have. You can grant permissions to + // a maximum of 30 users and/or predefined Amazon S3 groups. Grantee Type: + // Specify the type of value that appears in the Grantee object: Canonical: + // The value in the Grantee object is either the canonical user ID for an AWS + // account or an origin access identity for an Amazon CloudFront distribution. + // For more information about canonical user IDs, see Access Control List (ACL) + // Overview in the Amazon Simple Storage Service Developer Guide. For more information + // about using CloudFront origin access identities to require that users use + // CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity + // to Restrict Access to Your Amazon S3 Content. A canonical user ID is not + // the same as an AWS account number. Email: The value in the Grantee object + // is the registered email address of an AWS account. Group: The value in the + // Grantee object is one of the following predefined Amazon S3 groups: AllUsers, + // AuthenticatedUsers, or LogDelivery. Grantee: The AWS user or group that + // you want to have access to transcoded files and playlists. To identify the + // user or group, you can specify the canonical user ID for an AWS account, + // an origin access identity for a CloudFront distribution, the registered email + // address of an AWS account, or a predefined Amazon S3 group Access: The + // permission that you want to give to the AWS user that you specified in Grantee. 
+ // Permissions are granted on the files that Elastic Transcoder adds to the + // bucket, including playlists and video files. Valid values include: READ: + // The grantee can read the objects and metadata for objects that Elastic Transcoder + // adds to the Amazon S3 bucket. READ_ACP: The grantee can read the object + // ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // The grantee can write the ACL for the objects that Elastic Transcoder adds + // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and + // WRITE_ACP permissions for the objects that Elastic Transcoder adds to the + // Amazon S3 bucket. StorageClass: The Amazon S3 storage class, Standard + // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video + // files and playlists that it stores in your Amazon S3 bucket. + ContentConfig *PipelineOutputConfig `type:"structure"` + + // The Amazon S3 bucket in which you saved the media files that you want to + // transcode. + InputBucket *string `type:"string" required:"true"` + + // The name of the pipeline. We recommend that the name be unique within the + // AWS account, but uniqueness is not enforced. + // + // Constraints: Maximum 40 characters. + Name *string `min:"1" type:"string" required:"true"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to + // notify to report job status. + // + // To receive notifications, you must also subscribe to the new topic in the + // Amazon SNS console. Progressing: The topic ARN for the Amazon Simple Notification + // Service (Amazon SNS) topic that you want to notify when Elastic Transcoder + // has started to process a job in this pipeline. This is the ARN that Amazon + // SNS returned when you created the topic. For more information, see Create + // a Topic in the Amazon Simple Notification Service Developer Guide. 
Completed: + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic + // Transcoder has finished processing a job in this pipeline. This is the ARN + // that Amazon SNS returned when you created the topic. Warning: The topic + // ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder + // encounters a warning condition while processing a job in this pipeline. This + // is the ARN that Amazon SNS returned when you created the topic. Error: The + // topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder + // encounters an error condition while processing a job in this pipeline. This + // is the ARN that Amazon SNS returned when you created the topic. + Notifications *Notifications `type:"structure"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded + // files. (Use this, or use ContentConfig:Bucket plus ThumbnailConfig:Bucket.) + // + // Specify this value when all of the following are true: You want to save + // transcoded files, thumbnails (if any), and playlists (if any) together in + // one bucket. You do not want to specify the users or groups who have access + // to the transcoded files, thumbnails, and playlists. You do not want to specify + // the permissions that Elastic Transcoder grants to the files. When Elastic + // Transcoder saves files in OutputBucket, it grants full control over the files + // only to the AWS account that owns the role that is specified by Role. You + // want to associate the transcoded files and thumbnails with the Amazon S3 + // Standard storage class. + // + // If you want to save transcoded files and playlists in one bucket and thumbnails + // in another bucket, specify which users can access the transcoded files or + // the permissions the users have, or change the Amazon S3 storage class, omit + // OutputBucket and specify values for ContentConfig and ThumbnailConfig instead. 
+ OutputBucket *string `type:"string"` + + // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder + // to use to create the pipeline. + Role *string `type:"string" required:"true"` + + // The ThumbnailConfig object specifies several values, including the Amazon + // S3 bucket in which you want Elastic Transcoder to save thumbnail files, which + // users you want to have access to the files, the type of access you want users + // to have, and the storage class that you want to assign to the files. + // + // If you specify values for ContentConfig, you must also specify values for + // ThumbnailConfig even if you don't want to create thumbnails. + // + // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket + // object. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // thumbnail files. Permissions (Optional): The Permissions object specifies + // which users and/or predefined Amazon S3 groups you want to have access to + // thumbnail files, and the type of access you want them to have. You can grant + // permissions to a maximum of 30 users and/or predefined Amazon S3 groups. + // GranteeType: Specify the type of value that appears in the Grantee object: + // Canonical: The value in the Grantee object is either the canonical user + // ID for an AWS account or an origin access identity for an Amazon CloudFront + // distribution. A canonical user ID is not the same as an AWS account number. + // Email: The value in the Grantee object is the registered email address + // of an AWS account. Group: The value in the Grantee object is one of the + // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Grantee: The AWS user or group that you want to have access to thumbnail + // files. 
To identify the user or group, you can specify the canonical user + // ID for an AWS account, an origin access identity for a CloudFront distribution, + // the registered email address of an AWS account, or a predefined Amazon S3 + // group. Access: The permission that you want to give to the AWS user that + // you specified in Grantee. Permissions are granted on the thumbnail files + // that Elastic Transcoder adds to the bucket. Valid values include: READ: + // The grantee can read the thumbnails and metadata for objects that Elastic + // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read + // the object ACL for thumbnails that Elastic Transcoder adds to the Amazon + // S3 bucket. WRITE_ACP: The grantee can write the ACL for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The + // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. StorageClass: The + // Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic + // Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket. + ThumbnailConfig *PipelineOutputConfig `type:"structure"` +} + +// String returns the string representation +func (s CreatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineInput) GoString() string { + return s.String() +} + +// When you create a pipeline, Elastic Transcoder returns the values that you +// specified in the request. +type CreatePipelineOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the pipeline + // that is created. + Pipeline *Pipeline `type:"structure"` + + // Elastic Transcoder returns a warning if the resources used by your pipeline + // are not in the same region as the pipeline. 
+ // + // Using resources in the same region, such as your Amazon S3 buckets, Amazon + // SNS notification topics, and AWS KMS key, reduces processing time and prevents + // cross-regional charges. + Warnings []*Warning `type:"list"` +} + +// String returns the string representation +func (s CreatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePipelineOutput) GoString() string { + return s.String() +} + +// The CreatePresetRequest structure. +type CreatePresetInput struct { + _ struct{} `type:"structure"` + + // A section of the request body that specifies the audio parameters. + Audio *AudioParameters `type:"structure"` + + // The container type for the output file. Valid values include flac, flv, fmp4, + // gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm. + Container *string `type:"string" required:"true"` + + // A description of the preset. + Description *string `type:"string"` + + // The name of the preset. We recommend that the name be unique within the AWS + // account, but uniqueness is not enforced. + Name *string `min:"1" type:"string" required:"true"` + + // A section of the request body that specifies the thumbnail parameters, if + // any. + Thumbnails *Thumbnails `type:"structure"` + + // A section of the request body that specifies the video parameters. + Video *VideoParameters `type:"structure"` +} + +// String returns the string representation +func (s CreatePresetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePresetInput) GoString() string { + return s.String() +} + +// The CreatePresetResponse structure. +type CreatePresetOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the preset + // that is created. 
+ Preset *Preset `type:"structure"` + + // If the preset settings don't comply with the standards for the video codec + // but Elastic Transcoder created the preset, this message explains the reason + // the preset settings don't meet the standard. Elastic Transcoder created the + // preset because the settings might produce acceptable output. + Warning *string `type:"string"` +} + +// String returns the string representation +func (s CreatePresetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePresetOutput) GoString() string { + return s.String() +} + +// The DeletePipelineRequest structure. +type DeletePipelineInput struct { + _ struct{} `type:"structure"` + + // The identifier of the pipeline that you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineInput) GoString() string { + return s.String() +} + +// The DeletePipelineResponse structure. +type DeletePipelineOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePipelineOutput) GoString() string { + return s.String() +} + +// The DeletePresetRequest structure. +type DeletePresetInput struct { + _ struct{} `type:"structure"` + + // The identifier of the preset for which you want to get detailed information. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePresetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePresetInput) GoString() string { + return s.String() +} + +// The DeletePresetResponse structure. +type DeletePresetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePresetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePresetOutput) GoString() string { + return s.String() +} + +// The detected properties of the input file. Elastic Transcoder identifies +// these values from the input file. +type DetectedProperties struct { + _ struct{} `type:"structure"` + + // The detected duration of the input file, in milliseconds. + DurationMillis *int64 `type:"long"` + + // The detected file size of the input file, in bytes. + FileSize *int64 `type:"long"` + + // The detected frame rate of the input file, in frames per second. + FrameRate *string `type:"string"` + + // The detected height of the input file, in pixels. + Height *int64 `type:"integer"` + + // The detected width of the input file, in pixels. + Width *int64 `type:"integer"` +} + +// String returns the string representation +func (s DetectedProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetectedProperties) GoString() string { + return s.String() +} + +// The encryption settings, if any, that are used for decrypting your input +// files or encrypting your output files. If your input file is encrypted, you +// must specify the mode that Elastic Transcoder will use to decrypt your file, +// otherwise you must specify the mode you want Elastic Transcoder to use to +// encrypt your output files. 
+type Encryption struct { + _ struct{} `type:"structure"` + + // The series of random bits created by a random bit generator, unique for every + // encryption operation, that you used to encrypt your input files or that you + // want Elastic Transcoder to use to encrypt your output files. The initialization + // vector must be base64-encoded, and it must be exactly 16 bytes long before + // being base64-encoded. + InitializationVector *string `type:"string"` + + // The data encryption key that you want Elastic Transcoder to use to encrypt + // your output file, or that was used to encrypt your input file. The key must + // be base64-encoded and it must be one of the following bit lengths before + // being base64-encoded: + // + // 128, 192, or 256. + // + // The key must also be encrypted by using the Amazon Key Management Service. + Key *string `type:"string"` + + // The MD5 digest of the key that you used to encrypt your input file, or that + // you want Elastic Transcoder to use to encrypt your output file. Elastic Transcoder + // uses the key digest as a checksum to make sure your key was not corrupted + // in transit. The key MD5 must be base64-encoded, and it must be exactly 16 + // bytes long before being base64-encoded. + KeyMd5 *string `type:"string"` + + // The specific server-side encryption mode that you want Elastic Transcoder + // to use when decrypting your input files or encrypting your output files. + // Elastic Transcoder supports the following options: + // + // S3: Amazon S3 creates and manages the keys used for encrypting your files. + // + // S3-AWS-KMS: Amazon S3 calls the Amazon Key Management Service, which creates + // and manages the keys that are used for encrypting your files. If you specify + // S3-AWS-KMS and you don't want to use the default key, you must add the AWS-KMS + // key that you want to use to your pipeline. + // + // AES-CBC-PKCS7: A padded cipher-block mode of operation originally used for + // HLS files. 
+ // + // AES-CTR: AES Counter Mode. + // + // AES-GCM: AES Galois Counter Mode, a mode of operation that is an authenticated + // encryption format, meaning that a file, key, or initialization vector that + // has been tampered with will fail the decryption process. + // + // For all three AES options, you must provide the following settings, which + // must be base64-encoded: + // + // Key + // + // Key MD5 + // + // Initialization Vector + // + // For the AES modes, your private encryption keys and your unencrypted data + // are never stored by AWS; therefore, it is important that you safely manage + // your encryption keys. If you lose them, you won't be able to unencrypt your + // data. + Mode *string `type:"string"` +} + +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() +} + +// The HLS content protection settings, if any, that you want Elastic Transcoder +// to apply to your output files. +type HlsContentProtection struct { + _ struct{} `type:"structure"` + + // If Elastic Transcoder is generating your key for you, you must leave this + // field blank. + // + // The series of random bits created by a random bit generator, unique for + // every encryption operation, that you want Elastic Transcoder to use to encrypt + // your output files. The initialization vector must be base64-encoded, and + // it must be exactly 16 bytes before being base64-encoded. + InitializationVector *string `type:"string"` + + // If you want Elastic Transcoder to generate a key for you, leave this field + // blank. + // + // If you choose to supply your own key, you must encrypt the key by using + // AWS KMS. The key must be base64-encoded, and it must be one of the following + // bit lengths before being base64-encoded: + // + // 128, 192, or 256. 
+	Key *string `type:"string"`
+
+	// If Elastic Transcoder is generating your key for you, you must leave this
+	// field blank.
+	//
+	// The MD5 digest of the key that you want Elastic Transcoder to use to encrypt
+	// your output file, and that you want Elastic Transcoder to use as a checksum
+	// to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded,
+	// and it must be exactly 16 bytes before being base64-encoded.
+	KeyMd5 *string `type:"string"`
+
+	// Specify whether you want Elastic Transcoder to write your HLS license key
+	// to an Amazon S3 bucket. If you choose WithVariantPlaylists, LicenseAcquisitionUrl
+	// must be left blank and Elastic Transcoder writes your data key into the same
+	// bucket as the associated playlist.
+	KeyStoragePolicy *string `type:"string"`
+
+	// The location of the license key required to decrypt your HLS playlist. The
+	// URL must be an absolute path, and is referenced in the URI attribute of the
+	// EXT-X-KEY metadata tag in the playlist file.
+	LicenseAcquisitionUrl *string `type:"string"`
+
+	// The content protection method for your output. The only valid value is: aes-128.
+	//
+	// This value will be written into the method attribute of the EXT-X-KEY metadata
+	// tag in the output playlist.
+	Method *string `type:"string"`
+}
+
+// String returns the string representation
+func (s HlsContentProtection) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HlsContentProtection) GoString() string {
+	return s.String()
+}
+
+// A section of the response body that provides information about the job that
+// is created.
+type Job struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) for the job.
+	Arn *string `type:"string"`
+
+	// The identifier that Elastic Transcoder assigned to the job. You use this
+	// value to get settings for the job or to delete the job.
+ Id *string `type:"string"` + + // A section of the request or response body that provides information about + // the file that is being transcoded. + Input *JobInput `type:"structure"` + + // If you specified one output for a job, information about that output. If + // you specified multiple outputs for a job, the Output object lists information + // about the first output. This duplicates the information that is listed for + // the first output in the Outputs object. + // + // Outputs recommended instead. A section of the request or response body that + // provides information about the transcoded (target) file. + Output *JobOutput `type:"structure"` + + // The value, if any, that you want Elastic Transcoder to prepend to the names + // of all files that this job creates, including output files, thumbnails, and + // playlists. We recommend that you add a / or some other delimiter to the end + // of the OutputKeyPrefix. + OutputKeyPrefix *string `min:"1" type:"string"` + + // Information about the output files. We recommend that you use the Outputs + // syntax for all jobs, even when you want Elastic Transcoder to transcode a + // file into only one format. Do not use both the Outputs and Output syntaxes + // in the same request. You can create a maximum of 30 outputs per job. + // + // If you specify more than one output for a job, Elastic Transcoder creates + // the files for each output in the order in which you specify them in the job. + Outputs []*JobOutput `type:"list"` + + // The Id of the pipeline that you want Elastic Transcoder to use for transcoding. + // The pipeline determines several settings, including the Amazon S3 bucket + // from which Elastic Transcoder gets the files to transcode and the bucket + // into which Elastic Transcoder puts the transcoded files. 
+ PipelineId *string `type:"string"` + + // Outputs in Fragmented MP4 or MPEG-TS format only.If you specify a preset + // in PresetId for which the value of Container is fmp4 (Fragmented MP4) or + // ts (MPEG-TS), Playlists contains information about the master playlists that + // you want Elastic Transcoder to create. + // + // The maximum number of master playlists in a job is 30. + Playlists []*Playlist `type:"list"` + + // The status of the job: Submitted, Progressing, Complete, Canceled, or Error. + Status *string `type:"string"` + + // Details about the timing of a job. + Timing *Timing `type:"structure"` + + // User-defined metadata that you want to associate with an Elastic Transcoder + // job. You specify metadata in key/value pairs, and you can add up to 10 key/value + // pairs per job. Elastic Transcoder does not guarantee that key/value pairs + // will be returned in the same order in which you specify them. + // + // Metadata keys and values must use characters from the following list: + // + // 0-9 + // + // A-Z and a-z + // + // Space + // + // The following symbols: _.:/=+-%@ + UserMetadata map[string]*string `type:"map"` +} + +// String returns the string representation +func (s Job) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Job) GoString() string { + return s.String() +} + +// The .jpg or .png file associated with an audio file. +type JobAlbumArt struct { + _ struct{} `type:"structure"` + + // The file to be used as album art. There can be multiple artworks associated + // with an audio file, to a maximum of 20. Valid formats are .jpg and .png + Artwork []*Artwork `type:"list"` + + // A policy that determines how Elastic Transcoder will handle the existence + // of multiple album artwork files. + // + // Replace: The specified album art will replace any existing album art. + // Prepend: The specified album art will be placed in front of any existing + // album art. 
Append: The specified album art will be placed after any existing + // album art. Fallback: If the original input file contains artwork, Elastic + // Transcoder will use that artwork for the output. If the original input does + // not contain artwork, Elastic Transcoder will use the specified album art + // file. + MergePolicy *string `type:"string"` +} + +// String returns the string representation +func (s JobAlbumArt) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobAlbumArt) GoString() string { + return s.String() +} + +// Information about the file that you're transcoding. +type JobInput struct { + _ struct{} `type:"structure"` + + // The aspect ratio of the input file. If you want Elastic Transcoder to automatically + // detect the aspect ratio of the input file, specify auto. If you want to specify + // the aspect ratio for the output file, enter one of the following values: + // + // 1:1, 4:3, 3:2, 16:9 + // + // If you specify a value other than auto, Elastic Transcoder disables automatic + // detection of the aspect ratio. + AspectRatio *string `type:"string"` + + // The container type for the input file. If you want Elastic Transcoder to + // automatically detect the container type of the input file, specify auto. + // If you want to specify the container type for the input file, enter one of + // the following values: + // + // 3gp, aac, asf, avi, divx, flv, m4a, mkv, mov, mp3, mp4, mpeg, mpeg-ps, + // mpeg-ts, mxf, ogg, vob, wav, webm + Container *string `type:"string"` + + // The detected properties of the input file. + DetectedProperties *DetectedProperties `type:"structure"` + + // The encryption settings, if any, that are used for decrypting your input + // files. If your input file is encrypted, you must specify the mode that Elastic + // Transcoder will use to decrypt your file. + Encryption *Encryption `type:"structure"` + + // The frame rate of the input file. 
If you want Elastic Transcoder to automatically
+	// detect the frame rate of the input file, specify auto. If you want to specify
+	// the frame rate for the input file, enter one of the following values:
+	//
+	// 10, 15, 23.97, 24, 25, 29.97, 30, 60
+	//
+	// If you specify a value other than auto, Elastic Transcoder disables automatic
+	// detection of the frame rate.
+	FrameRate *string `type:"string"`
+
+	// Whether the input file is interlaced. If you want Elastic Transcoder to automatically
+	// detect whether the input file is interlaced, specify auto. If you want to
+	// specify whether the input file is interlaced, enter one of the following
+	// values:
+	//
+	// true, false
+	//
+	// If you specify a value other than auto, Elastic Transcoder disables automatic
+	// detection of interlacing.
+	Interlaced *string `type:"string"`
+
+	// The name of the file to transcode. Elsewhere in the body of the JSON block
+	// is the ID of the pipeline to use for processing the job. The InputBucket
+	// object in that pipeline tells Elastic Transcoder which Amazon S3 bucket to
+	// get the file from.
+	//
+	// If the file name includes a prefix, such as cooking/lasagna.mpg, include
+	// the prefix in the key. If the file isn't in the specified bucket, Elastic
+	// Transcoder returns an error.
+	Key *string `min:"1" type:"string"`
+
+	// This value must be auto, which causes Elastic Transcoder to automatically
+	// detect the resolution of the input file.
+	Resolution *string `type:"string"`
+}
+
+// String returns the string representation
+func (s JobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JobInput) GoString() string {
+	return s.String()
+}
+
+// Outputs recommended instead.If you specified one output for a job, information
+// about that output. If you specified multiple outputs for a job, the Output
+// object lists information about the first output.
This duplicates the information +// that is listed for the first output in the Outputs object. +type JobOutput struct { + _ struct{} `type:"structure"` + + // The album art to be associated with the output file, if any. + AlbumArt *JobAlbumArt `type:"structure"` + + // If Elastic Transcoder used a preset with a ColorSpaceConversionMode to transcode + // the output file, the AppliedColorSpaceConversion parameter shows the conversion + // used. If no ColorSpaceConversionMode was defined in the preset, this parameter + // will not be included in the job response. + AppliedColorSpaceConversion *string `type:"string"` + + // You can configure Elastic Transcoder to transcode captions, or subtitles, + // from one format to another. All captions must be in UTF-8. Elastic Transcoder + // supports two types of captions: + // + // Embedded: Embedded captions are included in the same file as the audio + // and video. Elastic Transcoder supports only one embedded caption per language, + // to a maximum of 300 embedded captions per file. + // + // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), + // CEA-708 (EIA-708, first non-empty channel only), and mov-text + // + // Valid outputs include: mov-text + // + // Elastic Transcoder supports a maximum of one embedded format per output. + // + // Sidecar: Sidecar captions are kept in a separate metadata file from the + // audio and video data. Sidecar captions require a player that is capable of + // understanding the relationship between the video file and the sidecar file. + // Elastic Transcoder supports only one sidecar caption per language, to a maximum + // of 20 sidecar captions per file. + // + // Valid input values include: dfxp (first div element only), ebu-tt, scc, + // smpt, srt, ttml (first div element only), and webvtt + // + // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. 
+ // + // If you want ttml or smpte-tt compatible captions, specify dfxp as your + // output format. + // + // Elastic Transcoder does not support OCR (Optical Character Recognition), + // does not accept pictures as a valid input for captions, and is not available + // for audio-only transcoding. Elastic Transcoder does not preserve text formatting + // (for example, italics) during the transcoding process. + // + // To remove captions or leave the captions empty, set Captions to null. To + // pass through existing captions unchanged, set the MergePolicy to MergeRetain, + // and pass in a null CaptionSources array. + // + // For more information on embedded files, see the Subtitles Wikipedia page. + // + // For more information on sidecar files, see the Extensible Metadata Platform + // and Sidecar file Wikipedia pages. + Captions *Captions `type:"structure"` + + // You can create an output file that contains an excerpt from the input file. + // This excerpt, called a clip, can come from the beginning, middle, or end + // of the file. The Composition object contains settings for the clips that + // make up an output file. For the current release, you can only specify settings + // for a single clip per output file. The Composition object cannot be null. + Composition []*Clip `type:"list"` + + // Duration of the output file, in seconds. + Duration *int64 `type:"long"` + + // Duration of the output file, in milliseconds. + DurationMillis *int64 `type:"long"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your output files. If you choose to use encryption, you must specify a + // mode to use. If you choose not to use encryption, Elastic Transcoder will + // write an unencrypted file to your Amazon S3 bucket. + Encryption *Encryption `type:"structure"` + + // File size of the output file, in bytes. + FileSize *int64 `type:"long"` + + // Frame rate of the output file, in frames per second. 
+ FrameRate *string `type:"string"` + + // Height of the output file, in pixels. + Height *int64 `type:"integer"` + + // A sequential counter, starting with 1, that identifies an output among the + // outputs from the current job. In the Output syntax, this value is always + // 1. + Id *string `type:"string"` + + // The name to assign to the transcoded file. Elastic Transcoder saves the file + // in the Amazon S3 bucket specified by the OutputBucket object in the pipeline + // that is specified by the pipeline ID. + Key *string `min:"1" type:"string"` + + // The value of the Id object for the preset that you want to use for this job. + // The preset determines the audio, video, and thumbnail settings that Elastic + // Transcoder uses for transcoding. To use a preset that you created, specify + // the preset ID that Elastic Transcoder returned in the response when you created + // the preset. You can also use the Elastic Transcoder system presets, which + // you can get with ListPresets. + PresetId *string `type:"string"` + + // The number of degrees clockwise by which you want Elastic Transcoder to rotate + // the output relative to the input. Enter one of the following values: + // + // auto, 0, 90, 180, 270 + // + // The value auto generally works only if the file that you're transcoding + // contains rotation metadata. + Rotate *string `type:"string"` + + // (Outputs in Fragmented MP4 or MPEG-TS format only.If you specify a preset + // in PresetId for which the value of Container is fmp4 (Fragmented MP4) or + // ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment + // in seconds. For HLSv3 format playlists, each media segment is stored in a + // separate .ts file. For HLSv4 and Smooth playlists, all media segments for + // an output are stored in a single file. Each segment is approximately the + // length of the SegmentDuration, though individual segments might be shorter + // or longer. 
+ // + // The range of valid values is 1 to 60 seconds. If the duration of the video + // is not evenly divisible by SegmentDuration, the duration of the last segment + // is the remainder of total length/SegmentDuration. + // + // Elastic Transcoder creates an output-specific playlist for each output HLS + // output that you specify in OutputKeys. To add an output to the master playlist + // for this job, include it in the OutputKeys of the associated playlist. + SegmentDuration *string `type:"string"` + + // The status of one output in a job. If you specified only one output for the + // job, Outputs:Status is always the same as Job:Status. If you specified more + // than one output: Job:Status and Outputs:Status for all of the outputs is + // Submitted until Elastic Transcoder starts to process the first output. When + // Elastic Transcoder starts to process the first output, Outputs:Status for + // that output and Job:Status both change to Progressing. For each output, the + // value of Outputs:Status remains Submitted until Elastic Transcoder starts + // to process the output. Job:Status remains Progressing until all of the outputs + // reach a terminal status, either Complete or Error. When all of the outputs + // reach a terminal status, Job:Status changes to Complete only if Outputs:Status + // for all of the outputs is Complete. If Outputs:Status for one or more outputs + // is Error, the terminal status for Job:Status is also Error. The value of + // Status is one of the following: Submitted, Progressing, Complete, Canceled, + // or Error. + Status *string `type:"string"` + + // Information that further explains Status. + StatusDetail *string `type:"string"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your thumbnail. 
+ ThumbnailEncryption *Encryption `type:"structure"` + + // Whether you want Elastic Transcoder to create thumbnails for your videos + // and, if so, how you want Elastic Transcoder to name the files. + // + // If you don't want Elastic Transcoder to create thumbnails, specify "". + // + // If you do want Elastic Transcoder to create thumbnails, specify the information + // that you want to include in the file name for each thumbnail. You can specify + // the following values in any sequence: + // + // {count} (Required): If you want to create thumbnails, you must include + // {count} in the ThumbnailPattern object. Wherever you specify {count}, Elastic + // Transcoder adds a five-digit sequence number (beginning with 00001) to thumbnail + // file names. The number indicates where a given thumbnail appears in the sequence + // of thumbnails for a transcoded file. + // + // If you specify a literal value and/or {resolution} but you omit {count}, + // Elastic Transcoder returns a validation error and does not create the job. + // Literal values (Optional): You can specify literal values anywhere in + // the ThumbnailPattern object. For example, you can include them as a file + // name prefix or as a delimiter between {resolution} and {count}. + // + // {resolution} (Optional): If you want Elastic Transcoder to include the + // resolution in the file name, include {resolution} in the ThumbnailPattern + // object. + // + // When creating thumbnails, Elastic Transcoder automatically saves the files + // in the format (.jpg or .png) that appears in the preset that you specified + // in the PresetID value of CreateJobOutput. Elastic Transcoder also appends + // the applicable file name extension. + ThumbnailPattern *string `type:"string"` + + // Information about the watermarks that you want Elastic Transcoder to add + // to the video during transcoding. You can specify up to four watermarks for + // each output. 
Settings for each watermark must be defined in the preset that + // you specify in Preset for the current output. + // + // Watermarks are added to the output video in the sequence in which you list + // them in the job output—the first watermark in the list is added to the output + // video first, the second watermark in the list is added next, and so on. As + // a result, if the settings in a preset cause Elastic Transcoder to place all + // watermarks in the same location, the second watermark that you add will cover + // the first one, the third one will cover the second, and the fourth one will + // cover the third. + Watermarks []*JobWatermark `type:"list"` + + // Specifies the width of the output file in pixels. + Width *int64 `type:"integer"` +} + +// String returns the string representation +func (s JobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobOutput) GoString() string { + return s.String() +} + +// Watermarks can be in .png or .jpg format. If you want to display a watermark +// that is not rectangular, use the .png format, which supports transparency. +type JobWatermark struct { + _ struct{} `type:"structure"` + + // The encryption settings, if any, that you want Elastic Transcoder to apply + // to your watermarks. + Encryption *Encryption `type:"structure"` + + // The name of the .png or .jpg file that you want to use for the watermark. + // To determine which Amazon S3 bucket contains the specified file, Elastic + // Transcoder checks the pipeline specified by Pipeline; the Input Bucket object + // in that pipeline identifies the bucket. + // + // If the file name includes a prefix, for example, logos/128x64.png, include + // the prefix in the key. If the file isn't in the specified bucket, Elastic + // Transcoder returns an error. 
+ InputKey *string `min:"1" type:"string"` + + // The ID of the watermark settings that Elastic Transcoder uses to add watermarks + // to the video during transcoding. The settings are in the preset specified + // by Preset for the current output. In that preset, the value of Watermarks + // Id tells Elastic Transcoder which settings to use. + PresetWatermarkId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s JobWatermark) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobWatermark) GoString() string { + return s.String() +} + +// The ListJobsByPipelineRequest structure. +type ListJobsByPipelineInput struct { + _ struct{} `type:"structure"` + + // To list jobs in chronological order by the date and time that they were submitted, + // enter true. To list jobs in reverse chronological order, enter false. + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + // When Elastic Transcoder returns more than one page of results, use pageToken + // in subsequent GET requests to get each successive page of results. + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + // The ID of the pipeline for which you want to get job information. + PipelineId *string `location:"uri" locationName:"PipelineId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListJobsByPipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsByPipelineInput) GoString() string { + return s.String() +} + +// The ListJobsByPipelineResponse structure. +type ListJobsByPipelineOutput struct { + _ struct{} `type:"structure"` + + // An array of Job objects that are in the specified pipeline. + Jobs []*Job `type:"list"` + + // A value that you use to access the second and subsequent pages of results, + // if any. 
When the jobs in the specified pipeline fit on one page or when you've + // reached the last page of results, the value of NextPageToken is null. + NextPageToken *string `type:"string"` +} + +// String returns the string representation +func (s ListJobsByPipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsByPipelineOutput) GoString() string { + return s.String() +} + +// The ListJobsByStatusRequest structure. +type ListJobsByStatusInput struct { + _ struct{} `type:"structure"` + + // To list jobs in chronological order by the date and time that they were submitted, + // enter true. To list jobs in reverse chronological order, enter false. + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + // When Elastic Transcoder returns more than one page of results, use pageToken + // in subsequent GET requests to get each successive page of results. + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` + + // To get information about all of the jobs associated with the current AWS + // account that have a given status, specify the following status: Submitted, + // Progressing, Complete, Canceled, or Error. + Status *string `location:"uri" locationName:"Status" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListJobsByStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsByStatusInput) GoString() string { + return s.String() +} + +// The ListJobsByStatusResponse structure. +type ListJobsByStatusOutput struct { + _ struct{} `type:"structure"` + + // An array of Job objects that have the specified status. + Jobs []*Job `type:"list"` + + // A value that you use to access the second and subsequent pages of results, + // if any. 
When the jobs in the specified pipeline fit on one page or when you've + // reached the last page of results, the value of NextPageToken is null. + NextPageToken *string `type:"string"` +} + +// String returns the string representation +func (s ListJobsByStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsByStatusOutput) GoString() string { + return s.String() +} + +// The ListPipelineRequest structure. +type ListPipelinesInput struct { + _ struct{} `type:"structure"` + + // To list pipelines in chronological order by the date and time that they were + // created, enter true. To list pipelines in reverse chronological order, enter + // false. + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + // When Elastic Transcoder returns more than one page of results, use pageToken + // in subsequent GET requests to get each successive page of results. + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` +} + +// String returns the string representation +func (s ListPipelinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesInput) GoString() string { + return s.String() +} + +// A list of the pipelines associated with the current AWS account. +type ListPipelinesOutput struct { + _ struct{} `type:"structure"` + + // A value that you use to access the second and subsequent pages of results, + // if any. When the pipelines fit on one page or when you've reached the last + // page of results, the value of NextPageToken is null. + NextPageToken *string `type:"string"` + + // An array of Pipeline objects. 
+ Pipelines []*Pipeline `type:"list"` +} + +// String returns the string representation +func (s ListPipelinesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPipelinesOutput) GoString() string { + return s.String() +} + +// The ListPresetsRequest structure. +type ListPresetsInput struct { + _ struct{} `type:"structure"` + + // To list presets in chronological order by the date and time that they were + // created, enter true. To list presets in reverse chronological order, enter + // false. + Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` + + // When Elastic Transcoder returns more than one page of results, use pageToken + // in subsequent GET requests to get each successive page of results. + PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` +} + +// String returns the string representation +func (s ListPresetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPresetsInput) GoString() string { + return s.String() +} + +// The ListPresetsResponse structure. +type ListPresetsOutput struct { + _ struct{} `type:"structure"` + + // A value that you use to access the second and subsequent pages of results, + // if any. When the presets fit on one page or when you've reached the last + // page of results, the value of NextPageToken is null. + NextPageToken *string `type:"string"` + + // An array of Preset objects. + Presets []*Preset `type:"list"` +} + +// String returns the string representation +func (s ListPresetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPresetsOutput) GoString() string { + return s.String() +} + +// The Amazon Simple Notification Service (Amazon SNS) topic or topics to notify +// in order to report job status. 
+// +// To receive notifications, you must also subscribe to the new topic in the +// Amazon SNS console. +type Notifications struct { + _ struct{} `type:"structure"` + + // The Amazon SNS topic that you want to notify when Elastic Transcoder has + // finished processing the job. + Completed *string `type:"string"` + + // The Amazon SNS topic that you want to notify when Elastic Transcoder encounters + // an error condition. + Error *string `type:"string"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to + // notify when Elastic Transcoder has started to process the job. + Progressing *string `type:"string"` + + // The Amazon SNS topic that you want to notify when Elastic Transcoder encounters + // a warning condition. + Warning *string `type:"string"` +} + +// String returns the string representation +func (s Notifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Notifications) GoString() string { + return s.String() +} + +// The Permission structure. +type Permission struct { + _ struct{} `type:"structure"` + + // The permission that you want to give to the AWS user that is listed in Grantee. + // Valid values include: READ: The grantee can read the thumbnails and metadata + // for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: + // The grantee can read the object ACL for thumbnails that Elastic Transcoder + // adds to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL for + // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: + // The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. + Access []*string `type:"list"` + + // The AWS user or group that you want to have access to transcoded files and + // playlists. 
To identify the user or group, you can specify the canonical user + // ID for an AWS account, an origin access identity for a CloudFront distribution, + // the registered email address of an AWS account, or a predefined Amazon S3 + // group. + Grantee *string `min:"1" type:"string"` + + // The type of value that appears in the Grantee object: Canonical: Either + // the canonical user ID for an AWS account or an origin access identity for + // an Amazon CloudFront distribution. A canonical user ID is not the same as + // an AWS account number. Email: The registered email address of an AWS account. + // Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, + // or LogDelivery. + GranteeType *string `type:"string"` +} + +// String returns the string representation +func (s Permission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Permission) GoString() string { + return s.String() +} + +// The pipeline (queue) that is used to manage jobs. +type Pipeline struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the pipeline. + Arn *string `type:"string"` + + // The AWS Key Management Service (AWS KMS) key that you want to use with this + // pipeline. + // + // If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need + // to provide a key with your job because a default key, known as an AWS-KMS + // key, is created for you automatically. You need to provide an AWS-KMS key + // only if you want to use a non-default AWS-KMS key, or if you are using an + // Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM. + AwsKmsKeyArn *string `type:"string"` + + // Information about the Amazon S3 bucket in which you want Elastic Transcoder + // to save transcoded files and playlists. Either you specify both ContentConfig + // and ThumbnailConfig, or you specify OutputBucket. 
+ // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions: A list of the users and/or + // predefined Amazon S3 groups you want to have access to transcoded files and + // playlists, and the type of access that you want them to have. GranteeType: + // The type of value that appears in the Grantee object: Canonical: Either + // the canonical user ID for an AWS account or an origin access identity for + // an Amazon CloudFront distribution. Email: The registered email address of + // an AWS account. Group: One of the following predefined Amazon S3 groups: + // AllUsers, AuthenticatedUsers, or LogDelivery. Grantee: The AWS user or + // group that you want to have access to transcoded files and playlists. Access: + // The permission that you want to give to the AWS user that is listed in Grantee. + // Valid values include: READ: The grantee can read the objects and metadata + // for objects that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: + // The grantee can read the object ACL for objects that Elastic Transcoder adds + // to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL for the + // objects that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: + // The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects + // that Elastic Transcoder adds to the Amazon S3 bucket. StorageClass: + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want + // Elastic Transcoder to assign to the video files and playlists that it stores + // in your Amazon S3 bucket. + ContentConfig *PipelineOutputConfig `type:"structure"` + + // The identifier for the pipeline. You use this value to identify the pipeline + // in which you want to perform a variety of operations, such as creating a + // job or a preset. 
+ Id *string `type:"string"` + + // The Amazon S3 bucket from which Elastic Transcoder gets media files for transcoding + // and the graphics files, if any, that you want to use for watermarks. + InputBucket *string `type:"string"` + + // The name of the pipeline. We recommend that the name be unique within the + // AWS account, but uniqueness is not enforced. + // + // Constraints: Maximum 40 characters + Name *string `min:"1" type:"string"` + + // The Amazon Simple Notification Service (Amazon SNS) topic that you want to + // notify to report job status. + // + // To receive notifications, you must also subscribe to the new topic in the + // Amazon SNS console. Progressing (optional): The Amazon Simple Notification + // Service (Amazon SNS) topic that you want to notify when Elastic Transcoder + // has started to process the job. Completed (optional): The Amazon SNS topic + // that you want to notify when Elastic Transcoder has finished processing the + // job. Warning (optional): The Amazon SNS topic that you want to notify when + // Elastic Transcoder encounters a warning condition. Error (optional): The + // Amazon SNS topic that you want to notify when Elastic Transcoder encounters + // an error condition. + Notifications *Notifications `type:"structure"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded + // files, thumbnails, and playlists. Either you specify this value, or you specify + // both ContentConfig and ThumbnailConfig. + OutputBucket *string `type:"string"` + + // The IAM Amazon Resource Name (ARN) for the role that Elastic Transcoder uses + // to transcode jobs for this pipeline. + Role *string `type:"string"` + + // The current status of the pipeline: + // + // Active: The pipeline is processing jobs. Paused: The pipeline is not + // currently processing jobs. + Status *string `type:"string"` + + // Information about the Amazon S3 bucket in which you want Elastic Transcoder + // to save thumbnail files. 
Either you specify both ContentConfig and ThumbnailConfig, + // or you specify OutputBucket. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // thumbnail files. Permissions: A list of the users and/or predefined Amazon + // S3 groups you want to have access to thumbnail files, and the type of access + // that you want them to have. GranteeType: The type of value that appears + // in the Grantee object: Canonical: Either the canonical user ID for an AWS + // account or an origin access identity for an Amazon CloudFront distribution. + // A canonical user ID is not the same as an AWS account number. Email: The + // registered email address of an AWS account. Group: One of the following + // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Grantee: The AWS user or group that you want to have access to thumbnail + // files. Access: The permission that you want to give to the AWS user that + // is listed in Grantee. Valid values include: READ: The grantee can read + // the thumbnails and metadata for thumbnails that Elastic Transcoder adds to + // the Amazon S3 bucket. READ_ACP: The grantee can read the object ACL for + // thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // The grantee can write the ACL for the thumbnails that Elastic Transcoder + // adds to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, + // and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds + // to the Amazon S3 bucket. StorageClass: The Amazon S3 storage class, + // Standard or ReducedRedundancy, that you want Elastic Transcoder to assign + // to the thumbnails that it stores in your Amazon S3 bucket. 
+ ThumbnailConfig *PipelineOutputConfig `type:"structure"` +} + +// String returns the string representation +func (s Pipeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Pipeline) GoString() string { + return s.String() +} + +// The PipelineOutputConfig structure. +type PipelineOutputConfig struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded + // files. Specify this value when all of the following are true: You want to + // save transcoded files, thumbnails (if any), and playlists (if any) together + // in one bucket. You do not want to specify the users or groups who have access + // to the transcoded files, thumbnails, and playlists. You do not want to specify + // the permissions that Elastic Transcoder grants to the files. You want to + // associate the transcoded files and thumbnails with the Amazon S3 Standard + // storage class. If you want to save transcoded files and playlists in one + // bucket and thumbnails in another bucket, specify which users can access the + // transcoded files or the permissions the users have, or change the Amazon + // S3 storage class, omit OutputBucket and specify values for ContentConfig + // and ThumbnailConfig instead. + Bucket *string `type:"string"` + + // Optional. The Permissions object specifies which users and/or predefined + // Amazon S3 groups you want to have access to transcoded files and playlists, + // and the type of access you want them to have. You can grant permissions to + // a maximum of 30 users and/or predefined Amazon S3 groups. + // + // If you include Permissions, Elastic Transcoder grants only the permissions + // that you specify. It does not grant full permissions to the owner of the + // role specified by Role. If you want that user to have full control, you must + // explicitly grant full control to the user. 
+ // + // If you omit Permissions, Elastic Transcoder grants full control over the + // transcoded files and playlists to the owner of the role specified by Role, + // and grants no other permissions to any other user or group. + Permissions []*Permission `type:"list"` + + // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want + // Elastic Transcoder to assign to the video files and playlists that it stores + // in your Amazon S3 bucket. + StorageClass *string `type:"string"` +} + +// String returns the string representation +func (s PipelineOutputConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PipelineOutputConfig) GoString() string { + return s.String() +} + +// The PlayReady DRM settings, if any, that you want Elastic Transcoder to apply +// to the output files associated with this playlist. +// +// PlayReady DRM encrypts your media files using AES-CTR encryption. +// +// If you use DRM for an HLSv3 playlist, your outputs must have a master playlist. +type PlayReadyDrm struct { + _ struct{} `type:"structure"` + + // The type of DRM, if any, that you want Elastic Transcoder to apply to the + // output files associated with this playlist. + Format *string `type:"string"` + + // The series of random bits created by a random bit generator, unique for every + // encryption operation, that you want Elastic Transcoder to use to encrypt + // your files. The initialization vector must be base64-encoded, and it must + // be exactly 8 bytes long before being base64-encoded. If no initialization + // vector is provided, Elastic Transcoder generates one for you. + InitializationVector *string `type:"string"` + + // The DRM key for your file, provided by your DRM license provider. The key + // must be base64-encoded, and it must be one of the following bit lengths before + // being base64-encoded: + // + // 128, 192, or 256. + // + // The key must also be encrypted by using AWS KMS. 
+ Key *string `type:"string"` + + // The ID for your DRM key, so that your DRM license provider knows which key + // to provide. + // + // The key ID must be provided in big endian, and Elastic Transcoder will convert + // it to little endian before inserting it into the PlayReady DRM headers. If + // you are unsure whether your license server provides your key ID in big or + // little endian, check with your DRM provider. + KeyId *string `type:"string"` + + // The MD5 digest of the key used for DRM on your file, and that you want Elastic + // Transcoder to use as a checksum to make sure your key was not corrupted in + // transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes + // before being base64-encoded. + KeyMd5 *string `type:"string"` + + // The location of the license key required to play DRM content. The URL must + // be an absolute path, and is referenced by the PlayReady header. The PlayReady + // header is referenced in the protection header of the client manifest for + // Smooth Streaming outputs, and in the EXT-X-DXDRM and EXT-XDXDRMINFO metadata + // tags for HLS playlist outputs. An example URL looks like this: https://www.example.com/exampleKey/ + LicenseAcquisitionUrl *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PlayReadyDrm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlayReadyDrm) GoString() string { + return s.String() +} + +// Use Only for Fragmented MP4 or MPEG-TS Outputs. If you specify a preset for +// which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists +// contains information about the master playlists that you want Elastic Transcoder +// to create. We recommend that you create only one master playlist per output +// format. The maximum number of master playlists in a job is 30. +type Playlist struct { + _ struct{} `type:"structure"` + + // The format of the output playlist. 
Valid formats include HLSv3, HLSv4, and + // Smooth. + Format *string `type:"string"` + + // The HLS content protection settings, if any, that you want Elastic Transcoder + // to apply to the output files associated with this playlist. + HlsContentProtection *HlsContentProtection `type:"structure"` + + // The name that you want Elastic Transcoder to assign to the master playlist, + // for example, nyc-vacation.m3u8. If the name includes a / character, the section + // of the name before the last / must be identical for all Name objects. If + // you create more than one master playlist, the values of all Name objects + // must be unique. + // + // Note: Elastic Transcoder automatically appends the relevant file extension + // to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc + // for Smooth playlists). If you include a file extension in Name, the file + // name will have two extensions. + Name *string `min:"1" type:"string"` + + // For each output in this job that you want to include in a master playlist, + // the value of the Outputs:Key object. + // + // If your output is not HLS or does not have a segment duration set, the + // name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key: + // + // OutputKeyPrefixOutputs:Key + // + // If your output is HLSv3 and has a segment duration set, or is not included + // in a playlist, Elastic Transcoder creates an output playlist file with a + // file extension of .m3u8, and a series of .ts files that include a five-digit + // sequential counter beginning with 00000: + // + // OutputKeyPrefixOutputs:Key.m3u8 + // + // OutputKeyPrefixOutputs:Key00000.ts + // + // If your output is HLSv4, has a segment duration set, and is included in + // an HLSv4 playlist, Elastic Transcoder creates an output playlist file with + // a file extension of _v4.m3u8. 
If the output is video, Elastic Transcoder + // also creates an output file with an extension of _iframe.m3u8: + // + // OutputKeyPrefixOutputs:Key_v4.m3u8 + // + // OutputKeyPrefixOutputs:Key_iframe.m3u8 + // + // OutputKeyPrefixOutputs:Key.ts + // + // Elastic Transcoder automatically appends the relevant file extension to + // the file name. If you include a file extension in Output Key, the file name + // will have two extensions. + // + // If you include more than one output in a playlist, any segment duration + // settings, clip settings, or caption settings must be the same for all outputs + // in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, + // and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for + // all outputs. + OutputKeys []*string `type:"list"` + + // The DRM settings, if any, that you want Elastic Transcoder to apply to the + // output files associated with this playlist. + PlayReadyDrm *PlayReadyDrm `type:"structure"` + + // The status of the job with which the playlist is associated. + Status *string `type:"string"` + + // Information that further explains the status. + StatusDetail *string `type:"string"` +} + +// String returns the string representation +func (s Playlist) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Playlist) GoString() string { + return s.String() +} + +// Presets are templates that contain most of the settings for transcoding media +// files from one format to another. Elastic Transcoder includes some default +// presets for common formats, for example, several iPod and iPhone versions. +// You can also create your own presets for formats that aren't included among +// the default presets. You specify which preset you want to use when you create +// a job. +type Preset struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the preset. 
+ Arn *string `type:"string"` + + // A section of the response body that provides information about the audio + // preset values. + Audio *AudioParameters `type:"structure"` + + // The container type for the output file. Valid values include flac, flv, fmp4, + // gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm. + Container *string `type:"string"` + + // A description of the preset. + Description *string `type:"string"` + + // Identifier for the new preset. You use this value to get settings for the + // preset or to delete it. + Id *string `type:"string"` + + // The name of the preset. + Name *string `min:"1" type:"string"` + + // A section of the response body that provides information about the thumbnail + // preset values, if any. + Thumbnails *Thumbnails `type:"structure"` + + // Whether the preset is a default preset provided by Elastic Transcoder (System) + // or a preset that you have defined (Custom). + Type *string `type:"string"` + + // A section of the response body that provides information about the video + // preset values. + Video *VideoParameters `type:"structure"` +} + +// String returns the string representation +func (s Preset) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Preset) GoString() string { + return s.String() +} + +// Settings for the size, location, and opacity of graphics that you want Elastic +// Transcoder to overlay over videos that are transcoded using this preset. +// You can specify settings for up to four watermarks. Watermarks appear in +// the specified size and location, and with the specified opacity for the duration +// of the transcoded video. +// +// Watermarks can be in .png or .jpg format. If you want to display a watermark +// that is not rectangular, use the .png format, which supports transparency. 
+// +// When you create a job that uses this preset, you specify the .png or .jpg +// graphics that you want Elastic Transcoder to include in the transcoded videos. +// You can specify fewer graphics in the job than you specify watermark settings +// in the preset, which allows you to use the same preset for up to four watermarks +// that have different dimensions. +type PresetWatermark struct { + _ struct{} `type:"structure"` + + // The horizontal position of the watermark unless you specify a non-zero value + // for HorizontalOffset: Left: The left edge of the watermark is aligned with + // the left border of the video. Right: The right edge of the watermark is + // aligned with the right border of the video. Center: The watermark is centered + // between the left and right borders. + HorizontalAlign *string `type:"string"` + + // The amount by which you want the horizontal position of the watermark to + // be offset from the position specified by HorizontalAlign: number of pixels + // (px): The minimum value is 0 pixels, and the maximum value is the value of + // MaxWidth. integer percentage (%): The range of valid values is 0 to 100. + // For example, if you specify Left for HorizontalAlign and 5px for HorizontalOffset, + // the left side of the watermark appears 5 pixels from the left border of the + // output video. + // + // HorizontalOffset is only valid when the value of HorizontalAlign is Left + // or Right. If you specify an offset that causes the watermark to extend beyond + // the left or right border and Elastic Transcoder has not added black bars, + // the watermark is cropped. If Elastic Transcoder has added black bars, the + // watermark extends into the black bars. If the watermark extends beyond the + // black bars, it is cropped. + // + // Use the value of Target to specify whether you want to include the black + // bars that are added by Elastic Transcoder, if any, in the offset calculation. 
+ HorizontalOffset *string `type:"string"` + + // A unique identifier for the settings for one watermark. The value of Id can + // be up to 40 characters long. + Id *string `min:"1" type:"string"` + + // The maximum height of the watermark in one of the following formats: number + // of pixels (px): The minimum value is 16 pixels, and the maximum value is + // the value of MaxHeight. integer percentage (%): The range of valid values + // is 0 to 100. Use the value of Target to specify whether you want Elastic + // Transcoder to include the black bars that are added by Elastic Transcoder, + // if any, in the calculation. If you specify the value in pixels, it must + // be less than or equal to the value of MaxHeight. + MaxHeight *string `type:"string"` + + // The maximum width of the watermark in one of the following formats: number + // of pixels (px): The minimum value is 16 pixels, and the maximum value is + // the value of MaxWidth. integer percentage (%): The range of valid values + // is 0 to 100. Use the value of Target to specify whether you want Elastic + // Transcoder to include the black bars that are added by Elastic Transcoder, + // if any, in the calculation. If you specify the value in pixels, it must be + // less than or equal to the value of MaxWidth. + MaxWidth *string `type:"string"` + + // A percentage that indicates how much you want a watermark to obscure the + // video in the location where it appears. Valid values are 0 (the watermark + // is invisible) to 100 (the watermark completely obscures the video in the + // specified location). The datatype of Opacity is float. + // + // Elastic Transcoder supports transparent .png graphics. If you use a transparent + // .png, the transparent portion of the video appears as if you had specified + // a value of 0 for Opacity. The .jpg file format doesn't support transparency. 
+ Opacity *string `type:"string"` + + // A value that controls scaling of the watermark: Fit: Elastic Transcoder + // scales the watermark so it matches the value that you specified in either + // MaxWidth or MaxHeight without exceeding the other value. Stretch: Elastic + // Transcoder stretches the watermark to match the values that you specified + // for MaxWidth and MaxHeight. If the relative proportions of the watermark + // and the values of MaxWidth and MaxHeight are different, the watermark will + // be distorted. ShrinkToFit: Elastic Transcoder scales the watermark down + // so that its dimensions match the values that you specified for at least one + // of MaxWidth and MaxHeight without exceeding either value. If you specify + // this option, Elastic Transcoder does not scale the watermark up. + SizingPolicy *string `type:"string"` + + // A value that determines how Elastic Transcoder interprets values that you + // specified for HorizontalOffset, VerticalOffset, MaxWidth, and MaxHeight: + // Content: HorizontalOffset and VerticalOffset values are calculated based + // on the borders of the video excluding black bars added by Elastic Transcoder, + // if any. In addition, MaxWidth and MaxHeight, if specified as a percentage, + // are calculated based on the borders of the video excluding black bars added + // by Elastic Transcoder, if any. Frame: HorizontalOffset and VerticalOffset + // values are calculated based on the borders of the video including black bars + // added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight, + // if specified as a percentage, are calculated based on the borders of the + // video including black bars added by Elastic Transcoder, if any. + Target *string `type:"string"` + + // The vertical position of the watermark unless you specify a non-zero value + // for VerticalOffset: Top: The top edge of the watermark is aligned with + // the top border of the video. 
Bottom: The bottom edge of the watermark is + // aligned with the bottom border of the video. Center: The watermark is centered + // between the top and bottom borders. + VerticalAlign *string `type:"string"` + + // VerticalOffset The amount by which you want the vertical position of the + // watermark to be offset from the position specified by VerticalAlign: number + // of pixels (px): The minimum value is 0 pixels, and the maximum value is the + // value of MaxHeight. integer percentage (%): The range of valid values is + // 0 to 100. For example, if you specify Top for VerticalAlign and 5px for + // VerticalOffset, the top of the watermark appears 5 pixels from the top border + // of the output video. + // + // VerticalOffset is only valid when the value of VerticalAlign is Top or Bottom. + // + // If you specify an offset that causes the watermark to extend beyond the + // top or bottom border and Elastic Transcoder has not added black bars, the + // watermark is cropped. If Elastic Transcoder has added black bars, the watermark + // extends into the black bars. If the watermark extends beyond the black bars, + // it is cropped. + // + // Use the value of Target to specify whether you want Elastic Transcoder to + // include the black bars that are added by Elastic Transcoder, if any, in the + // offset calculation. + VerticalOffset *string `type:"string"` +} + +// String returns the string representation +func (s PresetWatermark) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PresetWatermark) GoString() string { + return s.String() +} + +// The ReadJobRequest structure. +type ReadJobInput struct { + _ struct{} `type:"structure"` + + // The identifier of the job for which you want to get detailed information. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReadJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadJobInput) GoString() string { + return s.String() +} + +// The ReadJobResponse structure. +type ReadJobOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the job. + Job *Job `type:"structure"` +} + +// String returns the string representation +func (s ReadJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadJobOutput) GoString() string { + return s.String() +} + +// The ReadPipelineRequest structure. +type ReadPipelineInput struct { + _ struct{} `type:"structure"` + + // The identifier of the pipeline to read. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReadPipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadPipelineInput) GoString() string { + return s.String() +} + +// The ReadPipelineResponse structure. +type ReadPipelineOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the pipeline. + Pipeline *Pipeline `type:"structure"` + + // Elastic Transcoder returns a warning if the resources used by your pipeline + // are not in the same region as the pipeline. + // + // Using resources in the same region, such as your Amazon S3 buckets, Amazon + // SNS notification topics, and AWS KMS key, reduces processing time and prevents + // cross-regional charges. 
+ Warnings []*Warning `type:"list"` +} + +// String returns the string representation +func (s ReadPipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadPipelineOutput) GoString() string { + return s.String() +} + +// The ReadPresetRequest structure. +type ReadPresetInput struct { + _ struct{} `type:"structure"` + + // The identifier of the preset for which you want to get detailed information. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReadPresetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadPresetInput) GoString() string { + return s.String() +} + +// The ReadPresetResponse structure. +type ReadPresetOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the preset. + Preset *Preset `type:"structure"` +} + +// String returns the string representation +func (s ReadPresetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReadPresetOutput) GoString() string { + return s.String() +} + +// The TestRoleRequest structure. +type TestRoleInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket that contains media files to be transcoded. The action + // attempts to read from this bucket. + InputBucket *string `type:"string" required:"true"` + + // The Amazon S3 bucket that Elastic Transcoder will write transcoded media + // files to. The action attempts to read from this bucket. + OutputBucket *string `type:"string" required:"true"` + + // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder + // to test. 
+ Role *string `type:"string" required:"true"` + + // The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics + // that you want the action to send a test notification to. + Topics []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s TestRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRoleInput) GoString() string { + return s.String() +} + +// The TestRoleResponse structure. +type TestRoleOutput struct { + _ struct{} `type:"structure"` + + // If the Success element contains false, this value is an array of one or more + // error messages that were generated during the test process. + Messages []*string `type:"list"` + + // If the operation is successful, this value is true; otherwise, the value + // is false. + Success *string `type:"string"` +} + +// String returns the string representation +func (s TestRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRoleOutput) GoString() string { + return s.String() +} + +// Thumbnails for videos. +type Thumbnails struct { + _ struct{} `type:"structure"` + + // To better control resolution and aspect ratio of thumbnails, we recommend + // that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy + // instead of Resolution and AspectRatio. The two groups of settings are mutually + // exclusive. Do not use them together. + // + // The aspect ratio of thumbnails. Valid values include: + // + // auto, 1:1, 4:3, 3:2, 16:9 + // + // If you specify auto, Elastic Transcoder tries to preserve the aspect ratio + // of the video in the output file. + AspectRatio *string `type:"string"` + + // The format of thumbnails, if any. Valid values are jpg and png. + // + // You specify whether you want Elastic Transcoder to create thumbnails when + // you create a job. 
+ Format *string `type:"string"` + + // The approximate number of seconds between thumbnails. Specify an integer + // value. + Interval *string `type:"string"` + + // The maximum height of thumbnails in pixels. If you specify auto, Elastic + // Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric + // value, enter an even integer between 32 and 3072. + MaxHeight *string `type:"string"` + + // The maximum width of thumbnails in pixels. If you specify auto, Elastic Transcoder + // uses 1920 (Full HD) as the default value. If you specify a numeric value, + // enter an even integer between 32 and 4096. + MaxWidth *string `type:"string"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars + // to the top and bottom and/or left and right sides of thumbnails to make the + // total size of the thumbnails match the values that you specified for thumbnail + // MaxWidth and MaxHeight settings. + PaddingPolicy *string `type:"string"` + + // To better control resolution and aspect ratio of thumbnails, we recommend + // that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy + // instead of Resolution and AspectRatio. The two groups of settings are mutually + // exclusive. Do not use them together. + // + // The width and height of thumbnail files in pixels. Specify a value in the + // format width x height where both values are even integers. The values cannot + // exceed the width and height that you specified in the Video:Resolution object. + Resolution *string `type:"string"` + + // Specify one of the following values to control scaling of thumbnails: + // + // Fit: Elastic Transcoder scales thumbnails so they match the value that + // you specified in thumbnail MaxWidth or MaxHeight settings without exceeding + // the other value. 
Fill: Elastic Transcoder scales thumbnails so they match + // the value that you specified in thumbnail MaxWidth or MaxHeight settings + // and matches or exceeds the other value. Elastic Transcoder centers the image + // in thumbnails and then crops in the dimension (if any) that exceeds the maximum + // value. Stretch: Elastic Transcoder stretches thumbnails to match the values + // that you specified for thumbnail MaxWidth and MaxHeight settings. If the + // relative proportions of the input video and thumbnails are different, the + // thumbnails will be distorted. Keep: Elastic Transcoder does not scale thumbnails. + // If either dimension of the input video exceeds the values that you specified + // for thumbnail MaxWidth and MaxHeight settings, Elastic Transcoder crops the + // thumbnails. ShrinkToFit: Elastic Transcoder scales thumbnails down so that + // their dimensions match the values that you specified for at least one of + // thumbnail MaxWidth and MaxHeight without exceeding either value. If you specify + // this option, Elastic Transcoder does not scale thumbnails up. ShrinkToFill: + // Elastic Transcoder scales thumbnails down so that their dimensions match + // the values that you specified for at least one of MaxWidth and MaxHeight + // without dropping below either value. If you specify this option, Elastic + // Transcoder does not scale thumbnails up. + SizingPolicy *string `type:"string"` +} + +// String returns the string representation +func (s Thumbnails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Thumbnails) GoString() string { + return s.String() +} + +// Settings that determine when a clip begins and how long it lasts. +type TimeSpan struct { + _ struct{} `type:"structure"` + + // The duration of the clip. The format can be either HH:mm:ss.SSS (maximum + // value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum + // value: 86399.999). 
If you don't specify a value, Elastic Transcoder creates + // an output file from StartTime to the end of the file. + // + // If you specify a value longer than the duration of the input file, Elastic + // Transcoder transcodes the file and returns a warning message. + Duration *string `type:"string"` + + // The place in the input file where you want a clip to start. The format can + // be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of + // a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a + // value, Elastic Transcoder starts at the beginning of the input file. + StartTime *string `type:"string"` +} + +// String returns the string representation +func (s TimeSpan) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeSpan) GoString() string { + return s.String() +} + +// Details about the timing of a job. +type Timing struct { + _ struct{} `type:"structure"` + + // The time the job finished transcoding, in epoch milliseconds. + FinishTimeMillis *int64 `type:"long"` + + // The time the job began transcoding, in epoch milliseconds. + StartTimeMillis *int64 `type:"long"` + + // The time the job was submitted to Elastic Transcoder, in epoch milliseconds. + SubmitTimeMillis *int64 `type:"long"` +} + +// String returns the string representation +func (s Timing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Timing) GoString() string { + return s.String() +} + +// The UpdatePipelineRequest structure. +type UpdatePipelineInput struct { + _ struct{} `type:"structure"` + + // The AWS Key Management Service (AWS KMS) key that you want to use with this + // pipeline. + // + // If you use either S3 or S3-AWS-KMS as your Encryption:Mode, you don't need + // to provide a key with your job because a default key, known as an AWS-KMS + // key, is created for you automatically. 
You need to provide an AWS-KMS key + // only if you want to use a non-default AWS-KMS key, or if you are using an + // Encryption:Mode of AES-PKCS7, AES-CTR, or AES-GCM. + AwsKmsKeyArn *string `type:"string"` + + // The optional ContentConfig object specifies information about the Amazon + // S3 bucket in which you want Elastic Transcoder to save transcoded files and + // playlists: which bucket to use, which users you want to have access to the + // files, the type of access you want users to have, and the storage class that + // you want to assign to the files. + // + // If you specify values for ContentConfig, you must also specify values for + // ThumbnailConfig. + // + // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket + // object. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions (Optional): The Permissions + // object specifies which users you want to have access to transcoded files + // and the type of access you want them to have. You can grant permissions to + // a maximum of 30 users and/or predefined Amazon S3 groups. Grantee Type: + // Specify the type of value that appears in the Grantee object: Canonical: + // The value in the Grantee object is either the canonical user ID for an AWS + // account or an origin access identity for an Amazon CloudFront distribution. + // For more information about canonical user IDs, see Access Control List (ACL) + // Overview in the Amazon Simple Storage Service Developer Guide. For more information + // about using CloudFront origin access identities to require that users use + // CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity + // to Restrict Access to Your Amazon S3 Content. A canonical user ID is not + // the same as an AWS account number. Email: The value in the Grantee object + // is the registered email address of an AWS account. 
Group: The value in the + // Grantee object is one of the following predefined Amazon S3 groups: AllUsers, + // AuthenticatedUsers, or LogDelivery. Grantee: The AWS user or group that + // you want to have access to transcoded files and playlists. To identify the + // user or group, you can specify the canonical user ID for an AWS account, + // an origin access identity for a CloudFront distribution, the registered email + // address of an AWS account, or a predefined Amazon S3 group Access: The + // permission that you want to give to the AWS user that you specified in Grantee. + // Permissions are granted on the files that Elastic Transcoder adds to the + // bucket, including playlists and video files. Valid values include: READ: + // The grantee can read the objects and metadata for objects that Elastic Transcoder + // adds to the Amazon S3 bucket. READ_ACP: The grantee can read the object + // ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // The grantee can write the ACL for the objects that Elastic Transcoder adds + // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and + // WRITE_ACP permissions for the objects that Elastic Transcoder adds to the + // Amazon S3 bucket. StorageClass: The Amazon S3 storage class, Standard + // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video + // files and playlists that it stores in your Amazon S3 bucket. + ContentConfig *PipelineOutputConfig `type:"structure"` + + // The ID of the pipeline that you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The Amazon S3 bucket in which you saved the media files that you want to + // transcode and the graphics that you want to use as watermarks. + InputBucket *string `type:"string"` + + // The name of the pipeline. We recommend that the name be unique within the + // AWS account, but uniqueness is not enforced. 
+ // + // Constraints: Maximum 40 characters + Name *string `min:"1" type:"string"` + + // The Amazon Simple Notification Service (Amazon SNS) topic or topics to notify + // in order to report job status. + // + // To receive notifications, you must also subscribe to the new topic in the + // Amazon SNS console. + Notifications *Notifications `type:"structure"` + + // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder + // to use to transcode jobs for this pipeline. + Role *string `type:"string"` + + // The ThumbnailConfig object specifies several values, including the Amazon + // S3 bucket in which you want Elastic Transcoder to save thumbnail files, which + // users you want to have access to the files, the type of access you want users + // to have, and the storage class that you want to assign to the files. + // + // If you specify values for ContentConfig, you must also specify values for + // ThumbnailConfig even if you don't want to create thumbnails. + // + // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket + // object. + // + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // thumbnail files. Permissions (Optional): The Permissions object specifies + // which users and/or predefined Amazon S3 groups you want to have access to + // thumbnail files, and the type of access you want them to have. You can grant + // permissions to a maximum of 30 users and/or predefined Amazon S3 groups. + // GranteeType: Specify the type of value that appears in the Grantee object: + // Canonical: The value in the Grantee object is either the canonical user + // ID for an AWS account or an origin access identity for an Amazon CloudFront + // distribution. A canonical user ID is not the same as an AWS account number. + // Email: The value in the Grantee object is the registered email address + // of an AWS account. 
Group: The value in the Grantee object is one of the + // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Grantee: The AWS user or group that you want to have access to thumbnail + // files. To identify the user or group, you can specify the canonical user + // ID for an AWS account, an origin access identity for a CloudFront distribution, + // the registered email address of an AWS account, or a predefined Amazon S3 + // group. Access: The permission that you want to give to the AWS user that + // you specified in Grantee. Permissions are granted on the thumbnail files + // that Elastic Transcoder adds to the bucket. Valid values include: READ: + // The grantee can read the thumbnails and metadata for objects that Elastic + // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read + // the object ACL for thumbnails that Elastic Transcoder adds to the Amazon + // S3 bucket. WRITE_ACP: The grantee can write the ACL for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The + // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. StorageClass: The + // Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic + // Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket. + ThumbnailConfig *PipelineOutputConfig `type:"structure"` +} + +// String returns the string representation +func (s UpdatePipelineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineInput) GoString() string { + return s.String() +} + +// The UpdatePipelineNotificationsRequest structure. +type UpdatePipelineNotificationsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the pipeline for which you want to change notification + // settings. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic + // that you want to notify to report job status. + // + // To receive notifications, you must also subscribe to the new topic in the + // Amazon SNS console. Progressing: The topic ARN for the Amazon Simple Notification + // Service (Amazon SNS) topic that you want to notify when Elastic Transcoder + // has started to process jobs that are added to this pipeline. This is the + // ARN that Amazon SNS returned when you created the topic. Completed: The + // topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder + // has finished processing a job. This is the ARN that Amazon SNS returned when + // you created the topic. Warning: The topic ARN for the Amazon SNS topic that + // you want to notify when Elastic Transcoder encounters a warning condition. + // This is the ARN that Amazon SNS returned when you created the topic. Error: + // The topic ARN for the Amazon SNS topic that you want to notify when Elastic + // Transcoder encounters an error condition. This is the ARN that Amazon SNS + // returned when you created the topic. + Notifications *Notifications `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdatePipelineNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineNotificationsInput) GoString() string { + return s.String() +} + +// The UpdatePipelineNotificationsResponse structure. +type UpdatePipelineNotificationsOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the pipeline. 
+ Pipeline *Pipeline `type:"structure"` +} + +// String returns the string representation +func (s UpdatePipelineNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineNotificationsOutput) GoString() string { + return s.String() +} + +// When you update a pipeline, Elastic Transcoder returns the values that you +// specified in the request. +type UpdatePipelineOutput struct { + _ struct{} `type:"structure"` + + // The pipeline (queue) that is used to manage jobs. + Pipeline *Pipeline `type:"structure"` + + // Elastic Transcoder returns a warning if the resources used by your pipeline + // are not in the same region as the pipeline. + // + // Using resources in the same region, such as your Amazon S3 buckets, Amazon + // SNS notification topics, and AWS KMS key, reduces processing time and prevents + // cross-regional charges. + Warnings []*Warning `type:"list"` +} + +// String returns the string representation +func (s UpdatePipelineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineOutput) GoString() string { + return s.String() +} + +// The UpdatePipelineStatusRequest structure. +type UpdatePipelineStatusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the pipeline to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The desired status of the pipeline: + // + // Active: The pipeline is processing jobs. Paused: The pipeline is not + // currently processing jobs. 
+ Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdatePipelineStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineStatusInput) GoString() string { + return s.String() +} + +// When you update status for a pipeline, Elastic Transcoder returns the values +// that you specified in the request. +type UpdatePipelineStatusOutput struct { + _ struct{} `type:"structure"` + + // A section of the response body that provides information about the pipeline. + Pipeline *Pipeline `type:"structure"` +} + +// String returns the string representation +func (s UpdatePipelineStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePipelineStatusOutput) GoString() string { + return s.String() +} + +// The VideoParameters structure. +type VideoParameters struct { + _ struct{} `type:"structure"` + + // To better control resolution and aspect ratio of output videos, we recommend + // that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, + // and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups + // of settings are mutually exclusive. Do not use them together. + // + // The display aspect ratio of the video in the output file. Valid values + // include: + // + // auto, 1:1, 4:3, 3:2, 16:9 + // + // If you specify auto, Elastic Transcoder tries to preserve the aspect ratio + // of the input file. + // + // If you specify an aspect ratio for the output file that differs from aspect + // ratio of the input file, Elastic Transcoder adds pillarboxing (black bars + // on the sides) or letterboxing (black bars on the top and bottom) to maintain + // the aspect ratio of the active region of the video. + AspectRatio *string `type:"string"` + + // The bit rate of the video stream in the output file, in kilobits/second. 
+ // Valid values depend on the values of Level and Profile. If you specify auto, + // Elastic Transcoder uses the detected bit rate of the input source. If you + // specify a value other than auto, we recommend that you specify a value less + // than or equal to the maximum H.264-compliant value listed for your level + // and profile: + // + // Level - Maximum video bit rate in kilobits/second (baseline and main Profile) + // : maximum video bit rate in kilobits/second (high Profile) + // + // 1 - 64 : 80 1b - 128 : 160 1.1 - 192 : 240 1.2 - 384 : 480 1.3 - 768 : + // 960 2 - 2000 : 2500 3 - 10000 : 12500 3.1 - 14000 : 17500 3.2 - 20000 : 25000 + // 4 - 20000 : 25000 4.1 - 50000 : 62500 + BitRate *string `type:"string"` + + // The video codec for the output file. Valid values include gif, H.264, mpeg2, + // and vp8. You can only specify vp8 when the container type is webm, gif when + // the container type is gif, and mpeg2 when the container type is mpg. + Codec *string `type:"string"` + + // Profile (H.264/VP8 Only) + // + // The H.264 profile that you want to use for the output file. Elastic Transcoder + // supports the following profiles: + // + // baseline: The profile most commonly used for videoconferencing and for + // mobile applications. main: The profile used for standard-definition digital + // TV broadcasts. high: The profile used for high-definition digital TV broadcasts + // and for Blu-ray discs. Level (H.264 Only) + // + // The H.264 level that you want to use for the output file. Elastic Transcoder + // supports the following levels: + // + // 1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1 + // + // MaxReferenceFrames (H.264 Only) + // + // Applicable only when the value of Video:Codec is H.264. The maximum number + // of previously decoded frames to use as a reference for decoding future frames. 
+ // Valid values are integers 0 through 16, but we recommend that you not use + // a value greater than the following: + // + // Min(Floor(Maximum decoded picture buffer in macroblocks * 256 / (Width + // in pixels * Height in pixels)), 16) + // + // where Width in pixels and Height in pixels represent either MaxWidth and + // MaxHeight, or Resolution. Maximum decoded picture buffer in macroblocks depends + // on the value of the Level object. See the list below. (A macroblock is a + // block of pixels measuring 16x16.) + // + // 1 - 396 1b - 396 1.1 - 900 1.2 - 2376 1.3 - 2376 2 - 2376 2.1 - 4752 2.2 + // - 8100 3 - 8100 3.1 - 18000 3.2 - 20480 4 - 32768 4.1 - 32768 MaxBitRate + // (Optional, H.264/MPEG2/VP8 only) + // + // The maximum number of bits per second in a video buffer; the size of the + // buffer is specified by BufferSize. Specify a value between 16 and 62,500. + // You can reduce the bandwidth required to stream a video by reducing the maximum + // bit rate, but this also reduces the quality of the video. + // + // BufferSize (Optional, H.264/MPEG2/VP8 only) + // + // The maximum number of bits in any x seconds of the output video. This window + // is commonly 10 seconds, the standard segment duration when you're using FMP4 + // or MPEG-TS for the container type of the output video. Specify an integer + // greater than 0. If you specify MaxBitRate and omit BufferSize, Elastic Transcoder + // sets BufferSize to 10 times the value of MaxBitRate. + // + // InterlacedMode (Optional, H.264/MPEG2 Only) + // + // The interlace mode for the output video. + // + // Interlaced video is used to double the perceived frame rate for a video + // by interlacing two fields (one field on every other line, the other field + // on the other lines) so that the human eye registers multiple pictures per + // frame. Interlacing reduces the bandwidth required for transmitting a video, + // but can result in blurred images and flickering. 
+ // + // Valid values include Progressive (no interlacing, top to bottom), TopFirst + // (top field first), BottomFirst (bottom field first), and Auto. + // + // If InterlaceMode is not specified, Elastic Transcoder uses Progressive for + // the output. If Auto is specified, Elastic Transcoder interlaces the output. + // + // ColorSpaceConversionMode (Optional, H.264/MPEG2 Only) + // + // The color space conversion Elastic Transcoder applies to the output video. + // Color spaces are the algorithms used by the computer to store information + // about how to render color. Bt.601 is the standard for standard definition + // video, while Bt.709 is the standard for high definition video. + // + // Valid values include None, Bt709toBt601, Bt601toBt709, and Auto. + // + // If you chose Auto for ColorSpaceConversionMode and your output is interlaced, + // your frame rate is one of 23.97, 24, 25, 29.97, 50, or 60, your SegmentDuration + // is null, and you are using one of the resolution changes from the list below, + // Elastic Transcoder applies the following color space conversions: + // + // Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 + // Standard to HD, 720x576 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 + // HD to Standard, 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601 + // HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601 + // Elastic Transcoder may change the behavior of the ColorspaceConversionMode + // Auto mode in the future. All outputs in a playlist must use the same ColorSpaceConversionMode. + // If you do not specify a ColorSpaceConversionMode, Elastic Transcoder does + // not change the color space of a file. If you are unsure what ColorSpaceConversionMode + // was applied to your output file, you can check the AppliedColorSpaceConversion + // parameter included in your job response. 
If your job does not have an AppliedColorSpaceConversion + // in its response, no ColorSpaceConversionMode was applied. + // + // ChromaSubsampling + // + // The sampling pattern for the chroma (color) channels of the output video. + // Valid values include yuv420p and yuv422p. + // + // yuv420p samples the chroma information of every other horizontal and every + // other vertical line, yuv422p samples the color information of every horizontal + // line and every other vertical line. + // + // LoopCount (Gif Only) + // + // The number of times you want the output gif to loop. Valid values include + // Infinite and integers between 0 and 100, inclusive. + CodecOptions map[string]*string `type:"map"` + + // The value that Elastic Transcoder adds to the metadata in the output file. + DisplayAspectRatio *string `type:"string"` + + // Applicable only when the value of Video:Codec is one of H.264, MPEG2, or + // VP8. + // + // Whether to use a fixed value for FixedGOP. Valid values are true and false: + // + // true: Elastic Transcoder uses the value of KeyframesMaxDist for the distance + // between key frames (the number of frames in a group of pictures, or GOP). + // false: The distance between key frames can vary. FixedGOP must be set to + // true for fmp4 containers. + FixedGOP *string `type:"string"` + + // The frames per second for the video stream in the output file. Valid values + // include: + // + // auto, 10, 15, 23.97, 24, 25, 29.97, 30, 60 + // + // If you specify auto, Elastic Transcoder uses the detected frame rate of + // the input source. If you specify a frame rate, we recommend that you perform + // the following calculation: + // + // Frame rate = maximum recommended decoding speed in luma samples/second + // / (width in pixels * height in pixels) + // + // where: + // + // width in pixels and height in pixels represent the Resolution of the output + // video. 
maximum recommended decoding speed in Luma samples/second is less + // than or equal to the maximum value listed in the following table, based on + // the value that you specified for Level. The maximum recommended decoding + // speed in Luma samples/second for each level is described in the following + // list (Level - Decoding speed): + // + // 1 - 380160 1b - 380160 1.1 - 76800 1.2 - 1536000 1.3 - 3041280 2 - 3041280 + // 2.1 - 5068800 2.2 - 5184000 3 - 10368000 3.1 - 27648000 3.2 - 55296000 4 + // - 62914560 4.1 - 62914560 + FrameRate *string `type:"string"` + + // Applicable only when the value of Video:Codec is one of H.264, MPEG2, or + // VP8. + // + // The maximum number of frames between key frames. Key frames are fully encoded + // frames; the frames between key frames are encoded based, in part, on the + // content of the key frames. The value is an integer formatted as a string; + // valid values are between 1 (every frame is a key frame) and 100000, inclusive. + // A higher value results in higher compression but may also discernibly decrease + // video quality. + // + // For Smooth outputs, the FrameRate must have a constant ratio to the KeyframesMaxDist. + // This allows Smooth playlists to switch between different quality levels while + // the file is being played. + // + // For example, an input file can have a FrameRate of 30 with a KeyframesMaxDist + // of 90. The output file then needs to have a ratio of 1:3. Valid outputs would + // have FrameRate of 30, 25, and 10, and KeyframesMaxDist of 90, 75, and 30, + // respectively. + // + // Alternately, this can be achieved by setting FrameRate to auto and having + // the same values for MaxFrameRate and KeyframesMaxDist. + KeyframesMaxDist *string `type:"string"` + + // If you specify auto for FrameRate, Elastic Transcoder uses the frame rate + // of the input video for the frame rate of the output video. 
Specify the maximum + // frame rate that you want Elastic Transcoder to use when the frame rate of + // the input video is greater than the desired maximum frame rate of the output + // video. Valid values include: 10, 15, 23.97, 24, 25, 29.97, 30, 60. + MaxFrameRate *string `type:"string"` + + // The maximum height of the output video in pixels. If you specify auto, Elastic + // Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric + // value, enter an even integer between 96 and 3072. + MaxHeight *string `type:"string"` + + // The maximum width of the output video in pixels. If you specify auto, Elastic + // Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric + // value, enter an even integer between 128 and 4096. + MaxWidth *string `type:"string"` + + // When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars + // to the top and bottom and/or left and right sides of the output video to + // make the total size of the output video match the values that you specified + // for MaxWidth and MaxHeight. + PaddingPolicy *string `type:"string"` + + // To better control resolution and aspect ratio of output videos, we recommend + // that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, + // and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups + // of settings are mutually exclusive. Do not use them together. + // + // The width and height of the video in the output file, in pixels. Valid + // values are auto and width x height: + // + // auto: Elastic Transcoder attempts to preserve the width and height of + // the input file, subject to the following rules. width x height: The width + // and height of the output video in pixels. Note the following about specifying + // the width and height: + // + // The width must be an even integer between 128 and 4096, inclusive. The + // height must be an even integer between 96 and 3072, inclusive. 
If you specify + // a resolution that is less than the resolution of the input file, Elastic + // Transcoder rescales the output file to the lower resolution. If you specify + // a resolution that is greater than the resolution of the input file, Elastic + // Transcoder rescales the output to the higher resolution. We recommend that + // you specify a resolution for which the product of width and height is less + // than or equal to the applicable value in the following list (List - Max width + // x height value): 1 - 25344 1b - 25344 1.1 - 101376 1.2 - 101376 1.3 - 101376 + // 2 - 101376 2.1 - 202752 2.2 - 404720 3 - 404720 3.1 - 921600 3.2 - 1310720 + // 4 - 2097152 4.1 - 2097152 + Resolution *string `type:"string"` + + // Specify one of the following values to control scaling of the output video: + // + // Fit: Elastic Transcoder scales the output video so it matches the value + // that you specified in either MaxWidth or MaxHeight without exceeding the + // other value. Fill: Elastic Transcoder scales the output video so it matches + // the value that you specified in either MaxWidth or MaxHeight and matches + // or exceeds the other value. Elastic Transcoder centers the output video and + // then crops it in the dimension (if any) that exceeds the maximum value. + // Stretch: Elastic Transcoder stretches the output video to match the values + // that you specified for MaxWidth and MaxHeight. If the relative proportions + // of the input video and the output video are different, the output video will + // be distorted. Keep: Elastic Transcoder does not scale the output video. + // If either dimension of the input video exceeds the values that you specified + // for MaxWidth and MaxHeight, Elastic Transcoder crops the output video. ShrinkToFit: + // Elastic Transcoder scales the output video down so that its dimensions match + // the values that you specified for at least one of MaxWidth and MaxHeight + // without exceeding either value. 
If you specify this option, Elastic Transcoder + // does not scale the video up. ShrinkToFill: Elastic Transcoder scales the + // output video down so that its dimensions match the values that you specified + // for at least one of MaxWidth and MaxHeight without dropping below either + // value. If you specify this option, Elastic Transcoder does not scale the + // video up. + SizingPolicy *string `type:"string"` + + // Settings for the size, location, and opacity of graphics that you want Elastic + // Transcoder to overlay over videos that are transcoded using this preset. + // You can specify settings for up to four watermarks. Watermarks appear in + // the specified size and location, and with the specified opacity for the duration + // of the transcoded video. + // + // Watermarks can be in .png or .jpg format. If you want to display a watermark + // that is not rectangular, use the .png format, which supports transparency. + // + // When you create a job that uses this preset, you specify the .png or .jpg + // graphics that you want Elastic Transcoder to include in the transcoded videos. + // You can specify fewer graphics in the job than you specify watermark settings + // in the preset, which allows you to use the same preset for up to four watermarks + // that have different dimensions. + Watermarks []*PresetWatermark `type:"list"` +} + +// String returns the string representation +func (s VideoParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VideoParameters) GoString() string { + return s.String() +} + +// Elastic Transcoder returns a warning if the resources used by your pipeline +// are not in the same region as the pipeline. +// +// Using resources in the same region, such as your Amazon S3 buckets, Amazon +// SNS notification topics, and AWS KMS key, reduces processing time and prevents +// cross-regional charges. 
+type Warning struct { + _ struct{} `type:"structure"` + + // The code of the cross-regional warning. + Code *string `type:"string"` + + // The message explaining what resources are in a different region from the + // pipeline. + // + // Note: AWS KMS keys must be in the same region as the pipeline. + Message *string `type:"string"` +} + +// String returns the string representation +func (s Warning) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Warning) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/elastictranscoderiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elastictranscoderiface provides an interface for the Amazon Elastic Transcoder. +package elastictranscoderiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/elastictranscoder" +) + +// ElasticTranscoderAPI is the interface type for elastictranscoder.ElasticTranscoder. 
+type ElasticTranscoderAPI interface { + CancelJobRequest(*elastictranscoder.CancelJobInput) (*request.Request, *elastictranscoder.CancelJobOutput) + + CancelJob(*elastictranscoder.CancelJobInput) (*elastictranscoder.CancelJobOutput, error) + + CreateJobRequest(*elastictranscoder.CreateJobInput) (*request.Request, *elastictranscoder.CreateJobResponse) + + CreateJob(*elastictranscoder.CreateJobInput) (*elastictranscoder.CreateJobResponse, error) + + CreatePipelineRequest(*elastictranscoder.CreatePipelineInput) (*request.Request, *elastictranscoder.CreatePipelineOutput) + + CreatePipeline(*elastictranscoder.CreatePipelineInput) (*elastictranscoder.CreatePipelineOutput, error) + + CreatePresetRequest(*elastictranscoder.CreatePresetInput) (*request.Request, *elastictranscoder.CreatePresetOutput) + + CreatePreset(*elastictranscoder.CreatePresetInput) (*elastictranscoder.CreatePresetOutput, error) + + DeletePipelineRequest(*elastictranscoder.DeletePipelineInput) (*request.Request, *elastictranscoder.DeletePipelineOutput) + + DeletePipeline(*elastictranscoder.DeletePipelineInput) (*elastictranscoder.DeletePipelineOutput, error) + + DeletePresetRequest(*elastictranscoder.DeletePresetInput) (*request.Request, *elastictranscoder.DeletePresetOutput) + + DeletePreset(*elastictranscoder.DeletePresetInput) (*elastictranscoder.DeletePresetOutput, error) + + ListJobsByPipelineRequest(*elastictranscoder.ListJobsByPipelineInput) (*request.Request, *elastictranscoder.ListJobsByPipelineOutput) + + ListJobsByPipeline(*elastictranscoder.ListJobsByPipelineInput) (*elastictranscoder.ListJobsByPipelineOutput, error) + + ListJobsByPipelinePages(*elastictranscoder.ListJobsByPipelineInput, func(*elastictranscoder.ListJobsByPipelineOutput, bool) bool) error + + ListJobsByStatusRequest(*elastictranscoder.ListJobsByStatusInput) (*request.Request, *elastictranscoder.ListJobsByStatusOutput) + + ListJobsByStatus(*elastictranscoder.ListJobsByStatusInput) (*elastictranscoder.ListJobsByStatusOutput, 
error) + + ListJobsByStatusPages(*elastictranscoder.ListJobsByStatusInput, func(*elastictranscoder.ListJobsByStatusOutput, bool) bool) error + + ListPipelinesRequest(*elastictranscoder.ListPipelinesInput) (*request.Request, *elastictranscoder.ListPipelinesOutput) + + ListPipelines(*elastictranscoder.ListPipelinesInput) (*elastictranscoder.ListPipelinesOutput, error) + + ListPipelinesPages(*elastictranscoder.ListPipelinesInput, func(*elastictranscoder.ListPipelinesOutput, bool) bool) error + + ListPresetsRequest(*elastictranscoder.ListPresetsInput) (*request.Request, *elastictranscoder.ListPresetsOutput) + + ListPresets(*elastictranscoder.ListPresetsInput) (*elastictranscoder.ListPresetsOutput, error) + + ListPresetsPages(*elastictranscoder.ListPresetsInput, func(*elastictranscoder.ListPresetsOutput, bool) bool) error + + ReadJobRequest(*elastictranscoder.ReadJobInput) (*request.Request, *elastictranscoder.ReadJobOutput) + + ReadJob(*elastictranscoder.ReadJobInput) (*elastictranscoder.ReadJobOutput, error) + + ReadPipelineRequest(*elastictranscoder.ReadPipelineInput) (*request.Request, *elastictranscoder.ReadPipelineOutput) + + ReadPipeline(*elastictranscoder.ReadPipelineInput) (*elastictranscoder.ReadPipelineOutput, error) + + ReadPresetRequest(*elastictranscoder.ReadPresetInput) (*request.Request, *elastictranscoder.ReadPresetOutput) + + ReadPreset(*elastictranscoder.ReadPresetInput) (*elastictranscoder.ReadPresetOutput, error) + + TestRoleRequest(*elastictranscoder.TestRoleInput) (*request.Request, *elastictranscoder.TestRoleOutput) + + TestRole(*elastictranscoder.TestRoleInput) (*elastictranscoder.TestRoleOutput, error) + + UpdatePipelineRequest(*elastictranscoder.UpdatePipelineInput) (*request.Request, *elastictranscoder.UpdatePipelineOutput) + + UpdatePipeline(*elastictranscoder.UpdatePipelineInput) (*elastictranscoder.UpdatePipelineOutput, error) + + UpdatePipelineNotificationsRequest(*elastictranscoder.UpdatePipelineNotificationsInput) (*request.Request, 
*elastictranscoder.UpdatePipelineNotificationsOutput) + + UpdatePipelineNotifications(*elastictranscoder.UpdatePipelineNotificationsInput) (*elastictranscoder.UpdatePipelineNotificationsOutput, error) + + UpdatePipelineStatusRequest(*elastictranscoder.UpdatePipelineStatusInput) (*request.Request, *elastictranscoder.UpdatePipelineStatusOutput) + + UpdatePipelineStatus(*elastictranscoder.UpdatePipelineStatusInput) (*elastictranscoder.UpdatePipelineStatusOutput, error) +} + +var _ ElasticTranscoderAPI = (*elastictranscoder.ElasticTranscoder)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,737 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elastictranscoder_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elastictranscoder" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleElasticTranscoder_CancelJob() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.CancelJobInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.CancelJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticTranscoder_CreateJob() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.CreateJobInput{ + Input: &elastictranscoder.JobInput{ // Required + AspectRatio: aws.String("AspectRatio"), + Container: aws.String("JobContainer"), + DetectedProperties: &elastictranscoder.DetectedProperties{ + DurationMillis: aws.Int64(1), + FileSize: aws.Int64(1), + FrameRate: aws.String("FloatString"), + Height: aws.Int64(1), + Width: aws.Int64(1), + }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + FrameRate: aws.String("FrameRate"), + Interlaced: aws.String("Interlaced"), + Key: aws.String("Key"), + Resolution: aws.String("Resolution"), + }, + PipelineId: aws.String("Id"), // Required + Output: &elastictranscoder.CreateJobOutput{ + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... 
+ }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... 
+ }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + OutputKeyPrefix: aws.String("Key"), + Outputs: []*elastictranscoder.CreateJobOutput{ + { // Required + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... 
+ }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... 
+ }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, + }, + // More values... + }, + Playlists: []*elastictranscoder.CreateJobPlaylist{ + { // Required + Format: aws.String("PlaylistFormat"), + HlsContentProtection: &elastictranscoder.HlsContentProtection{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + KeyStoragePolicy: aws.String("KeyStoragePolicy"), + LicenseAcquisitionUrl: aws.String("ZeroTo512String"), + Method: aws.String("HlsContentProtectionMethod"), + }, + Name: aws.String("Filename"), + OutputKeys: []*string{ + aws.String("Key"), // Required + // More values... 
+ }, + PlayReadyDrm: &elastictranscoder.PlayReadyDrm{ + Format: aws.String("PlayReadyDrmFormatString"), + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("NonEmptyBase64EncodedString"), + KeyId: aws.String("KeyIdGuid"), + KeyMd5: aws.String("NonEmptyBase64EncodedString"), + LicenseAcquisitionUrl: aws.String("OneTo512String"), + }, + }, + // More values... + }, + UserMetadata: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_CreatePipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.CreatePipelineInput{ + InputBucket: aws.String("BucketName"), // Required + Name: aws.String("Name"), // Required + Role: aws.String("Role"), // Required + AwsKmsKeyArn: aws.String("KeyArn"), + ContentConfig: &elastictranscoder.PipelineOutputConfig{ + Bucket: aws.String("BucketName"), + Permissions: []*elastictranscoder.Permission{ + { // Required + Access: []*string{ + aws.String("AccessControl"), // Required + // More values... + }, + Grantee: aws.String("Grantee"), + GranteeType: aws.String("GranteeType"), + }, + // More values... + }, + StorageClass: aws.String("StorageClass"), + }, + Notifications: &elastictranscoder.Notifications{ + Completed: aws.String("SnsTopic"), + Error: aws.String("SnsTopic"), + Progressing: aws.String("SnsTopic"), + Warning: aws.String("SnsTopic"), + }, + OutputBucket: aws.String("BucketName"), + ThumbnailConfig: &elastictranscoder.PipelineOutputConfig{ + Bucket: aws.String("BucketName"), + Permissions: []*elastictranscoder.Permission{ + { // Required + Access: []*string{ + aws.String("AccessControl"), // Required + // More values... 
+ }, + Grantee: aws.String("Grantee"), + GranteeType: aws.String("GranteeType"), + }, + // More values... + }, + StorageClass: aws.String("StorageClass"), + }, + } + resp, err := svc.CreatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_CreatePreset() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.CreatePresetInput{ + Container: aws.String("PresetContainer"), // Required + Name: aws.String("Name"), // Required + Audio: &elastictranscoder.AudioParameters{ + AudioPackingMode: aws.String("AudioPackingMode"), + BitRate: aws.String("AudioBitRate"), + Channels: aws.String("AudioChannels"), + Codec: aws.String("AudioCodec"), + CodecOptions: &elastictranscoder.AudioCodecOptions{ + BitDepth: aws.String("AudioBitDepth"), + BitOrder: aws.String("AudioBitOrder"), + Profile: aws.String("AudioCodecProfile"), + Signed: aws.String("AudioSigned"), + }, + SampleRate: aws.String("AudioSampleRate"), + }, + Description: aws.String("Description"), + Thumbnails: &elastictranscoder.Thumbnails{ + AspectRatio: aws.String("AspectRatio"), + Format: aws.String("JpgOrPng"), + Interval: aws.String("Digits"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + Resolution: aws.String("ThumbnailResolution"), + SizingPolicy: aws.String("SizingPolicy"), + }, + Video: &elastictranscoder.VideoParameters{ + AspectRatio: aws.String("AspectRatio"), + BitRate: aws.String("VideoBitRate"), + Codec: aws.String("VideoCodec"), + CodecOptions: map[string]*string{ + "Key": aws.String("CodecOption"), // Required + // More values... 
+ }, + DisplayAspectRatio: aws.String("AspectRatio"), + FixedGOP: aws.String("FixedGOP"), + FrameRate: aws.String("FrameRate"), + KeyframesMaxDist: aws.String("KeyframesMaxDist"), + MaxFrameRate: aws.String("MaxFrameRate"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + Resolution: aws.String("Resolution"), + SizingPolicy: aws.String("SizingPolicy"), + Watermarks: []*elastictranscoder.PresetWatermark{ + { // Required + HorizontalAlign: aws.String("HorizontalAlign"), + HorizontalOffset: aws.String("PixelsOrPercent"), + Id: aws.String("PresetWatermarkId"), + MaxHeight: aws.String("PixelsOrPercent"), + MaxWidth: aws.String("PixelsOrPercent"), + Opacity: aws.String("Opacity"), + SizingPolicy: aws.String("WatermarkSizingPolicy"), + Target: aws.String("Target"), + VerticalAlign: aws.String("VerticalAlign"), + VerticalOffset: aws.String("PixelsOrPercent"), + }, + // More values... + }, + }, + } + resp, err := svc.CreatePreset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_DeletePipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.DeletePipelineInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.DeletePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticTranscoder_DeletePreset() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.DeletePresetInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.DeletePreset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ListJobsByPipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ListJobsByPipelineInput{ + PipelineId: aws.String("Id"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + resp, err := svc.ListJobsByPipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ListJobsByStatus() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ListJobsByStatusInput{ + Status: aws.String("JobStatus"), // Required + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + resp, err := svc.ListJobsByStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ListPipelines() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ListPipelinesInput{ + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + resp, err := svc.ListPipelines(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ListPresets() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ListPresetsInput{ + Ascending: aws.String("Ascending"), + PageToken: aws.String("Id"), + } + resp, err := svc.ListPresets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ReadJob() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ReadJobInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.ReadJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ReadPipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ReadPipelineInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.ReadPipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_ReadPreset() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.ReadPresetInput{ + Id: aws.String("Id"), // Required + } + resp, err := svc.ReadPreset(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleElasticTranscoder_TestRole() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.TestRoleInput{ + InputBucket: aws.String("BucketName"), // Required + OutputBucket: aws.String("BucketName"), // Required + Role: aws.String("Role"), // Required + Topics: []*string{ // Required + aws.String("SnsTopic"), // Required + // More values... + }, + } + resp, err := svc.TestRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_UpdatePipeline() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.UpdatePipelineInput{ + Id: aws.String("Id"), // Required + AwsKmsKeyArn: aws.String("KeyArn"), + ContentConfig: &elastictranscoder.PipelineOutputConfig{ + Bucket: aws.String("BucketName"), + Permissions: []*elastictranscoder.Permission{ + { // Required + Access: []*string{ + aws.String("AccessControl"), // Required + // More values... + }, + Grantee: aws.String("Grantee"), + GranteeType: aws.String("GranteeType"), + }, + // More values... + }, + StorageClass: aws.String("StorageClass"), + }, + InputBucket: aws.String("BucketName"), + Name: aws.String("Name"), + Notifications: &elastictranscoder.Notifications{ + Completed: aws.String("SnsTopic"), + Error: aws.String("SnsTopic"), + Progressing: aws.String("SnsTopic"), + Warning: aws.String("SnsTopic"), + }, + Role: aws.String("Role"), + ThumbnailConfig: &elastictranscoder.PipelineOutputConfig{ + Bucket: aws.String("BucketName"), + Permissions: []*elastictranscoder.Permission{ + { // Required + Access: []*string{ + aws.String("AccessControl"), // Required + // More values... + }, + Grantee: aws.String("Grantee"), + GranteeType: aws.String("GranteeType"), + }, + // More values... 
+ }, + StorageClass: aws.String("StorageClass"), + }, + } + resp, err := svc.UpdatePipeline(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_UpdatePipelineNotifications() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.UpdatePipelineNotificationsInput{ + Id: aws.String("Id"), // Required + Notifications: &elastictranscoder.Notifications{ // Required + Completed: aws.String("SnsTopic"), + Error: aws.String("SnsTopic"), + Progressing: aws.String("SnsTopic"), + Warning: aws.String("SnsTopic"), + }, + } + resp, err := svc.UpdatePipelineNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleElasticTranscoder_UpdatePipelineStatus() { + svc := elastictranscoder.New(session.New()) + + params := &elastictranscoder.UpdatePipelineStatusInput{ + Id: aws.String("Id"), // Required + Status: aws.String("PipelineStatus"), // Required + } + resp, err := svc.UpdatePipelineStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elastictranscoder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// The AWS Elastic Transcoder Service. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type ElasticTranscoder struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "elastictranscoder" + +// New creates a new instance of the ElasticTranscoder client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ElasticTranscoder client from just a session. 
+// svc := elastictranscoder.New(mySession) +// +// // Create a ElasticTranscoder client with additional configuration +// svc := elastictranscoder.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticTranscoder { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ElasticTranscoder { + svc := &ElasticTranscoder{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-09-25", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ElasticTranscoder operation and runs any +// custom request initialization. 
+func (c *ElasticTranscoder) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elastictranscoder/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,42 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elastictranscoder + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ElasticTranscoder) WaitUntilJobComplete(input *ReadJobInput) error { + waiterCfg := waiter.Config{ + Operation: "ReadJob", + Delay: 30, + MaxAttempts: 120, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Job.Status", + Expected: "Complete", + }, + { + State: "failure", + Matcher: "path", + Argument: "Job.Status", + Expected: "Canceled", + }, + { + State: "failure", + Matcher: "path", + Argument: "Job.Status", + Expected: "Error", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/api.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2784 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elb provides a client for Elastic Load Balancing. +package elb + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a request for the AddTags operation. +func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds the specified tags to the specified load balancer. Each load balancer +// can have a maximum of 10 tags. +// +// Each tag consists of a key and an optional value. If a tag with the same +// key is already associated with the load balancer, AddTags updates its value. +// +// For more information, see Tag Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/add-remove-tags.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opApplySecurityGroupsToLoadBalancer = "ApplySecurityGroupsToLoadBalancer" + +// ApplySecurityGroupsToLoadBalancerRequest generates a request for the ApplySecurityGroupsToLoadBalancer operation. 
+func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroupsToLoadBalancerInput) (req *request.Request, output *ApplySecurityGroupsToLoadBalancerOutput) { + op := &request.Operation{ + Name: opApplySecurityGroupsToLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ApplySecurityGroupsToLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplySecurityGroupsToLoadBalancerOutput{} + req.Data = output + return +} + +// Associates one or more security groups with your load balancer in a virtual +// private cloud (VPC). The specified security groups override the previously +// associated security groups. +// +// For more information, see Security Groups for Load Balancers in a VPC (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-groups.html#elb-vpc-security-groups) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) ApplySecurityGroupsToLoadBalancer(input *ApplySecurityGroupsToLoadBalancerInput) (*ApplySecurityGroupsToLoadBalancerOutput, error) { + req, out := c.ApplySecurityGroupsToLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opAttachLoadBalancerToSubnets = "AttachLoadBalancerToSubnets" + +// AttachLoadBalancerToSubnetsRequest generates a request for the AttachLoadBalancerToSubnets operation. +func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubnetsInput) (req *request.Request, output *AttachLoadBalancerToSubnetsOutput) { + op := &request.Operation{ + Name: opAttachLoadBalancerToSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachLoadBalancerToSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachLoadBalancerToSubnetsOutput{} + req.Data = output + return +} + +// Adds one or more subnets to the set of configured subnets for the specified +// load balancer. 
+// +// The load balancer evenly distributes requests across all registered subnets. +// For more information, see Add or Remove Subnets for Your Load Balancer in +// a VPC (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-manage-subnets.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) AttachLoadBalancerToSubnets(input *AttachLoadBalancerToSubnetsInput) (*AttachLoadBalancerToSubnetsOutput, error) { + req, out := c.AttachLoadBalancerToSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opConfigureHealthCheck = "ConfigureHealthCheck" + +// ConfigureHealthCheckRequest generates a request for the ConfigureHealthCheck operation. +func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req *request.Request, output *ConfigureHealthCheckOutput) { + op := &request.Operation{ + Name: opConfigureHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfigureHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfigureHealthCheckOutput{} + req.Data = output + return +} + +// Specifies the health check settings to use when evaluating the health state +// of your back-end instances. +// +// For more information, see Configure Health Checks (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) ConfigureHealthCheck(input *ConfigureHealthCheckInput) (*ConfigureHealthCheckOutput, error) { + req, out := c.ConfigureHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opCreateAppCookieStickinessPolicy = "CreateAppCookieStickinessPolicy" + +// CreateAppCookieStickinessPolicyRequest generates a request for the CreateAppCookieStickinessPolicy operation. 
+func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStickinessPolicyInput) (req *request.Request, output *CreateAppCookieStickinessPolicyOutput) { + op := &request.Operation{ + Name: opCreateAppCookieStickinessPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAppCookieStickinessPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAppCookieStickinessPolicyOutput{} + req.Data = output + return +} + +// Generates a stickiness policy with sticky session lifetimes that follow that +// of an application-generated cookie. This policy can be associated only with +// HTTP/HTTPS listeners. +// +// This policy is similar to the policy created by CreateLBCookieStickinessPolicy, +// except that the lifetime of the special Elastic Load Balancing cookie, AWSELB, +// follows the lifetime of the application-generated cookie specified in the +// policy configuration. The load balancer only inserts a new stickiness cookie +// when the application response includes a new application cookie. +// +// If the application cookie is explicitly removed or expires, the session +// stops being sticky until a new application cookie is issued. +// +// For more information, see Application-Controlled Session Stickiness (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-sticky-sessions.html#enable-sticky-sessions-application) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) CreateAppCookieStickinessPolicy(input *CreateAppCookieStickinessPolicyInput) (*CreateAppCookieStickinessPolicyOutput, error) { + req, out := c.CreateAppCookieStickinessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateLBCookieStickinessPolicy = "CreateLBCookieStickinessPolicy" + +// CreateLBCookieStickinessPolicyRequest generates a request for the CreateLBCookieStickinessPolicy operation. 
+func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickinessPolicyInput) (req *request.Request, output *CreateLBCookieStickinessPolicyOutput) { + op := &request.Operation{ + Name: opCreateLBCookieStickinessPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLBCookieStickinessPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLBCookieStickinessPolicyOutput{} + req.Data = output + return +} + +// Generates a stickiness policy with sticky session lifetimes controlled by +// the lifetime of the browser (user-agent) or a specified expiration period. +// This policy can be associated only with HTTP/HTTPS listeners. +// +// When a load balancer implements this policy, the load balancer uses a special +// cookie to track the back-end server instance for each request. When the load +// balancer receives a request, it first checks to see if this cookie is present +// in the request. If so, the load balancer sends the request to the application +// server specified in the cookie. If not, the load balancer sends the request +// to a server that is chosen based on the existing load-balancing algorithm. +// +// A cookie is inserted into the response for binding subsequent requests from +// the same user to that server. The validity of the cookie is based on the +// cookie expiration time, which is specified in the policy configuration. +// +// For more information, see Duration-Based Session Stickiness (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-sticky-sessions.html#enable-sticky-sessions-duration) +// in the Elastic Load Balancing Developer Guide. 
+func (c *ELB) CreateLBCookieStickinessPolicy(input *CreateLBCookieStickinessPolicyInput) (*CreateLBCookieStickinessPolicyOutput, error) { + req, out := c.CreateLBCookieStickinessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoadBalancer = "CreateLoadBalancer" + +// CreateLoadBalancerRequest generates a request for the CreateLoadBalancer operation. +func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *request.Request, output *CreateLoadBalancerOutput) { + op := &request.Operation{ + Name: opCreateLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoadBalancerOutput{} + req.Data = output + return +} + +// Creates a load balancer. +// +// If the call completes successfully, a new load balancer is created with +// a unique Domain Name Service (DNS) name. The load balancer receives incoming +// traffic and routes it to the registered instances. For more information, +// see How Elastic Load Balancing Works (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/how-elb-works.html) +// in the Elastic Load Balancing Developer Guide. +// +// You can create up to 20 load balancers per region per account. You can request +// an increase for the number of load balancers for your account. For more information, +// see Elastic Load Balancing Limits (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-limits.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) CreateLoadBalancer(input *CreateLoadBalancerInput) (*CreateLoadBalancerOutput, error) { + req, out := c.CreateLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoadBalancerListeners = "CreateLoadBalancerListeners" + +// CreateLoadBalancerListenersRequest generates a request for the CreateLoadBalancerListeners operation. 
+func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListenersInput) (req *request.Request, output *CreateLoadBalancerListenersOutput) { + op := &request.Operation{ + Name: opCreateLoadBalancerListeners, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoadBalancerListenersInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoadBalancerListenersOutput{} + req.Data = output + return +} + +// Creates one or more listeners for the specified load balancer. If a listener +// with the specified port does not already exist, it is created; otherwise, +// the properties of the new listener must match the properties of the existing +// listener. +// +// For more information, see Add a Listener to Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/us-add-listener.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) CreateLoadBalancerListeners(input *CreateLoadBalancerListenersInput) (*CreateLoadBalancerListenersOutput, error) { + req, out := c.CreateLoadBalancerListenersRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoadBalancerPolicy = "CreateLoadBalancerPolicy" + +// CreateLoadBalancerPolicyRequest generates a request for the CreateLoadBalancerPolicy operation. +func (c *ELB) CreateLoadBalancerPolicyRequest(input *CreateLoadBalancerPolicyInput) (req *request.Request, output *CreateLoadBalancerPolicyOutput) { + op := &request.Operation{ + Name: opCreateLoadBalancerPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoadBalancerPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoadBalancerPolicyOutput{} + req.Data = output + return +} + +// Creates a policy with the specified attributes for the specified load balancer. 
+// +// Policies are settings that are saved for your load balancer and that can +// be applied to the front-end listener or the back-end application server, +// depending on the policy type. +func (c *ELB) CreateLoadBalancerPolicy(input *CreateLoadBalancerPolicyInput) (*CreateLoadBalancerPolicyOutput, error) { + req, out := c.CreateLoadBalancerPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoadBalancer = "DeleteLoadBalancer" + +// DeleteLoadBalancerRequest generates a request for the DeleteLoadBalancer operation. +func (c *ELB) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (req *request.Request, output *DeleteLoadBalancerOutput) { + op := &request.Operation{ + Name: opDeleteLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLoadBalancerOutput{} + req.Data = output + return +} + +// Deletes the specified load balancer. +// +// If you are attempting to recreate a load balancer, you must reconfigure +// all settings. The DNS name associated with a deleted load balancer is no +// longer usable. The name and associated DNS record of the deleted load balancer +// no longer exist and traffic sent to any of its IP addresses is no longer +// delivered to back-end instances. +// +// If the load balancer does not exist or has already been deleted, the call +// to DeleteLoadBalancer still succeeds. +func (c *ELB) DeleteLoadBalancer(input *DeleteLoadBalancerInput) (*DeleteLoadBalancerOutput, error) { + req, out := c.DeleteLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoadBalancerListeners = "DeleteLoadBalancerListeners" + +// DeleteLoadBalancerListenersRequest generates a request for the DeleteLoadBalancerListeners operation. 
+func (c *ELB) DeleteLoadBalancerListenersRequest(input *DeleteLoadBalancerListenersInput) (req *request.Request, output *DeleteLoadBalancerListenersOutput) { + op := &request.Operation{ + Name: opDeleteLoadBalancerListeners, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoadBalancerListenersInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLoadBalancerListenersOutput{} + req.Data = output + return +} + +// Deletes the specified listeners from the specified load balancer. +func (c *ELB) DeleteLoadBalancerListeners(input *DeleteLoadBalancerListenersInput) (*DeleteLoadBalancerListenersOutput, error) { + req, out := c.DeleteLoadBalancerListenersRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoadBalancerPolicy = "DeleteLoadBalancerPolicy" + +// DeleteLoadBalancerPolicyRequest generates a request for the DeleteLoadBalancerPolicy operation. +func (c *ELB) DeleteLoadBalancerPolicyRequest(input *DeleteLoadBalancerPolicyInput) (req *request.Request, output *DeleteLoadBalancerPolicyOutput) { + op := &request.Operation{ + Name: opDeleteLoadBalancerPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoadBalancerPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteLoadBalancerPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified policy from the specified load balancer. This policy +// must not be enabled for any listeners. +func (c *ELB) DeleteLoadBalancerPolicy(input *DeleteLoadBalancerPolicyInput) (*DeleteLoadBalancerPolicyOutput, error) { + req, out := c.DeleteLoadBalancerPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterInstancesFromLoadBalancer = "DeregisterInstancesFromLoadBalancer" + +// DeregisterInstancesFromLoadBalancerRequest generates a request for the DeregisterInstancesFromLoadBalancer operation. 
+func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstancesFromLoadBalancerInput) (req *request.Request, output *DeregisterInstancesFromLoadBalancerOutput) { + op := &request.Operation{ + Name: opDeregisterInstancesFromLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterInstancesFromLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &DeregisterInstancesFromLoadBalancerOutput{} + req.Data = output + return +} + +// Deregisters the specified instances from the specified load balancer. After +// the instance is deregistered, it no longer receives traffic from the load +// balancer. +// +// You can use DescribeLoadBalancers to verify that the instance is deregistered +// from the load balancer. +// +// For more information, see Deregister and Register Amazon EC2 Instances (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_DeReg_Reg_Instances.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) DeregisterInstancesFromLoadBalancer(input *DeregisterInstancesFromLoadBalancerInput) (*DeregisterInstancesFromLoadBalancerOutput, error) { + req, out := c.DeregisterInstancesFromLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceHealth = "DescribeInstanceHealth" + +// DescribeInstanceHealthRequest generates a request for the DescribeInstanceHealth operation. +func (c *ELB) DescribeInstanceHealthRequest(input *DescribeInstanceHealthInput) (req *request.Request, output *DescribeInstanceHealthOutput) { + op := &request.Operation{ + Name: opDescribeInstanceHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceHealthInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceHealthOutput{} + req.Data = output + return +} + +// Describes the state of the specified instances with respect to the specified +// load balancer. 
If no instances are specified, the call describes the state +// of all instances that are currently registered with the load balancer. If +// instances are specified, their state is returned even if they are no longer +// registered with the load balancer. The state of terminated instances is not +// returned. +func (c *ELB) DescribeInstanceHealth(input *DescribeInstanceHealthInput) (*DescribeInstanceHealthOutput, error) { + req, out := c.DescribeInstanceHealthRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancerAttributes = "DescribeLoadBalancerAttributes" + +// DescribeLoadBalancerAttributesRequest generates a request for the DescribeLoadBalancerAttributes operation. +func (c *ELB) DescribeLoadBalancerAttributesRequest(input *DescribeLoadBalancerAttributesInput) (req *request.Request, output *DescribeLoadBalancerAttributesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerAttributesOutput{} + req.Data = output + return +} + +// Describes the attributes for the specified load balancer. +func (c *ELB) DescribeLoadBalancerAttributes(input *DescribeLoadBalancerAttributesInput) (*DescribeLoadBalancerAttributesOutput, error) { + req, out := c.DescribeLoadBalancerAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancerPolicies = "DescribeLoadBalancerPolicies" + +// DescribeLoadBalancerPoliciesRequest generates a request for the DescribeLoadBalancerPolicies operation. 
+func (c *ELB) DescribeLoadBalancerPoliciesRequest(input *DescribeLoadBalancerPoliciesInput) (req *request.Request, output *DescribeLoadBalancerPoliciesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerPoliciesOutput{} + req.Data = output + return +} + +// Describes the specified policies. +// +// If you specify a load balancer name, the action returns the descriptions +// of all policies created for the load balancer. If you specify a policy name +// associated with your load balancer, the action returns the description of +// that policy. If you don't specify a load balancer name, the action returns +// descriptions of the specified sample policies, or descriptions of all sample +// policies. The names of the sample policies have the ELBSample- prefix. +func (c *ELB) DescribeLoadBalancerPolicies(input *DescribeLoadBalancerPoliciesInput) (*DescribeLoadBalancerPoliciesOutput, error) { + req, out := c.DescribeLoadBalancerPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancerPolicyTypes = "DescribeLoadBalancerPolicyTypes" + +// DescribeLoadBalancerPolicyTypesRequest generates a request for the DescribeLoadBalancerPolicyTypes operation. +func (c *ELB) DescribeLoadBalancerPolicyTypesRequest(input *DescribeLoadBalancerPolicyTypesInput) (req *request.Request, output *DescribeLoadBalancerPolicyTypesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerPolicyTypes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerPolicyTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerPolicyTypesOutput{} + req.Data = output + return +} + +// Describes the specified load balancer policy types. 
+// +// You can use these policy types with CreateLoadBalancerPolicy to create policy +// configurations for a load balancer. +func (c *ELB) DescribeLoadBalancerPolicyTypes(input *DescribeLoadBalancerPolicyTypesInput) (*DescribeLoadBalancerPolicyTypesOutput, error) { + req, out := c.DescribeLoadBalancerPolicyTypesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBalancers = "DescribeLoadBalancers" + +// DescribeLoadBalancersRequest generates a request for the DescribeLoadBalancers operation. +func (c *ELB) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancersOutput{} + req.Data = output + return +} + +// Describes the specified load balancers. If no load balancers are specified, +// the call describes all of your load balancers. +func (c *ELB) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) { + req, out := c.DescribeLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +func (c *ELB) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn func(p *DescribeLoadBalancersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeLoadBalancersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeLoadBalancersOutput), lastPage) + }) +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a request for the DescribeTags operation. 
+func (c *ELB) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Describes the tags associated with the specified load balancers. +func (c *ELB) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opDetachLoadBalancerFromSubnets = "DetachLoadBalancerFromSubnets" + +// DetachLoadBalancerFromSubnetsRequest generates a request for the DetachLoadBalancerFromSubnets operation. +func (c *ELB) DetachLoadBalancerFromSubnetsRequest(input *DetachLoadBalancerFromSubnetsInput) (req *request.Request, output *DetachLoadBalancerFromSubnetsOutput) { + op := &request.Operation{ + Name: opDetachLoadBalancerFromSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachLoadBalancerFromSubnetsInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachLoadBalancerFromSubnetsOutput{} + req.Data = output + return +} + +// Removes the specified subnets from the set of configured subnets for the +// load balancer. +// +// After a subnet is removed, all EC2 instances registered with the load balancer +// in the removed subnet go into the OutOfService state. Then, the load balancer +// balances the traffic among the remaining routable subnets. 
+func (c *ELB) DetachLoadBalancerFromSubnets(input *DetachLoadBalancerFromSubnetsInput) (*DetachLoadBalancerFromSubnetsOutput, error) { + req, out := c.DetachLoadBalancerFromSubnetsRequest(input) + err := req.Send() + return out, err +} + +const opDisableAvailabilityZonesForLoadBalancer = "DisableAvailabilityZonesForLoadBalancer" + +// DisableAvailabilityZonesForLoadBalancerRequest generates a request for the DisableAvailabilityZonesForLoadBalancer operation. +func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvailabilityZonesForLoadBalancerInput) (req *request.Request, output *DisableAvailabilityZonesForLoadBalancerOutput) { + op := &request.Operation{ + Name: opDisableAvailabilityZonesForLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAvailabilityZonesForLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableAvailabilityZonesForLoadBalancerOutput{} + req.Data = output + return +} + +// Removes the specified Availability Zones from the set of Availability Zones +// for the specified load balancer. +// +// There must be at least one Availability Zone registered with a load balancer +// at all times. After an Availability Zone is removed, all instances registered +// with the load balancer that are in the removed Availability Zone go into +// the OutOfService state. Then, the load balancer attempts to equally balance +// the traffic among its remaining Availability Zones. +// +// For more information, see Disable an Availability Zone from a Load-Balanced +// Application (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_ShrinkLBApp04.html) +// in the Elastic Load Balancing Developer Guide. 
+func (c *ELB) DisableAvailabilityZonesForLoadBalancer(input *DisableAvailabilityZonesForLoadBalancerInput) (*DisableAvailabilityZonesForLoadBalancerOutput, error) { + req, out := c.DisableAvailabilityZonesForLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opEnableAvailabilityZonesForLoadBalancer = "EnableAvailabilityZonesForLoadBalancer" + +// EnableAvailabilityZonesForLoadBalancerRequest generates a request for the EnableAvailabilityZonesForLoadBalancer operation. +func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailabilityZonesForLoadBalancerInput) (req *request.Request, output *EnableAvailabilityZonesForLoadBalancerOutput) { + op := &request.Operation{ + Name: opEnableAvailabilityZonesForLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAvailabilityZonesForLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableAvailabilityZonesForLoadBalancerOutput{} + req.Data = output + return +} + +// Adds the specified Availability Zones to the set of Availability Zones for +// the specified load balancer. +// +// The load balancer evenly distributes requests across all its registered +// Availability Zones that contain instances. +// +// For more information, see Add Availability Zone (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_AddLBAvailabilityZone.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) EnableAvailabilityZonesForLoadBalancer(input *EnableAvailabilityZonesForLoadBalancerInput) (*EnableAvailabilityZonesForLoadBalancerOutput, error) { + req, out := c.EnableAvailabilityZonesForLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opModifyLoadBalancerAttributes = "ModifyLoadBalancerAttributes" + +// ModifyLoadBalancerAttributesRequest generates a request for the ModifyLoadBalancerAttributes operation. 
+func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttributesInput) (req *request.Request, output *ModifyLoadBalancerAttributesOutput) { + op := &request.Operation{ + Name: opModifyLoadBalancerAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyLoadBalancerAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyLoadBalancerAttributesOutput{} + req.Data = output + return +} + +// Modifies the attributes of the specified load balancer. +// +// You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, +// and CrossZoneLoadBalancing by either enabling or disabling them. Or, you +// can modify the load balancer attribute ConnectionSettings by specifying an +// idle connection timeout value for your load balancer. +// +// For more information, see the following in the Elastic Load Balancing Developer +// Guide: +// +// Cross-Zone Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#request-routing) +// Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) +// Access Logs (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/access-log-collection.html) +// Idle Connection Timeout (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#idle-timeout) +func (c *ELB) ModifyLoadBalancerAttributes(input *ModifyLoadBalancerAttributesInput) (*ModifyLoadBalancerAttributesOutput, error) { + req, out := c.ModifyLoadBalancerAttributesRequest(input) + err := req.Send() + return out, err +} + +const opRegisterInstancesWithLoadBalancer = "RegisterInstancesWithLoadBalancer" + +// RegisterInstancesWithLoadBalancerRequest generates a request for the RegisterInstancesWithLoadBalancer operation. 
+func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesWithLoadBalancerInput) (req *request.Request, output *RegisterInstancesWithLoadBalancerOutput) { + op := &request.Operation{ + Name: opRegisterInstancesWithLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterInstancesWithLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterInstancesWithLoadBalancerOutput{} + req.Data = output + return +} + +// Adds the specified instances to the specified load balancer. +// +// The instance must be a running instance in the same network as the load +// balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances +// and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic +// instances to that VPC and then register the linked EC2-Classic instances +// with the load balancer in the VPC. +// +// Note that RegisterInstancesWithLoadBalancer completes when the request has +// been registered. Instance registration takes a little time to complete. To +// check the state of the registered instances, use DescribeLoadBalancers or +// DescribeInstanceHealth. +// +// After the instance is registered, it starts receiving traffic and requests +// from the load balancer. Any instance that is not in one of the Availability +// Zones registered for the load balancer is moved to the OutOfService state. +// If an Availability Zone is added to the load balancer later, any instances +// registered with the load balancer move to the InService state. +// +// If you stop an instance registered with a load balancer and then start it, +// the IP addresses associated with the instance change. Elastic Load Balancing +// cannot recognize the new IP address, which prevents it from routing traffic +// to the instances. 
We recommend that you use the following sequence: stop +// the instance, deregister the instance, start the instance, and then register +// the instance. To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer. +// +// For more information, see Deregister and Register EC2 Instances (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_DeReg_Reg_Instances.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) RegisterInstancesWithLoadBalancer(input *RegisterInstancesWithLoadBalancerInput) (*RegisterInstancesWithLoadBalancerOutput, error) { + req, out := c.RegisterInstancesWithLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a request for the RemoveTags operation. +func (c *ELB) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes one or more tags from the specified load balancer. +func (c *ELB) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerListenerSSLCertificate = "SetLoadBalancerListenerSSLCertificate" + +// SetLoadBalancerListenerSSLCertificateRequest generates a request for the SetLoadBalancerListenerSSLCertificate operation. 
+func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalancerListenerSSLCertificateInput) (req *request.Request, output *SetLoadBalancerListenerSSLCertificateOutput) { + op := &request.Operation{ + Name: opSetLoadBalancerListenerSSLCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBalancerListenerSSLCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBalancerListenerSSLCertificateOutput{} + req.Data = output + return +} + +// Sets the certificate that terminates the specified listener's SSL connections. +// The specified certificate replaces any prior certificate that was used on +// the same load balancer and port. +// +// For more information about updating your SSL certificate, see Updating an +// SSL Certificate for a Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_UpdatingLoadBalancerSSL.html) +// in the Elastic Load Balancing Developer Guide. +func (c *ELB) SetLoadBalancerListenerSSLCertificate(input *SetLoadBalancerListenerSSLCertificateInput) (*SetLoadBalancerListenerSSLCertificateOutput, error) { + req, out := c.SetLoadBalancerListenerSSLCertificateRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerPoliciesForBackendServer = "SetLoadBalancerPoliciesForBackendServer" + +// SetLoadBalancerPoliciesForBackendServerRequest generates a request for the SetLoadBalancerPoliciesForBackendServer operation. 
+func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalancerPoliciesForBackendServerInput) (req *request.Request, output *SetLoadBalancerPoliciesForBackendServerOutput) { + op := &request.Operation{ + Name: opSetLoadBalancerPoliciesForBackendServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBalancerPoliciesForBackendServerInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBalancerPoliciesForBackendServerOutput{} + req.Data = output + return +} + +// Replaces the set of policies associated with the specified port on which +// the back-end server is listening with a new set of policies. At this time, +// only the back-end server authentication policy type can be applied to the +// back-end ports; this policy type is composed of multiple public key policies. +// +// Each time you use SetLoadBalancerPoliciesForBackendServer to enable the +// policies, use the PolicyNames parameter to list the policies that you want +// to enable. +// +// You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify +// that the policy is associated with the back-end server. +func (c *ELB) SetLoadBalancerPoliciesForBackendServer(input *SetLoadBalancerPoliciesForBackendServerInput) (*SetLoadBalancerPoliciesForBackendServerOutput, error) { + req, out := c.SetLoadBalancerPoliciesForBackendServerRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBalancerPoliciesOfListener = "SetLoadBalancerPoliciesOfListener" + +// SetLoadBalancerPoliciesOfListenerRequest generates a request for the SetLoadBalancerPoliciesOfListener operation. 
+func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPoliciesOfListenerInput) (req *request.Request, output *SetLoadBalancerPoliciesOfListenerOutput) { + op := &request.Operation{ + Name: opSetLoadBalancerPoliciesOfListener, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBalancerPoliciesOfListenerInput{} + } + + req = c.newRequest(op, input, output) + output = &SetLoadBalancerPoliciesOfListenerOutput{} + req.Data = output + return +} + +// Associates, updates, or disables a policy with a listener for the specified +// load balancer. You can associate multiple policies with a listener. +func (c *ELB) SetLoadBalancerPoliciesOfListener(input *SetLoadBalancerPoliciesOfListenerInput) (*SetLoadBalancerPoliciesOfListenerOutput, error) { + req, out := c.SetLoadBalancerPoliciesOfListenerRequest(input) + err := req.Send() + return out, err +} + +// Information about the AccessLog attribute. +type AccessLog struct { + _ struct{} `type:"structure"` + + // The interval for publishing the access logs. You can specify an interval + // of either 5 minutes or 60 minutes. + // + // Default: 60 minutes + EmitInterval *int64 `type:"integer"` + + // Specifies whether access log is enabled for the load balancer. + Enabled *bool `type:"boolean" required:"true"` + + // The name of the Amazon S3 bucket where the access logs are stored. + S3BucketName *string `type:"string"` + + // The logical hierarchy you created for your Amazon S3 bucket, for example + // my-bucket-prefix/prod. If the prefix is not provided, the log is placed at + // the root level of the bucket. 
+ S3BucketPrefix *string `type:"string"` +} + +// String returns the string representation +func (s AccessLog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessLog) GoString() string { + return s.String() +} + +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. You can specify one load balancer only. + LoadBalancerNames []*string `type:"list" required:"true"` + + // The tags. + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// This data type is reserved. +type AdditionalAttribute struct { + _ struct{} `type:"structure"` + + // This parameter is reserved. + Key *string `type:"string"` + + // This parameter is reserved. + Value *string `type:"string"` +} + +// String returns the string representation +func (s AdditionalAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdditionalAttribute) GoString() string { + return s.String() +} + +// Information about a policy for application-controlled session stickiness. +type AppCookieStickinessPolicy struct { + _ struct{} `type:"structure"` + + // The name of the application cookie used for stickiness. + CookieName *string `type:"string"` + + // The mnemonic name for the policy being created. The name must be unique within + // a set of policies for this load balancer. 
+ PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AppCookieStickinessPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AppCookieStickinessPolicy) GoString() string { + return s.String() +} + +type ApplySecurityGroupsToLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the security groups to associate with the load balancer. Note + // that you cannot specify the name of the security group. + SecurityGroups []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ApplySecurityGroupsToLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplySecurityGroupsToLoadBalancerInput) GoString() string { + return s.String() +} + +type ApplySecurityGroupsToLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the security groups associated with the load balancer. + SecurityGroups []*string `type:"list"` +} + +// String returns the string representation +func (s ApplySecurityGroupsToLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplySecurityGroupsToLoadBalancerOutput) GoString() string { + return s.String() +} + +type AttachLoadBalancerToSubnetsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the subnets to add for the load balancer. You can add only one + // subnet per Availability Zone. 
+ Subnets []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AttachLoadBalancerToSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerToSubnetsInput) GoString() string { + return s.String() +} + +type AttachLoadBalancerToSubnetsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the subnets attached to the load balancer. + Subnets []*string `type:"list"` +} + +// String returns the string representation +func (s AttachLoadBalancerToSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerToSubnetsOutput) GoString() string { + return s.String() +} + +// Information about the configuration of a back-end server. +type BackendServerDescription struct { + _ struct{} `type:"structure"` + + // The port on which the back-end server is listening. + InstancePort *int64 `min:"1" type:"integer"` + + // The names of the policies enabled for the back-end server. + PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s BackendServerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackendServerDescription) GoString() string { + return s.String() +} + +type ConfigureHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The configuration information for the new health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + // The name of the load balancer. 
+ LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfigureHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigureHealthCheckInput) GoString() string { + return s.String() +} + +type ConfigureHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // The updated health check. + HealthCheck *HealthCheck `type:"structure"` +} + +// String returns the string representation +func (s ConfigureHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigureHealthCheckOutput) GoString() string { + return s.String() +} + +// Information about the ConnectionDraining attribute. +type ConnectionDraining struct { + _ struct{} `type:"structure"` + + // Specifies whether connection draining is enabled for the load balancer. + Enabled *bool `type:"boolean" required:"true"` + + // The maximum time, in seconds, to keep the existing connections open before + // deregistering the instances. + Timeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s ConnectionDraining) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionDraining) GoString() string { + return s.String() +} + +// Information about the ConnectionSettings attribute. +type ConnectionSettings struct { + _ struct{} `type:"structure"` + + // The time, in seconds, that the connection is allowed to be idle (no data + // has been sent over the connection) before it is closed by the load balancer. 
+ IdleTimeout *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ConnectionSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionSettings) GoString() string { + return s.String() +} + +type CreateAppCookieStickinessPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the application cookie used for stickiness. + CookieName *string `type:"string" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The name of the policy being created. Policy names must consist of alphanumeric + // characters and dashes (-). This name must be unique within the set of policies + // for this load balancer. + PolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAppCookieStickinessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppCookieStickinessPolicyInput) GoString() string { + return s.String() +} + +type CreateAppCookieStickinessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAppCookieStickinessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppCookieStickinessPolicyOutput) GoString() string { + return s.String() +} + +type CreateLBCookieStickinessPolicyInput struct { + _ struct{} `type:"structure"` + + // The time period, in seconds, after which the cookie should be considered + // stale. If you do not specify this parameter, the sticky session lasts for + // the duration of the browser session. + CookieExpirationPeriod *int64 `type:"long"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The name of the policy being created. 
Policy names must consist of alphanumeric + // characters and dashes (-). This name must be unique within the set of policies + // for this load balancer. + PolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLBCookieStickinessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLBCookieStickinessPolicyInput) GoString() string { + return s.String() +} + +type CreateLBCookieStickinessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLBCookieStickinessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLBCookieStickinessPolicyOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // One or more Availability Zones from the same region as the load balancer. + // Traffic is equally distributed across all specified Availability Zones. + // + // You must specify at least one Availability Zone. + // + // You can add more Availability Zones after you create the load balancer using + // EnableAvailabilityZonesForLoadBalancer. + AvailabilityZones []*string `type:"list"` + + // The listeners. + // + // For more information, see Listeners for Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) + // in the Elastic Load Balancing Developer Guide. + Listeners []*Listener `type:"list" required:"true"` + + // The name of the load balancer. + // + // This name must be unique within your set of load balancers for the region, + // must have a maximum of 32 characters, must contain only alphanumeric characters + // or hyphens, and cannot begin or end with a hyphen. + LoadBalancerName *string `type:"string" required:"true"` + + // The type of a load balancer. 
Valid only for load balancers in a VPC. + // + // By default, Elastic Load Balancing creates an Internet-facing load balancer + // with a publicly resolvable DNS name, which resolves to public IP addresses. + // For more information about Internet-facing and Internal load balancers, see + // Internet-facing and Internal Load Balancers (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/vpc-loadbalancer-types.html) + // in the Elastic Load Balancing Developer Guide. + // + // Specify internal to create an internal load balancer with a DNS name that + // resolves to private IP addresses. + Scheme *string `type:"string"` + + // The IDs of the security groups to assign to the load balancer. + SecurityGroups []*string `type:"list"` + + // The IDs of the subnets in your VPC to attach to the load balancer. Specify + // one subnet per Availability Zone specified in AvailabilityZones. + Subnets []*string `type:"list"` + + // A list of tags to assign to the load balancer. + // + // For more information about tagging your load balancer, see Tagging (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#tagging-elb) + // in the Elastic Load Balancing Developer Guide. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerInput) GoString() string { + return s.String() +} + +type CreateLoadBalancerListenersInput struct { + _ struct{} `type:"structure"` + + // The listeners. + Listeners []*Listener `type:"list" required:"true"` + + // The name of the load balancer. 
+ LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoadBalancerListenersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerListenersInput) GoString() string { + return s.String() +} + +type CreateLoadBalancerListenersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLoadBalancerListenersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerListenersOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The DNS name of the load balancer. + DNSName *string `type:"string"` +} + +// String returns the string representation +func (s CreateLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerOutput) GoString() string { + return s.String() +} + +type CreateLoadBalancerPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The attributes for the policy. + PolicyAttributes []*PolicyAttribute `type:"list"` + + // The name of the load balancer policy to be created. This name must be unique + // within the set of policies for this load balancer. + PolicyName *string `type:"string" required:"true"` + + // The name of the base policy type. To get the list of policy types, use DescribeLoadBalancerPolicyTypes. 
+ PolicyTypeName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoadBalancerPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerPolicyInput) GoString() string { + return s.String() +} + +type CreateLoadBalancerPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLoadBalancerPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerPolicyOutput) GoString() string { + return s.String() +} + +// Information about the CrossZoneLoadBalancing attribute. +type CrossZoneLoadBalancing struct { + _ struct{} `type:"structure"` + + // Specifies whether cross-zone load balancing is enabled for the load balancer. + Enabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s CrossZoneLoadBalancing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CrossZoneLoadBalancing) GoString() string { + return s.String() +} + +type DeleteLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerInput) GoString() string { + return s.String() +} + +type DeleteLoadBalancerListenersInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The client port numbers of the listeners. 
+ LoadBalancerPorts []*int64 `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteLoadBalancerListenersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerListenersInput) GoString() string { + return s.String() +} + +type DeleteLoadBalancerListenersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoadBalancerListenersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerListenersOutput) GoString() string { + return s.String() +} + +type DeleteLoadBalancerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerOutput) GoString() string { + return s.String() +} + +// = +type DeleteLoadBalancerPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The name of the policy. 
+ PolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoadBalancerPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerPolicyInput) GoString() string { + return s.String() +} + +type DeleteLoadBalancerPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoadBalancerPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerPolicyOutput) GoString() string { + return s.String() +} + +type DeregisterInstancesFromLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + Instances []*Instance `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterInstancesFromLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstancesFromLoadBalancerInput) GoString() string { + return s.String() +} + +type DeregisterInstancesFromLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The remaining instances registered with the load balancer. + Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s DeregisterInstancesFromLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstancesFromLoadBalancerOutput) GoString() string { + return s.String() +} + +type DescribeInstanceHealthInput struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + Instances []*Instance `type:"list"` + + // The name of the load balancer. 
+ LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInstanceHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceHealthInput) GoString() string { + return s.String() +} + +type DescribeInstanceHealthOutput struct { + _ struct{} `type:"structure"` + + // Information about the health of the instances. + InstanceStates []*InstanceState `type:"list"` +} + +// String returns the string representation +func (s DescribeInstanceHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceHealthOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLoadBalancerAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerAttributesInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancer attributes. + LoadBalancerAttributes *LoadBalancerAttributes `type:"structure"` +} + +// String returns the string representation +func (s DescribeLoadBalancerAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerAttributesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The names of the policies. 
+ PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPoliciesInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPoliciesOutput struct { + _ struct{} `type:"structure"` + + // Information about the policies. + PolicyDescriptions []*PolicyDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPoliciesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPolicyTypesInput struct { + _ struct{} `type:"structure"` + + // The names of the policy types. If no names are specified, describes all policy + // types defined by Elastic Load Balancing. + PolicyTypeNames []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPolicyTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPolicyTypesInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancerPolicyTypesOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy types. + PolicyTypeDescriptions []*PolicyTypeDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerPolicyTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerPolicyTypesOutput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The names of the load balancers. 
+ LoadBalancerNames []*string `type:"list"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The maximum number of results to return with this call (a number from 1 to + // 400). The default is 400. + PageSize *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersInput) GoString() string { + return s.String() +} + +type DescribeLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancers. + LoadBalancerDescriptions []*LoadBalancerDescription `type:"list"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersOutput) GoString() string { + return s.String() +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The names of the load balancers. + LoadBalancerNames []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // Information about the tags. 
+ TagDescriptions []*TagDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DetachLoadBalancerFromSubnetsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The IDs of the subnets. + Subnets []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DetachLoadBalancerFromSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancerFromSubnetsInput) GoString() string { + return s.String() +} + +type DetachLoadBalancerFromSubnetsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the remaining subnets for the load balancer. + Subnets []*string `type:"list"` +} + +// String returns the string representation +func (s DetachLoadBalancerFromSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancerFromSubnetsOutput) GoString() string { + return s.String() +} + +type DisableAvailabilityZonesForLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Availability Zones. + AvailabilityZones []*string `type:"list" required:"true"` + + // The name of the load balancer. 
+ LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerInput) GoString() string { + return s.String() +} + +type DisableAvailabilityZonesForLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The remaining Availability Zones for the load balancer. + AvailabilityZones []*string `type:"list"` +} + +// String returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableAvailabilityZonesForLoadBalancerOutput) GoString() string { + return s.String() +} + +type EnableAvailabilityZonesForLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Availability Zones. These must be in the same region as the load balancer. + AvailabilityZones []*string `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableAvailabilityZonesForLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAvailabilityZonesForLoadBalancerInput) GoString() string { + return s.String() +} + +type EnableAvailabilityZonesForLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The updated list of Availability Zones for the load balancer. 
+ AvailabilityZones []*string `type:"list"` +} + +// String returns the string representation +func (s EnableAvailabilityZonesForLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableAvailabilityZonesForLoadBalancerOutput) GoString() string { + return s.String() +} + +// Information about a health check. +type HealthCheck struct { + _ struct{} `type:"structure"` + + // The number of consecutive health checks successes required before moving + // the instance to the Healthy state. + HealthyThreshold *int64 `min:"2" type:"integer" required:"true"` + + // The approximate interval, in seconds, between health checks of an individual + // instance. + Interval *int64 `min:"1" type:"integer" required:"true"` + + // The instance being checked. The protocol is either TCP, HTTP, HTTPS, or SSL. + // The range of valid ports is one (1) through 65535. + // + // TCP is the default, specified as a TCP: port pair, for example "TCP:5000". + // In this case, a health check simply attempts to open a TCP connection to + // the instance on the specified port. Failure to connect within the configured + // timeout is considered unhealthy. + // + // SSL is also specified as SSL: port pair, for example, SSL:5000. + // + // For HTTP/HTTPS, you must include a ping path in the string. HTTP is specified + // as a HTTP:port;/;PathToPing; grouping, for example "HTTP:80/weather/us/wa/seattle". + // In this case, a HTTP GET request is issued to the instance on the given port + // and path. Any answer other than "200 OK" within the timeout period is considered + // unhealthy. + // + // The total length of the HTTP ping target must be 1024 16-bit Unicode characters + // or less. + Target *string `type:"string" required:"true"` + + // The amount of time, in seconds, during which no response means a failed health + // check. + // + // This value must be less than the Interval value. 
+ Timeout *int64 `min:"1" type:"integer" required:"true"` + + // The number of consecutive health check failures required before moving the + // instance to the Unhealthy state. + UnhealthyThreshold *int64 `min:"2" type:"integer" required:"true"` +} + +// String returns the string representation +func (s HealthCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheck) GoString() string { + return s.String() +} + +// The ID of a back-end instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Information about the state of a back-end instance. +type InstanceState struct { + _ struct{} `type:"structure"` + + // A description of the instance state. This string can contain one or more + // of the following messages. + // + // N/A + // + // A transient error occurred. Please try again later. + // + // Instance has failed at least the UnhealthyThreshold number of health checks + // consecutively. + // + // Instance has not passed the configured HealthyThreshold number of health + // checks consecutively. + // + // Instance registration is still in progress. + // + // Instance is in the EC2 Availability Zone for which LoadBalancer is not + // configured to route traffic to. + // + // Instance is not currently registered with the LoadBalancer. + // + // Instance deregistration currently in progress. + // + // Disable Availability Zone is currently in progress. + // + // Instance is in pending state. + // + // Instance is in stopped state. + // + // Instance is in terminated state. + Description *string `type:"string"` + + // The ID of the instance. 
+ InstanceId *string `type:"string"` + + // Information about the cause of OutOfService instances. Specifically, whether + // the cause is Elastic Load Balancing or the instance. + // + // Valid values: ELB | Instance | N/A + ReasonCode *string `type:"string"` + + // The current state of the instance. + // + // Valid values: InService | OutOfService | Unknown + State *string `type:"string"` +} + +// String returns the string representation +func (s InstanceState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceState) GoString() string { + return s.String() +} + +// Information about a policy for duration-based session stickiness. +type LBCookieStickinessPolicy struct { + _ struct{} `type:"structure"` + + // The time period, in seconds, after which the cookie should be considered + // stale. If this parameter is not specified, the stickiness session lasts for + // the duration of the browser session. + CookieExpirationPeriod *int64 `type:"long"` + + // The name for the policy being created. The name must be unique within the + // set of policies for this load balancer. + PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s LBCookieStickinessPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LBCookieStickinessPolicy) GoString() string { + return s.String() +} + +// Information about a listener. +// +// For information about the protocols and the ports supported by Elastic Load +// Balancing, see Listener Configurations for Elastic Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) +// in the Elastic Load Balancing Developer Guide. +type Listener struct { + _ struct{} `type:"structure"` + + // The port on which the instance is listening. 
+ InstancePort *int64 `min:"1" type:"integer" required:"true"` + + // The protocol to use for routing traffic to back-end instances: HTTP, HTTPS, + // TCP, or SSL. + // + // If the front-end protocol is HTTP, HTTPS, TCP, or SSL, InstanceProtocol + // must be at the same protocol. + // + // If there is another listener with the same InstancePort whose InstanceProtocol + // is secure, (HTTPS or SSL), the listener's InstanceProtocol must also be secure. + // + // If there is another listener with the same InstancePort whose InstanceProtocol + // is HTTP or TCP, the listener's InstanceProtocol must be HTTP or TCP. + InstanceProtocol *string `type:"string"` + + // The port on which the load balancer is listening. On EC2-VPC, you can specify + // any port from the range 1-65535. On EC2-Classic, you can specify any port + // from the following list: 25, 80, 443, 465, 587, 1024-65535. + LoadBalancerPort *int64 `type:"integer" required:"true"` + + // The load balancer transport protocol to use for routing: HTTP, HTTPS, TCP, + // or SSL. + Protocol *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the server certificate. + SSLCertificateId *string `type:"string"` +} + +// String returns the string representation +func (s Listener) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Listener) GoString() string { + return s.String() +} + +// The policies enabled for a listener. +type ListenerDescription struct { + _ struct{} `type:"structure"` + + // Information about a listener. + // + // For information about the protocols and the ports supported by Elastic Load + // Balancing, see Listener Configurations for Elastic Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) + // in the Elastic Load Balancing Developer Guide. + Listener *Listener `type:"structure"` + + // The policies. If there are no policies enabled, the list is empty. 
+ PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s ListenerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListenerDescription) GoString() string { + return s.String() +} + +// The attributes for a load balancer. +type LoadBalancerAttributes struct { + _ struct{} `type:"structure"` + + // If enabled, the load balancer captures detailed information of all requests + // and delivers the information to the Amazon S3 bucket that you specify. + // + // For more information, see Enable Access Logs (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-access-logs.html) + // in the Elastic Load Balancing Developer Guide. + AccessLog *AccessLog `type:"structure"` + + // This parameter is reserved. + AdditionalAttributes []*AdditionalAttribute `type:"list"` + + // If enabled, the load balancer allows existing requests to complete before + // the load balancer shifts traffic away from a deregistered or unhealthy back-end + // instance. + // + // For more information, see Enable Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/config-conn-drain.html) + // in the Elastic Load Balancing Developer Guide. + ConnectionDraining *ConnectionDraining `type:"structure"` + + // If enabled, the load balancer allows the connections to remain idle (no data + // is sent over the connection) for the specified duration. + // + // By default, Elastic Load Balancing maintains a 60-second idle connection + // timeout for both front-end and back-end connections of your load balancer. + // For more information, see Configure Idle Connection Timeout (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/config-idle-timeout.html) + // in the Elastic Load Balancing Developer Guide. 
+ ConnectionSettings *ConnectionSettings `type:"structure"` + + // If enabled, the load balancer routes the request traffic evenly across all + // back-end instances regardless of the Availability Zones. + // + // For more information, see Enable Cross-Zone Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-disable-crosszone-lb.html) + // in the Elastic Load Balancing Developer Guide. + CrossZoneLoadBalancing *CrossZoneLoadBalancing `type:"structure"` +} + +// String returns the string representation +func (s LoadBalancerAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerAttributes) GoString() string { + return s.String() +} + +// Information about a load balancer. +type LoadBalancerDescription struct { + _ struct{} `type:"structure"` + + // The Availability Zones for the load balancer. + AvailabilityZones []*string `type:"list"` + + // Information about the back-end servers. + BackendServerDescriptions []*BackendServerDescription `type:"list"` + + // The Amazon Route 53 hosted zone associated with the load balancer. + // + // For more information, see Using Domain Names With Elastic Load Balancing + // (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/using-domain-names-with-elb.html) + // in the Elastic Load Balancing Developer Guide. + CanonicalHostedZoneName *string `type:"string"` + + // The ID of the Amazon Route 53 hosted zone name associated with the load balancer. + CanonicalHostedZoneNameID *string `type:"string"` + + // The date and time the load balancer was created. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The external DNS name of the load balancer. + DNSName *string `type:"string"` + + // Information about the health checks conducted on the load balancer. + HealthCheck *HealthCheck `type:"structure"` + + // The IDs of the instances for the load balancer. 
+ Instances []*Instance `type:"list"` + + // The listeners for the load balancer. + ListenerDescriptions []*ListenerDescription `type:"list"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The policies defined for the load balancer. + Policies *Policies `type:"structure"` + + // The type of load balancer. Valid only for load balancers in a VPC. + // + // If Scheme is internet-facing, the load balancer has a public DNS name that + // resolves to a public IP address. + // + // If Scheme is internal, the load balancer has a public DNS name that resolves + // to a private IP address. + Scheme *string `type:"string"` + + // The security groups for the load balancer. Valid only for load balancers + // in a VPC. + SecurityGroups []*string `type:"list"` + + // The security group that you can use as part of your inbound rules for your + // load balancer's back-end application instances. To only allow traffic from + // load balancers, add a security group rule to your back end instance that + // specifies this source security group as the inbound source. + SourceSecurityGroup *SourceSecurityGroup `type:"structure"` + + // The IDs of the subnets for the load balancer. + Subnets []*string `type:"list"` + + // The ID of the VPC for the load balancer. + VPCId *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerDescription) GoString() string { + return s.String() +} + +type ModifyLoadBalancerAttributesInput struct { + _ struct{} `type:"structure"` + + // The attributes of the load balancer. + LoadBalancerAttributes *LoadBalancerAttributes `type:"structure" required:"true"` + + // The name of the load balancer. 
+ LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyLoadBalancerAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLoadBalancerAttributesInput) GoString() string { + return s.String() +} + +type ModifyLoadBalancerAttributesOutput struct { + _ struct{} `type:"structure"` + + // The attributes for a load balancer. + LoadBalancerAttributes *LoadBalancerAttributes `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` +} + +// String returns the string representation +func (s ModifyLoadBalancerAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLoadBalancerAttributesOutput) GoString() string { + return s.String() +} + +// The policies for a load balancer. +type Policies struct { + _ struct{} `type:"structure"` + + // The stickiness policies created using CreateAppCookieStickinessPolicy. + AppCookieStickinessPolicies []*AppCookieStickinessPolicy `type:"list"` + + // The stickiness policies created using CreateLBCookieStickinessPolicy. + LBCookieStickinessPolicies []*LBCookieStickinessPolicy `type:"list"` + + // The policies other than the stickiness policies. + OtherPolicies []*string `type:"list"` +} + +// String returns the string representation +func (s Policies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Policies) GoString() string { + return s.String() +} + +// Information about a policy attribute. +type PolicyAttribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + AttributeName *string `type:"string"` + + // The value of the attribute. 
+ AttributeValue *string `type:"string"` +} + +// String returns the string representation +func (s PolicyAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyAttribute) GoString() string { + return s.String() +} + +// Information about a policy attribute. +type PolicyAttributeDescription struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + AttributeName *string `type:"string"` + + // The value of the attribute. + AttributeValue *string `type:"string"` +} + +// String returns the string representation +func (s PolicyAttributeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyAttributeDescription) GoString() string { + return s.String() +} + +// Information about a policy attribute type. +type PolicyAttributeTypeDescription struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + AttributeName *string `type:"string"` + + // The type of the attribute. For example, Boolean or Integer. + AttributeType *string `type:"string"` + + // The cardinality of the attribute. + // + // Valid values: + // + // ONE(1) : Single value required ZERO_OR_ONE(0..1) : Up to one value can + // be supplied ZERO_OR_MORE(0..*) : Optional. Multiple values are allowed ONE_OR_MORE(1..*0) + // : Required. Multiple values are allowed + Cardinality *string `type:"string"` + + // The default value of the attribute, if applicable. + DefaultValue *string `type:"string"` + + // A description of the attribute. + Description *string `type:"string"` +} + +// String returns the string representation +func (s PolicyAttributeTypeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyAttributeTypeDescription) GoString() string { + return s.String() +} + +// Information about a policy. 
+type PolicyDescription struct { + _ struct{} `type:"structure"` + + // The policy attributes. + PolicyAttributeDescriptions []*PolicyAttributeDescription `type:"list"` + + // The name of the policy. + PolicyName *string `type:"string"` + + // The name of the policy type. + PolicyTypeName *string `type:"string"` +} + +// String returns the string representation +func (s PolicyDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescription) GoString() string { + return s.String() +} + +// Information about a policy type. +type PolicyTypeDescription struct { + _ struct{} `type:"structure"` + + // A description of the policy type. + Description *string `type:"string"` + + // The description of the policy attributes associated with the policies defined + // by Elastic Load Balancing. + PolicyAttributeTypeDescriptions []*PolicyAttributeTypeDescription `type:"list"` + + // The name of the policy type. + PolicyTypeName *string `type:"string"` +} + +// String returns the string representation +func (s PolicyTypeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyTypeDescription) GoString() string { + return s.String() +} + +type RegisterInstancesWithLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + Instances []*Instance `type:"list" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterInstancesWithLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstancesWithLoadBalancerInput) GoString() string { + return s.String() +} + +type RegisterInstancesWithLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // The updated list of instances for the load balancer. 
+ Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s RegisterInstancesWithLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstancesWithLoadBalancerOutput) GoString() string { + return s.String() +} + +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. You can specify a maximum of one load balancer + // name. + LoadBalancerNames []*string `type:"list" required:"true"` + + // The list of tag keys to remove. + Tags []*TagKeyOnly `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +type SetLoadBalancerListenerSSLCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The port that uses the specified SSL certificate. + LoadBalancerPort *int64 `type:"integer" required:"true"` + + // The Amazon Resource Name (ARN) of the SSL certificate. 
+ SSLCertificateId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetLoadBalancerListenerSSLCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerListenerSSLCertificateInput) GoString() string { + return s.String() +} + +type SetLoadBalancerListenerSSLCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBalancerListenerSSLCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerListenerSSLCertificateOutput) GoString() string { + return s.String() +} + +type SetLoadBalancerPoliciesForBackendServerInput struct { + _ struct{} `type:"structure"` + + // The port number associated with the back-end server. + InstancePort *int64 `type:"integer" required:"true"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The names of the policies. If the list is empty, then all current polices + // are removed from the back-end server. 
+ PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetLoadBalancerPoliciesForBackendServerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerPoliciesForBackendServerInput) GoString() string { + return s.String() +} + +type SetLoadBalancerPoliciesForBackendServerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBalancerPoliciesForBackendServerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerPoliciesForBackendServerOutput) GoString() string { + return s.String() +} + +type SetLoadBalancerPoliciesOfListenerInput struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string" required:"true"` + + // The external port of the load balancer for the policy. + LoadBalancerPort *int64 `type:"integer" required:"true"` + + // The names of the policies. If the list is empty, the current policy is removed + // from the listener. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetLoadBalancerPoliciesOfListenerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerPoliciesOfListenerInput) GoString() string { + return s.String() +} + +type SetLoadBalancerPoliciesOfListenerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBalancerPoliciesOfListenerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBalancerPoliciesOfListenerOutput) GoString() string { + return s.String() +} + +// Information about a source security group. 
+type SourceSecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the security group. + GroupName *string `type:"string"` + + // The owner of the security group. + OwnerAlias *string `type:"string"` +} + +// String returns the string representation +func (s SourceSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceSecurityGroup) GoString() string { + return s.String() +} + +// Information about a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// The tags associated with a load balancer. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The tags. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +// The key of a tag. +type TagKeyOnly struct { + _ struct{} `type:"structure"` + + // The name of the key. 
+ Key *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TagKeyOnly) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagKeyOnly) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/elbiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,128 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package elbiface provides an interface for the Elastic Load Balancing. +package elbiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/elb" +) + +// ELBAPI is the interface type for elb.ELB. 
+type ELBAPI interface { + AddTagsRequest(*elb.AddTagsInput) (*request.Request, *elb.AddTagsOutput) + + AddTags(*elb.AddTagsInput) (*elb.AddTagsOutput, error) + + ApplySecurityGroupsToLoadBalancerRequest(*elb.ApplySecurityGroupsToLoadBalancerInput) (*request.Request, *elb.ApplySecurityGroupsToLoadBalancerOutput) + + ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error) + + AttachLoadBalancerToSubnetsRequest(*elb.AttachLoadBalancerToSubnetsInput) (*request.Request, *elb.AttachLoadBalancerToSubnetsOutput) + + AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error) + + ConfigureHealthCheckRequest(*elb.ConfigureHealthCheckInput) (*request.Request, *elb.ConfigureHealthCheckOutput) + + ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error) + + CreateAppCookieStickinessPolicyRequest(*elb.CreateAppCookieStickinessPolicyInput) (*request.Request, *elb.CreateAppCookieStickinessPolicyOutput) + + CreateAppCookieStickinessPolicy(*elb.CreateAppCookieStickinessPolicyInput) (*elb.CreateAppCookieStickinessPolicyOutput, error) + + CreateLBCookieStickinessPolicyRequest(*elb.CreateLBCookieStickinessPolicyInput) (*request.Request, *elb.CreateLBCookieStickinessPolicyOutput) + + CreateLBCookieStickinessPolicy(*elb.CreateLBCookieStickinessPolicyInput) (*elb.CreateLBCookieStickinessPolicyOutput, error) + + CreateLoadBalancerRequest(*elb.CreateLoadBalancerInput) (*request.Request, *elb.CreateLoadBalancerOutput) + + CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) + + CreateLoadBalancerListenersRequest(*elb.CreateLoadBalancerListenersInput) (*request.Request, *elb.CreateLoadBalancerListenersOutput) + + CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error) + + 
CreateLoadBalancerPolicyRequest(*elb.CreateLoadBalancerPolicyInput) (*request.Request, *elb.CreateLoadBalancerPolicyOutput) + + CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error) + + DeleteLoadBalancerRequest(*elb.DeleteLoadBalancerInput) (*request.Request, *elb.DeleteLoadBalancerOutput) + + DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) + + DeleteLoadBalancerListenersRequest(*elb.DeleteLoadBalancerListenersInput) (*request.Request, *elb.DeleteLoadBalancerListenersOutput) + + DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error) + + DeleteLoadBalancerPolicyRequest(*elb.DeleteLoadBalancerPolicyInput) (*request.Request, *elb.DeleteLoadBalancerPolicyOutput) + + DeleteLoadBalancerPolicy(*elb.DeleteLoadBalancerPolicyInput) (*elb.DeleteLoadBalancerPolicyOutput, error) + + DeregisterInstancesFromLoadBalancerRequest(*elb.DeregisterInstancesFromLoadBalancerInput) (*request.Request, *elb.DeregisterInstancesFromLoadBalancerOutput) + + DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) + + DescribeInstanceHealthRequest(*elb.DescribeInstanceHealthInput) (*request.Request, *elb.DescribeInstanceHealthOutput) + + DescribeInstanceHealth(*elb.DescribeInstanceHealthInput) (*elb.DescribeInstanceHealthOutput, error) + + DescribeLoadBalancerAttributesRequest(*elb.DescribeLoadBalancerAttributesInput) (*request.Request, *elb.DescribeLoadBalancerAttributesOutput) + + DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error) + + DescribeLoadBalancerPoliciesRequest(*elb.DescribeLoadBalancerPoliciesInput) (*request.Request, *elb.DescribeLoadBalancerPoliciesOutput) + + DescribeLoadBalancerPolicies(*elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) + + 
DescribeLoadBalancerPolicyTypesRequest(*elb.DescribeLoadBalancerPolicyTypesInput) (*request.Request, *elb.DescribeLoadBalancerPolicyTypesOutput) + + DescribeLoadBalancerPolicyTypes(*elb.DescribeLoadBalancerPolicyTypesInput) (*elb.DescribeLoadBalancerPolicyTypesOutput, error) + + DescribeLoadBalancersRequest(*elb.DescribeLoadBalancersInput) (*request.Request, *elb.DescribeLoadBalancersOutput) + + DescribeLoadBalancers(*elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) + + DescribeLoadBalancersPages(*elb.DescribeLoadBalancersInput, func(*elb.DescribeLoadBalancersOutput, bool) bool) error + + DescribeTagsRequest(*elb.DescribeTagsInput) (*request.Request, *elb.DescribeTagsOutput) + + DescribeTags(*elb.DescribeTagsInput) (*elb.DescribeTagsOutput, error) + + DetachLoadBalancerFromSubnetsRequest(*elb.DetachLoadBalancerFromSubnetsInput) (*request.Request, *elb.DetachLoadBalancerFromSubnetsOutput) + + DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) + + DisableAvailabilityZonesForLoadBalancerRequest(*elb.DisableAvailabilityZonesForLoadBalancerInput) (*request.Request, *elb.DisableAvailabilityZonesForLoadBalancerOutput) + + DisableAvailabilityZonesForLoadBalancer(*elb.DisableAvailabilityZonesForLoadBalancerInput) (*elb.DisableAvailabilityZonesForLoadBalancerOutput, error) + + EnableAvailabilityZonesForLoadBalancerRequest(*elb.EnableAvailabilityZonesForLoadBalancerInput) (*request.Request, *elb.EnableAvailabilityZonesForLoadBalancerOutput) + + EnableAvailabilityZonesForLoadBalancer(*elb.EnableAvailabilityZonesForLoadBalancerInput) (*elb.EnableAvailabilityZonesForLoadBalancerOutput, error) + + ModifyLoadBalancerAttributesRequest(*elb.ModifyLoadBalancerAttributesInput) (*request.Request, *elb.ModifyLoadBalancerAttributesOutput) + + ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) + + 
RegisterInstancesWithLoadBalancerRequest(*elb.RegisterInstancesWithLoadBalancerInput) (*request.Request, *elb.RegisterInstancesWithLoadBalancerOutput) + + RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) + + RemoveTagsRequest(*elb.RemoveTagsInput) (*request.Request, *elb.RemoveTagsOutput) + + RemoveTags(*elb.RemoveTagsInput) (*elb.RemoveTagsOutput, error) + + SetLoadBalancerListenerSSLCertificateRequest(*elb.SetLoadBalancerListenerSSLCertificateInput) (*request.Request, *elb.SetLoadBalancerListenerSSLCertificateOutput) + + SetLoadBalancerListenerSSLCertificate(*elb.SetLoadBalancerListenerSSLCertificateInput) (*elb.SetLoadBalancerListenerSSLCertificateOutput, error) + + SetLoadBalancerPoliciesForBackendServerRequest(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*request.Request, *elb.SetLoadBalancerPoliciesForBackendServerOutput) + + SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) + + SetLoadBalancerPoliciesOfListenerRequest(*elb.SetLoadBalancerPoliciesOfListenerInput) (*request.Request, *elb.SetLoadBalancerPoliciesOfListenerOutput) + + SetLoadBalancerPoliciesOfListener(*elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) +} + +var _ ELBAPI = (*elb.ELB)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,722 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. 
DO NOT EDIT. + +package elb_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/elb" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleELB_AddTags() { + svc := elb.New(session.New()) + + params := &elb.AddTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + Tags: []*elb.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ApplySecurityGroupsToLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.ApplySecurityGroupsToLoadBalancerInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + SecurityGroups: []*string{ // Required + aws.String("SecurityGroupId"), // Required + // More values... + }, + } + resp, err := svc.ApplySecurityGroupsToLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_AttachLoadBalancerToSubnets() { + svc := elb.New(session.New()) + + params := &elb.AttachLoadBalancerToSubnetsInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Subnets: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + } + resp, err := svc.AttachLoadBalancerToSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ConfigureHealthCheck() { + svc := elb.New(session.New()) + + params := &elb.ConfigureHealthCheckInput{ + HealthCheck: &elb.HealthCheck{ // Required + HealthyThreshold: aws.Int64(1), // Required + Interval: aws.Int64(1), // Required + Target: aws.String("HealthCheckTarget"), // Required + Timeout: aws.Int64(1), // Required + UnhealthyThreshold: aws.Int64(1), // Required + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.ConfigureHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateAppCookieStickinessPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateAppCookieStickinessPolicyInput{ + CookieName: aws.String("CookieName"), // Required + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.CreateAppCookieStickinessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLBCookieStickinessPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateLBCookieStickinessPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + CookieExpirationPeriod: aws.Int64(1), + } + resp, err := svc.CreateLBCookieStickinessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerInput{ + Listeners: []*elb.Listener{ // Required + { // Required + InstancePort: aws.Int64(1), // Required + LoadBalancerPort: aws.Int64(1), // Required + Protocol: aws.String("Protocol"), // Required + InstanceProtocol: aws.String("Protocol"), + SSLCertificateId: aws.String("SSLCertificateId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + AvailabilityZones: []*string{ + aws.String("AvailabilityZone"), // Required + // More values... + }, + Scheme: aws.String("LoadBalancerScheme"), + SecurityGroups: []*string{ + aws.String("SecurityGroupId"), // Required + // More values... + }, + Subnets: []*string{ + aws.String("SubnetId"), // Required + // More values... + }, + Tags: []*elb.Tag{ + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.CreateLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancerListeners() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerListenersInput{ + Listeners: []*elb.Listener{ // Required + { // Required + InstancePort: aws.Int64(1), // Required + LoadBalancerPort: aws.Int64(1), // Required + Protocol: aws.String("Protocol"), // Required + InstanceProtocol: aws.String("Protocol"), + SSLCertificateId: aws.String("SSLCertificateId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.CreateLoadBalancerListeners(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_CreateLoadBalancerPolicy() { + svc := elb.New(session.New()) + + params := &elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + PolicyTypeName: aws.String("PolicyTypeName"), // Required + PolicyAttributes: []*elb.PolicyAttribute{ + { // Required + AttributeName: aws.String("AttributeName"), + AttributeValue: aws.String("AttributeValue"), + }, + // More values... + }, + } + resp, err := svc.CreateLoadBalancerPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DeleteLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancerListeners() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerListenersInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + LoadBalancerPorts: []*int64{ // Required + aws.Int64(1), // Required + // More values... + }, + } + resp, err := svc.DeleteLoadBalancerListeners(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_DeleteLoadBalancerPolicy() { + svc := elb.New(session.New()) + + params := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.DeleteLoadBalancerPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DeregisterInstancesFromLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DeregisterInstancesFromLoadBalancerInput{ + Instances: []*elb.Instance{ // Required + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DeregisterInstancesFromLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeInstanceHealth() { + svc := elb.New(session.New()) + + params := &elb.DescribeInstanceHealthInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Instances: []*elb.Instance{ + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + } + resp, err := svc.DescribeInstanceHealth(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerAttributes() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerAttributesInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DescribeLoadBalancerAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerPolicies() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String("AccessPointName"), + PolicyNames: []*string{ + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBalancerPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancerPolicyTypes() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancerPolicyTypesInput{ + PolicyTypeNames: []*string{ + aws.String("PolicyTypeName"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBalancerPolicyTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeLoadBalancers() { + svc := elb.New(session.New()) + + params := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{ + aws.String("AccessPointName"), // Required + // More values... 
+ }, + Marker: aws.String("Marker"), + PageSize: aws.Int64(1), + } + resp, err := svc.DescribeLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DescribeTags() { + svc := elb.New(session.New()) + + params := &elb.DescribeTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DetachLoadBalancerFromSubnets() { + svc := elb.New(session.New()) + + params := &elb.DetachLoadBalancerFromSubnetsInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + Subnets: []*string{ // Required + aws.String("SubnetId"), // Required + // More values... + }, + } + resp, err := svc.DetachLoadBalancerFromSubnets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_DisableAvailabilityZonesForLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.DisableAvailabilityZonesForLoadBalancerInput{ + AvailabilityZones: []*string{ // Required + aws.String("AvailabilityZone"), // Required + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.DisableAvailabilityZonesForLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_EnableAvailabilityZonesForLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.EnableAvailabilityZonesForLoadBalancerInput{ + AvailabilityZones: []*string{ // Required + aws.String("AvailabilityZone"), // Required + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.EnableAvailabilityZonesForLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_ModifyLoadBalancerAttributes() { + svc := elb.New(session.New()) + + params := &elb.ModifyLoadBalancerAttributesInput{ + LoadBalancerAttributes: &elb.LoadBalancerAttributes{ // Required + AccessLog: &elb.AccessLog{ + Enabled: aws.Bool(true), // Required + EmitInterval: aws.Int64(1), + S3BucketName: aws.String("S3BucketName"), + S3BucketPrefix: aws.String("AccessLogPrefix"), + }, + AdditionalAttributes: []*elb.AdditionalAttribute{ + { // Required + Key: aws.String("StringVal"), + Value: aws.String("StringVal"), + }, + // More values... + }, + ConnectionDraining: &elb.ConnectionDraining{ + Enabled: aws.Bool(true), // Required + Timeout: aws.Int64(1), + }, + ConnectionSettings: &elb.ConnectionSettings{ + IdleTimeout: aws.Int64(1), // Required + }, + CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{ + Enabled: aws.Bool(true), // Required + }, + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.ModifyLoadBalancerAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_RegisterInstancesWithLoadBalancer() { + svc := elb.New(session.New()) + + params := &elb.RegisterInstancesWithLoadBalancerInput{ + Instances: []*elb.Instance{ // Required + { // Required + InstanceId: aws.String("InstanceId"), + }, + // More values... + }, + LoadBalancerName: aws.String("AccessPointName"), // Required + } + resp, err := svc.RegisterInstancesWithLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_RemoveTags() { + svc := elb.New(session.New()) + + params := &elb.RemoveTagsInput{ + LoadBalancerNames: []*string{ // Required + aws.String("AccessPointName"), // Required + // More values... + }, + Tags: []*elb.TagKeyOnly{ // Required + { // Required + Key: aws.String("TagKey"), + }, + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_SetLoadBalancerListenerSSLCertificate() { + svc := elb.New(session.New()) + + params := &elb.SetLoadBalancerListenerSSLCertificateInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + LoadBalancerPort: aws.Int64(1), // Required + SSLCertificateId: aws.String("SSLCertificateId"), // Required + } + resp, err := svc.SetLoadBalancerListenerSSLCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleELB_SetLoadBalancerPoliciesForBackendServer() { + svc := elb.New(session.New()) + + params := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: aws.Int64(1), // Required + LoadBalancerName: aws.String("AccessPointName"), // Required + PolicyNames: []*string{ // Required + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.SetLoadBalancerPoliciesForBackendServer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleELB_SetLoadBalancerPoliciesOfListener() { + svc := elb.New(session.New()) + + params := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String("AccessPointName"), // Required + LoadBalancerPort: aws.Int64(1), // Required + PolicyNames: []*string{ // Required + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.SetLoadBalancerPoliciesOfListener(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,98 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package elb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Elastic Load Balancing distributes incoming traffic across your EC2 instances. +// +// For information about the features of Elastic Load Balancing, see What Is +// Elastic Load Balancing? (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elastic-load-balancing.html) +// in the Elastic Load Balancing Developer Guide. +// +// For information about the AWS regions supported by Elastic Load Balancing, +// see Regions and Endpoints - Elastic Load Balancing (http://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region) +// in the Amazon Web Services General Reference. +// +// All Elastic Load Balancing operations are idempotent, which means that they +// complete at most one time. If you repeat an operation, it succeeds with a +// 200 OK response code. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type ELB struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "elasticloadbalancing" + +// New creates a new instance of the ELB client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ELB client from just a session. 
+// svc := elb.New(mySession) +// +// // Create a ELB client with additional configuration +// svc := elb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELB { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ELB { + svc := &ELB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-06-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ELB operation and runs any +// custom request initialization. 
+func (c *ELB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/elb/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,53 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package elb + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *ELB) WaitUntilAnyInstanceInService(input *DescribeInstanceHealthInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceHealth", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAny", + Argument: "InstanceStates[].State", + Expected: "InService", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *ELB) WaitUntilInstanceInService(input *DescribeInstanceHealthInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstanceHealth", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "InstanceStates[].State", + Expected: "InService", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/api.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2990 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package emr provides a client for Amazon Elastic MapReduce. +package emr + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddInstanceGroups = "AddInstanceGroups" + +// AddInstanceGroupsRequest generates a request for the AddInstanceGroups operation. +func (c *EMR) AddInstanceGroupsRequest(input *AddInstanceGroupsInput) (req *request.Request, output *AddInstanceGroupsOutput) { + op := &request.Operation{ + Name: opAddInstanceGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddInstanceGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddInstanceGroupsOutput{} + req.Data = output + return +} + +// AddInstanceGroups adds an instance group to a running cluster. +func (c *EMR) AddInstanceGroups(input *AddInstanceGroupsInput) (*AddInstanceGroupsOutput, error) { + req, out := c.AddInstanceGroupsRequest(input) + err := req.Send() + return out, err +} + +const opAddJobFlowSteps = "AddJobFlowSteps" + +// AddJobFlowStepsRequest generates a request for the AddJobFlowSteps operation. 
+func (c *EMR) AddJobFlowStepsRequest(input *AddJobFlowStepsInput) (req *request.Request, output *AddJobFlowStepsOutput) { + op := &request.Operation{ + Name: opAddJobFlowSteps, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddJobFlowStepsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddJobFlowStepsOutput{} + req.Data = output + return +} + +// AddJobFlowSteps adds new steps to a running job flow. A maximum of 256 steps +// are allowed in each job flow. +// +// If your job flow is long-running (such as a Hive data warehouse) or complex, +// you may require more than 256 steps to process your data. You can bypass +// the 256-step limitation in various ways, including using the SSH shell to +// connect to the master node and submitting queries directly to the software +// running on the master node, such as Hive and Hadoop. For more information +// on how to do this, go to Add More than 256 Steps to a Job Flow (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html) +// in the Amazon Elastic MapReduce Developer's Guide. +// +// A step specifies the location of a JAR file stored either on the master +// node of the job flow or in Amazon S3. Each step is performed by the main +// function of the main class of the JAR file. The main class can be specified +// either in the manifest of the JAR or by using the MainFunction parameter +// of the step. +// +// Elastic MapReduce executes each step in the order listed. For a step to +// be considered complete, the main function must exit with a zero exit code +// and all Hadoop jobs started while the step was running must have completed +// and run successfully. +// +// You can only add steps to a job flow that is in one of the following states: +// STARTING, BOOTSTRAPPING, RUNNING, or WAITING. 
+func (c *EMR) AddJobFlowSteps(input *AddJobFlowStepsInput) (*AddJobFlowStepsOutput, error) { + req, out := c.AddJobFlowStepsRequest(input) + err := req.Send() + return out, err +} + +const opAddTags = "AddTags" + +// AddTagsRequest generates a request for the AddTags operation. +func (c *EMR) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsOutput{} + req.Data = output + return +} + +// Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters +// in various ways, such as grouping clusters to track your Amazon EMR resource +// allocation costs. For more information, see Tagging Amazon EMR Resources +// (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). +func (c *EMR) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCluster = "DescribeCluster" + +// DescribeClusterRequest generates a request for the DescribeCluster operation. +func (c *EMR) DescribeClusterRequest(input *DescribeClusterInput) (req *request.Request, output *DescribeClusterOutput) { + op := &request.Operation{ + Name: opDescribeCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterOutput{} + req.Data = output + return +} + +// Provides cluster-level details including status, hardware and software configuration, +// VPC settings, and so on. For information about the cluster steps, see ListSteps. 
+func (c *EMR) DescribeCluster(input *DescribeClusterInput) (*DescribeClusterOutput, error) { + req, out := c.DescribeClusterRequest(input) + err := req.Send() + return out, err +} + +const opDescribeJobFlows = "DescribeJobFlows" + +// DescribeJobFlowsRequest generates a request for the DescribeJobFlows operation. +func (c *EMR) DescribeJobFlowsRequest(input *DescribeJobFlowsInput) (req *request.Request, output *DescribeJobFlowsOutput) { + op := &request.Operation{ + Name: opDescribeJobFlows, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeJobFlowsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeJobFlowsOutput{} + req.Data = output + return +} + +// This API is deprecated and will eventually be removed. We recommend you use +// ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions +// instead. +// +// DescribeJobFlows returns a list of job flows that match all of the supplied +// parameters. The parameters can include a list of job flow IDs, job flow states, +// and restrictions on job flow creation date and time. +// +// Regardless of supplied parameters, only job flows created within the last +// two months are returned. +// +// If no parameters are supplied, then job flows matching either of the following +// criteria are returned: +// +// Job flows created and completed in the last two weeks Job flows created +// within the last two months that are in one of the following states: RUNNING, +// WAITING, SHUTTING_DOWN, STARTING Amazon Elastic MapReduce can return a +// maximum of 512 job flow descriptions. +func (c *EMR) DescribeJobFlows(input *DescribeJobFlowsInput) (*DescribeJobFlowsOutput, error) { + req, out := c.DescribeJobFlowsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStep = "DescribeStep" + +// DescribeStepRequest generates a request for the DescribeStep operation. 
+func (c *EMR) DescribeStepRequest(input *DescribeStepInput) (req *request.Request, output *DescribeStepOutput) { + op := &request.Operation{ + Name: opDescribeStep, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStepInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStepOutput{} + req.Data = output + return +} + +// Provides more detail about the cluster step. +func (c *EMR) DescribeStep(input *DescribeStepInput) (*DescribeStepOutput, error) { + req, out := c.DescribeStepRequest(input) + err := req.Send() + return out, err +} + +const opListBootstrapActions = "ListBootstrapActions" + +// ListBootstrapActionsRequest generates a request for the ListBootstrapActions operation. +func (c *EMR) ListBootstrapActionsRequest(input *ListBootstrapActionsInput) (req *request.Request, output *ListBootstrapActionsOutput) { + op := &request.Operation{ + Name: opListBootstrapActions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListBootstrapActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBootstrapActionsOutput{} + req.Data = output + return +} + +// Provides information about the bootstrap actions associated with a cluster. 
+func (c *EMR) ListBootstrapActions(input *ListBootstrapActionsInput) (*ListBootstrapActionsOutput, error) { + req, out := c.ListBootstrapActionsRequest(input) + err := req.Send() + return out, err +} + +func (c *EMR) ListBootstrapActionsPages(input *ListBootstrapActionsInput, fn func(p *ListBootstrapActionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListBootstrapActionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListBootstrapActionsOutput), lastPage) + }) +} + +const opListClusters = "ListClusters" + +// ListClustersRequest generates a request for the ListClusters operation. +func (c *EMR) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { + op := &request.Operation{ + Name: opListClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListClustersOutput{} + req.Data = output + return +} + +// Provides the status of all clusters visible to this AWS account. Allows you +// to filter the list of clusters based on certain criteria; for example, filtering +// by cluster creation date and time or by status. This call returns a maximum +// of 50 clusters per call, but returns a marker to track the paging of the +// cluster list across multiple ListClusters calls. 
+func (c *EMR) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + err := req.Send() + return out, err +} + +func (c *EMR) ListClustersPages(input *ListClustersInput, fn func(p *ListClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListClustersOutput), lastPage) + }) +} + +const opListInstanceGroups = "ListInstanceGroups" + +// ListInstanceGroupsRequest generates a request for the ListInstanceGroups operation. +func (c *EMR) ListInstanceGroupsRequest(input *ListInstanceGroupsInput) (req *request.Request, output *ListInstanceGroupsOutput) { + op := &request.Operation{ + Name: opListInstanceGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListInstanceGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstanceGroupsOutput{} + req.Data = output + return +} + +// Provides all available details about the instance groups in a cluster. 
+func (c *EMR) ListInstanceGroups(input *ListInstanceGroupsInput) (*ListInstanceGroupsOutput, error) { + req, out := c.ListInstanceGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *EMR) ListInstanceGroupsPages(input *ListInstanceGroupsInput, fn func(p *ListInstanceGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstanceGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstanceGroupsOutput), lastPage) + }) +} + +const opListInstances = "ListInstances" + +// ListInstancesRequest generates a request for the ListInstances operation. +func (c *EMR) ListInstancesRequest(input *ListInstancesInput) (req *request.Request, output *ListInstancesOutput) { + op := &request.Operation{ + Name: opListInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstancesOutput{} + req.Data = output + return +} + +// Provides information about the cluster instances that Amazon EMR provisions +// on behalf of a user when it creates the cluster. For example, this operation +// indicates when the EC2 instances reach the Ready state, when instances become +// available to Amazon EMR to use for jobs, and the IP addresses for cluster +// instances, etc. 
+func (c *EMR) ListInstances(input *ListInstancesInput) (*ListInstancesOutput, error) { + req, out := c.ListInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *EMR) ListInstancesPages(input *ListInstancesInput, fn func(p *ListInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstancesOutput), lastPage) + }) +} + +const opListSteps = "ListSteps" + +// ListStepsRequest generates a request for the ListSteps operation. +func (c *EMR) ListStepsRequest(input *ListStepsInput) (req *request.Request, output *ListStepsOutput) { + op := &request.Operation{ + Name: opListSteps, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListStepsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStepsOutput{} + req.Data = output + return +} + +// Provides a list of steps for the cluster. +func (c *EMR) ListSteps(input *ListStepsInput) (*ListStepsOutput, error) { + req, out := c.ListStepsRequest(input) + err := req.Send() + return out, err +} + +func (c *EMR) ListStepsPages(input *ListStepsInput, fn func(p *ListStepsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStepsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStepsOutput), lastPage) + }) +} + +const opModifyInstanceGroups = "ModifyInstanceGroups" + +// ModifyInstanceGroupsRequest generates a request for the ModifyInstanceGroups operation. 
+func (c *EMR) ModifyInstanceGroupsRequest(input *ModifyInstanceGroupsInput) (req *request.Request, output *ModifyInstanceGroupsOutput) { + op := &request.Operation{ + Name: opModifyInstanceGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceGroupsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyInstanceGroupsOutput{} + req.Data = output + return +} + +// ModifyInstanceGroups modifies the number of nodes and configuration settings +// of an instance group. The input parameters include the new target instance +// count for the group and the instance group ID. The call will either succeed +// or fail atomically. +func (c *EMR) ModifyInstanceGroups(input *ModifyInstanceGroupsInput) (*ModifyInstanceGroupsOutput, error) { + req, out := c.ModifyInstanceGroupsRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a request for the RemoveTags operation. +func (c *EMR) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsOutput{} + req.Data = output + return +} + +// Removes tags from an Amazon EMR resource. Tags make it easier to associate +// clusters in various ways, such as grouping clusters to track your Amazon +// EMR resource allocation costs. For more information, see Tagging Amazon EMR +// Resources (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). 
+// +// The following example removes the stack tag with value Prod from a cluster: +func (c *EMR) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + err := req.Send() + return out, err +} + +const opRunJobFlow = "RunJobFlow" + +// RunJobFlowRequest generates a request for the RunJobFlow operation. +func (c *EMR) RunJobFlowRequest(input *RunJobFlowInput) (req *request.Request, output *RunJobFlowOutput) { + op := &request.Operation{ + Name: opRunJobFlow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunJobFlowInput{} + } + + req = c.newRequest(op, input, output) + output = &RunJobFlowOutput{} + req.Data = output + return +} + +// RunJobFlow creates and starts running a new job flow. The job flow will run +// the steps specified. Once the job flow completes, the cluster is stopped +// and the HDFS partition is lost. To prevent loss of data, configure the last +// step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig +// KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the job flow will transition +// to the WAITING state rather than shutting down once the steps have completed. +// +// For additional protection, you can set the JobFlowInstancesConfig TerminationProtected +// parameter to TRUE to lock the job flow and prevent it from being terminated +// by API call, user intervention, or in the event of a job flow error. +// +// A maximum of 256 steps are allowed in each job flow. +// +// If your job flow is long-running (such as a Hive data warehouse) or complex, +// you may require more than 256 steps to process your data. You can bypass +// the 256-step limitation in various ways, including using the SSH shell to +// connect to the master node and submitting queries directly to the software +// running on the master node, such as Hive and Hadoop. 
For more information +// on how to do this, go to Add More than 256 Steps to a Job Flow (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html) +// in the Amazon Elastic MapReduce Developer's Guide. +// +// For long running job flows, we recommend that you periodically store your +// results. +func (c *EMR) RunJobFlow(input *RunJobFlowInput) (*RunJobFlowOutput, error) { + req, out := c.RunJobFlowRequest(input) + err := req.Send() + return out, err +} + +const opSetTerminationProtection = "SetTerminationProtection" + +// SetTerminationProtectionRequest generates a request for the SetTerminationProtection operation. +func (c *EMR) SetTerminationProtectionRequest(input *SetTerminationProtectionInput) (req *request.Request, output *SetTerminationProtectionOutput) { + op := &request.Operation{ + Name: opSetTerminationProtection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTerminationProtectionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetTerminationProtectionOutput{} + req.Data = output + return +} + +// SetTerminationProtection locks a job flow so the Amazon EC2 instances in +// the cluster cannot be terminated by user intervention, an API call, or in +// the event of a job-flow error. The cluster still terminates upon successful +// completion of the job flow. Calling SetTerminationProtection on a job flow +// is analogous to calling the Amazon EC2 DisableAPITermination API on all of +// the EC2 instances in a cluster. +// +// SetTerminationProtection is used to prevent accidental termination of a +// job flow and to ensure that in the event of an error, the instances will +// persist so you can recover any data stored in their ephemeral instance storage. 
+// +// To terminate a job flow that has been locked by setting SetTerminationProtection +// to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection +// in which you set the value to false. +// +// For more information, go to Protecting a Job Flow from Termination (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/UsingEMR_TerminationProtection.html) +// in the Amazon Elastic MapReduce Developer's Guide. +func (c *EMR) SetTerminationProtection(input *SetTerminationProtectionInput) (*SetTerminationProtectionOutput, error) { + req, out := c.SetTerminationProtectionRequest(input) + err := req.Send() + return out, err +} + +const opSetVisibleToAllUsers = "SetVisibleToAllUsers" + +// SetVisibleToAllUsersRequest generates a request for the SetVisibleToAllUsers operation. +func (c *EMR) SetVisibleToAllUsersRequest(input *SetVisibleToAllUsersInput) (req *request.Request, output *SetVisibleToAllUsersOutput) { + op := &request.Operation{ + Name: opSetVisibleToAllUsers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetVisibleToAllUsersInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetVisibleToAllUsersOutput{} + req.Data = output + return +} + +// Sets whether all AWS Identity and Access Management (IAM) users under your +// account can access the specified job flows. This action works on running +// job flows. You can also set the visibility of a job flow when you launch +// it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers +// action can be called only by an IAM user who created the job flow or the +// AWS account that owns the job flow. 
+func (c *EMR) SetVisibleToAllUsers(input *SetVisibleToAllUsersInput) (*SetVisibleToAllUsersOutput, error) { + req, out := c.SetVisibleToAllUsersRequest(input) + err := req.Send() + return out, err +} + +const opTerminateJobFlows = "TerminateJobFlows" + +// TerminateJobFlowsRequest generates a request for the TerminateJobFlows operation. +func (c *EMR) TerminateJobFlowsRequest(input *TerminateJobFlowsInput) (req *request.Request, output *TerminateJobFlowsOutput) { + op := &request.Operation{ + Name: opTerminateJobFlows, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateJobFlowsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &TerminateJobFlowsOutput{} + req.Data = output + return +} + +// TerminateJobFlows shuts a list of job flows down. When a job flow is shut +// down, any step not yet completed is canceled and the EC2 instances on which +// the job flow is running are stopped. Any log files not already saved are +// uploaded to Amazon S3 if a LogUri was specified when the job flow was created. +// +// The maximum number of JobFlows allowed is 10. The call to TerminateJobFlows +// is asynchronous. Depending on the configuration of the job flow, it may take +// up to 5-20 minutes for the job flow to completely terminate and release allocated +// resources, such as Amazon EC2 instances. +func (c *EMR) TerminateJobFlows(input *TerminateJobFlowsInput) (*TerminateJobFlowsOutput, error) { + req, out := c.TerminateJobFlowsRequest(input) + err := req.Send() + return out, err +} + +// Input to an AddInstanceGroups call. +type AddInstanceGroupsInput struct { + _ struct{} `type:"structure"` + + // Instance Groups to add. + InstanceGroups []*InstanceGroupConfig `type:"list" required:"true"` + + // Job flow in which to add the instance groups. 
+ JobFlowId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddInstanceGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddInstanceGroupsInput) GoString() string { + return s.String() +} + +// Output from an AddInstanceGroups call. +type AddInstanceGroupsOutput struct { + _ struct{} `type:"structure"` + + // Instance group IDs of the newly created instance groups. + InstanceGroupIds []*string `type:"list"` + + // The job flow ID in which the instance groups are added. + JobFlowId *string `type:"string"` +} + +// String returns the string representation +func (s AddInstanceGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddInstanceGroupsOutput) GoString() string { + return s.String() +} + +// The input argument to the AddJobFlowSteps operation. +type AddJobFlowStepsInput struct { + _ struct{} `type:"structure"` + + // A string that uniquely identifies the job flow. This identifier is returned + // by RunJobFlow and can also be obtained from ListClusters. + JobFlowId *string `type:"string" required:"true"` + + // A list of StepConfig to be executed by the job flow. + Steps []*StepConfig `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddJobFlowStepsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddJobFlowStepsInput) GoString() string { + return s.String() +} + +// The output for the AddJobFlowSteps operation. +type AddJobFlowStepsOutput struct { + _ struct{} `type:"structure"` + + // The identifiers of the list of steps added to the job flow. 
+ StepIds []*string `type:"list"` +} + +// String returns the string representation +func (s AddJobFlowStepsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddJobFlowStepsOutput) GoString() string { + return s.String() +} + +// This input identifies a cluster and a list of tags to attach. +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon EMR resource identifier to which tags will be added. This value + // must be a cluster identifier. + ResourceId *string `type:"string" required:"true"` + + // A list of tags to associate with a cluster and propagate to Amazon EC2 instances. + // Tags are user-defined key/value pairs that consist of a required key string + // with a maximum of 128 characters, and an optional value string with a maximum + // of 256 characters. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// This output indicates the result of adding tags to a resource. +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// An application is any Amazon or third-party software that you can add to +// the cluster. This structure contains a list of strings that indicates the +// software to use with the cluster and accepts a user argument list. Amazon +// EMR accepts and forwards the argument list to the corresponding installation +// script as bootstrap action argument. 
For more information, see Launch a Job +// Flow on the MapR Distribution for Hadoop (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-mapr.html). +// Currently supported values are: +// +// "mapr-m3" - launch the job flow using MapR M3 Edition. "mapr-m5" - launch +// the job flow using MapR M5 Edition. "mapr" with the user arguments specifying +// "--edition,m3" or "--edition,m5" - launch the job flow using MapR M3 or M5 +// Edition, respectively. In Amazon EMR releases 4.0 and greater, the only +// accepted parameter is the application name. To pass arguments to applications, +// you supply a configuration for each application. +type Application struct { + _ struct{} `type:"structure"` + + // This option is for advanced users only. This is meta information about third-party + // applications that third-party vendors use for testing purposes. + AdditionalInfo map[string]*string `type:"map"` + + // Arguments for Amazon EMR to pass to the application. + Args []*string `type:"list"` + + // The name of the application. + Name *string `type:"string"` + + // The version of the application. + Version *string `type:"string"` +} + +// String returns the string representation +func (s Application) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Application) GoString() string { + return s.String() +} + +// Configuration of a bootstrap action. +type BootstrapActionConfig struct { + _ struct{} `type:"structure"` + + // The name of the bootstrap action. + Name *string `type:"string" required:"true"` + + // The script run by the bootstrap action. 
+ ScriptBootstrapAction *ScriptBootstrapActionConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BootstrapActionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BootstrapActionConfig) GoString() string { + return s.String() +} + +// Reports the configuration of a bootstrap action in a job flow. +type BootstrapActionDetail struct { + _ struct{} `type:"structure"` + + // A description of the bootstrap action. + BootstrapActionConfig *BootstrapActionConfig `type:"structure"` +} + +// String returns the string representation +func (s BootstrapActionDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BootstrapActionDetail) GoString() string { + return s.String() +} + +// The detailed description of the cluster. +type Cluster struct { + _ struct{} `type:"structure"` + + // The applications installed on this cluster. + Applications []*Application `type:"list"` + + // Specifies whether the cluster should terminate after completing all steps. + AutoTerminate *bool `type:"boolean"` + + // Amazon EMR releases 4.x or later. + // + // The list of Configurations supplied to the EMR cluster. + Configurations []*Configuration `type:"list"` + + // Provides information about the EC2 instances in a cluster grouped by category. + // For example, key name, subnet ID, IAM instance profile, and so on. + Ec2InstanceAttributes *Ec2InstanceAttributes `type:"structure"` + + // The unique identifier for the cluster. + Id *string `type:"string"` + + // The path to the Amazon S3 location where logs for this cluster are stored. + LogUri *string `type:"string"` + + // The public DNS name of the master EC2 instance. + MasterPublicDnsName *string `type:"string"` + + // The name of the cluster. + Name *string `type:"string"` + + // An approximation of the cost of the job flow, represented in m1.small/hours. 
+	// This value is incremented one time for every hour an m1.small instance runs.
+	// Larger instances are weighted more, so an EC2 instance that is roughly four
+	// times more expensive would result in the normalized instance hours being
+	// incremented by four. This result is only an approximation and does not reflect
+	// the actual billing rate.
+	NormalizedInstanceHours *int64 `type:"integer"`
+
+	// The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x
+	// AMIs, use amiVersion instead of ReleaseLabel.
+	ReleaseLabel *string `type:"string"`
+
+	// The AMI version requested for this cluster.
+	RequestedAmiVersion *string `type:"string"`
+
+	// The AMI version running on this cluster.
+	RunningAmiVersion *string `type:"string"`
+
+	// The IAM role that will be assumed by the Amazon EMR service to access AWS
+	// resources on your behalf.
+	ServiceRole *string `type:"string"`
+
+	// The current status details about the cluster.
+	Status *ClusterStatus `type:"structure"`
+
+	// A list of tags associated with a cluster.
+	Tags []*Tag `type:"list"`
+
+	// Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances
+	// from being terminated by an API call or user intervention, or in the event
+	// of a cluster error.
+	TerminationProtected *bool `type:"boolean"`
+
+	// Indicates whether the job flow is visible to all IAM users of the AWS account
+	// associated with the job flow. If this value is set to true, all IAM users
+	// of that AWS account can view and manage the job flow if they have the proper
+	// policy permissions set. If this value is false, only the IAM user that created
+	// the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers
+	// action.
+ VisibleToAllUsers *bool `type:"boolean"` +} + +// String returns the string representation +func (s Cluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Cluster) GoString() string { + return s.String() +} + +// The reason that the cluster changed to its current state. +type ClusterStateChangeReason struct { + _ struct{} `type:"structure"` + + // The programmatic code for the state change reason. + Code *string `type:"string" enum:"ClusterStateChangeReasonCode"` + + // The descriptive message for the state change reason. + Message *string `type:"string"` +} + +// String returns the string representation +func (s ClusterStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterStateChangeReason) GoString() string { + return s.String() +} + +// The detailed status of the cluster. +type ClusterStatus struct { + _ struct{} `type:"structure"` + + // The current state of the cluster. + State *string `type:"string" enum:"ClusterState"` + + // The reason for the cluster status change. + StateChangeReason *ClusterStateChangeReason `type:"structure"` + + // A timeline that represents the status of a cluster over the lifetime of the + // cluster. + Timeline *ClusterTimeline `type:"structure"` +} + +// String returns the string representation +func (s ClusterStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterStatus) GoString() string { + return s.String() +} + +// The summary description of the cluster. +type ClusterSummary struct { + _ struct{} `type:"structure"` + + // The unique identifier for the cluster. + Id *string `type:"string"` + + // The name of the cluster. + Name *string `type:"string"` + + // An approximation of the cost of the job flow, represented in m1.small/hours. + // This value is incremented one time for every hour an m1.small instance runs. 
+ // Larger instances are weighted more, so an EC2 instance that is roughly four + // times more expensive would result in the normalized instance hours being + // incremented by four. This result is only an approximation and does not reflect + // the actual billing rate. + NormalizedInstanceHours *int64 `type:"integer"` + + // The details about the current status of the cluster. + Status *ClusterStatus `type:"structure"` +} + +// String returns the string representation +func (s ClusterSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSummary) GoString() string { + return s.String() +} + +// Represents the timeline of the cluster's lifecycle. +type ClusterTimeline struct { + _ struct{} `type:"structure"` + + // The creation date and time of the cluster. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the cluster was terminated. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the cluster was ready to execute steps. + ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s ClusterTimeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterTimeline) GoString() string { + return s.String() +} + +// An entity describing an executable that runs on a cluster. +type Command struct { + _ struct{} `type:"structure"` + + // Arguments for Amazon EMR to pass to the command for execution. + Args []*string `type:"list"` + + // The name of the command. + Name *string `type:"string"` + + // The Amazon S3 location of the command script. 
+ ScriptPath *string `type:"string"` +} + +// String returns the string representation +func (s Command) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Command) GoString() string { + return s.String() +} + +// Amazon EMR releases 4.x or later. +// +// Specifies a hardware and software configuration of the EMR cluster. This +// includes configurations for applications and software bundled with Amazon +// EMR. The Configuration object is a JSON object which is defined by a classification +// and a set of properties. Configurations can be nested, so a configuration +// may have its own Configuration objects listed. +type Configuration struct { + _ struct{} `type:"structure"` + + // The classification of a configuration. For more information see, Amazon EMR + // Configurations (http://docs.aws.amazon.com/ElasticMapReduce/latest/API/EmrConfigurations.html). + Classification *string `type:"string"` + + // A list of configurations you apply to this configuration object. + Configurations []*Configuration `type:"list"` + + // A set of properties supplied to the Configuration object. + Properties map[string]*string `type:"map"` +} + +// String returns the string representation +func (s Configuration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Configuration) GoString() string { + return s.String() +} + +// This input determines which cluster to describe. +type DescribeClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster to describe. + ClusterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterInput) GoString() string { + return s.String() +} + +// This output contains the description of the cluster. 
+type DescribeClusterOutput struct { + _ struct{} `type:"structure"` + + // This output contains the details for the requested cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s DescribeClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterOutput) GoString() string { + return s.String() +} + +// The input for the DescribeJobFlows operation. +type DescribeJobFlowsInput struct { + _ struct{} `type:"structure"` + + // Return only job flows created after this date and time. + CreatedAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Return only job flows created before this date and time. + CreatedBefore *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Return only job flows whose job flow ID is contained in this list. + JobFlowIds []*string `type:"list"` + + // Return only job flows whose state is contained in this list. + JobFlowStates []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeJobFlowsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJobFlowsInput) GoString() string { + return s.String() +} + +// The output for the DescribeJobFlows operation. +type DescribeJobFlowsOutput struct { + _ struct{} `type:"structure"` + + // A list of job flows matching the parameters supplied. + JobFlows []*JobFlowDetail `type:"list"` +} + +// String returns the string representation +func (s DescribeJobFlowsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJobFlowsOutput) GoString() string { + return s.String() +} + +// This input determines which step to describe. +type DescribeStepInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster with steps to describe. 
+ ClusterId *string `type:"string" required:"true"` + + // The identifier of the step to describe. + StepId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStepInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStepInput) GoString() string { + return s.String() +} + +// This output contains the description of the cluster step. +type DescribeStepOutput struct { + _ struct{} `type:"structure"` + + // The step details for the requested step identifier. + Step *Step `type:"structure"` +} + +// String returns the string representation +func (s DescribeStepOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStepOutput) GoString() string { + return s.String() +} + +// Provides information about the EC2 instances in a cluster grouped by category. +// For example, key name, subnet ID, IAM instance profile, and so on. +type Ec2InstanceAttributes struct { + _ struct{} `type:"structure"` + + // A list of additional Amazon EC2 security group IDs for the master node. + AdditionalMasterSecurityGroups []*string `type:"list"` + + // A list of additional Amazon EC2 security group IDs for the slave nodes. + AdditionalSlaveSecurityGroups []*string `type:"list"` + + // The Availability Zone in which the cluster will run. + Ec2AvailabilityZone *string `type:"string"` + + // The name of the Amazon EC2 key pair to use when connecting with SSH into + // the master node as a user named "hadoop". + Ec2KeyName *string `type:"string"` + + // To launch the job flow in Amazon VPC, set this parameter to the identifier + // of the Amazon VPC subnet where you want the job flow to launch. If you do + // not specify this value, the job flow is launched in the normal AWS cloud, + // outside of a VPC. 
+ // + // Amazon VPC currently does not support cluster compute quadruple extra large + // (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance + // type for nodes of a job flow launched in a VPC. + Ec2SubnetId *string `type:"string"` + + // The identifier of the Amazon EC2 security group for the master node. + EmrManagedMasterSecurityGroup *string `type:"string"` + + // The identifier of the Amazon EC2 security group for the slave nodes. + EmrManagedSlaveSecurityGroup *string `type:"string"` + + // The IAM role that was specified when the job flow was launched. The EC2 instances + // of the job flow assume this role. + IamInstanceProfile *string `type:"string"` + + // The identifier of the Amazon EC2 security group for the Amazon EMR service + // to access clusters in VPC private subnets. + ServiceAccessSecurityGroup *string `type:"string"` +} + +// String returns the string representation +func (s Ec2InstanceAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ec2InstanceAttributes) GoString() string { + return s.String() +} + +// A job flow step consisting of a JAR file whose main function will be executed. +// The main function submits a job for Hadoop to execute and waits for the job +// to finish or fail. +type HadoopJarStepConfig struct { + _ struct{} `type:"structure"` + + // A list of command line arguments passed to the JAR file's main function when + // executed. + Args []*string `type:"list"` + + // A path to a JAR file run during the step. + Jar *string `type:"string" required:"true"` + + // The name of the main class in the specified Java file. If not specified, + // the JAR file should specify a Main-Class in its manifest file. + MainClass *string `type:"string"` + + // A list of Java properties that are set when the step runs. You can use these + // properties to pass key value pairs to your main function. 
+ Properties []*KeyValue `type:"list"` +} + +// String returns the string representation +func (s HadoopJarStepConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HadoopJarStepConfig) GoString() string { + return s.String() +} + +// A cluster step consisting of a JAR file whose main function will be executed. +// The main function submits a job for Hadoop to execute and waits for the job +// to finish or fail. +type HadoopStepConfig struct { + _ struct{} `type:"structure"` + + // The list of command line arguments to pass to the JAR file's main function + // for execution. + Args []*string `type:"list"` + + // The path to the JAR file that runs during the step. + Jar *string `type:"string"` + + // The name of the main class in the specified Java file. If not specified, + // the JAR file should specify a main class in its manifest file. + MainClass *string `type:"string"` + + // The list of Java properties that are set when the step runs. You can use + // these properties to pass key value pairs to your main function. + Properties map[string]*string `type:"map"` +} + +// String returns the string representation +func (s HadoopStepConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HadoopStepConfig) GoString() string { + return s.String() +} + +// Represents an EC2 instance provisioned as part of cluster. +type Instance struct { + _ struct{} `type:"structure"` + + // The unique identifier of the instance in Amazon EC2. + Ec2InstanceId *string `type:"string"` + + // The unique identifier for the instance in Amazon EMR. + Id *string `type:"string"` + + // The private DNS name of the instance. + PrivateDnsName *string `type:"string"` + + // The private IP address of the instance. + PrivateIpAddress *string `type:"string"` + + // The public DNS name of the instance. 
+ PublicDnsName *string `type:"string"` + + // The public IP address of the instance. + PublicIpAddress *string `type:"string"` + + // The current status of the instance. + Status *InstanceStatus `type:"structure"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// This entity represents an instance group, which is a group of instances that +// have common purpose. For example, CORE instance group is used for HDFS. +type InstanceGroup struct { + _ struct{} `type:"structure"` + + // The bid price for each EC2 instance in the instance group when launching + // nodes as Spot Instances, expressed in USD. + BidPrice *string `type:"string"` + + // Amazon EMR releases 4.x or later. + // + // The list of configurations supplied for an EMR cluster instance group. You + // can specify a separate configuration for each instance group (master, core, + // and task). + Configurations []*Configuration `type:"list"` + + // The identifier of the instance group. + Id *string `type:"string"` + + // The type of the instance group. Valid values are MASTER, CORE or TASK. + InstanceGroupType *string `type:"string" enum:"InstanceGroupType"` + + // The EC2 instance type for all instances in the instance group. + InstanceType *string `min:"1" type:"string"` + + // The marketplace to provision instances for this group. Valid values are ON_DEMAND + // or SPOT. + Market *string `type:"string" enum:"MarketType"` + + // The name of the instance group. + Name *string `type:"string"` + + // The target number of instances for the instance group. + RequestedInstanceCount *int64 `type:"integer"` + + // The number of instances currently running in this instance group. + RunningInstanceCount *int64 `type:"integer"` + + // The current status of the instance group. 
+ Status *InstanceGroupStatus `type:"structure"` +} + +// String returns the string representation +func (s InstanceGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroup) GoString() string { + return s.String() +} + +// Configuration defining a new instance group. +type InstanceGroupConfig struct { + _ struct{} `type:"structure"` + + // Bid price for each Amazon EC2 instance in the instance group when launching + // nodes as Spot Instances, expressed in USD. + BidPrice *string `type:"string"` + + // Amazon EMR releases 4.x or later. + // + // The list of configurations supplied for an EMR cluster instance group. You + // can specify a separate configuration for each instance group (master, core, + // and task). + Configurations []*Configuration `type:"list"` + + // Target number of instances for the instance group. + InstanceCount *int64 `type:"integer" required:"true"` + + // The role of the instance group in the cluster. + InstanceRole *string `type:"string" required:"true" enum:"InstanceRoleType"` + + // The Amazon EC2 instance type for all instances in the instance group. + InstanceType *string `min:"1" type:"string" required:"true"` + + // Market type of the Amazon EC2 instances used to create a cluster node. + Market *string `type:"string" enum:"MarketType"` + + // Friendly name given to the instance group. + Name *string `type:"string"` +} + +// String returns the string representation +func (s InstanceGroupConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupConfig) GoString() string { + return s.String() +} + +// Detailed information about an instance group. +type InstanceGroupDetail struct { + _ struct{} `type:"structure"` + + // Bid price for EC2 Instances when launching nodes as Spot Instances, expressed + // in USD. + BidPrice *string `type:"string"` + + // The date/time the instance group was created. 
+ CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The date/time the instance group was terminated. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Unique identifier for the instance group. + InstanceGroupId *string `type:"string"` + + // Target number of instances to run in the instance group. + InstanceRequestCount *int64 `type:"integer" required:"true"` + + // Instance group role in the cluster + InstanceRole *string `type:"string" required:"true" enum:"InstanceRoleType"` + + // Actual count of running instances. + InstanceRunningCount *int64 `type:"integer" required:"true"` + + // Amazon EC2 Instance type. + InstanceType *string `min:"1" type:"string" required:"true"` + + // Details regarding the state of the instance group. + LastStateChangeReason *string `type:"string"` + + // Market type of the Amazon EC2 instances used to create a cluster node. + Market *string `type:"string" required:"true" enum:"MarketType"` + + // Friendly name for the instance group. + Name *string `type:"string"` + + // The date/time the instance group was available to the cluster. + ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date/time the instance group was started. + StartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // State of instance group. The following values are deprecated: STARTING, TERMINATED, + // and FAILED. + State *string `type:"string" required:"true" enum:"InstanceGroupState"` +} + +// String returns the string representation +func (s InstanceGroupDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupDetail) GoString() string { + return s.String() +} + +// Modify an instance group size. +type InstanceGroupModifyConfig struct { + _ struct{} `type:"structure"` + + // The EC2 InstanceIds to terminate. For advanced users only. 
Once you terminate + // the instances, the instance group will not return to its original requested + // size. + EC2InstanceIdsToTerminate []*string `type:"list"` + + // Target size for the instance group. + InstanceCount *int64 `type:"integer"` + + // Unique ID of the instance group to expand or shrink. + InstanceGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InstanceGroupModifyConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupModifyConfig) GoString() string { + return s.String() +} + +// The status change reason details for the instance group. +type InstanceGroupStateChangeReason struct { + _ struct{} `type:"structure"` + + // The programmable code for the state change reason. + Code *string `type:"string" enum:"InstanceGroupStateChangeReasonCode"` + + // The status change reason description. + Message *string `type:"string"` +} + +// String returns the string representation +func (s InstanceGroupStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupStateChangeReason) GoString() string { + return s.String() +} + +// The details of the instance group status. +type InstanceGroupStatus struct { + _ struct{} `type:"structure"` + + // The current state of the instance group. + State *string `type:"string" enum:"InstanceGroupState"` + + // The status change reason details for the instance group. + StateChangeReason *InstanceGroupStateChangeReason `type:"structure"` + + // The timeline of the instance group status over time. 
+ Timeline *InstanceGroupTimeline `type:"structure"` +} + +// String returns the string representation +func (s InstanceGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupStatus) GoString() string { + return s.String() +} + +// The timeline of the instance group lifecycle. +type InstanceGroupTimeline struct { + _ struct{} `type:"structure"` + + // The creation date and time of the instance group. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the instance group terminated. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the instance group became ready to perform tasks. + ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s InstanceGroupTimeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceGroupTimeline) GoString() string { + return s.String() +} + +// The details of the status change reason for the instance. +type InstanceStateChangeReason struct { + _ struct{} `type:"structure"` + + // The programmable code for the state change reason. + Code *string `type:"string" enum:"InstanceStateChangeReasonCode"` + + // The status change reason description. + Message *string `type:"string"` +} + +// String returns the string representation +func (s InstanceStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStateChangeReason) GoString() string { + return s.String() +} + +// The instance status details. +type InstanceStatus struct { + _ struct{} `type:"structure"` + + // The current state of the instance. + State *string `type:"string" enum:"InstanceState"` + + // The details of the status change reason for the instance. 
+ StateChangeReason *InstanceStateChangeReason `type:"structure"` + + // The timeline of the instance status over time. + Timeline *InstanceTimeline `type:"structure"` +} + +// String returns the string representation +func (s InstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatus) GoString() string { + return s.String() +} + +// The timeline of the instance lifecycle. +type InstanceTimeline struct { + _ struct{} `type:"structure"` + + // The creation date and time of the instance. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the instance was terminated. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the instance was ready to perform tasks. + ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s InstanceTimeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceTimeline) GoString() string { + return s.String() +} + +// A description of a job flow. +type JobFlowDetail struct { + _ struct{} `type:"structure"` + + // The version of the AMI used to initialize Amazon EC2 instances in the job + // flow. For a list of AMI versions currently supported by Amazon ElasticMapReduce, + // go to AMI Versions Supported in Elastic MapReduce (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/EnvironmentConfig_AMIVersion.html#ami-versions-supported) + // in the Amazon Elastic MapReduce Developer Guide. + AmiVersion *string `type:"string"` + + // A list of the bootstrap actions run by the job flow. + BootstrapActions []*BootstrapActionDetail `type:"list"` + + // Describes the execution status of the job flow. 
+ ExecutionStatusDetail *JobFlowExecutionStatusDetail `type:"structure" required:"true"` + + // Describes the Amazon EC2 instances of the job flow. + Instances *JobFlowInstancesDetail `type:"structure" required:"true"` + + // The job flow identifier. + JobFlowId *string `type:"string" required:"true"` + + // The IAM role that was specified when the job flow was launched. The EC2 instances + // of the job flow assume this role. + JobFlowRole *string `type:"string"` + + // The location in Amazon S3 where log files for the job are stored. + LogUri *string `type:"string"` + + // The name of the job flow. + Name *string `type:"string" required:"true"` + + // The IAM role that will be assumed by the Amazon EMR service to access AWS + // resources on your behalf. + ServiceRole *string `type:"string"` + + // A list of steps run by the job flow. + Steps []*StepDetail `type:"list"` + + // A list of strings set by third party software when the job flow is launched. + // If you are not using third party software to manage the job flow this value + // is empty. + SupportedProducts []*string `type:"list"` + + // Specifies whether the job flow is visible to all IAM users of the AWS account + // associated with the job flow. If this value is set to true, all IAM users + // of that AWS account can view and (if they have the proper policy permissions + // set) manage the job flow. If it is set to false, only the IAM user that created + // the job flow can view and manage it. This value can be changed using the + // SetVisibleToAllUsers action. + VisibleToAllUsers *bool `type:"boolean"` +} + +// String returns the string representation +func (s JobFlowDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobFlowDetail) GoString() string { + return s.String() +} + +// Describes the status of the job flow. 
+type JobFlowExecutionStatusDetail struct { + _ struct{} `type:"structure"` + + // The creation date and time of the job flow. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The completion date and time of the job flow. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Description of the job flow last changed state. + LastStateChangeReason *string `type:"string"` + + // The date and time when the job flow was ready to start running bootstrap + // actions. + ReadyDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The start date and time of the job flow. + StartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The state of the job flow. + State *string `type:"string" required:"true" enum:"JobFlowExecutionState"` +} + +// String returns the string representation +func (s JobFlowExecutionStatusDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobFlowExecutionStatusDetail) GoString() string { + return s.String() +} + +// A description of the Amazon EC2 instance running the job flow. A valid JobFlowInstancesConfig +// must contain at least InstanceGroups, which is the recommended configuration. +// However, a valid alternative is to have MasterInstanceType, SlaveInstanceType, +// and InstanceCount (all three must be present). +type JobFlowInstancesConfig struct { + _ struct{} `type:"structure"` + + // A list of additional Amazon EC2 security group IDs for the master node. + AdditionalMasterSecurityGroups []*string `type:"list"` + + // A list of additional Amazon EC2 security group IDs for the slave nodes. + AdditionalSlaveSecurityGroups []*string `type:"list"` + + // The name of the Amazon EC2 key pair that can be used to ssh to the master + // node as the user called "hadoop." 
+ Ec2KeyName *string `type:"string"` + + // To launch the job flow in Amazon Virtual Private Cloud (Amazon VPC), set + // this parameter to the identifier of the Amazon VPC subnet where you want + // the job flow to launch. If you do not specify this value, the job flow is + // launched in the normal Amazon Web Services cloud, outside of an Amazon VPC. + // + // Amazon VPC currently does not support cluster compute quadruple extra large + // (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance + // type for nodes of a job flow launched in a Amazon VPC. + Ec2SubnetId *string `type:"string"` + + // The identifier of the Amazon EC2 security group for the master node. + EmrManagedMasterSecurityGroup *string `type:"string"` + + // The identifier of the Amazon EC2 security group for the slave nodes. + EmrManagedSlaveSecurityGroup *string `type:"string"` + + // The Hadoop version for the job flow. Valid inputs are "0.18" (deprecated), + // "0.20" (deprecated), "0.20.205" (deprecated), "1.0.3", "2.2.0", or "2.4.0". + // If you do not set this value, the default of 0.18 is used, unless the AmiVersion + // parameter is set in the RunJobFlow call, in which case the default version + // of Hadoop for that AMI version is used. + HadoopVersion *string `type:"string"` + + // The number of Amazon EC2 instances used to execute the job flow. + InstanceCount *int64 `type:"integer"` + + // Configuration for the job flow's instance groups. + InstanceGroups []*InstanceGroupConfig `type:"list"` + + // Specifies whether the job flow should be kept alive after completing all + // steps. + KeepJobFlowAliveWhenNoSteps *bool `type:"boolean"` + + // The EC2 instance type of the master node. + MasterInstanceType *string `min:"1" type:"string"` + + // The Availability Zone the job flow will run in. + Placement *PlacementType `type:"structure"` + + // The identifier of the Amazon EC2 security group for the Amazon EMR service + // to access clusters in VPC private subnets. 
+ ServiceAccessSecurityGroup *string `type:"string"` + + // The EC2 instance type of the slave nodes. + SlaveInstanceType *string `min:"1" type:"string"` + + // Specifies whether to lock the job flow to prevent the Amazon EC2 instances + // from being terminated by API call, user intervention, or in the event of + // a job flow error. + TerminationProtected *bool `type:"boolean"` +} + +// String returns the string representation +func (s JobFlowInstancesConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobFlowInstancesConfig) GoString() string { + return s.String() +} + +// Specify the type of Amazon EC2 instances to run the job flow on. +type JobFlowInstancesDetail struct { + _ struct{} `type:"structure"` + + // The name of an Amazon EC2 key pair that can be used to ssh to the master + // node of job flow. + Ec2KeyName *string `type:"string"` + + // For job flows launched within Amazon Virtual Private Cloud, this value specifies + // the identifier of the subnet where the job flow was launched. + Ec2SubnetId *string `type:"string"` + + // The Hadoop version for the job flow. + HadoopVersion *string `type:"string"` + + // The number of Amazon EC2 instances in the cluster. If the value is 1, the + // same instance serves as both the master and slave node. If the value is greater + // than 1, one instance is the master node and all others are slave nodes. + InstanceCount *int64 `type:"integer" required:"true"` + + // Details about the job flow's instance groups. + InstanceGroups []*InstanceGroupDetail `type:"list"` + + // Specifies whether the job flow should terminate after completing all steps. + KeepJobFlowAliveWhenNoSteps *bool `type:"boolean"` + + // The Amazon EC2 instance identifier of the master node. + MasterInstanceId *string `type:"string"` + + // The Amazon EC2 master node instance type. 
+ MasterInstanceType *string `min:"1" type:"string" required:"true"` + + // The DNS name of the master node. + MasterPublicDnsName *string `type:"string"` + + // An approximation of the cost of the job flow, represented in m1.small/hours. + // This value is incremented once for every hour an m1.small runs. Larger instances + // are weighted more, so an Amazon EC2 instance that is roughly four times more + // expensive would result in the normalized instance hours being incremented + // by four. This result is only an approximation and does not reflect the actual + // billing rate. + NormalizedInstanceHours *int64 `type:"integer"` + + // The Amazon EC2 Availability Zone for the job flow. + Placement *PlacementType `type:"structure"` + + // The Amazon EC2 slave node instance type. + SlaveInstanceType *string `min:"1" type:"string" required:"true"` + + // Specifies whether the Amazon EC2 instances in the cluster are protected from + // termination by API calls, user intervention, or in the event of a job flow + // error. + TerminationProtected *bool `type:"boolean"` +} + +// String returns the string representation +func (s JobFlowInstancesDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobFlowInstancesDetail) GoString() string { + return s.String() +} + +// A key value pair. +type KeyValue struct { + _ struct{} `type:"structure"` + + // The unique identifier of a key value pair. + Key *string `type:"string"` + + // The value part of the identified key. + Value *string `type:"string"` +} + +// String returns the string representation +func (s KeyValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyValue) GoString() string { + return s.String() +} + +// This input determines which bootstrap actions to retrieve. 
+type ListBootstrapActionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The cluster identifier for the bootstrap actions to list .
+	ClusterId *string `type:"string" required:"true"`
+
+	// The pagination token that indicates the next set of results to retrieve .
+	Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBootstrapActionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBootstrapActionsInput) GoString() string {
+	return s.String()
+}
+
+// This output contains the bootstrap actions detail .
+type ListBootstrapActionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The bootstrap actions associated with the cluster .
+	BootstrapActions []*Command `type:"list"`
+
+	// The pagination token that indicates the next set of results to retrieve .
+	Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBootstrapActionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBootstrapActionsOutput) GoString() string {
+	return s.String()
+}
+
+// This input determines how the ListClusters action filters the list of clusters
+// that it returns.
+type ListClustersInput struct {
+	_ struct{} `type:"structure"`
+
+	// The cluster state filters to apply when listing clusters.
+	ClusterStates []*string `type:"list"`
+
+	// The creation date and time beginning value filter for listing clusters .
+	CreatedAfter *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The creation date and time end value filter for listing clusters .
+	CreatedBefore *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+	// The pagination token that indicates the next set of results to retrieve.
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersInput) GoString() string { + return s.String() +} + +// This contains a ClusterSummaryList with the cluster details; for example, +// the cluster IDs, names, and status. +type ListClustersOutput struct { + _ struct{} `type:"structure"` + + // The list of clusters for the account based on the given filters. + Clusters []*ClusterSummary `type:"list"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersOutput) GoString() string { + return s.String() +} + +// This input determines which instance groups to retrieve. +type ListInstanceGroupsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which to list the instance groups. + ClusterId *string `type:"string" required:"true"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstanceGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceGroupsInput) GoString() string { + return s.String() +} + +// This input determines which instance groups to retrieve. +type ListInstanceGroupsOutput struct { + _ struct{} `type:"structure"` + + // The list of instance groups for the cluster and given filters. + InstanceGroups []*InstanceGroup `type:"list"` + + // The pagination token that indicates the next set of results to retrieve. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstanceGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceGroupsOutput) GoString() string { + return s.String() +} + +// This input determines which instances to list. +type ListInstancesInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which to list the instances. + ClusterId *string `type:"string" required:"true"` + + // The identifier of the instance group for which to list the instances. + InstanceGroupId *string `type:"string"` + + // The type of instance group for which to list the instances. + InstanceGroupTypes []*string `type:"list"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstancesInput) GoString() string { + return s.String() +} + +// This output contains the list of instances. +type ListInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of instances for the cluster and given filters. + Instances []*Instance `type:"list"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstancesOutput) GoString() string { + return s.String() +} + +// This input determines which steps to list. +type ListStepsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which to list the steps. 
+ ClusterId *string `type:"string" required:"true"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` + + // The filter to limit the step list based on the identifier of the steps. + StepIds []*string `type:"list"` + + // The filter to limit the step list based on certain states. + StepStates []*string `type:"list"` +} + +// String returns the string representation +func (s ListStepsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStepsInput) GoString() string { + return s.String() +} + +// This output contains the list of steps. +type ListStepsOutput struct { + _ struct{} `type:"structure"` + + // The pagination token that indicates the next set of results to retrieve. + Marker *string `type:"string"` + + // The filtered list of steps for the cluster. + Steps []*StepSummary `type:"list"` +} + +// String returns the string representation +func (s ListStepsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStepsOutput) GoString() string { + return s.String() +} + +// Change the size of some instance groups. +type ModifyInstanceGroupsInput struct { + _ struct{} `type:"structure"` + + // Instance groups to change. 
+ InstanceGroups []*InstanceGroupModifyConfig `type:"list"` +} + +// String returns the string representation +func (s ModifyInstanceGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceGroupsInput) GoString() string { + return s.String() +} + +type ModifyInstanceGroupsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceGroupsOutput) GoString() string { + return s.String() +} + +// The Amazon EC2 location for the job flow. +type PlacementType struct { + _ struct{} `type:"structure"` + + // The Amazon EC2 Availability Zone for the job flow. + AvailabilityZone *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PlacementType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementType) GoString() string { + return s.String() +} + +// This input identifies a cluster and a list of tags to remove. +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon EMR resource identifier from which tags will be removed. This + // value must be a cluster identifier. + ResourceId *string `type:"string" required:"true"` + + // A list of tag keys to remove from a resource. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// This output indicates the result of removing tags from a resource. 
+type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Input to the RunJobFlow operation. +type RunJobFlowInput struct { + _ struct{} `type:"structure"` + + // A JSON string for selecting additional features. + AdditionalInfo *string `type:"string"` + + // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, + // use ReleaseLabel. + // + // The version of the Amazon Machine Image (AMI) to use when launching Amazon + // EC2 instances in the job flow. The following values are valid: + // + // The version number of the AMI to use, for example, "2.0." If the AMI supports + // multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 + // and 0.20) you can use the JobFlowInstancesConfig HadoopVersion parameter + // to modify the version of Hadoop from the defaults shown above. + // + // For details about the AMI versions currently supported by Amazon Elastic + // MapReduce, go to AMI Versions Supported in Elastic MapReduce (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/EnvironmentConfig_AMIVersion.html#ami-versions-supported) + // in the Amazon Elastic MapReduce Developer's Guide. + AmiVersion *string `type:"string"` + + // Amazon EMR releases 4.x or later. + // + // A list of applications for the cluster. Valid values are: "Hadoop", "Hive", + // "Mahout", "Pig", and "Spark." They are case insensitive. + Applications []*Application `type:"list"` + + // A list of bootstrap actions that will be run before Hadoop is started on + // the cluster nodes. + BootstrapActions []*BootstrapActionConfig `type:"list"` + + // Amazon EMR releases 4.x or later. + // + // The list of configurations supplied for the EMR cluster you are creating. 
+	Configurations []*Configuration `type:"list"`
+
+	// A specification of the number and type of Amazon EC2 instances on which to
+	// run the job flow.
+	Instances *JobFlowInstancesConfig `type:"structure" required:"true"`
+
+	// Also called instance profile and EC2 role. An IAM role for an EMR cluster.
+	// The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole.
+	// In order to use the default role, you must have already created it using
+	// the CLI or console.
+	JobFlowRole *string `type:"string"`
+
+	// The location in Amazon S3 to write the log files of the job flow. If a value
+	// is not provided, logs are not created.
+	LogUri *string `type:"string"`
+
+	// The name of the job flow.
+	Name *string `type:"string" required:"true"`
+
+	// For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater,
+	// use Applications.
+	//
+	// A list of strings that indicates third-party software to use with the job
+	// flow that accepts a user argument list. EMR accepts and forwards the argument
+	// list to the corresponding installation script as bootstrap action arguments.
+	// For more information, see Launch a Job Flow on the MapR Distribution for
+	// Hadoop (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-mapr.html).
+	// Currently supported values are:
+	//
+	// "mapr-m3" - launch the cluster using MapR M3 Edition. "mapr-m5" - launch
+	// the cluster using MapR M5 Edition. "mapr" with the user arguments specifying
+	// "--edition,m3" or "--edition,m5" - launch the job flow using MapR M3 or M5
+	// Edition respectively. "mapr-m7" - launch the cluster using MapR M7 Edition.
+	// "hunk" - launch the cluster with the Hunk Big Data Analytics Platform. "hue"-
+	// launch the cluster with Hue installed. "spark" - launch the cluster with
+	// Apache Spark installed. "ganglia" - launch the cluster with the Ganglia Monitoring
+	// System installed.
+	NewSupportedProducts []*SupportedProductConfig `type:"list"`
+
+	// Amazon EMR releases 4.x or later.
+	//
+	// The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x
+	// AMIs, use amiVersion instead of ReleaseLabel.
+	ReleaseLabel *string `type:"string"`
+
+	// The IAM role that will be assumed by the Amazon EMR service to access AWS
+	// resources on your behalf.
+	ServiceRole *string `type:"string"`
+
+	// A list of steps to be executed by the job flow.
+	Steps []*StepConfig `type:"list"`
+
+	// For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater,
+	// use Applications.
+	//
+	// A list of strings that indicates third-party software to use with the job
+	// flow. For more information, go to Use Third Party Applications with Amazon
+	// EMR (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-supported-products.html).
+	// Currently supported values are:
+	//
+	// "mapr-m3" - launch the job flow using MapR M3 Edition. "mapr-m5" - launch
+	// the job flow using MapR M5 Edition.
+	SupportedProducts []*string `type:"list"`
+
+	// A list of tags to associate with a cluster and propagate to Amazon EC2 instances.
+	Tags []*Tag `type:"list"`
+
+	// Whether the job flow is visible to all IAM users of the AWS account associated
+	// with the job flow. If this value is set to true, all IAM users of that AWS
+	// account can view and (if they have the proper policy permissions set) manage
+	// the job flow. If it is set to false, only the IAM user that created the job
+	// flow can view and manage it.
+	VisibleToAllUsers *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s RunJobFlowInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RunJobFlowInput) GoString() string {
+	return s.String()
+}
+
+// The result of the RunJobFlow operation.
+type RunJobFlowOutput struct { + _ struct{} `type:"structure"` + + // An unique identifier for the job flow. + JobFlowId *string `type:"string"` +} + +// String returns the string representation +func (s RunJobFlowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunJobFlowOutput) GoString() string { + return s.String() +} + +// Configuration of the script to run during a bootstrap action. +type ScriptBootstrapActionConfig struct { + _ struct{} `type:"structure"` + + // A list of command line arguments to pass to the bootstrap action script. + Args []*string `type:"list"` + + // Location of the script to run during a bootstrap action. Can be either a + // location in Amazon S3 or on a local file system. + Path *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ScriptBootstrapActionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScriptBootstrapActionConfig) GoString() string { + return s.String() +} + +// The input argument to the TerminationProtection operation. +type SetTerminationProtectionInput struct { + _ struct{} `type:"structure"` + + // A list of strings that uniquely identify the job flows to protect. This identifier + // is returned by RunJobFlow and can also be obtained from DescribeJobFlows + // . + JobFlowIds []*string `type:"list" required:"true"` + + // A Boolean that indicates whether to protect the job flow and prevent the + // Amazon EC2 instances in the cluster from shutting down due to API calls, + // user intervention, or job-flow error. 
+ TerminationProtected *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s SetTerminationProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTerminationProtectionInput) GoString() string { + return s.String() +} + +type SetTerminationProtectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTerminationProtectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTerminationProtectionOutput) GoString() string { + return s.String() +} + +// The input to the SetVisibleToAllUsers action. +type SetVisibleToAllUsersInput struct { + _ struct{} `type:"structure"` + + // Identifiers of the job flows to receive the new visibility setting. + JobFlowIds []*string `type:"list" required:"true"` + + // Whether the specified job flows are visible to all IAM users of the AWS account + // associated with the job flow. If this value is set to True, all IAM users + // of that AWS account can view and, if they have the proper IAM policy permissions + // set, manage the job flows. If it is set to False, only the IAM user that + // created a job flow can view and manage it. 
+ VisibleToAllUsers *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s SetVisibleToAllUsersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVisibleToAllUsersInput) GoString() string { + return s.String() +} + +type SetVisibleToAllUsersOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetVisibleToAllUsersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVisibleToAllUsersOutput) GoString() string { + return s.String() +} + +// This represents a step in a cluster. +type Step struct { + _ struct{} `type:"structure"` + + // This specifies what action to take when the cluster step fails. Possible + // values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. + ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` + + // The Hadoop job configuration of the cluster step. + Config *HadoopStepConfig `type:"structure"` + + // The identifier of the cluster step. + Id *string `type:"string"` + + // The name of the cluster step. + Name *string `type:"string"` + + // The current execution status details of the cluster step. + Status *StepStatus `type:"structure"` +} + +// String returns the string representation +func (s Step) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Step) GoString() string { + return s.String() +} + +// Specification of a job flow step. +type StepConfig struct { + _ struct{} `type:"structure"` + + // The action to take if the job flow step fails. + ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` + + // The JAR file used for the job flow step. + HadoopJarStep *HadoopJarStepConfig `type:"structure" required:"true"` + + // The name of the job flow step. 
+ Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StepConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepConfig) GoString() string { + return s.String() +} + +// Combines the execution state and configuration of a step. +type StepDetail struct { + _ struct{} `type:"structure"` + + // The description of the step status. + ExecutionStatusDetail *StepExecutionStatusDetail `type:"structure" required:"true"` + + // The step configuration. + StepConfig *StepConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s StepDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepDetail) GoString() string { + return s.String() +} + +// The execution state of a step. +type StepExecutionStatusDetail struct { + _ struct{} `type:"structure"` + + // The creation date and time of the step. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The completion date and time of the step. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A description of the step's current state. + LastStateChangeReason *string `type:"string"` + + // The start date and time of the step. + StartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The state of the job flow step. + State *string `type:"string" required:"true" enum:"StepExecutionState"` +} + +// String returns the string representation +func (s StepExecutionStatusDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepExecutionStatusDetail) GoString() string { + return s.String() +} + +// The details of the step state change reason. +type StepStateChangeReason struct { + _ struct{} `type:"structure"` + + // The programmable code for the state change reason. 
Note: Currently, the service + // provides no code for the state change. + Code *string `type:"string" enum:"StepStateChangeReasonCode"` + + // The descriptive message for the state change reason. + Message *string `type:"string"` +} + +// String returns the string representation +func (s StepStateChangeReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepStateChangeReason) GoString() string { + return s.String() +} + +// The execution status details of the cluster step. +type StepStatus struct { + _ struct{} `type:"structure"` + + // The execution state of the cluster step. + State *string `type:"string" enum:"StepState"` + + // The reason for the step execution status change. + StateChangeReason *StepStateChangeReason `type:"structure"` + + // The timeline of the cluster step status over time. + Timeline *StepTimeline `type:"structure"` +} + +// String returns the string representation +func (s StepStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepStatus) GoString() string { + return s.String() +} + +// The summary of the cluster step. +type StepSummary struct { + _ struct{} `type:"structure"` + + // This specifies what action to take when the cluster step fails. Possible + // values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. + ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` + + // The Hadoop job configuration of the cluster step. + Config *HadoopStepConfig `type:"structure"` + + // The identifier of the cluster step. + Id *string `type:"string"` + + // The name of the cluster step. + Name *string `type:"string"` + + // The current execution status details of the cluster step. 
+ Status *StepStatus `type:"structure"` +} + +// String returns the string representation +func (s StepSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepSummary) GoString() string { + return s.String() +} + +// The timeline of the cluster step lifecycle. +type StepTimeline struct { + _ struct{} `type:"structure"` + + // The date and time when the cluster step was created. + CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the cluster step execution completed or failed. + EndDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time when the cluster step execution started. + StartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s StepTimeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepTimeline) GoString() string { + return s.String() +} + +// The list of supported product configurations which allow user-supplied arguments. +// EMR accepts these arguments and forwards them to the corresponding installation +// script as bootstrap action arguments. +type SupportedProductConfig struct { + _ struct{} `type:"structure"` + + // The list of user-supplied arguments. + Args []*string `type:"list"` + + // The name of the product configuration. + Name *string `type:"string"` +} + +// String returns the string representation +func (s SupportedProductConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SupportedProductConfig) GoString() string { + return s.String() +} + +// A key/value pair containing user-defined metadata that you can associate +// with an Amazon EMR resource. Tags make it easier to associate clusters in +// various ways, such as grouping clu\ sters to track your Amazon EMR resource +// allocation costs. 
For more information, see Tagging Amazon EMR Resources +// (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). +type Tag struct { + _ struct{} `type:"structure"` + + // A user-defined key, which is the minimum required information for a valid + // tag. For more information, see Tagging Amazon EMR Resources (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). + Key *string `type:"string"` + + // A user-defined value, which is optional in a tag. For more information, see + // Tagging Amazon EMR Resources (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html). + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Input to the TerminateJobFlows operation. +type TerminateJobFlowsInput struct { + _ struct{} `type:"structure"` + + // A list of job flows to be shutdown. 
+ JobFlowIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateJobFlowsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateJobFlowsInput) GoString() string { + return s.String() +} + +type TerminateJobFlowsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TerminateJobFlowsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateJobFlowsOutput) GoString() string { + return s.String() +} + +const ( + // @enum ActionOnFailure + ActionOnFailureTerminateJobFlow = "TERMINATE_JOB_FLOW" + // @enum ActionOnFailure + ActionOnFailureTerminateCluster = "TERMINATE_CLUSTER" + // @enum ActionOnFailure + ActionOnFailureCancelAndWait = "CANCEL_AND_WAIT" + // @enum ActionOnFailure + ActionOnFailureContinue = "CONTINUE" +) + +const ( + // @enum ClusterState + ClusterStateStarting = "STARTING" + // @enum ClusterState + ClusterStateBootstrapping = "BOOTSTRAPPING" + // @enum ClusterState + ClusterStateRunning = "RUNNING" + // @enum ClusterState + ClusterStateWaiting = "WAITING" + // @enum ClusterState + ClusterStateTerminating = "TERMINATING" + // @enum ClusterState + ClusterStateTerminated = "TERMINATED" + // @enum ClusterState + ClusterStateTerminatedWithErrors = "TERMINATED_WITH_ERRORS" +) + +const ( + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeInternalError = "INTERNAL_ERROR" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeValidationError = "VALIDATION_ERROR" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeInstanceFailure = "INSTANCE_FAILURE" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeBootstrapFailure = "BOOTSTRAP_FAILURE" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeUserRequest = "USER_REQUEST" + // @enum 
ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeStepFailure = "STEP_FAILURE" + // @enum ClusterStateChangeReasonCode + ClusterStateChangeReasonCodeAllStepsCompleted = "ALL_STEPS_COMPLETED" +) + +const ( + // @enum InstanceGroupState + InstanceGroupStateProvisioning = "PROVISIONING" + // @enum InstanceGroupState + InstanceGroupStateBootstrapping = "BOOTSTRAPPING" + // @enum InstanceGroupState + InstanceGroupStateRunning = "RUNNING" + // @enum InstanceGroupState + InstanceGroupStateResizing = "RESIZING" + // @enum InstanceGroupState + InstanceGroupStateSuspended = "SUSPENDED" + // @enum InstanceGroupState + InstanceGroupStateTerminating = "TERMINATING" + // @enum InstanceGroupState + InstanceGroupStateTerminated = "TERMINATED" + // @enum InstanceGroupState + InstanceGroupStateArrested = "ARRESTED" + // @enum InstanceGroupState + InstanceGroupStateShuttingDown = "SHUTTING_DOWN" + // @enum InstanceGroupState + InstanceGroupStateEnded = "ENDED" +) + +const ( + // @enum InstanceGroupStateChangeReasonCode + InstanceGroupStateChangeReasonCodeInternalError = "INTERNAL_ERROR" + // @enum InstanceGroupStateChangeReasonCode + InstanceGroupStateChangeReasonCodeValidationError = "VALIDATION_ERROR" + // @enum InstanceGroupStateChangeReasonCode + InstanceGroupStateChangeReasonCodeInstanceFailure = "INSTANCE_FAILURE" + // @enum InstanceGroupStateChangeReasonCode + InstanceGroupStateChangeReasonCodeClusterTerminated = "CLUSTER_TERMINATED" +) + +const ( + // @enum InstanceGroupType + InstanceGroupTypeMaster = "MASTER" + // @enum InstanceGroupType + InstanceGroupTypeCore = "CORE" + // @enum InstanceGroupType + InstanceGroupTypeTask = "TASK" +) + +const ( + // @enum InstanceRoleType + InstanceRoleTypeMaster = "MASTER" + // @enum InstanceRoleType + InstanceRoleTypeCore = "CORE" + // @enum InstanceRoleType + InstanceRoleTypeTask = "TASK" +) + +const ( + // @enum InstanceState + InstanceStateAwaitingFulfillment = "AWAITING_FULFILLMENT" + // @enum InstanceState + 
InstanceStateProvisioning = "PROVISIONING" + // @enum InstanceState + InstanceStateBootstrapping = "BOOTSTRAPPING" + // @enum InstanceState + InstanceStateRunning = "RUNNING" + // @enum InstanceState + InstanceStateTerminated = "TERMINATED" +) + +const ( + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeInternalError = "INTERNAL_ERROR" + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeValidationError = "VALIDATION_ERROR" + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeInstanceFailure = "INSTANCE_FAILURE" + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeBootstrapFailure = "BOOTSTRAP_FAILURE" + // @enum InstanceStateChangeReasonCode + InstanceStateChangeReasonCodeClusterTerminated = "CLUSTER_TERMINATED" +) + +// The type of instance. +// +// A small instance +// +// A large instance +const ( + // @enum JobFlowExecutionState + JobFlowExecutionStateStarting = "STARTING" + // @enum JobFlowExecutionState + JobFlowExecutionStateBootstrapping = "BOOTSTRAPPING" + // @enum JobFlowExecutionState + JobFlowExecutionStateRunning = "RUNNING" + // @enum JobFlowExecutionState + JobFlowExecutionStateWaiting = "WAITING" + // @enum JobFlowExecutionState + JobFlowExecutionStateShuttingDown = "SHUTTING_DOWN" + // @enum JobFlowExecutionState + JobFlowExecutionStateTerminated = "TERMINATED" + // @enum JobFlowExecutionState + JobFlowExecutionStateCompleted = "COMPLETED" + // @enum JobFlowExecutionState + JobFlowExecutionStateFailed = "FAILED" +) + +const ( + // @enum MarketType + MarketTypeOnDemand = "ON_DEMAND" + // @enum MarketType + MarketTypeSpot = "SPOT" +) + +const ( + // @enum StepExecutionState + StepExecutionStatePending = "PENDING" + // @enum StepExecutionState + StepExecutionStateRunning = "RUNNING" + // @enum StepExecutionState + StepExecutionStateContinue = "CONTINUE" + // @enum StepExecutionState + StepExecutionStateCompleted = "COMPLETED" + // @enum StepExecutionState + 
StepExecutionStateCancelled = "CANCELLED" + // @enum StepExecutionState + StepExecutionStateFailed = "FAILED" + // @enum StepExecutionState + StepExecutionStateInterrupted = "INTERRUPTED" +) + +const ( + // @enum StepState + StepStatePending = "PENDING" + // @enum StepState + StepStateRunning = "RUNNING" + // @enum StepState + StepStateCompleted = "COMPLETED" + // @enum StepState + StepStateCancelled = "CANCELLED" + // @enum StepState + StepStateFailed = "FAILED" + // @enum StepState + StepStateInterrupted = "INTERRUPTED" +) + +const ( + // @enum StepStateChangeReasonCode + StepStateChangeReasonCodeNone = "NONE" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/emriface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/emriface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/emriface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/emriface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,92 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package emriface provides an interface for the Amazon Elastic MapReduce. +package emriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/emr" +) + +// EMRAPI is the interface type for emr.EMR. 
+type EMRAPI interface { + AddInstanceGroupsRequest(*emr.AddInstanceGroupsInput) (*request.Request, *emr.AddInstanceGroupsOutput) + + AddInstanceGroups(*emr.AddInstanceGroupsInput) (*emr.AddInstanceGroupsOutput, error) + + AddJobFlowStepsRequest(*emr.AddJobFlowStepsInput) (*request.Request, *emr.AddJobFlowStepsOutput) + + AddJobFlowSteps(*emr.AddJobFlowStepsInput) (*emr.AddJobFlowStepsOutput, error) + + AddTagsRequest(*emr.AddTagsInput) (*request.Request, *emr.AddTagsOutput) + + AddTags(*emr.AddTagsInput) (*emr.AddTagsOutput, error) + + DescribeClusterRequest(*emr.DescribeClusterInput) (*request.Request, *emr.DescribeClusterOutput) + + DescribeCluster(*emr.DescribeClusterInput) (*emr.DescribeClusterOutput, error) + + DescribeJobFlowsRequest(*emr.DescribeJobFlowsInput) (*request.Request, *emr.DescribeJobFlowsOutput) + + DescribeJobFlows(*emr.DescribeJobFlowsInput) (*emr.DescribeJobFlowsOutput, error) + + DescribeStepRequest(*emr.DescribeStepInput) (*request.Request, *emr.DescribeStepOutput) + + DescribeStep(*emr.DescribeStepInput) (*emr.DescribeStepOutput, error) + + ListBootstrapActionsRequest(*emr.ListBootstrapActionsInput) (*request.Request, *emr.ListBootstrapActionsOutput) + + ListBootstrapActions(*emr.ListBootstrapActionsInput) (*emr.ListBootstrapActionsOutput, error) + + ListBootstrapActionsPages(*emr.ListBootstrapActionsInput, func(*emr.ListBootstrapActionsOutput, bool) bool) error + + ListClustersRequest(*emr.ListClustersInput) (*request.Request, *emr.ListClustersOutput) + + ListClusters(*emr.ListClustersInput) (*emr.ListClustersOutput, error) + + ListClustersPages(*emr.ListClustersInput, func(*emr.ListClustersOutput, bool) bool) error + + ListInstanceGroupsRequest(*emr.ListInstanceGroupsInput) (*request.Request, *emr.ListInstanceGroupsOutput) + + ListInstanceGroups(*emr.ListInstanceGroupsInput) (*emr.ListInstanceGroupsOutput, error) + + ListInstanceGroupsPages(*emr.ListInstanceGroupsInput, func(*emr.ListInstanceGroupsOutput, bool) bool) error + + 
ListInstancesRequest(*emr.ListInstancesInput) (*request.Request, *emr.ListInstancesOutput) + + ListInstances(*emr.ListInstancesInput) (*emr.ListInstancesOutput, error) + + ListInstancesPages(*emr.ListInstancesInput, func(*emr.ListInstancesOutput, bool) bool) error + + ListStepsRequest(*emr.ListStepsInput) (*request.Request, *emr.ListStepsOutput) + + ListSteps(*emr.ListStepsInput) (*emr.ListStepsOutput, error) + + ListStepsPages(*emr.ListStepsInput, func(*emr.ListStepsOutput, bool) bool) error + + ModifyInstanceGroupsRequest(*emr.ModifyInstanceGroupsInput) (*request.Request, *emr.ModifyInstanceGroupsOutput) + + ModifyInstanceGroups(*emr.ModifyInstanceGroupsInput) (*emr.ModifyInstanceGroupsOutput, error) + + RemoveTagsRequest(*emr.RemoveTagsInput) (*request.Request, *emr.RemoveTagsOutput) + + RemoveTags(*emr.RemoveTagsInput) (*emr.RemoveTagsOutput, error) + + RunJobFlowRequest(*emr.RunJobFlowInput) (*request.Request, *emr.RunJobFlowOutput) + + RunJobFlow(*emr.RunJobFlowInput) (*emr.RunJobFlowOutput, error) + + SetTerminationProtectionRequest(*emr.SetTerminationProtectionInput) (*request.Request, *emr.SetTerminationProtectionOutput) + + SetTerminationProtection(*emr.SetTerminationProtectionInput) (*emr.SetTerminationProtectionOutput, error) + + SetVisibleToAllUsersRequest(*emr.SetVisibleToAllUsersInput) (*request.Request, *emr.SetVisibleToAllUsersOutput) + + SetVisibleToAllUsers(*emr.SetVisibleToAllUsersInput) (*emr.SetVisibleToAllUsersOutput, error) + + TerminateJobFlowsRequest(*emr.TerminateJobFlowsInput) (*request.Request, *emr.TerminateJobFlowsOutput) + + TerminateJobFlows(*emr.TerminateJobFlowsInput) (*emr.TerminateJobFlowsOutput, error) +} + +var _ EMRAPI = (*emr.EMR)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/examples_test.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,589 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package emr_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/emr" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleEMR_AddInstanceGroups() { + svc := emr.New(session.New()) + + params := &emr.AddInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupConfig{ // Required + { // Required + InstanceCount: aws.Int64(1), // Required + InstanceRole: aws.String("InstanceRoleType"), // Required + InstanceType: aws.String("InstanceType"), // Required + BidPrice: aws.String("XmlStringMaxLen256"), + Configurations: []*emr.Configuration{ + { // Required + Classification: aws.String("String"), + Configurations: []*emr.Configuration{ + // Recursive values... + }, + Properties: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Market: aws.String("MarketType"), + Name: aws.String("XmlStringMaxLen256"), + }, + // More values... + }, + JobFlowId: aws.String("XmlStringMaxLen256"), // Required + } + resp, err := svc.AddInstanceGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEMR_AddJobFlowSteps() { + svc := emr.New(session.New()) + + params := &emr.AddJobFlowStepsInput{ + JobFlowId: aws.String("XmlStringMaxLen256"), // Required + Steps: []*emr.StepConfig{ // Required + { // Required + HadoopJarStep: &emr.HadoopJarStepConfig{ // Required + Jar: aws.String("XmlString"), // Required + Args: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + MainClass: aws.String("XmlString"), + Properties: []*emr.KeyValue{ + { // Required + Key: aws.String("XmlString"), + Value: aws.String("XmlString"), + }, + // More values... + }, + }, + Name: aws.String("XmlStringMaxLen256"), // Required + ActionOnFailure: aws.String("ActionOnFailure"), + }, + // More values... + }, + } + resp, err := svc.AddJobFlowSteps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_AddTags() { + svc := emr.New(session.New()) + + params := &emr.AddTagsInput{ + ResourceId: aws.String("ResourceId"), // Required + Tags: []*emr.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_DescribeCluster() { + svc := emr.New(session.New()) + + params := &emr.DescribeClusterInput{ + ClusterId: aws.String("ClusterId"), // Required + } + resp, err := svc.DescribeCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleEMR_DescribeJobFlows() { + svc := emr.New(session.New()) + + params := &emr.DescribeJobFlowsInput{ + CreatedAfter: aws.Time(time.Now()), + CreatedBefore: aws.Time(time.Now()), + JobFlowIds: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + JobFlowStates: []*string{ + aws.String("JobFlowExecutionState"), // Required + // More values... + }, + } + resp, err := svc.DescribeJobFlows(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_DescribeStep() { + svc := emr.New(session.New()) + + params := &emr.DescribeStepInput{ + ClusterId: aws.String("ClusterId"), // Required + StepId: aws.String("StepId"), // Required + } + resp, err := svc.DescribeStep(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListBootstrapActions() { + svc := emr.New(session.New()) + + params := &emr.ListBootstrapActionsInput{ + ClusterId: aws.String("ClusterId"), // Required + Marker: aws.String("Marker"), + } + resp, err := svc.ListBootstrapActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListClusters() { + svc := emr.New(session.New()) + + params := &emr.ListClustersInput{ + ClusterStates: []*string{ + aws.String("ClusterState"), // Required + // More values... 
+ }, + CreatedAfter: aws.Time(time.Now()), + CreatedBefore: aws.Time(time.Now()), + Marker: aws.String("Marker"), + } + resp, err := svc.ListClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListInstanceGroups() { + svc := emr.New(session.New()) + + params := &emr.ListInstanceGroupsInput{ + ClusterId: aws.String("ClusterId"), // Required + Marker: aws.String("Marker"), + } + resp, err := svc.ListInstanceGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListInstances() { + svc := emr.New(session.New()) + + params := &emr.ListInstancesInput{ + ClusterId: aws.String("ClusterId"), // Required + InstanceGroupId: aws.String("InstanceGroupId"), + InstanceGroupTypes: []*string{ + aws.String("InstanceGroupType"), // Required + // More values... + }, + Marker: aws.String("Marker"), + } + resp, err := svc.ListInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ListSteps() { + svc := emr.New(session.New()) + + params := &emr.ListStepsInput{ + ClusterId: aws.String("ClusterId"), // Required + Marker: aws.String("Marker"), + StepIds: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + StepStates: []*string{ + aws.String("StepState"), // Required + // More values... + }, + } + resp, err := svc.ListSteps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_ModifyInstanceGroups() { + svc := emr.New(session.New()) + + params := &emr.ModifyInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupModifyConfig{ + { // Required + InstanceGroupId: aws.String("XmlStringMaxLen256"), // Required + EC2InstanceIdsToTerminate: []*string{ + aws.String("InstanceId"), // Required + // More values... + }, + InstanceCount: aws.Int64(1), + }, + // More values... + }, + } + resp, err := svc.ModifyInstanceGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_RemoveTags() { + svc := emr.New(session.New()) + + params := &emr.RemoveTagsInput{ + ResourceId: aws.String("ResourceId"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_RunJobFlow() { + svc := emr.New(session.New()) + + params := &emr.RunJobFlowInput{ + Instances: &emr.JobFlowInstancesConfig{ // Required + AdditionalMasterSecurityGroups: []*string{ + aws.String("XmlStringMaxLen256"), // Required + // More values... + }, + AdditionalSlaveSecurityGroups: []*string{ + aws.String("XmlStringMaxLen256"), // Required + // More values... 
+ }, + Ec2KeyName: aws.String("XmlStringMaxLen256"), + Ec2SubnetId: aws.String("XmlStringMaxLen256"), + EmrManagedMasterSecurityGroup: aws.String("XmlStringMaxLen256"), + EmrManagedSlaveSecurityGroup: aws.String("XmlStringMaxLen256"), + HadoopVersion: aws.String("XmlStringMaxLen256"), + InstanceCount: aws.Int64(1), + InstanceGroups: []*emr.InstanceGroupConfig{ + { // Required + InstanceCount: aws.Int64(1), // Required + InstanceRole: aws.String("InstanceRoleType"), // Required + InstanceType: aws.String("InstanceType"), // Required + BidPrice: aws.String("XmlStringMaxLen256"), + Configurations: []*emr.Configuration{ + { // Required + Classification: aws.String("String"), + Configurations: []*emr.Configuration{ + // Recursive values... + }, + Properties: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Market: aws.String("MarketType"), + Name: aws.String("XmlStringMaxLen256"), + }, + // More values... + }, + KeepJobFlowAliveWhenNoSteps: aws.Bool(true), + MasterInstanceType: aws.String("InstanceType"), + Placement: &emr.PlacementType{ + AvailabilityZone: aws.String("XmlString"), // Required + }, + ServiceAccessSecurityGroup: aws.String("XmlStringMaxLen256"), + SlaveInstanceType: aws.String("InstanceType"), + TerminationProtected: aws.Bool(true), + }, + Name: aws.String("XmlStringMaxLen256"), // Required + AdditionalInfo: aws.String("XmlString"), + AmiVersion: aws.String("XmlStringMaxLen256"), + Applications: []*emr.Application{ + { // Required + AdditionalInfo: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + Args: []*string{ + aws.String("String"), // Required + // More values... + }, + Name: aws.String("String"), + Version: aws.String("String"), + }, + // More values... 
+ }, + BootstrapActions: []*emr.BootstrapActionConfig{ + { // Required + Name: aws.String("XmlStringMaxLen256"), // Required + ScriptBootstrapAction: &emr.ScriptBootstrapActionConfig{ // Required + Path: aws.String("XmlString"), // Required + Args: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + }, + }, + // More values... + }, + Configurations: []*emr.Configuration{ + { // Required + Classification: aws.String("String"), + Configurations: []*emr.Configuration{ + // Recursive values... + }, + Properties: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + JobFlowRole: aws.String("XmlString"), + LogUri: aws.String("XmlString"), + NewSupportedProducts: []*emr.SupportedProductConfig{ + { // Required + Args: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + Name: aws.String("XmlStringMaxLen256"), + }, + // More values... + }, + ReleaseLabel: aws.String("XmlStringMaxLen256"), + ServiceRole: aws.String("XmlString"), + Steps: []*emr.StepConfig{ + { // Required + HadoopJarStep: &emr.HadoopJarStepConfig{ // Required + Jar: aws.String("XmlString"), // Required + Args: []*string{ + aws.String("XmlString"), // Required + // More values... + }, + MainClass: aws.String("XmlString"), + Properties: []*emr.KeyValue{ + { // Required + Key: aws.String("XmlString"), + Value: aws.String("XmlString"), + }, + // More values... + }, + }, + Name: aws.String("XmlStringMaxLen256"), // Required + ActionOnFailure: aws.String("ActionOnFailure"), + }, + // More values... + }, + SupportedProducts: []*string{ + aws.String("XmlStringMaxLen256"), // Required + // More values... + }, + Tags: []*emr.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + VisibleToAllUsers: aws.Bool(true), + } + resp, err := svc.RunJobFlow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_SetTerminationProtection() { + svc := emr.New(session.New()) + + params := &emr.SetTerminationProtectionInput{ + JobFlowIds: []*string{ // Required + aws.String("XmlString"), // Required + // More values... + }, + TerminationProtected: aws.Bool(true), // Required + } + resp, err := svc.SetTerminationProtection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_SetVisibleToAllUsers() { + svc := emr.New(session.New()) + + params := &emr.SetVisibleToAllUsersInput{ + JobFlowIds: []*string{ // Required + aws.String("XmlString"), // Required + // More values... + }, + VisibleToAllUsers: aws.Bool(true), // Required + } + resp, err := svc.SetVisibleToAllUsers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleEMR_TerminateJobFlows() { + svc := emr.New(session.New()) + + params := &emr.TerminateJobFlowsInput{ + JobFlowIds: []*string{ // Required + aws.String("XmlString"), // Required + // More values... + }, + } + resp, err := svc.TerminateJobFlows(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,92 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package emr + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy +// to process large amounts of data efficiently. Amazon EMR uses Hadoop processing +// combined with several AWS products to do tasks such as web indexing, data +// mining, log file analysis, machine learning, scientific simulation, and data +// warehousing. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type EMR struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "elasticmapreduce" + +// New creates a new instance of the EMR client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a EMR client from just a session. 
+// svc := emr.New(mySession) +// +// // Create a EMR client with additional configuration +// svc := emr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EMR { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EMR { + svc := &EMR{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2009-03-31", + JSONVersion: "1.1", + TargetPrefix: "ElasticMapReduce", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a EMR operation and runs any +// custom request initialization. 
+func (c *EMR) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/emr/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package emr + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *EMR) WaitUntilClusterRunning(input *DescribeClusterInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeCluster", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "RUNNING", + }, + { + State: "success", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "WAITING", + }, + { + State: "failure", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "TERMINATING", + }, + { + State: "failure", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "TERMINATED", + }, + { + State: "failure", + Matcher: "path", + Argument: "Cluster.Status.State", + Expected: "TERMINATED_WITH_ERRORS", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *EMR) WaitUntilStepComplete(input *DescribeStepInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStep", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: 
"path", + Argument: "Step.Status.State", + Expected: "COMPLETED", + }, + { + State: "failure", + Matcher: "path", + Argument: "Step.Status.State", + Expected: "FAILED", + }, + { + State: "failure", + Matcher: "path", + Argument: "Step.Status.State", + Expected: "CANCELLED", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1163 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package firehose provides a client for Amazon Kinesis Firehose. +package firehose + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateDeliveryStream = "CreateDeliveryStream" + +// CreateDeliveryStreamRequest generates a request for the CreateDeliveryStream operation. +func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) (req *request.Request, output *CreateDeliveryStreamOutput) { + op := &request.Operation{ + Name: opCreateDeliveryStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeliveryStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeliveryStreamOutput{} + req.Data = output + return +} + +// Creates a delivery stream. +// +// CreateDeliveryStream is an asynchronous operation that immediately returns. +// The initial status of the delivery stream is CREATING. After the delivery +// stream is created, its status is ACTIVE and it now accepts data. 
Attempts +// to send data to a delivery stream that is not in the ACTIVE state cause an +// exception. To check the state of a delivery stream, use DescribeDeliveryStream. +// +// The name of a delivery stream identifies it. You can't have two delivery +// streams with the same name in the same region. Two delivery streams in different +// AWS accounts or different regions in the same AWS account can have the same +// name. +// +// By default, you can create up to 5 delivery streams per region. +// +// A delivery stream can only be configured with a single destination, Amazon +// S3 or Amazon Redshift. For correct CreateDeliveryStream request syntax, specify +// only one destination configuration parameter: either RedshiftDestinationConfiguration +// or S3DestinationConfiguration +// +// As part of S3DestinationConfiguration, optional values BufferingHints, EncryptionConfiguration, +// and CompressionFormat can be provided. By default, if no BufferingHints value +// is provided, Amazon Kinesis Firehose buffers data up to 5 MB or for 5 minutes, +// whichever condition is satisfied first. Note that BufferingHints is a hint, +// so there are some cases where the service cannot adhere to these conditions +// strictly; for example, record boundaries are such that the size is a little +// over or under the configured buffering size. By default, no encryption is +// performed. We strongly recommend that you enable encryption to ensure secure +// data storage in Amazon S3. +// +// A few notes about RedshiftDestinationConfiguration: +// +// An Amazon Redshift destination requires an S3 bucket as intermediate location, +// as Amazon Kinesis Firehose first delivers data to S3 and then uses COPY syntax +// to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration +// parameter element. 
The compression formats SNAPPY or ZIP cannot be specified +// in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift +// COPY operation that reads from the S3 bucket doesn't support these compression +// formats. We strongly recommend that the username and password provided is +// used exclusively for Amazon Kinesis Firehose purposes, and that the permissions +// for the account are restricted for Amazon Redshift INSERT permissions. Amazon +// Kinesis Firehose assumes the IAM role that is configured as part of destinations. +// The IAM role should allow the Amazon Kinesis Firehose principal to assume +// the role, and the role should have permissions that allows the service to +// deliver the data. For more information, see Amazon S3 Bucket Access (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) +// in the Amazon Kinesis Firehose Developer Guide. +func (c *Firehose) CreateDeliveryStream(input *CreateDeliveryStreamInput) (*CreateDeliveryStreamOutput, error) { + req, out := c.CreateDeliveryStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDeliveryStream = "DeleteDeliveryStream" + +// DeleteDeliveryStreamRequest generates a request for the DeleteDeliveryStream operation. +func (c *Firehose) DeleteDeliveryStreamRequest(input *DeleteDeliveryStreamInput) (req *request.Request, output *DeleteDeliveryStreamOutput) { + op := &request.Operation{ + Name: opDeleteDeliveryStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDeliveryStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDeliveryStreamOutput{} + req.Data = output + return +} + +// Deletes a delivery stream and its data. +// +// You can delete a delivery stream only if it is in ACTIVE or DELETING state, +// and not in the CREATING state. While the deletion request is in process, +// the delivery stream is in the DELETING state. 
+// +// To check the state of a delivery stream, use DescribeDeliveryStream. +// +// While the delivery stream is DELETING state, the service may continue to +// accept the records, but the service doesn't make any guarantees with respect +// to delivering the data. Therefore, as a best practice, you should first stop +// any applications that are sending records before deleting a delivery stream. +func (c *Firehose) DeleteDeliveryStream(input *DeleteDeliveryStreamInput) (*DeleteDeliveryStreamOutput, error) { + req, out := c.DeleteDeliveryStreamRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDeliveryStream = "DescribeDeliveryStream" + +// DescribeDeliveryStreamRequest generates a request for the DescribeDeliveryStream operation. +func (c *Firehose) DescribeDeliveryStreamRequest(input *DescribeDeliveryStreamInput) (req *request.Request, output *DescribeDeliveryStreamOutput) { + op := &request.Operation{ + Name: opDescribeDeliveryStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeliveryStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeliveryStreamOutput{} + req.Data = output + return +} + +// Describes the specified delivery stream and gets the status. For example, +// after your delivery stream is created, call DescribeDeliveryStream to see +// if the delivery stream is ACTIVE and therefore ready for data to be sent +// to it. +func (c *Firehose) DescribeDeliveryStream(input *DescribeDeliveryStreamInput) (*DescribeDeliveryStreamOutput, error) { + req, out := c.DescribeDeliveryStreamRequest(input) + err := req.Send() + return out, err +} + +const opListDeliveryStreams = "ListDeliveryStreams" + +// ListDeliveryStreamsRequest generates a request for the ListDeliveryStreams operation. 
+func (c *Firehose) ListDeliveryStreamsRequest(input *ListDeliveryStreamsInput) (req *request.Request, output *ListDeliveryStreamsOutput) { + op := &request.Operation{ + Name: opListDeliveryStreams, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDeliveryStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeliveryStreamsOutput{} + req.Data = output + return +} + +// Lists your delivery streams. +// +// The number of delivery streams might be too large to return using a single +// call to ListDeliveryStreams. You can limit the number of delivery streams +// returned, using the Limit parameter. To determine whether there are more +// delivery streams to list, check the value of HasMoreDeliveryStreams in the +// output. If there are more delivery streams to list, you can request them +// by specifying the name of the last delivery stream returned in the call in +// the ExclusiveStartDeliveryStreamName parameter of a subsequent call. +func (c *Firehose) ListDeliveryStreams(input *ListDeliveryStreamsInput) (*ListDeliveryStreamsOutput, error) { + req, out := c.ListDeliveryStreamsRequest(input) + err := req.Send() + return out, err +} + +const opPutRecord = "PutRecord" + +// PutRecordRequest generates a request for the PutRecord operation. +func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) { + op := &request.Operation{ + Name: opPutRecord, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordOutput{} + req.Data = output + return +} + +// Writes a single data record into an Amazon Kinesis Firehose delivery stream. +// To write multiple data records into a delivery stream, use PutRecordBatch. +// Applications using these operations are referred to as producers. 
+// +// By default, each delivery stream can take in up to 2,000 transactions per +// second, 5,000 records per second, or 5 MB per second. Note that if you use +// PutRecord and PutRecordBatch, the limits are an aggregate across these two +// operations for each delivery stream. For more information about limits and +// how to request an increase, see Amazon Kinesis Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// +// You must specify the name of the delivery stream and the data record when +// using PutRecord. The data record consists of a data blob that can be up to +// 1,000 KB in size, and any kind of data, for example, a segment from a log +// file, geographic location data, web site clickstream data, etc. +// +// Amazon Kinesis Firehose buffers records before delivering them to the destination. +// To disambiguate the data blobs at the destination, a common solution is to +// use delimiters in the data, such as a newline (\n) or some other character +// unique within the data. This allows the consumer application(s) to parse +// individual data items when reading the data from the destination. +// +// Amazon Kinesis Firehose does not maintain data record ordering. If the destination +// data needs to be re-ordered by the consumer application, the producer should +// include some form of sequence number in each data record. +// +// The PutRecord operation returns a RecordId, which is a unique string assigned +// to each record. Producer applications can use this ID for purposes such as +// auditability and investigation. +// +// If the PutRecord operation throws a ServiceUnavailableException, back off +// and retry. If the exception persists, it is possible that the throughput +// limits have been exceeded for the delivery stream. +// +// Data records sent to Amazon Kinesis Firehose are stored for 24 hours from +// the time they are added to a delivery stream as it attempts to send the records +// to the destination. 
If the destination is unreachable for more than 24 hours, +// the data is no longer available. +func (c *Firehose) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { + req, out := c.PutRecordRequest(input) + err := req.Send() + return out, err +} + +const opPutRecordBatch = "PutRecordBatch" + +// PutRecordBatchRequest generates a request for the PutRecordBatch operation. +func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *request.Request, output *PutRecordBatchOutput) { + op := &request.Operation{ + Name: opPutRecordBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordBatchOutput{} + req.Data = output + return +} + +// Writes multiple data records into a delivery stream in a single call, which +// can achieve higher throughput per producer than when writing single records. +// To write single data records into a delivery stream, use PutRecord. Applications +// using these operations are referred to as producers. +// +// Each PutRecordBatch request supports up to 500 records. Each record in the +// request can be as large as 1,000 KB (before 64-bit encoding), up to a limit +// of 4 MB for the entire request. By default, each delivery stream can take +// in up to 2,000 transactions per second, 5,000 records per second, or 5 MB +// per second. Note that if you use PutRecord and PutRecordBatch, the limits +// are an aggregate across these two operations for each delivery stream. For +// more information about limits and how to request an increase, see Amazon +// Kinesis Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// +// You must specify the name of the delivery stream and the data record when +// using PutRecord. 
The data record consists of a data blob that can be up to +// 1,000 KB in size, and any kind of data, for example, a segment from a log +// file, geographic location data, web site clickstream data, and so on. +// +// Amazon Kinesis Firehose buffers records before delivering them to the destination. +// To disambiguate the data blobs at the destination, a common solution is to +// use delimiters in the data, such as a newline (\n) or some other character +// unique within the data. This allows the consumer application(s) to parse +// individual data items when reading the data from the destination. +// +// The PutRecordBatch response includes a count of any failed records, FailedPutCount, +// and an array of responses, RequestResponses. The FailedPutCount value is +// a count of records that failed. Each entry in the RequestResponses array +// gives additional information of the processed record. Each entry in RequestResponses +// directly correlates with a record in the request array using the same ordering, +// from the top to the bottom of the request and response. RequestResponses +// always includes the same number of records as the request array. RequestResponses +// both successfully and unsuccessfully processed records. Amazon Kinesis Firehose +// attempts to process all records in each PutRecordBatch request. A single +// record failure does not stop the processing of subsequent records. +// +// A successfully processed record includes a RecordId value, which is a unique +// value identified for the record. An unsuccessfully processed record includes +// ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and +// is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage +// provides more detailed information about the error. +// +// If FailedPutCount is greater than 0 (zero), retry the request. 
A retry of +// the entire batch of records is possible; however, we strongly recommend that +// you inspect the entire response and resend only those records that failed +// processing. This minimizes duplicate records and also reduces the total bytes +// sent (and corresponding charges). +// +// If the PutRecordBatch operation throws a ServiceUnavailableException, back +// off and retry. If the exception persists, it is possible that the throughput +// limits have been exceeded for the delivery stream. +// +// Data records sent to Amazon Kinesis Firehose are stored for 24 hours from +// the time they are added to a delivery stream as it attempts to send the records +// to the destination. If the destination is unreachable for more than 24 hours, +// the data is no longer available. +func (c *Firehose) PutRecordBatch(input *PutRecordBatchInput) (*PutRecordBatchOutput, error) { + req, out := c.PutRecordBatchRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDestination = "UpdateDestination" + +// UpdateDestinationRequest generates a request for the UpdateDestination operation. +func (c *Firehose) UpdateDestinationRequest(input *UpdateDestinationInput) (req *request.Request, output *UpdateDestinationOutput) { + op := &request.Operation{ + Name: opUpdateDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDestinationInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDestinationOutput{} + req.Data = output + return +} + +// Updates the specified destination of the specified delivery stream. +// +// This operation can be used to change the destination type (for example, +// to replace the Amazon S3 destination with Amazon Redshift) or change the +// parameters associated with a given destination (for example, to change the +// bucket name of the Amazon S3 destination). The update may not occur immediately. 
+// The target delivery stream remains active while the configurations are updated, +// so data writes to the delivery stream can continue during this process. The +// updated configurations are normally effective within a few minutes. +// +// If the destination type is the same, Amazon Kinesis Firehose merges the +// configuration parameters specified in the UpdateDestination request with +// the destination configuration that already exists on the delivery stream. +// If any of the parameters are not specified in the update request, then the +// existing configuration parameters are retained. For example, in the Amazon +// S3 destination, if EncryptionConfiguration is not specified then the existing +// EncryptionConfiguration is maintained on the destination. +// +// If the destination type is not the same, for example, changing the destination +// from Amazon S3 to Amazon Redshift, Amazon Kinesis Firehose does not merge +// any parameters. In this case, all parameters must be specified. +// +// Amazon Kinesis Firehose uses the CurrentDeliveryStreamVersionId to avoid +// race conditions and conflicting merges. This is a required field in every +// request and the service only updates the configuration if the existing configuration +// matches the VersionId. After the update is applied successfully, the VersionId +// is updated, which can be retrieved with the DescribeDeliveryStream operation. +// The new VersionId should be uses to set CurrentDeliveryStreamVersionId in +// the next UpdateDestination operation. +func (c *Firehose) UpdateDestination(input *UpdateDestinationInput) (*UpdateDestinationOutput, error) { + req, out := c.UpdateDestinationRequest(input) + err := req.Send() + return out, err +} + +// Describes the buffering to perform before delivering data to the destination. +type BufferingHints struct { + _ struct{} `type:"structure"` + + // Buffer incoming data for the specified period of time, in seconds, before + // delivering it to the destination. 
The default value is 300. + IntervalInSeconds *int64 `min:"60" type:"integer"` + + // Buffer incoming data to the specified size, in MBs, before delivering it + // to the destination. The default value is 5. + // + // We recommend setting SizeInMBs to a value greater than the amount of data + // you typically ingest into the delivery stream in 10 seconds. For example, + // if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher. + SizeInMBs *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s BufferingHints) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BufferingHints) GoString() string { + return s.String() +} + +// Describes a COPY command for Amazon Redshift. +type CopyCommand struct { + _ struct{} `type:"structure"` + + // Optional parameters to use with the Amazon Redshift COPY command. For more + // information, see the "Optional Parameters" section of Amazon Redshift COPY + // command (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some + // possible examples that would apply to Amazon Kinesis Firehose are as follows. + // + // delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and + // compressed using lzop. + // + // delimiter '| - fields are delimited with "|" (this is the default delimiter). + // + // delimiter '|' escape - the delimiter should be escaped. + // + // fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' + // - fields are fixed width in the source, with each width specified after every + // column in the table. + // + // JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path + // specified is the format of the data. + // + // For more examples, see and Amazon Redshift COPY command exmaples (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html). + CopyOptions *string `type:"string"` + + // A comma-separated list of column names. 
+ DataTableColumns *string `type:"string"` + + // The name of the target table. The table must already exist in the database. + DataTableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyCommand) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyCommand) GoString() string { + return s.String() +} + +// Contains the parameters for CreateDeliveryStream. +type CreateDeliveryStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The destination in Amazon Redshift. This value cannot be specified if Amazon + // S3 is the desired destination (see restrictions listed above). + RedshiftDestinationConfiguration *RedshiftDestinationConfiguration `type:"structure"` + + // The destination in Amazon S3. This value must be specified if RedshiftDestinationConfiguration + // is specified (see restrictions listed above). + S3DestinationConfiguration *S3DestinationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateDeliveryStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeliveryStreamInput) GoString() string { + return s.String() +} + +// Contains the output of CreateDeliveryStream. +type CreateDeliveryStreamOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the delivery stream. + DeliveryStreamARN *string `type:"string"` +} + +// String returns the string representation +func (s CreateDeliveryStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeliveryStreamOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteDeliveryStream. 
+type DeleteDeliveryStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDeliveryStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeliveryStreamInput) GoString() string { + return s.String() +} + +// Contains the output of DeleteDeliveryStream. +type DeleteDeliveryStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDeliveryStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDeliveryStreamOutput) GoString() string { + return s.String() +} + +// Contains information about a delivery stream. +type DeliveryStreamDescription struct { + _ struct{} `type:"structure"` + + // The date and time that the delivery stream was created. + CreateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Amazon Resource Name (ARN) of the delivery stream. + DeliveryStreamARN *string `type:"string" required:"true"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The status of the delivery stream. + DeliveryStreamStatus *string `type:"string" required:"true" enum:"DeliveryStreamStatus"` + + // The destinations. + Destinations []*DestinationDescription `type:"list" required:"true"` + + // Indicates whether there are more destinations available to list. + HasMoreDestinations *bool `type:"boolean" required:"true"` + + // The date and time that the delivery stream was last updated. + LastUpdateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Used when calling the UpdateDestination operation. 
Each time the destination + // is updated for the delivery stream, the VersionId is changed, and the current + // VersionId is required when updating the destination. This is so that the + // service knows it is applying the changes to the correct version of the delivery + // stream. + VersionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeliveryStreamDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeliveryStreamDescription) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeDeliveryStream. +type DescribeDeliveryStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // Specifies the destination ID to start returning the destination information. + // Currently Amazon Kinesis Firehose supports one destination per delivery stream. + ExclusiveStartDestinationId *string `min:"1" type:"string"` + + // The limit on the number of destinations to return. Currently, you can have + // one destination per delivery stream. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeDeliveryStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryStreamInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeDeliveryStream. +type DescribeDeliveryStreamOutput struct { + _ struct{} `type:"structure"` + + // Information about the delivery stream. 
+ DeliveryStreamDescription *DeliveryStreamDescription `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeDeliveryStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeliveryStreamOutput) GoString() string { + return s.String() +} + +// Describes the destination for a delivery stream. +type DestinationDescription struct { + _ struct{} `type:"structure"` + + // The ID of the destination. + DestinationId *string `min:"1" type:"string" required:"true"` + + // The destination in Amazon Redshift. + RedshiftDestinationDescription *RedshiftDestinationDescription `type:"structure"` + + // The Amazon S3 destination. + S3DestinationDescription *S3DestinationDescription `type:"structure"` +} + +// String returns the string representation +func (s DestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DestinationDescription) GoString() string { + return s.String() +} + +// Describes the encryption for a destination in Amazon S3. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The encryption key. + KMSEncryptionConfig *KMSEncryptionConfig `type:"structure"` + + // Specifically override existing encryption information to ensure no encryption + // is used. + NoEncryptionConfig *string `type:"string" enum:"NoEncryptionConfig"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// Describes an encryption key for a destination in Amazon S3. +type KMSEncryptionConfig struct { + _ struct{} `type:"structure"` + + // The ARN of the encryption key. Must belong to the same region as the destination + // Amazon S3 bucket. 
+ AWSKMSKeyARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s KMSEncryptionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KMSEncryptionConfig) GoString() string { + return s.String() +} + +// Contains the parameters for ListDeliveryStreams. +type ListDeliveryStreamsInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream to start the list with. + ExclusiveStartDeliveryStreamName *string `min:"1" type:"string"` + + // The maximum number of delivery streams to list. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListDeliveryStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeliveryStreamsInput) GoString() string { + return s.String() +} + +// Contains the output of ListDeliveryStreams. +type ListDeliveryStreamsOutput struct { + _ struct{} `type:"structure"` + + // The names of the delivery streams. + DeliveryStreamNames []*string `type:"list" required:"true"` + + // Indicates whether there are more delivery streams available to list. + HasMoreDeliveryStreams *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ListDeliveryStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeliveryStreamsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for PutRecordBatch. +type PutRecordBatchInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // One or more records. 
+ Records []*Record `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutRecordBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchInput) GoString() string { + return s.String() +} + +// Contains the output of PutRecordBatch. +type PutRecordBatchOutput struct { + _ struct{} `type:"structure"` + + // The number of unsuccessfully written records. + FailedPutCount *int64 `type:"integer" required:"true"` + + // The results for the individual records. The index of each element matches + // the same index in which records were sent. + RequestResponses []*PutRecordBatchResponseEntry `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutRecordBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchOutput) GoString() string { + return s.String() +} + +// Contains the result for an individual record from a PutRecordBatch request. +// If the record is successfully added to your delivery stream, it receives +// a record ID. If the record fails to be added to your delivery stream, the +// result includes an error code and an error message. +type PutRecordBatchResponseEntry struct { + _ struct{} `type:"structure"` + + // The error code for an individual record result. + ErrorCode *string `type:"string"` + + // The error message for an individual record result. + ErrorMessage *string `type:"string"` + + // The ID of the record. + RecordId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutRecordBatchResponseEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordBatchResponseEntry) GoString() string { + return s.String() +} + +// Contains the parameters for PutRecord. 
+type PutRecordInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The record. + Record *Record `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutRecordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordInput) GoString() string { + return s.String() +} + +// Contains the output of PutRecord. +type PutRecordOutput struct { + _ struct{} `type:"structure"` + + // The ID of the record. + RecordId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordOutput) GoString() string { + return s.String() +} + +// The unit of data in a delivery stream. +type Record struct { + _ struct{} `type:"structure"` + + // The data blob, which is base64-encoded when the blob is serialized. The maximum + // size of the data blob, before base64-encoding, is 1,000 KB. + Data []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s Record) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Record) GoString() string { + return s.String() +} + +// Describes the configuration of a destination in Amazon Redshift. +type RedshiftDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The database connection string. + ClusterJDBCURL *string `min:"1" type:"string" required:"true"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure" required:"true"` + + // The user password. + Password *string `min:"6" type:"string" required:"true"` + + // The ARN of the AWS credentials. 
+ RoleARN *string `min:"1" type:"string" required:"true"` + + // The S3 configuration for the intermediate location from which Amazon Redshift + // obtains data. Restrictions are described in the topic for CreateDeliveryStream. + // + // The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration + // because the Amazon Redshift COPY operation that reads from the S3 bucket + // doesn't support these compression formats. + S3Configuration *S3DestinationConfiguration `type:"structure" required:"true"` + + // The name of the user. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationConfiguration) GoString() string { + return s.String() +} + +// Describes a destination in Amazon Redshift. +type RedshiftDestinationDescription struct { + _ struct{} `type:"structure"` + + // The database connection string. + ClusterJDBCURL *string `min:"1" type:"string" required:"true"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure" required:"true"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string" required:"true"` + + // The Amazon S3 destination. + S3DestinationDescription *S3DestinationDescription `type:"structure" required:"true"` + + // The name of the user. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationDescription) GoString() string { + return s.String() +} + +// Describes an update for a destination in Amazon Redshift. +type RedshiftDestinationUpdate struct { + _ struct{} `type:"structure"` + + // The database connection string. 
+ ClusterJDBCURL *string `min:"1" type:"string"` + + // The COPY command. + CopyCommand *CopyCommand `type:"structure"` + + // The user password. + Password *string `min:"6" type:"string"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string"` + + // The Amazon S3 destination. + // + // The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update + // because the Amazon Redshift COPY operation that reads from the S3 bucket + // doesn't support these compression formats. + S3Update *S3DestinationUpdate `type:"structure"` + + // The name of the user. + Username *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RedshiftDestinationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDestinationUpdate) GoString() string { + return s.String() +} + +// Describes the configuration of a destination in Amazon S3. +type S3DestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The ARN of the S3 bucket. + BucketARN *string `min:"1" type:"string" required:"true"` + + // The buffering option. If no value is specified, BufferingHints object default + // values are used. + BufferingHints *BufferingHints `type:"structure"` + + // The compression format. If no value is specified, the default is UNCOMPRESSED. + // + // The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift + // destinations because they are not supported by the Amazon Redshift COPY operation + // that reads from the S3 bucket. + CompressionFormat *string `type:"string" enum:"CompressionFormat"` + + // The encryption configuration. If no value is specified, the default is no + // encryption. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered + // S3 files. 
You can specify an extra prefix to be added in front of the time + // format prefix. Note that if the prefix ends with a slash, it appears as a + // folder in the S3 bucket. For more information, see Amazon S3 Object Name + // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) + // in the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/). + Prefix *string `type:"string"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3DestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3DestinationConfiguration) GoString() string { + return s.String() +} + +// Describes a destination in Amazon S3. +type S3DestinationDescription struct { + _ struct{} `type:"structure"` + + // The ARN of the S3 bucket. + BucketARN *string `min:"1" type:"string" required:"true"` + + // The buffering option. If no value is specified, BufferingHints object default + // values are used. + BufferingHints *BufferingHints `type:"structure" required:"true"` + + // The compression format. If no value is specified, the default is NOCOMPRESSION. + CompressionFormat *string `type:"string" required:"true" enum:"CompressionFormat"` + + // The encryption configuration. If no value is specified, the default is no + // encryption. + EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered + // S3 files. You can specify an extra prefix to be added in front of the time + // format prefix. Note that if the prefix ends with a slash, it appears as a + // folder in the S3 bucket. For more information, see Amazon S3 Object Name + // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) + // in the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/). 
+ Prefix *string `type:"string"` + + // The ARN of the AWS credentials. + RoleARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3DestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3DestinationDescription) GoString() string { + return s.String() +} + +// Describes an update for a destination in Amazon S3. +type S3DestinationUpdate struct { + _ struct{} `type:"structure"` + + // The ARN of the S3 bucket. + BucketARN *string `min:"1" type:"string"` + + // The buffering option. If no value is specified, BufferingHints object default + // values are used. + BufferingHints *BufferingHints `type:"structure"` + + // The compression format. If no value is specified, the default is NOCOMPRESSION. + // + // The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift + // destinations because they are not supported by the Amazon Redshift COPY operation + // that reads from the S3 bucket. + CompressionFormat *string `type:"string" enum:"CompressionFormat"` + + // The encryption configuration. If no value is specified, the default is no + // encryption. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered + // S3 files. You can specify an extra prefix to be added in front of the time + // format prefix. Note that if the prefix ends with a slash, it appears as a + // folder in the S3 bucket. For more information, see Amazon S3 Object Name + // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) + // in the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/). + Prefix *string `type:"string"` + + // The ARN of the AWS credentials. 
+ RoleARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s S3DestinationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3DestinationUpdate) GoString() string { + return s.String() +} + +// Contains the parameters for UpdateDestination. +type UpdateDestinationInput struct { + _ struct{} `type:"structure"` + + // Obtain this value from the VersionId result of the DeliveryStreamDescription + // operation. This value is required, and helps the service to perform conditional + // operations. For example, if there is a interleaving update and this value + // is null, then the update destination fails. After the update is successful, + // the VersionId value is updated. The service then performs a merge of the + // old configuration with the new configuration. + CurrentDeliveryStreamVersionId *string `min:"1" type:"string" required:"true"` + + // The name of the delivery stream. + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The ID of the destination. + DestinationId *string `min:"1" type:"string" required:"true"` + + // Describes an update for a destination in Amazon Redshift. + RedshiftDestinationUpdate *RedshiftDestinationUpdate `type:"structure"` + + // Describes an update for a destination in Amazon S3. + S3DestinationUpdate *S3DestinationUpdate `type:"structure"` +} + +// String returns the string representation +func (s UpdateDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDestinationInput) GoString() string { + return s.String() +} + +// Contains the output of UpdateDestination. 
+type UpdateDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDestinationOutput) GoString() string { + return s.String() +} + +const ( + // @enum CompressionFormat + CompressionFormatUncompressed = "UNCOMPRESSED" + // @enum CompressionFormat + CompressionFormatGzip = "GZIP" + // @enum CompressionFormat + CompressionFormatZip = "ZIP" + // @enum CompressionFormat + CompressionFormatSnappy = "Snappy" +) + +const ( + // @enum DeliveryStreamStatus + DeliveryStreamStatusCreating = "CREATING" + // @enum DeliveryStreamStatus + DeliveryStreamStatusDeleting = "DELETING" + // @enum DeliveryStreamStatus + DeliveryStreamStatusActive = "ACTIVE" +) + +const ( + // @enum NoEncryptionConfig + NoEncryptionConfigNoEncryption = "NoEncryption" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,249 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package firehose_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/firehose" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleFirehose_CreateDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.CreateDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + RedshiftDestinationConfiguration: &firehose.RedshiftDestinationConfiguration{ + ClusterJDBCURL: aws.String("ClusterJDBCURL"), // Required + CopyCommand: &firehose.CopyCommand{ // Required + DataTableName: aws.String("DataTableName"), // Required + CopyOptions: aws.String("CopyOptions"), + DataTableColumns: aws.String("DataTableColumns"), + }, + Password: aws.String("Password"), // Required + RoleARN: aws.String("RoleARN"), // Required + S3Configuration: &firehose.S3DestinationConfiguration{ // Required + BucketARN: aws.String("BucketARN"), // Required + RoleARN: aws.String("RoleARN"), // Required + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + }, + Username: aws.String("Username"), // Required + }, + S3DestinationConfiguration: &firehose.S3DestinationConfiguration{ + BucketARN: aws.String("BucketARN"), // Required + RoleARN: aws.String("RoleARN"), // Required + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: 
aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + }, + } + resp, err := svc.CreateDeliveryStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_DeleteDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.DeleteDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + } + resp, err := svc.DeleteDeliveryStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_DescribeDeliveryStream() { + svc := firehose.New(session.New()) + + params := &firehose.DescribeDeliveryStreamInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + ExclusiveStartDestinationId: aws.String("DestinationId"), + Limit: aws.Int64(1), + } + resp, err := svc.DescribeDeliveryStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_ListDeliveryStreams() { + svc := firehose.New(session.New()) + + params := &firehose.ListDeliveryStreamsInput{ + ExclusiveStartDeliveryStreamName: aws.String("DeliveryStreamName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListDeliveryStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleFirehose_PutRecord() { + svc := firehose.New(session.New()) + + params := &firehose.PutRecordInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + Record: &firehose.Record{ // Required + Data: []byte("PAYLOAD"), // Required + }, + } + resp, err := svc.PutRecord(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleFirehose_PutRecordBatch() { + svc := firehose.New(session.New()) + + params := &firehose.PutRecordBatchInput{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + Records: []*firehose.Record{ // Required + { // Required + Data: []byte("PAYLOAD"), // Required + }, + // More values... + }, + } + resp, err := svc.PutRecordBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleFirehose_UpdateDestination() { + svc := firehose.New(session.New()) + + params := &firehose.UpdateDestinationInput{ + CurrentDeliveryStreamVersionId: aws.String("DeliveryStreamVersionId"), // Required + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + DestinationId: aws.String("DestinationId"), // Required + RedshiftDestinationUpdate: &firehose.RedshiftDestinationUpdate{ + ClusterJDBCURL: aws.String("ClusterJDBCURL"), + CopyCommand: &firehose.CopyCommand{ + DataTableName: aws.String("DataTableName"), // Required + CopyOptions: aws.String("CopyOptions"), + DataTableColumns: aws.String("DataTableColumns"), + }, + Password: aws.String("Password"), + RoleARN: aws.String("RoleARN"), + S3Update: &firehose.S3DestinationUpdate{ + BucketARN: aws.String("BucketARN"), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + RoleARN: aws.String("RoleARN"), + }, + Username: aws.String("Username"), + }, + S3DestinationUpdate: &firehose.S3DestinationUpdate{ + BucketARN: aws.String("BucketARN"), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(1), + SizeInMBs: aws.Int64(1), + }, + CompressionFormat: aws.String("CompressionFormat"), + EncryptionConfiguration: &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String("AWSKMSKeyARN"), // Required + }, + NoEncryptionConfig: aws.String("NoEncryptionConfig"), + }, + Prefix: aws.String("Prefix"), + RoleARN: aws.String("RoleARN"), + }, + } + resp, err := svc.UpdateDestination(params) + + if err != nil { + // Print the error, 
cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/firehoseiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,42 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package firehoseiface provides an interface for the Amazon Kinesis Firehose. +package firehoseiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/firehose" +) + +// FirehoseAPI is the interface type for firehose.Firehose. 
+type FirehoseAPI interface { + CreateDeliveryStreamRequest(*firehose.CreateDeliveryStreamInput) (*request.Request, *firehose.CreateDeliveryStreamOutput) + + CreateDeliveryStream(*firehose.CreateDeliveryStreamInput) (*firehose.CreateDeliveryStreamOutput, error) + + DeleteDeliveryStreamRequest(*firehose.DeleteDeliveryStreamInput) (*request.Request, *firehose.DeleteDeliveryStreamOutput) + + DeleteDeliveryStream(*firehose.DeleteDeliveryStreamInput) (*firehose.DeleteDeliveryStreamOutput, error) + + DescribeDeliveryStreamRequest(*firehose.DescribeDeliveryStreamInput) (*request.Request, *firehose.DescribeDeliveryStreamOutput) + + DescribeDeliveryStream(*firehose.DescribeDeliveryStreamInput) (*firehose.DescribeDeliveryStreamOutput, error) + + ListDeliveryStreamsRequest(*firehose.ListDeliveryStreamsInput) (*request.Request, *firehose.ListDeliveryStreamsOutput) + + ListDeliveryStreams(*firehose.ListDeliveryStreamsInput) (*firehose.ListDeliveryStreamsOutput, error) + + PutRecordRequest(*firehose.PutRecordInput) (*request.Request, *firehose.PutRecordOutput) + + PutRecord(*firehose.PutRecordInput) (*firehose.PutRecordOutput, error) + + PutRecordBatchRequest(*firehose.PutRecordBatchInput) (*request.Request, *firehose.PutRecordBatchOutput) + + PutRecordBatch(*firehose.PutRecordBatchInput) (*firehose.PutRecordBatchOutput, error) + + UpdateDestinationRequest(*firehose.UpdateDestinationInput) (*request.Request, *firehose.UpdateDestinationOutput) + + UpdateDestination(*firehose.UpdateDestinationInput) (*firehose.UpdateDestinationOutput, error) +} + +var _ FirehoseAPI = (*firehose.Firehose)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/service.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/firehose/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package firehose + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Kinesis Firehose is a fully-managed service that delivers real-time +// streaming data to destinations such as Amazon S3 and Amazon Redshift. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Firehose struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "firehose" + +// New creates a new instance of the Firehose client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Firehose client from just a session. +// svc := firehose.New(mySession) +// +// // Create a Firehose client with additional configuration +// svc := firehose.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Firehose { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Firehose { + svc := &Firehose{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-08-04", + JSONVersion: "1.1", + TargetPrefix: "Firehose_20150804", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Firehose operation and runs any +// custom request initialization. +func (c *Firehose) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/generate.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/generate.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/generate.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/generate.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5 @@ +// Package service contains automatically generated AWS clients. 
+package service + +//go:generate go run ../private/model/cli/gen-api/main.go -path=../service ../models/apis/*/*/api-2.json +//go:generate gofmt -s -w ../service diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3574 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package glacier provides a client for Amazon Glacier. +package glacier + +import ( + "io" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +func (c *Glacier) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// This operation aborts a multipart upload identified by the upload ID. 
+// +// After the Abort Multipart Upload request succeeds, you cannot upload any +// more parts to the multipart upload or complete the multipart upload. Aborting +// a completed upload fails. However, aborting an already-aborted upload will +// succeed, for a short time. For more information about uploading a part and +// completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload. +// +// This operation is idempotent. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Working with +// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and Abort Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opAbortVaultLock = "AbortVaultLock" + +// AbortVaultLockRequest generates a request for the AbortVaultLock operation. 
+func (c *Glacier) AbortVaultLockRequest(input *AbortVaultLockInput) (req *request.Request, output *AbortVaultLockOutput) { + op := &request.Operation{ + Name: opAbortVaultLock, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy", + } + + if input == nil { + input = &AbortVaultLockInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AbortVaultLockOutput{} + req.Data = output + return +} + +// This operation aborts the vault locking process if the vault lock is not +// in the Locked state. If the vault lock is in the Locked state when this operation +// is requested, the operation returns an AccessDeniedException error. Aborting +// the vault locking process removes the vault lock policy from the specified +// vault. +// +// A vault lock is put into the InProgress state by calling InitiateVaultLock. +// A vault lock is put into the Locked state by calling CompleteVaultLock. You +// can get the state of a vault lock by calling GetVaultLock. For more information +// about the vault locking process, see Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// For more information about vault lock policies, see Amazon Glacier Access +// Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). +// +// This operation is idempotent. You can successfully invoke this operation +// multiple times, if the vault lock is in the InProgress state or if there +// is no policy associated with the vault. +func (c *Glacier) AbortVaultLock(input *AbortVaultLockInput) (*AbortVaultLockOutput, error) { + req, out := c.AbortVaultLockRequest(input) + err := req.Send() + return out, err +} + +const opAddTagsToVault = "AddTagsToVault" + +// AddTagsToVaultRequest generates a request for the AddTagsToVault operation. 
+func (c *Glacier) AddTagsToVaultRequest(input *AddTagsToVaultInput) (req *request.Request, output *AddTagsToVaultOutput) { + op := &request.Operation{ + Name: opAddTagsToVault, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/tags?operation=add", + } + + if input == nil { + input = &AddTagsToVaultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToVaultOutput{} + req.Data = output + return +} + +// This operation adds the specified tags to a vault. Each tag is composed of +// a key and a value. Each vault can have up to 10 tags. If your request would +// cause the tag limit for the vault to be exceeded, the operation throws the +// LimitExceededException error. If a tag already exists on the vault under +// a specified key, the existing key value will be overwritten. For more information +// about tags, see Tagging Amazon Glacier Resources (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). +func (c *Glacier) AddTagsToVault(input *AddTagsToVaultInput) (*AddTagsToVaultOutput, error) { + req, out := c.AddTagsToVaultRequest(input) + err := req.Send() + return out, err +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. 
+func (c *Glacier) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *ArchiveCreationOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &ArchiveCreationOutput{} + req.Data = output + return +} + +// You call this operation to inform Amazon Glacier that all the archive parts +// have been uploaded and that Amazon Glacier can now assemble the archive from +// the uploaded parts. After assembling and saving the archive to the vault, +// Amazon Glacier returns the URI path of the newly created archive resource. +// Using the URI path, you can then access the archive. After you upload an +// archive, you should save the archive ID returned to retrieve the archive +// at a later point. You can also get the vault inventory to obtain a list of +// archive IDs in a vault. For more information, see InitiateJob. +// +// In the request, you must include the computed SHA256 tree hash of the entire +// archive you have uploaded. For information about computing a SHA256 tree +// hash, see Computing Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). +// On the server side, Amazon Glacier also constructs the SHA256 tree hash of +// the assembled archive. If the values match, Amazon Glacier saves the archive +// to the vault; otherwise, it returns an error, and the operation fails. The +// ListParts operation returns a list of parts uploaded for a specific multipart +// upload. It includes checksum information for each uploaded part that can +// be used to debug a bad checksum issue. 
+// +// Additionally, Amazon Glacier also checks for any missing content ranges +// when assembling the archive, if missing content ranges are found, Amazon +// Glacier returns an error and the operation fails. +// +// Complete Multipart Upload is an idempotent operation. After your first successful +// complete multipart upload, if you call the operation again within a short +// period, the operation will succeed and return the same archive ID. This is +// useful in the event you experience a network issue that causes an aborted +// connection or receive a 500 server error, in which case you can repeat your +// Complete Multipart Upload request and get the same archive ID without creating +// duplicate archives. Note, however, that after the multipart upload completes, +// you cannot call the List Parts operation and the multipart upload will not +// appear in List Multipart Uploads response, even if idempotent complete is +// possible. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Uploading Large +// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) +// and Complete Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html) +// in the Amazon Glacier Developer Guide. 
+func (c *Glacier) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*ArchiveCreationOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCompleteVaultLock = "CompleteVaultLock" + +// CompleteVaultLockRequest generates a request for the CompleteVaultLock operation. +func (c *Glacier) CompleteVaultLockRequest(input *CompleteVaultLockInput) (req *request.Request, output *CompleteVaultLockOutput) { + op := &request.Operation{ + Name: opCompleteVaultLock, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy/{lockId}", + } + + if input == nil { + input = &CompleteVaultLockInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CompleteVaultLockOutput{} + req.Data = output + return +} + +// This operation completes the vault locking process by transitioning the vault +// lock from the InProgress state to the Locked state, which causes the vault +// lock policy to become unchangeable. A vault lock is put into the InProgress +// state by calling InitiateVaultLock. You can obtain the state of the vault +// lock by calling GetVaultLock. For more information about the vault locking +// process, Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// +// This operation is idempotent. This request is always successful if the vault +// lock is in the Locked state and the provided lock ID matches the lock ID +// originally used to lock the vault. +// +// If an invalid lock ID is passed in the request when the vault lock is in +// the Locked state, the operation returns an AccessDeniedException error. If +// an invalid lock ID is passed in the request when the vault lock is in the +// InProgress state, the operation throws an InvalidParameter error. 
+func (c *Glacier) CompleteVaultLock(input *CompleteVaultLockInput) (*CompleteVaultLockOutput, error) { + req, out := c.CompleteVaultLockRequest(input) + err := req.Send() + return out, err +} + +const opCreateVault = "CreateVault" + +// CreateVaultRequest generates a request for the CreateVault operation. +func (c *Glacier) CreateVaultRequest(input *CreateVaultInput) (req *request.Request, output *CreateVaultOutput) { + op := &request.Operation{ + Name: opCreateVault, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/vaults/{vaultName}", + } + + if input == nil { + input = &CreateVaultInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVaultOutput{} + req.Data = output + return +} + +// This operation creates a new vault with the specified name. The name of the +// vault must be unique within a region for an AWS account. You can create up +// to 1,000 vaults per account. If you need to create more vaults, contact Amazon +// Glacier. +// +// You must use the following guidelines when naming a vault. +// +// Names can be between 1 and 255 characters long. +// +// Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), +// and '.' (period). +// +// This operation is idempotent. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Creating a Vault +// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html) +// and Create Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html) +// in the Amazon Glacier Developer Guide. 
+func (c *Glacier) CreateVault(input *CreateVaultInput) (*CreateVaultOutput, error) { + req, out := c.CreateVaultRequest(input) + err := req.Send() + return out, err +} + +const opDeleteArchive = "DeleteArchive" + +// DeleteArchiveRequest generates a request for the DeleteArchive operation. +func (c *Glacier) DeleteArchiveRequest(input *DeleteArchiveInput) (req *request.Request, output *DeleteArchiveOutput) { + op := &request.Operation{ + Name: opDeleteArchive, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/archives/{archiveId}", + } + + if input == nil { + input = &DeleteArchiveInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteArchiveOutput{} + req.Data = output + return +} + +// This operation deletes an archive from a vault. Subsequent requests to initiate +// a retrieval of this archive will fail. Archive retrievals that are in progress +// for this archive ID may or may not succeed according to the following scenarios: +// +// If the archive retrieval job is actively preparing the data for download +// when Amazon Glacier receives the delete archive request, the archival retrieval +// operation might fail. If the archive retrieval job has successfully prepared +// the archive for download when Amazon Glacier receives the delete archive +// request, you will be able to download the output. This operation is idempotent. +// Attempting to delete an already-deleted archive does not result in an error. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. 
+// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Deleting an Archive +// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html) +// and Delete Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) DeleteArchive(input *DeleteArchiveInput) (*DeleteArchiveOutput, error) { + req, out := c.DeleteArchiveRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVault = "DeleteVault" + +// DeleteVaultRequest generates a request for the DeleteVault operation. +func (c *Glacier) DeleteVaultRequest(input *DeleteVaultInput) (req *request.Request, output *DeleteVaultOutput) { + op := &request.Operation{ + Name: opDeleteVault, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}", + } + + if input == nil { + input = &DeleteVaultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVaultOutput{} + req.Data = output + return +} + +// This operation deletes a vault. Amazon Glacier will delete a vault only if +// there are no archives in the vault as of the last inventory and there have +// been no writes to the vault since the last inventory. If either of these +// conditions is not satisfied, the vault deletion fails (that is, the vault +// is not removed) and Amazon Glacier returns an error. You can use DescribeVault +// to return the number of archives in a vault, and you can use Initiate a Job +// (POST jobs) (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html) +// to initiate a new inventory retrieval for a vault. 
The inventory contains +// the archive IDs you use to delete archives using Delete Archive (DELETE archive) +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html). +// +// This operation is idempotent. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Deleting a Vault +// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html) +// and Delete Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) DeleteVault(input *DeleteVaultInput) (*DeleteVaultOutput, error) { + req, out := c.DeleteVaultRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVaultAccessPolicy = "DeleteVaultAccessPolicy" + +// DeleteVaultAccessPolicyRequest generates a request for the DeleteVaultAccessPolicy operation. 
+func (c *Glacier) DeleteVaultAccessPolicyRequest(input *DeleteVaultAccessPolicyInput) (req *request.Request, output *DeleteVaultAccessPolicyOutput) { + op := &request.Operation{ + Name: opDeleteVaultAccessPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/access-policy", + } + + if input == nil { + input = &DeleteVaultAccessPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVaultAccessPolicyOutput{} + req.Data = output + return +} + +// This operation deletes the access policy associated with the specified vault. +// The operation is eventually consistent; that is, it might take some time +// for Amazon Glacier to completely remove the access policy, and you might +// still see the effect of the policy for a short time after you send the delete +// request. +// +// This operation is idempotent. You can invoke delete multiple times, even +// if there is no policy associated with the vault. For more information about +// vault access policies, see Amazon Glacier Access Control with Vault Access +// Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +func (c *Glacier) DeleteVaultAccessPolicy(input *DeleteVaultAccessPolicyInput) (*DeleteVaultAccessPolicyOutput, error) { + req, out := c.DeleteVaultAccessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVaultNotifications = "DeleteVaultNotifications" + +// DeleteVaultNotificationsRequest generates a request for the DeleteVaultNotifications operation. 
+func (c *Glacier) DeleteVaultNotificationsRequest(input *DeleteVaultNotificationsInput) (req *request.Request, output *DeleteVaultNotificationsOutput) { + op := &request.Operation{ + Name: opDeleteVaultNotifications, + HTTPMethod: "DELETE", + HTTPPath: "/{accountId}/vaults/{vaultName}/notification-configuration", + } + + if input == nil { + input = &DeleteVaultNotificationsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVaultNotificationsOutput{} + req.Data = output + return +} + +// This operation deletes the notification configuration set for a vault. The +// operation is eventually consistent; that is, it might take some time for +// Amazon Glacier to completely disable the notifications and you might still +// receive some notifications for a short time after you send the delete request. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Configuring Vault +// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) +// and Delete Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html) +// in the Amazon Glacier Developer Guide. 
+func (c *Glacier) DeleteVaultNotifications(input *DeleteVaultNotificationsInput) (*DeleteVaultNotificationsOutput, error) { + req, out := c.DeleteVaultNotificationsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeJob = "DescribeJob" + +// DescribeJobRequest generates a request for the DescribeJob operation. +func (c *Glacier) DescribeJobRequest(input *DescribeJobInput) (req *request.Request, output *JobDescription) { + op := &request.Operation{ + Name: opDescribeJob, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/jobs/{jobId}", + } + + if input == nil { + input = &DescribeJobInput{} + } + + req = c.newRequest(op, input, output) + output = &JobDescription{} + req.Data = output + return +} + +// This operation returns information about a job you previously initiated, +// including the job initiation date, the user who initiated the job, the job +// status code/message and the Amazon SNS topic to notify after Amazon Glacier +// completes the job. For more information about initiating a job, see InitiateJob. +// +// This operation enables you to check the status of your job. However, it +// is strongly recommended that you set up an Amazon SNS topic and specify it +// in your initiate job request so that Amazon Glacier can notify the topic +// after it completes the job. +// +// A job ID will not expire for at least 24 hours after Amazon Glacier completes +// the job. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). 
+// +// For information about the underlying REST API, go to Working with Archives +// in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) DescribeJob(input *DescribeJobInput) (*JobDescription, error) { + req, out := c.DescribeJobRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVault = "DescribeVault" + +// DescribeVaultRequest generates a request for the DescribeVault operation. +func (c *Glacier) DescribeVaultRequest(input *DescribeVaultInput) (req *request.Request, output *DescribeVaultOutput) { + op := &request.Operation{ + Name: opDescribeVault, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}", + } + + if input == nil { + input = &DescribeVaultInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVaultOutput{} + req.Data = output + return +} + +// This operation returns information about a vault, including the vault's Amazon +// Resource Name (ARN), the date the vault was created, the number of archives +// it contains, and the total size of all the archives in the vault. The number +// of archives and their total size are as of the last inventory generation. +// This means that if you add or remove an archive from a vault, and then immediately +// use Describe Vault, the change in contents will not be immediately reflected. +// If you want to retrieve the latest inventory of the vault, use InitiateJob. +// Amazon Glacier generates vault inventories approximately daily. For more +// information, see Downloading a Vault Inventory in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html). +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. 
+// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Retrieving Vault +// Metadata in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) +// and Describe Vault (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) DescribeVault(input *DescribeVaultInput) (*DescribeVaultOutput, error) { + req, out := c.DescribeVaultRequest(input) + err := req.Send() + return out, err +} + +const opGetDataRetrievalPolicy = "GetDataRetrievalPolicy" + +// GetDataRetrievalPolicyRequest generates a request for the GetDataRetrievalPolicy operation. +func (c *Glacier) GetDataRetrievalPolicyRequest(input *GetDataRetrievalPolicyInput) (req *request.Request, output *GetDataRetrievalPolicyOutput) { + op := &request.Operation{ + Name: opGetDataRetrievalPolicy, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/policies/data-retrieval", + } + + if input == nil { + input = &GetDataRetrievalPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDataRetrievalPolicyOutput{} + req.Data = output + return +} + +// This operation returns the current data retrieval policy for the account +// and region specified in the GET request. For more information about data +// retrieval policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). +func (c *Glacier) GetDataRetrievalPolicy(input *GetDataRetrievalPolicyInput) (*GetDataRetrievalPolicyOutput, error) { + req, out := c.GetDataRetrievalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetJobOutput = "GetJobOutput" + +// GetJobOutputRequest generates a request for the GetJobOutput operation. 
+func (c *Glacier) GetJobOutputRequest(input *GetJobOutputInput) (req *request.Request, output *GetJobOutputOutput) { + op := &request.Operation{ + Name: opGetJobOutput, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/jobs/{jobId}/output", + } + + if input == nil { + input = &GetJobOutputInput{} + } + + req = c.newRequest(op, input, output) + output = &GetJobOutputOutput{} + req.Data = output + return +} + +// This operation downloads the output of the job you initiated using InitiateJob. +// Depending on the job type you specified when you initiated the job, the output +// will be either the content of an archive or a vault inventory. +// +// A job ID will not expire for at least 24 hours after Amazon Glacier completes +// the job. That is, you can download the job output within the 24 hours period +// after Amazon Glacier completes the job. +// +// If the job output is large, then you can use the Range request header to +// retrieve a portion of the output. This allows you to download the entire +// output in smaller chunks of bytes. For example, suppose you have 1 GB of +// job output you want to download and you decide to download 128 MB chunks +// of data at a time, which is a total of eight Get Job Output requests. You +// use the following process to download the job output: +// +// Download a 128 MB chunk of output by specifying the appropriate byte range +// using the Range header. +// +// Along with the data, the response includes a SHA256 tree hash of the payload. +// You compute the checksum of the payload on the client and compare it with +// the checksum you received in the response to ensure you received all the +// expected data. +// +// Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each +// time specifying the appropriate byte range. +// +// After downloading all the parts of the job output, you have a list of +// eight checksum values. 
Compute the tree hash of these values to find the +// checksum of the entire output. Using the DescribeJob API, obtain job information +// of the job that provided you the output. The response includes the checksum +// of the entire archive stored in Amazon Glacier. You compare this value with +// the checksum you computed to ensure you have downloaded the entire archive +// content with no errors. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and the underlying REST API, go to Downloading +// a Vault Inventory (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html), +// Downloading an Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html), +// and Get Job Output (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html) +func (c *Glacier) GetJobOutput(input *GetJobOutputInput) (*GetJobOutputOutput, error) { + req, out := c.GetJobOutputRequest(input) + err := req.Send() + return out, err +} + +const opGetVaultAccessPolicy = "GetVaultAccessPolicy" + +// GetVaultAccessPolicyRequest generates a request for the GetVaultAccessPolicy operation. 
+func (c *Glacier) GetVaultAccessPolicyRequest(input *GetVaultAccessPolicyInput) (req *request.Request, output *GetVaultAccessPolicyOutput) { + op := &request.Operation{ + Name: opGetVaultAccessPolicy, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/access-policy", + } + + if input == nil { + input = &GetVaultAccessPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetVaultAccessPolicyOutput{} + req.Data = output + return +} + +// This operation retrieves the access-policy subresource set on the vault; +// for more information on setting this subresource, see Set Vault Access Policy +// (PUT access-policy) (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html). +// If there is no access policy set on the vault, the operation returns a 404 +// Not found error. For more information about vault access policies, see Amazon +// Glacier Access Control with Vault Access Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +func (c *Glacier) GetVaultAccessPolicy(input *GetVaultAccessPolicyInput) (*GetVaultAccessPolicyOutput, error) { + req, out := c.GetVaultAccessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetVaultLock = "GetVaultLock" + +// GetVaultLockRequest generates a request for the GetVaultLock operation. +func (c *Glacier) GetVaultLockRequest(input *GetVaultLockInput) (req *request.Request, output *GetVaultLockOutput) { + op := &request.Operation{ + Name: opGetVaultLock, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy", + } + + if input == nil { + input = &GetVaultLockInput{} + } + + req = c.newRequest(op, input, output) + output = &GetVaultLockOutput{} + req.Data = output + return +} + +// This operation retrieves the following attributes from the lock-policy subresource +// set on the specified vault: The vault lock policy set on the vault. 
+// +// The state of the vault lock, which is either InProgess or Locked. +// +// When the lock ID expires. The lock ID is used to complete the vault locking +// process. +// +// When the vault lock was initiated and put into the InProgress state. +// +// A vault lock is put into the InProgress state by calling InitiateVaultLock. +// A vault lock is put into the Locked state by calling CompleteVaultLock. You +// can abort the vault locking process by calling AbortVaultLock. For more information +// about the vault locking process, Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// +// If there is no vault lock policy set on the vault, the operation returns +// a 404 Not found error. For more information about vault lock policies, Amazon +// Glacier Access Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). +func (c *Glacier) GetVaultLock(input *GetVaultLockInput) (*GetVaultLockOutput, error) { + req, out := c.GetVaultLockRequest(input) + err := req.Send() + return out, err +} + +const opGetVaultNotifications = "GetVaultNotifications" + +// GetVaultNotificationsRequest generates a request for the GetVaultNotifications operation. +func (c *Glacier) GetVaultNotificationsRequest(input *GetVaultNotificationsInput) (req *request.Request, output *GetVaultNotificationsOutput) { + op := &request.Operation{ + Name: opGetVaultNotifications, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/notification-configuration", + } + + if input == nil { + input = &GetVaultNotificationsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetVaultNotificationsOutput{} + req.Data = output + return +} + +// This operation retrieves the notification-configuration subresource of the +// specified vault. +// +// For information about setting a notification configuration on a vault, see +// SetVaultNotifications. 
If a notification configuration for a vault is not +// set, the operation returns a 404 Not Found error. For more information about +// vault notifications, see Configuring Vault Notifications in Amazon Glacier +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html). +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Configuring Vault +// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) +// and Get Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) GetVaultNotifications(input *GetVaultNotificationsInput) (*GetVaultNotificationsOutput, error) { + req, out := c.GetVaultNotificationsRequest(input) + err := req.Send() + return out, err +} + +const opInitiateJob = "InitiateJob" + +// InitiateJobRequest generates a request for the InitiateJob operation. +func (c *Glacier) InitiateJobRequest(input *InitiateJobInput) (req *request.Request, output *InitiateJobOutput) { + op := &request.Operation{ + Name: opInitiateJob, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/jobs", + } + + if input == nil { + input = &InitiateJobInput{} + } + + req = c.newRequest(op, input, output) + output = &InitiateJobOutput{} + req.Data = output + return +} + +// This operation initiates a job of the specified type. 
In this release, you +// can initiate a job to retrieve either an archive or a vault inventory (a +// list of archives in a vault). +// +// Retrieving data from Amazon Glacier is a two-step process: +// +// Initiate a retrieval job. +// +// A data retrieval policy can cause your initiate retrieval job request to +// fail with a PolicyEnforcedException exception. For more information about +// data retrieval policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). +// For more information about the PolicyEnforcedException exception, see Error +// Responses (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-error-responses.html). +// +// After the job completes, download the bytes. +// +// The retrieval request is executed asynchronously. When you initiate a retrieval +// job, Amazon Glacier creates a job and returns a job ID in the response. When +// Amazon Glacier completes the job, you can get the job output (archive or +// inventory data). For information about getting job output, see GetJobOutput +// operation. +// +// The job must complete before you can get its output. To determine when a +// job is complete, you have the following options: +// +// Use Amazon SNS Notification You can specify an Amazon Simple Notification +// Service (Amazon SNS) topic to which Amazon Glacier can post a notification +// after the job is completed. You can specify an SNS topic per job request. +// The notification is sent only after Amazon Glacier completes the job. In +// addition to specifying an SNS topic per job request, you can configure vault +// notifications for a vault so that job notifications are always sent. For +// more information, see SetVaultNotifications. +// +// Get job details You can make a DescribeJob request to obtain job status +// information while a job is in progress. However, it is more efficient to +// use an Amazon SNS notification to determine when a job is complete. 
+// +// The information you get via notification is same that you get by calling +// DescribeJob. +// +// If for a specific event, you add both the notification configuration on +// the vault and also specify an SNS topic in your initiate job request, Amazon +// Glacier sends both notifications. For more information, see SetVaultNotifications. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// About the Vault Inventory +// +// Amazon Glacier prepares an inventory for each vault periodically, every +// 24 hours. When you initiate a job for a vault inventory, Amazon Glacier returns +// the last inventory for the vault. The inventory data you get might be up +// to a day or two days old. Also, the initiate inventory job might take some +// time to complete before you can download the vault inventory. So you do not +// want to retrieve a vault inventory for each vault operation. However, in +// some scenarios, you might find the vault inventory useful. For example, when +// you upload an archive, you can provide an archive description but not an +// archive name. Amazon Glacier provides you a unique archive ID, an opaque +// string of characters. So, you might maintain your own database that maps +// archive names to their corresponding Amazon Glacier assigned archive IDs. +// You might find the vault inventory useful in the event you need to reconcile +// information in your database with the actual vault inventory. +// +// Range Inventory Retrieval +// +// You can limit the number of inventory items retrieved by filtering on the +// archive creation date or by setting a limit. 
+// +// Filtering by Archive Creation Date +// +// You can retrieve inventory items for archives created between StartDate +// and EndDate by specifying values for these parameters in the InitiateJob +// request. Archives created on or after the StartDate and before the EndDate +// will be returned. If you only provide the StartDate without the EndDate, +// you will retrieve the inventory for all archives created on or after the +// StartDate. If you only provide the EndDate without the StartDate, you will +// get back the inventory for all archives created before the EndDate. +// +// Limiting Inventory Items per Retrieval +// +// You can limit the number of inventory items returned by setting the Limit +// parameter in the InitiateJob request. The inventory job output will contain +// inventory items up to the specified Limit. If there are more inventory items +// available, the result is paginated. After a job is complete you can use the +// DescribeJob operation to get a marker that you use in a subsequent InitiateJob +// request. The marker will indicate the starting point to retrieve the next +// set of inventory items. You can page through your entire inventory by repeatedly +// making InitiateJob requests with the marker from the previous DescribeJob +// output, until you get a marker from DescribeJob that returns null, indicating +// that there are no more inventory items available. +// +// You can use the Limit parameter together with the date range parameters. +// +// About Ranged Archive Retrieval +// +// You can initiate an archive retrieval for the whole archive or a range +// of the archive. In the case of ranged archive retrieval, you specify a byte +// range to return or the whole archive. The range specified must be megabyte +// (MB) aligned, that is the range start value must be divisible by 1 MB and +// range end value plus 1 must be divisible by 1 MB or equal the end of the +// archive. 
If the ranged archive retrieval is not megabyte aligned, this operation +// returns a 400 response. Furthermore, to ensure you get checksum values for +// data you download using Get Job Output API, the range must be tree hash aligned. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and the underlying REST API, go to Initiate a +// Job (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html) +// and Downloading a Vault Inventory (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html) +func (c *Glacier) InitiateJob(input *InitiateJobInput) (*InitiateJobOutput, error) { + req, out := c.InitiateJobRequest(input) + err := req.Send() + return out, err +} + +const opInitiateMultipartUpload = "InitiateMultipartUpload" + +// InitiateMultipartUploadRequest generates a request for the InitiateMultipartUpload operation. +func (c *Glacier) InitiateMultipartUploadRequest(input *InitiateMultipartUploadInput) (req *request.Request, output *InitiateMultipartUploadOutput) { + op := &request.Operation{ + Name: opInitiateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads", + } + + if input == nil { + input = &InitiateMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &InitiateMultipartUploadOutput{} + req.Data = output + return +} + +// This operation initiates a multipart upload. Amazon Glacier creates a multipart +// upload resource and returns its ID in the response. 
The multipart upload +// ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart). +// +// When you initiate a multipart upload, you specify the part size in number +// of bytes. The part size must be a megabyte (1024 KB) multiplied by a power +// of 2-for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 +// (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum +// is 4 GB. +// +// Every part you upload to this resource (see UploadMultipartPart), except +// the last one, must have the same size. The last one can be the same size +// or smaller. For example, suppose you want to upload a 16.2 MB file. If you +// initiate the multipart upload with a part size of 4 MB, you will upload four +// parts of 4 MB each and one part of 0.2 MB. +// +// You don't need to know the size of the archive when you start a multipart +// upload because Amazon Glacier does not require you to specify the overall +// archive size. +// +// After you complete the multipart upload, Amazon Glacier removes the multipart +// upload resource referenced by the ID. Amazon Glacier also removes the multipart +// upload resource if you cancel the multipart upload or it may be removed if +// there is no activity for a period of 24 hours. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). 
+// +// For conceptual information and underlying REST API, go to Uploading Large +// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) +// and Initiate Multipart Upload (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) InitiateMultipartUpload(input *InitiateMultipartUploadInput) (*InitiateMultipartUploadOutput, error) { + req, out := c.InitiateMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opInitiateVaultLock = "InitiateVaultLock" + +// InitiateVaultLockRequest generates a request for the InitiateVaultLock operation. +func (c *Glacier) InitiateVaultLockRequest(input *InitiateVaultLockInput) (req *request.Request, output *InitiateVaultLockOutput) { + op := &request.Operation{ + Name: opInitiateVaultLock, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/lock-policy", + } + + if input == nil { + input = &InitiateVaultLockInput{} + } + + req = c.newRequest(op, input, output) + output = &InitiateVaultLockOutput{} + req.Data = output + return +} + +// This operation initiates the vault locking process by doing the following: +// Installing a vault lock policy on the specified vault. +// +// Setting the lock state of vault lock to InProgress. +// +// Returning a lock ID, which is used to complete the vault locking process. +// +// You can set one vault lock policy for each vault and this policy can +// be up to 20 KB in size. For more information about vault lock policies, see +// Amazon Glacier Access Control with Vault Lock Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). +// +// You must complete the vault locking process within 24 hours after the vault +// lock enters the InProgress state. 
After the 24 hour window ends, the lock +// ID expires, the vault automatically exits the InProgress state, and the vault +// lock policy is removed from the vault. You call CompleteVaultLock to complete +// the vault locking process by setting the state of the vault lock to Locked. +// +// After a vault lock is in the Locked state, you cannot initiate a new vault +// lock for the vault. +// +// You can abort the vault locking process by calling AbortVaultLock. You can +// get the state of the vault lock by calling GetVaultLock. For more information +// about the vault locking process, Amazon Glacier Vault Lock (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html). +// +// If this operation is called when the vault lock is in the InProgress state, +// the operation returns an AccessDeniedException error. When the vault lock +// is in the InProgress state you must call AbortVaultLock before you can initiate +// a new vault lock policy. +func (c *Glacier) InitiateVaultLock(input *InitiateVaultLockInput) (*InitiateVaultLockOutput, error) { + req, out := c.InitiateVaultLockRequest(input) + err := req.Send() + return out, err +} + +const opListJobs = "ListJobs" + +// ListJobsRequest generates a request for the ListJobs operation. +func (c *Glacier) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { + op := &request.Operation{ + Name: opListJobs, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/jobs", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListJobsOutput{} + req.Data = output + return +} + +// This operation lists jobs for a vault, including jobs that are in-progress +// and jobs that have recently finished. 
+// +// Amazon Glacier retains recently completed jobs for a period before deleting +// them; however, it eventually removes completed jobs. The output of completed +// jobs can be retrieved. Retaining completed jobs for a period of time after +// they have completed enables you to get a job output in the event you miss +// the job completion notification or your first attempt to download it fails. +// For example, suppose you start an archive retrieval job to download an archive. +// After the job completes, you start to download the archive but encounter +// a network error. In this scenario, you can retry and download the archive +// while the job exists. +// +// To retrieve an archive or retrieve a vault inventory from Amazon Glacier, +// you first initiate a job, and after the job completes, you download the data. +// For an archive retrieval, the output is the archive data, and for an inventory +// retrieval, it is the inventory list. The List Job operation returns a list +// of these jobs sorted by job initiation time. +// +// This List Jobs operation supports pagination. By default, this operation +// returns up to 1,000 jobs in the response. You should always check the response +// for a marker at which to continue the list; if there are no more items the +// marker is null. To return a list of jobs that begins at a specific job, set +// the marker request parameter to the value you obtained from a previous List +// Jobs request. You can also limit the number of jobs returned in the response +// by specifying the limit parameter in the request. +// +// Additionally, you can filter the jobs list returned by specifying an optional +// statuscode (InProgress, Succeeded, or Failed) and completed (true, false) +// parameter. The statuscode allows you to specify that only jobs that match +// a specified status are returned. The completed parameter allows you to specify +// that only jobs in a specific completion state are returned. 
+// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For the underlying REST API, go to List Jobs (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html) +func (c *Glacier) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) + err := req.Send() + return out, err +} + +func (c *Glacier) ListJobsPages(input *ListJobsInput, fn func(p *ListJobsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListJobsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListJobsOutput), lastPage) + }) +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +func (c *Glacier) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMultipartUploadsOutput{} + req.Data = output + return +} + +// This operation lists in-progress multipart uploads for the specified vault. 
+// An in-progress multipart upload is a multipart upload that has been initiated +// by an InitiateMultipartUpload request, but has not yet been completed or +// aborted. The list returned in the List Multipart Upload response has no guaranteed +// order. +// +// The List Multipart Uploads operation supports pagination. By default, this +// operation returns up to 1,000 multipart uploads in the response. You should +// always check the response for a marker at which to continue the list; if +// there are no more items the marker is null. To return a list of multipart +// uploads that begins at a specific upload, set the marker request parameter +// to the value you obtained from a previous List Multipart Upload request. +// You can also limit the number of uploads returned in the response by specifying +// the limit parameter in the request. +// +// Note the difference between this operation and listing parts (ListParts). +// The List Multipart Uploads operation lists all multipart uploads for a vault +// and does not require a multipart upload ID. The List Parts operation requires +// a multipart upload ID since parts are associated with a single upload. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and the underlying REST API, go to Working with +// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and List Multipart Uploads (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html) +// in the Amazon Glacier Developer Guide. 
+func (c *Glacier) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + err := req.Send() + return out, err +} + +func (c *Glacier) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMultipartUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMultipartUploadsOutput), lastPage) + }) +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a request for the ListParts operation. +func (c *Glacier) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPartsOutput{} + req.Data = output + return +} + +// This operation lists the parts of an archive that have been uploaded in a +// specific multipart upload. You can make this request at any time during an +// in-progress multipart upload before you complete the upload (see CompleteMultipartUpload. +// List Parts returns an error for completed uploads. The list returned in the +// List Parts response is sorted by part range. +// +// The List Parts operation supports pagination. By default, this operation +// returns up to 1,000 uploaded parts in the response. You should always check +// the response for a marker at which to continue the list; if there are no +// more items the marker is null. 
To return a list of parts that begins at a +// specific part, set the marker request parameter to the value you obtained +// from a previous List Parts request. You can also limit the number of parts +// returned in the response by specifying the limit parameter in the request. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and the underlying REST API, go to Working with +// Archives in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html) +// and List Parts (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} + +func (c *Glacier) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +const opListTagsForVault = "ListTagsForVault" + +// ListTagsForVaultRequest generates a request for the ListTagsForVault operation. 
+func (c *Glacier) ListTagsForVaultRequest(input *ListTagsForVaultInput) (req *request.Request, output *ListTagsForVaultOutput) { + op := &request.Operation{ + Name: opListTagsForVault, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults/{vaultName}/tags", + } + + if input == nil { + input = &ListTagsForVaultInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForVaultOutput{} + req.Data = output + return +} + +// This operation lists all the tags attached to a vault. The operation returns +// an empty map if there are no tags. For more information about tags, see Tagging +// Amazon Glacier Resources (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). +func (c *Glacier) ListTagsForVault(input *ListTagsForVaultInput) (*ListTagsForVaultOutput, error) { + req, out := c.ListTagsForVaultRequest(input) + err := req.Send() + return out, err +} + +const opListVaults = "ListVaults" + +// ListVaultsRequest generates a request for the ListVaults operation. +func (c *Glacier) ListVaultsRequest(input *ListVaultsInput) (req *request.Request, output *ListVaultsOutput) { + op := &request.Operation{ + Name: opListVaults, + HTTPMethod: "GET", + HTTPPath: "/{accountId}/vaults", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListVaultsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVaultsOutput{} + req.Data = output + return +} + +// This operation lists all vaults owned by the calling user's account. The +// list returned in the response is ASCII-sorted by vault name. +// +// By default, this operation returns up to 1,000 items. If there are more +// vaults to list, the response marker field contains the vault Amazon Resource +// Name (ARN) at which to continue the list with a new List Vaults request; +// otherwise, the marker field is null. 
To return a list of vaults that begins +// at a specific vault, set the marker request parameter to the vault ARN you +// obtained from a previous List Vaults request. You can also limit the number +// of vaults returned in the response by specifying the limit parameter in the +// request. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Retrieving Vault +// Metadata in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html) +// and List Vaults (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) ListVaults(input *ListVaultsInput) (*ListVaultsOutput, error) { + req, out := c.ListVaultsRequest(input) + err := req.Send() + return out, err +} + +func (c *Glacier) ListVaultsPages(input *ListVaultsInput, fn func(p *ListVaultsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListVaultsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListVaultsOutput), lastPage) + }) +} + +const opRemoveTagsFromVault = "RemoveTagsFromVault" + +// RemoveTagsFromVaultRequest generates a request for the RemoveTagsFromVault operation. 
+func (c *Glacier) RemoveTagsFromVaultRequest(input *RemoveTagsFromVaultInput) (req *request.Request, output *RemoveTagsFromVaultOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromVault, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/tags?operation=remove", + } + + if input == nil { + input = &RemoveTagsFromVaultInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromVaultOutput{} + req.Data = output + return +} + +// This operation removes one or more tags from the set of tags attached to +// a vault. For more information about tags, see Tagging Amazon Glacier Resources +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html). This +// operation is idempotent. The operation will be successful, even if there +// are no tags attached to the vault. +func (c *Glacier) RemoveTagsFromVault(input *RemoveTagsFromVaultInput) (*RemoveTagsFromVaultOutput, error) { + req, out := c.RemoveTagsFromVaultRequest(input) + err := req.Send() + return out, err +} + +const opSetDataRetrievalPolicy = "SetDataRetrievalPolicy" + +// SetDataRetrievalPolicyRequest generates a request for the SetDataRetrievalPolicy operation. 
+func (c *Glacier) SetDataRetrievalPolicyRequest(input *SetDataRetrievalPolicyInput) (req *request.Request, output *SetDataRetrievalPolicyOutput) { + op := &request.Operation{ + Name: opSetDataRetrievalPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/policies/data-retrieval", + } + + if input == nil { + input = &SetDataRetrievalPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetDataRetrievalPolicyOutput{} + req.Data = output + return +} + +// This operation sets and then enacts a data retrieval policy in the region +// specified in the PUT request. You can set one policy per region for an AWS +// account. The policy is enacted within a few minutes of a successful PUT operation. +// +// The set policy operation does not affect retrieval jobs that were in progress +// before the policy was enacted. For more information about data retrieval +// policies, see Amazon Glacier Data Retrieval Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html). +func (c *Glacier) SetDataRetrievalPolicy(input *SetDataRetrievalPolicyInput) (*SetDataRetrievalPolicyOutput, error) { + req, out := c.SetDataRetrievalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSetVaultAccessPolicy = "SetVaultAccessPolicy" + +// SetVaultAccessPolicyRequest generates a request for the SetVaultAccessPolicy operation. 
+func (c *Glacier) SetVaultAccessPolicyRequest(input *SetVaultAccessPolicyInput) (req *request.Request, output *SetVaultAccessPolicyOutput) { + op := &request.Operation{ + Name: opSetVaultAccessPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/vaults/{vaultName}/access-policy", + } + + if input == nil { + input = &SetVaultAccessPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetVaultAccessPolicyOutput{} + req.Data = output + return +} + +// This operation configures an access policy for a vault and will overwrite +// an existing policy. To configure a vault access policy, send a PUT request +// to the access-policy subresource of the vault. An access policy is specific +// to a vault and is also called a vault subresource. You can set one access +// policy per vault and the policy can be up to 20 KB in size. For more information +// about vault access policies, see Amazon Glacier Access Control with Vault +// Access Policies (http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html). +func (c *Glacier) SetVaultAccessPolicy(input *SetVaultAccessPolicyInput) (*SetVaultAccessPolicyOutput, error) { + req, out := c.SetVaultAccessPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSetVaultNotifications = "SetVaultNotifications" + +// SetVaultNotificationsRequest generates a request for the SetVaultNotifications operation. 
+func (c *Glacier) SetVaultNotificationsRequest(input *SetVaultNotificationsInput) (req *request.Request, output *SetVaultNotificationsOutput) { + op := &request.Operation{ + Name: opSetVaultNotifications, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/vaults/{vaultName}/notification-configuration", + } + + if input == nil { + input = &SetVaultNotificationsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetVaultNotificationsOutput{} + req.Data = output + return +} + +// This operation configures notifications that will be sent when specific events +// happen to a vault. By default, you don't get any notifications. +// +// To configure vault notifications, send a PUT request to the notification-configuration +// subresource of the vault. The request should include a JSON document that +// provides an Amazon SNS topic and specific events for which you want Amazon +// Glacier to send notifications to the topic. +// +// Amazon SNS topics must grant permission to the vault to be allowed to publish +// notifications to the topic. You can configure a vault to publish a notification +// for the following vault events: +// +// ArchiveRetrievalCompleted This event occurs when a job that was initiated +// for an archive retrieval is completed (InitiateJob). The status of the completed +// job can be "Succeeded" or "Failed". The notification sent to the SNS topic +// is the same output as returned from DescribeJob. InventoryRetrievalCompleted +// This event occurs when a job that was initiated for an inventory retrieval +// is completed (InitiateJob). The status of the completed job can be "Succeeded" +// or "Failed". The notification sent to the SNS topic is the same output as +// returned from DescribeJob. An AWS account has full permission to perform +// all operations (actions). 
However, AWS Identity and Access Management (IAM) +// users don't have any permissions by default. You must grant them explicit +// permission to perform specific actions. For more information, see Access +// Control Using AWS Identity and Access Management (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Configuring Vault +// Notifications in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html) +// and Set Vault Notification Configuration (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) SetVaultNotifications(input *SetVaultNotificationsInput) (*SetVaultNotificationsOutput, error) { + req, out := c.SetVaultNotificationsRequest(input) + err := req.Send() + return out, err +} + +const opUploadArchive = "UploadArchive" + +// UploadArchiveRequest generates a request for the UploadArchive operation. +func (c *Glacier) UploadArchiveRequest(input *UploadArchiveInput) (req *request.Request, output *ArchiveCreationOutput) { + op := &request.Operation{ + Name: opUploadArchive, + HTTPMethod: "POST", + HTTPPath: "/{accountId}/vaults/{vaultName}/archives", + } + + if input == nil { + input = &UploadArchiveInput{} + } + + req = c.newRequest(op, input, output) + output = &ArchiveCreationOutput{} + req.Data = output + return +} + +// This operation adds an archive to a vault. This is a synchronous operation, +// and for a successful upload, your data is durably persisted. Amazon Glacier +// returns the archive ID in the x-amz-archive-id header of the response. +// +// You must use the archive ID to access your data in Amazon Glacier. After +// you upload an archive, you should save the archive ID returned so that you +// can retrieve or delete the archive later. 
Besides saving the archive ID, +// you can also index it and give it a friendly name to allow for better searching. +// You can also use the optional archive description field to specify how the +// archive is referred to in an external index of archives, such as you might +// create in Amazon DynamoDB. You can also get the vault inventory to obtain +// a list of archive IDs in a vault. For more information, see InitiateJob. +// +// You must provide a SHA256 tree hash of the data you are uploading. For information +// about computing a SHA256 tree hash, see Computing Checksums (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). +// +// You can optionally specify an archive description of up to 1,024 printable +// ASCII characters. You can get the archive description when you either retrieve +// the archive or get the vault inventory. For more information, see InitiateJob. +// Amazon Glacier does not interpret the description in any way. An archive +// description does not need to be unique. You cannot use the description to +// retrieve or sort the archive list. +// +// Archives are immutable. After you upload an archive, you cannot edit the +// archive or its description. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Uploading an +// Archive in Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html) +// and Upload Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html) +// in the Amazon Glacier Developer Guide. 
+func (c *Glacier) UploadArchive(input *UploadArchiveInput) (*ArchiveCreationOutput, error) { + req, out := c.UploadArchiveRequest(input) + err := req.Send() + return out, err +} + +const opUploadMultipartPart = "UploadMultipartPart" + +// UploadMultipartPartRequest generates a request for the UploadMultipartPart operation. +func (c *Glacier) UploadMultipartPartRequest(input *UploadMultipartPartInput) (req *request.Request, output *UploadMultipartPartOutput) { + op := &request.Operation{ + Name: opUploadMultipartPart, + HTTPMethod: "PUT", + HTTPPath: "/{accountId}/vaults/{vaultName}/multipart-uploads/{uploadId}", + } + + if input == nil { + input = &UploadMultipartPartInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadMultipartPartOutput{} + req.Data = output + return +} + +// This operation uploads a part of an archive. You can upload archive parts +// in any order. You can also upload them in parallel. You can upload up to +// 10,000 parts for a multipart upload. +// +// Amazon Glacier rejects your upload part request if any of the following +// conditions is true: +// +// SHA256 tree hash does not matchTo ensure that part data is not corrupted +// in transmission, you compute a SHA256 tree hash of the part and include it +// in your request. Upon receiving the part data, Amazon Glacier also computes +// a SHA256 tree hash. If these hash values don't match, the operation fails. +// For information about computing a SHA256 tree hash, see Computing Checksums +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). +// +// Part size does not matchThe size of each part except the last must match +// the size specified in the corresponding InitiateMultipartUpload request. +// The size of the last part must be the same size as, or smaller than, the +// specified size. 
+// +// If you upload a part whose size is smaller than the part size you specified +// in your initiate multipart upload request and that part is not the last part, +// then the upload part request will succeed. However, the subsequent Complete +// Multipart Upload request will fail. +// +// Range does not alignThe byte range value in the request does not align +// with the part size specified in the corresponding initiate request. For example, +// if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes +// (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. +// However, if you set a range value of 2 MB to 6 MB, the range does not align +// with the part size and the upload will fail. This operation is idempotent. +// If you upload the same part multiple times, the data included in the most +// recent request overwrites the previously uploaded data. +// +// An AWS account has full permission to perform all operations (actions). +// However, AWS Identity and Access Management (IAM) users don't have any permissions +// by default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access Management +// (IAM) (http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). +// +// For conceptual information and underlying REST API, go to Uploading Large +// Archives in Parts (Multipart Upload) (http://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html) +// and Upload Part (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html) +// in the Amazon Glacier Developer Guide. +func (c *Glacier) UploadMultipartPart(input *UploadMultipartPartInput) (*UploadMultipartPartOutput, error) { + req, out := c.UploadMultipartPartRequest(input) + err := req.Send() + return out, err +} + +// Provides options to abort a multipart upload identified by the upload ID. 
+// +// For information about the underlying REST API, go to Abort Multipart Upload +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html). +// For conceptual information, go to Working with Archives in Amazon Glacier +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html). +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The upload ID of the multipart upload to delete. + UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// The input values for AbortVaultLock. +type AbortVaultLockInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. 
This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortVaultLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortVaultLockInput) GoString() string { + return s.String() +} + +type AbortVaultLockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AbortVaultLockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortVaultLockOutput) GoString() string { + return s.String() +} + +// The input values for AddTagsToVault. +type AddTagsToVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The tags to add to the vault. Each tag is composed of a key and a value. + // The value can be an empty string. + Tags map[string]*string `type:"map"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddTagsToVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToVaultInput) GoString() string { + return s.String() +} + +type AddTagsToVaultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToVaultOutput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +// +// For information about the underlying REST API, go to Upload Archive (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html). +// For conceptual information, go to Working with Archives in Amazon Glacier +// (http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html). +type ArchiveCreationOutput struct { + _ struct{} `type:"structure"` + + // The ID of the archive. This value is also included as part of the location. + ArchiveId *string `location:"header" locationName:"x-amz-archive-id" type:"string"` + + // The checksum of the archive computed by Amazon Glacier. + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + // The relative URI path of the newly added archive resource. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s ArchiveCreationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ArchiveCreationOutput) GoString() string { + return s.String() +} + +// Provides options to complete a multipart upload operation. 
This informs Amazon +// Glacier that all the archive parts have been uploaded and Amazon Glacier +// can now assemble the archive from the uploaded parts. After assembling and +// saving the archive to the vault, Amazon Glacier returns the URI path of the +// newly created archive resource. +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The total size, in bytes, of the entire archive. This value should be the + // sum of all the sizes of the individual parts that you uploaded. + ArchiveSize *string `location:"header" locationName:"x-amz-archive-size" type:"string"` + + // The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 + // tree hash of the individual parts. If the value you specify in the request + // does not match the SHA256 tree hash of the final assembled archive as computed + // by Amazon Glacier, Amazon Glacier returns an error and the request fails. + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + // The upload ID of the multipart upload. + UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// The input values for CompleteVaultLock. +type CompleteVaultLockInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The lockId value is the lock ID obtained from a InitiateVaultLock request. + LockId *string `location:"uri" locationName:"lockId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteVaultLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteVaultLockInput) GoString() string { + return s.String() +} + +type CompleteVaultLockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CompleteVaultLockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteVaultLockOutput) GoString() string { + return s.String() +} + +// Provides options to create a vault. 
+type CreateVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVaultInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type CreateVaultOutput struct { + _ struct{} `type:"structure"` + + // The URI of the vault that was created. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVaultOutput) GoString() string { + return s.String() +} + +// Data retrieval policy. +type DataRetrievalPolicy struct { + _ struct{} `type:"structure"` + + // The policy rule. Although this is a list type, currently there must be only + // one rule, which contains a Strategy field and optionally a BytesPerHour field. 
+ Rules []*DataRetrievalRule `type:"list"` +} + +// String returns the string representation +func (s DataRetrievalPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRetrievalPolicy) GoString() string { + return s.String() +} + +// Data retrieval policy rule. +type DataRetrievalRule struct { + _ struct{} `type:"structure"` + + // The maximum number of bytes that can be retrieved in an hour. + // + // This field is required only if the value of the Strategy field is BytesPerHour. + // Your PUT operation will be rejected if the Strategy field is not set to BytesPerHour + // and you set this field. + BytesPerHour *int64 `type:"long"` + + // The type of data retrieval policy to set. + // + // Valid values: BytesPerHour|FreeTier|None + Strategy *string `type:"string"` +} + +// String returns the string representation +func (s DataRetrievalRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRetrievalRule) GoString() string { + return s.String() +} + +// Provides options for deleting an archive from an Amazon Glacier vault. +type DeleteArchiveInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The ID of the archive to delete. + ArchiveId *string `location:"uri" locationName:"archiveId" type:"string" required:"true"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteArchiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteArchiveInput) GoString() string { + return s.String() +} + +type DeleteArchiveOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteArchiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteArchiveOutput) GoString() string { + return s.String() +} + +// DeleteVaultAccessPolicy input. +type DeleteVaultAccessPolicyInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVaultAccessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultAccessPolicyInput) GoString() string { + return s.String() +} + +type DeleteVaultAccessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVaultAccessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultAccessPolicyOutput) GoString() string { + return s.String() +} + +// Provides options for deleting a vault from Amazon Glacier. +type DeleteVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultInput) GoString() string { + return s.String() +} + +// Provides options for deleting a vault notification configuration from an +// Amazon Glacier vault. +type DeleteVaultNotificationsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. 
+ // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVaultNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultNotificationsInput) GoString() string { + return s.String() +} + +type DeleteVaultNotificationsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVaultNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultNotificationsOutput) GoString() string { + return s.String() +} + +type DeleteVaultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVaultOutput) GoString() string { + return s.String() +} + +// Provides options for retrieving a job description. +type DescribeJobInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. 
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The ID of the job to describe. + JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJobInput) GoString() string { + return s.String() +} + +// Provides options for retrieving metadata for a specific vault in Amazon Glacier. +type DescribeVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVaultInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type DescribeVaultOutput struct { + _ struct{} `type:"structure"` + + // The UTC date when the vault was created. A string representation of ISO 8601 + // date format, for example, "2012-03-20T17:03:43.221Z". + CreationDate *string `type:"string"` + + // The UTC date when Amazon Glacier completed the last vault inventory. 
A string + // representation of ISO 8601 date format, for example, "2012-03-20T17:03:43.221Z". + LastInventoryDate *string `type:"string"` + + // The number of archives in the vault as of the last inventory date. This field + // will return null if an inventory has not yet run on the vault, for example, + // if you just created the vault. + NumberOfArchives *int64 `type:"long"` + + // Total size, in bytes, of the archives in the vault as of the last inventory + // date. This field will return null if an inventory has not yet run on the + // vault, for example, if you just created the vault. + SizeInBytes *int64 `type:"long"` + + // The Amazon Resource Name (ARN) of the vault. + VaultARN *string `type:"string"` + + // The name of the vault. + VaultName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVaultOutput) GoString() string { + return s.String() +} + +// Input for GetDataRetrievalPolicy. +type GetDataRetrievalPolicyInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. 
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDataRetrievalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataRetrievalPolicyInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to the GetDataRetrievalPolicy request. +type GetDataRetrievalPolicyOutput struct { + _ struct{} `type:"structure"` + + // Contains the returned data retrieval policy in JSON format. + Policy *DataRetrievalPolicy `type:"structure"` +} + +// String returns the string representation +func (s GetDataRetrievalPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataRetrievalPolicyOutput) GoString() string { + return s.String() +} + +// Provides options for downloading output of an Amazon Glacier job. +type GetJobOutputInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The job ID whose data is downloaded. + JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` + + // The range of bytes to retrieve from the output. For example, if you want + // to download the first 1,048,576 bytes, specify "Range: bytes=0-1048575". + // By default, this operation downloads the entire output. + Range *string `location:"header" locationName:"Range" type:"string"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetJobOutputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobOutputInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type GetJobOutputOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Indicates the range units accepted. For more information, go to RFC2616 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). + AcceptRanges *string `location:"header" locationName:"Accept-Ranges" type:"string"` + + // The description of an archive. + ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"` + + // The job data, either archive data or inventory data. + Body io.ReadCloser `locationName:"body" type:"blob"` + + // The checksum of the data in the response. This header is returned only when + // retrieving the output for an archive retrieval job. Furthermore, this header + // appears only under the following conditions: You get the entire range of + // the archive. You request a range to return of the archive that starts and + // ends on a multiple of 1 MB. For example, if you have an 3.1 MB archive and + // you specify a range to return that starts at 1 MB and ends at 2 MB, then + // the x-amz-sha256-tree-hash is returned as a response header. You request + // a range of the archive to return that starts on a multiple of 1 MB and goes + // to the end of the archive. For example, if you have a 3.1 MB archive and + // you specify a range that starts at 2 MB and ends at 3.1 MB (the end of the + // archive), then the x-amz-sha256-tree-hash is returned as a response header. + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + // The range of bytes returned by Amazon Glacier. 
If only partial output is + // downloaded, the response provides the range of bytes Amazon Glacier returned. + // For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // The Content-Type depends on whether the job output is an archive or a vault + // inventory. For archive data, the Content-Type is application/octet-stream. + // For vault inventory, if you requested CSV format when you initiated the job, + // the Content-Type is text/csv. Otherwise, by default, vault inventory is returned + // as JSON, and the Content-Type is application/json. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The HTTP response code for a job output request. The value depends on whether + // a range was specified in the request. + Status *int64 `location:"statusCode" locationName:"status" type:"integer"` +} + +// String returns the string representation +func (s GetJobOutputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobOutputOutput) GoString() string { + return s.String() +} + +// Input for GetVaultAccessPolicy. +type GetVaultAccessPolicyInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetVaultAccessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultAccessPolicyInput) GoString() string { + return s.String() +} + +// Output for GetVaultAccessPolicy. +type GetVaultAccessPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // Contains the returned vault access policy as a JSON string. + Policy *VaultAccessPolicy `locationName:"policy" type:"structure"` +} + +// String returns the string representation +func (s GetVaultAccessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultAccessPolicyOutput) GoString() string { + return s.String() +} + +// The input values for GetVaultLock. +type GetVaultLockInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetVaultLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultLockInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. 
+type GetVaultLockOutput struct { + _ struct{} `type:"structure"` + + // The UTC date and time at which the vault lock was put into the InProgress + // state. + CreationDate *string `type:"string"` + + // The UTC date and time at which the lock ID expires. This value can be null + // if the vault lock is in a Locked state. + ExpirationDate *string `type:"string"` + + // The vault lock policy as a JSON string, which uses "\" as an escape character. + Policy *string `type:"string"` + + // The state of the vault lock. InProgress or Locked. + State *string `type:"string"` +} + +// String returns the string representation +func (s GetVaultLockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultLockOutput) GoString() string { + return s.String() +} + +// Provides options for retrieving the notification configuration set on an +// Amazon Glacier vault. +type GetVaultNotificationsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetVaultNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultNotificationsInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. 
+type GetVaultNotificationsOutput struct { + _ struct{} `type:"structure" payload:"VaultNotificationConfig"` + + // Returns the notification configuration set on the vault. + VaultNotificationConfig *VaultNotificationConfig `locationName:"vaultNotificationConfig" type:"structure"` +} + +// String returns the string representation +func (s GetVaultNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetVaultNotificationsOutput) GoString() string { + return s.String() +} + +// Provides options for initiating an Amazon Glacier job. +type InitiateJobInput struct { + _ struct{} `type:"structure" payload:"JobParameters"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // Provides options for specifying job information. + JobParameters *JobParameters `locationName:"jobParameters" type:"structure"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateJobInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type InitiateJobOutput struct { + _ struct{} `type:"structure"` + + // The ID of the job. + JobId *string `location:"header" locationName:"x-amz-job-id" type:"string"` + + // The relative URI path of the job. 
+ Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s InitiateJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateJobOutput) GoString() string { + return s.String() +} + +// Provides options for initiating a multipart upload to an Amazon Glacier vault. +type InitiateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The archive description that you are uploading in parts. + // + // The part size must be a megabyte (1024 KB) multiplied by a power of 2, for + // example 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and + // so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB (4096 + // MB). + ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"` + + // The size of each part except the last, in bytes. The last part can be smaller + // than this part size. + PartSize *string `location:"header" locationName:"x-amz-part-size" type:"string"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateMultipartUploadInput) GoString() string { + return s.String() +} + +// The Amazon Glacier response to your request. +type InitiateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // The relative URI path of the multipart upload ID Amazon Glacier created. + Location *string `location:"header" locationName:"Location" type:"string"` + + // The ID of the multipart upload. This value is also included as part of the + // location. + UploadId *string `location:"header" locationName:"x-amz-multipart-upload-id" type:"string"` +} + +// String returns the string representation +func (s InitiateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateMultipartUploadOutput) GoString() string { + return s.String() +} + +// The input values for InitiateVaultLock. +type InitiateVaultLockInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The vault lock policy as a JSON string, which uses "\" as an escape character. + Policy *VaultLockPolicy `locationName:"policy" type:"structure"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s InitiateVaultLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateVaultLockInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type InitiateVaultLockOutput struct { + _ struct{} `type:"structure"` + + // The lock ID, which is used to complete the vault locking process. + LockId *string `location:"header" locationName:"x-amz-lock-id" type:"string"` +} + +// String returns the string representation +func (s InitiateVaultLockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InitiateVaultLockOutput) GoString() string { + return s.String() +} + +// Describes the options for a range inventory retrieval job. +type InventoryRetrievalJobDescription struct { + _ struct{} `type:"structure"` + + // The end of the date range in UTC for vault inventory retrieval that includes + // archives created before this date. A string representation of ISO 8601 date + // format, for example, 2013-03-20T17:03:43Z. + EndDate *string `type:"string"` + + // The output format for the vault inventory list, which is set by the InitiateJob + // request when initiating a job to retrieve a vault inventory. Valid values + // are "CSV" and "JSON". + Format *string `type:"string"` + + // Specifies the maximum number of inventory items returned per vault inventory + // retrieval request. This limit is set when initiating the job with the a InitiateJob + // request. + Limit *string `type:"string"` + + // An opaque string that represents where to continue pagination of the vault + // inventory retrieval results. You use the marker in a new InitiateJob request + // to obtain additional inventory items. 
If there are no more inventory items, + // this value is null. For more information, see Range Inventory Retrieval + // (http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html#api-initiate-job-post-vault-inventory-list-filtering). + Marker *string `type:"string"` + + // The start of the date range in UTC for vault inventory retrieval that includes + // archives created on or after this date. A string representation of ISO 8601 + // date format, for example, 2013-03-20T17:03:43Z. + StartDate *string `type:"string"` +} + +// String returns the string representation +func (s InventoryRetrievalJobDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryRetrievalJobDescription) GoString() string { + return s.String() +} + +// Provides options for specifying a range inventory retrieval job. +type InventoryRetrievalJobInput struct { + _ struct{} `type:"structure"` + + // The end of the date range in UTC for vault inventory retrieval that includes + // archives created before this date. A string representation of ISO 8601 date + // format, for example, 2013-03-20T17:03:43Z. + EndDate *string `type:"string"` + + // Specifies the maximum number of inventory items returned per vault inventory + // retrieval request. Valid values are greater than or equal to 1. + Limit *string `type:"string"` + + // An opaque string that represents where to continue pagination of the vault + // inventory retrieval results. You use the marker in a new InitiateJob request + // to obtain additional inventory items. If there are no more inventory items, + // this value is null. + Marker *string `type:"string"` + + // The start of the date range in UTC for vault inventory retrieval that includes + // archives created on or after this date. A string representation of ISO 8601 + // date format, for example, 2013-03-20T17:03:43Z. 
+ StartDate *string `type:"string"` +} + +// String returns the string representation +func (s InventoryRetrievalJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryRetrievalJobInput) GoString() string { + return s.String() +} + +// Describes an Amazon Glacier job. +type JobDescription struct { + _ struct{} `type:"structure"` + + // The job type. It is either ArchiveRetrieval or InventoryRetrieval. + Action *string `type:"string" enum:"ActionCode"` + + // For an ArchiveRetrieval job, this is the archive ID requested for download. + // Otherwise, this field is null. + ArchiveId *string `type:"string"` + + // The SHA256 tree hash of the entire archive for an archive retrieval. For + // inventory retrieval jobs, this field is null. + ArchiveSHA256TreeHash *string `type:"string"` + + // For an ArchiveRetrieval job, this is the size in bytes of the archive being + // requested for download. For the InventoryRetrieval job, the value is null. + ArchiveSizeInBytes *int64 `type:"long"` + + // The job status. When a job is completed, you get the job's output. + Completed *bool `type:"boolean"` + + // The UTC time that the archive retrieval request completed. While the job + // is in progress, the value will be null. + CompletionDate *string `type:"string"` + + // The UTC date when the job was created. A string representation of ISO 8601 + // date format, for example, "2012-03-20T17:03:43.221Z". + CreationDate *string `type:"string"` + + // Parameters used for range inventory retrieval. + InventoryRetrievalParameters *InventoryRetrievalJobDescription `type:"structure"` + + // For an InventoryRetrieval job, this is the size in bytes of the inventory + // requested for download. For the ArchiveRetrieval job, the value is null. + InventorySizeInBytes *int64 `type:"long"` + + // The job description you provided when you initiated the job. 
+ JobDescription *string `type:"string"` + + // An opaque string that identifies an Amazon Glacier job. + JobId *string `type:"string"` + + // The retrieved byte range for archive retrieval jobs in the form "StartByteValue-EndByteValue" + // If no range was specified in the archive retrieval, then the whole archive + // is retrieved and StartByteValue equals 0 and EndByteValue equals the size + // of the archive minus 1. For inventory retrieval jobs this field is null. + RetrievalByteRange *string `type:"string"` + + // For an ArchiveRetrieval job, it is the checksum of the archive. Otherwise, + // the value is null. + // + // The SHA256 tree hash value for the requested range of an archive. If the + // Initiate a Job request for an archive specified a tree-hash aligned range, + // then this field returns a value. + // + // For the specific case when the whole archive is retrieved, this value is + // the same as the ArchiveSHA256TreeHash value. + // + // This field is null in the following situations: Archive retrieval jobs + // that specify a range that is not tree-hash aligned. + // + // Archival jobs that specify a range that is equal to the whole archive + // and the job status is InProgress. + // + // Inventory jobs. + SHA256TreeHash *string `type:"string"` + + // An Amazon Simple Notification Service (Amazon SNS) topic that receives notification. + SNSTopic *string `type:"string"` + + // The status code can be InProgress, Succeeded, or Failed, and indicates the + // status of the job. + StatusCode *string `type:"string" enum:"StatusCode"` + + // A friendly message that describes the job status. + StatusMessage *string `type:"string"` + + // The Amazon Resource Name (ARN) of the vault from which the archive retrieval + // was requested. 
+ VaultARN *string `type:"string"` +} + +// String returns the string representation +func (s JobDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobDescription) GoString() string { + return s.String() +} + +// Provides options for defining a job. +type JobParameters struct { + _ struct{} `type:"structure"` + + // The ID of the archive that you want to retrieve. This field is required only + // if Type is set to archive-retrieval. An error occurs if you specify this + // request parameter for an inventory retrieval job request. + ArchiveId *string `type:"string"` + + // The optional description for the job. The description must be less than or + // equal to 1,024 bytes. The allowable characters are 7-bit ASCII without control + // codes-specifically, ASCII values 32-126 decimal or 0x20-0x7E hexadecimal. + Description *string `type:"string"` + + // When initiating a job to retrieve a vault inventory, you can optionally add + // this parameter to your request to specify the output format. If you are initiating + // an inventory job and do not specify a Format field, JSON is the default format. + // Valid values are "CSV" and "JSON". + Format *string `type:"string"` + + // Input parameters used for range inventory retrieval. + InventoryRetrievalParameters *InventoryRetrievalJobInput `type:"structure"` + + // The byte range to retrieve for an archive retrieval. in the form "StartByteValue-EndByteValue" + // If not specified, the whole archive is retrieved. If specified, the byte + // range must be megabyte (1024*1024) aligned which means that StartByteValue + // must be divisible by 1 MB and EndByteValue plus 1 must be divisible by 1 + // MB or be the end of the archive specified as the archive byte size value + // minus 1. If RetrievalByteRange is not megabyte aligned, this operation returns + // a 400 response. 
+ // + // An error occurs if you specify this field for an inventory retrieval job + // request. + RetrievalByteRange *string `type:"string"` + + // The Amazon SNS topic ARN to which Amazon Glacier sends a notification when + // the job is completed and the output is ready for you to download. The specified + // topic publishes the notification to its subscribers. The SNS topic must exist. + SNSTopic *string `type:"string"` + + // The job type. You can initiate a job to retrieve an archive or get an inventory + // of a vault. Valid values are "archive-retrieval" and "inventory-retrieval". + Type *string `type:"string"` +} + +// String returns the string representation +func (s JobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobParameters) GoString() string { + return s.String() +} + +// Provides options for retrieving a job list for an Amazon Glacier vault. +type ListJobsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // Specifies the state of the jobs to return. You can specify true or false. + Completed *string `location:"querystring" locationName:"completed" type:"string"` + + // Specifies that the response be limited to the specified number of items or + // fewer. If not specified, the List Jobs operation returns up to 1,000 jobs. + Limit *string `location:"querystring" locationName:"limit" type:"string"` + + // An opaque string used for pagination. This value specifies the job at which + // the listing of jobs should begin. 
Get the marker value from a previous List + // Jobs response. You need only include the marker if you are continuing the + // pagination of results started in a previous List Jobs request. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specifies the type of job status to return. You can specify the following + // values: "InProgress", "Succeeded", or "Failed". + Statuscode *string `location:"querystring" locationName:"statuscode" type:"string"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type ListJobsOutput struct { + _ struct{} `type:"structure"` + + // A list of job objects. Each job object contains metadata describing the job. + JobList []*JobDescription `type:"list"` + + // An opaque string that represents where to continue pagination of the results. + // You use this value in a new List Jobs request to obtain more jobs in the + // list. If there are no more jobs, this value is null. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJobsOutput) GoString() string { + return s.String() +} + +// Provides options for retrieving list of in-progress multipart uploads for +// an Amazon Glacier vault. +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. 
+ // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // Specifies the maximum number of uploads returned in the response body. If + // this value is not specified, the List Uploads operation returns up to 1,000 + // uploads. + Limit *string `location:"querystring" locationName:"limit" type:"string"` + + // An opaque string used for pagination. This value specifies the upload at + // which the listing of uploads should begin. Get the marker value from a previous + // List Uploads response. You need only include the marker if you are continuing + // the pagination of results started in a previous List Uploads request. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // An opaque string that represents where to continue pagination of the results. + // You use the marker in a new List Multipart Uploads request to obtain more + // uploads in the list. If there are no more uploads, this value is null. + Marker *string `type:"string"` + + // A list of in-progress multipart uploads. 
+ UploadsList []*UploadListElement `type:"list"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// Provides options for retrieving a list of parts of an archive that have been +// uploaded in a specific multipart upload. +type ListPartsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // Specifies the maximum number of parts returned in the response body. If this + // value is not specified, the List Parts operation returns up to 1,000 uploads. + Limit *string `location:"querystring" locationName:"limit" type:"string"` + + // An opaque string used for pagination. This value specifies the part at which + // the listing of parts should begin. Get the marker value from the response + // of a previous List Parts response. You need only include the marker if you + // are continuing the pagination of results started in a previous List Parts + // request. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The upload ID of the multipart upload. + UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // The description of the archive that was specified in the Initiate Multipart + // Upload request. + ArchiveDescription *string `type:"string"` + + // The UTC time at which the multipart upload was initiated. + CreationDate *string `type:"string"` + + // An opaque string that represents where to continue pagination of the results. + // You use the marker in a new List Parts request to obtain more jobs in the + // list. If there are no more parts, this value is null. + Marker *string `type:"string"` + + // The ID of the upload to which the parts are associated. + MultipartUploadId *string `type:"string"` + + // The part size in bytes. + PartSizeInBytes *int64 `type:"long"` + + // A list of the part sizes of the multipart upload. + Parts []*PartListElement `type:"list"` + + // The Amazon Resource Name (ARN) of the vault to which the multipart upload + // was initiated. + VaultARN *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// The input value for ListTagsForVaultInput. +type ListTagsForVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. 
+ // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForVaultInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type ListTagsForVaultOutput struct { + _ struct{} `type:"structure"` + + // The tags attached to the vault. Each tag is composed of a key and a value. + Tags map[string]*string `type:"map"` +} + +// String returns the string representation +func (s ListTagsForVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForVaultOutput) GoString() string { + return s.String() +} + +// Provides options to retrieve the vault list owned by the calling user's account. +// The list provides metadata information for each vault. +type ListVaultsInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. 
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the List Vaults operation returns up to 1,000 items. + Limit *string `location:"querystring" locationName:"limit" type:"string"` + + // A string used for pagination. The marker specifies the vault ARN after which + // the listing of vaults should begin. + Marker *string `location:"querystring" locationName:"marker" type:"string"` +} + +// String returns the string representation +func (s ListVaultsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVaultsInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type ListVaultsOutput struct { + _ struct{} `type:"structure"` + + // The vault ARN at which to continue pagination of the results. You use the + // marker in another List Vaults request to obtain more vaults in the list. + Marker *string `type:"string"` + + // List of vaults. + VaultList []*DescribeVaultOutput `type:"list"` +} + +// String returns the string representation +func (s ListVaultsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVaultsOutput) GoString() string { + return s.String() +} + +// A list of the part sizes of the multipart upload. +type PartListElement struct { + _ struct{} `type:"structure"` + + // The byte range of a part, inclusive of the upper value of the range. + RangeInBytes *string `type:"string"` + + // The SHA256 tree hash value that Amazon Glacier calculated for the part. This + // field is never null. 
+ SHA256TreeHash *string `type:"string"` +} + +// String returns the string representation +func (s PartListElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PartListElement) GoString() string { + return s.String() +} + +// The input value for RemoveTagsFromVaultInput. +type RemoveTagsFromVaultInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // A list of tag keys. Each corresponding tag is removed from the vault. + TagKeys []*string `type:"list"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromVaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromVaultInput) GoString() string { + return s.String() +} + +type RemoveTagsFromVaultOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromVaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromVaultOutput) GoString() string { + return s.String() +} + +// SetDataRetrievalPolicy input. +type SetDataRetrievalPolicyInput struct { + _ struct{} `type:"structure"` + + // The AccountId value is the AWS account ID. This value must match the AWS + // account ID associated with the credentials used to sign the request. 
You + // can either specify an AWS account ID or optionally a single apos-apos (hyphen), + // in which case Amazon Glacier uses the AWS account ID associated with the + // credentials used to sign the request. If you specify your account ID, do + // not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The data retrieval policy in JSON format. + Policy *DataRetrievalPolicy `type:"structure"` +} + +// String returns the string representation +func (s SetDataRetrievalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDataRetrievalPolicyInput) GoString() string { + return s.String() +} + +type SetDataRetrievalPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDataRetrievalPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDataRetrievalPolicyOutput) GoString() string { + return s.String() +} + +// SetVaultAccessPolicy input. +type SetVaultAccessPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The vault access policy as a JSON string. + Policy *VaultAccessPolicy `locationName:"policy" type:"structure"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetVaultAccessPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVaultAccessPolicyInput) GoString() string { + return s.String() +} + +type SetVaultAccessPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetVaultAccessPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVaultAccessPolicyOutput) GoString() string { + return s.String() +} + +// Provides options to configure notifications that will be sent when specific +// events happen to a vault. +type SetVaultNotificationsInput struct { + _ struct{} `type:"structure" payload:"VaultNotificationConfig"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` + + // Provides options for specifying notification configuration. 
+ VaultNotificationConfig *VaultNotificationConfig `locationName:"vaultNotificationConfig" type:"structure"` +} + +// String returns the string representation +func (s SetVaultNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVaultNotificationsInput) GoString() string { + return s.String() +} + +type SetVaultNotificationsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetVaultNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetVaultNotificationsOutput) GoString() string { + return s.String() +} + +// Provides options to add an archive to a vault. +type UploadArchiveInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The optional description of the archive you are uploading. + ArchiveDescription *string `location:"header" locationName:"x-amz-archive-description" type:"string"` + + // The data to upload. + Body io.ReadSeeker `locationName:"body" type:"blob"` + + // The SHA256 tree hash of the data being uploaded. + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + // The name of the vault. 
+ VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadArchiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadArchiveInput) GoString() string { + return s.String() +} + +// A list of in-progress multipart uploads for a vault. +type UploadListElement struct { + _ struct{} `type:"structure"` + + // The description of the archive that was specified in the Initiate Multipart + // Upload request. + ArchiveDescription *string `type:"string"` + + // The UTC time at which the multipart upload was initiated. + CreationDate *string `type:"string"` + + // The ID of a multipart upload. + MultipartUploadId *string `type:"string"` + + // The part size, in bytes, specified in the Initiate Multipart Upload request. + // This is the size of all the parts in the upload except the last part, which + // may be smaller than this size. + PartSizeInBytes *int64 `type:"long"` + + // The Amazon Resource Name (ARN) of the vault that contains the archive. + VaultARN *string `type:"string"` +} + +// String returns the string representation +func (s UploadListElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadListElement) GoString() string { + return s.String() +} + +// Provides options to upload a part of an archive in a multipart upload operation. +type UploadMultipartPartInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The AccountId value is the AWS account ID of the account that owns the vault. + // You can either specify an AWS account ID or optionally a single apos-apos + // (hyphen), in which case Amazon Glacier uses the AWS account ID associated + // with the credentials used to sign the request. If you use an account ID, + // do not include any hyphens (apos-apos) in the ID. 
+ AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The data to upload. + Body io.ReadSeeker `locationName:"body" type:"blob"` + + // The SHA256 tree hash of the data being uploaded. + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` + + // Identifies the range of bytes in the assembled archive that will be uploaded + // in this part. Amazon Glacier uses this information to assemble the archive + // in the proper sequence. The format of this header follows RFC 2616. An example + // header is Content-Range:bytes 0-4194303/*. + Range *string `location:"header" locationName:"Content-Range" type:"string"` + + // The upload ID of the multipart upload. + UploadId *string `location:"uri" locationName:"uploadId" type:"string" required:"true"` + + // The name of the vault. + VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadMultipartPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadMultipartPartInput) GoString() string { + return s.String() +} + +// Contains the Amazon Glacier response to your request. +type UploadMultipartPartOutput struct { + _ struct{} `type:"structure"` + + // The SHA256 tree hash that Amazon Glacier computed for the uploaded part. + Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` +} + +// String returns the string representation +func (s UploadMultipartPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadMultipartPartOutput) GoString() string { + return s.String() +} + +// Contains the vault access policy. +type VaultAccessPolicy struct { + _ struct{} `type:"structure"` + + // The vault access policy. 
+ Policy *string `type:"string"` +} + +// String returns the string representation +func (s VaultAccessPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VaultAccessPolicy) GoString() string { + return s.String() +} + +// Contains the vault lock policy. +type VaultLockPolicy struct { + _ struct{} `type:"structure"` + + // The vault lock policy. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s VaultLockPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VaultLockPolicy) GoString() string { + return s.String() +} + +// Represents a vault's notification configuration. +type VaultNotificationConfig struct { + _ struct{} `type:"structure"` + + // A list of one or more events for which Amazon Glacier will send a notification + // to the specified Amazon SNS topic. + Events []*string `type:"list"` + + // The Amazon Simple Notification Service (Amazon SNS) topic Amazon Resource + // Name (ARN). 
+ SNSTopic *string `type:"string"` +} + +// String returns the string representation +func (s VaultNotificationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VaultNotificationConfig) GoString() string { + return s.String() +} + +const ( + // @enum ActionCode + ActionCodeArchiveRetrieval = "ArchiveRetrieval" + // @enum ActionCode + ActionCodeInventoryRetrieval = "InventoryRetrieval" +) + +const ( + // @enum StatusCode + StatusCodeInProgress = "InProgress" + // @enum StatusCode + StatusCodeSucceeded = "Succeeded" + // @enum StatusCode + StatusCodeFailed = "Failed" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,58 @@ +package glacier + +import ( + "encoding/hex" + "reflect" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var ( + defaultAccountID = "-" +) + +func init() { + initRequest = func(r *request.Request) { + r.Handlers.Validate.PushFront(addAccountID) + r.Handlers.Validate.PushFront(copyParams) // this happens first + r.Handlers.Build.PushBack(addChecksum) + r.Handlers.Build.PushBack(addAPIVersion) + } +} + +func copyParams(r *request.Request) { + r.Params = awsutil.CopyOf(r.Params) +} + +func addAccountID(r *request.Request) { + if !r.ParamsFilled() { + return + } + + v := reflect.Indirect(reflect.ValueOf(r.Params)) + if f := v.FieldByName("AccountId"); f.IsNil() { + f.Set(reflect.ValueOf(&defaultAccountID)) + } +} + +func addChecksum(r *request.Request) { + 
if r.Body == nil { + return + } + + h := ComputeHashes(r.Body) + + if r.HTTPRequest.Header.Get("X-Amz-Content-Sha256") == "" { + hstr := hex.EncodeToString(h.LinearHash) + r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", hstr) + } + if r.HTTPRequest.Header.Get("X-Amz-Sha256-Tree-Hash") == "" { + hstr := hex.EncodeToString(h.TreeHash) + r.HTTPRequest.Header.Set("X-Amz-Sha256-Tree-Hash", hstr) + } +} + +func addAPIVersion(r *request.Request) { + r.HTTPRequest.Header.Set("X-Amz-Glacier-Version", r.ClientInfo.APIVersion) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,77 @@ +// +build !integration + +package glacier_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/glacier" +) + +var ( + payloadBuf = func() *bytes.Reader { + buf := make([]byte, 5767168) // 5.5MB buffer + for i := range buf { + buf[i] = '0' // Fill with zero characters + } + return bytes.NewReader(buf) + }() + + svc = glacier.New(unit.Session) +) + +func TestCustomizations(t *testing.T) { + req, _ := svc.UploadArchiveRequest(&glacier.UploadArchiveInput{ + VaultName: aws.String("vault"), + Body: payloadBuf, + }) + err := req.Build() + assert.NoError(t, err) + + // Sets API version + assert.Equal(t, req.ClientInfo.APIVersion, req.HTTPRequest.Header.Get("x-amz-glacier-version")) + + // Sets Account ID + v, _ 
:= awsutil.ValuesAtPath(req.Params, "AccountId") + assert.Equal(t, "-", *(v[0].(*string))) + + // Computes checksums + linear := "68aff0c5a91aa0491752bfb96e3fef33eb74953804f6a2f7b708d5bcefa8ff6b" + tree := "154e26c78fd74d0c2c9b3cc4644191619dc4f2cd539ae2a74d5fd07957a3ee6a" + assert.Equal(t, linear, req.HTTPRequest.Header.Get("x-amz-content-sha256")) + assert.Equal(t, tree, req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} + +func TestShortcircuitTreehash(t *testing.T) { + req, _ := svc.UploadArchiveRequest(&glacier.UploadArchiveInput{ + VaultName: aws.String("vault"), + Body: payloadBuf, + Checksum: aws.String("000"), + }) + err := req.Build() + assert.NoError(t, err) + + assert.Equal(t, "000", req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} + +func TestFillAccountIDWithNilStruct(t *testing.T) { + req, _ := svc.ListVaultsRequest(nil) + err := req.Build() + assert.NoError(t, err) + + empty := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Sets Account ID + v, _ := awsutil.ValuesAtPath(req.Params, "AccountId") + assert.Equal(t, "-", *(v[0].(*string))) + + // Does not set tree hash + assert.Equal(t, empty, req.HTTPRequest.Header.Get("x-amz-content-sha256")) + assert.Equal(t, "", req.HTTPRequest.Header.Get("x-amz-sha256-tree-hash")) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,706 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package glacier_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/glacier" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleGlacier_AbortMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.AbortMultipartUploadInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.AbortMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_AbortVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.AbortVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.AbortVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_AddTagsToVault() { + svc := glacier.New(session.New()) + + params := &glacier.AddTagsToVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Tags: map[string]*string{ + "Key": aws.String("TagValue"), // Required + // More values... + }, + } + resp, err := svc.AddTagsToVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_CompleteMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.CompleteMultipartUploadInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveSize: aws.String("string"), + Checksum: aws.String("string"), + } + resp, err := svc.CompleteMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_CompleteVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.CompleteVaultLockInput{ + AccountId: aws.String("string"), // Required + LockId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.CompleteVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_CreateVault() { + svc := glacier.New(session.New()) + + params := &glacier.CreateVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.CreateVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_DeleteArchive() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteArchiveInput{ + AccountId: aws.String("string"), // Required + ArchiveId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVault() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DeleteVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.DeleteVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DeleteVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DescribeJob() { + svc := glacier.New(session.New()) + + params := &glacier.DescribeJobInput{ + AccountId: aws.String("string"), // Required + JobId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DescribeJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_DescribeVault() { + svc := glacier.New(session.New()) + + params := &glacier.DescribeVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.DescribeVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetDataRetrievalPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.GetDataRetrievalPolicyInput{ + AccountId: aws.String("string"), // Required + } + resp, err := svc.GetDataRetrievalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetJobOutput() { + svc := glacier.New(session.New()) + + params := &glacier.GetJobOutputInput{ + AccountId: aws.String("string"), // Required + JobId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Range: aws.String("string"), + } + resp, err := svc.GetJobOutput(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_GetVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.GetVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.GetVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_InitiateJob() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateJobInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + JobParameters: &glacier.JobParameters{ + ArchiveId: aws.String("string"), + Description: aws.String("string"), + Format: aws.String("string"), + InventoryRetrievalParameters: &glacier.InventoryRetrievalJobInput{ + EndDate: aws.String("string"), + Limit: aws.String("string"), + Marker: aws.String("string"), + StartDate: aws.String("string"), + }, + RetrievalByteRange: aws.String("string"), + SNSTopic: aws.String("string"), + Type: aws.String("string"), + }, + } + resp, err := svc.InitiateJob(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_InitiateMultipartUpload() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateMultipartUploadInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveDescription: aws.String("string"), + PartSize: aws.String("string"), + } + resp, err := svc.InitiateMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_InitiateVaultLock() { + svc := glacier.New(session.New()) + + params := &glacier.InitiateVaultLockInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Policy: &glacier.VaultLockPolicy{ + Policy: aws.String("string"), + }, + } + resp, err := svc.InitiateVaultLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListJobs() { + svc := glacier.New(session.New()) + + params := &glacier.ListJobsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Completed: aws.String("string"), + Limit: aws.String("string"), + Marker: aws.String("string"), + Statuscode: aws.String("string"), + } + resp, err := svc.ListJobs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListMultipartUploads() { + svc := glacier.New(session.New()) + + params := &glacier.ListMultipartUploadsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListMultipartUploads(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListParts() { + svc := glacier.New(session.New()) + + params := &glacier.ListPartsInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListParts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_ListTagsForVault() { + svc := glacier.New(session.New()) + + params := &glacier.ListTagsForVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + } + resp, err := svc.ListTagsForVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_ListVaults() { + svc := glacier.New(session.New()) + + params := &glacier.ListVaultsInput{ + AccountId: aws.String("string"), // Required + Limit: aws.String("string"), + Marker: aws.String("string"), + } + resp, err := svc.ListVaults(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_RemoveTagsFromVault() { + svc := glacier.New(session.New()) + + params := &glacier.RemoveTagsFromVaultInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + TagKeys: []*string{ + aws.String("string"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromVault(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_SetDataRetrievalPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.SetDataRetrievalPolicyInput{ + AccountId: aws.String("string"), // Required + Policy: &glacier.DataRetrievalPolicy{ + Rules: []*glacier.DataRetrievalRule{ + { // Required + BytesPerHour: aws.Int64(1), + Strategy: aws.String("string"), + }, + // More values... 
+ }, + }, + } + resp, err := svc.SetDataRetrievalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_SetVaultAccessPolicy() { + svc := glacier.New(session.New()) + + params := &glacier.SetVaultAccessPolicyInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Policy: &glacier.VaultAccessPolicy{ + Policy: aws.String("string"), + }, + } + resp, err := svc.SetVaultAccessPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_SetVaultNotifications() { + svc := glacier.New(session.New()) + + params := &glacier.SetVaultNotificationsInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + VaultNotificationConfig: &glacier.VaultNotificationConfig{ + Events: []*string{ + aws.String("string"), // Required + // More values... + }, + SNSTopic: aws.String("string"), + }, + } + resp, err := svc.SetVaultNotifications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleGlacier_UploadArchive() { + svc := glacier.New(session.New()) + + params := &glacier.UploadArchiveInput{ + AccountId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + ArchiveDescription: aws.String("string"), + Body: bytes.NewReader([]byte("PAYLOAD")), + Checksum: aws.String("string"), + } + resp, err := svc.UploadArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleGlacier_UploadMultipartPart() { + svc := glacier.New(session.New()) + + params := &glacier.UploadMultipartPartInput{ + AccountId: aws.String("string"), // Required + UploadId: aws.String("string"), // Required + VaultName: aws.String("string"), // Required + Body: bytes.NewReader([]byte("PAYLOAD")), + Checksum: aws.String("string"), + Range: aws.String("string"), + } + resp, err := svc.UploadMultipartPart(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/glacieriface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,146 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package glacieriface provides an interface for the Amazon Glacier. 
+package glacieriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/glacier" +) + +// GlacierAPI is the interface type for glacier.Glacier. +type GlacierAPI interface { + AbortMultipartUploadRequest(*glacier.AbortMultipartUploadInput) (*request.Request, *glacier.AbortMultipartUploadOutput) + + AbortMultipartUpload(*glacier.AbortMultipartUploadInput) (*glacier.AbortMultipartUploadOutput, error) + + AbortVaultLockRequest(*glacier.AbortVaultLockInput) (*request.Request, *glacier.AbortVaultLockOutput) + + AbortVaultLock(*glacier.AbortVaultLockInput) (*glacier.AbortVaultLockOutput, error) + + AddTagsToVaultRequest(*glacier.AddTagsToVaultInput) (*request.Request, *glacier.AddTagsToVaultOutput) + + AddTagsToVault(*glacier.AddTagsToVaultInput) (*glacier.AddTagsToVaultOutput, error) + + CompleteMultipartUploadRequest(*glacier.CompleteMultipartUploadInput) (*request.Request, *glacier.ArchiveCreationOutput) + + CompleteMultipartUpload(*glacier.CompleteMultipartUploadInput) (*glacier.ArchiveCreationOutput, error) + + CompleteVaultLockRequest(*glacier.CompleteVaultLockInput) (*request.Request, *glacier.CompleteVaultLockOutput) + + CompleteVaultLock(*glacier.CompleteVaultLockInput) (*glacier.CompleteVaultLockOutput, error) + + CreateVaultRequest(*glacier.CreateVaultInput) (*request.Request, *glacier.CreateVaultOutput) + + CreateVault(*glacier.CreateVaultInput) (*glacier.CreateVaultOutput, error) + + DeleteArchiveRequest(*glacier.DeleteArchiveInput) (*request.Request, *glacier.DeleteArchiveOutput) + + DeleteArchive(*glacier.DeleteArchiveInput) (*glacier.DeleteArchiveOutput, error) + + DeleteVaultRequest(*glacier.DeleteVaultInput) (*request.Request, *glacier.DeleteVaultOutput) + + DeleteVault(*glacier.DeleteVaultInput) (*glacier.DeleteVaultOutput, error) + + DeleteVaultAccessPolicyRequest(*glacier.DeleteVaultAccessPolicyInput) (*request.Request, *glacier.DeleteVaultAccessPolicyOutput) + + 
DeleteVaultAccessPolicy(*glacier.DeleteVaultAccessPolicyInput) (*glacier.DeleteVaultAccessPolicyOutput, error) + + DeleteVaultNotificationsRequest(*glacier.DeleteVaultNotificationsInput) (*request.Request, *glacier.DeleteVaultNotificationsOutput) + + DeleteVaultNotifications(*glacier.DeleteVaultNotificationsInput) (*glacier.DeleteVaultNotificationsOutput, error) + + DescribeJobRequest(*glacier.DescribeJobInput) (*request.Request, *glacier.JobDescription) + + DescribeJob(*glacier.DescribeJobInput) (*glacier.JobDescription, error) + + DescribeVaultRequest(*glacier.DescribeVaultInput) (*request.Request, *glacier.DescribeVaultOutput) + + DescribeVault(*glacier.DescribeVaultInput) (*glacier.DescribeVaultOutput, error) + + GetDataRetrievalPolicyRequest(*glacier.GetDataRetrievalPolicyInput) (*request.Request, *glacier.GetDataRetrievalPolicyOutput) + + GetDataRetrievalPolicy(*glacier.GetDataRetrievalPolicyInput) (*glacier.GetDataRetrievalPolicyOutput, error) + + GetJobOutputRequest(*glacier.GetJobOutputInput) (*request.Request, *glacier.GetJobOutputOutput) + + GetJobOutput(*glacier.GetJobOutputInput) (*glacier.GetJobOutputOutput, error) + + GetVaultAccessPolicyRequest(*glacier.GetVaultAccessPolicyInput) (*request.Request, *glacier.GetVaultAccessPolicyOutput) + + GetVaultAccessPolicy(*glacier.GetVaultAccessPolicyInput) (*glacier.GetVaultAccessPolicyOutput, error) + + GetVaultLockRequest(*glacier.GetVaultLockInput) (*request.Request, *glacier.GetVaultLockOutput) + + GetVaultLock(*glacier.GetVaultLockInput) (*glacier.GetVaultLockOutput, error) + + GetVaultNotificationsRequest(*glacier.GetVaultNotificationsInput) (*request.Request, *glacier.GetVaultNotificationsOutput) + + GetVaultNotifications(*glacier.GetVaultNotificationsInput) (*glacier.GetVaultNotificationsOutput, error) + + InitiateJobRequest(*glacier.InitiateJobInput) (*request.Request, *glacier.InitiateJobOutput) + + InitiateJob(*glacier.InitiateJobInput) (*glacier.InitiateJobOutput, error) + + 
InitiateMultipartUploadRequest(*glacier.InitiateMultipartUploadInput) (*request.Request, *glacier.InitiateMultipartUploadOutput) + + InitiateMultipartUpload(*glacier.InitiateMultipartUploadInput) (*glacier.InitiateMultipartUploadOutput, error) + + InitiateVaultLockRequest(*glacier.InitiateVaultLockInput) (*request.Request, *glacier.InitiateVaultLockOutput) + + InitiateVaultLock(*glacier.InitiateVaultLockInput) (*glacier.InitiateVaultLockOutput, error) + + ListJobsRequest(*glacier.ListJobsInput) (*request.Request, *glacier.ListJobsOutput) + + ListJobs(*glacier.ListJobsInput) (*glacier.ListJobsOutput, error) + + ListJobsPages(*glacier.ListJobsInput, func(*glacier.ListJobsOutput, bool) bool) error + + ListMultipartUploadsRequest(*glacier.ListMultipartUploadsInput) (*request.Request, *glacier.ListMultipartUploadsOutput) + + ListMultipartUploads(*glacier.ListMultipartUploadsInput) (*glacier.ListMultipartUploadsOutput, error) + + ListMultipartUploadsPages(*glacier.ListMultipartUploadsInput, func(*glacier.ListMultipartUploadsOutput, bool) bool) error + + ListPartsRequest(*glacier.ListPartsInput) (*request.Request, *glacier.ListPartsOutput) + + ListParts(*glacier.ListPartsInput) (*glacier.ListPartsOutput, error) + + ListPartsPages(*glacier.ListPartsInput, func(*glacier.ListPartsOutput, bool) bool) error + + ListTagsForVaultRequest(*glacier.ListTagsForVaultInput) (*request.Request, *glacier.ListTagsForVaultOutput) + + ListTagsForVault(*glacier.ListTagsForVaultInput) (*glacier.ListTagsForVaultOutput, error) + + ListVaultsRequest(*glacier.ListVaultsInput) (*request.Request, *glacier.ListVaultsOutput) + + ListVaults(*glacier.ListVaultsInput) (*glacier.ListVaultsOutput, error) + + ListVaultsPages(*glacier.ListVaultsInput, func(*glacier.ListVaultsOutput, bool) bool) error + + RemoveTagsFromVaultRequest(*glacier.RemoveTagsFromVaultInput) (*request.Request, *glacier.RemoveTagsFromVaultOutput) + + RemoveTagsFromVault(*glacier.RemoveTagsFromVaultInput) 
(*glacier.RemoveTagsFromVaultOutput, error) + + SetDataRetrievalPolicyRequest(*glacier.SetDataRetrievalPolicyInput) (*request.Request, *glacier.SetDataRetrievalPolicyOutput) + + SetDataRetrievalPolicy(*glacier.SetDataRetrievalPolicyInput) (*glacier.SetDataRetrievalPolicyOutput, error) + + SetVaultAccessPolicyRequest(*glacier.SetVaultAccessPolicyInput) (*request.Request, *glacier.SetVaultAccessPolicyOutput) + + SetVaultAccessPolicy(*glacier.SetVaultAccessPolicyInput) (*glacier.SetVaultAccessPolicyOutput, error) + + SetVaultNotificationsRequest(*glacier.SetVaultNotificationsInput) (*request.Request, *glacier.SetVaultNotificationsOutput) + + SetVaultNotifications(*glacier.SetVaultNotificationsInput) (*glacier.SetVaultNotificationsOutput, error) + + UploadArchiveRequest(*glacier.UploadArchiveInput) (*request.Request, *glacier.ArchiveCreationOutput) + + UploadArchive(*glacier.UploadArchiveInput) (*glacier.ArchiveCreationOutput, error) + + UploadMultipartPartRequest(*glacier.UploadMultipartPartInput) (*request.Request, *glacier.UploadMultipartPartOutput) + + UploadMultipartPart(*glacier.UploadMultipartPartInput) (*glacier.UploadMultipartPartOutput, error) +} + +var _ GlacierAPI = (*glacier.Glacier)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,116 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package glacier + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Glacier is a storage solution for "cold data." +// +// Amazon Glacier is an extremely low-cost storage service that provides secure, +// durable, and easy-to-use storage for data backup and archival. With Amazon +// Glacier, customers can store their data cost effectively for months, years, +// or decades. Amazon Glacier also enables customers to offload the administrative +// burdens of operating and scaling storage to AWS, so they don't have to worry +// about capacity planning, hardware provisioning, data replication, hardware +// failure and recovery, or time-consuming hardware migrations. +// +// Amazon Glacier is a great storage choice when low storage cost is paramount, +// your data is rarely retrieved, and retrieval latency of several hours is +// acceptable. If your application requires fast or frequent access to your +// data, consider using Amazon S3. For more information, go to Amazon Simple +// Storage Service (Amazon S3) (http://aws.amazon.com/s3/). +// +// You can store any kind of data in any format. There is no maximum limit +// on the total amount of data you can store in Amazon Glacier. +// +// If you are a first-time user of Amazon Glacier, we recommend that you begin +// by reading the following sections in the Amazon Glacier Developer Guide: +// +// What is Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html) +// - This section of the Developer Guide describes the underlying data model, +// the operations it supports, and the AWS SDKs that you can use to interact +// with the service. 
+// +// Getting Started with Amazon Glacier (http://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html) +// - The Getting Started section walks you through the process of creating a +// vault, uploading archives, creating jobs to download archives, retrieving +// the job output, and deleting archives. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Glacier struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "glacier" + +// New creates a new instance of the Glacier client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Glacier client from just a session. +// svc := glacier.New(mySession) +// +// // Create a Glacier client with additional configuration +// svc := glacier.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glacier { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Glacier { + svc := &Glacier{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-06-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Glacier operation and runs any +// custom request initialization. +func (c *Glacier) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/treehash.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/treehash.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/treehash.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/treehash.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,71 @@ +package glacier + +import ( + "crypto/sha256" + "io" +) + +const bufsize = 1024 * 1024 + +// Hash contains information about the tree-hash and linear hash of a +// Glacier payload. This structure is generated by ComputeHashes(). 
+type Hash struct { + TreeHash []byte + LinearHash []byte +} + +// ComputeHashes computes the tree-hash and linear hash of a seekable reader r. +func ComputeHashes(r io.ReadSeeker) Hash { + r.Seek(0, 0) // Read the whole stream + defer r.Seek(0, 0) // Rewind stream at end + + buf := make([]byte, bufsize) + hashes := [][]byte{} + hsh := sha256.New() + + for { + // Build leaf nodes in 1MB chunks + n, err := io.ReadAtLeast(r, buf, bufsize) + if n == 0 { + break + } + + tmpHash := sha256.Sum256(buf[:n]) + hashes = append(hashes, tmpHash[:]) + hsh.Write(buf[:n]) // Track linear hash while we're at it + + if err != nil { + break // This is the last chunk + } + } + + return Hash{ + LinearHash: hsh.Sum(nil), + TreeHash: buildHashTree(hashes), + } +} + +// buildHashTree builds a hash tree root node given a set of hashes. +func buildHashTree(hashes [][]byte) []byte { + if hashes == nil || len(hashes) == 0 { + return nil + } + + for len(hashes) > 1 { + tmpHashes := [][]byte{} + + for i := 0; i < len(hashes); i += 2 { + if i+1 <= len(hashes)-1 { + tmpHash := append(append([]byte{}, hashes[i]...), hashes[i+1]...) 
+ tmpSum := sha256.Sum256(tmpHash) + tmpHashes = append(tmpHashes, tmpSum[:]) + } else { + tmpHashes = append(tmpHashes, hashes[i]) + } + } + + hashes = tmpHashes + } + + return hashes[0] +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/treehash_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,28 @@ +package glacier_test + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/service/glacier" +) + +func ExampleComputeHashes() { + buf := make([]byte, 5767168) // 5.5MB buffer + for i := range buf { + buf[i] = '0' // Fill with zero characters + } + + r := bytes.NewReader(buf) + h := glacier.ComputeHashes(r) + n, _ := r.Seek(0, 1) // Check position after checksumming + + fmt.Printf("linear: %x\n", h.LinearHash) + fmt.Printf("tree: %x\n", h.TreeHash) + fmt.Printf("pos: %d\n", n) + + // Output: + // linear: 68aff0c5a91aa0491752bfb96e3fef33eb74953804f6a2f7b708d5bcefa8ff6b + // tree: 154e26c78fd74d0c2c9b3cc4644191619dc4f2cd539ae2a74d5fd07957a3ee6a + // pos: 0 +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/glacier/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,65 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package glacier + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *Glacier) WaitUntilVaultExists(input *DescribeVaultInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVault", + Delay: 3, + MaxAttempts: 15, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *Glacier) WaitUntilVaultNotExists(input *DescribeVaultInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVault", + Delay: 3, + MaxAttempts: 15, + Acceptors: []waiter.WaitAcceptor{ + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,11270 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iam provides a client for AWS Identity and Access Management. 
+package iam + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddClientIDToOpenIDConnectProvider = "AddClientIDToOpenIDConnectProvider" + +// AddClientIDToOpenIDConnectProviderRequest generates a request for the AddClientIDToOpenIDConnectProvider operation. +func (c *IAM) AddClientIDToOpenIDConnectProviderRequest(input *AddClientIDToOpenIDConnectProviderInput) (req *request.Request, output *AddClientIDToOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opAddClientIDToOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddClientIDToOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddClientIDToOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Adds a new client ID (also known as audience) to the list of client IDs already +// registered for the specified IAM OpenID Connect provider. +// +// This action is idempotent; it does not fail or return an error if you add +// an existing client ID to the provider. +func (c *IAM) AddClientIDToOpenIDConnectProvider(input *AddClientIDToOpenIDConnectProviderInput) (*AddClientIDToOpenIDConnectProviderOutput, error) { + req, out := c.AddClientIDToOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opAddRoleToInstanceProfile = "AddRoleToInstanceProfile" + +// AddRoleToInstanceProfileRequest generates a request for the AddRoleToInstanceProfile operation. 
+func (c *IAM) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInput) (req *request.Request, output *AddRoleToInstanceProfileOutput) { + op := &request.Operation{ + Name: opAddRoleToInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddRoleToInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddRoleToInstanceProfileOutput{} + req.Data = output + return +} + +// Adds the specified role to the specified instance profile. For more information +// about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +func (c *IAM) AddRoleToInstanceProfile(input *AddRoleToInstanceProfileInput) (*AddRoleToInstanceProfileOutput, error) { + req, out := c.AddRoleToInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opAddUserToGroup = "AddUserToGroup" + +// AddUserToGroupRequest generates a request for the AddUserToGroup operation. +func (c *IAM) AddUserToGroupRequest(input *AddUserToGroupInput) (req *request.Request, output *AddUserToGroupOutput) { + op := &request.Operation{ + Name: opAddUserToGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddUserToGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddUserToGroupOutput{} + req.Data = output + return +} + +// Adds the specified user to the specified group. 
+func (c *IAM) AddUserToGroup(input *AddUserToGroupInput) (*AddUserToGroupOutput, error) { + req, out := c.AddUserToGroupRequest(input) + err := req.Send() + return out, err +} + +const opAttachGroupPolicy = "AttachGroupPolicy" + +// AttachGroupPolicyRequest generates a request for the AttachGroupPolicy operation. +func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *request.Request, output *AttachGroupPolicyOutput) { + op := &request.Operation{ + Name: opAttachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachGroupPolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified group. +// +// You use this API to attach a managed policy to a group. To embed an inline +// policy in a group, use PutGroupPolicy. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) AttachGroupPolicy(input *AttachGroupPolicyInput) (*AttachGroupPolicyOutput, error) { + req, out := c.AttachGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opAttachRolePolicy = "AttachRolePolicy" + +// AttachRolePolicyRequest generates a request for the AttachRolePolicy operation. 
+func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *request.Request, output *AttachRolePolicyOutput) { + op := &request.Operation{ + Name: opAttachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachRolePolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified role. +// +// When you attach a managed policy to a role, the managed policy is used as +// the role's access (permissions) policy. You cannot use a managed policy as +// the role's trust policy. The role's trust policy is created at the same time +// as the role, using CreateRole. You can update a role's trust policy using +// UpdateAssumeRolePolicy. +// +// Use this API to attach a managed policy to a role. To embed an inline policy +// in a role, use PutRolePolicy. For more information about policies, refer +// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) AttachRolePolicy(input *AttachRolePolicyInput) (*AttachRolePolicyOutput, error) { + req, out := c.AttachRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opAttachUserPolicy = "AttachUserPolicy" + +// AttachUserPolicyRequest generates a request for the AttachUserPolicy operation. 
+func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *request.Request, output *AttachUserPolicyOutput) { + op := &request.Operation{ + Name: opAttachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachUserPolicyOutput{} + req.Data = output + return +} + +// Attaches the specified managed policy to the specified user. +// +// You use this API to attach a managed policy to a user. To embed an inline +// policy in a user, use PutUserPolicy. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) AttachUserPolicy(input *AttachUserPolicyInput) (*AttachUserPolicyOutput, error) { + req, out := c.AttachUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opChangePassword = "ChangePassword" + +// ChangePasswordRequest generates a request for the ChangePassword operation. +func (c *IAM) ChangePasswordRequest(input *ChangePasswordInput) (req *request.Request, output *ChangePasswordOutput) { + op := &request.Operation{ + Name: opChangePassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangePasswordInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ChangePasswordOutput{} + req.Data = output + return +} + +// Changes the password of the IAM user who is calling this action. The root +// account password is not affected by this action. +// +// To change the password for a different user, see UpdateLoginProfile. 
For +// more information about modifying passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +func (c *IAM) ChangePassword(input *ChangePasswordInput) (*ChangePasswordOutput, error) { + req, out := c.ChangePasswordRequest(input) + err := req.Send() + return out, err +} + +const opCreateAccessKey = "CreateAccessKey" + +// CreateAccessKeyRequest generates a request for the CreateAccessKey operation. +func (c *IAM) CreateAccessKeyRequest(input *CreateAccessKeyInput) (req *request.Request, output *CreateAccessKeyOutput) { + op := &request.Operation{ + Name: opCreateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccessKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAccessKeyOutput{} + req.Data = output + return +} + +// Creates a new AWS secret access key and corresponding AWS access key ID for +// the specified user. The default status for new keys is Active. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +// +// For information about limits on the number of keys you can create, see +// Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. You must save the key (for example, in +// a text file) if you want to be able to access it again. If a secret key is +// lost, you can delete the access keys for the associated user and then create +// new keys. 
+func (c *IAM) CreateAccessKey(input *CreateAccessKeyInput) (*CreateAccessKeyOutput, error) { + req, out := c.CreateAccessKeyRequest(input) + err := req.Send() + return out, err +} + +const opCreateAccountAlias = "CreateAccountAlias" + +// CreateAccountAliasRequest generates a request for the CreateAccountAlias operation. +func (c *IAM) CreateAccountAliasRequest(input *CreateAccountAliasInput) (req *request.Request, output *CreateAccountAliasOutput) { + op := &request.Operation{ + Name: opCreateAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccountAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateAccountAliasOutput{} + req.Data = output + return +} + +// Creates an alias for your AWS account. For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +func (c *IAM) CreateAccountAlias(input *CreateAccountAliasInput) (*CreateAccountAliasOutput, error) { + req, out := c.CreateAccountAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateGroup = "CreateGroup" + +// CreateGroupRequest generates a request for the CreateGroup operation. +func (c *IAM) CreateGroupRequest(input *CreateGroupInput) (req *request.Request, output *CreateGroupOutput) { + op := &request.Operation{ + Name: opCreateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateGroupOutput{} + req.Data = output + return +} + +// Creates a new group. 
+// +// For information about the number of groups you can create, see Limitations +// on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateGroup(input *CreateGroupInput) (*CreateGroupOutput, error) { + req, out := c.CreateGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstanceProfile = "CreateInstanceProfile" + +// CreateInstanceProfileRequest generates a request for the CreateInstanceProfile operation. +func (c *IAM) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) (req *request.Request, output *CreateInstanceProfileOutput) { + op := &request.Operation{ + Name: opCreateInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceProfileOutput{} + req.Data = output + return +} + +// Creates a new instance profile. For information about instance profiles, +// go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// For information about the number of instance profiles you can create, see +// Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateInstanceProfile(input *CreateInstanceProfileInput) (*CreateInstanceProfileOutput, error) { + req, out := c.CreateInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opCreateLoginProfile = "CreateLoginProfile" + +// CreateLoginProfileRequest generates a request for the CreateLoginProfile operation. 
+func (c *IAM) CreateLoginProfileRequest(input *CreateLoginProfileInput) (req *request.Request, output *CreateLoginProfileOutput) { + op := &request.Operation{ + Name: opCreateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLoginProfileOutput{} + req.Data = output + return +} + +// Creates a password for the specified user, giving the user the ability to +// access AWS services through the AWS Management Console. For more information +// about managing passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the Using IAM guide. +func (c *IAM) CreateLoginProfile(input *CreateLoginProfileInput) (*CreateLoginProfileOutput, error) { + req, out := c.CreateLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opCreateOpenIDConnectProvider = "CreateOpenIDConnectProvider" + +// CreateOpenIDConnectProviderRequest generates a request for the CreateOpenIDConnectProvider operation. +func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProviderInput) (req *request.Request, output *CreateOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opCreateOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Creates an IAM entity to describe an identity provider (IdP) that supports +// OpenID Connect (OIDC) (http://openid.net/connect/). +// +// The OIDC provider that you create with this operation can be used as a principal +// in a role's trust policy to establish a trust relationship between AWS and +// the OIDC provider. 
+// +// When you create the IAM OIDC provider, you specify the URL of the OIDC identity +// provider (IdP) to trust, a list of client IDs (also known as audiences) that +// identify the application or applications that are allowed to authenticate +// using the OIDC provider, and a list of thumbprints of the server certificate(s) +// that the IdP uses. You get all of this information from the OIDC IdP that +// you want to use for access to AWS. +// +// Because trust for the OIDC provider is ultimately derived from the IAM provider +// that this action creates, it is a best practice to limit access to the CreateOpenIDConnectProvider +// action to highly-privileged users. +func (c *IAM) CreateOpenIDConnectProvider(input *CreateOpenIDConnectProviderInput) (*CreateOpenIDConnectProviderOutput, error) { + req, out := c.CreateOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicy = "CreatePolicy" + +// CreatePolicyRequest generates a request for the CreatePolicy operation. +func (c *IAM) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Request, output *CreatePolicyOutput) { + op := &request.Operation{ + Name: opCreatePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyOutput{} + req.Data = output + return +} + +// Creates a new managed policy for your AWS account. +// +// This operation creates a policy version with a version identifier of v1 +// and sets v1 as the policy's default version. For more information about policy +// versions, see Versioning for Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// For more information about managed policies in general, refer to Managed +// Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. 
+func (c *IAM) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error) { + req, out := c.CreatePolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicyVersion = "CreatePolicyVersion" + +// CreatePolicyVersionRequest generates a request for the CreatePolicyVersion operation. +func (c *IAM) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req *request.Request, output *CreatePolicyVersionOutput) { + op := &request.Operation{ + Name: opCreatePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of the specified managed policy. To update a managed +// policy, you create a new policy version. A managed policy can have up to +// five versions. If the policy has five versions, you must delete an existing +// version using DeletePolicyVersion before you create a new version. +// +// Optionally, you can set the new version as the policy's default version. +// The default version is the operative version; that is, the version that is +// in effect for the IAM users, groups, and roles that the policy is attached +// to. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +func (c *IAM) CreatePolicyVersion(input *CreatePolicyVersionInput) (*CreatePolicyVersionOutput, error) { + req, out := c.CreatePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opCreateRole = "CreateRole" + +// CreateRoleRequest generates a request for the CreateRole operation. 
+func (c *IAM) CreateRoleRequest(input *CreateRoleInput) (req *request.Request, output *CreateRoleOutput) { + op := &request.Operation{ + Name: opCreateRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRoleOutput{} + req.Data = output + return +} + +// Creates a new role for your AWS account. For more information about roles, +// go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For information about limitations on role names and the number of roles you +// can create, go to Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +func (c *IAM) CreateRole(input *CreateRoleInput) (*CreateRoleOutput, error) { + req, out := c.CreateRoleRequest(input) + err := req.Send() + return out, err +} + +const opCreateSAMLProvider = "CreateSAMLProvider" + +// CreateSAMLProviderRequest generates a request for the CreateSAMLProvider operation. +func (c *IAM) CreateSAMLProviderRequest(input *CreateSAMLProviderInput) (req *request.Request, output *CreateSAMLProviderOutput) { + op := &request.Operation{ + Name: opCreateSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSAMLProviderOutput{} + req.Data = output + return +} + +// Creates an IAM entity to describe an identity provider (IdP) that supports +// SAML 2.0. +// +// The SAML provider that you create with this operation can be used as a +// principal in a role's trust policy to establish a trust relationship between +// AWS and a SAML identity provider. You can create an IAM role that supports +// Web-based single sign-on (SSO) to the AWS Management Console or one that +// supports API access to AWS. 
+// +// When you create the SAML provider, you upload an a SAML metadata document +// that you get from your IdP and that includes the issuer's name, expiration +// information, and keys that can be used to validate the SAML authentication +// response (assertions) that are received from the IdP. You must generate the +// metadata document using the identity management software that is used as +// your organization's IdP. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// For more information, see Enabling SAML 2.0 Federated Users to Access the +// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) +// and About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +func (c *IAM) CreateSAMLProvider(input *CreateSAMLProviderInput) (*CreateSAMLProviderOutput, error) { + req, out := c.CreateSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opCreateUser = "CreateUser" + +// CreateUserRequest generates a request for the CreateUser operation. +func (c *IAM) CreateUserRequest(input *CreateUserInput) (req *request.Request, output *CreateUserOutput) { + op := &request.Operation{ + Name: opCreateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUserOutput{} + req.Data = output + return +} + +// Creates a new user for your AWS account. +// +// For information about limitations on the number of users you can create, +// see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. 
+func (c *IAM) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) + err := req.Send() + return out, err +} + +const opCreateVirtualMFADevice = "CreateVirtualMFADevice" + +// CreateVirtualMFADeviceRequest generates a request for the CreateVirtualMFADevice operation. +func (c *IAM) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) (req *request.Request, output *CreateVirtualMFADeviceOutput) { + op := &request.Operation{ + Name: opCreateVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVirtualMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateVirtualMFADeviceOutput{} + req.Data = output + return +} + +// Creates a new virtual MFA device for the AWS account. After creating the +// virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the Using IAM guide. +// +// For information about limits on the number of MFA devices you can create, +// see Limitations on Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the Using IAM guide. +// +// The seed information contained in the QR code and the Base32 string should +// be treated like any other secret access information, such as your AWS access +// keys or your passwords. After you provision your virtual device, you should +// ensure that the information is destroyed following secure procedures. 
+func (c *IAM) CreateVirtualMFADevice(input *CreateVirtualMFADeviceInput) (*CreateVirtualMFADeviceOutput, error) { + req, out := c.CreateVirtualMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDeactivateMFADevice = "DeactivateMFADevice" + +// DeactivateMFADeviceRequest generates a request for the DeactivateMFADevice operation. +func (c *IAM) DeactivateMFADeviceRequest(input *DeactivateMFADeviceInput) (req *request.Request, output *DeactivateMFADeviceOutput) { + op := &request.Operation{ + Name: opDeactivateMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeactivateMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeactivateMFADeviceOutput{} + req.Data = output + return +} + +// Deactivates the specified MFA device and removes it from association with +// the user name for which it was originally enabled. +// +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the Using IAM guide. +func (c *IAM) DeactivateMFADevice(input *DeactivateMFADeviceInput) (*DeactivateMFADeviceOutput, error) { + req, out := c.DeactivateMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccessKey = "DeleteAccessKey" + +// DeleteAccessKeyRequest generates a request for the DeleteAccessKey operation. 
+func (c *IAM) DeleteAccessKeyRequest(input *DeleteAccessKeyInput) (req *request.Request, output *DeleteAccessKeyOutput) { + op := &request.Operation{ + Name: opDeleteAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccessKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAccessKeyOutput{} + req.Data = output + return +} + +// Deletes the access key associated with the specified user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +func (c *IAM) DeleteAccessKey(input *DeleteAccessKeyInput) (*DeleteAccessKeyOutput, error) { + req, out := c.DeleteAccessKeyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccountAlias = "DeleteAccountAlias" + +// DeleteAccountAliasRequest generates a request for the DeleteAccountAlias operation. +func (c *IAM) DeleteAccountAliasRequest(input *DeleteAccountAliasInput) (req *request.Request, output *DeleteAccountAliasOutput) { + op := &request.Operation{ + Name: opDeleteAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAccountAliasOutput{} + req.Data = output + return +} + +// Deletes the specified AWS account alias. 
For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +func (c *IAM) DeleteAccountAlias(input *DeleteAccountAliasInput) (*DeleteAccountAliasOutput, error) { + req, out := c.DeleteAccountAliasRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAccountPasswordPolicy = "DeleteAccountPasswordPolicy" + +// DeleteAccountPasswordPolicyRequest generates a request for the DeleteAccountPasswordPolicy operation. +func (c *IAM) DeleteAccountPasswordPolicyRequest(input *DeleteAccountPasswordPolicyInput) (req *request.Request, output *DeleteAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opDeleteAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Deletes the password policy for the AWS account. +func (c *IAM) DeleteAccountPasswordPolicy(input *DeleteAccountPasswordPolicyInput) (*DeleteAccountPasswordPolicyOutput, error) { + req, out := c.DeleteAccountPasswordPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteGroup = "DeleteGroup" + +// DeleteGroupRequest generates a request for the DeleteGroup operation. 
+func (c *IAM) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, output *DeleteGroupOutput) { + op := &request.Operation{ + Name: opDeleteGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteGroupOutput{} + req.Data = output + return +} + +// Deletes the specified group. The group must not contain any users or have +// any attached policies. +func (c *IAM) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { + req, out := c.DeleteGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteGroupPolicy = "DeleteGroupPolicy" + +// DeleteGroupPolicyRequest generates a request for the DeleteGroupPolicy operation. +func (c *IAM) DeleteGroupPolicyRequest(input *DeleteGroupPolicyInput) (req *request.Request, output *DeleteGroupPolicyOutput) { + op := &request.Operation{ + Name: opDeleteGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteGroupPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified group. +// +// A group can also have managed policies attached to it. To detach a managed +// policy from a group, use DetachGroupPolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. 
+func (c *IAM) DeleteGroupPolicy(input *DeleteGroupPolicyInput) (*DeleteGroupPolicyOutput, error) { + req, out := c.DeleteGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInstanceProfile = "DeleteInstanceProfile" + +// DeleteInstanceProfileRequest generates a request for the DeleteInstanceProfile operation. +func (c *IAM) DeleteInstanceProfileRequest(input *DeleteInstanceProfileInput) (req *request.Request, output *DeleteInstanceProfileOutput) { + op := &request.Operation{ + Name: opDeleteInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteInstanceProfileOutput{} + req.Data = output + return +} + +// Deletes the specified instance profile. The instance profile must not have +// an associated role. +// +// Make sure you do not have any Amazon EC2 instances running with the instance +// profile you are about to delete. Deleting a role or instance profile that +// is associated with a running instance will break any applications running +// on the instance. For more information about instance profiles, go to About +// Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +func (c *IAM) DeleteInstanceProfile(input *DeleteInstanceProfileInput) (*DeleteInstanceProfileOutput, error) { + req, out := c.DeleteInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLoginProfile = "DeleteLoginProfile" + +// DeleteLoginProfileRequest generates a request for the DeleteLoginProfile operation. 
+func (c *IAM) DeleteLoginProfileRequest(input *DeleteLoginProfileInput) (req *request.Request, output *DeleteLoginProfileOutput) { + op := &request.Operation{ + Name: opDeleteLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLoginProfileOutput{} + req.Data = output + return +} + +// Deletes the password for the specified user, which terminates the user's +// ability to access AWS services through the AWS Management Console. +// +// Deleting a user's password does not prevent a user from accessing IAM through +// the command line interface or the API. To prevent all user access you must +// also either make the access key inactive or delete it. For more information +// about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey. +func (c *IAM) DeleteLoginProfile(input *DeleteLoginProfileInput) (*DeleteLoginProfileOutput, error) { + req, out := c.DeleteLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteOpenIDConnectProvider = "DeleteOpenIDConnectProvider" + +// DeleteOpenIDConnectProviderRequest generates a request for the DeleteOpenIDConnectProvider operation. 
+func (c *IAM) DeleteOpenIDConnectProviderRequest(input *DeleteOpenIDConnectProviderInput) (req *request.Request, output *DeleteOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opDeleteOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Deletes an IAM OpenID Connect identity provider. +// +// Deleting an OIDC provider does not update any roles that reference the provider +// as a principal in their trust policies. Any attempt to assume a role that +// references a provider that has been deleted will fail. +// +// This action is idempotent; it does not fail or return an error if you call +// the action for a provider that was already deleted. +func (c *IAM) DeleteOpenIDConnectProvider(input *DeleteOpenIDConnectProviderInput) (*DeleteOpenIDConnectProviderOutput, error) { + req, out := c.DeleteOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a request for the DeletePolicy operation. +func (c *IAM) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified managed policy. 
+// +// Before you can delete a managed policy, you must detach the policy from +// all users, groups, and roles that it is attached to, and you must delete +// all of the policy's versions. The following steps describe the process for +// deleting a managed policy: Detach the policy from all users, groups, and +// roles that the policy is attached to, using the DetachUserPolicy, DetachGroupPolicy, +// or DetachRolePolicy APIs. To list all the users, groups, and roles that a +// policy is attached to, use ListEntitiesForPolicy. Delete all versions of +// the policy using DeletePolicyVersion. To list the policy's versions, use +// ListPolicyVersions. You cannot use DeletePolicyVersion to delete the version +// that is marked as the default version. You delete the policy's default version +// in the next step of the process. Delete the policy (this automatically deletes +// the policy's default version) using this API. +// +// For information about managed policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicyVersion = "DeletePolicyVersion" + +// DeletePolicyVersionRequest generates a request for the DeletePolicyVersion operation. 
+func (c *IAM) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { + op := &request.Operation{ + Name: opDeletePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyVersionOutput{} + req.Data = output + return +} + +// Deletes the specified version of the specified managed policy. +// +// You cannot delete the default version of a policy using this API. To delete +// the default version of a policy, use DeletePolicy. To find out which version +// of a policy is marked as the default version, use ListPolicyVersions. +// +// For information about versions for managed policies, refer to Versioning +// for Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +func (c *IAM) DeletePolicyVersion(input *DeletePolicyVersionInput) (*DeletePolicyVersionOutput, error) { + req, out := c.DeletePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRole = "DeleteRole" + +// DeleteRoleRequest generates a request for the DeleteRole operation. +func (c *IAM) DeleteRoleRequest(input *DeleteRoleInput) (req *request.Request, output *DeleteRoleOutput) { + op := &request.Operation{ + Name: opDeleteRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRoleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRoleOutput{} + req.Data = output + return +} + +// Deletes the specified role. The role must not have any policies attached. 
+// For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Make sure you do not have any Amazon EC2 instances running with the role +// you are about to delete. Deleting a role or instance profile that is associated +// with a running instance will break any applications running on the instance. +func (c *IAM) DeleteRole(input *DeleteRoleInput) (*DeleteRoleOutput, error) { + req, out := c.DeleteRoleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRolePolicy = "DeleteRolePolicy" + +// DeleteRolePolicyRequest generates a request for the DeleteRolePolicy operation. +func (c *IAM) DeleteRolePolicyRequest(input *DeleteRolePolicyInput) (req *request.Request, output *DeleteRolePolicyOutput) { + op := &request.Operation{ + Name: opDeleteRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRolePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified role. +// +// A role can also have managed policies attached to it. To detach a managed +// policy from a role, use DetachRolePolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeleteRolePolicy(input *DeleteRolePolicyInput) (*DeleteRolePolicyOutput, error) { + req, out := c.DeleteRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSAMLProvider = "DeleteSAMLProvider" + +// DeleteSAMLProviderRequest generates a request for the DeleteSAMLProvider operation. 
+func (c *IAM) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) (req *request.Request, output *DeleteSAMLProviderOutput) { + op := &request.Operation{ + Name: opDeleteSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSAMLProviderOutput{} + req.Data = output + return +} + +// Deletes a SAML provider. +// +// Deleting the provider does not update any roles that reference the SAML +// provider as a principal in their trust policies. Any attempt to assume a +// role that references a SAML provider that has been deleted will fail. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +func (c *IAM) DeleteSAMLProvider(input *DeleteSAMLProviderInput) (*DeleteSAMLProviderOutput, error) { + req, out := c.DeleteSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSSHPublicKey = "DeleteSSHPublicKey" + +// DeleteSSHPublicKeyRequest generates a request for the DeleteSSHPublicKey operation. +func (c *IAM) DeleteSSHPublicKeyRequest(input *DeleteSSHPublicKeyInput) (req *request.Request, output *DeleteSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opDeleteSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSSHPublicKeyOutput{} + req.Data = output + return +} + +// Deletes the specified SSH public key. +// +// The SSH public key deleted by this action is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. 
For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +func (c *IAM) DeleteSSHPublicKey(input *DeleteSSHPublicKeyInput) (*DeleteSSHPublicKeyOutput, error) { + req, out := c.DeleteSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteServerCertificate = "DeleteServerCertificate" + +// DeleteServerCertificateRequest generates a request for the DeleteServerCertificate operation. +func (c *IAM) DeleteServerCertificateRequest(input *DeleteServerCertificateInput) (req *request.Request, output *DeleteServerCertificateOutput) { + op := &request.Operation{ + Name: opDeleteServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteServerCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified server certificate. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +// +// If you are using a server certificate with Elastic Load Balancing, deleting +// the certificate could have implications for your application. If Elastic +// Load Balancing doesn't detect the deletion of bound certificates, it may +// continue to use the certificates. This could cause Elastic Load Balancing +// to stop accepting traffic. 
We recommend that you remove the reference to +// the certificate from Elastic Load Balancing before using this command to +// delete the certificate. For more information, go to DeleteLoadBalancerListeners +// (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_DeleteLoadBalancerListeners.html) +// in the Elastic Load Balancing API Reference. +func (c *IAM) DeleteServerCertificate(input *DeleteServerCertificateInput) (*DeleteServerCertificateOutput, error) { + req, out := c.DeleteServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSigningCertificate = "DeleteSigningCertificate" + +// DeleteSigningCertificateRequest generates a request for the DeleteSigningCertificate operation. +func (c *IAM) DeleteSigningCertificateRequest(input *DeleteSigningCertificateInput) (req *request.Request, output *DeleteSigningCertificateOutput) { + op := &request.Operation{ + Name: opDeleteSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSigningCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSigningCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified signing certificate associated with the specified user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. Because this action works +// for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. 
+func (c *IAM) DeleteSigningCertificate(input *DeleteSigningCertificateInput) (*DeleteSigningCertificateOutput, error) { + req, out := c.DeleteSigningCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUser = "DeleteUser" + +// DeleteUserRequest generates a request for the DeleteUser operation. +func (c *IAM) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { + op := &request.Operation{ + Name: opDeleteUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteUserOutput{} + req.Data = output + return +} + +// Deletes the specified user. The user must not belong to any groups, have +// any keys or signing certificates, or have any attached policies. +func (c *IAM) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUserPolicy = "DeleteUserPolicy" + +// DeleteUserPolicyRequest generates a request for the DeleteUserPolicy operation. +func (c *IAM) DeleteUserPolicyRequest(input *DeleteUserPolicyInput) (req *request.Request, output *DeleteUserPolicyOutput) { + op := &request.Operation{ + Name: opDeleteUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteUserPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified inline policy that is embedded in the specified user. +// +// A user can also have managed policies attached to it. 
To detach a managed +// policy from a user, use DetachUserPolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DeleteUserPolicy(input *DeleteUserPolicyInput) (*DeleteUserPolicyOutput, error) { + req, out := c.DeleteUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVirtualMFADevice = "DeleteVirtualMFADevice" + +// DeleteVirtualMFADeviceRequest generates a request for the DeleteVirtualMFADevice operation. +func (c *IAM) DeleteVirtualMFADeviceRequest(input *DeleteVirtualMFADeviceInput) (req *request.Request, output *DeleteVirtualMFADeviceOutput) { + op := &request.Operation{ + Name: opDeleteVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVirtualMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVirtualMFADeviceOutput{} + req.Data = output + return +} + +// Deletes a virtual MFA device. +// +// You must deactivate a user's virtual MFA device before you can delete it. +// For information about deactivating MFA devices, see DeactivateMFADevice. +func (c *IAM) DeleteVirtualMFADevice(input *DeleteVirtualMFADeviceInput) (*DeleteVirtualMFADeviceOutput, error) { + req, out := c.DeleteVirtualMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opDetachGroupPolicy = "DetachGroupPolicy" + +// DetachGroupPolicyRequest generates a request for the DetachGroupPolicy operation. 
+func (c *IAM) DetachGroupPolicyRequest(input *DetachGroupPolicyInput) (req *request.Request, output *DetachGroupPolicyOutput) { + op := &request.Operation{ + Name: opDetachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachGroupPolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified group. +// +// A group can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteGroupPolicy API. For information about policies, refer +// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachGroupPolicy(input *DetachGroupPolicyInput) (*DetachGroupPolicyOutput, error) { + req, out := c.DetachGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDetachRolePolicy = "DetachRolePolicy" + +// DetachRolePolicyRequest generates a request for the DetachRolePolicy operation. +func (c *IAM) DetachRolePolicyRequest(input *DetachRolePolicyInput) (req *request.Request, output *DetachRolePolicyOutput) { + op := &request.Operation{ + Name: opDetachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachRolePolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified role. +// +// A role can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteRolePolicy API. 
For information about policies, refer +// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachRolePolicy(input *DetachRolePolicyInput) (*DetachRolePolicyOutput, error) { + req, out := c.DetachRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDetachUserPolicy = "DetachUserPolicy" + +// DetachUserPolicyRequest generates a request for the DetachUserPolicy operation. +func (c *IAM) DetachUserPolicyRequest(input *DetachUserPolicyInput) (req *request.Request, output *DetachUserPolicyOutput) { + op := &request.Operation{ + Name: opDetachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachUserPolicyOutput{} + req.Data = output + return +} + +// Removes the specified managed policy from the specified user. +// +// A user can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteUserPolicy API. For information about policies, refer +// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) DetachUserPolicy(input *DetachUserPolicyInput) (*DetachUserPolicyOutput, error) { + req, out := c.DetachUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opEnableMFADevice = "EnableMFADevice" + +// EnableMFADeviceRequest generates a request for the EnableMFADevice operation. 
+func (c *IAM) EnableMFADeviceRequest(input *EnableMFADeviceInput) (req *request.Request, output *EnableMFADeviceOutput) { + op := &request.Operation{ + Name: opEnableMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableMFADeviceOutput{} + req.Data = output + return +} + +// Enables the specified MFA device and associates it with the specified user +// name. When enabled, the MFA device is required for every subsequent login +// by the user name associated with the device. +func (c *IAM) EnableMFADevice(input *EnableMFADeviceInput) (*EnableMFADeviceOutput, error) { + req, out := c.EnableMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opGenerateCredentialReport = "GenerateCredentialReport" + +// GenerateCredentialReportRequest generates a request for the GenerateCredentialReport operation. +func (c *IAM) GenerateCredentialReportRequest(input *GenerateCredentialReportInput) (req *request.Request, output *GenerateCredentialReportOutput) { + op := &request.Operation{ + Name: opGenerateCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateCredentialReportInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateCredentialReportOutput{} + req.Data = output + return +} + +// Generates a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. 
+func (c *IAM) GenerateCredentialReport(input *GenerateCredentialReportInput) (*GenerateCredentialReportOutput, error) { + req, out := c.GenerateCredentialReportRequest(input) + err := req.Send() + return out, err +} + +const opGetAccessKeyLastUsed = "GetAccessKeyLastUsed" + +// GetAccessKeyLastUsedRequest generates a request for the GetAccessKeyLastUsed operation. +func (c *IAM) GetAccessKeyLastUsedRequest(input *GetAccessKeyLastUsedInput) (req *request.Request, output *GetAccessKeyLastUsedOutput) { + op := &request.Operation{ + Name: opGetAccessKeyLastUsed, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyLastUsedInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccessKeyLastUsedOutput{} + req.Data = output + return +} + +// Retrieves information about when the specified access key was last used. +// The information includes the date and time of last use, along with the AWS +// service and region that were specified in the last request made with that +// key. +func (c *IAM) GetAccessKeyLastUsed(input *GetAccessKeyLastUsedInput) (*GetAccessKeyLastUsedOutput, error) { + req, out := c.GetAccessKeyLastUsedRequest(input) + err := req.Send() + return out, err +} + +const opGetAccountAuthorizationDetails = "GetAccountAuthorizationDetails" + +// GetAccountAuthorizationDetailsRequest generates a request for the GetAccountAuthorizationDetails operation. 
+func (c *IAM) GetAccountAuthorizationDetailsRequest(input *GetAccountAuthorizationDetailsInput) (req *request.Request, output *GetAccountAuthorizationDetailsOutput) { + op := &request.Operation{ + Name: opGetAccountAuthorizationDetails, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetAccountAuthorizationDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountAuthorizationDetailsOutput{} + req.Data = output + return +} + +// Retrieves information about all IAM users, groups, roles, and policies in +// your account, including their relationships to one another. Use this API +// to obtain a snapshot of the configuration of IAM permissions (users, groups, +// roles, and policies) in your account. +// +// You can optionally filter the results using the Filter parameter. You can +// paginate the results using the MaxItems and Marker parameters. +func (c *IAM) GetAccountAuthorizationDetails(input *GetAccountAuthorizationDetailsInput) (*GetAccountAuthorizationDetailsOutput, error) { + req, out := c.GetAccountAuthorizationDetailsRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) GetAccountAuthorizationDetailsPages(input *GetAccountAuthorizationDetailsInput, fn func(p *GetAccountAuthorizationDetailsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetAccountAuthorizationDetailsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetAccountAuthorizationDetailsOutput), lastPage) + }) +} + +const opGetAccountPasswordPolicy = "GetAccountPasswordPolicy" + +// GetAccountPasswordPolicyRequest generates a request for the GetAccountPasswordPolicy operation. 
+func (c *IAM) GetAccountPasswordPolicyRequest(input *GetAccountPasswordPolicyInput) (req *request.Request, output *GetAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opGetAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Retrieves the password policy for the AWS account. For more information about +// using a password policy, go to Managing an IAM Password Policy (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html). +func (c *IAM) GetAccountPasswordPolicy(input *GetAccountPasswordPolicyInput) (*GetAccountPasswordPolicyOutput, error) { + req, out := c.GetAccountPasswordPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetAccountSummary = "GetAccountSummary" + +// GetAccountSummaryRequest generates a request for the GetAccountSummary operation. +func (c *IAM) GetAccountSummaryRequest(input *GetAccountSummaryInput) (req *request.Request, output *GetAccountSummaryOutput) { + op := &request.Operation{ + Name: opGetAccountSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountSummaryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAccountSummaryOutput{} + req.Data = output + return +} + +// Retrieves information about IAM entity usage and IAM quotas in the AWS account. +// +// For information about limitations on IAM entities, see Limitations on IAM +// Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. 
+func (c *IAM) GetAccountSummary(input *GetAccountSummaryInput) (*GetAccountSummaryOutput, error) { + req, out := c.GetAccountSummaryRequest(input) + err := req.Send() + return out, err +} + +const opGetContextKeysForCustomPolicy = "GetContextKeysForCustomPolicy" + +// GetContextKeysForCustomPolicyRequest generates a request for the GetContextKeysForCustomPolicy operation. +func (c *IAM) GetContextKeysForCustomPolicyRequest(input *GetContextKeysForCustomPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) { + op := &request.Operation{ + Name: opGetContextKeysForCustomPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetContextKeysForCustomPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetContextKeysForPolicyResponse{} + req.Data = output + return +} + +// Gets a list of all of the context keys referenced in Condition elements in +// the input policies. The policies are supplied as a list of one or more strings. +// To get the context keys from policies associated with an IAM user, group, +// or role, use GetContextKeysForPrincipalPolicy. +// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request, and can be evaluated by +// using the Condition element of an IAM policy. Use GetContextKeysForCustomPolicy +// to understand what key names and values you must supply when you call SimulateCustomPolicy. +// Note that all parameters are shown in unencoded form here for clarity, but +// must be URL encoded to be included as a part of a real HTML request. 
+func (c *IAM) GetContextKeysForCustomPolicy(input *GetContextKeysForCustomPolicyInput) (*GetContextKeysForPolicyResponse, error) { + req, out := c.GetContextKeysForCustomPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetContextKeysForPrincipalPolicy = "GetContextKeysForPrincipalPolicy" + +// GetContextKeysForPrincipalPolicyRequest generates a request for the GetContextKeysForPrincipalPolicy operation. +func (c *IAM) GetContextKeysForPrincipalPolicyRequest(input *GetContextKeysForPrincipalPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) { + op := &request.Operation{ + Name: opGetContextKeysForPrincipalPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetContextKeysForPrincipalPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetContextKeysForPolicyResponse{} + req.Data = output + return +} + +// Gets a list of all of the context keys referenced in Condition elements in +// all of the IAM policies attached to the specified IAM entity. The entity +// can be an IAM user, group, or role. If you specify a user, then the request +// also includes all of the policies attached to groups that the user is a member +// of. +// +// You can optionally include a list of one or more additional policies, specified +// as strings. If you want to include only a list of policies by string, use +// GetContextKeysForCustomPolicy instead. +// +// Note: This API discloses information about the permissions granted to other +// users. If you do not want users to see other user's permissions, then consider +// allowing them to use GetContextKeysForCustomPolicy instead. +// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request, and can be evaluated by +// using the Condition element of an IAM policy. 
Use GetContextKeysForPrincipalPolicy +// to understand what key names and values you must supply when you call SimulatePrincipalPolicy. +func (c *IAM) GetContextKeysForPrincipalPolicy(input *GetContextKeysForPrincipalPolicyInput) (*GetContextKeysForPolicyResponse, error) { + req, out := c.GetContextKeysForPrincipalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetCredentialReport = "GetCredentialReport" + +// GetCredentialReportRequest generates a request for the GetCredentialReport operation. +func (c *IAM) GetCredentialReportRequest(input *GetCredentialReportInput) (req *request.Request, output *GetCredentialReportOutput) { + op := &request.Operation{ + Name: opGetCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCredentialReportInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCredentialReportOutput{} + req.Data = output + return +} + +// Retrieves a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +func (c *IAM) GetCredentialReport(input *GetCredentialReportInput) (*GetCredentialReportOutput, error) { + req, out := c.GetCredentialReportRequest(input) + err := req.Send() + return out, err +} + +const opGetGroup = "GetGroup" + +// GetGroupRequest generates a request for the GetGroup operation. 
+func (c *IAM) GetGroupRequest(input *GetGroupInput) (req *request.Request, output *GetGroupOutput) { + op := &request.Operation{ + Name: opGetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGroupOutput{} + req.Data = output + return +} + +// Returns a list of users that are in the specified group. You can paginate +// the results using the MaxItems and Marker parameters. +func (c *IAM) GetGroup(input *GetGroupInput) (*GetGroupOutput, error) { + req, out := c.GetGroupRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) GetGroupPages(input *GetGroupInput, fn func(p *GetGroupOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetGroupRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetGroupOutput), lastPage) + }) +} + +const opGetGroupPolicy = "GetGroupPolicy" + +// GetGroupPolicyRequest generates a request for the GetGroupPolicy operation. +func (c *IAM) GetGroupPolicyRequest(input *GetGroupPolicyInput) (req *request.Request, output *GetGroupPolicyOutput) { + op := &request.Operation{ + Name: opGetGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGroupPolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded in the specified +// group. +// +// A group can also have managed policies attached to it. 
To retrieve a managed +// policy document that is attached to a group, use GetPolicy to determine the +// policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetGroupPolicy(input *GetGroupPolicyInput) (*GetGroupPolicyOutput, error) { + req, out := c.GetGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetInstanceProfile = "GetInstanceProfile" + +// GetInstanceProfileRequest generates a request for the GetInstanceProfile operation. +func (c *IAM) GetInstanceProfileRequest(input *GetInstanceProfileInput) (req *request.Request, output *GetInstanceProfileOutput) { + op := &request.Operation{ + Name: opGetInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &GetInstanceProfileOutput{} + req.Data = output + return +} + +// Retrieves information about the specified instance profile, including the +// instance profile's path, GUID, ARN, and role. For more information about +// instance profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// For more information about ARNs, go to ARNs (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). +func (c *IAM) GetInstanceProfile(input *GetInstanceProfileInput) (*GetInstanceProfileOutput, error) { + req, out := c.GetInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opGetLoginProfile = "GetLoginProfile" + +// GetLoginProfileRequest generates a request for the GetLoginProfile operation. 
+func (c *IAM) GetLoginProfileRequest(input *GetLoginProfileInput) (req *request.Request, output *GetLoginProfileOutput) { + op := &request.Operation{ + Name: opGetLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &GetLoginProfileOutput{} + req.Data = output + return +} + +// Retrieves the user name and password-creation date for the specified user. +// If the user has not been assigned a password, the action returns a 404 (NoSuchEntity) +// error. +func (c *IAM) GetLoginProfile(input *GetLoginProfileInput) (*GetLoginProfileOutput, error) { + req, out := c.GetLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opGetOpenIDConnectProvider = "GetOpenIDConnectProvider" + +// GetOpenIDConnectProviderRequest generates a request for the GetOpenIDConnectProvider operation. +func (c *IAM) GetOpenIDConnectProviderRequest(input *GetOpenIDConnectProviderInput) (req *request.Request, output *GetOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opGetOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Returns information about the specified OpenID Connect provider. +func (c *IAM) GetOpenIDConnectProvider(input *GetOpenIDConnectProviderInput) (*GetOpenIDConnectProviderOutput, error) { + req, out := c.GetOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest generates a request for the GetPolicy operation. 
+func (c *IAM) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { + op := &request.Operation{ + Name: opGetPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyOutput{} + req.Data = output + return +} + +// Retrieves information about the specified managed policy, including the policy's +// default version and the total number of users, groups, and roles that the +// policy is attached to. For a list of the specific users, groups, and roles +// that the policy is attached to, use the ListEntitiesForPolicy API. This API +// returns metadata about the policy. To retrieve the policy document for a +// specific version of the policy, use GetPolicyVersion. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded with a user, group, or role, use +// the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicyVersion = "GetPolicyVersion" + +// GetPolicyVersionRequest generates a request for the GetPolicyVersion operation. 
+func (c *IAM) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *request.Request, output *GetPolicyVersionOutput) { + op := &request.Operation{ + Name: opGetPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyVersionOutput{} + req.Data = output + return +} + +// Retrieves information about the specified version of the specified managed +// policy, including the policy document. +// +// To list the available versions for a policy, use ListPolicyVersions. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded in a user, group, or role, use the +// GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about the types of policies, refer to Managed Policies +// and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetPolicyVersion(input *GetPolicyVersionInput) (*GetPolicyVersionOutput, error) { + req, out := c.GetPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opGetRole = "GetRole" + +// GetRoleRequest generates a request for the GetRole operation. +func (c *IAM) GetRoleRequest(input *GetRoleInput) (req *request.Request, output *GetRoleOutput) { + op := &request.Operation{ + Name: opGetRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRoleOutput{} + req.Data = output + return +} + +// Retrieves information about the specified role, including the role's path, +// GUID, ARN, and the policy granting permission to assume the role. For more +// information about ARNs, go to ARNs (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). 
+// For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +func (c *IAM) GetRole(input *GetRoleInput) (*GetRoleOutput, error) { + req, out := c.GetRoleRequest(input) + err := req.Send() + return out, err +} + +const opGetRolePolicy = "GetRolePolicy" + +// GetRolePolicyRequest generates a request for the GetRolePolicy operation. +func (c *IAM) GetRolePolicyRequest(input *GetRolePolicyInput) (req *request.Request, output *GetRolePolicyOutput) { + op := &request.Operation{ + Name: opGetRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRolePolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded with the +// specified role. +// +// A role can also have managed policies attached to it. To retrieve a managed +// policy document that is attached to a role, use GetPolicy to determine the +// policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about roles, go to Using Roles to Delegate Permissions +// and Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +func (c *IAM) GetRolePolicy(input *GetRolePolicyInput) (*GetRolePolicyOutput, error) { + req, out := c.GetRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetSAMLProvider = "GetSAMLProvider" + +// GetSAMLProviderRequest generates a request for the GetSAMLProvider operation. 
+func (c *IAM) GetSAMLProviderRequest(input *GetSAMLProviderInput) (req *request.Request, output *GetSAMLProviderOutput) { + op := &request.Operation{ + Name: opGetSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSAMLProviderOutput{} + req.Data = output + return +} + +// Returns the SAML provider metadocument that was uploaded when the provider +// was created or updated. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +func (c *IAM) GetSAMLProvider(input *GetSAMLProviderInput) (*GetSAMLProviderOutput, error) { + req, out := c.GetSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opGetSSHPublicKey = "GetSSHPublicKey" + +// GetSSHPublicKeyRequest generates a request for the GetSSHPublicKey operation. +func (c *IAM) GetSSHPublicKeyRequest(input *GetSSHPublicKeyInput) (req *request.Request, output *GetSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opGetSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSSHPublicKeyOutput{} + req.Data = output + return +} + +// Retrieves the specified SSH public key, including metadata about the key. +// +// The SSH public key retrieved by this action is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. 
+func (c *IAM) GetSSHPublicKey(input *GetSSHPublicKeyInput) (*GetSSHPublicKeyOutput, error) { + req, out := c.GetSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opGetServerCertificate = "GetServerCertificate" + +// GetServerCertificateRequest generates a request for the GetServerCertificate operation. +func (c *IAM) GetServerCertificateRequest(input *GetServerCertificateInput) (req *request.Request, output *GetServerCertificateOutput) { + op := &request.Operation{ + Name: opGetServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &GetServerCertificateOutput{} + req.Data = output + return +} + +// Retrieves information about the specified server certificate. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +func (c *IAM) GetServerCertificate(input *GetServerCertificateInput) (*GetServerCertificateOutput, error) { + req, out := c.GetServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opGetUser = "GetUser" + +// GetUserRequest generates a request for the GetUser operation. +func (c *IAM) GetUserRequest(input *GetUserInput) (req *request.Request, output *GetUserOutput) { + op := &request.Operation{ + Name: opGetUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserInput{} + } + + req = c.newRequest(op, input, output) + output = &GetUserOutput{} + req.Data = output + return +} + +// Retrieves information about the specified user, including the user's creation +// date, path, unique ID, and ARN. 
+// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID used to sign the request. +func (c *IAM) GetUser(input *GetUserInput) (*GetUserOutput, error) { + req, out := c.GetUserRequest(input) + err := req.Send() + return out, err +} + +const opGetUserPolicy = "GetUserPolicy" + +// GetUserPolicyRequest generates a request for the GetUserPolicy operation. +func (c *IAM) GetUserPolicyRequest(input *GetUserPolicyInput) (req *request.Request, output *GetUserPolicyOutput) { + op := &request.Operation{ + Name: opGetUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetUserPolicyOutput{} + req.Data = output + return +} + +// Retrieves the specified inline policy document that is embedded in the specified +// user. +// +// A user can also have managed policies attached to it. To retrieve a managed +// policy document that is attached to a user, use GetPolicy to determine the +// policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) GetUserPolicy(input *GetUserPolicyInput) (*GetUserPolicyOutput, error) { + req, out := c.GetUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opListAccessKeys = "ListAccessKeys" + +// ListAccessKeysRequest generates a request for the ListAccessKeys operation. 
+func (c *IAM) ListAccessKeysRequest(input *ListAccessKeysInput) (req *request.Request, output *ListAccessKeysOutput) { + op := &request.Operation{ + Name: opListAccessKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccessKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAccessKeysOutput{} + req.Data = output + return +} + +// Returns information about the access key IDs associated with the specified +// user. If there are none, the action returns an empty list. +// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the UserName is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. 
+func (c *IAM) ListAccessKeys(input *ListAccessKeysInput) (*ListAccessKeysOutput, error) { + req, out := c.ListAccessKeysRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListAccessKeysPages(input *ListAccessKeysInput, fn func(p *ListAccessKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAccessKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAccessKeysOutput), lastPage) + }) +} + +const opListAccountAliases = "ListAccountAliases" + +// ListAccountAliasesRequest generates a request for the ListAccountAliases operation. +func (c *IAM) ListAccountAliasesRequest(input *ListAccountAliasesInput) (req *request.Request, output *ListAccountAliasesOutput) { + op := &request.Operation{ + Name: opListAccountAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccountAliasesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAccountAliasesOutput{} + req.Data = output + return +} + +// Lists the account alias associated with the account (Note: you can have only +// one). For information about using an AWS account alias, see Using an Alias +// for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. 
+func (c *IAM) ListAccountAliases(input *ListAccountAliasesInput) (*ListAccountAliasesOutput, error) { + req, out := c.ListAccountAliasesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListAccountAliasesPages(input *ListAccountAliasesInput, fn func(p *ListAccountAliasesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAccountAliasesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAccountAliasesOutput), lastPage) + }) +} + +const opListAttachedGroupPolicies = "ListAttachedGroupPolicies" + +// ListAttachedGroupPoliciesRequest generates a request for the ListAttachedGroupPolicies operation. +func (c *IAM) ListAttachedGroupPoliciesRequest(input *ListAttachedGroupPoliciesInput) (req *request.Request, output *ListAttachedGroupPoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedGroupPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedGroupPoliciesOutput{} + req.Data = output + return +} + +// Lists all managed policies that are attached to the specified group. +// +// A group can also have inline policies embedded with it. To list the inline +// policies for a group, use the ListGroupPolicies API. For information about +// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. 
You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified group (or none that match the specified path prefix), the action +// returns an empty list. +func (c *IAM) ListAttachedGroupPolicies(input *ListAttachedGroupPoliciesInput) (*ListAttachedGroupPoliciesOutput, error) { + req, out := c.ListAttachedGroupPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListAttachedGroupPoliciesPages(input *ListAttachedGroupPoliciesInput, fn func(p *ListAttachedGroupPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAttachedGroupPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAttachedGroupPoliciesOutput), lastPage) + }) +} + +const opListAttachedRolePolicies = "ListAttachedRolePolicies" + +// ListAttachedRolePoliciesRequest generates a request for the ListAttachedRolePolicies operation. +func (c *IAM) ListAttachedRolePoliciesRequest(input *ListAttachedRolePoliciesInput) (req *request.Request, output *ListAttachedRolePoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedRolePoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedRolePoliciesOutput{} + req.Data = output + return +} + +// Lists all managed policies that are attached to the specified role. +// +// A role can also have inline policies embedded with it. To list the inline +// policies for a role, use the ListRolePolicies API. 
For information about +// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified role (or none that match the specified path prefix), the action +// returns an empty list. +func (c *IAM) ListAttachedRolePolicies(input *ListAttachedRolePoliciesInput) (*ListAttachedRolePoliciesOutput, error) { + req, out := c.ListAttachedRolePoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListAttachedRolePoliciesPages(input *ListAttachedRolePoliciesInput, fn func(p *ListAttachedRolePoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAttachedRolePoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAttachedRolePoliciesOutput), lastPage) + }) +} + +const opListAttachedUserPolicies = "ListAttachedUserPolicies" + +// ListAttachedUserPoliciesRequest generates a request for the ListAttachedUserPolicies operation. 
+func (c *IAM) ListAttachedUserPoliciesRequest(input *ListAttachedUserPoliciesInput) (req *request.Request, output *ListAttachedUserPoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedUserPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedUserPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedUserPoliciesOutput{} + req.Data = output + return +} + +// Lists all managed policies that are attached to the specified user. +// +// A user can also have inline policies embedded with it. To list the inline +// policies for a user, use the ListUserPolicies API. For information about +// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified group (or none that match the specified path prefix), the action +// returns an empty list. 
+func (c *IAM) ListAttachedUserPolicies(input *ListAttachedUserPoliciesInput) (*ListAttachedUserPoliciesOutput, error) { + req, out := c.ListAttachedUserPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListAttachedUserPoliciesPages(input *ListAttachedUserPoliciesInput, fn func(p *ListAttachedUserPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAttachedUserPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAttachedUserPoliciesOutput), lastPage) + }) +} + +const opListEntitiesForPolicy = "ListEntitiesForPolicy" + +// ListEntitiesForPolicyRequest generates a request for the ListEntitiesForPolicy operation. +func (c *IAM) ListEntitiesForPolicyRequest(input *ListEntitiesForPolicyInput) (req *request.Request, output *ListEntitiesForPolicyOutput) { + op := &request.Operation{ + Name: opListEntitiesForPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListEntitiesForPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEntitiesForPolicyOutput{} + req.Data = output + return +} + +// Lists all users, groups, and roles that the specified managed policy is attached +// to. +// +// You can use the optional EntityFilter parameter to limit the results to +// a particular type of entity (users, groups, or roles). For example, to list +// only the roles that are attached to the specified policy, set EntityFilter +// to Role. +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListEntitiesForPolicy(input *ListEntitiesForPolicyInput) (*ListEntitiesForPolicyOutput, error) { + req, out := c.ListEntitiesForPolicyRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListEntitiesForPolicyPages(input *ListEntitiesForPolicyInput, fn func(p *ListEntitiesForPolicyOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListEntitiesForPolicyRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListEntitiesForPolicyOutput), lastPage) + }) +} + +const opListGroupPolicies = "ListGroupPolicies" + +// ListGroupPoliciesRequest generates a request for the ListGroupPolicies operation. +func (c *IAM) ListGroupPoliciesRequest(input *ListGroupPoliciesInput) (req *request.Request, output *ListGroupPoliciesOutput) { + op := &request.Operation{ + Name: opListGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupPoliciesOutput{} + req.Data = output + return +} + +// Lists the names of the inline policies that are embedded in the specified +// group. +// +// A group can also have managed policies attached to it. To list the managed +// policies that are attached to a group, use ListAttachedGroupPolicies. For +// more information about policies, refer to Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified group, the action +// returns an empty list. 
+func (c *IAM) ListGroupPolicies(input *ListGroupPoliciesInput) (*ListGroupPoliciesOutput, error) { + req, out := c.ListGroupPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListGroupPoliciesPages(input *ListGroupPoliciesInput, fn func(p *ListGroupPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupPoliciesOutput), lastPage) + }) +} + +const opListGroups = "ListGroups" + +// ListGroupsRequest generates a request for the ListGroups operation. +func (c *IAM) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { + op := &request.Operation{ + Name: opListGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupsOutput{} + req.Data = output + return +} + +// Lists the groups that have the specified path prefix. +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListGroupsPages(input *ListGroupsInput, fn func(p *ListGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupsOutput), lastPage) + }) +} + +const opListGroupsForUser = "ListGroupsForUser" + +// ListGroupsForUserRequest generates a request for the ListGroupsForUser operation. +func (c *IAM) ListGroupsForUserRequest(input *ListGroupsForUserInput) (req *request.Request, output *ListGroupsForUserOutput) { + op := &request.Operation{ + Name: opListGroupsForUser, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsForUserInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGroupsForUserOutput{} + req.Data = output + return +} + +// Lists the groups the specified user belongs to. +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListGroupsForUser(input *ListGroupsForUserInput) (*ListGroupsForUserOutput, error) { + req, out := c.ListGroupsForUserRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListGroupsForUserPages(input *ListGroupsForUserInput, fn func(p *ListGroupsForUserOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGroupsForUserRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGroupsForUserOutput), lastPage) + }) +} + +const opListInstanceProfiles = "ListInstanceProfiles" + +// ListInstanceProfilesRequest generates a request for the ListInstanceProfiles operation. +func (c *IAM) ListInstanceProfilesRequest(input *ListInstanceProfilesInput) (req *request.Request, output *ListInstanceProfilesOutput) { + op := &request.Operation{ + Name: opListInstanceProfiles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstanceProfilesOutput{} + req.Data = output + return +} + +// Lists the instance profiles that have the specified path prefix. If there +// are none, the action returns an empty list. For more information about instance +// profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListInstanceProfiles(input *ListInstanceProfilesInput) (*ListInstanceProfilesOutput, error) { + req, out := c.ListInstanceProfilesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListInstanceProfilesPages(input *ListInstanceProfilesInput, fn func(p *ListInstanceProfilesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstanceProfilesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstanceProfilesOutput), lastPage) + }) +} + +const opListInstanceProfilesForRole = "ListInstanceProfilesForRole" + +// ListInstanceProfilesForRoleRequest generates a request for the ListInstanceProfilesForRole operation. +func (c *IAM) ListInstanceProfilesForRoleRequest(input *ListInstanceProfilesForRoleInput) (req *request.Request, output *ListInstanceProfilesForRoleOutput) { + op := &request.Operation{ + Name: opListInstanceProfilesForRole, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesForRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &ListInstanceProfilesForRoleOutput{} + req.Data = output + return +} + +// Lists the instance profiles that have the specified associated role. If there +// are none, the action returns an empty list. For more information about instance +// profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListInstanceProfilesForRole(input *ListInstanceProfilesForRoleInput) (*ListInstanceProfilesForRoleOutput, error) { + req, out := c.ListInstanceProfilesForRoleRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListInstanceProfilesForRolePages(input *ListInstanceProfilesForRoleInput, fn func(p *ListInstanceProfilesForRoleOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListInstanceProfilesForRoleRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListInstanceProfilesForRoleOutput), lastPage) + }) +} + +const opListMFADevices = "ListMFADevices" + +// ListMFADevicesRequest generates a request for the ListMFADevices operation. +func (c *IAM) ListMFADevicesRequest(input *ListMFADevicesInput) (req *request.Request, output *ListMFADevicesOutput) { + op := &request.Operation{ + Name: opListMFADevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMFADevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMFADevicesOutput{} + req.Data = output + return +} + +// Lists the MFA devices. If the request includes the user name, then this action +// lists all the MFA devices associated with the specified user name. If you +// do not specify a user name, IAM determines the user name implicitly based +// on the AWS access key ID signing the request. +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListMFADevices(input *ListMFADevicesInput) (*ListMFADevicesOutput, error) { + req, out := c.ListMFADevicesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListMFADevicesPages(input *ListMFADevicesInput, fn func(p *ListMFADevicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMFADevicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMFADevicesOutput), lastPage) + }) +} + +const opListOpenIDConnectProviders = "ListOpenIDConnectProviders" + +// ListOpenIDConnectProvidersRequest generates a request for the ListOpenIDConnectProviders operation. +func (c *IAM) ListOpenIDConnectProvidersRequest(input *ListOpenIDConnectProvidersInput) (req *request.Request, output *ListOpenIDConnectProvidersOutput) { + op := &request.Operation{ + Name: opListOpenIDConnectProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListOpenIDConnectProvidersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOpenIDConnectProvidersOutput{} + req.Data = output + return +} + +// Lists information about the OpenID Connect providers in the AWS account. +func (c *IAM) ListOpenIDConnectProviders(input *ListOpenIDConnectProvidersInput) (*ListOpenIDConnectProvidersOutput, error) { + req, out := c.ListOpenIDConnectProvidersRequest(input) + err := req.Send() + return out, err +} + +const opListPolicies = "ListPolicies" + +// ListPoliciesRequest generates a request for the ListPolicies operation. 
+func (c *IAM) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) { + op := &request.Operation{ + Name: opListPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPoliciesOutput{} + req.Data = output + return +} + +// Lists all the managed policies that are available to your account, including +// your own customer managed policies and all AWS managed policies. +// +// You can filter the list of policies that is returned using the optional +// OnlyAttached, Scope, and PathPrefix parameters. For example, to list only +// the customer managed policies in your AWS account, set Scope to Local. To +// list only AWS managed policies, set Scope to AWS. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) { + req, out := c.ListPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListPoliciesPages(input *ListPoliciesInput, fn func(p *ListPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPoliciesOutput), lastPage) + }) +} + +const opListPolicyVersions = "ListPolicyVersions" + +// ListPolicyVersionsRequest generates a request for the ListPolicyVersions operation. 
+func (c *IAM) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *request.Request, output *ListPolicyVersionsOutput) { + op := &request.Operation{ + Name: opListPolicyVersions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPolicyVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPolicyVersionsOutput{} + req.Data = output + return +} + +// Lists information about the versions of the specified managed policy, including +// the version that is set as the policy's default version. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +func (c *IAM) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVersionsOutput, error) { + req, out := c.ListPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +const opListRolePolicies = "ListRolePolicies" + +// ListRolePoliciesRequest generates a request for the ListRolePolicies operation. +func (c *IAM) ListRolePoliciesRequest(input *ListRolePoliciesInput) (req *request.Request, output *ListRolePoliciesOutput) { + op := &request.Operation{ + Name: opListRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolePoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRolePoliciesOutput{} + req.Data = output + return +} + +// Lists the names of the inline policies that are embedded in the specified +// role. +// +// A role can also have managed policies attached to it. To list the managed +// policies that are attached to a role, use ListAttachedRolePolicies. 
For more +// information about policies, refer to Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified role, the action +// returns an empty list. +func (c *IAM) ListRolePolicies(input *ListRolePoliciesInput) (*ListRolePoliciesOutput, error) { + req, out := c.ListRolePoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListRolePoliciesPages(input *ListRolePoliciesInput, fn func(p *ListRolePoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRolePoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRolePoliciesOutput), lastPage) + }) +} + +const opListRoles = "ListRoles" + +// ListRolesRequest generates a request for the ListRoles operation. +func (c *IAM) ListRolesRequest(input *ListRolesInput) (req *request.Request, output *ListRolesOutput) { + op := &request.Operation{ + Name: opListRoles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRolesOutput{} + req.Data = output + return +} + +// Lists the roles that have the specified path prefix. If there are none, the +// action returns an empty list. For more information about roles, go to Working +// with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListRoles(input *ListRolesInput) (*ListRolesOutput, error) { + req, out := c.ListRolesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListRolesPages(input *ListRolesInput, fn func(p *ListRolesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListRolesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListRolesOutput), lastPage) + }) +} + +const opListSAMLProviders = "ListSAMLProviders" + +// ListSAMLProvidersRequest generates a request for the ListSAMLProviders operation. +func (c *IAM) ListSAMLProvidersRequest(input *ListSAMLProvidersInput) (req *request.Request, output *ListSAMLProvidersOutput) { + op := &request.Operation{ + Name: opListSAMLProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSAMLProvidersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSAMLProvidersOutput{} + req.Data = output + return +} + +// Lists the SAML providers in the account. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +func (c *IAM) ListSAMLProviders(input *ListSAMLProvidersInput) (*ListSAMLProvidersOutput, error) { + req, out := c.ListSAMLProvidersRequest(input) + err := req.Send() + return out, err +} + +const opListSSHPublicKeys = "ListSSHPublicKeys" + +// ListSSHPublicKeysRequest generates a request for the ListSSHPublicKeys operation. 
+func (c *IAM) ListSSHPublicKeysRequest(input *ListSSHPublicKeysInput) (req *request.Request, output *ListSSHPublicKeysOutput) { + op := &request.Operation{ + Name: opListSSHPublicKeys, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSSHPublicKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSSHPublicKeysOutput{} + req.Data = output + return +} + +// Returns information about the SSH public keys associated with the specified +// IAM user. If there are none, the action returns an empty list. +// +// The SSH public keys returned by this action are used only for authenticating +// the IAM user to an AWS CodeCommit repository. For more information about +// using SSH keys to authenticate to an AWS CodeCommit repository, see Set up +// AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +func (c *IAM) ListSSHPublicKeys(input *ListSSHPublicKeysInput) (*ListSSHPublicKeysOutput, error) { + req, out := c.ListSSHPublicKeysRequest(input) + err := req.Send() + return out, err +} + +const opListServerCertificates = "ListServerCertificates" + +// ListServerCertificatesRequest generates a request for the ListServerCertificates operation. 
+func (c *IAM) ListServerCertificatesRequest(input *ListServerCertificatesInput) (req *request.Request, output *ListServerCertificatesOutput) { + op := &request.Operation{ + Name: opListServerCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListServerCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListServerCertificatesOutput{} + req.Data = output + return +} + +// Lists the server certificates that have the specified path prefix. If none +// exist, the action returns an empty list. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +func (c *IAM) ListServerCertificates(input *ListServerCertificatesInput) (*ListServerCertificatesOutput, error) { + req, out := c.ListServerCertificatesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListServerCertificatesPages(input *ListServerCertificatesInput, fn func(p *ListServerCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListServerCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListServerCertificatesOutput), lastPage) + }) +} + +const opListSigningCertificates = "ListSigningCertificates" + +// ListSigningCertificatesRequest generates a request for the ListSigningCertificates operation. 
+func (c *IAM) ListSigningCertificatesRequest(input *ListSigningCertificatesInput) (req *request.Request, output *ListSigningCertificatesOutput) { + op := &request.Operation{ + Name: opListSigningCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListSigningCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSigningCertificatesOutput{} + req.Data = output + return +} + +// Returns information about the signing certificates associated with the specified +// user. If there are none, the action returns an empty list. +// +// Although each user is limited to a small number of signing certificates, +// you can still paginate the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. 
+func (c *IAM) ListSigningCertificates(input *ListSigningCertificatesInput) (*ListSigningCertificatesOutput, error) { + req, out := c.ListSigningCertificatesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListSigningCertificatesPages(input *ListSigningCertificatesInput, fn func(p *ListSigningCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSigningCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSigningCertificatesOutput), lastPage) + }) +} + +const opListUserPolicies = "ListUserPolicies" + +// ListUserPoliciesRequest generates a request for the ListUserPolicies operation. +func (c *IAM) ListUserPoliciesRequest(input *ListUserPoliciesInput) (req *request.Request, output *ListUserPoliciesOutput) { + op := &request.Operation{ + Name: opListUserPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListUserPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUserPoliciesOutput{} + req.Data = output + return +} + +// Lists the names of the inline policies embedded in the specified user. +// +// A user can also have managed policies attached to it. To list the managed +// policies that are attached to a user, use ListAttachedUserPolicies. For more +// information about policies, refer to Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified user, the action +// returns an empty list. 
+func (c *IAM) ListUserPolicies(input *ListUserPoliciesInput) (*ListUserPoliciesOutput, error) { + req, out := c.ListUserPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListUserPoliciesPages(input *ListUserPoliciesInput, fn func(p *ListUserPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListUserPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListUserPoliciesOutput), lastPage) + }) +} + +const opListUsers = "ListUsers" + +// ListUsersRequest generates a request for the ListUsers operation. +func (c *IAM) ListUsersRequest(input *ListUsersInput) (req *request.Request, output *ListUsersOutput) { + op := &request.Operation{ + Name: opListUsers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListUsersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListUsersOutput{} + req.Data = output + return +} + +// Lists the IAM users that have the specified path prefix. If no path prefix +// is specified, the action returns all users in the AWS account. If there are +// none, the action returns an empty list. +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) { + req, out := c.ListUsersRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListUsersPages(input *ListUsersInput, fn func(p *ListUsersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListUsersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListUsersOutput), lastPage) + }) +} + +const opListVirtualMFADevices = "ListVirtualMFADevices" + +// ListVirtualMFADevicesRequest generates a request for the ListVirtualMFADevices operation. +func (c *IAM) ListVirtualMFADevicesRequest(input *ListVirtualMFADevicesInput) (req *request.Request, output *ListVirtualMFADevicesOutput) { + op := &request.Operation{ + Name: opListVirtualMFADevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListVirtualMFADevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVirtualMFADevicesOutput{} + req.Data = output + return +} + +// Lists the virtual MFA devices under the AWS account by assignment status. +// If you do not specify an assignment status, the action returns a list of +// all virtual MFA devices. Assignment status can be Assigned, Unassigned, or +// Any. +// +// You can paginate the results using the MaxItems and Marker parameters. 
+func (c *IAM) ListVirtualMFADevices(input *ListVirtualMFADevicesInput) (*ListVirtualMFADevicesOutput, error) { + req, out := c.ListVirtualMFADevicesRequest(input) + err := req.Send() + return out, err +} + +func (c *IAM) ListVirtualMFADevicesPages(input *ListVirtualMFADevicesInput, fn func(p *ListVirtualMFADevicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListVirtualMFADevicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListVirtualMFADevicesOutput), lastPage) + }) +} + +const opPutGroupPolicy = "PutGroupPolicy" + +// PutGroupPolicyRequest generates a request for the PutGroupPolicy operation. +func (c *IAM) PutGroupPolicyRequest(input *PutGroupPolicyInput) (req *request.Request, output *PutGroupPolicyOutput) { + op := &request.Operation{ + Name: opPutGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutGroupPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutGroupPolicyOutput{} + req.Data = output + return +} + +// Adds (or updates) an inline policy document that is embedded in the specified +// group. +// +// A user can also have managed policies attached to it. To attach a managed +// policy to a group, use AttachGroupPolicy. To create a new managed policy, +// use CreatePolicy. For information about policies, refer to Managed Policies +// and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed in a group, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. 
+// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutGroupPolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAM guide. +func (c *IAM) PutGroupPolicy(input *PutGroupPolicyInput) (*PutGroupPolicyOutput, error) { + req, out := c.PutGroupPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutRolePolicy = "PutRolePolicy" + +// PutRolePolicyRequest generates a request for the PutRolePolicy operation. +func (c *IAM) PutRolePolicyRequest(input *PutRolePolicyInput) (req *request.Request, output *PutRolePolicyOutput) { + op := &request.Operation{ + Name: opPutRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutRolePolicyOutput{} + req.Data = output + return +} + +// Adds (or updates) an inline policy document that is embedded in the specified +// role. +// +// When you embed an inline policy in a role, the inline policy is used as +// the role's access (permissions) policy. The role's trust policy is created +// at the same time as the role, using CreateRole. You can update a role's trust +// policy using UpdateAssumeRolePolicy. For more information about roles, go +// to Using Roles to Delegate Permissions and Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// +// A role can also have a managed policy attached to it. To attach a managed +// policy to a role, use AttachRolePolicy. To create a new managed policy, use +// CreatePolicy. 
For information about policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed with a role, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutRolePolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAM guide. +func (c *IAM) PutRolePolicy(input *PutRolePolicyInput) (*PutRolePolicyOutput, error) { + req, out := c.PutRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutUserPolicy = "PutUserPolicy" + +// PutUserPolicyRequest generates a request for the PutUserPolicy operation. +func (c *IAM) PutUserPolicyRequest(input *PutUserPolicyInput) (req *request.Request, output *PutUserPolicyOutput) { + op := &request.Operation{ + Name: opPutUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutUserPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutUserPolicyOutput{} + req.Data = output + return +} + +// Adds (or updates) an inline policy document that is embedded in the specified +// user. +// +// A user can also have a managed policy attached to it. To attach a managed +// policy to a user, use AttachUserPolicy. To create a new managed policy, use +// CreatePolicy. 
For information about policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed in a user, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutUserPolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAM guide. +func (c *IAM) PutUserPolicy(input *PutUserPolicyInput) (*PutUserPolicyOutput, error) { + req, out := c.PutUserPolicyRequest(input) + err := req.Send() + return out, err +} + +const opRemoveClientIDFromOpenIDConnectProvider = "RemoveClientIDFromOpenIDConnectProvider" + +// RemoveClientIDFromOpenIDConnectProviderRequest generates a request for the RemoveClientIDFromOpenIDConnectProvider operation. +func (c *IAM) RemoveClientIDFromOpenIDConnectProviderRequest(input *RemoveClientIDFromOpenIDConnectProviderInput) (req *request.Request, output *RemoveClientIDFromOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opRemoveClientIDFromOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveClientIDFromOpenIDConnectProviderInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveClientIDFromOpenIDConnectProviderOutput{} + req.Data = output + return +} + +// Removes the specified client ID (also known as audience) from the list of +// client IDs registered for the specified IAM OpenID Connect provider. 
+// +// This action is idempotent; it does not fail or return an error if you try +// to remove a client ID that was removed previously. +func (c *IAM) RemoveClientIDFromOpenIDConnectProvider(input *RemoveClientIDFromOpenIDConnectProviderInput) (*RemoveClientIDFromOpenIDConnectProviderOutput, error) { + req, out := c.RemoveClientIDFromOpenIDConnectProviderRequest(input) + err := req.Send() + return out, err +} + +const opRemoveRoleFromInstanceProfile = "RemoveRoleFromInstanceProfile" + +// RemoveRoleFromInstanceProfileRequest generates a request for the RemoveRoleFromInstanceProfile operation. +func (c *IAM) RemoveRoleFromInstanceProfileRequest(input *RemoveRoleFromInstanceProfileInput) (req *request.Request, output *RemoveRoleFromInstanceProfileOutput) { + op := &request.Operation{ + Name: opRemoveRoleFromInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveRoleFromInstanceProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveRoleFromInstanceProfileOutput{} + req.Data = output + return +} + +// Removes the specified role from the specified instance profile. +// +// Make sure you do not have any Amazon EC2 instances running with the role +// you are about to remove from the instance profile. Removing a role from an +// instance profile that is associated with a running instance will break any +// applications running on the instance. For more information about roles, +// go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). 
+func (c *IAM) RemoveRoleFromInstanceProfile(input *RemoveRoleFromInstanceProfileInput) (*RemoveRoleFromInstanceProfileOutput, error) { + req, out := c.RemoveRoleFromInstanceProfileRequest(input) + err := req.Send() + return out, err +} + +const opRemoveUserFromGroup = "RemoveUserFromGroup" + +// RemoveUserFromGroupRequest generates a request for the RemoveUserFromGroup operation. +func (c *IAM) RemoveUserFromGroupRequest(input *RemoveUserFromGroupInput) (req *request.Request, output *RemoveUserFromGroupOutput) { + op := &request.Operation{ + Name: opRemoveUserFromGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveUserFromGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveUserFromGroupOutput{} + req.Data = output + return +} + +// Removes the specified user from the specified group. +func (c *IAM) RemoveUserFromGroup(input *RemoveUserFromGroupInput) (*RemoveUserFromGroupOutput, error) { + req, out := c.RemoveUserFromGroupRequest(input) + err := req.Send() + return out, err +} + +const opResyncMFADevice = "ResyncMFADevice" + +// ResyncMFADeviceRequest generates a request for the ResyncMFADevice operation. +func (c *IAM) ResyncMFADeviceRequest(input *ResyncMFADeviceInput) (req *request.Request, output *ResyncMFADeviceOutput) { + op := &request.Operation{ + Name: opResyncMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResyncMFADeviceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ResyncMFADeviceOutput{} + req.Data = output + return +} + +// Synchronizes the specified MFA device with AWS servers. 
+// +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the Using IAM guide. +func (c *IAM) ResyncMFADevice(input *ResyncMFADeviceInput) (*ResyncMFADeviceOutput, error) { + req, out := c.ResyncMFADeviceRequest(input) + err := req.Send() + return out, err +} + +const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion" + +// SetDefaultPolicyVersionRequest generates a request for the SetDefaultPolicyVersion operation. +func (c *IAM) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput) (req *request.Request, output *SetDefaultPolicyVersionOutput) { + op := &request.Operation{ + Name: opSetDefaultPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetDefaultPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetDefaultPolicyVersionOutput{} + req.Data = output + return +} + +// Sets the specified version of the specified policy as the policy's default +// (operative) version. +// +// This action affects all users, groups, and roles that the policy is attached +// to. To list the users, groups, and roles that the policy is attached to, +// use the ListEntitiesForPolicy API. +// +// For information about managed policies, refer to Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. 
+func (c *IAM) SetDefaultPolicyVersion(input *SetDefaultPolicyVersionInput) (*SetDefaultPolicyVersionOutput, error) { + req, out := c.SetDefaultPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opSimulateCustomPolicy = "SimulateCustomPolicy" + +// SimulateCustomPolicyRequest generates a request for the SimulateCustomPolicy operation. +func (c *IAM) SimulateCustomPolicyRequest(input *SimulateCustomPolicyInput) (req *request.Request, output *SimulatePolicyResponse) { + op := &request.Operation{ + Name: opSimulateCustomPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SimulateCustomPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SimulatePolicyResponse{} + req.Data = output + return +} + +// Simulate how a set of IAM policies and optionally a resource-based policy +// works with a list of API actions and AWS resources to determine the policies' +// effective permissions. The policies are provided as strings. +// +// The simulation does not perform the API actions; it only checks the authorization +// to determine if the simulated policies allow or deny the actions. +// +// If you want to simulate existing policies attached to an IAM user, group, +// or role, use SimulatePrincipalPolicy instead. +// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request. You can use the Condition +// element of an IAM policy to evaluate context keys. To get the list of context +// keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy. +// +// If the output is long, you can use MaxItems and Marker parameters to paginate +// the results. 
+func (c *IAM) SimulateCustomPolicy(input *SimulateCustomPolicyInput) (*SimulatePolicyResponse, error) { + req, out := c.SimulateCustomPolicyRequest(input) + err := req.Send() + return out, err +} + +const opSimulatePrincipalPolicy = "SimulatePrincipalPolicy" + +// SimulatePrincipalPolicyRequest generates a request for the SimulatePrincipalPolicy operation. +func (c *IAM) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput) (req *request.Request, output *SimulatePolicyResponse) { + op := &request.Operation{ + Name: opSimulatePrincipalPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SimulatePrincipalPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &SimulatePolicyResponse{} + req.Data = output + return +} + +// Simulate how a set of IAM policies attached to an IAM entity works with a +// list of API actions and AWS resources to determine the policies' effective +// permissions. The entity can be an IAM user, group, or role. If you specify +// a user, then the simulation also includes all of the policies that are attached +// to groups that the user belongs to . +// +// You can optionally include a list of one or more additional policies specified +// as strings to include in the simulation. If you want to simulate only policies +// specified as strings, use SimulateCustomPolicy instead. +// +// You can also optionally include one resource-based policy to be evaluated +// with each of the resources included in the simulation. +// +// The simulation does not perform the API actions, it only checks the authorization +// to determine if the simulated policies allow or deny the actions. +// +// Note: This API discloses information about the permissions granted to other +// users. If you do not want users to see other user's permissions, then consider +// allowing them to use SimulateCustomPolicy instead. 
+// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request. You can use the Condition +// element of an IAM policy to evaluate context keys. To get the list of context +// keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy. +// +// If the output is long, you can use the MaxItems and Marker parameters to +// paginate the results. +func (c *IAM) SimulatePrincipalPolicy(input *SimulatePrincipalPolicyInput) (*SimulatePolicyResponse, error) { + req, out := c.SimulatePrincipalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAccessKey = "UpdateAccessKey" + +// UpdateAccessKeyRequest generates a request for the UpdateAccessKey operation. +func (c *IAM) UpdateAccessKeyRequest(input *UpdateAccessKeyInput) (req *request.Request, output *UpdateAccessKeyOutput) { + op := &request.Operation{ + Name: opUpdateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccessKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAccessKeyOutput{} + req.Data = output + return +} + +// Changes the status of the specified access key from Active to Inactive, or +// vice versa. This action can be used to disable a user's key as part of a +// key rotation work flow. +// +// If the UserName field is not specified, the UserName is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. 
+// +// For information about rotating keys, see Managing Keys and Certificates +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html) +// in the IAM User Guide. +func (c *IAM) UpdateAccessKey(input *UpdateAccessKeyInput) (*UpdateAccessKeyOutput, error) { + req, out := c.UpdateAccessKeyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAccountPasswordPolicy = "UpdateAccountPasswordPolicy" + +// UpdateAccountPasswordPolicyRequest generates a request for the UpdateAccountPasswordPolicy operation. +func (c *IAM) UpdateAccountPasswordPolicyRequest(input *UpdateAccountPasswordPolicyInput) (req *request.Request, output *UpdateAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opUpdateAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccountPasswordPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAccountPasswordPolicyOutput{} + req.Data = output + return +} + +// Updates the password policy settings for the AWS account. +// +// This action does not support partial updates. No parameters are required, +// but if you do not specify a parameter, that parameter's value reverts to +// its default value. See the Request Parameters section for each parameter's +// default value. +// +// For more information about using a password policy, see Managing an IAM +// Password Policy (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html) +// in the IAM User Guide. 
+func (c *IAM) UpdateAccountPasswordPolicy(input *UpdateAccountPasswordPolicyInput) (*UpdateAccountPasswordPolicyOutput, error) { + req, out := c.UpdateAccountPasswordPolicyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAssumeRolePolicy = "UpdateAssumeRolePolicy" + +// UpdateAssumeRolePolicyRequest generates a request for the UpdateAssumeRolePolicy operation. +func (c *IAM) UpdateAssumeRolePolicyRequest(input *UpdateAssumeRolePolicyInput) (req *request.Request, output *UpdateAssumeRolePolicyOutput) { + op := &request.Operation{ + Name: opUpdateAssumeRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssumeRolePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAssumeRolePolicyOutput{} + req.Data = output + return +} + +// Updates the policy that grants an entity permission to assume a role. For +// more information about roles, go to Using Roles to Delegate Permissions and +// Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +func (c *IAM) UpdateAssumeRolePolicy(input *UpdateAssumeRolePolicyInput) (*UpdateAssumeRolePolicyOutput, error) { + req, out := c.UpdateAssumeRolePolicyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateGroup = "UpdateGroup" + +// UpdateGroupRequest generates a request for the UpdateGroup operation. 
+func (c *IAM) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, output *UpdateGroupOutput) { + op := &request.Operation{ + Name: opUpdateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateGroupOutput{} + req.Data = output + return +} + +// Updates the name and/or the path of the specified group. +// +// You should understand the implications of changing a group's path or name. +// For more information, see Renaming Users and Groups (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html) +// in the IAM User Guide. To change a group name the requester must have appropriate +// permissions on both the source object and the target object. For example, +// to change Managers to MGRs, the entity making the request must have permission +// on Managers and MGRs, or must have permission on all (*). For more information +// about permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html" +// target="blank). +func (c *IAM) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) { + req, out := c.UpdateGroupRequest(input) + err := req.Send() + return out, err +} + +const opUpdateLoginProfile = "UpdateLoginProfile" + +// UpdateLoginProfileRequest generates a request for the UpdateLoginProfile operation. 
+func (c *IAM) UpdateLoginProfileRequest(input *UpdateLoginProfileInput) (req *request.Request, output *UpdateLoginProfileOutput) { + op := &request.Operation{ + Name: opUpdateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLoginProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateLoginProfileOutput{} + req.Data = output + return +} + +// Changes the password for the specified user. +// +// Users can change their own passwords by calling ChangePassword. For more +// information about modifying passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +func (c *IAM) UpdateLoginProfile(input *UpdateLoginProfileInput) (*UpdateLoginProfileOutput, error) { + req, out := c.UpdateLoginProfileRequest(input) + err := req.Send() + return out, err +} + +const opUpdateOpenIDConnectProviderThumbprint = "UpdateOpenIDConnectProviderThumbprint" + +// UpdateOpenIDConnectProviderThumbprintRequest generates a request for the UpdateOpenIDConnectProviderThumbprint operation. +func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDConnectProviderThumbprintInput) (req *request.Request, output *UpdateOpenIDConnectProviderThumbprintOutput) { + op := &request.Operation{ + Name: opUpdateOpenIDConnectProviderThumbprint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateOpenIDConnectProviderThumbprintInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateOpenIDConnectProviderThumbprintOutput{} + req.Data = output + return +} + +// Replaces the existing list of server certificate thumbprints with a new list. 
+// +// The list that you pass with this action completely replaces the existing +// list of thumbprints. (The lists are not merged.) +// +// Typically, you need to update a thumbprint only when the identity provider's +// certificate changes, which occurs rarely. However, if the provider's certificate +// does change, any attempt to assume an IAM role that specifies the OIDC provider +// as a principal will fail until the certificate thumbprint is updated. +// +// Because trust for the OpenID Connect provider is ultimately derived from +// the provider's certificate and is validated by the thumbprint, it is a best +// practice to limit access to the UpdateOpenIDConnectProviderThumbprint action +// to highly-privileged users. +func (c *IAM) UpdateOpenIDConnectProviderThumbprint(input *UpdateOpenIDConnectProviderThumbprintInput) (*UpdateOpenIDConnectProviderThumbprintOutput, error) { + req, out := c.UpdateOpenIDConnectProviderThumbprintRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSAMLProvider = "UpdateSAMLProvider" + +// UpdateSAMLProviderRequest generates a request for the UpdateSAMLProvider operation. +func (c *IAM) UpdateSAMLProviderRequest(input *UpdateSAMLProviderInput) (req *request.Request, output *UpdateSAMLProviderOutput) { + op := &request.Operation{ + Name: opUpdateSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSAMLProviderInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateSAMLProviderOutput{} + req.Data = output + return +} + +// Updates the metadata document for an existing SAML provider. +// +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). 
+func (c *IAM) UpdateSAMLProvider(input *UpdateSAMLProviderInput) (*UpdateSAMLProviderOutput, error) { + req, out := c.UpdateSAMLProviderRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSSHPublicKey = "UpdateSSHPublicKey" + +// UpdateSSHPublicKeyRequest generates a request for the UpdateSSHPublicKey operation. +func (c *IAM) UpdateSSHPublicKeyRequest(input *UpdateSSHPublicKeyInput) (req *request.Request, output *UpdateSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opUpdateSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateSSHPublicKeyOutput{} + req.Data = output + return +} + +// Sets the status of the specified SSH public key to active or inactive. SSH +// public keys that are inactive cannot be used for authentication. This action +// can be used to disable a user's SSH public key as part of a key rotation +// work flow. +// +// The SSH public key affected by this action is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +func (c *IAM) UpdateSSHPublicKey(input *UpdateSSHPublicKeyInput) (*UpdateSSHPublicKeyOutput, error) { + req, out := c.UpdateSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateServerCertificate = "UpdateServerCertificate" + +// UpdateServerCertificateRequest generates a request for the UpdateServerCertificate operation. 
+func (c *IAM) UpdateServerCertificateRequest(input *UpdateServerCertificateInput) (req *request.Request, output *UpdateServerCertificateOutput) { + op := &request.Operation{ + Name: opUpdateServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateServerCertificateOutput{} + req.Data = output + return +} + +// Updates the name and/or the path of the specified server certificate. +// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +// +// You should understand the implications of changing a server certificate's +// path or name. For more information, see Renaming a Server Certificate (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs_manage.html#RenamingServerCerts) +// in the IAM User Guide. To change a server certificate name the requester +// must have appropriate permissions on both the source object and the target +// object. For example, to change the name from ProductionCert to ProdCert, +// the entity making the request must have permission on ProductionCert and +// ProdCert, or must have permission on all (*). For more information about +// permissions, see Access Management (http://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. 
+func (c *IAM) UpdateServerCertificate(input *UpdateServerCertificateInput) (*UpdateServerCertificateOutput, error) { + req, out := c.UpdateServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSigningCertificate = "UpdateSigningCertificate" + +// UpdateSigningCertificateRequest generates a request for the UpdateSigningCertificate operation. +func (c *IAM) UpdateSigningCertificateRequest(input *UpdateSigningCertificateInput) (req *request.Request, output *UpdateSigningCertificateOutput) { + op := &request.Operation{ + Name: opUpdateSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSigningCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateSigningCertificateOutput{} + req.Data = output + return +} + +// Changes the status of the specified signing certificate from active to disabled, +// or vice versa. This action can be used to disable a user's signing certificate +// as part of a certificate rotation work flow. +// +// If the UserName field is not specified, the UserName is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +func (c *IAM) UpdateSigningCertificate(input *UpdateSigningCertificateInput) (*UpdateSigningCertificateOutput, error) { + req, out := c.UpdateSigningCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateUser = "UpdateUser" + +// UpdateUserRequest generates a request for the UpdateUser operation. 
+func (c *IAM) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, output *UpdateUserOutput) { + op := &request.Operation{ + Name: opUpdateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateUserOutput{} + req.Data = output + return +} + +// Updates the name and/or the path of the specified user. +// +// You should understand the implications of changing a user's path or name. +// For more information, see Renaming Users and Groups (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html) +// in the IAM User Guide. To change a user name the requester must have appropriate +// permissions on both the source object and the target object. For example, +// to change Bob to Robert, the entity making the request must have permission +// on Bob and Robert, or must have permission on all (*). For more information +// about permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html" +// target="blank). +func (c *IAM) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { + req, out := c.UpdateUserRequest(input) + err := req.Send() + return out, err +} + +const opUploadSSHPublicKey = "UploadSSHPublicKey" + +// UploadSSHPublicKeyRequest generates a request for the UploadSSHPublicKey operation. 
+func (c *IAM) UploadSSHPublicKeyRequest(input *UploadSSHPublicKeyInput) (req *request.Request, output *UploadSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opUploadSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadSSHPublicKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadSSHPublicKeyOutput{} + req.Data = output + return +} + +// Uploads an SSH public key and associates it with the specified IAM user. +// +// The SSH public key uploaded by this action can be used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (http://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +func (c *IAM) UploadSSHPublicKey(input *UploadSSHPublicKeyInput) (*UploadSSHPublicKeyOutput, error) { + req, out := c.UploadSSHPublicKeyRequest(input) + err := req.Send() + return out, err +} + +const opUploadServerCertificate = "UploadServerCertificate" + +// UploadServerCertificateRequest generates a request for the UploadServerCertificate operation. +func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput) (req *request.Request, output *UploadServerCertificateOutput) { + op := &request.Operation{ + Name: opUploadServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadServerCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadServerCertificateOutput{} + req.Data = output + return +} + +// Uploads a server certificate entity for the AWS account. The server certificate +// entity includes a public key certificate, a private key, and an optional +// certificate chain, which should all be PEM-encoded. 
+// +// For more information about working with server certificates, including a +// list of AWS services that can use the server certificates that you manage +// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. +// +// For information about the number of server certificates you can upload, +// see Limitations on IAM Entities and Objects (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) +// in the IAM User Guide. +// +// Because the body of the public key certificate, private key, and the certificate +// chain can be large, you should use POST rather than GET when calling UploadServerCertificate. +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about using the Query +// API with IAM, go to Calling the API by Making HTTP Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/programming.html) +// in the IAM User Guide. +func (c *IAM) UploadServerCertificate(input *UploadServerCertificateInput) (*UploadServerCertificateOutput, error) { + req, out := c.UploadServerCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUploadSigningCertificate = "UploadSigningCertificate" + +// UploadSigningCertificateRequest generates a request for the UploadSigningCertificate operation. 
+func (c *IAM) UploadSigningCertificateRequest(input *UploadSigningCertificateInput) (req *request.Request, output *UploadSigningCertificateOutput) { + op := &request.Operation{ + Name: opUploadSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadSigningCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadSigningCertificateOutput{} + req.Data = output + return +} + +// Uploads an X.509 signing certificate and associates it with the specified +// user. Some AWS services use X.509 signing certificates to validate requests +// that are signed with a corresponding private key. When you upload the certificate, +// its default status is Active. +// +// If the UserName field is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request. Because this action +// works for access keys under the AWS account, you can use this action to manage +// root credentials even if the AWS account has no associated users. +// +// Because the body of a X.509 certificate can be large, you should use POST +// rather than GET when calling UploadSigningCertificate. For information about +// setting up signatures and authorization through the API, go to Signing AWS +// API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about using the Query +// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAMguide. +func (c *IAM) UploadSigningCertificate(input *UploadSigningCertificateInput) (*UploadSigningCertificateOutput, error) { + req, out := c.UploadSigningCertificateRequest(input) + err := req.Send() + return out, err +} + +// Contains information about an AWS access key. +// +// This data type is used as a response element in the CreateAccessKey and +// ListAccessKeys actions. 
+// +// The SecretAccessKey value is returned only in response to CreateAccessKey. +// You can get a secret access key only when you first create an access key; +// you cannot recover the secret access key later. If you lose a secret access +// key, you must create a new access key. +type AccessKey struct { + _ struct{} `type:"structure"` + + // The ID for this access key. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date when the access key was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The secret key used to sign requests. + SecretAccessKey *string `type:"string" required:"true"` + + // The status of the access key. Active means the key is valid for API calls, + // while Inactive means it is not. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user that the access key is associated with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AccessKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessKey) GoString() string { + return s.String() +} + +// Contains information about the last time an AWS access key was used. +// +// This data type is used as a response element in the GetAccessKeyLastUsed +// action. +type AccessKeyLastUsed struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the access key was most recently used. This field is null when: + // + // The user does not have an access key. + // + // An access key exists but has never been used, at least not since IAM started + // tracking this information on April 22nd, 2015. 
+ // + // There is no sign-in data associated with the user + LastUsedDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The AWS region where this access key was most recently used. This field is + // null when: + // + // The user does not have an access key. + // + // An access key exists but has never been used, at least not since IAM started + // tracking this information on April 22nd, 2015. + // + // There is no sign-in data associated with the user + // + // For more information about AWS regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html) + // in the Amazon Web Services General Reference. + Region *string `type:"string" required:"true"` + + // The name of the AWS service with which this access key was most recently + // used. This field is null when: + // + // The user does not have an access key. + // + // An access key exists but has never been used, at least not since IAM started + // tracking this information on April 22nd, 2015. + // + // There is no sign-in data associated with the user + ServiceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AccessKeyLastUsed) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessKeyLastUsed) GoString() string { + return s.String() +} + +// Contains information about an AWS access key, without its secret key. +// +// This data type is used as a response element in the ListAccessKeys action. +type AccessKeyMetadata struct { + _ struct{} `type:"structure"` + + // The ID for this access key. + AccessKeyId *string `min:"16" type:"string"` + + // The date when the access key was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The status of the access key. Active means the key is valid for API calls; + // Inactive means it is not. 
+ Status *string `type:"string" enum:"statusType"` + + // The name of the IAM user that the key is associated with. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AccessKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessKeyMetadata) GoString() string { + return s.String() +} + +type AddClientIDToOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to add to the IAM OpenID Connect provider. + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // to add the client ID to. You can get a list of OIDC provider ARNs by using + // the ListOpenIDConnectProviders action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +type AddClientIDToOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type AddRoleToInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to add. 
+ RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddRoleToInstanceProfileInput) GoString() string { + return s.String() +} + +type AddRoleToInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddRoleToInstanceProfileOutput) GoString() string { + return s.String() +} + +type AddUserToGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to add. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddUserToGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUserToGroupInput) GoString() string { + return s.String() +} + +type AddUserToGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddUserToGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUserToGroupOutput) GoString() string { + return s.String() +} + +type AttachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to attach the policy to. + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. 
+ // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachGroupPolicyInput) GoString() string { + return s.String() +} + +type AttachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachGroupPolicyOutput) GoString() string { + return s.String() +} + +type AttachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the role to attach the policy to. 
+ RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachRolePolicyInput) GoString() string { + return s.String() +} + +type AttachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachRolePolicyOutput) GoString() string { + return s.String() +} + +type AttachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the user to attach the policy to. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachUserPolicyInput) GoString() string { + return s.String() +} + +type AttachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachUserPolicyOutput) GoString() string { + return s.String() +} + +// Contains information about an attached policy. +// +// An attached policy is a managed policy that has been attached to a user, +// group, or role. 
This data type is used as a response element in the ListAttachedGroupPolicies, +// ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails +// actions. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type AttachedPolicy struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string"` + + // The friendly name of the attached policy. + PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AttachedPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachedPolicy) GoString() string { + return s.String() +} + +type ChangePasswordInput struct { + _ struct{} `type:"structure"` + + // The new password. The new password must conform to the AWS account's password + // policy, if one exists. + NewPassword *string `min:"1" type:"string" required:"true"` + + // The IAM user's current password. 
+ OldPassword *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangePasswordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangePasswordInput) GoString() string { + return s.String() +} + +type ChangePasswordOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangePasswordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangePasswordOutput) GoString() string { + return s.String() +} + +// Contains information about a condition context key. It includes the name +// of the key and specifies the value (or values, if the context key supports +// multiple values) to use in the simulation. This information is used when +// evaluating the Condition elements of the input policies. +// +// This data type is used as an input parameter to SimulatePolicy. +type ContextEntry struct { + _ struct{} `type:"structure"` + + // The full name of a condition context key, including the service prefix. For + // example, aws:SourceIp or s3:VersionId. + ContextKeyName *string `min:"5" type:"string"` + + // The data type of the value (or values) specified in the ContextKeyValues + // parameter. + ContextKeyType *string `type:"string" enum:"ContextKeyTypeEnum"` + + // The value (or values, if the condition context key supports multiple values) + // to provide to the simulation for use when the key is referenced by a Condition + // element in an input policy. 
+ ContextKeyValues []*string `type:"list"` +} + +// String returns the string representation +func (s ContextEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContextEntry) GoString() string { + return s.String() +} + +type CreateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The user name that the new key will belong to. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccessKeyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateAccessKey request. +type CreateAccessKeyOutput struct { + _ struct{} `type:"structure"` + + // Information about the access key. + AccessKey *AccessKey `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccessKeyOutput) GoString() string { + return s.String() +} + +type CreateAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The account alias to create. 
+ AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountAliasInput) GoString() string { + return s.String() +} + +type CreateAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountAliasOutput) GoString() string { + return s.String() +} + +type CreateGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to create. Do not include the path in this value. + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateGroup request. +type CreateGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the group. 
+ Group *Group `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupOutput) GoString() string { + return s.String() +} + +type CreateInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to create. + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The path to the instance profile. For more information about paths, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateInstanceProfile request. +type CreateInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // Information about the instance profile. + InstanceProfile *InstanceProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceProfileOutput) GoString() string { + return s.String() +} + +type CreateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the user. + Password *string `min:"1" type:"string" required:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user to create a password for. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoginProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateLoginProfile request. +type CreateLoginProfileOutput struct { + _ struct{} `type:"structure"` + + // The user name and password create date. + LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoginProfileOutput) GoString() string { + return s.String() +} + +type CreateOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences). When a mobile or web app + // registers with an OpenID Connect provider, they establish a value that identifies + // the application. (This is the value that's sent as the client_id parameter + // on OAuth requests.) + // + // You can register multiple client IDs with the same provider. For example, + // you might have multiple applications that use the same OIDC provider. You + // cannot register more than 100 client IDs with a single IAM OIDC provider. + // + // There is no defined format for a client ID. The CreateOpenIDConnectProviderRequest + // action accepts client IDs up to 255 characters long. + ClientIDList []*string `type:"list"` + + // A list of server certificate thumbprints for the OpenID Connect (OIDC) identity + // provider's server certificate(s). Typically this list includes only one entry. + // However, IAM lets you have up to five thumbprints for an OIDC provider. This + // lets you maintain multiple thumbprints if the identity provider is rotating + // certificates. 
+ // + // The server certificate thumbprint is the hex-encoded SHA-1 hash value of + // the X.509 certificate used by the domain where the OpenID Connect provider + // makes its keys available. It is always a 40-character string. + // + // You must provide at least one thumbprint when creating an IAM OIDC provider. + // For example, if the OIDC provider is server.example.com and the provider + // stores its keys at "https://keys.server.example.com/openid-connect", the + // thumbprint string would be the hex-encoded SHA-1 hash value of the certificate + // used by https://keys.server.example.com. + // + // For more information about obtaining the OIDC provider's thumbprint, see + // Obtaining the Thumbprint for an OpenID Connect Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/identity-providers-oidc-obtain-thumbprint.html) + // in the IAM User Guide. + ThumbprintList []*string `type:"list" required:"true"` + + // The URL of the identity provider. The URL must begin with "https://" and + // should correspond to the iss claim in the provider's OpenID Connect ID tokens. + // Per the OIDC standard, path components are allowed but query parameters are + // not. Typically the URL consists of only a host name, like "https://server.example.org" + // or "https://example.com". + // + // You cannot register the same provider multiple times in a single AWS account. + // If you try to submit a URL that has already been used for an OpenID Connect + // provider in the AWS account, you will get an error. + Url *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateOpenIDConnectProvider request. 
+type CreateOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider that was + // created. For more information, see OpenIDConnectProviderListEntry. + OpenIDConnectProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type CreatePolicyInput struct { + _ struct{} `type:"structure"` + + // A friendly description of the policy. + // + // Typically used to store information about the permissions defined in the + // policy. For example, "Grants access to production DynamoDB tables." + // + // The policy description is immutable. After a value is assigned, it cannot + // be changed. + Description *string `type:"string"` + + // The path for the policy. + // + // For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `type:"string"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreatePolicy request. +type CreatePolicyOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy. 
+ Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyOutput) GoString() string { + return s.String() +} + +type CreatePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // Specifies whether to set this version as the policy's default version. + // + // When this parameter is true, the new policy version becomes the operative + // version; that is, the version that is in effect for the IAM users, groups, + // and roles that the policy is attached to. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + SetAsDefault *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreatePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreatePolicyVersion request. +type CreatePolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy version. 
+ PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionOutput) GoString() string { + return s.String() +} + +type CreateRoleInput struct { + _ struct{} `type:"structure"` + + // The trust relationship policy document that grants an entity permission to + // assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string" required:"true"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `min:"1" type:"string"` + + // The name of the role to create. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRoleInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateRole request. +type CreateRoleOutput struct { + _ struct{} `type:"structure"` + + // Information about the role. + Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRoleOutput) GoString() string { + return s.String() +} + +type CreateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The name of the provider to create. + Name *string `min:"1" type:"string" required:"true"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. 
The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. You must generate the metadata document using + // the identity management software that is used as your organization's IdP. + // + // For more information, see About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) + // in the IAM User Guide + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSAMLProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateSAMLProvider request. +type CreateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider. + SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSAMLProviderOutput) GoString() string { + return s.String() +} + +type CreateUserInput struct { + _ struct{} `type:"structure"` + + // The path for the user name. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `min:"1" type:"string"` + + // The name of the user to create. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateUser request. +type CreateUserOutput struct { + _ struct{} `type:"structure"` + + // Information about the user. + User *User `type:"structure"` +} + +// String returns the string representation +func (s CreateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserOutput) GoString() string { + return s.String() +} + +type CreateVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The path for the virtual MFA device. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + Path *string `min:"1" type:"string"` + + // The name of the virtual MFA device. Use with path to uniquely identify a + // virtual MFA device. + VirtualMFADeviceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualMFADeviceInput) GoString() string { + return s.String() +} + +// Contains the response to a successful CreateVirtualMFADevice request. +type CreateVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` + + // A newly created virtual MFA device. 
+ VirtualMFADevice *VirtualMFADevice `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualMFADeviceOutput) GoString() string { + return s.String() +} + +type DeactivateMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to deactivate. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeactivateMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateMFADeviceInput) GoString() string { + return s.String() +} + +type DeactivateMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeactivateMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateMFADeviceOutput) GoString() string { + return s.String() +} + +type DeleteAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID for the access key ID and secret access key you want to + // delete. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The name of the user whose key you want to delete. 
+ UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessKeyInput) GoString() string { + return s.String() +} + +type DeleteAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessKeyOutput) GoString() string { + return s.String() +} + +type DeleteAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The name of the account alias to delete. + AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountAliasInput) GoString() string { + return s.String() +} + +type DeleteAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountAliasOutput) GoString() string { + return s.String() +} + +type DeleteAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +type DeleteAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// 
GoString returns the string representation +func (s DeleteAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type DeleteGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to delete. + GroupName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupInput) GoString() string { + return s.String() +} + +type DeleteGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupOutput) GoString() string { + return s.String() +} + +type DeleteGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the group that the policy is + // embedded in. + GroupName *string `min:"1" type:"string" required:"true"` + + // The name identifying the policy document to delete. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupPolicyInput) GoString() string { + return s.String() +} + +type DeleteGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupPolicyOutput) GoString() string { + return s.String() +} + +type DeleteInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to delete. 
+ InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceProfileInput) GoString() string { + return s.String() +} + +type DeleteInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceProfileOutput) GoString() string { + return s.String() +} + +type DeleteLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose password you want to delete. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoginProfileInput) GoString() string { + return s.String() +} + +type DeleteLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoginProfileOutput) GoString() string { + return s.String() +} + +type DeleteOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider to delete. + // You can get a list of OpenID Connect provider ARNs by using the ListOpenIDConnectProviders + // action. 
+ OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +type DeleteOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeletePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. 
+ // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy version to delete. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionInput) GoString() string { + return s.String() +} + +type DeletePolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionOutput) GoString() string { + return s.String() +} + +type DeleteRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the role to delete. 
+ RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleInput) GoString() string { + return s.String() +} + +type DeleteRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleOutput) GoString() string { + return s.String() +} + +type DeleteRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name identifying the policy document to delete. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the role that the policy is + // embedded in. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePolicyInput) GoString() string { + return s.String() +} + +type DeleteRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePolicyOutput) GoString() string { + return s.String() +} + +type DeleteSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider to delete. 
+ SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSAMLProviderInput) GoString() string { + return s.String() +} + +type DeleteSAMLProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSAMLProviderOutput) GoString() string { + return s.String() +} + +type DeleteSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSSHPublicKeyInput) GoString() string { + return s.String() +} + +type DeleteSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type DeleteServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to delete. 
+ ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServerCertificateInput) GoString() string { + return s.String() +} + +type DeleteServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServerCertificateOutput) GoString() string { + return s.String() +} + +type DeleteSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate to delete. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The name of the user the signing certificate belongs to. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSigningCertificateInput) GoString() string { + return s.String() +} + +type DeleteSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSigningCertificateOutput) GoString() string { + return s.String() +} + +type DeleteUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to delete. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserInput) GoString() string { + return s.String() +} + +type DeleteUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserOutput) GoString() string { + return s.String() +} + +type DeleteUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name identifying the policy document to delete. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the user that the policy is + // embedded in. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPolicyInput) GoString() string { + return s.String() +} + +type DeleteUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPolicyOutput) GoString() string { + return s.String() +} + +type DeleteVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the same as the ARN. 
+ SerialNumber *string `min:"9" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualMFADeviceInput) GoString() string { + return s.String() +} + +type DeleteVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualMFADeviceOutput) GoString() string { + return s.String() +} + +type DetachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to detach the policy from. + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachGroupPolicyInput) GoString() string { + return s.String() +} + +type DetachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachGroupPolicyOutput) GoString() string { + return s.String() +} + +type DetachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). 
ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the role to detach the policy from. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachRolePolicyInput) GoString() string { + return s.String() +} + +type DetachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachRolePolicyOutput) GoString() string { + return s.String() +} + +type DetachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the user to detach the policy from. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachUserPolicyInput) GoString() string { + return s.String() +} + +type DetachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachUserPolicyOutput) GoString() string { + return s.String() +} + +type EnableMFADeviceInput struct { + _ struct{} `type:"structure"` + + // An authentication code emitted by the device. + AuthenticationCode1 *string `min:"6" type:"string" required:"true"` + + // A subsequent authentication code emitted by the device. + AuthenticationCode2 *string `min:"6" type:"string" required:"true"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user for whom you want to enable the MFA device. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMFADeviceInput) GoString() string { + return s.String() +} + +type EnableMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMFADeviceOutput) GoString() string { + return s.String() +} + +// Contains the results of a simulation. +// +// This data type is used by the return parameter of SimulatePolicy. 
+type EvaluationResult struct { + _ struct{} `type:"structure"` + + // The name of the API action tested on the indicated resource. + EvalActionName *string `min:"3" type:"string" required:"true"` + + // The result of the simulation. + EvalDecision *string `type:"string" required:"true" enum:"PolicyEvaluationDecisionType"` + + // Additional details about the results of the evaluation decision. When there + // are both IAM policies and resource policies, this parameter explains how + // each set of policies contributes to the final evaluation decision. When simulating + // cross-account access to a resource, both the resource-based policy and the + // caller's IAM policy must grant access. See How IAM Roles Differ from Resource-based + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_compare-resource-policies.html) + EvalDecisionDetails map[string]*string `type:"map"` + + // The ARN of the resource that the indicated API action was tested on. + EvalResourceName *string `min:"1" type:"string"` + + // A list of the statements in the input policies that determine the result + // for this scenario. Remember that even if multiple statements allow the action + // on the resource, if only one statement denies that action, then the explicit + // deny overrides any allow, and the deny statement is the only entry included + // in the result. + MatchedStatements []*Statement `type:"list"` + + // A list of context keys that are required by the included input policies but + // that were not provided by one of the input parameters. To discover the context + // keys used by a set of policies, you can call GetContextKeysForCustomPolicy + // or GetContextKeysForPrincipalPolicy. + // + // If the response includes any keys in this list, then the reported results + // might be untrustworthy because the simulation could not completely evaluate + // all of the conditions specified in the policies that would occur in a real + // world request. 
+ MissingContextValues []*string `type:"list"` + + // The individual results of the simulation of the API action specified in EvalActionName + // on each resource. + ResourceSpecificResults []*ResourceSpecificResult `type:"list"` +} + +// String returns the string representation +func (s EvaluationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResult) GoString() string { + return s.String() +} + +type GenerateCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GenerateCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateCredentialReportInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GenerateCredentialReport request. +type GenerateCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Information about the credential report. + Description *string `type:"string"` + + // Information about the state of the credential report. + State *string `type:"string" enum:"ReportStateType"` +} + +// String returns the string representation +func (s GenerateCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateCredentialReportOutput) GoString() string { + return s.String() +} + +type GetAccessKeyLastUsedInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyLastUsedInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccessKeyLastUsed request. 
It is +// also returned as a member of the AccessKeyMetaData structure returned by +// the ListAccessKeys action. +type GetAccessKeyLastUsedOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the last time the access key was used. + AccessKeyLastUsed *AccessKeyLastUsed `type:"structure"` + + // The name of the AWS IAM user that owns this access key. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyLastUsedOutput) GoString() string { + return s.String() +} + +type GetAccountAuthorizationDetailsInput struct { + _ struct{} `type:"structure"` + + // A list of entity types (user, group, role, local managed policy, or AWS managed + // policy) for filtering the results. + Filter []*string `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. 
+ MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountAuthorizationDetailsInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountAuthorizationDetails request. +type GetAccountAuthorizationDetailsOutput struct { + _ struct{} `type:"structure"` + + // A list containing information about IAM groups. + GroupDetailList []*GroupDetail `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list containing information about managed policies. + Policies []*ManagedPolicyDetail `type:"list"` + + // A list containing information about IAM roles. + RoleDetailList []*RoleDetail `type:"list"` + + // A list containing information about IAM users. 
+ UserDetailList []*UserDetail `type:"list"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountAuthorizationDetailsOutput) GoString() string { + return s.String() +} + +type GetAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountPasswordPolicy request. +type GetAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the account password policy. + // + // This data type is used as a response element in the GetAccountPasswordPolicy + // action. + PasswordPolicy *PasswordPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type GetAccountSummaryInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSummaryInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountSummary request. +type GetAccountSummaryOutput struct { + _ struct{} `type:"structure"` + + // A set of key value pairs containing information about IAM entity usage and + // IAM quotas. 
+ // + // SummaryMap contains the following keys: AccessKeysPerUserQuota + // + // The maximum number of active access keys allowed for each IAM user. + // + // AccountAccessKeysPresent + // + // This value is 1 if the AWS account (root) has an access key, otherwise it + // is 0. + // + // AccountMFAEnabled + // + // This value is 1 if the AWS account (root) has an MFA device assigned, otherwise + // it is 0. + // + // AccountSigningCertificatesPresent + // + // This value is 1 if the AWS account (root) has a signing certificate, otherwise + // it is 0. + // + // AssumeRolePolicySizeQuota + // + // The maximum allowed size for assume role policy documents (trust policies), + // in non-whitespace characters. + // + // AttachedPoliciesPerGroupQuota + // + // The maximum number of managed policies that can be attached to an IAM group. + // + // AttachedPoliciesPerRoleQuota + // + // The maximum number of managed policies that can be attached to an IAM role. + // + // AttachedPoliciesPerUserQuota + // + // The maximum number of managed policies that can be attached to an IAM user. + // + // GroupPolicySizeQuota + // + // The maximum allowed size for the aggregate of all inline policies embedded + // in an IAM group, in non-whitespace characters. + // + // Groups + // + // The number of IAM groups in the AWS account. + // + // GroupsPerUserQuota + // + // The maximum number of IAM groups each IAM user can belong to. + // + // GroupsQuota + // + // The maximum number of IAM groups allowed in the AWS account. + // + // InstanceProfiles + // + // The number of instance profiles in the AWS account. + // + // InstanceProfilesQuota + // + // The maximum number of instance profiles allowed in the AWS account. + // + // MFADevices + // + // The number of MFA devices in the AWS account, including those assigned and + // unassigned. + // + // MFADevicesInUse + // + // The number of MFA devices that have been assigned to an IAM user or to the + // AWS account (root). 
+ // + // Policies + // + // The number of customer managed policies in the AWS account. + // + // PoliciesQuota + // + // The maximum number of customer managed policies allowed in the AWS account. + // + // PolicySizeQuota + // + // The maximum allowed size of a customer managed policy, in non-whitespace + // characters. + // + // PolicyVersionsInUse + // + // The number of managed policies that are attached to IAM users, groups, or + // roles in the AWS account. + // + // PolicyVersionsInUseQuota + // + // The maximum number of managed policies that can be attached to IAM users, + // groups, or roles in the AWS account. + // + // Providers + // + // The number of identity providers in the AWS account. + // + // RolePolicySizeQuota + // + // The maximum allowed size for the aggregate of all inline policies (access + // policies, not the trust policy) embedded in an IAM role, in non-whitespace + // characters. + // + // Roles + // + // The number of IAM roles in the AWS account. + // + // RolesQuota + // + // The maximum number of IAM roles allowed in the AWS account. + // + // ServerCertificates + // + // The number of server certificates in the AWS account. + // + // ServerCertificatesQuota + // + // The maximum number of server certificates allowed in the AWS account. + // + // SigningCertificatesPerUserQuota + // + // The maximum number of X.509 signing certificates allowed for each IAM user. + // + // UserPolicySizeQuota + // + // The maximum allowed size for the aggregate of all inline policies embedded + // in an IAM user, in non-whitespace characters. + // + // Users + // + // The number of IAM users in the AWS account. + // + // UsersQuota + // + // The maximum number of IAM users allowed in the AWS account. + // + // VersionsPerPolicyQuota + // + // The maximum number of policy versions allowed for each managed policy. 
+ SummaryMap map[string]*int64 `type:"map"` +} + +// String returns the string representation +func (s GetAccountSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSummaryOutput) GoString() string { + return s.String() +} + +type GetContextKeysForCustomPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of policies for which you want list of context keys used in Condition + // elements. Each document is specified as a string containing the complete, + // valid JSON text of an IAM policy. + PolicyInputList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetContextKeysForCustomPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetContextKeysForCustomPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetContextKeysForPrincipalPolicy or +// GetContextKeysForCustomPolicy request. +type GetContextKeysForPolicyResponse struct { + _ struct{} `type:"structure"` + + // The list of context keys that are used in the Condition elements of the input + // policies. + ContextKeyNames []*string `type:"list"` +} + +// String returns the string representation +func (s GetContextKeysForPolicyResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetContextKeysForPolicyResponse) GoString() string { + return s.String() +} + +type GetContextKeysForPrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // A optional list of additional policies for which you want list of context + // keys used in Condition elements. + PolicyInputList []*string `type:"list"` + + // The ARN of a user, group, or role whose policies contain the context keys + // that you want listed. 
If you specify a user, the list includes context keys + // that are found in all policies attached to the user as well as to all groups + // that the user is a member of. If you pick a group or a role, then it includes + // only those context keys that are found in policies attached to that entity. + // Note that all parameters are shown in unencoded form here for clarity, but + // must be URL encoded to be included as a part of a real HTML request. + PolicySourceArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetContextKeysForPrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetContextKeysForPrincipalPolicyInput) GoString() string { + return s.String() +} + +type GetCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialReportInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCredentialReport request. +type GetCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Contains the credential report. The report is Base64-encoded. + Content []byte `type:"blob"` + + // The date and time when the credential report was created, in ISO 8601 date-time + // format (http://www.iso.org/iso/iso8601). + GeneratedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The format (MIME type) of the credential report. 
+ ReportFormat *string `type:"string" enum:"ReportFormatType"` +} + +// String returns the string representation +func (s GetCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialReportOutput) GoString() string { + return s.String() +} + +type GetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetGroup request. +type GetGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the group. + Group *Group `type:"structure" required:"true"` + + // A flag that indicates whether there are more items to return. 
If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of users in the group. + Users []*User `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupOutput) GoString() string { + return s.String() +} + +type GetGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the group the policy is associated with. + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the policy document to get. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetGroupPolicy request. +type GetGroupPolicyOutput struct { + _ struct{} `type:"structure"` + + // The group the policy is associated with. + GroupName *string `min:"1" type:"string" required:"true"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. 
+ PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupPolicyOutput) GoString() string { + return s.String() +} + +type GetInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to get information about. + InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInstanceProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetInstanceProfile request. +type GetInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // Information about the instance profile. + InstanceProfile *InstanceProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInstanceProfileOutput) GoString() string { + return s.String() +} + +type GetLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose login profile you want to retrieve. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoginProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetLoginProfile request. +type GetLoginProfileOutput struct { + _ struct{} `type:"structure"` + + // The user name and password create date for the user. 
+ LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoginProfileOutput) GoString() string { + return s.String() +} + +type GetOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // to get information for. You can get a list of OIDC provider ARNs by using + // the ListOpenIDConnectProviders action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetOpenIDConnectProvider request. +type GetOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences) that are associated with the + // specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + ClientIDList []*string `type:"list"` + + // The date and time when the IAM OpenID Connect provider entity was created + // in the AWS account. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + ThumbprintList []*string `type:"list"` + + // The URL that the IAM OpenID Connect provider is associated with. For more + // information, see CreateOpenIDConnectProvider. 
+ Url *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type GetPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetPolicy request. +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyOutput) GoString() string { + return s.String() +} + +type GetPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // Identifies the policy version to retrieve. 
+ VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetPolicyVersion request. +type GetPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // Information about the policy version. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionOutput) GoString() string { + return s.String() +} + +type GetRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the role to get information about. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetRole request. +type GetRoleOutput struct { + _ struct{} `type:"structure"` + + // Information about the role. + Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleOutput) GoString() string { + return s.String() +} + +type GetRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. 
+ PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role associated with the policy. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRolePolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetRolePolicy request. +type GetRolePolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The role the policy is associated with. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRolePolicyOutput) GoString() string { + return s.String() +} + +type GetSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider to get information about. + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSAMLProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetSAMLProvider request. +type GetSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The XML metadata document that includes information about an identity provider. 
+ SAMLMetadataDocument *string `min:"1000" type:"string"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSAMLProviderOutput) GoString() string { + return s.String() +} + +type GetSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // Specifies the public key encoding format to use in the response. To retrieve + // the public key in ssh-rsa format, use SSH. To retrieve the public key in + // PEM format, use PEM. + Encoding *string `type:"string" required:"true" enum:"encodingType"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetSSHPublicKey request. +type GetSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // Information about the SSH public key. + SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s GetSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type GetServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to retrieve information about. 
+ ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServerCertificateInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetServerCertificate request. +type GetServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // Information about the server certificate. + ServerCertificate *ServerCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServerCertificateOutput) GoString() string { + return s.String() +} + +type GetUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to get information about. + // + // This parameter is optional. If it is not included, it defaults to the user + // making the request. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetUser request. +type GetUserOutput struct { + _ struct{} `type:"structure"` + + // Information about the user. + User *User `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserOutput) GoString() string { + return s.String() +} + +type GetUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. 
+ PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user who the policy is associated with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetUserPolicy request. +type GetUserPolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The user the policy is associated with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserPolicyOutput) GoString() string { + return s.String() +} + +// Contains information about an IAM group entity. +// +// This data type is used as a response element in the following actions: +// +// CreateGroup GetGroup ListGroups +type Group struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the group. For more information + // about ARNs and how to use them in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the group was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The stable and unique string identifying the group. 
For more information + // about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + GroupId *string `min:"16" type:"string" required:"true"` + + // The friendly name that identifies the group. + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Group) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Group) GoString() string { + return s.String() +} + +// Contains information about an IAM group, including all of the group's policies. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type GroupDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // A list of the managed policies attached to the group. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the group was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The stable and unique string identifying the group. For more information + // about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. 
+ GroupId *string `min:"16" type:"string"` + + // The friendly name that identifies the group. + GroupName *string `min:"1" type:"string"` + + // A list of the inline policies embedded in the group. + GroupPolicyList []*PolicyDetail `type:"list"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GroupDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupDetail) GoString() string { + return s.String() +} + +// Contains information about an instance profile. +// +// This data type is used as a response element in the following actions: +// +// CreateInstanceProfile +// +// GetInstanceProfile +// +// ListInstanceProfiles +// +// ListInstanceProfilesForRole +type InstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the instance profile. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date when the instance profile was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The stable and unique string identifying the instance profile. For more information + // about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + InstanceProfileId *string `min:"16" type:"string" required:"true"` + + // The name identifying the instance profile. + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The path to the instance profile. 
For more information about paths, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The role associated with the instance profile. + Roles []*Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s InstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceProfile) GoString() string { + return s.String() +} + +type ListAccessKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccessKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccessKeysInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAccessKeys request. 
+type ListAccessKeysOutput struct { + _ struct{} `type:"structure"` + + // A list of access key metadata. + AccessKeyMetadata []*AccessKeyMetadata `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccessKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccessKeysOutput) GoString() string { + return s.String() +} + +type ListAccountAliasesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. 
In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListAccountAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountAliasesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAccountAliases request. +type ListAccountAliasesOutput struct { + _ struct{} `type:"structure"` + + // A list of aliases associated with the account. AWS supports only one alias + // per account. + AccountAliases []*string `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccountAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountAliasesOutput) GoString() string { + return s.String() +} + +type ListAttachedGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to list attached policies + // for. 
+ GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + PathPrefix *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedGroupPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAttachedGroupPolicies request. +type ListAttachedGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. 
Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedGroupPoliciesOutput) GoString() string { + return s.String() +} + +type ListAttachedRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. 
+ PathPrefix *string `type:"string"` + + // The name (friendly name, not ARN) of the role to list attached policies for. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRolePoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAttachedRolePolicies request. +type ListAttachedRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRolePoliciesOutput) GoString() string { + return s.String() +} + +type ListAttachedUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + PathPrefix *string `type:"string"` + + // The name (friendly name, not ARN) of the user to list attached policies for. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedUserPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListAttachedUserPolicies request. +type ListAttachedUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. 
+ // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedUserPoliciesOutput) GoString() string { + return s.String() +} + +type ListEntitiesForPolicyInput struct { + _ struct{} `type:"structure"` + + // The entity type to use for filtering the results. + // + // For example, when EntityFilter is Role, only the roles that are attached + // to the specified policy are returned. This parameter is optional. If it is + // not included, all attached entities (users, groups, and roles) are returned. + EntityFilter *string `type:"string" enum:"EntityType"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. 
+ MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all entities. + PathPrefix *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListEntitiesForPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEntitiesForPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListEntitiesForPolicy request. +type ListEntitiesForPolicyOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of groups that the policy is attached to. + PolicyGroups []*PolicyGroup `type:"list"` + + // A list of roles that the policy is attached to. + PolicyRoles []*PolicyRole `type:"list"` + + // A list of users that the policy is attached to. 
+ PolicyUsers []*PolicyUser `type:"list"` +} + +// String returns the string representation +func (s ListEntitiesForPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEntitiesForPolicyOutput) GoString() string { + return s.String() +} + +type ListGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the group to list policies for. + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListGroupPolicies request. +type ListGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. 
If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupPoliciesOutput) GoString() string { + return s.String() +} + +type ListGroupsForUserInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. 
+ MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list groups for. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListGroupsForUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsForUserInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListGroupsForUser request. +type ListGroupsForUserOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + Groups []*Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsForUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsForUserOutput) GoString() string { + return s.String() +} + +type ListGroupsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. 
+ Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ + // gets all groups whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all groups. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListGroups request. +type ListGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + Groups []*Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. 
+ IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsOutput) GoString() string { + return s.String() +} + +type ListInstanceProfilesForRoleInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list instance profiles for. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListInstanceProfilesForRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesForRoleInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListInstanceProfilesForRole request. 
+type ListInstanceProfilesForRoleOutput struct { + _ struct{} `type:"structure"` + + // A list of instance profiles. + InstanceProfiles []*InstanceProfile `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesForRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesForRoleOutput) GoString() string { + return s.String() +} + +type ListInstanceProfilesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. 
In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ + // gets all instance profiles whose path starts with /application_abc/component_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all instance profiles. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListInstanceProfiles request. +type ListInstanceProfilesOutput struct { + _ struct{} `type:"structure"` + + // A list of instance profiles. + InstanceProfiles []*InstanceProfile `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. 
+ Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesOutput) GoString() string { + return s.String() +} + +type ListMFADevicesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user whose MFA devices you want to list. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMFADevicesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListMFADevices request. +type ListMFADevicesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. 
If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // A list of MFA devices. + MFADevices []*MFADevice `type:"list" required:"true"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMFADevicesOutput) GoString() string { + return s.String() +} + +type ListOpenIDConnectProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenIDConnectProvidersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListOpenIDConnectProviders request. +type ListOpenIDConnectProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of IAM OpenID Connect providers in the AWS account. 
+ OpenIDConnectProviderList []*OpenIDConnectProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenIDConnectProvidersOutput) GoString() string { + return s.String() +} + +type ListPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A flag to filter the results to only the attached policies. + // + // When OnlyAttached is true, the returned list contains only the policies + // that are attached to a user, group, or role. When OnlyAttached is false, + // or when the parameter is not included, all policies are returned. + OnlyAttached *bool `type:"boolean"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + PathPrefix *string `type:"string"` + + // The scope to use for filtering the results. 
+ // + // To list only AWS managed policies, set Scope to AWS. To list only the customer + // managed policies in your AWS account, set Scope to Local. + // + // This parameter is optional. If it is not included, or if it is set to All, + // all policies are returned. + Scope *string `type:"string" enum:"policyScopeType"` +} + +// String returns the string representation +func (s ListPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListPolicies request. +type ListPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policies. + Policies []*Policy `type:"list"` +} + +// String returns the string representation +func (s ListPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesOutput) GoString() string { + return s.String() +} + +type ListPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListPolicyVersions request. +type ListPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. 
+ // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy versions. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + Versions []*PolicyVersion `type:"list"` +} + +// String returns the string representation +func (s ListPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsOutput) GoString() string { + return s.String() +} + +type ListRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list policies for. 
+ RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolePoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListRolePolicies request. +type ListRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolePoliciesOutput) GoString() string { + return s.String() +} + +type ListRolesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. 
If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ + // gets all roles whose path starts with /application_abc/component_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all roles. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListRoles request. +type ListRolesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of roles. 
+ Roles []*Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolesOutput) GoString() string { + return s.String() +} + +type ListSAMLProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListSAMLProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSAMLProvidersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListSAMLProviders request. +type ListSAMLProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of SAML providers for this account. + SAMLProviderList []*SAMLProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListSAMLProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSAMLProvidersOutput) GoString() string { + return s.String() +} + +type ListSSHPublicKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. 
In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user to list SSH public keys for. If none is specified, + // the UserName field is determined implicitly based on the AWS access key used + // to sign the request. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSSHPublicKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSSHPublicKeysInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListSSHPublicKeys request. +type ListSSHPublicKeysOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of SSH public keys. 
+ SSHPublicKeys []*SSHPublicKeyMetadata `type:"list"` +} + +// String returns the string representation +func (s ListSSHPublicKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSSHPublicKeysOutput) GoString() string { + return s.String() +} + +type ListServerCertificatesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example: /company/servercerts + // would get all server certificates for which the path starts with /company/servercerts. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all server certificates. 
+ PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListServerCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServerCertificatesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListServerCertificates request. +type ListServerCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of server certificates. + ServerCertificateMetadataList []*ServerCertificateMetadata `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListServerCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServerCertificatesOutput) GoString() string { + return s.String() +} + +type ListSigningCertificatesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. 
+ Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSigningCertificatesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListSigningCertificates request. +type ListSigningCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the user's signing certificate information. + Certificates []*SigningCertificate `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. 
+ Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSigningCertificatesOutput) GoString() string { + return s.String() +} + +type ListUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list policies for. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoliciesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListUserPolicies request. +type ListUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. 
If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of policy names. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoliciesOutput) GoString() string { + return s.String() +} + +type ListUsersInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. 
+ MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, + // which would get all user names whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all user names. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListUsersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUsersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListUsers request. +type ListUsersOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // A list of users. + Users []*User `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUsersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUsersOutput) GoString() string { + return s.String() +} + +type ListVirtualMFADevicesInput struct { + _ struct{} `type:"structure"` + + // The status (unassigned or assigned) of the devices to list. 
If you do not + // specify an AssignmentStatus, the action defaults to Any which lists both + // assigned and unassigned virtual MFA devices. + AssignmentStatus *string `type:"string" enum:"assignmentStatusType"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualMFADevicesInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListVirtualMFADevices request. +type ListVirtualMFADevicesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. 
+ // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // The list of virtual MFA devices in the current account that match the AssignmentStatus + // value that was passed in the request. + VirtualMFADevices []*VirtualMFADevice `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualMFADevicesOutput) GoString() string { + return s.String() +} + +// Contains the user name and password create date for a user. +// +// This data type is used as a response element in the CreateLoginProfile +// and GetLoginProfile actions. +type LoginProfile struct { + _ struct{} `type:"structure"` + + // The date when the password for the user was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user, which can be used for signing in to the AWS Management + // Console. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoginProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoginProfile) GoString() string { + return s.String() +} + +// Contains information about an MFA device. +// +// This data type is used as a response element in the ListMFADevices action. +type MFADevice struct { + _ struct{} `type:"structure"` + + // The date when the MFA device was enabled for the user. 
+ EnableDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The user with whom the MFA device is associated. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MFADevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MFADevice) GoString() string { + return s.String() +} + +// Contains information about a managed policy, including the policy's ARN, +// versions, and the number of principal entities (users, groups, and roles) +// that the policy is attached to. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +// +// For more information about managed policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type ManagedPolicyDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of principal entities (users, groups, and roles) that the policy + // is attached to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier for the version of the policy that is set as the default (operative) + // version. 
+ // + // For more information about policy versions, see Versioning for Managed Policies + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the Using IAM guide. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `type:"string"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // A list containing information about the versions of the policy. + PolicyVersionList []*PolicyVersion `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ManagedPolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedPolicyDetail) GoString() string { + return s.String() +} + +// Contains the Amazon Resource Name (ARN) for an IAM OpenID Connect provider. 
+type OpenIDConnectProviderListEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s OpenIDConnectProviderListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpenIDConnectProviderListEntry) GoString() string { + return s.String() +} + +// Contains information about the account password policy. +// +// This data type is used as a response element in the GetAccountPasswordPolicy +// action. +type PasswordPolicy struct { + _ struct{} `type:"structure"` + + // Specifies whether IAM users are allowed to change their own password. + AllowUsersToChangePassword *bool `type:"boolean"` + + // Indicates whether passwords in the account expire. Returns true if MaxPasswordAge + // is contains a value greater than 0. Returns false if MaxPasswordAge is 0 + // or not present. + ExpirePasswords *bool `type:"boolean"` + + // Specifies whether IAM users are prevented from setting a new password after + // their password has expired. + HardExpiry *bool `type:"boolean"` + + // The number of days that an IAM user password is valid. + MaxPasswordAge *int64 `min:"1" type:"integer"` + + // Minimum length to require for IAM user passwords. + MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether to require lowercase characters for IAM user passwords. 
+ RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether to require numbers for IAM user passwords. + RequireNumbers *bool `type:"boolean"` + + // Specifies whether to require symbols for IAM user passwords. + RequireSymbols *bool `type:"boolean"` + + // Specifies whether to require uppercase characters for IAM user passwords. + RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s PasswordPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PasswordPolicy) GoString() string { + return s.String() +} + +// Contains information about a managed policy. +// +// This data type is used as a response element in the CreatePolicy, GetPolicy, +// and ListPolicies actions. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type Policy struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of entities (users, groups, and roles) that the policy is attached + // to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier for the version of the policy that is set as the default version. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. 
+ // + // This element is included in the response to the GetPolicy operation. It + // is not included in the response to the ListPolicies operation. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `type:"string"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Policy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Policy) GoString() string { + return s.String() +} + +// Contains information about an IAM policy, including the policy document. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type PolicyDetail struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string"` + + // The name of the policy. 
+ PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDetail) GoString() string { + return s.String() +} + +// Contains information about a group that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyGroup struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the group. + GroupName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyGroup) GoString() string { + return s.String() +} + +// Contains information about a role that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyRole struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the role. + RoleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyRole) GoString() string { + return s.String() +} + +// Contains information about a user that a managed policy is attached to. 
+// +// This data type is used as a response element in the ListEntitiesForPolicy +// action. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyUser struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyUser) GoString() string { + return s.String() +} + +// Contains information about a version of a managed policy. +// +// This data type is used as a response element in the CreatePolicyVersion, +// GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails +// actions. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyVersion struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy version was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The policy document. + // + // The policy document is returned in the response to the GetPolicyVersion + // and GetAccountAuthorizationDetails operations. It is not returned in the + // response to the CreatePolicyVersion or ListPolicyVersions operations. + Document *string `min:"1" type:"string"` + + // Specifies whether the policy version is set as the policy's default version. + IsDefaultVersion *bool `type:"boolean"` + + // The identifier for the policy version. + // + // Policy version identifiers always begin with v (always lowercase). 
When + // a policy is created, the first policy version is v1. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s PolicyVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyVersion) GoString() string { + return s.String() +} + +// Contains the row and column of a location of a Statement element in a policy +// document. +// +// This data type is used as a member of the Statement type. +type Position struct { + _ struct{} `type:"structure"` + + // The column in the line containing the specified position in the document. + Column *int64 `type:"integer"` + + // The line containing the specified position in the document. + Line *int64 `type:"integer"` +} + +// String returns the string representation +func (s Position) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Position) GoString() string { + return s.String() +} + +type PutGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the group to associate the policy with. + GroupName *string `min:"1" type:"string" required:"true"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. 
+ PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutGroupPolicyInput) GoString() string { + return s.String() +} + +type PutGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutGroupPolicyOutput) GoString() string { + return s.String() +} + +type PutRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role to associate the policy with. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePolicyInput) GoString() string { + return s.String() +} + +type PutRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePolicyOutput) GoString() string { + return s.String() +} + +type PutUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user to associate the policy with. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPolicyInput) GoString() string { + return s.String() +} + +type PutUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPolicyOutput) GoString() string { + return s.String() +} + +type RemoveClientIDFromOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to remove from the IAM OpenID Connect + // provider. For more information about client IDs, see CreateOpenIDConnectProvider. + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // to remove the client ID from. You can get a list of OIDC provider ARNs by + // using the ListOpenIDConnectProviders action. 
+ OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +type RemoveClientIDFromOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type RemoveRoleFromInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to remove. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveRoleFromInstanceProfileInput) GoString() string { + return s.String() +} + +type RemoveRoleFromInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) GoString() string { + return s.String() +} + +type RemoveUserFromGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to remove. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveUserFromGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveUserFromGroupInput) GoString() string { + return s.String() +} + +type RemoveUserFromGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveUserFromGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveUserFromGroupOutput) GoString() string { + return s.String() +} + +// Contains the result of the simulation of a single API action call on a single +// resource. +// +// This data type is used by a member of the EvaluationResult data type. +type ResourceSpecificResult struct { + _ struct{} `type:"structure"` + + // Additional details about the results of the evaluation decision. When there + // are both IAM policies and resource policies, this parameter explains how + // each set of policies contributes to the final evaluation decision. When simulating + // cross-account access to a resource, both the resource-based policy and the + // caller's IAM policy must grant access. + EvalDecisionDetails map[string]*string `type:"map"` + + // The result of the simulation of the simulated API action on the resource + // specified in EvalResourceName. + EvalResourceDecision *string `type:"string" required:"true" enum:"PolicyEvaluationDecisionType"` + + // The name of the simulated resource, in Amazon Resource Name (ARN) format. + EvalResourceName *string `min:"1" type:"string" required:"true"` + + // A list of the statements in the input policies that determine the result + // for this part of the simulation. 
Remember that even if multiple statements + // allow the action on the resource, if any statement denies that action, then + // the explicit deny overrides any allow, and the deny statement is the only + // entry included in the result. + MatchedStatements []*Statement `type:"list"` + + // A list of context keys that are required by the included input policies but + // that were not provided by one of the input parameters. To discover the context + // keys used by a set of policies, you can call GetContextKeysForCustomPolicy + // or GetContextKeysForPrincipalPolicy. + MissingContextValues []*string `type:"list"` +} + +// String returns the string representation +func (s ResourceSpecificResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceSpecificResult) GoString() string { + return s.String() +} + +type ResyncMFADeviceInput struct { + _ struct{} `type:"structure"` + + // An authentication code emitted by the device. + AuthenticationCode1 *string `min:"6" type:"string" required:"true"` + + // A subsequent authentication code emitted by the device. + AuthenticationCode2 *string `min:"6" type:"string" required:"true"` + + // Serial number that uniquely identifies the MFA device. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to resynchronize. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResyncMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResyncMFADeviceInput) GoString() string { + return s.String() +} + +type ResyncMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResyncMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResyncMFADeviceOutput) GoString() string { + return s.String() +} + +// Contains information about an IAM role. +// +// This data type is used as a response element in the following actions: +// +// CreateRole +// +// GetRole +// +// ListRoles +type Role struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the role. For more information + // about ARNs and how to use them in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The policy that grants an entity permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the role was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. 
+ RoleId *string `min:"16" type:"string" required:"true"` + + // The friendly name that identifies the role. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Role) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Role) GoString() string { + return s.String() +} + +// Contains information about an IAM role, including all of the role's policies. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type RoleDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The trust policy that grants permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // A list of managed policies attached to the role. These policies are the role's + // access (permissions) policies. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the role was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Contains a list of instance profiles. + InstanceProfileList []*InstanceProfile `type:"list"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The stable and unique string identifying the role. 
For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + RoleId *string `min:"16" type:"string"` + + // The friendly name that identifies the role. + RoleName *string `min:"1" type:"string"` + + // A list of inline policies embedded in the role. These policies are the role's + // access (permissions) policies. + RolePolicyList []*PolicyDetail `type:"list"` +} + +// String returns the string representation +func (s RoleDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleDetail) GoString() string { + return s.String() +} + +// Contains the list of SAML providers for this account. +type SAMLProviderListEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider. + Arn *string `min:"20" type:"string"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SAMLProviderListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SAMLProviderListEntry) GoString() string { + return s.String() +} + +// Contains information about an SSH public key. +// +// This data type is used as a response element in the GetSSHPublicKey and +// UploadSSHPublicKey actions. +type SSHPublicKey struct { + _ struct{} `type:"structure"` + + // The MD5 message digest of the SSH public key. + Fingerprint *string `min:"48" type:"string" required:"true"` + + // The SSH public key. + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The unique identifier for the SSH public key. 
+ SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means the key can be used for authentication + // with an AWS CodeCommit repository. Inactive means the key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the IAM user associated with the SSH public key. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSHPublicKey) GoString() string { + return s.String() +} + +// Contains information about an SSH public key, without the key's body or fingerprint. +// +// This data type is used as a response element in the ListSSHPublicKeys action. +type SSHPublicKeyMetadata struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means the key can be used for authentication + // with an AWS CodeCommit repository. Inactive means the key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the IAM user associated with the SSH public key. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSHPublicKeyMetadata) GoString() string { + return s.String() +} + +// Contains information about a server certificate. +// +// This data type is used as a response element in the GetServerCertificate +// action. +type ServerCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the public key certificate chain. + CertificateChain *string `min:"1" type:"string"` + + // The meta information of the server certificate, such as its name, path, ID, + // and ARN. + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ServerCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerCertificate) GoString() string { + return s.String() +} + +// Contains information about a server certificate without its certificate body, +// certificate chain, and private key. +// +// This data type is used as a response element in the UploadServerCertificate +// and ListServerCertificates actions. +type ServerCertificateMetadata struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the server certificate. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date on which the certificate is set to expire. 
+ Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The path to the server certificate. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the server certificate. For more + // information about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + ServerCertificateId *string `min:"16" type:"string" required:"true"` + + // The name that identifies the server certificate. + ServerCertificateName *string `min:"1" type:"string" required:"true"` + + // The date when the server certificate was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ServerCertificateMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerCertificateMetadata) GoString() string { + return s.String() +} + +type SetDefaultPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The version of the policy to set as the default (operative) version. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. 
+ VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionInput) GoString() string { + return s.String() +} + +type SetDefaultPolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionOutput) GoString() string { + return s.String() +} + +// Contains information about an X.509 signing certificate. +// +// This data type is used as a response element in the UploadSigningCertificate +// and ListSigningCertificates actions. +type SigningCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The ID for the signing certificate. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The status of the signing certificate. Active means the key is valid for + // API calls, while Inactive means it is not. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date when the signing certificate was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the user the signing certificate is associated with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SigningCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SigningCertificate) GoString() string { + return s.String() +} + +type SimulateCustomPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API actions to evaluate in the simulation. 
Each action + // is evaluated against each resource. Each action must include the service + // identifier, such as iam:CreateUser. + ActionNames []*string `type:"list" required:"true"` + + // The ARN of the user that you want to use as the simulated caller of the APIs. + // CallerArn is required if you include a ResourcePolicy so that the policy's + // Principal element has a value to use in evaluating the policy. + // + // You can specify only the ARN of an IAM user. You cannot specify the ARN + // of an assumed role, federated user, or a service principal. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated by a Condition element in one of the + // simulated IAM permission policies, the corresponding value is supplied. + ContextEntries []*ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A list of policy documents to include in the simulation. 
Each document is + // specified as a string containing the complete, valid JSON text of an IAM + // policy. Do not include any resource-based policies in this parameter. Any + // resource-based policy must be submitted with the ResourcePolicy parameter. + // The policies cannot be "scope-down" policies, such as you could include in + // a call to GetFederationToken (http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetFederationToken.html) + // or one of the AssumeRole (http://docs.aws.amazon.com/IAM/latest/APIReference/API_AssumeRole.html) + // APIs to restrict what a user can do while using the temporary credentials. + PolicyInputList []*string `type:"list" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. + // + // The simulation does not automatically retrieve policies for the specified + // resources. If you want to include a resource policy in the simulation, then + // you must include the policy as a string in the ResourcePolicy parameter. + // + // If you include a ResourcePolicy, then it must be applicable to all of the + // resources included in the simulation or you receive an invalid input error. + ResourceArns []*string `type:"list"` + + // Specifies the type of simulation to run. Different APIs that support resource-based + // policies require different combinations of resources. By specifying the type + // of simulation to run, you enable the policy simulator to enforce the presence + // of the required resources to ensure reliable simulation results. If your + // simulation does not match one of the following scenarios, then you can omit + // this parameter. 
The following list shows each of the supported scenario values + // and the resources that you must define to run the simulation. + // + // Each of the EC2 scenarios requires that you specify instance, image, and + // security-group resources. If your scenario includes an EBS volume, then you + // must specify that volume as a resource. If the EC2 scenario includes VPC, + // then you must supply the network-interface resource. If it includes an IP + // subnet, then you must specify the subnet resource. For more information on + // the EC2 scenario options, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) + // in the AWS EC2 User Guide. + // + // EC2-Classic-InstanceStore + // + // instance, image, security-group + // + // EC2-Classic-EBS + // + // instance, image, security-group, volume + // + // EC2-VPC-InstanceStore + // + // instance, image, security-group, network-interface + // + // EC2-VPC-InstanceStore-Subnet + // + // instance, image, security-group, network-interface, subnet + // + // EC2-VPC-EBS + // + // instance, image, security-group, network-interface, volume + // + // EC2-VPC-EBS-Subnet + // + // instance, image, security-group, network-interface, subnet, volume + ResourceHandlingOption *string `min:"1" type:"string"` + + // An AWS account ID that specifies the owner of any simulated resource that + // does not identify its owner in the resource ARN, such as an S3 bucket or + // object. If ResourceOwner is specified, it is also used as the account owner + // of any ResourcePolicy included in the simulation. If the ResourceOwner parameter + // is not specified, then the owner of the resources and the resource policy + // defaults to the account of the identity provided in CallerArn. This parameter + // is required only if you specify a resource-based policy and account that + // owns the resource is different from the account that owns the simulated calling + // user CallerArn. 
+ ResourceOwner *string `min:"1" type:"string"` + + // A resource-based policy to include in the simulation provided as a string. + // Each resource in the simulation is treated as if it had this policy attached. + // You can include only one resource-based policy in a simulation. + ResourcePolicy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulateCustomPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulateCustomPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy +// request. +type SimulatePolicyResponse struct { + _ struct{} `type:"structure"` + + // The results of the simulation. + EvaluationResults []*EvaluationResult `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all of your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulatePolicyResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulatePolicyResponse) GoString() string { + return s.String() +} + +type SimulatePrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API actions to evaluate in the simulation. Each action + // is evaluated for each resource. 
Each action must include the service identifier, + // such as iam:CreateUser. + ActionNames []*string `type:"list" required:"true"` + + // The ARN of the user that you want to specify as the simulated caller of the + // APIs. If you do not specify a CallerArn, it defaults to the ARN of the user + // that you specify in PolicySourceArn, if you specified a user. If you include + // both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) + // and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result + // is that you simulate calling the APIs as Bob, as if Bob had David's policies. + // + // You can specify only the ARN of an IAM user. You cannot specify the ARN + // of an assumed role, federated user, or a service principal. + // + // CallerArn is required if you include a ResourcePolicy and the PolicySourceArn + // is not the ARN for an IAM user. This is required so that the resource-based + // policy's Principal element has a value to use in evaluating the policy. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated by a Condition element in one of the + // simulated policies, the corresponding value is supplied. + ContextEntries []*ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // This parameter is optional. If you do not include it, it defaults to 100. 
+ // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // An optional list of additional policy documents to include in the simulation. + // Each document is specified as a string containing the complete, valid JSON + // text of an IAM policy. + PolicyInputList []*string `type:"list"` + + // The Amazon Resource Name (ARN) of a user, group, or role whose policies you + // want to include in the simulation. If you specify a user, group, or role, + // the simulation includes all policies that are associated with that entity. + // If you specify a user, the simulation also includes all policies that are + // attached to any groups the user belongs to. + PolicySourceArn *string `min:"20" type:"string" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. + // + // The simulation does not automatically retrieve policies for the specified + // resources. If you want to include a resource policy in the simulation, then + // you must include the policy as a string in the ResourcePolicy parameter. + ResourceArns []*string `type:"list"` + + // Specifies the type of simulation to run. Different APIs that support resource-based + // policies require different combinations of resources. By specifying the type + // of simulation to run, you enable the policy simulator to enforce the presence + // of the required resources to ensure reliable simulation results. 
If your + // simulation does not match one of the following scenarios, then you can omit + // this parameter. The following list shows each of the supported scenario values + // and the resources that you must define to run the simulation. + // + // Each of the EC2 scenarios requires that you specify instance, image, and + // security-group resources. If your scenario includes an EBS volume, then you + // must specify that volume as a resource. If the EC2 scenario includes VPC, + // then you must supply the network-interface resource. If it includes an IP + // subnet, then you must specify the subnet resource. For more information on + // the EC2 scenario options, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) + // in the AWS EC2 User Guide. + // + // EC2-Classic-InstanceStore + // + // instance, image, security-group + // + // EC2-Classic-EBS + // + // instance, image, security-group, volume + // + // EC2-VPC-InstanceStore + // + // instance, image, security-group, network-interface + // + // EC2-VPC-InstanceStore-Subnet + // + // instance, image, security-group, network-interface, subnet + // + // EC2-VPC-EBS + // + // instance, image, security-group, network-interface, volume + // + // EC2-VPC-EBS-Subnet + // + // instance, image, security-group, network-interface, subnet, volume + ResourceHandlingOption *string `min:"1" type:"string"` + + // An AWS account ID that specifies the owner of any simulated resource that + // does not identify its owner in the resource ARN, such as an S3 bucket or + // object. If ResourceOwner is specified, it is also used as the account owner + // of any ResourcePolicy included in the simulation. If the ResourceOwner parameter + // is not specified, then the owner of the resources and the resource policy + // defaults to the account of the identity provided in CallerArn. 
This parameter + // is required only if you specify a resource-based policy and account that + // owns the resource is different from the account that owns the simulated calling + // user CallerArn. + ResourceOwner *string `min:"1" type:"string"` + + // A resource-based policy to include in the simulation provided as a string. + // Each resource in the simulation is treated as if it had this policy attached. + // You can include only one resource-based policy in a simulation. + ResourcePolicy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulatePrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulatePrincipalPolicyInput) GoString() string { + return s.String() +} + +// Contains a reference to a Statement element in a policy document that determines +// the result of the simulation. +// +// This data type is used by the MatchedStatements member of the EvaluationResult +// type. +type Statement struct { + _ struct{} `type:"structure"` + + // The row and column of the end of a Statement in an IAM policy. + EndPosition *Position `type:"structure"` + + // The identifier of the policy that was provided as an input. + SourcePolicyId *string `type:"string"` + + // The type of the policy. + SourcePolicyType *string `type:"string" enum:"PolicySourceType"` + + // The row and column of the beginning of the Statement in an IAM policy. + StartPosition *Position `type:"structure"` +} + +// String returns the string representation +func (s Statement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Statement) GoString() string { + return s.String() +} + +type UpdateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID of the secret access key you want to update. 
+ AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The status you want to assign to the secret access key. Active means the + // key can be used for API calls to AWS, while Inactive means the key cannot + // be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the user whose key you want to update. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccessKeyInput) GoString() string { + return s.String() +} + +type UpdateAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccessKeyOutput) GoString() string { + return s.String() +} + +type UpdateAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` + + // Allows all IAM users in your account to use the AWS Management Console to + // change their own passwords. For more information, see Letting IAM Users Change + // Their Own Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/HowToPwdIAMUser.html) + // in the IAM User Guide. + // + // Default value: false + AllowUsersToChangePassword *bool `type:"boolean"` + + // Prevents IAM users from setting a new password after their password has expired. + // + // Default value: false + HardExpiry *bool `type:"boolean"` + + // The number of days that an IAM user password is valid. The default value + // of 0 means IAM user passwords never expire. + // + // Default value: 0 + MaxPasswordAge *int64 `min:"1" type:"integer"` + + // The minimum number of characters allowed in an IAM user password. 
+ // + // Default value: 6 + MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. The default value of 0 means IAM users are not prevented from reusing + // previous passwords. + // + // Default value: 0 + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether IAM user passwords must contain at least one lowercase + // character from the ISO basic Latin alphabet (a to z). + // + // Default value: false + RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one numeric character + // (0 to 9). + // + // Default value: false + RequireNumbers *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one of the following + // non-alphanumeric characters: + // + // ! @ # $ % ^ & * ( ) _ + - = [ ] { } | ' + // + // Default value: false + RequireSymbols *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one uppercase + // character from the ISO basic Latin alphabet (A to Z). 
+ // + // Default value: false + RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +type UpdateAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type UpdateAssumeRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy that grants an entity permission to assume the role. + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the role to update. + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssumeRolePolicyInput) GoString() string { + return s.String() +} + +type UpdateAssumeRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssumeRolePolicyOutput) GoString() string { + return s.String() +} + +type UpdateGroupInput struct { + _ struct{} `type:"structure"` + + // Name of the group to update. If you're changing the name of the group, this + // is the original name. + GroupName *string `min:"1" type:"string" required:"true"` + + // New name for the group. Only include this if changing the group's name. 
+ NewGroupName *string `min:"1" type:"string"` + + // New path for the group. Only include this if changing the group's path. + NewPath *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGroupInput) GoString() string { + return s.String() +} + +type UpdateGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGroupOutput) GoString() string { + return s.String() +} + +type UpdateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the specified user. + Password *string `min:"1" type:"string"` + + // Require the specified user to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user whose password you want to update. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLoginProfileInput) GoString() string { + return s.String() +} + +type UpdateLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLoginProfileOutput) GoString() string { + return s.String() +} + +type UpdateOpenIDConnectProviderThumbprintInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // to update the thumbprint for. 
You can get a list of OIDC provider ARNs by + // using the ListOpenIDConnectProviders action. + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + ThumbprintList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) GoString() string { + return s.String() +} + +type UpdateOpenIDConnectProviderThumbprintOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) GoString() string { + return s.String() +} + +type UpdateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. You must generate the metadata document using + // the identity management software that is used as your organization's IdP. + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the SAML provider to update. 
+ SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSAMLProviderInput) GoString() string { + return s.String() +} + +// Contains the response to a successful UpdateSAMLProvider request. +type UpdateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider that was updated. + SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s UpdateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSAMLProviderOutput) GoString() string { + return s.String() +} + +type UpdateSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status to assign to the SSH public key. Active means the key can be used + // for authentication with an AWS CodeCommit repository. Inactive means the + // key cannot be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user associated with the SSH public key. 
+ UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSSHPublicKeyInput) GoString() string { + return s.String() +} + +type UpdateSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type UpdateServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The new path for the server certificate. Include this only if you are updating + // the server certificate's path. + NewPath *string `min:"1" type:"string"` + + // The new name for the server certificate. Include this only if you are updating + // the server certificate's name. The name of the certificate cannot contain + // any spaces. + NewServerCertificateName *string `min:"1" type:"string"` + + // The name of the server certificate that you want to update. 
+ ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServerCertificateInput) GoString() string { + return s.String() +} + +type UpdateServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServerCertificateOutput) GoString() string { + return s.String() +} + +type UpdateSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate you want to update. + CertificateId *string `min:"24" type:"string" required:"true"` + + // The status you want to assign to the certificate. Active means the certificate + // can be used for API calls to AWS, while Inactive means the certificate cannot + // be used. + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the user the signing certificate belongs to. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSigningCertificateInput) GoString() string { + return s.String() +} + +type UpdateSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSigningCertificateOutput) GoString() string { + return s.String() +} + +type UpdateUserInput struct { + _ struct{} `type:"structure"` + + // New path for the user. 
Include this parameter only if you're changing the + // user's path. + NewPath *string `min:"1" type:"string"` + + // New name for the user. Include this parameter only if you're changing the + // user's name. + NewUserName *string `min:"1" type:"string"` + + // Name of the user to update. If you're changing the name of the user, this + // is the original user name. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserInput) GoString() string { + return s.String() +} + +type UpdateUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserOutput) GoString() string { + return s.String() +} + +type UploadSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The SSH public key. The public key must be encoded in ssh-rsa format or PEM + // format. + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The name of the IAM user to associate the SSH public key with. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful UploadSSHPublicKey request. +type UploadSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the SSH public key. 
+ SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type UploadServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate in PEM-encoded format. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the certificate chain. This is typically a concatenation + // of the PEM-encoded public key certificates of the chain. + CertificateChain *string `min:"1" type:"string"` + + // The path for the server certificate. For more information about paths, see + // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // If you are uploading a server certificate specifically for use with Amazon + // CloudFront distributions, you must specify a path using the --path option. + // The path must begin with /cloudfront and must include a trailing slash (for + // example, /cloudfront/test/). + Path *string `min:"1" type:"string"` + + // The contents of the private key in PEM-encoded format. + PrivateKey *string `min:"1" type:"string" required:"true"` + + // The name for the server certificate. Do not include the path in this value. + // The name of the certificate cannot contain any spaces. 
+ ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadServerCertificateInput) GoString() string { + return s.String() +} + +// Contains the response to a successful UploadServerCertificate request. +type UploadServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // The meta information of the uploaded server certificate without its certificate + // body, certificate chain, and private key. + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure"` +} + +// String returns the string representation +func (s UploadServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadServerCertificateOutput) GoString() string { + return s.String() +} + +type UploadSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The name of the user the signing certificate is for. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UploadSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSigningCertificateInput) GoString() string { + return s.String() +} + +// Contains the response to a successful UploadSigningCertificate request. +type UploadSigningCertificateOutput struct { + _ struct{} `type:"structure"` + + // Information about the certificate. 
+ Certificate *SigningCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UploadSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSigningCertificateOutput) GoString() string { + return s.String() +} + +// Contains information about an IAM user entity. +// +// This data type is used as a response element in the following actions: +// +// CreateUser +// +// GetUser +// +// ListUsers +type User struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the user. For more information + // about ARNs and how to use ARNs in policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Arn *string `min:"20" type:"string" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user's password was last used to sign in to an AWS website. For + // a list of AWS websites that capture a user's last sign-in time, see the Credential + // Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) + // topic in the Using IAM guide. If a password is used more than once in a five-minute + // span, only the first use is returned in this field. This field is null (not + // present) when: + // + // The user does not have a password + // + // The password exists but has never been used (at least not since IAM started + // tracking this information on October 20th, 2014 + // + // there is no sign-in data associated with the user + // + // This value is returned only in the GetUser and ListUsers actions. 
+ PasswordLastUsed *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The path to the user. For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the user. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + UserId *string `min:"16" type:"string" required:"true"` + + // The friendly name identifying the user. + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s User) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s User) GoString() string { + return s.String() +} + +// Contains information about an IAM user, including all the user's policies +// and all the IAM groups the user is in. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// action. +type UserDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and + // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // A list of the managed policies attached to the user. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of IAM groups that the user is in. + GroupList []*string `type:"list"` + + // The path to the user. 
For more information about paths, see IAM Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The stable and unique string identifying the user. For more information about + // IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + UserId *string `min:"16" type:"string"` + + // The friendly name identifying the user. + UserName *string `min:"1" type:"string"` + + // A list of the inline policies embedded in the user. + UserPolicyList []*PolicyDetail `type:"list"` +} + +// String returns the string representation +func (s UserDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserDetail) GoString() string { + return s.String() +} + +// Contains information about a virtual MFA device. +type VirtualMFADevice struct { + _ struct{} `type:"structure"` + + // The Base32 seed defined as specified in RFC3548 (http://www.ietf.org/rfc/rfc3548.txt). + // The Base32StringSeed is Base64-encoded. + Base32StringSeed []byte `type:"blob"` + + // The date and time on which the virtual MFA device was enabled. + EnableDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A QR code PNG image that encodes otpauth://totp/$virtualMFADeviceName@$AccountName?secret=$Base32String + // where $virtualMFADeviceName is one of the create call arguments, AccountName + // is the user name if set (otherwise, the account ID otherwise), and Base32String + // is the seed in Base32 format. The Base32String value is Base64-encoded. + QRCodePNG []byte `type:"blob"` + + // The serial number associated with VirtualMFADevice. + SerialNumber *string `min:"9" type:"string" required:"true"` + + // Contains information about an IAM user entity. 
+ // + // This data type is used as a response element in the following actions: + // + // CreateUser + // + // GetUser + // + // ListUsers + User *User `type:"structure"` +} + +// String returns the string representation +func (s VirtualMFADevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualMFADevice) GoString() string { + return s.String() +} + +const ( + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumString = "string" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumStringList = "stringList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumNumeric = "numeric" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumNumericList = "numericList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumBoolean = "boolean" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumBooleanList = "booleanList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumIp = "ip" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumIpList = "ipList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumBinary = "binary" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumBinaryList = "binaryList" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumDate = "date" + // @enum ContextKeyTypeEnum + ContextKeyTypeEnumDateList = "dateList" +) + +const ( + // @enum EntityType + EntityTypeUser = "User" + // @enum EntityType + EntityTypeRole = "Role" + // @enum EntityType + EntityTypeGroup = "Group" + // @enum EntityType + EntityTypeLocalManagedPolicy = "LocalManagedPolicy" + // @enum EntityType + EntityTypeAwsmanagedPolicy = "AWSManagedPolicy" +) + +const ( + // @enum PolicyEvaluationDecisionType + PolicyEvaluationDecisionTypeAllowed = "allowed" + // @enum PolicyEvaluationDecisionType + PolicyEvaluationDecisionTypeExplicitDeny = "explicitDeny" + // @enum PolicyEvaluationDecisionType + PolicyEvaluationDecisionTypeImplicitDeny = "implicitDeny" +) + +const ( + // @enum PolicySourceType + PolicySourceTypeUser = "user" + // @enum PolicySourceType + 
PolicySourceTypeGroup = "group" + // @enum PolicySourceType + PolicySourceTypeRole = "role" + // @enum PolicySourceType + PolicySourceTypeAwsManaged = "aws-managed" + // @enum PolicySourceType + PolicySourceTypeUserManaged = "user-managed" + // @enum PolicySourceType + PolicySourceTypeResource = "resource" + // @enum PolicySourceType + PolicySourceTypeNone = "none" +) + +const ( + // @enum ReportFormatType + ReportFormatTypeTextCsv = "text/csv" +) + +const ( + // @enum ReportStateType + ReportStateTypeStarted = "STARTED" + // @enum ReportStateType + ReportStateTypeInprogress = "INPROGRESS" + // @enum ReportStateType + ReportStateTypeComplete = "COMPLETE" +) + +const ( + // @enum assignmentStatusType + AssignmentStatusTypeAssigned = "Assigned" + // @enum assignmentStatusType + AssignmentStatusTypeUnassigned = "Unassigned" + // @enum assignmentStatusType + AssignmentStatusTypeAny = "Any" +) + +const ( + // @enum encodingType + EncodingTypeSsh = "SSH" + // @enum encodingType + EncodingTypePem = "PEM" +) + +const ( + // @enum policyScopeType + PolicyScopeTypeAll = "All" + // @enum policyScopeType + PolicyScopeTypeAws = "AWS" + // @enum policyScopeType + PolicyScopeTypeLocal = "Local" +) + +const ( + // @enum statusType + StatusTypeActive = "Active" + // @enum statusType + StatusTypeInactive = "Inactive" +) + +const ( + // @enum summaryKeyType + SummaryKeyTypeUsers = "Users" + // @enum summaryKeyType + SummaryKeyTypeUsersQuota = "UsersQuota" + // @enum summaryKeyType + SummaryKeyTypeGroups = "Groups" + // @enum summaryKeyType + SummaryKeyTypeGroupsQuota = "GroupsQuota" + // @enum summaryKeyType + SummaryKeyTypeServerCertificates = "ServerCertificates" + // @enum summaryKeyType + SummaryKeyTypeServerCertificatesQuota = "ServerCertificatesQuota" + // @enum summaryKeyType + SummaryKeyTypeUserPolicySizeQuota = "UserPolicySizeQuota" + // @enum summaryKeyType + SummaryKeyTypeGroupPolicySizeQuota = "GroupPolicySizeQuota" + // @enum summaryKeyType + 
SummaryKeyTypeGroupsPerUserQuota = "GroupsPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeSigningCertificatesPerUserQuota = "SigningCertificatesPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeAccessKeysPerUserQuota = "AccessKeysPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypeMfadevices = "MFADevices" + // @enum summaryKeyType + SummaryKeyTypeMfadevicesInUse = "MFADevicesInUse" + // @enum summaryKeyType + SummaryKeyTypeAccountMfaenabled = "AccountMFAEnabled" + // @enum summaryKeyType + SummaryKeyTypeAccountAccessKeysPresent = "AccountAccessKeysPresent" + // @enum summaryKeyType + SummaryKeyTypeAccountSigningCertificatesPresent = "AccountSigningCertificatesPresent" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerGroupQuota = "AttachedPoliciesPerGroupQuota" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerRoleQuota = "AttachedPoliciesPerRoleQuota" + // @enum summaryKeyType + SummaryKeyTypeAttachedPoliciesPerUserQuota = "AttachedPoliciesPerUserQuota" + // @enum summaryKeyType + SummaryKeyTypePolicies = "Policies" + // @enum summaryKeyType + SummaryKeyTypePoliciesQuota = "PoliciesQuota" + // @enum summaryKeyType + SummaryKeyTypePolicySizeQuota = "PolicySizeQuota" + // @enum summaryKeyType + SummaryKeyTypePolicyVersionsInUse = "PolicyVersionsInUse" + // @enum summaryKeyType + SummaryKeyTypePolicyVersionsInUseQuota = "PolicyVersionsInUseQuota" + // @enum summaryKeyType + SummaryKeyTypeVersionsPerPolicyQuota = "VersionsPerPolicyQuota" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/examples_test.go 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1,2366 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package iam_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleIAM_AddClientIDToOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.AddClientIDToOpenIDConnectProviderInput{ + ClientID: aws.String("clientIDType"), // Required + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.AddClientIDToOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AddRoleToInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.AddRoleToInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.AddRoleToInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AddUserToGroup() { + svc := iam.New(session.New()) + + params := &iam.AddUserToGroupInput{ + GroupName: aws.String("groupNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.AddUserToGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_AttachGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.AttachGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AttachRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachRolePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.AttachRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_AttachUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.AttachUserPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.AttachUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ChangePassword() { + svc := iam.New(session.New()) + + params := &iam.ChangePasswordInput{ + NewPassword: aws.String("passwordType"), // Required + OldPassword: aws.String("passwordType"), // Required + } + resp, err := svc.ChangePassword(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_CreateAccessKey() { + svc := iam.New(session.New()) + + params := &iam.CreateAccessKeyInput{ + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.CreateAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateAccountAlias() { + svc := iam.New(session.New()) + + params := &iam.CreateAccountAliasInput{ + AccountAlias: aws.String("accountAliasType"), // Required + } + resp, err := svc.CreateAccountAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateGroup() { + svc := iam.New(session.New()) + + params := &iam.CreateGroupInput{ + GroupName: aws.String("groupNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.CreateInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_CreateLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.CreateLoginProfileInput{ + Password: aws.String("passwordType"), // Required + UserName: aws.String("userNameType"), // Required + PasswordResetRequired: aws.Bool(true), + } + resp, err := svc.CreateLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.CreateOpenIDConnectProviderInput{ + ThumbprintList: []*string{ // Required + aws.String("thumbprintType"), // Required + // More values... + }, + Url: aws.String("OpenIDConnectProviderUrlType"), // Required + ClientIDList: []*string{ + aws.String("clientIDType"), // Required + // More values... + }, + } + resp, err := svc.CreateOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreatePolicy() { + svc := iam.New(session.New()) + + params := &iam.CreatePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + Description: aws.String("policyDescriptionType"), + Path: aws.String("policyPathType"), + } + resp, err := svc.CreatePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_CreatePolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.CreatePolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + PolicyDocument: aws.String("policyDocumentType"), // Required + SetAsDefault: aws.Bool(true), + } + resp, err := svc.CreatePolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateRole() { + svc := iam.New(session.New()) + + params := &iam.CreateRoleInput{ + AssumeRolePolicyDocument: aws.String("policyDocumentType"), // Required + RoleName: aws.String("roleNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.CreateSAMLProviderInput{ + Name: aws.String("SAMLProviderNameType"), // Required + SAMLMetadataDocument: aws.String("SAMLMetadataDocumentType"), // Required + } + resp, err := svc.CreateSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateUser() { + svc := iam.New(session.New()) + + params := &iam.CreateUserInput{ + UserName: aws.String("userNameType"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_CreateVirtualMFADevice() { + svc := iam.New(session.New()) + + params := &iam.CreateVirtualMFADeviceInput{ + VirtualMFADeviceName: aws.String("virtualMFADeviceName"), // Required + Path: aws.String("pathType"), + } + resp, err := svc.CreateVirtualMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeactivateMFADevice() { + svc := iam.New(session.New()) + + params := &iam.DeactivateMFADeviceInput{ + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeactivateMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteAccessKey() { + svc := iam.New(session.New()) + + params := &iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.DeleteAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteAccountAlias() { + svc := iam.New(session.New()) + + params := &iam.DeleteAccountAliasInput{ + AccountAlias: aws.String("accountAliasType"), // Required + } + resp, err := svc.DeleteAccountAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteAccountPasswordPolicy() { + svc := iam.New(session.New()) + + var params *iam.DeleteAccountPasswordPolicyInput + resp, err := svc.DeleteAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteGroup() { + svc := iam.New(session.New()) + + params := &iam.DeleteGroupInput{ + GroupName: aws.String("groupNameType"), // Required + } + resp, err := svc.DeleteGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.DeleteGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.DeleteInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + } + resp, err := svc.DeleteInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DeleteLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.DeleteLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DeleteLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.DeleteOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.DeleteOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeletePolicy() { + svc := iam.New(session.New()) + + params := &iam.DeletePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.DeletePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeletePolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.DeletePolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.DeletePolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DeleteRole() { + svc := iam.New(session.New()) + + params := &iam.DeleteRoleInput{ + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DeleteRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteRolePolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DeleteRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.DeleteSAMLProviderInput{ + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.DeleteSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.DeleteSSHPublicKeyInput{ + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DeleteSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DeleteServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.DeleteServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + } + resp, err := svc.DeleteServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.DeleteSigningCertificateInput{ + CertificateId: aws.String("certificateIdType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.DeleteSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteUser() { + svc := iam.New(session.New()) + + params := &iam.DeleteUserInput{ + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeleteUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DeleteUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.DeleteUserPolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.DeleteUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_DeleteVirtualMFADevice() { + svc := iam.New(session.New()) + + params := &iam.DeleteVirtualMFADeviceInput{ + SerialNumber: aws.String("serialNumberType"), // Required + } + resp, err := svc.DeleteVirtualMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DetachGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.DetachGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DetachRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachRolePolicyInput{ + PolicyArn: aws.String("arnType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.DetachRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_DetachUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.DetachUserPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.DetachUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_EnableMFADevice() { + svc := iam.New(session.New()) + + params := &iam.EnableMFADeviceInput{ + AuthenticationCode1: aws.String("authenticationCodeType"), // Required + AuthenticationCode2: aws.String("authenticationCodeType"), // Required + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.EnableMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GenerateCredentialReport() { + svc := iam.New(session.New()) + + var params *iam.GenerateCredentialReportInput + resp, err := svc.GenerateCredentialReport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccessKeyLastUsed() { + svc := iam.New(session.New()) + + params := &iam.GetAccessKeyLastUsedInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + } + resp, err := svc.GetAccessKeyLastUsed(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountAuthorizationDetails() { + svc := iam.New(session.New()) + + params := &iam.GetAccountAuthorizationDetailsInput{ + Filter: []*string{ + aws.String("EntityType"), // Required + // More values... + }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.GetAccountAuthorizationDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountPasswordPolicy() { + svc := iam.New(session.New()) + + var params *iam.GetAccountPasswordPolicyInput + resp, err := svc.GetAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetAccountSummary() { + svc := iam.New(session.New()) + + var params *iam.GetAccountSummaryInput + resp, err := svc.GetAccountSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetContextKeysForCustomPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetContextKeysForCustomPolicyInput{ + PolicyInputList: []*string{ // Required + aws.String("policyDocumentType"), // Required + // More values... + }, + } + resp, err := svc.GetContextKeysForCustomPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetContextKeysForPrincipalPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetContextKeysForPrincipalPolicyInput{ + PolicySourceArn: aws.String("arnType"), // Required + PolicyInputList: []*string{ + aws.String("policyDocumentType"), // Required + // More values... + }, + } + resp, err := svc.GetContextKeysForPrincipalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GetCredentialReport() { + svc := iam.New(session.New()) + + var params *iam.GetCredentialReportInput + resp, err := svc.GetCredentialReport(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetGroup() { + svc := iam.New(session.New()) + + params := &iam.GetGroupInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.GetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.GetGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.GetInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + } + resp, err := svc.GetInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GetLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.GetLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.GetLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.GetOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.GetOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + } + resp, err := svc.GetPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetPolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.GetPolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.GetPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GetRole() { + svc := iam.New(session.New()) + + params := &iam.GetRoleInput{ + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.GetRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.GetRolePolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.GetRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.GetSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.GetSSHPublicKeyInput{ + Encoding: aws.String("encodingType"), // Required + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.GetSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_GetServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.GetServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + } + resp, err := svc.GetServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetUser() { + svc := iam.New(session.New()) + + params := &iam.GetUserInput{ + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.GetUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_GetUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.GetUserPolicyInput{ + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.GetUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAccessKeys() { + svc := iam.New(session.New()) + + params := &iam.ListAccessKeysInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListAccessKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListAccountAliases() { + svc := iam.New(session.New()) + + params := &iam.ListAccountAliasesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListAccountAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAttachedGroupPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedGroupPoliciesInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedGroupPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListAttachedRolePolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedRolePoliciesInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedRolePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListAttachedUserPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListAttachedUserPoliciesInput{ + UserName: aws.String("userNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("policyPathType"), + } + resp, err := svc.ListAttachedUserPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListEntitiesForPolicy() { + svc := iam.New(session.New()) + + params := &iam.ListEntitiesForPolicyInput{ + PolicyArn: aws.String("arnType"), // Required + EntityFilter: aws.String("EntityType"), + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathType"), + } + resp, err := svc.ListEntitiesForPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListGroupPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListGroupPoliciesInput{ + GroupName: aws.String("groupNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListGroupPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListGroups() { + svc := iam.New(session.New()) + + params := &iam.ListGroupsInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListGroupsForUser() { + svc := iam.New(session.New()) + + params := &iam.ListGroupsForUserInput{ + UserName: aws.String("existingUserNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListGroupsForUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListInstanceProfiles() { + svc := iam.New(session.New()) + + params := &iam.ListInstanceProfilesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListInstanceProfiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListInstanceProfilesForRole() { + svc := iam.New(session.New()) + + params := &iam.ListInstanceProfilesForRoleInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListInstanceProfilesForRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListMFADevices() { + svc := iam.New(session.New()) + + params := &iam.ListMFADevicesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListMFADevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListOpenIDConnectProviders() { + svc := iam.New(session.New()) + + var params *iam.ListOpenIDConnectProvidersInput + resp, err := svc.ListOpenIDConnectProviders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListPoliciesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + OnlyAttached: aws.Bool(true), + PathPrefix: aws.String("policyPathType"), + Scope: aws.String("policyScopeType"), + } + resp, err := svc.ListPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListPolicyVersions() { + svc := iam.New(session.New()) + + params := &iam.ListPolicyVersionsInput{ + PolicyArn: aws.String("arnType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListPolicyVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListRolePolicies() { + svc := iam.New(session.New()) + + params := &iam.ListRolePoliciesInput{ + RoleName: aws.String("roleNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListRolePolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListRoles() { + svc := iam.New(session.New()) + + params := &iam.ListRolesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListRoles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListSAMLProviders() { + svc := iam.New(session.New()) + + var params *iam.ListSAMLProvidersInput + resp, err := svc.ListSAMLProviders(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListSSHPublicKeys() { + svc := iam.New(session.New()) + + params := &iam.ListSSHPublicKeysInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("userNameType"), + } + resp, err := svc.ListSSHPublicKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListServerCertificates() { + svc := iam.New(session.New()) + + params := &iam.ListServerCertificatesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListServerCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListSigningCertificates() { + svc := iam.New(session.New()) + + params := &iam.ListSigningCertificatesInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.ListSigningCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListUserPolicies() { + svc := iam.New(session.New()) + + params := &iam.ListUserPoliciesInput{ + UserName: aws.String("existingUserNameType"), // Required + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListUserPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ListUsers() { + svc := iam.New(session.New()) + + params := &iam.ListUsersInput{ + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PathPrefix: aws.String("pathPrefixType"), + } + resp, err := svc.ListUsers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_ListVirtualMFADevices() { + svc := iam.New(session.New()) + + params := &iam.ListVirtualMFADevicesInput{ + AssignmentStatus: aws.String("assignmentStatusType"), + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListVirtualMFADevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_PutGroupPolicy() { + svc := iam.New(session.New()) + + params := &iam.PutGroupPolicyInput{ + GroupName: aws.String("groupNameType"), // Required + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + } + resp, err := svc.PutGroupPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_PutRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.PutRolePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.PutRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_PutUserPolicy() { + svc := iam.New(session.New()) + + params := &iam.PutUserPolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + PolicyName: aws.String("policyNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.PutUserPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_RemoveClientIDFromOpenIDConnectProvider() { + svc := iam.New(session.New()) + + params := &iam.RemoveClientIDFromOpenIDConnectProviderInput{ + ClientID: aws.String("clientIDType"), // Required + OpenIDConnectProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.RemoveClientIDFromOpenIDConnectProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_RemoveRoleFromInstanceProfile() { + svc := iam.New(session.New()) + + params := &iam.RemoveRoleFromInstanceProfileInput{ + InstanceProfileName: aws.String("instanceProfileNameType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.RemoveRoleFromInstanceProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_RemoveUserFromGroup() { + svc := iam.New(session.New()) + + params := &iam.RemoveUserFromGroupInput{ + GroupName: aws.String("groupNameType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.RemoveUserFromGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_ResyncMFADevice() { + svc := iam.New(session.New()) + + params := &iam.ResyncMFADeviceInput{ + AuthenticationCode1: aws.String("authenticationCodeType"), // Required + AuthenticationCode2: aws.String("authenticationCodeType"), // Required + SerialNumber: aws.String("serialNumberType"), // Required + UserName: aws.String("existingUserNameType"), // Required + } + resp, err := svc.ResyncMFADevice(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SetDefaultPolicyVersion() { + svc := iam.New(session.New()) + + params := &iam.SetDefaultPolicyVersionInput{ + PolicyArn: aws.String("arnType"), // Required + VersionId: aws.String("policyVersionIdType"), // Required + } + resp, err := svc.SetDefaultPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SimulateCustomPolicy() { + svc := iam.New(session.New()) + + params := &iam.SimulateCustomPolicyInput{ + ActionNames: []*string{ // Required + aws.String("ActionNameType"), // Required + // More values... 
+ }, + PolicyInputList: []*string{ // Required + aws.String("policyDocumentType"), // Required + // More values... + }, + CallerArn: aws.String("ResourceNameType"), + ContextEntries: []*iam.ContextEntry{ + { // Required + ContextKeyName: aws.String("ContextKeyNameType"), + ContextKeyType: aws.String("ContextKeyTypeEnum"), + ContextKeyValues: []*string{ + aws.String("ContextKeyValueType"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + ResourceArns: []*string{ + aws.String("ResourceNameType"), // Required + // More values... + }, + ResourceHandlingOption: aws.String("ResourceHandlingOptionType"), + ResourceOwner: aws.String("ResourceNameType"), + ResourcePolicy: aws.String("policyDocumentType"), + } + resp, err := svc.SimulateCustomPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_SimulatePrincipalPolicy() { + svc := iam.New(session.New()) + + params := &iam.SimulatePrincipalPolicyInput{ + ActionNames: []*string{ // Required + aws.String("ActionNameType"), // Required + // More values... + }, + PolicySourceArn: aws.String("arnType"), // Required + CallerArn: aws.String("ResourceNameType"), + ContextEntries: []*iam.ContextEntry{ + { // Required + ContextKeyName: aws.String("ContextKeyNameType"), + ContextKeyType: aws.String("ContextKeyTypeEnum"), + ContextKeyValues: []*string{ + aws.String("ContextKeyValueType"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("markerType"), + MaxItems: aws.Int64(1), + PolicyInputList: []*string{ + aws.String("policyDocumentType"), // Required + // More values... + }, + ResourceArns: []*string{ + aws.String("ResourceNameType"), // Required + // More values... 
+ }, + ResourceHandlingOption: aws.String("ResourceHandlingOptionType"), + ResourceOwner: aws.String("ResourceNameType"), + ResourcePolicy: aws.String("policyDocumentType"), + } + resp, err := svc.SimulatePrincipalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateAccessKey() { + svc := iam.New(session.New()) + + params := &iam.UpdateAccessKeyInput{ + AccessKeyId: aws.String("accessKeyIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UpdateAccessKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateAccountPasswordPolicy() { + svc := iam.New(session.New()) + + params := &iam.UpdateAccountPasswordPolicyInput{ + AllowUsersToChangePassword: aws.Bool(true), + HardExpiry: aws.Bool(true), + MaxPasswordAge: aws.Int64(1), + MinimumPasswordLength: aws.Int64(1), + PasswordReusePrevention: aws.Int64(1), + RequireLowercaseCharacters: aws.Bool(true), + RequireNumbers: aws.Bool(true), + RequireSymbols: aws.Bool(true), + RequireUppercaseCharacters: aws.Bool(true), + } + resp, err := svc.UpdateAccountPasswordPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_UpdateAssumeRolePolicy() { + svc := iam.New(session.New()) + + params := &iam.UpdateAssumeRolePolicyInput{ + PolicyDocument: aws.String("policyDocumentType"), // Required + RoleName: aws.String("roleNameType"), // Required + } + resp, err := svc.UpdateAssumeRolePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateGroup() { + svc := iam.New(session.New()) + + params := &iam.UpdateGroupInput{ + GroupName: aws.String("groupNameType"), // Required + NewGroupName: aws.String("groupNameType"), + NewPath: aws.String("pathType"), + } + resp, err := svc.UpdateGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateLoginProfile() { + svc := iam.New(session.New()) + + params := &iam.UpdateLoginProfileInput{ + UserName: aws.String("userNameType"), // Required + Password: aws.String("passwordType"), + PasswordResetRequired: aws.Bool(true), + } + resp, err := svc.UpdateLoginProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateOpenIDConnectProviderThumbprint() { + svc := iam.New(session.New()) + + params := &iam.UpdateOpenIDConnectProviderThumbprintInput{ + OpenIDConnectProviderArn: aws.String("arnType"), // Required + ThumbprintList: []*string{ // Required + aws.String("thumbprintType"), // Required + // More values... 
+ }, + } + resp, err := svc.UpdateOpenIDConnectProviderThumbprint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateSAMLProvider() { + svc := iam.New(session.New()) + + params := &iam.UpdateSAMLProviderInput{ + SAMLMetadataDocument: aws.String("SAMLMetadataDocumentType"), // Required + SAMLProviderArn: aws.String("arnType"), // Required + } + resp, err := svc.UpdateSAMLProvider(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.UpdateSSHPublicKeyInput{ + SSHPublicKeyId: aws.String("publicKeyIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.UpdateSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.UpdateServerCertificateInput{ + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + NewPath: aws.String("pathType"), + NewServerCertificateName: aws.String("serverCertificateNameType"), + } + resp, err := svc.UpdateServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_UpdateSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.UpdateSigningCertificateInput{ + CertificateId: aws.String("certificateIdType"), // Required + Status: aws.String("statusType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UpdateSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UpdateUser() { + svc := iam.New(session.New()) + + params := &iam.UpdateUserInput{ + UserName: aws.String("existingUserNameType"), // Required + NewPath: aws.String("pathType"), + NewUserName: aws.String("userNameType"), + } + resp, err := svc.UpdateUser(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UploadSSHPublicKey() { + svc := iam.New(session.New()) + + params := &iam.UploadSSHPublicKeyInput{ + SSHPublicKeyBody: aws.String("publicKeyMaterialType"), // Required + UserName: aws.String("userNameType"), // Required + } + resp, err := svc.UploadSSHPublicKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIAM_UploadServerCertificate() { + svc := iam.New(session.New()) + + params := &iam.UploadServerCertificateInput{ + CertificateBody: aws.String("certificateBodyType"), // Required + PrivateKey: aws.String("privateKeyType"), // Required + ServerCertificateName: aws.String("serverCertificateNameType"), // Required + CertificateChain: aws.String("certificateChainType"), + Path: aws.String("pathType"), + } + resp, err := svc.UploadServerCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIAM_UploadSigningCertificate() { + svc := iam.New(session.New()) + + params := &iam.UploadSigningCertificateInput{ + CertificateBody: aws.String("certificateBodyType"), // Required + UserName: aws.String("existingUserNameType"), + } + resp, err := svc.UploadSigningCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/iamiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,510 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iamiface provides an interface for the AWS Identity and Access Management. 
+package iamiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/iam" +) + +// IAMAPI is the interface type for iam.IAM. +type IAMAPI interface { + AddClientIDToOpenIDConnectProviderRequest(*iam.AddClientIDToOpenIDConnectProviderInput) (*request.Request, *iam.AddClientIDToOpenIDConnectProviderOutput) + + AddClientIDToOpenIDConnectProvider(*iam.AddClientIDToOpenIDConnectProviderInput) (*iam.AddClientIDToOpenIDConnectProviderOutput, error) + + AddRoleToInstanceProfileRequest(*iam.AddRoleToInstanceProfileInput) (*request.Request, *iam.AddRoleToInstanceProfileOutput) + + AddRoleToInstanceProfile(*iam.AddRoleToInstanceProfileInput) (*iam.AddRoleToInstanceProfileOutput, error) + + AddUserToGroupRequest(*iam.AddUserToGroupInput) (*request.Request, *iam.AddUserToGroupOutput) + + AddUserToGroup(*iam.AddUserToGroupInput) (*iam.AddUserToGroupOutput, error) + + AttachGroupPolicyRequest(*iam.AttachGroupPolicyInput) (*request.Request, *iam.AttachGroupPolicyOutput) + + AttachGroupPolicy(*iam.AttachGroupPolicyInput) (*iam.AttachGroupPolicyOutput, error) + + AttachRolePolicyRequest(*iam.AttachRolePolicyInput) (*request.Request, *iam.AttachRolePolicyOutput) + + AttachRolePolicy(*iam.AttachRolePolicyInput) (*iam.AttachRolePolicyOutput, error) + + AttachUserPolicyRequest(*iam.AttachUserPolicyInput) (*request.Request, *iam.AttachUserPolicyOutput) + + AttachUserPolicy(*iam.AttachUserPolicyInput) (*iam.AttachUserPolicyOutput, error) + + ChangePasswordRequest(*iam.ChangePasswordInput) (*request.Request, *iam.ChangePasswordOutput) + + ChangePassword(*iam.ChangePasswordInput) (*iam.ChangePasswordOutput, error) + + CreateAccessKeyRequest(*iam.CreateAccessKeyInput) (*request.Request, *iam.CreateAccessKeyOutput) + + CreateAccessKey(*iam.CreateAccessKeyInput) (*iam.CreateAccessKeyOutput, error) + + CreateAccountAliasRequest(*iam.CreateAccountAliasInput) (*request.Request, *iam.CreateAccountAliasOutput) + + 
CreateAccountAlias(*iam.CreateAccountAliasInput) (*iam.CreateAccountAliasOutput, error) + + CreateGroupRequest(*iam.CreateGroupInput) (*request.Request, *iam.CreateGroupOutput) + + CreateGroup(*iam.CreateGroupInput) (*iam.CreateGroupOutput, error) + + CreateInstanceProfileRequest(*iam.CreateInstanceProfileInput) (*request.Request, *iam.CreateInstanceProfileOutput) + + CreateInstanceProfile(*iam.CreateInstanceProfileInput) (*iam.CreateInstanceProfileOutput, error) + + CreateLoginProfileRequest(*iam.CreateLoginProfileInput) (*request.Request, *iam.CreateLoginProfileOutput) + + CreateLoginProfile(*iam.CreateLoginProfileInput) (*iam.CreateLoginProfileOutput, error) + + CreateOpenIDConnectProviderRequest(*iam.CreateOpenIDConnectProviderInput) (*request.Request, *iam.CreateOpenIDConnectProviderOutput) + + CreateOpenIDConnectProvider(*iam.CreateOpenIDConnectProviderInput) (*iam.CreateOpenIDConnectProviderOutput, error) + + CreatePolicyRequest(*iam.CreatePolicyInput) (*request.Request, *iam.CreatePolicyOutput) + + CreatePolicy(*iam.CreatePolicyInput) (*iam.CreatePolicyOutput, error) + + CreatePolicyVersionRequest(*iam.CreatePolicyVersionInput) (*request.Request, *iam.CreatePolicyVersionOutput) + + CreatePolicyVersion(*iam.CreatePolicyVersionInput) (*iam.CreatePolicyVersionOutput, error) + + CreateRoleRequest(*iam.CreateRoleInput) (*request.Request, *iam.CreateRoleOutput) + + CreateRole(*iam.CreateRoleInput) (*iam.CreateRoleOutput, error) + + CreateSAMLProviderRequest(*iam.CreateSAMLProviderInput) (*request.Request, *iam.CreateSAMLProviderOutput) + + CreateSAMLProvider(*iam.CreateSAMLProviderInput) (*iam.CreateSAMLProviderOutput, error) + + CreateUserRequest(*iam.CreateUserInput) (*request.Request, *iam.CreateUserOutput) + + CreateUser(*iam.CreateUserInput) (*iam.CreateUserOutput, error) + + CreateVirtualMFADeviceRequest(*iam.CreateVirtualMFADeviceInput) (*request.Request, *iam.CreateVirtualMFADeviceOutput) + + CreateVirtualMFADevice(*iam.CreateVirtualMFADeviceInput) 
(*iam.CreateVirtualMFADeviceOutput, error) + + DeactivateMFADeviceRequest(*iam.DeactivateMFADeviceInput) (*request.Request, *iam.DeactivateMFADeviceOutput) + + DeactivateMFADevice(*iam.DeactivateMFADeviceInput) (*iam.DeactivateMFADeviceOutput, error) + + DeleteAccessKeyRequest(*iam.DeleteAccessKeyInput) (*request.Request, *iam.DeleteAccessKeyOutput) + + DeleteAccessKey(*iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) + + DeleteAccountAliasRequest(*iam.DeleteAccountAliasInput) (*request.Request, *iam.DeleteAccountAliasOutput) + + DeleteAccountAlias(*iam.DeleteAccountAliasInput) (*iam.DeleteAccountAliasOutput, error) + + DeleteAccountPasswordPolicyRequest(*iam.DeleteAccountPasswordPolicyInput) (*request.Request, *iam.DeleteAccountPasswordPolicyOutput) + + DeleteAccountPasswordPolicy(*iam.DeleteAccountPasswordPolicyInput) (*iam.DeleteAccountPasswordPolicyOutput, error) + + DeleteGroupRequest(*iam.DeleteGroupInput) (*request.Request, *iam.DeleteGroupOutput) + + DeleteGroup(*iam.DeleteGroupInput) (*iam.DeleteGroupOutput, error) + + DeleteGroupPolicyRequest(*iam.DeleteGroupPolicyInput) (*request.Request, *iam.DeleteGroupPolicyOutput) + + DeleteGroupPolicy(*iam.DeleteGroupPolicyInput) (*iam.DeleteGroupPolicyOutput, error) + + DeleteInstanceProfileRequest(*iam.DeleteInstanceProfileInput) (*request.Request, *iam.DeleteInstanceProfileOutput) + + DeleteInstanceProfile(*iam.DeleteInstanceProfileInput) (*iam.DeleteInstanceProfileOutput, error) + + DeleteLoginProfileRequest(*iam.DeleteLoginProfileInput) (*request.Request, *iam.DeleteLoginProfileOutput) + + DeleteLoginProfile(*iam.DeleteLoginProfileInput) (*iam.DeleteLoginProfileOutput, error) + + DeleteOpenIDConnectProviderRequest(*iam.DeleteOpenIDConnectProviderInput) (*request.Request, *iam.DeleteOpenIDConnectProviderOutput) + + DeleteOpenIDConnectProvider(*iam.DeleteOpenIDConnectProviderInput) (*iam.DeleteOpenIDConnectProviderOutput, error) + + DeletePolicyRequest(*iam.DeletePolicyInput) (*request.Request, 
*iam.DeletePolicyOutput) + + DeletePolicy(*iam.DeletePolicyInput) (*iam.DeletePolicyOutput, error) + + DeletePolicyVersionRequest(*iam.DeletePolicyVersionInput) (*request.Request, *iam.DeletePolicyVersionOutput) + + DeletePolicyVersion(*iam.DeletePolicyVersionInput) (*iam.DeletePolicyVersionOutput, error) + + DeleteRoleRequest(*iam.DeleteRoleInput) (*request.Request, *iam.DeleteRoleOutput) + + DeleteRole(*iam.DeleteRoleInput) (*iam.DeleteRoleOutput, error) + + DeleteRolePolicyRequest(*iam.DeleteRolePolicyInput) (*request.Request, *iam.DeleteRolePolicyOutput) + + DeleteRolePolicy(*iam.DeleteRolePolicyInput) (*iam.DeleteRolePolicyOutput, error) + + DeleteSAMLProviderRequest(*iam.DeleteSAMLProviderInput) (*request.Request, *iam.DeleteSAMLProviderOutput) + + DeleteSAMLProvider(*iam.DeleteSAMLProviderInput) (*iam.DeleteSAMLProviderOutput, error) + + DeleteSSHPublicKeyRequest(*iam.DeleteSSHPublicKeyInput) (*request.Request, *iam.DeleteSSHPublicKeyOutput) + + DeleteSSHPublicKey(*iam.DeleteSSHPublicKeyInput) (*iam.DeleteSSHPublicKeyOutput, error) + + DeleteServerCertificateRequest(*iam.DeleteServerCertificateInput) (*request.Request, *iam.DeleteServerCertificateOutput) + + DeleteServerCertificate(*iam.DeleteServerCertificateInput) (*iam.DeleteServerCertificateOutput, error) + + DeleteSigningCertificateRequest(*iam.DeleteSigningCertificateInput) (*request.Request, *iam.DeleteSigningCertificateOutput) + + DeleteSigningCertificate(*iam.DeleteSigningCertificateInput) (*iam.DeleteSigningCertificateOutput, error) + + DeleteUserRequest(*iam.DeleteUserInput) (*request.Request, *iam.DeleteUserOutput) + + DeleteUser(*iam.DeleteUserInput) (*iam.DeleteUserOutput, error) + + DeleteUserPolicyRequest(*iam.DeleteUserPolicyInput) (*request.Request, *iam.DeleteUserPolicyOutput) + + DeleteUserPolicy(*iam.DeleteUserPolicyInput) (*iam.DeleteUserPolicyOutput, error) + + DeleteVirtualMFADeviceRequest(*iam.DeleteVirtualMFADeviceInput) (*request.Request, *iam.DeleteVirtualMFADeviceOutput) + + 
DeleteVirtualMFADevice(*iam.DeleteVirtualMFADeviceInput) (*iam.DeleteVirtualMFADeviceOutput, error) + + DetachGroupPolicyRequest(*iam.DetachGroupPolicyInput) (*request.Request, *iam.DetachGroupPolicyOutput) + + DetachGroupPolicy(*iam.DetachGroupPolicyInput) (*iam.DetachGroupPolicyOutput, error) + + DetachRolePolicyRequest(*iam.DetachRolePolicyInput) (*request.Request, *iam.DetachRolePolicyOutput) + + DetachRolePolicy(*iam.DetachRolePolicyInput) (*iam.DetachRolePolicyOutput, error) + + DetachUserPolicyRequest(*iam.DetachUserPolicyInput) (*request.Request, *iam.DetachUserPolicyOutput) + + DetachUserPolicy(*iam.DetachUserPolicyInput) (*iam.DetachUserPolicyOutput, error) + + EnableMFADeviceRequest(*iam.EnableMFADeviceInput) (*request.Request, *iam.EnableMFADeviceOutput) + + EnableMFADevice(*iam.EnableMFADeviceInput) (*iam.EnableMFADeviceOutput, error) + + GenerateCredentialReportRequest(*iam.GenerateCredentialReportInput) (*request.Request, *iam.GenerateCredentialReportOutput) + + GenerateCredentialReport(*iam.GenerateCredentialReportInput) (*iam.GenerateCredentialReportOutput, error) + + GetAccessKeyLastUsedRequest(*iam.GetAccessKeyLastUsedInput) (*request.Request, *iam.GetAccessKeyLastUsedOutput) + + GetAccessKeyLastUsed(*iam.GetAccessKeyLastUsedInput) (*iam.GetAccessKeyLastUsedOutput, error) + + GetAccountAuthorizationDetailsRequest(*iam.GetAccountAuthorizationDetailsInput) (*request.Request, *iam.GetAccountAuthorizationDetailsOutput) + + GetAccountAuthorizationDetails(*iam.GetAccountAuthorizationDetailsInput) (*iam.GetAccountAuthorizationDetailsOutput, error) + + GetAccountAuthorizationDetailsPages(*iam.GetAccountAuthorizationDetailsInput, func(*iam.GetAccountAuthorizationDetailsOutput, bool) bool) error + + GetAccountPasswordPolicyRequest(*iam.GetAccountPasswordPolicyInput) (*request.Request, *iam.GetAccountPasswordPolicyOutput) + + GetAccountPasswordPolicy(*iam.GetAccountPasswordPolicyInput) (*iam.GetAccountPasswordPolicyOutput, error) + + 
GetAccountSummaryRequest(*iam.GetAccountSummaryInput) (*request.Request, *iam.GetAccountSummaryOutput) + + GetAccountSummary(*iam.GetAccountSummaryInput) (*iam.GetAccountSummaryOutput, error) + + GetContextKeysForCustomPolicyRequest(*iam.GetContextKeysForCustomPolicyInput) (*request.Request, *iam.GetContextKeysForPolicyResponse) + + GetContextKeysForCustomPolicy(*iam.GetContextKeysForCustomPolicyInput) (*iam.GetContextKeysForPolicyResponse, error) + + GetContextKeysForPrincipalPolicyRequest(*iam.GetContextKeysForPrincipalPolicyInput) (*request.Request, *iam.GetContextKeysForPolicyResponse) + + GetContextKeysForPrincipalPolicy(*iam.GetContextKeysForPrincipalPolicyInput) (*iam.GetContextKeysForPolicyResponse, error) + + GetCredentialReportRequest(*iam.GetCredentialReportInput) (*request.Request, *iam.GetCredentialReportOutput) + + GetCredentialReport(*iam.GetCredentialReportInput) (*iam.GetCredentialReportOutput, error) + + GetGroupRequest(*iam.GetGroupInput) (*request.Request, *iam.GetGroupOutput) + + GetGroup(*iam.GetGroupInput) (*iam.GetGroupOutput, error) + + GetGroupPages(*iam.GetGroupInput, func(*iam.GetGroupOutput, bool) bool) error + + GetGroupPolicyRequest(*iam.GetGroupPolicyInput) (*request.Request, *iam.GetGroupPolicyOutput) + + GetGroupPolicy(*iam.GetGroupPolicyInput) (*iam.GetGroupPolicyOutput, error) + + GetInstanceProfileRequest(*iam.GetInstanceProfileInput) (*request.Request, *iam.GetInstanceProfileOutput) + + GetInstanceProfile(*iam.GetInstanceProfileInput) (*iam.GetInstanceProfileOutput, error) + + GetLoginProfileRequest(*iam.GetLoginProfileInput) (*request.Request, *iam.GetLoginProfileOutput) + + GetLoginProfile(*iam.GetLoginProfileInput) (*iam.GetLoginProfileOutput, error) + + GetOpenIDConnectProviderRequest(*iam.GetOpenIDConnectProviderInput) (*request.Request, *iam.GetOpenIDConnectProviderOutput) + + GetOpenIDConnectProvider(*iam.GetOpenIDConnectProviderInput) (*iam.GetOpenIDConnectProviderOutput, error) + + GetPolicyRequest(*iam.GetPolicyInput) 
(*request.Request, *iam.GetPolicyOutput) + + GetPolicy(*iam.GetPolicyInput) (*iam.GetPolicyOutput, error) + + GetPolicyVersionRequest(*iam.GetPolicyVersionInput) (*request.Request, *iam.GetPolicyVersionOutput) + + GetPolicyVersion(*iam.GetPolicyVersionInput) (*iam.GetPolicyVersionOutput, error) + + GetRoleRequest(*iam.GetRoleInput) (*request.Request, *iam.GetRoleOutput) + + GetRole(*iam.GetRoleInput) (*iam.GetRoleOutput, error) + + GetRolePolicyRequest(*iam.GetRolePolicyInput) (*request.Request, *iam.GetRolePolicyOutput) + + GetRolePolicy(*iam.GetRolePolicyInput) (*iam.GetRolePolicyOutput, error) + + GetSAMLProviderRequest(*iam.GetSAMLProviderInput) (*request.Request, *iam.GetSAMLProviderOutput) + + GetSAMLProvider(*iam.GetSAMLProviderInput) (*iam.GetSAMLProviderOutput, error) + + GetSSHPublicKeyRequest(*iam.GetSSHPublicKeyInput) (*request.Request, *iam.GetSSHPublicKeyOutput) + + GetSSHPublicKey(*iam.GetSSHPublicKeyInput) (*iam.GetSSHPublicKeyOutput, error) + + GetServerCertificateRequest(*iam.GetServerCertificateInput) (*request.Request, *iam.GetServerCertificateOutput) + + GetServerCertificate(*iam.GetServerCertificateInput) (*iam.GetServerCertificateOutput, error) + + GetUserRequest(*iam.GetUserInput) (*request.Request, *iam.GetUserOutput) + + GetUser(*iam.GetUserInput) (*iam.GetUserOutput, error) + + GetUserPolicyRequest(*iam.GetUserPolicyInput) (*request.Request, *iam.GetUserPolicyOutput) + + GetUserPolicy(*iam.GetUserPolicyInput) (*iam.GetUserPolicyOutput, error) + + ListAccessKeysRequest(*iam.ListAccessKeysInput) (*request.Request, *iam.ListAccessKeysOutput) + + ListAccessKeys(*iam.ListAccessKeysInput) (*iam.ListAccessKeysOutput, error) + + ListAccessKeysPages(*iam.ListAccessKeysInput, func(*iam.ListAccessKeysOutput, bool) bool) error + + ListAccountAliasesRequest(*iam.ListAccountAliasesInput) (*request.Request, *iam.ListAccountAliasesOutput) + + ListAccountAliases(*iam.ListAccountAliasesInput) (*iam.ListAccountAliasesOutput, error) + + 
ListAccountAliasesPages(*iam.ListAccountAliasesInput, func(*iam.ListAccountAliasesOutput, bool) bool) error + + ListAttachedGroupPoliciesRequest(*iam.ListAttachedGroupPoliciesInput) (*request.Request, *iam.ListAttachedGroupPoliciesOutput) + + ListAttachedGroupPolicies(*iam.ListAttachedGroupPoliciesInput) (*iam.ListAttachedGroupPoliciesOutput, error) + + ListAttachedGroupPoliciesPages(*iam.ListAttachedGroupPoliciesInput, func(*iam.ListAttachedGroupPoliciesOutput, bool) bool) error + + ListAttachedRolePoliciesRequest(*iam.ListAttachedRolePoliciesInput) (*request.Request, *iam.ListAttachedRolePoliciesOutput) + + ListAttachedRolePolicies(*iam.ListAttachedRolePoliciesInput) (*iam.ListAttachedRolePoliciesOutput, error) + + ListAttachedRolePoliciesPages(*iam.ListAttachedRolePoliciesInput, func(*iam.ListAttachedRolePoliciesOutput, bool) bool) error + + ListAttachedUserPoliciesRequest(*iam.ListAttachedUserPoliciesInput) (*request.Request, *iam.ListAttachedUserPoliciesOutput) + + ListAttachedUserPolicies(*iam.ListAttachedUserPoliciesInput) (*iam.ListAttachedUserPoliciesOutput, error) + + ListAttachedUserPoliciesPages(*iam.ListAttachedUserPoliciesInput, func(*iam.ListAttachedUserPoliciesOutput, bool) bool) error + + ListEntitiesForPolicyRequest(*iam.ListEntitiesForPolicyInput) (*request.Request, *iam.ListEntitiesForPolicyOutput) + + ListEntitiesForPolicy(*iam.ListEntitiesForPolicyInput) (*iam.ListEntitiesForPolicyOutput, error) + + ListEntitiesForPolicyPages(*iam.ListEntitiesForPolicyInput, func(*iam.ListEntitiesForPolicyOutput, bool) bool) error + + ListGroupPoliciesRequest(*iam.ListGroupPoliciesInput) (*request.Request, *iam.ListGroupPoliciesOutput) + + ListGroupPolicies(*iam.ListGroupPoliciesInput) (*iam.ListGroupPoliciesOutput, error) + + ListGroupPoliciesPages(*iam.ListGroupPoliciesInput, func(*iam.ListGroupPoliciesOutput, bool) bool) error + + ListGroupsRequest(*iam.ListGroupsInput) (*request.Request, *iam.ListGroupsOutput) + + ListGroups(*iam.ListGroupsInput) 
(*iam.ListGroupsOutput, error) + + ListGroupsPages(*iam.ListGroupsInput, func(*iam.ListGroupsOutput, bool) bool) error + + ListGroupsForUserRequest(*iam.ListGroupsForUserInput) (*request.Request, *iam.ListGroupsForUserOutput) + + ListGroupsForUser(*iam.ListGroupsForUserInput) (*iam.ListGroupsForUserOutput, error) + + ListGroupsForUserPages(*iam.ListGroupsForUserInput, func(*iam.ListGroupsForUserOutput, bool) bool) error + + ListInstanceProfilesRequest(*iam.ListInstanceProfilesInput) (*request.Request, *iam.ListInstanceProfilesOutput) + + ListInstanceProfiles(*iam.ListInstanceProfilesInput) (*iam.ListInstanceProfilesOutput, error) + + ListInstanceProfilesPages(*iam.ListInstanceProfilesInput, func(*iam.ListInstanceProfilesOutput, bool) bool) error + + ListInstanceProfilesForRoleRequest(*iam.ListInstanceProfilesForRoleInput) (*request.Request, *iam.ListInstanceProfilesForRoleOutput) + + ListInstanceProfilesForRole(*iam.ListInstanceProfilesForRoleInput) (*iam.ListInstanceProfilesForRoleOutput, error) + + ListInstanceProfilesForRolePages(*iam.ListInstanceProfilesForRoleInput, func(*iam.ListInstanceProfilesForRoleOutput, bool) bool) error + + ListMFADevicesRequest(*iam.ListMFADevicesInput) (*request.Request, *iam.ListMFADevicesOutput) + + ListMFADevices(*iam.ListMFADevicesInput) (*iam.ListMFADevicesOutput, error) + + ListMFADevicesPages(*iam.ListMFADevicesInput, func(*iam.ListMFADevicesOutput, bool) bool) error + + ListOpenIDConnectProvidersRequest(*iam.ListOpenIDConnectProvidersInput) (*request.Request, *iam.ListOpenIDConnectProvidersOutput) + + ListOpenIDConnectProviders(*iam.ListOpenIDConnectProvidersInput) (*iam.ListOpenIDConnectProvidersOutput, error) + + ListPoliciesRequest(*iam.ListPoliciesInput) (*request.Request, *iam.ListPoliciesOutput) + + ListPolicies(*iam.ListPoliciesInput) (*iam.ListPoliciesOutput, error) + + ListPoliciesPages(*iam.ListPoliciesInput, func(*iam.ListPoliciesOutput, bool) bool) error + + ListPolicyVersionsRequest(*iam.ListPolicyVersionsInput) 
(*request.Request, *iam.ListPolicyVersionsOutput) + + ListPolicyVersions(*iam.ListPolicyVersionsInput) (*iam.ListPolicyVersionsOutput, error) + + ListRolePoliciesRequest(*iam.ListRolePoliciesInput) (*request.Request, *iam.ListRolePoliciesOutput) + + ListRolePolicies(*iam.ListRolePoliciesInput) (*iam.ListRolePoliciesOutput, error) + + ListRolePoliciesPages(*iam.ListRolePoliciesInput, func(*iam.ListRolePoliciesOutput, bool) bool) error + + ListRolesRequest(*iam.ListRolesInput) (*request.Request, *iam.ListRolesOutput) + + ListRoles(*iam.ListRolesInput) (*iam.ListRolesOutput, error) + + ListRolesPages(*iam.ListRolesInput, func(*iam.ListRolesOutput, bool) bool) error + + ListSAMLProvidersRequest(*iam.ListSAMLProvidersInput) (*request.Request, *iam.ListSAMLProvidersOutput) + + ListSAMLProviders(*iam.ListSAMLProvidersInput) (*iam.ListSAMLProvidersOutput, error) + + ListSSHPublicKeysRequest(*iam.ListSSHPublicKeysInput) (*request.Request, *iam.ListSSHPublicKeysOutput) + + ListSSHPublicKeys(*iam.ListSSHPublicKeysInput) (*iam.ListSSHPublicKeysOutput, error) + + ListServerCertificatesRequest(*iam.ListServerCertificatesInput) (*request.Request, *iam.ListServerCertificatesOutput) + + ListServerCertificates(*iam.ListServerCertificatesInput) (*iam.ListServerCertificatesOutput, error) + + ListServerCertificatesPages(*iam.ListServerCertificatesInput, func(*iam.ListServerCertificatesOutput, bool) bool) error + + ListSigningCertificatesRequest(*iam.ListSigningCertificatesInput) (*request.Request, *iam.ListSigningCertificatesOutput) + + ListSigningCertificates(*iam.ListSigningCertificatesInput) (*iam.ListSigningCertificatesOutput, error) + + ListSigningCertificatesPages(*iam.ListSigningCertificatesInput, func(*iam.ListSigningCertificatesOutput, bool) bool) error + + ListUserPoliciesRequest(*iam.ListUserPoliciesInput) (*request.Request, *iam.ListUserPoliciesOutput) + + ListUserPolicies(*iam.ListUserPoliciesInput) (*iam.ListUserPoliciesOutput, error) + + 
ListUserPoliciesPages(*iam.ListUserPoliciesInput, func(*iam.ListUserPoliciesOutput, bool) bool) error + + ListUsersRequest(*iam.ListUsersInput) (*request.Request, *iam.ListUsersOutput) + + ListUsers(*iam.ListUsersInput) (*iam.ListUsersOutput, error) + + ListUsersPages(*iam.ListUsersInput, func(*iam.ListUsersOutput, bool) bool) error + + ListVirtualMFADevicesRequest(*iam.ListVirtualMFADevicesInput) (*request.Request, *iam.ListVirtualMFADevicesOutput) + + ListVirtualMFADevices(*iam.ListVirtualMFADevicesInput) (*iam.ListVirtualMFADevicesOutput, error) + + ListVirtualMFADevicesPages(*iam.ListVirtualMFADevicesInput, func(*iam.ListVirtualMFADevicesOutput, bool) bool) error + + PutGroupPolicyRequest(*iam.PutGroupPolicyInput) (*request.Request, *iam.PutGroupPolicyOutput) + + PutGroupPolicy(*iam.PutGroupPolicyInput) (*iam.PutGroupPolicyOutput, error) + + PutRolePolicyRequest(*iam.PutRolePolicyInput) (*request.Request, *iam.PutRolePolicyOutput) + + PutRolePolicy(*iam.PutRolePolicyInput) (*iam.PutRolePolicyOutput, error) + + PutUserPolicyRequest(*iam.PutUserPolicyInput) (*request.Request, *iam.PutUserPolicyOutput) + + PutUserPolicy(*iam.PutUserPolicyInput) (*iam.PutUserPolicyOutput, error) + + RemoveClientIDFromOpenIDConnectProviderRequest(*iam.RemoveClientIDFromOpenIDConnectProviderInput) (*request.Request, *iam.RemoveClientIDFromOpenIDConnectProviderOutput) + + RemoveClientIDFromOpenIDConnectProvider(*iam.RemoveClientIDFromOpenIDConnectProviderInput) (*iam.RemoveClientIDFromOpenIDConnectProviderOutput, error) + + RemoveRoleFromInstanceProfileRequest(*iam.RemoveRoleFromInstanceProfileInput) (*request.Request, *iam.RemoveRoleFromInstanceProfileOutput) + + RemoveRoleFromInstanceProfile(*iam.RemoveRoleFromInstanceProfileInput) (*iam.RemoveRoleFromInstanceProfileOutput, error) + + RemoveUserFromGroupRequest(*iam.RemoveUserFromGroupInput) (*request.Request, *iam.RemoveUserFromGroupOutput) + + RemoveUserFromGroup(*iam.RemoveUserFromGroupInput) (*iam.RemoveUserFromGroupOutput, 
error) + + ResyncMFADeviceRequest(*iam.ResyncMFADeviceInput) (*request.Request, *iam.ResyncMFADeviceOutput) + + ResyncMFADevice(*iam.ResyncMFADeviceInput) (*iam.ResyncMFADeviceOutput, error) + + SetDefaultPolicyVersionRequest(*iam.SetDefaultPolicyVersionInput) (*request.Request, *iam.SetDefaultPolicyVersionOutput) + + SetDefaultPolicyVersion(*iam.SetDefaultPolicyVersionInput) (*iam.SetDefaultPolicyVersionOutput, error) + + SimulateCustomPolicyRequest(*iam.SimulateCustomPolicyInput) (*request.Request, *iam.SimulatePolicyResponse) + + SimulateCustomPolicy(*iam.SimulateCustomPolicyInput) (*iam.SimulatePolicyResponse, error) + + SimulatePrincipalPolicyRequest(*iam.SimulatePrincipalPolicyInput) (*request.Request, *iam.SimulatePolicyResponse) + + SimulatePrincipalPolicy(*iam.SimulatePrincipalPolicyInput) (*iam.SimulatePolicyResponse, error) + + UpdateAccessKeyRequest(*iam.UpdateAccessKeyInput) (*request.Request, *iam.UpdateAccessKeyOutput) + + UpdateAccessKey(*iam.UpdateAccessKeyInput) (*iam.UpdateAccessKeyOutput, error) + + UpdateAccountPasswordPolicyRequest(*iam.UpdateAccountPasswordPolicyInput) (*request.Request, *iam.UpdateAccountPasswordPolicyOutput) + + UpdateAccountPasswordPolicy(*iam.UpdateAccountPasswordPolicyInput) (*iam.UpdateAccountPasswordPolicyOutput, error) + + UpdateAssumeRolePolicyRequest(*iam.UpdateAssumeRolePolicyInput) (*request.Request, *iam.UpdateAssumeRolePolicyOutput) + + UpdateAssumeRolePolicy(*iam.UpdateAssumeRolePolicyInput) (*iam.UpdateAssumeRolePolicyOutput, error) + + UpdateGroupRequest(*iam.UpdateGroupInput) (*request.Request, *iam.UpdateGroupOutput) + + UpdateGroup(*iam.UpdateGroupInput) (*iam.UpdateGroupOutput, error) + + UpdateLoginProfileRequest(*iam.UpdateLoginProfileInput) (*request.Request, *iam.UpdateLoginProfileOutput) + + UpdateLoginProfile(*iam.UpdateLoginProfileInput) (*iam.UpdateLoginProfileOutput, error) + + UpdateOpenIDConnectProviderThumbprintRequest(*iam.UpdateOpenIDConnectProviderThumbprintInput) (*request.Request, 
*iam.UpdateOpenIDConnectProviderThumbprintOutput) + + UpdateOpenIDConnectProviderThumbprint(*iam.UpdateOpenIDConnectProviderThumbprintInput) (*iam.UpdateOpenIDConnectProviderThumbprintOutput, error) + + UpdateSAMLProviderRequest(*iam.UpdateSAMLProviderInput) (*request.Request, *iam.UpdateSAMLProviderOutput) + + UpdateSAMLProvider(*iam.UpdateSAMLProviderInput) (*iam.UpdateSAMLProviderOutput, error) + + UpdateSSHPublicKeyRequest(*iam.UpdateSSHPublicKeyInput) (*request.Request, *iam.UpdateSSHPublicKeyOutput) + + UpdateSSHPublicKey(*iam.UpdateSSHPublicKeyInput) (*iam.UpdateSSHPublicKeyOutput, error) + + UpdateServerCertificateRequest(*iam.UpdateServerCertificateInput) (*request.Request, *iam.UpdateServerCertificateOutput) + + UpdateServerCertificate(*iam.UpdateServerCertificateInput) (*iam.UpdateServerCertificateOutput, error) + + UpdateSigningCertificateRequest(*iam.UpdateSigningCertificateInput) (*request.Request, *iam.UpdateSigningCertificateOutput) + + UpdateSigningCertificate(*iam.UpdateSigningCertificateInput) (*iam.UpdateSigningCertificateOutput, error) + + UpdateUserRequest(*iam.UpdateUserInput) (*request.Request, *iam.UpdateUserOutput) + + UpdateUser(*iam.UpdateUserInput) (*iam.UpdateUserOutput, error) + + UploadSSHPublicKeyRequest(*iam.UploadSSHPublicKeyInput) (*request.Request, *iam.UploadSSHPublicKeyOutput) + + UploadSSHPublicKey(*iam.UploadSSHPublicKeyInput) (*iam.UploadSSHPublicKeyOutput, error) + + UploadServerCertificateRequest(*iam.UploadServerCertificateInput) (*request.Request, *iam.UploadServerCertificateOutput) + + UploadServerCertificate(*iam.UploadServerCertificateInput) (*iam.UploadServerCertificateOutput, error) + + UploadSigningCertificateRequest(*iam.UploadSigningCertificateInput) (*request.Request, *iam.UploadSigningCertificateOutput) + + UploadSigningCertificate(*iam.UploadSigningCertificateInput) (*iam.UploadSigningCertificateOutput, error) +} + +var _ IAMAPI = (*iam.IAM)(nil) diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,133 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package iam + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS Identity and Access Management (IAM) is a web service that you can use +// to manage users and user permissions under your AWS account. This guide provides +// descriptions of IAM actions that you can call programmatically. For general +// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/). +// For the user guide for IAM, see Using IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/). +// +// AWS provides SDKs that consist of libraries and sample code for various +// programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). +// The SDKs provide a convenient way to create programmatic access to IAM and +// AWS. For example, the SDKs take care of tasks such as cryptographically signing +// requests (see below), managing errors, and retrying requests automatically. +// For information about the AWS SDKs, including how to download and install +// them, see the Tools for Amazon Web Services (http://aws.amazon.com/tools/) +// page. We recommend that you use the AWS SDKs to make programmatic API calls +// to IAM. 
However, you can also use the IAM Query API to make direct calls +// to the IAM web service. To learn more about the IAM Query API, see Making +// Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAM guide. IAM supports GET and POST requests for all actions. +// That is, the API does not require you to use GET for some actions and POST +// for others. However, GET requests are subject to the limitation size of a +// URL. Therefore, for operations that require larger sizes, use a POST request. +// +// Signing Requests +// +// Requests must be signed using an access key ID and a secret access key. +// We strongly recommend that you do not use your AWS account access key ID +// and secret access key for everyday work with IAM. You can use the access +// key ID and secret access key for an IAM user or you can use the AWS Security +// Token Service to generate temporary security credentials and use those to +// sign requests. +// +// To sign requests, we recommend that you use Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// If you have an existing application that uses Signature Version 2, you do +// not have to update it to use Signature Version 4. However, some operations +// now require Signature Version 4. The documentation for operations that require +// version 4 indicate this requirement. +// +// Additional Resources +// +// For more information, see the following: +// +// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html). +// This topic provides general information about the types of credentials used +// for accessing AWS. IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html). +// This topic presents a list of suggestions for using the IAM service to help +// secure your AWS resources. 
Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html). +// This set of topics walk you through the process of signing a request using +// an access key ID and secret access key. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type IAM struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "iam" + +// New creates a new instance of the IAM client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a IAM client from just a session. +// svc := iam.New(mySession) +// +// // Create a IAM client with additional configuration +// svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IAM { + svc := &IAM{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-05-08", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a IAM operation and runs any +// custom request initialization. +func (c *IAM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iam/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,65 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package iam + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *IAM) WaitUntilInstanceProfileExists(input *GetInstanceProfileInput) error { + waiterCfg := waiter.Config{ + Operation: "GetInstanceProfile", + Delay: 1, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *IAM) WaitUntilUserExists(input *GetUserInput) error { + waiterCfg := waiter.Config{ + Operation: "GetUser", + Delay: 1, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "NoSuchEntity", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3157 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package inspector provides a client for Amazon Inspector. +package inspector + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddAttributesToFindings = "AddAttributesToFindings" + +// AddAttributesToFindingsRequest generates a request for the AddAttributesToFindings operation. 
+func (c *Inspector) AddAttributesToFindingsRequest(input *AddAttributesToFindingsInput) (req *request.Request, output *AddAttributesToFindingsOutput) { + op := &request.Operation{ + Name: opAddAttributesToFindings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddAttributesToFindingsInput{} + } + + req = c.newRequest(op, input, output) + output = &AddAttributesToFindingsOutput{} + req.Data = output + return +} + +// Assigns attributes (key and value pair) to the findings specified by the +// findings' ARNs. +func (c *Inspector) AddAttributesToFindings(input *AddAttributesToFindingsInput) (*AddAttributesToFindingsOutput, error) { + req, out := c.AddAttributesToFindingsRequest(input) + err := req.Send() + return out, err +} + +const opAttachAssessmentAndRulesPackage = "AttachAssessmentAndRulesPackage" + +// AttachAssessmentAndRulesPackageRequest generates a request for the AttachAssessmentAndRulesPackage operation. +func (c *Inspector) AttachAssessmentAndRulesPackageRequest(input *AttachAssessmentAndRulesPackageInput) (req *request.Request, output *AttachAssessmentAndRulesPackageOutput) { + op := &request.Operation{ + Name: opAttachAssessmentAndRulesPackage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachAssessmentAndRulesPackageInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachAssessmentAndRulesPackageOutput{} + req.Data = output + return +} + +// Attaches the rules package specified by the rules package ARN to the assessment +// specified by the assessment ARN. +func (c *Inspector) AttachAssessmentAndRulesPackage(input *AttachAssessmentAndRulesPackageInput) (*AttachAssessmentAndRulesPackageOutput, error) { + req, out := c.AttachAssessmentAndRulesPackageRequest(input) + err := req.Send() + return out, err +} + +const opCreateApplication = "CreateApplication" + +// CreateApplicationRequest generates a request for the CreateApplication operation. 
+func (c *Inspector) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *CreateApplicationOutput) { + op := &request.Operation{ + Name: opCreateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateApplicationOutput{} + req.Data = output + return +} + +// Creates a new application using the resource group ARN generated by CreateResourceGroup. +// You can create up to 50 applications per AWS account. You can run up to 500 +// concurrent agents per AWS account. For more information, see Inspector Applications. +// (https://docs.aws.amazon.com/inspector/latest/userguide//inspector_applications.html) +func (c *Inspector) CreateApplication(input *CreateApplicationInput) (*CreateApplicationOutput, error) { + req, out := c.CreateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreateAssessment = "CreateAssessment" + +// CreateAssessmentRequest generates a request for the CreateAssessment operation. +func (c *Inspector) CreateAssessmentRequest(input *CreateAssessmentInput) (req *request.Request, output *CreateAssessmentOutput) { + op := &request.Operation{ + Name: opCreateAssessment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssessmentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAssessmentOutput{} + req.Data = output + return +} + +// Creates an assessment for the application specified by the application ARN. +// You can create up to 500 assessments per AWS account. +func (c *Inspector) CreateAssessment(input *CreateAssessmentInput) (*CreateAssessmentOutput, error) { + req, out := c.CreateAssessmentRequest(input) + err := req.Send() + return out, err +} + +const opCreateResourceGroup = "CreateResourceGroup" + +// CreateResourceGroupRequest generates a request for the CreateResourceGroup operation. 
+func (c *Inspector) CreateResourceGroupRequest(input *CreateResourceGroupInput) (req *request.Request, output *CreateResourceGroupOutput) { + op := &request.Operation{ + Name: opCreateResourceGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateResourceGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateResourceGroupOutput{} + req.Data = output + return +} + +// Creates a resource group using the specified set of tags (key and value pairs) +// that are used to select the EC2 instances to be included in an Inspector +// application. The created resource group is then used to create an Inspector +// application. +func (c *Inspector) CreateResourceGroup(input *CreateResourceGroupInput) (*CreateResourceGroupOutput, error) { + req, out := c.CreateResourceGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApplication = "DeleteApplication" + +// DeleteApplicationRequest generates a request for the DeleteApplication operation. +func (c *Inspector) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { + op := &request.Operation{ + Name: opDeleteApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteApplicationOutput{} + req.Data = output + return +} + +// Deletes the application specified by the application ARN. +func (c *Inspector) DeleteApplication(input *DeleteApplicationInput) (*DeleteApplicationOutput, error) { + req, out := c.DeleteApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAssessment = "DeleteAssessment" + +// DeleteAssessmentRequest generates a request for the DeleteAssessment operation. 
+func (c *Inspector) DeleteAssessmentRequest(input *DeleteAssessmentInput) (req *request.Request, output *DeleteAssessmentOutput) { + op := &request.Operation{ + Name: opDeleteAssessment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAssessmentInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAssessmentOutput{} + req.Data = output + return +} + +// Deletes the assessment specified by the assessment ARN. +func (c *Inspector) DeleteAssessment(input *DeleteAssessmentInput) (*DeleteAssessmentOutput, error) { + req, out := c.DeleteAssessmentRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRun = "DeleteRun" + +// DeleteRunRequest generates a request for the DeleteRun operation. +func (c *Inspector) DeleteRunRequest(input *DeleteRunInput) (req *request.Request, output *DeleteRunOutput) { + op := &request.Operation{ + Name: opDeleteRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRunInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRunOutput{} + req.Data = output + return +} + +// Deletes the assessment run specified by the run ARN. +func (c *Inspector) DeleteRun(input *DeleteRunInput) (*DeleteRunOutput, error) { + req, out := c.DeleteRunRequest(input) + err := req.Send() + return out, err +} + +const opDescribeApplication = "DescribeApplication" + +// DescribeApplicationRequest generates a request for the DescribeApplication operation. +func (c *Inspector) DescribeApplicationRequest(input *DescribeApplicationInput) (req *request.Request, output *DescribeApplicationOutput) { + op := &request.Operation{ + Name: opDescribeApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeApplicationOutput{} + req.Data = output + return +} + +// Describes the application specified by the application ARN. 
+func (c *Inspector) DescribeApplication(input *DescribeApplicationInput) (*DescribeApplicationOutput, error) { + req, out := c.DescribeApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAssessment = "DescribeAssessment" + +// DescribeAssessmentRequest generates a request for the DescribeAssessment operation. +func (c *Inspector) DescribeAssessmentRequest(input *DescribeAssessmentInput) (req *request.Request, output *DescribeAssessmentOutput) { + op := &request.Operation{ + Name: opDescribeAssessment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAssessmentInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAssessmentOutput{} + req.Data = output + return +} + +// Describes the assessment specified by the assessment ARN. +func (c *Inspector) DescribeAssessment(input *DescribeAssessmentInput) (*DescribeAssessmentOutput, error) { + req, out := c.DescribeAssessmentRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCrossAccountAccessRole = "DescribeCrossAccountAccessRole" + +// DescribeCrossAccountAccessRoleRequest generates a request for the DescribeCrossAccountAccessRole operation. +func (c *Inspector) DescribeCrossAccountAccessRoleRequest(input *DescribeCrossAccountAccessRoleInput) (req *request.Request, output *DescribeCrossAccountAccessRoleOutput) { + op := &request.Operation{ + Name: opDescribeCrossAccountAccessRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCrossAccountAccessRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCrossAccountAccessRoleOutput{} + req.Data = output + return +} + +// Describes the IAM role that enables Inspector to access your AWS account. 
+func (c *Inspector) DescribeCrossAccountAccessRole(input *DescribeCrossAccountAccessRoleInput) (*DescribeCrossAccountAccessRoleOutput, error) { + req, out := c.DescribeCrossAccountAccessRoleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeFinding = "DescribeFinding" + +// DescribeFindingRequest generates a request for the DescribeFinding operation. +func (c *Inspector) DescribeFindingRequest(input *DescribeFindingInput) (req *request.Request, output *DescribeFindingOutput) { + op := &request.Operation{ + Name: opDescribeFinding, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFindingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeFindingOutput{} + req.Data = output + return +} + +// Describes the finding specified by the finding ARN. +func (c *Inspector) DescribeFinding(input *DescribeFindingInput) (*DescribeFindingOutput, error) { + req, out := c.DescribeFindingRequest(input) + err := req.Send() + return out, err +} + +const opDescribeResourceGroup = "DescribeResourceGroup" + +// DescribeResourceGroupRequest generates a request for the DescribeResourceGroup operation. +func (c *Inspector) DescribeResourceGroupRequest(input *DescribeResourceGroupInput) (req *request.Request, output *DescribeResourceGroupOutput) { + op := &request.Operation{ + Name: opDescribeResourceGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeResourceGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeResourceGroupOutput{} + req.Data = output + return +} + +// Describes the resource group specified by the resource group ARN. 
+func (c *Inspector) DescribeResourceGroup(input *DescribeResourceGroupInput) (*DescribeResourceGroupOutput, error) { + req, out := c.DescribeResourceGroupRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRulesPackage = "DescribeRulesPackage" + +// DescribeRulesPackageRequest generates a request for the DescribeRulesPackage operation. +func (c *Inspector) DescribeRulesPackageRequest(input *DescribeRulesPackageInput) (req *request.Request, output *DescribeRulesPackageOutput) { + op := &request.Operation{ + Name: opDescribeRulesPackage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRulesPackageInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRulesPackageOutput{} + req.Data = output + return +} + +// Describes the rules package specified by the rules package ARN. +func (c *Inspector) DescribeRulesPackage(input *DescribeRulesPackageInput) (*DescribeRulesPackageOutput, error) { + req, out := c.DescribeRulesPackageRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRun = "DescribeRun" + +// DescribeRunRequest generates a request for the DescribeRun operation. +func (c *Inspector) DescribeRunRequest(input *DescribeRunInput) (req *request.Request, output *DescribeRunOutput) { + op := &request.Operation{ + Name: opDescribeRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRunInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRunOutput{} + req.Data = output + return +} + +// Describes the assessment run specified by the run ARN. +func (c *Inspector) DescribeRun(input *DescribeRunInput) (*DescribeRunOutput, error) { + req, out := c.DescribeRunRequest(input) + err := req.Send() + return out, err +} + +const opDetachAssessmentAndRulesPackage = "DetachAssessmentAndRulesPackage" + +// DetachAssessmentAndRulesPackageRequest generates a request for the DetachAssessmentAndRulesPackage operation. 
+func (c *Inspector) DetachAssessmentAndRulesPackageRequest(input *DetachAssessmentAndRulesPackageInput) (req *request.Request, output *DetachAssessmentAndRulesPackageOutput) { + op := &request.Operation{ + Name: opDetachAssessmentAndRulesPackage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachAssessmentAndRulesPackageInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachAssessmentAndRulesPackageOutput{} + req.Data = output + return +} + +// Detaches the rules package specified by the rules package ARN from the assessment +// specified by the assessment ARN. +func (c *Inspector) DetachAssessmentAndRulesPackage(input *DetachAssessmentAndRulesPackageInput) (*DetachAssessmentAndRulesPackageOutput, error) { + req, out := c.DetachAssessmentAndRulesPackageRequest(input) + err := req.Send() + return out, err +} + +const opGetAssessmentTelemetry = "GetAssessmentTelemetry" + +// GetAssessmentTelemetryRequest generates a request for the GetAssessmentTelemetry operation. +func (c *Inspector) GetAssessmentTelemetryRequest(input *GetAssessmentTelemetryInput) (req *request.Request, output *GetAssessmentTelemetryOutput) { + op := &request.Operation{ + Name: opGetAssessmentTelemetry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAssessmentTelemetryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAssessmentTelemetryOutput{} + req.Data = output + return +} + +// Returns the metadata about the telemetry (application behavioral data) for +// the assessment specified by the assessment ARN. +func (c *Inspector) GetAssessmentTelemetry(input *GetAssessmentTelemetryInput) (*GetAssessmentTelemetryOutput, error) { + req, out := c.GetAssessmentTelemetryRequest(input) + err := req.Send() + return out, err +} + +const opListApplications = "ListApplications" + +// ListApplicationsRequest generates a request for the ListApplications operation. 
+func (c *Inspector) ListApplicationsRequest(input *ListApplicationsInput) (req *request.Request, output *ListApplicationsOutput) { + op := &request.Operation{ + Name: opListApplications, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListApplicationsOutput{} + req.Data = output + return +} + +// Lists the ARNs of the applications within this AWS account. For more information +// about applications, see Inspector Applications (https://docs.aws.amazon.com/inspector/latest/userguide//inspector_applications.html). +func (c *Inspector) ListApplications(input *ListApplicationsInput) (*ListApplicationsOutput, error) { + req, out := c.ListApplicationsRequest(input) + err := req.Send() + return out, err +} + +const opListAssessmentAgents = "ListAssessmentAgents" + +// ListAssessmentAgentsRequest generates a request for the ListAssessmentAgents operation. +func (c *Inspector) ListAssessmentAgentsRequest(input *ListAssessmentAgentsInput) (req *request.Request, output *ListAssessmentAgentsOutput) { + op := &request.Operation{ + Name: opListAssessmentAgents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAssessmentAgentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAssessmentAgentsOutput{} + req.Data = output + return +} + +// Lists the agents of the assessment specified by the assessment ARN. +func (c *Inspector) ListAssessmentAgents(input *ListAssessmentAgentsInput) (*ListAssessmentAgentsOutput, error) { + req, out := c.ListAssessmentAgentsRequest(input) + err := req.Send() + return out, err +} + +const opListAssessments = "ListAssessments" + +// ListAssessmentsRequest generates a request for the ListAssessments operation. 
+func (c *Inspector) ListAssessmentsRequest(input *ListAssessmentsInput) (req *request.Request, output *ListAssessmentsOutput) { + op := &request.Operation{ + Name: opListAssessments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAssessmentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAssessmentsOutput{} + req.Data = output + return +} + +// Lists the assessments corresponding to applications specified by the applications' +// ARNs. +func (c *Inspector) ListAssessments(input *ListAssessmentsInput) (*ListAssessmentsOutput, error) { + req, out := c.ListAssessmentsRequest(input) + err := req.Send() + return out, err +} + +const opListAttachedAssessments = "ListAttachedAssessments" + +// ListAttachedAssessmentsRequest generates a request for the ListAttachedAssessments operation. +func (c *Inspector) ListAttachedAssessmentsRequest(input *ListAttachedAssessmentsInput) (req *request.Request, output *ListAttachedAssessmentsOutput) { + op := &request.Operation{ + Name: opListAttachedAssessments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAttachedAssessmentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedAssessmentsOutput{} + req.Data = output + return +} + +// Lists the assessments attached to the rules package specified by the rules +// package ARN. +func (c *Inspector) ListAttachedAssessments(input *ListAttachedAssessmentsInput) (*ListAttachedAssessmentsOutput, error) { + req, out := c.ListAttachedAssessmentsRequest(input) + err := req.Send() + return out, err +} + +const opListAttachedRulesPackages = "ListAttachedRulesPackages" + +// ListAttachedRulesPackagesRequest generates a request for the ListAttachedRulesPackages operation. 
+func (c *Inspector) ListAttachedRulesPackagesRequest(input *ListAttachedRulesPackagesInput) (req *request.Request, output *ListAttachedRulesPackagesOutput) { + op := &request.Operation{ + Name: opListAttachedRulesPackages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAttachedRulesPackagesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAttachedRulesPackagesOutput{} + req.Data = output + return +} + +// Lists the rules packages attached to the assessment specified by the assessment +// ARN. +func (c *Inspector) ListAttachedRulesPackages(input *ListAttachedRulesPackagesInput) (*ListAttachedRulesPackagesOutput, error) { + req, out := c.ListAttachedRulesPackagesRequest(input) + err := req.Send() + return out, err +} + +const opListFindings = "ListFindings" + +// ListFindingsRequest generates a request for the ListFindings operation. +func (c *Inspector) ListFindingsRequest(input *ListFindingsInput) (req *request.Request, output *ListFindingsOutput) { + op := &request.Operation{ + Name: opListFindings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListFindingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListFindingsOutput{} + req.Data = output + return +} + +// Lists findings generated by the assessment run specified by the run ARNs. +func (c *Inspector) ListFindings(input *ListFindingsInput) (*ListFindingsOutput, error) { + req, out := c.ListFindingsRequest(input) + err := req.Send() + return out, err +} + +const opListRulesPackages = "ListRulesPackages" + +// ListRulesPackagesRequest generates a request for the ListRulesPackages operation. 
+func (c *Inspector) ListRulesPackagesRequest(input *ListRulesPackagesInput) (req *request.Request, output *ListRulesPackagesOutput) { + op := &request.Operation{ + Name: opListRulesPackages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRulesPackagesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRulesPackagesOutput{} + req.Data = output + return +} + +// Lists all available Inspector rules packages. +func (c *Inspector) ListRulesPackages(input *ListRulesPackagesInput) (*ListRulesPackagesOutput, error) { + req, out := c.ListRulesPackagesRequest(input) + err := req.Send() + return out, err +} + +const opListRuns = "ListRuns" + +// ListRunsRequest generates a request for the ListRuns operation. +func (c *Inspector) ListRunsRequest(input *ListRunsInput) (req *request.Request, output *ListRunsOutput) { + op := &request.Operation{ + Name: opListRuns, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRunsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRunsOutput{} + req.Data = output + return +} + +// Lists the assessment runs associated with the assessments specified by the +// assessment ARNs. +func (c *Inspector) ListRuns(input *ListRunsInput) (*ListRunsOutput, error) { + req, out := c.ListRunsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +func (c *Inspector) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists all tags associated with a resource. 
+func (c *Inspector) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opLocalizeText = "LocalizeText" + +// LocalizeTextRequest generates a request for the LocalizeText operation. +func (c *Inspector) LocalizeTextRequest(input *LocalizeTextInput) (req *request.Request, output *LocalizeTextOutput) { + op := &request.Operation{ + Name: opLocalizeText, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &LocalizeTextInput{} + } + + req = c.newRequest(op, input, output) + output = &LocalizeTextOutput{} + req.Data = output + return +} + +// Translates a textual identifier into a user-readable text in a specified +// locale. +func (c *Inspector) LocalizeText(input *LocalizeTextInput) (*LocalizeTextOutput, error) { + req, out := c.LocalizeTextRequest(input) + err := req.Send() + return out, err +} + +const opPreviewAgentsForResourceGroup = "PreviewAgentsForResourceGroup" + +// PreviewAgentsForResourceGroupRequest generates a request for the PreviewAgentsForResourceGroup operation. +func (c *Inspector) PreviewAgentsForResourceGroupRequest(input *PreviewAgentsForResourceGroupInput) (req *request.Request, output *PreviewAgentsForResourceGroupOutput) { + op := &request.Operation{ + Name: opPreviewAgentsForResourceGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PreviewAgentsForResourceGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &PreviewAgentsForResourceGroupOutput{} + req.Data = output + return +} + +// Previews the agents installed on the EC2 instances that are included in the +// application created with the specified resource group. 
+func (c *Inspector) PreviewAgentsForResourceGroup(input *PreviewAgentsForResourceGroupInput) (*PreviewAgentsForResourceGroupOutput, error) { + req, out := c.PreviewAgentsForResourceGroupRequest(input) + err := req.Send() + return out, err +} + +const opRegisterCrossAccountAccessRole = "RegisterCrossAccountAccessRole" + +// RegisterCrossAccountAccessRoleRequest generates a request for the RegisterCrossAccountAccessRole operation. +func (c *Inspector) RegisterCrossAccountAccessRoleRequest(input *RegisterCrossAccountAccessRoleInput) (req *request.Request, output *RegisterCrossAccountAccessRoleOutput) { + op := &request.Operation{ + Name: opRegisterCrossAccountAccessRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterCrossAccountAccessRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterCrossAccountAccessRoleOutput{} + req.Data = output + return +} + +// Register the role that Inspector uses to list your EC2 instances during the +// assessment. +func (c *Inspector) RegisterCrossAccountAccessRole(input *RegisterCrossAccountAccessRoleInput) (*RegisterCrossAccountAccessRoleOutput, error) { + req, out := c.RegisterCrossAccountAccessRoleRequest(input) + err := req.Send() + return out, err +} + +const opRemoveAttributesFromFindings = "RemoveAttributesFromFindings" + +// RemoveAttributesFromFindingsRequest generates a request for the RemoveAttributesFromFindings operation. 
+func (c *Inspector) RemoveAttributesFromFindingsRequest(input *RemoveAttributesFromFindingsInput) (req *request.Request, output *RemoveAttributesFromFindingsOutput) { + op := &request.Operation{ + Name: opRemoveAttributesFromFindings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveAttributesFromFindingsInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveAttributesFromFindingsOutput{} + req.Data = output + return +} + +// Removes the entire attribute (key and value pair) from the findings specified +// by the finding ARNs where an attribute with the specified key exists. +func (c *Inspector) RemoveAttributesFromFindings(input *RemoveAttributesFromFindingsInput) (*RemoveAttributesFromFindingsOutput, error) { + req, out := c.RemoveAttributesFromFindingsRequest(input) + err := req.Send() + return out, err +} + +const opRunAssessment = "RunAssessment" + +// RunAssessmentRequest generates a request for the RunAssessment operation. +func (c *Inspector) RunAssessmentRequest(input *RunAssessmentInput) (req *request.Request, output *RunAssessmentOutput) { + op := &request.Operation{ + Name: opRunAssessment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunAssessmentInput{} + } + + req = c.newRequest(op, input, output) + output = &RunAssessmentOutput{} + req.Data = output + return +} + +// Starts the analysis of the application’s behavior against selected rule packages +// for the assessment specified by the assessment ARN. +func (c *Inspector) RunAssessment(input *RunAssessmentInput) (*RunAssessmentOutput, error) { + req, out := c.RunAssessmentRequest(input) + err := req.Send() + return out, err +} + +const opSetTagsForResource = "SetTagsForResource" + +// SetTagsForResourceRequest generates a request for the SetTagsForResource operation. 
+func (c *Inspector) SetTagsForResourceRequest(input *SetTagsForResourceInput) (req *request.Request, output *SetTagsForResourceOutput) { + op := &request.Operation{ + Name: opSetTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &SetTagsForResourceOutput{} + req.Data = output + return +} + +// Sets tags (key and value pairs) to the assessment specified by the assessment +// ARN. +func (c *Inspector) SetTagsForResource(input *SetTagsForResourceInput) (*SetTagsForResourceOutput, error) { + req, out := c.SetTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opStartDataCollection = "StartDataCollection" + +// StartDataCollectionRequest generates a request for the StartDataCollection operation. +func (c *Inspector) StartDataCollectionRequest(input *StartDataCollectionInput) (req *request.Request, output *StartDataCollectionOutput) { + op := &request.Operation{ + Name: opStartDataCollection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartDataCollectionInput{} + } + + req = c.newRequest(op, input, output) + output = &StartDataCollectionOutput{} + req.Data = output + return +} + +// Starts data collection for the assessment specified by the assessment ARN. +// For this API to function properly, you must not exceed the limit of running +// up to 500 concurrent agents per AWS account. +func (c *Inspector) StartDataCollection(input *StartDataCollectionInput) (*StartDataCollectionOutput, error) { + req, out := c.StartDataCollectionRequest(input) + err := req.Send() + return out, err +} + +const opStopDataCollection = "StopDataCollection" + +// StopDataCollectionRequest generates a request for the StopDataCollection operation. 
+func (c *Inspector) StopDataCollectionRequest(input *StopDataCollectionInput) (req *request.Request, output *StopDataCollectionOutput) { + op := &request.Operation{ + Name: opStopDataCollection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopDataCollectionInput{} + } + + req = c.newRequest(op, input, output) + output = &StopDataCollectionOutput{} + req.Data = output + return +} + +// Stop data collection for the assessment specified by the assessment ARN. +func (c *Inspector) StopDataCollection(input *StopDataCollectionInput) (*StopDataCollectionOutput, error) { + req, out := c.StopDataCollectionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApplication = "UpdateApplication" + +// UpdateApplicationRequest generates a request for the UpdateApplication operation. +func (c *Inspector) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *UpdateApplicationOutput) { + op := &request.Operation{ + Name: opUpdateApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateApplicationOutput{} + req.Data = output + return +} + +// Updates application specified by the application ARN. +func (c *Inspector) UpdateApplication(input *UpdateApplicationInput) (*UpdateApplicationOutput, error) { + req, out := c.UpdateApplicationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAssessment = "UpdateAssessment" + +// UpdateAssessmentRequest generates a request for the UpdateAssessment operation. 
+func (c *Inspector) UpdateAssessmentRequest(input *UpdateAssessmentInput) (req *request.Request, output *UpdateAssessmentOutput) { + op := &request.Operation{ + Name: opUpdateAssessment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssessmentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAssessmentOutput{} + req.Data = output + return +} + +// Updates the assessment specified by the assessment ARN. +func (c *Inspector) UpdateAssessment(input *UpdateAssessmentInput) (*UpdateAssessmentOutput, error) { + req, out := c.UpdateAssessmentRequest(input) + err := req.Send() + return out, err +} + +type AddAttributesToFindingsInput struct { + _ struct{} `type:"structure"` + + // The array of attributes that you want to assign to specified findings. + Attributes []*Attribute `locationName:"attributes" type:"list" required:"true"` + + // The ARNs specifying the findings that you want to assign attributes to. + FindingArns []*string `locationName:"findingArns" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddAttributesToFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddAttributesToFindingsInput) GoString() string { + return s.String() +} + +type AddAttributesToFindingsOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AddAttributesToFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddAttributesToFindingsOutput) GoString() string { + return s.String() +} + +// Contains information about an Inspector agent. This data type is used as +// a response element in the ListAssessmentAgents action. 
+type Agent struct {
+	_ struct{} `type:"structure"`
+
+	// AWS account of the EC2 instance where the agent is installed.
+	AccountId *string `locationName:"accountId" type:"string"`
+
+	// The current health state of the agent. Values can be set to HEALTHY or UNHEALTHY.
+	AgentHealth *string `locationName:"agentHealth" type:"string"`
+
+	// The detailed health state of the agent. Values can be set to RUNNING, HEALTHY,
+	// UNHEALTHY, UNKNOWN, BLACKLISTED, SHUTDOWN, THROTTLED.
+	AgentHealthCode *string `locationName:"agentHealthCode" type:"string"`
+
+	// The description for the agent health code.
+	AgentHealthDetails *string `locationName:"agentHealthDetails" type:"string"`
+
+	// The EC2 instance ID where the agent is installed.
+	AgentId *string `locationName:"agentId" type:"string"`
+
+	// The ARN of the assessment that is associated with the agent.
+	AssessmentArn *string `locationName:"assessmentArn" type:"string"`
+
+	// This data type property is currently not used.
+	AutoScalingGroup *string `locationName:"autoScalingGroup" type:"string"`
+
+	// The Inspector application data metrics collected by the agent.
+	Telemetry []*Telemetry `locationName:"telemetry" type:"list"`
+}
+
+// String returns the string representation
+func (s Agent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Agent) GoString() string {
+	return s.String()
+}
+
+// This data type is used as a response element in the PreviewAgentsForResourceGroup
+// action.
+type AgentPreview struct {
+	_ struct{} `type:"structure"`
+
+	// The id of the EC2 instance where the agent is installed.
+	AgentId *string `locationName:"agentId" type:"string"`
+
+	// The autoscaling group for the EC2 instance where the agent is installed.
+ AutoScalingGroup *string `locationName:"autoScalingGroup" type:"string"` +} + +// String returns the string representation +func (s AgentPreview) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentPreview) GoString() string { + return s.String() +} + +// This data type is used as a response element in the ListAssessmentAgents +// action. +type AgentsFilter struct { + _ struct{} `type:"structure"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the agentHealth property of the Agent + // data type. + AgentHealthList []*string `locationName:"agentHealthList" type:"list"` +} + +// String returns the string representation +func (s AgentsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentsFilter) GoString() string { + return s.String() +} + +// Contains information about an Inspector application. +// +// This data type is used as the response element in the DescribeApplication +// action. +type Application struct { + _ struct{} `type:"structure"` + + // The ARN specifying the Inspector application. + ApplicationArn *string `locationName:"applicationArn" type:"string"` + + // The name of the Inspector application. + ApplicationName *string `locationName:"applicationName" type:"string"` + + // The ARN specifying the resource group that is associated with the application. + ResourceGroupArn *string `locationName:"resourceGroupArn" type:"string"` +} + +// String returns the string representation +func (s Application) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Application) GoString() string { + return s.String() +} + +// This data type is used as the request parameter in the ListApplications action. 
+type ApplicationsFilter struct { + _ struct{} `type:"structure"` + + // For a record to match a filter, an explicit value or a string containing + // a wildcard specified for this data type property must match the value of + // the applicationName property of the Application data type. + ApplicationNamePatterns []*string `locationName:"applicationNamePatterns" type:"list"` +} + +// String returns the string representation +func (s ApplicationsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplicationsFilter) GoString() string { + return s.String() +} + +// Contains information about an Inspector assessment. +// +// This data type is used as the response element in the DescribeAssessment +// action. +type Assessment struct { + _ struct{} `type:"structure"` + + // The ARN of the application that corresponds to this assessment. + ApplicationArn *string `locationName:"applicationArn" type:"string"` + + // The ARN of the assessment. + AssessmentArn *string `locationName:"assessmentArn" type:"string"` + + // The name of the assessment. + AssessmentName *string `locationName:"assessmentName" type:"string"` + + // The state of the assessment. Values can be set to Created, Collecting Data, + // Stopping, and Completed. + AssessmentState *string `locationName:"assessmentState" type:"string"` + + // Boolean value (true or false) specifying whether the data collection process + // is completed. + DataCollected *bool `locationName:"dataCollected" type:"boolean"` + + // The assessment duration in seconds. The default value is 3600 seconds (one + // hour). The maximum value is 86400 seconds (one day). + DurationInSeconds *int64 `locationName:"durationInSeconds" type:"integer"` + + // The assessment end time. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"unix"` + + // This data type property is not currently used. 
+ FailureMessage *string `locationName:"failureMessage" type:"string"` + + // The assessment start time. + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unix"` + + // The user-defined attributes that are assigned to every generated finding. + UserAttributesForFindings []*Attribute `locationName:"userAttributesForFindings" type:"list"` +} + +// String returns the string representation +func (s Assessment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Assessment) GoString() string { + return s.String() +} + +// This data type is used as the request parameter in the ListAssessments and +// ListAttachedAssessments actions. +type AssessmentsFilter struct { + _ struct{} `type:"structure"` + + // For a record to match a filter, an explicit value or a string containing + // a wildcard specified for this data type property must match the value of + // the assessmentName property of the Assessment data type. + AssessmentNamePatterns []*string `locationName:"assessmentNamePatterns" type:"list"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the assessmentState property of the + // Assessment data type. + AssessmentStates []*string `locationName:"assessmentStates" type:"list"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the dataCollected property of the + // Assessment data type. + DataCollected *bool `locationName:"dataCollected" type:"boolean"` + + // For a record to match a filter, the value specified for this data type property + // must inclusively match any value between the specified minimum and maximum + // values of the durationInSeconds property of the Assessment data type. 
+ DurationRange *DurationRange `locationName:"durationRange" type:"structure"` + + // For a record to match a filter, the value specified for this data type property + // must inclusively match any value between the specified minimum and maximum + // values of the endTime property of the Assessment data type. + EndTimeRange *TimestampRange `locationName:"endTimeRange" type:"structure"` + + // For a record to match a filter, the value specified for this data type property + // must inclusively match any value between the specified minimum and maximum + // values of the startTime property of the Assessment data type. + StartTimeRange *TimestampRange `locationName:"startTimeRange" type:"structure"` +} + +// String returns the string representation +func (s AssessmentsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssessmentsFilter) GoString() string { + return s.String() +} + +type AttachAssessmentAndRulesPackageInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment to which you want to attach a rules package. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` + + // The ARN specifying the rules package that you want to attach to the assessment. + RulesPackageArn *string `locationName:"rulesPackageArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachAssessmentAndRulesPackageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachAssessmentAndRulesPackageInput) GoString() string { + return s.String() +} + +type AttachAssessmentAndRulesPackageOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AttachAssessmentAndRulesPackageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachAssessmentAndRulesPackageOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the AddAttributesToFindings +// action and a request parameter in the CreateAssessment action. +type Attribute struct { + _ struct{} `type:"structure"` + + // The attribute key. + Key *string `locationName:"key" type:"string"` + + // The value assigned to the attribute key. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +type CreateApplicationInput struct { + _ struct{} `type:"structure"` + + // The user-defined name identifying the application that you want to create. + // The name must be unique within the AWS account. + ApplicationName *string `locationName:"applicationName" type:"string" required:"true"` + + // The ARN specifying the resource group that is used to create the application. + ResourceGroupArn *string `locationName:"resourceGroupArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationInput) GoString() string { + return s.String() +} + +type CreateApplicationOutput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the application that is created. 
+ ApplicationArn *string `locationName:"applicationArn" type:"string"` +} + +// String returns the string representation +func (s CreateApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateApplicationOutput) GoString() string { + return s.String() +} + +type CreateAssessmentInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the application for which you want to create an assessment. + ApplicationArn *string `locationName:"applicationArn" type:"string" required:"true"` + + // The user-defined name identifying the assessment that you want to create. + // You can create several assessments for an application. The names of the assessments + // corresponding to a particular application must be unique. + AssessmentName *string `locationName:"assessmentName" type:"string" required:"true"` + + // The duration of the assessment in seconds. The default value is 3600 seconds + // (one hour). The maximum value is 86400 seconds (one day). + DurationInSeconds *int64 `locationName:"durationInSeconds" type:"integer" required:"true"` + + // The user-defined attributes that are assigned to every finding generated + // by running this assessment. + UserAttributesForFindings []*Attribute `locationName:"userAttributesForFindings" type:"list"` +} + +// String returns the string representation +func (s CreateAssessmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssessmentInput) GoString() string { + return s.String() +} + +type CreateAssessmentOutput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment that is created. 
+ AssessmentArn *string `locationName:"assessmentArn" type:"string"` +} + +// String returns the string representation +func (s CreateAssessmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssessmentOutput) GoString() string { + return s.String() +} + +type CreateResourceGroupInput struct { + _ struct{} `type:"structure"` + + // A collection of keys and an array of possible values in JSON format. + // + // For example, [{ "key1" : ["Value1","Value2"]},{"Key2": ["Value3"]}] + ResourceGroupTags *string `locationName:"resourceGroupTags" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateResourceGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResourceGroupInput) GoString() string { + return s.String() +} + +type CreateResourceGroupOutput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the resource group that is created. + ResourceGroupArn *string `locationName:"resourceGroupArn" type:"string"` +} + +// String returns the string representation +func (s CreateResourceGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResourceGroupOutput) GoString() string { + return s.String() +} + +type DeleteApplicationInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the application that you want to delete. + ApplicationArn *string `locationName:"applicationArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationInput) GoString() string { + return s.String() +} + +type DeleteApplicationOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DeleteApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteApplicationOutput) GoString() string { + return s.String() +} + +type DeleteAssessmentInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment that you want to delete. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAssessmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssessmentInput) GoString() string { + return s.String() +} + +type DeleteAssessmentOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DeleteAssessmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssessmentOutput) GoString() string { + return s.String() +} + +type DeleteRunInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment run that you want to delete. + RunArn *string `locationName:"runArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRunInput) GoString() string { + return s.String() +} + +type DeleteRunOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DeleteRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRunOutput) GoString() string { + return s.String() +} + +type DescribeApplicationInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the application that you want to describe. + ApplicationArn *string `locationName:"applicationArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicationInput) GoString() string { + return s.String() +} + +type DescribeApplicationOutput struct { + _ struct{} `type:"structure"` + + // Information about the application. + Application *Application `locationName:"application" type:"structure"` +} + +// String returns the string representation +func (s DescribeApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicationOutput) GoString() string { + return s.String() +} + +type DescribeAssessmentInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment that you want to describe. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAssessmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssessmentInput) GoString() string { + return s.String() +} + +type DescribeAssessmentOutput struct { + _ struct{} `type:"structure"` + + // Information about the assessment. 
+ Assessment *Assessment `locationName:"assessment" type:"structure"` +} + +// String returns the string representation +func (s DescribeAssessmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssessmentOutput) GoString() string { + return s.String() +} + +type DescribeCrossAccountAccessRoleInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeCrossAccountAccessRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCrossAccountAccessRoleInput) GoString() string { + return s.String() +} + +type DescribeCrossAccountAccessRoleOutput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the IAM role that Inspector uses to access your AWS account. + RoleArn *string `locationName:"roleArn" type:"string"` + + // A Boolean value that specifies whether the IAM role has the necessary policies + // attached to enable Inspector to access your AWS account. + Valid *bool `locationName:"valid" type:"boolean"` +} + +// String returns the string representation +func (s DescribeCrossAccountAccessRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCrossAccountAccessRoleOutput) GoString() string { + return s.String() +} + +type DescribeFindingInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the finding that you want to describe. + FindingArn *string `locationName:"findingArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFindingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFindingInput) GoString() string { + return s.String() +} + +type DescribeFindingOutput struct { + _ struct{} `type:"structure"` + + // Information about the finding. 
+ Finding *Finding `locationName:"finding" type:"structure"` +} + +// String returns the string representation +func (s DescribeFindingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFindingOutput) GoString() string { + return s.String() +} + +type DescribeResourceGroupInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the resource group that you want to describe. + ResourceGroupArn *string `locationName:"resourceGroupArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeResourceGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResourceGroupInput) GoString() string { + return s.String() +} + +type DescribeResourceGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the resource group. + ResourceGroup *ResourceGroup `locationName:"resourceGroup" type:"structure"` +} + +// String returns the string representation +func (s DescribeResourceGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResourceGroupOutput) GoString() string { + return s.String() +} + +type DescribeRulesPackageInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the rules package that you want to describe. + RulesPackageArn *string `locationName:"rulesPackageArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRulesPackageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRulesPackageInput) GoString() string { + return s.String() +} + +type DescribeRulesPackageOutput struct { + _ struct{} `type:"structure"` + + // Information about the rules package. 
+ RulesPackage *RulesPackage `locationName:"rulesPackage" type:"structure"` +} + +// String returns the string representation +func (s DescribeRulesPackageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRulesPackageOutput) GoString() string { + return s.String() +} + +type DescribeRunInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment run that you want to describe. + RunArn *string `locationName:"runArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRunInput) GoString() string { + return s.String() +} + +type DescribeRunOutput struct { + _ struct{} `type:"structure"` + + // Information about the assessment run. + Run *Run `locationName:"run" type:"structure"` +} + +// String returns the string representation +func (s DescribeRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRunOutput) GoString() string { + return s.String() +} + +type DetachAssessmentAndRulesPackageInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment from which you want to detach a rules package. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` + + // The ARN specifying the rules package that you want to detach from the assessment. 
+ RulesPackageArn *string `locationName:"rulesPackageArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachAssessmentAndRulesPackageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachAssessmentAndRulesPackageInput) GoString() string { + return s.String() +} + +type DetachAssessmentAndRulesPackageOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DetachAssessmentAndRulesPackageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachAssessmentAndRulesPackageOutput) GoString() string { + return s.String() +} + +// This data type is used in the AssessmentsFilter data type. +type DurationRange struct { + _ struct{} `type:"structure"` + + // The maximum value of the duration range. Must be less than or equal to 604800 + // seconds (1 week). + Maximum *int64 `locationName:"maximum" type:"integer"` + + // The minimum value of the duration range. Must be greater than zero. + Minimum *int64 `locationName:"minimum" type:"integer"` +} + +// String returns the string representation +func (s DurationRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DurationRange) GoString() string { + return s.String() +} + +// Contains information about an Inspector finding. +// +// This data type is used as the response element in the DescribeFinding action. +type Finding struct { + _ struct{} `type:"structure"` + + // The EC2 instance ID where the agent is installed that is used during the + // assessment that generates the finding. + AgentId *string `locationName:"agentId" type:"string"` + + // The system-defined attributes for the finding. 
+ Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The autoscaling group of the EC2 instance where the agent is installed that + // is used during the assessment that generates the finding. + AutoScalingGroup *string `locationName:"autoScalingGroup" type:"string"` + + // The description of the finding. + Description *LocalizedText `locationName:"description" type:"structure"` + + // A short description that identifies the finding. + Finding *LocalizedText `locationName:"finding" type:"structure"` + + // The ARN specifying the finding. + FindingArn *string `locationName:"findingArn" type:"string"` + + // The recommendation for the finding. + Recommendation *LocalizedText `locationName:"recommendation" type:"structure"` + + // The rule name that is used to generate the finding. + RuleName *string `locationName:"ruleName" type:"string"` + + // The ARN of the rules package that is used to generate the finding. + RulesPackageArn *string `locationName:"rulesPackageArn" type:"string"` + + // The ARN of the assessment run that generated the finding. + RunArn *string `locationName:"runArn" type:"string"` + + // The finding severity. Values can be set to High, Medium, Low, and Informational. + Severity *string `locationName:"severity" type:"string"` + + // The user-defined attributes that are assigned to the finding. + UserAttributes []*Attribute `locationName:"userAttributes" type:"list"` +} + +// String returns the string representation +func (s Finding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Finding) GoString() string { + return s.String() +} + +// This data type is used as a request parameter in the ListFindings action. +type FindingsFilter struct { + _ struct{} `type:"structure"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the attributes property of the Finding + // data type. 
+ Attributes []*Attribute `locationName:"attributes" type:"list"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the ruleName property of the Finding + // data type. + RuleNames []*string `locationName:"ruleNames" type:"list"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the rulesPackageArn property of the + // Finding data type. + RulesPackageArns []*string `locationName:"rulesPackageArns" type:"list"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the severity property of the Finding + // data type. + Severities []*string `locationName:"severities" type:"list"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the userAttributes property of the + // Finding data type. + UserAttributes []*Attribute `locationName:"userAttributes" type:"list"` +} + +// String returns the string representation +func (s FindingsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FindingsFilter) GoString() string { + return s.String() +} + +type GetAssessmentTelemetryInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment the telemetry of which you want to obtain. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAssessmentTelemetryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssessmentTelemetryInput) GoString() string { + return s.String() +} + +type GetAssessmentTelemetryOutput struct { + _ struct{} `type:"structure"` + + // Telemetry details. 
+ Telemetry []*Telemetry `locationName:"telemetry" type:"list"` +} + +// String returns the string representation +func (s GetAssessmentTelemetryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssessmentTelemetryOutput) GoString() string { + return s.String() +} + +type ListApplicationsInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *ApplicationsFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the ListApplications action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from previous response to continue listing data. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationsInput) GoString() string { + return s.String() +} + +type ListApplicationsOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs specifying the applications returned by the action. 
+ ApplicationArnList []*string `locationName:"applicationArnList" type:"list"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListApplicationsOutput) GoString() string { + return s.String() +} + +type ListAssessmentAgentsInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment whose agents you want to list. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *AgentsFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the ListAssessmentAgents action. + // Subsequent calls to the action fill nextToken in the request with the value + // of NextToken from previous response to continue listing data. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentAgentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentAgentsInput) GoString() string { + return s.String() +} + +type ListAssessmentAgentsOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs specifying the agents returned by the action. + AgentList []*Agent `locationName:"agentList" type:"list"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentAgentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentAgentsOutput) GoString() string { + return s.String() +} + +type ListAssessmentsInput struct { + _ struct{} `type:"structure"` + + // A list of ARNs specifying the applications the assessments of which you want + // to list. + ApplicationArns []*string `locationName:"applicationArns" type:"list"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *AssessmentsFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. 
+ MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the ListAssessments action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from previous response to continue listing data. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentsInput) GoString() string { + return s.String() +} + +type ListAssessmentsOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs specifying the assessments returned by the action. + AssessmentArnList []*string `locationName:"assessmentArnList" type:"list"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAssessmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssessmentsOutput) GoString() string { + return s.String() +} + +type ListAttachedAssessmentsInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. 
+ Filter *AssessmentsFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the ListAttachedAssessments action. + // Subsequent calls to the action fill nextToken in the request with the value + // of NextToken from previous response to continue listing data. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ARN specifying the rules package whose assessments you want to list. + RulesPackageArn *string `locationName:"rulesPackageArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedAssessmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedAssessmentsInput) GoString() string { + return s.String() +} + +type ListAttachedAssessmentsOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs specifying the assessments returned by the action. + AssessmentArnList []*string `locationName:"assessmentArnList" type:"list"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAttachedAssessmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedAssessmentsOutput) GoString() string { + return s.String() +} + +type ListAttachedRulesPackagesInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the assessment whose rules packages you want to list. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the ListAttachedRulesPackages action. + // Subsequent calls to the action fill nextToken in the request with the value + // of NextToken from previous response to continue listing data. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAttachedRulesPackagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRulesPackagesInput) GoString() string { + return s.String() +} + +type ListAttachedRulesPackagesOutput struct { + _ struct{} `type:"structure"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of ARNs specifying the rules packages returned by the action. 
+ RulesPackageArnList []*string `locationName:"rulesPackageArnList" type:"list"` +} + +// String returns the string representation +func (s ListAttachedRulesPackagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRulesPackagesOutput) GoString() string { + return s.String() +} + +type ListFindingsInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *FindingsFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the ListFindings action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from previous response to continue listing data. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ARNs of the assessment runs that generate the findings that you want + // to list. + RunArns []*string `locationName:"runArns" type:"list"` +} + +// String returns the string representation +func (s ListFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFindingsInput) GoString() string { + return s.String() +} + +type ListFindingsOutput struct { + _ struct{} `type:"structure"` + + // A list of ARNs specifying the findings returned by the action. 
+ FindingArnList []*string `locationName:"findingArnList" type:"list"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFindingsOutput) GoString() string { + return s.String() +} + +type ListRulesPackagesInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the ListRulesPackages action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from previous response to continue listing data. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListRulesPackagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesPackagesInput) GoString() string { + return s.String() +} + +type ListRulesPackagesOutput struct { + _ struct{} `type:"structure"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The list of ARNs specifying the rules packages returned by the action. + RulesPackageArnList []*string `locationName:"rulesPackageArnList" type:"list"` +} + +// String returns the string representation +func (s ListRulesPackagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesPackagesOutput) GoString() string { + return s.String() +} + +type ListRunsInput struct { + _ struct{} `type:"structure"` + + // The ARNs specifying the assessments whose runs you want to list. + AssessmentArns []*string `locationName:"assessmentArns" type:"list"` + + // You can use this parameter to specify a subset of data to be included in + // the action's response. + // + // For a record to match a filter, all specified filter attributes must match. + // When multiple values are specified for a filter attribute, any of the values + // can match. + Filter *RunsFilter `locationName:"filter" type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the ListRuns action. Subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from previous response to continue listing data. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListRunsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRunsInput) GoString() string { + return s.String() +} + +type ListRunsOutput struct { + _ struct{} `type:"structure"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of ARNs specifying the assessment runs returned by the action. + RunArnList []*string `locationName:"runArnList" type:"list"` +} + +// String returns the string representation +func (s ListRunsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRunsOutput) GoString() string { + return s.String() +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the resource whose tags you want to list. + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A collection of key and value pairs. 
+ TagList []*Tag `locationName:"tagList" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +type LocalizeTextInput struct { + _ struct{} `type:"structure"` + + // The locale that you want to translate a textual identifier into. + Locale *string `locationName:"locale" type:"string" required:"true"` + + // A list of textual identifiers. + LocalizedTexts []*LocalizedText `locationName:"localizedTexts" type:"list" required:"true"` +} + +// String returns the string representation +func (s LocalizeTextInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalizeTextInput) GoString() string { + return s.String() +} + +type LocalizeTextOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` + + // The resulting list of user-readable texts. + Results []*string `locationName:"results" type:"list"` +} + +// String returns the string representation +func (s LocalizeTextOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalizeTextOutput) GoString() string { + return s.String() +} + +// The textual identifier. This data type is used as the request parameter in +// the LocalizeText action. +type LocalizedText struct { + _ struct{} `type:"structure"` + + // The facility and id properties of the LocalizedTextKey data type. + Key *LocalizedTextKey `locationName:"key" type:"structure"` + + // Values for the dynamic elements of the string specified by the textual identifier. 
+ Parameters []*Parameter `locationName:"parameters" type:"list"` +} + +// String returns the string representation +func (s LocalizedText) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalizedText) GoString() string { + return s.String() +} + +// This data type is used in the LocalizedText data type. +type LocalizedTextKey struct { + _ struct{} `type:"structure"` + + // The module response source of the text. + Facility *string `locationName:"facility" type:"string"` + + // Part of the module response source of the text. + Id *string `locationName:"id" type:"string"` +} + +// String returns the string representation +func (s LocalizedTextKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalizedTextKey) GoString() string { + return s.String() +} + +// This data type is used in the Telemetry data type. +// +// This is metadata about the behavioral data collected by the Inspector agent +// on your EC2 instances during an assessment and passed to the Inspector service +// for analysis. +type MessageTypeTelemetry struct { + _ struct{} `type:"structure"` + + // The number of times that the behavioral data is collected by the agent during + // an assessment. + Count *int64 `locationName:"count" type:"long"` + + // The total size of the behavioral data that is collected by the agent during + // an assessment. + DataSize *int64 `locationName:"dataSize" type:"long"` + + // A specific type of behavioral data that is collected by the agent. + MessageType *string `locationName:"messageType" type:"string"` +} + +// String returns the string representation +func (s MessageTypeTelemetry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageTypeTelemetry) GoString() string { + return s.String() +} + +// This data type is used in the LocalizedText data type. 
+type Parameter struct { + _ struct{} `type:"structure"` + + // The name of the variable that is being replaced. + Name *string `locationName:"name" type:"string"` + + // The value assigned to the variable that is being replaced. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +type PreviewAgentsForResourceGroupInput struct { + _ struct{} `type:"structure"` + + // You can use this parameter to indicate the maximum number of items you want + // in the response. The default value is 10. The maximum value is 500. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // You can use this parameter when paginating results. Set the value of this + // parameter to 'null' on your first call to the PreviewAgentsForResourceGroup + // action. Subsequent calls to the action fill nextToken in the request with + // the value of NextToken from previous response to continue listing data. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ARN of the resource group that is used to create an application. + ResourceGroupArn *string `locationName:"resourceGroupArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s PreviewAgentsForResourceGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PreviewAgentsForResourceGroupInput) GoString() string { + return s.String() +} + +type PreviewAgentsForResourceGroupOutput struct { + _ struct{} `type:"structure"` + + // The resulting list of agents. 
+ AgentPreviewList []*AgentPreview `locationName:"agentPreviewList" type:"list"` + + // When a response is generated, if there is more data to be listed, this parameter + // is present in the response and contains the value to use for the nextToken + // parameter in a subsequent pagination request. If there is no more data to + // be listed, this parameter is set to 'null'. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s PreviewAgentsForResourceGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PreviewAgentsForResourceGroupOutput) GoString() string { + return s.String() +} + +type RegisterCrossAccountAccessRoleInput struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role that Inspector uses to list your EC2 instances during + // the assessment. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterCrossAccountAccessRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterCrossAccountAccessRoleInput) GoString() string { + return s.String() +} + +type RegisterCrossAccountAccessRoleOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s RegisterCrossAccountAccessRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterCrossAccountAccessRoleOutput) GoString() string { + return s.String() +} + +type RemoveAttributesFromFindingsInput struct { + _ struct{} `type:"structure"` + + // The array of attribute keys that you want to remove from specified findings. 
+ AttributeKeys []*string `locationName:"attributeKeys" type:"list" required:"true"` + + // The ARNs specifying the findings that you want to remove attributes from. + FindingArns []*string `locationName:"findingArns" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveAttributesFromFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveAttributesFromFindingsInput) GoString() string { + return s.String() +} + +type RemoveAttributesFromFindingsOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s RemoveAttributesFromFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveAttributesFromFindingsOutput) GoString() string { + return s.String() +} + +// Contains information about a resource group. The resource group defines a +// set of tags that, when queried, identify the AWS resources that comprise +// the application. +// +// This data type is used as the response element in the DescribeResourceGroup +// action. +type ResourceGroup struct { + _ struct{} `type:"structure"` + + // The ARN of the resource group. + ResourceGroupArn *string `locationName:"resourceGroupArn" type:"string"` + + // The tags (key and value pairs) of the resource group. + // + // This data type property is used in the CreateResourceGroup action. + // + // A collection of keys and an array of possible values in JSON format. 
+ // + // For example, [{ "key1" : ["Value1","Value2"]},{"Key2": ["Value3"]}] + ResourceGroupTags *string `locationName:"resourceGroupTags" type:"string"` +} + +// String returns the string representation +func (s ResourceGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceGroup) GoString() string { + return s.String() +} + +// Contains information about an Inspector rules package. +// +// This data type is used as the response element in the DescribeRulesPackage +// action. +type RulesPackage struct { + _ struct{} `type:"structure"` + + // The description of the rules package. + Description *LocalizedText `locationName:"description" type:"structure"` + + // The provider of the rules package. + Provider *string `locationName:"provider" type:"string"` + + // The ARN of the rules package. + RulesPackageArn *string `locationName:"rulesPackageArn" type:"string"` + + // The name of the rules package. + RulesPackageName *string `locationName:"rulesPackageName" type:"string"` + + // The version id of the rules package. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s RulesPackage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RulesPackage) GoString() string { + return s.String() +} + +// A snapshot of an Inspector assessment that contains the assessment's findings. +// +// This data type is used as the response element in the DescribeRun action. +type Run struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment that is associated with the run. + AssessmentArn *string `locationName:"assessmentArn" type:"string"` + + // Run completion time that corresponds to the rules packages evaluation completion + // time or failure. 
+ CompletionTime *time.Time `locationName:"completionTime" type:"timestamp" timestampFormat:"unix"` + + // Run creation time that corresponds to the data collection completion time + // or failure. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"unix"` + + // Rules packages selected for the run of the assessment. + RulesPackages []*string `locationName:"rulesPackages" type:"list"` + + // The ARN of the run. + RunArn *string `locationName:"runArn" type:"string"` + + // The auto-generated name for the run. + RunName *string `locationName:"runName" type:"string"` + + // The state of the run. Values can be set to DataCollectionComplete, EvaluatingPolicies, + // EvaluatingPoliciesErrorCanRetry, Completed, Failed, TombStoned. + RunState *string `locationName:"runState" type:"string"` +} + +// String returns the string representation +func (s Run) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Run) GoString() string { + return s.String() +} + +type RunAssessmentInput struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment that you want to run. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` + + // A name specifying the run of the assessment. + RunName *string `locationName:"runName" type:"string" required:"true"` +} + +// String returns the string representation +func (s RunAssessmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunAssessmentInput) GoString() string { + return s.String() +} + +type RunAssessmentOutput struct { + _ struct{} `type:"structure"` + + // The ARN specifying the run of the assessment. 
+ RunArn *string `locationName:"runArn" type:"string"` +} + +// String returns the string representation +func (s RunAssessmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunAssessmentOutput) GoString() string { + return s.String() +} + +// This data type is used as the request parameter in the ListRuns action. +type RunsFilter struct { + _ struct{} `type:"structure"` + + // For a record to match a filter, the value specified for this data type property + // must inclusively match any value between the specified minimum and maximum + // values of the completionTime property of the Run data type. + CompletionTime *TimestampRange `locationName:"completionTime" type:"structure"` + + // For a record to match a filter, the value specified for this data type property + // must inclusively match any value between the specified minimum and maximum + // values of the creationTime property of the Run data type. + CreationTime *TimestampRange `locationName:"creationTime" type:"structure"` + + // For a record to match a filter, the value specified for this data type property + // must match a list of values of the rulesPackages property of the Run data + // type. + RulesPackages []*string `locationName:"rulesPackages" type:"list"` + + // For a record to match a filter, an explicit value or a string containing + // a wildcard specified for this data type property must match the value of + // the runName property of the Run data type. + RunNamePatterns []*string `locationName:"runNamePatterns" type:"list"` + + // For a record to match a filter, the value specified for this data type property + // must be the exact match of the value of the runState property of the Run + // data type. 
+ RunStates []*string `locationName:"runStates" type:"list"` +} + +// String returns the string representation +func (s RunsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunsFilter) GoString() string { + return s.String() +} + +type SetTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment that you want to set tags to. + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // A collection of key and value pairs that you want to set to an assessment. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s SetTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTagsForResourceInput) GoString() string { + return s.String() +} + +type SetTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s SetTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTagsForResourceOutput) GoString() string { + return s.String() +} + +type StartDataCollectionInput struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment for which you want to start the data collection + // process. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartDataCollectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartDataCollectionInput) GoString() string { + return s.String() +} + +type StartDataCollectionOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s StartDataCollectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartDataCollectionOutput) GoString() string { + return s.String() +} + +type StopDataCollectionInput struct { + _ struct{} `type:"structure"` + + // The ARN of the assessment for which you want to stop the data collection + // process. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopDataCollectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDataCollectionInput) GoString() string { + return s.String() +} + +type StopDataCollectionOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s StopDataCollectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopDataCollectionOutput) GoString() string { + return s.String() +} + +// A key and value pair. +// +// This data type is used as a request parameter in the SetTagsForResource +// action and a response element in the ListTagsForResource action. +type Tag struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `type:"string"` + + // The value assigned to a tag key. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// The metadata about the Inspector application data metrics collected by the +// agent. 
+// +// This data type is used as the response element in the GetAssessmentTelemetry +// action. +type Telemetry struct { + _ struct{} `type:"structure"` + + // Counts of individual metrics received by Inspector from the agent. + MessageTypeTelemetries []*MessageTypeTelemetry `locationName:"messageTypeTelemetries" type:"list"` + + // The category of the individual metrics that together constitute the telemetry + // that Inspector received from the agent. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s Telemetry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Telemetry) GoString() string { + return s.String() +} + +// This data type is used in the AssessmentsFilter and RunsFilter data types. +type TimestampRange struct { + _ struct{} `type:"structure"` + + // The maximum value of the timestamp range. + Maximum *time.Time `locationName:"maximum" type:"timestamp" timestampFormat:"unix"` + + // The minimum value of the timestamp range. + Minimum *time.Time `locationName:"minimum" type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s TimestampRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimestampRange) GoString() string { + return s.String() +} + +type UpdateApplicationInput struct { + _ struct{} `type:"structure"` + + // Application ARN that you want to update. + ApplicationArn *string `locationName:"applicationArn" type:"string" required:"true"` + + // Application name that you want to update. + ApplicationName *string `locationName:"applicationName" type:"string" required:"true"` + + // The resource group ARN that you want to update. 
+ ResourceGroupArn *string `locationName:"resourceGroupArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationInput) GoString() string { + return s.String() +} + +type UpdateApplicationOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UpdateApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateApplicationOutput) GoString() string { + return s.String() +} + +type UpdateAssessmentInput struct { + _ struct{} `type:"structure"` + + // Asessment ARN that you want to update. + AssessmentArn *string `locationName:"assessmentArn" type:"string" required:"true"` + + // Assessment name that you want to update. + AssessmentName *string `locationName:"assessmentName" type:"string" required:"true"` + + // Assessment duration in seconds that you want to update. The default value + // is 3600 seconds (one hour). The maximum value is 86400 seconds (one day). + DurationInSeconds *int64 `locationName:"durationInSeconds" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateAssessmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssessmentInput) GoString() string { + return s.String() +} + +type UpdateAssessmentOutput struct { + _ struct{} `type:"structure"` + + // Confirmation details of the action performed. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UpdateAssessmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssessmentOutput) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,888 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package inspector_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/inspector" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleInspector_AddAttributesToFindings() { + svc := inspector.New(session.New()) + + params := &inspector.AddAttributesToFindingsInput{ + Attributes: []*inspector.Attribute{ // Required + { // Required + Key: aws.String("AttributeKey"), + Value: aws.String("AttributeValue"), + }, + // More values... + }, + FindingArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.AddAttributesToFindings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_AttachAssessmentAndRulesPackage() { + svc := inspector.New(session.New()) + + params := &inspector.AttachAssessmentAndRulesPackageInput{ + AssessmentArn: aws.String("Arn"), // Required + RulesPackageArn: aws.String("Arn"), // Required + } + resp, err := svc.AttachAssessmentAndRulesPackage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_CreateApplication() { + svc := inspector.New(session.New()) + + params := &inspector.CreateApplicationInput{ + ApplicationName: aws.String("Name"), // Required + ResourceGroupArn: aws.String("Arn"), // Required + } + resp, err := svc.CreateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_CreateAssessment() { + svc := inspector.New(session.New()) + + params := &inspector.CreateAssessmentInput{ + ApplicationArn: aws.String("Arn"), // Required + AssessmentName: aws.String("Name"), // Required + DurationInSeconds: aws.Int64(1), // Required + UserAttributesForFindings: []*inspector.Attribute{ + { // Required + Key: aws.String("AttributeKey"), + Value: aws.String("AttributeValue"), + }, + // More values... + }, + } + resp, err := svc.CreateAssessment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_CreateResourceGroup() { + svc := inspector.New(session.New()) + + params := &inspector.CreateResourceGroupInput{ + ResourceGroupTags: aws.String("ResourceGroupTags"), // Required + } + resp, err := svc.CreateResourceGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DeleteApplication() { + svc := inspector.New(session.New()) + + params := &inspector.DeleteApplicationInput{ + ApplicationArn: aws.String("Arn"), // Required + } + resp, err := svc.DeleteApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DeleteAssessment() { + svc := inspector.New(session.New()) + + params := &inspector.DeleteAssessmentInput{ + AssessmentArn: aws.String("Arn"), // Required + } + resp, err := svc.DeleteAssessment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DeleteRun() { + svc := inspector.New(session.New()) + + params := &inspector.DeleteRunInput{ + RunArn: aws.String("Arn"), // Required + } + resp, err := svc.DeleteRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_DescribeApplication() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeApplicationInput{ + ApplicationArn: aws.String("Arn"), // Required + } + resp, err := svc.DescribeApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeAssessment() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeAssessmentInput{ + AssessmentArn: aws.String("Arn"), // Required + } + resp, err := svc.DescribeAssessment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeCrossAccountAccessRole() { + svc := inspector.New(session.New()) + + var params *inspector.DescribeCrossAccountAccessRoleInput + resp, err := svc.DescribeCrossAccountAccessRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeFinding() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeFindingInput{ + FindingArn: aws.String("Arn"), // Required + } + resp, err := svc.DescribeFinding(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_DescribeResourceGroup() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeResourceGroupInput{ + ResourceGroupArn: aws.String("Arn"), // Required + } + resp, err := svc.DescribeResourceGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeRulesPackage() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeRulesPackageInput{ + RulesPackageArn: aws.String("Arn"), // Required + } + resp, err := svc.DescribeRulesPackage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DescribeRun() { + svc := inspector.New(session.New()) + + params := &inspector.DescribeRunInput{ + RunArn: aws.String("Arn"), // Required + } + resp, err := svc.DescribeRun(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_DetachAssessmentAndRulesPackage() { + svc := inspector.New(session.New()) + + params := &inspector.DetachAssessmentAndRulesPackageInput{ + AssessmentArn: aws.String("Arn"), // Required + RulesPackageArn: aws.String("Arn"), // Required + } + resp, err := svc.DetachAssessmentAndRulesPackage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_GetAssessmentTelemetry() { + svc := inspector.New(session.New()) + + params := &inspector.GetAssessmentTelemetryInput{ + AssessmentArn: aws.String("Arn"), // Required + } + resp, err := svc.GetAssessmentTelemetry(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListApplications() { + svc := inspector.New(session.New()) + + params := &inspector.ListApplicationsInput{ + Filter: &inspector.ApplicationsFilter{ + ApplicationNamePatterns: []*string{ + aws.String("NamePattern"), // Required + // More values... + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListAssessmentAgents() { + svc := inspector.New(session.New()) + + params := &inspector.ListAssessmentAgentsInput{ + AssessmentArn: aws.String("Arn"), // Required + Filter: &inspector.AgentsFilter{ + AgentHealthList: []*string{ + aws.String("AgentHealth"), // Required + // More values... + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListAssessmentAgents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListAssessments() { + svc := inspector.New(session.New()) + + params := &inspector.ListAssessmentsInput{ + ApplicationArns: []*string{ + aws.String("Arn"), // Required + // More values... 
+ }, + Filter: &inspector.AssessmentsFilter{ + AssessmentNamePatterns: []*string{ + aws.String("NamePattern"), // Required + // More values... + }, + AssessmentStates: []*string{ + aws.String("AssessmentState"), // Required + // More values... + }, + DataCollected: aws.Bool(true), + DurationRange: &inspector.DurationRange{ + Maximum: aws.Int64(1), + Minimum: aws.Int64(1), + }, + EndTimeRange: &inspector.TimestampRange{ + Maximum: aws.Time(time.Now()), + Minimum: aws.Time(time.Now()), + }, + StartTimeRange: &inspector.TimestampRange{ + Maximum: aws.Time(time.Now()), + Minimum: aws.Time(time.Now()), + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListAssessments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListAttachedAssessments() { + svc := inspector.New(session.New()) + + params := &inspector.ListAttachedAssessmentsInput{ + RulesPackageArn: aws.String("Arn"), // Required + Filter: &inspector.AssessmentsFilter{ + AssessmentNamePatterns: []*string{ + aws.String("NamePattern"), // Required + // More values... + }, + AssessmentStates: []*string{ + aws.String("AssessmentState"), // Required + // More values... + }, + DataCollected: aws.Bool(true), + DurationRange: &inspector.DurationRange{ + Maximum: aws.Int64(1), + Minimum: aws.Int64(1), + }, + EndTimeRange: &inspector.TimestampRange{ + Maximum: aws.Time(time.Now()), + Minimum: aws.Time(time.Now()), + }, + StartTimeRange: &inspector.TimestampRange{ + Maximum: aws.Time(time.Now()), + Minimum: aws.Time(time.Now()), + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListAttachedAssessments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListAttachedRulesPackages() { + svc := inspector.New(session.New()) + + params := &inspector.ListAttachedRulesPackagesInput{ + AssessmentArn: aws.String("Arn"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListAttachedRulesPackages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListFindings() { + svc := inspector.New(session.New()) + + params := &inspector.ListFindingsInput{ + Filter: &inspector.FindingsFilter{ + Attributes: []*inspector.Attribute{ + { // Required + Key: aws.String("AttributeKey"), + Value: aws.String("AttributeValue"), + }, + // More values... + }, + RuleNames: []*string{ + aws.String("Name"), // Required + // More values... + }, + RulesPackageArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + Severities: []*string{ + aws.String("Severity"), // Required + // More values... + }, + UserAttributes: []*inspector.Attribute{ + { // Required + Key: aws.String("AttributeKey"), + Value: aws.String("AttributeValue"), + }, + // More values... + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + RunArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.ListFindings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_ListRulesPackages() { + svc := inspector.New(session.New()) + + params := &inspector.ListRulesPackagesInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListRulesPackages(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListRuns() { + svc := inspector.New(session.New()) + + params := &inspector.ListRunsInput{ + AssessmentArns: []*string{ + aws.String("Arn"), // Required + // More values... + }, + Filter: &inspector.RunsFilter{ + CompletionTime: &inspector.TimestampRange{ + Maximum: aws.Time(time.Now()), + Minimum: aws.Time(time.Now()), + }, + CreationTime: &inspector.TimestampRange{ + Maximum: aws.Time(time.Now()), + Minimum: aws.Time(time.Now()), + }, + RulesPackages: []*string{ + aws.String("Arn"), // Required + // More values... + }, + RunNamePatterns: []*string{ + aws.String("NamePattern"), // Required + // More values... + }, + RunStates: []*string{ + aws.String("RunState"), // Required + // More values... + }, + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.ListRuns(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_ListTagsForResource() { + svc := inspector.New(session.New()) + + params := &inspector.ListTagsForResourceInput{ + ResourceArn: aws.String("Arn"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_LocalizeText() { + svc := inspector.New(session.New()) + + params := &inspector.LocalizeTextInput{ + Locale: aws.String("Locale"), // Required + LocalizedTexts: []*inspector.LocalizedText{ // Required + { // Required + Key: &inspector.LocalizedTextKey{ + Facility: aws.String("LocalizedFacility"), + Id: aws.String("LocalizedTextId"), + }, + Parameters: []*inspector.Parameter{ + { // Required + Name: aws.String("ParameterName"), + Value: aws.String("ParameterValue"), + }, + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.LocalizeText(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_PreviewAgentsForResourceGroup() { + svc := inspector.New(session.New()) + + params := &inspector.PreviewAgentsForResourceGroupInput{ + ResourceGroupArn: aws.String("Arn"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.PreviewAgentsForResourceGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_RegisterCrossAccountAccessRole() { + svc := inspector.New(session.New()) + + params := &inspector.RegisterCrossAccountAccessRoleInput{ + RoleArn: aws.String("Arn"), // Required + } + resp, err := svc.RegisterCrossAccountAccessRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_RemoveAttributesFromFindings() { + svc := inspector.New(session.New()) + + params := &inspector.RemoveAttributesFromFindingsInput{ + AttributeKeys: []*string{ // Required + aws.String("AttributeKey"), // Required + // More values... + }, + FindingArns: []*string{ // Required + aws.String("Arn"), // Required + // More values... + }, + } + resp, err := svc.RemoveAttributesFromFindings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_RunAssessment() { + svc := inspector.New(session.New()) + + params := &inspector.RunAssessmentInput{ + AssessmentArn: aws.String("Arn"), // Required + RunName: aws.String("Name"), // Required + } + resp, err := svc.RunAssessment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_SetTagsForResource() { + svc := inspector.New(session.New()) + + params := &inspector.SetTagsForResourceInput{ + ResourceArn: aws.String("Arn"), // Required + Tags: []*inspector.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.SetTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleInspector_StartDataCollection() { + svc := inspector.New(session.New()) + + params := &inspector.StartDataCollectionInput{ + AssessmentArn: aws.String("Arn"), // Required + } + resp, err := svc.StartDataCollection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_StopDataCollection() { + svc := inspector.New(session.New()) + + params := &inspector.StopDataCollectionInput{ + AssessmentArn: aws.String("Arn"), // Required + } + resp, err := svc.StopDataCollection(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_UpdateApplication() { + svc := inspector.New(session.New()) + + params := &inspector.UpdateApplicationInput{ + ApplicationArn: aws.String("Arn"), // Required + ApplicationName: aws.String("Name"), // Required + ResourceGroupArn: aws.String("Arn"), // Required + } + resp, err := svc.UpdateApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleInspector_UpdateAssessment() { + svc := inspector.New(session.New()) + + params := &inspector.UpdateAssessmentInput{ + AssessmentArn: aws.String("Arn"), // Required + AssessmentName: aws.String("Name"), // Required + DurationInSeconds: aws.Int64(1), // Required + } + resp, err := svc.UpdateAssessment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/inspectoriface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/inspectoriface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/inspectoriface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/inspectoriface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,158 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package inspectoriface provides an interface for the Amazon Inspector. +package inspectoriface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/inspector" +) + +// InspectorAPI is the interface type for inspector.Inspector. +type InspectorAPI interface { + AddAttributesToFindingsRequest(*inspector.AddAttributesToFindingsInput) (*request.Request, *inspector.AddAttributesToFindingsOutput) + + AddAttributesToFindings(*inspector.AddAttributesToFindingsInput) (*inspector.AddAttributesToFindingsOutput, error) + + AttachAssessmentAndRulesPackageRequest(*inspector.AttachAssessmentAndRulesPackageInput) (*request.Request, *inspector.AttachAssessmentAndRulesPackageOutput) + + AttachAssessmentAndRulesPackage(*inspector.AttachAssessmentAndRulesPackageInput) (*inspector.AttachAssessmentAndRulesPackageOutput, error) + + CreateApplicationRequest(*inspector.CreateApplicationInput) (*request.Request, *inspector.CreateApplicationOutput) + + CreateApplication(*inspector.CreateApplicationInput) (*inspector.CreateApplicationOutput, error) + + CreateAssessmentRequest(*inspector.CreateAssessmentInput) (*request.Request, *inspector.CreateAssessmentOutput) + + CreateAssessment(*inspector.CreateAssessmentInput) (*inspector.CreateAssessmentOutput, error) + + 
CreateResourceGroupRequest(*inspector.CreateResourceGroupInput) (*request.Request, *inspector.CreateResourceGroupOutput) + + CreateResourceGroup(*inspector.CreateResourceGroupInput) (*inspector.CreateResourceGroupOutput, error) + + DeleteApplicationRequest(*inspector.DeleteApplicationInput) (*request.Request, *inspector.DeleteApplicationOutput) + + DeleteApplication(*inspector.DeleteApplicationInput) (*inspector.DeleteApplicationOutput, error) + + DeleteAssessmentRequest(*inspector.DeleteAssessmentInput) (*request.Request, *inspector.DeleteAssessmentOutput) + + DeleteAssessment(*inspector.DeleteAssessmentInput) (*inspector.DeleteAssessmentOutput, error) + + DeleteRunRequest(*inspector.DeleteRunInput) (*request.Request, *inspector.DeleteRunOutput) + + DeleteRun(*inspector.DeleteRunInput) (*inspector.DeleteRunOutput, error) + + DescribeApplicationRequest(*inspector.DescribeApplicationInput) (*request.Request, *inspector.DescribeApplicationOutput) + + DescribeApplication(*inspector.DescribeApplicationInput) (*inspector.DescribeApplicationOutput, error) + + DescribeAssessmentRequest(*inspector.DescribeAssessmentInput) (*request.Request, *inspector.DescribeAssessmentOutput) + + DescribeAssessment(*inspector.DescribeAssessmentInput) (*inspector.DescribeAssessmentOutput, error) + + DescribeCrossAccountAccessRoleRequest(*inspector.DescribeCrossAccountAccessRoleInput) (*request.Request, *inspector.DescribeCrossAccountAccessRoleOutput) + + DescribeCrossAccountAccessRole(*inspector.DescribeCrossAccountAccessRoleInput) (*inspector.DescribeCrossAccountAccessRoleOutput, error) + + DescribeFindingRequest(*inspector.DescribeFindingInput) (*request.Request, *inspector.DescribeFindingOutput) + + DescribeFinding(*inspector.DescribeFindingInput) (*inspector.DescribeFindingOutput, error) + + DescribeResourceGroupRequest(*inspector.DescribeResourceGroupInput) (*request.Request, *inspector.DescribeResourceGroupOutput) + + DescribeResourceGroup(*inspector.DescribeResourceGroupInput) 
(*inspector.DescribeResourceGroupOutput, error) + + DescribeRulesPackageRequest(*inspector.DescribeRulesPackageInput) (*request.Request, *inspector.DescribeRulesPackageOutput) + + DescribeRulesPackage(*inspector.DescribeRulesPackageInput) (*inspector.DescribeRulesPackageOutput, error) + + DescribeRunRequest(*inspector.DescribeRunInput) (*request.Request, *inspector.DescribeRunOutput) + + DescribeRun(*inspector.DescribeRunInput) (*inspector.DescribeRunOutput, error) + + DetachAssessmentAndRulesPackageRequest(*inspector.DetachAssessmentAndRulesPackageInput) (*request.Request, *inspector.DetachAssessmentAndRulesPackageOutput) + + DetachAssessmentAndRulesPackage(*inspector.DetachAssessmentAndRulesPackageInput) (*inspector.DetachAssessmentAndRulesPackageOutput, error) + + GetAssessmentTelemetryRequest(*inspector.GetAssessmentTelemetryInput) (*request.Request, *inspector.GetAssessmentTelemetryOutput) + + GetAssessmentTelemetry(*inspector.GetAssessmentTelemetryInput) (*inspector.GetAssessmentTelemetryOutput, error) + + ListApplicationsRequest(*inspector.ListApplicationsInput) (*request.Request, *inspector.ListApplicationsOutput) + + ListApplications(*inspector.ListApplicationsInput) (*inspector.ListApplicationsOutput, error) + + ListAssessmentAgentsRequest(*inspector.ListAssessmentAgentsInput) (*request.Request, *inspector.ListAssessmentAgentsOutput) + + ListAssessmentAgents(*inspector.ListAssessmentAgentsInput) (*inspector.ListAssessmentAgentsOutput, error) + + ListAssessmentsRequest(*inspector.ListAssessmentsInput) (*request.Request, *inspector.ListAssessmentsOutput) + + ListAssessments(*inspector.ListAssessmentsInput) (*inspector.ListAssessmentsOutput, error) + + ListAttachedAssessmentsRequest(*inspector.ListAttachedAssessmentsInput) (*request.Request, *inspector.ListAttachedAssessmentsOutput) + + ListAttachedAssessments(*inspector.ListAttachedAssessmentsInput) (*inspector.ListAttachedAssessmentsOutput, error) + + 
ListAttachedRulesPackagesRequest(*inspector.ListAttachedRulesPackagesInput) (*request.Request, *inspector.ListAttachedRulesPackagesOutput) + + ListAttachedRulesPackages(*inspector.ListAttachedRulesPackagesInput) (*inspector.ListAttachedRulesPackagesOutput, error) + + ListFindingsRequest(*inspector.ListFindingsInput) (*request.Request, *inspector.ListFindingsOutput) + + ListFindings(*inspector.ListFindingsInput) (*inspector.ListFindingsOutput, error) + + ListRulesPackagesRequest(*inspector.ListRulesPackagesInput) (*request.Request, *inspector.ListRulesPackagesOutput) + + ListRulesPackages(*inspector.ListRulesPackagesInput) (*inspector.ListRulesPackagesOutput, error) + + ListRunsRequest(*inspector.ListRunsInput) (*request.Request, *inspector.ListRunsOutput) + + ListRuns(*inspector.ListRunsInput) (*inspector.ListRunsOutput, error) + + ListTagsForResourceRequest(*inspector.ListTagsForResourceInput) (*request.Request, *inspector.ListTagsForResourceOutput) + + ListTagsForResource(*inspector.ListTagsForResourceInput) (*inspector.ListTagsForResourceOutput, error) + + LocalizeTextRequest(*inspector.LocalizeTextInput) (*request.Request, *inspector.LocalizeTextOutput) + + LocalizeText(*inspector.LocalizeTextInput) (*inspector.LocalizeTextOutput, error) + + PreviewAgentsForResourceGroupRequest(*inspector.PreviewAgentsForResourceGroupInput) (*request.Request, *inspector.PreviewAgentsForResourceGroupOutput) + + PreviewAgentsForResourceGroup(*inspector.PreviewAgentsForResourceGroupInput) (*inspector.PreviewAgentsForResourceGroupOutput, error) + + RegisterCrossAccountAccessRoleRequest(*inspector.RegisterCrossAccountAccessRoleInput) (*request.Request, *inspector.RegisterCrossAccountAccessRoleOutput) + + RegisterCrossAccountAccessRole(*inspector.RegisterCrossAccountAccessRoleInput) (*inspector.RegisterCrossAccountAccessRoleOutput, error) + + RemoveAttributesFromFindingsRequest(*inspector.RemoveAttributesFromFindingsInput) (*request.Request, 
*inspector.RemoveAttributesFromFindingsOutput) + + RemoveAttributesFromFindings(*inspector.RemoveAttributesFromFindingsInput) (*inspector.RemoveAttributesFromFindingsOutput, error) + + RunAssessmentRequest(*inspector.RunAssessmentInput) (*request.Request, *inspector.RunAssessmentOutput) + + RunAssessment(*inspector.RunAssessmentInput) (*inspector.RunAssessmentOutput, error) + + SetTagsForResourceRequest(*inspector.SetTagsForResourceInput) (*request.Request, *inspector.SetTagsForResourceOutput) + + SetTagsForResource(*inspector.SetTagsForResourceInput) (*inspector.SetTagsForResourceOutput, error) + + StartDataCollectionRequest(*inspector.StartDataCollectionInput) (*request.Request, *inspector.StartDataCollectionOutput) + + StartDataCollection(*inspector.StartDataCollectionInput) (*inspector.StartDataCollectionOutput, error) + + StopDataCollectionRequest(*inspector.StopDataCollectionInput) (*request.Request, *inspector.StopDataCollectionOutput) + + StopDataCollection(*inspector.StopDataCollectionInput) (*inspector.StopDataCollectionOutput, error) + + UpdateApplicationRequest(*inspector.UpdateApplicationInput) (*request.Request, *inspector.UpdateApplicationOutput) + + UpdateApplication(*inspector.UpdateApplicationInput) (*inspector.UpdateApplicationOutput, error) + + UpdateAssessmentRequest(*inspector.UpdateAssessmentInput) (*request.Request, *inspector.UpdateAssessmentOutput) + + UpdateAssessment(*inspector.UpdateAssessmentInput) (*inspector.UpdateAssessmentOutput, error) +} + +var _ InspectorAPI = (*inspector.Inspector)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/service.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/inspector/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package inspector + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Inspector enables you to analyze the behavior of the applications +// you run in AWS and to identify potential security issues. For more information, +// see Amazon Inspector User Guide (https://docs.aws.amazon.com/inspector/latest/userguide/inspector_introduction.html). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Inspector struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "inspector" + +// New creates a new instance of the Inspector client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Inspector client from just a session. +// svc := inspector.New(mySession) +// +// // Create a Inspector client with additional configuration +// svc := inspector.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Inspector { + c := p.ClientConfig(ServiceName, cfgs...) 
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Inspector { + svc := &Inspector{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-08-18", + JSONVersion: "1.1", + TargetPrefix: "InspectorService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Inspector operation and runs any +// custom request initialization. +func (c *Inspector) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3437 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iot provides a client for AWS IoT. 
+package iot + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opAcceptCertificateTransfer = "AcceptCertificateTransfer" + +// AcceptCertificateTransferRequest generates a request for the AcceptCertificateTransfer operation. +func (c *IoT) AcceptCertificateTransferRequest(input *AcceptCertificateTransferInput) (req *request.Request, output *AcceptCertificateTransferOutput) { + op := &request.Operation{ + Name: opAcceptCertificateTransfer, + HTTPMethod: "PATCH", + HTTPPath: "/accept-certificate-transfer/{certificateId}", + } + + if input == nil { + input = &AcceptCertificateTransferInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AcceptCertificateTransferOutput{} + req.Data = output + return +} + +// Accepts a pending certificate transfer. The default state of the certificate +// is INACTIVE. +// +// To check for pending certificate transfers, call ListCertificates to enumerate +// your certificates. +func (c *IoT) AcceptCertificateTransfer(input *AcceptCertificateTransferInput) (*AcceptCertificateTransferOutput, error) { + req, out := c.AcceptCertificateTransferRequest(input) + err := req.Send() + return out, err +} + +const opAttachPrincipalPolicy = "AttachPrincipalPolicy" + +// AttachPrincipalPolicyRequest generates a request for the AttachPrincipalPolicy operation. 
+func (c *IoT) AttachPrincipalPolicyRequest(input *AttachPrincipalPolicyInput) (req *request.Request, output *AttachPrincipalPolicyOutput) { + op := &request.Operation{ + Name: opAttachPrincipalPolicy, + HTTPMethod: "PUT", + HTTPPath: "/principal-policies/{policyName}", + } + + if input == nil { + input = &AttachPrincipalPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachPrincipalPolicyOutput{} + req.Data = output + return +} + +// Attaches the specified policy to the specified principal (certificate or +// other credential). +func (c *IoT) AttachPrincipalPolicy(input *AttachPrincipalPolicyInput) (*AttachPrincipalPolicyOutput, error) { + req, out := c.AttachPrincipalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opAttachThingPrincipal = "AttachThingPrincipal" + +// AttachThingPrincipalRequest generates a request for the AttachThingPrincipal operation. +func (c *IoT) AttachThingPrincipalRequest(input *AttachThingPrincipalInput) (req *request.Request, output *AttachThingPrincipalOutput) { + op := &request.Operation{ + Name: opAttachThingPrincipal, + HTTPMethod: "PUT", + HTTPPath: "/things/{thingName}/principals", + } + + if input == nil { + input = &AttachThingPrincipalInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachThingPrincipalOutput{} + req.Data = output + return +} + +// Attaches the specified principal to the specified thing. +func (c *IoT) AttachThingPrincipal(input *AttachThingPrincipalInput) (*AttachThingPrincipalOutput, error) { + req, out := c.AttachThingPrincipalRequest(input) + err := req.Send() + return out, err +} + +const opCancelCertificateTransfer = "CancelCertificateTransfer" + +// CancelCertificateTransferRequest generates a request for the CancelCertificateTransfer operation. 
+func (c *IoT) CancelCertificateTransferRequest(input *CancelCertificateTransferInput) (req *request.Request, output *CancelCertificateTransferOutput) { + op := &request.Operation{ + Name: opCancelCertificateTransfer, + HTTPMethod: "PATCH", + HTTPPath: "/cancel-certificate-transfer/{certificateId}", + } + + if input == nil { + input = &CancelCertificateTransferInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelCertificateTransferOutput{} + req.Data = output + return +} + +// Cancels a pending transfer for the specified certificate. +// +// Note Only the transfer source account can use this operation to cancel a +// transfer (transfer destinations can use RejectCertificateTransfer instead). +// After transfer, AWS IoT returns the certificate to the source account in +// the INACTIVE state. Once the destination account has accepted the transfer, +// the transfer may no longer be cancelled. +// +// After a certificate transfer is cancelled, the status of the certificate +// changes from PENDING_TRANSFER to INACTIVE. +func (c *IoT) CancelCertificateTransfer(input *CancelCertificateTransferInput) (*CancelCertificateTransferOutput, error) { + req, out := c.CancelCertificateTransferRequest(input) + err := req.Send() + return out, err +} + +const opCreateCertificateFromCsr = "CreateCertificateFromCsr" + +// CreateCertificateFromCsrRequest generates a request for the CreateCertificateFromCsr operation. 
+func (c *IoT) CreateCertificateFromCsrRequest(input *CreateCertificateFromCsrInput) (req *request.Request, output *CreateCertificateFromCsrOutput) { + op := &request.Operation{ + Name: opCreateCertificateFromCsr, + HTTPMethod: "POST", + HTTPPath: "/certificates", + } + + if input == nil { + input = &CreateCertificateFromCsrInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCertificateFromCsrOutput{} + req.Data = output + return +} + +// Creates an X.509 certificate using the specified certificate signing request. +// +// Note Reusing the same certificate signing request (CSR) results in a distinct +// certificate. +// +// You can create multiple certificates in a batch by creating a directory +// and copying multiple .csr files into that directory and specifying that directory +// on the command line. The following commands show how to create a batch of +// certificates given a batch of CSRs. +// +// Assuming a set of CSRs are located inside of the directory my-csr-directory: +// +// > On Linux and OSX, the command is: +// +// $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr +// --certificate-signing-request file://my-csr-directory/{} +// +// This command lists all of the CSRs in my-csr-directory and pipes each CSR +// filename to the aws iot create-certificate-from-csr AWS CLI command to create +// a certificate for the corresponding CSR. 
+// +// The aws iot create-certificate-from-csr part of the command can also be +// run in parallel to speed up the certificate creation process: +// +// $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr +// --certificate-signing-request file://my-csr-directory/{} +// +// On Windows PowerShell, the command to create certificates for all CSRs +// in my-csr-directory is: +// +// > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request +// file://my-csr-directory/$_} +// +// On Windows Command Prompt, the command to create certificates for all CSRs +// in my-csr-directory is: +// +// > forfiles /p my-csr-directory /c "cmd /c aws iot create-certificate-from-csr +// --certificate-signing-request file://@path" +func (c *IoT) CreateCertificateFromCsr(input *CreateCertificateFromCsrInput) (*CreateCertificateFromCsrOutput, error) { + req, out := c.CreateCertificateFromCsrRequest(input) + err := req.Send() + return out, err +} + +const opCreateKeysAndCertificate = "CreateKeysAndCertificate" + +// CreateKeysAndCertificateRequest generates a request for the CreateKeysAndCertificate operation. +func (c *IoT) CreateKeysAndCertificateRequest(input *CreateKeysAndCertificateInput) (req *request.Request, output *CreateKeysAndCertificateOutput) { + op := &request.Operation{ + Name: opCreateKeysAndCertificate, + HTTPMethod: "POST", + HTTPPath: "/keys-and-certificate", + } + + if input == nil { + input = &CreateKeysAndCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateKeysAndCertificateOutput{} + req.Data = output + return +} + +// Creates a 2048 bit RSA key pair and issues an X.509 certificate using the +// issued public key. +// +// Note This is the only time AWS IoT issues the private key for this certificate. +// It is important to keep track of the private key. 
+func (c *IoT) CreateKeysAndCertificate(input *CreateKeysAndCertificateInput) (*CreateKeysAndCertificateOutput, error) { + req, out := c.CreateKeysAndCertificateRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicy = "CreatePolicy" + +// CreatePolicyRequest generates a request for the CreatePolicy operation. +func (c *IoT) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Request, output *CreatePolicyOutput) { + op := &request.Operation{ + Name: opCreatePolicy, + HTTPMethod: "POST", + HTTPPath: "/policies/{policyName}", + } + + if input == nil { + input = &CreatePolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyOutput{} + req.Data = output + return +} + +// Creates an AWS IoT policy. +// +// The created policy is the default version for the policy. This operation +// creates a policy version with a version identifier of 1 and sets 1 as the +// policy's default version. +func (c *IoT) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error) { + req, out := c.CreatePolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreatePolicyVersion = "CreatePolicyVersion" + +// CreatePolicyVersionRequest generates a request for the CreatePolicyVersion operation. +func (c *IoT) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req *request.Request, output *CreatePolicyVersionOutput) { + op := &request.Operation{ + Name: opCreatePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/policies/{policyName}/version", + } + + if input == nil { + input = &CreatePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of the specified AWS IoT policy. To update a policy, +// create a new policy version. A managed policy can have up to five versions. 
+// If the policy has five versions, you must delete an existing version using +// DeletePolicyVersion before you create a new version. +// +// Optionally, you can set the new version as the policy's default version. +// The default version is the operative version; that is, the version that is +// in effect for the certificates that the policy is attached to. +func (c *IoT) CreatePolicyVersion(input *CreatePolicyVersionInput) (*CreatePolicyVersionOutput, error) { + req, out := c.CreatePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opCreateThing = "CreateThing" + +// CreateThingRequest generates a request for the CreateThing operation. +func (c *IoT) CreateThingRequest(input *CreateThingInput) (req *request.Request, output *CreateThingOutput) { + op := &request.Operation{ + Name: opCreateThing, + HTTPMethod: "POST", + HTTPPath: "/things/{thingName}", + } + + if input == nil { + input = &CreateThingInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateThingOutput{} + req.Data = output + return +} + +// Creates a thing in the thing registry. +func (c *IoT) CreateThing(input *CreateThingInput) (*CreateThingOutput, error) { + req, out := c.CreateThingRequest(input) + err := req.Send() + return out, err +} + +const opCreateTopicRule = "CreateTopicRule" + +// CreateTopicRuleRequest generates a request for the CreateTopicRule operation. +func (c *IoT) CreateTopicRuleRequest(input *CreateTopicRuleInput) (req *request.Request, output *CreateTopicRuleOutput) { + op := &request.Operation{ + Name: opCreateTopicRule, + HTTPMethod: "POST", + HTTPPath: "/rules/{ruleName}", + } + + if input == nil { + input = &CreateTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateTopicRuleOutput{} + req.Data = output + return +} + +// Creates a rule. 
+func (c *IoT) CreateTopicRule(input *CreateTopicRuleInput) (*CreateTopicRuleOutput, error) { + req, out := c.CreateTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCertificate = "DeleteCertificate" + +// DeleteCertificateRequest generates a request for the DeleteCertificate operation. +func (c *IoT) DeleteCertificateRequest(input *DeleteCertificateInput) (req *request.Request, output *DeleteCertificateOutput) { + op := &request.Operation{ + Name: opDeleteCertificate, + HTTPMethod: "DELETE", + HTTPPath: "/certificates/{certificateId}", + } + + if input == nil { + input = &DeleteCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified certificate. +// +// A certificate cannot be deleted if it has a policy attached to it or if +// its status is set to ACTIVE. To delete a certificate, first detach all policies +// using the DetachPrincipalPolicy API. Next use the UpdateCertificate API to +// set the certificate to the INACTIVE status. +func (c *IoT) DeleteCertificate(input *DeleteCertificateInput) (*DeleteCertificateOutput, error) { + req, out := c.DeleteCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a request for the DeletePolicy operation. 
+func (c *IoT) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "DELETE", + HTTPPath: "/policies/{policyName}", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyOutput{} + req.Data = output + return +} + +// Deletes the specified policy. +// +// A policy cannot be deleted if it has non-default versions and/or it is attached +// to any certificate. +// +// To delete a policy, delete all non-default versions of the policy using +// the DeletePolicyVersion API, detach the policy from any certificate using +// the DetachPrincipalPolicy API, and then use the DeletePolicy API to delete +// the policy. +// +// When a policy is deleted using DeletePolicy, its default version is deleted +// with it. +func (c *IoT) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeletePolicyVersion = "DeletePolicyVersion" + +// DeletePolicyVersionRequest generates a request for the DeletePolicyVersion operation. 
+func (c *IoT) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { + op := &request.Operation{ + Name: opDeletePolicyVersion, + HTTPMethod: "DELETE", + HTTPPath: "/policies/{policyName}/version/{policyVersionId}", + } + + if input == nil { + input = &DeletePolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePolicyVersionOutput{} + req.Data = output + return +} + +// Deletes the specified version of the specified policy. You cannot delete +// the default version of a policy using this API. To delete the default version +// of a policy, use DeletePolicy. To find out which version of a policy is marked +// as the default version, use ListPolicyVersions. +func (c *IoT) DeletePolicyVersion(input *DeletePolicyVersionInput) (*DeletePolicyVersionOutput, error) { + req, out := c.DeletePolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteThing = "DeleteThing" + +// DeleteThingRequest generates a request for the DeleteThing operation. +func (c *IoT) DeleteThingRequest(input *DeleteThingInput) (req *request.Request, output *DeleteThingOutput) { + op := &request.Operation{ + Name: opDeleteThing, + HTTPMethod: "DELETE", + HTTPPath: "/things/{thingName}", + } + + if input == nil { + input = &DeleteThingInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteThingOutput{} + req.Data = output + return +} + +// Deletes the specified thing from the Thing Registry. +func (c *IoT) DeleteThing(input *DeleteThingInput) (*DeleteThingOutput, error) { + req, out := c.DeleteThingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTopicRule = "DeleteTopicRule" + +// DeleteTopicRuleRequest generates a request for the DeleteTopicRule operation. 
+func (c *IoT) DeleteTopicRuleRequest(input *DeleteTopicRuleInput) (req *request.Request, output *DeleteTopicRuleOutput) { + op := &request.Operation{ + Name: opDeleteTopicRule, + HTTPMethod: "DELETE", + HTTPPath: "/rules/{ruleName}", + } + + if input == nil { + input = &DeleteTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTopicRuleOutput{} + req.Data = output + return +} + +// Deletes the specified rule. +func (c *IoT) DeleteTopicRule(input *DeleteTopicRuleInput) (*DeleteTopicRuleOutput, error) { + req, out := c.DeleteTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCertificate = "DescribeCertificate" + +// DescribeCertificateRequest generates a request for the DescribeCertificate operation. +func (c *IoT) DescribeCertificateRequest(input *DescribeCertificateInput) (req *request.Request, output *DescribeCertificateOutput) { + op := &request.Operation{ + Name: opDescribeCertificate, + HTTPMethod: "GET", + HTTPPath: "/certificates/{certificateId}", + } + + if input == nil { + input = &DescribeCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCertificateOutput{} + req.Data = output + return +} + +// Gets information about the specified certificate. +func (c *IoT) DescribeCertificate(input *DescribeCertificateInput) (*DescribeCertificateOutput, error) { + req, out := c.DescribeCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEndpoint = "DescribeEndpoint" + +// DescribeEndpointRequest generates a request for the DescribeEndpoint operation. 
+func (c *IoT) DescribeEndpointRequest(input *DescribeEndpointInput) (req *request.Request, output *DescribeEndpointOutput) { + op := &request.Operation{ + Name: opDescribeEndpoint, + HTTPMethod: "GET", + HTTPPath: "/endpoint", + } + + if input == nil { + input = &DescribeEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEndpointOutput{} + req.Data = output + return +} + +// Returns a unique endpoint specific to the AWS account making the call. You +// specify the following URI when updating state information for your thing: +// https://endpoint/things/thingName/shadow. +func (c *IoT) DescribeEndpoint(input *DescribeEndpointInput) (*DescribeEndpointOutput, error) { + req, out := c.DescribeEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDescribeThing = "DescribeThing" + +// DescribeThingRequest generates a request for the DescribeThing operation. +func (c *IoT) DescribeThingRequest(input *DescribeThingInput) (req *request.Request, output *DescribeThingOutput) { + op := &request.Operation{ + Name: opDescribeThing, + HTTPMethod: "GET", + HTTPPath: "/things/{thingName}", + } + + if input == nil { + input = &DescribeThingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeThingOutput{} + req.Data = output + return +} + +// Gets information about the specified thing. +func (c *IoT) DescribeThing(input *DescribeThingInput) (*DescribeThingOutput, error) { + req, out := c.DescribeThingRequest(input) + err := req.Send() + return out, err +} + +const opDetachPrincipalPolicy = "DetachPrincipalPolicy" + +// DetachPrincipalPolicyRequest generates a request for the DetachPrincipalPolicy operation. 
+func (c *IoT) DetachPrincipalPolicyRequest(input *DetachPrincipalPolicyInput) (req *request.Request, output *DetachPrincipalPolicyOutput) { + op := &request.Operation{ + Name: opDetachPrincipalPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/principal-policies/{policyName}", + } + + if input == nil { + input = &DetachPrincipalPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachPrincipalPolicyOutput{} + req.Data = output + return +} + +// Removes the specified policy from the specified certificate. +func (c *IoT) DetachPrincipalPolicy(input *DetachPrincipalPolicyInput) (*DetachPrincipalPolicyOutput, error) { + req, out := c.DetachPrincipalPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDetachThingPrincipal = "DetachThingPrincipal" + +// DetachThingPrincipalRequest generates a request for the DetachThingPrincipal operation. +func (c *IoT) DetachThingPrincipalRequest(input *DetachThingPrincipalInput) (req *request.Request, output *DetachThingPrincipalOutput) { + op := &request.Operation{ + Name: opDetachThingPrincipal, + HTTPMethod: "DELETE", + HTTPPath: "/things/{thingName}/principals", + } + + if input == nil { + input = &DetachThingPrincipalInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachThingPrincipalOutput{} + req.Data = output + return +} + +// Detaches the specified principal from the specified thing. +func (c *IoT) DetachThingPrincipal(input *DetachThingPrincipalInput) (*DetachThingPrincipalOutput, error) { + req, out := c.DetachThingPrincipalRequest(input) + err := req.Send() + return out, err +} + +const opDisableTopicRule = "DisableTopicRule" + +// DisableTopicRuleRequest generates a request for the DisableTopicRule operation. 
+func (c *IoT) DisableTopicRuleRequest(input *DisableTopicRuleInput) (req *request.Request, output *DisableTopicRuleOutput) { + op := &request.Operation{ + Name: opDisableTopicRule, + HTTPMethod: "POST", + HTTPPath: "/rules/{ruleName}/disable", + } + + if input == nil { + input = &DisableTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableTopicRuleOutput{} + req.Data = output + return +} + +// Disables the specified rule +func (c *IoT) DisableTopicRule(input *DisableTopicRuleInput) (*DisableTopicRuleOutput, error) { + req, out := c.DisableTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opEnableTopicRule = "EnableTopicRule" + +// EnableTopicRuleRequest generates a request for the EnableTopicRule operation. +func (c *IoT) EnableTopicRuleRequest(input *EnableTopicRuleInput) (req *request.Request, output *EnableTopicRuleOutput) { + op := &request.Operation{ + Name: opEnableTopicRule, + HTTPMethod: "POST", + HTTPPath: "/rules/{ruleName}/enable", + } + + if input == nil { + input = &EnableTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableTopicRuleOutput{} + req.Data = output + return +} + +// Enables the specified rule. +func (c *IoT) EnableTopicRule(input *EnableTopicRuleInput) (*EnableTopicRuleOutput, error) { + req, out := c.EnableTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opGetLoggingOptions = "GetLoggingOptions" + +// GetLoggingOptionsRequest generates a request for the GetLoggingOptions operation. 
+func (c *IoT) GetLoggingOptionsRequest(input *GetLoggingOptionsInput) (req *request.Request, output *GetLoggingOptionsOutput) { + op := &request.Operation{ + Name: opGetLoggingOptions, + HTTPMethod: "GET", + HTTPPath: "/loggingOptions", + } + + if input == nil { + input = &GetLoggingOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetLoggingOptionsOutput{} + req.Data = output + return +} + +// Gets the logging options. +func (c *IoT) GetLoggingOptions(input *GetLoggingOptionsInput) (*GetLoggingOptionsOutput, error) { + req, out := c.GetLoggingOptionsRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest generates a request for the GetPolicy operation. +func (c *IoT) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { + op := &request.Operation{ + Name: opGetPolicy, + HTTPMethod: "GET", + HTTPPath: "/policies/{policyName}", + } + + if input == nil { + input = &GetPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyOutput{} + req.Data = output + return +} + +// Gets information about the specified policy with the policy document of the +// default version. +func (c *IoT) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicyVersion = "GetPolicyVersion" + +// GetPolicyVersionRequest generates a request for the GetPolicyVersion operation. 
+func (c *IoT) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *request.Request, output *GetPolicyVersionOutput) { + op := &request.Operation{ + Name: opGetPolicyVersion, + HTTPMethod: "GET", + HTTPPath: "/policies/{policyName}/version/{policyVersionId}", + } + + if input == nil { + input = &GetPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPolicyVersionOutput{} + req.Data = output + return +} + +// Gets information about the specified policy version. +func (c *IoT) GetPolicyVersion(input *GetPolicyVersionInput) (*GetPolicyVersionOutput, error) { + req, out := c.GetPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opGetTopicRule = "GetTopicRule" + +// GetTopicRuleRequest generates a request for the GetTopicRule operation. +func (c *IoT) GetTopicRuleRequest(input *GetTopicRuleInput) (req *request.Request, output *GetTopicRuleOutput) { + op := &request.Operation{ + Name: opGetTopicRule, + HTTPMethod: "GET", + HTTPPath: "/rules/{ruleName}", + } + + if input == nil { + input = &GetTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTopicRuleOutput{} + req.Data = output + return +} + +// Gets information about the specified rule. +func (c *IoT) GetTopicRule(input *GetTopicRuleInput) (*GetTopicRuleOutput, error) { + req, out := c.GetTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opListCertificates = "ListCertificates" + +// ListCertificatesRequest generates a request for the ListCertificates operation. +func (c *IoT) ListCertificatesRequest(input *ListCertificatesInput) (req *request.Request, output *ListCertificatesOutput) { + op := &request.Operation{ + Name: opListCertificates, + HTTPMethod: "GET", + HTTPPath: "/certificates", + } + + if input == nil { + input = &ListCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCertificatesOutput{} + req.Data = output + return +} + +// Lists your certificates. 
+// +// The results are paginated with a default page size of 25. You can retrieve +// additional results using the returned marker. +func (c *IoT) ListCertificates(input *ListCertificatesInput) (*ListCertificatesOutput, error) { + req, out := c.ListCertificatesRequest(input) + err := req.Send() + return out, err +} + +const opListPolicies = "ListPolicies" + +// ListPoliciesRequest generates a request for the ListPolicies operation. +func (c *IoT) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) { + op := &request.Operation{ + Name: opListPolicies, + HTTPMethod: "GET", + HTTPPath: "/policies", + } + + if input == nil { + input = &ListPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPoliciesOutput{} + req.Data = output + return +} + +// Lists your policies. +func (c *IoT) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) { + req, out := c.ListPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListPolicyVersions = "ListPolicyVersions" + +// ListPolicyVersionsRequest generates a request for the ListPolicyVersions operation. +func (c *IoT) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *request.Request, output *ListPolicyVersionsOutput) { + op := &request.Operation{ + Name: opListPolicyVersions, + HTTPMethod: "GET", + HTTPPath: "/policies/{policyName}/version", + } + + if input == nil { + input = &ListPolicyVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPolicyVersionsOutput{} + req.Data = output + return +} + +// Lists the versions of the specified policy, and identifies the default version. 
+func (c *IoT) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVersionsOutput, error) { + req, out := c.ListPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +const opListPrincipalPolicies = "ListPrincipalPolicies" + +// ListPrincipalPoliciesRequest generates a request for the ListPrincipalPolicies operation. +func (c *IoT) ListPrincipalPoliciesRequest(input *ListPrincipalPoliciesInput) (req *request.Request, output *ListPrincipalPoliciesOutput) { + op := &request.Operation{ + Name: opListPrincipalPolicies, + HTTPMethod: "GET", + HTTPPath: "/principal-policies", + } + + if input == nil { + input = &ListPrincipalPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPrincipalPoliciesOutput{} + req.Data = output + return +} + +// Lists the policies attached to the specified principal. If you use an Amazon +// Cognito identity, the ID needs to be in Amazon Cognito Identity format (http://docs.aws.amazon.com/cognitoidentity/latest/APIReference/API_GetCredentialsForIdentity.html#API_GetCredentialsForIdentity_RequestSyntax). +func (c *IoT) ListPrincipalPolicies(input *ListPrincipalPoliciesInput) (*ListPrincipalPoliciesOutput, error) { + req, out := c.ListPrincipalPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListPrincipalThings = "ListPrincipalThings" + +// ListPrincipalThingsRequest generates a request for the ListPrincipalThings operation. +func (c *IoT) ListPrincipalThingsRequest(input *ListPrincipalThingsInput) (req *request.Request, output *ListPrincipalThingsOutput) { + op := &request.Operation{ + Name: opListPrincipalThings, + HTTPMethod: "GET", + HTTPPath: "/principals/things", + } + + if input == nil { + input = &ListPrincipalThingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPrincipalThingsOutput{} + req.Data = output + return +} + +// Lists the things associated with the specified principal. 
+func (c *IoT) ListPrincipalThings(input *ListPrincipalThingsInput) (*ListPrincipalThingsOutput, error) { + req, out := c.ListPrincipalThingsRequest(input) + err := req.Send() + return out, err +} + +const opListThingPrincipals = "ListThingPrincipals" + +// ListThingPrincipalsRequest generates a request for the ListThingPrincipals operation. +func (c *IoT) ListThingPrincipalsRequest(input *ListThingPrincipalsInput) (req *request.Request, output *ListThingPrincipalsOutput) { + op := &request.Operation{ + Name: opListThingPrincipals, + HTTPMethod: "GET", + HTTPPath: "/things/{thingName}/principals", + } + + if input == nil { + input = &ListThingPrincipalsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListThingPrincipalsOutput{} + req.Data = output + return +} + +// Lists the principals associated with the specified thing. +func (c *IoT) ListThingPrincipals(input *ListThingPrincipalsInput) (*ListThingPrincipalsOutput, error) { + req, out := c.ListThingPrincipalsRequest(input) + err := req.Send() + return out, err +} + +const opListThings = "ListThings" + +// ListThingsRequest generates a request for the ListThings operation. +func (c *IoT) ListThingsRequest(input *ListThingsInput) (req *request.Request, output *ListThingsOutput) { + op := &request.Operation{ + Name: opListThings, + HTTPMethod: "GET", + HTTPPath: "/things", + } + + if input == nil { + input = &ListThingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListThingsOutput{} + req.Data = output + return +} + +// Lists your things. You can pass an AttributeName and/or AttributeValue to +// filter your things. For example: "ListThings where AttributeName=Color and +// AttributeValue=Red" +func (c *IoT) ListThings(input *ListThingsInput) (*ListThingsOutput, error) { + req, out := c.ListThingsRequest(input) + err := req.Send() + return out, err +} + +const opListTopicRules = "ListTopicRules" + +// ListTopicRulesRequest generates a request for the ListTopicRules operation. 
+func (c *IoT) ListTopicRulesRequest(input *ListTopicRulesInput) (req *request.Request, output *ListTopicRulesOutput) {
+ op := &request.Operation{
+ Name: opListTopicRules,
+ HTTPMethod: "GET",
+ HTTPPath: "/rules",
+ }
+
+ if input == nil {
+ input = &ListTopicRulesInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListTopicRulesOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the rules for the specific topic.
+func (c *IoT) ListTopicRules(input *ListTopicRulesInput) (*ListTopicRulesOutput, error) {
+ req, out := c.ListTopicRulesRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRejectCertificateTransfer = "RejectCertificateTransfer"
+
+// RejectCertificateTransferRequest generates a request for the RejectCertificateTransfer operation.
+func (c *IoT) RejectCertificateTransferRequest(input *RejectCertificateTransferInput) (req *request.Request, output *RejectCertificateTransferOutput) {
+ op := &request.Operation{
+ Name: opRejectCertificateTransfer,
+ HTTPMethod: "PATCH",
+ HTTPPath: "/reject-certificate-transfer/{certificateId}",
+ }
+
+ if input == nil {
+ input = &RejectCertificateTransferInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &RejectCertificateTransferOutput{}
+ req.Data = output
+ return
+}
+
+// Rejects a pending certificate transfer. After AWS IoT rejects a certificate
+// transfer, the certificate status changes from PENDING_TRANSFER to INACTIVE.
+//
+// To check for pending certificate transfers, call ListCertificates to enumerate
+// your certificates.
+//
+// This operation can only be called by the transfer destination. Once called,
+// the certificate will be returned to the source's account in the INACTIVE
+// state.
+func (c *IoT) RejectCertificateTransfer(input *RejectCertificateTransferInput) (*RejectCertificateTransferOutput, error) { + req, out := c.RejectCertificateTransferRequest(input) + err := req.Send() + return out, err +} + +const opReplaceTopicRule = "ReplaceTopicRule" + +// ReplaceTopicRuleRequest generates a request for the ReplaceTopicRule operation. +func (c *IoT) ReplaceTopicRuleRequest(input *ReplaceTopicRuleInput) (req *request.Request, output *ReplaceTopicRuleOutput) { + op := &request.Operation{ + Name: opReplaceTopicRule, + HTTPMethod: "PATCH", + HTTPPath: "/rules/{ruleName}", + } + + if input == nil { + input = &ReplaceTopicRuleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ReplaceTopicRuleOutput{} + req.Data = output + return +} + +// Replaces the specified rule. You must specify all parameters for the new +// rule. +func (c *IoT) ReplaceTopicRule(input *ReplaceTopicRuleInput) (*ReplaceTopicRuleOutput, error) { + req, out := c.ReplaceTopicRuleRequest(input) + err := req.Send() + return out, err +} + +const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion" + +// SetDefaultPolicyVersionRequest generates a request for the SetDefaultPolicyVersion operation. 
+func (c *IoT) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput) (req *request.Request, output *SetDefaultPolicyVersionOutput) { + op := &request.Operation{ + Name: opSetDefaultPolicyVersion, + HTTPMethod: "PATCH", + HTTPPath: "/policies/{policyName}/version/{policyVersionId}", + } + + if input == nil { + input = &SetDefaultPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetDefaultPolicyVersionOutput{} + req.Data = output + return +} + +// Sets the specified version of the specified policy as the policy's default +// (operative) version. This action affects all certificates that the policy +// is attached to. To list the principals the policy is attached to, use the +// ListPrincipalPolicy API. +func (c *IoT) SetDefaultPolicyVersion(input *SetDefaultPolicyVersionInput) (*SetDefaultPolicyVersionOutput, error) { + req, out := c.SetDefaultPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opSetLoggingOptions = "SetLoggingOptions" + +// SetLoggingOptionsRequest generates a request for the SetLoggingOptions operation. +func (c *IoT) SetLoggingOptionsRequest(input *SetLoggingOptionsInput) (req *request.Request, output *SetLoggingOptionsOutput) { + op := &request.Operation{ + Name: opSetLoggingOptions, + HTTPMethod: "POST", + HTTPPath: "/loggingOptions", + } + + if input == nil { + input = &SetLoggingOptionsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetLoggingOptionsOutput{} + req.Data = output + return +} + +// Sets the logging options. 
+func (c *IoT) SetLoggingOptions(input *SetLoggingOptionsInput) (*SetLoggingOptionsOutput, error) { + req, out := c.SetLoggingOptionsRequest(input) + err := req.Send() + return out, err +} + +const opTransferCertificate = "TransferCertificate" + +// TransferCertificateRequest generates a request for the TransferCertificate operation. +func (c *IoT) TransferCertificateRequest(input *TransferCertificateInput) (req *request.Request, output *TransferCertificateOutput) { + op := &request.Operation{ + Name: opTransferCertificate, + HTTPMethod: "PATCH", + HTTPPath: "/transfer-certificate/{certificateId}", + } + + if input == nil { + input = &TransferCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &TransferCertificateOutput{} + req.Data = output + return +} + +// Transfers the specified certificate to the specified AWS account. +// +// You can cancel the transfer until it is acknowledged by the recipient. +// +// No notification is sent to the transfer destination's account, it is up +// to the caller to notify the transfer target. +// +// The certificate being transferred must not be in the ACTIVE state. It can +// be deactivated using the UpdateCertificate API. +// +// The certificate must not have any policies attached to it. These can be +// detached using the DetachPrincipalPolicy API. +func (c *IoT) TransferCertificate(input *TransferCertificateInput) (*TransferCertificateOutput, error) { + req, out := c.TransferCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateCertificate = "UpdateCertificate" + +// UpdateCertificateRequest generates a request for the UpdateCertificate operation. 
+func (c *IoT) UpdateCertificateRequest(input *UpdateCertificateInput) (req *request.Request, output *UpdateCertificateOutput) { + op := &request.Operation{ + Name: opUpdateCertificate, + HTTPMethod: "PUT", + HTTPPath: "/certificates/{certificateId}", + } + + if input == nil { + input = &UpdateCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateCertificateOutput{} + req.Data = output + return +} + +// Updates the status of the specified certificate. This operation is idempotent. +// +// Moving a cert from the ACTIVE state (including REVOKED) will NOT disconnect +// currently-connected devices, although these devices will be unable to reconnect. +// +// The ACTIVE state is required to authenticate devices connecting to AWS IoT +// using a certificate. +func (c *IoT) UpdateCertificate(input *UpdateCertificateInput) (*UpdateCertificateOutput, error) { + req, out := c.UpdateCertificateRequest(input) + err := req.Send() + return out, err +} + +const opUpdateThing = "UpdateThing" + +// UpdateThingRequest generates a request for the UpdateThing operation. +func (c *IoT) UpdateThingRequest(input *UpdateThingInput) (req *request.Request, output *UpdateThingOutput) { + op := &request.Operation{ + Name: opUpdateThing, + HTTPMethod: "PATCH", + HTTPPath: "/things/{thingName}", + } + + if input == nil { + input = &UpdateThingInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateThingOutput{} + req.Data = output + return +} + +// Updates the data for a thing. +func (c *IoT) UpdateThing(input *UpdateThingInput) (*UpdateThingOutput, error) { + req, out := c.UpdateThingRequest(input) + err := req.Send() + return out, err +} + +// The input for the AcceptCertificateTransfer operation. +type AcceptCertificateTransferInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. 
+ CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` + + // Specifies whether the certificate is active. + SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"` +} + +// String returns the string representation +func (s AcceptCertificateTransferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptCertificateTransferInput) GoString() string { + return s.String() +} + +type AcceptCertificateTransferOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AcceptCertificateTransferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptCertificateTransferOutput) GoString() string { + return s.String() +} + +// Describes the actions associated with a rule. +type Action struct { + _ struct{} `type:"structure"` + + // Write to a DynamoDB table. + DynamoDB *DynamoDBAction `locationName:"dynamoDB" type:"structure"` + + // Write to a Kinesis Firehose stream. + Firehose *FirehoseAction `locationName:"firehose" type:"structure"` + + // Write data to a Kinesis stream. + Kinesis *KinesisAction `locationName:"kinesis" type:"structure"` + + // Invoke a Lambda function. + Lambda *LambdaAction `locationName:"lambda" type:"structure"` + + // Publish to another MQTT topic. + Republish *RepublishAction `locationName:"republish" type:"structure"` + + // Write to an S3 bucket. + S3 *S3Action `locationName:"s3" type:"structure"` + + // Publish to an SNS topic. + Sns *SnsAction `locationName:"sns" type:"structure"` + + // Publish to an SQS queue. 
+ Sqs *SqsAction `locationName:"sqs" type:"structure"` +} + +// String returns the string representation +func (s Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Action) GoString() string { + return s.String() +} + +// The input for the AttachPrincipalPolicy operation. +type AttachPrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // The policy name. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The principal which can be a certificate ARN (as returned from the CreateCertificate + // operation) or a Cognito ID. + Principal *string `location:"header" locationName:"x-amzn-iot-principal" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachPrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachPrincipalPolicyInput) GoString() string { + return s.String() +} + +type AttachPrincipalPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachPrincipalPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachPrincipalPolicyOutput) GoString() string { + return s.String() +} + +// The input for the AttachThingPrincipal operation. +type AttachThingPrincipalInput struct { + _ struct{} `type:"structure"` + + // The principal (certificate or other credential). + Principal *string `location:"header" locationName:"x-amzn-principal" type:"string" required:"true"` + + // The name of the thing. 
+ ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AttachThingPrincipalInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachThingPrincipalInput) GoString() string {
+ return s.String()
+}
+
+// The output from the AttachThingPrincipal operation.
+type AttachThingPrincipalOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AttachThingPrincipalOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttachThingPrincipalOutput) GoString() string {
+ return s.String()
+}
+
+// The attribute payload, a JSON string containing up to three key-value pairs.
+//
+// For example: {\"attributes\":{\"string1\":\"string2\"}}
+type AttributePayload struct {
+ _ struct{} `type:"structure"`
+
+ // A JSON string containing up to three key-value pairs in JSON format.
+ //
+ // For example: {\"attributes\":{\"string1\":\"string2\"}}
+ Attributes map[string]*string `locationName:"attributes" type:"map"`
+}
+
+// String returns the string representation
+func (s AttributePayload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttributePayload) GoString() string {
+ return s.String()
+}
+
+// The input for the CancelCertificateTransfer operation.
+type CancelCertificateTransferInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the certificate.
+ CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelCertificateTransferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCertificateTransferInput) GoString() string { + return s.String() +} + +type CancelCertificateTransferOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelCertificateTransferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCertificateTransferOutput) GoString() string { + return s.String() +} + +// Information about a certificate. +type Certificate struct { + _ struct{} `type:"structure"` + + // The ARN of the certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the certificate. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The date and time the certificate was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The status of the certificate. + Status *string `locationName:"status" type:"string" enum:"CertificateStatus"` +} + +// String returns the string representation +func (s Certificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Certificate) GoString() string { + return s.String() +} + +// Describes a certificate. +type CertificateDescription struct { + _ struct{} `type:"structure"` + + // The ARN of the certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the certificate. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The certificate data, in PEM format. 
+ CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"` + + // The date and time the certificate was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix"` + + // The date and time the certificate was last modified. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp" timestampFormat:"unix"` + + // The ID of the AWS account that owns the certificate. + OwnedBy *string `locationName:"ownedBy" type:"string"` + + // The status of the certificate. + Status *string `locationName:"status" type:"string" enum:"CertificateStatus"` +} + +// String returns the string representation +func (s CertificateDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateDescription) GoString() string { + return s.String() +} + +// The input for the CreateCertificateFromCsr operation. +type CreateCertificateFromCsrInput struct { + _ struct{} `type:"structure"` + + // The certificate signing request (CSR). + CertificateSigningRequest *string `locationName:"certificateSigningRequest" min:"1" type:"string" required:"true"` + + // Specifies whether the certificate is active. + SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"` +} + +// String returns the string representation +func (s CreateCertificateFromCsrInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCertificateFromCsrInput) GoString() string { + return s.String() +} + +// The output from the CreateCertificateFromCsr operation. +type CreateCertificateFromCsrOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the certificate. You can use the ARN as + // a principal for policy operations. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the certificate. 
Certificate management operations only take a + // certificateId. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The certificate data, in PEM format. + CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateCertificateFromCsrOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCertificateFromCsrOutput) GoString() string { + return s.String() +} + +// The input for the CreateKeysAndCertificate operation. +type CreateKeysAndCertificateInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the certificate is active. + SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"` +} + +// String returns the string representation +func (s CreateKeysAndCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeysAndCertificateInput) GoString() string { + return s.String() +} + +// The output of the CreateKeysAndCertificate operation. +type CreateKeysAndCertificateOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the certificate. AWS IoT issues a default subject name for the + // certificate (e.g., AWS IoT Certificate). + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + + // The certificate data, in PEM format. + CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"` + + // The generated key pair. 
+ KeyPair *KeyPair `locationName:"keyPair" type:"structure"` +} + +// String returns the string representation +func (s CreateKeysAndCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeysAndCertificateOutput) GoString() string { + return s.String() +} + +// The input for the CreatePolicy operation. +type CreatePolicyInput struct { + _ struct{} `type:"structure"` + + // The JSON document that describes the policy. The length of the policyDocument + // must be a minimum length of 1, with a maximum length of 2048, excluding whitespace. + PolicyDocument *string `locationName:"policyDocument" type:"string" required:"true"` + + // The policy name. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyInput) GoString() string { + return s.String() +} + +// The output from the CreatePolicy operation. +type CreatePolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy ARN. + PolicyArn *string `locationName:"policyArn" type:"string"` + + // The JSON document that describes the policy. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // The policy name. + PolicyName *string `locationName:"policyName" min:"1" type:"string"` + + // The policy version ID. + PolicyVersionId *string `locationName:"policyVersionId" type:"string"` +} + +// String returns the string representation +func (s CreatePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyOutput) GoString() string { + return s.String() +} + +// The input for the CreatePolicyVersion operation. 
+type CreatePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The JSON document that describes the policy. Minimum length of 1. Maximum + // length of 2048 excluding whitespaces + PolicyDocument *string `locationName:"policyDocument" type:"string" required:"true"` + + // The policy name. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // Specifies whether the policy version is set as the default. When this parameter + // is true, the new policy version becomes the operative version; that is, the + // version that is in effect for the certificates that the policy is attached + // to. + SetAsDefault *bool `location:"querystring" locationName:"setAsDefault" type:"boolean"` +} + +// String returns the string representation +func (s CreatePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionInput) GoString() string { + return s.String() +} + +// The output of the CreatePolicyVersion operation. +type CreatePolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the policy version is the default. + IsDefaultVersion *bool `locationName:"isDefaultVersion" type:"boolean"` + + // The policy ARN. + PolicyArn *string `locationName:"policyArn" type:"string"` + + // The JSON document that describes the policy. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // The policy version ID. + PolicyVersionId *string `locationName:"policyVersionId" type:"string"` +} + +// String returns the string representation +func (s CreatePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionOutput) GoString() string { + return s.String() +} + +// The input for the CreateThing operation. +type CreateThingInput struct { + _ struct{} `type:"structure"` + + // The attribute payload. 
Which consists of up to 3 name/value pairs in a JSON + // document. For example: {\"attributes\":{\"string1\":\"string2\"}} + AttributePayload *AttributePayload `locationName:"attributePayload" type:"structure"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateThingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateThingInput) GoString() string { + return s.String() +} + +// The output of the CreateThing operation. +type CreateThingOutput struct { + _ struct{} `type:"structure"` + + // The thing ARN. + ThingArn *string `locationName:"thingArn" type:"string"` + + // The name of the thing. + ThingName *string `locationName:"thingName" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateThingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateThingOutput) GoString() string { + return s.String() +} + +// The input for the CreateTopicRule operation. +type CreateTopicRuleInput struct { + _ struct{} `type:"structure" payload:"TopicRulePayload"` + + // The name of the rule. + RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"` + + // The rule payload. 
+ TopicRulePayload *TopicRulePayload `locationName:"topicRulePayload" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTopicRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicRuleInput) GoString() string { + return s.String() +} + +type CreateTopicRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTopicRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicRuleOutput) GoString() string { + return s.String() +} + +// The input for the DeleteCertificate operation. +type DeleteCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. + CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCertificateInput) GoString() string { + return s.String() +} + +type DeleteCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCertificateOutput) GoString() string { + return s.String() +} + +// The input for the DeletePolicy operation. +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy to delete. 
+ PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +// The input for the DeletePolicyVersion operation. +type DeletePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The name of the policy. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The policy version ID. + PolicyVersionId *string `location:"uri" locationName:"policyVersionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionInput) GoString() string { + return s.String() +} + +type DeletePolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionOutput) GoString() string { + return s.String() +} + +// The input for the DeleteThing operation. +type DeleteThingInput struct { + _ struct{} `type:"structure"` + + // The thing name. 
+ ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteThingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteThingInput) GoString() string { + return s.String() +} + +// The output of the DeleteThing operation. +type DeleteThingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteThingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteThingOutput) GoString() string { + return s.String() +} + +// The input for the DeleteTopicRule operation. +type DeleteTopicRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule. + RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTopicRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicRuleInput) GoString() string { + return s.String() +} + +type DeleteTopicRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTopicRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicRuleOutput) GoString() string { + return s.String() +} + +// The input for the DescribeCertificate operation. +type DescribeCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. 
+ CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificateInput) GoString() string { + return s.String() +} + +// The output of the DescribeCertificate operation. +type DescribeCertificateOutput struct { + _ struct{} `type:"structure"` + + // The description of the certificate. + CertificateDescription *CertificateDescription `locationName:"certificateDescription" type:"structure"` +} + +// String returns the string representation +func (s DescribeCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificateOutput) GoString() string { + return s.String() +} + +// The input for the DescribeEndpoint operation. +type DescribeEndpointInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointInput) GoString() string { + return s.String() +} + +// The output from the DescribeEndpoint operation. +type DescribeEndpointOutput struct { + _ struct{} `type:"structure"` + + // The endpoint. The format of the endpoint is as follows: identifier.iot.region.amazonaws.com. + EndpointAddress *string `locationName:"endpointAddress" type:"string"` +} + +// String returns the string representation +func (s DescribeEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointOutput) GoString() string { + return s.String() +} + +// The input for the DescribeThing operation. +type DescribeThingInput struct { + _ struct{} `type:"structure"` + + // The name of the thing. 
+ ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeThingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeThingInput) GoString() string { + return s.String() +} + +// The output from the DescribeThing operation. +type DescribeThingOutput struct { + _ struct{} `type:"structure"` + + // The attributes which are name/value pairs in JSON format. For example: + // + // {\"attributes\":{\"some-name1\":\"some-value1\"}, {\"some-name2\":\"some-value2\"}, + // {\"some-name3\":\"some-value3\"}} + Attributes map[string]*string `locationName:"attributes" type:"map"` + + // The default client ID. + DefaultClientId *string `locationName:"defaultClientId" type:"string"` + + // The name of the thing. + ThingName *string `locationName:"thingName" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeThingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeThingOutput) GoString() string { + return s.String() +} + +// The input for the DetachPrincipalPolicy operation. +type DetachPrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy to detach. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The principal + // + // If the principal is a certificate, specify the certificate ARN. If the principal + // is a Cognito identity specify the identity ID. 
+ Principal *string `location:"header" locationName:"x-amzn-iot-principal" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachPrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachPrincipalPolicyInput) GoString() string { + return s.String() +} + +type DetachPrincipalPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachPrincipalPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachPrincipalPolicyOutput) GoString() string { + return s.String() +} + +// The input for the DetachThingPrincipal operation. +type DetachThingPrincipalInput struct { + _ struct{} `type:"structure"` + + // The principal. + Principal *string `location:"header" locationName:"x-amzn-principal" type:"string" required:"true"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachThingPrincipalInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachThingPrincipalInput) GoString() string { + return s.String() +} + +// The output from the DetachThingPrincipal operation. +type DetachThingPrincipalOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachThingPrincipalOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachThingPrincipalOutput) GoString() string { + return s.String() +} + +// The input for the DisableTopicRuleRequest operation. +type DisableTopicRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule to disable. 
+ RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisableTopicRuleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableTopicRuleInput) GoString() string {
+ return s.String()
+}
+
+type DisableTopicRuleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisableTopicRuleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisableTopicRuleOutput) GoString() string {
+ return s.String()
+}
+
+// Describes an action to write to a DynamoDB table.
+//
+// The tableName, hashKeyField, and rangeKeyField values must match the values
+// used when you created the table.
+//
+// The hashKeyValue and rangeKeyValue fields use a substitution template syntax.
+// These templates provide data at runtime. The syntax is as follows: ${sql-expression}.
+//
+// You can specify any expression that's valid in a WHERE or SELECT clause,
+// including JSON properties, comparisons, calculations, and functions. For
+// example, the following field uses the third level of the topic:
+//
+// "hashKeyValue": "${topic(3)}"
+//
+// The following field uses the timestamp:
+//
+// "rangeKeyValue": "${timestamp()}"
+type DynamoDBAction struct {
+ _ struct{} `type:"structure"`
+
+ // The hash key name.
+ HashKeyField *string `locationName:"hashKeyField" type:"string" required:"true"`
+
+ // The hash key value.
+ HashKeyValue *string `locationName:"hashKeyValue" type:"string" required:"true"`
+
+ // The action payload, this name can be customized.
+ PayloadField *string `locationName:"payloadField" type:"string"`
+
+ // The range key name.
+ RangeKeyField *string `locationName:"rangeKeyField" type:"string" required:"true"`
+
+ // The range key value. 
+ RangeKeyValue *string `locationName:"rangeKeyValue" type:"string" required:"true"` + + // The ARN of the IAM role that grants access to the DynamoDB table. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The name of the DynamoDB table. + TableName *string `locationName:"tableName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DynamoDBAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DynamoDBAction) GoString() string { + return s.String() +} + +// The input for the EnableTopicRuleRequest operation. +type EnableTopicRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the topic rule to enable. + RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableTopicRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableTopicRuleInput) GoString() string { + return s.String() +} + +type EnableTopicRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableTopicRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableTopicRuleOutput) GoString() string { + return s.String() +} + +// Describes an action that writes data to a Kinesis Firehose stream. +type FirehoseAction struct { + _ struct{} `type:"structure"` + + // The delivery stream name. + DeliveryStreamName *string `locationName:"deliveryStreamName" type:"string" required:"true"` + + // The IAM role that grants access to the firehose stream. 
+ RoleArn *string `locationName:"roleArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s FirehoseAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FirehoseAction) GoString() string { + return s.String() +} + +// The input for the GetLoggingOptions operation. +type GetLoggingOptionsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetLoggingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoggingOptionsInput) GoString() string { + return s.String() +} + +// The output from the GetLoggingOptions operation. +type GetLoggingOptionsOutput struct { + _ struct{} `type:"structure"` + + // The logging level. + LogLevel *string `locationName:"logLevel" type:"string" enum:"LogLevel"` + + // The ARN of the IAM role that grants access. + RoleArn *string `locationName:"roleArn" type:"string"` +} + +// String returns the string representation +func (s GetLoggingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoggingOptionsOutput) GoString() string { + return s.String() +} + +// The input for the GetPolicy operation. +type GetPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyInput) GoString() string { + return s.String() +} + +// The output from the GetPolicy operation. +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // The default policy version ID. 
+ DefaultVersionId *string `locationName:"defaultVersionId" type:"string"` + + // The policy ARN. + PolicyArn *string `locationName:"policyArn" type:"string"` + + // The JSON document that describes the policy. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // The policy name. + PolicyName *string `locationName:"policyName" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyOutput) GoString() string { + return s.String() +} + +// The input for the GetPolicyVersion operation. +type GetPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The name of the policy. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The policy version ID. + PolicyVersionId *string `location:"uri" locationName:"policyVersionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionInput) GoString() string { + return s.String() +} + +// The output from the GetPolicyVersion operation. +type GetPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the policy version is the default. + IsDefaultVersion *bool `locationName:"isDefaultVersion" type:"boolean"` + + // The policy ARN. + PolicyArn *string `locationName:"policyArn" type:"string"` + + // The JSON document that describes the policy. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // The policy name. + PolicyName *string `locationName:"policyName" min:"1" type:"string"` + + // The policy version ID. 
+ PolicyVersionId *string `locationName:"policyVersionId" type:"string"` +} + +// String returns the string representation +func (s GetPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionOutput) GoString() string { + return s.String() +} + +// The input for the GetTopicRule operation. +type GetTopicRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the rule. + RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTopicRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicRuleInput) GoString() string { + return s.String() +} + +// The output from the GetTopicRule operation. +type GetTopicRuleOutput struct { + _ struct{} `type:"structure"` + + // The rule. + Rule *TopicRule `locationName:"rule" type:"structure"` + + // The rule ARN. + RuleArn *string `locationName:"ruleArn" type:"string"` +} + +// String returns the string representation +func (s GetTopicRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicRuleOutput) GoString() string { + return s.String() +} + +// Describes a key pair. +type KeyPair struct { + _ struct{} `type:"structure"` + + // The private key. + PrivateKey *string `min:"1" type:"string"` + + // The public key. + PublicKey *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s KeyPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyPair) GoString() string { + return s.String() +} + +// Describes an action to write data to an Amazon Kinesis stream. +type KinesisAction struct { + _ struct{} `type:"structure"` + + // The partition key. 
+ PartitionKey *string `locationName:"partitionKey" type:"string"`
+
+ // The ARN of the IAM role that grants access to the Kinesis stream.
+ RoleArn *string `locationName:"roleArn" type:"string" required:"true"`
+
+ // The name of the Kinesis stream.
+ StreamName *string `locationName:"streamName" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s KinesisAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KinesisAction) GoString() string {
+ return s.String()
+}
+
+// Describes an action to invoke a Lambda function.
+type LambdaAction struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the Lambda function.
+ FunctionArn *string `locationName:"functionArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LambdaAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaAction) GoString() string {
+ return s.String()
+}
+
+// The input for the ListCertificates operation.
+type ListCertificatesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the order for results. If True, the results are returned in ascending
+ // order, based on the creation date.
+ AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"`
+
+ // The marker for the next set of results.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // The result page size.
+ PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListCertificatesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListCertificatesInput) GoString() string {
+ return s.String()
+}
+
+// The output of the ListCertificates operation. 
+type ListCertificatesOutput struct { + _ struct{} `type:"structure"` + + // The descriptions of the certificates. + Certificates []*Certificate `locationName:"certificates" type:"list"` + + // The marker for the next set of results, or null if there are no additional + // results. + NextMarker *string `locationName:"nextMarker" type:"string"` +} + +// String returns the string representation +func (s ListCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCertificatesOutput) GoString() string { + return s.String() +} + +// The input for the ListPolicies operation. +type ListPoliciesInput struct { + _ struct{} `type:"structure"` + + // Specifies the order for results. If true, the results are returned in ascending + // creation order. + AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"` + + // The marker for the next set of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The result page size. + PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesInput) GoString() string { + return s.String() +} + +// The output from the ListPolicies operation. +type ListPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The marker for the next set of results, or null if there are no additional + // results. + NextMarker *string `locationName:"nextMarker" type:"string"` + + // The descriptions of the policies. 
+ Policies []*Policy `locationName:"policies" type:"list"` +} + +// String returns the string representation +func (s ListPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesOutput) GoString() string { + return s.String() +} + +// The input for the ListPolicyVersions operation. +type ListPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // The policy name. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsInput) GoString() string { + return s.String() +} + +// The output from the ListPolicyVersions operation. +type ListPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // The policy versions. + PolicyVersions []*PolicyVersion `locationName:"policyVersions" type:"list"` +} + +// String returns the string representation +func (s ListPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsOutput) GoString() string { + return s.String() +} + +// The input for the ListPrincipalPolicies operation. +type ListPrincipalPoliciesInput struct { + _ struct{} `type:"structure"` + + // Specifies the order for results. If true, results are returned in ascending + // creation order. + AscendingOrder *bool `location:"querystring" locationName:"isAscendingOrder" type:"boolean"` + + // The marker for the next set of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The result page size. + PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` + + // The principal. 
+ Principal *string `location:"header" locationName:"x-amzn-iot-principal" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPrincipalPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPrincipalPoliciesInput) GoString() string { + return s.String() +} + +// The output from the ListPrincipalPolicies operation. +type ListPrincipalPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The marker for the next set of results, or null if there are no additional + // results. + NextMarker *string `locationName:"nextMarker" type:"string"` + + // The policies. + Policies []*Policy `locationName:"policies" type:"list"` +} + +// String returns the string representation +func (s ListPrincipalPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPrincipalPoliciesOutput) GoString() string { + return s.String() +} + +// The input for the ListPrincipalThings operation. +type ListPrincipalThingsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of principals to return. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // A token used to retrieve the next value. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // The principal. + Principal *string `location:"header" locationName:"x-amzn-principal" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPrincipalThingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPrincipalThingsInput) GoString() string { + return s.String() +} + +// The output from the ListPrincipalThings operation. +type ListPrincipalThingsOutput struct { + _ struct{} `type:"structure"` + + // A token used to retrieve the next value. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The things. + Things []*string `locationName:"things" type:"list"` +} + +// String returns the string representation +func (s ListPrincipalThingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPrincipalThingsOutput) GoString() string { + return s.String() +} + +// The input for the ListThingPrincipal operation. +type ListThingPrincipalsInput struct { + _ struct{} `type:"structure"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListThingPrincipalsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListThingPrincipalsInput) GoString() string { + return s.String() +} + +// The output from the ListThingPrincipals operation. +type ListThingPrincipalsOutput struct { + _ struct{} `type:"structure"` + + // The principals. + Principals []*string `locationName:"principals" type:"list"` +} + +// String returns the string representation +func (s ListThingPrincipalsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListThingPrincipalsOutput) GoString() string { + return s.String() +} + +// The input for the ListThings operation. +type ListThingsInput struct { + _ struct{} `type:"structure"` + + // The attribute name. + AttributeName *string `location:"querystring" locationName:"attributeName" type:"string"` + + // The attribute value. + AttributeValue *string `location:"querystring" locationName:"attributeValue" type:"string"` + + // The maximum number of results. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next value. 
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListThingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListThingsInput) GoString() string { + return s.String() +} + +// The output from the ListThings operation. +type ListThingsOutput struct { + _ struct{} `type:"structure"` + + // A token used to retrieve the next value. + NextToken *string `locationName:"nextToken" type:"string"` + + // The things. + Things []*ThingAttribute `locationName:"things" type:"list"` +} + +// String returns the string representation +func (s ListThingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListThingsOutput) GoString() string { + return s.String() +} + +// The input for the ListTopicRules operation. +type ListTopicRulesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // A token used to retrieve the next value. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // Specifies whether the rule is disabled. + RuleDisabled *bool `location:"querystring" locationName:"ruleDisabled" type:"boolean"` + + // The topic. + Topic *string `location:"querystring" locationName:"topic" type:"string"` +} + +// String returns the string representation +func (s ListTopicRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicRulesInput) GoString() string { + return s.String() +} + +// The output from the ListTopicRules operation. +type ListTopicRulesOutput struct { + _ struct{} `type:"structure"` + + // A token used to retrieve the next value. + NextToken *string `locationName:"nextToken" type:"string"` + + // The rules. 
+ Rules []*TopicRuleListItem `locationName:"rules" type:"list"` +} + +// String returns the string representation +func (s ListTopicRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicRulesOutput) GoString() string { + return s.String() +} + +// Describes the logging options payload. +type LoggingOptionsPayload struct { + _ struct{} `type:"structure"` + + // The logging level. + LogLevel *string `locationName:"logLevel" type:"string" enum:"LogLevel"` + + // The ARN of the IAM role that grants access. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingOptionsPayload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingOptionsPayload) GoString() string { + return s.String() +} + +// Describes an AWS IoT policy. +type Policy struct { + _ struct{} `type:"structure"` + + // The policy ARN. + PolicyArn *string `locationName:"policyArn" type:"string"` + + // The policy name. + PolicyName *string `locationName:"policyName" min:"1" type:"string"` +} + +// String returns the string representation +func (s Policy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Policy) GoString() string { + return s.String() +} + +// Describes a policy version. +type PolicyVersion struct { + _ struct{} `type:"structure"` + + // The date and time the policy was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"unix"` + + // Specifies whether the policy version is the default. + IsDefaultVersion *bool `locationName:"isDefaultVersion" type:"boolean"` + + // The policy version ID. 
+ VersionId *string `locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PolicyVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyVersion) GoString() string { + return s.String() +} + +// The input for the RejectCertificateTransfer operation. +type RejectCertificateTransferInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. + CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` +} + +// String returns the string representation +func (s RejectCertificateTransferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectCertificateTransferInput) GoString() string { + return s.String() +} + +type RejectCertificateTransferOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RejectCertificateTransferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectCertificateTransferOutput) GoString() string { + return s.String() +} + +// The input for the ReplaceTopicRule operation. +type ReplaceTopicRuleInput struct { + _ struct{} `type:"structure" payload:"TopicRulePayload"` + + // The name of the rule. + RuleName *string `location:"uri" locationName:"ruleName" min:"1" type:"string" required:"true"` + + // The rule payload. 
+ TopicRulePayload *TopicRulePayload `locationName:"topicRulePayload" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ReplaceTopicRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceTopicRuleInput) GoString() string { + return s.String() +} + +type ReplaceTopicRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReplaceTopicRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceTopicRuleOutput) GoString() string { + return s.String() +} + +// Describes an action to republish to another topic. +type RepublishAction struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role that grants access. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The name of the MQTT topic. + Topic *string `locationName:"topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s RepublishAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepublishAction) GoString() string { + return s.String() +} + +// Describes an action to write data to an Amazon S3 bucket. +type S3Action struct { + _ struct{} `type:"structure"` + + // The S3 bucket. + BucketName *string `locationName:"bucketName" type:"string" required:"true"` + + // The object key. + Key *string `locationName:"key" type:"string" required:"true"` + + // The ARN of the IAM role that grants access. 
+ RoleArn *string `locationName:"roleArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Action) GoString() string { + return s.String() +} + +// The input for the SetDefaultPolicyVersion operation. +type SetDefaultPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The policy name. + PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // The policy version ID. + PolicyVersionId *string `location:"uri" locationName:"policyVersionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionInput) GoString() string { + return s.String() +} + +type SetDefaultPolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionOutput) GoString() string { + return s.String() +} + +// The input for the SetLoggingOptions operation. +type SetLoggingOptionsInput struct { + _ struct{} `type:"structure" payload:"LoggingOptionsPayload"` + + // The logging options payload. 
+ LoggingOptionsPayload *LoggingOptionsPayload `locationName:"loggingOptionsPayload" type:"structure" required:"true"` +} + +// String returns the string representation +func (s SetLoggingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoggingOptionsInput) GoString() string { + return s.String() +} + +type SetLoggingOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoggingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoggingOptionsOutput) GoString() string { + return s.String() +} + +// Describes an action to publish to an Amazon SNS topic. +type SnsAction struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role that grants access. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The ARN of the SNS topic. + TargetArn *string `locationName:"targetArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s SnsAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnsAction) GoString() string { + return s.String() +} + +// Describes an action to publish data to an SQS queue. +type SqsAction struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue. + QueueUrl *string `locationName:"queueUrl" type:"string" required:"true"` + + // The ARN of the IAM role that grants access. + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // Specifies whether to use Base64 encoding. 
+ UseBase64 *bool `locationName:"useBase64" type:"boolean"` +} + +// String returns the string representation +func (s SqsAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqsAction) GoString() string { + return s.String() +} + +// Describes a thing attribute. +type ThingAttribute struct { + _ struct{} `type:"structure"` + + // The attributes. + Attributes map[string]*string `locationName:"attributes" type:"map"` + + // The name of the thing. + ThingName *string `locationName:"thingName" min:"1" type:"string"` +} + +// String returns the string representation +func (s ThingAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThingAttribute) GoString() string { + return s.String() +} + +// Describes a rule. +type TopicRule struct { + _ struct{} `type:"structure"` + + // The actions associated with the rule. + Actions []*Action `locationName:"actions" type:"list"` + + // The date and time the rule was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The description of the rule. + Description *string `locationName:"description" type:"string"` + + // Specifies whether the rule is disabled. + RuleDisabled *bool `locationName:"ruleDisabled" type:"boolean"` + + // The name of the rule. + RuleName *string `locationName:"ruleName" min:"1" type:"string"` + + // The SQL statement used to query the topic. When using a SQL query with multiple + // lines, be sure to escape the newline characters properly. + Sql *string `locationName:"sql" type:"string"` +} + +// String returns the string representation +func (s TopicRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicRule) GoString() string { + return s.String() +} + +// Describes a rule. 
+type TopicRuleListItem struct { + _ struct{} `type:"structure"` + + // The date and time the rule was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + + // The rule ARN. + RuleArn *string `locationName:"ruleArn" type:"string"` + + // Specifies whether the rule is disabled. + RuleDisabled *bool `locationName:"ruleDisabled" type:"boolean"` + + // The name of the rule. + RuleName *string `locationName:"ruleName" min:"1" type:"string"` + + // The pattern for the topic names that apply. + TopicPattern *string `locationName:"topicPattern" type:"string"` +} + +// String returns the string representation +func (s TopicRuleListItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicRuleListItem) GoString() string { + return s.String() +} + +// Describes a rule. +type TopicRulePayload struct { + _ struct{} `type:"structure"` + + // The actions associated with the rule. + Actions []*Action `locationName:"actions" type:"list" required:"true"` + + // The description of the rule. + Description *string `locationName:"description" type:"string"` + + // Specifies whether the rule is disabled. + RuleDisabled *bool `locationName:"ruleDisabled" type:"boolean"` + + // The SQL statement used to query the topic. For more information, see AWS + // IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) + // in the AWS IoT Developer Guide. + Sql *string `locationName:"sql" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicRulePayload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicRulePayload) GoString() string { + return s.String() +} + +// The input for the TransferCertificate operation. +type TransferCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. 
+ CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` + + // The AWS account. + TargetAwsAccount *string `location:"querystring" locationName:"targetAwsAccount" type:"string" required:"true"` +} + +// String returns the string representation +func (s TransferCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransferCertificateInput) GoString() string { + return s.String() +} + +// The output from the TransferCertificate operation. +type TransferCertificateOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the certificate. + TransferredCertificateArn *string `locationName:"transferredCertificateArn" type:"string"` +} + +// String returns the string representation +func (s TransferCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransferCertificateOutput) GoString() string { + return s.String() +} + +// The input for the UpdateCertificate operation. +type UpdateCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the certificate. + CertificateId *string `location:"uri" locationName:"certificateId" min:"64" type:"string" required:"true"` + + // The new status. + // + // Note: setting the status to PENDING_TRANSFER will result in an exception + // being thrown. PENDING_TRANSFER is a status used internally by AWS IoT and + // is not meant to be used by developers. 
+ NewStatus *string `location:"querystring" locationName:"newStatus" type:"string" required:"true" enum:"CertificateStatus"` +} + +// String returns the string representation +func (s UpdateCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCertificateInput) GoString() string { + return s.String() +} + +type UpdateCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCertificateOutput) GoString() string { + return s.String() +} + +// The input for the UpdateThing operation. +type UpdateThingInput struct { + _ struct{} `type:"structure"` + + // The attribute payload, a JSON string containing up to three key-value pairs. + // + // For example: {\"attributes\":{\"string1\":\"string2\"}} + AttributePayload *AttributePayload `locationName:"attributePayload" type:"structure" required:"true"` + + // The thing name. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateThingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateThingInput) GoString() string { + return s.String() +} + +// The output from the UpdateThing operation. 
+type UpdateThingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateThingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateThingOutput) GoString() string { + return s.String() +} + +const ( + // @enum CertificateStatus + CertificateStatusActive = "ACTIVE" + // @enum CertificateStatus + CertificateStatusInactive = "INACTIVE" + // @enum CertificateStatus + CertificateStatusRevoked = "REVOKED" + // @enum CertificateStatus + CertificateStatusPendingTransfer = "PENDING_TRANSFER" +) + +const ( + // @enum LogLevel + LogLevelDebug = "DEBUG" + // @enum LogLevel + LogLevelInfo = "INFO" + // @enum LogLevel + LogLevelError = "ERROR" + // @enum LogLevel + LogLevelWarn = "WARN" + // @enum LogLevel + LogLevelDisabled = "DISABLED" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,933 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package iot_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iot" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleIoT_AcceptCertificateTransfer() { + svc := iot.New(session.New()) + + params := &iot.AcceptCertificateTransferInput{ + CertificateId: aws.String("CertificateId"), // Required + SetAsActive: aws.Bool(true), + } + resp, err := svc.AcceptCertificateTransfer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_AttachPrincipalPolicy() { + svc := iot.New(session.New()) + + params := &iot.AttachPrincipalPolicyInput{ + PolicyName: aws.String("PolicyName"), // Required + Principal: aws.String("Principal"), // Required + } + resp, err := svc.AttachPrincipalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_AttachThingPrincipal() { + svc := iot.New(session.New()) + + params := &iot.AttachThingPrincipalInput{ + Principal: aws.String("Principal"), // Required + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.AttachThingPrincipal(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_CancelCertificateTransfer() { + svc := iot.New(session.New()) + + params := &iot.CancelCertificateTransferInput{ + CertificateId: aws.String("CertificateId"), // Required + } + resp, err := svc.CancelCertificateTransfer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_CreateCertificateFromCsr() { + svc := iot.New(session.New()) + + params := &iot.CreateCertificateFromCsrInput{ + CertificateSigningRequest: aws.String("CertificateSigningRequest"), // Required + SetAsActive: aws.Bool(true), + } + resp, err := svc.CreateCertificateFromCsr(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_CreateKeysAndCertificate() { + svc := iot.New(session.New()) + + params := &iot.CreateKeysAndCertificateInput{ + SetAsActive: aws.Bool(true), + } + resp, err := svc.CreateKeysAndCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_CreatePolicy() { + svc := iot.New(session.New()) + + params := &iot.CreatePolicyInput{ + PolicyDocument: aws.String("PolicyDocument"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.CreatePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_CreatePolicyVersion() { + svc := iot.New(session.New()) + + params := &iot.CreatePolicyVersionInput{ + PolicyDocument: aws.String("PolicyDocument"), // Required + PolicyName: aws.String("PolicyName"), // Required + SetAsDefault: aws.Bool(true), + } + resp, err := svc.CreatePolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_CreateThing() { + svc := iot.New(session.New()) + + params := &iot.CreateThingInput{ + ThingName: aws.String("ThingName"), // Required + AttributePayload: &iot.AttributePayload{ + Attributes: map[string]*string{ + "Key": aws.String("AttributeValue"), // Required + // More values... + }, + }, + } + resp, err := svc.CreateThing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_CreateTopicRule() { + svc := iot.New(session.New()) + + params := &iot.CreateTopicRuleInput{ + RuleName: aws.String("RuleName"), // Required + TopicRulePayload: &iot.TopicRulePayload{ // Required + Actions: []*iot.Action{ // Required + { // Required + DynamoDB: &iot.DynamoDBAction{ + HashKeyField: aws.String("HashKeyField"), // Required + HashKeyValue: aws.String("HashKeyValue"), // Required + RangeKeyField: aws.String("RangeKeyField"), // Required + RangeKeyValue: aws.String("RangeKeyValue"), // Required + RoleArn: aws.String("AwsArn"), // Required + TableName: aws.String("TableName"), // Required + PayloadField: aws.String("PayloadField"), + }, + Firehose: &iot.FirehoseAction{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + RoleArn: aws.String("AwsArn"), // Required + }, + Kinesis: &iot.KinesisAction{ + RoleArn: aws.String("AwsArn"), // Required + StreamName: aws.String("StreamName"), // Required + PartitionKey: aws.String("PartitionKey"), + }, + Lambda: &iot.LambdaAction{ + FunctionArn: aws.String("FunctionArn"), // Required + }, + Republish: &iot.RepublishAction{ + RoleArn: aws.String("AwsArn"), // Required + Topic: aws.String("TopicPattern"), // Required + }, + S3: &iot.S3Action{ + BucketName: aws.String("BucketName"), // Required + Key: aws.String("Key"), // Required + RoleArn: aws.String("AwsArn"), // Required + }, + Sns: &iot.SnsAction{ + RoleArn: aws.String("AwsArn"), // Required + TargetArn: aws.String("AwsArn"), // Required + }, + Sqs: &iot.SqsAction{ + QueueUrl: aws.String("QueueUrl"), // Required + RoleArn: aws.String("AwsArn"), // Required + UseBase64: aws.Bool(true), + }, + }, + // More values... + }, + Sql: aws.String("SQL"), // Required + Description: aws.String("Description"), + RuleDisabled: aws.Bool(true), + }, + } + resp, err := svc.CreateTopicRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DeleteCertificate() { + svc := iot.New(session.New()) + + params := &iot.DeleteCertificateInput{ + CertificateId: aws.String("CertificateId"), // Required + } + resp, err := svc.DeleteCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DeletePolicy() { + svc := iot.New(session.New()) + + params := &iot.DeletePolicyInput{ + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.DeletePolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DeletePolicyVersion() { + svc := iot.New(session.New()) + + params := &iot.DeletePolicyVersionInput{ + PolicyName: aws.String("PolicyName"), // Required + PolicyVersionId: aws.String("PolicyVersionId"), // Required + } + resp, err := svc.DeletePolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DeleteThing() { + svc := iot.New(session.New()) + + params := &iot.DeleteThingInput{ + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.DeleteThing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_DeleteTopicRule() { + svc := iot.New(session.New()) + + params := &iot.DeleteTopicRuleInput{ + RuleName: aws.String("RuleName"), // Required + } + resp, err := svc.DeleteTopicRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DescribeCertificate() { + svc := iot.New(session.New()) + + params := &iot.DescribeCertificateInput{ + CertificateId: aws.String("CertificateId"), // Required + } + resp, err := svc.DescribeCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DescribeEndpoint() { + svc := iot.New(session.New()) + + var params *iot.DescribeEndpointInput + resp, err := svc.DescribeEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DescribeThing() { + svc := iot.New(session.New()) + + params := &iot.DescribeThingInput{ + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.DescribeThing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_DetachPrincipalPolicy() { + svc := iot.New(session.New()) + + params := &iot.DetachPrincipalPolicyInput{ + PolicyName: aws.String("PolicyName"), // Required + Principal: aws.String("Principal"), // Required + } + resp, err := svc.DetachPrincipalPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DetachThingPrincipal() { + svc := iot.New(session.New()) + + params := &iot.DetachThingPrincipalInput{ + Principal: aws.String("Principal"), // Required + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.DetachThingPrincipal(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_DisableTopicRule() { + svc := iot.New(session.New()) + + params := &iot.DisableTopicRuleInput{ + RuleName: aws.String("RuleName"), // Required + } + resp, err := svc.DisableTopicRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_EnableTopicRule() { + svc := iot.New(session.New()) + + params := &iot.EnableTopicRuleInput{ + RuleName: aws.String("RuleName"), // Required + } + resp, err := svc.EnableTopicRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_GetLoggingOptions() { + svc := iot.New(session.New()) + + var params *iot.GetLoggingOptionsInput + resp, err := svc.GetLoggingOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_GetPolicy() { + svc := iot.New(session.New()) + + params := &iot.GetPolicyInput{ + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.GetPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_GetPolicyVersion() { + svc := iot.New(session.New()) + + params := &iot.GetPolicyVersionInput{ + PolicyName: aws.String("PolicyName"), // Required + PolicyVersionId: aws.String("PolicyVersionId"), // Required + } + resp, err := svc.GetPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_GetTopicRule() { + svc := iot.New(session.New()) + + params := &iot.GetTopicRuleInput{ + RuleName: aws.String("RuleName"), // Required + } + resp, err := svc.GetTopicRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_ListCertificates() { + svc := iot.New(session.New()) + + params := &iot.ListCertificatesInput{ + AscendingOrder: aws.Bool(true), + Marker: aws.String("Marker"), + PageSize: aws.Int64(1), + } + resp, err := svc.ListCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_ListPolicies() { + svc := iot.New(session.New()) + + params := &iot.ListPoliciesInput{ + AscendingOrder: aws.Bool(true), + Marker: aws.String("Marker"), + PageSize: aws.Int64(1), + } + resp, err := svc.ListPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_ListPolicyVersions() { + svc := iot.New(session.New()) + + params := &iot.ListPolicyVersionsInput{ + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.ListPolicyVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_ListPrincipalPolicies() { + svc := iot.New(session.New()) + + params := &iot.ListPrincipalPoliciesInput{ + Principal: aws.String("Principal"), // Required + AscendingOrder: aws.Bool(true), + Marker: aws.String("Marker"), + PageSize: aws.Int64(1), + } + resp, err := svc.ListPrincipalPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_ListPrincipalThings() { + svc := iot.New(session.New()) + + params := &iot.ListPrincipalThingsInput{ + Principal: aws.String("Principal"), // Required + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListPrincipalThings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_ListThingPrincipals() { + svc := iot.New(session.New()) + + params := &iot.ListThingPrincipalsInput{ + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.ListThingPrincipals(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_ListThings() { + svc := iot.New(session.New()) + + params := &iot.ListThingsInput{ + AttributeName: aws.String("AttributeName"), + AttributeValue: aws.String("AttributeValue"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListThings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_ListTopicRules() { + svc := iot.New(session.New()) + + params := &iot.ListTopicRulesInput{ + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + RuleDisabled: aws.Bool(true), + Topic: aws.String("Topic"), + } + resp, err := svc.ListTopicRules(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_RejectCertificateTransfer() { + svc := iot.New(session.New()) + + params := &iot.RejectCertificateTransferInput{ + CertificateId: aws.String("CertificateId"), // Required + } + resp, err := svc.RejectCertificateTransfer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_ReplaceTopicRule() { + svc := iot.New(session.New()) + + params := &iot.ReplaceTopicRuleInput{ + RuleName: aws.String("RuleName"), // Required + TopicRulePayload: &iot.TopicRulePayload{ // Required + Actions: []*iot.Action{ // Required + { // Required + DynamoDB: &iot.DynamoDBAction{ + HashKeyField: aws.String("HashKeyField"), // Required + HashKeyValue: aws.String("HashKeyValue"), // Required + RangeKeyField: aws.String("RangeKeyField"), // Required + RangeKeyValue: aws.String("RangeKeyValue"), // Required + RoleArn: aws.String("AwsArn"), // Required + TableName: aws.String("TableName"), // Required + PayloadField: aws.String("PayloadField"), + }, + Firehose: &iot.FirehoseAction{ + DeliveryStreamName: aws.String("DeliveryStreamName"), // Required + RoleArn: aws.String("AwsArn"), // Required + }, + Kinesis: &iot.KinesisAction{ + RoleArn: aws.String("AwsArn"), // Required + StreamName: aws.String("StreamName"), // Required + PartitionKey: aws.String("PartitionKey"), + }, + Lambda: &iot.LambdaAction{ + FunctionArn: aws.String("FunctionArn"), // Required + }, + Republish: &iot.RepublishAction{ + RoleArn: aws.String("AwsArn"), // Required + Topic: aws.String("TopicPattern"), // Required + }, + S3: &iot.S3Action{ + BucketName: aws.String("BucketName"), // Required + Key: aws.String("Key"), // Required + RoleArn: aws.String("AwsArn"), // Required + }, + Sns: &iot.SnsAction{ + RoleArn: aws.String("AwsArn"), // Required + TargetArn: aws.String("AwsArn"), // Required + }, + 
Sqs: &iot.SqsAction{ + QueueUrl: aws.String("QueueUrl"), // Required + RoleArn: aws.String("AwsArn"), // Required + UseBase64: aws.Bool(true), + }, + }, + // More values... + }, + Sql: aws.String("SQL"), // Required + Description: aws.String("Description"), + RuleDisabled: aws.Bool(true), + }, + } + resp, err := svc.ReplaceTopicRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_SetDefaultPolicyVersion() { + svc := iot.New(session.New()) + + params := &iot.SetDefaultPolicyVersionInput{ + PolicyName: aws.String("PolicyName"), // Required + PolicyVersionId: aws.String("PolicyVersionId"), // Required + } + resp, err := svc.SetDefaultPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_SetLoggingOptions() { + svc := iot.New(session.New()) + + params := &iot.SetLoggingOptionsInput{ + LoggingOptionsPayload: &iot.LoggingOptionsPayload{ // Required + RoleArn: aws.String("AwsArn"), // Required + LogLevel: aws.String("LogLevel"), + }, + } + resp, err := svc.SetLoggingOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoT_TransferCertificate() { + svc := iot.New(session.New()) + + params := &iot.TransferCertificateInput{ + CertificateId: aws.String("CertificateId"), // Required + TargetAwsAccount: aws.String("AwsAccountId"), // Required + } + resp, err := svc.TransferCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_UpdateCertificate() { + svc := iot.New(session.New()) + + params := &iot.UpdateCertificateInput{ + CertificateId: aws.String("CertificateId"), // Required + NewStatus: aws.String("CertificateStatus"), // Required + } + resp, err := svc.UpdateCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoT_UpdateThing() { + svc := iot.New(session.New()) + + params := &iot.UpdateThingInput{ + AttributePayload: &iot.AttributePayload{ // Required + Attributes: map[string]*string{ + "Key": aws.String("AttributeValue"), // Required + // More values... + }, + }, + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.UpdateThing(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/iotiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/iotiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/iotiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/iotiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,178 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iotiface provides an interface for the AWS IoT. +package iotiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/iot" +) + +// IoTAPI is the interface type for iot.IoT. +type IoTAPI interface { + AcceptCertificateTransferRequest(*iot.AcceptCertificateTransferInput) (*request.Request, *iot.AcceptCertificateTransferOutput) + + AcceptCertificateTransfer(*iot.AcceptCertificateTransferInput) (*iot.AcceptCertificateTransferOutput, error) + + AttachPrincipalPolicyRequest(*iot.AttachPrincipalPolicyInput) (*request.Request, *iot.AttachPrincipalPolicyOutput) + + AttachPrincipalPolicy(*iot.AttachPrincipalPolicyInput) (*iot.AttachPrincipalPolicyOutput, error) + + AttachThingPrincipalRequest(*iot.AttachThingPrincipalInput) (*request.Request, *iot.AttachThingPrincipalOutput) + + AttachThingPrincipal(*iot.AttachThingPrincipalInput) (*iot.AttachThingPrincipalOutput, error) + + CancelCertificateTransferRequest(*iot.CancelCertificateTransferInput) (*request.Request, *iot.CancelCertificateTransferOutput) + + CancelCertificateTransfer(*iot.CancelCertificateTransferInput) (*iot.CancelCertificateTransferOutput, error) + + CreateCertificateFromCsrRequest(*iot.CreateCertificateFromCsrInput) (*request.Request, *iot.CreateCertificateFromCsrOutput) + + CreateCertificateFromCsr(*iot.CreateCertificateFromCsrInput) 
(*iot.CreateCertificateFromCsrOutput, error) + + CreateKeysAndCertificateRequest(*iot.CreateKeysAndCertificateInput) (*request.Request, *iot.CreateKeysAndCertificateOutput) + + CreateKeysAndCertificate(*iot.CreateKeysAndCertificateInput) (*iot.CreateKeysAndCertificateOutput, error) + + CreatePolicyRequest(*iot.CreatePolicyInput) (*request.Request, *iot.CreatePolicyOutput) + + CreatePolicy(*iot.CreatePolicyInput) (*iot.CreatePolicyOutput, error) + + CreatePolicyVersionRequest(*iot.CreatePolicyVersionInput) (*request.Request, *iot.CreatePolicyVersionOutput) + + CreatePolicyVersion(*iot.CreatePolicyVersionInput) (*iot.CreatePolicyVersionOutput, error) + + CreateThingRequest(*iot.CreateThingInput) (*request.Request, *iot.CreateThingOutput) + + CreateThing(*iot.CreateThingInput) (*iot.CreateThingOutput, error) + + CreateTopicRuleRequest(*iot.CreateTopicRuleInput) (*request.Request, *iot.CreateTopicRuleOutput) + + CreateTopicRule(*iot.CreateTopicRuleInput) (*iot.CreateTopicRuleOutput, error) + + DeleteCertificateRequest(*iot.DeleteCertificateInput) (*request.Request, *iot.DeleteCertificateOutput) + + DeleteCertificate(*iot.DeleteCertificateInput) (*iot.DeleteCertificateOutput, error) + + DeletePolicyRequest(*iot.DeletePolicyInput) (*request.Request, *iot.DeletePolicyOutput) + + DeletePolicy(*iot.DeletePolicyInput) (*iot.DeletePolicyOutput, error) + + DeletePolicyVersionRequest(*iot.DeletePolicyVersionInput) (*request.Request, *iot.DeletePolicyVersionOutput) + + DeletePolicyVersion(*iot.DeletePolicyVersionInput) (*iot.DeletePolicyVersionOutput, error) + + DeleteThingRequest(*iot.DeleteThingInput) (*request.Request, *iot.DeleteThingOutput) + + DeleteThing(*iot.DeleteThingInput) (*iot.DeleteThingOutput, error) + + DeleteTopicRuleRequest(*iot.DeleteTopicRuleInput) (*request.Request, *iot.DeleteTopicRuleOutput) + + DeleteTopicRule(*iot.DeleteTopicRuleInput) (*iot.DeleteTopicRuleOutput, error) + + DescribeCertificateRequest(*iot.DescribeCertificateInput) (*request.Request, 
*iot.DescribeCertificateOutput) + + DescribeCertificate(*iot.DescribeCertificateInput) (*iot.DescribeCertificateOutput, error) + + DescribeEndpointRequest(*iot.DescribeEndpointInput) (*request.Request, *iot.DescribeEndpointOutput) + + DescribeEndpoint(*iot.DescribeEndpointInput) (*iot.DescribeEndpointOutput, error) + + DescribeThingRequest(*iot.DescribeThingInput) (*request.Request, *iot.DescribeThingOutput) + + DescribeThing(*iot.DescribeThingInput) (*iot.DescribeThingOutput, error) + + DetachPrincipalPolicyRequest(*iot.DetachPrincipalPolicyInput) (*request.Request, *iot.DetachPrincipalPolicyOutput) + + DetachPrincipalPolicy(*iot.DetachPrincipalPolicyInput) (*iot.DetachPrincipalPolicyOutput, error) + + DetachThingPrincipalRequest(*iot.DetachThingPrincipalInput) (*request.Request, *iot.DetachThingPrincipalOutput) + + DetachThingPrincipal(*iot.DetachThingPrincipalInput) (*iot.DetachThingPrincipalOutput, error) + + DisableTopicRuleRequest(*iot.DisableTopicRuleInput) (*request.Request, *iot.DisableTopicRuleOutput) + + DisableTopicRule(*iot.DisableTopicRuleInput) (*iot.DisableTopicRuleOutput, error) + + EnableTopicRuleRequest(*iot.EnableTopicRuleInput) (*request.Request, *iot.EnableTopicRuleOutput) + + EnableTopicRule(*iot.EnableTopicRuleInput) (*iot.EnableTopicRuleOutput, error) + + GetLoggingOptionsRequest(*iot.GetLoggingOptionsInput) (*request.Request, *iot.GetLoggingOptionsOutput) + + GetLoggingOptions(*iot.GetLoggingOptionsInput) (*iot.GetLoggingOptionsOutput, error) + + GetPolicyRequest(*iot.GetPolicyInput) (*request.Request, *iot.GetPolicyOutput) + + GetPolicy(*iot.GetPolicyInput) (*iot.GetPolicyOutput, error) + + GetPolicyVersionRequest(*iot.GetPolicyVersionInput) (*request.Request, *iot.GetPolicyVersionOutput) + + GetPolicyVersion(*iot.GetPolicyVersionInput) (*iot.GetPolicyVersionOutput, error) + + GetTopicRuleRequest(*iot.GetTopicRuleInput) (*request.Request, *iot.GetTopicRuleOutput) + + GetTopicRule(*iot.GetTopicRuleInput) (*iot.GetTopicRuleOutput, error) + 
+ ListCertificatesRequest(*iot.ListCertificatesInput) (*request.Request, *iot.ListCertificatesOutput) + + ListCertificates(*iot.ListCertificatesInput) (*iot.ListCertificatesOutput, error) + + ListPoliciesRequest(*iot.ListPoliciesInput) (*request.Request, *iot.ListPoliciesOutput) + + ListPolicies(*iot.ListPoliciesInput) (*iot.ListPoliciesOutput, error) + + ListPolicyVersionsRequest(*iot.ListPolicyVersionsInput) (*request.Request, *iot.ListPolicyVersionsOutput) + + ListPolicyVersions(*iot.ListPolicyVersionsInput) (*iot.ListPolicyVersionsOutput, error) + + ListPrincipalPoliciesRequest(*iot.ListPrincipalPoliciesInput) (*request.Request, *iot.ListPrincipalPoliciesOutput) + + ListPrincipalPolicies(*iot.ListPrincipalPoliciesInput) (*iot.ListPrincipalPoliciesOutput, error) + + ListPrincipalThingsRequest(*iot.ListPrincipalThingsInput) (*request.Request, *iot.ListPrincipalThingsOutput) + + ListPrincipalThings(*iot.ListPrincipalThingsInput) (*iot.ListPrincipalThingsOutput, error) + + ListThingPrincipalsRequest(*iot.ListThingPrincipalsInput) (*request.Request, *iot.ListThingPrincipalsOutput) + + ListThingPrincipals(*iot.ListThingPrincipalsInput) (*iot.ListThingPrincipalsOutput, error) + + ListThingsRequest(*iot.ListThingsInput) (*request.Request, *iot.ListThingsOutput) + + ListThings(*iot.ListThingsInput) (*iot.ListThingsOutput, error) + + ListTopicRulesRequest(*iot.ListTopicRulesInput) (*request.Request, *iot.ListTopicRulesOutput) + + ListTopicRules(*iot.ListTopicRulesInput) (*iot.ListTopicRulesOutput, error) + + RejectCertificateTransferRequest(*iot.RejectCertificateTransferInput) (*request.Request, *iot.RejectCertificateTransferOutput) + + RejectCertificateTransfer(*iot.RejectCertificateTransferInput) (*iot.RejectCertificateTransferOutput, error) + + ReplaceTopicRuleRequest(*iot.ReplaceTopicRuleInput) (*request.Request, *iot.ReplaceTopicRuleOutput) + + ReplaceTopicRule(*iot.ReplaceTopicRuleInput) (*iot.ReplaceTopicRuleOutput, error) + + 
SetDefaultPolicyVersionRequest(*iot.SetDefaultPolicyVersionInput) (*request.Request, *iot.SetDefaultPolicyVersionOutput) + + SetDefaultPolicyVersion(*iot.SetDefaultPolicyVersionInput) (*iot.SetDefaultPolicyVersionOutput, error) + + SetLoggingOptionsRequest(*iot.SetLoggingOptionsInput) (*request.Request, *iot.SetLoggingOptionsOutput) + + SetLoggingOptions(*iot.SetLoggingOptionsInput) (*iot.SetLoggingOptionsOutput, error) + + TransferCertificateRequest(*iot.TransferCertificateInput) (*request.Request, *iot.TransferCertificateOutput) + + TransferCertificate(*iot.TransferCertificateInput) (*iot.TransferCertificateOutput, error) + + UpdateCertificateRequest(*iot.UpdateCertificateInput) (*request.Request, *iot.UpdateCertificateOutput) + + UpdateCertificate(*iot.UpdateCertificateInput) (*iot.UpdateCertificateOutput, error) + + UpdateThingRequest(*iot.UpdateThingInput) (*request.Request, *iot.UpdateThingOutput) + + UpdateThing(*iot.UpdateThingInput) (*iot.UpdateThingOutput, error) +} + +var _ IoTAPI = (*iot.IoT)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iot/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,94 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package iot + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS IoT provides secure, bi-directional communication between Internet-connected +// things (such as sensors, actuators, embedded devices, or smart appliances) +// and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate +// with, configure rules for data processing and integration with other services, +// organize resources associated with each thing (Thing Registry), configure +// logging, and create and manage policies and credentials to authenticate things. +// +// For more information about how AWS IoT works, see the Developer Guide (http://docs.aws.amazon.com/iot/latest/developerguide/aws-iot-how-it-works.html). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type IoT struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "iot" + +// New creates a new instance of the IoT client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a IoT client from just a session. +// svc := iot.New(mySession) +// +// // Create a IoT client with additional configuration +// svc := iot.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoT { + c := p.ClientConfig(ServiceName, cfgs...) 
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IoT { + svc := &IoT{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "execute-api", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-05-28", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a IoT operation and runs any +// custom request initialization. +func (c *IoT) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,282 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iotdataplane provides a client for AWS IoT Data Plane. 
+package iotdataplane + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opDeleteThingShadow = "DeleteThingShadow" + +// DeleteThingShadowRequest generates a request for the DeleteThingShadow operation. +func (c *IoTDataPlane) DeleteThingShadowRequest(input *DeleteThingShadowInput) (req *request.Request, output *DeleteThingShadowOutput) { + op := &request.Operation{ + Name: opDeleteThingShadow, + HTTPMethod: "DELETE", + HTTPPath: "/things/{thingName}/shadow", + } + + if input == nil { + input = &DeleteThingShadowInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteThingShadowOutput{} + req.Data = output + return +} + +// Deletes the thing shadow for the specified thing. +// +// For more information, see DeleteThingShadow (http://docs.aws.amazon.com/iot/latest/developerguide/API_DeleteThingShadow.html) +// in the AWS IoT Developer Guide. +func (c *IoTDataPlane) DeleteThingShadow(input *DeleteThingShadowInput) (*DeleteThingShadowOutput, error) { + req, out := c.DeleteThingShadowRequest(input) + err := req.Send() + return out, err +} + +const opGetThingShadow = "GetThingShadow" + +// GetThingShadowRequest generates a request for the GetThingShadow operation. +func (c *IoTDataPlane) GetThingShadowRequest(input *GetThingShadowInput) (req *request.Request, output *GetThingShadowOutput) { + op := &request.Operation{ + Name: opGetThingShadow, + HTTPMethod: "GET", + HTTPPath: "/things/{thingName}/shadow", + } + + if input == nil { + input = &GetThingShadowInput{} + } + + req = c.newRequest(op, input, output) + output = &GetThingShadowOutput{} + req.Data = output + return +} + +// Gets the thing shadow for the specified thing. +// +// For more information, see GetThingShadow (http://docs.aws.amazon.com/iot/latest/developerguide/API_GetThingShadow.html) +// in the AWS IoT Developer Guide. 
+func (c *IoTDataPlane) GetThingShadow(input *GetThingShadowInput) (*GetThingShadowOutput, error) { + req, out := c.GetThingShadowRequest(input) + err := req.Send() + return out, err +} + +const opPublish = "Publish" + +// PublishRequest generates a request for the Publish operation. +func (c *IoTDataPlane) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { + op := &request.Operation{ + Name: opPublish, + HTTPMethod: "POST", + HTTPPath: "/topics/{topic}", + } + + if input == nil { + input = &PublishInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PublishOutput{} + req.Data = output + return +} + +// Publishes state information. +// +// For more information, see HTTP Protocol (http://docs.aws.amazon.com/iot/latest/developerguide/protocols.html#http) +// in the AWS IoT Developer Guide. +func (c *IoTDataPlane) Publish(input *PublishInput) (*PublishOutput, error) { + req, out := c.PublishRequest(input) + err := req.Send() + return out, err +} + +const opUpdateThingShadow = "UpdateThingShadow" + +// UpdateThingShadowRequest generates a request for the UpdateThingShadow operation. +func (c *IoTDataPlane) UpdateThingShadowRequest(input *UpdateThingShadowInput) (req *request.Request, output *UpdateThingShadowOutput) { + op := &request.Operation{ + Name: opUpdateThingShadow, + HTTPMethod: "POST", + HTTPPath: "/things/{thingName}/shadow", + } + + if input == nil { + input = &UpdateThingShadowInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateThingShadowOutput{} + req.Data = output + return +} + +// Updates the thing shadow for the specified thing. +// +// For more information, see UpdateThingShadow (http://docs.aws.amazon.com/iot/latest/developerguide/API_UpdateThingShadow.html) +// in the AWS IoT Developer Guide. 
+func (c *IoTDataPlane) UpdateThingShadow(input *UpdateThingShadowInput) (*UpdateThingShadowOutput, error) { + req, out := c.UpdateThingShadowRequest(input) + err := req.Send() + return out, err +} + +// The input for the DeleteThingShadow operation. +type DeleteThingShadowInput struct { + _ struct{} `type:"structure"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteThingShadowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteThingShadowInput) GoString() string { + return s.String() +} + +// The output from the DeleteThingShadow operation. +type DeleteThingShadowOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob" required:"true"` +} + +// String returns the string representation +func (s DeleteThingShadowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteThingShadowOutput) GoString() string { + return s.String() +} + +// The input for the GetThingShadow operation. +type GetThingShadowInput struct { + _ struct{} `type:"structure"` + + // The name of the thing. + ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetThingShadowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThingShadowInput) GoString() string { + return s.String() +} + +// The output from the GetThingShadow operation. +type GetThingShadowOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. 
+ Payload []byte `locationName:"payload" type:"blob"` +} + +// String returns the string representation +func (s GetThingShadowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThingShadowOutput) GoString() string { + return s.String() +} + +// The input for the Publish operation. +type PublishInput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob"` + + // The Quality of Service (QoS) level. + Qos *int64 `location:"querystring" locationName:"qos" type:"integer"` + + // The name of the MQTT topic. + Topic *string `location:"uri" locationName:"topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s PublishInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishInput) GoString() string { + return s.String() +} + +type PublishOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PublishOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishOutput) GoString() string { + return s.String() +} + +// The input for the UpdateThingShadow operation. +type UpdateThingShadowInput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob" required:"true"` + + // The name of the thing. 
+ ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateThingShadowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateThingShadowInput) GoString() string { + return s.String() +} + +// The output from the UpdateThingShadow operation. +type UpdateThingShadowOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // The state information, in JSON format. + Payload []byte `locationName:"payload" type:"blob"` +} + +// String returns the string representation +func (s UpdateThingShadowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateThingShadowOutput) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,52 @@ +package iotdataplane_test + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/iotdataplane" +) + +func TestRequireEndpointIfRegionProvided(t *testing.T) { + svc := iotdataplane.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), + DisableParamValidation: aws.Bool(true), + }) + req, _ := svc.GetThingShadowRequest(nil) + err := req.Build() + + assert.Equal(t, "", svc.Endpoint) + 
assert.Error(t, err) + assert.Equal(t, aws.ErrMissingEndpoint, err) +} + +func TestRequireEndpointIfNoRegionProvided(t *testing.T) { + svc := iotdataplane.New(unit.Session, &aws.Config{ + Region: aws.String(""), + DisableParamValidation: aws.Bool(true), + }) + fmt.Println(svc.ClientInfo.SigningRegion) + + req, _ := svc.GetThingShadowRequest(nil) + err := req.Build() + + assert.Equal(t, "", svc.Endpoint) + assert.Error(t, err) + assert.Equal(t, aws.ErrMissingEndpoint, err) +} + +func TestRequireEndpointUsed(t *testing.T) { + svc := iotdataplane.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), + DisableParamValidation: aws.Bool(true), + Endpoint: aws.String("https://endpoint"), + }) + req, _ := svc.GetThingShadowRequest(nil) + err := req.Build() + + assert.Equal(t, "https://endpoint", svc.Endpoint) + assert.NoError(t, err) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,95 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package iotdataplane_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iotdataplane" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleIoTDataPlane_DeleteThingShadow() { + svc := iotdataplane.New(session.New()) + + params := &iotdataplane.DeleteThingShadowInput{ + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.DeleteThingShadow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoTDataPlane_GetThingShadow() { + svc := iotdataplane.New(session.New()) + + params := &iotdataplane.GetThingShadowInput{ + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.GetThingShadow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleIoTDataPlane_Publish() { + svc := iotdataplane.New(session.New()) + + params := &iotdataplane.PublishInput{ + Topic: aws.String("Topic"), // Required + Payload: []byte("PAYLOAD"), + Qos: aws.Int64(1), + } + resp, err := svc.Publish(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleIoTDataPlane_UpdateThingShadow() { + svc := iotdataplane.New(session.New()) + + params := &iotdataplane.UpdateThingShadowInput{ + Payload: []byte("PAYLOAD"), // Required + ThingName: aws.String("ThingName"), // Required + } + resp, err := svc.UpdateThingShadow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/iotdataplaneiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/iotdataplaneiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/iotdataplaneiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/iotdataplaneiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package iotdataplaneiface provides an interface for the AWS IoT Data Plane. +package iotdataplaneiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/iotdataplane" +) + +// IoTDataPlaneAPI is the interface type for iotdataplane.IoTDataPlane. 
+type IoTDataPlaneAPI interface { + DeleteThingShadowRequest(*iotdataplane.DeleteThingShadowInput) (*request.Request, *iotdataplane.DeleteThingShadowOutput) + + DeleteThingShadow(*iotdataplane.DeleteThingShadowInput) (*iotdataplane.DeleteThingShadowOutput, error) + + GetThingShadowRequest(*iotdataplane.GetThingShadowInput) (*request.Request, *iotdataplane.GetThingShadowOutput) + + GetThingShadow(*iotdataplane.GetThingShadowInput) (*iotdataplane.GetThingShadowOutput, error) + + PublishRequest(*iotdataplane.PublishInput) (*request.Request, *iotdataplane.PublishOutput) + + Publish(*iotdataplane.PublishInput) (*iotdataplane.PublishOutput, error) + + UpdateThingShadowRequest(*iotdataplane.UpdateThingShadowInput) (*request.Request, *iotdataplane.UpdateThingShadowOutput) + + UpdateThingShadow(*iotdataplane.UpdateThingShadowInput) (*iotdataplane.UpdateThingShadowOutput, error) +} + +var _ IoTDataPlaneAPI = (*iotdataplane.IoTDataPlane)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/iotdataplane/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,92 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package iotdataplane + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS IoT-Data enables secure, bi-directional communication between Internet-connected +// things (such as sensors, actuators, embedded devices, or smart appliances) +// and the AWS cloud. It implements a broker for applications and things to +// publish messages over HTTP (Publish) and retrieve, update, and delete thing +// shadows. A thing shadow is a persistent representation of your things and +// their state in the AWS cloud. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type IoTDataPlane struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "data.iot" + +// New creates a new instance of the IoTDataPlane client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a IoTDataPlane client from just a session. +// svc := iotdataplane.New(mySession) +// +// // Create a IoTDataPlane client with additional configuration +// svc := iotdataplane.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoTDataPlane { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IoTDataPlane { + svc := &IoTDataPlane{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "iotdata", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-05-28", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a IoTDataPlane operation and runs any +// custom request initialization. +func (c *IoTDataPlane) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1765 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package kinesis provides a client for Amazon Kinesis. 
+package kinesis + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddTagsToStream = "AddTagsToStream" + +// AddTagsToStreamRequest generates a request for the AddTagsToStream operation. +func (c *Kinesis) AddTagsToStreamRequest(input *AddTagsToStreamInput) (req *request.Request, output *AddTagsToStreamOutput) { + op := &request.Operation{ + Name: opAddTagsToStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToStreamOutput{} + req.Data = output + return +} + +// Adds or updates tags for the specified Amazon Kinesis stream. Each stream +// can have up to 10 tags. +// +// If tags have already been assigned to the stream, AddTagsToStream overwrites +// any existing tags that correspond to the specified tag keys. +func (c *Kinesis) AddTagsToStream(input *AddTagsToStreamInput) (*AddTagsToStreamOutput, error) { + req, out := c.AddTagsToStreamRequest(input) + err := req.Send() + return out, err +} + +const opCreateStream = "CreateStream" + +// CreateStreamRequest generates a request for the CreateStream operation. 
+func (c *Kinesis) CreateStreamRequest(input *CreateStreamInput) (req *request.Request, output *CreateStreamOutput) { + op := &request.Operation{ + Name: opCreateStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateStreamOutput{} + req.Data = output + return +} + +// Creates a Amazon Kinesis stream. A stream captures and transports data records +// that are continuously emitted from different data sources or producers. Scale-out +// within an Amazon Kinesis stream is explicitly supported by means of shards, +// which are uniquely identified groups of data records in an Amazon Kinesis +// stream. +// +// You specify and control the number of shards that a stream is composed of. +// Each shard can support reads up to 5 transactions per second, up to a maximum +// data read total of 2 MB per second. Each shard can support writes up to 1,000 +// records per second, up to a maximum data write total of 1 MB per second. +// You can add shards to a stream if the amount of data input increases and +// you can remove shards if the amount of data input decreases. +// +// The stream name identifies the stream. The name is scoped to the AWS account +// used by the application. It is also scoped by region. That is, two streams +// in two different accounts can have the same name, and two streams in the +// same account, but in two different regions, can have the same name. +// +// CreateStream is an asynchronous operation. Upon receiving a CreateStream +// request, Amazon Kinesis immediately returns and sets the stream status to +// CREATING. After the stream is created, Amazon Kinesis sets the stream status +// to ACTIVE. You should perform read and write operations only on an ACTIVE +// stream. 
+// +// You receive a LimitExceededException when making a CreateStream request +// if you try to do one of the following: +// +// Have more than five streams in the CREATING state at any point in time. +// Create more shards than are authorized for your account. For the default +// shard limit for an AWS account, see Amazon Kinesis Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html). +// If you need to increase this limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). +// +// You can use DescribeStream to check the stream status, which is returned +// in StreamStatus. +// +// CreateStream has a limit of 5 transactions per second per account. +func (c *Kinesis) CreateStream(input *CreateStreamInput) (*CreateStreamOutput, error) { + req, out := c.CreateStreamRequest(input) + err := req.Send() + return out, err +} + +const opDecreaseStreamRetentionPeriod = "DecreaseStreamRetentionPeriod" + +// DecreaseStreamRetentionPeriodRequest generates a request for the DecreaseStreamRetentionPeriod operation. +func (c *Kinesis) DecreaseStreamRetentionPeriodRequest(input *DecreaseStreamRetentionPeriodInput) (req *request.Request, output *DecreaseStreamRetentionPeriodOutput) { + op := &request.Operation{ + Name: opDecreaseStreamRetentionPeriod, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecreaseStreamRetentionPeriodInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DecreaseStreamRetentionPeriodOutput{} + req.Data = output + return +} + +// Decreases the stream's retention period, which is the length of time data +// records are accessible after they are added to the stream. The minimum value +// of a stream’s retention period is 24 hours. +// +// This operation may result in lost data. 
For example, if the stream's retention +// period is 48 hours and is decreased to 24 hours, any data already in the +// stream that is older than 24 hours is inaccessible. +func (c *Kinesis) DecreaseStreamRetentionPeriod(input *DecreaseStreamRetentionPeriodInput) (*DecreaseStreamRetentionPeriodOutput, error) { + req, out := c.DecreaseStreamRetentionPeriodRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStream = "DeleteStream" + +// DeleteStreamRequest generates a request for the DeleteStream operation. +func (c *Kinesis) DeleteStreamRequest(input *DeleteStreamInput) (req *request.Request, output *DeleteStreamOutput) { + op := &request.Operation{ + Name: opDeleteStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStreamOutput{} + req.Data = output + return +} + +// Deletes a stream and all its shards and data. You must shut down any applications +// that are operating on the stream before you delete the stream. If an application +// attempts to operate on a deleted stream, it will receive the exception ResourceNotFoundException. +// +// If the stream is in the ACTIVE state, you can delete it. After a DeleteStream +// request, the specified stream is in the DELETING state until Amazon Kinesis +// completes the deletion. +// +// Note: Amazon Kinesis might continue to accept data read and write operations, +// such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING +// state until the stream deletion is complete. +// +// When you delete a stream, any shards in that stream are also deleted, and +// any tags are dissociated from the stream. +// +// You can use the DescribeStream operation to check the state of the stream, +// which is returned in StreamStatus. 
+// +// DeleteStream has a limit of 5 transactions per second per account. +func (c *Kinesis) DeleteStream(input *DeleteStreamInput) (*DeleteStreamOutput, error) { + req, out := c.DeleteStreamRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStream = "DescribeStream" + +// DescribeStreamRequest generates a request for the DescribeStream operation. +func (c *Kinesis) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) { + op := &request.Operation{ + Name: opDescribeStream, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartShardId"}, + OutputTokens: []string{"StreamDescription.Shards[-1].ShardId"}, + LimitToken: "Limit", + TruncationToken: "StreamDescription.HasMoreShards", + }, + } + + if input == nil { + input = &DescribeStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStreamOutput{} + req.Data = output + return +} + +// Describes the specified stream. +// +// The information about the stream includes its current status, its Amazon +// Resource Name (ARN), and an array of shard objects. For each shard object, +// there is information about the hash key and sequence number ranges that the +// shard spans, and the IDs of any earlier shards that played in a role in creating +// the shard. A sequence number is the identifier associated with every record +// ingested in the Amazon Kinesis stream. The sequence number is assigned when +// a record is put into the stream. +// +// You can limit the number of returned shards using the Limit parameter. The +// number of shards in a stream may be too large to return from a single call +// to DescribeStream. You can detect this by using the HasMoreShards flag in +// the returned output. HasMoreShards is set to true when there is more data +// available. +// +// DescribeStream is a paginated operation. 
If there are more shards available, +// you can request them using the shard ID of the last shard returned. Specify +// this ID in the ExclusiveStartShardId parameter in a subsequent request to +// DescribeStream. +// +// DescribeStream has a limit of 10 transactions per second per account. +func (c *Kinesis) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOutput, error) { + req, out := c.DescribeStreamRequest(input) + err := req.Send() + return out, err +} + +func (c *Kinesis) DescribeStreamPages(input *DescribeStreamInput, fn func(p *DescribeStreamOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeStreamRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeStreamOutput), lastPage) + }) +} + +const opGetRecords = "GetRecords" + +// GetRecordsRequest generates a request for the GetRecords operation. +func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Request, output *GetRecordsOutput) { + op := &request.Operation{ + Name: opGetRecords, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRecordsOutput{} + req.Data = output + return +} + +// Gets data records from a shard. +// +// Specify a shard iterator using the ShardIterator parameter. The shard iterator +// specifies the position in the shard from which you want to start reading +// data records sequentially. If there are no records available in the portion +// of the shard that the iterator points to, GetRecords returns an empty list. +// Note that it might take multiple calls to get to a portion of the shard that +// contains records. +// +// You can scale by provisioning multiple shards. Your application should have +// one thread per shard, each reading continuously from its stream. 
To read +// from a stream continually, call GetRecords in a loop. Use GetShardIterator +// to get the shard iterator to specify in the first GetRecords call. GetRecords +// returns a new shard iterator in NextShardIterator. Specify the shard iterator +// returned in NextShardIterator in subsequent calls to GetRecords. Note that +// if the shard has been closed, the shard iterator can't return more data and +// GetRecords returns null in NextShardIterator. You can terminate the loop +// when the shard is closed, or when the shard iterator reaches the record with +// the sequence number or other attribute that marks it as the last record to +// process. +// +// Each data record can be up to 1 MB in size, and each shard can read up to +// 2 MB per second. You can ensure that your calls don't exceed the maximum +// supported size or throughput by using the Limit parameter to specify the +// maximum number of records that GetRecords can return. Consider your average +// record size when determining this limit. +// +// The size of the data returned by GetRecords will vary depending on the utilization +// of the shard. The maximum size of data that GetRecords can return is 10 MB. +// If a call returns this amount of data, subsequent calls made within the next +// 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient +// provisioned throughput on the shard, subsequent calls made within the next +// 1 second throw ProvisionedThroughputExceededException. Note that GetRecords +// won't return any data when it throws an exception. For this reason, we recommend +// that you wait one second between calls to GetRecords; however, it's possible +// that the application will get exceptions for longer than 1 second. +// +// To detect whether the application is falling behind in processing, you can +// use the MillisBehindLatest response attribute. 
You can also monitor the stream +// using CloudWatch metrics (see Monitoring Amazon Kinesis (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html) +// in the Amazon Kinesis Developer Guide). +// +// Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, +// that is set when an Amazon Kinesis stream successfully receives and stores +// a record. This is commonly referred to as a server-side timestamp, which +// is different than a client-side timestamp, where the timestamp is set when +// a data producer creates or sends the record to a stream. The timestamp has +// millisecond precision. There are no guarantees about the timestamp accuracy, +// or that the timestamp is always increasing. For example, records in a shard +// or across a stream might have timestamps that are out of order. +func (c *Kinesis) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) { + req, out := c.GetRecordsRequest(input) + err := req.Send() + return out, err +} + +const opGetShardIterator = "GetShardIterator" + +// GetShardIteratorRequest generates a request for the GetShardIterator operation. +func (c *Kinesis) GetShardIteratorRequest(input *GetShardIteratorInput) (req *request.Request, output *GetShardIteratorOutput) { + op := &request.Operation{ + Name: opGetShardIterator, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetShardIteratorInput{} + } + + req = c.newRequest(op, input, output) + output = &GetShardIteratorOutput{} + req.Data = output + return +} + +// Gets a shard iterator. A shard iterator expires five minutes after it is +// returned to the requester. +// +// A shard iterator specifies the position in the shard from which to start +// reading data records sequentially. A shard iterator specifies this position +// using the sequence number of a data record in a shard. A sequence number +// is the identifier associated with every record ingested in the Amazon Kinesis +// stream. 
The sequence number is assigned when a record is put into the stream. +// +// You must specify the shard iterator type. For example, you can set the ShardIteratorType +// parameter to read exactly from the position denoted by a specific sequence +// number by using the AT_SEQUENCE_NUMBER shard iterator type, or right after +// the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, +// using sequence numbers returned by earlier calls to PutRecord, PutRecords, +// GetRecords, or DescribeStream. You can specify the shard iterator type TRIM_HORIZON +// in the request to cause ShardIterator to point to the last untrimmed record +// in the shard in the system, which is the oldest data record in the shard. +// Or you can point to just after the most recent record in the shard, by using +// the shard iterator type LATEST, so that you always read the most recent data +// in the shard. +// +// When you repeatedly read from an Amazon Kinesis stream use a GetShardIterator +// request to get the first shard iterator for use in your first GetRecords +// request and then use the shard iterator returned by the GetRecords request +// in NextShardIterator for subsequent reads. A new shard iterator is returned +// by every GetRecords request in NextShardIterator, which you use in the ShardIterator +// parameter of the next GetRecords request. +// +// If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. +// For more information about throughput limits, see GetRecords. +// +// If the shard is closed, the iterator can't return more data, and GetShardIterator +// returns null for its ShardIterator. A shard can be closed using SplitShard +// or MergeShards. +// +// GetShardIterator has a limit of 5 transactions per second per account per +// open shard. 
+func (c *Kinesis) GetShardIterator(input *GetShardIteratorInput) (*GetShardIteratorOutput, error) { + req, out := c.GetShardIteratorRequest(input) + err := req.Send() + return out, err +} + +const opIncreaseStreamRetentionPeriod = "IncreaseStreamRetentionPeriod" + +// IncreaseStreamRetentionPeriodRequest generates a request for the IncreaseStreamRetentionPeriod operation. +func (c *Kinesis) IncreaseStreamRetentionPeriodRequest(input *IncreaseStreamRetentionPeriodInput) (req *request.Request, output *IncreaseStreamRetentionPeriodOutput) { + op := &request.Operation{ + Name: opIncreaseStreamRetentionPeriod, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &IncreaseStreamRetentionPeriodInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &IncreaseStreamRetentionPeriodOutput{} + req.Data = output + return +} + +// Increases the stream's retention period, which is the length of time data +// records are accessible after they are added to the stream. The maximum value +// of a stream’s retention period is 168 hours (7 days). +// +// Upon choosing a longer stream retention period, this operation will increase +// the time period records are accessible that have not yet expired. However, +// it will not make previous data that has expired (older than the stream’s +// previous retention period) accessible after the operation has been called. +// For example, if a stream’s retention period is set to 24 hours and is increased +// to 168 hours, any data that is older than 24 hours will remain inaccessible +// to consumer applications. 
+func (c *Kinesis) IncreaseStreamRetentionPeriod(input *IncreaseStreamRetentionPeriodInput) (*IncreaseStreamRetentionPeriodOutput, error) { + req, out := c.IncreaseStreamRetentionPeriodRequest(input) + err := req.Send() + return out, err +} + +const opListStreams = "ListStreams" + +// ListStreamsRequest generates a request for the ListStreams operation. +func (c *Kinesis) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) { + op := &request.Operation{ + Name: opListStreams, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartStreamName"}, + OutputTokens: []string{"StreamNames[-1]"}, + LimitToken: "Limit", + TruncationToken: "HasMoreStreams", + }, + } + + if input == nil { + input = &ListStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListStreamsOutput{} + req.Data = output + return +} + +// Lists your streams. +// +// The number of streams may be too large to return from a single call to +// ListStreams. You can limit the number of returned streams using the Limit +// parameter. If you do not specify a value for the Limit parameter, Amazon +// Kinesis uses the default limit, which is currently 10. +// +// You can detect if there are more streams available to list by using the +// HasMoreStreams flag from the returned output. If there are more streams available, +// you can request more streams by using the name of the last stream returned +// by the ListStreams request in the ExclusiveStartStreamName parameter in a +// subsequent request to ListStreams. The group of stream names returned by +// the subsequent request is then added to the list. You can continue this process +// until all the stream names have been collected in the list. +// +// ListStreams has a limit of 5 transactions per second per account. 
+func (c *Kinesis) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, error) { + req, out := c.ListStreamsRequest(input) + err := req.Send() + return out, err +} + +func (c *Kinesis) ListStreamsPages(input *ListStreamsInput, fn func(p *ListStreamsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListStreamsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListStreamsOutput), lastPage) + }) +} + +const opListTagsForStream = "ListTagsForStream" + +// ListTagsForStreamRequest generates a request for the ListTagsForStream operation. +func (c *Kinesis) ListTagsForStreamRequest(input *ListTagsForStreamInput) (req *request.Request, output *ListTagsForStreamOutput) { + op := &request.Operation{ + Name: opListTagsForStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForStreamInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForStreamOutput{} + req.Data = output + return +} + +// Lists the tags for the specified Amazon Kinesis stream. +func (c *Kinesis) ListTagsForStream(input *ListTagsForStreamInput) (*ListTagsForStreamOutput, error) { + req, out := c.ListTagsForStreamRequest(input) + err := req.Send() + return out, err +} + +const opMergeShards = "MergeShards" + +// MergeShardsRequest generates a request for the MergeShards operation. 
+func (c *Kinesis) MergeShardsRequest(input *MergeShardsInput) (req *request.Request, output *MergeShardsOutput) { + op := &request.Operation{ + Name: opMergeShards, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MergeShardsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &MergeShardsOutput{} + req.Data = output + return +} + +// Merges two adjacent shards in a stream and combines them into a single shard +// to reduce the stream's capacity to ingest and transport data. Two shards +// are considered adjacent if the union of the hash key ranges for the two shards +// form a contiguous set with no gaps. For example, if you have two shards, +// one with a hash key range of 276...381 and the other with a hash key range +// of 382...454, then you could merge these two shards into a single shard that +// would have a hash key range of 276...454. After the merge, the single child +// shard receives data for all hash key values covered by the two parent shards. +// +// MergeShards is called when there is a need to reduce the overall capacity +// of a stream because of excess capacity that is not being used. You must specify +// the shard to be merged and the adjacent shard for a stream. For more information +// about merging shards, see Merge Two Shards (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html) +// in the Amazon Kinesis Developer Guide. +// +// If the stream is in the ACTIVE state, you can call MergeShards. If a stream +// is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. +// If the specified stream does not exist, MergeShards returns a ResourceNotFoundException. +// +// You can use DescribeStream to check the state of the stream, which is returned +// in StreamStatus. 
+// +// MergeShards is an asynchronous operation. Upon receiving a MergeShards request, +// Amazon Kinesis immediately returns a response and sets the StreamStatus to +// UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus +// to ACTIVE. Read and write operations continue to work while the stream is +// in the UPDATING state. +// +// You use DescribeStream to determine the shard IDs that are specified in +// the MergeShards request. +// +// If you try to operate on too many streams in parallel using CreateStream, +// DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException. +// +// MergeShards has limit of 5 transactions per second per account. +func (c *Kinesis) MergeShards(input *MergeShardsInput) (*MergeShardsOutput, error) { + req, out := c.MergeShardsRequest(input) + err := req.Send() + return out, err +} + +const opPutRecord = "PutRecord" + +// PutRecordRequest generates a request for the PutRecord operation. +func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) { + op := &request.Operation{ + Name: opPutRecord, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordOutput{} + req.Data = output + return +} + +// Writes a single data record from a producer into an Amazon Kinesis stream. +// Call PutRecord to send data from the producer into the Amazon Kinesis stream +// for real-time ingestion and subsequent processing, one record at a time. +// Each shard can support writes up to 1,000 records per second, up to a maximum +// data write total of 1 MB per second. +// +// You must specify the name of the stream that captures, stores, and transports +// the data; a partition key; and the data blob itself. 
+// +// The data blob can be any type of data; for example, a segment from a log +// file, geographic/location data, website clickstream data, and so on. +// +// The partition key is used by Amazon Kinesis to distribute data across shards. +// Amazon Kinesis segregates the data records that belong to a data stream into +// multiple shards, using the partition key associated with each data record +// to determine which shard a given data record belongs to. +// +// Partition keys are Unicode strings, with a maximum length limit of 256 characters +// for each key. An MD5 hash function is used to map partition keys to 128-bit +// integer values and to map associated data records to shards using the hash +// key ranges of the shards. You can override hashing the partition key to determine +// the shard by explicitly specifying a hash value using the ExplicitHashKey +// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// in the Amazon Kinesis Developer Guide. +// +// PutRecord returns the shard ID of where the data record was placed and the +// sequence number that was assigned to the data record. +// +// Sequence numbers generally increase over time. To guarantee strictly increasing +// ordering, use the SequenceNumberForOrdering parameter. For more information, +// see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// in the Amazon Kinesis Developer Guide. +// +// If a PutRecord request cannot be processed because of insufficient provisioned +// throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException. +// +// By default, data records are accessible for only 24 hours from the time +// that they are added to an Amazon Kinesis stream. 
This retention period can +// be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod +// operations. +func (c *Kinesis) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { + req, out := c.PutRecordRequest(input) + err := req.Send() + return out, err +} + +const opPutRecords = "PutRecords" + +// PutRecordsRequest generates a request for the PutRecords operation. +func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Request, output *PutRecordsOutput) { + op := &request.Operation{ + Name: opPutRecords, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRecordsInput{} + } + + req = c.newRequest(op, input, output) + output = &PutRecordsOutput{} + req.Data = output + return +} + +// Writes multiple data records from a producer into an Amazon Kinesis stream +// in a single call (also referred to as a PutRecords request). Use this operation +// to send data from a data producer into the Amazon Kinesis stream for data +// ingestion and processing. +// +// Each PutRecords request can support up to 500 records. Each record in the +// request can be as large as 1 MB, up to a limit of 5 MB for the entire request, +// including partition keys. Each shard can support writes up to 1,000 records +// per second, up to a maximum data write total of 1 MB per second. +// +// You must specify the name of the stream that captures, stores, and transports +// the data; and an array of request Records, with each record in the array +// requiring a partition key and data blob. The record size limit applies to +// the total size of the partition key and data blob. +// +// The data blob can be any type of data; for example, a segment from a log +// file, geographic/location data, website clickstream data, and so on. +// +// The partition key is used by Amazon Kinesis as input to a hash function +// that maps the partition key and associated data to a specific shard. 
An MD5 +// hash function is used to map partition keys to 128-bit integer values and +// to map associated data records to shards. As a result of this hashing mechanism, +// all data records with the same partition key map to the same shard within +// the stream. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// in the Amazon Kinesis Developer Guide. +// +// Each record in the Records array may include an optional parameter, ExplicitHashKey, +// which overrides the partition key to shard mapping. This parameter allows +// a data producer to determine explicitly the shard where the record is stored. +// For more information, see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords) +// in the Amazon Kinesis Developer Guide. +// +// The PutRecords response includes an array of response Records. Each record +// in the response array directly correlates with a record in the request array +// using natural ordering, from the top to the bottom of the request and response. +// The response Records array always includes the same number of records as +// the request array. +// +// The response Records array includes both successfully and unsuccessfully +// processed records. Amazon Kinesis attempts to process all records in each +// PutRecords request. A single record failure does not stop the processing +// of subsequent records. +// +// A successfully-processed record includes ShardId and SequenceNumber values. +// The ShardId parameter identifies the shard in the stream where the record +// is stored. The SequenceNumber parameter is an identifier assigned to the +// put record, unique to all records in the stream. +// +// An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. 
+// ErrorCode reflects the type of error and can be one of the following values: +// ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides +// more detailed information about the ProvisionedThroughputExceededException +// exception including the account ID, stream name, and shard ID of the record +// that was throttled. For more information about partially successful responses, +// see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords) +// in the Amazon Kinesis Developer Guide. +// +// By default, data records are accessible for only 24 hours from the time +// that they are added to an Amazon Kinesis stream. This retention period can +// be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod +// operations. +func (c *Kinesis) PutRecords(input *PutRecordsInput) (*PutRecordsOutput, error) { + req, out := c.PutRecordsRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromStream = "RemoveTagsFromStream" + +// RemoveTagsFromStreamRequest generates a request for the RemoveTagsFromStream operation. +func (c *Kinesis) RemoveTagsFromStreamRequest(input *RemoveTagsFromStreamInput) (req *request.Request, output *RemoveTagsFromStreamOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromStreamOutput{} + req.Data = output + return +} + +// Deletes tags from the specified Amazon Kinesis stream. +// +// If you specify a tag that does not exist, it is ignored. 
+func (c *Kinesis) RemoveTagsFromStream(input *RemoveTagsFromStreamInput) (*RemoveTagsFromStreamOutput, error) { + req, out := c.RemoveTagsFromStreamRequest(input) + err := req.Send() + return out, err +} + +const opSplitShard = "SplitShard" + +// SplitShardRequest generates a request for the SplitShard operation. +func (c *Kinesis) SplitShardRequest(input *SplitShardInput) (req *request.Request, output *SplitShardOutput) { + op := &request.Operation{ + Name: opSplitShard, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SplitShardInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SplitShardOutput{} + req.Data = output + return +} + +// Splits a shard into two new shards in the stream, to increase the stream's +// capacity to ingest and transport data. SplitShard is called when there is +// a need to increase the overall capacity of stream because of an expected +// increase in the volume of data records being ingested. +// +// You can also use SplitShard when a shard appears to be approaching its maximum +// utilization, for example, when the set of producers sending data into the +// specific shard are suddenly sending more than previously anticipated. You +// can also call SplitShard to increase stream capacity, so that more Amazon +// Kinesis applications can simultaneously read data from the stream for real-time +// processing. +// +// You must specify the shard to be split and the new hash key, which is the +// position in the shard where the shard gets split in two. In many cases, the +// new hash key might simply be the average of the beginning and ending hash +// key, but it can be any hash key value in the range being mapped into the +// shard. 
For more information about splitting shards, see Split a Shard (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html) +// in the Amazon Kinesis Developer Guide. +// +// You can use DescribeStream to determine the shard ID and hash key values +// for the ShardToSplit and NewStartingHashKey parameters that are specified +// in the SplitShard request. +// +// SplitShard is an asynchronous operation. Upon receiving a SplitShard request, +// Amazon Kinesis immediately returns a response and sets the stream status +// to UPDATING. After the operation is completed, Amazon Kinesis sets the stream +// status to ACTIVE. Read and write operations continue to work while the stream +// is in the UPDATING state. +// +// You can use DescribeStream to check the status of the stream, which is returned +// in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. +// If a stream is in CREATING or UPDATING or DELETING states, DescribeStream +// returns a ResourceInUseException. +// +// If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. +// If you try to create more shards than are authorized for your account, you +// receive a LimitExceededException. +// +// For the default shard limit for an AWS account, see Amazon Kinesis Limits +// (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html). +// If you need to increase this limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). +// +// If you try to operate on too many streams in parallel using CreateStream, +// DeleteStream, MergeShards or SplitShard, you receive a LimitExceededException. +// +// SplitShard has limit of 5 transactions per second per account. +func (c *Kinesis) SplitShard(input *SplitShardInput) (*SplitShardOutput, error) { + req, out := c.SplitShardRequest(input) + err := req.Send() + return out, err +} + +// Represents the input for AddTagsToStream. 
+type AddTagsToStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the stream. + StreamName *string `min:"1" type:"string" required:"true"` + + // The set of key-value pairs to use to create the tags. + Tags map[string]*string `min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s AddTagsToStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToStreamInput) GoString() string { + return s.String() +} + +type AddTagsToStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for CreateStream. +type CreateStreamInput struct { + _ struct{} `type:"structure"` + + // The number of shards that the stream will use. The throughput of the stream + // is a function of the number of shards; more shards are required for greater + // provisioned throughput. + // + // DefaultShardLimit; + ShardCount *int64 `min:"1" type:"integer" required:"true"` + + // A name to identify the stream. The stream name is scoped to the AWS account + // used by the application that creates the stream. It is also scoped by region. + // That is, two streams in two different AWS accounts can have the same name, + // and two streams in the same AWS account, but in two different regions, can + // have the same name. 
+ StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamInput) GoString() string { + return s.String() +} + +type CreateStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for DecreaseStreamRetentionPeriod. +type DecreaseStreamRetentionPeriodInput struct { + _ struct{} `type:"structure"` + + // The new retention period of the stream, in hours. Must be less than the current + // retention period. + RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"` + + // The name of the stream to modify. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecreaseStreamRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseStreamRetentionPeriodInput) GoString() string { + return s.String() +} + +type DecreaseStreamRetentionPeriodOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DecreaseStreamRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseStreamRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Represents the input for DeleteStream. +type DeleteStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the stream to delete. 
+ StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamInput) GoString() string { + return s.String() +} + +type DeleteStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for DescribeStream. +type DescribeStreamInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the shard to start with. + ExclusiveStartShardId *string `min:"1" type:"string"` + + // The maximum number of shards to return. + Limit *int64 `min:"1" type:"integer"` + + // The name of the stream to describe. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamInput) GoString() string { + return s.String() +} + +// Represents the output for DescribeStream. +type DescribeStreamOutput struct { + _ struct{} `type:"structure"` + + // The current status of the stream, the stream ARN, an array of shard objects + // that comprise the stream, and states whether there are more shards available. + StreamDescription *StreamDescription `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for GetRecords. 
+type GetRecordsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of records to return. Specify a value of up to 10,000. + // If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException. + Limit *int64 `min:"1" type:"integer"` + + // The position in the shard from which you want to start sequentially reading + // data records. A shard iterator specifies this position using the sequence + // number of a data record in the shard. + ShardIterator *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRecordsInput) GoString() string { + return s.String() +} + +// Represents the output for GetRecords. +type GetRecordsOutput struct { + _ struct{} `type:"structure"` + + // The number of milliseconds the GetRecords response is from the tip of the + // stream, indicating how far behind current time the consumer is. A value of + // zero indicates record processing is caught up, and there are no new records + // to process at this moment. + MillisBehindLatest *int64 `type:"long"` + + // The next position in the shard from which to start sequentially reading data + // records. If set to null, the shard has been closed and the requested iterator + // will not return any more data. + NextShardIterator *string `min:"1" type:"string"` + + // The data records retrieved from the shard. + Records []*Record `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetRecordsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRecordsOutput) GoString() string { + return s.String() +} + +// Represents the input for GetShardIterator. +type GetShardIteratorInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the shard to get the iterator for. 
+ ShardId *string `min:"1" type:"string" required:"true"` + + // Determines how the shard iterator is used to start reading data records from + // the shard. + // + // The following are the valid shard iterator types: + // + // AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by + // a specific sequence number. AFTER_SEQUENCE_NUMBER - Start reading right after + // the position denoted by a specific sequence number. TRIM_HORIZON - Start + // reading at the last untrimmed record in the shard in the system, which is + // the oldest data record in the shard. LATEST - Start reading just after the + // most recent record in the shard, so that you always read the most recent + // data in the shard. + ShardIteratorType *string `type:"string" required:"true" enum:"ShardIteratorType"` + + // The sequence number of the data record in the shard from which to start reading + // from. + StartingSequenceNumber *string `type:"string"` + + // The name of the stream. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetShardIteratorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetShardIteratorInput) GoString() string { + return s.String() +} + +// Represents the output for GetShardIterator. +type GetShardIteratorOutput struct { + _ struct{} `type:"structure"` + + // The position in the shard from which to start reading data records sequentially. + // A shard iterator specifies this position using the sequence number of a data + // record in a shard. 
+ ShardIterator *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetShardIteratorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetShardIteratorOutput) GoString() string { + return s.String() +} + +// The range of possible hash key values for the shard, which is a set of ordered +// contiguous positive integers. +type HashKeyRange struct { + _ struct{} `type:"structure"` + + // The ending hash key of the hash key range. + EndingHashKey *string `type:"string" required:"true"` + + // The starting hash key of the hash key range. + StartingHashKey *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HashKeyRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HashKeyRange) GoString() string { + return s.String() +} + +// Represents the input for IncreaseStreamRetentionPeriod. +type IncreaseStreamRetentionPeriodInput struct { + _ struct{} `type:"structure"` + + // The new retention period of the stream, in hours. Must be more than the current + // retention period. + RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"` + + // The name of the stream to modify. 
+ StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s IncreaseStreamRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseStreamRetentionPeriodInput) GoString() string { + return s.String() +} + +type IncreaseStreamRetentionPeriodOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s IncreaseStreamRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseStreamRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Represents the input for ListStreams. +type ListStreamsInput struct { + _ struct{} `type:"structure"` + + // The name of the stream to start the list with. + ExclusiveStartStreamName *string `min:"1" type:"string"` + + // The maximum number of streams to list. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsInput) GoString() string { + return s.String() +} + +// Represents the output for ListStreams. +type ListStreamsOutput struct { + _ struct{} `type:"structure"` + + // If set to true, there are more streams available to list. + HasMoreStreams *bool `type:"boolean" required:"true"` + + // The names of the streams that are associated with the AWS account making + // the ListStreams request. + StreamNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStreamsOutput) GoString() string { + return s.String() +} + +// Represents the input for ListTagsForStream. 
+type ListTagsForStreamInput struct { + _ struct{} `type:"structure"` + + // The key to use as the starting point for the list of tags. If this parameter + // is set, ListTagsForStream gets all tags that occur after ExclusiveStartTagKey. + ExclusiveStartTagKey *string `min:"1" type:"string"` + + // The number of tags to return. If this number is less than the total number + // of tags associated with the stream, HasMoreTags is set to true. To list additional + // tags, set ExclusiveStartTagKey to the last key in the response. + Limit *int64 `min:"1" type:"integer"` + + // The name of the stream. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForStreamInput) GoString() string { + return s.String() +} + +// Represents the output for ListTagsForStream. +type ListTagsForStreamOutput struct { + _ struct{} `type:"structure"` + + // If set to true, more tags are available. To request additional tags, set + // ExclusiveStartTagKey to the key of the last tag returned. + HasMoreTags *bool `type:"boolean" required:"true"` + + // A list of tags associated with StreamName, starting with the first tag after + // ExclusiveStartTagKey and up to the specified Limit. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForStreamOutput) GoString() string { + return s.String() +} + +// Represents the input for MergeShards. +type MergeShardsInput struct { + _ struct{} `type:"structure"` + + // The shard ID of the adjacent shard for the merge. 
+ AdjacentShardToMerge *string `min:"1" type:"string" required:"true"` + + // The shard ID of the shard to combine with the adjacent shard for the merge. + ShardToMerge *string `min:"1" type:"string" required:"true"` + + // The name of the stream for the merge. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MergeShardsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeShardsInput) GoString() string { + return s.String() +} + +type MergeShardsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s MergeShardsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MergeShardsOutput) GoString() string { + return s.String() +} + +// Represents the input for PutRecord. +type PutRecordInput struct { + _ struct{} `type:"structure"` + + // The data blob to put into the record, which is base64-encoded when the blob + // is serialized. When the data blob (the payload before base64-encoding) is + // added to the partition key size, the total size must not exceed the maximum + // record size (1 MB). + Data []byte `type:"blob" required:"true"` + + // The hash value used to explicitly determine the shard the data record is + // assigned to by overriding the partition key hash. + ExplicitHashKey *string `type:"string"` + + // Determines which shard in the stream the data record is assigned to. Partition + // keys are Unicode strings with a maximum length limit of 256 characters for + // each key. Amazon Kinesis uses the partition key as input to a hash function + // that maps the partition key and associated data to a specific shard. Specifically, + // an MD5 hash function is used to map partition keys to 128-bit integer values + // and to map associated data records to shards. 
As a result of this hashing + // mechanism, all data records with the same partition key will map to the same + // shard within the stream. + PartitionKey *string `min:"1" type:"string" required:"true"` + + // Guarantees strictly increasing sequence numbers, for puts from the same client + // and to the same partition key. Usage: set the SequenceNumberForOrdering of + // record n to the sequence number of record n-1 (as returned in the result + // when putting record n-1). If this parameter is not set, records will be coarsely + // ordered based on arrival time. + SequenceNumberForOrdering *string `type:"string"` + + // The name of the stream to put the data record into. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordInput) GoString() string { + return s.String() +} + +// Represents the output for PutRecord. +type PutRecordOutput struct { + _ struct{} `type:"structure"` + + // The sequence number identifier that was assigned to the put data record. + // The sequence number for the record is unique across all records in the stream. + // A sequence number is the identifier associated with every record put into + // the stream. + SequenceNumber *string `type:"string" required:"true"` + + // The shard ID of the shard where the data record was placed. + ShardId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordOutput) GoString() string { + return s.String() +} + +// A PutRecords request. +type PutRecordsInput struct { + _ struct{} `type:"structure"` + + // The records associated with the request. 
+ Records []*PutRecordsRequestEntry `min:"1" type:"list" required:"true"` + + // The stream name associated with the request. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordsInput) GoString() string { + return s.String() +} + +// PutRecords results. +type PutRecordsOutput struct { + _ struct{} `type:"structure"` + + // The number of unsuccessfully processed records in a PutRecords request. + FailedRecordCount *int64 `min:"1" type:"integer"` + + // An array of successfully and unsuccessfully processed record results, correlated + // with the request by natural ordering. A record that is successfully added + // to your Amazon Kinesis stream includes SequenceNumber and ShardId in the + // result. A record that fails to be added to your Amazon Kinesis stream includes + // ErrorCode and ErrorMessage in the result. + Records []*PutRecordsResultEntry `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutRecordsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordsOutput) GoString() string { + return s.String() +} + +// Represents the output for PutRecords. +type PutRecordsRequestEntry struct { + _ struct{} `type:"structure"` + + // The data blob to put into the record, which is base64-encoded when the blob + // is serialized. When the data blob (the payload before base64-encoding) is + // added to the partition key size, the total size must not exceed the maximum + // record size (1 MB). + Data []byte `type:"blob" required:"true"` + + // The hash value used to determine explicitly the shard that the data record + // is assigned to by overriding the partition key hash. 
+ ExplicitHashKey *string `type:"string"` + + // Determines which shard in the stream the data record is assigned to. Partition + // keys are Unicode strings with a maximum length limit of 256 characters for + // each key. Amazon Kinesis uses the partition key as input to a hash function + // that maps the partition key and associated data to a specific shard. Specifically, + // an MD5 hash function is used to map partition keys to 128-bit integer values + // and to map associated data records to shards. As a result of this hashing + // mechanism, all data records with the same partition key map to the same shard + // within the stream. + PartitionKey *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRecordsRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordsRequestEntry) GoString() string { + return s.String() +} + +// Represents the result of an individual record from a PutRecords request. +// A record that is successfully added to your Amazon Kinesis stream includes +// SequenceNumber and ShardId in the result. A record that fails to be added +// to your Amazon Kinesis stream includes ErrorCode and ErrorMessage in the +// result. +type PutRecordsResultEntry struct { + _ struct{} `type:"structure"` + + // The error code for an individual record result. ErrorCodes can be either + // ProvisionedThroughputExceededException or InternalFailure. + ErrorCode *string `type:"string"` + + // The error message for an individual record result. An ErrorCode value of + // ProvisionedThroughputExceededException has an error message that includes + // the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure + // has the error message "Internal Service Failure". + ErrorMessage *string `type:"string"` + + // The sequence number for an individual record result. 
+ SequenceNumber *string `type:"string"` + + // The shard ID for an individual record result. + ShardId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutRecordsResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRecordsResultEntry) GoString() string { + return s.String() +} + +// The unit of data of the Amazon Kinesis stream, which is composed of a sequence +// number, a partition key, and a data blob. +type Record struct { + _ struct{} `type:"structure"` + + // The approximate time that the record was inserted into the stream. + ApproximateArrivalTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The data blob. The data in the blob is both opaque and immutable to the Amazon + // Kinesis service, which does not inspect, interpret, or change the data in + // the blob in any way. When the data blob (the payload before base64-encoding) + // is added to the partition key size, the total size must not exceed the maximum + // record size (1 MB). + Data []byte `type:"blob" required:"true"` + + // Identifies which shard in the stream the data record is assigned to. + PartitionKey *string `min:"1" type:"string" required:"true"` + + // The unique identifier of the record in the stream. + SequenceNumber *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Record) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Record) GoString() string { + return s.String() +} + +// Represents the input for RemoveTagsFromStream. +type RemoveTagsFromStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the stream. + StreamName *string `min:"1" type:"string" required:"true"` + + // A list of tag keys. Each corresponding tag is removed from the stream. 
+ TagKeys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromStreamInput) GoString() string { + return s.String() +} + +type RemoveTagsFromStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromStreamOutput) GoString() string { + return s.String() +} + +// The range of possible sequence numbers for the shard. +type SequenceNumberRange struct { + _ struct{} `type:"structure"` + + // The ending sequence number for the range. Shards that are in the OPEN state + // have an ending sequence number of null. + EndingSequenceNumber *string `type:"string"` + + // The starting sequence number for the range. + StartingSequenceNumber *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SequenceNumberRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SequenceNumberRange) GoString() string { + return s.String() +} + +// A uniquely identified group of data records in an Amazon Kinesis stream. +type Shard struct { + _ struct{} `type:"structure"` + + // The shard Id of the shard adjacent to the shard's parent. + AdjacentParentShardId *string `min:"1" type:"string"` + + // The range of possible hash key values for the shard, which is a set of ordered + // contiguous positive integers. + HashKeyRange *HashKeyRange `type:"structure" required:"true"` + + // The shard Id of the shard's parent. + ParentShardId *string `min:"1" type:"string"` + + // The range of possible sequence numbers for the shard. 
+ SequenceNumberRange *SequenceNumberRange `type:"structure" required:"true"` + + // The unique identifier of the shard within the Amazon Kinesis stream. + ShardId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Shard) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Shard) GoString() string { + return s.String() +} + +// Represents the input for SplitShard. +type SplitShardInput struct { + _ struct{} `type:"structure"` + + // A hash key value for the starting hash key of one of the child shards created + // by the split. The hash key range for a given shard constitutes a set of ordered + // contiguous positive integers. The value for NewStartingHashKey must be in + // the range of hash keys being mapped into the shard. The NewStartingHashKey + // hash key value and all higher hash key values in hash key range are distributed + // to one of the child shards. All the lower hash key values in the range are + // distributed to the other child shard. + NewStartingHashKey *string `type:"string" required:"true"` + + // The shard ID of the shard to split. + ShardToSplit *string `min:"1" type:"string" required:"true"` + + // The name of the stream for the shard split. + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SplitShardInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SplitShardInput) GoString() string { + return s.String() +} + +type SplitShardOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SplitShardOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SplitShardOutput) GoString() string { + return s.String() +} + +// Represents the output for DescribeStream. 
+type StreamDescription struct { + _ struct{} `type:"structure"` + + // If set to true, more shards in the stream are available to describe. + HasMoreShards *bool `type:"boolean" required:"true"` + + // The current retention period, in hours. + RetentionPeriodHours *int64 `min:"24" type:"integer" required:"true"` + + // The shards that comprise the stream. + Shards []*Shard `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) for the stream being described. + StreamARN *string `type:"string" required:"true"` + + // The name of the stream being described. + StreamName *string `min:"1" type:"string" required:"true"` + + // The current status of the stream being described. + // + // The stream status is one of the following states: + // + // CREATING - The stream is being created. Amazon Kinesis immediately returns + // and sets StreamStatus to CREATING. DELETING - The stream is being deleted. + // The specified stream is in the DELETING state until Amazon Kinesis completes + // the deletion. ACTIVE - The stream exists and is ready for read and write + // operations or deletion. You should perform read and write operations only + // on an ACTIVE stream. UPDATING - Shards in the stream are being merged or + // split. Read and write operations continue to work while the stream is in + // the UPDATING state. + StreamStatus *string `type:"string" required:"true" enum:"StreamStatus"` +} + +// String returns the string representation +func (s StreamDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamDescription) GoString() string { + return s.String() +} + +// Metadata assigned to the stream, consisting of a key-value pair. +type Tag struct { + _ struct{} `type:"structure"` + + // A unique identifier for the tag. Maximum length: 128 characters. Valid characters: + // Unicode letters, digits, white space, _ . 
/ = + - % @ + Key *string `min:"1" type:"string" required:"true"` + + // An optional string, typically used to describe or define the tag. Maximum + // length: 256 characters. Valid characters: Unicode letters, digits, white + // space, _ . / = + - % @ + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +const ( + // @enum ShardIteratorType + ShardIteratorTypeAtSequenceNumber = "AT_SEQUENCE_NUMBER" + // @enum ShardIteratorType + ShardIteratorTypeAfterSequenceNumber = "AFTER_SEQUENCE_NUMBER" + // @enum ShardIteratorType + ShardIteratorTypeTrimHorizon = "TRIM_HORIZON" + // @enum ShardIteratorType + ShardIteratorTypeLatest = "LATEST" +) + +const ( + // @enum StreamStatus + StreamStatusCreating = "CREATING" + // @enum StreamStatus + StreamStatusDeleting = "DELETING" + // @enum StreamStatus + StreamStatusActive = "ACTIVE" + // @enum StreamStatus + StreamStatusUpdating = "UPDATING" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,337 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package kinesis_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kinesis" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleKinesis_AddTagsToStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.AddTagsToStreamInput{ + StreamName: aws.String("StreamName"), // Required + Tags: map[string]*string{ // Required + "Key": aws.String("TagValue"), // Required + // More values... + }, + } + resp, err := svc.AddTagsToStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_CreateStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.CreateStreamInput{ + ShardCount: aws.Int64(1), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.CreateStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_DecreaseStreamRetentionPeriod() { + svc := kinesis.New(session.New()) + + params := &kinesis.DecreaseStreamRetentionPeriodInput{ + RetentionPeriodHours: aws.Int64(1), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.DecreaseStreamRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKinesis_DeleteStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.DeleteStreamInput{ + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.DeleteStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_DescribeStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.DescribeStreamInput{ + StreamName: aws.String("StreamName"), // Required + ExclusiveStartShardId: aws.String("ShardId"), + Limit: aws.Int64(1), + } + resp, err := svc.DescribeStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_GetRecords() { + svc := kinesis.New(session.New()) + + params := &kinesis.GetRecordsInput{ + ShardIterator: aws.String("ShardIterator"), // Required + Limit: aws.Int64(1), + } + resp, err := svc.GetRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_GetShardIterator() { + svc := kinesis.New(session.New()) + + params := &kinesis.GetShardIteratorInput{ + ShardId: aws.String("ShardId"), // Required + ShardIteratorType: aws.String("ShardIteratorType"), // Required + StreamName: aws.String("StreamName"), // Required + StartingSequenceNumber: aws.String("SequenceNumber"), + } + resp, err := svc.GetShardIterator(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_IncreaseStreamRetentionPeriod() { + svc := kinesis.New(session.New()) + + params := &kinesis.IncreaseStreamRetentionPeriodInput{ + RetentionPeriodHours: aws.Int64(1), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.IncreaseStreamRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_ListStreams() { + svc := kinesis.New(session.New()) + + params := &kinesis.ListStreamsInput{ + ExclusiveStartStreamName: aws.String("StreamName"), + Limit: aws.Int64(1), + } + resp, err := svc.ListStreams(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_ListTagsForStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.ListTagsForStreamInput{ + StreamName: aws.String("StreamName"), // Required + ExclusiveStartTagKey: aws.String("TagKey"), + Limit: aws.Int64(1), + } + resp, err := svc.ListTagsForStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKinesis_MergeShards() { + svc := kinesis.New(session.New()) + + params := &kinesis.MergeShardsInput{ + AdjacentShardToMerge: aws.String("ShardId"), // Required + ShardToMerge: aws.String("ShardId"), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.MergeShards(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_PutRecord() { + svc := kinesis.New(session.New()) + + params := &kinesis.PutRecordInput{ + Data: []byte("PAYLOAD"), // Required + PartitionKey: aws.String("PartitionKey"), // Required + StreamName: aws.String("StreamName"), // Required + ExplicitHashKey: aws.String("HashKey"), + SequenceNumberForOrdering: aws.String("SequenceNumber"), + } + resp, err := svc.PutRecord(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_PutRecords() { + svc := kinesis.New(session.New()) + + params := &kinesis.PutRecordsInput{ + Records: []*kinesis.PutRecordsRequestEntry{ // Required + { // Required + Data: []byte("PAYLOAD"), // Required + PartitionKey: aws.String("PartitionKey"), // Required + ExplicitHashKey: aws.String("HashKey"), + }, + // More values... + }, + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.PutRecords(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKinesis_RemoveTagsFromStream() { + svc := kinesis.New(session.New()) + + params := &kinesis.RemoveTagsFromStreamInput{ + StreamName: aws.String("StreamName"), // Required + TagKeys: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromStream(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKinesis_SplitShard() { + svc := kinesis.New(session.New()) + + params := &kinesis.SplitShardInput{ + NewStartingHashKey: aws.String("HashKey"), // Required + ShardToSplit: aws.String("ShardId"), // Required + StreamName: aws.String("StreamName"), // Required + } + resp, err := svc.SplitShard(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,78 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package kinesisiface provides an interface for the Amazon Kinesis. 
+package kinesisiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/kinesis" +) + +// KinesisAPI is the interface type for kinesis.Kinesis. +type KinesisAPI interface { + AddTagsToStreamRequest(*kinesis.AddTagsToStreamInput) (*request.Request, *kinesis.AddTagsToStreamOutput) + + AddTagsToStream(*kinesis.AddTagsToStreamInput) (*kinesis.AddTagsToStreamOutput, error) + + CreateStreamRequest(*kinesis.CreateStreamInput) (*request.Request, *kinesis.CreateStreamOutput) + + CreateStream(*kinesis.CreateStreamInput) (*kinesis.CreateStreamOutput, error) + + DecreaseStreamRetentionPeriodRequest(*kinesis.DecreaseStreamRetentionPeriodInput) (*request.Request, *kinesis.DecreaseStreamRetentionPeriodOutput) + + DecreaseStreamRetentionPeriod(*kinesis.DecreaseStreamRetentionPeriodInput) (*kinesis.DecreaseStreamRetentionPeriodOutput, error) + + DeleteStreamRequest(*kinesis.DeleteStreamInput) (*request.Request, *kinesis.DeleteStreamOutput) + + DeleteStream(*kinesis.DeleteStreamInput) (*kinesis.DeleteStreamOutput, error) + + DescribeStreamRequest(*kinesis.DescribeStreamInput) (*request.Request, *kinesis.DescribeStreamOutput) + + DescribeStream(*kinesis.DescribeStreamInput) (*kinesis.DescribeStreamOutput, error) + + DescribeStreamPages(*kinesis.DescribeStreamInput, func(*kinesis.DescribeStreamOutput, bool) bool) error + + GetRecordsRequest(*kinesis.GetRecordsInput) (*request.Request, *kinesis.GetRecordsOutput) + + GetRecords(*kinesis.GetRecordsInput) (*kinesis.GetRecordsOutput, error) + + GetShardIteratorRequest(*kinesis.GetShardIteratorInput) (*request.Request, *kinesis.GetShardIteratorOutput) + + GetShardIterator(*kinesis.GetShardIteratorInput) (*kinesis.GetShardIteratorOutput, error) + + IncreaseStreamRetentionPeriodRequest(*kinesis.IncreaseStreamRetentionPeriodInput) (*request.Request, *kinesis.IncreaseStreamRetentionPeriodOutput) + + IncreaseStreamRetentionPeriod(*kinesis.IncreaseStreamRetentionPeriodInput) 
(*kinesis.IncreaseStreamRetentionPeriodOutput, error) + + ListStreamsRequest(*kinesis.ListStreamsInput) (*request.Request, *kinesis.ListStreamsOutput) + + ListStreams(*kinesis.ListStreamsInput) (*kinesis.ListStreamsOutput, error) + + ListStreamsPages(*kinesis.ListStreamsInput, func(*kinesis.ListStreamsOutput, bool) bool) error + + ListTagsForStreamRequest(*kinesis.ListTagsForStreamInput) (*request.Request, *kinesis.ListTagsForStreamOutput) + + ListTagsForStream(*kinesis.ListTagsForStreamInput) (*kinesis.ListTagsForStreamOutput, error) + + MergeShardsRequest(*kinesis.MergeShardsInput) (*request.Request, *kinesis.MergeShardsOutput) + + MergeShards(*kinesis.MergeShardsInput) (*kinesis.MergeShardsOutput, error) + + PutRecordRequest(*kinesis.PutRecordInput) (*request.Request, *kinesis.PutRecordOutput) + + PutRecord(*kinesis.PutRecordInput) (*kinesis.PutRecordOutput, error) + + PutRecordsRequest(*kinesis.PutRecordsInput) (*request.Request, *kinesis.PutRecordsOutput) + + PutRecords(*kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) + + RemoveTagsFromStreamRequest(*kinesis.RemoveTagsFromStreamInput) (*request.Request, *kinesis.RemoveTagsFromStreamOutput) + + RemoveTagsFromStream(*kinesis.RemoveTagsFromStreamInput) (*kinesis.RemoveTagsFromStreamOutput, error) + + SplitShardRequest(*kinesis.SplitShardInput) (*request.Request, *kinesis.SplitShardOutput) + + SplitShard(*kinesis.SplitShardInput) (*kinesis.SplitShardOutput, error) +} + +var _ KinesisAPI = (*kinesis.Kinesis)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/service.go 2016-05-24 07:05:22.000000000 
+0000 @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package kinesis + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Kinesis is a managed service that scales elastically for real time +// processing of streaming big data. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Kinesis struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "kinesis" + +// New creates a new instance of the Kinesis client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Kinesis client from just a session. +// svc := kinesis.New(mySession) +// +// // Create a Kinesis client with additional configuration +// svc := kinesis.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kinesis { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Kinesis { + svc := &Kinesis{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-12-02", + JSONVersion: "1.1", + TargetPrefix: "Kinesis_20131202", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Kinesis operation and runs any +// custom request initialization. +func (c *Kinesis) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kinesis/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package kinesis + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *Kinesis) WaitUntilStreamExists(input *DescribeStreamInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeStream", + Delay: 10, + MaxAttempts: 18, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "StreamDescription.StreamStatus", + Expected: "ACTIVE", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2619 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package kms provides a client for AWS Key Management Service. +package kms + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCancelKeyDeletion = "CancelKeyDeletion" + +// CancelKeyDeletionRequest generates a request for the CancelKeyDeletion operation. +func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *request.Request, output *CancelKeyDeletionOutput) { + op := &request.Operation{ + Name: opCancelKeyDeletion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelKeyDeletionInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelKeyDeletionOutput{} + req.Data = output + return +} + +// Cancels the deletion of a customer master key (CMK). 
When this operation +// is successful, the CMK is set to the Disabled state. To enable a CMK, use +// EnableKey. +// +// For more information about scheduling and canceling deletion of a CMK, go +// to Deleting Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) +// in the AWS Key Management Service Developer Guide. +func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeletionOutput, error) { + req, out := c.CancelKeyDeletionRequest(input) + err := req.Send() + return out, err +} + +const opCreateAlias = "CreateAlias" + +// CreateAliasRequest generates a request for the CreateAlias operation. +func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { + op := &request.Operation{ + Name: opCreateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateAliasOutput{} + req.Data = output + return +} + +// Creates a display name for a customer master key. An alias can be used to +// identify a key and should be unique. The console enforces a one-to-one mapping +// between the alias and a key. An alias name can contain only alphanumeric +// characters, forward slashes (/), underscores (_), and dashes (-). An alias +// must start with the word "alias" followed by a forward slash (alias/). An +// alias that begins with "aws" after the forward slash (alias/aws...) is reserved +// by Amazon Web Services (AWS). +// +// The alias and the key it is mapped to must be in the same AWS account and +// the same region. +// +// To map an alias to a different key, call UpdateAlias. 
+func (c *KMS) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { + req, out := c.CreateAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateGrant = "CreateGrant" + +// CreateGrantRequest generates a request for the CreateGrant operation. +func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, output *CreateGrantOutput) { + op := &request.Operation{ + Name: opCreateGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGrantInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateGrantOutput{} + req.Data = output + return +} + +// Adds a grant to a key to specify who can use the key and under what conditions. +// Grants are alternate permission mechanisms to key policies. +// +// For more information about grants, see Grants (http://docs.aws.amazon.com/kms/latest/developerguide/grants.html) +// in the AWS Key Management Service Developer Guide. +func (c *KMS) CreateGrant(input *CreateGrantInput) (*CreateGrantOutput, error) { + req, out := c.CreateGrantRequest(input) + err := req.Send() + return out, err +} + +const opCreateKey = "CreateKey" + +// CreateKeyRequest generates a request for the CreateKey operation. +func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, output *CreateKeyOutput) { + op := &request.Operation{ + Name: opCreateKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateKeyOutput{} + req.Data = output + return +} + +// Creates a customer master key. Customer master keys can be used to encrypt +// small amounts of data (less than 4K) directly, but they are most commonly +// used to encrypt or envelope data keys that are then used to encrypt customer +// data. For more information about data keys, see GenerateDataKey and GenerateDataKeyWithoutPlaintext. 
+func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { + req, out := c.CreateKeyRequest(input) + err := req.Send() + return out, err +} + +const opDecrypt = "Decrypt" + +// DecryptRequest generates a request for the Decrypt operation. +func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output *DecryptOutput) { + op := &request.Operation{ + Name: opDecrypt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecryptInput{} + } + + req = c.newRequest(op, input, output) + output = &DecryptOutput{} + req.Data = output + return +} + +// Decrypts ciphertext. Ciphertext is plaintext that has been previously encrypted +// by using any of the following functions: GenerateDataKey GenerateDataKeyWithoutPlaintext +// Encrypt +// +// Note that if a caller has been granted access permissions to all keys (through, +// for example, IAM user policies that grant Decrypt permission on all resources), +// then ciphertext encrypted by using keys in other accounts where the key grants +// access to the caller can be decrypted. To remedy this, we recommend that +// you do not grant Decrypt access in an IAM user policy. Instead grant Decrypt +// access only in key policies. If you must grant Decrypt access in an IAM user +// policy, you should scope the resource to specific keys or to specific trusted +// accounts. +func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) { + req, out := c.DecryptRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAlias = "DeleteAlias" + +// DeleteAliasRequest generates a request for the DeleteAlias operation. 
+func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { + op := &request.Operation{ + Name: opDeleteAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAliasOutput{} + req.Data = output + return +} + +// Deletes the specified alias. To map an alias to a different key, call UpdateAlias. +func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { + req, out := c.DeleteAliasRequest(input) + err := req.Send() + return out, err +} + +const opDescribeKey = "DescribeKey" + +// DescribeKeyRequest generates a request for the DescribeKey operation. +func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request, output *DescribeKeyOutput) { + op := &request.Operation{ + Name: opDescribeKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeKeyOutput{} + req.Data = output + return +} + +// Provides detailed information about the specified customer master key. +func (c *KMS) DescribeKey(input *DescribeKeyInput) (*DescribeKeyOutput, error) { + req, out := c.DescribeKeyRequest(input) + err := req.Send() + return out, err +} + +const opDisableKey = "DisableKey" + +// DisableKeyRequest generates a request for the DisableKey operation. 
+func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, output *DisableKeyOutput) { + op := &request.Operation{ + Name: opDisableKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableKeyOutput{} + req.Data = output + return +} + +// Sets the state of a master key to disabled, thereby preventing its use for +// cryptographic operations. For more information about how key state affects +// the use of a master key, go to How Key State Affects the Use of a Customer +// Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the AWS Key Management Service Developer Guide. +func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) { + req, out := c.DisableKeyRequest(input) + err := req.Send() + return out, err +} + +const opDisableKeyRotation = "DisableKeyRotation" + +// DisableKeyRotationRequest generates a request for the DisableKeyRotation operation. +func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *request.Request, output *DisableKeyRotationOutput) { + op := &request.Operation{ + Name: opDisableKeyRotation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableKeyRotationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisableKeyRotationOutput{} + req.Data = output + return +} + +// Disables rotation of the specified key. 
+func (c *KMS) DisableKeyRotation(input *DisableKeyRotationInput) (*DisableKeyRotationOutput, error) { + req, out := c.DisableKeyRotationRequest(input) + err := req.Send() + return out, err +} + +const opEnableKey = "EnableKey" + +// EnableKeyRequest generates a request for the EnableKey operation. +func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, output *EnableKeyOutput) { + op := &request.Operation{ + Name: opEnableKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableKeyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableKeyOutput{} + req.Data = output + return +} + +// Marks a key as enabled, thereby permitting its use. +func (c *KMS) EnableKey(input *EnableKeyInput) (*EnableKeyOutput, error) { + req, out := c.EnableKeyRequest(input) + err := req.Send() + return out, err +} + +const opEnableKeyRotation = "EnableKeyRotation" + +// EnableKeyRotationRequest generates a request for the EnableKeyRotation operation. +func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *request.Request, output *EnableKeyRotationOutput) { + op := &request.Operation{ + Name: opEnableKeyRotation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableKeyRotationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &EnableKeyRotationOutput{} + req.Data = output + return +} + +// Enables rotation of the specified customer master key. 
+func (c *KMS) EnableKeyRotation(input *EnableKeyRotationInput) (*EnableKeyRotationOutput, error) { + req, out := c.EnableKeyRotationRequest(input) + err := req.Send() + return out, err +} + +const opEncrypt = "Encrypt" + +// EncryptRequest generates a request for the Encrypt operation. +func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output *EncryptOutput) { + op := &request.Operation{ + Name: opEncrypt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EncryptInput{} + } + + req = c.newRequest(op, input, output) + output = &EncryptOutput{} + req.Data = output + return +} + +// Encrypts plaintext into ciphertext by using a customer master key. The Encrypt +// function has two primary use cases: You can encrypt up to 4 KB of arbitrary +// data such as an RSA key, a database password, or other sensitive customer +// information. If you are moving encrypted data from one region to another, +// you can use this API to encrypt in the new region the plaintext data key +// that was used to encrypt the data in the original region. This provides you +// with an encrypted copy of the data key that can be decrypted in the new region +// and used there to decrypt the encrypted data. +// +// Unless you are moving encrypted data from one region to another, you don't +// use this function to encrypt a generated data key within a region. You retrieve +// data keys already encrypted by calling the GenerateDataKey or GenerateDataKeyWithoutPlaintext +// function. Data keys don't need to be encrypted again by calling Encrypt. +// +// If you want to encrypt data locally in your application, you can use the +// GenerateDataKey function to return a plaintext data encryption key and a +// copy of the key encrypted under the customer master key (CMK) of your choosing. 
+func (c *KMS) Encrypt(input *EncryptInput) (*EncryptOutput, error) { + req, out := c.EncryptRequest(input) + err := req.Send() + return out, err +} + +const opGenerateDataKey = "GenerateDataKey" + +// GenerateDataKeyRequest generates a request for the GenerateDataKey operation. +func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.Request, output *GenerateDataKeyOutput) { + op := &request.Operation{ + Name: opGenerateDataKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateDataKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateDataKeyOutput{} + req.Data = output + return +} + +// Generates a data key that you can use in your application to locally encrypt +// data. This call returns a plaintext version of the key in the Plaintext field +// of the response object and an encrypted copy of the key in the CiphertextBlob +// field. The key is encrypted by using the master key specified by the KeyId +// field. To decrypt the encrypted key, pass it to the Decrypt API. +// +// We recommend that you use the following pattern to locally encrypt data: +// call the GenerateDataKey API, use the key returned in the Plaintext response +// field to locally encrypt data, and then erase the plaintext data key from +// memory. Store the encrypted data key (contained in the CiphertextBlob field) +// alongside of the locally encrypted data. +// +// You should not call the Encrypt function to re-encrypt your data keys within +// a region. GenerateDataKey always returns the data key encrypted and tied +// to the customer master key that will be used to decrypt it. There is no need +// to decrypt it twice. If you decide to use the optional EncryptionContext +// parameter, you must also store the context in full or at least store enough +// information along with the encrypted data to be able to reconstruct the context +// when submitting the ciphertext to the Decrypt API. 
It is a good practice +// to choose a context that you can reconstruct on the fly to better secure +// the ciphertext. For more information about how this parameter is used, see +// Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html). +// +// To decrypt data, pass the encrypted data key to the Decrypt API. Decrypt +// uses the associated master key to decrypt the encrypted data key and returns +// it as plaintext. Use the plaintext data key to locally decrypt your data +// and then erase the key from memory. You must specify the encryption context, +// if any, that you specified when you generated the key. The encryption context +// is logged by CloudTrail, and you can use this log to help track the use of +// particular data. +func (c *KMS) GenerateDataKey(input *GenerateDataKeyInput) (*GenerateDataKeyOutput, error) { + req, out := c.GenerateDataKeyRequest(input) + err := req.Send() + return out, err +} + +const opGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" + +// GenerateDataKeyWithoutPlaintextRequest generates a request for the GenerateDataKeyWithoutPlaintext operation. +func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWithoutPlaintextInput) (req *request.Request, output *GenerateDataKeyWithoutPlaintextOutput) { + op := &request.Operation{ + Name: opGenerateDataKeyWithoutPlaintext, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateDataKeyWithoutPlaintextInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateDataKeyWithoutPlaintextOutput{} + req.Data = output + return +} + +// Returns a data key encrypted by a customer master key without the plaintext +// copy of that key. Otherwise, this API functions exactly like GenerateDataKey. +// You can use this API to, for example, satisfy an audit requirement that an +// encrypted key be made available without exposing the plaintext copy of that +// key. 
+func (c *KMS) GenerateDataKeyWithoutPlaintext(input *GenerateDataKeyWithoutPlaintextInput) (*GenerateDataKeyWithoutPlaintextOutput, error) { + req, out := c.GenerateDataKeyWithoutPlaintextRequest(input) + err := req.Send() + return out, err +} + +const opGenerateRandom = "GenerateRandom" + +// GenerateRandomRequest generates a request for the GenerateRandom operation. +func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Request, output *GenerateRandomOutput) { + op := &request.Operation{ + Name: opGenerateRandom, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateRandomInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateRandomOutput{} + req.Data = output + return +} + +// Generates an unpredictable byte string. +func (c *KMS) GenerateRandom(input *GenerateRandomInput) (*GenerateRandomOutput, error) { + req, out := c.GenerateRandomRequest(input) + err := req.Send() + return out, err +} + +const opGetKeyPolicy = "GetKeyPolicy" + +// GetKeyPolicyRequest generates a request for the GetKeyPolicy operation. +func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Request, output *GetKeyPolicyOutput) { + op := &request.Operation{ + Name: opGetKeyPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetKeyPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetKeyPolicyOutput{} + req.Data = output + return +} + +// Retrieves a policy attached to the specified key. +func (c *KMS) GetKeyPolicy(input *GetKeyPolicyInput) (*GetKeyPolicyOutput, error) { + req, out := c.GetKeyPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetKeyRotationStatus = "GetKeyRotationStatus" + +// GetKeyRotationStatusRequest generates a request for the GetKeyRotationStatus operation. 
+func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req *request.Request, output *GetKeyRotationStatusOutput) { + op := &request.Operation{ + Name: opGetKeyRotationStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetKeyRotationStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetKeyRotationStatusOutput{} + req.Data = output + return +} + +// Retrieves a Boolean value that indicates whether key rotation is enabled +// for the specified key. +func (c *KMS) GetKeyRotationStatus(input *GetKeyRotationStatusInput) (*GetKeyRotationStatusOutput, error) { + req, out := c.GetKeyRotationStatusRequest(input) + err := req.Send() + return out, err +} + +const opListAliases = "ListAliases" + +// ListAliasesRequest generates a request for the ListAliases operation. +func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { + op := &request.Operation{ + Name: opListAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "Truncated", + }, + } + + if input == nil { + input = &ListAliasesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAliasesOutput{} + req.Data = output + return +} + +// Lists all of the key aliases in the account. 
+func (c *KMS) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { + req, out := c.ListAliasesRequest(input) + err := req.Send() + return out, err +} + +func (c *KMS) ListAliasesPages(input *ListAliasesInput, fn func(p *ListAliasesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAliasesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAliasesOutput), lastPage) + }) +} + +const opListGrants = "ListGrants" + +// ListGrantsRequest generates a request for the ListGrants operation. +func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, output *ListGrantsResponse) { + op := &request.Operation{ + Name: opListGrants, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "Truncated", + }, + } + + if input == nil { + input = &ListGrantsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGrantsResponse{} + req.Data = output + return +} + +// List the grants for a specified key. +func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) { + req, out := c.ListGrantsRequest(input) + err := req.Send() + return out, err +} + +func (c *KMS) ListGrantsPages(input *ListGrantsInput, fn func(p *ListGrantsResponse, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGrantsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGrantsResponse), lastPage) + }) +} + +const opListKeyPolicies = "ListKeyPolicies" + +// ListKeyPoliciesRequest generates a request for the ListKeyPolicies operation. 
+func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.Request, output *ListKeyPoliciesOutput) { + op := &request.Operation{ + Name: opListKeyPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "Truncated", + }, + } + + if input == nil { + input = &ListKeyPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListKeyPoliciesOutput{} + req.Data = output + return +} + +// Retrieves a list of policies attached to a key. +func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutput, error) { + req, out := c.ListKeyPoliciesRequest(input) + err := req.Send() + return out, err +} + +func (c *KMS) ListKeyPoliciesPages(input *ListKeyPoliciesInput, fn func(p *ListKeyPoliciesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListKeyPoliciesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListKeyPoliciesOutput), lastPage) + }) +} + +const opListKeys = "ListKeys" + +// ListKeysRequest generates a request for the ListKeys operation. +func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, output *ListKeysOutput) { + op := &request.Operation{ + Name: opListKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "Truncated", + }, + } + + if input == nil { + input = &ListKeysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListKeysOutput{} + req.Data = output + return +} + +// Lists the customer master keys. 
+func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) { + req, out := c.ListKeysRequest(input) + err := req.Send() + return out, err +} + +func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(p *ListKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListKeysOutput), lastPage) + }) +} + +const opListRetirableGrants = "ListRetirableGrants" + +// ListRetirableGrantsRequest generates a request for the ListRetirableGrants operation. +func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *request.Request, output *ListGrantsResponse) { + op := &request.Operation{ + Name: opListRetirableGrants, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRetirableGrantsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGrantsResponse{} + req.Data = output + return +} + +// Returns a list of all grants for which the grant's RetiringPrincipal matches +// the one specified. +// +// A typical use is to list all grants that you are able to retire. To retire +// a grant, use RetireGrant. +func (c *KMS) ListRetirableGrants(input *ListRetirableGrantsInput) (*ListGrantsResponse, error) { + req, out := c.ListRetirableGrantsRequest(input) + err := req.Send() + return out, err +} + +const opPutKeyPolicy = "PutKeyPolicy" + +// PutKeyPolicyRequest generates a request for the PutKeyPolicy operation. 
+func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Request, output *PutKeyPolicyOutput) { + op := &request.Operation{ + Name: opPutKeyPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutKeyPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutKeyPolicyOutput{} + req.Data = output + return +} + +// Attaches a policy to the specified key. +func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error) { + req, out := c.PutKeyPolicyRequest(input) + err := req.Send() + return out, err +} + +const opReEncrypt = "ReEncrypt" + +// ReEncryptRequest generates a request for the ReEncrypt operation. +func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, output *ReEncryptOutput) { + op := &request.Operation{ + Name: opReEncrypt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReEncryptInput{} + } + + req = c.newRequest(op, input, output) + output = &ReEncryptOutput{} + req.Data = output + return +} + +// Encrypts data on the server side with a new customer master key without exposing +// the plaintext of the data on the client side. The data is first decrypted +// and then encrypted. This operation can also be used to change the encryption +// context of a ciphertext. +// +// Unlike other actions, ReEncrypt is authorized twice - once as ReEncryptFrom +// on the source key and once as ReEncryptTo on the destination key. We therefore +// recommend that you include the "action":"kms:ReEncrypt*" statement in your +// key policies to permit re-encryption from or to the key. The statement is +// included automatically when you authorize use of the key through the console +// but must be included manually when you set a policy by using the PutKeyPolicy +// function. 
+func (c *KMS) ReEncrypt(input *ReEncryptInput) (*ReEncryptOutput, error) { + req, out := c.ReEncryptRequest(input) + err := req.Send() + return out, err +} + +const opRetireGrant = "RetireGrant" + +// RetireGrantRequest generates a request for the RetireGrant operation. +func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, output *RetireGrantOutput) { + op := &request.Operation{ + Name: opRetireGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetireGrantInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RetireGrantOutput{} + req.Data = output + return +} + +// Retires a grant. You can retire a grant when you're done using it to clean +// up. You should revoke a grant when you intend to actively deny operations +// that depend on it. The following are permitted to call this API: The account +// that created the grant The RetiringPrincipal, if present The GranteePrincipal, +// if RetireGrant is a grantee operation The grant to retire must be identified +// by its grant token or by a combination of the key ARN and the grant ID. A +// grant token is a unique variable-length base64-encoded string. A grant ID +// is a 64 character unique identifier of a grant. Both are returned by the +// CreateGrant function. +func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) { + req, out := c.RetireGrantRequest(input) + err := req.Send() + return out, err +} + +const opRevokeGrant = "RevokeGrant" + +// RevokeGrantRequest generates a request for the RevokeGrant operation. 
+func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request, output *RevokeGrantOutput) { + op := &request.Operation{ + Name: opRevokeGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeGrantInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RevokeGrantOutput{} + req.Data = output + return +} + +// Revokes a grant. You can revoke a grant to actively deny operations that +// depend on it. +func (c *KMS) RevokeGrant(input *RevokeGrantInput) (*RevokeGrantOutput, error) { + req, out := c.RevokeGrantRequest(input) + err := req.Send() + return out, err +} + +const opScheduleKeyDeletion = "ScheduleKeyDeletion" + +// ScheduleKeyDeletionRequest generates a request for the ScheduleKeyDeletion operation. +func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *request.Request, output *ScheduleKeyDeletionOutput) { + op := &request.Operation{ + Name: opScheduleKeyDeletion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ScheduleKeyDeletionInput{} + } + + req = c.newRequest(op, input, output) + output = &ScheduleKeyDeletionOutput{} + req.Data = output + return +} + +// Schedules the deletion of a customer master key (CMK). You may provide a +// waiting period, specified in days, before deletion occurs. If you do not +// provide a waiting period, the default period of 30 days is used. When this +// operation is successful, the state of the CMK changes to PendingDeletion. +// Before the waiting period ends, you can use CancelKeyDeletion to cancel the +// deletion of the CMK. After the waiting period ends, AWS KMS deletes the CMK +// and all AWS KMS data associated with it, including all aliases that point +// to it. +// +// Deleting a CMK is a destructive and potentially dangerous operation. 
When +// a CMK is deleted, all data that was encrypted under the CMK is rendered unrecoverable. +// To restrict the use of a CMK without deleting it, use DisableKey. +// +// For more information about scheduling a CMK for deletion, go to Deleting +// Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) +// in the AWS Key Management Service Developer Guide. +func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) { + req, out := c.ScheduleKeyDeletionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAlias = "UpdateAlias" + +// UpdateAliasRequest generates a request for the UpdateAlias operation. +func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *UpdateAliasOutput) { + op := &request.Operation{ + Name: opUpdateAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAliasOutput{} + req.Data = output + return +} + +// Updates an alias to map it to a different key. +// +// An alias is not a property of a key. Therefore, an alias can be mapped to +// and unmapped from an existing key without changing the properties of the +// key. +// +// An alias name can contain only alphanumeric characters, forward slashes +// (/), underscores (_), and dashes (-). An alias must start with the word "alias" +// followed by a forward slash (alias/). An alias that begins with "aws" after +// the forward slash (alias/aws...) is reserved by Amazon Web Services (AWS). +// +// The alias and the key it is mapped to must be in the same AWS account and +// the same region. 
+func (c *KMS) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) { + req, out := c.UpdateAliasRequest(input) + err := req.Send() + return out, err +} + +const opUpdateKeyDescription = "UpdateKeyDescription" + +// UpdateKeyDescriptionRequest generates a request for the UpdateKeyDescription operation. +func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req *request.Request, output *UpdateKeyDescriptionOutput) { + op := &request.Operation{ + Name: opUpdateKeyDescription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateKeyDescriptionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateKeyDescriptionOutput{} + req.Data = output + return +} + +// Updates the description of a key. +func (c *KMS) UpdateKeyDescription(input *UpdateKeyDescriptionInput) (*UpdateKeyDescriptionOutput, error) { + req, out := c.UpdateKeyDescriptionRequest(input) + err := req.Send() + return out, err +} + +// Contains information about an alias. +type AliasListEntry struct { + _ struct{} `type:"structure"` + + // String that contains the key ARN. + AliasArn *string `min:"20" type:"string"` + + // String that contains the alias. + AliasName *string `min:"1" type:"string"` + + // String that contains the key identifier pointed to by the alias. + TargetKeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AliasListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AliasListEntry) GoString() string { + return s.String() +} + +type CancelKeyDeletionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the customer master key (CMK) for which to cancel + // deletion. 
+ // + // To specify this value, use the unique key ID or the Amazon Resource Name + // (ARN) of the CMK. Examples: Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // + // To obtain the unique key ID and key ARN for a given CMK, use ListKeys or + // DescribeKey. + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelKeyDeletionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelKeyDeletionInput) GoString() string { + return s.String() +} + +type CancelKeyDeletionOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the master key for which deletion is canceled. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CancelKeyDeletionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelKeyDeletionOutput) GoString() string { + return s.String() +} + +type CreateAliasInput struct { + _ struct{} `type:"structure"` + + // String that contains the display name. The name must start with the word + // "alias" followed by a forward slash (alias/). Aliases that begin with "alias/AWS" + // are reserved. + AliasName *string `min:"1" type:"string" required:"true"` + + // An identifier of the key for which you are creating the alias. This value + // cannot be another alias but can be a globally unique identifier or a fully + // specified ARN to a key. 
Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + TargetKeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasInput) GoString() string { + return s.String() +} + +type CreateAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasOutput) GoString() string { + return s.String() +} + +type CreateGrantInput struct { + _ struct{} `type:"structure"` + + // The conditions under which the operations permitted by the grant are allowed. + // + // You can use this value to allow the operations permitted by the grant only + // when a specified encryption context is present. For more information, see + // Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html) + // in the AWS Key Management Service Developer Guide. + Constraints *GrantConstraints `type:"structure"` + + // A list of grant tokens. + // + // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // The principal that is given permission to perform the operations that the + // grant permits. + // + // To specify the principal, use the Amazon Resource Name (ARN) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an AWS principal. Valid AWS principals include AWS accounts (root), IAM + // users, federated users, and assumed role users. 
For examples of the ARN syntax + // to use for specifying a principal, see AWS Identity and Access Management + // (IAM) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the AWS General Reference. + GranteePrincipal *string `min:"1" type:"string" required:"true"` + + // The unique identifier for the customer master key (CMK) that the grant applies + // to. + // + // To specify this value, use the globally unique key ID or the Amazon Resource + // Name (ARN) of the key. Examples: Globally unique key ID: 12345678-1234-1234-1234-123456789012 + // Key ARN: arn:aws:kms:us-west-2:123456789012:key/12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` + + // A friendly name for identifying the grant. Use this value to prevent unintended + // creation of duplicate grants when retrying this request. + // + // When this value is absent, all CreateGrant requests result in a new grant + // with a unique GrantId even if all the supplied parameters are identical. + // This can result in unintended duplicates when you retry the CreateGrant request. + // + // When this value is present, you can retry a CreateGrant request with identical + // parameters; if the grant already exists, the original GrantId is returned + // without creating a new grant. Note that the returned grant token is unique + // with every CreateGrant request, even when a duplicate GrantId is returned. + // All grant tokens obtained in this way can be used interchangeably. + Name *string `min:"1" type:"string"` + + // A list of operations that the grant permits. The list can contain any combination + // of one or more of the following values: Decrypt Encrypt GenerateDataKey + // GenerateDataKeyWithoutPlaintext ReEncryptFrom ReEncryptTo CreateGrant RetireGrant + Operations []*string `type:"list"` + + // The principal that is given permission to retire the grant by using RetireGrant + // operation. 
+ // + // To specify the principal, use the Amazon Resource Name (ARN) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an AWS principal. Valid AWS principals include AWS accounts (root), IAM + // users, federated users, and assumed role users. For examples of the ARN syntax + // to use for specifying a principal, see AWS Identity and Access Management + // (IAM) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the AWS General Reference. + RetiringPrincipal *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGrantInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGrantInput) GoString() string { + return s.String() +} + +type CreateGrantOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the grant. + // + // You can use the GrantId in a subsequent RetireGrant or RevokeGrant operation. + GrantId *string `min:"1" type:"string"` + + // The grant token. + // + // For more information about using grant tokens, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGrantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGrantOutput) GoString() string { + return s.String() +} + +type CreateKeyInput struct { + _ struct{} `type:"structure"` + + // Description of the key. We recommend that you choose a description that helps + // your customer decide whether the key is appropriate for a task. + Description *string `type:"string"` + + // Specifies the intended use of the key. 
Currently this defaults to ENCRYPT/DECRYPT, + // and only symmetric encryption and decryption are supported. + KeyUsage *string `type:"string" enum:"KeyUsageType"` + + // Policy to attach to the key. This is required and delegates back to the account. + // The key is the root of trust. The policy size limit is 32 KiB (32768 bytes). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyInput) GoString() string { + return s.String() +} + +type CreateKeyOutput struct { + _ struct{} `type:"structure"` + + // Metadata associated with the key. + KeyMetadata *KeyMetadata `type:"structure"` +} + +// String returns the string representation +func (s CreateKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyOutput) GoString() string { + return s.String() +} + +type DecryptInput struct { + _ struct{} `type:"structure"` + + // Ciphertext to be decrypted. The blob includes metadata. + CiphertextBlob []byte `min:"1" type:"blob" required:"true"` + + // The encryption context. If this was specified in the Encrypt function, it + // must be specified here or the decryption operation will fail. For more information, + // see Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html). + EncryptionContext map[string]*string `type:"map"` + + // A list of grant tokens. + // + // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. 
+ GrantTokens []*string `type:"list"` +} + +// String returns the string representation +func (s DecryptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecryptInput) GoString() string { + return s.String() +} + +type DecryptOutput struct { + _ struct{} `type:"structure"` + + // ARN of the key used to perform the decryption. This value is returned if + // no errors are encountered during the operation. + KeyId *string `min:"1" type:"string"` + + // Decrypted plaintext data. This value may not be returned if the customer + // master key is not available or if you didn't have permission to use it. + Plaintext []byte `min:"1" type:"blob"` +} + +// String returns the string representation +func (s DecryptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecryptOutput) GoString() string { + return s.String() +} + +type DeleteAliasInput struct { + _ struct{} `type:"structure"` + + // The alias to be deleted. The name must start with the word "alias" followed + // by a forward slash (alias/). Aliases that begin with "alias/AWS" are reserved. + AliasName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasInput) GoString() string { + return s.String() +} + +type DeleteAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasOutput) GoString() string { + return s.String() +} + +type DescribeKeyInput struct { + _ struct{} `type:"structure"` + + // A list of grant tokens. 
+ // + // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias + // Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyInput) GoString() string { + return s.String() +} + +type DescribeKeyOutput struct { + _ struct{} `type:"structure"` + + // Metadata associated with the key. + KeyMetadata *KeyMetadata `type:"structure"` +} + +// String returns the string representation +func (s DescribeKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyOutput) GoString() string { + return s.String() +} + +type DisableKeyInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. 
Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableKeyInput) GoString() string { + return s.String() +} + +type DisableKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableKeyOutput) GoString() string { + return s.String() +} + +type DisableKeyRotationInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableKeyRotationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableKeyRotationInput) GoString() string { + return s.String() +} + +type DisableKeyRotationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableKeyRotationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableKeyRotationOutput) GoString() string { + return s.String() +} + +type EnableKeyInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. 
This value can be a globally + // unique identifier or the fully specified ARN to a key. Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableKeyInput) GoString() string { + return s.String() +} + +type EnableKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableKeyOutput) GoString() string { + return s.String() +} + +type EnableKeyRotationInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. 
Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableKeyRotationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableKeyRotationInput) GoString() string { + return s.String() +} + +type EnableKeyRotationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableKeyRotationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableKeyRotationOutput) GoString() string { + return s.String() +} + +type EncryptInput struct { + _ struct{} `type:"structure"` + + // Name/value pair that specifies the encryption context to be used for authenticated + // encryption. If used here, the same value must be supplied to the Decrypt + // API or decryption will fail. For more information, see Encryption Context + // (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html). + EncryptionContext map[string]*string `type:"map"` + + // A list of grant tokens. + // + // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". 
Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias + // Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` + + // Data to be encrypted. + Plaintext []byte `min:"1" type:"blob" required:"true"` +} + +// String returns the string representation +func (s EncryptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptInput) GoString() string { + return s.String() +} + +type EncryptOutput struct { + _ struct{} `type:"structure"` + + // The encrypted plaintext. If you are using the CLI, the value is Base64 encoded. + // Otherwise, it is not encoded. + CiphertextBlob []byte `min:"1" type:"blob"` + + // The ID of the key used during encryption. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s EncryptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptOutput) GoString() string { + return s.String() +} + +type GenerateDataKeyInput struct { + _ struct{} `type:"structure"` + + // Name/value pair that contains additional data to be authenticated during + // the encryption and decryption processes that use the key. This value is logged + // by AWS CloudTrail to provide context around the data encrypted by the key. + EncryptionContext map[string]*string `type:"map"` + + // A list of grant tokens. + // + // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // A unique identifier for the customer master key. 
This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias + // Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` + + // Value that identifies the encryption algorithm and key size to generate a + // data key for. Currently this can be AES_128 or AES_256. + KeySpec *string `type:"string" enum:"DataKeySpec"` + + // Integer that contains the number of bytes to generate. Common values are + // 128, 256, 512, and 1024. 1024 is the current limit. We recommend that you + // use the KeySpec parameter instead. + NumberOfBytes *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GenerateDataKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataKeyInput) GoString() string { + return s.String() +} + +type GenerateDataKeyOutput struct { + _ struct{} `type:"structure"` + + // Ciphertext that contains the encrypted data key. You must store the blob + // and enough information to reconstruct the encryption context so that the + // data encrypted by using the key can later be decrypted. You must provide + // both the ciphertext blob and the encryption context to the Decrypt API to + // recover the plaintext data key and decrypt the object. + // + // If you are using the CLI, the value is Base64 encoded. Otherwise, it is + // not encoded. + CiphertextBlob []byte `min:"1" type:"blob"` + + // System generated unique identifier of the key to be used to decrypt the encrypted + // copy of the data key. + KeyId *string `min:"1" type:"string"` + + // Plaintext that contains the data key. 
Use this for encryption and decryption + // and then remove it from memory as soon as possible. + Plaintext []byte `min:"1" type:"blob"` +} + +// String returns the string representation +func (s GenerateDataKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataKeyOutput) GoString() string { + return s.String() +} + +type GenerateDataKeyWithoutPlaintextInput struct { + _ struct{} `type:"structure"` + + // Name:value pair that contains additional data to be authenticated during + // the encryption and decryption processes. + EncryptionContext map[string]*string `type:"map"` + + // A list of grant tokens. + // + // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias + // Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` + + // Value that identifies the encryption algorithm and key size. Currently this + // can be AES_128 or AES_256. + KeySpec *string `type:"string" enum:"DataKeySpec"` + + // Integer that contains the number of bytes to generate. Common values are + // 128, 256, 512, 1024 and so on. We recommend that you use the KeySpec parameter + // instead. 
+ NumberOfBytes *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GenerateDataKeyWithoutPlaintextInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataKeyWithoutPlaintextInput) GoString() string { + return s.String() +} + +type GenerateDataKeyWithoutPlaintextOutput struct { + _ struct{} `type:"structure"` + + // Ciphertext that contains the wrapped data key. You must store the blob and + // encryption context so that the key can be used in a future decrypt operation. + // + // If you are using the CLI, the value is Base64 encoded. Otherwise, it is + // not encoded. + CiphertextBlob []byte `min:"1" type:"blob"` + + // System generated unique identifier of the key to be used to decrypt the encrypted + // copy of the data key. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GenerateDataKeyWithoutPlaintextOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataKeyWithoutPlaintextOutput) GoString() string { + return s.String() +} + +type GenerateRandomInput struct { + _ struct{} `type:"structure"` + + // Integer that contains the number of bytes to generate. Common values are + // 128, 256, 512, 1024 and so on. The current limit is 1024 bytes. + NumberOfBytes *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GenerateRandomInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateRandomInput) GoString() string { + return s.String() +} + +type GenerateRandomOutput struct { + _ struct{} `type:"structure"` + + // Plaintext that contains the unpredictable byte string. 
+ Plaintext []byte `min:"1" type:"blob"` +} + +// String returns the string representation +func (s GenerateRandomOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateRandomOutput) GoString() string { + return s.String() +} + +type GetKeyPolicyInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` + + // String that contains the name of the policy. Currently, this must be "default". + // Policy names can be discovered by calling ListKeyPolicies. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetKeyPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetKeyPolicyInput) GoString() string { + return s.String() +} + +type GetKeyPolicyOutput struct { + _ struct{} `type:"structure"` + + // A policy document in JSON format. + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetKeyPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetKeyPolicyOutput) GoString() string { + return s.String() +} + +type GetKeyRotationStatusInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. 
Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetKeyRotationStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetKeyRotationStatusInput) GoString() string { + return s.String() +} + +type GetKeyRotationStatusOutput struct { + _ struct{} `type:"structure"` + + // A Boolean value that specifies whether key rotation is enabled. + KeyRotationEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetKeyRotationStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetKeyRotationStatusOutput) GoString() string { + return s.String() +} + +// A structure for specifying the conditions under which the operations permitted +// by the grant are allowed. +// +// You can use this structure to allow the operations permitted by the grant +// only when a specified encryption context is present. For more information +// about encryption context, see Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html) +// in the AWS Key Management Service Developer Guide. +type GrantConstraints struct { + _ struct{} `type:"structure"` + + // Contains a list of key-value pairs that must be present in the encryption + // context of a subsequent operation permitted by the grant. When a subsequent + // operation permitted by the grant includes an encryption context that matches + // this list, the grant allows the operation. Otherwise, the operation is not + // allowed. 
+ EncryptionContextEquals map[string]*string `type:"map"` + + // Contains a list of key-value pairs, a subset of which must be present in + // the encryption context of a subsequent operation permitted by the grant. + // When a subsequent operation permitted by the grant includes an encryption + // context that matches this list or is a subset of this list, the grant allows + // the operation. Otherwise, the operation is not allowed. + EncryptionContextSubset map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GrantConstraints) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantConstraints) GoString() string { + return s.String() +} + +// Contains information about an entry in a list of grants. +type GrantListEntry struct { + _ struct{} `type:"structure"` + + // The conditions under which the grant's operations are allowed. + Constraints *GrantConstraints `type:"structure"` + + // The date and time when the grant was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The unique identifier for the grant. + GrantId *string `min:"1" type:"string"` + + // The principal that receives the grant's permissions. + GranteePrincipal *string `min:"1" type:"string"` + + // The AWS account under which the grant was issued. + IssuingAccount *string `min:"1" type:"string"` + + // The unique identifier for the customer master key (CMK) to which the grant + // applies. + KeyId *string `min:"1" type:"string"` + + // The friendly name that identifies the grant. If a name was provided in the + // CreateGrant request, that name is returned. Otherwise this value is null. + Name *string `min:"1" type:"string"` + + // The list of operations permitted by the grant. + Operations []*string `type:"list"` + + // The principal that can retire the grant. 
+ RetiringPrincipal *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GrantListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantListEntry) GoString() string { + return s.String() +} + +// Contains information about each entry in the key list. +type KeyListEntry struct { + _ struct{} `type:"structure"` + + // ARN of the key. + KeyArn *string `min:"20" type:"string"` + + // Unique identifier of the key. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s KeyListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyListEntry) GoString() string { + return s.String() +} + +// Contains metadata about a customer master key (CMK). +// +// This data type is used as a response element for the CreateKey and DescribeKey +// operations. +type KeyMetadata struct { + _ struct{} `type:"structure"` + + // The twelve-digit account ID of the AWS account that owns the key. + AWSAccountId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the key. For examples, see AWS Key Management + // Service (AWS KMS) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) + // in the Example ARNs section of the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The date and time when the key was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The date and time after which AWS KMS deletes the customer master key (CMK). + // This value is present only when KeyState is PendingDeletion, otherwise this + // value is null. + DeletionDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The friendly description of the key. + Description *string `type:"string"` + + // Specifies whether the key is enabled. 
When KeyState is Enabled this value + // is true, otherwise it is false. + Enabled *bool `type:"boolean"` + + // The globally unique identifier for the key. + KeyId *string `min:"1" type:"string" required:"true"` + + // The state of the customer master key (CMK). + // + // For more information about how key state affects the use of a CMK, go to + // How Key State Affects the Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // in the AWS Key Management Service Developer Guide. + KeyState *string `type:"string" enum:"KeyState"` + + // The cryptographic operations for which you can use the key. Currently the + // only allowed value is ENCRYPT_DECRYPT, which means you can use the key for + // the Encrypt and Decrypt operations. + KeyUsage *string `type:"string" enum:"KeyUsageType"` +} + +// String returns the string representation +func (s KeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyMetadata) GoString() string { + return s.String() +} + +type ListAliasesInput struct { + _ struct{} `type:"structure"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 100, inclusive. If you do not include a value, it defaults to 50. + Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you've received a response with truncated results. Set it to + // the value of NextMarker from the response you just received. 
+ Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesInput) GoString() string { + return s.String() +} + +type ListAliasesOutput struct { + _ struct{} `type:"structure"` + + // A list of key aliases in the user's account. + Aliases []*AliasListEntry `type:"list"` + + // When Truncated is true, this value is present and contains the value to use + // for the Marker parameter in a subsequent pagination request. + NextMarker *string `min:"1" type:"string"` + + // A flag that indicates whether there are more items in the list. If your results + // were truncated, you can use the Marker parameter to make a subsequent pagination + // request to retrieve more items in the list. + Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ListAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesOutput) GoString() string { + return s.String() +} + +type ListGrantsInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 100, inclusive. If you do not include a value, it defaults to 50. 
+ Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you've received a response with truncated results. Set it to + // the value of NextMarker from the response you just received. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGrantsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGrantsInput) GoString() string { + return s.String() +} + +type ListGrantsResponse struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*GrantListEntry `type:"list"` + + // When Truncated is true, this value is present and contains the value to use + // for the Marker parameter in a subsequent pagination request. + NextMarker *string `min:"1" type:"string"` + + // A flag that indicates whether there are more items in the list. If your results + // were truncated, you can use the Marker parameter to make a subsequent pagination + // request to retrieve more items in the list. + Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ListGrantsResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGrantsResponse) GoString() string { + return s.String() +} + +type ListKeyPoliciesInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier, a fully specified ARN to either an alias or a key, or + // an alias name prefixed by "alias/". 
Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias + // Name Example - alias/MyAliasName + KeyId *string `min:"1" type:"string" required:"true"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 1000, inclusive. If you do not include a value, it defaults to 100. + // + // Currently only 1 policy can be attached to a key. + Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you've received a response with truncated results. Set it to + // the value of NextMarker from the response you just received. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListKeyPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListKeyPoliciesInput) GoString() string { + return s.String() +} + +type ListKeyPoliciesOutput struct { + _ struct{} `type:"structure"` + + // When Truncated is true, this value is present and contains the value to use + // for the Marker parameter in a subsequent pagination request. + NextMarker *string `min:"1" type:"string"` + + // A list of policy names. Currently, there is only one policy and it is named + // "Default". + PolicyNames []*string `type:"list"` + + // A flag that indicates whether there are more items in the list. If your results + // were truncated, you can use the Marker parameter to make a subsequent pagination + // request to retrieve more items in the list. 
+ Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ListKeyPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListKeyPoliciesOutput) GoString() string { + return s.String() +} + +type ListKeysInput struct { + _ struct{} `type:"structure"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 1000, inclusive. If you do not include a value, it defaults to 100. + Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you've received a response with truncated results. Set it to + // the value of NextMarker from the response you just received. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListKeysInput) GoString() string { + return s.String() +} + +type ListKeysOutput struct { + _ struct{} `type:"structure"` + + // A list of keys. + Keys []*KeyListEntry `type:"list"` + + // When Truncated is true, this value is present and contains the value to use + // for the Marker parameter in a subsequent pagination request. + NextMarker *string `min:"1" type:"string"` + + // A flag that indicates whether there are more items in the list. If your results + // were truncated, you can use the Marker parameter to make a subsequent pagination + // request to retrieve more items in the list. 
+ Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ListKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListKeysOutput) GoString() string { + return s.String() +} + +type ListRetirableGrantsInput struct { + _ struct{} `type:"structure"` + + // When paginating results, specify the maximum number of items to return in + // the response. If additional items exist beyond the number you specify, the + // Truncated element in the response is set to true. + // + // This value is optional. If you include a value, it must be between 1 and + // 100, inclusive. If you do not include a value, it defaults to 50. + Limit *int64 `min:"1" type:"integer"` + + // Use this parameter only when paginating results and only in a subsequent + // request after you've received a response with truncated results. Set it to + // the value of NextMarker from the response you just received. + Marker *string `min:"1" type:"string"` + + // The retiring principal for which to list grants. + // + // To specify the retiring principal, use the Amazon Resource Name (ARN) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an AWS principal. Valid AWS principals include AWS accounts (root), IAM + // users, federated users, and assumed role users. For examples of the ARN syntax + // for specifying a principal, go to AWS Identity and Access Management (IAM) + // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the Amazon Web Services General Reference. 
+ RetiringPrincipal *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRetirableGrantsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRetirableGrantsInput) GoString() string { + return s.String() +} + +type PutKeyPolicyInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` + + // The policy to attach to the key. This is required and delegates back to the + // account. The key is the root of trust. The policy size limit is 32 KiB (32768 + // bytes). + Policy *string `min:"1" type:"string" required:"true"` + + // Name of the policy to be attached. Currently, the only supported name is + // "default". + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutKeyPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutKeyPolicyInput) GoString() string { + return s.String() +} + +type PutKeyPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutKeyPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutKeyPolicyOutput) GoString() string { + return s.String() +} + +type ReEncryptInput struct { + _ struct{} `type:"structure"` + + // Ciphertext of the data to re-encrypt. + CiphertextBlob []byte `min:"1" type:"blob" required:"true"` + + // Encryption context to be used when the data is re-encrypted. 
+ DestinationEncryptionContext map[string]*string `type:"map"` + + // A unique identifier for the customer master key used to re-encrypt the data. + // This value can be a globally unique identifier, a fully specified ARN to + // either an alias or a key, or an alias name prefixed by "alias/". Key ARN + // Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias + // Name Example - alias/MyAliasName + DestinationKeyId *string `min:"1" type:"string" required:"true"` + + // A list of grant tokens. + // + // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // in the AWS Key Management Service Developer Guide. + GrantTokens []*string `type:"list"` + + // Encryption context used to encrypt and decrypt the data specified in the + // CiphertextBlob parameter. + SourceEncryptionContext map[string]*string `type:"map"` +} + +// String returns the string representation +func (s ReEncryptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReEncryptInput) GoString() string { + return s.String() +} + +type ReEncryptOutput struct { + _ struct{} `type:"structure"` + + // The re-encrypted data. If you are using the CLI, the value is Base64 encoded. + // Otherwise, it is not encoded. + CiphertextBlob []byte `min:"1" type:"blob"` + + // Unique identifier of the key used to re-encrypt the data. + KeyId *string `min:"1" type:"string"` + + // Unique identifier of the key used to originally encrypt the data. 
+ SourceKeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ReEncryptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReEncryptOutput) GoString() string { + return s.String() +} + +type RetireGrantInput struct { + _ struct{} `type:"structure"` + + // Unique identifier of the grant to be retired. The grant ID is returned by + // the CreateGrant function. Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123 + GrantId *string `min:"1" type:"string"` + + // Token that identifies the grant to be retired. + GrantToken *string `min:"1" type:"string"` + + // A unique identifier for the customer master key associated with the grant. + // This value can be a globally unique identifier or a fully specified ARN of + // the key. Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RetireGrantInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetireGrantInput) GoString() string { + return s.String() +} + +type RetireGrantOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RetireGrantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetireGrantOutput) GoString() string { + return s.String() +} + +type RevokeGrantInput struct { + _ struct{} `type:"structure"` + + // Identifier of the grant to be revoked. + GrantId *string `min:"1" type:"string" required:"true"` + + // A unique identifier for the customer master key associated with the grant. + // This value can be a globally unique identifier or the fully specified ARN + // to a key. 
Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RevokeGrantInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeGrantInput) GoString() string { + return s.String() +} + +type RevokeGrantOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RevokeGrantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeGrantOutput) GoString() string { + return s.String() +} + +type ScheduleKeyDeletionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the customer master key (CMK) to delete. + // + // To specify this value, use the unique key ID or the Amazon Resource Name + // (ARN) of the CMK. Examples: Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // + // To obtain the unique key ID and key ARN for a given CMK, use ListKeys or + // DescribeKey. + KeyId *string `min:"1" type:"string" required:"true"` + + // The waiting period, specified in number of days. After the waiting period + // ends, AWS KMS deletes the customer master key (CMK). + // + // This value is optional. If you include a value, it must be between 7 and + // 30, inclusive. If you do not include a value, it defaults to 30. 
+ PendingWindowInDays *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ScheduleKeyDeletionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleKeyDeletionInput) GoString() string { + return s.String() +} + +type ScheduleKeyDeletionOutput struct { + _ struct{} `type:"structure"` + + // The date and time after which AWS KMS deletes the customer master key (CMK). + DeletionDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The unique identifier of the customer master key (CMK) for which deletion + // is scheduled. + KeyId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ScheduleKeyDeletionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleKeyDeletionOutput) GoString() string { + return s.String() +} + +type UpdateAliasInput struct { + _ struct{} `type:"structure"` + + // String that contains the name of the alias to be modified. The name must + // start with the word "alias" followed by a forward slash (alias/). Aliases + // that begin with "alias/aws" are reserved. + AliasName *string `min:"1" type:"string" required:"true"` + + // Unique identifier of the customer master key to be mapped to the alias. This + // value can be a globally unique identifier or the fully specified ARN of a + // key. Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // You can call ListAliases to verify that the alias is mapped to the correct + // TargetKeyId. 
+ TargetKeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAliasInput) GoString() string { + return s.String() +} + +type UpdateAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAliasOutput) GoString() string { + return s.String() +} + +type UpdateKeyDescriptionInput struct { + _ struct{} `type:"structure"` + + // New description for the key. + Description *string `type:"string" required:"true"` + + // A unique identifier for the customer master key. This value can be a globally + // unique identifier or the fully specified ARN to a key. Key ARN Example - + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateKeyDescriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateKeyDescriptionInput) GoString() string { + return s.String() +} + +type UpdateKeyDescriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateKeyDescriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateKeyDescriptionOutput) GoString() string { + return s.String() +} + +const ( + // @enum DataKeySpec + DataKeySpecAes256 = "AES_256" + // @enum DataKeySpec + DataKeySpecAes128 = "AES_128" +) + +const ( + // @enum GrantOperation + GrantOperationDecrypt = "Decrypt" + // @enum GrantOperation + 
GrantOperationEncrypt = "Encrypt" + // @enum GrantOperation + GrantOperationGenerateDataKey = "GenerateDataKey" + // @enum GrantOperation + GrantOperationGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" + // @enum GrantOperation + GrantOperationReEncryptFrom = "ReEncryptFrom" + // @enum GrantOperation + GrantOperationReEncryptTo = "ReEncryptTo" + // @enum GrantOperation + GrantOperationCreateGrant = "CreateGrant" + // @enum GrantOperation + GrantOperationRetireGrant = "RetireGrant" + // @enum GrantOperation + GrantOperationDescribeKey = "DescribeKey" +) + +const ( + // @enum KeyState + KeyStateEnabled = "Enabled" + // @enum KeyState + KeyStateDisabled = "Disabled" + // @enum KeyState + KeyStatePendingDeletion = "PendingDeletion" +) + +const ( + // @enum KeyUsageType + KeyUsageTypeEncryptDecrypt = "ENCRYPT_DECRYPT" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,662 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package kms_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kms" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleKMS_CancelKeyDeletion() { + svc := kms.New(session.New()) + + params := &kms.CancelKeyDeletionInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.CancelKeyDeletion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_CreateAlias() { + svc := kms.New(session.New()) + + params := &kms.CreateAliasInput{ + AliasName: aws.String("AliasNameType"), // Required + TargetKeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_CreateGrant() { + svc := kms.New(session.New()) + + params := &kms.CreateGrantInput{ + GranteePrincipal: aws.String("PrincipalIdType"), // Required + KeyId: aws.String("KeyIdType"), // Required + Constraints: &kms.GrantConstraints{ + EncryptionContextEquals: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + EncryptionContextSubset: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + Name: aws.String("GrantNameType"), + Operations: []*string{ + aws.String("GrantOperation"), // Required + // More values... + }, + RetiringPrincipal: aws.String("PrincipalIdType"), + } + resp, err := svc.CreateGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKMS_CreateKey() { + svc := kms.New(session.New()) + + params := &kms.CreateKeyInput{ + Description: aws.String("DescriptionType"), + KeyUsage: aws.String("KeyUsageType"), + Policy: aws.String("PolicyType"), + } + resp, err := svc.CreateKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_Decrypt() { + svc := kms.New(session.New()) + + params := &kms.DecryptInput{ + CiphertextBlob: []byte("PAYLOAD"), // Required + EncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + } + resp, err := svc.Decrypt(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_DeleteAlias() { + svc := kms.New(session.New()) + + params := &kms.DeleteAliasInput{ + AliasName: aws.String("AliasNameType"), // Required + } + resp, err := svc.DeleteAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_DescribeKey() { + svc := kms.New(session.New()) + + params := &kms.DescribeKeyInput{ + KeyId: aws.String("KeyIdType"), // Required + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + } + resp, err := svc.DescribeKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_DisableKey() { + svc := kms.New(session.New()) + + params := &kms.DisableKeyInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.DisableKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_DisableKeyRotation() { + svc := kms.New(session.New()) + + params := &kms.DisableKeyRotationInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.DisableKeyRotation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_EnableKey() { + svc := kms.New(session.New()) + + params := &kms.EnableKeyInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.EnableKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_EnableKeyRotation() { + svc := kms.New(session.New()) + + params := &kms.EnableKeyRotationInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.EnableKeyRotation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKMS_Encrypt() { + svc := kms.New(session.New()) + + params := &kms.EncryptInput{ + KeyId: aws.String("KeyIdType"), // Required + Plaintext: []byte("PAYLOAD"), // Required + EncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + } + resp, err := svc.Encrypt(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GenerateDataKey() { + svc := kms.New(session.New()) + + params := &kms.GenerateDataKeyInput{ + KeyId: aws.String("KeyIdType"), // Required + EncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + KeySpec: aws.String("DataKeySpec"), + NumberOfBytes: aws.Int64(1), + } + resp, err := svc.GenerateDataKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GenerateDataKeyWithoutPlaintext() { + svc := kms.New(session.New()) + + params := &kms.GenerateDataKeyWithoutPlaintextInput{ + KeyId: aws.String("KeyIdType"), // Required + EncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... 
+ }, + KeySpec: aws.String("DataKeySpec"), + NumberOfBytes: aws.Int64(1), + } + resp, err := svc.GenerateDataKeyWithoutPlaintext(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GenerateRandom() { + svc := kms.New(session.New()) + + params := &kms.GenerateRandomInput{ + NumberOfBytes: aws.Int64(1), + } + resp, err := svc.GenerateRandom(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GetKeyPolicy() { + svc := kms.New(session.New()) + + params := &kms.GetKeyPolicyInput{ + KeyId: aws.String("KeyIdType"), // Required + PolicyName: aws.String("PolicyNameType"), // Required + } + resp, err := svc.GetKeyPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_GetKeyRotationStatus() { + svc := kms.New(session.New()) + + params := &kms.GetKeyRotationStatusInput{ + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.GetKeyRotationStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListAliases() { + svc := kms.New(session.New()) + + params := &kms.ListAliasesInput{ + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListGrants() { + svc := kms.New(session.New()) + + params := &kms.ListGrantsInput{ + KeyId: aws.String("KeyIdType"), // Required + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListGrants(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListKeyPolicies() { + svc := kms.New(session.New()) + + params := &kms.ListKeyPoliciesInput{ + KeyId: aws.String("KeyIdType"), // Required + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListKeyPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListKeys() { + svc := kms.New(session.New()) + + params := &kms.ListKeysInput{ + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListKeys(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ListRetirableGrants() { + svc := kms.New(session.New()) + + params := &kms.ListRetirableGrantsInput{ + RetiringPrincipal: aws.String("PrincipalIdType"), // Required + Limit: aws.Int64(1), + Marker: aws.String("MarkerType"), + } + resp, err := svc.ListRetirableGrants(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKMS_PutKeyPolicy() { + svc := kms.New(session.New()) + + params := &kms.PutKeyPolicyInput{ + KeyId: aws.String("KeyIdType"), // Required + Policy: aws.String("PolicyType"), // Required + PolicyName: aws.String("PolicyNameType"), // Required + } + resp, err := svc.PutKeyPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ReEncrypt() { + svc := kms.New(session.New()) + + params := &kms.ReEncryptInput{ + CiphertextBlob: []byte("PAYLOAD"), // Required + DestinationKeyId: aws.String("KeyIdType"), // Required + DestinationEncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + GrantTokens: []*string{ + aws.String("GrantTokenType"), // Required + // More values... + }, + SourceEncryptionContext: map[string]*string{ + "Key": aws.String("EncryptionContextValue"), // Required + // More values... + }, + } + resp, err := svc.ReEncrypt(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_RetireGrant() { + svc := kms.New(session.New()) + + params := &kms.RetireGrantInput{ + GrantId: aws.String("GrantIdType"), + GrantToken: aws.String("GrantTokenType"), + KeyId: aws.String("KeyIdType"), + } + resp, err := svc.RetireGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleKMS_RevokeGrant() { + svc := kms.New(session.New()) + + params := &kms.RevokeGrantInput{ + GrantId: aws.String("GrantIdType"), // Required + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.RevokeGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_ScheduleKeyDeletion() { + svc := kms.New(session.New()) + + params := &kms.ScheduleKeyDeletionInput{ + KeyId: aws.String("KeyIdType"), // Required + PendingWindowInDays: aws.Int64(1), + } + resp, err := svc.ScheduleKeyDeletion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_UpdateAlias() { + svc := kms.New(session.New()) + + params := &kms.UpdateAliasInput{ + AliasName: aws.String("AliasNameType"), // Required + TargetKeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.UpdateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleKMS_UpdateKeyDescription() { + svc := kms.New(session.New()) + + params := &kms.UpdateKeyDescriptionInput{ + Description: aws.String("DescriptionType"), // Required + KeyId: aws.String("KeyIdType"), // Required + } + resp, err := svc.UpdateKeyDescription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,138 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package kmsiface provides an interface for the AWS Key Management Service. +package kmsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/kms" +) + +// KMSAPI is the interface type for kms.KMS. +type KMSAPI interface { + CancelKeyDeletionRequest(*kms.CancelKeyDeletionInput) (*request.Request, *kms.CancelKeyDeletionOutput) + + CancelKeyDeletion(*kms.CancelKeyDeletionInput) (*kms.CancelKeyDeletionOutput, error) + + CreateAliasRequest(*kms.CreateAliasInput) (*request.Request, *kms.CreateAliasOutput) + + CreateAlias(*kms.CreateAliasInput) (*kms.CreateAliasOutput, error) + + CreateGrantRequest(*kms.CreateGrantInput) (*request.Request, *kms.CreateGrantOutput) + + CreateGrant(*kms.CreateGrantInput) (*kms.CreateGrantOutput, error) + + CreateKeyRequest(*kms.CreateKeyInput) (*request.Request, *kms.CreateKeyOutput) + + CreateKey(*kms.CreateKeyInput) (*kms.CreateKeyOutput, error) + + DecryptRequest(*kms.DecryptInput) (*request.Request, *kms.DecryptOutput) + + Decrypt(*kms.DecryptInput) (*kms.DecryptOutput, error) + + DeleteAliasRequest(*kms.DeleteAliasInput) (*request.Request, *kms.DeleteAliasOutput) + + DeleteAlias(*kms.DeleteAliasInput) (*kms.DeleteAliasOutput, error) + + DescribeKeyRequest(*kms.DescribeKeyInput) (*request.Request, *kms.DescribeKeyOutput) + + DescribeKey(*kms.DescribeKeyInput) 
(*kms.DescribeKeyOutput, error) + + DisableKeyRequest(*kms.DisableKeyInput) (*request.Request, *kms.DisableKeyOutput) + + DisableKey(*kms.DisableKeyInput) (*kms.DisableKeyOutput, error) + + DisableKeyRotationRequest(*kms.DisableKeyRotationInput) (*request.Request, *kms.DisableKeyRotationOutput) + + DisableKeyRotation(*kms.DisableKeyRotationInput) (*kms.DisableKeyRotationOutput, error) + + EnableKeyRequest(*kms.EnableKeyInput) (*request.Request, *kms.EnableKeyOutput) + + EnableKey(*kms.EnableKeyInput) (*kms.EnableKeyOutput, error) + + EnableKeyRotationRequest(*kms.EnableKeyRotationInput) (*request.Request, *kms.EnableKeyRotationOutput) + + EnableKeyRotation(*kms.EnableKeyRotationInput) (*kms.EnableKeyRotationOutput, error) + + EncryptRequest(*kms.EncryptInput) (*request.Request, *kms.EncryptOutput) + + Encrypt(*kms.EncryptInput) (*kms.EncryptOutput, error) + + GenerateDataKeyRequest(*kms.GenerateDataKeyInput) (*request.Request, *kms.GenerateDataKeyOutput) + + GenerateDataKey(*kms.GenerateDataKeyInput) (*kms.GenerateDataKeyOutput, error) + + GenerateDataKeyWithoutPlaintextRequest(*kms.GenerateDataKeyWithoutPlaintextInput) (*request.Request, *kms.GenerateDataKeyWithoutPlaintextOutput) + + GenerateDataKeyWithoutPlaintext(*kms.GenerateDataKeyWithoutPlaintextInput) (*kms.GenerateDataKeyWithoutPlaintextOutput, error) + + GenerateRandomRequest(*kms.GenerateRandomInput) (*request.Request, *kms.GenerateRandomOutput) + + GenerateRandom(*kms.GenerateRandomInput) (*kms.GenerateRandomOutput, error) + + GetKeyPolicyRequest(*kms.GetKeyPolicyInput) (*request.Request, *kms.GetKeyPolicyOutput) + + GetKeyPolicy(*kms.GetKeyPolicyInput) (*kms.GetKeyPolicyOutput, error) + + GetKeyRotationStatusRequest(*kms.GetKeyRotationStatusInput) (*request.Request, *kms.GetKeyRotationStatusOutput) + + GetKeyRotationStatus(*kms.GetKeyRotationStatusInput) (*kms.GetKeyRotationStatusOutput, error) + + ListAliasesRequest(*kms.ListAliasesInput) (*request.Request, *kms.ListAliasesOutput) + + 
ListAliases(*kms.ListAliasesInput) (*kms.ListAliasesOutput, error) + + ListAliasesPages(*kms.ListAliasesInput, func(*kms.ListAliasesOutput, bool) bool) error + + ListGrantsRequest(*kms.ListGrantsInput) (*request.Request, *kms.ListGrantsResponse) + + ListGrants(*kms.ListGrantsInput) (*kms.ListGrantsResponse, error) + + ListGrantsPages(*kms.ListGrantsInput, func(*kms.ListGrantsResponse, bool) bool) error + + ListKeyPoliciesRequest(*kms.ListKeyPoliciesInput) (*request.Request, *kms.ListKeyPoliciesOutput) + + ListKeyPolicies(*kms.ListKeyPoliciesInput) (*kms.ListKeyPoliciesOutput, error) + + ListKeyPoliciesPages(*kms.ListKeyPoliciesInput, func(*kms.ListKeyPoliciesOutput, bool) bool) error + + ListKeysRequest(*kms.ListKeysInput) (*request.Request, *kms.ListKeysOutput) + + ListKeys(*kms.ListKeysInput) (*kms.ListKeysOutput, error) + + ListKeysPages(*kms.ListKeysInput, func(*kms.ListKeysOutput, bool) bool) error + + ListRetirableGrantsRequest(*kms.ListRetirableGrantsInput) (*request.Request, *kms.ListGrantsResponse) + + ListRetirableGrants(*kms.ListRetirableGrantsInput) (*kms.ListGrantsResponse, error) + + PutKeyPolicyRequest(*kms.PutKeyPolicyInput) (*request.Request, *kms.PutKeyPolicyOutput) + + PutKeyPolicy(*kms.PutKeyPolicyInput) (*kms.PutKeyPolicyOutput, error) + + ReEncryptRequest(*kms.ReEncryptInput) (*request.Request, *kms.ReEncryptOutput) + + ReEncrypt(*kms.ReEncryptInput) (*kms.ReEncryptOutput, error) + + RetireGrantRequest(*kms.RetireGrantInput) (*request.Request, *kms.RetireGrantOutput) + + RetireGrant(*kms.RetireGrantInput) (*kms.RetireGrantOutput, error) + + RevokeGrantRequest(*kms.RevokeGrantInput) (*request.Request, *kms.RevokeGrantOutput) + + RevokeGrant(*kms.RevokeGrantInput) (*kms.RevokeGrantOutput, error) + + ScheduleKeyDeletionRequest(*kms.ScheduleKeyDeletionInput) (*request.Request, *kms.ScheduleKeyDeletionOutput) + + ScheduleKeyDeletion(*kms.ScheduleKeyDeletionInput) (*kms.ScheduleKeyDeletionOutput, error) + + UpdateAliasRequest(*kms.UpdateAliasInput) 
(*request.Request, *kms.UpdateAliasOutput) + + UpdateAlias(*kms.UpdateAliasInput) (*kms.UpdateAliasOutput, error) + + UpdateKeyDescriptionRequest(*kms.UpdateKeyDescriptionInput) (*request.Request, *kms.UpdateKeyDescriptionOutput) + + UpdateKeyDescription(*kms.UpdateKeyDescriptionInput) (*kms.UpdateKeyDescriptionOutput, error) +} + +var _ KMSAPI = (*kms.KMS)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/kms/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,146 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package kms + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS Key Management Service (AWS KMS) is an encryption and key management +// web service. This guide describes the AWS KMS operations that you can call +// programmatically. For general information about AWS KMS, see the AWS Key +// Management Service Developer Guide (http://docs.aws.amazon.com/kms/latest/developerguide/). +// +// AWS provides SDKs that consist of libraries and sample code for various +// programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). +// The SDKs provide a convenient way to create programmatic access to AWS KMS +// and other AWS services. For example, the SDKs take care of tasks such as +// signing requests (see below), managing errors, and retrying requests automatically. 
+// For more information about the AWS SDKs, including how to download and install +// them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). +// +// We recommend that you use the AWS SDKs to make programmatic API calls to +// AWS KMS. +// +// Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS +// 1.2. Clients must also support cipher suites with Perfect Forward Secrecy +// (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral +// Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support +// these modes. +// +// Signing Requests +// +// Requests must be signed by using an access key ID and a secret access key. +// We strongly recommend that you do not use your AWS account access key ID +// and secret key for everyday work with AWS KMS. Instead, use the access key +// ID and secret access key for an IAM user, or you can use the AWS Security +// Token Service to generate temporary security credentials that you can use +// to sign requests. +// +// All AWS KMS operations require Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// Logging API Requests +// +// AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related +// events for your AWS account and delivers them to an Amazon S3 bucket that +// you specify. By using the information collected by CloudTrail, you can determine +// what requests were made to AWS KMS, who made the request, when it was made, +// and so on. To learn more about CloudTrail, including how to turn it on and +// find your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/). 
+// +// Additional Resources +// +// For more information about credentials and request signing, see the following: +// +// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +// - This topic provides general information about the types of credentials +// used for accessing AWS. AWS Security Token Service (http://docs.aws.amazon.com/STS/latest/UsingSTS/) +// - This guide describes how to create and use temporary security credentials. +// Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// - This set of topics walks you through the process of signing a request using +// an access key ID and a secret access key. Commonly Used APIs +// +// Of the APIs discussed in this guide, the following will prove the most +// useful for most applications. You will likely perform actions other than +// these, such as creating keys and assigning policies, by using the console. +// +// Encrypt Decrypt GenerateDataKey GenerateDataKeyWithoutPlaintext +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type KMS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "kms" + +// New creates a new instance of the KMS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a KMS client from just a session. 
+// svc := kms.New(mySession) +// +// // Create a KMS client with additional configuration +// svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *KMS { + svc := &KMS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-11-01", + JSONVersion: "1.1", + TargetPrefix: "TrentService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a KMS operation and runs any +// custom request initialization. 
+func (c *KMS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2158 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package lambda provides a client for AWS Lambda. +package lambda + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opAddPermission = "AddPermission" + +// AddPermissionRequest generates a request for the AddPermission operation. +func (c *Lambda) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { + op := &request.Operation{ + Name: opAddPermission, + HTTPMethod: "POST", + HTTPPath: "/2015-03-31/functions/{FunctionName}/policy", + } + + if input == nil { + input = &AddPermissionInput{} + } + + req = c.newRequest(op, input, output) + output = &AddPermissionOutput{} + req.Data = output + return +} + +// Adds a permission to the resource policy associated with the specified AWS +// Lambda function. You use resource policies to grant permissions to event +// sources that use "push" model. In "push" model, event sources (such as Amazon +// S3 and custom applications) invoke your Lambda function. 
Each permission +// you add to the resource policy allows an event source, permission to invoke +// the Lambda function. +// +// For information about the push model, see AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html). +// +// If you are using versioning feature (see AWS Lambda Function Versioning +// and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html)), +// a Lambda function can have multiple ARNs that can be used to invoke the function. +// Note that, each permission you add to resource policy using this API is specific +// to an ARN, specified using the Qualifier parameter +// +// This operation requires permission for the lambda:AddPermission action. +func (c *Lambda) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { + req, out := c.AddPermissionRequest(input) + err := req.Send() + return out, err +} + +const opCreateAlias = "CreateAlias" + +// CreateAliasRequest generates a request for the CreateAlias operation. +func (c *Lambda) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *AliasConfiguration) { + op := &request.Operation{ + Name: opCreateAlias, + HTTPMethod: "POST", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases", + } + + if input == nil { + input = &CreateAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &AliasConfiguration{} + req.Data = output + return +} + +// Creates an alias to the specified Lambda function version. For more information, +// see Introduction to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html) +// +// This requires permission for the lambda:CreateAlias action. 
+func (c *Lambda) CreateAlias(input *CreateAliasInput) (*AliasConfiguration, error) { + req, out := c.CreateAliasRequest(input) + err := req.Send() + return out, err +} + +const opCreateEventSourceMapping = "CreateEventSourceMapping" + +// CreateEventSourceMappingRequest generates a request for the CreateEventSourceMapping operation. +func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { + op := &request.Operation{ + Name: opCreateEventSourceMapping, + HTTPMethod: "POST", + HTTPPath: "/2015-03-31/event-source-mappings/", + } + + if input == nil { + input = &CreateEventSourceMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &EventSourceMappingConfiguration{} + req.Data = output + return +} + +// Identifies a stream as an event source for a Lambda function. It can be either +// an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda invokes +// the specified function when records are posted to the stream. +// +// This is the pull model, where AWS Lambda invokes the function. For more +// information, go to AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html) +// in the AWS Lambda Developer Guide. +// +// This association between an Amazon Kinesis stream and a Lambda function +// is called the event source mapping. You provide the configuration information +// (for example, which stream to read from and which Lambda function to invoke) +// for the event source mapping in the request body. +// +// Each event source, such as an Amazon Kinesis or a DynamoDB stream, can +// be associated with multiple AWS Lambda function. A given Lambda function +// can be associated with multiple AWS event sources. +// +// This operation requires permission for the lambda:CreateEventSourceMapping +// action. 
+func (c *Lambda) CreateEventSourceMapping(input *CreateEventSourceMappingInput) (*EventSourceMappingConfiguration, error) { + req, out := c.CreateEventSourceMappingRequest(input) + err := req.Send() + return out, err +} + +const opCreateFunction = "CreateFunction" + +// CreateFunctionRequest generates a request for the CreateFunction operation. +func (c *Lambda) CreateFunctionRequest(input *CreateFunctionInput) (req *request.Request, output *FunctionConfiguration) { + op := &request.Operation{ + Name: opCreateFunction, + HTTPMethod: "POST", + HTTPPath: "/2015-03-31/functions", + } + + if input == nil { + input = &CreateFunctionInput{} + } + + req = c.newRequest(op, input, output) + output = &FunctionConfiguration{} + req.Data = output + return +} + +// Creates a new Lambda function. The function metadata is created from the +// request parameters, and the code for the function is provided by a .zip file +// in the request body. If the function name already exists, the operation will +// fail. Note that the function name is case-sensitive. +// +// This operation requires permission for the lambda:CreateFunction action. +func (c *Lambda) CreateFunction(input *CreateFunctionInput) (*FunctionConfiguration, error) { + req, out := c.CreateFunctionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAlias = "DeleteAlias" + +// DeleteAliasRequest generates a request for the DeleteAlias operation. 
+func (c *Lambda) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { + op := &request.Operation{ + Name: opDeleteAlias, + HTTPMethod: "DELETE", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", + } + + if input == nil { + input = &DeleteAliasInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAliasOutput{} + req.Data = output + return +} + +// Deletes specified Lambda function alias. For more information, see Introduction +// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html) +// +// This requires permission for the lambda:DeleteAlias action. +func (c *Lambda) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { + req, out := c.DeleteAliasRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEventSourceMapping = "DeleteEventSourceMapping" + +// DeleteEventSourceMappingRequest generates a request for the DeleteEventSourceMapping operation. +func (c *Lambda) DeleteEventSourceMappingRequest(input *DeleteEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { + op := &request.Operation{ + Name: opDeleteEventSourceMapping, + HTTPMethod: "DELETE", + HTTPPath: "/2015-03-31/event-source-mappings/{UUID}", + } + + if input == nil { + input = &DeleteEventSourceMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &EventSourceMappingConfiguration{} + req.Data = output + return +} + +// Removes an event source mapping. This means AWS Lambda will no longer invoke +// the function for events in the associated source. +// +// This operation requires permission for the lambda:DeleteEventSourceMapping +// action. 
+func (c *Lambda) DeleteEventSourceMapping(input *DeleteEventSourceMappingInput) (*EventSourceMappingConfiguration, error) { + req, out := c.DeleteEventSourceMappingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteFunction = "DeleteFunction" + +// DeleteFunctionRequest generates a request for the DeleteFunction operation. +func (c *Lambda) DeleteFunctionRequest(input *DeleteFunctionInput) (req *request.Request, output *DeleteFunctionOutput) { + op := &request.Operation{ + Name: opDeleteFunction, + HTTPMethod: "DELETE", + HTTPPath: "/2015-03-31/functions/{FunctionName}", + } + + if input == nil { + input = &DeleteFunctionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteFunctionOutput{} + req.Data = output + return +} + +// Deletes the specified Lambda function code and configuration. +// +// If you don't specify a function version, AWS Lambda will delete the function, +// including all its versions, and any aliases pointing to the function versions. +// +// When you delete a function the associated resource policy is also deleted. +// You will need to delete the event source mappings explicitly. +// +// For information about function versioning, see AWS Lambda Function Versioning +// and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html). +// +// This operation requires permission for the lambda:DeleteFunction action. +func (c *Lambda) DeleteFunction(input *DeleteFunctionInput) (*DeleteFunctionOutput, error) { + req, out := c.DeleteFunctionRequest(input) + err := req.Send() + return out, err +} + +const opGetAlias = "GetAlias" + +// GetAliasRequest generates a request for the GetAlias operation. 
+func (c *Lambda) GetAliasRequest(input *GetAliasInput) (req *request.Request, output *AliasConfiguration) { + op := &request.Operation{ + Name: opGetAlias, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", + } + + if input == nil { + input = &GetAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &AliasConfiguration{} + req.Data = output + return +} + +// Returns the specified alias information such as the alias ARN, description, +// and function version it is pointing to. For more information, see Introduction +// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html) +// +// This requires permission for the lambda:GetAlias action. +func (c *Lambda) GetAlias(input *GetAliasInput) (*AliasConfiguration, error) { + req, out := c.GetAliasRequest(input) + err := req.Send() + return out, err +} + +const opGetEventSourceMapping = "GetEventSourceMapping" + +// GetEventSourceMappingRequest generates a request for the GetEventSourceMapping operation. +func (c *Lambda) GetEventSourceMappingRequest(input *GetEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { + op := &request.Operation{ + Name: opGetEventSourceMapping, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/event-source-mappings/{UUID}", + } + + if input == nil { + input = &GetEventSourceMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &EventSourceMappingConfiguration{} + req.Data = output + return +} + +// Returns configuration information for the specified event source mapping +// (see CreateEventSourceMapping). +// +// This operation requires permission for the lambda:GetEventSourceMapping +// action. 
+func (c *Lambda) GetEventSourceMapping(input *GetEventSourceMappingInput) (*EventSourceMappingConfiguration, error) { + req, out := c.GetEventSourceMappingRequest(input) + err := req.Send() + return out, err +} + +const opGetFunction = "GetFunction" + +// GetFunctionRequest generates a request for the GetFunction operation. +func (c *Lambda) GetFunctionRequest(input *GetFunctionInput) (req *request.Request, output *GetFunctionOutput) { + op := &request.Operation{ + Name: opGetFunction, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/functions/{FunctionName}", + } + + if input == nil { + input = &GetFunctionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetFunctionOutput{} + req.Data = output + return +} + +// Returns the configuration information of the Lambda function and a presigned +// URL link to the .zip file you uploaded with CreateFunction so you can download +// the .zip file. Note that the URL is valid for up to 10 minutes. The configuration +// information is the same information you provided as parameters when uploading +// the function. +// +// Using the optional Qualifier parameter, you can specify a specific function +// version for which you want this information. If you don't specify this parameter, +// the API uses unqualified function ARN which return information about the +// $LATEST version of the Lambda function. For more information, see AWS Lambda +// Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html). +// +// This operation requires permission for the lambda:GetFunction action. +func (c *Lambda) GetFunction(input *GetFunctionInput) (*GetFunctionOutput, error) { + req, out := c.GetFunctionRequest(input) + err := req.Send() + return out, err +} + +const opGetFunctionConfiguration = "GetFunctionConfiguration" + +// GetFunctionConfigurationRequest generates a request for the GetFunctionConfiguration operation. 
+func (c *Lambda) GetFunctionConfigurationRequest(input *GetFunctionConfigurationInput) (req *request.Request, output *FunctionConfiguration) { + op := &request.Operation{ + Name: opGetFunctionConfiguration, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/functions/{FunctionName}/configuration", + } + + if input == nil { + input = &GetFunctionConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &FunctionConfiguration{} + req.Data = output + return +} + +// Returns the configuration information of the Lambda function. This the same +// information you provided as parameters when uploading the function by using +// CreateFunction. +// +// You can use the optional Qualifier parameter to retrieve configuration information +// for a specific Lambda function version. If you don't provide it, the API +// returns information about the $LATEST version of the function. For more information +// about versioning, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html). +// +// This operation requires permission for the lambda:GetFunctionConfiguration +// operation. +func (c *Lambda) GetFunctionConfiguration(input *GetFunctionConfigurationInput) (*FunctionConfiguration, error) { + req, out := c.GetFunctionConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest generates a request for the GetPolicy operation. 
+func (c *Lambda) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) {
+	op := &request.Operation{
+		Name:       opGetPolicy,
+		HTTPMethod: "GET",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/policy",
+	}
+
+	if input == nil {
+		input = &GetPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Returns the resource policy, containing a list of permissions that apply
+// to a specific ARN that you specify via the Qualifier parameter.
+//
+// For information about adding permissions, see AddPermission.
+//
+// You need permission for the lambda:GetPolicy action.
+func (c *Lambda) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) {
+	req, out := c.GetPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opInvoke = "Invoke"
+
+// InvokeRequest generates a request for the Invoke operation.
+func (c *Lambda) InvokeRequest(input *InvokeInput) (req *request.Request, output *InvokeOutput) {
+	op := &request.Operation{
+		Name:       opInvoke,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/invocations",
+	}
+
+	if input == nil {
+		input = &InvokeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &InvokeOutput{}
+	req.Data = output
+	return
+}
+
+// Invokes a specific Lambda function version.
+//
+// If you don't provide the Qualifier parameter, it uses the unqualified function
+// ARN which results in invocation of the $LATEST version of the Lambda function
+// (when you create a Lambda function, the $LATEST is the version). The AWS
+// Lambda versioning and aliases feature allows you to publish multiple versions
+// of a Lambda function and also create aliases for each function version. So
+// each of your Lambda function versions can be invoked using multiple ARNs.
For +// more information, see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases-v2.html). +// Using the Qualifier parameter, you can specify a function version or alias +// name to invoke specific function version. If you specify function version, +// the API uses the qualified function ARN to invoke a specific function version. +// If you specify alias name, the API uses the alias ARN to invoke the function +// version to which the alias points. +// +// This operation requires permission for the lambda:InvokeFunction action. +func (c *Lambda) Invoke(input *InvokeInput) (*InvokeOutput, error) { + req, out := c.InvokeRequest(input) + err := req.Send() + return out, err +} + +const opInvokeAsync = "InvokeAsync" + +// InvokeAsyncRequest generates a request for the InvokeAsync operation. +func (c *Lambda) InvokeAsyncRequest(input *InvokeAsyncInput) (req *request.Request, output *InvokeAsyncOutput) { + op := &request.Operation{ + Name: opInvokeAsync, + HTTPMethod: "POST", + HTTPPath: "/2014-11-13/functions/{FunctionName}/invoke-async/", + } + + if input == nil { + input = &InvokeAsyncInput{} + } + + req = c.newRequest(op, input, output) + output = &InvokeAsyncOutput{} + req.Data = output + return +} + +// This API is deprecated. We recommend you use Invoke API (see Invoke). Submits +// an invocation request to AWS Lambda. Upon receiving the request, Lambda executes +// the specified function asynchronously. To see the logs generated by the Lambda +// function execution, see the CloudWatch logs console. +// +// This operation requires permission for the lambda:InvokeFunction action. +func (c *Lambda) InvokeAsync(input *InvokeAsyncInput) (*InvokeAsyncOutput, error) { + req, out := c.InvokeAsyncRequest(input) + err := req.Send() + return out, err +} + +const opListAliases = "ListAliases" + +// ListAliasesRequest generates a request for the ListAliases operation. 
+func (c *Lambda) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { + op := &request.Operation{ + Name: opListAliases, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases", + } + + if input == nil { + input = &ListAliasesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAliasesOutput{} + req.Data = output + return +} + +// Returns list of aliases created for a Lambda function. For each alias, the +// response includes information such as the alias ARN, description, alias name, +// and the function version to which it points. For more information, see Introduction +// to AWS Lambda Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html) +// +// This requires permission for the lambda:ListAliases action. +func (c *Lambda) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { + req, out := c.ListAliasesRequest(input) + err := req.Send() + return out, err +} + +const opListEventSourceMappings = "ListEventSourceMappings" + +// ListEventSourceMappingsRequest generates a request for the ListEventSourceMappings operation. +func (c *Lambda) ListEventSourceMappingsRequest(input *ListEventSourceMappingsInput) (req *request.Request, output *ListEventSourceMappingsOutput) { + op := &request.Operation{ + Name: opListEventSourceMappings, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/event-source-mappings/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEventSourceMappingsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEventSourceMappingsOutput{} + req.Data = output + return +} + +// Returns a list of event source mappings you created using the CreateEventSourceMapping +// (see CreateEventSourceMapping), where you identify a stream as an event source. 
+// This list does not include Amazon S3 event sources. +// +// For each mapping, the API returns configuration information. You can optionally +// specify filters to retrieve specific event source mappings. +// +// This operation requires permission for the lambda:ListEventSourceMappings +// action. +func (c *Lambda) ListEventSourceMappings(input *ListEventSourceMappingsInput) (*ListEventSourceMappingsOutput, error) { + req, out := c.ListEventSourceMappingsRequest(input) + err := req.Send() + return out, err +} + +func (c *Lambda) ListEventSourceMappingsPages(input *ListEventSourceMappingsInput, fn func(p *ListEventSourceMappingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListEventSourceMappingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListEventSourceMappingsOutput), lastPage) + }) +} + +const opListFunctions = "ListFunctions" + +// ListFunctionsRequest generates a request for the ListFunctions operation. +func (c *Lambda) ListFunctionsRequest(input *ListFunctionsInput) (req *request.Request, output *ListFunctionsOutput) { + op := &request.Operation{ + Name: opListFunctions, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/functions/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListFunctionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListFunctionsOutput{} + req.Data = output + return +} + +// Returns a list of your Lambda functions. For each function, the response +// includes the function configuration information. You must use GetFunction +// to retrieve the code for your function. +// +// This operation requires permission for the lambda:ListFunctions action. 
+func (c *Lambda) ListFunctions(input *ListFunctionsInput) (*ListFunctionsOutput, error) { + req, out := c.ListFunctionsRequest(input) + err := req.Send() + return out, err +} + +func (c *Lambda) ListFunctionsPages(input *ListFunctionsInput, fn func(p *ListFunctionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListFunctionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListFunctionsOutput), lastPage) + }) +} + +const opListVersionsByFunction = "ListVersionsByFunction" + +// ListVersionsByFunctionRequest generates a request for the ListVersionsByFunction operation. +func (c *Lambda) ListVersionsByFunctionRequest(input *ListVersionsByFunctionInput) (req *request.Request, output *ListVersionsByFunctionOutput) { + op := &request.Operation{ + Name: opListVersionsByFunction, + HTTPMethod: "GET", + HTTPPath: "/2015-03-31/functions/{FunctionName}/versions", + } + + if input == nil { + input = &ListVersionsByFunctionInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVersionsByFunctionOutput{} + req.Data = output + return +} + +// List all versions of a function. +func (c *Lambda) ListVersionsByFunction(input *ListVersionsByFunctionInput) (*ListVersionsByFunctionOutput, error) { + req, out := c.ListVersionsByFunctionRequest(input) + err := req.Send() + return out, err +} + +const opPublishVersion = "PublishVersion" + +// PublishVersionRequest generates a request for the PublishVersion operation. 
+func (c *Lambda) PublishVersionRequest(input *PublishVersionInput) (req *request.Request, output *FunctionConfiguration) {
+	op := &request.Operation{
+		Name:       opPublishVersion,
+		HTTPMethod: "POST",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/versions",
+	}
+
+	if input == nil {
+		input = &PublishVersionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FunctionConfiguration{}
+	req.Data = output
+	return
+}
+
+// Publishes a version of your function from the current snapshot of HEAD. That
+// is, AWS Lambda takes a snapshot of the function code and configuration information
+// from HEAD and publishes a new version. The code and handler of this specific
+// Lambda function version cannot be modified after publication, but you can
+// modify the configuration information.
+func (c *Lambda) PublishVersion(input *PublishVersionInput) (*FunctionConfiguration, error) {
+	req, out := c.PublishVersionRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRemovePermission = "RemovePermission"
+
+// RemovePermissionRequest generates a request for the RemovePermission operation.
+func (c *Lambda) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) {
+	op := &request.Operation{
+		Name:       opRemovePermission,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2015-03-31/functions/{FunctionName}/policy/{StatementId}",
+	}
+
+	if input == nil {
+		input = &RemovePermissionInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &RemovePermissionOutput{}
+	req.Data = output
+	return
+}
+
+// You can remove individual permissions from a resource policy associated
+// with a Lambda function by providing a statement ID that you provided when
+// you added the permission.
The API removes corresponding permission that +// is associated with the specific ARN identified by the Qualifier parameter. +// +// Note that removal of a permission will cause an active event source to lose +// permission to the function. +// +// You need permission for the lambda:RemovePermission action. +func (c *Lambda) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAlias = "UpdateAlias" + +// UpdateAliasRequest generates a request for the UpdateAlias operation. +func (c *Lambda) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *AliasConfiguration) { + op := &request.Operation{ + Name: opUpdateAlias, + HTTPMethod: "PUT", + HTTPPath: "/2015-03-31/functions/{FunctionName}/aliases/{Name}", + } + + if input == nil { + input = &UpdateAliasInput{} + } + + req = c.newRequest(op, input, output) + output = &AliasConfiguration{} + req.Data = output + return +} + +// Using this API you can update function version to which the alias points +// to and alias description. For more information, see Introduction to AWS Lambda +// Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-v2-intro-aliases.html) +// +// This requires permission for the lambda:UpdateAlias action. +func (c *Lambda) UpdateAlias(input *UpdateAliasInput) (*AliasConfiguration, error) { + req, out := c.UpdateAliasRequest(input) + err := req.Send() + return out, err +} + +const opUpdateEventSourceMapping = "UpdateEventSourceMapping" + +// UpdateEventSourceMappingRequest generates a request for the UpdateEventSourceMapping operation. 
+func (c *Lambda) UpdateEventSourceMappingRequest(input *UpdateEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { + op := &request.Operation{ + Name: opUpdateEventSourceMapping, + HTTPMethod: "PUT", + HTTPPath: "/2015-03-31/event-source-mappings/{UUID}", + } + + if input == nil { + input = &UpdateEventSourceMappingInput{} + } + + req = c.newRequest(op, input, output) + output = &EventSourceMappingConfiguration{} + req.Data = output + return +} + +// You can update an event source mapping. This is useful if you want to change +// the parameters of the existing mapping without losing your position in the +// stream. You can change which function will receive the stream records, but +// to change the stream itself, you must create a new mapping. +// +// This operation requires permission for the lambda:UpdateEventSourceMapping +// action. +func (c *Lambda) UpdateEventSourceMapping(input *UpdateEventSourceMappingInput) (*EventSourceMappingConfiguration, error) { + req, out := c.UpdateEventSourceMappingRequest(input) + err := req.Send() + return out, err +} + +const opUpdateFunctionCode = "UpdateFunctionCode" + +// UpdateFunctionCodeRequest generates a request for the UpdateFunctionCode operation. +func (c *Lambda) UpdateFunctionCodeRequest(input *UpdateFunctionCodeInput) (req *request.Request, output *FunctionConfiguration) { + op := &request.Operation{ + Name: opUpdateFunctionCode, + HTTPMethod: "PUT", + HTTPPath: "/2015-03-31/functions/{FunctionName}/code", + } + + if input == nil { + input = &UpdateFunctionCodeInput{} + } + + req = c.newRequest(op, input, output) + output = &FunctionConfiguration{} + req.Data = output + return +} + +// Updates the code for the specified Lambda function. This operation must only +// be used on an existing Lambda function and cannot be used to update the function +// configuration. +// +// This operation requires permission for the lambda:UpdateFunctionCode action. 
+func (c *Lambda) UpdateFunctionCode(input *UpdateFunctionCodeInput) (*FunctionConfiguration, error) { + req, out := c.UpdateFunctionCodeRequest(input) + err := req.Send() + return out, err +} + +const opUpdateFunctionConfiguration = "UpdateFunctionConfiguration" + +// UpdateFunctionConfigurationRequest generates a request for the UpdateFunctionConfiguration operation. +func (c *Lambda) UpdateFunctionConfigurationRequest(input *UpdateFunctionConfigurationInput) (req *request.Request, output *FunctionConfiguration) { + op := &request.Operation{ + Name: opUpdateFunctionConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/2015-03-31/functions/{FunctionName}/configuration", + } + + if input == nil { + input = &UpdateFunctionConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &FunctionConfiguration{} + req.Data = output + return +} + +// Updates the configuration parameters for the specified Lambda function by +// using the values provided in the request. You provide only the parameters +// you want to change. This operation must only be used on an existing Lambda +// function and cannot be used to update the function's code. +// +// This operation requires permission for the lambda:UpdateFunctionConfiguration +// action. +func (c *Lambda) UpdateFunctionConfiguration(input *UpdateFunctionConfigurationInput) (*FunctionConfiguration, error) { + req, out := c.UpdateFunctionConfigurationRequest(input) + err := req.Send() + return out, err +} + +type AddPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS Lambda action you want to allow in this statement. Each Lambda action + // is a string starting with "lambda:" followed by the API name (see Operations). + // For example, "lambda:CreateFunction". You can use wildcard ("lambda:*") to + // grant permission for all AWS Lambda actions. 
+ Action *string `type:"string" required:"true"` + + // Name of the Lambda function whose resource policy you are updating by adding + // a new permission. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // The principal who is getting this permission. It can be Amazon S3 service + // Principal ("s3.amazonaws.com") if you want Amazon S3 to invoke the function, + // an AWS account ID if you are granting cross-account permission, or any valid + // AWS service principal such as "sns.amazonaws.com". For example, you might + // want to allow a custom application in another AWS account to push events + // to AWS Lambda by invoking your function. + Principal *string `type:"string" required:"true"` + + // You can specify this optional query parameter to specify function version + // or alias name. The permission will then apply to the specific qualified ARN. + // For example, if you specify function version 2 as the qualifier, then permission + // applies only when request is made using qualified function ARN: + // + // arn:aws:lambda:aws-region:acct-id:function:function-name:2 + // + // If you specify alias name, for example "PROD", then the permission is valid + // only for requests made using the alias ARN: + // + // arn:aws:lambda:aws-region:acct-id:function:function-name:PROD + // + // If the qualifier is not specified, the permission is valid only when requests + // is made using unqualified function ARN. 
+ // + // arn:aws:lambda:aws-region:acct-id:function:function-name + Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` + + // The AWS account ID (without a hyphen) of the source owner. For example, if + // the SourceArn identifies a bucket, then this is the bucket owner's account + // ID. You can use this additional condition to ensure the bucket you specify + // is owned by a specific account (it is possible the bucket owner deleted the + // bucket and some other AWS account created the bucket). You can also use this + // condition to specify all sources (that is, you don't specify the SourceArn) + // owned by a specific account. + SourceAccount *string `type:"string"` + + // This is optional; however, when granting Amazon S3 permission to invoke your + // function, you should specify this field with the bucket Amazon Resource Name + // (ARN) as its value. This ensures that only events generated from the specified + // bucket can invoke the function. + // + // If you add a permission for the Amazon S3 principal without providing the + // source ARN, any AWS account that creates a mapping to your function ARN can + // send events to invoke your Lambda function from Amazon S3. + SourceArn *string `type:"string"` + + // A unique statement identifier. + StatementId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionInput) GoString() string { + return s.String() +} + +type AddPermissionOutput struct { + _ struct{} `type:"structure"` + + // The permission statement you specified in the request. The response returns + // the same as a string using "\" as an escape character in the JSON. 
+	Statement *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AddPermissionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionOutput) GoString() string {
+	return s.String()
+}
+
+// Provides configuration information about a Lambda function version alias.
+type AliasConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Lambda function ARN that is qualified using alias name as the suffix. For
+	// example, if you create an alias "BETA" pointing to a helloworld function
+	// version, the ARN is arn:aws:lambda:aws-regions:acct-id:function:helloworld:BETA.
+	AliasArn *string `type:"string"`
+
+	// Alias description.
+	Description *string `type:"string"`
+
+	// Function version to which the alias points.
+	FunctionVersion *string `min:"1" type:"string"`
+
+	// Alias name.
+	Name *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AliasConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AliasConfiguration) GoString() string {
+	return s.String()
+}
+
+type CreateAliasInput struct {
+	_ struct{} `type:"structure"`
+
+	// Description of the alias.
+	Description *string `type:"string"`
+
+	// Name of the Lambda function for which you want to create an alias.
+	FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"`
+
+	// Lambda function version for which you are creating the alias.
+	FunctionVersion *string `min:"1" type:"string" required:"true"`
+
+	// Name for the alias you are creating.
+ Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAliasInput) GoString() string { + return s.String() +} + +type CreateEventSourceMappingInput struct { + _ struct{} `type:"structure"` + + // The largest number of records that AWS Lambda will retrieve from your event + // source at the time of invoking your function. Your function receives an event + // with all the retrieved records. The default is 100 records. + BatchSize *int64 `min:"1" type:"integer"` + + // Indicates whether AWS Lambda should begin polling the event source. By default, + // Enabled is true. + Enabled *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the Amazon Kinesis or the Amazon DynamoDB + // stream that is the event source. Any record added to this stream could cause + // AWS Lambda to invoke your Lambda function, it depends on the BatchSize. AWS + // Lambda POSTs the Amazon Kinesis event, containing records, to your Lambda + // function as JSON. + EventSourceArn *string `type:"string" required:"true"` + + // The Lambda function to invoke when AWS Lambda detects an event on the stream. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `min:"1" type:"string" required:"true"` + + // The position in the stream where AWS Lambda should start reading. 
For more + // information, go to ShardIteratorType (http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#Kinesis-GetShardIterator-request-ShardIteratorType) + // in the Amazon Kinesis API Reference. + StartingPosition *string `type:"string" required:"true" enum:"EventSourcePosition"` +} + +// String returns the string representation +func (s CreateEventSourceMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSourceMappingInput) GoString() string { + return s.String() +} + +type CreateFunctionInput struct { + _ struct{} `type:"structure"` + + // The code for the Lambda function. + Code *FunctionCode `type:"structure" required:"true"` + + // A short, user-defined function description. Lambda does not use this value. + // Assign a meaningful description as you see fit. + Description *string `type:"string"` + + // The name you want to assign to the function you are uploading. You can specify + // an unqualified function name (for example, "Thumbnail") or you can specify + // Amazon Resource Name (ARN) of the function (for example, "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). + // AWS Lambda also allows you to specify only the account ID qualifier (for + // example, "account-id:Thumbnail"). Note that the length constraint applies + // only to the ARN. If you specify only the function name, it is limited to + // 64 character in length. The function names appear in the console and are + // returned in the ListFunctions API. Function names are used to specify functions + // to other AWS Lambda APIs, such as Invoke. + FunctionName *string `min:"1" type:"string" required:"true"` + + // The function within your code that Lambda calls to begin execution. For Node.js, + // it is the module-name.export value in your function. For Java, it can be + // package.class-name::handler or package.class-name. 
For more information, + // see Lambda Function Handler (Java) (http://docs.aws.amazon.com/lambda/latest/dg/java-programming-model-handler-types.html). + Handler *string `type:"string" required:"true"` + + // The amount of memory, in MB, your Lambda function is given. Lambda uses this + // memory size to infer the amount of CPU and memory allocated to your function. + // Your function use-case determines your CPU and memory requirements. For example, + // a database operation might need less memory compared to an image processing + // function. The default value is 128 MB. The value must be a multiple of 64 + // MB. + MemorySize *int64 `min:"128" type:"integer"` + + // This boolean parameter can be used to request AWS Lambda to create the Lambda + // function and publish a version as an atomic operation. + Publish *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it + // executes your function to access any other Amazon Web Services (AWS) resources. + // For more information, see AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html) + Role *string `type:"string" required:"true"` + + // The runtime environment for the Lambda function you are uploading. Currently, + // Lambda supports "java" and "nodejs" as the runtime. + Runtime *string `type:"string" required:"true" enum:"Runtime"` + + // The function execution time at which Lambda should terminate the function. + // Because the execution time has cost implications, we recommend you set this + // value based on your expected execution time. The default is 3 seconds. 
+ Timeout *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s CreateFunctionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFunctionInput) GoString() string { + return s.String() +} + +type DeleteAliasInput struct { + _ struct{} `type:"structure"` + + // The Lambda function name for which the alias is created. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Name of the alias to delete. + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasInput) GoString() string { + return s.String() +} + +type DeleteAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAliasOutput) GoString() string { + return s.String() +} + +type DeleteEventSourceMappingInput struct { + _ struct{} `type:"structure"` + + // The event source mapping ID. + UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEventSourceMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSourceMappingInput) GoString() string { + return s.String() +} + +type DeleteFunctionInput struct { + _ struct{} `type:"structure"` + + // The Lambda function to delete. 
+ // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Using this optional parameter you can specify a function version (but not + // the $LATEST version) to direct AWS Lambda to delete a specific function version. + // If the function version has one or more aliases pointing to it, you will + // get an error because you cannot have aliases pointing to it. You can delete + // any function version but not the $LATEST, that is, you cannot specify $LATEST + // as the value of this parameter. The $LATEST version can be deleted only when + // you want to delete all the function versions and aliases. + // + // You can only specify a function version and not alias name using this parameter. + // You cannot delete a function version using its alias. + // + // If you don't specify this parameter, AWS Lambda will delete the function, + // including all its versions and aliases. 
+ Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteFunctionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFunctionInput) GoString() string { + return s.String() +} + +type DeleteFunctionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteFunctionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFunctionOutput) GoString() string { + return s.String() +} + +// Describes mapping between an Amazon Kinesis stream and a Lambda function. +type EventSourceMappingConfiguration struct { + _ struct{} `type:"structure"` + + // The largest number of records that AWS Lambda will retrieve from your event + // source at the time of invoking your function. Your function receives an event + // with all the retrieved records. + BatchSize *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the source + // of events. + EventSourceArn *string `type:"string"` + + // The Lambda function to invoke when AWS Lambda detects an event on the stream. + FunctionArn *string `type:"string"` + + // The UTC time string indicating the last time the event mapping was updated. + LastModified *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The result of the last AWS Lambda invocation of your Lambda function. + LastProcessingResult *string `type:"string"` + + // The state of the event source mapping. It can be "Creating", "Enabled", "Disabled", + // "Enabling", "Disabling", "Updating", or "Deleting". + State *string `type:"string"` + + // The reason the event source mapping is in its current state. It is either + // user-requested or an AWS Lambda-initiated state transition. 
+ StateTransitionReason *string `type:"string"` + + // The AWS Lambda assigned opaque identifier for the mapping. + UUID *string `type:"string"` +} + +// String returns the string representation +func (s EventSourceMappingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventSourceMappingConfiguration) GoString() string { + return s.String() +} + +// The code for the Lambda function. +type FunctionCode struct { + _ struct{} `type:"structure"` + + // Amazon S3 bucket name where the .zip file containing your deployment package + // is stored. This bucket must reside in the same AWS region where you are creating + // the Lambda function. + S3Bucket *string `min:"3" type:"string"` + + // The Amazon S3 object (the deployment package) key name you want to upload. + S3Key *string `min:"1" type:"string"` + + // The Amazon S3 object (the deployment package) version you want to upload. + S3ObjectVersion *string `min:"1" type:"string"` + + // A base64-encoded .zip file containing your deployment package. For more information + // about creating a .zip file, go to Execution Permissions (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role.html) + // in the AWS Lambda Developer Guide. + ZipFile []byte `type:"blob"` +} + +// String returns the string representation +func (s FunctionCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FunctionCode) GoString() string { + return s.String() +} + +// The object for the Lambda function location. +type FunctionCodeLocation struct { + _ struct{} `type:"structure"` + + // The presigned URL you can use to download the function's .zip file that you + // previously uploaded. The URL is valid for up to 10 minutes. + Location *string `type:"string"` + + // The repository from which you can download the function. 
+ RepositoryType *string `type:"string"` +} + +// String returns the string representation +func (s FunctionCodeLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FunctionCodeLocation) GoString() string { + return s.String() +} + +// A complex type that describes function metadata. +type FunctionConfiguration struct { + _ struct{} `type:"structure"` + + // It is the SHA256 hash of your function deployment package. + CodeSha256 *string `type:"string"` + + // The size, in bytes, of the function .zip file you uploaded. + CodeSize *int64 `type:"long"` + + // The user-provided description. + Description *string `type:"string"` + + // The Amazon Resource Name (ARN) assigned to the function. + FunctionArn *string `type:"string"` + + // The name of the function. + FunctionName *string `min:"1" type:"string"` + + // The function Lambda calls to begin executing your function. + Handler *string `type:"string"` + + // The timestamp of the last time you updated the function. + LastModified *string `type:"string"` + + // The memory size, in MB, you configured for the function. Must be a multiple + // of 64 MB. + MemorySize *int64 `min:"128" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it + // executes your function to access any other Amazon Web Services (AWS) resources. + Role *string `type:"string"` + + // The runtime environment for the Lambda function. + Runtime *string `type:"string" enum:"Runtime"` + + // The function execution time at which Lambda should terminate the function. + // Because the execution time has cost implications, we recommend you set this + // value based on your expected execution time. The default is 3 seconds. + Timeout *int64 `min:"1" type:"integer"` + + // The version of the Lambda function. 
+ Version *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s FunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FunctionConfiguration) GoString() string { + return s.String() +} + +type GetAliasInput struct { + _ struct{} `type:"structure"` + + // Function name for which the alias is created. An alias is a subresource that + // exists only in the context of an existing Lambda function. So you must specify + // the function name. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Name of the alias for which you want to retrieve information. + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAliasInput) GoString() string { + return s.String() +} + +type GetEventSourceMappingInput struct { + _ struct{} `type:"structure"` + + // The AWS Lambda assigned ID of the event source mapping. + UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEventSourceMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEventSourceMappingInput) GoString() string { + return s.String() +} + +type GetFunctionConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the Lambda function for which you want to retrieve the configuration + // information. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). 
AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Using this optional parameter you can specify function version or alias name. + // If you specify function version, the API uses qualified function ARN and + // returns information about the specific function version. if you specify alias + // name, the API uses alias ARN and returns information about the function version + // to which the alias points. + // + // If you don't specify this parameter, the API uses unqualified function ARN, + // and returns information about the $LATEST function version. + Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFunctionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFunctionConfigurationInput) GoString() string { + return s.String() +} + +type GetFunctionInput struct { + _ struct{} `type:"structure"` + + // The Lambda function name. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. 
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Using this optional parameter to specify a function version or alias name. + // If you specify function version, the API uses qualified function ARN for + // the request and returns information about the specific Lambda function version. + // If you specify alias name, the API uses alias ARN and returns information + // about the function version to which the alias points. If you don't provide + // this parameter, the API uses unqualified function ARN and returns information + // about the $LATEST version of the Lambda function. + Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFunctionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFunctionInput) GoString() string { + return s.String() +} + +// This response contains the object for the Lambda function location (see API_FunctionCodeLocation +type GetFunctionOutput struct { + _ struct{} `type:"structure"` + + // The object for the Lambda function location. + Code *FunctionCodeLocation `type:"structure"` + + // A complex type that describes function metadata. + Configuration *FunctionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetFunctionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFunctionOutput) GoString() string { + return s.String() +} + +type GetPolicyInput struct { + _ struct{} `type:"structure"` + + // Function name whose resource policy you want to retrieve. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). 
AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // You can specify this optional query parameter to specify function version + // or alias name in which case this API will return all permissions associated + // with the specific ARN. If you don't provide this parameter, the API will + // return permissions that apply to the unqualified function ARN. + Qualifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyInput) GoString() string { + return s.String() +} + +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // The resource policy associated with the specified function. The response + // returns the same as a string using "\" as an escape character in the JSON. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyOutput) GoString() string { + return s.String() +} + +type InvokeAsyncInput struct { + _ struct{} `type:"structure" payload:"InvokeArgs"` + + // The Lambda function name. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // JSON that you want to provide to your Lambda function as input. 
+ InvokeArgs io.ReadSeeker `type:"blob" required:"true"` +} + +// String returns the string representation +func (s InvokeAsyncInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvokeAsyncInput) GoString() string { + return s.String() +} + +// Upon success, it returns empty response. Otherwise, throws an exception. +type InvokeAsyncOutput struct { + _ struct{} `type:"structure"` + + // It will be 202 upon success. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s InvokeAsyncOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvokeAsyncOutput) GoString() string { + return s.String() +} + +type InvokeInput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // Using the ClientContext you can pass client-specific information to the Lambda + // function you are invoking. You can then process the client information in + // your Lambda function as you choose through the context variable. For an example + // of a ClientContext JSON, go to PutEvents (http://docs.aws.amazon.com/mobileanalytics/latest/ug/PutEvents.html) + // in the Amazon Mobile Analytics API Reference and User Guide. + // + // The ClientContext JSON must be base64-encoded. + ClientContext *string `location:"header" locationName:"X-Amz-Client-Context" type:"string"` + + // The Lambda function name. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. 
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // By default, the Invoke API assumes "RequestResponse" invocation type. You + // can optionally request asynchronous execution by specifying "Event" as the + // InvocationType. You can also use this parameter to request AWS Lambda to + // not execute the function but do some verification, such as if the caller + // is authorized to invoke the function and if the inputs are valid. You request + // this by specifying "DryRun" as the InvocationType. This is useful in a cross-account + // scenario when you want to verify access to a function without running it. + InvocationType *string `location:"header" locationName:"X-Amz-Invocation-Type" type:"string" enum:"InvocationType"` + + // You can set this optional parameter to "Tail" in the request only if you + // specify the InvocationType parameter with value "RequestResponse". In this + // case, AWS Lambda returns the base64-encoded last 4 KB of log data produced + // by your Lambda function in the x-amz-log-results header. + LogType *string `location:"header" locationName:"X-Amz-Log-Type" type:"string" enum:"LogType"` + + // JSON that you want to provide to your Lambda function as input. + Payload []byte `type:"blob"` + + // You can use this optional paramter to specify a Lambda function version or + // alias name. If you specify function version, the API uses qualified function + // ARN to invoke a specific Lambda function. If you specify alias name, the + // API uses the alias ARN to invoke the Lambda function version to which the + // alias points. + // + // If you don't provide this parameter, then the API uses unqualified function + // ARN which results in invocation of the $LATEST version. 
+ Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` +} + +// String returns the string representation +func (s InvokeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvokeInput) GoString() string { + return s.String() +} + +// Upon success, returns an empty response. Otherwise, throws an exception. +type InvokeOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // Indicates whether an error occurred while executing the Lambda function. + // If an error occurred this field will have one of two values; Handled or Unhandled. + // Handled errors are errors that are reported by the function while the Unhandled + // errors are those detected and reported by AWS Lambda. Unhandled errors include + // out of memory errors and function timeouts. For information about how to + // report an Handled error, see Programming Model (http://docs.aws.amazon.com/lambda/latest/dg/programming-model.html). + FunctionError *string `location:"header" locationName:"X-Amz-Function-Error" type:"string"` + + // It is the base64-encoded logs for the Lambda function invocation. This is + // present only if the invocation type is "RequestResponse" and the logs were + // requested. + LogResult *string `location:"header" locationName:"X-Amz-Log-Result" type:"string"` + + // It is the JSON representation of the object returned by the Lambda function. + // In This is present only if the invocation type is "RequestResponse". + // + // In the event of a function error this field contains a message describing + // the error. For the Handled errors the Lambda function will report this message. + // For Unhandled errors AWS Lambda reports the message. + Payload []byte `type:"blob"` + + // The HTTP status code will be in the 200 range for successful request. For + // the "RequestResonse" invocation type this status code will be 200. 
For the + // "Event" invocation type this status code will be 202. For the "DryRun" invocation + // type the status code will be 204. + StatusCode *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s InvokeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvokeOutput) GoString() string { + return s.String() +} + +type ListAliasesInput struct { + _ struct{} `type:"structure"` + + // Lambda function name for which the alias is created. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // If you specify this optional parameter, the API returns only the aliases + // pointing to the specific Lambda function version, otherwise returns all aliases + // created for the Lambda function. + FunctionVersion *string `location:"querystring" locationName:"FunctionVersion" min:"1" type:"string"` + + // Optional string. An opaque pagination token returned from a previous ListAliases + // operation. If present, indicates where to continue the listing. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional integer. Specifies the maximum number of aliases to return in response. + // This parameter value must be greater than 0. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesInput) GoString() string { + return s.String() +} + +type ListAliasesOutput struct { + _ struct{} `type:"structure"` + + // An list of alises. + Aliases []*AliasConfiguration `type:"list"` + + // A string, present if there are more aliases. 
+ NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesOutput) GoString() string { + return s.String() +} + +type ListEventSourceMappingsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Kinesis stream. + EventSourceArn *string `location:"querystring" locationName:"EventSourceArn" type:"string"` + + // The name of the Lambda function. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"querystring" locationName:"FunctionName" min:"1" type:"string"` + + // Optional string. An opaque pagination token returned from a previous ListEventSourceMappings + // operation. If present, specifies to continue the list from where the returning + // call left off. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional integer. Specifies the maximum number of event sources to return + // in response. This value must be greater than 0. 
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListEventSourceMappingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventSourceMappingsInput) GoString() string { + return s.String() +} + +// Contains a list of event sources (see API_EventSourceMappingConfiguration) +type ListEventSourceMappingsOutput struct { + _ struct{} `type:"structure"` + + // An array of EventSourceMappingConfiguration objects. + EventSourceMappings []*EventSourceMappingConfiguration `type:"list"` + + // A string, present if there are more event source mappings. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListEventSourceMappingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventSourceMappingsOutput) GoString() string { + return s.String() +} + +type ListFunctionsInput struct { + _ struct{} `type:"structure"` + + // Optional string. An opaque pagination token returned from a previous ListFunctions + // operation. If present, indicates where to continue the listing. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional integer. Specifies the maximum number of AWS Lambda functions to + // return in response. This parameter value must be greater than 0. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListFunctionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFunctionsInput) GoString() string { + return s.String() +} + +// Contains a list of AWS Lambda function configurations (see FunctionConfiguration. 
+type ListFunctionsOutput struct { + _ struct{} `type:"structure"` + + // A list of Lambda functions. + Functions []*FunctionConfiguration `type:"list"` + + // A string, present if there are more functions. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListFunctionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFunctionsOutput) GoString() string { + return s.String() +} + +type ListVersionsByFunctionInput struct { + _ struct{} `type:"structure"` + + // Function name whose versions to list. You can specify an unqualified function + // name (for example, "Thumbnail") or you can specify Amazon Resource Name (ARN) + // of the function (for example, "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). + // AWS Lambda also allows you to specify only the account ID qualifier (for + // example, "account-id:Thumbnail"). Note that the length constraint applies + // only to the ARN. If you specify only the function name, it is limited to + // 64 character in length. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Optional string. An opaque pagination token returned from a previous ListVersionsByFunction + // operation. If present, indicates where to continue the listing. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // Optional integer. Specifies the maximum number of AWS Lambda function versions + // to return in response. This parameter value must be greater than 0. 
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListVersionsByFunctionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVersionsByFunctionInput) GoString() string { + return s.String() +} + +type ListVersionsByFunctionOutput struct { + _ struct{} `type:"structure"` + + // A string, present if there are more function versions. + NextMarker *string `type:"string"` + + // A list of Lambda function versions. + Versions []*FunctionConfiguration `type:"list"` +} + +// String returns the string representation +func (s ListVersionsByFunctionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVersionsByFunctionOutput) GoString() string { + return s.String() +} + +type PublishVersionInput struct { + _ struct{} `type:"structure"` + + // The SHA256 hash of the deployment package you want to publish. This provides + // validation on the code you are publishing. If you provide this parameter + // value must match the SHA256 of the HEAD version for the publication to succeed. + CodeSha256 *string `type:"string"` + + // The description for the version you are publishing. If not provided, AWS + // Lambda copies the description from the HEAD version. + Description *string `type:"string"` + + // The Lambda function name. You can specify an unqualified function name (for + // example, "Thumbnail") or you can specify Amazon Resource Name (ARN) of the + // function (for example, "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). + // AWS Lambda also allows you to specify only the account ID qualifier (for + // example, "account-id:Thumbnail"). Note that the length constraint applies + // only to the ARN. If you specify only the function name, it is limited to + // 64 character in length. 
+ FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PublishVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishVersionInput) GoString() string { + return s.String() +} + +type RemovePermissionInput struct { + _ struct{} `type:"structure"` + + // Lambda function whose resource policy you want to remove a permission from. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // You can specify this optional parameter to remove permission associated with + // a specific function version or function alias. The value of this paramter + // is the function version or alias name. If you don't specify this parameter, + // the API removes permission associated with the unqualified function ARN. + Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` + + // Statement ID of the permission to remove. 
+ StatementId *string `location:"uri" locationName:"StatementId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionInput) GoString() string { + return s.String() +} + +type RemovePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemovePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionOutput) GoString() string { + return s.String() +} + +type UpdateAliasInput struct { + _ struct{} `type:"structure"` + + // You can optionally change the description of the alias using this parameter. + Description *string `type:"string"` + + // The function name for which the alias is created. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // Using this parameter you can optionally change the Lambda function version + // to which the alias to points to. + FunctionVersion *string `min:"1" type:"string"` + + // The alias name. + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAliasInput) GoString() string { + return s.String() +} + +type UpdateEventSourceMappingInput struct { + _ struct{} `type:"structure"` + + // The maximum number of stream records that can be sent to your Lambda function + // for a single invocation. + BatchSize *int64 `min:"1" type:"integer"` + + // Specifies whether AWS Lambda should actively poll the stream or not. If disabled, + // AWS Lambda will not poll the stream. 
+ Enabled *bool `type:"boolean"` + + // The Lambda function to which you want the stream records sent. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `min:"1" type:"string"` + + // The event source mapping identifier. + UUID *string `location:"uri" locationName:"UUID" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateEventSourceMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEventSourceMappingInput) GoString() string { + return s.String() +} + +type UpdateFunctionCodeInput struct { + _ struct{} `type:"structure"` + + // The existing Lambda function name whose code you want to replace. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // This boolean parameter can be used to request AWS Lambda to update the Lambda + // function and publish a version as an atomic operation. 
+ Publish *bool `type:"boolean"` + + // Amazon S3 bucket name where the .zip file containing your deployment package + // is stored. This bucket must reside in the same AWS region where you are creating + // the Lambda function. + S3Bucket *string `min:"3" type:"string"` + + // The Amazon S3 object (the deployment package) key name you want to upload. + S3Key *string `min:"1" type:"string"` + + // The Amazon S3 object (the deployment package) version you want to upload. + S3ObjectVersion *string `min:"1" type:"string"` + + // Based64-encoded .zip file containing your packaged source code. + ZipFile []byte `type:"blob"` +} + +// String returns the string representation +func (s UpdateFunctionCodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFunctionCodeInput) GoString() string { + return s.String() +} + +type UpdateFunctionConfigurationInput struct { + _ struct{} `type:"structure"` + + // A short user-defined function description. AWS Lambda does not use this value. + // Assign a meaningful description as you see fit. + Description *string `type:"string"` + + // The name of the Lambda function. + // + // You can specify an unqualified function name (for example, "Thumbnail") + // or you can specify Amazon Resource Name (ARN) of the function (for example, + // "arn:aws:lambda:us-west-2:account-id:function:ThumbNail"). AWS Lambda also + // allows you to specify only the account ID qualifier (for example, "account-id:Thumbnail"). + // Note that the length constraint applies only to the ARN. If you specify only + // the function name, it is limited to 64 character in length. + FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` + + // The function that Lambda calls to begin executing your function. For Node.js, + // it is the module-name.export value in your function. 
+ Handler *string `type:"string"` + + // The amount of memory, in MB, your Lambda function is given. AWS Lambda uses + // this memory size to infer the amount of CPU allocated to your function. Your + // function use-case determines your CPU and memory requirements. For example, + // a database operation might need less memory compared to an image processing + // function. The default value is 128 MB. The value must be a multiple of 64 + // MB. + MemorySize *int64 `min:"128" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM role that Lambda will assume when + // it executes your function. + Role *string `type:"string"` + + // The function execution time at which AWS Lambda should terminate the function. + // Because the execution time has cost implications, we recommend you set this + // value based on your expected execution time. The default is 3 seconds. + Timeout *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s UpdateFunctionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFunctionConfigurationInput) GoString() string { + return s.String() +} + +const ( + // @enum EventSourcePosition + EventSourcePositionTrimHorizon = "TRIM_HORIZON" + // @enum EventSourcePosition + EventSourcePositionLatest = "LATEST" +) + +const ( + // @enum InvocationType + InvocationTypeEvent = "Event" + // @enum InvocationType + InvocationTypeRequestResponse = "RequestResponse" + // @enum InvocationType + InvocationTypeDryRun = "DryRun" +) + +const ( + // @enum LogType + LogTypeNone = "None" + // @enum LogType + LogTypeTail = "Tail" +) + +const ( + // @enum Runtime + RuntimeNodejs = "nodejs" + // @enum Runtime + RuntimeJava8 = "java8" + // @enum Runtime + RuntimePython27 = "python2.7" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/examples_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,539 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package lambda_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/lambda" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleLambda_AddPermission() { + svc := lambda.New(session.New()) + + params := &lambda.AddPermissionInput{ + Action: aws.String("Action"), // Required + FunctionName: aws.String("FunctionName"), // Required + Principal: aws.String("Principal"), // Required + StatementId: aws.String("StatementId"), // Required + Qualifier: aws.String("Qualifier"), + SourceAccount: aws.String("SourceOwner"), + SourceArn: aws.String("Arn"), + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_CreateAlias() { + svc := lambda.New(session.New()) + + params := &lambda.CreateAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + FunctionVersion: aws.String("Version"), // Required + Name: aws.String("Alias"), // Required + Description: aws.String("Description"), + } + resp, err := svc.CreateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_CreateEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.CreateEventSourceMappingInput{ + EventSourceArn: aws.String("Arn"), // Required + FunctionName: aws.String("FunctionName"), // Required + StartingPosition: aws.String("EventSourcePosition"), // Required + BatchSize: aws.Int64(1), + Enabled: aws.Bool(true), + } + resp, err := svc.CreateEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_CreateFunction() { + svc := lambda.New(session.New()) + + params := &lambda.CreateFunctionInput{ + Code: &lambda.FunctionCode{ // Required + S3Bucket: aws.String("S3Bucket"), + S3Key: aws.String("S3Key"), + S3ObjectVersion: aws.String("S3ObjectVersion"), + ZipFile: []byte("PAYLOAD"), + }, + FunctionName: aws.String("FunctionName"), // Required + Handler: aws.String("Handler"), // Required + Role: aws.String("RoleArn"), // Required + Runtime: aws.String("Runtime"), // Required + Description: aws.String("Description"), + MemorySize: aws.Int64(1), + Publish: aws.Bool(true), + Timeout: aws.Int64(1), + } + resp, err := svc.CreateFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_DeleteAlias() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + } + resp, err := svc.DeleteAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_DeleteEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteEventSourceMappingInput{ + UUID: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_DeleteFunction() { + svc := lambda.New(session.New()) + + params := &lambda.DeleteFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.DeleteFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetAlias() { + svc := lambda.New(session.New()) + + params := &lambda.GetAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + } + resp, err := svc.GetAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.GetEventSourceMappingInput{ + UUID: aws.String("String"), // Required + } + resp, err := svc.GetEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_GetFunction() { + svc := lambda.New(session.New()) + + params := &lambda.GetFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetFunctionConfiguration() { + svc := lambda.New(session.New()) + + params := &lambda.GetFunctionConfigurationInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetFunctionConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_GetPolicy() { + svc := lambda.New(session.New()) + + params := &lambda.GetPolicyInput{ + FunctionName: aws.String("FunctionName"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.GetPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_Invoke() { + svc := lambda.New(session.New()) + + params := &lambda.InvokeInput{ + FunctionName: aws.String("FunctionName"), // Required + ClientContext: aws.String("String"), + InvocationType: aws.String("InvocationType"), + LogType: aws.String("LogType"), + Payload: []byte("PAYLOAD"), + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.Invoke(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_InvokeAsync() { + svc := lambda.New(session.New()) + + params := &lambda.InvokeAsyncInput{ + FunctionName: aws.String("FunctionName"), // Required + InvokeArgs: bytes.NewReader([]byte("PAYLOAD")), // Required + } + resp, err := svc.InvokeAsync(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListAliases() { + svc := lambda.New(session.New()) + + params := &lambda.ListAliasesInput{ + FunctionName: aws.String("FunctionName"), // Required + FunctionVersion: aws.String("Version"), + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListAliases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListEventSourceMappings() { + svc := lambda.New(session.New()) + + params := &lambda.ListEventSourceMappingsInput{ + EventSourceArn: aws.String("Arn"), + FunctionName: aws.String("FunctionName"), + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListEventSourceMappings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_ListFunctions() { + svc := lambda.New(session.New()) + + params := &lambda.ListFunctionsInput{ + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListFunctions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_ListVersionsByFunction() { + svc := lambda.New(session.New()) + + params := &lambda.ListVersionsByFunctionInput{ + FunctionName: aws.String("FunctionName"), // Required + Marker: aws.String("String"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListVersionsByFunction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_PublishVersion() { + svc := lambda.New(session.New()) + + params := &lambda.PublishVersionInput{ + FunctionName: aws.String("FunctionName"), // Required + CodeSha256: aws.String("String"), + Description: aws.String("Description"), + } + resp, err := svc.PublishVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_RemovePermission() { + svc := lambda.New(session.New()) + + params := &lambda.RemovePermissionInput{ + FunctionName: aws.String("FunctionName"), // Required + StatementId: aws.String("StatementId"), // Required + Qualifier: aws.String("Qualifier"), + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_UpdateAlias() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateAliasInput{ + FunctionName: aws.String("FunctionName"), // Required + Name: aws.String("Alias"), // Required + Description: aws.String("Description"), + FunctionVersion: aws.String("Version"), + } + resp, err := svc.UpdateAlias(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_UpdateEventSourceMapping() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateEventSourceMappingInput{ + UUID: aws.String("String"), // Required + BatchSize: aws.Int64(1), + Enabled: aws.Bool(true), + FunctionName: aws.String("FunctionName"), + } + resp, err := svc.UpdateEventSourceMapping(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleLambda_UpdateFunctionCode() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateFunctionCodeInput{ + FunctionName: aws.String("FunctionName"), // Required + Publish: aws.Bool(true), + S3Bucket: aws.String("S3Bucket"), + S3Key: aws.String("S3Key"), + S3ObjectVersion: aws.String("S3ObjectVersion"), + ZipFile: []byte("PAYLOAD"), + } + resp, err := svc.UpdateFunctionCode(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleLambda_UpdateFunctionConfiguration() { + svc := lambda.New(session.New()) + + params := &lambda.UpdateFunctionConfigurationInput{ + FunctionName: aws.String("FunctionName"), // Required + Description: aws.String("Description"), + Handler: aws.String("Handler"), + MemorySize: aws.Int64(1), + Role: aws.String("RoleArn"), + Timeout: aws.Int64(1), + } + resp, err := svc.UpdateFunctionConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/lambdaiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,114 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package lambdaiface provides an interface for the AWS Lambda. +package lambdaiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/lambda" +) + +// LambdaAPI is the interface type for lambda.Lambda. 
+type LambdaAPI interface { + AddPermissionRequest(*lambda.AddPermissionInput) (*request.Request, *lambda.AddPermissionOutput) + + AddPermission(*lambda.AddPermissionInput) (*lambda.AddPermissionOutput, error) + + CreateAliasRequest(*lambda.CreateAliasInput) (*request.Request, *lambda.AliasConfiguration) + + CreateAlias(*lambda.CreateAliasInput) (*lambda.AliasConfiguration, error) + + CreateEventSourceMappingRequest(*lambda.CreateEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) + + CreateEventSourceMapping(*lambda.CreateEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error) + + CreateFunctionRequest(*lambda.CreateFunctionInput) (*request.Request, *lambda.FunctionConfiguration) + + CreateFunction(*lambda.CreateFunctionInput) (*lambda.FunctionConfiguration, error) + + DeleteAliasRequest(*lambda.DeleteAliasInput) (*request.Request, *lambda.DeleteAliasOutput) + + DeleteAlias(*lambda.DeleteAliasInput) (*lambda.DeleteAliasOutput, error) + + DeleteEventSourceMappingRequest(*lambda.DeleteEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) + + DeleteEventSourceMapping(*lambda.DeleteEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error) + + DeleteFunctionRequest(*lambda.DeleteFunctionInput) (*request.Request, *lambda.DeleteFunctionOutput) + + DeleteFunction(*lambda.DeleteFunctionInput) (*lambda.DeleteFunctionOutput, error) + + GetAliasRequest(*lambda.GetAliasInput) (*request.Request, *lambda.AliasConfiguration) + + GetAlias(*lambda.GetAliasInput) (*lambda.AliasConfiguration, error) + + GetEventSourceMappingRequest(*lambda.GetEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) + + GetEventSourceMapping(*lambda.GetEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error) + + GetFunctionRequest(*lambda.GetFunctionInput) (*request.Request, *lambda.GetFunctionOutput) + + GetFunction(*lambda.GetFunctionInput) 
(*lambda.GetFunctionOutput, error) + + GetFunctionConfigurationRequest(*lambda.GetFunctionConfigurationInput) (*request.Request, *lambda.FunctionConfiguration) + + GetFunctionConfiguration(*lambda.GetFunctionConfigurationInput) (*lambda.FunctionConfiguration, error) + + GetPolicyRequest(*lambda.GetPolicyInput) (*request.Request, *lambda.GetPolicyOutput) + + GetPolicy(*lambda.GetPolicyInput) (*lambda.GetPolicyOutput, error) + + InvokeRequest(*lambda.InvokeInput) (*request.Request, *lambda.InvokeOutput) + + Invoke(*lambda.InvokeInput) (*lambda.InvokeOutput, error) + + InvokeAsyncRequest(*lambda.InvokeAsyncInput) (*request.Request, *lambda.InvokeAsyncOutput) + + InvokeAsync(*lambda.InvokeAsyncInput) (*lambda.InvokeAsyncOutput, error) + + ListAliasesRequest(*lambda.ListAliasesInput) (*request.Request, *lambda.ListAliasesOutput) + + ListAliases(*lambda.ListAliasesInput) (*lambda.ListAliasesOutput, error) + + ListEventSourceMappingsRequest(*lambda.ListEventSourceMappingsInput) (*request.Request, *lambda.ListEventSourceMappingsOutput) + + ListEventSourceMappings(*lambda.ListEventSourceMappingsInput) (*lambda.ListEventSourceMappingsOutput, error) + + ListEventSourceMappingsPages(*lambda.ListEventSourceMappingsInput, func(*lambda.ListEventSourceMappingsOutput, bool) bool) error + + ListFunctionsRequest(*lambda.ListFunctionsInput) (*request.Request, *lambda.ListFunctionsOutput) + + ListFunctions(*lambda.ListFunctionsInput) (*lambda.ListFunctionsOutput, error) + + ListFunctionsPages(*lambda.ListFunctionsInput, func(*lambda.ListFunctionsOutput, bool) bool) error + + ListVersionsByFunctionRequest(*lambda.ListVersionsByFunctionInput) (*request.Request, *lambda.ListVersionsByFunctionOutput) + + ListVersionsByFunction(*lambda.ListVersionsByFunctionInput) (*lambda.ListVersionsByFunctionOutput, error) + + PublishVersionRequest(*lambda.PublishVersionInput) (*request.Request, *lambda.FunctionConfiguration) + + PublishVersion(*lambda.PublishVersionInput) (*lambda.FunctionConfiguration, 
error) + + RemovePermissionRequest(*lambda.RemovePermissionInput) (*request.Request, *lambda.RemovePermissionOutput) + + RemovePermission(*lambda.RemovePermissionInput) (*lambda.RemovePermissionOutput, error) + + UpdateAliasRequest(*lambda.UpdateAliasInput) (*request.Request, *lambda.AliasConfiguration) + + UpdateAlias(*lambda.UpdateAliasInput) (*lambda.AliasConfiguration, error) + + UpdateEventSourceMappingRequest(*lambda.UpdateEventSourceMappingInput) (*request.Request, *lambda.EventSourceMappingConfiguration) + + UpdateEventSourceMapping(*lambda.UpdateEventSourceMappingInput) (*lambda.EventSourceMappingConfiguration, error) + + UpdateFunctionCodeRequest(*lambda.UpdateFunctionCodeInput) (*request.Request, *lambda.FunctionConfiguration) + + UpdateFunctionCode(*lambda.UpdateFunctionCodeInput) (*lambda.FunctionConfiguration, error) + + UpdateFunctionConfigurationRequest(*lambda.UpdateFunctionConfigurationInput) (*request.Request, *lambda.FunctionConfiguration) + + UpdateFunctionConfiguration(*lambda.UpdateFunctionConfigurationInput) (*lambda.FunctionConfiguration, error) +} + +var _ LambdaAPI = (*lambda.Lambda)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/lambda/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,92 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package lambda + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Overview +// +// This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides +// additional information. For the service overview, go to What is AWS Lambda +// (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html), and for information +// about how the service works, go to AWS Lambda: How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html) +// in the AWS Lambda Developer Guide. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Lambda struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "lambda" + +// New creates a new instance of the Lambda client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Lambda client from just a session. +// svc := lambda.New(mySession) +// +// // Create a Lambda client with additional configuration +// svc := lambda.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lambda { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Lambda { + svc := &Lambda{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-03-31", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Lambda operation and runs any +// custom request initialization. +func (c *Lambda) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3690 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package machinelearning provides a client for Amazon Machine Learning. 
+package machinelearning + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateBatchPrediction = "CreateBatchPrediction" + +// CreateBatchPredictionRequest generates a request for the CreateBatchPrediction operation. +func (c *MachineLearning) CreateBatchPredictionRequest(input *CreateBatchPredictionInput) (req *request.Request, output *CreateBatchPredictionOutput) { + op := &request.Operation{ + Name: opCreateBatchPrediction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBatchPredictionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBatchPredictionOutput{} + req.Data = output + return +} + +// Generates predictions for a group of observations. The observations to process +// exist in one or more data files referenced by a DataSource. This operation +// creates a new BatchPrediction, and uses an MLModel and the data files referenced +// by the DataSource as information sources. +// +// CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, +// Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction +// status to PENDING. After the BatchPrediction completes, Amazon ML sets the +// status to COMPLETED. +// +// You can poll for status updates by using the GetBatchPrediction operation +// and checking the Status parameter of the result. After the COMPLETED status +// appears, the results are available in the location specified by the OutputUri +// parameter. +func (c *MachineLearning) CreateBatchPrediction(input *CreateBatchPredictionInput) (*CreateBatchPredictionOutput, error) { + req, out := c.CreateBatchPredictionRequest(input) + err := req.Send() + return out, err +} + +const opCreateDataSourceFromRDS = "CreateDataSourceFromRDS" + +// CreateDataSourceFromRDSRequest generates a request for the CreateDataSourceFromRDS operation. 
+func (c *MachineLearning) CreateDataSourceFromRDSRequest(input *CreateDataSourceFromRDSInput) (req *request.Request, output *CreateDataSourceFromRDSOutput) { + op := &request.Operation{ + Name: opCreateDataSourceFromRDS, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDataSourceFromRDSInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDataSourceFromRDSOutput{} + req.Data = output + return +} + +// Creates a DataSource object from an Amazon Relational Database Service (http://aws.amazon.com/rds/) +// (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, +// CreateEvaluation, or CreateBatchPrediction operations. +// +// CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, +// Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource +// status to PENDING. After the DataSource is created and ready for use, Amazon +// ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING +// status can only be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction +// operations. +// +// If Amazon ML cannot accept the input source, it sets the Status parameter +// to FAILED and includes an error message in the Message attribute of the GetDataSource +// operation response. +func (c *MachineLearning) CreateDataSourceFromRDS(input *CreateDataSourceFromRDSInput) (*CreateDataSourceFromRDSOutput, error) { + req, out := c.CreateDataSourceFromRDSRequest(input) + err := req.Send() + return out, err +} + +const opCreateDataSourceFromRedshift = "CreateDataSourceFromRedshift" + +// CreateDataSourceFromRedshiftRequest generates a request for the CreateDataSourceFromRedshift operation. 
+func (c *MachineLearning) CreateDataSourceFromRedshiftRequest(input *CreateDataSourceFromRedshiftInput) (req *request.Request, output *CreateDataSourceFromRedshiftOutput) { + op := &request.Operation{ + Name: opCreateDataSourceFromRedshift, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDataSourceFromRedshiftInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDataSourceFromRedshiftOutput{} + req.Data = output + return +} + +// Creates a DataSource from Amazon Redshift (http://aws.amazon.com/redshift/). +// A DataSource references data that can be used to perform either CreateMLModel, +// CreateEvaluation or CreateBatchPrediction operations. +// +// CreateDataSourceFromRedshift is an asynchronous operation. In response to +// CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately +// returns and sets the DataSource status to PENDING. After the DataSource is +// created and ready for use, Amazon ML sets the Status parameter to COMPLETED. +// DataSource in COMPLETED or PENDING status can only be used to perform CreateMLModel, +// CreateEvaluation, or CreateBatchPrediction operations. +// +// If Amazon ML cannot accept the input source, it sets the Status parameter +// to FAILED and includes an error message in the Message attribute of the GetDataSource +// operation response. +// +// The observations should exist in the database hosted on an Amazon Redshift +// cluster and should be specified by a SelectSqlQuery. Amazon ML executes +// Unload (http://docs.aws.amazon.com/redshift/latest/dg/t_Unloading_tables.html) +// command in Amazon Redshift to transfer the result set of SelectSqlQuery to +// S3StagingLocation. +// +// After the DataSource is created, it's ready for use in evaluations and batch +// predictions. If you plan to use the DataSource to train an MLModel, the DataSource +// requires another item -- a recipe. 
A recipe describes the observation variables +// that participate in training an MLModel. A recipe describes how each input +// variable will be used in training. Will the variable be included or excluded +// from training? Will the variable be manipulated, for example, combined with +// another variable or split apart into word combinations? The recipe provides +// answers to these questions. For more information, see the Amazon Machine +// Learning Developer Guide. +func (c *MachineLearning) CreateDataSourceFromRedshift(input *CreateDataSourceFromRedshiftInput) (*CreateDataSourceFromRedshiftOutput, error) { + req, out := c.CreateDataSourceFromRedshiftRequest(input) + err := req.Send() + return out, err +} + +const opCreateDataSourceFromS3 = "CreateDataSourceFromS3" + +// CreateDataSourceFromS3Request generates a request for the CreateDataSourceFromS3 operation. +func (c *MachineLearning) CreateDataSourceFromS3Request(input *CreateDataSourceFromS3Input) (req *request.Request, output *CreateDataSourceFromS3Output) { + op := &request.Operation{ + Name: opCreateDataSourceFromS3, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDataSourceFromS3Input{} + } + + req = c.newRequest(op, input, output) + output = &CreateDataSourceFromS3Output{} + req.Data = output + return +} + +// Creates a DataSource object. A DataSource references data that can be used +// to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations. +// +// CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, +// Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource +// status to PENDING. After the DataSource is created and ready for use, Amazon +// ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or PENDING +// status can only be used to perform CreateMLModel, CreateEvaluation or CreateBatchPrediction +// operations. 
+// +// If Amazon ML cannot accept the input source, it sets the Status parameter +// to FAILED and includes an error message in the Message attribute of the GetDataSource +// operation response. +// +// The observation data used in a DataSource should be ready to use; that is, +// it should have a consistent structure, and missing data values should be +// kept to a minimum. The observation data must reside in one or more CSV files +// in an Amazon Simple Storage Service (Amazon S3) bucket, along with a schema +// that describes the data items by name and type. The same schema must be used +// for all of the data files referenced by the DataSource. +// +// After the DataSource has been created, it's ready to use in evaluations +// and batch predictions. If you plan to use the DataSource to train an MLModel, +// the DataSource requires another item: a recipe. A recipe describes the observation +// variables that participate in training an MLModel. A recipe describes how +// each input variable will be used in training. Will the variable be included +// or excluded from training? Will the variable be manipulated, for example, +// combined with another variable, or split apart into word combinations? The +// recipe provides answers to these questions. For more information, see the +// Amazon Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg). +func (c *MachineLearning) CreateDataSourceFromS3(input *CreateDataSourceFromS3Input) (*CreateDataSourceFromS3Output, error) { + req, out := c.CreateDataSourceFromS3Request(input) + err := req.Send() + return out, err +} + +const opCreateEvaluation = "CreateEvaluation" + +// CreateEvaluationRequest generates a request for the CreateEvaluation operation. 
+func (c *MachineLearning) CreateEvaluationRequest(input *CreateEvaluationInput) (req *request.Request, output *CreateEvaluationOutput) { + op := &request.Operation{ + Name: opCreateEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEvaluationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateEvaluationOutput{} + req.Data = output + return +} + +// Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set +// of observations associated to a DataSource. Like a DataSource for an MLModel, +// the DataSource for an Evaluation contains values for the Target Variable. +// The Evaluation compares the predicted result for each observation to the +// actual outcome and provides a summary so that you know how effective the +// MLModel functions on the test data. Evaluation generates a relevant performance +// metric such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on +// the corresponding MLModelType: BINARY, REGRESSION or MULTICLASS. +// +// CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, +// Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation +// status to PENDING. After the Evaluation is created and ready for use, Amazon +// ML sets the status to COMPLETED. +// +// You can use the GetEvaluation operation to check progress of the evaluation +// during the creation operation. +func (c *MachineLearning) CreateEvaluation(input *CreateEvaluationInput) (*CreateEvaluationOutput, error) { + req, out := c.CreateEvaluationRequest(input) + err := req.Send() + return out, err +} + +const opCreateMLModel = "CreateMLModel" + +// CreateMLModelRequest generates a request for the CreateMLModel operation. 
+func (c *MachineLearning) CreateMLModelRequest(input *CreateMLModelInput) (req *request.Request, output *CreateMLModelOutput) { + op := &request.Operation{ + Name: opCreateMLModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateMLModelInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMLModelOutput{} + req.Data = output + return +} + +// Creates a new MLModel using the data files and the recipe as information +// sources. +// +// An MLModel is nearly immutable. Users can only update the MLModelName and +// the ScoreThreshold in an MLModel without creating a new MLModel. +// +// CreateMLModel is an asynchronous operation. In response to CreateMLModel, +// Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel +// status to PENDING. After the MLModel is created and ready for use, Amazon +// ML sets the status to COMPLETED. +// +// You can use the GetMLModel operation to check progress of the MLModel during +// the creation operation. +// +// CreateMLModel requires a DataSource with computed statistics, which can +// be created by setting ComputeStatistics to true in CreateDataSourceFromRDS, +// CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations. +func (c *MachineLearning) CreateMLModel(input *CreateMLModelInput) (*CreateMLModelOutput, error) { + req, out := c.CreateMLModelRequest(input) + err := req.Send() + return out, err +} + +const opCreateRealtimeEndpoint = "CreateRealtimeEndpoint" + +// CreateRealtimeEndpointRequest generates a request for the CreateRealtimeEndpoint operation. 
+func (c *MachineLearning) CreateRealtimeEndpointRequest(input *CreateRealtimeEndpointInput) (req *request.Request, output *CreateRealtimeEndpointOutput) { + op := &request.Operation{ + Name: opCreateRealtimeEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRealtimeEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRealtimeEndpointOutput{} + req.Data = output + return +} + +// Creates a real-time endpoint for the MLModel. The endpoint contains the URI +// of the MLModel; that is, the location to send real-time prediction requests +// for the specified MLModel. +func (c *MachineLearning) CreateRealtimeEndpoint(input *CreateRealtimeEndpointInput) (*CreateRealtimeEndpointOutput, error) { + req, out := c.CreateRealtimeEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBatchPrediction = "DeleteBatchPrediction" + +// DeleteBatchPredictionRequest generates a request for the DeleteBatchPrediction operation. +func (c *MachineLearning) DeleteBatchPredictionRequest(input *DeleteBatchPredictionInput) (req *request.Request, output *DeleteBatchPredictionOutput) { + op := &request.Operation{ + Name: opDeleteBatchPrediction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBatchPredictionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBatchPredictionOutput{} + req.Data = output + return +} + +// Assigns the DELETED status to a BatchPrediction, rendering it unusable. +// +// After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction +// operation to verify that the status of the BatchPrediction changed to DELETED. +// +// Caution: The result of the DeleteBatchPrediction operation is irreversible. 
+func (c *MachineLearning) DeleteBatchPrediction(input *DeleteBatchPredictionInput) (*DeleteBatchPredictionOutput, error) { + req, out := c.DeleteBatchPredictionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDataSource = "DeleteDataSource" + +// DeleteDataSourceRequest generates a request for the DeleteDataSource operation. +func (c *MachineLearning) DeleteDataSourceRequest(input *DeleteDataSourceInput) (req *request.Request, output *DeleteDataSourceOutput) { + op := &request.Operation{ + Name: opDeleteDataSource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDataSourceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDataSourceOutput{} + req.Data = output + return +} + +// Assigns the DELETED status to a DataSource, rendering it unusable. +// +// After using the DeleteDataSource operation, you can use the GetDataSource +// operation to verify that the status of the DataSource changed to DELETED. +// +// Caution: The results of the DeleteDataSource operation are irreversible. +func (c *MachineLearning) DeleteDataSource(input *DeleteDataSourceInput) (*DeleteDataSourceOutput, error) { + req, out := c.DeleteDataSourceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEvaluation = "DeleteEvaluation" + +// DeleteEvaluationRequest generates a request for the DeleteEvaluation operation. +func (c *MachineLearning) DeleteEvaluationRequest(input *DeleteEvaluationInput) (req *request.Request, output *DeleteEvaluationOutput) { + op := &request.Operation{ + Name: opDeleteEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEvaluationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteEvaluationOutput{} + req.Data = output + return +} + +// Assigns the DELETED status to an Evaluation, rendering it unusable. 
+// +// After invoking the DeleteEvaluation operation, you can use the GetEvaluation +// operation to verify that the status of the Evaluation changed to DELETED. +// +// Caution: The results of the DeleteEvaluation operation are irreversible. +func (c *MachineLearning) DeleteEvaluation(input *DeleteEvaluationInput) (*DeleteEvaluationOutput, error) { + req, out := c.DeleteEvaluationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMLModel = "DeleteMLModel" + +// DeleteMLModelRequest generates a request for the DeleteMLModel operation. +func (c *MachineLearning) DeleteMLModelRequest(input *DeleteMLModelInput) (req *request.Request, output *DeleteMLModelOutput) { + op := &request.Operation{ + Name: opDeleteMLModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMLModelInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteMLModelOutput{} + req.Data = output + return +} + +// Assigns the DELETED status to an MLModel, rendering it unusable. +// +// After using the DeleteMLModel operation, you can use the GetMLModel operation +// to verify that the status of the MLModel changed to DELETED. +// +// Caution: The result of the DeleteMLModel operation is irreversible. +func (c *MachineLearning) DeleteMLModel(input *DeleteMLModelInput) (*DeleteMLModelOutput, error) { + req, out := c.DeleteMLModelRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRealtimeEndpoint = "DeleteRealtimeEndpoint" + +// DeleteRealtimeEndpointRequest generates a request for the DeleteRealtimeEndpoint operation. 
+func (c *MachineLearning) DeleteRealtimeEndpointRequest(input *DeleteRealtimeEndpointInput) (req *request.Request, output *DeleteRealtimeEndpointOutput) { + op := &request.Operation{ + Name: opDeleteRealtimeEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRealtimeEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRealtimeEndpointOutput{} + req.Data = output + return +} + +// Deletes a real time endpoint of an MLModel. +func (c *MachineLearning) DeleteRealtimeEndpoint(input *DeleteRealtimeEndpointInput) (*DeleteRealtimeEndpointOutput, error) { + req, out := c.DeleteRealtimeEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDescribeBatchPredictions = "DescribeBatchPredictions" + +// DescribeBatchPredictionsRequest generates a request for the DescribeBatchPredictions operation. +func (c *MachineLearning) DescribeBatchPredictionsRequest(input *DescribeBatchPredictionsInput) (req *request.Request, output *DescribeBatchPredictionsOutput) { + op := &request.Operation{ + Name: opDescribeBatchPredictions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeBatchPredictionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeBatchPredictionsOutput{} + req.Data = output + return +} + +// Returns a list of BatchPrediction operations that match the search criteria +// in the request. 
+func (c *MachineLearning) DescribeBatchPredictions(input *DescribeBatchPredictionsInput) (*DescribeBatchPredictionsOutput, error) { + req, out := c.DescribeBatchPredictionsRequest(input) + err := req.Send() + return out, err +} + +func (c *MachineLearning) DescribeBatchPredictionsPages(input *DescribeBatchPredictionsInput, fn func(p *DescribeBatchPredictionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeBatchPredictionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeBatchPredictionsOutput), lastPage) + }) +} + +const opDescribeDataSources = "DescribeDataSources" + +// DescribeDataSourcesRequest generates a request for the DescribeDataSources operation. +func (c *MachineLearning) DescribeDataSourcesRequest(input *DescribeDataSourcesInput) (req *request.Request, output *DescribeDataSourcesOutput) { + op := &request.Operation{ + Name: opDescribeDataSources, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDataSourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDataSourcesOutput{} + req.Data = output + return +} + +// Returns a list of DataSource that match the search criteria in the request. 
+func (c *MachineLearning) DescribeDataSources(input *DescribeDataSourcesInput) (*DescribeDataSourcesOutput, error) { + req, out := c.DescribeDataSourcesRequest(input) + err := req.Send() + return out, err +} + +func (c *MachineLearning) DescribeDataSourcesPages(input *DescribeDataSourcesInput, fn func(p *DescribeDataSourcesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDataSourcesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDataSourcesOutput), lastPage) + }) +} + +const opDescribeEvaluations = "DescribeEvaluations" + +// DescribeEvaluationsRequest generates a request for the DescribeEvaluations operation. +func (c *MachineLearning) DescribeEvaluationsRequest(input *DescribeEvaluationsInput) (req *request.Request, output *DescribeEvaluationsOutput) { + op := &request.Operation{ + Name: opDescribeEvaluations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEvaluationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEvaluationsOutput{} + req.Data = output + return +} + +// Returns a list of DescribeEvaluations that match the search criteria in the +// request. 
+func (c *MachineLearning) DescribeEvaluations(input *DescribeEvaluationsInput) (*DescribeEvaluationsOutput, error) { + req, out := c.DescribeEvaluationsRequest(input) + err := req.Send() + return out, err +} + +func (c *MachineLearning) DescribeEvaluationsPages(input *DescribeEvaluationsInput, fn func(p *DescribeEvaluationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEvaluationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEvaluationsOutput), lastPage) + }) +} + +const opDescribeMLModels = "DescribeMLModels" + +// DescribeMLModelsRequest generates a request for the DescribeMLModels operation. +func (c *MachineLearning) DescribeMLModelsRequest(input *DescribeMLModelsInput) (req *request.Request, output *DescribeMLModelsOutput) { + op := &request.Operation{ + Name: opDescribeMLModels, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMLModelsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMLModelsOutput{} + req.Data = output + return +} + +// Returns a list of MLModel that match the search criteria in the request. 
+func (c *MachineLearning) DescribeMLModels(input *DescribeMLModelsInput) (*DescribeMLModelsOutput, error) { + req, out := c.DescribeMLModelsRequest(input) + err := req.Send() + return out, err +} + +func (c *MachineLearning) DescribeMLModelsPages(input *DescribeMLModelsInput, fn func(p *DescribeMLModelsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeMLModelsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeMLModelsOutput), lastPage) + }) +} + +const opGetBatchPrediction = "GetBatchPrediction" + +// GetBatchPredictionRequest generates a request for the GetBatchPrediction operation. +func (c *MachineLearning) GetBatchPredictionRequest(input *GetBatchPredictionInput) (req *request.Request, output *GetBatchPredictionOutput) { + op := &request.Operation{ + Name: opGetBatchPrediction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBatchPredictionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBatchPredictionOutput{} + req.Data = output + return +} + +// Returns a BatchPrediction that includes detailed metadata, status, and data +// file information for a Batch Prediction request. +func (c *MachineLearning) GetBatchPrediction(input *GetBatchPredictionInput) (*GetBatchPredictionOutput, error) { + req, out := c.GetBatchPredictionRequest(input) + err := req.Send() + return out, err +} + +const opGetDataSource = "GetDataSource" + +// GetDataSourceRequest generates a request for the GetDataSource operation. 
+func (c *MachineLearning) GetDataSourceRequest(input *GetDataSourceInput) (req *request.Request, output *GetDataSourceOutput) { + op := &request.Operation{ + Name: opGetDataSource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDataSourceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDataSourceOutput{} + req.Data = output + return +} + +// Returns a DataSource that includes metadata and data file information, as +// well as the current status of the DataSource. +// +// GetDataSource provides results in normal or verbose format. The verbose +// format adds the schema description and the list of files pointed to by the +// DataSource to the normal format. +func (c *MachineLearning) GetDataSource(input *GetDataSourceInput) (*GetDataSourceOutput, error) { + req, out := c.GetDataSourceRequest(input) + err := req.Send() + return out, err +} + +const opGetEvaluation = "GetEvaluation" + +// GetEvaluationRequest generates a request for the GetEvaluation operation. +func (c *MachineLearning) GetEvaluationRequest(input *GetEvaluationInput) (req *request.Request, output *GetEvaluationOutput) { + op := &request.Operation{ + Name: opGetEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEvaluationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetEvaluationOutput{} + req.Data = output + return +} + +// Returns an Evaluation that includes metadata as well as the current status +// of the Evaluation. +func (c *MachineLearning) GetEvaluation(input *GetEvaluationInput) (*GetEvaluationOutput, error) { + req, out := c.GetEvaluationRequest(input) + err := req.Send() + return out, err +} + +const opGetMLModel = "GetMLModel" + +// GetMLModelRequest generates a request for the GetMLModel operation. 
+func (c *MachineLearning) GetMLModelRequest(input *GetMLModelInput) (req *request.Request, output *GetMLModelOutput) { + op := &request.Operation{ + Name: opGetMLModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMLModelInput{} + } + + req = c.newRequest(op, input, output) + output = &GetMLModelOutput{} + req.Data = output + return +} + +// Returns an MLModel that includes detailed metadata, and data source information +// as well as the current status of the MLModel. +// +// GetMLModel provides results in normal or verbose format. +func (c *MachineLearning) GetMLModel(input *GetMLModelInput) (*GetMLModelOutput, error) { + req, out := c.GetMLModelRequest(input) + err := req.Send() + return out, err +} + +const opPredict = "Predict" + +// PredictRequest generates a request for the Predict operation. +func (c *MachineLearning) PredictRequest(input *PredictInput) (req *request.Request, output *PredictOutput) { + op := &request.Operation{ + Name: opPredict, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PredictInput{} + } + + req = c.newRequest(op, input, output) + output = &PredictOutput{} + req.Data = output + return +} + +// Generates a prediction for the observation using the specified ML Model. +// +// Note Not all response parameters will be populated. Whether a response parameter +// is populated depends on the type of model requested. +func (c *MachineLearning) Predict(input *PredictInput) (*PredictOutput, error) { + req, out := c.PredictRequest(input) + err := req.Send() + return out, err +} + +const opUpdateBatchPrediction = "UpdateBatchPrediction" + +// UpdateBatchPredictionRequest generates a request for the UpdateBatchPrediction operation. 
+func (c *MachineLearning) UpdateBatchPredictionRequest(input *UpdateBatchPredictionInput) (req *request.Request, output *UpdateBatchPredictionOutput) { + op := &request.Operation{ + Name: opUpdateBatchPrediction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBatchPredictionInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateBatchPredictionOutput{} + req.Data = output + return +} + +// Updates the BatchPredictionName of a BatchPrediction. +// +// You can use the GetBatchPrediction operation to view the contents of the +// updated data element. +func (c *MachineLearning) UpdateBatchPrediction(input *UpdateBatchPredictionInput) (*UpdateBatchPredictionOutput, error) { + req, out := c.UpdateBatchPredictionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDataSource = "UpdateDataSource" + +// UpdateDataSourceRequest generates a request for the UpdateDataSource operation. +func (c *MachineLearning) UpdateDataSourceRequest(input *UpdateDataSourceInput) (req *request.Request, output *UpdateDataSourceOutput) { + op := &request.Operation{ + Name: opUpdateDataSource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDataSourceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDataSourceOutput{} + req.Data = output + return +} + +// Updates the DataSourceName of a DataSource. +// +// You can use the GetDataSource operation to view the contents of the updated +// data element. +func (c *MachineLearning) UpdateDataSource(input *UpdateDataSourceInput) (*UpdateDataSourceOutput, error) { + req, out := c.UpdateDataSourceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateEvaluation = "UpdateEvaluation" + +// UpdateEvaluationRequest generates a request for the UpdateEvaluation operation. 
+func (c *MachineLearning) UpdateEvaluationRequest(input *UpdateEvaluationInput) (req *request.Request, output *UpdateEvaluationOutput) { + op := &request.Operation{ + Name: opUpdateEvaluation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateEvaluationInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateEvaluationOutput{} + req.Data = output + return +} + +// Updates the EvaluationName of an Evaluation. +// +// You can use the GetEvaluation operation to view the contents of the updated +// data element. +func (c *MachineLearning) UpdateEvaluation(input *UpdateEvaluationInput) (*UpdateEvaluationOutput, error) { + req, out := c.UpdateEvaluationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMLModel = "UpdateMLModel" + +// UpdateMLModelRequest generates a request for the UpdateMLModel operation. +func (c *MachineLearning) UpdateMLModelRequest(input *UpdateMLModelInput) (req *request.Request, output *UpdateMLModelOutput) { + op := &request.Operation{ + Name: opUpdateMLModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMLModelInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateMLModelOutput{} + req.Data = output + return +} + +// Updates the MLModelName and the ScoreThreshold of an MLModel. +// +// You can use the GetMLModel operation to view the contents of the updated +// data element. +func (c *MachineLearning) UpdateMLModel(input *UpdateMLModelInput) (*UpdateMLModelOutput, error) { + req, out := c.UpdateMLModelRequest(input) + err := req.Send() + return out, err +} + +// Represents the output of GetBatchPrediction operation. +// +// The content consists of the detailed metadata, the status, and the data +// file information of a Batch Prediction. +type BatchPrediction struct { + _ struct{} `type:"structure"` + + // The ID of the DataSource that points to the group of observations to predict. 
+ BatchPredictionDataSourceId *string `min:"1" type:"string"` + + // The ID assigned to the BatchPrediction at creation. This value should be + // identical to the value of the BatchPredictionID in the request. + BatchPredictionId *string `min:"1" type:"string"` + + // The time that the BatchPrediction was created. The time is expressed in epoch + // time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account that invoked the BatchPrediction. The account type can + // be either an AWS root account or an AWS Identity and Access Management (IAM) + // user account. + CreatedByIamUser *string `type:"string"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + InputDataLocationS3 *string `type:"string"` + + // The time of the most recent edit to the BatchPrediction. The time is expressed + // in epoch time. + LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The ID of the MLModel that generated predictions for the BatchPrediction + // request. + MLModelId *string `min:"1" type:"string"` + + // A description of the most recent details about processing the batch prediction + // request. + Message *string `type:"string"` + + // A user-supplied name or description of the BatchPrediction. + Name *string `type:"string"` + + // The location of an Amazon S3 bucket or directory to receive the operation + // results. The following substrings are not allowed in the s3 key portion of + // the "outputURI" field: ':', '//', '/./', '/../'. + OutputUri *string `type:"string"` + + // The status of the BatchPrediction. This element can have one of the following + // values: + // + // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate + // predictions for a batch of observations. INPROGRESS - The process is underway. + // FAILED - The request to peform a batch prediction did not run to completion. + // It is not usable. 
COMPLETED - The batch prediction process completed successfully. + // DELETED - The BatchPrediction is marked as deleted. It is not usable. + Status *string `type:"string" enum:"EntityStatus"` +} + +// String returns the string representation +func (s BatchPrediction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPrediction) GoString() string { + return s.String() +} + +type CreateBatchPredictionInput struct { + _ struct{} `type:"structure"` + + // The ID of the DataSource that points to the group of observations to predict. + BatchPredictionDataSourceId *string `min:"1" type:"string" required:"true"` + + // A user-supplied ID that uniquely identifies the BatchPrediction. + BatchPredictionId *string `min:"1" type:"string" required:"true"` + + // A user-supplied name or description of the BatchPrediction. BatchPredictionName + // can only use the UTF-8 character set. + BatchPredictionName *string `type:"string"` + + // The ID of the MLModel that will generate predictions for the group of observations. + MLModelId *string `min:"1" type:"string" required:"true"` + + // The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory + // to store the batch prediction results. The following substrings are not allowed + // in the s3 key portion of the "outputURI" field: ':', '//', '/./', '/../'. + // + // Amazon ML needs permissions to store and retrieve the logs on your behalf. + // For information about how to set permissions, see the Amazon Machine Learning + // Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg). 
+ OutputUri *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBatchPredictionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBatchPredictionInput) GoString() string { + return s.String() +} + +// Represents the output of a CreateBatchPrediction operation, and is an acknowledgement +// that Amazon ML received the request. +// +// The CreateBatchPrediction operation is asynchronous. You can poll for status +// updates by using the GetBatchPrediction operation and checking the Status +// parameter of the result. +type CreateBatchPredictionOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the BatchPrediction. This value + // is identical to the value of the BatchPredictionId in the request. + BatchPredictionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateBatchPredictionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBatchPredictionOutput) GoString() string { + return s.String() +} + +type CreateDataSourceFromRDSInput struct { + _ struct{} `type:"structure"` + + // The compute statistics for a DataSource. The statistics are generated from + // the observation data referenced by a DataSource. Amazon ML uses the statistics + // internally during an MLModel training. This parameter must be set to true + // if the DataSource needs to be used for MLModel training. + ComputeStatistics *bool `type:"boolean"` + + // A user-supplied ID that uniquely identifies the DataSource. Typically, an + // Amazon Resource Number (ARN) becomes the ID for a DataSource. + DataSourceId *string `min:"1" type:"string" required:"true"` + + // A user-supplied name or description of the DataSource. 
+ DataSourceName *string `type:"string"` + + // The data specification of an Amazon RDS DataSource: + // + // DatabaseInformation - DatabaseName - Name of the Amazon RDS database. + // InstanceIdentifier - Unique identifier for the Amazon RDS database instance. + // + // + // DatabaseCredentials - AWS Identity and Access Management (IAM) credentials + // that are used to connect to the Amazon RDS database. + // + // ResourceRole - Role (DataPipelineDefaultResourceRole) assumed by an Amazon + // Elastic Compute Cloud (EC2) instance to carry out the copy task from Amazon + // RDS to Amazon S3. For more information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html) + // for data pipelines. + // + // ServiceRole - Role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline + // service to monitor the progress of the copy task from Amazon RDS to Amazon + // Simple Storage Service (S3). For more information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html) + // for data pipelines. + // + // SecurityInfo - Security information to use to access an Amazon RDS instance. + // You need to set up appropriate ingress rules for the security entity IDs + // provided to allow access to the Amazon RDS instance. Specify a [SubnetId, + // SecurityGroupIds] pair for a VPC-based Amazon RDS instance. + // + // SelectSqlQuery - Query that is used to retrieve the observation data for + // the Datasource. + // + // S3StagingLocation - Amazon S3 location for staging RDS data. The data retrieved + // from Amazon RDS using SelectSqlQuery is stored in this location. + // + // DataSchemaUri - Amazon S3 location of the DataSchema. + // + // DataSchema - A JSON string representing the schema. This is not required + // if DataSchemaUri is specified. + // + // DataRearrangement - A JSON string representing the splitting requirement + // of a Datasource. 
+ // + // Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" + RDSData *RDSDataSpec `type:"structure" required:"true"` + + // The role that Amazon ML assumes on behalf of the user to create and activate + // a data pipeline in the user’s account and copy data (using the SelectSqlQuery) + // query from Amazon RDS to Amazon S3. + RoleARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDataSourceFromRDSInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSourceFromRDSInput) GoString() string { + return s.String() +} + +// Represents the output of a CreateDataSourceFromRDS operation, and is an acknowledgement +// that Amazon ML received the request. +// +// The CreateDataSourceFromRDS operation is asynchronous. You can poll for +// updates by using the GetBatchPrediction operation and checking the Status +// parameter. You can inspect the Message when Status shows up as FAILED. You +// can also check the progress of the copy operation by going to the DataPipeline +// console and looking up the pipeline using the pipelineId from the describe +// call. +type CreateDataSourceFromRDSOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the datasource. This value should + // be identical to the value of the DataSourceID in the request. + DataSourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateDataSourceFromRDSOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSourceFromRDSOutput) GoString() string { + return s.String() +} + +type CreateDataSourceFromRedshiftInput struct { + _ struct{} `type:"structure"` + + // The compute statistics for a DataSource. The statistics are generated from + // the observation data referenced by a DataSource. 
Amazon ML uses the statistics + // internally during MLModel training. This parameter must be set to true if + // the DataSource needs to be used for MLModel training + ComputeStatistics *bool `type:"boolean"` + + // A user-supplied ID that uniquely identifies the DataSource. + DataSourceId *string `min:"1" type:"string" required:"true"` + + // A user-supplied name or description of the DataSource. + DataSourceName *string `type:"string"` + + // The data specification of an Amazon Redshift DataSource: + // + // DatabaseInformation - DatabaseName - Name of the Amazon Redshift database. + // ClusterIdentifier - Unique ID for the Amazon Redshift cluster. + // + // DatabaseCredentials - AWS Identity abd Access Management (IAM) credentials + // that are used to connect to the Amazon Redshift database. + // + // SelectSqlQuery - Query that is used to retrieve the observation data for + // the Datasource. + // + // S3StagingLocation - Amazon Simple Storage Service (Amazon S3) location for + // staging Amazon Redshift data. The data retrieved from Amazon Relational Database + // Service (Amazon RDS) using SelectSqlQuery is stored in this location. + // + // DataSchemaUri - Amazon S3 location of the DataSchema. + // + // DataSchema - A JSON string representing the schema. This is not required + // if DataSchemaUri is specified. + // + // DataRearrangement - A JSON string representing the splitting requirement + // of a Datasource. + // + // Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" + DataSpec *RedshiftDataSpec `type:"structure" required:"true"` + + // A fully specified role Amazon Resource Name (ARN). 
Amazon ML assumes the + // role on behalf of the user to create the following: + // + // A security group to allow Amazon ML to execute the SelectSqlQuery query + // on an Amazon Redshift cluster + // + // An Amazon S3 bucket policy to grant Amazon ML read/write permissions on + // the S3StagingLocation + RoleARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDataSourceFromRedshiftInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSourceFromRedshiftInput) GoString() string { + return s.String() +} + +// Represents the output of a CreateDataSourceFromRedshift operation, and is +// an acknowledgement that Amazon ML received the request. +// +// The CreateDataSourceFromRedshift operation is asynchronous. You can poll +// for updates by using the GetBatchPrediction operation and checking the Status +// parameter. +type CreateDataSourceFromRedshiftOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the datasource. This value should + // be identical to the value of the DataSourceID in the request. + DataSourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateDataSourceFromRedshiftOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSourceFromRedshiftOutput) GoString() string { + return s.String() +} + +type CreateDataSourceFromS3Input struct { + _ struct{} `type:"structure"` + + // The compute statistics for a DataSource. The statistics are generated from + // the observation data referenced by a DataSource. Amazon ML uses the statistics + // internally during an MLModel training. 
This parameter must be set to true + // if the DataSource needs to be used for MLModel training + ComputeStatistics *bool `type:"boolean"` + + // A user-supplied identifier that uniquely identifies the DataSource. + DataSourceId *string `min:"1" type:"string" required:"true"` + + // A user-supplied name or description of the DataSource. + DataSourceName *string `type:"string"` + + // The data specification of a DataSource: + // + // DataLocationS3 - Amazon Simple Storage Service (Amazon S3) location of + // the observation data. + // + // DataSchemaLocationS3 - Amazon S3 location of the DataSchema. + // + // DataSchema - A JSON string representing the schema. This is not required + // if DataSchemaUri is specified. + // + // DataRearrangement - A JSON string representing the splitting requirement + // of a Datasource. + // + // Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" + DataSpec *S3DataSpec `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDataSourceFromS3Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSourceFromS3Input) GoString() string { + return s.String() +} + +// Represents the output of a CreateDataSourceFromS3 operation, and is an acknowledgement +// that Amazon ML received the request. +// +// The CreateDataSourceFromS3 operation is asynchronous. You can poll for updates +// by using the GetBatchPrediction operation and checking the Status parameter. +type CreateDataSourceFromS3Output struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the datasource. This value should + // be identical to the value of the DataSourceID in the request. 
+ DataSourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateDataSourceFromS3Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSourceFromS3Output) GoString() string { + return s.String() +} + +type CreateEvaluationInput struct { + _ struct{} `type:"structure"` + + // The ID of the DataSource for the evaluation. The schema of the DataSource + // must match the schema used to create the MLModel. + EvaluationDataSourceId *string `min:"1" type:"string" required:"true"` + + // A user-supplied ID that uniquely identifies the Evaluation. + EvaluationId *string `min:"1" type:"string" required:"true"` + + // A user-supplied name or description of the Evaluation. + EvaluationName *string `type:"string"` + + // The ID of the MLModel to evaluate. + // + // The schema used in creating the MLModel must match the schema of the DataSource + // used in the Evaluation. + MLModelId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateEvaluationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEvaluationInput) GoString() string { + return s.String() +} + +// Represents the output of a CreateEvaluation operation, and is an acknowledgement +// that Amazon ML received the request. +// +// CreateEvaluation operation is asynchronous. You can poll for status updates +// by using the GetEvaluation operation and checking the Status parameter. +type CreateEvaluationOutput struct { + _ struct{} `type:"structure"` + + // The user-supplied ID that uniquely identifies the Evaluation. This value + // should be identical to the value of the EvaluationId in the request. 
+ EvaluationId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateEvaluationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEvaluationOutput) GoString() string { + return s.String() +} + +type CreateMLModelInput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the MLModel. + MLModelId *string `min:"1" type:"string" required:"true"` + + // A user-supplied name or description of the MLModel. + MLModelName *string `type:"string"` + + // The category of supervised learning that this MLModel will address. Choose + // from the following types: + // + // Choose REGRESSION if the MLModel will be used to predict a numeric value. + // Choose BINARY if the MLModel result has two possible values. Choose MULTICLASS + // if the MLModel result has a limited number of values. For more information, + // see the Amazon Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg). + MLModelType *string `type:"string" required:"true" enum:"MLModelType"` + + // A list of the training parameters in the MLModel. The list is implemented + // as a map of key/value pairs. + // + // The following is the current set of training parameters: + // + // sgd.l1RegularizationAmount - Coefficient regularization L1 norm. It controls + // overfitting the data by penalizing large coefficients. This tends to drive + // coefficients to zero, resulting in sparse feature set. If you use this parameter, + // start by specifying a small value such as 1.0E-08. + // + // The value is a double that ranges from 0 to MAX_DOUBLE. The default is not + // to use L1 normalization. The parameter cannot be used when L2 is specified. + // Use this parameter sparingly. + // + // sgd.l2RegularizationAmount - Coefficient regularization L2 norm. It controls + // overfitting the data by penalizing large coefficients. 
This tends to drive + // coefficients to small, nonzero values. If you use this parameter, start by + // specifying a small value such as 1.0E-08. + // + // The valuseis a double that ranges from 0 to MAX_DOUBLE. The default is not + // to use L2 normalization. This cannot be used when L1 is specified. Use this + // parameter sparingly. + // + // sgd.maxPasses - Number of times that the training process traverses the + // observations to build the MLModel. The value is an integer that ranges from + // 1 to 10000. The default value is 10. + // + // sgd.maxMLModelSizeInBytes - Maximum allowed size of the model. Depending + // on the input data, the size of the model might affect its performance. + // + // The value is an integer that ranges from 100000 to 2147483648. The default + // value is 33554432. + Parameters map[string]*string `type:"map"` + + // The data recipe for creating MLModel. You must specify either the recipe + // or its URI. If you don’t specify a recipe or its URI, Amazon ML creates a + // default. + Recipe *string `type:"string"` + + // The Amazon Simple Storage Service (Amazon S3) location and file name that + // contains the MLModel recipe. You must specify either the recipe or its URI. + // If you don’t specify a recipe or its URI, Amazon ML creates a default. + RecipeUri *string `type:"string"` + + // The DataSource that points to the training data. + TrainingDataSourceId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateMLModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMLModelInput) GoString() string { + return s.String() +} + +// Represents the output of a CreateMLModel operation, and is an acknowledgement +// that Amazon ML received the request. +// +// The CreateMLModel operation is asynchronous. You can poll for status updates +// by using the GetMLModel operation and checking the Status parameter. 
+type CreateMLModelOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the MLModel. This value should + // be identical to the value of the MLModelId in the request. + MLModelId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateMLModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMLModelOutput) GoString() string { + return s.String() +} + +type CreateRealtimeEndpointInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the MLModel during creation. + MLModelId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRealtimeEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRealtimeEndpointInput) GoString() string { + return s.String() +} + +// Represents the output of an CreateRealtimeEndpoint operation. +// +// The result contains the MLModelId and the endpoint information for the MLModel. +// +// The endpoint information includes the URI of the MLModel; that is, the +// location to send online prediction requests for the specified MLModel. +type CreateRealtimeEndpointOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the MLModel. This value should + // be identical to the value of the MLModelId in the request. + MLModelId *string `min:"1" type:"string"` + + // The endpoint information of the MLModel + RealtimeEndpointInfo *RealtimeEndpointInfo `type:"structure"` +} + +// String returns the string representation +func (s CreateRealtimeEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRealtimeEndpointOutput) GoString() string { + return s.String() +} + +// Represents the output of the GetDataSource operation. 
+// +// The content consists of the detailed metadata and data file information +// and the current status of the DataSource. +type DataSource struct { + _ struct{} `type:"structure"` + + // The parameter is true if statistics need to be generated from the observation + // data. + ComputeStatistics *bool `type:"boolean"` + + // The time that the DataSource was created. The time is expressed in epoch + // time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account from which the DataSource was created. The account type + // can be either an AWS root account or an AWS Identity and Access Management + // (IAM) user account. + CreatedByIamUser *string `type:"string"` + + // The location and name of the data in Amazon Simple Storage Service (Amazon + // S3) that is used by a DataSource. + DataLocationS3 *string `type:"string"` + + // A JSON string that represents the splitting requirement of a Datasource. + DataRearrangement *string `type:"string"` + + // The total number of observations contained in the data files that the DataSource + // references. + DataSizeInBytes *int64 `type:"long"` + + // The ID that is assigned to the DataSource during creation. + DataSourceId *string `min:"1" type:"string"` + + // The time of the most recent edit to the BatchPrediction. The time is expressed + // in epoch time. + LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A description of the most recent details about creating the DataSource. + Message *string `type:"string"` + + // A user-supplied name or description of the DataSource. + Name *string `type:"string"` + + // The number of data files referenced by the DataSource. + NumberOfFiles *int64 `type:"long"` + + // The datasource details that are specific to Amazon RDS. + RDSMetadata *RDSMetadata `type:"structure"` + + // Describes the DataSource details specific to Amazon Redshift. 
+ RedshiftMetadata *RedshiftMetadata `type:"structure"` + + // The Amazon Resource Name (ARN) of an AWS IAM Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts) + // such as the following: arn:aws:iam::account:role/rolename. + RoleARN *string `min:"1" type:"string"` + + // The current status of the DataSource. This element can have one of the following + // values: + // + // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create + // a DataSource. INPROGRESS - The creation process is underway. FAILED - The + // request to create a DataSource did not run to completion. It is not usable. + // COMPLETED - The creation process completed successfully. DELETED - The DataSource + // is marked as deleted. It is not usable. + Status *string `type:"string" enum:"EntityStatus"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSource) GoString() string { + return s.String() +} + +type DeleteBatchPredictionInput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the BatchPrediction. + BatchPredictionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBatchPredictionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBatchPredictionInput) GoString() string { + return s.String() +} + +// Represents the output of a DeleteBatchPrediction operation. +// +// You can use the GetBatchPrediction operation and check the value of the +// Status parameter to see whether a BatchPrediction is marked as DELETED. +type DeleteBatchPredictionOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the BatchPrediction. 
This value + // should be identical to the value of the BatchPredictionID in the request. + BatchPredictionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteBatchPredictionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBatchPredictionOutput) GoString() string { + return s.String() +} + +type DeleteDataSourceInput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the DataSource. + DataSourceId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDataSourceInput) GoString() string { + return s.String() +} + +// Represents the output of a DeleteDataSource operation. +type DeleteDataSourceOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the DataSource. This value should + // be identical to the value of the DataSourceID in the request. + DataSourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDataSourceOutput) GoString() string { + return s.String() +} + +type DeleteEvaluationInput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the Evaluation to delete. + EvaluationId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEvaluationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEvaluationInput) GoString() string { + return s.String() +} + +// Represents the output of a DeleteEvaluation operation. 
The output indicates +// that Amazon Machine Learning (Amazon ML) received the request. +// +// You can use the GetEvaluation operation and check the value of the Status +// parameter to see whether an Evaluation is marked as DELETED. +type DeleteEvaluationOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the Evaluation. This value should + // be identical to the value of the EvaluationId in the request. + EvaluationId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteEvaluationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEvaluationOutput) GoString() string { + return s.String() +} + +type DeleteMLModelInput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the MLModel. + MLModelId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMLModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMLModelInput) GoString() string { + return s.String() +} + +// Represents the output of a DeleteMLModel operation. +// +// You can use the GetMLModel operation and check the value of the Status parameter +// to see whether an MLModel is marked as DELETED. +type DeleteMLModelOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the MLModel. This value should + // be identical to the value of the MLModelID in the request. 
+ MLModelId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteMLModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMLModelOutput) GoString() string { + return s.String() +} + +type DeleteRealtimeEndpointInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the MLModel during creation. + MLModelId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRealtimeEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRealtimeEndpointInput) GoString() string { + return s.String() +} + +// Represents the output of an DeleteRealtimeEndpoint operation. +// +// The result contains the MLModelId and the endpoint information for the MLModel. +type DeleteRealtimeEndpointOutput struct { + _ struct{} `type:"structure"` + + // A user-supplied ID that uniquely identifies the MLModel. This value should + // be identical to the value of the MLModelId in the request. + MLModelId *string `min:"1" type:"string"` + + // The endpoint information of the MLModel + RealtimeEndpointInfo *RealtimeEndpointInfo `type:"structure"` +} + +// String returns the string representation +func (s DeleteRealtimeEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRealtimeEndpointOutput) GoString() string { + return s.String() +} + +type DescribeBatchPredictionsInput struct { + _ struct{} `type:"structure"` + + // The equal to operator. The BatchPrediction results will have FilterVariable + // values that exactly match the value specified with EQ. + EQ *string `type:"string"` + + // Use one of the following variables to filter a list of BatchPrediction: + // + // CreatedAt - Sets the search criteria to the BatchPrediction creation date. 
+ // Status - Sets the search criteria to the BatchPrediction status. Name - + // Sets the search criteria to the contents of the BatchPrediction Name. IAMUser + // - Sets the search criteria to the user account that invoked the BatchPrediction + // creation. MLModelId - Sets the search criteria to the MLModel used in the + // BatchPrediction. DataSourceId - Sets the search criteria to the DataSource + // used in the BatchPrediction. DataURI - Sets the search criteria to the data + // file(s) used in the BatchPrediction. The URL can identify either a file or + // an Amazon Simple Storage Solution (Amazon S3) bucket or directory. + FilterVariable *string `type:"string" enum:"BatchPredictionFilterVariable"` + + // The greater than or equal to operator. The BatchPrediction results will have + // FilterVariable values that are greater than or equal to the value specified + // with GE. + GE *string `type:"string"` + + // The greater than operator. The BatchPrediction results will have FilterVariable + // values that are greater than the value specified with GT. + GT *string `type:"string"` + + // The less than or equal to operator. The BatchPrediction results will have + // FilterVariable values that are less than or equal to the value specified + // with LE. + LE *string `type:"string"` + + // The less than operator. The BatchPrediction results will have FilterVariable + // values that are less than the value specified with LT. + LT *string `type:"string"` + + // The number of pages of information to include in the result. The range of + // acceptable values is 1 through 100. The default value is 100. + Limit *int64 `min:"1" type:"integer"` + + // The not equal to operator. The BatchPrediction results will have FilterVariable + // values not equal to the value specified with NE. + NE *string `type:"string"` + + // An ID of the page in the paginated results. 
+ NextToken *string `type:"string"` + + // A string that is found at the beginning of a variable, such as Name or Id. + // + // For example, a Batch Prediction operation could have the Name 2014-09-09-HolidayGiftMailer. + // To search for this BatchPrediction, select Name for the FilterVariable and + // any of the following strings for the Prefix: + // + // 2014-09 + // + // 2014-09-09 + // + // 2014-09-09-Holiday + Prefix *string `type:"string"` + + // A two-value parameter that determines the sequence of the resulting list + // of MLModels. + // + // asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges + // the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable. + SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s DescribeBatchPredictionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBatchPredictionsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeBatchPredictions operation. The content +// is essentially a list of BatchPredictions. +type DescribeBatchPredictionsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the next page in the paginated results that indicates at least + // one more page follows. + NextToken *string `type:"string"` + + // A list of BatchPrediction objects that meet the search criteria. + Results []*BatchPrediction `type:"list"` +} + +// String returns the string representation +func (s DescribeBatchPredictionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBatchPredictionsOutput) GoString() string { + return s.String() +} + +type DescribeDataSourcesInput struct { + _ struct{} `type:"structure"` + + // The equal to operator. The DataSource results will have FilterVariable values + // that exactly match the value specified with EQ. 
+ EQ *string `type:"string"` + + // Use one of the following variables to filter a list of DataSource: + // + // CreatedAt - Sets the search criteria to DataSource creation dates. Status + // - Sets the search criteria to DataSource statuses. Name - Sets the search + // criteria to the contents of DataSource Name. DataUri - Sets the search + // criteria to the URI of data files used to create the DataSource. The URI + // can identify either a file or an Amazon Simple Storage Service (Amazon S3) + // bucket or directory. IAMUser - Sets the search criteria to the user account + // that invoked the DataSource creation. + FilterVariable *string `type:"string" enum:"DataSourceFilterVariable"` + + // The greater than or equal to operator. The DataSource results will have FilterVariable + // values that are greater than or equal to the value specified with GE. + GE *string `type:"string"` + + // The greater than operator. The DataSource results will have FilterVariable + // values that are greater than the value specified with GT. + GT *string `type:"string"` + + // The less than or equal to operator. The DataSource results will have FilterVariable + // values that are less than or equal to the value specified with LE. + LE *string `type:"string"` + + // The less than operator. The DataSource results will have FilterVariable values + // that are less than the value specified with LT. + LT *string `type:"string"` + + // The maximum number of DataSource to include in the result. + Limit *int64 `min:"1" type:"integer"` + + // The not equal to operator. The DataSource results will have FilterVariable + // values not equal to the value specified with NE. + NE *string `type:"string"` + + // The ID of the page in the paginated results. + NextToken *string `type:"string"` + + // A string that is found at the beginning of a variable, such as Name or Id. + // + // For example, a DataSource could have the Name 2014-09-09-HolidayGiftMailer. 
+ // To search for this DataSource, select Name for the FilterVariable and any + // of the following strings for the Prefix: + // + // 2014-09 + // + // 2014-09-09 + // + // 2014-09-09-Holiday + Prefix *string `type:"string"` + + // A two-value parameter that determines the sequence of the resulting list + // of DataSource. + // + // asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges + // the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable. + SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s DescribeDataSourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDataSourcesInput) GoString() string { + return s.String() +} + +// Represents the query results from a DescribeDataSources operation. The content +// is essentially a list of DataSource. +type DescribeDataSourcesOutput struct { + _ struct{} `type:"structure"` + + // An ID of the next page in the paginated results that indicates at least one + // more page follows. + NextToken *string `type:"string"` + + // A list of DataSource that meet the search criteria. + Results []*DataSource `type:"list"` +} + +// String returns the string representation +func (s DescribeDataSourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDataSourcesOutput) GoString() string { + return s.String() +} + +type DescribeEvaluationsInput struct { + _ struct{} `type:"structure"` + + // The equal to operator. The Evaluation results will have FilterVariable values + // that exactly match the value specified with EQ. + EQ *string `type:"string"` + + // Use one of the following variable to filter a list of Evaluation objects: + // + // CreatedAt - Sets the search criteria to the Evaluation creation date. + // Status - Sets the search criteria to the Evaluation status. 
Name - Sets
+ // the search criteria to the contents of Evaluation Name. IAMUser - Sets
+ // the search criteria to the user account that invoked an Evaluation. MLModelId
+ // - Sets the search criteria to the MLModel that was evaluated. DataSourceId
+ // - Sets the search criteria to the DataSource used in Evaluation. DataUri
+ // - Sets the search criteria to the data file(s) used in Evaluation. The URL
+ // can identify either a file or an Amazon Simple Storage Service (Amazon S3)
+ // bucket or directory.
+ FilterVariable *string `type:"string" enum:"EvaluationFilterVariable"`
+
+ // The greater than or equal to operator. The Evaluation results will have FilterVariable
+ // values that are greater than or equal to the value specified with GE.
+ GE *string `type:"string"`
+
+ // The greater than operator. The Evaluation results will have FilterVariable
+ // values that are greater than the value specified with GT.
+ GT *string `type:"string"`
+
+ // The less than or equal to operator. The Evaluation results will have FilterVariable
+ // values that are less than or equal to the value specified with LE.
+ LE *string `type:"string"`
+
+ // The less than operator. The Evaluation results will have FilterVariable values
+ // that are less than the value specified with LT.
+ LT *string `type:"string"`
+
+ // The maximum number of Evaluation to include in the result.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // The not equal to operator. The Evaluation results will have FilterVariable
+ // values not equal to the value specified with NE.
+ NE *string `type:"string"`
+
+ // The ID of the page in the paginated results.
+ NextToken *string `type:"string"`
+
+ // A string that is found at the beginning of a variable, such as Name or Id.
+ //
+ // For example, an Evaluation could have the Name 2014-09-09-HolidayGiftMailer.
+ // To search for this Evaluation, select Name for the FilterVariable and any + // of the following strings for the Prefix: + // + // 2014-09 + // + // 2014-09-09 + // + // 2014-09-09-Holiday + Prefix *string `type:"string"` + + // A two-value parameter that determines the sequence of the resulting list + // of Evaluation. + // + // asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges + // the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable. + SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s DescribeEvaluationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEvaluationsInput) GoString() string { + return s.String() +} + +// Represents the query results from a DescribeEvaluations operation. The content +// is essentially a list of Evaluation. +type DescribeEvaluationsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the next page in the paginated results that indicates at least + // one more page follows. + NextToken *string `type:"string"` + + // A list of Evaluation that meet the search criteria. + Results []*Evaluation `type:"list"` +} + +// String returns the string representation +func (s DescribeEvaluationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEvaluationsOutput) GoString() string { + return s.String() +} + +type DescribeMLModelsInput struct { + _ struct{} `type:"structure"` + + // The equal to operator. The MLModel results will have FilterVariable values + // that exactly match the value specified with EQ. + EQ *string `type:"string"` + + // Use one of the following variables to filter a list of MLModel: + // + // CreatedAt - Sets the search criteria to MLModel creation date. Status + // - Sets the search criteria to MLModel status. 
Name - Sets the search criteria + // to the contents of MLModel Name. IAMUser - Sets the search criteria to + // the user account that invoked the MLModel creation. TrainingDataSourceId + // - Sets the search criteria to the DataSource used to train one or more MLModel. + // RealtimeEndpointStatus - Sets the search criteria to the MLModel real-time + // endpoint status. MLModelType - Sets the search criteria to MLModel type: + // binary, regression, or multi-class. Algorithm - Sets the search criteria + // to the algorithm that the MLModel uses. TrainingDataURI - Sets the search + // criteria to the data file(s) used in training a MLModel. The URL can identify + // either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory. + FilterVariable *string `type:"string" enum:"MLModelFilterVariable"` + + // The greater than or equal to operator. The MLModel results will have FilterVariable + // values that are greater than or equal to the value specified with GE. + GE *string `type:"string"` + + // The greater than operator. The MLModel results will have FilterVariable values + // that are greater than the value specified with GT. + GT *string `type:"string"` + + // The less than or equal to operator. The MLModel results will have FilterVariable + // values that are less than or equal to the value specified with LE. + LE *string `type:"string"` + + // The less than operator. The MLModel results will have FilterVariable values + // that are less than the value specified with LT. + LT *string `type:"string"` + + // The number of pages of information to include in the result. The range of + // acceptable values is 1 through 100. The default value is 100. + Limit *int64 `min:"1" type:"integer"` + + // The not equal to operator. The MLModel results will have FilterVariable values + // not equal to the value specified with NE. + NE *string `type:"string"` + + // The ID of the page in the paginated results. 
+ NextToken *string `type:"string"` + + // A string that is found at the beginning of a variable, such as Name or Id. + // + // For example, an MLModel could have the Name 2014-09-09-HolidayGiftMailer. + // To search for this MLModel, select Name for the FilterVariable and any of + // the following strings for the Prefix: + // + // 2014-09 + // + // 2014-09-09 + // + // 2014-09-09-Holiday + Prefix *string `type:"string"` + + // A two-value parameter that determines the sequence of the resulting list + // of MLModel. + // + // asc - Arranges the list in ascending order (A-Z, 0-9). dsc - Arranges + // the list in descending order (Z-A, 9-0). Results are sorted by FilterVariable. + SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s DescribeMLModelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMLModelsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeMLModels operation. The content is essentially +// a list of MLModel. +type DescribeMLModelsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the next page in the paginated results that indicates at least + // one more page follows. + NextToken *string `type:"string"` + + // A list of MLModel that meet the search criteria. + Results []*MLModel `type:"list"` +} + +// String returns the string representation +func (s DescribeMLModelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMLModelsOutput) GoString() string { + return s.String() +} + +// Represents the output of GetEvaluation operation. +// +// The content consists of the detailed metadata and data file information +// and the current status of the Evaluation. +type Evaluation struct { + _ struct{} `type:"structure"` + + // The time that the Evaluation was created. The time is expressed in epoch + // time. 
+ CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The AWS user account that invoked the evaluation. The account type can be
+ // either an AWS root account or an AWS Identity and Access Management (IAM)
+ // user account.
+ CreatedByIamUser *string `type:"string"`
+
+ // The ID of the DataSource that is used to evaluate the MLModel.
+ EvaluationDataSourceId *string `min:"1" type:"string"`
+
+ // The ID that is assigned to the Evaluation at creation.
+ EvaluationId *string `min:"1" type:"string"`
+
+ // The location and name of the data in Amazon Simple Storage Service (Amazon
+ // S3) that is used in the evaluation.
+ InputDataLocationS3 *string `type:"string"`
+
+ // The time of the most recent edit to the Evaluation. The time is expressed
+ // in epoch time.
+ LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The ID of the MLModel that is the focus of the evaluation.
+ MLModelId *string `min:"1" type:"string"`
+
+ // A description of the most recent details about evaluating the MLModel.
+ Message *string `type:"string"`
+
+ // A user-supplied name or description of the Evaluation.
+ Name *string `type:"string"`
+
+ // Measurements of how well the MLModel performed, using observations referenced
+ // by the DataSource. One of the following metrics is returned, based on the
+ // type of the MLModel:
+ //
+ // BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique
+ // to measure performance.
+ //
+ // RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE)
+ // technique to measure performance. RMSE measures the difference between predicted
+ // and actual values for a single variable.
+ //
+ // MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique
+ // to measure performance.
+ //
+ // For more information about performance metrics, please see the Amazon
+ // Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg).
+ PerformanceMetrics *PerformanceMetrics `type:"structure"` + + // The status of the evaluation. This element can have one of the following + // values: + // + // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate + // an MLModel. INPROGRESS - The evaluation is underway. FAILED - The request + // to evaluate an MLModel did not run to completion. It is not usable. COMPLETED + // - The evaluation process completed successfully. DELETED - The Evaluation + // is marked as deleted. It is not usable. + Status *string `type:"string" enum:"EntityStatus"` +} + +// String returns the string representation +func (s Evaluation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Evaluation) GoString() string { + return s.String() +} + +type GetBatchPredictionInput struct { + _ struct{} `type:"structure"` + + // An ID assigned to the BatchPrediction at creation. + BatchPredictionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBatchPredictionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBatchPredictionInput) GoString() string { + return s.String() +} + +// Represents the output of a GetBatchPrediction operation and describes a BatchPrediction. +type GetBatchPredictionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the DataSource that was used to create the BatchPrediction. + BatchPredictionDataSourceId *string `min:"1" type:"string"` + + // An ID assigned to the BatchPrediction at creation. This value should be identical + // to the value of the BatchPredictionID in the request. + BatchPredictionId *string `min:"1" type:"string"` + + // The time when the BatchPrediction was created. The time is expressed in epoch + // time. 
+ CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account that invoked the BatchPrediction. The account type can + // be either an AWS root account or an AWS Identity and Access Management (IAM) + // user account. + CreatedByIamUser *string `type:"string"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + InputDataLocationS3 *string `type:"string"` + + // The time of the most recent edit to BatchPrediction. The time is expressed + // in epoch time. + LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A link to the file that contains logs of the CreateBatchPrediction operation. + LogUri *string `type:"string"` + + // The ID of the MLModel that generated predictions for the BatchPrediction + // request. + MLModelId *string `min:"1" type:"string"` + + // A description of the most recent details about processing the batch prediction + // request. + Message *string `type:"string"` + + // A user-supplied name or description of the BatchPrediction. + Name *string `type:"string"` + + // The location of an Amazon S3 bucket or directory to receive the operation + // results. + OutputUri *string `type:"string"` + + // The status of the BatchPrediction, which can be one of the following values: + // + // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate + // batch predictions. INPROGRESS - The batch predictions are in progress. + // FAILED - The request to perform a batch prediction did not run to completion. + // It is not usable. COMPLETED - The batch prediction process completed successfully. + // DELETED - The BatchPrediction is marked as deleted. It is not usable. 
+ Status *string `type:"string" enum:"EntityStatus"` +} + +// String returns the string representation +func (s GetBatchPredictionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBatchPredictionOutput) GoString() string { + return s.String() +} + +type GetDataSourceInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the DataSource at creation. + DataSourceId *string `min:"1" type:"string" required:"true"` + + // Specifies whether the GetDataSource operation should return DataSourceSchema. + // + // If true, DataSourceSchema is returned. + // + // If false, DataSourceSchema is not returned. + Verbose *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataSourceInput) GoString() string { + return s.String() +} + +// Represents the output of a GetDataSource operation and describes a DataSource. +type GetDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The parameter is true if statistics need to be generated from the observation + // data. + ComputeStatistics *bool `type:"boolean"` + + // The time that the DataSource was created. The time is expressed in epoch + // time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account from which the DataSource was created. The account type + // can be either an AWS root account or an AWS Identity and Access Management + // (IAM) user account. + CreatedByIamUser *string `type:"string"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + DataLocationS3 *string `type:"string"` + + // A JSON string that captures the splitting rearrangement requirement of the + // DataSource. + DataRearrangement *string `type:"string"` + + // The total size of observations in the data files. 
+ DataSizeInBytes *int64 `type:"long"`
+
+ // The ID assigned to the DataSource at creation. This value should be identical
+ // to the value of the DataSourceId in the request.
+ DataSourceId *string `min:"1" type:"string"`
+
+ // The schema used by all of the data files of this DataSource.
+ //
+ // Note This parameter is provided as part of the verbose format.
+ DataSourceSchema *string `type:"string"`
+
+ // The time of the most recent edit to the DataSource. The time is expressed
+ // in epoch time.
+ LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // A link to the file containing logs of either create DataSource operation.
+ LogUri *string `type:"string"`
+
+ // The description of the most recent details about creating the DataSource.
+ Message *string `type:"string"`
+
+ // A user-supplied name or description of the DataSource.
+ Name *string `type:"string"`
+
+ // The number of data files referenced by the DataSource.
+ NumberOfFiles *int64 `type:"long"`
+
+ // The datasource details that are specific to Amazon RDS.
+ RDSMetadata *RDSMetadata `type:"structure"`
+
+ // Describes the DataSource details specific to Amazon Redshift.
+ RedshiftMetadata *RedshiftMetadata `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of an AWS IAM Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts)
+ // such as the following: arn:aws:iam::account:role/rolename.
+ RoleARN *string `min:"1" type:"string"`
+
+ // The current status of the DataSource. This element can have one of the following
+ // values:
+ //
+ // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create
+ // a DataSource. INPROGRESS - The creation process is underway. FAILED - The
+ // request to create a DataSource did not run to completion. It is not usable.
+ // COMPLETED - The creation process completed successfully. DELETED - The
+ // DataSource is marked as deleted. It is not usable.
+ Status *string `type:"string" enum:"EntityStatus"` +} + +// String returns the string representation +func (s GetDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataSourceOutput) GoString() string { + return s.String() +} + +type GetEvaluationInput struct { + _ struct{} `type:"structure"` + + // The ID of the Evaluation to retrieve. The evaluation of each MLModel is recorded + // and cataloged. The ID provides the means to access the information. + EvaluationId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEvaluationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEvaluationInput) GoString() string { + return s.String() +} + +// Represents the output of a GetEvaluation operation and describes an Evaluation. +type GetEvaluationOutput struct { + _ struct{} `type:"structure"` + + // The time that the Evaluation was created. The time is expressed in epoch + // time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account that invoked the evaluation. The account type can be + // either an AWS root account or an AWS Identity and Access Management (IAM) + // user account. + CreatedByIamUser *string `type:"string"` + + // The DataSource used for this evaluation. + EvaluationDataSourceId *string `min:"1" type:"string"` + + // The evaluation ID which is same as the EvaluationId in the request. + EvaluationId *string `min:"1" type:"string"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + InputDataLocationS3 *string `type:"string"` + + // The time of the most recent edit to the BatchPrediction. The time is expressed + // in epoch time. 
+ LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // A link to the file that contains logs of the CreateEvaluation operation.
+ LogUri *string `type:"string"`
+
+ // The ID of the MLModel that was the focus of the evaluation.
+ MLModelId *string `min:"1" type:"string"`
+
+ // A description of the most recent details about evaluating the MLModel.
+ Message *string `type:"string"`
+
+ // A user-supplied name or description of the Evaluation.
+ Name *string `type:"string"`
+
+ // Measurements of how well the MLModel performed using observations referenced
+ // by the DataSource. One of the following metrics is returned based on the type
+ // of the MLModel:
+ //
+ // BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique
+ // to measure performance.
+ //
+ // RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE)
+ // technique to measure performance. RMSE measures the difference between predicted
+ // and actual values for a single variable.
+ //
+ // MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique
+ // to measure performance.
+ //
+ // For more information about performance metrics, please see the Amazon
+ // Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg).
+ PerformanceMetrics *PerformanceMetrics `type:"structure"`
+
+ // The status of the evaluation. This element can have one of the following
+ // values:
+ //
+ // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate
+ // an MLModel. INPROGRESS - The evaluation is underway. FAILED - The request
+ // to evaluate an MLModel did not run to completion. It is not usable. COMPLETED
+ // - The evaluation process completed successfully. DELETED - The Evaluation
+ // is marked as deleted. It is not usable.
+ Status *string `type:"string" enum:"EntityStatus"` +} + +// String returns the string representation +func (s GetEvaluationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEvaluationOutput) GoString() string { + return s.String() +} + +type GetMLModelInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the MLModel at creation. + MLModelId *string `min:"1" type:"string" required:"true"` + + // Specifies whether the GetMLModel operation should return Recipe. + // + // If true, Recipe is returned. + // + // If false, Recipe is not returned. + Verbose *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetMLModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMLModelInput) GoString() string { + return s.String() +} + +// Represents the output of a GetMLModel operation, and provides detailed information +// about a MLModel. +type GetMLModelOutput struct { + _ struct{} `type:"structure"` + + // The time that the MLModel was created. The time is expressed in epoch time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account from which the MLModel was created. The account type + // can be either an AWS root account or an AWS Identity and Access Management + // (IAM) user account. + CreatedByIamUser *string `type:"string"` + + // The current endpoint of the MLModel + EndpointInfo *RealtimeEndpointInfo `type:"structure"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + InputDataLocationS3 *string `type:"string"` + + // The time of the most recent edit to the MLModel. The time is expressed in + // epoch time. + LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A link to the file that contains logs of the CreateMLModel operation. 
+ LogUri *string `type:"string"` + + // The MLModel ID which is same as the MLModelId in the request. + MLModelId *string `min:"1" type:"string"` + + // Identifies the MLModel category. The following are the available types: + // + // REGRESSION -- Produces a numeric result. For example, "What listing price + // should a house have?" BINARY -- Produces one of two possible results. For + // example, "Is this an e-commerce website?" MULTICLASS -- Produces more than + // two possible results. For example, "Is this a HIGH, LOW or MEDIUM risk trade?" + MLModelType *string `type:"string" enum:"MLModelType"` + + // Description of the most recent details about accessing the MLModel. + Message *string `type:"string"` + + // A user-supplied name or description of the MLModel. + Name *string `type:"string"` + + // The recipe to use when training the MLModel. The Recipe provides detailed + // information about the observation data to use during training, as well as + // manipulations to perform on the observation data during training. + // + // Note This parameter is provided as part of the verbose format. + Recipe *string `type:"string"` + + // The schema used by all of the data files referenced by the DataSource. + // + // Note This parameter is provided as part of the verbose format. + Schema *string `type:"string"` + + // The scoring threshold is used in binary classification MLModels, and marks + // the boundary between a positive prediction and a negative prediction. + // + // Output values greater than or equal to the threshold receive a positive + // result from the MLModel, such as true. Output values less than the threshold + // receive a negative response from the MLModel, such as false. + ScoreThreshold *float64 `type:"float"` + + // The time of the most recent edit to the ScoreThreshold. The time is expressed + // in epoch time. 
+ ScoreThresholdLastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Long integer type that is a 64-bit signed number. + SizeInBytes *int64 `type:"long"` + + // The current status of the MLModel. This element can have one of the following + // values: + // + // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to describe + // a MLModel. INPROGRESS - The request is processing. FAILED - The request + // did not run to completion. It is not usable. COMPLETED - The request completed + // successfully. DELETED - The MLModel is marked as deleted. It is not usable. + Status *string `type:"string" enum:"EntityStatus"` + + // The ID of the training DataSource. + TrainingDataSourceId *string `min:"1" type:"string"` + + // A list of the training parameters in the MLModel. The list is implemented + // as a map of key/value pairs. + // + // The following is the current set of training parameters: + // + // sgd.l1RegularizationAmount - Coefficient regularization L1 norm. It controls + // overfitting the data by penalizing large coefficients. This tends to drive + // coefficients to zero, resulting in a sparse feature set. If you use this + // parameter, specify a small value, such as 1.0E-04 or 1.0E-08. + // + // The value is a double that ranges from 0 to MAX_DOUBLE. The default is not + // to use L1 normalization. The parameter cannot be used when L2 is specified. + // Use this parameter sparingly. + // + // sgd.l2RegularizationAmount - Coefficient regularization L2 norm. It controls + // overfitting the data by penalizing large coefficients. This tends to drive + // coefficients to small, nonzero values. If you use this parameter, specify + // a small value, such as 1.0E-04 or 1.0E-08. + // + // The value is a double that ranges from 0 to MAX_DOUBLE. The default is not + // to use L2 normalization. This parameter cannot be used when L1 is specified. + // Use this parameter sparingly. 
+ // + // sgd.maxPasses - The number of times that the training process traverses + // the observations to build the MLModel. The value is an integer that ranges + // from 1 to 10000. The default value is 10. + // + // sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending + // on the input data, the model size might affect performance. + // + // The value is an integer that ranges from 100000 to 2147483648. The default + // value is 33554432. + TrainingParameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetMLModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMLModelOutput) GoString() string { + return s.String() +} + +// Represents the output of a GetMLModel operation. +// +// The content consists of the detailed metadata and the current status of +// the MLModel. +type MLModel struct { + _ struct{} `type:"structure"` + + // The algorithm used to train the MLModel. The following algorithm is supported: + // + // SGD -- Stochastic gradient descent. The goal of SGD is to minimize the + // gradient of the loss function. + Algorithm *string `type:"string" enum:"Algorithm"` + + // The time that the MLModel was created. The time is expressed in epoch time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The AWS user account from which the MLModel was created. The account type + // can be either an AWS root account or an AWS Identity and Access Management + // (IAM) user account. + CreatedByIamUser *string `type:"string"` + + // The current endpoint of the MLModel. + EndpointInfo *RealtimeEndpointInfo `type:"structure"` + + // The location of the data file or directory in Amazon Simple Storage Service + // (Amazon S3). + InputDataLocationS3 *string `type:"string"` + + // The time of the most recent edit to the MLModel. The time is expressed in + // epoch time. 
+ LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The ID assigned to the MLModel at creation. + MLModelId *string `min:"1" type:"string"` + + // Identifies the MLModel category. The following are the available types: + // + // REGRESSION - Produces a numeric result. For example, "What listing price + // should a house have?". BINARY - Produces one of two possible results. For + // example, "Is this a child-friendly web site?". MULTICLASS - Produces more + // than two possible results. For example, "Is this a HIGH, LOW or MEDIUM risk + // trade?". + MLModelType *string `type:"string" enum:"MLModelType"` + + // A description of the most recent details about accessing the MLModel. + Message *string `type:"string"` + + // A user-supplied name or description of the MLModel. + Name *string `type:"string"` + + ScoreThreshold *float64 `type:"float"` + + // The time of the most recent edit to the ScoreThreshold. The time is expressed + // in epoch time. + ScoreThresholdLastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Long integer type that is a 64-bit signed number. + SizeInBytes *int64 `type:"long"` + + // The current status of an MLModel. This element can have one of the following + // values: + // + // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create + // an MLModel. INPROGRESS - The creation process is underway. FAILED - The request + // to create an MLModel did not run to completion. It is not usable. COMPLETED + // - The creation process completed successfully. DELETED - The MLModel is marked + // as deleted. It is not usable. + Status *string `type:"string" enum:"EntityStatus"` + + // The ID of the training DataSource. The CreateMLModel operation uses the TrainingDataSourceId. + TrainingDataSourceId *string `min:"1" type:"string"` + + // A list of the training parameters in the MLModel. The list is implemented + // as a map of key/value pairs. 
+ //
+ // The following is the current set of training parameters:
+ //
+ // sgd.l1RegularizationAmount - Coefficient regularization L1 norm. It controls
+ // overfitting the data by penalizing large coefficients. This tends to drive
+ // coefficients to zero, resulting in a sparse feature set. If you use this
+ // parameter, specify a small value, such as 1.0E-04 or 1.0E-08.
+ //
+ // The value is a double that ranges from 0 to MAX_DOUBLE. The default is not
+ // to use L1 normalization. The parameter cannot be used when L2 is specified.
+ // Use this parameter sparingly.
+ //
+ // sgd.l2RegularizationAmount - Coefficient regularization L2 norm. It controls
+ // overfitting the data by penalizing large coefficients. This tends to drive
+ // coefficients to small, nonzero values. If you use this parameter, specify
+ // a small value, such as 1.0E-04 or 1.0E-08.
+ //
+ // The value is a double that ranges from 0 to MAX_DOUBLE. The default is not
+ // to use L2 normalization. This cannot be used when L1 is specified. Use this
+ // parameter sparingly.
+ //
+ // sgd.maxPasses - Number of times that the training process traverses the
+ // observations to build the MLModel. The value is an integer that ranges from
+ // 1 to 10000. The default value is 10.
+ //
+ // sgd.maxMLModelSizeInBytes - Maximum allowed size of the model. Depending
+ // on the input data, the model size might affect performance.
+ //
+ // The value is an integer that ranges from 100000 to 2147483648. The default
+ // value is 33554432.
+ TrainingParameters map[string]*string `type:"map"`
+}
+
+// String returns the string representation
+func (s MLModel) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MLModel) GoString() string {
+ return s.String()
+}
+
+// Measurements of how well the MLModel performed on known observations.
One +// of the following metrics is returned, based on the type of the MLModel: +// +// BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique +// to measure performance. +// +// RegressionRMSE: The regression MLModel uses the Root Mean Square Error +// (RMSE) technique to measure performance. RMSE measures the difference between +// predicted and actual values for a single variable. +// +// MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique +// to measure performance. +// +// For more information about performance metrics, please see the Amazon +// Machine Learning Developer Guide (http://docs.aws.amazon.com/machine-learning/latest/dg). +type PerformanceMetrics struct { + _ struct{} `type:"structure"` + + Properties map[string]*string `type:"map"` +} + +// String returns the string representation +func (s PerformanceMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PerformanceMetrics) GoString() string { + return s.String() +} + +type PredictInput struct { + _ struct{} `type:"structure"` + + // A unique identifier of the MLModel. + MLModelId *string `min:"1" type:"string" required:"true"` + + PredictEndpoint *string `type:"string" required:"true"` + + // A map of variable name-value pairs that represent an observation. 
+ Record map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s PredictInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PredictInput) GoString() string { + return s.String() +} + +type PredictOutput struct { + _ struct{} `type:"structure"` + + // The output from a Predict operation: + // + // Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE + // - REGRESSION | BINARY | MULTICLASS DetailsAttributes.ALGORITHM - SGD + // + // PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request. + // + // PredictedScores - Contains the raw classification score corresponding + // to each label. + // + // PredictedValue - Present for a REGRESSION MLModel request. + Prediction *Prediction `type:"structure"` +} + +// String returns the string representation +func (s PredictOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PredictOutput) GoString() string { + return s.String() +} + +// The output from a Predict operation: +// +// Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE +// - REGRESSION | BINARY | MULTICLASS DetailsAttributes.ALGORITHM - SGD +// +// PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request. +// +// PredictedScores - Contains the raw classification score corresponding +// to each label. +// +// PredictedValue - Present for a REGRESSION MLModel request. +type Prediction struct { + _ struct{} `type:"structure"` + + // Provides any additional details regarding the prediction. + Details map[string]*string `locationName:"details" type:"map"` + + // The prediction label for either a BINARY or MULTICLASS MLModel. + PredictedLabel *string `locationName:"predictedLabel" min:"1" type:"string"` + + // Provides the raw classification score corresponding to each label. 
+ PredictedScores map[string]*float64 `locationName:"predictedScores" type:"map"` + + // The prediction value for REGRESSION MLModel. + PredictedValue *float64 `locationName:"predictedValue" type:"float"` +} + +// String returns the string representation +func (s Prediction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Prediction) GoString() string { + return s.String() +} + +// The data specification of an Amazon Relational Database Service (Amazon RDS) +// DataSource. +type RDSDataSpec struct { + _ struct{} `type:"structure"` + + // DataRearrangement - A JSON string that represents the splitting requirement + // of a DataSource. + // + // Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" + DataRearrangement *string `type:"string"` + + // A JSON string that represents the schema for an Amazon RDS DataSource. The + // DataSchema defines the structure of the observation data in the data file(s) + // referenced in the DataSource. + // + // A DataSchema is not required if you specify a DataSchemaUri + // + // Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames + // have an array of key-value pairs for their value. Use the following format + // to define your DataSchema. 
+ // + // { "version": "1.0", + // + // "recordAnnotationFieldName": "F1", + // + // "recordWeightFieldName": "F2", + // + // "targetFieldName": "F3", + // + // "dataFormat": "CSV", + // + // "dataFileContainsHeader": true, + // + // "attributes": [ + // + // { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": + // "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": + // "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" + // }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": + // "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" + // } ], + // + // "excludedVariableNames": [ "F6" ] } + DataSchema *string `type:"string"` + + // The Amazon S3 location of the DataSchema. + DataSchemaUri *string `type:"string"` + + // The AWS Identity and Access Management (IAM) credentials that are used connect + // to the Amazon RDS database. + DatabaseCredentials *RDSDatabaseCredentials `type:"structure" required:"true"` + + // Describes the DatabaseName and InstanceIdentifier of an an Amazon RDS database. + DatabaseInformation *RDSDatabase `type:"structure" required:"true"` + + // The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute + // Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS + // to an Amazon S3 task. For more information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html) + // for data pipelines. + ResourceRole *string `min:"1" type:"string" required:"true"` + + // The Amazon S3 location for staging Amazon RDS data. The data retrieved from + // Amazon RDS using SelectSqlQuery is stored in this location. + S3StagingLocation *string `type:"string" required:"true"` + + // The security group IDs to be used to access a VPC-based RDS DB instance. 
+ // Ensure that there are appropriate ingress rules set up to allow access to + // the RDS DB instance. This attribute is used by Data Pipeline to carry out + // the copy operation from Amazon RDS to an Amazon S3 task. + SecurityGroupIds []*string `type:"list" required:"true"` + + // The query that is used to retrieve the observation data for the DataSource. + SelectSqlQuery *string `min:"1" type:"string" required:"true"` + + // The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to + // monitor the progress of the copy task from Amazon RDS to Amazon S3. For more + // information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html) + // for data pipelines. + ServiceRole *string `min:"1" type:"string" required:"true"` + + // The subnet ID to be used to access a VPC-based RDS DB instance. This attribute + // is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon + // S3. + SubnetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RDSDataSpec) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RDSDataSpec) GoString() string { + return s.String() +} + +// The database details of an Amazon RDS database. +type RDSDatabase struct { + _ struct{} `type:"structure"` + + // The name of a database hosted on an RDS DB instance. + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The ID of an RDS DB instance. + InstanceIdentifier *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RDSDatabase) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RDSDatabase) GoString() string { + return s.String() +} + +// The database credentials to connect to a database on an RDS DB instance. 
+type RDSDatabaseCredentials struct { + _ struct{} `type:"structure"` + + // The password to be used by Amazon ML to connect to a database on an RDS DB + // instance. The password should have sufficient permissions to execute the + // RDSSelectQuery query. + Password *string `min:"8" type:"string" required:"true"` + + // The username to be used by Amazon ML to connect to database on an Amazon + // RDS instance. The username should have sufficient permissions to execute + // an RDSSelectSqlQuery query. + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RDSDatabaseCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RDSDatabaseCredentials) GoString() string { + return s.String() +} + +// The datasource details that are specific to Amazon RDS. +type RDSMetadata struct { + _ struct{} `type:"structure"` + + // The ID of the Data Pipeline instance that is used to carry to copy data from + // Amazon RDS to Amazon S3. You can use the ID to find details about the instance + // in the Data Pipeline console. + DataPipelineId *string `min:"1" type:"string"` + + // The database details required to connect to an Amazon RDS. + Database *RDSDatabase `type:"structure"` + + // The username to be used by Amazon ML to connect to database on an Amazon + // RDS instance. The username should have sufficient permissions to execute + // an RDSSelectSqlQuery query. + DatabaseUserName *string `min:"1" type:"string"` + + // The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance + // to carry out the copy task from Amazon RDS to Amazon S3. For more information, + // see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html) + // for data pipelines. + ResourceRole *string `min:"1" type:"string"` + + // The SQL query that is supplied during CreateDataSourceFromRDS. 
Returns only + // if Verbose is true in GetDataSourceInput. + SelectSqlQuery *string `min:"1" type:"string"` + + // The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to + // monitor the progress of the copy task from Amazon RDS to Amazon S3. For more + // information, see Role templates (http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html) + // for data pipelines. + ServiceRole *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RDSMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RDSMetadata) GoString() string { + return s.String() +} + +// Describes the real-time endpoint information for an MLModel. +type RealtimeEndpointInfo struct { + _ struct{} `type:"structure"` + + // The time that the request to create the real-time endpoint for the MLModel + // was received. The time is expressed in epoch time. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The current status of the real-time endpoint for the MLModel. This element + // can have one of the following values: + // + // NONE - Endpoint does not exist or was previously deleted. READY - Endpoint + // is ready to be used for real-time predictions. UPDATING - Updating/creating + // the endpoint. + EndpointStatus *string `type:"string" enum:"RealtimeEndpointStatus"` + + // The URI that specifies where to send real-time prediction requests for the + // MLModel. + // + // Note The application must wait until the real-time endpoint is ready before + // using this URI. + EndpointUrl *string `type:"string"` + + // The maximum processing rate for the real-time endpoint for MLModel, measured + // in incoming requests per second. 
+ PeakRequestsPerSecond *int64 `type:"integer"` +} + +// String returns the string representation +func (s RealtimeEndpointInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RealtimeEndpointInfo) GoString() string { + return s.String() +} + +// Describes the data specification of an Amazon Redshift DataSource. +type RedshiftDataSpec struct { + _ struct{} `type:"structure"` + + // Describes the splitting specifications for a DataSource. + DataRearrangement *string `type:"string"` + + // A JSON string that represents the schema for an Amazon Redshift DataSource. + // The DataSchema defines the structure of the observation data in the data + // file(s) referenced in the DataSource. + // + // A DataSchema is not required if you specify a DataSchemaUri. + // + // Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames + // have an array of key-value pairs for their value. Use the following format + // to define your DataSchema. + // + // { "version": "1.0", + // + // "recordAnnotationFieldName": "F1", + // + // "recordWeightFieldName": "F2", + // + // "targetFieldName": "F3", + // + // "dataFormat": "CSV", + // + // "dataFileContainsHeader": true, + // + // "attributes": [ + // + // { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": + // "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": + // "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" + // }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": + // "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" + // } ], + // + // "excludedVariableNames": [ "F6" ] } + DataSchema *string `type:"string"` + + // Describes the schema location for an Amazon Redshift DataSource. 
+ DataSchemaUri *string `type:"string"` + + // Describes AWS Identity and Access Management (IAM) credentials that are used + // connect to the Amazon Redshift database. + DatabaseCredentials *RedshiftDatabaseCredentials `type:"structure" required:"true"` + + // Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift DataSource. + DatabaseInformation *RedshiftDatabase `type:"structure" required:"true"` + + // Describes an Amazon S3 location to store the result set of the SelectSqlQuery + // query. + S3StagingLocation *string `type:"string" required:"true"` + + // Describes the SQL Query to execute on an Amazon Redshift database for an + // Amazon Redshift DataSource. + SelectSqlQuery *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDataSpec) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDataSpec) GoString() string { + return s.String() +} + +// Describes the database details required to connect to an Amazon Redshift +// database. +type RedshiftDatabase struct { + _ struct{} `type:"structure"` + + // The ID of an Amazon Redshift cluster. + ClusterIdentifier *string `min:"1" type:"string" required:"true"` + + // The name of a database hosted on an Amazon Redshift cluster. + DatabaseName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDatabase) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDatabase) GoString() string { + return s.String() +} + +// Describes the database credentials for connecting to a database on an Amazon +// Redshift cluster. +type RedshiftDatabaseCredentials struct { + _ struct{} `type:"structure"` + + // A password to be used by Amazon ML to connect to a database on an Amazon + // Redshift cluster. 
The password should have sufficient permissions to execute + // a RedshiftSelectSqlQuery query. The password should be valid for an Amazon + // Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html). + Password *string `min:"8" type:"string" required:"true"` + + // A username to be used by Amazon Machine Learning (Amazon ML)to connect to + // a database on an Amazon Redshift cluster. The username should have sufficient + // permissions to execute the RedshiftSelectSqlQuery query. The username should + // be valid for an Amazon Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html). + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RedshiftDatabaseCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDatabaseCredentials) GoString() string { + return s.String() +} + +// Describes the DataSource details specific to Amazon Redshift. +type RedshiftMetadata struct { + _ struct{} `type:"structure"` + + // A username to be used by Amazon Machine Learning (Amazon ML)to connect to + // a database on an Amazon Redshift cluster. The username should have sufficient + // permissions to execute the RedshiftSelectSqlQuery query. The username should + // be valid for an Amazon Redshift USER (http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html). + DatabaseUserName *string `min:"1" type:"string"` + + // Describes the database details required to connect to an Amazon Redshift + // database. + RedshiftDatabase *RedshiftDatabase `type:"structure"` + + // The SQL query that is specified during CreateDataSourceFromRedshift. Returns + // only if Verbose is true in GetDataSourceInput. 
+ SelectSqlQuery *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RedshiftMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftMetadata) GoString() string { + return s.String() +} + +// Describes the data specification of a DataSource. +type S3DataSpec struct { + _ struct{} `type:"structure"` + + // The location of the data file(s) used by a DataSource. The URI specifies + // a data file or an Amazon Simple Storage Service (Amazon S3) directory or + // bucket containing data files. + DataLocationS3 *string `type:"string" required:"true"` + + // Describes the splitting requirement of a Datasource. + DataRearrangement *string `type:"string"` + + // A JSON string that represents the schema for an Amazon S3 DataSource. The + // DataSchema defines the structure of the observation data in the data file(s) + // referenced in the DataSource. + // + // Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames + // have an array of key-value pairs for their value. Use the following format + // to define your DataSchema. 
+ // + // { "version": "1.0", + // + // "recordAnnotationFieldName": "F1", + // + // "recordWeightFieldName": "F2", + // + // "targetFieldName": "F3", + // + // "dataFormat": "CSV", + // + // "dataFileContainsHeader": true, + // + // "attributes": [ + // + // { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": + // "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": + // "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" + // }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": + // "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" + // } ], + // + // "excludedVariableNames": [ "F6" ] } + DataSchema *string `type:"string"` + + // Describes the schema Location in Amazon S3. + DataSchemaLocationS3 *string `type:"string"` +} + +// String returns the string representation +func (s S3DataSpec) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3DataSpec) GoString() string { + return s.String() +} + +type UpdateBatchPredictionInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the BatchPrediction during creation. + BatchPredictionId *string `min:"1" type:"string" required:"true"` + + // A new user-supplied name or description of the BatchPrediction. + BatchPredictionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateBatchPredictionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBatchPredictionInput) GoString() string { + return s.String() +} + +// Represents the output of an UpdateBatchPrediction operation. +// +// You can see the updated content by using the GetBatchPrediction operation. 
+type UpdateBatchPredictionOutput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the BatchPrediction during creation. This value should + // be identical to the value of the BatchPredictionId in the request. + BatchPredictionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateBatchPredictionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBatchPredictionOutput) GoString() string { + return s.String() +} + +type UpdateDataSourceInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the DataSource during creation. + DataSourceId *string `min:"1" type:"string" required:"true"` + + // A new user-supplied name or description of the DataSource that will replace + // the current description. + DataSourceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDataSourceInput) GoString() string { + return s.String() +} + +// Represents the output of an UpdateDataSource operation. +// +// You can see the updated content by using the GetBatchPrediction operation. +type UpdateDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the DataSource during creation. This value should be identical + // to the value of the DataSourceID in the request. + DataSourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDataSourceOutput) GoString() string { + return s.String() +} + +type UpdateEvaluationInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the Evaluation during creation. 
+ EvaluationId *string `min:"1" type:"string" required:"true"` + + // A new user-supplied name or description of the Evaluation that will replace + // the current content. + EvaluationName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateEvaluationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEvaluationInput) GoString() string { + return s.String() +} + +// Represents the output of an UpdateEvaluation operation. +// +// You can see the updated content by using the GetEvaluation operation. +type UpdateEvaluationOutput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the Evaluation during creation. This value should be identical + // to the value of the Evaluation in the request. + EvaluationId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateEvaluationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEvaluationOutput) GoString() string { + return s.String() +} + +type UpdateMLModelInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the MLModel during creation. + MLModelId *string `min:"1" type:"string" required:"true"` + + // A user-supplied name or description of the MLModel. + MLModelName *string `type:"string"` + + // The ScoreThreshold used in binary classification MLModel that marks the boundary + // between a positive prediction and a negative prediction. + // + // Output values greater than or equal to the ScoreThreshold receive a positive + // result from the MLModel, such as true. Output values less than the ScoreThreshold + // receive a negative response from the MLModel, such as false. 
+ ScoreThreshold *float64 `type:"float"` +} + +// String returns the string representation +func (s UpdateMLModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMLModelInput) GoString() string { + return s.String() +} + +// Represents the output of an UpdateMLModel operation. +// +// You can see the updated content by using the GetMLModel operation. +type UpdateMLModelOutput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the MLModel during creation. This value should be identical + // to the value of the MLModelID in the request. + MLModelId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateMLModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMLModelOutput) GoString() string { + return s.String() +} + +// The function used to train a MLModel. Training choices supported by Amazon +// ML include the following: +// +// SGD - Stochastic Gradient Descent. RandomForest - Random forest of decision +// trees. +const ( + // @enum Algorithm + AlgorithmSgd = "sgd" +) + +// A list of the variables to use in searching or filtering BatchPrediction. +// +// CreatedAt - Sets the search criteria to BatchPrediction creation date. +// Status - Sets the search criteria to BatchPrediction status. Name - Sets +// the search criteria to the contents of BatchPrediction Name. IAMUser - +// Sets the search criteria to the user account that invoked the BatchPrediction +// creation. MLModelId - Sets the search criteria to the MLModel used in the +// BatchPrediction. DataSourceId - Sets the search criteria to the DataSource +// used in the BatchPrediction. DataURI - Sets the search criteria to the data +// file(s) used in the BatchPrediction. The URL can identify either a file or +// an Amazon Simple Storage Service (Amazon S3) bucket or directory. 
+const ( + // @enum BatchPredictionFilterVariable + BatchPredictionFilterVariableCreatedAt = "CreatedAt" + // @enum BatchPredictionFilterVariable + BatchPredictionFilterVariableLastUpdatedAt = "LastUpdatedAt" + // @enum BatchPredictionFilterVariable + BatchPredictionFilterVariableStatus = "Status" + // @enum BatchPredictionFilterVariable + BatchPredictionFilterVariableName = "Name" + // @enum BatchPredictionFilterVariable + BatchPredictionFilterVariableIamuser = "IAMUser" + // @enum BatchPredictionFilterVariable + BatchPredictionFilterVariableMlmodelId = "MLModelId" + // @enum BatchPredictionFilterVariable + BatchPredictionFilterVariableDataSourceId = "DataSourceId" + // @enum BatchPredictionFilterVariable + BatchPredictionFilterVariableDataUri = "DataURI" +) + +// A list of the variables to use in searching or filtering DataSource. +// +// CreatedAt - Sets the search criteria to DataSource creation date. Status +// - Sets the search criteria to DataSource status. Name - Sets the search +// criteria to the contents of DataSource Name. DataUri - Sets the search +// criteria to the URI of data files used to create the DataSource. The URI +// can identify either a file or an Amazon Simple Storage Service (Amazon S3) +// bucket or directory. IAMUser - Sets the search criteria to the user account +// that invoked the DataSource creation. Note The variable names should match +// the variable names in the DataSource. 
+const ( + // @enum DataSourceFilterVariable + DataSourceFilterVariableCreatedAt = "CreatedAt" + // @enum DataSourceFilterVariable + DataSourceFilterVariableLastUpdatedAt = "LastUpdatedAt" + // @enum DataSourceFilterVariable + DataSourceFilterVariableStatus = "Status" + // @enum DataSourceFilterVariable + DataSourceFilterVariableName = "Name" + // @enum DataSourceFilterVariable + DataSourceFilterVariableDataLocationS3 = "DataLocationS3" + // @enum DataSourceFilterVariable + DataSourceFilterVariableIamuser = "IAMUser" +) + +// Contains the key values of DetailsMap: PredictiveModelType - Indicates the +// type of the MLModel. Algorithm - Indicates the algorithm was used for the +// MLModel. +const ( + // @enum DetailsAttributes + DetailsAttributesPredictiveModelType = "PredictiveModelType" + // @enum DetailsAttributes + DetailsAttributesAlgorithm = "Algorithm" +) + +// Entity status with the following possible values: +// +// PENDING INPROGRESS FAILED COMPLETED DELETED +const ( + // @enum EntityStatus + EntityStatusPending = "PENDING" + // @enum EntityStatus + EntityStatusInprogress = "INPROGRESS" + // @enum EntityStatus + EntityStatusFailed = "FAILED" + // @enum EntityStatus + EntityStatusCompleted = "COMPLETED" + // @enum EntityStatus + EntityStatusDeleted = "DELETED" +) + +// A list of the variables to use in searching or filtering Evaluation. +// +// CreatedAt - Sets the search criteria to Evaluation creation date. Status +// - Sets the search criteria to Evaluation status. Name - Sets the search +// criteria to the contents of Evaluation Name. IAMUser - Sets the search +// criteria to the user account that invoked an evaluation. MLModelId - Sets +// the search criteria to the Predictor that was evaluated. DataSourceId - +// Sets the search criteria to the DataSource used in evaluation. DataUri - +// Sets the search criteria to the data file(s) used in evaluation. 
The URL +// can identify either a file or an Amazon Simple Storage Service (Amazon S3) +// bucket or directory. +const ( + // @enum EvaluationFilterVariable + EvaluationFilterVariableCreatedAt = "CreatedAt" + // @enum EvaluationFilterVariable + EvaluationFilterVariableLastUpdatedAt = "LastUpdatedAt" + // @enum EvaluationFilterVariable + EvaluationFilterVariableStatus = "Status" + // @enum EvaluationFilterVariable + EvaluationFilterVariableName = "Name" + // @enum EvaluationFilterVariable + EvaluationFilterVariableIamuser = "IAMUser" + // @enum EvaluationFilterVariable + EvaluationFilterVariableMlmodelId = "MLModelId" + // @enum EvaluationFilterVariable + EvaluationFilterVariableDataSourceId = "DataSourceId" + // @enum EvaluationFilterVariable + EvaluationFilterVariableDataUri = "DataURI" +) + +const ( + // @enum MLModelFilterVariable + MLModelFilterVariableCreatedAt = "CreatedAt" + // @enum MLModelFilterVariable + MLModelFilterVariableLastUpdatedAt = "LastUpdatedAt" + // @enum MLModelFilterVariable + MLModelFilterVariableStatus = "Status" + // @enum MLModelFilterVariable + MLModelFilterVariableName = "Name" + // @enum MLModelFilterVariable + MLModelFilterVariableIamuser = "IAMUser" + // @enum MLModelFilterVariable + MLModelFilterVariableTrainingDataSourceId = "TrainingDataSourceId" + // @enum MLModelFilterVariable + MLModelFilterVariableRealtimeEndpointStatus = "RealtimeEndpointStatus" + // @enum MLModelFilterVariable + MLModelFilterVariableMlmodelType = "MLModelType" + // @enum MLModelFilterVariable + MLModelFilterVariableAlgorithm = "Algorithm" + // @enum MLModelFilterVariable + MLModelFilterVariableTrainingDataUri = "TrainingDataURI" +) + +const ( + // @enum MLModelType + MLModelTypeRegression = "REGRESSION" + // @enum MLModelType + MLModelTypeBinary = "BINARY" + // @enum MLModelType + MLModelTypeMulticlass = "MULTICLASS" +) + +const ( + // @enum RealtimeEndpointStatus + RealtimeEndpointStatusNone = "NONE" + // @enum RealtimeEndpointStatus + 
RealtimeEndpointStatusReady = "READY" + // @enum RealtimeEndpointStatus + RealtimeEndpointStatusUpdating = "UPDATING" + // @enum RealtimeEndpointStatus + RealtimeEndpointStatusFailed = "FAILED" +) + +// The sort order specified in a listing condition. Possible values include +// the following: +// +// asc - Present the information in ascending order (from A-Z). dsc - Present +// the information in descending order (from Z-A). +const ( + // @enum SortOrder + SortOrderAsc = "asc" + // @enum SortOrder + SortOrderDsc = "dsc" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,33 @@ +package machinelearning + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opPredict: + r.Handlers.Build.PushBack(updatePredictEndpoint) + } + } +} + +// updatePredictEndpoint rewrites the request endpoint to use the +// "PredictEndpoint" parameter of the Predict operation. 
+func updatePredictEndpoint(r *request.Request) { + if !r.ParamsFilled() { + return + } + + r.ClientInfo.Endpoint = *r.Params.(*PredictInput).PredictEndpoint + + uri, err := url.Parse(r.ClientInfo.Endpoint) + if err != nil { + r.Error = err + return + } + r.HTTPRequest.URL = uri +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,37 @@ +package machinelearning_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/machinelearning" +) + +func TestPredictEndpoint(t *testing.T) { + ml := machinelearning.New(unit.Session) + ml.Handlers.Send.Clear() + ml.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + } + }) + + req, _ := ml.PredictRequest(&machinelearning.PredictInput{ + PredictEndpoint: aws.String("https://localhost/endpoint"), + MLModelId: aws.String("id"), + Record: map[string]*string{}, + }) + err := req.Send() + + assert.Nil(t, err) + assert.Equal(t, "https://localhost/endpoint", req.HTTPRequest.URL.String()) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/examples_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,610 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package machinelearning_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/machinelearning" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleMachineLearning_CreateBatchPrediction() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateBatchPredictionInput{ + BatchPredictionDataSourceId: aws.String("EntityId"), // Required + BatchPredictionId: aws.String("EntityId"), // Required + MLModelId: aws.String("EntityId"), // Required + OutputUri: aws.String("S3Url"), // Required + BatchPredictionName: aws.String("EntityName"), + } + resp, err := svc.CreateBatchPrediction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_CreateDataSourceFromRDS() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateDataSourceFromRDSInput{ + DataSourceId: aws.String("EntityId"), // Required + RDSData: &machinelearning.RDSDataSpec{ // Required + DatabaseCredentials: &machinelearning.RDSDatabaseCredentials{ // Required + Password: aws.String("RDSDatabasePassword"), // Required + Username: aws.String("RDSDatabaseUsername"), // Required + }, + DatabaseInformation: &machinelearning.RDSDatabase{ // Required + DatabaseName: aws.String("RDSDatabaseName"), // Required + InstanceIdentifier: aws.String("RDSInstanceIdentifier"), // Required + }, + ResourceRole: aws.String("EDPResourceRole"), // Required + S3StagingLocation: aws.String("S3Url"), // Required + SecurityGroupIds: []*string{ // Required + aws.String("EDPSecurityGroupId"), // Required + // More values... + }, + SelectSqlQuery: aws.String("RDSSelectSqlQuery"), // Required + ServiceRole: aws.String("EDPServiceRole"), // Required + SubnetId: aws.String("EDPSubnetId"), // Required + DataRearrangement: aws.String("DataRearrangement"), + DataSchema: aws.String("DataSchema"), + DataSchemaUri: aws.String("S3Url"), + }, + RoleARN: aws.String("RoleARN"), // Required + ComputeStatistics: aws.Bool(true), + DataSourceName: aws.String("EntityName"), + } + resp, err := svc.CreateDataSourceFromRDS(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_CreateDataSourceFromRedshift() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateDataSourceFromRedshiftInput{ + DataSourceId: aws.String("EntityId"), // Required + DataSpec: &machinelearning.RedshiftDataSpec{ // Required + DatabaseCredentials: &machinelearning.RedshiftDatabaseCredentials{ // Required + Password: aws.String("RedshiftDatabasePassword"), // Required + Username: aws.String("RedshiftDatabaseUsername"), // Required + }, + DatabaseInformation: &machinelearning.RedshiftDatabase{ // Required + ClusterIdentifier: aws.String("RedshiftClusterIdentifier"), // Required + DatabaseName: aws.String("RedshiftDatabaseName"), // Required + }, + S3StagingLocation: aws.String("S3Url"), // Required + SelectSqlQuery: aws.String("RedshiftSelectSqlQuery"), // Required + DataRearrangement: aws.String("DataRearrangement"), + DataSchema: aws.String("DataSchema"), + DataSchemaUri: aws.String("S3Url"), + }, + RoleARN: aws.String("RoleARN"), // Required + ComputeStatistics: aws.Bool(true), + DataSourceName: aws.String("EntityName"), + } + resp, err := svc.CreateDataSourceFromRedshift(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_CreateDataSourceFromS3() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateDataSourceFromS3Input{ + DataSourceId: aws.String("EntityId"), // Required + DataSpec: &machinelearning.S3DataSpec{ // Required + DataLocationS3: aws.String("S3Url"), // Required + DataRearrangement: aws.String("DataRearrangement"), + DataSchema: aws.String("DataSchema"), + DataSchemaLocationS3: aws.String("S3Url"), + }, + ComputeStatistics: aws.Bool(true), + DataSourceName: aws.String("EntityName"), + } + resp, err := svc.CreateDataSourceFromS3(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_CreateEvaluation() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateEvaluationInput{ + EvaluationDataSourceId: aws.String("EntityId"), // Required + EvaluationId: aws.String("EntityId"), // Required + MLModelId: aws.String("EntityId"), // Required + EvaluationName: aws.String("EntityName"), + } + resp, err := svc.CreateEvaluation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_CreateMLModel() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateMLModelInput{ + MLModelId: aws.String("EntityId"), // Required + MLModelType: aws.String("MLModelType"), // Required + TrainingDataSourceId: aws.String("EntityId"), // Required + MLModelName: aws.String("EntityName"), + Parameters: map[string]*string{ + "Key": aws.String("StringType"), // Required + // More values... 
+ }, + Recipe: aws.String("Recipe"), + RecipeUri: aws.String("S3Url"), + } + resp, err := svc.CreateMLModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_CreateRealtimeEndpoint() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.CreateRealtimeEndpointInput{ + MLModelId: aws.String("EntityId"), // Required + } + resp, err := svc.CreateRealtimeEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteBatchPrediction() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteBatchPredictionInput{ + BatchPredictionId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteBatchPrediction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteDataSource() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteDataSourceInput{ + DataSourceId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteDataSource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteEvaluation() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteEvaluationInput{ + EvaluationId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteEvaluation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteMLModel() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteMLModelInput{ + MLModelId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteMLModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DeleteRealtimeEndpoint() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DeleteRealtimeEndpointInput{ + MLModelId: aws.String("EntityId"), // Required + } + resp, err := svc.DeleteRealtimeEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeBatchPredictions() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeBatchPredictionsInput{ + EQ: aws.String("ComparatorValue"), + FilterVariable: aws.String("BatchPredictionFilterVariable"), + GE: aws.String("ComparatorValue"), + GT: aws.String("ComparatorValue"), + LE: aws.String("ComparatorValue"), + LT: aws.String("ComparatorValue"), + Limit: aws.Int64(1), + NE: aws.String("ComparatorValue"), + NextToken: aws.String("StringType"), + Prefix: aws.String("ComparatorValue"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.DescribeBatchPredictions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeDataSources() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeDataSourcesInput{ + EQ: aws.String("ComparatorValue"), + FilterVariable: aws.String("DataSourceFilterVariable"), + GE: aws.String("ComparatorValue"), + GT: aws.String("ComparatorValue"), + LE: aws.String("ComparatorValue"), + LT: aws.String("ComparatorValue"), + Limit: aws.Int64(1), + NE: aws.String("ComparatorValue"), + NextToken: aws.String("StringType"), + Prefix: aws.String("ComparatorValue"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.DescribeDataSources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeEvaluations() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeEvaluationsInput{ + EQ: aws.String("ComparatorValue"), + FilterVariable: aws.String("EvaluationFilterVariable"), + GE: aws.String("ComparatorValue"), + GT: aws.String("ComparatorValue"), + LE: aws.String("ComparatorValue"), + LT: aws.String("ComparatorValue"), + Limit: aws.Int64(1), + NE: aws.String("ComparatorValue"), + NextToken: aws.String("StringType"), + Prefix: aws.String("ComparatorValue"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.DescribeEvaluations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_DescribeMLModels() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.DescribeMLModelsInput{ + EQ: aws.String("ComparatorValue"), + FilterVariable: aws.String("MLModelFilterVariable"), + GE: aws.String("ComparatorValue"), + GT: aws.String("ComparatorValue"), + LE: aws.String("ComparatorValue"), + LT: aws.String("ComparatorValue"), + Limit: aws.Int64(1), + NE: aws.String("ComparatorValue"), + NextToken: aws.String("StringType"), + Prefix: aws.String("ComparatorValue"), + SortOrder: aws.String("SortOrder"), + } + resp, err := svc.DescribeMLModels(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_GetBatchPrediction() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.GetBatchPredictionInput{ + BatchPredictionId: aws.String("EntityId"), // Required + } + resp, err := svc.GetBatchPrediction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_GetDataSource() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.GetDataSourceInput{ + DataSourceId: aws.String("EntityId"), // Required + Verbose: aws.Bool(true), + } + resp, err := svc.GetDataSource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_GetEvaluation() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.GetEvaluationInput{ + EvaluationId: aws.String("EntityId"), // Required + } + resp, err := svc.GetEvaluation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_GetMLModel() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.GetMLModelInput{ + MLModelId: aws.String("EntityId"), // Required + Verbose: aws.Bool(true), + } + resp, err := svc.GetMLModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_Predict() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.PredictInput{ + MLModelId: aws.String("EntityId"), // Required + PredictEndpoint: aws.String("VipURL"), // Required + Record: map[string]*string{ // Required + "Key": aws.String("VariableValue"), // Required + // More values... + }, + } + resp, err := svc.Predict(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_UpdateBatchPrediction() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.UpdateBatchPredictionInput{ + BatchPredictionId: aws.String("EntityId"), // Required + BatchPredictionName: aws.String("EntityName"), // Required + } + resp, err := svc.UpdateBatchPrediction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_UpdateDataSource() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.UpdateDataSourceInput{ + DataSourceId: aws.String("EntityId"), // Required + DataSourceName: aws.String("EntityName"), // Required + } + resp, err := svc.UpdateDataSource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleMachineLearning_UpdateEvaluation() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.UpdateEvaluationInput{ + EvaluationId: aws.String("EntityId"), // Required + EvaluationName: aws.String("EntityName"), // Required + } + resp, err := svc.UpdateEvaluation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleMachineLearning_UpdateMLModel() { + svc := machinelearning.New(session.New()) + + params := &machinelearning.UpdateMLModelInput{ + MLModelId: aws.String("EntityId"), // Required + MLModelName: aws.String("EntityName"), + ScoreThreshold: aws.Float64(1.0), + } + resp, err := svc.UpdateMLModel(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,122 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package machinelearningiface provides an interface for the Amazon Machine Learning. 
+package machinelearningiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/machinelearning" +) + +// MachineLearningAPI is the interface type for machinelearning.MachineLearning. +type MachineLearningAPI interface { + CreateBatchPredictionRequest(*machinelearning.CreateBatchPredictionInput) (*request.Request, *machinelearning.CreateBatchPredictionOutput) + + CreateBatchPrediction(*machinelearning.CreateBatchPredictionInput) (*machinelearning.CreateBatchPredictionOutput, error) + + CreateDataSourceFromRDSRequest(*machinelearning.CreateDataSourceFromRDSInput) (*request.Request, *machinelearning.CreateDataSourceFromRDSOutput) + + CreateDataSourceFromRDS(*machinelearning.CreateDataSourceFromRDSInput) (*machinelearning.CreateDataSourceFromRDSOutput, error) + + CreateDataSourceFromRedshiftRequest(*machinelearning.CreateDataSourceFromRedshiftInput) (*request.Request, *machinelearning.CreateDataSourceFromRedshiftOutput) + + CreateDataSourceFromRedshift(*machinelearning.CreateDataSourceFromRedshiftInput) (*machinelearning.CreateDataSourceFromRedshiftOutput, error) + + CreateDataSourceFromS3Request(*machinelearning.CreateDataSourceFromS3Input) (*request.Request, *machinelearning.CreateDataSourceFromS3Output) + + CreateDataSourceFromS3(*machinelearning.CreateDataSourceFromS3Input) (*machinelearning.CreateDataSourceFromS3Output, error) + + CreateEvaluationRequest(*machinelearning.CreateEvaluationInput) (*request.Request, *machinelearning.CreateEvaluationOutput) + + CreateEvaluation(*machinelearning.CreateEvaluationInput) (*machinelearning.CreateEvaluationOutput, error) + + CreateMLModelRequest(*machinelearning.CreateMLModelInput) (*request.Request, *machinelearning.CreateMLModelOutput) + + CreateMLModel(*machinelearning.CreateMLModelInput) (*machinelearning.CreateMLModelOutput, error) + + CreateRealtimeEndpointRequest(*machinelearning.CreateRealtimeEndpointInput) (*request.Request, *machinelearning.CreateRealtimeEndpointOutput) + + 
CreateRealtimeEndpoint(*machinelearning.CreateRealtimeEndpointInput) (*machinelearning.CreateRealtimeEndpointOutput, error) + + DeleteBatchPredictionRequest(*machinelearning.DeleteBatchPredictionInput) (*request.Request, *machinelearning.DeleteBatchPredictionOutput) + + DeleteBatchPrediction(*machinelearning.DeleteBatchPredictionInput) (*machinelearning.DeleteBatchPredictionOutput, error) + + DeleteDataSourceRequest(*machinelearning.DeleteDataSourceInput) (*request.Request, *machinelearning.DeleteDataSourceOutput) + + DeleteDataSource(*machinelearning.DeleteDataSourceInput) (*machinelearning.DeleteDataSourceOutput, error) + + DeleteEvaluationRequest(*machinelearning.DeleteEvaluationInput) (*request.Request, *machinelearning.DeleteEvaluationOutput) + + DeleteEvaluation(*machinelearning.DeleteEvaluationInput) (*machinelearning.DeleteEvaluationOutput, error) + + DeleteMLModelRequest(*machinelearning.DeleteMLModelInput) (*request.Request, *machinelearning.DeleteMLModelOutput) + + DeleteMLModel(*machinelearning.DeleteMLModelInput) (*machinelearning.DeleteMLModelOutput, error) + + DeleteRealtimeEndpointRequest(*machinelearning.DeleteRealtimeEndpointInput) (*request.Request, *machinelearning.DeleteRealtimeEndpointOutput) + + DeleteRealtimeEndpoint(*machinelearning.DeleteRealtimeEndpointInput) (*machinelearning.DeleteRealtimeEndpointOutput, error) + + DescribeBatchPredictionsRequest(*machinelearning.DescribeBatchPredictionsInput) (*request.Request, *machinelearning.DescribeBatchPredictionsOutput) + + DescribeBatchPredictions(*machinelearning.DescribeBatchPredictionsInput) (*machinelearning.DescribeBatchPredictionsOutput, error) + + DescribeBatchPredictionsPages(*machinelearning.DescribeBatchPredictionsInput, func(*machinelearning.DescribeBatchPredictionsOutput, bool) bool) error + + DescribeDataSourcesRequest(*machinelearning.DescribeDataSourcesInput) (*request.Request, *machinelearning.DescribeDataSourcesOutput) + + 
DescribeDataSources(*machinelearning.DescribeDataSourcesInput) (*machinelearning.DescribeDataSourcesOutput, error) + + DescribeDataSourcesPages(*machinelearning.DescribeDataSourcesInput, func(*machinelearning.DescribeDataSourcesOutput, bool) bool) error + + DescribeEvaluationsRequest(*machinelearning.DescribeEvaluationsInput) (*request.Request, *machinelearning.DescribeEvaluationsOutput) + + DescribeEvaluations(*machinelearning.DescribeEvaluationsInput) (*machinelearning.DescribeEvaluationsOutput, error) + + DescribeEvaluationsPages(*machinelearning.DescribeEvaluationsInput, func(*machinelearning.DescribeEvaluationsOutput, bool) bool) error + + DescribeMLModelsRequest(*machinelearning.DescribeMLModelsInput) (*request.Request, *machinelearning.DescribeMLModelsOutput) + + DescribeMLModels(*machinelearning.DescribeMLModelsInput) (*machinelearning.DescribeMLModelsOutput, error) + + DescribeMLModelsPages(*machinelearning.DescribeMLModelsInput, func(*machinelearning.DescribeMLModelsOutput, bool) bool) error + + GetBatchPredictionRequest(*machinelearning.GetBatchPredictionInput) (*request.Request, *machinelearning.GetBatchPredictionOutput) + + GetBatchPrediction(*machinelearning.GetBatchPredictionInput) (*machinelearning.GetBatchPredictionOutput, error) + + GetDataSourceRequest(*machinelearning.GetDataSourceInput) (*request.Request, *machinelearning.GetDataSourceOutput) + + GetDataSource(*machinelearning.GetDataSourceInput) (*machinelearning.GetDataSourceOutput, error) + + GetEvaluationRequest(*machinelearning.GetEvaluationInput) (*request.Request, *machinelearning.GetEvaluationOutput) + + GetEvaluation(*machinelearning.GetEvaluationInput) (*machinelearning.GetEvaluationOutput, error) + + GetMLModelRequest(*machinelearning.GetMLModelInput) (*request.Request, *machinelearning.GetMLModelOutput) + + GetMLModel(*machinelearning.GetMLModelInput) (*machinelearning.GetMLModelOutput, error) + + PredictRequest(*machinelearning.PredictInput) (*request.Request, 
*machinelearning.PredictOutput) + + Predict(*machinelearning.PredictInput) (*machinelearning.PredictOutput, error) + + UpdateBatchPredictionRequest(*machinelearning.UpdateBatchPredictionInput) (*request.Request, *machinelearning.UpdateBatchPredictionOutput) + + UpdateBatchPrediction(*machinelearning.UpdateBatchPredictionInput) (*machinelearning.UpdateBatchPredictionOutput, error) + + UpdateDataSourceRequest(*machinelearning.UpdateDataSourceInput) (*request.Request, *machinelearning.UpdateDataSourceOutput) + + UpdateDataSource(*machinelearning.UpdateDataSourceInput) (*machinelearning.UpdateDataSourceOutput, error) + + UpdateEvaluationRequest(*machinelearning.UpdateEvaluationInput) (*request.Request, *machinelearning.UpdateEvaluationOutput) + + UpdateEvaluation(*machinelearning.UpdateEvaluationInput) (*machinelearning.UpdateEvaluationOutput, error) + + UpdateMLModelRequest(*machinelearning.UpdateMLModelInput) (*request.Request, *machinelearning.UpdateMLModelOutput) + + UpdateMLModel(*machinelearning.UpdateMLModelInput) (*machinelearning.UpdateMLModelOutput, error) +} + +var _ MachineLearningAPI = (*machinelearning.MachineLearning)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/machinelearning/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,88 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package machinelearning + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Definition of the public APIs exposed by Amazon Machine Learning +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type MachineLearning struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "machinelearning" + +// New creates a new instance of the MachineLearning client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a MachineLearning client from just a session. +// svc := machinelearning.New(mySession) +// +// // Create a MachineLearning client with additional configuration +// svc := machinelearning.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *MachineLearning { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *MachineLearning { + svc := &MachineLearning{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-12-12", + JSONVersion: "1.1", + TargetPrefix: "AmazonML_20141212", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a MachineLearning operation and runs any +// custom request initialization. +func (c *MachineLearning) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,147 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package marketplacecommerceanalytics provides a client for AWS Marketplace Commerce Analytics. 
+package marketplacecommerceanalytics + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opGenerateDataSet = "GenerateDataSet" + +// GenerateDataSetRequest generates a request for the GenerateDataSet operation. +func (c *MarketplaceCommerceAnalytics) GenerateDataSetRequest(input *GenerateDataSetInput) (req *request.Request, output *GenerateDataSetOutput) { + op := &request.Operation{ + Name: opGenerateDataSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateDataSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GenerateDataSetOutput{} + req.Data = output + return +} + +// Given a data set type and data set publication date, asynchronously publishes +// the requested data set to the specified S3 bucket and notifies the specified +// SNS topic once the data is available. Returns a unique request identifier +// that can be used to correlate requests with notifications from the SNS topic. +// Data sets will be published in comma-separated values (CSV) format with the +// file name {data_set_type}_YYYY-MM-DD.csv. If a file with the same name already +// exists (e.g. if the same data set is requested twice), the original file +// will be overwritten by the new file. Requires a Role with an attached permissions +// policy providing Allow permissions for the following actions: s3:PutObject, +// s3:getBucketLocation, sns:SetRegion, sns:ListTopics, sns:Publish, iam:GetRolePolicy. +func (c *MarketplaceCommerceAnalytics) GenerateDataSet(input *GenerateDataSetInput) (*GenerateDataSetOutput, error) { + req, out := c.GenerateDataSetRequest(input) + err := req.Send() + return out, err +} + +// Container for the parameters to the GenerateDataSet operation. +type GenerateDataSetInput struct { + _ struct{} `type:"structure"` + + // The date a data set was published. For daily data sets, provide a date with + // day-level granularity for the desired day. 
For weekly data sets, provide + // a date with day-level granularity within the desired week (the day value + // will be ignored). For monthly data sets, provide a date with month-level + // granularity for the desired month (the day value will be ignored). + DataSetPublicationDate *time.Time `locationName:"dataSetPublicationDate" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The type of the data set to publish. + DataSetType *string `locationName:"dataSetType" min:"1" type:"string" required:"true" enum:"DataSetType"` + + // The name (friendly name, not ARN) of the destination S3 bucket. + DestinationS3BucketName *string `locationName:"destinationS3BucketName" min:"1" type:"string" required:"true"` + + // (Optional) The desired S3 prefix for the published data set, similar to a + // directory path in standard file systems. For example, if given the bucket + // name "mybucket" and the prefix "myprefix/mydatasets", the output file "outputfile" + // would be published to "s3://mybucket/myprefix/mydatasets/outputfile". If + // the prefix directory structure does not exist, it will be created. If no + // prefix is provided, the data set will be published to the S3 bucket root. + DestinationS3Prefix *string `locationName:"destinationS3Prefix" type:"string"` + + // The Amazon Resource Name (ARN) of the Role with an attached permissions policy + // to interact with the provided AWS services. + RoleNameArn *string `locationName:"roleNameArn" min:"1" type:"string" required:"true"` + + // Amazon Resource Name (ARN) for the SNS Topic that will be notified when the + // data set has been published or if an error has occurred. 
+ SnsTopicArn *string `locationName:"snsTopicArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GenerateDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataSetInput) GoString() string { + return s.String() +} + +// Container for the result of the GenerateDataSet operation. +type GenerateDataSetOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier representing a specific request to the GenerateDataSet + // operation. This identifier can be used to correlate a request with notifications + // from the SNS topic. + DataSetRequestId *string `locationName:"dataSetRequestId" type:"string"` +} + +// String returns the string representation +func (s GenerateDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateDataSetOutput) GoString() string { + return s.String() +} + +// The type of the data set to publish. 
+const ( + // @enum DataSetType + DataSetTypeCustomerSubscriberHourlyMonthlySubscriptions = "customer_subscriber_hourly_monthly_subscriptions" + // @enum DataSetType + DataSetTypeCustomerSubscriberAnnualSubscriptions = "customer_subscriber_annual_subscriptions" + // @enum DataSetType + DataSetTypeDailyBusinessUsageByInstanceType = "daily_business_usage_by_instance_type" + // @enum DataSetType + DataSetTypeDailyBusinessFees = "daily_business_fees" + // @enum DataSetType + DataSetTypeDailyBusinessFreeTrialConversions = "daily_business_free_trial_conversions" + // @enum DataSetType + DataSetTypeDailyBusinessNewInstances = "daily_business_new_instances" + // @enum DataSetType + DataSetTypeDailyBusinessNewProductSubscribers = "daily_business_new_product_subscribers" + // @enum DataSetType + DataSetTypeDailyBusinessCanceledProductSubscribers = "daily_business_canceled_product_subscribers" + // @enum DataSetType + DataSetTypeMonthlyRevenueBillingAndRevenueData = "monthly_revenue_billing_and_revenue_data" + // @enum DataSetType + DataSetTypeMonthlyRevenueAnnualSubscriptions = "monthly_revenue_annual_subscriptions" + // @enum DataSetType + DataSetTypeDisbursedAmountByProduct = "disbursed_amount_by_product" + // @enum DataSetType + DataSetTypeDisbursedAmountByCustomerGeo = "disbursed_amount_by_customer_geo" + // @enum DataSetType + DataSetTypeDisbursedAmountByAgeOfUncollectedFunds = "disbursed_amount_by_age_of_uncollected_funds" + // @enum DataSetType + DataSetTypeDisbursedAmountByAgeOfDisbursedFunds = "disbursed_amount_by_age_of_disbursed_funds" + // @enum DataSetType + DataSetTypeCustomerProfileByIndustry = "customer_profile_by_industry" + // @enum DataSetType + DataSetTypeCustomerProfileByRevenue = "customer_profile_by_revenue" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/examples_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,40 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package marketplacecommerceanalytics_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleMarketplaceCommerceAnalytics_GenerateDataSet() { + svc := marketplacecommerceanalytics.New(session.New()) + + params := &marketplacecommerceanalytics.GenerateDataSetInput{ + DataSetPublicationDate: aws.Time(time.Now()), // Required + DataSetType: aws.String("DataSetType"), // Required + DestinationS3BucketName: aws.String("DestinationS3BucketName"), // Required + RoleNameArn: aws.String("RoleNameArn"), // Required + SnsTopicArn: aws.String("SnsTopicArn"), // Required + DestinationS3Prefix: aws.String("DestinationS3Prefix"), + } + resp, err := svc.GenerateDataSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/marketplacecommerceanalyticsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/marketplacecommerceanalyticsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/marketplacecommerceanalyticsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/marketplacecommerceanalyticsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package marketplacecommerceanalyticsiface provides an interface for the AWS Marketplace Commerce Analytics. +package marketplacecommerceanalyticsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics" +) + +// MarketplaceCommerceAnalyticsAPI is the interface type for marketplacecommerceanalytics.MarketplaceCommerceAnalytics. 
+type MarketplaceCommerceAnalyticsAPI interface { + GenerateDataSetRequest(*marketplacecommerceanalytics.GenerateDataSetInput) (*request.Request, *marketplacecommerceanalytics.GenerateDataSetOutput) + + GenerateDataSet(*marketplacecommerceanalytics.GenerateDataSetInput) (*marketplacecommerceanalytics.GenerateDataSetOutput, error) +} + +var _ MarketplaceCommerceAnalyticsAPI = (*marketplacecommerceanalytics.MarketplaceCommerceAnalytics)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/marketplacecommerceanalytics/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,89 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package marketplacecommerceanalytics + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Provides AWS Marketplace business intelligence data on-demand. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type MarketplaceCommerceAnalytics struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. 
+const ServiceName = "marketplacecommerceanalytics" + +// New creates a new instance of the MarketplaceCommerceAnalytics client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a MarketplaceCommerceAnalytics client from just a session. +// svc := marketplacecommerceanalytics.New(mySession) +// +// // Create a MarketplaceCommerceAnalytics client with additional configuration +// svc := marketplacecommerceanalytics.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *MarketplaceCommerceAnalytics { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *MarketplaceCommerceAnalytics { + svc := &MarketplaceCommerceAnalytics{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "marketplacecommerceanalytics", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-07-01", + JSONVersion: "1.1", + TargetPrefix: "MarketplaceCommerceAnalytics20150701", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a MarketplaceCommerceAnalytics operation and runs any +// custom request initialization. 
+func (c *MarketplaceCommerceAnalytics) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,151 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package mobileanalytics provides a client for Amazon Mobile Analytics. +package mobileanalytics + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opPutEvents = "PutEvents" + +// PutEventsRequest generates a request for the PutEvents operation. +func (c *MobileAnalytics) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) { + op := &request.Operation{ + Name: opPutEvents, + HTTPMethod: "POST", + HTTPPath: "/2014-06-05/events", + } + + if input == nil { + input = &PutEventsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutEventsOutput{} + req.Data = output + return +} + +// The PutEvents operation records one or more events. 
You can have up to 1,500 +// unique custom events per app, any combination of up to 40 attributes and +// metrics per custom event, and any number of attribute or metric values. +func (c *MobileAnalytics) PutEvents(input *PutEventsInput) (*PutEventsOutput, error) { + req, out := c.PutEventsRequest(input) + err := req.Send() + return out, err +} + +// A JSON object representing a batch of unique event occurrences in your app. +type Event struct { + _ struct{} `type:"structure"` + + // A collection of key-value pairs that give additional context to the event. + // The key-value pairs are specified by the developer. + // + // This collection can be empty or the attribute object can be omitted. + Attributes map[string]*string `locationName:"attributes" type:"map"` + + // A name signifying an event that occurred in your app. This is used for grouping + // and aggregating like events together for reporting purposes. + EventType *string `locationName:"eventType" min:"1" type:"string" required:"true"` + + // A collection of key-value pairs that gives additional, measurable context + // to the event. The key-value pairs are specified by the developer. + // + // This collection can be empty or the attribute object can be omitted. + Metrics map[string]*float64 `locationName:"metrics" type:"map"` + + // The session the event occured within. + Session *Session `locationName:"session" type:"structure"` + + // The time the event occurred in ISO 8601 standard date time format. For example, + // 2014-06-30T19:07:47.885Z + Timestamp *string `locationName:"timestamp" type:"string" required:"true"` + + // The version of the event. 
+ Version *string `locationName:"version" min:"1" type:"string"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// A container for the data needed for a PutEvent operation +type PutEventsInput struct { + _ struct{} `type:"structure"` + + // The client context including the client ID, app title, app version and package + // name. + ClientContext *string `location:"header" locationName:"x-amz-Client-Context" type:"string" required:"true"` + + // The encoding used for the client context. + ClientContextEncoding *string `location:"header" locationName:"x-amz-Client-Context-Encoding" type:"string"` + + // An array of Event JSON objects + Events []*Event `locationName:"events" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsInput) GoString() string { + return s.String() +} + +type PutEventsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutEventsOutput) GoString() string { + return s.String() +} + +// Describes the session. Session information is required on ALL events. +type Session struct { + _ struct{} `type:"structure"` + + // The duration of the session. + Duration *int64 `locationName:"duration" type:"long"` + + // A unique identifier for the session + Id *string `locationName:"id" min:"1" type:"string"` + + // The time the event started in ISO 8601 standard date time format. 
For example, + // 2014-06-30T19:07:47.885Z + StartTimestamp *string `locationName:"startTimestamp" type:"string"` + + // The time the event terminated in ISO 8601 standard date time format. For + // example, 2014-06-30T19:07:47.885Z + StopTimestamp *string `locationName:"stopTimestamp" type:"string"` +} + +// String returns the string representation +func (s Session) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Session) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package mobileanalytics_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/mobileanalytics" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleMobileAnalytics_PutEvents() { + svc := mobileanalytics.New(session.New()) + + params := &mobileanalytics.PutEventsInput{ + ClientContext: aws.String("String"), // Required + Events: []*mobileanalytics.Event{ // Required + { // Required + EventType: aws.String("String50Chars"), // Required + Timestamp: aws.String("ISO8601Timestamp"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String0to1000Chars"), // Required + // More values... + }, + Metrics: map[string]*float64{ + "Key": aws.Float64(1.0), // Required + // More values... 
+ }, + Session: &mobileanalytics.Session{ + Duration: aws.Int64(1), + Id: aws.String("String50Chars"), + StartTimestamp: aws.String("ISO8601Timestamp"), + StopTimestamp: aws.String("ISO8601Timestamp"), + }, + Version: aws.String("String10Chars"), + }, + // More values... + }, + ClientContextEncoding: aws.String("String"), + } + resp, err := svc.PutEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/mobileanalyticsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package mobileanalyticsiface provides an interface for the Amazon Mobile Analytics. +package mobileanalyticsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/mobileanalytics" +) + +// MobileAnalyticsAPI is the interface type for mobileanalytics.MobileAnalytics. 
+type MobileAnalyticsAPI interface { + PutEventsRequest(*mobileanalytics.PutEventsInput) (*request.Request, *mobileanalytics.PutEventsOutput) + + PutEvents(*mobileanalytics.PutEventsInput) (*mobileanalytics.PutEventsOutput, error) +} + +var _ MobileAnalyticsAPI = (*mobileanalytics.MobileAnalytics)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/mobileanalytics/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,87 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package mobileanalytics + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restjson" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Mobile Analytics is a service for collecting, visualizing, and understanding +// app usage data at scale. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type MobileAnalytics struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "mobileanalytics" + +// New creates a new instance of the MobileAnalytics client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a MobileAnalytics client from just a session. +// svc := mobileanalytics.New(mySession) +// +// // Create a MobileAnalytics client with additional configuration +// svc := mobileanalytics.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *MobileAnalytics { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *MobileAnalytics { + svc := &MobileAnalytics{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-06-05", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a MobileAnalytics operation and runs any +// custom request initialization. 
+func (c *MobileAnalytics) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,7786 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package opsworks provides a client for AWS OpsWorks. +package opsworks + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAssignInstance = "AssignInstance" + +// AssignInstanceRequest generates a request for the AssignInstance operation. +func (c *OpsWorks) AssignInstanceRequest(input *AssignInstanceInput) (req *request.Request, output *AssignInstanceOutput) { + op := &request.Operation{ + Name: opAssignInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssignInstanceOutput{} + req.Data = output + return +} + +// Assign a registered instance to a layer. +// +// You can assign registered on-premises instances to any layer type. You +// can assign registered Amazon EC2 instances only to custom layers. 
You cannot +// use this action with instances that were created with AWS OpsWorks. Required +// Permissions: To use this action, an AWS Identity and Access Management (IAM) +// user must have a Manage permissions level for the stack or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AssignInstance(input *AssignInstanceInput) (*AssignInstanceOutput, error) { + req, out := c.AssignInstanceRequest(input) + err := req.Send() + return out, err +} + +const opAssignVolume = "AssignVolume" + +// AssignVolumeRequest generates a request for the AssignVolume operation. +func (c *OpsWorks) AssignVolumeRequest(input *AssignVolumeInput) (req *request.Request, output *AssignVolumeOutput) { + op := &request.Operation{ + Name: opAssignVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssignVolumeOutput{} + req.Data = output + return +} + +// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. +// The volume must first be registered with the stack by calling RegisterVolume. +// After you register the volume, you must call UpdateVolume to specify a mount +// point before calling AssignVolume. For more information, see Resource Management +// (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. 
For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) AssignVolume(input *AssignVolumeInput) (*AssignVolumeOutput, error) { + req, out := c.AssignVolumeRequest(input) + err := req.Send() + return out, err +} + +const opAssociateElasticIp = "AssociateElasticIp" + +// AssociateElasticIpRequest generates a request for the AssociateElasticIp operation. +func (c *OpsWorks) AssociateElasticIpRequest(input *AssociateElasticIpInput) (req *request.Request, output *AssociateElasticIpOutput) { + op := &request.Operation{ + Name: opAssociateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AssociateElasticIpOutput{} + req.Data = output + return +} + +// Associates one of the stack's registered Elastic IP addresses with a specified +// instance. The address must first be registered with the stack by calling +// RegisterElasticIp. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) AssociateElasticIp(input *AssociateElasticIpInput) (*AssociateElasticIpOutput, error) { + req, out := c.AssociateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opAttachElasticLoadBalancer = "AttachElasticLoadBalancer" + +// AttachElasticLoadBalancerRequest generates a request for the AttachElasticLoadBalancer operation. +func (c *OpsWorks) AttachElasticLoadBalancerRequest(input *AttachElasticLoadBalancerInput) (req *request.Request, output *AttachElasticLoadBalancerOutput) { + op := &request.Operation{ + Name: opAttachElasticLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachElasticLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AttachElasticLoadBalancerOutput{} + req.Data = output + return +} + +// Attaches an Elastic Load Balancing load balancer to a specified layer. For +// more information, see Elastic Load Balancing (http://docs.aws.amazon.com/opsworks/latest/userguide/load-balancer-elb.html). +// +// You must create the Elastic Load Balancing instance separately, by using +// the Elastic Load Balancing console, API, or CLI. For more information, see +// Elastic Load Balancing Developer Guide (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/Welcome.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) AttachElasticLoadBalancer(input *AttachElasticLoadBalancerInput) (*AttachElasticLoadBalancerOutput, error) { + req, out := c.AttachElasticLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opCloneStack = "CloneStack" + +// CloneStackRequest generates a request for the CloneStack operation. +func (c *OpsWorks) CloneStackRequest(input *CloneStackInput) (req *request.Request, output *CloneStackOutput) { + op := &request.Operation{ + Name: opCloneStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CloneStackInput{} + } + + req = c.newRequest(op, input, output) + output = &CloneStackOutput{} + req.Data = output + return +} + +// Creates a clone of a specified stack. For more information, see Clone a Stack +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-cloning.html). +// By default, all parameters are set to the values used by the parent stack. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CloneStack(input *CloneStackInput) (*CloneStackOutput, error) { + req, out := c.CloneStackRequest(input) + err := req.Send() + return out, err +} + +const opCreateApp = "CreateApp" + +// CreateAppRequest generates a request for the CreateApp operation. +func (c *OpsWorks) CreateAppRequest(input *CreateAppInput) (req *request.Request, output *CreateAppOutput) { + op := &request.Operation{ + Name: opCreateApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAppInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAppOutput{} + req.Data = output + return +} + +// Creates an app for a specified stack. 
For more information, see Creating +// Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateApp(input *CreateAppInput) (*CreateAppOutput, error) { + req, out := c.CreateAppRequest(input) + err := req.Send() + return out, err +} + +const opCreateDeployment = "CreateDeployment" + +// CreateDeploymentRequest generates a request for the CreateDeployment operation. +func (c *OpsWorks) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { + op := &request.Operation{ + Name: opCreateDeployment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDeploymentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDeploymentOutput{} + req.Data = output + return +} + +// Runs deployment or stack commands. For more information, see Deploying Apps +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-deploying.html) +// and Run Stack Commands (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-commands.html). +// +// Required Permissions: To use this action, an IAM user must have a Deploy +// or Manage permissions level for the stack, or an attached policy that explicitly +// grants permissions. For more information on user permissions, see Managing +// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { + req, out := c.CreateDeploymentRequest(input) + err := req.Send() + return out, err +} + +const opCreateInstance = "CreateInstance" + +// CreateInstanceRequest generates a request for the CreateInstance operation. +func (c *OpsWorks) CreateInstanceRequest(input *CreateInstanceInput) (req *request.Request, output *CreateInstanceOutput) { + op := &request.Operation{ + Name: opCreateInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateInstanceOutput{} + req.Data = output + return +} + +// Creates an instance in a specified stack. For more information, see Adding +// an Instance to a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateInstance(input *CreateInstanceInput) (*CreateInstanceOutput, error) { + req, out := c.CreateInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateLayer = "CreateLayer" + +// CreateLayerRequest generates a request for the CreateLayer operation. +func (c *OpsWorks) CreateLayerRequest(input *CreateLayerInput) (req *request.Request, output *CreateLayerOutput) { + op := &request.Operation{ + Name: opCreateLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLayerInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateLayerOutput{} + req.Data = output + return +} + +// Creates a layer. 
For more information, see How to Create a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-create.html). +// +// You should use CreateLayer for noncustom layer types such as PHP App Server +// only if the stack does not have an existing layer of that type. A stack can +// have at most one instance of each noncustom layer; if you attempt to create +// a second instance, CreateLayer fails. A stack can have an arbitrary number +// of custom layers, so you can call CreateLayer as many times as you like for +// that layer type. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateLayer(input *CreateLayerInput) (*CreateLayerOutput, error) { + req, out := c.CreateLayerRequest(input) + err := req.Send() + return out, err +} + +const opCreateStack = "CreateStack" + +// CreateStackRequest generates a request for the CreateStack operation. +func (c *OpsWorks) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) { + op := &request.Operation{ + Name: opCreateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStackInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStackOutput{} + req.Data = output + return +} + +// Creates a new stack. For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-edit.html). +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. 
For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateStack(input *CreateStackInput) (*CreateStackOutput, error) { + req, out := c.CreateStackRequest(input) + err := req.Send() + return out, err +} + +const opCreateUserProfile = "CreateUserProfile" + +// CreateUserProfileRequest generates a request for the CreateUserProfile operation. +func (c *OpsWorks) CreateUserProfileRequest(input *CreateUserProfileInput) (req *request.Request, output *CreateUserProfileOutput) { + op := &request.Operation{ + Name: opCreateUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateUserProfileOutput{} + req.Data = output + return +} + +// Creates a new user profile. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) CreateUserProfile(input *CreateUserProfileInput) (*CreateUserProfileOutput, error) { + req, out := c.CreateUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeleteApp = "DeleteApp" + +// DeleteAppRequest generates a request for the DeleteApp operation. 
+func (c *OpsWorks) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, output *DeleteAppOutput) { + op := &request.Operation{ + Name: opDeleteApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAppInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAppOutput{} + req.Data = output + return +} + +// Deletes a specified app. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) { + req, out := c.DeleteAppRequest(input) + err := req.Send() + return out, err +} + +const opDeleteInstance = "DeleteInstance" + +// DeleteInstanceRequest generates a request for the DeleteInstance operation. +func (c *OpsWorks) DeleteInstanceRequest(input *DeleteInstanceInput) (req *request.Request, output *DeleteInstanceOutput) { + op := &request.Operation{ + Name: opDeleteInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteInstanceOutput{} + req.Data = output + return +} + +// Deletes a specified instance, which terminates the associated Amazon EC2 +// instance. You must stop an instance before you can delete it. +// +// For more information, see Deleting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-delete.html). 
+// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteInstance(input *DeleteInstanceInput) (*DeleteInstanceOutput, error) { + req, out := c.DeleteInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLayer = "DeleteLayer" + +// DeleteLayerRequest generates a request for the DeleteLayer operation. +func (c *OpsWorks) DeleteLayerRequest(input *DeleteLayerInput) (req *request.Request, output *DeleteLayerOutput) { + op := &request.Operation{ + Name: opDeleteLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLayerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLayerOutput{} + req.Data = output + return +} + +// Deletes a specified layer. You must first stop and then delete all associated +// instances or unassign registered instances. For more information, see How +// to Delete a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-delete.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) DeleteLayer(input *DeleteLayerInput) (*DeleteLayerOutput, error) { + req, out := c.DeleteLayerRequest(input) + err := req.Send() + return out, err +} + +const opDeleteStack = "DeleteStack" + +// DeleteStackRequest generates a request for the DeleteStack operation. +func (c *OpsWorks) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { + op := &request.Operation{ + Name: opDeleteStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteStackOutput{} + req.Data = output + return +} + +// Deletes a specified stack. You must first delete all instances, layers, and +// apps or deregister registered instances. For more information, see Shut Down +// a Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-shutting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteStack(input *DeleteStackInput) (*DeleteStackOutput, error) { + req, out := c.DeleteStackRequest(input) + err := req.Send() + return out, err +} + +const opDeleteUserProfile = "DeleteUserProfile" + +// DeleteUserProfileRequest generates a request for the DeleteUserProfile operation. 
+func (c *OpsWorks) DeleteUserProfileRequest(input *DeleteUserProfileInput) (req *request.Request, output *DeleteUserProfileOutput) { + op := &request.Operation{ + Name: opDeleteUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteUserProfileOutput{} + req.Data = output + return +} + +// Deletes a user profile. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeleteUserProfile(input *DeleteUserProfileInput) (*DeleteUserProfileOutput, error) { + req, out := c.DeleteUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterEcsCluster = "DeregisterEcsCluster" + +// DeregisterEcsClusterRequest generates a request for the DeregisterEcsCluster operation. +func (c *OpsWorks) DeregisterEcsClusterRequest(input *DeregisterEcsClusterInput) (req *request.Request, output *DeregisterEcsClusterOutput) { + op := &request.Operation{ + Name: opDeregisterEcsCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterEcsClusterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterEcsClusterOutput{} + req.Data = output + return +} + +// Deregisters a specified Amazon ECS cluster from a stack. 
For more information, +// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html#workinglayers-ecscluster-delete). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see . +func (c *OpsWorks) DeregisterEcsCluster(input *DeregisterEcsClusterInput) (*DeregisterEcsClusterOutput, error) { + req, out := c.DeregisterEcsClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterElasticIp = "DeregisterElasticIp" + +// DeregisterElasticIpRequest generates a request for the DeregisterElasticIp operation. +func (c *OpsWorks) DeregisterElasticIpRequest(input *DeregisterElasticIpInput) (req *request.Request, output *DeregisterElasticIpOutput) { + op := &request.Operation{ + Name: opDeregisterElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterElasticIpInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterElasticIpOutput{} + req.Data = output + return +} + +// Deregisters a specified Elastic IP address. The address can then be registered +// by another stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) DeregisterElasticIp(input *DeregisterElasticIpInput) (*DeregisterElasticIpOutput, error) { + req, out := c.DeregisterElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterInstance = "DeregisterInstance" + +// DeregisterInstanceRequest generates a request for the DeregisterInstance operation. +func (c *OpsWorks) DeregisterInstanceRequest(input *DeregisterInstanceInput) (req *request.Request, output *DeregisterInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterInstanceOutput{} + req.Data = output + return +} + +// Deregister a registered Amazon EC2 or on-premises instance. This action removes +// the instance from the stack and returns it to your control. This action can +// not be used with instances that were created with AWS OpsWorks. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeregisterInstance(input *DeregisterInstanceInput) (*DeregisterInstanceOutput, error) { + req, out := c.DeregisterInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterRdsDbInstance = "DeregisterRdsDbInstance" + +// DeregisterRdsDbInstanceRequest generates a request for the DeregisterRdsDbInstance operation. 
+func (c *OpsWorks) DeregisterRdsDbInstanceRequest(input *DeregisterRdsDbInstanceInput) (req *request.Request, output *DeregisterRdsDbInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterRdsDbInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterRdsDbInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterRdsDbInstanceOutput{} + req.Data = output + return +} + +// Deregisters an Amazon RDS instance. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeregisterRdsDbInstance(input *DeregisterRdsDbInstanceInput) (*DeregisterRdsDbInstanceOutput, error) { + req, out := c.DeregisterRdsDbInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeregisterVolume = "DeregisterVolume" + +// DeregisterVolumeRequest generates a request for the DeregisterVolume operation. +func (c *OpsWorks) DeregisterVolumeRequest(input *DeregisterVolumeInput) (req *request.Request, output *DeregisterVolumeOutput) { + op := &request.Operation{ + Name: opDeregisterVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeregisterVolumeOutput{} + req.Data = output + return +} + +// Deregisters an Amazon EBS volume. The volume can then be registered by another +// stack. 
For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DeregisterVolume(input *DeregisterVolumeInput) (*DeregisterVolumeOutput, error) { + req, out := c.DeregisterVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAgentVersions = "DescribeAgentVersions" + +// DescribeAgentVersionsRequest generates a request for the DescribeAgentVersions operation. +func (c *OpsWorks) DescribeAgentVersionsRequest(input *DescribeAgentVersionsInput) (req *request.Request, output *DescribeAgentVersionsOutput) { + op := &request.Operation{ + Name: opDescribeAgentVersions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAgentVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAgentVersionsOutput{} + req.Data = output + return +} + +// Describes the available AWS OpsWorks agent versions. You must specify a stack +// ID or a configuration manager. DescribeAgentVersions returns a list of available +// agent versions for the specified stack or configuration manager. +func (c *OpsWorks) DescribeAgentVersions(input *DescribeAgentVersionsInput) (*DescribeAgentVersionsOutput, error) { + req, out := c.DescribeAgentVersionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeApps = "DescribeApps" + +// DescribeAppsRequest generates a request for the DescribeApps operation. 
+func (c *OpsWorks) DescribeAppsRequest(input *DescribeAppsInput) (req *request.Request, output *DescribeAppsOutput) { + op := &request.Operation{ + Name: opDescribeApps, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAppsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAppsOutput{} + req.Data = output + return +} + +// Requests a description of a specified set of apps. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeApps(input *DescribeAppsInput) (*DescribeAppsOutput, error) { + req, out := c.DescribeAppsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCommands = "DescribeCommands" + +// DescribeCommandsRequest generates a request for the DescribeCommands operation. +func (c *OpsWorks) DescribeCommandsRequest(input *DescribeCommandsInput) (req *request.Request, output *DescribeCommandsOutput) { + op := &request.Operation{ + Name: opDescribeCommands, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCommandsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCommandsOutput{} + req.Data = output + return +} + +// Describes the results of specified commands. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. 
For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeCommands(input *DescribeCommandsInput) (*DescribeCommandsOutput, error) { + req, out := c.DescribeCommandsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDeployments = "DescribeDeployments" + +// DescribeDeploymentsRequest generates a request for the DescribeDeployments operation. +func (c *OpsWorks) DescribeDeploymentsRequest(input *DescribeDeploymentsInput) (req *request.Request, output *DescribeDeploymentsOutput) { + op := &request.Operation{ + Name: opDescribeDeployments, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDeploymentsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDeploymentsOutput{} + req.Data = output + return +} + +// Requests a description of a specified set of deployments. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeDeployments(input *DescribeDeploymentsInput) (*DescribeDeploymentsOutput, error) { + req, out := c.DescribeDeploymentsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEcsClusters = "DescribeEcsClusters" + +// DescribeEcsClustersRequest generates a request for the DescribeEcsClusters operation. 
+func (c *OpsWorks) DescribeEcsClustersRequest(input *DescribeEcsClustersInput) (req *request.Request, output *DescribeEcsClustersOutput) { + op := &request.Operation{ + Name: opDescribeEcsClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEcsClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEcsClustersOutput{} + req.Data = output + return +} + +// Describes Amazon ECS clusters that are registered with a stack. If you specify +// only a stack ID, you can use the MaxResults and NextToken parameters to paginate +// the response. However, AWS OpsWorks currently supports only one cluster per +// layer, so the result set has a maximum of one element. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack or an attached policy that +// explicitly grants permission. For more information on user permissions, see +// Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) DescribeEcsClusters(input *DescribeEcsClustersInput) (*DescribeEcsClustersOutput, error) { + req, out := c.DescribeEcsClustersRequest(input) + err := req.Send() + return out, err +} + +func (c *OpsWorks) DescribeEcsClustersPages(input *DescribeEcsClustersInput, fn func(p *DescribeEcsClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEcsClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEcsClustersOutput), lastPage) + }) +} + +const opDescribeElasticIps = "DescribeElasticIps" + +// DescribeElasticIpsRequest generates a request for the DescribeElasticIps operation. +func (c *OpsWorks) DescribeElasticIpsRequest(input *DescribeElasticIpsInput) (req *request.Request, output *DescribeElasticIpsOutput) { + op := &request.Operation{ + Name: opDescribeElasticIps, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeElasticIpsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticIpsOutput{} + req.Data = output + return +} + +// Describes Elastic IP addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) DescribeElasticIps(input *DescribeElasticIpsInput) (*DescribeElasticIpsOutput, error) { + req, out := c.DescribeElasticIpsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeElasticLoadBalancers = "DescribeElasticLoadBalancers" + +// DescribeElasticLoadBalancersRequest generates a request for the DescribeElasticLoadBalancers operation. +func (c *OpsWorks) DescribeElasticLoadBalancersRequest(input *DescribeElasticLoadBalancersInput) (req *request.Request, output *DescribeElasticLoadBalancersOutput) { + op := &request.Operation{ + Name: opDescribeElasticLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeElasticLoadBalancersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeElasticLoadBalancersOutput{} + req.Data = output + return +} + +// Describes a stack's Elastic Load Balancing instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeElasticLoadBalancers(input *DescribeElasticLoadBalancersInput) (*DescribeElasticLoadBalancersOutput, error) { + req, out := c.DescribeElasticLoadBalancersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a request for the DescribeInstances operation. 
+func (c *OpsWorks) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstancesOutput{} + req.Data = output + return +} + +// Requests a description of a set of instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLayers = "DescribeLayers" + +// DescribeLayersRequest generates a request for the DescribeLayers operation. +func (c *OpsWorks) DescribeLayersRequest(input *DescribeLayersInput) (req *request.Request, output *DescribeLayersOutput) { + op := &request.Operation{ + Name: opDescribeLayers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLayersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLayersOutput{} + req.Data = output + return +} + +// Requests a description of one or more layers in a specified stack. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. 
For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeLayers(input *DescribeLayersInput) (*DescribeLayersOutput, error) { + req, out := c.DescribeLayersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLoadBasedAutoScaling = "DescribeLoadBasedAutoScaling" + +// DescribeLoadBasedAutoScalingRequest generates a request for the DescribeLoadBasedAutoScaling operation. +func (c *OpsWorks) DescribeLoadBasedAutoScalingRequest(input *DescribeLoadBasedAutoScalingInput) (req *request.Request, output *DescribeLoadBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opDescribeLoadBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBasedAutoScalingOutput{} + req.Data = output + return +} + +// Describes load-based auto scaling configurations for specified layers. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeLoadBasedAutoScaling(input *DescribeLoadBasedAutoScalingInput) (*DescribeLoadBasedAutoScalingOutput, error) { + req, out := c.DescribeLoadBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMyUserProfile = "DescribeMyUserProfile" + +// DescribeMyUserProfileRequest generates a request for the DescribeMyUserProfile operation. 
+func (c *OpsWorks) DescribeMyUserProfileRequest(input *DescribeMyUserProfileInput) (req *request.Request, output *DescribeMyUserProfileOutput) { + op := &request.Operation{ + Name: opDescribeMyUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMyUserProfileInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMyUserProfileOutput{} + req.Data = output + return +} + +// Describes a user's SSH information. +// +// Required Permissions: To use this action, an IAM user must have self-management +// enabled or an attached policy that explicitly grants permissions. For more +// information on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeMyUserProfile(input *DescribeMyUserProfileInput) (*DescribeMyUserProfileOutput, error) { + req, out := c.DescribeMyUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opDescribePermissions = "DescribePermissions" + +// DescribePermissionsRequest generates a request for the DescribePermissions operation. +func (c *OpsWorks) DescribePermissionsRequest(input *DescribePermissionsInput) (req *request.Request, output *DescribePermissionsOutput) { + op := &request.Operation{ + Name: opDescribePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePermissionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePermissionsOutput{} + req.Data = output + return +} + +// Describes the permissions for a specified stack. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) DescribePermissions(input *DescribePermissionsInput) (*DescribePermissionsOutput, error) { + req, out := c.DescribePermissionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRaidArrays = "DescribeRaidArrays" + +// DescribeRaidArraysRequest generates a request for the DescribeRaidArrays operation. +func (c *OpsWorks) DescribeRaidArraysRequest(input *DescribeRaidArraysInput) (req *request.Request, output *DescribeRaidArraysOutput) { + op := &request.Operation{ + Name: opDescribeRaidArrays, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRaidArraysInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRaidArraysOutput{} + req.Data = output + return +} + +// Describe an instance's RAID arrays. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeRaidArrays(input *DescribeRaidArraysInput) (*DescribeRaidArraysOutput, error) { + req, out := c.DescribeRaidArraysRequest(input) + err := req.Send() + return out, err +} + +const opDescribeRdsDbInstances = "DescribeRdsDbInstances" + +// DescribeRdsDbInstancesRequest generates a request for the DescribeRdsDbInstances operation. 
+func (c *OpsWorks) DescribeRdsDbInstancesRequest(input *DescribeRdsDbInstancesInput) (req *request.Request, output *DescribeRdsDbInstancesOutput) { + op := &request.Operation{ + Name: opDescribeRdsDbInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRdsDbInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeRdsDbInstancesOutput{} + req.Data = output + return +} + +// Describes Amazon RDS instances. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeRdsDbInstances(input *DescribeRdsDbInstancesInput) (*DescribeRdsDbInstancesOutput, error) { + req, out := c.DescribeRdsDbInstancesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeServiceErrors = "DescribeServiceErrors" + +// DescribeServiceErrorsRequest generates a request for the DescribeServiceErrors operation. +func (c *OpsWorks) DescribeServiceErrorsRequest(input *DescribeServiceErrorsInput) (req *request.Request, output *DescribeServiceErrorsOutput) { + op := &request.Operation{ + Name: opDescribeServiceErrors, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServiceErrorsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeServiceErrorsOutput{} + req.Data = output + return +} + +// Describes AWS OpsWorks service errors. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. 
For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeServiceErrors(input *DescribeServiceErrorsInput) (*DescribeServiceErrorsOutput, error) { + req, out := c.DescribeServiceErrorsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackProvisioningParameters = "DescribeStackProvisioningParameters" + +// DescribeStackProvisioningParametersRequest generates a request for the DescribeStackProvisioningParameters operation. +func (c *OpsWorks) DescribeStackProvisioningParametersRequest(input *DescribeStackProvisioningParametersInput) (req *request.Request, output *DescribeStackProvisioningParametersOutput) { + op := &request.Operation{ + Name: opDescribeStackProvisioningParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackProvisioningParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackProvisioningParametersOutput{} + req.Data = output + return +} + +// Requests a description of a stack's provisioning parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack or an attached policy that +// explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeStackProvisioningParameters(input *DescribeStackProvisioningParametersInput) (*DescribeStackProvisioningParametersOutput, error) { + req, out := c.DescribeStackProvisioningParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStackSummary = "DescribeStackSummary" + +// DescribeStackSummaryRequest generates a request for the DescribeStackSummary operation. 
+func (c *OpsWorks) DescribeStackSummaryRequest(input *DescribeStackSummaryInput) (req *request.Request, output *DescribeStackSummaryOutput) { + op := &request.Operation{ + Name: opDescribeStackSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStackSummaryInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStackSummaryOutput{} + req.Data = output + return +} + +// Describes the number of layers and apps in a specified stack, and the number +// of instances in each state, such as running_setup or online. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeStackSummary(input *DescribeStackSummaryInput) (*DescribeStackSummaryOutput, error) { + req, out := c.DescribeStackSummaryRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStacks = "DescribeStacks" + +// DescribeStacksRequest generates a request for the DescribeStacks operation. +func (c *OpsWorks) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) { + op := &request.Operation{ + Name: opDescribeStacks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStacksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStacksOutput{} + req.Data = output + return +} + +// Requests a description of one or more stacks. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. 
For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeStacks(input *DescribeStacksInput) (*DescribeStacksOutput, error) { + req, out := c.DescribeStacksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTimeBasedAutoScaling = "DescribeTimeBasedAutoScaling" + +// DescribeTimeBasedAutoScalingRequest generates a request for the DescribeTimeBasedAutoScaling operation. +func (c *OpsWorks) DescribeTimeBasedAutoScalingRequest(input *DescribeTimeBasedAutoScalingInput) (req *request.Request, output *DescribeTimeBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opDescribeTimeBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTimeBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTimeBasedAutoScalingOutput{} + req.Data = output + return +} + +// Describes time-based auto scaling configurations for specified instances. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeTimeBasedAutoScaling(input *DescribeTimeBasedAutoScalingInput) (*DescribeTimeBasedAutoScalingOutput, error) { + req, out := c.DescribeTimeBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opDescribeUserProfiles = "DescribeUserProfiles" + +// DescribeUserProfilesRequest generates a request for the DescribeUserProfiles operation. 
+func (c *OpsWorks) DescribeUserProfilesRequest(input *DescribeUserProfilesInput) (req *request.Request, output *DescribeUserProfilesOutput) { + op := &request.Operation{ + Name: opDescribeUserProfiles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUserProfilesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeUserProfilesOutput{} + req.Data = output + return +} + +// Describe specified users. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DescribeUserProfiles(input *DescribeUserProfilesInput) (*DescribeUserProfilesOutput, error) { + req, out := c.DescribeUserProfilesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVolumes = "DescribeVolumes" + +// DescribeVolumesRequest generates a request for the DescribeVolumes operation. +func (c *OpsWorks) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { + op := &request.Operation{ + Name: opDescribeVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVolumesOutput{} + req.Data = output + return +} + +// Describes an instance's Amazon EBS volumes. +// +// You must specify at least one of the parameters. +// +// Required Permissions: To use this action, an IAM user must have a Show, +// Deploy, or Manage permissions level for the stack, or an attached policy +// that explicitly grants permissions. For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + err := req.Send() + return out, err +} + +const opDetachElasticLoadBalancer = "DetachElasticLoadBalancer" + +// DetachElasticLoadBalancerRequest generates a request for the DetachElasticLoadBalancer operation. +func (c *OpsWorks) DetachElasticLoadBalancerRequest(input *DetachElasticLoadBalancerInput) (req *request.Request, output *DetachElasticLoadBalancerOutput) { + op := &request.Operation{ + Name: opDetachElasticLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachElasticLoadBalancerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DetachElasticLoadBalancerOutput{} + req.Data = output + return +} + +// Detaches a specified Elastic Load Balancing instance from its layer. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DetachElasticLoadBalancer(input *DetachElasticLoadBalancerInput) (*DetachElasticLoadBalancerOutput, error) { + req, out := c.DetachElasticLoadBalancerRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateElasticIp = "DisassociateElasticIp" + +// DisassociateElasticIpRequest generates a request for the DisassociateElasticIp operation. 
+func (c *OpsWorks) DisassociateElasticIpRequest(input *DisassociateElasticIpInput) (req *request.Request, output *DisassociateElasticIpOutput) { + op := &request.Operation{ + Name: opDisassociateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DisassociateElasticIpOutput{} + req.Data = output + return +} + +// Disassociates an Elastic IP address from its instance. The address remains +// registered with the stack. For more information, see Resource Management +// (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) DisassociateElasticIp(input *DisassociateElasticIpInput) (*DisassociateElasticIpOutput, error) { + req, out := c.DisassociateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opGetHostnameSuggestion = "GetHostnameSuggestion" + +// GetHostnameSuggestionRequest generates a request for the GetHostnameSuggestion operation. 
+func (c *OpsWorks) GetHostnameSuggestionRequest(input *GetHostnameSuggestionInput) (req *request.Request, output *GetHostnameSuggestionOutput) { + op := &request.Operation{ + Name: opGetHostnameSuggestion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetHostnameSuggestionInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostnameSuggestionOutput{} + req.Data = output + return +} + +// Gets a generated host name for the specified layer, based on the current +// host name theme. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) GetHostnameSuggestion(input *GetHostnameSuggestionInput) (*GetHostnameSuggestionOutput, error) { + req, out := c.GetHostnameSuggestionRequest(input) + err := req.Send() + return out, err +} + +const opGrantAccess = "GrantAccess" + +// GrantAccessRequest generates a request for the GrantAccess operation. +func (c *OpsWorks) GrantAccessRequest(input *GrantAccessInput) (req *request.Request, output *GrantAccessOutput) { + op := &request.Operation{ + Name: opGrantAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GrantAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &GrantAccessOutput{} + req.Data = output + return +} + +// This action can be used only with Windows stacks. Grants RDP access to a +// Windows instance for a specified time period. +func (c *OpsWorks) GrantAccess(input *GrantAccessInput) (*GrantAccessOutput, error) { + req, out := c.GrantAccessRequest(input) + err := req.Send() + return out, err +} + +const opRebootInstance = "RebootInstance" + +// RebootInstanceRequest generates a request for the RebootInstance operation. 
+func (c *OpsWorks) RebootInstanceRequest(input *RebootInstanceInput) (req *request.Request, output *RebootInstanceOutput) { + op := &request.Operation{ + Name: opRebootInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RebootInstanceOutput{} + req.Data = output + return +} + +// Reboots a specified instance. For more information, see Starting, Stopping, +// and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RebootInstance(input *RebootInstanceInput) (*RebootInstanceOutput, error) { + req, out := c.RebootInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterEcsCluster = "RegisterEcsCluster" + +// RegisterEcsClusterRequest generates a request for the RegisterEcsCluster operation. +func (c *OpsWorks) RegisterEcsClusterRequest(input *RegisterEcsClusterInput) (req *request.Request, output *RegisterEcsClusterOutput) { + op := &request.Operation{ + Name: opRegisterEcsCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterEcsClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterEcsClusterOutput{} + req.Data = output + return +} + +// Registers a specified Amazon ECS cluster with a stack. You can register only +// one cluster with a stack. A cluster can be registered with only one stack. 
+// For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterEcsCluster(input *RegisterEcsClusterInput) (*RegisterEcsClusterOutput, error) { + req, out := c.RegisterEcsClusterRequest(input) + err := req.Send() + return out, err +} + +const opRegisterElasticIp = "RegisterElasticIp" + +// RegisterElasticIpRequest generates a request for the RegisterElasticIp operation. +func (c *OpsWorks) RegisterElasticIpRequest(input *RegisterElasticIpInput) (req *request.Request, output *RegisterElasticIpOutput) { + op := &request.Operation{ + Name: opRegisterElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterElasticIpInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterElasticIpOutput{} + req.Data = output + return +} + +// Registers an Elastic IP address with a specified stack. An address can be +// registered with only one stack at a time. If the address is already registered, +// you must first deregister it by calling DeregisterElasticIp. For more information, +// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) RegisterElasticIp(input *RegisterElasticIpInput) (*RegisterElasticIpOutput, error) { + req, out := c.RegisterElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opRegisterInstance = "RegisterInstance" + +// RegisterInstanceRequest generates a request for the RegisterInstance operation. +func (c *OpsWorks) RegisterInstanceRequest(input *RegisterInstanceInput) (req *request.Request, output *RegisterInstanceOutput) { + op := &request.Operation{ + Name: opRegisterInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterInstanceOutput{} + req.Data = output + return +} + +// Registers instances with a specified stack that were created outside of AWS +// OpsWorks. +// +// We do not recommend using this action to register instances. The complete +// registration operation has two primary steps, installing the AWS OpsWorks +// agent on the instance and registering the instance with the stack. RegisterInstance +// handles only the second step. You should instead use the AWS CLI register +// command, which performs the entire registration operation. For more information, +// see Registering an Instance with an AWS OpsWorks Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html). +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) RegisterInstance(input *RegisterInstanceInput) (*RegisterInstanceOutput, error) { + req, out := c.RegisterInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterRdsDbInstance = "RegisterRdsDbInstance" + +// RegisterRdsDbInstanceRequest generates a request for the RegisterRdsDbInstance operation. +func (c *OpsWorks) RegisterRdsDbInstanceRequest(input *RegisterRdsDbInstanceInput) (req *request.Request, output *RegisterRdsDbInstanceOutput) { + op := &request.Operation{ + Name: opRegisterRdsDbInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterRdsDbInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterRdsDbInstanceOutput{} + req.Data = output + return +} + +// Registers an Amazon RDS instance with a stack. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterRdsDbInstance(input *RegisterRdsDbInstanceInput) (*RegisterRdsDbInstanceOutput, error) { + req, out := c.RegisterRdsDbInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRegisterVolume = "RegisterVolume" + +// RegisterVolumeRequest generates a request for the RegisterVolume operation. 
+func (c *OpsWorks) RegisterVolumeRequest(input *RegisterVolumeInput) (req *request.Request, output *RegisterVolumeOutput) { + op := &request.Operation{ + Name: opRegisterVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterVolumeOutput{} + req.Data = output + return +} + +// Registers an Amazon EBS volume with a specified stack. A volume can be registered +// with only one stack at a time. If the volume is already registered, you must +// first deregister it by calling DeregisterVolume. For more information, see +// Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) RegisterVolume(input *RegisterVolumeInput) (*RegisterVolumeOutput, error) { + req, out := c.RegisterVolumeRequest(input) + err := req.Send() + return out, err +} + +const opSetLoadBasedAutoScaling = "SetLoadBasedAutoScaling" + +// SetLoadBasedAutoScalingRequest generates a request for the SetLoadBasedAutoScaling operation. 
+func (c *OpsWorks) SetLoadBasedAutoScalingRequest(input *SetLoadBasedAutoScalingInput) (req *request.Request, output *SetLoadBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opSetLoadBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetLoadBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetLoadBasedAutoScalingOutput{} + req.Data = output + return +} + +// Specify the load-based auto scaling configuration for a specified layer. +// For more information, see Managing Load with Time-based and Load-based Instances +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). +// +// To use load-based auto scaling, you must create a set of load-based auto +// scaling instances. Load-based auto scaling operates only on the instances +// from that set, so you must ensure that you have created enough instances +// to handle the maximum anticipated load. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetLoadBasedAutoScaling(input *SetLoadBasedAutoScalingInput) (*SetLoadBasedAutoScalingOutput, error) { + req, out := c.SetLoadBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opSetPermission = "SetPermission" + +// SetPermissionRequest generates a request for the SetPermission operation. 
+func (c *OpsWorks) SetPermissionRequest(input *SetPermissionInput) (req *request.Request, output *SetPermissionOutput) { + op := &request.Operation{ + Name: opSetPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetPermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetPermissionOutput{} + req.Data = output + return +} + +// Specifies a user's permissions. For more information, see Security and Permissions +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workingsecurity.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetPermission(input *SetPermissionInput) (*SetPermissionOutput, error) { + req, out := c.SetPermissionRequest(input) + err := req.Send() + return out, err +} + +const opSetTimeBasedAutoScaling = "SetTimeBasedAutoScaling" + +// SetTimeBasedAutoScalingRequest generates a request for the SetTimeBasedAutoScaling operation. 
+func (c *OpsWorks) SetTimeBasedAutoScalingRequest(input *SetTimeBasedAutoScalingInput) (req *request.Request, output *SetTimeBasedAutoScalingOutput) { + op := &request.Operation{ + Name: opSetTimeBasedAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTimeBasedAutoScalingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetTimeBasedAutoScalingOutput{} + req.Data = output + return +} + +// Specify the time-based auto scaling configuration for a specified instance. +// For more information, see Managing Load with Time-based and Load-based Instances +// (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) SetTimeBasedAutoScaling(input *SetTimeBasedAutoScalingInput) (*SetTimeBasedAutoScalingOutput, error) { + req, out := c.SetTimeBasedAutoScalingRequest(input) + err := req.Send() + return out, err +} + +const opStartInstance = "StartInstance" + +// StartInstanceRequest generates a request for the StartInstance operation. 
+func (c *OpsWorks) StartInstanceRequest(input *StartInstanceInput) (req *request.Request, output *StartInstanceOutput) { + op := &request.Operation{ + Name: opStartInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StartInstanceOutput{} + req.Data = output + return +} + +// Starts a specified instance. For more information, see Starting, Stopping, +// and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) StartInstance(input *StartInstanceInput) (*StartInstanceOutput, error) { + req, out := c.StartInstanceRequest(input) + err := req.Send() + return out, err +} + +const opStartStack = "StartStack" + +// StartStackRequest generates a request for the StartStack operation. +func (c *OpsWorks) StartStackRequest(input *StartStackInput) (req *request.Request, output *StartStackOutput) { + op := &request.Operation{ + Name: opStartStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StartStackOutput{} + req.Data = output + return +} + +// Starts a stack's instances. 
+// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) StartStack(input *StartStackInput) (*StartStackOutput, error) { + req, out := c.StartStackRequest(input) + err := req.Send() + return out, err +} + +const opStopInstance = "StopInstance" + +// StopInstanceRequest generates a request for the StopInstance operation. +func (c *OpsWorks) StopInstanceRequest(input *StopInstanceInput) (req *request.Request, output *StopInstanceOutput) { + op := &request.Operation{ + Name: opStopInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StopInstanceOutput{} + req.Data = output + return +} + +// Stops a specified instance. When you stop a standard instance, the data disappears +// and must be reinstalled when you restart the instance. You can stop an Amazon +// EBS-backed instance without losing data. For more information, see Starting, +// Stopping, and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) StopInstance(input *StopInstanceInput) (*StopInstanceOutput, error) { + req, out := c.StopInstanceRequest(input) + err := req.Send() + return out, err +} + +const opStopStack = "StopStack" + +// StopStackRequest generates a request for the StopStack operation. +func (c *OpsWorks) StopStackRequest(input *StopStackInput) (req *request.Request, output *StopStackOutput) { + op := &request.Operation{ + Name: opStopStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &StopStackOutput{} + req.Data = output + return +} + +// Stops a specified stack. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) StopStack(input *StopStackInput) (*StopStackOutput, error) { + req, out := c.StopStackRequest(input) + err := req.Send() + return out, err +} + +const opUnassignInstance = "UnassignInstance" + +// UnassignInstanceRequest generates a request for the UnassignInstance operation. 
+func (c *OpsWorks) UnassignInstanceRequest(input *UnassignInstanceInput) (req *request.Request, output *UnassignInstanceOutput) {
+	op := &request.Operation{
+		Name:       opUnassignInstance,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UnassignInstanceInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &UnassignInstanceOutput{}
+	req.Data = output
+	return
+}
+
+// Unassigns a registered instance from all of its layers. The instance remains
+// in the stack as an unassigned instance and can be assigned to another layer,
+// as needed. You cannot use this action with instances that were created with
+// AWS OpsWorks.
+//
+// Required Permissions: To use this action, an IAM user must have a Manage
+// permissions level for the stack or an attached policy that explicitly grants
+// permissions. For more information on user permissions, see Managing User
+// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
+func (c *OpsWorks) UnassignInstance(input *UnassignInstanceInput) (*UnassignInstanceOutput, error) {
+	req, out := c.UnassignInstanceRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUnassignVolume = "UnassignVolume"
+
+// UnassignVolumeRequest generates a request for the UnassignVolume operation.
+func (c *OpsWorks) UnassignVolumeRequest(input *UnassignVolumeInput) (req *request.Request, output *UnassignVolumeOutput) { + op := &request.Operation{ + Name: opUnassignVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnassignVolumeOutput{} + req.Data = output + return +} + +// Unassigns an assigned Amazon EBS volume. The volume remains registered with +// the stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UnassignVolume(input *UnassignVolumeInput) (*UnassignVolumeOutput, error) { + req, out := c.UnassignVolumeRequest(input) + err := req.Send() + return out, err +} + +const opUpdateApp = "UpdateApp" + +// UpdateAppRequest generates a request for the UpdateApp operation. +func (c *OpsWorks) UpdateAppRequest(input *UpdateAppInput) (req *request.Request, output *UpdateAppOutput) { + op := &request.Operation{ + Name: opUpdateApp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAppInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateAppOutput{} + req.Data = output + return +} + +// Updates a specified app. 
+// +// Required Permissions: To use this action, an IAM user must have a Deploy +// or Manage permissions level for the stack, or an attached policy that explicitly +// grants permissions. For more information on user permissions, see Managing +// User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateApp(input *UpdateAppInput) (*UpdateAppOutput, error) { + req, out := c.UpdateAppRequest(input) + err := req.Send() + return out, err +} + +const opUpdateElasticIp = "UpdateElasticIp" + +// UpdateElasticIpRequest generates a request for the UpdateElasticIp operation. +func (c *OpsWorks) UpdateElasticIpRequest(input *UpdateElasticIpInput) (req *request.Request, output *UpdateElasticIpOutput) { + op := &request.Operation{ + Name: opUpdateElasticIp, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateElasticIpInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateElasticIpOutput{} + req.Data = output + return +} + +// Updates a registered Elastic IP address's name. For more information, see +// Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
+func (c *OpsWorks) UpdateElasticIp(input *UpdateElasticIpInput) (*UpdateElasticIpOutput, error) { + req, out := c.UpdateElasticIpRequest(input) + err := req.Send() + return out, err +} + +const opUpdateInstance = "UpdateInstance" + +// UpdateInstanceRequest generates a request for the UpdateInstance operation. +func (c *OpsWorks) UpdateInstanceRequest(input *UpdateInstanceInput) (req *request.Request, output *UpdateInstanceOutput) { + op := &request.Operation{ + Name: opUpdateInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateInstanceOutput{} + req.Data = output + return +} + +// Updates a specified instance. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateInstance(input *UpdateInstanceInput) (*UpdateInstanceOutput, error) { + req, out := c.UpdateInstanceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateLayer = "UpdateLayer" + +// UpdateLayerRequest generates a request for the UpdateLayer operation. 
+func (c *OpsWorks) UpdateLayerRequest(input *UpdateLayerInput) (req *request.Request, output *UpdateLayerOutput) { + op := &request.Operation{ + Name: opUpdateLayer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLayerInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateLayerOutput{} + req.Data = output + return +} + +// Updates a specified layer. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateLayer(input *UpdateLayerInput) (*UpdateLayerOutput, error) { + req, out := c.UpdateLayerRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMyUserProfile = "UpdateMyUserProfile" + +// UpdateMyUserProfileRequest generates a request for the UpdateMyUserProfile operation. +func (c *OpsWorks) UpdateMyUserProfileRequest(input *UpdateMyUserProfileInput) (req *request.Request, output *UpdateMyUserProfileOutput) { + op := &request.Operation{ + Name: opUpdateMyUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMyUserProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateMyUserProfileOutput{} + req.Data = output + return +} + +// Updates a user's SSH public key. +// +// Required Permissions: To use this action, an IAM user must have self-management +// enabled or an attached policy that explicitly grants permissions. 
For more +// information on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateMyUserProfile(input *UpdateMyUserProfileInput) (*UpdateMyUserProfileOutput, error) { + req, out := c.UpdateMyUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRdsDbInstance = "UpdateRdsDbInstance" + +// UpdateRdsDbInstanceRequest generates a request for the UpdateRdsDbInstance operation. +func (c *OpsWorks) UpdateRdsDbInstanceRequest(input *UpdateRdsDbInstanceInput) (req *request.Request, output *UpdateRdsDbInstanceOutput) { + op := &request.Operation{ + Name: opUpdateRdsDbInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRdsDbInstanceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateRdsDbInstanceOutput{} + req.Data = output + return +} + +// Updates an Amazon RDS instance. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateRdsDbInstance(input *UpdateRdsDbInstanceInput) (*UpdateRdsDbInstanceOutput, error) { + req, out := c.UpdateRdsDbInstanceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateStack = "UpdateStack" + +// UpdateStackRequest generates a request for the UpdateStack operation. 
+func (c *OpsWorks) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { + op := &request.Operation{ + Name: opUpdateStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateStackInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateStackOutput{} + req.Data = output + return +} + +// Updates a specified stack. +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateStack(input *UpdateStackInput) (*UpdateStackOutput, error) { + req, out := c.UpdateStackRequest(input) + err := req.Send() + return out, err +} + +const opUpdateUserProfile = "UpdateUserProfile" + +// UpdateUserProfileRequest generates a request for the UpdateUserProfile operation. +func (c *OpsWorks) UpdateUserProfileRequest(input *UpdateUserProfileInput) (req *request.Request, output *UpdateUserProfileOutput) { + op := &request.Operation{ + Name: opUpdateUserProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserProfileInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateUserProfileOutput{} + req.Data = output + return +} + +// Updates a specified user profile. +// +// Required Permissions: To use this action, an IAM user must have an attached +// policy that explicitly grants permissions. 
For more information on user permissions, +// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateUserProfile(input *UpdateUserProfileInput) (*UpdateUserProfileOutput, error) { + req, out := c.UpdateUserProfileRequest(input) + err := req.Send() + return out, err +} + +const opUpdateVolume = "UpdateVolume" + +// UpdateVolumeRequest generates a request for the UpdateVolume operation. +func (c *OpsWorks) UpdateVolumeRequest(input *UpdateVolumeInput) (req *request.Request, output *UpdateVolumeOutput) { + op := &request.Operation{ + Name: opUpdateVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateVolumeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UpdateVolumeOutput{} + req.Data = output + return +} + +// Updates an Amazon EBS volume's name or mount point. For more information, +// see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). +// +// Required Permissions: To use this action, an IAM user must have a Manage +// permissions level for the stack, or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User +// Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +func (c *OpsWorks) UpdateVolume(input *UpdateVolumeInput) (*UpdateVolumeOutput, error) { + req, out := c.UpdateVolumeRequest(input) + err := req.Send() + return out, err +} + +// Describes an agent version. +type AgentVersion struct { + _ struct{} `type:"structure"` + + // The configuration manager. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // The agent version. 
+ Version *string `type:"string"` +} + +// String returns the string representation +func (s AgentVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AgentVersion) GoString() string { + return s.String() +} + +// A description of the app. +type App struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string"` + + // A Source object that describes the app repository. + AppSource *Source `type:"structure"` + + // The stack attributes. + Attributes map[string]*string `type:"map"` + + // When the app was created. + CreatedAt *string `type:"string"` + + // The app's data sources. + DataSources []*DataSource `type:"list"` + + // A description of the app. + Description *string `type:"string"` + + // The app vhost settings with multiple domains separated by commas. For example: + // 'www.example.com, example.com' + Domains []*string `type:"list"` + + // Whether to enable SSL for the app. + EnableSsl *bool `type:"boolean"` + + // An array of EnvironmentVariable objects that specify environment variables + // to be associated with the app. After you deploy the app, these variables + // are defined on the associated app server instances. For more information, + // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // + // There is no specific limit on the number of environment variables. However, + // the size of the associated data structure - which includes the variables' + // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). + // This limit should accommodate most if not all use cases, but if you do exceed + // it, you will cause an exception (API) with an "Environment: is too large + // (maximum is 10KB)" message. + Environment []*EnvironmentVariable `type:"list"` + + // The app name. + Name *string `type:"string"` + + // The app's short name. 
+ Shortname *string `type:"string"` + + // An SslConfiguration object with the SSL configuration. + SslConfiguration *SslConfiguration `type:"structure"` + + // The app stack ID. + StackId *string `type:"string"` + + // The app type. + Type *string `type:"string" enum:"AppType"` +} + +// String returns the string representation +func (s App) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s App) GoString() string { + return s.String() +} + +type AssignInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` + + // The layer ID, which must correspond to a custom layer. You cannot assign + // a registered instance to a built-in layer. + LayerIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AssignInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignInstanceInput) GoString() string { + return s.String() +} + +type AssignInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignInstanceOutput) GoString() string { + return s.String() +} + +type AssignVolumeInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string"` + + // The volume ID. 
+ VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssignVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignVolumeInput) GoString() string { + return s.String() +} + +type AssignVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssignVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignVolumeOutput) GoString() string { + return s.String() +} + +type AssociateElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string" required:"true"` + + // The instance ID. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s AssociateElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateElasticIpInput) GoString() string { + return s.String() +} + +type AssociateElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateElasticIpOutput) GoString() string { + return s.String() +} + +type AttachElasticLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Elastic Load Balancing instance's name. + ElasticLoadBalancerName *string `type:"string" required:"true"` + + // The ID of the layer that the Elastic Load Balancing instance is to be attached + // to. 
+ LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachElasticLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachElasticLoadBalancerInput) GoString() string { + return s.String() +} + +type AttachElasticLoadBalancerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachElasticLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachElasticLoadBalancerOutput) GoString() string { + return s.String() +} + +// Describes a load-based auto scaling upscaling or downscaling threshold configuration, +// which specifies when AWS OpsWorks starts or stops load-based instances. +type AutoScalingThresholds struct { + _ struct{} `type:"structure"` + + // Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter + // takes a list of up to five alarm names, which are case sensitive and must + // be in the same region as the stack. + // + // To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. + // You can either have AWS OpsWorks update the role for you when you first use + // this feature or you can edit the role manually. For more information, see + // Allowing AWS OpsWorks to Act on Your Behalf (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html). + Alarms []*string `type:"list"` + + // The CPU utilization threshold, as a percent of the available CPU. A value + // of -1 disables the threshold. + CpuThreshold *float64 `type:"double"` + + // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks + // should ignore metrics and suppress additional scaling events. 
For example, + // AWS OpsWorks adds new instances following an upscaling event but the instances + // won't start reducing the load until they have been booted and configured. + // There is no point in raising additional scaling events during that operation, + // which typically takes several minutes. IgnoreMetricsTime allows you to direct + // AWS OpsWorks to suppress scaling events long enough to get the new instances + // online. + IgnoreMetricsTime *int64 `min:"1" type:"integer"` + + // The number of instances to add or remove when the load exceeds a threshold. + InstanceCount *int64 `type:"integer"` + + // The load threshold. A value of -1 disables the threshold. For more information + // about how load is computed, see Load (computing) (http://en.wikipedia.org/wiki/Load_%28computing%29). + LoadThreshold *float64 `type:"double"` + + // The memory utilization threshold, as a percent of the available memory. A + // value of -1 disables the threshold. + MemoryThreshold *float64 `type:"double"` + + // The amount of time, in minutes, that the load must exceed a threshold before + // more instances are added or removed. + ThresholdsWaitTime *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s AutoScalingThresholds) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingThresholds) GoString() string { + return s.String() +} + +// Describes a block device mapping. This data type maps directly to the Amazon +// EC2 BlockDeviceMapping (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html) +// data type. +type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name that is exposed to the instance, such as /dev/sdh. For the + // root device, you can use the explicit device name or you can set this parameter + // to ROOT_DEVICE and AWS OpsWorks will provide the correct device name. 
+ DeviceName *string `type:"string"` + + // An EBSBlockDevice that defines how to configure an Amazon EBS volume when + // the instance is launched. + Ebs *EbsBlockDevice `type:"structure"` + + // Suppresses the specified device included in the AMI's block device mapping. + NoDevice *string `type:"string"` + + // The virtual device name. For more information, see BlockDeviceMapping (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html). + VirtualName *string `type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +// Describes the Chef configuration. +type ChefConfiguration struct { + _ struct{} `type:"structure"` + + // The Berkshelf version. + BerkshelfVersion *string `type:"string"` + + // Whether to enable Berkshelf. + ManageBerkshelf *bool `type:"boolean"` +} + +// String returns the string representation +func (s ChefConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChefConfiguration) GoString() string { + return s.String() +} + +type CloneStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. Fixed version - Set this parameter to your preferred agent version. + // To update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. The default setting is LATEST. To specify an agent version, + // you must use the complete version number, not the abbreviated number shown + // on the console. 
For a list of available agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. + AgentVersion *string `type:"string"` + + // A list of stack attributes and values as key/value pairs to be added to the + // cloned stack. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version on Chef 11.10 stacks. For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // A list of source stack app IDs to be included in the cloned stack. + CloneAppIds []*string `type:"list"` + + // Whether to clone the source stack's permissions. + ClonePermissions *bool `type:"boolean"` + + // The configuration manager. When you clone a stack we recommend that you use + // the configuration manager to specify the Chef version: 12, 11.10, or 11.4 + // for Linux stacks, or 12.2 for Windows stacks. The default value for Linux + // stacks is currently 11.4. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A string that contains user-defined, custom JSON. It is used to override + // the corresponding default stack configuration JSON values. 
The string should + // be in the following format and must escape characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html) + CustomJson *string `type:"string"` + + // The cloned stack's default Availability Zone, which must be in the specified + // region. For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // If you also specify a value for DefaultSubnetId, the subnet must be in the + // same zone. For more information, see the VpcId parameter description. + DefaultAvailabilityZone *string `type:"string"` + + // The Amazon Resource Name (ARN) of an IAM profile that is the default profile + // for all of the stack's EC2 instances. For more information about IAM ARNs, + // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's operating system, which must be set to one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify + // the custom AMI you want to use when you create instances. For more information + // on how to use custom AMIs with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // The default option is the parent stack's operating system. For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). 
+ // + // You can specify a different Linux operating system for the cloned stack, + // but you cannot change from Linux to Windows or Windows to Linux. + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the cloned stack, but you can override it when you create an instance. + // For more information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair name. The default value is none. If you specify + // a key pair name, AWS OpsWorks installs the public key on the instance and + // you can use the private key with an SSH client to log in to the instance. + // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // You can override this setting by specifying a different key pair, or no key + // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + DefaultSshKeyName *string `type:"string"` + + // The stack's default VPC subnet ID. This parameter is required if you specify + // a value for the VpcId parameter. All instances are launched into this subnet + // unless you specify otherwise when you create the instance. If you also specify + // a value for DefaultAvailabilityZone, the subnet must be in that zone. For + // information on default values and when this parameter is required, see the + // VpcId parameter description. + DefaultSubnetId *string `type:"string"` + + // The stack's host name theme, with spaces are replaced by underscores. The + // theme is used to generate host names for the stack's instances. 
By default, + // HostnameTheme is set to Layer_Dependent, which creates host names by appending + // integers to the layer's short name. The other themes are: + // + // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan + // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats + // To obtain a generated host name, call GetHostNameSuggestion, which returns + // a host name based on the current theme. + HostnameTheme *string `type:"string"` + + // The cloned stack name. + Name *string `type:"string"` + + // The cloned stack AWS region, such as "us-east-1". For more information about + // AWS regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` + + // The stack AWS Identity and Access Management (IAM) role, which allows AWS + // OpsWorks to work with AWS resources on your behalf. You must set this parameter + // to the Amazon Resource Name (ARN) for an existing IAM role. If you create + // a stack by using the AWS OpsWorks console, it creates the role for you. You + // can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. + // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + // + // You must set this parameter to a valid service role ARN or the action will + // fail; there is no default value. You can specify the source stack's service + // role ARN, if you prefer, but you must do so explicitly. + ServiceRoleArn *string `type:"string" required:"true"` + + // The source stack ID. + SourceStackId *string `type:"string" required:"true"` + + // Whether to use custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether to associate the AWS OpsWorks built-in security groups with the stack's + // layers. 
+ // + // AWS OpsWorks provides a standard set of built-in security groups, one for + // each layer, which are associated with layers by default. With UseOpsworksSecurityGroups + // you can instead provide your own custom security groups. UseOpsworksSecurityGroups + // has the following settings: + // + // True - AWS OpsWorks automatically associates the appropriate built-in security + // group with each layer (default setting). You can associate additional security + // groups with a layer after you create it but you cannot delete the built-in + // security group. False - AWS OpsWorks does not associate built-in security + // groups with layers. You must create appropriate Amazon Elastic Compute Cloud + // (Amazon EC2) security groups and associate a security group with each layer + // that you create. However, you can still manually associate a built-in security + // group with a layer on creation; custom security groups are required only + // for those layers that need custom settings. For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The ID of the VPC that the cloned stack is to be launched into. It must be + // in the specified region. All instances are launched into this VPC, and you + // cannot change the ID later. + // + // If your account supports EC2 Classic, the default value is no VPC. If your + // account does not support EC2 Classic, the default value is the default VPC + // for the specified region. If the VPC ID corresponds to a default VPC and + // you have specified either the DefaultAvailabilityZone or the DefaultSubnetId + // parameter only, AWS OpsWorks infers the value of the other parameter. If + // you specify neither parameter, AWS OpsWorks sets these parameters to the + // first valid Availability Zone for the specified region and the corresponding + // default VPC subnet ID, respectively. 
+ // + // If you specify a nondefault VPC ID, note the following: + // + // It must belong to a VPC in your account that is in the specified region. + // You must specify a value for DefaultSubnetId. For more information on how + // to use AWS OpsWorks with a VPC, see Running a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // For more information on default VPC and EC2 Classic, see Supported Platforms + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CloneStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneStackInput) GoString() string { + return s.String() +} + +// Contains the response to a CloneStack request. +type CloneStackOutput struct { + _ struct{} `type:"structure"` + + // The cloned stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s CloneStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneStackOutput) GoString() string { + return s.String() +} + +// Describes a command. +type Command struct { + _ struct{} `type:"structure"` + + // Date and time when the command was acknowledged. + AcknowledgedAt *string `type:"string"` + + // The command ID. + CommandId *string `type:"string"` + + // Date when the command completed. + CompletedAt *string `type:"string"` + + // Date and time when the command was run. + CreatedAt *string `type:"string"` + + // The command deployment ID. + DeploymentId *string `type:"string"` + + // The command exit code. + ExitCode *int64 `type:"integer"` + + // The ID of the instance where the command was executed. + InstanceId *string `type:"string"` + + // The URL of the command log. 
+ LogUrl *string `type:"string"` + + // The command status: + // + // failed successful skipped pending + Status *string `type:"string"` + + // The command type: + // + // deploy rollback start stop restart undeploy update_dependencies + // install_dependencies update_custom_cookbooks execute_recipes + Type *string `type:"string"` +} + +// String returns the string representation +func (s Command) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Command) GoString() string { + return s.String() +} + +type CreateAppInput struct { + _ struct{} `type:"structure"` + + // A Source object that specifies the app repository. + AppSource *Source `type:"structure"` + + // One or more user-defined key/value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // The app's data source. + DataSources []*DataSource `type:"list"` + + // A description of the app. + Description *string `type:"string"` + + // The app virtual host settings, with multiple domains separated by commas. + // For example: 'www.example.com, example.com' + Domains []*string `type:"list"` + + // Whether to enable SSL for the app. + EnableSsl *bool `type:"boolean"` + + // An array of EnvironmentVariable objects that specify environment variables + // to be associated with the app. After you deploy the app, these variables + // are defined on the associated app server instance. For more information, + // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // + // There is no specific limit on the number of environment variables. However, + // the size of the associated data structure - which includes the variables' + // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). + // This limit should accommodate most if not all use cases. 
Exceeding it will + // cause an exception with the message, "Environment: is too large (maximum + // is 10KB)." + // + // This parameter is supported only by Chef 11.10 stacks. If you have specified + // one or more environment variables, you cannot modify the stack's Chef version. + Environment []*EnvironmentVariable `type:"list"` + + // The app name. + Name *string `type:"string" required:"true"` + + // The app's short name. + Shortname *string `type:"string"` + + // An SslConfiguration object with the SSL configuration. + SslConfiguration *SslConfiguration `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` + + // The app type. Each supported type is associated with a particular layer. + // For example, PHP applications are associated with a PHP layer. AWS OpsWorks + // deploys an application to those instances that are members of the corresponding + // layer. If your app isn't one of the standard types, or you prefer to implement + // your own Deploy recipes, specify other. + Type *string `type:"string" required:"true" enum:"AppType"` +} + +// String returns the string representation +func (s CreateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateApp request. +type CreateAppOutput struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string"` +} + +// String returns the string representation +func (s CreateAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAppOutput) GoString() string { + return s.String() +} + +type CreateDeploymentInput struct { + _ struct{} `type:"structure"` + + // The app ID. This parameter is required for app deployments, but not for other + // deployment commands. 
+ AppId *string `type:"string"` + + // A DeploymentCommand object that specifies the deployment command and any + // associated arguments. + Command *DeploymentCommand `type:"structure" required:"true"` + + // A user-defined comment. + Comment *string `type:"string"` + + // A string that contains user-defined, custom JSON. It is used to override + // the corresponding default stack configuration JSON values. The string should + // be in the following format and must escape characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The instance IDs for the deployment targets. + InstanceIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDeploymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateDeployment request. +type CreateDeploymentOutput struct { + _ struct{} `type:"structure"` + + // The deployment ID, which can be used with other requests to identify the + // deployment. + DeploymentId *string `type:"string"` +} + +// String returns the string representation +func (s CreateDeploymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDeploymentOutput) GoString() string { + return s.String() +} + +type CreateInstanceInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // INHERIT - Use the stack's default agent version setting. 
version_number + // - Use the specified agent version. This value overrides the stack's default + // setting. To update the agent version, edit the instance configuration and + // specify a new version. AWS OpsWorks then automatically installs that version + // on the instance. The default setting is INHERIT. To specify an agent version, + // you must use the complete version number, not the abbreviated number shown + // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + AgentVersion *string `type:"string"` + + // A custom AMI ID to be used to create the instance. The AMI should be based + // on one of the supported operating systems. For more information, see Using + // Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // If you specify a custom AMI, you must set Os to Custom. + AmiId *string `type:"string"` + + // The instance architecture. The default option is x86_64. Instance types do + // not necessarily support both architectures. For a list of the architectures + // that are supported by the different instance types, see Instance Families + // and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + Architecture *string `type:"string" enum:"Architecture"` + + // For load-based or time-based instances, the type. Windows stacks can use + // only time-based instances. + AutoScalingType *string `type:"string" enum:"AutoScalingType"` + + // The instance Availability Zone. For more information, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + AvailabilityZone *string `type:"string"` + + // An array of BlockDeviceMapping objects that specify the instance's block + // devices. For more information, see Block Device Mapping (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html). + // Note that block device mappings are not supported for custom AMIs. 
+ BlockDeviceMappings []*BlockDeviceMapping `type:"list"` + + // Whether to create an Amazon EBS-optimized instance. + EbsOptimized *bool `type:"boolean"` + + // The instance host name. + Hostname *string `type:"string"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or by + // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The instance type, such as t2.micro. For a list of supported instance types, + // open the stack in the console, choose Instances, and choose + Instance. The + // Size list contains the currently supported types. For more information, see + // Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // The parameter values that you use to specify the various types are in the + // API Name column of the Available Instance Types table. + InstanceType *string `type:"string" required:"true"` + + // An array that contains the instance's layer IDs. + LayerIds []*string `type:"list" required:"true"` + + // The instance's operating system, which must be set to one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more + // information on the supported operating systems, see AWS OpsWorks Operating + // Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). 
+ // + // The default option is the current Amazon Linux version. If you set this + // parameter to Custom, you must use the CreateInstance action's AmiId parameter + // to specify the custom AMI that you want to use. Block device mappings are + // not supported if the value is Custom. For more information on the supported + // operating systems, see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html)For + // more information on how to use custom AMIs with AWS OpsWorks, see Using Custom + // AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + Os *string `type:"string"` + + // The instance root device type. For more information, see Storage for the + // Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + RootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // The instance's Amazon EC2 key-pair name. + SshKeyName *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` + + // The ID of the instance's subnet. If the stack is running in a VPC, you can + // use this parameter to override the stack's default subnet ID value and direct + // AWS OpsWorks to launch the instance in a different subnet. + SubnetId *string `type:"string"` + + // The instance's virtualization type, paravirtual or hvm. + VirtualizationType *string `type:"string"` +} + +// String returns the string representation +func (s CreateInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateInstance request. +type CreateInstanceOutput struct { + _ struct{} `type:"structure"` + + // The instance ID. 
+ InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s CreateInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceOutput) GoString() string { + return s.String() +} + +type CreateLayerInput struct { + _ struct{} `type:"structure"` + + // One or more user-defined key-value pairs to be added to the stack attributes. + // + // To create a cluster layer, set the EcsClusterArn attribute to the cluster's + // ARN. + Attributes map[string]*string `type:"map"` + + // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignElasticIps *bool `type:"boolean"` + + // For stacks that are running in a VPC, whether to automatically assign a public + // IP address to the layer's instances. For more information, see How to Edit + // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignPublicIps *bool `type:"boolean"` + + // The ARN of an IAM profile to be used for the layer's EC2 instances. For more + // information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + CustomInstanceProfileArn *string `type:"string"` + + // A JSON-formatted string containing custom stack configuration and deployment + // attributes to be installed on the layer's instances. For more information, + // see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). + // This feature is supported as of version 1.7.42 of the AWS CLI. + CustomJson *string `type:"string"` + + // A LayerCustomRecipes object that specifies the layer custom recipes. 
+ CustomRecipes *Recipes `type:"structure"` + + // An array containing the layer custom security group IDs. + CustomSecurityGroupIds []*string `type:"list"` + + // Whether to disable auto healing for the layer. + EnableAutoHealing *bool `type:"boolean"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or by + // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // To ensure that your instances have the latest security updates, we strongly + // recommend using the default value of true. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // A LifeCycleEventConfiguration object that you can use to configure the Shutdown + // event to specify an execution timeout and enable or disable Elastic Load + // Balancer connection draining. + LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"` + + // The layer name, which is used by the console. + Name *string `type:"string" required:"true"` + + // An array of Package objects that describes the layer packages. + Packages []*string `type:"list"` + + // For custom layers only, use this parameter to specify the layer's short name, + // which is used internally by AWS OpsWorks and by Chef recipes. The short name + // is also used as the name for the directory where your app files are installed. + // It can have a maximum of 200 characters, which are limited to the alphanumeric + // characters, '-', '_', and '.'. + // + // The built-in layers' short names are defined by AWS OpsWorks. For more information, + // see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html). + Shortname *string `type:"string" required:"true"` + + // The layer stack ID. 
+ StackId *string `type:"string" required:"true"` + + // The layer type. A stack cannot have more than one built-in layer of the same + // type. It can have any number of custom layers. + Type *string `type:"string" required:"true" enum:"LayerType"` + + // Whether to use Amazon EBS-optimized instances. + UseEbsOptimizedInstances *bool `type:"boolean"` + + // A VolumeConfigurations object that describes the layer's Amazon EBS volumes. + VolumeConfigurations []*VolumeConfiguration `type:"list"` +} + +// String returns the string representation +func (s CreateLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLayerInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateLayer request. +type CreateLayerOutput struct { + _ struct{} `type:"structure"` + + // The layer ID. + LayerId *string `type:"string"` +} + +// String returns the string representation +func (s CreateLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLayerOutput) GoString() string { + return s.String() +} + +type CreateStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. Fixed version - Set this parameter to your preferred agent version. + // To update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. The default setting is the most recent release of the + // agent. To specify an agent version, you must use the complete version number, + // not the abbreviated number shown on the console. 
For a list of available + // agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. + AgentVersion *string `type:"string"` + + // One or more user-defined key-value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version on Chef 11.10 stacks. For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // The configuration manager. When you create a stack we recommend that you + // use the configuration manager to specify the Chef version: 12, 11.10, or + // 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for + // Linux stacks is currently 11.4. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A string that contains user-defined, custom JSON. It can be used to override + // the corresponding default stack configuration attribute values or to pass + // data to recipes. The string should be in the following escape characters + // such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). 
+ CustomJson *string `type:"string"` + + // The stack's default Availability Zone, which must be in the specified region. + // For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // If you also specify a value for DefaultSubnetId, the subnet must be in the + // same zone. For more information, see the VpcId parameter description. + DefaultAvailabilityZone *string `type:"string"` + + // The Amazon Resource Name (ARN) of an IAM profile that is the default profile + // for all of the stack's EC2 instances. For more information about IAM ARNs, + // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string" required:"true"` + + // The stack's default operating system, which is installed on every instance + // unless you specify a different operating system when you create the instance. + // You can specify one of the following. + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify + // the custom AMI you want to use when you create instances. For more information, + // see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // The default option is the current Amazon Linux version. For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + DefaultOs *string `type:"string"` + + // The default root device type. This value is the default for all instances + // in the stack, but you can override it when you create an instance. The default + // option is instance-store. 
For more information, see Storage for the Root + // Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair name. The default value is none. If you specify + // a key pair name, AWS OpsWorks installs the public key on the instance and + // you can use the private key with an SSH client to log in to the instance. + // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). + // You can override this setting by specifying a different key pair, or no key + // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + DefaultSshKeyName *string `type:"string"` + + // The stack's default VPC subnet ID. This parameter is required if you specify + // a value for the VpcId parameter. All instances are launched into this subnet + // unless you specify otherwise when you create the instance. If you also specify + // a value for DefaultAvailabilityZone, the subnet must be in that zone. For + // information on default values and when this parameter is required, see the + // VpcId parameter description. + DefaultSubnetId *string `type:"string"` + + // The stack's host name theme, with spaces replaced by underscores. The theme + // is used to generate host names for the stack's instances. By default, HostnameTheme + // is set to Layer_Dependent, which creates host names by appending integers + // to the layer's short name. 
The other themes are: + // + // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan + // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats + // To obtain a generated host name, call GetHostNameSuggestion, which returns + // a host name based on the current theme. + HostnameTheme *string `type:"string"` + + // The stack name. + Name *string `type:"string" required:"true"` + + // The stack's AWS region, such as "us-east-1". For more information about Amazon + // regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string" required:"true"` + + // The stack's AWS Identity and Access Management (IAM) role, which allows AWS + // OpsWorks to work with AWS resources on your behalf. You must set this parameter + // to the Amazon Resource Name (ARN) for an existing IAM role. For more information + // about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + ServiceRoleArn *string `type:"string" required:"true"` + + // Whether the stack uses custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether to associate the AWS OpsWorks built-in security groups with the stack's + // layers. + // + // AWS OpsWorks provides a standard set of built-in security groups, one for + // each layer, which are associated with layers by default. With UseOpsworksSecurityGroups + // you can instead provide your own custom security groups. UseOpsworksSecurityGroups + // has the following settings: + // + // True - AWS OpsWorks automatically associates the appropriate built-in security + // group with each layer (default setting). You can associate additional security + // groups with a layer after you create it, but you cannot delete the built-in + // security group. False - AWS OpsWorks does not associate built-in security + // groups with layers. 
You must create appropriate EC2 security groups and associate + // a security group with each layer that you create. However, you can still + // manually associate a built-in security group with a layer on creation; custom + // security groups are required only for those layers that need custom settings. + // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The ID of the VPC that the stack is to be launched into. The VPC must be + // in the stack's region. All instances are launched into this VPC. You cannot + // change the ID later. + // + // If your account supports EC2-Classic, the default value is no VPC. If your + // account does not support EC2-Classic, the default value is the default VPC + // for the specified region. If the VPC ID corresponds to a default VPC and + // you have specified either the DefaultAvailabilityZone or the DefaultSubnetId + // parameter only, AWS OpsWorks infers the value of the other parameter. If + // you specify neither parameter, AWS OpsWorks sets these parameters to the + // first valid Availability Zone for the specified region and the corresponding + // default VPC subnet ID, respectively. + // + // If you specify a nondefault VPC ID, note the following: + // + // It must belong to a VPC in your account that is in the specified region. + // You must specify a value for DefaultSubnetId. For more information on how + // to use AWS OpsWorks with a VPC, see Running a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // For more information on default VPC and EC2-Classic, see Supported Platforms + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). 
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateStack request. +type CreateStackOutput struct { + _ struct{} `type:"structure"` + + // The stack ID, which is an opaque string that you use to identify the stack + // when performing actions such as DescribeStacks. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s CreateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStackOutput) GoString() string { + return s.String() +} + +type CreateUserProfileInput struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. For more information, see Setting an IAM User's Public SSH Key (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user's IAM ARN. + IamUserArn *string `type:"string" required:"true"` + + // The user's public SSH key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], + // '-', and '_'. If the specified name includes other punctuation marks, AWS + // OpsWorks removes them. For example, my.name will be changed to myname. If + // you do not specify an SSH user name, AWS OpsWorks generates one from the + // IAM user name. 
+ SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s CreateUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a CreateUserProfile request. +type CreateUserProfileOutput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserProfileOutput) GoString() string { + return s.String() +} + +// Describes an app's data source. +type DataSource struct { + _ struct{} `type:"structure"` + + // The data source's ARN. + Arn *string `type:"string"` + + // The database name. + DatabaseName *string `type:"string"` + + // The data source's type, AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, + // or RdsDbInstance. + Type *string `type:"string"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSource) GoString() string { + return s.String() +} + +type DeleteAppInput struct { + _ struct{} `type:"structure"` + + // The app ID. 
+ AppId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppInput) GoString() string { + return s.String() +} + +type DeleteAppOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppOutput) GoString() string { + return s.String() +} + +type DeleteInstanceInput struct { + _ struct{} `type:"structure"` + + // Whether to delete the instance Elastic IP address. + DeleteElasticIp *bool `type:"boolean"` + + // Whether to delete the instance's Amazon EBS volumes. + DeleteVolumes *bool `type:"boolean"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceInput) GoString() string { + return s.String() +} + +type DeleteInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceOutput) GoString() string { + return s.String() +} + +type DeleteLayerInput struct { + _ struct{} `type:"structure"` + + // The layer ID. 
+ LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLayerInput) GoString() string { + return s.String() +} + +type DeleteLayerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLayerOutput) GoString() string { + return s.String() +} + +type DeleteStackInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackInput) GoString() string { + return s.String() +} + +type DeleteStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteStackOutput) GoString() string { + return s.String() +} + +type DeleteUserProfileInput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. 
+ IamUserArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserProfileInput) GoString() string { + return s.String() +} + +type DeleteUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserProfileOutput) GoString() string { + return s.String() +} + +// Describes a deployment of a stack or app. +type Deployment struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string"` + + // Used to specify a stack or deployment command. + Command *DeploymentCommand `type:"structure"` + + // A user-defined comment. + Comment *string `type:"string"` + + // Date when the deployment completed. + CompletedAt *string `type:"string"` + + // Date when the deployment was created. + CreatedAt *string `type:"string"` + + // A string that contains user-defined custom JSON. It can be used to override + // the corresponding default stack configuration attribute values for stack + // or to pass data to recipes. The string should be in the following format + // and must escape characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The deployment ID. + DeploymentId *string `type:"string"` + + // The deployment duration. + Duration *int64 `type:"integer"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` + + // The IDs of the target instances. 
+ InstanceIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string"` + + // The deployment status: + // + // running successful failed + Status *string `type:"string"` +} + +// String returns the string representation +func (s Deployment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Deployment) GoString() string { + return s.String() +} + +// Used to specify a stack or deployment command. +type DeploymentCommand struct { + _ struct{} `type:"structure"` + + // The arguments of those commands that take arguments. It should be set to + // a JSON object with the following format: + // + // {"arg_name1" : ["value1", "value2", ...], "arg_name2" : ["value1", "value2", + // ...], ...} + // + // The update_dependencies command takes two arguments: + // + // upgrade_os_to - Specifies the desired Amazon Linux version for instances + // whose OS you want to upgrade, such as Amazon Linux 2014.09. You must also + // set the allow_reboot argument to true. allow_reboot - Specifies whether to + // allow AWS OpsWorks to reboot the instances if necessary, after installing + // the updates. This argument can be set to either true or false. The default + // value is false. For example, to upgrade an instance to Amazon Linux 2014.09, + // set Args to the following. + // + // { "upgrade_os_to":["Amazon Linux 2014.09"], "allow_reboot":["true"] } + Args map[string][]*string `type:"map"` + + // Specifies the operation. You can specify only one command. + // + // For stacks, the following commands are available: + // + // execute_recipes: Execute one or more recipes. To specify the recipes, set + // an Args parameter named recipes to the list of recipes to be executed. For + // example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}. + // install_dependencies: Install the stack's dependencies. update_custom_cookbooks: + // Update the stack's custom cookbooks. 
update_dependencies: Update the stack's + // dependencies. The update_dependencies and install_dependencies commands + // are supported only for Linux instances. You can run the commands successfully + // on Windows instances, but they do nothing. For apps, the following commands + // are available: + // + // deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter + // named migrate. Set Args to {"migrate":["true"]} to migrate the database. + // The default setting is {"migrate":["false"]}. rollback Roll the app back + // to the previous version. When you update an app, AWS OpsWorks stores the + // previous version, up to a maximum of five versions. You can use this command + // to roll an app back as many as four versions. start: Start the app's web + // or application server. stop: Stop the app's web or application server. restart: + // Restart the app's web or application server. undeploy: Undeploy the app. + Name *string `type:"string" required:"true" enum:"DeploymentCommandName"` +} + +// String returns the string representation +func (s DeploymentCommand) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentCommand) GoString() string { + return s.String() +} + +type DeregisterEcsClusterInput struct { + _ struct{} `type:"structure"` + + // The cluster's ARN. 
+ EcsClusterArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterEcsClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterEcsClusterInput) GoString() string { + return s.String() +} + +type DeregisterEcsClusterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterEcsClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterEcsClusterOutput) GoString() string { + return s.String() +} + +type DeregisterElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterElasticIpInput) GoString() string { + return s.String() +} + +type DeregisterElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterElasticIpOutput) GoString() string { + return s.String() +} + +type DeregisterInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. 
+ InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceInput) GoString() string { + return s.String() +} + +type DeregisterInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterRdsDbInstanceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS instance's ARN. + RdsDbInstanceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterRdsDbInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterRdsDbInstanceInput) GoString() string { + return s.String() +} + +type DeregisterRdsDbInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterRdsDbInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterRdsDbInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterVolumeInput struct { + _ struct{} `type:"structure"` + + // The AWS OpsWorks volume ID, which is the GUID that AWS OpsWorks assigned + // to the instance when you registered the volume with the stack, not the Amazon + // EC2 volume ID. 
+ VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterVolumeInput) GoString() string { + return s.String() +} + +type DeregisterVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterVolumeOutput) GoString() string { + return s.String() +} + +type DescribeAgentVersionsInput struct { + _ struct{} `type:"structure"` + + // The configuration manager. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAgentVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAgentVersionsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeAgentVersions request. +type DescribeAgentVersionsOutput struct { + _ struct{} `type:"structure"` + + // The agent versions for the specified stack or configuration manager. Note + // that this value is the complete version number, not the abbreviated number + // used by the console. + AgentVersions []*AgentVersion `type:"list"` +} + +// String returns the string representation +func (s DescribeAgentVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAgentVersionsOutput) GoString() string { + return s.String() +} + +type DescribeAppsInput struct { + _ struct{} `type:"structure"` + + // An array of app IDs for the apps to be described. If you use this parameter, + // DescribeApps returns a description of the specified apps. 
Otherwise, it returns + // a description of every app. + AppIds []*string `type:"list"` + + // The app stack ID. If you use this parameter, DescribeApps returns a description + // of the apps in the specified stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAppsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAppsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeApps request. +type DescribeAppsOutput struct { + _ struct{} `type:"structure"` + + // An array of App objects that describe the specified apps. + Apps []*App `type:"list"` +} + +// String returns the string representation +func (s DescribeAppsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAppsOutput) GoString() string { + return s.String() +} + +type DescribeCommandsInput struct { + _ struct{} `type:"structure"` + + // An array of command IDs. If you include this parameter, DescribeCommands + // returns a description of the specified commands. Otherwise, it returns a + // description of every command. + CommandIds []*string `type:"list"` + + // The deployment ID. If you include this parameter, DescribeCommands returns + // a description of the commands associated with the specified deployment. + DeploymentId *string `type:"string"` + + // The instance ID. If you include this parameter, DescribeCommands returns + // a description of the commands associated with the specified instance. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCommandsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCommandsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeCommands request. 
+type DescribeCommandsOutput struct { + _ struct{} `type:"structure"` + + // An array of Command objects that describe each of the specified commands. + Commands []*Command `type:"list"` +} + +// String returns the string representation +func (s DescribeCommandsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCommandsOutput) GoString() string { + return s.String() +} + +type DescribeDeploymentsInput struct { + _ struct{} `type:"structure"` + + // The app ID. If you include this parameter, DescribeDeployments returns a + // description of the commands associated with the specified app. + AppId *string `type:"string"` + + // An array of deployment IDs to be described. If you include this parameter, + // DescribeDeployments returns a description of the specified deployments. Otherwise, + // it returns a description of every deployment. + DeploymentIds []*string `type:"list"` + + // The stack ID. If you include this parameter, DescribeDeployments returns + // a description of the commands associated with the specified stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDeploymentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeploymentsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeDeployments request. +type DescribeDeploymentsOutput struct { + _ struct{} `type:"structure"` + + // An array of Deployment objects that describe the deployments. 
+ Deployments []*Deployment `type:"list"` +} + +// String returns the string representation +func (s DescribeDeploymentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDeploymentsOutput) GoString() string { + return s.String() +} + +type DescribeEcsClustersInput struct { + _ struct{} `type:"structure"` + + // A list of ARNs, one for each cluster to be described. + EcsClusterArns []*string `type:"list"` + + // To receive a paginated response, use this parameter to specify the maximum + // number of results to be returned with a single call. If the number of available + // results exceeds this maximum, the response includes a NextToken value that + // you can assign to the NextToken request parameter to get the next set of + // results. + MaxResults *int64 `type:"integer"` + + // If the previous paginated request did not return all of the remaining results, + // the response object'sNextToken parameter value is set to a token. To retrieve + // the next set of results, call DescribeEcsClusters again and assign that token + // to the request object's NextToken parameter. If there are no remaining results, + // the previous response object's NextToken parameter is set to null. + NextToken *string `type:"string"` + + // A stack ID. DescribeEcsClusters returns a description of the cluster that + // is registered with the stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEcsClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEcsClustersInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeEcsClusters request. +type DescribeEcsClustersOutput struct { + _ struct{} `type:"structure"` + + // A list of EcsCluster objects containing the cluster descriptions. 
+ EcsClusters []*EcsCluster `type:"list"` + + // If a paginated request does not return all of the remaining results, this + // parameter is set to a token that you can assign to the request object's NextToken + // parameter to retrieve the next set of results. If the previous paginated + // request returned all of the remaining results, this parameter is set to null. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEcsClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEcsClustersOutput) GoString() string { + return s.String() +} + +type DescribeElasticIpsInput struct { + _ struct{} `type:"structure"` + + // The instance ID. If you include this parameter, DescribeElasticIps returns + // a description of the Elastic IP addresses associated with the specified instance. + InstanceId *string `type:"string"` + + // An array of Elastic IP addresses to be described. If you include this parameter, + // DescribeElasticIps returns a description of the specified Elastic IP addresses. + // Otherwise, it returns a description of every Elastic IP address. + Ips []*string `type:"list"` + + // A stack ID. If you include this parameter, DescribeElasticIps returns a description + // of the Elastic IP addresses that are registered with the specified stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeElasticIpsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticIpsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeElasticIps request. +type DescribeElasticIpsOutput struct { + _ struct{} `type:"structure"` + + // An ElasticIps object that describes the specified Elastic IP addresses. 
+ ElasticIps []*ElasticIp `type:"list"` +} + +// String returns the string representation +func (s DescribeElasticIpsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticIpsOutput) GoString() string { + return s.String() +} + +type DescribeElasticLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // A list of layer IDs. The action describes the Elastic Load Balancing instances + // for the specified layers. + LayerIds []*string `type:"list"` + + // A stack ID. The action describes the stack's Elastic Load Balancing instances. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeElasticLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticLoadBalancersInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeElasticLoadBalancers request. +type DescribeElasticLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // A list of ElasticLoadBalancer objects that describe the specified Elastic + // Load Balancing instances. + ElasticLoadBalancers []*ElasticLoadBalancer `type:"list"` +} + +// String returns the string representation +func (s DescribeElasticLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticLoadBalancersOutput) GoString() string { + return s.String() +} + +type DescribeInstancesInput struct { + _ struct{} `type:"structure"` + + // An array of instance IDs to be described. If you use this parameter, DescribeInstances + // returns a description of the specified instances. Otherwise, it returns a + // description of every instance. + InstanceIds []*string `type:"list"` + + // A layer ID. 
If you use this parameter, DescribeInstances returns descriptions + // of the instances associated with the specified layer. + LayerId *string `type:"string"` + + // A stack ID. If you use this parameter, DescribeInstances returns descriptions + // of the instances associated with the specified stack. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeInstances request. +type DescribeInstancesOutput struct { + _ struct{} `type:"structure"` + + // An array of Instance objects that describe the instances. + Instances []*Instance `type:"list"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +type DescribeLayersInput struct { + _ struct{} `type:"structure"` + + // An array of layer IDs that specify the layers to be described. If you omit + // this parameter, DescribeLayers returns a description of every layer in the + // specified stack. + LayerIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLayersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLayersInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeLayers request. +type DescribeLayersOutput struct { + _ struct{} `type:"structure"` + + // An array of Layer objects that describe the layers. 
+ Layers []*Layer `type:"list"` +} + +// String returns the string representation +func (s DescribeLayersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLayersOutput) GoString() string { + return s.String() +} + +type DescribeLoadBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An array of layer IDs. + LayerIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeLoadBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBasedAutoScalingInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeLoadBasedAutoScaling request. +type DescribeLoadBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` + + // An array of LoadBasedAutoScalingConfiguration objects that describe each + // layer's configuration. + LoadBasedAutoScalingConfigurations []*LoadBasedAutoScalingConfiguration `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBasedAutoScalingOutput) GoString() string { + return s.String() +} + +type DescribeMyUserProfileInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeMyUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMyUserProfileInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeMyUserProfile request. +type DescribeMyUserProfileOutput struct { + _ struct{} `type:"structure"` + + // A UserProfile object that describes the user's SSH information. 
+ UserProfile *SelfUserProfile `type:"structure"` +} + +// String returns the string representation +func (s DescribeMyUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMyUserProfileOutput) GoString() string { + return s.String() +} + +type DescribePermissionsInput struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + IamUserArn *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePermissionsInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribePermissions request. +type DescribePermissionsOutput struct { + _ struct{} `type:"structure"` + + // An array of Permission objects that describe the stack permissions. + // + // If the request object contains only a stack ID, the array contains a Permission + // object with permissions for each of the stack IAM ARNs. If the request object + // contains only an IAM ARN, the array contains a Permission object with permissions + // for each of the user's stack IDs. If the request contains a stack ID and + // an IAM ARN, the array contains a single Permission object with permissions + // for the specified stack and IAM ARN. + Permissions []*Permission `type:"list"` +} + +// String returns the string representation +func (s DescribePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePermissionsOutput) GoString() string { + return s.String() +} + +type DescribeRaidArraysInput struct { + _ struct{} `type:"structure"` + + // The instance ID. 
If you use this parameter, DescribeRaidArrays returns descriptions + // of the RAID arrays associated with the specified instance. + InstanceId *string `type:"string"` + + // An array of RAID array IDs. If you use this parameter, DescribeRaidArrays + // returns descriptions of the specified arrays. Otherwise, it returns a description + // of every array. + RaidArrayIds []*string `type:"list"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeRaidArraysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRaidArraysInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeRaidArrays request. +type DescribeRaidArraysOutput struct { + _ struct{} `type:"structure"` + + // A RaidArrays object that describes the specified RAID arrays. + RaidArrays []*RaidArray `type:"list"` +} + +// String returns the string representation +func (s DescribeRaidArraysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRaidArraysOutput) GoString() string { + return s.String() +} + +type DescribeRdsDbInstancesInput struct { + _ struct{} `type:"structure"` + + // An array containing the ARNs of the instances to be described. + RdsDbInstanceArns []*string `type:"list"` + + // The stack ID that the instances are registered with. The operation returns + // descriptions of all registered Amazon RDS instances. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeRdsDbInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRdsDbInstancesInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeRdsDbInstances request. 
+type DescribeRdsDbInstancesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of RdsDbInstance objects that describe the instances.
+ RdsDbInstances []*RdsDbInstance `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeRdsDbInstancesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRdsDbInstancesOutput) GoString() string {
+ return s.String()
+}
+
+type DescribeServiceErrorsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The instance ID. If you use this parameter, DescribeServiceErrors returns
+ // descriptions of the errors associated with the specified instance.
+ InstanceId *string `type:"string"`
+
+ // An array of service error IDs. If you use this parameter, DescribeServiceErrors
+ // returns descriptions of the specified errors. Otherwise, it returns a description
+ // of every error.
+ ServiceErrorIds []*string `type:"list"`
+
+ // The stack ID. If you use this parameter, DescribeServiceErrors returns descriptions
+ // of the errors associated with the specified stack.
+ StackId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeServiceErrorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeServiceErrorsInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeServiceErrors request.
+type DescribeServiceErrorsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of ServiceError objects that describe the specified service errors.
+ ServiceErrors []*ServiceError `type:"list"` +} + +// String returns the string representation +func (s DescribeServiceErrorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServiceErrorsOutput) GoString() string { + return s.String() +} + +type DescribeStackProvisioningParametersInput struct { + _ struct{} `type:"structure"` + + // The stack ID + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStackProvisioningParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackProvisioningParametersInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeStackProvisioningParameters request. +type DescribeStackProvisioningParametersOutput struct { + _ struct{} `type:"structure"` + + // The AWS OpsWorks agent installer's URL. + AgentInstallerUrl *string `type:"string"` + + // An embedded object that contains the provisioning parameters. + Parameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s DescribeStackProvisioningParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackProvisioningParametersOutput) GoString() string { + return s.String() +} + +type DescribeStackSummaryInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStackSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackSummaryInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeStackSummary request. 
+type DescribeStackSummaryOutput struct { + _ struct{} `type:"structure"` + + // A StackSummary object that contains the results. + StackSummary *StackSummary `type:"structure"` +} + +// String returns the string representation +func (s DescribeStackSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStackSummaryOutput) GoString() string { + return s.String() +} + +type DescribeStacksInput struct { + _ struct{} `type:"structure"` + + // An array of stack IDs that specify the stacks to be described. If you omit + // this parameter, DescribeStacks returns a description of every stack. + StackIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeStacksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStacksInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeStacks request. +type DescribeStacksOutput struct { + _ struct{} `type:"structure"` + + // An array of Stack objects that describe the stacks. + Stacks []*Stack `type:"list"` +} + +// String returns the string representation +func (s DescribeStacksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStacksOutput) GoString() string { + return s.String() +} + +type DescribeTimeBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An array of instance IDs. + InstanceIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTimeBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTimeBasedAutoScalingInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeTimeBasedAutoScaling request. 
+type DescribeTimeBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` + + // An array of TimeBasedAutoScalingConfiguration objects that describe the configuration + // for the specified instances. + TimeBasedAutoScalingConfigurations []*TimeBasedAutoScalingConfiguration `type:"list"` +} + +// String returns the string representation +func (s DescribeTimeBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTimeBasedAutoScalingOutput) GoString() string { + return s.String() +} + +type DescribeUserProfilesInput struct { + _ struct{} `type:"structure"` + + // An array of IAM user ARNs that identify the users to be described. + IamUserArns []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeUserProfilesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserProfilesInput) GoString() string { + return s.String() +} + +// Contains the response to a DescribeUserProfiles request. +type DescribeUserProfilesOutput struct { + _ struct{} `type:"structure"` + + // A Users object that describes the specified users. + UserProfiles []*UserProfile `type:"list"` +} + +// String returns the string representation +func (s DescribeUserProfilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserProfilesOutput) GoString() string { + return s.String() +} + +type DescribeVolumesInput struct { + _ struct{} `type:"structure"` + + // The instance ID. If you use this parameter, DescribeVolumes returns descriptions + // of the volumes associated with the specified instance. + InstanceId *string `type:"string"` + + // The RAID array ID. If you use this parameter, DescribeVolumes returns descriptions + // of the volumes associated with the specified RAID array. + RaidArrayId *string `type:"string"` + + // A stack ID. 
The action describes the stack's registered Amazon EBS volumes.
+ StackId *string `type:"string"`
+
+ // An array of volume IDs. If you use this parameter, DescribeVolumes returns
+ // descriptions of the specified volumes. Otherwise, it returns a description
+ // of every volume.
+ VolumeIds []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVolumesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumesInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a DescribeVolumes request.
+type DescribeVolumesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of volume IDs.
+ Volumes []*Volume `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVolumesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVolumesOutput) GoString() string {
+ return s.String()
+}
+
+type DetachElasticLoadBalancerInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Elastic Load Balancing instance's name.
+ ElasticLoadBalancerName *string `type:"string" required:"true"`
+
+ // The ID of the layer that the Elastic Load Balancing instance is attached
+ // to.
+ LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachElasticLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachElasticLoadBalancerInput) GoString() string { + return s.String() +} + +type DetachElasticLoadBalancerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachElasticLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachElasticLoadBalancerOutput) GoString() string { + return s.String() +} + +type DisassociateElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateElasticIpInput) GoString() string { + return s.String() +} + +type DisassociateElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateElasticIpOutput) GoString() string { + return s.String() +} + +// Describes an Amazon EBS volume. This data type maps directly to the Amazon +// EC2 EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) +// data type. +type EbsBlockDevice struct { + _ struct{} `type:"structure"` + + // Whether the volume is deleted on instance termination. + DeleteOnTermination *bool `type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. 
+ // For more information, see EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html). + Iops *int64 `type:"integer"` + + // The snapshot ID. + SnapshotId *string `type:"string"` + + // The volume size, in GiB. For more information, see EbsBlockDevice (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html). + VolumeSize *int64 `type:"integer"` + + // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned + // IOPS (SSD) volumes, and standard for Magnetic volumes. + VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s EbsBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsBlockDevice) GoString() string { + return s.String() +} + +// Describes a registered Amazon ECS cluster. +type EcsCluster struct { + _ struct{} `type:"structure"` + + // The cluster's ARN. + EcsClusterArn *string `type:"string"` + + // The cluster name. + EcsClusterName *string `type:"string"` + + // The time and date that the cluster was registered with the stack. + RegisteredAt *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s EcsCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EcsCluster) GoString() string { + return s.String() +} + +// Describes an Elastic IP address. +type ElasticIp struct { + _ struct{} `type:"structure"` + + // The domain. + Domain *string `type:"string"` + + // The ID of the instance that the address is attached to. + InstanceId *string `type:"string"` + + // The IP address. + Ip *string `type:"string"` + + // The name. + Name *string `type:"string"` + + // The AWS region. For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). 
+ Region *string `type:"string"` +} + +// String returns the string representation +func (s ElasticIp) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticIp) GoString() string { + return s.String() +} + +// Describes an Elastic Load Balancing instance. +type ElasticLoadBalancer struct { + _ struct{} `type:"structure"` + + // A list of Availability Zones. + AvailabilityZones []*string `type:"list"` + + // The instance's public DNS name. + DnsName *string `type:"string"` + + // A list of the EC2 instances that the Elastic Load Balancing instance is managing + // traffic for. + Ec2InstanceIds []*string `type:"list"` + + // The Elastic Load Balancing instance's name. + ElasticLoadBalancerName *string `type:"string"` + + // The ID of the layer that the instance is attached to. + LayerId *string `type:"string"` + + // The instance's AWS region. + Region *string `type:"string"` + + // The ID of the stack that the instance is associated with. + StackId *string `type:"string"` + + // A list of subnet IDs, if the stack is running in a VPC. + SubnetIds []*string `type:"list"` + + // The VPC ID. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s ElasticLoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticLoadBalancer) GoString() string { + return s.String() +} + +// Represents an app's environment variable. +type EnvironmentVariable struct { + _ struct{} `type:"structure"` + + // (Required) The environment variable's name, which can consist of up to 64 + // characters and must be specified. The name can contain upper- and lowercase + // letters, numbers, and underscores (_), but it must start with a letter or + // underscore. + Key *string `type:"string" required:"true"` + + // (Optional) Whether the variable's value will be returned by the DescribeApps + // action. 
To conceal an environment variable's value, set Secure to true. DescribeApps + // then returns *****FILTERED***** instead of the actual value. The default + // value for Secure is false. + Secure *bool `type:"boolean"` + + // (Optional) The environment variable's value, which can be left empty. If + // you specify a value, it can contain up to 256 characters, which must all + // be printable. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnvironmentVariable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentVariable) GoString() string { + return s.String() +} + +type GetHostnameSuggestionInput struct { + _ struct{} `type:"structure"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHostnameSuggestionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostnameSuggestionInput) GoString() string { + return s.String() +} + +// Contains the response to a GetHostnameSuggestion request. +type GetHostnameSuggestionOutput struct { + _ struct{} `type:"structure"` + + // The generated host name. + Hostname *string `type:"string"` + + // The layer ID. + LayerId *string `type:"string"` +} + +// String returns the string representation +func (s GetHostnameSuggestionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostnameSuggestionOutput) GoString() string { + return s.String() +} + +type GrantAccessInput struct { + _ struct{} `type:"structure"` + + // The instance's AWS OpsWorks ID. + InstanceId *string `type:"string" required:"true"` + + // The length of time (in minutes) that the grant is valid. When the grant expires + // at the end of this period, the user will no longer be able to use the credentials + // to log in. 
If the user is logged in at the time, he or she automatically + // will be logged out. + ValidForInMinutes *int64 `min:"60" type:"integer"` +} + +// String returns the string representation +func (s GrantAccessInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantAccessInput) GoString() string { + return s.String() +} + +// Contains the response to a GrantAccess request. +type GrantAccessOutput struct { + _ struct{} `type:"structure"` + + // A TemporaryCredential object that contains the data needed to log in to the + // instance by RDP clients, such as the Microsoft Remote Desktop Connection. + TemporaryCredential *TemporaryCredential `type:"structure"` +} + +// String returns the string representation +func (s GrantAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantAccessOutput) GoString() string { + return s.String() +} + +// Describes an instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The agent version. This parameter is set to INHERIT if the instance inherits + // the default stack setting or to a a version number for a fixed agent version. + AgentVersion *string `type:"string"` + + // A custom AMI ID to be used to create the instance. For more information, + // see Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html) + AmiId *string `type:"string"` + + // The instance architecture: "i386" or "x86_64". + Architecture *string `type:"string" enum:"Architecture"` + + // For load-based or time-based instances, the type. + AutoScalingType *string `type:"string" enum:"AutoScalingType"` + + // The instance Availability Zone. For more information, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html). 
+ AvailabilityZone *string `type:"string"` + + // An array of BlockDeviceMapping objects that specify the instance's block + // device mappings. + BlockDeviceMappings []*BlockDeviceMapping `type:"list"` + + // The time that the instance was created. + CreatedAt *string `type:"string"` + + // Whether this is an Amazon EBS-optimized instance. + EbsOptimized *bool `type:"boolean"` + + // The ID of the associated Amazon EC2 instance. + Ec2InstanceId *string `type:"string"` + + // For container instances, the Amazon ECS cluster's ARN. + EcsClusterArn *string `type:"string"` + + // For container instances, the instance's ARN. + EcsContainerInstanceArn *string `type:"string"` + + // The instance Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). + ElasticIp *string `type:"string"` + + // The instance host name. + Hostname *string `type:"string"` + + // For registered instances, the infrastructure class: ec2 or on-premises. + InfrastructureClass *string `type:"string"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. If this value is set to false, you must + // then update your instances manually by using CreateDeployment to run the + // update_dependencies stack command or by manually running yum (Amazon Linux) + // or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true, to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The instance ID. + InstanceId *string `type:"string"` + + // The ARN of the instance's IAM profile. For more information about IAM ARNs, + // see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + InstanceProfileArn *string `type:"string"` + + // The instance type, such as t2.micro. + InstanceType *string `type:"string"` + + // The ID of the last service error. 
For more information, call DescribeServiceErrors.
+ LastServiceErrorId *string `type:"string"`
+
+ // An array containing the instance layer IDs.
+ LayerIds []*string `type:"list"`
+
+ // The instance's operating system.
+ Os *string `type:"string"`
+
+ // The instance's platform.
+ Platform *string `type:"string"`
+
+ // The instance's private DNS name.
+ PrivateDns *string `type:"string"`
+
+ // The instance's private IP address.
+ PrivateIp *string `type:"string"`
+
+ // The instance public DNS name.
+ PublicDns *string `type:"string"`
+
+ // The instance public IP address.
+ PublicIp *string `type:"string"`
+
+ // For registered instances, who performed the registration.
+ RegisteredBy *string `type:"string"`
+
+ // The instance's reported AWS OpsWorks agent version.
+ ReportedAgentVersion *string `type:"string"`
+
+ // For registered instances, the reported operating system.
+ ReportedOs *ReportedOs `type:"structure"`
+
+ // The instance's root device type. For more information, see Storage for the
+ // Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device).
+ RootDeviceType *string `type:"string" enum:"RootDeviceType"`
+
+ // The root device volume ID.
+ RootDeviceVolumeId *string `type:"string"`
+
+ // An array containing the instance security group IDs.
+ SecurityGroupIds []*string `type:"list"`
+
+ // The SSH key's Digital Signature Algorithm (DSA) fingerprint.
+ SshHostDsaKeyFingerprint *string `type:"string"`
+
+ // The SSH key's RSA fingerprint.
+ SshHostRsaKeyFingerprint *string `type:"string"`
+
+ // The instance's Amazon EC2 key-pair name.
+ SshKeyName *string `type:"string"`
+
+ // The stack ID.
+ StackId *string `type:"string"` + + // The instance status: + // + // booting connection_lost online pending rebooting requested + // running_setup setup_failed shutting_down start_failed stopped + // stopping terminated terminating + Status *string `type:"string"` + + // The instance's subnet ID; applicable only if the stack is running in a VPC. + SubnetId *string `type:"string"` + + // The instance's virtualization type: paravirtual or hvm. + VirtualizationType *string `type:"string" enum:"VirtualizationType"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// Contains a description of an Amazon EC2 instance from the Amazon EC2 metadata +// service. For more information, see Instance Metadata and User Data (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/Index.html). +type InstanceIdentity struct { + _ struct{} `type:"structure"` + + // A JSON document that contains the metadata. + Document *string `type:"string"` + + // A signature that can be used to verify the document's accuracy and authenticity. + Signature *string `type:"string"` +} + +// String returns the string representation +func (s InstanceIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceIdentity) GoString() string { + return s.String() +} + +// Describes how many instances a stack has for each status. +type InstancesCount struct { + _ struct{} `type:"structure"` + + // The number of instances in the Assigning state. + Assigning *int64 `type:"integer"` + + // The number of instances with booting status. + Booting *int64 `type:"integer"` + + // The number of instances with connection_lost status. + ConnectionLost *int64 `type:"integer"` + + // The number of instances in the Deregistering state. 
+ Deregistering *int64 `type:"integer"` + + // The number of instances with online status. + Online *int64 `type:"integer"` + + // The number of instances with pending status. + Pending *int64 `type:"integer"` + + // The number of instances with rebooting status. + Rebooting *int64 `type:"integer"` + + // The number of instances in the Registered state. + Registered *int64 `type:"integer"` + + // The number of instances in the Registering state. + Registering *int64 `type:"integer"` + + // The number of instances with requested status. + Requested *int64 `type:"integer"` + + // The number of instances with running_setup status. + RunningSetup *int64 `type:"integer"` + + // The number of instances with setup_failed status. + SetupFailed *int64 `type:"integer"` + + // The number of instances with shutting_down status. + ShuttingDown *int64 `type:"integer"` + + // The number of instances with start_failed status. + StartFailed *int64 `type:"integer"` + + // The number of instances with stopped status. + Stopped *int64 `type:"integer"` + + // The number of instances with stopping status. + Stopping *int64 `type:"integer"` + + // The number of instances with terminated status. + Terminated *int64 `type:"integer"` + + // The number of instances with terminating status. + Terminating *int64 `type:"integer"` + + // The number of instances in the Unassigning state. + Unassigning *int64 `type:"integer"` +} + +// String returns the string representation +func (s InstancesCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstancesCount) GoString() string { + return s.String() +} + +// Describes a layer. +type Layer struct { + _ struct{} `type:"structure"` + + // The layer attributes. 
+ //
+ // For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes,
+ // AWS OpsWorks returns *****FILTERED***** instead of the actual value.
+ //
+ // For an ECS Cluster layer, AWS OpsWorks sets the EcsClusterArn attribute
+ // to the cluster's ARN.
+ Attributes map[string]*string `type:"map"`
+
+ // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
+ // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html).
+ AutoAssignElasticIps *bool `type:"boolean"`
+
+ // For stacks that are running in a VPC, whether to automatically assign a public
+ // IP address to the layer's instances. For more information, see How to Edit
+ // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html).
+ AutoAssignPublicIps *bool `type:"boolean"`
+
+ // Date when the layer was created.
+ CreatedAt *string `type:"string"`
+
+ // The ARN of the default IAM profile to be used for the layer's EC2 instances.
+ // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html).
+ CustomInstanceProfileArn *string `type:"string"`
+
+ // A JSON formatted string containing the layer's custom stack configuration
+ // and deployment attributes.
+ CustomJson *string `type:"string"`
+
+ // A LayerCustomRecipes object that specifies the layer's custom recipes.
+ CustomRecipes *Recipes `type:"structure"`
+
+ // An array containing the layer's custom security group IDs.
+ CustomSecurityGroupIds []*string `type:"list"`
+
+ // AWS OpsWorks supports five lifecycle events: setup, configuration, deploy,
+ // undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard
+ // recipes for each event. In addition, you can provide custom recipes for any
+ // or all layers and events.
AWS OpsWorks runs custom event recipes after the + // standard recipes. LayerCustomRecipes specifies the custom recipes for a particular + // layer to be run in response to each of the five events. + // + // To specify a recipe, use the cookbook's directory name in the repository + // followed by two colons and the recipe name, which is the recipe's file name + // without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb + // recipe in the repository's phpapp2 folder. + DefaultRecipes *Recipes `type:"structure"` + + // An array containing the layer's security group names. + DefaultSecurityGroupNames []*string `type:"list"` + + // Whether auto healing is disabled for the layer. + EnableAutoHealing *bool `type:"boolean"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. If this value is set to false, you must + // then update your instances manually by using CreateDeployment to run the + // update_dependencies stack command or manually running yum (Amazon Linux) + // or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true, to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string"` + + // A LifeCycleEventConfiguration object that specifies the Shutdown event configuration. + LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"` + + // The layer name. + Name *string `type:"string"` + + // An array of Package objects that describe the layer's packages. + Packages []*string `type:"list"` + + // The layer short name. + Shortname *string `type:"string"` + + // The layer stack ID. + StackId *string `type:"string"` + + // The layer type. + Type *string `type:"string" enum:"LayerType"` + + // Whether the layer uses Amazon EBS-optimized instances. 
+ UseEbsOptimizedInstances *bool `type:"boolean"` + + // A VolumeConfigurations object that describes the layer's Amazon EBS volumes. + VolumeConfigurations []*VolumeConfiguration `type:"list"` +} + +// String returns the string representation +func (s Layer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Layer) GoString() string { + return s.String() +} + +// Specifies the lifecycle event configuration +type LifecycleEventConfiguration struct { + _ struct{} `type:"structure"` + + // A ShutdownEventConfiguration object that specifies the Shutdown event configuration. + Shutdown *ShutdownEventConfiguration `type:"structure"` +} + +// String returns the string representation +func (s LifecycleEventConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleEventConfiguration) GoString() string { + return s.String() +} + +// Describes a layer's load-based auto scaling configuration. +type LoadBasedAutoScalingConfiguration struct { + _ struct{} `type:"structure"` + + // An AutoScalingThresholds object that describes the downscaling configuration, + // which defines how and when AWS OpsWorks reduces the number of instances. + DownScaling *AutoScalingThresholds `type:"structure"` + + // Whether load-based auto scaling is enabled for the layer. + Enable *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string"` + + // An AutoScalingThresholds object that describes the upscaling configuration, + // which defines how and when AWS OpsWorks increases the number of instances. 
+ UpScaling *AutoScalingThresholds `type:"structure"` +} + +// String returns the string representation +func (s LoadBasedAutoScalingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBasedAutoScalingConfiguration) GoString() string { + return s.String() +} + +// Describes stack or user permissions. +type Permission struct { + _ struct{} `type:"structure"` + + // Whether the user can use SSH. + AllowSsh *bool `type:"boolean"` + + // Whether the user can use sudo. + AllowSudo *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) for an AWS Identity and Access Management + // (IAM) role. For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + IamUserArn *string `type:"string"` + + // The user's permission level, which must be the following: + // + // deny show deploy manage iam_only For more information on the + // permissions associated with these levels, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html) + Level *string `type:"string"` + + // A stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s Permission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Permission) GoString() string { + return s.String() +} + +// Describes an instance's RAID array. +type RaidArray struct { + _ struct{} `type:"structure"` + + // The array's Availability Zone. For more information, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + AvailabilityZone *string `type:"string"` + + // When the RAID array was created. + CreatedAt *string `type:"string"` + + // The array's Linux device. For example /dev/mdadm0. + Device *string `type:"string"` + + // The instance ID. 
+ InstanceId *string `type:"string"` + + // For PIOPS volumes, the IOPS per disk. + Iops *int64 `type:"integer"` + + // The array's mount point. + MountPoint *string `type:"string"` + + // The array name. + Name *string `type:"string"` + + // The number of disks in the array. + NumberOfDisks *int64 `type:"integer"` + + // The array ID. + RaidArrayId *string `type:"string"` + + // The RAID level (http://en.wikipedia.org/wiki/Standard_RAID_levels). + RaidLevel *int64 `type:"integer"` + + // The array's size. + Size *int64 `type:"integer"` + + // The stack ID. + StackId *string `type:"string"` + + // The volume type, standard or PIOPS. + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s RaidArray) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RaidArray) GoString() string { + return s.String() +} + +// Describes an Amazon RDS instance. +type RdsDbInstance struct { + _ struct{} `type:"structure"` + + // The instance's address. + Address *string `type:"string"` + + // The DB instance identifier. + DbInstanceIdentifier *string `type:"string"` + + // AWS OpsWorks returns *****FILTERED***** instead of the actual value. + DbPassword *string `type:"string"` + + // The master user name. + DbUser *string `type:"string"` + + // The instance's database engine. + Engine *string `type:"string"` + + // Set to true if AWS OpsWorks was unable to discover the Amazon RDS instance. + // AWS OpsWorks attempts to discover the instance only once. If this value is + // set to true, you must deregister the instance and then register it again. + MissingOnRds *bool `type:"boolean"` + + // The instance's ARN. + RdsDbInstanceArn *string `type:"string"` + + // The instance's AWS region. + Region *string `type:"string"` + + // The ID of the stack that the instance is registered with. 
+ StackId *string `type:"string"` +} + +// String returns the string representation +func (s RdsDbInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RdsDbInstance) GoString() string { + return s.String() +} + +type RebootInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstanceInput) GoString() string { + return s.String() +} + +type RebootInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RebootInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstanceOutput) GoString() string { + return s.String() +} + +// AWS OpsWorks supports five lifecycle events: setup, configuration, deploy, +// undeploy, and shutdown. For each layer, AWS OpsWorks runs a set of standard +// recipes for each event. In addition, you can provide custom recipes for any +// or all layers and events. AWS OpsWorks runs custom event recipes after the +// standard recipes. LayerCustomRecipes specifies the custom recipes for a particular +// layer to be run in response to each of the five events. +// +// To specify a recipe, use the cookbook's directory name in the repository +// followed by two colons and the recipe name, which is the recipe's file name +// without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb +// recipe in the repository's phpapp2 folder. +type Recipes struct { + _ struct{} `type:"structure"` + + // An array of custom recipe names to be run following a configure event. 
+ Configure []*string `type:"list"` + + // An array of custom recipe names to be run following a deploy event. + Deploy []*string `type:"list"` + + // An array of custom recipe names to be run following a setup event. + Setup []*string `type:"list"` + + // An array of custom recipe names to be run following a shutdown event. + Shutdown []*string `type:"list"` + + // An array of custom recipe names to be run following a undeploy event. + Undeploy []*string `type:"list"` +} + +// String returns the string representation +func (s Recipes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Recipes) GoString() string { + return s.String() +} + +type RegisterEcsClusterInput struct { + _ struct{} `type:"structure"` + + // The cluster's ARN. + EcsClusterArn *string `type:"string" required:"true"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterEcsClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterEcsClusterInput) GoString() string { + return s.String() +} + +// Contains the response to a RegisterEcsCluster request. +type RegisterEcsClusterOutput struct { + _ struct{} `type:"structure"` + + // The cluster's ARN. + EcsClusterArn *string `type:"string"` +} + +// String returns the string representation +func (s RegisterEcsClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterEcsClusterOutput) GoString() string { + return s.String() +} + +type RegisterElasticIpInput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string" required:"true"` + + // The stack ID. 
+ StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterElasticIpInput) GoString() string { + return s.String() +} + +// Contains the response to a RegisterElasticIp request. +type RegisterElasticIpOutput struct { + _ struct{} `type:"structure"` + + // The Elastic IP address. + ElasticIp *string `type:"string"` +} + +// String returns the string representation +func (s RegisterElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterElasticIpOutput) GoString() string { + return s.String() +} + +type RegisterInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance's hostname. + Hostname *string `type:"string"` + + // An InstanceIdentity object that contains the instance's identity. + InstanceIdentity *InstanceIdentity `type:"structure"` + + // The instance's private IP address. + PrivateIp *string `type:"string"` + + // The instance's public IP address. + PublicIp *string `type:"string"` + + // The instances public RSA key. This key is used to encrypt communication between + // the instance and the service. + RsaPublicKey *string `type:"string"` + + // The instances public RSA key fingerprint. + RsaPublicKeyFingerprint *string `type:"string"` + + // The ID of the stack that the instance is to be registered with. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceInput) GoString() string { + return s.String() +} + +// Contains the response to a RegisterInstanceResult request. 
+type RegisterInstanceOutput struct { + _ struct{} `type:"structure"` + + // The registered instance's AWS OpsWorks ID. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s RegisterInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceOutput) GoString() string { + return s.String() +} + +type RegisterRdsDbInstanceInput struct { + _ struct{} `type:"structure"` + + // The database password. + DbPassword *string `type:"string" required:"true"` + + // The database's master user name. + DbUser *string `type:"string" required:"true"` + + // The Amazon RDS instance's ARN. + RdsDbInstanceArn *string `type:"string" required:"true"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterRdsDbInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterRdsDbInstanceInput) GoString() string { + return s.String() +} + +type RegisterRdsDbInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterRdsDbInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterRdsDbInstanceOutput) GoString() string { + return s.String() +} + +type RegisterVolumeInput struct { + _ struct{} `type:"structure"` + + // The Amazon EBS volume ID. + Ec2VolumeId *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterVolumeInput) GoString() string { + return s.String() +} + +// Contains the response to a RegisterVolume request. 
+type RegisterVolumeOutput struct { + _ struct{} `type:"structure"` + + // The volume ID. + VolumeId *string `type:"string"` +} + +// String returns the string representation +func (s RegisterVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterVolumeOutput) GoString() string { + return s.String() +} + +// A registered instance's reported operating system. +type ReportedOs struct { + _ struct{} `type:"structure"` + + // The operating system family. + Family *string `type:"string"` + + // The operating system name. + Name *string `type:"string"` + + // The operating system version. + Version *string `type:"string"` +} + +// String returns the string representation +func (s ReportedOs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportedOs) GoString() string { + return s.String() +} + +// Describes a user's SSH information. +type SelfUserProfile struct { + _ struct{} `type:"structure"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` + + // The user's name. + Name *string `type:"string"` + + // The user's SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s SelfUserProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfUserProfile) GoString() string { + return s.String() +} + +// Describes an AWS OpsWorks service error. +type ServiceError struct { + _ struct{} `type:"structure"` + + // When the error occurred. + CreatedAt *string `type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` + + // A message that describes the error. + Message *string `type:"string"` + + // The error ID. + ServiceErrorId *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` + + // The error type. 
+ Type *string `type:"string"` +} + +// String returns the string representation +func (s ServiceError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceError) GoString() string { + return s.String() +} + +type SetLoadBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An AutoScalingThresholds object with the downscaling threshold configuration. + // If the load falls below these thresholds for a specified amount of time, + // AWS OpsWorks stops a specified number of instances. + DownScaling *AutoScalingThresholds `type:"structure"` + + // Enables load-based auto scaling for the layer. + Enable *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` + + // An AutoScalingThresholds object with the upscaling threshold configuration. + // If the load exceeds these thresholds for a specified amount of time, AWS + // OpsWorks starts a specified number of instances. + UpScaling *AutoScalingThresholds `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBasedAutoScalingInput) GoString() string { + return s.String() +} + +type SetLoadBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetLoadBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetLoadBasedAutoScalingOutput) GoString() string { + return s.String() +} + +type SetPermissionInput struct { + _ struct{} `type:"structure"` + + // The user is allowed to use SSH to communicate with the instance. + AllowSsh *bool `type:"boolean"` + + // The user is allowed to use sudo to elevate privileges. + AllowSudo *bool `type:"boolean"` + + // The user's IAM ARN. 
+ IamUserArn *string `type:"string" required:"true"` + + // The user's permission level, which must be set to one of the following strings. + // You cannot set your own permissions level. + // + // deny show deploy manage iam_only For more information on the + // permissions associated with these levels, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). + Level *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPermissionInput) GoString() string { + return s.String() +} + +type SetPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPermissionOutput) GoString() string { + return s.String() +} + +type SetTimeBasedAutoScalingInput struct { + _ struct{} `type:"structure"` + + // An AutoScalingSchedule with the instance schedule. + AutoScalingSchedule *WeeklyAutoScalingSchedule `type:"structure"` + + // The instance ID. 
+ InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetTimeBasedAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTimeBasedAutoScalingInput) GoString() string { + return s.String() +} + +type SetTimeBasedAutoScalingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTimeBasedAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTimeBasedAutoScalingOutput) GoString() string { + return s.String() +} + +// The Shutdown event configuration. +type ShutdownEventConfiguration struct { + _ struct{} `type:"structure"` + + // Whether to enable Elastic Load Balancing connection draining. For more information, + // see Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) + DelayUntilElbConnectionsDrained *bool `type:"boolean"` + + // The time, in seconds, that AWS OpsWorks will wait after triggering a Shutdown + // event before shutting down an instance. + ExecutionTimeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s ShutdownEventConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ShutdownEventConfiguration) GoString() string { + return s.String() +} + +// Contains the information required to retrieve an app or cookbook from a repository. +// For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) +// or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). +type Source struct { + _ struct{} `type:"structure"` + + // When included in a request, the parameter depends on the repository type. 
+ // + // For Amazon S3 bundles, set Password to the appropriate IAM secret access + // key. For HTTP bundles and Subversion repositories, set Password to the password. + // For more information on how to safely handle IAM credentials, see . + // + // In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual + // value. + Password *string `type:"string"` + + // The application's version. AWS OpsWorks enables you to easily deploy new + // versions of an application. One of the simplest approaches is to have branches + // or revisions in your repository that represent different versions that can + // potentially be deployed. + Revision *string `type:"string"` + + // In requests, the repository's SSH key. + // + // In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual + // value. + SshKey *string `type:"string"` + + // The repository type. + Type *string `type:"string" enum:"SourceType"` + + // The source URL. + Url *string `type:"string"` + + // This parameter depends on the repository type. + // + // For Amazon S3 bundles, set Username to the appropriate IAM access key ID. + // For HTTP bundles, Git repositories, and Subversion repositories, set Username + // to the user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s Source) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Source) GoString() string { + return s.String() +} + +// Describes an app's SSL configuration. +type SslConfiguration struct { + _ struct{} `type:"structure"` + + // The contents of the certificate's domain.crt file. + Certificate *string `type:"string" required:"true"` + + // Optional. Can be used to specify an intermediate certificate authority key + // or client authentication. + Chain *string `type:"string"` + + // The private key; the contents of the certificate's domain.kex file. 
+ PrivateKey *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SslConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SslConfiguration) GoString() string { + return s.String() +} + +// Describes a stack. +type Stack struct { + _ struct{} `type:"structure"` + + // The agent version. This parameter is set to LATEST for auto-update. or a + // version number for a fixed agent version. + AgentVersion *string `type:"string"` + + // The stack's ARN. + Arn *string `type:"string"` + + // The stack's attributes. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version. For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // The configuration manager. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // The date when the stack was created. + CreatedAt *string `type:"string"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A JSON object that contains user-defined attributes to be added to the stack + // configuration and deployment attributes. You can use custom JSON to override + // the corresponding default stack configuration attribute values or to pass + // data to recipes. 
The string should be in the following format and must escape + // characters such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The stack's default Availability Zone. For more information, see Regions + // and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + DefaultAvailabilityZone *string `type:"string"` + + // The ARN of an IAM profile that is the default profile for all of the stack's + // EC2 instances. For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's default operating system. + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the stack, but you can override it when you create an instance. For more + // information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key pair for the stack's instances. You can override + // this value when you create or update an instance. + DefaultSshKeyName *string `type:"string"` + + // The default subnet ID; applicable only if the stack is running in a VPC. + DefaultSubnetId *string `type:"string"` + + // The stack host name theme, with spaces replaced by underscores. + HostnameTheme *string `type:"string"` + + // The stack name. + Name *string `type:"string"` + + // The stack AWS region, such as "us-east-1". 
For more information about AWS + // regions, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` + + // The stack AWS Identity and Access Management (IAM) role. + ServiceRoleArn *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` + + // Whether the stack uses custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether the stack automatically associates the AWS OpsWorks built-in security + // groups with the stack's layers. + UseOpsworksSecurityGroups *bool `type:"boolean"` + + // The VPC ID; applicable only if the stack is running in a VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s Stack) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stack) GoString() string { + return s.String() +} + +// Describes the configuration manager. +type StackConfigurationManager struct { + _ struct{} `type:"structure"` + + // The name. This parameter must be set to "Chef". + Name *string `type:"string"` + + // The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux + // stacks, and to 12.2 for Windows stacks. The default value for Linux stacks + // is 11.4. + Version *string `type:"string"` +} + +// String returns the string representation +func (s StackConfigurationManager) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackConfigurationManager) GoString() string { + return s.String() +} + +// Summarizes the number of layers, instances, and apps in a stack. +type StackSummary struct { + _ struct{} `type:"structure"` + + // The number of apps. + AppsCount *int64 `type:"integer"` + + // The stack's ARN. + Arn *string `type:"string"` + + // An InstancesCount object with the number of instances in each status. + InstancesCount *InstancesCount `type:"structure"` + + // The number of layers. 
+ LayersCount *int64 `type:"integer"` + + // The stack name. + Name *string `type:"string"` + + // The stack ID. + StackId *string `type:"string"` +} + +// String returns the string representation +func (s StackSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackSummary) GoString() string { + return s.String() +} + +type StartInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceInput) GoString() string { + return s.String() +} + +type StartInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceOutput) GoString() string { + return s.String() +} + +type StartStackInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartStackInput) GoString() string { + return s.String() +} + +type StartStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartStackOutput) GoString() string { + return s.String() +} + +type StopInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. 
+ InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstanceInput) GoString() string { + return s.String() +} + +type StopInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstanceOutput) GoString() string { + return s.String() +} + +type StopStackInput struct { + _ struct{} `type:"structure"` + + // The stack ID. + StackId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopStackInput) GoString() string { + return s.String() +} + +type StopStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopStackOutput) GoString() string { + return s.String() +} + +// Contains the data needed by RDP clients such as the Microsoft Remote Desktop +// Connection to log in to the instance. +type TemporaryCredential struct { + _ struct{} `type:"structure"` + + // The instance's AWS OpsWorks ID. + InstanceId *string `type:"string"` + + // The password. + Password *string `type:"string"` + + // The user name. + Username *string `type:"string"` + + // The length of time (in minutes) that the grant is valid. When the grant expires, + // at the end of this period, the user will no longer be able to use the credentials + // to log in. If they are logged in at the time, they will be automatically + // logged out. 
+ ValidForInMinutes *int64 `type:"integer"` +} + +// String returns the string representation +func (s TemporaryCredential) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TemporaryCredential) GoString() string { + return s.String() +} + +// Describes an instance's time-based auto scaling configuration. +type TimeBasedAutoScalingConfiguration struct { + _ struct{} `type:"structure"` + + // A WeeklyAutoScalingSchedule object with the instance schedule. + AutoScalingSchedule *WeeklyAutoScalingSchedule `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s TimeBasedAutoScalingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeBasedAutoScalingConfiguration) GoString() string { + return s.String() +} + +type UnassignInstanceInput struct { + _ struct{} `type:"structure"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UnassignInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignInstanceInput) GoString() string { + return s.String() +} + +type UnassignInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnassignInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignInstanceOutput) GoString() string { + return s.String() +} + +type UnassignVolumeInput struct { + _ struct{} `type:"structure"` + + // The volume ID. 
+ VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UnassignVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignVolumeInput) GoString() string { + return s.String() +} + +type UnassignVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnassignVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignVolumeOutput) GoString() string { + return s.String() +} + +type UpdateAppInput struct { + _ struct{} `type:"structure"` + + // The app ID. + AppId *string `type:"string" required:"true"` + + // A Source object that specifies the app repository. + AppSource *Source `type:"structure"` + + // One or more user-defined key/value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // The app's data sources. + DataSources []*DataSource `type:"list"` + + // A description of the app. + Description *string `type:"string"` + + // The app's virtual host settings, with multiple domains separated by commas. + // For example: 'www.example.com, example.com' + Domains []*string `type:"list"` + + // Whether SSL is enabled for the app. + EnableSsl *bool `type:"boolean"` + + // An array of EnvironmentVariable objects that specify environment variables + // to be associated with the app. After you deploy the app, these variables + // are defined on the associated app server instances.For more information, + // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). + // + // There is no specific limit on the number of environment variables. 
However, + // the size of the associated data structure - which includes the variables' + // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). + // This limit should accommodate most if not all use cases. Exceeding it will + // cause an exception with the message, "Environment: is too large (maximum + // is 10KB)." + // + // This parameter is supported only by Chef 11.10 stacks. If you have specified + // one or more environment variables, you cannot modify the stack's Chef version. + Environment []*EnvironmentVariable `type:"list"` + + // The app name. + Name *string `type:"string"` + + // An SslConfiguration object with the SSL configuration. + SslConfiguration *SslConfiguration `type:"structure"` + + // The app type. + Type *string `type:"string" enum:"AppType"` +} + +// String returns the string representation +func (s UpdateAppInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAppInput) GoString() string { + return s.String() +} + +type UpdateAppOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAppOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAppOutput) GoString() string { + return s.String() +} + +type UpdateElasticIpInput struct { + _ struct{} `type:"structure"` + + // The address. + ElasticIp *string `type:"string" required:"true"` + + // The new name. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s UpdateElasticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticIpInput) GoString() string { + return s.String() +} + +type UpdateElasticIpOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateElasticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateElasticIpOutput) GoString() string { + return s.String() +} + +type UpdateInstanceInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // INHERIT - Use the stack's default agent version setting. version_number + // - Use the specified agent version. This value overrides the stack's default + // setting. To update the agent version, you must edit the instance configuration + // and specify a new version. AWS OpsWorks then automatically installs that + // version on the instance. The default setting is INHERIT. To specify an agent + // version, you must use the complete version number, not the abbreviated number + // shown on the console. For a list of available agent version numbers, call + // DescribeAgentVersions. + AgentVersion *string `type:"string"` + + // A custom AMI ID to be used to create the instance. The AMI must be based + // on one of the supported operating systems. For more information, see Instances + // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html) + // + // If you specify a custom AMI, you must set Os to Custom. + AmiId *string `type:"string"` + + // The instance architecture. Instance types do not necessarily support both + // architectures. 
For a list of the architectures that are supported by the + // different instance types, see Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + Architecture *string `type:"string" enum:"Architecture"` + + // For load-based or time-based instances, the type. Windows stacks can use + // only time-based instances. + AutoScalingType *string `type:"string" enum:"AutoScalingType"` + + // This property cannot be updated. + EbsOptimized *bool `type:"boolean"` + + // The instance host name. + Hostname *string `type:"string"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or by + // manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true, to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The instance ID. + InstanceId *string `type:"string" required:"true"` + + // The instance type, such as t2.micro. For a list of supported instance types, + // open the stack in the console, choose Instances, and choose + Instance. The + // Size list contains the currently supported types. For more information, see + // Instance Families and Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // The parameter values that you use to specify the various types are in the + // API Name column of the Available Instance Types table. + InstanceType *string `type:"string"` + + // The instance's layer IDs. + LayerIds []*string `type:"list"` + + // The instance's operating system, which must be set to one of the following. 
+ // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more + // information on the supported operating systems, see AWS OpsWorks Operating + // Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // + // The default option is the current Amazon Linux version. If you set this + // parameter to Custom, you must use the AmiId parameter to specify the custom + // AMI that you want to use. For more information on the supported operating + // systems, see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // For more information on how to use custom AMIs with OpsWorks, see Using Custom + // AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // You can specify a different Linux operating system for the updated stack, + // but you cannot change from Linux to Windows or Windows to Linux. + Os *string `type:"string"` + + // The instance's Amazon EC2 key name. + SshKeyName *string `type:"string"` +} + +// String returns the string representation +func (s UpdateInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInstanceInput) GoString() string { + return s.String() +} + +type UpdateInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInstanceOutput) GoString() string { + return s.String() +} + +type UpdateLayerInput struct { + _ struct{} `type:"structure"` + + // One or more user-defined key/value pairs to be added to the stack attributes. 
+ Attributes map[string]*string `type:"map"` + + // Whether to automatically assign an Elastic IP address (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) + // to the layer's instances. For more information, see How to Edit a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignElasticIps *bool `type:"boolean"` + + // For stacks that are running in a VPC, whether to automatically assign a public + // IP address to the layer's instances. For more information, see How to Edit + // a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-edit.html). + AutoAssignPublicIps *bool `type:"boolean"` + + // The ARN of an IAM profile to be used for all of the layer's EC2 instances. + // For more information about IAM ARNs, see Using Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + CustomInstanceProfileArn *string `type:"string"` + + // A JSON-formatted string containing custom stack configuration and deployment + // attributes to be installed on the layer's instances. For more information, + // see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). + CustomJson *string `type:"string"` + + // A LayerCustomRecipes object that specifies the layer's custom recipes. + CustomRecipes *Recipes `type:"structure"` + + // An array containing the layer's custom security group IDs. + CustomSecurityGroupIds []*string `type:"list"` + + // Whether to disable auto healing for the layer. + EnableAutoHealing *bool `type:"boolean"` + + // Whether to install operating system and package updates when the instance + // boots. The default value is true. To control when updates are installed, + // set this value to false. 
You must then update your instances manually by + // using CreateDeployment to run the update_dependencies stack command or manually + // running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. + // + // We strongly recommend using the default value of true, to ensure that your + // instances have the latest security updates. + InstallUpdatesOnBoot *bool `type:"boolean"` + + // The layer ID. + LayerId *string `type:"string" required:"true"` + + LifecycleEventConfiguration *LifecycleEventConfiguration `type:"structure"` + + // The layer name, which is used by the console. + Name *string `type:"string"` + + // An array of Package objects that describe the layer's packages. + Packages []*string `type:"list"` + + // For custom layers only, use this parameter to specify the layer's short name, + // which is used internally by AWS OpsWorksand by Chef. The short name is also + // used as the name for the directory where your app files are installed. It + // can have a maximum of 200 characters and must be in the following format: + // /\A[a-z0-9\-\_\.]+\Z/. + // + // The built-in layers' short names are defined by AWS OpsWorks. For more information, + // see the Layer Reference (http://docs.aws.amazon.com/opsworks/latest/userguide/layers.html) + Shortname *string `type:"string"` + + // Whether to use Amazon EBS-optimized instances. + UseEbsOptimizedInstances *bool `type:"boolean"` + + // A VolumeConfigurations object that describes the layer's Amazon EBS volumes. 
+ VolumeConfigurations []*VolumeConfiguration `type:"list"` +} + +// String returns the string representation +func (s UpdateLayerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLayerInput) GoString() string { + return s.String() +} + +type UpdateLayerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateLayerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLayerOutput) GoString() string { + return s.String() +} + +type UpdateMyUserProfileInput struct { + _ struct{} `type:"structure"` + + // The user's SSH public key. + SshPublicKey *string `type:"string"` +} + +// String returns the string representation +func (s UpdateMyUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMyUserProfileInput) GoString() string { + return s.String() +} + +type UpdateMyUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateMyUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMyUserProfileOutput) GoString() string { + return s.String() +} + +type UpdateRdsDbInstanceInput struct { + _ struct{} `type:"structure"` + + // The database password. + DbPassword *string `type:"string"` + + // The master user name. + DbUser *string `type:"string"` + + // The Amazon RDS instance's ARN. 
+ RdsDbInstanceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRdsDbInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRdsDbInstanceInput) GoString() string { + return s.String() +} + +type UpdateRdsDbInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRdsDbInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRdsDbInstanceOutput) GoString() string { + return s.String() +} + +type UpdateStackInput struct { + _ struct{} `type:"structure"` + + // The default AWS OpsWorks agent version. You have the following options: + // + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // installs new agent versions on the stack's instances as soon as they are + // available. Fixed version - Set this parameter to your preferred agent version. + // To update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. The default setting is LATEST. To specify an agent version, + // you must use the complete version number, not the abbreviated number shown + // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. + AgentVersion *string `type:"string"` + + // One or more user-defined key-value pairs to be added to the stack attributes. + Attributes map[string]*string `type:"map"` + + // A ChefConfiguration object that specifies whether to enable Berkshelf and + // the Berkshelf version on Chef 11.10 stacks. 
For more information, see Create + // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + ChefConfiguration *ChefConfiguration `type:"structure"` + + // The configuration manager. When you update a stack, we recommend that you + // use the configuration manager to specify the Chef version: 12, 11.10, or + // 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for + // Linux stacks is currently 11.4. + ConfigurationManager *StackConfigurationManager `type:"structure"` + + // Contains the information required to retrieve an app or cookbook from a repository. + // For more information, see Creating Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html) + // or Custom Recipes and Cookbooks (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook.html). + CustomCookbooksSource *Source `type:"structure"` + + // A string that contains user-defined, custom JSON. It can be used to override + // the corresponding default stack configuration JSON values or to pass data + // to recipes. The string should be in the following format and escape characters + // such as '"': + // + // "{\"key1\": \"value1\", \"key2\": \"value2\",...}" + // + // For more information on custom JSON, see Use Custom JSON to Modify the Stack + // Configuration Attributes (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-json.html). + CustomJson *string `type:"string"` + + // The stack's default Availability Zone, which must be in the stack's region. + // For more information, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html). + // If you also specify a value for DefaultSubnetId, the subnet must be in the + // same zone. For more information, see CreateStack. + DefaultAvailabilityZone *string `type:"string"` + + // The ARN of an IAM profile that is the default profile for all of the stack's + // EC2 instances. 
For more information about IAM ARNs, see Using Identifiers + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html). + DefaultInstanceProfileArn *string `type:"string"` + + // The stack's operating system, which must be set to one of the following: + // + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify + // the custom AMI you want to use when you create instances. For more information + // on how to use custom AMIs with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // The default option is the stack's current operating system. For more information + // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + DefaultOs *string `type:"string"` + + // The default root device type. This value is used by default for all instances + // in the stack, but you can override it when you create an instance. For more + // information, see Storage for the Root Device (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). + DefaultRootDeviceType *string `type:"string" enum:"RootDeviceType"` + + // A default Amazon EC2 key-pair name. The default value is none. If you specify + // a key-pair name, AWS OpsWorks installs the public key on the instance and + // you can use the private key with an SSH client to log in to the instance. + // For more information, see Using SSH to Communicate with an Instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-ssh.html) + // and Managing SSH Access (http://docs.aws.amazon.com/opsworks/latest/userguide/security-ssh-access.html). 
+ // You can override this setting by specifying a different key pair, or no key + // pair, when you create an instance (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). + DefaultSshKeyName *string `type:"string"` + + // The stack's default VPC subnet ID. This parameter is required if you specify + // a value for the VpcId parameter. All instances are launched into this subnet + // unless you specify otherwise when you create the instance. If you also specify + // a value for DefaultAvailabilityZone, the subnet must be in that zone. For + // information on default values and when this parameter is required, see the + // VpcId parameter description. + DefaultSubnetId *string `type:"string"` + + // The stack's new host name theme, with spaces replaced by underscores. The + // theme is used to generate host names for the stack's instances. By default, + // HostnameTheme is set to Layer_Dependent, which creates host names by appending + // integers to the layer's short name. The other themes are: + // + // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan + // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats + // To obtain a generated host name, call GetHostNameSuggestion, which returns + // a host name based on the current theme. + HostnameTheme *string `type:"string"` + + // The stack's new name. + Name *string `type:"string"` + + // Do not use this parameter. You cannot update a stack's service role. + ServiceRoleArn *string `type:"string"` + + // The stack ID. + StackId *string `type:"string" required:"true"` + + // Whether the stack uses custom cookbooks. + UseCustomCookbooks *bool `type:"boolean"` + + // Whether to associate the AWS OpsWorks built-in security groups with the stack's + // layers. + // + // AWS OpsWorks provides a standard set of built-in security groups, one for + // each layer, which are associated with layers by default. 
UseOpsworksSecurityGroups + // allows you to provide your own custom security groups instead of using the + // built-in groups. UseOpsworksSecurityGroups has the following settings: + // + // True - AWS OpsWorks automatically associates the appropriate built-in security + // group with each layer (default setting). You can associate additional security + // groups with a layer after you create it, but you cannot delete the built-in + // security group. False - AWS OpsWorks does not associate built-in security + // groups with layers. You must create appropriate EC2 security groups and associate + // a security group with each layer that you create. However, you can still + // manually associate a built-in security group with a layer on. Custom security + // groups are required only for those layers that need custom settings. For + // more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + UseOpsworksSecurityGroups *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackInput) GoString() string { + return s.String() +} + +type UpdateStackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateStackOutput) GoString() string { + return s.String() +} + +type UpdateUserProfileInput struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. For more information, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user IAM ARN. 
+ IamUserArn *string `type:"string" required:"true"` + + // The user's new SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], + // '-', and '_'. If the specified name includes other punctuation marks, AWS + // OpsWorks removes them. For example, my.name will be changed to myname. If + // you do not specify an SSH user name, AWS OpsWorks generates one from the + // IAM user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s UpdateUserProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserProfileInput) GoString() string { + return s.String() +} + +type UpdateUserProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserProfileOutput) GoString() string { + return s.String() +} + +type UpdateVolumeInput struct { + _ struct{} `type:"structure"` + + // The new mount point. + MountPoint *string `type:"string"` + + // The new name. + Name *string `type:"string"` + + // The volume ID. + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVolumeInput) GoString() string { + return s.String() +} + +type UpdateVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVolumeOutput) GoString() string { + return s.String() +} + +// Describes a user's SSH information. 
+type UserProfile struct { + _ struct{} `type:"structure"` + + // Whether users can specify their own SSH public key through the My Settings + // page. For more information, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/security-settingsshkey.html). + AllowSelfManagement *bool `type:"boolean"` + + // The user's IAM ARN. + IamUserArn *string `type:"string"` + + // The user's name. + Name *string `type:"string"` + + // The user's SSH public key. + SshPublicKey *string `type:"string"` + + // The user's SSH user name. + SshUsername *string `type:"string"` +} + +// String returns the string representation +func (s UserProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserProfile) GoString() string { + return s.String() +} + +// Describes an instance's Amazon EBS volume. +type Volume struct { + _ struct{} `type:"structure"` + + // The volume Availability Zone. For more information, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + AvailabilityZone *string `type:"string"` + + // The device name. + Device *string `type:"string"` + + // The Amazon EC2 volume ID. + Ec2VolumeId *string `type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` + + // For PIOPS volumes, the IOPS per disk. + Iops *int64 `type:"integer"` + + // The volume mount point. For example "/dev/sdh". + MountPoint *string `type:"string"` + + // The volume name. + Name *string `type:"string"` + + // The RAID array ID. + RaidArrayId *string `type:"string"` + + // The AWS region. For more information about AWS regions, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html). + Region *string `type:"string"` + + // The volume size. + Size *int64 `type:"integer"` + + // The value returned by DescribeVolumes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVolumes.html). 
+ Status *string `type:"string"` + + // The volume ID. + VolumeId *string `type:"string"` + + // The volume type, standard or PIOPS. + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Describes an Amazon EBS volume configuration. +type VolumeConfiguration struct { + _ struct{} `type:"structure"` + + // For PIOPS volumes, the IOPS per disk. + Iops *int64 `type:"integer"` + + // The volume mount point. For example "/dev/sdh". + MountPoint *string `type:"string" required:"true"` + + // The number of disks in the volume. + NumberOfDisks *int64 `type:"integer" required:"true"` + + // The volume RAID level (http://en.wikipedia.org/wiki/Standard_RAID_levels). + RaidLevel *int64 `type:"integer"` + + // The volume size. + Size *int64 `type:"integer" required:"true"` + + // The volume type: + // + // standard - Magnetic io1 - Provisioned IOPS (SSD) gp2 - General Purpose + // (SSD) + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s VolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeConfiguration) GoString() string { + return s.String() +} + +// Describes a time-based instance's auto scaling schedule. The schedule consists +// of a set of key-value pairs. +// +// The key is the time period (a UTC hour) and must be an integer from 0 - +// 23. The value indicates whether the instance should be online or offline +// for the specified period, and must be set to "on" or "off" The default setting +// for all time periods is off, so you use the following parameters primarily +// to specify the online periods. You don't have to explicitly specify offline +// periods unless you want to change an online period to an offline period. 
+// +// The following example specifies that the instance should be online for four +// hours, from UTC 1200 - 1600. It will be off for the remainder of the day. +// +// { "12":"on", "13":"on", "14":"on", "15":"on" } +type WeeklyAutoScalingSchedule struct { + _ struct{} `type:"structure"` + + // The schedule for Friday. + Friday map[string]*string `type:"map"` + + // The schedule for Monday. + Monday map[string]*string `type:"map"` + + // The schedule for Saturday. + Saturday map[string]*string `type:"map"` + + // The schedule for Sunday. + Sunday map[string]*string `type:"map"` + + // The schedule for Thursday. + Thursday map[string]*string `type:"map"` + + // The schedule for Tuesday. + Tuesday map[string]*string `type:"map"` + + // The schedule for Wednesday. + Wednesday map[string]*string `type:"map"` +} + +// String returns the string representation +func (s WeeklyAutoScalingSchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WeeklyAutoScalingSchedule) GoString() string { + return s.String() +} + +const ( + // @enum AppAttributesKeys + AppAttributesKeysDocumentRoot = "DocumentRoot" + // @enum AppAttributesKeys + AppAttributesKeysRailsEnv = "RailsEnv" + // @enum AppAttributesKeys + AppAttributesKeysAutoBundleOnDeploy = "AutoBundleOnDeploy" + // @enum AppAttributesKeys + AppAttributesKeysAwsFlowRubySettings = "AwsFlowRubySettings" +) + +const ( + // @enum AppType + AppTypeAwsFlowRuby = "aws-flow-ruby" + // @enum AppType + AppTypeJava = "java" + // @enum AppType + AppTypeRails = "rails" + // @enum AppType + AppTypePhp = "php" + // @enum AppType + AppTypeNodejs = "nodejs" + // @enum AppType + AppTypeStatic = "static" + // @enum AppType + AppTypeOther = "other" +) + +const ( + // @enum Architecture + ArchitectureX8664 = "x86_64" + // @enum Architecture + ArchitectureI386 = "i386" +) + +const ( + // @enum AutoScalingType + AutoScalingTypeLoad = "load" + // @enum AutoScalingType + AutoScalingTypeTimer 
= "timer" +) + +const ( + // @enum DeploymentCommandName + DeploymentCommandNameInstallDependencies = "install_dependencies" + // @enum DeploymentCommandName + DeploymentCommandNameUpdateDependencies = "update_dependencies" + // @enum DeploymentCommandName + DeploymentCommandNameUpdateCustomCookbooks = "update_custom_cookbooks" + // @enum DeploymentCommandName + DeploymentCommandNameExecuteRecipes = "execute_recipes" + // @enum DeploymentCommandName + DeploymentCommandNameConfigure = "configure" + // @enum DeploymentCommandName + DeploymentCommandNameSetup = "setup" + // @enum DeploymentCommandName + DeploymentCommandNameDeploy = "deploy" + // @enum DeploymentCommandName + DeploymentCommandNameRollback = "rollback" + // @enum DeploymentCommandName + DeploymentCommandNameStart = "start" + // @enum DeploymentCommandName + DeploymentCommandNameStop = "stop" + // @enum DeploymentCommandName + DeploymentCommandNameRestart = "restart" + // @enum DeploymentCommandName + DeploymentCommandNameUndeploy = "undeploy" +) + +const ( + // @enum LayerAttributesKeys + LayerAttributesKeysEcsClusterArn = "EcsClusterArn" + // @enum LayerAttributesKeys + LayerAttributesKeysEnableHaproxyStats = "EnableHaproxyStats" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsUrl = "HaproxyStatsUrl" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsUser = "HaproxyStatsUser" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyStatsPassword = "HaproxyStatsPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyHealthCheckUrl = "HaproxyHealthCheckUrl" + // @enum LayerAttributesKeys + LayerAttributesKeysHaproxyHealthCheckMethod = "HaproxyHealthCheckMethod" + // @enum LayerAttributesKeys + LayerAttributesKeysMysqlRootPassword = "MysqlRootPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysMysqlRootPasswordUbiquitous = "MysqlRootPasswordUbiquitous" + // @enum LayerAttributesKeys + LayerAttributesKeysGangliaUrl = "GangliaUrl" + // @enum 
LayerAttributesKeys + LayerAttributesKeysGangliaUser = "GangliaUser" + // @enum LayerAttributesKeys + LayerAttributesKeysGangliaPassword = "GangliaPassword" + // @enum LayerAttributesKeys + LayerAttributesKeysMemcachedMemory = "MemcachedMemory" + // @enum LayerAttributesKeys + LayerAttributesKeysNodejsVersion = "NodejsVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysRubyVersion = "RubyVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysRubygemsVersion = "RubygemsVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysManageBundler = "ManageBundler" + // @enum LayerAttributesKeys + LayerAttributesKeysBundlerVersion = "BundlerVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysRailsStack = "RailsStack" + // @enum LayerAttributesKeys + LayerAttributesKeysPassengerVersion = "PassengerVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysJvm = "Jvm" + // @enum LayerAttributesKeys + LayerAttributesKeysJvmVersion = "JvmVersion" + // @enum LayerAttributesKeys + LayerAttributesKeysJvmOptions = "JvmOptions" + // @enum LayerAttributesKeys + LayerAttributesKeysJavaAppServer = "JavaAppServer" + // @enum LayerAttributesKeys + LayerAttributesKeysJavaAppServerVersion = "JavaAppServerVersion" +) + +const ( + // @enum LayerType + LayerTypeAwsFlowRuby = "aws-flow-ruby" + // @enum LayerType + LayerTypeEcsCluster = "ecs-cluster" + // @enum LayerType + LayerTypeJavaApp = "java-app" + // @enum LayerType + LayerTypeLb = "lb" + // @enum LayerType + LayerTypeWeb = "web" + // @enum LayerType + LayerTypePhpApp = "php-app" + // @enum LayerType + LayerTypeRailsApp = "rails-app" + // @enum LayerType + LayerTypeNodejsApp = "nodejs-app" + // @enum LayerType + LayerTypeMemcached = "memcached" + // @enum LayerType + LayerTypeDbMaster = "db-master" + // @enum LayerType + LayerTypeMonitoringMaster = "monitoring-master" + // @enum LayerType + LayerTypeCustom = "custom" +) + +const ( + // @enum RootDeviceType + RootDeviceTypeEbs = "ebs" + // @enum RootDeviceType 
+ RootDeviceTypeInstanceStore = "instance-store" +) + +const ( + // @enum SourceType + SourceTypeGit = "git" + // @enum SourceType + SourceTypeSvn = "svn" + // @enum SourceType + SourceTypeArchive = "archive" + // @enum SourceType + SourceTypeS3 = "s3" +) + +const ( + // @enum StackAttributesKeys + StackAttributesKeysColor = "Color" +) + +const ( + // @enum VirtualizationType + VirtualizationTypeParavirtual = "paravirtual" + // @enum VirtualizationType + VirtualizationTypeHvm = "hvm" +) + +const ( + // @enum VolumeType + VolumeTypeGp2 = "gp2" + // @enum VolumeType + VolumeTypeIo1 = "io1" + // @enum VolumeType + VolumeTypeStandard = "standard" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1890 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package opsworks_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleOpsWorks_AssignInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssignInstanceInput{ + InstanceId: aws.String("String"), // Required + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.AssignInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_AssignVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssignVolumeInput{ + VolumeId: aws.String("String"), // Required + InstanceId: aws.String("String"), + } + resp, err := svc.AssignVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_AssociateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.AssociateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + InstanceId: aws.String("String"), + } + resp, err := svc.AssociateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_AttachElasticLoadBalancer() { + svc := opsworks.New(session.New()) + + params := &opsworks.AttachElasticLoadBalancerInput{ + ElasticLoadBalancerName: aws.String("String"), // Required + LayerId: aws.String("String"), // Required + } + resp, err := svc.AttachElasticLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CloneStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.CloneStackInput{ + ServiceRoleArn: aws.String("String"), // Required + SourceStackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + CloneAppIds: []*string{ + aws.String("String"), // Required + // More values... + }, + ClonePermissions: aws.Bool(true), + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultInstanceProfileArn: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + Name: aws.String("String"), + Region: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CloneStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateAppInput{ + Name: aws.String("String"), // Required + StackId: aws.String("String"), // Required + Type: aws.String("AppType"), // Required + AppSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + DataSources: []*opsworks.DataSource{ + { // Required + Arn: aws.String("String"), + DatabaseName: aws.String("String"), + Type: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + Domains: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableSsl: aws.Bool(true), + Environment: []*opsworks.EnvironmentVariable{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), // Required + Secure: aws.Bool(true), + }, + // More values... + }, + Shortname: aws.String("String"), + SslConfiguration: &opsworks.SslConfiguration{ + Certificate: aws.String("String"), // Required + PrivateKey: aws.String("String"), // Required + Chain: aws.String("String"), + }, + } + resp, err := svc.CreateApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateDeployment() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateDeploymentInput{ + Command: &opsworks.DeploymentCommand{ // Required + Name: aws.String("DeploymentCommandName"), // Required + Args: map[string][]*string{ + "Key": { // Required + aws.String("String"), // Required + // More values... + }, + // More values... + }, + }, + StackId: aws.String("String"), // Required + AppId: aws.String("String"), + Comment: aws.String("String"), + CustomJson: aws.String("String"), + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDeployment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_CreateInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateInstanceInput{ + InstanceType: aws.String("String"), // Required + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + AmiId: aws.String("String"), + Architecture: aws.String("Architecture"), + AutoScalingType: aws.String("AutoScalingType"), + AvailabilityZone: aws.String("String"), + BlockDeviceMappings: []*opsworks.BlockDeviceMapping{ + { // Required + DeviceName: aws.String("String"), + Ebs: &opsworks.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(true), + Iops: aws.Int64(1), + SnapshotId: aws.String("String"), + VolumeSize: aws.Int64(1), + VolumeType: aws.String("VolumeType"), + }, + NoDevice: aws.String("String"), + VirtualName: aws.String("String"), + }, + // More values... + }, + EbsOptimized: aws.Bool(true), + Hostname: aws.String("String"), + InstallUpdatesOnBoot: aws.Bool(true), + Os: aws.String("String"), + RootDeviceType: aws.String("RootDeviceType"), + SshKeyName: aws.String("String"), + SubnetId: aws.String("String"), + VirtualizationType: aws.String("String"), + } + resp, err := svc.CreateInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_CreateLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateLayerInput{ + Name: aws.String("String"), // Required + Shortname: aws.String("String"), // Required + StackId: aws.String("String"), // Required + Type: aws.String("LayerType"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + AutoAssignElasticIps: aws.Bool(true), + AutoAssignPublicIps: aws.Bool(true), + CustomInstanceProfileArn: aws.String("String"), + CustomJson: aws.String("String"), + CustomRecipes: &opsworks.Recipes{ + Configure: []*string{ + aws.String("String"), // Required + // More values... + }, + Deploy: []*string{ + aws.String("String"), // Required + // More values... + }, + Setup: []*string{ + aws.String("String"), // Required + // More values... + }, + Shutdown: []*string{ + aws.String("String"), // Required + // More values... + }, + Undeploy: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + CustomSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableAutoHealing: aws.Bool(true), + InstallUpdatesOnBoot: aws.Bool(true), + LifecycleEventConfiguration: &opsworks.LifecycleEventConfiguration{ + Shutdown: &opsworks.ShutdownEventConfiguration{ + DelayUntilElbConnectionsDrained: aws.Bool(true), + ExecutionTimeout: aws.Int64(1), + }, + }, + Packages: []*string{ + aws.String("String"), // Required + // More values... + }, + UseEbsOptimizedInstances: aws.Bool(true), + VolumeConfigurations: []*opsworks.VolumeConfiguration{ + { // Required + MountPoint: aws.String("String"), // Required + NumberOfDisks: aws.Int64(1), // Required + Size: aws.Int64(1), // Required + Iops: aws.Int64(1), + RaidLevel: aws.Int64(1), + VolumeType: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_CreateStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateStackInput{ + DefaultInstanceProfileArn: aws.String("String"), // Required + Name: aws.String("String"), // Required + Region: aws.String("String"), // Required + ServiceRoleArn: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + VpcId: aws.String("String"), + } + resp, err := svc.CreateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_CreateUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.CreateUserProfileInput{ + IamUserArn: aws.String("String"), // Required + AllowSelfManagement: aws.Bool(true), + SshPublicKey: aws.String("String"), + SshUsername: aws.String("String"), + } + resp, err := svc.CreateUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteAppInput{ + AppId: aws.String("String"), // Required + } + resp, err := svc.DeleteApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteInstanceInput{ + InstanceId: aws.String("String"), // Required + DeleteElasticIp: aws.Bool(true), + DeleteVolumes: aws.Bool(true), + } + resp, err := svc.DeleteInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteLayerInput{ + LayerId: aws.String("String"), // Required + } + resp, err := svc.DeleteLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DeleteStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeleteUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeleteUserProfileInput{ + IamUserArn: aws.String("String"), // Required + } + resp, err := svc.DeleteUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterEcsCluster() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterEcsClusterInput{ + EcsClusterArn: aws.String("String"), // Required + } + resp, err := svc.DeregisterEcsCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterElasticIpInput{ + ElasticIp: aws.String("String"), // Required + } + resp, err := svc.DeregisterElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.DeregisterInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterRdsDbInstanceInput{ + RdsDbInstanceArn: aws.String("String"), // Required + } + resp, err := svc.DeregisterRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DeregisterVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.DeregisterVolumeInput{ + VolumeId: aws.String("String"), // Required + } + resp, err := svc.DeregisterVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeAgentVersions() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeAgentVersionsInput{ + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeAgentVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeApps() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeAppsInput{ + AppIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeApps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeCommands() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeCommandsInput{ + CommandIds: []*string{ + aws.String("String"), // Required + // More values... + }, + DeploymentId: aws.String("String"), + InstanceId: aws.String("String"), + } + resp, err := svc.DescribeCommands(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeDeployments() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeDeploymentsInput{ + AppId: aws.String("String"), + DeploymentIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeDeployments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeEcsClusters() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeEcsClustersInput{ + EcsClusterArns: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribeEcsClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeElasticIps() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeElasticIpsInput{ + InstanceId: aws.String("String"), + Ips: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeElasticIps(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeElasticLoadBalancers() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeElasticLoadBalancersInput{ + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeElasticLoadBalancers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeInstances() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeInstancesInput{ + InstanceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + LayerId: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribeInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeLayers() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeLayersInput{ + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeLayers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeLoadBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeLoadBasedAutoScalingInput{ + LayerIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeLoadBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeMyUserProfile() { + svc := opsworks.New(session.New()) + + var params *opsworks.DescribeMyUserProfileInput + resp, err := svc.DescribeMyUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribePermissions() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribePermissionsInput{ + IamUserArn: aws.String("String"), + StackId: aws.String("String"), + } + resp, err := svc.DescribePermissions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeRaidArrays() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeRaidArraysInput{ + InstanceId: aws.String("String"), + RaidArrayIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeRaidArrays(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeRdsDbInstances() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeRdsDbInstancesInput{ + StackId: aws.String("String"), // Required + RdsDbInstanceArns: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeRdsDbInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeServiceErrors() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeServiceErrorsInput{ + InstanceId: aws.String("String"), + ServiceErrorIds: []*string{ + aws.String("String"), // Required + // More values... + }, + StackId: aws.String("String"), + } + resp, err := svc.DescribeServiceErrors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStackProvisioningParameters() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStackProvisioningParametersInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DescribeStackProvisioningParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStackSummary() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStackSummaryInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.DescribeStackSummary(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeStacks() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeStacksInput{ + StackIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeStacks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeTimeBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeTimeBasedAutoScalingInput{ + InstanceIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTimeBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeUserProfiles() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeUserProfilesInput{ + IamUserArns: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeUserProfiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DescribeVolumes() { + svc := opsworks.New(session.New()) + + params := &opsworks.DescribeVolumesInput{ + InstanceId: aws.String("String"), + RaidArrayId: aws.String("String"), + StackId: aws.String("String"), + VolumeIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DetachElasticLoadBalancer() { + svc := opsworks.New(session.New()) + + params := &opsworks.DetachElasticLoadBalancerInput{ + ElasticLoadBalancerName: aws.String("String"), // Required + LayerId: aws.String("String"), // Required + } + resp, err := svc.DetachElasticLoadBalancer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_DisassociateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.DisassociateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + } + resp, err := svc.DisassociateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_GetHostnameSuggestion() { + svc := opsworks.New(session.New()) + + params := &opsworks.GetHostnameSuggestionInput{ + LayerId: aws.String("String"), // Required + } + resp, err := svc.GetHostnameSuggestion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_GrantAccess() { + svc := opsworks.New(session.New()) + + params := &opsworks.GrantAccessInput{ + InstanceId: aws.String("String"), // Required + ValidForInMinutes: aws.Int64(1), + } + resp, err := svc.GrantAccess(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RebootInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RebootInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.RebootInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterEcsCluster() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterEcsClusterInput{ + EcsClusterArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterEcsCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterElasticIpInput{ + ElasticIp: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterInstanceInput{ + StackId: aws.String("String"), // Required + Hostname: aws.String("String"), + InstanceIdentity: &opsworks.InstanceIdentity{ + Document: aws.String("String"), + Signature: aws.String("String"), + }, + PrivateIp: aws.String("String"), + PublicIp: aws.String("String"), + RsaPublicKey: aws.String("String"), + RsaPublicKeyFingerprint: aws.String("String"), + } + resp, err := svc.RegisterInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterRdsDbInstanceInput{ + DbPassword: aws.String("String"), // Required + DbUser: aws.String("String"), // Required + RdsDbInstanceArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + } + resp, err := svc.RegisterRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_RegisterVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.RegisterVolumeInput{ + StackId: aws.String("String"), // Required + Ec2VolumeId: aws.String("String"), + } + resp, err := svc.RegisterVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_SetLoadBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetLoadBasedAutoScalingInput{ + LayerId: aws.String("String"), // Required + DownScaling: &opsworks.AutoScalingThresholds{ + Alarms: []*string{ + aws.String("String"), // Required + // More values... + }, + CpuThreshold: aws.Float64(1.0), + IgnoreMetricsTime: aws.Int64(1), + InstanceCount: aws.Int64(1), + LoadThreshold: aws.Float64(1.0), + MemoryThreshold: aws.Float64(1.0), + ThresholdsWaitTime: aws.Int64(1), + }, + Enable: aws.Bool(true), + UpScaling: &opsworks.AutoScalingThresholds{ + Alarms: []*string{ + aws.String("String"), // Required + // More values... + }, + CpuThreshold: aws.Float64(1.0), + IgnoreMetricsTime: aws.Int64(1), + InstanceCount: aws.Int64(1), + LoadThreshold: aws.Float64(1.0), + MemoryThreshold: aws.Float64(1.0), + ThresholdsWaitTime: aws.Int64(1), + }, + } + resp, err := svc.SetLoadBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_SetPermission() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetPermissionInput{ + IamUserArn: aws.String("String"), // Required + StackId: aws.String("String"), // Required + AllowSsh: aws.Bool(true), + AllowSudo: aws.Bool(true), + Level: aws.String("String"), + } + resp, err := svc.SetPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_SetTimeBasedAutoScaling() { + svc := opsworks.New(session.New()) + + params := &opsworks.SetTimeBasedAutoScalingInput{ + InstanceId: aws.String("String"), // Required + AutoScalingSchedule: &opsworks.WeeklyAutoScalingSchedule{ + Friday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Monday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Saturday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Sunday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Thursday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Tuesday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + Wednesday: map[string]*string{ + "Key": aws.String("Switch"), // Required + // More values... + }, + }, + } + resp, err := svc.SetTimeBasedAutoScaling(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_StartInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.StartInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.StartInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StartStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.StartStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.StartStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StopInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.StopInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.StopInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_StopStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.StopStackInput{ + StackId: aws.String("String"), // Required + } + resp, err := svc.StopStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_UnassignInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UnassignInstanceInput{ + InstanceId: aws.String("String"), // Required + } + resp, err := svc.UnassignInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UnassignVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.UnassignVolumeInput{ + VolumeId: aws.String("String"), // Required + } + resp, err := svc.UnassignVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateApp() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateAppInput{ + AppId: aws.String("String"), // Required + AppSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + DataSources: []*opsworks.DataSource{ + { // Required + Arn: aws.String("String"), + DatabaseName: aws.String("String"), + Type: aws.String("String"), + }, + // More values... + }, + Description: aws.String("String"), + Domains: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableSsl: aws.Bool(true), + Environment: []*opsworks.EnvironmentVariable{ + { // Required + Key: aws.String("String"), // Required + Value: aws.String("String"), // Required + Secure: aws.Bool(true), + }, + // More values... 
+ }, + Name: aws.String("String"), + SslConfiguration: &opsworks.SslConfiguration{ + Certificate: aws.String("String"), // Required + PrivateKey: aws.String("String"), // Required + Chain: aws.String("String"), + }, + Type: aws.String("AppType"), + } + resp, err := svc.UpdateApp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateElasticIp() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateElasticIpInput{ + ElasticIp: aws.String("String"), // Required + Name: aws.String("String"), + } + resp, err := svc.UpdateElasticIp(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateInstanceInput{ + InstanceId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + AmiId: aws.String("String"), + Architecture: aws.String("Architecture"), + AutoScalingType: aws.String("AutoScalingType"), + EbsOptimized: aws.Bool(true), + Hostname: aws.String("String"), + InstallUpdatesOnBoot: aws.Bool(true), + InstanceType: aws.String("String"), + LayerIds: []*string{ + aws.String("String"), // Required + // More values... + }, + Os: aws.String("String"), + SshKeyName: aws.String("String"), + } + resp, err := svc.UpdateInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateLayer() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateLayerInput{ + LayerId: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + AutoAssignElasticIps: aws.Bool(true), + AutoAssignPublicIps: aws.Bool(true), + CustomInstanceProfileArn: aws.String("String"), + CustomJson: aws.String("String"), + CustomRecipes: &opsworks.Recipes{ + Configure: []*string{ + aws.String("String"), // Required + // More values... + }, + Deploy: []*string{ + aws.String("String"), // Required + // More values... + }, + Setup: []*string{ + aws.String("String"), // Required + // More values... + }, + Shutdown: []*string{ + aws.String("String"), // Required + // More values... + }, + Undeploy: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + CustomSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + EnableAutoHealing: aws.Bool(true), + InstallUpdatesOnBoot: aws.Bool(true), + LifecycleEventConfiguration: &opsworks.LifecycleEventConfiguration{ + Shutdown: &opsworks.ShutdownEventConfiguration{ + DelayUntilElbConnectionsDrained: aws.Bool(true), + ExecutionTimeout: aws.Int64(1), + }, + }, + Name: aws.String("String"), + Packages: []*string{ + aws.String("String"), // Required + // More values... + }, + Shortname: aws.String("String"), + UseEbsOptimizedInstances: aws.Bool(true), + VolumeConfigurations: []*opsworks.VolumeConfiguration{ + { // Required + MountPoint: aws.String("String"), // Required + NumberOfDisks: aws.Int64(1), // Required + Size: aws.Int64(1), // Required + Iops: aws.Int64(1), + RaidLevel: aws.Int64(1), + VolumeType: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.UpdateLayer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateMyUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateMyUserProfileInput{ + SshPublicKey: aws.String("String"), + } + resp, err := svc.UpdateMyUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateRdsDbInstance() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateRdsDbInstanceInput{ + RdsDbInstanceArn: aws.String("String"), // Required + DbPassword: aws.String("String"), + DbUser: aws.String("String"), + } + resp, err := svc.UpdateRdsDbInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateStack() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateStackInput{ + StackId: aws.String("String"), // Required + AgentVersion: aws.String("String"), + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... 
+ }, + ChefConfiguration: &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String("String"), + ManageBerkshelf: aws.Bool(true), + }, + ConfigurationManager: &opsworks.StackConfigurationManager{ + Name: aws.String("String"), + Version: aws.String("String"), + }, + CustomCookbooksSource: &opsworks.Source{ + Password: aws.String("String"), + Revision: aws.String("String"), + SshKey: aws.String("String"), + Type: aws.String("SourceType"), + Url: aws.String("String"), + Username: aws.String("String"), + }, + CustomJson: aws.String("String"), + DefaultAvailabilityZone: aws.String("String"), + DefaultInstanceProfileArn: aws.String("String"), + DefaultOs: aws.String("String"), + DefaultRootDeviceType: aws.String("RootDeviceType"), + DefaultSshKeyName: aws.String("String"), + DefaultSubnetId: aws.String("String"), + HostnameTheme: aws.String("String"), + Name: aws.String("String"), + ServiceRoleArn: aws.String("String"), + UseCustomCookbooks: aws.Bool(true), + UseOpsworksSecurityGroups: aws.Bool(true), + } + resp, err := svc.UpdateStack(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateUserProfile() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateUserProfileInput{ + IamUserArn: aws.String("String"), // Required + AllowSelfManagement: aws.Bool(true), + SshPublicKey: aws.String("String"), + SshUsername: aws.String("String"), + } + resp, err := svc.UpdateUserProfile(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleOpsWorks_UpdateVolume() { + svc := opsworks.New(session.New()) + + params := &opsworks.UpdateVolumeInput{ + VolumeId: aws.String("String"), // Required + MountPoint: aws.String("String"), + Name: aws.String("String"), + } + resp, err := svc.UpdateVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/opsworksiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,296 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package opsworksiface provides an interface for the AWS OpsWorks. +package opsworksiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +// OpsWorksAPI is the interface type for opsworks.OpsWorks. 
+type OpsWorksAPI interface { + AssignInstanceRequest(*opsworks.AssignInstanceInput) (*request.Request, *opsworks.AssignInstanceOutput) + + AssignInstance(*opsworks.AssignInstanceInput) (*opsworks.AssignInstanceOutput, error) + + AssignVolumeRequest(*opsworks.AssignVolumeInput) (*request.Request, *opsworks.AssignVolumeOutput) + + AssignVolume(*opsworks.AssignVolumeInput) (*opsworks.AssignVolumeOutput, error) + + AssociateElasticIpRequest(*opsworks.AssociateElasticIpInput) (*request.Request, *opsworks.AssociateElasticIpOutput) + + AssociateElasticIp(*opsworks.AssociateElasticIpInput) (*opsworks.AssociateElasticIpOutput, error) + + AttachElasticLoadBalancerRequest(*opsworks.AttachElasticLoadBalancerInput) (*request.Request, *opsworks.AttachElasticLoadBalancerOutput) + + AttachElasticLoadBalancer(*opsworks.AttachElasticLoadBalancerInput) (*opsworks.AttachElasticLoadBalancerOutput, error) + + CloneStackRequest(*opsworks.CloneStackInput) (*request.Request, *opsworks.CloneStackOutput) + + CloneStack(*opsworks.CloneStackInput) (*opsworks.CloneStackOutput, error) + + CreateAppRequest(*opsworks.CreateAppInput) (*request.Request, *opsworks.CreateAppOutput) + + CreateApp(*opsworks.CreateAppInput) (*opsworks.CreateAppOutput, error) + + CreateDeploymentRequest(*opsworks.CreateDeploymentInput) (*request.Request, *opsworks.CreateDeploymentOutput) + + CreateDeployment(*opsworks.CreateDeploymentInput) (*opsworks.CreateDeploymentOutput, error) + + CreateInstanceRequest(*opsworks.CreateInstanceInput) (*request.Request, *opsworks.CreateInstanceOutput) + + CreateInstance(*opsworks.CreateInstanceInput) (*opsworks.CreateInstanceOutput, error) + + CreateLayerRequest(*opsworks.CreateLayerInput) (*request.Request, *opsworks.CreateLayerOutput) + + CreateLayer(*opsworks.CreateLayerInput) (*opsworks.CreateLayerOutput, error) + + CreateStackRequest(*opsworks.CreateStackInput) (*request.Request, *opsworks.CreateStackOutput) + + CreateStack(*opsworks.CreateStackInput) 
(*opsworks.CreateStackOutput, error) + + CreateUserProfileRequest(*opsworks.CreateUserProfileInput) (*request.Request, *opsworks.CreateUserProfileOutput) + + CreateUserProfile(*opsworks.CreateUserProfileInput) (*opsworks.CreateUserProfileOutput, error) + + DeleteAppRequest(*opsworks.DeleteAppInput) (*request.Request, *opsworks.DeleteAppOutput) + + DeleteApp(*opsworks.DeleteAppInput) (*opsworks.DeleteAppOutput, error) + + DeleteInstanceRequest(*opsworks.DeleteInstanceInput) (*request.Request, *opsworks.DeleteInstanceOutput) + + DeleteInstance(*opsworks.DeleteInstanceInput) (*opsworks.DeleteInstanceOutput, error) + + DeleteLayerRequest(*opsworks.DeleteLayerInput) (*request.Request, *opsworks.DeleteLayerOutput) + + DeleteLayer(*opsworks.DeleteLayerInput) (*opsworks.DeleteLayerOutput, error) + + DeleteStackRequest(*opsworks.DeleteStackInput) (*request.Request, *opsworks.DeleteStackOutput) + + DeleteStack(*opsworks.DeleteStackInput) (*opsworks.DeleteStackOutput, error) + + DeleteUserProfileRequest(*opsworks.DeleteUserProfileInput) (*request.Request, *opsworks.DeleteUserProfileOutput) + + DeleteUserProfile(*opsworks.DeleteUserProfileInput) (*opsworks.DeleteUserProfileOutput, error) + + DeregisterEcsClusterRequest(*opsworks.DeregisterEcsClusterInput) (*request.Request, *opsworks.DeregisterEcsClusterOutput) + + DeregisterEcsCluster(*opsworks.DeregisterEcsClusterInput) (*opsworks.DeregisterEcsClusterOutput, error) + + DeregisterElasticIpRequest(*opsworks.DeregisterElasticIpInput) (*request.Request, *opsworks.DeregisterElasticIpOutput) + + DeregisterElasticIp(*opsworks.DeregisterElasticIpInput) (*opsworks.DeregisterElasticIpOutput, error) + + DeregisterInstanceRequest(*opsworks.DeregisterInstanceInput) (*request.Request, *opsworks.DeregisterInstanceOutput) + + DeregisterInstance(*opsworks.DeregisterInstanceInput) (*opsworks.DeregisterInstanceOutput, error) + + DeregisterRdsDbInstanceRequest(*opsworks.DeregisterRdsDbInstanceInput) (*request.Request, 
*opsworks.DeregisterRdsDbInstanceOutput) + + DeregisterRdsDbInstance(*opsworks.DeregisterRdsDbInstanceInput) (*opsworks.DeregisterRdsDbInstanceOutput, error) + + DeregisterVolumeRequest(*opsworks.DeregisterVolumeInput) (*request.Request, *opsworks.DeregisterVolumeOutput) + + DeregisterVolume(*opsworks.DeregisterVolumeInput) (*opsworks.DeregisterVolumeOutput, error) + + DescribeAgentVersionsRequest(*opsworks.DescribeAgentVersionsInput) (*request.Request, *opsworks.DescribeAgentVersionsOutput) + + DescribeAgentVersions(*opsworks.DescribeAgentVersionsInput) (*opsworks.DescribeAgentVersionsOutput, error) + + DescribeAppsRequest(*opsworks.DescribeAppsInput) (*request.Request, *opsworks.DescribeAppsOutput) + + DescribeApps(*opsworks.DescribeAppsInput) (*opsworks.DescribeAppsOutput, error) + + DescribeCommandsRequest(*opsworks.DescribeCommandsInput) (*request.Request, *opsworks.DescribeCommandsOutput) + + DescribeCommands(*opsworks.DescribeCommandsInput) (*opsworks.DescribeCommandsOutput, error) + + DescribeDeploymentsRequest(*opsworks.DescribeDeploymentsInput) (*request.Request, *opsworks.DescribeDeploymentsOutput) + + DescribeDeployments(*opsworks.DescribeDeploymentsInput) (*opsworks.DescribeDeploymentsOutput, error) + + DescribeEcsClustersRequest(*opsworks.DescribeEcsClustersInput) (*request.Request, *opsworks.DescribeEcsClustersOutput) + + DescribeEcsClusters(*opsworks.DescribeEcsClustersInput) (*opsworks.DescribeEcsClustersOutput, error) + + DescribeEcsClustersPages(*opsworks.DescribeEcsClustersInput, func(*opsworks.DescribeEcsClustersOutput, bool) bool) error + + DescribeElasticIpsRequest(*opsworks.DescribeElasticIpsInput) (*request.Request, *opsworks.DescribeElasticIpsOutput) + + DescribeElasticIps(*opsworks.DescribeElasticIpsInput) (*opsworks.DescribeElasticIpsOutput, error) + + DescribeElasticLoadBalancersRequest(*opsworks.DescribeElasticLoadBalancersInput) (*request.Request, *opsworks.DescribeElasticLoadBalancersOutput) + + 
DescribeElasticLoadBalancers(*opsworks.DescribeElasticLoadBalancersInput) (*opsworks.DescribeElasticLoadBalancersOutput, error) + + DescribeInstancesRequest(*opsworks.DescribeInstancesInput) (*request.Request, *opsworks.DescribeInstancesOutput) + + DescribeInstances(*opsworks.DescribeInstancesInput) (*opsworks.DescribeInstancesOutput, error) + + DescribeLayersRequest(*opsworks.DescribeLayersInput) (*request.Request, *opsworks.DescribeLayersOutput) + + DescribeLayers(*opsworks.DescribeLayersInput) (*opsworks.DescribeLayersOutput, error) + + DescribeLoadBasedAutoScalingRequest(*opsworks.DescribeLoadBasedAutoScalingInput) (*request.Request, *opsworks.DescribeLoadBasedAutoScalingOutput) + + DescribeLoadBasedAutoScaling(*opsworks.DescribeLoadBasedAutoScalingInput) (*opsworks.DescribeLoadBasedAutoScalingOutput, error) + + DescribeMyUserProfileRequest(*opsworks.DescribeMyUserProfileInput) (*request.Request, *opsworks.DescribeMyUserProfileOutput) + + DescribeMyUserProfile(*opsworks.DescribeMyUserProfileInput) (*opsworks.DescribeMyUserProfileOutput, error) + + DescribePermissionsRequest(*opsworks.DescribePermissionsInput) (*request.Request, *opsworks.DescribePermissionsOutput) + + DescribePermissions(*opsworks.DescribePermissionsInput) (*opsworks.DescribePermissionsOutput, error) + + DescribeRaidArraysRequest(*opsworks.DescribeRaidArraysInput) (*request.Request, *opsworks.DescribeRaidArraysOutput) + + DescribeRaidArrays(*opsworks.DescribeRaidArraysInput) (*opsworks.DescribeRaidArraysOutput, error) + + DescribeRdsDbInstancesRequest(*opsworks.DescribeRdsDbInstancesInput) (*request.Request, *opsworks.DescribeRdsDbInstancesOutput) + + DescribeRdsDbInstances(*opsworks.DescribeRdsDbInstancesInput) (*opsworks.DescribeRdsDbInstancesOutput, error) + + DescribeServiceErrorsRequest(*opsworks.DescribeServiceErrorsInput) (*request.Request, *opsworks.DescribeServiceErrorsOutput) + + DescribeServiceErrors(*opsworks.DescribeServiceErrorsInput) (*opsworks.DescribeServiceErrorsOutput, error) 
+ + DescribeStackProvisioningParametersRequest(*opsworks.DescribeStackProvisioningParametersInput) (*request.Request, *opsworks.DescribeStackProvisioningParametersOutput) + + DescribeStackProvisioningParameters(*opsworks.DescribeStackProvisioningParametersInput) (*opsworks.DescribeStackProvisioningParametersOutput, error) + + DescribeStackSummaryRequest(*opsworks.DescribeStackSummaryInput) (*request.Request, *opsworks.DescribeStackSummaryOutput) + + DescribeStackSummary(*opsworks.DescribeStackSummaryInput) (*opsworks.DescribeStackSummaryOutput, error) + + DescribeStacksRequest(*opsworks.DescribeStacksInput) (*request.Request, *opsworks.DescribeStacksOutput) + + DescribeStacks(*opsworks.DescribeStacksInput) (*opsworks.DescribeStacksOutput, error) + + DescribeTimeBasedAutoScalingRequest(*opsworks.DescribeTimeBasedAutoScalingInput) (*request.Request, *opsworks.DescribeTimeBasedAutoScalingOutput) + + DescribeTimeBasedAutoScaling(*opsworks.DescribeTimeBasedAutoScalingInput) (*opsworks.DescribeTimeBasedAutoScalingOutput, error) + + DescribeUserProfilesRequest(*opsworks.DescribeUserProfilesInput) (*request.Request, *opsworks.DescribeUserProfilesOutput) + + DescribeUserProfiles(*opsworks.DescribeUserProfilesInput) (*opsworks.DescribeUserProfilesOutput, error) + + DescribeVolumesRequest(*opsworks.DescribeVolumesInput) (*request.Request, *opsworks.DescribeVolumesOutput) + + DescribeVolumes(*opsworks.DescribeVolumesInput) (*opsworks.DescribeVolumesOutput, error) + + DetachElasticLoadBalancerRequest(*opsworks.DetachElasticLoadBalancerInput) (*request.Request, *opsworks.DetachElasticLoadBalancerOutput) + + DetachElasticLoadBalancer(*opsworks.DetachElasticLoadBalancerInput) (*opsworks.DetachElasticLoadBalancerOutput, error) + + DisassociateElasticIpRequest(*opsworks.DisassociateElasticIpInput) (*request.Request, *opsworks.DisassociateElasticIpOutput) + + DisassociateElasticIp(*opsworks.DisassociateElasticIpInput) (*opsworks.DisassociateElasticIpOutput, error) + + 
GetHostnameSuggestionRequest(*opsworks.GetHostnameSuggestionInput) (*request.Request, *opsworks.GetHostnameSuggestionOutput) + + GetHostnameSuggestion(*opsworks.GetHostnameSuggestionInput) (*opsworks.GetHostnameSuggestionOutput, error) + + GrantAccessRequest(*opsworks.GrantAccessInput) (*request.Request, *opsworks.GrantAccessOutput) + + GrantAccess(*opsworks.GrantAccessInput) (*opsworks.GrantAccessOutput, error) + + RebootInstanceRequest(*opsworks.RebootInstanceInput) (*request.Request, *opsworks.RebootInstanceOutput) + + RebootInstance(*opsworks.RebootInstanceInput) (*opsworks.RebootInstanceOutput, error) + + RegisterEcsClusterRequest(*opsworks.RegisterEcsClusterInput) (*request.Request, *opsworks.RegisterEcsClusterOutput) + + RegisterEcsCluster(*opsworks.RegisterEcsClusterInput) (*opsworks.RegisterEcsClusterOutput, error) + + RegisterElasticIpRequest(*opsworks.RegisterElasticIpInput) (*request.Request, *opsworks.RegisterElasticIpOutput) + + RegisterElasticIp(*opsworks.RegisterElasticIpInput) (*opsworks.RegisterElasticIpOutput, error) + + RegisterInstanceRequest(*opsworks.RegisterInstanceInput) (*request.Request, *opsworks.RegisterInstanceOutput) + + RegisterInstance(*opsworks.RegisterInstanceInput) (*opsworks.RegisterInstanceOutput, error) + + RegisterRdsDbInstanceRequest(*opsworks.RegisterRdsDbInstanceInput) (*request.Request, *opsworks.RegisterRdsDbInstanceOutput) + + RegisterRdsDbInstance(*opsworks.RegisterRdsDbInstanceInput) (*opsworks.RegisterRdsDbInstanceOutput, error) + + RegisterVolumeRequest(*opsworks.RegisterVolumeInput) (*request.Request, *opsworks.RegisterVolumeOutput) + + RegisterVolume(*opsworks.RegisterVolumeInput) (*opsworks.RegisterVolumeOutput, error) + + SetLoadBasedAutoScalingRequest(*opsworks.SetLoadBasedAutoScalingInput) (*request.Request, *opsworks.SetLoadBasedAutoScalingOutput) + + SetLoadBasedAutoScaling(*opsworks.SetLoadBasedAutoScalingInput) (*opsworks.SetLoadBasedAutoScalingOutput, error) + + 
SetPermissionRequest(*opsworks.SetPermissionInput) (*request.Request, *opsworks.SetPermissionOutput) + + SetPermission(*opsworks.SetPermissionInput) (*opsworks.SetPermissionOutput, error) + + SetTimeBasedAutoScalingRequest(*opsworks.SetTimeBasedAutoScalingInput) (*request.Request, *opsworks.SetTimeBasedAutoScalingOutput) + + SetTimeBasedAutoScaling(*opsworks.SetTimeBasedAutoScalingInput) (*opsworks.SetTimeBasedAutoScalingOutput, error) + + StartInstanceRequest(*opsworks.StartInstanceInput) (*request.Request, *opsworks.StartInstanceOutput) + + StartInstance(*opsworks.StartInstanceInput) (*opsworks.StartInstanceOutput, error) + + StartStackRequest(*opsworks.StartStackInput) (*request.Request, *opsworks.StartStackOutput) + + StartStack(*opsworks.StartStackInput) (*opsworks.StartStackOutput, error) + + StopInstanceRequest(*opsworks.StopInstanceInput) (*request.Request, *opsworks.StopInstanceOutput) + + StopInstance(*opsworks.StopInstanceInput) (*opsworks.StopInstanceOutput, error) + + StopStackRequest(*opsworks.StopStackInput) (*request.Request, *opsworks.StopStackOutput) + + StopStack(*opsworks.StopStackInput) (*opsworks.StopStackOutput, error) + + UnassignInstanceRequest(*opsworks.UnassignInstanceInput) (*request.Request, *opsworks.UnassignInstanceOutput) + + UnassignInstance(*opsworks.UnassignInstanceInput) (*opsworks.UnassignInstanceOutput, error) + + UnassignVolumeRequest(*opsworks.UnassignVolumeInput) (*request.Request, *opsworks.UnassignVolumeOutput) + + UnassignVolume(*opsworks.UnassignVolumeInput) (*opsworks.UnassignVolumeOutput, error) + + UpdateAppRequest(*opsworks.UpdateAppInput) (*request.Request, *opsworks.UpdateAppOutput) + + UpdateApp(*opsworks.UpdateAppInput) (*opsworks.UpdateAppOutput, error) + + UpdateElasticIpRequest(*opsworks.UpdateElasticIpInput) (*request.Request, *opsworks.UpdateElasticIpOutput) + + UpdateElasticIp(*opsworks.UpdateElasticIpInput) (*opsworks.UpdateElasticIpOutput, error) + + UpdateInstanceRequest(*opsworks.UpdateInstanceInput) 
(*request.Request, *opsworks.UpdateInstanceOutput) + + UpdateInstance(*opsworks.UpdateInstanceInput) (*opsworks.UpdateInstanceOutput, error) + + UpdateLayerRequest(*opsworks.UpdateLayerInput) (*request.Request, *opsworks.UpdateLayerOutput) + + UpdateLayer(*opsworks.UpdateLayerInput) (*opsworks.UpdateLayerOutput, error) + + UpdateMyUserProfileRequest(*opsworks.UpdateMyUserProfileInput) (*request.Request, *opsworks.UpdateMyUserProfileOutput) + + UpdateMyUserProfile(*opsworks.UpdateMyUserProfileInput) (*opsworks.UpdateMyUserProfileOutput, error) + + UpdateRdsDbInstanceRequest(*opsworks.UpdateRdsDbInstanceInput) (*request.Request, *opsworks.UpdateRdsDbInstanceOutput) + + UpdateRdsDbInstance(*opsworks.UpdateRdsDbInstanceInput) (*opsworks.UpdateRdsDbInstanceOutput, error) + + UpdateStackRequest(*opsworks.UpdateStackInput) (*request.Request, *opsworks.UpdateStackOutput) + + UpdateStack(*opsworks.UpdateStackInput) (*opsworks.UpdateStackOutput, error) + + UpdateUserProfileRequest(*opsworks.UpdateUserProfileInput) (*request.Request, *opsworks.UpdateUserProfileOutput) + + UpdateUserProfile(*opsworks.UpdateUserProfileInput) (*opsworks.UpdateUserProfileOutput, error) + + UpdateVolumeRequest(*opsworks.UpdateVolumeInput) (*request.Request, *opsworks.UpdateVolumeOutput) + + UpdateVolume(*opsworks.UpdateVolumeInput) (*opsworks.UpdateVolumeOutput, error) +} + +var _ OpsWorksAPI = (*opsworks.OpsWorks)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,124 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package opsworks + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Welcome to the AWS OpsWorks API Reference. This guide provides descriptions, +// syntax, and usage examples about AWS OpsWorks actions and data types, including +// common parameters and error codes. +// +// AWS OpsWorks is an application management service that provides an integrated +// experience for overseeing the complete application lifecycle. For information +// about this product, go to the AWS OpsWorks (http://aws.amazon.com/opsworks/) +// details page. +// +// SDKs and CLI +// +// The most common way to use the AWS OpsWorks API is by using the AWS Command +// Line Interface (CLI) or by using one of the AWS SDKs to implement applications +// in your preferred language. For more information, see: +// +// AWS CLI (http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) +// AWS SDK for Java (http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html) +// AWS SDK for .NET (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm) +// AWS SDK for PHP 2 (http://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html) +// AWS SDK for Ruby (http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/OpsWorks/Client.html) +// AWS SDK for Node.js (http://aws.amazon.com/documentation/sdkforjavascript/) +// AWS SDK for Python(Boto) (http://docs.pythonboto.org/en/latest/ref/opsworks.html) +// Endpoints +// +// AWS OpsWorks supports only one endpoint, opsworks.us-east-1.amazonaws.com +// (HTTPS), so you must connect to that endpoint. You can then use the API to +// direct AWS OpsWorks to create stacks in any AWS Region. 
+// +// Chef Versions +// +// When you call CreateStack, CloneStack, or UpdateStack we recommend you use +// the ConfigurationManager parameter to specify the Chef version. The recommended +// value for Linux stacks is currently 12 (the default is 11.4). Windows stacks +// use Chef 12.2. For more information, see Chef Versions (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). +// +// You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend +// migrating your existing Linux stacks to Chef 12 as soon as possible. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type OpsWorks struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "opsworks" + +// New creates a new instance of the OpsWorks client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a OpsWorks client from just a session. +// svc := opsworks.New(mySession) +// +// // Create a OpsWorks client with additional configuration +// svc := opsworks.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *OpsWorks { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OpsWorks { + svc := &OpsWorks{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-02-18", + JSONVersion: "1.1", + TargetPrefix: "OpsWorks_20130218", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a OpsWorks operation and runs any +// custom request initialization. +func (c *OpsWorks) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/opsworks/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,290 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package opsworks + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *OpsWorks) WaitUntilAppExists(input *DescribeAppsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeApps", + Delay: 1, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "failure", + Matcher: "status", + Argument: "", + Expected: 400, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilDeploymentSuccessful(input *DescribeDeploymentsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeDeployments", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Deployments[].Status", + Expected: "successful", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Deployments[].Status", + Expected: "failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceOnline(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "shutting_down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopping", + }, + { + 
State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "booting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "rebooting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "requested", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "running_setup", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *OpsWorks) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: 
"DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "terminated", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ResourceNotFoundException", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "booting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "online", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "pending", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "rebooting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "requested", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "running_setup", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "start_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,10588 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package rds provides a client for Amazon Relational Database Service. 
+package rds + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddSourceIdentifierToSubscription = "AddSourceIdentifierToSubscription" + +// AddSourceIdentifierToSubscriptionRequest generates a request for the AddSourceIdentifierToSubscription operation. +func (c *RDS) AddSourceIdentifierToSubscriptionRequest(input *AddSourceIdentifierToSubscriptionInput) (req *request.Request, output *AddSourceIdentifierToSubscriptionOutput) { + op := &request.Operation{ + Name: opAddSourceIdentifierToSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddSourceIdentifierToSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &AddSourceIdentifierToSubscriptionOutput{} + req.Data = output + return +} + +// Adds a source identifier to an existing RDS event notification subscription. +func (c *RDS) AddSourceIdentifierToSubscription(input *AddSourceIdentifierToSubscriptionInput) (*AddSourceIdentifierToSubscriptionOutput, error) { + req, out := c.AddSourceIdentifierToSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a request for the AddTagsToResource operation. +func (c *RDS) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds metadata tags to an Amazon RDS resource. 
These tags can also be used +// with cost allocation reporting to track cost associated with Amazon RDS resources, +// or used in a Condition statement in an IAM policy for Amazon RDS. +// +// For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). +func (c *RDS) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opApplyPendingMaintenanceAction = "ApplyPendingMaintenanceAction" + +// ApplyPendingMaintenanceActionRequest generates a request for the ApplyPendingMaintenanceAction operation. +func (c *RDS) ApplyPendingMaintenanceActionRequest(input *ApplyPendingMaintenanceActionInput) (req *request.Request, output *ApplyPendingMaintenanceActionOutput) { + op := &request.Operation{ + Name: opApplyPendingMaintenanceAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ApplyPendingMaintenanceActionInput{} + } + + req = c.newRequest(op, input, output) + output = &ApplyPendingMaintenanceActionOutput{} + req.Data = output + return +} + +// Applies a pending maintenance action to a resource (for example, to a DB +// instance). +func (c *RDS) ApplyPendingMaintenanceAction(input *ApplyPendingMaintenanceActionInput) (*ApplyPendingMaintenanceActionOutput, error) { + req, out := c.ApplyPendingMaintenanceActionRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeDBSecurityGroupIngress = "AuthorizeDBSecurityGroupIngress" + +// AuthorizeDBSecurityGroupIngressRequest generates a request for the AuthorizeDBSecurityGroupIngress operation. 
+func (c *RDS) AuthorizeDBSecurityGroupIngressRequest(input *AuthorizeDBSecurityGroupIngressInput) (req *request.Request, output *AuthorizeDBSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeDBSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeDBSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeDBSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Enables ingress to a DBSecurityGroup using one of two forms of authorization. +// First, EC2 or VPC security groups can be added to the DBSecurityGroup if +// the application using the database is running on EC2 or VPC instances. Second, +// IP ranges are available if the application accessing your database is running +// on the Internet. Required parameters for this API are one of CIDR range, +// EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName +// or EC2SecurityGroupId for non-VPC). +// +// You cannot authorize ingress from an EC2 security group in one region to +// an Amazon RDS DB instance in another. You cannot authorize ingress from a +// VPC security group in one VPC to an Amazon RDS DB instance in another. For +// an overview of CIDR ranges, go to the Wikipedia Tutorial (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +func (c *RDS) AuthorizeDBSecurityGroupIngress(input *AuthorizeDBSecurityGroupIngressInput) (*AuthorizeDBSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeDBSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBClusterSnapshot = "CopyDBClusterSnapshot" + +// CopyDBClusterSnapshotRequest generates a request for the CopyDBClusterSnapshot operation. 
+func (c *RDS) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) (req *request.Request, output *CopyDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCopyDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a DB cluster. For more information on Amazon Aurora, +// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CopyDBClusterSnapshot(input *CopyDBClusterSnapshotInput) (*CopyDBClusterSnapshotOutput, error) { + req, out := c.CopyDBClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBParameterGroup = "CopyDBParameterGroup" + +// CopyDBParameterGroupRequest generates a request for the CopyDBParameterGroup operation. +func (c *RDS) CopyDBParameterGroupRequest(input *CopyDBParameterGroupInput) (req *request.Request, output *CopyDBParameterGroupOutput) { + op := &request.Operation{ + Name: opCopyDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBParameterGroupOutput{} + req.Data = output + return +} + +// Copies the specified DB parameter group. +func (c *RDS) CopyDBParameterGroup(input *CopyDBParameterGroupInput) (*CopyDBParameterGroupOutput, error) { + req, out := c.CopyDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCopyDBSnapshot = "CopyDBSnapshot" + +// CopyDBSnapshotRequest generates a request for the CopyDBSnapshot operation. 
+func (c *RDS) CopyDBSnapshotRequest(input *CopyDBSnapshotInput) (req *request.Request, output *CopyDBSnapshotOutput) { + op := &request.Operation{ + Name: opCopyDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBSnapshotOutput{} + req.Data = output + return +} + +// Copies the specified DBSnapshot. The source DB snapshot must be in the "available" +// state. +// +// If you are copying from a shared manual DB snapshot, the SourceDBSnapshotIdentifier +// must be the ARN of the shared DB snapshot. +func (c *RDS) CopyDBSnapshot(input *CopyDBSnapshotInput) (*CopyDBSnapshotOutput, error) { + req, out := c.CopyDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCopyOptionGroup = "CopyOptionGroup" + +// CopyOptionGroupRequest generates a request for the CopyOptionGroup operation. +func (c *RDS) CopyOptionGroupRequest(input *CopyOptionGroupInput) (req *request.Request, output *CopyOptionGroupOutput) { + op := &request.Operation{ + Name: opCopyOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyOptionGroupOutput{} + req.Data = output + return +} + +// Copies the specified option group. +func (c *RDS) CopyOptionGroup(input *CopyOptionGroupInput) (*CopyOptionGroupOutput, error) { + req, out := c.CopyOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBCluster = "CreateDBCluster" + +// CreateDBClusterRequest generates a request for the CreateDBCluster operation. 
+func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request.Request, output *CreateDBClusterOutput) { + op := &request.Operation{ + Name: opCreateDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterOutput{} + req.Data = output + return +} + +// Creates a new Amazon Aurora DB cluster. For more information on Amazon Aurora, +// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CreateDBCluster(input *CreateDBClusterInput) (*CreateDBClusterOutput, error) { + req, out := c.CreateDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBClusterParameterGroup = "CreateDBClusterParameterGroup" + +// CreateDBClusterParameterGroupRequest generates a request for the CreateDBClusterParameterGroup operation. +func (c *RDS) CreateDBClusterParameterGroupRequest(input *CreateDBClusterParameterGroupInput) (req *request.Request, output *CreateDBClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterParameterGroupOutput{} + req.Data = output + return +} + +// Creates a new DB cluster parameter group. +// +// Parameters in a DB cluster parameter group apply to all of the instances +// in a DB cluster. +// +// A DB cluster parameter group is initially created with the default parameters +// for the database engine used by instances in the DB cluster. To provide custom +// values for any of the parameters, you must modify the group after creating +// it using ModifyDBClusterParameterGroup. 
Once you've created a DB cluster +// parameter group, you need to associate it with your DB cluster using ModifyDBCluster. +// When you associate a new DB cluster parameter group with a running DB cluster, +// you need to reboot the DB instances in the DB cluster without failover for +// the new DB cluster parameter group and associated settings to take effect. +// +// After you create a DB cluster parameter group, you should wait at least +// 5 minutes before creating your first DB cluster that uses that DB cluster +// parameter group as the default parameter group. This allows Amazon RDS to +// fully complete the create action before the DB cluster parameter group is +// used as the default for a new DB cluster. This is especially important for +// parameters that are critical when creating the default database for a DB +// cluster, such as the character set for the default database defined by the +// character_set_database parameter. You can use the Parameter Groups option +// of the Amazon RDS console (https://console.aws.amazon.com/rds/) or the DescribeDBClusterParameters +// command to verify that your DB cluster parameter group has been created or +// modified. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CreateDBClusterParameterGroup(input *CreateDBClusterParameterGroupInput) (*CreateDBClusterParameterGroupOutput, error) { + req, out := c.CreateDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBClusterSnapshot = "CreateDBClusterSnapshot" + +// CreateDBClusterSnapshotRequest generates a request for the CreateDBClusterSnapshot operation. 
+func (c *RDS) CreateDBClusterSnapshotRequest(input *CreateDBClusterSnapshotInput) (req *request.Request, output *CreateDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCreateDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a snapshot of a DB cluster. For more information on Amazon Aurora, +// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) CreateDBClusterSnapshot(input *CreateDBClusterSnapshotInput) (*CreateDBClusterSnapshotOutput, error) { + req, out := c.CreateDBClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBInstance = "CreateDBInstance" + +// CreateDBInstanceRequest generates a request for the CreateDBInstance operation. +func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *request.Request, output *CreateDBInstanceOutput) { + op := &request.Operation{ + Name: opCreateDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBInstanceOutput{} + req.Data = output + return +} + +// Creates a new DB instance. +func (c *RDS) CreateDBInstance(input *CreateDBInstanceInput) (*CreateDBInstanceOutput, error) { + req, out := c.CreateDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBInstanceReadReplica = "CreateDBInstanceReadReplica" + +// CreateDBInstanceReadReplicaRequest generates a request for the CreateDBInstanceReadReplica operation. 
+func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadReplicaInput) (req *request.Request, output *CreateDBInstanceReadReplicaOutput) { + op := &request.Operation{ + Name: opCreateDBInstanceReadReplica, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBInstanceReadReplicaInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBInstanceReadReplicaOutput{} + req.Data = output + return +} + +// Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL +// that acts as a Read Replica of a source DB instance. +// +// All Read Replica DB instances are created as Single-AZ deployments with +// backups disabled. All other DB instance attributes (including DB security +// groups and DB parameter groups) are inherited from the source DB instance, +// except as specified below. +// +// The source DB instance must have backup retention enabled. +func (c *RDS) CreateDBInstanceReadReplica(input *CreateDBInstanceReadReplicaInput) (*CreateDBInstanceReadReplicaOutput, error) { + req, out := c.CreateDBInstanceReadReplicaRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBParameterGroup = "CreateDBParameterGroup" + +// CreateDBParameterGroupRequest generates a request for the CreateDBParameterGroup operation. +func (c *RDS) CreateDBParameterGroupRequest(input *CreateDBParameterGroupInput) (req *request.Request, output *CreateDBParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBParameterGroupOutput{} + req.Data = output + return +} + +// Creates a new DB parameter group. +// +// A DB parameter group is initially created with the default parameters for +// the database engine used by the DB instance. 
To provide custom values for +// any of the parameters, you must modify the group after creating it using +// ModifyDBParameterGroup. Once you've created a DB parameter group, you need +// to associate it with your DB instance using ModifyDBInstance. When you associate +// a new DB parameter group with a running DB instance, you need to reboot the +// DB instance without failover for the new DB parameter group and associated +// settings to take effect. +// +// After you create a DB parameter group, you should wait at least 5 minutes +// before creating your first DB instance that uses that DB parameter group +// as the default parameter group. This allows Amazon RDS to fully complete +// the create action before the parameter group is used as the default for a +// new DB instance. This is especially important for parameters that are critical +// when creating the default database for a DB instance, such as the character +// set for the default database defined by the character_set_database parameter. +// You can use the Parameter Groups option of the Amazon RDS console (https://console.aws.amazon.com/rds/) +// or the DescribeDBParameters command to verify that your DB parameter group +// has been created or modified. +func (c *RDS) CreateDBParameterGroup(input *CreateDBParameterGroupInput) (*CreateDBParameterGroupOutput, error) { + req, out := c.CreateDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSecurityGroup = "CreateDBSecurityGroup" + +// CreateDBSecurityGroupRequest generates a request for the CreateDBSecurityGroup operation. 
+func (c *RDS) CreateDBSecurityGroupRequest(input *CreateDBSecurityGroupInput) (req *request.Request, output *CreateDBSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateDBSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a new DB security group. DB security groups control access to a DB +// instance. +func (c *RDS) CreateDBSecurityGroup(input *CreateDBSecurityGroupInput) (*CreateDBSecurityGroupOutput, error) { + req, out := c.CreateDBSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSnapshot = "CreateDBSnapshot" + +// CreateDBSnapshotRequest generates a request for the CreateDBSnapshot operation. +func (c *RDS) CreateDBSnapshotRequest(input *CreateDBSnapshotInput) (req *request.Request, output *CreateDBSnapshotOutput) { + op := &request.Operation{ + Name: opCreateDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSnapshotOutput{} + req.Data = output + return +} + +// Creates a DBSnapshot. The source DBInstance must be in "available" state. +func (c *RDS) CreateDBSnapshot(input *CreateDBSnapshotInput) (*CreateDBSnapshotOutput, error) { + req, out := c.CreateDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateDBSubnetGroup = "CreateDBSubnetGroup" + +// CreateDBSubnetGroupRequest generates a request for the CreateDBSubnetGroup operation. 
+func (c *RDS) CreateDBSubnetGroupRequest(input *CreateDBSubnetGroupInput) (req *request.Request, output *CreateDBSubnetGroupOutput) { + op := &request.Operation{ + Name: opCreateDBSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDBSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDBSubnetGroupOutput{} + req.Data = output + return +} + +// Creates a new DB subnet group. DB subnet groups must contain at least one +// subnet in at least two AZs in the region. +func (c *RDS) CreateDBSubnetGroup(input *CreateDBSubnetGroupInput) (*CreateDBSubnetGroupOutput, error) { + req, out := c.CreateDBSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateEventSubscription = "CreateEventSubscription" + +// CreateEventSubscriptionRequest generates a request for the CreateEventSubscription operation. +func (c *RDS) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput) (req *request.Request, output *CreateEventSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateEventSubscriptionOutput{} + req.Data = output + return +} + +// Creates an RDS event notification subscription. This action requires a topic +// ARN (Amazon Resource Name) created by either the RDS console, the SNS console, +// or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon +// SNS and subscribe to the topic. The ARN is displayed in the SNS console. +// +// You can specify the type of source (SourceType) you want to be notified +// of, provide a list of RDS sources (SourceIds) that triggers the events, and +// provide a list of event categories (EventCategories) for events you want +// to be notified of. 
For example, you can specify SourceType = db-instance, +// SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, +// Backup. +// +// If you specify both the SourceType and SourceIds, such as SourceType = db-instance +// and SourceIdentifier = myDBInstance1, you will be notified of all the db-instance +// events for the specified source. If you specify a SourceType but do not specify +// a SourceIdentifier, you will receive notice of the events for that source +// type for all your RDS sources. If you do not specify either the SourceType +// nor the SourceIdentifier, you will be notified of events generated from all +// RDS sources belonging to your customer account. +func (c *RDS) CreateEventSubscription(input *CreateEventSubscriptionInput) (*CreateEventSubscriptionOutput, error) { + req, out := c.CreateEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreateOptionGroup = "CreateOptionGroup" + +// CreateOptionGroupRequest generates a request for the CreateOptionGroup operation. +func (c *RDS) CreateOptionGroupRequest(input *CreateOptionGroupInput) (req *request.Request, output *CreateOptionGroupOutput) { + op := &request.Operation{ + Name: opCreateOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateOptionGroupOutput{} + req.Data = output + return +} + +// Creates a new option group. You can create up to 20 option groups. +func (c *RDS) CreateOptionGroup(input *CreateOptionGroupInput) (*CreateOptionGroupOutput, error) { + req, out := c.CreateOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBCluster = "DeleteDBCluster" + +// DeleteDBClusterRequest generates a request for the DeleteDBCluster operation. 
+func (c *RDS) DeleteDBClusterRequest(input *DeleteDBClusterInput) (req *request.Request, output *DeleteDBClusterOutput) { + op := &request.Operation{ + Name: opDeleteDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBClusterOutput{} + req.Data = output + return +} + +// The DeleteDBCluster action deletes a previously provisioned DB cluster. A +// successful response from the web service indicates the request was received +// correctly. When you delete a DB cluster, all automated backups for that DB +// cluster are deleted and cannot be recovered. Manual DB cluster snapshots +// of the DB cluster to be deleted are not deleted. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DeleteDBCluster(input *DeleteDBClusterInput) (*DeleteDBClusterOutput, error) { + req, out := c.DeleteDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBClusterParameterGroup = "DeleteDBClusterParameterGroup" + +// DeleteDBClusterParameterGroupRequest generates a request for the DeleteDBClusterParameterGroup operation. +func (c *RDS) DeleteDBClusterParameterGroupRequest(input *DeleteDBClusterParameterGroupInput) (req *request.Request, output *DeleteDBClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDBClusterParameterGroupOutput{} + req.Data = output + return +} + +// Deletes a specified DB cluster parameter group. 
The DB cluster parameter +// group to be deleted cannot be associated with any DB clusters. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DeleteDBClusterParameterGroup(input *DeleteDBClusterParameterGroupInput) (*DeleteDBClusterParameterGroupOutput, error) { + req, out := c.DeleteDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBClusterSnapshot = "DeleteDBClusterSnapshot" + +// DeleteDBClusterSnapshotRequest generates a request for the DeleteDBClusterSnapshot operation. +func (c *RDS) DeleteDBClusterSnapshotRequest(input *DeleteDBClusterSnapshotInput) (req *request.Request, output *DeleteDBClusterSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteDBClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBClusterSnapshotOutput{} + req.Data = output + return +} + +// Deletes a DB cluster snapshot. If the snapshot is being copied, the copy +// operation is terminated. +// +// The DB cluster snapshot must be in the available state to be deleted. For +// more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DeleteDBClusterSnapshot(input *DeleteDBClusterSnapshotInput) (*DeleteDBClusterSnapshotOutput, error) { + req, out := c.DeleteDBClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBInstance = "DeleteDBInstance" + +// DeleteDBInstanceRequest generates a request for the DeleteDBInstance operation. 
+func (c *RDS) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *request.Request, output *DeleteDBInstanceOutput) { + op := &request.Operation{ + Name: opDeleteDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBInstanceOutput{} + req.Data = output + return +} + +// The DeleteDBInstance action deletes a previously provisioned DB instance. +// A successful response from the web service indicates the request was received +// correctly. When you delete a DB instance, all automated backups for that +// instance are deleted and cannot be recovered. Manual DB snapshots of the +// DB instance to be deleted are not deleted. +// +// If a final DB snapshot is requested the status of the RDS instance will +// be "deleting" until the DB snapshot is created. The API action DescribeDBInstance +// is used to monitor the status of this operation. The action cannot be canceled +// or reverted once submitted. +// +// Note that when a DB instance is in a failure state and has a status of 'failed', +// 'incompatible-restore', or 'incompatible-network', it can only be deleted +// when the SkipFinalSnapshot parameter is set to "true". +func (c *RDS) DeleteDBInstance(input *DeleteDBInstanceInput) (*DeleteDBInstanceOutput, error) { + req, out := c.DeleteDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBParameterGroup = "DeleteDBParameterGroup" + +// DeleteDBParameterGroupRequest generates a request for the DeleteDBParameterGroup operation. 
+func (c *RDS) DeleteDBParameterGroupRequest(input *DeleteDBParameterGroupInput) (req *request.Request, output *DeleteDBParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDBParameterGroupOutput{} + req.Data = output + return +} + +// Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted +// cannot be associated with any DB instances. +func (c *RDS) DeleteDBParameterGroup(input *DeleteDBParameterGroupInput) (*DeleteDBParameterGroupOutput, error) { + req, out := c.DeleteDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBSecurityGroup = "DeleteDBSecurityGroup" + +// DeleteDBSecurityGroupRequest generates a request for the DeleteDBSecurityGroup operation. +func (c *RDS) DeleteDBSecurityGroupRequest(input *DeleteDBSecurityGroupInput) (req *request.Request, output *DeleteDBSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDBSecurityGroupOutput{} + req.Data = output + return +} + +// Deletes a DB security group. +// +// The specified DB security group must not be associated with any DB instances. 
+func (c *RDS) DeleteDBSecurityGroup(input *DeleteDBSecurityGroupInput) (*DeleteDBSecurityGroupOutput, error) { + req, out := c.DeleteDBSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBSnapshot = "DeleteDBSnapshot" + +// DeleteDBSnapshotRequest generates a request for the DeleteDBSnapshot operation. +func (c *RDS) DeleteDBSnapshotRequest(input *DeleteDBSnapshotInput) (req *request.Request, output *DeleteDBSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDBSnapshotOutput{} + req.Data = output + return +} + +// Deletes a DBSnapshot. If the snapshot is being copied, the copy operation +// is terminated. +// +// The DBSnapshot must be in the available state to be deleted. +func (c *RDS) DeleteDBSnapshot(input *DeleteDBSnapshotInput) (*DeleteDBSnapshotOutput, error) { + req, out := c.DeleteDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDBSubnetGroup = "DeleteDBSubnetGroup" + +// DeleteDBSubnetGroupRequest generates a request for the DeleteDBSubnetGroup operation. +func (c *RDS) DeleteDBSubnetGroupRequest(input *DeleteDBSubnetGroupInput) (req *request.Request, output *DeleteDBSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteDBSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDBSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDBSubnetGroupOutput{} + req.Data = output + return +} + +// Deletes a DB subnet group. +// +// The specified database subnet group must not be associated with any DB instances. 
+func (c *RDS) DeleteDBSubnetGroup(input *DeleteDBSubnetGroupInput) (*DeleteDBSubnetGroupOutput, error) { + req, out := c.DeleteDBSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEventSubscription = "DeleteEventSubscription" + +// DeleteEventSubscriptionRequest generates a request for the DeleteEventSubscription operation. +func (c *RDS) DeleteEventSubscriptionRequest(input *DeleteEventSubscriptionInput) (req *request.Request, output *DeleteEventSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteEventSubscriptionOutput{} + req.Data = output + return +} + +// Deletes an RDS event notification subscription. +func (c *RDS) DeleteEventSubscription(input *DeleteEventSubscriptionInput) (*DeleteEventSubscriptionOutput, error) { + req, out := c.DeleteEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteOptionGroup = "DeleteOptionGroup" + +// DeleteOptionGroupRequest generates a request for the DeleteOptionGroup operation. +func (c *RDS) DeleteOptionGroupRequest(input *DeleteOptionGroupInput) (req *request.Request, output *DeleteOptionGroupOutput) { + op := &request.Operation{ + Name: opDeleteOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteOptionGroupOutput{} + req.Data = output + return +} + +// Deletes an existing option group. 
+func (c *RDS) DeleteOptionGroup(input *DeleteOptionGroupInput) (*DeleteOptionGroupOutput, error) { + req, out := c.DeleteOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a request for the DescribeAccountAttributes operation. +func (c *RDS) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAccountAttributesOutput{} + req.Data = output + return +} + +// Lists all of the attributes for a customer account. The attributes include +// Amazon RDS quotas for the account, such as the number of DB instances allowed. +// The description for a quota includes the quota name, current usage toward +// that quota, and the quota's maximum value. +// +// This command does not take any parameters. +func (c *RDS) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCertificates = "DescribeCertificates" + +// DescribeCertificatesRequest generates a request for the DescribeCertificates operation. 
+func (c *RDS) DescribeCertificatesRequest(input *DescribeCertificatesInput) (req *request.Request, output *DescribeCertificatesOutput) { + op := &request.Operation{ + Name: opDescribeCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCertificatesOutput{} + req.Data = output + return +} + +// Lists the set of CA certificates provided by Amazon RDS for this AWS account. +func (c *RDS) DescribeCertificates(input *DescribeCertificatesInput) (*DescribeCertificatesOutput, error) { + req, out := c.DescribeCertificatesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterParameterGroups = "DescribeDBClusterParameterGroups" + +// DescribeDBClusterParameterGroupsRequest generates a request for the DescribeDBClusterParameterGroups operation. +func (c *RDS) DescribeDBClusterParameterGroupsRequest(input *DescribeDBClusterParameterGroupsInput) (req *request.Request, output *DescribeDBClusterParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName +// parameter is specified, the list will contain only the description of the +// specified DB cluster parameter group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
+func (c *RDS) DescribeDBClusterParameterGroups(input *DescribeDBClusterParameterGroupsInput) (*DescribeDBClusterParameterGroupsOutput, error) { + req, out := c.DescribeDBClusterParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterParameters = "DescribeDBClusterParameters" + +// DescribeDBClusterParametersRequest generates a request for the DescribeDBClusterParameters operation. +func (c *RDS) DescribeDBClusterParametersRequest(input *DescribeDBClusterParametersInput) (req *request.Request, output *DescribeDBClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterParametersOutput{} + req.Data = output + return +} + +// Returns the detailed parameter list for a particular DB cluster parameter +// group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeDBClusterParameters(input *DescribeDBClusterParametersInput) (*DescribeDBClusterParametersOutput, error) { + req, out := c.DescribeDBClusterParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusterSnapshots = "DescribeDBClusterSnapshots" + +// DescribeDBClusterSnapshotsRequest generates a request for the DescribeDBClusterSnapshots operation. 
+func (c *RDS) DescribeDBClusterSnapshotsRequest(input *DescribeDBClusterSnapshotsInput) (req *request.Request, output *DescribeDBClusterSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterSnapshotsOutput{} + req.Data = output + return +} + +// Returns information about DB cluster snapshots. This API supports pagination. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeDBClusterSnapshots(input *DescribeDBClusterSnapshotsInput) (*DescribeDBClusterSnapshotsOutput, error) { + req, out := c.DescribeDBClusterSnapshotsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBClusters = "DescribeDBClusters" + +// DescribeDBClustersRequest generates a request for the DescribeDBClusters operation. +func (c *RDS) DescribeDBClustersRequest(input *DescribeDBClustersInput) (req *request.Request, output *DescribeDBClustersOutput) { + op := &request.Operation{ + Name: opDescribeDBClusters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClustersOutput{} + req.Data = output + return +} + +// Returns information about provisioned Aurora DB clusters. This API supports +// pagination. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
+func (c *RDS) DescribeDBClusters(input *DescribeDBClustersInput) (*DescribeDBClustersOutput, error) { + req, out := c.DescribeDBClustersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBEngineVersions = "DescribeDBEngineVersions" + +// DescribeDBEngineVersionsRequest generates a request for the DescribeDBEngineVersions operation. +func (c *RDS) DescribeDBEngineVersionsRequest(input *DescribeDBEngineVersionsInput) (req *request.Request, output *DescribeDBEngineVersionsOutput) { + op := &request.Operation{ + Name: opDescribeDBEngineVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBEngineVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBEngineVersionsOutput{} + req.Data = output + return +} + +// Returns a list of the available DB engines. +func (c *RDS) DescribeDBEngineVersions(input *DescribeDBEngineVersionsInput) (*DescribeDBEngineVersionsOutput, error) { + req, out := c.DescribeDBEngineVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBEngineVersionsPages(input *DescribeDBEngineVersionsInput, fn func(p *DescribeDBEngineVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBEngineVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBEngineVersionsOutput), lastPage) + }) +} + +const opDescribeDBInstances = "DescribeDBInstances" + +// DescribeDBInstancesRequest generates a request for the DescribeDBInstances operation. 
+func (c *RDS) DescribeDBInstancesRequest(input *DescribeDBInstancesInput) (req *request.Request, output *DescribeDBInstancesOutput) { + op := &request.Operation{ + Name: opDescribeDBInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBInstancesOutput{} + req.Data = output + return +} + +// Returns information about provisioned RDS instances. This API supports pagination. +func (c *RDS) DescribeDBInstances(input *DescribeDBInstancesInput) (*DescribeDBInstancesOutput, error) { + req, out := c.DescribeDBInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBInstancesPages(input *DescribeDBInstancesInput, fn func(p *DescribeDBInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBInstancesOutput), lastPage) + }) +} + +const opDescribeDBLogFiles = "DescribeDBLogFiles" + +// DescribeDBLogFilesRequest generates a request for the DescribeDBLogFiles operation. 
+func (c *RDS) DescribeDBLogFilesRequest(input *DescribeDBLogFilesInput) (req *request.Request, output *DescribeDBLogFilesOutput) { + op := &request.Operation{ + Name: opDescribeDBLogFiles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBLogFilesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBLogFilesOutput{} + req.Data = output + return +} + +// Returns a list of DB log files for the DB instance. +func (c *RDS) DescribeDBLogFiles(input *DescribeDBLogFilesInput) (*DescribeDBLogFilesOutput, error) { + req, out := c.DescribeDBLogFilesRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBLogFilesPages(input *DescribeDBLogFilesInput, fn func(p *DescribeDBLogFilesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBLogFilesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBLogFilesOutput), lastPage) + }) +} + +const opDescribeDBParameterGroups = "DescribeDBParameterGroups" + +// DescribeDBParameterGroupsRequest generates a request for the DescribeDBParameterGroups operation. 
+func (c *RDS) DescribeDBParameterGroupsRequest(input *DescribeDBParameterGroupsInput) (req *request.Request, output *DescribeDBParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName +// is specified, the list will contain only the description of the specified +// DB parameter group. +func (c *RDS) DescribeDBParameterGroups(input *DescribeDBParameterGroupsInput) (*DescribeDBParameterGroupsOutput, error) { + req, out := c.DescribeDBParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBParameterGroupsPages(input *DescribeDBParameterGroupsInput, fn func(p *DescribeDBParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBParameterGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBParameterGroupsOutput), lastPage) + }) +} + +const opDescribeDBParameters = "DescribeDBParameters" + +// DescribeDBParametersRequest generates a request for the DescribeDBParameters operation. 
+func (c *RDS) DescribeDBParametersRequest(input *DescribeDBParametersInput) (req *request.Request, output *DescribeDBParametersOutput) { + op := &request.Operation{ + Name: opDescribeDBParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBParametersOutput{} + req.Data = output + return +} + +// Returns the detailed parameter list for a particular DB parameter group. +func (c *RDS) DescribeDBParameters(input *DescribeDBParametersInput) (*DescribeDBParametersOutput, error) { + req, out := c.DescribeDBParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBParametersPages(input *DescribeDBParametersInput, fn func(p *DescribeDBParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBParametersOutput), lastPage) + }) +} + +const opDescribeDBSecurityGroups = "DescribeDBSecurityGroups" + +// DescribeDBSecurityGroupsRequest generates a request for the DescribeDBSecurityGroups operation. 
+func (c *RDS) DescribeDBSecurityGroupsRequest(input *DescribeDBSecurityGroupsInput) (req *request.Request, output *DescribeDBSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSecurityGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName +// is specified, the list will contain only the descriptions of the specified +// DB security group. +func (c *RDS) DescribeDBSecurityGroups(input *DescribeDBSecurityGroupsInput) (*DescribeDBSecurityGroupsOutput, error) { + req, out := c.DescribeDBSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBSecurityGroupsPages(input *DescribeDBSecurityGroupsInput, fn func(p *DescribeDBSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBSecurityGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBSecurityGroupsOutput), lastPage) + }) +} + +const opDescribeDBSnapshotAttributes = "DescribeDBSnapshotAttributes" + +// DescribeDBSnapshotAttributesRequest generates a request for the DescribeDBSnapshotAttributes operation. 
+func (c *RDS) DescribeDBSnapshotAttributesRequest(input *DescribeDBSnapshotAttributesInput) (req *request.Request, output *DescribeDBSnapshotAttributesOutput) { + op := &request.Operation{ + Name: opDescribeDBSnapshotAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBSnapshotAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSnapshotAttributesOutput{} + req.Data = output + return +} + +// Returns a list of DB snapshot attribute names and values for a manual DB +// snapshot. +// +// When sharing snapshots with other AWS accounts, DescribeDBSnapshotAttributes +// returns the restore attribute and a list of the AWS account ids that are +// authorized to copy or restore the manual DB snapshot. If all is included +// in the list of values for the restore attribute, then the manual DB snapshot +// is public and can be copied or restored by all AWS accounts. +// +// To add or remove access for an AWS account to copy or restore a manual DB +// snapshot, or to make the manual DB snapshot public or private, use the ModifyDBSnapshotAttribute +// API. +func (c *RDS) DescribeDBSnapshotAttributes(input *DescribeDBSnapshotAttributesInput) (*DescribeDBSnapshotAttributesOutput, error) { + req, out := c.DescribeDBSnapshotAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDBSnapshots = "DescribeDBSnapshots" + +// DescribeDBSnapshotsRequest generates a request for the DescribeDBSnapshots operation. 
+func (c *RDS) DescribeDBSnapshotsRequest(input *DescribeDBSnapshotsInput) (req *request.Request, output *DescribeDBSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeDBSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSnapshotsOutput{} + req.Data = output + return +} + +// Returns information about DB snapshots. This API supports pagination. +func (c *RDS) DescribeDBSnapshots(input *DescribeDBSnapshotsInput) (*DescribeDBSnapshotsOutput, error) { + req, out := c.DescribeDBSnapshotsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBSnapshotsPages(input *DescribeDBSnapshotsInput, fn func(p *DescribeDBSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBSnapshotsOutput), lastPage) + }) +} + +const opDescribeDBSubnetGroups = "DescribeDBSubnetGroups" + +// DescribeDBSubnetGroupsRequest generates a request for the DescribeDBSubnetGroups operation. 
+func (c *RDS) DescribeDBSubnetGroupsRequest(input *DescribeDBSubnetGroupsInput) (req *request.Request, output *DescribeDBSubnetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeDBSubnetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDBSubnetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBSubnetGroupsOutput{} + req.Data = output + return +} + +// Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, +// the list will contain only the descriptions of the specified DBSubnetGroup. +// +// For an overview of CIDR ranges, go to the Wikipedia Tutorial (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +func (c *RDS) DescribeDBSubnetGroups(input *DescribeDBSubnetGroupsInput) (*DescribeDBSubnetGroupsOutput, error) { + req, out := c.DescribeDBSubnetGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeDBSubnetGroupsPages(input *DescribeDBSubnetGroupsInput, fn func(p *DescribeDBSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDBSubnetGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDBSubnetGroupsOutput), lastPage) + }) +} + +const opDescribeEngineDefaultClusterParameters = "DescribeEngineDefaultClusterParameters" + +// DescribeEngineDefaultClusterParametersRequest generates a request for the DescribeEngineDefaultClusterParameters operation. 
+func (c *RDS) DescribeEngineDefaultClusterParametersRequest(input *DescribeEngineDefaultClusterParametersInput) (req *request.Request, output *DescribeEngineDefaultClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEngineDefaultClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultClusterParametersOutput{} + req.Data = output + return +} + +// Returns the default engine and system parameter information for the cluster +// database engine. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) DescribeEngineDefaultClusterParameters(input *DescribeEngineDefaultClusterParametersInput) (*DescribeEngineDefaultClusterParametersOutput, error) { + req, out := c.DescribeEngineDefaultClusterParametersRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" + +// DescribeEngineDefaultParametersRequest generates a request for the DescribeEngineDefaultParameters operation. 
+func (c *RDS) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { + op := &request.Operation{ + Name: opDescribeEngineDefaultParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"EngineDefaults.Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEngineDefaultParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEngineDefaultParametersOutput{} + req.Data = output + return +} + +// Returns the default engine and system parameter information for the specified +// database engine. +func (c *RDS) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) { + req, out := c.DescribeEngineDefaultParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(p *DescribeEngineDefaultParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEngineDefaultParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEngineDefaultParametersOutput), lastPage) + }) +} + +const opDescribeEventCategories = "DescribeEventCategories" + +// DescribeEventCategoriesRequest generates a request for the DescribeEventCategories operation. 
+func (c *RDS) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput) (req *request.Request, output *DescribeEventCategoriesOutput) { + op := &request.Operation{ + Name: opDescribeEventCategories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventCategoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventCategoriesOutput{} + req.Data = output + return +} + +// Displays a list of categories for all event source types, or, if specified, +// for a specified source type. You can see a list of the event categories and +// source types in the Events (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) +// topic in the Amazon RDS User Guide. +func (c *RDS) DescribeEventCategories(input *DescribeEventCategoriesInput) (*DescribeEventCategoriesOutput, error) { + req, out := c.DescribeEventCategoriesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEventSubscriptions = "DescribeEventSubscriptions" + +// DescribeEventSubscriptionsRequest generates a request for the DescribeEventSubscriptions operation. +func (c *RDS) DescribeEventSubscriptionsRequest(input *DescribeEventSubscriptionsInput) (req *request.Request, output *DescribeEventSubscriptionsOutput) { + op := &request.Operation{ + Name: opDescribeEventSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventSubscriptionsOutput{} + req.Data = output + return +} + +// Lists all the subscription descriptions for a customer account. The description +// for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, +// SourceID, CreationTime, and Status. 
+// +// If you specify a SubscriptionName, lists the description for that subscription. +func (c *RDS) DescribeEventSubscriptions(input *DescribeEventSubscriptionsInput) (*DescribeEventSubscriptionsOutput, error) { + req, out := c.DescribeEventSubscriptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsInput, fn func(p *DescribeEventSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventSubscriptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventSubscriptionsOutput), lastPage) + }) +} + +const opDescribeEvents = "DescribeEvents" + +// DescribeEventsRequest generates a request for the DescribeEvents operation. +func (c *RDS) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { + op := &request.Operation{ + Name: opDescribeEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventsOutput{} + req.Data = output + return +} + +// Returns events related to DB instances, DB security groups, DB snapshots, +// and DB parameter groups for the past 14 days. Events specific to a particular +// DB instance, DB security group, database snapshot, or DB parameter group +// can be obtained by providing the name as a parameter. By default, the past +// hour of events are returned. 
+func (c *RDS) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventsOutput), lastPage) + }) +} + +const opDescribeOptionGroupOptions = "DescribeOptionGroupOptions" + +// DescribeOptionGroupOptionsRequest generates a request for the DescribeOptionGroupOptions operation. +func (c *RDS) DescribeOptionGroupOptionsRequest(input *DescribeOptionGroupOptionsInput) (req *request.Request, output *DescribeOptionGroupOptionsOutput) { + op := &request.Operation{ + Name: opDescribeOptionGroupOptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOptionGroupOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOptionGroupOptionsOutput{} + req.Data = output + return +} + +// Describes all available options. 
+func (c *RDS) DescribeOptionGroupOptions(input *DescribeOptionGroupOptionsInput) (*DescribeOptionGroupOptionsOutput, error) { + req, out := c.DescribeOptionGroupOptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeOptionGroupOptionsPages(input *DescribeOptionGroupOptionsInput, fn func(p *DescribeOptionGroupOptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOptionGroupOptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOptionGroupOptionsOutput), lastPage) + }) +} + +const opDescribeOptionGroups = "DescribeOptionGroups" + +// DescribeOptionGroupsRequest generates a request for the DescribeOptionGroups operation. +func (c *RDS) DescribeOptionGroupsRequest(input *DescribeOptionGroupsInput) (req *request.Request, output *DescribeOptionGroupsOutput) { + op := &request.Operation{ + Name: opDescribeOptionGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOptionGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOptionGroupsOutput{} + req.Data = output + return +} + +// Describes the available option groups. 
+func (c *RDS) DescribeOptionGroups(input *DescribeOptionGroupsInput) (*DescribeOptionGroupsOutput, error) { + req, out := c.DescribeOptionGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeOptionGroupsPages(input *DescribeOptionGroupsInput, fn func(p *DescribeOptionGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOptionGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOptionGroupsOutput), lastPage) + }) +} + +const opDescribeOrderableDBInstanceOptions = "DescribeOrderableDBInstanceOptions" + +// DescribeOrderableDBInstanceOptionsRequest generates a request for the DescribeOrderableDBInstanceOptions operation. +func (c *RDS) DescribeOrderableDBInstanceOptionsRequest(input *DescribeOrderableDBInstanceOptionsInput) (req *request.Request, output *DescribeOrderableDBInstanceOptionsOutput) { + op := &request.Operation{ + Name: opDescribeOrderableDBInstanceOptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOrderableDBInstanceOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOrderableDBInstanceOptionsOutput{} + req.Data = output + return +} + +// Returns a list of orderable DB instance options for the specified engine. 
+func (c *RDS) DescribeOrderableDBInstanceOptions(input *DescribeOrderableDBInstanceOptionsInput) (*DescribeOrderableDBInstanceOptionsOutput, error) { + req, out := c.DescribeOrderableDBInstanceOptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeOrderableDBInstanceOptionsPages(input *DescribeOrderableDBInstanceOptionsInput, fn func(p *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOrderableDBInstanceOptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOrderableDBInstanceOptionsOutput), lastPage) + }) +} + +const opDescribePendingMaintenanceActions = "DescribePendingMaintenanceActions" + +// DescribePendingMaintenanceActionsRequest generates a request for the DescribePendingMaintenanceActions operation. +func (c *RDS) DescribePendingMaintenanceActionsRequest(input *DescribePendingMaintenanceActionsInput) (req *request.Request, output *DescribePendingMaintenanceActionsOutput) { + op := &request.Operation{ + Name: opDescribePendingMaintenanceActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePendingMaintenanceActionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribePendingMaintenanceActionsOutput{} + req.Data = output + return +} + +// Returns a list of resources (for example, DB instances) that have at least +// one pending maintenance action. 
+func (c *RDS) DescribePendingMaintenanceActions(input *DescribePendingMaintenanceActionsInput) (*DescribePendingMaintenanceActionsOutput, error) { + req, out := c.DescribePendingMaintenanceActionsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReservedDBInstances = "DescribeReservedDBInstances" + +// DescribeReservedDBInstancesRequest generates a request for the DescribeReservedDBInstances operation. +func (c *RDS) DescribeReservedDBInstancesRequest(input *DescribeReservedDBInstancesInput) (req *request.Request, output *DescribeReservedDBInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReservedDBInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedDBInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedDBInstancesOutput{} + req.Data = output + return +} + +// Returns information about reserved DB instances for this account, or about +// a specified reserved DB instance. 
+func (c *RDS) DescribeReservedDBInstances(input *DescribeReservedDBInstancesInput) (*DescribeReservedDBInstancesOutput, error) { + req, out := c.DescribeReservedDBInstancesRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeReservedDBInstancesPages(input *DescribeReservedDBInstancesInput, fn func(p *DescribeReservedDBInstancesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedDBInstancesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedDBInstancesOutput), lastPage) + }) +} + +const opDescribeReservedDBInstancesOfferings = "DescribeReservedDBInstancesOfferings" + +// DescribeReservedDBInstancesOfferingsRequest generates a request for the DescribeReservedDBInstancesOfferings operation. +func (c *RDS) DescribeReservedDBInstancesOfferingsRequest(input *DescribeReservedDBInstancesOfferingsInput) (req *request.Request, output *DescribeReservedDBInstancesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedDBInstancesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedDBInstancesOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedDBInstancesOfferingsOutput{} + req.Data = output + return +} + +// Lists available reserved DB instance offerings. 
+func (c *RDS) DescribeReservedDBInstancesOfferings(input *DescribeReservedDBInstancesOfferingsInput) (*DescribeReservedDBInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedDBInstancesOfferingsRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DescribeReservedDBInstancesOfferingsPages(input *DescribeReservedDBInstancesOfferingsInput, fn func(p *DescribeReservedDBInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedDBInstancesOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedDBInstancesOfferingsOutput), lastPage) + }) +} + +const opDownloadDBLogFilePortion = "DownloadDBLogFilePortion" + +// DownloadDBLogFilePortionRequest generates a request for the DownloadDBLogFilePortion operation. +func (c *RDS) DownloadDBLogFilePortionRequest(input *DownloadDBLogFilePortionInput) (req *request.Request, output *DownloadDBLogFilePortionOutput) { + op := &request.Operation{ + Name: opDownloadDBLogFilePortion, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "NumberOfLines", + TruncationToken: "AdditionalDataPending", + }, + } + + if input == nil { + input = &DownloadDBLogFilePortionInput{} + } + + req = c.newRequest(op, input, output) + output = &DownloadDBLogFilePortionOutput{} + req.Data = output + return +} + +// Downloads all or a portion of the specified log file, up to 1 MB in size. 
+func (c *RDS) DownloadDBLogFilePortion(input *DownloadDBLogFilePortionInput) (*DownloadDBLogFilePortionOutput, error) { + req, out := c.DownloadDBLogFilePortionRequest(input) + err := req.Send() + return out, err +} + +func (c *RDS) DownloadDBLogFilePortionPages(input *DownloadDBLogFilePortionInput, fn func(p *DownloadDBLogFilePortionOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DownloadDBLogFilePortionRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DownloadDBLogFilePortionOutput), lastPage) + }) +} + +const opFailoverDBCluster = "FailoverDBCluster" + +// FailoverDBClusterRequest generates a request for the FailoverDBCluster operation. +func (c *RDS) FailoverDBClusterRequest(input *FailoverDBClusterInput) (req *request.Request, output *FailoverDBClusterOutput) { + op := &request.Operation{ + Name: opFailoverDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &FailoverDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &FailoverDBClusterOutput{} + req.Data = output + return +} + +// Forces a failover for a DB cluster. +// +// A failover for a DB cluster promotes one of the read-only instances in the +// DB cluster to the master DB instance (the cluster writer) and deletes the +// current primary instance. +// +// Amazon Aurora will automatically fail over to a read-only instance, if one +// exists, when the primary instance fails. You can force a failover when you +// want to simulate a failure of a DB instance for testing. Because each instance +// in a DB cluster has its own endpoint address, you will need to clean up and +// re-establish any existing connections that use those endpoint addresses when +// the failover is complete. 
+// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) FailoverDBCluster(input *FailoverDBClusterInput) (*FailoverDBClusterOutput, error) { + req, out := c.FailoverDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +func (c *RDS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists all tags on an Amazon RDS resource. +// +// For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). +func (c *RDS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBCluster = "ModifyDBCluster" + +// ModifyDBClusterRequest generates a request for the ModifyDBCluster operation. +func (c *RDS) ModifyDBClusterRequest(input *ModifyDBClusterInput) (req *request.Request, output *ModifyDBClusterOutput) { + op := &request.Operation{ + Name: opModifyDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBClusterOutput{} + req.Data = output + return +} + +// Modify a setting for an Amazon Aurora DB cluster. 
You can change one or more +// database configuration parameters by specifying these parameters and the +// new values in the request. For more information on Amazon Aurora, see Aurora +// on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) ModifyDBCluster(input *ModifyDBClusterInput) (*ModifyDBClusterOutput, error) { + req, out := c.ModifyDBClusterRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBClusterParameterGroup = "ModifyDBClusterParameterGroup" + +// ModifyDBClusterParameterGroupRequest generates a request for the ModifyDBClusterParameterGroup operation. +func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParameterGroupInput) (req *request.Request, output *DBClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB cluster parameter group. To modify more than +// one parameter, submit a list of the following: ParameterName, ParameterValue, +// and ApplyMethod. A maximum of 20 parameters can be modified in a single request. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +// +// Changes to dynamic parameters are applied immediately. Changes to static +// parameters require a reboot without failover to the DB cluster associated +// with the parameter group before the change can take effect. 
+// +// After you create a DB cluster parameter group, you should wait at least +// 5 minutes before creating your first DB cluster that uses that DB cluster +// parameter group as the default parameter group. This allows Amazon RDS to +// fully complete the create action before the parameter group is used as the +// default for a new DB cluster. This is especially important for parameters +// that are critical when creating the default database for a DB cluster, such +// as the character set for the default database defined by the character_set_database +// parameter. You can use the Parameter Groups option of the Amazon RDS console +// (https://console.aws.amazon.com/rds/) or the DescribeDBClusterParameters +// command to verify that your DB cluster parameter group has been created or +// modified. +func (c *RDS) ModifyDBClusterParameterGroup(input *ModifyDBClusterParameterGroupInput) (*DBClusterParameterGroupNameMessage, error) { + req, out := c.ModifyDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBInstance = "ModifyDBInstance" + +// ModifyDBInstanceRequest generates a request for the ModifyDBInstance operation. +func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *request.Request, output *ModifyDBInstanceOutput) { + op := &request.Operation{ + Name: opModifyDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBInstanceOutput{} + req.Data = output + return +} + +// Modify settings for a DB instance. You can change one or more database configuration +// parameters by specifying these parameters and the new values in the request. 
+func (c *RDS) ModifyDBInstance(input *ModifyDBInstanceInput) (*ModifyDBInstanceOutput, error) { + req, out := c.ModifyDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBParameterGroup = "ModifyDBParameterGroup" + +// ModifyDBParameterGroupRequest generates a request for the ModifyDBParameterGroup operation. +func (c *RDS) ModifyDBParameterGroupRequest(input *ModifyDBParameterGroupInput) (req *request.Request, output *DBParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB parameter group. To modify more than one +// parameter, submit a list of the following: ParameterName, ParameterValue, +// and ApplyMethod. A maximum of 20 parameters can be modified in a single request. +// +// Changes to dynamic parameters are applied immediately. Changes to static +// parameters require a reboot without failover to the DB instance associated +// with the parameter group before the change can take effect. +// +// After you modify a DB parameter group, you should wait at least 5 minutes +// before creating your first DB instance that uses that DB parameter group +// as the default parameter group. This allows Amazon RDS to fully complete +// the modify action before the parameter group is used as the default for a +// new DB instance. This is especially important for parameters that are critical +// when creating the default database for a DB instance, such as the character +// set for the default database defined by the character_set_database parameter. 
+// You can use the Parameter Groups option of the Amazon RDS console (https://console.aws.amazon.com/rds/)
+// or the DescribeDBParameters command to verify that your DB parameter group
+// has been created or modified.
+func (c *RDS) ModifyDBParameterGroup(input *ModifyDBParameterGroupInput) (*DBParameterGroupNameMessage, error) {
+	req, out := c.ModifyDBParameterGroupRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opModifyDBSnapshotAttribute = "ModifyDBSnapshotAttribute"
+
+// ModifyDBSnapshotAttributeRequest generates a request for the ModifyDBSnapshotAttribute operation.
+func (c *RDS) ModifyDBSnapshotAttributeRequest(input *ModifyDBSnapshotAttributeInput) (req *request.Request, output *ModifyDBSnapshotAttributeOutput) {
+	op := &request.Operation{
+		Name:       opModifyDBSnapshotAttribute,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ModifyDBSnapshotAttributeInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &ModifyDBSnapshotAttributeOutput{}
+	req.Data = output
+	return
+}
+
+// Adds an attribute and values to, or removes an attribute and values from
+// a manual DB snapshot.
+//
+// To share a manual DB snapshot with other AWS accounts, specify restore as
+// the AttributeName and use the ValuesToAdd parameter to add a list of the
+// AWS account ids that are authorized to restore the manual DB snapshot. Uses
+// the value all to make the manual DB snapshot public and can be copied or
+// restored by all AWS accounts. Do not add the all value for any manual DB
+// snapshots that contain private information that you do not want to be available
+// to all AWS accounts.
+//
+// To view which AWS accounts have access to copy or restore a manual DB snapshot,
+// or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes
+// API.
+//
+// If the manual DB snapshot is encrypted, it cannot be shared.
+func (c *RDS) ModifyDBSnapshotAttribute(input *ModifyDBSnapshotAttributeInput) (*ModifyDBSnapshotAttributeOutput, error) { + req, out := c.ModifyDBSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + +const opModifyDBSubnetGroup = "ModifyDBSubnetGroup" + +// ModifyDBSubnetGroupRequest generates a request for the ModifyDBSubnetGroup operation. +func (c *RDS) ModifyDBSubnetGroupRequest(input *ModifyDBSubnetGroupInput) (req *request.Request, output *ModifyDBSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyDBSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBSubnetGroupOutput{} + req.Data = output + return +} + +// Modifies an existing DB subnet group. DB subnet groups must contain at least +// one subnet in at least two AZs in the region. +func (c *RDS) ModifyDBSubnetGroup(input *ModifyDBSubnetGroupInput) (*ModifyDBSubnetGroupOutput, error) { + req, out := c.ModifyDBSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyEventSubscription = "ModifyEventSubscription" + +// ModifyEventSubscriptionRequest generates a request for the ModifyEventSubscription operation. +func (c *RDS) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput) (req *request.Request, output *ModifyEventSubscriptionOutput) { + op := &request.Operation{ + Name: opModifyEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyEventSubscriptionOutput{} + req.Data = output + return +} + +// Modifies an existing RDS event notification subscription. Note that you cannot +// modify the source identifiers using this call; to change source identifiers +// for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription +// calls. 
+// +// You can see a list of the event categories for a given SourceType in the +// Events (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) +// topic in the Amazon RDS User Guide or by using the DescribeEventCategories +// action. +func (c *RDS) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*ModifyEventSubscriptionOutput, error) { + req, out := c.ModifyEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opModifyOptionGroup = "ModifyOptionGroup" + +// ModifyOptionGroupRequest generates a request for the ModifyOptionGroup operation. +func (c *RDS) ModifyOptionGroupRequest(input *ModifyOptionGroupInput) (req *request.Request, output *ModifyOptionGroupOutput) { + op := &request.Operation{ + Name: opModifyOptionGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyOptionGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyOptionGroupOutput{} + req.Data = output + return +} + +// Modifies an existing option group. +func (c *RDS) ModifyOptionGroup(input *ModifyOptionGroupInput) (*ModifyOptionGroupOutput, error) { + req, out := c.ModifyOptionGroupRequest(input) + err := req.Send() + return out, err +} + +const opPromoteReadReplica = "PromoteReadReplica" + +// PromoteReadReplicaRequest generates a request for the PromoteReadReplica operation. +func (c *RDS) PromoteReadReplicaRequest(input *PromoteReadReplicaInput) (req *request.Request, output *PromoteReadReplicaOutput) { + op := &request.Operation{ + Name: opPromoteReadReplica, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PromoteReadReplicaInput{} + } + + req = c.newRequest(op, input, output) + output = &PromoteReadReplicaOutput{} + req.Data = output + return +} + +// Promotes a Read Replica DB instance to a standalone DB instance. +// +// We recommend that you enable automated backups on your Read Replica before +// promoting the Read Replica. 
This ensures that no backup is taken during the +// promotion process. Once the instance is promoted to a primary instance, backups +// are taken based on your backup settings. +func (c *RDS) PromoteReadReplica(input *PromoteReadReplicaInput) (*PromoteReadReplicaOutput, error) { + req, out := c.PromoteReadReplicaRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedDBInstancesOffering = "PurchaseReservedDBInstancesOffering" + +// PurchaseReservedDBInstancesOfferingRequest generates a request for the PurchaseReservedDBInstancesOffering operation. +func (c *RDS) PurchaseReservedDBInstancesOfferingRequest(input *PurchaseReservedDBInstancesOfferingInput) (req *request.Request, output *PurchaseReservedDBInstancesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedDBInstancesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedDBInstancesOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedDBInstancesOfferingOutput{} + req.Data = output + return +} + +// Purchases a reserved DB instance offering. +func (c *RDS) PurchaseReservedDBInstancesOffering(input *PurchaseReservedDBInstancesOfferingInput) (*PurchaseReservedDBInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedDBInstancesOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootDBInstance = "RebootDBInstance" + +// RebootDBInstanceRequest generates a request for the RebootDBInstance operation. 
+func (c *RDS) RebootDBInstanceRequest(input *RebootDBInstanceInput) (req *request.Request, output *RebootDBInstanceOutput) { + op := &request.Operation{ + Name: opRebootDBInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootDBInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootDBInstanceOutput{} + req.Data = output + return +} + +// Rebooting a DB instance restarts the database engine service. A reboot also +// applies to the DB instance any modifications to the associated DB parameter +// group that were pending. Rebooting a DB instance results in a momentary outage +// of the instance, during which the DB instance status is set to rebooting. +// If the RDS instance is configured for MultiAZ, it is possible that the reboot +// will be conducted through a failover. An Amazon RDS event is created when +// the reboot is completed. +// +// If your DB instance is deployed in multiple Availability Zones, you can +// force a failover from one AZ to the other during the reboot. You might force +// a failover to test the availability of your DB instance deployment or to +// restore operations to the original AZ after a failover occurs. +// +// The time required to reboot is a function of the specific database engine's +// crash recovery process. To improve the reboot time, we recommend that you +// reduce database activities as much as possible during the reboot process +// to reduce rollback activity for in-transit transactions. +func (c *RDS) RebootDBInstance(input *RebootDBInstanceInput) (*RebootDBInstanceOutput, error) { + req, out := c.RebootDBInstanceRequest(input) + err := req.Send() + return out, err +} + +const opRemoveSourceIdentifierFromSubscription = "RemoveSourceIdentifierFromSubscription" + +// RemoveSourceIdentifierFromSubscriptionRequest generates a request for the RemoveSourceIdentifierFromSubscription operation. 
+func (c *RDS) RemoveSourceIdentifierFromSubscriptionRequest(input *RemoveSourceIdentifierFromSubscriptionInput) (req *request.Request, output *RemoveSourceIdentifierFromSubscriptionOutput) { + op := &request.Operation{ + Name: opRemoveSourceIdentifierFromSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveSourceIdentifierFromSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveSourceIdentifierFromSubscriptionOutput{} + req.Data = output + return +} + +// Removes a source identifier from an existing RDS event notification subscription. +func (c *RDS) RemoveSourceIdentifierFromSubscription(input *RemoveSourceIdentifierFromSubscriptionInput) (*RemoveSourceIdentifierFromSubscriptionOutput, error) { + req, out := c.RemoveSourceIdentifierFromSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a request for the RemoveTagsFromResource operation. +func (c *RDS) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// Removes metadata tags from an Amazon RDS resource. +// +// For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS +// Resources (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Tagging.html). 
+func (c *RDS) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opResetDBClusterParameterGroup = "ResetDBClusterParameterGroup" + +// ResetDBClusterParameterGroupRequest generates a request for the ResetDBClusterParameterGroup operation. +func (c *RDS) ResetDBClusterParameterGroupRequest(input *ResetDBClusterParameterGroupInput) (req *request.Request, output *DBClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB cluster parameter group to the default value. +// To reset specific parameters submit a list of the following: ParameterName +// and ApplyMethod. To reset the entire DB cluster parameter group, specify +// the DBClusterParameterGroupName and ResetAllParameters parameters. +// +// When resetting the entire group, dynamic parameters are updated immediately +// and static parameters are set to pending-reboot to take effect on the next +// DB instance restart or RebootDBInstance request. You must call RebootDBInstance +// for every DB instance in your DB cluster that you want the updated static +// parameter to apply to. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. 
+func (c *RDS) ResetDBClusterParameterGroup(input *ResetDBClusterParameterGroupInput) (*DBClusterParameterGroupNameMessage, error) { + req, out := c.ResetDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opResetDBParameterGroup = "ResetDBParameterGroup" + +// ResetDBParameterGroupRequest generates a request for the ResetDBParameterGroup operation. +func (c *RDS) ResetDBParameterGroupRequest(input *ResetDBParameterGroupInput) (req *request.Request, output *DBParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetDBParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetDBParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &DBParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a DB parameter group to the engine/system default +// value. To reset specific parameters submit a list of the following: ParameterName +// and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup +// name and ResetAllParameters parameters. When resetting the entire group, +// dynamic parameters are updated immediately and static parameters are set +// to pending-reboot to take effect on the next DB instance restart or RebootDBInstance +// request. +func (c *RDS) ResetDBParameterGroup(input *ResetDBParameterGroupInput) (*DBParameterGroupNameMessage, error) { + req, out := c.ResetDBParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBClusterFromSnapshot = "RestoreDBClusterFromSnapshot" + +// RestoreDBClusterFromSnapshotRequest generates a request for the RestoreDBClusterFromSnapshot operation. 
+func (c *RDS) RestoreDBClusterFromSnapshotRequest(input *RestoreDBClusterFromSnapshotInput) (req *request.Request, output *RestoreDBClusterFromSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreDBClusterFromSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBClusterFromSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBClusterFromSnapshotOutput{} + req.Data = output + return +} + +// Creates a new DB cluster from a DB cluster snapshot. The target DB cluster +// is created from the source DB cluster restore point with the same configuration +// as the original source DB cluster, except that the new DB cluster is created +// with the default security group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) RestoreDBClusterFromSnapshot(input *RestoreDBClusterFromSnapshotInput) (*RestoreDBClusterFromSnapshotOutput, error) { + req, out := c.RestoreDBClusterFromSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBClusterToPointInTime = "RestoreDBClusterToPointInTime" + +// RestoreDBClusterToPointInTimeRequest generates a request for the RestoreDBClusterToPointInTime operation. +func (c *RDS) RestoreDBClusterToPointInTimeRequest(input *RestoreDBClusterToPointInTimeInput) (req *request.Request, output *RestoreDBClusterToPointInTimeOutput) { + op := &request.Operation{ + Name: opRestoreDBClusterToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBClusterToPointInTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBClusterToPointInTimeOutput{} + req.Data = output + return +} + +// Restores a DB cluster to an arbitrary point in time. Users can restore to +// any point in time before LatestRestorableTime for up to BackupRetentionPeriod +// days. 
The target DB cluster is created from the source DB cluster with the +// same configuration as the original DB cluster, except that the new DB cluster +// is created with the default DB security group. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// in the Amazon RDS User Guide. +func (c *RDS) RestoreDBClusterToPointInTime(input *RestoreDBClusterToPointInTimeInput) (*RestoreDBClusterToPointInTimeOutput, error) { + req, out := c.RestoreDBClusterToPointInTimeRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBInstanceFromDBSnapshot = "RestoreDBInstanceFromDBSnapshot" + +// RestoreDBInstanceFromDBSnapshotRequest generates a request for the RestoreDBInstanceFromDBSnapshot operation. +func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFromDBSnapshotInput) (req *request.Request, output *RestoreDBInstanceFromDBSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreDBInstanceFromDBSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBInstanceFromDBSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBInstanceFromDBSnapshotOutput{} + req.Data = output + return +} + +// Creates a new DB instance from a DB snapshot. The target database is created +// from the source database restore point with the most of original configuration, +// but in a system chosen availability zone with the default security group, +// the default subnet group, and the default DB parameter group. By default, +// the new DB instance is created as a single-AZ deployment except when the +// instance is a SQL Server instance that has an option group that is associated +// with mirroring; in this case, the instance becomes a mirrored AZ deployment +// and not a single-AZ deployment. 
+// +// If your intent is to replace your original DB instance with the new, restored +// DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot +// action. RDS does not allow two DB instances with the same name. Once you +// have renamed your original DB instance with a different identifier, then +// you can pass the original name of the DB instance as the DBInstanceIdentifier +// in the call to the RestoreDBInstanceFromDBSnapshot action. The result is +// that you will replace the original DB instance with the DB instance created +// from the snapshot. +// +// If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier +// must be the ARN of the shared DB snapshot. +func (c *RDS) RestoreDBInstanceFromDBSnapshot(input *RestoreDBInstanceFromDBSnapshotInput) (*RestoreDBInstanceFromDBSnapshotOutput, error) { + req, out := c.RestoreDBInstanceFromDBSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRestoreDBInstanceToPointInTime = "RestoreDBInstanceToPointInTime" + +// RestoreDBInstanceToPointInTimeRequest generates a request for the RestoreDBInstanceToPointInTime operation. +func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPointInTimeInput) (req *request.Request, output *RestoreDBInstanceToPointInTimeOutput) { + op := &request.Operation{ + Name: opRestoreDBInstanceToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBInstanceToPointInTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBInstanceToPointInTimeOutput{} + req.Data = output + return +} + +// Restores a DB instance to an arbitrary point-in-time. Users can restore to +// any point in time before the LatestRestorableTime for up to BackupRetentionPeriod +// days. 
The target database is created with the most of original configuration, +// but in a system chosen availability zone with the default security group, +// the default subnet group, and the default DB parameter group. By default, +// the new DB instance is created as a single-AZ deployment except when the +// instance is a SQL Server instance that has an option group that is associated +// with mirroring; in this case, the instance becomes a mirrored deployment +// and not a single-AZ deployment. +func (c *RDS) RestoreDBInstanceToPointInTime(input *RestoreDBInstanceToPointInTimeInput) (*RestoreDBInstanceToPointInTimeOutput, error) { + req, out := c.RestoreDBInstanceToPointInTimeRequest(input) + err := req.Send() + return out, err +} + +const opRevokeDBSecurityGroupIngress = "RevokeDBSecurityGroupIngress" + +// RevokeDBSecurityGroupIngressRequest generates a request for the RevokeDBSecurityGroupIngress operation. +func (c *RDS) RevokeDBSecurityGroupIngressRequest(input *RevokeDBSecurityGroupIngressInput) (req *request.Request, output *RevokeDBSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeDBSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeDBSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeDBSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Revokes ingress from a DBSecurityGroup for previously authorized IP ranges +// or EC2 or VPC Security Groups. Required parameters for this API are one of +// CIDRIP, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either +// EC2SecurityGroupName or EC2SecurityGroupId). 
+func (c *RDS) RevokeDBSecurityGroupIngress(input *RevokeDBSecurityGroupIngressInput) (*RevokeDBSecurityGroupIngressOutput, error) { + req, out := c.RevokeDBSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +// Describes a quota for an AWS account, for example, the number of DB instances +// allowed. +type AccountQuota struct { + _ struct{} `type:"structure"` + + // The name of the Amazon RDS quota for this AWS account. + AccountQuotaName *string `type:"string"` + + // The maximum allowed value for the quota. + Max *int64 `type:"long"` + + // The amount currently used toward the quota maximum. + Used *int64 `type:"long"` +} + +// String returns the string representation +func (s AccountQuota) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountQuota) GoString() string { + return s.String() +} + +type AddSourceIdentifierToSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The identifier of the event source to be added. An identifier must begin + // with a letter and must contain only ASCII letters, digits, and hyphens; it + // cannot end with a hyphen or contain two consecutive hyphens. + // + // Constraints: + // + // If the source type is a DB instance, then a DBInstanceIdentifier must be + // supplied. If the source type is a DB security group, a DBSecurityGroupName + // must be supplied. If the source type is a DB parameter group, a DBParameterGroupName + // must be supplied. If the source type is a DB snapshot, a DBSnapshotIdentifier + // must be supplied. + SourceIdentifier *string `type:"string" required:"true"` + + // The name of the RDS event notification subscription you want to add a source + // identifier to. 
+ SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddSourceIdentifierToSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddSourceIdentifierToSubscriptionInput) GoString() string { + return s.String() +} + +type AddSourceIdentifierToSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s AddSourceIdentifierToSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddSourceIdentifierToSubscriptionOutput) GoString() string { + return s.String() +} + +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS resource the tags will be added to. This value is an Amazon + // Resource Name (ARN). For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceName *string `type:"string" required:"true"` + + // The tags to be assigned to the Amazon RDS resource. 
+ Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceOutput) GoString() string { + return s.String() +} + +type ApplyPendingMaintenanceActionInput struct { + _ struct{} `type:"structure"` + + // The pending maintenance action to apply to this resource. + ApplyAction *string `type:"string" required:"true"` + + // A value that specifies the type of opt-in request, or undoes an opt-in request. + // An opt-in request of type immediate cannot be undone. + // + // Valid values: + // + // immediate - Apply the maintenance action immediately. next-maintenance + // - Apply the maintenance action during the next maintenance window for the + // resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests. + OptInType *string `type:"string" required:"true"` + + // The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance + // action applies to. For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). 
+ ResourceIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ApplyPendingMaintenanceActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplyPendingMaintenanceActionInput) GoString() string { + return s.String() +} + +type ApplyPendingMaintenanceActionOutput struct { + _ struct{} `type:"structure"` + + // Describes the pending maintenance actions for a resource. + ResourcePendingMaintenanceActions *ResourcePendingMaintenanceActions `type:"structure"` +} + +// String returns the string representation +func (s ApplyPendingMaintenanceActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplyPendingMaintenanceActionOutput) GoString() string { + return s.String() +} + +type AuthorizeDBSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range to authorize. + CIDRIP *string `type:"string"` + + // The name of the DB security group to add authorization to. + DBSecurityGroupName *string `type:"string" required:"true"` + + // Id of the EC2 security group to authorize. For VPC DB security groups, EC2SecurityGroupId + // must be provided. Otherwise, EC2SecurityGroupOwnerId and either EC2SecurityGroupName + // or EC2SecurityGroupId must be provided. + EC2SecurityGroupId *string `type:"string"` + + // Name of the EC2 security group to authorize. For VPC DB security groups, + // EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId and + // either EC2SecurityGroupName or EC2SecurityGroupId must be provided. + EC2SecurityGroupName *string `type:"string"` + + // AWS account number of the owner of the EC2 security group specified in the + // EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable + // value. For VPC DB security groups, EC2SecurityGroupId must be provided. 
Otherwise, + // EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId + // must be provided. + EC2SecurityGroupOwnerId *string `type:"string"` +} + +// String returns the string representation +func (s AuthorizeDBSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeDBSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type AuthorizeDBSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup + // RevokeDBSecurityGroupIngress This data type is used as a response element + // in the DescribeDBSecurityGroups action. + DBSecurityGroup *DBSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeDBSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeDBSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Contains Availability Zone information. +// +// This data type is used as an element in the following data type: OrderableDBInstanceOption +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the availability zone. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// A CA certificate for an AWS account. +type Certificate struct { + _ struct{} `type:"structure"` + + // The unique key that identifies a certificate. + CertificateIdentifier *string `type:"string"` + + // The type of the certificate. 
+ CertificateType *string `type:"string"` + + // The thumbprint of the certificate. + Thumbprint *string `type:"string"` + + // The starting date from which the certificate is valid. + ValidFrom *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The final date that the certificate continues to be valid. + ValidTill *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Certificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Certificate) GoString() string { + return s.String() +} + +// This data type is used as a response element in the action DescribeDBEngineVersions. +type CharacterSet struct { + _ struct{} `type:"structure"` + + // The description of the character set. + CharacterSetDescription *string `type:"string"` + + // The name of the character set. + CharacterSetName *string `type:"string"` +} + +// String returns the string representation +func (s CharacterSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CharacterSet) GoString() string { + return s.String() +} + +type CopyDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster snapshot to copy. This parameter is not + // case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster-snapshot1 + SourceDBClusterSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the new DB cluster snapshot to create from the source DB + // cluster snapshot. This parameter is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. 
First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster-snapshot2 + TargetDBClusterSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBClusterSnapshotInput) GoString() string { + return s.String() +} + +type CopyDBClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is + // used as a response element in the DescribeDBClusterSnapshots action. + DBClusterSnapshot *DBClusterSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyDBClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CopyDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The identifier or ARN for the source DB parameter group. For information + // about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN) + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + // + // Constraints: + // + // Must specify a valid DB parameter group. If the source DB parameter group + // is in the same region as the copy, specify a valid DB parameter group identifier, + // for example my-db-param-group, or a valid ARN. If the source DB parameter + // group is in a different region than the copy, specify a valid DB parameter + // group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters. + SourceDBParameterGroupIdentifier *string `type:"string" required:"true"` + + // A list of tags. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A description for the copied DB parameter group. + TargetDBParameterGroupDescription *string `type:"string" required:"true"` + + // The identifier for the copied DB parameter group. + // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-db-parameter-group + TargetDBParameterGroupIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBParameterGroupInput) GoString() string { + return s.String() +} + +type CopyDBParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBParameterGroup + // action. + // + // This data type is used as a request parameter in the DeleteDBParameterGroup + // action, and as a response element in the DescribeDBParameterGroups action. + DBParameterGroup *DBParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CopyDBParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBParameterGroupOutput) GoString() string { + return s.String() +} + +type CopyDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // True to copy all tags from the source DB snapshot to the target DB snapshot; + // otherwise false. The default is false. + CopyTags *bool `type:"boolean"` + + // The identifier for the source DB snapshot. + // + // If you are copying from a shared manual DB snapshot, this must be the ARN + // of the shared DB snapshot. + // + // Constraints: + // + // Must specify a valid system snapshot in the "available" state. 
If the source + // snapshot is in the same region as the copy, specify a valid DB snapshot identifier. + // If the source snapshot is in a different region than the copy, specify a + // valid DB snapshot ARN. For more information, go to Copying a DB Snapshot + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html). + // Example: rds:mydb-2012-04-02-00-01 + // + // Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805 + SourceDBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier for the copied snapshot. + // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-db-snapshot + TargetDBSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBSnapshotInput) GoString() string { + return s.String() +} + +type CopyDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response + // element in the DescribeDBSnapshots action. + DBSnapshot *DBSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBSnapshotOutput) GoString() string { + return s.String() +} + +type CopyOptionGroupInput struct { + _ struct{} `type:"structure"` + + // The identifier or ARN for the source option group. 
For information about + // creating an ARN, see Constructing an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + // + // Constraints: + // + // Must specify a valid option group. If the source option group is in the + // same region as the copy, specify a valid option group identifier, for example + // my-option-group, or a valid ARN. If the source option group is in a different + // region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options. + SourceOptionGroupIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The description for the copied option group. + TargetOptionGroupDescription *string `type:"string" required:"true"` + + // The identifier for the copied option group. + // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-option-group + TargetOptionGroupIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyOptionGroupInput) GoString() string { + return s.String() +} + +type CopyOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s CopyOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyOptionGroupOutput) GoString() string { + return s.String() +} + +type CreateDBClusterInput struct { + _ struct{} `type:"structure"` + + // A list of EC2 Availability Zones that instances in the DB 
cluster can be + // created in. For information on regions and Availability Zones, see Regions + // and Availability Zones (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // The number of days for which automated backups are retained. You must specify + // a minimum value of 1. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 1 to 35 + BackupRetentionPeriod *int64 `type:"integer"` + + // A value that indicates that the DB cluster should be associated with the + // specified CharacterSet. + CharacterSetName *string `type:"string"` + + // The DB cluster identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster1 + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB cluster parameter group to associate with this DB cluster. + // If this argument is omitted, default.aurora5.6 for the specified engine will + // be used. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string"` + + // A DB subnet group to associate with this DB cluster. + DBSubnetGroupName *string `type:"string"` + + // The name for your database of up to 8 alpha-numeric characters. If you do + // not provide a name, Amazon RDS will not create a database in the DB cluster + // you are creating. + DatabaseName *string `type:"string"` + + // The name of the database engine to be used for this DB cluster. 
+ // + // Valid Values: aurora + Engine *string `type:"string" required:"true"` + + // The version number of the database engine to use. + // + // Aurora + // + // Example: 5.6.10a + EngineVersion *string `type:"string"` + + // The KMS key identifier for an encrypted DB cluster. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are creating a DB cluster with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB cluster, then you can use + // the KMS key alias instead of the ARN for the KM encryption key. + // + // If the StorageEncrypted parameter is true, and you do not specify a value + // for the KmsKeyId parameter, then Amazon RDS will use your default encryption + // key. AWS KMS creates the default encryption key for your AWS account. Your + // AWS account has a different default encryption key for each AWS region. + KmsKeyId *string `type:"string"` + + // The password for the master database user. This password can contain any + // printable ASCII character except "/", """, or "@". + // + // Constraints: Must contain from 8 to 41 characters. + MasterUserPassword *string `type:"string" required:"true"` + + // The name of the master user for the client DB cluster. + // + // Constraints: + // + // Must be 1 to 16 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. + MasterUsername *string `type:"string" required:"true"` + + // A value that indicates that the DB cluster should be associated with the + // specified option group. + // + // Permanent options cannot be removed from an option group. The option group + // cannot be removed from a DB cluster once it is associated with a DB cluster. + OptionGroupName *string `type:"string"` + + // The port number on which the instances in the DB cluster accept connections. 
+ // + // Default: 3306 + Port *int64 `type:"integer"` + + // The daily time range during which automated backups are created if automated + // backups are enabled using the BackupRetentionPeriod parameter. + // + // Default: A 30-minute window selected at random from an 8-hour block of time + // per region. To see the time blocks available, see Adjusting the Preferred + // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated + // Time (UTC). Must not conflict with the preferred maintenance window. Must + // be at least 30 minutes. + PreferredBackupWindow *string `type:"string"` + + // The weekly time range during which system maintenance can occur, in Universal + // Coordinated Time (UTC). + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of time + // per region, occurring on a random day of the week. To see the time blocks + // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies whether the DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of EC2 VPC security groups to associate with this DB cluster. 
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterInput) GoString() string { + return s.String() +} + +type CreateDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster + // RestoreDBClusterFromSnapshot This data type is used as a response element + // in the DescribeDBClusters action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterOutput) GoString() string { + return s.String() +} + +type CreateDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens This value is + // stored as a lowercase string. + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // The DB cluster parameter group family name. A DB cluster parameter group + // can be associated with one and only one DB cluster parameter group family, + // and can be applied only to a DB cluster running a database engine and engine + // version compatible with that DB cluster parameter group family. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // The description for the DB cluster parameter group. + Description *string `type:"string" required:"true"` + + // A list of tags. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +type CreateDBClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBClusterParameterGroup + // action. + // + // This data type is used as a request parameter in the DeleteDBClusterParameterGroup + // action, and as a response element in the DescribeDBClusterParameterGroups + // action. + DBClusterParameterGroup *DBClusterParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterParameterGroupOutput) GoString() string { + return s.String() +} + +type CreateDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster to create a snapshot for. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster1 + DBClusterIdentifier *string `type:"string" required:"true"` + + // The identifier of the DB cluster snapshot. This parameter is stored as a + // lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. First character + // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + // Example: my-cluster1-snapshot1 + DBClusterSnapshotIdentifier *string `type:"string" required:"true"` + + // The tags to be assigned to the DB cluster snapshot. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterSnapshotInput) GoString() string { + return s.String() +} + +type CreateDBClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is + // used as a response element in the DescribeDBClusterSnapshots action. + DBClusterSnapshot *DBClusterSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CreateDBClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CreateDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The amount of storage (in gigabytes) to be initially allocated for the database + // instance. + // + // Type: Integer + // + // MySQL + // + // Constraints: Must be an integer from 5 to 6144. + // + // MariaDB + // + // Constraints: Must be an integer from 5 to 6144. + // + // PostgreSQL + // + // Constraints: Must be an integer from 5 to 6144. + // + // Oracle + // + // Constraints: Must be an integer from 10 to 6144. + // + // SQL Server + // + // Constraints: Must be an integer from 200 to 4096 (Standard Edition and + // Enterprise Edition) or from 20 to 4096 (Express Edition and Web Edition) + AllocatedStorage *int64 `type:"integer"` + + // Indicates that minor engine upgrades will be applied automatically to the + // DB instance during the maintenance window. + // + // Default: true + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The EC2 Availability Zone that the database instance will be created in. 
+ // For information on regions and Availability Zones, see Regions and Availability + // Zones (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). + // + // Default: A random, system-chosen Availability Zone in the endpoint's region. + // + // Example: us-east-1d + // + // Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ + // parameter is set to true. The specified Availability Zone must be in the + // same region as the current endpoint. + AvailabilityZone *string `type:"string"` + + // The number of days for which automated backups are retained. Setting this + // parameter to a positive number enables backups. Setting this parameter to + // 0 disables automated backups. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 0 to 35 Cannot be set to 0 if the DB instance is a + // source to Read Replicas + BackupRetentionPeriod *int64 `type:"integer"` + + // For supported engines, indicates that the DB instance should be associated + // with the specified CharacterSet. + CharacterSetName *string `type:"string"` + + // True to copy all tags from the DB instance to snapshots of the DB instance; + // otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The identifier of the DB cluster that the instance will belong to. + // + // For information on creating a DB cluster, see CreateDBCluster. + // + // Type: String + DBClusterIdentifier *string `type:"string"` + + // The compute and memory capacity of the DB instance. 
+ // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + // db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium + // | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge + // | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge + // | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small + // | db.t2.medium | db.t2.large + DBInstanceClass *string `type:"string" required:"true"` + + // The DB instance identifier. This parameter is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for + // SQL Server). First character must be a letter. Cannot end with a hyphen or + // contain two consecutive hyphens. Example: mydbinstance + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The meaning of this parameter differs according to the database engine you + // use. + // + // Type: String + // + // MySQL + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, no database is created in the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved + // by the specified database engine MariaDB + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, no database is created in the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved + // by the specified database engine PostgreSQL + // + // The name of the database to create when the DB instance is created. If this + // parameter is not specified, the default "postgres" database is created in + // the DB instance. + // + // Constraints: + // + // Must contain 1 to 63 alphanumeric characters Must begin with a letter or + // an underscore. 
Subsequent characters can be letters, underscores, or digits + // (0-9). Cannot be a word reserved by the specified database engine Oracle + // + // The Oracle System ID (SID) of the created DB instance. + // + // Default: ORCL + // + // Constraints: + // + // Cannot be longer than 8 characters SQL Server + // + // Not applicable. Must be null. + // + // Amazon Aurora + // + // The name of the database to create when the primary instance of the DB cluster + // is created. If this parameter is not specified, no database is created in + // the DB instance. + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved + // by the specified database engine + DBName *string `type:"string"` + + // The name of the DB parameter group to associate with this DB instance. If + // this argument is omitted, the default DBParameterGroup for the specified + // engine will be used. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string"` + + // A list of DB security groups to associate with this DB instance. + // + // Default: The default DB security group for the database engine. + DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` + + // A DB subnet group to associate with this DB instance. + // + // If there is no DB subnet group, then it is a non-VPC DB instance. + DBSubnetGroupName *string `type:"string"` + + // The name of the database engine to be used for this instance. + // + // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee + // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora + // + // Not every database engine is available for every AWS region. + Engine *string `type:"string" required:"true"` + + // The version number of the database engine to use. 
+ // + // The following are the database engines and major and minor versions that + // are available with Amazon RDS. Not every database engine is available for + // every AWS region. + // + // MySQL + // + // Version 5.1 (Only available in the following regions: ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 5.1.73a | 5.1.73b + // Version 5.5 (Only available in the following regions: ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 5.5.40 | 5.5.40a + // Version 5.5 (Available in all regions): 5.5.40b | 5.5.41 | 5.5.42 Version + // 5.6 (Available in all regions): 5.6.19a | 5.6.19b | 5.6.21 | 5.6.21b | 5.6.22 + // | 5.6.23 MariaDB + // + // Version 10.0 (Available in all regions except AWS GovCloud (US) Region + // (us-gov-west-1)): 10.0.17 Oracle Database Enterprise Edition (oracle-ee) + // + // Version 11.2 (Only available in the following regions: ap-northeast-1, + // ap-southeast-1, ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): + // 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version + // 11.2 (Available in all regions): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 + // | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version 12.1 (Available in all + // regions): 12.1.0.1.v1 | 12.1.0.1.v2 | 12.1.0.2.v1 Oracle Database Standard + // Edition (oracle-se) + // + // Version 11.2 (Only available in the following regions: us-west-1): 11.2.0.2.v3 + // | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version 11.2 (Only + // available in the following regions: eu-central-1, us-west-1): 11.2.0.3.v1 + // | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version + // 12.1 (Only available in the following regions: eu-central-1, us-west-1): + // 12.1.0.1.v1 | 12.1.0.1.v2 Oracle Database Standard Edition One (oracle-se1) + // + // Version 11.2 (Only available in the following regions: us-west-1): 11.2.0.2.v3 + // | 
11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version 11.2 (Only + // available in the following regions: eu-central-1, us-west-1): 11.2.0.3.v1 + // | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version + // 12.1 (Only available in the following regions: eu-central-1, us-west-1): + // 12.1.0.1.v1 | 12.1.0.1.v2 PostgreSQL + // + // Version 9.3 (Only available in the following regions: ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 9.3.1 | 9.3.2 + // Version 9.3 (Available in all regions): 9.3.3 | 9.3.5 | 9.3.6 | 9.3.9 | + // 9.3.10 Version 9.4 (Available in all regions): 9.4.1 | 9.4.4 | 9.4.5 Microsoft + // SQL Server Enterprise Edition (sqlserver-ee) + // + // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 + // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in + // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): + // 11.00.5058.0.v1 Microsoft SQL Server Express Edition (sqlserver-ex) + // + // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 + // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in + // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): + // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 + // Microsoft SQL Server Standard Edition (sqlserver-se) + // + // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 + // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in + // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): + // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 + // Microsoft SQL Server Web Edition (sqlserver-web) + // + // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 + // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in + // all regions): 11.00.2100.60.v1 
Version 11.00 (Available in all regions): + // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 + EngineVersion *string `type:"string"` + + // The amount of Provisioned IOPS (input/output operations per second) to be + // initially allocated for the DB instance. + // + // Constraints: To use PIOPS, this value must be an integer greater than 1000. + Iops *int64 `type:"integer"` + + // The KMS key identifier for an encrypted DB instance. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are creating a DB instance with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB instance, then you can + // use the KMS key alias instead of the ARN for the KM encryption key. + // + // If the StorageEncrypted parameter is true, and you do not specify a value + // for the KmsKeyId parameter, then Amazon RDS will use your default encryption + // key. AWS KMS creates the default encryption key for your AWS account. Your + // AWS account has a different default encryption key for each AWS region. + KmsKeyId *string `type:"string"` + + // License model information for this DB instance. + // + // Valid values: license-included | bring-your-own-license | general-public-license + LicenseModel *string `type:"string"` + + // The password for the master database user. Can be any printable ASCII character + // except "/", """, or "@". + // + // Type: String + // + // MySQL + // + // Constraints: Must contain from 8 to 41 characters. + // + // MariaDB + // + // Constraints: Must contain from 8 to 41 characters. + // + // Oracle + // + // Constraints: Must contain from 8 to 30 characters. + // + // SQL Server + // + // Constraints: Must contain from 8 to 128 characters. + // + // PostgreSQL + // + // Constraints: Must contain from 8 to 128 characters. + // + // Amazon Aurora + // + // Constraints: Must contain from 8 to 41 characters. 
+ MasterUserPassword *string `type:"string"` + + // The name of master user for the client DB instance. + // + // MySQL + // + // Constraints: + // + // Must be 1 to 16 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. MariaDB + // + // Constraints: + // + // Must be 1 to 16 alphanumeric characters. Cannot be a reserved word for + // the chosen database engine. Type: String + // + // Oracle + // + // Constraints: + // + // Must be 1 to 30 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. SQL Server + // + // Constraints: + // + // Must be 1 to 128 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. PostgreSQL + // + // Constraints: + // + // Must be 1 to 63 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word for the chosen database engine. + MasterUsername *string `type:"string"` + + // The interval, in seconds, between points when Enhanced Monitoring metrics + // are collected for the DB instance. To disable collecting Enhanced Monitoring + // metrics, specify 0. The default is 60. + // + // If MonitoringRoleArn is specified, then you must also set MonitoringInterval + // to a value other than 0. + // + // Valid Values: 0, 1, 5, 10, 15, 30, 60 + MonitoringInterval *int64 `type:"integer"` + + // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics + // to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. + // For information on creating a monitoring role, go to To create an IAM role + // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole). + // + // If MonitoringInterval is set to a value other than 0, then you must supply + // a MonitoringRoleArn value. 
+ MonitoringRoleArn *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. You cannot set the + // AvailabilityZone parameter if the MultiAZ parameter is set to true. Do not + // set this value if you want a Multi-AZ deployment for a SQL Server DB instance. + // Multi-AZ for SQL Server is set using the Mirroring option in an option group. + MultiAZ *bool `type:"boolean"` + + // Indicates that the DB instance should be associated with the specified option + // group. + // + // Permanent options, such as the TDE option for Oracle Advanced Security + // TDE, cannot be removed from an option group, and that option group cannot + // be removed from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string"` + + // The port number on which the database accepts connections. + // + // MySQL + // + // Default: 3306 + // + // Valid Values: 1150-65535 + // + // Type: Integer + // + // MariaDB + // + // Default: 3306 + // + // Valid Values: 1150-65535 + // + // Type: Integer + // + // PostgreSQL + // + // Default: 5432 + // + // Valid Values: 1150-65535 + // + // Type: Integer + // + // Oracle + // + // Default: 1521 + // + // Valid Values: 1150-65535 + // + // SQL Server + // + // Default: 1433 + // + // Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 + // through 49156. + // + // Amazon Aurora + // + // Default: 3306 + // + // Valid Values: 1150-65535 + // + // Type: Integer + Port *int64 `type:"integer"` + + // The daily time range during which automated backups are created if automated + // backups are enabled, using the BackupRetentionPeriod parameter. For more + // information, see DB Instance Backups (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.BackingUpAndRestoringAmazonRDSInstances.html). + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region. 
To see the time blocks available, see Adjusting the Preferred + // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated + // Time (UTC). Must not conflict with the preferred maintenance window. Must + // be at least 30 minutes. + PreferredBackupWindow *string `type:"string"` + + // The weekly time range during which system maintenance can occur, in Universal + // Coordinated Time (UTC). For more information, see DB Instance Maintenance + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBMaintenance.html). + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region, occurring on a random day of the week. To see the time blocks + // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC: true VPC: false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. 
If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Specifies whether the DB instance is encrypted. + // + // Default: false + StorageEncrypted *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // A list of EC2 VPC security groups to associate with this DB instance. + // + // Default: The default EC2 VPC security group for the DB subnet group's VPC. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s CreateDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceInput) GoString() string { + return s.String() +} + +type CreateDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. 
+ DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s CreateDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceOutput) GoString() string { + return s.String() +} + +type CreateDBInstanceReadReplicaInput struct { + _ struct{} `type:"structure"` + + // Indicates that minor engine upgrades will be applied automatically to the + // Read Replica during the maintenance window. + // + // Default: Inherits from the source DB instance + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The Amazon EC2 Availability Zone that the Read Replica will be created in. + // + // Default: A random, system-chosen Availability Zone in the endpoint's region. + // + // Example: us-east-1d + AvailabilityZone *string `type:"string"` + + // True to copy all tags from the Read Replica to snapshots of the Read Replica; + // otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The compute and memory capacity of the Read Replica. + // + // Valid Values: db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | + // db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large + // | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge + // | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge + // | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium + // | db.t2.large + // + // Default: Inherits from the source DB instance. + DBInstanceClass *string `type:"string"` + + // The DB instance identifier of the Read Replica. This identifier is the unique + // key that identifies a DB instance. This parameter is stored as a lowercase + // string. + DBInstanceIdentifier *string `type:"string" required:"true"` + + // Specifies a DB subnet group for the DB instance. 
The new DB instance will + // be created in the VPC associated with the DB subnet group. If no DB subnet + // group is specified, then the new DB instance is not created in a VPC. + // + // Constraints: + // + // Can only be specified if the source DB instance identifier specifies a + // DB instance in another region. The specified DB subnet group must be in the + // same region in which the operation is running. All Read Replicas in one + // region that are created from the same source DB instance must either: Specify + // DB subnet groups from the same VPC. All these Read Replicas will be created + // in the same VPC.Not specify a DB subnet group. All these Read Replicas will + // be created outside of any VPC. + DBSubnetGroupName *string `type:"string"` + + // The amount of Provisioned IOPS (input/output operations per second) to be + // initially allocated for the DB instance. + Iops *int64 `type:"integer"` + + // The interval, in seconds, between points when Enhanced Monitoring metrics + // are collected for the Read Replica. To disable collecting Enhanced Monitoring + // metrics, specify 0. The default is 60. + // + // If MonitoringRoleArn is specified, then you must also set MonitoringInterval + // to a value other than 0. + // + // Valid Values: 0, 1, 5, 10, 15, 30, 60 + MonitoringInterval *int64 `type:"integer"` + + // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics + // to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. + // For information on creating a monitoring role, go to To create an IAM role + // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole). + // + // If MonitoringInterval is set to a value other than 0, then you must supply + // a MonitoringRoleArn value. + MonitoringRoleArn *string `type:"string"` + + // The option group the DB instance will be associated with. 
If omitted, the + // default option group for the engine specified will be used. + OptionGroupName *string `type:"string"` + + // The port number that the DB instance uses for connections. + // + // Default: Inherits from the source DB instance + // + // Valid Values: 1150-65535 + Port *int64 `type:"integer"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC:true VPC:false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // The identifier of the DB instance that will act as the source for the Read + // Replica. Each DB instance can have up to five Read Replicas. + // + // Constraints: + // + // Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB + // instance. Can specify a DB instance that is a MySQL Read Replica only if + // the source is running MySQL 5.6. Can specify a DB instance that is a PostgreSQL + // Read Replica only if the source is running PostgreSQL 9.3.5. The specified + // DB instance must have automatic backups enabled, its backup retention period + // must be greater than 0. If the source DB instance is in the same region as + // the Read Replica, specify a valid DB instance identifier. 
If the source DB + // instance is in a different region than the Read Replica, specify a valid + // DB instance ARN. For more information, go to Constructing a Amazon RDS Amazon + // Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + SourceDBInstanceIdentifier *string `type:"string" required:"true"` + + // Specifies the storage type to be associated with the Read Replica. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBInstanceReadReplicaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceReadReplicaInput) GoString() string { + return s.String() +} + +type CreateDBInstanceReadReplicaOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s CreateDBInstanceReadReplicaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBInstanceReadReplicaOutput) GoString() string { + return s.String() +} + +type CreateDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The DB parameter group family name. 
A DB parameter group can be associated + // with one and only one DB parameter group family, and can be applied only + // to a DB instance running a database engine and engine version compatible + // with that DB parameter group family. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // The name of the DB parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens This value is + // stored as a lowercase string. + DBParameterGroupName *string `type:"string" required:"true"` + + // The description for the DB parameter group. + Description *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBParameterGroupInput) GoString() string { + return s.String() +} + +type CreateDBParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBParameterGroup + // action. + // + // This data type is used as a request parameter in the DeleteDBParameterGroup + // action, and as a response element in the DescribeDBParameterGroups action. + DBParameterGroup *DBParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBParameterGroupOutput) GoString() string { + return s.String() +} + +type CreateDBSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the DB security group. + DBSecurityGroupDescription *string `type:"string" required:"true"` + + // The name for the DB security group. 
This value is stored as a lowercase string. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens Must not be "Default" + // Cannot contain spaces Example: mysecuritygroup + DBSecurityGroupName *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSecurityGroupInput) GoString() string { + return s.String() +} + +type CreateDBSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup + // RevokeDBSecurityGroupIngress This data type is used as a response element + // in the DescribeDBSecurityGroups action. + DBSecurityGroup *DBSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSecurityGroupOutput) GoString() string { + return s.String() +} + +type CreateDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // The DB instance identifier. This is the unique key that identifies a DB instance. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The identifier for the DB snapshot. 
+ // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-snapshot-id + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSnapshotInput) GoString() string { + return s.String() +} + +type CreateDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response + // element in the DescribeDBSnapshots action. + DBSnapshot *DBSnapshot `type:"structure"` +} + +// String returns the string representation +func (s CreateDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSnapshotOutput) GoString() string { + return s.String() +} + +type CreateDBSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the DB subnet group. + DBSubnetGroupDescription *string `type:"string" required:"true"` + + // The name for the DB subnet group. This value is stored as a lowercase string. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string" required:"true"` + + // The EC2 Subnet IDs for the DB subnet group. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` + + // A list of tags. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateDBSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSubnetGroupInput) GoString() string { + return s.String() +} + +type CreateDBSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup + // This data type is used as a response element in the DescribeDBSubnetGroups + // action. + DBSubnetGroup *DBSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateDBSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDBSubnetGroupOutput) GoString() string { + return s.String() +} + +type CreateEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // A Boolean value; set to true to activate the subscription, set to false to + // create the subscription but not active it. + Enabled *bool `type:"boolean"` + + // A list of event categories for a SourceType that you want to subscribe to. + // You can see a list of the categories for a given SourceType in the Events + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + // topic in the Amazon RDS User Guide or by using the DescribeEventCategories + // action. + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The Amazon Resource Name (ARN) of the SNS topic created for event notification. + // The ARN is created by Amazon SNS when you create a topic and subscribe to + // it. + SnsTopicArn *string `type:"string" required:"true"` + + // The list of identifiers of the event sources for which events will be returned. 
+ // If not specified, then all sources are included in the response. An identifier + // must begin with a letter and must contain only ASCII letters, digits, and + // hyphens; it cannot end with a hyphen or contain two consecutive hyphens. + // + // Constraints: + // + // If SourceIds are supplied, SourceType must also be provided. If the source + // type is a DB instance, then a DBInstanceIdentifier must be supplied. If the + // source type is a DB security group, a DBSecurityGroupName must be supplied. + // If the source type is a DB parameter group, a DBParameterGroupName must be + // supplied. If the source type is a DB snapshot, a DBSnapshotIdentifier must + // be supplied. + SourceIds []*string `locationNameList:"SourceId" type:"list"` + + // The type of source that will be generating the events. For example, if you + // want to be notified of events generated by a DB instance, you would set this + // parameter to db-instance. if this value is not specified, all events are + // returned. + // + // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot + SourceType *string `type:"string"` + + // The name of the subscription. + // + // Constraints: The name must be less than 255 characters. + SubscriptionName *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionInput) GoString() string { + return s.String() +} + +type CreateEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. 
+ EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s CreateEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionOutput) GoString() string { + return s.String() +} + +type CreateOptionGroupInput struct { + _ struct{} `type:"structure"` + + // Specifies the name of the engine that this option group should be associated + // with. + EngineName *string `type:"string" required:"true"` + + // Specifies the major version of the engine that this option group should be + // associated with. + MajorEngineVersion *string `type:"string" required:"true"` + + // The description of the option group. + OptionGroupDescription *string `type:"string" required:"true"` + + // Specifies the name of the option group to be created. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters or hyphens First character must + // be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Example: myoptiongroup + OptionGroupName *string `type:"string" required:"true"` + + // A list of tags. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOptionGroupInput) GoString() string { + return s.String() +} + +type CreateOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOptionGroupOutput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster +// RestoreDBClusterFromSnapshot This data type is used as a response element +// in the DescribeDBClusters action. +type DBCluster struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gigabytes (GB). + AllocatedStorage *int64 `type:"integer"` + + // Provides the list of EC2 Availability Zones that instances in the DB cluster + // can be created in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // Specifies the number of days for which automatic DB snapshots are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // If present, specifies the name of the character set that this cluster is + // associated with. + CharacterSetName *string `type:"string"` + + // Contains a user-supplied DB cluster identifier. This identifier is the unique + // key that identifies a DB cluster. + DBClusterIdentifier *string `type:"string"` + + // Provides the list of instances that make up the DB cluster. + DBClusterMembers []*DBClusterMember `locationNameList:"DBClusterMember" type:"list"` + + // Provides the list of option group memberships for this DB cluster. 
+ DBClusterOptionGroupMemberships []*DBClusterOptionGroupStatus `locationNameList:"DBClusterOptionGroup" type:"list"` + + // Specifies the name of the DB cluster parameter group for the DB cluster. + DBClusterParameterGroup *string `type:"string"` + + // Specifies information on the subnet group associated with the DB cluster, + // including the name, description, and subnets in the subnet group. + DBSubnetGroup *string `type:"string"` + + // Contains the name of the initial database of this DB cluster that was provided + // at create time, if one was specified when the DB cluster was created. This + // same name is returned for the life of the DB cluster. + DatabaseName *string `type:"string"` + + // If StorageEncrypted is true, the region-unique, immutable identifier for + // the encrypted DB cluster. This identifier is found in AWS CloudTrail log + // entries whenever the KMS key for the DB cluster is accessed. + DbClusterResourceId *string `type:"string"` + + // Specifies the earliest time to which a database can be restored with point-in-time + // restore. + EarliestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the connection endpoint for the primary instance of the DB cluster. + Endpoint *string `type:"string"` + + // Provides the name of the database engine to be used for this DB cluster. + Engine *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. + HostedZoneId *string `type:"string"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // cluster. + KmsKeyId *string `type:"string"` + + // Specifies the latest time to which a database can be restored with point-in-time + // restore. + LatestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Contains the master username for the DB cluster. 
+ MasterUsername *string `type:"string"` + + // Specifies the progress of the operation as a percentage. + PercentProgress *string `type:"string"` + + // Specifies the port that the database engine is listening on. + Port *int64 `type:"integer"` + + // Specifies the daily time range during which automated backups are created + // if automated backups are enabled, as determined by the BackupRetentionPeriod. + PreferredBackupWindow *string `type:"string"` + + // Specifies the weekly time range during which system maintenance can occur, + // in Universal Coordinated Time (UTC). + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the current state of this DB cluster. + Status *string `type:"string"` + + // Specifies whether the DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Provides a list of VPC security groups that the DB cluster belongs to. + VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s DBCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBCluster) GoString() string { + return s.String() +} + +// Contains information about an instance that is part of a DB cluster. +type DBClusterMember struct { + _ struct{} `type:"structure"` + + // Specifies the status of the DB cluster parameter group for this member of + // the DB cluster. + DBClusterParameterGroupStatus *string `type:"string"` + + // Specifies the instance identifier for this member of the DB cluster. + DBInstanceIdentifier *string `type:"string"` + + // Value that is true if the cluster member is the primary instance for the + // DB cluster and false otherwise. 
+ IsClusterWriter *bool `type:"boolean"` +} + +// String returns the string representation +func (s DBClusterMember) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterMember) GoString() string { + return s.String() +} + +// Contains status information for a DB cluster option group. +type DBClusterOptionGroupStatus struct { + _ struct{} `type:"structure"` + + // Specifies the name of the DB cluster option group. + DBClusterOptionGroupName *string `type:"string"` + + // Specifies the status of the DB cluster option group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterOptionGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterOptionGroupStatus) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the CreateDBClusterParameterGroup +// action. +// +// This data type is used as a request parameter in the DeleteDBClusterParameterGroup +// action, and as a response element in the DescribeDBClusterParameterGroups +// action. +type DBClusterParameterGroup struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB cluster parameter group. + DBClusterParameterGroupName *string `type:"string"` + + // Provides the name of the DB parameter group family that this DB cluster parameter + // group is compatible with. + DBParameterGroupFamily *string `type:"string"` + + // Provides the customer-specified description for this DB cluster parameter + // group. 
+ Description *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterParameterGroup) GoString() string { + return s.String() +} + +type DBClusterParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens This value is + // stored as a lowercase string. + DBClusterParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterParameterGroupNameMessage) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is +// used as a response element in the DescribeDBClusterSnapshots action. +type DBClusterSnapshot struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gigabytes (GB). + AllocatedStorage *int64 `type:"integer"` + + // Provides the list of EC2 Availability Zones that instances in the DB cluster + // snapshot can be restored in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // Specifies the time when the DB cluster was created, in Universal Coordinated + // Time (UTC). + ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the DB cluster identifier of the DB cluster that this DB cluster + // snapshot was created from. + DBClusterIdentifier *string `type:"string"` + + // Specifies the identifier for the DB cluster snapshot. 
+ DBClusterSnapshotIdentifier *string `type:"string"` + + // Specifies the name of the database engine. + Engine *string `type:"string"` + + // Provides the version of the database engine for this DB cluster snapshot. + EngineVersion *string `type:"string"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // cluster snapshot. + KmsKeyId *string `type:"string"` + + // Provides the license model information for this DB cluster snapshot. + LicenseModel *string `type:"string"` + + // Provides the master username for the DB cluster snapshot. + MasterUsername *string `type:"string"` + + // Specifies the percentage of the estimated data that has been transferred. + PercentProgress *int64 `type:"integer"` + + // Specifies the port that the DB cluster was listening on at the time of the + // snapshot. + Port *int64 `type:"integer"` + + // Provides the time when the snapshot was taken, in Universal Coordinated Time + // (UTC). + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Provides the type of the DB cluster snapshot. + SnapshotType *string `type:"string"` + + // Specifies the status of this DB cluster snapshot. + Status *string `type:"string"` + + // Specifies whether the DB cluster snapshot is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Provides the VPC ID associated with the DB cluster snapshot. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterSnapshot) GoString() string { + return s.String() +} + +// This data type is used as a response element in the action DescribeDBEngineVersions. +type DBEngineVersion struct { + _ struct{} `type:"structure"` + + // The description of the database engine. + DBEngineDescription *string `type:"string"` + + // The description of the database engine version. 
+ DBEngineVersionDescription *string `type:"string"` + + // The name of the DB parameter group family for the database engine. + DBParameterGroupFamily *string `type:"string"` + + // The default character set for new instances of this engine version, if the + // CharacterSetName parameter of the CreateDBInstance API is not specified. + DefaultCharacterSet *CharacterSet `type:"structure"` + + // The name of the database engine. + Engine *string `type:"string"` + + // The version number of the database engine. + EngineVersion *string `type:"string"` + + // A list of the character sets supported by this engine for the CharacterSetName + // parameter of the CreateDBInstance API. + SupportedCharacterSets []*CharacterSet `locationNameList:"CharacterSet" type:"list"` + + // A list of engine versions that this database engine version can be upgraded + // to. + ValidUpgradeTarget []*UpgradeTarget `locationNameList:"UpgradeTarget" type:"list"` +} + +// String returns the string representation +func (s DBEngineVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBEngineVersion) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBInstance DeleteDBInstance ModifyDBInstance This data type +// is used as a response element in the DescribeDBInstances action. +type DBInstance struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size specified in gigabytes. + AllocatedStorage *int64 `type:"integer"` + + // Indicates that minor version patches are applied automatically. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // Specifies the name of the Availability Zone the DB instance is located in. + AvailabilityZone *string `type:"string"` + + // Specifies the number of days for which automatic DB snapshots are retained. 
+ BackupRetentionPeriod *int64 `type:"integer"` + + // The identifier of the CA certificate for this DB instance. + CACertificateIdentifier *string `type:"string"` + + // If present, specifies the name of the character set that this instance is + // associated with. + CharacterSetName *string `type:"string"` + + // Specifies whether tags are copied from the DB instance to snapshots of the + // DB instance. + CopyTagsToSnapshot *bool `type:"boolean"` + + // If the DB instance is a member of a DB cluster, contains the name of the + // DB cluster that the DB instance is a member of. + DBClusterIdentifier *string `type:"string"` + + // Contains the name of the compute and memory capacity class of the DB instance. + DBInstanceClass *string `type:"string"` + + // Contains a user-supplied database identifier. This identifier is the unique + // key that identifies a DB instance. + DBInstanceIdentifier *string `type:"string"` + + // Specifies the current state of this database. + DBInstanceStatus *string `type:"string"` + + // The meaning of this parameter differs according to the database engine you + // use. For example, this value returns MySQL, MariaDB, or PostgreSQL information + // when returning values from CreateDBInstanceReadReplica since Read Replicas + // are only supported for these engines. + // + // MySQL, MariaDB, SQL Server, PostgreSQL, Amazon Aurora + // + // Contains the name of the initial database of this instance that was provided + // at create time, if one was specified when the DB instance was created. This + // same name is returned for the life of the DB instance. + // + // Type: String + // + // Oracle + // + // Contains the Oracle System ID (SID) of the created DB instance. Not shown + // when the returned parameters do not apply to an Oracle DB instance. + DBName *string `type:"string"` + + // Provides the list of DB parameter groups applied to this DB instance. 
+ DBParameterGroups []*DBParameterGroupStatus `locationNameList:"DBParameterGroup" type:"list"` + + // Provides List of DB security group elements containing only DBSecurityGroup.Name + // and DBSecurityGroup.Status subelements. + DBSecurityGroups []*DBSecurityGroupMembership `locationNameList:"DBSecurityGroup" type:"list"` + + // Specifies information on the subnet group associated with the DB instance, + // including the name, description, and subnets in the subnet group. + DBSubnetGroup *DBSubnetGroup `type:"structure"` + + // Specifies the port that the DB instance listens on. If the DB instance is + // part of a DB cluster, this can be a different port than the DB cluster port. + DbInstancePort *int64 `type:"integer"` + + // If StorageEncrypted is true, the region-unique, immutable identifier for + // the encrypted DB instance. This identifier is found in AWS CloudTrail log + // entries whenever the KMS key for the DB instance is accessed. + DbiResourceId *string `type:"string"` + + // Specifies the connection endpoint. + Endpoint *Endpoint `type:"structure"` + + // Provides the name of the database engine to be used for this DB instance. + Engine *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that + // receives the Enhanced Monitoring metrics data for the DB instance. + EnhancedMonitoringResourceArn *string `type:"string"` + + // Provides the date and time the DB instance was created. + InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the Provisioned IOPS (I/O operations per second) value. + Iops *int64 `type:"integer"` + + // If StorageEncrypted is true, the KMS key identifier for the encrypted DB + // instance. + KmsKeyId *string `type:"string"` + + // Specifies the latest time to which a database can be restored with point-in-time + // restore. 
+ LatestRestorableTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // License model information for this DB instance. + LicenseModel *string `type:"string"` + + // Contains the master username for the DB instance. + MasterUsername *string `type:"string"` + + // The interval, in seconds, between points when Enhanced Monitoring metrics + // are collected for the DB instance. + MonitoringInterval *int64 `type:"integer"` + + // The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics + // to CloudWatch Logs. + MonitoringRoleArn *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. + MultiAZ *bool `type:"boolean"` + + // Provides the list of option group memberships for this DB instance. + OptionGroupMemberships []*OptionGroupMembership `locationNameList:"OptionGroupMembership" type:"list"` + + // Specifies that changes to the DB instance are pending. This element is only + // included when changes are pending. Specific changes are identified by subelements. + PendingModifiedValues *PendingModifiedValues `type:"structure"` + + // Specifies the daily time range during which automated backups are created + // if automated backups are enabled, as determined by the BackupRetentionPeriod. + PreferredBackupWindow *string `type:"string"` + + // Specifies the weekly time range during which system maintenance can occur, + // in Universal Coordinated Time (UTC). + PreferredMaintenanceWindow *string `type:"string"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. 
+ // + // Default VPC:true VPC:false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Contains one or more identifiers of the Read Replicas associated with this + // DB instance. + ReadReplicaDBInstanceIdentifiers []*string `locationNameList:"ReadReplicaDBInstanceIdentifier" type:"list"` + + // Contains the identifier of the source DB instance if this DB instance is + // a Read Replica. + ReadReplicaSourceDBInstanceIdentifier *string `type:"string"` + + // If present, specifies the name of the secondary Availability Zone for a DB + // instance with multi-AZ support. + SecondaryAvailabilityZone *string `type:"string"` + + // The status of a Read Replica. If the instance is not a Read Replica, this + // will be blank. + StatusInfos []*DBInstanceStatusInfo `locationNameList:"DBInstanceStatusInfo" type:"list"` + + // Specifies whether the DB instance is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // Specifies the storage type associated with DB instance. + StorageType *string `type:"string"` + + // The ARN from the Key Store with which the instance is associated for TDE + // encryption. + TdeCredentialArn *string `type:"string"` + + // Provides List of VPC security group elements that the DB instance belongs + // to. + VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s DBInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBInstance) GoString() string { + return s.String() +} + +// Provides a list of status information for a DB instance. 
+type DBInstanceStatusInfo struct { + _ struct{} `type:"structure"` + + // Details of the error if there is an error for the instance. If the instance + // is not in an error state, this value is blank. + Message *string `type:"string"` + + // Boolean value that is true if the instance is operating normally, or false + // if the instance is in an error state. + Normal *bool `type:"boolean"` + + // Status of the DB instance. For a StatusType of read replica, the values can + // be replicating, error, stopped, or terminated. + Status *string `type:"string"` + + // This value is currently "read replication." + StatusType *string `type:"string"` +} + +// String returns the string representation +func (s DBInstanceStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBInstanceStatusInfo) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the CreateDBParameterGroup +// action. +// +// This data type is used as a request parameter in the DeleteDBParameterGroup +// action, and as a response element in the DescribeDBParameterGroups action. +type DBParameterGroup struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB parameter group family that this DB parameter + // group is compatible with. + DBParameterGroupFamily *string `type:"string"` + + // Provides the name of the DB parameter group. + DBParameterGroupName *string `type:"string"` + + // Provides the customer-specified description for this DB parameter group. + Description *string `type:"string"` +} + +// String returns the string representation +func (s DBParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBParameterGroup) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the ModifyDBParameterGroup +// or ResetDBParameterGroup action. 
+type DBParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // Provides the name of the DB parameter group. + DBParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DBParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBParameterGroupNameMessage) GoString() string { + return s.String() +} + +// The status of the DB parameter group. +// +// This data type is used as a response element in the following actions: +// +// CreateDBInstance CreateDBInstanceReadReplica DeleteDBInstance ModifyDBInstance +// RebootDBInstance RestoreDBInstanceFromDBSnapshot +type DBParameterGroupStatus struct { + _ struct{} `type:"structure"` + + // The name of the DP parameter group. + DBParameterGroupName *string `type:"string"` + + // The status of parameter updates. + ParameterApplyStatus *string `type:"string"` +} + +// String returns the string representation +func (s DBParameterGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBParameterGroupStatus) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup +// RevokeDBSecurityGroupIngress This data type is used as a response element +// in the DescribeDBSecurityGroups action. +type DBSecurityGroup struct { + _ struct{} `type:"structure"` + + // Provides the description of the DB security group. + DBSecurityGroupDescription *string `type:"string"` + + // Specifies the name of the DB security group. + DBSecurityGroupName *string `type:"string"` + + // Contains a list of EC2SecurityGroup elements. + EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"` + + // Contains a list of IPRange elements. 
+ IPRanges []*IPRange `locationNameList:"IPRange" type:"list"` + + // Provides the AWS ID of the owner of a specific DB security group. + OwnerId *string `type:"string"` + + // Provides the VpcId of the DB security group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DBSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSecurityGroup) GoString() string { + return s.String() +} + +// This data type is used as a response element in the following actions: +// +// ModifyDBInstance RebootDBInstance RestoreDBInstanceFromDBSnapshot +// RestoreDBInstanceToPointInTime +type DBSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the DB security group. + DBSecurityGroupName *string `type:"string"` + + // The status of the DB security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DBSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSecurityGroupMembership) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBSnapshot DeleteDBSnapshot This data type is used as a response +// element in the DescribeDBSnapshots action. +type DBSnapshot struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gigabytes (GB). + AllocatedStorage *int64 `type:"integer"` + + // Specifies the name of the Availability Zone the DB instance was located in + // at the time of the DB snapshot. + AvailabilityZone *string `type:"string"` + + // Specifies the DB instance identifier of the DB instance this DB snapshot + // was created from. + DBInstanceIdentifier *string `type:"string"` + + // Specifies the identifier for the DB snapshot. 
+ DBSnapshotIdentifier *string `type:"string"` + + // Specifies whether the DB snapshot is encrypted. + Encrypted *bool `type:"boolean"` + + // Specifies the name of the database engine. + Engine *string `type:"string"` + + // Specifies the version of the database engine. + EngineVersion *string `type:"string"` + + // Specifies the time when the snapshot was taken, in Universal Coordinated + // Time (UTC). + InstanceCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the Provisioned IOPS (I/O operations per second) value of the DB + // instance at the time of the snapshot. + Iops *int64 `type:"integer"` + + // If Encrypted is true, the KMS key identifier for the encrypted DB snapshot. + KmsKeyId *string `type:"string"` + + // License model information for the restored DB instance. + LicenseModel *string `type:"string"` + + // Provides the master username for the DB snapshot. + MasterUsername *string `type:"string"` + + // Provides the option group name for the DB snapshot. + OptionGroupName *string `type:"string"` + + // The percentage of the estimated data that has been transferred. + PercentProgress *int64 `type:"integer"` + + // Specifies the port that the database engine was listening on at the time + // of the snapshot. + Port *int64 `type:"integer"` + + // Provides the time when the snapshot was taken, in Universal Coordinated Time + // (UTC). + SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Provides the type of the DB snapshot. + SnapshotType *string `type:"string"` + + // The DB snapshot Arn that the DB snapshot was copied from. It only has value + // in case of cross customer or cross region copy. + SourceDBSnapshotIdentifier *string `type:"string"` + + // The region that the DB snapshot was created in or copied from. + SourceRegion *string `type:"string"` + + // Specifies the status of this DB snapshot. 
+ Status *string `type:"string"` + + // Specifies the storage type associated with DB Snapshot. + StorageType *string `type:"string"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // Provides the VPC ID associated with the DB snapshot. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DBSnapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSnapshot) GoString() string { + return s.String() +} + +// Contains the name and values of a manual DB snapshot attribute +// +// Manual DB snapshot attributes are used to authorize other AWS accounts to +// restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute +// API. +type DBSnapshotAttribute struct { + _ struct{} `type:"structure"` + + // The name of the manual DB snapshot attribute. + // + // An attribute name of restore applies to the list of AWS accounts that have + // permission to copy or restore the manual DB snapshot. + AttributeName *string `type:"string"` + + // The value(s) for the manual DB snapshot attribute. + // + // If the AttributeName field is restore, then this field returns a list of + // AWS account ids that are authorized to copy or restore the manual DB snapshot. + // If a value of all is in the list, then the manual DB snapshot is public and + // available for any AWS account to copy or restore. + AttributeValues []*string `locationNameList:"AttributeValue" type:"list"` +} + +// String returns the string representation +func (s DBSnapshotAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSnapshotAttribute) GoString() string { + return s.String() +} + +// Contains the results of a successful call to the DescribeDBSnapshotAttributes +// API. 
+// +// Manual DB snapshot attributes are used to authorize other AWS accounts to +// copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute +// API. +type DBSnapshotAttributesResult struct { + _ struct{} `type:"structure"` + + // The list of attributes and values for the manual DB snapshot. + DBSnapshotAttributes []*DBSnapshotAttribute `locationNameList:"DBSnapshotAttribute" type:"list"` + + // The identifier of the manual DB snapshot that the attributes apply to. + DBSnapshotIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s DBSnapshotAttributesResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSnapshotAttributesResult) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the following actions: +// +// CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup +// This data type is used as a response element in the DescribeDBSubnetGroups +// action. +type DBSubnetGroup struct { + _ struct{} `type:"structure"` + + // Provides the description of the DB subnet group. + DBSubnetGroupDescription *string `type:"string"` + + // Specifies the name of the DB subnet group. + DBSubnetGroupName *string `type:"string"` + + // Provides the status of the DB subnet group. + SubnetGroupStatus *string `type:"string"` + + // Contains a list of Subnet elements. + Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + + // Provides the VpcId of the DB subnet group. 
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DBSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBSubnetGroup) GoString() string { + return s.String() +} + +type DeleteDBClusterInput struct { + _ struct{} `type:"structure"` + + // The DB cluster identifier for the DB cluster to be deleted. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string" required:"true"` + + // The DB cluster snapshot identifier of the new DB cluster snapshot created + // when SkipFinalSnapshot is set to false. + // + // Specifying this parameter and also setting the SkipFinalShapshot parameter + // to true results in an error. Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + FinalDBSnapshotIdentifier *string `type:"string"` + + // Determines whether a final DB cluster snapshot is created before the DB cluster + // is deleted. If true is specified, no DB cluster snapshot is created. If false + // is specified, a DB cluster snapshot is created before the DB cluster is deleted. + // + // You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot + // is false. 
Default: false + SkipFinalSnapshot *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterInput) GoString() string { + return s.String() +} + +type DeleteDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster + // RestoreDBClusterFromSnapshot This data type is used as a response element + // in the DescribeDBClusters action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterOutput) GoString() string { + return s.String() +} + +type DeleteDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group. + // + // Constraints: + // + // Must be the name of an existing DB cluster parameter group. You cannot + // delete a default DB cluster parameter group. Cannot be associated with any + // DB clusters. 
+ DBClusterParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +type DeleteDBClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterParameterGroupOutput) GoString() string { + return s.String() +} + +type DeleteDBClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster snapshot to delete. + // + // Constraints: Must be the name of an existing DB cluster snapshot in the + // available state. + DBClusterSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterSnapshotInput) GoString() string { + return s.String() +} + +type DeleteDBClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is + // used as a response element in the DescribeDBClusterSnapshots action. 
+ DBClusterSnapshot *DBClusterSnapshot `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBClusterSnapshotOutput) GoString() string { + return s.String() +} + +type DeleteDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The DB instance identifier for the DB instance to be deleted. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot + // is set to false. + // + // Specifying this parameter and also setting the SkipFinalShapshot parameter + // to true results in an error. Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens Cannot be specified + // when deleting a Read Replica. + FinalDBSnapshotIdentifier *string `type:"string"` + + // Determines whether a final DB snapshot is created before the DB instance + // is deleted. If true is specified, no DBSnapshot is created. If false is specified, + // a DB snapshot is created before the DB instance is deleted. + // + // Note that when a DB instance is in a failure state and has a status of 'failed', + // 'incompatible-restore', or 'incompatible-network', it can only be deleted + // when the SkipFinalSnapshot parameter is set to "true". + // + // Specify true when deleting a Read Replica. + // + // The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot + // is false. 
Default: false + SkipFinalSnapshot *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBInstanceInput) GoString() string { + return s.String() +} + +type DeleteDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBInstanceOutput) GoString() string { + return s.String() +} + +type DeleteDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group. 
+ // + // Constraints: + // + // Must be the name of an existing DB parameter group You cannot delete a + // default DB parameter group Cannot be associated with any DB instances + DBParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBParameterGroupInput) GoString() string { + return s.String() +} + +type DeleteDBParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBParameterGroupOutput) GoString() string { + return s.String() +} + +type DeleteDBSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB security group to delete. + // + // You cannot delete the default DB security group. 
Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens Must not be "Default" + // Cannot contain spaces + DBSecurityGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteDBSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSecurityGroupOutput) GoString() string { + return s.String() +} + +type DeleteDBSnapshotInput struct { + _ struct{} `type:"structure"` + + // The DBSnapshot identifier. + // + // Constraints: Must be the name of an existing DB snapshot in the available + // state. + DBSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSnapshotInput) GoString() string { + return s.String() +} + +type DeleteDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response + // element in the DescribeDBSnapshots action. 
+ DBSnapshot *DBSnapshot `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSnapshotOutput) GoString() string { + return s.String() +} + +type DeleteDBSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the database subnet group to delete. + // + // You cannot delete the default subnet group. Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBSubnetGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDBSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSubnetGroupInput) GoString() string { + return s.String() +} + +type DeleteDBSubnetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDBSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDBSubnetGroupOutput) GoString() string { + return s.String() +} + +type DeleteEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The name of the RDS event notification subscription you want to delete. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSubscriptionInput) GoString() string { + return s.String() +} + +type DeleteEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. 
+ EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s DeleteEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteOptionGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the option group to be deleted. + // + // You cannot delete default option groups. + OptionGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOptionGroupInput) GoString() string { + return s.String() +} + +type DeleteOptionGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOptionGroupOutput) GoString() string { + return s.String() +} + +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +// Data returned by the DescribeAccountAttributes action. +type DescribeAccountAttributesOutput struct { + _ struct{} `type:"structure"` + + // A list of AccountQuota objects. Within this list, each quota has a name, + // a count of usage toward the quota maximum, and a maximum value for the quota. 
+ AccountQuotas []*AccountQuota `locationNameList:"AccountQuota" type:"list"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +type DescribeCertificatesInput struct { + _ struct{} `type:"structure"` + + // The user-supplied certificate identifier. If this parameter is specified, + // information for only the identified certificate is returned. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + CertificateIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeCertificates + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesInput) GoString() string { + return s.String() +} + +// Data returned by the DescribeCertificates action. 
+type DescribeCertificatesOutput struct { + _ struct{} `type:"structure"` + + // The list of Certificate objects for the AWS account. + Certificates []*Certificate `locationNameList:"Certificate" type:"list"` + + // An optional pagination token provided by a previous DescribeCertificates + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB cluster parameter group to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. 
+ MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBClusterParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParameterGroupsInput) GoString() string { + return s.String() +} + +type DescribeDBClusterParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DB cluster parameter groups. + DBClusterParameterGroups []*DBClusterParameterGroup `locationNameList:"DBClusterParameterGroup" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterParametersInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB cluster parameter group to return parameter details + // for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A value that indicates to return only parameters for a specific source. Parameter + // sources can be engine, service, or customer. + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParametersInput) GoString() string { + return s.String() +} + +// Provides details about a DB cluster parameter group including the parameters +// in the DB cluster parameter group. +type DescribeDBClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous DescribeDBClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // Provides a list of parameters for the DB cluster parameter group. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DescribeDBClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterParametersOutput) GoString() string { + return s.String() +} + +type DescribeDBClusterSnapshotsInput struct { + _ struct{} `type:"structure"` + + // A DB cluster identifier to retrieve the list of DB cluster snapshots for. + // This parameter cannot be used in conjunction with the DBClusterSnapshotIdentifier + // parameter. This parameter is not case-sensitive. 
+ // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` + + // A specific DB cluster snapshot identifier to describe. This parameter cannot + // be used in conjunction with the DBClusterIdentifier parameter. This value + // is stored as a lowercase string. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens If this is the + // identifier of an automated snapshot, the SnapshotType parameter must also + // be specified. + DBClusterSnapshotIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterSnapshots + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The type of DB cluster snapshots that will be returned. Values can be automated + // or manual. If this parameter is not specified, the returned results will + // include all snapshot types. 
+ SnapshotType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotsInput) GoString() string { + return s.String() +} + +// Provides a list of DB cluster snapshots for the user as the result of a call +// to the DescribeDBClusterSnapshots action. +type DescribeDBClusterSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // Provides a list of DB cluster snapshots for the user. + DBClusterSnapshots []*DBClusterSnapshot `locationNameList:"DBClusterSnapshot" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusterSnapshots + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotsOutput) GoString() string { + return s.String() +} + +type DescribeDBClustersInput struct { + _ struct{} `type:"structure"` + + // The user-supplied DB cluster identifier. If this parameter is specified, + // information from only the specific DB cluster is returned. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBClusters request. 
+ // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClustersInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBClusters +// action. +type DescribeDBClustersOutput struct { + _ struct{} `type:"structure"` + + // Contains a list of DB clusters for the user. + DBClusters []*DBCluster `locationNameList:"DBCluster" type:"list"` + + // A pagination token that can be used in a subsequent DescribeDBClusters request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClustersOutput) GoString() string { + return s.String() +} + +type DescribeDBEngineVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group family to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupFamily *string `type:"string"` + + // Indicates that only the default version of the specified engine or engine + // and major version combination is returned. 
+ DefaultOnly *bool `type:"boolean"` + + // The database engine to return. + Engine *string `type:"string"` + + // The database engine version to return. + // + // Example: 5.1.49 + EngineVersion *string `type:"string"` + + // Not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // If this parameter is specified, and if the requested engine supports the + // CharacterSetName parameter for CreateDBInstance, the response includes a + // list of supported character sets for each engine version. + ListSupportedCharacterSets *bool `type:"boolean"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more than the + // MaxRecords value is available, a pagination token called a marker is included + // in the response so that the following results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBEngineVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBEngineVersionsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBEngineVersions +// action. +type DescribeDBEngineVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBEngineVersion elements. + DBEngineVersions []*DBEngineVersion `locationNameList:"DBEngineVersion" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBEngineVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBEngineVersionsOutput) GoString() string { + return s.String() +} + +type DescribeDBInstancesInput struct { + _ struct{} `type:"structure"` + + // The user-supplied instance identifier. If this parameter is specified, information + // from only the specific DB instance is returned. This parameter isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBInstances request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBInstancesInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBInstances +// action. +type DescribeDBInstancesOutput struct { + _ struct{} `type:"structure"` + + // A list of DBInstance instances. 
+ DBInstances []*DBInstance `locationNameList:"DBInstance" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords . + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBInstancesOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element to DescribeDBLogFiles. +type DescribeDBLogFilesDetails struct { + _ struct{} `type:"structure"` + + // A POSIX timestamp when the last log entry was written. + LastWritten *int64 `type:"long"` + + // The name of the log file for the specified DB instance. + LogFileName *string `type:"string"` + + // The size, in bytes, of the log file for the specified DB instance. + Size *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeDBLogFilesDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesDetails) GoString() string { + return s.String() +} + +type DescribeDBLogFilesInput struct { + _ struct{} `type:"structure"` + + // The customer-assigned name of the DB instance that contains the log files + // you want to list. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // Filters the available log files for files written since the specified date, + // in POSIX timestamp format with milliseconds. + FileLastWritten *int64 `type:"long"` + + // Filters the available log files for files larger than the specified size. 
+ FileSize *int64 `type:"long"` + + // Filters the available log files for log file names that contain the specified + // string. + FilenameContains *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The pagination token provided in the previous request. If this parameter + // is specified the response includes only records beyond the marker, up to + // MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBLogFilesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesInput) GoString() string { + return s.String() +} + +// The response from a call to DescribeDBLogFiles. +type DescribeDBLogFilesOutput struct { + _ struct{} `type:"structure"` + + // The DB log files returned. + DescribeDBLogFiles []*DescribeDBLogFilesDetails `locationNameList:"DescribeDBLogFilesDetails" type:"list"` + + // A pagination token that can be used in a subsequent DescribeDBLogFiles request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBLogFilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBLogFilesOutput) GoString() string { + return s.String() +} + +type DescribeDBParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group to return details for. 
+ // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBParameterGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParameterGroupsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBParameterGroups +// action. +type DescribeDBParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBParameterGroup instances. + DBParameterGroups []*DBParameterGroup `locationNameList:"DBParameterGroup" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeDBParametersInput struct { + _ struct{} `type:"structure"` + + // The name of a specific DB parameter group to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The parameter types to return. + // + // Default: All parameter types returned + // + // Valid Values: user | system | engine-default + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParametersInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBParameters +// action. 
+type DescribeDBParametersOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // A list of Parameter values. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DescribeDBParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBParametersOutput) GoString() string { + return s.String() +} + +type DescribeDBSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the DB security group to return details for. + DBSecurityGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBSecurityGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSecurityGroupsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBSecurityGroups +// action. 
+type DescribeDBSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBSecurityGroup instances. + DBSecurityGroups []*DBSecurityGroup `locationNameList:"DBSecurityGroup" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeDBSnapshotAttributesInput struct { + _ struct{} `type:"structure"` + + // The identifier for the DB snapshot to modify the attributes for. + DBSnapshotIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBSnapshotAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSnapshotAttributesInput) GoString() string { + return s.String() +} + +type DescribeDBSnapshotAttributesOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBSnapshotAttributes + // API. + // + // Manual DB snapshot attributes are used to authorize other AWS accounts to + // copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute + // API. 
+ DBSnapshotAttributesResult *DBSnapshotAttributesResult `type:"structure"` +} + +// String returns the string representation +func (s DescribeDBSnapshotAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSnapshotAttributesOutput) GoString() string { + return s.String() +} + +type DescribeDBSnapshotsInput struct { + _ struct{} `type:"structure"` + + // A DB instance identifier to retrieve the list of DB snapshots for. This parameter + // cannot be used in conjunction with DBSnapshotIdentifier. This parameter is + // not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string"` + + // A specific DB snapshot identifier to describe. This parameter cannot be used + // in conjunction with DBInstanceIdentifier. This value is stored as a lowercase + // string. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters. First character must be a letter. + // Cannot end with a hyphen or contain two consecutive hyphens. If this is the + // identifier of an automated snapshot, the SnapshotType parameter must also + // be specified. + DBSnapshotIdentifier *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // True to include manual DB snapshots that are public and can be copied or + // restored by any AWS account; otherwise false. The default is false. + // + // An manual DB snapshot is shared as public by the ModifyDBSnapshotAttribute + // API. + IncludePublic *bool `type:"boolean"` + + // True to include shared manual DB snapshots from other AWS accounts that this + // AWS account has been given permission to copy or restore; otherwise false. + // The default is false. 
+ // + // An AWS account is given permission to restore a manual DB snapshot from + // another AWS account by the ModifyDBSnapshotAttribute API. + IncludeShared *bool `type:"boolean"` + + // An optional pagination token provided by a previous DescribeDBSnapshots request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The type of snapshots that will be returned. You can specify one of the following + // values: + // + // automated - Return all DB snapshots that have been automatically taken + // by Amazon RDS for my AWS account. manual - Return all DB snapshots that have + // been taken by my AWS account. shared - Return all manual DB snapshots that + // have been shared to my AWS account. public - Return all DB snapshots that + // have been marked as public. If you do not specify a SnapshotType, then both + // automated and manual snapshots are returned. You can include shared snapshots + // with these results by setting the IncludeShared parameter to true. You can + // include public snapshots with these results by setting the IncludePublic + // parameter to true. + // + // The IncludeShared and IncludePublic parameters do not apply for SnapshotType + // values of manual or automated. The IncludePublic parameter does not apply + // when SnapshotType is set to shared. the IncludeShared parameter does not + // apply when SnapshotType is set to public. 
+ SnapshotType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSnapshotsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBSnapshots +// action. +type DescribeDBSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBSnapshot instances. + DBSnapshots []*DBSnapshot `locationNameList:"DBSnapshot" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSnapshotsOutput) GoString() string { + return s.String() +} + +type DescribeDBSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the DB subnet group to return details for. + DBSubnetGroupName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeDBSubnetGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. 
+ MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeDBSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSubnetGroupsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeDBSubnetGroups +// action. +type DescribeDBSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of DBSubnetGroup instances. + DBSubnetGroups []*DBSubnetGroup `locationNameList:"DBSubnetGroup" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDBSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBSubnetGroupsOutput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultClusterParametersInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group family to return engine parameter + // information for. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeEngineDefaultClusterParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultClusterParametersInput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the DescribeEngineDefaultParameters + // action. + EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultClusterParametersOutput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultParametersInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group family. + DBParameterGroupFamily *string `type:"string" required:"true"` + + // Not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeEngineDefaultParameters + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. 
+ // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersInput) GoString() string { + return s.String() +} + +type DescribeEngineDefaultParametersOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the DescribeEngineDefaultParameters + // action. + EngineDefaults *EngineDefaults `type:"structure"` +} + +// String returns the string representation +func (s DescribeEngineDefaultParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEngineDefaultParametersOutput) GoString() string { + return s.String() +} + +type DescribeEventCategoriesInput struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The type of source that will be generating the events. + // + // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot + SourceType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventCategoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventCategoriesInput) GoString() string { + return s.String() +} + +// Data returned from the DescribeEventCategories action. +type DescribeEventCategoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of EventCategoriesMap data types. 
+ EventCategoriesMapList []*EventCategoriesMap `locationNameList:"EventCategoriesMap" type:"list"` +} + +// String returns the string representation +func (s DescribeEventCategoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventCategoriesOutput) GoString() string { + return s.String() +} + +type DescribeEventSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the RDS event notification subscription you want to describe. + SubscriptionName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventSubscriptionsInput) GoString() string { + return s.String() +} + +// Data returned by the DescribeEventSubscriptions action. +type DescribeEventSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // A list of EventSubscriptions data types. + EventSubscriptionsList []*EventSubscription `locationNameList:"EventSubscription" type:"list"` + + // An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions + // request. 
If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventSubscriptionsOutput) GoString() string { + return s.String() +} + +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // The number of minutes to retrieve events for. + // + // Default: 60 + Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia + // page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2009-07-08T18:00Z + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of event categories that trigger notifications for a event notification + // subscription. + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeEvents request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier of the event source for which events will be returned. If + // not specified, then all sources are included in the response. 
+ // + // Constraints: + // + // If SourceIdentifier is supplied, SourceType must also be provided. If the + // source type is DBInstance, then a DBInstanceIdentifier must be supplied. + // If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied. + // If the source type is DBParameterGroup, a DBParameterGroupName must be supplied. + // If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied. + // Cannot end with a hyphen or contain two consecutive hyphens. + SourceIdentifier *string `type:"string"` + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia + // page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2009-07-08T18:00Z + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeEvents action. +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of Event instances. + Events []*Event `locationNameList:"Event" type:"list"` + + // An optional pagination token provided by a previous Events request. If this + // parameter is specified, the response includes only records beyond the marker, + // up to the value specified by MaxRecords . 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +type DescribeOptionGroupOptionsInput struct { + _ struct{} `type:"structure"` + + // A required parameter. Options available for the given engine name will be + // described. + EngineName *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // If specified, filters the results to include only options for the specified + // major engine version. + MajorEngineVersion *string `type:"string"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeOptionGroupOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupOptionsInput) GoString() string { + return s.String() +} + +type DescribeOptionGroupOptionsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. 
+ Marker *string `type:"string"` + + // List of available option group options. + OptionGroupOptions []*OptionGroupOption `locationNameList:"OptionGroupOption" type:"list"` +} + +// String returns the string representation +func (s DescribeOptionGroupOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupOptionsOutput) GoString() string { + return s.String() +} + +type DescribeOptionGroupsInput struct { + _ struct{} `type:"structure"` + + // Filters the list of option groups to only include groups associated with + // a specific database engine. + EngineName *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // Filters the list of option groups to only include groups associated with + // a specific database engine version. If specified, then EngineName must also + // be specified. + MajorEngineVersion *string `type:"string"` + + // An optional pagination token provided by a previous DescribeOptionGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the option group to describe. Cannot be supplied together with + // EngineName or MajorEngineVersion. 
+ OptionGroupName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeOptionGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupsInput) GoString() string { + return s.String() +} + +// List of option groups. +type DescribeOptionGroupsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // List of option groups. + OptionGroupsList []*OptionGroup `locationNameList:"OptionGroup" type:"list"` +} + +// String returns the string representation +func (s DescribeOptionGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOptionGroupsOutput) GoString() string { + return s.String() +} + +type DescribeOrderableDBInstanceOptionsInput struct { + _ struct{} `type:"structure"` + + // The DB instance class filter value. Specify this parameter to show only the + // available offerings matching the specified DB instance class. + DBInstanceClass *string `type:"string"` + + // The name of the engine to retrieve DB instance options for. + Engine *string `type:"string" required:"true"` + + // The engine version filter value. Specify this parameter to show only the + // available offerings matching the specified engine version. + EngineVersion *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The license model filter value. Specify this parameter to show only the available + // offerings matching the specified license model. 
+ LicenseModel *string `type:"string"` + + // An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The VPC filter value. Specify this parameter to show only the available VPC + // or non-VPC offerings. + Vpc *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeOrderableDBInstanceOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableDBInstanceOptionsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions +// action. +type DescribeOrderableDBInstanceOptionsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous OrderableDBInstanceOptions + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // An OrderableDBInstanceOption structure containing information about orderable + // options for the DB instance. 
+ OrderableDBInstanceOptions []*OrderableDBInstanceOption `locationNameList:"OrderableDBInstanceOption" type:"list"` +} + +// String returns the string representation +func (s DescribeOrderableDBInstanceOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableDBInstanceOptionsOutput) GoString() string { + return s.String() +} + +type DescribePendingMaintenanceActionsInput struct { + _ struct{} `type:"structure"` + + // A filter that specifies one or more resources to return pending maintenance + // actions for. + // + // Supported filters: + // + // db-instance-id - Accepts DB instance identifiers and DB instance Amazon + // Resource Names (ARNs). The results list will only include pending maintenance + // actions for the DB instances identified by these ARNs. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribePendingMaintenanceActions + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to a number of records specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The ARN of a resource to return pending maintenance actions for. 
+ ResourceIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s DescribePendingMaintenanceActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePendingMaintenanceActionsInput) GoString() string { + return s.String() +} + +// Data returned from the DescribePendingMaintenanceActions action. +type DescribePendingMaintenanceActionsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous DescribePendingMaintenanceActions + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to a number of records specified by MaxRecords. + Marker *string `type:"string"` + + // A list of the pending maintenance actions for the resource. + PendingMaintenanceActions []*ResourcePendingMaintenanceActions `locationNameList:"ResourcePendingMaintenanceActions" type:"list"` +} + +// String returns the string representation +func (s DescribePendingMaintenanceActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePendingMaintenanceActionsOutput) GoString() string { + return s.String() +} + +type DescribeReservedDBInstancesInput struct { + _ struct{} `type:"structure"` + + // The DB instance class filter value. Specify this parameter to show only those + // reservations matching the specified DB instances class. + DBInstanceClass *string `type:"string"` + + // The duration filter value, specified in years or seconds. Specify this parameter + // to show only reservations for this duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. 
If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more than the + // MaxRecords value is available, a pagination token called a marker is included + // in the response so that the following results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The Multi-AZ filter value. Specify this parameter to show only those reservations + // matching the specified Multi-AZ parameter. + MultiAZ *bool `type:"boolean"` + + // The offering type filter value. Specify this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid Values: "Partial Upfront" | "All Upfront" | "No Upfront" + OfferingType *string `type:"string"` + + // The product description filter value. Specify this parameter to show only + // those reservations matching the specified product description. + ProductDescription *string `type:"string"` + + // The reserved DB instance identifier filter value. Specify this parameter + // to show only the reservation that matches the specified reservation ID. + ReservedDBInstanceId *string `type:"string"` + + // The offering identifier filter value. Specify this parameter to show only + // purchased reservations matching the specified offering identifier. + ReservedDBInstancesOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedDBInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedDBInstancesInput) GoString() string { + return s.String() +} + +type DescribeReservedDBInstancesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The DB instance class filter value. 
Specify this parameter to show only the + // available offerings matching the specified DB instance class. + DBInstanceClass *string `type:"string"` + + // Duration filter value, specified in years or seconds. Specify this parameter + // to show only reservations for this duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more than the + // MaxRecords value is available, a pagination token called a marker is included + // in the response so that the following results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The Multi-AZ filter value. Specify this parameter to show only the available + // offerings matching the specified Multi-AZ parameter. + MultiAZ *bool `type:"boolean"` + + // The offering type filter value. Specify this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid Values: "Partial Upfront" | "All Upfront" | "No Upfront" + OfferingType *string `type:"string"` + + // Product description filter value. Specify this parameter to show only the + // available offerings matching the specified product description. + ProductDescription *string `type:"string"` + + // The offering identifier filter value. Specify this parameter to show only + // the available offering that matches the specified reservation identifier. 
+ // + // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + ReservedDBInstancesOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedDBInstancesOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedDBInstancesOfferingsInput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeReservedDBInstancesOfferings +// action. +type DescribeReservedDBInstancesOfferingsOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // A list of reserved DB instance offerings. + ReservedDBInstancesOfferings []*ReservedDBInstancesOffering `locationNameList:"ReservedDBInstancesOffering" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedDBInstancesOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedDBInstancesOfferingsOutput) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeReservedDBInstances +// action. +type DescribeReservedDBInstancesOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // A list of reserved DB instances. 
+ ReservedDBInstances []*ReservedDBInstance `locationNameList:"ReservedDBInstance" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedDBInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedDBInstancesOutput) GoString() string { + return s.String() +} + +type DownloadDBLogFilePortionInput struct { + _ struct{} `type:"structure"` + + // The customer-assigned name of the DB instance that contains the log files + // you want to list. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The name of the log file to be downloaded. + LogFileName *string `type:"string" required:"true"` + + // The pagination token provided in the previous request or "0". If the Marker + // parameter is specified the response includes only records beyond the marker + // until the end of the file or up to NumberOfLines. + Marker *string `type:"string"` + + // The number of lines to download. If the number of lines specified results + // in a file over 1 MB in size, the file will be truncated at 1 MB in size. + // + // If the NumberOfLines parameter is specified, then the block of lines returned + // can be from the beginning or the end of the log file, depending on the value + // of the Marker parameter. If neither Marker or NumberOfLines are specified, + // the entire log file is returned. + // + // If NumberOfLines is specified and Marker is not specified, then the most + // recent lines from the end of the log file are returned. + // + // If Marker is specified as "0", then the specified number of lines from the + // beginning of the log file are returned. 
+ // + // You can download the log file in blocks of lines by specifying the size of + // the block using the NumberOfLines parameter, and by specifying a value of + // "0" for the Marker parameter in your first request. Include the Marker value + // returned in the response as the Marker value for the next request, continuing + // until the AdditionalDataPending response element returns false. + NumberOfLines *int64 `type:"integer"` +} + +// String returns the string representation +func (s DownloadDBLogFilePortionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DownloadDBLogFilePortionInput) GoString() string { + return s.String() +} + +// This data type is used as a response element to DownloadDBLogFilePortion. +type DownloadDBLogFilePortionOutput struct { + _ struct{} `type:"structure"` + + // Boolean value that if true, indicates there is more data to be downloaded. + AdditionalDataPending *bool `type:"boolean"` + + // Entries from the specified log file. + LogFileData *string `type:"string"` + + // A pagination token that can be used in a subsequent DownloadDBLogFilePortion + // request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DownloadDBLogFilePortionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DownloadDBLogFilePortionOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the following actions: +// +// AuthorizeDBSecurityGroupIngress DescribeDBSecurityGroups RevokeDBSecurityGroupIngress +type EC2SecurityGroup struct { + _ struct{} `type:"structure"` + + // Specifies the id of the EC2 security group. + EC2SecurityGroupId *string `type:"string"` + + // Specifies the name of the EC2 security group. 
+ EC2SecurityGroupName *string `type:"string"` + + // Specifies the AWS ID of the owner of the EC2 security group specified in + // the EC2SecurityGroupName field. + EC2SecurityGroupOwnerId *string `type:"string"` + + // Provides the status of the EC2 security group. Status can be "authorizing", + // "authorized", "revoking", and "revoked". + Status *string `type:"string"` +} + +// String returns the string representation +func (s EC2SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2SecurityGroup) GoString() string { + return s.String() +} + +// This data type is used as a response element in the following actions: +// +// CreateDBInstance DescribeDBInstances DeleteDBInstance +type Endpoint struct { + _ struct{} `type:"structure"` + + // Specifies the DNS address of the DB instance. + Address *string `type:"string"` + + // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. + HostedZoneId *string `type:"string"` + + // Specifies the port that the database engine is listening on. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Contains the result of a successful invocation of the DescribeEngineDefaultParameters +// action. +type EngineDefaults struct { + _ struct{} `type:"structure"` + + // Specifies the name of the DB parameter group family that the engine default + // parameters apply to. + DBParameterGroupFamily *string `type:"string"` + + // An optional pagination token provided by a previous EngineDefaults request. + // If this parameter is specified, the response includes only records beyond + // the marker, up to the value specified by MaxRecords . + Marker *string `type:"string"` + + // Contains a list of engine default parameters. 
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s EngineDefaults) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EngineDefaults) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeEvents action. +type Event struct { + _ struct{} `type:"structure"` + + // Specifies the date and time of the event. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the category for the event. + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // Provides the text of this event. + Message *string `type:"string"` + + // Provides the identifier for the source of the event. + SourceIdentifier *string `type:"string"` + + // Specifies the source type for this event. + SourceType *string `type:"string" enum:"SourceType"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +// Contains the results of a successful invocation of the DescribeEventCategories +// action. +type EventCategoriesMap struct { + _ struct{} `type:"structure"` + + // The event categories for the specified source type + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The source type that the returned categories belong to + SourceType *string `type:"string"` +} + +// String returns the string representation +func (s EventCategoriesMap) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventCategoriesMap) GoString() string { + return s.String() +} + +// Contains the results of a successful invocation of the DescribeEventSubscriptions +// action. 
+type EventSubscription struct { + _ struct{} `type:"structure"` + + // The RDS event notification subscription Id. + CustSubscriptionId *string `type:"string"` + + // The AWS customer account associated with the RDS event notification subscription. + CustomerAwsId *string `type:"string"` + + // A Boolean value indicating if the subscription is enabled. True indicates + // the subscription is enabled. + Enabled *bool `type:"boolean"` + + // A list of event categories for the RDS event notification subscription. + EventCategoriesList []*string `locationNameList:"EventCategory" type:"list"` + + // The topic ARN of the RDS event notification subscription. + SnsTopicArn *string `type:"string"` + + // A list of source IDs for the RDS event notification subscription. + SourceIdsList []*string `locationNameList:"SourceId" type:"list"` + + // The source type for the RDS event notification subscription. + SourceType *string `type:"string"` + + // The status of the RDS event notification subscription. + // + // Constraints: + // + // Can be one of the following: creating | modifying | deleting | active | + // no-permission | topic-not-exist + // + // The status "no-permission" indicates that RDS no longer has permission to + // post to the SNS topic. The status "topic-not-exist" indicates that the topic + // was deleted after the subscription was created. + Status *string `type:"string"` + + // The time the RDS event notification subscription was created. + SubscriptionCreationTime *string `type:"string"` +} + +// String returns the string representation +func (s EventSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventSubscription) GoString() string { + return s.String() +} + +type FailoverDBClusterInput struct { + _ struct{} `type:"structure"` + + // A DB cluster identifier to force a failover for. This parameter is not case-sensitive. 
+ // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s FailoverDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverDBClusterInput) GoString() string { + return s.String() +} + +type FailoverDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster + // RestoreDBClusterFromSnapshot This data type is used as a response element + // in the DescribeDBClusters action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s FailoverDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverDBClusterOutput) GoString() string { + return s.String() +} + +type Filter struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Name *string `type:"string" required:"true"` + + // This parameter is not currently supported. + Values []*string `locationNameList:"Value" type:"list" required:"true"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeDBSecurityGroups +// action. +type IPRange struct { + _ struct{} `type:"structure"` + + // Specifies the IP range. + CIDRIP *string `type:"string"` + + // Specifies the status of the IP range. Status can be "authorizing", "authorized", + // "revoking", and "revoked". 
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s IPRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPRange) GoString() string { + return s.String() +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // This parameter is not currently supported. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The Amazon RDS resource with tags to be listed. This value is an Amazon Resource + // Name (ARN). For information about creating an ARN, see Constructing an RDS + // Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // List of tags returned by the ListTagsForResource operation. + TagList []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +type ModifyDBClusterInput struct { + _ struct{} `type:"structure"` + + // A value that specifies whether the modifications in this request and any + // pending modifications are asynchronously applied as soon as possible, regardless + // of the PreferredMaintenanceWindow setting for the DB cluster. + // + // If this parameter is set to false, changes to the DB cluster are applied + // during the next maintenance window. 
+ // + // Default: false + ApplyImmediately *bool `type:"boolean"` + + // The number of days for which automated backups are retained. You must specify + // a minimum value of 1. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 1 to 35 + BackupRetentionPeriod *int64 `type:"integer"` + + // The DB cluster identifier for the cluster being modified. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must be the identifier for an existing DB cluster. Must contain from 1 + // to 63 alphanumeric characters or hyphens. First character must be a letter. + // Cannot end with a hyphen or contain two consecutive hyphens. + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB cluster parameter group to use for the DB cluster. + DBClusterParameterGroupName *string `type:"string"` + + // The new password for the master database user. This password can contain + // any printable ASCII character except "/", """, or "@". + // + // Constraints: Must contain from 8 to 41 characters. + MasterUserPassword *string `type:"string"` + + // The new DB cluster identifier for the DB cluster when renaming a DB cluster. + // This value is stored as a lowercase string. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Example: my-cluster2 + NewDBClusterIdentifier *string `type:"string"` + + // A value that indicates that the DB cluster should be associated with the + // specified option group. Changing this parameter does not result in an outage + // except in the following case, and the change is applied during the next maintenance + // window unless the ApplyImmediately parameter is set to true for this request. 
+ // If the parameter change results in an option group that enables OEM, this + // change can cause a brief (sub-second) period during which new connections + // are rejected but existing connections are not interrupted. + // + // Permanent options cannot be removed from an option group. The option group + // cannot be removed from a DB cluster once it is associated with a DB cluster. + OptionGroupName *string `type:"string"` + + // The port number on which the DB cluster accepts connections. + // + // Constraints: Value must be 1150-65535 + // + // Default: The same port as the original DB cluster. + Port *int64 `type:"integer"` + + // The daily time range during which automated backups are created if automated + // backups are enabled, using the BackupRetentionPeriod parameter. + // + // Default: A 30-minute window selected at random from an 8-hour block of time + // per region. To see the time blocks available, see Adjusting the Preferred + // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated + // Time (UTC). Must not conflict with the preferred maintenance window. Must + // be at least 30 minutes. + PreferredBackupWindow *string `type:"string"` + + // The weekly time range during which system maintenance can occur, in Universal + // Coordinated Time (UTC). + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of time + // per region, occurring on a random day of the week. To see the time blocks + // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. 
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // A list of VPC security groups that the DB cluster will belong to.
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s ModifyDBClusterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDBClusterInput) GoString() string {
+ return s.String()
+}
+
+type ModifyDBClusterOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the result of a successful invocation of the following actions:
+ //
+ // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster
+ // RestoreDBClusterFromSnapshot This data type is used as a response element
+ // in the DescribeDBClusters action.
+ DBCluster *DBCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s ModifyDBClusterOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDBClusterOutput) GoString() string {
+ return s.String()
+}
+
+type ModifyDBClusterParameterGroupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the DB cluster parameter group to modify.
+ DBClusterParameterGroupName *string `type:"string" required:"true"`
+
+ // A list of parameters in the DB cluster parameter group to modify.
+ Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ModifyDBClusterParameterGroupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyDBClusterParameterGroupInput) GoString() string {
+ return s.String()
+}
+
+type ModifyDBInstanceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The new storage capacity of the RDS instance. 
Changing this setting does + // not result in an outage and the change is applied during the next maintenance + // window unless ApplyImmediately is set to true for this request. + // + // MySQL + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // Type: Integer + // + // MariaDB + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // Type: Integer + // + // PostgreSQL + // + // Default: Uses existing setting + // + // Valid Values: 5-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // Type: Integer + // + // Oracle + // + // Default: Uses existing setting + // + // Valid Values: 10-6144 + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. + // + // SQL Server + // + // Cannot be modified. + // + // If you choose to migrate your DB instance from using standard storage to + // using Provisioned IOPS, or from using Provisioned IOPS to using standard + // storage, the process can take time. 
The duration of the migration depends + // on several factors such as database load, storage size, storage type (standard + // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number + // of prior scale storage operations. Typical migration times are under 24 hours, + // but the process can take up to several days in some cases. During the migration, + // the DB instance will be available for use, but might experience performance + // degradation. While the migration takes place, nightly backups for the instance + // will be suspended. No other Amazon RDS operations can take place for the + // instance, including modifying the instance, rebooting the instance, deleting + // the instance, creating a Read Replica for the instance, and creating a DB + // snapshot of the instance. + AllocatedStorage *int64 `type:"integer"` + + // Indicates that major version upgrades are allowed. Changing this parameter + // does not result in an outage and the change is asynchronously applied as + // soon as possible. + // + // Constraints: This parameter must be set to true when specifying a value + // for the EngineVersion parameter that is a different major version than the + // DB instance's current version. + AllowMajorVersionUpgrade *bool `type:"boolean"` + + // Specifies whether the modifications in this request and any pending modifications + // are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow + // setting for the DB instance. + // + // If this parameter is set to false, changes to the DB instance are applied + // during the next maintenance window. Some parameter changes can cause an outage + // and will be applied on the next call to RebootDBInstance, or the next failure + // reboot. 
Review the table of parameters in Modifying a DB Instance and Using + // the Apply Immediately Parameter (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) + // to see the impact that setting ApplyImmediately to true or false has for + // each modified parameter and to determine when the changes will be applied. + // + // Default: false + ApplyImmediately *bool `type:"boolean"` + + // Indicates that minor version upgrades will be applied automatically to the + // DB instance during the maintenance window. Changing this parameter does not + // result in an outage except in the following case and the change is asynchronously + // applied as soon as possible. An outage will result if this parameter is set + // to true during the maintenance window, and a newer minor version is available, + // and RDS has enabled auto patching for that engine version. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The number of days to retain automated backups. Setting this parameter to + // a positive number enables backups. Setting this parameter to 0 disables automated + // backups. + // + // Changing this parameter can result in an outage if you change from 0 to + // a non-zero value or from a non-zero value to 0. These changes are applied + // during the next maintenance window unless the ApplyImmediately parameter + // is set to true for this request. If you change the parameter from one non-zero + // value to another non-zero value, the change is asynchronously applied as + // soon as possible. 
+ // + // Default: Uses existing setting + // + // Constraints: + // + // Must be a value from 0 to 35 Can be specified for a MySQL Read Replica + // only if the source is running MySQL 5.6 Can be specified for a PostgreSQL + // Read Replica only if the source is running PostgreSQL 9.3.5 Cannot be set + // to 0 if the DB instance is a source to Read Replicas + BackupRetentionPeriod *int64 `type:"integer"` + + // Indicates the certificate that needs to be associated with the instance. + CACertificateIdentifier *string `type:"string"` + + // True to copy all tags from the DB instance to snapshots of the DB instance; + // otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The new compute and memory capacity of the DB instance. To determine the + // instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions + // action. + // + // Passing a value for this setting causes an outage during the change and + // is applied during the next maintenance window, unless ApplyImmediately is + // specified as true for this request. + // + // Default: Uses existing setting + // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + // | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large + // | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge + // | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge + // | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium + // | db.t2.large + DBInstanceClass *string `type:"string"` + + // The DB instance identifier. This value is stored as a lowercase string. 
+ // + // Constraints: + // + // Must be the identifier for an existing DB instance Must contain from 1 + // to 63 alphanumeric characters or hyphens First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The name of the DB parameter group to apply to the DB instance. Changing + // this setting does not result in an outage. The parameter group name itself + // is changed immediately, but the actual parameter changes are not applied + // until you reboot the instance without failover. The db instance will NOT + // be rebooted automatically and the parameter changes will NOT be applied during + // the next maintenance window. + // + // Default: Uses existing setting + // + // Constraints: The DB parameter group must be in the same DB parameter group + // family as this DB instance. + DBParameterGroupName *string `type:"string"` + + // The port number on which the database accepts connections. + // + // The value of the DBPortNumber parameter must not match any of the port values + // specified for options in the option group for the DB instance. + // + // Your database will restart when you change the DBPortNumber value regardless + // of the value of the ApplyImmediately parameter. + // + // MySQL + // + // Default: 3306 + // + // Valid Values: 1150-65535 + // + // MariaDB + // + // Default: 3306 + // + // Valid Values: 1150-65535 + // + // PostgreSQL + // + // Default: 5432 + // + // Valid Values: 1150-65535 + // + // Oracle + // + // Default: 1521 + // + // Valid Values: 1150-65535 + // + // SQL Server + // + // Default: 1433 + // + // Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 + // through 49156. + // + // Amazon Aurora + // + // Default: 3306 + // + // Valid Values: 1150-65535 + DBPortNumber *int64 `type:"integer"` + + // A list of DB security groups to authorize on this DB instance. 
Changing this + // setting does not result in an outage and the change is asynchronously applied + // as soon as possible. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` + + // The version number of the database engine to upgrade to. Changing this parameter + // results in an outage and the change is applied during the next maintenance + // window unless the ApplyImmediately parameter is set to true for this request. + // + // For major version upgrades, if a non-default DB parameter group is currently + // in use, a new DB parameter group in the DB parameter group family for the + // new engine version must be specified. The new DB parameter group can be the + // default for that DB parameter group family. + // + // For a list of valid engine versions, see CreateDBInstance. + EngineVersion *string `type:"string"` + + // The new Provisioned IOPS (I/O operations per second) value for the RDS instance. + // Changing this setting does not result in an outage and the change is applied + // during the next maintenance window unless the ApplyImmediately parameter + // is set to true for this request. + // + // Default: Uses existing setting + // + // Constraints: Value supplied must be at least 10% greater than the current + // value. Values that are not at least 10% greater than the existing value are + // rounded up so that they are 10% greater than the current value. If you are + // migrating from Provisioned IOPS to standard storage, set this value to 0. + // The DB instance will require a reboot for the change in storage type to take + // effect. + // + // SQL Server + // + // Setting the IOPS value for the SQL Server database engine is not supported. 
+ // + // Type: Integer + // + // If you choose to migrate your DB instance from using standard storage to + // using Provisioned IOPS, or from using Provisioned IOPS to using standard + // storage, the process can take time. The duration of the migration depends + // on several factors such as database load, storage size, storage type (standard + // or Provisioned IOPS), amount of IOPS provisioned (if any), and the number + // of prior scale storage operations. Typical migration times are under 24 hours, + // but the process can take up to several days in some cases. During the migration, + // the DB instance will be available for use, but might experience performance + // degradation. While the migration takes place, nightly backups for the instance + // will be suspended. No other Amazon RDS operations can take place for the + // instance, including modifying the instance, rebooting the instance, deleting + // the instance, creating a Read Replica for the instance, and creating a DB + // snapshot of the instance. + Iops *int64 `type:"integer"` + + // The new password for the DB instance master user. Can be any printable ASCII + // character except "/", """, or "@". + // + // Changing this parameter does not result in an outage and the change is + // asynchronously applied as soon as possible. Between the time of the request + // and the completion of the request, the MasterUserPassword element exists + // in the PendingModifiedValues element of the operation response. + // + // Default: Uses existing setting + // + // Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and + // Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric + // characters (SQL Server). + // + // Amazon RDS API actions never return the password, so this action provides + // a way to regain access to a primary instance user if the password is lost. + // This includes restoring privileges that might have been accidentally revoked. 
+ MasterUserPassword *string `type:"string"`
+
+ // The interval, in seconds, between points when Enhanced Monitoring metrics
+ // are collected for the DB instance. To disable collecting Enhanced Monitoring
+ // metrics, specify 0. The default is 60.
+ //
+ // If MonitoringRoleArn is specified, then you must also set MonitoringInterval
+ // to a value other than 0.
+ //
+ // Valid Values: 0, 1, 5, 10, 15, 30, 60
+ MonitoringInterval *int64 `type:"integer"`
+
+ // The ARN for the IAM role that permits RDS to send enhanced monitoring metrics
+ // to CloudWatch Logs. For example, arn:aws:iam::123456789012:role/emaccess.
+ // For information on creating a monitoring role, go to To create an IAM role
+ // for Amazon RDS Enhanced Monitoring (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole).
+ //
+ // If MonitoringInterval is set to a value other than 0, then you must supply
+ // a MonitoringRoleArn value.
+ MonitoringRoleArn *string `type:"string"`
+
+ // Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter
+ // does not result in an outage and the change is applied during the next maintenance
+ // window unless the ApplyImmediately parameter is set to true for this request.
+ //
+ // Constraints: Cannot be specified if the DB instance is a Read Replica. This
+ // parameter cannot be used with SQL Server DB instances. Multi-AZ for SQL Server
+ // DB instances is set using the Mirroring option in an option group associated
+ // with the DB instance.
+ MultiAZ *bool `type:"boolean"`
+
+ // The new DB instance identifier for the DB instance when renaming a DB instance.
+ // When you change the DB instance identifier, an instance reboot will occur
+ // immediately if you set Apply Immediately to true, or will occur during the
+ // next maintenance window if Apply Immediately is set to false. This value is stored
+ // as a lowercase string. 
+ // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + NewDBInstanceIdentifier *string `type:"string"` + + // Indicates that the DB instance should be associated with the specified option + // group. Changing this parameter does not result in an outage except in the + // following case and the change is applied during the next maintenance window + // unless the ApplyImmediately parameter is set to true for this request. If + // the parameter change results in an option group that enables OEM, this change + // can cause a brief (sub-second) period during which new connections are rejected + // but existing connections are not interrupted. + // + // Permanent options, such as the TDE option for Oracle Advanced Security + // TDE, cannot be removed from an option group, and that option group cannot + // be removed from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string"` + + // The daily time range during which automated backups are created if automated + // backups are enabled, as determined by the BackupRetentionPeriod parameter. + // Changing this parameter does not result in an outage and the change is asynchronously + // applied as soon as possible. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi Times should be in Universal Time + // Coordinated (UTC) Must not conflict with the preferred maintenance window + // Must be at least 30 minutes + PreferredBackupWindow *string `type:"string"` + + // The weekly time range (in UTC) during which system maintenance can occur, + // which might result in an outage. Changing this parameter does not result + // in an outage, except in the following situation, and the change is asynchronously + // applied as soon as possible. 
If there are pending actions that cause a reboot, + // and the maintenance window is changed to include the current time, then changing + // this parameter will cause a reboot of the DB instance. If moving this window + // to the current time, there must be at least 30 minutes between the current + // time and end of the window to ensure pending changes are applied. + // + // Default: Uses existing setting + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Must be at least 30 minutes + PreferredMaintenanceWindow *string `type:"string"` + + // True to make the DB instance Internet-facing with a publicly resolvable DNS + // name, which resolves to a public IP address. False to make the DB instance + // internal with a DNS name that resolves to a private IP address. + // + // PubliclyAccessible only applies to DB instances in a VPC. The DB instance + // must be part of a public subnet and PubliclyAccessible must be true in order + // for it to be publicly accessible. + // + // Changes to the PubliclyAccessible parameter are applied immediately regardless + // of the value of the ApplyImmediately parameter. + // + // Default: false + PubliclyAccessible *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // A list of EC2 VPC security groups to authorize on this DB instance. This + // change is asynchronously applied as soon as possible. 
+ // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s ModifyDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBInstanceInput) GoString() string { + return s.String() +} + +type ModifyDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBInstanceOutput) GoString() string { + return s.String() +} + +type ModifyDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group. + // + // Constraints: + // + // Must be the name of an existing DB parameter group Must be 1 to 255 alphanumeric + // characters First character must be a letter Cannot end with a hyphen or contain + // two consecutive hyphens + DBParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names, values, and the apply method for the parameter + // update. At least one parameter name, value, and apply method must be supplied; + // subsequent arguments are optional. A maximum of 20 parameters can be modified + // in a single request. + // + // Valid Values (for the application method): immediate | pending-reboot + // + // You can use the immediate value with dynamic parameters only. 
You can use + // the pending-reboot value for both dynamic and static parameters, and changes + // are applied when you reboot the DB instance without failover. + Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBParameterGroupInput) GoString() string { + return s.String() +} + +type ModifyDBSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the DB snapshot attribute to modify. + // + // To manage authorization for other AWS accounts to copy or restore a manual + // DB snapshot, this value is restore. + AttributeName *string `type:"string"` + + // The identifier for the DB snapshot to modify the attributes for. + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of DB snapshot attributes to add to the attribute specified by AttributeName. + // + // To authorize other AWS Accounts to copy or restore a manual snapshot, this + // is one or more AWS account identifiers, or all to make the manual DB snapshot + // restorable by any AWS account. Do not add the all value for any manual DB + // snapshots that contain private information that you do not want to be available + // to all AWS accounts. + ValuesToAdd []*string `locationNameList:"AttributeValue" type:"list"` + + // A list of DB snapshot attributes to remove from the attribute specified by + // AttributeName. + // + // To remove authorization for other AWS Accounts to copy or restore a manual + // snapshot, this is one or more AWS account identifiers, or all to remove authorization + // for any AWS account to copy or restore the DB snapshot. If you specify all, + // AWS accounts that have their account identifier explicitly added to the restore + // attribute can still copy or restore the manual DB snapshot. 
+ ValuesToRemove []*string `locationNameList:"AttributeValue" type:"list"` +} + +// String returns the string representation +func (s ModifyDBSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSnapshotAttributeInput) GoString() string { + return s.String() +} + +type ModifyDBSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBSnapshotAttributes + // API. + // + // Manual DB snapshot attributes are used to authorize other AWS accounts to + // copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute + // API. + DBSnapshotAttributesResult *DBSnapshotAttributesResult `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSnapshotAttributeOutput) GoString() string { + return s.String() +} + +type ModifyDBSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The description for the DB subnet group. + DBSubnetGroupDescription *string `type:"string"` + + // The name for the DB subnet group. This value is stored as a lowercase string. + // + // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + // Must not be "Default". + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string" required:"true"` + + // The EC2 subnet IDs for the DB subnet group. 
+ SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyDBSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSubnetGroupInput) GoString() string { + return s.String() +} + +type ModifyDBSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup + // This data type is used as a response element in the DescribeDBSubnetGroups + // action. + DBSubnetGroup *DBSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBSubnetGroupOutput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // A Boolean value; set to true to activate the subscription. + Enabled *bool `type:"boolean"` + + // A list of event categories for a SourceType that you want to subscribe to. + // You can see a list of the categories for a given SourceType in the Events + // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + // topic in the Amazon RDS User Guide or by using the DescribeEventCategories + // action. + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The Amazon Resource Name (ARN) of the SNS topic created for event notification. + // The ARN is created by Amazon SNS when you create a topic and subscribe to + // it. + SnsTopicArn *string `type:"string"` + + // The type of source that will be generating the events. For example, if you + // want to be notified of events generated by a DB instance, you would set this + // parameter to db-instance. 
if this value is not specified, all events are + // returned. + // + // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot + SourceType *string `type:"string"` + + // The name of the RDS event notification subscription. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionInput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionOutput) GoString() string { + return s.String() +} + +type ModifyOptionGroupInput struct { + _ struct{} `type:"structure"` + + // Indicates whether the changes should be applied immediately, or during the + // next maintenance window for each instance associated with the option group. + ApplyImmediately *bool `type:"boolean"` + + // The name of the option group to be modified. + // + // Permanent options, such as the TDE option for Oracle Advanced Security + // TDE, cannot be removed from an option group, and that option group cannot + // be removed from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string" required:"true"` + + // Options in this list are added to the option group or, if already present, + // the specified configuration is used to update the existing configuration. 
+ OptionsToInclude []*OptionConfiguration `locationNameList:"OptionConfiguration" type:"list"` + + // Options in this list are removed from the option group. + OptionsToRemove []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyOptionGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyOptionGroupInput) GoString() string { + return s.String() +} + +type ModifyOptionGroupOutput struct { + _ struct{} `type:"structure"` + + OptionGroup *OptionGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyOptionGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyOptionGroupOutput) GoString() string { + return s.String() +} + +// Option details. +type Option struct { + _ struct{} `type:"structure"` + + // If the option requires access to a port, then this DB security group allows + // access to the port. + DBSecurityGroupMemberships []*DBSecurityGroupMembership `locationNameList:"DBSecurityGroup" type:"list"` + + // The description of the option. + OptionDescription *string `type:"string"` + + // The name of the option. + OptionName *string `type:"string"` + + // The option settings for this option. + OptionSettings []*OptionSetting `locationNameList:"OptionSetting" type:"list"` + + // Indicate if this option is permanent. + Permanent *bool `type:"boolean"` + + // Indicate if this option is persistent. + Persistent *bool `type:"boolean"` + + // If required, the port configured for this option to use. + Port *int64 `type:"integer"` + + // If the option requires access to a port, then this VPC security group allows + // access to the port. 
+ VpcSecurityGroupMemberships []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` +} + +// String returns the string representation +func (s Option) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Option) GoString() string { + return s.String() +} + +// A list of all available options +type OptionConfiguration struct { + _ struct{} `type:"structure"` + + // A list of DBSecurityGroupMemebrship name strings used for this option. + DBSecurityGroupMemberships []*string `locationNameList:"DBSecurityGroupName" type:"list"` + + // The configuration of options to include in a group. + OptionName *string `type:"string" required:"true"` + + // The option settings to include in an option group. + OptionSettings []*OptionSetting `locationNameList:"OptionSetting" type:"list"` + + // The optional port for the option. + Port *int64 `type:"integer"` + + // A list of VpcSecurityGroupMemebrship name strings used for this option. + VpcSecurityGroupMemberships []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s OptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionConfiguration) GoString() string { + return s.String() +} + +type OptionGroup struct { + _ struct{} `type:"structure"` + + // Indicates whether this option group can be applied to both VPC and non-VPC + // instances. The value true indicates the option group can be applied to both + // VPC and non-VPC instances. + AllowsVpcAndNonVpcInstanceMemberships *bool `type:"boolean"` + + // Indicates the name of the engine that this option group can be applied to. + EngineName *string `type:"string"` + + // Indicates the major engine version associated with this option group. + MajorEngineVersion *string `type:"string"` + + // Provides a description of the option group. 
+ OptionGroupDescription *string `type:"string"` + + // Specifies the name of the option group. + OptionGroupName *string `type:"string"` + + // Indicates what options are available in the option group. + Options []*Option `locationNameList:"Option" type:"list"` + + // If AllowsVpcAndNonVpcInstanceMemberships is false, this field is blank. If + // AllowsVpcAndNonVpcInstanceMemberships is true and this field is blank, then + // this option group can be applied to both VPC and non-VPC instances. If this + // field contains a value, then this option group can only be applied to instances + // that are in the VPC indicated by this field. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s OptionGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroup) GoString() string { + return s.String() +} + +// Provides information on the option groups the DB instance is a member of. +type OptionGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the option group that the instance belongs to. + OptionGroupName *string `type:"string"` + + // The status of the DB instance's option group membership (e.g. in-sync, pending, + // pending-maintenance, applying). + Status *string `type:"string"` +} + +// String returns the string representation +func (s OptionGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupMembership) GoString() string { + return s.String() +} + +// Available option. +type OptionGroupOption struct { + _ struct{} `type:"structure"` + + // If the option requires a port, specifies the default port for the option. + DefaultPort *int64 `type:"integer"` + + // The description of the option. + Description *string `type:"string"` + + // The name of the engine that this option can be applied to. 
+ EngineName *string `type:"string"` + + // Indicates the major engine version that the option is available for. + MajorEngineVersion *string `type:"string"` + + // The minimum required engine version for the option to be applied. + MinimumRequiredMinorEngineVersion *string `type:"string"` + + // The name of the option. + Name *string `type:"string"` + + // Specifies the option settings that are available (and the default value) + // for each option in an option group. + OptionGroupOptionSettings []*OptionGroupOptionSetting `locationNameList:"OptionGroupOptionSetting" type:"list"` + + // List of all options that are prerequisites for this option. + OptionsDependedOn []*string `locationNameList:"OptionName" type:"list"` + + // A permanent option cannot be removed from the option group once the option + // group is used, and it cannot be removed from the db instance after assigning + // an option group with this permanent option. + Permanent *bool `type:"boolean"` + + // A persistent option cannot be removed from the option group once the option + // group is used, but this option can be removed from the db instance while + // modifying the related data and assigning another option group without this + // option. + Persistent *bool `type:"boolean"` + + // Specifies whether the option requires a port. + PortRequired *bool `type:"boolean"` +} + +// String returns the string representation +func (s OptionGroupOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupOption) GoString() string { + return s.String() +} + +// Option group option settings are used to display settings available for each +// option with their default values and other information. These values are +// used with the DescribeOptionGroupOptions action. +type OptionGroupOptionSetting struct { + _ struct{} `type:"structure"` + + // Indicates the acceptable values for the option group option. 
+ AllowedValues *string `type:"string"` + + // The DB engine specific parameter type for the option group option. + ApplyType *string `type:"string"` + + // The default value for the option group option. + DefaultValue *string `type:"string"` + + // Boolean value where true indicates that this option group option can be changed + // from the default value. + IsModifiable *bool `type:"boolean"` + + // The description of the option group option. + SettingDescription *string `type:"string"` + + // The name of the option group option. + SettingName *string `type:"string"` +} + +// String returns the string representation +func (s OptionGroupOptionSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionGroupOptionSetting) GoString() string { + return s.String() +} + +// Option settings are the actual settings being applied or configured for that +// option. It is used when you modify an option group or describe option groups. +// For example, the NATIVE_NETWORK_ENCRYPTION option has a setting called SQLNET.ENCRYPTION_SERVER +// that can have several different values. +type OptionSetting struct { + _ struct{} `type:"structure"` + + // The allowed values of the option setting. + AllowedValues *string `type:"string"` + + // The DB engine specific parameter type. + ApplyType *string `type:"string"` + + // The data type of the option setting. + DataType *string `type:"string"` + + // The default value of the option setting. + DefaultValue *string `type:"string"` + + // The description of the option setting. + Description *string `type:"string"` + + // Indicates if the option setting is part of a collection. + IsCollection *bool `type:"boolean"` + + // A Boolean value that, when true, indicates the option setting can be modified + // from the default. + IsModifiable *bool `type:"boolean"` + + // The name of the option that has settings that you can set. 
+ Name *string `type:"string"` + + // The current value of the option setting. + Value *string `type:"string"` +} + +// String returns the string representation +func (s OptionSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionSetting) GoString() string { + return s.String() +} + +// Contains a list of available options for a DB instance +// +// This data type is used as a response element in the DescribeOrderableDBInstanceOptions +// action. +type OrderableDBInstanceOption struct { + _ struct{} `type:"structure"` + + // A list of Availability Zones for the orderable DB instance. + AvailabilityZones []*AvailabilityZone `locationNameList:"AvailabilityZone" type:"list"` + + // The DB instance class for the orderable DB instance. + DBInstanceClass *string `type:"string"` + + // The engine type of the orderable DB instance. + Engine *string `type:"string"` + + // The engine version of the orderable DB instance. + EngineVersion *string `type:"string"` + + // The license model for the orderable DB instance. + LicenseModel *string `type:"string"` + + // Indicates whether this orderable DB instance is multi-AZ capable. + MultiAZCapable *bool `type:"boolean"` + + // Indicates whether this orderable DB instance can have a Read Replica. + ReadReplicaCapable *bool `type:"boolean"` + + // Indicates the storage type for this orderable DB instance. + StorageType *string `type:"string"` + + // Indicates whether the DB instance supports enhanced monitoring at intervals + // from 1 to 60 seconds. + SupportsEnhancedMonitoring *bool `type:"boolean"` + + // Indicates whether this orderable DB instance supports provisioned IOPS. + SupportsIops *bool `type:"boolean"` + + // Indicates whether this orderable DB instance supports encrypted storage. + SupportsStorageEncryption *bool `type:"boolean"` + + // Indicates whether this is a VPC orderable DB instance. 
+ Vpc *bool `type:"boolean"` +} + +// String returns the string representation +func (s OrderableDBInstanceOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrderableDBInstanceOption) GoString() string { + return s.String() +} + +// This data type is used as a request parameter in the ModifyDBParameterGroup +// and ResetDBParameterGroup actions. +// +// This data type is used as a response element in the DescribeEngineDefaultParameters +// and DescribeDBParameters actions. +type Parameter struct { + _ struct{} `type:"structure"` + + // Specifies the valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // Indicates when to apply parameter updates. + ApplyMethod *string `type:"string" enum:"ApplyMethod"` + + // Specifies the engine specific parameters type. + ApplyType *string `type:"string"` + + // Specifies the valid data type for the parameter. + DataType *string `type:"string"` + + // Provides a description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. Some + // parameters have security or operational implications that prevent them from + // being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // Specifies the name of the parameter. + ParameterName *string `type:"string"` + + // Specifies the value of the parameter. + ParameterValue *string `type:"string"` + + // Indicates the source of the parameter value. + Source *string `type:"string"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +// Provides information about a pending maintenance action for a resource. 
+type PendingMaintenanceAction struct { + _ struct{} `type:"structure"` + + // The type of pending maintenance action that is available for the resource. + Action *string `type:"string"` + + // The date of the maintenance window when the action will be applied. The maintenance + // action will be applied to the resource during its first maintenance window + // after this date. If this date is specified, any next-maintenance opt-in requests + // are ignored. + AutoAppliedAfterDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The effective date when the pending maintenance action will be applied to + // the resource. This date takes into account opt-in requests received from + // the ApplyPendingMaintenanceAction API, the AutoAppliedAfterDate, and the + // ForcedApplyDate. This value is blank if an opt-in request has not been received + // and nothing has been specified as AutoAppliedAfterDate or ForcedApplyDate. + CurrentApplyDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A description providing more detail about the maintenance action. + Description *string `type:"string"` + + // The date when the maintenance action will be automatically applied. The maintenance + // action will be applied to the resource on this date regardless of the maintenance + // window for the resource. If this date is specified, any immediate opt-in + // requests are ignored. + ForcedApplyDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the type of opt-in request that has been received for the resource. + OptInStatus *string `type:"string"` +} + +// String returns the string representation +func (s PendingMaintenanceAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingMaintenanceAction) GoString() string { + return s.String() +} + +// This data type is used as a response element in the ModifyDBInstance action. 
+type PendingModifiedValues struct { + _ struct{} `type:"structure"` + + // Contains the new AllocatedStorage size for the DB instance that will be applied + // or is in progress. + AllocatedStorage *int64 `type:"integer"` + + // Specifies the pending number of days for which automated backups are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // Specifies the identifier of the CA certificate for the DB instance. + CACertificateIdentifier *string `type:"string"` + + // Contains the new DBInstanceClass for the DB instance that will be applied + // or is in progress. + DBInstanceClass *string `type:"string"` + + // Contains the new DBInstanceIdentifier for the DB instance that will be applied + // or is in progress. + DBInstanceIdentifier *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // Specifies the new Provisioned IOPS value for the DB instance that will be + // applied or is being applied. + Iops *int64 `type:"integer"` + + // Contains the pending or in-progress change of the master credentials for + // the DB instance. + MasterUserPassword *string `type:"string"` + + // Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment. + MultiAZ *bool `type:"boolean"` + + // Specifies the pending port for the DB instance. + Port *int64 `type:"integer"` + + // Specifies the storage type to be associated with the DB instance. + StorageType *string `type:"string"` +} + +// String returns the string representation +func (s PendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingModifiedValues) GoString() string { + return s.String() +} + +type PromoteReadReplicaInput struct { + _ struct{} `type:"structure"` + + // The number of days to retain automated backups. Setting this parameter to + // a positive number enables backups. Setting this parameter to 0 disables automated + // backups. 
+ // + // Default: 1 + // + // Constraints: + // + // Must be a value from 0 to 8 + BackupRetentionPeriod *int64 `type:"integer"` + + // The DB instance identifier. This value is stored as a lowercase string. + // + // Constraints: + // + // Must be the identifier for an existing Read Replica DB instance Must contain + // from 1 to 63 alphanumeric characters or hyphens First character must be a + // letter Cannot end with a hyphen or contain two consecutive hyphens Example: + // mydbinstance + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The daily time range during which automated backups are created if automated + // backups are enabled, using the BackupRetentionPeriod parameter. + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region. To see the time blocks available, see Adjusting the Preferred + // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated + // Time (UTC). Must not conflict with the preferred maintenance window. Must + // be at least 30 minutes. + PreferredBackupWindow *string `type:"string"` +} + +// String returns the string representation +func (s PromoteReadReplicaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaInput) GoString() string { + return s.String() +} + +type PromoteReadReplicaOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. 
+ DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s PromoteReadReplicaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaOutput) GoString() string { + return s.String() +} + +type PurchaseReservedDBInstancesOfferingInput struct { + _ struct{} `type:"structure"` + + // The number of instances to reserve. + // + // Default: 1 + DBInstanceCount *int64 `type:"integer"` + + // Customer-specified identifier to track this reservation. + // + // Example: myreservationID + ReservedDBInstanceId *string `type:"string"` + + // The ID of the Reserved DB instance offering to purchase. + // + // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + ReservedDBInstancesOfferingId *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s PurchaseReservedDBInstancesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedDBInstancesOfferingInput) GoString() string { + return s.String() +} + +type PurchaseReservedDBInstancesOfferingOutput struct { + _ struct{} `type:"structure"` + + // This data type is used as a response element in the DescribeReservedDBInstances + // and PurchaseReservedDBInstancesOffering actions. + ReservedDBInstance *ReservedDBInstance `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedDBInstancesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedDBInstancesOfferingOutput) GoString() string { + return s.String() +} + +type RebootDBInstanceInput struct { + _ struct{} `type:"structure"` + + // The DB instance identifier. This parameter is stored as a lowercase string. 
+ // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBInstanceIdentifier *string `type:"string" required:"true"` + + // When true, the reboot will be conducted through a MultiAZ failover. + // + // Constraint: You cannot specify true if the instance is not configured for + // MultiAZ. + ForceFailover *bool `type:"boolean"` +} + +// String returns the string representation +func (s RebootDBInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootDBInstanceInput) GoString() string { + return s.String() +} + +type RebootDBInstanceOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RebootDBInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootDBInstanceOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstances +// and DescribeReservedDBInstancesOfferings actions. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount of the recurring charge. + RecurringChargeAmount *float64 `type:"double"` + + // The frequency of the recurring charge. 
+ RecurringChargeFrequency *string `type:"string"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +type RemoveSourceIdentifierFromSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The source identifier to be removed from the subscription, such as the DB + // instance identifier for a DB instance or the name of a security group. + SourceIdentifier *string `type:"string" required:"true"` + + // The name of the RDS event notification subscription you want to remove a + // source identifier from. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionInput) GoString() string { + return s.String() +} + +type RemoveSourceIdentifierFromSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful invocation of the DescribeEventSubscriptions + // action. + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveSourceIdentifierFromSubscriptionOutput) GoString() string { + return s.String() +} + +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon RDS resource the tags will be removed from. This value is an Amazon + // Resource Name (ARN). 
For information about creating an ARN, see Constructing + // an RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + ResourceName *string `type:"string" required:"true"` + + // The tag key (name) of the tag to be removed. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstances +// and PurchaseReservedDBInstancesOffering actions. +type ReservedDBInstance struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved DB instance. + CurrencyCode *string `type:"string"` + + // The DB instance class for the reserved DB instance. + DBInstanceClass *string `type:"string"` + + // The number of reserved DB instances. + DBInstanceCount *int64 `type:"integer"` + + // The duration of the reservation in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this reserved DB instance. + FixedPrice *float64 `type:"double"` + + // Indicates if the reservation applies to Multi-AZ deployments. + MultiAZ *bool `type:"boolean"` + + // The offering type of this reserved DB instance. + OfferingType *string `type:"string"` + + // The description of the reserved DB instance. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved DB instance. 
+ RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The unique identifier for the reservation. + ReservedDBInstanceId *string `type:"string"` + + // The offering identifier. + ReservedDBInstancesOfferingId *string `type:"string"` + + // The time the reservation started. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state of the reserved DB instance. + State *string `type:"string"` + + // The hourly price charged for this reserved DB instance. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedDBInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedDBInstance) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeReservedDBInstancesOfferings +// action. +type ReservedDBInstancesOffering struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved DB instance offering. + CurrencyCode *string `type:"string"` + + // The DB instance class for the reserved DB instance. + DBInstanceClass *string `type:"string"` + + // The duration of the offering in seconds. + Duration *int64 `type:"integer"` + + // The fixed price charged for this offering. + FixedPrice *float64 `type:"double"` + + // Indicates if the offering applies to Multi-AZ deployments. + MultiAZ *bool `type:"boolean"` + + // The offering type. + OfferingType *string `type:"string"` + + // The database engine used by the offering. + ProductDescription *string `type:"string"` + + // The recurring price charged to run this reserved DB instance. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The offering identifier. + ReservedDBInstancesOfferingId *string `type:"string"` + + // The hourly price charged for this offering. 
+ UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedDBInstancesOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedDBInstancesOffering) GoString() string { + return s.String() +} + +type ResetDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster parameter group to reset. + DBClusterParameterGroupName *string `type:"string" required:"true"` + + // A list of parameter names in the DB cluster parameter group to reset to the + // default values. You cannot use this parameter if the ResetAllParameters parameter + // is set to true. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` + + // A value that is set to true to reset all parameters in the DB cluster parameter + // group to their default values, and false otherwise. You cannot use this parameter + // if there is a list of parameter names specified for the Parameters parameter. + ResetAllParameters *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +type ResetDBParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the DB parameter group. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + DBParameterGroupName *string `type:"string" required:"true"` + + // An array of parameter names, values, and the apply method for the parameter + // update. At least one parameter name, value, and apply method must be supplied; + // subsequent arguments are optional. A maximum of 20 parameters can be modified + // in a single request. 
+ // + // MySQL + // + // Valid Values (for Apply method): immediate | pending-reboot + // + // You can use the immediate value with dynamic parameters only. You can use + // the pending-reboot value for both dynamic and static parameters, and changes + // are applied when DB instance reboots. + // + // MariaDB + // + // Valid Values (for Apply method): immediate | pending-reboot + // + // You can use the immediate value with dynamic parameters only. You can use + // the pending-reboot value for both dynamic and static parameters, and changes + // are applied when DB instance reboots. + // + // Oracle + // + // Valid Values (for Apply method): pending-reboot + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` + + // Specifies whether (true) or not (false) to reset all parameters in the DB + // parameter group to default values. + // + // Default: true + ResetAllParameters *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetDBParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetDBParameterGroupInput) GoString() string { + return s.String() +} + +// Describes the pending maintenance actions for a resource. +type ResourcePendingMaintenanceActions struct { + _ struct{} `type:"structure"` + + // A list that provides details about the pending maintenance actions for the + // resource. + PendingMaintenanceActionDetails []*PendingMaintenanceAction `locationNameList:"PendingMaintenanceAction" type:"list"` + + // The ARN of the resource that has pending maintenance actions. 
+ ResourceIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s ResourcePendingMaintenanceActions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourcePendingMaintenanceActions) GoString() string { + return s.String() +} + +type RestoreDBClusterFromSnapshotInput struct { + _ struct{} `type:"structure"` + + // Provides the list of EC2 Availability Zones that instances in the restored + // DB cluster can be created in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // The name of the DB cluster to create from the DB cluster snapshot. This parameter + // isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 255 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Example: my-snapshot-id + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB subnet group to use for the new DB cluster. + DBSubnetGroupName *string `type:"string"` + + // The database name for the restored DB cluster. + DatabaseName *string `type:"string"` + + // The database engine to use for the new DB cluster. + // + // Default: The same as source + // + // Constraint: Must be compatible with the engine of the source + Engine *string `type:"string" required:"true"` + + // The version of the database engine to use for the new DB cluster. + EngineVersion *string `type:"string"` + + // The KMS key identifier to use when restoring an encrypted DB cluster from + // an encrypted DB cluster snapshot. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are restoring a DB cluster with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB cluster, then you can use + // the KMS key alias instead of the ARN for the KMS encryption key. 
+ // + // If you do not specify a value for the KmsKeyId parameter, then the following + // will occur: + // + // If the DB cluster snapshot is encrypted, then the restored DB cluster is + // encrypted using the KMS key that was used to encrypt the DB cluster snapshot. + // If the DB cluster snapshot is not encrypted, then the restored DB cluster + // is not encrypted. If SnapshotIdentifier refers to a DB cluster snapshot + // that is not encrypted, and you specify a value for the KmsKeyId parameter, + // then the restore request is rejected. + KmsKeyId *string `type:"string"` + + // The name of the option group to use for the restored DB cluster. + OptionGroupName *string `type:"string"` + + // The port number on which the new DB cluster accepts connections. + // + // Constraints: Value must be 1150-65535 + // + // Default: The same port as the original DB cluster. + Port *int64 `type:"integer"` + + // The identifier for the DB cluster snapshot to restore from. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + SnapshotIdentifier *string `type:"string" required:"true"` + + // The tags to be assigned to the restored DB cluster. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of VPC security groups that the new DB cluster will belong to. 
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s RestoreDBClusterFromSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBClusterFromSnapshotInput) GoString() string { + return s.String() +} + +type RestoreDBClusterFromSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster + // RestoreDBClusterFromSnapshot This data type is used as a response element + // in the DescribeDBClusters action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBClusterFromSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBClusterFromSnapshotOutput) GoString() string { + return s.String() +} + +type RestoreDBClusterToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // The name of the new DB cluster to be created. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + DBClusterIdentifier *string `type:"string" required:"true"` + + // The DB subnet group name to use for the new DB cluster. + DBSubnetGroupName *string `type:"string"` + + // The KMS key identifier to use when restoring an encrypted DB cluster from + // an encrypted DB cluster. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are restoring a DB cluster with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB cluster, then you can use + // the KMS key alias instead of the ARN for the KMS encryption key. 
+	//
+	// You can restore to a new DB cluster and encrypt the new DB cluster with
+	// a KMS key that is different than the KMS key used to encrypt the source DB
+	// cluster. The new DB cluster will be encrypted with the KMS key identified
+	// by the KmsKeyId parameter.
+	//
+	// If you do not specify a value for the KmsKeyId parameter, then the following
+	// will occur:
+	//
+	// If the DB cluster is encrypted, then the restored DB cluster is encrypted
+	// using the KMS key that was used to encrypt the source DB cluster. If the
+	// DB cluster is not encrypted, then the restored DB cluster is not encrypted.
+	// If DBClusterIdentifier refers to a DB cluster that is not encrypted, then
+	// the restore request is rejected.
+	KmsKeyId *string `type:"string"`
+
+	// The name of the option group for the new DB cluster.
+	OptionGroupName *string `type:"string"`
+
+	// The port number on which the new DB cluster accepts connections.
+	//
+	// Constraints: Value must be 1150-65535
+	//
+	// Default: The same port as the original DB cluster.
+	Port *int64 `type:"integer"`
+
+	// The date and time to restore the DB cluster to.
+	//
+	// Valid Values: Value must be a time in Universal Coordinated Time (UTC) format
+	//
+	// Constraints:
+	//
+	// Must be before the latest restorable time for the DB instance Cannot be
+	// specified if UseLatestRestorableTime parameter is true Example: 2015-03-07T23:45:00Z
+	RestoreToTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The identifier of the source DB cluster from which to restore.
+	//
+	// Constraints:
+	//
+	// Must be the identifier of an existing database instance Must contain from
+	// 1 to 63 alphanumeric characters or hyphens First character must be a letter
+	// Cannot end with a hyphen or contain two consecutive hyphens
+	SourceDBClusterIdentifier *string `type:"string" required:"true"`
+
+	// A list of tags.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+
+	// A value that is set to true to restore the DB cluster to the latest restorable
+	// backup time, and false otherwise.
+	//
+	// Default: false
+	//
+	// Constraints: Cannot be specified if RestoreToTime parameter is provided.
+	UseLatestRestorableTime *bool `type:"boolean"`
+
+	// A list of VPC security groups that the new DB cluster belongs to.
+	VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s RestoreDBClusterToPointInTimeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreDBClusterToPointInTimeInput) GoString() string {
+	return s.String()
+}
+
+type RestoreDBClusterToPointInTimeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Contains the result of a successful invocation of the following actions:
+	//
+	//   CreateDBCluster   DeleteDBCluster   FailoverDBCluster   ModifyDBCluster
+	//   RestoreDBClusterFromSnapshot  This data type is used as a response element
+	// in the DescribeDBClusters action.
+	DBCluster *DBCluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreDBClusterToPointInTimeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreDBClusterToPointInTimeOutput) GoString() string {
+	return s.String()
+}
+
+type RestoreDBInstanceFromDBSnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates that minor version upgrades will be applied automatically to the
+	// DB instance during the maintenance window.
+	AutoMinorVersionUpgrade *bool `type:"boolean"`
+
+	// The EC2 Availability Zone that the database instance will be created in.
+	//
+	// Default: A random, system-chosen Availability Zone.
+	//
+	// Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ
+	// parameter is set to true.
+ // + // Example: us-east-1a + AvailabilityZone *string `type:"string"` + + // True to copy all tags from the restored DB instance to snapshots of the DB + // instance; otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The compute and memory capacity of the Amazon RDS DB instance. + // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + // | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge + // | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge + // | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge + // | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large + DBInstanceClass *string `type:"string"` + + // Name of the DB instance to create from the DB snapshot. This parameter isn't + // case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for + // SQL Server) First character must be a letter Cannot end with a hyphen or + // contain two consecutive hyphens Example: my-snapshot-id + DBInstanceIdentifier *string `type:"string" required:"true"` + + // The database name for the restored DB instance. + // + // This parameter doesn't apply to the MySQL or MariaDB engines. + DBName *string `type:"string"` + + // The identifier for the DB snapshot to restore from. + // + // Constraints: + // + // Must contain from 1 to 255 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier + // must be the ARN of the shared DB snapshot. + DBSnapshotIdentifier *string `type:"string" required:"true"` + + // The DB subnet group name to use for the new instance. + DBSubnetGroupName *string `type:"string"` + + // The database engine to use for the new instance. 
+ // + // Default: The same as source + // + // Constraint: Must be compatible with the engine of the source + // + // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee + // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora + Engine *string `type:"string"` + + // Specifies the amount of provisioned IOPS for the DB instance, expressed in + // I/O operations per second. If this parameter is not specified, the IOPS value + // will be taken from the backup. If this parameter is set to 0, the new instance + // will be converted to a non-PIOPS instance, which will take additional time, + // though your DB instance will be available for connections before the conversion + // starts. + // + // Constraints: Must be an integer greater than 1000. + // + // SQL Server + // + // Setting the IOPS value for the SQL Server database engine is not supported. + Iops *int64 `type:"integer"` + + // License model information for the restored DB instance. + // + // Default: Same as source. + // + // Valid values: license-included | bring-your-own-license | general-public-license + LicenseModel *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. + // + // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ + // parameter is set to true. + MultiAZ *bool `type:"boolean"` + + // The name of the option group to be used for the restored DB instance. + // + // Permanent options, such as the TDE option for Oracle Advanced Security TDE, + // cannot be removed from an option group, and that option group cannot be removed + // from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string"` + + // The port number on which the database accepts connections. + // + // Default: The same port as the original DB instance + // + // Constraints: Value must be 1150-65535 + Port *int64 `type:"integer"` + + // Specifies the accessibility options for the DB instance. 
A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC: true VPC: false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. 
+ TdeCredentialPassword *string `type:"string"` +} + +// String returns the string representation +func (s RestoreDBInstanceFromDBSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceFromDBSnapshotInput) GoString() string { + return s.String() +} + +type RestoreDBInstanceFromDBSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBInstanceFromDBSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceFromDBSnapshotOutput) GoString() string { + return s.String() +} + +type RestoreDBInstanceToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // Indicates that minor version upgrades will be applied automatically to the + // DB instance during the maintenance window. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The EC2 Availability Zone that the database instance will be created in. + // + // Default: A random, system-chosen Availability Zone. + // + // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ + // parameter is set to true. + // + // Example: us-east-1a + AvailabilityZone *string `type:"string"` + + // True to copy all tags from the restored DB instance to snapshots of the DB + // instance; otherwise false. The default is false. + CopyTagsToSnapshot *bool `type:"boolean"` + + // The compute and memory capacity of the Amazon RDS DB instance. 
+ // + // Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + // | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge + // | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge + // | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge + // | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large + // + // Default: The same DBInstanceClass as the original DB instance. + DBInstanceClass *string `type:"string"` + + // The database name for the restored DB instance. + // + // This parameter is not used for the MySQL or MariaDB engines. + DBName *string `type:"string"` + + // The DB subnet group name to use for the new instance. + DBSubnetGroupName *string `type:"string"` + + // The database engine to use for the new instance. + // + // Default: The same as source + // + // Constraint: Must be compatible with the engine of the source + // + // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee + // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres| aurora + Engine *string `type:"string"` + + // The amount of Provisioned IOPS (input/output operations per second) to be + // initially allocated for the DB instance. + // + // Constraints: Must be an integer greater than 1000. + // + // SQL Server + // + // Setting the IOPS value for the SQL Server database engine is not supported. + Iops *int64 `type:"integer"` + + // License model information for the restored DB instance. + // + // Default: Same as source. + // + // Valid values: license-included | bring-your-own-license | general-public-license + LicenseModel *string `type:"string"` + + // Specifies if the DB instance is a Multi-AZ deployment. + // + // Constraint: You cannot specify the AvailabilityZone parameter if the MultiAZ + // parameter is set to true. 
+ MultiAZ *bool `type:"boolean"` + + // The name of the option group to be used for the restored DB instance. + // + // Permanent options, such as the TDE option for Oracle Advanced Security TDE, + // cannot be removed from an option group, and that option group cannot be removed + // from a DB instance once it is associated with a DB instance + OptionGroupName *string `type:"string"` + + // The port number on which the database accepts connections. + // + // Constraints: Value must be 1150-65535 + // + // Default: The same port as the original DB instance. + Port *int64 `type:"integer"` + + // Specifies the accessibility options for the DB instance. A value of true + // specifies an Internet-facing instance with a publicly resolvable DNS name, + // which resolves to a public IP address. A value of false specifies an internal + // instance with a DNS name that resolves to a private IP address. + // + // Default: The default behavior varies depending on whether a VPC has been + // requested or not. The following list shows the default behavior in each case. + // + // Default VPC:true VPC:false If no DB subnet group has been specified + // as part of the request and the PubliclyAccessible value has not been set, + // the DB instance will be publicly accessible. If a specific DB subnet group + // has been specified as part of the request and the PubliclyAccessible value + // has not been set, the DB instance will be private. + PubliclyAccessible *bool `type:"boolean"` + + // The date and time to restore from. + // + // Valid Values: Value must be a time in Universal Coordinated Time (UTC) format + // + // Constraints: + // + // Must be before the latest restorable time for the DB instance Cannot be + // specified if UseLatestRestorableTime parameter is true Example: 2009-09-07T23:45:00Z + RestoreTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier of the source DB instance from which to restore. 
+ // + // Constraints: + // + // Must be the identifier of an existing database instance Must contain from + // 1 to 63 alphanumeric characters or hyphens First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + SourceDBInstanceIdentifier *string `type:"string" required:"true"` + + // Specifies the storage type to be associated with the DB instance. + // + // Valid values: standard | gp2 | io1 + // + // If you specify io1, you must also include a value for the Iops parameter. + // + // Default: io1 if the Iops parameter is specified; otherwise standard + StorageType *string `type:"string"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The name of the new database instance to be created. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens First character + // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + TargetDBInstanceIdentifier *string `type:"string" required:"true"` + + // The ARN from the Key Store with which to associate the instance for TDE encryption. + TdeCredentialArn *string `type:"string"` + + // The password for the given ARN from the Key Store in order to access the + // device. + TdeCredentialPassword *string `type:"string"` + + // Specifies whether (true) or not (false) the DB instance is restored from + // the latest backup time. + // + // Default: false + // + // Constraints: Cannot be specified if RestoreTime parameter is provided. 
+ UseLatestRestorableTime *bool `type:"boolean"` +} + +// String returns the string representation +func (s RestoreDBInstanceToPointInTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceToPointInTimeInput) GoString() string { + return s.String() +} + +type RestoreDBInstanceToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type + // is used as a response element in the DescribeDBInstances action. + DBInstance *DBInstance `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBInstanceToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBInstanceToPointInTimeOutput) GoString() string { + return s.String() +} + +type RevokeDBSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range to revoke access from. Must be a valid CIDR range. If CIDRIP + // is specified, EC2SecurityGroupName, EC2SecurityGroupId and EC2SecurityGroupOwnerId + // cannot be provided. + CIDRIP *string `type:"string"` + + // The name of the DB security group to revoke ingress from. + DBSecurityGroupName *string `type:"string" required:"true"` + + // The id of the EC2 security group to revoke access from. For VPC DB security + // groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId + // and either EC2SecurityGroupName or EC2SecurityGroupId must be provided. + EC2SecurityGroupId *string `type:"string"` + + // The name of the EC2 security group to revoke access from. For VPC DB security + // groups, EC2SecurityGroupId must be provided. Otherwise, EC2SecurityGroupOwnerId + // and either EC2SecurityGroupName or EC2SecurityGroupId must be provided. 
+ EC2SecurityGroupName *string `type:"string"` + + // The AWS Account Number of the owner of the EC2 security group specified in + // the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable + // value. For VPC DB security groups, EC2SecurityGroupId must be provided. Otherwise, + // EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId + // must be provided. + EC2SecurityGroupOwnerId *string `type:"string"` +} + +// String returns the string representation +func (s RevokeDBSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeDBSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type RevokeDBSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup + // RevokeDBSecurityGroupIngress This data type is used as a response element + // in the DescribeDBSecurityGroups action. + DBSecurityGroup *DBSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s RevokeDBSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeDBSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// This data type is used as a response element in the DescribeDBSubnetGroups +// action. +type Subnet struct { + _ struct{} `type:"structure"` + + // Contains Availability Zone information. + // + // This data type is used as an element in the following data type: OrderableDBInstanceOption + SubnetAvailabilityZone *AvailabilityZone `type:"structure"` + + // Specifies the identifier of the subnet. + SubnetIdentifier *string `type:"string"` + + // Specifies the status of the subnet. 
+ SubnetStatus *string `type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. +type Tag struct { + _ struct{} `type:"structure"` + + // A key is the required name of the tag. The string value can be from 1 to + // 128 Unicode characters in length and cannot be prefixed with "aws:" or "rds:". + // The string can only contain only the set of Unicode letters, digits, white-space, + // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + Key *string `type:"string"` + + // A value is the optional value of the tag. The string value can be from 1 + // to 256 Unicode characters in length and cannot be prefixed with "aws:" or + // "rds:". The string can only contain only the set of Unicode letters, digits, + // white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// The version of the database engine that a DB instance can be upgraded to. +type UpgradeTarget struct { + _ struct{} `type:"structure"` + + // A value that indicates whether the target version will be applied to any + // source DB instances that have AutoMinorVersionUpgrade set to true. + AutoUpgrade *bool `type:"boolean"` + + // The version of the database engine that a DB instance can be upgraded to. + Description *string `type:"string"` + + // The name of the upgrade target database engine. + Engine *string `type:"string"` + + // The version number of the upgrade target database engine. 
+ EngineVersion *string `type:"string"` + + // A value that indicates whether a database engine will be upgraded to a major + // version. + IsMajorVersionUpgrade *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpgradeTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpgradeTarget) GoString() string { + return s.String() +} + +// This data type is used as a response element for queries on VPC security +// group membership. +type VpcSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The status of the VPC security group. + Status *string `type:"string"` + + // The name of the VPC security group. + VpcSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s VpcSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcSecurityGroupMembership) GoString() string { + return s.String() +} + +const ( + // @enum ApplyMethod + ApplyMethodImmediate = "immediate" + // @enum ApplyMethod + ApplyMethodPendingReboot = "pending-reboot" +) + +const ( + // @enum SourceType + SourceTypeDbInstance = "db-instance" + // @enum SourceType + SourceTypeDbParameterGroup = "db-parameter-group" + // @enum SourceType + SourceTypeDbSecurityGroup = "db-security-group" + // @enum SourceType + SourceTypeDbSnapshot = "db-snapshot" + // @enum SourceType + SourceTypeDbCluster = "db-cluster" + // @enum SourceType + SourceTypeDbClusterSnapshot = "db-cluster-snapshot" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2340 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package rds_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/rds" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRDS_AddSourceIdentifierToSubscription() { + svc := rds.New(session.New()) + + params := &rds.AddSourceIdentifierToSubscriptionInput{ + SourceIdentifier: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.AddSourceIdentifierToSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_AddTagsToResource() { + svc := rds.New(session.New()) + + params := &rds.AddTagsToResourceInput{ + ResourceName: aws.String("String"), // Required + Tags: []*rds.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_ApplyPendingMaintenanceAction() { + svc := rds.New(session.New()) + + params := &rds.ApplyPendingMaintenanceActionInput{ + ApplyAction: aws.String("String"), // Required + OptInType: aws.String("String"), // Required + ResourceIdentifier: aws.String("String"), // Required + } + resp, err := svc.ApplyPendingMaintenanceAction(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_AuthorizeDBSecurityGroupIngress() { + svc := rds.New(session.New()) + + params := &rds.AuthorizeDBSecurityGroupIngressInput{ + DBSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupId: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.AuthorizeDBSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CopyDBClusterSnapshotInput{ + SourceDBClusterSnapshotIdentifier: aws.String("String"), // Required + TargetDBClusterSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CopyDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CopyDBParameterGroupInput{ + SourceDBParameterGroupIdentifier: aws.String("String"), // Required + TargetDBParameterGroupDescription: aws.String("String"), // Required + TargetDBParameterGroupIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CopyDBSnapshotInput{ + SourceDBSnapshotIdentifier: aws.String("String"), // Required + TargetDBSnapshotIdentifier: aws.String("String"), // Required + CopyTags: aws.Bool(true), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CopyDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CopyOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.CopyOptionGroupInput{ + SourceOptionGroupIdentifier: aws.String("String"), // Required + TargetOptionGroupDescription: aws.String("String"), // Required + TargetOptionGroupIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + } + resp, err := svc.CopyOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBCluster() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + MasterUserPassword: aws.String("String"), // Required + MasterUsername: aws.String("String"), // Required + AvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + BackupRetentionPeriod: aws.Int64(1), + CharacterSetName: aws.String("String"), + DBClusterParameterGroupName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + DatabaseName: aws.String("String"), + EngineVersion: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + StorageEncrypted: aws.Bool(true), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CreateDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + DBParameterGroupFamily: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CreateDBClusterSnapshotInput{ + DBClusterIdentifier: aws.String("String"), // Required + DBClusterSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CreateDBInstance() { + svc := rds.New(session.New()) + + params := &rds.CreateDBInstanceInput{ + DBInstanceClass: aws.String("String"), // Required + DBInstanceIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + AllocatedStorage: aws.Int64(1), + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + BackupRetentionPeriod: aws.Int64(1), + CharacterSetName: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBClusterIdentifier: aws.String("String"), + DBName: aws.String("String"), + DBParameterGroupName: aws.String("String"), + DBSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + DBSubnetGroupName: aws.String("String"), + EngineVersion: aws.String("String"), + Iops: aws.Int64(1), + KmsKeyId: aws.String("String"), + LicenseModel: aws.String("String"), + MasterUserPassword: aws.String("String"), + MasterUsername: aws.String("String"), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + StorageEncrypted: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CreateDBInstanceReadReplica() { + svc := rds.New(session.New()) + + params := &rds.CreateDBInstanceReadReplicaInput{ + DBInstanceIdentifier: aws.String("String"), // Required + SourceDBInstanceIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Iops: aws.Int64(1), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBInstanceReadReplica(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBParameterGroupInput{ + DBParameterGroupFamily: aws.String("String"), // Required + DBParameterGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CreateDBSecurityGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSecurityGroupInput{ + DBSecurityGroupDescription: aws.String("String"), // Required + DBSecurityGroupName: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSnapshotInput{ + DBInstanceIdentifier: aws.String("String"), // Required + DBSnapshotIdentifier: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateDBSubnetGroupInput{ + DBSubnetGroupDescription: aws.String("String"), // Required + DBSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_CreateEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.CreateEventSubscriptionInput{ + SnsTopicArn: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_CreateOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.CreateOptionGroupInput{ + EngineName: aws.String("String"), // Required + MajorEngineVersion: aws.String("String"), // Required + OptionGroupDescription: aws.String("String"), // Required + OptionGroupName: aws.String("String"), // Required + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DeleteDBCluster() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + FinalDBSnapshotIdentifier: aws.String("String"), + SkipFinalSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBClusterSnapshot() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBClusterSnapshotInput{ + DBClusterSnapshotIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteDBClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBInstance() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + FinalDBSnapshotIdentifier: aws.String("String"), + SkipFinalSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DeleteDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSecurityGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSecurityGroupInput{ + DBSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSnapshotInput{ + DBSnapshotIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteDBSubnetGroupInput{ + DBSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DeleteEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.DeleteEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DeleteOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.DeleteOptionGroupInput{ + OptionGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeAccountAttributes() { + svc := rds.New(session.New()) + + var params *rds.DescribeAccountAttributesInput + resp, err := svc.DescribeAccountAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeCertificates() { + svc := rds.New(session.New()) + + params := &rds.DescribeCertificatesInput{ + CertificateIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterParameterGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterParameterGroupsInput{ + DBClusterParameterGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBClusterParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterParametersInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeDBClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusterSnapshots() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClusterSnapshotsInput{ + DBClusterIdentifier: aws.String("String"), + DBClusterSnapshotIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotType: aws.String("String"), + } + resp, err := svc.DescribeDBClusterSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBClusters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBEngineVersions() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBEngineVersionsInput{ + DBParameterGroupFamily: aws.String("String"), + DefaultOnly: aws.Bool(true), + Engine: aws.String("String"), + EngineVersion: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + ListSupportedCharacterSets: aws.Bool(true), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBEngineVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeDBInstances() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBLogFiles() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBLogFilesInput{ + DBInstanceIdentifier: aws.String("String"), // Required + FileLastWritten: aws.Int64(1), + FileSize: aws.Int64(1), + FilenameContains: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBLogFiles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBParameterGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBParameterGroupsInput{ + DBParameterGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBParametersInput{ + DBParameterGroupName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeDBParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSecurityGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSecurityGroupsInput{ + DBSecurityGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSnapshotAttributes() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSnapshotAttributesInput{ + DBSnapshotIdentifier: aws.String("String"), + } + resp, err := svc.DescribeDBSnapshotAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSnapshots() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSnapshotsInput{ + DBInstanceIdentifier: aws.String("String"), + DBSnapshotIdentifier: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + IncludePublic: aws.Bool(true), + IncludeShared: aws.Bool(true), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotType: aws.String("String"), + } + resp, err := svc.DescribeDBSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeDBSubnetGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeDBSubnetGroupsInput{ + DBSubnetGroupName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDBSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEngineDefaultClusterParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeEngineDefaultClusterParametersInput{ + DBParameterGroupFamily: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEngineDefaultParameters() { + svc := rds.New(session.New()) + + params := &rds.DescribeEngineDefaultParametersInput{ + DBParameterGroupFamily: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeEngineDefaultParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEventCategories() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventCategoriesInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + SourceType: aws.String("String"), + } + resp, err := svc.DescribeEventCategories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEventSubscriptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventSubscriptionsInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SubscriptionName: aws.String("String"), + } + resp, err := svc.DescribeEventSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeEvents() { + svc := rds.New(session.New()) + + params := &rds.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeOptionGroupOptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeOptionGroupOptionsInput{ + EngineName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MajorEngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeOptionGroupOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeOptionGroups() { + svc := rds.New(session.New()) + + params := &rds.DescribeOptionGroupsInput{ + EngineName: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + MajorEngineVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OptionGroupName: aws.String("String"), + } + resp, err := svc.DescribeOptionGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeOrderableDBInstanceOptions() { + svc := rds.New(session.New()) + + params := &rds.DescribeOrderableDBInstanceOptionsInput{ + Engine: aws.String("String"), // Required + DBInstanceClass: aws.String("String"), + EngineVersion: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + LicenseModel: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Vpc: aws.Bool(true), + } + resp, err := svc.DescribeOrderableDBInstanceOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribePendingMaintenanceActions() { + svc := rds.New(session.New()) + + params := &rds.DescribePendingMaintenanceActionsInput{ + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ResourceIdentifier: aws.String("String"), + } + resp, err := svc.DescribePendingMaintenanceActions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DescribeReservedDBInstances() { + svc := rds.New(session.New()) + + params := &rds.DescribeReservedDBInstancesInput{ + DBInstanceClass: aws.String("String"), + Duration: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + MultiAZ: aws.Bool(true), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedDBInstanceId: aws.String("String"), + ReservedDBInstancesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedDBInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_DescribeReservedDBInstancesOfferings() { + svc := rds.New(session.New()) + + params := &rds.DescribeReservedDBInstancesOfferingsInput{ + DBInstanceClass: aws.String("String"), + Duration: aws.String("String"), + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + MultiAZ: aws.Bool(true), + OfferingType: aws.String("String"), + ProductDescription: aws.String("String"), + ReservedDBInstancesOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedDBInstancesOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_DownloadDBLogFilePortion() { + svc := rds.New(session.New()) + + params := &rds.DownloadDBLogFilePortionInput{ + DBInstanceIdentifier: aws.String("String"), // Required + LogFileName: aws.String("String"), // Required + Marker: aws.String("String"), + NumberOfLines: aws.Int64(1), + } + resp, err := svc.DownloadDBLogFilePortion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_FailoverDBCluster() { + svc := rds.New(session.New()) + + params := &rds.FailoverDBClusterInput{ + DBClusterIdentifier: aws.String("String"), + } + resp, err := svc.FailoverDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ListTagsForResource() { + svc := rds.New(session.New()) + + params := &rds.ListTagsForResourceInput{ + ResourceName: aws.String("String"), // Required + Filters: []*rds.Filter{ + { // Required + Name: aws.String("String"), // Required + Values: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_ModifyDBCluster() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBClusterInput{ + DBClusterIdentifier: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + BackupRetentionPeriod: aws.Int64(1), + DBClusterParameterGroupName: aws.String("String"), + MasterUserPassword: aws.String("String"), + NewDBClusterIdentifier: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_ModifyDBInstance() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + AllocatedStorage: aws.Int64(1), + AllowMajorVersionUpgrade: aws.Bool(true), + ApplyImmediately: aws.Bool(true), + AutoMinorVersionUpgrade: aws.Bool(true), + BackupRetentionPeriod: aws.Int64(1), + CACertificateIdentifier: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBParameterGroupName: aws.String("String"), + DBPortNumber: aws.Int64(1), + DBSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + EngineVersion: aws.String("String"), + Iops: aws.Int64(1), + MasterUserPassword: aws.String("String"), + MonitoringInterval: aws.Int64(1), + MonitoringRoleArn: aws.String("String"), + MultiAZ: aws.Bool(true), + NewDBInstanceIdentifier: aws.String("String"), + OptionGroupName: aws.String("String"), + PreferredBackupWindow: aws.String("String"), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_ModifyDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBSnapshotAttribute() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBSnapshotAttributeInput{ + DBSnapshotIdentifier: aws.String("String"), // Required + AttributeName: aws.String("String"), + ValuesToAdd: []*string{ + aws.String("String"), // Required + // More values... + }, + ValuesToRemove: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyDBSnapshotAttribute(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyDBSubnetGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyDBSubnetGroupInput{ + DBSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + DBSubnetGroupDescription: aws.String("String"), + } + resp, err := svc.ModifyDBSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyEventSubscription() { + svc := rds.New(session.New()) + + params := &rds.ModifyEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + SnsTopicArn: aws.String("String"), + SourceType: aws.String("String"), + } + resp, err := svc.ModifyEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ModifyOptionGroup() { + svc := rds.New(session.New()) + + params := &rds.ModifyOptionGroupInput{ + OptionGroupName: aws.String("String"), // Required + ApplyImmediately: aws.Bool(true), + OptionsToInclude: []*rds.OptionConfiguration{ + { // Required + OptionName: aws.String("String"), // Required + DBSecurityGroupMemberships: []*string{ + aws.String("String"), // Required + // More values... + }, + OptionSettings: []*rds.OptionSetting{ + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + DefaultValue: aws.String("String"), + Description: aws.String("String"), + IsCollection: aws.Bool(true), + IsModifiable: aws.Bool(true), + Name: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + Port: aws.Int64(1), + VpcSecurityGroupMemberships: []*string{ + aws.String("String"), // Required + // More values... + }, + }, + // More values... 
+ }, + OptionsToRemove: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyOptionGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_PromoteReadReplica() { + svc := rds.New(session.New()) + + params := &rds.PromoteReadReplicaInput{ + DBInstanceIdentifier: aws.String("String"), // Required + BackupRetentionPeriod: aws.Int64(1), + PreferredBackupWindow: aws.String("String"), + } + resp, err := svc.PromoteReadReplica(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_PurchaseReservedDBInstancesOffering() { + svc := rds.New(session.New()) + + params := &rds.PurchaseReservedDBInstancesOfferingInput{ + ReservedDBInstancesOfferingId: aws.String("String"), // Required + DBInstanceCount: aws.Int64(1), + ReservedDBInstanceId: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.PurchaseReservedDBInstancesOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RebootDBInstance() { + svc := rds.New(session.New()) + + params := &rds.RebootDBInstanceInput{ + DBInstanceIdentifier: aws.String("String"), // Required + ForceFailover: aws.Bool(true), + } + resp, err := svc.RebootDBInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RemoveSourceIdentifierFromSubscription() { + svc := rds.New(session.New()) + + params := &rds.RemoveSourceIdentifierFromSubscriptionInput{ + SourceIdentifier: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.RemoveSourceIdentifierFromSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RemoveTagsFromResource() { + svc := rds.New(session.New()) + + params := &rds.RemoveTagsFromResourceInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ResetDBClusterParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ResetDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... 
+ }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetDBClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_ResetDBParameterGroup() { + svc := rds.New(session.New()) + + params := &rds.ResetDBParameterGroupInput{ + DBParameterGroupName: aws.String("String"), // Required + Parameters: []*rds.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyMethod: aws.String("ApplyMethod"), + ApplyType: aws.String("String"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetDBParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RestoreDBClusterFromSnapshot() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBClusterFromSnapshotInput{ + DBClusterIdentifier: aws.String("String"), // Required + Engine: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + AvailabilityZones: []*string{ + aws.String("String"), // Required + // More values... + }, + DBSubnetGroupName: aws.String("String"), + DatabaseName: aws.String("String"), + EngineVersion: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... 
+ }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreDBClusterFromSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RestoreDBClusterToPointInTime() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBClusterToPointInTimeInput{ + DBClusterIdentifier: aws.String("String"), // Required + SourceDBClusterIdentifier: aws.String("String"), // Required + DBSubnetGroupName: aws.String("String"), + KmsKeyId: aws.String("String"), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + RestoreToTime: aws.Time(time.Now()), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + UseLatestRestorableTime: aws.Bool(true), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreDBClusterToPointInTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_RestoreDBInstanceFromDBSnapshot() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBInstanceFromDBSnapshotInput{ + DBInstanceIdentifier: aws.String("String"), // Required + DBSnapshotIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + Iops: aws.Int64(1), + LicenseModel: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + } + resp, err := svc.RestoreDBInstanceFromDBSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRDS_RestoreDBInstanceToPointInTime() { + svc := rds.New(session.New()) + + params := &rds.RestoreDBInstanceToPointInTimeInput{ + SourceDBInstanceIdentifier: aws.String("String"), // Required + TargetDBInstanceIdentifier: aws.String("String"), // Required + AutoMinorVersionUpgrade: aws.Bool(true), + AvailabilityZone: aws.String("String"), + CopyTagsToSnapshot: aws.Bool(true), + DBInstanceClass: aws.String("String"), + DBName: aws.String("String"), + DBSubnetGroupName: aws.String("String"), + Engine: aws.String("String"), + Iops: aws.Int64(1), + LicenseModel: aws.String("String"), + MultiAZ: aws.Bool(true), + OptionGroupName: aws.String("String"), + Port: aws.Int64(1), + PubliclyAccessible: aws.Bool(true), + RestoreTime: aws.Time(time.Now()), + StorageType: aws.String("String"), + Tags: []*rds.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + TdeCredentialArn: aws.String("String"), + TdeCredentialPassword: aws.String("String"), + UseLatestRestorableTime: aws.Bool(true), + } + resp, err := svc.RestoreDBInstanceToPointInTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRDS_RevokeDBSecurityGroupIngress() { + svc := rds.New(session.New()) + + params := &rds.RevokeDBSecurityGroupIngressInput{ + DBSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupId: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.RevokeDBSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/rdsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,360 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package rdsiface provides an interface for the Amazon Relational Database Service. +package rdsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/rds" +) + +// RDSAPI is the interface type for rds.RDS. +type RDSAPI interface { + AddSourceIdentifierToSubscriptionRequest(*rds.AddSourceIdentifierToSubscriptionInput) (*request.Request, *rds.AddSourceIdentifierToSubscriptionOutput) + + AddSourceIdentifierToSubscription(*rds.AddSourceIdentifierToSubscriptionInput) (*rds.AddSourceIdentifierToSubscriptionOutput, error) + + AddTagsToResourceRequest(*rds.AddTagsToResourceInput) (*request.Request, *rds.AddTagsToResourceOutput) + + AddTagsToResource(*rds.AddTagsToResourceInput) (*rds.AddTagsToResourceOutput, error) + + ApplyPendingMaintenanceActionRequest(*rds.ApplyPendingMaintenanceActionInput) (*request.Request, *rds.ApplyPendingMaintenanceActionOutput) + + ApplyPendingMaintenanceAction(*rds.ApplyPendingMaintenanceActionInput) (*rds.ApplyPendingMaintenanceActionOutput, error) + + AuthorizeDBSecurityGroupIngressRequest(*rds.AuthorizeDBSecurityGroupIngressInput) (*request.Request, *rds.AuthorizeDBSecurityGroupIngressOutput) + + AuthorizeDBSecurityGroupIngress(*rds.AuthorizeDBSecurityGroupIngressInput) (*rds.AuthorizeDBSecurityGroupIngressOutput, error) + + 
CopyDBClusterSnapshotRequest(*rds.CopyDBClusterSnapshotInput) (*request.Request, *rds.CopyDBClusterSnapshotOutput) + + CopyDBClusterSnapshot(*rds.CopyDBClusterSnapshotInput) (*rds.CopyDBClusterSnapshotOutput, error) + + CopyDBParameterGroupRequest(*rds.CopyDBParameterGroupInput) (*request.Request, *rds.CopyDBParameterGroupOutput) + + CopyDBParameterGroup(*rds.CopyDBParameterGroupInput) (*rds.CopyDBParameterGroupOutput, error) + + CopyDBSnapshotRequest(*rds.CopyDBSnapshotInput) (*request.Request, *rds.CopyDBSnapshotOutput) + + CopyDBSnapshot(*rds.CopyDBSnapshotInput) (*rds.CopyDBSnapshotOutput, error) + + CopyOptionGroupRequest(*rds.CopyOptionGroupInput) (*request.Request, *rds.CopyOptionGroupOutput) + + CopyOptionGroup(*rds.CopyOptionGroupInput) (*rds.CopyOptionGroupOutput, error) + + CreateDBClusterRequest(*rds.CreateDBClusterInput) (*request.Request, *rds.CreateDBClusterOutput) + + CreateDBCluster(*rds.CreateDBClusterInput) (*rds.CreateDBClusterOutput, error) + + CreateDBClusterParameterGroupRequest(*rds.CreateDBClusterParameterGroupInput) (*request.Request, *rds.CreateDBClusterParameterGroupOutput) + + CreateDBClusterParameterGroup(*rds.CreateDBClusterParameterGroupInput) (*rds.CreateDBClusterParameterGroupOutput, error) + + CreateDBClusterSnapshotRequest(*rds.CreateDBClusterSnapshotInput) (*request.Request, *rds.CreateDBClusterSnapshotOutput) + + CreateDBClusterSnapshot(*rds.CreateDBClusterSnapshotInput) (*rds.CreateDBClusterSnapshotOutput, error) + + CreateDBInstanceRequest(*rds.CreateDBInstanceInput) (*request.Request, *rds.CreateDBInstanceOutput) + + CreateDBInstance(*rds.CreateDBInstanceInput) (*rds.CreateDBInstanceOutput, error) + + CreateDBInstanceReadReplicaRequest(*rds.CreateDBInstanceReadReplicaInput) (*request.Request, *rds.CreateDBInstanceReadReplicaOutput) + + CreateDBInstanceReadReplica(*rds.CreateDBInstanceReadReplicaInput) (*rds.CreateDBInstanceReadReplicaOutput, error) + + CreateDBParameterGroupRequest(*rds.CreateDBParameterGroupInput) 
(*request.Request, *rds.CreateDBParameterGroupOutput) + + CreateDBParameterGroup(*rds.CreateDBParameterGroupInput) (*rds.CreateDBParameterGroupOutput, error) + + CreateDBSecurityGroupRequest(*rds.CreateDBSecurityGroupInput) (*request.Request, *rds.CreateDBSecurityGroupOutput) + + CreateDBSecurityGroup(*rds.CreateDBSecurityGroupInput) (*rds.CreateDBSecurityGroupOutput, error) + + CreateDBSnapshotRequest(*rds.CreateDBSnapshotInput) (*request.Request, *rds.CreateDBSnapshotOutput) + + CreateDBSnapshot(*rds.CreateDBSnapshotInput) (*rds.CreateDBSnapshotOutput, error) + + CreateDBSubnetGroupRequest(*rds.CreateDBSubnetGroupInput) (*request.Request, *rds.CreateDBSubnetGroupOutput) + + CreateDBSubnetGroup(*rds.CreateDBSubnetGroupInput) (*rds.CreateDBSubnetGroupOutput, error) + + CreateEventSubscriptionRequest(*rds.CreateEventSubscriptionInput) (*request.Request, *rds.CreateEventSubscriptionOutput) + + CreateEventSubscription(*rds.CreateEventSubscriptionInput) (*rds.CreateEventSubscriptionOutput, error) + + CreateOptionGroupRequest(*rds.CreateOptionGroupInput) (*request.Request, *rds.CreateOptionGroupOutput) + + CreateOptionGroup(*rds.CreateOptionGroupInput) (*rds.CreateOptionGroupOutput, error) + + DeleteDBClusterRequest(*rds.DeleteDBClusterInput) (*request.Request, *rds.DeleteDBClusterOutput) + + DeleteDBCluster(*rds.DeleteDBClusterInput) (*rds.DeleteDBClusterOutput, error) + + DeleteDBClusterParameterGroupRequest(*rds.DeleteDBClusterParameterGroupInput) (*request.Request, *rds.DeleteDBClusterParameterGroupOutput) + + DeleteDBClusterParameterGroup(*rds.DeleteDBClusterParameterGroupInput) (*rds.DeleteDBClusterParameterGroupOutput, error) + + DeleteDBClusterSnapshotRequest(*rds.DeleteDBClusterSnapshotInput) (*request.Request, *rds.DeleteDBClusterSnapshotOutput) + + DeleteDBClusterSnapshot(*rds.DeleteDBClusterSnapshotInput) (*rds.DeleteDBClusterSnapshotOutput, error) + + DeleteDBInstanceRequest(*rds.DeleteDBInstanceInput) (*request.Request, *rds.DeleteDBInstanceOutput) + + 
DeleteDBInstance(*rds.DeleteDBInstanceInput) (*rds.DeleteDBInstanceOutput, error) + + DeleteDBParameterGroupRequest(*rds.DeleteDBParameterGroupInput) (*request.Request, *rds.DeleteDBParameterGroupOutput) + + DeleteDBParameterGroup(*rds.DeleteDBParameterGroupInput) (*rds.DeleteDBParameterGroupOutput, error) + + DeleteDBSecurityGroupRequest(*rds.DeleteDBSecurityGroupInput) (*request.Request, *rds.DeleteDBSecurityGroupOutput) + + DeleteDBSecurityGroup(*rds.DeleteDBSecurityGroupInput) (*rds.DeleteDBSecurityGroupOutput, error) + + DeleteDBSnapshotRequest(*rds.DeleteDBSnapshotInput) (*request.Request, *rds.DeleteDBSnapshotOutput) + + DeleteDBSnapshot(*rds.DeleteDBSnapshotInput) (*rds.DeleteDBSnapshotOutput, error) + + DeleteDBSubnetGroupRequest(*rds.DeleteDBSubnetGroupInput) (*request.Request, *rds.DeleteDBSubnetGroupOutput) + + DeleteDBSubnetGroup(*rds.DeleteDBSubnetGroupInput) (*rds.DeleteDBSubnetGroupOutput, error) + + DeleteEventSubscriptionRequest(*rds.DeleteEventSubscriptionInput) (*request.Request, *rds.DeleteEventSubscriptionOutput) + + DeleteEventSubscription(*rds.DeleteEventSubscriptionInput) (*rds.DeleteEventSubscriptionOutput, error) + + DeleteOptionGroupRequest(*rds.DeleteOptionGroupInput) (*request.Request, *rds.DeleteOptionGroupOutput) + + DeleteOptionGroup(*rds.DeleteOptionGroupInput) (*rds.DeleteOptionGroupOutput, error) + + DescribeAccountAttributesRequest(*rds.DescribeAccountAttributesInput) (*request.Request, *rds.DescribeAccountAttributesOutput) + + DescribeAccountAttributes(*rds.DescribeAccountAttributesInput) (*rds.DescribeAccountAttributesOutput, error) + + DescribeCertificatesRequest(*rds.DescribeCertificatesInput) (*request.Request, *rds.DescribeCertificatesOutput) + + DescribeCertificates(*rds.DescribeCertificatesInput) (*rds.DescribeCertificatesOutput, error) + + DescribeDBClusterParameterGroupsRequest(*rds.DescribeDBClusterParameterGroupsInput) (*request.Request, *rds.DescribeDBClusterParameterGroupsOutput) + + 
DescribeDBClusterParameterGroups(*rds.DescribeDBClusterParameterGroupsInput) (*rds.DescribeDBClusterParameterGroupsOutput, error) + + DescribeDBClusterParametersRequest(*rds.DescribeDBClusterParametersInput) (*request.Request, *rds.DescribeDBClusterParametersOutput) + + DescribeDBClusterParameters(*rds.DescribeDBClusterParametersInput) (*rds.DescribeDBClusterParametersOutput, error) + + DescribeDBClusterSnapshotsRequest(*rds.DescribeDBClusterSnapshotsInput) (*request.Request, *rds.DescribeDBClusterSnapshotsOutput) + + DescribeDBClusterSnapshots(*rds.DescribeDBClusterSnapshotsInput) (*rds.DescribeDBClusterSnapshotsOutput, error) + + DescribeDBClustersRequest(*rds.DescribeDBClustersInput) (*request.Request, *rds.DescribeDBClustersOutput) + + DescribeDBClusters(*rds.DescribeDBClustersInput) (*rds.DescribeDBClustersOutput, error) + + DescribeDBEngineVersionsRequest(*rds.DescribeDBEngineVersionsInput) (*request.Request, *rds.DescribeDBEngineVersionsOutput) + + DescribeDBEngineVersions(*rds.DescribeDBEngineVersionsInput) (*rds.DescribeDBEngineVersionsOutput, error) + + DescribeDBEngineVersionsPages(*rds.DescribeDBEngineVersionsInput, func(*rds.DescribeDBEngineVersionsOutput, bool) bool) error + + DescribeDBInstancesRequest(*rds.DescribeDBInstancesInput) (*request.Request, *rds.DescribeDBInstancesOutput) + + DescribeDBInstances(*rds.DescribeDBInstancesInput) (*rds.DescribeDBInstancesOutput, error) + + DescribeDBInstancesPages(*rds.DescribeDBInstancesInput, func(*rds.DescribeDBInstancesOutput, bool) bool) error + + DescribeDBLogFilesRequest(*rds.DescribeDBLogFilesInput) (*request.Request, *rds.DescribeDBLogFilesOutput) + + DescribeDBLogFiles(*rds.DescribeDBLogFilesInput) (*rds.DescribeDBLogFilesOutput, error) + + DescribeDBLogFilesPages(*rds.DescribeDBLogFilesInput, func(*rds.DescribeDBLogFilesOutput, bool) bool) error + + DescribeDBParameterGroupsRequest(*rds.DescribeDBParameterGroupsInput) (*request.Request, *rds.DescribeDBParameterGroupsOutput) + + 
DescribeDBParameterGroups(*rds.DescribeDBParameterGroupsInput) (*rds.DescribeDBParameterGroupsOutput, error) + + DescribeDBParameterGroupsPages(*rds.DescribeDBParameterGroupsInput, func(*rds.DescribeDBParameterGroupsOutput, bool) bool) error + + DescribeDBParametersRequest(*rds.DescribeDBParametersInput) (*request.Request, *rds.DescribeDBParametersOutput) + + DescribeDBParameters(*rds.DescribeDBParametersInput) (*rds.DescribeDBParametersOutput, error) + + DescribeDBParametersPages(*rds.DescribeDBParametersInput, func(*rds.DescribeDBParametersOutput, bool) bool) error + + DescribeDBSecurityGroupsRequest(*rds.DescribeDBSecurityGroupsInput) (*request.Request, *rds.DescribeDBSecurityGroupsOutput) + + DescribeDBSecurityGroups(*rds.DescribeDBSecurityGroupsInput) (*rds.DescribeDBSecurityGroupsOutput, error) + + DescribeDBSecurityGroupsPages(*rds.DescribeDBSecurityGroupsInput, func(*rds.DescribeDBSecurityGroupsOutput, bool) bool) error + + DescribeDBSnapshotAttributesRequest(*rds.DescribeDBSnapshotAttributesInput) (*request.Request, *rds.DescribeDBSnapshotAttributesOutput) + + DescribeDBSnapshotAttributes(*rds.DescribeDBSnapshotAttributesInput) (*rds.DescribeDBSnapshotAttributesOutput, error) + + DescribeDBSnapshotsRequest(*rds.DescribeDBSnapshotsInput) (*request.Request, *rds.DescribeDBSnapshotsOutput) + + DescribeDBSnapshots(*rds.DescribeDBSnapshotsInput) (*rds.DescribeDBSnapshotsOutput, error) + + DescribeDBSnapshotsPages(*rds.DescribeDBSnapshotsInput, func(*rds.DescribeDBSnapshotsOutput, bool) bool) error + + DescribeDBSubnetGroupsRequest(*rds.DescribeDBSubnetGroupsInput) (*request.Request, *rds.DescribeDBSubnetGroupsOutput) + + DescribeDBSubnetGroups(*rds.DescribeDBSubnetGroupsInput) (*rds.DescribeDBSubnetGroupsOutput, error) + + DescribeDBSubnetGroupsPages(*rds.DescribeDBSubnetGroupsInput, func(*rds.DescribeDBSubnetGroupsOutput, bool) bool) error + + DescribeEngineDefaultClusterParametersRequest(*rds.DescribeEngineDefaultClusterParametersInput) (*request.Request, 
*rds.DescribeEngineDefaultClusterParametersOutput) + + DescribeEngineDefaultClusterParameters(*rds.DescribeEngineDefaultClusterParametersInput) (*rds.DescribeEngineDefaultClusterParametersOutput, error) + + DescribeEngineDefaultParametersRequest(*rds.DescribeEngineDefaultParametersInput) (*request.Request, *rds.DescribeEngineDefaultParametersOutput) + + DescribeEngineDefaultParameters(*rds.DescribeEngineDefaultParametersInput) (*rds.DescribeEngineDefaultParametersOutput, error) + + DescribeEngineDefaultParametersPages(*rds.DescribeEngineDefaultParametersInput, func(*rds.DescribeEngineDefaultParametersOutput, bool) bool) error + + DescribeEventCategoriesRequest(*rds.DescribeEventCategoriesInput) (*request.Request, *rds.DescribeEventCategoriesOutput) + + DescribeEventCategories(*rds.DescribeEventCategoriesInput) (*rds.DescribeEventCategoriesOutput, error) + + DescribeEventSubscriptionsRequest(*rds.DescribeEventSubscriptionsInput) (*request.Request, *rds.DescribeEventSubscriptionsOutput) + + DescribeEventSubscriptions(*rds.DescribeEventSubscriptionsInput) (*rds.DescribeEventSubscriptionsOutput, error) + + DescribeEventSubscriptionsPages(*rds.DescribeEventSubscriptionsInput, func(*rds.DescribeEventSubscriptionsOutput, bool) bool) error + + DescribeEventsRequest(*rds.DescribeEventsInput) (*request.Request, *rds.DescribeEventsOutput) + + DescribeEvents(*rds.DescribeEventsInput) (*rds.DescribeEventsOutput, error) + + DescribeEventsPages(*rds.DescribeEventsInput, func(*rds.DescribeEventsOutput, bool) bool) error + + DescribeOptionGroupOptionsRequest(*rds.DescribeOptionGroupOptionsInput) (*request.Request, *rds.DescribeOptionGroupOptionsOutput) + + DescribeOptionGroupOptions(*rds.DescribeOptionGroupOptionsInput) (*rds.DescribeOptionGroupOptionsOutput, error) + + DescribeOptionGroupOptionsPages(*rds.DescribeOptionGroupOptionsInput, func(*rds.DescribeOptionGroupOptionsOutput, bool) bool) error + + DescribeOptionGroupsRequest(*rds.DescribeOptionGroupsInput) (*request.Request, 
*rds.DescribeOptionGroupsOutput) + + DescribeOptionGroups(*rds.DescribeOptionGroupsInput) (*rds.DescribeOptionGroupsOutput, error) + + DescribeOptionGroupsPages(*rds.DescribeOptionGroupsInput, func(*rds.DescribeOptionGroupsOutput, bool) bool) error + + DescribeOrderableDBInstanceOptionsRequest(*rds.DescribeOrderableDBInstanceOptionsInput) (*request.Request, *rds.DescribeOrderableDBInstanceOptionsOutput) + + DescribeOrderableDBInstanceOptions(*rds.DescribeOrderableDBInstanceOptionsInput) (*rds.DescribeOrderableDBInstanceOptionsOutput, error) + + DescribeOrderableDBInstanceOptionsPages(*rds.DescribeOrderableDBInstanceOptionsInput, func(*rds.DescribeOrderableDBInstanceOptionsOutput, bool) bool) error + + DescribePendingMaintenanceActionsRequest(*rds.DescribePendingMaintenanceActionsInput) (*request.Request, *rds.DescribePendingMaintenanceActionsOutput) + + DescribePendingMaintenanceActions(*rds.DescribePendingMaintenanceActionsInput) (*rds.DescribePendingMaintenanceActionsOutput, error) + + DescribeReservedDBInstancesRequest(*rds.DescribeReservedDBInstancesInput) (*request.Request, *rds.DescribeReservedDBInstancesOutput) + + DescribeReservedDBInstances(*rds.DescribeReservedDBInstancesInput) (*rds.DescribeReservedDBInstancesOutput, error) + + DescribeReservedDBInstancesPages(*rds.DescribeReservedDBInstancesInput, func(*rds.DescribeReservedDBInstancesOutput, bool) bool) error + + DescribeReservedDBInstancesOfferingsRequest(*rds.DescribeReservedDBInstancesOfferingsInput) (*request.Request, *rds.DescribeReservedDBInstancesOfferingsOutput) + + DescribeReservedDBInstancesOfferings(*rds.DescribeReservedDBInstancesOfferingsInput) (*rds.DescribeReservedDBInstancesOfferingsOutput, error) + + DescribeReservedDBInstancesOfferingsPages(*rds.DescribeReservedDBInstancesOfferingsInput, func(*rds.DescribeReservedDBInstancesOfferingsOutput, bool) bool) error + + DownloadDBLogFilePortionRequest(*rds.DownloadDBLogFilePortionInput) (*request.Request, *rds.DownloadDBLogFilePortionOutput) + 
+ DownloadDBLogFilePortion(*rds.DownloadDBLogFilePortionInput) (*rds.DownloadDBLogFilePortionOutput, error) + + DownloadDBLogFilePortionPages(*rds.DownloadDBLogFilePortionInput, func(*rds.DownloadDBLogFilePortionOutput, bool) bool) error + + FailoverDBClusterRequest(*rds.FailoverDBClusterInput) (*request.Request, *rds.FailoverDBClusterOutput) + + FailoverDBCluster(*rds.FailoverDBClusterInput) (*rds.FailoverDBClusterOutput, error) + + ListTagsForResourceRequest(*rds.ListTagsForResourceInput) (*request.Request, *rds.ListTagsForResourceOutput) + + ListTagsForResource(*rds.ListTagsForResourceInput) (*rds.ListTagsForResourceOutput, error) + + ModifyDBClusterRequest(*rds.ModifyDBClusterInput) (*request.Request, *rds.ModifyDBClusterOutput) + + ModifyDBCluster(*rds.ModifyDBClusterInput) (*rds.ModifyDBClusterOutput, error) + + ModifyDBClusterParameterGroupRequest(*rds.ModifyDBClusterParameterGroupInput) (*request.Request, *rds.DBClusterParameterGroupNameMessage) + + ModifyDBClusterParameterGroup(*rds.ModifyDBClusterParameterGroupInput) (*rds.DBClusterParameterGroupNameMessage, error) + + ModifyDBInstanceRequest(*rds.ModifyDBInstanceInput) (*request.Request, *rds.ModifyDBInstanceOutput) + + ModifyDBInstance(*rds.ModifyDBInstanceInput) (*rds.ModifyDBInstanceOutput, error) + + ModifyDBParameterGroupRequest(*rds.ModifyDBParameterGroupInput) (*request.Request, *rds.DBParameterGroupNameMessage) + + ModifyDBParameterGroup(*rds.ModifyDBParameterGroupInput) (*rds.DBParameterGroupNameMessage, error) + + ModifyDBSnapshotAttributeRequest(*rds.ModifyDBSnapshotAttributeInput) (*request.Request, *rds.ModifyDBSnapshotAttributeOutput) + + ModifyDBSnapshotAttribute(*rds.ModifyDBSnapshotAttributeInput) (*rds.ModifyDBSnapshotAttributeOutput, error) + + ModifyDBSubnetGroupRequest(*rds.ModifyDBSubnetGroupInput) (*request.Request, *rds.ModifyDBSubnetGroupOutput) + + ModifyDBSubnetGroup(*rds.ModifyDBSubnetGroupInput) (*rds.ModifyDBSubnetGroupOutput, error) + + 
ModifyEventSubscriptionRequest(*rds.ModifyEventSubscriptionInput) (*request.Request, *rds.ModifyEventSubscriptionOutput) + + ModifyEventSubscription(*rds.ModifyEventSubscriptionInput) (*rds.ModifyEventSubscriptionOutput, error) + + ModifyOptionGroupRequest(*rds.ModifyOptionGroupInput) (*request.Request, *rds.ModifyOptionGroupOutput) + + ModifyOptionGroup(*rds.ModifyOptionGroupInput) (*rds.ModifyOptionGroupOutput, error) + + PromoteReadReplicaRequest(*rds.PromoteReadReplicaInput) (*request.Request, *rds.PromoteReadReplicaOutput) + + PromoteReadReplica(*rds.PromoteReadReplicaInput) (*rds.PromoteReadReplicaOutput, error) + + PurchaseReservedDBInstancesOfferingRequest(*rds.PurchaseReservedDBInstancesOfferingInput) (*request.Request, *rds.PurchaseReservedDBInstancesOfferingOutput) + + PurchaseReservedDBInstancesOffering(*rds.PurchaseReservedDBInstancesOfferingInput) (*rds.PurchaseReservedDBInstancesOfferingOutput, error) + + RebootDBInstanceRequest(*rds.RebootDBInstanceInput) (*request.Request, *rds.RebootDBInstanceOutput) + + RebootDBInstance(*rds.RebootDBInstanceInput) (*rds.RebootDBInstanceOutput, error) + + RemoveSourceIdentifierFromSubscriptionRequest(*rds.RemoveSourceIdentifierFromSubscriptionInput) (*request.Request, *rds.RemoveSourceIdentifierFromSubscriptionOutput) + + RemoveSourceIdentifierFromSubscription(*rds.RemoveSourceIdentifierFromSubscriptionInput) (*rds.RemoveSourceIdentifierFromSubscriptionOutput, error) + + RemoveTagsFromResourceRequest(*rds.RemoveTagsFromResourceInput) (*request.Request, *rds.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*rds.RemoveTagsFromResourceInput) (*rds.RemoveTagsFromResourceOutput, error) + + ResetDBClusterParameterGroupRequest(*rds.ResetDBClusterParameterGroupInput) (*request.Request, *rds.DBClusterParameterGroupNameMessage) + + ResetDBClusterParameterGroup(*rds.ResetDBClusterParameterGroupInput) (*rds.DBClusterParameterGroupNameMessage, error) + + ResetDBParameterGroupRequest(*rds.ResetDBParameterGroupInput) 
(*request.Request, *rds.DBParameterGroupNameMessage) + + ResetDBParameterGroup(*rds.ResetDBParameterGroupInput) (*rds.DBParameterGroupNameMessage, error) + + RestoreDBClusterFromSnapshotRequest(*rds.RestoreDBClusterFromSnapshotInput) (*request.Request, *rds.RestoreDBClusterFromSnapshotOutput) + + RestoreDBClusterFromSnapshot(*rds.RestoreDBClusterFromSnapshotInput) (*rds.RestoreDBClusterFromSnapshotOutput, error) + + RestoreDBClusterToPointInTimeRequest(*rds.RestoreDBClusterToPointInTimeInput) (*request.Request, *rds.RestoreDBClusterToPointInTimeOutput) + + RestoreDBClusterToPointInTime(*rds.RestoreDBClusterToPointInTimeInput) (*rds.RestoreDBClusterToPointInTimeOutput, error) + + RestoreDBInstanceFromDBSnapshotRequest(*rds.RestoreDBInstanceFromDBSnapshotInput) (*request.Request, *rds.RestoreDBInstanceFromDBSnapshotOutput) + + RestoreDBInstanceFromDBSnapshot(*rds.RestoreDBInstanceFromDBSnapshotInput) (*rds.RestoreDBInstanceFromDBSnapshotOutput, error) + + RestoreDBInstanceToPointInTimeRequest(*rds.RestoreDBInstanceToPointInTimeInput) (*request.Request, *rds.RestoreDBInstanceToPointInTimeOutput) + + RestoreDBInstanceToPointInTime(*rds.RestoreDBInstanceToPointInTimeInput) (*rds.RestoreDBInstanceToPointInTimeOutput, error) + + RevokeDBSecurityGroupIngressRequest(*rds.RevokeDBSecurityGroupIngressInput) (*request.Request, *rds.RevokeDBSecurityGroupIngressOutput) + + RevokeDBSecurityGroupIngress(*rds.RevokeDBSecurityGroupIngressInput) (*rds.RevokeDBSecurityGroupIngressOutput, error) +} + +var _ RDSAPI = (*rds.RDS)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/service.go 
2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,109 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package rds + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Relational Database Service (Amazon RDS) is a web service that makes +// it easier to set up, operate, and scale a relational database in the cloud. +// It provides cost-efficient, resizeable capacity for an industry-standard +// relational database and manages common database administration tasks, freeing +// up developers to focus on what makes their applications and businesses unique. +// +// Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, +// Microsoft SQL Server, Oracle, or Aurora database server. This means the code, +// applications, and tools you already use today with your existing databases +// work with Amazon RDS without modification. Amazon RDS automatically backs +// up your database and maintains the database software that powers your DB +// instance. Amazon RDS is flexible: you can scale your database instance's +// compute resources and storage capacity to meet your application's demand. +// As with all Amazon Web Services, there are no up-front investments, and you +// pay only for the resources you use. +// +// This is an interface reference for Amazon RDS. It contains documentation +// for a programming or command line interface you can use to manage Amazon +// RDS. Note that Amazon RDS is asynchronous, which means that some interfaces +// might require techniques such as polling or callback functions to determine +// when a command has been applied. 
In this reference, the parameter descriptions +// indicate whether a command is applied immediately, on the next instance reboot, +// or during the maintenance window. For a summary of the Amazon RDS interfaces, +// go to Available RDS Interfaces (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html#Welcome.Interfaces). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type RDS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "rds" + +// New creates a new instance of the RDS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a RDS client from just a session. +// svc := rds.New(mySession) +// +// // Create a RDS client with additional configuration +// svc := rds.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *RDS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *RDS { + svc := &RDS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-10-31", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a RDS operation and runs any +// custom request initialization. +func (c *RDS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/rds/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,119 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package rds + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *RDS) WaitUntilDBInstanceAvailable(input *DescribeDBInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeDBInstances", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "deleted", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "deleting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "incompatible-restore", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "incompatible-parameters", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "incompatible-restore", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *RDS) WaitUntilDBInstanceDeleted(input *DescribeDBInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeDBInstances", + Delay: 30, + MaxAttempts: 60, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "deleted", + }, + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "DBInstanceNotFound", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "creating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "modifying", + }, + { + State: "failure", + 
Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "rebooting", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "DBInstances[].DBInstanceStatus", + Expected: "resetting-master-credentials", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,7191 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package redshift provides a client for Amazon Redshift. +package redshift + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAuthorizeClusterSecurityGroupIngress = "AuthorizeClusterSecurityGroupIngress" + +// AuthorizeClusterSecurityGroupIngressRequest generates a request for the AuthorizeClusterSecurityGroupIngress operation. 
+func (c *Redshift) AuthorizeClusterSecurityGroupIngressRequest(input *AuthorizeClusterSecurityGroupIngressInput) (req *request.Request, output *AuthorizeClusterSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeClusterSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeClusterSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeClusterSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending +// on whether the application accessing your cluster is running on the Internet +// or an EC2 instance, you can authorize inbound access to either a Classless +// Interdomain Routing (CIDR) IP address range or an EC2 security group. You +// can add as many as 20 ingress rules to an Amazon Redshift security group. +// +// The EC2 security group must be defined in the AWS region where the cluster +// resides. For an overview of CIDR blocks, see the Wikipedia article on Classless +// Inter-Domain Routing (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +// +// You must also associate the security group with a cluster so that clients +// running on these IP addresses or the EC2 instance are authorized to connect +// to the cluster. For information about managing security groups, go to Working +// with Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. 
+func (c *Redshift) AuthorizeClusterSecurityGroupIngress(input *AuthorizeClusterSecurityGroupIngressInput) (*AuthorizeClusterSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeClusterSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opAuthorizeSnapshotAccess = "AuthorizeSnapshotAccess" + +// AuthorizeSnapshotAccessRequest generates a request for the AuthorizeSnapshotAccess operation. +func (c *Redshift) AuthorizeSnapshotAccessRequest(input *AuthorizeSnapshotAccessInput) (req *request.Request, output *AuthorizeSnapshotAccessOutput) { + op := &request.Operation{ + Name: opAuthorizeSnapshotAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSnapshotAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &AuthorizeSnapshotAccessOutput{} + req.Data = output + return +} + +// Authorizes the specified AWS customer account to restore the specified snapshot. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) AuthorizeSnapshotAccess(input *AuthorizeSnapshotAccessInput) (*AuthorizeSnapshotAccessOutput, error) { + req, out := c.AuthorizeSnapshotAccessRequest(input) + err := req.Send() + return out, err +} + +const opCopyClusterSnapshot = "CopyClusterSnapshot" + +// CopyClusterSnapshotRequest generates a request for the CopyClusterSnapshot operation. 
+func (c *Redshift) CopyClusterSnapshotRequest(input *CopyClusterSnapshotInput) (req *request.Request, output *CopyClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCopyClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyClusterSnapshotOutput{} + req.Data = output + return +} + +// Copies the specified automated cluster snapshot to a new manual cluster snapshot. +// The source must be an automated snapshot and it must be in the available +// state. +// +// When you delete a cluster, Amazon Redshift deletes any automated snapshots +// of the cluster. Also, when the retention period of the snapshot expires, +// Amazon Redshift automatically deletes it. If you want to keep an automated +// snapshot for a longer period, you can make a manual copy of the snapshot. +// Manual snapshots are retained until you delete them. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CopyClusterSnapshot(input *CopyClusterSnapshotInput) (*CopyClusterSnapshotOutput, error) { + req, out := c.CopyClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateCluster = "CreateCluster" + +// CreateClusterRequest generates a request for the CreateCluster operation. +func (c *Redshift) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { + op := &request.Operation{ + Name: opCreateCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterOutput{} + req.Data = output + return +} + +// Creates a new cluster. 
To create the cluster in virtual private cloud (VPC), +// you must provide cluster subnet group name. If you don't provide a cluster +// subnet group name or the cluster security group parameter, Amazon Redshift +// creates a non-VPC cluster, it associates the default cluster security group +// with the cluster. For more information about managing clusters, go to Amazon +// Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide . +func (c *Redshift) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) { + req, out := c.CreateClusterRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterParameterGroup = "CreateClusterParameterGroup" + +// CreateClusterParameterGroupRequest generates a request for the CreateClusterParameterGroup operation. +func (c *Redshift) CreateClusterParameterGroupRequest(input *CreateClusterParameterGroupInput) (req *request.Request, output *CreateClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opCreateClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterParameterGroupOutput{} + req.Data = output + return +} + +// Creates an Amazon Redshift parameter group. +// +// Creating parameter groups is independent of creating clusters. You can associate +// a cluster with a parameter group when you create the cluster. You can also +// associate an existing cluster with a parameter group after the cluster is +// created by using ModifyCluster. +// +// Parameters in the parameter group define specific behavior that applies +// to the databases you create on the cluster. 
For more information about parameters +// and parameter groups, go to Amazon Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterParameterGroup(input *CreateClusterParameterGroupInput) (*CreateClusterParameterGroupOutput, error) { + req, out := c.CreateClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSecurityGroup = "CreateClusterSecurityGroup" + +// CreateClusterSecurityGroupRequest generates a request for the CreateClusterSecurityGroup operation. +func (c *Redshift) CreateClusterSecurityGroupRequest(input *CreateClusterSecurityGroupInput) (req *request.Request, output *CreateClusterSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateClusterSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterSecurityGroupOutput{} + req.Data = output + return +} + +// Creates a new Amazon Redshift security group. You use security groups to +// control access to non-VPC clusters. +// +// For information about managing security groups, go to Amazon Redshift Cluster +// Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterSecurityGroup(input *CreateClusterSecurityGroupInput) (*CreateClusterSecurityGroupOutput, error) { + req, out := c.CreateClusterSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSnapshot = "CreateClusterSnapshot" + +// CreateClusterSnapshotRequest generates a request for the CreateClusterSnapshot operation. 
+func (c *Redshift) CreateClusterSnapshotRequest(input *CreateClusterSnapshotInput) (req *request.Request, output *CreateClusterSnapshotOutput) { + op := &request.Operation{ + Name: opCreateClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a manual snapshot of the specified cluster. The cluster must be in +// the available state. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterSnapshot(input *CreateClusterSnapshotInput) (*CreateClusterSnapshotOutput, error) { + req, out := c.CreateClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateClusterSubnetGroup = "CreateClusterSubnetGroup" + +// CreateClusterSubnetGroupRequest generates a request for the CreateClusterSubnetGroup operation. +func (c *Redshift) CreateClusterSubnetGroupRequest(input *CreateClusterSubnetGroupInput) (req *request.Request, output *CreateClusterSubnetGroupOutput) { + op := &request.Operation{ + Name: opCreateClusterSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateClusterSubnetGroupOutput{} + req.Data = output + return +} + +// Creates a new Amazon Redshift subnet group. You must provide a list of one +// or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) +// when creating Amazon Redshift subnet group. 
+// +// For information about subnet groups, go to Amazon Redshift Cluster Subnet +// Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-cluster-subnet-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateClusterSubnetGroup(input *CreateClusterSubnetGroupInput) (*CreateClusterSubnetGroupOutput, error) { + req, out := c.CreateClusterSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateEventSubscription = "CreateEventSubscription" + +// CreateEventSubscriptionRequest generates a request for the CreateEventSubscription operation. +func (c *Redshift) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput) (req *request.Request, output *CreateEventSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateEventSubscriptionOutput{} + req.Data = output + return +} + +// Creates an Amazon Redshift event notification subscription. This action requires +// an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the +// Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To +// obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and +// subscribe to the topic. The ARN is displayed in the SNS console. +// +// You can specify the source type, and lists of Amazon Redshift source IDs, +// event categories, and event severities. Notifications will be sent for all +// events you want that match those criteria. For example, you can specify source +// type = cluster, source ID = my-cluster-1 and mycluster2, event categories +// = Availability, Backup, and severity = ERROR. The subscription will only +// send notifications for those ERROR events in the Availability and Backup +// categories for the specified clusters. 
+// +// If you specify both the source type and source IDs, such as source type +// = cluster and source identifier = my-cluster-1, notifications will be sent +// for all the cluster events for my-cluster-1. If you specify a source type +// but do not specify a source identifier, you will receive notice of the events +// for the objects of that type in your AWS account. If you do not specify either +// the SourceType nor the SourceIdentifier, you will be notified of events generated +// from all Amazon Redshift sources belonging to your AWS account. You must +// specify a source type if you specify a source ID. +func (c *Redshift) CreateEventSubscription(input *CreateEventSubscriptionInput) (*CreateEventSubscriptionOutput, error) { + req, out := c.CreateEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreateHsmClientCertificate = "CreateHsmClientCertificate" + +// CreateHsmClientCertificateRequest generates a request for the CreateHsmClientCertificate operation. +func (c *Redshift) CreateHsmClientCertificateRequest(input *CreateHsmClientCertificateInput) (req *request.Request, output *CreateHsmClientCertificateOutput) { + op := &request.Operation{ + Name: opCreateHsmClientCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHsmClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHsmClientCertificateOutput{} + req.Data = output + return +} + +// Creates an HSM client certificate that an Amazon Redshift cluster will use +// to connect to the client's HSM in order to store and retrieve the keys used +// to encrypt the cluster databases. +// +// The command returns a public key, which you must store in the HSM. In addition +// to creating the HSM certificate, you must create an Amazon Redshift HSM configuration +// that provides a cluster the information needed to store and use encryption +// keys in the HSM. 
For more information, go to Hardware Security Modules (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateHsmClientCertificate(input *CreateHsmClientCertificateInput) (*CreateHsmClientCertificateOutput, error) { + req, out := c.CreateHsmClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opCreateHsmConfiguration = "CreateHsmConfiguration" + +// CreateHsmConfigurationRequest generates a request for the CreateHsmConfiguration operation. +func (c *Redshift) CreateHsmConfigurationRequest(input *CreateHsmConfigurationInput) (req *request.Request, output *CreateHsmConfigurationOutput) { + op := &request.Operation{ + Name: opCreateHsmConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHsmConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHsmConfigurationOutput{} + req.Data = output + return +} + +// Creates an HSM configuration that contains the information required by an +// Amazon Redshift cluster to store and use database encryption keys in a Hardware +// Security Module (HSM). After creating the HSM configuration, you can specify +// it as a parameter when creating a cluster. The cluster will then store its +// encryption keys in the HSM. +// +// In addition to creating an HSM configuration, you must also create an HSM +// client certificate. For more information, go to Hardware Security Modules +// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-HSM.html) in +// the Amazon Redshift Cluster Management Guide. 
+func (c *Redshift) CreateHsmConfiguration(input *CreateHsmConfigurationInput) (*CreateHsmConfigurationOutput, error) { + req, out := c.CreateHsmConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshotCopyGrant = "CreateSnapshotCopyGrant" + +// CreateSnapshotCopyGrantRequest generates a request for the CreateSnapshotCopyGrant operation. +func (c *Redshift) CreateSnapshotCopyGrantRequest(input *CreateSnapshotCopyGrantInput) (req *request.Request, output *CreateSnapshotCopyGrantOutput) { + op := &request.Operation{ + Name: opCreateSnapshotCopyGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotCopyGrantInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotCopyGrantOutput{} + req.Data = output + return +} + +// Creates a snapshot copy grant that permits Amazon Redshift to use a customer +// master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied +// snapshots in a destination region. +// +// For more information about managing snapshot copy grants, go to Amazon +// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) CreateSnapshotCopyGrant(input *CreateSnapshotCopyGrantInput) (*CreateSnapshotCopyGrantOutput, error) { + req, out := c.CreateSnapshotCopyGrantRequest(input) + err := req.Send() + return out, err +} + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a request for the CreateTags operation. 
+func (c *Redshift) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateTagsOutput{} + req.Data = output + return +} + +// Adds one or more tags to a specified resource. +// +// A resource can have up to 10 tags. If you try to create more than 10 tags +// for a resource, you will receive an error and the attempt will fail. +// +// If you specify a key that already exists for the resource, the value for +// that key will be updated with the new value. +func (c *Redshift) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCluster = "DeleteCluster" + +// DeleteClusterRequest generates a request for the DeleteCluster operation. +func (c *Redshift) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) { + op := &request.Operation{ + Name: opDeleteCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteClusterOutput{} + req.Data = output + return +} + +// Deletes a previously provisioned cluster. A successful response from the +// web service indicates that the request was received correctly. Use DescribeClusters +// to monitor the status of the deletion. The delete operation cannot be canceled +// or reverted once submitted. 
For more information about managing clusters, +// go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide . +// +// If you want to shut down the cluster and retain it for future use, set +// SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. +// You can later restore this snapshot to resume using the cluster. If a final +// cluster snapshot is requested, the status of the cluster will be "final-snapshot" +// while the snapshot is being taken, then it's "deleting" once Amazon Redshift +// begins deleting the cluster. +// +// For more information about managing clusters, go to Amazon Redshift Clusters +// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide . +func (c *Redshift) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) { + req, out := c.DeleteClusterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteClusterParameterGroup = "DeleteClusterParameterGroup" + +// DeleteClusterParameterGroupRequest generates a request for the DeleteClusterParameterGroup operation. +func (c *Redshift) DeleteClusterParameterGroupRequest(input *DeleteClusterParameterGroupInput) (req *request.Request, output *DeleteClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opDeleteClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteClusterParameterGroupOutput{} + req.Data = output + return +} + +// Deletes a specified Amazon Redshift parameter group. You cannot delete a +// parameter group if it is associated with a cluster. 
+func (c *Redshift) DeleteClusterParameterGroup(input *DeleteClusterParameterGroupInput) (*DeleteClusterParameterGroupOutput, error) { + req, out := c.DeleteClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteClusterSecurityGroup = "DeleteClusterSecurityGroup" + +// DeleteClusterSecurityGroupRequest generates a request for the DeleteClusterSecurityGroup operation. +func (c *Redshift) DeleteClusterSecurityGroupRequest(input *DeleteClusterSecurityGroupInput) (req *request.Request, output *DeleteClusterSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteClusterSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterSecurityGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteClusterSecurityGroupOutput{} + req.Data = output + return +} + +// Deletes an Amazon Redshift security group. +// +// You cannot delete a security group that is associated with any clusters. +// You cannot delete the default security group. For information about managing +// security groups, go to Amazon Redshift Cluster Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) DeleteClusterSecurityGroup(input *DeleteClusterSecurityGroupInput) (*DeleteClusterSecurityGroupOutput, error) { + req, out := c.DeleteClusterSecurityGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteClusterSnapshot = "DeleteClusterSnapshot" + +// DeleteClusterSnapshotRequest generates a request for the DeleteClusterSnapshot operation. 
+func (c *Redshift) DeleteClusterSnapshotRequest(input *DeleteClusterSnapshotInput) (req *request.Request, output *DeleteClusterSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteClusterSnapshotOutput{} + req.Data = output + return +} + +// Deletes the specified manual snapshot. The snapshot must be in the available +// state, with no other users authorized to access the snapshot. +// +// Unlike automated snapshots, manual snapshots are retained even after you +// delete your cluster. Amazon Redshift does not delete your manual snapshots. +// You must delete manual snapshot explicitly to avoid getting charged. If other +// accounts are authorized to access the snapshot, you must revoke all of the +// authorizations before you can delete the snapshot. +func (c *Redshift) DeleteClusterSnapshot(input *DeleteClusterSnapshotInput) (*DeleteClusterSnapshotOutput, error) { + req, out := c.DeleteClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opDeleteClusterSubnetGroup = "DeleteClusterSubnetGroup" + +// DeleteClusterSubnetGroupRequest generates a request for the DeleteClusterSubnetGroup operation. +func (c *Redshift) DeleteClusterSubnetGroupRequest(input *DeleteClusterSubnetGroupInput) (req *request.Request, output *DeleteClusterSubnetGroupOutput) { + op := &request.Operation{ + Name: opDeleteClusterSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteClusterSubnetGroupOutput{} + req.Data = output + return +} + +// Deletes the specified cluster subnet group. 
+func (c *Redshift) DeleteClusterSubnetGroup(input *DeleteClusterSubnetGroupInput) (*DeleteClusterSubnetGroupOutput, error) { + req, out := c.DeleteClusterSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEventSubscription = "DeleteEventSubscription" + +// DeleteEventSubscriptionRequest generates a request for the DeleteEventSubscription operation. +func (c *Redshift) DeleteEventSubscriptionRequest(input *DeleteEventSubscriptionInput) (req *request.Request, output *DeleteEventSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteEventSubscriptionOutput{} + req.Data = output + return +} + +// Deletes an Amazon Redshift event notification subscription. +func (c *Redshift) DeleteEventSubscription(input *DeleteEventSubscriptionInput) (*DeleteEventSubscriptionOutput, error) { + req, out := c.DeleteEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHsmClientCertificate = "DeleteHsmClientCertificate" + +// DeleteHsmClientCertificateRequest generates a request for the DeleteHsmClientCertificate operation. 
+func (c *Redshift) DeleteHsmClientCertificateRequest(input *DeleteHsmClientCertificateInput) (req *request.Request, output *DeleteHsmClientCertificateOutput) { + op := &request.Operation{ + Name: opDeleteHsmClientCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHsmClientCertificateInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteHsmClientCertificateOutput{} + req.Data = output + return +} + +// Deletes the specified HSM client certificate. +func (c *Redshift) DeleteHsmClientCertificate(input *DeleteHsmClientCertificateInput) (*DeleteHsmClientCertificateOutput, error) { + req, out := c.DeleteHsmClientCertificateRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHsmConfiguration = "DeleteHsmConfiguration" + +// DeleteHsmConfigurationRequest generates a request for the DeleteHsmConfiguration operation. +func (c *Redshift) DeleteHsmConfigurationRequest(input *DeleteHsmConfigurationInput) (req *request.Request, output *DeleteHsmConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteHsmConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHsmConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteHsmConfigurationOutput{} + req.Data = output + return +} + +// Deletes the specified Amazon Redshift HSM configuration. 
+func (c *Redshift) DeleteHsmConfiguration(input *DeleteHsmConfigurationInput) (*DeleteHsmConfigurationOutput, error) { + req, out := c.DeleteHsmConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshotCopyGrant = "DeleteSnapshotCopyGrant" + +// DeleteSnapshotCopyGrantRequest generates a request for the DeleteSnapshotCopyGrant operation. +func (c *Redshift) DeleteSnapshotCopyGrantRequest(input *DeleteSnapshotCopyGrantInput) (req *request.Request, output *DeleteSnapshotCopyGrantOutput) { + op := &request.Operation{ + Name: opDeleteSnapshotCopyGrant, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotCopyGrantInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSnapshotCopyGrantOutput{} + req.Data = output + return +} + +// Deletes the specified snapshot copy grant. +func (c *Redshift) DeleteSnapshotCopyGrant(input *DeleteSnapshotCopyGrantInput) (*DeleteSnapshotCopyGrantOutput, error) { + req, out := c.DeleteSnapshotCopyGrantRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a request for the DeleteTags operation. +func (c *Redshift) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTagsOutput{} + req.Data = output + return +} + +// Deletes a tag or tags from a resource. You must provide the ARN of the resource +// from which you want to delete the tag or tags. 
+func (c *Redshift) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeClusterParameterGroups = "DescribeClusterParameterGroups" + +// DescribeClusterParameterGroupsRequest generates a request for the DescribeClusterParameterGroups operation. +func (c *Redshift) DescribeClusterParameterGroupsRequest(input *DescribeClusterParameterGroupsInput) (req *request.Request, output *DescribeClusterParameterGroupsOutput) { + op := &request.Operation{ + Name: opDescribeClusterParameterGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClusterParameterGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterParameterGroupsOutput{} + req.Data = output + return +} + +// Returns a list of Amazon Redshift parameter groups, including parameter groups +// you created and the default parameter group. For each parameter group, the +// response includes the parameter group name, description, and parameter group +// family name. You can optionally specify a name to retrieve the description +// of a specific parameter group. +// +// For more information about parameters and parameter groups, go to Amazon +// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all parameter groups that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all parameter groups that have any combination +// of those values are returned. 
+// +// If both tag keys and values are omitted from the request, parameter groups +// are returned regardless of whether they have tag keys or values associated +// with them. +func (c *Redshift) DescribeClusterParameterGroups(input *DescribeClusterParameterGroupsInput) (*DescribeClusterParameterGroupsOutput, error) { + req, out := c.DescribeClusterParameterGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeClusterParameterGroupsPages(input *DescribeClusterParameterGroupsInput, fn func(p *DescribeClusterParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClusterParameterGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClusterParameterGroupsOutput), lastPage) + }) +} + +const opDescribeClusterParameters = "DescribeClusterParameters" + +// DescribeClusterParametersRequest generates a request for the DescribeClusterParameters operation. +func (c *Redshift) DescribeClusterParametersRequest(input *DescribeClusterParametersInput) (req *request.Request, output *DescribeClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterParametersOutput{} + req.Data = output + return +} + +// Returns a detailed list of parameters contained within the specified Amazon +// Redshift parameter group. For each parameter the response includes information +// such as parameter name, description, data type, value, whether the parameter +// value is modifiable, and so on. 
+// +// You can specify source filter to retrieve parameters of only specific type. +// For example, to retrieve parameters that were modified by a user action such +// as from ModifyClusterParameterGroup, you can specify source equal to user. +// +// For more information about parameters and parameter groups, go to Amazon +// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) DescribeClusterParameters(input *DescribeClusterParametersInput) (*DescribeClusterParametersOutput, error) { + req, out := c.DescribeClusterParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeClusterParametersPages(input *DescribeClusterParametersInput, fn func(p *DescribeClusterParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClusterParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClusterParametersOutput), lastPage) + }) +} + +const opDescribeClusterSecurityGroups = "DescribeClusterSecurityGroups" + +// DescribeClusterSecurityGroupsRequest generates a request for the DescribeClusterSecurityGroups operation. 
+func (c *Redshift) DescribeClusterSecurityGroupsRequest(input *DescribeClusterSecurityGroupsInput) (req *request.Request, output *DescribeClusterSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeClusterSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClusterSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterSecurityGroupsOutput{} + req.Data = output + return +} + +// Returns information about Amazon Redshift security groups. If the name of +// a security group is specified, the response will contain only information +// about only that security group. +// +// For information about managing security groups, go to Amazon Redshift Cluster +// Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all security groups that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all security groups that have any combination +// of those values are returned. +// +// If both tag keys and values are omitted from the request, security groups +// are returned regardless of whether they have tag keys or values associated +// with them. 
+func (c *Redshift) DescribeClusterSecurityGroups(input *DescribeClusterSecurityGroupsInput) (*DescribeClusterSecurityGroupsOutput, error) { + req, out := c.DescribeClusterSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeClusterSecurityGroupsPages(input *DescribeClusterSecurityGroupsInput, fn func(p *DescribeClusterSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClusterSecurityGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClusterSecurityGroupsOutput), lastPage) + }) +} + +const opDescribeClusterSnapshots = "DescribeClusterSnapshots" + +// DescribeClusterSnapshotsRequest generates a request for the DescribeClusterSnapshots operation. +func (c *Redshift) DescribeClusterSnapshotsRequest(input *DescribeClusterSnapshotsInput) (req *request.Request, output *DescribeClusterSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeClusterSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClusterSnapshotsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterSnapshotsOutput{} + req.Data = output + return +} + +// Returns one or more snapshot objects, which contain metadata about your cluster +// snapshots. By default, this operation returns information about all snapshots +// of all clusters that are owned by you AWS customer account. No information +// is returned for snapshots owned by inactive AWS customer accounts. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all snapshots that match any combination of the specified +// keys and values. 
For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all snapshots that have any combination +// of those values are returned. Only snapshots that you own are returned in +// the response; shared snapshots are not returned with the tag key and tag +// value request parameters. +// +// If both tag keys and values are omitted from the request, snapshots are +// returned regardless of whether they have tag keys or values associated with +// them. +func (c *Redshift) DescribeClusterSnapshots(input *DescribeClusterSnapshotsInput) (*DescribeClusterSnapshotsOutput, error) { + req, out := c.DescribeClusterSnapshotsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeClusterSnapshotsPages(input *DescribeClusterSnapshotsInput, fn func(p *DescribeClusterSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClusterSnapshotsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClusterSnapshotsOutput), lastPage) + }) +} + +const opDescribeClusterSubnetGroups = "DescribeClusterSubnetGroups" + +// DescribeClusterSubnetGroupsRequest generates a request for the DescribeClusterSubnetGroups operation. 
+func (c *Redshift) DescribeClusterSubnetGroupsRequest(input *DescribeClusterSubnetGroupsInput) (req *request.Request, output *DescribeClusterSubnetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeClusterSubnetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClusterSubnetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterSubnetGroupsOutput{} + req.Data = output + return +} + +// Returns one or more cluster subnet group objects, which contain metadata +// about your cluster subnet groups. By default, this operation returns information +// about all cluster subnet groups that are defined in you AWS account. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all subnet groups that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all subnet groups that have any combination +// of those values are returned. +// +// If both tag keys and values are omitted from the request, subnet groups +// are returned regardless of whether they have tag keys or values associated +// with them. 
+func (c *Redshift) DescribeClusterSubnetGroups(input *DescribeClusterSubnetGroupsInput) (*DescribeClusterSubnetGroupsOutput, error) { + req, out := c.DescribeClusterSubnetGroupsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeClusterSubnetGroupsPages(input *DescribeClusterSubnetGroupsInput, fn func(p *DescribeClusterSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClusterSubnetGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClusterSubnetGroupsOutput), lastPage) + }) +} + +const opDescribeClusterVersions = "DescribeClusterVersions" + +// DescribeClusterVersionsRequest generates a request for the DescribeClusterVersions operation. +func (c *Redshift) DescribeClusterVersionsRequest(input *DescribeClusterVersionsInput) (req *request.Request, output *DescribeClusterVersionsOutput) { + op := &request.Operation{ + Name: opDescribeClusterVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClusterVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClusterVersionsOutput{} + req.Data = output + return +} + +// Returns descriptions of the available Amazon Redshift cluster versions. You +// can call this operation even before creating any clusters to learn more about +// the Amazon Redshift versions. 
For more information about managing clusters, +// go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide +func (c *Redshift) DescribeClusterVersions(input *DescribeClusterVersionsInput) (*DescribeClusterVersionsOutput, error) { + req, out := c.DescribeClusterVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeClusterVersionsPages(input *DescribeClusterVersionsInput, fn func(p *DescribeClusterVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClusterVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClusterVersionsOutput), lastPage) + }) +} + +const opDescribeClusters = "DescribeClusters" + +// DescribeClustersRequest generates a request for the DescribeClusters operation. +func (c *Redshift) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { + op := &request.Operation{ + Name: opDescribeClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClustersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeClustersOutput{} + req.Data = output + return +} + +// Returns properties of provisioned clusters including general cluster properties, +// cluster database properties, maintenance and backup properties, and security +// and access properties. This operation supports pagination. 
For more information +// about managing clusters, go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide . +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all clusters that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all clusters that have any combination +// of those values are returned. +// +// If both tag keys and values are omitted from the request, clusters are returned +// regardless of whether they have tag keys or values associated with them. +func (c *Redshift) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) { + req, out := c.DescribeClustersRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeClustersPages(input *DescribeClustersInput, fn func(p *DescribeClustersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeClustersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeClustersOutput), lastPage) + }) +} + +const opDescribeDefaultClusterParameters = "DescribeDefaultClusterParameters" + +// DescribeDefaultClusterParametersRequest generates a request for the DescribeDefaultClusterParameters operation. 
+func (c *Redshift) DescribeDefaultClusterParametersRequest(input *DescribeDefaultClusterParametersInput) (req *request.Request, output *DescribeDefaultClusterParametersOutput) { + op := &request.Operation{ + Name: opDescribeDefaultClusterParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"DefaultClusterParameters.Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDefaultClusterParametersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDefaultClusterParametersOutput{} + req.Data = output + return +} + +// Returns a list of parameter settings for the specified parameter group family. +// +// For more information about parameters and parameter groups, go to Amazon +// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) DescribeDefaultClusterParameters(input *DescribeDefaultClusterParametersInput) (*DescribeDefaultClusterParametersOutput, error) { + req, out := c.DescribeDefaultClusterParametersRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeDefaultClusterParametersPages(input *DescribeDefaultClusterParametersInput, fn func(p *DescribeDefaultClusterParametersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDefaultClusterParametersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDefaultClusterParametersOutput), lastPage) + }) +} + +const opDescribeEventCategories = "DescribeEventCategories" + +// DescribeEventCategoriesRequest generates a request for the DescribeEventCategories operation. 
+func (c *Redshift) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput) (req *request.Request, output *DescribeEventCategoriesOutput) { + op := &request.Operation{ + Name: opDescribeEventCategories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventCategoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventCategoriesOutput{} + req.Data = output + return +} + +// Displays a list of event categories for all event source types, or for a +// specified source type. For a list of the event categories and source types, +// go to Amazon Redshift Event Notifications (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html). +func (c *Redshift) DescribeEventCategories(input *DescribeEventCategoriesInput) (*DescribeEventCategoriesOutput, error) { + req, out := c.DescribeEventCategoriesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeEventSubscriptions = "DescribeEventSubscriptions" + +// DescribeEventSubscriptionsRequest generates a request for the DescribeEventSubscriptions operation. +func (c *Redshift) DescribeEventSubscriptionsRequest(input *DescribeEventSubscriptionsInput) (req *request.Request, output *DescribeEventSubscriptionsOutput) { + op := &request.Operation{ + Name: opDescribeEventSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventSubscriptionsOutput{} + req.Data = output + return +} + +// Lists descriptions of all the Amazon Redshift event notifications subscription +// for a customer account. If you specify a subscription name, lists the description +// for that subscription. 
+func (c *Redshift) DescribeEventSubscriptions(input *DescribeEventSubscriptionsInput) (*DescribeEventSubscriptionsOutput, error) { + req, out := c.DescribeEventSubscriptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsInput, fn func(p *DescribeEventSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventSubscriptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventSubscriptionsOutput), lastPage) + }) +} + +const opDescribeEvents = "DescribeEvents" + +// DescribeEventsRequest generates a request for the DescribeEvents operation. +func (c *Redshift) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { + op := &request.Operation{ + Name: opDescribeEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEventsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeEventsOutput{} + req.Data = output + return +} + +// Returns events related to clusters, security groups, snapshots, and parameter +// groups for the past 14 days. Events specific to a particular cluster, security +// group, snapshot or parameter group can be obtained by providing the name +// as a parameter. By default, the past hour of events are returned. 
+func (c *Redshift) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeEventsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeEventsOutput), lastPage) + }) +} + +const opDescribeHsmClientCertificates = "DescribeHsmClientCertificates" + +// DescribeHsmClientCertificatesRequest generates a request for the DescribeHsmClientCertificates operation. +func (c *Redshift) DescribeHsmClientCertificatesRequest(input *DescribeHsmClientCertificatesInput) (req *request.Request, output *DescribeHsmClientCertificatesOutput) { + op := &request.Operation{ + Name: opDescribeHsmClientCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeHsmClientCertificatesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHsmClientCertificatesOutput{} + req.Data = output + return +} + +// Returns information about the specified HSM client certificate. If no certificate +// ID is specified, returns information about all the HSM certificates owned +// by your AWS customer account. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all HSM client certificates that match any combination of +// the specified keys and values. For example, if you have owner and environment +// for tag keys, and admin and test for tag values, all HSM client certificates +// that have any combination of those values are returned. 
+// +// If both tag keys and values are omitted from the request, HSM client certificates +// are returned regardless of whether they have tag keys or values associated +// with them. +func (c *Redshift) DescribeHsmClientCertificates(input *DescribeHsmClientCertificatesInput) (*DescribeHsmClientCertificatesOutput, error) { + req, out := c.DescribeHsmClientCertificatesRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeHsmClientCertificatesPages(input *DescribeHsmClientCertificatesInput, fn func(p *DescribeHsmClientCertificatesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeHsmClientCertificatesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeHsmClientCertificatesOutput), lastPage) + }) +} + +const opDescribeHsmConfigurations = "DescribeHsmConfigurations" + +// DescribeHsmConfigurationsRequest generates a request for the DescribeHsmConfigurations operation. +func (c *Redshift) DescribeHsmConfigurationsRequest(input *DescribeHsmConfigurationsInput) (req *request.Request, output *DescribeHsmConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeHsmConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeHsmConfigurationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeHsmConfigurationsOutput{} + req.Data = output + return +} + +// Returns information about the specified Amazon Redshift HSM configuration. +// If no configuration ID is specified, returns information about all the HSM +// configurations owned by your AWS customer account. 
+// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all HSM connections that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all HSM connections that have any combination +// of those values are returned. +// +// If both tag keys and values are omitted from the request, HSM connections +// are returned regardless of whether they have tag keys or values associated +// with them. +func (c *Redshift) DescribeHsmConfigurations(input *DescribeHsmConfigurationsInput) (*DescribeHsmConfigurationsOutput, error) { + req, out := c.DescribeHsmConfigurationsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeHsmConfigurationsPages(input *DescribeHsmConfigurationsInput, fn func(p *DescribeHsmConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeHsmConfigurationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeHsmConfigurationsOutput), lastPage) + }) +} + +const opDescribeLoggingStatus = "DescribeLoggingStatus" + +// DescribeLoggingStatusRequest generates a request for the DescribeLoggingStatus operation. +func (c *Redshift) DescribeLoggingStatusRequest(input *DescribeLoggingStatusInput) (req *request.Request, output *LoggingStatus) { + op := &request.Operation{ + Name: opDescribeLoggingStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoggingStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &LoggingStatus{} + req.Data = output + return +} + +// Describes whether information, such as queries and connection attempts, is +// being logged for the specified Amazon Redshift cluster. 
+func (c *Redshift) DescribeLoggingStatus(input *DescribeLoggingStatusInput) (*LoggingStatus, error) { + req, out := c.DescribeLoggingStatusRequest(input) + err := req.Send() + return out, err +} + +const opDescribeOrderableClusterOptions = "DescribeOrderableClusterOptions" + +// DescribeOrderableClusterOptionsRequest generates a request for the DescribeOrderableClusterOptions operation. +func (c *Redshift) DescribeOrderableClusterOptionsRequest(input *DescribeOrderableClusterOptionsInput) (req *request.Request, output *DescribeOrderableClusterOptionsOutput) { + op := &request.Operation{ + Name: opDescribeOrderableClusterOptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOrderableClusterOptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeOrderableClusterOptionsOutput{} + req.Data = output + return +} + +// Returns a list of orderable cluster options. Before you create a new cluster +// you can use this operation to find what options are available, such as the +// EC2 Availability Zones (AZ) in the specific AWS region that you can specify, +// and the node types you can request. The node types differ by available storage, +// memory, CPU and price. With the cost involved you might want to obtain a +// list of cluster options in the specific region and specify values when creating +// a cluster. 
For more information about managing clusters, go to Amazon Redshift +// Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide +func (c *Redshift) DescribeOrderableClusterOptions(input *DescribeOrderableClusterOptionsInput) (*DescribeOrderableClusterOptionsOutput, error) { + req, out := c.DescribeOrderableClusterOptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeOrderableClusterOptionsPages(input *DescribeOrderableClusterOptionsInput, fn func(p *DescribeOrderableClusterOptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeOrderableClusterOptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeOrderableClusterOptionsOutput), lastPage) + }) +} + +const opDescribeReservedNodeOfferings = "DescribeReservedNodeOfferings" + +// DescribeReservedNodeOfferingsRequest generates a request for the DescribeReservedNodeOfferings operation. 
+func (c *Redshift) DescribeReservedNodeOfferingsRequest(input *DescribeReservedNodeOfferingsInput) (req *request.Request, output *DescribeReservedNodeOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedNodeOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedNodeOfferingsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedNodeOfferingsOutput{} + req.Data = output + return +} + +// Returns a list of the available reserved node offerings by Amazon Redshift +// with their descriptions including the node type, the fixed and recurring +// costs of reserving the node and duration the node will be reserved for you. +// These descriptions help you determine which reserve node offering you want +// to purchase. You then use the unique offering ID in you call to PurchaseReservedNodeOffering +// to reserve one or more nodes for your Amazon Redshift cluster. +// +// For more information about reserved node offerings, go to Purchasing Reserved +// Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html) +// in the Amazon Redshift Cluster Management Guide. 
+func (c *Redshift) DescribeReservedNodeOfferings(input *DescribeReservedNodeOfferingsInput) (*DescribeReservedNodeOfferingsOutput, error) { + req, out := c.DescribeReservedNodeOfferingsRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeReservedNodeOfferingsPages(input *DescribeReservedNodeOfferingsInput, fn func(p *DescribeReservedNodeOfferingsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedNodeOfferingsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedNodeOfferingsOutput), lastPage) + }) +} + +const opDescribeReservedNodes = "DescribeReservedNodes" + +// DescribeReservedNodesRequest generates a request for the DescribeReservedNodes operation. +func (c *Redshift) DescribeReservedNodesRequest(input *DescribeReservedNodesInput) (req *request.Request, output *DescribeReservedNodesOutput) { + op := &request.Operation{ + Name: opDescribeReservedNodes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedNodesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReservedNodesOutput{} + req.Data = output + return +} + +// Returns the descriptions of the reserved nodes. 
+func (c *Redshift) DescribeReservedNodes(input *DescribeReservedNodesInput) (*DescribeReservedNodesOutput, error) { + req, out := c.DescribeReservedNodesRequest(input) + err := req.Send() + return out, err +} + +func (c *Redshift) DescribeReservedNodesPages(input *DescribeReservedNodesInput, fn func(p *DescribeReservedNodesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeReservedNodesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeReservedNodesOutput), lastPage) + }) +} + +const opDescribeResize = "DescribeResize" + +// DescribeResizeRequest generates a request for the DescribeResize operation. +func (c *Redshift) DescribeResizeRequest(input *DescribeResizeInput) (req *request.Request, output *DescribeResizeOutput) { + op := &request.Operation{ + Name: opDescribeResize, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeResizeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeResizeOutput{} + req.Data = output + return +} + +// Returns information about the last resize operation for the specified cluster. +// If no resize operation has ever been initiated for the specified cluster, +// a HTTP 404 error is returned. If a resize operation was initiated and completed, +// the status of the resize remains as SUCCEEDED until the next resize. +// +// A resize operation can be requested using ModifyCluster and specifying +// a different number or type of nodes for the cluster. +func (c *Redshift) DescribeResize(input *DescribeResizeInput) (*DescribeResizeOutput, error) { + req, out := c.DescribeResizeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshotCopyGrants = "DescribeSnapshotCopyGrants" + +// DescribeSnapshotCopyGrantsRequest generates a request for the DescribeSnapshotCopyGrants operation. 
+func (c *Redshift) DescribeSnapshotCopyGrantsRequest(input *DescribeSnapshotCopyGrantsInput) (req *request.Request, output *DescribeSnapshotCopyGrantsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotCopyGrants, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotCopyGrantsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotCopyGrantsOutput{} + req.Data = output + return +} + +// Returns a list of snapshot copy grants owned by the AWS account in the destination +// region. +// +// For more information about managing snapshot copy grants, go to Amazon +// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) DescribeSnapshotCopyGrants(input *DescribeSnapshotCopyGrantsInput) (*DescribeSnapshotCopyGrantsOutput, error) { + req, out := c.DescribeSnapshotCopyGrantsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a request for the DescribeTags operation. +func (c *Redshift) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTagsOutput{} + req.Data = output + return +} + +// Returns a list of tags. You can return tags from a specific resource by specifying +// an ARN, or you can return all tags for a given type of resource, such as +// clusters, snapshots, and so on. +// +// The following are limitations for DescribeTags: You cannot specify an +// ARN and a resource-type value together in the same request. You cannot use +// the MaxRecords and Marker parameters together with the ARN parameter. 
The +// MaxRecords parameter can be a range from 10 to 50 results to return in a +// request. +// +// If you specify both tag keys and tag values in the same request, Amazon +// Redshift returns all resources that match any combination of the specified +// keys and values. For example, if you have owner and environment for tag keys, +// and admin and test for tag values, all resources that have any combination +// of those values are returned. +// +// If both tag keys and values are omitted from the request, resources are +// returned regardless of whether they have tag keys or values associated with +// them. +func (c *Redshift) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + err := req.Send() + return out, err +} + +const opDisableLogging = "DisableLogging" + +// DisableLoggingRequest generates a request for the DisableLogging operation. +func (c *Redshift) DisableLoggingRequest(input *DisableLoggingInput) (req *request.Request, output *LoggingStatus) { + op := &request.Operation{ + Name: opDisableLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &LoggingStatus{} + req.Data = output + return +} + +// Stops logging information, such as queries and connection attempts, for the +// specified Amazon Redshift cluster. +func (c *Redshift) DisableLogging(input *DisableLoggingInput) (*LoggingStatus, error) { + req, out := c.DisableLoggingRequest(input) + err := req.Send() + return out, err +} + +const opDisableSnapshotCopy = "DisableSnapshotCopy" + +// DisableSnapshotCopyRequest generates a request for the DisableSnapshotCopy operation. 
+func (c *Redshift) DisableSnapshotCopyRequest(input *DisableSnapshotCopyInput) (req *request.Request, output *DisableSnapshotCopyOutput) { + op := &request.Operation{ + Name: opDisableSnapshotCopy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableSnapshotCopyInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableSnapshotCopyOutput{} + req.Data = output + return +} + +// Disables the automatic copying of snapshots from one region to another region +// for a specified cluster. +// +// If your cluster and its snapshots are encrypted using a customer master +// key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that +// grants Amazon Redshift permission to the CMK in the destination region. +func (c *Redshift) DisableSnapshotCopy(input *DisableSnapshotCopyInput) (*DisableSnapshotCopyOutput, error) { + req, out := c.DisableSnapshotCopyRequest(input) + err := req.Send() + return out, err +} + +const opEnableLogging = "EnableLogging" + +// EnableLoggingRequest generates a request for the EnableLogging operation. +func (c *Redshift) EnableLoggingRequest(input *EnableLoggingInput) (req *request.Request, output *LoggingStatus) { + op := &request.Operation{ + Name: opEnableLogging, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &LoggingStatus{} + req.Data = output + return +} + +// Starts logging information, such as queries and connection attempts, for +// the specified Amazon Redshift cluster. +func (c *Redshift) EnableLogging(input *EnableLoggingInput) (*LoggingStatus, error) { + req, out := c.EnableLoggingRequest(input) + err := req.Send() + return out, err +} + +const opEnableSnapshotCopy = "EnableSnapshotCopy" + +// EnableSnapshotCopyRequest generates a request for the EnableSnapshotCopy operation. 
+func (c *Redshift) EnableSnapshotCopyRequest(input *EnableSnapshotCopyInput) (req *request.Request, output *EnableSnapshotCopyOutput) { + op := &request.Operation{ + Name: opEnableSnapshotCopy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableSnapshotCopyInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableSnapshotCopyOutput{} + req.Data = output + return +} + +// Enables the automatic copy of snapshots from one region to another region +// for a specified cluster. +func (c *Redshift) EnableSnapshotCopy(input *EnableSnapshotCopyInput) (*EnableSnapshotCopyOutput, error) { + req, out := c.EnableSnapshotCopyRequest(input) + err := req.Send() + return out, err +} + +const opModifyCluster = "ModifyCluster" + +// ModifyClusterRequest generates a request for the ModifyCluster operation. +func (c *Redshift) ModifyClusterRequest(input *ModifyClusterInput) (req *request.Request, output *ModifyClusterOutput) { + op := &request.Operation{ + Name: opModifyCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyClusterOutput{} + req.Data = output + return +} + +// Modifies the settings for a cluster. For example, you can add another security +// or parameter group, update the preferred maintenance window, or change the +// master user password. Resetting a cluster password or modifying the security +// groups associated with a cluster do not need a reboot. However, modifying +// a parameter group requires a reboot for parameters to take effect. For more +// information about managing clusters, go to Amazon Redshift Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide . +// +// You can also change node type and the number of nodes to scale up or down +// the cluster. 
When resizing a cluster, you must specify both the number of +// nodes and the node type even if one of the parameters does not change. +func (c *Redshift) ModifyCluster(input *ModifyClusterInput) (*ModifyClusterOutput, error) { + req, out := c.ModifyClusterRequest(input) + err := req.Send() + return out, err +} + +const opModifyClusterParameterGroup = "ModifyClusterParameterGroup" + +// ModifyClusterParameterGroupRequest generates a request for the ModifyClusterParameterGroup operation. +func (c *Redshift) ModifyClusterParameterGroupRequest(input *ModifyClusterParameterGroupInput) (req *request.Request, output *ClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Modifies the parameters of a parameter group. +// +// For more information about parameters and parameter groups, go to Amazon +// Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) ModifyClusterParameterGroup(input *ModifyClusterParameterGroupInput) (*ClusterParameterGroupNameMessage, error) { + req, out := c.ModifyClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyClusterSubnetGroup = "ModifyClusterSubnetGroup" + +// ModifyClusterSubnetGroupRequest generates a request for the ModifyClusterSubnetGroup operation. 
+func (c *Redshift) ModifyClusterSubnetGroupRequest(input *ModifyClusterSubnetGroupInput) (req *request.Request, output *ModifyClusterSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyClusterSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClusterSubnetGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyClusterSubnetGroupOutput{} + req.Data = output + return +} + +// Modifies a cluster subnet group to include the specified list of VPC subnets. +// The operation replaces the existing list of subnets with the new list of +// subnets. +func (c *Redshift) ModifyClusterSubnetGroup(input *ModifyClusterSubnetGroupInput) (*ModifyClusterSubnetGroupOutput, error) { + req, out := c.ModifyClusterSubnetGroupRequest(input) + err := req.Send() + return out, err +} + +const opModifyEventSubscription = "ModifyEventSubscription" + +// ModifyEventSubscriptionRequest generates a request for the ModifyEventSubscription operation. +func (c *Redshift) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput) (req *request.Request, output *ModifyEventSubscriptionOutput) { + op := &request.Operation{ + Name: opModifyEventSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyEventSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyEventSubscriptionOutput{} + req.Data = output + return +} + +// Modifies an existing Amazon Redshift event notification subscription. +func (c *Redshift) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*ModifyEventSubscriptionOutput, error) { + req, out := c.ModifyEventSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opModifySnapshotCopyRetentionPeriod = "ModifySnapshotCopyRetentionPeriod" + +// ModifySnapshotCopyRetentionPeriodRequest generates a request for the ModifySnapshotCopyRetentionPeriod operation. 
+func (c *Redshift) ModifySnapshotCopyRetentionPeriodRequest(input *ModifySnapshotCopyRetentionPeriodInput) (req *request.Request, output *ModifySnapshotCopyRetentionPeriodOutput) { + op := &request.Operation{ + Name: opModifySnapshotCopyRetentionPeriod, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySnapshotCopyRetentionPeriodInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifySnapshotCopyRetentionPeriodOutput{} + req.Data = output + return +} + +// Modifies the number of days to retain automated snapshots in the destination +// region after they are copied from the source region. +func (c *Redshift) ModifySnapshotCopyRetentionPeriod(input *ModifySnapshotCopyRetentionPeriodInput) (*ModifySnapshotCopyRetentionPeriodOutput, error) { + req, out := c.ModifySnapshotCopyRetentionPeriodRequest(input) + err := req.Send() + return out, err +} + +const opPurchaseReservedNodeOffering = "PurchaseReservedNodeOffering" + +// PurchaseReservedNodeOfferingRequest generates a request for the PurchaseReservedNodeOffering operation. +func (c *Redshift) PurchaseReservedNodeOfferingRequest(input *PurchaseReservedNodeOfferingInput) (req *request.Request, output *PurchaseReservedNodeOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedNodeOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedNodeOfferingInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseReservedNodeOfferingOutput{} + req.Data = output + return +} + +// Allows you to purchase reserved nodes. Amazon Redshift offers a predefined +// set of reserved node offerings. You can purchase one or more of the offerings. +// You can call the DescribeReservedNodeOfferings API to obtain the available +// reserved node offerings. You can call this API by providing a specific reserved +// node offering and the number of nodes you want to reserve. 
+// +// For more information about reserved node offerings, go to Purchasing Reserved +// Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/purchase-reserved-node-instance.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) PurchaseReservedNodeOffering(input *PurchaseReservedNodeOfferingInput) (*PurchaseReservedNodeOfferingOutput, error) { + req, out := c.PurchaseReservedNodeOfferingRequest(input) + err := req.Send() + return out, err +} + +const opRebootCluster = "RebootCluster" + +// RebootClusterRequest generates a request for the RebootCluster operation. +func (c *Redshift) RebootClusterRequest(input *RebootClusterInput) (req *request.Request, output *RebootClusterOutput) { + op := &request.Operation{ + Name: opRebootCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootClusterOutput{} + req.Data = output + return +} + +// Reboots a cluster. This action is taken as soon as possible. It results in +// a momentary outage to the cluster, during which the cluster status is set +// to rebooting. A cluster event is created when the reboot is completed. Any +// pending cluster modifications (see ModifyCluster) are applied at this reboot. +// For more information about managing clusters, go to Amazon Redshift Clusters +// (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html) +// in the Amazon Redshift Cluster Management Guide +func (c *Redshift) RebootCluster(input *RebootClusterInput) (*RebootClusterOutput, error) { + req, out := c.RebootClusterRequest(input) + err := req.Send() + return out, err +} + +const opResetClusterParameterGroup = "ResetClusterParameterGroup" + +// ResetClusterParameterGroupRequest generates a request for the ResetClusterParameterGroup operation. 
+func (c *Redshift) ResetClusterParameterGroupRequest(input *ResetClusterParameterGroupInput) (req *request.Request, output *ClusterParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &ClusterParameterGroupNameMessage{} + req.Data = output + return +} + +// Sets one or more parameters of the specified parameter group to their default +// values and sets the source values of the parameters to "engine-default". +// To reset the entire parameter group specify the ResetAllParameters parameter. +// For parameter changes to take effect you must reboot any associated clusters. +func (c *Redshift) ResetClusterParameterGroup(input *ResetClusterParameterGroupInput) (*ClusterParameterGroupNameMessage, error) { + req, out := c.ResetClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + +const opRestoreFromClusterSnapshot = "RestoreFromClusterSnapshot" + +// RestoreFromClusterSnapshotRequest generates a request for the RestoreFromClusterSnapshot operation. +func (c *Redshift) RestoreFromClusterSnapshotRequest(input *RestoreFromClusterSnapshotInput) (req *request.Request, output *RestoreFromClusterSnapshotOutput) { + op := &request.Operation{ + Name: opRestoreFromClusterSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreFromClusterSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreFromClusterSnapshotOutput{} + req.Data = output + return +} + +// Creates a new cluster from a snapshot. By default, Amazon Redshift creates +// the resulting cluster with the same configuration as the original cluster +// from which the snapshot was created, except that the new cluster is created +// with the default cluster security and parameter groups. 
After Amazon Redshift +// creates the cluster, you can use the ModifyCluster API to associate a different +// security group and different parameter group with the restored cluster. If +// you are using a DS node type, you can also choose to change to another DS +// node type of the same size during restore. +// +// If you restore a cluster into a VPC, you must provide a cluster subnet +// group where you want the cluster restored. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) RestoreFromClusterSnapshot(input *RestoreFromClusterSnapshotInput) (*RestoreFromClusterSnapshotOutput, error) { + req, out := c.RestoreFromClusterSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opRevokeClusterSecurityGroupIngress = "RevokeClusterSecurityGroupIngress" + +// RevokeClusterSecurityGroupIngressRequest generates a request for the RevokeClusterSecurityGroupIngress operation. +func (c *Redshift) RevokeClusterSecurityGroupIngressRequest(input *RevokeClusterSecurityGroupIngressInput) (req *request.Request, output *RevokeClusterSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeClusterSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeClusterSecurityGroupIngressInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeClusterSecurityGroupIngressOutput{} + req.Data = output + return +} + +// Revokes an ingress rule in an Amazon Redshift security group for a previously +// authorized IP range or Amazon EC2 security group. To add an ingress rule, +// see AuthorizeClusterSecurityGroupIngress. 
For information about managing +// security groups, go to Amazon Redshift Cluster Security Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-security-groups.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) RevokeClusterSecurityGroupIngress(input *RevokeClusterSecurityGroupIngressInput) (*RevokeClusterSecurityGroupIngressOutput, error) { + req, out := c.RevokeClusterSecurityGroupIngressRequest(input) + err := req.Send() + return out, err +} + +const opRevokeSnapshotAccess = "RevokeSnapshotAccess" + +// RevokeSnapshotAccessRequest generates a request for the RevokeSnapshotAccess operation. +func (c *Redshift) RevokeSnapshotAccessRequest(input *RevokeSnapshotAccessInput) (req *request.Request, output *RevokeSnapshotAccessOutput) { + op := &request.Operation{ + Name: opRevokeSnapshotAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSnapshotAccessInput{} + } + + req = c.newRequest(op, input, output) + output = &RevokeSnapshotAccessOutput{} + req.Data = output + return +} + +// Removes the ability of the specified AWS customer account to restore the +// specified snapshot. If the account is currently restoring the snapshot, the +// restore will run to completion. +// +// For more information about working with snapshots, go to Amazon Redshift +// Snapshots (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html) +// in the Amazon Redshift Cluster Management Guide. +func (c *Redshift) RevokeSnapshotAccess(input *RevokeSnapshotAccessInput) (*RevokeSnapshotAccessOutput, error) { + req, out := c.RevokeSnapshotAccessRequest(input) + err := req.Send() + return out, err +} + +const opRotateEncryptionKey = "RotateEncryptionKey" + +// RotateEncryptionKeyRequest generates a request for the RotateEncryptionKey operation. 
+func (c *Redshift) RotateEncryptionKeyRequest(input *RotateEncryptionKeyInput) (req *request.Request, output *RotateEncryptionKeyOutput) { + op := &request.Operation{ + Name: opRotateEncryptionKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RotateEncryptionKeyInput{} + } + + req = c.newRequest(op, input, output) + output = &RotateEncryptionKeyOutput{} + req.Data = output + return +} + +// Rotates the encryption keys for a cluster. +func (c *Redshift) RotateEncryptionKey(input *RotateEncryptionKeyInput) (*RotateEncryptionKeyOutput, error) { + req, out := c.RotateEncryptionKeyRequest(input) + err := req.Send() + return out, err +} + +// Describes an AWS customer account authorized to restore a snapshot. +type AccountWithRestoreAccess struct { + _ struct{} `type:"structure"` + + // The identifier of an AWS customer account authorized to restore a snapshot. + AccountId *string `type:"string"` +} + +// String returns the string representation +func (s AccountWithRestoreAccess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountWithRestoreAccess) GoString() string { + return s.String() +} + +// ??? +type AuthorizeClusterSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range to be added the Amazon Redshift security group. + CIDRIP *string `type:"string"` + + // The name of the security group to which the ingress rule is added. + ClusterSecurityGroupName *string `type:"string" required:"true"` + + // The EC2 security group to be added the Amazon Redshift security group. + EC2SecurityGroupName *string `type:"string"` + + // The AWS account number of the owner of the security group specified by the + // EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable + // value. 
+ // + // Example: 111122223333 + EC2SecurityGroupOwnerId *string `type:"string"` +} + +// String returns the string representation +func (s AuthorizeClusterSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeClusterSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type AuthorizeClusterSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Describes a security group. + ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeClusterSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeClusterSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +type AuthorizeSnapshotAccessInput struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS customer account authorized to restore the specified + // snapshot. + AccountWithRestoreAccess *string `type:"string" required:"true"` + + // The identifier of the cluster the snapshot was created from. This parameter + // is required if your IAM user has a policy containing a snapshot resource + // element that specifies anything other than * for the cluster name. + SnapshotClusterIdentifier *string `type:"string"` + + // The identifier of the snapshot the account is authorized to restore. + SnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AuthorizeSnapshotAccessInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSnapshotAccessInput) GoString() string { + return s.String() +} + +type AuthorizeSnapshotAccessOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. 
+ Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSnapshotAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSnapshotAccessOutput) GoString() string { + return s.String() +} + +// Describes an availability zone. +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the availability zone. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// Describes a cluster. +type Cluster struct { + _ struct{} `type:"structure"` + + // If true, major version upgrades will be applied automatically to the cluster + // during the maintenance window. + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automatic cluster snapshots are retained. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The name of the Availability Zone in which the cluster is located. + AvailabilityZone *string `type:"string"` + + // The date and time that the cluster was created. + ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The unique identifier of the cluster. + ClusterIdentifier *string `type:"string"` + + // The nodes in a cluster. + ClusterNodes []*ClusterNode `type:"list"` + + // The list of cluster parameter groups that are associated with this cluster. + // Each parameter group in the list is returned with its status. + ClusterParameterGroups []*ClusterParameterGroupStatus `locationNameList:"ClusterParameterGroup" type:"list"` + + // The public key for the cluster. + ClusterPublicKey *string `type:"string"` + + // The specific revision number of the database in the cluster. 
+ ClusterRevisionNumber *string `type:"string"` + + // A list of cluster security group that are associated with the cluster. Each + // security group is represented by an element that contains ClusterSecurityGroup.Name + // and ClusterSecurityGroup.Status subelements. + // + // Cluster security groups are used when the cluster is not created in a VPC. + // Clusters that are created in a VPC use VPC security groups, which are listed + // by the VpcSecurityGroups parameter. + ClusterSecurityGroups []*ClusterSecurityGroupMembership `locationNameList:"ClusterSecurityGroup" type:"list"` + + // Returns the destination region and retention period that are configured for + // cross-region snapshot copy. + ClusterSnapshotCopyStatus *ClusterSnapshotCopyStatus `type:"structure"` + + // The current state of this cluster. Possible values include available, creating, + // deleting, rebooting, renaming, and resizing. + ClusterStatus *string `type:"string"` + + // The name of the subnet group that is associated with the cluster. This parameter + // is valid only when the cluster is in a VPC. + ClusterSubnetGroupName *string `type:"string"` + + // The version ID of the Amazon Redshift engine that is running on the cluster. + ClusterVersion *string `type:"string"` + + // The name of the initial database that was created when the cluster was created. + // This same name is returned for the life of the cluster. If an initial database + // was not specified, a database named "dev" was created by default. + DBName *string `type:"string"` + + // Describes the status of the elastic IP (EIP) address. + ElasticIpStatus *ElasticIpStatus `type:"structure"` + + // If true, data in the cluster is encrypted at rest. + Encrypted *bool `type:"boolean"` + + // The connection endpoint. + Endpoint *Endpoint `type:"structure"` + + // Reports whether the Amazon Redshift cluster has finished applying any HSM + // settings changes specified in a modify cluster command. 
+ // + // Values: active, applying + HsmStatus *HsmStatus `type:"structure"` + + // The AWS Key Management Service (KMS) key ID of the encryption key used to + // encrypt data in the cluster. + KmsKeyId *string `type:"string"` + + // The master user name for the cluster. This name is used to connect to the + // database that is specified in DBName. + MasterUsername *string `type:"string"` + + // The status of a modify operation, if any, initiated for the cluster. + ModifyStatus *string `type:"string"` + + // The node type for the nodes in the cluster. + NodeType *string `type:"string"` + + // The number of compute nodes in the cluster. + NumberOfNodes *int64 `type:"integer"` + + // If present, changes to the cluster are pending. Specific pending changes + // are identified by subelements. + PendingModifiedValues *PendingModifiedValues `type:"structure"` + + // The weekly time range (in UTC) during which system maintenance can occur. + PreferredMaintenanceWindow *string `type:"string"` + + // If true, the cluster can be accessed from a public network. + PubliclyAccessible *bool `type:"boolean"` + + // Describes the status of a cluster restore action. Returns null if the cluster + // was not created by restoring a snapshot. + RestoreStatus *RestoreStatus `type:"structure"` + + // The list of tags for the cluster. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the VPC the cluster is in, if the cluster is in a VPC. + VpcId *string `type:"string"` + + // A list of Virtual Private Cloud (VPC) security groups that are associated + // with the cluster. This parameter is returned only if the cluster is in a + // VPC. 
+ VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroup" type:"list"` +} + +// String returns the string representation +func (s Cluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Cluster) GoString() string { + return s.String() +} + +// The identifier of a node in a cluster. +type ClusterNode struct { + _ struct{} `type:"structure"` + + // Whether the node is a leader node or a compute node. + NodeRole *string `type:"string"` + + // The private IP address of a node within a cluster. + PrivateIPAddress *string `type:"string"` + + // The public IP address of a node within a cluster. + PublicIPAddress *string `type:"string"` +} + +// String returns the string representation +func (s ClusterNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterNode) GoString() string { + return s.String() +} + +// Describes a parameter group. +type ClusterParameterGroup struct { + _ struct{} `type:"structure"` + + // The description of the parameter group. + Description *string `type:"string"` + + // The name of the cluster parameter group family that this cluster parameter + // group is compatible with. + ParameterGroupFamily *string `type:"string"` + + // The name of the cluster parameter group. + ParameterGroupName *string `type:"string"` + + // The list of tags for the cluster parameter group. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s ClusterParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterParameterGroup) GoString() string { + return s.String() +} + +// Contains the output from the ModifyClusterParameterGroup and ResetClusterParameterGroup +// actions and indicate the parameter group involved and the status of the operation +// on the parameter group. 
+type ClusterParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // The name of the cluster parameter group. + ParameterGroupName *string `type:"string"` + + // The status of the parameter group. For example, if you made a change to a + // parameter group name-value pair, then the change could be pending a reboot + // of an associated cluster. + ParameterGroupStatus *string `type:"string"` +} + +// String returns the string representation +func (s ClusterParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterParameterGroupNameMessage) GoString() string { + return s.String() +} + +// Describes the status of a parameter group. +type ClusterParameterGroupStatus struct { + _ struct{} `type:"structure"` + + // The list of parameter statuses. + // + // For more information about parameters and parameter groups, go to Amazon + // Redshift Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) + // in the Amazon Redshift Cluster Management Guide. + ClusterParameterStatusList []*ClusterParameterStatus `type:"list"` + + // The status of parameter updates. + ParameterApplyStatus *string `type:"string"` + + // The name of the cluster parameter group. + ParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s ClusterParameterGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterParameterGroupStatus) GoString() string { + return s.String() +} + +// Describes the status of a parameter group. +type ClusterParameterStatus struct { + _ struct{} `type:"structure"` + + // The error that prevented the parameter from being applied to the database. 
+ ParameterApplyErrorDescription *string `type:"string"` + + // The status of the parameter that indicates whether the parameter is in sync + // with the database, waiting for a cluster reboot, or encountered an error + // when being applied. + // + // The following are possible statuses and descriptions. in-sync: The parameter + // value is in sync with the database. pending-reboot: The parameter value + // will be applied after the cluster reboots. applying: The parameter value + // is being applied to the database. invalid-parameter: Cannot apply the parameter + // value because it has an invalid value or syntax. apply-deferred: The parameter + // contains static property changes. The changes are deferred until the cluster + // reboots. apply-error: Cannot connect to the cluster. The parameter change + // will be applied after the cluster reboots. unknown-error: Cannot apply the + // parameter change right now. The change will be applied after the cluster + // reboots. + ParameterApplyStatus *string `type:"string"` + + // The name of the parameter. + ParameterName *string `type:"string"` +} + +// String returns the string representation +func (s ClusterParameterStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterParameterStatus) GoString() string { + return s.String() +} + +// Describes a security group. +type ClusterSecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the cluster security group to which the operation was applied. + ClusterSecurityGroupName *string `type:"string"` + + // A description of the security group. + Description *string `type:"string"` + + // A list of EC2 security groups that are permitted to access clusters associated + // with this cluster security group. 
+ EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"` + + // A list of IP ranges (CIDR blocks) that are permitted to access clusters associated + // with this cluster security group. + IPRanges []*IPRange `locationNameList:"IPRange" type:"list"` + + // The list of tags for the cluster security group. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s ClusterSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSecurityGroup) GoString() string { + return s.String() +} + +// Describes a security group. +type ClusterSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the cluster security group. + ClusterSecurityGroupName *string `type:"string"` + + // The status of the cluster security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSecurityGroupMembership) GoString() string { + return s.String() +} + +// Returns the destination region and retention period that are configured for +// cross-region snapshot copy. +type ClusterSnapshotCopyStatus struct { + _ struct{} `type:"structure"` + + // The destination region that snapshots are automatically copied to when cross-region + // snapshot copy is enabled. + DestinationRegion *string `type:"string"` + + // The number of days that automated snapshots are retained in the destination + // region after they are copied from a source region. + RetentionPeriod *int64 `type:"long"` + + // The name of the snapshot copy grant. 
+ SnapshotCopyGrantName *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSnapshotCopyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSnapshotCopyStatus) GoString() string { + return s.String() +} + +// Describes a subnet group. +type ClusterSubnetGroup struct { + _ struct{} `type:"structure"` + + // The name of the cluster subnet group. + ClusterSubnetGroupName *string `type:"string"` + + // The description of the cluster subnet group. + Description *string `type:"string"` + + // The status of the cluster subnet group. Possible values are Complete, Incomplete + // and Invalid. + SubnetGroupStatus *string `type:"string"` + + // A list of the VPC Subnet elements. + Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + + // The list of tags for the cluster subnet group. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The VPC ID of the cluster subnet group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s ClusterSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterSubnetGroup) GoString() string { + return s.String() +} + +// Describes a cluster version, including the parameter group family and description +// of the version. +type ClusterVersion struct { + _ struct{} `type:"structure"` + + // The name of the cluster parameter group family for the cluster. + ClusterParameterGroupFamily *string `type:"string"` + + // The version number used by the cluster. + ClusterVersion *string `type:"string"` + + // The description of the cluster version. 
+ Description *string `type:"string"` +} + +// String returns the string representation +func (s ClusterVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterVersion) GoString() string { + return s.String() +} + +type CopyClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster the source snapshot was created from. This + // parameter is required if your IAM user has a policy containing a snapshot + // resource element that specifies anything other than * for the cluster name. + // + // Constraints: + // + // Must be the identifier for a valid cluster. + SourceSnapshotClusterIdentifier *string `type:"string"` + + // The identifier for the source snapshot. + // + // Constraints: + // + // Must be the identifier for a valid automated snapshot whose state is available. + SourceSnapshotIdentifier *string `type:"string" required:"true"` + + // The identifier given to the new manual snapshot. + // + // Constraints: + // + // Cannot be null, empty, or blank. Must contain from 1 to 255 alphanumeric + // characters or hyphens. First character must be a letter. Cannot end with + // a hyphen or contain two consecutive hyphens. Must be unique for the AWS account + // that is making the request. + TargetSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyClusterSnapshotInput) GoString() string { + return s.String() +} + +type CopyClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. 
+ Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s CopyClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // If true, major version upgrades can be applied during the maintenance window + // to the Amazon Redshift engine that is running on the cluster. + // + // When a new major version of the Amazon Redshift engine is released, you + // can request that the service automatically apply upgrades during the maintenance + // window to the Amazon Redshift engine that is running on your cluster. + // + // Default: true + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automated snapshots are retained. If the value is + // 0, automated snapshots are disabled. Even if automated snapshots are disabled, + // you can still create manual snapshots when you want with CreateClusterSnapshot. + // + // Default: 1 + // + // Constraints: Must be a value from 0 to 35. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision + // the cluster. For example, if you have several EC2 instances running in a + // specific Availability Zone, then you might want the cluster to be provisioned + // in the same zone in order to decrease network latency. + // + // Default: A random, system-chosen Availability Zone in the region that is + // specified by the endpoint. + // + // Example: us-east-1d + // + // Constraint: The specified Availability Zone must be in the same region + // as the current endpoint. + AvailabilityZone *string `type:"string"` + + // A unique identifier for the cluster. You use this identifier to refer to + // the cluster for any subsequent cluster operations such as deleting or modifying. 
+ // The identifier also appears in the Amazon Redshift console. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic + // characters must be lowercase. First character must be a letter. Cannot end + // with a hyphen or contain two consecutive hyphens. Must be unique for all + // clusters within an AWS account. Example: myexamplecluster + ClusterIdentifier *string `type:"string" required:"true"` + + // The name of the parameter group to be associated with this cluster. + // + // Default: The default Amazon Redshift cluster parameter group. For information + // about the default parameter group, go to Working with Amazon Redshift Parameter + // Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters or hyphens. First character must + // be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + ClusterParameterGroupName *string `type:"string"` + + // A list of security groups to be associated with this cluster. + // + // Default: The default cluster security group for Amazon Redshift. + ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"` + + // The name of a cluster subnet group to be associated with this cluster. + // + // If this parameter is not provided the resulting cluster will be deployed + // outside virtual private cloud (VPC). + ClusterSubnetGroupName *string `type:"string"` + + // The type of the cluster. When cluster type is specified as single-node, + // the NumberOfNodes parameter is not required. multi-node, the NumberOfNodes + // parameter is required. + // + // Valid Values: multi-node | single-node + // + // Default: multi-node + ClusterType *string `type:"string"` + + // The version of the Amazon Redshift engine software that you want to deploy + // on the cluster. + // + // The version selected runs on all the nodes in the cluster. 
+ // + // Constraints: Only version 1.0 is currently available. + // + // Example: 1.0 + ClusterVersion *string `type:"string"` + + // The name of the first database to be created when the cluster is created. + // + // To create additional databases after the cluster is created, connect to + // the cluster with a SQL client and use SQL commands to create a database. + // For more information, go to Create a Database (http://docs.aws.amazon.com/redshift/latest/dg/t_creating_database.html) + // in the Amazon Redshift Database Developer Guide. + // + // Default: dev + // + // Constraints: + // + // Must contain 1 to 64 alphanumeric characters. Must contain only lowercase + // letters. Cannot be a word that is reserved by the service. A list of reserved + // words can be found in Reserved Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) + // in the Amazon Redshift Database Developer Guide. + DBName *string `type:"string"` + + // The Elastic IP (EIP) address for the cluster. + // + // Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible + // through an Internet gateway. For more information about provisioning clusters + // in EC2-VPC, go to Supported Platforms to Launch Your Cluster (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms) + // in the Amazon Redshift Cluster Management Guide. + ElasticIp *string `type:"string"` + + // If true, the data in the cluster is encrypted at rest. + // + // Default: false + Encrypted *bool `type:"boolean"` + + // Specifies the name of the HSM client certificate the Amazon Redshift cluster + // uses to retrieve the data encryption keys stored in an HSM. + HsmClientCertificateIdentifier *string `type:"string"` + + // Specifies the name of the HSM configuration that contains the information + // the Amazon Redshift cluster can use to retrieve and store keys in an HSM. 
+ HsmConfigurationIdentifier *string `type:"string"` + + // The AWS Key Management Service (KMS) key ID of the encryption key that you + // want to use to encrypt data in the cluster. + KmsKeyId *string `type:"string"` + + // The password associated with the master user account for the cluster that + // is being created. + // + // Constraints: + // + // Must be between 8 and 64 characters in length. Must contain at least one + // uppercase letter. Must contain at least one lowercase letter. Must contain + // one number. Can be any printable ASCII character (ASCII code 33 to 126) except + // ' (single quote), " (double quote), \, /, @, or space. + MasterUserPassword *string `type:"string" required:"true"` + + // The user name associated with the master user account for the cluster that + // is being created. + // + // Constraints: + // + // Must be 1 - 128 alphanumeric characters. First character must be a letter. + // Cannot be a reserved word. A list of reserved words can be found in Reserved + // Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) + // in the Amazon Redshift Database Developer Guide. + MasterUsername *string `type:"string" required:"true"` + + // The node type to be provisioned for the cluster. For information about node + // types, go to Working with Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) + // in the Amazon Redshift Cluster Management Guide. + // + // Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large + // | dc1.8xlarge. + NodeType *string `type:"string" required:"true"` + + // The number of compute nodes in the cluster. This parameter is required when + // the ClusterType parameter is specified as multi-node. 
+ // + // For information about determining how many nodes you need, go to Working + // with Clusters (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) + // in the Amazon Redshift Cluster Management Guide. + // + // If you don't specify this parameter, you get a single-node cluster. When + // requesting a multi-node cluster, you must specify the number of nodes that + // you want in the cluster. + // + // Default: 1 + // + // Constraints: Value must be at least 1 and no more than 100. + NumberOfNodes *int64 `type:"integer"` + + // The port number on which the cluster accepts incoming connections. + // + // The cluster is accessible only via the JDBC and ODBC connection strings. + // Part of the connection string requires the port on which the cluster will + // listen for incoming connections. + // + // Default: 5439 + // + // Valid Values: 1150-65535 + Port *int64 `type:"integer"` + + // The weekly time range (in UTC) during which automated cluster maintenance + // can occur. + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of + // time per region, occurring on a random day of the week. For more information + // about the time blocks for each region, see Maintenance Windows (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows) + // in Amazon Redshift Cluster Management Guide. + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // If true, the cluster can be accessed from a public network. + PubliclyAccessible *bool `type:"boolean"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of Virtual Private Cloud (VPC) security groups to be associated with + // the cluster. + // + // Default: The default VPC security group is associated with the cluster. 
+	VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a cluster.
+	Cluster *Cluster `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterOutput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterParameterGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the parameter group.
+	Description *string `type:"string" required:"true"`
+
+	// The Amazon Redshift engine version to which the cluster parameter group applies.
+	// The cluster engine version determines the set of parameters.
+	//
+	// To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups.
+	// By default, Amazon Redshift returns a list of all the parameter groups that
+	// are owned by your AWS account, including the default parameter groups for
+	// each Amazon Redshift engine version. The parameter group family names associated
+	// with the default parameter groups provide you the valid values. For example,
+	// a valid family name is "redshift-1.0".
+	ParameterGroupFamily *string `type:"string" required:"true"`
+
+	// The name of the cluster parameter group.
+	//
+	// Constraints:
+	//
+	// Must be 1 to 255 alphanumeric characters or hyphens First character must
+	// be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
+	// Must be unique within your AWS account. This value is stored as a lower-case
+	// string.
+	ParameterGroupName *string `type:"string" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterParameterGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterParameterGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterParameterGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a parameter group.
+	ClusterParameterGroup *ClusterParameterGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterParameterGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterParameterGroupOutput) GoString() string {
+	return s.String()
+}
+
+// The input parameters for the CreateClusterSecurityGroup action.
+type CreateClusterSecurityGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name for the security group. Amazon Redshift stores the value as a lowercase
+	// string.
+	//
+	// Constraints:
+	//
+	// Must contain no more than 255 alphanumeric characters or hyphens. Must
+	// not be "Default". Must be unique for all security groups that are created
+	// by your AWS account. Example: examplesecuritygroup
+	ClusterSecurityGroupName *string `type:"string" required:"true"`
+
+	// A description for the security group.
+	Description *string `type:"string" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterSecurityGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSecurityGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterSecurityGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a security group.
+ ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateClusterSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterSecurityGroupOutput) GoString() string { + return s.String() +} + +type CreateClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The cluster identifier for which you want a snapshot. + ClusterIdentifier *string `type:"string" required:"true"` + + // A unique identifier for the snapshot that you are requesting. This identifier + // must be unique for all snapshots within the AWS account. + // + // Constraints: + // + // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric + // characters or hyphens First character must be a letter Cannot end with a + // hyphen or contain two consecutive hyphens Example: my-snapshot-id + SnapshotIdentifier *string `type:"string" required:"true"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterSnapshotInput) GoString() string { + return s.String() +} + +type CreateClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s CreateClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterSnapshotOutput) GoString() string { + return s.String() +} + +type CreateClusterSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name for the subnet group. Amazon Redshift stores the value as a lowercase + // string. 
+	//
+	// Constraints:
+	//
+	// Must contain no more than 255 alphanumeric characters or hyphens. Must
+	// not be "Default". Must be unique for all subnet groups that are created by
+	// your AWS account. Example: examplesubnetgroup
+	ClusterSubnetGroupName *string `type:"string" required:"true"`
+
+	// A description for the subnet group.
+	Description *string `type:"string" required:"true"`
+
+	// An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a
+	// single request.
+	SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"`
+
+	// A list of tag instances.
+	Tags []*Tag `locationNameList:"Tag" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateClusterSubnetGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSubnetGroupInput) GoString() string {
+	return s.String()
+}
+
+type CreateClusterSubnetGroupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Describes a subnet group.
+	ClusterSubnetGroup *ClusterSubnetGroup `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateClusterSubnetGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateClusterSubnetGroupOutput) GoString() string {
+	return s.String()
+}
+
+type CreateEventSubscriptionInput struct {
+	_ struct{} `type:"structure"`
+
+	// A Boolean value; set to true to activate the subscription, set to false to
+	// create the subscription but not activate it.
+	Enabled *bool `type:"boolean"`
+
+	// Specifies the Amazon Redshift event categories to be published by the event
+	// notification subscription.
+	//
+	// Values: Configuration, Management, Monitoring, Security
+	EventCategories []*string `locationNameList:"EventCategory" type:"list"`
+
+	// Specifies the Amazon Redshift event severity to be published by the event
+	// notification subscription.
+ // + // Values: ERROR, INFO + Severity *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit the + // event notifications. The ARN is created by Amazon SNS when you create a topic + // and subscribe to it. + SnsTopicArn *string `type:"string" required:"true"` + + // A list of one or more identifiers of Amazon Redshift source objects. All + // of the objects must be of the same type as was specified in the source type + // parameter. The event subscription will return only events generated by the + // specified objects. If not specified, then events are returned for all objects + // within the source type specified. + // + // Example: my-cluster-1, my-cluster-2 + // + // Example: my-snapshot-20131010 + SourceIds []*string `locationNameList:"SourceId" type:"list"` + + // The type of source that will be generating the events. For example, if you + // want to be notified of events generated by a cluster, you would set this + // parameter to cluster. If this value is not specified, events are returned + // for all Amazon Redshift objects in your AWS account. You must specify a source + // type in order to specify source IDs. + // + // Valid values: cluster, cluster-parameter-group, cluster-security-group, + // and cluster-snapshot. + SourceType *string `type:"string"` + + // The name of the event subscription to be created. + // + // Constraints: + // + // Cannot be null, empty, or blank. Must contain from 1 to 255 alphanumeric + // characters or hyphens. First character must be a letter. Cannot end with + // a hyphen or contain two consecutive hyphens. + SubscriptionName *string `type:"string" required:"true"` + + // A list of tag instances. 
+ Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionInput) GoString() string { + return s.String() +} + +type CreateEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s CreateEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEventSubscriptionOutput) GoString() string { + return s.String() +} + +type CreateHsmClientCertificateInput struct { + _ struct{} `type:"structure"` + + // The identifier to be assigned to the new HSM client certificate that the + // cluster will use to connect to the HSM to use the database encryption keys. + HsmClientCertificateIdentifier *string `type:"string" required:"true"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateHsmClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmClientCertificateInput) GoString() string { + return s.String() +} + +type CreateHsmClientCertificateOutput struct { + _ struct{} `type:"structure"` + + // Returns information about an HSM client certificate. The certificate is stored + // in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift + // cluster to encrypt data files. 
+ HsmClientCertificate *HsmClientCertificate `type:"structure"` +} + +// String returns the string representation +func (s CreateHsmClientCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmClientCertificateOutput) GoString() string { + return s.String() +} + +type CreateHsmConfigurationInput struct { + _ struct{} `type:"structure"` + + // A text description of the HSM configuration to be created. + Description *string `type:"string" required:"true"` + + // The identifier to be assigned to the new Amazon Redshift HSM configuration. + HsmConfigurationIdentifier *string `type:"string" required:"true"` + + // The IP address that the Amazon Redshift cluster must use to access the HSM. + HsmIpAddress *string `type:"string" required:"true"` + + // The name of the partition in the HSM where the Amazon Redshift clusters will + // store their database encryption keys. + HsmPartitionName *string `type:"string" required:"true"` + + // The password required to access the HSM partition. + HsmPartitionPassword *string `type:"string" required:"true"` + + // The HSMs public certificate file. When using Cloud HSM, the file name is + // server.pem. + HsmServerPublicCertificate *string `type:"string" required:"true"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateHsmConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmConfigurationInput) GoString() string { + return s.String() +} + +type CreateHsmConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Returns information about an HSM configuration, which is an object that describes + // to Amazon Redshift clusters the information they require to connect to an + // HSM where they can store database encryption keys. 
+ HsmConfiguration *HsmConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateHsmConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHsmConfigurationOutput) GoString() string { + return s.String() +} + +// The result of the CreateSnapshotCopyGrant action. +type CreateSnapshotCopyGrantInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the customer master key (CMK) to which to grant + // Amazon Redshift permission. If no key is specified, the default key is used. + KmsKeyId *string `type:"string"` + + // The name of the snapshot copy grant. This name must be unique in the region + // for the AWS account. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic + // characters must be lowercase. First character must be a letter. Cannot end + // with a hyphen or contain two consecutive hyphens. Must be unique for all + // clusters within an AWS account. + SnapshotCopyGrantName *string `type:"string" required:"true"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateSnapshotCopyGrantInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotCopyGrantInput) GoString() string { + return s.String() +} + +type CreateSnapshotCopyGrantOutput struct { + _ struct{} `type:"structure"` + + // The snapshot copy grant that grants Amazon Redshift permission to encrypt + // copied snapshots with the specified customer master key (CMK) from AWS KMS + // in the destination region. 
+ // + // For more information about managing snapshot copy grants, go to Amazon + // Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html) + // in the Amazon Redshift Cluster Management Guide. + SnapshotCopyGrant *SnapshotCopyGrant `type:"structure"` +} + +// String returns the string representation +func (s CreateSnapshotCopyGrantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotCopyGrantOutput) GoString() string { + return s.String() +} + +// Contains the output from the CreateTags action. +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) to which you want to add the tag or tags. + // For example, arn:aws:redshift:us-east-1:123456789:cluster:t1. + ResourceName *string `type:"string" required:"true"` + + // One or more name/value pairs to add as tags to the specified resource. Each + // tag name is passed in with the parameter Key and the corresponding value + // is passed in with the parameter Value. The Key and Value parameters are separated + // by a comma (,). Separate multiple tags with a space. For example, --tags + // "Key"="owner","Value"="admin" "Key"="environment","Value"="test" "Key"="version","Value"="1.0". 
+ Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +// Describes the default cluster parameters for a parameter group family. +type DefaultClusterParameters struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // The name of the cluster parameter group family to which the engine default + // parameters apply. + ParameterGroupFamily *string `type:"string"` + + // The list of cluster default parameters. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DefaultClusterParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultClusterParameters) GoString() string { + return s.String() +} + +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster to be deleted. + // + // Constraints: + // + // Must contain lowercase characters. Must contain from 1 to 63 alphanumeric + // characters or hyphens. 
First character must be a letter. Cannot end with + // a hyphen or contain two consecutive hyphens. + ClusterIdentifier *string `type:"string" required:"true"` + + // The identifier of the final snapshot that is to be created immediately before + // deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot + // must be false. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters. First character must be a letter. + // Cannot end with a hyphen or contain two consecutive hyphens. + FinalClusterSnapshotIdentifier *string `type:"string"` + + // Determines whether a final snapshot of the cluster is created before Amazon + // Redshift deletes the cluster. If true, a final cluster snapshot is not created. + // If false, a final cluster snapshot is created before the cluster is deleted. + // + // The FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot + // is false. Default: false + SkipFinalClusterSnapshot *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterInput) GoString() string { + return s.String() +} + +type DeleteClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +type DeleteClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter group to be deleted. + // + // Constraints: + // + // Must be the name of an existing cluster parameter group. Cannot delete + // a default cluster parameter group. 
+ ParameterGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterParameterGroupInput) GoString() string { + return s.String() +} + +type DeleteClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterParameterGroupOutput) GoString() string { + return s.String() +} + +type DeleteClusterSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster security group to be deleted. + ClusterSecurityGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterSecurityGroupInput) GoString() string { + return s.String() +} + +type DeleteClusterSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterSecurityGroupOutput) GoString() string { + return s.String() +} + +type DeleteClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster the snapshot was created from. This + // parameter is required if your IAM user has a policy containing a snapshot + // resource element that specifies anything other than * for the cluster name. + // + // Constraints: Must be the name of valid cluster. 
+ SnapshotClusterIdentifier *string `type:"string"` + + // The unique identifier of the manual snapshot to be deleted. + // + // Constraints: Must be the name of an existing snapshot that is in the available + // state. + SnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterSnapshotInput) GoString() string { + return s.String() +} + +type DeleteClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterSnapshotOutput) GoString() string { + return s.String() +} + +type DeleteClusterSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster subnet group name to be deleted. + ClusterSubnetGroupName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterSubnetGroupInput) GoString() string { + return s.String() +} + +type DeleteClusterSubnetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterSubnetGroupOutput) GoString() string { + return s.String() +} + +type DeleteEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon Redshift event notification subscription to be deleted. 
+ SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSubscriptionInput) GoString() string { + return s.String() +} + +type DeleteEventSubscriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEventSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteHsmClientCertificateInput struct { + _ struct{} `type:"structure"` + + // The identifier of the HSM client certificate to be deleted. + HsmClientCertificateIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHsmClientCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHsmClientCertificateInput) GoString() string { + return s.String() +} + +type DeleteHsmClientCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteHsmClientCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHsmClientCertificateOutput) GoString() string { + return s.String() +} + +type DeleteHsmConfigurationInput struct { + _ struct{} `type:"structure"` + + // The identifier of the Amazon Redshift HSM configuration to be deleted. 
+ HsmConfigurationIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHsmConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHsmConfigurationInput) GoString() string { + return s.String() +} + +type DeleteHsmConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteHsmConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHsmConfigurationOutput) GoString() string { + return s.String() +} + +// The result of the DeleteSnapshotCopyGrant action. +type DeleteSnapshotCopyGrantInput struct { + _ struct{} `type:"structure"` + + // The name of the snapshot copy grant to delete. + SnapshotCopyGrantName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotCopyGrantInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotCopyGrantInput) GoString() string { + return s.String() +} + +type DeleteSnapshotCopyGrantOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotCopyGrantOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotCopyGrantOutput) GoString() string { + return s.String() +} + +// Contains the output from the DeleteTags action. +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) from which you want to remove the tag or tags. + // For example, arn:aws:redshift:us-east-1:123456789:cluster:t1. + ResourceName *string `type:"string" required:"true"` + + // The tag key that you want to delete. 
+ TagKeys []*string `locationNameList:"TagKey" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DescribeClusterParameterGroupsInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterParameterGroups request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of a specific parameter group for which to return details. By default, + // details about all parameter groups and the default parameter group are returned. + ParameterGroupName *string `type:"string"` + + // A tag key or keys for which you want to return all matching cluster parameter + // groups that are associated with the specified key or keys. 
For example, suppose + // that you have parameter groups that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the parameter groups that have either or + // both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster parameter + // groups that are associated with the specified tag value or values. For example, + // suppose that you have parameter groups that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the parameter groups that have either or + // both of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterParameterGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterParameterGroupsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterParameterGroups action. +type DescribeClusterParameterGroupsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of ClusterParameterGroup instances. Each instance describes one cluster + // parameter group. 
+ ParameterGroups []*ClusterParameterGroup `locationNameList:"ClusterParameterGroup" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterParameterGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterParameterGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterParametersInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterParameters request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of a cluster parameter group for which to return details. + ParameterGroupName *string `type:"string" required:"true"` + + // The parameter types to return. Specify user to show parameters that are different + // form the default. Similarly, specify engine-default to show parameters that + // are the same as the default parameter group. + // + // Default: All parameter types returned. 
+ // + // Valid Values: user | engine-default + Source *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterParametersInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterParameters action. +type DescribeClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of Parameter instances. Each instance lists the parameters of one + // cluster parameter group. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterParametersOutput) GoString() string { + return s.String() +} + +// ??? +type DescribeClusterSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of a cluster security group for which you are requesting details. + // You can specify either the Marker parameter or a ClusterSecurityGroupName + // parameter, but not both. + // + // Example: securitygroup1 + ClusterSecurityGroupName *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. 
When the results of a DescribeClusterSecurityGroups request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + // + // Constraints: You can specify either the ClusterSecurityGroupName parameter + // or the Marker parameter, but not both. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching cluster security + // groups that are associated with the specified key or keys. For example, suppose + // that you have security groups that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the security groups that have either or + // both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster security + // groups that are associated with the specified tag value or values. For example, + // suppose that you have security groups that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the security groups that have either or + // both of these tag values associated with them. 
+ TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSecurityGroupsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterSecurityGroups action. +type DescribeClusterSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of ClusterSecurityGroup instances. + ClusterSecurityGroups []*ClusterSecurityGroup `locationNameList:"ClusterSecurityGroup" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSecurityGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterSnapshotsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which information about snapshots is requested. + ClusterIdentifier *string `type:"string"` + + // A time value that requests only snapshots created at or before the specified + // time. The time value is specified in ISO 8601 format. For more information + // about ISO 8601, go to the ISO8601 Wikipedia page. 
(http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2012-07-16T18:00:00Z + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterSnapshots request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The AWS customer account used to create or copy the snapshot. Use this field + // to filter the results to snapshots owned by a particular account. To describe + // snapshots you own, either specify your AWS customer account, or do not specify + // the parameter. + OwnerAccount *string `type:"string"` + + // The snapshot identifier of the snapshot about which to return information. + SnapshotIdentifier *string `type:"string"` + + // The type of snapshots for which you are requesting information. By default, + // snapshots of all types are returned. + // + // Valid Values: automated | manual + SnapshotType *string `type:"string"` + + // A value that requests only snapshots created at or after the specified time. + // The time value is specified in ISO 8601 format. For more information about + // ISO 8601, go to the ISO8601 Wikipedia page. 
(http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2012-07-16T18:00:00Z + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A tag key or keys for which you want to return all matching cluster snapshots + // that are associated with the specified key or keys. For example, suppose + // that you have snapshots that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the snapshots that have either or both of these tag keys + // associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster snapshots + // that are associated with the specified tag value or values. For example, + // suppose that you have snapshots that are tagged with values called admin + // and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the snapshots that have either or both of + // these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSnapshotsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterSnapshots action. +type DescribeClusterSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. 
+ Marker *string `type:"string"` + + // A list of Snapshot instances. + Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSnapshotsOutput) GoString() string { + return s.String() +} + +type DescribeClusterSubnetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster subnet group for which information is requested. + ClusterSubnetGroupName *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterSubnetGroups request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching cluster subnet + // groups that are associated with the specified key or keys. For example, suppose + // that you have subnet groups that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the subnet groups that have either or both of these tag keys + // associated with them. 
+ TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching cluster subnet + // groups that are associated with the specified tag value or values. For example, + // suppose that you have subnet groups that are tagged with values called admin + // and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the subnet groups that have either or both + // of these tag values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClusterSubnetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSubnetGroupsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterSubnetGroups action. +type DescribeClusterSubnetGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of ClusterSubnetGroup instances. + ClusterSubnetGroups []*ClusterSubnetGroup `locationNameList:"ClusterSubnetGroup" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterSubnetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterSubnetGroupsOutput) GoString() string { + return s.String() +} + +type DescribeClusterVersionsInput struct { + _ struct{} `type:"structure"` + + // The name of a specific cluster parameter group family to return details for. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters First character must be a letter + // Cannot end with a hyphen or contain two consecutive hyphens + ClusterParameterGroupFamily *string `type:"string"` + + // The specific cluster version to return. + // + // Example: 1.0 + ClusterVersion *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusterVersions request exceed + // the value specified in MaxRecords, AWS returns a value in the Marker field + // of the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. 
+ MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeClusterVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterVersionsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusterVersions action. +type DescribeClusterVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of Version elements. + ClusterVersions []*ClusterVersion `locationNameList:"ClusterVersion" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClusterVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterVersionsOutput) GoString() string { + return s.String() +} + +type DescribeClustersInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of a cluster whose properties you are requesting. This + // parameter is case sensitive. + // + // The default is that all clusters defined for an account are returned. + ClusterIdentifier *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeClusters request exceed the + // value specified in MaxRecords, AWS returns a value in the Marker field of + // the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. 
+ // + // Constraints: You can specify either the ClusterIdentifier parameter or + // the Marker parameter, but not both. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching clusters that + // are associated with the specified key or keys. For example, suppose that + // you have clusters that are tagged with keys called owner and environment. + // If you specify both of these tag keys in the request, Amazon Redshift returns + // a response with the clusters that have either or both of these tag keys associated + // with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching clusters + // that are associated with the specified tag value or values. For example, + // suppose that you have clusters that are tagged with values called admin and + // test. If you specify both of these tag values in the request, Amazon Redshift + // returns a response with the clusters that have either or both of these tag + // values associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeClusters action. 
+type DescribeClustersOutput struct { + _ struct{} `type:"structure"` + + // A list of Cluster objects, where each object describes one cluster. + Clusters []*Cluster `locationNameList:"Cluster" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersOutput) GoString() string { + return s.String() +} + +type DescribeDefaultClusterParametersInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeDefaultClusterParameters + // request exceed the value specified in MaxRecords, AWS returns a value in + // the Marker field of the response. You can retrieve the next set of response + // records by providing the returned marker value in the Marker parameter and + // retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the cluster parameter group family. 
+ ParameterGroupFamily *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDefaultClusterParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDefaultClusterParametersInput) GoString() string { + return s.String() +} + +type DescribeDefaultClusterParametersOutput struct { + _ struct{} `type:"structure"` + + // Describes the default cluster parameters for a parameter group family. + DefaultClusterParameters *DefaultClusterParameters `type:"structure"` +} + +// String returns the string representation +func (s DescribeDefaultClusterParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDefaultClusterParametersOutput) GoString() string { + return s.String() +} + +type DescribeEventCategoriesInput struct { + _ struct{} `type:"structure"` + + // The source type, such as cluster or parameter group, to which the described + // event categories apply. + // + // Valid values: cluster, snapshot, parameter group, and security group. + SourceType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventCategoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventCategoriesInput) GoString() string { + return s.String() +} + +type DescribeEventCategoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of event categories descriptions. 
+ EventCategoriesMapList []*EventCategoriesMap `locationNameList:"EventCategoriesMap" type:"list"` +} + +// String returns the string representation +func (s DescribeEventCategoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventCategoriesOutput) GoString() string { + return s.String() +} + +type DescribeEventSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeEventSubscriptions request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the Amazon Redshift event notification subscription to be described. + SubscriptionName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventSubscriptionsInput) GoString() string { + return s.String() +} + +type DescribeEventSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // A list of event subscriptions. 
+ EventSubscriptionsList []*EventSubscription `locationNameList:"EventSubscription" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventSubscriptionsOutput) GoString() string { + return s.String() +} + +type DescribeEventsInput struct { + _ struct{} `type:"structure"` + + // The number of minutes prior to the time of the request for which to retrieve + // events. For example, if the request is sent at 18:00 and you specify a duration + // of 60, then only events which have occurred after 17:00 will be returned. + // + // Default: 60 + Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia + // page. (http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2009-07-08T18:00Z + EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeEvents request exceed the + // value specified in MaxRecords, AWS returns a value in the Marker field of + // the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. 
If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The identifier of the event source for which events will be returned. If + // this parameter is not specified, then all sources are included in the response. + // + // Constraints: + // + // If SourceIdentifier is supplied, SourceType must also be provided. + // + // Specify a cluster identifier when SourceType is cluster. Specify a cluster + // security group name when SourceType is cluster-security-group. Specify a + // cluster parameter group name when SourceType is cluster-parameter-group. + // Specify a cluster snapshot identifier when SourceType is cluster-snapshot. + SourceIdentifier *string `type:"string"` + + // The event source to retrieve events for. If no value is specified, all events + // are returned. + // + // Constraints: + // + // If SourceType is supplied, SourceIdentifier must also be provided. + // + // Specify cluster when SourceIdentifier is a cluster identifier. Specify + // cluster-security-group when SourceIdentifier is a cluster security group + // name. Specify cluster-parameter-group when SourceIdentifier is a cluster + // parameter group name. Specify cluster-snapshot when SourceIdentifier is a + // cluster snapshot identifier. + SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia + // page. 
(http://en.wikipedia.org/wiki/ISO_8601) + // + // Example: 2009-07-08T18:00Z + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s DescribeEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeEvents action. +type DescribeEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of Event instances. + Events []*Event `locationNameList:"Event" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsOutput) GoString() string { + return s.String() +} + +type DescribeHsmClientCertificatesInput struct { + _ struct{} `type:"structure"` + + // The identifier of a specific HSM client certificate for which you want information. + // If no identifier is specified, information is returned for all HSM client + // certificates owned by your AWS customer account. + HsmClientCertificateIdentifier *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeHsmClientCertificates request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. 
You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching HSM client certificates + // that are associated with the specified key or keys. For example, suppose + // that you have HSM client certificates that are tagged with keys called owner + // and environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the HSM client certificates that have either + // or both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching HSM client + // certificates that are associated with the specified tag value or values. + // For example, suppose that you have HSM client certificates that are tagged + // with values called admin and test. If you specify both of these tag values + // in the request, Amazon Redshift returns a response with the HSM client certificates + // that have either or both of these tag values associated with them. 
+ TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeHsmClientCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmClientCertificatesInput) GoString() string { + return s.String() +} + +type DescribeHsmClientCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the identifiers for one or more HSM client certificates used by + // Amazon Redshift clusters to store and retrieve database encryption keys in + // an HSM. + HsmClientCertificates []*HsmClientCertificate `locationNameList:"HsmClientCertificate" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHsmClientCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmClientCertificatesOutput) GoString() string { + return s.String() +} + +type DescribeHsmConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The identifier of a specific Amazon Redshift HSM configuration to be described. + // If no identifier is specified, information is returned for all HSM configurations + // owned by your AWS customer account. + HsmConfigurationIdentifier *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. 
When the results of a DescribeHsmConfigurations request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching HSM configurations + // that are associated with the specified key or keys. For example, suppose + // that you have HSM configurations that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the HSM configurations that have either + // or both of these tag keys associated with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching HSM configurations + // that are associated with the specified tag value or values. For example, + // suppose that you have HSM configurations that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the HSM configurations that have either + // or both of these tag values associated with them. 
+ TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeHsmConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmConfigurationsInput) GoString() string { + return s.String() +} + +type DescribeHsmConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // A list of Amazon Redshift HSM configurations. + HsmConfigurations []*HsmConfiguration `locationNameList:"HsmConfiguration" type:"list"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHsmConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHsmConfigurationsOutput) GoString() string { + return s.String() +} + +type DescribeLoggingStatusInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster to get the logging status from. + // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLoggingStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoggingStatusInput) GoString() string { + return s.String() +} + +type DescribeOrderableClusterOptionsInput struct { + _ struct{} `type:"structure"` + + // The version filter value. Specify this parameter to show only the available + // offerings matching the specified version. 
+ // + // Default: All versions. + // + // Constraints: Must be one of the version returned from DescribeClusterVersions. + ClusterVersion *string `type:"string"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeOrderableClusterOptions request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The node type filter value. Specify this parameter to show only the available + // offerings matching the specified node type. + NodeType *string `type:"string"` +} + +// String returns the string representation +func (s DescribeOrderableClusterOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableClusterOptionsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeOrderableClusterOptions action. +type DescribeOrderableClusterOptionsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. 
If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // An OrderableClusterOption structure containing information about orderable + // options for the Cluster. + OrderableClusterOptions []*OrderableClusterOption `locationNameList:"OrderableClusterOption" type:"list"` +} + +// String returns the string representation +func (s DescribeOrderableClusterOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrderableClusterOptionsOutput) GoString() string { + return s.String() +} + +type DescribeReservedNodeOfferingsInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeReservedNodeOfferings request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The unique identifier for the offering. 
+ ReservedNodeOfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedNodeOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodeOfferingsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeReservedNodeOfferings action. +type DescribeReservedNodeOfferingsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of reserved node offerings. + ReservedNodeOfferings []*ReservedNodeOffering `locationNameList:"ReservedNodeOffering" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedNodeOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodeOfferingsOutput) GoString() string { + return s.String() +} + +type DescribeReservedNodesInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeReservedNodes request exceed + // the value specified in MaxRecords, AWS returns a value in the Marker field + // of the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. 
If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // Identifier for the node reservation. + ReservedNodeId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReservedNodesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodesInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeReservedNodes action. +type DescribeReservedNodesOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // The list of reserved nodes. + ReservedNodes []*ReservedNode `locationNameList:"ReservedNode" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedNodesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedNodesOutput) GoString() string { + return s.String() +} + +type DescribeResizeInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of a cluster whose resize progress you are requesting. + // This parameter is case-sensitive. + // + // By default, resize operations for all clusters defined for an AWS account + // are returned. 
+ ClusterIdentifier *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeResizeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeResizeInput) GoString() string {
+ return s.String()
+}
+
+// Describes the result of a cluster resize operation.
+type DescribeResizeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The average rate of the resize operation over the last few minutes, measured
+ // in megabytes per second. After the resize operation completes, this value
+ // shows the average rate of the entire resize operation.
+ AvgResizeRateInMegaBytesPerSecond *float64 `type:"double"`
+
+ // The amount of seconds that have elapsed since the resize operation began.
+ // After the resize operation completes, this value shows the total actual time,
+ // in seconds, for the resize operation.
+ ElapsedTimeInSeconds *int64 `type:"long"`
+
+ // The estimated time remaining, in seconds, until the resize operation is complete.
+ // This value is calculated based on the average resize rate and the estimated
+ // amount of data remaining to be processed. Once the resize operation is complete,
+ // this value will be 0.
+ EstimatedTimeToCompletionInSeconds *int64 `type:"long"`
+
+ // The names of tables that have been completely imported.
+ //
+ // Valid Values: List of table names.
+ ImportTablesCompleted []*string `type:"list"`
+
+ // The names of tables that are being currently imported.
+ //
+ // Valid Values: List of table names.
+ ImportTablesInProgress []*string `type:"list"`
+
+ // The names of tables that have not yet been imported.
+ //
+ // Valid Values: List of table names.
+ ImportTablesNotStarted []*string `type:"list"`
+
+ // While the resize operation is in progress, this value shows the current amount
+ // of data, in megabytes, that has been processed so far. 
When the resize operation + // is complete, this value shows the total amount of data, in megabytes, on + // the cluster, which may be more or less than TotalResizeDataInMegaBytes (the + // estimated total amount of data before resize). + ProgressInMegaBytes *int64 `type:"long"` + + // The status of the resize operation. + // + // Valid Values: NONE | IN_PROGRESS | FAILED | SUCCEEDED + Status *string `type:"string"` + + // The cluster type after the resize operation is complete. + // + // Valid Values: multi-node | single-node + TargetClusterType *string `type:"string"` + + // The node type that the cluster will have after the resize operation is complete. + TargetNodeType *string `type:"string"` + + // The number of nodes that the cluster will have after the resize operation + // is complete. + TargetNumberOfNodes *int64 `type:"integer"` + + // The estimated total amount of data, in megabytes, on the cluster before the + // resize operation began. + TotalResizeDataInMegaBytes *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeResizeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResizeOutput) GoString() string { + return s.String() +} + +// The result of the DescribeSnapshotCopyGrants action. +type DescribeSnapshotCopyGrantsInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeSnapshotCopyGrant request + // exceed the value specified in MaxRecords, AWS returns a value in the Marker + // field of the response. You can retrieve the next set of response records + // by providing the returned marker value in the Marker parameter and retrying + // the request. + // + // Constraints: You can specify either the SnapshotCopyGrantName parameter + // or the Marker parameter, but not both. 
+ Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // The name of the snapshot copy grant. + SnapshotCopyGrantName *string `type:"string"` + + // A tag key or keys for which you want to return all matching resources that + // are associated with the specified key or keys. For example, suppose that + // you have resources tagged with keys called owner and environment. If you + // specify both of these tag keys in the request, Amazon Redshift returns a + // response with all resources that have either or both of these tag keys associated + // with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching resources + // that are associated with the specified value or values. For example, suppose + // that you have resources tagged with values called admin and test. If you + // specify both of these tag values in the request, Amazon Redshift returns + // a response with all resources that have either or both of these tag values + // associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotCopyGrantsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotCopyGrantsInput) GoString() string { + return s.String() +} + +// The result of the snapshot copy grant. 
+type DescribeSnapshotCopyGrantsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional parameter that specifies the starting point to return a set of
+ // response records. When the results of a DescribeSnapshotCopyGrant request
+ // exceed the value specified in MaxRecords, AWS returns a value in the Marker
+ // field of the response. You can retrieve the next set of response records
+ // by providing the returned marker value in the Marker parameter and retrying
+ // the request.
+ //
+ // Constraints: You can specify either the SnapshotCopyGrantName parameter
+ // or the Marker parameter, but not both.
+ Marker *string `type:"string"`
+
+ // The list of snapshot copy grants.
+ SnapshotCopyGrants []*SnapshotCopyGrant `locationNameList:"SnapshotCopyGrant" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSnapshotCopyGrantsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSnapshotCopyGrantsOutput) GoString() string {
+ return s.String()
+}
+
+// Contains the input parameters for the DescribeTags action.
+type DescribeTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A value that indicates the starting point for the next set of response records
+ // in a subsequent request. If a value is returned in a response, you can retrieve
+ // the next set of records by providing this returned marker value in the marker
+ // parameter and retrying the command. If the marker field is empty, all response
+ // records have been retrieved for the request.
+ Marker *string `type:"string"`
+
+ // The maximum number of response records to return in each call. If the number
+ // of remaining response records exceeds the specified MaxRecords value, a value
+ // is returned in a marker field of the response. You can retrieve the next
+ // set of records by retrying the command with the returned marker value. 
+ MaxRecords *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) for which you want to describe the tag or + // tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1. + ResourceName *string `type:"string"` + + // The type of resource with which you want to view tags. Valid resource types + // are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group + // Subnet group HSM connection HSM certificate Parameter group Snapshot copy + // grant + // + // For more information about Amazon Redshift resource types and constructing + // ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) (http://docs.aws.amazon.com/redshift/latest/mgmt/constructing-redshift-arn.html) + // in the Amazon Redshift Cluster Management Guide. + ResourceType *string `type:"string"` + + // A tag key or keys for which you want to return all matching resources that + // are associated with the specified key or keys. For example, suppose that + // you have resources tagged with keys called owner and environment. If you + // specify both of these tag keys in the request, Amazon Redshift returns a + // response with all resources that have either or both of these tag keys associated + // with them. + TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching resources + // that are associated with the specified value or values. For example, suppose + // that you have resources tagged with values called admin and test. If you + // specify both of these tag values in the request, Amazon Redshift returns + // a response with all resources that have either or both of these tag values + // associated with them. 
+ TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// Contains the output from the DescribeTags action. +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of tags with their associated resources. + TaggedResources []*TaggedResource `locationNameList:"TaggedResource" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +type DisableLoggingInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster on which logging is to be stopped. + // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableLoggingInput) GoString() string { + return s.String() +} + +type DisableSnapshotCopyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the source cluster that you want to disable copying + // of snapshots to a destination region. 
+ // + // Constraints: Must be the valid name of an existing cluster that has cross-region + // snapshot copy enabled. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableSnapshotCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableSnapshotCopyInput) GoString() string { + return s.String() +} + +type DisableSnapshotCopyOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s DisableSnapshotCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableSnapshotCopyOutput) GoString() string { + return s.String() +} + +// Describes an Amazon EC2 security group. +type EC2SecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the EC2 Security Group. + EC2SecurityGroupName *string `type:"string"` + + // The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName + // field. + EC2SecurityGroupOwnerId *string `type:"string"` + + // The status of the EC2 security group. + Status *string `type:"string"` + + // The list of tags for the EC2 security group. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s EC2SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2SecurityGroup) GoString() string { + return s.String() +} + +// Describes the status of the elastic IP (EIP) address. +type ElasticIpStatus struct { + _ struct{} `type:"structure"` + + // The elastic IP (EIP) address for the cluster. + ElasticIp *string `type:"string"` + + // Describes the status of the elastic IP (EIP) address. 
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s ElasticIpStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticIpStatus) GoString() string { + return s.String() +} + +type EnableLoggingInput struct { + _ struct{} `type:"structure"` + + // The name of an existing S3 bucket where the log files are to be stored. + // + // Constraints: + // + // Must be in the same region as the cluster The cluster must have read bucket + // and put object permissions + BucketName *string `type:"string" required:"true"` + + // The identifier of the cluster on which logging is to be started. + // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` + + // The prefix applied to the log file names. + // + // Constraints: + // + // Cannot exceed 512 characters Cannot contain spaces( ), double quotes ("), + // single quotes ('), a backslash (\), or control characters. The hexadecimal + // codes for invalid characters are: x00 to x20 x22 x27 x5c x7f or larger + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation +func (s EnableLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableLoggingInput) GoString() string { + return s.String() +} + +type EnableSnapshotCopyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the source cluster to copy snapshots from. + // + // Constraints: Must be the valid name of an existing cluster that does not + // already have cross-region snapshot copy enabled. + ClusterIdentifier *string `type:"string" required:"true"` + + // The destination region that you want to copy snapshots to. + // + // Constraints: Must be the name of a valid region. 
For more information, + // see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#redshift_region) + // in the Amazon Web Services General Reference. + DestinationRegion *string `type:"string" required:"true"` + + // The number of days to retain automated snapshots in the destination region + // after they are copied from the source region. + // + // Default: 7. + // + // Constraints: Must be at least 1 and no more than 35. + RetentionPeriod *int64 `type:"integer"` + + // The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted + // cluster are copied to the destination region. + SnapshotCopyGrantName *string `type:"string"` +} + +// String returns the string representation +func (s EnableSnapshotCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSnapshotCopyInput) GoString() string { + return s.String() +} + +type EnableSnapshotCopyOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s EnableSnapshotCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableSnapshotCopyOutput) GoString() string { + return s.String() +} + +// Describes a connection endpoint. +type Endpoint struct { + _ struct{} `type:"structure"` + + // The DNS address of the Cluster. + Address *string `type:"string"` + + // The port that the database engine is listening on. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Describes an event. +type Event struct { + _ struct{} `type:"structure"` + + // The date and time of the event. 
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of the event categories. + // + // Values: Configuration, Management, Monitoring, Security + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The identifier of the event. + EventId *string `type:"string"` + + // The text of this event. + Message *string `type:"string"` + + // The severity of the event. + // + // Values: ERROR, INFO + Severity *string `type:"string"` + + // The identifier for the source of the event. + SourceIdentifier *string `type:"string"` + + // The source type for this event. + SourceType *string `type:"string" enum:"SourceType"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() +} + +type EventCategoriesMap struct { + _ struct{} `type:"structure"` + + // The events in the event category. + Events []*EventInfoMap `locationNameList:"EventInfoMap" type:"list"` + + // The Amazon Redshift source type, such as cluster or cluster-snapshot, that + // the returned categories belong to. + SourceType *string `type:"string"` +} + +// String returns the string representation +func (s EventCategoriesMap) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventCategoriesMap) GoString() string { + return s.String() +} + +type EventInfoMap struct { + _ struct{} `type:"structure"` + + // The category of an Amazon Redshift event. + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // The description of an Amazon Redshift event. + EventDescription *string `type:"string"` + + // The identifier of an Amazon Redshift event. + EventId *string `type:"string"` + + // The severity of the event. 
+ // + // Values: ERROR, INFO + Severity *string `type:"string"` +} + +// String returns the string representation +func (s EventInfoMap) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventInfoMap) GoString() string { + return s.String() +} + +type EventSubscription struct { + _ struct{} `type:"structure"` + + // The name of the Amazon Redshift event notification subscription. + CustSubscriptionId *string `type:"string"` + + // The AWS customer account associated with the Amazon Redshift event notification + // subscription. + CustomerAwsId *string `type:"string"` + + // A Boolean value indicating whether the subscription is enabled. true indicates + // the subscription is enabled. + Enabled *bool `type:"boolean"` + + // The list of Amazon Redshift event categories specified in the event notification + // subscription. + // + // Values: Configuration, Management, Monitoring, Security + EventCategoriesList []*string `locationNameList:"EventCategory" type:"list"` + + // The event severity specified in the Amazon Redshift event notification subscription. + // + // Values: ERROR, INFO + Severity *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event + // notification subscription. + SnsTopicArn *string `type:"string"` + + // A list of the sources that publish events to the Amazon Redshift event notification + // subscription. + SourceIdsList []*string `locationNameList:"SourceId" type:"list"` + + // The source type of the events returned the Amazon Redshift event notification, + // such as cluster, or cluster-snapshot. + SourceType *string `type:"string"` + + // The status of the Amazon Redshift event notification subscription. + // + // Constraints: + // + // Can be one of the following: active | no-permission | topic-not-exist The + // status "no-permission" indicates that Amazon Redshift no longer has permission + // to post to the Amazon SNS topic. 
The status "topic-not-exist" indicates that + // the topic was deleted after the subscription was created. + Status *string `type:"string"` + + // The date and time the Amazon Redshift event notification subscription was + // created. + SubscriptionCreationTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The list of tags for the event subscription. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s EventSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventSubscription) GoString() string { + return s.String() +} + +// Returns information about an HSM client certificate. The certificate is stored +// in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift +// cluster to encrypt data files. +type HsmClientCertificate struct { + _ struct{} `type:"structure"` + + // The identifier of the HSM client certificate. + HsmClientCertificateIdentifier *string `type:"string"` + + // The public key that the Amazon Redshift cluster will use to connect to the + // HSM. You must register the public key in the HSM. + HsmClientCertificatePublicKey *string `type:"string"` + + // The list of tags for the HSM client certificate. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s HsmClientCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HsmClientCertificate) GoString() string { + return s.String() +} + +// Returns information about an HSM configuration, which is an object that describes +// to Amazon Redshift clusters the information they require to connect to an +// HSM where they can store database encryption keys. +type HsmConfiguration struct { + _ struct{} `type:"structure"` + + // A text description of the HSM configuration. 
+ Description *string `type:"string"` + + // The name of the Amazon Redshift HSM configuration. + HsmConfigurationIdentifier *string `type:"string"` + + // The IP address that the Amazon Redshift cluster must use to access the HSM. + HsmIpAddress *string `type:"string"` + + // The name of the partition in the HSM where the Amazon Redshift clusters will + // store their database encryption keys. + HsmPartitionName *string `type:"string"` + + // The list of tags for the HSM configuration. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s HsmConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HsmConfiguration) GoString() string { + return s.String() +} + +type HsmStatus struct { + _ struct{} `type:"structure"` + + // Specifies the name of the HSM client certificate the Amazon Redshift cluster + // uses to retrieve the data encryption keys stored in an HSM. + HsmClientCertificateIdentifier *string `type:"string"` + + // Specifies the name of the HSM configuration that contains the information + // the Amazon Redshift cluster can use to retrieve and store keys in an HSM. + HsmConfigurationIdentifier *string `type:"string"` + + // Reports whether the Amazon Redshift cluster has finished applying any HSM + // settings changes specified in a modify cluster command. + // + // Values: active, applying + Status *string `type:"string"` +} + +// String returns the string representation +func (s HsmStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HsmStatus) GoString() string { + return s.String() +} + +// Describes an IP range used in a security group. +type IPRange struct { + _ struct{} `type:"structure"` + + // The IP range in Classless Inter-Domain Routing (CIDR) notation. + CIDRIP *string `type:"string"` + + // The status of the IP range, for example, "authorized". 
+ Status *string `type:"string"` + + // The list of tags for the IP range. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s IPRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPRange) GoString() string { + return s.String() +} + +// Describes the status of logging for a cluster. +type LoggingStatus struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket where the log files are stored. + BucketName *string `type:"string"` + + // The message indicating that logs failed to be delivered. + LastFailureMessage *string `type:"string"` + + // The last time when logs failed to be delivered. + LastFailureTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The last time when logs were delivered. + LastSuccessfulDeliveryTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // true if logging is on, false if logging is off. + LoggingEnabled *bool `type:"boolean"` + + // The prefix applied to the log file names. + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingStatus) GoString() string { + return s.String() +} + +type ModifyClusterInput struct { + _ struct{} `type:"structure"` + + // If true, major version upgrades will be applied automatically to the cluster + // during the maintenance window. + // + // Default: false + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automated snapshots are retained. If the value is + // 0, automated snapshots are disabled. Even if automated snapshots are disabled, + // you can still create manual snapshots when you want with CreateClusterSnapshot. 
+ // + // If you decrease the automated snapshot retention period from its current + // value, existing automated snapshots that fall outside of the new retention + // period will be immediately deleted. + // + // Default: Uses existing setting. + // + // Constraints: Must be a value from 0 to 35. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The unique identifier of the cluster to be modified. + // + // Example: examplecluster + ClusterIdentifier *string `type:"string" required:"true"` + + // The name of the cluster parameter group to apply to this cluster. This change + // is applied only after the cluster is rebooted. To reboot a cluster use RebootCluster. + // + // Default: Uses existing setting. + // + // Constraints: The cluster parameter group must be in the same parameter group + // family that matches the cluster version. + ClusterParameterGroupName *string `type:"string"` + + // A list of cluster security groups to be authorized on this cluster. This + // change is asynchronously applied as soon as possible. + // + // Security groups currently associated with the cluster, and not in the list + // of groups to apply, will be revoked from the cluster. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters or hyphens First character must + // be a letter Cannot end with a hyphen or contain two consecutive hyphens + ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"` + + // The new cluster type. + // + // When you submit your cluster resize request, your existing cluster goes + // into a read-only mode. After Amazon Redshift provisions a new cluster based + // on your resize requirements, there will be outage for a period while the + // old cluster is deleted and your connection is switched to the new cluster. + // You can use DescribeResize to track the progress of the resize request. 
+ // + // Valid Values: multi-node | single-node + ClusterType *string `type:"string"` + + // The new version number of the Amazon Redshift engine to upgrade to. + // + // For major version upgrades, if a non-default cluster parameter group is + // currently in use, a new cluster parameter group in the cluster parameter + // group family for the new version must be specified. The new cluster parameter + // group can be the default for that cluster parameter group family. For more + // information about parameters and parameter groups, go to Amazon Redshift + // Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) + // in the Amazon Redshift Cluster Management Guide. + // + // Example: 1.0 + ClusterVersion *string `type:"string"` + + // Specifies the name of the HSM client certificate the Amazon Redshift cluster + // uses to retrieve the data encryption keys stored in an HSM. + HsmClientCertificateIdentifier *string `type:"string"` + + // Specifies the name of the HSM configuration that contains the information + // the Amazon Redshift cluster can use to retrieve and store keys in an HSM. + HsmConfigurationIdentifier *string `type:"string"` + + // The new password for the cluster master user. This change is asynchronously + // applied as soon as possible. Between the time of the request and the completion + // of the request, the MasterUserPassword element exists in the PendingModifiedValues + // element of the operation response. Operations never return the password, + // so this operation provides a way to regain access to the master user account + // for a cluster if the password is lost. + // + // Default: Uses existing setting. + // + // Constraints: + // + // Must be between 8 and 64 characters in length. Must contain at least one + // uppercase letter. Must contain at least one lowercase letter. Must contain + // one number. 
Can be any printable ASCII character (ASCII code 33 to 126) except + // ' (single quote), " (double quote), \, /, @, or space. + MasterUserPassword *string `type:"string"` + + // The new identifier for the cluster. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic + // characters must be lowercase. First character must be a letter. Cannot end + // with a hyphen or contain two consecutive hyphens. Must be unique for all + // clusters within an AWS account. Example: examplecluster + NewClusterIdentifier *string `type:"string"` + + // The new node type of the cluster. If you specify a new node type, you must + // also specify the number of nodes parameter. + // + // When you submit your request to resize a cluster, Amazon Redshift sets + // access permissions for the cluster to read-only. After Amazon Redshift provisions + // a new cluster according to your resize requirements, there will be a temporary + // outage while the old cluster is deleted and your connection is switched to + // the new cluster. When the new connection is complete, the original access + // permissions for the cluster are restored. You can use DescribeResize to track + // the progress of the resize request. + // + // Valid Values: ds1.xlarge | ds1.8xlarge | ds2.xlarge | ds2.8xlarge | dc1.large + // | dc1.8xlarge. + NodeType *string `type:"string"` + + // The new number of nodes of the cluster. If you specify a new number of nodes, + // you must also specify the node type parameter. + // + // When you submit your request to resize a cluster, Amazon Redshift sets + // access permissions for the cluster to read-only. After Amazon Redshift provisions + // a new cluster according to your resize requirements, there will be a temporary + // outage while the old cluster is deleted and your connection is switched to + // the new cluster. When the new connection is complete, the original access + // permissions for the cluster are restored. 
You can use DescribeResize to track + // the progress of the resize request. + // + // Valid Values: Integer greater than 0. + NumberOfNodes *int64 `type:"integer"` + + // The weekly time range (in UTC) during which system maintenance can occur, + // if necessary. If system maintenance is necessary during the window, it may + // result in an outage. + // + // This maintenance window change is made immediately. If the new maintenance + // window indicates the current time, there must be at least 120 minutes between + // the current time and end of the window in order to ensure that pending changes + // are applied. + // + // Default: Uses existing setting. + // + // Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00. + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Must be at least 30 minutes. + PreferredMaintenanceWindow *string `type:"string"` + + // A list of virtual private cloud (VPC) security groups to be associated with + // the cluster. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s ModifyClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterInput) GoString() string { + return s.String() +} + +type ModifyClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s ModifyClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterOutput) GoString() string { + return s.String() +} + +type ModifyClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter group to be modified. + ParameterGroupName *string `type:"string" required:"true"` + + // An array of parameters to be modified. 
A maximum of 20 parameters can be + // modified in a single request. + // + // For each parameter to be modified, you must supply at least the parameter + // name and parameter value; other name-value pairs of the parameter are optional. + // + // For the workload management (WLM) configuration, you must supply all the + // name-value pairs in the wlm_json_configuration parameter. + Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterParameterGroupInput) GoString() string { + return s.String() +} + +type ModifyClusterSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the subnet group to be modified. + ClusterSubnetGroupName *string `type:"string" required:"true"` + + // A text description of the subnet group to be modified. + Description *string `type:"string"` + + // An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a + // single request. + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyClusterSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterSubnetGroupInput) GoString() string { + return s.String() +} + +type ModifyClusterSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Describes a subnet group. 
+ ClusterSubnetGroup *ClusterSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyClusterSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClusterSubnetGroupOutput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionInput struct { + _ struct{} `type:"structure"` + + // A Boolean value indicating if the subscription is enabled. true indicates + // the subscription is enabled + Enabled *bool `type:"boolean"` + + // Specifies the Amazon Redshift event categories to be published by the event + // notification subscription. + // + // Values: Configuration, Management, Monitoring, Security + EventCategories []*string `locationNameList:"EventCategory" type:"list"` + + // Specifies the Amazon Redshift event severity to be published by the event + // notification subscription. + // + // Values: ERROR, INFO + Severity *string `type:"string"` + + // The Amazon Resource Name (ARN) of the SNS topic to be used by the event notification + // subscription. + SnsTopicArn *string `type:"string"` + + // A list of one or more identifiers of Amazon Redshift source objects. All + // of the objects must be of the same type as was specified in the source type + // parameter. The event subscription will return only events generated by the + // specified objects. If not specified, then events are returned for all objects + // within the source type specified. + // + // Example: my-cluster-1, my-cluster-2 + // + // Example: my-snapshot-20131010 + SourceIds []*string `locationNameList:"SourceId" type:"list"` + + // The type of source that will be generating the events. For example, if you + // want to be notified of events generated by a cluster, you would set this + // parameter to cluster. If this value is not specified, events are returned + // for all Amazon Redshift objects in your AWS account. 
You must specify a source + // type in order to specify source IDs. + // + // Valid values: cluster, cluster-parameter-group, cluster-security-group, + // and cluster-snapshot. + SourceType *string `type:"string"` + + // The name of the modified Amazon Redshift event notification subscription. + SubscriptionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionInput) GoString() string { + return s.String() +} + +type ModifyEventSubscriptionOutput struct { + _ struct{} `type:"structure"` + + EventSubscription *EventSubscription `type:"structure"` +} + +// String returns the string representation +func (s ModifyEventSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEventSubscriptionOutput) GoString() string { + return s.String() +} + +type ModifySnapshotCopyRetentionPeriodInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster for which you want to change the retention + // period for automated snapshots that are copied to a destination region. + // + // Constraints: Must be the valid name of an existing cluster that has cross-region + // snapshot copy enabled. + ClusterIdentifier *string `type:"string" required:"true"` + + // The number of days to retain automated snapshots in the destination region + // after they are copied from the source region. + // + // If you decrease the retention period for automated snapshots that are copied + // to a destination region, Amazon Redshift will delete any existing automated + // snapshots that were copied to the destination region and that fall outside + // of the new retention period. + // + // Constraints: Must be at least 1 and no more than 35. 
+ RetentionPeriod *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ModifySnapshotCopyRetentionPeriodInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotCopyRetentionPeriodInput) GoString() string { + return s.String() +} + +type ModifySnapshotCopyRetentionPeriodOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s ModifySnapshotCopyRetentionPeriodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotCopyRetentionPeriodOutput) GoString() string { + return s.String() +} + +// Describes an orderable cluster option. +type OrderableClusterOption struct { + _ struct{} `type:"structure"` + + // A list of availability zones for the orderable cluster. + AvailabilityZones []*AvailabilityZone `locationNameList:"AvailabilityZone" type:"list"` + + // The cluster type, for example multi-node. + ClusterType *string `type:"string"` + + // The version of the orderable cluster. + ClusterVersion *string `type:"string"` + + // The node type for the orderable cluster. + NodeType *string `type:"string"` +} + +// String returns the string representation +func (s OrderableClusterOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrderableClusterOption) GoString() string { + return s.String() +} + +// Describes a parameter in a cluster parameter group. +type Parameter struct { + _ struct{} `type:"structure"` + + // The valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // Specifies how to apply the parameter. Supported value: static. + ApplyType *string `type:"string" enum:"ParameterApplyType"` + + // The data type of the parameter. 
+ DataType *string `type:"string"` + + // A description of the parameter. + Description *string `type:"string"` + + // If true, the parameter can be modified. Some parameters have security or + // operational implications that prevent them from being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The value of the parameter. + ParameterValue *string `type:"string"` + + // The source of the parameter value, such as "engine-default" or "user". + Source *string `type:"string"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +// Describes cluster attributes that are in a pending state. A change to one +// or more the attributes was requested and is in progress or will be applied. +type PendingModifiedValues struct { + _ struct{} `type:"structure"` + + // The pending or in-progress change of the automated snapshot retention period. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The pending or in-progress change of the new identifier for the cluster. + ClusterIdentifier *string `type:"string"` + + // The pending or in-progress change of the cluster type. + ClusterType *string `type:"string"` + + // The pending or in-progress change of the service version. + ClusterVersion *string `type:"string"` + + // The pending or in-progress change of the master user password for the cluster. + MasterUserPassword *string `type:"string"` + + // The pending or in-progress change of the cluster's node type. + NodeType *string `type:"string"` + + // The pending or in-progress change of the number of nodes in the cluster. 
+ NumberOfNodes *int64 `type:"integer"` +} + +// String returns the string representation +func (s PendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingModifiedValues) GoString() string { + return s.String() +} + +type PurchaseReservedNodeOfferingInput struct { + _ struct{} `type:"structure"` + + // The number of reserved nodes you want to purchase. + // + // Default: 1 + NodeCount *int64 `type:"integer"` + + // The unique identifier of the reserved node offering you want to purchase. + ReservedNodeOfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseReservedNodeOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedNodeOfferingInput) GoString() string { + return s.String() +} + +type PurchaseReservedNodeOfferingOutput struct { + _ struct{} `type:"structure"` + + // Describes a reserved node. You can call the DescribeReservedNodeOfferings + // API to obtain the available reserved node offerings. + ReservedNode *ReservedNode `type:"structure"` +} + +// String returns the string representation +func (s PurchaseReservedNodeOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedNodeOfferingOutput) GoString() string { + return s.String() +} + +type RebootClusterInput struct { + _ struct{} `type:"structure"` + + // The cluster identifier. + ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootClusterInput) GoString() string { + return s.String() +} + +type RebootClusterOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. 
+ Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RebootClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootClusterOutput) GoString() string { + return s.String() +} + +// Describes a recurring charge. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount charged per the period of time specified by the recurring charge + // frequency. + RecurringChargeAmount *float64 `type:"double"` + + // The frequency at which the recurring charge amount is applied. + RecurringChargeFrequency *string `type:"string"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// Describes a reserved node. You can call the DescribeReservedNodeOfferings +// API to obtain the available reserved node offerings. +type ReservedNode struct { + _ struct{} `type:"structure"` + + // The currency code for the reserved cluster. + CurrencyCode *string `type:"string"` + + // The duration of the node reservation in seconds. + Duration *int64 `type:"integer"` + + // The fixed cost Amazon Redshift charges you for this reserved node. + FixedPrice *float64 `type:"double"` + + // The number of reserved compute nodes. + NodeCount *int64 `type:"integer"` + + // The node type of the reserved node. + NodeType *string `type:"string"` + + // The anticipated utilization of the reserved node, as defined in the reserved + // node offering. + OfferingType *string `type:"string"` + + // The recurring charges for the reserved node. + RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The unique identifier for the reservation. + ReservedNodeId *string `type:"string"` + + // The identifier for the reserved node offering. 
+ ReservedNodeOfferingId *string `type:"string"` + + // The time the reservation started. You purchase a reserved node offering for + // a duration. This is the start time of that duration. + StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The state of the reserved compute node. + // + // Possible Values: + // + // pending-payment-This reserved node has recently been purchased, and the + // sale has been approved, but payment has not yet been confirmed. active-This + // reserved node is owned by the caller and is available for use. payment-failed-Payment + // failed for the purchase attempt. + State *string `type:"string"` + + // The hourly rate Amazon Redshift charges you for this reserved node. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedNode) GoString() string { + return s.String() +} + +// Describes a reserved node offering. +type ReservedNodeOffering struct { + _ struct{} `type:"structure"` + + // The currency code for the compute nodes offering. + CurrencyCode *string `type:"string"` + + // The duration, in seconds, for which the offering will reserve the node. + Duration *int64 `type:"integer"` + + // The upfront fixed charge you will pay to purchase the specific reserved node + // offering. + FixedPrice *float64 `type:"double"` + + // The node type offered by the reserved node offering. + NodeType *string `type:"string"` + + // The anticipated utilization of the reserved node, as defined in the reserved + // node offering. + OfferingType *string `type:"string"` + + // The charge to your account regardless of whether you are creating any clusters + // using the node offering. Recurring charges are only in effect for heavy-utilization + // reserved nodes. 
+ RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"` + + // The offering identifier. + ReservedNodeOfferingId *string `type:"string"` + + // The rate you are charged for each hour the cluster that is using the offering + // is running. + UsagePrice *float64 `type:"double"` +} + +// String returns the string representation +func (s ReservedNodeOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedNodeOffering) GoString() string { + return s.String() +} + +type ResetClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the cluster parameter group to be reset. + ParameterGroupName *string `type:"string" required:"true"` + + // An array of names of parameters to be reset. If ResetAllParameters option + // is not used, then at least one parameter name must be supplied. + // + // Constraints: A maximum of 20 parameters can be reset in a single request. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` + + // If true, all parameters in the specified parameter group will be reset to + // their default values. + // + // Default: true + ResetAllParameters *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetClusterParameterGroupInput) GoString() string { + return s.String() +} + +type RestoreFromClusterSnapshotInput struct { + _ struct{} `type:"structure"` + + // If true, major version upgrades can be applied during the maintenance window + // to the Amazon Redshift engine that is running on the cluster. + // + // Default: true + AllowVersionUpgrade *bool `type:"boolean"` + + // The number of days that automated snapshots are retained. If the value is + // 0, automated snapshots are disabled. 
Even if automated snapshots are disabled, + // you can still create manual snapshots when you want with CreateClusterSnapshot. + // + // Default: The value selected for the cluster from which the snapshot was + // taken. + // + // Constraints: Must be a value from 0 to 35. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The Amazon EC2 Availability Zone in which to restore the cluster. + // + // Default: A random, system-chosen Availability Zone. + // + // Example: us-east-1a + AvailabilityZone *string `type:"string"` + + // The identifier of the cluster that will be created from restoring the snapshot. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic + // characters must be lowercase. First character must be a letter. Cannot end + // with a hyphen or contain two consecutive hyphens. Must be unique for all + // clusters within an AWS account. + ClusterIdentifier *string `type:"string" required:"true"` + + // The name of the parameter group to be associated with this cluster. + // + // Default: The default Amazon Redshift cluster parameter group. For information + // about the default parameter group, go to Working with Amazon Redshift Parameter + // Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html). + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters or hyphens. First character must + // be a letter. Cannot end with a hyphen or contain two consecutive hyphens. + ClusterParameterGroupName *string `type:"string"` + + // A list of security groups to be associated with this cluster. + // + // Default: The default cluster security group for Amazon Redshift. + // + // Cluster security groups only apply to clusters outside of VPCs. + ClusterSecurityGroups []*string `locationNameList:"ClusterSecurityGroupName" type:"list"` + + // The name of the subnet group where you want to cluster restored. 
+ // + // A snapshot of cluster in VPC can be restored only in VPC. Therefore, you + // must provide subnet group name where you want the cluster restored. + ClusterSubnetGroupName *string `type:"string"` + + // The elastic IP (EIP) address for the cluster. + ElasticIp *string `type:"string"` + + // Specifies the name of the HSM client certificate the Amazon Redshift cluster + // uses to retrieve the data encryption keys stored in an HSM. + HsmClientCertificateIdentifier *string `type:"string"` + + // Specifies the name of the HSM configuration that contains the information + // the Amazon Redshift cluster can use to retrieve and store keys in an HSM. + HsmConfigurationIdentifier *string `type:"string"` + + // The AWS Key Management Service (KMS) key ID of the encryption key that you + // want to use to encrypt data in the cluster that you restore from a shared + // snapshot. + KmsKeyId *string `type:"string"` + + // The node type that the restored cluster will be provisioned with. + // + // Default: The node type of the cluster from which the snapshot was taken. + // You can modify this if you are using any DS node type. In that case, you + // can choose to restore into another DS node type of the same size. For example, + // you can restore ds1.8xlarge into ds2.8xlarge, or ds2.xlarge into ds1.xlarge. + // If you have a DC instance type, you must restore into that same instance + // type and size. In other words, you can only restore a dc1.large instance + // type into another dc1.large instance type. For more information about node + // types, see About Clusters and Nodes (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes) + // in the Amazon Redshift Cluster Management Guide + NodeType *string `type:"string"` + + // The AWS customer account used to create or copy the snapshot. Required if + // you are restoring a snapshot you do not own, optional if you own the snapshot. 
+ OwnerAccount *string `type:"string"` + + // The port number on which the cluster accepts connections. + // + // Default: The same port as the original cluster. + // + // Constraints: Must be between 1115 and 65535. + Port *int64 `type:"integer"` + + // The weekly time range (in UTC) during which automated cluster maintenance + // can occur. + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: The value selected for the cluster from which the snapshot was + // taken. For more information about the time blocks for each region, see Maintenance + // Windows (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows) + // in Amazon Redshift Cluster Management Guide. + // + // Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // If true, the cluster can be accessed from a public network. + PubliclyAccessible *bool `type:"boolean"` + + // The name of the cluster the source snapshot was created from. This parameter + // is required if your IAM user has a policy containing a snapshot resource + // element that specifies anything other than * for the cluster name. + SnapshotClusterIdentifier *string `type:"string"` + + // The name of the snapshot from which to create the new cluster. This parameter + // isn't case sensitive. + // + // Example: my-snapshot-id + SnapshotIdentifier *string `type:"string" required:"true"` + + // A list of Virtual Private Cloud (VPC) security groups to be associated with + // the cluster. + // + // Default: The default VPC security group is associated with the cluster. + // + // VPC security groups only apply to clusters in VPCs. 
+ VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s RestoreFromClusterSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromClusterSnapshotInput) GoString() string { + return s.String() +} + +type RestoreFromClusterSnapshotOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RestoreFromClusterSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreFromClusterSnapshotOutput) GoString() string { + return s.String() +} + +// Describes the status of a cluster restore action. Returns null if the cluster +// was not created by restoring a snapshot. +type RestoreStatus struct { + _ struct{} `type:"structure"` + + // The number of megabytes per second being transferred from the backup storage. + // Returns the average rate for a completed backup. + CurrentRestoreRateInMegaBytesPerSecond *float64 `type:"double"` + + // The amount of time an in-progress restore has been running, or the amount + // of time it took a completed restore to finish. + ElapsedTimeInSeconds *int64 `type:"long"` + + // The estimate of the time remaining before the restore will complete. Returns + // 0 for a completed restore. + EstimatedTimeToCompletionInSeconds *int64 `type:"long"` + + // The number of megabytes that have been transferred from snapshot storage. + ProgressInMegaBytes *int64 `type:"long"` + + // The size of the set of snapshot data used to restore the cluster. + SnapshotSizeInMegaBytes *int64 `type:"long"` + + // The status of the restore action. Returns starting, restoring, completed, + // or failed. 
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s RestoreStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreStatus) GoString() string { + return s.String() +} + +// ??? +type RevokeClusterSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IP range for which to revoke access. This range must be a valid Classless + // Inter-Domain Routing (CIDR) block of IP addresses. If CIDRIP is specified, + // EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided. + CIDRIP *string `type:"string"` + + // The name of the security Group from which to revoke the ingress rule. + ClusterSecurityGroupName *string `type:"string" required:"true"` + + // The name of the EC2 Security Group whose access is to be revoked. If EC2SecurityGroupName + // is specified, EC2SecurityGroupOwnerId must also be provided and CIDRIP cannot + // be provided. + EC2SecurityGroupName *string `type:"string"` + + // The AWS account number of the owner of the security group specified in the + // EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable + // value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must + // also be provided. and CIDRIP cannot be provided. + // + // Example: 111122223333 + EC2SecurityGroupOwnerId *string `type:"string"` +} + +// String returns the string representation +func (s RevokeClusterSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeClusterSecurityGroupIngressInput) GoString() string { + return s.String() +} + +type RevokeClusterSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Describes a security group. 
+ ClusterSecurityGroup *ClusterSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s RevokeClusterSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeClusterSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +type RevokeSnapshotAccessInput struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS customer account that can no longer restore the + // specified snapshot. + AccountWithRestoreAccess *string `type:"string" required:"true"` + + // The identifier of the cluster the snapshot was created from. This parameter + // is required if your IAM user has a policy containing a snapshot resource + // element that specifies anything other than * for the cluster name. + SnapshotClusterIdentifier *string `type:"string"` + + // The identifier of the snapshot that the account can no longer access. + SnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RevokeSnapshotAccessInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSnapshotAccessInput) GoString() string { + return s.String() +} + +type RevokeSnapshotAccessOutput struct { + _ struct{} `type:"structure"` + + // Describes a snapshot. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s RevokeSnapshotAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSnapshotAccessOutput) GoString() string { + return s.String() +} + +type RotateEncryptionKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster that you want to rotate the encryption + // keys for. + // + // Constraints: Must be the name of valid cluster that has encryption enabled. 
+ ClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RotateEncryptionKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RotateEncryptionKeyInput) GoString() string { + return s.String() +} + +type RotateEncryptionKeyOutput struct { + _ struct{} `type:"structure"` + + // Describes a cluster. + Cluster *Cluster `type:"structure"` +} + +// String returns the string representation +func (s RotateEncryptionKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RotateEncryptionKeyOutput) GoString() string { + return s.String() +} + +// Describes a snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // A list of the AWS customer accounts authorized to restore the snapshot. Returns + // null if no accounts are authorized. Visible only to the snapshot owner. + AccountsWithRestoreAccess []*AccountWithRestoreAccess `locationNameList:"AccountWithRestoreAccess" type:"list"` + + // The size of the incremental backup. + ActualIncrementalBackupSizeInMegaBytes *float64 `type:"double"` + + // The Availability Zone in which the cluster was created. + AvailabilityZone *string `type:"string"` + + // The number of megabytes that have been transferred to the snapshot backup. + BackupProgressInMegaBytes *float64 `type:"double"` + + // The time (UTC) when the cluster was originally created. + ClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier of the cluster for which the snapshot was taken. + ClusterIdentifier *string `type:"string"` + + // The version ID of the Amazon Redshift engine that is running on the cluster. + ClusterVersion *string `type:"string"` + + // The number of megabytes per second being transferred to the snapshot backup. + // Returns 0 for a completed backup. 
+ CurrentBackupRateInMegaBytesPerSecond *float64 `type:"double"` + + // The name of the database that was created when the cluster was created. + DBName *string `type:"string"` + + // The amount of time an in-progress snapshot backup has been running, or the + // amount of time it took a completed backup to finish. + ElapsedTimeInSeconds *int64 `type:"long"` + + // If true, the data in the snapshot is encrypted at rest. + Encrypted *bool `type:"boolean"` + + // A boolean that indicates whether the snapshot data is encrypted using the + // HSM keys of the source cluster. true indicates that the data is encrypted + // using HSM keys. + EncryptedWithHSM *bool `type:"boolean"` + + // The estimate of the time remaining before the snapshot backup will complete. + // Returns 0 for a completed backup. + EstimatedSecondsToCompletion *int64 `type:"long"` + + // The AWS Key Management Service (KMS) key ID of the encryption key that was + // used to encrypt data in the cluster from which the snapshot was taken. + KmsKeyId *string `type:"string"` + + // The master user name for the cluster. + MasterUsername *string `type:"string"` + + // The node type of the nodes in the cluster. + NodeType *string `type:"string"` + + // The number of nodes in the cluster. + NumberOfNodes *int64 `type:"integer"` + + // For manual snapshots, the AWS customer account used to create or copy the + // snapshot. For automatic snapshots, the owner of the cluster. The owner can + // perform all snapshot actions, such as sharing a manual snapshot. + OwnerAccount *string `type:"string"` + + // The port that the cluster is listening on. + Port *int64 `type:"integer"` + + // The list of node types that this cluster snapshot is able to restore into. + RestorableNodeTypes []*string `locationNameList:"NodeType" type:"list"` + + // The time (UTC) when Amazon Redshift began the snapshot. A snapshot contains + // a copy of the cluster data as of this exact time. 
+ SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The snapshot identifier that is provided in the request. + SnapshotIdentifier *string `type:"string"` + + // The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot + // will be of type "manual". + SnapshotType *string `type:"string"` + + // The source region from which the snapshot was copied. + SourceRegion *string `type:"string"` + + // The snapshot status. The value of the status depends on the API operation + // used. CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating". + // DescribeClusterSnapshots returns status as "creating", "available", "final + // snapshot", or "failed". DeleteClusterSnapshot returns status as "deleted". + Status *string `type:"string"` + + // The list of tags for the cluster snapshot. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The size of the complete set of backup data that would be used to restore + // the cluster. + TotalBackupSizeInMegaBytes *float64 `type:"double"` + + // The VPC identifier of the cluster if the snapshot is from a cluster in a + // VPC. Otherwise, this field is not in the output. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// The snapshot copy grant that grants Amazon Redshift permission to encrypt +// copied snapshots with the specified customer master key (CMK) from AWS KMS +// in the destination region. +// +// For more information about managing snapshot copy grants, go to Amazon +// Redshift Database Encryption (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html) +// in the Amazon Redshift Cluster Management Guide. 
+type SnapshotCopyGrant struct { + _ struct{} `type:"structure"` + + // The unique identifier of the customer master key (CMK) in AWS KMS to which + // Amazon Redshift is granted permission. + KmsKeyId *string `type:"string"` + + // The name of the snapshot copy grant. + SnapshotCopyGrantName *string `type:"string"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s SnapshotCopyGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotCopyGrant) GoString() string { + return s.String() +} + +// Describes a subnet. +type Subnet struct { + _ struct{} `type:"structure"` + + // Describes an availability zone. + SubnetAvailabilityZone *AvailabilityZone `type:"structure"` + + // The identifier of the subnet. + SubnetIdentifier *string `type:"string"` + + // The status of the subnet. + SubnetStatus *string `type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// A tag consisting of a name/value pair for a resource. +type Tag struct { + _ struct{} `type:"structure"` + + // The key, or name, for the resource tag. + Key *string `type:"string"` + + // The value for the resource tag. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// A tag and its associated resource. +type TaggedResource struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) with which the tag is associated. For example, + // arn:aws:redshift:us-east-1:123456789:cluster:t1. 
+ ResourceName *string `type:"string"` + + // The type of resource with which the tag is associated. Valid resource types + // are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group + // Subnet group HSM connection HSM certificate Parameter group + // + // For more information about Amazon Redshift resource types and constructing + // ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) (http://docs.aws.amazon.com/redshift/latest/mgmt/constructing-redshift-arn.html) + // in the Amazon Redshift Cluster Management Guide. + ResourceType *string `type:"string"` + + // The tag for the resource. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s TaggedResource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaggedResource) GoString() string { + return s.String() +} + +// Describes the members of a VPC security group. +type VpcSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + Status *string `type:"string"` + + VpcSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s VpcSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcSecurityGroupMembership) GoString() string { + return s.String() +} + +const ( + // @enum ParameterApplyType + ParameterApplyTypeStatic = "static" + // @enum ParameterApplyType + ParameterApplyTypeDynamic = "dynamic" +) + +const ( + // @enum SourceType + SourceTypeCluster = "cluster" + // @enum SourceType + SourceTypeClusterParameterGroup = "cluster-parameter-group" + // @enum SourceType + SourceTypeClusterSecurityGroup = "cluster-security-group" + // @enum SourceType + SourceTypeClusterSnapshot = "cluster-snapshot" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/examples_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1497 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package redshift_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/redshift" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRedshift_AuthorizeClusterSecurityGroupIngress() { + svc := redshift.New(session.New()) + + params := &redshift.AuthorizeClusterSecurityGroupIngressInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.AuthorizeClusterSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_AuthorizeSnapshotAccess() { + svc := redshift.New(session.New()) + + params := &redshift.AuthorizeSnapshotAccessInput{ + AccountWithRestoreAccess: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + SnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.AuthorizeSnapshotAccess(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_CopyClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.CopyClusterSnapshotInput{ + SourceSnapshotIdentifier: aws.String("String"), // Required + TargetSnapshotIdentifier: aws.String("String"), // Required + SourceSnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.CopyClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateCluster() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + MasterUserPassword: aws.String("String"), // Required + MasterUsername: aws.String("String"), // Required + NodeType: aws.String("String"), // Required + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + AvailabilityZone: aws.String("String"), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + ClusterSubnetGroupName: aws.String("String"), + ClusterType: aws.String("String"), + ClusterVersion: aws.String("String"), + DBName: aws.String("String"), + ElasticIp: aws.String("String"), + Encrypted: aws.Bool(true), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + KmsKeyId: aws.String("String"), + NumberOfNodes: aws.Int64(1), + Port: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + } + resp, err := svc.CreateCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterParameterGroupInput{ + Description: aws.String("String"), // Required + ParameterGroupFamily: aws.String("String"), // Required + ParameterGroupName: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSecurityGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSecurityGroupInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSnapshotInput{ + ClusterIdentifier: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.CreateClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + Description: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.CreateEventSubscriptionInput{ + SnsTopicArn: aws.String("String"), // Required + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Severity: aws.String("String"), + SourceIds: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + SourceType: aws.String("String"), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateHsmClientCertificate() { + svc := redshift.New(session.New()) + + params := &redshift.CreateHsmClientCertificateInput{ + HsmClientCertificateIdentifier: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateHsmClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateHsmConfiguration() { + svc := redshift.New(session.New()) + + params := &redshift.CreateHsmConfigurationInput{ + Description: aws.String("String"), // Required + HsmConfigurationIdentifier: aws.String("String"), // Required + HsmIpAddress: aws.String("String"), // Required + HsmPartitionName: aws.String("String"), // Required + HsmPartitionPassword: aws.String("String"), // Required + HsmServerPublicCertificate: aws.String("String"), // Required + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateHsmConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_CreateSnapshotCopyGrant() { + svc := redshift.New(session.New()) + + params := &redshift.CreateSnapshotCopyGrantInput{ + SnapshotCopyGrantName: aws.String("String"), // Required + KmsKeyId: aws.String("String"), + Tags: []*redshift.Tag{ + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateSnapshotCopyGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_CreateTags() { + svc := redshift.New(session.New()) + + params := &redshift.CreateTagsInput{ + ResourceName: aws.String("String"), // Required + Tags: []*redshift.Tag{ // Required + { // Required + Key: aws.String("String"), + Value: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.CreateTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteCluster() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + FinalClusterSnapshotIdentifier: aws.String("String"), + SkipFinalClusterSnapshot: aws.Bool(true), + } + resp, err := svc.DeleteCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSecurityGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSecurityGroupInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterSecurityGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSnapshotInput{ + SnapshotIdentifier: aws.String("String"), // Required + SnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.DeleteClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + } + resp, err := svc.DeleteClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DeleteEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + } + resp, err := svc.DeleteEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteHsmClientCertificate() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteHsmClientCertificateInput{ + HsmClientCertificateIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteHsmClientCertificate(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteHsmConfiguration() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteHsmConfigurationInput{ + HsmConfigurationIdentifier: aws.String("String"), // Required + } + resp, err := svc.DeleteHsmConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DeleteSnapshotCopyGrant() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteSnapshotCopyGrantInput{ + SnapshotCopyGrantName: aws.String("String"), // Required + } + resp, err := svc.DeleteSnapshotCopyGrant(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DeleteTags() { + svc := redshift.New(session.New()) + + params := &redshift.DeleteTagsInput{ + ResourceName: aws.String("String"), // Required + TagKeys: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DeleteTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterParameterGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterParameterGroupsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ParameterGroupName: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterParameterGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterParameters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterParametersInput{ + ParameterGroupName: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + Source: aws.String("String"), + } + resp, err := svc.DescribeClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSecurityGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSecurityGroupsInput{ + ClusterSecurityGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSecurityGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSnapshots() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSnapshotsInput{ + ClusterIdentifier: aws.String("String"), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + OwnerAccount: aws.String("String"), + SnapshotIdentifier: aws.String("String"), + SnapshotType: aws.String("String"), + StartTime: aws.Time(time.Now()), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSnapshots(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterSubnetGroups() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterSubnetGroupsInput{ + ClusterSubnetGroupName: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusterSubnetGroups(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusterVersions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClusterVersionsInput{ + ClusterParameterGroupFamily: aws.String("String"), + ClusterVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeClusterVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeClusters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeClustersInput{ + ClusterIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeClusters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeDefaultClusterParameters() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeDefaultClusterParametersInput{ + ParameterGroupFamily: aws.String("String"), // Required + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + } + resp, err := svc.DescribeDefaultClusterParameters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeEventCategories() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventCategoriesInput{ + SourceType: aws.String("String"), + } + resp, err := svc.DescribeEventCategories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeEventSubscriptions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventSubscriptionsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SubscriptionName: aws.String("String"), + } + resp, err := svc.DescribeEventSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeEvents() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeEventsInput{ + Duration: aws.Int64(1), + EndTime: aws.Time(time.Now()), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SourceIdentifier: aws.String("String"), + SourceType: aws.String("SourceType"), + StartTime: aws.Time(time.Now()), + } + resp, err := svc.DescribeEvents(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeHsmClientCertificates() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeHsmClientCertificatesInput{ + HsmClientCertificateIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeHsmClientCertificates(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeHsmConfigurations() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeHsmConfigurationsInput{ + HsmConfigurationIdentifier: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeHsmConfigurations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeLoggingStatus() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeLoggingStatusInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DescribeLoggingStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeOrderableClusterOptions() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeOrderableClusterOptionsInput{ + ClusterVersion: aws.String("String"), + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + NodeType: aws.String("String"), + } + resp, err := svc.DescribeOrderableClusterOptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeReservedNodeOfferings() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeReservedNodeOfferingsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReservedNodeOfferingId: aws.String("String"), + } + resp, err := svc.DescribeReservedNodeOfferings(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeReservedNodes() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeReservedNodesInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ReservedNodeId: aws.String("String"), + } + resp, err := svc.DescribeReservedNodes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeResize() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeResizeInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DescribeResize(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DescribeSnapshotCopyGrants() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeSnapshotCopyGrantsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + SnapshotCopyGrantName: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeSnapshotCopyGrants(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_DescribeTags() { + svc := redshift.New(session.New()) + + params := &redshift.DescribeTagsInput{ + Marker: aws.String("String"), + MaxRecords: aws.Int64(1), + ResourceName: aws.String("String"), + ResourceType: aws.String("String"), + TagKeys: []*string{ + aws.String("String"), // Required + // More values... + }, + TagValues: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTags(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DisableLogging() { + svc := redshift.New(session.New()) + + params := &redshift.DisableLoggingInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DisableLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_DisableSnapshotCopy() { + svc := redshift.New(session.New()) + + params := &redshift.DisableSnapshotCopyInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.DisableSnapshotCopy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_EnableLogging() { + svc := redshift.New(session.New()) + + params := &redshift.EnableLoggingInput{ + BucketName: aws.String("String"), // Required + ClusterIdentifier: aws.String("String"), // Required + S3KeyPrefix: aws.String("String"), + } + resp, err := svc.EnableLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_EnableSnapshotCopy() { + svc := redshift.New(session.New()) + + params := &redshift.EnableSnapshotCopyInput{ + ClusterIdentifier: aws.String("String"), // Required + DestinationRegion: aws.String("String"), // Required + RetentionPeriod: aws.Int64(1), + SnapshotCopyGrantName: aws.String("String"), + } + resp, err := svc.EnableSnapshotCopy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyCluster() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... 
+ }, + ClusterType: aws.String("String"), + ClusterVersion: aws.String("String"), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + MasterUserPassword: aws.String("String"), + NewClusterIdentifier: aws.String("String"), + NodeType: aws.String("String"), + NumberOfNodes: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.ModifyCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + Parameters: []*redshift.Parameter{ // Required + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("ParameterApplyType"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.ModifyClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyClusterSubnetGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String("String"), // Required + SubnetIds: []*string{ // Required + aws.String("String"), // Required + // More values... 
+ }, + Description: aws.String("String"), + } + resp, err := svc.ModifyClusterSubnetGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifyEventSubscription() { + svc := redshift.New(session.New()) + + params := &redshift.ModifyEventSubscriptionInput{ + SubscriptionName: aws.String("String"), // Required + Enabled: aws.Bool(true), + EventCategories: []*string{ + aws.String("String"), // Required + // More values... + }, + Severity: aws.String("String"), + SnsTopicArn: aws.String("String"), + SourceIds: []*string{ + aws.String("String"), // Required + // More values... + }, + SourceType: aws.String("String"), + } + resp, err := svc.ModifyEventSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ModifySnapshotCopyRetentionPeriod() { + svc := redshift.New(session.New()) + + params := &redshift.ModifySnapshotCopyRetentionPeriodInput{ + ClusterIdentifier: aws.String("String"), // Required + RetentionPeriod: aws.Int64(1), // Required + } + resp, err := svc.ModifySnapshotCopyRetentionPeriod(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_PurchaseReservedNodeOffering() { + svc := redshift.New(session.New()) + + params := &redshift.PurchaseReservedNodeOfferingInput{ + ReservedNodeOfferingId: aws.String("String"), // Required + NodeCount: aws.Int64(1), + } + resp, err := svc.PurchaseReservedNodeOffering(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RebootCluster() { + svc := redshift.New(session.New()) + + params := &redshift.RebootClusterInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.RebootCluster(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_ResetClusterParameterGroup() { + svc := redshift.New(session.New()) + + params := &redshift.ResetClusterParameterGroupInput{ + ParameterGroupName: aws.String("String"), // Required + Parameters: []*redshift.Parameter{ + { // Required + AllowedValues: aws.String("String"), + ApplyType: aws.String("ParameterApplyType"), + DataType: aws.String("String"), + Description: aws.String("String"), + IsModifiable: aws.Bool(true), + MinimumEngineVersion: aws.String("String"), + ParameterName: aws.String("String"), + ParameterValue: aws.String("String"), + Source: aws.String("String"), + }, + // More values... + }, + ResetAllParameters: aws.Bool(true), + } + resp, err := svc.ResetClusterParameterGroup(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRedshift_RestoreFromClusterSnapshot() { + svc := redshift.New(session.New()) + + params := &redshift.RestoreFromClusterSnapshotInput{ + ClusterIdentifier: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + AllowVersionUpgrade: aws.Bool(true), + AutomatedSnapshotRetentionPeriod: aws.Int64(1), + AvailabilityZone: aws.String("String"), + ClusterParameterGroupName: aws.String("String"), + ClusterSecurityGroups: []*string{ + aws.String("String"), // Required + // More values... + }, + ClusterSubnetGroupName: aws.String("String"), + ElasticIp: aws.String("String"), + HsmClientCertificateIdentifier: aws.String("String"), + HsmConfigurationIdentifier: aws.String("String"), + KmsKeyId: aws.String("String"), + NodeType: aws.String("String"), + OwnerAccount: aws.String("String"), + Port: aws.Int64(1), + PreferredMaintenanceWindow: aws.String("String"), + PubliclyAccessible: aws.Bool(true), + SnapshotClusterIdentifier: aws.String("String"), + VpcSecurityGroupIds: []*string{ + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.RestoreFromClusterSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RevokeClusterSecurityGroupIngress() { + svc := redshift.New(session.New()) + + params := &redshift.RevokeClusterSecurityGroupIngressInput{ + ClusterSecurityGroupName: aws.String("String"), // Required + CIDRIP: aws.String("String"), + EC2SecurityGroupName: aws.String("String"), + EC2SecurityGroupOwnerId: aws.String("String"), + } + resp, err := svc.RevokeClusterSecurityGroupIngress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RevokeSnapshotAccess() { + svc := redshift.New(session.New()) + + params := &redshift.RevokeSnapshotAccessInput{ + AccountWithRestoreAccess: aws.String("String"), // Required + SnapshotIdentifier: aws.String("String"), // Required + SnapshotClusterIdentifier: aws.String("String"), + } + resp, err := svc.RevokeSnapshotAccess(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRedshift_RotateEncryptionKey() { + svc := redshift.New(session.New()) + + params := &redshift.RotateEncryptionKeyInput{ + ClusterIdentifier: aws.String("String"), // Required + } + resp, err := svc.RotateEncryptionKey(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/redshiftiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,280 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package redshiftiface provides an interface for the Amazon Redshift. 
+package redshiftiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/redshift" +) + +// RedshiftAPI is the interface type for redshift.Redshift. +type RedshiftAPI interface { + AuthorizeClusterSecurityGroupIngressRequest(*redshift.AuthorizeClusterSecurityGroupIngressInput) (*request.Request, *redshift.AuthorizeClusterSecurityGroupIngressOutput) + + AuthorizeClusterSecurityGroupIngress(*redshift.AuthorizeClusterSecurityGroupIngressInput) (*redshift.AuthorizeClusterSecurityGroupIngressOutput, error) + + AuthorizeSnapshotAccessRequest(*redshift.AuthorizeSnapshotAccessInput) (*request.Request, *redshift.AuthorizeSnapshotAccessOutput) + + AuthorizeSnapshotAccess(*redshift.AuthorizeSnapshotAccessInput) (*redshift.AuthorizeSnapshotAccessOutput, error) + + CopyClusterSnapshotRequest(*redshift.CopyClusterSnapshotInput) (*request.Request, *redshift.CopyClusterSnapshotOutput) + + CopyClusterSnapshot(*redshift.CopyClusterSnapshotInput) (*redshift.CopyClusterSnapshotOutput, error) + + CreateClusterRequest(*redshift.CreateClusterInput) (*request.Request, *redshift.CreateClusterOutput) + + CreateCluster(*redshift.CreateClusterInput) (*redshift.CreateClusterOutput, error) + + CreateClusterParameterGroupRequest(*redshift.CreateClusterParameterGroupInput) (*request.Request, *redshift.CreateClusterParameterGroupOutput) + + CreateClusterParameterGroup(*redshift.CreateClusterParameterGroupInput) (*redshift.CreateClusterParameterGroupOutput, error) + + CreateClusterSecurityGroupRequest(*redshift.CreateClusterSecurityGroupInput) (*request.Request, *redshift.CreateClusterSecurityGroupOutput) + + CreateClusterSecurityGroup(*redshift.CreateClusterSecurityGroupInput) (*redshift.CreateClusterSecurityGroupOutput, error) + + CreateClusterSnapshotRequest(*redshift.CreateClusterSnapshotInput) (*request.Request, *redshift.CreateClusterSnapshotOutput) + + CreateClusterSnapshot(*redshift.CreateClusterSnapshotInput) (*redshift.CreateClusterSnapshotOutput, 
error) + + CreateClusterSubnetGroupRequest(*redshift.CreateClusterSubnetGroupInput) (*request.Request, *redshift.CreateClusterSubnetGroupOutput) + + CreateClusterSubnetGroup(*redshift.CreateClusterSubnetGroupInput) (*redshift.CreateClusterSubnetGroupOutput, error) + + CreateEventSubscriptionRequest(*redshift.CreateEventSubscriptionInput) (*request.Request, *redshift.CreateEventSubscriptionOutput) + + CreateEventSubscription(*redshift.CreateEventSubscriptionInput) (*redshift.CreateEventSubscriptionOutput, error) + + CreateHsmClientCertificateRequest(*redshift.CreateHsmClientCertificateInput) (*request.Request, *redshift.CreateHsmClientCertificateOutput) + + CreateHsmClientCertificate(*redshift.CreateHsmClientCertificateInput) (*redshift.CreateHsmClientCertificateOutput, error) + + CreateHsmConfigurationRequest(*redshift.CreateHsmConfigurationInput) (*request.Request, *redshift.CreateHsmConfigurationOutput) + + CreateHsmConfiguration(*redshift.CreateHsmConfigurationInput) (*redshift.CreateHsmConfigurationOutput, error) + + CreateSnapshotCopyGrantRequest(*redshift.CreateSnapshotCopyGrantInput) (*request.Request, *redshift.CreateSnapshotCopyGrantOutput) + + CreateSnapshotCopyGrant(*redshift.CreateSnapshotCopyGrantInput) (*redshift.CreateSnapshotCopyGrantOutput, error) + + CreateTagsRequest(*redshift.CreateTagsInput) (*request.Request, *redshift.CreateTagsOutput) + + CreateTags(*redshift.CreateTagsInput) (*redshift.CreateTagsOutput, error) + + DeleteClusterRequest(*redshift.DeleteClusterInput) (*request.Request, *redshift.DeleteClusterOutput) + + DeleteCluster(*redshift.DeleteClusterInput) (*redshift.DeleteClusterOutput, error) + + DeleteClusterParameterGroupRequest(*redshift.DeleteClusterParameterGroupInput) (*request.Request, *redshift.DeleteClusterParameterGroupOutput) + + DeleteClusterParameterGroup(*redshift.DeleteClusterParameterGroupInput) (*redshift.DeleteClusterParameterGroupOutput, error) + + 
DeleteClusterSecurityGroupRequest(*redshift.DeleteClusterSecurityGroupInput) (*request.Request, *redshift.DeleteClusterSecurityGroupOutput) + + DeleteClusterSecurityGroup(*redshift.DeleteClusterSecurityGroupInput) (*redshift.DeleteClusterSecurityGroupOutput, error) + + DeleteClusterSnapshotRequest(*redshift.DeleteClusterSnapshotInput) (*request.Request, *redshift.DeleteClusterSnapshotOutput) + + DeleteClusterSnapshot(*redshift.DeleteClusterSnapshotInput) (*redshift.DeleteClusterSnapshotOutput, error) + + DeleteClusterSubnetGroupRequest(*redshift.DeleteClusterSubnetGroupInput) (*request.Request, *redshift.DeleteClusterSubnetGroupOutput) + + DeleteClusterSubnetGroup(*redshift.DeleteClusterSubnetGroupInput) (*redshift.DeleteClusterSubnetGroupOutput, error) + + DeleteEventSubscriptionRequest(*redshift.DeleteEventSubscriptionInput) (*request.Request, *redshift.DeleteEventSubscriptionOutput) + + DeleteEventSubscription(*redshift.DeleteEventSubscriptionInput) (*redshift.DeleteEventSubscriptionOutput, error) + + DeleteHsmClientCertificateRequest(*redshift.DeleteHsmClientCertificateInput) (*request.Request, *redshift.DeleteHsmClientCertificateOutput) + + DeleteHsmClientCertificate(*redshift.DeleteHsmClientCertificateInput) (*redshift.DeleteHsmClientCertificateOutput, error) + + DeleteHsmConfigurationRequest(*redshift.DeleteHsmConfigurationInput) (*request.Request, *redshift.DeleteHsmConfigurationOutput) + + DeleteHsmConfiguration(*redshift.DeleteHsmConfigurationInput) (*redshift.DeleteHsmConfigurationOutput, error) + + DeleteSnapshotCopyGrantRequest(*redshift.DeleteSnapshotCopyGrantInput) (*request.Request, *redshift.DeleteSnapshotCopyGrantOutput) + + DeleteSnapshotCopyGrant(*redshift.DeleteSnapshotCopyGrantInput) (*redshift.DeleteSnapshotCopyGrantOutput, error) + + DeleteTagsRequest(*redshift.DeleteTagsInput) (*request.Request, *redshift.DeleteTagsOutput) + + DeleteTags(*redshift.DeleteTagsInput) (*redshift.DeleteTagsOutput, error) + + 
DescribeClusterParameterGroupsRequest(*redshift.DescribeClusterParameterGroupsInput) (*request.Request, *redshift.DescribeClusterParameterGroupsOutput) + + DescribeClusterParameterGroups(*redshift.DescribeClusterParameterGroupsInput) (*redshift.DescribeClusterParameterGroupsOutput, error) + + DescribeClusterParameterGroupsPages(*redshift.DescribeClusterParameterGroupsInput, func(*redshift.DescribeClusterParameterGroupsOutput, bool) bool) error + + DescribeClusterParametersRequest(*redshift.DescribeClusterParametersInput) (*request.Request, *redshift.DescribeClusterParametersOutput) + + DescribeClusterParameters(*redshift.DescribeClusterParametersInput) (*redshift.DescribeClusterParametersOutput, error) + + DescribeClusterParametersPages(*redshift.DescribeClusterParametersInput, func(*redshift.DescribeClusterParametersOutput, bool) bool) error + + DescribeClusterSecurityGroupsRequest(*redshift.DescribeClusterSecurityGroupsInput) (*request.Request, *redshift.DescribeClusterSecurityGroupsOutput) + + DescribeClusterSecurityGroups(*redshift.DescribeClusterSecurityGroupsInput) (*redshift.DescribeClusterSecurityGroupsOutput, error) + + DescribeClusterSecurityGroupsPages(*redshift.DescribeClusterSecurityGroupsInput, func(*redshift.DescribeClusterSecurityGroupsOutput, bool) bool) error + + DescribeClusterSnapshotsRequest(*redshift.DescribeClusterSnapshotsInput) (*request.Request, *redshift.DescribeClusterSnapshotsOutput) + + DescribeClusterSnapshots(*redshift.DescribeClusterSnapshotsInput) (*redshift.DescribeClusterSnapshotsOutput, error) + + DescribeClusterSnapshotsPages(*redshift.DescribeClusterSnapshotsInput, func(*redshift.DescribeClusterSnapshotsOutput, bool) bool) error + + DescribeClusterSubnetGroupsRequest(*redshift.DescribeClusterSubnetGroupsInput) (*request.Request, *redshift.DescribeClusterSubnetGroupsOutput) + + DescribeClusterSubnetGroups(*redshift.DescribeClusterSubnetGroupsInput) (*redshift.DescribeClusterSubnetGroupsOutput, error) + + 
DescribeClusterSubnetGroupsPages(*redshift.DescribeClusterSubnetGroupsInput, func(*redshift.DescribeClusterSubnetGroupsOutput, bool) bool) error + + DescribeClusterVersionsRequest(*redshift.DescribeClusterVersionsInput) (*request.Request, *redshift.DescribeClusterVersionsOutput) + + DescribeClusterVersions(*redshift.DescribeClusterVersionsInput) (*redshift.DescribeClusterVersionsOutput, error) + + DescribeClusterVersionsPages(*redshift.DescribeClusterVersionsInput, func(*redshift.DescribeClusterVersionsOutput, bool) bool) error + + DescribeClustersRequest(*redshift.DescribeClustersInput) (*request.Request, *redshift.DescribeClustersOutput) + + DescribeClusters(*redshift.DescribeClustersInput) (*redshift.DescribeClustersOutput, error) + + DescribeClustersPages(*redshift.DescribeClustersInput, func(*redshift.DescribeClustersOutput, bool) bool) error + + DescribeDefaultClusterParametersRequest(*redshift.DescribeDefaultClusterParametersInput) (*request.Request, *redshift.DescribeDefaultClusterParametersOutput) + + DescribeDefaultClusterParameters(*redshift.DescribeDefaultClusterParametersInput) (*redshift.DescribeDefaultClusterParametersOutput, error) + + DescribeDefaultClusterParametersPages(*redshift.DescribeDefaultClusterParametersInput, func(*redshift.DescribeDefaultClusterParametersOutput, bool) bool) error + + DescribeEventCategoriesRequest(*redshift.DescribeEventCategoriesInput) (*request.Request, *redshift.DescribeEventCategoriesOutput) + + DescribeEventCategories(*redshift.DescribeEventCategoriesInput) (*redshift.DescribeEventCategoriesOutput, error) + + DescribeEventSubscriptionsRequest(*redshift.DescribeEventSubscriptionsInput) (*request.Request, *redshift.DescribeEventSubscriptionsOutput) + + DescribeEventSubscriptions(*redshift.DescribeEventSubscriptionsInput) (*redshift.DescribeEventSubscriptionsOutput, error) + + DescribeEventSubscriptionsPages(*redshift.DescribeEventSubscriptionsInput, func(*redshift.DescribeEventSubscriptionsOutput, bool) bool) error + 
+ DescribeEventsRequest(*redshift.DescribeEventsInput) (*request.Request, *redshift.DescribeEventsOutput) + + DescribeEvents(*redshift.DescribeEventsInput) (*redshift.DescribeEventsOutput, error) + + DescribeEventsPages(*redshift.DescribeEventsInput, func(*redshift.DescribeEventsOutput, bool) bool) error + + DescribeHsmClientCertificatesRequest(*redshift.DescribeHsmClientCertificatesInput) (*request.Request, *redshift.DescribeHsmClientCertificatesOutput) + + DescribeHsmClientCertificates(*redshift.DescribeHsmClientCertificatesInput) (*redshift.DescribeHsmClientCertificatesOutput, error) + + DescribeHsmClientCertificatesPages(*redshift.DescribeHsmClientCertificatesInput, func(*redshift.DescribeHsmClientCertificatesOutput, bool) bool) error + + DescribeHsmConfigurationsRequest(*redshift.DescribeHsmConfigurationsInput) (*request.Request, *redshift.DescribeHsmConfigurationsOutput) + + DescribeHsmConfigurations(*redshift.DescribeHsmConfigurationsInput) (*redshift.DescribeHsmConfigurationsOutput, error) + + DescribeHsmConfigurationsPages(*redshift.DescribeHsmConfigurationsInput, func(*redshift.DescribeHsmConfigurationsOutput, bool) bool) error + + DescribeLoggingStatusRequest(*redshift.DescribeLoggingStatusInput) (*request.Request, *redshift.LoggingStatus) + + DescribeLoggingStatus(*redshift.DescribeLoggingStatusInput) (*redshift.LoggingStatus, error) + + DescribeOrderableClusterOptionsRequest(*redshift.DescribeOrderableClusterOptionsInput) (*request.Request, *redshift.DescribeOrderableClusterOptionsOutput) + + DescribeOrderableClusterOptions(*redshift.DescribeOrderableClusterOptionsInput) (*redshift.DescribeOrderableClusterOptionsOutput, error) + + DescribeOrderableClusterOptionsPages(*redshift.DescribeOrderableClusterOptionsInput, func(*redshift.DescribeOrderableClusterOptionsOutput, bool) bool) error + + DescribeReservedNodeOfferingsRequest(*redshift.DescribeReservedNodeOfferingsInput) (*request.Request, *redshift.DescribeReservedNodeOfferingsOutput) + + 
DescribeReservedNodeOfferings(*redshift.DescribeReservedNodeOfferingsInput) (*redshift.DescribeReservedNodeOfferingsOutput, error) + + DescribeReservedNodeOfferingsPages(*redshift.DescribeReservedNodeOfferingsInput, func(*redshift.DescribeReservedNodeOfferingsOutput, bool) bool) error + + DescribeReservedNodesRequest(*redshift.DescribeReservedNodesInput) (*request.Request, *redshift.DescribeReservedNodesOutput) + + DescribeReservedNodes(*redshift.DescribeReservedNodesInput) (*redshift.DescribeReservedNodesOutput, error) + + DescribeReservedNodesPages(*redshift.DescribeReservedNodesInput, func(*redshift.DescribeReservedNodesOutput, bool) bool) error + + DescribeResizeRequest(*redshift.DescribeResizeInput) (*request.Request, *redshift.DescribeResizeOutput) + + DescribeResize(*redshift.DescribeResizeInput) (*redshift.DescribeResizeOutput, error) + + DescribeSnapshotCopyGrantsRequest(*redshift.DescribeSnapshotCopyGrantsInput) (*request.Request, *redshift.DescribeSnapshotCopyGrantsOutput) + + DescribeSnapshotCopyGrants(*redshift.DescribeSnapshotCopyGrantsInput) (*redshift.DescribeSnapshotCopyGrantsOutput, error) + + DescribeTagsRequest(*redshift.DescribeTagsInput) (*request.Request, *redshift.DescribeTagsOutput) + + DescribeTags(*redshift.DescribeTagsInput) (*redshift.DescribeTagsOutput, error) + + DisableLoggingRequest(*redshift.DisableLoggingInput) (*request.Request, *redshift.LoggingStatus) + + DisableLogging(*redshift.DisableLoggingInput) (*redshift.LoggingStatus, error) + + DisableSnapshotCopyRequest(*redshift.DisableSnapshotCopyInput) (*request.Request, *redshift.DisableSnapshotCopyOutput) + + DisableSnapshotCopy(*redshift.DisableSnapshotCopyInput) (*redshift.DisableSnapshotCopyOutput, error) + + EnableLoggingRequest(*redshift.EnableLoggingInput) (*request.Request, *redshift.LoggingStatus) + + EnableLogging(*redshift.EnableLoggingInput) (*redshift.LoggingStatus, error) + + EnableSnapshotCopyRequest(*redshift.EnableSnapshotCopyInput) (*request.Request, 
*redshift.EnableSnapshotCopyOutput) + + EnableSnapshotCopy(*redshift.EnableSnapshotCopyInput) (*redshift.EnableSnapshotCopyOutput, error) + + ModifyClusterRequest(*redshift.ModifyClusterInput) (*request.Request, *redshift.ModifyClusterOutput) + + ModifyCluster(*redshift.ModifyClusterInput) (*redshift.ModifyClusterOutput, error) + + ModifyClusterParameterGroupRequest(*redshift.ModifyClusterParameterGroupInput) (*request.Request, *redshift.ClusterParameterGroupNameMessage) + + ModifyClusterParameterGroup(*redshift.ModifyClusterParameterGroupInput) (*redshift.ClusterParameterGroupNameMessage, error) + + ModifyClusterSubnetGroupRequest(*redshift.ModifyClusterSubnetGroupInput) (*request.Request, *redshift.ModifyClusterSubnetGroupOutput) + + ModifyClusterSubnetGroup(*redshift.ModifyClusterSubnetGroupInput) (*redshift.ModifyClusterSubnetGroupOutput, error) + + ModifyEventSubscriptionRequest(*redshift.ModifyEventSubscriptionInput) (*request.Request, *redshift.ModifyEventSubscriptionOutput) + + ModifyEventSubscription(*redshift.ModifyEventSubscriptionInput) (*redshift.ModifyEventSubscriptionOutput, error) + + ModifySnapshotCopyRetentionPeriodRequest(*redshift.ModifySnapshotCopyRetentionPeriodInput) (*request.Request, *redshift.ModifySnapshotCopyRetentionPeriodOutput) + + ModifySnapshotCopyRetentionPeriod(*redshift.ModifySnapshotCopyRetentionPeriodInput) (*redshift.ModifySnapshotCopyRetentionPeriodOutput, error) + + PurchaseReservedNodeOfferingRequest(*redshift.PurchaseReservedNodeOfferingInput) (*request.Request, *redshift.PurchaseReservedNodeOfferingOutput) + + PurchaseReservedNodeOffering(*redshift.PurchaseReservedNodeOfferingInput) (*redshift.PurchaseReservedNodeOfferingOutput, error) + + RebootClusterRequest(*redshift.RebootClusterInput) (*request.Request, *redshift.RebootClusterOutput) + + RebootCluster(*redshift.RebootClusterInput) (*redshift.RebootClusterOutput, error) + + ResetClusterParameterGroupRequest(*redshift.ResetClusterParameterGroupInput) (*request.Request, 
*redshift.ClusterParameterGroupNameMessage) + + ResetClusterParameterGroup(*redshift.ResetClusterParameterGroupInput) (*redshift.ClusterParameterGroupNameMessage, error) + + RestoreFromClusterSnapshotRequest(*redshift.RestoreFromClusterSnapshotInput) (*request.Request, *redshift.RestoreFromClusterSnapshotOutput) + + RestoreFromClusterSnapshot(*redshift.RestoreFromClusterSnapshotInput) (*redshift.RestoreFromClusterSnapshotOutput, error) + + RevokeClusterSecurityGroupIngressRequest(*redshift.RevokeClusterSecurityGroupIngressInput) (*request.Request, *redshift.RevokeClusterSecurityGroupIngressOutput) + + RevokeClusterSecurityGroupIngress(*redshift.RevokeClusterSecurityGroupIngressInput) (*redshift.RevokeClusterSecurityGroupIngressOutput, error) + + RevokeSnapshotAccessRequest(*redshift.RevokeSnapshotAccessInput) (*request.Request, *redshift.RevokeSnapshotAccessOutput) + + RevokeSnapshotAccess(*redshift.RevokeSnapshotAccessInput) (*redshift.RevokeSnapshotAccessOutput, error) + + RotateEncryptionKeyRequest(*redshift.RotateEncryptionKeyInput) (*request.Request, *redshift.RotateEncryptionKeyOutput) + + RotateEncryptionKey(*redshift.RotateEncryptionKeyInput) (*redshift.RotateEncryptionKeyOutput, error) +} + +var _ RedshiftAPI = (*redshift.Redshift)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,107 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package redshift + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Overview This is an interface reference for Amazon Redshift. It contains +// documentation for one of the programming or command line interfaces you can +// use to manage Amazon Redshift clusters. Note that Amazon Redshift is asynchronous, +// which means that some interfaces may require techniques, such as polling +// or asynchronous callback handlers, to determine when a command has been applied. +// In this reference, the parameter descriptions indicate whether a change is +// applied immediately, on the next instance reboot, or during the next maintenance +// window. For a summary of the Amazon Redshift cluster management interfaces, +// go to Using the Amazon Redshift Management Interfaces (http://docs.aws.amazon.com/redshift/latest/mgmt/using-aws-sdk.html). +// +// Amazon Redshift manages all the work of setting up, operating, and scaling +// a data warehouse: provisioning capacity, monitoring and backing up the cluster, +// and applying patches and upgrades to the Amazon Redshift engine. You can +// focus on using your data to acquire new insights for your business and customers. +// +// If you are a first-time user of Amazon Redshift, we recommend that you begin +// by reading the The Amazon Redshift Getting Started Guide (http://docs.aws.amazon.com/redshift/latest/gsg/getting-started.html) +// +// If you are a database developer, the Amazon Redshift Database Developer +// Guide (http://docs.aws.amazon.com/redshift/latest/dg/welcome.html) explains +// how to design, build, query, and maintain the databases that make up your +// data warehouse. +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type Redshift struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "redshift" + +// New creates a new instance of the Redshift client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Redshift client from just a session. +// svc := redshift.New(mySession) +// +// // Create a Redshift client with additional configuration +// svc := redshift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Redshift { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Redshift { + svc := &Redshift{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-12-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Redshift operation and runs any +// custom request initialization. 
+func (c *Redshift) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/redshift/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,141 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package redshift + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *Redshift) WaitUntilClusterAvailable(input *DescribeClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeClusters", + Delay: 60, + MaxAttempts: 30, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Clusters[].ClusterStatus", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Clusters[].ClusterStatus", + Expected: "deleting", + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "ClusterNotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *Redshift) WaitUntilClusterDeleted(input *DescribeClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeClusters", + Delay: 60, + MaxAttempts: 30, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "error", + Argument: "", + Expected: "ClusterNotFound", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Clusters[].ClusterStatus", + Expected: "creating", + }, + 
{ + State: "failure", + Matcher: "pathList", + Argument: "Clusters[].ClusterStatus", + Expected: "pathAny", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *Redshift) WaitUntilClusterRestored(input *DescribeClustersInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeClusters", + Delay: 60, + MaxAttempts: 30, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Clusters[].RestoreStatus.Status", + Expected: "completed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Clusters[].ClusterStatus", + Expected: "deleting", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *Redshift) WaitUntilSnapshotAvailable(input *DescribeClusterSnapshotsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeClusterSnapshots", + Delay: 15, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Snapshots[].Status", + Expected: "available", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Snapshots[].Status", + Expected: "failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Snapshots[].Status", + Expected: "deleted", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,5646 @@ +// THIS FILE IS AUTOMATICALLY 
GENERATED. DO NOT EDIT. + +// Package route53 provides a client for Amazon Route 53. +package route53 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssociateVPCWithHostedZone = "AssociateVPCWithHostedZone" + +// AssociateVPCWithHostedZoneRequest generates a request for the AssociateVPCWithHostedZone operation. +func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHostedZoneInput) (req *request.Request, output *AssociateVPCWithHostedZoneOutput) { + op := &request.Operation{ + Name: opAssociateVPCWithHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/associatevpc", + } + + if input == nil { + input = &AssociateVPCWithHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &AssociateVPCWithHostedZoneOutput{} + req.Data = output + return +} + +// This action associates a VPC with an hosted zone. +// +// To associate a VPC with an hosted zone, send a POST request to the 2013-04-01/hostedzone/hosted +// zone ID/associatevpc resource. The request body must include an XML document +// with a AssociateVPCWithHostedZoneRequest element. The response returns the +// AssociateVPCWithHostedZoneResponse element that contains ChangeInfo for you +// to track the progress of the AssociateVPCWithHostedZoneRequest you made. +// See GetChange operation for how to track the progress of your change. +func (c *Route53) AssociateVPCWithHostedZone(input *AssociateVPCWithHostedZoneInput) (*AssociateVPCWithHostedZoneOutput, error) { + req, out := c.AssociateVPCWithHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opChangeResourceRecordSets = "ChangeResourceRecordSets" + +// ChangeResourceRecordSetsRequest generates a request for the ChangeResourceRecordSets operation. 
+func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSetsInput) (req *request.Request, output *ChangeResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opChangeResourceRecordSets, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset/", + } + + if input == nil { + input = &ChangeResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeResourceRecordSetsOutput{} + req.Data = output + return +} + +// Use this action to create or change your authoritative DNS information. To +// use this action, send a POST request to the 2013-04-01/hostedzone/hosted +// Zone ID/rrset resource. The request body must include an XML document with +// a ChangeResourceRecordSetsRequest element. +// +// Changes are a list of change items and are considered transactional. For +// more information on transactional changes, also known as change batches, +// see POST ChangeResourceRecordSets (http://docs.aws.amazon.com/Route53/latest/APIReference/) +// in the Amazon Route 53 API Reference. +// +// Due to the nature of transactional changes, you cannot delete the same resource +// record set more than once in a single change batch. If you attempt to delete +// the same change batch more than once, Amazon Route 53 returns an InvalidChangeBatch +// error. In response to a ChangeResourceRecordSets request, your DNS data is +// changed on all Amazon Route 53 DNS servers. Initially, the status of a change +// is PENDING. This means the change has not yet propagated to all the authoritative +// Amazon Route 53 DNS servers. When the change is propagated to all hosts, +// the change returns a status of INSYNC. +// +// Note the following limitations on a ChangeResourceRecordSets request: +// +// A request cannot contain more than 100 Change elements. A request cannot +// contain more than 1000 ResourceRecord elements. 
The sum of the number of +// characters (including spaces) in all Value elements in a request cannot exceed +// 32,000 characters. +func (c *Route53) ChangeResourceRecordSets(input *ChangeResourceRecordSetsInput) (*ChangeResourceRecordSetsOutput, error) { + req, out := c.ChangeResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +const opChangeTagsForResource = "ChangeTagsForResource" + +// ChangeTagsForResourceRequest generates a request for the ChangeTagsForResource operation. +func (c *Route53) ChangeTagsForResourceRequest(input *ChangeTagsForResourceInput) (req *request.Request, output *ChangeTagsForResourceOutput) { + op := &request.Operation{ + Name: opChangeTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ChangeTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ChangeTagsForResource(input *ChangeTagsForResourceInput) (*ChangeTagsForResourceOutput, error) { + req, out := c.ChangeTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opCreateHealthCheck = "CreateHealthCheck" + +// CreateHealthCheckRequest generates a request for the CreateHealthCheck operation. +func (c *Route53) CreateHealthCheckRequest(input *CreateHealthCheckInput) (req *request.Request, output *CreateHealthCheckOutput) { + op := &request.Operation{ + Name: opCreateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck", + } + + if input == nil { + input = &CreateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHealthCheckOutput{} + req.Data = output + return +} + +// This action creates a new health check. +// +// To create a new health check, send a POST request to the 2013-04-01/healthcheck +// resource. The request body must include an XML document with a CreateHealthCheckRequest +// element. 
The response returns the CreateHealthCheckResponse element that +// contains metadata about the health check. +func (c *Route53) CreateHealthCheck(input *CreateHealthCheckInput) (*CreateHealthCheckOutput, error) { + req, out := c.CreateHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opCreateHostedZone = "CreateHostedZone" + +// CreateHostedZoneRequest generates a request for the CreateHostedZone operation. +func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *request.Request, output *CreateHostedZoneOutput) { + op := &request.Operation{ + Name: opCreateHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone", + } + + if input == nil { + input = &CreateHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateHostedZoneOutput{} + req.Data = output + return +} + +// This action creates a new hosted zone. +// +// To create a new hosted zone, send a POST request to the 2013-04-01/hostedzone +// resource. The request body must include an XML document with a CreateHostedZoneRequest +// element. The response returns the CreateHostedZoneResponse element that contains +// metadata about the hosted zone. +// +// Amazon Route 53 automatically creates a default SOA record and four NS records +// for the zone. The NS records in the hosted zone are the name servers you +// give your registrar to delegate your domain to. For more information about +// SOA and NS records, see NS and SOA Records that Amazon Route 53 Creates for +// a Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) +// in the Amazon Route 53 Developer Guide. +// +// When you create a zone, its initial status is PENDING. This means that it +// is not yet available on all DNS servers. The status of the zone changes to +// INSYNC when the NS and SOA records are available on all Amazon Route 53 DNS +// servers. 
+// +// When trying to create a hosted zone using a reusable delegation set, you +// could specify an optional DelegationSetId, and Route53 would assign those +// 4 NS records for the zone, instead of alloting a new one. +func (c *Route53) CreateHostedZone(input *CreateHostedZoneInput) (*CreateHostedZoneOutput, error) { + req, out := c.CreateHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opCreateReusableDelegationSet = "CreateReusableDelegationSet" + +// CreateReusableDelegationSetRequest generates a request for the CreateReusableDelegationSet operation. +func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelegationSetInput) (req *request.Request, output *CreateReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opCreateReusableDelegationSet, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &CreateReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action creates a reusable delegationSet. +// +// To create a new reusable delegationSet, send a POST request to the 2013-04-01/delegationset +// resource. The request body must include an XML document with a CreateReusableDelegationSetRequest +// element. The response returns the CreateReusableDelegationSetResponse element +// that contains metadata about the delegationSet. +// +// If the optional parameter HostedZoneId is specified, it marks the delegationSet +// associated with that particular hosted zone as reusable. +func (c *Route53) CreateReusableDelegationSet(input *CreateReusableDelegationSetInput) (*CreateReusableDelegationSetOutput, error) { + req, out := c.CreateReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicy = "CreateTrafficPolicy" + +// CreateTrafficPolicyRequest generates a request for the CreateTrafficPolicy operation. 
+func (c *Route53) CreateTrafficPolicyRequest(input *CreateTrafficPolicyInput) (req *request.Request, output *CreateTrafficPolicyOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicy, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy", + } + + if input == nil { + input = &CreateTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyOutput{} + req.Data = output + return +} + +// Creates a traffic policy, which you use to create multiple DNS resource record +// sets for one domain name (such as example.com) or one subdomain name (such +// as www.example.com). +// +// To create a traffic policy, send a POST request to the 2013-04-01/trafficpolicy +// resource. The request body must include an XML document with a CreateTrafficPolicyRequest +// element. The response includes the CreateTrafficPolicyResponse element, which +// contains information about the new traffic policy. +func (c *Route53) CreateTrafficPolicy(input *CreateTrafficPolicyInput) (*CreateTrafficPolicyOutput, error) { + req, out := c.CreateTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicyInstance = "CreateTrafficPolicyInstance" + +// CreateTrafficPolicyInstanceRequest generates a request for the CreateTrafficPolicyInstance operation. +func (c *Route53) CreateTrafficPolicyInstanceRequest(input *CreateTrafficPolicyInstanceInput) (req *request.Request, output *CreateTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicyInstance, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicyinstance", + } + + if input == nil { + input = &CreateTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Creates resource record sets in a specified hosted zone based on the settings +// in a specified traffic policy version. 
In addition, CreateTrafficPolicyInstance +// associates the resource record sets with a specified domain name (such as +// example.com) or subdomain name (such as www.example.com). Amazon Route 53 +// responds to DNS queries for the domain or subdomain name by using the resource +// record sets that CreateTrafficPolicyInstance created. +// +// To create a traffic policy instance, send a POST request to the 2013-04-01/trafficpolicyinstance +// resource. The request body must include an XML document with a CreateTrafficPolicyRequest +// element. The response returns the CreateTrafficPolicyInstanceResponse element, +// which contains information about the traffic policy instance. +func (c *Route53) CreateTrafficPolicyInstance(input *CreateTrafficPolicyInstanceInput) (*CreateTrafficPolicyInstanceOutput, error) { + req, out := c.CreateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opCreateTrafficPolicyVersion = "CreateTrafficPolicyVersion" + +// CreateTrafficPolicyVersionRequest generates a request for the CreateTrafficPolicyVersion operation. +func (c *Route53) CreateTrafficPolicyVersionRequest(input *CreateTrafficPolicyVersionInput) (req *request.Request, output *CreateTrafficPolicyVersionOutput) { + op := &request.Operation{ + Name: opCreateTrafficPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}", + } + + if input == nil { + input = &CreateTrafficPolicyVersionInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTrafficPolicyVersionOutput{} + req.Data = output + return +} + +// Creates a new version of an existing traffic policy. When you create a new +// version of a traffic policy, you specify the ID of the traffic policy that +// you want to update and a JSON-formatted document that describes the new version. 
+// +// You use traffic policies to create multiple DNS resource record sets for +// one domain name (such as example.com) or one subdomain name (such as www.example.com). +// +// To create a new version, send a POST request to the 2013-04-01/trafficpolicy/ +// resource. The request body includes an XML document with a CreateTrafficPolicyVersionRequest +// element. The response returns the CreateTrafficPolicyVersionResponse element, +// which contains information about the new version of the traffic policy. +func (c *Route53) CreateTrafficPolicyVersion(input *CreateTrafficPolicyVersionInput) (*CreateTrafficPolicyVersionOutput, error) { + req, out := c.CreateTrafficPolicyVersionRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHealthCheck = "DeleteHealthCheck" + +// DeleteHealthCheckRequest generates a request for the DeleteHealthCheck operation. +func (c *Route53) DeleteHealthCheckRequest(input *DeleteHealthCheckInput) (req *request.Request, output *DeleteHealthCheckOutput) { + op := &request.Operation{ + Name: opDeleteHealthCheck, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &DeleteHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHealthCheckOutput{} + req.Data = output + return +} + +// This action deletes a health check. To delete a health check, send a DELETE +// request to the 2013-04-01/healthcheck/health check ID resource. +// +// You can delete a health check only if there are no resource record sets +// associated with this health check. If resource record sets are associated +// with this health check, you must disassociate them before you can delete +// your health check. If you try to delete a health check that is associated +// with resource record sets, Amazon Route 53 will deny your request with a +// HealthCheckInUse error. 
For information about disassociating the records +// from your health check, see ChangeResourceRecordSets. +func (c *Route53) DeleteHealthCheck(input *DeleteHealthCheckInput) (*DeleteHealthCheckOutput, error) { + req, out := c.DeleteHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opDeleteHostedZone = "DeleteHostedZone" + +// DeleteHostedZoneRequest generates a request for the DeleteHostedZone operation. +func (c *Route53) DeleteHostedZoneRequest(input *DeleteHostedZoneInput) (req *request.Request, output *DeleteHostedZoneOutput) { + op := &request.Operation{ + Name: opDeleteHostedZone, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &DeleteHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteHostedZoneOutput{} + req.Data = output + return +} + +// This action deletes a hosted zone. To delete a hosted zone, send a DELETE +// request to the 2013-04-01/hostedzone/hosted zone ID resource. +// +// For more information about deleting a hosted zone, see Deleting a Hosted +// Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DeleteHostedZone.html) +// in the Amazon Route 53 Developer Guide. +// +// You can delete a hosted zone only if there are no resource record sets +// other than the default SOA record and NS resource record sets. If your hosted +// zone contains other resource record sets, you must delete them before you +// can delete your hosted zone. If you try to delete a hosted zone that contains +// other resource record sets, Amazon Route 53 will deny your request with a +// HostedZoneNotEmpty error. For information about deleting records from your +// hosted zone, see ChangeResourceRecordSets. 
+func (c *Route53) DeleteHostedZone(input *DeleteHostedZoneInput) (*DeleteHostedZoneOutput, error) { + req, out := c.DeleteHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReusableDelegationSet = "DeleteReusableDelegationSet" + +// DeleteReusableDelegationSetRequest generates a request for the DeleteReusableDelegationSet operation. +func (c *Route53) DeleteReusableDelegationSetRequest(input *DeleteReusableDelegationSetInput) (req *request.Request, output *DeleteReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opDeleteReusableDelegationSet, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &DeleteReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReusableDelegationSetOutput{} + req.Data = output + return +} + +// This action deletes a reusable delegation set. To delete a reusable delegation +// set, send a DELETE request to the 2013-04-01/delegationset/delegation set +// ID resource. +// +// You can delete a reusable delegation set only if there are no associated +// hosted zones. If your reusable delegation set contains associated hosted +// zones, you must delete them before you can delete your reusable delegation +// set. If you try to delete a reusable delegation set that contains associated +// hosted zones, Amazon Route 53 will deny your request with a DelegationSetInUse +// error. +func (c *Route53) DeleteReusableDelegationSet(input *DeleteReusableDelegationSetInput) (*DeleteReusableDelegationSetOutput, error) { + req, out := c.DeleteReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrafficPolicy = "DeleteTrafficPolicy" + +// DeleteTrafficPolicyRequest generates a request for the DeleteTrafficPolicy operation. 
+func (c *Route53) DeleteTrafficPolicyRequest(input *DeleteTrafficPolicyInput) (req *request.Request, output *DeleteTrafficPolicyOutput) { + op := &request.Operation{ + Name: opDeleteTrafficPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &DeleteTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrafficPolicyOutput{} + req.Data = output + return +} + +// Deletes a traffic policy. To delete a traffic policy, send a DELETE request +// to the 2013-04-01/trafficpolicy resource. +func (c *Route53) DeleteTrafficPolicy(input *DeleteTrafficPolicyInput) (*DeleteTrafficPolicyOutput, error) { + req, out := c.DeleteTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTrafficPolicyInstance = "DeleteTrafficPolicyInstance" + +// DeleteTrafficPolicyInstanceRequest generates a request for the DeleteTrafficPolicyInstance operation. +func (c *Route53) DeleteTrafficPolicyInstanceRequest(input *DeleteTrafficPolicyInstanceInput) (req *request.Request, output *DeleteTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opDeleteTrafficPolicyInstance, + HTTPMethod: "DELETE", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &DeleteTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Deletes a traffic policy instance and all of the resource record sets that +// Amazon Route 53 created when you created the instance. +// +// To delete a traffic policy instance, send a DELETE request to the 2013-04-01/trafficpolicy/traffic +// policy instance ID resource. +// +// When you delete a traffic policy instance, Amazon Route 53 also deletes +// all of the resource record sets that were created when you created the traffic +// policy instance. 
+func (c *Route53) DeleteTrafficPolicyInstance(input *DeleteTrafficPolicyInstanceInput) (*DeleteTrafficPolicyInstanceOutput, error) { + req, out := c.DeleteTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opDisassociateVPCFromHostedZone = "DisassociateVPCFromHostedZone" + +// DisassociateVPCFromHostedZoneRequest generates a request for the DisassociateVPCFromHostedZone operation. +func (c *Route53) DisassociateVPCFromHostedZoneRequest(input *DisassociateVPCFromHostedZoneInput) (req *request.Request, output *DisassociateVPCFromHostedZoneOutput) { + op := &request.Operation{ + Name: opDisassociateVPCFromHostedZone, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}/disassociatevpc", + } + + if input == nil { + input = &DisassociateVPCFromHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &DisassociateVPCFromHostedZoneOutput{} + req.Data = output + return +} + +// This action disassociates a VPC from an hosted zone. +// +// To disassociate a VPC to a hosted zone, send a POST request to the 2013-04-01/hostedzone/hosted +// zone ID/disassociatevpc resource. The request body must include an XML document +// with a DisassociateVPCFromHostedZoneRequest element. The response returns +// the DisassociateVPCFromHostedZoneResponse element that contains ChangeInfo +// for you to track the progress of the DisassociateVPCFromHostedZoneRequest +// you made. See GetChange operation for how to track the progress of your change. +func (c *Route53) DisassociateVPCFromHostedZone(input *DisassociateVPCFromHostedZoneInput) (*DisassociateVPCFromHostedZoneOutput, error) { + req, out := c.DisassociateVPCFromHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opGetChange = "GetChange" + +// GetChangeRequest generates a request for the GetChange operation. 
+func (c *Route53) GetChangeRequest(input *GetChangeInput) (req *request.Request, output *GetChangeOutput) { + op := &request.Operation{ + Name: opGetChange, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/change/{Id}", + } + + if input == nil { + input = &GetChangeInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeOutput{} + req.Data = output + return +} + +// This action returns the current status of a change batch request. The status +// is one of the following values: +// +// - PENDING indicates that the changes in this request have not replicated +// to all Amazon Route 53 DNS servers. This is the initial status of all change +// batch requests. +// +// - INSYNC indicates that the changes have replicated to all Amazon Route +// 53 DNS servers. +func (c *Route53) GetChange(input *GetChangeInput) (*GetChangeOutput, error) { + req, out := c.GetChangeRequest(input) + err := req.Send() + return out, err +} + +const opGetChangeDetails = "GetChangeDetails" + +// GetChangeDetailsRequest generates a request for the GetChangeDetails operation. +func (c *Route53) GetChangeDetailsRequest(input *GetChangeDetailsInput) (req *request.Request, output *GetChangeDetailsOutput) { + op := &request.Operation{ + Name: opGetChangeDetails, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/changedetails/{Id}", + } + + if input == nil { + input = &GetChangeDetailsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeDetailsOutput{} + req.Data = output + return +} + +// This action returns the status and changes of a change batch request. +func (c *Route53) GetChangeDetails(input *GetChangeDetailsInput) (*GetChangeDetailsOutput, error) { + req, out := c.GetChangeDetailsRequest(input) + err := req.Send() + return out, err +} + +const opGetCheckerIpRanges = "GetCheckerIpRanges" + +// GetCheckerIpRangesRequest generates a request for the GetCheckerIpRanges operation. 
+func (c *Route53) GetCheckerIpRangesRequest(input *GetCheckerIpRangesInput) (req *request.Request, output *GetCheckerIpRangesOutput) { + op := &request.Operation{ + Name: opGetCheckerIpRanges, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/checkeripranges", + } + + if input == nil { + input = &GetCheckerIpRangesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCheckerIpRangesOutput{} + req.Data = output + return +} + +// To retrieve a list of the IP ranges used by Amazon Route 53 health checkers +// to check the health of your resources, send a GET request to the 2013-04-01/checkeripranges +// resource. You can use these IP addresses to configure router and firewall +// rules to allow health checkers to check the health of your resources. +func (c *Route53) GetCheckerIpRanges(input *GetCheckerIpRangesInput) (*GetCheckerIpRangesOutput, error) { + req, out := c.GetCheckerIpRangesRequest(input) + err := req.Send() + return out, err +} + +const opGetGeoLocation = "GetGeoLocation" + +// GetGeoLocationRequest generates a request for the GetGeoLocation operation. +func (c *Route53) GetGeoLocationRequest(input *GetGeoLocationInput) (req *request.Request, output *GetGeoLocationOutput) { + op := &request.Operation{ + Name: opGetGeoLocation, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocation", + } + + if input == nil { + input = &GetGeoLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetGeoLocationOutput{} + req.Data = output + return +} + +// To retrieve a single geo location, send a GET request to the 2013-04-01/geolocation +// resource with one of these options: continentcode | countrycode | countrycode +// and subdivisioncode. 
+func (c *Route53) GetGeoLocation(input *GetGeoLocationInput) (*GetGeoLocationOutput, error) { + req, out := c.GetGeoLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheck = "GetHealthCheck" + +// GetHealthCheckRequest generates a request for the GetHealthCheck operation. +func (c *Route53) GetHealthCheckRequest(input *GetHealthCheckInput) (req *request.Request, output *GetHealthCheckOutput) { + op := &request.Operation{ + Name: opGetHealthCheck, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &GetHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckOutput{} + req.Data = output + return +} + +// To retrieve the health check, send a GET request to the 2013-04-01/healthcheck/health +// check ID resource. +func (c *Route53) GetHealthCheck(input *GetHealthCheckInput) (*GetHealthCheckOutput, error) { + req, out := c.GetHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckCount = "GetHealthCheckCount" + +// GetHealthCheckCountRequest generates a request for the GetHealthCheckCount operation. +func (c *Route53) GetHealthCheckCountRequest(input *GetHealthCheckCountInput) (req *request.Request, output *GetHealthCheckCountOutput) { + op := &request.Operation{ + Name: opGetHealthCheckCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheckcount", + } + + if input == nil { + input = &GetHealthCheckCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your health checks, send a GET request to the +// 2013-04-01/healthcheckcount resource. 
+func (c *Route53) GetHealthCheckCount(input *GetHealthCheckCountInput) (*GetHealthCheckCountOutput, error) { + req, out := c.GetHealthCheckCountRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckLastFailureReason = "GetHealthCheckLastFailureReason" + +// GetHealthCheckLastFailureReasonRequest generates a request for the GetHealthCheckLastFailureReason operation. +func (c *Route53) GetHealthCheckLastFailureReasonRequest(input *GetHealthCheckLastFailureReasonInput) (req *request.Request, output *GetHealthCheckLastFailureReasonOutput) { + op := &request.Operation{ + Name: opGetHealthCheckLastFailureReason, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/lastfailurereason", + } + + if input == nil { + input = &GetHealthCheckLastFailureReasonInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckLastFailureReasonOutput{} + req.Data = output + return +} + +// If you want to learn why a health check is currently failing or why it failed +// most recently (if at all), you can get the failure reason for the most recent +// failure. Send a GET request to the 2013-04-01/healthcheck/health check ID/lastfailurereason +// resource. +func (c *Route53) GetHealthCheckLastFailureReason(input *GetHealthCheckLastFailureReasonInput) (*GetHealthCheckLastFailureReasonOutput, error) { + req, out := c.GetHealthCheckLastFailureReasonRequest(input) + err := req.Send() + return out, err +} + +const opGetHealthCheckStatus = "GetHealthCheckStatus" + +// GetHealthCheckStatusRequest generates a request for the GetHealthCheckStatus operation. 
+func (c *Route53) GetHealthCheckStatusRequest(input *GetHealthCheckStatusInput) (req *request.Request, output *GetHealthCheckStatusOutput) { + op := &request.Operation{ + Name: opGetHealthCheckStatus, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}/status", + } + + if input == nil { + input = &GetHealthCheckStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHealthCheckStatusOutput{} + req.Data = output + return +} + +// To retrieve the health check status, send a GET request to the 2013-04-01/healthcheck/health +// check ID/status resource. You can use this call to get a health check's current +// status. +func (c *Route53) GetHealthCheckStatus(input *GetHealthCheckStatusInput) (*GetHealthCheckStatusOutput, error) { + req, out := c.GetHealthCheckStatusRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZone = "GetHostedZone" + +// GetHostedZoneRequest generates a request for the GetHostedZone operation. +func (c *Route53) GetHostedZoneRequest(input *GetHostedZoneInput) (req *request.Request, output *GetHostedZoneOutput) { + op := &request.Operation{ + Name: opGetHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &GetHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneOutput{} + req.Data = output + return +} + +// To retrieve the delegation set for a hosted zone, send a GET request to the +// 2013-04-01/hostedzone/hosted zone ID resource. The delegation set is the +// four Amazon Route 53 name servers that were assigned to the hosted zone when +// you created it. +func (c *Route53) GetHostedZone(input *GetHostedZoneInput) (*GetHostedZoneOutput, error) { + req, out := c.GetHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opGetHostedZoneCount = "GetHostedZoneCount" + +// GetHostedZoneCountRequest generates a request for the GetHostedZoneCount operation. 
+func (c *Route53) GetHostedZoneCountRequest(input *GetHostedZoneCountInput) (req *request.Request, output *GetHostedZoneCountOutput) { + op := &request.Operation{ + Name: opGetHostedZoneCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonecount", + } + + if input == nil { + input = &GetHostedZoneCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetHostedZoneCountOutput{} + req.Data = output + return +} + +// To retrieve a count of all your hosted zones, send a GET request to the 2013-04-01/hostedzonecount +// resource. +func (c *Route53) GetHostedZoneCount(input *GetHostedZoneCountInput) (*GetHostedZoneCountOutput, error) { + req, out := c.GetHostedZoneCountRequest(input) + err := req.Send() + return out, err +} + +const opGetReusableDelegationSet = "GetReusableDelegationSet" + +// GetReusableDelegationSetRequest generates a request for the GetReusableDelegationSet operation. +func (c *Route53) GetReusableDelegationSetRequest(input *GetReusableDelegationSetInput) (req *request.Request, output *GetReusableDelegationSetOutput) { + op := &request.Operation{ + Name: opGetReusableDelegationSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset/{Id}", + } + + if input == nil { + input = &GetReusableDelegationSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetReusableDelegationSetOutput{} + req.Data = output + return +} + +// To retrieve the reusable delegation set, send a GET request to the 2013-04-01/delegationset/delegation +// set ID resource. +func (c *Route53) GetReusableDelegationSet(input *GetReusableDelegationSetInput) (*GetReusableDelegationSetOutput, error) { + req, out := c.GetReusableDelegationSetRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicy = "GetTrafficPolicy" + +// GetTrafficPolicyRequest generates a request for the GetTrafficPolicy operation. 
+func (c *Route53) GetTrafficPolicyRequest(input *GetTrafficPolicyInput) (req *request.Request, output *GetTrafficPolicyOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicy, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &GetTrafficPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyOutput{} + req.Data = output + return +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the 2013-04-01/trafficpolicy resource. +func (c *Route53) GetTrafficPolicy(input *GetTrafficPolicyInput) (*GetTrafficPolicyOutput, error) { + req, out := c.GetTrafficPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstance = "GetTrafficPolicyInstance" + +// GetTrafficPolicyInstanceRequest generates a request for the GetTrafficPolicyInstance operation. +func (c *Route53) GetTrafficPolicyInstanceRequest(input *GetTrafficPolicyInstanceInput) (req *request.Request, output *GetTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstance, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &GetTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Gets information about a specified traffic policy instance. +// +// To get information about the traffic policy instance, send a GET request +// to the 2013-04-01/trafficpolicyinstance resource. +// +// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance +// request, there's a brief delay while Amazon Route 53 creates the resource +// record sets that are specified in the traffic policy definition. For more +// information, see the State response element. 
+func (c *Route53) GetTrafficPolicyInstance(input *GetTrafficPolicyInstanceInput) (*GetTrafficPolicyInstanceOutput, error) { + req, out := c.GetTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +const opGetTrafficPolicyInstanceCount = "GetTrafficPolicyInstanceCount" + +// GetTrafficPolicyInstanceCountRequest generates a request for the GetTrafficPolicyInstanceCount operation. +func (c *Route53) GetTrafficPolicyInstanceCountRequest(input *GetTrafficPolicyInstanceCountInput) (req *request.Request, output *GetTrafficPolicyInstanceCountOutput) { + op := &request.Operation{ + Name: opGetTrafficPolicyInstanceCount, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstancecount", + } + + if input == nil { + input = &GetTrafficPolicyInstanceCountInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTrafficPolicyInstanceCountOutput{} + req.Data = output + return +} + +// Gets the number of traffic policy instances that are associated with the +// current AWS account. +// +// To get the number of traffic policy instances, send a GET request to the +// 2013-04-01/trafficpolicyinstancecount resource. +func (c *Route53) GetTrafficPolicyInstanceCount(input *GetTrafficPolicyInstanceCountInput) (*GetTrafficPolicyInstanceCountOutput, error) { + req, out := c.GetTrafficPolicyInstanceCountRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByHostedZone = "ListChangeBatchesByHostedZone" + +// ListChangeBatchesByHostedZoneRequest generates a request for the ListChangeBatchesByHostedZone operation. 
+func (c *Route53) ListChangeBatchesByHostedZoneRequest(input *ListChangeBatchesByHostedZoneInput) (req *request.Request, output *ListChangeBatchesByHostedZoneOutput) { + op := &request.Operation{ + Name: opListChangeBatchesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/changes", + } + + if input == nil { + input = &ListChangeBatchesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByHostedZoneOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone. +func (c *Route53) ListChangeBatchesByHostedZone(input *ListChangeBatchesByHostedZoneInput) (*ListChangeBatchesByHostedZoneOutput, error) { + req, out := c.ListChangeBatchesByHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opListChangeBatchesByRRSet = "ListChangeBatchesByRRSet" + +// ListChangeBatchesByRRSetRequest generates a request for the ListChangeBatchesByRRSet operation. +func (c *Route53) ListChangeBatchesByRRSetRequest(input *ListChangeBatchesByRRSetInput) (req *request.Request, output *ListChangeBatchesByRRSetOutput) { + op := &request.Operation{ + Name: opListChangeBatchesByRRSet, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrsChanges", + } + + if input == nil { + input = &ListChangeBatchesByRRSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ListChangeBatchesByRRSetOutput{} + req.Data = output + return +} + +// This action gets the list of ChangeBatches in a given time period for a given +// hosted zone and RRSet. +func (c *Route53) ListChangeBatchesByRRSet(input *ListChangeBatchesByRRSetInput) (*ListChangeBatchesByRRSetOutput, error) { + req, out := c.ListChangeBatchesByRRSetRequest(input) + err := req.Send() + return out, err +} + +const opListGeoLocations = "ListGeoLocations" + +// ListGeoLocationsRequest generates a request for the ListGeoLocations operation. 
+func (c *Route53) ListGeoLocationsRequest(input *ListGeoLocationsInput) (req *request.Request, output *ListGeoLocationsOutput) { + op := &request.Operation{ + Name: opListGeoLocations, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/geolocations", + } + + if input == nil { + input = &ListGeoLocationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGeoLocationsOutput{} + req.Data = output + return +} + +// To retrieve a list of supported geo locations, send a GET request to the +// 2013-04-01/geolocations resource. The response to this request includes a +// GeoLocationDetailsList element with zero, one, or multiple GeoLocationDetails +// child elements. The list is sorted by country code, and then subdivision +// code, followed by continents at the end of the list. +// +// By default, the list of geo locations is displayed on a single page. You +// can control the length of the page that is displayed by using the MaxItems +// parameter. If the list is truncated, IsTruncated will be set to true and +// a combination of NextContinentCode, NextCountryCode, NextSubdivisionCode +// will be populated. You can pass these as parameters to StartContinentCode, +// StartCountryCode, StartSubdivisionCode to control the geo location that the +// list begins with. +func (c *Route53) ListGeoLocations(input *ListGeoLocationsInput) (*ListGeoLocationsOutput, error) { + req, out := c.ListGeoLocationsRequest(input) + err := req.Send() + return out, err +} + +const opListHealthChecks = "ListHealthChecks" + +// ListHealthChecksRequest generates a request for the ListHealthChecks operation. 
+func (c *Route53) ListHealthChecksRequest(input *ListHealthChecksInput) (req *request.Request, output *ListHealthChecksOutput) { + op := &request.Operation{ + Name: opListHealthChecks, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/healthcheck", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHealthChecksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHealthChecksOutput{} + req.Data = output + return +} + +// To retrieve a list of your health checks, send a GET request to the 2013-04-01/healthcheck +// resource. The response to this request includes a HealthChecks element with +// zero, one, or multiple HealthCheck child elements. By default, the list of +// health checks is displayed on a single page. You can control the length of +// the page that is displayed by using the MaxItems parameter. You can use the +// Marker parameter to control the health check that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHealthChecks(input *ListHealthChecksInput) (*ListHealthChecksOutput, error) { + req, out := c.ListHealthChecksRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53) ListHealthChecksPages(input *ListHealthChecksInput, fn func(p *ListHealthChecksOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHealthChecksRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHealthChecksOutput), lastPage) + }) +} + +const opListHostedZones = "ListHostedZones" + +// ListHostedZonesRequest generates a request for the ListHostedZones operation. 
+func (c *Route53) ListHostedZonesRequest(input *ListHostedZonesInput) (req *request.Request, output *ListHostedZonesOutput) { + op := &request.Operation{ + Name: opListHostedZones, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListHostedZonesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones, send a GET request to the 2013-04-01/hostedzone +// resource. The response to this request includes a HostedZones element with +// zero, one, or multiple HostedZone child elements. By default, the list of +// hosted zones is displayed on a single page. You can control the length of +// the page that is displayed by using the MaxItems parameter. You can use the +// Marker parameter to control the hosted zone that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZones(input *ListHostedZonesInput) (*ListHostedZonesOutput, error) { + req, out := c.ListHostedZonesRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53) ListHostedZonesPages(input *ListHostedZonesInput, fn func(p *ListHostedZonesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListHostedZonesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListHostedZonesOutput), lastPage) + }) +} + +const opListHostedZonesByName = "ListHostedZonesByName" + +// ListHostedZonesByNameRequest generates a request for the ListHostedZonesByName operation. 
+func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput) (req *request.Request, output *ListHostedZonesByNameOutput) { + op := &request.Operation{ + Name: opListHostedZonesByName, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonesbyname", + } + + if input == nil { + input = &ListHostedZonesByNameInput{} + } + + req = c.newRequest(op, input, output) + output = &ListHostedZonesByNameOutput{} + req.Data = output + return +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the 2013-04-01/hostedzonesbyname resource. The response to this +// request includes a HostedZones element with zero or more HostedZone child +// elements lexicographically ordered by DNS name. By default, the list of hosted +// zones is displayed on a single page. You can control the length of the page +// that is displayed by using the MaxItems parameter. You can use the DNSName +// and HostedZoneId parameters to control the hosted zone that the list begins +// with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +func (c *Route53) ListHostedZonesByName(input *ListHostedZonesByNameInput) (*ListHostedZonesByNameOutput, error) { + req, out := c.ListHostedZonesByNameRequest(input) + err := req.Send() + return out, err +} + +const opListResourceRecordSets = "ListResourceRecordSets" + +// ListResourceRecordSetsRequest generates a request for the ListResourceRecordSets operation. 
+func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInput) (req *request.Request, output *ListResourceRecordSetsOutput) { + op := &request.Operation{ + Name: opListResourceRecordSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzone/{Id}/rrset", + Paginator: &request.Paginator{ + InputTokens: []string{"StartRecordName", "StartRecordType", "StartRecordIdentifier"}, + OutputTokens: []string{"NextRecordName", "NextRecordType", "NextRecordIdentifier"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListResourceRecordSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListResourceRecordSetsOutput{} + req.Data = output + return +} + +// Imagine all the resource record sets in a zone listed out in front of you. +// Imagine them sorted lexicographically first by DNS name (with the labels +// reversed, like "com.amazon.www" for example), and secondarily, lexicographically +// by record type. This operation retrieves at most MaxItems resource record +// sets from this list, in order, starting at a position specified by the Name +// and Type arguments: +// +// If both Name and Type are omitted, this means start the results at the +// first RRSET in the HostedZone. If Name is specified but Type is omitted, +// this means start the results at the first RRSET in the list whose name is +// greater than or equal to Name. If both Name and Type are specified, this +// means start the results at the first RRSET in the list whose name is greater +// than or equal to Name and whose type is greater than or equal to Type. It +// is an error to specify the Type but not the Name. Use ListResourceRecordSets +// to retrieve a single known record set by specifying the record set's name +// and type, and setting MaxItems = 1 +// +// To retrieve all the records in a HostedZone, first pause any processes making +// calls to ChangeResourceRecordSets. 
Initially call ListResourceRecordSets +// without a Name and Type to get the first page of record sets. For subsequent +// calls, set Name and Type to the NextName and NextType values returned by +// the previous response. +// +// In the presence of concurrent ChangeResourceRecordSets calls, there is no +// consistency of results across calls to ListResourceRecordSets. The only way +// to get a consistent multi-page snapshot of all RRSETs in a zone is to stop +// making changes while pagination is in progress. +// +// However, the results from ListResourceRecordSets are consistent within a +// page. If MakeChange calls are taking place concurrently, the result of each +// one will either be completely visible in your results or not at all. You +// will not see partial changes, or changes that do not ultimately succeed. +// (This follows from the fact that MakeChange is atomic) +// +// The results from ListResourceRecordSets are strongly consistent with ChangeResourceRecordSets. +// To be precise, if a single process makes a call to ChangeResourceRecordSets +// and receives a successful response, the effects of that change will be visible +// in a subsequent call to ListResourceRecordSets by that process. 
+func (c *Route53) ListResourceRecordSets(input *ListResourceRecordSetsInput) (*ListResourceRecordSetsOutput, error) { + req, out := c.ListResourceRecordSetsRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53) ListResourceRecordSetsPages(input *ListResourceRecordSetsInput, fn func(p *ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListResourceRecordSetsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListResourceRecordSetsOutput), lastPage) + }) +} + +const opListReusableDelegationSets = "ListReusableDelegationSets" + +// ListReusableDelegationSetsRequest generates a request for the ListReusableDelegationSets operation. +func (c *Route53) ListReusableDelegationSetsRequest(input *ListReusableDelegationSetsInput) (req *request.Request, output *ListReusableDelegationSetsOutput) { + op := &request.Operation{ + Name: opListReusableDelegationSets, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/delegationset", + } + + if input == nil { + input = &ListReusableDelegationSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReusableDelegationSetsOutput{} + req.Data = output + return +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the 2013-04-01/delegationset resource. The response to this request includes +// a DelegationSets element with zero, one, or multiple DelegationSet child +// elements. By default, the list of delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. 
+func (c *Route53) ListReusableDelegationSets(input *ListReusableDelegationSetsInput) (*ListReusableDelegationSetsOutput, error) { + req, out := c.ListReusableDelegationSetsRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +func (c *Route53) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/tags/{ResourceType}/{ResourceId}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResources = "ListTagsForResources" + +// ListTagsForResourcesRequest generates a request for the ListTagsForResources operation. +func (c *Route53) ListTagsForResourcesRequest(input *ListTagsForResourcesInput) (req *request.Request, output *ListTagsForResourcesOutput) { + op := &request.Operation{ + Name: opListTagsForResources, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/tags/{ResourceType}", + } + + if input == nil { + input = &ListTagsForResourcesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourcesOutput{} + req.Data = output + return +} + +func (c *Route53) ListTagsForResources(input *ListTagsForResourcesInput) (*ListTagsForResourcesOutput, error) { + req, out := c.ListTagsForResourcesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicies = "ListTrafficPolicies" + +// ListTrafficPoliciesRequest generates a request for the ListTrafficPolicies operation. 
+func (c *Route53) ListTrafficPoliciesRequest(input *ListTrafficPoliciesInput) (req *request.Request, output *ListTrafficPoliciesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicies, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicies", + } + + if input == nil { + input = &ListTrafficPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPoliciesOutput{} + req.Data = output + return +} + +// Gets information about the latest version for every traffic policy that is +// associated with the current AWS account. To get the information, send a GET +// request to the 2013-04-01/trafficpolicy resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policies, you can use the maxitems parameter to list +// them in groups of up to 100. +// +// The response includes three values that help you navigate from one group +// of maxitems traffic policies to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policies associated with the current AWS account. +// +// If IsTruncated is false, this response includes the last traffic policy +// that is associated with the current account. +// +// TrafficPolicyIdMarker If IsTruncated is true, TrafficPolicyIdMarker is the +// ID of the first traffic policy in the next group of MaxItems traffic policies. +// If you want to list more traffic policies, make another call to ListTrafficPolicies, +// and specify the value of the TrafficPolicyIdMarker element from the response +// in the TrafficPolicyIdMarker request parameter. +// +// If IsTruncated is false, the TrafficPolicyIdMarker element is omitted from +// the response. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. 
+func (c *Route53) ListTrafficPolicies(input *ListTrafficPoliciesInput) (*ListTrafficPoliciesOutput, error) { + req, out := c.ListTrafficPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstances = "ListTrafficPolicyInstances" + +// ListTrafficPolicyInstancesRequest generates a request for the ListTrafficPolicyInstances operation. +func (c *Route53) ListTrafficPolicyInstancesRequest(input *ListTrafficPolicyInstancesInput) (req *request.Request, output *ListTrafficPolicyInstancesOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstances, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances", + } + + if input == nil { + input = &ListTrafficPolicyInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created by using +// the current AWS account. +// +// After you submit an UpdateTrafficPolicyInstance request, there's a brief +// delay while Amazon Route 53 creates the resource record sets that are specified +// in the traffic policy definition. For more information, see the State response +// element. To get information about the traffic policy instances that are associated +// with the current AWS account, send a GET request to the 2013-04-01/trafficpolicyinstance +// resource. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. +// +// The response includes five values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the current AWS account. 
+// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the current account. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +// +// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker +// If IsTruncated is true, these three values in the response represent the +// first traffic policy instance in the next group of MaxItems traffic policy +// instances. To list more traffic policy instances, make another call to ListTrafficPolicyInstances, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. +func (c *Route53) ListTrafficPolicyInstances(input *ListTrafficPolicyInstancesInput) (*ListTrafficPolicyInstancesOutput, error) { + req, out := c.ListTrafficPolicyInstancesRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstancesByHostedZone = "ListTrafficPolicyInstancesByHostedZone" + +// ListTrafficPolicyInstancesByHostedZoneRequest generates a request for the ListTrafficPolicyInstancesByHostedZone operation. +func (c *Route53) ListTrafficPolicyInstancesByHostedZoneRequest(input *ListTrafficPolicyInstancesByHostedZoneInput) (req *request.Request, output *ListTrafficPolicyInstancesByHostedZoneOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstancesByHostedZone, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances/hostedzone", + } + + if input == nil { + input = &ListTrafficPolicyInstancesByHostedZoneInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesByHostedZoneOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created in a +// specified hosted zone. 
+//
+// After you submit an UpdateTrafficPolicyInstance request, there's a brief
+// delay while Amazon Route 53 creates the resource record sets that are specified
+// in the traffic policy definition. For more information, see the State response
+// element. To get information about the traffic policy instances that you created
+// in a specified hosted zone, send a GET request to the 2013-04-01/trafficpolicyinstance
+// resource and include the ID of the hosted zone.
+//
+// Amazon Route 53 returns a maximum of 100 items in each response. If you
+// have a lot of traffic policy instances, you can use the MaxItems parameter
+// to list them in groups of up to 100.
+//
+// The response includes four values that help you navigate from one group
+// of MaxItems traffic policy instances to the next:
+//
+// IsTruncated If the value of IsTruncated in the response is true, there
+// are more traffic policy instances associated with the current AWS account.
+//
+// If IsTruncated is false, this response includes the last traffic policy
+// instance that is associated with the current account.
+//
+// MaxItems The value that you specified for the MaxItems parameter in the
+// request that produced the current response.
+//
+// TrafficPolicyInstanceNameMarker and TrafficPolicyInstanceTypeMarker If IsTruncated
+// is true, these two values in the response represent the first traffic policy
+// instance in the next group of MaxItems traffic policy instances. To list
+// more traffic policy instances, make another call to ListTrafficPolicyInstancesByHostedZone,
+// and specify these values in the corresponding request parameters.
+//
+// If IsTruncated is false, both elements are omitted from the response.
+func (c *Route53) ListTrafficPolicyInstancesByHostedZone(input *ListTrafficPolicyInstancesByHostedZoneInput) (*ListTrafficPolicyInstancesByHostedZoneOutput, error) { + req, out := c.ListTrafficPolicyInstancesByHostedZoneRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyInstancesByPolicy = "ListTrafficPolicyInstancesByPolicy" + +// ListTrafficPolicyInstancesByPolicyRequest generates a request for the ListTrafficPolicyInstancesByPolicy operation. +func (c *Route53) ListTrafficPolicyInstancesByPolicyRequest(input *ListTrafficPolicyInstancesByPolicyInput) (req *request.Request, output *ListTrafficPolicyInstancesByPolicyOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyInstancesByPolicy, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicyinstances/trafficpolicy", + } + + if input == nil { + input = &ListTrafficPolicyInstancesByPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyInstancesByPolicyOutput{} + req.Data = output + return +} + +// Gets information about the traffic policy instances that you created by using +// a specify traffic policy version. +// +// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance +// request, there's a brief delay while Amazon Route 53 creates the resource +// record sets that are specified in the traffic policy definition. For more +// information, see the State response element. To get information about the +// traffic policy instances that you created by using a specify traffic policy +// version, send a GET request to the 2013-04-01/trafficpolicyinstance resource +// and include the ID and version of the traffic policy. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policy instances, you can use the MaxItems parameter +// to list them in groups of up to 100. 
+// +// The response includes five values that help you navigate from one group +// of MaxItems traffic policy instances to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy instances associated with the specified traffic policy. +// +// If IsTruncated is false, this response includes the last traffic policy +// instance that is associated with the specified traffic policy. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. +// +// HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker +// If IsTruncated is true, these values in the response represent the first +// traffic policy instance in the next group of MaxItems traffic policy instances. +// To list more traffic policy instances, make another call to ListTrafficPolicyInstancesByPolicy, +// and specify these values in the corresponding request parameters. +// +// If IsTruncated is false, all three elements are omitted from the response. +func (c *Route53) ListTrafficPolicyInstancesByPolicy(input *ListTrafficPolicyInstancesByPolicyInput) (*ListTrafficPolicyInstancesByPolicyOutput, error) { + req, out := c.ListTrafficPolicyInstancesByPolicyRequest(input) + err := req.Send() + return out, err +} + +const opListTrafficPolicyVersions = "ListTrafficPolicyVersions" + +// ListTrafficPolicyVersionsRequest generates a request for the ListTrafficPolicyVersions operation. 
+func (c *Route53) ListTrafficPolicyVersionsRequest(input *ListTrafficPolicyVersionsInput) (req *request.Request, output *ListTrafficPolicyVersionsOutput) { + op := &request.Operation{ + Name: opListTrafficPolicyVersions, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/trafficpolicies/{Id}/versions", + } + + if input == nil { + input = &ListTrafficPolicyVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTrafficPolicyVersionsOutput{} + req.Data = output + return +} + +// Gets information about all of the versions for a specified traffic policy. +// ListTrafficPolicyVersions lists only versions that have not been deleted. +// +// Amazon Route 53 returns a maximum of 100 items in each response. If you +// have a lot of traffic policies, you can use the maxitems parameter to list +// them in groups of up to 100. +// +// The response includes three values that help you navigate from one group +// of maxitemsmaxitems traffic policies to the next: +// +// IsTruncated If the value of IsTruncated in the response is true, there +// are more traffic policy versions associated with the specified traffic policy. +// +// If IsTruncated is false, this response includes the last traffic policy +// version that is associated with the specified traffic policy. +// +// TrafficPolicyVersionMarker The ID of the next traffic policy version that +// is associated with the current AWS account. If you want to list more traffic +// policies, make another call to ListTrafficPolicyVersions, and specify the +// value of the TrafficPolicyVersionMarker element in the TrafficPolicyVersionMarker +// request parameter. +// +// If IsTruncated is false, Amazon Route 53 omits the TrafficPolicyVersionMarker +// element from the response. +// +// MaxItems The value that you specified for the MaxItems parameter in the +// request that produced the current response. 
+func (c *Route53) ListTrafficPolicyVersions(input *ListTrafficPolicyVersionsInput) (*ListTrafficPolicyVersionsOutput, error) { + req, out := c.ListTrafficPolicyVersionsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateHealthCheck = "UpdateHealthCheck" + +// UpdateHealthCheckRequest generates a request for the UpdateHealthCheck operation. +func (c *Route53) UpdateHealthCheckRequest(input *UpdateHealthCheckInput) (req *request.Request, output *UpdateHealthCheckOutput) { + op := &request.Operation{ + Name: opUpdateHealthCheck, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/healthcheck/{HealthCheckId}", + } + + if input == nil { + input = &UpdateHealthCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateHealthCheckOutput{} + req.Data = output + return +} + +// This action updates an existing health check. +// +// To update a health check, send a POST request to the 2013-04-01/healthcheck/health +// check ID resource. The request body must include an XML document with an +// UpdateHealthCheckRequest element. The response returns an UpdateHealthCheckResponse +// element, which contains metadata about the health check. +func (c *Route53) UpdateHealthCheck(input *UpdateHealthCheckInput) (*UpdateHealthCheckOutput, error) { + req, out := c.UpdateHealthCheckRequest(input) + err := req.Send() + return out, err +} + +const opUpdateHostedZoneComment = "UpdateHostedZoneComment" + +// UpdateHostedZoneCommentRequest generates a request for the UpdateHostedZoneComment operation. 
+func (c *Route53) UpdateHostedZoneCommentRequest(input *UpdateHostedZoneCommentInput) (req *request.Request, output *UpdateHostedZoneCommentOutput) { + op := &request.Operation{ + Name: opUpdateHostedZoneComment, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/hostedzone/{Id}", + } + + if input == nil { + input = &UpdateHostedZoneCommentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateHostedZoneCommentOutput{} + req.Data = output + return +} + +// To update the hosted zone comment, send a POST request to the 2013-04-01/hostedzone/hosted +// zone ID resource. The request body must include an XML document with a UpdateHostedZoneCommentRequest +// element. The response to this request includes the modified HostedZone element. +// +// The comment can have a maximum length of 256 characters. +func (c *Route53) UpdateHostedZoneComment(input *UpdateHostedZoneCommentInput) (*UpdateHostedZoneCommentOutput, error) { + req, out := c.UpdateHostedZoneCommentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrafficPolicyComment = "UpdateTrafficPolicyComment" + +// UpdateTrafficPolicyCommentRequest generates a request for the UpdateTrafficPolicyComment operation. +func (c *Route53) UpdateTrafficPolicyCommentRequest(input *UpdateTrafficPolicyCommentInput) (req *request.Request, output *UpdateTrafficPolicyCommentOutput) { + op := &request.Operation{ + Name: opUpdateTrafficPolicyComment, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicy/{Id}/{Version}", + } + + if input == nil { + input = &UpdateTrafficPolicyCommentInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrafficPolicyCommentOutput{} + req.Data = output + return +} + +// Updates the comment for a specified traffic policy version. +// +// To update the comment, send a POST request to the /2013-04-01/trafficpolicy/ +// resource. +// +// The request body must include an XML document with an UpdateTrafficPolicyCommentRequest +// element. 
+func (c *Route53) UpdateTrafficPolicyComment(input *UpdateTrafficPolicyCommentInput) (*UpdateTrafficPolicyCommentOutput, error) { + req, out := c.UpdateTrafficPolicyCommentRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTrafficPolicyInstance = "UpdateTrafficPolicyInstance" + +// UpdateTrafficPolicyInstanceRequest generates a request for the UpdateTrafficPolicyInstance operation. +func (c *Route53) UpdateTrafficPolicyInstanceRequest(input *UpdateTrafficPolicyInstanceInput) (req *request.Request, output *UpdateTrafficPolicyInstanceOutput) { + op := &request.Operation{ + Name: opUpdateTrafficPolicyInstance, + HTTPMethod: "POST", + HTTPPath: "/2013-04-01/trafficpolicyinstance/{Id}", + } + + if input == nil { + input = &UpdateTrafficPolicyInstanceInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTrafficPolicyInstanceOutput{} + req.Data = output + return +} + +// Updates the resource record sets in a specified hosted zone that were created +// based on the settings in a specified traffic policy version. +// +// The DNS type of the resource record sets that you're updating must match +// the DNS type in the JSON document that is associated with the traffic policy +// version that you're using to update the traffic policy instance. When you +// update a traffic policy instance, Amazon Route 53 continues to respond to +// DNS queries for the root resource record set name (such as example.com) while +// it replaces one group of resource record sets with another. Amazon Route +// 53 performs the following operations: +// +// Amazon Route 53 creates a new group of resource record sets based on the +// specified traffic policy. This is true regardless of how substantial the +// differences are between the existing resource record sets and the new resource +// record sets. 
When all of the new resource record sets have been created, +// Amazon Route 53 starts to respond to DNS queries for the root resource record +// set name (such as example.com) by using the new resource record sets. Amazon +// Route 53 deletes the old group of resource record sets that are associated +// with the root resource record set name. To update a traffic policy instance, +// send a POST request to the /2013-04-01/trafficpolicyinstance/traffic policy +// ID resource. The request body must include an XML document with an UpdateTrafficPolicyInstanceRequest +// element. +func (c *Route53) UpdateTrafficPolicyInstance(input *UpdateTrafficPolicyInstanceInput) (*UpdateTrafficPolicyInstanceOutput, error) { + req, out := c.UpdateTrafficPolicyInstanceRequest(input) + err := req.Send() + return out, err +} + +// Alias resource record sets only: Information about the CloudFront distribution, +// ELB load balancer, Amazon S3 bucket, or Amazon Route 53 resource record set +// to which you are routing traffic. +// +// If you're creating resource record sets for a private hosted zone, note +// the following: +// +// You can create alias resource record sets only for Amazon Route 53 resource +// record sets in the same private hosted zone. Creating alias resource record +// sets for CloudFront distributions, ELB load balancers, and Amazon S3 buckets +// is not supported. You can't create alias resource record sets for failover, +// geolocation, or latency resource record sets in a private hosted zone. For +// more information and an example, see Example: Creating Alias Resource Record +// Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) +// in the Amazon Route 53 API Reference. +type AliasTarget struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: The external DNS name associated with the + // AWS Resource. 
The value that you specify depends on where you want to route + // queries: + // + // A CloudFront distribution: Specify the domain name that CloudFront assigned + // when you created your distribution. Your CloudFront distribution must include + // an alternate domain name that matches the name of the resource record set. + // For example, if the name of the resource record set is acme.example.com, + // your CloudFront distribution must include acme.example.com as one of the + // alternate domain names. For more information, see Using Alternate Domain + // Names (CNAMEs) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html) + // in the Amazon CloudFront Developer Guide. An ELB load balancer: Specify the + // DNS name associated with the load balancer. You can get the DNS name by using + // the AWS Management Console, the ELB API, or the AWS CLI. Use the same method + // to get values for HostedZoneId and DNSName. If you get one value from the + // console and the other value from the API or the CLI, creating the resource + // record set will fail. An Amazon S3 bucket that is configured as a static + // website: Specify the domain name of the Amazon S3 website endpoint in which + // you created the bucket; for example, s3-website-us-east-1.amazonaws.com. + // For more information about valid values, see the table Amazon Simple Storage + // Service (S3) Website Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. For more information about + // using Amazon S3 buckets for websites, see Hosting a Static Website on Amazon + // S3 (http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) in + // the Amazon Simple Storage Service Developer Guide. For more information + // and an example, see Example: Creating Alias Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) + // in the Amazon Route 53 API Reference. 
+ DNSName *string `type:"string" required:"true"` + + // Alias resource record sets only: If you set the value of EvaluateTargetHealth + // to true for the resource record set or sets in an alias, weighted alias, + // latency alias, or failover alias resource record set, and if you specify + // a value for HealthCheckId for every resource record set that is referenced + // by these alias resource record sets, the alias resource record sets inherit + // the health of the referenced resource record sets. + // + // In this configuration, when Amazon Route 53 receives a DNS query for an + // alias resource record set: + // + // Amazon Route 53 looks at the resource record sets that are referenced by + // the alias resource record sets to determine which health checks they're using. + // Amazon Route 53 checks the current status of each health check. (Amazon Route + // 53 periodically checks the health of the endpoint that is specified in a + // health check; it doesn't perform the health check when the DNS query arrives.) + // Based on the status of the health checks, Amazon Route 53 determines which + // resource record sets are healthy. Unhealthy resource record sets are immediately + // removed from consideration. In addition, if all of the resource record sets + // that are referenced by an alias resource record set are unhealthy, that alias + // resource record set also is immediately removed from consideration. Based + // on the configuration of the alias resource record sets (weighted alias or + // latency alias, for example) and the configuration of the resource record + // sets that they reference, Amazon Route 53 chooses a resource record set from + // the healthy resource record sets, and responds to the query. Note the following: + // + // You cannot set EvaluateTargetHealth to true when the alias target is a CloudFront + // distribution. 
If the AWS resource that you specify in AliasTarget is a resource + // record set or a group of resource record sets (for example, a group of weighted + // resource record sets), but it is not another alias resource record set, we + // recommend that you associate a health check with all of the resource record + // sets in the alias target. For more information, see What Happens When You + // Omit Health Checks? (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html#dns-failover-complex-configs-hc-omitting) + // in the Amazon Route 53 Developer Guide. If you specify an ELB load balancer + // in AliasTarget, Elastic Load Balancing routes queries only to the healthy + // Amazon EC2 instances that are registered with the load balancer. If no Amazon + // EC2 instances are healthy or if the load balancer itself is unhealthy, and + // if EvaluateTargetHealth is true for the corresponding alias resource record + // set, Amazon Route 53 routes queries to other resources. When you create a + // load balancer, you configure settings for Elastic Load Balancing health checks; + // they're not Amazon Route 53 health checks, but they perform a similar function. + // Do not create Amazon Route 53 health checks for the Amazon EC2 instances + // that you register with an ELB load balancer. For more information, see How + // Health Checks Work in More Complex Amazon Route 53 Configurations (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html) + // in the Amazon Route 53 Developer Guide. We recommend that you set EvaluateTargetHealth + // to true only when you have enough idle capacity to handle the failure of + // one or more endpoints. + // + // For more information and examples, see Amazon Route 53 Health Checks and + // DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // in the Amazon Route 53 Developer Guide. 
+ EvaluateTargetHealth *bool `type:"boolean" required:"true"` + + // Alias resource record sets only: The value you use depends on where you want + // to route queries: + // + // A CloudFront distribution: Specify Z2FDTNDATAQYW2. An ELB load balancer: + // Specify the value of the hosted zone ID for the load balancer. You can get + // the hosted zone ID by using the AWS Management Console, the ELB API, or the + // AWS CLI. Use the same method to get values for HostedZoneId and DNSName. + // If you get one value from the console and the other value from the API or + // the CLI, creating the resource record set will fail. An Amazon S3 bucket + // that is configured as a static website: Specify the hosted zone ID for the + // Amazon S3 website endpoint in which you created the bucket. For more information + // about valid values, see the table Amazon Simple Storage Service (S3) Website + // Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. Another Amazon Route 53 resource + // record set in your hosted zone: Specify the hosted zone ID of your hosted + // zone. (An alias resource record set cannot reference a resource record set + // in a different hosted zone.) For more information and an example, see Example: + // Creating Alias Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) + // in the Amazon Route 53 API Reference. + HostedZoneId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AliasTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AliasTarget) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to associate a +// VPC with an hosted zone. 
+type AssociateVPCWithHostedZoneInput struct { + _ struct{} `locationName:"AssociateVPCWithHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Optional: Any comments you want to include about a AssociateVPCWithHostedZoneRequest. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to associate your VPC with. + // + // Note that you cannot associate a VPC with a hosted zone that doesn't have + // an existing VPC association. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The VPC that you want your hosted zone to be associated with. + VPC *VPC `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AssociateVPCWithHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVPCWithHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the request. +type AssociateVPCWithHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your AssociateVPCWithHostedZoneRequest. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AssociateVPCWithHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVPCWithHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information for each change in a change +// batch request. +type Change struct { + _ struct{} `type:"structure"` + + // The action to perform: + // + // CREATE: Creates a resource record set that has the specified values. 
DELETE: + // Deletes an existing resource record set that has the specified values for + // Name, Type, SetIdentifier (for latency, weighted, geolocation, and failover + // resource record sets), and TTL (except alias resource record sets, for which + // the TTL is determined by the AWS resource that you're routing DNS queries + // to). UPSERT: If a resource record set does not already exist, Amazon Route + // 53 creates it. If a resource record set does exist, Amazon Route 53 updates + // it with the values in the request. Amazon Route 53 can update an existing + // resource record set only when all of the following values match: Name, Type, + // and SetIdentifier (for weighted, latency, geolocation, and failover resource + // record sets). + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Information about the resource record set to create or delete. + ResourceRecordSet *ResourceRecordSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Change) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Change) GoString() string { + return s.String() +} + +// A complex type that contains an optional comment and the changes that you +// want to make with a change batch request. +type ChangeBatch struct { + _ struct{} `type:"structure"` + + // A complex type that contains one Change element for each resource record + // set that you want to create or delete. + Changes []*Change `locationNameList:"Change" min:"1" type:"list" required:"true"` + + // Optional: Any comments you want to include about a change batch request. 
+ Comment *string `type:"string"` +} + +// String returns the string representation +func (s ChangeBatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeBatch) GoString() string { + return s.String() +} + +// A complex type that lists the changes and information for a ChangeBatch. +type ChangeBatchRecord struct { + _ struct{} `type:"structure"` + + // A list of changes made in the ChangeBatch. + Changes []*Change `locationNameList:"Change" min:"1" type:"list"` + + // A complex type that describes change information about changes made to your + // hosted zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + Comment *string `type:"string"` + + // The ID of the request. Use this ID to track when the change has completed + // across all Amazon Route 53 DNS servers. + Id *string `type:"string" required:"true"` + + // The current state of the request. PENDING indicates that this request has + // not yet been applied to all Amazon Route 53 DNS servers. + // + // Valid Values: PENDING | INSYNC + Status *string `type:"string" required:"true" enum:"ChangeStatus"` + + // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The AWS account ID attached to the changes. 
+ Submitter *string `type:"string"` +} + +// String returns the string representation +func (s ChangeBatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeBatchRecord) GoString() string { + return s.String() +} + +// A complex type that describes change information about changes made to your +// hosted zone. +// +// This element contains an ID that you use when performing a GetChange action +// to get detailed information about the change. +type ChangeInfo struct { + _ struct{} `type:"structure"` + + // A complex type that describes change information about changes made to your + // hosted zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + Comment *string `type:"string"` + + // The ID of the request. Use this ID to track when the change has completed + // across all Amazon Route 53 DNS servers. + Id *string `type:"string" required:"true"` + + // The current state of the request. PENDING indicates that this request has + // not yet been applied to all Amazon Route 53 DNS servers. + // + // Valid Values: PENDING | INSYNC + Status *string `type:"string" required:"true" enum:"ChangeStatus"` + + // The date and time the change was submitted, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + SubmittedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s ChangeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeInfo) GoString() string { + return s.String() +} + +// A complex type that contains a change batch. 
+type ChangeResourceRecordSetsInput struct { + _ struct{} `locationName:"ChangeResourceRecordSetsRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains an optional comment and the Changes element. + ChangeBatch *ChangeBatch `type:"structure" required:"true"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to change. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangeResourceRecordSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeResourceRecordSetsInput) GoString() string { + return s.String() +} + +// A complex type containing the response for the request. +type ChangeResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about changes made to your hosted + // zone. + // + // This element contains an ID that you use when performing a GetChange action + // to get detailed information about the change. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChangeResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request to add, change, or +// delete the tags that are associated with a resource. +type ChangeTagsForResourceInput struct { + _ struct{} `locationName:"ChangeTagsForResourceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains a list of Tag elements. Each Tag element identifies + // a tag that you want to add or update for the specified resource. 
+ AddTags []*Tag `locationNameList:"Tag" min:"1" type:"list"` + + // A list of Tag keys that you want to remove from the specified resource. + RemoveTagKeys []*string `locationNameList:"Key" min:"1" type:"list"` + + // The ID of the resource for which you want to add, change, or delete tags. + ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ChangeTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeTagsForResourceInput) GoString() string { + return s.String() +} + +// Empty response for the request. +type ChangeTagsForResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangeTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeTagsForResourceOutput) GoString() string { + return s.String() +} + +// >A complex type that contains information about the request to create a health +// check. +type CreateHealthCheckInput struct { + _ struct{} `locationName:"CreateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateHealthCheck + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a health + // check. CallerReference can be any unique string; you might choose to use + // a string that identifies your project. 
+ // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // A complex type that contains health check configuration. + HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHealthCheckInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the new health check. +type CreateHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains identifying information about the health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` + + // The unique URL representing the new health check. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to create a hosted +// zone. +type CreateHostedZoneInput struct { + _ struct{} `locationName:"CreateHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateHostedZone + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a hosted + // zone. CallerReference can be any unique string; you might choose to use a + // string that identifies your project, such as DNSMigration_01. 
+ // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // The delegation set id of the reusable delegation set whose NS records you + // want to assign to the new hosted zone. + DelegationSetId *string `type:"string"` + + // A complex type that contains an optional comment about your hosted zone. + HostedZoneConfig *HostedZoneConfig `type:"structure"` + + // The name of the domain. This must be a fully-specified domain, for example, + // www.example.com. The trailing dot is optional; Amazon Route 53 assumes that + // the domain name is fully qualified. This means that Amazon Route 53 treats + // www.example.com (without a trailing dot) and www.example.com. (with a trailing + // dot) as identical. + // + // This is the name you have registered with your DNS registrar. You should + // ask your registrar to change the authoritative name servers for your domain + // to the set of NameServers elements returned in DelegationSet. + Name *string `type:"string" required:"true"` + + // The VPC that you want your hosted zone to be associated with. By providing + // this parameter, your newly created hosted zone cannot be resolved anywhere other + // than the given VPC. + VPC *VPC `type:"structure"` +} + +// String returns the string representation +func (s CreateHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the new hosted zone. +type CreateHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the request to create a hosted + // zone. 
This includes an ID that you use when you call the GetChange action + // to get the current status of the change request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` + + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // A complex type that contains identifying information about the hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // The unique URL representing the new hosted zone. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + VPC *VPC `type:"structure"` +} + +// String returns the string representation +func (s CreateHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateHostedZoneOutput) GoString() string { + return s.String() +} + +type CreateReusableDelegationSetInput struct { + _ struct{} `locationName:"CreateReusableDelegationSetRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A unique string that identifies the request and that allows failed CreateReusableDelegationSet + // requests to be retried without the risk of executing the operation twice. + // You must use a unique CallerReference string every time you create a reusable + // delegation set. CallerReference can be any unique string; you might choose + // to use a string that identifies your project, such as DNSMigration_01. + // + // Valid characters are any Unicode code points that are legal in an XML 1.0 + // document. The UTF-8 encoding of the value must be less than 128 bytes. + CallerReference *string `min:"1" type:"string" required:"true"` + + // The ID of the hosted zone whose delegation set you want to mark as reusable. + // It is an optional parameter. 
+ HostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s CreateReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReusableDelegationSetInput) GoString() string { + return s.String() +} + +type CreateReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains name server information. + DelegationSet *DelegationSet `type:"structure" required:"true"` + + // The unique URL representing the new reusable delegation set. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy that you +// want to create. +type CreateTrafficPolicyInput struct { + _ struct{} `locationName:"CreateTrafficPolicyRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Any comments that you want to include about the traffic policy. + Comment *string `type:"string"` + + // The definition of this traffic policy in JSON format. + Document *string `type:"string" required:"true"` + + // The name of the traffic policy. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// you want to create based on a specified traffic policy. 
+type CreateTrafficPolicyInstanceInput struct { + _ struct{} `locationName:"CreateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The ID of the hosted zone in which you want Amazon Route 53 to create resource + // record sets by using the configuration in a traffic policy. + HostedZoneId *string `type:"string" required:"true"` + + // The domain name (such as example.com) or subdomain name (such as www.example.com) + // for which Amazon Route 53 responds to DNS queries by using the resource record + // sets that Amazon Route 53 creates for this traffic policy instance. + Name *string `type:"string" required:"true"` + + // The TTL that you want Amazon Route 53 to assign to all of the resource record + // sets that it creates in the specified hosted zone. + TTL *int64 `type:"long" required:"true"` + + // The ID of the traffic policy that you want to use to create resource record + // sets in the specified hosted zone. + TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want to use to create resource + // record sets in the specified hosted zone. + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicyInstance +// request. +type CreateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A unique URL that represents a new traffic policy instance. + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new traffic policy instance. 
+ TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicy +// request. +type CreateTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy for which +// you want to create a new version. +type CreateTrafficPolicyVersionInput struct { + _ struct{} `locationName:"CreateTrafficPolicyVersionRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Any comments that you want to include about the new traffic policy version. + Comment *string `type:"string"` + + // The definition of a new traffic policy version, in JSON format. You must + // specify the full definition of the new traffic policy. You cannot specify + // just the differences between the new version and a previous version. + Document *string `type:"string" required:"true"` + + // The ID of the traffic policy for which you want to create a new version. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyVersionInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the CreateTrafficPolicyVersion +// request. +type CreateTrafficPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string" required:"true"` + + // A complex type that contains settings for the new version of the traffic + // policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficPolicyVersionOutput) GoString() string { + return s.String() +} + +// A complex type that contains name server information. +type DelegationSet struct { + _ struct{} `type:"structure"` + + CallerReference *string `min:"1" type:"string"` + + Id *string `type:"string"` + + // A complex type that contains the authoritative name servers for the hosted + // zone. Use the method provided by your domain registrar to add an NS record + // to your domain for each NameServer that is assigned to your hosted zone. + NameServers []*string `locationNameList:"NameServer" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DelegationSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DelegationSet) GoString() string { + return s.String() +} + +// A complex type containing the request information for delete health check. 
+type DeleteHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check to delete. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHealthCheckInput) GoString() string { + return s.String() +} + +// Empty response for the request. +type DeleteHealthCheckOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the hosted zone that you want +// to delete. +type DeleteHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the request. +type DeleteHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your delete request. 
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type containing the information for the delete request. +type DeleteReusableDelegationSetInput struct { + _ struct{} `type:"structure"` + + // The ID of the reusable delegation set you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReusableDelegationSetInput) GoString() string { + return s.String() +} + +// Empty response for the request. +type DeleteReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// A request to delete a specified traffic policy version. +type DeleteTrafficPolicyInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy that you want to delete. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version number of the traffic policy that you want to delete. 
+ Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy instance +// that you want to delete. +type DeleteTrafficPolicyInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy instance that you want to delete. + // + // When you delete a traffic policy instance, Amazon Route 53 also deletes + // all of the resource record sets that were created when you created the traffic + // policy instance. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// An empty element. +type DeleteTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// An empty element. 
+type DeleteTrafficPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to disassociate +// a VPC from an hosted zone. +type DisassociateVPCFromHostedZoneInput struct { + _ struct{} `locationName:"DisassociateVPCFromHostedZoneRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // Optional: Any comments you want to include about a DisassociateVPCFromHostedZoneRequest. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to disassociate your VPC from. + // + // Note that you cannot disassociate the last VPC from a hosted zone. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The VPC that you want your hosted zone to be disassociated from. + VPC *VPC `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DisassociateVPCFromHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVPCFromHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing the response information for the request. +type DisassociateVPCFromHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the ID, the status, and the date and time of + // your DisassociateVPCFromHostedZoneRequest. 
+ ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVPCFromHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about a geo location. +type GeoLocation struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocation) GoString() string { + return s.String() +} + +// A complex type that contains information about a GeoLocation. +type GeoLocationDetails struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. 
+ ContinentCode *string `min:"2" type:"string"` + + // The name of the continent. This element is only present if ContinentCode + // is also present. + ContinentName *string `min:"1" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `min:"1" type:"string"` + + // The name of the country. This element is only present if CountryCode is also + // present. + CountryName *string `min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. + SubdivisionCode *string `min:"1" type:"string"` + + // The name of the subdivision. This element is only present if SubdivisionCode + // is also present. + SubdivisionName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GeoLocationDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GeoLocationDetails) GoString() string { + return s.String() +} + +// The input for a GetChangeDetails request. +type GetChangeDetailsInput struct { + _ struct{} `type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeDetailsInput) GoString() string { + return s.String() +} + +// A complex type that contains the ChangeBatchRecord element. 
+type GetChangeDetailsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the contained + // changes. + ChangeBatchRecord *ChangeBatchRecord `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetChangeDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeDetailsOutput) GoString() string { + return s.String() +} + +// The input for a GetChange request. +type GetChangeInput struct { + _ struct{} `type:"structure"` + + // The ID of the change batch request. The value that you specify here is the + // value that ChangeResourceRecordSets returned in the Id element when you submitted + // the request. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeInput) GoString() string { + return s.String() +} + +// A complex type that contains the ChangeInfo element. +type GetChangeOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the specified change batch, + // including the change batch ID, the status of the change, and the date and + // time of the request. + ChangeInfo *ChangeInfo `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeOutput) GoString() string { + return s.String() +} + +// Empty request. 
+type GetCheckerIpRangesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCheckerIpRangesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCheckerIpRangesInput) GoString() string { + return s.String() +} + +// A complex type that contains the CheckerIpRanges element. +type GetCheckerIpRangesOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains sorted list of IP ranges in CIDR format for + // Amazon Route 53 health checkers. + CheckerIpRanges []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetCheckerIpRangesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCheckerIpRangesOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get a geo location. +type GetGeoLocationInput struct { + _ struct{} `type:"structure"` + + // The code for a continent geo location. Note: only continent locations have + // a continent code. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + ContinentCode *string `location:"querystring" locationName:"continentcode" min:"2" type:"string"` + + // The code for a country geo location. The default location uses '*' for the + // country code and will match all locations that are not matched by a geo location. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + CountryCode *string `location:"querystring" locationName:"countrycode" min:"1" type:"string"` + + // The code for a country's subdivision (e.g., a province of Canada). A subdivision + // code is only valid with the appropriate country code. 
+ // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + SubdivisionCode *string `location:"querystring" locationName:"subdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetGeoLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGeoLocationInput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified geo location. +type GetGeoLocationOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified geo location. + GeoLocationDetails *GeoLocationDetails `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetGeoLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGeoLocationOutput) GoString() string { + return s.String() +} + +// To retrieve a count of all your health checks, send a GET request to the +// 2013-04-01/healthcheckcount resource. +type GetHealthCheckCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetHealthCheckCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckCountInput) GoString() string { + return s.String() +} + +// A complex type that contains the count of health checks associated with the +// current AWS account. +type GetHealthCheckCountOutput struct { + _ struct{} `type:"structure"` + + // The number of health checks associated with the current AWS account. 
+ HealthCheckCount *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckCountOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get a health +// check. +type GetHealthCheckInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check to retrieve. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get the most +// recent failure reason for a health check. +type GetHealthCheckLastFailureReasonInput struct { + _ struct{} `type:"structure"` + + // The ID of the health check for which you want to retrieve the reason for + // the most recent failure. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the most recent failure for +// the specified health check. +type GetHealthCheckLastFailureReasonOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. 
+ HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckLastFailureReasonOutput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified health check. +type GetHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the specified health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to get health +// check status for a health check. +type GetHealthCheckStatusInput struct { + _ struct{} `type:"structure"` + + // If you want Amazon Route 53 to return this resource record set in response + // to a DNS query only when a health check is passing, include the HealthCheckId + // element and specify the ID of the applicable health check. + // + // Amazon Route 53 determines whether a resource record set is healthy by periodically + // sending a request to the endpoint that is specified in the health check. + // If that endpoint returns an HTTP status code of 2xx or 3xx, the endpoint + // is healthy. If the endpoint returns an HTTP status code of 400 or greater, + // or if the endpoint doesn't respond for a certain amount of time, Amazon Route + // 53 considers the endpoint unhealthy and also considers the resource record + // set unhealthy. 
+ // + // The HealthCheckId element is only useful when Amazon Route 53 is choosing + // between two or more resource record sets to respond to a DNS query, and you + // want Amazon Route 53 to base the choice in part on the status of a health + // check. Configuring health checks only makes sense in the following configurations: + // + // You're checking the health of the resource record sets in a weighted, latency, + // geolocation, or failover resource record set, and you specify health check + // IDs for all of the resource record sets. If the health check for one resource + // record set specifies an endpoint that is not healthy, Amazon Route 53 stops + // responding to queries using the value for that resource record set. You set + // EvaluateTargetHealth to true for the resource record sets in an alias, weighted + // alias, latency alias, geolocation alias, or failover alias resource record + // set, and you specify health check IDs for all of the resource record sets + // that are referenced by the alias resource record sets. For more information + // about this configuration, see EvaluateTargetHealth. + // + // Amazon Route 53 doesn't check the health of the endpoint specified in the + // resource record set, for example, the endpoint specified by the IP address + // in the Value element. When you add a HealthCheckId element to a resource + // record set, Amazon Route 53 checks the health of the endpoint that you specified + // in the health check. + // + // For geolocation resource record sets, if an endpoint is unhealthy, Amazon + // Route 53 looks for a resource record set for the larger, associated geographic + // region. For example, suppose you have resource record sets for a state in + // the United States, for the United States, for North America, and for all + // locations. 
If the endpoint for the state resource record set is unhealthy, + // Amazon Route 53 checks the resource record sets for the United States, for + // North America, and for all locations (a resource record set for which the + // value of CountryCode is *), in that order, until it finds a resource record + // set for which the endpoint is healthy. + // + // If your health checks specify the endpoint only by domain name, we recommend + // that you create a separate health check for each endpoint. For example, create + // a health check for each HTTP server that is serving content for www.example.com. + // For the value of FullyQualifiedDomainName, specify the domain name of the + // server (such as us-east-1-www.example.com), not the name of the resource + // record sets (example.com). + // + // In this configuration, if you create a health check for which the value + // of FullyQualifiedDomainName matches the name of the resource record sets + // and then associate the health check with those resource record sets, health + // check results will be unpredictable. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckStatusInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the status of the specified +// health check. +type GetHealthCheckStatusOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HealthCheckObservation element for each Amazon Route + // 53 health checker. 
+ HealthCheckObservations []*HealthCheckObservation `locationNameList:"HealthCheckObservation" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetHealthCheckStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHealthCheckStatusOutput) GoString() string { + return s.String() +} + +// To retrieve a count of all your hosted zones, send a GET request to the 2013-04-01/hostedzonecount +// resource. +type GetHostedZoneCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetHostedZoneCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneCountInput) GoString() string { + return s.String() +} + +// A complex type that contains the count of hosted zones associated with the +// current AWS account. +type GetHostedZoneCountOutput struct { + _ struct{} `type:"structure"` + + // The number of hosted zones associated with the current AWS account. + HostedZoneCount *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s GetHostedZoneCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneCountOutput) GoString() string { + return s.String() +} + +// The input for a GetHostedZone request. +type GetHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone for which you want to get a list of the name servers + // in the delegation set. 
+ Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified hosted zone. +type GetHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the name servers for the specified + // hosted zone. + DelegationSet *DelegationSet `type:"structure"` + + // A complex type that contains the information about the specified hosted zone. + HostedZone *HostedZone `type:"structure" required:"true"` + + // A complex type that contains information about VPCs associated with the specified + // hosted zone. + VPCs []*VPC `locationNameList:"VPC" min:"1" type:"list"` +} + +// String returns the string representation +func (s GetHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostedZoneOutput) GoString() string { + return s.String() +} + +// The input for a GetReusableDelegationSet request. +type GetReusableDelegationSetInput struct { + _ struct{} `type:"structure"` + + // The ID of the reusable delegation set for which you want to get a list of + // the name server. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetReusableDelegationSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReusableDelegationSetInput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified reusable delegation +// set. 
+type GetReusableDelegationSetOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains the information about the nameservers for the + // specified delegation set ID. + DelegationSet *DelegationSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetReusableDelegationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReusableDelegationSetOutput) GoString() string { + return s.String() +} + +// Gets information about a specific traffic policy version. To get the information, +// send a GET request to the 2013-04-01/trafficpolicy resource, and specify +// the ID and the version of the traffic policy. +type GetTrafficPolicyInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version number of the traffic policy that you want to get information + // about. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInput) GoString() string { + return s.String() +} + +// To retrieve a count of all your traffic policy instances, send a GET request +// to the 2013-04-01/trafficpolicyinstancecount resource. 
+type GetTrafficPolicyInstanceCountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceCountInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the number of traffic policy +// instances that are associated with the current AWS account. +type GetTrafficPolicyInstanceCountOutput struct { + _ struct{} `type:"structure"` + + // The number of traffic policy instances that are associated with the current + // AWS account. + TrafficPolicyInstanceCount *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceCountOutput) GoString() string { + return s.String() +} + +// Gets information about a specified traffic policy instance. +// +// To get information about a traffic policy instance, send a GET request to +// the 2013-04-01/trafficpolicyinstance/Id resource. +type GetTrafficPolicyInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the traffic policy instance that you want to get information about. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. 
+type GetTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type GetTrafficPolicyOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. + TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetTrafficPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTrafficPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains identifying information about the health check. +type HealthCheck struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the request to create the health check. + CallerReference *string `min:"1" type:"string" required:"true"` + + // A complex type that contains the health check configuration. + HealthCheckConfig *HealthCheckConfig `type:"structure" required:"true"` + + // The version of the health check. You can optionally pass this value in a + // call to UpdateHealthCheck to prevent overwriting another change to the health + // check. + HealthCheckVersion *int64 `min:"1" type:"long" required:"true"` + + // The ID of the specified health check. 
+ Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HealthCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheck) GoString() string { + return s.String() +} + +// A complex type that contains the health check configuration. +type HealthCheckConfig struct { + _ struct{} `type:"structure"` + + // For a specified parent health check, a list of HealthCheckId values for the + // associated child health checks. + ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"` + + // The number of consecutive health checks that an endpoint must pass or fail + // for Amazon Route 53 to change the current status of the endpoint from unhealthy + // to healthy or vice versa. + // + // Valid values are integers between 1 and 10. For more information, see "How + // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon + // Route 53 Developer Guide. + FailureThreshold *int64 `min:"1" type:"integer"` + + // Fully qualified domain name of the instance to be health checked. + FullyQualifiedDomainName *string `type:"string"` + + // The minimum number of child health checks that must be healthy for Amazon + // Route 53 to consider the parent health check to be healthy. Valid values + // are integers between 0 and 256, inclusive. + HealthThreshold *int64 `type:"integer"` + + // IP Address of the instance being checked. + IPAddress *string `type:"string"` + + // A boolean value that indicates whether the status of health check should + // be inverted. For example, if a health check is healthy but Inverted is True, + // then Amazon Route 53 considers the health check to be unhealthy. 
+ Inverted *bool `type:"boolean"` + + // A Boolean value that indicates whether you want Amazon Route 53 to measure + // the latency between health checkers in multiple AWS regions and your endpoint + // and to display CloudWatch latency graphs in the Amazon Route 53 console. + MeasureLatency *bool `type:"boolean"` + + // Port on which connection will be opened to the instance to health check. + // For HTTP and HTTP_STR_MATCH this defaults to 80 if the port is not specified. + // For HTTPS and HTTPS_STR_MATCH this defaults to 443 if the port is not specified. + Port *int64 `min:"1" type:"integer"` + + // The number of seconds between the time that Amazon Route 53 gets a response + // from your endpoint and the time that it sends the next health-check request. + // + // Each Amazon Route 53 health checker makes requests at this interval. Valid + // values are 10 and 30. The default value is 30. + RequestInterval *int64 `min:"10" type:"integer"` + + // Path to ping on the instance to check the health. Required for HTTP, HTTPS, + // HTTP_STR_MATCH, and HTTPS_STR_MATCH health checks. The HTTP request is issued + // to the instance on the given port and path. + ResourcePath *string `type:"string"` + + // A string to search for in the body of a health check response. Required for + // HTTP_STR_MATCH and HTTPS_STR_MATCH health checks. + SearchString *string `type:"string"` + + // The type of health check to be performed. Currently supported types are TCP, + // HTTP, HTTPS, HTTP_STR_MATCH, and HTTPS_STR_MATCH. + Type *string `type:"string" required:"true" enum:"HealthCheckType"` +} + +// String returns the string representation +func (s HealthCheckConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheckConfig) GoString() string { + return s.String() +} + +// A complex type that contains the IP address of a Amazon Route 53 health checker +// and the reason for the health check status. 
+type HealthCheckObservation struct { + _ struct{} `type:"structure"` + + // The IP address of the Amazon Route 53 health checker that performed the health + // check. + IPAddress *string `type:"string"` + + // A complex type that contains information about the health check status for + // the current observation. + StatusReport *StatusReport `type:"structure"` +} + +// String returns the string representation +func (s HealthCheckObservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheckObservation) GoString() string { + return s.String() +} + +// A complex type that contain information about the specified hosted zone. +type HostedZone struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the request to create the hosted zone. + CallerReference *string `min:"1" type:"string" required:"true"` + + // A complex type that contains the Comment element. + Config *HostedZoneConfig `type:"structure"` + + // The ID of the specified hosted zone. + Id *string `type:"string" required:"true"` + + // The name of the domain. This must be a fully-specified domain, for example, + // www.example.com. The trailing dot is optional; Amazon Route 53 assumes that + // the domain name is fully qualified. This means that Amazon Route 53 treats + // www.example.com (without a trailing dot) and www.example.com. (with a trailing + // dot) as identical. + // + // This is the name you have registered with your DNS registrar. You should + // ask your registrar to change the authoritative name servers for your domain + // to the set of NameServers elements returned in DelegationSet. + Name *string `type:"string" required:"true"` + + // Total number of resource record sets in the hosted zone. 
+ ResourceRecordSetCount *int64 `type:"long"` +} + +// String returns the string representation +func (s HostedZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostedZone) GoString() string { + return s.String() +} + +// A complex type that contains an optional comment about your hosted zone. +// If you don't want to specify a comment, you can omit the HostedZoneConfig +// and Comment elements from the XML document. +type HostedZoneConfig struct { + _ struct{} `type:"structure"` + + // An optional comment about your hosted zone. If you don't want to specify + // a comment, you can omit the HostedZoneConfig and Comment elements from the + // XML document. + Comment *string `type:"string"` + + PrivateZone *bool `type:"boolean"` +} + +// String returns the string representation +func (s HostedZoneConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostedZoneConfig) GoString() string { + return s.String() +} + +// The input for a ListChangeBatchesByHostedZone request. +type ListChangeBatchesByHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The end of the time period you want to see changes for. + EndDate *string `location:"querystring" locationName:"endDate" type:"string" required:"true"` + + // The ID of the hosted zone that you want to see changes for. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The page marker. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The maximum number of items on a page. + MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"` + + // The start of the time period you want to see changes for. 
+ StartDate *string `location:"querystring" locationName:"startDate" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListChangeBatchesByHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByHostedZoneInput) GoString() string { + return s.String() +} + +// The input for a ListChangeBatchesByHostedZone request. +type ListChangeBatchesByHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // The change batches within the given hosted zone and time period. + ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" type:"list" required:"true"` + + // A flag that indicates if there are more change batches to list. + IsTruncated *bool `type:"boolean"` + + // The page marker. + Marker *string `type:"string" required:"true"` + + // The maximum number of items on a page. + MaxItems *string `type:"string" required:"true"` + + // The next page marker. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListChangeBatchesByHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByHostedZoneOutput) GoString() string { + return s.String() +} + +// The input for a ListChangeBatchesByRRSet request. +type ListChangeBatchesByRRSetInput struct { + _ struct{} `type:"structure"` + + // The end of the time period you want to see changes for. + EndDate *string `location:"querystring" locationName:"endDate" type:"string" required:"true"` + + // The ID of the hosted zone that you want to see changes for. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The page marker. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // The maximum number of items on a page. 
+ MaxItems *string `location:"querystring" locationName:"maxItems" type:"string"` + + // The name of the RRSet that you want to see changes for. + Name *string `location:"querystring" locationName:"rrSet_name" type:"string" required:"true"` + + // The identifier of the RRSet that you want to see changes for. + SetIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` + + // The start of the time period you want to see changes for. + StartDate *string `location:"querystring" locationName:"startDate" type:"string" required:"true"` + + // The type of the RRSet that you want to see changes for. + Type *string `location:"querystring" locationName:"type" type:"string" required:"true" enum:"RRType"` +} + +// String returns the string representation +func (s ListChangeBatchesByRRSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByRRSetInput) GoString() string { + return s.String() +} + +// The input for a ListChangeBatchesByRRSet request. +type ListChangeBatchesByRRSetOutput struct { + _ struct{} `type:"structure"` + + // The change batches within the given hosted zone and time period. + ChangeBatchRecords []*ChangeBatchRecord `locationNameList:"ChangeBatchRecord" min:"1" type:"list" required:"true"` + + // A flag that indicates if there are more change batches to list. + IsTruncated *bool `type:"boolean"` + + // The page marker. + Marker *string `type:"string" required:"true"` + + // The maximum number of items on a page. + MaxItems *string `type:"string" required:"true"` + + // The next page marker. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListChangeBatchesByRRSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChangeBatchesByRRSetOutput) GoString() string { + return s.String() +} + +// The input for a ListGeoLocations request. 
+type ListGeoLocationsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of geo locations you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The first continent code in the lexicographic ordering of geo locations that + // you want the ListGeoLocations request to list. For non-continent geo locations, + // this should be null. + // + // Valid values: AF | AN | AS | EU | OC | NA | SA + // + // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode + // returns an InvalidInput error. + StartContinentCode *string `location:"querystring" locationName:"startcontinentcode" min:"2" type:"string"` + + // The first country code in the lexicographic ordering of geo locations that + // you want the ListGeoLocations request to list. + // + // The default geo location uses a * for the country code. All other country + // codes follow the ISO 3166 two-character code. + StartCountryCode *string `location:"querystring" locationName:"startcountrycode" min:"1" type:"string"` + + // The first subdivision code in the lexicographic ordering of geo locations + // that you want the ListGeoLocations request to list. + // + // Constraint: Specifying SubdivisionCode without CountryCode returns an InvalidInput + // error. + StartSubdivisionCode *string `location:"querystring" locationName:"startsubdivisioncode" min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the geo locations that are +// returned by the request and information about the response. 
+type ListGeoLocationsOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the geo locations that are + // returned by the request. + GeoLocationDetailsList []*GeoLocationDetails `locationNameList:"GeoLocationDetails" type:"list" required:"true"` + + // A flag that indicates whether there are more geo locations to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the values included in the ListGeoLocationsResponse$NextContinentCode, + // ListGeoLocationsResponse$NextCountryCode and ListGeoLocationsResponse$NextSubdivisionCode + // elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // If the results were truncated, the continent code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location to list is a continent location. + NextContinentCode *string `min:"2" type:"string"` + + // If the results were truncated, the country code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location to list is not a continent location. + NextCountryCode *string `min:"1" type:"string"` + + // If the results were truncated, the subdivision code of the next geo location + // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated + // is true and the next geo location has a subdivision. 
+ NextSubdivisionCode *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGeoLocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGeoLocationsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your health checks, send a GET request to the 2013-04-01/healthcheck +// resource. The response to this request includes a HealthChecks element with +// zero or more HealthCheck child elements. By default, the list of health checks +// is displayed on a single page. You can control the length of the page that +// is displayed by using the MaxItems parameter. You can use the Marker parameter +// to control the health check that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListHealthChecksInput struct { + _ struct{} `type:"structure"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of health checks to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHealthChecksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHealthChecksInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHealthChecksOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the health checks associated + // with the current AWS account. 
+ HealthChecks []*HealthCheck `locationNameList:"HealthCheck" type:"list" required:"true"` + + // A flag indicating whether there are more health checks to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of health checks to be included in the response body. + // If the number of health checks associated with this AWS account exceeds MaxItems, + // the value of ListHealthChecksResponse$IsTruncated in the response is true. + // Call ListHealthChecks again and specify the value of ListHealthChecksResponse$NextMarker + // in the ListHostedZonesRequest$Marker element to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing health checks. If ListHealthChecksResponse$IsTruncated + // is true, make another request to ListHealthChecks and include the value of + // the NextMarker element in the Marker element to get the next page of results. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListHealthChecksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHealthChecksOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your hosted zones in lexicographic order, send a GET +// request to the 2013-04-01/hostedzonesbyname resource. The response to this +// request includes a HostedZones element with zero or more HostedZone child +// elements lexicographically ordered by DNS name. 
By default, the list of hosted +// zones is displayed on a single page. You can control the length of the page +// that is displayed by using the MaxItems parameter. You can use the DNSName +// and HostedZoneId parameters to control the hosted zone that the list begins +// with. +// +// For more information about listing hosted zones, see Listing the Hosted +// Zones for an AWS Account (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html) +// in the Amazon Route 53 Developer Guide. +type ListHostedZonesByNameInput struct { + _ struct{} `type:"structure"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListHostedZonesByNameRequest request to list. + // + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + DNSName *string `location:"querystring" locationName:"dnsname" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextDNSName and NextHostedZoneId from the last response + // in the DNSName and HostedZoneId parameters to get the next page of results. + HostedZoneId *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. 
+type ListHostedZonesByNameOutput struct { + _ struct{} `type:"structure"` + + // The DNSName value sent in the request. + DNSName *string `type:"string"` + + // The HostedZoneId value sent in the request. + HostedZoneId *string `type:"string"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the NextDNSName and NextHostedZoneId elements. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of ListHostedZonesByNameResponse$IsTruncated in the response is + // true. Call ListHostedZonesByName again and specify the value of ListHostedZonesByNameResponse$NextDNSName + // and ListHostedZonesByNameResponse$NextHostedZoneId elements respectively + // to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted + // zones associated with the current AWS account. To get the next page of results, + // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName + // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId + // in the ListHostedZonesByNameRequest$HostedZoneId element. + NextDNSName *string `type:"string"` + + // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted + // zones associated with the current AWS account. 
To get the next page of results, + // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName + // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId + // in the ListHostedZonesByNameRequest$HostedZoneId element. + NextHostedZoneId *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByNameOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByNameOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your hosted zones, send a GET request to the 2013-04-01/hostedzone +// resource. The response to this request includes a HostedZones element with +// zero or more HostedZone child elements. By default, the list of hosted zones +// is displayed on a single page. You can control the length of the page that +// is displayed by using the MaxItems parameter. You can use the Marker parameter +// to control the hosted zone that the list begins with. For more information +// about listing hosted zones, see Listing the Hosted Zones for an AWS Account +// (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html) +// in the Amazon Route 53 Developer Guide. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListHostedZonesInput struct { + _ struct{} `type:"structure"` + + DelegationSetId *string `location:"querystring" locationName:"delegationsetid" type:"string"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. 
+ Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of hosted zones to return per page of results. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response for the request. +type ListHostedZonesOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains information about the hosted zones associated + // with the current AWS account. + HostedZones []*HostedZone `locationNameList:"HostedZone" type:"list" required:"true"` + + // A flag indicating whether there are more hosted zones to be listed. If your + // results were truncated, you can make a follow-up request for the next page + // of results by using the Marker element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `type:"string" required:"true"` + + // The maximum number of hosted zones to be included in the response body. If + // the number of hosted zones associated with this AWS account exceeds MaxItems, + // the value of ListHostedZonesResponse$IsTruncated in the response is true. + // Call ListHostedZones again and specify the value of ListHostedZonesResponse$NextMarker + // in the ListHostedZonesRequest$Marker element to get the next page of results. + MaxItems *string `type:"string" required:"true"` + + // Indicates where to continue listing hosted zones. 
If ListHostedZonesResponse$IsTruncated + // is true, make another request to ListHostedZones and include the value of + // the NextMarker element in the Marker element to get the next page of results. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesOutput) GoString() string { + return s.String() +} + +// The input for a ListResourceRecordSets request. +type ListResourceRecordSetsInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone that contains the resource record sets that you + // want to get. + HostedZoneId *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of records you want in the response body. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, specify the value of ListResourceRecordSetsResponse$NextRecordIdentifier + // from the previous response to get the next resource record set that has the + // current DNS name and type. + StartRecordIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` + + // The first name in the lexicographic ordering of domain names that you want + // the ListResourceRecordSets request to list. + StartRecordName *string `location:"querystring" locationName:"name" type:"string"` + + // The DNS type at which to begin the listing of resource record sets. 
+ // + // Valid values: A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT + // + // Values for Weighted Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Regional Resource Record Sets: A | AAAA | CNAME | TXT + // + // Values for Alias Resource Record Sets: A | AAAA + // + // Constraint: Specifying type without specifying name returns an InvalidInput + // error. + StartRecordType *string `location:"querystring" locationName:"type" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListResourceRecordSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// are returned by the request and information about the response. +type ListResourceRecordSetsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more resource record sets to be listed. + // If your results were truncated, you can make a follow-up request for the + // next page of results by using the ListResourceRecordSetsResponse$NextRecordName + // element. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The maximum number of records you requested. The maximum value of MaxItems + // is 100. + MaxItems *string `type:"string" required:"true"` + + // Weighted resource record sets only: If results were truncated for a given + // DNS name and type, the value of SetIdentifier for the next resource record + // set that has the current DNS name and type. + NextRecordIdentifier *string `min:"1" type:"string"` + + // If the results were truncated, the name of the next record in the list. This + // element is present only if ListResourceRecordSetsResponse$IsTruncated is + // true. 
+ NextRecordName *string `type:"string"` + + // If the results were truncated, the type of the next record in the list. This + // element is present only if ListResourceRecordSetsResponse$IsTruncated is + // true. + NextRecordType *string `type:"string" enum:"RRType"` + + // A complex type that contains information about the resource record sets that + // are returned by the request. + ResourceRecordSets []*ResourceRecordSet `locationNameList:"ResourceRecordSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListResourceRecordSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceRecordSetsOutput) GoString() string { + return s.String() +} + +// To retrieve a list of your reusable delegation sets, send a GET request to +// the 2013-04-01/delegationset resource. The response to this request includes +// a DelegationSets element with zero or more DelegationSet child elements. +// By default, the list of reusable delegation sets is displayed on a single +// page. You can control the length of the page that is displayed by using the +// MaxItems parameter. You can use the Marker parameter to control the delegation +// set that the list begins with. +// +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to +// a value greater than 100, Amazon Route 53 returns only the first 100. +type ListReusableDelegationSetsInput struct { + _ struct{} `type:"structure"` + + // If the request returned more than one page of results, submit another request + // and specify the value of NextMarker from the last response in the marker + // parameter to get the next page of results. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Specify the maximum number of reusable delegation sets to return per page + // of results. 
+	MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"`
+}
+
+// String returns the string representation
+func (s ListReusableDelegationSetsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReusableDelegationSetsInput) GoString() string {
+	return s.String()
+}
+
+// A complex type that contains the response for the request.
+type ListReusableDelegationSetsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A complex type that contains information about the reusable delegation sets
+	// associated with the current AWS account.
+	DelegationSets []*DelegationSet `locationNameList:"DelegationSet" type:"list" required:"true"`
+
+	// A flag indicating whether there are more reusable delegation sets to be listed.
+	// If your results were truncated, you can make a follow-up request for the
+	// next page of results by using the Marker element.
+	//
+	// Valid Values: true | false
+	IsTruncated *bool `type:"boolean" required:"true"`
+
+	// If the request returned more than one page of results, submit another request
+	// and specify the value of NextMarker from the last response in the marker
+	// parameter to get the next page of results.
+	Marker *string `type:"string" required:"true"`
+
+	// The maximum number of reusable delegation sets to be included in the response
+	// body. If the number of reusable delegation sets associated with this AWS
+	// account exceeds MaxItems, the value of ListReusableDelegationSetsResponse$IsTruncated
+	// in the response is true. Call ListReusableDelegationSets again and specify
+	// the value of ListReusableDelegationSetsResponse$NextMarker in the ListReusableDelegationSetsRequest$Marker
+	// element to get the next page of results.
+	MaxItems *string `type:"string" required:"true"`
+
+	// Indicates where to continue listing reusable delegation sets. If ListReusableDelegationSetsResponse$IsTruncated
+	// is true, make another request to ListReusableDelegationSets and include the
+	// value of the NextMarker element in the Marker element to get the next page
+	// of results.
+	NextMarker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListReusableDelegationSetsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListReusableDelegationSetsOutput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing information about a request for a list of the tags
+// that are associated with an individual resource.
+type ListTagsForResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the resource for which you want to retrieve tags.
+	ResourceId *string `location:"uri" locationName:"ResourceId" type:"string" required:"true"`
+
+	// The type of the resource.
+	//
+	// - The resource type for health checks is healthcheck.
+	//
+	// - The resource type for hosted zones is hostedzone.
+	ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceInput) GoString() string {
+	return s.String()
+}
+
+// A complex type containing tags for the specified resource.
+type ListTagsForResourceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A ResourceTagSet containing tags associated with the specified resource.
+ ResourceTagSet *ResourceTagSet `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// A complex type containing information about a request for a list of the tags +// that are associated with up to 10 specified resources. +type ListTagsForResourcesInput struct { + _ struct{} `locationName:"ListTagsForResourcesRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A complex type that contains the ResourceId element for each resource for + // which you want to get a list of tags. + ResourceIds []*string `locationNameList:"ResourceId" min:"1" type:"list" required:"true"` + + // The type of the resources. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `location:"uri" locationName:"ResourceType" type:"string" required:"true" enum:"TagResourceType"` +} + +// String returns the string representation +func (s ListTagsForResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesInput) GoString() string { + return s.String() +} + +// A complex type containing tags for the specified resources. +type ListTagsForResourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of ResourceTagSets containing tags associated with the specified resources. 
+ ResourceTagSets []*ResourceTagSet `locationNameList:"ResourceTagSet" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourcesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list the +// traffic policies that are associated with the current AWS account. +type ListTrafficPoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of traffic policies to be included in the response body + // for this request. If you have more than MaxItems traffic policies, the value + // of the IsTruncated element in the response is true, and the value of the + // TrafficPolicyIdMarker element is the ID of the first traffic policy in the + // next group of MaxItems traffic policies. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicies, do not include the TrafficPolicyIdMarker + // parameter. + // + // If you have more traffic policies than the value of MaxItems, ListTrafficPolicies + // returns only the first MaxItems traffic policies. To get the next group of + // MaxItems policies, submit another request to ListTrafficPolicies. For the + // value of TrafficPolicyIdMarker, specify the value of the TrafficPolicyIdMarker + // element that was returned in the previous response. + // + // Policies are listed in the order in which they were created. 
+ TrafficPolicyIdMarker *string `location:"querystring" locationName:"trafficpolicyid" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of MaxItems traffic + // policies by calling ListTrafficPolicies again and specifying the value of + // the TrafficPolicyIdMarker element in the TrafficPolicyIdMarker request parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicies + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If the value of IsTruncated is true, TrafficPolicyIdMarker is the ID of the + // first traffic policy in the next group of MaxItems traffic policies. + TrafficPolicyIdMarker *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicySummary element for each traffic policy + // that was created by the current AWS account. + TrafficPolicySummaries []*TrafficPolicySummary `locationNameList:"TrafficPolicySummary" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPoliciesOutput) GoString() string { + return s.String() +} + +// A request for the traffic policy instances that you created in a specified +// hosted zone. 
+type ListTrafficPolicyInstancesByHostedZoneInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted zone for which you want to list traffic policy instances. + HostedZoneId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByHostedZone, omit this + // value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. 
+ TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByHostedZoneOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByHostedZone + // again and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByHostedZone + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. 
+ TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByHostedZoneOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesByPolicyInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, HostedZoneIdMarker + // is the ID of the hosted zone for the first traffic policy instance in the + // next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. 
+ MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // The ID of the traffic policy for which you want to list traffic policy instances. + TrafficPolicyId *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + // + // If the value of IsTruncated in the previous response was false, omit this + // value. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstancesByPolicy, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get for this hosted zone. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` + + // The version of the traffic policy for which you want to list traffic policy + // instances. The version must be associated with the traffic policy that is + // specified by TrafficPolicyId. 
+ TrafficPolicyVersion *int64 `location:"querystring" locationName:"version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyInstancesByPolicyOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstancesByPolicy again + // and specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstancesByPolicy + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. 
+ TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. + TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesByPolicyOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policy instances. +type ListTrafficPolicyInstancesInput struct { + _ struct{} `type:"structure"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, you have + // more traffic policy instances. To get the next group of MaxItems traffic + // policy instances, submit another ListTrafficPolicyInstances request. For + // the value of HostedZoneIdMarker, specify the value of HostedZoneIdMarker + // from the previous response, which is the hosted zone ID of the first traffic + // policy instance in the next group of MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + HostedZoneIdMarker *string `location:"querystring" locationName:"hostedzoneid" type:"string"` + + // The maximum number of traffic policy instances to be included in the response + // body for this request. 
If you have more than MaxItems traffic policy instances, + // the value of the IsTruncated element in the response is true, and the values + // of HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, and TrafficPolicyInstanceTypeMarker + // represent the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceNameMarker + // is the name of the first traffic policy instance in the next group of MaxItems + // traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceNameMarker *string `location:"querystring" locationName:"trafficpolicyinstancename" type:"string"` + + // For the first request to ListTrafficPolicyInstances, omit this value. + // + // If the value of IsTruncated in the previous response was true, TrafficPolicyInstanceTypeMarker + // is the DNS type of the first traffic policy instance in the next group of + // MaxItems traffic policy instances. + // + // If the value of IsTruncated in the previous response was false, there are + // no more traffic policy instances to get. + TrafficPolicyInstanceTypeMarker *string `location:"querystring" locationName:"trafficpolicyinstancetype" type:"string" enum:"RRType"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. 
+type ListTrafficPolicyInstancesOutput struct { + _ struct{} `type:"structure"` + + // If IsTruncated is true, HostedZoneIdMarker is the ID of the hosted zone of + // the first traffic policy instance in the next group of MaxItems traffic policy + // instances. + HostedZoneIdMarker *string `type:"string"` + + // A flag that indicates whether there are more traffic policy instances to + // be listed. If the response was truncated, you can get the next group of MaxItems + // traffic policy instances by calling ListTrafficPolicyInstances again and + // specifying the values of the HostedZoneIdMarker, TrafficPolicyInstanceNameMarker, + // and TrafficPolicyInstanceTypeMarker elements in the corresponding request + // parameters. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the MaxItems parameter in the call to ListTrafficPolicyInstances + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // If IsTruncated is true, TrafficPolicyInstanceNameMarker is the name of the + // first traffic policy instance in the next group of MaxItems traffic policy + // instances. + TrafficPolicyInstanceNameMarker *string `type:"string"` + + // If IsTruncated is true, TrafficPolicyInstanceTypeMarker is the DNS type of + // the resource record sets that are associated with the first traffic policy + // instance in the next group of MaxItems traffic policy instances. + TrafficPolicyInstanceTypeMarker *string `type:"string" enum:"RRType"` + + // A list that contains one TrafficPolicyInstance element for each traffic policy + // instance that matches the elements in the request. 
+ TrafficPolicyInstances []*TrafficPolicyInstance `locationNameList:"TrafficPolicyInstance" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyInstancesOutput) GoString() string { + return s.String() +} + +// A complex type that contains the information about the request to list your +// traffic policies. +type ListTrafficPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Specify the value of Id of the traffic policy for which you want to list + // all versions. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The maximum number of traffic policy versions that you want Amazon Route + // 53 to include in the response body for this request. If the specified traffic + // policy has more than MaxItems versions, the value of the IsTruncated element + // in the response is true, and the value of the TrafficPolicyVersionMarker + // element is the ID of the first version in the next group of MaxItems traffic + // policy versions. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // For your first request to ListTrafficPolicyVersions, do not include the TrafficPolicyVersionMarker + // parameter. + // + // If you have more traffic policy versions than the value of MaxItems, ListTrafficPolicyVersions + // returns only the first group of MaxItems versions. To get the next group + // of MaxItems traffic policy versions, submit another request to ListTrafficPolicyVersions. + // For the value of TrafficPolicyVersionMarker, specify the value of the TrafficPolicyVersionMarker + // element that was returned in the previous response. + // + // Traffic policy versions are listed in sequential order. 
+ TrafficPolicyVersionMarker *string `location:"querystring" locationName:"trafficpolicyversion" type:"string"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the request. +type ListTrafficPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more traffic policies to be listed. + // If the response was truncated, you can get the next group of maxitems traffic + // policies by calling ListTrafficPolicyVersions again and specifying the value + // of the NextMarker element in the marker parameter. + // + // Valid Values: true | false + IsTruncated *bool `type:"boolean" required:"true"` + + // The value that you specified for the maxitems parameter in the call to ListTrafficPolicyVersions + // that produced the current response. + MaxItems *string `type:"string" required:"true"` + + // A list that contains one TrafficPolicy element for each traffic policy version + // that is associated with the specified traffic policy. + TrafficPolicies []*TrafficPolicy `locationNameList:"TrafficPolicy" type:"list" required:"true"` + + // If IsTruncated is true, the value of TrafficPolicyVersionMarker identifies + // the first traffic policy in the next group of MaxItems traffic policies. + // Call ListTrafficPolicyVersions again and specify the value of TrafficPolicyVersionMarker + // in the TrafficPolicyVersionMarker request parameter. + // + // This element is present only if IsTruncated is true. 
+ TrafficPolicyVersionMarker *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTrafficPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTrafficPolicyVersionsOutput) GoString() string { + return s.String() +} + +// A complex type that contains the value of the Value element for the current +// resource record set. +type ResourceRecord struct { + _ struct{} `type:"structure"` + + // The current or new DNS record value, not to exceed 4,000 characters. In the + // case of a DELETE action, if the current value does not match the actual value, + // an error is returned. For descriptions about how to format Value for different + // record types, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. + // + // You can specify more than one value for all record types except CNAME and + // SOA. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecord) GoString() string { + return s.String() +} + +// A complex type that contains information about the current resource record +// set. +type ResourceRecordSet struct { + _ struct{} `type:"structure"` + + // Alias resource record sets only: Information about the AWS resource to which + // you are redirecting traffic. + AliasTarget *AliasTarget `type:"structure"` + + // Failover resource record sets only: To configure failover, you add the Failover + // element to two resource record sets. For one resource record set, you specify + // PRIMARY as the value for Failover; for the other resource record set, you + // specify SECONDARY. 
In addition, you include the HealthCheckId element and + // specify the health check that you want Amazon Route 53 to perform for each + // resource record set. + // + // You can create failover and failover alias resource record sets only in + // public hosted zones. Except where noted, the following failover behaviors + // assume that you have included the HealthCheckId element in both resource + // record sets: + // + // When the primary resource record set is healthy, Amazon Route 53 responds + // to DNS queries with the applicable value from the primary resource record + // set regardless of the health of the secondary resource record set. When the + // primary resource record set is unhealthy and the secondary resource record + // set is healthy, Amazon Route 53 responds to DNS queries with the applicable + // value from the secondary resource record set. When the secondary resource + // record set is unhealthy, Amazon Route 53 responds to DNS queries with the + // applicable value from the primary resource record set regardless of the health + // of the primary resource record set. If you omit the HealthCheckId element + // for the secondary resource record set, and if the primary resource record + // set is unhealthy, Amazon Route 53 always responds to DNS queries with the + // applicable value from the secondary resource record set. This is true regardless + // of the health of the associated endpoint. You cannot create non-failover + // resource record sets that have the same values for the Name and Type elements + // as failover resource record sets. + // + // For failover alias resource record sets, you must also include the EvaluateTargetHealth + // element and set the value to true. + // + // For more information about configuring failover for Amazon Route 53, see + // Amazon Route 53 Health Checks and DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // in the Amazon Route 53 Developer Guide. 
+ // + // Valid values: PRIMARY | SECONDARY + Failover *string `type:"string" enum:"ResourceRecordSetFailover"` + + // Geo location resource record sets only: A complex type that lets you control + // how Amazon Route 53 responds to DNS queries based on the geographic origin + // of the query. For example, if you want all queries from Africa to be routed + // to a web server with an IP address of 192.0.2.111, create a resource record + // set with a Type of A and a ContinentCode of AF. + // + // You can create geolocation and geolocation alias resource record sets only + // in public hosted zones. If you create separate resource record sets for overlapping + // geographic regions (for example, one resource record set for a continent + // and one for a country on the same continent), priority goes to the smallest + // geographic region. This allows you to route most queries for a continent + // to one resource and to route queries for a country on that continent to a + // different resource. + // + // You cannot create two geolocation resource record sets that specify the + // same geographic location. + // + // The value * in the CountryCode element matches all geographic locations + // that aren't specified in other geolocation resource record sets that have + // the same values for the Name and Type elements. + // + // Geolocation works by mapping IP addresses to locations. However, some IP + // addresses aren't mapped to geographic locations, so even if you create geolocation + // resource record sets that cover all seven continents, Amazon Route 53 will + // receive some DNS queries from locations that it can't identify. We recommend + // that you create a resource record set for which the value of CountryCode + // is *, which handles both queries that come from locations for which you haven't + // created geolocation resource record sets and queries from IP addresses that + // aren't mapped to a location. 
If you don't create a * resource record set, + // Amazon Route 53 returns a "no answer" response for queries from those locations. + // You cannot create non-geolocation resource record sets that have the same + // values for the Name and Type elements as geolocation resource record sets. + GeoLocation *GeoLocation `type:"structure"` + + // Health Check resource record sets only, not required for alias resource record + // sets: An identifier that is used to identify health check associated with + // the resource record set. + HealthCheckId *string `type:"string"` + + // The name of the domain you want to perform the action on. + // + // Enter a fully qualified domain name, for example, www.example.com. You can + // optionally include a trailing dot. If you omit the trailing dot, Amazon Route + // 53 still assumes that the domain name that you specify is fully qualified. + // This means that Amazon Route 53 treats www.example.com (without a trailing + // dot) and www.example.com. (with a trailing dot) as identical. + // + // For information about how to specify characters other than a-z, 0-9, and + // - (hyphen) and how to specify internationalized domain names, see DNS Domain + // Name Format (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html) + // in the Amazon Route 53 Developer Guide. + // + // You can use an asterisk (*) character in the name. DNS treats the * character + // either as a wildcard or as the * character (ASCII 42), depending on where + // it appears in the name. For more information, see Using an Asterisk (*) in + // the Names of Hosted Zones and Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html#domain-name-format-asterisk) + // in the Amazon Route 53 Developer Guide + // + // You can't use the * wildcard for resource records sets that have a type + // of NS. 
+ Name *string `type:"string" required:"true"` + + // Latency-based resource record sets only: The Amazon EC2 region where the + // resource that is specified in this resource record set resides. The resource + // typically is an AWS resource, such as an Amazon EC2 instance or an ELB load + // balancer, and is referred to by an IP address or a DNS domain name, depending + // on the record type. + // + // You can create latency and latency alias resource record sets only in public + // hosted zones. When Amazon Route 53 receives a DNS query for a domain name + // and type for which you have created latency resource record sets, Amazon + // Route 53 selects the latency resource record set that has the lowest latency + // between the end user and the associated Amazon EC2 region. Amazon Route 53 + // then returns the value that is associated with the selected resource record + // set. + // + // Note the following: + // + // You can only specify one ResourceRecord per latency resource record set. + // You can only create one latency resource record set for each Amazon EC2 region. + // You are not required to create latency resource record sets for all Amazon + // EC2 regions. Amazon Route 53 will choose the region with the best latency + // from among the regions for which you create latency resource record sets. + // You cannot create non-latency resource record sets that have the same values + // for the Name and Type elements as latency resource record sets. + Region *string `min:"1" type:"string" enum:"ResourceRecordSetRegion"` + + // A complex type that contains the resource records for the current resource + // record set. + ResourceRecords []*ResourceRecord `locationNameList:"ResourceRecord" min:"1" type:"list"` + + // Weighted, Latency, Geo, and Failover resource record sets only: An identifier + // that differentiates among multiple resource record sets that have the same + // combination of DNS name and type. 
The value of SetIdentifier must be unique + // for each resource record set that has the same combination of DNS name and + // type. + SetIdentifier *string `min:"1" type:"string"` + + // The cache time to live for the current resource record set. Note the following: + // + // If you're creating an alias resource record set, omit TTL. Amazon Route + // 53 uses the value of TTL for the alias target. If you're associating this + // resource record set with a health check (if you're adding a HealthCheckId + // element), we recommend that you specify a TTL of 60 seconds or less so clients + // respond quickly to changes in health status. All of the resource record sets + // in a group of weighted, latency, geolocation, or failover resource record + // sets must have the same value for TTL. If a group of weighted resource record + // sets includes one or more weighted alias resource record sets for which the + // alias target is an ELB load balancer, we recommend that you specify a TTL + // of 60 seconds for all of the non-alias weighted resource record sets that + // have the same name and type. Values other than 60 seconds (the TTL for load + // balancers) will change the effect of the values that you specify for Weight. + TTL *int64 `type:"long"` + + TrafficPolicyInstanceId *string `type:"string"` + + // The DNS record type. For information about different record types and how + // data is encoded for them, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // in the Amazon Route 53 Developer Guide. + // + // Valid values for basic resource record sets: A | AAAA | CNAME | MX | NS + // | PTR | SOA | SPF | SRV | TXT + // + // Values for weighted, latency, geolocation, and failover resource record + // sets: A | AAAA | CNAME | MX | PTR | SPF | SRV | TXT. 
When creating a group + // of weighted, latency, geolocation, or failover resource record sets, specify + // the same value for all of the resource record sets in the group. + // + // SPF records were formerly used to verify the identity of the sender of email + // messages. However, we no longer recommend that you create resource record + // sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework + // (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated + // to say, "...[I]ts existence and mechanism defined in [RFC4408] have led to + // some interoperability issues. Accordingly, its use is no longer appropriate + // for SPF version 1; implementations are not to use it." In RFC 7208, see section + // 14.1, The SPF DNS Record Type (http://tools.ietf.org/html/rfc7208#section-14.1). + // Values for alias resource record sets: + // + // CloudFront distributions: A ELB load balancers: A | AAAA Amazon S3 buckets: + // A Another resource record set in this hosted zone: Specify the type of the + // resource record set for which you're creating the alias. Specify any value + // except NS or SOA. + Type *string `type:"string" required:"true" enum:"RRType"` + + // Weighted resource record sets only: Among resource record sets that have + // the same combination of DNS name and type, a value that determines the proportion + // of DNS queries that Amazon Route 53 responds to using the current resource + // record set. Amazon Route 53 calculates the sum of the weights for the resource + // record sets that have the same combination of DNS name and type. Amazon Route + // 53 then responds to queries based on the ratio of a resource's weight to + // the total. Note the following: + // + // You must specify a value for the Weight element for every weighted resource + // record set. You can only specify one ResourceRecord per weighted resource + // record set. 
You cannot create latency, failover, or geolocation resource + // record sets that have the same values for the Name and Type elements as weighted + // resource record sets. You can create a maximum of 100 weighted resource record + // sets that have the same values for the Name and Type elements. For weighted + // (but not weighted alias) resource record sets, if you set Weight to 0 for + // a resource record set, Amazon Route 53 never responds to queries with the + // applicable value for that resource record set. However, if you set Weight + // to 0 for all resource record sets that have the same combination of DNS name + // and type, traffic is routed to all resources with equal probability. + // + // The effect of setting Weight to 0 is different when you associate health + // checks with weighted resource record sets. For more information, see Options + // for Configuring Amazon Route 53 Active-Active and Active-Passive Failover + // (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) + // in the Amazon Route 53 Developer Guide. + Weight *int64 `type:"long"` +} + +// String returns the string representation +func (s ResourceRecordSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecordSet) GoString() string { + return s.String() +} + +// A complex type containing a resource and its associated tags. +type ResourceTagSet struct { + _ struct{} `type:"structure"` + + // The ID for the specified resource. + ResourceId *string `type:"string"` + + // The type of the resource. + // + // - The resource type for health checks is healthcheck. + // + // - The resource type for hosted zones is hostedzone. + ResourceType *string `type:"string" enum:"TagResourceType"` + + // The tags associated with the specified resource. 
+ Tags []*Tag `locationNameList:"Tag" min:"1" type:"list"` +} + +// String returns the string representation +func (s ResourceTagSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceTagSet) GoString() string { + return s.String() +} + +// A complex type that contains information about the health check status for +// the current observation. +type StatusReport struct { + _ struct{} `type:"structure"` + + // The date and time the health check status was observed, in the format YYYY-MM-DDThh:mm:ssZ, + // as specified in the ISO 8601 standard (for example, 2009-11-19T19:37:58Z). + // The Z after the time indicates that the time is listed in Coordinated Universal + // Time (UTC). + CheckedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The observed health check status. + Status *string `type:"string"` +} + +// String returns the string representation +func (s StatusReport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusReport) GoString() string { + return s.String() +} + +// A single tag containing a key and value. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a Tag. + Key *string `type:"string"` + + // The value for a Tag. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +type TrafficPolicy struct { + _ struct{} `type:"structure"` + + Comment *string `type:"string"` + + Document *string `type:"string" required:"true"` + + Id *string `type:"string" required:"true"` + + Name *string `type:"string" required:"true"` + + Type *string `type:"string" required:"true" enum:"RRType"` + + Version *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s TrafficPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicy) GoString() string { + return s.String() +} + +type TrafficPolicyInstance struct { + _ struct{} `type:"structure"` + + HostedZoneId *string `type:"string" required:"true"` + + Id *string `type:"string" required:"true"` + + Message *string `type:"string" required:"true"` + + Name *string `type:"string" required:"true"` + + State *string `type:"string" required:"true"` + + TTL *int64 `type:"long" required:"true"` + + TrafficPolicyId *string `type:"string" required:"true"` + + TrafficPolicyType *string `type:"string" required:"true" enum:"RRType"` + + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s TrafficPolicyInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicyInstance) GoString() string { + return s.String() +} + +type TrafficPolicySummary struct { + _ struct{} `type:"structure"` + + Id *string `type:"string" required:"true"` + + LatestVersion *int64 `min:"1" type:"integer" required:"true"` + + Name *string `type:"string" required:"true"` + + TrafficPolicyCount *int64 `min:"1" type:"integer" 
required:"true"` + + Type *string `type:"string" required:"true" enum:"RRType"` +} + +// String returns the string representation +func (s TrafficPolicySummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficPolicySummary) GoString() string { + return s.String() +} + +// >A complex type that contains information about the request to update a health +// check. +type UpdateHealthCheckInput struct { + _ struct{} `locationName:"UpdateHealthCheckRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // For a specified parent health check, a list of HealthCheckId values for the + // associated child health checks. + // + // Specify this value only if you want to change it. + ChildHealthChecks []*string `locationNameList:"ChildHealthCheck" type:"list"` + + // The number of consecutive health checks that an endpoint must pass or fail + // for Amazon Route 53 to change the current status of the endpoint from unhealthy + // to healthy or vice versa. + // + // Valid values are integers between 1 and 10. For more information, see "How + // Amazon Route 53 Determines Whether an Endpoint Is Healthy" in the Amazon + // Route 53 Developer Guide. + // + // Specify this value only if you want to change it. + FailureThreshold *int64 `min:"1" type:"integer"` + + // Fully qualified domain name of the instance to be health checked. + // + // Specify this value only if you want to change it. + FullyQualifiedDomainName *string `type:"string"` + + // The ID of the health check to update. + HealthCheckId *string `location:"uri" locationName:"HealthCheckId" type:"string" required:"true"` + + // Optional. When you specify a health check version, Amazon Route 53 compares + // this value with the current value in the health check, which prevents you + // from updating the health check when the versions don't match. 
Using HealthCheckVersion + // lets you prevent overwriting another change to the health check. + HealthCheckVersion *int64 `min:"1" type:"long"` + + // The minimum number of child health checks that must be healthy for Amazon + // Route 53 to consider the parent health check to be healthy. Valid values + // are integers between 0 and 256, inclusive. + // + // Specify this value only if you want to change it. + HealthThreshold *int64 `type:"integer"` + + // The IP address of the resource that you want to check. + // + // Specify this value only if you want to change it. + IPAddress *string `type:"string"` + + // A boolean value that indicates whether the status of health check should + // be inverted. For example, if a health check is healthy but Inverted is True, + // then Amazon Route 53 considers the health check to be unhealthy. + // + // Specify this value only if you want to change it. + Inverted *bool `type:"boolean"` + + // The port on which you want Amazon Route 53 to open a connection to perform + // health checks. + // + // Specify this value only if you want to change it. + Port *int64 `min:"1" type:"integer"` + + // The path that you want Amazon Route 53 to request when performing health + // checks. The path can be any value for which your endpoint will return an + // HTTP status code of 2xx or 3xx when the endpoint is healthy, for example + // the file /docs/route53-health-check.html. + // + // Specify this value only if you want to change it. + ResourcePath *string `type:"string"` + + // If the value of Type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string that + // you want Amazon Route 53 to search for in the response body from the specified + // resource. If the string appears in the response body, Amazon Route 53 considers + // the resource healthy. + // + // Specify this value only if you want to change it. 
+ SearchString *string `type:"string"` +} + +// String returns the string representation +func (s UpdateHealthCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHealthCheckInput) GoString() string { + return s.String() +} + +type UpdateHealthCheckOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains identifying information about the health check. + HealthCheck *HealthCheck `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateHealthCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHealthCheckOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the request to update a hosted +// zone comment. +type UpdateHostedZoneCommentInput struct { + _ struct{} `locationName:"UpdateHostedZoneCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // A comment about your hosted zone. + Comment *string `type:"string"` + + // The ID of the hosted zone you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateHostedZoneCommentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHostedZoneCommentInput) GoString() string { + return s.String() +} + +// A complex type containing information about the specified hosted zone after +// the update. +type UpdateHostedZoneCommentOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contain information about the specified hosted zone. 
+ HostedZone *HostedZone `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateHostedZoneCommentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateHostedZoneCommentOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the traffic policy for which +// you want to update the comment. +type UpdateTrafficPolicyCommentInput struct { + _ struct{} `locationName:"UpdateTrafficPolicyCommentRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The new comment for the specified traffic policy and version. + Comment *string `type:"string" required:"true"` + + // The value of Id for the traffic policy for which you want to update the comment. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of Version for the traffic policy for which you want to update + // the comment. + Version *int64 `location:"uri" locationName:"Version" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyCommentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyCommentInput) GoString() string { + return s.String() +} + +// A complex type that contains the response information for the traffic policy. +type UpdateTrafficPolicyCommentOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the specified traffic policy. 
+ TrafficPolicy *TrafficPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyCommentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyCommentOutput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// you want to update based on a specified traffic policy instance. +type UpdateTrafficPolicyInstanceInput struct { + _ struct{} `locationName:"UpdateTrafficPolicyInstanceRequest" type:"structure" xmlURI:"https://route53.amazonaws.com/doc/2013-04-01/"` + + // The ID of the traffic policy instance that you want to update. + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The TTL that you want Amazon Route 53 to assign to all of the updated resource + // record sets. + TTL *int64 `type:"long" required:"true"` + + // The ID of the traffic policy that you want Amazon Route 53 to use to update + // resource record sets for the specified traffic policy instance. + TrafficPolicyId *string `type:"string" required:"true"` + + // The version of the traffic policy that you want Amazon Route 53 to use to + // update resource record sets for the specified traffic policy instance. + TrafficPolicyVersion *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceInput) GoString() string { + return s.String() +} + +// A complex type that contains information about the resource record sets that +// Amazon Route 53 created based on a specified traffic policy. 
+type UpdateTrafficPolicyInstanceOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the updated traffic policy instance. + TrafficPolicyInstance *TrafficPolicyInstance `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTrafficPolicyInstanceOutput) GoString() string { + return s.String() +} + +type VPC struct { + _ struct{} `type:"structure"` + + // A VPC ID + VPCId *string `type:"string"` + + VPCRegion *string `min:"1" type:"string" enum:"VPCRegion"` +} + +// String returns the string representation +func (s VPC) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VPC) GoString() string { + return s.String() +} + +const ( + // @enum ChangeAction + ChangeActionCreate = "CREATE" + // @enum ChangeAction + ChangeActionDelete = "DELETE" + // @enum ChangeAction + ChangeActionUpsert = "UPSERT" +) + +const ( + // @enum ChangeStatus + ChangeStatusPending = "PENDING" + // @enum ChangeStatus + ChangeStatusInsync = "INSYNC" +) + +const ( + // @enum HealthCheckType + HealthCheckTypeHttp = "HTTP" + // @enum HealthCheckType + HealthCheckTypeHttps = "HTTPS" + // @enum HealthCheckType + HealthCheckTypeHttpStrMatch = "HTTP_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeHttpsStrMatch = "HTTPS_STR_MATCH" + // @enum HealthCheckType + HealthCheckTypeTcp = "TCP" + // @enum HealthCheckType + HealthCheckTypeCalculated = "CALCULATED" +) + +const ( + // @enum RRType + RRTypeSoa = "SOA" + // @enum RRType + RRTypeA = "A" + // @enum RRType + RRTypeTxt = "TXT" + // @enum RRType + RRTypeNs = "NS" + // @enum RRType + RRTypeCname = "CNAME" + // @enum RRType + RRTypeMx = "MX" + // @enum RRType + RRTypePtr = "PTR" + // @enum RRType + RRTypeSrv = "SRV" + // @enum RRType + RRTypeSpf = "SPF" + // @enum 
RRType + RRTypeAaaa = "AAAA" +) + +const ( + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverPrimary = "PRIMARY" + // @enum ResourceRecordSetFailover + ResourceRecordSetFailoverSecondary = "SECONDARY" +) + +const ( + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsEast1 = "us-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest1 = "us-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionUsWest2 = "us-west-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuWest1 = "eu-west-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionEuCentral1 = "eu-central-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast1 = "ap-southeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSoutheast2 = "ap-southeast-2" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApNortheast1 = "ap-northeast-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionSaEast1 = "sa-east-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionCnNorth1 = "cn-north-1" +) + +const ( + // @enum TagResourceType + TagResourceTypeHealthcheck = "healthcheck" + // @enum TagResourceType + TagResourceTypeHostedzone = "hostedzone" +) + +const ( + // @enum VPCRegion + VPCRegionUsEast1 = "us-east-1" + // @enum VPCRegion + VPCRegionUsWest1 = "us-west-1" + // @enum VPCRegion + VPCRegionUsWest2 = "us-west-2" + // @enum VPCRegion + VPCRegionEuWest1 = "eu-west-1" + // @enum VPCRegion + VPCRegionEuCentral1 = "eu-central-1" + // @enum VPCRegion + VPCRegionApSoutheast1 = "ap-southeast-1" + // @enum VPCRegion + VPCRegionApSoutheast2 = "ap-southeast-2" + // @enum VPCRegion + VPCRegionApNortheast1 = "ap-northeast-1" + // @enum VPCRegion + VPCRegionSaEast1 = "sa-east-1" + // @enum VPCRegion + VPCRegionCnNorth1 = "cn-north-1" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/customizations.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,21 @@ +package route53 + +import ( + "regexp" + + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = func(c *client.Client) { + c.Handlers.Build.PushBack(sanitizeURL) + } +} + +var reSanitizeURL = regexp.MustCompile(`\/%2F\w+%2F`) + +func sanitizeURL(r *request.Request) { + r.HTTPRequest.URL.Opaque = + reSanitizeURL.ReplaceAllString(r.HTTPRequest.URL.Opaque, "/") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,22 @@ +package route53_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/route53" +) + +func TestBuildCorrectURI(t *testing.T) { + svc := route53.New(unit.Session) + svc.Handlers.Validate.Clear() + req, _ := svc.GetHostedZoneRequest(&route53.GetHostedZoneInput{ + Id: aws.String("/hostedzone/ABCDEFG"), + }) + + req.Build() + + awstesting.Match(t, `\/hostedzone\/ABCDEFG$`, req.HTTPRequest.URL.String()) +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1080 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRoute53_AssociateVPCWithHostedZone() { + svc := route53.New(session.New()) + + params := &route53.AssociateVPCWithHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + VPC: &route53.VPC{ // Required + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + Comment: aws.String("AssociateVPCComment"), + } + resp, err := svc.AssociateVPCWithHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_ChangeResourceRecordSets() { + svc := route53.New(session.New()) + + params := &route53.ChangeResourceRecordSetsInput{ + ChangeBatch: &route53.ChangeBatch{ // Required + Changes: []*route53.Change{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + ResourceRecordSet: &route53.ResourceRecordSet{ // Required + Name: aws.String("DNSName"), // Required + Type: aws.String("RRType"), // Required + AliasTarget: &route53.AliasTarget{ + DNSName: aws.String("DNSName"), // Required + EvaluateTargetHealth: aws.Bool(true), // Required + HostedZoneId: aws.String("ResourceId"), // Required + }, + Failover: aws.String("ResourceRecordSetFailover"), + GeoLocation: &route53.GeoLocation{ + ContinentCode: aws.String("GeoLocationContinentCode"), + CountryCode: aws.String("GeoLocationCountryCode"), + SubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + }, + HealthCheckId: aws.String("HealthCheckId"), + Region: aws.String("ResourceRecordSetRegion"), + ResourceRecords: []*route53.ResourceRecord{ + { // Required + Value: aws.String("RData"), // Required + }, + // More values... + }, + SetIdentifier: aws.String("ResourceRecordSetIdentifier"), + TTL: aws.Int64(1), + TrafficPolicyInstanceId: aws.String("TrafficPolicyInstanceId"), + Weight: aws.Int64(1), + }, + }, + // More values... + }, + Comment: aws.String("ResourceDescription"), + }, + HostedZoneId: aws.String("ResourceId"), // Required + } + resp, err := svc.ChangeResourceRecordSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_ChangeTagsForResource() { + svc := route53.New(session.New()) + + params := &route53.ChangeTagsForResourceInput{ + ResourceId: aws.String("TagResourceId"), // Required + ResourceType: aws.String("TagResourceType"), // Required + AddTags: []*route53.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + RemoveTagKeys: []*string{ + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.ChangeTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.CreateHealthCheckInput{ + CallerReference: aws.String("HealthCheckNonce"), // Required + HealthCheckConfig: &route53.HealthCheckConfig{ // Required + Type: aws.String("HealthCheckType"), // Required + ChildHealthChecks: []*string{ + aws.String("HealthCheckId"), // Required + // More values... + }, + FailureThreshold: aws.Int64(1), + FullyQualifiedDomainName: aws.String("FullyQualifiedDomainName"), + HealthThreshold: aws.Int64(1), + IPAddress: aws.String("IPAddress"), + Inverted: aws.Bool(true), + MeasureLatency: aws.Bool(true), + Port: aws.Int64(1), + RequestInterval: aws.Int64(1), + ResourcePath: aws.String("ResourcePath"), + SearchString: aws.String("SearchString"), + }, + } + resp, err := svc.CreateHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_CreateHostedZone() { + svc := route53.New(session.New()) + + params := &route53.CreateHostedZoneInput{ + CallerReference: aws.String("Nonce"), // Required + Name: aws.String("DNSName"), // Required + DelegationSetId: aws.String("ResourceId"), + HostedZoneConfig: &route53.HostedZoneConfig{ + Comment: aws.String("ResourceDescription"), + PrivateZone: aws.Bool(true), + }, + VPC: &route53.VPC{ + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + } + resp, err := svc.CreateHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.CreateReusableDelegationSetInput{ + CallerReference: aws.String("Nonce"), // Required + HostedZoneId: aws.String("ResourceId"), + } + resp, err := svc.CreateReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyInput{ + Document: aws.String("TrafficPolicyDocument"), // Required + Name: aws.String("TrafficPolicyName"), // Required + Comment: aws.String("TrafficPolicyComment"), + } + resp, err := svc.CreateTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyInstanceInput{ + HostedZoneId: aws.String("ResourceId"), // Required + Name: aws.String("DNSName"), // Required + TTL: aws.Int64(1), // Required + TrafficPolicyId: aws.String("TrafficPolicyId"), // Required + TrafficPolicyVersion: aws.Int64(1), // Required + } + resp, err := svc.CreateTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_CreateTrafficPolicyVersion() { + svc := route53.New(session.New()) + + params := &route53.CreateTrafficPolicyVersionInput{ + Document: aws.String("TrafficPolicyDocument"), // Required + Id: aws.String("TrafficPolicyId"), // Required + Comment: aws.String("TrafficPolicyComment"), + } + resp, err := svc.CreateTrafficPolicyVersion(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.DeleteHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.DeleteHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_DeleteHostedZone() { + svc := route53.New(session.New()) + + params := &route53.DeleteHostedZoneInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.DeleteReusableDelegationSetInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.DeleteTrafficPolicyInput{ + Id: aws.String("TrafficPolicyId"), // Required + Version: aws.Int64(1), // Required + } + resp, err := svc.DeleteTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_DeleteTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.DeleteTrafficPolicyInstanceInput{ + Id: aws.String("TrafficPolicyInstanceId"), // Required + } + resp, err := svc.DeleteTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_DisassociateVPCFromHostedZone() { + svc := route53.New(session.New()) + + params := &route53.DisassociateVPCFromHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + VPC: &route53.VPC{ // Required + VPCId: aws.String("VPCId"), + VPCRegion: aws.String("VPCRegion"), + }, + Comment: aws.String("DisassociateVPCComment"), + } + resp, err := svc.DisassociateVPCFromHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetChange() { + svc := route53.New(session.New()) + + params := &route53.GetChangeInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetChange(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetChangeDetails() { + svc := route53.New(session.New()) + + params := &route53.GetChangeDetailsInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetChangeDetails(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetCheckerIpRanges() { + svc := route53.New(session.New()) + + var params *route53.GetCheckerIpRangesInput + resp, err := svc.GetCheckerIpRanges(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_GetGeoLocation() { + svc := route53.New(session.New()) + + params := &route53.GetGeoLocationInput{ + ContinentCode: aws.String("GeoLocationContinentCode"), + CountryCode: aws.String("GeoLocationCountryCode"), + SubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + } + resp, err := svc.GetGeoLocation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckCount() { + svc := route53.New(session.New()) + + var params *route53.GetHealthCheckCountInput + resp, err := svc.GetHealthCheckCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckLastFailureReason() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckLastFailureReasonInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheckLastFailureReason(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_GetHealthCheckStatus() { + svc := route53.New(session.New()) + + params := &route53.GetHealthCheckStatusInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + } + resp, err := svc.GetHealthCheckStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHostedZone() { + svc := route53.New(session.New()) + + params := &route53.GetHostedZoneInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetHostedZoneCount() { + svc := route53.New(session.New()) + + var params *route53.GetHostedZoneCountInput + resp, err := svc.GetHostedZoneCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetReusableDelegationSet() { + svc := route53.New(session.New()) + + params := &route53.GetReusableDelegationSetInput{ + Id: aws.String("ResourceId"), // Required + } + resp, err := svc.GetReusableDelegationSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicy() { + svc := route53.New(session.New()) + + params := &route53.GetTrafficPolicyInput{ + Id: aws.String("TrafficPolicyId"), // Required + Version: aws.Int64(1), // Required + } + resp, err := svc.GetTrafficPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.GetTrafficPolicyInstanceInput{ + Id: aws.String("TrafficPolicyInstanceId"), // Required + } + resp, err := svc.GetTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_GetTrafficPolicyInstanceCount() { + svc := route53.New(session.New()) + + var params *route53.GetTrafficPolicyInstanceCountInput + resp, err := svc.GetTrafficPolicyInstanceCount(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListChangeBatchesByHostedZone() { + svc := route53.New(session.New()) + + params := &route53.ListChangeBatchesByHostedZoneInput{ + EndDate: aws.String("Date"), // Required + HostedZoneId: aws.String("ResourceId"), // Required + StartDate: aws.String("Date"), // Required + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListChangeBatchesByHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListChangeBatchesByRRSet() { + svc := route53.New(session.New()) + + params := &route53.ListChangeBatchesByRRSetInput{ + EndDate: aws.String("Date"), // Required + HostedZoneId: aws.String("ResourceId"), // Required + Name: aws.String("DNSName"), // Required + StartDate: aws.String("Date"), // Required + Type: aws.String("RRType"), // Required + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + SetIdentifier: aws.String("ResourceRecordSetIdentifier"), + } + resp, err := svc.ListChangeBatchesByRRSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListGeoLocations() { + svc := route53.New(session.New()) + + params := &route53.ListGeoLocationsInput{ + MaxItems: aws.String("PageMaxItems"), + StartContinentCode: aws.String("GeoLocationContinentCode"), + StartCountryCode: aws.String("GeoLocationCountryCode"), + StartSubdivisionCode: aws.String("GeoLocationSubdivisionCode"), + } + resp, err := svc.ListGeoLocations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListHealthChecks() { + svc := route53.New(session.New()) + + params := &route53.ListHealthChecksInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHealthChecks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_ListHostedZones() { + svc := route53.New(session.New()) + + params := &route53.ListHostedZonesInput{ + DelegationSetId: aws.String("ResourceId"), + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHostedZones(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListHostedZonesByName() { + svc := route53.New(session.New()) + + params := &route53.ListHostedZonesByNameInput{ + DNSName: aws.String("DNSName"), + HostedZoneId: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListHostedZonesByName(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListResourceRecordSets() { + svc := route53.New(session.New()) + + params := &route53.ListResourceRecordSetsInput{ + HostedZoneId: aws.String("ResourceId"), // Required + MaxItems: aws.String("PageMaxItems"), + StartRecordIdentifier: aws.String("ResourceRecordSetIdentifier"), + StartRecordName: aws.String("DNSName"), + StartRecordType: aws.String("RRType"), + } + resp, err := svc.ListResourceRecordSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_ListReusableDelegationSets() { + svc := route53.New(session.New()) + + params := &route53.ListReusableDelegationSetsInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.String("PageMaxItems"), + } + resp, err := svc.ListReusableDelegationSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTagsForResource() { + svc := route53.New(session.New()) + + params := &route53.ListTagsForResourceInput{ + ResourceId: aws.String("TagResourceId"), // Required + ResourceType: aws.String("TagResourceType"), // Required + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTagsForResources() { + svc := route53.New(session.New()) + + params := &route53.ListTagsForResourcesInput{ + ResourceIds: []*string{ // Required + aws.String("TagResourceId"), // Required + // More values... + }, + ResourceType: aws.String("TagResourceType"), // Required + } + resp, err := svc.ListTagsForResources(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicies() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPoliciesInput{ + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyIdMarker: aws.String("TrafficPolicyId"), + } + resp, err := svc.ListTrafficPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstances() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesInput{ + HostedZoneIdMarker: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstances(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstancesByHostedZone() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesByHostedZoneInput{ + HostedZoneId: aws.String("ResourceId"), // Required + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstancesByHostedZone(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyInstancesByPolicy() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyInstancesByPolicyInput{ + TrafficPolicyId: aws.String("TrafficPolicyId"), // Required + TrafficPolicyVersion: aws.Int64(1), // Required + HostedZoneIdMarker: aws.String("ResourceId"), + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyInstanceNameMarker: aws.String("DNSName"), + TrafficPolicyInstanceTypeMarker: aws.String("RRType"), + } + resp, err := svc.ListTrafficPolicyInstancesByPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_ListTrafficPolicyVersions() { + svc := route53.New(session.New()) + + params := &route53.ListTrafficPolicyVersionsInput{ + Id: aws.String("TrafficPolicyId"), // Required + MaxItems: aws.String("PageMaxItems"), + TrafficPolicyVersionMarker: aws.String("TrafficPolicyVersionMarker"), + } + resp, err := svc.ListTrafficPolicyVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_UpdateHealthCheck() { + svc := route53.New(session.New()) + + params := &route53.UpdateHealthCheckInput{ + HealthCheckId: aws.String("HealthCheckId"), // Required + ChildHealthChecks: []*string{ + aws.String("HealthCheckId"), // Required + // More values... 
+ }, + FailureThreshold: aws.Int64(1), + FullyQualifiedDomainName: aws.String("FullyQualifiedDomainName"), + HealthCheckVersion: aws.Int64(1), + HealthThreshold: aws.Int64(1), + IPAddress: aws.String("IPAddress"), + Inverted: aws.Bool(true), + Port: aws.Int64(1), + ResourcePath: aws.String("ResourcePath"), + SearchString: aws.String("SearchString"), + } + resp, err := svc.UpdateHealthCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_UpdateHostedZoneComment() { + svc := route53.New(session.New()) + + params := &route53.UpdateHostedZoneCommentInput{ + Id: aws.String("ResourceId"), // Required + Comment: aws.String("ResourceDescription"), + } + resp, err := svc.UpdateHostedZoneComment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53_UpdateTrafficPolicyComment() { + svc := route53.New(session.New()) + + params := &route53.UpdateTrafficPolicyCommentInput{ + Comment: aws.String("TrafficPolicyComment"), // Required + Id: aws.String("TrafficPolicyId"), // Required + Version: aws.Int64(1), // Required + } + resp, err := svc.UpdateTrafficPolicyComment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53_UpdateTrafficPolicyInstance() { + svc := route53.New(session.New()) + + params := &route53.UpdateTrafficPolicyInstanceInput{ + Id: aws.String("TrafficPolicyInstanceId"), // Required + TTL: aws.Int64(1), // Required + TrafficPolicyId: aws.String("TrafficPolicyId"), // Required + TrafficPolicyVersion: aws.Int64(1), // Required + } + resp, err := svc.UpdateTrafficPolicyInstance(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,212 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package route53iface provides an interface for the Amazon Route 53. +package route53iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/route53" +) + +// Route53API is the interface type for route53.Route53. 
+type Route53API interface { + AssociateVPCWithHostedZoneRequest(*route53.AssociateVPCWithHostedZoneInput) (*request.Request, *route53.AssociateVPCWithHostedZoneOutput) + + AssociateVPCWithHostedZone(*route53.AssociateVPCWithHostedZoneInput) (*route53.AssociateVPCWithHostedZoneOutput, error) + + ChangeResourceRecordSetsRequest(*route53.ChangeResourceRecordSetsInput) (*request.Request, *route53.ChangeResourceRecordSetsOutput) + + ChangeResourceRecordSets(*route53.ChangeResourceRecordSetsInput) (*route53.ChangeResourceRecordSetsOutput, error) + + ChangeTagsForResourceRequest(*route53.ChangeTagsForResourceInput) (*request.Request, *route53.ChangeTagsForResourceOutput) + + ChangeTagsForResource(*route53.ChangeTagsForResourceInput) (*route53.ChangeTagsForResourceOutput, error) + + CreateHealthCheckRequest(*route53.CreateHealthCheckInput) (*request.Request, *route53.CreateHealthCheckOutput) + + CreateHealthCheck(*route53.CreateHealthCheckInput) (*route53.CreateHealthCheckOutput, error) + + CreateHostedZoneRequest(*route53.CreateHostedZoneInput) (*request.Request, *route53.CreateHostedZoneOutput) + + CreateHostedZone(*route53.CreateHostedZoneInput) (*route53.CreateHostedZoneOutput, error) + + CreateReusableDelegationSetRequest(*route53.CreateReusableDelegationSetInput) (*request.Request, *route53.CreateReusableDelegationSetOutput) + + CreateReusableDelegationSet(*route53.CreateReusableDelegationSetInput) (*route53.CreateReusableDelegationSetOutput, error) + + CreateTrafficPolicyRequest(*route53.CreateTrafficPolicyInput) (*request.Request, *route53.CreateTrafficPolicyOutput) + + CreateTrafficPolicy(*route53.CreateTrafficPolicyInput) (*route53.CreateTrafficPolicyOutput, error) + + CreateTrafficPolicyInstanceRequest(*route53.CreateTrafficPolicyInstanceInput) (*request.Request, *route53.CreateTrafficPolicyInstanceOutput) + + CreateTrafficPolicyInstance(*route53.CreateTrafficPolicyInstanceInput) (*route53.CreateTrafficPolicyInstanceOutput, error) + + 
CreateTrafficPolicyVersionRequest(*route53.CreateTrafficPolicyVersionInput) (*request.Request, *route53.CreateTrafficPolicyVersionOutput) + + CreateTrafficPolicyVersion(*route53.CreateTrafficPolicyVersionInput) (*route53.CreateTrafficPolicyVersionOutput, error) + + DeleteHealthCheckRequest(*route53.DeleteHealthCheckInput) (*request.Request, *route53.DeleteHealthCheckOutput) + + DeleteHealthCheck(*route53.DeleteHealthCheckInput) (*route53.DeleteHealthCheckOutput, error) + + DeleteHostedZoneRequest(*route53.DeleteHostedZoneInput) (*request.Request, *route53.DeleteHostedZoneOutput) + + DeleteHostedZone(*route53.DeleteHostedZoneInput) (*route53.DeleteHostedZoneOutput, error) + + DeleteReusableDelegationSetRequest(*route53.DeleteReusableDelegationSetInput) (*request.Request, *route53.DeleteReusableDelegationSetOutput) + + DeleteReusableDelegationSet(*route53.DeleteReusableDelegationSetInput) (*route53.DeleteReusableDelegationSetOutput, error) + + DeleteTrafficPolicyRequest(*route53.DeleteTrafficPolicyInput) (*request.Request, *route53.DeleteTrafficPolicyOutput) + + DeleteTrafficPolicy(*route53.DeleteTrafficPolicyInput) (*route53.DeleteTrafficPolicyOutput, error) + + DeleteTrafficPolicyInstanceRequest(*route53.DeleteTrafficPolicyInstanceInput) (*request.Request, *route53.DeleteTrafficPolicyInstanceOutput) + + DeleteTrafficPolicyInstance(*route53.DeleteTrafficPolicyInstanceInput) (*route53.DeleteTrafficPolicyInstanceOutput, error) + + DisassociateVPCFromHostedZoneRequest(*route53.DisassociateVPCFromHostedZoneInput) (*request.Request, *route53.DisassociateVPCFromHostedZoneOutput) + + DisassociateVPCFromHostedZone(*route53.DisassociateVPCFromHostedZoneInput) (*route53.DisassociateVPCFromHostedZoneOutput, error) + + GetChangeRequest(*route53.GetChangeInput) (*request.Request, *route53.GetChangeOutput) + + GetChange(*route53.GetChangeInput) (*route53.GetChangeOutput, error) + + GetChangeDetailsRequest(*route53.GetChangeDetailsInput) (*request.Request, 
*route53.GetChangeDetailsOutput) + + GetChangeDetails(*route53.GetChangeDetailsInput) (*route53.GetChangeDetailsOutput, error) + + GetCheckerIpRangesRequest(*route53.GetCheckerIpRangesInput) (*request.Request, *route53.GetCheckerIpRangesOutput) + + GetCheckerIpRanges(*route53.GetCheckerIpRangesInput) (*route53.GetCheckerIpRangesOutput, error) + + GetGeoLocationRequest(*route53.GetGeoLocationInput) (*request.Request, *route53.GetGeoLocationOutput) + + GetGeoLocation(*route53.GetGeoLocationInput) (*route53.GetGeoLocationOutput, error) + + GetHealthCheckRequest(*route53.GetHealthCheckInput) (*request.Request, *route53.GetHealthCheckOutput) + + GetHealthCheck(*route53.GetHealthCheckInput) (*route53.GetHealthCheckOutput, error) + + GetHealthCheckCountRequest(*route53.GetHealthCheckCountInput) (*request.Request, *route53.GetHealthCheckCountOutput) + + GetHealthCheckCount(*route53.GetHealthCheckCountInput) (*route53.GetHealthCheckCountOutput, error) + + GetHealthCheckLastFailureReasonRequest(*route53.GetHealthCheckLastFailureReasonInput) (*request.Request, *route53.GetHealthCheckLastFailureReasonOutput) + + GetHealthCheckLastFailureReason(*route53.GetHealthCheckLastFailureReasonInput) (*route53.GetHealthCheckLastFailureReasonOutput, error) + + GetHealthCheckStatusRequest(*route53.GetHealthCheckStatusInput) (*request.Request, *route53.GetHealthCheckStatusOutput) + + GetHealthCheckStatus(*route53.GetHealthCheckStatusInput) (*route53.GetHealthCheckStatusOutput, error) + + GetHostedZoneRequest(*route53.GetHostedZoneInput) (*request.Request, *route53.GetHostedZoneOutput) + + GetHostedZone(*route53.GetHostedZoneInput) (*route53.GetHostedZoneOutput, error) + + GetHostedZoneCountRequest(*route53.GetHostedZoneCountInput) (*request.Request, *route53.GetHostedZoneCountOutput) + + GetHostedZoneCount(*route53.GetHostedZoneCountInput) (*route53.GetHostedZoneCountOutput, error) + + GetReusableDelegationSetRequest(*route53.GetReusableDelegationSetInput) (*request.Request, 
*route53.GetReusableDelegationSetOutput) + + GetReusableDelegationSet(*route53.GetReusableDelegationSetInput) (*route53.GetReusableDelegationSetOutput, error) + + GetTrafficPolicyRequest(*route53.GetTrafficPolicyInput) (*request.Request, *route53.GetTrafficPolicyOutput) + + GetTrafficPolicy(*route53.GetTrafficPolicyInput) (*route53.GetTrafficPolicyOutput, error) + + GetTrafficPolicyInstanceRequest(*route53.GetTrafficPolicyInstanceInput) (*request.Request, *route53.GetTrafficPolicyInstanceOutput) + + GetTrafficPolicyInstance(*route53.GetTrafficPolicyInstanceInput) (*route53.GetTrafficPolicyInstanceOutput, error) + + GetTrafficPolicyInstanceCountRequest(*route53.GetTrafficPolicyInstanceCountInput) (*request.Request, *route53.GetTrafficPolicyInstanceCountOutput) + + GetTrafficPolicyInstanceCount(*route53.GetTrafficPolicyInstanceCountInput) (*route53.GetTrafficPolicyInstanceCountOutput, error) + + ListChangeBatchesByHostedZoneRequest(*route53.ListChangeBatchesByHostedZoneInput) (*request.Request, *route53.ListChangeBatchesByHostedZoneOutput) + + ListChangeBatchesByHostedZone(*route53.ListChangeBatchesByHostedZoneInput) (*route53.ListChangeBatchesByHostedZoneOutput, error) + + ListChangeBatchesByRRSetRequest(*route53.ListChangeBatchesByRRSetInput) (*request.Request, *route53.ListChangeBatchesByRRSetOutput) + + ListChangeBatchesByRRSet(*route53.ListChangeBatchesByRRSetInput) (*route53.ListChangeBatchesByRRSetOutput, error) + + ListGeoLocationsRequest(*route53.ListGeoLocationsInput) (*request.Request, *route53.ListGeoLocationsOutput) + + ListGeoLocations(*route53.ListGeoLocationsInput) (*route53.ListGeoLocationsOutput, error) + + ListHealthChecksRequest(*route53.ListHealthChecksInput) (*request.Request, *route53.ListHealthChecksOutput) + + ListHealthChecks(*route53.ListHealthChecksInput) (*route53.ListHealthChecksOutput, error) + + ListHealthChecksPages(*route53.ListHealthChecksInput, func(*route53.ListHealthChecksOutput, bool) bool) error + + 
ListHostedZonesRequest(*route53.ListHostedZonesInput) (*request.Request, *route53.ListHostedZonesOutput) + + ListHostedZones(*route53.ListHostedZonesInput) (*route53.ListHostedZonesOutput, error) + + ListHostedZonesPages(*route53.ListHostedZonesInput, func(*route53.ListHostedZonesOutput, bool) bool) error + + ListHostedZonesByNameRequest(*route53.ListHostedZonesByNameInput) (*request.Request, *route53.ListHostedZonesByNameOutput) + + ListHostedZonesByName(*route53.ListHostedZonesByNameInput) (*route53.ListHostedZonesByNameOutput, error) + + ListResourceRecordSetsRequest(*route53.ListResourceRecordSetsInput) (*request.Request, *route53.ListResourceRecordSetsOutput) + + ListResourceRecordSets(*route53.ListResourceRecordSetsInput) (*route53.ListResourceRecordSetsOutput, error) + + ListResourceRecordSetsPages(*route53.ListResourceRecordSetsInput, func(*route53.ListResourceRecordSetsOutput, bool) bool) error + + ListReusableDelegationSetsRequest(*route53.ListReusableDelegationSetsInput) (*request.Request, *route53.ListReusableDelegationSetsOutput) + + ListReusableDelegationSets(*route53.ListReusableDelegationSetsInput) (*route53.ListReusableDelegationSetsOutput, error) + + ListTagsForResourceRequest(*route53.ListTagsForResourceInput) (*request.Request, *route53.ListTagsForResourceOutput) + + ListTagsForResource(*route53.ListTagsForResourceInput) (*route53.ListTagsForResourceOutput, error) + + ListTagsForResourcesRequest(*route53.ListTagsForResourcesInput) (*request.Request, *route53.ListTagsForResourcesOutput) + + ListTagsForResources(*route53.ListTagsForResourcesInput) (*route53.ListTagsForResourcesOutput, error) + + ListTrafficPoliciesRequest(*route53.ListTrafficPoliciesInput) (*request.Request, *route53.ListTrafficPoliciesOutput) + + ListTrafficPolicies(*route53.ListTrafficPoliciesInput) (*route53.ListTrafficPoliciesOutput, error) + + ListTrafficPolicyInstancesRequest(*route53.ListTrafficPolicyInstancesInput) (*request.Request, 
*route53.ListTrafficPolicyInstancesOutput) + + ListTrafficPolicyInstances(*route53.ListTrafficPolicyInstancesInput) (*route53.ListTrafficPolicyInstancesOutput, error) + + ListTrafficPolicyInstancesByHostedZoneRequest(*route53.ListTrafficPolicyInstancesByHostedZoneInput) (*request.Request, *route53.ListTrafficPolicyInstancesByHostedZoneOutput) + + ListTrafficPolicyInstancesByHostedZone(*route53.ListTrafficPolicyInstancesByHostedZoneInput) (*route53.ListTrafficPolicyInstancesByHostedZoneOutput, error) + + ListTrafficPolicyInstancesByPolicyRequest(*route53.ListTrafficPolicyInstancesByPolicyInput) (*request.Request, *route53.ListTrafficPolicyInstancesByPolicyOutput) + + ListTrafficPolicyInstancesByPolicy(*route53.ListTrafficPolicyInstancesByPolicyInput) (*route53.ListTrafficPolicyInstancesByPolicyOutput, error) + + ListTrafficPolicyVersionsRequest(*route53.ListTrafficPolicyVersionsInput) (*request.Request, *route53.ListTrafficPolicyVersionsOutput) + + ListTrafficPolicyVersions(*route53.ListTrafficPolicyVersionsInput) (*route53.ListTrafficPolicyVersionsOutput, error) + + UpdateHealthCheckRequest(*route53.UpdateHealthCheckInput) (*request.Request, *route53.UpdateHealthCheckOutput) + + UpdateHealthCheck(*route53.UpdateHealthCheckInput) (*route53.UpdateHealthCheckOutput, error) + + UpdateHostedZoneCommentRequest(*route53.UpdateHostedZoneCommentInput) (*request.Request, *route53.UpdateHostedZoneCommentOutput) + + UpdateHostedZoneComment(*route53.UpdateHostedZoneCommentInput) (*route53.UpdateHostedZoneCommentOutput, error) + + UpdateTrafficPolicyCommentRequest(*route53.UpdateTrafficPolicyCommentInput) (*request.Request, *route53.UpdateTrafficPolicyCommentOutput) + + UpdateTrafficPolicyComment(*route53.UpdateTrafficPolicyCommentInput) (*route53.UpdateTrafficPolicyCommentOutput, error) + + UpdateTrafficPolicyInstanceRequest(*route53.UpdateTrafficPolicyInstanceInput) (*request.Request, *route53.UpdateTrafficPolicyInstanceOutput) + + 
UpdateTrafficPolicyInstance(*route53.UpdateTrafficPolicyInstanceInput) (*route53.UpdateTrafficPolicyInstanceOutput, error) +} + +var _ Route53API = (*route53.Route53)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Route53 is a client for Route 53. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Route53 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "route53" + +// New creates a new instance of the Route53 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Route53 client from just a session. 
+// svc := route53.New(mySession) +// +// // Create a Route53 client with additional configuration +// svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Route53 { + svc := &Route53{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-04-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Route53 operation and runs any +// custom request initialization. 
+func (c *Route53) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3040 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package route53domains provides a client for Amazon Route 53 Domains. +package route53domains + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCheckDomainAvailability = "CheckDomainAvailability" + +// CheckDomainAvailabilityRequest generates a request for the CheckDomainAvailability operation. +func (c *Route53Domains) CheckDomainAvailabilityRequest(input *CheckDomainAvailabilityInput) (req *request.Request, output *CheckDomainAvailabilityOutput) { + op := &request.Operation{ + Name: opCheckDomainAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CheckDomainAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &CheckDomainAvailabilityOutput{} + req.Data = output + return +} + +// This operation checks the availability of one domain name. You can access +// this API without authenticating. Note that if the availability status of +// a domain is pending, you must submit another request to determine the availability +// of the domain name. 
+func (c *Route53Domains) CheckDomainAvailability(input *CheckDomainAvailabilityInput) (*CheckDomainAvailabilityOutput, error) { + req, out := c.CheckDomainAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTagsForDomain = "DeleteTagsForDomain" + +// DeleteTagsForDomainRequest generates a request for the DeleteTagsForDomain operation. +func (c *Route53Domains) DeleteTagsForDomainRequest(input *DeleteTagsForDomainInput) (req *request.Request, output *DeleteTagsForDomainOutput) { + op := &request.Operation{ + Name: opDeleteTagsForDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsForDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTagsForDomainOutput{} + req.Data = output + return +} + +// This operation deletes the specified tags for a domain. +// +// All tag operations are eventually consistent; subsequent operations may +// not immediately represent all issued operations. +func (c *Route53Domains) DeleteTagsForDomain(input *DeleteTagsForDomainInput) (*DeleteTagsForDomainOutput, error) { + req, out := c.DeleteTagsForDomainRequest(input) + err := req.Send() + return out, err +} + +const opDisableDomainAutoRenew = "DisableDomainAutoRenew" + +// DisableDomainAutoRenewRequest generates a request for the DisableDomainAutoRenew operation. +func (c *Route53Domains) DisableDomainAutoRenewRequest(input *DisableDomainAutoRenewInput) (req *request.Request, output *DisableDomainAutoRenewOutput) { + op := &request.Operation{ + Name: opDisableDomainAutoRenew, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableDomainAutoRenewInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableDomainAutoRenewOutput{} + req.Data = output + return +} + +// This operation disables automatic renewal of domain registration for the +// specified domain. +// +// Caution! 
Amazon Route 53 doesn't have a manual renewal process, so if you +// disable automatic renewal, registration for the domain will not be renewed +// when the expiration date passes, and you will lose control of the domain +// name. +func (c *Route53Domains) DisableDomainAutoRenew(input *DisableDomainAutoRenewInput) (*DisableDomainAutoRenewOutput, error) { + req, out := c.DisableDomainAutoRenewRequest(input) + err := req.Send() + return out, err +} + +const opDisableDomainTransferLock = "DisableDomainTransferLock" + +// DisableDomainTransferLockRequest generates a request for the DisableDomainTransferLock operation. +func (c *Route53Domains) DisableDomainTransferLockRequest(input *DisableDomainTransferLockInput) (req *request.Request, output *DisableDomainTransferLockOutput) { + op := &request.Operation{ + Name: opDisableDomainTransferLock, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableDomainTransferLockInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableDomainTransferLockOutput{} + req.Data = output + return +} + +// This operation removes the transfer lock on the domain (specifically the +// clientTransferProhibited status) to allow domain transfers. We recommend +// you refrain from performing this action unless you intend to transfer the +// domain to a different registrar. Successful submission returns an operation +// ID that you can use to track the progress and completion of the action. If +// the request is not completed successfully, the domain registrant will be +// notified by email. +func (c *Route53Domains) DisableDomainTransferLock(input *DisableDomainTransferLockInput) (*DisableDomainTransferLockOutput, error) { + req, out := c.DisableDomainTransferLockRequest(input) + err := req.Send() + return out, err +} + +const opEnableDomainAutoRenew = "EnableDomainAutoRenew" + +// EnableDomainAutoRenewRequest generates a request for the EnableDomainAutoRenew operation. 
+func (c *Route53Domains) EnableDomainAutoRenewRequest(input *EnableDomainAutoRenewInput) (req *request.Request, output *EnableDomainAutoRenewOutput) { + op := &request.Operation{ + Name: opEnableDomainAutoRenew, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableDomainAutoRenewInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableDomainAutoRenewOutput{} + req.Data = output + return +} + +// This operation configures Amazon Route 53 to automatically renew the specified +// domain before the domain registration expires. The cost of renewing your +// domain registration is billed to your AWS account. +// +// The period during which you can renew a domain name varies by TLD. For a +// list of TLDs and their renewal policies, see "Renewal, restoration, and deletion +// times" (http://wiki.gandi.net/en/domains/renew#renewal_restoration_and_deletion_times) +// on the website for our registrar partner, Gandi. Route 53 requires that you +// renew before the end of the renewal period that is listed on the Gandi website +// so we can complete processing before the deadline. +func (c *Route53Domains) EnableDomainAutoRenew(input *EnableDomainAutoRenewInput) (*EnableDomainAutoRenewOutput, error) { + req, out := c.EnableDomainAutoRenewRequest(input) + err := req.Send() + return out, err +} + +const opEnableDomainTransferLock = "EnableDomainTransferLock" + +// EnableDomainTransferLockRequest generates a request for the EnableDomainTransferLock operation. 
+func (c *Route53Domains) EnableDomainTransferLockRequest(input *EnableDomainTransferLockInput) (req *request.Request, output *EnableDomainTransferLockOutput) { + op := &request.Operation{ + Name: opEnableDomainTransferLock, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableDomainTransferLockInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableDomainTransferLockOutput{} + req.Data = output + return +} + +// This operation sets the transfer lock on the domain (specifically the clientTransferProhibited +// status) to prevent domain transfers. Successful submission returns an operation +// ID that you can use to track the progress and completion of the action. If +// the request is not completed successfully, the domain registrant will be +// notified by email. +func (c *Route53Domains) EnableDomainTransferLock(input *EnableDomainTransferLockInput) (*EnableDomainTransferLockOutput, error) { + req, out := c.EnableDomainTransferLockRequest(input) + err := req.Send() + return out, err +} + +const opGetDomainDetail = "GetDomainDetail" + +// GetDomainDetailRequest generates a request for the GetDomainDetail operation. +func (c *Route53Domains) GetDomainDetailRequest(input *GetDomainDetailInput) (req *request.Request, output *GetDomainDetailOutput) { + op := &request.Operation{ + Name: opGetDomainDetail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDomainDetailInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDomainDetailOutput{} + req.Data = output + return +} + +// This operation returns detailed information about the domain. The domain's +// contact information is also returned as part of the output. 
+func (c *Route53Domains) GetDomainDetail(input *GetDomainDetailInput) (*GetDomainDetailOutput, error) { + req, out := c.GetDomainDetailRequest(input) + err := req.Send() + return out, err +} + +const opGetOperationDetail = "GetOperationDetail" + +// GetOperationDetailRequest generates a request for the GetOperationDetail operation. +func (c *Route53Domains) GetOperationDetailRequest(input *GetOperationDetailInput) (req *request.Request, output *GetOperationDetailOutput) { + op := &request.Operation{ + Name: opGetOperationDetail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOperationDetailInput{} + } + + req = c.newRequest(op, input, output) + output = &GetOperationDetailOutput{} + req.Data = output + return +} + +// This operation returns the current status of an operation that is not completed. +func (c *Route53Domains) GetOperationDetail(input *GetOperationDetailInput) (*GetOperationDetailOutput, error) { + req, out := c.GetOperationDetailRequest(input) + err := req.Send() + return out, err +} + +const opListDomains = "ListDomains" + +// ListDomainsRequest generates a request for the ListDomains operation. +func (c *Route53Domains) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, output *ListDomainsOutput) { + op := &request.Operation{ + Name: opListDomains, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextPageMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainsOutput{} + req.Data = output + return +} + +// This operation returns all the domain names registered with Amazon Route +// 53 for the current AWS account. 
+func (c *Route53Domains) ListDomains(input *ListDomainsInput) (*ListDomainsOutput, error) { + req, out := c.ListDomainsRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53Domains) ListDomainsPages(input *ListDomainsInput, fn func(p *ListDomainsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDomainsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDomainsOutput), lastPage) + }) +} + +const opListOperations = "ListOperations" + +// ListOperationsRequest generates a request for the ListOperations operation. +func (c *Route53Domains) ListOperationsRequest(input *ListOperationsInput) (req *request.Request, output *ListOperationsOutput) { + op := &request.Operation{ + Name: opListOperations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextPageMarker"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListOperationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListOperationsOutput{} + req.Data = output + return +} + +// This operation returns the operation IDs of operations that are not yet complete. 
+func (c *Route53Domains) ListOperations(input *ListOperationsInput) (*ListOperationsOutput, error) { + req, out := c.ListOperationsRequest(input) + err := req.Send() + return out, err +} + +func (c *Route53Domains) ListOperationsPages(input *ListOperationsInput, fn func(p *ListOperationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListOperationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListOperationsOutput), lastPage) + }) +} + +const opListTagsForDomain = "ListTagsForDomain" + +// ListTagsForDomainRequest generates a request for the ListTagsForDomain operation. +func (c *Route53Domains) ListTagsForDomainRequest(input *ListTagsForDomainInput) (req *request.Request, output *ListTagsForDomainOutput) { + op := &request.Operation{ + Name: opListTagsForDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForDomainOutput{} + req.Data = output + return +} + +// This operation returns all of the tags that are associated with the specified +// domain. +// +// All tag operations are eventually consistent; subsequent operations may +// not immediately represent all issued operations. +func (c *Route53Domains) ListTagsForDomain(input *ListTagsForDomainInput) (*ListTagsForDomainOutput, error) { + req, out := c.ListTagsForDomainRequest(input) + err := req.Send() + return out, err +} + +const opRegisterDomain = "RegisterDomain" + +// RegisterDomainRequest generates a request for the RegisterDomain operation. 
+func (c *Route53Domains) RegisterDomainRequest(input *RegisterDomainInput) (req *request.Request, output *RegisterDomainOutput) { + op := &request.Operation{ + Name: opRegisterDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &RegisterDomainOutput{} + req.Data = output + return +} + +// This operation registers a domain. Domains are registered by the AWS registrar +// partner, Gandi. For some top-level domains (TLDs), this operation requires +// extra parameters. +// +// When you register a domain, Amazon Route 53 does the following: +// +// Creates a Amazon Route 53 hosted zone that has the same name as the domain. +// Amazon Route 53 assigns four name servers to your hosted zone and automatically +// updates your domain registration with the names of these name servers. Enables +// autorenew, so your domain registration will renew automatically each year. +// We'll notify you in advance of the renewal date so you can choose whether +// to renew the registration. Optionally enables privacy protection, so WHOIS +// queries return contact information for our registrar partner, Gandi, instead +// of the information you entered for registrant, admin, and tech contacts. +// If registration is successful, returns an operation ID that you can use to +// track the progress and completion of the action. If the request is not completed +// successfully, the domain registrant is notified by email. Charges your AWS +// account an amount based on the top-level domain. For more information, see +// Amazon Route 53 Pricing (http://aws.amazon.com/route53/pricing/). 
+func (c *Route53Domains) RegisterDomain(input *RegisterDomainInput) (*RegisterDomainOutput, error) { + req, out := c.RegisterDomainRequest(input) + err := req.Send() + return out, err +} + +const opRetrieveDomainAuthCode = "RetrieveDomainAuthCode" + +// RetrieveDomainAuthCodeRequest generates a request for the RetrieveDomainAuthCode operation. +func (c *Route53Domains) RetrieveDomainAuthCodeRequest(input *RetrieveDomainAuthCodeInput) (req *request.Request, output *RetrieveDomainAuthCodeOutput) { + op := &request.Operation{ + Name: opRetrieveDomainAuthCode, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetrieveDomainAuthCodeInput{} + } + + req = c.newRequest(op, input, output) + output = &RetrieveDomainAuthCodeOutput{} + req.Data = output + return +} + +// This operation returns the AuthCode for the domain. To transfer a domain +// to another registrar, you provide this value to the new registrar. +func (c *Route53Domains) RetrieveDomainAuthCode(input *RetrieveDomainAuthCodeInput) (*RetrieveDomainAuthCodeOutput, error) { + req, out := c.RetrieveDomainAuthCodeRequest(input) + err := req.Send() + return out, err +} + +const opTransferDomain = "TransferDomain" + +// TransferDomainRequest generates a request for the TransferDomain operation. +func (c *Route53Domains) TransferDomainRequest(input *TransferDomainInput) (req *request.Request, output *TransferDomainOutput) { + op := &request.Operation{ + Name: opTransferDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TransferDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &TransferDomainOutput{} + req.Data = output + return +} + +// This operation transfers a domain from another registrar to Amazon Route +// 53. When the transfer is complete, the domain is registered with the AWS +// registrar partner, Gandi. 
+// +// For transfer requirements, a detailed procedure, and information about viewing +// the status of a domain transfer, see Transferring Registration for a Domain +// to Amazon Route 53 (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-transfer-to-route-53.html) +// in the Amazon Route 53 Developer Guide. +// +// If the registrar for your domain is also the DNS service provider for the +// domain, we highly recommend that you consider transferring your DNS service +// to Amazon Route 53 or to another DNS service provider before you transfer +// your registration. Some registrars provide free DNS service when you purchase +// a domain registration. When you transfer the registration, the previous registrar +// will not renew your domain registration and could end your DNS service at +// any time. +// +// Caution! If the registrar for your domain is also the DNS service provider +// for the domain and you don't transfer DNS service to another provider, your +// website, email, and the web applications associated with the domain might +// become unavailable. If the transfer is successful, this method returns an +// operation ID that you can use to track the progress and completion of the +// action. If the transfer doesn't complete successfully, the domain registrant +// will be notified by email. +func (c *Route53Domains) TransferDomain(input *TransferDomainInput) (*TransferDomainOutput, error) { + req, out := c.TransferDomainRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDomainContact = "UpdateDomainContact" + +// UpdateDomainContactRequest generates a request for the UpdateDomainContact operation. 
+func (c *Route53Domains) UpdateDomainContactRequest(input *UpdateDomainContactInput) (req *request.Request, output *UpdateDomainContactOutput) { + op := &request.Operation{ + Name: opUpdateDomainContact, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainContactInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDomainContactOutput{} + req.Data = output + return +} + +// This operation updates the contact information for a particular domain. Information +// for at least one contact (registrant, administrator, or technical) must be +// supplied for update. +// +// If the update is successful, this method returns an operation ID that you +// can use to track the progress and completion of the action. If the request +// is not completed successfully, the domain registrant will be notified by +// email. +func (c *Route53Domains) UpdateDomainContact(input *UpdateDomainContactInput) (*UpdateDomainContactOutput, error) { + req, out := c.UpdateDomainContactRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDomainContactPrivacy = "UpdateDomainContactPrivacy" + +// UpdateDomainContactPrivacyRequest generates a request for the UpdateDomainContactPrivacy operation. +func (c *Route53Domains) UpdateDomainContactPrivacyRequest(input *UpdateDomainContactPrivacyInput) (req *request.Request, output *UpdateDomainContactPrivacyOutput) { + op := &request.Operation{ + Name: opUpdateDomainContactPrivacy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainContactPrivacyInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDomainContactPrivacyOutput{} + req.Data = output + return +} + +// This operation updates the specified domain contact's privacy setting. When +// the privacy option is enabled, personal information such as postal or email +// address is hidden from the results of a public WHOIS query. 
The privacy services +// are provided by the AWS registrar, Gandi. For more information, see the Gandi +// privacy features (http://www.gandi.net/domain/whois/?currency=USD&lang=en). +// +// This operation only affects the privacy of the specified contact type (registrant, +// administrator, or tech). Successful acceptance returns an operation ID that +// you can use with GetOperationDetail to track the progress and completion +// of the action. If the request is not completed successfully, the domain registrant +// will be notified by email. +func (c *Route53Domains) UpdateDomainContactPrivacy(input *UpdateDomainContactPrivacyInput) (*UpdateDomainContactPrivacyOutput, error) { + req, out := c.UpdateDomainContactPrivacyRequest(input) + err := req.Send() + return out, err +} + +const opUpdateDomainNameservers = "UpdateDomainNameservers" + +// UpdateDomainNameserversRequest generates a request for the UpdateDomainNameservers operation. +func (c *Route53Domains) UpdateDomainNameserversRequest(input *UpdateDomainNameserversInput) (req *request.Request, output *UpdateDomainNameserversOutput) { + op := &request.Operation{ + Name: opUpdateDomainNameservers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainNameserversInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateDomainNameserversOutput{} + req.Data = output + return +} + +// This operation replaces the current set of name servers for the domain with +// the specified set of name servers. If you use Amazon Route 53 as your DNS +// service, specify the four name servers in the delegation set for the hosted +// zone for the domain. +// +// If successful, this operation returns an operation ID that you can use to +// track the progress and completion of the action. If the request is not completed +// successfully, the domain registrant will be notified by email. 
+func (c *Route53Domains) UpdateDomainNameservers(input *UpdateDomainNameserversInput) (*UpdateDomainNameserversOutput, error) { + req, out := c.UpdateDomainNameserversRequest(input) + err := req.Send() + return out, err +} + +const opUpdateTagsForDomain = "UpdateTagsForDomain" + +// UpdateTagsForDomainRequest generates a request for the UpdateTagsForDomain operation. +func (c *Route53Domains) UpdateTagsForDomainRequest(input *UpdateTagsForDomainInput) (req *request.Request, output *UpdateTagsForDomainOutput) { + op := &request.Operation{ + Name: opUpdateTagsForDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTagsForDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateTagsForDomainOutput{} + req.Data = output + return +} + +// This operation adds or updates tags for a specified domain. +// +// All tag operations are eventually consistent; subsequent operations may +// not immediately represent all issued operations. +func (c *Route53Domains) UpdateTagsForDomain(input *UpdateTagsForDomainInput) (*UpdateTagsForDomainOutput, error) { + req, out := c.UpdateTagsForDomainRequest(input) + err := req.Send() + return out, err +} + +// The CheckDomainAvailability request contains the following elements. +type CheckDomainAvailabilityInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // Reserved for future use. 
+ IdnLangCode *string `type:"string"` +} + +// String returns the string representation +func (s CheckDomainAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckDomainAvailabilityInput) GoString() string { + return s.String() +} + +// The CheckDomainAvailability response includes the following elements. +type CheckDomainAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // Whether the domain name is available for registering. + // + // You can only register domains designated as AVAILABLE. + // + // Type: String + // + // Valid values: + // + // AVAILABLE – The domain name is available. AVAILABLE_RESERVED – The domain + // name is reserved under specific conditions. AVAILABLE_PREORDER – The domain + // name is available and can be preordered. UNAVAILABLE – The domain name is + // not available. UNAVAILABLE_PREMIUM – The domain name is not available. + // UNAVAILABLE_RESTRICTED – The domain name is forbidden. RESERVED – The domain + // name has been reserved for another person or organization. DONT_KNOW – The + // TLD registry didn't reply with a definitive answer about whether the domain + // name is available. Amazon Route 53 can return this response for a variety + // of reasons, for example, the registry is performing maintenance. Try again + // later. + Availability *string `type:"string" required:"true" enum:"DomainAvailability"` +} + +// String returns the string representation +func (s CheckDomainAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckDomainAvailabilityOutput) GoString() string { + return s.String() +} + +// ContactDetail includes the following elements. +type ContactDetail struct { + _ struct{} `type:"structure"` + + // First line of the contact's address. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. 
+ // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: Yes + AddressLine1 *string `type:"string"` + + // Second line of contact's address, if any. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: No + AddressLine2 *string `type:"string"` + + // The city of the contact's address. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: Yes + City *string `type:"string"` + + // Indicates whether the contact is a person, company, association, or public + // organization. If you choose an option other than PERSON, you must enter an + // organization name, and you can't enable privacy protection for the contact. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + // + // Valid values: PERSON | COMPANY | ASSOCIATION | PUBLIC_BODY + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: Yes + ContactType *string `type:"string" enum:"ContactType"` + + // Code for the country of the contact's address. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: Yes + CountryCode *string `type:"string" enum:"CountryCode"` + + // Email address of the contact. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 254 characters. + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: Yes + Email *string `type:"string"` + + // A list of name-value pairs for parameters required by certain top-level domains. 
+ // + // Type: Complex + // + // Default: None + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Children: Name, Value + // + // Required: No + ExtraParams []*ExtraParam `type:"list"` + + // Fax number of the contact. + // + // Type: String + // + // Default: None + // + // Constraints: Phone number must be specified in the format "+[country dialing + // code].[number including any area code]". For example, a US phone number might + // appear as "+1.1234567890". + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: No + Fax *string `type:"string"` + + // First name of contact. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: Yes + FirstName *string `type:"string"` + + // Last name of contact. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: Yes + LastName *string `type:"string"` + + // Name of the organization for contact types other than PERSON. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. Contact type must not be PERSON. + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: No + OrganizationName *string `type:"string"` + + // The phone number of the contact. + // + // Type: String + // + // Default: None + // + // Constraints: Phone number must be specified in the format "+[country dialing + // code].[number including any area code>]". For example, a US phone number + // might appear as "+1.1234567890". + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: Yes + PhoneNumber *string `type:"string"` + + // The state or province of the contact's city. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. 
+ // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: No + State *string `type:"string"` + + // The zip or postal code of the contact's address. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + // + // Parents: RegistrantContact, AdminContact, TechContact + // + // Required: No + ZipCode *string `type:"string"` +} + +// String returns the string representation +func (s ContactDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContactDetail) GoString() string { + return s.String() +} + +// The DeleteTagsForDomainRequest includes the following elements. +type DeleteTagsForDomainInput struct { + _ struct{} `type:"structure"` + + // The domain for which you want to delete one or more tags. + // + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Hyphens are allowed only when theyaposre + // surrounded by letters, numbers, or other hyphens. You canapost specify a + // hyphen at the beginning or end of a label. To specify an Internationalized + // Domain Name, you must convert the name to Punycode. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // A list of tag keys to delete. + // + // Type: A list that contains the keys of the tags that you want to delete. 
+ // + // Default: None + // + // Required: No + // + // '> + TagsToDelete []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsForDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsForDomainInput) GoString() string { + return s.String() +} + +type DeleteTagsForDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsForDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsForDomainOutput) GoString() string { + return s.String() +} + +type DisableDomainAutoRenewInput struct { + _ struct{} `type:"structure"` + + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableDomainAutoRenewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableDomainAutoRenewInput) GoString() string { + return s.String() +} + +type DisableDomainAutoRenewOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableDomainAutoRenewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableDomainAutoRenewOutput) GoString() string { + return s.String() +} + +// The DisableDomainTransferLock request includes the following element. +type DisableDomainTransferLockInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. 
+ // + // Required: Yes + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableDomainTransferLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableDomainTransferLockInput) GoString() string { + return s.String() +} + +// The DisableDomainTransferLock response includes the following element. +type DisableDomainTransferLockOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableDomainTransferLockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableDomainTransferLockOutput) GoString() string { + return s.String() +} + +type DomainSummary struct { + _ struct{} `type:"structure"` + + // Indicates whether the domain is automatically renewed upon expiration. + // + // Type: Boolean + // + // Valid values: True | False + AutoRenew *bool `type:"boolean"` + + // The name of a domain. + // + // Type: String + DomainName *string `type:"string" required:"true"` + + // Expiration date of the domain in Coordinated Universal Time (UTC). + // + // Type: Long + Expiry *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Indicates whether a domain is locked from unauthorized transfer to another + // party. 
+ // + // Type: Boolean + // + // Valid values: True | False + TransferLock *bool `type:"boolean"` +} + +// String returns the string representation +func (s DomainSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainSummary) GoString() string { + return s.String() +} + +type EnableDomainAutoRenewInput struct { + _ struct{} `type:"structure"` + + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableDomainAutoRenewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableDomainAutoRenewInput) GoString() string { + return s.String() +} + +type EnableDomainAutoRenewOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableDomainAutoRenewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableDomainAutoRenewOutput) GoString() string { + return s.String() +} + +// The EnableDomainTransferLock request includes the following element. +type EnableDomainTransferLockInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableDomainTransferLockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableDomainTransferLockInput) GoString() string { + return s.String() +} + +// The EnableDomainTransferLock response includes the following elements. 
+type EnableDomainTransferLockOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableDomainTransferLockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableDomainTransferLockOutput) GoString() string { + return s.String() +} + +// ExtraParam includes the following elements. +type ExtraParam struct { + _ struct{} `type:"structure"` + + // Name of the additional parameter required by the top-level domain. + // + // Type: String + // + // Default: None + // + // Valid values: DUNS_NUMBER | BRAND_NUMBER | BIRTH_DEPARTMENT | BIRTH_DATE_IN_YYYY_MM_DD + // | BIRTH_COUNTRY | BIRTH_CITY | DOCUMENT_NUMBER | AU_ID_NUMBER | AU_ID_TYPE + // | CA_LEGAL_TYPE | ES_IDENTIFICATION | ES_IDENTIFICATION_TYPE | ES_LEGAL_FORM + // | FI_BUSINESS_NUMBER | FI_ID_NUMBER | IT_PIN | RU_PASSPORT_DATA | SE_ID_NUMBER + // | SG_ID_NUMBER | VAT_NUMBER + // + // Parent: ExtraParams + // + // Required: Yes + Name *string `type:"string" required:"true" enum:"ExtraParamName"` + + // Values corresponding to the additional parameter names required by some top-level + // domains. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 2048 characters. + // + // Parent: ExtraParams + // + // Required: Yes + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExtraParam) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtraParam) GoString() string { + return s.String() +} + +// The GetDomainDetail request includes the following element. 
+type GetDomainDetailInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDomainDetailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainDetailInput) GoString() string { + return s.String() +} + +// The GetDomainDetail response includes the following elements. +type GetDomainDetailOutput struct { + _ struct{} `type:"structure"` + + // Email address to contact to report incorrect contact information for a domain, + // to report that the domain is being used to send spam, to report that someone + // is cybersquatting on a domain name, or report some other type of abuse. + // + // Type: String + AbuseContactEmail *string `type:"string"` + + // Phone number for reporting abuse. + // + // Type: String + AbuseContactPhone *string `type:"string"` + + // Provides details about the domain administrative contact. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + AdminContact *ContactDetail `type:"structure" required:"true"` + + // Specifies whether contact information for the admin contact is concealed + // from WHOIS queries. If the value is true, WHOIS ("who is") queries will return + // contact information for our registrar partner, Gandi, instead of the contact + // information that you enter. + // + // Type: Boolean + AdminPrivacy *bool `type:"boolean"` + + // Specifies whether the domain registration is set to renew automatically. 
+ // + // Type: Boolean + AutoRenew *bool `type:"boolean"` + + // The date when the domain was created as found in the response to a WHOIS + // query. The date format is Unix time. + CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Reserved for future use. + DnsSec *string `type:"string"` + + // The name of a domain. + // + // Type: String + DomainName *string `type:"string" required:"true"` + + // The date when the registration for the domain is set to expire. The date + // format is Unix time. + ExpirationDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The name of the domain. + // + // Type: String + Nameservers []*Nameserver `type:"list" required:"true"` + + // Provides details about the domain registrant. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + RegistrantContact *ContactDetail `type:"structure" required:"true"` + + // Specifies whether contact information for the registrant contact is concealed + // from WHOIS queries. If the value is true, WHOIS ("who is") queries will return + // contact information for our registrar partner, Gandi, instead of the contact + // information that you enter. + // + // Type: Boolean + RegistrantPrivacy *bool `type:"boolean"` + + // Name of the registrar of the domain as identified in the registry. Amazon + // Route 53 domains are registered by registrar Gandi. The value is "GANDI SAS". + // + // Type: String + RegistrarName *string `type:"string"` + + // Web address of the registrar. + // + // Type: String + RegistrarUrl *string `type:"string"` + + // Reserved for future use. + RegistryDomainId *string `type:"string"` + + // Reseller of the domain. Domains registered or transferred using Amazon Route + // 53 domains will have "Amazon" as the reseller. 
+ // + // Type: String + Reseller *string `type:"string"` + + // An array of domain name status codes, also known as Extensible Provisioning + // Protocol (EPP) status codes. + // + // ICANN, the organization that maintains a central database of domain names, + // has developed a set of domain name status codes that tell you the status + // of a variety of operations on a domain name, for example, registering a domain + // name, transferring a domain name to another registrar, renewing the registration + // for a domain name, and so on. All registrars use this same set of status + // codes. + // + // For a current list of domain name status codes and an explanation of what + // each code means, go to the ICANN website (https://www.icann.org/) and search + // for epp status codes. (Search on the ICANN website; web searches sometimes + // return an old version of the document.) + // + // Type: Array of String + StatusList []*string `type:"list"` + + // Provides details about the domain technical contact. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + TechContact *ContactDetail `type:"structure" required:"true"` + + // Specifies whether contact information for the tech contact is concealed from + // WHOIS queries. If the value is true, WHOIS ("who is") queries will return + // contact information for our registrar partner, Gandi, instead of the contact + // information that you enter. + // + // Type: Boolean + TechPrivacy *bool `type:"boolean"` + + // The last updated date of the domain as found in the response to a WHOIS query. + // The date format is Unix time. + UpdatedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The fully qualified name of the WHOIS server that can answer the WHOIS query + // for the domain. 
+ // + // Type: String + WhoIsServer *string `type:"string"` +} + +// String returns the string representation +func (s GetDomainDetailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDomainDetailOutput) GoString() string { + return s.String() +} + +// The GetOperationDetail request includes the following element. +type GetOperationDetailInput struct { + _ struct{} `type:"structure"` + + // The identifier for the operation for which you want to get the status. Amazon + // Route 53 returned the identifier in the response to the original request. + // + // Type: String + // + // Default: None + // + // Required: Yes + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOperationDetailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOperationDetailInput) GoString() string { + return s.String() +} + +// The GetOperationDetail response includes the following elements. +type GetOperationDetailOutput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + DomainName *string `type:"string"` + + // Detailed information on the status including possible errors. + // + // Type: String + Message *string `type:"string"` + + // The identifier for the operation. + // + // Type: String + OperationId *string `type:"string"` + + // The current status of the requested operation in the system. + // + // Type: String + Status *string `type:"string" enum:"OperationStatus"` + + // The date when the request was submitted. + SubmittedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The type of operation that was requested. 
+ // + // Type: String + Type *string `type:"string" enum:"OperationType"` +} + +// String returns the string representation +func (s GetOperationDetailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOperationDetailOutput) GoString() string { + return s.String() +} + +// The ListDomains request includes the following elements. +type ListDomainsInput struct { + _ struct{} `type:"structure"` + + // For an initial request for a list of domains, omit this element. If the number + // of domains that are associated with the current AWS account is greater than + // the value that you specified for MaxItems, you can use Marker to return additional + // domains. Get the value of NextPageMarker from the previous response, and + // submit another request that includes the value of NextPageMarker in the Marker + // element. + // + // Type: String + // + // Default: None + // + // Constraints: The marker must match the value specified in the previous request. + // + // Required: No + Marker *string `type:"string"` + + // Number of domains to be returned. + // + // Type: Integer + // + // Default: 20 + // + // Constraints: A numeral between 1 and 100. + // + // Required: No + MaxItems *int64 `type:"integer"` +} + +// String returns the string representation +func (s ListDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsInput) GoString() string { + return s.String() +} + +// The ListDomains response includes the following elements. +type ListDomainsOutput struct { + _ struct{} `type:"structure"` + + // A summary of domains. + // + // Type: Complex type containing a list of domain summaries. 
+ // + // Children: AutoRenew, DomainName, Expiry, TransferLock + Domains []*DomainSummary `type:"list" required:"true"` + + // If there are more domains than you specified for MaxItems in the request, + // submit another request and include the value of NextPageMarker in the value + // of Marker. + // + // Type: String + // + // Parent: Operations + NextPageMarker *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsOutput) GoString() string { + return s.String() +} + +// The ListOperations request includes the following elements. +type ListOperationsInput struct { + _ struct{} `type:"structure"` + + // For an initial request for a list of operations, omit this element. If the + // number of operations that are not yet complete is greater than the value + // that you specified for MaxItems, you can use Marker to return additional + // operations. Get the value of NextPageMarker from the previous response, and + // submit another request that includes the value of NextPageMarker in the Marker + // element. + // + // Type: String + // + // Default: None + // + // Required: No + Marker *string `type:"string"` + + // Number of domains to be returned. + // + // Type: Integer + // + // Default: 20 + // + // Constraints: A value between 1 and 100. + // + // Required: No + MaxItems *int64 `type:"integer"` +} + +// String returns the string representation +func (s ListOperationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOperationsInput) GoString() string { + return s.String() +} + +// The ListOperations response includes the following elements. 
+type ListOperationsOutput struct { + _ struct{} `type:"structure"` + + // If there are more operations than you specified for MaxItems in the request, + // submit another request and include the value of NextPageMarker in the value + // of Marker. + // + // Type: String + // + // Parent: Operations + NextPageMarker *string `type:"string"` + + // Lists summaries of the operations. + // + // Type: Complex type containing a list of operation summaries + // + // Children: OperationId, Status, SubmittedDate, Type + Operations []*OperationSummary `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListOperationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOperationsOutput) GoString() string { + return s.String() +} + +// The ListTagsForDomainRequest includes the following elements. +type ListTagsForDomainInput struct { + _ struct{} `type:"structure"` + + // The domain for which you want to get a list of tags. + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForDomainInput) GoString() string { + return s.String() +} + +// The ListTagsForDomain response includes the following elements. +type ListTagsForDomainOutput struct { + _ struct{} `type:"structure"` + + // A list of the tags that are associated with the specified domain. + // + // Type: A complex type containing a list of tags + // + // Each tag includes the following elements. + // + // Key + // + // The key (name) of a tag. + // + // Type: String + // + // Value + // + // The value of a tag. 
+ // + // Type: String + TagList []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForDomainOutput) GoString() string { + return s.String() +} + +// Nameserver includes the following elements. +type Nameserver struct { + _ struct{} `type:"structure"` + + // Glue IP address of a name server entry. Glue IP addresses are required only + // when the name of the name server is a subdomain of the domain. For example, + // if your domain is example.com and the name server for the domain is ns.example.com, + // you need to specify the IP address for ns.example.com. + // + // Type: List of IP addresses. + // + // Constraints: The list can contain only one IPv4 and one IPv6 address. + // + // Parent: Nameservers + GlueIps []*string `type:"list"` + + // The fully qualified host name of the name server. + // + // Type: String + // + // Constraint: Maximum 255 characterss + // + // Parent: Nameservers + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Nameserver) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Nameserver) GoString() string { + return s.String() +} + +// OperationSummary includes the following elements. +type OperationSummary struct { + _ struct{} `type:"structure"` + + // Identifier returned to track the requested action. + // + // Type: String + OperationId *string `type:"string" required:"true"` + + // The current status of the requested operation in the system. + // + // Type: String + Status *string `type:"string" required:"true" enum:"OperationStatus"` + + // The date when the request was submitted. + SubmittedDate *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Type of the action requested. 
+ // + // Type: String + // + // Valid values: REGISTER_DOMAIN | DELETE_DOMAIN | TRANSFER_IN_DOMAIN | UPDATE_DOMAIN_CONTACT + // | UPDATE_NAMESERVER | CHANGE_PRIVACY_PROTECTION | DOMAIN_LOCK + Type *string `type:"string" required:"true" enum:"OperationType"` +} + +// String returns the string representation +func (s OperationSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OperationSummary) GoString() string { + return s.String() +} + +// The RegisterDomain request includes the following elements. +type RegisterDomainInput struct { + _ struct{} `type:"structure"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + AdminContact *ContactDetail `type:"structure" required:"true"` + + // Indicates whether the domain will be automatically renewed (true) or not + // (false). Autorenewal only takes effect after the account is charged. + // + // Type: Boolean + // + // Valid values: true | false + // + // Default: true + // + // Required: No + AutoRenew *bool `type:"boolean"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // The number of years the domain will be registered. Domains are registered + // for a minimum of one year. The maximum period depends on the top-level domain. + // + // Type: Integer + // + // Default: 1 + // + // Valid values: Integer from 1 to 10 + // + // Required: Yes + DurationInYears *int64 `min:"1" type:"integer" required:"true"` + + // Reserved for future use. 
+ IdnLangCode *string `type:"string"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectAdminContact *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectRegistrantContact *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectTechContact *bool `type:"boolean"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + RegistrantContact *ContactDetail `type:"structure" required:"true"` + + // Provides detailed contact information. 
+ // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + TechContact *ContactDetail `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RegisterDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDomainInput) GoString() string { + return s.String() +} + +// The RegisterDomain response includes the following element. +type RegisterDomainOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDomainOutput) GoString() string { + return s.String() +} + +// The RetrieveDomainAuthCode request includes the following element. +type RetrieveDomainAuthCodeInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. 
+ // + // Required: Yes + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RetrieveDomainAuthCodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveDomainAuthCodeInput) GoString() string { + return s.String() +} + +// The RetrieveDomainAuthCode response includes the following element. +type RetrieveDomainAuthCodeOutput struct { + _ struct{} `type:"structure"` + + // The authorization code for the domain. + // + // Type: String + AuthCode *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RetrieveDomainAuthCodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveDomainAuthCodeOutput) GoString() string { + return s.String() +} + +// Each tag includes the following elements. +type Tag struct { + _ struct{} `type:"structure"` + + // The key (name) of a tag. + // + // Type: String + // + // Default: None + // + // Valid values: A-Z, a-z, 0-9, space, ".:/=+\-@" + // + // Constraints: Each key can be 1-128 characters long. + // + // Required: Yes + Key *string `type:"string"` + + // The value of a tag. + // + // Type: String + // + // Default: None + // + // Valid values: A-Z, a-z, 0-9, space, ".:/=+\-@" + // + // Constraints: Each value can be 0-256 characters long. + // + // Required: Yes + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// The TransferDomain request includes the following elements. +type TransferDomainInput struct { + _ struct{} `type:"structure"` + + // Provides detailed contact information. 
+ // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + AdminContact *ContactDetail `type:"structure" required:"true"` + + // The authorization code for the domain. You get this value from the current + // registrar. + // + // Type: String + // + // Required: Yes + AuthCode *string `type:"string"` + + // Indicates whether the domain will be automatically renewed (true) or not + // (false). Autorenewal only takes effect after the account is charged. + // + // Type: Boolean + // + // Valid values: true | false + // + // Default: true + // + // Required: No + AutoRenew *bool `type:"boolean"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // The number of years the domain will be registered. Domains are registered + // for a minimum of one year. The maximum period depends on the top-level domain. + // + // Type: Integer + // + // Default: 1 + // + // Valid values: Integer from 1 to 10 + // + // Required: Yes + DurationInYears *int64 `min:"1" type:"integer" required:"true"` + + // Reserved for future use. + IdnLangCode *string `type:"string"` + + // Contains details for the host and glue IP addresses. + // + // Type: Complex + // + // Children: GlueIps, Name + // + // Required: No + Nameservers []*Nameserver `type:"list"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. 
+ // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectAdminContact *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectRegistrantContact *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: true + // + // Valid values: true | false + // + // Required: No + PrivacyProtectTechContact *bool `type:"boolean"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + RegistrantContact *ContactDetail `type:"structure" required:"true"` + + // Provides detailed contact information. 
+ // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + TechContact *ContactDetail `type:"structure" required:"true"` +} + +// String returns the string representation +func (s TransferDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransferDomainInput) GoString() string { + return s.String() +} + +// The TranserDomain response includes the following element. +type TransferDomainOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TransferDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransferDomainOutput) GoString() string { + return s.String() +} + +// The UpdateDomainContact request includes the following elements. +type UpdateDomainContactInput struct { + _ struct{} `type:"structure"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + AdminContact *ContactDetail `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. 
+ // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + RegistrantContact *ContactDetail `type:"structure"` + + // Provides detailed contact information. + // + // Type: Complex + // + // Children: FirstName, MiddleName, LastName, ContactType, OrganizationName, + // AddressLine1, AddressLine2, City, State, CountryCode, ZipCode, PhoneNumber, + // Email, Fax, ExtraParams + // + // Required: Yes + TechContact *ContactDetail `type:"structure"` +} + +// String returns the string representation +func (s UpdateDomainContactInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainContactInput) GoString() string { + return s.String() +} + +// The UpdateDomainContact response includes the following element. +type UpdateDomainContactOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainContactOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainContactOutput) GoString() string { + return s.String() +} + +// The UpdateDomainContactPrivacy request includes the following elements. +type UpdateDomainContactPrivacyInput struct { + _ struct{} `type:"structure"` + + // Whether you want to conceal contact information from WHOIS queries. 
If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: None + // + // Valid values: true | false + // + // Required: No + AdminPrivacy *bool `type:"boolean"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: None + // + // Valid values: true | false + // + // Required: No + RegistrantPrivacy *bool `type:"boolean"` + + // Whether you want to conceal contact information from WHOIS queries. If you + // specify true, WHOIS ("who is") queries will return contact information for + // our registrar partner, Gandi, instead of the contact information that you + // enter. + // + // Type: Boolean + // + // Default: None + // + // Valid values: true | false + // + // Required: No + TechPrivacy *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateDomainContactPrivacyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainContactPrivacyInput) GoString() string { + return s.String() +} + +// The UpdateDomainContactPrivacy response includes the following element. +type UpdateDomainContactPrivacyOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. 
To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. + OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainContactPrivacyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainContactPrivacyOutput) GoString() string { + return s.String() +} + +// The UpdateDomainNameserver request includes the following elements. +type UpdateDomainNameserversInput struct { + _ struct{} `type:"structure"` + + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not + // supported. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // The authorization key for .fi domains + FIAuthKey *string `type:"string"` + + // A list of new name servers for the domain. + // + // Type: Complex + // + // Children: Name, GlueIps + // + // Required: Yes + Nameservers []*Nameserver `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainNameserversInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainNameserversInput) GoString() string { + return s.String() +} + +// The UpdateDomainNameservers response includes the following element. +type UpdateDomainNameserversOutput struct { + _ struct{} `type:"structure"` + + // Identifier for tracking the progress of the request. To use this ID to query + // the operation status, use GetOperationDetail. + // + // Type: String + // + // Default: None + // + // Constraints: Maximum 255 characters. 
+ OperationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainNameserversOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDomainNameserversOutput) GoString() string { + return s.String() +} + +// The UpdateTagsForDomainRequest includes the following elements. +type UpdateTagsForDomainInput struct { + _ struct{} `type:"structure"` + + // The domain for which you want to add or update tags. + // + // The name of a domain. + // + // Type: String + // + // Default: None + // + // Constraints: The domain name can contain only the letters a through z, the + // numbers 0 through 9, and hyphen (-). Hyphens are allowed only when theyaposre + // surrounded by letters, numbers, or other hyphens. You canapost specify a + // hyphen at the beginning or end of a label. To specify an Internationalized + // Domain Name, you must convert the name to Punycode. + // + // Required: Yes + DomainName *string `type:"string" required:"true"` + + // A list of the tag keys and values that you want to add or update. If you + // specify a key that already exists, the corresponding value will be replaced. + // + // Type: A complex type containing a list of tags + // + // Default: None + // + // Required: No + // + // '> Each tag includes the following elements: + // + // Key + // + // The key (name) of a tag. + // + // Type: String + // + // Default: None + // + // Valid values: Unicode characters including alphanumeric, space, and ".:/=+\-@" + // + // Constraints: Each key can be 1-128 characters long. + // + // Required: Yes + // + // Value + // + // The value of a tag. + // + // Type: String + // + // Default: None + // + // Valid values: Unicode characters including alphanumeric, space, and ".:/=+\-@" + // + // Constraints: Each value can be 0-256 characters long. 
+ // + // Required: Yes + TagsToUpdate []*Tag `type:"list"` +} + +// String returns the string representation +func (s UpdateTagsForDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTagsForDomainInput) GoString() string { + return s.String() +} + +type UpdateTagsForDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateTagsForDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTagsForDomainOutput) GoString() string { + return s.String() +} + +const ( + // @enum ContactType + ContactTypePerson = "PERSON" + // @enum ContactType + ContactTypeCompany = "COMPANY" + // @enum ContactType + ContactTypeAssociation = "ASSOCIATION" + // @enum ContactType + ContactTypePublicBody = "PUBLIC_BODY" + // @enum ContactType + ContactTypeReseller = "RESELLER" +) + +const ( + // @enum CountryCode + CountryCodeAd = "AD" + // @enum CountryCode + CountryCodeAe = "AE" + // @enum CountryCode + CountryCodeAf = "AF" + // @enum CountryCode + CountryCodeAg = "AG" + // @enum CountryCode + CountryCodeAi = "AI" + // @enum CountryCode + CountryCodeAl = "AL" + // @enum CountryCode + CountryCodeAm = "AM" + // @enum CountryCode + CountryCodeAn = "AN" + // @enum CountryCode + CountryCodeAo = "AO" + // @enum CountryCode + CountryCodeAq = "AQ" + // @enum CountryCode + CountryCodeAr = "AR" + // @enum CountryCode + CountryCodeAs = "AS" + // @enum CountryCode + CountryCodeAt = "AT" + // @enum CountryCode + CountryCodeAu = "AU" + // @enum CountryCode + CountryCodeAw = "AW" + // @enum CountryCode + CountryCodeAz = "AZ" + // @enum CountryCode + CountryCodeBa = "BA" + // @enum CountryCode + CountryCodeBb = "BB" + // @enum CountryCode + CountryCodeBd = "BD" + // @enum CountryCode + CountryCodeBe = "BE" + // @enum CountryCode + CountryCodeBf = "BF" + // @enum CountryCode + CountryCodeBg = "BG" + // 
@enum CountryCode + CountryCodeBh = "BH" + // @enum CountryCode + CountryCodeBi = "BI" + // @enum CountryCode + CountryCodeBj = "BJ" + // @enum CountryCode + CountryCodeBl = "BL" + // @enum CountryCode + CountryCodeBm = "BM" + // @enum CountryCode + CountryCodeBn = "BN" + // @enum CountryCode + CountryCodeBo = "BO" + // @enum CountryCode + CountryCodeBr = "BR" + // @enum CountryCode + CountryCodeBs = "BS" + // @enum CountryCode + CountryCodeBt = "BT" + // @enum CountryCode + CountryCodeBw = "BW" + // @enum CountryCode + CountryCodeBy = "BY" + // @enum CountryCode + CountryCodeBz = "BZ" + // @enum CountryCode + CountryCodeCa = "CA" + // @enum CountryCode + CountryCodeCc = "CC" + // @enum CountryCode + CountryCodeCd = "CD" + // @enum CountryCode + CountryCodeCf = "CF" + // @enum CountryCode + CountryCodeCg = "CG" + // @enum CountryCode + CountryCodeCh = "CH" + // @enum CountryCode + CountryCodeCi = "CI" + // @enum CountryCode + CountryCodeCk = "CK" + // @enum CountryCode + CountryCodeCl = "CL" + // @enum CountryCode + CountryCodeCm = "CM" + // @enum CountryCode + CountryCodeCn = "CN" + // @enum CountryCode + CountryCodeCo = "CO" + // @enum CountryCode + CountryCodeCr = "CR" + // @enum CountryCode + CountryCodeCu = "CU" + // @enum CountryCode + CountryCodeCv = "CV" + // @enum CountryCode + CountryCodeCx = "CX" + // @enum CountryCode + CountryCodeCy = "CY" + // @enum CountryCode + CountryCodeCz = "CZ" + // @enum CountryCode + CountryCodeDe = "DE" + // @enum CountryCode + CountryCodeDj = "DJ" + // @enum CountryCode + CountryCodeDk = "DK" + // @enum CountryCode + CountryCodeDm = "DM" + // @enum CountryCode + CountryCodeDo = "DO" + // @enum CountryCode + CountryCodeDz = "DZ" + // @enum CountryCode + CountryCodeEc = "EC" + // @enum CountryCode + CountryCodeEe = "EE" + // @enum CountryCode + CountryCodeEg = "EG" + // @enum CountryCode + CountryCodeEr = "ER" + // @enum CountryCode + CountryCodeEs = "ES" + // @enum CountryCode + CountryCodeEt = "ET" + // @enum CountryCode + 
CountryCodeFi = "FI" + // @enum CountryCode + CountryCodeFj = "FJ" + // @enum CountryCode + CountryCodeFk = "FK" + // @enum CountryCode + CountryCodeFm = "FM" + // @enum CountryCode + CountryCodeFo = "FO" + // @enum CountryCode + CountryCodeFr = "FR" + // @enum CountryCode + CountryCodeGa = "GA" + // @enum CountryCode + CountryCodeGb = "GB" + // @enum CountryCode + CountryCodeGd = "GD" + // @enum CountryCode + CountryCodeGe = "GE" + // @enum CountryCode + CountryCodeGh = "GH" + // @enum CountryCode + CountryCodeGi = "GI" + // @enum CountryCode + CountryCodeGl = "GL" + // @enum CountryCode + CountryCodeGm = "GM" + // @enum CountryCode + CountryCodeGn = "GN" + // @enum CountryCode + CountryCodeGq = "GQ" + // @enum CountryCode + CountryCodeGr = "GR" + // @enum CountryCode + CountryCodeGt = "GT" + // @enum CountryCode + CountryCodeGu = "GU" + // @enum CountryCode + CountryCodeGw = "GW" + // @enum CountryCode + CountryCodeGy = "GY" + // @enum CountryCode + CountryCodeHk = "HK" + // @enum CountryCode + CountryCodeHn = "HN" + // @enum CountryCode + CountryCodeHr = "HR" + // @enum CountryCode + CountryCodeHt = "HT" + // @enum CountryCode + CountryCodeHu = "HU" + // @enum CountryCode + CountryCodeId = "ID" + // @enum CountryCode + CountryCodeIe = "IE" + // @enum CountryCode + CountryCodeIl = "IL" + // @enum CountryCode + CountryCodeIm = "IM" + // @enum CountryCode + CountryCodeIn = "IN" + // @enum CountryCode + CountryCodeIq = "IQ" + // @enum CountryCode + CountryCodeIr = "IR" + // @enum CountryCode + CountryCodeIs = "IS" + // @enum CountryCode + CountryCodeIt = "IT" + // @enum CountryCode + CountryCodeJm = "JM" + // @enum CountryCode + CountryCodeJo = "JO" + // @enum CountryCode + CountryCodeJp = "JP" + // @enum CountryCode + CountryCodeKe = "KE" + // @enum CountryCode + CountryCodeKg = "KG" + // @enum CountryCode + CountryCodeKh = "KH" + // @enum CountryCode + CountryCodeKi = "KI" + // @enum CountryCode + CountryCodeKm = "KM" + // @enum CountryCode + CountryCodeKn = "KN" 
+ // @enum CountryCode + CountryCodeKp = "KP" + // @enum CountryCode + CountryCodeKr = "KR" + // @enum CountryCode + CountryCodeKw = "KW" + // @enum CountryCode + CountryCodeKy = "KY" + // @enum CountryCode + CountryCodeKz = "KZ" + // @enum CountryCode + CountryCodeLa = "LA" + // @enum CountryCode + CountryCodeLb = "LB" + // @enum CountryCode + CountryCodeLc = "LC" + // @enum CountryCode + CountryCodeLi = "LI" + // @enum CountryCode + CountryCodeLk = "LK" + // @enum CountryCode + CountryCodeLr = "LR" + // @enum CountryCode + CountryCodeLs = "LS" + // @enum CountryCode + CountryCodeLt = "LT" + // @enum CountryCode + CountryCodeLu = "LU" + // @enum CountryCode + CountryCodeLv = "LV" + // @enum CountryCode + CountryCodeLy = "LY" + // @enum CountryCode + CountryCodeMa = "MA" + // @enum CountryCode + CountryCodeMc = "MC" + // @enum CountryCode + CountryCodeMd = "MD" + // @enum CountryCode + CountryCodeMe = "ME" + // @enum CountryCode + CountryCodeMf = "MF" + // @enum CountryCode + CountryCodeMg = "MG" + // @enum CountryCode + CountryCodeMh = "MH" + // @enum CountryCode + CountryCodeMk = "MK" + // @enum CountryCode + CountryCodeMl = "ML" + // @enum CountryCode + CountryCodeMm = "MM" + // @enum CountryCode + CountryCodeMn = "MN" + // @enum CountryCode + CountryCodeMo = "MO" + // @enum CountryCode + CountryCodeMp = "MP" + // @enum CountryCode + CountryCodeMr = "MR" + // @enum CountryCode + CountryCodeMs = "MS" + // @enum CountryCode + CountryCodeMt = "MT" + // @enum CountryCode + CountryCodeMu = "MU" + // @enum CountryCode + CountryCodeMv = "MV" + // @enum CountryCode + CountryCodeMw = "MW" + // @enum CountryCode + CountryCodeMx = "MX" + // @enum CountryCode + CountryCodeMy = "MY" + // @enum CountryCode + CountryCodeMz = "MZ" + // @enum CountryCode + CountryCodeNa = "NA" + // @enum CountryCode + CountryCodeNc = "NC" + // @enum CountryCode + CountryCodeNe = "NE" + // @enum CountryCode + CountryCodeNg = "NG" + // @enum CountryCode + CountryCodeNi = "NI" + // @enum 
CountryCode + CountryCodeNl = "NL" + // @enum CountryCode + CountryCodeNo = "NO" + // @enum CountryCode + CountryCodeNp = "NP" + // @enum CountryCode + CountryCodeNr = "NR" + // @enum CountryCode + CountryCodeNu = "NU" + // @enum CountryCode + CountryCodeNz = "NZ" + // @enum CountryCode + CountryCodeOm = "OM" + // @enum CountryCode + CountryCodePa = "PA" + // @enum CountryCode + CountryCodePe = "PE" + // @enum CountryCode + CountryCodePf = "PF" + // @enum CountryCode + CountryCodePg = "PG" + // @enum CountryCode + CountryCodePh = "PH" + // @enum CountryCode + CountryCodePk = "PK" + // @enum CountryCode + CountryCodePl = "PL" + // @enum CountryCode + CountryCodePm = "PM" + // @enum CountryCode + CountryCodePn = "PN" + // @enum CountryCode + CountryCodePr = "PR" + // @enum CountryCode + CountryCodePt = "PT" + // @enum CountryCode + CountryCodePw = "PW" + // @enum CountryCode + CountryCodePy = "PY" + // @enum CountryCode + CountryCodeQa = "QA" + // @enum CountryCode + CountryCodeRo = "RO" + // @enum CountryCode + CountryCodeRs = "RS" + // @enum CountryCode + CountryCodeRu = "RU" + // @enum CountryCode + CountryCodeRw = "RW" + // @enum CountryCode + CountryCodeSa = "SA" + // @enum CountryCode + CountryCodeSb = "SB" + // @enum CountryCode + CountryCodeSc = "SC" + // @enum CountryCode + CountryCodeSd = "SD" + // @enum CountryCode + CountryCodeSe = "SE" + // @enum CountryCode + CountryCodeSg = "SG" + // @enum CountryCode + CountryCodeSh = "SH" + // @enum CountryCode + CountryCodeSi = "SI" + // @enum CountryCode + CountryCodeSk = "SK" + // @enum CountryCode + CountryCodeSl = "SL" + // @enum CountryCode + CountryCodeSm = "SM" + // @enum CountryCode + CountryCodeSn = "SN" + // @enum CountryCode + CountryCodeSo = "SO" + // @enum CountryCode + CountryCodeSr = "SR" + // @enum CountryCode + CountryCodeSt = "ST" + // @enum CountryCode + CountryCodeSv = "SV" + // @enum CountryCode + CountryCodeSy = "SY" + // @enum CountryCode + CountryCodeSz = "SZ" + // @enum CountryCode + 
CountryCodeTc = "TC" + // @enum CountryCode + CountryCodeTd = "TD" + // @enum CountryCode + CountryCodeTg = "TG" + // @enum CountryCode + CountryCodeTh = "TH" + // @enum CountryCode + CountryCodeTj = "TJ" + // @enum CountryCode + CountryCodeTk = "TK" + // @enum CountryCode + CountryCodeTl = "TL" + // @enum CountryCode + CountryCodeTm = "TM" + // @enum CountryCode + CountryCodeTn = "TN" + // @enum CountryCode + CountryCodeTo = "TO" + // @enum CountryCode + CountryCodeTr = "TR" + // @enum CountryCode + CountryCodeTt = "TT" + // @enum CountryCode + CountryCodeTv = "TV" + // @enum CountryCode + CountryCodeTw = "TW" + // @enum CountryCode + CountryCodeTz = "TZ" + // @enum CountryCode + CountryCodeUa = "UA" + // @enum CountryCode + CountryCodeUg = "UG" + // @enum CountryCode + CountryCodeUs = "US" + // @enum CountryCode + CountryCodeUy = "UY" + // @enum CountryCode + CountryCodeUz = "UZ" + // @enum CountryCode + CountryCodeVa = "VA" + // @enum CountryCode + CountryCodeVc = "VC" + // @enum CountryCode + CountryCodeVe = "VE" + // @enum CountryCode + CountryCodeVg = "VG" + // @enum CountryCode + CountryCodeVi = "VI" + // @enum CountryCode + CountryCodeVn = "VN" + // @enum CountryCode + CountryCodeVu = "VU" + // @enum CountryCode + CountryCodeWf = "WF" + // @enum CountryCode + CountryCodeWs = "WS" + // @enum CountryCode + CountryCodeYe = "YE" + // @enum CountryCode + CountryCodeYt = "YT" + // @enum CountryCode + CountryCodeZa = "ZA" + // @enum CountryCode + CountryCodeZm = "ZM" + // @enum CountryCode + CountryCodeZw = "ZW" +) + +const ( + // @enum DomainAvailability + DomainAvailabilityAvailable = "AVAILABLE" + // @enum DomainAvailability + DomainAvailabilityAvailableReserved = "AVAILABLE_RESERVED" + // @enum DomainAvailability + DomainAvailabilityAvailablePreorder = "AVAILABLE_PREORDER" + // @enum DomainAvailability + DomainAvailabilityUnavailable = "UNAVAILABLE" + // @enum DomainAvailability + DomainAvailabilityUnavailablePremium = "UNAVAILABLE_PREMIUM" + // @enum 
DomainAvailability + DomainAvailabilityUnavailableRestricted = "UNAVAILABLE_RESTRICTED" + // @enum DomainAvailability + DomainAvailabilityReserved = "RESERVED" + // @enum DomainAvailability + DomainAvailabilityDontKnow = "DONT_KNOW" +) + +const ( + // @enum ExtraParamName + ExtraParamNameDunsNumber = "DUNS_NUMBER" + // @enum ExtraParamName + ExtraParamNameBrandNumber = "BRAND_NUMBER" + // @enum ExtraParamName + ExtraParamNameBirthDepartment = "BIRTH_DEPARTMENT" + // @enum ExtraParamName + ExtraParamNameBirthDateInYyyyMmDd = "BIRTH_DATE_IN_YYYY_MM_DD" + // @enum ExtraParamName + ExtraParamNameBirthCountry = "BIRTH_COUNTRY" + // @enum ExtraParamName + ExtraParamNameBirthCity = "BIRTH_CITY" + // @enum ExtraParamName + ExtraParamNameDocumentNumber = "DOCUMENT_NUMBER" + // @enum ExtraParamName + ExtraParamNameAuIdNumber = "AU_ID_NUMBER" + // @enum ExtraParamName + ExtraParamNameAuIdType = "AU_ID_TYPE" + // @enum ExtraParamName + ExtraParamNameCaLegalType = "CA_LEGAL_TYPE" + // @enum ExtraParamName + ExtraParamNameEsIdentification = "ES_IDENTIFICATION" + // @enum ExtraParamName + ExtraParamNameEsIdentificationType = "ES_IDENTIFICATION_TYPE" + // @enum ExtraParamName + ExtraParamNameEsLegalForm = "ES_LEGAL_FORM" + // @enum ExtraParamName + ExtraParamNameFiBusinessNumber = "FI_BUSINESS_NUMBER" + // @enum ExtraParamName + ExtraParamNameFiIdNumber = "FI_ID_NUMBER" + // @enum ExtraParamName + ExtraParamNameItPin = "IT_PIN" + // @enum ExtraParamName + ExtraParamNameRuPassportData = "RU_PASSPORT_DATA" + // @enum ExtraParamName + ExtraParamNameSeIdNumber = "SE_ID_NUMBER" + // @enum ExtraParamName + ExtraParamNameSgIdNumber = "SG_ID_NUMBER" + // @enum ExtraParamName + ExtraParamNameVatNumber = "VAT_NUMBER" +) + +const ( + // @enum OperationStatus + OperationStatusSubmitted = "SUBMITTED" + // @enum OperationStatus + OperationStatusInProgress = "IN_PROGRESS" + // @enum OperationStatus + OperationStatusError = "ERROR" + // @enum OperationStatus + OperationStatusSuccessful = 
"SUCCESSFUL" + // @enum OperationStatus + OperationStatusFailed = "FAILED" +) + +const ( + // @enum OperationType + OperationTypeRegisterDomain = "REGISTER_DOMAIN" + // @enum OperationType + OperationTypeDeleteDomain = "DELETE_DOMAIN" + // @enum OperationType + OperationTypeTransferInDomain = "TRANSFER_IN_DOMAIN" + // @enum OperationType + OperationTypeUpdateDomainContact = "UPDATE_DOMAIN_CONTACT" + // @enum OperationType + OperationTypeUpdateNameserver = "UPDATE_NAMESERVER" + // @enum OperationType + OperationTypeChangePrivacyProtection = "CHANGE_PRIVACY_PROTECTION" + // @enum OperationType + OperationTypeDomainLock = "DOMAIN_LOCK" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,607 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53domains_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53domains" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleRoute53Domains_CheckDomainAvailability() { + svc := route53domains.New(session.New()) + + params := &route53domains.CheckDomainAvailabilityInput{ + DomainName: aws.String("DomainName"), // Required + IdnLangCode: aws.String("LangCode"), + } + resp, err := svc.CheckDomainAvailability(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_DeleteTagsForDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.DeleteTagsForDomainInput{ + DomainName: aws.String("DomainName"), // Required + TagsToDelete: []*string{ // Required + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.DeleteTagsForDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_DisableDomainAutoRenew() { + svc := route53domains.New(session.New()) + + params := &route53domains.DisableDomainAutoRenewInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DisableDomainAutoRenew(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_DisableDomainTransferLock() { + svc := route53domains.New(session.New()) + + params := &route53domains.DisableDomainTransferLockInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.DisableDomainTransferLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53Domains_EnableDomainAutoRenew() { + svc := route53domains.New(session.New()) + + params := &route53domains.EnableDomainAutoRenewInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.EnableDomainAutoRenew(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_EnableDomainTransferLock() { + svc := route53domains.New(session.New()) + + params := &route53domains.EnableDomainTransferLockInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.EnableDomainTransferLock(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_GetDomainDetail() { + svc := route53domains.New(session.New()) + + params := &route53domains.GetDomainDetailInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.GetDomainDetail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_GetOperationDetail() { + svc := route53domains.New(session.New()) + + params := &route53domains.GetOperationDetailInput{ + OperationId: aws.String("OperationId"), // Required + } + resp, err := svc.GetOperationDetail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53Domains_ListDomains() { + svc := route53domains.New(session.New()) + + params := &route53domains.ListDomainsInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_ListOperations() { + svc := route53domains.New(session.New()) + + params := &route53domains.ListOperationsInput{ + Marker: aws.String("PageMarker"), + MaxItems: aws.Int64(1), + } + resp, err := svc.ListOperations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_ListTagsForDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.ListTagsForDomainInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.ListTagsForDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53Domains_RegisterDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.RegisterDomainInput{ + AdminContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + DomainName: aws.String("DomainName"), // Required + DurationInYears: aws.Int64(1), // Required + RegistrantContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... 
+ }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + TechContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + AutoRenew: aws.Bool(true), + IdnLangCode: aws.String("LangCode"), + PrivacyProtectAdminContact: aws.Bool(true), + PrivacyProtectRegistrantContact: aws.Bool(true), + PrivacyProtectTechContact: aws.Bool(true), + } + resp, err := svc.RegisterDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_RetrieveDomainAuthCode() { + svc := route53domains.New(session.New()) + + params := &route53domains.RetrieveDomainAuthCodeInput{ + DomainName: aws.String("DomainName"), // Required + } + resp, err := svc.RetrieveDomainAuthCode(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53Domains_TransferDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.TransferDomainInput{ + AdminContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + DomainName: aws.String("DomainName"), // Required + DurationInYears: aws.Int64(1), // Required + RegistrantContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... 
+ }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + TechContact: &route53domains.ContactDetail{ // Required + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + AuthCode: aws.String("DomainAuthCode"), + AutoRenew: aws.Bool(true), + IdnLangCode: aws.String("LangCode"), + Nameservers: []*route53domains.Nameserver{ + { // Required + Name: aws.String("HostName"), // Required + GlueIps: []*string{ + aws.String("GlueIp"), // Required + // More values... + }, + }, + // More values... + }, + PrivacyProtectAdminContact: aws.Bool(true), + PrivacyProtectRegistrantContact: aws.Bool(true), + PrivacyProtectTechContact: aws.Bool(true), + } + resp, err := svc.TransferDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53Domains_UpdateDomainContact() { + svc := route53domains.New(session.New()) + + params := &route53domains.UpdateDomainContactInput{ + DomainName: aws.String("DomainName"), // Required + AdminContact: &route53domains.ContactDetail{ + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + RegistrantContact: &route53domains.ContactDetail{ + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... 
+ }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + TechContact: &route53domains.ContactDetail{ + AddressLine1: aws.String("AddressLine"), + AddressLine2: aws.String("AddressLine"), + City: aws.String("City"), + ContactType: aws.String("ContactType"), + CountryCode: aws.String("CountryCode"), + Email: aws.String("Email"), + ExtraParams: []*route53domains.ExtraParam{ + { // Required + Name: aws.String("ExtraParamName"), // Required + Value: aws.String("ExtraParamValue"), // Required + }, + // More values... + }, + Fax: aws.String("ContactNumber"), + FirstName: aws.String("ContactName"), + LastName: aws.String("ContactName"), + OrganizationName: aws.String("ContactName"), + PhoneNumber: aws.String("ContactNumber"), + State: aws.String("State"), + ZipCode: aws.String("ZipCode"), + }, + } + resp, err := svc.UpdateDomainContact(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_UpdateDomainContactPrivacy() { + svc := route53domains.New(session.New()) + + params := &route53domains.UpdateDomainContactPrivacyInput{ + DomainName: aws.String("DomainName"), // Required + AdminPrivacy: aws.Bool(true), + RegistrantPrivacy: aws.Bool(true), + TechPrivacy: aws.Bool(true), + } + resp, err := svc.UpdateDomainContactPrivacy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleRoute53Domains_UpdateDomainNameservers() { + svc := route53domains.New(session.New()) + + params := &route53domains.UpdateDomainNameserversInput{ + DomainName: aws.String("DomainName"), // Required + Nameservers: []*route53domains.Nameserver{ // Required + { // Required + Name: aws.String("HostName"), // Required + GlueIps: []*string{ + aws.String("GlueIp"), // Required + // More values... + }, + }, + // More values... + }, + FIAuthKey: aws.String("FIAuthKey"), + } + resp, err := svc.UpdateDomainNameservers(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleRoute53Domains_UpdateTagsForDomain() { + svc := route53domains.New(session.New()) + + params := &route53domains.UpdateTagsForDomainInput{ + DomainName: aws.String("DomainName"), // Required + TagsToUpdate: []*route53domains.Tag{ + { // Required + Key: aws.String("TagKey"), + Value: aws.String("TagValue"), + }, + // More values... + }, + } + resp, err := svc.UpdateTagsForDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/route53domainsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package route53domainsiface provides an interface for the Amazon Route 53 Domains. +package route53domainsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/route53domains" +) + +// Route53DomainsAPI is the interface type for route53domains.Route53Domains. 
+type Route53DomainsAPI interface { + CheckDomainAvailabilityRequest(*route53domains.CheckDomainAvailabilityInput) (*request.Request, *route53domains.CheckDomainAvailabilityOutput) + + CheckDomainAvailability(*route53domains.CheckDomainAvailabilityInput) (*route53domains.CheckDomainAvailabilityOutput, error) + + DeleteTagsForDomainRequest(*route53domains.DeleteTagsForDomainInput) (*request.Request, *route53domains.DeleteTagsForDomainOutput) + + DeleteTagsForDomain(*route53domains.DeleteTagsForDomainInput) (*route53domains.DeleteTagsForDomainOutput, error) + + DisableDomainAutoRenewRequest(*route53domains.DisableDomainAutoRenewInput) (*request.Request, *route53domains.DisableDomainAutoRenewOutput) + + DisableDomainAutoRenew(*route53domains.DisableDomainAutoRenewInput) (*route53domains.DisableDomainAutoRenewOutput, error) + + DisableDomainTransferLockRequest(*route53domains.DisableDomainTransferLockInput) (*request.Request, *route53domains.DisableDomainTransferLockOutput) + + DisableDomainTransferLock(*route53domains.DisableDomainTransferLockInput) (*route53domains.DisableDomainTransferLockOutput, error) + + EnableDomainAutoRenewRequest(*route53domains.EnableDomainAutoRenewInput) (*request.Request, *route53domains.EnableDomainAutoRenewOutput) + + EnableDomainAutoRenew(*route53domains.EnableDomainAutoRenewInput) (*route53domains.EnableDomainAutoRenewOutput, error) + + EnableDomainTransferLockRequest(*route53domains.EnableDomainTransferLockInput) (*request.Request, *route53domains.EnableDomainTransferLockOutput) + + EnableDomainTransferLock(*route53domains.EnableDomainTransferLockInput) (*route53domains.EnableDomainTransferLockOutput, error) + + GetDomainDetailRequest(*route53domains.GetDomainDetailInput) (*request.Request, *route53domains.GetDomainDetailOutput) + + GetDomainDetail(*route53domains.GetDomainDetailInput) (*route53domains.GetDomainDetailOutput, error) + + GetOperationDetailRequest(*route53domains.GetOperationDetailInput) (*request.Request, 
*route53domains.GetOperationDetailOutput) + + GetOperationDetail(*route53domains.GetOperationDetailInput) (*route53domains.GetOperationDetailOutput, error) + + ListDomainsRequest(*route53domains.ListDomainsInput) (*request.Request, *route53domains.ListDomainsOutput) + + ListDomains(*route53domains.ListDomainsInput) (*route53domains.ListDomainsOutput, error) + + ListDomainsPages(*route53domains.ListDomainsInput, func(*route53domains.ListDomainsOutput, bool) bool) error + + ListOperationsRequest(*route53domains.ListOperationsInput) (*request.Request, *route53domains.ListOperationsOutput) + + ListOperations(*route53domains.ListOperationsInput) (*route53domains.ListOperationsOutput, error) + + ListOperationsPages(*route53domains.ListOperationsInput, func(*route53domains.ListOperationsOutput, bool) bool) error + + ListTagsForDomainRequest(*route53domains.ListTagsForDomainInput) (*request.Request, *route53domains.ListTagsForDomainOutput) + + ListTagsForDomain(*route53domains.ListTagsForDomainInput) (*route53domains.ListTagsForDomainOutput, error) + + RegisterDomainRequest(*route53domains.RegisterDomainInput) (*request.Request, *route53domains.RegisterDomainOutput) + + RegisterDomain(*route53domains.RegisterDomainInput) (*route53domains.RegisterDomainOutput, error) + + RetrieveDomainAuthCodeRequest(*route53domains.RetrieveDomainAuthCodeInput) (*request.Request, *route53domains.RetrieveDomainAuthCodeOutput) + + RetrieveDomainAuthCode(*route53domains.RetrieveDomainAuthCodeInput) (*route53domains.RetrieveDomainAuthCodeOutput, error) + + TransferDomainRequest(*route53domains.TransferDomainInput) (*request.Request, *route53domains.TransferDomainOutput) + + TransferDomain(*route53domains.TransferDomainInput) (*route53domains.TransferDomainOutput, error) + + UpdateDomainContactRequest(*route53domains.UpdateDomainContactInput) (*request.Request, *route53domains.UpdateDomainContactOutput) + + UpdateDomainContact(*route53domains.UpdateDomainContactInput) 
(*route53domains.UpdateDomainContactOutput, error) + + UpdateDomainContactPrivacyRequest(*route53domains.UpdateDomainContactPrivacyInput) (*request.Request, *route53domains.UpdateDomainContactPrivacyOutput) + + UpdateDomainContactPrivacy(*route53domains.UpdateDomainContactPrivacyInput) (*route53domains.UpdateDomainContactPrivacyOutput, error) + + UpdateDomainNameserversRequest(*route53domains.UpdateDomainNameserversInput) (*request.Request, *route53domains.UpdateDomainNameserversOutput) + + UpdateDomainNameservers(*route53domains.UpdateDomainNameserversInput) (*route53domains.UpdateDomainNameserversOutput, error) + + UpdateTagsForDomainRequest(*route53domains.UpdateTagsForDomainInput) (*request.Request, *route53domains.UpdateTagsForDomainOutput) + + UpdateTagsForDomain(*route53domains.UpdateTagsForDomainInput) (*route53domains.UpdateTagsForDomainOutput, error) +} + +var _ Route53DomainsAPI = (*route53domains.Route53Domains)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/route53domains/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,88 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package route53domains + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Route53Domains is a client for Amazon Route 53 Domains. +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type Route53Domains struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "route53domains" + +// New creates a new instance of the Route53Domains client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Route53Domains client from just a session. +// svc := route53domains.New(mySession) +// +// // Create a Route53Domains client with additional configuration +// svc := route53domains.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53Domains { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Route53Domains { + svc := &Route53Domains{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-05-15", + JSONVersion: "1.1", + TargetPrefix: "Route53Domains_v20140515", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Route53Domains operation and runs any +// custom request initialization. +func (c *Route53Domains) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,6311 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3 provides a client for Amazon Simple Storage Service. 
+package s3 + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteMultipartUploadOutput{} + req.Data = output + return +} + +// Completes a multipart upload by assembling previously uploaded parts. 
+func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a request for the CopyObject operation. +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyObjectOutput{} + req.Data = output + return +} + +// Creates a copy of an object that is already stored in Amazon S3. +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + err := req.Send() + return out, err +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a request for the CreateBucket operation. +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBucketOutput{} + req.Data = output + return +} + +// Creates a new bucket. +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + err := req.Send() + return out, err +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation. 
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMultipartUploadOutput{} + req.Data = output + return +} + +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a request for the DeleteBucket operation. +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketOutput{} + req.Data = output + return +} + +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. 
+func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a request for the DeleteBucketCors operation. +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketCorsOutput{} + req.Data = output + return +} + +// Deletes the cors configuration information set for the bucket. +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation. +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketLifecycleOutput{} + req.Data = output + return +} + +// Deletes the lifecycle configuration from the bucket. 
+func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation. +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketPolicyOutput{} + req.Data = output + return +} + +// Deletes the policy from the bucket. +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation. 
+func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation. +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketTaggingOutput{} + req.Data = output + return +} + +// Deletes the tags from the bucket. +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation. 
+func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketWebsiteOutput{} + req.Data = output + return +} + +// This operation removes the website configuration from the bucket. +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a request for the DeleteObject operation. +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectOutput{} + req.Data = output + return +} + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a request for the DeleteObjects operation. 
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectsOutput{} + req.Data = output + return +} + +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a request for the GetBucketAcl operation. +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAclOutput{} + req.Data = output + return +} + +// Gets the access control policy for the bucket. +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a request for the GetBucketCors operation. +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketCorsOutput{} + req.Data = output + return +} + +// Returns the cors configuration for the bucket. 
+func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation. +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a request for the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Returns the lifecycle configuration information set on the bucket. 
+func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a request for the GetBucketLocation operation. +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLocationOutput{} + req.Data = output + return +} + +// Returns the region the bucket resides in. +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a request for the GetBucketLogging operation. +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLoggingOutput{} + req.Data = output + return +} + +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. 
+func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a request for the GetBucketNotification operation. +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfigurationDeprecated{} + req.Data = output + return +} + +// Deprecated, see the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfiguration{} + req.Data = output + return +} + +// Returns the notification configuration of a bucket. 
+func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation. +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketPolicyOutput{} + req.Data = output + return +} + +// Returns the policy of a specified bucket. +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a request for the GetBucketReplication operation. +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation. 
+func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Returns the request payment configuration of a bucket. +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a request for the GetBucketTagging operation. +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketTaggingOutput{} + req.Data = output + return +} + +// Returns the tag set associated with the bucket. +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation. 
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketVersioningOutput{} + req.Data = output + return +} + +// Returns the versioning state of a bucket. +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation. +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketWebsiteOutput{} + req.Data = output + return +} + +// Returns the website configuration for a bucket. +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a request for the GetObject operation. +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectOutput{} + req.Data = output + return +} + +// Retrieves objects from Amazon S3. 
+func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a request for the GetObjectAcl operation. +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectAclOutput{} + req.Data = output + return +} + +// Returns the access control list (ACL) of an object. +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation. +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectTorrentOutput{} + req.Data = output + return +} + +// Return torrent files from a bucket. +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + err := req.Send() + return out, err +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a request for the HeadBucket operation. 
+func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &HeadBucketOutput{} + req.Data = output + return +} + +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + err := req.Send() + return out, err +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a request for the HeadObject operation. +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &HeadObjectOutput{} + req.Data = output + return +} + +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + err := req.Send() + return out, err +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a request for the ListBuckets operation. 
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBucketsOutput{} + req.Data = output + return +} + +// Returns a list of all buckets owned by the authenticated sender of the request. +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + err := req.Send() + return out, err +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMultipartUploadsOutput{} + req.Data = output + return +} + +// This operation lists in-progress multipart uploads. 
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMultipartUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMultipartUploadsOutput), lastPage) + }) +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a request for the ListObjectVersions operation. +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectVersionsOutput{} + req.Data = output + return +} + +// Returns metadata about all of the versions of objects in a bucket. 
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectVersionsOutput), lastPage) + }) +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a request for the ListObjects operation. +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsOutput{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. 
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsOutput), lastPage) + }) +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a request for the ListParts operation. +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPartsOutput{} + req.Data = output + return +} + +// Lists the parts that have been uploaded for a specific multipart upload. +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a request for the PutBucketAcl operation. 
+func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketAclOutput{} + req.Data = output + return +} + +// Sets the permissions on a bucket using access control lists (ACL). +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a request for the PutBucketCors operation. +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketCorsOutput{} + req.Data = output + return +} + +// Sets the cors configuration for a bucket. +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation. 
+func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketLifecycleConfiguration operation. +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a request for the PutBucketLifecycleConfiguration operation. +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. 
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a request for the PutBucketLogging operation. +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLoggingOutput{} + req.Data = output + return +} + +// Set the logging parameters for a bucket and to specify permissions for who +// can view and modify the logging parameters. To set the logging status of +// a bucket, you must be the bucket owner. +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a request for the PutBucketNotification operation. 
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketNotificationOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketNotificationConfiguraiton operation. +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation. +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketNotificationConfigurationOutput{} + req.Data = output + return +} + +// Enables notifications of specified events for a bucket. 
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation. +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketPolicyOutput{} + req.Data = output + return +} + +// Replaces a policy on a bucket. If the bucket already has a policy, the one +// in this request completely replaces it. +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a request for the PutBucketReplication operation. 
+func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketReplicationOutput{} + req.Data = output + return +} + +// Creates a new replication configuration (or replaces an existing one, if +// present). +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation. +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. 
Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a request for the PutBucketTagging operation. +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketTaggingOutput{} + req.Data = output + return +} + +// Sets the tags for a bucket. +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation. 
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketVersioningOutput{} + req.Data = output + return +} + +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation. +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketWebsiteOutput{} + req.Data = output + return +} + +// Set the website configuration for a bucket. +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a request for the PutObject operation. 
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &PutObjectOutput{} + req.Data = output + return +} + +// Adds an object to a bucket. +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + err := req.Send() + return out, err +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a request for the PutObjectAcl operation. +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &PutObjectAclOutput{} + req.Data = output + return +} + +// uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a request for the RestoreObject operation. 
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreObjectOutput{} + req.Data = output + return +} + +// Restores an archived copy of an object back into Amazon S3 +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + err := req.Send() + return out, err +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a request for the UploadPart operation. +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadPartOutput{} + req.Data = output + return +} + +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + err := req.Send() + return out, err +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a request for the UploadPartCopy operation. 
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadPartCopyOutput{} + req.Data = output + return +} + +// Uploads a part by copying data from an existing object as data source. +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + err := req.Send() + return out, err +} + +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. 
+ AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. 
+ Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. 
+ // To redirect request for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied. + KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +type CopyObjectInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. 
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. 
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata provided in the request. + MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. 
This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. 
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. 
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. 
The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +type Delete struct { + _ struct{} `type:"structure"` + + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) 
String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + 
return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + 
+type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +type Destination struct { + _ struct{} `type:"structure"` + + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + Bucket *string `type:"string" required:"true"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. 
+ Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Container for key value pair that defines the criteria for the filter rule. +type FilterRule struct { + _ struct{} `type:"structure"` + + // Object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. + // Overlapping prefixes and suffixes are not supported. For more information, + // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. 
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string 
representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +type GetBucketLocationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s 
GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `type:"structure"` + + // Name of the buket to get the notification configuration for. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +type GetBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. 
+ Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
+ Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. 
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +type GetObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). 
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Sets the Cache-Control header of the response. + ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. 
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. 
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. 
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. 
+ URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +type HeadBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). 
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectInput) GoString() string { + return s.String() +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. 
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. 
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// Container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. 
+ FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// Container for specifying the AWS Lambda notification configuration. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. 
+ LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +type LifecycleRule struct { + _ struct{} `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. 
You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group 
keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. 
+ Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. 
+ UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. 
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. 
+ NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +type ListObjectsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Contents []*Object `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + Marker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Amazon S3 lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. 
+ NextMarker *string `type:"string"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +type ListPartsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Identifies who initiated the multipart upload. 
+ Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. 
+ TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. 
For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
+type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. 
+ Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. 
+ Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. 
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation 
+func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s 
PutBucketLifecycleInput) GoString() string { + return s.String() +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. 
+ NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. 
+ Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + RequestPaymentConfiguration *RequestPaymentConfiguration 
`locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ struct{} `type:"structure" payload:"VersioningConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. 
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. 
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. 
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectInput) GoString() string { + return s.String() +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// Container for specifying an configuration when you want Amazon S3 to publish +// events to an Amazon Simple Queue Service (Amazon SQS) queue. +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + Queue *string `type:"string"` +} + +// String returns the string representation +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +type Redirect struct { + _ struct{} `type:"structure"` + + // The host name to use in the redirect request. + HostName *string `type:"string"` + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string `type:"string"` + + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` + + // The object key prefix to use in the redirect request. 
For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required + // if one of the siblings is present. Can be present only if ReplaceKeyWith + // is not provided. + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the sibling is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Redirect) GoString() string { + return s.String() +} + +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests will be redirected. + HostName *string `type:"string" required:"true"` + + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + Role *string `type:"string" required:"true"` + + // Container for information about a particular replication rule. 
Replication + // configuration must have at least one rule and can contain up to 1,000 rules. + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +type ReplicationRule struct { + _ struct{} `type:"structure"` + + Destination *Destination `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Object keyname prefix identifying one or more objects to which the rule applies. + // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes + // are not supported. + Prefix *string `type:"string" required:"true"` + + // The rule is ignored if status is not Enabled. + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
+ Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +type Rule struct { + _ struct{} `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. 
Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA or GLACIER storage class. If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA + // or GLACIER storage class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. 
+ Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +type Tagging struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Logging permissions assigned to the Grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Container for specifying the configuration when you want Amazon S3 to publish +// events to an Amazon Simple Notification Service (Amazon SNS) topic. +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. 
+ Id *string `type:"string"` + + // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. 
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +type UploadPartCopyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. You can copy a range only if the source object + // is greater than 5 GB. 
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +type UploadPartInput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadSeeker `type:"blob"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. 
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being uploaded. This is a positive integer between 1 + // and 10,000. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +const ( + // @enum BucketCannedACL + BucketCannedACLPrivate = "private" + // @enum BucketCannedACL + BucketCannedACLPublicRead = "public-read" + // @enum BucketCannedACL + BucketCannedACLPublicReadWrite = "public-read-write" + // @enum BucketCannedACL + 
BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +const ( + // @enum BucketLocationConstraint + BucketLocationConstraintEu = "EU" + // @enum BucketLocationConstraint + BucketLocationConstraintEuWest1 = "eu-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest1 = "us-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest2 = "us-west-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintSaEast1 = "sa-east-1" + // @enum BucketLocationConstraint + BucketLocationConstraintCnNorth1 = "cn-north-1" + // @enum BucketLocationConstraint + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +const ( + // @enum BucketLogsPermission + BucketLogsPermissionFullControl = "FULL_CONTROL" + // @enum BucketLogsPermission + BucketLogsPermissionRead = "READ" + // @enum BucketLogsPermission + BucketLogsPermissionWrite = "WRITE" +) + +const ( + // @enum BucketVersioningStatus + BucketVersioningStatusEnabled = "Enabled" + // @enum BucketVersioningStatus + BucketVersioningStatusSuspended = "Suspended" +) + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // @enum EncodingType + EncodingTypeUrl = "url" +) + +// Bucket event for which to send notifications. 
+const ( + // @enum Event + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + // @enum Event + EventS3ObjectCreated = "s3:ObjectCreated:*" + // @enum Event + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + // @enum Event + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + // @enum Event + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + // @enum Event + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + // @enum Event + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + // @enum Event + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + // @enum Event + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" +) + +const ( + // @enum ExpirationStatus + ExpirationStatusEnabled = "Enabled" + // @enum ExpirationStatus + ExpirationStatusDisabled = "Disabled" +) + +const ( + // @enum FilterRuleName + FilterRuleNamePrefix = "prefix" + // @enum FilterRuleName + FilterRuleNameSuffix = "suffix" +) + +const ( + // @enum MFADelete + MFADeleteEnabled = "Enabled" + // @enum MFADelete + MFADeleteDisabled = "Disabled" +) + +const ( + // @enum MFADeleteStatus + MFADeleteStatusEnabled = "Enabled" + // @enum MFADeleteStatus + MFADeleteStatusDisabled = "Disabled" +) + +const ( + // @enum MetadataDirective + MetadataDirectiveCopy = "COPY" + // @enum MetadataDirective + MetadataDirectiveReplace = "REPLACE" +) + +const ( + // @enum ObjectCannedACL + ObjectCannedACLPrivate = "private" + // @enum ObjectCannedACL + ObjectCannedACLPublicRead = "public-read" + // @enum ObjectCannedACL + ObjectCannedACLPublicReadWrite = "public-read-write" + // @enum ObjectCannedACL + ObjectCannedACLAuthenticatedRead = "authenticated-read" + // @enum ObjectCannedACL + ObjectCannedACLAwsExecRead = "aws-exec-read" + // @enum ObjectCannedACL + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + // @enum ObjectCannedACL + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + // @enum 
ObjectStorageClass + ObjectStorageClassStandard = "STANDARD" + // @enum ObjectStorageClass + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum ObjectStorageClass + ObjectStorageClassGlacier = "GLACIER" +) + +const ( + // @enum ObjectVersionStorageClass + ObjectVersionStorageClassStandard = "STANDARD" +) + +const ( + // @enum Payer + PayerRequester = "Requester" + // @enum Payer + PayerBucketOwner = "BucketOwner" +) + +const ( + // @enum Permission + PermissionFullControl = "FULL_CONTROL" + // @enum Permission + PermissionWrite = "WRITE" + // @enum Permission + PermissionWriteAcp = "WRITE_ACP" + // @enum Permission + PermissionRead = "READ" + // @enum Permission + PermissionReadAcp = "READ_ACP" +) + +const ( + // @enum Protocol + ProtocolHttp = "http" + // @enum Protocol + ProtocolHttps = "https" +) + +const ( + // @enum ReplicationRuleStatus + ReplicationRuleStatusEnabled = "Enabled" + // @enum ReplicationRuleStatus + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // @enum ReplicationStatus + ReplicationStatusComplete = "COMPLETE" + // @enum ReplicationStatus + ReplicationStatusPending = "PENDING" + // @enum ReplicationStatus + ReplicationStatusFailed = "FAILED" + // @enum ReplicationStatus + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // @enum RequestCharged + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. 
+// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // @enum RequestPayer + RequestPayerRequester = "requester" +) + +const ( + // @enum ServerSideEncryption + ServerSideEncryptionAes256 = "AES256" + // @enum ServerSideEncryption + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // @enum StorageClass + StorageClassStandard = "STANDARD" + // @enum StorageClass + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum StorageClass + StorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum TransitionStorageClass + TransitionStorageClassGlacier = "GLACIER" + // @enum TransitionStorageClass + TransitionStorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum Type + TypeCanonicalUser = "CanonicalUser" + // @enum Type + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + // @enum Type + TypeGroup = "Group" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,43 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = 
awserr.New("SerializationError", "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = &loc + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,78 @@ +package s3_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +var s3LocationTests = []struct { + body string + loc string +}{ + {``, ``}, + {`EU`, `EU`}, +} + +func TestGetBucketLocation(t *testing.T) { + for _, test := range s3LocationTests { + s := s3.New(unit.Session) + s.Handlers.Send.Clear() + s.Handlers.Send.PushBack(func(r *request.Request) { + reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader} + }) + + resp, err := 
s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")}) + assert.NoError(t, err) + if test.loc == "" { + assert.Nil(t, resp.LocationConstraint) + } else { + assert.Equal(t, test.loc, *resp.LocationConstraint) + } + } +} + +func TestPopulateLocationConstraint(t *testing.T) { + s := s3.New(unit.Session) + in := &s3.CreateBucketInput{ + Bucket: aws.String("bucket"), + } + req, _ := s.CreateBucketRequest(in) + err := req.Build() + assert.NoError(t, err) + v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint") + assert.Equal(t, "mock-region", *(v[0].(*string))) + assert.Nil(t, in.CreateBucketConfiguration) // don't modify original params +} + +func TestNoPopulateLocationConstraintIfProvided(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ + Bucket: aws.String("bucket"), + CreateBucketConfiguration: &s3.CreateBucketConfiguration{}, + }) + err := req.Build() + assert.NoError(t, err) + v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint") + assert.Equal(t, 0, len(v)) +} + +func TestNoPopulateLocationConstraintIfClassic(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{Region: aws.String("us-east-1")}) + req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ + Bucket: aws.String("bucket"), + }) + err := req.Build() + assert.NoError(t, err) + v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint") + assert.Equal(t, 0, len(v)) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1,36 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. + _, err := io.Copy(h, r.Body) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to read body", err) + return + } + _, err = r.Body.Seek(0, 0) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to seek body", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + sum := h.Sum(nil) + sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(sum64, sum) + r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,37 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = func(c *client.Client) { + // Support building custom host-style bucket endpoints + c.Handlers.Build.PushFront(updateHostWithBucket) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeys) + + // S3 uses custom error 
unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + } + + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration: + // These S3 operations require Content-MD5 to be set + r.Handlers.Build.PushBack(contentMD5) + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,105 @@ +package s3_test + +import ( + "crypto/md5" + "encoding/base64" + "io/ioutil" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +func assertMD5(t *testing.T, req *request.Request) { + err := req.Build() + assert.NoError(t, err) + + b, _ := ioutil.ReadAll(req.HTTPRequest.Body) + out := md5.Sum(b) + assert.NotEmpty(t, b) + assert.Equal(t, base64.StdEncoding.EncodeToString(out[:]), req.HTTPRequest.Header.Get("Content-MD5")) +} + +func 
TestMD5InPutBucketCors(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketCorsRequest(&s3.PutBucketCorsInput{ + Bucket: aws.String("bucketname"), + CORSConfiguration: &s3.CORSConfiguration{ + CORSRules: []*s3.CORSRule{ + { + AllowedMethods: []*string{aws.String("GET")}, + AllowedOrigins: []*string{aws.String("*")}, + }, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketLifecycle(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketLifecycleRequest(&s3.PutBucketLifecycleInput{ + Bucket: aws.String("bucketname"), + LifecycleConfiguration: &s3.LifecycleConfiguration{ + Rules: []*s3.Rule{ + { + ID: aws.String("ID"), + Prefix: aws.String("Prefix"), + Status: aws.String("Enabled"), + }, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketPolicy(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{ + Bucket: aws.String("bucketname"), + Policy: aws.String("{}"), + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketTagging(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{ + Bucket: aws.String("bucketname"), + Tagging: &s3.Tagging{ + TagSet: []*s3.Tag{ + {Key: aws.String("KEY"), Value: aws.String("VALUE")}, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InDeleteObjects(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{ + Bucket: aws.String("bucketname"), + Delete: &s3.Delete{ + Objects: []*s3.ObjectIdentifier{ + {Key: aws.String("key")}, + }, + }, + }) + assertMD5(t, req) +} + +func TestMD5InPutBucketLifecycleConfiguration(t *testing.T) { + svc := s3.New(unit.Session) + req, _ := svc.PutBucketLifecycleConfigurationRequest(&s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String("bucketname"), + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: []*s3.LifecycleRule{ + {Prefix: aws.String("prefix"), Status: 
aws.String(s3.ExpirationStatusEnabled)}, + }, + }, + }) + assertMD5(t, req) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1599 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleS3_AbortMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.AbortMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.AbortMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_CompleteMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.CompleteMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: []*s3.CompletedPart{ + { // Required + ETag: aws.String("ETag"), + PartNumber: aws.Int64(1), + }, + // More values... 
+ }, + }, + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.CompleteMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_CopyObject() { + svc := s3.New(session.New()) + + params := &s3.CopyObjectInput{ + Bucket: aws.String("BucketName"), // Required + CopySource: aws.String("CopySource"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentType: aws.String("ContentType"), + CopySourceIfMatch: aws.String("CopySourceIfMatch"), + CopySourceIfModifiedSince: aws.Time(time.Now()), + CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"), + CopySourceIfUnmodifiedSince: aws.Time(time.Now()), + CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"), + CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"), + CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... 
+ }, + MetadataDirective: aws.String("MetadataDirective"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.CopyObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_CreateBucket() { + svc := s3.New(session.New()) + + params := &s3.CreateBucketInput{ + Bucket: aws.String("BucketName"), // Required + ACL: aws.String("BucketCannedACL"), + CreateBucketConfiguration: &s3.CreateBucketConfiguration{ + LocationConstraint: aws.String("BucketLocationConstraint"), + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + } + resp, err := svc.CreateBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_CreateMultipartUpload() { + svc := s3.New(session.New()) + + params := &s3.CreateMultipartUploadInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentType: aws.String("ContentType"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... + }, + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.CreateMultipartUpload(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucket() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_DeleteBucketCors() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_DeleteBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.DeleteBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.DeleteBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteObject() { + svc := s3.New(session.New()) + + params := &s3.DeleteObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + MFA: aws.String("MFA"), + RequestPayer: aws.String("RequestPayer"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.DeleteObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_DeleteObjects() { + svc := s3.New(session.New()) + + params := &s3.DeleteObjectsInput{ + Bucket: aws.String("BucketName"), // Required + Delete: &s3.Delete{ // Required + Objects: []*s3.ObjectIdentifier{ // Required + { // Required + Key: aws.String("ObjectKey"), // Required + VersionId: aws.String("ObjectVersionId"), + }, + // More values... 
+ }, + Quiet: aws.Bool(true), + }, + MFA: aws.String("MFA"), + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.DeleteObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketAcl() { + svc := s3.New(session.New()) + + params := &s3.GetBucketAclInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketCors() { + svc := s3.New(session.New()) + + params := &s3.GetBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLifecycleConfiguration() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLifecycleConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLifecycleConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLocation() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLocationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLocation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketLogging() { + svc := s3.New(session.New()) + + params := &s3.GetBucketLoggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketNotification() { + svc := s3.New(session.New()) + + params := &s3.GetBucketNotificationConfigurationRequest{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketNotification(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketNotificationConfiguration() { + svc := s3.New(session.New()) + + params := &s3.GetBucketNotificationConfigurationRequest{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_GetBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.GetBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.GetBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketRequestPayment() { + svc := s3.New(session.New()) + + params := &s3.GetBucketRequestPaymentInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketRequestPayment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.GetBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_GetBucketVersioning() { + svc := s3.New(session.New()) + + params := &s3.GetBucketVersioningInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketVersioning(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.GetBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.GetBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetObject() { + svc := s3.New(session.New()) + + params := &s3.GetObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + IfMatch: aws.String("IfMatch"), + IfModifiedSince: aws.Time(time.Now()), + IfNoneMatch: aws.String("IfNoneMatch"), + IfUnmodifiedSince: aws.Time(time.Now()), + Range: aws.String("Range"), + RequestPayer: aws.String("RequestPayer"), + ResponseCacheControl: aws.String("ResponseCacheControl"), + ResponseContentDisposition: aws.String("ResponseContentDisposition"), + ResponseContentEncoding: aws.String("ResponseContentEncoding"), + ResponseContentLanguage: aws.String("ResponseContentLanguage"), + ResponseContentType: aws.String("ResponseContentType"), + ResponseExpires: aws.Time(time.Now()), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.GetObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and 
+ // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetObjectAcl() { + svc := s3.New(session.New()) + + params := &s3.GetObjectAclInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.GetObjectAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_GetObjectTorrent() { + svc := s3.New(session.New()) + + params := &s3.GetObjectTorrentInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.GetObjectTorrent(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_HeadBucket() { + svc := s3.New(session.New()) + + params := &s3.HeadBucketInput{ + Bucket: aws.String("BucketName"), // Required + } + resp, err := svc.HeadBucket(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_HeadObject() { + svc := s3.New(session.New()) + + params := &s3.HeadObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + IfMatch: aws.String("IfMatch"), + IfModifiedSince: aws.Time(time.Now()), + IfNoneMatch: aws.String("IfNoneMatch"), + IfUnmodifiedSince: aws.Time(time.Now()), + Range: aws.String("Range"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.HeadObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListBuckets() { + svc := s3.New(session.New()) + + var params *s3.ListBucketsInput + resp, err := svc.ListBuckets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListMultipartUploads() { + svc := s3.New(session.New()) + + params := &s3.ListMultipartUploadsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + KeyMarker: aws.String("KeyMarker"), + MaxUploads: aws.Int64(1), + Prefix: aws.String("Prefix"), + UploadIdMarker: aws.String("UploadIdMarker"), + } + resp, err := svc.ListMultipartUploads(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_ListObjectVersions() { + svc := s3.New(session.New()) + + params := &s3.ListObjectVersionsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + KeyMarker: aws.String("KeyMarker"), + MaxKeys: aws.Int64(1), + Prefix: aws.String("Prefix"), + VersionIdMarker: aws.String("VersionIdMarker"), + } + resp, err := svc.ListObjectVersions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListObjects() { + svc := s3.New(session.New()) + + params := &s3.ListObjectsInput{ + Bucket: aws.String("BucketName"), // Required + Delimiter: aws.String("Delimiter"), + EncodingType: aws.String("EncodingType"), + Marker: aws.String("Marker"), + MaxKeys: aws.Int64(1), + Prefix: aws.String("Prefix"), + } + resp, err := svc.ListObjects(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_ListParts() { + svc := s3.New(session.New()) + + params := &s3.ListPartsInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + UploadId: aws.String("MultipartUploadId"), // Required + MaxParts: aws.Int64(1), + PartNumberMarker: aws.Int64(1), + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.ListParts(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketAcl() { + svc := s3.New(session.New()) + + params := &s3.PutBucketAclInput{ + Bucket: aws.String("BucketName"), // Required + ACL: aws.String("BucketCannedACL"), + AccessControlPolicy: &s3.AccessControlPolicy{ + Grants: []*s3.Grant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("Permission"), + }, + // More values... + }, + Owner: &s3.Owner{ + DisplayName: aws.String("DisplayName"), + ID: aws.String("ID"), + }, + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + } + resp, err := svc.PutBucketAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketCors() { + svc := s3.New(session.New()) + + params := &s3.PutBucketCorsInput{ + Bucket: aws.String("BucketName"), // Required + CORSConfiguration: &s3.CORSConfiguration{ // Required + CORSRules: []*s3.CORSRule{ // Required + { // Required + AllowedMethods: []*string{ // Required + aws.String("AllowedMethod"), // Required + // More values... + }, + AllowedOrigins: []*string{ // Required + aws.String("AllowedOrigin"), // Required + // More values... + }, + AllowedHeaders: []*string{ + aws.String("AllowedHeader"), // Required + // More values... + }, + ExposeHeaders: []*string{ + aws.String("ExposeHeader"), // Required + // More values... + }, + MaxAgeSeconds: aws.Int64(1), + }, + // More values... 
+ }, + }, + } + resp, err := svc.PutBucketCors(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketLifecycle() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLifecycleInput{ + Bucket: aws.String("BucketName"), // Required + LifecycleConfiguration: &s3.LifecycleConfiguration{ + Rules: []*s3.Rule{ // Required + { // Required + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ExpirationStatus"), // Required + Expiration: &s3.LifecycleExpiration{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + }, + ID: aws.String("ID"), + NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int64(1), + }, + NoncurrentVersionTransition: &s3.NoncurrentVersionTransition{ + NoncurrentDays: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + Transition: &s3.Transition{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketLifecycle(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketLifecycleConfiguration() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: []*s3.LifecycleRule{ // Required + { // Required + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ExpirationStatus"), // Required + Expiration: &s3.LifecycleExpiration{ + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + }, + ID: aws.String("ID"), + NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int64(1), + }, + NoncurrentVersionTransitions: []*s3.NoncurrentVersionTransition{ + { // Required + NoncurrentDays: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + // More values... + }, + Transitions: []*s3.Transition{ + { // Required + Date: aws.Time(time.Now()), + Days: aws.Int64(1), + StorageClass: aws.String("TransitionStorageClass"), + }, + // More values... + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketLifecycleConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketLogging() { + svc := s3.New(session.New()) + + params := &s3.PutBucketLoggingInput{ + Bucket: aws.String("BucketName"), // Required + BucketLoggingStatus: &s3.BucketLoggingStatus{ // Required + LoggingEnabled: &s3.LoggingEnabled{ + TargetBucket: aws.String("TargetBucket"), + TargetGrants: []*s3.TargetGrant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("BucketLogsPermission"), + }, + // More values... 
+ }, + TargetPrefix: aws.String("TargetPrefix"), + }, + }, + } + resp, err := svc.PutBucketLogging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketNotification() { + svc := s3.New(session.New()) + + params := &s3.PutBucketNotificationInput{ + Bucket: aws.String("BucketName"), // Required + NotificationConfiguration: &s3.NotificationConfigurationDeprecated{ // Required + CloudFunctionConfiguration: &s3.CloudFunctionConfiguration{ + CloudFunction: aws.String("CloudFunction"), + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... + }, + Id: aws.String("NotificationId"), + InvocationRole: aws.String("CloudFunctionInvocationRole"), + }, + QueueConfiguration: &s3.QueueConfigurationDeprecated{ + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... + }, + Id: aws.String("NotificationId"), + Queue: aws.String("QueueArn"), + }, + TopicConfiguration: &s3.TopicConfigurationDeprecated{ + Event: aws.String("Event"), + Events: []*string{ + aws.String("Event"), // Required + // More values... + }, + Id: aws.String("NotificationId"), + Topic: aws.String("TopicArn"), + }, + }, + } + resp, err := svc.PutBucketNotification(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketNotificationConfiguration() { + svc := s3.New(session.New()) + + params := &s3.PutBucketNotificationConfigurationInput{ + Bucket: aws.String("BucketName"), // Required + NotificationConfiguration: &s3.NotificationConfiguration{ // Required + LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + LambdaFunctionArn: aws.String("LambdaFunctionArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... + }, + QueueConfigurations: []*s3.QueueConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + QueueArn: aws.String("QueueArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... + }, + TopicConfigurations: []*s3.TopicConfiguration{ + { // Required + Events: []*string{ // Required + aws.String("Event"), // Required + // More values... + }, + TopicArn: aws.String("TopicArn"), // Required + Filter: &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: []*s3.FilterRule{ + { // Required + Name: aws.String("FilterRuleName"), + Value: aws.String("FilterRuleValue"), + }, + // More values... + }, + }, + }, + Id: aws.String("NotificationId"), + }, + // More values... 
+ }, + }, + } + resp, err := svc.PutBucketNotificationConfiguration(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketPolicy() { + svc := s3.New(session.New()) + + params := &s3.PutBucketPolicyInput{ + Bucket: aws.String("BucketName"), // Required + Policy: aws.String("Policy"), // Required + } + resp, err := svc.PutBucketPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketReplication() { + svc := s3.New(session.New()) + + params := &s3.PutBucketReplicationInput{ + Bucket: aws.String("BucketName"), // Required + ReplicationConfiguration: &s3.ReplicationConfiguration{ // Required + Role: aws.String("Role"), // Required + Rules: []*s3.ReplicationRule{ // Required + { // Required + Destination: &s3.Destination{ // Required + Bucket: aws.String("BucketName"), // Required + StorageClass: aws.String("StorageClass"), + }, + Prefix: aws.String("Prefix"), // Required + Status: aws.String("ReplicationRuleStatus"), // Required + ID: aws.String("ID"), + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketReplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketRequestPayment() { + svc := s3.New(session.New()) + + params := &s3.PutBucketRequestPaymentInput{ + Bucket: aws.String("BucketName"), // Required + RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ // Required + Payer: aws.String("Payer"), // Required + }, + } + resp, err := svc.PutBucketRequestPayment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketTagging() { + svc := s3.New(session.New()) + + params := &s3.PutBucketTaggingInput{ + Bucket: aws.String("BucketName"), // Required + Tagging: &s3.Tagging{ // Required + TagSet: []*s3.Tag{ // Required + { // Required + Key: aws.String("ObjectKey"), // Required + Value: aws.String("Value"), // Required + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketTagging(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_PutBucketVersioning() { + svc := s3.New(session.New()) + + params := &s3.PutBucketVersioningInput{ + Bucket: aws.String("BucketName"), // Required + VersioningConfiguration: &s3.VersioningConfiguration{ // Required + MFADelete: aws.String("MFADelete"), + Status: aws.String("BucketVersioningStatus"), + }, + MFA: aws.String("MFA"), + } + resp, err := svc.PutBucketVersioning(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutBucketWebsite() { + svc := s3.New(session.New()) + + params := &s3.PutBucketWebsiteInput{ + Bucket: aws.String("BucketName"), // Required + WebsiteConfiguration: &s3.WebsiteConfiguration{ // Required + ErrorDocument: &s3.ErrorDocument{ + Key: aws.String("ObjectKey"), // Required + }, + IndexDocument: &s3.IndexDocument{ + Suffix: aws.String("Suffix"), // Required + }, + RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{ + HostName: aws.String("HostName"), // Required + Protocol: aws.String("Protocol"), + }, + RoutingRules: []*s3.RoutingRule{ + { // Required + Redirect: &s3.Redirect{ // Required + HostName: aws.String("HostName"), + HttpRedirectCode: aws.String("HttpRedirectCode"), + Protocol: aws.String("Protocol"), + ReplaceKeyPrefixWith: aws.String("ReplaceKeyPrefixWith"), + ReplaceKeyWith: aws.String("ReplaceKeyWith"), + }, + Condition: &s3.Condition{ + HttpErrorCodeReturnedEquals: aws.String("HttpErrorCodeReturnedEquals"), + KeyPrefixEquals: aws.String("KeyPrefixEquals"), + }, + }, + // More values... + }, + }, + } + resp, err := svc.PutBucketWebsite(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutObject() { + svc := s3.New(session.New()) + + params := &s3.PutObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + Body: bytes.NewReader([]byte("PAYLOAD")), + CacheControl: aws.String("CacheControl"), + ContentDisposition: aws.String("ContentDisposition"), + ContentEncoding: aws.String("ContentEncoding"), + ContentLanguage: aws.String("ContentLanguage"), + ContentLength: aws.Int64(1), + ContentType: aws.String("ContentType"), + Expires: aws.Time(time.Now()), + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWriteACP: aws.String("GrantWriteACP"), + Metadata: map[string]*string{ + "Key": aws.String("MetadataValue"), // Required + // More values... + }, + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + SSEKMSKeyId: aws.String("SSEKMSKeyId"), + ServerSideEncryption: aws.String("ServerSideEncryption"), + StorageClass: aws.String("StorageClass"), + WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), + } + resp, err := svc.PutObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_PutObjectAcl() { + svc := s3.New(session.New()) + + params := &s3.PutObjectAclInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + ACL: aws.String("ObjectCannedACL"), + AccessControlPolicy: &s3.AccessControlPolicy{ + Grants: []*s3.Grant{ + { // Required + Grantee: &s3.Grantee{ + Type: aws.String("Type"), // Required + DisplayName: aws.String("DisplayName"), + EmailAddress: aws.String("EmailAddress"), + ID: aws.String("ID"), + URI: aws.String("URI"), + }, + Permission: aws.String("Permission"), + }, + // More values... + }, + Owner: &s3.Owner{ + DisplayName: aws.String("DisplayName"), + ID: aws.String("ID"), + }, + }, + GrantFullControl: aws.String("GrantFullControl"), + GrantRead: aws.String("GrantRead"), + GrantReadACP: aws.String("GrantReadACP"), + GrantWrite: aws.String("GrantWrite"), + GrantWriteACP: aws.String("GrantWriteACP"), + RequestPayer: aws.String("RequestPayer"), + } + resp, err := svc.PutObjectAcl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_RestoreObject() { + svc := s3.New(session.New()) + + params := &s3.RestoreObjectInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + RequestPayer: aws.String("RequestPayer"), + RestoreRequest: &s3.RestoreRequest{ + Days: aws.Int64(1), // Required + }, + VersionId: aws.String("ObjectVersionId"), + } + resp, err := svc.RestoreObject(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleS3_UploadPart() { + svc := s3.New(session.New()) + + params := &s3.UploadPartInput{ + Bucket: aws.String("BucketName"), // Required + Key: aws.String("ObjectKey"), // Required + PartNumber: aws.Int64(1), // Required + UploadId: aws.String("MultipartUploadId"), // Required + Body: bytes.NewReader([]byte("PAYLOAD")), + ContentLength: aws.Int64(1), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + } + resp, err := svc.UploadPart(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleS3_UploadPartCopy() { + svc := s3.New(session.New()) + + params := &s3.UploadPartCopyInput{ + Bucket: aws.String("BucketName"), // Required + CopySource: aws.String("CopySource"), // Required + Key: aws.String("ObjectKey"), // Required + PartNumber: aws.Int64(1), // Required + UploadId: aws.String("MultipartUploadId"), // Required + CopySourceIfMatch: aws.String("CopySourceIfMatch"), + CopySourceIfModifiedSince: aws.Time(time.Now()), + CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"), + CopySourceIfUnmodifiedSince: aws.Time(time.Now()), + CopySourceRange: aws.String("CopySourceRange"), + CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"), + CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"), + CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"), + RequestPayer: aws.String("RequestPayer"), + SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), + SSECustomerKey: aws.String("SSECustomerKey"), + SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), + } + resp, err := svc.UploadPartCopy(params) + + if err != nil { + // Print the error, cast err to 
awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,60 @@ +package s3 + +import ( + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// hostStyleBucketName returns true if the request should put the bucket in +// the host. This is false if S3ForcePathStyle is explicitly set or if the +// bucket is not DNS compatible. +func hostStyleBucketName(r *request.Request, bucket string) bool { + if aws.BoolValue(r.Config.S3ForcePathStyle) { + return false + } + + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. 
+ if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // GetBucketLocation should be able to be called from any region within + // a partition, and return the associated region of the bucket. + if r.Operation.Name == opGetBucketLocation { + return false + } + + // Use host-style if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +func updateHostWithBucket(r *request.Request) { + b, _ := awsutil.ValuesAtPath(r.Params, "Bucket") + if len(b) == 0 { + return + } + + if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) { + r.HTTPRequest.URL.Host = *bucket + "." + r.HTTPRequest.URL.Host + r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1) + if r.HTTPRequest.URL.Path == "" { + r.HTTPRequest.URL.Path = "/" + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,75 @@ +package s3_test + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +type s3BucketTest struct { + bucket string + url string +} + +var ( + sslTests = []s3BucketTest{ + {"abc", "https://abc.s3.mock-region.amazonaws.com/"}, + {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"}, + {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"}, + {"a..bc", 
"https://s3.mock-region.amazonaws.com/a..bc"}, + } + + nosslTests = []s3BucketTest{ + {"a.b.c", "http://a.b.c.s3.mock-region.amazonaws.com/"}, + {"a..bc", "http://s3.mock-region.amazonaws.com/a..bc"}, + } + + forcepathTests = []s3BucketTest{ + {"abc", "https://s3.mock-region.amazonaws.com/abc"}, + {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"}, + {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"}, + {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"}, + } +) + +func runTests(t *testing.T, svc *s3.S3, tests []s3BucketTest) { + for _, test := range tests { + req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: &test.bucket}) + req.Build() + assert.Equal(t, test.url, req.HTTPRequest.URL.String()) + } +} + +func TestHostStyleBucketBuild(t *testing.T) { + s := s3.New(unit.Session) + runTests(t, s, sslTests) +} + +func TestHostStyleBucketBuildNoSSL(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) + runTests(t, s, nosslTests) +} + +func TestPathStyleBucketBuild(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{S3ForcePathStyle: aws.Bool(true)}) + runTests(t, s, forcepathTests) +} + +func TestHostStyleBucketGetBucketLocation(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.GetBucketLocationRequest(&s3.GetBucketLocationInput{ + Bucket: aws.String("bucket"), + }) + + req.Build() + require.NoError(t, req.Error) + u, _ := url.Parse(req.HTTPRequest.URL.String()) + assert.NotContains(t, u.Host, "bucket") + assert.Contains(t, u.Path, "bucket") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,246 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3iface provides an interface for the Amazon Simple Storage Service. +package s3iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3API is the interface type for s3.S3. +type S3API interface { + AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) + + AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) + + CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) + + CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) + + CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) + + CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) + + CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) + + CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error) + + CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) + + CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) + + DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) + + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) + + DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + + DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) + + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) 
(*s3.DeleteBucketLifecycleOutput, error) + + DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) + + DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) + + DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) + + DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) + + DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) + + DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) + + DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) + + DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) + + DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) + + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) + + DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + + DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) + + GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) + + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) + + GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + + GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) + + GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) + + GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) + + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) 
(*s3.GetBucketLifecycleConfigurationOutput, error) + + GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) + + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) + + GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + + GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) + + GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) + + GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) + + GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) + + GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) + + GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + + GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) + + GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) + + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) + + GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) + + GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) + + GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) + + GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) + + GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) + + GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) + + 
GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) + + GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) + + GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) + + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) + + GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + + GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) + + GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + + GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) + + HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) + + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) + + HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) + + HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) + + ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + + ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) + + ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) + + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) + + ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error + + ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) + + ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) + + ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error + + ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) + + ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) + + ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) 
bool) error + + ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) + + ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) + + ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error + + PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) + + PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) + + PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + + PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) + + PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) + + PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) + + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + + PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + + PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) + + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) + + PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) + + PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) + + PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) + + PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) + + PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) + + PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) + + PutBucketReplicationRequest(*s3.PutBucketReplicationInput) 
(*request.Request, *s3.PutBucketReplicationOutput) + + PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) + + PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) + + PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) + + PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) + + PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) + + PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) + + PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) + + PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) + + PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) + + PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) + + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) + + PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + + PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) + + RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) + + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) + + UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) + + UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) + + UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) + + UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) +} + +var _ S3API = (*s3.S3)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3 @@ +// Package s3manager provides utilities to upload and download objects from +// S3 concurrently. Helpful for when working with large objects. +package s3manager diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,354 @@ +package s3manager + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// DefaultDownloadPartSize is the default range of bytes to get at a time when +// using Download(). +const DefaultDownloadPartSize = 1024 * 1024 * 5 + +// DefaultDownloadConcurrency is the default number of goroutines to spin up +// when using Download(). +const DefaultDownloadConcurrency = 5 + +// The Downloader structure that calls Download(). It is safe to call Download() +// on this structure for multiple objects and across concurrent goroutines. 
+// Mutating the Downloader's properties is not safe to be done concurrently. +type Downloader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultConcurrency value will be used. + Concurrency int + + // An S3 client to use when performing downloads. + S3 s3iface.S3API +} + +// NewDownloader creates a new Downloader instance to downloads objects from +// S3 in concurrent chunks. Pass in additional functional options to customize +// the downloader behavior. Requires a client.ConfigProvider in order to create +// a S3 service client. The session.Session satisfies the client.ConfigProvider +// interface. +// +// Example: +// // The session the S3 Downloader will use +// sess := session.New() +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a downloader with the session and custom options +// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Uploader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader { + d := &Downloader{ + S3: s3.New(c), + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + } + for _, option := range options { + option(d) + } + + return d +} + +// NewDownloaderWithClient creates a new Downloader instance to downloads +// objects from S3 in concurrent chunks. Pass in additional functional +// options to customize the downloader behavior. Requires a S3 service client +// to make S3 API calls. 
+// +// Example: +// // The S3 client the S3 Downloader will use +// s3Svc := s3.new(session.New()) +// +// // Create a downloader with the s3 client and default options +// downloader := s3manager.NewDownloaderWithClient(s3Svc) +// +// // Create a downloader with the s3 client and custom options +// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Uploader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader { + d := &Downloader{ + S3: svc, + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + } + for _, option := range options { + option(d) + } + + return d +} + +// Download downloads an object in S3 and writes the payload into w using +// concurrent GET requests. +// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// It is safe to call this method concurrently across goroutines. +// +// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent +// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. +func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { + impl := downloader{w: w, in: input, ctx: d} + + for _, option := range options { + option(&impl.ctx) + } + + return impl.download() +} + +// downloader is the implementation structure used internally by Downloader. +type downloader struct { + ctx Downloader + + in *s3.GetObjectInput + w io.WriterAt + + wg sync.WaitGroup + m sync.Mutex + + pos int64 + totalBytes int64 + written int64 + err error +} + +// init initializes the downloader with default options. 
+func (d *downloader) init() { + d.totalBytes = -1 + + if d.ctx.Concurrency == 0 { + d.ctx.Concurrency = DefaultDownloadConcurrency + } + + if d.ctx.PartSize == 0 { + d.ctx.PartSize = DefaultDownloadPartSize + } +} + +// download performs the implementation of the object download across ranged +// GETs. +func (d *downloader) download() (n int64, err error) { + d.init() + + // Spin off first worker to check additional header information + d.getChunk() + + if total := d.getTotalBytes(); total >= 0 { + // Spin up workers + ch := make(chan dlchunk, d.ctx.Concurrency) + + for i := 0; i < d.ctx.Concurrency; i++ { + d.wg.Add(1) + go d.downloadPart(ch) + } + + // Assign work + for d.getErr() == nil { + if d.pos >= total { + break // We're finished queueing chunks + } + + // Queue the next range of bytes to read. + ch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize} + d.pos += d.ctx.PartSize + } + + // Wait for completion + close(ch) + d.wg.Wait() + } else { + // Checking if we read anything new + for d.err == nil { + d.getChunk() + } + + // We expect a 416 error letting us know we are done downloading the + // total bytes. Since we do not know the content's length, this will + // keep grabbing chunks of data until the range of bytes specified in + // the request is out of range of the content. Once, this happens, a + // 416 should occur. + e, ok := d.err.(awserr.RequestFailure) + if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable { + d.err = nil + } + } + + // Return error + return d.written, d.err +} + +// downloadPart is an individual goroutine worker reading from the ch channel +// and performing a GetObject request on the data with a given byte range. +// +// If this is the first worker, this operation also resolves the total number +// of bytes to be read so that the worker manager knows when it is finished. 
+func (d *downloader) downloadPart(ch chan dlchunk) { + defer d.wg.Done() + for { + chunk, ok := <-ch + if !ok { + break + } + d.downloadChunk(chunk) + } +} + +// getChunk grabs a chunk of data from the body. +// Not thread safe. Should only used when grabbing data on a single thread. +func (d *downloader) getChunk() { + chunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize} + d.pos += d.ctx.PartSize + d.downloadChunk(chunk) +} + +// downloadChunk downloads the chunk froom s3 +func (d *downloader) downloadChunk(chunk dlchunk) { + if d.getErr() != nil { + return + } + // Get the next byte range of data + in := &s3.GetObjectInput{} + awsutil.Copy(in, d.in) + rng := fmt.Sprintf("bytes=%d-%d", + chunk.start, chunk.start+chunk.size-1) + in.Range = &rng + + req, resp := d.ctx.S3.GetObjectRequest(in) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + err := req.Send() + + if err != nil { + d.setErr(err) + } else { + d.setTotalBytes(resp) // Set total if not yet set. + + n, err := io.Copy(&chunk, resp.Body) + resp.Body.Close() + + if err != nil { + d.setErr(err) + } + d.incrWritten(n) + } +} + +// getTotalBytes is a thread-safe getter for retrieving the total byte status. +func (d *downloader) getTotalBytes() int64 { + d.m.Lock() + defer d.m.Unlock() + + return d.totalBytes +} + +// setTotalBytes is a thread-safe setter for setting the total byte status. +// Will extract the object's total bytes from the Content-Range if the file +// will be chunked, or Content-Length. Content-Length is used when the response +// does not include a Content-Range. Meaning the object was not chunked. This +// occurs when the full file fits within the PartSize directive. +func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { + d.m.Lock() + defer d.m.Unlock() + + if d.totalBytes >= 0 { + return + } + + if resp.ContentRange == nil { + // ContentRange is nil when the full file contents is provied, and + // is not chunked. 
Use ContentLength instead. + if resp.ContentLength != nil { + d.totalBytes = *resp.ContentLength + return + } + } else { + parts := strings.Split(*resp.ContentRange, "/") + + total := int64(-1) + var err error + // Checking for whether or not a numbered total exists + // If one does not exist, we will assume the total to be -1, undefined, + // and sequentially download each chunk until hitting a 416 error + totalStr := parts[len(parts)-1] + if totalStr != "*" { + total, err = strconv.ParseInt(totalStr, 10, 64) + if err != nil { + d.err = err + return + } + } + + d.totalBytes = total + } +} + +func (d *downloader) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + + d.written += n +} + +// getErr is a thread-safe getter for the error object +func (d *downloader) getErr() error { + d.m.Lock() + defer d.m.Unlock() + + return d.err +} + +// setErr is a thread-safe setter for the error object +func (d *downloader) setErr(e error) { + d.m.Lock() + defer d.m.Unlock() + + d.err = e +} + +// dlchunk represents a single chunk of data to write by the worker routine. +// This structure also implements an io.SectionReader style interface for +// io.WriterAt, effectively making it an io.SectionWriter (which does not +// exist). +type dlchunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 +} + +// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start +// position to its end (or EOF). 
+func (c *dlchunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur) + c.cur += int64(n) + + return +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,309 @@ +package s3manager_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) { + var m sync.Mutex + names := []string{} + ranges := []string{} + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) + + rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) + rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) + start, _ := strconv.ParseInt(rng[1], 10, 64) + fin, _ := strconv.ParseInt(rng[2], 10, 64) + fin++ + + if fin > int64(len(data)) { + fin = int64(len(data)) + } + + bodyBytes := data[start:fin] + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), + Header: http.Header{}, + } + 
r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", + start, fin-1, len(data))) + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", len(bodyBytes))) + }) + + return svc, &names, &ranges +} + +func dlLoggingSvcNoChunk(data []byte) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader(data[:])), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Length", fmt.Sprintf("%d", len(data))) + }) + + return svc, &names +} + +func dlLoggingSvcNoContentRangeLength(data []byte, states []int) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + var index int = 0 + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + + r.HTTPResponse = &http.Response{ + StatusCode: states[index], + Body: ioutil.NopCloser(bytes.NewReader(data[:])), + Header: http.Header{}, + } + index++ + }) + + return svc, &names +} + +func dlLoggingSvcContentRangeTotalAny(data []byte, states []int) (*s3.S3, *[]string) { + var m sync.Mutex + names := []string{} + ranges := []string{} + var index int = 0 + + svc := s3.New(unit.Session) + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + names = append(names, r.Operation.Name) + ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) + + rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) + rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) + start, _ := strconv.ParseInt(rng[1], 10, 64) + fin, _ := strconv.ParseInt(rng[2], 10, 64) + fin++ + + if fin >= int64(len(data)) { + fin = int64(len(data)) + 
} + + // Setting start and finish to 0 because this state of 1 is suppose to + // be an error state of 416 + if index == len(states)-1 { + start = 0 + fin = 0 + } + + bodyBytes := data[start:fin] + + r.HTTPResponse = &http.Response{ + StatusCode: states[index], + Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), + Header: http.Header{}, + } + r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", + start, fin-1)) + index++ + }) + + return svc, &names +} + +func TestDownloadOrder(t *testing.T) { + s, names, ranges := dlLoggingSvc(buf12MB) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf12MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadZero(t *testing.T) { + s, names, ranges := dlLoggingSvc([]byte{}) + + d := s3manager.NewDownloaderWithClient(s) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(0), n) + assert.Equal(t, []string{"GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-5242879"}, *ranges) +} + +func TestDownloadSetPartSize(t *testing.T) { + s, names, ranges := dlLoggingSvc([]byte{1, 2, 3}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + d.PartSize = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(3), n) + 
assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) + assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges) + assert.Equal(t, []byte{1, 2, 3}, w.Bytes()) +} + +func TestDownloadError(t *testing.T) { + s, names, _ := dlLoggingSvc([]byte{1, 2, 3}) + + num := 0 + s.Handlers.Send.PushBack(func(r *request.Request) { + num++ + if num > 1 { + r.HTTPResponse.StatusCode = 400 + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + } + }) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + d.PartSize = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.NotNil(t, err) + assert.Equal(t, int64(1), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + assert.Equal(t, []byte{1}, w.Bytes()) +} + +func TestDownloadNonChunk(t *testing.T) { + s, names := dlLoggingSvcNoChunk(buf2MB) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadNoContentRangeLength(t *testing.T) { + s, names := dlLoggingSvcNoContentRangeLength(buf2MB, []int{200, 416}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += 
int(b) + } + assert.Equal(t, 0, count) +} + +func TestDownloadContentRangeTotalAny(t *testing.T) { + s, names := dlLoggingSvcContentRangeTotalAny(buf2MB, []int{200, 416}) + + d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) { + d.Concurrency = 1 + }) + w := &aws.WriteAtBuffer{} + n, err := d.Download(w, &s3.GetObjectInput{ + Bucket: aws.String("bucket"), + Key: aws.String("key"), + }) + + assert.Nil(t, err) + assert.Equal(t, int64(len(buf2MB)), n) + assert.Equal(t, []string{"GetObject", "GetObject"}, *names) + + count := 0 + for _, b := range w.Bytes() { + count += int(b) + } + assert.Equal(t, 0, count) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,23 @@ +// Package s3manageriface provides an interface for the s3manager package +package s3manageriface + +import ( + "io" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +// DownloaderAPI is the interface type for s3manager.Downloader. +type DownloaderAPI interface { + Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error) +} + +var _ DownloaderAPI = (*s3manager.Downloader)(nil) + +// UploaderAPI is the interface type for s3manager.Uploader. 
+type UploaderAPI interface { + Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) +} + +var _ UploaderAPI = (*s3manager.Uploader)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/shared_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +package s3manager_test + +var buf12MB = make([]byte, 1024*1024*12) +var buf2MB = make([]byte, 1024*1024*2) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,661 @@ +package s3manager + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// MaxUploadParts is the maximum allowed number of parts in a multi-part upload +// on Amazon S3. +const MaxUploadParts = 10000 + +// MinUploadPartSize is the minimum allowed part size when uploading a part to +// Amazon S3. 
+const MinUploadPartSize int64 = 1024 * 1024 * 5 + +// DefaultUploadPartSize is the default part size to buffer chunks of a +// payload into. +const DefaultUploadPartSize = MinUploadPartSize + +// DefaultUploadConcurrency is the default number of goroutines to spin up when +// using Upload(). +const DefaultUploadConcurrency = 5 + +// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned +// will satisfy this interface when a multi part upload failed to upload all +// chucks to S3. In the case of a failure the UploadID is needed to operate on +// the chunks, if any, which were uploaded. +// +// Example: +// +// u := s3manager.NewUploader(opts) +// output, err := u.upload(input) +// if err != nil { +// if multierr, ok := err.(MultiUploadFailure); ok { +// // Process error and its associated uploadID +// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID()) +// } else { +// // Process error generically +// fmt.Println("Error:", err.Error()) +// } +// } +// +type MultiUploadFailure interface { + awserr.Error + + // Returns the upload id for the S3 multipart upload that failed. + UploadID() string +} + +// So that the Error interface type can be included as an anonymous field +// in the multiUploadError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// A multiUploadError wraps the upload ID of a failed s3 multipart upload. +// Composed of BaseError for code, message, and original error +// +// Should be used for an error that occurred failing a S3 multipart upload, +// and a upload ID is available. If an uploadID is not available a more relevant +type multiUploadError struct { + awsError + + // ID for multipart upload which failed. + uploadID string +} + +// Error returns the string representation of the error. +// +// See apierr.BaseError ErrorWithExtra for output format +// +// Satisfies the error interface. 
+func (m multiUploadError) Error() string { + extra := fmt.Sprintf("upload id: %s", m.uploadID) + return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (m multiUploadError) String() string { + return m.Error() +} + +// UploadID returns the id of the S3 upload which failed. +func (m multiUploadError) UploadID() string { + return m.uploadID +} + +// UploadInput contains all input for upload requests to Amazon S3. +type UploadInput struct { + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256, + // aws:kms). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` + + // The readable body payload to send to S3. + Body io.Reader +} + +// UploadOutput represents a response from the Upload() call. +type UploadOutput struct { + // The URL where the object was uploaded to. + Location string + + // The version of the object that was uploaded. Will only be populated if + // the S3 Bucket is versioned. If the bucket is not versioned this field + // will not be set. + VersionID *string + + // The ID for a multipart upload to S3. 
In the case of an error the error + // can be cast to the MultiUploadFailure interface to extract the upload ID. + UploadID string +} + +// The Uploader structure that calls Upload(). It is safe to call Upload() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Uploader's properties is not safe to be done concurrently. +type Uploader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultConcurrency value will be used. + Concurrency int + + // Setting this value to true will cause the SDK to avoid calling + // AbortMultipartUpload on a failure, leaving all successfully uploaded + // parts on S3 for manual recovery. + // + // Note that storing parts of an incomplete multipart upload counts towards + // space usage on S3 and will add additional costs if not cleaned up. + LeavePartsOnError bool + + // MaxUploadParts is the max number of parts which will be uploaded to S3. + // Will be used to calculate the partsize of the object to be uploaded. + // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file + // as 100, 50MB parts. + // With a limited of s3.MaxUploadParts (10,000 parts). + MaxUploadParts int + + // The client to use when uploading to S3. + S3 s3iface.S3API +} + +// NewUploader creates a new Uploader instance to upload objects to S3. Pass In +// additional functional options to customize the uploader's behavior. Requires a +// client.ConfigProvider in order to create a S3 service client. The session.Session +// satisfies the client.ConfigProvider interface. 
+// +// Example: +// // The session the S3 Uploader will use +// sess := session.New() +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// // Create an uploader with the session and custom options +// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: s3.New(c), + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + } + + for _, option := range options { + option(u) + } + + return u +} + +// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in +// additional functional options to customize the uploader's behavior. Requires +// a S3 service client to make S3 API calls. +// +// Example: +// // S3 service client the Upload manager will use. +// s3Svc := s3.New(session.New()) +// +// // Create an uploader with S3 client and default options +// uploader := s3manager.NewUploaderWithClient(s3Svc) +// +// // Create an uploader with S3 client and custom options +// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: svc, + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + } + + for _, option := range options { + option(u) + } + + return u +} + +// Upload uploads an object to S3, intelligently buffering large files into +// smaller chunks and sending them in parallel across multiple goroutines. You +// can configure the buffer size and concurrency through the Uploader's parameters. 
+// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// It is safe to call this method concurrently across goroutines. +// +// Example: +// // Upload input parameters +// upParams := &s3manager.UploadInput{ +// Bucket: &bucketName, +// Key: &keyName, +// Body: file, +// } +// +// // Perform an upload. +// result, err := uploader.Upload(upParams) +// +// // Perform upload with options different than the those in the Uploader. +// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) { +// u.PartSize = 10 * 1024 * 1024 // 10MB part size +// u.LeavePartsOnError = true // Dont delete the parts if the upload fails. +// }) +func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) { + i := uploader{in: input, ctx: u} + + for _, option := range options { + option(&i.ctx) + } + + return i.upload() +} + +// internal structure to manage an upload to S3. +type uploader struct { + ctx Uploader + + in *UploadInput + + readerPos int64 // current reader position + totalSize int64 // set to -1 if the size is not known +} + +// internal logic for deciding whether to upload a single part or use a +// multipart upload. 
+func (u *uploader) upload() (*UploadOutput, error) { + u.init() + + if u.ctx.PartSize < MinUploadPartSize { + msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) + return nil, awserr.New("ConfigError", msg, nil) + } + + // Do one read to determine if we have more than one part + buf, err := u.nextReader() + if err == io.EOF || err == io.ErrUnexpectedEOF { // single part + return u.singlePart(buf) + } else if err != nil { + return nil, awserr.New("ReadRequestBody", "read upload data failed", err) + } + + mu := multiuploader{uploader: u} + return mu.upload(buf) +} + +// init will initialize all default options. +func (u *uploader) init() { + if u.ctx.Concurrency == 0 { + u.ctx.Concurrency = DefaultUploadConcurrency + } + if u.ctx.PartSize == 0 { + u.ctx.PartSize = DefaultUploadPartSize + } + + // Try to get the total size for some optimizations + u.initSize() +} + +// initSize tries to detect the total stream size, setting u.totalSize. If +// the size is not known, totalSize is set to -1. +func (u *uploader) initSize() { + u.totalSize = -1 + + switch r := u.in.Body.(type) { + case io.Seeker: + pos, _ := r.Seek(0, 1) + defer r.Seek(pos, 0) + + n, err := r.Seek(0, 2) + if err != nil { + return + } + u.totalSize = n + + // Try to adjust partSize if it is too small and account for + // integer division truncation. + if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) { + // Add one to the part size to account for remainders + // during the size calculation. e.g odd number of bytes. + u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1 + } + } +} + +// nextReader returns a seekable reader representing the next packet of data. +// This operation increases the shared u.readerPos counter, but note that it +// does not need to be wrapped in a mutex because nextReader is only called +// from the main thread. 
+func (u *uploader) nextReader() (io.ReadSeeker, error) { + switch r := u.in.Body.(type) { + case io.ReaderAt: + var err error + + n := u.ctx.PartSize + if u.totalSize >= 0 { + bytesLeft := u.totalSize - u.readerPos + + if bytesLeft == 0 { + err = io.EOF + n = bytesLeft + } else if bytesLeft <= u.ctx.PartSize { + err = io.ErrUnexpectedEOF + n = bytesLeft + } + } + + buf := io.NewSectionReader(r, u.readerPos, n) + u.readerPos += n + + return buf, err + + default: + packet := make([]byte, u.ctx.PartSize) + n, err := io.ReadFull(u.in.Body, packet) + u.readerPos += int64(n) + + return bytes.NewReader(packet[0:n]), err + } +} + +// singlePart contains upload logic for uploading a single chunk via +// a regular PutObject request. Multipart requests require at least two +// parts, or at least 5MB of data. +func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) { + params := &s3.PutObjectInput{} + awsutil.Copy(params, u.in) + params.Body = buf + + req, out := u.ctx.S3.PutObjectRequest(params) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return nil, err + } + + url := req.HTTPRequest.URL.String() + return &UploadOutput{ + Location: url, + VersionID: out.VersionId, + }, nil +} + +// internal structure to manage a specific multipart upload to S3. +type multiuploader struct { + *uploader + wg sync.WaitGroup + m sync.Mutex + err error + uploadID string + parts completedParts +} + +// keeps track of a single chunk of data being sent to S3. +type chunk struct { + buf io.ReadSeeker + num int64 +} + +// completedParts is a wrapper to make parts sortable by their part number, +// since S3 required this list to be sent in sorted order. 
+type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +// upload will perform a multipart upload using the firstBuf buffer containing +// the first chunk of data. +func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) { + params := &s3.CreateMultipartUploadInput{} + awsutil.Copy(params, u.in) + + // Create the multipart + req, resp := u.ctx.S3.CreateMultipartUploadRequest(params) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return nil, err + } + u.uploadID = *resp.UploadId + + // Create the workers + ch := make(chan chunk, u.ctx.Concurrency) + for i := 0; i < u.ctx.Concurrency; i++ { + u.wg.Add(1) + go u.readChunk(ch) + } + + // Send part 1 to the workers + var num int64 = 1 + ch <- chunk{buf: firstBuf, num: num} + + // Read and queue the rest of the parts + for u.geterr() == nil { + // This upload exceeded maximum number of supported parts, error now. + if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) { + var msg string + if num > int64(u.ctx.MaxUploadParts) { + msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", + u.ctx.MaxUploadParts) + } else { + msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). 
Adjust PartSize to fit in this limit", + MaxUploadParts) + } + u.seterr(awserr.New("TotalPartsExceeded", msg, nil)) + break + } + num++ + + buf, err := u.nextReader() + if err == io.EOF { + break + } + + ch <- chunk{buf: buf, num: num} + + if err != nil && err != io.ErrUnexpectedEOF { + u.seterr(awserr.New( + "ReadRequestBody", + "read multipart upload data failed", + err)) + break + } + } + + // Close the channel, wait for workers, and complete upload + close(ch) + u.wg.Wait() + complete := u.complete() + + if err := u.geterr(); err != nil { + return nil, &multiUploadError{ + awsError: awserr.New( + "MultipartUpload", + "upload multipart failed", + err), + uploadID: u.uploadID, + } + } + return &UploadOutput{ + Location: *complete.Location, + VersionID: complete.VersionId, + UploadID: u.uploadID, + }, nil +} + +// readChunk runs in worker goroutines to pull chunks off of the ch channel +// and send() them as UploadPart requests. +func (u *multiuploader) readChunk(ch chan chunk) { + defer u.wg.Done() + for { + data, ok := <-ch + + if !ok { + break + } + + if u.geterr() == nil { + if err := u.send(data); err != nil { + u.seterr(err) + } + } + } +} + +// send performs an UploadPart request and keeps track of the completed +// part information. 
+func (u *multiuploader) send(c chunk) error { + req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + Body: c.buf, + UploadId: &u.uploadID, + PartNumber: &c.num, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + return err + } + + n := c.num + completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n} + + u.m.Lock() + u.parts = append(u.parts, completed) + u.m.Unlock() + + return nil +} + +// geterr is a thread-safe getter for the error object +func (u *multiuploader) geterr() error { + u.m.Lock() + defer u.m.Unlock() + + return u.err +} + +// seterr is a thread-safe setter for the error object +func (u *multiuploader) seterr(e error) { + u.m.Lock() + defer u.m.Unlock() + + u.err = e +} + +// fail will abort the multipart unless LeavePartsOnError is set to true. +func (u *multiuploader) fail() { + if u.ctx.LeavePartsOnError { + return + } + + req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + req.Send() +} + +// complete successfully completes a multipart upload and returns the response. +func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { + if u.geterr() != nil { + u.fail() + return nil + } + + // Parts must be sorted in PartNumber order. 
+ sort.Sort(u.parts) + + req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, + }) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager")) + if err := req.Send(); err != nil { + u.seterr(err) + u.fail() + } + + return resp +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,482 @@ +package s3manager_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "sync" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/stretchr/testify/assert" +) + +var emptyList = []string{} + +func val(i interface{}, s string) interface{} { + v, err := awsutil.ValuesAtPath(i, s) + if err != nil || len(v) == 0 { + return nil + } + if _, ok := v[0].(io.Reader); ok { + return v[0] + } + + if rv := reflect.ValueOf(v[0]); rv.Kind() == reflect.Ptr { + return rv.Elem().Interface() + } + + return v[0] +} + +func contains(src []string, s string) bool { + for _, v := range src { + if s == v { + return true + } + } + return false +} + +func loggingSvc(ignoreOps []string) 
(*s3.S3, *[]string, *[]interface{}) { + var m sync.Mutex + partNum := 0 + names := []string{} + params := []interface{}{} + svc := s3.New(unit.Session) + svc.Handlers.Unmarshal.Clear() + svc.Handlers.UnmarshalMeta.Clear() + svc.Handlers.UnmarshalError.Clear() + svc.Handlers.Send.Clear() + svc.Handlers.Send.PushBack(func(r *request.Request) { + m.Lock() + defer m.Unlock() + + if !contains(ignoreOps, r.Operation.Name) { + names = append(names, r.Operation.Name) + params = append(params, r.Params) + } + + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + + switch data := r.Data.(type) { + case *s3.CreateMultipartUploadOutput: + data.UploadId = aws.String("UPLOAD-ID") + case *s3.UploadPartOutput: + partNum++ + data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum)) + case *s3.CompleteMultipartUploadOutput: + data.Location = aws.String("https://location") + data.VersionId = aws.String("VERSION-ID") + case *s3.PutObjectOutput: + data.VersionId = aws.String("VERSION-ID") + } + }) + + return svc, &names, ¶ms +} + +func buflen(i interface{}) int { + r := i.(io.Reader) + b, _ := ioutil.ReadAll(r) + return len(b) +} + +func TestUploadOrderMulti(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + u := s3manager.NewUploaderWithClient(s) + + resp, err := u.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + ServerSideEncryption: aws.String("AES256"), + ContentType: aws.String("content/type"), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + assert.Equal(t, "https://location", resp.Location) + assert.Equal(t, "UPLOAD-ID", resp.UploadID) + assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID) + + // Validate input values + + // UploadPart + assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadId")) + assert.Equal(t, "UPLOAD-ID", 
val((*args)[2], "UploadId")) + assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadId")) + + // CompleteMultipartUpload + assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadId")) + assert.Equal(t, int64(1), val((*args)[4], "MultipartUpload.Parts[0].PartNumber")) + assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber")) + assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag")) + assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag")) + + // Custom headers + assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption")) + assert.Equal(t, "content/type", val((*args)[0], "ContentType")) +} + +func TestUploadOrderMultiDifferentPartSize(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.PartSize = 1024 * 1024 * 7 + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body"))) + assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body"))) +} + +func TestUploadIncreasePartSize(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.MaxUploadParts = 2 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.NoError(t, err) + assert.Equal(t, int64(s3manager.DefaultDownloadPartSize), mgr.PartSize) + assert.Equal(t, 
[]string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + assert.Equal(t, (1024*1024*6)+1, buflen(val((*args)[1], "Body"))) + assert.Equal(t, (1024*1024*6)-1, buflen(val((*args)[2], "Body"))) +} + +func TestUploadFailIfPartSizeTooSmall(t *testing.T) { + mgr := s3manager.NewUploader(unit.Session, func(u *s3manager.Uploader) { + u.PartSize = 5 + }) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Nil(t, resp) + assert.NotNil(t, err) + + aerr := err.(awserr.Error) + assert.Equal(t, "ConfigError", aerr.Code()) + assert.Contains(t, aerr.Message(), "part size must be at least") +} + +func TestUploadOrderSingle(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf2MB), + ServerSideEncryption: aws.String("AES256"), + ContentType: aws.String("content/type"), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, aws.String("VERSION-ID"), resp.VersionID) + assert.Equal(t, "", resp.UploadID) + assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption")) + assert.Equal(t, "content/type", val((*args)[0], "ContentType")) +} + +func TestUploadOrderSingleFailure(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse.StatusCode = 400 + }) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf2MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.Nil(t, resp) +} + +func TestUploadOrderZero(t *testing.T) { + s, ops, args := 
loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 0)), + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) + assert.Equal(t, 0, buflen(val((*args)[0], "Body"))) +} + +func TestUploadOrderMultiFailure(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch t := r.Data.(type) { + case *s3.UploadPartOutput: + if *t.ETag == "ETAG2" { + r.HTTPResponse.StatusCode = 400 + } + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureOnComplete(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch r.Data.(type) { + case *s3.CompleteMultipartUploadOutput: + r.HTTPResponse.StatusCode = 400 + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(buf12MB), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", + "UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureOnCreate(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch r.Data.(type) { + case *s3.CreateMultipartUploadOutput: + 
r.HTTPResponse.StatusCode = 400 + } + }) + + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 1024*1024*12)), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload"}, *ops) +} + +func TestUploadOrderMultiFailureLeaveParts(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + s.Handlers.Send.PushBack(func(r *request.Request) { + switch data := r.Data.(type) { + case *s3.UploadPartOutput: + if *data.ETag == "ETAG2" { + r.HTTPResponse.StatusCode = 400 + } + } + }) + + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.LeavePartsOnError = true + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: bytes.NewReader(make([]byte, 1024*1024*12)), + }) + + assert.Error(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops) +} + +type failreader struct { + times int + failCount int +} + +func (f *failreader) Read(b []byte) (int, error) { + f.failCount++ + if f.failCount >= f.times { + return 0, fmt.Errorf("random failure") + } + return len(b), nil +} + +func TestUploadOrderReadFail1(t *testing.T) { + s, ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &failreader{times: 1}, + }) + + assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code()) + assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure") + assert.Equal(t, []string{}, *ops) +} + +func TestUploadOrderReadFail2(t *testing.T) { + s, ops, _ := loggingSvc([]string{"UploadPart"}) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + }) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: 
aws.String("Key"), + Body: &failreader{times: 2}, + }) + + assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code()) + assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure") + assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) +} + +type sizedReader struct { + size int + cur int +} + +func (s *sizedReader) Read(p []byte) (n int, err error) { + if s.cur >= s.size { + return 0, io.EOF + } + + n = len(p) + s.cur += len(p) + if s.cur > s.size { + n -= s.cur - s.size + } + + return +} + +func TestUploadOrderMultiBufferedReader(t *testing.T) { + s, ops, args := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + _, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) + + // Part lengths + parts := []int{ + buflen(val((*args)[1], "Body")), + buflen(val((*args)[2], "Body")), + buflen(val((*args)[3], "Body")), + } + sort.Ints(parts) + assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts) +} + +func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) { + s, ops, _ := loggingSvc([]string{"UploadPart"}) + mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) { + u.Concurrency = 1 + u.MaxUploadParts = 2 + }) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 12}, + }) + + assert.Error(t, err) + assert.Nil(t, resp) + assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) + + aerr := err.(awserr.Error) + assert.Equal(t, "TotalPartsExceeded", aerr.Code()) + assert.Contains(t, aerr.Message(), "configured MaxUploadParts (2)") +} + +func TestUploadOrderSingleBufferedReader(t *testing.T) { + s, 
ops, _ := loggingSvc(emptyList) + mgr := s3manager.NewUploaderWithClient(s) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: &sizedReader{size: 1024 * 1024 * 2}, + }) + + assert.NoError(t, err) + assert.Equal(t, []string{"PutObject"}, *ops) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) +} + +func TestUploadZeroLenObject(t *testing.T) { + requestMade := false + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestMade = true + w.WriteHeader(http.StatusOK) + })) + mgr := s3manager.NewUploaderWithClient(s3.New(unit.Session, &aws.Config{ + Endpoint: aws.String(server.URL), + })) + resp, err := mgr.Upload(&s3manager.UploadInput{ + Bucket: aws.String("Bucket"), + Key: aws.String("Key"), + Body: strings.NewReader(""), + }) + + assert.NoError(t, err) + assert.True(t, requestMade) + assert.NotEqual(t, "", resp.Location) + assert.Equal(t, "", resp.UploadID) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// S3 is a client for Amazon S3. +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "s3" + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a S3 client from just a session. +// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. 
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/sse.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/sse.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/sse.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/sse.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,44 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme != "https" { + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + if len(p) > 0 { + r.Error = errSSERequiresSSL + } + } +} + +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", + } + + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,79 @@ +package s3_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stretchr/testify/assert" +) + +func TestSSECustomerKeyOverHTTPError(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + }) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") +} + +func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) { + s := s3.New(unit.Session, &aws.Config{DisableSSL: aws.Bool(true)}) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + CopySourceSSECustomerKey: aws.String("key"), + }) + err := req.Build() + + assert.Error(t, err) + assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") +} + +func TestComputeSSEKeys(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + CopySourceSSECustomerKey: 
aws.String("key"), + }) + err := req.Build() + + assert.NoError(t, err) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) + assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) + assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) +} + +func TestComputeSSEKeysShortcircuit(t *testing.T) { + s := s3.New(unit.Session) + req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ + Bucket: aws.String("bucket"), + CopySource: aws.String("bucket/source"), + Key: aws.String("dest"), + SSECustomerKey: aws.String("key"), + CopySourceSSECustomerKey: aws.String("key"), + SSECustomerKeyMD5: aws.String("MD5"), + CopySourceSSECustomerKeyMD5: aws.String("MD5"), + }) + err := req.Build() + + assert.NoError(t, err) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) + assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) + assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) + assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/statusok_error.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/statusok_error.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/statusok_error.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/statusok_error.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,36 @@ +package s3 + +import ( + 
"bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to read response body", err) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = aws.ReadSeekCloser(body) + defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0) + + if body.Len() == 0 { + // If there is no body don't attempt to parse the body. + return + } + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == "SerializationError" { + r.Error = nil + return + } + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/statusok_error_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,130 @@ +package s3_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +const errMsg = `ErrorCodemessage bodyrequestIDhostID=` + +var lastModifiedTime = time.Date(2009, 11, 23, 0, 0, 0, 0, time.UTC) + +func TestCopyObjectNoError(t *testing.T) { + const successMsg = ` + +2009-11-23T0:00:00Z"1da64c7f13d1e8dbeaea40b905fd586c"` + + res, 
err := newCopyTestSvc(successMsg).CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String("bucketname"), + CopySource: aws.String("bucketname/exists.txt"), + Key: aws.String("destination.txt"), + }) + + require.NoError(t, err) + + assert.Equal(t, fmt.Sprintf(`%q`, "1da64c7f13d1e8dbeaea40b905fd586c"), *res.CopyObjectResult.ETag) + assert.Equal(t, lastModifiedTime, *res.CopyObjectResult.LastModified) +} + +func TestCopyObjectError(t *testing.T) { + _, err := newCopyTestSvc(errMsg).CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String("bucketname"), + CopySource: aws.String("bucketname/doesnotexist.txt"), + Key: aws.String("destination.txt"), + }) + + require.Error(t, err) + e := err.(awserr.Error) + + assert.Equal(t, "ErrorCode", e.Code()) + assert.Equal(t, "message body", e.Message()) +} + +func TestUploadPartCopySuccess(t *testing.T) { + const successMsg = ` + +2009-11-23T0:00:00Z"1da64c7f13d1e8dbeaea40b905fd586c"` + + res, err := newCopyTestSvc(successMsg).UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String("bucketname"), + CopySource: aws.String("bucketname/doesnotexist.txt"), + Key: aws.String("destination.txt"), + PartNumber: aws.Int64(0), + UploadId: aws.String("uploadID"), + }) + + require.NoError(t, err) + + assert.Equal(t, fmt.Sprintf(`%q`, "1da64c7f13d1e8dbeaea40b905fd586c"), *res.CopyPartResult.ETag) + assert.Equal(t, lastModifiedTime, *res.CopyPartResult.LastModified) +} + +func TestUploadPartCopyError(t *testing.T) { + _, err := newCopyTestSvc(errMsg).UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String("bucketname"), + CopySource: aws.String("bucketname/doesnotexist.txt"), + Key: aws.String("destination.txt"), + PartNumber: aws.Int64(0), + UploadId: aws.String("uploadID"), + }) + + require.Error(t, err) + e := err.(awserr.Error) + + assert.Equal(t, "ErrorCode", e.Code()) + assert.Equal(t, "message body", e.Message()) +} + +func TestCompleteMultipartUploadSuccess(t *testing.T) { + const successMsg = ` + 
+locationNamebucketNamekeyName"etagVal"` + res, err := newCopyTestSvc(successMsg).CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String("bucketname"), + Key: aws.String("key"), + UploadId: aws.String("uploadID"), + }) + + require.NoError(t, err) + + assert.Equal(t, `"etagVal"`, *res.ETag) + assert.Equal(t, "bucketName", *res.Bucket) + assert.Equal(t, "keyName", *res.Key) + assert.Equal(t, "locationName", *res.Location) +} + +func TestCompleteMultipartUploadError(t *testing.T) { + _, err := newCopyTestSvc(errMsg).CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String("bucketname"), + Key: aws.String("key"), + UploadId: aws.String("uploadID"), + }) + + require.Error(t, err) + e := err.(awserr.Error) + + assert.Equal(t, "ErrorCode", e.Code()) + assert.Equal(t, "message body", e.Message()) +} + +func newCopyTestSvc(errMsg string) *s3.S3 { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, errMsg, http.StatusOK) + })) + return s3.New(unit.Session, aws.NewConfig(). + WithEndpoint(server.URL). + WithDisableSSL(true). + WithMaxRetries(0). 
+ WithS3ForcePathStyle(true)) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,57 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + if r.HTTPResponse.ContentLength == 0 { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,165 @@ +package s3_test + +import ( + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/s3" +) + +type testErrorCase struct { + RespFn func() *http.Response + ReqID string + Code, Msg string +} + +var testUnmarshalCases = []testErrorCase{ + { + RespFn: func() *http.Response { + return &http.Response{ + StatusCode: 301, + Header: http.Header{"X-Amz-Request-Id": []string{"abc123"}}, + Body: ioutil.NopCloser(nil), + ContentLength: -1, + } + }, + ReqID: "abc123", + Code: "BucketRegionError", Msg: "incorrect region, the bucket is not in 'mock-region' region", + }, + { + RespFn: func() *http.Response { + return &http.Response{ + StatusCode: 403, + Header: http.Header{"X-Amz-Request-Id": []string{"abc123"}}, + Body: ioutil.NopCloser(nil), + ContentLength: 0, + } + }, + ReqID: "abc123", + Code: "Forbidden", Msg: "Forbidden", + }, + { + RespFn: func() *http.Response { + return &http.Response{ + StatusCode: 400, + Header: http.Header{"X-Amz-Request-Id": []string{"abc123"}}, + Body: ioutil.NopCloser(nil), + ContentLength: 0, + } + }, + ReqID: "abc123", + Code: "BadRequest", Msg: "Bad Request", + }, + { + RespFn: func() *http.Response { + return &http.Response{ + StatusCode: 404, + Header: 
http.Header{"X-Amz-Request-Id": []string{"abc123"}}, + Body: ioutil.NopCloser(nil), + ContentLength: 0, + } + }, + ReqID: "abc123", + Code: "NotFound", Msg: "Not Found", + }, + { + RespFn: func() *http.Response { + body := `SomeExceptionException message` + return &http.Response{ + StatusCode: 500, + Header: http.Header{"X-Amz-Request-Id": []string{"abc123"}}, + Body: ioutil.NopCloser(strings.NewReader(body)), + ContentLength: int64(len(body)), + } + }, + ReqID: "abc123", + Code: "SomeException", Msg: "Exception message", + }, +} + +func TestUnmarshalError(t *testing.T) { + for _, c := range testUnmarshalCases { + s := s3.New(unit.Session) + s.Handlers.Send.Clear() + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = c.RespFn() + r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode) + }) + _, err := s.PutBucketAcl(&s3.PutBucketAclInput{ + Bucket: aws.String("bucket"), ACL: aws.String("public-read"), + }) + + assert.Error(t, err) + assert.Equal(t, c.Code, err.(awserr.Error).Code()) + assert.Equal(t, c.Msg, err.(awserr.Error).Message()) + assert.Equal(t, c.ReqID, err.(awserr.RequestFailure).RequestID()) + } +} + +const completeMultiResp = ` +163 + + +https://bucket.s3-us-west-2.amazonaws.com/keybucketkey"a7d414b9133d6483d9a1c4e04e856e3b-2" +0 +` + +func Test200NoErrorUnmarshalError(t *testing.T) { + s := s3.New(unit.Session) + s.Handlers.Send.Clear() + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Header: http.Header{"X-Amz-Request-Id": []string{"abc123"}}, + Body: ioutil.NopCloser(strings.NewReader(completeMultiResp)), + ContentLength: -1, + } + r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode) + }) + _, err := s.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String("bucket"), Key: aws.String("key"), + UploadId: aws.String("id"), + MultipartUpload: &s3.CompletedMultipartUpload{Parts: []*s3.CompletedPart{ + {ETag: 
aws.String("etag"), PartNumber: aws.Int64(1)}, + }}, + }) + + assert.NoError(t, err) +} + +const completeMultiErrResp = `SomeExceptionException message` + +func Test200WithErrorUnmarshalError(t *testing.T) { + s := s3.New(unit.Session) + s.Handlers.Send.Clear() + s.Handlers.Send.PushBack(func(r *request.Request) { + r.HTTPResponse = &http.Response{ + StatusCode: 200, + Header: http.Header{"X-Amz-Request-Id": []string{"abc123"}}, + Body: ioutil.NopCloser(strings.NewReader(completeMultiErrResp)), + ContentLength: -1, + } + r.HTTPResponse.Status = http.StatusText(r.HTTPResponse.StatusCode) + }) + _, err := s.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String("bucket"), Key: aws.String("key"), + UploadId: aws.String("id"), + MultipartUpload: &s3.CompletedMultipartUpload{Parts: []*s3.CompletedPart{ + {ETag: aws.String("etag"), PartNumber: aws.Int64(1)}, + }}, + }) + + assert.Error(t, err) + + assert.Equal(t, "SomeException", err.(awserr.Error).Code()) + assert.Equal(t, "Exception message", err.(awserr.Error).Message()) + assert.Equal(t, "abc123", err.(awserr.RequestFailure).RequestID()) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/s3/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,117 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package s3 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 403, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/api.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,3984 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ses provides a client for Amazon Simple Email Service. +package ses + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opCloneReceiptRuleSet = "CloneReceiptRuleSet" + +// CloneReceiptRuleSetRequest generates a request for the CloneReceiptRuleSet operation. +func (c *SES) CloneReceiptRuleSetRequest(input *CloneReceiptRuleSetInput) (req *request.Request, output *CloneReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opCloneReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CloneReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CloneReceiptRuleSetOutput{} + req.Data = output + return +} + +// Creates a receipt rule set by cloning an existing one. All receipt rules +// and configurations are copied to the new receipt rule set and are completely +// independent of the source rule set. +// +// For information about setting up rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. 
+func (c *SES) CloneReceiptRuleSet(input *CloneReceiptRuleSetInput) (*CloneReceiptRuleSetOutput, error) { + req, out := c.CloneReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptFilter = "CreateReceiptFilter" + +// CreateReceiptFilterRequest generates a request for the CreateReceiptFilter operation. +func (c *SES) CreateReceiptFilterRequest(input *CreateReceiptFilterInput) (req *request.Request, output *CreateReceiptFilterOutput) { + op := &request.Operation{ + Name: opCreateReceiptFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptFilterOutput{} + req.Data = output + return +} + +// Creates a new IP address filter. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptFilter(input *CreateReceiptFilterInput) (*CreateReceiptFilterOutput, error) { + req, out := c.CreateReceiptFilterRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptRule = "CreateReceiptRule" + +// CreateReceiptRuleRequest generates a request for the CreateReceiptRule operation. +func (c *SES) CreateReceiptRuleRequest(input *CreateReceiptRuleInput) (req *request.Request, output *CreateReceiptRuleOutput) { + op := &request.Operation{ + Name: opCreateReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptRuleOutput{} + req.Data = output + return +} + +// Creates a receipt rule. 
+// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptRule(input *CreateReceiptRuleInput) (*CreateReceiptRuleOutput, error) { + req, out := c.CreateReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptRuleSet = "CreateReceiptRuleSet" + +// CreateReceiptRuleSetRequest generates a request for the CreateReceiptRuleSet operation. +func (c *SES) CreateReceiptRuleSetRequest(input *CreateReceiptRuleSetInput) (req *request.Request, output *CreateReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opCreateReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptRuleSetOutput{} + req.Data = output + return +} + +// Creates an empty receipt rule set. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptRuleSet(input *CreateReceiptRuleSetInput) (*CreateReceiptRuleSetOutput, error) { + req, out := c.CreateReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIdentity = "DeleteIdentity" + +// DeleteIdentityRequest generates a request for the DeleteIdentity operation. 
+func (c *SES) DeleteIdentityRequest(input *DeleteIdentityInput) (req *request.Request, output *DeleteIdentityOutput) { + op := &request.Operation{ + Name: opDeleteIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIdentityOutput{} + req.Data = output + return +} + +// Deletes the specified identity (email address or domain) from the list of +// verified identities. +// +// This action is throttled at one request per second. +func (c *SES) DeleteIdentity(input *DeleteIdentityInput) (*DeleteIdentityOutput, error) { + req, out := c.DeleteIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIdentityPolicy = "DeleteIdentityPolicy" + +// DeleteIdentityPolicyRequest generates a request for the DeleteIdentityPolicy operation. +func (c *SES) DeleteIdentityPolicyRequest(input *DeleteIdentityPolicyInput) (req *request.Request, output *DeleteIdentityPolicyOutput) { + op := &request.Operation{ + Name: opDeleteIdentityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIdentityPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified sending authorization policy for the given identity +// (email address or domain). This API returns successfully even if a policy +// with the specified name does not exist. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. Sending authorization is a feature that enables +// an identity owner to authorize other senders to use its identities. For information +// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. 
+func (c *SES) DeleteIdentityPolicy(input *DeleteIdentityPolicyInput) (*DeleteIdentityPolicyOutput, error) { + req, out := c.DeleteIdentityPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptFilter = "DeleteReceiptFilter" + +// DeleteReceiptFilterRequest generates a request for the DeleteReceiptFilter operation. +func (c *SES) DeleteReceiptFilterRequest(input *DeleteReceiptFilterInput) (req *request.Request, output *DeleteReceiptFilterOutput) { + op := &request.Operation{ + Name: opDeleteReceiptFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptFilterOutput{} + req.Data = output + return +} + +// Deletes the specified IP address filter. +// +// For information about managing IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptFilter(input *DeleteReceiptFilterInput) (*DeleteReceiptFilterOutput, error) { + req, out := c.DeleteReceiptFilterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptRule = "DeleteReceiptRule" + +// DeleteReceiptRuleRequest generates a request for the DeleteReceiptRule operation. +func (c *SES) DeleteReceiptRuleRequest(input *DeleteReceiptRuleInput) (req *request.Request, output *DeleteReceiptRuleOutput) { + op := &request.Operation{ + Name: opDeleteReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptRuleOutput{} + req.Data = output + return +} + +// Deletes the specified receipt rule. 
+// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptRule(input *DeleteReceiptRuleInput) (*DeleteReceiptRuleOutput, error) { + req, out := c.DeleteReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptRuleSet = "DeleteReceiptRuleSet" + +// DeleteReceiptRuleSetRequest generates a request for the DeleteReceiptRuleSet operation. +func (c *SES) DeleteReceiptRuleSetRequest(input *DeleteReceiptRuleSetInput) (req *request.Request, output *DeleteReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDeleteReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptRuleSetOutput{} + req.Data = output + return +} + +// Deletes the specified receipt rule set and all of the receipt rules it contains. +// +// The currently active rule set cannot be deleted. For information about managing +// receipt rule sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptRuleSet(input *DeleteReceiptRuleSetInput) (*DeleteReceiptRuleSetOutput, error) { + req, out := c.DeleteReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVerifiedEmailAddress = "DeleteVerifiedEmailAddress" + +// DeleteVerifiedEmailAddressRequest generates a request for the DeleteVerifiedEmailAddress operation. 
+func (c *SES) DeleteVerifiedEmailAddressRequest(input *DeleteVerifiedEmailAddressInput) (req *request.Request, output *DeleteVerifiedEmailAddressOutput) { + op := &request.Operation{ + Name: opDeleteVerifiedEmailAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVerifiedEmailAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVerifiedEmailAddressOutput{} + req.Data = output + return +} + +// Deletes the specified email address from the list of verified addresses. +// +// The DeleteVerifiedEmailAddress action is deprecated as of the May 15, 2012 +// release of Domain Verification. The DeleteIdentity action is now preferred. +// This action is throttled at one request per second. +func (c *SES) DeleteVerifiedEmailAddress(input *DeleteVerifiedEmailAddressInput) (*DeleteVerifiedEmailAddressOutput, error) { + req, out := c.DeleteVerifiedEmailAddressRequest(input) + err := req.Send() + return out, err +} + +const opDescribeActiveReceiptRuleSet = "DescribeActiveReceiptRuleSet" + +// DescribeActiveReceiptRuleSetRequest generates a request for the DescribeActiveReceiptRuleSet operation. +func (c *SES) DescribeActiveReceiptRuleSetRequest(input *DescribeActiveReceiptRuleSetInput) (req *request.Request, output *DescribeActiveReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDescribeActiveReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeActiveReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeActiveReceiptRuleSetOutput{} + req.Data = output + return +} + +// Returns the metadata and receipt rules for the receipt rule set that is currently +// active. 
+// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeActiveReceiptRuleSet(input *DescribeActiveReceiptRuleSetInput) (*DescribeActiveReceiptRuleSetOutput, error) { + req, out := c.DescribeActiveReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReceiptRule = "DescribeReceiptRule" + +// DescribeReceiptRuleRequest generates a request for the DescribeReceiptRule operation. +func (c *SES) DescribeReceiptRuleRequest(input *DescribeReceiptRuleInput) (req *request.Request, output *DescribeReceiptRuleOutput) { + op := &request.Operation{ + Name: opDescribeReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReceiptRuleOutput{} + req.Data = output + return +} + +// Returns the details of the specified receipt rule. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeReceiptRule(input *DescribeReceiptRuleInput) (*DescribeReceiptRuleOutput, error) { + req, out := c.DescribeReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReceiptRuleSet = "DescribeReceiptRuleSet" + +// DescribeReceiptRuleSetRequest generates a request for the DescribeReceiptRuleSet operation. 
+func (c *SES) DescribeReceiptRuleSetRequest(input *DescribeReceiptRuleSetInput) (req *request.Request, output *DescribeReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDescribeReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReceiptRuleSetOutput{} + req.Data = output + return +} + +// Returns the details of the specified receipt rule set. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeReceiptRuleSet(input *DescribeReceiptRuleSetInput) (*DescribeReceiptRuleSetOutput, error) { + req, out := c.DescribeReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityDkimAttributes = "GetIdentityDkimAttributes" + +// GetIdentityDkimAttributesRequest generates a request for the GetIdentityDkimAttributes operation. +func (c *SES) GetIdentityDkimAttributesRequest(input *GetIdentityDkimAttributesInput) (req *request.Request, output *GetIdentityDkimAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityDkimAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityDkimAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityDkimAttributesOutput{} + req.Data = output + return +} + +// Returns the current status of Easy DKIM signing for an entity. For domain +// name identities, this action also returns the DKIM tokens that are required +// for Easy DKIM signing, and whether Amazon SES has successfully verified that +// these tokens have been published. 
+// +// This action takes a list of identities as input and returns the following +// information for each: +// +// Whether Easy DKIM signing is enabled or disabled. A set of DKIM tokens +// that represent the identity. If the identity is an email address, the tokens +// represent the domain of that address. Whether Amazon SES has successfully +// verified the DKIM tokens published in the domain's DNS. This information +// is only returned for domain name identities, not for email addresses. This +// action is throttled at one request per second and can only get DKIM attributes +// for up to 100 identities at a time. +// +// For more information about creating DNS records using DKIM tokens, go to +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). +func (c *SES) GetIdentityDkimAttributes(input *GetIdentityDkimAttributesInput) (*GetIdentityDkimAttributesOutput, error) { + req, out := c.GetIdentityDkimAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityNotificationAttributes = "GetIdentityNotificationAttributes" + +// GetIdentityNotificationAttributesRequest generates a request for the GetIdentityNotificationAttributes operation. +func (c *SES) GetIdentityNotificationAttributesRequest(input *GetIdentityNotificationAttributesInput) (req *request.Request, output *GetIdentityNotificationAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityNotificationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityNotificationAttributesOutput{} + req.Data = output + return +} + +// Given a list of verified identities (email addresses and/or domains), returns +// a structure describing identity notification attributes. 
+// +// This action is throttled at one request per second and can only get notification +// attributes for up to 100 identities at a time. +// +// For more information about using notifications with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) GetIdentityNotificationAttributes(input *GetIdentityNotificationAttributesInput) (*GetIdentityNotificationAttributesOutput, error) { + req, out := c.GetIdentityNotificationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityPolicies = "GetIdentityPolicies" + +// GetIdentityPoliciesRequest generates a request for the GetIdentityPolicies operation. +func (c *SES) GetIdentityPoliciesRequest(input *GetIdentityPoliciesInput) (req *request.Request, output *GetIdentityPoliciesOutput) { + op := &request.Operation{ + Name: opGetIdentityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityPoliciesOutput{} + req.Data = output + return +} + +// Returns the requested sending authorization policies for the given identity +// (email address or domain). The policies are returned as a map of policy names +// to policy contents. You can retrieve a maximum of 20 policies at a time. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. Sending authorization is a feature that enables +// an identity owner to authorize other senders to use its identities. For information +// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. 
+func (c *SES) GetIdentityPolicies(input *GetIdentityPoliciesInput) (*GetIdentityPoliciesOutput, error) { + req, out := c.GetIdentityPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityVerificationAttributes = "GetIdentityVerificationAttributes" + +// GetIdentityVerificationAttributesRequest generates a request for the GetIdentityVerificationAttributes operation. +func (c *SES) GetIdentityVerificationAttributesRequest(input *GetIdentityVerificationAttributesInput) (req *request.Request, output *GetIdentityVerificationAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityVerificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityVerificationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityVerificationAttributesOutput{} + req.Data = output + return +} + +// Given a list of identities (email addresses and/or domains), returns the +// verification status and (for domain identities) the verification token for +// each identity. +// +// This action is throttled at one request per second and can only get verification +// attributes for up to 100 identities at a time. +func (c *SES) GetIdentityVerificationAttributes(input *GetIdentityVerificationAttributesInput) (*GetIdentityVerificationAttributesOutput, error) { + req, out := c.GetIdentityVerificationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetSendQuota = "GetSendQuota" + +// GetSendQuotaRequest generates a request for the GetSendQuota operation. 
+func (c *SES) GetSendQuotaRequest(input *GetSendQuotaInput) (req *request.Request, output *GetSendQuotaOutput) { + op := &request.Operation{ + Name: opGetSendQuota, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSendQuotaInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSendQuotaOutput{} + req.Data = output + return +} + +// Returns the user's current sending limits. +// +// This action is throttled at one request per second. +func (c *SES) GetSendQuota(input *GetSendQuotaInput) (*GetSendQuotaOutput, error) { + req, out := c.GetSendQuotaRequest(input) + err := req.Send() + return out, err +} + +const opGetSendStatistics = "GetSendStatistics" + +// GetSendStatisticsRequest generates a request for the GetSendStatistics operation. +func (c *SES) GetSendStatisticsRequest(input *GetSendStatisticsInput) (req *request.Request, output *GetSendStatisticsOutput) { + op := &request.Operation{ + Name: opGetSendStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSendStatisticsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSendStatisticsOutput{} + req.Data = output + return +} + +// Returns the user's sending statistics. The result is a list of data points, +// representing the last two weeks of sending activity. +// +// Each data point in the list contains statistics for a 15-minute interval. +// +// This action is throttled at one request per second. +func (c *SES) GetSendStatistics(input *GetSendStatisticsInput) (*GetSendStatisticsOutput, error) { + req, out := c.GetSendStatisticsRequest(input) + err := req.Send() + return out, err +} + +const opListIdentities = "ListIdentities" + +// ListIdentitiesRequest generates a request for the ListIdentities operation. 
+func (c *SES) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Request, output *ListIdentitiesOutput) { + op := &request.Operation{ + Name: opListIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentitiesOutput{} + req.Data = output + return +} + +// Returns a list containing all of the identities (email addresses and domains) +// for a specific AWS Account, regardless of verification status. +// +// This action is throttled at one request per second. +func (c *SES) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + err := req.Send() + return out, err +} + +func (c *SES) ListIdentitiesPages(input *ListIdentitiesInput, fn func(p *ListIdentitiesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListIdentitiesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListIdentitiesOutput), lastPage) + }) +} + +const opListIdentityPolicies = "ListIdentityPolicies" + +// ListIdentityPoliciesRequest generates a request for the ListIdentityPolicies operation. 
+func (c *SES) ListIdentityPoliciesRequest(input *ListIdentityPoliciesInput) (req *request.Request, output *ListIdentityPoliciesOutput) { + op := &request.Operation{ + Name: opListIdentityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentityPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentityPoliciesOutput{} + req.Data = output + return +} + +// Returns a list of sending authorization policies that are attached to the +// given identity (email address or domain). This API returns only a list. If +// you want the actual policy content, you can use GetIdentityPolicies. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. Sending authorization is a feature that enables +// an identity owner to authorize other senders to use its identities. For information +// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) ListIdentityPolicies(input *ListIdentityPoliciesInput) (*ListIdentityPoliciesOutput, error) { + req, out := c.ListIdentityPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListReceiptFilters = "ListReceiptFilters" + +// ListReceiptFiltersRequest generates a request for the ListReceiptFilters operation. +func (c *SES) ListReceiptFiltersRequest(input *ListReceiptFiltersInput) (req *request.Request, output *ListReceiptFiltersOutput) { + op := &request.Operation{ + Name: opListReceiptFilters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListReceiptFiltersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReceiptFiltersOutput{} + req.Data = output + return +} + +// Lists the IP address filters associated with your account. 
+// +// For information about managing IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) ListReceiptFilters(input *ListReceiptFiltersInput) (*ListReceiptFiltersOutput, error) { + req, out := c.ListReceiptFiltersRequest(input) + err := req.Send() + return out, err +} + +const opListReceiptRuleSets = "ListReceiptRuleSets" + +// ListReceiptRuleSetsRequest generates a request for the ListReceiptRuleSets operation. +func (c *SES) ListReceiptRuleSetsRequest(input *ListReceiptRuleSetsInput) (req *request.Request, output *ListReceiptRuleSetsOutput) { + op := &request.Operation{ + Name: opListReceiptRuleSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListReceiptRuleSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReceiptRuleSetsOutput{} + req.Data = output + return +} + +// Lists the receipt rule sets that exist under your AWS account. If there are +// additional receipt rule sets to be retrieved, you will receive a NextToken +// that you can provide to the next call to ListReceiptRuleSets to retrieve +// the additional entries. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) ListReceiptRuleSets(input *ListReceiptRuleSetsInput) (*ListReceiptRuleSetsOutput, error) { + req, out := c.ListReceiptRuleSetsRequest(input) + err := req.Send() + return out, err +} + +const opListVerifiedEmailAddresses = "ListVerifiedEmailAddresses" + +// ListVerifiedEmailAddressesRequest generates a request for the ListVerifiedEmailAddresses operation. 
+func (c *SES) ListVerifiedEmailAddressesRequest(input *ListVerifiedEmailAddressesInput) (req *request.Request, output *ListVerifiedEmailAddressesOutput) { + op := &request.Operation{ + Name: opListVerifiedEmailAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListVerifiedEmailAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVerifiedEmailAddressesOutput{} + req.Data = output + return +} + +// Returns a list containing all of the email addresses that have been verified. +// +// The ListVerifiedEmailAddresses action is deprecated as of the May 15, 2012 +// release of Domain Verification. The ListIdentities action is now preferred. +// This action is throttled at one request per second. +func (c *SES) ListVerifiedEmailAddresses(input *ListVerifiedEmailAddressesInput) (*ListVerifiedEmailAddressesOutput, error) { + req, out := c.ListVerifiedEmailAddressesRequest(input) + err := req.Send() + return out, err +} + +const opPutIdentityPolicy = "PutIdentityPolicy" + +// PutIdentityPolicyRequest generates a request for the PutIdentityPolicy operation. +func (c *SES) PutIdentityPolicyRequest(input *PutIdentityPolicyInput) (req *request.Request, output *PutIdentityPolicyOutput) { + op := &request.Operation{ + Name: opPutIdentityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutIdentityPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutIdentityPolicyOutput{} + req.Data = output + return +} + +// Adds or updates a sending authorization policy for the specified identity +// (email address or domain). +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. Sending authorization is a feature that enables +// an identity owner to authorize other senders to use its identities. 
For information +// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) PutIdentityPolicy(input *PutIdentityPolicyInput) (*PutIdentityPolicyOutput, error) { + req, out := c.PutIdentityPolicyRequest(input) + err := req.Send() + return out, err +} + +const opReorderReceiptRuleSet = "ReorderReceiptRuleSet" + +// ReorderReceiptRuleSetRequest generates a request for the ReorderReceiptRuleSet operation. +func (c *SES) ReorderReceiptRuleSetRequest(input *ReorderReceiptRuleSetInput) (req *request.Request, output *ReorderReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opReorderReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReorderReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ReorderReceiptRuleSetOutput{} + req.Data = output + return +} + +// Reorders the receipt rules within a receipt rule set. +// +// All of the rules in the rule set must be represented in this request. That +// is, this API will return an error if the reorder request doesn’t explicitly +// position all of the rules. For information about managing receipt rule sets, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) ReorderReceiptRuleSet(input *ReorderReceiptRuleSetInput) (*ReorderReceiptRuleSetOutput, error) { + req, out := c.ReorderReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opSendBounce = "SendBounce" + +// SendBounceRequest generates a request for the SendBounce operation. 
+func (c *SES) SendBounceRequest(input *SendBounceInput) (req *request.Request, output *SendBounceOutput) { + op := &request.Operation{ + Name: opSendBounce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendBounceInput{} + } + + req = c.newRequest(op, input, output) + output = &SendBounceOutput{} + req.Data = output + return +} + +// Generates and sends a bounce message to the sender of an email you received +// through Amazon SES. You can only use this API on an email up to 24 hours +// after you receive it. +// +// You cannot use this API to send generic bounces for mail that was not received +// by Amazon SES. For information about receiving email through Amazon SES, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// +// This action is throttled at one request per second. +func (c *SES) SendBounce(input *SendBounceInput) (*SendBounceOutput, error) { + req, out := c.SendBounceRequest(input) + err := req.Send() + return out, err +} + +const opSendEmail = "SendEmail" + +// SendEmailRequest generates a request for the SendEmail operation. +func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, output *SendEmailOutput) { + op := &request.Operation{ + Name: opSendEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendEmailInput{} + } + + req = c.newRequest(op, input, output) + output = &SendEmailOutput{} + req.Data = output + return +} + +// Composes an email message based on input data, and then immediately queues +// the message for sending. +// +// There are several important points to know about SendEmail: +// +// You can only send email from verified email addresses and domains; otherwise, +// you will get an "Email address not verified" error. 
If your account is still +// in the Amazon SES sandbox, you must also verify every recipient email address +// except for the recipients provided by the Amazon SES mailbox simulator. For +// more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// The total size of the message cannot exceed 10 MB. This includes any attachments +// that are part of the message. Amazon SES has a limit on the total number +// of recipients per message. The combined number of To:, CC: and BCC: email +// addresses cannot exceed 50. If you need to send an email message to a larger +// audience, you can divide your recipient list into groups of 50 or fewer, +// and then call Amazon SES repeatedly to send the message to each group. For +// every message that you send, the total number of recipients (To:, CC: and +// BCC:) is counted against your sending quota - the maximum number of emails +// you can send in a 24-hour period. For information about your sending quota, +// go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). +func (c *SES) SendEmail(input *SendEmailInput) (*SendEmailOutput, error) { + req, out := c.SendEmailRequest(input) + err := req.Send() + return out, err +} + +const opSendRawEmail = "SendRawEmail" + +// SendRawEmailRequest generates a request for the SendRawEmail operation. +func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Request, output *SendRawEmailOutput) { + op := &request.Operation{ + Name: opSendRawEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendRawEmailInput{} + } + + req = c.newRequest(op, input, output) + output = &SendRawEmailOutput{} + req.Data = output + return +} + +// Sends an email message, with header and content specified by the client. +// The SendRawEmail action is useful for sending multipart MIME emails. 
The +// raw text of the message must comply with Internet email standards; otherwise, +// the message cannot be sent. +// +// There are several important points to know about SendRawEmail: +// +// You can only send email from verified email addresses and domains; otherwise, +// you will get an "Email address not verified" error. If your account is still +// in the Amazon SES sandbox, you must also verify every recipient email address +// except for the recipients provided by the Amazon SES mailbox simulator. For +// more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// The total size of the message cannot exceed 10 MB. This includes any attachments +// that are part of the message. Amazon SES has a limit on the total number +// of recipients per message. The combined number of To:, CC: and BCC: email +// addresses cannot exceed 50. If you need to send an email message to a larger +// audience, you can divide your recipient list into groups of 50 or fewer, +// and then call Amazon SES repeatedly to send the message to each group. The +// To:, CC:, and BCC: headers in the raw message can contain a group list. Note +// that each recipient in a group list counts towards the 50-recipient limit. +// For every message that you send, the total number of recipients (To:, CC: +// and BCC:) is counted against your sending quota - the maximum number of emails +// you can send in a 24-hour period. For information about your sending quota, +// go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). 
+// If you are using sending authorization to send on behalf of another user, +// SendRawEmail enables you to specify the cross-account identity for the email's +// "Source," "From," and "Return-Path" parameters in one of two ways: you can +// pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the +// API, or you can include the following X-headers in the header of your raw +// email: X-SES-SOURCE-ARN X-SES-FROM-ARN X-SES-RETURN-PATH-ARN Do not include +// these X-headers in the DKIM signature, because they are removed by Amazon +// SES before sending the email. For the most common sending authorization use +// case, we recommend that you specify the SourceIdentityArn and do not specify +// either the FromIdentityArn or ReturnPathIdentityArn. (The same note applies +// to the corresponding X-headers.) If you only specify the SourceIdentityArn, +// Amazon SES will simply set the "From" address and the "Return Path" address +// to the identity specified in SourceIdentityArn. For more information about +// sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +func (c *SES) SendRawEmail(input *SendRawEmailInput) (*SendRawEmailOutput, error) { + req, out := c.SendRawEmailRequest(input) + err := req.Send() + return out, err +} + +const opSetActiveReceiptRuleSet = "SetActiveReceiptRuleSet" + +// SetActiveReceiptRuleSetRequest generates a request for the SetActiveReceiptRuleSet operation. 
+func (c *SES) SetActiveReceiptRuleSetRequest(input *SetActiveReceiptRuleSetInput) (req *request.Request, output *SetActiveReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opSetActiveReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetActiveReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &SetActiveReceiptRuleSetOutput{} + req.Data = output + return +} + +// Sets the specified receipt rule set as the active receipt rule set. +// +// To disable your email-receiving through Amazon SES completely, you can call +// this API with RuleSetName set to null. For information about managing receipt +// rule sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) SetActiveReceiptRuleSet(input *SetActiveReceiptRuleSetInput) (*SetActiveReceiptRuleSetOutput, error) { + req, out := c.SetActiveReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityDkimEnabled = "SetIdentityDkimEnabled" + +// SetIdentityDkimEnabledRequest generates a request for the SetIdentityDkimEnabled operation. +func (c *SES) SetIdentityDkimEnabledRequest(input *SetIdentityDkimEnabledInput) (req *request.Request, output *SetIdentityDkimEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityDkimEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityDkimEnabledInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityDkimEnabledOutput{} + req.Data = output + return +} + +// Enables or disables Easy DKIM signing of email sent from an identity: +// +// If Easy DKIM signing is enabled for a domain name identity (e.g., example.com), +// then Amazon SES will DKIM-sign all email sent by addresses under that domain +// name (e.g., user@example.com). 
If Easy DKIM signing is enabled for an email +// address, then Amazon SES will DKIM-sign all email sent by that email address. +// For email addresses (e.g., user@example.com), you can only enable Easy DKIM +// signing if the corresponding domain (e.g., example.com) has been set up for +// Easy DKIM using the AWS Console or the VerifyDomainDkim action. +// +// This action is throttled at one request per second. +// +// For more information about Easy DKIM signing, go to the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +func (c *SES) SetIdentityDkimEnabled(input *SetIdentityDkimEnabledInput) (*SetIdentityDkimEnabledOutput, error) { + req, out := c.SetIdentityDkimEnabledRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityFeedbackForwardingEnabled = "SetIdentityFeedbackForwardingEnabled" + +// SetIdentityFeedbackForwardingEnabledRequest generates a request for the SetIdentityFeedbackForwardingEnabled operation. +func (c *SES) SetIdentityFeedbackForwardingEnabledRequest(input *SetIdentityFeedbackForwardingEnabledInput) (req *request.Request, output *SetIdentityFeedbackForwardingEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityFeedbackForwardingEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityFeedbackForwardingEnabledInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityFeedbackForwardingEnabledOutput{} + req.Data = output + return +} + +// Given an identity (email address or domain), enables or disables whether +// Amazon SES forwards bounce and complaint notifications as email. Feedback +// forwarding can only be disabled when Amazon Simple Notification Service (Amazon +// SNS) topics are specified for both bounces and complaints. +// +// Feedback forwarding does not apply to delivery notifications. Delivery notifications +// are only available through Amazon SNS. 
This action is throttled at one request +// per second. +// +// For more information about using notifications with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) SetIdentityFeedbackForwardingEnabled(input *SetIdentityFeedbackForwardingEnabledInput) (*SetIdentityFeedbackForwardingEnabledOutput, error) { + req, out := c.SetIdentityFeedbackForwardingEnabledRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityNotificationTopic = "SetIdentityNotificationTopic" + +// SetIdentityNotificationTopicRequest generates a request for the SetIdentityNotificationTopic operation. +func (c *SES) SetIdentityNotificationTopicRequest(input *SetIdentityNotificationTopicInput) (req *request.Request, output *SetIdentityNotificationTopicOutput) { + op := &request.Operation{ + Name: opSetIdentityNotificationTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityNotificationTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityNotificationTopicOutput{} + req.Data = output + return +} + +// Given an identity (email address or domain), sets the Amazon Simple Notification +// Service (Amazon SNS) topic to which Amazon SES will publish bounce, complaint, +// and/or delivery notifications for emails sent with that identity as the Source. +// +// Unless feedback forwarding is enabled, you must specify Amazon SNS topics +// for bounce and complaint notifications. For more information, see SetIdentityFeedbackForwardingEnabled. +// This action is throttled at one request per second. +// +// For more information about feedback notification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). 
+func (c *SES) SetIdentityNotificationTopic(input *SetIdentityNotificationTopicInput) (*SetIdentityNotificationTopicOutput, error) { + req, out := c.SetIdentityNotificationTopicRequest(input) + err := req.Send() + return out, err +} + +const opSetReceiptRulePosition = "SetReceiptRulePosition" + +// SetReceiptRulePositionRequest generates a request for the SetReceiptRulePosition operation. +func (c *SES) SetReceiptRulePositionRequest(input *SetReceiptRulePositionInput) (req *request.Request, output *SetReceiptRulePositionOutput) { + op := &request.Operation{ + Name: opSetReceiptRulePosition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetReceiptRulePositionInput{} + } + + req = c.newRequest(op, input, output) + output = &SetReceiptRulePositionOutput{} + req.Data = output + return +} + +// Sets the position of the specified receipt rule in the receipt rule set. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) SetReceiptRulePosition(input *SetReceiptRulePositionInput) (*SetReceiptRulePositionOutput, error) { + req, out := c.SetReceiptRulePositionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateReceiptRule = "UpdateReceiptRule" + +// UpdateReceiptRuleRequest generates a request for the UpdateReceiptRule operation. +func (c *SES) UpdateReceiptRuleRequest(input *UpdateReceiptRuleInput) (req *request.Request, output *UpdateReceiptRuleOutput) { + op := &request.Operation{ + Name: opUpdateReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateReceiptRuleOutput{} + req.Data = output + return +} + +// Updates a receipt rule. 
+// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) UpdateReceiptRule(input *UpdateReceiptRuleInput) (*UpdateReceiptRuleOutput, error) { + req, out := c.UpdateReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opVerifyDomainDkim = "VerifyDomainDkim" + +// VerifyDomainDkimRequest generates a request for the VerifyDomainDkim operation. +func (c *SES) VerifyDomainDkimRequest(input *VerifyDomainDkimInput) (req *request.Request, output *VerifyDomainDkimOutput) { + op := &request.Operation{ + Name: opVerifyDomainDkim, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainDkimInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyDomainDkimOutput{} + req.Data = output + return +} + +// Returns a set of DKIM tokens for a domain. DKIM tokens are character strings +// that represent your domain's identity. Using these tokens, you will need +// to create DNS CNAME records that point to DKIM public keys hosted by Amazon +// SES. Amazon Web Services will eventually detect that you have updated your +// DNS records; this detection process may take up to 72 hours. Upon successful +// detection, Amazon SES will be able to DKIM-sign email originating from that +// domain. +// +// This action is throttled at one request per second. +// +// To enable or disable Easy DKIM signing for a domain, use the SetIdentityDkimEnabled +// action. +// +// For more information about creating DNS records using DKIM tokens, go to +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). 
+func (c *SES) VerifyDomainDkim(input *VerifyDomainDkimInput) (*VerifyDomainDkimOutput, error) { + req, out := c.VerifyDomainDkimRequest(input) + err := req.Send() + return out, err +} + +const opVerifyDomainIdentity = "VerifyDomainIdentity" + +// VerifyDomainIdentityRequest generates a request for the VerifyDomainIdentity operation. +func (c *SES) VerifyDomainIdentityRequest(input *VerifyDomainIdentityInput) (req *request.Request, output *VerifyDomainIdentityOutput) { + op := &request.Operation{ + Name: opVerifyDomainIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyDomainIdentityOutput{} + req.Data = output + return +} + +// Verifies a domain. +// +// This action is throttled at one request per second. +func (c *SES) VerifyDomainIdentity(input *VerifyDomainIdentityInput) (*VerifyDomainIdentityOutput, error) { + req, out := c.VerifyDomainIdentityRequest(input) + err := req.Send() + return out, err +} + +const opVerifyEmailAddress = "VerifyEmailAddress" + +// VerifyEmailAddressRequest generates a request for the VerifyEmailAddress operation. +func (c *SES) VerifyEmailAddressRequest(input *VerifyEmailAddressInput) (req *request.Request, output *VerifyEmailAddressOutput) { + op := &request.Operation{ + Name: opVerifyEmailAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyEmailAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &VerifyEmailAddressOutput{} + req.Data = output + return +} + +// Verifies an email address. This action causes a confirmation email message +// to be sent to the specified address. +// +// The VerifyEmailAddress action is deprecated as of the May 15, 2012 release +// of Domain Verification. 
The VerifyEmailIdentity action is now preferred. +// This action is throttled at one request per second. +func (c *SES) VerifyEmailAddress(input *VerifyEmailAddressInput) (*VerifyEmailAddressOutput, error) { + req, out := c.VerifyEmailAddressRequest(input) + err := req.Send() + return out, err +} + +const opVerifyEmailIdentity = "VerifyEmailIdentity" + +// VerifyEmailIdentityRequest generates a request for the VerifyEmailIdentity operation. +func (c *SES) VerifyEmailIdentityRequest(input *VerifyEmailIdentityInput) (req *request.Request, output *VerifyEmailIdentityOutput) { + op := &request.Operation{ + Name: opVerifyEmailIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyEmailIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyEmailIdentityOutput{} + req.Data = output + return +} + +// Verifies an email address. This action causes a confirmation email message +// to be sent to the specified address. +// +// This action is throttled at one request per second. +func (c *SES) VerifyEmailIdentity(input *VerifyEmailIdentityInput) (*VerifyEmailIdentityOutput, error) { + req, out := c.VerifyEmailIdentityRequest(input) + err := req.Send() + return out, err +} + +// When included in a receipt rule, this action adds a header to the received +// email. +// +// For information about adding a header using a receipt rule, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-add-header.html). +type AddHeaderAction struct { + _ struct{} `type:"structure"` + + // The name of the header to add. Must be between 1 and 50 characters, inclusive, + // and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only. + HeaderName *string `type:"string" required:"true"` + + // Must be less than 2048 characters, and must not contain newline characters + // ("\r" or "\n"). 
+ HeaderValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddHeaderAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddHeaderAction) GoString() string { + return s.String() +} + +// Represents the body of the message. You can specify text, HTML, or both. +// If you use both, then the message should display correctly in the widest +// variety of email clients. +type Body struct { + _ struct{} `type:"structure"` + + // The content of the message, in HTML format. Use this for email clients that + // can process HTML. You can include clickable links, formatted text, and much + // more in an HTML message. + Html *Content `type:"structure"` + + // The content of the message, in text format. Use this for text-based email + // clients, or clients on high-latency networks (such as mobile devices). + Text *Content `type:"structure"` +} + +// String returns the string representation +func (s Body) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Body) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action rejects the received email by +// returning a bounce response to the sender and, optionally, publishes a notification +// to Amazon Simple Notification Service (Amazon SNS). +// +// For information about sending a bounce message in response to a received +// email, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-bounce.html). +type BounceAction struct { + _ struct{} `type:"structure"` + + // Human-readable text to include in the bounce message. + Message *string `type:"string" required:"true"` + + // The email address of the sender of the bounced email. This is the address + // from which the bounce message will be sent. 
+ Sender *string `type:"string" required:"true"` + + // The SMTP reply code, as defined by RFC 5321 (https://tools.ietf.org/html/rfc5321). + SmtpReplyCode *string `type:"string" required:"true"` + + // The SMTP enhanced status code, as defined by RFC 3463 (https://tools.ietf.org/html/rfc3463). + StatusCode *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s BounceAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BounceAction) GoString() string { + return s.String() +} + +// Recipient-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type BouncedRecipientInfo struct { + _ struct{} `type:"structure"` + + // The reason for the bounce. You must provide either this parameter or RecipientDsnFields. + BounceType *string `type:"string" enum:"BounceType"` + + // The email address of the recipient of the bounced email. + Recipient *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to receive email for the recipient of the bounced email. 
For more information + // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + RecipientArn *string `type:"string"` + + // Recipient-related DSN fields, most of which would normally be filled in automatically + // when provided with a BounceType. You must provide either this parameter or + // BounceType. + RecipientDsnFields *RecipientDsnFields `type:"structure"` +} + +// String returns the string representation +func (s BouncedRecipientInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BouncedRecipientInfo) GoString() string { + return s.String() +} + +type CloneReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the rule set to clone. + OriginalRuleSetName *string `type:"string" required:"true"` + + // The name of the rule set to create. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CloneReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneReceiptRuleSetInput) GoString() string { + return s.String() +} + +type CloneReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CloneReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents textual data, plus an optional character set specification. +// +// By default, the text must be 7-bit ASCII, due to the constraints of the +// SMTP protocol. 
If the text must contain any other characters, then you must +// also specify a character set. Examples include UTF-8, ISO-8859-1, and Shift_JIS. +type Content struct { + _ struct{} `type:"structure"` + + // The character set of the content. + Charset *string `type:"string"` + + // The textual data of the content. + Data *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Content) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Content) GoString() string { + return s.String() +} + +type CreateReceiptFilterInput struct { + _ struct{} `type:"structure"` + + // A data structure that describes the IP address filter to create, which consists + // of a name, an IP address range, and whether to allow or block mail from it. + Filter *ReceiptFilter `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptFilterInput) GoString() string { + return s.String() +} + +type CreateReceiptFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptFilterOutput) GoString() string { + return s.String() +} + +type CreateReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of an existing rule after which the new rule will be placed. If + // this parameter is null, the new rule will be inserted at the beginning of + // the rule list. + After *string `type:"string"` + + // A data structure that contains the specified rule's name, actions, recipients, + // domains, enabled status, scan status, and TLS policy. 
+ Rule *ReceiptRule `type:"structure" required:"true"` + + // The name of the rule set to which to add the rule. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleInput) GoString() string { + return s.String() +} + +type CreateReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleOutput) GoString() string { + return s.String() +} + +type CreateReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the rule set to create. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleSetInput) GoString() string { + return s.String() +} + +type CreateReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to delete an identity from the +// list of identities for the AWS Account. 
+type DeleteIdentityInput struct { + _ struct{} `type:"structure"` + + // The identity to be removed from the list of identities for the AWS Account. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type DeleteIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to delete an authorization policy +// applying to an identity. +// +// This request succeeds regardless of whether the specified policy exists. +type DeleteIdentityPolicyInput struct { + _ struct{} `type:"structure"` + + // The identity that is associated with the policy that you want to delete. + // You can specify the identity by using its name or by using its Amazon Resource + // Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // The name of the policy to be deleted. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPolicyInput) GoString() string { + return s.String() +} + +// An empty element. 
Receiving this element indicates that the request completed +// successfully. +type DeleteIdentityPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPolicyOutput) GoString() string { + return s.String() +} + +type DeleteReceiptFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the IP address filter to delete. + FilterName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptFilterInput) GoString() string { + return s.String() +} + +type DeleteReceiptFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptFilterOutput) GoString() string { + return s.String() +} + +type DeleteReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule to delete. + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that contains the receipt rule to delete. 
+ RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleInput) GoString() string { + return s.String() +} + +type DeleteReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleOutput) GoString() string { + return s.String() +} + +type DeleteReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to delete. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleSetInput) GoString() string { + return s.String() +} + +type DeleteReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to delete an address from the +// list of verified email addresses. +type DeleteVerifiedEmailAddressInput struct { + _ struct{} `type:"structure"` + + // An email address to be removed from the list of verified addresses. 
+ EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVerifiedEmailAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVerifiedEmailAddressInput) GoString() string { + return s.String() +} + +type DeleteVerifiedEmailAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVerifiedEmailAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVerifiedEmailAddressOutput) GoString() string { + return s.String() +} + +type DescribeActiveReceiptRuleSetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeActiveReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActiveReceiptRuleSetInput) GoString() string { + return s.String() +} + +type DescribeActiveReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the currently active receipt rule set. The metadata consists + // of the rule set name and a timestamp of when the rule set was created. + Metadata *ReceiptRuleSetMetadata `type:"structure"` + + // The receipt rules that belong to the active rule set. + Rules []*ReceiptRule `type:"list"` +} + +// String returns the string representation +func (s DescribeActiveReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActiveReceiptRuleSetOutput) GoString() string { + return s.String() +} + +type DescribeReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule. + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set to which the receipt rule belongs. 
+ RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleInput) GoString() string { + return s.String() +} + +type DescribeReceiptRuleOutput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the specified receipt rule's name, actions, + // recipients, domains, enabled status, scan status, and Transport Layer Security + // (TLS) policy. + Rule *ReceiptRule `type:"structure"` +} + +// String returns the string representation +func (s DescribeReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleOutput) GoString() string { + return s.String() +} + +type DescribeReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to describe. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleSetInput) GoString() string { + return s.String() +} + +type DescribeReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the receipt rule set, which consists of the rule set name + // and the timestamp of when the rule set was created. + Metadata *ReceiptRuleSetMetadata `type:"structure"` + + // A list of the receipt rules that belong to the specified receipt rule set. 
+ Rules []*ReceiptRule `type:"list"` +} + +// String returns the string representation +func (s DescribeReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents the destination of the message, consisting of To:, CC:, and BCC: +// fields. +// +// By default, the string must be 7-bit ASCII. If the text must contain any +// other characters, then you must use MIME encoded-word syntax (RFC 2047) instead +// of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. +// For more information, see RFC 2047 (http://tools.ietf.org/html/rfc2047). +type Destination struct { + _ struct{} `type:"structure"` + + // The BCC: field(s) of the message. + BccAddresses []*string `type:"list"` + + // The CC: field(s) of the message. + CcAddresses []*string `type:"list"` + + // The To: field(s) of the message. + ToAddresses []*string `type:"list"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Additional X-headers to include in the Delivery Status Notification (DSN) +// when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type ExtensionField struct { + _ struct{} `type:"structure"` + + // The name of the header to add. Must be between 1 and 50 characters, inclusive, + // and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only. + Name *string `type:"string" required:"true"` + + // The value of the header to add. 
Must be less than 2048 characters, and must + // not contain newline characters ("\r" or "\n"). + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExtensionField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtensionField) GoString() string { + return s.String() +} + +// Given a list of verified identities, describes their DKIM attributes. The +// DKIM attributes of an email address identity includes whether DKIM signing +// is individually enabled or disabled for that address. The DKIM attributes +// of a domain name identity includes whether DKIM signing is enabled, as well +// as the DNS records (tokens) that must remain published in the domain name's +// DNS. +type GetIdentityDkimAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more verified identities - email addresses, domains, or + // both. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityDkimAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityDkimAttributesInput) GoString() string { + return s.String() +} + +// Represents a list of all the DKIM attributes for the specified identity. +type GetIdentityDkimAttributesOutput struct { + _ struct{} `type:"structure"` + + // The DKIM attributes for an email address or a domain. + DkimAttributes map[string]*IdentityDkimAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityDkimAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityDkimAttributesOutput) GoString() string { + return s.String() +} + +type GetIdentityNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more identities. 
You can specify an identity by using its + // name or by using its Amazon Resource Name (ARN). Examples: user@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityNotificationAttributesInput) GoString() string { + return s.String() +} + +// Describes whether an identity has Amazon Simple Notification Service (Amazon +// SNS) topics set for bounce, complaint, and/or delivery notifications, and +// specifies whether feedback forwarding is enabled for bounce and complaint +// notifications. +type GetIdentityNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of Identity to IdentityNotificationAttributes. + NotificationAttributes map[string]*IdentityNotificationAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityNotificationAttributesOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to retrieve the text of a list +// of authorization policies applying to an identity. +type GetIdentityPoliciesInput struct { + _ struct{} `type:"structure"` + + // The identity for which the policies will be retrieved. You can specify an + // identity by using its name or by using its Amazon Resource Name (ARN). Examples: + // user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // A list of the names of policies to be retrieved. 
You can retrieve a maximum + // of 20 policies at a time. If you do not know the names of the policies that + // are attached to the identity, you can use ListIdentityPolicies. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoliciesInput) GoString() string { + return s.String() +} + +// Represents a map of policy names to policies returned from a successful GetIdentityPolicies +// request. +type GetIdentityPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A map of policy names to policies. + Policies map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoliciesOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to provide the verification +// attributes for a list of identities. +type GetIdentityVerificationAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of identities. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityVerificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityVerificationAttributesInput) GoString() string { + return s.String() +} + +// Represents the verification attributes for a list of identities. +type GetIdentityVerificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of Identities to IdentityVerificationAttributes objects. 
+ VerificationAttributes map[string]*IdentityVerificationAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityVerificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityVerificationAttributesOutput) GoString() string { + return s.String() +} + +type GetSendQuotaInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetSendQuotaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendQuotaInput) GoString() string { + return s.String() +} + +// Represents the user's current activity limits returned from a successful +// GetSendQuota request. +type GetSendQuotaOutput struct { + _ struct{} `type:"structure"` + + // The maximum number of emails the user is allowed to send in a 24-hour interval. + // A value of -1 signifies an unlimited quota. + Max24HourSend *float64 `type:"double"` + + // The maximum number of emails that Amazon SES can accept from the user's account + // per second. + // + // The rate at which Amazon SES accepts the user's messages might be less than + // the maximum send rate. + MaxSendRate *float64 `type:"double"` + + // The number of emails sent during the previous 24 hours. 
+ SentLast24Hours *float64 `type:"double"` +} + +// String returns the string representation +func (s GetSendQuotaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendQuotaOutput) GoString() string { + return s.String() +} + +type GetSendStatisticsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetSendStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendStatisticsInput) GoString() string { + return s.String() +} + +// Represents a list of SendDataPoint items returned from a successful GetSendStatistics +// request. This list contains aggregated data from the previous two weeks of +// sending activity. +type GetSendStatisticsOutput struct { + _ struct{} `type:"structure"` + + // A list of data points, each of which represents 15 minutes of activity. + SendDataPoints []*SendDataPoint `type:"list"` +} + +// String returns the string representation +func (s GetSendStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendStatisticsOutput) GoString() string { + return s.String() +} + +// Represents the DKIM attributes of a verified email address or a domain. +type IdentityDkimAttributes struct { + _ struct{} `type:"structure"` + + // True if DKIM signing is enabled for email sent from the identity; false otherwise. + DkimEnabled *bool `type:"boolean" required:"true"` + + // A set of character strings that represent the domain's identity. Using these + // tokens, you will need to create DNS CNAME records that point to DKIM public + // keys hosted by Amazon SES. Amazon Web Services will eventually detect that + // you have updated your DNS records; this detection process may take up to + // 72 hours. 
Upon successful detection, Amazon SES will be able to DKIM-sign + // email originating from that domain. (This only applies to domain identities, + // not email address identities.) + // + // For more information about creating DNS records using DKIM tokens, go to + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + DkimTokens []*string `type:"list"` + + // Describes whether Amazon SES has successfully verified the DKIM DNS records + // (tokens) published in the domain name's DNS. (This only applies to domain + // identities, not email address identities.) + DkimVerificationStatus *string `type:"string" required:"true" enum:"VerificationStatus"` +} + +// String returns the string representation +func (s IdentityDkimAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityDkimAttributes) GoString() string { + return s.String() +} + +// Represents the notification attributes of an identity, including whether +// an identity has Amazon Simple Notification Service (Amazon SNS) topics set +// for bounce, complaint, and/or delivery notifications, and whether feedback +// forwarding is enabled for bounce and complaint notifications. +type IdentityNotificationAttributes struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish bounce notifications. + BounceTopic *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish complaint notifications. + ComplaintTopic *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish delivery notifications. + DeliveryTopic *string `type:"string" required:"true"` + + // Describes whether Amazon SES will forward bounce and complaint notifications + // as email. 
true indicates that Amazon SES will forward bounce and complaint + // notifications as email, while false indicates that bounce and complaint notifications + // will be published only to the specified bounce and complaint Amazon SNS topics. + ForwardingEnabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s IdentityNotificationAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityNotificationAttributes) GoString() string { + return s.String() +} + +// Represents the verification attributes of a single identity. +type IdentityVerificationAttributes struct { + _ struct{} `type:"structure"` + + // The verification status of the identity: "Pending", "Success", "Failed", + // or "TemporaryFailure". + VerificationStatus *string `type:"string" required:"true" enum:"VerificationStatus"` + + // The verification token for a domain identity. Null for email address identities. + VerificationToken *string `type:"string"` +} + +// String returns the string representation +func (s IdentityVerificationAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityVerificationAttributes) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action calls an AWS Lambda function +// and, optionally, publishes a notification to Amazon Simple Notification Service +// (Amazon SNS). +// +// To enable Amazon SES to call your AWS Lambda function or to publish to an +// Amazon SNS topic of another account, Amazon SES must have permission to access +// those resources. For information about giving permissions, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). 
+// +// For information about using AWS Lambda actions in receipt rules, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-lambda.html). +type LambdaAction struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function. An example of + // an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. + // For more information about AWS Lambda, see the AWS Lambda Developer Guide + // (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). + FunctionArn *string `type:"string" required:"true"` + + // The invocation type of the AWS Lambda function. An invocation type of RequestResponse + // means that the execution of the function will immediately result in a response, + // and a value of Event means that the function will be invoked asynchronously. + // The default value is Event. For information about AWS Lambda invocation types, + // see the AWS Lambda Developer Guide (http://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). + // + // There is a 30-second timeout on RequestResponse invocations. You should + // use Event invocation in most cases. Use RequestResponse only when you want + // to make a mail flow decision, such as whether to stop the receipt rule or + // the receipt rule set. + InvocationType *string `type:"string" enum:"InvocationType"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). 
+ TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s LambdaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaAction) GoString() string { + return s.String() +} + +// Represents a request instructing the service to list all identities for the +// AWS Account. +type ListIdentitiesInput struct { + _ struct{} `type:"structure"` + + // The type of the identities to list. Possible values are "EmailAddress" and + // "Domain". If this parameter is omitted, then all identities will be listed. + IdentityType *string `type:"string" enum:"IdentityType"` + + // The maximum number of identities per page. Possible values are 1-1000 inclusive. + MaxItems *int64 `type:"integer"` + + // The token to use for pagination. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesInput) GoString() string { + return s.String() +} + +// Represents a list of all verified identities for the AWS Account. +type ListIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // A list of identities. + Identities []*string `type:"list" required:"true"` + + // The token used for pagination. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to list all authorization policies, +// by name, applying to an identity. +type ListIdentityPoliciesInput struct { + _ struct{} `type:"structure"` + + // The identity that is associated with the policy for which the policies will + // be listed. 
You can specify an identity by using its name or by using its + // Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListIdentityPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoliciesInput) GoString() string { + return s.String() +} + +// Represents a list of policy names returned from a successful ListIdentityPolicies +// request. +type ListIdentityPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of names of policies that apply to the specified identity. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListIdentityPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoliciesOutput) GoString() string { + return s.String() +} + +type ListReceiptFiltersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListReceiptFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptFiltersInput) GoString() string { + return s.String() +} + +type ListReceiptFiltersOutput struct { + _ struct{} `type:"structure"` + + // A list of IP address filter data structures, which each consist of a name, + // an IP address range, and whether to allow or block mail from it. 
+ Filters []*ReceiptFilter `type:"list"` +} + +// String returns the string representation +func (s ListReceiptFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptFiltersOutput) GoString() string { + return s.String() +} + +type ListReceiptRuleSetsInput struct { + _ struct{} `type:"structure"` + + // A token returned from a previous call to ListReceiptRuleSets to indicate + // the position in the receipt rule set list. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListReceiptRuleSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptRuleSetsInput) GoString() string { + return s.String() +} + +type ListReceiptRuleSetsOutput struct { + _ struct{} `type:"structure"` + + // A token indicating that there are additional receipt rule sets available + // to be listed. Pass this token to successive calls of ListReceiptRuleSets + // to retrieve up to 100 receipt rule sets at a time. + NextToken *string `type:"string"` + + // The metadata for the currently active receipt rule set. The metadata consists + // of the rule set name and the timestamp of when the rule set was created. 
+ RuleSets []*ReceiptRuleSetMetadata `type:"list"` +} + +// String returns the string representation +func (s ListReceiptRuleSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptRuleSetsOutput) GoString() string { + return s.String() +} + +type ListVerifiedEmailAddressesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListVerifiedEmailAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVerifiedEmailAddressesInput) GoString() string { + return s.String() +} + +// Represents a list of all the email addresses verified for the current user. +type ListVerifiedEmailAddressesOutput struct { + _ struct{} `type:"structure"` + + // A list of email addresses that have been verified. + VerifiedEmailAddresses []*string `type:"list"` +} + +// String returns the string representation +func (s ListVerifiedEmailAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVerifiedEmailAddressesOutput) GoString() string { + return s.String() +} + +// Represents the message to be sent, composed of a subject and a body. +type Message struct { + _ struct{} `type:"structure"` + + // The message body. + Body *Body `type:"structure" required:"true"` + + // The subject of the message: A short summary of the content, which will appear + // in the recipient's inbox. + Subject *Content `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Message) GoString() string { + return s.String() +} + +// Message-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. 
+// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type MessageDsn struct { + _ struct{} `type:"structure"` + + // When the message was received by the reporting mail transfer agent (MTA), + // in RFC 822 (https://www.ietf.org/rfc/rfc0822.txt) date-time format. + ArrivalDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Additional X-headers to include in the DSN. + ExtensionFields []*ExtensionField `type:"list"` + + // The reporting MTA that attempted to deliver the message, formatted as specified + // in RFC 3464 (https://tools.ietf.org/html/rfc3464) (mta-name-type; mta-name). + // The default value is dns; inbound-smtp.[region].amazonaws.com. + ReportingMta *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MessageDsn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageDsn) GoString() string { + return s.String() +} + +// Represents a request instructing the service to apply an authorization policy +// to an identity. +type PutIdentityPolicyInput struct { + _ struct{} `type:"structure"` + + // The identity to which the policy will apply. You can specify an identity + // by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // The text of the policy in JSON format. The policy cannot exceed 4 KB. + // + // For information about the syntax of sending authorization policies, see + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html). 
+ Policy *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // The policy name cannot exceed 64 characters and can only include alphanumeric + // characters, dashes, and underscores. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutIdentityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIdentityPolicyInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type PutIdentityPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutIdentityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIdentityPolicyOutput) GoString() string { + return s.String() +} + +// Represents the raw data of the message. +type RawMessage struct { + _ struct{} `type:"structure"` + + // The raw data of the message. The client must ensure that the message format + // complies with Internet email standards regarding email header fields, MIME + // types, MIME encoding, and base64 encoding (if necessary). + // + // The To:, CC:, and BCC: headers in the raw message can contain a group list. + // + // If you are using SendRawEmail with sending authorization, you can include + // X-headers in the raw message to specify the "Source," "From," and "Return-Path" + // addresses. For more information, see the documentation for SendRawEmail. + // + // Do not include these X-headers in the DKIM signature, because they are removed + // by Amazon SES before sending the email. For more information, go to the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). 
+ Data []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s RawMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RawMessage) GoString() string { + return s.String() +} + +// An action that Amazon SES can take when it receives an email on behalf of +// one or more email addresses or domains that you own. An instance of this +// data type can represent only one action. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +type ReceiptAction struct { + _ struct{} `type:"structure"` + + // Adds a header to the received email. + AddHeaderAction *AddHeaderAction `type:"structure"` + + // Rejects the received email by returning a bounce response to the sender and, + // optionally, publishes a notification to Amazon Simple Notification Service + // (Amazon SNS). + BounceAction *BounceAction `type:"structure"` + + // Calls an AWS Lambda function, and optionally, publishes a notification to + // Amazon SNS. + LambdaAction *LambdaAction `type:"structure"` + + // Saves the received message to an Amazon Simple Storage Service (Amazon S3) + // bucket and, optionally, publishes a notification to Amazon SNS. + S3Action *S3Action `type:"structure"` + + // Publishes the email content within a notification to Amazon SNS. + SNSAction *SNSAction `type:"structure"` + + // Terminates the evaluation of the receipt rule set and optionally publishes + // a notification to Amazon SNS. + StopAction *StopAction `type:"structure"` + + // Calls Amazon WorkMail and, optionally, publishes a notification to Amazon + // SNS. 
+ WorkmailAction *WorkmailAction `type:"structure"` +} + +// String returns the string representation +func (s ReceiptAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptAction) GoString() string { + return s.String() +} + +// A receipt IP address filter enables you to specify whether to accept or reject +// mail originating from an IP address or range of IP addresses. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +type ReceiptFilter struct { + _ struct{} `type:"structure"` + + // A structure that provides the IP addresses to block or allow, and whether + // to block or allow incoming mail from them. + IpFilter *ReceiptIpFilter `type:"structure" required:"true"` + + // The name of the IP address filter. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReceiptFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptFilter) GoString() string { + return s.String() +} + +// A receipt IP address filter enables you to specify whether to accept or reject +// mail originating from an IP address or range of IP addresses. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +type ReceiptIpFilter struct { + _ struct{} `type:"structure"` + + // A single IP address or a range of IP addresses that you want to block or + // allow, specified in Classless Inter-Domain Routing (CIDR) notation. 
An example + // of a single email address is 10.0.0.1. An example of a range of IP addresses + // is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317 (https://tools.ietf.org/html/rfc2317). + Cidr *string `type:"string" required:"true"` + + // Indicates whether to block or allow incoming mail from the specified IP addresses. + Policy *string `type:"string" required:"true" enum:"ReceiptFilterPolicy"` +} + +// String returns the string representation +func (s ReceiptIpFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptIpFilter) GoString() string { + return s.String() +} + +// Receipt rules enable you to specify which actions Amazon SES should take +// when it receives mail on behalf of one or more email addresses or domains +// that you own. +// +// Each receipt rule defines a set of email addresses or domains to which it +// applies. If the email addresses or domains match at least one recipient address +// of the message, Amazon SES executes all of the receipt rule's actions on +// the message. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +type ReceiptRule struct { + _ struct{} `type:"structure"` + + // An ordered list of actions to perform on messages that match at least one + // of the recipient email addresses or domains specified in the receipt rule. + Actions []*ReceiptAction `type:"list"` + + // If true, the receipt rule is active. The default value is true. + Enabled *bool `type:"boolean"` + + // The name of the receipt rule. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. 
+ Name *string `type:"string" required:"true"` + + // The recipient domains and email addresses to which the receipt rule applies. + // If this field is not specified, this rule will match all recipients under + // all verified domains. + Recipients []*string `type:"list"` + + // If true, then messages to which this receipt rule applies are scanned for + // spam and viruses. The default value is true. + ScanEnabled *bool `type:"boolean"` + + // Specifies whether Amazon SES should require that incoming email is delivered + // over a connection encrypted with Transport Layer Security (TLS). If this + // parameter is set to Require, Amazon SES will bounce emails that are not received + // over TLS. The default is Optional. + TlsPolicy *string `type:"string" enum:"TlsPolicy"` +} + +// String returns the string representation +func (s ReceiptRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptRule) GoString() string { + return s.String() +} + +// Information about a receipt rule set. +// +// A receipt rule set is a collection of rules that specify what Amazon SES +// should do with mail it receives on behalf of your account's verified domains. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +type ReceiptRuleSetMetadata struct { + _ struct{} `type:"structure"` + + // The date and time the receipt rule set was created. + CreatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the receipt rule set. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s ReceiptRuleSetMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptRuleSetMetadata) GoString() string { + return s.String() +} + +// Recipient-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type RecipientDsnFields struct { + _ struct{} `type:"structure"` + + // The action performed by the reporting mail transfer agent (MTA) as a result + // of its attempt to deliver the message to the recipient address. This is required + // by RFC 3464 (https://tools.ietf.org/html/rfc3464). + Action *string `type:"string" required:"true" enum:"DsnAction"` + + // An extended explanation of what went wrong; this is usually an SMTP response. + // See RFC 3463 (https://tools.ietf.org/html/rfc3463) for the correct formatting + // of this parameter. + DiagnosticCode *string `type:"string"` + + // Additional X-headers to include in the DSN. + ExtensionFields []*ExtensionField `type:"list"` + + // The email address to which the message was ultimately delivered. This corresponds + // to the Final-Recipient in the DSN. If not specified, FinalRecipient will + // be set to the Recipient specified in the BouncedRecipientInfo structure. + // Either FinalRecipient or the recipient in BouncedRecipientInfo must be a + // recipient of the original bounced message. + // + // Do not prepend the FinalRecipient email address with rfc 822;, as described + // in RFC 3798 (https://tools.ietf.org/html/rfc3798). + FinalRecipient *string `type:"string"` + + // The time the final delivery attempt was made, in RFC 822 (https://www.ietf.org/rfc/rfc0822.txt) + // date-time format. 
+ LastAttemptDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The MTA to which the remote MTA attempted to deliver the message, formatted + // as specified in RFC 3464 (https://tools.ietf.org/html/rfc3464) (mta-name-type; + // mta-name). This parameter typically applies only to propagating synchronous + // bounces. + RemoteMta *string `type:"string"` + + // The status code that indicates what went wrong. This is required by RFC 3464 + // (https://tools.ietf.org/html/rfc3464). + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RecipientDsnFields) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecipientDsnFields) GoString() string { + return s.String() +} + +type ReorderReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // A list of the specified receipt rule set's receipt rules in the order that + // you want to put them. + RuleNames []*string `type:"list" required:"true"` + + // The name of the receipt rule set to reorder. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReorderReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReorderReceiptRuleSetInput) GoString() string { + return s.String() +} + +type ReorderReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReorderReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReorderReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action saves the received message to +// an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes +// a notification to Amazon Simple Notification Service (Amazon SNS). 
+// +// To enable Amazon SES to write emails to your Amazon S3 bucket, use an AWS +// KMS key to encrypt your emails, or publish to an Amazon SNS topic of another +// account, Amazon SES must have permission to access those resources. For information +// about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// When you save your emails to an Amazon S3 bucket, the maximum email size +// (including headers) is 30 MB. Emails larger than that will bounce. For information +// about specifying Amazon S3 actions in receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html). +type S3Action struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket to which to save the received email. + BucketName *string `type:"string" required:"true"` + + // The customer master key that Amazon SES should use to encrypt your emails + // before saving them to the Amazon S3 bucket. You can use the default master + // key or a custom master key you created in AWS KMS as follows: + // + // To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. + // For example, if your AWS account ID is 123456789012 and you want to use the + // default master key in the US West (Oregon) region, the ARN of the default + // master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If + // you use the default master key, you don't need to perform any extra steps + // to give Amazon SES permission to use the key. To use a custom master key + // you created in AWS KMS, provide the ARN of the master key and ensure that + // you add a statement to your key's policy to give Amazon SES permission to + // use it. 
For more information about giving permissions, see the Amazon SES + // Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + // For more information about key policies, see the AWS KMS Developer Guide + // (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html). If + // you do not specify a master key, Amazon SES will not encrypt your emails. + // + // Your mail is encrypted by Amazon SES using the Amazon S3 encryption client + // before the mail is submitted to Amazon S3 for storage. It is not encrypted + // using Amazon S3 server-side encryption. This means that you must use the + // Amazon S3 encryption client to decrypt the email after retrieving it from + // Amazon S3, as the service has no access to use your AWS KMS keys for decryption. + // This encryption client is currently available with the AWS Java SDK (https://aws.amazon.com/sdk-for-java/) + // and AWS Ruby SDK (https://aws.amazon.com/sdk-for-ruby/) only. For more information + // about client-side encryption using AWS KMS master keys, see the Amazon S3 + // Developer Guide (http://alpha-docs-aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). + KmsKeyArn *string `type:"string"` + + // The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory + // name that enables you to store similar data under the same directory in a + // bucket. + ObjectKeyPrefix *string `type:"string"` + + // The ARN of the Amazon SNS topic to notify when the message is saved to the + // Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). 
+ TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s S3Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Action) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action publishes a notification to +// Amazon Simple Notification Service (Amazon SNS). This action includes a complete +// copy of the email content in the Amazon SNS notifications. Amazon SNS notifications +// for all other actions simply provide information about the email. They do +// not include the email content itself. +// +// If you own the Amazon SNS topic, you don't need to do anything to give Amazon +// SES permission to publish emails to it. However, if you don't own the Amazon +// SNS topic, you need to attach a policy to the topic to give Amazon SES permissions +// to access it. For information about giving permissions, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// You can only publish emails that are 150 KB or less (including the header) +// to Amazon SNS. Larger emails will bounce. If you anticipate emails larger +// than 150 KB, use the S3 action instead. For information about using a receipt +// rule to publish an Amazon SNS notification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html). +type SNSAction struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example + // of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). 
+ TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SNSAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SNSAction) GoString() string { + return s.String() +} + +// Request object for sending a simple/complex bounce. It contains all of the +// information needed to generate a basic DSN or a fully-customized DSN. +type SendBounceInput struct { + _ struct{} `type:"structure"` + + // The address to use in the "From" header of the bounce message. This must + // be an identity that you have verified with Amazon SES. + BounceSender *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the address in the "From" header of the bounce. For more information + // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + BounceSenderArn *string `type:"string"` + + // A list of recipients of the bounced message, including the information required + // to create the Delivery Status Notifications (DSNs) for the recipients. You + // must specify at least one BouncedRecipientInfo in the list. + BouncedRecipientInfoList []*BouncedRecipientInfo `type:"list" required:"true"` + + // Human-readable text for the bounce message to explain the failure. If not + // specified, the text will be auto-generated based on the bounced recipient + // information. + Explanation *string `type:"string"` + + // Message-related DSN fields. If not specified, Amazon SES will choose the + // values. + MessageDsn *MessageDsn `type:"structure"` + + // The message ID of the message to be bounced. 
+ OriginalMessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendBounceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBounceInput) GoString() string { + return s.String() +} + +type SendBounceOutput struct { + _ struct{} `type:"structure"` + + // The message ID of the bounce message. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendBounceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBounceOutput) GoString() string { + return s.String() +} + +// Represents sending statistics data. Each SendDataPoint contains statistics +// for a 15-minute period of sending activity. +type SendDataPoint struct { + _ struct{} `type:"structure"` + + // Number of emails that have bounced. + Bounces *int64 `type:"long"` + + // Number of unwanted emails that were rejected by recipients. + Complaints *int64 `type:"long"` + + // Number of emails that have been enqueued for sending. + DeliveryAttempts *int64 `type:"long"` + + // Number of emails rejected by Amazon SES. + Rejects *int64 `type:"long"` + + // Time of the data point. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SendDataPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDataPoint) GoString() string { + return s.String() +} + +// Represents a request instructing the service to send a single email message. +// +// This datatype can be used in application code to compose a message consisting +// of source, destination, message, reply-to, and return-path parts. This object +// can then be sent using the SendEmail action. 
+type SendEmailInput struct { + _ struct{} `type:"structure"` + + // The destination for this email, composed of To:, CC:, and BCC: fields. + Destination *Destination `type:"structure" required:"true"` + + // The message to be sent. + Message *Message `type:"structure" required:"true"` + + // The reply-to email address(es) for the message. If the recipient replies + // to the message, each reply-to address will receive the reply. + ReplyToAddresses []*string `type:"list"` + + // The email address to which bounces and complaints are to be forwarded when + // feedback forwarding is enabled. If the message cannot be delivered to the + // recipient, then an error message will be returned from the recipient's ISP; + // this message will then be forwarded to the email address specified by the + // ReturnPath parameter. The ReturnPath parameter is never overwritten. This + // email address must be either individually verified with Amazon SES, or from + // a domain that has been verified with Amazon SES. + ReturnPath *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + ReturnPathArn *string `type:"string"` + + // The email address that is sending the email. 
This email address must be either + // individually verified with Amazon SES, or from a domain that has been verified + // with Amazon SES. For information about verifying identities, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). + // + // If you are sending on behalf of another user and have been permitted to + // do so by a sending authorization policy, then you must also specify the SourceArn + // parameter. For more information about sending authorization, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // + // In all cases, the email address must be 7-bit ASCII. If the text must contain + // any other characters, then you must use MIME encoded-word syntax (RFC 2047) + // instead of a literal string. MIME encoded-word syntax uses the following + // form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047 + // (http://tools.ietf.org/html/rfc2047). + Source *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). 
+ SourceArn *string `type:"string"` +} + +// String returns the string representation +func (s SendEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendEmailInput) GoString() string { + return s.String() +} + +// Represents a unique message ID returned from a successful SendEmail request. +type SendEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendEmail action. + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendEmailOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to send a raw email message. +// +// This datatype can be used in application code to compose a message consisting +// of source, destination, and raw message text. This object can then be sent +// using the SendRawEmail action. +type SendRawEmailInput struct { + _ struct{} `type:"structure"` + + // A list of destinations for the message, consisting of To:, CC:, and BCC: + // addresses. + Destinations []*string `type:"list"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to specify a particular "From" address in the header of the raw email. + // + // Instead of using this parameter, you can use the X-header X-SES-FROM-ARN + // in the raw message of the email. If you use both the FromArn parameter and + // the corresponding X-header, Amazon SES uses the value of the FromArn parameter. 
+ // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + FromArn *string `type:"string"` + + // The raw text of the message. The client is responsible for ensuring the following: + // + // Message must contain a header and a body, separated by a blank line. All + // required header fields must be present. Each part of a multipart MIME message + // must be formatted properly. MIME content types must be among those supported + // by Amazon SES. For more information, go to the Amazon SES Developer Guide + // (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html). + // Content must be base64-encoded, if MIME requires it. + RawMessage *RawMessage `type:"structure" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN + // in the raw message of the email. If you use both the ReturnPathArn parameter + // and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn + // parameter. 
+ // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + ReturnPathArn *string `type:"string"` + + // The identity's email address. If you do not provide a value for this parameter, + // you must specify a "From" address in the raw text of the message. (You can + // also specify both.) + // + // By default, the string must be 7-bit ASCII. If the text must contain any + // other characters, then you must use MIME encoded-word syntax (RFC 2047) instead + // of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. + // For more information, see RFC 2047 (http://tools.ietf.org/html/rfc2047). + // + // If you specify the Source parameter and have feedback forwarding enabled, + // then bounces and complaints will be sent to this email address. This takes + // precedence over any Return-Path header that you might include in the raw + // text of the message. + Source *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. + // + // Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN + // in the raw message of the email. 
If you use both the SourceArn parameter + // and the corresponding X-header, Amazon SES uses the value of the SourceArn + // parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + SourceArn *string `type:"string"` +} + +// String returns the string representation +func (s SendRawEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendRawEmailInput) GoString() string { + return s.String() +} + +// Represents a unique message ID returned from a successful SendRawEmail request. +type SendRawEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendRawEmail action. + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendRawEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendRawEmailOutput) GoString() string { + return s.String() +} + +type SetActiveReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to make active. Setting this value to null + // disables all email receiving. 
+ RuleSetName *string `type:"string"` +} + +// String returns the string representation +func (s SetActiveReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetActiveReceiptRuleSetInput) GoString() string { + return s.String() +} + +type SetActiveReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetActiveReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetActiveReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to enable or disable DKIM signing +// for an identity. +type SetIdentityDkimEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether DKIM signing is enabled for an identity. Set to true to enable + // DKIM signing for this identity; false to disable it. + DkimEnabled *bool `type:"boolean" required:"true"` + + // The identity for which DKIM signing should be enabled or disabled. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIdentityDkimEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityDkimEnabledInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. 
+type SetIdentityDkimEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityDkimEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityDkimEnabledOutput) GoString() string { + return s.String() +} + +type SetIdentityFeedbackForwardingEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether Amazon SES will forward bounce and complaint notifications as + // email. true specifies that Amazon SES will forward bounce and complaint notifications + // as email, in addition to any Amazon SNS topic publishing otherwise specified. + // false specifies that Amazon SES will publish bounce and complaint notifications + // only through Amazon SNS. This value can only be set to false when Amazon + // SNS topics are set for both Bounce and Complaint notification types. + ForwardingEnabled *bool `type:"boolean" required:"true"` + + // The identity for which to set bounce and complaint notification forwarding. + // Examples: user@example.com, example.com. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIdentityFeedbackForwardingEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityFeedbackForwardingEnabledInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. 
+type SetIdentityFeedbackForwardingEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityFeedbackForwardingEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityFeedbackForwardingEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to set or clear an identity's notification topic. +type SetIdentityNotificationTopicInput struct { + _ struct{} `type:"structure"` + + // The identity for which the Amazon SNS topic will be set. You can specify + // an identity by using its name or by using its Amazon Resource Name (ARN). + // Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + Identity *string `type:"string" required:"true"` + + // The type of notifications that will be published to the specified Amazon + // SNS topic. + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic. If the parameter + // is omitted from the request or a null value is passed, SnsTopic is cleared + // and publishing is disabled. + SnsTopic *string `type:"string"` +} + +// String returns the string representation +func (s SetIdentityNotificationTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityNotificationTopicInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. 
+type SetIdentityNotificationTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityNotificationTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityNotificationTopicOutput) GoString() string { + return s.String() +} + +type SetReceiptRulePositionInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule after which to place the specified receipt rule. + After *string `type:"string"` + + // The name of the receipt rule to reposition. + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that contains the receipt rule to reposition. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetReceiptRulePositionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetReceiptRulePositionInput) GoString() string { + return s.String() +} + +type SetReceiptRulePositionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetReceiptRulePositionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetReceiptRulePositionOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action terminates the evaluation of +// the receipt rule set and, optionally, publishes a notification to Amazon +// Simple Notification Service (Amazon SNS). +// +// For information about setting a stop action in a receipt rule, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-stop.html). +type StopAction struct { + _ struct{} `type:"structure"` + + // The scope to which the Stop action applies. That is, what is being stopped. 
+ Scope *string `type:"string" required:"true" enum:"StopScope"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s StopAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopAction) GoString() string { + return s.String() +} + +type UpdateReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the updated receipt rule information. + Rule *ReceiptRule `type:"structure" required:"true"` + + // The name of the receipt rule set to which the receipt rule belongs. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateReceiptRuleInput) GoString() string { + return s.String() +} + +type UpdateReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateReceiptRuleOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to begin DKIM verification for +// a domain. +type VerifyDomainDkimInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to be verified for Easy DKIM signing. 
+ Domain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainDkimInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainDkimInput) GoString() string { + return s.String() +} + +// Represents the DNS records that must be published in the domain name's DNS +// to complete DKIM setup. +type VerifyDomainDkimOutput struct { + _ struct{} `type:"structure"` + + // A set of character strings that represent the domain's identity. If the identity + // is an email address, the tokens represent the domain of that address. + // + // Using these tokens, you will need to create DNS CNAME records that point + // to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually + // detect that you have updated your DNS records; this detection process may + // take up to 72 hours. Upon successful detection, Amazon SES will be able to + // DKIM-sign emails originating from that domain. + // + // For more information about creating DNS records using DKIM tokens, go to + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + DkimTokens []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainDkimOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainDkimOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to begin domain verification. +type VerifyDomainIdentityInput struct { + _ struct{} `type:"structure"` + + // The domain to be verified. 
+ Domain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainIdentityInput) GoString() string { + return s.String() +} + +// Represents a token used for domain ownership verification. +type VerifyDomainIdentityOutput struct { + _ struct{} `type:"structure"` + + // A TXT record that must be placed in the DNS settings for the domain, in order + // to complete domain verification. + VerificationToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainIdentityOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to begin email address verification. +type VerifyEmailAddressInput struct { + _ struct{} `type:"structure"` + + // The email address to be verified. + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyEmailAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailAddressInput) GoString() string { + return s.String() +} + +type VerifyEmailAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s VerifyEmailAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailAddressOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to begin email address verification. +type VerifyEmailIdentityInput struct { + _ struct{} `type:"structure"` + + // The email address to be verified. 
+ EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyEmailIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailIdentityInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type VerifyEmailIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s VerifyEmailIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailIdentityOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action calls Amazon WorkMail and, optionally, +// publishes a notification to Amazon Simple Notification Service (Amazon SNS). +// You will typically not use this action directly because Amazon WorkMail adds +// the rule automatically during its setup procedure. +// +// For information using a receipt rule to call Amazon WorkMail, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-workmail.html). +type WorkmailAction struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail + // organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7. + // For information about Amazon WorkMail organizations, see the Amazon WorkMail + // Administrator Guide (http://docs.aws.amazon.com/workmail/latest/adminguide/organizations_overview.html). + OrganizationArn *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. 
+ // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s WorkmailAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkmailAction) GoString() string { + return s.String() +} + +const ( + // @enum BounceType + BounceTypeDoesNotExist = "DoesNotExist" + // @enum BounceType + BounceTypeMessageTooLarge = "MessageTooLarge" + // @enum BounceType + BounceTypeExceededQuota = "ExceededQuota" + // @enum BounceType + BounceTypeContentRejected = "ContentRejected" + // @enum BounceType + BounceTypeUndefined = "Undefined" + // @enum BounceType + BounceTypeTemporaryFailure = "TemporaryFailure" +) + +const ( + // @enum DsnAction + DsnActionFailed = "failed" + // @enum DsnAction + DsnActionDelayed = "delayed" + // @enum DsnAction + DsnActionDelivered = "delivered" + // @enum DsnAction + DsnActionRelayed = "relayed" + // @enum DsnAction + DsnActionExpanded = "expanded" +) + +const ( + // @enum IdentityType + IdentityTypeEmailAddress = "EmailAddress" + // @enum IdentityType + IdentityTypeDomain = "Domain" +) + +const ( + // @enum InvocationType + InvocationTypeEvent = "Event" + // @enum InvocationType + InvocationTypeRequestResponse = "RequestResponse" +) + +const ( + // @enum NotificationType + NotificationTypeBounce = "Bounce" + // @enum NotificationType + NotificationTypeComplaint = "Complaint" + // @enum NotificationType + NotificationTypeDelivery = "Delivery" +) + +const ( + // @enum ReceiptFilterPolicy + ReceiptFilterPolicyBlock = "Block" + // @enum ReceiptFilterPolicy + ReceiptFilterPolicyAllow = "Allow" +) + +const ( + // @enum StopScope + StopScopeRuleSet = "RuleSet" +) + +const ( + // @enum TlsPolicy + TlsPolicyRequire = "Require" + // @enum TlsPolicy + TlsPolicyOptional = "Optional" +) + +const ( + // @enum 
VerificationStatus + VerificationStatusPending = "Pending" + // @enum VerificationStatus + VerificationStatusSuccess = "Success" + // @enum VerificationStatus + VerificationStatusFailed = "Failed" + // @enum VerificationStatus + VerificationStatusTemporaryFailure = "TemporaryFailure" + // @enum VerificationStatus + VerificationStatusNotStarted = "NotStarted" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,965 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ses_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ses" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSES_CloneReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.CloneReceiptRuleSetInput{ + OriginalRuleSetName: aws.String("ReceiptRuleSetName"), // Required + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.CloneReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_CreateReceiptFilter() { + svc := ses.New(session.New()) + + params := &ses.CreateReceiptFilterInput{ + Filter: &ses.ReceiptFilter{ // Required + IpFilter: &ses.ReceiptIpFilter{ // Required + Cidr: aws.String("Cidr"), // Required + Policy: aws.String("ReceiptFilterPolicy"), // Required + }, + Name: aws.String("ReceiptFilterName"), // Required + }, + } + resp, err := svc.CreateReceiptFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_CreateReceiptRule() { + svc := ses.New(session.New()) + + params := &ses.CreateReceiptRuleInput{ + Rule: &ses.ReceiptRule{ // Required + Name: aws.String("ReceiptRuleName"), // Required + Actions: []*ses.ReceiptAction{ + { // Required + AddHeaderAction: &ses.AddHeaderAction{ + HeaderName: aws.String("HeaderName"), // Required + HeaderValue: aws.String("HeaderValue"), // Required + }, + BounceAction: &ses.BounceAction{ + Message: aws.String("BounceMessage"), // Required + Sender: aws.String("Address"), // Required + SmtpReplyCode: aws.String("BounceSmtpReplyCode"), // Required + StatusCode: aws.String("BounceStatusCode"), + TopicArn: aws.String("AmazonResourceName"), + }, + LambdaAction: &ses.LambdaAction{ + FunctionArn: aws.String("AmazonResourceName"), // Required + InvocationType: aws.String("InvocationType"), + TopicArn: aws.String("AmazonResourceName"), + }, + S3Action: &ses.S3Action{ + BucketName: aws.String("S3BucketName"), // Required + KmsKeyArn: aws.String("AmazonResourceName"), + ObjectKeyPrefix: aws.String("S3KeyPrefix"), + TopicArn: aws.String("AmazonResourceName"), + }, + SNSAction: &ses.SNSAction{ + TopicArn: aws.String("AmazonResourceName"), // Required + }, + StopAction: &ses.StopAction{ + Scope: aws.String("StopScope"), // Required + TopicArn: aws.String("AmazonResourceName"), + }, + 
WorkmailAction: &ses.WorkmailAction{ + OrganizationArn: aws.String("AmazonResourceName"), // Required + TopicArn: aws.String("AmazonResourceName"), + }, + }, + // More values... + }, + Enabled: aws.Bool(true), + Recipients: []*string{ + aws.String("Recipient"), // Required + // More values... + }, + ScanEnabled: aws.Bool(true), + TlsPolicy: aws.String("TlsPolicy"), + }, + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + After: aws.String("ReceiptRuleName"), + } + resp, err := svc.CreateReceiptRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_CreateReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.CreateReceiptRuleSetInput{ + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.CreateReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteIdentity() { + svc := ses.New(session.New()) + + params := &ses.DeleteIdentityInput{ + Identity: aws.String("Identity"), // Required + } + resp, err := svc.DeleteIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_DeleteIdentityPolicy() { + svc := ses.New(session.New()) + + params := &ses.DeleteIdentityPolicyInput{ + Identity: aws.String("Identity"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.DeleteIdentityPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteReceiptFilter() { + svc := ses.New(session.New()) + + params := &ses.DeleteReceiptFilterInput{ + FilterName: aws.String("ReceiptFilterName"), // Required + } + resp, err := svc.DeleteReceiptFilter(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteReceiptRule() { + svc := ses.New(session.New()) + + params := &ses.DeleteReceiptRuleInput{ + RuleName: aws.String("ReceiptRuleName"), // Required + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.DeleteReceiptRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DeleteReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.DeleteReceiptRuleSetInput{ + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.DeleteReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_DeleteVerifiedEmailAddress() { + svc := ses.New(session.New()) + + params := &ses.DeleteVerifiedEmailAddressInput{ + EmailAddress: aws.String("Address"), // Required + } + resp, err := svc.DeleteVerifiedEmailAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DescribeActiveReceiptRuleSet() { + svc := ses.New(session.New()) + + var params *ses.DescribeActiveReceiptRuleSetInput + resp, err := svc.DescribeActiveReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DescribeReceiptRule() { + svc := ses.New(session.New()) + + params := &ses.DescribeReceiptRuleInput{ + RuleName: aws.String("ReceiptRuleName"), // Required + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.DescribeReceiptRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_DescribeReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.DescribeReceiptRuleSetInput{ + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.DescribeReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_GetIdentityDkimAttributes() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityDkimAttributesInput{ + Identities: []*string{ // Required + aws.String("Identity"), // Required + // More values... + }, + } + resp, err := svc.GetIdentityDkimAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetIdentityNotificationAttributes() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityNotificationAttributesInput{ + Identities: []*string{ // Required + aws.String("Identity"), // Required + // More values... + }, + } + resp, err := svc.GetIdentityNotificationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetIdentityPolicies() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityPoliciesInput{ + Identity: aws.String("Identity"), // Required + PolicyNames: []*string{ // Required + aws.String("PolicyName"), // Required + // More values... + }, + } + resp, err := svc.GetIdentityPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetIdentityVerificationAttributes() { + svc := ses.New(session.New()) + + params := &ses.GetIdentityVerificationAttributesInput{ + Identities: []*string{ // Required + aws.String("Identity"), // Required + // More values... 
+ }, + } + resp, err := svc.GetIdentityVerificationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetSendQuota() { + svc := ses.New(session.New()) + + var params *ses.GetSendQuotaInput + resp, err := svc.GetSendQuota(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_GetSendStatistics() { + svc := ses.New(session.New()) + + var params *ses.GetSendStatisticsInput + resp, err := svc.GetSendStatistics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListIdentities() { + svc := ses.New(session.New()) + + params := &ses.ListIdentitiesInput{ + IdentityType: aws.String("IdentityType"), + MaxItems: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListIdentities(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListIdentityPolicies() { + svc := ses.New(session.New()) + + params := &ses.ListIdentityPoliciesInput{ + Identity: aws.String("Identity"), // Required + } + resp, err := svc.ListIdentityPolicies(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_ListReceiptFilters() { + svc := ses.New(session.New()) + + var params *ses.ListReceiptFiltersInput + resp, err := svc.ListReceiptFilters(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListReceiptRuleSets() { + svc := ses.New(session.New()) + + params := &ses.ListReceiptRuleSetsInput{ + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListReceiptRuleSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ListVerifiedEmailAddresses() { + svc := ses.New(session.New()) + + var params *ses.ListVerifiedEmailAddressesInput + resp, err := svc.ListVerifiedEmailAddresses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_PutIdentityPolicy() { + svc := ses.New(session.New()) + + params := &ses.PutIdentityPolicyInput{ + Identity: aws.String("Identity"), // Required + Policy: aws.String("Policy"), // Required + PolicyName: aws.String("PolicyName"), // Required + } + resp, err := svc.PutIdentityPolicy(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_ReorderReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.ReorderReceiptRuleSetInput{ + RuleNames: []*string{ // Required + aws.String("ReceiptRuleName"), // Required + // More values... 
+ }, + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.ReorderReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SendBounce() { + svc := ses.New(session.New()) + + params := &ses.SendBounceInput{ + BounceSender: aws.String("Address"), // Required + BouncedRecipientInfoList: []*ses.BouncedRecipientInfo{ // Required + { // Required + Recipient: aws.String("Address"), // Required + BounceType: aws.String("BounceType"), + RecipientArn: aws.String("AmazonResourceName"), + RecipientDsnFields: &ses.RecipientDsnFields{ + Action: aws.String("DsnAction"), // Required + Status: aws.String("DsnStatus"), // Required + DiagnosticCode: aws.String("DiagnosticCode"), + ExtensionFields: []*ses.ExtensionField{ + { // Required + Name: aws.String("ExtensionFieldName"), // Required + Value: aws.String("ExtensionFieldValue"), // Required + }, + // More values... + }, + FinalRecipient: aws.String("Address"), + LastAttemptDate: aws.Time(time.Now()), + RemoteMta: aws.String("RemoteMta"), + }, + }, + // More values... + }, + OriginalMessageId: aws.String("MessageId"), // Required + BounceSenderArn: aws.String("AmazonResourceName"), + Explanation: aws.String("Explanation"), + MessageDsn: &ses.MessageDsn{ + ReportingMta: aws.String("ReportingMta"), // Required + ArrivalDate: aws.Time(time.Now()), + ExtensionFields: []*ses.ExtensionField{ + { // Required + Name: aws.String("ExtensionFieldName"), // Required + Value: aws.String("ExtensionFieldValue"), // Required + }, + // More values... + }, + }, + } + resp, err := svc.SendBounce(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_SendEmail() { + svc := ses.New(session.New()) + + params := &ses.SendEmailInput{ + Destination: &ses.Destination{ // Required + BccAddresses: []*string{ + aws.String("Address"), // Required + // More values... + }, + CcAddresses: []*string{ + aws.String("Address"), // Required + // More values... + }, + ToAddresses: []*string{ + aws.String("Address"), // Required + // More values... + }, + }, + Message: &ses.Message{ // Required + Body: &ses.Body{ // Required + Html: &ses.Content{ + Data: aws.String("MessageData"), // Required + Charset: aws.String("Charset"), + }, + Text: &ses.Content{ + Data: aws.String("MessageData"), // Required + Charset: aws.String("Charset"), + }, + }, + Subject: &ses.Content{ // Required + Data: aws.String("MessageData"), // Required + Charset: aws.String("Charset"), + }, + }, + Source: aws.String("Address"), // Required + ReplyToAddresses: []*string{ + aws.String("Address"), // Required + // More values... + }, + ReturnPath: aws.String("Address"), + ReturnPathArn: aws.String("AmazonResourceName"), + SourceArn: aws.String("AmazonResourceName"), + } + resp, err := svc.SendEmail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SendRawEmail() { + svc := ses.New(session.New()) + + params := &ses.SendRawEmailInput{ + RawMessage: &ses.RawMessage{ // Required + Data: []byte("PAYLOAD"), // Required + }, + Destinations: []*string{ + aws.String("Address"), // Required + // More values... 
+ }, + FromArn: aws.String("AmazonResourceName"), + ReturnPathArn: aws.String("AmazonResourceName"), + Source: aws.String("Address"), + SourceArn: aws.String("AmazonResourceName"), + } + resp, err := svc.SendRawEmail(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetActiveReceiptRuleSet() { + svc := ses.New(session.New()) + + params := &ses.SetActiveReceiptRuleSetInput{ + RuleSetName: aws.String("ReceiptRuleSetName"), + } + resp, err := svc.SetActiveReceiptRuleSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetIdentityDkimEnabled() { + svc := ses.New(session.New()) + + params := &ses.SetIdentityDkimEnabledInput{ + DkimEnabled: aws.Bool(true), // Required + Identity: aws.String("Identity"), // Required + } + resp, err := svc.SetIdentityDkimEnabled(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetIdentityFeedbackForwardingEnabled() { + svc := ses.New(session.New()) + + params := &ses.SetIdentityFeedbackForwardingEnabledInput{ + ForwardingEnabled: aws.Bool(true), // Required + Identity: aws.String("Identity"), // Required + } + resp, err := svc.SetIdentityFeedbackForwardingEnabled(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_SetIdentityNotificationTopic() { + svc := ses.New(session.New()) + + params := &ses.SetIdentityNotificationTopicInput{ + Identity: aws.String("Identity"), // Required + NotificationType: aws.String("NotificationType"), // Required + SnsTopic: aws.String("NotificationTopic"), + } + resp, err := svc.SetIdentityNotificationTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_SetReceiptRulePosition() { + svc := ses.New(session.New()) + + params := &ses.SetReceiptRulePositionInput{ + RuleName: aws.String("ReceiptRuleName"), // Required + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + After: aws.String("ReceiptRuleName"), + } + resp, err := svc.SetReceiptRulePosition(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSES_UpdateReceiptRule() { + svc := ses.New(session.New()) + + params := &ses.UpdateReceiptRuleInput{ + Rule: &ses.ReceiptRule{ // Required + Name: aws.String("ReceiptRuleName"), // Required + Actions: []*ses.ReceiptAction{ + { // Required + AddHeaderAction: &ses.AddHeaderAction{ + HeaderName: aws.String("HeaderName"), // Required + HeaderValue: aws.String("HeaderValue"), // Required + }, + BounceAction: &ses.BounceAction{ + Message: aws.String("BounceMessage"), // Required + Sender: aws.String("Address"), // Required + SmtpReplyCode: aws.String("BounceSmtpReplyCode"), // Required + StatusCode: aws.String("BounceStatusCode"), + TopicArn: aws.String("AmazonResourceName"), + }, + LambdaAction: &ses.LambdaAction{ + FunctionArn: aws.String("AmazonResourceName"), // Required + InvocationType: aws.String("InvocationType"), + TopicArn: aws.String("AmazonResourceName"), + }, + S3Action: &ses.S3Action{ + BucketName: aws.String("S3BucketName"), // Required + KmsKeyArn: aws.String("AmazonResourceName"), + ObjectKeyPrefix: aws.String("S3KeyPrefix"), + TopicArn: aws.String("AmazonResourceName"), + }, + SNSAction: &ses.SNSAction{ + TopicArn: aws.String("AmazonResourceName"), // Required + }, + StopAction: &ses.StopAction{ + Scope: aws.String("StopScope"), // Required + TopicArn: aws.String("AmazonResourceName"), + }, + WorkmailAction: &ses.WorkmailAction{ + OrganizationArn: aws.String("AmazonResourceName"), // Required + TopicArn: aws.String("AmazonResourceName"), + }, + }, + // More values... + }, + Enabled: aws.Bool(true), + Recipients: []*string{ + aws.String("Recipient"), // Required + // More values... + }, + ScanEnabled: aws.Bool(true), + TlsPolicy: aws.String("TlsPolicy"), + }, + RuleSetName: aws.String("ReceiptRuleSetName"), // Required + } + resp, err := svc.UpdateReceiptRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_VerifyDomainDkim() { + svc := ses.New(session.New()) + + params := &ses.VerifyDomainDkimInput{ + Domain: aws.String("Domain"), // Required + } + resp, err := svc.VerifyDomainDkim(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_VerifyDomainIdentity() { + svc := ses.New(session.New()) + + params := &ses.VerifyDomainIdentityInput{ + Domain: aws.String("Domain"), // Required + } + resp, err := svc.VerifyDomainIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_VerifyEmailAddress() { + svc := ses.New(session.New()) + + params := &ses.VerifyEmailAddressInput{ + EmailAddress: aws.String("Address"), // Required + } + resp, err := svc.VerifyEmailAddress(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSES_VerifyEmailIdentity() { + svc := ses.New(session.New()) + + params := &ses.VerifyEmailIdentityInput{ + EmailAddress: aws.String("Address"), // Required + } + resp, err := svc.VerifyEmailIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,93 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ses + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the API Reference for Amazon Simple Email Service (Amazon SES). This +// documentation is intended to be used in conjunction with the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). +// +// For a list of Amazon SES endpoints to use in service requests, see Regions +// and Amazon SES (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/regions.html) +// in the Amazon SES Developer Guide. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SES struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "email" + +// New creates a new instance of the SES client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SES client from just a session. +// svc := ses.New(mySession) +// +// // Create a SES client with additional configuration +// svc := ses.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SES { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SES { + svc := &SES{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "ses", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-12-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SES operation and runs any +// custom request initialization. 
+func (c *SES) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/sesiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/sesiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/sesiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/sesiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,172 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sesiface provides an interface for the Amazon Simple Email Service. +package sesiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ses" +) + +// SESAPI is the interface type for ses.SES. 
+type SESAPI interface { + CloneReceiptRuleSetRequest(*ses.CloneReceiptRuleSetInput) (*request.Request, *ses.CloneReceiptRuleSetOutput) + + CloneReceiptRuleSet(*ses.CloneReceiptRuleSetInput) (*ses.CloneReceiptRuleSetOutput, error) + + CreateReceiptFilterRequest(*ses.CreateReceiptFilterInput) (*request.Request, *ses.CreateReceiptFilterOutput) + + CreateReceiptFilter(*ses.CreateReceiptFilterInput) (*ses.CreateReceiptFilterOutput, error) + + CreateReceiptRuleRequest(*ses.CreateReceiptRuleInput) (*request.Request, *ses.CreateReceiptRuleOutput) + + CreateReceiptRule(*ses.CreateReceiptRuleInput) (*ses.CreateReceiptRuleOutput, error) + + CreateReceiptRuleSetRequest(*ses.CreateReceiptRuleSetInput) (*request.Request, *ses.CreateReceiptRuleSetOutput) + + CreateReceiptRuleSet(*ses.CreateReceiptRuleSetInput) (*ses.CreateReceiptRuleSetOutput, error) + + DeleteIdentityRequest(*ses.DeleteIdentityInput) (*request.Request, *ses.DeleteIdentityOutput) + + DeleteIdentity(*ses.DeleteIdentityInput) (*ses.DeleteIdentityOutput, error) + + DeleteIdentityPolicyRequest(*ses.DeleteIdentityPolicyInput) (*request.Request, *ses.DeleteIdentityPolicyOutput) + + DeleteIdentityPolicy(*ses.DeleteIdentityPolicyInput) (*ses.DeleteIdentityPolicyOutput, error) + + DeleteReceiptFilterRequest(*ses.DeleteReceiptFilterInput) (*request.Request, *ses.DeleteReceiptFilterOutput) + + DeleteReceiptFilter(*ses.DeleteReceiptFilterInput) (*ses.DeleteReceiptFilterOutput, error) + + DeleteReceiptRuleRequest(*ses.DeleteReceiptRuleInput) (*request.Request, *ses.DeleteReceiptRuleOutput) + + DeleteReceiptRule(*ses.DeleteReceiptRuleInput) (*ses.DeleteReceiptRuleOutput, error) + + DeleteReceiptRuleSetRequest(*ses.DeleteReceiptRuleSetInput) (*request.Request, *ses.DeleteReceiptRuleSetOutput) + + DeleteReceiptRuleSet(*ses.DeleteReceiptRuleSetInput) (*ses.DeleteReceiptRuleSetOutput, error) + + DeleteVerifiedEmailAddressRequest(*ses.DeleteVerifiedEmailAddressInput) (*request.Request, *ses.DeleteVerifiedEmailAddressOutput) + + 
DeleteVerifiedEmailAddress(*ses.DeleteVerifiedEmailAddressInput) (*ses.DeleteVerifiedEmailAddressOutput, error) + + DescribeActiveReceiptRuleSetRequest(*ses.DescribeActiveReceiptRuleSetInput) (*request.Request, *ses.DescribeActiveReceiptRuleSetOutput) + + DescribeActiveReceiptRuleSet(*ses.DescribeActiveReceiptRuleSetInput) (*ses.DescribeActiveReceiptRuleSetOutput, error) + + DescribeReceiptRuleRequest(*ses.DescribeReceiptRuleInput) (*request.Request, *ses.DescribeReceiptRuleOutput) + + DescribeReceiptRule(*ses.DescribeReceiptRuleInput) (*ses.DescribeReceiptRuleOutput, error) + + DescribeReceiptRuleSetRequest(*ses.DescribeReceiptRuleSetInput) (*request.Request, *ses.DescribeReceiptRuleSetOutput) + + DescribeReceiptRuleSet(*ses.DescribeReceiptRuleSetInput) (*ses.DescribeReceiptRuleSetOutput, error) + + GetIdentityDkimAttributesRequest(*ses.GetIdentityDkimAttributesInput) (*request.Request, *ses.GetIdentityDkimAttributesOutput) + + GetIdentityDkimAttributes(*ses.GetIdentityDkimAttributesInput) (*ses.GetIdentityDkimAttributesOutput, error) + + GetIdentityNotificationAttributesRequest(*ses.GetIdentityNotificationAttributesInput) (*request.Request, *ses.GetIdentityNotificationAttributesOutput) + + GetIdentityNotificationAttributes(*ses.GetIdentityNotificationAttributesInput) (*ses.GetIdentityNotificationAttributesOutput, error) + + GetIdentityPoliciesRequest(*ses.GetIdentityPoliciesInput) (*request.Request, *ses.GetIdentityPoliciesOutput) + + GetIdentityPolicies(*ses.GetIdentityPoliciesInput) (*ses.GetIdentityPoliciesOutput, error) + + GetIdentityVerificationAttributesRequest(*ses.GetIdentityVerificationAttributesInput) (*request.Request, *ses.GetIdentityVerificationAttributesOutput) + + GetIdentityVerificationAttributes(*ses.GetIdentityVerificationAttributesInput) (*ses.GetIdentityVerificationAttributesOutput, error) + + GetSendQuotaRequest(*ses.GetSendQuotaInput) (*request.Request, *ses.GetSendQuotaOutput) + + GetSendQuota(*ses.GetSendQuotaInput) 
(*ses.GetSendQuotaOutput, error) + + GetSendStatisticsRequest(*ses.GetSendStatisticsInput) (*request.Request, *ses.GetSendStatisticsOutput) + + GetSendStatistics(*ses.GetSendStatisticsInput) (*ses.GetSendStatisticsOutput, error) + + ListIdentitiesRequest(*ses.ListIdentitiesInput) (*request.Request, *ses.ListIdentitiesOutput) + + ListIdentities(*ses.ListIdentitiesInput) (*ses.ListIdentitiesOutput, error) + + ListIdentitiesPages(*ses.ListIdentitiesInput, func(*ses.ListIdentitiesOutput, bool) bool) error + + ListIdentityPoliciesRequest(*ses.ListIdentityPoliciesInput) (*request.Request, *ses.ListIdentityPoliciesOutput) + + ListIdentityPolicies(*ses.ListIdentityPoliciesInput) (*ses.ListIdentityPoliciesOutput, error) + + ListReceiptFiltersRequest(*ses.ListReceiptFiltersInput) (*request.Request, *ses.ListReceiptFiltersOutput) + + ListReceiptFilters(*ses.ListReceiptFiltersInput) (*ses.ListReceiptFiltersOutput, error) + + ListReceiptRuleSetsRequest(*ses.ListReceiptRuleSetsInput) (*request.Request, *ses.ListReceiptRuleSetsOutput) + + ListReceiptRuleSets(*ses.ListReceiptRuleSetsInput) (*ses.ListReceiptRuleSetsOutput, error) + + ListVerifiedEmailAddressesRequest(*ses.ListVerifiedEmailAddressesInput) (*request.Request, *ses.ListVerifiedEmailAddressesOutput) + + ListVerifiedEmailAddresses(*ses.ListVerifiedEmailAddressesInput) (*ses.ListVerifiedEmailAddressesOutput, error) + + PutIdentityPolicyRequest(*ses.PutIdentityPolicyInput) (*request.Request, *ses.PutIdentityPolicyOutput) + + PutIdentityPolicy(*ses.PutIdentityPolicyInput) (*ses.PutIdentityPolicyOutput, error) + + ReorderReceiptRuleSetRequest(*ses.ReorderReceiptRuleSetInput) (*request.Request, *ses.ReorderReceiptRuleSetOutput) + + ReorderReceiptRuleSet(*ses.ReorderReceiptRuleSetInput) (*ses.ReorderReceiptRuleSetOutput, error) + + SendBounceRequest(*ses.SendBounceInput) (*request.Request, *ses.SendBounceOutput) + + SendBounce(*ses.SendBounceInput) (*ses.SendBounceOutput, error) + + SendEmailRequest(*ses.SendEmailInput) 
(*request.Request, *ses.SendEmailOutput) + + SendEmail(*ses.SendEmailInput) (*ses.SendEmailOutput, error) + + SendRawEmailRequest(*ses.SendRawEmailInput) (*request.Request, *ses.SendRawEmailOutput) + + SendRawEmail(*ses.SendRawEmailInput) (*ses.SendRawEmailOutput, error) + + SetActiveReceiptRuleSetRequest(*ses.SetActiveReceiptRuleSetInput) (*request.Request, *ses.SetActiveReceiptRuleSetOutput) + + SetActiveReceiptRuleSet(*ses.SetActiveReceiptRuleSetInput) (*ses.SetActiveReceiptRuleSetOutput, error) + + SetIdentityDkimEnabledRequest(*ses.SetIdentityDkimEnabledInput) (*request.Request, *ses.SetIdentityDkimEnabledOutput) + + SetIdentityDkimEnabled(*ses.SetIdentityDkimEnabledInput) (*ses.SetIdentityDkimEnabledOutput, error) + + SetIdentityFeedbackForwardingEnabledRequest(*ses.SetIdentityFeedbackForwardingEnabledInput) (*request.Request, *ses.SetIdentityFeedbackForwardingEnabledOutput) + + SetIdentityFeedbackForwardingEnabled(*ses.SetIdentityFeedbackForwardingEnabledInput) (*ses.SetIdentityFeedbackForwardingEnabledOutput, error) + + SetIdentityNotificationTopicRequest(*ses.SetIdentityNotificationTopicInput) (*request.Request, *ses.SetIdentityNotificationTopicOutput) + + SetIdentityNotificationTopic(*ses.SetIdentityNotificationTopicInput) (*ses.SetIdentityNotificationTopicOutput, error) + + SetReceiptRulePositionRequest(*ses.SetReceiptRulePositionInput) (*request.Request, *ses.SetReceiptRulePositionOutput) + + SetReceiptRulePosition(*ses.SetReceiptRulePositionInput) (*ses.SetReceiptRulePositionOutput, error) + + UpdateReceiptRuleRequest(*ses.UpdateReceiptRuleInput) (*request.Request, *ses.UpdateReceiptRuleOutput) + + UpdateReceiptRule(*ses.UpdateReceiptRuleInput) (*ses.UpdateReceiptRuleOutput, error) + + VerifyDomainDkimRequest(*ses.VerifyDomainDkimInput) (*request.Request, *ses.VerifyDomainDkimOutput) + + VerifyDomainDkim(*ses.VerifyDomainDkimInput) (*ses.VerifyDomainDkimOutput, error) + + VerifyDomainIdentityRequest(*ses.VerifyDomainIdentityInput) (*request.Request, 
*ses.VerifyDomainIdentityOutput) + + VerifyDomainIdentity(*ses.VerifyDomainIdentityInput) (*ses.VerifyDomainIdentityOutput, error) + + VerifyEmailAddressRequest(*ses.VerifyEmailAddressInput) (*request.Request, *ses.VerifyEmailAddressOutput) + + VerifyEmailAddress(*ses.VerifyEmailAddressInput) (*ses.VerifyEmailAddressOutput, error) + + VerifyEmailIdentityRequest(*ses.VerifyEmailIdentityInput) (*request.Request, *ses.VerifyEmailIdentityOutput) + + VerifyEmailIdentity(*ses.VerifyEmailIdentityInput) (*ses.VerifyEmailIdentityOutput, error) +} + +var _ SESAPI = (*ses.SES)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/waiters.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/waiters.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/waiters.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ses/waiters.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package ses + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *SES) WaitUntilIdentityExists(input *GetIdentityVerificationAttributesInput) error { + waiterCfg := waiter.Config{ + Operation: "GetIdentityVerificationAttributes", + Delay: 3, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VerificationAttributes.*.VerificationStatus", + Expected: "Success", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1029 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package simpledb provides a client for Amazon SimpleDB. +package simpledb + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opBatchDeleteAttributes = "BatchDeleteAttributes" + +// BatchDeleteAttributesRequest generates a request for the BatchDeleteAttributes operation. 
+func (c *SimpleDB) BatchDeleteAttributesRequest(input *BatchDeleteAttributesInput) (req *request.Request, output *BatchDeleteAttributesOutput) { + op := &request.Operation{ + Name: opBatchDeleteAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDeleteAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &BatchDeleteAttributesOutput{} + req.Data = output + return +} + +// Performs multiple DeleteAttributes operations in a single call, which reduces +// round trips and latencies. This enables Amazon SimpleDB to optimize requests, +// which generally yields better throughput. +// +// If you specify BatchDeleteAttributes without attributes or values, all +// the attributes for the item are deleted. +// +// BatchDeleteAttributes is an idempotent operation; running it multiple times +// on the same item or attribute doesn't result in an error. +// +// The BatchDeleteAttributes operation succeeds or fails in its entirety. +// There are no partial deletes. You can execute multiple BatchDeleteAttributes +// operations and other operations in parallel. However, large numbers of concurrent +// BatchDeleteAttributes calls can result in Service Unavailable (503) responses. +// +// This operation is vulnerable to exceeding the maximum URL size when making +// a REST request using the HTTP GET method. +// +// This operation does not support conditions using Expected.X.Name, Expected.X.Value, +// or Expected.X.Exists. 
+// +// The following limitations are enforced for this operation: 1 MB request +// size 25 item limit per BatchDeleteAttributes operation +func (c *SimpleDB) BatchDeleteAttributes(input *BatchDeleteAttributesInput) (*BatchDeleteAttributesOutput, error) { + req, out := c.BatchDeleteAttributesRequest(input) + err := req.Send() + return out, err +} + +const opBatchPutAttributes = "BatchPutAttributes" + +// BatchPutAttributesRequest generates a request for the BatchPutAttributes operation. +func (c *SimpleDB) BatchPutAttributesRequest(input *BatchPutAttributesInput) (req *request.Request, output *BatchPutAttributesOutput) { + op := &request.Operation{ + Name: opBatchPutAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchPutAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &BatchPutAttributesOutput{} + req.Data = output + return +} + +// The BatchPutAttributes operation creates or replaces attributes within one +// or more items. By using this operation, the client can perform multiple PutAttribute +// operation with a single call. This helps yield savings in round trips and +// latencies, enabling Amazon SimpleDB to optimize requests and generally produce +// better throughput. +// +// The client may specify the item name with the Item.X.ItemName parameter. +// The client may specify new attributes using a combination of the Item.X.Attribute.Y.Name +// and Item.X.Attribute.Y.Value parameters. The client may specify the first +// attribute for the first item using the parameters Item.0.Attribute.0.Name +// and Item.0.Attribute.0.Value, and for the second attribute for the first +// item by the parameters Item.0.Attribute.1.Name and Item.0.Attribute.1.Value, +// and so on. +// +// Attributes are uniquely identified within an item by their name/value combination. 
+// For example, a single item can have the attributes { "first_name", "first_value" +// } and { "first_name", "second_value" }. However, it cannot have two attribute +// instances where both the Item.X.Attribute.Y.Name and Item.X.Attribute.Y.Value +// are the same. +// +// Optionally, the requester can supply the Replace parameter for each individual +// value. Setting this value to true will cause the new attribute values to +// replace the existing attribute values. For example, if an item I has the +// attributes { 'a', '1' }, { 'b', '2'} and { 'b', '3' } and the requester does +// a BatchPutAttributes of {'I', 'b', '4' } with the Replace parameter set to +// true, the final attributes of the item will be { 'a', '1' } and { 'b', '4' +// }, replacing the previous values of the 'b' attribute with the new value. +// +// You cannot specify an empty string as an item or as an attribute name. +// The BatchPutAttributes operation succeeds or fails in its entirety. There +// are no partial puts. This operation is vulnerable to exceeding the maximum +// URL size when making a REST request using the HTTP GET method. This operation +// does not support conditions using Expected.X.Name, Expected.X.Value, or Expected.X.Exists. +// You can execute multiple BatchPutAttributes operations and other operations +// in parallel. However, large numbers of concurrent BatchPutAttributes calls +// can result in Service Unavailable (503) responses. 
+// +// The following limitations are enforced for this operation: 256 attribute +// name-value pairs per item 1 MB request size 1 billion attributes per domain +// 10 GB of total user data storage per domain 25 item limit per BatchPutAttributes +// operation +func (c *SimpleDB) BatchPutAttributes(input *BatchPutAttributesInput) (*BatchPutAttributesOutput, error) { + req, out := c.BatchPutAttributesRequest(input) + err := req.Send() + return out, err +} + +const opCreateDomain = "CreateDomain" + +// CreateDomainRequest generates a request for the CreateDomain operation. +func (c *SimpleDB) CreateDomainRequest(input *CreateDomainInput) (req *request.Request, output *CreateDomainOutput) { + op := &request.Operation{ + Name: opCreateDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDomainInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateDomainOutput{} + req.Data = output + return +} + +// The CreateDomain operation creates a new domain. The domain name should be +// unique among the domains associated with the Access Key ID provided in the +// request. The CreateDomain operation may take 10 or more seconds to complete. +// +// CreateDomain is an idempotent operation; running it multiple times using +// the same domain name will not result in an error response. The client can +// create up to 100 domains per account. +// +// If the client requires additional domains, go to http://aws.amazon.com/contact-us/simpledb-limit-request/ +// (http://aws.amazon.com/contact-us/simpledb-limit-request/). 
+func (c *SimpleDB) CreateDomain(input *CreateDomainInput) (*CreateDomainOutput, error) { + req, out := c.CreateDomainRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAttributes = "DeleteAttributes" + +// DeleteAttributesRequest generates a request for the DeleteAttributes operation. +func (c *SimpleDB) DeleteAttributesRequest(input *DeleteAttributesInput) (req *request.Request, output *DeleteAttributesOutput) { + op := &request.Operation{ + Name: opDeleteAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAttributesOutput{} + req.Data = output + return +} + +// Deletes one or more attributes associated with an item. If all attributes +// of the item are deleted, the item is deleted. +// +// If DeleteAttributes is called without being passed any attributes or values +// specified, all the attributes for the item are deleted. DeleteAttributes +// is an idempotent operation; running it multiple times on the same item or +// attribute does not result in an error response. +// +// Because Amazon SimpleDB makes multiple copies of item data and uses an +// eventual consistency update model, performing a GetAttributes or Select operation +// (read) immediately after a DeleteAttributes or PutAttributes operation (write) +// might not return updated item data. +func (c *SimpleDB) DeleteAttributes(input *DeleteAttributesInput) (*DeleteAttributesOutput, error) { + req, out := c.DeleteAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDomain = "DeleteDomain" + +// DeleteDomainRequest generates a request for the DeleteDomain operation. 
+func (c *SimpleDB) DeleteDomainRequest(input *DeleteDomainInput) (req *request.Request, output *DeleteDomainOutput) { + op := &request.Operation{ + Name: opDeleteDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDomainInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDomainOutput{} + req.Data = output + return +} + +// The DeleteDomain operation deletes a domain. Any items (and their attributes) +// in the domain are deleted as well. The DeleteDomain operation might take +// 10 or more seconds to complete. +// +// Running DeleteDomain on a domain that does not exist or running the function +// multiple times using the same domain name will not result in an error response. +func (c *SimpleDB) DeleteDomain(input *DeleteDomainInput) (*DeleteDomainOutput, error) { + req, out := c.DeleteDomainRequest(input) + err := req.Send() + return out, err +} + +const opDomainMetadata = "DomainMetadata" + +// DomainMetadataRequest generates a request for the DomainMetadata operation. +func (c *SimpleDB) DomainMetadataRequest(input *DomainMetadataInput) (req *request.Request, output *DomainMetadataOutput) { + op := &request.Operation{ + Name: opDomainMetadata, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DomainMetadataInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainMetadataOutput{} + req.Data = output + return +} + +// Returns information about the domain, including when the domain was created, +// the number of items and attributes in the domain, and the size of the attribute +// names and values. 
+func (c *SimpleDB) DomainMetadata(input *DomainMetadataInput) (*DomainMetadataOutput, error) { + req, out := c.DomainMetadataRequest(input) + err := req.Send() + return out, err +} + +const opGetAttributes = "GetAttributes" + +// GetAttributesRequest generates a request for the GetAttributes operation. +func (c *SimpleDB) GetAttributesRequest(input *GetAttributesInput) (req *request.Request, output *GetAttributesOutput) { + op := &request.Operation{ + Name: opGetAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAttributesOutput{} + req.Data = output + return +} + +// Returns all of the attributes associated with the specified item. Optionally, +// the attributes returned can be limited to one or more attributes by specifying +// an attribute name parameter. +// +// If the item does not exist on the replica that was accessed for this operation, +// an empty set is returned. The system does not return an error as it cannot +// guarantee the item does not exist on other replicas. +// +// If GetAttributes is called without being passed any attribute names, all +// the attributes for the item are returned. +func (c *SimpleDB) GetAttributes(input *GetAttributesInput) (*GetAttributesOutput, error) { + req, out := c.GetAttributesRequest(input) + err := req.Send() + return out, err +} + +const opListDomains = "ListDomains" + +// ListDomainsRequest generates a request for the ListDomains operation. 
+func (c *SimpleDB) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, output *ListDomainsOutput) { + op := &request.Operation{ + Name: opListDomains, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxNumberOfDomains", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainsOutput{} + req.Data = output + return +} + +// The ListDomains operation lists all domains associated with the Access Key +// ID. It returns domain names up to the limit set by MaxNumberOfDomains (#MaxNumberOfDomains). +// A NextToken (#NextToken) is returned if there are more than MaxNumberOfDomains +// domains. Calling ListDomains successive times with the NextToken provided +// by the operation returns up to MaxNumberOfDomains more domain names with +// each successive operation call. +func (c *SimpleDB) ListDomains(input *ListDomainsInput) (*ListDomainsOutput, error) { + req, out := c.ListDomainsRequest(input) + err := req.Send() + return out, err +} + +func (c *SimpleDB) ListDomainsPages(input *ListDomainsInput, fn func(p *ListDomainsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDomainsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDomainsOutput), lastPage) + }) +} + +const opPutAttributes = "PutAttributes" + +// PutAttributesRequest generates a request for the PutAttributes operation. 
+func (c *SimpleDB) PutAttributesRequest(input *PutAttributesInput) (req *request.Request, output *PutAttributesOutput) { + op := &request.Operation{ + Name: opPutAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutAttributesOutput{} + req.Data = output + return +} + +// The PutAttributes operation creates or replaces attributes in an item. The +// client may specify new attributes using a combination of the Attribute.X.Name +// and Attribute.X.Value parameters. The client specifies the first attribute +// by the parameters Attribute.0.Name and Attribute.0.Value, the second attribute +// by the parameters Attribute.1.Name and Attribute.1.Value, and so on. +// +// Attributes are uniquely identified in an item by their name/value combination. +// For example, a single item can have the attributes { "first_name", "first_value" +// } and { "first_name", second_value" }. However, it cannot have two attribute +// instances where both the Attribute.X.Name and Attribute.X.Value are the same. +// +// Optionally, the requestor can supply the Replace parameter for each individual +// attribute. Setting this value to true causes the new attribute value to replace +// the existing attribute value(s). For example, if an item has the attributes +// { 'a', '1' }, { 'b', '2'} and { 'b', '3' } and the requestor calls PutAttributes +// using the attributes { 'b', '4' } with the Replace parameter set to true, +// the final attributes of the item are changed to { 'a', '1' } and { 'b', '4' +// }, which replaces the previous values of the 'b' attribute with the new value. +// +// Using PutAttributes to replace attribute values that do not exist will +// not result in an error response. 
You cannot specify an empty string as +// an attribute name. +// +// Because Amazon SimpleDB makes multiple copies of client data and uses an +// eventual consistency update model, an immediate GetAttributes or Select operation +// (read) immediately after a PutAttributes or DeleteAttributes operation (write) +// might not return the updated data. +// +// The following limitations are enforced for this operation: 256 total attribute +// name-value pairs per item One billion attributes per domain 10 GB of total +// user data storage per domain +func (c *SimpleDB) PutAttributes(input *PutAttributesInput) (*PutAttributesOutput, error) { + req, out := c.PutAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSelect = "Select" + +// SelectRequest generates a request for the Select operation. +func (c *SimpleDB) SelectRequest(input *SelectInput) (req *request.Request, output *SelectOutput) { + op := &request.Operation{ + Name: opSelect, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &SelectInput{} + } + + req = c.newRequest(op, input, output) + output = &SelectOutput{} + req.Data = output + return +} + +// The Select operation returns a set of attributes for ItemNames that match +// the select expression. Select is similar to the standard SQL SELECT statement. +// +// The total size of the response cannot exceed 1 MB in total size. Amazon +// SimpleDB automatically adjusts the number of items returned per page to enforce +// this limit. For example, if the client asks to retrieve 2500 items, but each +// individual item is 10 kB in size, the system returns 100 items and an appropriate +// NextToken so the client can access the next page of results. 
+// +// For information on how to construct select expressions, see Using Select +// to Create Amazon SimpleDB Queries in the Developer Guide. +func (c *SimpleDB) Select(input *SelectInput) (*SelectOutput, error) { + req, out := c.SelectRequest(input) + err := req.Send() + return out, err +} + +func (c *SimpleDB) SelectPages(input *SelectInput, fn func(p *SelectOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.SelectRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*SelectOutput), lastPage) + }) +} + +type Attribute struct { + _ struct{} `type:"structure"` + + AlternateNameEncoding *string `type:"string"` + + AlternateValueEncoding *string `type:"string"` + + // The name of the attribute. + Name *string `type:"string" required:"true"` + + // The value of the attribute. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +type BatchDeleteAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the attributes are being deleted. + DomainName *string `type:"string" required:"true"` + + // A list of items on which to perform the operation. 
+ Items []*DeletableItem `locationNameList:"Item" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteAttributesInput) GoString() string { + return s.String() +} + +type BatchDeleteAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s BatchDeleteAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteAttributesOutput) GoString() string { + return s.String() +} + +type BatchPutAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the attributes are being stored. + DomainName *string `type:"string" required:"true"` + + // A list of items on which to perform the operation. + Items []*ReplaceableItem `locationNameList:"Item" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BatchPutAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPutAttributesInput) GoString() string { + return s.String() +} + +type BatchPutAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s BatchPutAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPutAttributesOutput) GoString() string { + return s.String() +} + +type CreateDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to create. The name can range between 3 and 255 characters + // and can contain the following characters: a-z, A-Z, 0-9, '_', '-', and '.'. 
+ DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainInput) GoString() string { + return s.String() +} + +type CreateDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainOutput) GoString() string { + return s.String() +} + +type DeletableAttribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Name *string `type:"string" required:"true"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s DeletableAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletableAttribute) GoString() string { + return s.String() +} + +type DeletableItem struct { + _ struct{} `type:"structure"` + + Attributes []*DeletableAttribute `locationNameList:"Attribute" type:"list" flattened:"true"` + + Name *string `locationName:"ItemName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletableItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletableItem) GoString() string { + return s.String() +} + +type DeleteAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of Attributes. Similar to columns on a spreadsheet, attributes represent + // categories of data that can be assigned to items. + Attributes []*DeletableAttribute `locationNameList:"Attribute" type:"list" flattened:"true"` + + // The name of the domain in which to perform the operation. 
+ DomainName *string `type:"string" required:"true"` + + // The update condition which, if specified, determines whether the specified + // attributes will be deleted or not. The update condition must be satisfied + // in order for this request to be processed and the attributes to be deleted. + Expected *UpdateCondition `type:"structure"` + + // The name of the item. Similar to rows on a spreadsheet, items represent individual + // objects that contain one or more value-attribute pairs. + ItemName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAttributesInput) GoString() string { + return s.String() +} + +type DeleteAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAttributesOutput) GoString() string { + return s.String() +} + +type DeleteDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to delete. + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainInput) GoString() string { + return s.String() +} + +type DeleteDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainOutput) GoString() string { + return s.String() +} + +type DomainMetadataInput struct { + _ struct{} `type:"structure"` + + // The name of the domain for which to display the metadata of. 
+ DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DomainMetadataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainMetadataInput) GoString() string { + return s.String() +} + +type DomainMetadataOutput struct { + _ struct{} `type:"structure"` + + // The number of unique attribute names in the domain. + AttributeNameCount *int64 `type:"integer"` + + // The total size of all unique attribute names in the domain, in bytes. + AttributeNamesSizeBytes *int64 `type:"long"` + + // The number of all attribute name/value pairs in the domain. + AttributeValueCount *int64 `type:"integer"` + + // The total size of all attribute values in the domain, in bytes. + AttributeValuesSizeBytes *int64 `type:"long"` + + // The number of all items in the domain. + ItemCount *int64 `type:"integer"` + + // The total size of all item names in the domain, in bytes. + ItemNamesSizeBytes *int64 `type:"long"` + + // The data and time when metadata was calculated, in Epoch (UNIX) seconds. + Timestamp *int64 `type:"integer"` +} + +// String returns the string representation +func (s DomainMetadataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainMetadataOutput) GoString() string { + return s.String() +} + +type GetAttributesInput struct { + _ struct{} `type:"structure"` + + // The names of the attributes. + AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` + + // Determines whether or not strong consistency should be enforced when data + // is read from SimpleDB. If true, any data previously written to SimpleDB will + // be returned. Otherwise, results will be consistent eventually, and the client + // may not see data that was written immediately before your read. 
+ ConsistentRead *bool `type:"boolean"` + + // The name of the domain in which to perform the operation. + DomainName *string `type:"string" required:"true"` + + // The name of the item. + ItemName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAttributesInput) GoString() string { + return s.String() +} + +type GetAttributesOutput struct { + _ struct{} `type:"structure"` + + // The list of attributes returned by the operation. + Attributes []*Attribute `locationNameList:"Attribute" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAttributesOutput) GoString() string { + return s.String() +} + +type Item struct { + _ struct{} `type:"structure"` + + AlternateNameEncoding *string `type:"string"` + + // A list of attributes. + Attributes []*Attribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"` + + // The name of the item. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Item) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Item) GoString() string { + return s.String() +} + +type ListDomainsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of domain names you want returned. The range is 1 to 100. + // The default setting is 100. + MaxNumberOfDomains *int64 `type:"integer"` + + // A string informing Amazon SimpleDB where to start the next list of domain + // names. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsInput) GoString() string { + return s.String() +} + +type ListDomainsOutput struct { + _ struct{} `type:"structure"` + + // A list of domain names that match the expression. + DomainNames []*string `locationNameList:"DomainName" type:"list" flattened:"true"` + + // An opaque token indicating that there are more domains than the specified + // MaxNumberOfDomains still available. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsOutput) GoString() string { + return s.String() +} + +type PutAttributesInput struct { + _ struct{} `type:"structure"` + + // The list of attributes. + Attributes []*ReplaceableAttribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"` + + // The name of the domain in which to perform the operation. + DomainName *string `type:"string" required:"true"` + + // The update condition which, if specified, determines whether the specified + // attributes will be updated or not. The update condition must be satisfied + // in order for this request to be processed and the attributes to be updated. + Expected *UpdateCondition `type:"structure"` + + // The name of the item. 
+ ItemName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAttributesInput) GoString() string { + return s.String() +} + +type PutAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAttributesOutput) GoString() string { + return s.String() +} + +type ReplaceableAttribute struct { + _ struct{} `type:"structure"` + + // The name of the replaceable attribute. + Name *string `type:"string" required:"true"` + + // A flag specifying whether or not to replace the attribute/value pair or to + // add a new attribute/value pair. The default setting is false. + Replace *bool `type:"boolean"` + + // The value of the replaceable attribute. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceableAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceableAttribute) GoString() string { + return s.String() +} + +type ReplaceableItem struct { + _ struct{} `type:"structure"` + + // The list of attributes for a replaceable item. + Attributes []*ReplaceableAttribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"` + + // The name of the replaceable item. 
+ Name *string `locationName:"ItemName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceableItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceableItem) GoString() string { + return s.String() +} + +type SelectInput struct { + _ struct{} `type:"structure"` + + // Determines whether or not strong consistency should be enforced when data + // is read from SimpleDB. If true, any data previously written to SimpleDB will + // be returned. Otherwise, results will be consistent eventually, and the client + // may not see data that was written immediately before your read. + ConsistentRead *bool `type:"boolean"` + + // A string informing Amazon SimpleDB where to start the next list of ItemNames. + NextToken *string `type:"string"` + + // The expression used to query the domain. + SelectExpression *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SelectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectInput) GoString() string { + return s.String() +} + +type SelectOutput struct { + _ struct{} `type:"structure"` + + // A list of items that match the select expression. + Items []*Item `locationNameList:"Item" type:"list" flattened:"true"` + + // An opaque token indicating that more items than MaxNumberOfItems were matched, + // the response size exceeded 1 megabyte, or the execution time exceeded 5 seconds. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s SelectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectOutput) GoString() string { + return s.String() +} + +// Specifies the conditions under which data should be updated. 
If an update +// condition is specified for a request, the data will only be updated if the +// condition is satisfied. For example, if an attribute with a specific name +// and value exists, or if a specific attribute doesn't exist. +type UpdateCondition struct { + _ struct{} `type:"structure"` + + // A value specifying whether or not the specified attribute must exist with + // the specified value in order for the update condition to be satisfied. Specify + // true if the attribute must exist for the update condition to be satisfied. + // Specify false if the attribute should not exist in order for the update condition + // to be satisfied. + Exists *bool `type:"boolean"` + + // The name of the attribute involved in the condition. + Name *string `type:"string"` + + // The value of an attribute. This value can only be specified when the Exists + // parameter is equal to true. + Value *string `type:"string"` +} + +// String returns the string representation +func (s UpdateCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCondition) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,11 @@ +package simpledb + +import "github.com/aws/aws-sdk-go/aws/client" + +func init() { + initClient = func(c *client.Client) { + // SimpleDB uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + } +} diff 
-Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,269 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package simpledb_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/simpledb" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSimpleDB_BatchDeleteAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.BatchDeleteAttributesInput{ + DomainName: aws.String("String"), // Required + Items: []*simpledb.DeletableItem{ // Required + { // Required + Name: aws.String("String"), // Required + Attributes: []*simpledb.DeletableAttribute{ + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.BatchDeleteAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSimpleDB_BatchPutAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.BatchPutAttributesInput{ + DomainName: aws.String("String"), // Required + Items: []*simpledb.ReplaceableItem{ // Required + { // Required + Attributes: []*simpledb.ReplaceableAttribute{ // Required + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), // Required + Replace: aws.Bool(true), + }, + // More values... + }, + Name: aws.String("String"), // Required + }, + // More values... + }, + } + resp, err := svc.BatchPutAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_CreateDomain() { + svc := simpledb.New(session.New()) + + params := &simpledb.CreateDomainInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.CreateDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_DeleteAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.DeleteAttributesInput{ + DomainName: aws.String("String"), // Required + ItemName: aws.String("String"), // Required + Attributes: []*simpledb.DeletableAttribute{ + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), + }, + // More values... + }, + Expected: &simpledb.UpdateCondition{ + Exists: aws.Bool(true), + Name: aws.String("String"), + Value: aws.String("String"), + }, + } + resp, err := svc.DeleteAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_DeleteDomain() { + svc := simpledb.New(session.New()) + + params := &simpledb.DeleteDomainInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.DeleteDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_DomainMetadata() { + svc := simpledb.New(session.New()) + + params := &simpledb.DomainMetadataInput{ + DomainName: aws.String("String"), // Required + } + resp, err := svc.DomainMetadata(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_GetAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.GetAttributesInput{ + DomainName: aws.String("String"), // Required + ItemName: aws.String("String"), // Required + AttributeNames: []*string{ + aws.String("String"), // Required + // More values... + }, + ConsistentRead: aws.Bool(true), + } + resp, err := svc.GetAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_ListDomains() { + svc := simpledb.New(session.New()) + + params := &simpledb.ListDomainsInput{ + MaxNumberOfDomains: aws.Int64(1), + NextToken: aws.String("String"), + } + resp, err := svc.ListDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSimpleDB_PutAttributes() { + svc := simpledb.New(session.New()) + + params := &simpledb.PutAttributesInput{ + Attributes: []*simpledb.ReplaceableAttribute{ // Required + { // Required + Name: aws.String("String"), // Required + Value: aws.String("String"), // Required + Replace: aws.Bool(true), + }, + // More values... + }, + DomainName: aws.String("String"), // Required + ItemName: aws.String("String"), // Required + Expected: &simpledb.UpdateCondition{ + Exists: aws.Bool(true), + Name: aws.String("String"), + Value: aws.String("String"), + }, + } + resp, err := svc.PutAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSimpleDB_Select() { + svc := simpledb.New(session.New()) + + params := &simpledb.SelectInput{ + SelectExpression: aws.String("String"), // Required + ConsistentRead: aws.Bool(true), + NextToken: aws.String("String"), + } + resp, err := svc.Select(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,102 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package simpledb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v2" +) + +// Amazon SimpleDB is a web service providing the core database functions of +// data indexing and querying in the cloud. By offloading the time and effort +// associated with building and operating a web-scale database, SimpleDB provides +// developers the freedom to focus on application development. A traditional, +// clustered relational database requires a sizable upfront capital outlay, +// is complex to design, and often requires extensive and repetitive database +// administration. Amazon SimpleDB is dramatically simpler, requiring no schema, +// automatically indexing your data and providing a simple API for storage and +// access. This approach eliminates the administrative burden of data modeling, +// index maintenance, and performance tuning. Developers gain access to this +// functionality within Amazon's proven computing environment, are able to scale +// instantly, and pay only for what they use. +// +// Visit http://aws.amazon.com/simpledb/ (http://aws.amazon.com/simpledb/) +// for more information. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SimpleDB struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sdb" + +// New creates a new instance of the SimpleDB client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SimpleDB client from just a session. +// svc := simpledb.New(mySession) +// +// // Create a SimpleDB client with additional configuration +// svc := simpledb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SimpleDB { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SimpleDB { + svc := &SimpleDB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2009-04-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v2.Sign) + svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SimpleDB operation and runs any +// custom request initialization. 
+func (c *SimpleDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/simpledbiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/simpledbiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/simpledbiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/simpledbiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package simpledbiface provides an interface for the Amazon SimpleDB. +package simpledbiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/simpledb" +) + +// SimpleDBAPI is the interface type for simpledb.SimpleDB. 
+type SimpleDBAPI interface { + BatchDeleteAttributesRequest(*simpledb.BatchDeleteAttributesInput) (*request.Request, *simpledb.BatchDeleteAttributesOutput) + + BatchDeleteAttributes(*simpledb.BatchDeleteAttributesInput) (*simpledb.BatchDeleteAttributesOutput, error) + + BatchPutAttributesRequest(*simpledb.BatchPutAttributesInput) (*request.Request, *simpledb.BatchPutAttributesOutput) + + BatchPutAttributes(*simpledb.BatchPutAttributesInput) (*simpledb.BatchPutAttributesOutput, error) + + CreateDomainRequest(*simpledb.CreateDomainInput) (*request.Request, *simpledb.CreateDomainOutput) + + CreateDomain(*simpledb.CreateDomainInput) (*simpledb.CreateDomainOutput, error) + + DeleteAttributesRequest(*simpledb.DeleteAttributesInput) (*request.Request, *simpledb.DeleteAttributesOutput) + + DeleteAttributes(*simpledb.DeleteAttributesInput) (*simpledb.DeleteAttributesOutput, error) + + DeleteDomainRequest(*simpledb.DeleteDomainInput) (*request.Request, *simpledb.DeleteDomainOutput) + + DeleteDomain(*simpledb.DeleteDomainInput) (*simpledb.DeleteDomainOutput, error) + + DomainMetadataRequest(*simpledb.DomainMetadataInput) (*request.Request, *simpledb.DomainMetadataOutput) + + DomainMetadata(*simpledb.DomainMetadataInput) (*simpledb.DomainMetadataOutput, error) + + GetAttributesRequest(*simpledb.GetAttributesInput) (*request.Request, *simpledb.GetAttributesOutput) + + GetAttributes(*simpledb.GetAttributesInput) (*simpledb.GetAttributesOutput, error) + + ListDomainsRequest(*simpledb.ListDomainsInput) (*request.Request, *simpledb.ListDomainsOutput) + + ListDomains(*simpledb.ListDomainsInput) (*simpledb.ListDomainsOutput, error) + + ListDomainsPages(*simpledb.ListDomainsInput, func(*simpledb.ListDomainsOutput, bool) bool) error + + PutAttributesRequest(*simpledb.PutAttributesInput) (*request.Request, *simpledb.PutAttributesOutput) + + PutAttributes(*simpledb.PutAttributesInput) (*simpledb.PutAttributesOutput, error) + + SelectRequest(*simpledb.SelectInput) (*request.Request, 
*simpledb.SelectOutput) + + Select(*simpledb.SelectInput) (*simpledb.SelectOutput, error) + + SelectPages(*simpledb.SelectInput, func(*simpledb.SelectOutput, bool) bool) error +} + +var _ SimpleDBAPI = (*simpledb.SimpleDB)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,51 @@ +package simpledb + +import ( + "encoding/xml" + "io" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorDetail struct { + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Response"` + Errors []xmlErrorDetail `xml:"Errors>Error"` + RequestID string `xml:"RequestID"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + if r.HTTPResponse.ContentLength == int64(0) { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + "", + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode SimpleDB XML error response", nil) + } else if len(resp.Errors) == 0 { + r.Error = awserr.New("MissingError", "missing error code in SimpleDB XML error response", nil) + } else { + // If there are multiple error codes, return only the first 
as the aws.Error interface only supports + // one error code. + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Errors[0].Code, resp.Errors[0].Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,139 @@ +package simpledb_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/simpledb" +) + +var statusCodeErrorTests = []struct { + scode int + status string + code string + message string +}{ + {301, "Moved Permanently", "MovedPermanently", "Moved Permanently"}, + {403, "Forbidden", "Forbidden", "Forbidden"}, + {400, "Bad Request", "BadRequest", "Bad Request"}, + {404, "Not Found", "NotFound", "Not Found"}, + {500, "Internal Error", "InternalError", "Internal Error"}, +} + +func TestStatusCodeError(t *testing.T) { + for _, test := range statusCodeErrorTests { + s := simpledb.New(unit.Session) + s.Handlers.Send.Clear() + s.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte{})) + r.HTTPResponse = &http.Response{ + ContentLength: 0, + StatusCode: test.scode, + Status: test.status, + Body: body, + } + }) + _, err := s.CreateDomain(&simpledb.CreateDomainInput{ + DomainName: 
aws.String("test-domain"), + }) + + assert.Error(t, err) + assert.Equal(t, test.code, err.(awserr.Error).Code()) + assert.Equal(t, test.message, err.(awserr.Error).Message()) + } +} + +var responseErrorTests = []struct { + scode int + status string + code string + message string + requestID string + errors []struct { + code string + message string + } +}{ + { + scode: 400, + status: "Bad Request", + code: "MissingError", + message: "missing error code in SimpleDB XML error response", + requestID: "101", + errors: []struct{ code, message string }{}, + }, + { + scode: 403, + status: "Forbidden", + code: "AuthFailure", + message: "AWS was not able to validate the provided access keys.", + requestID: "1111", + errors: []struct{ code, message string }{ + {"AuthFailure", "AWS was not able to validate the provided access keys."}, + }, + }, + { + scode: 500, + status: "Internal Error", + code: "MissingParameter", + message: "Message #1", + requestID: "8756", + errors: []struct{ code, message string }{ + {"MissingParameter", "Message #1"}, + {"InternalError", "Message #2"}, + }, + }, +} + +func TestResponseError(t *testing.T) { + for _, test := range responseErrorTests { + s := simpledb.New(unit.Session) + s.Handlers.Send.Clear() + s.Handlers.Send.PushBack(func(r *request.Request) { + xml := createXMLResponse(test.requestID, test.errors) + body := ioutil.NopCloser(bytes.NewReader([]byte(xml))) + r.HTTPResponse = &http.Response{ + ContentLength: int64(len(xml)), + StatusCode: test.scode, + Status: test.status, + Body: body, + } + }) + _, err := s.CreateDomain(&simpledb.CreateDomainInput{ + DomainName: aws.String("test-domain"), + }) + + assert.Error(t, err) + assert.Equal(t, test.code, err.(awserr.Error).Code()) + assert.Equal(t, test.message, err.(awserr.Error).Message()) + if len(test.errors) > 0 { + assert.Equal(t, test.requestID, err.(awserr.RequestFailure).RequestID()) + assert.Equal(t, test.scode, err.(awserr.RequestFailure).StatusCode()) + } + } +} + +// 
createXMLResponse constructs an XML string that has one or more error messages in it.
+func createXMLResponse(requestID string, errors []struct{ code, message string }) []byte {
+	var buf bytes.Buffer
+	buf.WriteString(`<?xml version="1.0"?><Response><Errors>`)
+	for _, e := range errors {
+		buf.WriteString(`<Error><Code>`)
+		buf.WriteString(e.code)
+		buf.WriteString(`</Code><Message>`)
+		buf.WriteString(e.message)
+		buf.WriteString(`</Message></Error>`)
+	}
+	buf.WriteString(`</Errors><RequestID>`)
+	buf.WriteString(requestID)
+	buf.WriteString(`</RequestID></Response>`)
+	return buf.Bytes()
+}
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2098 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sns provides a client for Amazon Simple Notification Service. +package sns + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddPermission = "AddPermission" + +// AddPermissionRequest generates a request for the AddPermission operation. 
+func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { + op := &request.Operation{ + Name: opAddPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddPermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddPermissionOutput{} + req.Data = output + return +} + +// Adds a statement to a topic's access control policy, granting access for +// the specified AWS accounts to the specified actions. +func (c *SNS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { + req, out := c.AddPermissionRequest(input) + err := req.Send() + return out, err +} + +const opConfirmSubscription = "ConfirmSubscription" + +// ConfirmSubscriptionRequest generates a request for the ConfirmSubscription operation. +func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req *request.Request, output *ConfirmSubscriptionOutput) { + op := &request.Operation{ + Name: opConfirmSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmSubscriptionInput{} + } + + req = c.newRequest(op, input, output) + output = &ConfirmSubscriptionOutput{} + req.Data = output + return +} + +// Verifies an endpoint owner's intent to receive messages by validating the +// token sent to the endpoint by an earlier Subscribe action. If the token is +// valid, the action creates a new subscription and returns its Amazon Resource +// Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe +// flag is set to "true". 
+func (c *SNS) ConfirmSubscription(input *ConfirmSubscriptionInput) (*ConfirmSubscriptionOutput, error) { + req, out := c.ConfirmSubscriptionRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlatformApplication = "CreatePlatformApplication" + +// CreatePlatformApplicationRequest generates a request for the CreatePlatformApplication operation. +func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationInput) (req *request.Request, output *CreatePlatformApplicationOutput) { + op := &request.Operation{ + Name: opCreatePlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlatformApplicationOutput{} + req.Data = output + return +} + +// Creates a platform application object for one of the supported push notification +// services, such as APNS and GCM, to which devices and mobile apps may register. +// You must specify PlatformPrincipal and PlatformCredential attributes when +// using the CreatePlatformApplication action. The PlatformPrincipal is received +// from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is +// "SSL certificate". For GCM, PlatformPrincipal is not applicable. For ADM, +// PlatformPrincipal is "client id". The PlatformCredential is also received +// from the notification service. For APNS/APNS_SANDBOX, PlatformCredential +// is "private key". For GCM, PlatformCredential is "API key". For ADM, PlatformCredential +// is "client secret". The PlatformApplicationArn that is returned when using +// CreatePlatformApplication is then used as an attribute for the CreatePlatformEndpoint +// action. For more information, see Using Amazon SNS Mobile Push Notifications +// (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
+func (c *SNS) CreatePlatformApplication(input *CreatePlatformApplicationInput) (*CreatePlatformApplicationOutput, error) { + req, out := c.CreatePlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +const opCreatePlatformEndpoint = "CreatePlatformEndpoint" + +// CreatePlatformEndpointRequest generates a request for the CreatePlatformEndpoint operation. +func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) (req *request.Request, output *CreatePlatformEndpointOutput) { + op := &request.Operation{ + Name: opCreatePlatformEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlatformEndpointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreatePlatformEndpointOutput{} + req.Data = output + return +} + +// Creates an endpoint for a device and mobile app on one of the supported push +// notification services, such as GCM and APNS. CreatePlatformEndpoint requires +// the PlatformApplicationArn that is returned from CreatePlatformApplication. +// The EndpointArn that is returned when using CreatePlatformEndpoint can then +// be used by the Publish action to send a message to a mobile app or by the +// Subscribe action for subscription to a topic. The CreatePlatformEndpoint +// action is idempotent, so if the requester already owns an endpoint with the +// same device token and attributes, that endpoint's ARN is returned without +// creating a new endpoint. For more information, see Using Amazon SNS Mobile +// Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// +// When using CreatePlatformEndpoint with Baidu, two attributes must be provided: +// ChannelId and UserId. The token field must also contain the ChannelId. For +// more information, see Creating an Amazon SNS Endpoint for Baidu (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePushBaiduEndpoint.html). 
+func (c *SNS) CreatePlatformEndpoint(input *CreatePlatformEndpointInput) (*CreatePlatformEndpointOutput, error) { + req, out := c.CreatePlatformEndpointRequest(input) + err := req.Send() + return out, err +} + +const opCreateTopic = "CreateTopic" + +// CreateTopicRequest generates a request for the CreateTopic operation. +func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, output *CreateTopicOutput) { + op := &request.Operation{ + Name: opCreateTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTopicOutput{} + req.Data = output + return +} + +// Creates a topic to which notifications can be published. Users can create +// at most 3000 topics. For more information, see http://aws.amazon.com/sns +// (http://aws.amazon.com/sns/). This action is idempotent, so if the requester +// already owns a topic with the specified name, that topic's ARN is returned +// without creating a new topic. +func (c *SNS) CreateTopic(input *CreateTopicInput) (*CreateTopicOutput, error) { + req, out := c.CreateTopicRequest(input) + err := req.Send() + return out, err +} + +const opDeleteEndpoint = "DeleteEndpoint" + +// DeleteEndpointRequest generates a request for the DeleteEndpoint operation. +func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { + op := &request.Operation{ + Name: opDeleteEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEndpointInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteEndpointOutput{} + req.Data = output + return +} + +// Deletes the endpoint from Amazon SNS. This action is idempotent. 
For more +// information, see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { + req, out := c.DeleteEndpointRequest(input) + err := req.Send() + return out, err +} + +const opDeletePlatformApplication = "DeletePlatformApplication" + +// DeletePlatformApplicationRequest generates a request for the DeletePlatformApplication operation. +func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationInput) (req *request.Request, output *DeletePlatformApplicationOutput) { + op := &request.Operation{ + Name: opDeletePlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeletePlatformApplicationOutput{} + req.Data = output + return +} + +// Deletes a platform application object for one of the supported push notification +// services, such as APNS and GCM. For more information, see Using Amazon SNS +// Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) DeletePlatformApplication(input *DeletePlatformApplicationInput) (*DeletePlatformApplicationOutput, error) { + req, out := c.DeletePlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTopic = "DeleteTopic" + +// DeleteTopicRequest generates a request for the DeleteTopic operation. 
+func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, output *DeleteTopicOutput) { + op := &request.Operation{ + Name: opDeleteTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTopicInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteTopicOutput{} + req.Data = output + return +} + +// Deletes a topic and all its subscriptions. Deleting a topic might prevent +// some messages previously sent to the topic from being delivered to subscribers. +// This action is idempotent, so deleting a topic that does not exist does not +// result in an error. +func (c *SNS) DeleteTopic(input *DeleteTopicInput) (*DeleteTopicOutput, error) { + req, out := c.DeleteTopicRequest(input) + err := req.Send() + return out, err +} + +const opGetEndpointAttributes = "GetEndpointAttributes" + +// GetEndpointAttributesRequest generates a request for the GetEndpointAttributes operation. +func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (req *request.Request, output *GetEndpointAttributesOutput) { + op := &request.Operation{ + Name: opGetEndpointAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEndpointAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetEndpointAttributesOutput{} + req.Data = output + return +} + +// Retrieves the endpoint attributes for a device on one of the supported push +// notification services, such as GCM and APNS. For more information, see Using +// Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
+func (c *SNS) GetEndpointAttributes(input *GetEndpointAttributesInput) (*GetEndpointAttributesOutput, error) { + req, out := c.GetEndpointAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetPlatformApplicationAttributes = "GetPlatformApplicationAttributes" + +// GetPlatformApplicationAttributesRequest generates a request for the GetPlatformApplicationAttributes operation. +func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicationAttributesInput) (req *request.Request, output *GetPlatformApplicationAttributesOutput) { + op := &request.Operation{ + Name: opGetPlatformApplicationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPlatformApplicationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetPlatformApplicationAttributesOutput{} + req.Data = output + return +} + +// Retrieves the attributes of the platform application object for the supported +// push notification services, such as APNS and GCM. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) GetPlatformApplicationAttributes(input *GetPlatformApplicationAttributesInput) (*GetPlatformApplicationAttributesOutput, error) { + req, out := c.GetPlatformApplicationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetSubscriptionAttributes = "GetSubscriptionAttributes" + +// GetSubscriptionAttributesRequest generates a request for the GetSubscriptionAttributes operation. 
+func (c *SNS) GetSubscriptionAttributesRequest(input *GetSubscriptionAttributesInput) (req *request.Request, output *GetSubscriptionAttributesOutput) { + op := &request.Operation{ + Name: opGetSubscriptionAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSubscriptionAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSubscriptionAttributesOutput{} + req.Data = output + return +} + +// Returns all of the properties of a subscription. +func (c *SNS) GetSubscriptionAttributes(input *GetSubscriptionAttributesInput) (*GetSubscriptionAttributesOutput, error) { + req, out := c.GetSubscriptionAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetTopicAttributes = "GetTopicAttributes" + +// GetTopicAttributesRequest generates a request for the GetTopicAttributes operation. +func (c *SNS) GetTopicAttributesRequest(input *GetTopicAttributesInput) (req *request.Request, output *GetTopicAttributesOutput) { + op := &request.Operation{ + Name: opGetTopicAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTopicAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetTopicAttributesOutput{} + req.Data = output + return +} + +// Returns all of the properties of a topic. Topic properties returned might +// differ based on the authorization of the user. +func (c *SNS) GetTopicAttributes(input *GetTopicAttributesInput) (*GetTopicAttributesOutput, error) { + req, out := c.GetTopicAttributesRequest(input) + err := req.Send() + return out, err +} + +const opListEndpointsByPlatformApplication = "ListEndpointsByPlatformApplication" + +// ListEndpointsByPlatformApplicationRequest generates a request for the ListEndpointsByPlatformApplication operation. 
+func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPlatformApplicationInput) (req *request.Request, output *ListEndpointsByPlatformApplicationOutput) { + op := &request.Operation{ + Name: opListEndpointsByPlatformApplication, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEndpointsByPlatformApplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &ListEndpointsByPlatformApplicationOutput{} + req.Data = output + return +} + +// Lists the endpoints and endpoint attributes for devices in a supported push +// notification service, such as GCM and APNS. The results for ListEndpointsByPlatformApplication +// are paginated and return a limited list of endpoints, up to 100. If additional +// records are available after the first page results, then a NextToken string +// will be returned. To receive the next page, you call ListEndpointsByPlatformApplication +// again using the NextToken string received from the previous call. When there +// are no more records to return, NextToken will be null. For more information, +// see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
+func (c *SNS) ListEndpointsByPlatformApplication(input *ListEndpointsByPlatformApplicationInput) (*ListEndpointsByPlatformApplicationOutput, error) { + req, out := c.ListEndpointsByPlatformApplicationRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListEndpointsByPlatformApplicationPages(input *ListEndpointsByPlatformApplicationInput, fn func(p *ListEndpointsByPlatformApplicationOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListEndpointsByPlatformApplicationRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListEndpointsByPlatformApplicationOutput), lastPage) + }) +} + +const opListPlatformApplications = "ListPlatformApplications" + +// ListPlatformApplicationsRequest generates a request for the ListPlatformApplications operation. +func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInput) (req *request.Request, output *ListPlatformApplicationsOutput) { + op := &request.Operation{ + Name: opListPlatformApplications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPlatformApplicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPlatformApplicationsOutput{} + req.Data = output + return +} + +// Lists the platform application objects for the supported push notification +// services, such as APNS and GCM. The results for ListPlatformApplications +// are paginated and return a limited list of applications, up to 100. If additional +// records are available after the first page results, then a NextToken string +// will be returned. To receive the next page, you call ListPlatformApplications +// using the NextToken string received from the previous call. 
When there are +// no more records to return, NextToken will be null. For more information, +// see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) ListPlatformApplications(input *ListPlatformApplicationsInput) (*ListPlatformApplicationsOutput, error) { + req, out := c.ListPlatformApplicationsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListPlatformApplicationsPages(input *ListPlatformApplicationsInput, fn func(p *ListPlatformApplicationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPlatformApplicationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPlatformApplicationsOutput), lastPage) + }) +} + +const opListSubscriptions = "ListSubscriptions" + +// ListSubscriptionsRequest generates a request for the ListSubscriptions operation. +func (c *SNS) ListSubscriptionsRequest(input *ListSubscriptionsInput) (req *request.Request, output *ListSubscriptionsOutput) { + op := &request.Operation{ + Name: opListSubscriptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSubscriptionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSubscriptionsOutput{} + req.Data = output + return +} + +// Returns a list of the requester's subscriptions. Each call returns a limited +// list of subscriptions, up to 100. If there are more subscriptions, a NextToken +// is also returned. Use the NextToken parameter in a new ListSubscriptions +// call to get further results. 
+func (c *SNS) ListSubscriptions(input *ListSubscriptionsInput) (*ListSubscriptionsOutput, error) { + req, out := c.ListSubscriptionsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListSubscriptionsPages(input *ListSubscriptionsInput, fn func(p *ListSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSubscriptionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSubscriptionsOutput), lastPage) + }) +} + +const opListSubscriptionsByTopic = "ListSubscriptionsByTopic" + +// ListSubscriptionsByTopicRequest generates a request for the ListSubscriptionsByTopic operation. +func (c *SNS) ListSubscriptionsByTopicRequest(input *ListSubscriptionsByTopicInput) (req *request.Request, output *ListSubscriptionsByTopicOutput) { + op := &request.Operation{ + Name: opListSubscriptionsByTopic, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSubscriptionsByTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSubscriptionsByTopicOutput{} + req.Data = output + return +} + +// Returns a list of the subscriptions to a specific topic. Each call returns +// a limited list of subscriptions, up to 100. If there are more subscriptions, +// a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic +// call to get further results. 
+func (c *SNS) ListSubscriptionsByTopic(input *ListSubscriptionsByTopicInput) (*ListSubscriptionsByTopicOutput, error) { + req, out := c.ListSubscriptionsByTopicRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListSubscriptionsByTopicPages(input *ListSubscriptionsByTopicInput, fn func(p *ListSubscriptionsByTopicOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSubscriptionsByTopicRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSubscriptionsByTopicOutput), lastPage) + }) +} + +const opListTopics = "ListTopics" + +// ListTopicsRequest generates a request for the ListTopics operation. +func (c *SNS) ListTopicsRequest(input *ListTopicsInput) (req *request.Request, output *ListTopicsOutput) { + op := &request.Operation{ + Name: opListTopics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTopicsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTopicsOutput{} + req.Data = output + return +} + +// Returns a list of the requester's topics. Each call returns a limited list +// of topics, up to 100. If there are more topics, a NextToken is also returned. +// Use the NextToken parameter in a new ListTopics call to get further results. 
+func (c *SNS) ListTopics(input *ListTopicsInput) (*ListTopicsOutput, error) { + req, out := c.ListTopicsRequest(input) + err := req.Send() + return out, err +} + +func (c *SNS) ListTopicsPages(input *ListTopicsInput, fn func(p *ListTopicsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListTopicsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListTopicsOutput), lastPage) + }) +} + +const opPublish = "Publish" + +// PublishRequest generates a request for the Publish operation. +func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { + op := &request.Operation{ + Name: opPublish, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PublishInput{} + } + + req = c.newRequest(op, input, output) + output = &PublishOutput{} + req.Data = output + return +} + +// Sends a message to all of a topic's subscribed endpoints. When a messageId +// is returned, the message has been saved and Amazon SNS will attempt to deliver +// it to the topic's subscribers shortly. The format of the outgoing message +// to each subscribed endpoint depends on the notification protocol selected. +// +// To use the Publish action for sending a message to a mobile endpoint, such +// as an app on a Kindle device or mobile phone, you must specify the EndpointArn. +// The EndpointArn is returned when making a call with the CreatePlatformEndpoint +// action. The second example below shows a request and response for publishing +// to a mobile endpoint. +func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) { + req, out := c.PublishRequest(input) + err := req.Send() + return out, err +} + +const opRemovePermission = "RemovePermission" + +// RemovePermissionRequest generates a request for the RemovePermission operation. 
+func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { + op := &request.Operation{ + Name: opRemovePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemovePermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemovePermissionOutput{} + req.Data = output + return +} + +// Removes a statement from a topic's access control policy. +func (c *SNS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + err := req.Send() + return out, err +} + +const opSetEndpointAttributes = "SetEndpointAttributes" + +// SetEndpointAttributesRequest generates a request for the SetEndpointAttributes operation. +func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (req *request.Request, output *SetEndpointAttributesOutput) { + op := &request.Operation{ + Name: opSetEndpointAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetEndpointAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetEndpointAttributesOutput{} + req.Data = output + return +} + +// Sets the attributes for an endpoint for a device on one of the supported +// push notification services, such as GCM and APNS. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). 
+func (c *SNS) SetEndpointAttributes(input *SetEndpointAttributesInput) (*SetEndpointAttributesOutput, error) { + req, out := c.SetEndpointAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetPlatformApplicationAttributes = "SetPlatformApplicationAttributes" + +// SetPlatformApplicationAttributesRequest generates a request for the SetPlatformApplicationAttributes operation. +func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicationAttributesInput) (req *request.Request, output *SetPlatformApplicationAttributesOutput) { + op := &request.Operation{ + Name: opSetPlatformApplicationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetPlatformApplicationAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetPlatformApplicationAttributesOutput{} + req.Data = output + return +} + +// Sets the attributes of the platform application object for the supported +// push notification services, such as APNS and GCM. For more information, see +// Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +func (c *SNS) SetPlatformApplicationAttributes(input *SetPlatformApplicationAttributesInput) (*SetPlatformApplicationAttributesOutput, error) { + req, out := c.SetPlatformApplicationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetSubscriptionAttributes = "SetSubscriptionAttributes" + +// SetSubscriptionAttributesRequest generates a request for the SetSubscriptionAttributes operation. 
+func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesInput) (req *request.Request, output *SetSubscriptionAttributesOutput) { + op := &request.Operation{ + Name: opSetSubscriptionAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetSubscriptionAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetSubscriptionAttributesOutput{} + req.Data = output + return +} + +// Allows a subscription owner to set an attribute of the topic to a new value. +func (c *SNS) SetSubscriptionAttributes(input *SetSubscriptionAttributesInput) (*SetSubscriptionAttributesOutput, error) { + req, out := c.SetSubscriptionAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSetTopicAttributes = "SetTopicAttributes" + +// SetTopicAttributesRequest generates a request for the SetTopicAttributes operation. +func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *request.Request, output *SetTopicAttributesOutput) { + op := &request.Operation{ + Name: opSetTopicAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTopicAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetTopicAttributesOutput{} + req.Data = output + return +} + +// Allows a topic owner to set an attribute of the topic to a new value. +func (c *SNS) SetTopicAttributes(input *SetTopicAttributesInput) (*SetTopicAttributesOutput, error) { + req, out := c.SetTopicAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSubscribe = "Subscribe" + +// SubscribeRequest generates a request for the Subscribe operation. 
+func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, output *SubscribeOutput) { + op := &request.Operation{ + Name: opSubscribe, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubscribeInput{} + } + + req = c.newRequest(op, input, output) + output = &SubscribeOutput{} + req.Data = output + return +} + +// Prepares to subscribe an endpoint by sending the endpoint a confirmation +// message. To actually create a subscription, the endpoint owner must call +// the ConfirmSubscription action with the token from the confirmation message. +// Confirmation tokens are valid for three days. +func (c *SNS) Subscribe(input *SubscribeInput) (*SubscribeOutput, error) { + req, out := c.SubscribeRequest(input) + err := req.Send() + return out, err +} + +const opUnsubscribe = "Unsubscribe" + +// UnsubscribeRequest generates a request for the Unsubscribe operation. +func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, output *UnsubscribeOutput) { + op := &request.Operation{ + Name: opUnsubscribe, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnsubscribeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UnsubscribeOutput{} + req.Data = output + return +} + +// Deletes a subscription. If the subscription requires authentication for deletion, +// only the owner of the subscription or the topic's owner can unsubscribe, +// and an AWS signature is required. If the Unsubscribe call does not require +// authentication and the requester is not the subscription owner, a final cancellation +// message is delivered to the endpoint, so that the endpoint owner can easily +// resubscribe to the topic if the Unsubscribe request was unintended. 
+func (c *SNS) Unsubscribe(input *UnsubscribeInput) (*UnsubscribeOutput, error) { + req, out := c.UnsubscribeRequest(input) + err := req.Send() + return out, err +} + +type AddPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account IDs of the users (principals) who will be given access to + // the specified actions. The users must have AWS accounts, but do not need + // to be signed up for this service. + AWSAccountId []*string `type:"list" required:"true"` + + // The action you want to allow for the specified principal(s). + // + // Valid values: any Amazon SNS action name. + ActionName []*string `type:"list" required:"true"` + + // A unique identifier for the new policy statement. + Label *string `type:"string" required:"true"` + + // The ARN of the topic whose access control policy you wish to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionInput) GoString() string { + return s.String() +} + +type AddPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionOutput) GoString() string { + return s.String() +} + +// Input for ConfirmSubscription action. +type ConfirmSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Disallows unauthenticated unsubscribes of the subscription. If the value + // of this parameter is true and the request has an AWS signature, then only + // the topic owner and the subscription owner can unsubscribe the endpoint. + // The unsubscribe action requires AWS authentication. + AuthenticateOnUnsubscribe *string `type:"string"` + + // Short-lived token sent to an endpoint during the Subscribe action. 
+ Token *string `type:"string" required:"true"` + + // The ARN of the topic for which you wish to confirm a subscription. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSubscriptionInput) GoString() string { + return s.String() +} + +// Response for ConfirmSubscriptions action. +type ConfirmSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the created subscription. + SubscriptionArn *string `type:"string"` +} + +// String returns the string representation +func (s ConfirmSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmSubscriptionOutput) GoString() string { + return s.String() +} + +// Input for CreatePlatformApplication action. +type CreatePlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // For a list of attributes, see SetPlatformApplicationAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html) + Attributes map[string]*string `type:"map" required:"true"` + + // Application names must be made up of only uppercase and lowercase ASCII letters, + // numbers, underscores, hyphens, and periods, and must be between 1 and 256 + // characters long. + Name *string `type:"string" required:"true"` + + // The following platforms are supported: ADM (Amazon Device Messaging), APNS + // (Apple Push Notification Service), APNS_SANDBOX, and GCM (Google Cloud Messaging). 
+ Platform *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformApplicationInput) GoString() string { + return s.String() +} + +// Response from CreatePlatformApplication action. +type CreatePlatformApplicationOutput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn is returned. + PlatformApplicationArn *string `type:"string"` +} + +// String returns the string representation +func (s CreatePlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformApplicationOutput) GoString() string { + return s.String() +} + +// Input for CreatePlatformEndpoint action. +type CreatePlatformEndpointInput struct { + _ struct{} `type:"structure"` + + // For a list of attributes, see SetEndpointAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetEndpointAttributes.html). + Attributes map[string]*string `type:"map"` + + // Arbitrary user data to associate with the endpoint. Amazon SNS does not use + // this data. The data must be in UTF-8 format and less than 2KB. + CustomUserData *string `type:"string"` + + // PlatformApplicationArn returned from CreatePlatformApplication is used to + // create a an endpoint. + PlatformApplicationArn *string `type:"string" required:"true"` + + // Unique identifier created by the notification service for an app on a device. + // The specific name for Token will vary, depending on which notification service + // is being used. For example, when using APNS as the notification service, + // you need the device token. Alternatively, when using GCM or ADM, the device + // token equivalent is called the registration ID. 
+ Token *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePlatformEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformEndpointInput) GoString() string { + return s.String() +} + +// Response from CreateEndpoint action. +type CreatePlatformEndpointOutput struct { + _ struct{} `type:"structure"` + + // EndpointArn returned from CreateEndpoint action. + EndpointArn *string `type:"string"` +} + +// String returns the string representation +func (s CreatePlatformEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlatformEndpointOutput) GoString() string { + return s.String() +} + +// Input for CreateTopic action. +type CreateTopicInput struct { + _ struct{} `type:"structure"` + + // The name of the topic you want to create. + // + // Constraints: Topic names must be made up of only uppercase and lowercase + // ASCII letters, numbers, underscores, and hyphens, and must be between 1 and + // 256 characters long. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicInput) GoString() string { + return s.String() +} + +// Response from CreateTopic action. +type CreateTopicOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) assigned to the created topic. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTopicOutput) GoString() string { + return s.String() +} + +// Input for DeleteEndpoint action. 
+type DeleteEndpointInput struct { + _ struct{} `type:"structure"` + + // EndpointArn of endpoint to delete. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointInput) GoString() string { + return s.String() +} + +type DeleteEndpointOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEndpointOutput) GoString() string { + return s.String() +} + +// Input for DeletePlatformApplication action. +type DeletePlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn of platform application object to delete. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlatformApplicationInput) GoString() string { + return s.String() +} + +type DeletePlatformApplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlatformApplicationOutput) GoString() string { + return s.String() +} + +type DeleteTopicInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic you want to delete. 
+ TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicInput) GoString() string { + return s.String() +} + +type DeleteTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTopicOutput) GoString() string { + return s.String() +} + +// Endpoint for mobile app and device. +type Endpoint struct { + _ struct{} `type:"structure"` + + // Attributes for endpoint. + Attributes map[string]*string `type:"map"` + + // EndpointArn for mobile app and device. + EndpointArn *string `type:"string"` +} + +// String returns the string representation +func (s Endpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Endpoint) GoString() string { + return s.String() +} + +// Input for GetEndpointAttributes action. +type GetEndpointAttributesInput struct { + _ struct{} `type:"structure"` + + // EndpointArn for GetEndpointAttributes input. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEndpointAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEndpointAttributesInput) GoString() string { + return s.String() +} + +// Response from GetEndpointAttributes of the EndpointArn. +type GetEndpointAttributesOutput struct { + _ struct{} `type:"structure"` + + // Attributes include the following: + // + // CustomUserData -- arbitrary user data to associate with the endpoint. + // Amazon SNS does not use this data. The data must be in UTF-8 format and less + // than 2KB. 
Enabled -- flag that enables/disables delivery to the endpoint. + // Amazon SNS will set this to false when a notification service indicates to + // Amazon SNS that the endpoint is invalid. Users can set it back to true, typically + // after updating Token. Token -- device token, also referred to as a registration + // id, for an app and mobile device. This is returned from the notification + // service when an app and mobile device are registered with the notification + // service. + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetEndpointAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEndpointAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetPlatformApplicationAttributes action. +type GetPlatformApplicationAttributesInput struct { + _ struct{} `type:"structure"` + + // PlatformApplicationArn for GetPlatformApplicationAttributesInput. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPlatformApplicationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPlatformApplicationAttributesInput) GoString() string { + return s.String() +} + +// Response for GetPlatformApplicationAttributes action. +type GetPlatformApplicationAttributesOutput struct { + _ struct{} `type:"structure"` + + // Attributes include the following: + // + // EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications + // should be sent. EventEndpointDeleted -- Topic ARN to which EndpointDeleted + // event notifications should be sent. EventEndpointUpdated -- Topic ARN to + // which EndpointUpdate event notifications should be sent. 
EventDeliveryFailure + // -- Topic ARN to which DeliveryFailure event notifications should be sent + // upon Direct Publish delivery failure (permanent) to one of the application's + // endpoints. + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetPlatformApplicationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPlatformApplicationAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetSubscriptionAttributes. +type GetSubscriptionAttributesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription whose properties you want to get. + SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSubscriptionAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSubscriptionAttributesInput) GoString() string { + return s.String() +} + +// Response for GetSubscriptionAttributes action. +type GetSubscriptionAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of the subscription's attributes. 
Attributes in this map include the + // following: + // + // SubscriptionArn -- the subscription's ARN TopicArn -- the topic ARN that + // the subscription is associated with Owner -- the AWS account ID of the subscription's + // owner ConfirmationWasAuthenticated -- true if the subscription confirmation + // request was authenticated DeliveryPolicy -- the JSON serialization of the + // subscription's delivery policy EffectiveDeliveryPolicy -- the JSON serialization + // of the effective delivery policy that takes into account the topic delivery + // policy and account system defaults + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetSubscriptionAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSubscriptionAttributesOutput) GoString() string { + return s.String() +} + +// Input for GetTopicAttributes action. +type GetTopicAttributesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic whose properties you want to get. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTopicAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicAttributesInput) GoString() string { + return s.String() +} + +// Response for GetTopicAttributes action. +type GetTopicAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of the topic's attributes. 
Attributes in this map include the following: + // + // TopicArn -- the topic's ARN Owner -- the AWS account ID of the topic's + // owner Policy -- the JSON serialization of the topic's access control policy + // DisplayName -- the human-readable name used in the "From" field for notifications + // to email and email-json endpoints SubscriptionsPending -- the number of + // subscriptions pending confirmation on this topic SubscriptionsConfirmed + // -- the number of confirmed subscriptions on this topic SubscriptionsDeleted + // -- the number of deleted subscriptions on this topic DeliveryPolicy -- the + // JSON serialization of the topic's delivery policy EffectiveDeliveryPolicy + // -- the JSON serialization of the effective delivery policy that takes into + // account system defaults + Attributes map[string]*string `type:"map"` +} + +// String returns the string representation +func (s GetTopicAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTopicAttributesOutput) GoString() string { + return s.String() +} + +// Input for ListEndpointsByPlatformApplication action. +type ListEndpointsByPlatformApplicationInput struct { + _ struct{} `type:"structure"` + + // NextToken string is used when calling ListEndpointsByPlatformApplication + // action to retrieve additional records that are available after the first + // page results. + NextToken *string `type:"string"` + + // PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListEndpointsByPlatformApplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointsByPlatformApplicationInput) GoString() string { + return s.String() +} + +// Response for ListEndpointsByPlatformApplication action. 
+type ListEndpointsByPlatformApplicationOutput struct { + _ struct{} `type:"structure"` + + // Endpoints returned for ListEndpointsByPlatformApplication action. + Endpoints []*Endpoint `type:"list"` + + // NextToken string is returned when calling ListEndpointsByPlatformApplication + // action if additional records are available after the first page results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListEndpointsByPlatformApplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEndpointsByPlatformApplicationOutput) GoString() string { + return s.String() +} + +// Input for ListPlatformApplications action. +type ListPlatformApplicationsInput struct { + _ struct{} `type:"structure"` + + // NextToken string is used when calling ListPlatformApplications action to + // retrieve additional records that are available after the first page results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListPlatformApplicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPlatformApplicationsInput) GoString() string { + return s.String() +} + +// Response for ListPlatformApplications action. +type ListPlatformApplicationsOutput struct { + _ struct{} `type:"structure"` + + // NextToken string is returned when calling ListPlatformApplications action + // if additional records are available after the first page results. + NextToken *string `type:"string"` + + // Platform applications returned when calling ListPlatformApplications action. 
+ PlatformApplications []*PlatformApplication `type:"list"` +} + +// String returns the string representation +func (s ListPlatformApplicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPlatformApplicationsOutput) GoString() string { + return s.String() +} + +// Input for ListSubscriptionsByTopic action. +type ListSubscriptionsByTopicInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListSubscriptionsByTopic request. + NextToken *string `type:"string"` + + // The ARN of the topic for which you wish to find subscriptions. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListSubscriptionsByTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsByTopicInput) GoString() string { + return s.String() +} + +// Response for ListSubscriptionsByTopic action. +type ListSubscriptionsByTopicOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListSubscriptionsByTopic request. This element + // is returned if there are more subscriptions to retrieve. + NextToken *string `type:"string"` + + // A list of subscriptions. + Subscriptions []*Subscription `type:"list"` +} + +// String returns the string representation +func (s ListSubscriptionsByTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsByTopicOutput) GoString() string { + return s.String() +} + +// Input for ListSubscriptions action. +type ListSubscriptionsInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListSubscriptions request. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListSubscriptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsInput) GoString() string { + return s.String() +} + +// Response for ListSubscriptions action +type ListSubscriptionsOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListSubscriptions request. This element is + // returned if there are more subscriptions to retrieve. + NextToken *string `type:"string"` + + // A list of subscriptions. + Subscriptions []*Subscription `type:"list"` +} + +// String returns the string representation +func (s ListSubscriptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSubscriptionsOutput) GoString() string { + return s.String() +} + +type ListTopicsInput struct { + _ struct{} `type:"structure"` + + // Token returned by the previous ListTopics request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListTopicsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicsInput) GoString() string { + return s.String() +} + +// Response for ListTopics action. +type ListTopicsOutput struct { + _ struct{} `type:"structure"` + + // Token to pass along to the next ListTopics request. This element is returned + // if there are additional topics to retrieve. + NextToken *string `type:"string"` + + // A list of topic ARNs. + Topics []*Topic `type:"list"` +} + +// String returns the string representation +func (s ListTopicsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTopicsOutput) GoString() string { + return s.String() +} + +// The user-specified message attribute value. 
For string data types, the value +// attribute has the same restrictions on the content as the message body. For +// more information, see Publish (http://docs.aws.amazon.com/sns/latest/api/API_Publish.html). +// +// Name, type, and value must not be empty or null. In addition, the message +// body should not be empty or null. All parts of the message attribute, including +// name, type, and value, are included in the message size restriction, which +// is currently 256 KB (262,144 bytes). For more information, see Using Amazon +// SNS Message Attributes (http://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html). +type MessageAttributeValue struct { + _ struct{} `type:"structure"` + + // Binary type attributes can store any binary data, for example, compressed + // data, encrypted data, or images. + BinaryValue []byte `type:"blob"` + + // Amazon SNS supports the following logical data types: String, Number, and + // Binary. For more information, see Message Attribute Data Types (http://docs.aws.amazon.com/sns/latest/dg/SNSMessageAttributes.html#SNSMessageAttributes.DataTypes). + DataType *string `type:"string" required:"true"` + + // Strings are Unicode with UTF8 binary encoding. For a list of code values, + // see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + StringValue *string `type:"string"` +} + +// String returns the string representation +func (s MessageAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageAttributeValue) GoString() string { + return s.String() +} + +// Platform application object. +type PlatformApplication struct { + _ struct{} `type:"structure"` + + // Attributes for platform application object. + Attributes map[string]*string `type:"map"` + + // PlatformApplicationArn for platform application object. 
+ PlatformApplicationArn *string `type:"string"` +} + +// String returns the string representation +func (s PlatformApplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlatformApplication) GoString() string { + return s.String() +} + +// Input for Publish action. +type PublishInput struct { + _ struct{} `type:"structure"` + + // The message you want to send to the topic. + // + // If you want to send the same message to all transport protocols, include + // the text of the message as a String value. + // + // If you want to send different messages for each transport protocol, set + // the value of the MessageStructure parameter to json and use a JSON object + // for the Message parameter. See the Examples section for the format of the + // JSON object. + // + // Constraints: Messages must be UTF-8 encoded strings at most 256 KB in size + // (262144 bytes, not 262144 characters). + // + // JSON-specific constraints: Keys in the JSON object that correspond to supported + // transport protocols must have simple JSON string values. The values will + // be parsed (unescaped) before they are used in outgoing messages. Outbound + // notifications are JSON encoded (meaning that the characters will be reescaped + // for sending). Values have a minimum length of 0 (the empty string, "", is + // allowed). Values have a maximum length bounded by the overall message size + // (so, including multiple protocols may limit message sizes). Non-string values + // will cause the key to be ignored. Keys that do not correspond to supported + // transport protocols are ignored. Duplicate keys are not allowed. Failure + // to parse or validate any key or value in the message will cause the Publish + // call to return an error (no partial delivery). + Message *string `type:"string" required:"true"` + + // Message attributes for Publish action. 
+ MessageAttributes map[string]*MessageAttributeValue `locationNameKey:"Name" locationNameValue:"Value" type:"map"` + + // Set MessageStructure to json if you want to send a different message for + // each protocol. For example, using one publish action, you can send a short + // message to your SMS subscribers and a longer message to your email subscribers. + // If you set MessageStructure to json, the value of the Message parameter must: + // + // be a syntactically valid JSON object; and contain at least a top-level + // JSON key of "default" with a value that is a string. You can define other + // top-level keys that define the message you want to send to a specific transport + // protocol (e.g., "http"). + // + // For information about sending different messages for each protocol using + // the AWS Management Console, go to Create Different Messages for Each Protocol + // (http://docs.aws.amazon.com/sns/latest/gsg/Publish.html#sns-message-formatting-by-protocol) + // in the Amazon Simple Notification Service Getting Started Guide. + // + // Valid value: json + MessageStructure *string `type:"string"` + + // Optional parameter to be used as the "Subject" line when the message is delivered + // to email endpoints. This field will also be included, if present, in the + // standard JSON messages delivered to other endpoints. + // + // Constraints: Subjects must be ASCII text that begins with a letter, number, + // or punctuation mark; must not include line breaks or control characters; + // and must be less than 100 characters long. + Subject *string `type:"string"` + + // Either TopicArn or EndpointArn, but not both. + TargetArn *string `type:"string"` + + // The topic you want to publish to. 
+ TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s PublishInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishInput) GoString() string { + return s.String() +} + +// Response for Publish action. +type PublishOutput struct { + _ struct{} `type:"structure"` + + // Unique identifier assigned to the published message. + // + // Length Constraint: Maximum 100 characters + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s PublishOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishOutput) GoString() string { + return s.String() +} + +// Input for RemovePermission action. +type RemovePermissionInput struct { + _ struct{} `type:"structure"` + + // The unique label of the statement you want to remove. + Label *string `type:"string" required:"true"` + + // The ARN of the topic whose access control policy you wish to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionInput) GoString() string { + return s.String() +} + +type RemovePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemovePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionOutput) GoString() string { + return s.String() +} + +// Input for SetEndpointAttributes action. +type SetEndpointAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of the endpoint attributes. Attributes in this map include the following: + // + // CustomUserData -- arbitrary user data to associate with the endpoint. 
+ // Amazon SNS does not use this data. The data must be in UTF-8 format and less + // than 2KB. Enabled -- flag that enables/disables delivery to the endpoint. + // Amazon SNS will set this to false when a notification service indicates to + // Amazon SNS that the endpoint is invalid. Users can set it back to true, typically + // after updating Token. Token -- device token, also referred to as a registration + // id, for an app and mobile device. This is returned from the notification + // service when an app and mobile device are registered with the notification + // service. + Attributes map[string]*string `type:"map" required:"true"` + + // EndpointArn used for SetEndpointAttributes action. + EndpointArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetEndpointAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetEndpointAttributesInput) GoString() string { + return s.String() +} + +type SetEndpointAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetEndpointAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetEndpointAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetPlatformApplicationAttributes action. +type SetPlatformApplicationAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of the platform application attributes. Attributes in this map include + // the following: + // + // PlatformCredential -- The credential received from the notification service. + // For APNS/APNS_SANDBOX, PlatformCredential is "private key". For GCM, PlatformCredential + // is "API key". For ADM, PlatformCredential is "client secret". PlatformPrincipal + // -- The principal received from the notification service. 
For APNS/APNS_SANDBOX, + // PlatformPrincipal is "SSL certificate". For GCM, PlatformPrincipal is not + // applicable. For ADM, PlatformPrincipal is "client id". EventEndpointCreated + // -- Topic ARN to which EndpointCreated event notifications should be sent. + // EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications + // should be sent. EventEndpointUpdated -- Topic ARN to which EndpointUpdate + // event notifications should be sent. EventDeliveryFailure -- Topic ARN to + // which DeliveryFailure event notifications should be sent upon Direct Publish + // delivery failure (permanent) to one of the application's endpoints. + Attributes map[string]*string `type:"map" required:"true"` + + // PlatformApplicationArn for SetPlatformApplicationAttributes action. + PlatformApplicationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetPlatformApplicationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPlatformApplicationAttributesInput) GoString() string { + return s.String() +} + +type SetPlatformApplicationAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetPlatformApplicationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetPlatformApplicationAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetSubscriptionAttributes action. +type SetSubscriptionAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute you want to set. Only a subset of the subscriptions + // attributes are mutable. + // + // Valid values: DeliveryPolicy | RawMessageDelivery + AttributeName *string `type:"string" required:"true"` + + // The new value for the attribute in JSON format. 
+ AttributeValue *string `type:"string"` + + // The ARN of the subscription to modify. + SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetSubscriptionAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSubscriptionAttributesInput) GoString() string { + return s.String() +} + +type SetSubscriptionAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetSubscriptionAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSubscriptionAttributesOutput) GoString() string { + return s.String() +} + +// Input for SetTopicAttributes action. +type SetTopicAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute you want to set. Only a subset of the topic's attributes + // are mutable. + // + // Valid values: Policy | DisplayName | DeliveryPolicy + AttributeName *string `type:"string" required:"true"` + + // The new value for the attribute. + AttributeValue *string `type:"string"` + + // The ARN of the topic to modify. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetTopicAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTopicAttributesInput) GoString() string { + return s.String() +} + +type SetTopicAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTopicAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetTopicAttributesOutput) GoString() string { + return s.String() +} + +// Input for Subscribe action. 
+type SubscribeInput struct { + _ struct{} `type:"structure"` + + // The endpoint that you want to receive notifications. Endpoints vary by protocol: + // + // For the http protocol, the endpoint is an URL beginning with "http://" + // For the https protocol, the endpoint is a URL beginning with "https://" For + // the email protocol, the endpoint is an email address For the email-json protocol, + // the endpoint is an email address For the sms protocol, the endpoint is a + // phone number of an SMS-enabled device For the sqs protocol, the endpoint + // is the ARN of an Amazon SQS queue For the application protocol, the endpoint + // is the EndpointArn of a mobile app and device. + Endpoint *string `type:"string"` + + // The protocol you want to use. Supported protocols include: + // + // http -- delivery of JSON-encoded message via HTTP POST https -- delivery + // of JSON-encoded message via HTTPS POST email -- delivery of message via + // SMTP email-json -- delivery of JSON-encoded message via SMTP sms -- delivery + // of message via SMS sqs -- delivery of JSON-encoded message to an Amazon + // SQS queue application -- delivery of JSON-encoded message to an EndpointArn + // for a mobile app and device. + Protocol *string `type:"string" required:"true"` + + // The ARN of the topic you want to subscribe to. + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SubscribeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeInput) GoString() string { + return s.String() +} + +// Response for Subscribe action. +type SubscribeOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription, if the service was able to create a subscription + // immediately (without requiring endpoint owner confirmation). 
+ SubscriptionArn *string `type:"string"` +} + +// String returns the string representation +func (s SubscribeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscribeOutput) GoString() string { + return s.String() +} + +// A wrapper type for the attributes of an Amazon SNS subscription. +type Subscription struct { + _ struct{} `type:"structure"` + + // The subscription's endpoint (format depends on the protocol). + Endpoint *string `type:"string"` + + // The subscription's owner. + Owner *string `type:"string"` + + // The subscription's protocol. + Protocol *string `type:"string"` + + // The subscription's ARN. + SubscriptionArn *string `type:"string"` + + // The ARN of the subscription's topic. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s Subscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subscription) GoString() string { + return s.String() +} + +// A wrapper type for the topic's Amazon Resource Name (ARN). To retrieve a +// topic's attributes, use GetTopicAttributes. +type Topic struct { + _ struct{} `type:"structure"` + + // The topic's ARN. + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s Topic) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Topic) GoString() string { + return s.String() +} + +// Input for Unsubscribe action. +type UnsubscribeInput struct { + _ struct{} `type:"structure"` + + // The ARN of the subscription to be deleted. 
+ SubscriptionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UnsubscribeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeInput) GoString() string { + return s.String() +} + +type UnsubscribeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UnsubscribeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsubscribeOutput) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,542 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sns_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sns" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSNS_AddPermission() { + svc := sns.New(session.New()) + + params := &sns.AddPermissionInput{ + AWSAccountId: []*string{ // Required + aws.String("delegate"), // Required + // More values... + }, + ActionName: []*string{ // Required + aws.String("action"), // Required + // More values... + }, + Label: aws.String("label"), // Required + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ConfirmSubscription() { + svc := sns.New(session.New()) + + params := &sns.ConfirmSubscriptionInput{ + Token: aws.String("token"), // Required + TopicArn: aws.String("topicARN"), // Required + AuthenticateOnUnsubscribe: aws.String("authenticateOnUnsubscribe"), + } + resp, err := svc.ConfirmSubscription(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CreatePlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.CreatePlatformApplicationInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + Name: aws.String("String"), // Required + Platform: aws.String("String"), // Required + } + resp, err := svc.CreatePlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_CreatePlatformEndpoint() { + svc := sns.New(session.New()) + + params := &sns.CreatePlatformEndpointInput{ + PlatformApplicationArn: aws.String("String"), // Required + Token: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + CustomUserData: aws.String("String"), + } + resp, err := svc.CreatePlatformEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSNS_CreateTopic() { + svc := sns.New(session.New()) + + params := &sns.CreateTopicInput{ + Name: aws.String("topicName"), // Required + } + resp, err := svc.CreateTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_DeleteEndpoint() { + svc := sns.New(session.New()) + + params := &sns.DeleteEndpointInput{ + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.DeleteEndpoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_DeletePlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.DeletePlatformApplicationInput{ + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.DeletePlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_DeleteTopic() { + svc := sns.New(session.New()) + + params := &sns.DeleteTopicInput{ + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.DeleteTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSNS_GetEndpointAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetEndpointAttributesInput{ + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.GetEndpointAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetPlatformApplicationAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetPlatformApplicationAttributesInput{ + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.GetPlatformApplicationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetSubscriptionAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetSubscriptionAttributesInput{ + SubscriptionArn: aws.String("subscriptionARN"), // Required + } + resp, err := svc.GetSubscriptionAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_GetTopicAttributes() { + svc := sns.New(session.New()) + + params := &sns.GetTopicAttributesInput{ + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.GetTopicAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSNS_ListEndpointsByPlatformApplication() { + svc := sns.New(session.New()) + + params := &sns.ListEndpointsByPlatformApplicationInput{ + PlatformApplicationArn: aws.String("String"), // Required + NextToken: aws.String("String"), + } + resp, err := svc.ListEndpointsByPlatformApplication(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListPlatformApplications() { + svc := sns.New(session.New()) + + params := &sns.ListPlatformApplicationsInput{ + NextToken: aws.String("String"), + } + resp, err := svc.ListPlatformApplications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListSubscriptions() { + svc := sns.New(session.New()) + + params := &sns.ListSubscriptionsInput{ + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListSubscriptions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_ListSubscriptionsByTopic() { + svc := sns.New(session.New()) + + params := &sns.ListSubscriptionsByTopicInput{ + TopicArn: aws.String("topicARN"), // Required + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListSubscriptionsByTopic(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSNS_ListTopics() { + svc := sns.New(session.New()) + + params := &sns.ListTopicsInput{ + NextToken: aws.String("nextToken"), + } + resp, err := svc.ListTopics(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_Publish() { + svc := sns.New(session.New()) + + params := &sns.PublishInput{ + Message: aws.String("message"), // Required + MessageAttributes: map[string]*sns.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryValue: []byte("PAYLOAD"), + StringValue: aws.String("String"), + }, + // More values... + }, + MessageStructure: aws.String("messageStructure"), + Subject: aws.String("subject"), + TargetArn: aws.String("String"), + TopicArn: aws.String("topicARN"), + } + resp, err := svc.Publish(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_RemovePermission() { + svc := sns.New(session.New()) + + params := &sns.RemovePermissionInput{ + Label: aws.String("label"), // Required + TopicArn: aws.String("topicARN"), // Required + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetEndpointAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetEndpointAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... 
+ }, + EndpointArn: aws.String("String"), // Required + } + resp, err := svc.SetEndpointAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetPlatformApplicationAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetPlatformApplicationAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + PlatformApplicationArn: aws.String("String"), // Required + } + resp, err := svc.SetPlatformApplicationAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetSubscriptionAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetSubscriptionAttributesInput{ + AttributeName: aws.String("attributeName"), // Required + SubscriptionArn: aws.String("subscriptionARN"), // Required + AttributeValue: aws.String("attributeValue"), + } + resp, err := svc.SetSubscriptionAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_SetTopicAttributes() { + svc := sns.New(session.New()) + + params := &sns.SetTopicAttributesInput{ + AttributeName: aws.String("attributeName"), // Required + TopicArn: aws.String("topicARN"), // Required + AttributeValue: aws.String("attributeValue"), + } + resp, err := svc.SetTopicAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_Subscribe() { + svc := sns.New(session.New()) + + params := &sns.SubscribeInput{ + Protocol: aws.String("protocol"), // Required + TopicArn: aws.String("topicARN"), // Required + Endpoint: aws.String("endpoint"), + } + resp, err := svc.Subscribe(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSNS_Unsubscribe() { + svc := sns.New(session.New()) + + params := &sns.UnsubscribeInput{ + SubscriptionArn: aws.String("subscriptionARN"), // Required + } + resp, err := svc.Unsubscribe(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,98 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package sns + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Amazon Simple Notification Service (Amazon SNS) is a web service that enables +// you to build distributed web-enabled applications. Applications can use Amazon +// SNS to easily push real-time notification messages to interested subscribers +// over multiple delivery protocols. For more information about this product +// see http://aws.amazon.com/sns (http://aws.amazon.com/sns/). For detailed +// information about Amazon SNS features and their associated API calls, see +// the Amazon SNS Developer Guide (http://docs.aws.amazon.com/sns/latest/dg/). +// +// We also provide SDKs that enable you to access Amazon SNS from your preferred +// programming language. The SDKs contain functionality that automatically takes +// care of tasks such as: cryptographically signing your service requests, retrying +// requests, and handling error responses. For a list of available SDKs, go +// to Tools for Amazon Web Services (http://aws.amazon.com/tools/). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SNS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sns" + +// New creates a new instance of the SNS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SNS client from just a session. 
+// svc := sns.New(mySession) +// +// // Create a SNS client with additional configuration +// svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SNS { + svc := &SNS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-03-31", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SNS operation and runs any +// custom request initialization. 
+func (c *SNS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sns/snsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,124 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package snsiface provides an interface for the Amazon Simple Notification Service. +package snsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sns" +) + +// SNSAPI is the interface type for sns.SNS. 
+type SNSAPI interface { + AddPermissionRequest(*sns.AddPermissionInput) (*request.Request, *sns.AddPermissionOutput) + + AddPermission(*sns.AddPermissionInput) (*sns.AddPermissionOutput, error) + + ConfirmSubscriptionRequest(*sns.ConfirmSubscriptionInput) (*request.Request, *sns.ConfirmSubscriptionOutput) + + ConfirmSubscription(*sns.ConfirmSubscriptionInput) (*sns.ConfirmSubscriptionOutput, error) + + CreatePlatformApplicationRequest(*sns.CreatePlatformApplicationInput) (*request.Request, *sns.CreatePlatformApplicationOutput) + + CreatePlatformApplication(*sns.CreatePlatformApplicationInput) (*sns.CreatePlatformApplicationOutput, error) + + CreatePlatformEndpointRequest(*sns.CreatePlatformEndpointInput) (*request.Request, *sns.CreatePlatformEndpointOutput) + + CreatePlatformEndpoint(*sns.CreatePlatformEndpointInput) (*sns.CreatePlatformEndpointOutput, error) + + CreateTopicRequest(*sns.CreateTopicInput) (*request.Request, *sns.CreateTopicOutput) + + CreateTopic(*sns.CreateTopicInput) (*sns.CreateTopicOutput, error) + + DeleteEndpointRequest(*sns.DeleteEndpointInput) (*request.Request, *sns.DeleteEndpointOutput) + + DeleteEndpoint(*sns.DeleteEndpointInput) (*sns.DeleteEndpointOutput, error) + + DeletePlatformApplicationRequest(*sns.DeletePlatformApplicationInput) (*request.Request, *sns.DeletePlatformApplicationOutput) + + DeletePlatformApplication(*sns.DeletePlatformApplicationInput) (*sns.DeletePlatformApplicationOutput, error) + + DeleteTopicRequest(*sns.DeleteTopicInput) (*request.Request, *sns.DeleteTopicOutput) + + DeleteTopic(*sns.DeleteTopicInput) (*sns.DeleteTopicOutput, error) + + GetEndpointAttributesRequest(*sns.GetEndpointAttributesInput) (*request.Request, *sns.GetEndpointAttributesOutput) + + GetEndpointAttributes(*sns.GetEndpointAttributesInput) (*sns.GetEndpointAttributesOutput, error) + + GetPlatformApplicationAttributesRequest(*sns.GetPlatformApplicationAttributesInput) (*request.Request, *sns.GetPlatformApplicationAttributesOutput) + + 
GetPlatformApplicationAttributes(*sns.GetPlatformApplicationAttributesInput) (*sns.GetPlatformApplicationAttributesOutput, error) + + GetSubscriptionAttributesRequest(*sns.GetSubscriptionAttributesInput) (*request.Request, *sns.GetSubscriptionAttributesOutput) + + GetSubscriptionAttributes(*sns.GetSubscriptionAttributesInput) (*sns.GetSubscriptionAttributesOutput, error) + + GetTopicAttributesRequest(*sns.GetTopicAttributesInput) (*request.Request, *sns.GetTopicAttributesOutput) + + GetTopicAttributes(*sns.GetTopicAttributesInput) (*sns.GetTopicAttributesOutput, error) + + ListEndpointsByPlatformApplicationRequest(*sns.ListEndpointsByPlatformApplicationInput) (*request.Request, *sns.ListEndpointsByPlatformApplicationOutput) + + ListEndpointsByPlatformApplication(*sns.ListEndpointsByPlatformApplicationInput) (*sns.ListEndpointsByPlatformApplicationOutput, error) + + ListEndpointsByPlatformApplicationPages(*sns.ListEndpointsByPlatformApplicationInput, func(*sns.ListEndpointsByPlatformApplicationOutput, bool) bool) error + + ListPlatformApplicationsRequest(*sns.ListPlatformApplicationsInput) (*request.Request, *sns.ListPlatformApplicationsOutput) + + ListPlatformApplications(*sns.ListPlatformApplicationsInput) (*sns.ListPlatformApplicationsOutput, error) + + ListPlatformApplicationsPages(*sns.ListPlatformApplicationsInput, func(*sns.ListPlatformApplicationsOutput, bool) bool) error + + ListSubscriptionsRequest(*sns.ListSubscriptionsInput) (*request.Request, *sns.ListSubscriptionsOutput) + + ListSubscriptions(*sns.ListSubscriptionsInput) (*sns.ListSubscriptionsOutput, error) + + ListSubscriptionsPages(*sns.ListSubscriptionsInput, func(*sns.ListSubscriptionsOutput, bool) bool) error + + ListSubscriptionsByTopicRequest(*sns.ListSubscriptionsByTopicInput) (*request.Request, *sns.ListSubscriptionsByTopicOutput) + + ListSubscriptionsByTopic(*sns.ListSubscriptionsByTopicInput) (*sns.ListSubscriptionsByTopicOutput, error) + + 
ListSubscriptionsByTopicPages(*sns.ListSubscriptionsByTopicInput, func(*sns.ListSubscriptionsByTopicOutput, bool) bool) error + + ListTopicsRequest(*sns.ListTopicsInput) (*request.Request, *sns.ListTopicsOutput) + + ListTopics(*sns.ListTopicsInput) (*sns.ListTopicsOutput, error) + + ListTopicsPages(*sns.ListTopicsInput, func(*sns.ListTopicsOutput, bool) bool) error + + PublishRequest(*sns.PublishInput) (*request.Request, *sns.PublishOutput) + + Publish(*sns.PublishInput) (*sns.PublishOutput, error) + + RemovePermissionRequest(*sns.RemovePermissionInput) (*request.Request, *sns.RemovePermissionOutput) + + RemovePermission(*sns.RemovePermissionInput) (*sns.RemovePermissionOutput, error) + + SetEndpointAttributesRequest(*sns.SetEndpointAttributesInput) (*request.Request, *sns.SetEndpointAttributesOutput) + + SetEndpointAttributes(*sns.SetEndpointAttributesInput) (*sns.SetEndpointAttributesOutput, error) + + SetPlatformApplicationAttributesRequest(*sns.SetPlatformApplicationAttributesInput) (*request.Request, *sns.SetPlatformApplicationAttributesOutput) + + SetPlatformApplicationAttributes(*sns.SetPlatformApplicationAttributesInput) (*sns.SetPlatformApplicationAttributesOutput, error) + + SetSubscriptionAttributesRequest(*sns.SetSubscriptionAttributesInput) (*request.Request, *sns.SetSubscriptionAttributesOutput) + + SetSubscriptionAttributes(*sns.SetSubscriptionAttributesInput) (*sns.SetSubscriptionAttributesOutput, error) + + SetTopicAttributesRequest(*sns.SetTopicAttributesInput) (*request.Request, *sns.SetTopicAttributesOutput) + + SetTopicAttributes(*sns.SetTopicAttributesInput) (*sns.SetTopicAttributesOutput, error) + + SubscribeRequest(*sns.SubscribeInput) (*request.Request, *sns.SubscribeOutput) + + Subscribe(*sns.SubscribeInput) (*sns.SubscribeOutput, error) + + UnsubscribeRequest(*sns.UnsubscribeInput) (*request.Request, *sns.UnsubscribeOutput) + + Unsubscribe(*sns.UnsubscribeInput) (*sns.UnsubscribeOutput, error) +} + +var _ SNSAPI = (*sns.SNS)(nil) diff 
-Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1822 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sqs provides a client for Amazon Simple Queue Service. +package sqs + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddPermission = "AddPermission" + +// AddPermissionRequest generates a request for the AddPermission operation. +func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { + op := &request.Operation{ + Name: opAddPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddPermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &AddPermissionOutput{} + req.Data = output + return +} + +// Adds a permission to a queue for a specific principal (http://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P). +// This allows for sharing access to the queue. +// +// When you create a queue, you have full control access rights for the queue. +// Only you (as owner of the queue) can grant or deny permissions to the queue. +// For more information about these permissions, see Shared Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html) +// in the Amazon SQS Developer Guide. 
+// +// AddPermission writes an Amazon SQS-generated policy. If you want to write +// your own policy, use SetQueueAttributes to upload your policy. For more information +// about writing your own policy, see Using The Access Policy Language (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AccessPolicyLanguage.html) +// in the Amazon SQS Developer Guide. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { + req, out := c.AddPermissionRequest(input) + err := req.Send() + return out, err +} + +const opChangeMessageVisibility = "ChangeMessageVisibility" + +// ChangeMessageVisibilityRequest generates a request for the ChangeMessageVisibility operation. +func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput) (req *request.Request, output *ChangeMessageVisibilityOutput) { + op := &request.Operation{ + Name: opChangeMessageVisibility, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangeMessageVisibilityInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ChangeMessageVisibilityOutput{} + req.Data = output + return +} + +// Changes the visibility timeout of a specified message in a queue to a new +// value. The maximum allowed timeout value you can set the value to is 12 hours. +// This means you can't extend the timeout of a message in an existing queue +// to more than a total visibility timeout of 12 hours. 
(For more information +// visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) +// in the Amazon SQS Developer Guide.) +// +// For example, let's say you have a message and its default message visibility +// timeout is 5 minutes. After 3 minutes, you call ChangeMessageVisiblity with +// a timeout of 10 minutes. At that time, the timeout for the message would +// be extended by 10 minutes beyond the time of the ChangeMessageVisibility +// call. This results in a total visibility timeout of 13 minutes. You can continue +// to call ChangeMessageVisibility to extend the visibility timeout to a maximum +// of 12 hours. If you try to extend beyond 12 hours, the request will be rejected. +// +// There is a 120,000 limit for the number of inflight messages per queue. +// Messages are inflight after they have been received from the queue by a consuming +// component, but have not yet been deleted from the queue. If you reach the +// 120,000 limit, you will receive an OverLimit error message from Amazon SQS. +// To help avoid reaching the limit, you should delete the messages from the +// queue after they have been processed. You can also increase the number of +// queues you use to process the messages. +// +// If you attempt to set the VisibilityTimeout to an amount more than the maximum +// time left, Amazon SQS returns an error. It will not automatically recalculate +// and increase the timeout to the maximum time remaining. Unlike with a queue, +// when you change the visibility timeout for a specific message, that timeout +// value is applied immediately but is not saved in memory for that message. +// If you don't delete a message after it is received, the visibility timeout +// for the message the next time it is received reverts to the original timeout +// value, not the value you set with the ChangeMessageVisibility action. 
+func (c *SQS) ChangeMessageVisibility(input *ChangeMessageVisibilityInput) (*ChangeMessageVisibilityOutput, error) { + req, out := c.ChangeMessageVisibilityRequest(input) + err := req.Send() + return out, err +} + +const opChangeMessageVisibilityBatch = "ChangeMessageVisibilityBatch" + +// ChangeMessageVisibilityBatchRequest generates a request for the ChangeMessageVisibilityBatch operation. +func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibilityBatchInput) (req *request.Request, output *ChangeMessageVisibilityBatchOutput) { + op := &request.Operation{ + Name: opChangeMessageVisibilityBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangeMessageVisibilityBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &ChangeMessageVisibilityBatchOutput{} + req.Data = output + return +} + +// Changes the visibility timeout of multiple messages. This is a batch version +// of ChangeMessageVisibility. The result of the action on each message is reported +// individually in the response. You can send up to 10 ChangeMessageVisibility +// requests with each ChangeMessageVisibilityBatch action. +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200. Some API actions take lists of parameters. +// These lists are specified using the param.n notation. Values of n are integers +// starting from 1. For example, a parameter list with two elements looks like +// this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) ChangeMessageVisibilityBatch(input *ChangeMessageVisibilityBatchInput) (*ChangeMessageVisibilityBatchOutput, error) { + req, out := c.ChangeMessageVisibilityBatchRequest(input) + err := req.Send() + return out, err +} + +const opCreateQueue = "CreateQueue" + +// CreateQueueRequest generates a request for the CreateQueue operation. 
+func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) { + op := &request.Operation{ + Name: opCreateQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateQueueInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateQueueOutput{} + req.Data = output + return +} + +// Creates a new queue, or returns the URL of an existing one. When you request +// CreateQueue, you provide a name for the queue. To successfully create a new +// queue, you must provide a name that is unique within the scope of your own +// queues. +// +// If you delete a queue, you must wait at least 60 seconds before creating +// a queue with the same name. +// +// You may pass one or more attributes in the request. If you do not provide +// a value for any attribute, the queue will have the default value for that +// attribute. Permitted attributes are the same that can be set using SetQueueAttributes. +// +// Use GetQueueUrl to get a queue's URL. GetQueueUrl requires only the QueueName +// parameter. +// +// If you provide the name of an existing queue, along with the exact names +// and values of all the queue's attributes, CreateQueue returns the queue URL +// for the existing queue. If the queue name, attribute names, or attribute +// values do not match an existing queue, CreateQueue returns an error. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) { + req, out := c.CreateQueueRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMessage = "DeleteMessage" + +// DeleteMessageRequest generates a request for the DeleteMessage operation. 
+func (c *SQS) DeleteMessageRequest(input *DeleteMessageInput) (req *request.Request, output *DeleteMessageOutput) { + op := &request.Operation{ + Name: opDeleteMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMessageInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMessageOutput{} + req.Data = output + return +} + +// Deletes the specified message from the specified queue. You specify the message +// by using the message's receipt handle and not the message ID you received +// when you sent the message. Even if the message is locked by another reader +// due to the visibility timeout setting, it is still deleted from the queue. +// If you leave a message in the queue for longer than the queue's configured +// retention period, Amazon SQS automatically deletes it. +// +// The receipt handle is associated with a specific instance of receiving +// the message. If you receive a message more than once, the receipt handle +// you get each time you receive the message is different. When you request +// DeleteMessage, if you don't provide the most recently received receipt handle +// for the message, the request will still succeed, but the message might not +// be deleted. +// +// It is possible you will receive a message even after you have deleted +// it. This might happen on rare occasions if one of the servers storing a copy +// of the message is unavailable when you request to delete the message. The +// copy remains on the server and might be returned to you again on a subsequent +// receive request. You should create your system to be idempotent so that receiving +// a particular message more than once is not a problem. 
+func (c *SQS) DeleteMessage(input *DeleteMessageInput) (*DeleteMessageOutput, error) { + req, out := c.DeleteMessageRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMessageBatch = "DeleteMessageBatch" + +// DeleteMessageBatchRequest generates a request for the DeleteMessageBatch operation. +func (c *SQS) DeleteMessageBatchRequest(input *DeleteMessageBatchInput) (req *request.Request, output *DeleteMessageBatchOutput) { + op := &request.Operation{ + Name: opDeleteMessageBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMessageBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteMessageBatchOutput{} + req.Data = output + return +} + +// Deletes up to ten messages from the specified queue. This is a batch version +// of DeleteMessage. The result of the delete action on each message is reported +// individually in the response. +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) DeleteMessageBatch(input *DeleteMessageBatchInput) (*DeleteMessageBatchOutput, error) { + req, out := c.DeleteMessageBatchRequest(input) + err := req.Send() + return out, err +} + +const opDeleteQueue = "DeleteQueue" + +// DeleteQueueRequest generates a request for the DeleteQueue operation. 
+func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) { + op := &request.Operation{ + Name: opDeleteQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteQueueInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteQueueOutput{} + req.Data = output + return +} + +// Deletes the queue specified by the queue URL, regardless of whether the queue +// is empty. If the specified queue does not exist, Amazon SQS returns a successful +// response. +// +// Use DeleteQueue with care; once you delete your queue, any messages in +// the queue are no longer available. +// +// When you delete a queue, the deletion process takes up to 60 seconds. +// Requests you send involving that queue during the 60 seconds might succeed. +// For example, a SendMessage request might succeed, but after the 60 seconds, +// the queue and that message you sent no longer exist. Also, when you delete +// a queue, you must wait at least 60 seconds before creating a queue with the +// same name. +// +// We reserve the right to delete queues that have had no activity for more +// than 30 days. For more information, see How Amazon SQS Queues Work (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSConcepts.html) +// in the Amazon SQS Developer Guide. +func (c *SQS) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) { + req, out := c.DeleteQueueRequest(input) + err := req.Send() + return out, err +} + +const opGetQueueAttributes = "GetQueueAttributes" + +// GetQueueAttributesRequest generates a request for the GetQueueAttributes operation. 
+func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *request.Request, output *GetQueueAttributesOutput) { + op := &request.Operation{ + Name: opGetQueueAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetQueueAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetQueueAttributesOutput{} + req.Data = output + return +} + +// Gets attributes for the specified queue. The following attributes are supported: +// All - returns all values. ApproximateNumberOfMessages - returns the approximate +// number of visible messages in a queue. For more information, see Resources +// Required to Process Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html) +// in the Amazon SQS Developer Guide. ApproximateNumberOfMessagesNotVisible +// - returns the approximate number of messages that are not timed-out and not +// deleted. For more information, see Resources Required to Process Messages +// (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html) +// in the Amazon SQS Developer Guide. VisibilityTimeout - returns the visibility +// timeout for the queue. For more information about visibility timeout, see +// Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) +// in the Amazon SQS Developer Guide. CreatedTimestamp - returns the time when +// the queue was created (epoch time in seconds). LastModifiedTimestamp - returns +// the time when the queue was last changed (epoch time in seconds). Policy +// - returns the queue's policy. MaximumMessageSize - returns the limit of +// how many bytes a message can contain before Amazon SQS rejects it. MessageRetentionPeriod +// - returns the number of seconds Amazon SQS retains a message. QueueArn - +// returns the queue's Amazon resource name (ARN). 
ApproximateNumberOfMessagesDelayed +// - returns the approximate number of messages that are pending to be added +// to the queue. DelaySeconds - returns the default delay on the queue in seconds. +// ReceiveMessageWaitTimeSeconds - returns the time for which a ReceiveMessage +// call will wait for a message to arrive. RedrivePolicy - returns the parameters +// for dead letter queue functionality of the source queue. For more information +// about RedrivePolicy and dead letter queues, see Using Amazon SQS Dead Letter +// Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) +// in the Amazon SQS Developer Guide. +// +// Going forward, new attributes might be added. If you are writing code that +// calls this action, we recommend that you structure your code so that it can +// handle new attributes gracefully. Some API actions take lists of parameters. +// These lists are specified using the param.n notation. Values of n are integers +// starting from 1. For example, a parameter list with two elements looks like +// this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) GetQueueAttributes(input *GetQueueAttributesInput) (*GetQueueAttributesOutput, error) { + req, out := c.GetQueueAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetQueueUrl = "GetQueueUrl" + +// GetQueueUrlRequest generates a request for the GetQueueUrl operation. +func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request, output *GetQueueUrlOutput) { + op := &request.Operation{ + Name: opGetQueueUrl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetQueueUrlInput{} + } + + req = c.newRequest(op, input, output) + output = &GetQueueUrlOutput{} + req.Data = output + return +} + +// Returns the URL of an existing queue. This action provides a simple way to +// retrieve the URL of an Amazon SQS queue. 
+// +// To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId +// parameter to specify the account ID of the queue's owner. The queue's owner +// must grant you permission to access the queue. For more information about +// shared queue access, see AddPermission or go to Shared Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html) +// in the Amazon SQS Developer Guide. +func (c *SQS) GetQueueUrl(input *GetQueueUrlInput) (*GetQueueUrlOutput, error) { + req, out := c.GetQueueUrlRequest(input) + err := req.Send() + return out, err +} + +const opListDeadLetterSourceQueues = "ListDeadLetterSourceQueues" + +// ListDeadLetterSourceQueuesRequest generates a request for the ListDeadLetterSourceQueues operation. +func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueuesInput) (req *request.Request, output *ListDeadLetterSourceQueuesOutput) { + op := &request.Operation{ + Name: opListDeadLetterSourceQueues, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListDeadLetterSourceQueuesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDeadLetterSourceQueuesOutput{} + req.Data = output + return +} + +// Returns a list of your queues that have the RedrivePolicy queue attribute +// configured with a dead letter queue. +// +// For more information about using dead letter queues, see Using Amazon SQS +// Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html). +func (c *SQS) ListDeadLetterSourceQueues(input *ListDeadLetterSourceQueuesInput) (*ListDeadLetterSourceQueuesOutput, error) { + req, out := c.ListDeadLetterSourceQueuesRequest(input) + err := req.Send() + return out, err +} + +const opListQueues = "ListQueues" + +// ListQueuesRequest generates a request for the ListQueues operation. 
+func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) { + op := &request.Operation{ + Name: opListQueues, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListQueuesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListQueuesOutput{} + req.Data = output + return +} + +// Returns a list of your queues. The maximum number of queues that can be returned +// is 1000. If you specify a value for the optional QueueNamePrefix parameter, +// only queues with a name beginning with the specified value are returned. +func (c *SQS) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) { + req, out := c.ListQueuesRequest(input) + err := req.Send() + return out, err +} + +const opPurgeQueue = "PurgeQueue" + +// PurgeQueueRequest generates a request for the PurgeQueue operation. +func (c *SQS) PurgeQueueRequest(input *PurgeQueueInput) (req *request.Request, output *PurgeQueueOutput) { + op := &request.Operation{ + Name: opPurgeQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurgeQueueInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PurgeQueueOutput{} + req.Data = output + return +} + +// Deletes the messages in a queue specified by the queue URL. +// +// When you use the PurgeQueue API, the deleted messages in the queue cannot +// be retrieved. When you purge a queue, the message deletion process takes +// up to 60 seconds. All messages sent to the queue before calling PurgeQueue +// will be deleted; messages sent to the queue while it is being purged may +// be deleted. While the queue is being purged, messages sent to the queue before +// PurgeQueue was called may be received, but will be deleted within the next +// minute. 
+func (c *SQS) PurgeQueue(input *PurgeQueueInput) (*PurgeQueueOutput, error) { + req, out := c.PurgeQueueRequest(input) + err := req.Send() + return out, err +} + +const opReceiveMessage = "ReceiveMessage" + +// ReceiveMessageRequest generates a request for the ReceiveMessage operation. +func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Request, output *ReceiveMessageOutput) { + op := &request.Operation{ + Name: opReceiveMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReceiveMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &ReceiveMessageOutput{} + req.Data = output + return +} + +// Retrieves one or more messages, with a maximum limit of 10 messages, from +// the specified queue. Long poll support is enabled by using the WaitTimeSeconds +// parameter. For more information, see Amazon SQS Long Poll (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html) +// in the Amazon SQS Developer Guide. +// +// Short poll is the default behavior where a weighted random set of machines +// is sampled on a ReceiveMessage call. This means only the messages on the +// sampled machines are returned. If the number of messages in the queue is +// small (less than 1000), it is likely you will get fewer messages than you +// requested per ReceiveMessage call. If the number of messages in the queue +// is extremely small, you might not receive any messages in a particular ReceiveMessage +// response; in which case you should repeat the request. +// +// For each message returned, the response includes the following: +// +// Message body +// +// MD5 digest of the message body. For information about MD5, go to http://www.faqs.org/rfcs/rfc1321.html +// (http://www.faqs.org/rfcs/rfc1321.html). +// +// Message ID you received when you sent the message to the queue. +// +// Receipt handle. +// +// Message attributes. +// +// MD5 digest of the message attributes. 
+// +// The receipt handle is the identifier you must provide when deleting the +// message. For more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ImportantIdentifiers.html) +// in the Amazon SQS Developer Guide. +// +// You can provide the VisibilityTimeout parameter in your request, which +// will be applied to the messages that Amazon SQS returns in the response. +// If you do not include the parameter, the overall visibility timeout for the +// queue is used for the returned messages. For more information, see Visibility +// Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) +// in the Amazon SQS Developer Guide. +// +// Going forward, new attributes might be added. If you are writing code +// that calls this action, we recommend that you structure your code so that +// it can handle new attributes gracefully. +func (c *SQS) ReceiveMessage(input *ReceiveMessageInput) (*ReceiveMessageOutput, error) { + req, out := c.ReceiveMessageRequest(input) + err := req.Send() + return out, err +} + +const opRemovePermission = "RemovePermission" + +// RemovePermissionRequest generates a request for the RemovePermission operation. +func (c *SQS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { + op := &request.Operation{ + Name: opRemovePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemovePermissionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RemovePermissionOutput{} + req.Data = output + return +} + +// Revokes any permissions in the queue policy that matches the specified Label +// parameter. Only the owner of the queue can remove permissions. 
+func (c *SQS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { + req, out := c.RemovePermissionRequest(input) + err := req.Send() + return out, err +} + +const opSendMessage = "SendMessage" + +// SendMessageRequest generates a request for the SendMessage operation. +func (c *SQS) SendMessageRequest(input *SendMessageInput) (req *request.Request, output *SendMessageOutput) { + op := &request.Operation{ + Name: opSendMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &SendMessageOutput{} + req.Data = output + return +} + +// Delivers a message to the specified queue. With Amazon SQS, you now have +// the ability to send large payload messages that are up to 256KB (262,144 +// bytes) in size. To send large payloads, you must use an AWS SDK that supports +// SigV4 signing. To verify whether SigV4 is supported for an AWS SDK, check +// the SDK release notes. +// +// The following list shows the characters (in Unicode) allowed in your message, +// according to the W3C XML specification. For more information, go to http://www.w3.org/TR/REC-xml/#charsets +// (http://www.w3.org/TR/REC-xml/#charsets) If you send any characters not included +// in the list, your request will be rejected. +// +// #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF] +func (c *SQS) SendMessage(input *SendMessageInput) (*SendMessageOutput, error) { + req, out := c.SendMessageRequest(input) + err := req.Send() + return out, err +} + +const opSendMessageBatch = "SendMessageBatch" + +// SendMessageBatchRequest generates a request for the SendMessageBatch operation. 
+func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *request.Request, output *SendMessageBatchOutput) { + op := &request.Operation{ + Name: opSendMessageBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendMessageBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &SendMessageBatchOutput{} + req.Data = output + return +} + +// Delivers up to ten messages to the specified queue. This is a batch version +// of SendMessage. The result of the send action on each message is reported +// individually in the response. The maximum allowed individual message size +// is 256 KB (262,144 bytes). +// +// The maximum total payload size (i.e., the sum of all a batch's individual +// message lengths) is also 256 KB (262,144 bytes). +// +// If the DelaySeconds parameter is not specified for an entry, the default +// for the queue is used. +// +// The following list shows the characters (in Unicode) that are allowed in +// your message, according to the W3C XML specification. For more information, +// go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). +// If you send any characters that are not included in the list, your request +// will be rejected. #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] +// | [#x10000 to #x10FFFF] +// +// Because the batch request can result in a combination of successful and +// unsuccessful actions, you should check for batch errors even when the call +// returns an HTTP status code of 200. Some API actions take lists of parameters. +// These lists are specified using the param.n notation. Values of n are integers +// starting from 1. 
For example, a parameter list with two elements looks like +// this: &Attribute.1=this +// +// &Attribute.2=that +func (c *SQS) SendMessageBatch(input *SendMessageBatchInput) (*SendMessageBatchOutput, error) { + req, out := c.SendMessageBatchRequest(input) + err := req.Send() + return out, err +} + +const opSetQueueAttributes = "SetQueueAttributes" + +// SetQueueAttributesRequest generates a request for the SetQueueAttributes operation. +func (c *SQS) SetQueueAttributesRequest(input *SetQueueAttributesInput) (req *request.Request, output *SetQueueAttributesOutput) { + op := &request.Operation{ + Name: opSetQueueAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetQueueAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SetQueueAttributesOutput{} + req.Data = output + return +} + +// Sets the value of one or more queue attributes. When you change a queue's +// attributes, the change can take up to 60 seconds for most of the attributes +// to propagate throughout the SQS system. Changes made to the MessageRetentionPeriod +// attribute can take up to 15 minutes. +// +// Going forward, new attributes might be added. If you are writing code that +// calls this action, we recommend that you structure your code so that it can +// handle new attributes gracefully. +func (c *SQS) SetQueueAttributes(input *SetQueueAttributesInput) (*SetQueueAttributesOutput, error) { + req, out := c.SetQueueAttributesRequest(input) + err := req.Send() + return out, err +} + +type AddPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account number of the principal (http://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) + // who will be given permission. The principal must have an AWS account, but + // does not need to be signed up for Amazon SQS. 
For information about locating + // the AWS account identification, see Your AWS Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AWSCredentials.html) + // in the Amazon SQS Developer Guide. + AWSAccountIds []*string `locationNameList:"AWSAccountId" type:"list" flattened:"true" required:"true"` + + // The action the client wants to allow for the specified principal. The following + // are valid values: * | SendMessage | ReceiveMessage | DeleteMessage | ChangeMessageVisibility + // | GetQueueAttributes | GetQueueUrl. For more information about these actions, + // see Understanding Permissions (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html#PermissionTypes) + // in the Amazon SQS Developer Guide. + // + // Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for the + // ActionName.n also grants permissions for the corresponding batch versions + // of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch. + Actions []*string `locationNameList:"ActionName" type:"list" flattened:"true" required:"true"` + + // The unique identification of the permission you're setting (e.g., AliceSendMessage). + // Constraints: Maximum 80 characters; alphanumeric characters, hyphens (-), + // and underscores (_) are allowed. + Label *string `type:"string" required:"true"` + + // The URL of the Amazon SQS queue to take action on. 
+ QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionInput) GoString() string { + return s.String() +} + +type AddPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPermissionOutput) GoString() string { + return s.String() +} + +// This is used in the responses of batch API to give a detailed description +// of the result of an action on each entry in the request. +type BatchResultErrorEntry struct { + _ struct{} `type:"structure"` + + // An error code representing why the action failed on this entry. + Code *string `type:"string" required:"true"` + + // The id of an entry in a batch request. + Id *string `type:"string" required:"true"` + + // A message explaining why the action failed on this entry. + Message *string `type:"string"` + + // Whether the error happened due to the sender's fault. + SenderFault *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s BatchResultErrorEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchResultErrorEntry) GoString() string { + return s.String() +} + +type ChangeMessageVisibilityBatchInput struct { + _ struct{} `type:"structure"` + + // A list of receipt handles of the messages for which the visibility timeout + // must be changed. + Entries []*ChangeMessageVisibilityBatchRequestEntry `locationNameList:"ChangeMessageVisibilityBatchRequestEntry" type:"list" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. 
+ QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityBatchInput) GoString() string { + return s.String() +} + +// For each message in the batch, the response contains a ChangeMessageVisibilityBatchResultEntry +// tag if the message succeeds or a BatchResultErrorEntry tag if the message +// fails. +type ChangeMessageVisibilityBatchOutput struct { + _ struct{} `type:"structure"` + + // A list of BatchResultErrorEntry items. + Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"` + + // A list of ChangeMessageVisibilityBatchResultEntry items. + Successful []*ChangeMessageVisibilityBatchResultEntry `locationNameList:"ChangeMessageVisibilityBatchResultEntry" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityBatchOutput) GoString() string { + return s.String() +} + +// Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch. +// +// All of the following parameters are list parameters that must be prefixed +// with ChangeMessageVisibilityBatchRequestEntry.n, where n is an integer value +// starting with 1. 
For example, a parameter list for this action might look +// like this: +// +// &ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2 +// +// &ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=Your_Receipt_Handle +// +// &ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45 +type ChangeMessageVisibilityBatchRequestEntry struct { + _ struct{} `type:"structure"` + + // An identifier for this particular receipt handle. This is used to communicate + // the result. Note that the Ids of a batch request need to be unique within + // the request. + Id *string `type:"string" required:"true"` + + // A receipt handle. + ReceiptHandle *string `type:"string" required:"true"` + + // The new value (in seconds) for the message's visibility timeout. + VisibilityTimeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityBatchRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityBatchRequestEntry) GoString() string { + return s.String() +} + +// Encloses the id of an entry in ChangeMessageVisibilityBatch. +type ChangeMessageVisibilityBatchResultEntry struct { + _ struct{} `type:"structure"` + + // Represents a message whose visibility timeout has been changed successfully. + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityBatchResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityBatchResultEntry) GoString() string { + return s.String() +} + +type ChangeMessageVisibilityInput struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` + + // The receipt handle associated with the message whose visibility timeout should + // be changed. 
This parameter is returned by the ReceiveMessage action. + ReceiptHandle *string `type:"string" required:"true"` + + // The new value (in seconds - from 0 to 43200 - maximum 12 hours) for the message's + // visibility timeout. + VisibilityTimeout *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityInput) GoString() string { + return s.String() +} + +type ChangeMessageVisibilityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangeMessageVisibilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChangeMessageVisibilityOutput) GoString() string { + return s.String() +} + +type CreateQueueInput struct { + _ struct{} `type:"structure"` + + // A map of attributes with their corresponding values. + // + // The following lists the names, descriptions, and values of the special request + // parameters the CreateQueue action uses: + // + // DelaySeconds - The time in seconds that the delivery of all messages + // in the queue will be delayed. An integer from 0 to 900 (15 minutes). The + // default for this attribute is 0 (zero). MaximumMessageSize - The limit of + // how many bytes a message can contain before Amazon SQS rejects it. An integer + // from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this + // attribute is 262144 (256 KiB). MessageRetentionPeriod - The number of seconds + // Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) + // to 1209600 (14 days). The default for this attribute is 345600 (4 days). + // Policy - The queue's policy. A valid AWS policy. 
For more information about + // policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // in the Amazon IAM User Guide. ReceiveMessageWaitTimeSeconds - The time for + // which a ReceiveMessage call will wait for a message to arrive. An integer + // from 0 to 20 (seconds). The default for this attribute is 0. VisibilityTimeout + // - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). + // The default for this attribute is 30. For more information about visibility + // timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) + // in the Amazon SQS Developer Guide. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The name for the queue to be created. + QueueName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateQueueInput) GoString() string { + return s.String() +} + +// Returns the QueueUrl element of the created queue. +type CreateQueueOutput struct { + _ struct{} `type:"structure"` + + // The URL for the created Amazon SQS queue. + QueueUrl *string `type:"string"` +} + +// String returns the string representation +func (s CreateQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateQueueOutput) GoString() string { + return s.String() +} + +type DeleteMessageBatchInput struct { + _ struct{} `type:"structure"` + + // A list of receipt handles for the messages to be deleted. + Entries []*DeleteMessageBatchRequestEntry `locationNameList:"DeleteMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. 
+ QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMessageBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageBatchInput) GoString() string { + return s.String() +} + +// For each message in the batch, the response contains a DeleteMessageBatchResultEntry +// tag if the message is deleted or a BatchResultErrorEntry tag if the message +// cannot be deleted. +type DeleteMessageBatchOutput struct { + _ struct{} `type:"structure"` + + // A list of BatchResultErrorEntry items. + Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"` + + // A list of DeleteMessageBatchResultEntry items. + Successful []*DeleteMessageBatchResultEntry `locationNameList:"DeleteMessageBatchResultEntry" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s DeleteMessageBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageBatchOutput) GoString() string { + return s.String() +} + +// Encloses a receipt handle and an identifier for it. +type DeleteMessageBatchRequestEntry struct { + _ struct{} `type:"structure"` + + // An identifier for this particular receipt handle. This is used to communicate + // the result. Note that the Ids of a batch request need to be unique within + // the request. + Id *string `type:"string" required:"true"` + + // A receipt handle. + ReceiptHandle *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMessageBatchRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageBatchRequestEntry) GoString() string { + return s.String() +} + +// Encloses the id an entry in DeleteMessageBatch. 
+type DeleteMessageBatchResultEntry struct { + _ struct{} `type:"structure"` + + // Represents a successfully deleted message. + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMessageBatchResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageBatchResultEntry) GoString() string { + return s.String() +} + +type DeleteMessageInput struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` + + // The receipt handle associated with the message to delete. + ReceiptHandle *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageInput) GoString() string { + return s.String() +} + +type DeleteMessageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMessageOutput) GoString() string { + return s.String() +} + +type DeleteQueueInput struct { + _ struct{} `type:"structure"` + + // The URL of the Amazon SQS queue to take action on. 
+ QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueueInput) GoString() string { + return s.String() +} + +type DeleteQueueOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueueOutput) GoString() string { + return s.String() +} + +type GetQueueAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of attributes to retrieve information for. + AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetQueueAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueAttributesInput) GoString() string { + return s.String() +} + +// A list of returned queue attributes. +type GetQueueAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of attributes to the respective values. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` +} + +// String returns the string representation +func (s GetQueueAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueAttributesOutput) GoString() string { + return s.String() +} + +type GetQueueUrlInput struct { + _ struct{} `type:"structure"` + + // The name of the queue whose URL must be fetched. Maximum 80 characters; alphanumeric + // characters, hyphens (-), and underscores (_) are allowed. 
+ QueueName *string `type:"string" required:"true"` + + // The AWS account ID of the account that created the queue. + QueueOwnerAWSAccountId *string `type:"string"` +} + +// String returns the string representation +func (s GetQueueUrlInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueUrlInput) GoString() string { + return s.String() +} + +// For more information, see Responses (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/UnderstandingResponses.html) +// in the Amazon SQS Developer Guide. +type GetQueueUrlOutput struct { + _ struct{} `type:"structure"` + + // The URL for the queue. + QueueUrl *string `type:"string"` +} + +// String returns the string representation +func (s GetQueueUrlOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueueUrlOutput) GoString() string { + return s.String() +} + +type ListDeadLetterSourceQueuesInput struct { + _ struct{} `type:"structure"` + + // The queue URL of a dead letter queue. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListDeadLetterSourceQueuesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeadLetterSourceQueuesInput) GoString() string { + return s.String() +} + +// A list of your dead letter source queues. +type ListDeadLetterSourceQueuesOutput struct { + _ struct{} `type:"structure"` + + // A list of source queue URLs that have the RedrivePolicy queue attribute configured + // with a dead letter queue. 
+ QueueUrls []*string `locationName:"queueUrls" locationNameList:"QueueUrl" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ListDeadLetterSourceQueuesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDeadLetterSourceQueuesOutput) GoString() string { + return s.String() +} + +type ListQueuesInput struct { + _ struct{} `type:"structure"` + + // A string to use for filtering the list results. Only those queues whose name + // begins with the specified string are returned. + QueueNamePrefix *string `type:"string"` +} + +// String returns the string representation +func (s ListQueuesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueuesInput) GoString() string { + return s.String() +} + +// A list of your queues. +type ListQueuesOutput struct { + _ struct{} `type:"structure"` + + // A list of queue URLs, up to 1000 entries. + QueueUrls []*string `locationNameList:"QueueUrl" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListQueuesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueuesOutput) GoString() string { + return s.String() +} + +// An Amazon SQS message. +type Message struct { + _ struct{} `type:"structure"` + + // SenderId, SentTimestamp, ApproximateReceiveCount, and/or ApproximateFirstReceiveTimestamp. + // SentTimestamp and ApproximateFirstReceiveTimestamp are each returned as an + // integer representing the epoch time (http://en.wikipedia.org/wiki/Unix_time) + // in milliseconds. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The message's contents (not URL-encoded). 
+ Body *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message body string. + MD5OfBody *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message attribute string. This can be + // used to verify that Amazon SQS received the message correctly. Amazon SQS + // first URL decodes the message before creating the MD5 digest. For information + // about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageAttributes *string `type:"string"` + + // Each message attribute consists of a Name, Type, and Value. For more information, + // see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV). + MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // A unique identifier for the message. Message IDs are considered unique across + // all AWS accounts for an extended period of time. + MessageId *string `type:"string"` + + // An identifier associated with the act of receiving the message. A new receipt + // handle is returned every time you receive a message. When deleting a message, + // you provide the last received receipt handle to delete the message. + ReceiptHandle *string `type:"string"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Message) GoString() string { + return s.String() +} + +// The user-specified message attribute value. For string data types, the value +// attribute has the same restrictions on the content as the message body. For +// more information, see SendMessage (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html). +// +// Name, type, and value must not be empty or null. 
In addition, the message +// body should not be empty or null. All parts of the message attribute, including +// name, type, and value, are included in the message size restriction, which +// is currently 256 KB (262,144 bytes). +type MessageAttributeValue struct { + _ struct{} `type:"structure"` + + // Not implemented. Reserved for future use. + BinaryListValues [][]byte `locationName:"BinaryListValue" locationNameList:"BinaryListValue" type:"list" flattened:"true"` + + // Binary type attributes can store any binary data, for example, compressed + // data, encrypted data, or images. + BinaryValue []byte `type:"blob"` + + // Amazon SQS supports the following logical data types: String, Number, and + // Binary. In addition, you can append your own custom labels. For more information, + // see Message Attribute Data Types (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributes.DataTypes). + DataType *string `type:"string" required:"true"` + + // Not implemented. Reserved for future use. + StringListValues []*string `locationName:"StringListValue" locationNameList:"StringListValue" type:"list" flattened:"true"` + + // Strings are Unicode with UTF8 binary encoding. For a list of code values, + // see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + StringValue *string `type:"string"` +} + +// String returns the string representation +func (s MessageAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageAttributeValue) GoString() string { + return s.String() +} + +type PurgeQueueInput struct { + _ struct{} `type:"structure"` + + // The queue URL of the queue to delete the messages from when using the PurgeQueue + // API. 
+ QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurgeQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurgeQueueInput) GoString() string { + return s.String() +} + +type PurgeQueueOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PurgeQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurgeQueueOutput) GoString() string { + return s.String() +} + +type ReceiveMessageInput struct { + _ struct{} `type:"structure"` + + // A list of attributes that need to be returned along with each message. + // + // The following lists the names and descriptions of the attributes that can + // be returned: + // + // All - returns all values. ApproximateFirstReceiveTimestamp - returns + // the time when the message was first received from the queue (epoch time in + // milliseconds). ApproximateReceiveCount - returns the number of times a message + // has been received from the queue but not deleted. SenderId - returns the + // AWS account number (or the IP address, if anonymous access is allowed) of + // the sender. SentTimestamp - returns the time when the message was sent to + // the queue (epoch time in milliseconds). + AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` + + // The maximum number of messages to return. Amazon SQS never returns more messages + // than this value but may return fewer. Values can be from 1 to 10. Default + // is 1. + // + // All of the messages are not necessarily returned. + MaxNumberOfMessages *int64 `type:"integer"` + + // The name of the message attribute, where N is the index. The message attribute + // name can contain the following characters: A-Z, a-z, 0-9, underscore (_), + // hyphen (-), and period (.). 
The name must not start or end with a period, + // and it should not have successive periods. The name is case sensitive and + // must be unique among all attribute names for the message. The name can be + // up to 256 characters long. The name cannot start with "AWS." or "Amazon." + // (or any variations in casing), because these prefixes are reserved for use + // by Amazon Web Services. + // + // When using ReceiveMessage, you can send a list of attribute names to receive, + // or you can return all of the attributes by specifying "All" or ".*" in your + // request. You can also use "foo.*" to return all message attributes starting + // with the "foo" prefix. + MessageAttributeNames []*string `locationNameList:"MessageAttributeName" type:"list" flattened:"true"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` + + // The duration (in seconds) that the received messages are hidden from subsequent + // retrieve requests after being retrieved by a ReceiveMessage request. + VisibilityTimeout *int64 `type:"integer"` + + // The duration (in seconds) for which the call will wait for a message to arrive + // in the queue before returning. If a message is available, the call will return + // sooner than WaitTimeSeconds. + WaitTimeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReceiveMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiveMessageInput) GoString() string { + return s.String() +} + +// A list of received messages. +type ReceiveMessageOutput struct { + _ struct{} `type:"structure"` + + // A list of messages. 
+ Messages []*Message `locationNameList:"Message" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ReceiveMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiveMessageOutput) GoString() string { + return s.String() +} + +type RemovePermissionInput struct { + _ struct{} `type:"structure"` + + // The identification of the permission to remove. This is the label added with + // the AddPermission action. + Label *string `type:"string" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionInput) GoString() string { + return s.String() +} + +type RemovePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemovePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePermissionOutput) GoString() string { + return s.String() +} + +type SendMessageBatchInput struct { + _ struct{} `type:"structure"` + + // A list of SendMessageBatchRequestEntry items. + Entries []*SendMessageBatchRequestEntry `locationNameList:"SendMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. 
+ QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageBatchInput) GoString() string { + return s.String() +} + +// For each message in the batch, the response contains a SendMessageBatchResultEntry +// tag if the message succeeds or a BatchResultErrorEntry tag if the message +// fails. +type SendMessageBatchOutput struct { + _ struct{} `type:"structure"` + + // A list of BatchResultErrorEntry items with the error detail about each message + // that could not be enqueued. + Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"` + + // A list of SendMessageBatchResultEntry items. + Successful []*SendMessageBatchResultEntry `locationNameList:"SendMessageBatchResultEntry" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s SendMessageBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageBatchOutput) GoString() string { + return s.String() +} + +// Contains the details of a single Amazon SQS message along with a Id. +type SendMessageBatchRequestEntry struct { + _ struct{} `type:"structure"` + + // The number of seconds for which the message has to be delayed. + DelaySeconds *int64 `type:"integer"` + + // An identifier for the message in this batch. This is used to communicate + // the result. Note that the Ids of a batch request need to be unique within + // the request. + Id *string `type:"string" required:"true"` + + // Each message attribute consists of a Name, Type, and Value. For more information, + // see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV). 
+ MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // Body of the message. + MessageBody *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageBatchRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageBatchRequestEntry) GoString() string { + return s.String() +} + +// Encloses a message ID for successfully enqueued message of a SendMessageBatch. +type SendMessageBatchResultEntry struct { + _ struct{} `type:"structure"` + + // An identifier for the message in this batch. + Id *string `type:"string" required:"true"` + + // An MD5 digest of the non-URL-encoded message attribute string. This can be + // used to verify that Amazon SQS received the message batch correctly. Amazon + // SQS first URL decodes the message before creating the MD5 digest. For information + // about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageAttributes *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message body string. This can be used + // to verify that Amazon SQS received the message correctly. Amazon SQS first + // URL decodes the message before creating the MD5 digest. For information about + // MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageBody *string `type:"string" required:"true"` + + // An identifier for the message. 
+ MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageBatchResultEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageBatchResultEntry) GoString() string { + return s.String() +} + +type SendMessageInput struct { + _ struct{} `type:"structure"` + + // The number of seconds (0 to 900 - 15 minutes) to delay a specific message. + // Messages with a positive DelaySeconds value become available for processing + // after the delay time is finished. If you don't specify a value, the default + // value for the queue applies. + DelaySeconds *int64 `type:"integer"` + + // Each message attribute consists of a Name, Type, and Value. For more information, + // see Message Attribute Items (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributesNTV). + MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` + + // The message to send. String maximum 256 KB in size. For a list of allowed + // characters, see the preceding important note. + MessageBody *string `type:"string" required:"true"` + + // The URL of the Amazon SQS queue to take action on. + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageInput) GoString() string { + return s.String() +} + +// The MD5OfMessageBody and MessageId elements. +type SendMessageOutput struct { + _ struct{} `type:"structure"` + + // An MD5 digest of the non-URL-encoded message attribute string. This can be + // used to verify that Amazon SQS received the message correctly. 
Amazon SQS + // first URL decodes the message before creating the MD5 digest. For information + // about MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageAttributes *string `type:"string"` + + // An MD5 digest of the non-URL-encoded message body string. This can be used + // to verify that Amazon SQS received the message correctly. Amazon SQS first + // URL decodes the message before creating the MD5 digest. For information about + // MD5, go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). + MD5OfMessageBody *string `type:"string"` + + // An element containing the message ID of the message sent to the queue. For + // more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ImportantIdentifiers.html) + // in the Amazon SQS Developer Guide. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendMessageOutput) GoString() string { + return s.String() +} + +type SetQueueAttributesInput struct { + _ struct{} `type:"structure"` + + // A map of attributes to set. + // + // The following lists the names, descriptions, and values of the special request + // parameters the SetQueueAttributes action uses: + // + // DelaySeconds - The time in seconds that the delivery of all messages + // in the queue will be delayed. An integer from 0 to 900 (15 minutes). The + // default for this attribute is 0 (zero). MaximumMessageSize - The limit of + // how many bytes a message can contain before Amazon SQS rejects it. An integer + // from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this + // attribute is 262144 (256 KiB). MessageRetentionPeriod - The number of seconds + // Amazon SQS retains a message. 
Integer representing seconds, from 60 (1 minute) + // to 1209600 (14 days). The default for this attribute is 345600 (4 days). + // Policy - The queue's policy. A valid AWS policy. For more information about + // policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // in the Amazon IAM User Guide. ReceiveMessageWaitTimeSeconds - The time for + // which a ReceiveMessage call will wait for a message to arrive. An integer + // from 0 to 20 (seconds). The default for this attribute is 0. VisibilityTimeout + // - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). + // The default for this attribute is 30. For more information about visibility + // timeout, see Visibility Timeout in the Amazon SQS Developer Guide. RedrivePolicy + // - The parameters for dead letter queue functionality of the source queue. + // For more information about RedrivePolicy and dead letter queues, see Using + // Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide. + Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true" required:"true"` + + // The URL of the Amazon SQS queue to take action on. 
+ QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetQueueAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetQueueAttributesInput) GoString() string { + return s.String() +} + +type SetQueueAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetQueueAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetQueueAttributesOutput) GoString() string { + return s.String() +} + +const ( + // @enum QueueAttributeName + QueueAttributeNamePolicy = "Policy" + // @enum QueueAttributeName + QueueAttributeNameVisibilityTimeout = "VisibilityTimeout" + // @enum QueueAttributeName + QueueAttributeNameMaximumMessageSize = "MaximumMessageSize" + // @enum QueueAttributeName + QueueAttributeNameMessageRetentionPeriod = "MessageRetentionPeriod" + // @enum QueueAttributeName + QueueAttributeNameApproximateNumberOfMessages = "ApproximateNumberOfMessages" + // @enum QueueAttributeName + QueueAttributeNameApproximateNumberOfMessagesNotVisible = "ApproximateNumberOfMessagesNotVisible" + // @enum QueueAttributeName + QueueAttributeNameCreatedTimestamp = "CreatedTimestamp" + // @enum QueueAttributeName + QueueAttributeNameLastModifiedTimestamp = "LastModifiedTimestamp" + // @enum QueueAttributeName + QueueAttributeNameQueueArn = "QueueArn" + // @enum QueueAttributeName + QueueAttributeNameApproximateNumberOfMessagesDelayed = "ApproximateNumberOfMessagesDelayed" + // @enum QueueAttributeName + QueueAttributeNameDelaySeconds = "DelaySeconds" + // @enum QueueAttributeName + QueueAttributeNameReceiveMessageWaitTimeSeconds = "ReceiveMessageWaitTimeSeconds" + // @enum QueueAttributeName + QueueAttributeNameRedrivePolicy = "RedrivePolicy" +) diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/api_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/api_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/api_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,30 @@ +// +build integration + +package sqs_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sqs" +) + +func TestFlattenedTraits(t *testing.T) { + s := sqs.New(session.New()) + _, err := s.DeleteMessageBatch(&sqs.DeleteMessageBatchInput{ + QueueURL: aws.String("QUEUE"), + Entries: []*sqs.DeleteMessageBatchRequestEntry{ + { + ID: aws.String("TEST"), + ReceiptHandle: aws.String("RECEIPT"), + }, + }, + }) + + assert.Error(t, err) + assert.Equal(t, "InvalidAddress", err.Code()) + assert.Equal(t, "The address QUEUE is not valid for this endpoint.", err.Message()) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/checksums.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/checksums.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/checksums.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/checksums.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,105 @@ +package sqs + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +var ( + errChecksumMissingBody = fmt.Errorf("cannot compute checksum. 
missing body") + errChecksumMissingMD5 = fmt.Errorf("cannot verify checksum. missing response MD5") +) + +func setupChecksumValidation(r *request.Request) { + if aws.BoolValue(r.Config.DisableComputeChecksums) { + return + } + + switch r.Operation.Name { + case opSendMessage: + r.Handlers.Unmarshal.PushBack(verifySendMessage) + case opSendMessageBatch: + r.Handlers.Unmarshal.PushBack(verifySendMessageBatch) + case opReceiveMessage: + r.Handlers.Unmarshal.PushBack(verifyReceiveMessage) + } +} + +func verifySendMessage(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + in := r.Params.(*SendMessageInput) + out := r.Data.(*SendMessageOutput) + err := checksumsMatch(in.MessageBody, out.MD5OfMessageBody) + if err != nil { + setChecksumError(r, err.Error()) + } + } +} + +func verifySendMessageBatch(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + entries := map[string]*SendMessageBatchResultEntry{} + ids := []string{} + + out := r.Data.(*SendMessageBatchOutput) + for _, entry := range out.Successful { + entries[*entry.Id] = entry + } + + in := r.Params.(*SendMessageBatchInput) + for _, entry := range in.Entries { + if e := entries[*entry.Id]; e != nil { + err := checksumsMatch(entry.MessageBody, e.MD5OfMessageBody) + if err != nil { + ids = append(ids, *e.MessageId) + } + } + } + if len(ids) > 0 { + setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", ")) + } + } +} + +func verifyReceiveMessage(r *request.Request) { + if r.DataFilled() && r.ParamsFilled() { + ids := []string{} + out := r.Data.(*ReceiveMessageOutput) + for _, msg := range out.Messages { + err := checksumsMatch(msg.Body, msg.MD5OfBody) + if err != nil { + ids = append(ids, *msg.MessageId) + } + } + if len(ids) > 0 { + setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", ")) + } + } +} + +func checksumsMatch(body, expectedMD5 *string) error { + if body == nil { + return errChecksumMissingBody + } else if expectedMD5 == nil { + return 
errChecksumMissingMD5 + } + + msum := md5.Sum([]byte(*body)) + sum := hex.EncodeToString(msum[:]) + if sum != *expectedMD5 { + return fmt.Errorf("expected MD5 checksum '%s', got '%s'", *expectedMD5, sum) + } + + return nil +} + +func setChecksumError(r *request.Request, format string, args ...interface{}) { + r.Retryable = aws.Bool(true) + r.Error = awserr.New("InvalidChecksum", fmt.Sprintf(format, args...), nil) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/checksums_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,207 @@ +package sqs_test + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/sqs" +) + +var svc = func() *sqs.SQS { + s := sqs.New(unit.Session, &aws.Config{ + DisableParamValidation: aws.Bool(true), + }) + s.Handlers.Send.Clear() + return s +}() + +func TestSendMessageChecksum(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("098f6bcd4621d373cade4e832627b4f6"), + MessageId: aws.String("12345"), + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func 
TestSendMessageChecksumInvalid(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("000"), + MessageId: aws.String("12345"), + } + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "expected MD5 checksum '000', got '098f6bcd4621d373cade4e832627b4f6'") +} + +func TestSendMessageChecksumInvalidNoValidation(t *testing.T) { + s := sqs.New(unit.Session, &aws.Config{ + DisableParamValidation: aws.Bool(true), + DisableComputeChecksums: aws.Bool(true), + }) + s.Handlers.Send.Clear() + + req, _ := s.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{ + MD5OfMessageBody: aws.String("000"), + MessageId: aws.String("12345"), + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestSendMessageChecksumNoInput(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{}) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{} + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot compute checksum. 
missing body") +} + +func TestSendMessageChecksumNoOutput(t *testing.T) { + req, _ := svc.SendMessageRequest(&sqs.SendMessageInput{ + MessageBody: aws.String("test"), + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageOutput{} + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "cannot verify checksum. missing response MD5") +} + +func TestRecieveMessageChecksum(t *testing.T) { + req, _ := svc.ReceiveMessageRequest(&sqs.ReceiveMessageInput{}) + req.Handlers.Send.PushBack(func(r *request.Request) { + md5 := "098f6bcd4621d373cade4e832627b4f6" + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.ReceiveMessageOutput{ + Messages: []*sqs.Message{ + {Body: aws.String("test"), MD5OfBody: &md5}, + {Body: aws.String("test"), MD5OfBody: &md5}, + {Body: aws.String("test"), MD5OfBody: &md5}, + {Body: aws.String("test"), MD5OfBody: &md5}, + }, + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestRecieveMessageChecksumInvalid(t *testing.T) { + req, _ := svc.ReceiveMessageRequest(&sqs.ReceiveMessageInput{}) + req.Handlers.Send.PushBack(func(r *request.Request) { + md5 := "098f6bcd4621d373cade4e832627b4f6" + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.ReceiveMessageOutput{ + Messages: []*sqs.Message{ + {Body: aws.String("test"), MD5OfBody: &md5}, + {Body: aws.String("test"), MD5OfBody: aws.String("000"), MessageId: aws.String("123")}, + {Body: aws.String("test"), MD5OfBody: aws.String("000"), MessageId: aws.String("456")}, + {Body: aws.String("test"), MD5OfBody: &md5}, + }, + } + }) + err := req.Send() + assert.Error(t, err) + + 
assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "invalid messages: 123, 456") +} + +func TestSendMessageBatchChecksum(t *testing.T) { + req, _ := svc.SendMessageBatchRequest(&sqs.SendMessageBatchInput{ + Entries: []*sqs.SendMessageBatchRequestEntry{ + {Id: aws.String("1"), MessageBody: aws.String("test")}, + {Id: aws.String("2"), MessageBody: aws.String("test")}, + {Id: aws.String("3"), MessageBody: aws.String("test")}, + {Id: aws.String("4"), MessageBody: aws.String("test")}, + }, + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + md5 := "098f6bcd4621d373cade4e832627b4f6" + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageBatchOutput{ + Successful: []*sqs.SendMessageBatchResultEntry{ + {MD5OfMessageBody: &md5, MessageId: aws.String("123"), Id: aws.String("1")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("456"), Id: aws.String("2")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("789"), Id: aws.String("3")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("012"), Id: aws.String("4")}, + }, + } + }) + err := req.Send() + assert.NoError(t, err) +} + +func TestSendMessageBatchChecksumInvalid(t *testing.T) { + req, _ := svc.SendMessageBatchRequest(&sqs.SendMessageBatchInput{ + Entries: []*sqs.SendMessageBatchRequestEntry{ + {Id: aws.String("1"), MessageBody: aws.String("test")}, + {Id: aws.String("2"), MessageBody: aws.String("test")}, + {Id: aws.String("3"), MessageBody: aws.String("test")}, + {Id: aws.String("4"), MessageBody: aws.String("test")}, + }, + }) + req.Handlers.Send.PushBack(func(r *request.Request) { + md5 := "098f6bcd4621d373cade4e832627b4f6" + body := ioutil.NopCloser(bytes.NewReader([]byte(""))) + r.HTTPResponse = &http.Response{StatusCode: 200, Body: body} + r.Data = &sqs.SendMessageBatchOutput{ + Successful: []*sqs.SendMessageBatchResultEntry{ + {MD5OfMessageBody: &md5, 
MessageId: aws.String("123"), Id: aws.String("1")}, + {MD5OfMessageBody: aws.String("000"), MessageId: aws.String("456"), Id: aws.String("2")}, + {MD5OfMessageBody: aws.String("000"), MessageId: aws.String("789"), Id: aws.String("3")}, + {MD5OfMessageBody: &md5, MessageId: aws.String("012"), Id: aws.String("4")}, + }, + } + }) + err := req.Send() + assert.Error(t, err) + + assert.Equal(t, "InvalidChecksum", err.(awserr.Error).Code()) + assert.Contains(t, err.(awserr.Error).Message(), "invalid messages: 456, 789") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,9 @@ +package sqs + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + setupChecksumValidation(r) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,433 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package sqs_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sqs" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSQS_AddPermission() { + svc := sqs.New(session.New()) + + params := &sqs.AddPermissionInput{ + AWSAccountIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Actions: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + Label: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.AddPermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ChangeMessageVisibility() { + svc := sqs.New(session.New()) + + params := &sqs.ChangeMessageVisibilityInput{ + QueueUrl: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + VisibilityTimeout: aws.Int64(1), // Required + } + resp, err := svc.ChangeMessageVisibility(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ChangeMessageVisibilityBatch() { + svc := sqs.New(session.New()) + + params := &sqs.ChangeMessageVisibilityBatchInput{ + Entries: []*sqs.ChangeMessageVisibilityBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + VisibilityTimeout: aws.Int64(1), + }, + // More values... 
+ }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.ChangeMessageVisibilityBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_CreateQueue() { + svc := sqs.New(session.New()) + + params := &sqs.CreateQueueInput{ + QueueName: aws.String("String"), // Required + Attributes: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.CreateQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_DeleteMessage() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteMessageInput{ + QueueUrl: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + } + resp, err := svc.DeleteMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_DeleteMessageBatch() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteMessageBatchInput{ + Entries: []*sqs.DeleteMessageBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + ReceiptHandle: aws.String("String"), // Required + }, + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.DeleteMessageBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSQS_DeleteQueue() { + svc := sqs.New(session.New()) + + params := &sqs.DeleteQueueInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.DeleteQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_GetQueueAttributes() { + svc := sqs.New(session.New()) + + params := &sqs.GetQueueAttributesInput{ + QueueUrl: aws.String("String"), // Required + AttributeNames: []*string{ + aws.String("QueueAttributeName"), // Required + // More values... + }, + } + resp, err := svc.GetQueueAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_GetQueueUrl() { + svc := sqs.New(session.New()) + + params := &sqs.GetQueueUrlInput{ + QueueName: aws.String("String"), // Required + QueueOwnerAWSAccountId: aws.String("String"), + } + resp, err := svc.GetQueueUrl(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ListDeadLetterSourceQueues() { + svc := sqs.New(session.New()) + + params := &sqs.ListDeadLetterSourceQueuesInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.ListDeadLetterSourceQueues(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSQS_ListQueues() { + svc := sqs.New(session.New()) + + params := &sqs.ListQueuesInput{ + QueueNamePrefix: aws.String("String"), + } + resp, err := svc.ListQueues(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_PurgeQueue() { + svc := sqs.New(session.New()) + + params := &sqs.PurgeQueueInput{ + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.PurgeQueue(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_ReceiveMessage() { + svc := sqs.New(session.New()) + + params := &sqs.ReceiveMessageInput{ + QueueUrl: aws.String("String"), // Required + AttributeNames: []*string{ + aws.String("QueueAttributeName"), // Required + // More values... + }, + MaxNumberOfMessages: aws.Int64(1), + MessageAttributeNames: []*string{ + aws.String("MessageAttributeName"), // Required + // More values... + }, + VisibilityTimeout: aws.Int64(1), + WaitTimeSeconds: aws.Int64(1), + } + resp, err := svc.ReceiveMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_RemovePermission() { + svc := sqs.New(session.New()) + + params := &sqs.RemovePermissionInput{ + Label: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.RemovePermission(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SendMessage() { + svc := sqs.New(session.New()) + + params := &sqs.SendMessageInput{ + MessageBody: aws.String("String"), // Required + QueueUrl: aws.String("String"), // Required + DelaySeconds: aws.Int64(1), + MessageAttributes: map[string]*sqs.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryListValues: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + BinaryValue: []byte("PAYLOAD"), + StringListValues: []*string{ + aws.String("String"), // Required + // More values... + }, + StringValue: aws.String("String"), + }, + // More values... + }, + } + resp, err := svc.SendMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SendMessageBatch() { + svc := sqs.New(session.New()) + + params := &sqs.SendMessageBatchInput{ + Entries: []*sqs.SendMessageBatchRequestEntry{ // Required + { // Required + Id: aws.String("String"), // Required + MessageBody: aws.String("String"), // Required + DelaySeconds: aws.Int64(1), + MessageAttributes: map[string]*sqs.MessageAttributeValue{ + "Key": { // Required + DataType: aws.String("String"), // Required + BinaryListValues: [][]byte{ + []byte("PAYLOAD"), // Required + // More values... + }, + BinaryValue: []byte("PAYLOAD"), + StringListValues: []*string{ + aws.String("String"), // Required + // More values... + }, + StringValue: aws.String("String"), + }, + // More values... + }, + }, + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.SendMessageBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSQS_SetQueueAttributes() { + svc := sqs.New(session.New()) + + params := &sqs.SetQueueAttributesInput{ + Attributes: map[string]*string{ // Required + "Key": aws.String("String"), // Required + // More values... + }, + QueueUrl: aws.String("String"), // Required + } + resp, err := svc.SetQueueAttributes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,110 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sqs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Welcome to the Amazon Simple Queue Service API Reference. This section describes +// who should read this guide, how the guide is organized, and other resources +// related to the Amazon Simple Queue Service (Amazon SQS). +// +// Amazon SQS offers reliable and scalable hosted queues for storing messages +// as they travel between computers. 
By using Amazon SQS, you can move data +// between distributed components of your applications that perform different +// tasks without losing messages or requiring each component to be always available. +// +// Helpful Links: Current WSDL (2012-11-05) (http://queue.amazonaws.com/doc/2012-11-05/QueueService.wsdl) +// Making API Requests (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/MakingRequestsArticle.html) +// Amazon SQS product page (http://aws.amazon.com/sqs/) Using Amazon SQS Message +// Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html) +// Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) +// Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) +// +// +// We also provide SDKs that enable you to access Amazon SQS from your preferred +// programming language. The SDKs contain functionality that automatically takes +// care of tasks such as: +// +// Cryptographically signing your service requests Retrying requests Handling +// error responses +// +// For a list of available SDKs, go to Tools for Amazon Web Services (http://aws.amazon.com/tools/). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SQS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sqs" + +// New creates a new instance of the SQS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. 
+// +// Example: +// // Create a SQS client from just a session. +// svc := sqs.New(mySession) +// +// // Create a SQS client with additional configuration +// svc := sqs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SQS { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SQS { + svc := &SQS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-11-05", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SQS operation and runs any +// custom request initialization. 
+func (c *SQS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,82 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sqsiface provides an interface for the Amazon Simple Queue Service. +package sqsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sqs" +) + +// SQSAPI is the interface type for sqs.SQS. 
+type SQSAPI interface { + AddPermissionRequest(*sqs.AddPermissionInput) (*request.Request, *sqs.AddPermissionOutput) + + AddPermission(*sqs.AddPermissionInput) (*sqs.AddPermissionOutput, error) + + ChangeMessageVisibilityRequest(*sqs.ChangeMessageVisibilityInput) (*request.Request, *sqs.ChangeMessageVisibilityOutput) + + ChangeMessageVisibility(*sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error) + + ChangeMessageVisibilityBatchRequest(*sqs.ChangeMessageVisibilityBatchInput) (*request.Request, *sqs.ChangeMessageVisibilityBatchOutput) + + ChangeMessageVisibilityBatch(*sqs.ChangeMessageVisibilityBatchInput) (*sqs.ChangeMessageVisibilityBatchOutput, error) + + CreateQueueRequest(*sqs.CreateQueueInput) (*request.Request, *sqs.CreateQueueOutput) + + CreateQueue(*sqs.CreateQueueInput) (*sqs.CreateQueueOutput, error) + + DeleteMessageRequest(*sqs.DeleteMessageInput) (*request.Request, *sqs.DeleteMessageOutput) + + DeleteMessage(*sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) + + DeleteMessageBatchRequest(*sqs.DeleteMessageBatchInput) (*request.Request, *sqs.DeleteMessageBatchOutput) + + DeleteMessageBatch(*sqs.DeleteMessageBatchInput) (*sqs.DeleteMessageBatchOutput, error) + + DeleteQueueRequest(*sqs.DeleteQueueInput) (*request.Request, *sqs.DeleteQueueOutput) + + DeleteQueue(*sqs.DeleteQueueInput) (*sqs.DeleteQueueOutput, error) + + GetQueueAttributesRequest(*sqs.GetQueueAttributesInput) (*request.Request, *sqs.GetQueueAttributesOutput) + + GetQueueAttributes(*sqs.GetQueueAttributesInput) (*sqs.GetQueueAttributesOutput, error) + + GetQueueUrlRequest(*sqs.GetQueueUrlInput) (*request.Request, *sqs.GetQueueUrlOutput) + + GetQueueUrl(*sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) + + ListDeadLetterSourceQueuesRequest(*sqs.ListDeadLetterSourceQueuesInput) (*request.Request, *sqs.ListDeadLetterSourceQueuesOutput) + + ListDeadLetterSourceQueues(*sqs.ListDeadLetterSourceQueuesInput) (*sqs.ListDeadLetterSourceQueuesOutput, error) + + 
ListQueuesRequest(*sqs.ListQueuesInput) (*request.Request, *sqs.ListQueuesOutput) + + ListQueues(*sqs.ListQueuesInput) (*sqs.ListQueuesOutput, error) + + PurgeQueueRequest(*sqs.PurgeQueueInput) (*request.Request, *sqs.PurgeQueueOutput) + + PurgeQueue(*sqs.PurgeQueueInput) (*sqs.PurgeQueueOutput, error) + + ReceiveMessageRequest(*sqs.ReceiveMessageInput) (*request.Request, *sqs.ReceiveMessageOutput) + + ReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) + + RemovePermissionRequest(*sqs.RemovePermissionInput) (*request.Request, *sqs.RemovePermissionOutput) + + RemovePermission(*sqs.RemovePermissionInput) (*sqs.RemovePermissionOutput, error) + + SendMessageRequest(*sqs.SendMessageInput) (*request.Request, *sqs.SendMessageOutput) + + SendMessage(*sqs.SendMessageInput) (*sqs.SendMessageOutput, error) + + SendMessageBatchRequest(*sqs.SendMessageBatchInput) (*request.Request, *sqs.SendMessageBatchOutput) + + SendMessageBatch(*sqs.SendMessageBatchInput) (*sqs.SendMessageBatchOutput, error) + + SetQueueAttributesRequest(*sqs.SetQueueAttributesInput) (*request.Request, *sqs.SetQueueAttributesOutput) + + SetQueueAttributes(*sqs.SetQueueAttributesInput) (*sqs.SetQueueAttributesOutput, error) +} + +var _ SQSAPI = (*sqs.SQS)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1816 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ssm provides a client for Amazon Simple Systems Management Service. 
+package ssm + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCancelCommand = "CancelCommand" + +// CancelCommandRequest generates a request for the CancelCommand operation. +func (c *SSM) CancelCommandRequest(input *CancelCommandInput) (req *request.Request, output *CancelCommandOutput) { + op := &request.Operation{ + Name: opCancelCommand, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelCommandInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelCommandOutput{} + req.Data = output + return +} + +// Attempts to cancel the command specified by the Command ID. There is no guarantee +// that the command will be terminated and the underlying process stopped. +func (c *SSM) CancelCommand(input *CancelCommandInput) (*CancelCommandOutput, error) { + req, out := c.CancelCommandRequest(input) + err := req.Send() + return out, err +} + +const opCreateAssociation = "CreateAssociation" + +// CreateAssociationRequest generates a request for the CreateAssociation operation. +func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *request.Request, output *CreateAssociationOutput) { + op := &request.Operation{ + Name: opCreateAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAssociationOutput{} + req.Data = output + return +} + +// Associates the specified SSM document with the specified instance. +// +// When you associate an SSM document with an instance, the configuration agent +// on the instance processes the document and configures the instance as specified. +// +// If you associate a document with an instance that already has an associated +// document, the system throws the AssociationAlreadyExists exception. 
+func (c *SSM) CreateAssociation(input *CreateAssociationInput) (*CreateAssociationOutput, error) { + req, out := c.CreateAssociationRequest(input) + err := req.Send() + return out, err +} + +const opCreateAssociationBatch = "CreateAssociationBatch" + +// CreateAssociationBatchRequest generates a request for the CreateAssociationBatch operation. +func (c *SSM) CreateAssociationBatchRequest(input *CreateAssociationBatchInput) (req *request.Request, output *CreateAssociationBatchOutput) { + op := &request.Operation{ + Name: opCreateAssociationBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssociationBatchInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateAssociationBatchOutput{} + req.Data = output + return +} + +// Associates the specified SSM document with the specified instances. +// +// When you associate an SSM document with an instance, the configuration agent +// on the instance processes the document and configures the instance as specified. +// +// If you associate a document with an instance that already has an associated +// document, the system throws the AssociationAlreadyExists exception. +func (c *SSM) CreateAssociationBatch(input *CreateAssociationBatchInput) (*CreateAssociationBatchOutput, error) { + req, out := c.CreateAssociationBatchRequest(input) + err := req.Send() + return out, err +} + +const opCreateDocument = "CreateDocument" + +// CreateDocumentRequest generates a request for the CreateDocument operation. +func (c *SSM) CreateDocumentRequest(input *CreateDocumentInput) (req *request.Request, output *CreateDocumentOutput) { + op := &request.Operation{ + Name: opCreateDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDocumentInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDocumentOutput{} + req.Data = output + return +} + +// Creates an SSM document. 
+// +// After you create an SSM document, you can use CreateAssociation to associate +// it with one or more running instances. +func (c *SSM) CreateDocument(input *CreateDocumentInput) (*CreateDocumentOutput, error) { + req, out := c.CreateDocumentRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAssociation = "DeleteAssociation" + +// DeleteAssociationRequest generates a request for the DeleteAssociation operation. +func (c *SSM) DeleteAssociationRequest(input *DeleteAssociationInput) (req *request.Request, output *DeleteAssociationOutput) { + op := &request.Operation{ + Name: opDeleteAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteAssociationOutput{} + req.Data = output + return +} + +// Disassociates the specified SSM document from the specified instance. +// +// When you disassociate an SSM document from an instance, it does not change +// the configuration of the instance. To change the configuration state of an +// instance after you disassociate a document, you must create a new document +// with the desired configuration and associate it with the instance. +func (c *SSM) DeleteAssociation(input *DeleteAssociationInput) (*DeleteAssociationOutput, error) { + req, out := c.DeleteAssociationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDocument = "DeleteDocument" + +// DeleteDocumentRequest generates a request for the DeleteDocument operation. 
+func (c *SSM) DeleteDocumentRequest(input *DeleteDocumentInput) (req *request.Request, output *DeleteDocumentOutput) { + op := &request.Operation{ + Name: opDeleteDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDocumentInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteDocumentOutput{} + req.Data = output + return +} + +// Deletes the SSM document and all instance associations to the document. +// +// Before you delete the SSM document, we recommend that you use DeleteAssociation +// to disassociate all instances that are associated with the document. +func (c *SSM) DeleteDocument(input *DeleteDocumentInput) (*DeleteDocumentOutput, error) { + req, out := c.DeleteDocumentRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAssociation = "DescribeAssociation" + +// DescribeAssociationRequest generates a request for the DescribeAssociation operation. +func (c *SSM) DescribeAssociationRequest(input *DescribeAssociationInput) (req *request.Request, output *DescribeAssociationOutput) { + op := &request.Operation{ + Name: opDescribeAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAssociationInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAssociationOutput{} + req.Data = output + return +} + +// Describes the associations for the specified SSM document or instance. +func (c *SSM) DescribeAssociation(input *DescribeAssociationInput) (*DescribeAssociationOutput, error) { + req, out := c.DescribeAssociationRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDocument = "DescribeDocument" + +// DescribeDocumentRequest generates a request for the DescribeDocument operation. 
+func (c *SSM) DescribeDocumentRequest(input *DescribeDocumentInput) (req *request.Request, output *DescribeDocumentOutput) { + op := &request.Operation{ + Name: opDescribeDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDocumentInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDocumentOutput{} + req.Data = output + return +} + +// Describes the specified SSM document. +func (c *SSM) DescribeDocument(input *DescribeDocumentInput) (*DescribeDocumentOutput, error) { + req, out := c.DescribeDocumentRequest(input) + err := req.Send() + return out, err +} + +const opDescribeInstanceInformation = "DescribeInstanceInformation" + +// DescribeInstanceInformationRequest generates a request for the DescribeInstanceInformation operation. +func (c *SSM) DescribeInstanceInformationRequest(input *DescribeInstanceInformationInput) (req *request.Request, output *DescribeInstanceInformationOutput) { + op := &request.Operation{ + Name: opDescribeInstanceInformation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceInformationInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeInstanceInformationOutput{} + req.Data = output + return +} + +// Describes one or more of your instances. You can use this to get information +// about instances like the operating system platform, the SSM agent version, +// status etc. If you specify one or more instance IDs, it returns information +// for those instances. If you do not specify instance IDs, it returns information +// for all your instances. If you specify an instance ID that is not valid or +// an instance that you do not own, you receive an error. 
+func (c *SSM) DescribeInstanceInformation(input *DescribeInstanceInformationInput) (*DescribeInstanceInformationOutput, error) { + req, out := c.DescribeInstanceInformationRequest(input) + err := req.Send() + return out, err +} + +const opGetDocument = "GetDocument" + +// GetDocumentRequest generates a request for the GetDocument operation. +func (c *SSM) GetDocumentRequest(input *GetDocumentInput) (req *request.Request, output *GetDocumentOutput) { + op := &request.Operation{ + Name: opGetDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDocumentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetDocumentOutput{} + req.Data = output + return +} + +// Gets the contents of the specified SSM document. +func (c *SSM) GetDocument(input *GetDocumentInput) (*GetDocumentOutput, error) { + req, out := c.GetDocumentRequest(input) + err := req.Send() + return out, err +} + +const opListAssociations = "ListAssociations" + +// ListAssociationsRequest generates a request for the ListAssociations operation. +func (c *SSM) ListAssociationsRequest(input *ListAssociationsInput) (req *request.Request, output *ListAssociationsOutput) { + op := &request.Operation{ + Name: opListAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAssociationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListAssociationsOutput{} + req.Data = output + return +} + +// Lists the associations for the specified SSM document or instance. 
+func (c *SSM) ListAssociations(input *ListAssociationsInput) (*ListAssociationsOutput, error) { + req, out := c.ListAssociationsRequest(input) + err := req.Send() + return out, err +} + +func (c *SSM) ListAssociationsPages(input *ListAssociationsInput, fn func(p *ListAssociationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListAssociationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListAssociationsOutput), lastPage) + }) +} + +const opListCommandInvocations = "ListCommandInvocations" + +// ListCommandInvocationsRequest generates a request for the ListCommandInvocations operation. +func (c *SSM) ListCommandInvocationsRequest(input *ListCommandInvocationsInput) (req *request.Request, output *ListCommandInvocationsOutput) { + op := &request.Operation{ + Name: opListCommandInvocations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCommandInvocationsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCommandInvocationsOutput{} + req.Data = output + return +} + +// An invocation is copy of a command sent to a specific instance. A command +// can apply to one or more instances. A command invocation applies to one instance. +// For example, if a user executes SendCommand against three instances, then +// a command invocation is created for each requested instance ID. ListCommandInvocations +// provide status about command execution. 
+func (c *SSM) ListCommandInvocations(input *ListCommandInvocationsInput) (*ListCommandInvocationsOutput, error) { + req, out := c.ListCommandInvocationsRequest(input) + err := req.Send() + return out, err +} + +func (c *SSM) ListCommandInvocationsPages(input *ListCommandInvocationsInput, fn func(p *ListCommandInvocationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListCommandInvocationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListCommandInvocationsOutput), lastPage) + }) +} + +const opListCommands = "ListCommands" + +// ListCommandsRequest generates a request for the ListCommands operation. +func (c *SSM) ListCommandsRequest(input *ListCommandsInput) (req *request.Request, output *ListCommandsOutput) { + op := &request.Operation{ + Name: opListCommands, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCommandsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListCommandsOutput{} + req.Data = output + return +} + +// Lists the commands requested by users of the AWS account. 
+func (c *SSM) ListCommands(input *ListCommandsInput) (*ListCommandsOutput, error) { + req, out := c.ListCommandsRequest(input) + err := req.Send() + return out, err +} + +func (c *SSM) ListCommandsPages(input *ListCommandsInput, fn func(p *ListCommandsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListCommandsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListCommandsOutput), lastPage) + }) +} + +const opListDocuments = "ListDocuments" + +// ListDocumentsRequest generates a request for the ListDocuments operation. +func (c *SSM) ListDocumentsRequest(input *ListDocumentsInput) (req *request.Request, output *ListDocumentsOutput) { + op := &request.Operation{ + Name: opListDocuments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDocumentsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDocumentsOutput{} + req.Data = output + return +} + +// Describes one or more of your SSM documents. +func (c *SSM) ListDocuments(input *ListDocumentsInput) (*ListDocumentsOutput, error) { + req, out := c.ListDocumentsRequest(input) + err := req.Send() + return out, err +} + +func (c *SSM) ListDocumentsPages(input *ListDocumentsInput, fn func(p *ListDocumentsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDocumentsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDocumentsOutput), lastPage) + }) +} + +const opSendCommand = "SendCommand" + +// SendCommandRequest generates a request for the SendCommand operation. 
+func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request, output *SendCommandOutput) { + op := &request.Operation{ + Name: opSendCommand, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendCommandInput{} + } + + req = c.newRequest(op, input, output) + output = &SendCommandOutput{} + req.Data = output + return +} + +// Executes commands on one or more remote instances. +func (c *SSM) SendCommand(input *SendCommandInput) (*SendCommandOutput, error) { + req, out := c.SendCommandRequest(input) + err := req.Send() + return out, err +} + +const opUpdateAssociationStatus = "UpdateAssociationStatus" + +// UpdateAssociationStatusRequest generates a request for the UpdateAssociationStatus operation. +func (c *SSM) UpdateAssociationStatusRequest(input *UpdateAssociationStatusInput) (req *request.Request, output *UpdateAssociationStatusOutput) { + op := &request.Operation{ + Name: opUpdateAssociationStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssociationStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateAssociationStatusOutput{} + req.Data = output + return +} + +// Updates the status of the SSM document associated with the specified instance. +func (c *SSM) UpdateAssociationStatus(input *UpdateAssociationStatusInput) (*UpdateAssociationStatusOutput, error) { + req, out := c.UpdateAssociationStatusRequest(input) + err := req.Send() + return out, err +} + +// Describes an association of an SSM document and an instance. +type Association struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The name of the SSM document. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s Association) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Association) GoString() string { + return s.String() +} + +// Describes the parameters for a document. +type AssociationDescription struct { + _ struct{} `type:"structure"` + + // The date when the association was made. + Date *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The name of the SSM document. + Name *string `type:"string"` + + // A description of the parameters for a document. + Parameters map[string][]*string `type:"map"` + + // The association status. + Status *AssociationStatus `type:"structure"` +} + +// String returns the string representation +func (s AssociationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationDescription) GoString() string { + return s.String() +} + +// Describes a filter. +type AssociationFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + Key *string `locationName:"key" type:"string" required:"true" enum:"AssociationFilterKey"` + + // The filter value. + Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationFilter) GoString() string { + return s.String() +} + +// Describes an association status. +type AssociationStatus struct { + _ struct{} `type:"structure"` + + // A user-defined string. + AdditionalInfo *string `type:"string"` + + // The date when the status changed. + Date *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The reason for the status. 
+ Message *string `type:"string" required:"true"` + + // The status. + Name *string `type:"string" required:"true" enum:"AssociationStatusName"` +} + +// String returns the string representation +func (s AssociationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationStatus) GoString() string { + return s.String() +} + +type CancelCommandInput struct { + _ struct{} `type:"structure"` + + // The ID of the command you want to cancel. + CommandId *string `min:"36" type:"string" required:"true"` + + // (Optional) A list of instance IDs on which you want to cancel the command. + // If not provided, the command is canceled on every instance on which it was + // requested. + InstanceIds []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s CancelCommandInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCommandInput) GoString() string { + return s.String() +} + +// Whether or not the command was successfully canceled. There is no guarantee +// that a request can be canceled. +type CancelCommandOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelCommandOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCommandOutput) GoString() string { + return s.String() +} + +// Describes a command request. +type Command struct { + _ struct{} `type:"structure"` + + // A unique identifier for this command. + CommandId *string `min:"36" type:"string"` + + // User-specified information about the command, such as a brief description + // of what the command should do. + Comment *string `type:"string"` + + // The name of the SSM document requested for execution. 
+ DocumentName *string `type:"string"` + + // If this time is reached and the command has not already started executing, + // it will not execute. Calculated based on the ExpiresAfter user input provided + // as part of the SendCommand API. + ExpiresAfter *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The instance IDs against which this command was requested. + InstanceIds []*string `min:"1" type:"list"` + + // The S3 bucket where the responses to the command executions should be stored. + // This was requested when issuing the command. + OutputS3BucketName *string `min:"3" type:"string"` + + // The S3 directory path inside the bucket where the responses to the command + // executions should be stored. This was requested when issuing the command. + OutputS3KeyPrefix *string `type:"string"` + + // The parameter values to be inserted in the SSM document when executing the + // command. + Parameters map[string][]*string `type:"map"` + + // The date and time the command was requested. + RequestedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The status of the command. + Status *string `type:"string" enum:"CommandStatus"` +} + +// String returns the string representation +func (s Command) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Command) GoString() string { + return s.String() +} + +// Describes a command filter. +type CommandFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. For example, requested date and time. + Key *string `locationName:"key" type:"string" required:"true" enum:"CommandFilterKey"` + + // The filter value. For example: June 30, 2015. 
+ Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CommandFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommandFilter) GoString() string { + return s.String() +} + +// An invocation is copy of a command sent to a specific instance. A command +// can apply to one or more instances. A command invocation applies to one instance. +// For example, if a user executes SendCommand against three instances, then +// a command invocation is created for each requested instance ID. A command +// invocation returns status and detail information about a command you executed. +type CommandInvocation struct { + _ struct{} `type:"structure"` + + // The command against which this invocation was requested. + CommandId *string `min:"36" type:"string"` + + CommandPlugins []*CommandPlugin `type:"list"` + + // User-specified information about the command, such as a brief description + // of what the command should do. + Comment *string `type:"string"` + + // The document name that was requested for execution. + DocumentName *string `type:"string"` + + // The instance ID in which this invocation was requested. + InstanceId *string `type:"string"` + + // The time and date the request was sent to this instance. + RequestedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Whether or not the invocation succeeded, failed, or is pending. + Status *string `type:"string" enum:"CommandInvocationStatus"` + + // Gets the trace output sent by the agent. + TraceOutput *string `type:"string"` +} + +// String returns the string representation +func (s CommandInvocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommandInvocation) GoString() string { + return s.String() +} + +// Describes plugin details. 
+type CommandPlugin struct { + _ struct{} `type:"structure"` + + // The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin, + // aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch, + // aws:runShellScript, or aws:updateSSMAgent. + Name *string `min:"4" type:"string"` + + // Output of the plugin execution. + Output *string `type:"string"` + + // The S3 bucket where the responses to the command executions should be stored. + // This was requested when issuing the command. + OutputS3BucketName *string `min:"3" type:"string"` + + // The S3 directory path inside the bucket where the responses to the command + // executions should be stored. This was requested when issuing the command. + OutputS3KeyPrefix *string `type:"string"` + + // A numeric response code generated after executing the plugin. + ResponseCode *int64 `type:"integer"` + + // The time the plugin stopped executing. Could stop prematurely if, for example, + // a cancel command was sent. + ResponseFinishDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The time the plugin started executing. + ResponseStartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The status of this plugin. You can execute a document with multiple plugins. + Status *string `type:"string" enum:"CommandPluginStatus"` +} + +// String returns the string representation +func (s CommandPlugin) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommandPlugin) GoString() string { + return s.String() +} + +type CreateAssociationBatchInput struct { + _ struct{} `type:"structure"` + + // One or more associations. 
+ Entries []*CreateAssociationBatchRequestEntry `locationNameList:"entries" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateAssociationBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationBatchInput) GoString() string { + return s.String() +} + +type CreateAssociationBatchOutput struct { + _ struct{} `type:"structure"` + + // Information about the associations that failed. + Failed []*FailedCreateAssociation `locationNameList:"FailedCreateAssociationEntry" type:"list"` + + // Information about the associations that succeeded. + Successful []*AssociationDescription `locationNameList:"AssociationDescription" type:"list"` +} + +// String returns the string representation +func (s CreateAssociationBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationBatchOutput) GoString() string { + return s.String() +} + +// Describes the association of an SSM document and an instance. +type CreateAssociationBatchRequestEntry struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The name of the configuration document. + Name *string `type:"string"` + + // A description of the parameters for a document. + Parameters map[string][]*string `type:"map"` +} + +// String returns the string representation +func (s CreateAssociationBatchRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationBatchRequestEntry) GoString() string { + return s.String() +} + +type CreateAssociationInput struct { + _ struct{} `type:"structure"` + + // The Windows Server instance ID. + InstanceId *string `type:"string" required:"true"` + + // The name of the SSM document. 
+ Name *string `type:"string" required:"true"` + + // The parameters for the documents runtime configuration. + Parameters map[string][]*string `type:"map"` +} + +// String returns the string representation +func (s CreateAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationInput) GoString() string { + return s.String() +} + +type CreateAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + AssociationDescription *AssociationDescription `type:"structure"` +} + +// String returns the string representation +func (s CreateAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationOutput) GoString() string { + return s.String() +} + +type CreateDocumentInput struct { + _ struct{} `type:"structure"` + + // A valid JSON string. For more information about the contents of this string, + // see SSM Document (http://docs.aws.amazon.com/ssm/latest/APIReference/aws-ssm-document.html). + Content *string `min:"1" type:"string" required:"true"` + + // A name for the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDocumentInput) GoString() string { + return s.String() +} + +type CreateDocumentOutput struct { + _ struct{} `type:"structure"` + + // Information about the SSM document. 
+ DocumentDescription *DocumentDescription `type:"structure"` +} + +// String returns the string representation +func (s CreateDocumentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDocumentOutput) GoString() string { + return s.String() +} + +type DeleteAssociationInput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssociationInput) GoString() string { + return s.String() +} + +type DeleteAssociationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssociationOutput) GoString() string { + return s.String() +} + +type DeleteDocumentInput struct { + _ struct{} `type:"structure"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDocumentInput) GoString() string { + return s.String() +} + +type DeleteDocumentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDocumentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDocumentOutput) GoString() string { + return s.String() +} + +type DescribeAssociationInput struct { + _ struct{} `type:"structure"` + + // The Windows Server instance ID. 
+ InstanceId *string `type:"string" required:"true"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationInput) GoString() string { + return s.String() +} + +type DescribeAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + AssociationDescription *AssociationDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationOutput) GoString() string { + return s.String() +} + +type DescribeDocumentInput struct { + _ struct{} `type:"structure"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDocumentInput) GoString() string { + return s.String() +} + +type DescribeDocumentOutput struct { + _ struct{} `type:"structure"` + + // Information about the SSM document. + Document *DocumentDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeDocumentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDocumentOutput) GoString() string { + return s.String() +} + +type DescribeInstanceInformationInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of instances. 
+ InstanceInformationFilterList []*InstanceInformationFilter `locationNameList:"InstanceInformationFilter" min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceInformationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceInformationInput) GoString() string { + return s.String() +} + +type DescribeInstanceInformationOutput struct { + _ struct{} `type:"structure"` + + // The instance information list. + InstanceInformationList []*InstanceInformation `locationNameList:"InstanceInformation" type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceInformationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceInformationOutput) GoString() string { + return s.String() +} + +// Describes an SSM document. +type DocumentDescription struct { + _ struct{} `type:"structure"` + + // The date when the SSM document was created. + CreatedDate *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A description of the document. + Description *string `type:"string"` + + // The name of the SSM document. + Name *string `type:"string"` + + // A description of the parameters for a document. 
+ Parameters []*DocumentParameter `locationNameList:"DocumentParameter" type:"list"` + + // The list of OS platforms compatible with this SSM document. + PlatformTypes []*string `locationNameList:"PlatformType" type:"list"` + + // The SHA1 hash of the document, which you can use for verification purposes. + Sha1 *string `type:"string"` + + // The status of the SSM document. + Status *string `type:"string" enum:"DocumentStatus"` +} + +// String returns the string representation +func (s DocumentDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentDescription) GoString() string { + return s.String() +} + +// Describes a filter. +type DocumentFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + Key *string `locationName:"key" type:"string" required:"true" enum:"DocumentFilterKey"` + + // The value of the filter. + Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DocumentFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentFilter) GoString() string { + return s.String() +} + +// Describes the name of an SSM document. +type DocumentIdentifier struct { + _ struct{} `type:"structure"` + + // The name of the SSM document. + Name *string `type:"string"` + + // The operating system platform. + PlatformTypes []*string `locationNameList:"PlatformType" type:"list"` +} + +// String returns the string representation +func (s DocumentIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentIdentifier) GoString() string { + return s.String() +} + +type DocumentParameter struct { + _ struct{} `type:"structure"` + + // If specified, the default values for the parameters. Parameters without a + // default value are required. 
Parameters with a default value are optional. + DefaultValue *string `type:"string"` + + // A description of what the parameter does, how to use it, the default value, + // and whether or not the parameter is optional. + Description *string `type:"string"` + + // The name of the parameter. + Name *string `type:"string"` + + // The type of parameter. The type can be either “String” or “StringList”. + Type *string `type:"string" enum:"DocumentParameterType"` +} + +// String returns the string representation +func (s DocumentParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentParameter) GoString() string { + return s.String() +} + +// Describes a failed association. +type FailedCreateAssociation struct { + _ struct{} `type:"structure"` + + // The association. + Entry *CreateAssociationBatchRequestEntry `type:"structure"` + + // The source of the failure. + Fault *string `type:"string" enum:"Fault"` + + // A description of the failure. + Message *string `type:"string"` +} + +// String returns the string representation +func (s FailedCreateAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedCreateAssociation) GoString() string { + return s.String() +} + +type GetDocumentInput struct { + _ struct{} `type:"structure"` + + // The name of the SSM document. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDocumentInput) GoString() string { + return s.String() +} + +type GetDocumentOutput struct { + _ struct{} `type:"structure"` + + // The contents of the SSM document. + Content *string `min:"1" type:"string"` + + // The name of the SSM document. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s GetDocumentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDocumentOutput) GoString() string { + return s.String() +} + +// Describes a filter for a specific list of instances. +type InstanceInformation struct { + _ struct{} `type:"structure"` + + // The version of the SSM agent running on your instance. + AgentVersion *string `type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` + + // Indicates whether latest version of the SSM agent is running on your instance. + IsLatestVersion *bool `type:"boolean"` + + // The date and time when agent last pinged SSM service. + LastPingDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // Connection status of the SSM agent. + PingStatus *string `type:"string" enum:"PingStatus"` + + // The name of the operating system platform running on your instance. + PlatformName *string `type:"string"` + + // The operating system platform type. + PlatformType *string `type:"string" enum:"PlatformType"` + + // The version of the OS platform running on your instance. + PlatformVersion *string `type:"string"` +} + +// String returns the string representation +func (s InstanceInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceInformation) GoString() string { + return s.String() +} + +// Describes a filter for a specific list of instances. +type InstanceInformationFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + Key *string `locationName:"key" type:"string" required:"true" enum:"InstanceInformationFilterKey"` + + // The filter values. 
+ ValueSet []*string `locationName:"valueSet" locationNameList:"InstanceInformationFilterValue" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s InstanceInformationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceInformationFilter) GoString() string { + return s.String() +} + +type ListAssociationsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of results. + AssociationFilterList []*AssociationFilter `locationNameList:"AssociationFilter" min:"1" type:"list" required:"true"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociationsInput) GoString() string { + return s.String() +} + +type ListAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The associations. + Associations []*Association `locationNameList:"Association" type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociationsOutput) GoString() string { + return s.String() +} + +type ListCommandInvocationsInput struct { + _ struct{} `type:"structure"` + + // (Optional) The invocations for a specific command ID. + CommandId *string `min:"36" type:"string"` + + // (Optional) If set this returns the response of the command executions and + // any command output. By default this is set to False. + Details *bool `type:"boolean"` + + // (Optional) One or more filters. Use a filter to return a more specific list + // of results. + Filters []*CommandFilter `min:"1" type:"list"` + + // (Optional) The command execution details for a specific instance ID. + InstanceId *string `type:"string"` + + // (Optional) The maximum number of items to return for this call. The call + // also returns a token that you can specify in a subsequent call to get the + // next set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandInvocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandInvocationsInput) GoString() string { + return s.String() +} + +type ListCommandInvocationsOutput struct { + _ struct{} `type:"structure"` + + // (Optional) A list of all invocations. + CommandInvocations []*CommandInvocation `type:"list"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandInvocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandInvocationsOutput) GoString() string { + return s.String() +} + +type ListCommandsInput struct { + _ struct{} `type:"structure"` + + // (Optional) If provided, lists only the specified command. + CommandId *string `min:"36" type:"string"` + + // (Optional) One or more filters. Use a filter to return a more specific list + // of results. + Filters []*CommandFilter `min:"1" type:"list"` + + // (Optional) Lists commands issued against this instance ID. + InstanceId *string `type:"string"` + + // (Optional) The maximum number of items to return for this call. The call + // also returns a token that you can specify in a subsequent call to get the + // next set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandsInput) GoString() string { + return s.String() +} + +type ListCommandsOutput struct { + _ struct{} `type:"structure"` + + // (Optional) The list of commands requested by the user. + Commands []*Command `type:"list"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandsOutput) GoString() string { + return s.String() +} + +type ListDocumentsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of results. + DocumentFilterList []*DocumentFilter `locationNameList:"DocumentFilter" min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDocumentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDocumentsInput) GoString() string { + return s.String() +} + +type ListDocumentsOutput struct { + _ struct{} `type:"structure"` + + // The names of the SSM documents. + DocumentIdentifiers []*DocumentIdentifier `locationNameList:"DocumentIdentifier" type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDocumentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDocumentsOutput) GoString() string { + return s.String() +} + +type SendCommandInput struct { + _ struct{} `type:"structure"` + + // User-specified information about the command, such as a brief description + // of what the command should do. 
+ Comment *string `type:"string"` + + // Required. The name of the SSM document to execute. This can be an SSM public + // document or a custom document. + DocumentName *string `type:"string" required:"true"` + + // Required. The instance IDs where the command should execute. + InstanceIds []*string `min:"1" type:"list" required:"true"` + + // The name of the S3 bucket where command execution responses should be stored. + OutputS3BucketName *string `min:"3" type:"string"` + + // The directory structure within the S3 bucket where the responses should be + // stored. + OutputS3KeyPrefix *string `type:"string"` + + // The required and optional parameters specified in the SSM document being + // executed. + Parameters map[string][]*string `type:"map"` + + // If this time is reached and the command has not already started executing, + // it will not execute. + TimeoutSeconds *int64 `min:"30" type:"integer"` +} + +// String returns the string representation +func (s SendCommandInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendCommandInput) GoString() string { + return s.String() +} + +type SendCommandOutput struct { + _ struct{} `type:"structure"` + + // The request as it was received by SSM. Also provides the command ID which + // can be used future references to this request. + Command *Command `type:"structure"` +} + +// String returns the string representation +func (s SendCommandOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendCommandOutput) GoString() string { + return s.String() +} + +type UpdateAssociationStatusInput struct { + _ struct{} `type:"structure"` + + // The association status. + AssociationStatus *AssociationStatus `type:"structure" required:"true"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // The name of the SSM document. 
+ Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssociationStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssociationStatusInput) GoString() string { + return s.String() +} + +type UpdateAssociationStatusOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + AssociationDescription *AssociationDescription `type:"structure"` +} + +// String returns the string representation +func (s UpdateAssociationStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssociationStatusOutput) GoString() string { + return s.String() +} + +const ( + // @enum AssociationFilterKey + AssociationFilterKeyInstanceId = "InstanceId" + // @enum AssociationFilterKey + AssociationFilterKeyName = "Name" +) + +const ( + // @enum AssociationStatusName + AssociationStatusNamePending = "Pending" + // @enum AssociationStatusName + AssociationStatusNameSuccess = "Success" + // @enum AssociationStatusName + AssociationStatusNameFailed = "Failed" +) + +const ( + // @enum CommandFilterKey + CommandFilterKeyInvokedAfter = "InvokedAfter" + // @enum CommandFilterKey + CommandFilterKeyInvokedBefore = "InvokedBefore" + // @enum CommandFilterKey + CommandFilterKeyStatus = "Status" +) + +const ( + // @enum CommandInvocationStatus + CommandInvocationStatusPending = "Pending" + // @enum CommandInvocationStatus + CommandInvocationStatusInProgress = "InProgress" + // @enum CommandInvocationStatus + CommandInvocationStatusCancelling = "Cancelling" + // @enum CommandInvocationStatus + CommandInvocationStatusSuccess = "Success" + // @enum CommandInvocationStatus + CommandInvocationStatusTimedOut = "TimedOut" + // @enum CommandInvocationStatus + CommandInvocationStatusCancelled = "Cancelled" + // @enum CommandInvocationStatus + CommandInvocationStatusFailed = "Failed" 
+) + +const ( + // @enum CommandPluginStatus + CommandPluginStatusPending = "Pending" + // @enum CommandPluginStatus + CommandPluginStatusInProgress = "InProgress" + // @enum CommandPluginStatus + CommandPluginStatusSuccess = "Success" + // @enum CommandPluginStatus + CommandPluginStatusTimedOut = "TimedOut" + // @enum CommandPluginStatus + CommandPluginStatusCancelled = "Cancelled" + // @enum CommandPluginStatus + CommandPluginStatusFailed = "Failed" +) + +const ( + // @enum CommandStatus + CommandStatusPending = "Pending" + // @enum CommandStatus + CommandStatusInProgress = "InProgress" + // @enum CommandStatus + CommandStatusCancelling = "Cancelling" + // @enum CommandStatus + CommandStatusSuccess = "Success" + // @enum CommandStatus + CommandStatusTimedOut = "TimedOut" + // @enum CommandStatus + CommandStatusCancelled = "Cancelled" + // @enum CommandStatus + CommandStatusFailed = "Failed" +) + +const ( + // @enum DocumentFilterKey + DocumentFilterKeyName = "Name" + // @enum DocumentFilterKey + DocumentFilterKeyOwner = "Owner" + // @enum DocumentFilterKey + DocumentFilterKeyPlatformTypes = "PlatformTypes" +) + +const ( + // @enum DocumentParameterType + DocumentParameterTypeString = "String" + // @enum DocumentParameterType + DocumentParameterTypeStringList = "StringList" +) + +const ( + // @enum DocumentStatus + DocumentStatusCreating = "Creating" + // @enum DocumentStatus + DocumentStatusActive = "Active" + // @enum DocumentStatus + DocumentStatusDeleting = "Deleting" +) + +const ( + // @enum Fault + FaultClient = "Client" + // @enum Fault + FaultServer = "Server" + // @enum Fault + FaultUnknown = "Unknown" +) + +const ( + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyInstanceIds = "InstanceIds" + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyAgentVersion = "AgentVersion" + // @enum InstanceInformationFilterKey + InstanceInformationFilterKeyPingStatus = "PingStatus" + // @enum InstanceInformationFilterKey + 
InstanceInformationFilterKeyPlatformTypes = "PlatformTypes" +) + +const ( + // @enum PingStatus + PingStatusOnline = "Online" + // @enum PingStatus + PingStatusConnectionLost = "ConnectionLost" + // @enum PingStatus + PingStatusInactive = "Inactive" +) + +const ( + // @enum PlatformType + PlatformTypeWindows = "Windows" + // @enum PlatformType + PlatformTypeLinux = "Linux" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,418 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ssm_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ssm" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSSM_CancelCommand() { + svc := ssm.New(session.New()) + + params := &ssm.CancelCommandInput{ + CommandId: aws.String("CommandId"), // Required + InstanceIds: []*string{ + aws.String("InstanceId"), // Required + // More values... + }, + } + resp, err := svc.CancelCommand(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSSM_CreateAssociation() { + svc := ssm.New(session.New()) + + params := &ssm.CreateAssociationInput{ + InstanceId: aws.String("InstanceId"), // Required + Name: aws.String("DocumentName"), // Required + Parameters: map[string][]*string{ + "Key": { // Required + aws.String("ParameterValue"), // Required + // More values... + }, + // More values... + }, + } + resp, err := svc.CreateAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_CreateAssociationBatch() { + svc := ssm.New(session.New()) + + params := &ssm.CreateAssociationBatchInput{ + Entries: []*ssm.CreateAssociationBatchRequestEntry{ // Required + { // Required + InstanceId: aws.String("InstanceId"), + Name: aws.String("DocumentName"), + Parameters: map[string][]*string{ + "Key": { // Required + aws.String("ParameterValue"), // Required + // More values... + }, + // More values... + }, + }, + // More values... + }, + } + resp, err := svc.CreateAssociationBatch(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_CreateDocument() { + svc := ssm.New(session.New()) + + params := &ssm.CreateDocumentInput{ + Content: aws.String("DocumentContent"), // Required + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.CreateDocument(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSSM_DeleteAssociation() { + svc := ssm.New(session.New()) + + params := &ssm.DeleteAssociationInput{ + InstanceId: aws.String("InstanceId"), // Required + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.DeleteAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DeleteDocument() { + svc := ssm.New(session.New()) + + params := &ssm.DeleteDocumentInput{ + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.DeleteDocument(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DescribeAssociation() { + svc := ssm.New(session.New()) + + params := &ssm.DescribeAssociationInput{ + InstanceId: aws.String("InstanceId"), // Required + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.DescribeAssociation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_DescribeDocument() { + svc := ssm.New(session.New()) + + params := &ssm.DescribeDocumentInput{ + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.DescribeDocument(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSSM_DescribeInstanceInformation() { + svc := ssm.New(session.New()) + + params := &ssm.DescribeInstanceInformationInput{ + InstanceInformationFilterList: []*ssm.InstanceInformationFilter{ + { // Required + Key: aws.String("InstanceInformationFilterKey"), // Required + ValueSet: []*string{ // Required + aws.String("InstanceInformationFilterValue"), // Required + // More values... + }, + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeInstanceInformation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_GetDocument() { + svc := ssm.New(session.New()) + + params := &ssm.GetDocumentInput{ + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.GetDocument(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_ListAssociations() { + svc := ssm.New(session.New()) + + params := &ssm.ListAssociationsInput{ + AssociationFilterList: []*ssm.AssociationFilter{ // Required + { // Required + Key: aws.String("AssociationFilterKey"), // Required + Value: aws.String("AssociationFilterValue"), // Required + }, + // More values... + }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListAssociations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSSM_ListCommandInvocations() { + svc := ssm.New(session.New()) + + params := &ssm.ListCommandInvocationsInput{ + CommandId: aws.String("CommandId"), + Details: aws.Bool(true), + Filters: []*ssm.CommandFilter{ + { // Required + Key: aws.String("CommandFilterKey"), // Required + Value: aws.String("CommandFilterValue"), // Required + }, + // More values... + }, + InstanceId: aws.String("InstanceId"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListCommandInvocations(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_ListCommands() { + svc := ssm.New(session.New()) + + params := &ssm.ListCommandsInput{ + CommandId: aws.String("CommandId"), + Filters: []*ssm.CommandFilter{ + { // Required + Key: aws.String("CommandFilterKey"), // Required + Value: aws.String("CommandFilterValue"), // Required + }, + // More values... + }, + InstanceId: aws.String("InstanceId"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListCommands(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_ListDocuments() { + svc := ssm.New(session.New()) + + params := &ssm.ListDocumentsInput{ + DocumentFilterList: []*ssm.DocumentFilter{ + { // Required + Key: aws.String("DocumentFilterKey"), // Required + Value: aws.String("DocumentFilterValue"), // Required + }, + // More values... 
+ }, + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.ListDocuments(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_SendCommand() { + svc := ssm.New(session.New()) + + params := &ssm.SendCommandInput{ + DocumentName: aws.String("DocumentName"), // Required + InstanceIds: []*string{ // Required + aws.String("InstanceId"), // Required + // More values... + }, + Comment: aws.String("Comment"), + OutputS3BucketName: aws.String("S3BucketName"), + OutputS3KeyPrefix: aws.String("S3KeyPrefix"), + Parameters: map[string][]*string{ + "Key": { // Required + aws.String("ParameterValue"), // Required + // More values... + }, + // More values... + }, + TimeoutSeconds: aws.Int64(1), + } + resp, err := svc.SendCommand(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSSM_UpdateAssociationStatus() { + svc := ssm.New(session.New()) + + params := &ssm.UpdateAssociationStatusInput{ + AssociationStatus: &ssm.AssociationStatus{ // Required + Date: aws.Time(time.Now()), // Required + Message: aws.String("StatusMessage"), // Required + Name: aws.String("AssociationStatusName"), // Required + AdditionalInfo: aws.String("StatusAdditionalInfo"), + }, + InstanceId: aws.String("InstanceId"), // Required + Name: aws.String("DocumentName"), // Required + } + resp, err := svc.UpdateAssociationStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,194 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ssm + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// Simple Systems Manager (SSM) enables you to remotely manage the configuration +// of your Amazon EC2 instance. Using SSM, you can run scripts or commands using +// either EC2 Run Command or SSM Config. (SSM Config is currently available +// only for Windows instances.) +// +// Run Command Run Command provides an on-demand experience for executing +// commands. You can use pre-defined Amazon SSM documents to perform the actions +// listed later in this section, or you can create your own documents. With +// these documents, you can remotely configure your instances by sending commands +// using the Commands page in the Amazon EC2 console (http://console.aws.amazon.com/ec2/), +// AWS Tools for Windows PowerShell (http://docs.aws.amazon.com/powershell/latest/reference/items/Amazon_Simple_Systems_Management_cmdlets.html), +// or the AWS CLI (http://docs.aws.amazon.com/cli/latest/reference/ssm/index.html). +// +// Run Command reports the status of the command execution for each instance +// targeted by a command. 
You can also audit the command execution to understand +// who executed commands, when, and what changes were made. By switching between +// different SSM documents, you can quickly configure your instances with different +// types of commands. To get started with Run Command, verify that your environment +// meets the prerequisites for remotely running commands on EC2 instances (Linux +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/remote-commands-prereq.html) +// or Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/remote-commands-prereq.html)). +// +// SSM Config SSM Config is a lightweight instance configuration solution. +// SSM Config is currently only available for Windows instances. With SSM Config, +// you can specify a setup configuration for your instances. SSM Config is similar +// to EC2 User Data, which is another way of running one-time scripts or applying +// settings during instance launch. SSM Config is an extension of this capability. +// Using SSM documents, you can specify which actions the system should perform +// on your instances, including which applications to install, which AWS Directory +// Service directory to join, which Microsoft PowerShell modules to install, +// etc. If an instance is missing one or more of these configurations, the system +// makes those changes. By default, the system checks every five minutes to +// see if there is a new configuration to apply as defined in a new SSM document. +// If so, the system updates the instances accordingly. In this way, you can +// remotely maintain a consistent configuration baseline on your instances. +// SSM Config is available using the AWS CLI or the AWS Tools for Windows PowerShell. +// For more information, see Managing Windows Instance Configuration (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-configuration-manage.html). +// +// SSM Config and Run Command include the following pre-defined documents. 
+// +// Amazon Pre-defined SSM Documents Name Description Platform AWS-RunShellScript +// +// Run shell scripts +// +// Linux +// +// AWS-UpdateSSMAgent +// +// Update the Amazon SSM agent +// +// Linux +// +// AWS-JoinDirectoryServiceDomain +// +// Join an AWS Directory +// +// Windows +// +// AWS-RunPowerShellScript +// +// Run PowerShell commands or scripts +// +// Windows +// +// AWS-UpdateEC2Config +// +// Update the EC2Config service +// +// Windows +// +// AWS-ConfigureWindowsUpdate +// +// Configure Windows Update settings +// +// Windows +// +// AWS-InstallApplication +// +// Install, repair, or uninstall software using an MSI package +// +// Windows +// +// AWS-InstallPowerShellModule +// +// Install PowerShell modules +// +// Windows +// +// AWS-ConfigureCloudWatch +// +// Configure Amazon CloudWatch Logs to monitor applications and systems +// +// Windows +// +// The commands or scripts specified in SSM documents run with administrative +// privilege on your instances because the Amazon SSM agent runs as root on +// Linux and the EC2Config service runs in the Local System account on Windows. +// If a user has permission to execute any of the pre-defined SSM documents +// (any document that begins with AWS-*) then that user also has administrator +// access to the instance. Delegate access to SSM and Run Command judiciously. +// This becomes extremely important if you create your own SSM documents. Amazon +// Web Services does not provide guidance about how to create secure SSM documents. +// You create SSM documents and delegate access to Run Command at your own risk. +// As a security best practice, we recommend that you assign access to "AWS-*" +// documents, especially the AWS-RunShellScript document on Linux and the AWS-RunPowerShellScript +// document on Windows, to trusted administrators only. You can create SSM documents +// for specific tasks and delegate access to non-administrators. 
+//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SSM struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "ssm" + +// New creates a new instance of the SSM client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SSM client from just a session. +// svc := ssm.New(mySession) +// +// // Create a SSM client with additional configuration +// svc := ssm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSM { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SSM { + svc := &SSM{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-11-06", + JSONVersion: "1.1", + TargetPrefix: "AmazonSSM", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSM operation and runs any +// custom request initialization. +func (c *SSM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/ssmiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/ssmiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/ssmiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/ssm/ssmiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ssmiface provides an interface for the Amazon Simple Systems Management Service. 
+package ssmiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/ssm" +) + +// SSMAPI is the interface type for ssm.SSM. +type SSMAPI interface { + CancelCommandRequest(*ssm.CancelCommandInput) (*request.Request, *ssm.CancelCommandOutput) + + CancelCommand(*ssm.CancelCommandInput) (*ssm.CancelCommandOutput, error) + + CreateAssociationRequest(*ssm.CreateAssociationInput) (*request.Request, *ssm.CreateAssociationOutput) + + CreateAssociation(*ssm.CreateAssociationInput) (*ssm.CreateAssociationOutput, error) + + CreateAssociationBatchRequest(*ssm.CreateAssociationBatchInput) (*request.Request, *ssm.CreateAssociationBatchOutput) + + CreateAssociationBatch(*ssm.CreateAssociationBatchInput) (*ssm.CreateAssociationBatchOutput, error) + + CreateDocumentRequest(*ssm.CreateDocumentInput) (*request.Request, *ssm.CreateDocumentOutput) + + CreateDocument(*ssm.CreateDocumentInput) (*ssm.CreateDocumentOutput, error) + + DeleteAssociationRequest(*ssm.DeleteAssociationInput) (*request.Request, *ssm.DeleteAssociationOutput) + + DeleteAssociation(*ssm.DeleteAssociationInput) (*ssm.DeleteAssociationOutput, error) + + DeleteDocumentRequest(*ssm.DeleteDocumentInput) (*request.Request, *ssm.DeleteDocumentOutput) + + DeleteDocument(*ssm.DeleteDocumentInput) (*ssm.DeleteDocumentOutput, error) + + DescribeAssociationRequest(*ssm.DescribeAssociationInput) (*request.Request, *ssm.DescribeAssociationOutput) + + DescribeAssociation(*ssm.DescribeAssociationInput) (*ssm.DescribeAssociationOutput, error) + + DescribeDocumentRequest(*ssm.DescribeDocumentInput) (*request.Request, *ssm.DescribeDocumentOutput) + + DescribeDocument(*ssm.DescribeDocumentInput) (*ssm.DescribeDocumentOutput, error) + + DescribeInstanceInformationRequest(*ssm.DescribeInstanceInformationInput) (*request.Request, *ssm.DescribeInstanceInformationOutput) + + DescribeInstanceInformation(*ssm.DescribeInstanceInformationInput) (*ssm.DescribeInstanceInformationOutput, error) + + 
GetDocumentRequest(*ssm.GetDocumentInput) (*request.Request, *ssm.GetDocumentOutput) + + GetDocument(*ssm.GetDocumentInput) (*ssm.GetDocumentOutput, error) + + ListAssociationsRequest(*ssm.ListAssociationsInput) (*request.Request, *ssm.ListAssociationsOutput) + + ListAssociations(*ssm.ListAssociationsInput) (*ssm.ListAssociationsOutput, error) + + ListAssociationsPages(*ssm.ListAssociationsInput, func(*ssm.ListAssociationsOutput, bool) bool) error + + ListCommandInvocationsRequest(*ssm.ListCommandInvocationsInput) (*request.Request, *ssm.ListCommandInvocationsOutput) + + ListCommandInvocations(*ssm.ListCommandInvocationsInput) (*ssm.ListCommandInvocationsOutput, error) + + ListCommandInvocationsPages(*ssm.ListCommandInvocationsInput, func(*ssm.ListCommandInvocationsOutput, bool) bool) error + + ListCommandsRequest(*ssm.ListCommandsInput) (*request.Request, *ssm.ListCommandsOutput) + + ListCommands(*ssm.ListCommandsInput) (*ssm.ListCommandsOutput, error) + + ListCommandsPages(*ssm.ListCommandsInput, func(*ssm.ListCommandsOutput, bool) bool) error + + ListDocumentsRequest(*ssm.ListDocumentsInput) (*request.Request, *ssm.ListDocumentsOutput) + + ListDocuments(*ssm.ListDocumentsInput) (*ssm.ListDocumentsOutput, error) + + ListDocumentsPages(*ssm.ListDocumentsInput, func(*ssm.ListDocumentsOutput, bool) bool) error + + SendCommandRequest(*ssm.SendCommandInput) (*request.Request, *ssm.SendCommandOutput) + + SendCommand(*ssm.SendCommandInput) (*ssm.SendCommandOutput, error) + + UpdateAssociationStatusRequest(*ssm.UpdateAssociationStatusInput) (*request.Request, *ssm.UpdateAssociationStatusOutput) + + UpdateAssociationStatus(*ssm.UpdateAssociationStatusInput) (*ssm.UpdateAssociationStatusOutput, error) +} + +var _ SSMAPI = (*ssm.SSM)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/api.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4908 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package storagegateway provides a client for AWS Storage Gateway. +package storagegateway + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opActivateGateway = "ActivateGateway" + +// ActivateGatewayRequest generates a request for the ActivateGateway operation. +func (c *StorageGateway) ActivateGatewayRequest(input *ActivateGatewayInput) (req *request.Request, output *ActivateGatewayOutput) { + op := &request.Operation{ + Name: opActivateGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ActivateGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &ActivateGatewayOutput{} + req.Data = output + return +} + +// This operation activates the gateway you previously deployed on your host. +// For more information, see Activate the AWS Storage Gateway (http://docs.aws.amazon.com/storagegateway/latest/userguide/GettingStartedActivateGateway-common.html). +// In the activation process, you specify information such as the region you +// want to use for storing snapshots, the time zone for scheduled snapshots +// the gateway snapshot schedule window, an activation key, and a name for your +// gateway. The activation process also associates your gateway with your account; +// for more information, see UpdateGatewayInformation. +// +// You must turn on the gateway VM before you can activate your gateway. 
+func (c *StorageGateway) ActivateGateway(input *ActivateGatewayInput) (*ActivateGatewayOutput, error) { + req, out := c.ActivateGatewayRequest(input) + err := req.Send() + return out, err +} + +const opAddCache = "AddCache" + +// AddCacheRequest generates a request for the AddCache operation. +func (c *StorageGateway) AddCacheRequest(input *AddCacheInput) (req *request.Request, output *AddCacheOutput) { + op := &request.Operation{ + Name: opAddCache, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddCacheInput{} + } + + req = c.newRequest(op, input, output) + output = &AddCacheOutput{} + req.Data = output + return +} + +// This operation configures one or more gateway local disks as cache for a +// cached-volume gateway. This operation is supported only for the gateway-cached +// volume architecture (see Storage Gateway Concepts (http://docs.aws.amazon.com/storagegateway/latest/userguide/StorageGatewayConcepts.html)). +// +// In the request, you specify the gateway Amazon Resource Name (ARN) to which +// you want to add cache, and one or more disk IDs that you want to configure +// as cache. +func (c *StorageGateway) AddCache(input *AddCacheInput) (*AddCacheOutput, error) { + req, out := c.AddCacheRequest(input) + err := req.Send() + return out, err +} + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a request for the AddTagsToResource operation. +func (c *StorageGateway) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// This operation adds one or more tags to the specified resource. 
You use tags +// to add metadata to resources, which you can use to categorize these resources. +// For example, you can categorize resources by purpose, owner, environment, +// or team. Each tag consists of a key and a value, which you define. You can +// add tags to the following AWS Storage Gateway resources: +// +// Storage gateways of all types +// +// Storage Volumes +// +// Virtual Tapes +// +// You can create a maximum of 10 tags for each resource. Virtual tapes and +// storage volumes that are recovered to a new gateway maintain their tags. +func (c *StorageGateway) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + +const opAddUploadBuffer = "AddUploadBuffer" + +// AddUploadBufferRequest generates a request for the AddUploadBuffer operation. +func (c *StorageGateway) AddUploadBufferRequest(input *AddUploadBufferInput) (req *request.Request, output *AddUploadBufferOutput) { + op := &request.Operation{ + Name: opAddUploadBuffer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddUploadBufferInput{} + } + + req = c.newRequest(op, input, output) + output = &AddUploadBufferOutput{} + req.Data = output + return +} + +// This operation configures one or more gateway local disks as upload buffer +// for a specified gateway. This operation is supported for both the gateway-stored +// and gateway-cached volume architectures. +// +// In the request, you specify the gateway Amazon Resource Name (ARN) to which +// you want to add upload buffer, and one or more disk IDs that you want to +// configure as upload buffer. 
+func (c *StorageGateway) AddUploadBuffer(input *AddUploadBufferInput) (*AddUploadBufferOutput, error) { + req, out := c.AddUploadBufferRequest(input) + err := req.Send() + return out, err +} + +const opAddWorkingStorage = "AddWorkingStorage" + +// AddWorkingStorageRequest generates a request for the AddWorkingStorage operation. +func (c *StorageGateway) AddWorkingStorageRequest(input *AddWorkingStorageInput) (req *request.Request, output *AddWorkingStorageOutput) { + op := &request.Operation{ + Name: opAddWorkingStorage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddWorkingStorageInput{} + } + + req = c.newRequest(op, input, output) + output = &AddWorkingStorageOutput{} + req.Data = output + return +} + +// This operation configures one or more gateway local disks as working storage +// for a gateway. This operation is supported only for the gateway-stored volume +// architecture. This operation is deprecated method in cached-volumes API version +// (20120630). Use AddUploadBuffer instead. +// +// Working storage is also referred to as upload buffer. You can also use the +// AddUploadBuffer operation to add upload buffer to a stored-volume gateway. +// +// In the request, you specify the gateway Amazon Resource Name (ARN) to which +// you want to add working storage, and one or more disk IDs that you want to +// configure as working storage. +func (c *StorageGateway) AddWorkingStorage(input *AddWorkingStorageInput) (*AddWorkingStorageOutput, error) { + req, out := c.AddWorkingStorageRequest(input) + err := req.Send() + return out, err +} + +const opCancelArchival = "CancelArchival" + +// CancelArchivalRequest generates a request for the CancelArchival operation. 
+func (c *StorageGateway) CancelArchivalRequest(input *CancelArchivalInput) (req *request.Request, output *CancelArchivalOutput) { + op := &request.Operation{ + Name: opCancelArchival, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelArchivalInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelArchivalOutput{} + req.Data = output + return +} + +// Cancels archiving of a virtual tape to the virtual tape shelf (VTS) after +// the archiving process is initiated. +func (c *StorageGateway) CancelArchival(input *CancelArchivalInput) (*CancelArchivalOutput, error) { + req, out := c.CancelArchivalRequest(input) + err := req.Send() + return out, err +} + +const opCancelRetrieval = "CancelRetrieval" + +// CancelRetrievalRequest generates a request for the CancelRetrieval operation. +func (c *StorageGateway) CancelRetrievalRequest(input *CancelRetrievalInput) (req *request.Request, output *CancelRetrievalOutput) { + op := &request.Operation{ + Name: opCancelRetrieval, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelRetrievalInput{} + } + + req = c.newRequest(op, input, output) + output = &CancelRetrievalOutput{} + req.Data = output + return +} + +// Cancels retrieval of a virtual tape from the virtual tape shelf (VTS) to +// a gateway after the retrieval process is initiated. The virtual tape is returned +// to the VTS. +func (c *StorageGateway) CancelRetrieval(input *CancelRetrievalInput) (*CancelRetrievalOutput, error) { + req, out := c.CancelRetrievalRequest(input) + err := req.Send() + return out, err +} + +const opCreateCachediSCSIVolume = "CreateCachediSCSIVolume" + +// CreateCachediSCSIVolumeRequest generates a request for the CreateCachediSCSIVolume operation. 
+func (c *StorageGateway) CreateCachediSCSIVolumeRequest(input *CreateCachediSCSIVolumeInput) (req *request.Request, output *CreateCachediSCSIVolumeOutput) { + op := &request.Operation{ + Name: opCreateCachediSCSIVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCachediSCSIVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCachediSCSIVolumeOutput{} + req.Data = output + return +} + +// This operation creates a cached volume on a specified cached gateway. This +// operation is supported only for the gateway-cached volume architecture. +// +// Cache storage must be allocated to the gateway before you can create a cached +// volume. Use the AddCache operation to add cache storage to a gateway. In +// the request, you must specify the gateway, size of the volume in bytes, the +// iSCSI target name, an IP address on which to expose the target, and a unique +// client token. In response, AWS Storage Gateway creates the volume and returns +// information about it such as the volume Amazon Resource Name (ARN), its size, +// and the iSCSI target ARN that initiators can use to connect to the volume +// target. +func (c *StorageGateway) CreateCachediSCSIVolume(input *CreateCachediSCSIVolumeInput) (*CreateCachediSCSIVolumeOutput, error) { + req, out := c.CreateCachediSCSIVolumeRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +func (c *StorageGateway) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotOutput{} + req.Data = output + return +} + +// This operation initiates a snapshot of a volume. 
+// +// AWS Storage Gateway provides the ability to back up point-in-time snapshots +// of your data to Amazon Simple Storage (S3) for durable off-site recovery, +// as well as import the data to an Amazon Elastic Block Store (EBS) volume +// in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway +// volume on a scheduled or ad-hoc basis. This API enables you to take ad-hoc +// snapshot. For more information, see Working With Snapshots in the AWS Storage +// Gateway Console (http://docs.aws.amazon.com/storagegateway/latest/userguide/WorkingWithSnapshots.html). +// +// In the CreateSnapshot request you identify the volume by providing its Amazon +// Resource Name (ARN). You must also provide description for the snapshot. +// When AWS Storage Gateway takes the snapshot of specified volume, the snapshot +// and description appears in the AWS Storage Gateway Console. In response, +// AWS Storage Gateway returns you a snapshot ID. You can use this snapshot +// ID to check the snapshot progress or later use it when you want to create +// a volume from a snapshot. +// +// To list or delete a snapshot, you must use the Amazon EC2 API. For more +// information, see DescribeSnapshots or DeleteSnapshot in the EC2 API reference +// (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Operations.html). +func (c *StorageGateway) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) { + req, out := c.CreateSnapshotRequest(input) + err := req.Send() + return out, err +} + +const opCreateSnapshotFromVolumeRecoveryPoint = "CreateSnapshotFromVolumeRecoveryPoint" + +// CreateSnapshotFromVolumeRecoveryPointRequest generates a request for the CreateSnapshotFromVolumeRecoveryPoint operation. 
+func (c *StorageGateway) CreateSnapshotFromVolumeRecoveryPointRequest(input *CreateSnapshotFromVolumeRecoveryPointInput) (req *request.Request, output *CreateSnapshotFromVolumeRecoveryPointOutput) { + op := &request.Operation{ + Name: opCreateSnapshotFromVolumeRecoveryPoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotFromVolumeRecoveryPointInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSnapshotFromVolumeRecoveryPointOutput{} + req.Data = output + return +} + +// This operation initiates a snapshot of a gateway from a volume recovery point. +// This operation is supported only for the gateway-cached volume architecture +// (see ). +// +// A volume recovery point is a point in time at which all data of the volume +// is consistent and from which you can create a snapshot. To get a list of +// volume recovery point for gateway-cached volumes, use ListVolumeRecoveryPoints. +// +// In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume +// by providing its Amazon Resource Name (ARN). You must also provide a description +// for the snapshot. When AWS Storage Gateway takes a snapshot of the specified +// volume, the snapshot and its description appear in the AWS Storage Gateway +// console. In response, AWS Storage Gateway returns you a snapshot ID. You +// can use this snapshot ID to check the snapshot progress or later use it when +// you want to create a volume from a snapshot. +// +// To list or delete a snapshot, you must use the Amazon EC2 API. For more +// information, in Amazon Elastic Compute Cloud API Reference. 
+func (c *StorageGateway) CreateSnapshotFromVolumeRecoveryPoint(input *CreateSnapshotFromVolumeRecoveryPointInput) (*CreateSnapshotFromVolumeRecoveryPointOutput, error) { + req, out := c.CreateSnapshotFromVolumeRecoveryPointRequest(input) + err := req.Send() + return out, err +} + +const opCreateStorediSCSIVolume = "CreateStorediSCSIVolume" + +// CreateStorediSCSIVolumeRequest generates a request for the CreateStorediSCSIVolume operation. +func (c *StorageGateway) CreateStorediSCSIVolumeRequest(input *CreateStorediSCSIVolumeInput) (req *request.Request, output *CreateStorediSCSIVolumeOutput) { + op := &request.Operation{ + Name: opCreateStorediSCSIVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateStorediSCSIVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStorediSCSIVolumeOutput{} + req.Data = output + return +} + +// This operation creates a volume on a specified gateway. This operation is +// supported only for the gateway-stored volume architecture. +// +// The size of the volume to create is inferred from the disk size. You can +// choose to preserve existing data on the disk, create volume from an existing +// snapshot, or create an empty volume. If you choose to create an empty gateway +// volume, then any existing data on the disk is erased. +// +// In the request you must specify the gateway and the disk information on +// which you are creating the volume. In response, AWS Storage Gateway creates +// the volume and returns volume information such as the volume Amazon Resource +// Name (ARN), its size, and the iSCSI target ARN that initiators can use to +// connect to the volume target. 
+func (c *StorageGateway) CreateStorediSCSIVolume(input *CreateStorediSCSIVolumeInput) (*CreateStorediSCSIVolumeOutput, error) { + req, out := c.CreateStorediSCSIVolumeRequest(input) + err := req.Send() + return out, err +} + +const opCreateTapes = "CreateTapes" + +// CreateTapesRequest generates a request for the CreateTapes operation. +func (c *StorageGateway) CreateTapesRequest(input *CreateTapesInput) (req *request.Request, output *CreateTapesOutput) { + op := &request.Operation{ + Name: opCreateTapes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTapesInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateTapesOutput{} + req.Data = output + return +} + +// Creates one or more virtual tapes. You write data to the virtual tapes and +// then archive the tapes. +// +// Cache storage must be allocated to the gateway before you can create virtual +// tapes. Use the AddCache operation to add cache storage to a gateway. +func (c *StorageGateway) CreateTapes(input *CreateTapesInput) (*CreateTapesOutput, error) { + req, out := c.CreateTapesRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBandwidthRateLimit = "DeleteBandwidthRateLimit" + +// DeleteBandwidthRateLimitRequest generates a request for the DeleteBandwidthRateLimit operation. +func (c *StorageGateway) DeleteBandwidthRateLimitRequest(input *DeleteBandwidthRateLimitInput) (req *request.Request, output *DeleteBandwidthRateLimitOutput) { + op := &request.Operation{ + Name: opDeleteBandwidthRateLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBandwidthRateLimitInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteBandwidthRateLimitOutput{} + req.Data = output + return +} + +// This operation deletes the bandwidth rate limits of a gateway. You can delete +// either the upload and download bandwidth rate limit, or you can delete both. 
+// If you delete only one of the limits, the other limit remains unchanged. +// To specify which gateway to work with, use the Amazon Resource Name (ARN) +// of the gateway in your request. +func (c *StorageGateway) DeleteBandwidthRateLimit(input *DeleteBandwidthRateLimitInput) (*DeleteBandwidthRateLimitOutput, error) { + req, out := c.DeleteBandwidthRateLimitRequest(input) + err := req.Send() + return out, err +} + +const opDeleteChapCredentials = "DeleteChapCredentials" + +// DeleteChapCredentialsRequest generates a request for the DeleteChapCredentials operation. +func (c *StorageGateway) DeleteChapCredentialsRequest(input *DeleteChapCredentialsInput) (req *request.Request, output *DeleteChapCredentialsOutput) { + op := &request.Operation{ + Name: opDeleteChapCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteChapCredentialsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteChapCredentialsOutput{} + req.Data = output + return +} + +// This operation deletes Challenge-Handshake Authentication Protocol (CHAP) +// credentials for a specified iSCSI target and initiator pair. +func (c *StorageGateway) DeleteChapCredentials(input *DeleteChapCredentialsInput) (*DeleteChapCredentialsOutput, error) { + req, out := c.DeleteChapCredentialsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteGateway = "DeleteGateway" + +// DeleteGatewayRequest generates a request for the DeleteGateway operation. +func (c *StorageGateway) DeleteGatewayRequest(input *DeleteGatewayInput) (req *request.Request, output *DeleteGatewayOutput) { + op := &request.Operation{ + Name: opDeleteGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteGatewayOutput{} + req.Data = output + return +} + +// This operation deletes a gateway. 
To specify which gateway to delete, use +// the Amazon Resource Name (ARN) of the gateway in your request. The operation +// deletes the gateway; however, it does not delete the gateway virtual machine +// (VM) from your host computer. +// +// After you delete a gateway, you cannot reactivate it. Completed snapshots +// of the gateway volumes are not deleted upon deleting the gateway, however, +// pending snapshots will not complete. After you delete a gateway, your next +// step is to remove it from your environment. +// +// You no longer pay software charges after the gateway is deleted; however, +// your existing Amazon EBS snapshots persist and you will continue to be billed +// for these snapshots. You can choose to remove all remaining Amazon EBS snapshots +// by canceling your Amazon EC2 subscription.  If you prefer not to cancel your +// Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 +// console. For more information, see the AWS Storage Gateway Detail Page (http://aws.amazon.com/storagegateway). +func (c *StorageGateway) DeleteGateway(input *DeleteGatewayInput) (*DeleteGatewayOutput, error) { + req, out := c.DeleteGatewayRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSnapshotSchedule = "DeleteSnapshotSchedule" + +// DeleteSnapshotScheduleRequest generates a request for the DeleteSnapshotSchedule operation. +func (c *StorageGateway) DeleteSnapshotScheduleRequest(input *DeleteSnapshotScheduleInput) (req *request.Request, output *DeleteSnapshotScheduleOutput) { + op := &request.Operation{ + Name: opDeleteSnapshotSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotScheduleInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSnapshotScheduleOutput{} + req.Data = output + return +} + +// This operation deletes a snapshot of a volume. +// +// You can take snapshots of your gateway volumes on a scheduled or ad-hoc +// basis. 
This API enables you to delete a snapshot schedule for a volume. For +// more information, see Working with Snapshots (http://docs.aws.amazon.com/storagegateway/latest/userguide/WorkingWithSnapshots.html). +// In the DeleteSnapshotSchedule request, you identify the volume by providing +// its Amazon Resource Name (ARN). +// +// To list or delete a snapshot, you must use the Amazon EC2 API. in Amazon +// Elastic Compute Cloud API Reference. +func (c *StorageGateway) DeleteSnapshotSchedule(input *DeleteSnapshotScheduleInput) (*DeleteSnapshotScheduleOutput, error) { + req, out := c.DeleteSnapshotScheduleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTape = "DeleteTape" + +// DeleteTapeRequest generates a request for the DeleteTape operation. +func (c *StorageGateway) DeleteTapeRequest(input *DeleteTapeInput) (req *request.Request, output *DeleteTapeOutput) { + op := &request.Operation{ + Name: opDeleteTape, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTapeInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTapeOutput{} + req.Data = output + return +} + +// Deletes the specified virtual tape. +func (c *StorageGateway) DeleteTape(input *DeleteTapeInput) (*DeleteTapeOutput, error) { + req, out := c.DeleteTapeRequest(input) + err := req.Send() + return out, err +} + +const opDeleteTapeArchive = "DeleteTapeArchive" + +// DeleteTapeArchiveRequest generates a request for the DeleteTapeArchive operation. +func (c *StorageGateway) DeleteTapeArchiveRequest(input *DeleteTapeArchiveInput) (req *request.Request, output *DeleteTapeArchiveOutput) { + op := &request.Operation{ + Name: opDeleteTapeArchive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTapeArchiveInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteTapeArchiveOutput{} + req.Data = output + return +} + +// Deletes the specified virtual tape from the virtual tape shelf (VTS). 
+func (c *StorageGateway) DeleteTapeArchive(input *DeleteTapeArchiveInput) (*DeleteTapeArchiveOutput, error) { + req, out := c.DeleteTapeArchiveRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVolume = "DeleteVolume" + +// DeleteVolumeRequest generates a request for the DeleteVolume operation. +func (c *StorageGateway) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { + op := &request.Operation{ + Name: opDeleteVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVolumeInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteVolumeOutput{} + req.Data = output + return +} + +// This operation deletes the specified gateway volume that you previously created +// using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. For gateway-stored +// volumes, the local disk that was configured as the storage volume is not +// deleted. You can reuse the local disk to create another storage volume. +// +// Before you delete a gateway volume, make sure there are no iSCSI connections +// to the volume you are deleting. You should also make sure there is no snapshot +// in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API +// to query snapshots on the volume you are deleting and check the snapshot +// status. For more information, go to DescribeSnapshots (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html) +// in the Amazon Elastic Compute Cloud API Reference. +// +// In the request, you must provide the Amazon Resource Name (ARN) of the storage +// volume you want to delete. 
+func (c *StorageGateway) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) { + req, out := c.DeleteVolumeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeBandwidthRateLimit = "DescribeBandwidthRateLimit" + +// DescribeBandwidthRateLimitRequest generates a request for the DescribeBandwidthRateLimit operation. +func (c *StorageGateway) DescribeBandwidthRateLimitRequest(input *DescribeBandwidthRateLimitInput) (req *request.Request, output *DescribeBandwidthRateLimitOutput) { + op := &request.Operation{ + Name: opDescribeBandwidthRateLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBandwidthRateLimitInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeBandwidthRateLimitOutput{} + req.Data = output + return +} + +// This operation returns the bandwidth rate limits of a gateway. By default, +// these limits are not set, which means no bandwidth rate limiting is in effect. +// +// This operation only returns a value for a bandwidth rate limit only if the +// limit is set. If no limits are set for the gateway, then this operation returns +// only the gateway ARN in the response body. To specify which gateway to describe, +// use the Amazon Resource Name (ARN) of the gateway in your request. +func (c *StorageGateway) DescribeBandwidthRateLimit(input *DescribeBandwidthRateLimitInput) (*DescribeBandwidthRateLimitOutput, error) { + req, out := c.DescribeBandwidthRateLimitRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCache = "DescribeCache" + +// DescribeCacheRequest generates a request for the DescribeCache operation. 
+func (c *StorageGateway) DescribeCacheRequest(input *DescribeCacheInput) (req *request.Request, output *DescribeCacheOutput) { + op := &request.Operation{ + Name: opDescribeCache, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCacheInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCacheOutput{} + req.Data = output + return +} + +// This operation returns information about the cache of a gateway. This operation +// is supported only for the gateway-cached volume architecture. +// +// The response includes disk IDs that are configured as cache, and it includes +// the amount of cache allocated and used. +func (c *StorageGateway) DescribeCache(input *DescribeCacheInput) (*DescribeCacheOutput, error) { + req, out := c.DescribeCacheRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCachediSCSIVolumes = "DescribeCachediSCSIVolumes" + +// DescribeCachediSCSIVolumesRequest generates a request for the DescribeCachediSCSIVolumes operation. +func (c *StorageGateway) DescribeCachediSCSIVolumesRequest(input *DescribeCachediSCSIVolumesInput) (req *request.Request, output *DescribeCachediSCSIVolumesOutput) { + op := &request.Operation{ + Name: opDescribeCachediSCSIVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCachediSCSIVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCachediSCSIVolumesOutput{} + req.Data = output + return +} + +// This operation returns a description of the gateway volumes specified in +// the request. This operation is supported only for the gateway-cached volume +// architecture. +// +// The list of gateway volumes in the request must be from one gateway. In +// the response Amazon Storage Gateway returns volume information sorted by +// volume Amazon Resource Name (ARN). 
+func (c *StorageGateway) DescribeCachediSCSIVolumes(input *DescribeCachediSCSIVolumesInput) (*DescribeCachediSCSIVolumesOutput, error) { + req, out := c.DescribeCachediSCSIVolumesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeChapCredentials = "DescribeChapCredentials" + +// DescribeChapCredentialsRequest generates a request for the DescribeChapCredentials operation. +func (c *StorageGateway) DescribeChapCredentialsRequest(input *DescribeChapCredentialsInput) (req *request.Request, output *DescribeChapCredentialsOutput) { + op := &request.Operation{ + Name: opDescribeChapCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeChapCredentialsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeChapCredentialsOutput{} + req.Data = output + return +} + +// This operation returns an array of Challenge-Handshake Authentication Protocol +// (CHAP) credentials information for a specified iSCSI target, one for each +// target-initiator pair. +func (c *StorageGateway) DescribeChapCredentials(input *DescribeChapCredentialsInput) (*DescribeChapCredentialsOutput, error) { + req, out := c.DescribeChapCredentialsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeGatewayInformation = "DescribeGatewayInformation" + +// DescribeGatewayInformationRequest generates a request for the DescribeGatewayInformation operation. 
+func (c *StorageGateway) DescribeGatewayInformationRequest(input *DescribeGatewayInformationInput) (req *request.Request, output *DescribeGatewayInformationOutput) { + op := &request.Operation{ + Name: opDescribeGatewayInformation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGatewayInformationInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeGatewayInformationOutput{} + req.Data = output + return +} + +// This operation returns metadata about a gateway such as its name, network +// interfaces, configured time zone, and the state (whether the gateway is running +// or not). To specify which gateway to describe, use the Amazon Resource Name +// (ARN) of the gateway in your request. +func (c *StorageGateway) DescribeGatewayInformation(input *DescribeGatewayInformationInput) (*DescribeGatewayInformationOutput, error) { + req, out := c.DescribeGatewayInformationRequest(input) + err := req.Send() + return out, err +} + +const opDescribeMaintenanceStartTime = "DescribeMaintenanceStartTime" + +// DescribeMaintenanceStartTimeRequest generates a request for the DescribeMaintenanceStartTime operation. +func (c *StorageGateway) DescribeMaintenanceStartTimeRequest(input *DescribeMaintenanceStartTimeInput) (req *request.Request, output *DescribeMaintenanceStartTimeOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceStartTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMaintenanceStartTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeMaintenanceStartTimeOutput{} + req.Data = output + return +} + +// This operation returns your gateway's weekly maintenance start time including +// the day and time of the week. Note that values are in terms of the gateway's +// time zone. 
+func (c *StorageGateway) DescribeMaintenanceStartTime(input *DescribeMaintenanceStartTimeInput) (*DescribeMaintenanceStartTimeOutput, error) { + req, out := c.DescribeMaintenanceStartTimeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSnapshotSchedule = "DescribeSnapshotSchedule" + +// DescribeSnapshotScheduleRequest generates a request for the DescribeSnapshotSchedule operation. +func (c *StorageGateway) DescribeSnapshotScheduleRequest(input *DescribeSnapshotScheduleInput) (req *request.Request, output *DescribeSnapshotScheduleOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotScheduleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSnapshotScheduleOutput{} + req.Data = output + return +} + +// This operation describes the snapshot schedule for the specified gateway +// volume. The snapshot schedule information includes intervals at which snapshots +// are automatically initiated on the volume. +func (c *StorageGateway) DescribeSnapshotSchedule(input *DescribeSnapshotScheduleInput) (*DescribeSnapshotScheduleOutput, error) { + req, out := c.DescribeSnapshotScheduleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeStorediSCSIVolumes = "DescribeStorediSCSIVolumes" + +// DescribeStorediSCSIVolumesRequest generates a request for the DescribeStorediSCSIVolumes operation. 
+func (c *StorageGateway) DescribeStorediSCSIVolumesRequest(input *DescribeStorediSCSIVolumesInput) (req *request.Request, output *DescribeStorediSCSIVolumesOutput) { + op := &request.Operation{ + Name: opDescribeStorediSCSIVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStorediSCSIVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStorediSCSIVolumesOutput{} + req.Data = output + return +} + +// This operation returns the description of the gateway volumes specified in +// the request. The list of gateway volumes in the request must be from one +// gateway. In the response Amazon Storage Gateway returns volume information +// sorted by volume ARNs. +func (c *StorageGateway) DescribeStorediSCSIVolumes(input *DescribeStorediSCSIVolumesInput) (*DescribeStorediSCSIVolumesOutput, error) { + req, out := c.DescribeStorediSCSIVolumesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTapeArchives = "DescribeTapeArchives" + +// DescribeTapeArchivesRequest generates a request for the DescribeTapeArchives operation. +func (c *StorageGateway) DescribeTapeArchivesRequest(input *DescribeTapeArchivesInput) (req *request.Request, output *DescribeTapeArchivesOutput) { + op := &request.Operation{ + Name: opDescribeTapeArchives, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTapeArchivesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTapeArchivesOutput{} + req.Data = output + return +} + +// Returns a description of specified virtual tapes in the virtual tape shelf +// (VTS). +// +// If a specific TapeARN is not specified, AWS Storage Gateway returns a description +// of all virtual tapes found in the VTS associated with your account. 
+func (c *StorageGateway) DescribeTapeArchives(input *DescribeTapeArchivesInput) (*DescribeTapeArchivesOutput, error) { + req, out := c.DescribeTapeArchivesRequest(input) + err := req.Send() + return out, err +} + +func (c *StorageGateway) DescribeTapeArchivesPages(input *DescribeTapeArchivesInput, fn func(p *DescribeTapeArchivesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTapeArchivesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTapeArchivesOutput), lastPage) + }) +} + +const opDescribeTapeRecoveryPoints = "DescribeTapeRecoveryPoints" + +// DescribeTapeRecoveryPointsRequest generates a request for the DescribeTapeRecoveryPoints operation. +func (c *StorageGateway) DescribeTapeRecoveryPointsRequest(input *DescribeTapeRecoveryPointsInput) (req *request.Request, output *DescribeTapeRecoveryPointsOutput) { + op := &request.Operation{ + Name: opDescribeTapeRecoveryPoints, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTapeRecoveryPointsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTapeRecoveryPointsOutput{} + req.Data = output + return +} + +// Returns a list of virtual tape recovery points that are available for the +// specified gateway-VTL. +// +// A recovery point is a point in time view of a virtual tape at which all +// the data on the virtual tape is consistent. If your gateway crashes, virtual +// tapes that have recovery points can be recovered to a new gateway. 
+func (c *StorageGateway) DescribeTapeRecoveryPoints(input *DescribeTapeRecoveryPointsInput) (*DescribeTapeRecoveryPointsOutput, error) { + req, out := c.DescribeTapeRecoveryPointsRequest(input) + err := req.Send() + return out, err +} + +func (c *StorageGateway) DescribeTapeRecoveryPointsPages(input *DescribeTapeRecoveryPointsInput, fn func(p *DescribeTapeRecoveryPointsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTapeRecoveryPointsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTapeRecoveryPointsOutput), lastPage) + }) +} + +const opDescribeTapes = "DescribeTapes" + +// DescribeTapesRequest generates a request for the DescribeTapes operation. +func (c *StorageGateway) DescribeTapesRequest(input *DescribeTapesInput) (req *request.Request, output *DescribeTapesOutput) { + op := &request.Operation{ + Name: opDescribeTapes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTapesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTapesOutput{} + req.Data = output + return +} + +// Returns a description of the specified Amazon Resource Name (ARN) of virtual +// tapes. If a TapeARN is not specified, returns a description of all virtual +// tapes associated with the specified gateway. 
+func (c *StorageGateway) DescribeTapes(input *DescribeTapesInput) (*DescribeTapesOutput, error) { + req, out := c.DescribeTapesRequest(input) + err := req.Send() + return out, err +} + +func (c *StorageGateway) DescribeTapesPages(input *DescribeTapesInput, fn func(p *DescribeTapesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeTapesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeTapesOutput), lastPage) + }) +} + +const opDescribeUploadBuffer = "DescribeUploadBuffer" + +// DescribeUploadBufferRequest generates a request for the DescribeUploadBuffer operation. +func (c *StorageGateway) DescribeUploadBufferRequest(input *DescribeUploadBufferInput) (req *request.Request, output *DescribeUploadBufferOutput) { + op := &request.Operation{ + Name: opDescribeUploadBuffer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeUploadBufferInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeUploadBufferOutput{} + req.Data = output + return +} + +// This operation returns information about the upload buffer of a gateway. +// This operation is supported for both the gateway-stored and gateway-cached +// volume architectures. +// +// The response includes disk IDs that are configured as upload buffer space, +// and it includes the amount of upload buffer space allocated and used. +func (c *StorageGateway) DescribeUploadBuffer(input *DescribeUploadBufferInput) (*DescribeUploadBufferOutput, error) { + req, out := c.DescribeUploadBufferRequest(input) + err := req.Send() + return out, err +} + +const opDescribeVTLDevices = "DescribeVTLDevices" + +// DescribeVTLDevicesRequest generates a request for the DescribeVTLDevices operation. 
+func (c *StorageGateway) DescribeVTLDevicesRequest(input *DescribeVTLDevicesInput) (req *request.Request, output *DescribeVTLDevicesOutput) { + op := &request.Operation{ + Name: opDescribeVTLDevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVTLDevicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVTLDevicesOutput{} + req.Data = output + return +} + +// Returns a description of virtual tape library (VTL) devices for the specified +// gateway. In the response, AWS Storage Gateway returns VTL device information. +// +// The list of VTL devices must be from one gateway. +func (c *StorageGateway) DescribeVTLDevices(input *DescribeVTLDevicesInput) (*DescribeVTLDevicesOutput, error) { + req, out := c.DescribeVTLDevicesRequest(input) + err := req.Send() + return out, err +} + +func (c *StorageGateway) DescribeVTLDevicesPages(input *DescribeVTLDevicesInput, fn func(p *DescribeVTLDevicesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeVTLDevicesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeVTLDevicesOutput), lastPage) + }) +} + +const opDescribeWorkingStorage = "DescribeWorkingStorage" + +// DescribeWorkingStorageRequest generates a request for the DescribeWorkingStorage operation. 
+func (c *StorageGateway) DescribeWorkingStorageRequest(input *DescribeWorkingStorageInput) (req *request.Request, output *DescribeWorkingStorageOutput) { + op := &request.Operation{ + Name: opDescribeWorkingStorage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkingStorageInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkingStorageOutput{} + req.Data = output + return +} + +// This operation returns information about the working storage of a gateway. +// This operation is supported only for the gateway-stored volume architecture. +// This operation is deprecated in cached-volumes API version (20120630). Use +// DescribeUploadBuffer instead. +// +// Working storage is also referred to as upload buffer. You can also use the +// DescribeUploadBuffer operation to add upload buffer to a stored-volume gateway. +// +// The response includes disk IDs that are configured as working storage, and +// it includes the amount of working storage allocated and used. +func (c *StorageGateway) DescribeWorkingStorage(input *DescribeWorkingStorageInput) (*DescribeWorkingStorageOutput, error) { + req, out := c.DescribeWorkingStorageRequest(input) + err := req.Send() + return out, err +} + +const opDisableGateway = "DisableGateway" + +// DisableGatewayRequest generates a request for the DisableGateway operation. +func (c *StorageGateway) DisableGatewayRequest(input *DisableGatewayInput) (req *request.Request, output *DisableGatewayOutput) { + op := &request.Operation{ + Name: opDisableGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableGatewayOutput{} + req.Data = output + return +} + +// Disables a gateway when the gateway is no longer functioning. For example, +// if your gateway VM is damaged, you can disable the gateway so you can recover +// virtual tapes. 
+// +// Use this operation for a gateway-VTL that is not reachable or not functioning. +// +// Once a gateway is disabled it cannot be enabled. +func (c *StorageGateway) DisableGateway(input *DisableGatewayInput) (*DisableGatewayOutput, error) { + req, out := c.DisableGatewayRequest(input) + err := req.Send() + return out, err +} + +const opListGateways = "ListGateways" + +// ListGatewaysRequest generates a request for the ListGateways operation. +func (c *StorageGateway) ListGatewaysRequest(input *ListGatewaysInput) (req *request.Request, output *ListGatewaysOutput) { + op := &request.Operation{ + Name: opListGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListGatewaysInput{} + } + + req = c.newRequest(op, input, output) + output = &ListGatewaysOutput{} + req.Data = output + return +} + +// This operation lists gateways owned by an AWS account in a region specified +// in the request. The returned list is ordered by gateway Amazon Resource Name +// (ARN). +// +// By default, the operation returns a maximum of 100 gateways. This operation +// supports pagination that allows you to optionally reduce the number of gateways +// returned in a response. +// +// If you have more gateways than are returned in a response-that is, the response +// returns only a truncated list of your gateways-the response contains a marker +// that you can specify in your next request to fetch the next page of gateways. 
+func (c *StorageGateway) ListGateways(input *ListGatewaysInput) (*ListGatewaysOutput, error) { + req, out := c.ListGatewaysRequest(input) + err := req.Send() + return out, err +} + +func (c *StorageGateway) ListGatewaysPages(input *ListGatewaysInput, fn func(p *ListGatewaysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListGatewaysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListGatewaysOutput), lastPage) + }) +} + +const opListLocalDisks = "ListLocalDisks" + +// ListLocalDisksRequest generates a request for the ListLocalDisks operation. +func (c *StorageGateway) ListLocalDisksRequest(input *ListLocalDisksInput) (req *request.Request, output *ListLocalDisksOutput) { + op := &request.Operation{ + Name: opListLocalDisks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListLocalDisksInput{} + } + + req = c.newRequest(op, input, output) + output = &ListLocalDisksOutput{} + req.Data = output + return +} + +// This operation returns a list of the gateway's local disks. To specify which +// gateway to describe, you use the Amazon Resource Name (ARN) of the gateway +// in the body of the request. +// +// The request returns a list of all disks, specifying which are configured +// as working storage, cache storage, or stored volume or not configured at +// all. The response includes a DiskStatus field. This field can have a value +// of present (the disk is available to use), missing (the disk is no longer +// connected to the gateway), or mismatch (the disk node is occupied by a disk +// that has incorrect metadata or the disk content is corrupted). 
+func (c *StorageGateway) ListLocalDisks(input *ListLocalDisksInput) (*ListLocalDisksOutput, error) { + req, out := c.ListLocalDisksRequest(input) + err := req.Send() + return out, err +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +func (c *StorageGateway) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// This operation lists the tags that have been added to the specified resource. +func (c *StorageGateway) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opListVolumeInitiators = "ListVolumeInitiators" + +// ListVolumeInitiatorsRequest generates a request for the ListVolumeInitiators operation. +func (c *StorageGateway) ListVolumeInitiatorsRequest(input *ListVolumeInitiatorsInput) (req *request.Request, output *ListVolumeInitiatorsOutput) { + op := &request.Operation{ + Name: opListVolumeInitiators, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListVolumeInitiatorsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVolumeInitiatorsOutput{} + req.Data = output + return +} + +// This operation lists iSCSI initiators that are connected to a volume. You +// can use this operation to determine whether a volume is being used or not. 
+func (c *StorageGateway) ListVolumeInitiators(input *ListVolumeInitiatorsInput) (*ListVolumeInitiatorsOutput, error) { + req, out := c.ListVolumeInitiatorsRequest(input) + err := req.Send() + return out, err +} + +const opListVolumeRecoveryPoints = "ListVolumeRecoveryPoints" + +// ListVolumeRecoveryPointsRequest generates a request for the ListVolumeRecoveryPoints operation. +func (c *StorageGateway) ListVolumeRecoveryPointsRequest(input *ListVolumeRecoveryPointsInput) (req *request.Request, output *ListVolumeRecoveryPointsOutput) { + op := &request.Operation{ + Name: opListVolumeRecoveryPoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListVolumeRecoveryPointsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVolumeRecoveryPointsOutput{} + req.Data = output + return +} + +// This operation lists the recovery points for a specified gateway. This operation +// is supported only for the gateway-cached volume architecture. +// +// Each gateway-cached volume has one recovery point. A volume recovery point +// is a point in time at which all data of the volume is consistent and from +// which you can create a snapshot. To create a snapshot from a volume recovery +// point use the CreateSnapshotFromVolumeRecoveryPoint operation. +func (c *StorageGateway) ListVolumeRecoveryPoints(input *ListVolumeRecoveryPointsInput) (*ListVolumeRecoveryPointsOutput, error) { + req, out := c.ListVolumeRecoveryPointsRequest(input) + err := req.Send() + return out, err +} + +const opListVolumes = "ListVolumes" + +// ListVolumesRequest generates a request for the ListVolumes operation. 
+func (c *StorageGateway) ListVolumesRequest(input *ListVolumesInput) (req *request.Request, output *ListVolumesOutput) { + op := &request.Operation{ + Name: opListVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListVolumesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVolumesOutput{} + req.Data = output + return +} + +// This operation lists the iSCSI stored volumes of a gateway. Results are sorted +// by volume ARN. The response includes only the volume ARNs. If you want additional +// volume information, use the DescribeStorediSCSIVolumes API. +// +// The operation supports pagination. By default, the operation returns a maximum +// of up to 100 volumes. You can optionally specify the Limit field in the body +// to limit the number of volumes in the response. If the number of volumes +// returned in the response is truncated, the response includes a Marker field. +// You can use this Marker value in your subsequent request to retrieve the +// next set of volumes. +func (c *StorageGateway) ListVolumes(input *ListVolumesInput) (*ListVolumesOutput, error) { + req, out := c.ListVolumesRequest(input) + err := req.Send() + return out, err +} + +func (c *StorageGateway) ListVolumesPages(input *ListVolumesInput, fn func(p *ListVolumesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListVolumesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListVolumesOutput), lastPage) + }) +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a request for the RemoveTagsFromResource operation. 
+func (c *StorageGateway) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// This operation removes one or more tags from the specified resource. +func (c *StorageGateway) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + +const opResetCache = "ResetCache" + +// ResetCacheRequest generates a request for the ResetCache operation. +func (c *StorageGateway) ResetCacheRequest(input *ResetCacheInput) (req *request.Request, output *ResetCacheOutput) { + op := &request.Operation{ + Name: opResetCache, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetCacheInput{} + } + + req = c.newRequest(op, input, output) + output = &ResetCacheOutput{} + req.Data = output + return +} + +// This operation resets all cache disks that have encountered a error and makes +// the disks available for reconfiguration as cache storage. If your cache disk +// encounters a error, the gateway prevents read and write operations on virtual +// tapes in the gateway. For example, an error can occur when a disk is corrupted +// or removed from the gateway. When a cache is reset, the gateway loses its +// cache storage. At this point you can reconfigure the disks as cache disks. +// +// If the cache disk you are resetting contains data that has not been uploaded +// to Amazon S3 yet, that data can be lost. 
After you reset cache disks, there +// will be no configured cache disks left in the gateway, so you must configure +// at least one new cache disk for your gateway to function properly. +func (c *StorageGateway) ResetCache(input *ResetCacheInput) (*ResetCacheOutput, error) { + req, out := c.ResetCacheRequest(input) + err := req.Send() + return out, err +} + +const opRetrieveTapeArchive = "RetrieveTapeArchive" + +// RetrieveTapeArchiveRequest generates a request for the RetrieveTapeArchive operation. +func (c *StorageGateway) RetrieveTapeArchiveRequest(input *RetrieveTapeArchiveInput) (req *request.Request, output *RetrieveTapeArchiveOutput) { + op := &request.Operation{ + Name: opRetrieveTapeArchive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetrieveTapeArchiveInput{} + } + + req = c.newRequest(op, input, output) + output = &RetrieveTapeArchiveOutput{} + req.Data = output + return +} + +// Retrieves an archived virtual tape from the virtual tape shelf (VTS) to a +// gateway-VTL. Virtual tapes archived in the VTS are not associated with any +// gateway. However after a tape is retrieved, it is associated with a gateway, +// even though it is also listed in the VTS. +// +// Once a tape is successfully retrieved to a gateway, it cannot be retrieved +// again to another gateway. You must archive the tape again before you can +// retrieve it to another gateway. +func (c *StorageGateway) RetrieveTapeArchive(input *RetrieveTapeArchiveInput) (*RetrieveTapeArchiveOutput, error) { + req, out := c.RetrieveTapeArchiveRequest(input) + err := req.Send() + return out, err +} + +const opRetrieveTapeRecoveryPoint = "RetrieveTapeRecoveryPoint" + +// RetrieveTapeRecoveryPointRequest generates a request for the RetrieveTapeRecoveryPoint operation. 
+func (c *StorageGateway) RetrieveTapeRecoveryPointRequest(input *RetrieveTapeRecoveryPointInput) (req *request.Request, output *RetrieveTapeRecoveryPointOutput) { + op := &request.Operation{ + Name: opRetrieveTapeRecoveryPoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RetrieveTapeRecoveryPointInput{} + } + + req = c.newRequest(op, input, output) + output = &RetrieveTapeRecoveryPointOutput{} + req.Data = output + return +} + +// Retrieves the recovery point for the specified virtual tape. +// +// A recovery point is a point in time view of a virtual tape at which all +// the data on the tape is consistent. If your gateway crashes, virtual tapes +// that have recovery points can be recovered to a new gateway. +// +// The virtual tape can be retrieved to only one gateway. The retrieved tape +// is read-only. The virtual tape can be retrieved to only a gateway-VTL. There +// is no charge for retrieving recovery points. +func (c *StorageGateway) RetrieveTapeRecoveryPoint(input *RetrieveTapeRecoveryPointInput) (*RetrieveTapeRecoveryPointOutput, error) { + req, out := c.RetrieveTapeRecoveryPointRequest(input) + err := req.Send() + return out, err +} + +const opShutdownGateway = "ShutdownGateway" + +// ShutdownGatewayRequest generates a request for the ShutdownGateway operation. +func (c *StorageGateway) ShutdownGatewayRequest(input *ShutdownGatewayInput) (req *request.Request, output *ShutdownGatewayOutput) { + op := &request.Operation{ + Name: opShutdownGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ShutdownGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &ShutdownGatewayOutput{} + req.Data = output + return +} + +// This operation shuts down a gateway. To specify which gateway to shut down, +// use the Amazon Resource Name (ARN) of the gateway in the body of your request. 
+// +// The operation shuts down the gateway service component running in the storage +// gateway's virtual machine (VM) and not the VM. +// +// If you want to shut down the VM, it is recommended that you first shut down +// the gateway component in the VM to avoid unpredictable conditions. After +// the gateway is shutdown, you cannot call any other API except StartGateway, +// DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. +// Your applications cannot read from or write to the gateway's storage volumes, +// and there are no snapshots taken. +// +// When you make a shutdown request, you will get a 200 OK success response +// immediately. However, it might take some time for the gateway to shut down. +// You can call the DescribeGatewayInformation API to check the status. For +// more information, see ActivateGateway. If do not intend to use the gateway +// again, you must delete the gateway (using DeleteGateway) to no longer pay +// software charges associated with the gateway. +func (c *StorageGateway) ShutdownGateway(input *ShutdownGatewayInput) (*ShutdownGatewayOutput, error) { + req, out := c.ShutdownGatewayRequest(input) + err := req.Send() + return out, err +} + +const opStartGateway = "StartGateway" + +// StartGatewayRequest generates a request for the StartGateway operation. +func (c *StorageGateway) StartGatewayRequest(input *StartGatewayInput) (req *request.Request, output *StartGatewayOutput) { + op := &request.Operation{ + Name: opStartGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartGatewayInput{} + } + + req = c.newRequest(op, input, output) + output = &StartGatewayOutput{} + req.Data = output + return +} + +// This operation starts a gateway that you previously shut down (see ShutdownGateway). 
+// After the gateway starts, you can then make other API calls, your applications +// can read from or write to the gateway's storage volumes and you will be able +// to take snapshot backups. +// +// When you make a request, you will get a 200 OK success response immediately. +// However, it might take some time for the gateway to be ready. You should +// call DescribeGatewayInformation and check the status before making any additional +// API calls. For more information, see ActivateGateway. To specify which gateway +// to start, use the Amazon Resource Name (ARN) of the gateway in your request. +func (c *StorageGateway) StartGateway(input *StartGatewayInput) (*StartGatewayOutput, error) { + req, out := c.StartGatewayRequest(input) + err := req.Send() + return out, err +} + +const opUpdateBandwidthRateLimit = "UpdateBandwidthRateLimit" + +// UpdateBandwidthRateLimitRequest generates a request for the UpdateBandwidthRateLimit operation. +func (c *StorageGateway) UpdateBandwidthRateLimitRequest(input *UpdateBandwidthRateLimitInput) (req *request.Request, output *UpdateBandwidthRateLimitOutput) { + op := &request.Operation{ + Name: opUpdateBandwidthRateLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBandwidthRateLimitInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateBandwidthRateLimitOutput{} + req.Data = output + return +} + +// This operation updates the bandwidth rate limits of a gateway. You can update +// both the upload and download bandwidth rate limit or specify only one of +// the two. If you don't set a bandwidth rate limit, the existing rate limit +// remains. +// +// By default, a gateway's bandwidth rate limits are not set. If you don't +// set any limit, the gateway does not have any limitations on its bandwidth +// usage and could potentially use the maximum available bandwidth. 
+// +// To specify which gateway to update, use the Amazon Resource Name (ARN) of +// the gateway in your request. +func (c *StorageGateway) UpdateBandwidthRateLimit(input *UpdateBandwidthRateLimitInput) (*UpdateBandwidthRateLimitOutput, error) { + req, out := c.UpdateBandwidthRateLimitRequest(input) + err := req.Send() + return out, err +} + +const opUpdateChapCredentials = "UpdateChapCredentials" + +// UpdateChapCredentialsRequest generates a request for the UpdateChapCredentials operation. +func (c *StorageGateway) UpdateChapCredentialsRequest(input *UpdateChapCredentialsInput) (req *request.Request, output *UpdateChapCredentialsOutput) { + op := &request.Operation{ + Name: opUpdateChapCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateChapCredentialsInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateChapCredentialsOutput{} + req.Data = output + return +} + +// This operation updates the Challenge-Handshake Authentication Protocol (CHAP) +// credentials for a specified iSCSI target. By default, a gateway does not +// have CHAP enabled; however, for added security, you might use it. +// +// When you update CHAP credentials, all existing connections on the target +// are closed and initiators must reconnect with the new credentials. +func (c *StorageGateway) UpdateChapCredentials(input *UpdateChapCredentialsInput) (*UpdateChapCredentialsOutput, error) { + req, out := c.UpdateChapCredentialsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateGatewayInformation = "UpdateGatewayInformation" + +// UpdateGatewayInformationRequest generates a request for the UpdateGatewayInformation operation. 
+func (c *StorageGateway) UpdateGatewayInformationRequest(input *UpdateGatewayInformationInput) (req *request.Request, output *UpdateGatewayInformationOutput) { + op := &request.Operation{ + Name: opUpdateGatewayInformation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGatewayInformationInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateGatewayInformationOutput{} + req.Data = output + return +} + +// This operation updates a gateway's metadata, which includes the gateway's +// name and time zone. To specify which gateway to update, use the Amazon Resource +// Name (ARN) of the gateway in your request. +func (c *StorageGateway) UpdateGatewayInformation(input *UpdateGatewayInformationInput) (*UpdateGatewayInformationOutput, error) { + req, out := c.UpdateGatewayInformationRequest(input) + err := req.Send() + return out, err +} + +const opUpdateGatewaySoftwareNow = "UpdateGatewaySoftwareNow" + +// UpdateGatewaySoftwareNowRequest generates a request for the UpdateGatewaySoftwareNow operation. +func (c *StorageGateway) UpdateGatewaySoftwareNowRequest(input *UpdateGatewaySoftwareNowInput) (req *request.Request, output *UpdateGatewaySoftwareNowOutput) { + op := &request.Operation{ + Name: opUpdateGatewaySoftwareNow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGatewaySoftwareNowInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateGatewaySoftwareNowOutput{} + req.Data = output + return +} + +// This operation updates the gateway virtual machine (VM) software. The request +// immediately triggers the software update. +// +// When you make this request, you get a 200 OK success response immediately. +// However, it might take some time for the update to complete. You can call +// DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING +// state. A software update forces a system restart of your gateway. 
You can +// minimize the chance of any disruption to your applications by increasing +// your iSCSI Initiators' timeouts. For more information about increasing iSCSI +// Initiator timeouts for Windows and Linux, see Customizing Your Windows iSCSI +// Settings (http://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorWindowsClient.html#CustomizeWindowsiSCSISettings) +// and Customizing Your Linux iSCSI Settings (http://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorRedHatClient.html#CustomizeLinuxiSCSISettings), +// respectively. +func (c *StorageGateway) UpdateGatewaySoftwareNow(input *UpdateGatewaySoftwareNowInput) (*UpdateGatewaySoftwareNowOutput, error) { + req, out := c.UpdateGatewaySoftwareNowRequest(input) + err := req.Send() + return out, err +} + +const opUpdateMaintenanceStartTime = "UpdateMaintenanceStartTime" + +// UpdateMaintenanceStartTimeRequest generates a request for the UpdateMaintenanceStartTime operation. +func (c *StorageGateway) UpdateMaintenanceStartTimeRequest(input *UpdateMaintenanceStartTimeInput) (req *request.Request, output *UpdateMaintenanceStartTimeOutput) { + op := &request.Operation{ + Name: opUpdateMaintenanceStartTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMaintenanceStartTimeInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateMaintenanceStartTimeOutput{} + req.Data = output + return +} + +// This operation updates a gateway's weekly maintenance start time information, +// including day and time of the week. The maintenance time is the time in your +// gateway's time zone. 
+func (c *StorageGateway) UpdateMaintenanceStartTime(input *UpdateMaintenanceStartTimeInput) (*UpdateMaintenanceStartTimeOutput, error) { + req, out := c.UpdateMaintenanceStartTimeRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSnapshotSchedule = "UpdateSnapshotSchedule" + +// UpdateSnapshotScheduleRequest generates a request for the UpdateSnapshotSchedule operation. +func (c *StorageGateway) UpdateSnapshotScheduleRequest(input *UpdateSnapshotScheduleInput) (req *request.Request, output *UpdateSnapshotScheduleOutput) { + op := &request.Operation{ + Name: opUpdateSnapshotSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSnapshotScheduleInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateSnapshotScheduleOutput{} + req.Data = output + return +} + +// This operation updates a snapshot schedule configured for a gateway volume. +// +// The default snapshot schedule for volume is once every 24 hours, starting +// at the creation time of the volume. You can use this API to change the snapshot +// schedule configured for the volume. +// +// In the request you must identify the gateway volume whose snapshot schedule +// you want to update, and the schedule information, including when you want +// the snapshot to begin on a day and the frequency (in hours) of snapshots. +func (c *StorageGateway) UpdateSnapshotSchedule(input *UpdateSnapshotScheduleInput) (*UpdateSnapshotScheduleOutput, error) { + req, out := c.UpdateSnapshotScheduleRequest(input) + err := req.Send() + return out, err +} + +const opUpdateVTLDeviceType = "UpdateVTLDeviceType" + +// UpdateVTLDeviceTypeRequest generates a request for the UpdateVTLDeviceType operation. 
+func (c *StorageGateway) UpdateVTLDeviceTypeRequest(input *UpdateVTLDeviceTypeInput) (req *request.Request, output *UpdateVTLDeviceTypeOutput) { + op := &request.Operation{ + Name: opUpdateVTLDeviceType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateVTLDeviceTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateVTLDeviceTypeOutput{} + req.Data = output + return +} + +// This operation updates the type of medium changer in a gateway-VTL. When +// you activate a gateway-VTL, you select a medium changer type for the gateway-VTL. +// This operation enables you to select a different type of medium changer after +// a gateway-VTL is activated. +func (c *StorageGateway) UpdateVTLDeviceType(input *UpdateVTLDeviceTypeInput) (*UpdateVTLDeviceTypeOutput, error) { + req, out := c.UpdateVTLDeviceTypeRequest(input) + err := req.Send() + return out, err +} + +// A JSON object containing one or more of the following fields: +// +// ActivateGatewayInput$ActivationKey GatewayName ActivateGatewayInput$GatewayRegion +// ActivateGatewayInput$GatewayTimezone ActivateGatewayInput$GatewayType +// ActivateGatewayInput$TapeDriveType ActivateGatewayInput$MediumChangerType +type ActivateGatewayInput struct { + _ struct{} `type:"structure"` + + // Your gateway activation key. You can obtain the activation key by sending + // an HTTP GET request with redirects enabled to the gateway IP address (port + // 80). The redirect URL returned in the response provides you the activation + // key for your gateway in the query string parameter activationKey. It may + // also include other activation-related parameters, however, these are merely + // defaults -- the arguments you pass to the ActivateGateway API call determine + // the actual configuration of your gateway. + ActivationKey *string `min:"1" type:"string" required:"true"` + + // A unique identifier for your gateway. 
This name becomes part of the gateway + // Amazon Resources Name (ARN) which is what you use as an input to other operations. + GatewayName *string `min:"2" type:"string" required:"true"` + + // One of the values that indicates the region where you want to store the snapshot + // backups. The gateway region specified must be the same region as the region + // in your Host header in the request. For more information about available + // regions and endpoints for AWS Storage Gateway, see Regions and Endpoints + // (http://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) in the + // Amazon Web Services Glossary. + // + // Valid Values: "us-east-1", "us-west-1", "us-west-2", "eu-west-1", "eu-central-1", + // "ap-northeast-1", "ap-southeast-1", "ap-southeast-2", "sa-east-1" + GatewayRegion *string `min:"1" type:"string" required:"true"` + + // One of the values that indicates the time zone you want to set for the gateway. + // The time zone is used, for example, for scheduling snapshots and your gateway's + // maintenance schedule. + GatewayTimezone *string `min:"3" type:"string" required:"true"` + + // One of the values that defines the type of gateway to activate. The type + // specified is critical to all later functions of the gateway and cannot be + // changed after activation. The default value is STORED. + GatewayType *string `min:"2" type:"string"` + + // The value that indicates the type of medium changer to use for gateway-VTL. + // This field is optional. + // + // Valid Values: "STK-L700", "AWS-Gateway-VTL" + MediumChangerType *string `min:"2" type:"string"` + + // The value that indicates the type of tape drive to use for gateway-VTL. This + // field is optional. 
+ // + // Valid Values: "IBM-ULT3580-TD5" + TapeDriveType *string `min:"2" type:"string"` +} + +// String returns the string representation +func (s ActivateGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivateGatewayInput) GoString() string { + return s.String() +} + +// AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated +// gateway. It is a string made of information such as your account, gateway +// name, and region. This ARN is used to reference the gateway in other API +// operations as well as resource-based authorization. +type ActivateGatewayOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ActivateGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivateGatewayOutput) GoString() string { + return s.String() +} + +type AddCacheInput struct { + _ struct{} `type:"structure"` + + DiskIds []*string `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddCacheInput) GoString() string { + return s.String() +} + +type AddCacheOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s AddCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddCacheOutput) GoString() string { + return s.String() +} + +// AddTagsToResourceInput +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource you want to add tags to. + ResourceARN *string `min:"50" type:"string" required:"true"` + + // The key-value pair that represents the tag you want to add to the resource. + // The value can be an empty string. + // + // Valid characters for key and value are letters, spaces, and numbers representable + // in UTF-8 format, and the following special characters: + - = . _ : / @. + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// AddTagsToResourceOutput +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource you want to add tags to. + ResourceARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s AddTagsToResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceOutput) GoString() string { + return s.String() +} + +type AddUploadBufferInput struct { + _ struct{} `type:"structure"` + + DiskIds []*string `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddUploadBufferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUploadBufferInput) GoString() string { + return s.String() +} + +type AddUploadBufferOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s AddUploadBufferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUploadBufferOutput) GoString() string { + return s.String() +} + +// A JSON object containing one or more of the following fields: +// +// AddWorkingStorageInput$DiskIds +type AddWorkingStorageInput struct { + _ struct{} `type:"structure"` + + // An array of strings that identify disks that are to be configured as working + // storage. Each string have a minimum length of 1 and maximum length of 300. + // You can get the disk IDs from the ListLocalDisks API. + DiskIds []*string `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddWorkingStorageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddWorkingStorageInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway for which working storage was +// configured. +type AddWorkingStorageOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. 
Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s AddWorkingStorageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddWorkingStorageOutput) GoString() string { + return s.String() +} + +type CachediSCSIVolume struct { + _ struct{} `type:"structure"` + + SourceSnapshotId *string `type:"string"` + + VolumeARN *string `min:"50" type:"string"` + + VolumeId *string `min:"12" type:"string"` + + VolumeProgress *float64 `type:"double"` + + VolumeSizeInBytes *int64 `type:"long"` + + VolumeStatus *string `min:"3" type:"string"` + + VolumeType *string `min:"3" type:"string"` + + // Lists iSCSI information about a volume. + VolumeiSCSIAttributes *VolumeiSCSIAttributes `type:"structure"` +} + +// String returns the string representation +func (s CachediSCSIVolume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachediSCSIVolume) GoString() string { + return s.String() +} + +// CancelArchivalInput +type CancelArchivalInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the virtual tape you want to cancel archiving + // for. 
+ TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelArchivalInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelArchivalInput) GoString() string { + return s.String() +} + +// CancelArchivalOutput +type CancelArchivalOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape for which archiving was + // canceled. + TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s CancelArchivalOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelArchivalOutput) GoString() string { + return s.String() +} + +// CancelRetrievalInput +type CancelRetrievalInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the virtual tape you want to cancel retrieval + // for. + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelRetrievalInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelRetrievalInput) GoString() string { + return s.String() +} + +// CancelRetrievalOutput +type CancelRetrievalOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape for which retrieval was + // canceled. 
+ TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s CancelRetrievalOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelRetrievalOutput) GoString() string { + return s.String() +} + +// Describes Challenge-Handshake Authentication Protocol (CHAP) information +// that supports authentication between your gateway and iSCSI initiators. +type ChapInfo struct { + _ struct{} `type:"structure"` + + // The iSCSI initiator that connects to the target. + InitiatorName *string `min:"1" type:"string"` + + // The secret key that the initiator (for example, the Windows client) must + // provide to participate in mutual CHAP with the target. + SecretToAuthenticateInitiator *string `min:"1" type:"string"` + + // The secret key that the target must provide to participate in mutual CHAP + // with the initiator (e.g. Windows client). + SecretToAuthenticateTarget *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the volume. + // + // Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens + // (-). + TargetARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ChapInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChapInfo) GoString() string { + return s.String() +} + +type CreateCachediSCSIVolumeInput struct { + _ struct{} `type:"structure"` + + ClientToken *string `min:"5" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string" required:"true"` + + NetworkInterfaceId *string `type:"string" required:"true"` + + SnapshotId *string `type:"string"` + + TargetName *string `min:"1" type:"string" required:"true"` + + VolumeSizeInBytes *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateCachediSCSIVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCachediSCSIVolumeInput) GoString() string { + return s.String() +} + +type CreateCachediSCSIVolumeOutput struct { + _ struct{} `type:"structure"` + + TargetARN *string `min:"50" type:"string"` + + VolumeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s CreateCachediSCSIVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCachediSCSIVolumeOutput) GoString() string { + return s.String() +} + +type CreateSnapshotFromVolumeRecoveryPointInput struct { + _ struct{} `type:"structure"` + + SnapshotDescription *string `min:"1" type:"string" required:"true"` + + VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSnapshotFromVolumeRecoveryPointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotFromVolumeRecoveryPointInput) GoString() string { + return s.String() +} + +type CreateSnapshotFromVolumeRecoveryPointOutput struct { + _ struct{} `type:"structure"` + + SnapshotId *string `type:"string"` + + VolumeARN *string `min:"50" type:"string"` + + VolumeRecoveryPointTime *string `type:"string"` +} + +// String returns the string representation +func (s CreateSnapshotFromVolumeRecoveryPointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s 
CreateSnapshotFromVolumeRecoveryPointOutput) GoString() string { + return s.String() +} + +// A JSON object containing one or more of the following fields: +// +// CreateSnapshotInput$SnapshotDescription CreateSnapshotInput$VolumeARN +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // Textual description of the snapshot that appears in the Amazon EC2 console, + // Elastic Block Store snapshots panel in the Description field, and in the + // AWS Storage Gateway snapshot Details pane, Description field + SnapshotDescription *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation + // to return a list of gateway volumes. + VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +type CreateSnapshotOutput struct { + _ struct{} `type:"structure"` + + // The snapshot ID that is used to refer to the snapshot in future operations + // such as describing snapshots (Amazon Elastic Compute Cloud API DescribeSnapshots) + // or creating a volume from a snapshot (CreateStorediSCSIVolume). + SnapshotId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the volume of which the snapshot was taken. 
+ VolumeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s CreateSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotOutput) GoString() string { + return s.String() +} + +// A JSON object containing one or more of the following fields: +// +// CreateStorediSCSIVolumeInput$DiskId CreateStorediSCSIVolumeInput$NetworkInterfaceId +// CreateStorediSCSIVolumeInput$PreserveExistingData CreateStorediSCSIVolumeInput$SnapshotId +// CreateStorediSCSIVolumeInput$TargetName +type CreateStorediSCSIVolumeInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the gateway local disk that is configured as a + // stored volume. Use ListLocalDisks (http://docs.aws.amazon.com/storagegateway/latest/userguide/API_ListLocalDisks.html) + // to list disk IDs for a gateway. + DiskId *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The network interface of the gateway on which to expose the iSCSI target. + // Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a + // list of the network interfaces available on a gateway. + // + // Valid Values: A valid IP address. + NetworkInterfaceId *string `type:"string" required:"true"` + + // Specify this field as true if you want to preserve the data on the local + // disk. Otherwise, specifying this field as false creates an empty volume. + // + // Valid Values: true, false + PreserveExistingData *bool `type:"boolean" required:"true"` + + // The snapshot ID (e.g. "snap-1122aabb") of the snapshot to restore as the + // new stored volume. Specify this field if you want to create the iSCSI storage + // volume from a snapshot otherwise do not include this field. 
To list snapshots + // for your account use DescribeSnapshots (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html) + // in the Amazon Elastic Compute Cloud API Reference. + SnapshotId *string `type:"string"` + + // The name of the iSCSI target used by initiators to connect to the target + // and as a suffix for the target ARN. For example, specifying TargetName as + // myvolume results in the target ARN of arn:aws:storagegateway:us-east-1:111122223333:gateway/mygateway/target/iqn.1997-05.com.amazon:myvolume. + // The target name must be unique across all volumes of a gateway. + TargetName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateStorediSCSIVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStorediSCSIVolumeInput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +type CreateStorediSCSIVolumeOutput struct { + _ struct{} `type:"structure"` + + // he Amazon Resource Name (ARN) of the volume target that includes the iSCSI + // name that initiators can use to connect to the target. + TargetARN *string `min:"50" type:"string"` + + // The Amazon Resource Name (ARN) of the configured volume. + VolumeARN *string `min:"50" type:"string"` + + // The size of the volume in bytes. + VolumeSizeInBytes *int64 `type:"long"` +} + +// String returns the string representation +func (s CreateStorediSCSIVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStorediSCSIVolumeOutput) GoString() string { + return s.String() +} + +// CreateTapesInput +type CreateTapesInput struct { + _ struct{} `type:"structure"` + + // A unique identifier that you use to retry a request. If you retry a request, + // use the same ClientToken you specified in the initial request. 
+ // + // Using the same ClientToken prevents creating the tape multiple times. + ClientToken *string `min:"5" type:"string" required:"true"` + + // The unique Amazon Resource Name(ARN) that represents the gateway to associate + // the virtual tapes with. Use the ListGateways operation to return a list of + // gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The number of virtual tapes you want to create. + NumTapesToCreate *int64 `min:"1" type:"integer" required:"true"` + + // A prefix you append to the barcode of the virtual tape you are creating. + // This makes a barcode unique. + // + // The prefix must be 1 to 4 characters in length and must be upper-case letters + // A-Z. + TapeBarcodePrefix *string `min:"1" type:"string" required:"true"` + + // The size, in bytes, of the virtual tapes you want to create. + // + // The size must be gigabyte (1024*1024*1024 byte) aligned. + TapeSizeInBytes *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateTapesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTapesInput) GoString() string { + return s.String() +} + +// CreateTapeOutput +type CreateTapesOutput struct { + _ struct{} `type:"structure"` + + // A list of unique Amazon Resource Named (ARN) that represents the virtual + // tapes that were created. + TapeARNs []*string `type:"list"` +} + +// String returns the string representation +func (s CreateTapesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTapesOutput) GoString() string { + return s.String() +} + +type DeleteBandwidthRateLimitInput struct { + _ struct{} `type:"structure"` + + BandwidthType *string `min:"3" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. 
Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBandwidthRateLimitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBandwidthRateLimitInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway whose bandwidth rate information +// was deleted. +type DeleteBandwidthRateLimitOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteBandwidthRateLimitOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBandwidthRateLimitOutput) GoString() string { + return s.String() +} + +// A JSON object containing one or more of the following fields: +// +// DeleteChapCredentialsInput$InitiatorName DeleteChapCredentialsInput$TargetARN +type DeleteChapCredentialsInput struct { + _ struct{} `type:"structure"` + + // The iSCSI initiator that connects to the target. + InitiatorName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes + // operation to return to retrieve the TargetARN for specified VolumeARN. 
+ TargetARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteChapCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteChapCredentialsInput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +type DeleteChapCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The iSCSI initiator that connects to the target. + InitiatorName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the target. + TargetARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteChapCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteChapCredentialsOutput) GoString() string { + return s.String() +} + +// A JSON object containing the id of the gateway to delete. +type DeleteGatewayInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGatewayInput) GoString() string { + return s.String() +} + +// A JSON object containing the id of the deleted gateway. +type DeleteGatewayOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGatewayOutput) GoString() string { + return s.String() +} + +type DeleteSnapshotScheduleInput struct { + _ struct{} `type:"structure"` + + VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotScheduleInput) GoString() string { + return s.String() +} + +type DeleteSnapshotScheduleOutput struct { + _ struct{} `type:"structure"` + + VolumeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteSnapshotScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotScheduleOutput) GoString() string { + return s.String() +} + +// DeleteTapeArchiveInput +type DeleteTapeArchiveInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape to delete from the virtual + // tape shelf (VTS). + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTapeArchiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTapeArchiveInput) GoString() string { + return s.String() +} + +// DeleteTapeArchiveOutput +type DeleteTapeArchiveOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape that was deleted from + // the virtual tape shelf (VTS). 
+ TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteTapeArchiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTapeArchiveOutput) GoString() string { + return s.String() +} + +// DeleteTapeInput +type DeleteTapeInput struct { + _ struct{} `type:"structure"` + + // The unique Amazon Resource Name (ARN) of the gateway that the virtual tape + // to delete is associated with. Use the ListGateways operation to return a + // list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the virtual tape to delete. + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTapeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTapeInput) GoString() string { + return s.String() +} + +// DeleteTapeOutput +type DeleteTapeOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the deleted virtual tape. + TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteTapeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTapeOutput) GoString() string { + return s.String() +} + +// A JSON object containing the DeleteVolumeInput$VolumeARN to delete. +type DeleteVolumeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation + // to return a list of gateway volumes. 
+ VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the storage volume that was deleted +type DeleteVolumeOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the storage volume that was deleted. It + // is the same ARN you provided in the request. + VolumeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeOutput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway. +type DescribeBandwidthRateLimitInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeBandwidthRateLimitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBandwidthRateLimitInput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +type DescribeBandwidthRateLimitOutput struct { + _ struct{} `type:"structure"` + + // The average download bandwidth rate limit in bits per second. This field + // does not appear in the response if the download rate limit is not set. + AverageDownloadRateLimitInBitsPerSec *int64 `min:"102400" type:"long"` + + // The average upload bandwidth rate limit in bits per second. 
This field does + // not appear in the response if the upload rate limit is not set. + AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DescribeBandwidthRateLimitOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBandwidthRateLimitOutput) GoString() string { + return s.String() +} + +type DescribeCacheInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheInput) GoString() string { + return s.String() +} + +type DescribeCacheOutput struct { + _ struct{} `type:"structure"` + + CacheAllocatedInBytes *int64 `type:"long"` + + CacheDirtyPercentage *float64 `type:"double"` + + CacheHitPercentage *float64 `type:"double"` + + CacheMissPercentage *float64 `type:"double"` + + CacheUsedPercentage *float64 `type:"double"` + + DiskIds []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DescribeCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCacheOutput) GoString() string { + return s.String() +} + +type DescribeCachediSCSIVolumesInput struct { + _ struct{} `type:"structure"` + + VolumeARNs []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeCachediSCSIVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCachediSCSIVolumesInput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +type DescribeCachediSCSIVolumesOutput struct { + _ struct{} `type:"structure"` + + // An array of objects where each object contains metadata about one cached + // volume. + CachediSCSIVolumes []*CachediSCSIVolume `type:"list"` +} + +// String returns the string representation +func (s DescribeCachediSCSIVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCachediSCSIVolumesOutput) GoString() string { + return s.String() +} + +// A JSON object containing the Amazon Resource Name (ARN) of the iSCSI volume +// target. +type DescribeChapCredentialsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes + // operation to return to retrieve the TargetARN for specified VolumeARN. + TargetARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeChapCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeChapCredentialsInput) GoString() string { + return s.String() +} + +// A JSON object containing a . 
+type DescribeChapCredentialsOutput struct { + _ struct{} `type:"structure"` + + // An array of ChapInfo objects that represent CHAP credentials. Each object + // in the array contains CHAP credential information for one target-initiator + // pair. If no CHAP credentials are set, an empty array is returned. CHAP credential + // information is provided in a JSON object with the following fields: + // + // InitiatorName: The iSCSI initiator that connects to the target. + // + // SecretToAuthenticateInitiator: The secret key that the initiator (for + // example, the Windows client) must provide to participate in mutual CHAP with + // the target. + // + // SecretToAuthenticateTarget: The secret key that the target must provide + // to participate in mutual CHAP with the initiator (e.g. Windows client). + // + // TargetARN: The Amazon Resource Name (ARN) of the storage volume. + ChapCredentials []*ChapInfo `type:"list"` +} + +// String returns the string representation +func (s DescribeChapCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeChapCredentialsOutput) GoString() string { + return s.String() +} + +// A JSON object containing the id of the gateway. +type DescribeGatewayInformationInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeGatewayInformationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGatewayInformationInput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +type DescribeGatewayInformationOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + // The gateway ID. + GatewayId *string `min:"12" type:"string"` + + // The gateway name. + GatewayName *string `type:"string"` + + // A NetworkInterface array that contains descriptions of the gateway network + // interfaces. + GatewayNetworkInterfaces []*NetworkInterface `type:"list"` + + // One of the values that indicates the operating state of the gateway. + GatewayState *string `min:"2" type:"string"` + + // One of the values that indicates the time zone configured for the gateway. + GatewayTimezone *string `min:"3" type:"string"` + + // The type of the gateway. + GatewayType *string `min:"2" type:"string"` + + // The date on which the last software update was applied to the gateway. If + // the gateway has never been updated, this field does not return a value in + // the response. + LastSoftwareUpdate *string `min:"1" type:"string"` + + // The date on which an update to the gateway is available. This date is in + // the time zone of the gateway. If the gateway is not available for an update + // this field is not returned in the response. 
+ NextUpdateAvailabilityDate *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGatewayInformationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGatewayInformationOutput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway. +type DescribeMaintenanceStartTimeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMaintenanceStartTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceStartTimeInput) GoString() string { + return s.String() +} + +type DescribeMaintenanceStartTimeOutput struct { + _ struct{} `type:"structure"` + + DayOfWeek *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + HourOfDay *int64 `type:"integer"` + + MinuteOfHour *int64 `type:"integer"` + + Timezone *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s DescribeMaintenanceStartTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceStartTimeOutput) GoString() string { + return s.String() +} + +// A JSON object containing the DescribeSnapshotScheduleInput$VolumeARN of the +// volume. +type DescribeSnapshotScheduleInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation + // to return a list of gateway volumes. 
+ VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSnapshotScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotScheduleInput) GoString() string { + return s.String() +} + +type DescribeSnapshotScheduleOutput struct { + _ struct{} `type:"structure"` + + Description *string `min:"1" type:"string"` + + RecurrenceInHours *int64 `min:"1" type:"integer"` + + StartAt *int64 `type:"integer"` + + Timezone *string `min:"3" type:"string"` + + VolumeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DescribeSnapshotScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotScheduleOutput) GoString() string { + return s.String() +} + +// A JSON Object containing a list of DescribeStorediSCSIVolumesInput$VolumeARNs. +type DescribeStorediSCSIVolumesInput struct { + _ struct{} `type:"structure"` + + // An array of strings where each string represents the Amazon Resource Name + // (ARN) of a stored volume. All of the specified stored volumes must from the + // same gateway. Use ListVolumes to get volume ARNs for a gateway. 
+ VolumeARNs []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeStorediSCSIVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStorediSCSIVolumesInput) GoString() string { + return s.String() +} + +type DescribeStorediSCSIVolumesOutput struct { + _ struct{} `type:"structure"` + + StorediSCSIVolumes []*StorediSCSIVolume `type:"list"` +} + +// String returns the string representation +func (s DescribeStorediSCSIVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStorediSCSIVolumesOutput) GoString() string { + return s.String() +} + +// DescribeTapeArchivesInput +type DescribeTapeArchivesInput struct { + _ struct{} `type:"structure"` + + // Specifies that the number of virtual tapes descried be limited to the specified + // number. + Limit *int64 `min:"1" type:"integer"` + + // An opaque string that indicates the position at which to begin describing + // virtual tapes. + Marker *string `min:"1" type:"string"` + + // Specifies one or more unique Amazon Resource Names (ARNs) that represent + // the virtual tapes you want to describe. + TapeARNs []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTapeArchivesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapeArchivesInput) GoString() string { + return s.String() +} + +// DescribeTapeArchivesOutput +type DescribeTapeArchivesOutput struct { + _ struct{} `type:"structure"` + + // An opaque string that indicates the position at which the virtual tapes that + // were fetched for description ended. Use this marker in your next request + // to fetch the next set of virtual tapes in the virtual tape shelf (VTS). 
If + // there are no more virtual tapes to describe, this field does not appear in + // the response. + Marker *string `min:"1" type:"string"` + + // An array of virtual tape objects in the virtual tape shelf (VTS). The description + // includes of the Amazon Resource Name(ARN) of the virtual tapes. The information + // returned includes the Amazon Resource Names (ARNs) of the tapes, size of + // the tapes, status of the tapes, progress of the description and tape barcode. + TapeArchives []*TapeArchive `type:"list"` +} + +// String returns the string representation +func (s DescribeTapeArchivesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapeArchivesOutput) GoString() string { + return s.String() +} + +// DescribeTapeRecoveryPointsInput +type DescribeTapeRecoveryPointsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // Specifies that the number of virtual tape recovery points that are described + // be limited to the specified number. + Limit *int64 `min:"1" type:"integer"` + + // An opaque string that indicates the position at which to begin describing + // the virtual tape recovery points. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeTapeRecoveryPointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapeRecoveryPointsInput) GoString() string { + return s.String() +} + +// DescribeTapeRecoveryPointsOutput +type DescribeTapeRecoveryPointsOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` + + // An opaque string that indicates the position at which the virtual tape recovery + // points that were listed for description ended. + // + // Use this marker in your next request to list the next set of virtual tape + // recovery points in the list. If there are no more recovery points to describe, + // this field does not appear in the response. + Marker *string `min:"1" type:"string"` + + // An array of TapeRecoveryPointInfos that are available for the specified gateway. + TapeRecoveryPointInfos []*TapeRecoveryPointInfo `type:"list"` +} + +// String returns the string representation +func (s DescribeTapeRecoveryPointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapeRecoveryPointsOutput) GoString() string { + return s.String() +} + +// DescribeTapesInput +type DescribeTapesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // Specifies that the number of virtual tapes described be limited to the specified + // number. + // + // Amazon Web Services may impose its own limit, if this field is not set. + Limit *int64 `min:"1" type:"integer"` + + // A marker value, obtained in a previous call to DescribeTapes. This marker + // indicates which page of results to retrieve. + // + // If not specified, the first page of results is retrieved. + Marker *string `min:"1" type:"string"` + + // Specifies one or more unique Amazon Resource Names (ARNs) that represent + // the virtual tapes you want to describe. If this parameter is not specified, + // AWS Storage Gateway returns a description of all virtual tapes associated + // with the specified gateway. 
+ TapeARNs []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTapesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapesInput) GoString() string { + return s.String() +} + +// DescribeTapesOutput +type DescribeTapesOutput struct { + _ struct{} `type:"structure"` + + // An opaque string which can be used as part of a subsequent DescribeTapes + // call to retrieve the next page of results. + // + // If a response does not contain a marker, then there are no more results + // to be retrieved. + Marker *string `min:"1" type:"string"` + + // An array of virtual tape descriptions. + Tapes []*Tape `type:"list"` +} + +// String returns the string representation +func (s DescribeTapesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTapesOutput) GoString() string { + return s.String() +} + +type DescribeUploadBufferInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeUploadBufferInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUploadBufferInput) GoString() string { + return s.String() +} + +type DescribeUploadBufferOutput struct { + _ struct{} `type:"structure"` + + DiskIds []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` + + UploadBufferAllocatedInBytes *int64 `type:"long"` + + UploadBufferUsedInBytes *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeUploadBufferOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUploadBufferOutput) GoString() string { + return s.String() +} + +// DescribeVTLDevicesInput +type DescribeVTLDevicesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // Specifies that the number of VTL devices described be limited to the specified + // number. + Limit *int64 `min:"1" type:"integer"` + + // An opaque string that indicates the position at which to begin describing + // the VTL devices. + Marker *string `min:"1" type:"string"` + + // An array of strings, where each string represents the Amazon Resource Name + // (ARN) of a VTL device. + // + // All of the specified VTL devices must be from the same gateway. If no VTL + // devices are specified, the result will contain all devices on the specified + // gateway. + VTLDeviceARNs []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeVTLDevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVTLDevicesInput) GoString() string { + return s.String() +} + +// DescribeVTLDevicesOutput +type DescribeVTLDevicesOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` + + // An opaque string that indicates the position at which the VTL devices that + // were fetched for description ended. Use the marker in your next request to + // fetch the next set of VTL devices in the list. If there are no more VTL devices + // to describe, this field does not appear in the response. + Marker *string `min:"1" type:"string"` + + // An array of VTL device objects composed of the Amazon Resource Name(ARN) + // of the VTL devices. + VTLDevices []*VTLDevice `type:"list"` +} + +// String returns the string representation +func (s DescribeVTLDevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVTLDevicesOutput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway. +type DescribeWorkingStorageInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeWorkingStorageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkingStorageInput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +type DescribeWorkingStorageOutput struct { + _ struct{} `type:"structure"` + + // An array of the gateway's local disk IDs that are configured as working storage. + // Each local disk ID is specified as a string (minimum length of 1 and maximum + // length of 300). If no local disks are configured as working storage, then + // the DiskIds array is empty. + DiskIds []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` + + // The total working storage in bytes allocated for the gateway. If no working + // storage is configured for the gateway, this field returns 0. + WorkingStorageAllocatedInBytes *int64 `type:"long"` + + // The total working storage in bytes in use by the gateway. If no working storage + // is configured for the gateway, this field returns 0. + WorkingStorageUsedInBytes *int64 `type:"long"` +} + +// String returns the string representation +func (s DescribeWorkingStorageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkingStorageOutput) GoString() string { + return s.String() +} + +// Lists iSCSI information about a VTL device. +type DeviceiSCSIAttributes struct { + _ struct{} `type:"structure"` + + // Indicates whether mutual CHAP is enabled for the iSCSI target. + ChapEnabled *bool `type:"boolean"` + + // The network interface identifier of the VTL device. + NetworkInterfaceId *string `type:"string"` + + // The port used to communicate with iSCSI VTL device targets. + NetworkInterfacePort *int64 `type:"integer"` + + // Specifies the unique Amazon Resource Name(ARN) that encodes the iSCSI qualified + // name(iqn) of a tape drive or media changer target. + TargetARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeviceiSCSIAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeviceiSCSIAttributes) GoString() string { + return s.String() +} + +// DisableGatewayInput +type DisableGatewayInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableGatewayInput) GoString() string { + return s.String() +} + +// DisableGatewayOutput +type DisableGatewayOutput struct { + _ struct{} `type:"structure"` + + // The unique Amazon Resource Name of the disabled gateway. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DisableGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableGatewayOutput) GoString() string { + return s.String() +} + +type Disk struct { + _ struct{} `type:"structure"` + + DiskAllocationResource *string `type:"string"` + + DiskAllocationType *string `min:"3" type:"string"` + + DiskId *string `min:"1" type:"string"` + + DiskNode *string `type:"string"` + + DiskPath *string `type:"string"` + + DiskSizeInBytes *int64 `type:"long"` + + DiskStatus *string `type:"string"` +} + +// String returns the string representation +func (s Disk) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Disk) GoString() string { + return s.String() +} + +// Provides additional information about an error that was returned by the service +// as an or. See the errorCode and errorDetails members for more information +// about the error. +type Error struct { + _ struct{} `type:"structure"` + + // Additional information about the error. + ErrorCode *string `locationName:"errorCode" type:"string" enum:"ErrorCode"` + + // Human-readable text that provides detail about the error that occurred. 
+ ErrorDetails map[string]*string `locationName:"errorDetails" type:"map"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +type GatewayInfo struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + GatewayName *string `type:"string"` + + GatewayOperationalState *string `min:"2" type:"string"` + + GatewayType *string `min:"2" type:"string"` +} + +// String returns the string representation +func (s GatewayInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GatewayInfo) GoString() string { + return s.String() +} + +// A JSON object containing zero or more of the following fields: +// +// ListGatewaysInput$Limit ListGatewaysInput$Marker +type ListGatewaysInput struct { + _ struct{} `type:"structure"` + + // Specifies that the list of gateways returned be limited to the specified + // number of items. + Limit *int64 `min:"1" type:"integer"` + + // An opaque string that indicates the position at which to begin the returned + // list of gateways. 
+ Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGatewaysInput) GoString() string { + return s.String() +} + +type ListGatewaysOutput struct { + _ struct{} `type:"structure"` + + Gateways []*GatewayInfo `type:"list"` + + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGatewaysOutput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway. +type ListLocalDisksInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListLocalDisksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLocalDisksInput) GoString() string { + return s.String() +} + +type ListLocalDisksOutput struct { + _ struct{} `type:"structure"` + + Disks []*Disk `type:"list"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ListLocalDisksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListLocalDisksOutput) GoString() string { + return s.String() +} + +// ListTagsForResourceInput +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // Specifies that the list of tags returned be limited to the specified number + // of items. + Limit *int64 `min:"1" type:"integer"` + + // An opaque string that indicates the position at which to begin returning + // the list of tags. + Marker *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the resource for which you want to list + // tags. + ResourceARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// ListTagsForResourceOutput +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // An opaque string that indicates the position at which to stop returning the + // list of tags. + Marker *string `min:"1" type:"string"` + + // he Amazon Resource Name (ARN) of the resource for which you want to list + // tags. + ResourceARN *string `min:"50" type:"string"` + + // An array that contains the tags for the specified resource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// ListVolumeInitiatorsInput +type ListVolumeInitiatorsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the volume. 
Use the ListVolumes operation + // to return a list of gateway volumes for the gateway. + VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListVolumeInitiatorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumeInitiatorsInput) GoString() string { + return s.String() +} + +// ListVolumeInitiatorsOutput +type ListVolumeInitiatorsOutput struct { + _ struct{} `type:"structure"` + + // The host names and port numbers of all iSCSI initiators that are connected + // to the gateway. + Initiators []*string `type:"list"` +} + +// String returns the string representation +func (s ListVolumeInitiatorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumeInitiatorsOutput) GoString() string { + return s.String() +} + +type ListVolumeRecoveryPointsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListVolumeRecoveryPointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumeRecoveryPointsInput) GoString() string { + return s.String() +} + +type ListVolumeRecoveryPointsOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` + + VolumeRecoveryPointInfos []*VolumeRecoveryPointInfo `type:"list"` +} + +// String returns the string representation +func (s ListVolumeRecoveryPointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumeRecoveryPointsOutput) GoString() string { + return s.String() +} + +// A JSON object that contains one or more of the following fields: +// +// ListVolumesInput$Limit ListVolumesInput$Marker +type ListVolumesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // Specifies that the list of volumes returned be limited to the specified number + // of items. + Limit *int64 `min:"1" type:"integer"` + + // A string that indicates the position at which to begin the returned list + // of volumes. Obtain the marker from the response of a previous List iSCSI + // Volumes request. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumesInput) GoString() string { + return s.String() +} + +type ListVolumesOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` + + Marker *string `min:"1" type:"string"` + + VolumeInfos []*VolumeInfo `type:"list"` +} + +// String returns the string representation +func (s ListVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVolumesOutput) GoString() string { + return s.String() +} + +// Describes a gateway's network interface. +type NetworkInterface struct { + _ struct{} `type:"structure"` + + // The Internet Protocol version 4 (IPv4) address of the interface. + Ipv4Address *string `type:"string"` + + // The Internet Protocol version 6 (IPv6) address of the interface. Currently + // not supported. + Ipv6Address *string `type:"string"` + + // The Media Access Control (MAC) address of the interface. + // + // This is currently unsupported and will not be returned in output. + MacAddress *string `type:"string"` +} + +// String returns the string representation +func (s NetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterface) GoString() string { + return s.String() +} + +// RemoveTagsFromResourceInput +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource you want to remove the tags + // from. + ResourceARN *string `min:"50" type:"string"` + + // The keys of the tags you want to remove from the specified resource. A tag + // is composed of a key/value pair. 
+ TagKeys []*string `type:"list"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// RemoveTagsFromResourceOutput +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that the tags were removed + // from. + ResourceARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + +type ResetCacheInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetCacheInput) GoString() string { + return s.String() +} + +type ResetCacheOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ResetCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetCacheOutput) GoString() string { + return s.String() +} + +// RetrieveTapeArchiveInput +type RetrieveTapeArchiveInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual + // tape to. Use the ListGateways operation to return a list of gateways for + // your account and region. + // + // You retrieve archived virtual tapes to only one gateway and the gateway + // must be a gateway-VTL. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the virtual tape you want to retrieve from + // the virtual tape shelf (VTS). + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s RetrieveTapeArchiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveTapeArchiveInput) GoString() string { + return s.String() +} + +// RetrieveTapeArchiveOutput +type RetrieveTapeArchiveOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the retrieved virtual tape. + TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s RetrieveTapeArchiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveTapeArchiveOutput) GoString() string { + return s.String() +} + +// RetrieveTapeRecoveryPointInput +type RetrieveTapeRecoveryPointInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the virtual tape for which you want to + // retrieve the recovery point. + TapeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s RetrieveTapeRecoveryPointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveTapeRecoveryPointInput) GoString() string { + return s.String() +} + +// RetrieveTapeRecoveryPointOutput +type RetrieveTapeRecoveryPointOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape for which the recovery + // point was retrieved. + TapeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s RetrieveTapeRecoveryPointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetrieveTapeRecoveryPointOutput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway to shut down. +type ShutdownGatewayInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s ShutdownGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ShutdownGatewayInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway that was shut down. +type ShutdownGatewayOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ShutdownGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ShutdownGatewayOutput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway to start. +type StartGatewayInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartGatewayInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway that was restarted. +type StartGatewayOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s StartGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartGatewayOutput) GoString() string { + return s.String() +} + +type StorediSCSIVolume struct { + _ struct{} `type:"structure"` + + PreservedExistingData *bool `type:"boolean"` + + SourceSnapshotId *string `type:"string"` + + VolumeARN *string `min:"50" type:"string"` + + VolumeDiskId *string `min:"1" type:"string"` + + VolumeId *string `min:"12" type:"string"` + + VolumeProgress *float64 `type:"double"` + + VolumeSizeInBytes *int64 `type:"long"` + + VolumeStatus *string `min:"3" type:"string"` + + VolumeType *string `min:"3" type:"string"` + + // Lists iSCSI information about a volume. 
+ VolumeiSCSIAttributes *VolumeiSCSIAttributes `type:"structure"` +} + +// String returns the string representation +func (s StorediSCSIVolume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorediSCSIVolume) GoString() string { + return s.String() +} + +type Tag struct { + _ struct{} `type:"structure"` + + Key *string `min:"1" type:"string" required:"true"` + + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Describes a virtual tape object. +type Tape struct { + _ struct{} `type:"structure"` + + // For archiving virtual tapes, indicates how much data remains to be uploaded + // before archiving is complete. + // + // Range: 0 (not started) to 100 (complete). + Progress *float64 `type:"double"` + + // The Amazon Resource Name (ARN) of the virtual tape. + TapeARN *string `min:"50" type:"string"` + + // The barcode that identifies a specific virtual tape. + TapeBarcode *string `min:"7" type:"string"` + + // The size, in bytes, of the virtual tape. + TapeSizeInBytes *int64 `type:"long"` + + // The current state of the virtual tape. + TapeStatus *string `type:"string"` + + // The virtual tape library (VTL) device that the virtual tape is associated + // with. + VTLDevice *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s Tape) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tape) GoString() string { + return s.String() +} + +// Represents a virtual tape that is archived in the virtual tape shelf (VTS). +type TapeArchive struct { + _ struct{} `type:"structure"` + + // The time that the archiving of the virtual tape was completed. 
+ // + // The string format of the completion time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' + // format. + CompletionTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The Amazon Resource Name (ARN) of the gateway-VTL that the virtual tape is + // being retrieved to. + // + // The virtual tape is retrieved from the virtual tape shelf (VTS). + RetrievedTo *string `min:"50" type:"string"` + + // The Amazon Resource Name (ARN) of an archived virtual tape. + TapeARN *string `min:"50" type:"string"` + + // The barcode that identifies the archived virtual tape. + TapeBarcode *string `min:"7" type:"string"` + + // The size, in bytes, of the archived virtual tape. + TapeSizeInBytes *int64 `type:"long"` + + // The current state of the archived virtual tape. + TapeStatus *string `type:"string"` +} + +// String returns the string representation +func (s TapeArchive) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TapeArchive) GoString() string { + return s.String() +} + +// Describes a recovery point. +type TapeRecoveryPointInfo struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the virtual tape. + TapeARN *string `min:"50" type:"string"` + + // The time when the point-in-time view of the virtual tape was replicated for + // later recovery. + // + // The string format of the tape recovery point time is in the ISO8601 extended + // YYYY-MM-DD'T'HH:MM:SS'Z' format. + TapeRecoveryPointTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The size, in bytes, of the virtual tapes to recover. 
+ TapeSizeInBytes *int64 `type:"long"` + + TapeStatus *string `type:"string"` +} + +// String returns the string representation +func (s TapeRecoveryPointInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TapeRecoveryPointInfo) GoString() string { + return s.String() +} + +// A JSON object containing one or more of the following fields: +// +// UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec +type UpdateBandwidthRateLimitInput struct { + _ struct{} `type:"structure"` + + // The average download bandwidth rate limit in bits per second. + AverageDownloadRateLimitInBitsPerSec *int64 `min:"102400" type:"long"` + + // The average upload bandwidth rate limit in bits per second. + AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateBandwidthRateLimitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBandwidthRateLimitInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway whose throttle information was +// updated. +type UpdateBandwidthRateLimitOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateBandwidthRateLimitOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBandwidthRateLimitOutput) GoString() string { + return s.String() +} + +// A JSON object containing one or more of the following fields: +// +// UpdateChapCredentialsInput$InitiatorName UpdateChapCredentialsInput$SecretToAuthenticateInitiator +// UpdateChapCredentialsInput$SecretToAuthenticateTarget UpdateChapCredentialsInput$TargetARN +type UpdateChapCredentialsInput struct { + _ struct{} `type:"structure"` + + // The iSCSI initiator that connects to the target. + InitiatorName *string `min:"1" type:"string" required:"true"` + + // The secret key that the initiator (for example, the Windows client) must + // provide to participate in mutual CHAP with the target. + // + // The secret key must be between 12 and 16 bytes when encoded in UTF-8. + SecretToAuthenticateInitiator *string `min:"1" type:"string" required:"true"` + + // The secret key that the target must provide to participate in mutual CHAP + // with the initiator (e.g. Windows client). + // + // Byte constraints: Minimum bytes of 12. Maximum bytes of 16. + // + // The secret key must be between 12 and 16 bytes when encoded in UTF-8. + SecretToAuthenticateTarget *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes + // operation to return the TargetARN for specified VolumeARN. 
+ TargetARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateChapCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateChapCredentialsInput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +type UpdateChapCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The iSCSI initiator that connects to the target. This is the same initiator + // name specified in the request. + InitiatorName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the target. This is the same target specified + // in the request. + TargetARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateChapCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateChapCredentialsOutput) GoString() string { + return s.String() +} + +type UpdateGatewayInformationInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // A unique identifier for your gateway. This name becomes part of the gateway + // Amazon Resources Name (ARN) which is what you use as an input to other operations. + GatewayName *string `min:"2" type:"string"` + + GatewayTimezone *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s UpdateGatewayInformationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGatewayInformationInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway that was updated. 
+type UpdateGatewayInformationOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string"` + + GatewayName *string `type:"string"` +} + +// String returns the string representation +func (s UpdateGatewayInformationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGatewayInformationOutput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway to update. +type UpdateGatewaySoftwareNowInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateGatewaySoftwareNowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGatewaySoftwareNowInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway that was updated. +type UpdateGatewaySoftwareNowOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateGatewaySoftwareNowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGatewaySoftwareNowOutput) GoString() string { + return s.String() +} + +// A JSON object containing the following fields: +// +// UpdateMaintenanceStartTimeInput$DayOfWeek UpdateMaintenanceStartTimeInput$HourOfDay +// UpdateMaintenanceStartTimeInput$MinuteOfHour +type UpdateMaintenanceStartTimeInput struct { + _ struct{} `type:"structure"` + + // The maintenance start time day of the week. + DayOfWeek *int64 `type:"integer" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. + GatewayARN *string `min:"50" type:"string" required:"true"` + + // The hour component of the maintenance start time represented as hh, where + // hh is the hour (00 to 23). The hour of the day is in the time zone of the + // gateway. + HourOfDay *int64 `type:"integer" required:"true"` + + // The minute component of the maintenance start time represented as mm, where + // mm is the minute (00 to 59). The minute of the hour is in the time zone of + // the gateway. + MinuteOfHour *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateMaintenanceStartTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMaintenanceStartTimeInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the gateway whose maintenance start time +// is updated. +type UpdateMaintenanceStartTimeOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and region. 
+ GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateMaintenanceStartTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMaintenanceStartTimeOutput) GoString() string { + return s.String() +} + +// A JSON object containing one or more of the following fields: +// +// UpdateSnapshotScheduleInput$Description UpdateSnapshotScheduleInput$RecurrenceInHours +// UpdateSnapshotScheduleInput$StartAt UpdateSnapshotScheduleInput$VolumeARN +type UpdateSnapshotScheduleInput struct { + _ struct{} `type:"structure"` + + // Optional description of the snapshot that overwrites the existing description. + Description *string `min:"1" type:"string"` + + // Frequency of snapshots. Specify the number of hours between snapshots. + RecurrenceInHours *int64 `min:"1" type:"integer" required:"true"` + + // The hour of the day at which the snapshot schedule begins represented as + // hh, where hh is the hour (0 to 23). The hour of the day is in the time zone + // of the gateway. + StartAt *int64 `type:"integer" required:"true"` + + // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation + // to return a list of gateway volumes. + VolumeARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSnapshotScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSnapshotScheduleInput) GoString() string { + return s.String() +} + +// A JSON object containing the of the updated storage volume. 
+type UpdateSnapshotScheduleOutput struct { + _ struct{} `type:"structure"` + + VolumeARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateSnapshotScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSnapshotScheduleOutput) GoString() string { + return s.String() +} + +// UpdateVTLDeviceTypeInput +type UpdateVTLDeviceTypeInput struct { + _ struct{} `type:"structure"` + + // The type of medium changer you want to select. + // + // Valid Values: "STK-L700", "AWS-Gateway-VTL" + DeviceType *string `min:"2" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the medium changer you want to select. + VTLDeviceARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateVTLDeviceTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVTLDeviceTypeInput) GoString() string { + return s.String() +} + +// UpdateVTLDeviceTypeOutput +type UpdateVTLDeviceTypeOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the medium changer you have selected. + VTLDeviceARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateVTLDeviceTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVTLDeviceTypeOutput) GoString() string { + return s.String() +} + +// Represents a device object associated with a gateway-VTL. +type VTLDevice struct { + _ struct{} `type:"structure"` + + // A list of iSCSI information about a VTL device. + DeviceiSCSIAttributes *DeviceiSCSIAttributes `type:"structure"` + + // Specifies the unique Amazon Resource Name (ARN) of the device (tape drive + // or media changer). 
+ VTLDeviceARN *string `min:"50" type:"string"` + + VTLDeviceProductIdentifier *string `type:"string"` + + VTLDeviceType *string `type:"string"` + + VTLDeviceVendor *string `type:"string"` +} + +// String returns the string representation +func (s VTLDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VTLDevice) GoString() string { + return s.String() +} + +type VolumeInfo struct { + _ struct{} `type:"structure"` + + VolumeARN *string `min:"50" type:"string"` + + VolumeType *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s VolumeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeInfo) GoString() string { + return s.String() +} + +type VolumeRecoveryPointInfo struct { + _ struct{} `type:"structure"` + + VolumeARN *string `min:"50" type:"string"` + + VolumeRecoveryPointTime *string `type:"string"` + + VolumeSizeInBytes *int64 `type:"long"` + + VolumeUsageInBytes *int64 `type:"long"` +} + +// String returns the string representation +func (s VolumeRecoveryPointInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeRecoveryPointInfo) GoString() string { + return s.String() +} + +// Lists iSCSI information about a volume. +type VolumeiSCSIAttributes struct { + _ struct{} `type:"structure"` + + // Indicates whether mutual CHAP is enabled for the iSCSI target. + ChapEnabled *bool `type:"boolean"` + + // The logical disk number. + LunNumber *int64 `min:"1" type:"integer"` + + // The network interface identifier. + NetworkInterfaceId *string `type:"string"` + + // The port used to communicate with iSCSI targets. + NetworkInterfacePort *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the volume target. 
+ TargetARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s VolumeiSCSIAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeiSCSIAttributes) GoString() string { + return s.String() +} + +const ( + // @enum ErrorCode + ErrorCodeActivationKeyExpired = "ActivationKeyExpired" + // @enum ErrorCode + ErrorCodeActivationKeyInvalid = "ActivationKeyInvalid" + // @enum ErrorCode + ErrorCodeActivationKeyNotFound = "ActivationKeyNotFound" + // @enum ErrorCode + ErrorCodeGatewayInternalError = "GatewayInternalError" + // @enum ErrorCode + ErrorCodeGatewayNotConnected = "GatewayNotConnected" + // @enum ErrorCode + ErrorCodeGatewayNotFound = "GatewayNotFound" + // @enum ErrorCode + ErrorCodeGatewayProxyNetworkConnectionBusy = "GatewayProxyNetworkConnectionBusy" + // @enum ErrorCode + ErrorCodeAuthenticationFailure = "AuthenticationFailure" + // @enum ErrorCode + ErrorCodeBandwidthThrottleScheduleNotFound = "BandwidthThrottleScheduleNotFound" + // @enum ErrorCode + ErrorCodeBlocked = "Blocked" + // @enum ErrorCode + ErrorCodeCannotExportSnapshot = "CannotExportSnapshot" + // @enum ErrorCode + ErrorCodeChapCredentialNotFound = "ChapCredentialNotFound" + // @enum ErrorCode + ErrorCodeDiskAlreadyAllocated = "DiskAlreadyAllocated" + // @enum ErrorCode + ErrorCodeDiskDoesNotExist = "DiskDoesNotExist" + // @enum ErrorCode + ErrorCodeDiskSizeGreaterThanVolumeMaxSize = "DiskSizeGreaterThanVolumeMaxSize" + // @enum ErrorCode + ErrorCodeDiskSizeLessThanVolumeSize = "DiskSizeLessThanVolumeSize" + // @enum ErrorCode + ErrorCodeDiskSizeNotGigAligned = "DiskSizeNotGigAligned" + // @enum ErrorCode + ErrorCodeDuplicateCertificateInfo = "DuplicateCertificateInfo" + // @enum ErrorCode + ErrorCodeDuplicateSchedule = "DuplicateSchedule" + // @enum ErrorCode + ErrorCodeEndpointNotFound = "EndpointNotFound" + // @enum ErrorCode + ErrorCodeIamnotSupported = "IAMNotSupported" + // @enum 
ErrorCode + ErrorCodeInitiatorInvalid = "InitiatorInvalid" + // @enum ErrorCode + ErrorCodeInitiatorNotFound = "InitiatorNotFound" + // @enum ErrorCode + ErrorCodeInternalError = "InternalError" + // @enum ErrorCode + ErrorCodeInvalidGateway = "InvalidGateway" + // @enum ErrorCode + ErrorCodeInvalidEndpoint = "InvalidEndpoint" + // @enum ErrorCode + ErrorCodeInvalidParameters = "InvalidParameters" + // @enum ErrorCode + ErrorCodeInvalidSchedule = "InvalidSchedule" + // @enum ErrorCode + ErrorCodeLocalStorageLimitExceeded = "LocalStorageLimitExceeded" + // @enum ErrorCode + ErrorCodeLunAlreadyAllocated = "LunAlreadyAllocated " + // @enum ErrorCode + ErrorCodeLunInvalid = "LunInvalid" + // @enum ErrorCode + ErrorCodeMaximumContentLengthExceeded = "MaximumContentLengthExceeded" + // @enum ErrorCode + ErrorCodeMaximumTapeCartridgeCountExceeded = "MaximumTapeCartridgeCountExceeded" + // @enum ErrorCode + ErrorCodeMaximumVolumeCountExceeded = "MaximumVolumeCountExceeded" + // @enum ErrorCode + ErrorCodeNetworkConfigurationChanged = "NetworkConfigurationChanged" + // @enum ErrorCode + ErrorCodeNoDisksAvailable = "NoDisksAvailable" + // @enum ErrorCode + ErrorCodeNotImplemented = "NotImplemented" + // @enum ErrorCode + ErrorCodeNotSupported = "NotSupported" + // @enum ErrorCode + ErrorCodeOperationAborted = "OperationAborted" + // @enum ErrorCode + ErrorCodeOutdatedGateway = "OutdatedGateway" + // @enum ErrorCode + ErrorCodeParametersNotImplemented = "ParametersNotImplemented" + // @enum ErrorCode + ErrorCodeRegionInvalid = "RegionInvalid" + // @enum ErrorCode + ErrorCodeRequestTimeout = "RequestTimeout" + // @enum ErrorCode + ErrorCodeServiceUnavailable = "ServiceUnavailable" + // @enum ErrorCode + ErrorCodeSnapshotDeleted = "SnapshotDeleted" + // @enum ErrorCode + ErrorCodeSnapshotIdInvalid = "SnapshotIdInvalid" + // @enum ErrorCode + ErrorCodeSnapshotInProgress = "SnapshotInProgress" + // @enum ErrorCode + ErrorCodeSnapshotNotFound = "SnapshotNotFound" + // @enum 
ErrorCode + ErrorCodeSnapshotScheduleNotFound = "SnapshotScheduleNotFound" + // @enum ErrorCode + ErrorCodeStagingAreaFull = "StagingAreaFull" + // @enum ErrorCode + ErrorCodeStorageFailure = "StorageFailure" + // @enum ErrorCode + ErrorCodeTapeCartridgeNotFound = "TapeCartridgeNotFound" + // @enum ErrorCode + ErrorCodeTargetAlreadyExists = "TargetAlreadyExists" + // @enum ErrorCode + ErrorCodeTargetInvalid = "TargetInvalid" + // @enum ErrorCode + ErrorCodeTargetNotFound = "TargetNotFound" + // @enum ErrorCode + ErrorCodeUnauthorizedOperation = "UnauthorizedOperation" + // @enum ErrorCode + ErrorCodeVolumeAlreadyExists = "VolumeAlreadyExists" + // @enum ErrorCode + ErrorCodeVolumeIdInvalid = "VolumeIdInvalid" + // @enum ErrorCode + ErrorCodeVolumeInUse = "VolumeInUse" + // @enum ErrorCode + ErrorCodeVolumeNotFound = "VolumeNotFound" + // @enum ErrorCode + ErrorCodeVolumeNotReady = "VolumeNotReady" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1119 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package storagegateway_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/storagegateway" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleStorageGateway_ActivateGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ActivateGatewayInput{ + ActivationKey: aws.String("ActivationKey"), // Required + GatewayName: aws.String("GatewayName"), // Required + GatewayRegion: aws.String("RegionId"), // Required + GatewayTimezone: aws.String("GatewayTimezone"), // Required + GatewayType: aws.String("GatewayType"), + MediumChangerType: aws.String("MediumChangerType"), + TapeDriveType: aws.String("TapeDriveType"), + } + resp, err := svc.ActivateGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_AddCache() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.AddCacheInput{ + DiskIds: []*string{ // Required + aws.String("DiskId"), // Required + // More values... + }, + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.AddCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_AddTagsToResource() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.AddTagsToResourceInput{ + ResourceARN: aws.String("ResourceARN"), // Required + Tags: []*storagegateway.Tag{ // Required + { // Required + Key: aws.String("TagKey"), // Required + Value: aws.String("TagValue"), // Required + }, + // More values... 
+ }, + } + resp, err := svc.AddTagsToResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_AddUploadBuffer() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.AddUploadBufferInput{ + DiskIds: []*string{ // Required + aws.String("DiskId"), // Required + // More values... + }, + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.AddUploadBuffer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_AddWorkingStorage() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.AddWorkingStorageInput{ + DiskIds: []*string{ // Required + aws.String("DiskId"), // Required + // More values... + }, + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.AddWorkingStorage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CancelArchival() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CancelArchivalInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.CancelArchival(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_CancelRetrieval() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CancelRetrievalInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.CancelRetrieval(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateCachediSCSIVolume() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateCachediSCSIVolumeInput{ + ClientToken: aws.String("ClientToken"), // Required + GatewayARN: aws.String("GatewayARN"), // Required + NetworkInterfaceId: aws.String("NetworkInterfaceId"), // Required + TargetName: aws.String("TargetName"), // Required + VolumeSizeInBytes: aws.Int64(1), // Required + SnapshotId: aws.String("SnapshotId"), + } + resp, err := svc.CreateCachediSCSIVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateSnapshot() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateSnapshotInput{ + SnapshotDescription: aws.String("SnapshotDescription"), // Required + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.CreateSnapshot(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_CreateSnapshotFromVolumeRecoveryPoint() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateSnapshotFromVolumeRecoveryPointInput{ + SnapshotDescription: aws.String("SnapshotDescription"), // Required + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.CreateSnapshotFromVolumeRecoveryPoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateStorediSCSIVolume() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateStorediSCSIVolumeInput{ + DiskId: aws.String("DiskId"), // Required + GatewayARN: aws.String("GatewayARN"), // Required + NetworkInterfaceId: aws.String("NetworkInterfaceId"), // Required + PreserveExistingData: aws.Bool(true), // Required + TargetName: aws.String("TargetName"), // Required + SnapshotId: aws.String("SnapshotId"), + } + resp, err := svc.CreateStorediSCSIVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_CreateTapes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.CreateTapesInput{ + ClientToken: aws.String("ClientToken"), // Required + GatewayARN: aws.String("GatewayARN"), // Required + NumTapesToCreate: aws.Int64(1), // Required + TapeBarcodePrefix: aws.String("TapeBarcodePrefix"), // Required + TapeSizeInBytes: aws.Int64(1), // Required + } + resp, err := svc.CreateTapes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteBandwidthRateLimit() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteBandwidthRateLimitInput{ + BandwidthType: aws.String("BandwidthType"), // Required + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DeleteBandwidthRateLimit(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteChapCredentials() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteChapCredentialsInput{ + InitiatorName: aws.String("IqnName"), // Required + TargetARN: aws.String("TargetARN"), // Required + } + resp, err := svc.DeleteChapCredentials(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteGatewayInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DeleteGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteSnapshotSchedule() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteSnapshotScheduleInput{ + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.DeleteSnapshotSchedule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteTape() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteTapeInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.DeleteTape(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteTapeArchive() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteTapeArchiveInput{ + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.DeleteTapeArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DeleteVolume() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DeleteVolumeInput{ + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.DeleteVolume(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeBandwidthRateLimit() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeBandwidthRateLimitInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeBandwidthRateLimit(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeCache() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeCacheInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeCachediSCSIVolumes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeCachediSCSIVolumesInput{ + VolumeARNs: []*string{ // Required + aws.String("VolumeARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeCachediSCSIVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeChapCredentials() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeChapCredentialsInput{ + TargetARN: aws.String("TargetARN"), // Required + } + resp, err := svc.DescribeChapCredentials(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeGatewayInformation() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeGatewayInformationInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeGatewayInformation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeMaintenanceStartTime() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeMaintenanceStartTimeInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeMaintenanceStartTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeSnapshotSchedule() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeSnapshotScheduleInput{ + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.DescribeSnapshotSchedule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeStorediSCSIVolumes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeStorediSCSIVolumesInput{ + VolumeARNs: []*string{ // Required + aws.String("VolumeARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeStorediSCSIVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeTapeArchives() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeTapeArchivesInput{ + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + TapeARNs: []*string{ + aws.String("TapeARN"), // Required + // More values... 
+ }, + } + resp, err := svc.DescribeTapeArchives(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeTapeRecoveryPoints() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeTapeRecoveryPointsInput{ + GatewayARN: aws.String("GatewayARN"), // Required + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + } + resp, err := svc.DescribeTapeRecoveryPoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeTapes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeTapesInput{ + GatewayARN: aws.String("GatewayARN"), // Required + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + TapeARNs: []*string{ + aws.String("TapeARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeTapes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeUploadBuffer() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeUploadBufferInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeUploadBuffer(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeVTLDevices() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeVTLDevicesInput{ + GatewayARN: aws.String("GatewayARN"), // Required + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + VTLDeviceARNs: []*string{ + aws.String("VTLDeviceARN"), // Required + // More values... + }, + } + resp, err := svc.DescribeVTLDevices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DescribeWorkingStorage() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DescribeWorkingStorageInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DescribeWorkingStorage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_DisableGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.DisableGatewayInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.DisableGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListGateways() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListGatewaysInput{ + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + } + resp, err := svc.ListGateways(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListLocalDisks() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListLocalDisksInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.ListLocalDisks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListTagsForResource() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListTagsForResourceInput{ + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + ResourceARN: aws.String("ResourceARN"), + } + resp, err := svc.ListTagsForResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListVolumeInitiators() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListVolumeInitiatorsInput{ + VolumeARN: aws.String("VolumeARN"), // Required + } + resp, err := svc.ListVolumeInitiators(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListVolumeRecoveryPoints() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListVolumeRecoveryPointsInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.ListVolumeRecoveryPoints(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ListVolumes() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ListVolumesInput{ + GatewayARN: aws.String("GatewayARN"), // Required + Limit: aws.Int64(1), + Marker: aws.String("Marker"), + } + resp, err := svc.ListVolumes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_RemoveTagsFromResource() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.RemoveTagsFromResourceInput{ + ResourceARN: aws.String("ResourceARN"), + TagKeys: []*string{ + aws.String("TagKey"), // Required + // More values... + }, + } + resp, err := svc.RemoveTagsFromResource(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ResetCache() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ResetCacheInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.ResetCache(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_RetrieveTapeArchive() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.RetrieveTapeArchiveInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.RetrieveTapeArchive(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_RetrieveTapeRecoveryPoint() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.RetrieveTapeRecoveryPointInput{ + GatewayARN: aws.String("GatewayARN"), // Required + TapeARN: aws.String("TapeARN"), // Required + } + resp, err := svc.RetrieveTapeRecoveryPoint(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_ShutdownGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.ShutdownGatewayInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.ShutdownGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_StartGateway() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.StartGatewayInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.StartGateway(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateBandwidthRateLimit() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateBandwidthRateLimitInput{ + GatewayARN: aws.String("GatewayARN"), // Required + AverageDownloadRateLimitInBitsPerSec: aws.Int64(1), + AverageUploadRateLimitInBitsPerSec: aws.Int64(1), + } + resp, err := svc.UpdateBandwidthRateLimit(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateChapCredentials() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateChapCredentialsInput{ + InitiatorName: aws.String("IqnName"), // Required + SecretToAuthenticateInitiator: aws.String("ChapSecret"), // Required + TargetARN: aws.String("TargetARN"), // Required + SecretToAuthenticateTarget: aws.String("ChapSecret"), + } + resp, err := svc.UpdateChapCredentials(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateGatewayInformation() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateGatewayInformationInput{ + GatewayARN: aws.String("GatewayARN"), // Required + GatewayName: aws.String("GatewayName"), + GatewayTimezone: aws.String("GatewayTimezone"), + } + resp, err := svc.UpdateGatewayInformation(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateGatewaySoftwareNow() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateGatewaySoftwareNowInput{ + GatewayARN: aws.String("GatewayARN"), // Required + } + resp, err := svc.UpdateGatewaySoftwareNow(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateMaintenanceStartTime() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateMaintenanceStartTimeInput{ + DayOfWeek: aws.Int64(1), // Required + GatewayARN: aws.String("GatewayARN"), // Required + HourOfDay: aws.Int64(1), // Required + MinuteOfHour: aws.Int64(1), // Required + } + resp, err := svc.UpdateMaintenanceStartTime(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateSnapshotSchedule() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateSnapshotScheduleInput{ + RecurrenceInHours: aws.Int64(1), // Required + StartAt: aws.Int64(1), // Required + VolumeARN: aws.String("VolumeARN"), // Required + Description: aws.String("Description"), + } + resp, err := svc.UpdateSnapshotSchedule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleStorageGateway_UpdateVTLDeviceType() { + svc := storagegateway.New(session.New()) + + params := &storagegateway.UpdateVTLDeviceTypeInput{ + DeviceType: aws.String("DeviceType"), // Required + VTLDeviceARN: aws.String("VTLDeviceARN"), // Required + } + resp, err := svc.UpdateVTLDeviceType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,108 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package storagegateway + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// AWS Storage Gateway is the service that connects an on-premises software +// appliance with cloud-based storage to provide seamless and secure integration +// between an organization's on-premises IT environment and AWS's storage infrastructure. +// The service enables you to securely upload data to the AWS cloud for cost +// effective backup and rapid disaster recovery. 
+// +// Use the following links to get started using the AWS Storage Gateway Service +// API Reference: +// +// AWS Storage Gateway Required Request Headers (http://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayHTTPRequestsHeaders.html): +// Describes the required headers that you must send with every POST request +// to AWS Storage Gateway. Signing Requests (http://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewaySigningRequests.html): +// AWS Storage Gateway requires that you authenticate every request you send; +// this topic describes how sign such a request. Error Responses (http://docs.aws.amazon.com/storagegateway/latest/userguide/APIErrorResponses.html): +// Provides reference information about AWS Storage Gateway errors. Operations +// in AWS Storage Gateway (http://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPIOperations.html): +// Contains detailed descriptions of all AWS Storage Gateway operations, their +// request parameters, response elements, possible errors, and examples of requests +// and responses. AWS Storage Gateway Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): +// Provides a list of each of the regions and endpoints available for use with +// AWS Storage Gateway. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type StorageGateway struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "storagegateway" + +// New creates a new instance of the StorageGateway client with a session. 
+// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a StorageGateway client from just a session. +// svc := storagegateway.New(mySession) +// +// // Create a StorageGateway client with additional configuration +// svc := storagegateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *StorageGateway { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *StorageGateway { + svc := &StorageGateway{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-06-30", + JSONVersion: "1.1", + TargetPrefix: "StorageGateway_20130630", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a StorageGateway operation and runs any +// custom request initialization. 
+func (c *StorageGateway) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/storagegateway/storagegatewayiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,238 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package storagegatewayiface provides an interface for the AWS Storage Gateway. +package storagegatewayiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/storagegateway" +) + +// StorageGatewayAPI is the interface type for storagegateway.StorageGateway. 
+type StorageGatewayAPI interface { + ActivateGatewayRequest(*storagegateway.ActivateGatewayInput) (*request.Request, *storagegateway.ActivateGatewayOutput) + + ActivateGateway(*storagegateway.ActivateGatewayInput) (*storagegateway.ActivateGatewayOutput, error) + + AddCacheRequest(*storagegateway.AddCacheInput) (*request.Request, *storagegateway.AddCacheOutput) + + AddCache(*storagegateway.AddCacheInput) (*storagegateway.AddCacheOutput, error) + + AddTagsToResourceRequest(*storagegateway.AddTagsToResourceInput) (*request.Request, *storagegateway.AddTagsToResourceOutput) + + AddTagsToResource(*storagegateway.AddTagsToResourceInput) (*storagegateway.AddTagsToResourceOutput, error) + + AddUploadBufferRequest(*storagegateway.AddUploadBufferInput) (*request.Request, *storagegateway.AddUploadBufferOutput) + + AddUploadBuffer(*storagegateway.AddUploadBufferInput) (*storagegateway.AddUploadBufferOutput, error) + + AddWorkingStorageRequest(*storagegateway.AddWorkingStorageInput) (*request.Request, *storagegateway.AddWorkingStorageOutput) + + AddWorkingStorage(*storagegateway.AddWorkingStorageInput) (*storagegateway.AddWorkingStorageOutput, error) + + CancelArchivalRequest(*storagegateway.CancelArchivalInput) (*request.Request, *storagegateway.CancelArchivalOutput) + + CancelArchival(*storagegateway.CancelArchivalInput) (*storagegateway.CancelArchivalOutput, error) + + CancelRetrievalRequest(*storagegateway.CancelRetrievalInput) (*request.Request, *storagegateway.CancelRetrievalOutput) + + CancelRetrieval(*storagegateway.CancelRetrievalInput) (*storagegateway.CancelRetrievalOutput, error) + + CreateCachediSCSIVolumeRequest(*storagegateway.CreateCachediSCSIVolumeInput) (*request.Request, *storagegateway.CreateCachediSCSIVolumeOutput) + + CreateCachediSCSIVolume(*storagegateway.CreateCachediSCSIVolumeInput) (*storagegateway.CreateCachediSCSIVolumeOutput, error) + + CreateSnapshotRequest(*storagegateway.CreateSnapshotInput) (*request.Request, 
*storagegateway.CreateSnapshotOutput) + + CreateSnapshot(*storagegateway.CreateSnapshotInput) (*storagegateway.CreateSnapshotOutput, error) + + CreateSnapshotFromVolumeRecoveryPointRequest(*storagegateway.CreateSnapshotFromVolumeRecoveryPointInput) (*request.Request, *storagegateway.CreateSnapshotFromVolumeRecoveryPointOutput) + + CreateSnapshotFromVolumeRecoveryPoint(*storagegateway.CreateSnapshotFromVolumeRecoveryPointInput) (*storagegateway.CreateSnapshotFromVolumeRecoveryPointOutput, error) + + CreateStorediSCSIVolumeRequest(*storagegateway.CreateStorediSCSIVolumeInput) (*request.Request, *storagegateway.CreateStorediSCSIVolumeOutput) + + CreateStorediSCSIVolume(*storagegateway.CreateStorediSCSIVolumeInput) (*storagegateway.CreateStorediSCSIVolumeOutput, error) + + CreateTapesRequest(*storagegateway.CreateTapesInput) (*request.Request, *storagegateway.CreateTapesOutput) + + CreateTapes(*storagegateway.CreateTapesInput) (*storagegateway.CreateTapesOutput, error) + + DeleteBandwidthRateLimitRequest(*storagegateway.DeleteBandwidthRateLimitInput) (*request.Request, *storagegateway.DeleteBandwidthRateLimitOutput) + + DeleteBandwidthRateLimit(*storagegateway.DeleteBandwidthRateLimitInput) (*storagegateway.DeleteBandwidthRateLimitOutput, error) + + DeleteChapCredentialsRequest(*storagegateway.DeleteChapCredentialsInput) (*request.Request, *storagegateway.DeleteChapCredentialsOutput) + + DeleteChapCredentials(*storagegateway.DeleteChapCredentialsInput) (*storagegateway.DeleteChapCredentialsOutput, error) + + DeleteGatewayRequest(*storagegateway.DeleteGatewayInput) (*request.Request, *storagegateway.DeleteGatewayOutput) + + DeleteGateway(*storagegateway.DeleteGatewayInput) (*storagegateway.DeleteGatewayOutput, error) + + DeleteSnapshotScheduleRequest(*storagegateway.DeleteSnapshotScheduleInput) (*request.Request, *storagegateway.DeleteSnapshotScheduleOutput) + + DeleteSnapshotSchedule(*storagegateway.DeleteSnapshotScheduleInput) 
(*storagegateway.DeleteSnapshotScheduleOutput, error) + + DeleteTapeRequest(*storagegateway.DeleteTapeInput) (*request.Request, *storagegateway.DeleteTapeOutput) + + DeleteTape(*storagegateway.DeleteTapeInput) (*storagegateway.DeleteTapeOutput, error) + + DeleteTapeArchiveRequest(*storagegateway.DeleteTapeArchiveInput) (*request.Request, *storagegateway.DeleteTapeArchiveOutput) + + DeleteTapeArchive(*storagegateway.DeleteTapeArchiveInput) (*storagegateway.DeleteTapeArchiveOutput, error) + + DeleteVolumeRequest(*storagegateway.DeleteVolumeInput) (*request.Request, *storagegateway.DeleteVolumeOutput) + + DeleteVolume(*storagegateway.DeleteVolumeInput) (*storagegateway.DeleteVolumeOutput, error) + + DescribeBandwidthRateLimitRequest(*storagegateway.DescribeBandwidthRateLimitInput) (*request.Request, *storagegateway.DescribeBandwidthRateLimitOutput) + + DescribeBandwidthRateLimit(*storagegateway.DescribeBandwidthRateLimitInput) (*storagegateway.DescribeBandwidthRateLimitOutput, error) + + DescribeCacheRequest(*storagegateway.DescribeCacheInput) (*request.Request, *storagegateway.DescribeCacheOutput) + + DescribeCache(*storagegateway.DescribeCacheInput) (*storagegateway.DescribeCacheOutput, error) + + DescribeCachediSCSIVolumesRequest(*storagegateway.DescribeCachediSCSIVolumesInput) (*request.Request, *storagegateway.DescribeCachediSCSIVolumesOutput) + + DescribeCachediSCSIVolumes(*storagegateway.DescribeCachediSCSIVolumesInput) (*storagegateway.DescribeCachediSCSIVolumesOutput, error) + + DescribeChapCredentialsRequest(*storagegateway.DescribeChapCredentialsInput) (*request.Request, *storagegateway.DescribeChapCredentialsOutput) + + DescribeChapCredentials(*storagegateway.DescribeChapCredentialsInput) (*storagegateway.DescribeChapCredentialsOutput, error) + + DescribeGatewayInformationRequest(*storagegateway.DescribeGatewayInformationInput) (*request.Request, *storagegateway.DescribeGatewayInformationOutput) + + 
DescribeGatewayInformation(*storagegateway.DescribeGatewayInformationInput) (*storagegateway.DescribeGatewayInformationOutput, error) + + DescribeMaintenanceStartTimeRequest(*storagegateway.DescribeMaintenanceStartTimeInput) (*request.Request, *storagegateway.DescribeMaintenanceStartTimeOutput) + + DescribeMaintenanceStartTime(*storagegateway.DescribeMaintenanceStartTimeInput) (*storagegateway.DescribeMaintenanceStartTimeOutput, error) + + DescribeSnapshotScheduleRequest(*storagegateway.DescribeSnapshotScheduleInput) (*request.Request, *storagegateway.DescribeSnapshotScheduleOutput) + + DescribeSnapshotSchedule(*storagegateway.DescribeSnapshotScheduleInput) (*storagegateway.DescribeSnapshotScheduleOutput, error) + + DescribeStorediSCSIVolumesRequest(*storagegateway.DescribeStorediSCSIVolumesInput) (*request.Request, *storagegateway.DescribeStorediSCSIVolumesOutput) + + DescribeStorediSCSIVolumes(*storagegateway.DescribeStorediSCSIVolumesInput) (*storagegateway.DescribeStorediSCSIVolumesOutput, error) + + DescribeTapeArchivesRequest(*storagegateway.DescribeTapeArchivesInput) (*request.Request, *storagegateway.DescribeTapeArchivesOutput) + + DescribeTapeArchives(*storagegateway.DescribeTapeArchivesInput) (*storagegateway.DescribeTapeArchivesOutput, error) + + DescribeTapeArchivesPages(*storagegateway.DescribeTapeArchivesInput, func(*storagegateway.DescribeTapeArchivesOutput, bool) bool) error + + DescribeTapeRecoveryPointsRequest(*storagegateway.DescribeTapeRecoveryPointsInput) (*request.Request, *storagegateway.DescribeTapeRecoveryPointsOutput) + + DescribeTapeRecoveryPoints(*storagegateway.DescribeTapeRecoveryPointsInput) (*storagegateway.DescribeTapeRecoveryPointsOutput, error) + + DescribeTapeRecoveryPointsPages(*storagegateway.DescribeTapeRecoveryPointsInput, func(*storagegateway.DescribeTapeRecoveryPointsOutput, bool) bool) error + + DescribeTapesRequest(*storagegateway.DescribeTapesInput) (*request.Request, *storagegateway.DescribeTapesOutput) + + 
DescribeTapes(*storagegateway.DescribeTapesInput) (*storagegateway.DescribeTapesOutput, error) + + DescribeTapesPages(*storagegateway.DescribeTapesInput, func(*storagegateway.DescribeTapesOutput, bool) bool) error + + DescribeUploadBufferRequest(*storagegateway.DescribeUploadBufferInput) (*request.Request, *storagegateway.DescribeUploadBufferOutput) + + DescribeUploadBuffer(*storagegateway.DescribeUploadBufferInput) (*storagegateway.DescribeUploadBufferOutput, error) + + DescribeVTLDevicesRequest(*storagegateway.DescribeVTLDevicesInput) (*request.Request, *storagegateway.DescribeVTLDevicesOutput) + + DescribeVTLDevices(*storagegateway.DescribeVTLDevicesInput) (*storagegateway.DescribeVTLDevicesOutput, error) + + DescribeVTLDevicesPages(*storagegateway.DescribeVTLDevicesInput, func(*storagegateway.DescribeVTLDevicesOutput, bool) bool) error + + DescribeWorkingStorageRequest(*storagegateway.DescribeWorkingStorageInput) (*request.Request, *storagegateway.DescribeWorkingStorageOutput) + + DescribeWorkingStorage(*storagegateway.DescribeWorkingStorageInput) (*storagegateway.DescribeWorkingStorageOutput, error) + + DisableGatewayRequest(*storagegateway.DisableGatewayInput) (*request.Request, *storagegateway.DisableGatewayOutput) + + DisableGateway(*storagegateway.DisableGatewayInput) (*storagegateway.DisableGatewayOutput, error) + + ListGatewaysRequest(*storagegateway.ListGatewaysInput) (*request.Request, *storagegateway.ListGatewaysOutput) + + ListGateways(*storagegateway.ListGatewaysInput) (*storagegateway.ListGatewaysOutput, error) + + ListGatewaysPages(*storagegateway.ListGatewaysInput, func(*storagegateway.ListGatewaysOutput, bool) bool) error + + ListLocalDisksRequest(*storagegateway.ListLocalDisksInput) (*request.Request, *storagegateway.ListLocalDisksOutput) + + ListLocalDisks(*storagegateway.ListLocalDisksInput) (*storagegateway.ListLocalDisksOutput, error) + + ListTagsForResourceRequest(*storagegateway.ListTagsForResourceInput) (*request.Request, 
*storagegateway.ListTagsForResourceOutput) + + ListTagsForResource(*storagegateway.ListTagsForResourceInput) (*storagegateway.ListTagsForResourceOutput, error) + + ListVolumeInitiatorsRequest(*storagegateway.ListVolumeInitiatorsInput) (*request.Request, *storagegateway.ListVolumeInitiatorsOutput) + + ListVolumeInitiators(*storagegateway.ListVolumeInitiatorsInput) (*storagegateway.ListVolumeInitiatorsOutput, error) + + ListVolumeRecoveryPointsRequest(*storagegateway.ListVolumeRecoveryPointsInput) (*request.Request, *storagegateway.ListVolumeRecoveryPointsOutput) + + ListVolumeRecoveryPoints(*storagegateway.ListVolumeRecoveryPointsInput) (*storagegateway.ListVolumeRecoveryPointsOutput, error) + + ListVolumesRequest(*storagegateway.ListVolumesInput) (*request.Request, *storagegateway.ListVolumesOutput) + + ListVolumes(*storagegateway.ListVolumesInput) (*storagegateway.ListVolumesOutput, error) + + ListVolumesPages(*storagegateway.ListVolumesInput, func(*storagegateway.ListVolumesOutput, bool) bool) error + + RemoveTagsFromResourceRequest(*storagegateway.RemoveTagsFromResourceInput) (*request.Request, *storagegateway.RemoveTagsFromResourceOutput) + + RemoveTagsFromResource(*storagegateway.RemoveTagsFromResourceInput) (*storagegateway.RemoveTagsFromResourceOutput, error) + + ResetCacheRequest(*storagegateway.ResetCacheInput) (*request.Request, *storagegateway.ResetCacheOutput) + + ResetCache(*storagegateway.ResetCacheInput) (*storagegateway.ResetCacheOutput, error) + + RetrieveTapeArchiveRequest(*storagegateway.RetrieveTapeArchiveInput) (*request.Request, *storagegateway.RetrieveTapeArchiveOutput) + + RetrieveTapeArchive(*storagegateway.RetrieveTapeArchiveInput) (*storagegateway.RetrieveTapeArchiveOutput, error) + + RetrieveTapeRecoveryPointRequest(*storagegateway.RetrieveTapeRecoveryPointInput) (*request.Request, *storagegateway.RetrieveTapeRecoveryPointOutput) + + RetrieveTapeRecoveryPoint(*storagegateway.RetrieveTapeRecoveryPointInput) 
(*storagegateway.RetrieveTapeRecoveryPointOutput, error) + + ShutdownGatewayRequest(*storagegateway.ShutdownGatewayInput) (*request.Request, *storagegateway.ShutdownGatewayOutput) + + ShutdownGateway(*storagegateway.ShutdownGatewayInput) (*storagegateway.ShutdownGatewayOutput, error) + + StartGatewayRequest(*storagegateway.StartGatewayInput) (*request.Request, *storagegateway.StartGatewayOutput) + + StartGateway(*storagegateway.StartGatewayInput) (*storagegateway.StartGatewayOutput, error) + + UpdateBandwidthRateLimitRequest(*storagegateway.UpdateBandwidthRateLimitInput) (*request.Request, *storagegateway.UpdateBandwidthRateLimitOutput) + + UpdateBandwidthRateLimit(*storagegateway.UpdateBandwidthRateLimitInput) (*storagegateway.UpdateBandwidthRateLimitOutput, error) + + UpdateChapCredentialsRequest(*storagegateway.UpdateChapCredentialsInput) (*request.Request, *storagegateway.UpdateChapCredentialsOutput) + + UpdateChapCredentials(*storagegateway.UpdateChapCredentialsInput) (*storagegateway.UpdateChapCredentialsOutput, error) + + UpdateGatewayInformationRequest(*storagegateway.UpdateGatewayInformationInput) (*request.Request, *storagegateway.UpdateGatewayInformationOutput) + + UpdateGatewayInformation(*storagegateway.UpdateGatewayInformationInput) (*storagegateway.UpdateGatewayInformationOutput, error) + + UpdateGatewaySoftwareNowRequest(*storagegateway.UpdateGatewaySoftwareNowInput) (*request.Request, *storagegateway.UpdateGatewaySoftwareNowOutput) + + UpdateGatewaySoftwareNow(*storagegateway.UpdateGatewaySoftwareNowInput) (*storagegateway.UpdateGatewaySoftwareNowOutput, error) + + UpdateMaintenanceStartTimeRequest(*storagegateway.UpdateMaintenanceStartTimeInput) (*request.Request, *storagegateway.UpdateMaintenanceStartTimeOutput) + + UpdateMaintenanceStartTime(*storagegateway.UpdateMaintenanceStartTimeInput) (*storagegateway.UpdateMaintenanceStartTimeOutput, error) + + UpdateSnapshotScheduleRequest(*storagegateway.UpdateSnapshotScheduleInput) (*request.Request, 
*storagegateway.UpdateSnapshotScheduleOutput) + + UpdateSnapshotSchedule(*storagegateway.UpdateSnapshotScheduleInput) (*storagegateway.UpdateSnapshotScheduleOutput, error) + + UpdateVTLDeviceTypeRequest(*storagegateway.UpdateVTLDeviceTypeInput) (*request.Request, *storagegateway.UpdateVTLDeviceTypeOutput) + + UpdateVTLDeviceType(*storagegateway.UpdateVTLDeviceTypeInput) (*storagegateway.UpdateVTLDeviceTypeOutput, error) +} + +var _ StorageGatewayAPI = (*storagegateway.StorageGateway)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1094 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sts provides a client for AWS Security Token Service. +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a request for the AssumeRole operation. +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. 
Typically, you +// use AssumeRole for cross-account access or federation. +// +// Important: You cannot call AssumeRole by using AWS account credentials; +// access will be denied. You must use IAM user credentials or temporary security +// credentials to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the Using IAM. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the Using IAM. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to 3600 +// seconds (1 hour). The default is 1 hour. 
+// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the Using IAM. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// You must also have a policy that allows you to call sts:AssumeRole. +// +// Using MFA with AssumeRole +// +// You can optionally include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios in which +// you want to make sure that the user who is assuming the role has been authenticated +// using an AWS MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication; if the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication +// might look like the following example. 
+// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the Using IAM guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA devices produces. +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a request for the AssumeRoleWithSAML operation. +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithSAMLOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. 
+// The credentials are valid for the duration that you specified when calling +// AssumeRoleWithSAML, which can be up to 3600 seconds (1 hour) or until the +// time specified in the SAML authentication response's SessionNotOnOrAfter +// value, whichever is shorter. +// +// The maximum duration for a session is 1 hour, and the minimum duration is +// 15 minutes, even if values outside this range are specified. Optionally, +// you can pass an IAM access policy to this operation. If you choose not to +// pass a policy, the temporary security credentials that are returned by the +// operation have the permissions that are defined in the access policy of the +// role that is being assumed. If you pass a policy to this operation, the temporary +// security credentials that are returned by the operation have the permissions +// that are allowed by both the access policy of the role that is being assumed, +// and the policy that you pass. This gives you a way to further restrict the +// permissions for the resulting temporary security credentials. You cannot +// use the passed policy to grant permissions that are in excess of those allowed +// by the access policy of the role that is being assumed. For more information, +// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the Using IAM. +// +// Before your application can call AssumeRoleWithSAML, you must configure +// your SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. 
+// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// For more information, see the following resources: +// +// About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the Using IAM. Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the Using IAM. Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the Using IAM. Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the Using IAM. +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a request for the AssumeRoleWithWebIdentity operation. +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithWebIdentityOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider, such as Amazon +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible +// identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. 
You +// can use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely +// identify a user and supply the user with a consistent identity throughout +// the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application, and without deploying server-based +// proxy services that use long-term AWS credentials. Instead, the identity +// of the caller is validated by using a token from the web identity provider. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service APIs. The credentials +// are valid for the duration that you specified when calling AssumeRoleWithWebIdentity, +// which can be from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, +// the temporary security credentials are valid for 1 hour. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. 
If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the Using IAM. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security credentials, +// and then using those credentials to make a request to AWS. 
AWS SDK for iOS +// (http://aws.amazon.com/sdkforios/) and AWS SDK for Android (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity providers, +// and then how to use the information from these providers to get and use temporary +// security credentials. Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// This article discusses web identity federation and shows an example of how +// to use web identity federation to get access to content in Amazon S3. +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a request for the DecodeAuthorizationMessage operation. +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &DecodeAuthorizationMessageOutput{} + req.Data = output + return +} + +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. 
The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. The message is encoded because +// the details of the authorization status can constitute privileged information +// that the user who requested the action should not see. To decode an authorization +// status message, a user must be granted permissions via an IAM policy to request +// the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// Whether the request was denied due to an explicit deny or due to the absence +// of an explicit allow. For more information, see Determining Whether a Request +// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the Using IAM. The principal who made the request. The requested action. +// The requested resource. The values of condition keys in the context of the +// user's request. +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + err := req.Send() + return out, err +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a request for the GetFederationToken operation. +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetFederationTokenOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. 
+// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. Because +// you must call the GetFederationToken action using the long-term security +// credentials of an IAM user, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. +// +// If you are creating a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider, we recommend that you +// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// The GetFederationToken action must be called by using the long-term AWS +// security credentials of an IAM user. You can also call GetFederationToken +// using the security credentials of an AWS account (root), but this is not +// recommended. Instead, we recommend that you create an IAM user for the purpose +// of the proxy application and then attach a policy to the IAM user that limits +// federated users to only the actions and resources they need access to. For +// more information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the Using IAM. +// +// The temporary security credentials that are obtained by using the long-term +// credentials of an IAM user are valid for the specified duration, between +// 900 seconds (15 minutes) and 129600 seconds (36 hours). 
Temporary credentials +// that are obtained by using AWS account (root) credentials have a maximum +// duration of 3600 seconds (1 hour) +// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. The policy that is passed as a parameter +// in the call. The passed policy is attached to the temporary security credentials +// that result from the GetFederationToken API call--that is, to the federated +// user. When the federated user makes an AWS request, AWS evaluates the policy +// attached to the federated user in combination with the policy or policies +// attached to the IAM user whose credentials were used to call GetFederationToken. +// AWS allows the federated user's request only when both the federated user +// and the IAM user are explicitly allowed to perform the requested action. +// The passed policy cannot grant more permissions than those that are defined +// in the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. 
+// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a request for the GetSessionToken operation. +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSessionTokenOutput{} + req.Data = output + return +} + +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled +// IAM users would need to call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that are returned from the call, IAM users can then make programmatic calls +// to APIs that require MFA authentication. If you do not supply a correct MFA +// code, then the API returns an access denied error. 
+// +// The GetSessionToken action must be called by using the long-term AWS security +// credentials of the AWS account or an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify, between 900 seconds +// (15 minutes) and 129600 seconds (36 hours); credentials that are created +// by using account credentials have a maximum duration of 3600 seconds (1 hour). +// +// We recommend that you do not call GetSessionToken with root account credentials. +// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The permissions associated with the temporary security credentials returned +// by GetSessionToken are based on the permissions associated with account or +// IAM user whose credentials are used to call the action. If GetSessionToken +// is called using root account credentials, the temporary credentials have +// root account permissions. Similarly, if GetSessionToken is called using the +// credentials of an IAM user, the temporary credentials have the same permissions +// as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the Using IAM. +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + err := req.Send() + return out, err +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. 
+ DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that is used by third parties when assuming roles in + // their customers' accounts. For each role that the third party can assume, + // they should instruct their customers to ensure the role's trust policy checks + // for the external ID that the third party generated. Each time the third party + // assumes the role, they should pass the customer's external ID. The external + // ID is useful in order to help third parties bind a role to the customer who + // created it. For more information about the external ID, see How to Use an + // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the Using IAM. + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format. + // + // This parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both (the intersection of) the access policy of the role that + // is being assumed, and the policy that you pass. This gives you a way to further + // restrict the permissions for the resulting temporary security credentials. + // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the Using IAM. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. 
+ // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. 
+ TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. 
+ PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. An expiration can also be specified in the SAML authentication + // response's SessionNotOnOrAfter value. The actual expiration time is whichever + // value is shorter. + // + // The maximum duration for a session is 1 hour, and the minimum duration is + // 15 minutes, even if values outside this range are specified. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the Using IAM. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. 
+ // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the Using IAM guide. + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. 
+ // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. 
+ SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the Using IAM. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. 
Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. 
+ // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). 
For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The secret access key that can be used to sign requests. 
+ SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. For more information, + // see DecodeAuthorizationMessage. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. 
+ // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. 
The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. For more information about how permissions work, see Permissions for + // GetFederationToken (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. 
+ // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. 
The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, and the user does not provide a code when requesting a set of + // temporary security credentials, the user will receive an "access denied" + // response when requesting resources that require MFA authentication. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. 
+ Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/customizations.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/customizations.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/customizations.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/customizations.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,39 @@ +package sts_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/awstesting/unit" + "github.com/aws/aws-sdk-go/service/sts" +) + +var svc = sts.New(unit.Session, &aws.Config{ + Region: aws.String("mock-region"), +}) + +func 
TestUnsignedRequest_AssumeRoleWithSAML(t *testing.T) { + req, _ := svc.AssumeRoleWithSAMLRequest(&sts.AssumeRoleWithSAMLInput{ + PrincipalArn: aws.String("ARN01234567890123456789"), + RoleArn: aws.String("ARN01234567890123456789"), + SAMLAssertion: aws.String("ASSERT"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} + +func TestUnsignedRequest_AssumeRoleWithWebIdentity(t *testing.T) { + req, _ := svc.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + RoleArn: aws.String("ARN01234567890123456789"), + RoleSessionName: aws.String("SESSION"), + WebIdentityToken: aws.String("TOKEN"), + }) + + err := req.Sign() + assert.NoError(t, err) + assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,149 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package sts_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sts" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSTS_AssumeRole() { + svc := sts.New(session.New()) + + params := &sts.AssumeRoleInput{ + RoleArn: aws.String("arnType"), // Required + RoleSessionName: aws.String("roleSessionNameType"), // Required + DurationSeconds: aws.Int64(1), + ExternalId: aws.String("externalIdType"), + Policy: aws.String("sessionPolicyDocumentType"), + SerialNumber: aws.String("serialNumberType"), + TokenCode: aws.String("tokenCodeType"), + } + resp, err := svc.AssumeRole(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSTS_AssumeRoleWithSAML() { + svc := sts.New(session.New()) + + params := &sts.AssumeRoleWithSAMLInput{ + PrincipalArn: aws.String("arnType"), // Required + RoleArn: aws.String("arnType"), // Required + SAMLAssertion: aws.String("SAMLAssertionType"), // Required + DurationSeconds: aws.Int64(1), + Policy: aws.String("sessionPolicyDocumentType"), + } + resp, err := svc.AssumeRoleWithSAML(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSTS_AssumeRoleWithWebIdentity() { + svc := sts.New(session.New()) + + params := &sts.AssumeRoleWithWebIdentityInput{ + RoleArn: aws.String("arnType"), // Required + RoleSessionName: aws.String("roleSessionNameType"), // Required + WebIdentityToken: aws.String("clientTokenType"), // Required + DurationSeconds: aws.Int64(1), + Policy: aws.String("sessionPolicyDocumentType"), + ProviderId: aws.String("urlType"), + } + resp, err := svc.AssumeRoleWithWebIdentity(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSTS_DecodeAuthorizationMessage() { + svc := sts.New(session.New()) + + params := &sts.DecodeAuthorizationMessageInput{ + EncodedMessage: aws.String("encodedMessageType"), // Required + } + resp, err := svc.DecodeAuthorizationMessage(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSTS_GetFederationToken() { + svc := sts.New(session.New()) + + params := &sts.GetFederationTokenInput{ + Name: aws.String("userNameType"), // Required + DurationSeconds: aws.Int64(1), + Policy: aws.String("sessionPolicyDocumentType"), + } + resp, err := svc.GetFederationToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSTS_GetSessionToken() { + svc := sts.New(session.New()) + + params := &sts.GetSessionTokenInput{ + DurationSeconds: aws.Int64(1), + SerialNumber: aws.String("serialNumberType"), + TokenCode: aws.String("tokenCodeType"), + } + resp, err := svc.GetSessionToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,130 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sts + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). 
+// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html" +// target="_blank) in the AWS General Reference. For general information about +// the Query API, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html" +// target="_blank) in Using IAM. For information about using security tokens +// with other AWS products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the Using IAM. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/" target="_blank). +// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available, +// but must first be activated in the AWS Management Console before you can +// use a different region's endpoint. 
For more information about activating +// a region for STS see Activating STS in a New Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the Using IAM. +// +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. To learn more about CloudTrail, including how to turn it on and find +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type STS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sts" + +// New creates a new instance of the STS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { + c := p.ClientConfig(ServiceName, cfgs...) 
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. +func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,38 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package stsiface provides an interface for the AWS Security Token Service. 
+package stsiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sts" +) + +// STSAPI is the interface type for sts.STS. +type STSAPI interface { + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) +} + +var _ STSAPI = (*sts.STS)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1649 @@ +// THIS FILE IS 
AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package support provides a client for AWS Support. +package support + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddAttachmentsToSet = "AddAttachmentsToSet" + +// AddAttachmentsToSetRequest generates a request for the AddAttachmentsToSet operation. +func (c *Support) AddAttachmentsToSetRequest(input *AddAttachmentsToSetInput) (req *request.Request, output *AddAttachmentsToSetOutput) { + op := &request.Operation{ + Name: opAddAttachmentsToSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddAttachmentsToSetInput{} + } + + req = c.newRequest(op, input, output) + output = &AddAttachmentsToSetOutput{} + req.Data = output + return +} + +// Adds one or more attachments to an attachment set. If an AttachmentSetId +// is not specified, a new attachment set is created, and the ID of the set +// is returned in the response. If an AttachmentSetId is specified, the attachments +// are added to the specified set, if it exists. +// +// An attachment set is a temporary container for attachments that are to be +// added to a case or case communication. The set is available for one hour +// after it is created; the ExpiryTime returned in the response indicates when +// the set expires. The maximum number of attachments in a set is 3, and the +// maximum size of any attachment in the set is 5 MB. +func (c *Support) AddAttachmentsToSet(input *AddAttachmentsToSetInput) (*AddAttachmentsToSetOutput, error) { + req, out := c.AddAttachmentsToSetRequest(input) + err := req.Send() + return out, err +} + +const opAddCommunicationToCase = "AddCommunicationToCase" + +// AddCommunicationToCaseRequest generates a request for the AddCommunicationToCase operation. 
+func (c *Support) AddCommunicationToCaseRequest(input *AddCommunicationToCaseInput) (req *request.Request, output *AddCommunicationToCaseOutput) { + op := &request.Operation{ + Name: opAddCommunicationToCase, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddCommunicationToCaseInput{} + } + + req = c.newRequest(op, input, output) + output = &AddCommunicationToCaseOutput{} + req.Data = output + return +} + +// Adds additional customer communication to an AWS Support case. You use the +// CaseId value to identify the case to add communication to. You can list a +// set of email addresses to copy on the communication using the CcEmailAddresses +// value. The CommunicationBody value contains the text of the communication. +// +// The response indicates the success or failure of the request. +// +// This operation implements a subset of the features of the AWS Support Center. +func (c *Support) AddCommunicationToCase(input *AddCommunicationToCaseInput) (*AddCommunicationToCaseOutput, error) { + req, out := c.AddCommunicationToCaseRequest(input) + err := req.Send() + return out, err +} + +const opCreateCase = "CreateCase" + +// CreateCaseRequest generates a request for the CreateCase operation. +func (c *Support) CreateCaseRequest(input *CreateCaseInput) (req *request.Request, output *CreateCaseOutput) { + op := &request.Operation{ + Name: opCreateCase, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCaseInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateCaseOutput{} + req.Data = output + return +} + +// Creates a new case in the AWS Support Center. This operation is modeled on +// the behavior of the AWS Support Center Create Case (https://console.aws.amazon.com/support/home#/case/create) +// page. Its parameters require you to specify the following information: +// +// IssueType. The type of issue for the case. You can specify either "customer-service" +// or "technical." 
If you do not indicate a value, the default is "technical." +// ServiceCode. The code for an AWS service. You obtain the ServiceCode by +// calling DescribeServices. CategoryCode. The category for the service defined +// for the ServiceCode value. You also obtain the category code for a service +// by calling DescribeServices. Each AWS service defines its own set of category +// codes. SeverityCode. A value that indicates the urgency of the case, which +// in turn determines the response time according to your service level agreement +// with AWS Support. You obtain the SeverityCode by calling DescribeSeverityLevels. +// Subject. The Subject field on the AWS Support Center Create Case (https://console.aws.amazon.com/support/home#/case/create) +// page. CommunicationBody. The Description field on the AWS Support Center +// Create Case (https://console.aws.amazon.com/support/home#/case/create) page. +// AttachmentSetId. The ID of a set of attachments that has been created by +// using AddAttachmentsToSet. Language. The human language in which AWS Support +// handles the case. English and Japanese are currently supported. CcEmailAddresses. +// The AWS Support Center CC field on the Create Case (https://console.aws.amazon.com/support/home#/case/create) +// page. You can list email addresses to be copied on any correspondence about +// the case. The account that opens the case is already identified by passing +// the AWS Credentials in the HTTP POST method or in a method or function call +// from one of the programming languages supported by an AWS SDK (http://aws.amazon.com/tools/). +// To add additional communication or attachments to an existing case, use +// AddCommunicationToCase. +// +// A successful CreateCase request returns an AWS Support case number. Case +// numbers are used by the DescribeCases operation to retrieve existing AWS +// Support cases. 
+func (c *Support) CreateCase(input *CreateCaseInput) (*CreateCaseOutput, error) { + req, out := c.CreateCaseRequest(input) + err := req.Send() + return out, err +} + +const opDescribeAttachment = "DescribeAttachment" + +// DescribeAttachmentRequest generates a request for the DescribeAttachment operation. +func (c *Support) DescribeAttachmentRequest(input *DescribeAttachmentInput) (req *request.Request, output *DescribeAttachmentOutput) { + op := &request.Operation{ + Name: opDescribeAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAttachmentInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeAttachmentOutput{} + req.Data = output + return +} + +// Returns the attachment that has the specified ID. Attachment IDs are generated +// by the case management system when you add an attachment to a case or case +// communication. Attachment IDs are returned in the AttachmentDetails objects +// that are returned by the DescribeCommunications operation. +func (c *Support) DescribeAttachment(input *DescribeAttachmentInput) (*DescribeAttachmentOutput, error) { + req, out := c.DescribeAttachmentRequest(input) + err := req.Send() + return out, err +} + +const opDescribeCases = "DescribeCases" + +// DescribeCasesRequest generates a request for the DescribeCases operation. +func (c *Support) DescribeCasesRequest(input *DescribeCasesInput) (req *request.Request, output *DescribeCasesOutput) { + op := &request.Operation{ + Name: opDescribeCases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCasesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCasesOutput{} + req.Data = output + return +} + +// Returns a list of cases that you specify by passing one or more case IDs. 
+// In addition, you can filter the cases by date by setting values for the AfterTime +// and BeforeTime request parameters. You can set values for the IncludeResolvedCases +// and IncludeCommunications request parameters to control how much information +// is returned. +// +// Case data is available for 12 months after creation. If a case was created +// more than 12 months ago, a request for data might cause an error. +// +// The response returns the following in JSON format: +// +// One or more CaseDetails data types. One or more NextToken values, which +// specify where to paginate the returned records represented by the CaseDetails +// objects. +func (c *Support) DescribeCases(input *DescribeCasesInput) (*DescribeCasesOutput, error) { + req, out := c.DescribeCasesRequest(input) + err := req.Send() + return out, err +} + +func (c *Support) DescribeCasesPages(input *DescribeCasesInput, fn func(p *DescribeCasesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCasesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCasesOutput), lastPage) + }) +} + +const opDescribeCommunications = "DescribeCommunications" + +// DescribeCommunicationsRequest generates a request for the DescribeCommunications operation. 
+func (c *Support) DescribeCommunicationsRequest(input *DescribeCommunicationsInput) (req *request.Request, output *DescribeCommunicationsOutput) { + op := &request.Operation{ + Name: opDescribeCommunications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCommunicationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeCommunicationsOutput{} + req.Data = output + return +} + +// Returns communications (and attachments) for one or more support cases. You +// can use the AfterTime and BeforeTime parameters to filter by date. You can +// use the CaseId parameter to restrict the results to a particular case. +// +// Case data is available for 12 months after creation. If a case was created +// more than 12 months ago, a request for data might cause an error. +// +// You can use the MaxResults and NextToken parameters to control the pagination +// of the result set. Set MaxResults to the number of cases you want displayed +// on each page, and use NextToken to specify the resumption of pagination. 
+func (c *Support) DescribeCommunications(input *DescribeCommunicationsInput) (*DescribeCommunicationsOutput, error) { + req, out := c.DescribeCommunicationsRequest(input) + err := req.Send() + return out, err +} + +func (c *Support) DescribeCommunicationsPages(input *DescribeCommunicationsInput, fn func(p *DescribeCommunicationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeCommunicationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeCommunicationsOutput), lastPage) + }) +} + +const opDescribeServices = "DescribeServices" + +// DescribeServicesRequest generates a request for the DescribeServices operation. +func (c *Support) DescribeServicesRequest(input *DescribeServicesInput) (req *request.Request, output *DescribeServicesOutput) { + op := &request.Operation{ + Name: opDescribeServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServicesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeServicesOutput{} + req.Data = output + return +} + +// Returns the current list of AWS services and a list of service categories +// that applies to each one. You then use service names and categories in your +// CreateCase requests. Each AWS service has its own set of categories. +// +// The service codes and category codes correspond to the values that are displayed +// in the Service and Category drop-down lists on the AWS Support Center Create +// Case (https://console.aws.amazon.com/support/home#/case/create) page. The +// values in those fields, however, do not necessarily match the service codes +// and categories returned by the DescribeServices request. Always use the service +// codes and categories obtained programmatically. This practice ensures that +// you always have the most recent set of service and category codes. 
+func (c *Support) DescribeServices(input *DescribeServicesInput) (*DescribeServicesOutput, error) { + req, out := c.DescribeServicesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeSeverityLevels = "DescribeSeverityLevels" + +// DescribeSeverityLevelsRequest generates a request for the DescribeSeverityLevels operation. +func (c *Support) DescribeSeverityLevelsRequest(input *DescribeSeverityLevelsInput) (req *request.Request, output *DescribeSeverityLevelsOutput) { + op := &request.Operation{ + Name: opDescribeSeverityLevels, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSeverityLevelsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSeverityLevelsOutput{} + req.Data = output + return +} + +// Returns the list of severity levels that you can assign to an AWS Support +// case. The severity level for a case is also a field in the CaseDetails data +// type included in any CreateCase request. +func (c *Support) DescribeSeverityLevels(input *DescribeSeverityLevelsInput) (*DescribeSeverityLevelsOutput, error) { + req, out := c.DescribeSeverityLevelsRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrustedAdvisorCheckRefreshStatuses = "DescribeTrustedAdvisorCheckRefreshStatuses" + +// DescribeTrustedAdvisorCheckRefreshStatusesRequest generates a request for the DescribeTrustedAdvisorCheckRefreshStatuses operation. 
+func (c *Support) DescribeTrustedAdvisorCheckRefreshStatusesRequest(input *DescribeTrustedAdvisorCheckRefreshStatusesInput) (req *request.Request, output *DescribeTrustedAdvisorCheckRefreshStatusesOutput) { + op := &request.Operation{ + Name: opDescribeTrustedAdvisorCheckRefreshStatuses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrustedAdvisorCheckRefreshStatusesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrustedAdvisorCheckRefreshStatusesOutput{} + req.Data = output + return +} + +// Returns the refresh status of the Trusted Advisor checks that have the specified +// check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks. +func (c *Support) DescribeTrustedAdvisorCheckRefreshStatuses(input *DescribeTrustedAdvisorCheckRefreshStatusesInput) (*DescribeTrustedAdvisorCheckRefreshStatusesOutput, error) { + req, out := c.DescribeTrustedAdvisorCheckRefreshStatusesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrustedAdvisorCheckResult = "DescribeTrustedAdvisorCheckResult" + +// DescribeTrustedAdvisorCheckResultRequest generates a request for the DescribeTrustedAdvisorCheckResult operation. +func (c *Support) DescribeTrustedAdvisorCheckResultRequest(input *DescribeTrustedAdvisorCheckResultInput) (req *request.Request, output *DescribeTrustedAdvisorCheckResultOutput) { + op := &request.Operation{ + Name: opDescribeTrustedAdvisorCheckResult, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrustedAdvisorCheckResultInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrustedAdvisorCheckResultOutput{} + req.Data = output + return +} + +// Returns the results of the Trusted Advisor check that has the specified check +// ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks. 
+// +// The response contains a TrustedAdvisorCheckResult object, which contains +// these three objects: +// +// TrustedAdvisorCategorySpecificSummary TrustedAdvisorResourceDetail TrustedAdvisorResourcesSummary +// In addition, the response contains these fields: +// +// Status. The alert status of the check: "ok" (green), "warning" (yellow), +// "error" (red), or "not_available". Timestamp. The time of the last refresh +// of the check. CheckId. The unique identifier for the check. +func (c *Support) DescribeTrustedAdvisorCheckResult(input *DescribeTrustedAdvisorCheckResultInput) (*DescribeTrustedAdvisorCheckResultOutput, error) { + req, out := c.DescribeTrustedAdvisorCheckResultRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrustedAdvisorCheckSummaries = "DescribeTrustedAdvisorCheckSummaries" + +// DescribeTrustedAdvisorCheckSummariesRequest generates a request for the DescribeTrustedAdvisorCheckSummaries operation. +func (c *Support) DescribeTrustedAdvisorCheckSummariesRequest(input *DescribeTrustedAdvisorCheckSummariesInput) (req *request.Request, output *DescribeTrustedAdvisorCheckSummariesOutput) { + op := &request.Operation{ + Name: opDescribeTrustedAdvisorCheckSummaries, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrustedAdvisorCheckSummariesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrustedAdvisorCheckSummariesOutput{} + req.Data = output + return +} + +// Returns the summaries of the results of the Trusted Advisor checks that have +// the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks. +// +// The response contains an array of TrustedAdvisorCheckSummary objects. 
+func (c *Support) DescribeTrustedAdvisorCheckSummaries(input *DescribeTrustedAdvisorCheckSummariesInput) (*DescribeTrustedAdvisorCheckSummariesOutput, error) { + req, out := c.DescribeTrustedAdvisorCheckSummariesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeTrustedAdvisorChecks = "DescribeTrustedAdvisorChecks" + +// DescribeTrustedAdvisorChecksRequest generates a request for the DescribeTrustedAdvisorChecks operation. +func (c *Support) DescribeTrustedAdvisorChecksRequest(input *DescribeTrustedAdvisorChecksInput) (req *request.Request, output *DescribeTrustedAdvisorChecksOutput) { + op := &request.Operation{ + Name: opDescribeTrustedAdvisorChecks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrustedAdvisorChecksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeTrustedAdvisorChecksOutput{} + req.Data = output + return +} + +// Returns information about all available Trusted Advisor checks, including +// name, ID, category, description, and metadata. You must specify a language +// code; English ("en") and Japanese ("ja") are currently supported. The response +// contains a TrustedAdvisorCheckDescription for each check. +func (c *Support) DescribeTrustedAdvisorChecks(input *DescribeTrustedAdvisorChecksInput) (*DescribeTrustedAdvisorChecksOutput, error) { + req, out := c.DescribeTrustedAdvisorChecksRequest(input) + err := req.Send() + return out, err +} + +const opRefreshTrustedAdvisorCheck = "RefreshTrustedAdvisorCheck" + +// RefreshTrustedAdvisorCheckRequest generates a request for the RefreshTrustedAdvisorCheck operation. 
+func (c *Support) RefreshTrustedAdvisorCheckRequest(input *RefreshTrustedAdvisorCheckInput) (req *request.Request, output *RefreshTrustedAdvisorCheckOutput) { + op := &request.Operation{ + Name: opRefreshTrustedAdvisorCheck, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RefreshTrustedAdvisorCheckInput{} + } + + req = c.newRequest(op, input, output) + output = &RefreshTrustedAdvisorCheckOutput{} + req.Data = output + return +} + +// Requests a refresh of the Trusted Advisor check that has the specified check +// ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks. +// +// The response contains a TrustedAdvisorCheckRefreshStatus object, which contains +// these fields: +// +// Status. The refresh status of the check: "none", "enqueued", "processing", +// "success", or "abandoned". MillisUntilNextRefreshable. The amount of time, +// in milliseconds, until the check is eligible for refresh. CheckId. The unique +// identifier for the check. +func (c *Support) RefreshTrustedAdvisorCheck(input *RefreshTrustedAdvisorCheckInput) (*RefreshTrustedAdvisorCheckOutput, error) { + req, out := c.RefreshTrustedAdvisorCheckRequest(input) + err := req.Send() + return out, err +} + +const opResolveCase = "ResolveCase" + +// ResolveCaseRequest generates a request for the ResolveCase operation. +func (c *Support) ResolveCaseRequest(input *ResolveCaseInput) (req *request.Request, output *ResolveCaseOutput) { + op := &request.Operation{ + Name: opResolveCase, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResolveCaseInput{} + } + + req = c.newRequest(op, input, output) + output = &ResolveCaseOutput{} + req.Data = output + return +} + +// Takes a CaseId and returns the initial state of the case along with the state +// of the case after the call to ResolveCase completed. 
+func (c *Support) ResolveCase(input *ResolveCaseInput) (*ResolveCaseOutput, error) { + req, out := c.ResolveCaseRequest(input) + err := req.Send() + return out, err +} + +type AddAttachmentsToSetInput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment set. If an AttachmentSetId is not specified, a new + // attachment set is created, and the ID of the set is returned in the response. + // If an AttachmentSetId is specified, the attachments are added to the specified + // set, if it exists. + AttachmentSetId *string `locationName:"attachmentSetId" type:"string"` + + // One or more attachments to add to the set. The limit is 3 attachments per + // set, and the size limit is 5 MB per attachment. + Attachments []*Attachment `locationName:"attachments" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddAttachmentsToSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddAttachmentsToSetInput) GoString() string { + return s.String() +} + +// The ID and expiry time of the attachment set returned by the AddAttachmentsToSet +// operation. +type AddAttachmentsToSetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment set. If an AttachmentSetId was not specified, a + // new attachment set is created, and the ID of the set is returned in the response. + // If an AttachmentSetId was specified, the attachments are added to the specified + // set, if it exists. + AttachmentSetId *string `locationName:"attachmentSetId" type:"string"` + + // The time and date when the attachment set expires. 
+ ExpiryTime *string `locationName:"expiryTime" type:"string"` +} + +// String returns the string representation +func (s AddAttachmentsToSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddAttachmentsToSetOutput) GoString() string { + return s.String() +} + +// To be written. +type AddCommunicationToCaseInput struct { + _ struct{} `type:"structure"` + + // The ID of a set of one or more attachments for the communication to add to + // the case. Create the set by calling AddAttachmentsToSet + AttachmentSetId *string `locationName:"attachmentSetId" type:"string"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string"` + + // The email addresses in the CC line of an email to be added to the support + // case. + CcEmailAddresses []*string `locationName:"ccEmailAddresses" type:"list"` + + // The body of an email communication to add to the support case. + CommunicationBody *string `locationName:"communicationBody" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddCommunicationToCaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddCommunicationToCaseInput) GoString() string { + return s.String() +} + +// The result of the AddCommunicationToCase operation. +type AddCommunicationToCaseOutput struct { + _ struct{} `type:"structure"` + + // True if AddCommunicationToCase succeeds. Otherwise, returns an error. 
+ Result *bool `locationName:"result" type:"boolean"` +} + +// String returns the string representation +func (s AddCommunicationToCaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddCommunicationToCaseOutput) GoString() string { + return s.String() +} + +// An attachment to a case communication. The attachment consists of the file +// name and the content of the file. +type Attachment struct { + _ struct{} `type:"structure"` + + // The content of the attachment file. + Data []byte `locationName:"data" type:"blob"` + + // The name of the attachment file. + FileName *string `locationName:"fileName" type:"string"` +} + +// String returns the string representation +func (s Attachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attachment) GoString() string { + return s.String() +} + +// The file name and ID of an attachment to a case communication. You can use +// the ID to retrieve the attachment with the DescribeAttachment operation. +type AttachmentDetails struct { + _ struct{} `type:"structure"` + + // The ID of the attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // The file name of the attachment. + FileName *string `locationName:"fileName" type:"string"` +} + +// String returns the string representation +func (s AttachmentDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachmentDetails) GoString() string { + return s.String() +} + +// A JSON-formatted object that contains the metadata for a support case. It +// is contained the response from a DescribeCases request. CaseDetails contains +// the following fields: +// +// CaseID. The AWS Support case ID requested or returned in the call. The +// case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47. +// CategoryCode. 
The category of problem for the AWS Support case. Corresponds +// to the CategoryCode values returned by a call to DescribeServices. DisplayId. +// The identifier for the case on pages in the AWS Support Center. Language. +// The ISO 639-1 code for the language in which AWS provides support. AWS Support +// currently supports English ("en") and Japanese ("ja"). Language parameters +// must be passed explicitly for operations that take them. RecentCommunications. +// One or more Communication objects. Fields of these objects are Attachments, +// Body, CaseId, SubmittedBy, and TimeCreated. NextToken. A resumption point +// for pagination. ServiceCode. The identifier for the AWS service that corresponds +// to the service code defined in the call to DescribeServices. SeverityCode. +// The severity code assigned to the case. Contains one of the values returned +// by the call to DescribeSeverityLevels. Status. The status of the case in +// the AWS Support Center. Subject. The subject line of the case. SubmittedBy. +// The email address of the account that submitted the case. TimeCreated. The +// time the case was created, in ISO-8601 format. +type CaseDetails struct { + _ struct{} `type:"structure"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string"` + + // The category of problem for the AWS Support case. + CategoryCode *string `locationName:"categoryCode" type:"string"` + + // The email addresses that receive copies of communication about the case. + CcEmailAddresses []*string `locationName:"ccEmailAddresses" type:"list"` + + // The ID displayed for the case in the AWS Support Center. This is a numeric + // string. + DisplayId *string `locationName:"displayId" type:"string"` + + // The ISO 639-1 code for the language in which AWS provides support. 
AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` + + // The five most recent communications between you and AWS Support Center, including + // the IDs of any attachments to the communications. Also includes a nextToken + // that you can use to retrieve earlier communications. + RecentCommunications *RecentCaseCommunications `locationName:"recentCommunications" type:"structure"` + + // The code for the AWS service returned by the call to DescribeServices. + ServiceCode *string `locationName:"serviceCode" type:"string"` + + // The code for the severity level returned by the call to DescribeSeverityLevels. + SeverityCode *string `locationName:"severityCode" type:"string"` + + // The status of the case. + Status *string `locationName:"status" type:"string"` + + // The subject line for the case in the AWS Support Center. + Subject *string `locationName:"subject" type:"string"` + + // The email address of the account that submitted the case. + SubmittedBy *string `locationName:"submittedBy" type:"string"` + + // The time that the case was case created in the AWS Support Center. + TimeCreated *string `locationName:"timeCreated" type:"string"` +} + +// String returns the string representation +func (s CaseDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CaseDetails) GoString() string { + return s.String() +} + +// A JSON-formatted name/value pair that represents the category name and category +// code of the problem, selected from the DescribeServices response for each +// AWS service. +type Category struct { + _ struct{} `type:"structure"` + + // The category code for the support case. + Code *string `locationName:"code" type:"string"` + + // The category name for the support case. 
+ Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Category) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Category) GoString() string { + return s.String() +} + +// A communication associated with an AWS Support case. The communication consists +// of the case ID, the message body, attachment information, the account email +// address, and the date and time of the communication. +type Communication struct { + _ struct{} `type:"structure"` + + // Information about the attachments to the case communication. + AttachmentSet []*AttachmentDetails `locationName:"attachmentSet" type:"list"` + + // The text of the communication between the customer and AWS Support. + Body *string `locationName:"body" type:"string"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string"` + + // The email address of the account that submitted the AWS Support case. + SubmittedBy *string `locationName:"submittedBy" type:"string"` + + // The time the communication was created. + TimeCreated *string `locationName:"timeCreated" type:"string"` +} + +// String returns the string representation +func (s Communication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Communication) GoString() string { + return s.String() +} + +type CreateCaseInput struct { + _ struct{} `type:"structure"` + + // The ID of a set of one or more attachments for the case. Create the set by + // using AddAttachmentsToSet. + AttachmentSetId *string `locationName:"attachmentSetId" type:"string"` + + // The category of problem for the AWS Support case. 
+ CategoryCode *string `locationName:"categoryCode" type:"string"` + + // A list of email addresses that AWS Support copies on case correspondence. + CcEmailAddresses []*string `locationName:"ccEmailAddresses" type:"list"` + + // The communication body text when you create an AWS Support case by calling + // CreateCase. + CommunicationBody *string `locationName:"communicationBody" type:"string" required:"true"` + + // The type of issue for the case. You can specify either "customer-service" + // or "technical." If you do not indicate a value, the default is "technical." + IssueType *string `locationName:"issueType" type:"string"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` + + // The code for the AWS service returned by the call to DescribeServices. + ServiceCode *string `locationName:"serviceCode" type:"string"` + + // The code for the severity level returned by the call to DescribeSeverityLevels. + // + // The availability of severity levels depends on each customer's support subscription. + // In other words, your subscription may not necessarily require the urgent + // level of response time. + SeverityCode *string `locationName:"severityCode" type:"string"` + + // The title of the AWS Support case. + Subject *string `locationName:"subject" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCaseInput) GoString() string { + return s.String() +} + +// The AWS Support case ID returned by a successful completion of the CreateCase +// operation. 
+type CreateCaseOutput struct { + _ struct{} `type:"structure"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string"` +} + +// String returns the string representation +func (s CreateCaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCaseOutput) GoString() string { + return s.String() +} + +type DescribeAttachmentInput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment to return. Attachment IDs are returned by the DescribeCommunications + // operation. + AttachmentId *string `locationName:"attachmentId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAttachmentInput) GoString() string { + return s.String() +} + +// The content and file name of the attachment returned by the DescribeAttachment +// operation. +type DescribeAttachmentOutput struct { + _ struct{} `type:"structure"` + + // The attachment content and file name. + Attachment *Attachment `locationName:"attachment" type:"structure"` +} + +// String returns the string representation +func (s DescribeAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAttachmentOutput) GoString() string { + return s.String() +} + +type DescribeCasesInput struct { + _ struct{} `type:"structure"` + + // The start date for a filtered date search on support case communications. + // Case communications are available for 12 months after creation. + AfterTime *string `locationName:"afterTime" type:"string"` + + // The end date for a filtered date search on support case communications. 
Case + // communications are available for 12 months after creation. + BeforeTime *string `locationName:"beforeTime" type:"string"` + + // A list of ID numbers of the support cases you want returned. The maximum + // number of cases is 100. + CaseIdList []*string `locationName:"caseIdList" type:"list"` + + // The ID displayed for a case in the AWS Support Center user interface. + DisplayId *string `locationName:"displayId" type:"string"` + + // Specifies whether communications should be included in the DescribeCases + // results. The default is true. + IncludeCommunications *bool `locationName:"includeCommunications" type:"boolean"` + + // Specifies whether resolved support cases should be included in the DescribeCases + // results. The default is false. + IncludeResolvedCases *bool `locationName:"includeResolvedCases" type:"boolean"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` + + // The maximum number of results to return before paginating. + MaxResults *int64 `locationName:"maxResults" min:"10" type:"integer"` + + // A resumption point for pagination. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCasesInput) GoString() string { + return s.String() +} + +// Returns an array of CaseDetails objects and a NextToken that defines a point +// for pagination in the result set. +type DescribeCasesOutput struct { + _ struct{} `type:"structure"` + + // The details for the cases that match the request. + Cases []*CaseDetails `locationName:"cases" type:"list"` + + // A resumption point for pagination. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCasesOutput) GoString() string { + return s.String() +} + +type DescribeCommunicationsInput struct { + _ struct{} `type:"structure"` + + // The start date for a filtered date search on support case communications. + // Case communications are available for 12 months after creation. + AfterTime *string `locationName:"afterTime" type:"string"` + + // The end date for a filtered date search on support case communications. Case + // communications are available for 12 months after creation. + BeforeTime *string `locationName:"beforeTime" type:"string"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string" required:"true"` + + // The maximum number of results to return before paginating. + MaxResults *int64 `locationName:"maxResults" min:"10" type:"integer"` + + // A resumption point for pagination. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCommunicationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCommunicationsInput) GoString() string { + return s.String() +} + +// The communications returned by the DescribeCommunications operation. +type DescribeCommunicationsOutput struct { + _ struct{} `type:"structure"` + + // The communications for the case. + Communications []*Communication `locationName:"communications" type:"list"` + + // A resumption point for pagination. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCommunicationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCommunicationsOutput) GoString() string { + return s.String() +} + +type DescribeServicesInput struct { + _ struct{} `type:"structure"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` + + // A JSON-formatted list of service codes available for AWS services. + ServiceCodeList []*string `locationName:"serviceCodeList" type:"list"` +} + +// String returns the string representation +func (s DescribeServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesInput) GoString() string { + return s.String() +} + +// The list of AWS services returned by the DescribeServices operation. +type DescribeServicesOutput struct { + _ struct{} `type:"structure"` + + // A JSON-formatted list of AWS services. + Services []*Service `locationName:"services" type:"list"` +} + +// String returns the string representation +func (s DescribeServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesOutput) GoString() string { + return s.String() +} + +type DescribeSeverityLevelsInput struct { + _ struct{} `type:"structure"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. 
+ Language *string `locationName:"language" type:"string"` +} + +// String returns the string representation +func (s DescribeSeverityLevelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSeverityLevelsInput) GoString() string { + return s.String() +} + +// The list of severity levels returned by the DescribeSeverityLevels operation. +type DescribeSeverityLevelsOutput struct { + _ struct{} `type:"structure"` + + // The available severity levels for the support case. Available severity levels + // are defined by your service level agreement with AWS. + SeverityLevels []*SeverityLevel `locationName:"severityLevels" type:"list"` +} + +// String returns the string representation +func (s DescribeSeverityLevelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSeverityLevelsOutput) GoString() string { + return s.String() +} + +type DescribeTrustedAdvisorCheckRefreshStatusesInput struct { + _ struct{} `type:"structure"` + + // The IDs of the Trusted Advisor checks. + CheckIds []*string `locationName:"checkIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckRefreshStatusesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckRefreshStatusesInput) GoString() string { + return s.String() +} + +// The statuses of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckRefreshStatuses +// operation. +type DescribeTrustedAdvisorCheckRefreshStatusesOutput struct { + _ struct{} `type:"structure"` + + // The refresh status of the specified Trusted Advisor checks. 
+ Statuses []*TrustedAdvisorCheckRefreshStatus `locationName:"statuses" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckRefreshStatusesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckRefreshStatusesOutput) GoString() string { + return s.String() +} + +type DescribeTrustedAdvisorCheckResultInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckResultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckResultInput) GoString() string { + return s.String() +} + +// The result of the Trusted Advisor check returned by the DescribeTrustedAdvisorCheckResult +// operation. +type DescribeTrustedAdvisorCheckResultOutput struct { + _ struct{} `type:"structure"` + + // The detailed results of the Trusted Advisor check. + Result *TrustedAdvisorCheckResult `locationName:"result" type:"structure"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckResultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckResultOutput) GoString() string { + return s.String() +} + +type DescribeTrustedAdvisorCheckSummariesInput struct { + _ struct{} `type:"structure"` + + // The IDs of the Trusted Advisor checks. 
+ CheckIds []*string `locationName:"checkIds" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckSummariesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckSummariesInput) GoString() string { + return s.String() +} + +// The summaries of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckSummaries +// operation. +type DescribeTrustedAdvisorCheckSummariesOutput struct { + _ struct{} `type:"structure"` + + // The summary information for the requested Trusted Advisor checks. + Summaries []*TrustedAdvisorCheckSummary `locationName:"summaries" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorCheckSummariesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorCheckSummariesOutput) GoString() string { + return s.String() +} + +type DescribeTrustedAdvisorChecksInput struct { + _ struct{} `type:"structure"` + + // The ISO 639-1 code for the language in which AWS provides support. AWS Support + // currently supports English ("en") and Japanese ("ja"). Language parameters + // must be passed explicitly for operations that take them. + Language *string `locationName:"language" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorChecksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorChecksInput) GoString() string { + return s.String() +} + +// Information about the Trusted Advisor checks returned by the DescribeTrustedAdvisorChecks +// operation. +type DescribeTrustedAdvisorChecksOutput struct { + _ struct{} `type:"structure"` + + // Information about all available Trusted Advisor checks. 
+ Checks []*TrustedAdvisorCheckDescription `locationName:"checks" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTrustedAdvisorChecksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrustedAdvisorChecksOutput) GoString() string { + return s.String() +} + +// The five most recent communications associated with the case. +type RecentCaseCommunications struct { + _ struct{} `type:"structure"` + + // The five most recent communications associated with the case. + Communications []*Communication `locationName:"communications" type:"list"` + + // A resumption point for pagination. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s RecentCaseCommunications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecentCaseCommunications) GoString() string { + return s.String() +} + +type RefreshTrustedAdvisorCheckInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RefreshTrustedAdvisorCheckInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RefreshTrustedAdvisorCheckInput) GoString() string { + return s.String() +} + +// The current refresh status of a Trusted Advisor check. +type RefreshTrustedAdvisorCheckOutput struct { + _ struct{} `type:"structure"` + + // The current refresh status for a check, including the amount of time until + // the check is eligible for refresh. 
+ Status *TrustedAdvisorCheckRefreshStatus `locationName:"status" type:"structure" required:"true"` +} + +// String returns the string representation +func (s RefreshTrustedAdvisorCheckOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RefreshTrustedAdvisorCheckOutput) GoString() string { + return s.String() +} + +type ResolveCaseInput struct { + _ struct{} `type:"structure"` + + // The AWS Support case ID requested or returned in the call. The case ID is + // an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47 + CaseId *string `locationName:"caseId" type:"string"` +} + +// String returns the string representation +func (s ResolveCaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolveCaseInput) GoString() string { + return s.String() +} + +// The status of the case returned by the ResolveCase operation. +type ResolveCaseOutput struct { + _ struct{} `type:"structure"` + + // The status of the case after the ResolveCase request was processed. + FinalCaseStatus *string `locationName:"finalCaseStatus" type:"string"` + + // The status of the case when the ResolveCase request was sent. + InitialCaseStatus *string `locationName:"initialCaseStatus" type:"string"` +} + +// String returns the string representation +func (s ResolveCaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolveCaseOutput) GoString() string { + return s.String() +} + +// Information about an AWS service returned by the DescribeServices operation. +type Service struct { + _ struct{} `type:"structure"` + + // A list of categories that describe the type of support issue a case describes. + // Categories consist of a category name and a category code. Category names + // and codes are passed to AWS Support when you call CreateCase. 
+ Categories []*Category `locationName:"categories" type:"list"` + + // The code for an AWS service returned by the DescribeServices response. The + // Name element contains the corresponding friendly name. + Code *string `locationName:"code" type:"string"` + + // The friendly name for an AWS service. The Code element contains the corresponding + // code. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Service) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Service) GoString() string { + return s.String() +} + +// A code and name pair that represent a severity level that can be applied +// to a support case. +type SeverityLevel struct { + _ struct{} `type:"structure"` + + // One of four values: "low," "medium," "high," and "urgent". These values correspond + // to response times returned to the caller in SeverityLevel.name. + Code *string `locationName:"code" type:"string"` + + // The name of the severity level that corresponds to the severity level code. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s SeverityLevel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SeverityLevel) GoString() string { + return s.String() +} + +// The container for summary information that relates to the category of the +// Trusted Advisor check. +type TrustedAdvisorCategorySpecificSummary struct { + _ struct{} `type:"structure"` + + // The summary information about cost savings for a Trusted Advisor check that + // is in the Cost Optimizing category. 
+ CostOptimizing *TrustedAdvisorCostOptimizingSummary `locationName:"costOptimizing" type:"structure"` +} + +// String returns the string representation +func (s TrustedAdvisorCategorySpecificSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCategorySpecificSummary) GoString() string { + return s.String() +} + +// The description and metadata for a Trusted Advisor check. +type TrustedAdvisorCheckDescription struct { + _ struct{} `type:"structure"` + + // The category of the Trusted Advisor check. + Category *string `locationName:"category" type:"string" required:"true"` + + // The description of the Trusted Advisor check, which includes the alert criteria + // and recommended actions (contains HTML markup). + Description *string `locationName:"description" type:"string" required:"true"` + + // The unique identifier for the Trusted Advisor check. + Id *string `locationName:"id" type:"string" required:"true"` + + // The column headings for the data returned by the Trusted Advisor check. The + // order of the headings corresponds to the order of the data in the Metadata + // element of the TrustedAdvisorResourceDetail for the check. Metadata contains + // all the data that is shown in the Excel download, even in those cases where + // the UI shows just summary data. + Metadata []*string `locationName:"metadata" type:"list" required:"true"` + + // The display name for the Trusted Advisor check. + Name *string `locationName:"name" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCheckDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCheckDescription) GoString() string { + return s.String() +} + +// The refresh status of a Trusted Advisor check. 
+type TrustedAdvisorCheckRefreshStatus struct { + _ struct{} `type:"structure"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` + + // The amount of time, in milliseconds, until the Trusted Advisor check is eligible + // for refresh. + MillisUntilNextRefreshable *int64 `locationName:"millisUntilNextRefreshable" type:"long" required:"true"` + + // The status of the Trusted Advisor check for which a refresh has been requested: + // "none", "enqueued", "processing", "success", or "abandoned". + Status *string `locationName:"status" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCheckRefreshStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCheckRefreshStatus) GoString() string { + return s.String() +} + +// The results of a Trusted Advisor check returned by DescribeTrustedAdvisorCheckResult. +type TrustedAdvisorCheckResult struct { + _ struct{} `type:"structure"` + + // Summary information that relates to the category of the check. Cost Optimizing + // is the only category that is currently supported. + CategorySpecificSummary *TrustedAdvisorCategorySpecificSummary `locationName:"categorySpecificSummary" type:"structure" required:"true"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` + + // The details about each resource listed in the check result. + FlaggedResources []*TrustedAdvisorResourceDetail `locationName:"flaggedResources" type:"list" required:"true"` + + // Details about AWS resources that were analyzed in a call to Trusted Advisor + // DescribeTrustedAdvisorCheckSummaries. 
+ ResourcesSummary *TrustedAdvisorResourcesSummary `locationName:"resourcesSummary" type:"structure" required:"true"` + + // The alert status of the check: "ok" (green), "warning" (yellow), "error" + // (red), or "not_available". + Status *string `locationName:"status" type:"string" required:"true"` + + // The time of the last refresh of the check. + Timestamp *string `locationName:"timestamp" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCheckResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCheckResult) GoString() string { + return s.String() +} + +// A summary of a Trusted Advisor check result, including the alert status, +// last refresh, and number of resources examined. +type TrustedAdvisorCheckSummary struct { + _ struct{} `type:"structure"` + + // Summary information that relates to the category of the check. Cost Optimizing + // is the only category that is currently supported. + CategorySpecificSummary *TrustedAdvisorCategorySpecificSummary `locationName:"categorySpecificSummary" type:"structure" required:"true"` + + // The unique identifier for the Trusted Advisor check. + CheckId *string `locationName:"checkId" type:"string" required:"true"` + + // Specifies whether the Trusted Advisor check has flagged resources. + HasFlaggedResources *bool `locationName:"hasFlaggedResources" type:"boolean"` + + // Details about AWS resources that were analyzed in a call to Trusted Advisor + // DescribeTrustedAdvisorCheckSummaries. + ResourcesSummary *TrustedAdvisorResourcesSummary `locationName:"resourcesSummary" type:"structure" required:"true"` + + // The alert status of the check: "ok" (green), "warning" (yellow), "error" + // (red), or "not_available". + Status *string `locationName:"status" type:"string" required:"true"` + + // The time of the last refresh of the check. 
+ Timestamp *string `locationName:"timestamp" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCheckSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCheckSummary) GoString() string { + return s.String() +} + +// The estimated cost savings that might be realized if the recommended actions +// are taken. +type TrustedAdvisorCostOptimizingSummary struct { + _ struct{} `type:"structure"` + + // The estimated monthly savings that might be realized if the recommended actions + // are taken. + EstimatedMonthlySavings *float64 `locationName:"estimatedMonthlySavings" type:"double" required:"true"` + + // The estimated percentage of savings that might be realized if the recommended + // actions are taken. + EstimatedPercentMonthlySavings *float64 `locationName:"estimatedPercentMonthlySavings" type:"double" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorCostOptimizingSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorCostOptimizingSummary) GoString() string { + return s.String() +} + +// Contains information about a resource identified by a Trusted Advisor check. +type TrustedAdvisorResourceDetail struct { + _ struct{} `type:"structure"` + + // Specifies whether the AWS resource was ignored by Trusted Advisor because + // it was marked as suppressed by the user. + IsSuppressed *bool `locationName:"isSuppressed" type:"boolean"` + + // Additional information about the identified resource. The exact metadata + // and its order can be obtained by inspecting the TrustedAdvisorCheckDescription + // object returned by the call to DescribeTrustedAdvisorChecks. Metadata contains + // all the data that is shown in the Excel download, even in those cases where + // the UI shows just summary data. 
+ Metadata []*string `locationName:"metadata" type:"list" required:"true"` + + // The AWS region in which the identified resource is located. + Region *string `locationName:"region" type:"string" required:"true"` + + // The unique identifier for the identified resource. + ResourceId *string `locationName:"resourceId" type:"string" required:"true"` + + // The status code for the resource identified in the Trusted Advisor check. + Status *string `locationName:"status" type:"string" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorResourceDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorResourceDetail) GoString() string { + return s.String() +} + +// Details about AWS resources that were analyzed in a call to Trusted Advisor +// DescribeTrustedAdvisorCheckSummaries. +type TrustedAdvisorResourcesSummary struct { + _ struct{} `type:"structure"` + + // The number of AWS resources that were flagged (listed) by the Trusted Advisor + // check. + ResourcesFlagged *int64 `locationName:"resourcesFlagged" type:"long" required:"true"` + + // The number of AWS resources ignored by Trusted Advisor because information + // was unavailable. + ResourcesIgnored *int64 `locationName:"resourcesIgnored" type:"long" required:"true"` + + // The number of AWS resources that were analyzed by the Trusted Advisor check. + ResourcesProcessed *int64 `locationName:"resourcesProcessed" type:"long" required:"true"` + + // The number of AWS resources ignored by Trusted Advisor because they were + // marked as suppressed by the user. 
+ ResourcesSuppressed *int64 `locationName:"resourcesSuppressed" type:"long" required:"true"` +} + +// String returns the string representation +func (s TrustedAdvisorResourcesSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrustedAdvisorResourcesSummary) GoString() string { + return s.String() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,332 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package support_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/support" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSupport_AddAttachmentsToSet() { + svc := support.New(session.New()) + + params := &support.AddAttachmentsToSetInput{ + Attachments: []*support.Attachment{ // Required + { // Required + Data: []byte("PAYLOAD"), + FileName: aws.String("FileName"), + }, + // More values... + }, + AttachmentSetId: aws.String("AttachmentSetId"), + } + resp, err := svc.AddAttachmentsToSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSupport_AddCommunicationToCase() { + svc := support.New(session.New()) + + params := &support.AddCommunicationToCaseInput{ + CommunicationBody: aws.String("CommunicationBody"), // Required + AttachmentSetId: aws.String("AttachmentSetId"), + CaseId: aws.String("CaseId"), + CcEmailAddresses: []*string{ + aws.String("CcEmailAddress"), // Required + // More values... + }, + } + resp, err := svc.AddCommunicationToCase(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_CreateCase() { + svc := support.New(session.New()) + + params := &support.CreateCaseInput{ + CommunicationBody: aws.String("CommunicationBody"), // Required + Subject: aws.String("Subject"), // Required + AttachmentSetId: aws.String("AttachmentSetId"), + CategoryCode: aws.String("CategoryCode"), + CcEmailAddresses: []*string{ + aws.String("CcEmailAddress"), // Required + // More values... + }, + IssueType: aws.String("IssueType"), + Language: aws.String("Language"), + ServiceCode: aws.String("ServiceCode"), + SeverityCode: aws.String("SeverityCode"), + } + resp, err := svc.CreateCase(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeAttachment() { + svc := support.New(session.New()) + + params := &support.DescribeAttachmentInput{ + AttachmentId: aws.String("AttachmentId"), // Required + } + resp, err := svc.DescribeAttachment(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSupport_DescribeCases() { + svc := support.New(session.New()) + + params := &support.DescribeCasesInput{ + AfterTime: aws.String("AfterTime"), + BeforeTime: aws.String("BeforeTime"), + CaseIdList: []*string{ + aws.String("CaseId"), // Required + // More values... + }, + DisplayId: aws.String("DisplayId"), + IncludeCommunications: aws.Bool(true), + IncludeResolvedCases: aws.Bool(true), + Language: aws.String("Language"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeCases(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeCommunications() { + svc := support.New(session.New()) + + params := &support.DescribeCommunicationsInput{ + CaseId: aws.String("CaseId"), // Required + AfterTime: aws.String("AfterTime"), + BeforeTime: aws.String("BeforeTime"), + MaxResults: aws.Int64(1), + NextToken: aws.String("NextToken"), + } + resp, err := svc.DescribeCommunications(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeServices() { + svc := support.New(session.New()) + + params := &support.DescribeServicesInput{ + Language: aws.String("Language"), + ServiceCodeList: []*string{ + aws.String("ServiceCode"), // Required + // More values... + }, + } + resp, err := svc.DescribeServices(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSupport_DescribeSeverityLevels() { + svc := support.New(session.New()) + + params := &support.DescribeSeverityLevelsInput{ + Language: aws.String("Language"), + } + resp, err := svc.DescribeSeverityLevels(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeTrustedAdvisorCheckRefreshStatuses() { + svc := support.New(session.New()) + + params := &support.DescribeTrustedAdvisorCheckRefreshStatusesInput{ + CheckIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrustedAdvisorCheckRefreshStatuses(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeTrustedAdvisorCheckResult() { + svc := support.New(session.New()) + + params := &support.DescribeTrustedAdvisorCheckResultInput{ + CheckId: aws.String("String"), // Required + Language: aws.String("String"), + } + resp, err := svc.DescribeTrustedAdvisorCheckResult(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeTrustedAdvisorCheckSummaries() { + svc := support.New(session.New()) + + params := &support.DescribeTrustedAdvisorCheckSummariesInput{ + CheckIds: []*string{ // Required + aws.String("String"), // Required + // More values... + }, + } + resp, err := svc.DescribeTrustedAdvisorCheckSummaries(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_DescribeTrustedAdvisorChecks() { + svc := support.New(session.New()) + + params := &support.DescribeTrustedAdvisorChecksInput{ + Language: aws.String("String"), // Required + } + resp, err := svc.DescribeTrustedAdvisorChecks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_RefreshTrustedAdvisorCheck() { + svc := support.New(session.New()) + + params := &support.RefreshTrustedAdvisorCheckInput{ + CheckId: aws.String("String"), // Required + } + resp, err := svc.RefreshTrustedAdvisorCheck(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSupport_ResolveCase() { + svc := support.New(session.New()) + + params := &support.ResolveCaseInput{ + CaseId: aws.String("CaseId"), + } + resp, err := svc.ResolveCase(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,122 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package support + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// The AWS Support API reference is intended for programmers who need detailed +// information about the AWS Support operations and data types. This service +// enables you to manage your AWS Support cases programmatically. It uses HTTP +// methods that return results in JSON format. +// +// The AWS Support service also exposes a set of Trusted Advisor (https://aws.amazon.com/premiumsupport/trustedadvisor/) +// features. You can retrieve a list of checks and their descriptions, get check +// results, specify checks to refresh, and get the refresh status of checks. +// +// The following list describes the AWS Support case management operations: +// +// Service names, issue categories, and available severity levels. The DescribeServices +// and DescribeSeverityLevels operations return AWS service names, service codes, +// service categories, and problem severity levels. You use these values when +// you call the CreateCase operation. Case creation, case details, and case +// resolution. 
The CreateCase, DescribeCases, DescribeAttachment, and ResolveCase +// operations create AWS Support cases, retrieve information about cases, and +// resolve cases. Case communication. The DescribeCommunications, AddCommunicationToCase, +// and AddAttachmentsToSet operations retrieve and add communications and attachments +// to AWS Support cases. The following list describes the operations available +// from the AWS Support service for Trusted Advisor: +// +// DescribeTrustedAdvisorChecks returns the list of checks that run against +// your AWS resources. Using the CheckId for a specific check returned by DescribeTrustedAdvisorChecks, +// you can call DescribeTrustedAdvisorCheckResult to obtain the results for +// the check you specified. DescribeTrustedAdvisorCheckSummaries returns summarized +// results for one or more Trusted Advisor checks. RefreshTrustedAdvisorCheck +// requests that Trusted Advisor rerun a specified check. DescribeTrustedAdvisorCheckRefreshStatuses +// reports the refresh status of one or more checks. For authentication of +// requests, AWS Support uses Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// See About the AWS Support API (http://docs.aws.amazon.com/awssupport/latest/user/Welcome.html) +// in the AWS Support User Guide for information about how to use this service +// to create and manage your support cases, and how to call Trusted Advisor +// for results of checks on your resources. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type Support struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. 
+const ServiceName = "support" + +// New creates a new instance of the Support client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Support client from just a session. +// svc := support.New(mySession) +// +// // Create a Support client with additional configuration +// svc := support.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Support { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *Support { + svc := &Support{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2013-04-15", + JSONVersion: "1.1", + TargetPrefix: "AWSSupport_20130415", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Support operation and runs any +// custom request initialization. 
+func (c *Support) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/supportiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/supportiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/supportiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/support/supportiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,74 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package supportiface provides an interface for the AWS Support. +package supportiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/support" +) + +// SupportAPI is the interface type for support.Support. 
+type SupportAPI interface { + AddAttachmentsToSetRequest(*support.AddAttachmentsToSetInput) (*request.Request, *support.AddAttachmentsToSetOutput) + + AddAttachmentsToSet(*support.AddAttachmentsToSetInput) (*support.AddAttachmentsToSetOutput, error) + + AddCommunicationToCaseRequest(*support.AddCommunicationToCaseInput) (*request.Request, *support.AddCommunicationToCaseOutput) + + AddCommunicationToCase(*support.AddCommunicationToCaseInput) (*support.AddCommunicationToCaseOutput, error) + + CreateCaseRequest(*support.CreateCaseInput) (*request.Request, *support.CreateCaseOutput) + + CreateCase(*support.CreateCaseInput) (*support.CreateCaseOutput, error) + + DescribeAttachmentRequest(*support.DescribeAttachmentInput) (*request.Request, *support.DescribeAttachmentOutput) + + DescribeAttachment(*support.DescribeAttachmentInput) (*support.DescribeAttachmentOutput, error) + + DescribeCasesRequest(*support.DescribeCasesInput) (*request.Request, *support.DescribeCasesOutput) + + DescribeCases(*support.DescribeCasesInput) (*support.DescribeCasesOutput, error) + + DescribeCasesPages(*support.DescribeCasesInput, func(*support.DescribeCasesOutput, bool) bool) error + + DescribeCommunicationsRequest(*support.DescribeCommunicationsInput) (*request.Request, *support.DescribeCommunicationsOutput) + + DescribeCommunications(*support.DescribeCommunicationsInput) (*support.DescribeCommunicationsOutput, error) + + DescribeCommunicationsPages(*support.DescribeCommunicationsInput, func(*support.DescribeCommunicationsOutput, bool) bool) error + + DescribeServicesRequest(*support.DescribeServicesInput) (*request.Request, *support.DescribeServicesOutput) + + DescribeServices(*support.DescribeServicesInput) (*support.DescribeServicesOutput, error) + + DescribeSeverityLevelsRequest(*support.DescribeSeverityLevelsInput) (*request.Request, *support.DescribeSeverityLevelsOutput) + + DescribeSeverityLevels(*support.DescribeSeverityLevelsInput) (*support.DescribeSeverityLevelsOutput, error) + + 
DescribeTrustedAdvisorCheckRefreshStatusesRequest(*support.DescribeTrustedAdvisorCheckRefreshStatusesInput) (*request.Request, *support.DescribeTrustedAdvisorCheckRefreshStatusesOutput) + + DescribeTrustedAdvisorCheckRefreshStatuses(*support.DescribeTrustedAdvisorCheckRefreshStatusesInput) (*support.DescribeTrustedAdvisorCheckRefreshStatusesOutput, error) + + DescribeTrustedAdvisorCheckResultRequest(*support.DescribeTrustedAdvisorCheckResultInput) (*request.Request, *support.DescribeTrustedAdvisorCheckResultOutput) + + DescribeTrustedAdvisorCheckResult(*support.DescribeTrustedAdvisorCheckResultInput) (*support.DescribeTrustedAdvisorCheckResultOutput, error) + + DescribeTrustedAdvisorCheckSummariesRequest(*support.DescribeTrustedAdvisorCheckSummariesInput) (*request.Request, *support.DescribeTrustedAdvisorCheckSummariesOutput) + + DescribeTrustedAdvisorCheckSummaries(*support.DescribeTrustedAdvisorCheckSummariesInput) (*support.DescribeTrustedAdvisorCheckSummariesOutput, error) + + DescribeTrustedAdvisorChecksRequest(*support.DescribeTrustedAdvisorChecksInput) (*request.Request, *support.DescribeTrustedAdvisorChecksOutput) + + DescribeTrustedAdvisorChecks(*support.DescribeTrustedAdvisorChecksInput) (*support.DescribeTrustedAdvisorChecksOutput, error) + + RefreshTrustedAdvisorCheckRequest(*support.RefreshTrustedAdvisorCheckInput) (*request.Request, *support.RefreshTrustedAdvisorCheckOutput) + + RefreshTrustedAdvisorCheck(*support.RefreshTrustedAdvisorCheckInput) (*support.RefreshTrustedAdvisorCheckOutput, error) + + ResolveCaseRequest(*support.ResolveCaseInput) (*request.Request, *support.ResolveCaseOutput) + + ResolveCase(*support.ResolveCaseInput) (*support.ResolveCaseOutput, error) +} + +var _ SupportAPI = (*support.Support)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/api.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,7522 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package swf provides a client for Amazon Simple Workflow Service. +package swf + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCountClosedWorkflowExecutions = "CountClosedWorkflowExecutions" + +// CountClosedWorkflowExecutionsRequest generates a request for the CountClosedWorkflowExecutions operation. +func (c *SWF) CountClosedWorkflowExecutionsRequest(input *CountClosedWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionCount) { + op := &request.Operation{ + Name: opCountClosedWorkflowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CountClosedWorkflowExecutionsInput{} + } + + req = c.newRequest(op, input, output) + output = &WorkflowExecutionCount{} + req.Data = output + return +} + +// Returns the number of closed workflow executions within the given domain +// that meet the specified filtering criteria. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagFilter.tag: String constraint. The key is +// swf:tagFilter.tag. 
typeFilter.name: String constraint. The key is swf:typeFilter.name. +// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) CountClosedWorkflowExecutions(input *CountClosedWorkflowExecutionsInput) (*WorkflowExecutionCount, error) { + req, out := c.CountClosedWorkflowExecutionsRequest(input) + err := req.Send() + return out, err +} + +const opCountOpenWorkflowExecutions = "CountOpenWorkflowExecutions" + +// CountOpenWorkflowExecutionsRequest generates a request for the CountOpenWorkflowExecutions operation. +func (c *SWF) CountOpenWorkflowExecutionsRequest(input *CountOpenWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionCount) { + op := &request.Operation{ + Name: opCountOpenWorkflowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CountOpenWorkflowExecutionsInput{} + } + + req = c.newRequest(op, input, output) + output = &WorkflowExecutionCount{} + req.Data = output + return +} + +// Returns the number of open workflow executions within the given domain that +// meet the specified filtering criteria. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. 
Constrain the following parameters by using a Condition element +// with the appropriate keys. tagFilter.tag: String constraint. The key is +// swf:tagFilter.tag. typeFilter.name: String constraint. The key is swf:typeFilter.name. +// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) CountOpenWorkflowExecutions(input *CountOpenWorkflowExecutionsInput) (*WorkflowExecutionCount, error) { + req, out := c.CountOpenWorkflowExecutionsRequest(input) + err := req.Send() + return out, err +} + +const opCountPendingActivityTasks = "CountPendingActivityTasks" + +// CountPendingActivityTasksRequest generates a request for the CountPendingActivityTasks operation. +func (c *SWF) CountPendingActivityTasksRequest(input *CountPendingActivityTasksInput) (req *request.Request, output *PendingTaskCount) { + op := &request.Operation{ + Name: opCountPendingActivityTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CountPendingActivityTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &PendingTaskCount{} + req.Data = output + return +} + +// Returns the estimated number of activity tasks in the specified task list. +// The count returned is an approximation and is not guaranteed to be exact. +// If you specify a task list that no activity task was ever scheduled in then +// 0 will be returned. 
+// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the taskList.name parameter by using a Condition element +// with the swf:taskList.name key to allow the action to access only certain +// task lists. If the caller does not have sufficient permissions to invoke +// the action, or the parameter values fall outside the specified constraints, +// the action fails. The associated event attribute's cause parameter will be +// set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) CountPendingActivityTasks(input *CountPendingActivityTasksInput) (*PendingTaskCount, error) { + req, out := c.CountPendingActivityTasksRequest(input) + err := req.Send() + return out, err +} + +const opCountPendingDecisionTasks = "CountPendingDecisionTasks" + +// CountPendingDecisionTasksRequest generates a request for the CountPendingDecisionTasks operation. +func (c *SWF) CountPendingDecisionTasksRequest(input *CountPendingDecisionTasksInput) (req *request.Request, output *PendingTaskCount) { + op := &request.Operation{ + Name: opCountPendingDecisionTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CountPendingDecisionTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &PendingTaskCount{} + req.Data = output + return +} + +// Returns the estimated number of decision tasks in the specified task list. +// The count returned is an approximation and is not guaranteed to be exact. +// If you specify a task list that no decision task was ever scheduled in then +// 0 will be returned. 
+// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the taskList.name parameter by using a Condition element +// with the swf:taskList.name key to allow the action to access only certain +// task lists. If the caller does not have sufficient permissions to invoke +// the action, or the parameter values fall outside the specified constraints, +// the action fails. The associated event attribute's cause parameter will be +// set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) CountPendingDecisionTasks(input *CountPendingDecisionTasksInput) (*PendingTaskCount, error) { + req, out := c.CountPendingDecisionTasksRequest(input) + err := req.Send() + return out, err +} + +const opDeprecateActivityType = "DeprecateActivityType" + +// DeprecateActivityTypeRequest generates a request for the DeprecateActivityType operation. +func (c *SWF) DeprecateActivityTypeRequest(input *DeprecateActivityTypeInput) (req *request.Request, output *DeprecateActivityTypeOutput) { + op := &request.Operation{ + Name: opDeprecateActivityType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeprecateActivityTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeprecateActivityTypeOutput{} + req.Data = output + return +} + +// Deprecates the specified activity type. After an activity type has been deprecated, +// you cannot create new tasks of that activity type. 
Tasks of this type that +// were scheduled before the type was deprecated will continue to run. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. activityType.name: String constraint. The key +// is swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DeprecateActivityType(input *DeprecateActivityTypeInput) (*DeprecateActivityTypeOutput, error) { + req, out := c.DeprecateActivityTypeRequest(input) + err := req.Send() + return out, err +} + +const opDeprecateDomain = "DeprecateDomain" + +// DeprecateDomainRequest generates a request for the DeprecateDomain operation. 
+func (c *SWF) DeprecateDomainRequest(input *DeprecateDomainInput) (req *request.Request, output *DeprecateDomainOutput) { + op := &request.Operation{ + Name: opDeprecateDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeprecateDomainInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeprecateDomainOutput{} + req.Data = output + return +} + +// Deprecates the specified domain. After a domain has been deprecated it cannot +// be used to create new workflow executions or register new types. However, +// you can still use visibility actions on this domain. Deprecating a domain +// also deprecates all activity and workflow types registered in the domain. +// Executions that were started before the domain was deprecated will continue +// to run. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
+func (c *SWF) DeprecateDomain(input *DeprecateDomainInput) (*DeprecateDomainOutput, error) { + req, out := c.DeprecateDomainRequest(input) + err := req.Send() + return out, err +} + +const opDeprecateWorkflowType = "DeprecateWorkflowType" + +// DeprecateWorkflowTypeRequest generates a request for the DeprecateWorkflowType operation. +func (c *SWF) DeprecateWorkflowTypeRequest(input *DeprecateWorkflowTypeInput) (req *request.Request, output *DeprecateWorkflowTypeOutput) { + op := &request.Operation{ + Name: opDeprecateWorkflowType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeprecateWorkflowTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeprecateWorkflowTypeOutput{} + req.Data = output + return +} + +// Deprecates the specified workflow type. After a workflow type has been deprecated, +// you cannot create new executions of that type. Executions that were started +// before the type was deprecated will continue to run. A deprecated workflow +// type may still be used when calling visibility actions. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. workflowType.name: String constraint. The key +// is swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. 
If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DeprecateWorkflowType(input *DeprecateWorkflowTypeInput) (*DeprecateWorkflowTypeOutput, error) { + req, out := c.DeprecateWorkflowTypeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeActivityType = "DescribeActivityType" + +// DescribeActivityTypeRequest generates a request for the DescribeActivityType operation. +func (c *SWF) DescribeActivityTypeRequest(input *DescribeActivityTypeInput) (req *request.Request, output *DescribeActivityTypeOutput) { + op := &request.Operation{ + Name: opDescribeActivityType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeActivityTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeActivityTypeOutput{} + req.Data = output + return +} + +// Returns information about the specified activity type. This includes configuration +// settings provided when the type was registered and other general information +// about the type. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. activityType.name: String constraint. The key +// is swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. 
If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DescribeActivityType(input *DescribeActivityTypeInput) (*DescribeActivityTypeOutput, error) { + req, out := c.DescribeActivityTypeRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDomain = "DescribeDomain" + +// DescribeDomainRequest generates a request for the DescribeDomain operation. +func (c *SWF) DescribeDomainRequest(input *DescribeDomainInput) (req *request.Request, output *DescribeDomainOutput) { + op := &request.Operation{ + Name: opDescribeDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDomainOutput{} + req.Data = output + return +} + +// Returns information about the specified domain, including description and +// status. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. 
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DescribeDomain(input *DescribeDomainInput) (*DescribeDomainOutput, error) { + req, out := c.DescribeDomainRequest(input) + err := req.Send() + return out, err +} + +const opDescribeWorkflowExecution = "DescribeWorkflowExecution" + +// DescribeWorkflowExecutionRequest generates a request for the DescribeWorkflowExecution operation. +func (c *SWF) DescribeWorkflowExecutionRequest(input *DescribeWorkflowExecutionInput) (req *request.Request, output *DescribeWorkflowExecutionOutput) { + op := &request.Operation{ + Name: opDescribeWorkflowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkflowExecutionInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkflowExecutionOutput{} + req.Data = output + return +} + +// Returns information about the specified workflow execution including its +// type and some statistics. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. 
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DescribeWorkflowExecution(input *DescribeWorkflowExecutionInput) (*DescribeWorkflowExecutionOutput, error) { + req, out := c.DescribeWorkflowExecutionRequest(input) + err := req.Send() + return out, err +} + +const opDescribeWorkflowType = "DescribeWorkflowType" + +// DescribeWorkflowTypeRequest generates a request for the DescribeWorkflowType operation. +func (c *SWF) DescribeWorkflowTypeRequest(input *DescribeWorkflowTypeInput) (req *request.Request, output *DescribeWorkflowTypeOutput) { + op := &request.Operation{ + Name: opDescribeWorkflowType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkflowTypeInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkflowTypeOutput{} + req.Data = output + return +} + +// Returns information about the specified workflow type. This includes configuration +// settings specified when the type was registered and other information such +// as creation date, current status, and so on. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. workflowType.name: String constraint. The key +// is swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. 
The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) DescribeWorkflowType(input *DescribeWorkflowTypeInput) (*DescribeWorkflowTypeOutput, error) { + req, out := c.DescribeWorkflowTypeRequest(input) + err := req.Send() + return out, err +} + +const opGetWorkflowExecutionHistory = "GetWorkflowExecutionHistory" + +// GetWorkflowExecutionHistoryRequest generates a request for the GetWorkflowExecutionHistory operation. +func (c *SWF) GetWorkflowExecutionHistoryRequest(input *GetWorkflowExecutionHistoryInput) (req *request.Request, output *GetWorkflowExecutionHistoryOutput) { + op := &request.Operation{ + Name: opGetWorkflowExecutionHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetWorkflowExecutionHistoryInput{} + } + + req = c.newRequest(op, input, output) + output = &GetWorkflowExecutionHistoryOutput{} + req.Data = output + return +} + +// Returns the history of the specified workflow execution. The results may +// be split into multiple pages. To retrieve subsequent pages, make the call +// again using the nextPageToken returned by the initial call. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. 
You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) GetWorkflowExecutionHistory(input *GetWorkflowExecutionHistoryInput) (*GetWorkflowExecutionHistoryOutput, error) { + req, out := c.GetWorkflowExecutionHistoryRequest(input) + err := req.Send() + return out, err +} + +func (c *SWF) GetWorkflowExecutionHistoryPages(input *GetWorkflowExecutionHistoryInput, fn func(p *GetWorkflowExecutionHistoryOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.GetWorkflowExecutionHistoryRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*GetWorkflowExecutionHistoryOutput), lastPage) + }) +} + +const opListActivityTypes = "ListActivityTypes" + +// ListActivityTypesRequest generates a request for the ListActivityTypes operation. 
+func (c *SWF) ListActivityTypesRequest(input *ListActivityTypesInput) (req *request.Request, output *ListActivityTypesOutput) { + op := &request.Operation{ + Name: opListActivityTypes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListActivityTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListActivityTypesOutput{} + req.Data = output + return +} + +// Returns information about all activities registered in the specified domain +// that match the specified name and registration status. The result includes +// information like creation date, current status of the activity, etc. The +// results may be split into multiple pages. To retrieve subsequent pages, make +// the call again using the nextPageToken returned by the initial call. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
+func (c *SWF) ListActivityTypes(input *ListActivityTypesInput) (*ListActivityTypesOutput, error) { + req, out := c.ListActivityTypesRequest(input) + err := req.Send() + return out, err +} + +func (c *SWF) ListActivityTypesPages(input *ListActivityTypesInput, fn func(p *ListActivityTypesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListActivityTypesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListActivityTypesOutput), lastPage) + }) +} + +const opListClosedWorkflowExecutions = "ListClosedWorkflowExecutions" + +// ListClosedWorkflowExecutionsRequest generates a request for the ListClosedWorkflowExecutions operation. +func (c *SWF) ListClosedWorkflowExecutionsRequest(input *ListClosedWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionInfos) { + op := &request.Operation{ + Name: opListClosedWorkflowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClosedWorkflowExecutionsInput{} + } + + req = c.newRequest(op, input, output) + output = &WorkflowExecutionInfos{} + req.Data = output + return +} + +// Returns a list of closed workflow executions in the specified domain that +// meet the filtering criteria. The results may be split into multiple pages. +// To retrieve subsequent pages, make the call again using the nextPageToken +// returned by the initial call. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. 
Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagFilter.tag: String constraint. The key is +// swf:tagFilter.tag. typeFilter.name: String constraint. The key is swf:typeFilter.name. +// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) ListClosedWorkflowExecutions(input *ListClosedWorkflowExecutionsInput) (*WorkflowExecutionInfos, error) { + req, out := c.ListClosedWorkflowExecutionsRequest(input) + err := req.Send() + return out, err +} + +func (c *SWF) ListClosedWorkflowExecutionsPages(input *ListClosedWorkflowExecutionsInput, fn func(p *WorkflowExecutionInfos, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListClosedWorkflowExecutionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*WorkflowExecutionInfos), lastPage) + }) +} + +const opListDomains = "ListDomains" + +// ListDomainsRequest generates a request for the ListDomains operation. 
+func (c *SWF) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, output *ListDomainsOutput) { + op := &request.Operation{ + Name: opListDomains, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainsOutput{} + req.Data = output + return +} + +// Returns the list of domains registered in the account. The results may be +// split into multiple pages. To retrieve subsequent pages, make the call again +// using the nextPageToken returned by the initial call. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. The element must be set to arn:aws:swf::AccountID:domain/*, +// where AccountID is the account ID, with no dashes. Use an Action element +// to allow or deny permission to call this action. You cannot use an IAM policy +// to constrain this action's parameters. If the caller does not have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's +// cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example +// IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
+func (c *SWF) ListDomains(input *ListDomainsInput) (*ListDomainsOutput, error) { + req, out := c.ListDomainsRequest(input) + err := req.Send() + return out, err +} + +func (c *SWF) ListDomainsPages(input *ListDomainsInput, fn func(p *ListDomainsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDomainsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDomainsOutput), lastPage) + }) +} + +const opListOpenWorkflowExecutions = "ListOpenWorkflowExecutions" + +// ListOpenWorkflowExecutionsRequest generates a request for the ListOpenWorkflowExecutions operation. +func (c *SWF) ListOpenWorkflowExecutionsRequest(input *ListOpenWorkflowExecutionsInput) (req *request.Request, output *WorkflowExecutionInfos) { + op := &request.Operation{ + Name: opListOpenWorkflowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListOpenWorkflowExecutionsInput{} + } + + req = c.newRequest(op, input, output) + output = &WorkflowExecutionInfos{} + req.Data = output + return +} + +// Returns a list of open workflow executions in the specified domain that meet +// the filtering criteria. The results may be split into multiple pages. To +// retrieve subsequent pages, make the call again using the nextPageToken returned +// by the initial call. +// +// This operation is eventually consistent. The results are best effort and +// may not exactly reflect recent updates and changes. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. 
Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagFilter.tag: String constraint. The key is +// swf:tagFilter.tag. typeFilter.name: String constraint. The key is swf:typeFilter.name. +// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) ListOpenWorkflowExecutions(input *ListOpenWorkflowExecutionsInput) (*WorkflowExecutionInfos, error) { + req, out := c.ListOpenWorkflowExecutionsRequest(input) + err := req.Send() + return out, err +} + +func (c *SWF) ListOpenWorkflowExecutionsPages(input *ListOpenWorkflowExecutionsInput, fn func(p *WorkflowExecutionInfos, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListOpenWorkflowExecutionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*WorkflowExecutionInfos), lastPage) + }) +} + +const opListWorkflowTypes = "ListWorkflowTypes" + +// ListWorkflowTypesRequest generates a request for the ListWorkflowTypes operation. 
+func (c *SWF) ListWorkflowTypesRequest(input *ListWorkflowTypesInput) (req *request.Request, output *ListWorkflowTypesOutput) { + op := &request.Operation{ + Name: opListWorkflowTypes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListWorkflowTypesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListWorkflowTypesOutput{} + req.Data = output + return +} + +// Returns information about workflow types in the specified domain. The results +// may be split into multiple pages that can be retrieved by making the call +// repeatedly. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
+func (c *SWF) ListWorkflowTypes(input *ListWorkflowTypesInput) (*ListWorkflowTypesOutput, error) { + req, out := c.ListWorkflowTypesRequest(input) + err := req.Send() + return out, err +} + +func (c *SWF) ListWorkflowTypesPages(input *ListWorkflowTypesInput, fn func(p *ListWorkflowTypesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListWorkflowTypesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListWorkflowTypesOutput), lastPage) + }) +} + +const opPollForActivityTask = "PollForActivityTask" + +// PollForActivityTaskRequest generates a request for the PollForActivityTask operation. +func (c *SWF) PollForActivityTaskRequest(input *PollForActivityTaskInput) (req *request.Request, output *PollForActivityTaskOutput) { + op := &request.Operation{ + Name: opPollForActivityTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PollForActivityTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &PollForActivityTaskOutput{} + req.Data = output + return +} + +// Used by workers to get an ActivityTask from the specified activity taskList. +// This initiates a long poll, where the service holds the HTTP connection open +// and responds as soon as a task becomes available. The maximum time the service +// holds on to the request before responding is 60 seconds. If no task is available +// within 60 seconds, the poll will return an empty result. An empty result, +// in this context, means that an ActivityTask is returned, but that the value +// of taskToken is an empty string. If a task is returned, the worker should +// use its type to identify and process it correctly. +// +// Workers should set their client side socket timeout to at least 70 seconds +// (10 seconds higher than the maximum time service may hold the poll request). 
+// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the taskList.name parameter by using a Condition element +// with the swf:taskList.name key to allow the action to access only certain +// task lists. If the caller does not have sufficient permissions to invoke +// the action, or the parameter values fall outside the specified constraints, +// the action fails. The associated event attribute's cause parameter will be +// set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) PollForActivityTask(input *PollForActivityTaskInput) (*PollForActivityTaskOutput, error) { + req, out := c.PollForActivityTaskRequest(input) + err := req.Send() + return out, err +} + +const opPollForDecisionTask = "PollForDecisionTask" + +// PollForDecisionTaskRequest generates a request for the PollForDecisionTask operation. +func (c *SWF) PollForDecisionTaskRequest(input *PollForDecisionTaskInput) (req *request.Request, output *PollForDecisionTaskOutput) { + op := &request.Operation{ + Name: opPollForDecisionTask, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextPageToken"}, + OutputTokens: []string{"nextPageToken"}, + LimitToken: "maximumPageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &PollForDecisionTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &PollForDecisionTaskOutput{} + req.Data = output + return +} + +// Used by deciders to get a DecisionTask from the specified decision taskList. 
+// A decision task may be returned for any open workflow execution that is using +// the specified task list. The task includes a paginated view of the history +// of the workflow execution. The decider should use the workflow type and the +// history to determine how to properly handle the task. +// +// This action initiates a long poll, where the service holds the HTTP connection +// open and responds as soon a task becomes available. If no decision task is +// available in the specified task list before the timeout of 60 seconds expires, +// an empty result is returned. An empty result, in this context, means that +// a DecisionTask is returned, but that the value of taskToken is an empty string. +// +// Deciders should set their client-side socket timeout to at least 70 seconds +// (10 seconds higher than the timeout). Because the number of workflow history +// events for a single workflow execution might be very large, the result returned +// might be split up across a number of pages. To retrieve subsequent pages, +// make additional calls to PollForDecisionTask using the nextPageToken returned +// by the initial call. Note that you do not call GetWorkflowExecutionHistory +// with this nextPageToken. Instead, call PollForDecisionTask again. Access +// Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the taskList.name parameter by using a Condition element +// with the swf:taskList.name key to allow the action to access only certain +// task lists. If the caller does not have sufficient permissions to invoke +// the action, or the parameter values fall outside the specified constraints, +// the action fails. The associated event attribute's cause parameter will be +// set to OPERATION_NOT_PERMITTED. 
For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) PollForDecisionTask(input *PollForDecisionTaskInput) (*PollForDecisionTaskOutput, error) { + req, out := c.PollForDecisionTaskRequest(input) + err := req.Send() + return out, err +} + +func (c *SWF) PollForDecisionTaskPages(input *PollForDecisionTaskInput, fn func(p *PollForDecisionTaskOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.PollForDecisionTaskRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*PollForDecisionTaskOutput), lastPage) + }) +} + +const opRecordActivityTaskHeartbeat = "RecordActivityTaskHeartbeat" + +// RecordActivityTaskHeartbeatRequest generates a request for the RecordActivityTaskHeartbeat operation. +func (c *SWF) RecordActivityTaskHeartbeatRequest(input *RecordActivityTaskHeartbeatInput) (req *request.Request, output *RecordActivityTaskHeartbeatOutput) { + op := &request.Operation{ + Name: opRecordActivityTaskHeartbeat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RecordActivityTaskHeartbeatInput{} + } + + req = c.newRequest(op, input, output) + output = &RecordActivityTaskHeartbeatOutput{} + req.Data = output + return +} + +// Used by activity workers to report to the service that the ActivityTask represented +// by the specified taskToken is still making progress. The worker can also +// (optionally) specify details of the progress, for example percent complete, +// using the details parameter. This action can also be used by the worker as +// a mechanism to check if cancellation is being requested for the activity +// task. If a cancellation is being attempted for the specified task, then the +// boolean cancelRequested flag returned by the service is set to true. 
+// +// This action resets the taskHeartbeatTimeout clock. The taskHeartbeatTimeout +// is specified in RegisterActivityType. +// +// This action does not in itself create an event in the workflow execution +// history. However, if the task times out, the workflow execution history will +// contain a ActivityTaskTimedOut event that contains the information from the +// last heartbeat generated by the activity worker. +// +// The taskStartToCloseTimeout of an activity type is the maximum duration +// of an activity task, regardless of the number of RecordActivityTaskHeartbeat +// requests received. The taskStartToCloseTimeout is also specified in RegisterActivityType. +// This operation is only useful for long-lived activities to report liveliness +// of the task and to determine if a cancellation is being attempted. If the +// cancelRequested flag returns true, a cancellation is being attempted. If +// the worker can cancel the activity, it should respond with RespondActivityTaskCanceled. +// Otherwise, it should ignore the cancellation request. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
+func (c *SWF) RecordActivityTaskHeartbeat(input *RecordActivityTaskHeartbeatInput) (*RecordActivityTaskHeartbeatOutput, error) { + req, out := c.RecordActivityTaskHeartbeatRequest(input) + err := req.Send() + return out, err +} + +const opRegisterActivityType = "RegisterActivityType" + +// RegisterActivityTypeRequest generates a request for the RegisterActivityType operation. +func (c *SWF) RegisterActivityTypeRequest(input *RegisterActivityTypeInput) (req *request.Request, output *RegisterActivityTypeOutput) { + op := &request.Operation{ + Name: opRegisterActivityType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterActivityTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterActivityTypeOutput{} + req.Data = output + return +} + +// Registers a new activity type along with its configuration settings in the +// specified domain. +// +// A TypeAlreadyExists fault is returned if the type already exists in the +// domain. You cannot change any configuration settings of the type after its +// registration, and it must be registered as a new version. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. defaultTaskList.name: String constraint. The +// key is swf:defaultTaskList.name. name: String constraint. The key is swf:name. +// version: String constraint. The key is swf:version. If the caller does +// not have sufficient permissions to invoke the action, or the parameter values +// fall outside the specified constraints, the action fails. 
The associated +// event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RegisterActivityType(input *RegisterActivityTypeInput) (*RegisterActivityTypeOutput, error) { + req, out := c.RegisterActivityTypeRequest(input) + err := req.Send() + return out, err +} + +const opRegisterDomain = "RegisterDomain" + +// RegisterDomainRequest generates a request for the RegisterDomain operation. +func (c *SWF) RegisterDomainRequest(input *RegisterDomainInput) (req *request.Request, output *RegisterDomainOutput) { + op := &request.Operation{ + Name: opRegisterDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterDomainInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterDomainOutput{} + req.Data = output + return +} + +// Registers a new domain. +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// You cannot use an IAM policy to control domain access for this action. +// The name of the domain being registered is available as the resource of this +// action. Use an Action element to allow or deny permission to call this action. +// You cannot use an IAM policy to constrain this action's parameters. If the +// caller does not have sufficient permissions to invoke the action, or the +// parameter values fall outside the specified constraints, the action fails. +// The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. 
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RegisterDomain(input *RegisterDomainInput) (*RegisterDomainOutput, error) { + req, out := c.RegisterDomainRequest(input) + err := req.Send() + return out, err +} + +const opRegisterWorkflowType = "RegisterWorkflowType" + +// RegisterWorkflowTypeRequest generates a request for the RegisterWorkflowType operation. +func (c *SWF) RegisterWorkflowTypeRequest(input *RegisterWorkflowTypeInput) (req *request.Request, output *RegisterWorkflowTypeOutput) { + op := &request.Operation{ + Name: opRegisterWorkflowType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterWorkflowTypeInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RegisterWorkflowTypeOutput{} + req.Data = output + return +} + +// Registers a new workflow type and its configuration settings in the specified +// domain. +// +// The retention period for the workflow history is set by the RegisterDomain +// action. +// +// If the type already exists, then a TypeAlreadyExists fault is returned. +// You cannot change the configuration settings of a workflow type once it is +// registered and it must be registered as a new version. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. defaultTaskList.name: String constraint. The +// key is swf:defaultTaskList.name. name: String constraint. The key is swf:name. 
+// version: String constraint. The key is swf:version. If the caller does +// not have sufficient permissions to invoke the action, or the parameter values +// fall outside the specified constraints, the action fails. The associated +// event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RegisterWorkflowType(input *RegisterWorkflowTypeInput) (*RegisterWorkflowTypeOutput, error) { + req, out := c.RegisterWorkflowTypeRequest(input) + err := req.Send() + return out, err +} + +const opRequestCancelWorkflowExecution = "RequestCancelWorkflowExecution" + +// RequestCancelWorkflowExecutionRequest generates a request for the RequestCancelWorkflowExecution operation. +func (c *SWF) RequestCancelWorkflowExecutionRequest(input *RequestCancelWorkflowExecutionInput) (req *request.Request, output *RequestCancelWorkflowExecutionOutput) { + op := &request.Operation{ + Name: opRequestCancelWorkflowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestCancelWorkflowExecutionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RequestCancelWorkflowExecutionOutput{} + req.Data = output + return +} + +// Records a WorkflowExecutionCancelRequested event in the currently running +// workflow execution identified by the given domain, workflowId, and runId. +// This logically requests the cancellation of the workflow execution as a whole. +// It is up to the decider to take appropriate actions when it receives an execution +// history with this event. 
+// +// If the runId is not specified, the WorkflowExecutionCancelRequested event +// is recorded in the history of the current open workflow execution with the +// specified workflowId in the domain. Because this action allows the workflow +// to properly clean up and gracefully close, it should be used instead of TerminateWorkflowExecution +// when possible. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RequestCancelWorkflowExecution(input *RequestCancelWorkflowExecutionInput) (*RequestCancelWorkflowExecutionOutput, error) { + req, out := c.RequestCancelWorkflowExecutionRequest(input) + err := req.Send() + return out, err +} + +const opRespondActivityTaskCanceled = "RespondActivityTaskCanceled" + +// RespondActivityTaskCanceledRequest generates a request for the RespondActivityTaskCanceled operation. 
+func (c *SWF) RespondActivityTaskCanceledRequest(input *RespondActivityTaskCanceledInput) (req *request.Request, output *RespondActivityTaskCanceledOutput) { + op := &request.Operation{ + Name: opRespondActivityTaskCanceled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RespondActivityTaskCanceledInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RespondActivityTaskCanceledOutput{} + req.Data = output + return +} + +// Used by workers to tell the service that the ActivityTask identified by the +// taskToken was successfully canceled. Additional details can be optionally +// provided using the details argument. +// +// These details (if provided) appear in the ActivityTaskCanceled event added +// to the workflow history. +// +// Only use this operation if the canceled flag of a RecordActivityTaskHeartbeat +// request returns true and if the activity can be safely undone or abandoned. +// A task is considered open from the time that it is scheduled until it is +// closed. Therefore a task is reported as open while a worker is processing +// it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, +// RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed +// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. 
+// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RespondActivityTaskCanceled(input *RespondActivityTaskCanceledInput) (*RespondActivityTaskCanceledOutput, error) { + req, out := c.RespondActivityTaskCanceledRequest(input) + err := req.Send() + return out, err +} + +const opRespondActivityTaskCompleted = "RespondActivityTaskCompleted" + +// RespondActivityTaskCompletedRequest generates a request for the RespondActivityTaskCompleted operation. +func (c *SWF) RespondActivityTaskCompletedRequest(input *RespondActivityTaskCompletedInput) (req *request.Request, output *RespondActivityTaskCompletedOutput) { + op := &request.Operation{ + Name: opRespondActivityTaskCompleted, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RespondActivityTaskCompletedInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RespondActivityTaskCompletedOutput{} + req.Data = output + return +} + +// Used by workers to tell the service that the ActivityTask identified by the +// taskToken completed successfully with a result (if provided). The result +// appears in the ActivityTaskCompleted event in the workflow history. +// +// If the requested task does not complete successfully, use RespondActivityTaskFailed +// instead. If the worker finds that the task is canceled through the canceled +// flag returned by RecordActivityTaskHeartbeat, it should cancel the task, +// clean up and then call RespondActivityTaskCanceled. 
A task is considered +// open from the time that it is scheduled until it is closed. Therefore a task +// is reported as open while a worker is processing it. A task is closed after +// it has been specified in a call to RespondActivityTaskCompleted, RespondActivityTaskCanceled, +// RespondActivityTaskFailed, or the task has timed out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RespondActivityTaskCompleted(input *RespondActivityTaskCompletedInput) (*RespondActivityTaskCompletedOutput, error) { + req, out := c.RespondActivityTaskCompletedRequest(input) + err := req.Send() + return out, err +} + +const opRespondActivityTaskFailed = "RespondActivityTaskFailed" + +// RespondActivityTaskFailedRequest generates a request for the RespondActivityTaskFailed operation. 
+func (c *SWF) RespondActivityTaskFailedRequest(input *RespondActivityTaskFailedInput) (req *request.Request, output *RespondActivityTaskFailedOutput) { + op := &request.Operation{ + Name: opRespondActivityTaskFailed, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RespondActivityTaskFailedInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RespondActivityTaskFailedOutput{} + req.Data = output + return +} + +// Used by workers to tell the service that the ActivityTask identified by the +// taskToken has failed with reason (if specified). The reason and details appear +// in the ActivityTaskFailed event added to the workflow history. +// +// A task is considered open from the time that it is scheduled until it is +// closed. Therefore a task is reported as open while a worker is processing +// it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, +// RespondActivityTaskCanceled, RespondActivityTaskFailed, or the task has timed +// out (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types). +// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. 
+// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RespondActivityTaskFailed(input *RespondActivityTaskFailedInput) (*RespondActivityTaskFailedOutput, error) { + req, out := c.RespondActivityTaskFailedRequest(input) + err := req.Send() + return out, err +} + +const opRespondDecisionTaskCompleted = "RespondDecisionTaskCompleted" + +// RespondDecisionTaskCompletedRequest generates a request for the RespondDecisionTaskCompleted operation. +func (c *SWF) RespondDecisionTaskCompletedRequest(input *RespondDecisionTaskCompletedInput) (req *request.Request, output *RespondDecisionTaskCompletedOutput) { + op := &request.Operation{ + Name: opRespondDecisionTaskCompleted, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RespondDecisionTaskCompletedInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &RespondDecisionTaskCompletedOutput{} + req.Data = output + return +} + +// Used by deciders to tell the service that the DecisionTask identified by +// the taskToken has successfully completed. The decisions argument specifies +// the list of decisions made while processing the task. +// +// A DecisionTaskCompleted event is added to the workflow history. The executionContext +// specified is attached to the event in the workflow execution history. +// +// Access Control +// +// If an IAM policy grants permission to use RespondDecisionTaskCompleted, +// it can express permissions for the list of decisions in the decisions parameter. +// Each of the decisions has one or more parameters, much like a regular API +// call. 
To allow for policies to be as readable as possible, you can express +// permissions on decisions as if they were actual API calls, including applying +// conditions to some parameters. For more information, see Using IAM to Manage +// Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) RespondDecisionTaskCompleted(input *RespondDecisionTaskCompletedInput) (*RespondDecisionTaskCompletedOutput, error) { + req, out := c.RespondDecisionTaskCompletedRequest(input) + err := req.Send() + return out, err +} + +const opSignalWorkflowExecution = "SignalWorkflowExecution" + +// SignalWorkflowExecutionRequest generates a request for the SignalWorkflowExecution operation. +func (c *SWF) SignalWorkflowExecutionRequest(input *SignalWorkflowExecutionInput) (req *request.Request, output *SignalWorkflowExecutionOutput) { + op := &request.Operation{ + Name: opSignalWorkflowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SignalWorkflowExecutionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &SignalWorkflowExecutionOutput{} + req.Data = output + return +} + +// Records a WorkflowExecutionSignaled event in the workflow execution history +// and creates a decision task for the workflow execution identified by the +// given domain, workflowId and runId. The event is recorded with the specified +// user defined signalName and input (if provided). +// +// If a runId is not specified, then the WorkflowExecutionSignaled event is +// recorded in the history of the current open workflow with the matching workflowId +// in the domain. If the specified workflow execution is not open, this method +// fails with UnknownResource. 
Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) SignalWorkflowExecution(input *SignalWorkflowExecutionInput) (*SignalWorkflowExecutionOutput, error) { + req, out := c.SignalWorkflowExecutionRequest(input) + err := req.Send() + return out, err +} + +const opStartWorkflowExecution = "StartWorkflowExecution" + +// StartWorkflowExecutionRequest generates a request for the StartWorkflowExecution operation. +func (c *SWF) StartWorkflowExecutionRequest(input *StartWorkflowExecutionInput) (req *request.Request, output *StartWorkflowExecutionOutput) { + op := &request.Operation{ + Name: opStartWorkflowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartWorkflowExecutionInput{} + } + + req = c.newRequest(op, input, output) + output = &StartWorkflowExecutionOutput{} + req.Data = output + return +} + +// Starts an execution of the workflow type in the specified domain using the +// provided workflowId and input data. +// +// This action returns the newly started workflow execution. 
+// +// Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagList.member.0: The key is swf:tagList.member.0. +// tagList.member.1: The key is swf:tagList.member.1. tagList.member.2: The +// key is swf:tagList.member.2. tagList.member.3: The key is swf:tagList.member.3. +// tagList.member.4: The key is swf:tagList.member.4. taskList: String constraint. +// The key is swf:taskList.name. workflowType.name: String constraint. The key +// is swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) StartWorkflowExecution(input *StartWorkflowExecutionInput) (*StartWorkflowExecutionOutput, error) { + req, out := c.StartWorkflowExecutionRequest(input) + err := req.Send() + return out, err +} + +const opTerminateWorkflowExecution = "TerminateWorkflowExecution" + +// TerminateWorkflowExecutionRequest generates a request for the TerminateWorkflowExecution operation. 
+func (c *SWF) TerminateWorkflowExecutionRequest(input *TerminateWorkflowExecutionInput) (req *request.Request, output *TerminateWorkflowExecutionOutput) { + op := &request.Operation{ + Name: opTerminateWorkflowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateWorkflowExecutionInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &TerminateWorkflowExecutionOutput{} + req.Data = output + return +} + +// Records a WorkflowExecutionTerminated event and forces closure of the workflow +// execution identified by the given domain, runId, and workflowId. The child +// policy, registered with the workflow type or specified when starting this +// execution, is applied to any open child workflow executions of this workflow +// execution. +// +// If the identified workflow execution was in progress, it is terminated +// immediately. If a runId is not specified, then the WorkflowExecutionTerminated +// event is recorded in the history of the current open workflow with the matching +// workflowId in the domain. You should consider using RequestCancelWorkflowExecution +// action instead because it allows the workflow to gracefully close while TerminateWorkflowExecution +// does not. Access Control +// +// You can use IAM policies to control this action's access to Amazon SWF resources +// as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. 
The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +func (c *SWF) TerminateWorkflowExecution(input *TerminateWorkflowExecutionInput) (*TerminateWorkflowExecutionOutput, error) { + req, out := c.TerminateWorkflowExecutionRequest(input) + err := req.Send() + return out, err +} + +// Provides details of the ActivityTaskCancelRequested event. +type ActivityTaskCancelRequestedEventAttributes struct { + _ struct{} `type:"structure"` + + // The unique ID of the task. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RequestCancelActivityTask decision for this cancellation + // request. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskCancelRequestedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskCancelRequestedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskCanceled event. +type ActivityTaskCanceledEventAttributes struct { + _ struct{} `type:"structure"` + + // Details of the cancellation (if any). + Details *string `locationName:"details" type:"string"` + + // If set, contains the ID of the last ActivityTaskCancelRequested event recorded + // for this activity task. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. 
+ LatestCancelRequestedEventId *int64 `locationName:"latestCancelRequestedEventId" type:"long"` + + // The ID of the ActivityTaskScheduled event that was recorded when this activity + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the ActivityTaskStarted event recorded when this activity task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskCanceledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskCanceledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskCompleted event. +type ActivityTaskCompletedEventAttributes struct { + _ struct{} `type:"structure"` + + // The results of the activity task (if any). + Result *string `locationName:"result" type:"string"` + + // The ID of the ActivityTaskScheduled event that was recorded when this activity + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the ActivityTaskStarted event recorded when this activity task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. 
+ StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskCompletedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskCompletedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskFailed event. +type ActivityTaskFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The details of the failure (if any). + Details *string `locationName:"details" type:"string"` + + // The reason provided for the failure (if any). + Reason *string `locationName:"reason" type:"string"` + + // The ID of the ActivityTaskScheduled event that was recorded when this activity + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the ActivityTaskStarted event recorded when this activity task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskScheduled event. +type ActivityTaskScheduledEventAttributes struct { + _ struct{} `type:"structure"` + + // The unique ID of the activity task. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // The type of the activity task. 
+ ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. This data is not sent to the activity. + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision that + // resulted in the scheduling of this activity task. This information can be + // useful for diagnosing problems by tracing back the chain of events leading + // up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The maximum time before which the worker processing this task must report + // progress by calling RecordActivityTaskHeartbeat. If the timeout is exceeded, + // the activity task is automatically timed out. If the worker subsequently + // attempts to record a heartbeat or return a result, it will be ignored. + HeartbeatTimeout *string `locationName:"heartbeatTimeout" type:"string"` + + // The input provided to the activity task. + Input *string `locationName:"input" type:"string"` + + // The maximum amount of time for this activity task. + ScheduleToCloseTimeout *string `locationName:"scheduleToCloseTimeout" type:"string"` + + // The maximum amount of time the activity task can wait to be assigned to a + // worker. + ScheduleToStartTimeout *string `locationName:"scheduleToStartTimeout" type:"string"` + + // The maximum amount of time a worker may take to process the activity task. + StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` + + // The task list in which the activity task has been scheduled. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` + + // Optional. The priority to assign to the scheduled activity task. If set, + // this will override any default priority value that was assigned when the + // activity type was registered. 
+ // + // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) + // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + TaskPriority *string `locationName:"taskPriority" type:"string"` +} + +// String returns the string representation +func (s ActivityTaskScheduledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskScheduledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskStarted event. +type ActivityTaskStartedEventAttributes struct { + _ struct{} `type:"structure"` + + // Identity of the worker that was assigned this task. This aids diagnostics + // when problems arise. The form of this identity is user defined. + Identity *string `locationName:"identity" type:"string"` + + // The ID of the ActivityTaskScheduled event that was recorded when this activity + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ActivityTaskStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskStartedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ActivityTaskTimedOut event. +type ActivityTaskTimedOutEventAttributes struct { + _ struct{} `type:"structure"` + + // Contains the content of the details parameter for the last call made by the + // activity to RecordActivityTaskHeartbeat. 
+ Details *string `locationName:"details" type:"string"` + + // The ID of the ActivityTaskScheduled event that was recorded when this activity + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the ActivityTaskStarted event recorded when this activity task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The type of the timeout that caused this event. + TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"ActivityTaskTimeoutType"` +} + +// String returns the string representation +func (s ActivityTaskTimedOutEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTaskTimedOutEventAttributes) GoString() string { + return s.String() +} + +// Represents an activity type. +type ActivityType struct { + _ struct{} `type:"structure"` + + // The name of this activity. + // + // The combination of activity type name and version must be unique within + // a domain. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The version of this activity. + // + // The combination of activity type name and version must be unique with in + // a domain. + Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ActivityType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityType) GoString() string { + return s.String() +} + +// Configuration settings registered with the activity type. 
+type ActivityTypeConfiguration struct { + _ struct{} `type:"structure"` + + // Optional. The default maximum time, in seconds, before which a worker processing + // a task must report progress by calling RecordActivityTaskHeartbeat. + // + // You can specify this value only when registering an activity type. The registered + // default value can be overridden when you schedule a task through the ScheduleActivityTask + // decision. If the activity worker subsequently attempts to record a heartbeat + // or returns a result, the activity worker receives an UnknownResource fault. + // In this case, Amazon SWF no longer considers the activity task to be valid; + // the activity worker should clean up the activity task. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskHeartbeatTimeout *string `locationName:"defaultTaskHeartbeatTimeout" type:"string"` + + // Optional. The default task list specified for this activity type at registration. + // This default is used if a task list is not provided when a task is scheduled + // through the ScheduleActivityTask decision. You can override the default registered + // task list when scheduling a task through the ScheduleActivityTask decision. + DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` + + // Optional. The default task priority for tasks of this activity type, specified + // at registration. If not set, then "0" will be used as the default priority. + // This default can be overridden when scheduling an activity task. + // + // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) + // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. 
+ // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` + + // Optional. The default maximum duration, specified when registering the activity + // type, for tasks of this activity type. You can override this default when + // scheduling a task through the ScheduleActivityTask decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskScheduleToCloseTimeout *string `locationName:"defaultTaskScheduleToCloseTimeout" type:"string"` + + // Optional. The default maximum duration, specified when registering the activity + // type, that a task of an activity type can wait before being assigned to a + // worker. You can override this default when scheduling a task through the + // ScheduleActivityTask decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskScheduleToStartTimeout *string `locationName:"defaultTaskScheduleToStartTimeout" type:"string"` + + // Optional. The default maximum duration for tasks of an activity type specified + // when registering the activity type. You can override this default when scheduling + // a task through the ScheduleActivityTask decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. 
+ DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` +} + +// String returns the string representation +func (s ActivityTypeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTypeConfiguration) GoString() string { + return s.String() +} + +// Detailed information about an activity type. +type ActivityTypeInfo struct { + _ struct{} `type:"structure"` + + // The ActivityType type structure representing the activity type. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The date and time this activity type was created through RegisterActivityType. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix" required:"true"` + + // If DEPRECATED, the date and time DeprecateActivityType was called. + DeprecationDate *time.Time `locationName:"deprecationDate" type:"timestamp" timestampFormat:"unix"` + + // The description of the activity type provided in RegisterActivityType. + Description *string `locationName:"description" type:"string"` + + // The current status of the activity type. + Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` +} + +// String returns the string representation +func (s ActivityTypeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivityTypeInfo) GoString() string { + return s.String() +} + +// Provides details of the CancelTimer decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. 
+// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type CancelTimerDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Required. The unique ID of the timer to cancel. + TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelTimerDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelTimerDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the CancelTimerFailed event. +type CancelTimerFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"CancelTimerFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the CancelTimer decision to cancel this timer. This information + // can be useful for diagnosing problems by tracing back the chain of events + // leading up to this event. 
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The timerId provided in the CancelTimer decision that failed. + TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelTimerFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelTimerFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the CancelWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type CancelWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. details of the cancellation. + Details *string `locationName:"details" type:"string"` +} + +// String returns the string representation +func (s CancelWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the CancelWorkflowExecutionFailed event. 
+type CancelWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"CancelWorkflowExecutionFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the CancelWorkflowExecution decision for this cancellation + // request. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s CancelWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provide details of the ChildWorkflowExecutionCanceled event. +type ChildWorkflowExecutionCanceledEventAttributes struct { + _ struct{} `type:"structure"` + + // Details of the cancellation (if provided). + Details *string `locationName:"details" type:"string"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. 
+ InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The child workflow execution that was canceled. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionCanceledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionCanceledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionCompleted event. +type ChildWorkflowExecutionCompletedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The result of the child workflow execution (if any). + Result *string `locationName:"result" type:"string"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. 
+ StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The child workflow execution that was completed. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionCompletedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionCompletedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionFailed event. +type ChildWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The details of the failure (if provided). + Details *string `locationName:"details" type:"string"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The reason for the failure (if provided). + Reason *string `locationName:"reason" type:"string"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The child workflow execution that failed. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. 
+ WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionStarted event. +type ChildWorkflowExecutionStartedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The child workflow execution that was started. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionStartedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionTerminated event. +type ChildWorkflowExecutionTerminatedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. 
+ InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The child workflow execution that was terminated. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionTerminatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionTerminatedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ChildWorkflowExecutionTimedOut event. +type ChildWorkflowExecutionTimedOutEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The ID of the ChildWorkflowExecutionStarted event recorded when this child + // workflow execution was started. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The type of the timeout that caused the child workflow execution to time + // out. 
+ TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"WorkflowExecutionTimeoutType"` + + // The child workflow execution that timed out. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ChildWorkflowExecutionTimedOutEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildWorkflowExecutionTimedOutEventAttributes) GoString() string { + return s.String() +} + +// Used to filter the closed workflow executions in visibility APIs by their +// close status. +type CloseStatusFilter struct { + _ struct{} `type:"structure"` + + // Required. The close status that must match the close status of an execution + // for it to meet the criteria of this filter. + Status *string `locationName:"status" type:"string" required:"true" enum:"CloseStatus"` +} + +// String returns the string representation +func (s CloseStatusFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloseStatusFilter) GoString() string { + return s.String() +} + +// Provides details of the CompleteWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. 
The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type CompleteWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // The result of the workflow execution. The form of the result is implementation + // defined. + Result *string `locationName:"result" type:"string"` +} + +// String returns the string representation +func (s CompleteWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the CompleteWorkflowExecutionFailed event. +type CompleteWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"CompleteWorkflowExecutionFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the CompleteWorkflowExecution decision to complete this + // execution. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. 
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s CompleteWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ContinueAsNewWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tag: Optional.. A tag used to identify the workflow +// execution taskList: String constraint. The key is swf:taskList.name. workflowType.version: +// String constraint. The key is swf:workflowType.version. If the caller +// does not have sufficient permissions to invoke the action, or the parameter +// values fall outside the specified constraints, the action fails. The associated +// event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type ContinueAsNewWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // If set, specifies the policy to use for the child workflow executions of + // the new execution if it is terminated by calling the TerminateWorkflowExecution + // action explicitly or due to an expired timeout. This policy overrides the + // default child policy specified when registering the workflow type using RegisterWorkflowType. 
+ // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. A child policy + // for this workflow execution must be specified either as a default for the + // workflow type or through this parameter. If neither this parameter is set + // nor a default child policy was specified at registration time then a fault + // will be returned. + ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` + + // If set, specifies the total duration for this workflow execution. This overrides + // the defaultExecutionStartToCloseTimeout specified when registering the workflow + // type. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + // + // An execution start-to-close timeout for this workflow execution must be + // specified either as a default for the workflow type or through this field. + // If neither this field is set nor a default execution start-to-close timeout + // was specified at registration time then a fault will be returned. + ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` + + // The input provided to the new workflow execution. + Input *string `locationName:"input" type:"string"` + + // The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions. + // + // In order for this workflow execution to invoke AWS Lambda functions, an + // appropriate IAM role must be specified either as a default for the workflow + // type or through this field. 
+ LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The list of tags to associate with the new workflow execution. A maximum + // of 5 tags can be specified. You can list workflow executions with a specific + // tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions + // and specifying a TagFilter. + TagList []*string `locationName:"tagList" type:"list"` + + // Represents a task list. + TaskList *TaskList `locationName:"taskList" type:"structure"` + + // Optional. The task priority that, if set, specifies the priority for the + // decision tasks for this workflow execution. This overrides the defaultTaskPriority + // specified when registering the workflow type. Valid values are integers that + // range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). + // Higher numbers indicate higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + TaskPriority *string `locationName:"taskPriority" type:"string"` + + // Specifies the maximum duration of decision tasks for the new workflow execution. + // This parameter overrides the defaultTaskStartToCloseTimout specified when + // registering the workflow type using RegisterWorkflowType. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + // + // A task start-to-close timeout for the new workflow execution must be specified + // either as a default for the workflow type or through this parameter. If neither + // this parameter is set nor a default task start-to-close timeout was specified + // at registration time then a fault will be returned. 
+ TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` + + WorkflowTypeVersion *string `locationName:"workflowTypeVersion" min:"1" type:"string"` +} + +// String returns the string representation +func (s ContinueAsNewWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinueAsNewWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the ContinueAsNewWorkflowExecutionFailed event. +type ContinueAsNewWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"ContinueAsNewWorkflowExecutionFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the ContinueAsNewWorkflowExecution decision that started + // this execution. This information can be useful for diagnosing problems by + // tracing back the chain of events leading up to this event. 
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ContinueAsNewWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinueAsNewWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +type CountClosedWorkflowExecutionsInput struct { + _ struct{} `type:"structure"` + + // If specified, only workflow executions that match this close status are counted. + // This filter has an affect only if executionStatus is specified as CLOSED. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + CloseStatusFilter *CloseStatusFilter `locationName:"closeStatusFilter" type:"structure"` + + // If specified, only workflow executions that meet the close time criteria + // of the filter are counted. + // + // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify + // one of these in a request but not both. + CloseTimeFilter *ExecutionTimeFilter `locationName:"closeTimeFilter" type:"structure"` + + // The name of the domain containing the workflow executions to count. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // If specified, only workflow executions matching the WorkflowId in the filter + // are counted. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` + + // If specified, only workflow executions that meet the start time criteria + // of the filter are counted. + // + // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify + // one of these in a request but not both. 
+ StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure"` + + // If specified, only executions that have a tag that matches the filter are + // counted. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` + + // If specified, indicates the type of the workflow executions to be counted. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` +} + +// String returns the string representation +func (s CountClosedWorkflowExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CountClosedWorkflowExecutionsInput) GoString() string { + return s.String() +} + +type CountOpenWorkflowExecutionsInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the workflow executions to count. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // If specified, only workflow executions matching the WorkflowId in the filter + // are counted. + // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. + ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` + + // Specifies the start time criteria that workflow executions must meet in order + // to be counted. + StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure" required:"true"` + + // If specified, only executions that have a tag that matches the filter are + // counted. + // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. 
+ TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` + + // Specifies the type of the workflow executions to be counted. + // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. + TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` +} + +// String returns the string representation +func (s CountOpenWorkflowExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CountOpenWorkflowExecutionsInput) GoString() string { + return s.String() +} + +type CountPendingActivityTasksInput struct { + _ struct{} `type:"structure"` + + // The name of the domain that contains the task list. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The name of the task list. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CountPendingActivityTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CountPendingActivityTasksInput) GoString() string { + return s.String() +} + +type CountPendingDecisionTasksInput struct { + _ struct{} `type:"structure"` + + // The name of the domain that contains the task list. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The name of the task list. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CountPendingDecisionTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CountPendingDecisionTasksInput) GoString() string { + return s.String() +} + +// Specifies a decision made by the decider. 
A decision can be one of these +// types: +// +// CancelTimer: cancels a previously started timer and records a TimerCanceled +// event in the history. CancelWorkflowExecution: closes the workflow execution +// and records a WorkflowExecutionCanceled event in the history. CompleteWorkflowExecution: +// closes the workflow execution and records a WorkflowExecutionCompleted event +// in the history . ContinueAsNewWorkflowExecution: closes the workflow execution +// and starts a new workflow execution of the same type using the same workflow +// ID and a unique run ID. A WorkflowExecutionContinuedAsNew event is recorded +// in the history. FailWorkflowExecution: closes the workflow execution and +// records a WorkflowExecutionFailed event in the history. RecordMarker: records +// a MarkerRecorded event in the history. Markers can be used for adding custom +// information in the history for instance to let deciders know that they do +// not need to look at the history beyond the marker event. RequestCancelActivityTask: +// attempts to cancel a previously scheduled activity task. If the activity +// task was scheduled but has not been assigned to a worker, then it will be +// canceled. If the activity task was already assigned to a worker, then the +// worker will be informed that cancellation has been requested in the response +// to RecordActivityTaskHeartbeat. RequestCancelExternalWorkflowExecution: +// requests that a request be made to cancel the specified external workflow +// execution and records a RequestCancelExternalWorkflowExecutionInitiated event +// in the history. ScheduleActivityTask: schedules an activity task. ScheduleLambdaFunction: +// schedules a AWS Lambda function. SignalExternalWorkflowExecution: requests +// a signal to be delivered to the specified external workflow execution and +// records a SignalExternalWorkflowExecutionInitiated event in the history. 
+// StartChildWorkflowExecution: requests that a child workflow execution be +// started and records a StartChildWorkflowExecutionInitiated event in the history. +// The child workflow execution is a separate workflow execution with its own +// history. StartTimer: starts a timer for this workflow execution and records +// a TimerStarted event in the history. This timer will fire after the specified +// delay and record a TimerFired event. Access Control +// +// If you grant permission to use RespondDecisionTaskCompleted, you can use +// IAM policies to express permissions for the list of decisions returned by +// this action as if they were members of the API. Treating decisions as a pseudo +// API maintains a uniform conceptual model and helps keep policies readable. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +// +// Decision Failure +// +// Decisions can fail for several reasons +// +// The ordering of decisions should follow a logical flow. Some decisions +// might not make sense in the current context of the workflow execution and +// will therefore fail. A limit on your account was reached. The decision lacks +// sufficient permissions. One of the following events might be added to the +// history to indicate an error. The event attribute's cause parameter indicates +// the cause. If cause is set to OPERATION_NOT_PERMITTED, the decision failed +// because it lacked sufficient permissions. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +// +// ScheduleActivityTaskFailed: a ScheduleActivityTask decision failed. This +// could happen if the activity type specified in the decision is not registered, +// is in a deprecated state, or the decision is not properly configured. 
ScheduleLambdaFunctionFailed: +// a ScheduleLambdaFunctionFailed decision failed. This could happen if the +// AWS Lambda function specified in the decision does not exist, or the AWS +// Lambda service's limits are exceeded. RequestCancelActivityTaskFailed: a +// RequestCancelActivityTask decision failed. This could happen if there is +// no open activity task with the specified activityId. StartTimerFailed: a +// StartTimer decision failed. This could happen if there is another open timer +// with the same timerId. CancelTimerFailed: a CancelTimer decision failed. +// This could happen if there is no open timer with the specified timerId. +// StartChildWorkflowExecutionFailed: a StartChildWorkflowExecution decision +// failed. This could happen if the workflow type specified is not registered, +// is deprecated, or the decision is not properly configured. SignalExternalWorkflowExecutionFailed: +// a SignalExternalWorkflowExecution decision failed. This could happen if the +// workflowID specified in the decision was incorrect. RequestCancelExternalWorkflowExecutionFailed: +// a RequestCancelExternalWorkflowExecution decision failed. This could happen +// if the workflowID specified in the decision was incorrect. CancelWorkflowExecutionFailed: +// a CancelWorkflowExecution decision failed. This could happen if there is +// an unhandled decision task pending in the workflow execution. CompleteWorkflowExecutionFailed: +// a CompleteWorkflowExecution decision failed. This could happen if there is +// an unhandled decision task pending in the workflow execution. ContinueAsNewWorkflowExecutionFailed: +// a ContinueAsNewWorkflowExecution decision failed. This could happen if there +// is an unhandled decision task pending in the workflow execution or the ContinueAsNewWorkflowExecution +// decision was not configured correctly. FailWorkflowExecutionFailed: a FailWorkflowExecution +// decision failed. 
This could happen if there is an unhandled decision task +// pending in the workflow execution. The preceding error events might occur +// due to an error in the decider logic, which might put the workflow execution +// in an unstable state The cause field in the event structure for the error +// event indicates the cause of the error. +// +// A workflow execution may be closed by the decider by returning one of the +// following decisions when completing a decision task: CompleteWorkflowExecution, +// FailWorkflowExecution, CancelWorkflowExecution and ContinueAsNewWorkflowExecution. +// An UnhandledDecision fault will be returned if a workflow closing decision +// is specified and a signal or activity event had been added to the history +// while the decision task was being performed by the decider. Unlike the above +// situations which are logic issues, this fault is always possible because +// of race conditions in a distributed system. The right action here is to call +// RespondDecisionTaskCompleted without any decisions. This would result in +// another decision task with these new events included in the history. The +// decider should handle the new events and may decide to close the workflow +// execution. 
How to code a decision +// +// You code a decision by first setting the decision type field to one of the +// above decision values, and then set the corresponding attributes field shown +// below: +// +// ScheduleActivityTaskDecisionAttributes ScheduleLambdaFunctionDecisionAttributes +// RequestCancelActivityTaskDecisionAttributes CompleteWorkflowExecutionDecisionAttributes +// FailWorkflowExecutionDecisionAttributes CancelWorkflowExecutionDecisionAttributes +// ContinueAsNewWorkflowExecutionDecisionAttributes RecordMarkerDecisionAttributes +// StartTimerDecisionAttributes CancelTimerDecisionAttributes SignalExternalWorkflowExecutionDecisionAttributes +// RequestCancelExternalWorkflowExecutionDecisionAttributes StartChildWorkflowExecutionDecisionAttributes +type Decision struct { + _ struct{} `type:"structure"` + + // Provides details of the CancelTimer decision. It is not set for other decision + // types. + CancelTimerDecisionAttributes *CancelTimerDecisionAttributes `locationName:"cancelTimerDecisionAttributes" type:"structure"` + + // Provides details of the CancelWorkflowExecution decision. It is not set for + // other decision types. + CancelWorkflowExecutionDecisionAttributes *CancelWorkflowExecutionDecisionAttributes `locationName:"cancelWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the CompleteWorkflowExecution decision. It is not set + // for other decision types. + CompleteWorkflowExecutionDecisionAttributes *CompleteWorkflowExecutionDecisionAttributes `locationName:"completeWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the ContinueAsNewWorkflowExecution decision. It is not + // set for other decision types. + ContinueAsNewWorkflowExecutionDecisionAttributes *ContinueAsNewWorkflowExecutionDecisionAttributes `locationName:"continueAsNewWorkflowExecutionDecisionAttributes" type:"structure"` + + // Specifies the type of the decision. 
+ DecisionType *string `locationName:"decisionType" type:"string" required:"true" enum:"DecisionType"` + + // Provides details of the FailWorkflowExecution decision. It is not set for + // other decision types. + FailWorkflowExecutionDecisionAttributes *FailWorkflowExecutionDecisionAttributes `locationName:"failWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the RecordMarker decision. It is not set for other decision + // types. + RecordMarkerDecisionAttributes *RecordMarkerDecisionAttributes `locationName:"recordMarkerDecisionAttributes" type:"structure"` + + // Provides details of the RequestCancelActivityTask decision. It is not set + // for other decision types. + RequestCancelActivityTaskDecisionAttributes *RequestCancelActivityTaskDecisionAttributes `locationName:"requestCancelActivityTaskDecisionAttributes" type:"structure"` + + // Provides details of the RequestCancelExternalWorkflowExecution decision. + // It is not set for other decision types. + RequestCancelExternalWorkflowExecutionDecisionAttributes *RequestCancelExternalWorkflowExecutionDecisionAttributes `locationName:"requestCancelExternalWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the ScheduleActivityTask decision. It is not set for + // other decision types. + ScheduleActivityTaskDecisionAttributes *ScheduleActivityTaskDecisionAttributes `locationName:"scheduleActivityTaskDecisionAttributes" type:"structure"` + + // Provides details of the ScheduleLambdaFunction decision. + // + // Access Control + // + // You can use IAM policies to control this decision's access to Amazon SWF + // resources as follows: + // + // Use a Resource element with the domain name to limit the action to only + // specified domains. Use an Action element to allow or deny permission to call + // this action. Constrain the following parameters by using a Condition element + // with the appropriate keys. activityType.name: String constraint. 
The key + // is swf:activityType.name. activityType.version: String constraint. The key + // is swf:activityType.version. taskList: String constraint. The key is swf:taskList.name. + // If the caller does not have sufficient permissions to invoke the action, + // or the parameter values fall outside the specified constraints, the action + // fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. + // For details and example IAM policies, see Using IAM to Manage Access to Amazon + // SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + ScheduleLambdaFunctionDecisionAttributes *ScheduleLambdaFunctionDecisionAttributes `locationName:"scheduleLambdaFunctionDecisionAttributes" type:"structure"` + + // Provides details of the SignalExternalWorkflowExecution decision. It is not + // set for other decision types. + SignalExternalWorkflowExecutionDecisionAttributes *SignalExternalWorkflowExecutionDecisionAttributes `locationName:"signalExternalWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the StartChildWorkflowExecution decision. It is not set + // for other decision types. + StartChildWorkflowExecutionDecisionAttributes *StartChildWorkflowExecutionDecisionAttributes `locationName:"startChildWorkflowExecutionDecisionAttributes" type:"structure"` + + // Provides details of the StartTimer decision. It is not set for other decision + // types. + StartTimerDecisionAttributes *StartTimerDecisionAttributes `locationName:"startTimerDecisionAttributes" type:"structure"` +} + +// String returns the string representation +func (s Decision) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Decision) GoString() string { + return s.String() +} + +// Provides details of the DecisionTaskCompleted event. 
+type DecisionTaskCompletedEventAttributes struct { + _ struct{} `type:"structure"` + + // User defined context for the workflow execution. + ExecutionContext *string `locationName:"executionContext" type:"string"` + + // The ID of the DecisionTaskScheduled event that was recorded when this decision + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the DecisionTaskStarted event recorded when this decision task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s DecisionTaskCompletedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecisionTaskCompletedEventAttributes) GoString() string { + return s.String() +} + +// Provides details about the DecisionTaskScheduled event. +type DecisionTaskScheduledEventAttributes struct { + _ struct{} `type:"structure"` + + // The maximum duration for this decision task. The task is considered timed + // out if it does not completed within this duration. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` + + // The name of the task list in which the decision task was scheduled. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` + + // Optional. A task priority that, if set, specifies the priority for this decision + // task. 
Valid values are integers that range from Java's Integer.MIN_VALUE + // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate + // higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + TaskPriority *string `locationName:"taskPriority" type:"string"` +} + +// String returns the string representation +func (s DecisionTaskScheduledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecisionTaskScheduledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the DecisionTaskStarted event. +type DecisionTaskStartedEventAttributes struct { + _ struct{} `type:"structure"` + + // Identity of the decider making the request. This enables diagnostic tracing + // when problems arise. The form of this identity is user defined. + Identity *string `locationName:"identity" type:"string"` + + // The ID of the DecisionTaskScheduled event that was recorded when this decision + // task was scheduled. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s DecisionTaskStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecisionTaskStartedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the DecisionTaskTimedOut event. +type DecisionTaskTimedOutEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskScheduled event that was recorded when this decision + // task was scheduled. 
This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the DecisionTaskStarted event recorded when this decision task + // was started. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The type of timeout that expired before the decision task could be completed. + TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"DecisionTaskTimeoutType"` +} + +// String returns the string representation +func (s DecisionTaskTimedOutEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecisionTaskTimedOutEventAttributes) GoString() string { + return s.String() +} + +type DeprecateActivityTypeInput struct { + _ struct{} `type:"structure"` + + // The activity type to deprecate. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The name of the domain in which the activity type is registered. 
+ Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeprecateActivityTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateActivityTypeInput) GoString() string { + return s.String() +} + +type DeprecateActivityTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeprecateActivityTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateActivityTypeOutput) GoString() string { + return s.String() +} + +type DeprecateDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to deprecate. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeprecateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateDomainInput) GoString() string { + return s.String() +} + +type DeprecateDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeprecateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateDomainOutput) GoString() string { + return s.String() +} + +type DeprecateWorkflowTypeInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the workflow type is registered. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The workflow type to deprecate. 
+ WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeprecateWorkflowTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateWorkflowTypeInput) GoString() string { + return s.String() +} + +type DeprecateWorkflowTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeprecateWorkflowTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprecateWorkflowTypeOutput) GoString() string { + return s.String() +} + +type DescribeActivityTypeInput struct { + _ struct{} `type:"structure"` + + // The activity type to get information about. Activity types are identified + // by the name and version that were supplied when the activity was registered. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The name of the domain in which the activity type is registered. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeActivityTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActivityTypeInput) GoString() string { + return s.String() +} + +// Detailed information about an activity type. +type DescribeActivityTypeOutput struct { + _ struct{} `type:"structure"` + + // The configuration settings registered with the activity type. + Configuration *ActivityTypeConfiguration `locationName:"configuration" type:"structure" required:"true"` + + // General information about the activity type. + // + // The status of activity type (returned in the ActivityTypeInfo structure) + // can be one of the following. + // + // REGISTERED: The type is registered and available. 
Workers supporting this + // type should be running. DEPRECATED: The type was deprecated using DeprecateActivityType, + // but is still in use. You should keep workers supporting this type running. + // You cannot create new tasks of this type. + TypeInfo *ActivityTypeInfo `locationName:"typeInfo" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeActivityTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActivityTypeOutput) GoString() string { + return s.String() +} + +type DescribeDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to describe. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDomainInput) GoString() string { + return s.String() +} + +// Contains details of a domain. +type DescribeDomainOutput struct { + _ struct{} `type:"structure"` + + // Contains the configuration settings of a domain. + Configuration *DomainConfiguration `locationName:"configuration" type:"structure" required:"true"` + + // Contains general information about a domain. + DomainInfo *DomainInfo `locationName:"domainInfo" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDomainOutput) GoString() string { + return s.String() +} + +type DescribeWorkflowExecutionInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the workflow execution. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The workflow execution to describe. 
+ Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeWorkflowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkflowExecutionInput) GoString() string { + return s.String() +} + +// Contains details about a workflow execution. +type DescribeWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` + + // The configuration settings for this workflow execution including timeout + // values, tasklist etc. + ExecutionConfiguration *WorkflowExecutionConfiguration `locationName:"executionConfiguration" type:"structure" required:"true"` + + // Information about the workflow execution. + ExecutionInfo *WorkflowExecutionInfo `locationName:"executionInfo" type:"structure" required:"true"` + + // The time when the last activity task was scheduled for this workflow execution. + // You can use this information to determine if the workflow has not made progress + // for an unusually long period of time and might require a corrective action. + LatestActivityTaskTimestamp *time.Time `locationName:"latestActivityTaskTimestamp" type:"timestamp" timestampFormat:"unix"` + + // The latest executionContext provided by the decider for this workflow execution. + // A decider can provide an executionContext (a free-form string) when closing + // a decision task using RespondDecisionTaskCompleted. + LatestExecutionContext *string `locationName:"latestExecutionContext" type:"string"` + + // The number of tasks for this workflow execution. This includes open and closed + // tasks of all types. 
+ OpenCounts *WorkflowExecutionOpenCounts `locationName:"openCounts" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkflowExecutionOutput) GoString() string { + return s.String() +} + +type DescribeWorkflowTypeInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which this workflow type is registered. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The workflow type to describe. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeWorkflowTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkflowTypeInput) GoString() string { + return s.String() +} + +// Contains details about a workflow type. +type DescribeWorkflowTypeOutput struct { + _ struct{} `type:"structure"` + + // Configuration settings of the workflow type registered through RegisterWorkflowType + Configuration *WorkflowTypeConfiguration `locationName:"configuration" type:"structure" required:"true"` + + // General information about the workflow type. + // + // The status of the workflow type (returned in the WorkflowTypeInfo structure) + // can be one of the following. + // + // REGISTERED: The type is registered and available. Workers supporting this + // type should be running. DEPRECATED: The type was deprecated using DeprecateWorkflowType, + // but is still in use. You should keep workers supporting this type running. + // You cannot create new workflow executions of this type. 
+ TypeInfo *WorkflowTypeInfo `locationName:"typeInfo" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeWorkflowTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkflowTypeOutput) GoString() string { + return s.String() +} + +// Contains the configuration settings of a domain. +type DomainConfiguration struct { + _ struct{} `type:"structure"` + + // The retention period for workflow executions in this domain. + WorkflowExecutionRetentionPeriodInDays *string `locationName:"workflowExecutionRetentionPeriodInDays" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DomainConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainConfiguration) GoString() string { + return s.String() +} + +// Contains general information about a domain. +type DomainInfo struct { + _ struct{} `type:"structure"` + + // The description of the domain provided through RegisterDomain. + Description *string `locationName:"description" type:"string"` + + // The name of the domain. This name is unique within the account. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The status of the domain: + // + // REGISTERED: The domain is properly registered and available. You can use + // this domain for registering types and creating new workflow executions. + // DEPRECATED: The domain was deprecated using DeprecateDomain, but is still + // in use. You should not create new workflow executions in this domain. 
+ Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` +} + +// String returns the string representation +func (s DomainInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainInfo) GoString() string { + return s.String() +} + +// Used to filter the workflow executions in visibility APIs by various time-based +// rules. Each parameter, if specified, defines a rule that must be satisfied +// by each returned query result. The parameter values are in the Unix Time +// format (https://en.wikipedia.org/wiki/Unix_time). For example: "oldestDate": +// 1325376070. +type ExecutionTimeFilter struct { + _ struct{} `type:"structure"` + + // Specifies the latest start or close date and time to return. + LatestDate *time.Time `locationName:"latestDate" type:"timestamp" timestampFormat:"unix"` + + // Specifies the oldest start or close date and time to return. + OldestDate *time.Time `locationName:"oldestDate" type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s ExecutionTimeFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutionTimeFilter) GoString() string { + return s.String() +} + +// Provides details of the ExternalWorkflowExecutionCancelRequested event. +type ExternalWorkflowExecutionCancelRequestedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding + // to the RequestCancelExternalWorkflowExecution decision to cancel this external + // workflow execution. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. 
+ InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The external workflow execution to which the cancellation request was delivered. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExternalWorkflowExecutionCancelRequestedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExternalWorkflowExecutionCancelRequestedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ExternalWorkflowExecutionSignaled event. +type ExternalWorkflowExecutionSignaledEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the SignalExternalWorkflowExecutionInitiated event corresponding + // to the SignalExternalWorkflowExecution decision to request this signal. This + // information can be useful for diagnosing problems by tracing back the chain + // of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The external workflow execution that the signal was delivered to. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExternalWorkflowExecutionSignaledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExternalWorkflowExecutionSignaledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the FailWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. 
Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type FailWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Details of the failure. + Details *string `locationName:"details" type:"string"` + + // A descriptive reason for the failure that may help in diagnostics. + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s FailWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the FailWorkflowExecutionFailed event. +type FailWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). 
+ Cause *string `locationName:"cause" type:"string" required:"true" enum:"FailWorkflowExecutionFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the FailWorkflowExecution decision to fail this execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s FailWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +type GetWorkflowExecutionHistoryInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the workflow execution. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // Specifies the workflow execution for which to return the history. + Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"` + + // The maximum number of results that will be returned per call. nextPageToken + // can be used to obtain futher pages of results. The default is 1000, which + // is the maximum allowed page size. You can, however, specify a page size smaller + // than the maximum. + // + // This is an upper limit only; the actual number of results returned per call + // may be fewer than the specified maximum. + MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. 
+ // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // When set to true, returns the events in reverse order. By default the results + // are returned in ascending order of the eventTimeStamp of the events. + ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` +} + +// String returns the string representation +func (s GetWorkflowExecutionHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWorkflowExecutionHistoryInput) GoString() string { + return s.String() +} + +// Paginated representation of a workflow history for a workflow execution. +// This is the up to date, complete and authoritative record of the events related +// to all tasks and events in the life of the workflow execution. +type GetWorkflowExecutionHistoryOutput struct { + _ struct{} `type:"structure"` + + // The list of history events. + Events []*HistoryEvent `locationName:"events" type:"list" required:"true"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` +} + +// String returns the string representation +func (s GetWorkflowExecutionHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWorkflowExecutionHistoryOutput) GoString() string { + return s.String() +} + +// Event within a workflow execution. A history event can be one of these types: +// +// WorkflowExecutionStarted: The workflow execution was started. 
WorkflowExecutionCompleted: +// The workflow execution was closed due to successful completion. WorkflowExecutionFailed: +// The workflow execution closed due to a failure. WorkflowExecutionTimedOut: +// The workflow execution was closed because a time out was exceeded. WorkflowExecutionCanceled: +// The workflow execution was successfully canceled and closed. WorkflowExecutionTerminated: +// The workflow execution was terminated. WorkflowExecutionContinuedAsNew: +// The workflow execution was closed and a new execution of the same type was +// created with the same workflowId. WorkflowExecutionCancelRequested: A request +// to cancel this workflow execution was made. DecisionTaskScheduled: A decision +// task was scheduled for the workflow execution. DecisionTaskStarted: The +// decision task was dispatched to a decider. DecisionTaskCompleted: The decider +// successfully completed a decision task by calling RespondDecisionTaskCompleted. +// DecisionTaskTimedOut: The decision task timed out. ActivityTaskScheduled: +// An activity task was scheduled for execution. ScheduleActivityTaskFailed: +// Failed to process ScheduleActivityTask decision. This happens when the decision +// is not configured properly, for example the activity type specified is not +// registered. ActivityTaskStarted: The scheduled activity task was dispatched +// to a worker. ActivityTaskCompleted: An activity worker successfully completed +// an activity task by calling RespondActivityTaskCompleted. ActivityTaskFailed: +// An activity worker failed an activity task by calling RespondActivityTaskFailed. +// ActivityTaskTimedOut: The activity task timed out. ActivityTaskCanceled: +// The activity task was successfully canceled. ActivityTaskCancelRequested: +// A RequestCancelActivityTask decision was received by the system. RequestCancelActivityTaskFailed: +// Failed to process RequestCancelActivityTask decision. This happens when the +// decision is not configured properly. 
WorkflowExecutionSignaled: An external +// signal was received for the workflow execution. MarkerRecorded: A marker +// was recorded in the workflow history as the result of a RecordMarker decision. +// TimerStarted: A timer was started for the workflow execution due to a StartTimer +// decision. StartTimerFailed: Failed to process StartTimer decision. This +// happens when the decision is not configured properly, for example a timer +// already exists with the specified timer ID. TimerFired: A timer, previously +// started for this workflow execution, fired. TimerCanceled: A timer, previously +// started for this workflow execution, was successfully canceled. CancelTimerFailed: +// Failed to process CancelTimer decision. This happens when the decision is +// not configured properly, for example no timer exists with the specified timer +// ID. StartChildWorkflowExecutionInitiated: A request was made to start a +// child workflow execution. StartChildWorkflowExecutionFailed: Failed to process +// StartChildWorkflowExecution decision. This happens when the decision is not +// configured properly, for example the workflow type specified is not registered. +// ChildWorkflowExecutionStarted: A child workflow execution was successfully +// started. ChildWorkflowExecutionCompleted: A child workflow execution, started +// by this workflow execution, completed successfully and was closed. ChildWorkflowExecutionFailed: +// A child workflow execution, started by this workflow execution, failed to +// complete successfully and was closed. ChildWorkflowExecutionTimedOut: A +// child workflow execution, started by this workflow execution, timed out and +// was closed. ChildWorkflowExecutionCanceled: A child workflow execution, +// started by this workflow execution, was canceled and closed. ChildWorkflowExecutionTerminated: +// A child workflow execution, started by this workflow execution, was terminated. 
+// SignalExternalWorkflowExecutionInitiated: A request to signal an external +// workflow was made. ExternalWorkflowExecutionSignaled: A signal, requested +// by this workflow execution, was successfully delivered to the target external +// workflow execution. SignalExternalWorkflowExecutionFailed: The request to +// signal an external workflow execution failed. RequestCancelExternalWorkflowExecutionInitiated: +// A request was made to request the cancellation of an external workflow execution. +// ExternalWorkflowExecutionCancelRequested: Request to cancel an external +// workflow execution was successfully delivered to the target execution. RequestCancelExternalWorkflowExecutionFailed: +// Request to cancel an external workflow execution failed. LambdaFunctionScheduled: +// An AWS Lambda function was scheduled for execution. LambdaFunctionStarted: +// The scheduled function was invoked in the AWS Lambda service. LambdaFunctionCompleted: +// The AWS Lambda function successfully completed. LambdaFunctionFailed: The +// AWS Lambda function execution failed. LambdaFunctionTimedOut: The AWS Lambda +// function execution timed out. ScheduleLambdaFunctionFailed: Failed to process +// ScheduleLambdaFunction decision. This happens when the workflow execution +// does not have the proper IAM role attached to invoke AWS Lambda functions. +// StartLambdaFunctionFailed: Failed to invoke the scheduled function in the +// AWS Lambda service. This happens when the AWS Lambda service is not available +// in the current region, or received too many requests. +type HistoryEvent struct { + _ struct{} `type:"structure"` + + // If the event is of type ActivityTaskcancelRequested then this member is set + // and provides detailed information about the event. It is not set for other + // event types. 
+ ActivityTaskCancelRequestedEventAttributes *ActivityTaskCancelRequestedEventAttributes `locationName:"activityTaskCancelRequestedEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskCanceled then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + ActivityTaskCanceledEventAttributes *ActivityTaskCanceledEventAttributes `locationName:"activityTaskCanceledEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskCompleted then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + ActivityTaskCompletedEventAttributes *ActivityTaskCompletedEventAttributes `locationName:"activityTaskCompletedEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskFailed then this member is set and provides + // detailed information about the event. It is not set for other event types. + ActivityTaskFailedEventAttributes *ActivityTaskFailedEventAttributes `locationName:"activityTaskFailedEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskScheduled then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + ActivityTaskScheduledEventAttributes *ActivityTaskScheduledEventAttributes `locationName:"activityTaskScheduledEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskStarted then this member is set and provides + // detailed information about the event. It is not set for other event types. + ActivityTaskStartedEventAttributes *ActivityTaskStartedEventAttributes `locationName:"activityTaskStartedEventAttributes" type:"structure"` + + // If the event is of type ActivityTaskTimedOut then this member is set and + // provides detailed information about the event. It is not set for other event + // types. 
+ ActivityTaskTimedOutEventAttributes *ActivityTaskTimedOutEventAttributes `locationName:"activityTaskTimedOutEventAttributes" type:"structure"` + + // If the event is of type CancelTimerFailed then this member is set and provides + // detailed information about the event. It is not set for other event types. + CancelTimerFailedEventAttributes *CancelTimerFailedEventAttributes `locationName:"cancelTimerFailedEventAttributes" type:"structure"` + + // If the event is of type CancelWorkflowExecutionFailed then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + CancelWorkflowExecutionFailedEventAttributes *CancelWorkflowExecutionFailedEventAttributes `locationName:"cancelWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionCanceled then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionCanceledEventAttributes *ChildWorkflowExecutionCanceledEventAttributes `locationName:"childWorkflowExecutionCanceledEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionCompleted then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionCompletedEventAttributes *ChildWorkflowExecutionCompletedEventAttributes `locationName:"childWorkflowExecutionCompletedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionFailed then this member is + // set and provides detailed information about the event. It is not set for + // other event types. 
+ ChildWorkflowExecutionFailedEventAttributes *ChildWorkflowExecutionFailedEventAttributes `locationName:"childWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionStarted then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionStartedEventAttributes *ChildWorkflowExecutionStartedEventAttributes `locationName:"childWorkflowExecutionStartedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionTerminated then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionTerminatedEventAttributes *ChildWorkflowExecutionTerminatedEventAttributes `locationName:"childWorkflowExecutionTerminatedEventAttributes" type:"structure"` + + // If the event is of type ChildWorkflowExecutionTimedOut then this member is + // set and provides detailed information about the event. It is not set for + // other event types. + ChildWorkflowExecutionTimedOutEventAttributes *ChildWorkflowExecutionTimedOutEventAttributes `locationName:"childWorkflowExecutionTimedOutEventAttributes" type:"structure"` + + // If the event is of type CompleteWorkflowExecutionFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + CompleteWorkflowExecutionFailedEventAttributes *CompleteWorkflowExecutionFailedEventAttributes `locationName:"completeWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type ContinueAsNewWorkflowExecutionFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. 
+ ContinueAsNewWorkflowExecutionFailedEventAttributes *ContinueAsNewWorkflowExecutionFailedEventAttributes `locationName:"continueAsNewWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskCompleted then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + DecisionTaskCompletedEventAttributes *DecisionTaskCompletedEventAttributes `locationName:"decisionTaskCompletedEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskScheduled then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + DecisionTaskScheduledEventAttributes *DecisionTaskScheduledEventAttributes `locationName:"decisionTaskScheduledEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskStarted then this member is set and provides + // detailed information about the event. It is not set for other event types. + DecisionTaskStartedEventAttributes *DecisionTaskStartedEventAttributes `locationName:"decisionTaskStartedEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskTimedOut then this member is set and + // provides detailed information about the event. It is not set for other event + // types. + DecisionTaskTimedOutEventAttributes *DecisionTaskTimedOutEventAttributes `locationName:"decisionTaskTimedOutEventAttributes" type:"structure"` + + // The system generated ID of the event. This ID uniquely identifies the event + // with in the workflow execution history. + EventId *int64 `locationName:"eventId" type:"long" required:"true"` + + // The date and time when the event occurred. + EventTimestamp *time.Time `locationName:"eventTimestamp" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The type of the history event. 
+ EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"` + + // If the event is of type ExternalWorkflowExecutionCancelRequested then this + // member is set and provides detailed information about the event. It is not + // set for other event types. + ExternalWorkflowExecutionCancelRequestedEventAttributes *ExternalWorkflowExecutionCancelRequestedEventAttributes `locationName:"externalWorkflowExecutionCancelRequestedEventAttributes" type:"structure"` + + // If the event is of type ExternalWorkflowExecutionSignaled then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + ExternalWorkflowExecutionSignaledEventAttributes *ExternalWorkflowExecutionSignaledEventAttributes `locationName:"externalWorkflowExecutionSignaledEventAttributes" type:"structure"` + + // If the event is of type FailWorkflowExecutionFailed then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + FailWorkflowExecutionFailedEventAttributes *FailWorkflowExecutionFailedEventAttributes `locationName:"failWorkflowExecutionFailedEventAttributes" type:"structure"` + + // Provides details for the LambdaFunctionCompleted event. + LambdaFunctionCompletedEventAttributes *LambdaFunctionCompletedEventAttributes `locationName:"lambdaFunctionCompletedEventAttributes" type:"structure"` + + // Provides details for the LambdaFunctionFailed event. + LambdaFunctionFailedEventAttributes *LambdaFunctionFailedEventAttributes `locationName:"lambdaFunctionFailedEventAttributes" type:"structure"` + + // Provides details for the LambdaFunctionScheduled event. + LambdaFunctionScheduledEventAttributes *LambdaFunctionScheduledEventAttributes `locationName:"lambdaFunctionScheduledEventAttributes" type:"structure"` + + // Provides details for the LambdaFunctionStarted event. 
+ LambdaFunctionStartedEventAttributes *LambdaFunctionStartedEventAttributes `locationName:"lambdaFunctionStartedEventAttributes" type:"structure"` + + // Provides details for the LambdaFunctionTimedOut event. + LambdaFunctionTimedOutEventAttributes *LambdaFunctionTimedOutEventAttributes `locationName:"lambdaFunctionTimedOutEventAttributes" type:"structure"` + + // If the event is of type MarkerRecorded then this member is set and provides + // detailed information about the event. It is not set for other event types. + MarkerRecordedEventAttributes *MarkerRecordedEventAttributes `locationName:"markerRecordedEventAttributes" type:"structure"` + + // If the event is of type DecisionTaskFailed then this member is set and provides + // detailed information about the event. It is not set for other event types. + RecordMarkerFailedEventAttributes *RecordMarkerFailedEventAttributes `locationName:"recordMarkerFailedEventAttributes" type:"structure"` + + // If the event is of type RequestCancelActivityTaskFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + RequestCancelActivityTaskFailedEventAttributes *RequestCancelActivityTaskFailedEventAttributes `locationName:"requestCancelActivityTaskFailedEventAttributes" type:"structure"` + + // If the event is of type RequestCancelExternalWorkflowExecutionFailed then + // this member is set and provides detailed information about the event. It + // is not set for other event types. + RequestCancelExternalWorkflowExecutionFailedEventAttributes *RequestCancelExternalWorkflowExecutionFailedEventAttributes `locationName:"requestCancelExternalWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type RequestCancelExternalWorkflowExecutionInitiated then + // this member is set and provides detailed information about the event. It + // is not set for other event types. 
+ RequestCancelExternalWorkflowExecutionInitiatedEventAttributes *RequestCancelExternalWorkflowExecutionInitiatedEventAttributes `locationName:"requestCancelExternalWorkflowExecutionInitiatedEventAttributes" type:"structure"` + + // If the event is of type ScheduleActivityTaskFailed then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + ScheduleActivityTaskFailedEventAttributes *ScheduleActivityTaskFailedEventAttributes `locationName:"scheduleActivityTaskFailedEventAttributes" type:"structure"` + + // Provides details for the ScheduleLambdaFunctionFailed event. + ScheduleLambdaFunctionFailedEventAttributes *ScheduleLambdaFunctionFailedEventAttributes `locationName:"scheduleLambdaFunctionFailedEventAttributes" type:"structure"` + + // If the event is of type SignalExternalWorkflowExecutionFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + SignalExternalWorkflowExecutionFailedEventAttributes *SignalExternalWorkflowExecutionFailedEventAttributes `locationName:"signalExternalWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type SignalExternalWorkflowExecutionInitiated then this + // member is set and provides detailed information about the event. It is not + // set for other event types. + SignalExternalWorkflowExecutionInitiatedEventAttributes *SignalExternalWorkflowExecutionInitiatedEventAttributes `locationName:"signalExternalWorkflowExecutionInitiatedEventAttributes" type:"structure"` + + // If the event is of type StartChildWorkflowExecutionFailed then this member + // is set and provides detailed information about the event. It is not set for + // other event types. 
+ StartChildWorkflowExecutionFailedEventAttributes *StartChildWorkflowExecutionFailedEventAttributes `locationName:"startChildWorkflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type StartChildWorkflowExecutionInitiated then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + StartChildWorkflowExecutionInitiatedEventAttributes *StartChildWorkflowExecutionInitiatedEventAttributes `locationName:"startChildWorkflowExecutionInitiatedEventAttributes" type:"structure"` + + // Provides details for the StartLambdaFunctionFailed event. + StartLambdaFunctionFailedEventAttributes *StartLambdaFunctionFailedEventAttributes `locationName:"startLambdaFunctionFailedEventAttributes" type:"structure"` + + // If the event is of type StartTimerFailed then this member is set and provides + // detailed information about the event. It is not set for other event types. + StartTimerFailedEventAttributes *StartTimerFailedEventAttributes `locationName:"startTimerFailedEventAttributes" type:"structure"` + + // If the event is of type TimerCanceled then this member is set and provides + // detailed information about the event. It is not set for other event types. + TimerCanceledEventAttributes *TimerCanceledEventAttributes `locationName:"timerCanceledEventAttributes" type:"structure"` + + // If the event is of type TimerFired then this member is set and provides detailed + // information about the event. It is not set for other event types. + TimerFiredEventAttributes *TimerFiredEventAttributes `locationName:"timerFiredEventAttributes" type:"structure"` + + // If the event is of type TimerStarted then this member is set and provides + // detailed information about the event. It is not set for other event types. 
+ TimerStartedEventAttributes *TimerStartedEventAttributes `locationName:"timerStartedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionCancelRequested then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + WorkflowExecutionCancelRequestedEventAttributes *WorkflowExecutionCancelRequestedEventAttributes `locationName:"workflowExecutionCancelRequestedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionCanceled then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionCanceledEventAttributes *WorkflowExecutionCanceledEventAttributes `locationName:"workflowExecutionCanceledEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionCompleted then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionCompletedEventAttributes *WorkflowExecutionCompletedEventAttributes `locationName:"workflowExecutionCompletedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionContinuedAsNew then this member + // is set and provides detailed information about the event. It is not set for + // other event types. + WorkflowExecutionContinuedAsNewEventAttributes *WorkflowExecutionContinuedAsNewEventAttributes `locationName:"workflowExecutionContinuedAsNewEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionFailed then this member is set and + // provides detailed information about the event. It is not set for other event + // types. 
+ WorkflowExecutionFailedEventAttributes *WorkflowExecutionFailedEventAttributes `locationName:"workflowExecutionFailedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionSignaled then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionSignaledEventAttributes *WorkflowExecutionSignaledEventAttributes `locationName:"workflowExecutionSignaledEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionStarted then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionStartedEventAttributes *WorkflowExecutionStartedEventAttributes `locationName:"workflowExecutionStartedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionTerminated then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionTerminatedEventAttributes *WorkflowExecutionTerminatedEventAttributes `locationName:"workflowExecutionTerminatedEventAttributes" type:"structure"` + + // If the event is of type WorkflowExecutionTimedOut then this member is set + // and provides detailed information about the event. It is not set for other + // event types. + WorkflowExecutionTimedOutEventAttributes *WorkflowExecutionTimedOutEventAttributes `locationName:"workflowExecutionTimedOutEventAttributes" type:"structure"` +} + +// String returns the string representation +func (s HistoryEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryEvent) GoString() string { + return s.String() +} + +// Provides details for the LambdaFunctionCompleted event. +type LambdaFunctionCompletedEventAttributes struct { + _ struct{} `type:"structure"` + + // The result of the function execution (if any). 
+ Result *string `locationName:"result" type:"string"` + + // The ID of the LambdaFunctionScheduled event that was recorded when this AWS + // Lambda function was scheduled. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the LambdaFunctionStarted event recorded in the history. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionCompletedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionCompletedEventAttributes) GoString() string { + return s.String() +} + +// Provides details for the LambdaFunctionFailed event. +type LambdaFunctionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The details of the failure (if any). + Details *string `locationName:"details" type:"string"` + + // The reason provided for the failure (if any). + Reason *string `locationName:"reason" type:"string"` + + // The ID of the LambdaFunctionScheduled event that was recorded when this AWS + // Lambda function was scheduled. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the LambdaFunctionStarted event recorded in the history. 
+ StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details for the LambdaFunctionScheduled event. +type LambdaFunctionScheduledEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskCompleted event for the decision that resulted + // in the scheduling of this AWS Lambda function. This information can be useful + // for diagnosing problems by tracing back the chain of events leading up to + // this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The unique Amazon SWF ID for the AWS Lambda task. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` + + // Input provided to the AWS Lambda function. + Input *string `locationName:"input" min:"1" type:"string"` + + // The name of the scheduled AWS Lambda function. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The maximum time, in seconds, that the AWS Lambda function can take to execute + // from start to close before it is marked as failed. + StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` +} + +// String returns the string representation +func (s LambdaFunctionScheduledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionScheduledEventAttributes) GoString() string { + return s.String() +} + +// Provides details for the LambdaFunctionStarted event. 
+type LambdaFunctionStartedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the LambdaFunctionScheduled event that was recorded when this AWS + // Lambda function was scheduled. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionStartedEventAttributes) GoString() string { + return s.String() +} + +// Provides details for the LambdaFunctionTimedOut event. +type LambdaFunctionTimedOutEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the LambdaFunctionScheduled event that was recorded when this AWS + // Lambda function was scheduled. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long" required:"true"` + + // The ID of the LambdaFunctionStarted event recorded in the history. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The type of the timeout that caused this event. + TimeoutType *string `locationName:"timeoutType" type:"string" enum:"LambdaFunctionTimeoutType"` +} + +// String returns the string representation +func (s LambdaFunctionTimedOutEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionTimedOutEventAttributes) GoString() string { + return s.String() +} + +type ListActivityTypesInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the activity types have been registered. 
+ Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The maximum number of results that will be returned per call. nextPageToken + // can be used to obtain futher pages of results. The default is 1000, which + // is the maximum allowed page size. You can, however, specify a page size smaller + // than the maximum. + // + // This is an upper limit only; the actual number of results returned per call + // may be fewer than the specified maximum. + MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` + + // If specified, only lists the activity types that have this name. + Name *string `locationName:"name" min:"1" type:"string"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // Specifies the registration status of the activity types to list. + RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"` + + // When set to true, returns the results in reverse order. By default, the results + // are returned in ascending alphabetical order by name of the activity types. + ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` +} + +// String returns the string representation +func (s ListActivityTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListActivityTypesInput) GoString() string { + return s.String() +} + +// Contains a paginated list of activity type information structures. 
+type ListActivityTypesOutput struct { + _ struct{} `type:"structure"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // List of activity type information. + TypeInfos []*ActivityTypeInfo `locationName:"typeInfos" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListActivityTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListActivityTypesOutput) GoString() string { + return s.String() +} + +type ListClosedWorkflowExecutionsInput struct { + _ struct{} `type:"structure"` + + // If specified, only workflow executions that match this close status are listed. + // For example, if TERMINATED is specified, then only TERMINATED workflow executions + // are listed. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + CloseStatusFilter *CloseStatusFilter `locationName:"closeStatusFilter" type:"structure"` + + // If specified, the workflow executions are included in the returned results + // based on whether their close times are within the range specified by this + // filter. Also, if this parameter is specified, the returned results are ordered + // by their close times. + // + // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify + // one of these in a request but not both. + CloseTimeFilter *ExecutionTimeFilter `locationName:"closeTimeFilter" type:"structure"` + + // The name of the domain that contains the workflow executions to list. 
+ Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // If specified, only workflow executions matching the workflow ID specified + // in the filter are returned. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` + + // The maximum number of results that will be returned per call. nextPageToken + // can be used to obtain futher pages of results. The default is 1000, which + // is the maximum allowed page size. You can, however, specify a page size smaller + // than the maximum. + // + // This is an upper limit only; the actual number of results returned per call + // may be fewer than the specified maximum. + MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // When set to true, returns the results in reverse order. By default the results + // are returned in descending order of the start or the close time of the executions. + ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` + + // If specified, the workflow executions are included in the returned results + // based on whether their start times are within the range specified by this + // filter. Also, if this parameter is specified, the returned results are ordered + // by their start times. + // + // startTimeFilter and closeTimeFilter are mutually exclusive. You must specify + // one of these in a request but not both. 
+ StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure"` + + // If specified, only executions that have the matching tag are listed. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` + + // If specified, only executions of the type specified in the filter are returned. + // + // closeStatusFilter, executionFilter, typeFilter and tagFilter are mutually + // exclusive. You can specify at most one of these in a request. + TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` +} + +// String returns the string representation +func (s ListClosedWorkflowExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClosedWorkflowExecutionsInput) GoString() string { + return s.String() +} + +type ListDomainsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results that will be returned per call. nextPageToken + // can be used to obtain futher pages of results. The default is 1000, which + // is the maximum allowed page size. You can, however, specify a page size smaller + // than the maximum. + // + // This is an upper limit only; the actual number of results returned per call + // may be fewer than the specified maximum. + MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. 
+ NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // Specifies the registration status of the domains to list. + RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"` + + // When set to true, returns the results in reverse order. By default, the results + // are returned in ascending alphabetical order by name of the domains. + ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` +} + +// String returns the string representation +func (s ListDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsInput) GoString() string { + return s.String() +} + +// Contains a paginated collection of DomainInfo structures. +type ListDomainsOutput struct { + _ struct{} `type:"structure"` + + // A list of DomainInfo structures. + DomainInfos []*DomainInfo `locationName:"domainInfos" type:"list" required:"true"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` +} + +// String returns the string representation +func (s ListDomainsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsOutput) GoString() string { + return s.String() +} + +type ListOpenWorkflowExecutionsInput struct { + _ struct{} `type:"structure"` + + // The name of the domain that contains the workflow executions to list. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // If specified, only workflow executions matching the workflow ID specified + // in the filter are returned. 
+ // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. + ExecutionFilter *WorkflowExecutionFilter `locationName:"executionFilter" type:"structure"` + + // The maximum number of results that will be returned per call. nextPageToken + // can be used to obtain futher pages of results. The default is 1000, which + // is the maximum allowed page size. You can, however, specify a page size smaller + // than the maximum. + // + // This is an upper limit only; the actual number of results returned per call + // may be fewer than the specified maximum. + MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // When set to true, returns the results in reverse order. By default the results + // are returned in descending order of the start time of the executions. + ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` + + // Workflow executions are included in the returned results based on whether + // their start times are within the range specified by this filter. + StartTimeFilter *ExecutionTimeFilter `locationName:"startTimeFilter" type:"structure" required:"true"` + + // If specified, only executions that have the matching tag are listed. + // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. + TagFilter *TagFilter `locationName:"tagFilter" type:"structure"` + + // If specified, only executions of the type specified in the filter are returned. 
+ // + // executionFilter, typeFilter and tagFilter are mutually exclusive. You can + // specify at most one of these in a request. + TypeFilter *WorkflowTypeFilter `locationName:"typeFilter" type:"structure"` +} + +// String returns the string representation +func (s ListOpenWorkflowExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenWorkflowExecutionsInput) GoString() string { + return s.String() +} + +type ListWorkflowTypesInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the workflow types have been registered. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The maximum number of results that will be returned per call. nextPageToken + // can be used to obtain futher pages of results. The default is 1000, which + // is the maximum allowed page size. You can, however, specify a page size smaller + // than the maximum. + // + // This is an upper limit only; the actual number of results returned per call + // may be fewer than the specified maximum. + MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` + + // If specified, lists the workflow type with this name. + Name *string `locationName:"name" min:"1" type:"string"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // Specifies the registration status of the workflow types to list. + RegistrationStatus *string `locationName:"registrationStatus" type:"string" required:"true" enum:"RegistrationStatus"` + + // When set to true, returns the results in reverse order. 
By default the results + // are returned in ascending alphabetical order of the name of the workflow + // types. + ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` +} + +// String returns the string representation +func (s ListWorkflowTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWorkflowTypesInput) GoString() string { + return s.String() +} + +// Contains a paginated list of information structures about workflow types. +type ListWorkflowTypesOutput struct { + _ struct{} `type:"structure"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // The list of workflow type information. + TypeInfos []*WorkflowTypeInfo `locationName:"typeInfos" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListWorkflowTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWorkflowTypesOutput) GoString() string { + return s.String() +} + +// Provides details of the MarkerRecorded event. +type MarkerRecordedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RecordMarker decision that requested this marker. This + // information can be useful for diagnosing problems by tracing back the chain + // of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // Details of the marker (if any). 
+ Details *string `locationName:"details" type:"string"` + + // The name of the marker. + MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MarkerRecordedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MarkerRecordedEventAttributes) GoString() string { + return s.String() +} + +// Contains the count of tasks in a task list. +type PendingTaskCount struct { + _ struct{} `type:"structure"` + + // The number of tasks in the task list. + Count *int64 `locationName:"count" type:"integer" required:"true"` + + // If set to true, indicates that the actual count was more than the maximum + // supported by this API and the count returned is the truncated value. + Truncated *bool `locationName:"truncated" type:"boolean"` +} + +// String returns the string representation +func (s PendingTaskCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PendingTaskCount) GoString() string { + return s.String() +} + +type PollForActivityTaskInput struct { + _ struct{} `type:"structure"` + + // The name of the domain that contains the task lists being polled. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // Identity of the worker making the request, recorded in the ActivityTaskStarted + // event in the workflow history. This enables diagnostic tracing when problems + // arise. The form of this identity is user defined. + Identity *string `locationName:"identity" type:"string"` + + // Specifies the task list to poll for activity tasks. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. 
+ TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PollForActivityTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForActivityTaskInput) GoString() string { + return s.String() +} + +// Unit of work sent to an activity worker. +type PollForActivityTaskOutput struct { + _ struct{} `type:"structure"` + + // The unique ID of the task. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // The type of this activity task. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The inputs provided when the activity task was scheduled. The form of the + // input is user defined and should be meaningful to the activity implementation. + Input *string `locationName:"input" type:"string"` + + // The ID of the ActivityTaskStarted event recorded in the history. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The opaque string used as a handle on the task. This token is used by workers + // to communicate progress and response information back to the system about + // the task. + TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` + + // The workflow execution that started this activity task. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PollForActivityTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForActivityTaskOutput) GoString() string { + return s.String() +} + +type PollForDecisionTaskInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the task lists to poll. 
+ Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // Identity of the decider making the request, which is recorded in the DecisionTaskStarted + // event in the workflow history. This enables diagnostic tracing when problems + // arise. The form of this identity is user defined. + Identity *string `locationName:"identity" type:"string"` + + // The maximum number of results that will be returned per call. nextPageToken + // can be used to obtain futher pages of results. The default is 1000, which + // is the maximum allowed page size. You can, however, specify a page size smaller + // than the maximum. + // + // This is an upper limit only; the actual number of results returned per call + // may be fewer than the specified maximum. + MaximumPageSize *int64 `locationName:"maximumPageSize" type:"integer"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + // + // The nextPageToken returned by this action cannot be used with GetWorkflowExecutionHistory + // to get the next page. You must call PollForDecisionTask again (with the nextPageToken) + // to retrieve the next page of history records. Calling PollForDecisionTask + // with a nextPageToken will not return a new decision task.. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // When set to true, returns the events in reverse order. By default the results + // are returned in ascending order of the eventTimestamp of the events. + ReverseOrder *bool `locationName:"reverseOrder" type:"boolean"` + + // Specifies the task list to poll for decision tasks. + // + // The specified string must not start or end with whitespace. 
It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PollForDecisionTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForDecisionTaskInput) GoString() string { + return s.String() +} + +// A structure that represents a decision task. Decision tasks are sent to deciders +// in order for them to make decisions. +type PollForDecisionTaskOutput struct { + _ struct{} `type:"structure"` + + // A paginated list of history events of the workflow execution. The decider + // uses this during the processing of the decision task. + Events []*HistoryEvent `locationName:"events" type:"list" required:"true"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` + + // The ID of the DecisionTaskStarted event of the previous decision task of + // this workflow execution that was processed by the decider. This can be used + // to determine the events in the history new since the last decision task received + // by the decider. + PreviousStartedEventId *int64 `locationName:"previousStartedEventId" type:"long"` + + // The ID of the DecisionTaskStarted event recorded in the history. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The opaque string used as a handle on the task. 
This token is used by workers + // to communicate progress and response information back to the system about + // the task. + TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` + + // The workflow execution for which this decision task was created. + WorkflowExecution *WorkflowExecution `locationName:"workflowExecution" type:"structure" required:"true"` + + // The type of the workflow execution for which this decision task was created. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PollForDecisionTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollForDecisionTaskOutput) GoString() string { + return s.String() +} + +type RecordActivityTaskHeartbeatInput struct { + _ struct{} `type:"structure"` + + // If specified, contains details about the progress of the task. + Details *string `locationName:"details" type:"string"` + + // The taskToken of the ActivityTask. + // + // taskToken is generated by the service and should be treated as an opaque + // value. If the task is passed to another process, its taskToken must also + // be passed. This enables it to provide its progress and respond with results. + TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RecordActivityTaskHeartbeatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordActivityTaskHeartbeatInput) GoString() string { + return s.String() +} + +// Status information about an activity task. +type RecordActivityTaskHeartbeatOutput struct { + _ struct{} `type:"structure"` + + // Set to true if cancellation of the task is requested. 
+ CancelRequested *bool `locationName:"cancelRequested" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s RecordActivityTaskHeartbeatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordActivityTaskHeartbeatOutput) GoString() string { + return s.String() +} + +// Provides details of the RecordMarker decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type RecordMarkerDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. details of the marker. + Details *string `locationName:"details" type:"string"` + + // Required. The name of the marker. + MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RecordMarkerDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordMarkerDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the RecordMarkerFailed event. +type RecordMarkerFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. 
This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"RecordMarkerFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RecordMarkerFailed decision for this cancellation request. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The marker's name. + MarkerName *string `locationName:"markerName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RecordMarkerFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordMarkerFailedEventAttributes) GoString() string { + return s.String() +} + +type RegisterActivityTypeInput struct { + _ struct{} `type:"structure"` + + // If set, specifies the default maximum time before which a worker processing + // a task of this type must report progress by calling RecordActivityTaskHeartbeat. + // If the timeout is exceeded, the activity task is automatically timed out. + // This default can be overridden when scheduling an activity task using the + // ScheduleActivityTask decision. If the activity worker subsequently attempts + // to record a heartbeat or returns a result, the activity worker receives an + // UnknownResource fault. 
In this case, Amazon SWF no longer considers the activity + // task to be valid; the activity worker should clean up the activity task. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskHeartbeatTimeout *string `locationName:"defaultTaskHeartbeatTimeout" type:"string"` + + // If set, specifies the default task list to use for scheduling tasks of this + // activity type. This default task list is used if a task list is not provided + // when a task is scheduled through the ScheduleActivityTask decision. + DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` + + // The default task priority to assign to the activity type. If not assigned, + // then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE + // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate + // higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` + + // If set, specifies the default maximum duration for a task of this activity + // type. This default can be overridden when scheduling an activity task using + // the ScheduleActivityTask decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskScheduleToCloseTimeout *string `locationName:"defaultTaskScheduleToCloseTimeout" type:"string"` + + // If set, specifies the default maximum duration that a task of this activity + // type can wait before being assigned to a worker. This default can be overridden + // when scheduling an activity task using the ScheduleActivityTask decision. 
+ // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskScheduleToStartTimeout *string `locationName:"defaultTaskScheduleToStartTimeout" type:"string"` + + // If set, specifies the default maximum duration that a worker can take to + // process tasks of this activity type. This default can be overridden when + // scheduling an activity task using the ScheduleActivityTask decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` + + // A textual description of the activity type. + Description *string `locationName:"description" type:"string"` + + // The name of the domain in which this activity is to be registered. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The name of the activity type within the domain. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The version of the activity type. + // + // The activity type consists of the name and version, the combination of which + // must be unique within the domain. The specified string must not start or + // end with whitespace. It must not contain a : (colon), / (slash), | (vertical + // bar), or any control characters (\u0000-\u001f | \u007f - \u009f). Also, + // it must not contain the literal string quotarnquot. 
+ Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterActivityTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterActivityTypeInput) GoString() string { + return s.String() +} + +type RegisterActivityTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterActivityTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterActivityTypeOutput) GoString() string { + return s.String() +} + +type RegisterDomainInput struct { + _ struct{} `type:"structure"` + + // A text description of the domain. + Description *string `locationName:"description" type:"string"` + + // Name of the domain to register. The name must be unique in the region that + // the domain is registered in. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The duration (in days) that records and histories of workflow executions + // on the domain should be kept by the service. After the retention period, + // the workflow execution is not available in the results of visibility calls. + // + // If you pass the value NONE or 0 (zero), then the workflow execution history + // will not be retained. As soon as the workflow execution completes, the execution + // record and its history are deleted. + // + // The maximum workflow execution retention period is 90 days. 
For more information + // about Amazon SWF service limits, see: Amazon SWF Service Limits (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html) + // in the Amazon SWF Developer Guide. + WorkflowExecutionRetentionPeriodInDays *string `locationName:"workflowExecutionRetentionPeriodInDays" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDomainInput) GoString() string { + return s.String() +} + +type RegisterDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDomainOutput) GoString() string { + return s.String() +} + +type RegisterWorkflowTypeInput struct { + _ struct{} `type:"structure"` + + // If set, specifies the default policy to use for the child workflow executions + // when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution + // action explicitly or due to an expired timeout. This default can be overridden + // when starting a workflow execution using the StartWorkflowExecution action + // or the StartChildWorkflowExecution decision. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. 
+ DefaultChildPolicy *string `locationName:"defaultChildPolicy" type:"string" enum:"ChildPolicy"` + + // If set, specifies the default maximum duration for executions of this workflow + // type. You can override this default when starting an execution through the + // StartWorkflowExecution action or StartChildWorkflowExecution decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot + // specify a value of "NONE" for defaultExecutionStartToCloseTimeout; there + // is a one-year max limit on the time that a workflow execution can run. Exceeding + // this limit will always cause the workflow execution to time out. + DefaultExecutionStartToCloseTimeout *string `locationName:"defaultExecutionStartToCloseTimeout" type:"string"` + + // The ARN of the default IAM role to use when a workflow execution of this + // type invokes AWS Lambda functions. + // + // This default can be overridden when starting a workflow execution using + // the StartWorkflowExecution action or the StartChildWorkflowExecution and + // ContinueAsNewWorkflowExecution decision. + DefaultLambdaRole *string `locationName:"defaultLambdaRole" min:"1" type:"string"` + + // If set, specifies the default task list to use for scheduling decision tasks + // for executions of this workflow type. This default is used only if a task + // list is not provided when starting the execution through the StartWorkflowExecution + // action or StartChildWorkflowExecution decision. + DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` + + // The default task priority to assign to the workflow type. If not assigned, + // then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE + // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate + // higher priority. 
+ // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` + + // If set, specifies the default maximum duration of decision tasks for this + // workflow type. This default can be overridden when starting a workflow execution + // using the StartWorkflowExecution action or the StartChildWorkflowExecution + // decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` + + // Textual description of the workflow type. + Description *string `locationName:"description" type:"string"` + + // The name of the domain in which to register the workflow type. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The name of the workflow type. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The version of the workflow type. + // + // The workflow type consists of the name and version, the combination of which + // must be unique within the domain. To get a list of all currently registered + // workflow types, use the ListWorkflowTypes action. The specified string must + // not start or end with whitespace. It must not contain a : (colon), / (slash), + // | (vertical bar), or any control characters (\u0000-\u001f | \u007f - \u009f). + // Also, it must not contain the literal string quotarnquot. 
+ Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterWorkflowTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterWorkflowTypeInput) GoString() string { + return s.String() +} + +type RegisterWorkflowTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterWorkflowTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterWorkflowTypeOutput) GoString() string { + return s.String() +} + +// Provides details of the RequestCancelActivityTask decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type RequestCancelActivityTaskDecisionAttributes struct { + _ struct{} `type:"structure"` + + // The activityId of the activity task to be canceled. 
+ ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelActivityTaskDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelActivityTaskDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the RequestCancelActivityTaskFailed event. +type RequestCancelActivityTaskFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The activityId provided in the RequestCancelActivityTask decision that failed. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"RequestCancelActivityTaskFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RequestCancelActivityTask decision for this cancellation + // request. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. 
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s RequestCancelActivityTaskFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelActivityTaskFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the RequestCancelExternalWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type RequestCancelExternalWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. + Control *string `locationName:"control" type:"string"` + + // The runId of the external workflow execution to cancel. + RunId *string `locationName:"runId" type:"string"` + + // Required. The workflowId of the external workflow execution to cancel. 
+ WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelExternalWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelExternalWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the RequestCancelExternalWorkflowExecutionFailed event. +type RequestCancelExternalWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"RequestCancelExternalWorkflowExecutionFailedCause"` + + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RequestCancelExternalWorkflowExecution decision for + // this cancellation request. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding + // to the RequestCancelExternalWorkflowExecution decision to cancel this external + // workflow execution. This information can be useful for diagnosing problems + // by tracing back the chain of events leading up to this event. 
+ InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The runId of the external workflow execution. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the external workflow to which the cancel request was to + // be delivered. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelExternalWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelExternalWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the RequestCancelExternalWorkflowExecutionInitiated event. +type RequestCancelExternalWorkflowExecutionInitiatedEventAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the RequestCancelExternalWorkflowExecution decision for + // this cancellation request. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The runId of the external workflow execution to be canceled. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the external workflow execution to be canceled. 
+ WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelExternalWorkflowExecutionInitiatedEventAttributes) GoString() string { + return s.String() +} + +type RequestCancelWorkflowExecutionInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the workflow execution to cancel. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The runId of the workflow execution to cancel. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the workflow execution to cancel. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RequestCancelWorkflowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelWorkflowExecutionInput) GoString() string { + return s.String() +} + +type RequestCancelWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RequestCancelWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestCancelWorkflowExecutionOutput) GoString() string { + return s.String() +} + +type RespondActivityTaskCanceledInput struct { + _ struct{} `type:"structure"` + + // Optional. Information about the cancellation. + Details *string `locationName:"details" type:"string"` + + // The taskToken of the ActivityTask. + // + // taskToken is generated by the service and should be treated as an opaque + // value. If the task is passed to another process, its taskToken must also + // be passed. 
This enables it to provide its progress and respond with results. + TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RespondActivityTaskCanceledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondActivityTaskCanceledInput) GoString() string { + return s.String() +} + +type RespondActivityTaskCanceledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RespondActivityTaskCanceledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondActivityTaskCanceledOutput) GoString() string { + return s.String() +} + +type RespondActivityTaskCompletedInput struct { + _ struct{} `type:"structure"` + + // The result of the activity task. It is a free form string that is implementation + // specific. + Result *string `locationName:"result" type:"string"` + + // The taskToken of the ActivityTask. + // + // taskToken is generated by the service and should be treated as an opaque + // value. If the task is passed to another process, its taskToken must also + // be passed. This enables it to provide its progress and respond with results. 
+ TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RespondActivityTaskCompletedInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondActivityTaskCompletedInput) GoString() string { + return s.String() +} + +type RespondActivityTaskCompletedOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RespondActivityTaskCompletedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondActivityTaskCompletedOutput) GoString() string { + return s.String() +} + +type RespondActivityTaskFailedInput struct { + _ struct{} `type:"structure"` + + // Optional. Detailed information about the failure. + Details *string `locationName:"details" type:"string"` + + // Description of the error that may assist in diagnostics. + Reason *string `locationName:"reason" type:"string"` + + // The taskToken of the ActivityTask. + // + // taskToken is generated by the service and should be treated as an opaque + // value. If the task is passed to another process, its taskToken must also + // be passed. This enables it to provide its progress and respond with results. 
+ TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RespondActivityTaskFailedInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondActivityTaskFailedInput) GoString() string { + return s.String() +} + +type RespondActivityTaskFailedOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RespondActivityTaskFailedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondActivityTaskFailedOutput) GoString() string { + return s.String() +} + +type RespondDecisionTaskCompletedInput struct { + _ struct{} `type:"structure"` + + // The list of decisions (possibly empty) made by the decider while processing + // this decision task. See the docs for the decision structure for details. + Decisions []*Decision `locationName:"decisions" type:"list"` + + // User defined context to add to workflow execution. + ExecutionContext *string `locationName:"executionContext" type:"string"` + + // The taskToken from the DecisionTask. + // + // taskToken is generated by the service and should be treated as an opaque + // value. If the task is passed to another process, its taskToken must also + // be passed. This enables it to provide its progress and respond with results. 
+ TaskToken *string `locationName:"taskToken" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RespondDecisionTaskCompletedInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondDecisionTaskCompletedInput) GoString() string { + return s.String() +} + +type RespondDecisionTaskCompletedOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RespondDecisionTaskCompletedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RespondDecisionTaskCompletedOutput) GoString() string { + return s.String() +} + +// Provides details of the ScheduleActivityTask decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. activityType.name: String constraint. The key +// is swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. taskList: String constraint. The key is swf:taskList.name. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type ScheduleActivityTaskDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Required. The activityId of the activity task. 
+ // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // Required. The type of the activity task to schedule. + ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. This data is not sent to the activity. + Control *string `locationName:"control" type:"string"` + + // If set, specifies the maximum time before which a worker processing a task + // of this type must report progress by calling RecordActivityTaskHeartbeat. + // If the timeout is exceeded, the activity task is automatically timed out. + // If the worker subsequently attempts to record a heartbeat or returns a result, + // it will be ignored. This overrides the default heartbeat timeout specified + // when registering the activity type using RegisterActivityType. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + HeartbeatTimeout *string `locationName:"heartbeatTimeout" type:"string"` + + // The input provided to the activity task. + Input *string `locationName:"input" type:"string"` + + // The maximum duration for this activity task. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + // + // A schedule-to-close timeout for this activity task must be specified either + // as a default for the activity type or through this field. 
If neither this + // field is set nor a default schedule-to-close timeout was specified at registration + // time then a fault will be returned. + ScheduleToCloseTimeout *string `locationName:"scheduleToCloseTimeout" type:"string"` + + // Optional. If set, specifies the maximum duration the activity task can wait + // to be assigned to a worker. This overrides the default schedule-to-start + // timeout specified when registering the activity type using RegisterActivityType. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + // + // A schedule-to-start timeout for this activity task must be specified either + // as a default for the activity type or through this field. If neither this + // field is set nor a default schedule-to-start timeout was specified at registration + // time then a fault will be returned. + ScheduleToStartTimeout *string `locationName:"scheduleToStartTimeout" type:"string"` + + // If set, specifies the maximum duration a worker may take to process this + // activity task. This overrides the default start-to-close timeout specified + // when registering the activity type using RegisterActivityType. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + // + // A start-to-close timeout for this activity task must be specified either + // as a default for the activity type or through this field. If neither this + // field is set nor a default start-to-close timeout was specified at registration + // time then a fault will be returned. + StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` + + // If set, specifies the name of the task list in which to schedule the activity + // task. If not specified, the defaultTaskList registered with the activity + // type will be used. 
+ // + // A task list for this activity task must be specified either as a default + // for the activity type or through this field. If neither this field is set + // nor a default task list was specified at registration time then a fault will + // be returned. The specified string must not start or end with whitespace. + // It must not contain a : (colon), / (slash), | (vertical bar), or any control + // characters (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the + // literal string quotarnquot. + TaskList *TaskList `locationName:"taskList" type:"structure"` + + // Optional. If set, specifies the priority with which the activity task is + // to be assigned to a worker. This overrides the defaultTaskPriority specified + // when registering the activity type using RegisterActivityType. Valid values + // are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE + // (2147483647). Higher numbers indicate higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + TaskPriority *string `locationName:"taskPriority" type:"string"` +} + +// String returns the string representation +func (s ScheduleActivityTaskDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleActivityTaskDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the ScheduleActivityTaskFailed event. +type ScheduleActivityTaskFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The activityId provided in the ScheduleActivityTask decision that failed. + ActivityId *string `locationName:"activityId" min:"1" type:"string" required:"true"` + + // The activity type provided in the ScheduleActivityTask decision that failed. 
+ ActivityType *ActivityType `locationName:"activityType" type:"structure" required:"true"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"ScheduleActivityTaskFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision that + // resulted in the scheduling of this activity task. This information can be + // useful for diagnosing problems by tracing back the chain of events leading + // up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` +} + +// String returns the string representation +func (s ScheduleActivityTaskFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleActivityTaskFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the ScheduleLambdaFunction decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. activityType.name: String constraint. The key +// is swf:activityType.name. activityType.version: String constraint. The key +// is swf:activityType.version. taskList: String constraint. The key is swf:taskList.name. 
+// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type ScheduleLambdaFunctionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Required. The SWF id of the AWS Lambda task. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` + + // The input provided to the AWS Lambda function. + Input *string `locationName:"input" min:"1" type:"string"` + + // Required. The name of the AWS Lambda function to invoke. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // If set, specifies the maximum duration the function may take to execute. + StartToCloseTimeout *string `locationName:"startToCloseTimeout" type:"string"` +} + +// String returns the string representation +func (s ScheduleLambdaFunctionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleLambdaFunctionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details for the ScheduleLambdaFunctionFailed event. +type ScheduleLambdaFunctionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. 
+ // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"ScheduleLambdaFunctionFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision that + // resulted in the scheduling of this AWS Lambda function. This information + // can be useful for diagnosing problems by tracing back the chain of events + // leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The unique Amazon SWF ID of the AWS Lambda task. + Id *string `locationName:"id" min:"1" type:"string" required:"true"` + + // The name of the scheduled AWS Lambda function. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ScheduleLambdaFunctionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduleLambdaFunctionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the SignalExternalWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. 
The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type SignalExternalWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // decision tasks. + Control *string `locationName:"control" type:"string"` + + // Optional. Input data to be provided with the signal. The target workflow + // execution will use the signal name and input data to process the signal. + Input *string `locationName:"input" type:"string"` + + // The runId of the workflow execution to be signaled. + RunId *string `locationName:"runId" type:"string"` + + // Required. The name of the signal.The target workflow execution will use the + // signal name and input to process the signal. + SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` + + // Required. The workflowId of the workflow execution to be signaled. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SignalExternalWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalExternalWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the SignalExternalWorkflowExecutionFailed event. +type SignalExternalWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. 
For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"SignalExternalWorkflowExecutionFailedCause"` + + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the SignalExternalWorkflowExecution decision for this signal. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The ID of the SignalExternalWorkflowExecutionInitiated event corresponding + // to the SignalExternalWorkflowExecution decision to request this signal. This + // information can be useful for diagnosing problems by tracing back the chain + // of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The runId of the external workflow execution that the signal was being delivered + // to. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the external workflow execution that the signal was being + // delivered to. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SignalExternalWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalExternalWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the SignalExternalWorkflowExecutionInitiated event. +type SignalExternalWorkflowExecutionInitiatedEventAttributes struct { + _ struct{} `type:"structure"` + + // Optional. 
data attached to the event that can be used by the decider in subsequent + // decision tasks. + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the SignalExternalWorkflowExecution decision for this signal. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // Input provided to the signal (if any). + Input *string `locationName:"input" type:"string"` + + // The runId of the external workflow execution to send the signal to. + RunId *string `locationName:"runId" type:"string"` + + // The name of the signal. + SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` + + // The workflowId of the external workflow execution. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SignalExternalWorkflowExecutionInitiatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalExternalWorkflowExecutionInitiatedEventAttributes) GoString() string { + return s.String() +} + +type SignalWorkflowExecutionInput struct { + _ struct{} `type:"structure"` + + // The name of the domain containing the workflow execution to signal. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // Data to attach to the WorkflowExecutionSignaled event in the target workflow + // execution's history. + Input *string `locationName:"input" type:"string"` + + // The runId of the workflow execution to signal. + RunId *string `locationName:"runId" type:"string"` + + // The name of the signal. This name must be meaningful to the target workflow. 
+ SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` + + // The workflowId of the workflow execution to signal. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SignalWorkflowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalWorkflowExecutionInput) GoString() string { + return s.String() +} + +type SignalWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SignalWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SignalWorkflowExecutionOutput) GoString() string { + return s.String() +} + +// Provides details of the StartChildWorkflowExecution decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. Constrain the following parameters by using a Condition element +// with the appropriate keys. tagList.member.N: The key is "swf:tagList.N" +// where N is the tag number from 0 to 4, inclusive. taskList: String constraint. +// The key is swf:taskList.name. workflowType.name: String constraint. The key +// is swf:workflowType.name. workflowType.version: String constraint. The key +// is swf:workflowType.version. If the caller does not have sufficient permissions +// to invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// will be set to OPERATION_NOT_PERMITTED. 
For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type StartChildWorkflowExecutionDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. If set, specifies the policy to use for the child workflow executions + // if the workflow execution being started is terminated by calling the TerminateWorkflowExecution + // action explicitly or due to an expired timeout. This policy overrides the + // default child policy specified when registering the workflow type using RegisterWorkflowType. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. A child policy + // for this workflow execution must be specified either as a default for the + // workflow type or through this parameter. If neither this parameter is set + // nor a default child policy was specified at registration time then a fault + // will be returned. + ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. This data is not sent to the child workflow execution. + Control *string `locationName:"control" type:"string"` + + // The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout + // specified when registering the workflow type. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. 
+ // + // An execution start-to-close timeout for this workflow execution must be + // specified either as a default for the workflow type or through this parameter. + // If neither this parameter is set nor a default execution start-to-close timeout + // was specified at registration time then a fault will be returned. + ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` + + // The input to be provided to the workflow execution. + Input *string `locationName:"input" type:"string"` + + // The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions. + // + // In order for this workflow execution to invoke AWS Lambda functions, an + // appropriate IAM role must be specified either as a default for the workflow + // type or through this field. + LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The list of tags to associate with the child workflow execution. A maximum + // of 5 tags can be specified. You can list workflow executions with a specific + // tag by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions + // and specifying a TagFilter. + TagList []*string `locationName:"tagList" type:"list"` + + // The name of the task list to be used for decision tasks of the child workflow + // execution. + // + // A task list for this workflow execution must be specified either as a default + // for the workflow type or through this parameter. If neither this parameter + // is set nor a default task list was specified at registration time then a + // fault will be returned. The specified string must not start or end with whitespace. + // It must not contain a : (colon), / (slash), | (vertical bar), or any control + // characters (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the + // literal string quotarnquot. + TaskList *TaskList `locationName:"taskList" type:"structure"` + + // Optional. 
A task priority that, if set, specifies the priority for a decision + // task of this workflow execution. This overrides the defaultTaskPriority specified + // when registering the workflow type. Valid values are integers that range + // from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). + // Higher numbers indicate higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + TaskPriority *string `locationName:"taskPriority" type:"string"` + + // Specifies the maximum duration of decision tasks for this workflow execution. + // This parameter overrides the defaultTaskStartToCloseTimout specified when + // registering the workflow type using RegisterWorkflowType. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + // + // A task start-to-close timeout for this workflow execution must be specified + // either as a default for the workflow type or through this parameter. If neither + // this parameter is set nor a default task start-to-close timeout was specified + // at registration time then a fault will be returned. + TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` + + // Required. The workflowId of the workflow execution. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` + + // Required. The type of the workflow execution to be started. 
+ WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s StartChildWorkflowExecutionDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartChildWorkflowExecutionDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the StartChildWorkflowExecutionFailed event. +type StartChildWorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"StartChildWorkflowExecutionFailedCause"` + + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the StartChildWorkflowExecution decision to request this + // child workflow execution. This information can be useful for diagnosing problems + // by tracing back the cause of events. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this child workflow execution. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + InitiatedEventId *int64 `locationName:"initiatedEventId" type:"long" required:"true"` + + // The workflowId of the child workflow execution. 
+ WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` + + // The workflow type provided in the StartChildWorkflowExecution decision that + // failed. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s StartChildWorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartChildWorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the StartChildWorkflowExecutionInitiated event. +type StartChildWorkflowExecutionInitiatedEventAttributes struct { + _ struct{} `type:"structure"` + + // The policy to use for the child workflow executions if this execution gets + // terminated by explicitly calling the TerminateWorkflowExecution action or + // due to an expired timeout. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // decision tasks. This data is not sent to the activity. + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the StartChildWorkflowExecution decision to request this + // child workflow execution. 
This information can be useful for diagnosing problems + // by tracing back the cause of events. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The maximum duration for the child workflow execution. If the workflow execution + // is not closed within this duration, it will be timed out and force terminated. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` + + // The inputs provided to the child workflow execution (if any). + Input *string `locationName:"input" type:"string"` + + // The IAM role attached to this workflow execution to use when invoking AWS + // Lambda functions. + LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The list of tags to associated with the child workflow execution. + TagList []*string `locationName:"tagList" type:"list"` + + // The name of the task list used for the decision tasks of the child workflow + // execution. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` + + // Optional. The priority assigned for the decision tasks for this workflow + // execution. Valid values are integers that range from Java's Integer.MIN_VALUE + // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate + // higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + TaskPriority *string `locationName:"taskPriority" type:"string"` + + // The maximum duration allowed for the decision tasks for this workflow execution. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. 
The value "NONE" can be used to specify unlimited duration. + TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` + + // The workflowId of the child workflow execution. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` + + // The type of the child workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s StartChildWorkflowExecutionInitiatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartChildWorkflowExecutionInitiatedEventAttributes) GoString() string { + return s.String() +} + +// Provides details for the StartLambdaFunctionFailed event. +type StartLambdaFunctionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" enum:"StartLambdaFunctionFailedCause"` + + // The error message (if any). + Message *string `locationName:"message" type:"string"` + + // The ID of the LambdaFunctionScheduled event that was recorded when this AWS + // Lambda function was scheduled. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. 
+ ScheduledEventId *int64 `locationName:"scheduledEventId" type:"long"` +} + +// String returns the string representation +func (s StartLambdaFunctionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLambdaFunctionFailedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the StartTimer decision. +// +// Access Control +// +// You can use IAM policies to control this decision's access to Amazon SWF +// resources as follows: +// +// Use a Resource element with the domain name to limit the action to only +// specified domains. Use an Action element to allow or deny permission to call +// this action. You cannot use an IAM policy to constrain this action's parameters. +// If the caller does not have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). +type StartTimerDecisionAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. + Control *string `locationName:"control" type:"string"` + + // Required. The duration to wait before firing the timer. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. + StartToFireTimeout *string `locationName:"startToFireTimeout" min:"1" type:"string" required:"true"` + + // Required. The unique ID of the timer. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). 
Also, it must not contain the literal + // string quotarnquot. + TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartTimerDecisionAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartTimerDecisionAttributes) GoString() string { + return s.String() +} + +// Provides details of the StartTimerFailed event. +type StartTimerFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The cause of the failure. This information is generated by the system and + // can be useful for diagnostic purposes. + // + // If cause is set to OPERATION_NOT_PERMITTED, the decision failed because + // it lacked sufficient permissions. For details and example IAM policies, see + // Using IAM to Manage Access to Amazon SWF Workflows (http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html). + Cause *string `locationName:"cause" type:"string" required:"true" enum:"StartTimerFailedCause"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the StartTimer decision for this activity task. This information + // can be useful for diagnosing problems by tracing back the chain of events + // leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The timerId provided in the StartTimer decision that failed. 
+ TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartTimerFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartTimerFailedEventAttributes) GoString() string { + return s.String() +} + +type StartWorkflowExecutionInput struct { + _ struct{} `type:"structure"` + + // If set, specifies the policy to use for the child workflow executions of + // this workflow execution if it is terminated, by calling the TerminateWorkflowExecution + // action explicitly or due to an expired timeout. This policy overrides the + // default child policy specified when registering the workflow type using RegisterWorkflowType. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. A child policy + // for this workflow execution must be specified either as a default for the + // workflow type or through this parameter. If neither this parameter is set + // nor a default child policy was specified at registration time then a fault + // will be returned. + ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` + + // The name of the domain in which the workflow execution is created. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout + // specified when registering the workflow type. 
+ // + // The duration is specified in seconds; an integer greater than or equal to + // 0. Exceeding this limit will cause the workflow execution to time out. Unlike + // some of the other timeout parameters in Amazon SWF, you cannot specify a + // value of "NONE" for this timeout; there is a one-year max limit on the time + // that a workflow execution can run. + // + // An execution start-to-close timeout must be specified either through this + // parameter or as a default when the workflow type is registered. If neither + // this parameter nor a default execution start-to-close timeout is specified, + // a fault is returned. + ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` + + // The input for the workflow execution. This is a free form string which should + // be meaningful to the workflow you are starting. This input is made available + // to the new workflow execution in the WorkflowExecutionStarted history event. + Input *string `locationName:"input" type:"string"` + + // The ARN of an IAM role that authorizes Amazon SWF to invoke AWS Lambda functions. + // + // In order for this workflow execution to invoke AWS Lambda functions, an + // appropriate IAM role must be specified either as a default for the workflow + // type or through this field. + LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The list of tags to associate with the workflow execution. You can specify + // a maximum of 5 tags. You can list workflow executions with a specific tag + // by calling ListOpenWorkflowExecutions or ListClosedWorkflowExecutions and + // specifying a TagFilter. + TagList []*string `locationName:"tagList" type:"list"` + + // The task list to use for the decision tasks generated for this workflow execution. + // This overrides the defaultTaskList specified when registering the workflow + // type. 
+ // + // A task list for this workflow execution must be specified either as a default + // for the workflow type or through this parameter. If neither this parameter + // is set nor a default task list was specified at registration time then a + // fault will be returned. The specified string must not start or end with whitespace. + // It must not contain a : (colon), / (slash), | (vertical bar), or any control + // characters (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the + // literal string quotarnquot. + TaskList *TaskList `locationName:"taskList" type:"structure"` + + // The task priority to use for this workflow execution. This will override + // any default priority that was assigned when the workflow type was registered. + // If not set, then the default task priority for the workflow type will be + // used. Valid values are integers that range from Java's Integer.MIN_VALUE + // (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate + // higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + TaskPriority *string `locationName:"taskPriority" type:"string"` + + // Specifies the maximum duration of decision tasks for this workflow execution. + // This parameter overrides the defaultTaskStartToCloseTimout specified when + // registering the workflow type using RegisterWorkflowType. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + // + // A task start-to-close timeout for this workflow execution must be specified + // either as a default for the workflow type or through this parameter. 
If neither + // this parameter is set nor a default task start-to-close timeout was specified + // at registration time then a fault will be returned. + TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` + + // The user defined identifier associated with the workflow execution. You can + // use this to associate a custom identifier with the workflow execution. You + // may specify the same identifier if a workflow execution is logically a restart + // of a previous execution. You cannot have two open workflow executions with + // the same workflowId at the same time. + // + // The specified string must not start or end with whitespace. It must not + // contain a : (colon), / (slash), | (vertical bar), or any control characters + // (\u0000-\u001f | \u007f - \u009f). Also, it must not contain the literal + // string quotarnquot. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` + + // The type of the workflow to start. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s StartWorkflowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartWorkflowExecutionInput) GoString() string { + return s.String() +} + +// Specifies the runId of a workflow execution. +type StartWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` + + // The runId of a workflow execution. This ID is generated by the service and + // can be used to uniquely identify the workflow execution within a domain. 
+ RunId *string `locationName:"runId" min:"1" type:"string"` +} + +// String returns the string representation +func (s StartWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartWorkflowExecutionOutput) GoString() string { + return s.String() +} + +// Used to filter the workflow executions in visibility APIs based on a tag. +type TagFilter struct { + _ struct{} `type:"structure"` + + // Required. Specifies the tag that must be associated with the execution for + // it to meet the filter criteria. + Tag *string `locationName:"tag" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TagFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagFilter) GoString() string { + return s.String() +} + +// Represents a task list. +type TaskList struct { + _ struct{} `type:"structure"` + + // The name of the task list. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TaskList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskList) GoString() string { + return s.String() +} + +type TerminateWorkflowExecutionInput struct { + _ struct{} `type:"structure"` + + // If set, specifies the policy to use for the child workflow executions of + // the workflow execution being terminated. This policy overrides the child + // policy specified for the workflow execution at registration time or when + // starting the execution. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. 
It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. A child policy + // for this workflow execution must be specified either as a default for the + // workflow type or through this parameter. If neither this parameter is set + // nor a default child policy was specified at registration time then a fault + // will be returned. + ChildPolicy *string `locationName:"childPolicy" type:"string" enum:"ChildPolicy"` + + // Optional. Details for terminating the workflow execution. + Details *string `locationName:"details" type:"string"` + + // The domain of the workflow execution to terminate. + Domain *string `locationName:"domain" min:"1" type:"string" required:"true"` + + // Optional. A descriptive reason for terminating the workflow execution. + Reason *string `locationName:"reason" type:"string"` + + // The runId of the workflow execution to terminate. + RunId *string `locationName:"runId" type:"string"` + + // The workflowId of the workflow execution to terminate. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TerminateWorkflowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkflowExecutionInput) GoString() string { + return s.String() +} + +type TerminateWorkflowExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TerminateWorkflowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkflowExecutionOutput) GoString() string { + return s.String() +} + +// Provides details of the TimerCanceled event. 
+type TimerCanceledEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the CancelTimer decision to cancel this timer. This information + // can be useful for diagnosing problems by tracing back the chain of events + // leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The ID of the TimerStarted event that was recorded when this timer was started. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The unique ID of the timer that was canceled. + TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TimerCanceledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimerCanceledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the TimerFired event. +type TimerFiredEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the TimerStarted event that was recorded when this timer was started. + // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + StartedEventId *int64 `locationName:"startedEventId" type:"long" required:"true"` + + // The unique ID of the timer that fired. 
+ TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TimerFiredEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimerFiredEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the TimerStarted event. +type TimerStartedEventAttributes struct { + _ struct{} `type:"structure"` + + // Optional. Data attached to the event that can be used by the decider in subsequent + // workflow tasks. + Control *string `locationName:"control" type:"string"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the StartTimer decision for this activity task. This information + // can be useful for diagnosing problems by tracing back the chain of events + // leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The duration of time after which the timer will fire. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. + StartToFireTimeout *string `locationName:"startToFireTimeout" min:"1" type:"string" required:"true"` + + // The unique ID of the timer that was started. + TimerId *string `locationName:"timerId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TimerStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimerStartedEventAttributes) GoString() string { + return s.String() +} + +// Represents a workflow execution. +type WorkflowExecution struct { + _ struct{} `type:"structure"` + + // A system-generated unique identifier for the workflow execution. 
+ RunId *string `locationName:"runId" min:"1" type:"string" required:"true"` + + // The user defined identifier associated with the workflow execution. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecution) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionCancelRequested event. +type WorkflowExecutionCancelRequestedEventAttributes struct { + _ struct{} `type:"structure"` + + // If set, indicates that the request to cancel the workflow execution was automatically + // generated, and specifies the cause. This happens if the parent workflow execution + // times out or is terminated, and the child policy is set to cancel child executions. + Cause *string `locationName:"cause" type:"string" enum:"WorkflowExecutionCancelRequestedCause"` + + // The ID of the RequestCancelExternalWorkflowExecutionInitiated event corresponding + // to the RequestCancelExternalWorkflowExecution decision to cancel this workflow + // execution.The source event with this ID can be found in the history of the + // source workflow execution. This information can be useful for diagnosing + // problems by tracing back the chain of events leading up to this event. + ExternalInitiatedEventId *int64 `locationName:"externalInitiatedEventId" type:"long"` + + // The external workflow execution for which the cancellation was requested. 
+ ExternalWorkflowExecution *WorkflowExecution `locationName:"externalWorkflowExecution" type:"structure"` +} + +// String returns the string representation +func (s WorkflowExecutionCancelRequestedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionCancelRequestedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionCanceled event. +type WorkflowExecutionCanceledEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the CancelWorkflowExecution decision for this cancellation + // request. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // Details for the cancellation (if any). + Details *string `locationName:"details" type:"string"` +} + +// String returns the string representation +func (s WorkflowExecutionCanceledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionCanceledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionCompleted event. +type WorkflowExecutionCompletedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the CompleteWorkflowExecution decision to complete this + // execution. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. 
+ DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The result produced by the workflow execution upon successful completion. + Result *string `locationName:"result" type:"string"` +} + +// String returns the string representation +func (s WorkflowExecutionCompletedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionCompletedEventAttributes) GoString() string { + return s.String() +} + +// The configuration settings for a workflow execution including timeout values, +// tasklist etc. These configuration settings are determined from the defaults +// specified when registering the workflow type and those specified when starting +// the workflow execution. +type WorkflowExecutionConfiguration struct { + _ struct{} `type:"structure"` + + // The policy to use for the child workflow executions if this workflow execution + // is terminated, by calling the TerminateWorkflowExecution action explicitly + // or due to an expired timeout. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // The total duration for this workflow execution. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. 
+ ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" min:"1" type:"string" required:"true"` + + // The IAM role used by this workflow execution when invoking AWS Lambda functions. + LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The task list used for the decision tasks generated for this workflow execution. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` + + // The priority assigned to decision tasks for this workflow execution. Valid + // values are integers that range from Java's Integer.MIN_VALUE (-2147483648) + // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + TaskPriority *string `locationName:"taskPriority" type:"string"` + + // The maximum duration allowed for decision tasks for this workflow execution. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionConfiguration) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionContinuedAsNew event. +type WorkflowExecutionContinuedAsNewEventAttributes struct { + _ struct{} `type:"structure"` + + // The policy to use for the child workflow executions of the new execution + // if it is terminated by calling the TerminateWorkflowExecution action explicitly + // or due to an expired timeout. 
+ // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the ContinueAsNewWorkflowExecution decision that started + // this execution. This information can be useful for diagnosing problems by + // tracing back the chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The total duration allowed for the new workflow execution. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` + + // The input provided to the new workflow execution. + Input *string `locationName:"input" type:"string"` + + // The IAM role attached to this workflow execution to use when invoking AWS + // Lambda functions. + LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The runId of the new workflow execution. + NewExecutionRunId *string `locationName:"newExecutionRunId" min:"1" type:"string" required:"true"` + + // The list of tags associated with the new workflow execution. + TagList []*string `locationName:"tagList" type:"list"` + + // Represents a task list. 
+ TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` + + TaskPriority *string `locationName:"taskPriority" type:"string"` + + // The maximum duration of decision tasks for the new workflow execution. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` + + // Represents a workflow type. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionContinuedAsNewEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionContinuedAsNewEventAttributes) GoString() string { + return s.String() +} + +// Contains the count of workflow executions returned from CountOpenWorkflowExecutions +// or CountClosedWorkflowExecutions +type WorkflowExecutionCount struct { + _ struct{} `type:"structure"` + + // The number of workflow executions. + Count *int64 `locationName:"count" type:"integer" required:"true"` + + // If set to true, indicates that the actual count was more than the maximum + // supported by this API and the count returned is the truncated value. + Truncated *bool `locationName:"truncated" type:"boolean"` +} + +// String returns the string representation +func (s WorkflowExecutionCount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionCount) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionFailed event. +type WorkflowExecutionFailedEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the DecisionTaskCompleted event corresponding to the decision task + // that resulted in the FailWorkflowExecution decision to fail this execution. 
+ // This information can be useful for diagnosing problems by tracing back the + // chain of events leading up to this event. + DecisionTaskCompletedEventId *int64 `locationName:"decisionTaskCompletedEventId" type:"long" required:"true"` + + // The details of the failure (if any). + Details *string `locationName:"details" type:"string"` + + // The descriptive reason provided for the failure (if any). + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s WorkflowExecutionFailedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionFailedEventAttributes) GoString() string { + return s.String() +} + +// Used to filter the workflow executions in visibility APIs by their workflowId. +type WorkflowExecutionFilter struct { + _ struct{} `type:"structure"` + + // The workflowId to pass of match the criteria of this filter. + WorkflowId *string `locationName:"workflowId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionFilter) GoString() string { + return s.String() +} + +// Contains information about a workflow execution. +type WorkflowExecutionInfo struct { + _ struct{} `type:"structure"` + + // Set to true if a cancellation is requested for this workflow execution. + CancelRequested *bool `locationName:"cancelRequested" type:"boolean"` + + // If the execution status is closed then this specifies how the execution was + // closed: + // + // COMPLETED: the execution was successfully completed. CANCELED: the execution + // was canceled.Cancellation allows the implementation to gracefully clean up + // before the execution is closed. TERMINATED: the execution was force terminated. + // FAILED: the execution failed to complete. 
TIMED_OUT: the execution did + // not complete in the alloted time and was automatically timed out. CONTINUED_AS_NEW: + // the execution is logically continued. This means the current execution was + // completed and a new execution was started to carry on the workflow. + CloseStatus *string `locationName:"closeStatus" type:"string" enum:"CloseStatus"` + + // The time when the workflow execution was closed. Set only if the execution + // status is CLOSED. + CloseTimestamp *time.Time `locationName:"closeTimestamp" type:"timestamp" timestampFormat:"unix"` + + // The workflow execution this information is about. + Execution *WorkflowExecution `locationName:"execution" type:"structure" required:"true"` + + // The current status of the execution. + ExecutionStatus *string `locationName:"executionStatus" type:"string" required:"true" enum:"ExecutionStatus"` + + // If this workflow execution is a child of another execution then contains + // the workflow execution that started this execution. + Parent *WorkflowExecution `locationName:"parent" type:"structure"` + + // The time when the execution was started. + StartTimestamp *time.Time `locationName:"startTimestamp" type:"timestamp" timestampFormat:"unix" required:"true"` + + // The list of tags associated with the workflow execution. Tags can be used + // to identify and list workflow executions of interest through the visibility + // APIs. A workflow execution can have a maximum of 5 tags. + TagList []*string `locationName:"tagList" type:"list"` + + // The type of the workflow execution. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionInfo) GoString() string { + return s.String() +} + +// Contains a paginated list of information about workflow executions. 
+type WorkflowExecutionInfos struct { + _ struct{} `type:"structure"` + + // The list of workflow information structures. + ExecutionInfos []*WorkflowExecutionInfo `locationName:"executionInfos" type:"list" required:"true"` + + // If a NextPageToken was returned by a previous call, there are more results + // available. To retrieve the next page of results, make the call again using + // the returned token in nextPageToken. Keep all other arguments unchanged. + // + // The configured maximumPageSize determines how many results can be returned + // in a single call. + NextPageToken *string `locationName:"nextPageToken" type:"string"` +} + +// String returns the string representation +func (s WorkflowExecutionInfos) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionInfos) GoString() string { + return s.String() +} + +// Contains the counts of open tasks, child workflow executions and timers for +// a workflow execution. +type WorkflowExecutionOpenCounts struct { + _ struct{} `type:"structure"` + + // The count of activity tasks whose status is OPEN. + OpenActivityTasks *int64 `locationName:"openActivityTasks" type:"integer" required:"true"` + + // The count of child workflow executions whose status is OPEN. + OpenChildWorkflowExecutions *int64 `locationName:"openChildWorkflowExecutions" type:"integer" required:"true"` + + // The count of decision tasks whose status is OPEN. A workflow execution can + // have at most one open decision task. + OpenDecisionTasks *int64 `locationName:"openDecisionTasks" type:"integer" required:"true"` + + // The count of AWS Lambda functions that are currently executing. + OpenLambdaFunctions *int64 `locationName:"openLambdaFunctions" type:"integer"` + + // The count of timers started by this workflow execution that have not fired + // yet. 
+ OpenTimers *int64 `locationName:"openTimers" type:"integer" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionOpenCounts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionOpenCounts) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionSignaled event. +type WorkflowExecutionSignaledEventAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the SignalExternalWorkflowExecutionInitiated event corresponding + // to the SignalExternalWorkflow decision to signal this workflow execution.The + // source event with this ID can be found in the history of the source workflow + // execution. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. This field is set only + // if the signal was initiated by another workflow execution. + ExternalInitiatedEventId *int64 `locationName:"externalInitiatedEventId" type:"long"` + + // The workflow execution that sent the signal. This is set only of the signal + // was sent by another workflow execution. + ExternalWorkflowExecution *WorkflowExecution `locationName:"externalWorkflowExecution" type:"structure"` + + // Inputs provided with the signal (if any). The decider can use the signal + // name and inputs to determine how to process the signal. + Input *string `locationName:"input" type:"string"` + + // The name of the signal received. The decider can use the signal name and + // inputs to determine how to the process the signal. 
+ SignalName *string `locationName:"signalName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionSignaledEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionSignaledEventAttributes) GoString() string { + return s.String() +} + +// Provides details of WorkflowExecutionStarted event. +type WorkflowExecutionStartedEventAttributes struct { + _ struct{} `type:"structure"` + + // The policy to use for the child workflow executions if this workflow execution + // is terminated, by calling the TerminateWorkflowExecution action explicitly + // or due to an expired timeout. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // If this workflow execution was started due to a ContinueAsNewWorkflowExecution + // decision, then it contains the runId of the previous workflow execution that + // was closed and continued as this execution. + ContinuedExecutionRunId *string `locationName:"continuedExecutionRunId" type:"string"` + + // The maximum duration for this workflow execution. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + ExecutionStartToCloseTimeout *string `locationName:"executionStartToCloseTimeout" type:"string"` + + // The input provided to the workflow execution (if any). 
+ Input *string `locationName:"input" type:"string"` + + // The IAM role attached to this workflow execution to use when invoking AWS + // Lambda functions. + LambdaRole *string `locationName:"lambdaRole" min:"1" type:"string"` + + // The ID of the StartChildWorkflowExecutionInitiated event corresponding to + // the StartChildWorkflowExecution decision to start this workflow execution. + // The source event with this ID can be found in the history of the source workflow + // execution. This information can be useful for diagnosing problems by tracing + // back the chain of events leading up to this event. + ParentInitiatedEventId *int64 `locationName:"parentInitiatedEventId" type:"long"` + + // The source workflow execution that started this workflow execution. The member + // is not set if the workflow execution was not started by a workflow. + ParentWorkflowExecution *WorkflowExecution `locationName:"parentWorkflowExecution" type:"structure"` + + // The list of tags associated with this workflow execution. An execution can + // have up to 5 tags. + TagList []*string `locationName:"tagList" type:"list"` + + // The name of the task list for scheduling the decision tasks for this workflow + // execution. + TaskList *TaskList `locationName:"taskList" type:"structure" required:"true"` + + TaskPriority *string `locationName:"taskPriority" type:"string"` + + // The maximum duration of decision tasks for this workflow type. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + TaskStartToCloseTimeout *string `locationName:"taskStartToCloseTimeout" type:"string"` + + // The workflow type of this execution. 
+ WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s WorkflowExecutionStartedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionStartedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionTerminated event. +type WorkflowExecutionTerminatedEventAttributes struct { + _ struct{} `type:"structure"` + + // If set, indicates that the workflow execution was automatically terminated, + // and specifies the cause. This happens if the parent workflow execution times + // out or is terminated and the child policy is set to terminate child executions. + Cause *string `locationName:"cause" type:"string" enum:"WorkflowExecutionTerminatedCause"` + + // The policy used for the child workflow executions of this workflow execution. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // The details provided for the termination (if any). + Details *string `locationName:"details" type:"string"` + + // The reason provided for the termination (if any). 
+ Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s WorkflowExecutionTerminatedEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionTerminatedEventAttributes) GoString() string { + return s.String() +} + +// Provides details of the WorkflowExecutionTimedOut event. +type WorkflowExecutionTimedOutEventAttributes struct { + _ struct{} `type:"structure"` + + // The policy used for the child workflow executions of this workflow execution. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + ChildPolicy *string `locationName:"childPolicy" type:"string" required:"true" enum:"ChildPolicy"` + + // The type of timeout that caused this event. + TimeoutType *string `locationName:"timeoutType" type:"string" required:"true" enum:"WorkflowExecutionTimeoutType"` +} + +// String returns the string representation +func (s WorkflowExecutionTimedOutEventAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowExecutionTimedOutEventAttributes) GoString() string { + return s.String() +} + +// Represents a workflow type. +type WorkflowType struct { + _ struct{} `type:"structure"` + + // Required. The name of the workflow type. + // + // The combination of workflow type name and version must be unique with in + // a domain. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Required. The version of the workflow type. 
+ // + // The combination of workflow type name and version must be unique with in + // a domain. + Version *string `locationName:"version" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WorkflowType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowType) GoString() string { + return s.String() +} + +// The configuration settings of a workflow type. +type WorkflowTypeConfiguration struct { + _ struct{} `type:"structure"` + + // Optional. The default policy to use for the child workflow executions when + // a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution + // action explicitly or due to an expired timeout. This default can be overridden + // when starting a workflow execution using the StartWorkflowExecution action + // or the StartChildWorkflowExecution decision. + // + // The supported child policies are: + // + // TERMINATE: the child executions will be terminated. REQUEST_CANCEL: a request + // to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested + // event in its history. It is up to the decider to take appropriate actions + // when it receives an execution history with this event. ABANDON: no action + // will be taken. The child executions will continue to run. + DefaultChildPolicy *string `locationName:"defaultChildPolicy" type:"string" enum:"ChildPolicy"` + + // Optional. The default maximum duration, specified when registering the workflow + // type, for executions of this workflow type. This default can be overridden + // when starting a workflow execution using the StartWorkflowExecution action + // or the StartChildWorkflowExecution decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. 
+ DefaultExecutionStartToCloseTimeout *string `locationName:"defaultExecutionStartToCloseTimeout" type:"string"` + + // The default IAM role to use when a workflow execution invokes a AWS Lambda + // function. + DefaultLambdaRole *string `locationName:"defaultLambdaRole" min:"1" type:"string"` + + // Optional. The default task list, specified when registering the workflow + // type, for decisions tasks scheduled for workflow executions of this type. + // This default can be overridden when starting a workflow execution using the + // StartWorkflowExecution action or the StartChildWorkflowExecution decision. + DefaultTaskList *TaskList `locationName:"defaultTaskList" type:"structure"` + + // Optional. The default task priority, specified when registering the workflow + // type, for all decision tasks of this workflow type. This default can be overridden + // when starting a workflow execution using the StartWorkflowExecution action + // or the StartChildWorkflowExecution decision. + // + // Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) + // to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority. + // + // For more information about setting task priority, see Setting Task Priority + // (http://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html) + // in the Amazon Simple Workflow Developer Guide. + DefaultTaskPriority *string `locationName:"defaultTaskPriority" type:"string"` + + // Optional. The default maximum duration, specified when registering the workflow + // type, that a decision task for executions of this workflow type might take + // before returning completion or failure. If the task does not close in the + // specified time then the task is automatically timed out and rescheduled. + // If the decider eventually reports a completion or failure, it is ignored. 
+ // This default can be overridden when starting a workflow execution using the + // StartWorkflowExecution action or the StartChildWorkflowExecution decision. + // + // The duration is specified in seconds; an integer greater than or equal to + // 0. The value "NONE" can be used to specify unlimited duration. + DefaultTaskStartToCloseTimeout *string `locationName:"defaultTaskStartToCloseTimeout" type:"string"` +} + +// String returns the string representation +func (s WorkflowTypeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowTypeConfiguration) GoString() string { + return s.String() +} + +// Used to filter workflow execution query results by type. Each parameter, +// if specified, defines a rule that must be satisfied by each returned result. +type WorkflowTypeFilter struct { + _ struct{} `type:"structure"` + + // Required. Name of the workflow type. + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Version of the workflow type. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s WorkflowTypeFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowTypeFilter) GoString() string { + return s.String() +} + +// Contains information about a workflow type. +type WorkflowTypeInfo struct { + _ struct{} `type:"structure"` + + // The date when this type was registered. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp" timestampFormat:"unix" required:"true"` + + // If the type is in deprecated state, then it is set to the date when the type + // was deprecated. + DeprecationDate *time.Time `locationName:"deprecationDate" type:"timestamp" timestampFormat:"unix"` + + // The description of the type registered through RegisterWorkflowType. 
+ Description *string `locationName:"description" type:"string"` + + // The current status of the workflow type. + Status *string `locationName:"status" type:"string" required:"true" enum:"RegistrationStatus"` + + // The workflow type this information is about. + WorkflowType *WorkflowType `locationName:"workflowType" type:"structure" required:"true"` +} + +// String returns the string representation +func (s WorkflowTypeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowTypeInfo) GoString() string { + return s.String() +} + +const ( + // @enum ActivityTaskTimeoutType + ActivityTaskTimeoutTypeStartToClose = "START_TO_CLOSE" + // @enum ActivityTaskTimeoutType + ActivityTaskTimeoutTypeScheduleToStart = "SCHEDULE_TO_START" + // @enum ActivityTaskTimeoutType + ActivityTaskTimeoutTypeScheduleToClose = "SCHEDULE_TO_CLOSE" + // @enum ActivityTaskTimeoutType + ActivityTaskTimeoutTypeHeartbeat = "HEARTBEAT" +) + +const ( + // @enum CancelTimerFailedCause + CancelTimerFailedCauseTimerIdUnknown = "TIMER_ID_UNKNOWN" + // @enum CancelTimerFailedCause + CancelTimerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum CancelWorkflowExecutionFailedCause + CancelWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" + // @enum CancelWorkflowExecutionFailedCause + CancelWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum ChildPolicy + ChildPolicyTerminate = "TERMINATE" + // @enum ChildPolicy + ChildPolicyRequestCancel = "REQUEST_CANCEL" + // @enum ChildPolicy + ChildPolicyAbandon = "ABANDON" +) + +const ( + // @enum CloseStatus + CloseStatusCompleted = "COMPLETED" + // @enum CloseStatus + CloseStatusFailed = "FAILED" + // @enum CloseStatus + CloseStatusCanceled = "CANCELED" + // @enum CloseStatus + CloseStatusTerminated = "TERMINATED" + // @enum CloseStatus + CloseStatusContinuedAsNew = "CONTINUED_AS_NEW" + // @enum 
CloseStatus + CloseStatusTimedOut = "TIMED_OUT" +) + +const ( + // @enum CompleteWorkflowExecutionFailedCause + CompleteWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" + // @enum CompleteWorkflowExecutionFailedCause + CompleteWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDeprecated = "WORKFLOW_TYPE_DEPRECATED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist = "WORKFLOW_TYPE_DOES_NOT_EXIST" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseDefaultChildPolicyUndefined = "DEFAULT_CHILD_POLICY_UNDEFINED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseContinueAsNewWorkflowExecutionRateExceeded = "CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED" + // @enum ContinueAsNewWorkflowExecutionFailedCause + ContinueAsNewWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum DecisionTaskTimeoutType + DecisionTaskTimeoutTypeStartToClose = "START_TO_CLOSE" +) + +const ( + // @enum DecisionType + DecisionTypeScheduleActivityTask = 
"ScheduleActivityTask" + // @enum DecisionType + DecisionTypeRequestCancelActivityTask = "RequestCancelActivityTask" + // @enum DecisionType + DecisionTypeCompleteWorkflowExecution = "CompleteWorkflowExecution" + // @enum DecisionType + DecisionTypeFailWorkflowExecution = "FailWorkflowExecution" + // @enum DecisionType + DecisionTypeCancelWorkflowExecution = "CancelWorkflowExecution" + // @enum DecisionType + DecisionTypeContinueAsNewWorkflowExecution = "ContinueAsNewWorkflowExecution" + // @enum DecisionType + DecisionTypeRecordMarker = "RecordMarker" + // @enum DecisionType + DecisionTypeStartTimer = "StartTimer" + // @enum DecisionType + DecisionTypeCancelTimer = "CancelTimer" + // @enum DecisionType + DecisionTypeSignalExternalWorkflowExecution = "SignalExternalWorkflowExecution" + // @enum DecisionType + DecisionTypeRequestCancelExternalWorkflowExecution = "RequestCancelExternalWorkflowExecution" + // @enum DecisionType + DecisionTypeStartChildWorkflowExecution = "StartChildWorkflowExecution" + // @enum DecisionType + DecisionTypeScheduleLambdaFunction = "ScheduleLambdaFunction" +) + +const ( + // @enum EventType + EventTypeWorkflowExecutionStarted = "WorkflowExecutionStarted" + // @enum EventType + EventTypeWorkflowExecutionCancelRequested = "WorkflowExecutionCancelRequested" + // @enum EventType + EventTypeWorkflowExecutionCompleted = "WorkflowExecutionCompleted" + // @enum EventType + EventTypeCompleteWorkflowExecutionFailed = "CompleteWorkflowExecutionFailed" + // @enum EventType + EventTypeWorkflowExecutionFailed = "WorkflowExecutionFailed" + // @enum EventType + EventTypeFailWorkflowExecutionFailed = "FailWorkflowExecutionFailed" + // @enum EventType + EventTypeWorkflowExecutionTimedOut = "WorkflowExecutionTimedOut" + // @enum EventType + EventTypeWorkflowExecutionCanceled = "WorkflowExecutionCanceled" + // @enum EventType + EventTypeCancelWorkflowExecutionFailed = "CancelWorkflowExecutionFailed" + // @enum EventType + 
EventTypeWorkflowExecutionContinuedAsNew = "WorkflowExecutionContinuedAsNew" + // @enum EventType + EventTypeContinueAsNewWorkflowExecutionFailed = "ContinueAsNewWorkflowExecutionFailed" + // @enum EventType + EventTypeWorkflowExecutionTerminated = "WorkflowExecutionTerminated" + // @enum EventType + EventTypeDecisionTaskScheduled = "DecisionTaskScheduled" + // @enum EventType + EventTypeDecisionTaskStarted = "DecisionTaskStarted" + // @enum EventType + EventTypeDecisionTaskCompleted = "DecisionTaskCompleted" + // @enum EventType + EventTypeDecisionTaskTimedOut = "DecisionTaskTimedOut" + // @enum EventType + EventTypeActivityTaskScheduled = "ActivityTaskScheduled" + // @enum EventType + EventTypeScheduleActivityTaskFailed = "ScheduleActivityTaskFailed" + // @enum EventType + EventTypeActivityTaskStarted = "ActivityTaskStarted" + // @enum EventType + EventTypeActivityTaskCompleted = "ActivityTaskCompleted" + // @enum EventType + EventTypeActivityTaskFailed = "ActivityTaskFailed" + // @enum EventType + EventTypeActivityTaskTimedOut = "ActivityTaskTimedOut" + // @enum EventType + EventTypeActivityTaskCanceled = "ActivityTaskCanceled" + // @enum EventType + EventTypeActivityTaskCancelRequested = "ActivityTaskCancelRequested" + // @enum EventType + EventTypeRequestCancelActivityTaskFailed = "RequestCancelActivityTaskFailed" + // @enum EventType + EventTypeWorkflowExecutionSignaled = "WorkflowExecutionSignaled" + // @enum EventType + EventTypeMarkerRecorded = "MarkerRecorded" + // @enum EventType + EventTypeRecordMarkerFailed = "RecordMarkerFailed" + // @enum EventType + EventTypeTimerStarted = "TimerStarted" + // @enum EventType + EventTypeStartTimerFailed = "StartTimerFailed" + // @enum EventType + EventTypeTimerFired = "TimerFired" + // @enum EventType + EventTypeTimerCanceled = "TimerCanceled" + // @enum EventType + EventTypeCancelTimerFailed = "CancelTimerFailed" + // @enum EventType + EventTypeStartChildWorkflowExecutionInitiated = 
"StartChildWorkflowExecutionInitiated" + // @enum EventType + EventTypeStartChildWorkflowExecutionFailed = "StartChildWorkflowExecutionFailed" + // @enum EventType + EventTypeChildWorkflowExecutionStarted = "ChildWorkflowExecutionStarted" + // @enum EventType + EventTypeChildWorkflowExecutionCompleted = "ChildWorkflowExecutionCompleted" + // @enum EventType + EventTypeChildWorkflowExecutionFailed = "ChildWorkflowExecutionFailed" + // @enum EventType + EventTypeChildWorkflowExecutionTimedOut = "ChildWorkflowExecutionTimedOut" + // @enum EventType + EventTypeChildWorkflowExecutionCanceled = "ChildWorkflowExecutionCanceled" + // @enum EventType + EventTypeChildWorkflowExecutionTerminated = "ChildWorkflowExecutionTerminated" + // @enum EventType + EventTypeSignalExternalWorkflowExecutionInitiated = "SignalExternalWorkflowExecutionInitiated" + // @enum EventType + EventTypeSignalExternalWorkflowExecutionFailed = "SignalExternalWorkflowExecutionFailed" + // @enum EventType + EventTypeExternalWorkflowExecutionSignaled = "ExternalWorkflowExecutionSignaled" + // @enum EventType + EventTypeRequestCancelExternalWorkflowExecutionInitiated = "RequestCancelExternalWorkflowExecutionInitiated" + // @enum EventType + EventTypeRequestCancelExternalWorkflowExecutionFailed = "RequestCancelExternalWorkflowExecutionFailed" + // @enum EventType + EventTypeExternalWorkflowExecutionCancelRequested = "ExternalWorkflowExecutionCancelRequested" + // @enum EventType + EventTypeLambdaFunctionScheduled = "LambdaFunctionScheduled" + // @enum EventType + EventTypeLambdaFunctionStarted = "LambdaFunctionStarted" + // @enum EventType + EventTypeLambdaFunctionCompleted = "LambdaFunctionCompleted" + // @enum EventType + EventTypeLambdaFunctionFailed = "LambdaFunctionFailed" + // @enum EventType + EventTypeLambdaFunctionTimedOut = "LambdaFunctionTimedOut" + // @enum EventType + EventTypeScheduleLambdaFunctionFailed = "ScheduleLambdaFunctionFailed" + // @enum EventType + 
EventTypeStartLambdaFunctionFailed = "StartLambdaFunctionFailed" +) + +const ( + // @enum ExecutionStatus + ExecutionStatusOpen = "OPEN" + // @enum ExecutionStatus + ExecutionStatusClosed = "CLOSED" +) + +const ( + // @enum FailWorkflowExecutionFailedCause + FailWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" + // @enum FailWorkflowExecutionFailedCause + FailWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum LambdaFunctionTimeoutType + LambdaFunctionTimeoutTypeStartToClose = "START_TO_CLOSE" +) + +const ( + // @enum RecordMarkerFailedCause + RecordMarkerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum RegistrationStatus + RegistrationStatusRegistered = "REGISTERED" + // @enum RegistrationStatus + RegistrationStatusDeprecated = "DEPRECATED" +) + +const ( + // @enum RequestCancelActivityTaskFailedCause + RequestCancelActivityTaskFailedCauseActivityIdUnknown = "ACTIVITY_ID_UNKNOWN" + // @enum RequestCancelActivityTaskFailedCause + RequestCancelActivityTaskFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum RequestCancelExternalWorkflowExecutionFailedCause + RequestCancelExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" + // @enum RequestCancelExternalWorkflowExecutionFailedCause + RequestCancelExternalWorkflowExecutionFailedCauseRequestCancelExternalWorkflowExecutionRateExceeded = "REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" + // @enum RequestCancelExternalWorkflowExecutionFailedCause + RequestCancelExternalWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseActivityTypeDeprecated = "ACTIVITY_TYPE_DEPRECATED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseActivityTypeDoesNotExist = "ACTIVITY_TYPE_DOES_NOT_EXIST" 
+ // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseActivityIdAlreadyInUse = "ACTIVITY_ID_ALREADY_IN_USE" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseOpenActivitiesLimitExceeded = "OPEN_ACTIVITIES_LIMIT_EXCEEDED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseActivityCreationRateExceeded = "ACTIVITY_CREATION_RATE_EXCEEDED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultScheduleToCloseTimeoutUndefined = "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultScheduleToStartTimeoutUndefined = "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultStartToCloseTimeoutUndefined = "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseDefaultHeartbeatTimeoutUndefined = "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED" + // @enum ScheduleActivityTaskFailedCause + ScheduleActivityTaskFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum ScheduleLambdaFunctionFailedCause + ScheduleLambdaFunctionFailedCauseIdAlreadyInUse = "ID_ALREADY_IN_USE" + // @enum ScheduleLambdaFunctionFailedCause + ScheduleLambdaFunctionFailedCauseOpenLambdaFunctionsLimitExceeded = "OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED" + // @enum ScheduleLambdaFunctionFailedCause + ScheduleLambdaFunctionFailedCauseLambdaFunctionCreationRateExceeded = "LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED" + // @enum ScheduleLambdaFunctionFailedCause + ScheduleLambdaFunctionFailedCauseLambdaServiceNotAvailableInRegion = "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" +) + +const ( + // @enum SignalExternalWorkflowExecutionFailedCause + 
SignalExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" + // @enum SignalExternalWorkflowExecutionFailedCause + SignalExternalWorkflowExecutionFailedCauseSignalExternalWorkflowExecutionRateExceeded = "SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" + // @enum SignalExternalWorkflowExecutionFailedCause + SignalExternalWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist = "WORKFLOW_TYPE_DOES_NOT_EXIST" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseWorkflowTypeDeprecated = "WORKFLOW_TYPE_DEPRECATED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseOpenChildrenLimitExceeded = "OPEN_CHILDREN_LIMIT_EXCEEDED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseOpenWorkflowsLimitExceeded = "OPEN_WORKFLOWS_LIMIT_EXCEEDED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseChildCreationRateExceeded = "CHILD_CREATION_RATE_EXCEEDED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseWorkflowAlreadyRunning = "WORKFLOW_ALREADY_RUNNING" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseDefaultTaskListUndefined = "DEFAULT_TASK_LIST_UNDEFINED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseDefaultChildPolicyUndefined = 
"DEFAULT_CHILD_POLICY_UNDEFINED" + // @enum StartChildWorkflowExecutionFailedCause + StartChildWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum StartLambdaFunctionFailedCause + StartLambdaFunctionFailedCauseAssumeRoleFailed = "ASSUME_ROLE_FAILED" +) + +const ( + // @enum StartTimerFailedCause + StartTimerFailedCauseTimerIdAlreadyInUse = "TIMER_ID_ALREADY_IN_USE" + // @enum StartTimerFailedCause + StartTimerFailedCauseOpenTimersLimitExceeded = "OPEN_TIMERS_LIMIT_EXCEEDED" + // @enum StartTimerFailedCause + StartTimerFailedCauseTimerCreationRateExceeded = "TIMER_CREATION_RATE_EXCEEDED" + // @enum StartTimerFailedCause + StartTimerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" +) + +const ( + // @enum WorkflowExecutionCancelRequestedCause + WorkflowExecutionCancelRequestedCauseChildPolicyApplied = "CHILD_POLICY_APPLIED" +) + +const ( + // @enum WorkflowExecutionTerminatedCause + WorkflowExecutionTerminatedCauseChildPolicyApplied = "CHILD_POLICY_APPLIED" + // @enum WorkflowExecutionTerminatedCause + WorkflowExecutionTerminatedCauseEventLimitExceeded = "EVENT_LIMIT_EXCEEDED" + // @enum WorkflowExecutionTerminatedCause + WorkflowExecutionTerminatedCauseOperatorInitiated = "OPERATOR_INITIATED" +) + +const ( + // @enum WorkflowExecutionTimeoutType + WorkflowExecutionTimeoutTypeStartToClose = "START_TO_CLOSE" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,900 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package swf_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/swf" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleSWF_CountClosedWorkflowExecutions() { + svc := swf.New(session.New()) + + params := &swf.CountClosedWorkflowExecutionsInput{ + Domain: aws.String("DomainName"), // Required + CloseStatusFilter: &swf.CloseStatusFilter{ + Status: aws.String("CloseStatus"), // Required + }, + CloseTimeFilter: &swf.ExecutionTimeFilter{ + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + ExecutionFilter: &swf.WorkflowExecutionFilter{ + WorkflowId: aws.String("WorkflowId"), // Required + }, + StartTimeFilter: &swf.ExecutionTimeFilter{ + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + TagFilter: &swf.TagFilter{ + Tag: aws.String("Tag"), // Required + }, + TypeFilter: &swf.WorkflowTypeFilter{ + Name: aws.String("Name"), // Required + Version: aws.String("VersionOptional"), + }, + } + resp, err := svc.CountClosedWorkflowExecutions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_CountOpenWorkflowExecutions() { + svc := swf.New(session.New()) + + params := &swf.CountOpenWorkflowExecutionsInput{ + Domain: aws.String("DomainName"), // Required + StartTimeFilter: &swf.ExecutionTimeFilter{ // Required + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + ExecutionFilter: &swf.WorkflowExecutionFilter{ + WorkflowId: aws.String("WorkflowId"), // Required + }, + TagFilter: &swf.TagFilter{ + Tag: aws.String("Tag"), // Required + }, + TypeFilter: &swf.WorkflowTypeFilter{ + Name: aws.String("Name"), // Required + Version: aws.String("VersionOptional"), + }, + } + resp, err := svc.CountOpenWorkflowExecutions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_CountPendingActivityTasks() { + svc := swf.New(session.New()) + + params := &swf.CountPendingActivityTasksInput{ + Domain: aws.String("DomainName"), // Required + TaskList: &swf.TaskList{ // Required + Name: aws.String("Name"), // Required + }, + } + resp, err := svc.CountPendingActivityTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_CountPendingDecisionTasks() { + svc := swf.New(session.New()) + + params := &swf.CountPendingDecisionTasksInput{ + Domain: aws.String("DomainName"), // Required + TaskList: &swf.TaskList{ // Required + Name: aws.String("Name"), // Required + }, + } + resp, err := svc.CountPendingDecisionTasks(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_DeprecateActivityType() { + svc := swf.New(session.New()) + + params := &swf.DeprecateActivityTypeInput{ + ActivityType: &swf.ActivityType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + Domain: aws.String("DomainName"), // Required + } + resp, err := svc.DeprecateActivityType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DeprecateDomain() { + svc := swf.New(session.New()) + + params := &swf.DeprecateDomainInput{ + Name: aws.String("DomainName"), // Required + } + resp, err := svc.DeprecateDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DeprecateWorkflowType() { + svc := swf.New(session.New()) + + params := &swf.DeprecateWorkflowTypeInput{ + Domain: aws.String("DomainName"), // Required + WorkflowType: &swf.WorkflowType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + } + resp, err := svc.DeprecateWorkflowType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_DescribeActivityType() { + svc := swf.New(session.New()) + + params := &swf.DescribeActivityTypeInput{ + ActivityType: &swf.ActivityType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + Domain: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeActivityType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DescribeDomain() { + svc := swf.New(session.New()) + + params := &swf.DescribeDomainInput{ + Name: aws.String("DomainName"), // Required + } + resp, err := svc.DescribeDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_DescribeWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.DescribeWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + Execution: &swf.WorkflowExecution{ // Required + RunId: aws.String("RunId"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + }, + } + resp, err := svc.DescribeWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_DescribeWorkflowType() { + svc := swf.New(session.New()) + + params := &swf.DescribeWorkflowTypeInput{ + Domain: aws.String("DomainName"), // Required + WorkflowType: &swf.WorkflowType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + } + resp, err := svc.DescribeWorkflowType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_GetWorkflowExecutionHistory() { + svc := swf.New(session.New()) + + params := &swf.GetWorkflowExecutionHistoryInput{ + Domain: aws.String("DomainName"), // Required + Execution: &swf.WorkflowExecution{ // Required + RunId: aws.String("RunId"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + }, + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.GetWorkflowExecutionHistory(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_ListActivityTypes() { + svc := swf.New(session.New()) + + params := &swf.ListActivityTypesInput{ + Domain: aws.String("DomainName"), // Required + RegistrationStatus: aws.String("RegistrationStatus"), // Required + MaximumPageSize: aws.Int64(1), + Name: aws.String("Name"), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.ListActivityTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_ListClosedWorkflowExecutions() { + svc := swf.New(session.New()) + + params := &swf.ListClosedWorkflowExecutionsInput{ + Domain: aws.String("DomainName"), // Required + CloseStatusFilter: &swf.CloseStatusFilter{ + Status: aws.String("CloseStatus"), // Required + }, + CloseTimeFilter: &swf.ExecutionTimeFilter{ + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + ExecutionFilter: &swf.WorkflowExecutionFilter{ + WorkflowId: aws.String("WorkflowId"), // Required + }, + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + StartTimeFilter: &swf.ExecutionTimeFilter{ + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + TagFilter: &swf.TagFilter{ + Tag: aws.String("Tag"), // Required + }, + TypeFilter: &swf.WorkflowTypeFilter{ + Name: aws.String("Name"), // Required + Version: aws.String("VersionOptional"), + }, + } + resp, err := svc.ListClosedWorkflowExecutions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_ListDomains() { + svc := swf.New(session.New()) + + params := &swf.ListDomainsInput{ + RegistrationStatus: aws.String("RegistrationStatus"), // Required + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.ListDomains(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_ListOpenWorkflowExecutions() { + svc := swf.New(session.New()) + + params := &swf.ListOpenWorkflowExecutionsInput{ + Domain: aws.String("DomainName"), // Required + StartTimeFilter: &swf.ExecutionTimeFilter{ // Required + OldestDate: aws.Time(time.Now()), // Required + LatestDate: aws.Time(time.Now()), + }, + ExecutionFilter: &swf.WorkflowExecutionFilter{ + WorkflowId: aws.String("WorkflowId"), // Required + }, + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + TagFilter: &swf.TagFilter{ + Tag: aws.String("Tag"), // Required + }, + TypeFilter: &swf.WorkflowTypeFilter{ + Name: aws.String("Name"), // Required + Version: aws.String("VersionOptional"), + }, + } + resp, err := svc.ListOpenWorkflowExecutions(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_ListWorkflowTypes() { + svc := swf.New(session.New()) + + params := &swf.ListWorkflowTypesInput{ + Domain: aws.String("DomainName"), // Required + RegistrationStatus: aws.String("RegistrationStatus"), // Required + MaximumPageSize: aws.Int64(1), + Name: aws.String("Name"), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.ListWorkflowTypes(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_PollForActivityTask() { + svc := swf.New(session.New()) + + params := &swf.PollForActivityTaskInput{ + Domain: aws.String("DomainName"), // Required + TaskList: &swf.TaskList{ // Required + Name: aws.String("Name"), // Required + }, + Identity: aws.String("Identity"), + } + resp, err := svc.PollForActivityTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_PollForDecisionTask() { + svc := swf.New(session.New()) + + params := &swf.PollForDecisionTaskInput{ + Domain: aws.String("DomainName"), // Required + TaskList: &swf.TaskList{ // Required + Name: aws.String("Name"), // Required + }, + Identity: aws.String("Identity"), + MaximumPageSize: aws.Int64(1), + NextPageToken: aws.String("PageToken"), + ReverseOrder: aws.Bool(true), + } + resp, err := svc.PollForDecisionTask(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RecordActivityTaskHeartbeat() { + svc := swf.New(session.New()) + + params := &swf.RecordActivityTaskHeartbeatInput{ + TaskToken: aws.String("TaskToken"), // Required + Details: aws.String("LimitedData"), + } + resp, err := svc.RecordActivityTaskHeartbeat(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_RegisterActivityType() { + svc := swf.New(session.New()) + + params := &swf.RegisterActivityTypeInput{ + Domain: aws.String("DomainName"), // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + DefaultTaskHeartbeatTimeout: aws.String("DurationInSecondsOptional"), + DefaultTaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + DefaultTaskPriority: aws.String("TaskPriority"), + DefaultTaskScheduleToCloseTimeout: aws.String("DurationInSecondsOptional"), + DefaultTaskScheduleToStartTimeout: aws.String("DurationInSecondsOptional"), + DefaultTaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Description: aws.String("Description"), + } + resp, err := svc.RegisterActivityType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RegisterDomain() { + svc := swf.New(session.New()) + + params := &swf.RegisterDomainInput{ + Name: aws.String("DomainName"), // Required + WorkflowExecutionRetentionPeriodInDays: aws.String("DurationInDays"), // Required + Description: aws.String("Description"), + } + resp, err := svc.RegisterDomain(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_RegisterWorkflowType() { + svc := swf.New(session.New()) + + params := &swf.RegisterWorkflowTypeInput{ + Domain: aws.String("DomainName"), // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + DefaultChildPolicy: aws.String("ChildPolicy"), + DefaultExecutionStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + DefaultLambdaRole: aws.String("Arn"), + DefaultTaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + DefaultTaskPriority: aws.String("TaskPriority"), + DefaultTaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Description: aws.String("Description"), + } + resp, err := svc.RegisterWorkflowType(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RequestCancelWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.RequestCancelWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + RunId: aws.String("RunIdOptional"), + } + resp, err := svc.RequestCancelWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RespondActivityTaskCanceled() { + svc := swf.New(session.New()) + + params := &swf.RespondActivityTaskCanceledInput{ + TaskToken: aws.String("TaskToken"), // Required + Details: aws.String("Data"), + } + resp, err := svc.RespondActivityTaskCanceled(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_RespondActivityTaskCompleted() { + svc := swf.New(session.New()) + + params := &swf.RespondActivityTaskCompletedInput{ + TaskToken: aws.String("TaskToken"), // Required + Result: aws.String("Data"), + } + resp, err := svc.RespondActivityTaskCompleted(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_RespondActivityTaskFailed() { + svc := swf.New(session.New()) + + params := &swf.RespondActivityTaskFailedInput{ + TaskToken: aws.String("TaskToken"), // Required + Details: aws.String("Data"), + Reason: aws.String("FailureReason"), + } + resp, err := svc.RespondActivityTaskFailed(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_RespondDecisionTaskCompleted() { + svc := swf.New(session.New()) + + params := &swf.RespondDecisionTaskCompletedInput{ + TaskToken: aws.String("TaskToken"), // Required + Decisions: []*swf.Decision{ + { // Required + DecisionType: aws.String("DecisionType"), // Required + CancelTimerDecisionAttributes: &swf.CancelTimerDecisionAttributes{ + TimerId: aws.String("TimerId"), // Required + }, + CancelWorkflowExecutionDecisionAttributes: &swf.CancelWorkflowExecutionDecisionAttributes{ + Details: aws.String("Data"), + }, + CompleteWorkflowExecutionDecisionAttributes: &swf.CompleteWorkflowExecutionDecisionAttributes{ + Result: aws.String("Data"), + }, + ContinueAsNewWorkflowExecutionDecisionAttributes: &swf.ContinueAsNewWorkflowExecutionDecisionAttributes{ + ChildPolicy: aws.String("ChildPolicy"), + ExecutionStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Input: aws.String("Data"), + LambdaRole: aws.String("Arn"), + TagList: []*string{ + aws.String("Tag"), // Required + // More values... 
+ }, + TaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + TaskPriority: aws.String("TaskPriority"), + TaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + WorkflowTypeVersion: aws.String("Version"), + }, + FailWorkflowExecutionDecisionAttributes: &swf.FailWorkflowExecutionDecisionAttributes{ + Details: aws.String("Data"), + Reason: aws.String("FailureReason"), + }, + RecordMarkerDecisionAttributes: &swf.RecordMarkerDecisionAttributes{ + MarkerName: aws.String("MarkerName"), // Required + Details: aws.String("Data"), + }, + RequestCancelActivityTaskDecisionAttributes: &swf.RequestCancelActivityTaskDecisionAttributes{ + ActivityId: aws.String("ActivityId"), // Required + }, + RequestCancelExternalWorkflowExecutionDecisionAttributes: &swf.RequestCancelExternalWorkflowExecutionDecisionAttributes{ + WorkflowId: aws.String("WorkflowId"), // Required + Control: aws.String("Data"), + RunId: aws.String("RunIdOptional"), + }, + ScheduleActivityTaskDecisionAttributes: &swf.ScheduleActivityTaskDecisionAttributes{ + ActivityId: aws.String("ActivityId"), // Required + ActivityType: &swf.ActivityType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + Control: aws.String("Data"), + HeartbeatTimeout: aws.String("DurationInSecondsOptional"), + Input: aws.String("Data"), + ScheduleToCloseTimeout: aws.String("DurationInSecondsOptional"), + ScheduleToStartTimeout: aws.String("DurationInSecondsOptional"), + StartToCloseTimeout: aws.String("DurationInSecondsOptional"), + TaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + TaskPriority: aws.String("TaskPriority"), + }, + ScheduleLambdaFunctionDecisionAttributes: &swf.ScheduleLambdaFunctionDecisionAttributes{ + Id: aws.String("FunctionId"), // Required + Name: aws.String("FunctionName"), // Required + Input: aws.String("FunctionInput"), + StartToCloseTimeout: aws.String("DurationInSecondsOptional"), + }, + 
SignalExternalWorkflowExecutionDecisionAttributes: &swf.SignalExternalWorkflowExecutionDecisionAttributes{ + SignalName: aws.String("SignalName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + Control: aws.String("Data"), + Input: aws.String("Data"), + RunId: aws.String("RunIdOptional"), + }, + StartChildWorkflowExecutionDecisionAttributes: &swf.StartChildWorkflowExecutionDecisionAttributes{ + WorkflowId: aws.String("WorkflowId"), // Required + WorkflowType: &swf.WorkflowType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + ChildPolicy: aws.String("ChildPolicy"), + Control: aws.String("Data"), + ExecutionStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Input: aws.String("Data"), + LambdaRole: aws.String("Arn"), + TagList: []*string{ + aws.String("Tag"), // Required + // More values... + }, + TaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + TaskPriority: aws.String("TaskPriority"), + TaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + }, + StartTimerDecisionAttributes: &swf.StartTimerDecisionAttributes{ + StartToFireTimeout: aws.String("DurationInSeconds"), // Required + TimerId: aws.String("TimerId"), // Required + Control: aws.String("Data"), + }, + }, + // More values... + }, + ExecutionContext: aws.String("Data"), + } + resp, err := svc.RespondDecisionTaskCompleted(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_SignalWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.SignalWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + SignalName: aws.String("SignalName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + Input: aws.String("Data"), + RunId: aws.String("RunIdOptional"), + } + resp, err := svc.SignalWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleSWF_StartWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.StartWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + WorkflowType: &swf.WorkflowType{ // Required + Name: aws.String("Name"), // Required + Version: aws.String("Version"), // Required + }, + ChildPolicy: aws.String("ChildPolicy"), + ExecutionStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + Input: aws.String("Data"), + LambdaRole: aws.String("Arn"), + TagList: []*string{ + aws.String("Tag"), // Required + // More values... + }, + TaskList: &swf.TaskList{ + Name: aws.String("Name"), // Required + }, + TaskPriority: aws.String("TaskPriority"), + TaskStartToCloseTimeout: aws.String("DurationInSecondsOptional"), + } + resp, err := svc.StartWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleSWF_TerminateWorkflowExecution() { + svc := swf.New(session.New()) + + params := &swf.TerminateWorkflowExecutionInput{ + Domain: aws.String("DomainName"), // Required + WorkflowId: aws.String("WorkflowId"), // Required + ChildPolicy: aws.String("ChildPolicy"), + Details: aws.String("Data"), + Reason: aws.String("TerminateReason"), + RunId: aws.String("RunIdOptional"), + } + resp, err := svc.TerminateWorkflowExecution(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package swf + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build applications +// that use Amazon's cloud to coordinate work across distributed components. +// In Amazon SWF, a task represents a logical unit of work that is performed +// by a component of your workflow. Coordinating tasks in a workflow involves +// managing intertask dependencies, scheduling, and concurrency in accordance +// with the logical flow of the application. 
+// +// Amazon SWF gives you full control over implementing tasks and coordinating +// them without worrying about underlying complexities such as tracking their +// progress and maintaining their state. +// +// This documentation serves as reference only. For a broader overview of the +// Amazon SWF programming model, see the Amazon SWF Developer Guide (http://docs.aws.amazon.com/amazonswf/latest/developerguide/). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SWF struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "swf" + +// New creates a new instance of the SWF client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SWF client from just a session. +// svc := swf.New(mySession) +// +// // Create a SWF client with additional configuration +// svc := swf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SWF { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SWF { + svc := &SWF{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-01-25", + JSONVersion: "1.0", + TargetPrefix: "SimpleWorkflowService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SWF operation and runs any +// custom request initialization. +func (c *SWF) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/swfiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/swfiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/swfiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/swf/swfiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,152 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package swfiface provides an interface for the Amazon Simple Workflow Service. 
+package swfiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/swf" +) + +// SWFAPI is the interface type for swf.SWF. +type SWFAPI interface { + CountClosedWorkflowExecutionsRequest(*swf.CountClosedWorkflowExecutionsInput) (*request.Request, *swf.WorkflowExecutionCount) + + CountClosedWorkflowExecutions(*swf.CountClosedWorkflowExecutionsInput) (*swf.WorkflowExecutionCount, error) + + CountOpenWorkflowExecutionsRequest(*swf.CountOpenWorkflowExecutionsInput) (*request.Request, *swf.WorkflowExecutionCount) + + CountOpenWorkflowExecutions(*swf.CountOpenWorkflowExecutionsInput) (*swf.WorkflowExecutionCount, error) + + CountPendingActivityTasksRequest(*swf.CountPendingActivityTasksInput) (*request.Request, *swf.PendingTaskCount) + + CountPendingActivityTasks(*swf.CountPendingActivityTasksInput) (*swf.PendingTaskCount, error) + + CountPendingDecisionTasksRequest(*swf.CountPendingDecisionTasksInput) (*request.Request, *swf.PendingTaskCount) + + CountPendingDecisionTasks(*swf.CountPendingDecisionTasksInput) (*swf.PendingTaskCount, error) + + DeprecateActivityTypeRequest(*swf.DeprecateActivityTypeInput) (*request.Request, *swf.DeprecateActivityTypeOutput) + + DeprecateActivityType(*swf.DeprecateActivityTypeInput) (*swf.DeprecateActivityTypeOutput, error) + + DeprecateDomainRequest(*swf.DeprecateDomainInput) (*request.Request, *swf.DeprecateDomainOutput) + + DeprecateDomain(*swf.DeprecateDomainInput) (*swf.DeprecateDomainOutput, error) + + DeprecateWorkflowTypeRequest(*swf.DeprecateWorkflowTypeInput) (*request.Request, *swf.DeprecateWorkflowTypeOutput) + + DeprecateWorkflowType(*swf.DeprecateWorkflowTypeInput) (*swf.DeprecateWorkflowTypeOutput, error) + + DescribeActivityTypeRequest(*swf.DescribeActivityTypeInput) (*request.Request, *swf.DescribeActivityTypeOutput) + + DescribeActivityType(*swf.DescribeActivityTypeInput) (*swf.DescribeActivityTypeOutput, error) + + DescribeDomainRequest(*swf.DescribeDomainInput) 
(*request.Request, *swf.DescribeDomainOutput) + + DescribeDomain(*swf.DescribeDomainInput) (*swf.DescribeDomainOutput, error) + + DescribeWorkflowExecutionRequest(*swf.DescribeWorkflowExecutionInput) (*request.Request, *swf.DescribeWorkflowExecutionOutput) + + DescribeWorkflowExecution(*swf.DescribeWorkflowExecutionInput) (*swf.DescribeWorkflowExecutionOutput, error) + + DescribeWorkflowTypeRequest(*swf.DescribeWorkflowTypeInput) (*request.Request, *swf.DescribeWorkflowTypeOutput) + + DescribeWorkflowType(*swf.DescribeWorkflowTypeInput) (*swf.DescribeWorkflowTypeOutput, error) + + GetWorkflowExecutionHistoryRequest(*swf.GetWorkflowExecutionHistoryInput) (*request.Request, *swf.GetWorkflowExecutionHistoryOutput) + + GetWorkflowExecutionHistory(*swf.GetWorkflowExecutionHistoryInput) (*swf.GetWorkflowExecutionHistoryOutput, error) + + GetWorkflowExecutionHistoryPages(*swf.GetWorkflowExecutionHistoryInput, func(*swf.GetWorkflowExecutionHistoryOutput, bool) bool) error + + ListActivityTypesRequest(*swf.ListActivityTypesInput) (*request.Request, *swf.ListActivityTypesOutput) + + ListActivityTypes(*swf.ListActivityTypesInput) (*swf.ListActivityTypesOutput, error) + + ListActivityTypesPages(*swf.ListActivityTypesInput, func(*swf.ListActivityTypesOutput, bool) bool) error + + ListClosedWorkflowExecutionsRequest(*swf.ListClosedWorkflowExecutionsInput) (*request.Request, *swf.WorkflowExecutionInfos) + + ListClosedWorkflowExecutions(*swf.ListClosedWorkflowExecutionsInput) (*swf.WorkflowExecutionInfos, error) + + ListClosedWorkflowExecutionsPages(*swf.ListClosedWorkflowExecutionsInput, func(*swf.WorkflowExecutionInfos, bool) bool) error + + ListDomainsRequest(*swf.ListDomainsInput) (*request.Request, *swf.ListDomainsOutput) + + ListDomains(*swf.ListDomainsInput) (*swf.ListDomainsOutput, error) + + ListDomainsPages(*swf.ListDomainsInput, func(*swf.ListDomainsOutput, bool) bool) error + + ListOpenWorkflowExecutionsRequest(*swf.ListOpenWorkflowExecutionsInput) (*request.Request, 
*swf.WorkflowExecutionInfos) + + ListOpenWorkflowExecutions(*swf.ListOpenWorkflowExecutionsInput) (*swf.WorkflowExecutionInfos, error) + + ListOpenWorkflowExecutionsPages(*swf.ListOpenWorkflowExecutionsInput, func(*swf.WorkflowExecutionInfos, bool) bool) error + + ListWorkflowTypesRequest(*swf.ListWorkflowTypesInput) (*request.Request, *swf.ListWorkflowTypesOutput) + + ListWorkflowTypes(*swf.ListWorkflowTypesInput) (*swf.ListWorkflowTypesOutput, error) + + ListWorkflowTypesPages(*swf.ListWorkflowTypesInput, func(*swf.ListWorkflowTypesOutput, bool) bool) error + + PollForActivityTaskRequest(*swf.PollForActivityTaskInput) (*request.Request, *swf.PollForActivityTaskOutput) + + PollForActivityTask(*swf.PollForActivityTaskInput) (*swf.PollForActivityTaskOutput, error) + + PollForDecisionTaskRequest(*swf.PollForDecisionTaskInput) (*request.Request, *swf.PollForDecisionTaskOutput) + + PollForDecisionTask(*swf.PollForDecisionTaskInput) (*swf.PollForDecisionTaskOutput, error) + + PollForDecisionTaskPages(*swf.PollForDecisionTaskInput, func(*swf.PollForDecisionTaskOutput, bool) bool) error + + RecordActivityTaskHeartbeatRequest(*swf.RecordActivityTaskHeartbeatInput) (*request.Request, *swf.RecordActivityTaskHeartbeatOutput) + + RecordActivityTaskHeartbeat(*swf.RecordActivityTaskHeartbeatInput) (*swf.RecordActivityTaskHeartbeatOutput, error) + + RegisterActivityTypeRequest(*swf.RegisterActivityTypeInput) (*request.Request, *swf.RegisterActivityTypeOutput) + + RegisterActivityType(*swf.RegisterActivityTypeInput) (*swf.RegisterActivityTypeOutput, error) + + RegisterDomainRequest(*swf.RegisterDomainInput) (*request.Request, *swf.RegisterDomainOutput) + + RegisterDomain(*swf.RegisterDomainInput) (*swf.RegisterDomainOutput, error) + + RegisterWorkflowTypeRequest(*swf.RegisterWorkflowTypeInput) (*request.Request, *swf.RegisterWorkflowTypeOutput) + + RegisterWorkflowType(*swf.RegisterWorkflowTypeInput) (*swf.RegisterWorkflowTypeOutput, error) + + 
RequestCancelWorkflowExecutionRequest(*swf.RequestCancelWorkflowExecutionInput) (*request.Request, *swf.RequestCancelWorkflowExecutionOutput) + + RequestCancelWorkflowExecution(*swf.RequestCancelWorkflowExecutionInput) (*swf.RequestCancelWorkflowExecutionOutput, error) + + RespondActivityTaskCanceledRequest(*swf.RespondActivityTaskCanceledInput) (*request.Request, *swf.RespondActivityTaskCanceledOutput) + + RespondActivityTaskCanceled(*swf.RespondActivityTaskCanceledInput) (*swf.RespondActivityTaskCanceledOutput, error) + + RespondActivityTaskCompletedRequest(*swf.RespondActivityTaskCompletedInput) (*request.Request, *swf.RespondActivityTaskCompletedOutput) + + RespondActivityTaskCompleted(*swf.RespondActivityTaskCompletedInput) (*swf.RespondActivityTaskCompletedOutput, error) + + RespondActivityTaskFailedRequest(*swf.RespondActivityTaskFailedInput) (*request.Request, *swf.RespondActivityTaskFailedOutput) + + RespondActivityTaskFailed(*swf.RespondActivityTaskFailedInput) (*swf.RespondActivityTaskFailedOutput, error) + + RespondDecisionTaskCompletedRequest(*swf.RespondDecisionTaskCompletedInput) (*request.Request, *swf.RespondDecisionTaskCompletedOutput) + + RespondDecisionTaskCompleted(*swf.RespondDecisionTaskCompletedInput) (*swf.RespondDecisionTaskCompletedOutput, error) + + SignalWorkflowExecutionRequest(*swf.SignalWorkflowExecutionInput) (*request.Request, *swf.SignalWorkflowExecutionOutput) + + SignalWorkflowExecution(*swf.SignalWorkflowExecutionInput) (*swf.SignalWorkflowExecutionOutput, error) + + StartWorkflowExecutionRequest(*swf.StartWorkflowExecutionInput) (*request.Request, *swf.StartWorkflowExecutionOutput) + + StartWorkflowExecution(*swf.StartWorkflowExecutionInput) (*swf.StartWorkflowExecutionOutput, error) + + TerminateWorkflowExecutionRequest(*swf.TerminateWorkflowExecutionInput) (*request.Request, *swf.TerminateWorkflowExecutionOutput) + + TerminateWorkflowExecution(*swf.TerminateWorkflowExecutionInput) (*swf.TerminateWorkflowExecutionOutput, 
error) +} + +var _ SWFAPI = (*swf.SWF)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4057 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package waf provides a client for AWS WAF. +package waf + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateByteMatchSet = "CreateByteMatchSet" + +// CreateByteMatchSetRequest generates a request for the CreateByteMatchSet operation. +func (c *WAF) CreateByteMatchSetRequest(input *CreateByteMatchSetInput) (req *request.Request, output *CreateByteMatchSetOutput) { + op := &request.Operation{ + Name: opCreateByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateByteMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateByteMatchSetOutput{} + req.Data = output + return +} + +// Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part +// of a web request that you want AWS WAF to inspect, such as the values of +// the User-Agent header or the query string. For example, you can create a +// ByteMatchSet that matches any requests with User-Agent headers that contain +// the string BadBot. You can then configure AWS WAF to reject those requests. +// +// To create and configure a ByteMatchSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateByteMatchSet request. Submit a CreateByteMatchSet request. 
+// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateByteMatchSet request. Submit an UpdateByteMatchSet +// request to specify the part of the request that you want AWS WAF to inspect +// (for example, the header or the URI) and the value that you want AWS WAF +// to watch for. For more information about how to use the AWS WAF API to allow +// or block HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateByteMatchSet(input *CreateByteMatchSetInput) (*CreateByteMatchSetOutput, error) { + req, out := c.CreateByteMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateIPSet = "CreateIPSet" + +// CreateIPSetRequest generates a request for the CreateIPSet operation. +func (c *WAF) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Request, output *CreateIPSetOutput) { + op := &request.Operation{ + Name: opCreateIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateIPSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateIPSetOutput{} + req.Data = output + return +} + +// Creates an IPSet, which you use to specify which web requests you want to +// allow or block based on the IP addresses that the requests originate from. +// For example, if you're receiving a lot of requests from one or more individual +// IP addresses or one or more ranges of IP addresses and you want to block +// the requests, you can create an IPSet that contains those IP addresses and +// then configure AWS WAF to block the requests. +// +// To create and configure an IPSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateIPSet request. Submit a CreateIPSet request. Use GetChangeToken +// to get the change token that you provide in the ChangeToken parameter of +// an UpdateIPSet request. 
Submit an UpdateIPSet request to specify the IP addresses +// that you want AWS WAF to watch for. For more information about how to use +// the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer +// Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateIPSet(input *CreateIPSetInput) (*CreateIPSetOutput, error) { + req, out := c.CreateIPSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateRule = "CreateRule" + +// CreateRuleRequest generates a request for the CreateRule operation. +func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, output *CreateRuleOutput) { + op := &request.Operation{ + Name: opCreateRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateRuleOutput{} + req.Data = output + return +} + +// Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and +// other predicates that identify the requests that you want to block. If you +// add more than one predicate to a Rule, a request must match all of the specifications +// to be allowed or blocked. For example, suppose you add the following to a +// Rule: +// +// An IPSet that matches the IP address 192.0.2.44/32 A ByteMatchSet that +// matches BadBot in the User-Agent header You then add the Rule to a WebACL +// and specify that you want to blocks requests that satisfy the Rule. For a +// request to be blocked, it must come from the IP address 192.0.2.44 and the +// User-Agent header in the request must contain the value BadBot. +// +// To create and configure a Rule, perform the following steps: +// +// Create and update the predicates that you want to include in the Rule. +// For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet. 
+// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateRule request. Submit a CreateRule request. Use GetChangeToken +// to get the change token that you provide in the ChangeToken parameter of +// an UpdateRule request. Submit an UpdateRule request to specify the predicates +// that you want to include in the Rule. Create and update a WebACL that contains +// the Rule. For more information, see CreateWebACL. For more information about +// how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF +// Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateRule(input *CreateRuleInput) (*CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) + err := req.Send() + return out, err +} + +const opCreateSizeConstraintSet = "CreateSizeConstraintSet" + +// CreateSizeConstraintSetRequest generates a request for the CreateSizeConstraintSet operation. +func (c *WAF) CreateSizeConstraintSetRequest(input *CreateSizeConstraintSetInput) (req *request.Request, output *CreateSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opCreateSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSizeConstraintSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSizeConstraintSetOutput{} + req.Data = output + return +} + +// Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify +// the part of a web request that you want AWS WAF to check for length, such +// as the length of the User-Agent header or the length of the query string. +// For example, you can create a SizeConstraintSet that matches any requests +// that have a query string that is longer than 100 bytes. You can then configure +// AWS WAF to reject those requests. 
+// +// To create and configure a SizeConstraintSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateSizeConstraintSet request. Submit a CreateSizeConstraintSet +// request. Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateSizeConstraintSet request. Submit an UpdateSizeConstraintSet +// request to specify the part of the request that you want AWS WAF to inspect +// (for example, the header or the URI) and the value that you want AWS WAF +// to watch for. For more information about how to use the AWS WAF API to allow +// or block HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateSizeConstraintSet(input *CreateSizeConstraintSetInput) (*CreateSizeConstraintSetOutput, error) { + req, out := c.CreateSizeConstraintSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateSqlInjectionMatchSet = "CreateSqlInjectionMatchSet" + +// CreateSqlInjectionMatchSetRequest generates a request for the CreateSqlInjectionMatchSet operation. +func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSetInput) (req *request.Request, output *CreateSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opCreateSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSqlInjectionMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateSqlInjectionMatchSetOutput{} + req.Data = output + return +} + +// Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests +// that contain snippets of SQL code in a specified part of web requests. AWS +// WAF searches for character sequences that are likely to be malicious strings. 
+// +// To create and configure a SqlInjectionMatchSet, perform the following steps: +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateSqlInjectionMatchSet request. Submit a CreateSqlInjectionMatchSet +// request. Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateSqlInjectionMatchSet request. Submit an +// UpdateSqlInjectionMatchSet request to specify the parts of web requests in +// which you want to allow, block, or count malicious SQL code. For more information +// about how to use the AWS WAF API to allow or block HTTP requests, see the +// AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateSqlInjectionMatchSet(input *CreateSqlInjectionMatchSetInput) (*CreateSqlInjectionMatchSetOutput, error) { + req, out := c.CreateSqlInjectionMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateWebACL = "CreateWebACL" + +// CreateWebACLRequest generates a request for the CreateWebACL operation. +func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Request, output *CreateWebACLOutput) { + op := &request.Operation{ + Name: opCreateWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateWebACLInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateWebACLOutput{} + req.Data = output + return +} + +// Creates a WebACL, which contains the Rules that identify the CloudFront web +// requests that you want to allow, block, or count. AWS WAF evaluates Rules +// in order based on the value of Priority for each Rule. +// +// You also specify a default action, either ALLOW or BLOCK. If a web request +// doesn't match any of the Rules in a WebACL, AWS WAF responds to the request +// with the default action. 
+// +// To create and configure a WebACL, perform the following steps: +// +// Create and update the ByteMatchSet objects and other predicates that you +// want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, +// CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// Create and update the Rules that you want to include in the WebACL. For more +// information, see CreateRule and UpdateRule. Use GetChangeToken to get the +// change token that you provide in the ChangeToken parameter of a CreateWebACL +// request. Submit a CreateWebACL request. Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an UpdateWebACL request. +// Submit an UpdateWebACL request to specify the Rules that you want to include +// in the WebACL, to specify the default action, and to associate the WebACL +// with a CloudFront distribution. For more information about how to use the +// AWS WAF API, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) CreateWebACL(input *CreateWebACLInput) (*CreateWebACLOutput, error) { + req, out := c.CreateWebACLRequest(input) + err := req.Send() + return out, err +} + +const opDeleteByteMatchSet = "DeleteByteMatchSet" + +// DeleteByteMatchSetRequest generates a request for the DeleteByteMatchSet operation. +func (c *WAF) DeleteByteMatchSetRequest(input *DeleteByteMatchSetInput) (req *request.Request, output *DeleteByteMatchSetOutput) { + op := &request.Operation{ + Name: opDeleteByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteByteMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteByteMatchSetOutput{} + req.Data = output + return +} + +// Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's +// still used in any Rules or if it still includes any ByteMatchTuple objects +// (any filters). 
+// +// If you just want to remove a ByteMatchSet from a Rule, use UpdateRule. +// +// To permanently delete a ByteMatchSet, perform the following steps: +// +// Update the ByteMatchSet to remove filters, if any. For more information, +// see UpdateByteMatchSet. Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of a DeleteByteMatchSet request. Submit +// a DeleteByteMatchSet request. +func (c *WAF) DeleteByteMatchSet(input *DeleteByteMatchSetInput) (*DeleteByteMatchSetOutput, error) { + req, out := c.DeleteByteMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIPSet = "DeleteIPSet" + +// DeleteIPSetRequest generates a request for the DeleteIPSet operation. +func (c *WAF) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Request, output *DeleteIPSetOutput) { + op := &request.Operation{ + Name: opDeleteIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIPSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIPSetOutput{} + req.Data = output + return +} + +// Permanently deletes an IPSet. You can't delete an IPSet if it's still used +// in any Rules or if it still includes any IP addresses. +// +// If you just want to remove an IPSet from a Rule, use UpdateRule. +// +// To permanently delete an IPSet from AWS WAF, perform the following steps: +// +// Update the IPSet to remove IP address ranges, if any. For more information, +// see UpdateIPSet. Use GetChangeToken to get the change token that you provide +// in the ChangeToken parameter of a DeleteIPSet request. Submit a DeleteIPSet +// request. +func (c *WAF) DeleteIPSet(input *DeleteIPSetInput) (*DeleteIPSetOutput, error) { + req, out := c.DeleteIPSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRule = "DeleteRule" + +// DeleteRuleRequest generates a request for the DeleteRule operation. 
+func (c *WAF) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { + op := &request.Operation{ + Name: opDeleteRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteRuleOutput{} + req.Data = output + return +} + +// Permanently deletes a Rule. You can't delete a Rule if it's still used in +// any WebACL objects or if it still includes any predicates, such as ByteMatchSet +// objects. +// +// If you just want to remove a Rule from a WebACL, use UpdateWebACL. +// +// To permanently delete a Rule from AWS WAF, perform the following steps: +// +// Update the Rule to remove predicates, if any. For more information, see +// UpdateRule. Use GetChangeToken to get the change token that you provide in +// the ChangeToken parameter of a DeleteRule request. Submit a DeleteRule request. +func (c *WAF) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSizeConstraintSet = "DeleteSizeConstraintSet" + +// DeleteSizeConstraintSetRequest generates a request for the DeleteSizeConstraintSet operation. +func (c *WAF) DeleteSizeConstraintSetRequest(input *DeleteSizeConstraintSetInput) (req *request.Request, output *DeleteSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opDeleteSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSizeConstraintSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSizeConstraintSetOutput{} + req.Data = output + return +} + +// Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet +// if it's still used in any Rules or if it still includes any SizeConstraint +// objects (any filters). +// +// If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule. 
+// +// To permanently delete a SizeConstraintSet, perform the following steps: +// +// Update the SizeConstraintSet to remove filters, if any. For more information, +// see UpdateSizeConstraintSet. Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request. +// Submit a DeleteSizeConstraintSet request. +func (c *WAF) DeleteSizeConstraintSet(input *DeleteSizeConstraintSetInput) (*DeleteSizeConstraintSetOutput, error) { + req, out := c.DeleteSizeConstraintSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSqlInjectionMatchSet = "DeleteSqlInjectionMatchSet" + +// DeleteSqlInjectionMatchSetRequest generates a request for the DeleteSqlInjectionMatchSet operation. +func (c *WAF) DeleteSqlInjectionMatchSetRequest(input *DeleteSqlInjectionMatchSetInput) (req *request.Request, output *DeleteSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opDeleteSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSqlInjectionMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteSqlInjectionMatchSetOutput{} + req.Data = output + return +} + +// Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet +// if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple +// objects. +// +// If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule. +// +// To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following +// steps: +// +// Update the SqlInjectionMatchSet to remove filters, if any. For more information, +// see UpdateSqlInjectionMatchSet. Use GetChangeToken to get the change token +// that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet +// request. Submit a DeleteSqlInjectionMatchSet request. 
+func (c *WAF) DeleteSqlInjectionMatchSet(input *DeleteSqlInjectionMatchSetInput) (*DeleteSqlInjectionMatchSetOutput, error) { + req, out := c.DeleteSqlInjectionMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteWebACL = "DeleteWebACL" + +// DeleteWebACLRequest generates a request for the DeleteWebACL operation. +func (c *WAF) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Request, output *DeleteWebACLOutput) { + op := &request.Operation{ + Name: opDeleteWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteWebACLInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteWebACLOutput{} + req.Data = output + return +} + +// Permanently deletes a WebACL. You can't delete a WebACL if it still contains +// any Rules. +// +// To delete a WebACL, perform the following steps: +// +// Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL. +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteWebACL request. Submit a DeleteWebACL request. +func (c *WAF) DeleteWebACL(input *DeleteWebACLInput) (*DeleteWebACLOutput, error) { + req, out := c.DeleteWebACLRequest(input) + err := req.Send() + return out, err +} + +const opGetByteMatchSet = "GetByteMatchSet" + +// GetByteMatchSetRequest generates a request for the GetByteMatchSet operation. +func (c *WAF) GetByteMatchSetRequest(input *GetByteMatchSetInput) (req *request.Request, output *GetByteMatchSetOutput) { + op := &request.Operation{ + Name: opGetByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetByteMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetByteMatchSetOutput{} + req.Data = output + return +} + +// Returns the ByteMatchSet specified by ByteMatchSetId. 
+func (c *WAF) GetByteMatchSet(input *GetByteMatchSetInput) (*GetByteMatchSetOutput, error) { + req, out := c.GetByteMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opGetChangeToken = "GetChangeToken" + +// GetChangeTokenRequest generates a request for the GetChangeToken operation. +func (c *WAF) GetChangeTokenRequest(input *GetChangeTokenInput) (req *request.Request, output *GetChangeTokenOutput) { + op := &request.Operation{ + Name: opGetChangeToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetChangeTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeTokenOutput{} + req.Data = output + return +} + +// When you want to create, update, or delete AWS WAF objects, get a change +// token and include the change token in the create, update, or delete request. +// Change tokens ensure that your application doesn't submit conflicting requests +// to AWS WAF. +// +// Each create, update, or delete request must use a unique change token. If +// your application submits a GetChangeToken request and then submits a second +// GetChangeToken request before submitting a create, update, or delete request, +// the second GetChangeToken request returns the same value as the first GetChangeToken +// request. +// +// When you use a change token in a create, update, or delete request, the +// status of the change token changes to PENDING, which indicates that AWS WAF +// is propagating the change to all AWS WAF servers. Use GetChangeTokenStatus +// to determine the status of your change token. +func (c *WAF) GetChangeToken(input *GetChangeTokenInput) (*GetChangeTokenOutput, error) { + req, out := c.GetChangeTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetChangeTokenStatus = "GetChangeTokenStatus" + +// GetChangeTokenStatusRequest generates a request for the GetChangeTokenStatus operation. 
+func (c *WAF) GetChangeTokenStatusRequest(input *GetChangeTokenStatusInput) (req *request.Request, output *GetChangeTokenStatusOutput) { + op := &request.Operation{ + Name: opGetChangeTokenStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetChangeTokenStatusInput{} + } + + req = c.newRequest(op, input, output) + output = &GetChangeTokenStatusOutput{} + req.Data = output + return +} + +// Returns the status of a ChangeToken that you got by calling GetChangeToken. +// ChangeTokenStatus is one of the following values: +// +// PROVISIONED: You requested the change token by calling GetChangeToken, +// but you haven't used it yet in a call to create, update, or delete an AWS +// WAF object. PENDING: AWS WAF is propagating the create, update, or delete +// request to all AWS WAF servers. IN_SYNC: Propagation is complete. +func (c *WAF) GetChangeTokenStatus(input *GetChangeTokenStatusInput) (*GetChangeTokenStatusOutput, error) { + req, out := c.GetChangeTokenStatusRequest(input) + err := req.Send() + return out, err +} + +const opGetIPSet = "GetIPSet" + +// GetIPSetRequest generates a request for the GetIPSet operation. +func (c *WAF) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, output *GetIPSetOutput) { + op := &request.Operation{ + Name: opGetIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIPSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIPSetOutput{} + req.Data = output + return +} + +// Returns the IPSet that is specified by IPSetId. +func (c *WAF) GetIPSet(input *GetIPSetInput) (*GetIPSetOutput, error) { + req, out := c.GetIPSetRequest(input) + err := req.Send() + return out, err +} + +const opGetRule = "GetRule" + +// GetRuleRequest generates a request for the GetRule operation. 
+func (c *WAF) GetRuleRequest(input *GetRuleInput) (req *request.Request, output *GetRuleOutput) { + op := &request.Operation{ + Name: opGetRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetRuleOutput{} + req.Data = output + return +} + +// Returns the Rule that is specified by the RuleId that you included in the +// GetRule request. +func (c *WAF) GetRule(input *GetRuleInput) (*GetRuleOutput, error) { + req, out := c.GetRuleRequest(input) + err := req.Send() + return out, err +} + +const opGetSampledRequests = "GetSampledRequests" + +// GetSampledRequestsRequest generates a request for the GetSampledRequests operation. +func (c *WAF) GetSampledRequestsRequest(input *GetSampledRequestsInput) (req *request.Request, output *GetSampledRequestsOutput) { + op := &request.Operation{ + Name: opGetSampledRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSampledRequestsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSampledRequestsOutput{} + req.Data = output + return +} + +// Gets detailed information about a specified number of requests--a sample--that +// AWS WAF randomly selects from among the first 5,000 requests that your AWS +// resource received during a time range that you choose. You can specify a +// sample size of up to 100 requests, and you can specify any time range in +// the previous three hours. +// +// GetSampledRequests returns a time range, which is usually the time range +// that you specified. However, if your resource (such as a CloudFront distribution) +// received 5,000 requests before the specified time range elapsed, GetSampledRequests +// returns an updated time range. This new time range indicates the actual period +// during which AWS WAF selected the requests in the sample. 
+func (c *WAF) GetSampledRequests(input *GetSampledRequestsInput) (*GetSampledRequestsOutput, error) { + req, out := c.GetSampledRequestsRequest(input) + err := req.Send() + return out, err +} + +const opGetSizeConstraintSet = "GetSizeConstraintSet" + +// GetSizeConstraintSetRequest generates a request for the GetSizeConstraintSet operation. +func (c *WAF) GetSizeConstraintSetRequest(input *GetSizeConstraintSetInput) (req *request.Request, output *GetSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opGetSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSizeConstraintSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSizeConstraintSetOutput{} + req.Data = output + return +} + +// Returns the SizeConstraintSet specified by SizeConstraintSetId. +func (c *WAF) GetSizeConstraintSet(input *GetSizeConstraintSetInput) (*GetSizeConstraintSetOutput, error) { + req, out := c.GetSizeConstraintSetRequest(input) + err := req.Send() + return out, err +} + +const opGetSqlInjectionMatchSet = "GetSqlInjectionMatchSet" + +// GetSqlInjectionMatchSetRequest generates a request for the GetSqlInjectionMatchSet operation. +func (c *WAF) GetSqlInjectionMatchSetRequest(input *GetSqlInjectionMatchSetInput) (req *request.Request, output *GetSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opGetSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSqlInjectionMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSqlInjectionMatchSetOutput{} + req.Data = output + return +} + +// Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. 
+func (c *WAF) GetSqlInjectionMatchSet(input *GetSqlInjectionMatchSetInput) (*GetSqlInjectionMatchSetOutput, error) { + req, out := c.GetSqlInjectionMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opGetWebACL = "GetWebACL" + +// GetWebACLRequest generates a request for the GetWebACL operation. +func (c *WAF) GetWebACLRequest(input *GetWebACLInput) (req *request.Request, output *GetWebACLOutput) { + op := &request.Operation{ + Name: opGetWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetWebACLInput{} + } + + req = c.newRequest(op, input, output) + output = &GetWebACLOutput{} + req.Data = output + return +} + +// Returns the WebACL that is specified by WebACLId. +func (c *WAF) GetWebACL(input *GetWebACLInput) (*GetWebACLOutput, error) { + req, out := c.GetWebACLRequest(input) + err := req.Send() + return out, err +} + +const opListByteMatchSets = "ListByteMatchSets" + +// ListByteMatchSetsRequest generates a request for the ListByteMatchSets operation. +func (c *WAF) ListByteMatchSetsRequest(input *ListByteMatchSetsInput) (req *request.Request, output *ListByteMatchSetsOutput) { + op := &request.Operation{ + Name: opListByteMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListByteMatchSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListByteMatchSetsOutput{} + req.Data = output + return +} + +// Returns an array of ByteMatchSetSummary objects. +func (c *WAF) ListByteMatchSets(input *ListByteMatchSetsInput) (*ListByteMatchSetsOutput, error) { + req, out := c.ListByteMatchSetsRequest(input) + err := req.Send() + return out, err +} + +const opListIPSets = "ListIPSets" + +// ListIPSetsRequest generates a request for the ListIPSets operation. 
+func (c *WAF) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Request, output *ListIPSetsOutput) { + op := &request.Operation{ + Name: opListIPSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIPSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIPSetsOutput{} + req.Data = output + return +} + +// Returns an array of IPSetSummary objects in the response. +func (c *WAF) ListIPSets(input *ListIPSetsInput) (*ListIPSetsOutput, error) { + req, out := c.ListIPSetsRequest(input) + err := req.Send() + return out, err +} + +const opListRules = "ListRules" + +// ListRulesRequest generates a request for the ListRules operation. +func (c *WAF) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) { + op := &request.Operation{ + Name: opListRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRulesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListRulesOutput{} + req.Data = output + return +} + +// Returns an array of RuleSummary objects. +func (c *WAF) ListRules(input *ListRulesInput) (*ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + err := req.Send() + return out, err +} + +const opListSizeConstraintSets = "ListSizeConstraintSets" + +// ListSizeConstraintSetsRequest generates a request for the ListSizeConstraintSets operation. +func (c *WAF) ListSizeConstraintSetsRequest(input *ListSizeConstraintSetsInput) (req *request.Request, output *ListSizeConstraintSetsOutput) { + op := &request.Operation{ + Name: opListSizeConstraintSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSizeConstraintSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSizeConstraintSetsOutput{} + req.Data = output + return +} + +// Returns an array of SizeConstraintSetSummary objects. 
+func (c *WAF) ListSizeConstraintSets(input *ListSizeConstraintSetsInput) (*ListSizeConstraintSetsOutput, error) { + req, out := c.ListSizeConstraintSetsRequest(input) + err := req.Send() + return out, err +} + +const opListSqlInjectionMatchSets = "ListSqlInjectionMatchSets" + +// ListSqlInjectionMatchSetsRequest generates a request for the ListSqlInjectionMatchSets operation. +func (c *WAF) ListSqlInjectionMatchSetsRequest(input *ListSqlInjectionMatchSetsInput) (req *request.Request, output *ListSqlInjectionMatchSetsOutput) { + op := &request.Operation{ + Name: opListSqlInjectionMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSqlInjectionMatchSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListSqlInjectionMatchSetsOutput{} + req.Data = output + return +} + +// Returns an array of SqlInjectionMatchSet objects. +func (c *WAF) ListSqlInjectionMatchSets(input *ListSqlInjectionMatchSetsInput) (*ListSqlInjectionMatchSetsOutput, error) { + req, out := c.ListSqlInjectionMatchSetsRequest(input) + err := req.Send() + return out, err +} + +const opListWebACLs = "ListWebACLs" + +// ListWebACLsRequest generates a request for the ListWebACLs operation. +func (c *WAF) ListWebACLsRequest(input *ListWebACLsInput) (req *request.Request, output *ListWebACLsOutput) { + op := &request.Operation{ + Name: opListWebACLs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListWebACLsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListWebACLsOutput{} + req.Data = output + return +} + +// Returns an array of WebACLSummary objects in the response. +func (c *WAF) ListWebACLs(input *ListWebACLsInput) (*ListWebACLsOutput, error) { + req, out := c.ListWebACLsRequest(input) + err := req.Send() + return out, err +} + +const opUpdateByteMatchSet = "UpdateByteMatchSet" + +// UpdateByteMatchSetRequest generates a request for the UpdateByteMatchSet operation. 
+func (c *WAF) UpdateByteMatchSetRequest(input *UpdateByteMatchSetInput) (req *request.Request, output *UpdateByteMatchSetOutput) { + op := &request.Operation{ + Name: opUpdateByteMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateByteMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateByteMatchSetOutput{} + req.Data = output + return +} + +// Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For +// each ByteMatchTuple object, you specify the following values: +// +// Whether to insert or delete the object from the array. If you want to change +// a ByteMatchSetUpdate object, you delete the existing object and add a new +// one. The part of a web request that you want AWS WAF to inspect, such as +// a query string or the value of the User-Agent header. The bytes (typically +// a string that corresponds with ASCII characters) that you want AWS WAF to +// look for. For more information, including how you specify the values for +// the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple +// data type. Where to look, such as at the beginning or the end of a query +// string. Whether to perform any conversions on the request, such as converting +// it to lowercase, before inspecting it for the specified string. For example, +// you can add a ByteMatchSetUpdate object that matches web requests in which +// User-Agent headers contain the string BadBot. You can then configure AWS +// WAF to block those requests. +// +// To create and configure a ByteMatchSet, perform the following steps: +// +// Create a ByteMatchSet. For more information, see CreateByteMatchSet. Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateByteMatchSet request. 
Submit an UpdateByteMatchSet +// request to specify the part of the request that you want AWS WAF to inspect +// (for example, the header or the URI) and the value that you want AWS WAF +// to watch for. For more information about how to use the AWS WAF API to allow +// or block HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateByteMatchSet(input *UpdateByteMatchSetInput) (*UpdateByteMatchSetOutput, error) { + req, out := c.UpdateByteMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opUpdateIPSet = "UpdateIPSet" + +// UpdateIPSetRequest generates a request for the UpdateIPSet operation. +func (c *WAF) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Request, output *UpdateIPSetOutput) { + op := &request.Operation{ + Name: opUpdateIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateIPSetInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateIPSetOutput{} + req.Data = output + return +} + +// Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor +// object, you specify the following values: +// +// Whether to insert or delete the object from the array. If you want to change +// an IPSetDescriptor object, you delete the existing object and add a new one. +// The IP address version, IPv4. The IP address in CIDR notation, for example, +// 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) +// or 192.0.2.44/32 (for the individual IP address 192.0.2.44). AWS WAF supports +// /8, /16, /24, and /32 IP address ranges. For more information about CIDR +// notation, see the Wikipedia entry Classless Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +// +// You use an IPSet to specify which web requests you want to allow or block +// based on the IP addresses that the requests originated from. 
For example, +// if you're receiving a lot of requests from one or a small number of IP addresses +// and you want to block the requests, you can create an IPSet that specifies +// those IP addresses, and then configure AWS WAF to block the requests. +// +// To create and configure an IPSet, perform the following steps: +// +// Submit a CreateIPSet request. Use GetChangeToken to get the change token +// that you provide in the ChangeToken parameter of an UpdateIPSet request. +// Submit an UpdateIPSet request to specify the IP addresses that you want AWS +// WAF to watch for. When you update an IPSet, you specify the IP addresses +// that you want to add and/or the IP addresses that you want to delete. If +// you want to change an IP address, you delete the existing IP address and +// add the new one. +// +// For more information about how to use the AWS WAF API to allow or block +// HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateIPSet(input *UpdateIPSetInput) (*UpdateIPSetOutput, error) { + req, out := c.UpdateIPSetRequest(input) + err := req.Send() + return out, err +} + +const opUpdateRule = "UpdateRule" + +// UpdateRuleRequest generates a request for the UpdateRule operation. +func (c *WAF) UpdateRuleRequest(input *UpdateRuleInput) (req *request.Request, output *UpdateRuleOutput) { + op := &request.Operation{ + Name: opUpdateRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateRuleOutput{} + req.Data = output + return +} + +// Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies +// a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests +// that you want to allow, block, or count. If you add more than one predicate +// to a Rule, a request must match all of the specifications to be allowed, +// blocked, or counted. 
For example, suppose you add the following to a Rule: +// +// A ByteMatchSet that matches the value BadBot in the User-Agent header An +// IPSet that matches the IP address 192.0.2.44 You then add the Rule to a +// WebACL and specify that you want to block requests that satisfy the Rule. +// For a request to be blocked, the User-Agent header in the request must contain +// the value BadBot and the request must originate from the IP address 192.0.2.44. +// +// To create and configure a Rule, perform the following steps: +// +// Create and update the predicates that you want to include in the Rule. +// Create the Rule. See CreateRule. Use GetChangeToken to get the change token +// that you provide in the ChangeToken parameter of an UpdateRule request. Submit +// an UpdateRule request to add predicates to the Rule. Create and update a +// WebACL that contains the Rule. See CreateWebACL. If you want to replace +// one ByteMatchSet or IPSet with another, you delete the existing one and add +// the new one. +// +// For more information about how to use the AWS WAF API to allow or block +// HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateRule(input *UpdateRuleInput) (*UpdateRuleOutput, error) { + req, out := c.UpdateRuleRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSizeConstraintSet = "UpdateSizeConstraintSet" + +// UpdateSizeConstraintSetRequest generates a request for the UpdateSizeConstraintSet operation. 
+func (c *WAF) UpdateSizeConstraintSetRequest(input *UpdateSizeConstraintSetInput) (req *request.Request, output *UpdateSizeConstraintSetOutput) { + op := &request.Operation{ + Name: opUpdateSizeConstraintSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSizeConstraintSetInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateSizeConstraintSetOutput{} + req.Data = output + return +} + +// Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. +// For each SizeConstraint object, you specify the following values: +// +// Whether to insert or delete the object from the array. If you want to change +// a SizeConstraintSetUpdate object, you delete the existing object and add +// a new one. The part of a web request that you want AWS WAF to evaluate, such +// as the length of a query string or the length of the User-Agent header. Whether +// to perform any transformations on the request, such as converting it to lowercase, +// before checking its length. Note that transformations of the request body +// are not supported because the AWS resource forwards only the first 8192 bytes +// of your request to AWS WAF. A ComparisonOperator used for evaluating the +// selected part of the request against the specified Size, such as equals, +// greater than, less than, and so on. The length, in bytes, that you want AWS +// WAF to watch for in selected part of the request. The length is computed +// after applying the transformation. For example, you can add a SizeConstraintSetUpdate +// object that matches web requests in which the length of the User-Agent header +// is greater than 100 bytes. You can then configure AWS WAF to block those +// requests. +// +// To create and configure a SizeConstraintSet, perform the following steps: +// +// Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet. 
+// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateSizeConstraintSet request. Submit an UpdateSizeConstraintSet +// request to specify the part of the request that you want AWS WAF to inspect +// (for example, the header or the URI) and the value that you want AWS WAF +// to watch for. For more information about how to use the AWS WAF API to allow +// or block HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateSizeConstraintSet(input *UpdateSizeConstraintSetInput) (*UpdateSizeConstraintSetOutput, error) { + req, out := c.UpdateSizeConstraintSetRequest(input) + err := req.Send() + return out, err +} + +const opUpdateSqlInjectionMatchSet = "UpdateSqlInjectionMatchSet" + +// UpdateSqlInjectionMatchSetRequest generates a request for the UpdateSqlInjectionMatchSet operation. +func (c *WAF) UpdateSqlInjectionMatchSetRequest(input *UpdateSqlInjectionMatchSetInput) (req *request.Request, output *UpdateSqlInjectionMatchSetOutput) { + op := &request.Operation{ + Name: opUpdateSqlInjectionMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSqlInjectionMatchSetInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateSqlInjectionMatchSetOutput{} + req.Data = output + return +} + +// Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. +// For each SqlInjectionMatchTuple object, you specify the following values: +// +// Action: Whether to insert the object into or delete the object from the +// array. To change a SqlInjectionMatchTuple, you delete the existing object +// and add a new one. FieldToMatch: The part of web requests that you want AWS +// WAF to inspect and, if you want AWS WAF to inspect a header, the name of +// the header. 
TextTransformation: Which text transformation, if any, to perform +// on the web request before inspecting the request for snippets of malicious +// SQL code. You use SqlInjectionMatchSet objects to specify which CloudFront +// requests you want to allow, block, or count. For example, if you're receiving +// requests that contain snippets of SQL code in the query string and you want +// to block the requests, you can create a SqlInjectionMatchSet with the applicable +// settings, and then configure AWS WAF to block the requests. +// +// To create and configure a SqlInjectionMatchSet, perform the following steps: +// +// Submit a CreateSqlInjectionMatchSet request. Use GetChangeToken to get +// the change token that you provide in the ChangeToken parameter of an UpdateIPSet +// request. Submit an UpdateSqlInjectionMatchSet request to specify the parts +// of web requests that you want AWS WAF to inspect for snippets of SQL code. +// For more information about how to use the AWS WAF API to allow or block +// HTTP requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateSqlInjectionMatchSet(input *UpdateSqlInjectionMatchSetInput) (*UpdateSqlInjectionMatchSetOutput, error) { + req, out := c.UpdateSqlInjectionMatchSetRequest(input) + err := req.Send() + return out, err +} + +const opUpdateWebACL = "UpdateWebACL" + +// UpdateWebACLRequest generates a request for the UpdateWebACL operation. +func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Request, output *UpdateWebACLOutput) { + op := &request.Operation{ + Name: opUpdateWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateWebACLInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateWebACLOutput{} + req.Data = output + return +} + +// Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies +// web requests that you want to allow, block, or count. 
When you update a WebACL, +// you specify the following values: +// +// A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs +// the default action if a request doesn't match the criteria in any of the +// Rules in a WebACL. The Rules that you want to add and/or delete. If you want +// to replace one Rule with another, you delete the existing Rule and add the +// new one. For each Rule, whether you want AWS WAF to allow requests, block +// requests, or count requests that match the conditions in the Rule. The order +// in which you want AWS WAF to evaluate the Rules in a WebACL. If you add more +// than one Rule to a WebACL, AWS WAF evaluates each request against the Rules +// in order based on the value of Priority. (The Rule that has the lowest value +// for Priority is evaluated first.) When a web request matches all of the predicates +// (such as ByteMatchSets and IPSets) in a Rule, AWS WAF immediately takes the +// corresponding action, allow or block, and doesn't evaluate the request against +// the remaining Rules in the WebACL, if any. The CloudFront distribution that +// you want to associate with the WebACL. To create and configure a WebACL, +// perform the following steps: +// +// Create and update the predicates that you want to include in Rules. For +// more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, +// UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// Create and update the Rules that you want to include in the WebACL. For more +// information, see CreateRule and UpdateRule. Create a WebACL. See CreateWebACL. +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. Submit an UpdateWebACL request to specify +// the Rules that you want to include in the WebACL, to specify the default +// action, and to associate the WebACL with a CloudFront distribution. 
For +// more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +func (c *WAF) UpdateWebACL(input *UpdateWebACLInput) (*UpdateWebACLOutput, error) { + req, out := c.UpdateWebACLRequest(input) + err := req.Send() + return out, err +} + +// The ActivatedRule object in an UpdateWebACL request specifies a Rule that +// you want to insert or delete, the priority of the Rule in the WebACL, and +// the action that you want AWS WAF to take when a web request matches the Rule +// (ALLOW, BLOCK, or COUNT). +// +// To specify whether to insert or delete a Rule, use the Action parameter +// in the WebACLUpdate data type. +type ActivatedRule struct { + _ struct{} `type:"structure"` + + // Specifies the action that CloudFront or AWS WAF takes when a web request + // matches the conditions in the Rule. Valid values for Action include the following: + // + // ALLOW: CloudFront responds with the requested object. BLOCK: CloudFront + // responds with an HTTP 403 (Forbidden) status code. COUNT: AWS WAF increments + // a counter of requests that match the conditions in the rule and then continues + // to inspect the web request based on the remaining rules in the web ACL. + Action *WafAction `type:"structure" required:"true"` + + // Specifies the order in which the Rules in a WebACL are evaluated. Rules with + // a lower value for Priority are evaluated before Rules with a higher value. + // The value must be a unique integer. If you add multiple Rules to a WebACL, + // the values don't need to be consecutive. + Priority *int64 `type:"integer" required:"true"` + + // The RuleId for a Rule. You use RuleId to get more information about a Rule + // (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL + // or delete a one from a WebACL (see UpdateWebACL), or delete a Rule from AWS + // WAF (see DeleteRule). 
+ // + // RuleId is returned by CreateRule and by ListRules. + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ActivatedRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActivatedRule) GoString() string { + return s.String() +} + +// In a GetByteMatchSet request, ByteMatchSet is a complex type that contains +// the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified +// when you updated the ByteMatchSet. +// +// A complex type that contains ByteMatchTuple objects, which specify the parts +// of web requests that you want AWS WAF to inspect and the values that you +// want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple +// object, a request needs to match the settings in only one ByteMatchTuple +// to be considered a match. +type ByteMatchSet struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information + // about a ByteMatchSet (see GetByteMatchSet), update a ByteMatchSet (see UpdateByteMatchSet, + // insert a ByteMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete a ByteMatchSet from AWS WAF (see DeleteByteMatchSet). + // + // ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets. + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // Specifies the bytes (typically a string that corresponds with ASCII characters) + // that you want AWS WAF to search for in web requests, the location in requests + // that you want AWS WAF to search, and other settings. + ByteMatchTuples []*ByteMatchTuple `type:"list" required:"true"` + + // A friendly name or description of the ByteMatchSet. You can't change Name + // after you create a ByteMatchSet. 
+ Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ByteMatchSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSet) GoString() string { + return s.String() +} + +// Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the +// Name and ByteMatchSetId for one ByteMatchSet. +type ByteMatchSetSummary struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information + // about a ByteMatchSet, update a ByteMatchSet, remove a ByteMatchSet from a + // Rule, and delete a ByteMatchSet from AWS WAF. + // + // ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets. + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the ByteMatchSet. You can't change Name + // after you create a ByteMatchSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ByteMatchSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSetSummary) GoString() string { + return s.String() +} + +// In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to +// insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple. +type ByteMatchSetUpdate struct { + _ struct{} `type:"structure"` + + // Specifies whether to insert or delete a ByteMatchTuple. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Information about the part of a web request that you want AWS WAF to inspect + // and the value that you want AWS WAF to search for. If you specify DELETE + // for the value of Action, the ByteMatchTuple values must exactly match the + // values in the ByteMatchTuple that you want to delete from the ByteMatchSet. 
+ ByteMatchTuple *ByteMatchTuple `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ByteMatchSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSetUpdate) GoString() string { + return s.String() +} + +// The bytes (typically a string that corresponds with ASCII characters) that +// you want AWS WAF to search for in web requests, the location in requests +// that you want AWS WAF to search, and other settings. +type ByteMatchTuple struct { + _ struct{} `type:"structure"` + + // The part of a web request that you want AWS WAF to search, such as a specified + // header or a query string. For more information, see FieldToMatch. + FieldToMatch *FieldToMatch `type:"structure" required:"true"` + + // Within the portion of a web request that you want to search (for example, + // in the query string, if any), specify where you want AWS WAF to search. Valid + // values include the following: + // + // CONTAINS + // + // The specified part of the web request must include the value of TargetString, + // but the location doesn't matter. + // + // CONTAINS_WORD + // + // The specified part of the web request must include the value of TargetString, + // and TargetString must contain only alphanumeric characters or underscore + // (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, which means + // one of the following: + // + // TargetString exactly matches the value of the specified part of the web + // request, such as the value of a header. TargetString is at the beginning + // of the specified part of the web request and is followed by a character other + // than an alphanumeric character or underscore (_), for example, BadBot;. TargetString + // is at the end of the specified part of the web request and is preceded by + // a character other than an alphanumeric character or underscore (_), for example, + // ;BadBot. 
TargetString is in the middle of the specified part of the web request + // and is preceded and followed by characters other than alphanumeric characters + // or underscore (_), for example, -BadBot;. EXACTLY + // + // The value of the specified part of the web request must exactly match the + // value of TargetString. + // + // STARTS_WITH + // + // The value of TargetString must appear at the beginning of the specified + // part of the web request. + // + // ENDS_WITH + // + // The value of TargetString must appear at the end of the specified part of + // the web request. + PositionalConstraint *string `type:"string" required:"true" enum:"PositionalConstraint"` + + // The value that you want AWS WAF to search for. AWS WAF searches for the specified + // string in the part of web requests that you specified in FieldToMatch. The + // maximum length of the value is 50 bytes. + // + // Valid values depend on the values that you specified for FieldToMatch: + // + // HEADER: The value that you want AWS WAF to search for in the request header + // that you specified in FieldToMatch, for example, the value of the User-Agent + // or Referer header. METHOD: The HTTP method, which indicates the type of operation + // specified in the request. CloudFront supports the following methods: DELETE, + // GET, HEAD, OPTIONS, PATCH, POST, and PUT. QUERY_STRING: The value that you + // want AWS WAF to search for in the query string, which is the part of a URL + // that appears after a ? character. URI: The value that you want AWS WAF to + // search for in the part of a URL that identifies a resource, for example, + // /images/daily-ad.jpg. BODY: The part of a request that contains any additional + // data that you want to send to your web server as the HTTP request body, such + // as data from a form. The request body immediately follows the request headers. + // Note that only the first 8192 bytes of the request body are forwarded to + // AWS WAF for inspection. 
To allow or block requests based on the length of + // the body, you can create a size constraint set. For more information, see + // CreateSizeConstraintSet. If TargetString includes alphabetic characters + // A-Z and a-z, note that the value is case sensitive. + // + // If you're using the AWS WAF API + // + // Specify a base64-encoded version of the value. The maximum length of the + // value before you base64-encode it is 50 bytes. + // + // For example, suppose the value of Type is HEADER and the value of Data is + // User-Agent. If you want to search the User-Agent header for the value BadBot, + // you base64-encode BadBot using MIME base64 encoding and include the resulting + // value, QmFkQm90, in the value of TargetString. + // + // If you're using the AWS CLI or one of the AWS SDKs + // + // The value that you want AWS WAF to search for. The SDK automatically base64 + // encodes the value. + TargetString []byte `type:"blob" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, + // AWS WAF performs the transformation on TargetString before inspecting a request + // for a match. 
+ // + // CMD_LINE + // + // When you're concerned that attackers are injecting an operating system commandline + // command and using unusual formatting to disguise some or all of the command, + // use this option to perform the following transformations: + // + // Delete the following characters: \ " ' ^ Delete spaces before the following + // characters: / ( Replace the following characters with a space: , ; Replace + // multiple spaces with one space Convert uppercase letters (A-Z) to lowercase + // (a-z) COMPRESS_WHITE_SPACE + // + // Use this option to replace the following characters with a space character + // (decimal 32): + // + // \f, formfeed, decimal 12 \t, tab, decimal 9 \n, newline, decimal 10 \r, + // carriage return, decimal 13 \v, vertical tab, decimal 11 non-breaking space, + // decimal 160 COMPRESS_WHITE_SPACE also replaces multiple spaces with one + // space. + // + // HTML_ENTITY_DECODE + // + // Use this option to replace HTML-encoded characters with unencoded characters. + // HTML_ENTITY_DECODE performs the following operations: + // + // Replaces (ampersand)quot; with " Replaces (ampersand)nbsp; with a non-breaking + // space, decimal 160 Replaces (ampersand)lt; with a "less than" symbol Replaces + // (ampersand)gt; with > Replaces characters that are represented in hexadecimal + // format, (ampersand)#xhhhh;, with the corresponding characters Replaces characters + // that are represented in decimal format, (ampersand)#nnnn;, with the corresponding + // characters LOWERCASE + // + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // + // URL_DECODE + // + // Use this option to decode a URL-encoded value. + // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. 
+ TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` +} + +// String returns the string representation +func (s ByteMatchTuple) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchTuple) GoString() string { + return s.String() +} + +type CreateByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description of the ByteMatchSet. You can't change Name + // after you create a ByteMatchSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateByteMatchSetInput) GoString() string { + return s.String() +} + +type CreateByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // A ByteMatchSet that contains no ByteMatchTuple objects. + ByteMatchSet *ByteMatchSet `type:"structure"` + + // The ChangeToken that you used to submit the CreateByteMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s CreateByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateByteMatchSetOutput) GoString() string { + return s.String() +} + +type CreateIPSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description of the IPSet. You can't change Name after + // you create the IPSet. 
+ Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIPSetInput) GoString() string { + return s.String() +} + +type CreateIPSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // The IPSet returned in the CreateIPSet response. + IPSet *IPSet `type:"structure"` +} + +// String returns the string representation +func (s CreateIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIPSetOutput) GoString() string { + return s.String() +} + +type CreateRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description for the metrics for this Rule. The name can + // contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain + // whitespace. You can't change the name of the metric after you create the + // Rule. + MetricName *string `type:"string" required:"true"` + + // A friendly name or description of the Rule. You can't change the name of + // a Rule after you create it. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRuleInput) GoString() string { + return s.String() +} + +type CreateRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateRule request. 
You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // The Rule returned in the CreateRule response. + Rule *Rule `type:"structure"` +} + +// String returns the string representation +func (s CreateRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRuleOutput) GoString() string { + return s.String() +} + +type CreateSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description of the SizeConstraintSet. You can't change + // Name after you create a SizeConstraintSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSizeConstraintSetInput) GoString() string { + return s.String() +} + +type CreateSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // A SizeConstraintSet that contains no SizeConstraint objects. + SizeConstraintSet *SizeConstraintSet `type:"structure"` +} + +// String returns the string representation +func (s CreateSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// A request to create a SqlInjectionMatchSet. 
+type CreateSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // A friendly name or description for the SqlInjectionMatchSet that you're creating. + // You can't change Name after you create the SqlInjectionMatchSet. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// The response to a CreateSqlInjectionMatchSet request. +type CreateSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // A SqlInjectionMatchSet. + SqlInjectionMatchSet *SqlInjectionMatchSet `type:"structure"` +} + +// String returns the string representation +func (s CreateSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +type CreateWebACLInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The action that you want AWS WAF to take when a request doesn't match the + // criteria specified in any of the Rule objects that are associated with the + // WebACL. + DefaultAction *WafAction `type:"structure" required:"true"` + + // A friendly name or description for the metrics for this WebACL. 
The name + // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't + // contain whitespace. You can't change MetricName after you create the WebACL. + MetricName *string `type:"string" required:"true"` + + // A friendly name or description of the WebACL. You can't change Name after + // you create the WebACL. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebACLInput) GoString() string { + return s.String() +} + +type CreateWebACLOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` + + // The WebACL returned in the CreateWebACL response. + WebACL *WebACL `type:"structure"` +} + +// String returns the string representation +func (s CreateWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebACLOutput) GoString() string { + return s.String() +} + +type DeleteByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId of the ByteMatchSet that you want to delete. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // The value returned by the most recent call to GetChangeToken. 
+ ChangeToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteByteMatchSetInput) GoString() string { + return s.String() +} + +type DeleteByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteByteMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteByteMatchSetOutput) GoString() string { + return s.String() +} + +type DeleteIPSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The IPSetId of the IPSet that you want to delete. IPSetId is returned by + // CreateIPSet and by ListIPSets. + IPSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIPSetInput) GoString() string { + return s.String() +} + +type DeleteIPSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. 
+ ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIPSetOutput) GoString() string { + return s.String() +} + +type DeleteRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The RuleId of the Rule that you want to delete. RuleId is returned by CreateRule + // and by ListRules. + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleInput) GoString() string { + return s.String() +} + +type DeleteRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteRule request. You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleOutput) GoString() string { + return s.String() +} + +type DeleteSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The SizeConstraintSetId of the SizeConstraintSet that you want to delete. + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. 
+ SizeConstraintSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSizeConstraintSetInput) GoString() string { + return s.String() +} + +type DeleteSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// A request to delete a SqlInjectionMatchSet from AWS WAF. +type DeleteSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to delete. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// The response to a request to delete a SqlInjectionMatchSet from AWS WAF. +type DeleteSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteSqlInjectionMatchSet request. 
+ // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +type DeleteWebACLInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The WebACLId of the WebACL that you want to delete. WebACLId is returned + // by CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWebACLInput) GoString() string { + return s.String() +} + +type DeleteWebACLOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s DeleteWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWebACLOutput) GoString() string { + return s.String() +} + +// Specifies where in a web request to look for TargetString. +type FieldToMatch struct { + _ struct{} `type:"structure"` + + // When the value of Type is HEADER, enter the name of the header that you want + // AWS WAF to search, for example, User-Agent or Referer. If the value of Type + // is any other value, omit Data. + // + // The name of the header is not case sensitive. 
+ Data *string `type:"string"` + + // The part of the web request that you want AWS WAF to search for a specified + // string. Parts of a request that you can search include the following: + // + // HEADER: A specified request header, for example, the value of the User-Agent + // or Referer header. If you choose HEADER for the type, specify the name of + // the header in Data. METHOD: The HTTP method, which indicated the type of + // operation that the request is asking the origin to perform. Amazon CloudFront + // supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, + // and PUT. QUERY_STRING: A query string, which is the part of a URL that appears + // after a ? character, if any. URI: The part of a web request that identifies + // a resource, for example, /images/daily-ad.jpg. BODY: The part of a request + // that contains any additional data that you want to send to your web server + // as the HTTP request body, such as data from a form. The request body immediately + // follows the request headers. Note that only the first 8192 bytes of the request + // body are forwarded to AWS WAF for inspection. To allow or block requests + // based on the length of the body, you can create a size constraint set. For + // more information, see CreateSizeConstraintSet. + Type *string `type:"string" required:"true" enum:"MatchFieldType"` +} + +// String returns the string representation +func (s FieldToMatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FieldToMatch) GoString() string { + return s.String() +} + +type GetByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId of the ByteMatchSet that you want to get. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. 
+ ByteMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetByteMatchSetInput) GoString() string { + return s.String() +} + +type GetByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the ByteMatchSet that you specified in the GetByteMatchSet + // request. For more information, see the following topics: + // + // ByteMatchSet: Contains ByteMatchSetId, ByteMatchTuples, and Name ByteMatchTuples: + // Contains an array of ByteMatchTuple objects. Each ByteMatchTuple object contains + // FieldToMatch, PositionalConstraint, TargetString, and TextTransformation + // FieldToMatch: Contains Data and Type + ByteMatchSet *ByteMatchSet `type:"structure"` +} + +// String returns the string representation +func (s GetByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetByteMatchSetOutput) GoString() string { + return s.String() +} + +type GetChangeTokenInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetChangeTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenInput) GoString() string { + return s.String() +} + +type GetChangeTokenOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used in the request. Use this value in a GetChangeTokenStatus + // request to get the current status of the request. 
+ ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s GetChangeTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenOutput) GoString() string { + return s.String() +} + +type GetChangeTokenStatusInput struct { + _ struct{} `type:"structure"` + + // The change token for which you want to get the status. This change token + // was previously returned in the GetChangeToken response. + ChangeToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetChangeTokenStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenStatusInput) GoString() string { + return s.String() +} + +type GetChangeTokenStatusOutput struct { + _ struct{} `type:"structure"` + + // The status of the change token. + ChangeTokenStatus *string `type:"string" enum:"ChangeTokenStatus"` +} + +// String returns the string representation +func (s GetChangeTokenStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenStatusOutput) GoString() string { + return s.String() +} + +type GetIPSetInput struct { + _ struct{} `type:"structure"` + + // The IPSetId of the IPSet that you want to get. IPSetId is returned by CreateIPSet + // and by ListIPSets. + IPSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIPSetInput) GoString() string { + return s.String() +} + +type GetIPSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPSet that you specified in the GetIPSet request. 
For + // more information, see the following topics: + // + // IPSet: Contains IPSetDescriptors, IPSetId, and Name IPSetDescriptors: Contains + // an array of IPSetDescriptor objects. Each IPSetDescriptor object contains + // Type and Value + IPSet *IPSet `type:"structure"` +} + +// String returns the string representation +func (s GetIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIPSetOutput) GoString() string { + return s.String() +} + +type GetRuleInput struct { + _ struct{} `type:"structure"` + + // The RuleId of the Rule that you want to get. RuleId is returned by CreateRule + // and by ListRules. + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRuleInput) GoString() string { + return s.String() +} + +type GetRuleOutput struct { + _ struct{} `type:"structure"` + + // Information about the Rule that you specified in the GetRule request. For + // more information, see the following topics: + // + // Rule: Contains MetricName, Name, an array of Predicate objects, and RuleId + // Predicate: Each Predicate object contains DataId, Negated, and Type + Rule *Rule `type:"structure"` +} + +// String returns the string representation +func (s GetRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRuleOutput) GoString() string { + return s.String() +} + +type GetSampledRequestsInput struct { + _ struct{} `type:"structure"` + + // The number of requests that you want AWS WAF to return from among the first + // 5,000 requests that your AWS resource received during the time range. If + // your resource received fewer requests than the value of MaxItems, GetSampledRequests + // returns information about all of them. 
+ MaxItems *int64 `min:"1" type:"long" required:"true"` + + // RuleId is one of two values: + // + // The RuleId of the Rule for which you want GetSampledRequests to return + // a sample of requests. Default_Action, which causes GetSampledRequests to + // return a sample of the requests that didn't match any of the rules in the + // specified WebACL. + RuleId *string `min:"1" type:"string" required:"true"` + + // The start date and time and the end date and time of the range for which + // you want GetSampledRequests to return a sample of requests. Specify the date + // and time in Unix time format (in seconds). You can specify any time range + // in the previous three hours. + TimeWindow *TimeWindow `type:"structure" required:"true"` + + // The WebACLId of the WebACL for which you want GetSampledRequests to return + // a sample of requests. + WebAclId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSampledRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSampledRequestsInput) GoString() string { + return s.String() +} + +type GetSampledRequestsOutput struct { + _ struct{} `type:"structure"` + + // The total number of requests from which GetSampledRequests got a sample of + // MaxItems requests. If PopulationSize is less than MaxItems, the sample includes + // every request that your AWS resource received during the specified time range. + PopulationSize *int64 `type:"long"` + + // A complex type that contains detailed information about each of the requests + // in the sample. + SampledRequests []*SampledHTTPRequest `type:"list"` + + // Usually, TimeWindow is the time range that you specified in the GetSampledRequests + // request. 
However, if your AWS resource received more than 5,000 requests + // during the time range that you specified in the request, GetSampledRequests + // returns the time range for the first 5,000 requests. + TimeWindow *TimeWindow `type:"structure"` +} + +// String returns the string representation +func (s GetSampledRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSampledRequestsOutput) GoString() string { + return s.String() +} + +type GetSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The SizeConstraintSetId of the SizeConstraintSet that you want to get. SizeConstraintSetId + // is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSizeConstraintSetInput) GoString() string { + return s.String() +} + +type GetSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the SizeConstraintSet that you specified in the GetSizeConstraintSet + // request. For more information, see the following topics: + // + // SizeConstraintSet: Contains SizeConstraintSetId, SizeConstraints, and Name + // SizeConstraints: Contains an array of SizeConstraint objects. Each SizeConstraint + // object contains FieldToMatch, TextTransformation, ComparisonOperator, and + // Size FieldToMatch: Contains Data and Type + SizeConstraintSet *SizeConstraintSet `type:"structure"` +} + +// String returns the string representation +func (s GetSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// A request to get a SqlInjectionMatchSet. 
+type GetSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to get. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// The response to a GetSqlInjectionMatchSet request. +type GetSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the SqlInjectionMatchSet that you specified in the GetSqlInjectionMatchSet + // request. For more information, see the following topics: + // + // SqlInjectionMatchSet: Contains Name, SqlInjectionMatchSetId, and an array + // of SqlInjectionMatchTuple objects SqlInjectionMatchTuple: Each SqlInjectionMatchTuple + // object contains FieldToMatch and TextTransformation FieldToMatch: Contains + // Data and Type + SqlInjectionMatchSet *SqlInjectionMatchSet `type:"structure"` +} + +// String returns the string representation +func (s GetSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +type GetWebACLInput struct { + _ struct{} `type:"structure"` + + // The WebACLId of the WebACL that you want to get. WebACLId is returned by + // CreateWebACL and by ListWebACLs. 
+ WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWebACLInput) GoString() string { + return s.String() +} + +type GetWebACLOutput struct { + _ struct{} `type:"structure"` + + // Information about the WebACL that you specified in the GetWebACL request. + // For more information, see the following topics: + // + // WebACL: Contains DefaultAction, MetricName, Name, an array of Rule objects, + // and WebACLId DefaultAction (Data type is WafAction): Contains Type Rules: + // Contains an array of ActivatedRule objects, which contain Action, Priority, + // and RuleId Action: Contains Type + WebACL *WebACL `type:"structure"` +} + +// String returns the string representation +func (s GetWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetWebACLOutput) GoString() string { + return s.String() +} + +// The response from a GetSampledRequests request includes an HTTPHeader complex +// type that appears as Headers in the response syntax. HTTPHeader contains +// the names and values of all of the headers that appear in one of the web +// requests that were returned by GetSampledRequests. +type HTTPHeader struct { + _ struct{} `type:"structure"` + + // The name of one of the headers in the sampled web request. + Name *string `type:"string"` + + // The value of one of the headers in the sampled web request. + Value *string `type:"string"` +} + +// String returns the string representation +func (s HTTPHeader) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HTTPHeader) GoString() string { + return s.String() +} + +// The response from a GetSampledRequests request includes an HTTPRequest complex +// type that appears as Request in the response syntax. 
HTTPRequest contains +// information about one of the web requests that were returned by GetSampledRequests. +type HTTPRequest struct { + _ struct{} `type:"structure"` + + // The IP address that the request originated from. If the WebACL is associated + // with a CloudFront distribution, this is the value of one of the following + // fields in CloudFront access logs: + // + // c-ip, if the viewer did not use an HTTP proxy or a load balancer to send + // the request x-forwarded-for, if the viewer did use an HTTP proxy or a load + // balancer to send the request + ClientIP *string `type:"string"` + + // The two-letter country code for the country that the request originated from. + // For a current list of country codes, see the Wikipedia entry ISO 3166-1 alpha-2 + // (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2). + Country *string `type:"string"` + + // The HTTP version specified in the sampled web request, for example, HTTP/1.1. + HTTPVersion *string `type:"string"` + + // A complex type that contains two values for each header in the sampled web + // request: the name of the header and the value of the header. + Headers []*HTTPHeader `type:"list"` + + // The HTTP method specified in the sampled web request. CloudFront supports + // the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT. + Method *string `type:"string"` + + // The part of a web request that identifies the resource, for example, /images/daily-ad.jpg. + URI *string `type:"string"` +} + +// String returns the string representation +func (s HTTPRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HTTPRequest) GoString() string { + return s.String() +} + +// Contains one or more IP addresses or blocks of IP addresses specified in +// Classless Inter-Domain Routing (CIDR) notation. To specify an individual +// IP address, you specify the four-part IP address followed by a /32, for example, +// 192.0.2.0/31. 
To block a range of IP addresses, you can specify a /24, a +// /16, or a /8 CIDR. For more information about CIDR notation, perform an Internet +// search on cidr notation. +type IPSet struct { + _ struct{} `type:"structure"` + + // The IP address type (IPV4) and the IP address range (in CIDR notation) that + // web requests originate from. If the WebACL is associated with a CloudFront + // distribution, this is the value of one of the following fields in CloudFront + // access logs: + // + // c-ip, if the viewer did not use an HTTP proxy or a load balancer to send + // the request x-forwarded-for, if the viewer did use an HTTP proxy or a load + // balancer to send the request + IPSetDescriptors []*IPSetDescriptor `type:"list" required:"true"` + + // The IPSetId for an IPSet. You use IPSetId to get information about an IPSet + // (see GetIPSet), update an IPSet (see UpdateIPSet), insert an IPSet into a + // Rule or delete one from a Rule (see UpdateRule), and delete an IPSet from + // AWS WAF (see DeleteIPSet). + // + // IPSetId is returned by CreateIPSet and by ListIPSets. + IPSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the IPSet. You can't change the name of + // an IPSet after you create it. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s IPSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSet) GoString() string { + return s.String() +} + +// Specifies the IP address type (IPV4) and the IP address range (in CIDR format) +// that web requests originate from. +type IPSetDescriptor struct { + _ struct{} `type:"structure"` + + // Specify IPV4. + Type *string `type:"string" required:"true" enum:"IPSetDescriptorType"` + + // Specify an IPv4 address by using CIDR notation. 
For example: + // + // To configure AWS WAF to allow, block, or count requests that originated + // from the IP address 192.0.2.44, specify 192.0.2.44/32. To configure AWS WAF + // to allow, block, or count requests that originated from IP addresses from + // 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24. AWS WAF supports only /8, + // /16, /24, and /32 IP addresses. + // + // For more information about CIDR notation, see the Wikipedia entry Classless + // Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IPSetDescriptor) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSetDescriptor) GoString() string { + return s.String() +} + +// Contains the identifier and the name of the IPSet. +type IPSetSummary struct { + _ struct{} `type:"structure"` + + // The IPSetId for an IPSet. You can use IPSetId in a GetIPSet request to get + // detailed information about an IPSet. + IPSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the IPSet. You can't change the name of + // an IPSet after you create it. + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s IPSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSetSummary) GoString() string { + return s.String() +} + +// Specifies the type of update to perform to an IPSet with UpdateIPSet. +type IPSetUpdate struct { + _ struct{} `type:"structure"` + + // Specifies whether to insert or delete an IP address with UpdateIPSet. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // The IP address type (IPV4) and the IP address range (in CIDR notation) that + // web requests originate from. 
+ IPSetDescriptor *IPSetDescriptor `type:"structure" required:"true"` +} + +// String returns the string representation +func (s IPSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSetUpdate) GoString() string { + return s.String() +} + +type ListByteMatchSetsInput struct { + _ struct{} `type:"structure"` + + // Specifies the number of ByteMatchSet objects that you want AWS WAF to return + // for this request. If you have more ByteMatchSets objects than the number + // you specify for Limit, the response includes a NextMarker value that you + // can use to get another batch of ByteMatchSet objects. + Limit *int64 `min:"1" type:"integer" required:"true"` + + // If you specify a value for Limit and you have more ByteMatchSets than the + // value of Limit, AWS WAF returns a NextMarker value in the response that allows + // you to list another group of ByteMatchSets. For the second and subsequent + // ListByteMatchSets requests, specify the value of NextMarker from the previous + // response to get information about another batch of ByteMatchSets. + NextMarker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListByteMatchSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListByteMatchSetsInput) GoString() string { + return s.String() +} + +type ListByteMatchSetsOutput struct { + _ struct{} `type:"structure"` + + // An array of ByteMatchSetSummary objects. + ByteMatchSets []*ByteMatchSetSummary `type:"list"` + + // If you have more ByteMatchSet objects than the number that you specified + // for Limit in the request, the response includes a NextMarker value. To list + // more ByteMatchSet objects, submit another ListByteMatchSets request, and + // specify the NextMarker value from the response in the NextMarker value in + // the next request. 
+ NextMarker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListByteMatchSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListByteMatchSetsOutput) GoString() string { + return s.String() +} + +type ListIPSetsInput struct { + _ struct{} `type:"structure"` + + // Specifies the number of IPSet objects that you want AWS WAF to return for + // this request. If you have more IPSet objects than the number you specify + // for Limit, the response includes a NextMarker value that you can use to get + // another batch of IPSet objects. + Limit *int64 `min:"1" type:"integer" required:"true"` + + // If you specify a value for Limit and you have more IPSets than the value + // of Limit, AWS WAF returns a NextMarker value in the response that allows + // you to list another group of IPSets. For the second and subsequent ListIPSets + // requests, specify the value of NextMarker from the previous response to get + // information about another batch of ByteMatchSets. + NextMarker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIPSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIPSetsInput) GoString() string { + return s.String() +} + +type ListIPSetsOutput struct { + _ struct{} `type:"structure"` + + // An array of IPSetSummary objects. + IPSets []*IPSetSummary `type:"list"` + + // If you have more IPSet objects than the number that you specified for Limit + // in the request, the response includes a NextMarker value. To list more IPSet + // objects, submit another ListIPSets request, and specify the NextMarker value + // from the response in the NextMarker value in the next request. 
+ NextMarker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListIPSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIPSetsOutput) GoString() string { + return s.String() +} + +type ListRulesInput struct { + _ struct{} `type:"structure"` + + // Specifies the number of Rules that you want AWS WAF to return for this request. + // If you have more Rules than the number that you specify for Limit, the response + // includes a NextMarker value that you can use to get another batch of Rules. + Limit *int64 `min:"1" type:"integer" required:"true"` + + // If you specify a value for Limit and you have more Rules than the value of + // Limit, AWS WAF returns a NextMarker value in the response that allows you + // to list another group of Rules. For the second and subsequent ListRules requests, + // specify the value of NextMarker from the previous response to get information + // about another batch of Rules. + NextMarker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesInput) GoString() string { + return s.String() +} + +type ListRulesOutput struct { + _ struct{} `type:"structure"` + + // If you have more Rules than the number that you specified for Limit in the + // request, the response includes a NextMarker value. To list more Rules, submit + // another ListRules request, and specify the NextMarker value from the response + // in the NextMarker value in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of RuleSummary objects. 
+ Rules []*RuleSummary `type:"list"` +} + +// String returns the string representation +func (s ListRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRulesOutput) GoString() string { + return s.String() +} + +type ListSizeConstraintSetsInput struct { + _ struct{} `type:"structure"` + + // Specifies the number of SizeConstraintSet objects that you want AWS WAF to + // return for this request. If you have more SizeConstraintSets objects than + // the number you specify for Limit, the response includes a NextMarker value + // that you can use to get another batch of SizeConstraintSet objects. + Limit *int64 `min:"1" type:"integer" required:"true"` + + // If you specify a value for Limit and you have more SizeConstraintSets than + // the value of Limit, AWS WAF returns a NextMarker value in the response that + // allows you to list another group of SizeConstraintSets. For the second and + // subsequent ListSizeConstraintSets requests, specify the value of NextMarker + // from the previous response to get information about another batch of SizeConstraintSets. + NextMarker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSizeConstraintSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSizeConstraintSetsInput) GoString() string { + return s.String() +} + +type ListSizeConstraintSetsOutput struct { + _ struct{} `type:"structure"` + + // If you have more SizeConstraintSet objects than the number that you specified + // for Limit in the request, the response includes a NextMarker value. To list + // more SizeConstraintSet objects, submit another ListSizeConstraintSets request, + // and specify the NextMarker value from the response in the NextMarker value + // in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of SizeConstraintSetSummary objects. 
+ SizeConstraintSets []*SizeConstraintSetSummary `type:"list"` +} + +// String returns the string representation +func (s ListSizeConstraintSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSizeConstraintSetsOutput) GoString() string { + return s.String() +} + +// A request to list the SqlInjectionMatchSet objects created by the current +// AWS account. +type ListSqlInjectionMatchSetsInput struct { + _ struct{} `type:"structure"` + + // Specifies the number of SqlInjectionMatchSet objects that you want AWS WAF + // to return for this request. If you have more SqlInjectionMatchSet objects + // than the number you specify for Limit, the response includes a NextMarker + // value that you can use to get another batch of Rules. + Limit *int64 `min:"1" type:"integer" required:"true"` + + // If you specify a value for Limit and you have more SqlInjectionMatchSet objects + // than the value of Limit, AWS WAF returns a NextMarker value in the response + // that allows you to list another group of SqlInjectionMatchSets. For the second + // and subsequent ListSqlInjectionMatchSets requests, specify the value of NextMarker + // from the previous response to get information about another batch of SqlInjectionMatchSets. + NextMarker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSqlInjectionMatchSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSqlInjectionMatchSetsInput) GoString() string { + return s.String() +} + +// The response to a ListSqlInjectionMatchSets request. +type ListSqlInjectionMatchSetsOutput struct { + _ struct{} `type:"structure"` + + // If you have more SqlInjectionMatchSet objects than the number that you specified + // for Limit in the request, the response includes a NextMarker value. 
To list + // more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets + // request, and specify the NextMarker value from the response in the NextMarker + // value in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of SqlInjectionMatchSetSummary objects. + SqlInjectionMatchSets []*SqlInjectionMatchSetSummary `type:"list"` +} + +// String returns the string representation +func (s ListSqlInjectionMatchSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSqlInjectionMatchSetsOutput) GoString() string { + return s.String() +} + +type ListWebACLsInput struct { + _ struct{} `type:"structure"` + + // Specifies the number of WebACL objects that you want AWS WAF to return for + // this request. If you have more WebACL objects than the number that you specify + // for Limit, the response includes a NextMarker value that you can use to get + // another batch of WebACL objects. + Limit *int64 `min:"1" type:"integer" required:"true"` + + // If you specify a value for Limit and you have more WebACL objects than the + // number that you specify for Limit, AWS WAF returns a NextMarker value in + // the response that allows you to list another group of WebACL objects. For + // the second and subsequent ListWebACLs requests, specify the value of NextMarker + // from the previous response to get information about another batch of WebACL + // objects. + NextMarker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListWebACLsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWebACLsInput) GoString() string { + return s.String() +} + +type ListWebACLsOutput struct { + _ struct{} `type:"structure"` + + // If you have more WebACL objects than the number that you specified for Limit + // in the request, the response includes a NextMarker value. 
To list more WebACL + // objects, submit another ListWebACLs request, and specify the NextMarker value + // from the response in the NextMarker value in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of WebACLSummary objects. + WebACLs []*WebACLSummary `type:"list"` +} + +// String returns the string representation +func (s ListWebACLsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWebACLsOutput) GoString() string { + return s.String() +} + +// Specifies the ByteMatchSet, IPSet, and SqlInjectionMatchSet objects that +// you want to add to a Rule and, for each object, indicates whether you want +// to negate the settings, for example, requests that do NOT originate from +// the IP address 192.0.2.44. +type Predicate struct { + _ struct{} `type:"structure"` + + // A unique identifier for a predicate in a Rule, such as ByteMatchSetId or + // IPSetId. The ID is returned by the corresponding Create or List command. + DataId *string `min:"1" type:"string" required:"true"` + + // Set Negated to False if you want AWS WAF to allow, block, or count requests + // based on the settings in the specified ByteMatchSet, IPSet, or SqlInjectionMatchSet. + // For example, if an IPSet includes the IP address 192.0.2.44, AWS WAF will + // allow or block requests based on that IP address. + // + // Set Negated to True if you want AWS WAF to allow or block a request based + // on the negation of the settings in the ByteMatchSet, IPSet, or SqlInjectionMatchSet. + // For example, if an IPSet includes the IP address 192.0.2.44, AWS WAF will + // allow, block, or count requests based on all IP addresses except 192.0.2.44. + Negated *bool `type:"boolean" required:"true"` + + // The type of predicate in a Rule, such as ByteMatchSet or IPSet. 
+ Type *string `type:"string" required:"true" enum:"PredicateType"` +} + +// String returns the string representation +func (s Predicate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Predicate) GoString() string { + return s.String() +} + +// A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects +// that identify the web requests that you want to allow, block, or count. For +// example, you might create a Rule that includes the following predicates: +// +// An IPSet that causes AWS WAF to search for web requests that originate +// from the IP address 192.0.2.44 A ByteMatchSet that causes AWS WAF to search +// for web requests for which the value of the User-Agent header is BadBot. +// To match the settings in this Rule, a request must originate from 192.0.2.44 +// AND include a User-Agent header for which the value is BadBot. +type Rule struct { + _ struct{} `type:"structure"` + + MetricName *string `type:"string"` + + // The friendly name or description for the Rule. You can't change the name + // of a Rule after you create it. + Name *string `min:"1" type:"string"` + + // The Predicates object contains one Predicate element for each ByteMatchSet, + // IPSet, or SqlInjectionMatchSet object that you want to include in a Rule. + Predicates []*Predicate `type:"list" required:"true"` + + // A unique identifier for a Rule. You use RuleId to get more information about + // a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into + // a WebACL or delete a one from a WebACL (see UpdateWebACL), or delete a Rule + // from AWS WAF (see DeleteRule). + // + // RuleId is returned by CreateRule and by ListRules. 
+ RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Contains the identifier and the friendly name or description of the Rule. +type RuleSummary struct { + _ struct{} `type:"structure"` + + // A friendly name or description of the Rule. You can't change the name of + // a Rule after you create it. + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a Rule. You use RuleId to get more information about + // a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into + // a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule + // from AWS WAF (see DeleteRule). + // + // RuleId is returned by CreateRule and by ListRules. + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RuleSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RuleSummary) GoString() string { + return s.String() +} + +// Specifies a Predicate (such as an IPSet) and indicates whether you want to +// add it to a Rule or delete it from a Rule. +type RuleUpdate struct { + _ struct{} `type:"structure"` + + // Specify INSERT to add a Predicate to a Rule. Use DELETE to remove a Predicate + // from a Rule. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // The ID of the Predicate (such as an IPSet) that you want to add to a Rule. 
+ Predicate *Predicate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RuleUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RuleUpdate) GoString() string { + return s.String() +} + +// The response from a GetSampledRequests request includes a SampledHTTPRequests +// complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests +// contains one SampledHTTPRequest object for each web request that is returned +// by GetSampledRequests. +type SampledHTTPRequest struct { + _ struct{} `type:"structure"` + + // The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT. + Action *string `type:"string"` + + // A complex type that contains detailed information about the request. + Request *HTTPRequest `type:"structure" required:"true"` + + // The time at which AWS WAF received the request from your AWS resource, in + // Unix time format (in seconds). + Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A value that indicates how one result in the response relates proportionally + // to other results in the response. A result that has a weight of 2 represents + // roughly twice as many CloudFront web requests as a result that has a weight + // of 1. + Weight *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s SampledHTTPRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SampledHTTPRequest) GoString() string { + return s.String() +} + +// Specifies a constraint on the size of a part of the web request. AWS WAF +// uses the Size, ComparisonOperator, and FieldToMatch to build an expression +// in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If +// that expression is true, the SizeConstraint is considered to match. 
+type SizeConstraint struct { + _ struct{} `type:"structure"` + + // The type of comparison you want AWS WAF to perform. AWS WAF uses this in + // combination with the provided Size and FieldToMatch to build an expression + // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If + // that expression is true, the SizeConstraint is considered to match. + // + // EQ: Used to test if the Size is equal to the size of the FieldToMatch + // + // NE: Used to test if the Size is not equal to the size of the FieldToMatch + // + // LE: Used to test if the Size is less than or equal to the size of the FieldToMatch + // + // LT: Used to test if the Size is strictly less than the size of the FieldToMatch + // + // GE: Used to test if the Size is greater than or equal to the size of the + // FieldToMatch + // + // GT: Used to test if the Size is strictly greater than the size of the FieldToMatch + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // Specifies where in a web request to look for TargetString. + FieldToMatch *FieldToMatch `type:"structure" required:"true"` + + // The size in bytes that you want AWS WAF to compare against the size of the + // specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator + // and FieldToMatch to build an expression in the form of "Size ComparisonOperator + // size in bytes of FieldToMatch". If that expression is true, the SizeConstraint + // is considered to match. + // + // Valid values for size are 0 - 21474836480 bytes (0 - 20 GB). + // + // If you specify URI for the value of Type, the / in the URI counts as one + // character. For example, the URI /logo.jpg is nine characters long. + Size *int64 `type:"long" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass AWS WAF. 
If you specify a transformation, + // AWS WAF performs the transformation on FieldToMatch before inspecting a request + // for a match. + // + // Note that if you choose BODY for the value of Type, you must choose NONE + // for TextTransformation because CloudFront forwards only the first 8192 bytes + // for inspection. + // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. + // + // CMD_LINE + // + // When you're concerned that attackers are injecting an operating system command + // line command and using unusual formatting to disguise some or all of the + // command, use this option to perform the following transformations: + // + // Delete the following characters: \ " ' ^ Delete spaces before the following + // characters: / ( Replace the following characters with a space: , ; Replace + // multiple spaces with one space Convert uppercase letters (A-Z) to lowercase + // (a-z) COMPRESS_WHITE_SPACE + // + // Use this option to replace the following characters with a space character + // (decimal 32): + // + // \f, formfeed, decimal 12 \t, tab, decimal 9 \n, newline, decimal 10 \r, + // carriage return, decimal 13 \v, vertical tab, decimal 11 non-breaking space, + // decimal 160 COMPRESS_WHITE_SPACE also replaces multiple spaces with one + // space. + // + // HTML_ENTITY_DECODE + // + // Use this option to replace HTML-encoded characters with unencoded characters. 
+ // HTML_ENTITY_DECODE performs the following operations: + // + // Replaces (ampersand)quot; with " Replaces (ampersand)nbsp; with a non-breaking + // space, decimal 160 Replaces (ampersand)lt; with a "less than" symbol Replaces + // (ampersand)gt; with > Replaces characters that are represented in hexadecimal + // format, (ampersand)#xhhhh;, with the corresponding characters Replaces characters + // that are represented in decimal format, (ampersand)#nnnn;, with the corresponding + // characters LOWERCASE + // + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // + // URL_DECODE + // + // Use this option to decode a URL-encoded value. + TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` +} + +// String returns the string representation +func (s SizeConstraint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SizeConstraint) GoString() string { + return s.String() +} + +// A complex type that contains SizeConstraint objects, which specify the parts +// of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet +// contains more than one SizeConstraint object, a request only needs to match +// one constraint to be considered a match. +type SizeConstraintSet struct { + _ struct{} `type:"structure"` + + // The name, if any, of the SizeConstraintSet. + Name *string `min:"1" type:"string"` + + // A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId + // to get information about a SizeConstraintSet (see GetSizeConstraintSet), + // update a SizeConstraintSet (see UpdateSizeConstraintSet, insert a SizeConstraintSet + // into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet + // from AWS WAF (see DeleteSizeConstraintSet). + // + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. 
+ SizeConstraintSetId *string `min:"1" type:"string" required:"true"` + + // Specifies the parts of web requests that you want to inspect the size of. + SizeConstraints []*SizeConstraint `type:"list" required:"true"` +} + +// String returns the string representation +func (s SizeConstraintSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SizeConstraintSet) GoString() string { + return s.String() +} + +// The Id and Name of a SizeConstraintSet. +type SizeConstraintSetSummary struct { + _ struct{} `type:"structure"` + + // The name of the SizeConstraintSet, if any. + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId + // to get information about a SizeConstraintSet (see GetSizeConstraintSet), + // update a SizeConstraintSet (see UpdateSizeConstraintSet, insert a SizeConstraintSet + // into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet + // from AWS WAF (see DeleteSizeConstraintSet). + // + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SizeConstraintSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SizeConstraintSetSummary) GoString() string { + return s.String() +} + +// Specifies the part of a web request that you want to inspect the size of +// and indicates whether you want to add the specification to a SizeConstraintSet +// or delete it from a SizeConstraintSet. +type SizeConstraintSetUpdate struct { + _ struct{} `type:"structure"` + + // Specify INSERT to add a SizeConstraintSetUpdate to a SizeConstraintSet. Use + // DELETE to remove a SizeConstraintSetUpdate from a SizeConstraintSet. 
+ Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Specifies a constraint on the size of a part of the web request. AWS WAF + // uses the Size, ComparisonOperator, and FieldToMatch to build an expression + // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If + // that expression is true, the SizeConstraint is considered to match. + SizeConstraint *SizeConstraint `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SizeConstraintSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SizeConstraintSetUpdate) GoString() string { + return s.String() +} + +// A complex type that contains SqlInjectionMatchTuple objects, which specify +// the parts of web requests that you want AWS WAF to inspect for snippets of +// malicious SQL code and, if you want AWS WAF to inspect a header, the name +// of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple +// object, a request needs to include snippets of SQL code in only one of the +// specified parts of the request to be considered a match. +type SqlInjectionMatchSet struct { + _ struct{} `type:"structure"` + + // The name, if any, of the SqlInjectionMatchSet. + Name *string `min:"1" type:"string"` + + // A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId + // to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), + // update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet, insert a SqlInjectionMatchSet + // into a Rule or delete one from a Rule (see UpdateRule), and delete a SqlInjectionMatchSet + // from AWS WAF (see DeleteSqlInjectionMatchSet). + // + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by + // ListSqlInjectionMatchSets. 
+ SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` + + // Specifies the parts of web requests that you want to inspect for snippets + // of malicious SQL code. + SqlInjectionMatchTuples []*SqlInjectionMatchTuple `type:"list" required:"true"` +} + +// String returns the string representation +func (s SqlInjectionMatchSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlInjectionMatchSet) GoString() string { + return s.String() +} + +// The Id and Name of a SqlInjectionMatchSet. +type SqlInjectionMatchSetSummary struct { + _ struct{} `type:"structure"` + + // The name of the SqlInjectionMatchSet, if any, specified by Id. + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId + // to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), + // update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet, insert a SqlInjectionMatchSet + // into a Rule or delete one from a Rule (see UpdateRule), and delete a SqlInjectionMatchSet + // from AWS WAF (see DeleteSqlInjectionMatchSet). + // + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by + // ListSqlInjectionMatchSets. + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SqlInjectionMatchSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlInjectionMatchSetSummary) GoString() string { + return s.String() +} + +// Specifies the part of a web request that you want to inspect for snippets +// of malicious SQL code and indicates whether you want to add the specification +// to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet. 
+type SqlInjectionMatchSetUpdate struct { + _ struct{} `type:"structure"` + + // Specify INSERT to add a SqlInjectionMatchSetUpdate to a SqlInjectionMatchSet. + // Use DELETE to remove a SqlInjectionMatchSetUpdate from a SqlInjectionMatchSet. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Specifies the part of a web request that you want AWS WAF to inspect for + // snippets of malicious SQL code and, if you want AWS WAF to inspect a header, + // the name of the header. + SqlInjectionMatchTuple *SqlInjectionMatchTuple `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SqlInjectionMatchSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlInjectionMatchSetUpdate) GoString() string { + return s.String() +} + +// Specifies the part of a web request that you want AWS WAF to inspect for +// snippets of malicious SQL code and, if you want AWS WAF to inspect a header, +// the name of the header. +type SqlInjectionMatchTuple struct { + _ struct{} `type:"structure"` + + // Specifies where in a web request to look for TargetString. + FieldToMatch *FieldToMatch `type:"structure" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, + // AWS WAF performs the transformation on FieldToMatch before inspecting a request + // for a match. 
+ // + // CMD_LINE + // + // When you're concerned that attackers are injecting an operating system commandline + // command and using unusual formatting to disguise some or all of the command, + // use this option to perform the following transformations: + // + // Delete the following characters: \ " ' ^ Delete spaces before the following + // characters: / ( Replace the following characters with a space: , ; Replace + // multiple spaces with one space Convert uppercase letters (A-Z) to lowercase + // (a-z) COMPRESS_WHITE_SPACE + // + // Use this option to replace the following characters with a space character + // (decimal 32): + // + // \f, formfeed, decimal 12 \t, tab, decimal 9 \n, newline, decimal 10 \r, + // carriage return, decimal 13 \v, vertical tab, decimal 11 non-breaking space, + // decimal 160 COMPRESS_WHITE_SPACE also replaces multiple spaces with one + // space. + // + // HTML_ENTITY_DECODE + // + // Use this option to replace HTML-encoded characters with unencoded characters. + // HTML_ENTITY_DECODE performs the following operations: + // + // Replaces (ampersand)quot; with " Replaces (ampersand)nbsp; with a non-breaking + // space, decimal 160 Replaces (ampersand)lt; with a "less than" symbol Replaces + // (ampersand)gt; with > Replaces characters that are represented in hexadecimal + // format, (ampersand)#xhhhh;, with the corresponding characters Replaces characters + // that are represented in decimal format, (ampersand)#nnnn;, with the corresponding + // characters LOWERCASE + // + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // + // URL_DECODE + // + // Use this option to decode a URL-encoded value. + // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. 
+ TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` +} + +// String returns the string representation +func (s SqlInjectionMatchTuple) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlInjectionMatchTuple) GoString() string { + return s.String() +} + +// In a GetSampledRequests request, the StartTime and EndTime objects specify +// the time range for which you want AWS WAF to return a sample of web requests. +// +// In a GetSampledRequests response, the StartTime and EndTime objects specify +// the time range for which AWS WAF actually returned a sample of web requests. +// AWS WAF gets the specified number of requests from among the first 5,000 +// requests that your AWS resource receives during the specified time period. +// If your resource receives more than 5,000 requests during that period, AWS +// WAF stops sampling after the 5,000th request. In that case, EndTime is the +// time that AWS WAF received the 5,000th request. +type TimeWindow struct { + _ struct{} `type:"structure"` + + // The end of the time range from which you want GetSampledRequests to return + // a sample of the requests that your AWS resource received. You can specify + // any time range in the previous three hours. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The beginning of the time range from which you want GetSampledRequests to + // return a sample of the requests that your AWS resource received. You can + // specify any time range in the previous three hours. 
+ StartTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s TimeWindow) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeWindow) GoString() string { + return s.String() +} + +type UpdateByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId of the ByteMatchSet that you want to update. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // An array of ByteMatchSetUpdate objects that you want to insert into or delete + // from a ByteMatchSet. For more information, see the applicable data types: + // + // ByteMatchSetUpdate: Contains Action and ByteMatchTuple ByteMatchTuple: + // Contains FieldToMatch, PositionalConstraint, TargetString, and TextTransformation + // FieldToMatch: Contains Data and Type + Updates []*ByteMatchSetUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateByteMatchSetInput) GoString() string { + return s.String() +} + +type UpdateByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateByteMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. 
+ ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateByteMatchSetOutput) GoString() string { + return s.String() +} + +type UpdateIPSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The IPSetId of the IPSet that you want to update. IPSetId is returned by + // CreateIPSet and by ListIPSets. + IPSetId *string `min:"1" type:"string" required:"true"` + + // An array of IPSetUpdate objects that you want to insert into or delete from + // an IPSet. For more information, see the applicable data types: + // + // IPSetUpdate: Contains Action and IPSetDescriptor IPSetDescriptor: Contains + // Type and Value + Updates []*IPSetUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateIPSetInput) GoString() string { + return s.String() +} + +type UpdateIPSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateIPSetOutput) GoString() string { + return s.String() +} + +type UpdateRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. 
+ ChangeToken *string `type:"string" required:"true"` + + // The RuleId of the Rule that you want to update. RuleId is returned by CreateRule + // and by ListRules. + RuleId *string `min:"1" type:"string" required:"true"` + + // An array of RuleUpdate objects that you want to insert into or delete from + // a Rule. For more information, see the applicable data types: + // + // RuleUpdate: Contains Action and Predicate Predicate: Contains DataId, Negated, + // and Type FieldToMatch: Contains Data and Type + Updates []*RuleUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRuleInput) GoString() string { + return s.String() +} + +type UpdateRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateRule request. You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRuleOutput) GoString() string { + return s.String() +} + +type UpdateSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The SizeConstraintSetId of the SizeConstraintSet that you want to update. + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` + + // An array of SizeConstraintSetUpdate objects that you want to insert into + // or delete from a SizeConstraintSet. 
For more information, see the applicable + // data types: + // + // SizeConstraintSetUpdate: Contains Action and SizeConstraint SizeConstraint: + // Contains FieldToMatch, TextTransformation, ComparisonOperator, and Size FieldToMatch: + // Contains Data and Type + Updates []*SizeConstraintSetUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSizeConstraintSetInput) GoString() string { + return s.String() +} + +type UpdateSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// A request to update a SqlInjectionMatchSet. +type UpdateSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to update. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` + + // An array of SqlInjectionMatchSetUpdate objects that you want to insert into + // or delete from a SqlInjectionMatchSet. 
For more information, see the applicable + // data types: + // + // SqlInjectionMatchSetUpdate: Contains Action and SqlInjectionMatchTuple + // SqlInjectionMatchTuple: Contains FieldToMatch and TextTransformation FieldToMatch: + // Contains Data and Type + Updates []*SqlInjectionMatchSetUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// The response to an UpdateSqlInjectionMatchSets request. +type UpdateSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +type UpdateWebACLInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + ChangeToken *string `type:"string" required:"true"` + + // For the action that is associated with a rule in a WebACL, specifies the + // action that you want AWS WAF to perform when a web request matches all of + // the conditions in a rule. For the default action in a WebACL, specifies the + // action that you want AWS WAF to take when a web request doesn't match all + // of the conditions in any of the rules in a WebACL. + DefaultAction *WafAction `type:"structure"` + + // An array of updates to make to the WebACL. 
+ // + // An array of WebACLUpdate objects that you want to insert into or delete + // from a WebACL. For more information, see the applicable data types: + // + // WebACLUpdate: Contains Action and ActivatedRule ActivatedRule: Contains + // Action, Priority, and RuleId WafAction: Contains Type + Updates []*WebACLUpdate `type:"list"` + + // The WebACLId of the WebACL that you want to update. WebACLId is returned + // by CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWebACLInput) GoString() string { + return s.String() +} + +type UpdateWebACLOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `type:"string"` +} + +// String returns the string representation +func (s UpdateWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWebACLOutput) GoString() string { + return s.String() +} + +// For the action that is associated with a rule in a WebACL, specifies the +// action that you want AWS WAF to perform when a web request matches all of +// the conditions in a rule. For the default action in a WebACL, specifies the +// action that you want AWS WAF to take when a web request doesn't match all +// of the conditions in any of the rules in a WebACL. +type WafAction struct { + _ struct{} `type:"structure"` + + // Specifies how you want AWS WAF to respond to requests that match the settings + // in a Rule. 
Valid settings include the following: + // + // ALLOW: AWS WAF allows requests BLOCK: AWS WAF blocks requests COUNT: AWS + // WAF increments a counter of the requests that match all of the conditions + // in the rule. AWS WAF then continues to inspect the web request based on the + // remaining rules in the web ACL. You can't specify COUNT for the default action + // for a WebACL. + Type *string `type:"string" required:"true" enum:"WafActionType"` +} + +// String returns the string representation +func (s WafAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WafAction) GoString() string { + return s.String() +} + +// Contains the Rules that identify the requests that you want to allow, block, +// or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), +// and the action for each Rule that you add to a WebACL, for example, block +// requests from specified IP addresses or block requests from specified referrers. +// You also associate the WebACL with a CloudFront distribution to identify +// the requests that you want AWS WAF to filter. If you add more than one Rule +// to a WebACL, a request needs to match only one of the specifications to be +// allowed, blocked, or counted. For more information, see UpdateWebACL. +type WebACL struct { + _ struct{} `type:"structure"` + + // The action to perform if none of the Rules contained in the WebACL match. + // The action is specified by the WafAction object. + DefaultAction *WafAction `type:"structure" required:"true"` + + MetricName *string `type:"string"` + + // A friendly name or description of the WebACL. You can't change the name of + // a WebACL after you create it. + Name *string `min:"1" type:"string"` + + // An array that contains the action for each Rule in a WebACL, the priority + // of the Rule, and the ID of the Rule. + Rules []*ActivatedRule `type:"list" required:"true"` + + // A unique identifier for a WebACL. 
You use WebACLId to get information about + // a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL, and delete a + // WebACL from AWS WAF (see DeleteWebACL). + // + // WebACLId is returned by CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WebACL) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebACL) GoString() string { + return s.String() +} + +// Contains the identifier and the name or description of the WebACL. +type WebACLSummary struct { + _ struct{} `type:"structure"` + + // A friendly name or description of the WebACL. You can't change the name of + // a WebACL after you create it. + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a WebACL. You use WebACLId to get information about + // a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL, and delete a + // WebACL from AWS WAF (see DeleteWebACL). + // + // WebACLId is returned by CreateWebACL and by ListWebACLs. + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s WebACLSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebACLSummary) GoString() string { + return s.String() +} + +// Specifies whether to insert a Rule into or delete a Rule from a WebACL. +type WebACLUpdate struct { + _ struct{} `type:"structure"` + + // Specifies whether to insert a Rule into or delete a Rule from a WebACL. + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // The ActivatedRule object in an UpdateWebACL request specifies a Rule that + // you want to insert or delete, the priority of the Rule in the WebACL, and + // the action that you want AWS WAF to take when a web request matches the Rule + // (ALLOW, BLOCK, or COUNT). 
+ // + // To specify whether to insert or delete a Rule, use the Action parameter + // in the WebACLUpdate data type. + ActivatedRule *ActivatedRule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s WebACLUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebACLUpdate) GoString() string { + return s.String() +} + +const ( + // @enum ChangeAction + ChangeActionInsert = "INSERT" + // @enum ChangeAction + ChangeActionDelete = "DELETE" +) + +const ( + // @enum ChangeTokenStatus + ChangeTokenStatusProvisioned = "PROVISIONED" + // @enum ChangeTokenStatus + ChangeTokenStatusPending = "PENDING" + // @enum ChangeTokenStatus + ChangeTokenStatusInsync = "INSYNC" +) + +const ( + // @enum ComparisonOperator + ComparisonOperatorEq = "EQ" + // @enum ComparisonOperator + ComparisonOperatorNe = "NE" + // @enum ComparisonOperator + ComparisonOperatorLe = "LE" + // @enum ComparisonOperator + ComparisonOperatorLt = "LT" + // @enum ComparisonOperator + ComparisonOperatorGe = "GE" + // @enum ComparisonOperator + ComparisonOperatorGt = "GT" +) + +const ( + // @enum IPSetDescriptorType + IPSetDescriptorTypeIpv4 = "IPV4" +) + +const ( + // @enum MatchFieldType + MatchFieldTypeUri = "URI" + // @enum MatchFieldType + MatchFieldTypeQueryString = "QUERY_STRING" + // @enum MatchFieldType + MatchFieldTypeHeader = "HEADER" + // @enum MatchFieldType + MatchFieldTypeMethod = "METHOD" + // @enum MatchFieldType + MatchFieldTypeBody = "BODY" +) + +const ( + // @enum ParameterExceptionField + ParameterExceptionFieldChangeAction = "CHANGE_ACTION" + // @enum ParameterExceptionField + ParameterExceptionFieldWafAction = "WAF_ACTION" + // @enum ParameterExceptionField + ParameterExceptionFieldPredicateType = "PREDICATE_TYPE" + // @enum ParameterExceptionField + ParameterExceptionFieldIpsetType = "IPSET_TYPE" + // @enum ParameterExceptionField + ParameterExceptionFieldByteMatchFieldType = 
"BYTE_MATCH_FIELD_TYPE" + // @enum ParameterExceptionField + ParameterExceptionFieldSqlInjectionMatchFieldType = "SQL_INJECTION_MATCH_FIELD_TYPE" + // @enum ParameterExceptionField + ParameterExceptionFieldByteMatchTextTransformation = "BYTE_MATCH_TEXT_TRANSFORMATION" + // @enum ParameterExceptionField + ParameterExceptionFieldByteMatchPositionalConstraint = "BYTE_MATCH_POSITIONAL_CONSTRAINT" + // @enum ParameterExceptionField + ParameterExceptionFieldSizeConstraintComparisonOperator = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" +) + +const ( + // @enum ParameterExceptionReason + ParameterExceptionReasonInvalidOption = "INVALID_OPTION" + // @enum ParameterExceptionReason + ParameterExceptionReasonIllegalCombination = "ILLEGAL_COMBINATION" +) + +const ( + // @enum PositionalConstraint + PositionalConstraintExactly = "EXACTLY" + // @enum PositionalConstraint + PositionalConstraintStartsWith = "STARTS_WITH" + // @enum PositionalConstraint + PositionalConstraintEndsWith = "ENDS_WITH" + // @enum PositionalConstraint + PositionalConstraintContains = "CONTAINS" + // @enum PositionalConstraint + PositionalConstraintContainsWord = "CONTAINS_WORD" +) + +const ( + // @enum PredicateType + PredicateTypeIpmatch = "IPMatch" + // @enum PredicateType + PredicateTypeByteMatch = "ByteMatch" + // @enum PredicateType + PredicateTypeSqlInjectionMatch = "SqlInjectionMatch" + // @enum PredicateType + PredicateTypeSizeConstraint = "SizeConstraint" +) + +const ( + // @enum TextTransformation + TextTransformationNone = "NONE" + // @enum TextTransformation + TextTransformationCompressWhiteSpace = "COMPRESS_WHITE_SPACE" + // @enum TextTransformation + TextTransformationHtmlEntityDecode = "HTML_ENTITY_DECODE" + // @enum TextTransformation + TextTransformationLowercase = "LOWERCASE" + // @enum TextTransformation + TextTransformationCmdLine = "CMD_LINE" + // @enum TextTransformation + TextTransformationUrlDecode = "URL_DECODE" +) + +const ( + // @enum WafActionType + WafActionTypeBlock = "BLOCK" + // 
@enum WafActionType + WafActionTypeAllow = "ALLOW" + // @enum WafActionType + WafActionTypeCount = "COUNT" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,756 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package waf_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/waf" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleWAF_CreateByteMatchSet() { + svc := waf.New(session.New()) + + params := &waf.CreateByteMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateByteMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_CreateIPSet() { + svc := waf.New(session.New()) + + params := &waf.CreateIPSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateIPSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_CreateRule() { + svc := waf.New(session.New()) + + params := &waf.CreateRuleInput{ + ChangeToken: aws.String("ChangeToken"), // Required + MetricName: aws.String("MetricName"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_CreateSizeConstraintSet() { + svc := waf.New(session.New()) + + params := &waf.CreateSizeConstraintSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateSizeConstraintSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_CreateSqlInjectionMatchSet() { + svc := waf.New(session.New()) + + params := &waf.CreateSqlInjectionMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateSqlInjectionMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_CreateWebACL() { + svc := waf.New(session.New()) + + params := &waf.CreateWebACLInput{ + ChangeToken: aws.String("ChangeToken"), // Required + DefaultAction: &waf.WafAction{ // Required + Type: aws.String("WafActionType"), // Required + }, + MetricName: aws.String("MetricName"), // Required + Name: aws.String("ResourceName"), // Required + } + resp, err := svc.CreateWebACL(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteByteMatchSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteByteMatchSetInput{ + ByteMatchSetId: aws.String("ResourceId"), // Required + ChangeToken: aws.String("ChangeToken"), // Required + } + resp, err := svc.DeleteByteMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteIPSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteIPSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + IPSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteIPSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteRule() { + svc := waf.New(session.New()) + + params := &waf.DeleteRuleInput{ + ChangeToken: aws.String("ChangeToken"), // Required + RuleId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteSizeConstraintSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteSizeConstraintSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + SizeConstraintSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteSizeConstraintSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteSqlInjectionMatchSet() { + svc := waf.New(session.New()) + + params := &waf.DeleteSqlInjectionMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + SqlInjectionMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteSqlInjectionMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_DeleteWebACL() { + svc := waf.New(session.New()) + + params := &waf.DeleteWebACLInput{ + ChangeToken: aws.String("ChangeToken"), // Required + WebACLId: aws.String("ResourceId"), // Required + } + resp, err := svc.DeleteWebACL(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetByteMatchSet() { + svc := waf.New(session.New()) + + params := &waf.GetByteMatchSetInput{ + ByteMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetByteMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. 
+ fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetChangeToken() { + svc := waf.New(session.New()) + + var params *waf.GetChangeTokenInput + resp, err := svc.GetChangeToken(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetChangeTokenStatus() { + svc := waf.New(session.New()) + + params := &waf.GetChangeTokenStatusInput{ + ChangeToken: aws.String("ChangeToken"), // Required + } + resp, err := svc.GetChangeTokenStatus(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetIPSet() { + svc := waf.New(session.New()) + + params := &waf.GetIPSetInput{ + IPSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetIPSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetRule() { + svc := waf.New(session.New()) + + params := &waf.GetRuleInput{ + RuleId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_GetSampledRequests() { + svc := waf.New(session.New()) + + params := &waf.GetSampledRequestsInput{ + MaxItems: aws.Int64(1), // Required + RuleId: aws.String("ResourceId"), // Required + TimeWindow: &waf.TimeWindow{ // Required + EndTime: aws.Time(time.Now()), // Required + StartTime: aws.Time(time.Now()), // Required + }, + WebAclId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetSampledRequests(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetSizeConstraintSet() { + svc := waf.New(session.New()) + + params := &waf.GetSizeConstraintSetInput{ + SizeConstraintSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetSizeConstraintSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetSqlInjectionMatchSet() { + svc := waf.New(session.New()) + + params := &waf.GetSqlInjectionMatchSetInput{ + SqlInjectionMatchSetId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetSqlInjectionMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_GetWebACL() { + svc := waf.New(session.New()) + + params := &waf.GetWebACLInput{ + WebACLId: aws.String("ResourceId"), // Required + } + resp, err := svc.GetWebACL(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_ListByteMatchSets() { + svc := waf.New(session.New()) + + params := &waf.ListByteMatchSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListByteMatchSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListIPSets() { + svc := waf.New(session.New()) + + params := &waf.ListIPSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListIPSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListRules() { + svc := waf.New(session.New()) + + params := &waf.ListRulesInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListRules(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListSizeConstraintSets() { + svc := waf.New(session.New()) + + params := &waf.ListSizeConstraintSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListSizeConstraintSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_ListSqlInjectionMatchSets() { + svc := waf.New(session.New()) + + params := &waf.ListSqlInjectionMatchSetsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListSqlInjectionMatchSets(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_ListWebACLs() { + svc := waf.New(session.New()) + + params := &waf.ListWebACLsInput{ + Limit: aws.Int64(1), // Required + NextMarker: aws.String("NextMarker"), + } + resp, err := svc.ListWebACLs(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateByteMatchSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateByteMatchSetInput{ + ByteMatchSetId: aws.String("ResourceId"), // Required + ChangeToken: aws.String("ChangeToken"), // Required + Updates: []*waf.ByteMatchSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + ByteMatchTuple: &waf.ByteMatchTuple{ // Required + FieldToMatch: &waf.FieldToMatch{ // Required + Type: aws.String("MatchFieldType"), // Required + Data: aws.String("MatchFieldData"), + }, + PositionalConstraint: aws.String("PositionalConstraint"), // Required + TargetString: []byte("PAYLOAD"), // Required + TextTransformation: aws.String("TextTransformation"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateByteMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_UpdateIPSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateIPSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + IPSetId: aws.String("ResourceId"), // Required + Updates: []*waf.IPSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + IPSetDescriptor: &waf.IPSetDescriptor{ // Required + Type: aws.String("IPSetDescriptorType"), // Required + Value: aws.String("IPSetDescriptorValue"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateIPSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateRule() { + svc := waf.New(session.New()) + + params := &waf.UpdateRuleInput{ + ChangeToken: aws.String("ChangeToken"), // Required + RuleId: aws.String("ResourceId"), // Required + Updates: []*waf.RuleUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + Predicate: &waf.Predicate{ // Required + DataId: aws.String("ResourceId"), // Required + Negated: aws.Bool(true), // Required + Type: aws.String("PredicateType"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateRule(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_UpdateSizeConstraintSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateSizeConstraintSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + SizeConstraintSetId: aws.String("ResourceId"), // Required + Updates: []*waf.SizeConstraintSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + SizeConstraint: &waf.SizeConstraint{ // Required + ComparisonOperator: aws.String("ComparisonOperator"), // Required + FieldToMatch: &waf.FieldToMatch{ // Required + Type: aws.String("MatchFieldType"), // Required + Data: aws.String("MatchFieldData"), + }, + Size: aws.Int64(1), // Required + TextTransformation: aws.String("TextTransformation"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateSizeConstraintSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWAF_UpdateSqlInjectionMatchSet() { + svc := waf.New(session.New()) + + params := &waf.UpdateSqlInjectionMatchSetInput{ + ChangeToken: aws.String("ChangeToken"), // Required + SqlInjectionMatchSetId: aws.String("ResourceId"), // Required + Updates: []*waf.SqlInjectionMatchSetUpdate{ // Required + { // Required + Action: aws.String("ChangeAction"), // Required + SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ // Required + FieldToMatch: &waf.FieldToMatch{ // Required + Type: aws.String("MatchFieldType"), // Required + Data: aws.String("MatchFieldData"), + }, + TextTransformation: aws.String("TextTransformation"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateSqlInjectionMatchSet(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} + +func ExampleWAF_UpdateWebACL() { + svc := waf.New(session.New()) + + params := &waf.UpdateWebACLInput{ + ChangeToken: aws.String("ChangeToken"), // Required + WebACLId: aws.String("ResourceId"), // Required + DefaultAction: &waf.WafAction{ + Type: aws.String("WafActionType"), // Required + }, + Updates: []*waf.WebACLUpdate{ + { // Required + Action: aws.String("ChangeAction"), // Required + ActivatedRule: &waf.ActivatedRule{ // Required + Action: &waf.WafAction{ // Required + Type: aws.String("WafActionType"), // Required + }, + Priority: aws.Int64(1), // Required + RuleId: aws.String("ResourceId"), // Required + }, + }, + // More values... + }, + } + resp, err := svc.UpdateWebACL(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,91 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package waf + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the AWS WAF API Reference. This guide is for developers who need +// detailed information about the AWS WAF API actions, data types, and errors. 
+// For detailed information about AWS WAF features and an overview of how to +// use the AWS WAF API, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type WAF struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "waf" + +// New creates a new instance of the WAF client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a WAF client from just a session. +// svc := waf.New(mySession) +// +// // Create a WAF client with additional configuration +// svc := waf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAF { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *WAF { + svc := &WAF{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-08-24", + JSONVersion: "1.1", + TargetPrefix: "AWSWAF_20150824", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a WAF operation and runs any +// custom request initialization. +func (c *WAF) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/wafiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/wafiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/wafiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/waf/wafiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,146 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package wafiface provides an interface for the AWS WAF. +package wafiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/waf" +) + +// WAFAPI is the interface type for waf.WAF. 
+type WAFAPI interface { + CreateByteMatchSetRequest(*waf.CreateByteMatchSetInput) (*request.Request, *waf.CreateByteMatchSetOutput) + + CreateByteMatchSet(*waf.CreateByteMatchSetInput) (*waf.CreateByteMatchSetOutput, error) + + CreateIPSetRequest(*waf.CreateIPSetInput) (*request.Request, *waf.CreateIPSetOutput) + + CreateIPSet(*waf.CreateIPSetInput) (*waf.CreateIPSetOutput, error) + + CreateRuleRequest(*waf.CreateRuleInput) (*request.Request, *waf.CreateRuleOutput) + + CreateRule(*waf.CreateRuleInput) (*waf.CreateRuleOutput, error) + + CreateSizeConstraintSetRequest(*waf.CreateSizeConstraintSetInput) (*request.Request, *waf.CreateSizeConstraintSetOutput) + + CreateSizeConstraintSet(*waf.CreateSizeConstraintSetInput) (*waf.CreateSizeConstraintSetOutput, error) + + CreateSqlInjectionMatchSetRequest(*waf.CreateSqlInjectionMatchSetInput) (*request.Request, *waf.CreateSqlInjectionMatchSetOutput) + + CreateSqlInjectionMatchSet(*waf.CreateSqlInjectionMatchSetInput) (*waf.CreateSqlInjectionMatchSetOutput, error) + + CreateWebACLRequest(*waf.CreateWebACLInput) (*request.Request, *waf.CreateWebACLOutput) + + CreateWebACL(*waf.CreateWebACLInput) (*waf.CreateWebACLOutput, error) + + DeleteByteMatchSetRequest(*waf.DeleteByteMatchSetInput) (*request.Request, *waf.DeleteByteMatchSetOutput) + + DeleteByteMatchSet(*waf.DeleteByteMatchSetInput) (*waf.DeleteByteMatchSetOutput, error) + + DeleteIPSetRequest(*waf.DeleteIPSetInput) (*request.Request, *waf.DeleteIPSetOutput) + + DeleteIPSet(*waf.DeleteIPSetInput) (*waf.DeleteIPSetOutput, error) + + DeleteRuleRequest(*waf.DeleteRuleInput) (*request.Request, *waf.DeleteRuleOutput) + + DeleteRule(*waf.DeleteRuleInput) (*waf.DeleteRuleOutput, error) + + DeleteSizeConstraintSetRequest(*waf.DeleteSizeConstraintSetInput) (*request.Request, *waf.DeleteSizeConstraintSetOutput) + + DeleteSizeConstraintSet(*waf.DeleteSizeConstraintSetInput) (*waf.DeleteSizeConstraintSetOutput, error) + + 
DeleteSqlInjectionMatchSetRequest(*waf.DeleteSqlInjectionMatchSetInput) (*request.Request, *waf.DeleteSqlInjectionMatchSetOutput) + + DeleteSqlInjectionMatchSet(*waf.DeleteSqlInjectionMatchSetInput) (*waf.DeleteSqlInjectionMatchSetOutput, error) + + DeleteWebACLRequest(*waf.DeleteWebACLInput) (*request.Request, *waf.DeleteWebACLOutput) + + DeleteWebACL(*waf.DeleteWebACLInput) (*waf.DeleteWebACLOutput, error) + + GetByteMatchSetRequest(*waf.GetByteMatchSetInput) (*request.Request, *waf.GetByteMatchSetOutput) + + GetByteMatchSet(*waf.GetByteMatchSetInput) (*waf.GetByteMatchSetOutput, error) + + GetChangeTokenRequest(*waf.GetChangeTokenInput) (*request.Request, *waf.GetChangeTokenOutput) + + GetChangeToken(*waf.GetChangeTokenInput) (*waf.GetChangeTokenOutput, error) + + GetChangeTokenStatusRequest(*waf.GetChangeTokenStatusInput) (*request.Request, *waf.GetChangeTokenStatusOutput) + + GetChangeTokenStatus(*waf.GetChangeTokenStatusInput) (*waf.GetChangeTokenStatusOutput, error) + + GetIPSetRequest(*waf.GetIPSetInput) (*request.Request, *waf.GetIPSetOutput) + + GetIPSet(*waf.GetIPSetInput) (*waf.GetIPSetOutput, error) + + GetRuleRequest(*waf.GetRuleInput) (*request.Request, *waf.GetRuleOutput) + + GetRule(*waf.GetRuleInput) (*waf.GetRuleOutput, error) + + GetSampledRequestsRequest(*waf.GetSampledRequestsInput) (*request.Request, *waf.GetSampledRequestsOutput) + + GetSampledRequests(*waf.GetSampledRequestsInput) (*waf.GetSampledRequestsOutput, error) + + GetSizeConstraintSetRequest(*waf.GetSizeConstraintSetInput) (*request.Request, *waf.GetSizeConstraintSetOutput) + + GetSizeConstraintSet(*waf.GetSizeConstraintSetInput) (*waf.GetSizeConstraintSetOutput, error) + + GetSqlInjectionMatchSetRequest(*waf.GetSqlInjectionMatchSetInput) (*request.Request, *waf.GetSqlInjectionMatchSetOutput) + + GetSqlInjectionMatchSet(*waf.GetSqlInjectionMatchSetInput) (*waf.GetSqlInjectionMatchSetOutput, error) + + GetWebACLRequest(*waf.GetWebACLInput) (*request.Request, *waf.GetWebACLOutput) + 
+ GetWebACL(*waf.GetWebACLInput) (*waf.GetWebACLOutput, error) + + ListByteMatchSetsRequest(*waf.ListByteMatchSetsInput) (*request.Request, *waf.ListByteMatchSetsOutput) + + ListByteMatchSets(*waf.ListByteMatchSetsInput) (*waf.ListByteMatchSetsOutput, error) + + ListIPSetsRequest(*waf.ListIPSetsInput) (*request.Request, *waf.ListIPSetsOutput) + + ListIPSets(*waf.ListIPSetsInput) (*waf.ListIPSetsOutput, error) + + ListRulesRequest(*waf.ListRulesInput) (*request.Request, *waf.ListRulesOutput) + + ListRules(*waf.ListRulesInput) (*waf.ListRulesOutput, error) + + ListSizeConstraintSetsRequest(*waf.ListSizeConstraintSetsInput) (*request.Request, *waf.ListSizeConstraintSetsOutput) + + ListSizeConstraintSets(*waf.ListSizeConstraintSetsInput) (*waf.ListSizeConstraintSetsOutput, error) + + ListSqlInjectionMatchSetsRequest(*waf.ListSqlInjectionMatchSetsInput) (*request.Request, *waf.ListSqlInjectionMatchSetsOutput) + + ListSqlInjectionMatchSets(*waf.ListSqlInjectionMatchSetsInput) (*waf.ListSqlInjectionMatchSetsOutput, error) + + ListWebACLsRequest(*waf.ListWebACLsInput) (*request.Request, *waf.ListWebACLsOutput) + + ListWebACLs(*waf.ListWebACLsInput) (*waf.ListWebACLsOutput, error) + + UpdateByteMatchSetRequest(*waf.UpdateByteMatchSetInput) (*request.Request, *waf.UpdateByteMatchSetOutput) + + UpdateByteMatchSet(*waf.UpdateByteMatchSetInput) (*waf.UpdateByteMatchSetOutput, error) + + UpdateIPSetRequest(*waf.UpdateIPSetInput) (*request.Request, *waf.UpdateIPSetOutput) + + UpdateIPSet(*waf.UpdateIPSetInput) (*waf.UpdateIPSetOutput, error) + + UpdateRuleRequest(*waf.UpdateRuleInput) (*request.Request, *waf.UpdateRuleOutput) + + UpdateRule(*waf.UpdateRuleInput) (*waf.UpdateRuleOutput, error) + + UpdateSizeConstraintSetRequest(*waf.UpdateSizeConstraintSetInput) (*request.Request, *waf.UpdateSizeConstraintSetOutput) + + UpdateSizeConstraintSet(*waf.UpdateSizeConstraintSetInput) (*waf.UpdateSizeConstraintSetOutput, error) + + 
UpdateSqlInjectionMatchSetRequest(*waf.UpdateSqlInjectionMatchSetInput) (*request.Request, *waf.UpdateSqlInjectionMatchSetOutput) + + UpdateSqlInjectionMatchSet(*waf.UpdateSqlInjectionMatchSetInput) (*waf.UpdateSqlInjectionMatchSetOutput, error) + + UpdateWebACLRequest(*waf.UpdateWebACLInput) (*request.Request, *waf.UpdateWebACLOutput) + + UpdateWebACL(*waf.UpdateWebACLInput) (*waf.UpdateWebACLOutput, error) +} + +var _ WAFAPI = (*waf.WAF)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1033 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package workspaces provides a client for Amazon WorkSpaces. +package workspaces + +import ( + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateWorkspaces = "CreateWorkspaces" + +// CreateWorkspacesRequest generates a request for the CreateWorkspaces operation. +func (c *WorkSpaces) CreateWorkspacesRequest(input *CreateWorkspacesInput) (req *request.Request, output *CreateWorkspacesOutput) { + op := &request.Operation{ + Name: opCreateWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateWorkspacesOutput{} + req.Data = output + return +} + +// Creates one or more WorkSpaces. +// +// This operation is asynchronous and returns before the WorkSpaces are created. 
+func (c *WorkSpaces) CreateWorkspaces(input *CreateWorkspacesInput) (*CreateWorkspacesOutput, error) { + req, out := c.CreateWorkspacesRequest(input) + err := req.Send() + return out, err +} + +const opDescribeWorkspaceBundles = "DescribeWorkspaceBundles" + +// DescribeWorkspaceBundlesRequest generates a request for the DescribeWorkspaceBundles operation. +func (c *WorkSpaces) DescribeWorkspaceBundlesRequest(input *DescribeWorkspaceBundlesInput) (req *request.Request, output *DescribeWorkspaceBundlesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaceBundles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspaceBundlesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkspaceBundlesOutput{} + req.Data = output + return +} + +// Obtains information about the WorkSpace bundles that are available to your +// account in the specified region. +// +// You can filter the results with either the BundleIds parameter, or the Owner +// parameter, but not both. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. 
+func (c *WorkSpaces) DescribeWorkspaceBundles(input *DescribeWorkspaceBundlesInput) (*DescribeWorkspaceBundlesOutput, error) { + req, out := c.DescribeWorkspaceBundlesRequest(input) + err := req.Send() + return out, err +} + +func (c *WorkSpaces) DescribeWorkspaceBundlesPages(input *DescribeWorkspaceBundlesInput, fn func(p *DescribeWorkspaceBundlesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeWorkspaceBundlesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeWorkspaceBundlesOutput), lastPage) + }) +} + +const opDescribeWorkspaceDirectories = "DescribeWorkspaceDirectories" + +// DescribeWorkspaceDirectoriesRequest generates a request for the DescribeWorkspaceDirectories operation. +func (c *WorkSpaces) DescribeWorkspaceDirectoriesRequest(input *DescribeWorkspaceDirectoriesInput) (req *request.Request, output *DescribeWorkspaceDirectoriesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaceDirectories, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspaceDirectoriesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkspaceDirectoriesOutput{} + req.Data = output + return +} + +// Retrieves information about the AWS Directory Service directories in the +// region that are registered with Amazon WorkSpaces and are available to your +// account. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. 
+func (c *WorkSpaces) DescribeWorkspaceDirectories(input *DescribeWorkspaceDirectoriesInput) (*DescribeWorkspaceDirectoriesOutput, error) { + req, out := c.DescribeWorkspaceDirectoriesRequest(input) + err := req.Send() + return out, err +} + +func (c *WorkSpaces) DescribeWorkspaceDirectoriesPages(input *DescribeWorkspaceDirectoriesInput, fn func(p *DescribeWorkspaceDirectoriesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeWorkspaceDirectoriesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeWorkspaceDirectoriesOutput), lastPage) + }) +} + +const opDescribeWorkspaces = "DescribeWorkspaces" + +// DescribeWorkspacesRequest generates a request for the DescribeWorkspaces operation. +func (c *WorkSpaces) DescribeWorkspacesRequest(input *DescribeWorkspacesInput) (req *request.Request, output *DescribeWorkspacesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeWorkspacesOutput{} + req.Data = output + return +} + +// Obtains information about the specified WorkSpaces. +// +// Only one of the filter parameters, such as BundleId, DirectoryId, or WorkspaceIds, +// can be specified at a time. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. 
+func (c *WorkSpaces) DescribeWorkspaces(input *DescribeWorkspacesInput) (*DescribeWorkspacesOutput, error) { + req, out := c.DescribeWorkspacesRequest(input) + err := req.Send() + return out, err +} + +func (c *WorkSpaces) DescribeWorkspacesPages(input *DescribeWorkspacesInput, fn func(p *DescribeWorkspacesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeWorkspacesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeWorkspacesOutput), lastPage) + }) +} + +const opRebootWorkspaces = "RebootWorkspaces" + +// RebootWorkspacesRequest generates a request for the RebootWorkspaces operation. +func (c *WorkSpaces) RebootWorkspacesRequest(input *RebootWorkspacesInput) (req *request.Request, output *RebootWorkspacesOutput) { + op := &request.Operation{ + Name: opRebootWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &RebootWorkspacesOutput{} + req.Data = output + return +} + +// Reboots the specified WorkSpaces. +// +// To be able to reboot a WorkSpace, the WorkSpace must have a State of AVAILABLE, +// IMPAIRED, or INOPERABLE. +// +// This operation is asynchronous and will return before the WorkSpaces have +// rebooted. +func (c *WorkSpaces) RebootWorkspaces(input *RebootWorkspacesInput) (*RebootWorkspacesOutput, error) { + req, out := c.RebootWorkspacesRequest(input) + err := req.Send() + return out, err +} + +const opRebuildWorkspaces = "RebuildWorkspaces" + +// RebuildWorkspacesRequest generates a request for the RebuildWorkspaces operation. 
+func (c *WorkSpaces) RebuildWorkspacesRequest(input *RebuildWorkspacesInput) (req *request.Request, output *RebuildWorkspacesOutput) { + op := &request.Operation{ + Name: opRebuildWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebuildWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &RebuildWorkspacesOutput{} + req.Data = output + return +} + +// Rebuilds the specified WorkSpaces. +// +// Rebuilding a WorkSpace is a potentially destructive action that can result +// in the loss of data. Rebuilding a WorkSpace causes the following to occur: +// +// The system is restored to the image of the bundle that the WorkSpace is +// created from. Any applications that have been installed, or system settings +// that have been made since the WorkSpace was created will be lost. The data +// drive (D drive) is re-created from the last automatic snapshot taken of the +// data drive. The current contents of the data drive are overwritten. Automatic +// snapshots of the data drive are taken every 12 hours, so the snapshot can +// be as much as 12 hours old. To be able to rebuild a WorkSpace, the WorkSpace +// must have a State of AVAILABLE or ERROR. +// +// This operation is asynchronous and will return before the WorkSpaces have +// been completely rebuilt. +func (c *WorkSpaces) RebuildWorkspaces(input *RebuildWorkspacesInput) (*RebuildWorkspacesOutput, error) { + req, out := c.RebuildWorkspacesRequest(input) + err := req.Send() + return out, err +} + +const opTerminateWorkspaces = "TerminateWorkspaces" + +// TerminateWorkspacesRequest generates a request for the TerminateWorkspaces operation. 
+func (c *WorkSpaces) TerminateWorkspacesRequest(input *TerminateWorkspacesInput) (req *request.Request, output *TerminateWorkspacesOutput) { + op := &request.Operation{ + Name: opTerminateWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateWorkspacesInput{} + } + + req = c.newRequest(op, input, output) + output = &TerminateWorkspacesOutput{} + req.Data = output + return +} + +// Terminates the specified WorkSpaces. +// +// Terminating a WorkSpace is a permanent action and cannot be undone. The +// user's data is not maintained and will be destroyed. If you need to archive +// any user data, contact Amazon Web Services before terminating the WorkSpace. +// +// You can terminate a WorkSpace that is in any state except SUSPENDED. +// +// This operation is asynchronous and will return before the WorkSpaces have +// been completely terminated. +func (c *WorkSpaces) TerminateWorkspaces(input *TerminateWorkspacesInput) (*TerminateWorkspacesOutput, error) { + req, out := c.TerminateWorkspacesRequest(input) + err := req.Send() + return out, err +} + +// Contains information about the compute type of a WorkSpace bundle. +type ComputeType struct { + _ struct{} `type:"structure"` + + // The name of the compute type for the bundle. + Name *string `type:"string" enum:"Compute"` +} + +// String returns the string representation +func (s ComputeType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComputeType) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateWorkspaces operation. +type CreateWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to create. 
+ Workspaces []*WorkspaceRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkspacesInput) GoString() string { + return s.String() +} + +// Contains the result of the CreateWorkspaces operation. +type CreateWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent the WorkSpaces that could not be created. + FailedRequests []*FailedCreateWorkspaceRequest `type:"list"` + + // An array of structures that represent the WorkSpaces that were created. + // + // Because this operation is asynchronous, the identifier in WorkspaceId is + // not immediately available. If you immediately call DescribeWorkspaces with + // this identifier, no information will be returned. + PendingRequests []*Workspace `type:"list"` +} + +// String returns the string representation +func (s CreateWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains default WorkSpace creation information. +type DefaultWorkspaceCreationProperties struct { + _ struct{} `type:"structure"` + + // The identifier of any custom security groups that are applied to the WorkSpaces + // when they are created. + CustomSecurityGroupId *string `type:"string"` + + // The organizational unit (OU) in the directory that the WorkSpace machine + // accounts are placed in. + DefaultOu *string `type:"string"` + + // A public IP address will be attached to all WorkSpaces that are created or + // rebuilt. + EnableInternetAccess *bool `type:"boolean"` + + // Specifies if the directory is enabled for Amazon WorkDocs. + EnableWorkDocs *bool `type:"boolean"` + + // The WorkSpace user is an administrator on the WorkSpace. 
+ UserEnabledAsLocalAdministrator *bool `type:"boolean"` +} + +// String returns the string representation +func (s DefaultWorkspaceCreationProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultWorkspaceCreationProperties) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeWorkspaceBundles operation. +type DescribeWorkspaceBundlesInput struct { + _ struct{} `type:"structure"` + + // An array of strings that contains the identifiers of the bundles to retrieve. + // This parameter cannot be combined with any other filter parameter. + BundleIds []*string `min:"1" type:"list"` + + // The NextToken value from a previous call to this operation. Pass null if + // this is the first call. + NextToken *string `min:"1" type:"string"` + + // The owner of the bundles to retrieve. This parameter cannot be combined with + // any other filter parameter. + // + // This contains one of the following values: + // + // null - Retrieves the bundles that belong to the account making the call. + // AMAZON - Retrieves the bundles that are provided by AWS. + Owner *string `type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceBundlesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceBundlesInput) GoString() string { + return s.String() +} + +// Contains the results of the DescribeWorkspaceBundles operation. +type DescribeWorkspaceBundlesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that contain information about the bundles. + Bundles []*WorkspaceBundle `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that timeframe. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceBundlesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceBundlesOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeWorkspaceDirectories operation. +type DescribeWorkspaceDirectoriesInput struct { + _ struct{} `type:"structure"` + + // An array of strings that contains the directory identifiers to retrieve information + // for. If this member is null, all directories are retrieved. + DirectoryIds []*string `min:"1" type:"list"` + + // The NextToken value from a previous call to this operation. Pass null if + // this is the first call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceDirectoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceDirectoriesInput) GoString() string { + return s.String() +} + +// Contains the results of the DescribeWorkspaceDirectories operation. +type DescribeWorkspaceDirectoriesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that contain information about the directories. + Directories []*WorkspaceDirectory `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that timeframe. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceDirectoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceDirectoriesOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the DescribeWorkspaces operation. +type DescribeWorkspacesInput struct { + _ struct{} `type:"structure"` + + // The identifier of a bundle to obtain the WorkSpaces for. All WorkSpaces that + // are created from this bundle will be retrieved. This parameter cannot be + // combined with any other filter parameter. + BundleId *string `type:"string"` + + // Specifies the directory identifier to which to limit the WorkSpaces. Optionally, + // you can specify a specific directory user with the UserName parameter. This + // parameter cannot be combined with any other filter parameter. + DirectoryId *string `type:"string"` + + // The maximum number of items to return. + Limit *int64 `min:"1" type:"integer"` + + // The NextToken value from a previous call to this operation. Pass null if + // this is the first call. + NextToken *string `min:"1" type:"string"` + + // Used with the DirectoryId parameter to specify the directory user for which + // to obtain the WorkSpace. + UserName *string `min:"1" type:"string"` + + // An array of strings that contain the identifiers of the WorkSpaces for which + // to retrieve information. This parameter cannot be combined with any other + // filter parameter. + // + // Because the CreateWorkspaces operation is asynchronous, the identifier returned + // by CreateWorkspaces is not immediately available. If you immediately call + // DescribeWorkspaces with this identifier, no information will be returned. 
+ WorkspaceIds []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesInput) GoString() string { + return s.String() +} + +// Contains the results for the DescribeWorkspaces operation. +type DescribeWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that timeframe. + NextToken *string `min:"1" type:"string"` + + // An array of structures that contain the information about the WorkSpaces. + // + // Because the CreateWorkspaces operation is asynchronous, some of this information + // may be incomplete for a newly-created WorkSpace. + Workspaces []*Workspace `type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace that could not be created. +type FailedCreateWorkspaceRequest struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string"` + + // The textual error message. + ErrorMessage *string `type:"string"` + + // A WorkspaceRequest object that contains the information about the WorkSpace + // that could not be created. 
+ WorkspaceRequest *WorkspaceRequest `type:"structure"` +} + +// String returns the string representation +func (s FailedCreateWorkspaceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedCreateWorkspaceRequest) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace that could not be rebooted (RebootWorkspaces), +// rebuilt (RebuildWorkspaces), or terminated (TerminateWorkspaces). +type FailedWorkspaceChangeRequest struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string"` + + // The textual error message. + ErrorMessage *string `type:"string"` + + // The identifier of the WorkSpace. + WorkspaceId *string `type:"string"` +} + +// String returns the string representation +func (s FailedWorkspaceChangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedWorkspaceChangeRequest) GoString() string { + return s.String() +} + +// Contains information used with the RebootWorkspaces operation to reboot a +// WorkSpace. +type RebootRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to reboot. + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootRequest) GoString() string { + return s.String() +} + +// Contains the inputs for the RebootWorkspaces operation. +type RebootWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to reboot. 
+ RebootWorkspaceRequests []*RebootRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebootWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootWorkspacesInput) GoString() string { + return s.String() +} + +// Contains the results of the RebootWorkspaces operation. +type RebootWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent any WorkSpaces that could not be rebooted. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s RebootWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains information used with the RebuildWorkspaces operation to rebuild +// a WorkSpace. +type RebuildRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to rebuild. + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebuildRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildRequest) GoString() string { + return s.String() +} + +// Contains the inputs for the RebuildWorkspaces operation. +type RebuildWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to rebuild. + RebuildWorkspaceRequests []*RebuildRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebuildWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildWorkspacesInput) GoString() string { + return s.String() +} + +// Contains the results of the RebuildWorkspaces operation. 
+type RebuildWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent any WorkSpaces that could not be rebuilt. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s RebuildWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains information used with the TerminateWorkspaces operation to terminate +// a WorkSpace. +type TerminateRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to terminate. + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TerminateRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateRequest) GoString() string { + return s.String() +} + +// Contains the inputs for the TerminateWorkspaces operation. +type TerminateWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to terminate. + TerminateWorkspaceRequests []*TerminateRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkspacesInput) GoString() string { + return s.String() +} + +// Contains the results of the TerminateWorkspaces operation. +type TerminateWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent any WorkSpaces that could not be terminated. 
+ FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s TerminateWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkspacesOutput) GoString() string { + return s.String() +} + +// Contains information about the user storage for a WorkSpace bundle. +type UserStorage struct { + _ struct{} `type:"structure"` + + // The amount of user storage for the bundle. + Capacity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UserStorage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserStorage) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace. +type Workspace struct { + _ struct{} `type:"structure"` + + // The identifier of the bundle that the WorkSpace was created from. + BundleId *string `type:"string"` + + // The name of the WorkSpace as seen by the operating system. + ComputerName *string `type:"string"` + + // The identifier of the AWS Directory Service directory that the WorkSpace + // belongs to. + DirectoryId *string `type:"string"` + + // If the WorkSpace could not be created, this contains the error code. + ErrorCode *string `type:"string"` + + // If the WorkSpace could not be created, this contains a textual error message + // that describes the failure. + ErrorMessage *string `type:"string"` + + // The IP address of the WorkSpace. + IpAddress *string `type:"string"` + + // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + RootVolumeEncryptionEnabled *bool `type:"boolean"` + + // The operational state of the WorkSpace. + State *string `type:"string" enum:"WorkspaceState"` + + // The identifier of the subnet that the WorkSpace is in. + SubnetId *string `type:"string"` + + // The user that the WorkSpace is assigned to. 
+ UserName *string `min:"1" type:"string"` + + // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + UserVolumeEncryptionEnabled *bool `type:"boolean"` + + // The KMS key used to encrypt data stored on your WorkSpace. + VolumeEncryptionKey *string `type:"string"` + + // The identifier of the WorkSpace. + WorkspaceId *string `type:"string"` +} + +// String returns the string representation +func (s Workspace) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Workspace) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace bundle. +type WorkspaceBundle struct { + _ struct{} `type:"structure"` + + // The bundle identifier. + BundleId *string `type:"string"` + + // A ComputeType object that specifies the compute type for the bundle. + ComputeType *ComputeType `type:"structure"` + + // The bundle description. + Description *string `type:"string"` + + // The name of the bundle. + Name *string `min:"1" type:"string"` + + // The owner of the bundle. This contains the owner's account identifier, or + // AMAZON if the bundle is provided by AWS. + Owner *string `type:"string"` + + // A UserStorage object that specifies the amount of user storage that the bundle + // contains. + UserStorage *UserStorage `type:"structure"` +} + +// String returns the string representation +func (s WorkspaceBundle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceBundle) GoString() string { + return s.String() +} + +// Contains information about an AWS Directory Service directory for use with +// Amazon WorkSpaces. +type WorkspaceDirectory struct { + _ struct{} `type:"structure"` + + // The directory alias. + Alias *string `type:"string"` + + // The user name for the service account. + CustomerUserName *string `min:"1" type:"string"` + + // The directory identifier. 
+ DirectoryId *string `type:"string"` + + // The name of the directory. + DirectoryName *string `type:"string"` + + // The directory type. + DirectoryType *string `type:"string" enum:"WorkspaceDirectoryType"` + + // An array of strings that contains the IP addresses of the DNS servers for + // the directory. + DnsIpAddresses []*string `type:"list"` + + // The identifier of the IAM role. This is the role that allows Amazon WorkSpaces + // to make calls to other services, such as Amazon EC2, on your behalf. + IamRoleId *string `type:"string"` + + // The registration code for the directory. This is the code that users enter + // in their Amazon WorkSpaces client application to connect to the directory. + RegistrationCode *string `min:"1" type:"string"` + + // The state of the directory's registration with Amazon WorkSpaces + State *string `type:"string" enum:"WorkspaceDirectoryState"` + + // An array of strings that contains the identifiers of the subnets used with + // the directory. + SubnetIds []*string `type:"list"` + + // A structure that specifies the default creation properties for all WorkSpaces + // in the directory. + WorkspaceCreationProperties *DefaultWorkspaceCreationProperties `type:"structure"` + + // The identifier of the security group that is assigned to new WorkSpaces. + WorkspaceSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s WorkspaceDirectory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceDirectory) GoString() string { + return s.String() +} + +// Contains information about a WorkSpace creation request. +type WorkspaceRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the bundle to create the WorkSpace from. You can use the + // DescribeWorkspaceBundles operation to obtain a list of the bundles that are + // available. 
+ BundleId *string `type:"string" required:"true"` + + // The identifier of the AWS Directory Service directory to create the WorkSpace + // in. You can use the DescribeWorkspaceDirectories operation to obtain a list + // of the directories that are available. + DirectoryId *string `type:"string" required:"true"` + + // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + RootVolumeEncryptionEnabled *bool `type:"boolean"` + + // The username that the WorkSpace is assigned to. This username must exist + // in the AWS Directory Service directory specified by the DirectoryId member. + UserName *string `min:"1" type:"string" required:"true"` + + // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + UserVolumeEncryptionEnabled *bool `type:"boolean"` + + // The KMS key used to encrypt data stored on your WorkSpace. + VolumeEncryptionKey *string `type:"string"` +} + +// String returns the string representation +func (s WorkspaceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceRequest) GoString() string { + return s.String() +} + +const ( + // @enum Compute + ComputeValue = "VALUE" + // @enum Compute + ComputeStandard = "STANDARD" + // @enum Compute + ComputePerformance = "PERFORMANCE" +) + +const ( + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateRegistering = "REGISTERING" + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateRegistered = "REGISTERED" + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateDeregistering = "DEREGISTERING" + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateDeregistered = "DEREGISTERED" + // @enum WorkspaceDirectoryState + WorkspaceDirectoryStateError = "ERROR" +) + +const ( + // @enum WorkspaceDirectoryType + WorkspaceDirectoryTypeSimpleAd = "SIMPLE_AD" + // @enum WorkspaceDirectoryType + WorkspaceDirectoryTypeAdConnector = "AD_CONNECTOR" +) + +const ( + // @enum 
WorkspaceState + WorkspaceStatePending = "PENDING" + // @enum WorkspaceState + WorkspaceStateAvailable = "AVAILABLE" + // @enum WorkspaceState + WorkspaceStateImpaired = "IMPAIRED" + // @enum WorkspaceState + WorkspaceStateUnhealthy = "UNHEALTHY" + // @enum WorkspaceState + WorkspaceStateRebooting = "REBOOTING" + // @enum WorkspaceState + WorkspaceStateRebuilding = "REBUILDING" + // @enum WorkspaceState + WorkspaceStateTerminating = "TERMINATING" + // @enum WorkspaceState + WorkspaceStateTerminated = "TERMINATED" + // @enum WorkspaceState + WorkspaceStateSuspended = "SUSPENDED" + // @enum WorkspaceState + WorkspaceStateError = "ERROR" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/examples_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/examples_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/examples_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,191 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package workspaces_test + +import ( + "bytes" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/workspaces" +) + +var _ time.Duration +var _ bytes.Buffer + +func ExampleWorkSpaces_CreateWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.CreateWorkspacesInput{ + Workspaces: []*workspaces.WorkspaceRequest{ // Required + { // Required + BundleId: aws.String("BundleId"), // Required + DirectoryId: aws.String("DirectoryId"), // Required + UserName: aws.String("UserName"), // Required + RootVolumeEncryptionEnabled: aws.Bool(true), + UserVolumeEncryptionEnabled: aws.Bool(true), + VolumeEncryptionKey: aws.String("VolumeEncryptionKey"), + }, + // More values... + }, + } + resp, err := svc.CreateWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_DescribeWorkspaceBundles() { + svc := workspaces.New(session.New()) + + params := &workspaces.DescribeWorkspaceBundlesInput{ + BundleIds: []*string{ + aws.String("BundleId"), // Required + // More values... + }, + NextToken: aws.String("PaginationToken"), + Owner: aws.String("BundleOwner"), + } + resp, err := svc.DescribeWorkspaceBundles(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_DescribeWorkspaceDirectories() { + svc := workspaces.New(session.New()) + + params := &workspaces.DescribeWorkspaceDirectoriesInput{ + DirectoryIds: []*string{ + aws.String("DirectoryId"), // Required + // More values... 
+ }, + NextToken: aws.String("PaginationToken"), + } + resp, err := svc.DescribeWorkspaceDirectories(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_DescribeWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.DescribeWorkspacesInput{ + BundleId: aws.String("BundleId"), + DirectoryId: aws.String("DirectoryId"), + Limit: aws.Int64(1), + NextToken: aws.String("PaginationToken"), + UserName: aws.String("UserName"), + WorkspaceIds: []*string{ + aws.String("WorkspaceId"), // Required + // More values... + }, + } + resp, err := svc.DescribeWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_RebootWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.RebootWorkspacesInput{ + RebootWorkspaceRequests: []*workspaces.RebootRequest{ // Required + { // Required + WorkspaceId: aws.String("WorkspaceId"), // Required + }, + // More values... + }, + } + resp, err := svc.RebootWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_RebuildWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.RebuildWorkspacesInput{ + RebuildWorkspaceRequests: []*workspaces.RebuildRequest{ // Required + { // Required + WorkspaceId: aws.String("WorkspaceId"), // Required + }, + // More values... 
+ }, + } + resp, err := svc.RebuildWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} + +func ExampleWorkSpaces_TerminateWorkspaces() { + svc := workspaces.New(session.New()) + + params := &workspaces.TerminateWorkspacesInput{ + TerminateWorkspaceRequests: []*workspaces.TerminateRequest{ // Required + { // Required + WorkspaceId: aws.String("WorkspaceId"), // Required + }, + // More values... + }, + } + resp, err := svc.TerminateWorkspaces(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. + fmt.Println(resp) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/service.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/service.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/service.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/service.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package workspaces + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the Amazon WorkSpaces API Reference. This guide provides detailed +// information about Amazon WorkSpaces operations, data types, parameters, and +// errors. +//The service client's operations are safe to be used concurrently. 
+// It is not safe to mutate any of the client's properties though. +type WorkSpaces struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "workspaces" + +// New creates a new instance of the WorkSpaces client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a WorkSpaces client from just a session. +// svc := workspaces.New(mySession) +// +// // Create a WorkSpaces client with additional configuration +// svc := workspaces.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkSpaces { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *WorkSpaces { + svc := &WorkSpaces{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-08", + JSONVersion: "1.1", + TargetPrefix: "WorkspacesService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a WorkSpaces operation and runs any +// custom request initialization. +func (c *WorkSpaces) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/workspacesiface/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/workspacesiface/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/workspacesiface/interface.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/service/workspaces/workspacesiface/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,48 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package workspacesiface provides an interface for the Amazon WorkSpaces. 
+package workspacesiface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/workspaces" +) + +// WorkSpacesAPI is the interface type for workspaces.WorkSpaces. +type WorkSpacesAPI interface { + CreateWorkspacesRequest(*workspaces.CreateWorkspacesInput) (*request.Request, *workspaces.CreateWorkspacesOutput) + + CreateWorkspaces(*workspaces.CreateWorkspacesInput) (*workspaces.CreateWorkspacesOutput, error) + + DescribeWorkspaceBundlesRequest(*workspaces.DescribeWorkspaceBundlesInput) (*request.Request, *workspaces.DescribeWorkspaceBundlesOutput) + + DescribeWorkspaceBundles(*workspaces.DescribeWorkspaceBundlesInput) (*workspaces.DescribeWorkspaceBundlesOutput, error) + + DescribeWorkspaceBundlesPages(*workspaces.DescribeWorkspaceBundlesInput, func(*workspaces.DescribeWorkspaceBundlesOutput, bool) bool) error + + DescribeWorkspaceDirectoriesRequest(*workspaces.DescribeWorkspaceDirectoriesInput) (*request.Request, *workspaces.DescribeWorkspaceDirectoriesOutput) + + DescribeWorkspaceDirectories(*workspaces.DescribeWorkspaceDirectoriesInput) (*workspaces.DescribeWorkspaceDirectoriesOutput, error) + + DescribeWorkspaceDirectoriesPages(*workspaces.DescribeWorkspaceDirectoriesInput, func(*workspaces.DescribeWorkspaceDirectoriesOutput, bool) bool) error + + DescribeWorkspacesRequest(*workspaces.DescribeWorkspacesInput) (*request.Request, *workspaces.DescribeWorkspacesOutput) + + DescribeWorkspaces(*workspaces.DescribeWorkspacesInput) (*workspaces.DescribeWorkspacesOutput, error) + + DescribeWorkspacesPages(*workspaces.DescribeWorkspacesInput, func(*workspaces.DescribeWorkspacesOutput, bool) bool) error + + RebootWorkspacesRequest(*workspaces.RebootWorkspacesInput) (*request.Request, *workspaces.RebootWorkspacesOutput) + + RebootWorkspaces(*workspaces.RebootWorkspacesInput) (*workspaces.RebootWorkspacesOutput, error) + + RebuildWorkspacesRequest(*workspaces.RebuildWorkspacesInput) (*request.Request, *workspaces.RebuildWorkspacesOutput) 
+ + RebuildWorkspaces(*workspaces.RebuildWorkspacesInput) (*workspaces.RebuildWorkspacesOutput, error) + + TerminateWorkspacesRequest(*workspaces.TerminateWorkspacesInput) (*request.Request, *workspaces.TerminateWorkspacesOutput) + + TerminateWorkspaces(*workspaces.TerminateWorkspacesInput) (*workspaces.TerminateWorkspacesOutput, error) +} + +var _ WorkSpacesAPI = (*workspaces.WorkSpaces)(nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.travis.yml aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.travis.yml --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.travis.yml 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,22 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - tip + +# Use Go 1.5's vendoring experiment for 1.5 tests. 1.4 tests will use the tip of the dependencies repo. 
+env: + - GO15VENDOREXPERIMENT=1 + +install: + - make get-deps + +script: + - make unit-with-race-cover + +matrix: + allow_failures: + - go: tip diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1226 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + + _VERSION = "1.6.0" +) + +func Version() string { + return _VERSION +} + +var ( + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Write spaces around "=" to look better. + PrettyFormat = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is a interface that returns file content. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// ____ __. +// | |/ _|____ ___.__. +// | <_/ __ < | | +// | | \ ___/\___ | +// |____|__ \___ > ____| +// \/ \/\/ + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. 
+func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. 
+func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. 
+func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string devide by given delimiter. 
+func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 devide by given delimiter. +func (k *Key) Float64s(delim string) []float64 { + strs := k.Strings(delim) + vals := make([]float64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseFloat(strs[i], 64) + } + return vals +} + +// Ints returns list of int devide by given delimiter. +func (k *Key) Ints(delim string) []int { + strs := k.Strings(delim) + vals := make([]int, len(strs)) + for i := range strs { + vals[i], _ = strconv.Atoi(strs[i]) + } + return vals +} + +// Int64s returns list of int64 devide by given delimiter. +func (k *Key) Int64s(delim string) []int64 { + strs := k.Strings(delim) + vals := make([]int64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseInt(strs[i], 10, 64) + } + return vals +} + +// Uints returns list of uint devide by given delimiter. +func (k *Key) Uints(delim string) []uint { + strs := k.Strings(delim) + vals := make([]uint, len(strs)) + for i := range strs { + u, _ := strconv.ParseUint(strs[i], 10, 64) + vals[i] = uint(u) + } + return vals +} + +// Uint64s returns list of uint64 devide by given delimiter. +func (k *Key) Uint64s(delim string) []uint64 { + strs := k.Strings(delim) + vals := make([]uint64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseUint(strs[i], 10, 64) + } + return vals +} + +// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. +func (k *Key) TimesFormat(format, delim string) []time.Time { + strs := k.Strings(delim) + vals := make([]time.Time, len(strs)) + for i := range strs { + vals[i], _ = time.Parse(format, strs[i]) + } + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. 
+func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + k.value = v +} + +// _________ __ .__ +// / _____/ ____ _____/ |_|__| ____ ____ +// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ +// / \ ___/\ \___| | | ( <_> ) | \ +// /_______ /\___ >\___ >__| |__|\____/|___| / +// \/ \/ \/ \/ + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. 
+ sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ___________.__.__ +// \_ _____/|__| | ____ +// | __) | | | _/ __ \ +// | \ | | |_\ ___/ +// \___ / |__|____/\___ > +// \/ \/ + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. 
+ BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +func Load(source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources) + return f, f.Reload() +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. 
+func (f *File) NewSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new section: empty section name")
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if inSlice(name, f.sectionList) {
+		return f.sections[name], nil
+	}
+
+	f.sectionList = append(f.sectionList, name)
+	f.sections[name] = newSection(f, name)
+	return f.sections[name], nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+	for _, name := range names {
+		if _, err = f.NewSection(name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	if f.BlockMode {
+		f.lock.RLock()
+		defer f.lock.RUnlock()
+	}
+
+	sec := f.sections[name]
+	if sec == nil {
+		return nil, fmt.Errorf("error when getting section: section '%s' not exists", name)
+	}
+	return sec, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+	sec, err := f.GetSection(name)
+	if err != nil {
+		// Note: It's OK here because the only possible error is empty section name,
+		// but if it's empty, this piece of code won't be executed.
+		sec, _ = f.NewSection(name)
+		return sec
+	}
+	return sec
+}
+
+// Sections returns list of Section.
+func (f *File) Sections() []*Section {
+	sections := make([]*Section, len(f.sectionList))
+	for i := range f.sectionList {
+		sections[i] = f.Section(f.sectionList[i])
+	}
+	return sections
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+	list := make([]string, len(f.sectionList))
+	copy(list, f.sectionList)
+	return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func cutComment(str string) string { + i := strings.Index(str, "#") + if i == -1 { + return str + } + return str[:i] +} + +func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { + isEnd := false + for { + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", err + } + isEnd = true + } + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + break + } + val += next + if isEnd { + return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { + isEnd := false + for { + valLen := len(val) + if valLen == 0 || val[valLen-1] != '\\' { + break + } + val = val[:valLen-1] + + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", isEnd, err + } + isEnd = true + } + + next = strings.TrimSpace(next) + if len(next) == 0 { + break + } + val += next + } + return val, isEnd, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) error { + buf := bufio.NewReader(reader) + + // Handle BOM-UTF8. 
+	// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+	mask, err := buf.Peek(3)
+	if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
+		buf.Read(mask)
+	}
+
+	count := 1
+	comments := ""
+	isEnd := false
+
+	section, err := f.NewSection(DEFAULT_SECTION)
+	if err != nil {
+		return err
+	}
+
+	for {
+		line, err := buf.ReadString('\n')
+		line = strings.TrimSpace(line)
+		length := len(line)
+
+		// Check error and ignore io.EOF just for a moment.
+		if err != nil {
+			if err != io.EOF {
+				return fmt.Errorf("error reading next line: %v", err)
+			}
+			// The last line of file could be an empty line.
+			if length == 0 {
+				break
+			}
+			isEnd = true
+		}
+
+		// Skip empty lines.
+		if length == 0 {
+			continue
+		}
+
+		switch {
+		case line[0] == '#' || line[0] == ';': // Comments.
+			if len(comments) == 0 {
+				comments = line
+			} else {
+				comments += LineBreak + line
+			}
+			continue
+		case line[0] == '[' && line[length-1] == ']': // New section.
+			section, err = f.NewSection(strings.TrimSpace(line[1 : length-1]))
+			if err != nil {
+				return err
+			}
+
+			if len(comments) > 0 {
+				section.Comment = comments
+				comments = ""
+			}
+			// Reset counter.
+			count = 1
+			continue
+		}
+
+		// Other possibilities.
+		var (
+			i        int
+			keyQuote string
+			kname    string
+			valQuote string
+			val      string
+		)
+
+		// Key name surrounded by quotes.
+ if line[0] == '"' { + if length > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + if len(keyQuote) > 0 { + qLen := len(keyQuote) + pos := strings.Index(line[qLen:], keyQuote) + if pos == -1 { + return fmt.Errorf("error parsing line: missing closing key quote: %s", line) + } + pos = pos + qLen + i = strings.IndexAny(line[pos:], "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == pos { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + i = i + pos + kname = line[qLen:pos] // Just keep spaces inside quotes. + } else { + i = strings.IndexAny(line, "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == 0 { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + kname = strings.TrimSpace(line[0:i]) + } + + isAutoIncr := false + // Auto increment. + if kname == "-" { + isAutoIncr = true + kname = "#" + fmt.Sprint(count) + count++ + } + + lineRight := strings.TrimSpace(line[i+1:]) + lineRightLength := len(lineRight) + firstChar := "" + if lineRightLength >= 2 { + firstChar = lineRight[0:1] + } + if firstChar == "`" { + valQuote = "`" + } else if firstChar == `"` { + if lineRightLength >= 3 && lineRight[0:3] == `"""` { + valQuote = `"""` + } else { + valQuote = `"` + } + } else if firstChar == `'` { + valQuote = `'` + } + + if len(valQuote) > 0 { + qLen := len(valQuote) + pos := strings.LastIndex(lineRight[qLen:], valQuote) + // For multiple-line value check. 
+ if pos == -1 { + if valQuote == `"` || valQuote == `'` { + return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) + } + + val = lineRight[qLen:] + "\n" + val, err = checkMultipleLines(buf, line, val, valQuote) + if err != nil { + return err + } + } else { + val = lineRight[qLen : pos+qLen] + } + } else { + val = strings.TrimSpace(cutComment(lineRight)) + val, isEnd, err = checkContinuationLines(buf, val) + if err != nil { + return err + } + } + + k, err := section.NewKey(kname, val) + if err != nil { + return err + } + k.isAutoIncr = isAutoIncr + if len(comments) > 0 { + k.Comment = comments + comments = "" + } + + if isEnd { + break + } + } + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes file content into io.Writer with given value indention. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. 
+ buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty. + if len(sec.keyList) == 0 { + continue + } + } + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncr: + kname = "-" + case strings.Contains(kname, "`") || strings.Contains(kname, `"`): + kname = `"""` + kname + `"""` + case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): + kname = "`" + kname + "`" + } + + val := key.value + // In case key value contains "\n", "`" or "\"". + if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || + strings.Contains(val, "#") { + val = `"""` + val + `"""` + } + if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections. + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. 
+func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,512 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" + "strings" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Version(t *testing.T) { + Convey("Get version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} + +const _CONF_DATA = ` +; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +# Information about package author +# Bio can be written in multiple lines. +[author] +NAME = Unknwon # Succeeding comment +E-MAIL = fake@localhost +GITHUB = https://github.com/%(NAME)s +BIO = """Gopher. +Coding addict. +Good man. +""" # Succeeding comment + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +UNUSED_KEY = should be deleted + +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values + +[types] +STRING = str +BOOL = true +BOOL_FALSE = false +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 + +[array] +STRINGS = en, zh, de +FLOAT64S = 1.1, 2.2, 3.3 +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] +empty_lines = next line is empty\ + +[advance] +value with quotes = "some value" +value quote2 again = 'some value' +true = """"2+3=5"""" +"1+1=2" = true +"""6+1=7""" = true +"""` + "`" + `5+5` + "`" + `""" = 10 +""""6+6"""" = 12 +` + "`" + `7-2=4` + "`" + ` = false +ADDRESS = ` + "`" + `404 road, +NotFound, State, 50000` + "`" + ` + +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 \ +` + +func Test_Load(t *testing.T) { + Convey("Load from data sources", t, func() { + + Convey("Load with empty data", func() { + So(Empty(), ShouldNotBeNil) + }) + + Convey("Load with multiple data sources", func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + }) + }) + + Convey("Bad load process", t, func() { + + Convey("Load from invalid data sources", func() { + _, err := Load(_CONF_DATA) + So(err, ShouldNotBeNil) + + _, err = Load("testdata/404.ini") + So(err, ShouldNotBeNil) + + _, err = Load(1) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(""), 1) + So(err, ShouldNotBeNil) + }) + + Convey("Load with empty section name", func() { + _, err := Load([]byte("[]")) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad keys", func() { + _, err := Load([]byte(`"""name`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`"""name"""`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`""=1`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`=`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`name`)) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad values", func() { + _, err := Load([]byte(`name="""Unknwon`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`key = "value`)) + So(err, ShouldNotBeNil) + }) + }) +} + +func Test_Values(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") + }) 
+ + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") + + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) + + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) + + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) + + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) + + v1, err = sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) + + v2, err := sec.Key("FLOAT64").Float64() + So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) + + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) + + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) + + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) + + v6, err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, 
"2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 
9}), ShouldEqual, 3) + + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, 
t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + for i, v := range []float64{1.1, 2.2, 3.3} { + So(vals1[i], ShouldEqual, v) + } + + vals2 := sec.Key("INTS").Ints(",") + for i, v := range []int{1, 2, 3} { + So(vals2[i], ShouldEqual, v) + } + + vals3 := sec.Key("INTS").Int64s(",") + for i, v := range []int64{1, 2, 3} { + So(vals3[i], ShouldEqual, v) + } + + vals4 := sec.Key("UINTS").Uints(",") + for i, v := range []uint{1, 2, 3} { + So(vals4[i], ShouldEqual, v) + } + + vals5 := sec.Key("UINTS").Uint64s(",") + for i, v := range []uint64{1, 2, 3} { + So(vals5[i], ShouldEqual, v) + } + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + for i, v := range []time.Time{t, t, t} { + So(vals6[i].String(), ShouldEqual, v.String()) + } + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := cfg.Section("author").Key("NAME") + k.SetValue("无闻") + So(k.String(), ShouldEqual, "无闻") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) + + Convey("Create new sections", func() { + cfg.NewSections("test", 
"test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") + So(err, ShouldBeNil) + }) + }) + + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), ShouldNotBeNil) + }) + + Convey("Get section that not exists", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) +} + +func Test_File_Append(t *testing.T) { + Convey("Append data sources", t, func() { + cfg, err := Load([]byte("")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Append([]byte(""), []byte("")), ShouldBeNil) + + Convey("Append bad data sources", func() { + So(cfg.Append(1), ShouldNotBeNil) + So(cfg.Append([]byte(""), 1), ShouldNotBeNil) + }) + }) +} + +func Test_File_SaveTo(t *testing.T) { + Convey("Save file", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.Section("").Key("NAME").Comment = "Package name" + cfg.Section("author").Comment = `Information about package author +# Bio can be written in multiple lines.` + cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") + So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) + + cfg.Section("author").Key("NAME").Comment = "This is author name" + So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) + }) +} + +func Benchmark_Key_Value(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + 
c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_String(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_String_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_SetValue(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,560 @@ +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte` or file) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. 
+- Keep sections and keys in order as you parse and save. + +## Installation + + go get gopkg.in/ini.v1 + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = 
cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. 
+ +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +To auto-split value into slice: + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... 
+err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +## Advanced Usage + +### Recursive Values + +For all values of keys, there is a special syntax `%(<name>)s`, where `<name>` is the key name in same section or default section, and `%(<name>)s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man!
+Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. 
+ +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. 
+ +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,547 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + + go get gopkg.in/ini.v1 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + 
+```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = 
cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = 
cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +自动分割键值为切片(slice): + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +### 高级用法 + +#### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +#### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + 
+```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... 
+ info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? 
+ +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,350 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. 
+ AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := 
typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectWithProperType does the opposite thing with setWithProperType. 
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float64, + reflectTime: + key.SetValue(fmt.Sprint(field)) + case reflect.Slice: + vals := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + isTime := fmt.Sprint(field.Type()) == "[]time.Time" + for i := 0; i < field.Len(); i++ { + if isTime { + buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) + } else { + buf.WriteString(fmt.Sprint(vals.Index(i))) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct) { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. 
+ key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,239 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +type testNested struct { + Cities []string `delim:"|"` + Visits []time.Time + Note string + Unused int `ini:"-"` +} + +type testEmbeded struct { + GPA float64 +} + +type testStruct struct { + Name string `ini:"NAME"` + Age int + Male bool + Money float64 + Born time.Time + Time time.Duration `ini:"Duration"` + Others testNested + *testEmbeded `ini:"grade"` + Unused int `ini:"-"` + Unsigned uint +} + +const _CONF_DATA_STRUCT = ` +NAME = Unknwon +Age = 21 +Male = true +Money = 1.25 +Born = 1993-10-07T20:17:05Z +Duration = 2h45m +Unsigned = 3 + +[Others] +Cities = HangZhou|Boston +Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z +Note = Hello world! 
+ +[grade] +GPA = 2.8 + +[foo.bar] +Here = there +When = then +` + +type unsupport struct { + Byte byte +} + +type unsupport2 struct { + Others struct { + Cities byte + } +} + +type unsupport3 struct { + Cities byte +} + +type unsupport4 struct { + *unsupport3 `ini:"Others"` +} + +type defaultValue struct { + Name string + Age int + Male bool + Money float64 + Born time.Time + Cities []string +} + +type fooBar struct { + Here, When string +} + +const _INVALID_DATA_CONF_STRUCT = ` +Name = +Age = age +Male = 123 +Money = money +Born = nil +Cities = +` + +func Test_Struct(t *testing.T) { + Convey("Map to struct", t, func() { + Convey("Map file to struct", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Name, ShouldEqual, "Unknwon") + So(ts.Age, ShouldEqual, 21) + So(ts.Male, ShouldBeTrue) + So(ts.Money, ShouldEqual, 1.25) + So(ts.Unsigned, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + So(ts.Born.String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(ts.Time.Seconds(), ShouldEqual, dur.Seconds()) + + So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") + So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) + So(ts.Others.Note, ShouldEqual, "Hello world!") + So(ts.testEmbeded.GPA, ShouldEqual, 2.8) + }) + + Convey("Map section to struct", func() { + foobar := new(fooBar) + f, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + + So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil) + So(foobar.Here, ShouldEqual, "there") + So(foobar.When, ShouldEqual, "then") + }) + + Convey("Map to non-pointer struct", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.MapTo(testStruct{}), ShouldNotBeNil) + }) + + Convey("Map to unsupported type", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + 
So(cfg, ShouldNotBeNil) + + cfg.NameMapper = func(raw string) string { + if raw == "Byte" { + return "NAME" + } + return raw + } + So(cfg.MapTo(&unsupport{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport2{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) + }) + + Convey("Map from invalid data source", func() { + So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) + }) + + Convey("Map to wrong types and gain default values", func() { + cfg, err := Load([]byte(_INVALID_DATA_CONF_STRUCT)) + So(err, ShouldBeNil) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}} + So(cfg.MapTo(dv), ShouldBeNil) + So(dv.Name, ShouldEqual, "Joe") + So(dv.Age, ShouldEqual, 10) + So(dv.Male, ShouldBeTrue) + So(dv.Money, ShouldEqual, 1.25) + So(dv.Born.String(), ShouldEqual, t.String()) + So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston") + }) + }) + + Convey("Reflect from struct", t, func() { + type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int + } + type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded `ini:"infos"` + } + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := Empty() + So(ReflectFrom(cfg, a), ShouldBeNil) + cfg.SaveTo("testdata/conf_reflect.ini") + + Convey("Reflect from non-point struct", func() { + So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) + }) + }) +} + +type testMapper struct { + PackageName string +} + +func Test_NameGetter(t *testing.T) { + Convey("Test name mappers", t, func() { + So(MapToWithMapper(&testMapper{}, TitleUnderscore, []byte("packag_name=ini")), ShouldBeNil) + + cfg, err := Load([]byte("PACKAGE_NAME=ini")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = AllCapsUnderscore + tg := 
new(testMapper) + So(cfg.MapTo(tg), ShouldBeNil) + So(tg.PackageName, ShouldEqual, "ini") + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/testdata/conf.ini aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/testdata/conf.ini --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/testdata/conf.ini 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/testdata/conf.ini 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2 @@ +[author] +E-MAIL = u@gogs.io \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. 
+func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +package jmespath + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidPrecompiledExpressionSearches(t *testing.T) { + assert := assert.New(t) + data := make(map[string]interface{}) + data["foo"] = 
"bar" + precompiled, err := Compile("foo") + assert.Nil(err) + result, err := precompiled.Search(data) + assert.Nil(err) + assert.Equal("bar", result) +} + +func TestInvalidPrecompileErrors(t *testing.T) { + assert := assert.New(t) + _, err := Compile("not a valid expression") + assert.NotNil(err) +} + +func TestInvalidMustCompilePanics(t *testing.T) { + defer func() { + r := recover() + assert.NotNil(t, r) + }() + MustCompile("not a valid expression") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/basic.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/basic.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/basic.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/basic.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +[{ + "given": + {"foo": {"bar": {"baz": "correct"}}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": {"baz": "correct"}} + }, + { + "expression": "foo.bar", + "result": {"baz": "correct"} + }, + { + "expression": "foo.bar.baz", + "result": "correct" + }, + { + "expression": "foo\n.\nbar\n.baz", + "result": "correct" + }, + { + "expression": "foo.bar.baz.bad", + "result": null + }, + { + "expression": "foo.bar.bad", + "result": null + }, + { + "expression": "foo.bad", + "result": null + }, + { + "expression": "bad", + "result": null + }, + { + "expression": "bad.morebad.morebad", + "result": null + } + ] +}, +{ + "given": + {"foo": {"bar": ["one", "two", "three"]}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": ["one", "two", "three"]} + }, + { + "expression": "foo.bar", + "result": ["one", "two", "three"] + } + ] +}, +{ + "given": ["one", "two", "three"], + "cases": [ + { + "expression": "one", + "result": null + }, + { + "expression": "two", + "result": null + }, + { + "expression": "three", + "result": null + }, + { + "expression": "one.two", + "result": null + } + ] +}, +{ + "given": + {"foo": {"1": ["one", "two", "three"], "-1": "bar"}}, + "cases": [ + { + "expression": "foo.\"1\"", + "result": ["one", "two", "three"] + }, + { + "expression": "foo.\"1\"[0]", + "result": "one" + }, + { + 
"expression": "foo.\"-1\"", + "result": "bar" + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/boolean.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,257 @@ +[ + { + "given": { + "outer": { + "foo": "foo", + "bar": "bar", + "baz": "baz" + } + }, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] + }, + { + "given": { + "outer": { + "foo": "foo", + "bool": false, + "empty_list": [], + "empty_string": "" + } + }, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] + }, + { + "given": { + "True": true, + "False": false, + "Number": 5, + "EmptyList": [], + "Zero": 0 + }, + "cases": [ + { + "expression": 
"True && False", + "result": false + }, + { + "expression": "False && True", + "result": false + }, + { + "expression": "True && True", + "result": true + }, + { + "expression": "False && False", + "result": false + }, + { + "expression": "True && Number", + "result": 5 + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "Number && False", + "result": false + }, + { + "expression": "Number && EmptyList", + "result": [] + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "EmptyList && True", + "result": [] + }, + { + "expression": "EmptyList && False", + "result": [] + }, + { + "expression": "True || False", + "result": true + }, + { + "expression": "True || True", + "result": true + }, + { + "expression": "False || True", + "result": true + }, + { + "expression": "False || False", + "result": false + }, + { + "expression": "Number || EmptyList", + "result": 5 + }, + { + "expression": "Number || True", + "result": 5 + }, + { + "expression": "Number || True && False", + "result": 5 + }, + { + "expression": "(Number || True) && False", + "result": false + }, + { + "expression": "Number || (True && False)", + "result": 5 + }, + { + "expression": "!True", + "result": false + }, + { + "expression": "!False", + "result": true + }, + { + "expression": "!Number", + "result": false + }, + { + "expression": "!EmptyList", + "result": true + }, + { + "expression": "True && !False", + "result": true + }, + { + "expression": "True && !EmptyList", + "result": true + }, + { + "expression": "!False && !EmptyList", + "result": true + }, + { + "expression": "!(True && False)", + "result": true + }, + { + "expression": "!Zero", + "result": false + }, + { + "expression": "!!Zero", + "result": true + } + ] + }, + { + "given": { + "one": 1, + "two": 2, + "three": 3 + }, + "cases": [ + { + "expression": "one < two", + "result": true + }, + { + "expression": "one <= two", + "result": true + }, + { + "expression": "one == 
one", + "result": true + }, + { + "expression": "one == two", + "result": false + }, + { + "expression": "one > two", + "result": false + }, + { + "expression": "one >= two", + "result": false + }, + { + "expression": "one != two", + "result": true + }, + { + "expression": "one < two && three > one", + "result": true + }, + { + "expression": "one < two || three > one", + "result": true + }, + { + "expression": "one < two || three < one", + "result": true + }, + { + "expression": "two < one || three < one", + "result": false + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/current.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/current.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/current.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/current.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,25 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "@", + "result": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + } + }, + { + "expression": "@.bar", + "result": {"baz": "qux"} + }, + { + "expression": "@.foo[0]", + "result": {"name": "a"} + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/escape.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/escape.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/escape.json 
1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/escape.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,46 @@ +[{ + "given": { + "foo.bar": "dot", + "foo bar": "space", + "foo\nbar": "newline", + "foo\"bar": "doublequote", + "c:\\\\windows\\path": "windows", + "/unix/path": "unix", + "\"\"\"": "threequotes", + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "\"foo.bar\"", + "result": "dot" + }, + { + "expression": "\"foo bar\"", + "result": "space" + }, + { + "expression": "\"foo\\nbar\"", + "result": "newline" + }, + { + "expression": "\"foo\\\"bar\"", + "result": "doublequote" + }, + { + "expression": "\"c:\\\\\\\\windows\\\\path\"", + "result": "windows" + }, + { + "expression": "\"/unix/path\"", + "result": "unix" + }, + { + "expression": "\"\\\"\\\"\\\"\"", + "result": "threequotes" + }, + { + "expression": "\"bar\".\"baz\"", + "result": "qux" + } + ] +}] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/filters.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/filters.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/filters.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/filters.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,468 @@ +[ + { + "given": {"foo": [{"name": "a"}, {"name": "b"}]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "foo[?name == 'a']", + "result": [{"name": "a"}] + } + ] + }, + { + "given": {"foo": [0, 1], "bar": [2, 3]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "*[?[0] == `0`]", + 
"result": [[], []] + } + ] + }, + { + "given": {"foo": [{"first": "foo", "last": "bar"}, + {"first": "foo", "last": "foo"}, + {"first": "foo", "last": "baz"}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?first == last]", + "result": [{"first": "foo", "last": "foo"}] + }, + { + "comment": "Verify projection created from filter", + "expression": "foo[?first == last].first", + "result": ["foo"] + } + ] + }, + { + "given": {"foo": [{"age": 20}, + {"age": 25}, + {"age": 30}]}, + "cases": [ + { + "comment": "Greater than with a number", + "expression": "foo[?age > `25`]", + "result": [{"age": 30}] + }, + { + "expression": "foo[?age >= `25`]", + "result": [{"age": 25}, {"age": 30}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age > `30`]", + "result": [] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `25`]", + "result": [{"age": 20}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age <= `25`]", + "result": [{"age": 20}, {"age": 25}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `20`]", + "result": [] + }, + { + "expression": "foo[?age == `20`]", + "result": [{"age": 20}] + }, + { + "expression": "foo[?age != `20`]", + "result": [{"age": 25}, {"age": 30}] + } + ] + }, + { + "given": {"foo": [{"top": {"name": "a"}}, + {"top": {"name": "b"}}]}, + "cases": [ + { + "comment": "Filter with subexpression", + "expression": "foo[?top.name == 'a']", + "result": [{"top": {"name": "a"}}] + } + ] + }, + { + "given": {"foo": [{"top": {"first": "foo", "last": "bar"}}, + {"top": {"first": "foo", "last": "foo"}}, + {"top": {"first": "foo", "last": "baz"}}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?top.first == top.last]", + "result": [{"top": {"first": "foo", "last": "foo"}}] + }, + { + "comment": "Matching a JSON array", + "expression": "foo[?top == `{\"first\": \"foo\", \"last\": \"bar\"}`]", + 
"result": [{"top": {"first": "foo", "last": "bar"}}] + } + ] + }, + { + "given": {"foo": [ + {"key": true}, + {"key": false}, + {"key": 0}, + {"key": 1}, + {"key": [0]}, + {"key": {"bar": [0]}}, + {"key": null}, + {"key": [1]}, + {"key": {"a":2}} + ]}, + "cases": [ + { + "expression": "foo[?key == `true`]", + "result": [{"key": true}] + }, + { + "expression": "foo[?key == `false`]", + "result": [{"key": false}] + }, + { + "expression": "foo[?key == `0`]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?key == `1`]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?key == `[0]`]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?key == `{\"bar\": [0]}`]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?key == `null`]", + "result": [{"key": null}] + }, + { + "expression": "foo[?key == `[1]`]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?key == `{\"a\":2}`]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?`true` == key]", + "result": [{"key": true}] + }, + { + "expression": "foo[?`false` == key]", + "result": [{"key": false}] + }, + { + "expression": "foo[?`0` == key]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?`1` == key]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?`[0]` == key]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?`{\"bar\": [0]}` == key]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?`null` == key]", + "result": [{"key": null}] + }, + { + "expression": "foo[?`[1]` == key]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?`{\"a\":2}` == key]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?key != `true`]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `false`]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, 
{"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `0`]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `1`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `null`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `[1]`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `{\"a\":2}`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + }, + { + "expression": "foo[?`true` != key]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`false` != key]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`0` != key]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`1` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`null` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`[1]` != key]", + "result": [{"key": true}, {"key": false}, 
{"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`{\"a\":2}` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + } + ] + }, + { + "given": {"reservations": [ + {"instances": [ + {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}, + {"foo": 1, "bar": 2}, {"foo": 2, "bar": 1}]}]}, + "cases": [ + { + "expression": "reservations[].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[*].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[].instances[?bar==`1`][]", + "result": [{"foo": 2, "bar": 1}] + } + ] + }, + { + "given": { + "baz": "other", + "foo": [ + {"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?bar==`1`].bar[0]", + "result": [] + } + ] + }, + { + "given": { + "foo": [ + {"a": 1, "b": {"c": "x"}}, + {"a": 1, "b": {"c": "y"}}, + {"a": 1, "b": {"c": "z"}}, + {"a": 2, "b": {"c": "z"}}, + {"a": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?a==`1`].b.c", + "result": ["x", "y", "z"] + } + ] + }, + { + "given": {"foo": [{"name": "a"}, {"name": "b"}, {"name": "c"}]}, + "cases": [ + { + "comment": "Filter with or expression", + "expression": "foo[?name == 'a' || name == 'b']", + "result": [{"name": "a"}, {"name": "b"}] + }, + { + "expression": "foo[?name == 'a' || name == 'e']", + "result": [{"name": "a"}] + }, + { + "expression": "foo[?name == 'a' || name == 'b' || name == 'c']", + "result": [{"name": "a"}, {"name": "b"}, {"name": "c"}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2}, {"a": 1, "b": 3}]}, + "cases": [ + { + "comment": "Filter with and expression", + "expression": "foo[?a == `1` && b == `2`]", + "result": [{"a": 1, "b": 2}] + }, + { + "expression": "foo[?a == `1` && b == `4`]", + "result": [] + } + ] + }, + { + 
"given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Filter with Or and And expressions", + "expression": "foo[?c == `3` || a == `1` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "expression": "foo[?b == `2` || a == `3` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && b == `4` || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?(a == `3` && b == `4`) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?((a == `3` && b == `4`)) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && (b == `4` || b == `2`)]", + "result": [{"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && ((b == `4` || b == `2`))]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Verify precedence of or/and expressions", + "expression": "foo[?a == `1` || b ==`2` && c == `5`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "comment": "Parentheses can alter precedence", + "expression": "foo[?(a == `1` || b ==`2`) && c == `5`]", + "result": [] + }, + { + "comment": "Not expressions combined with and/or", + "expression": "foo[?!(a == `1` || b ==`2`)]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": { + "foo": [ + {"key": true}, + {"key": false}, + {"key": []}, + {"key": {}}, + {"key": [0]}, + {"key": {"a": "b"}}, + {"key": 0}, + {"key": 1}, + {"key": null}, + {"notkey": true} + ] + }, + "cases": [ + { + "comment": "Unary filter expression", + "expression": "foo[?key]", + "result": [ + {"key": true}, {"key": [0]}, {"key": {"a": "b"}}, + {"key": 0}, {"key": 1} + ] + }, + { + "comment": "Unary not filter expression", + "expression": "foo[?!key]", + "result": [ + {"key": false}, {"key": []}, {"key": {}}, + 
{"key": null}, {"notkey": true} + ] + }, + { + "comment": "Equality with null RHS", + "expression": "foo[?key == `null`]", + "result": [ + {"key": null}, {"notkey": true} + ] + } + ] + }, + { + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + "cases": [ + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ < `5`]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?`5` > @]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ == @]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/functions.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/functions.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/functions.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/functions.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,825 @@ +[{ + "given": + { + "foo": -1, + "zero": 0, + "numbers": [-1, 3, 4, 5], + "array": [-1, 3, 4, 5, "a", "100"], + "strings": ["a", "b", "c"], + "decimals": [1.01, 1.2, -1.5], + "str": "Str", + "false": false, + "empty_list": [], + "empty_hash": {}, + "objects": {"foo": "bar", "bar": "baz"}, + "null_key": null + }, + "cases": [ + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": "abs(str)", + "error": "invalid-type" + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(`false`)", + "error": "invalid-type" + }, + { + 
"expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`1`, `2`)", + "error": "invalid-arity" + }, + { + "expression": "abs()", + "error": "invalid-arity" + }, + { + "expression": "unknown_function(`1`, `2`)", + "error": "unknown-function" + }, + { + "expression": "avg(numbers)", + "result": 2.75 + }, + { + "expression": "avg(array)", + "error": "invalid-type" + }, + { + "expression": "avg('abc')", + "error": "invalid-type" + }, + { + "expression": "avg(foo)", + "error": "invalid-type" + }, + { + "expression": "avg(@)", + "error": "invalid-type" + }, + { + "expression": "avg(strings)", + "error": "invalid-type" + }, + { + "expression": "ceil(`1.2`)", + "result": 2 + }, + { + "expression": "ceil(decimals[0])", + "result": 2 + }, + { + "expression": "ceil(decimals[1])", + "result": 2 + }, + { + "expression": "ceil(decimals[2])", + "result": -1 + }, + { + "expression": "ceil('string')", + "error": "invalid-type" + }, + { + "expression": "contains('abc', 'a')", + "result": true + }, + { + "expression": "contains('abc', 'd')", + "result": false + }, + { + "expression": "contains(`false`, 'd')", + "error": "invalid-type" + }, + { + "expression": "contains(strings, 'a')", + "result": true + }, + { + "expression": "contains(decimals, `1.2`)", + "result": true + }, + { + "expression": "contains(decimals, `false`)", + "result": false + }, + { + "expression": "ends_with(str, 'r')", + "result": true + }, + { + "expression": "ends_with(str, 'tr')", + "result": true + }, + { + "expression": "ends_with(str, 'Str')", + "result": true + }, + { + "expression": "ends_with(str, 'SStr')", + "result": false + }, + { + "expression": "ends_with(str, 'foo')", + "result": false + }, + { + "expression": "ends_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "floor(`1.2`)", + "result": 1 + }, + { + "expression": "floor('string')", + "error": "invalid-type" + }, + { + "expression": "floor(decimals[0])", 
+ "result": 1 + }, + { + "expression": "floor(foo)", + "result": -1 + }, + { + "expression": "floor(str)", + "error": "invalid-type" + }, + { + "expression": "length('abc')", + "result": 3 + }, + { + "expression": "length('✓foo')", + "result": 4 + }, + { + "expression": "length('')", + "result": 0 + }, + { + "expression": "length(@)", + "result": 12 + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "length(str)", + "result": 3 + }, + { + "expression": "length(array)", + "result": 6 + }, + { + "expression": "length(objects)", + "result": 2 + }, + { + "expression": "length(`false`)", + "error": "invalid-type" + }, + { + "expression": "length(foo)", + "error": "invalid-type" + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "max(numbers)", + "result": 5 + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(strings)", + "result": "c" + }, + { + "expression": "max(abc)", + "error": "invalid-type" + }, + { + "expression": "max(array)", + "error": "invalid-type" + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(empty_list)", + "result": null + }, + { + "expression": "merge(`{}`)", + "result": {} + }, + { + "expression": "merge(`{}`, `{}`)", + "result": {} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"b\": 2}`)", + "result": {"a": 1, "b": 2} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"a\": 2}`)", + "result": {"a": 2} + }, + { + "expression": "merge(`{\"a\": 1, \"b\": 2}`, `{\"a\": 2, \"c\": 3}`, `{\"d\": 4}`)", + "result": {"a": 2, "b": 2, "c": 3, "d": 4} + }, + { + "expression": "min(numbers)", + "result": -1 + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(abc)", + "error": "invalid-type" + }, + { + "expression": "min(array)", + "error": "invalid-type" + }, + { + "expression": "min(empty_list)", + "result": null + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + 
"expression": "min(strings)", + "result": "a" + }, + { + "expression": "type('abc')", + "result": "string" + }, + { + "expression": "type(`1.0`)", + "result": "number" + }, + { + "expression": "type(`2`)", + "result": "number" + }, + { + "expression": "type(`true`)", + "result": "boolean" + }, + { + "expression": "type(`false`)", + "result": "boolean" + }, + { + "expression": "type(`null`)", + "result": "null" + }, + { + "expression": "type(`[0]`)", + "result": "array" + }, + { + "expression": "type(`{\"a\": \"b\"}`)", + "result": "object" + }, + { + "expression": "type(@)", + "result": "object" + }, + { + "expression": "sort(keys(objects))", + "result": ["bar", "foo"] + }, + { + "expression": "keys(foo)", + "error": "invalid-type" + }, + { + "expression": "keys(strings)", + "error": "invalid-type" + }, + { + "expression": "keys(`false`)", + "error": "invalid-type" + }, + { + "expression": "sort(values(objects))", + "result": ["bar", "baz"] + }, + { + "expression": "keys(empty_hash)", + "result": [] + }, + { + "expression": "values(foo)", + "error": "invalid-type" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(',', `[\"a\", \"b\"]`)", + "result": "a,b" + }, + { + "expression": "join(',', `[\"a\", 0]`)", + "error": "invalid-type" + }, + { + "expression": "join(', ', str)", + "error": "invalid-type" + }, + { + "expression": "join('|', strings)", + "result": "a|b|c" + }, + { + "expression": "join(`2`, strings)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals[].to_string(@))", + "result": "1.01|1.2|-1.5" + }, + { + "expression": "join('|', empty_list)", + "result": "" + }, + { + "expression": "reverse(numbers)", + "result": [5, 4, 3, -1] + }, + { + "expression": "reverse(array)", + "result": ["100", "a", 5, 4, 3, -1] + }, + { + "expression": 
"reverse(`[]`)", + "result": [] + }, + { + "expression": "reverse('')", + "result": "" + }, + { + "expression": "reverse('hello world')", + "result": "dlrow olleh" + }, + { + "expression": "starts_with(str, 'S')", + "result": true + }, + { + "expression": "starts_with(str, 'St')", + "result": true + }, + { + "expression": "starts_with(str, 'Str')", + "result": true + }, + { + "expression": "starts_with(str, 'String')", + "result": false + }, + { + "expression": "starts_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "sum(numbers)", + "result": 11 + }, + { + "expression": "sum(decimals)", + "result": 0.71 + }, + { + "expression": "sum(array)", + "error": "invalid-type" + }, + { + "expression": "sum(array[].to_number(@))", + "result": 111 + }, + { + "expression": "sum(`[]`)", + "result": 0 + }, + { + "expression": "to_array('foo')", + "result": ["foo"] + }, + { + "expression": "to_array(`0`)", + "result": [0] + }, + { + "expression": "to_array(objects)", + "result": [{"foo": "bar", "bar": "baz"}] + }, + { + "expression": "to_array(`[1, 2, 3]`)", + "result": [1, 2, 3] + }, + { + "expression": "to_array(false)", + "result": [false] + }, + { + "expression": "to_string('foo')", + "result": "foo" + }, + { + "expression": "to_string(`1.2`)", + "result": "1.2" + }, + { + "expression": "to_string(`[0, 1]`)", + "result": "[0,1]" + }, + { + "expression": "to_number('1.0')", + "result": 1.0 + }, + { + "expression": "to_number('1.1')", + "result": 1.1 + }, + { + "expression": "to_number('4')", + "result": 4 + }, + { + "expression": "to_number('notanumber')", + "result": null + }, + { + "expression": "to_number(`false`)", + "result": null + }, + { + "expression": "to_number(`null`)", + "result": null + }, + { + "expression": "to_number(`[0]`)", + "result": null + }, + { + "expression": "to_number(`{\"foo\": 0}`)", + "result": null + }, + { + "expression": "\"to_string\"(`1.0`)", + "error": "syntax" + }, + { + "expression": "sort(numbers)", + "result": [-1, 3, 
4, 5] + }, + { + "expression": "sort(strings)", + "result": ["a", "b", "c"] + }, + { + "expression": "sort(decimals)", + "result": [-1.5, 1.01, 1.2] + }, + { + "expression": "sort(array)", + "error": "invalid-type" + }, + { + "expression": "sort(abc)", + "error": "invalid-type" + }, + { + "expression": "sort(empty_list)", + "result": [] + }, + { + "expression": "sort(@)", + "error": "invalid-type" + }, + { + "expression": "not_null(unknown_key, str)", + "result": "Str" + }, + { + "expression": "not_null(unknown_key, foo.bar, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(unknown_key, null_key, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(all, expressions, are_null)", + "result": null + }, + { + "expression": "not_null()", + "error": "invalid-arity" + }, + { + "description": "function projection on single arg function", + "expression": "numbers[].to_string(@)", + "result": ["-1", "3", "4", "5"] + }, + { + "description": "function projection on single arg function", + "expression": "array[].to_number(@)", + "result": [-1, 3, 4, 5, 100] + } + ] +}, { + "given": + { + "foo": [ + {"b": "b", "a": "a"}, + {"c": "c", "b": "b"}, + {"d": "d", "c": "c"}, + {"e": "e", "d": "d"}, + {"f": "f", "e": "e"} + ] + }, + "cases": [ + { + "description": "function projection on variadic function", + "expression": "foo[].not_null(f, e, d, c, b, a)", + "result": ["b", "c", "d", "e", "f"] + } + ] +}, { + "given": + { + "people": [ + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"}, + {"age": 10, "age_str": "10", "bool": true, "name": 3} + ] + }, + "cases": [ + { + "description": "sort by field expression", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, 
"age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "expression": "sort_by(people, &age_str)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "sort by function expression", + "expression": "sort_by(people, &to_number(age_str))", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "function projection on sort_by function", + "expression": "sort_by(people, &age)[].name", + "result": [3, "a", "c", "b", "d"] + }, + { + "expression": "sort_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &age)[].extra", + "result": ["foo", "bar"] + }, + { + "expression": "sort_by(`[]`, &age)", + "result": [] + }, + { + "expression": "max_by(people, &age)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &age_str)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": 
"max_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &to_number(age_str))", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "min_by(people, &age)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &age_str)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &to_number(age_str))", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + } + ] +}, { + "given": + { + "people": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + }, + "cases": [ + { + "description": "stable sort order", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + } + ] +}, { + "given": + { + "people": [ + {"a": 10, "b": 1, "c": "z"}, + {"a": 10, "b": 2, "c": null}, + {"a": 10, "b": 3}, + {"a": 10, "b": 4, "c": "z"}, + {"a": 10, "b": 5, "c": null}, + {"a": 10, "b": 6}, + {"a": 10, "b": 7, "c": "z"}, + {"a": 10, "b": 8, "c": null}, + {"a": 10, "b": 9} + ], + "empty": [] + }, + "cases": [ + { + "expression": "map(&a, people)", + "result": [10, 10, 10, 10, 10, 10, 10, 10, 
10] + }, + { + "expression": "map(&c, people)", + "result": ["z", null, null, "z", null, null, "z", null, null] + }, + { + "expression": "map(&a, badkey)", + "error": "invalid-type" + }, + { + "expression": "map(&foo, empty)", + "result": [] + } + ] +}, { + "given": { + "array": [ + { + "foo": {"bar": "yes1"} + }, + { + "foo": {"bar": "yes2"} + }, + { + "foo1": {"bar": "no"} + } + ]}, + "cases": [ + { + "expression": "map(&foo.bar, array)", + "result": ["yes1", "yes2", null] + }, + { + "expression": "map(&foo1.bar, array)", + "result": [null, null, "no"] + }, + { + "expression": "map(&foo.bar.baz, array)", + "result": [null, null, null] + } + ] +}, { + "given": { + "array": [[1, 2, 3, [4]], [5, 6, 7, [8, 9]]] + }, + "cases": [ + { + "expression": "map(&[], array)", + "result": [[1, 2, 3, 4], [5, 6, 7, 8, 9]] + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/identifiers.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1377 @@ +[ + { + "given": { + "__L": true + }, + "cases": [ + { + "expression": "__L", + "result": true + } + ] + }, + { + "given": { + "!\r": true + }, + "cases": [ + { + "expression": "\"!\\r\"", + "result": true + } + ] + }, + { + "given": { + "Y_1623": true + }, + "cases": [ + { + "expression": "Y_1623", + "result": true + } + ] + }, + { + "given": { + "x": true + }, + "cases": [ + { + "expression": "x", + "result": true + } + ] + }, + { + "given": { + "\tF\uCebb": true + }, + 
"cases": [ + { + "expression": "\"\\tF\\uCebb\"", + "result": true + } + ] + }, + { + "given": { + " \t": true + }, + "cases": [ + { + "expression": "\" \\t\"", + "result": true + } + ] + }, + { + "given": { + " ": true + }, + "cases": [ + { + "expression": "\" \"", + "result": true + } + ] + }, + { + "given": { + "v2": true + }, + "cases": [ + { + "expression": "v2", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "_X": true + }, + "cases": [ + { + "expression": "_X", + "result": true + } + ] + }, + { + "given": { + "\t4\ud9da\udd15": true + }, + "cases": [ + { + "expression": "\"\\t4\\ud9da\\udd15\"", + "result": true + } + ] + }, + { + "given": { + "v24_W": true + }, + "cases": [ + { + "expression": "v24_W", + "result": true + } + ] + }, + { + "given": { + "H": true + }, + "cases": [ + { + "expression": "\"H\"", + "result": true + } + ] + }, + { + "given": { + "\f": true + }, + "cases": [ + { + "expression": "\"\\f\"", + "result": true + } + ] + }, + { + "given": { + "E4": true + }, + "cases": [ + { + "expression": "\"E4\"", + "result": true + } + ] + }, + { + "given": { + "!": true + }, + "cases": [ + { + "expression": "\"!\"", + "result": true + } + ] + }, + { + "given": { + "tM": true + }, + "cases": [ + { + "expression": "tM", + "result": true + } + ] + }, + { + "given": { + " [": true + }, + "cases": [ + { + "expression": "\" [\"", + "result": true + } + ] + }, + { + "given": { + "R!": true + }, + "cases": [ + { + "expression": "\"R!\"", + "result": true + } + ] + }, + { + "given": { + "_6W": true + }, + "cases": [ + { + "expression": "_6W", + "result": true + } + ] + }, + { + "given": { + "\uaBA1\r": true + }, + "cases": [ + { + "expression": "\"\\uaBA1\\r\"", + "result": true + } + ] + }, + { + "given": { + "tL7": true + }, + "cases": [ + { + "expression": "tL7", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + 
"expression": "\">\"", + "result": true + } + ] + }, + { + "given": { + "hvu": true + }, + "cases": [ + { + "expression": "hvu", + "result": true + } + ] + }, + { + "given": { + "; !": true + }, + "cases": [ + { + "expression": "\"; !\"", + "result": true + } + ] + }, + { + "given": { + "hU": true + }, + "cases": [ + { + "expression": "hU", + "result": true + } + ] + }, + { + "given": { + "!I\n\/": true + }, + "cases": [ + { + "expression": "\"!I\\n\\/\"", + "result": true + } + ] + }, + { + "given": { + "\uEEbF": true + }, + "cases": [ + { + "expression": "\"\\uEEbF\"", + "result": true + } + ] + }, + { + "given": { + "U)\t": true + }, + "cases": [ + { + "expression": "\"U)\\t\"", + "result": true + } + ] + }, + { + "given": { + "fa0_9": true + }, + "cases": [ + { + "expression": "fa0_9", + "result": true + } + ] + }, + { + "given": { + "/": true + }, + "cases": [ + { + "expression": "\"/\"", + "result": true + } + ] + }, + { + "given": { + "Gy": true + }, + "cases": [ + { + "expression": "Gy", + "result": true + } + ] + }, + { + "given": { + "\b": true + }, + "cases": [ + { + "expression": "\"\\b\"", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\"<\"", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "\t&\\\r": true + }, + "cases": [ + { + "expression": "\"\\t&\\\\\\r\"", + "result": true + } + ] + }, + { + "given": { + "#": true + }, + "cases": [ + { + "expression": "\"#\"", + "result": true + } + ] + }, + { + "given": { + "B__": true + }, + "cases": [ + { + "expression": "B__", + "result": true + } + ] + }, + { + "given": { + "\nS \n": true + }, + "cases": [ + { + "expression": "\"\\nS \\n\"", + "result": true + } + ] + }, + { + "given": { + "Bp": true + }, + "cases": [ + { + "expression": "Bp", + "result": true + } + ] + }, + { + "given": { + ",\t;": true + }, + "cases": [ + { + "expression": 
"\",\\t;\"", + "result": true + } + ] + }, + { + "given": { + "B_q": true + }, + "cases": [ + { + "expression": "B_q", + "result": true + } + ] + }, + { + "given": { + "\/+\t\n\b!Z": true + }, + "cases": [ + { + "expression": "\"\\/+\\t\\n\\b!Z\"", + "result": true + } + ] + }, + { + "given": { + "\udadd\udfc7\\ueFAc": true + }, + "cases": [ + { + "expression": "\"\udadd\udfc7\\\\ueFAc\"", + "result": true + } + ] + }, + { + "given": { + ":\f": true + }, + "cases": [ + { + "expression": "\":\\f\"", + "result": true + } + ] + }, + { + "given": { + "\/": true + }, + "cases": [ + { + "expression": "\"\\/\"", + "result": true + } + ] + }, + { + "given": { + "_BW_6Hg_Gl": true + }, + "cases": [ + { + "expression": "_BW_6Hg_Gl", + "result": true + } + ] + }, + { + "given": { + "\udbcf\udc02": true + }, + "cases": [ + { + "expression": "\"\udbcf\udc02\"", + "result": true + } + ] + }, + { + "given": { + "zs1DC": true + }, + "cases": [ + { + "expression": "zs1DC", + "result": true + } + ] + }, + { + "given": { + "__434": true + }, + "cases": [ + { + "expression": "__434", + "result": true + } + ] + }, + { + "given": { + "\udb94\udd41": true + }, + "cases": [ + { + "expression": "\"\udb94\udd41\"", + "result": true + } + ] + }, + { + "given": { + "Z_5": true + }, + "cases": [ + { + "expression": "Z_5", + "result": true + } + ] + }, + { + "given": { + "z_M_": true + }, + "cases": [ + { + "expression": "z_M_", + "result": true + } + ] + }, + { + "given": { + "YU_2": true + }, + "cases": [ + { + "expression": "YU_2", + "result": true + } + ] + }, + { + "given": { + "_0": true + }, + "cases": [ + { + "expression": "_0", + "result": true + } + ] + }, + { + "given": { + "\b+": true + }, + "cases": [ + { + "expression": "\"\\b+\"", + "result": true + } + ] + }, + { + "given": { + "\"": true + }, + "cases": [ + { + "expression": "\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "D7": true + }, + "cases": [ + { + "expression": "D7", + "result": true + } + ] + }, + { + 
"given": { + "_62L": true + }, + "cases": [ + { + "expression": "_62L", + "result": true + } + ] + }, + { + "given": { + "\tK\t": true + }, + "cases": [ + { + "expression": "\"\\tK\\t\"", + "result": true + } + ] + }, + { + "given": { + "\n\\\f": true + }, + "cases": [ + { + "expression": "\"\\n\\\\\\f\"", + "result": true + } + ] + }, + { + "given": { + "I_": true + }, + "cases": [ + { + "expression": "I_", + "result": true + } + ] + }, + { + "given": { + "W_a0_": true + }, + "cases": [ + { + "expression": "W_a0_", + "result": true + } + ] + }, + { + "given": { + "BQ": true + }, + "cases": [ + { + "expression": "BQ", + "result": true + } + ] + }, + { + "given": { + "\tX$\uABBb": true + }, + "cases": [ + { + "expression": "\"\\tX$\\uABBb\"", + "result": true + } + ] + }, + { + "given": { + "Z9": true + }, + "cases": [ + { + "expression": "Z9", + "result": true + } + ] + }, + { + "given": { + "\b%\"\uda38\udd0f": true + }, + "cases": [ + { + "expression": "\"\\b%\\\"\uda38\udd0f\"", + "result": true + } + ] + }, + { + "given": { + "_F": true + }, + "cases": [ + { + "expression": "_F", + "result": true + } + ] + }, + { + "given": { + "!,": true + }, + "cases": [ + { + "expression": "\"!,\"", + "result": true + } + ] + }, + { + "given": { + "\"!": true + }, + "cases": [ + { + "expression": "\"\\\"!\"", + "result": true + } + ] + }, + { + "given": { + "Hh": true + }, + "cases": [ + { + "expression": "Hh", + "result": true + } + ] + }, + { + "given": { + "&": true + }, + "cases": [ + { + "expression": "\"&\"", + "result": true + } + ] + }, + { + "given": { + "9\r\\R": true + }, + "cases": [ + { + "expression": "\"9\\r\\\\R\"", + "result": true + } + ] + }, + { + "given": { + "M_k": true + }, + "cases": [ + { + "expression": "M_k", + "result": true + } + ] + }, + { + "given": { + "!\b\n\udb06\ude52\"\"": true + }, + "cases": [ + { + "expression": "\"!\\b\\n\udb06\ude52\\\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "6": true + }, + "cases": [ + { + 
"expression": "\"6\"", + "result": true + } + ] + }, + { + "given": { + "_7": true + }, + "cases": [ + { + "expression": "_7", + "result": true + } + ] + }, + { + "given": { + "0": true + }, + "cases": [ + { + "expression": "\"0\"", + "result": true + } + ] + }, + { + "given": { + "\\8\\": true + }, + "cases": [ + { + "expression": "\"\\\\8\\\\\"", + "result": true + } + ] + }, + { + "given": { + "b7eo": true + }, + "cases": [ + { + "expression": "b7eo", + "result": true + } + ] + }, + { + "given": { + "xIUo9": true + }, + "cases": [ + { + "expression": "xIUo9", + "result": true + } + ] + }, + { + "given": { + "5": true + }, + "cases": [ + { + "expression": "\"5\"", + "result": true + } + ] + }, + { + "given": { + "?": true + }, + "cases": [ + { + "expression": "\"?\"", + "result": true + } + ] + }, + { + "given": { + "sU": true + }, + "cases": [ + { + "expression": "sU", + "result": true + } + ] + }, + { + "given": { + "VH2&H\\\/": true + }, + "cases": [ + { + "expression": "\"VH2&H\\\\\\/\"", + "result": true + } + ] + }, + { + "given": { + "_C": true + }, + "cases": [ + { + "expression": "_C", + "result": true + } + ] + }, + { + "given": { + "_": true + }, + "cases": [ + { + "expression": "_", + "result": true + } + ] + }, + { + "given": { + "<\t": true + }, + "cases": [ + { + "expression": "\"<\\t\"", + "result": true + } + ] + }, + { + "given": { + "\uD834\uDD1E": true + }, + "cases": [ + { + "expression": "\"\\uD834\\uDD1E\"", + "result": true + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/indices.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/indices.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/indices.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/indices.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,346 @@ +[{ + "given": + {"foo": {"bar": ["zero", "one", "two"]}}, + "cases": [ + { + "expression": "foo.bar[0]", + "result": "zero" + }, + { + "expression": "foo.bar[1]", + "result": "one" + }, + { + "expression": "foo.bar[2]", + "result": "two" + }, + { + "expression": "foo.bar[3]", + "result": null + }, + { + "expression": "foo.bar[-1]", + "result": "two" + }, + { + "expression": "foo.bar[-2]", + "result": "one" + }, + { + "expression": "foo.bar[-3]", + "result": "zero" + }, + { + "expression": "foo.bar[-4]", + "result": null + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo[0].bar", + "result": "one" + }, + { + "expression": "foo[1].bar", + "result": "two" + }, + { + "expression": "foo[2].bar", + "result": "three" + }, + { + "expression": "foo[3].notbar", + "result": "four" + }, + { + "expression": "foo[3].bar", + "result": null + }, + { + "expression": "foo[0]", + "result": {"bar": "one"} + }, + { + "expression": "foo[1]", + "result": {"bar": "two"} + }, + { + "expression": "foo[2]", + "result": {"bar": "three"} + }, + { + "expression": "foo[3]", + "result": {"notbar": "four"} + }, + { + "expression": "foo[4]", + "result": null + } + ] +}, +{ + "given": [ + "one", "two", "three" + ], + "cases": [ + { + "expression": "[0]", + "result": "one" + }, + { + "expression": "[1]", + "result": "two" + }, + { + "expression": "[2]", + "result": "three" + }, + { + "expression": "[-1]", + "result": "three" + }, + { + "expression": "[-2]", + "result": "two" + }, + { + "expression": "[-3]", + "result": "one" + } + ] +}, +{ + "given": {"reservations": [ + {"instances": [{"foo": 1}, {"foo": 2}]} + ]}, + "cases": [ + { + "expression": 
"reservations[].instances[].foo", + "result": [1, 2] + }, + { + "expression": "reservations[].instances[].bar", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + } + ] +}, +{ + "given": {"reservations": [{ + "instances": [ + {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"foo": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"foo": "bar"}, + {"notfoo": [{"bar": 20}, {"bar": 21}, {"notbar": [7]}, {"bar": 22}]}, + {"bar": [{"baz": [1]}, {"baz": [2]}, {"baz": [3]}, {"baz": [4]}]}, + {"baz": [{"baz": [1, 2]}, {"baz": []}, {"baz": []}, {"baz": [3, 4]}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + }, { + "instances": [ + {"a": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"b": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"c": "bar"}, + {"notfoo": [{"bar": 23}, {"bar": 24}, {"notbar": [7]}, {"bar": 25}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + } + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo[].bar", + "result": [1, 2, 4, 5, 6, 8] + }, + { + "expression": "reservations[].instances[].foo[].baz", + "result": [] + }, + { + "expression": "reservations[].instances[].notfoo[].bar", + "result": [20, 21, 22, 23, 24, 25] + }, + { + "expression": "reservations[].instances[].notfoo[].notbar", + "result": [[7], [7]] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].instances[].foo[].notbar", + "result": [3, [7]] + }, + { + "expression": "reservations[].instances[].bar[].baz", + "result": [[1], [2], [3], [4]] + }, + { + "expression": "reservations[].instances[].baz[].baz", + "result": [[1, 2], [], [], 
[3, 4]] + }, + { + "expression": "reservations[].instances[].qux[].baz", + "result": [[], [1, 2, 3], [4], [], [], [1, 2, 3], [4], []] + }, + { + "expression": "reservations[].instances[].qux[].baz[]", + "result": [1, 2, 3, 4, 1, 2, 3, 4] + } + ] +}, +{ + "given": { + "foo": [ + [["one", "two"], ["three", "four"]], + [["five", "six"], ["seven", "eight"]], + [["nine"], ["ten"]] + ] + }, + "cases": [ + { + "expression": "foo[]", + "result": [["one", "two"], ["three", "four"], ["five", "six"], + ["seven", "eight"], ["nine"], ["ten"]] + }, + { + "expression": "foo[][0]", + "result": ["one", "three", "five", "seven", "nine", "ten"] + }, + { + "expression": "foo[][1]", + "result": ["two", "four", "six", "eight"] + }, + { + "expression": "foo[][0][0]", + "result": [] + }, + { + "expression": "foo[][2][2]", + "result": [] + }, + { + "expression": "foo[][0][0][100]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].baz", + "result": [1, 3, 5, 7] + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[]", + "result": null + }, + { + 
"expression": "hash[]", + "result": null + }, + { + "expression": "number[]", + "result": null + }, + { + "expression": "nullvalue[]", + "result": null + }, + { + "expression": "string[].foo", + "result": null + }, + { + "expression": "hash[].foo", + "result": null + }, + { + "expression": "number[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo[].bar", + "result": null + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/literal.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/literal.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/literal.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/literal.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,185 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "`\"foo\"`", + "result": "foo" + }, + { + "comment": "Interpret escaped unicode.", + "expression": "`\"\\u03a6\"`", + "result": "Φ" + }, + { + "expression": "`\"✓\"`", + "result": "✓" + }, + { + "expression": "`[1, 2, 3]`", + "result": [1, 2, 3] + }, + { + "expression": "`{\"a\": \"b\"}`", + "result": {"a": "b"} + }, + { + "expression": "`true`", + "result": true + }, + { + "expression": "`false`", + "result": false + }, + { + "expression": "`null`", + "result": null + }, + { + "expression": "`0`", + "result": 0 + }, + { + "expression": "`1`", + "result": 1 + }, + { + "expression": "`2`", + "result": 2 + }, + { + "expression": "`3`", + "result": 3 + }, + { + "expression": "`4`", + "result": 4 + }, + { + "expression": "`5`", + 
"result": 5 + }, + { + "expression": "`6`", + "result": 6 + }, + { + "expression": "`7`", + "result": 7 + }, + { + "expression": "`8`", + "result": 8 + }, + { + "expression": "`9`", + "result": 9 + }, + { + "comment": "Escaping a backtick in quotes", + "expression": "`\"foo\\`bar\"`", + "result": "foo`bar" + }, + { + "comment": "Double quote in literal", + "expression": "`\"foo\\\"bar\"`", + "result": "foo\"bar" + }, + { + "expression": "`\"1\\`\"`", + "result": "1`" + }, + { + "comment": "Multiple literal expressions with escapes", + "expression": "`\"\\\\\"`.{a:`\"b\"`}", + "result": {"a": "b"} + }, + { + "comment": "literal . identifier", + "expression": "`{\"a\": \"b\"}`.a", + "result": "b" + }, + { + "comment": "literal . identifier . identifier", + "expression": "`{\"a\": {\"b\": \"c\"}}`.a.b", + "result": "c" + }, + { + "comment": "literal . identifier bracket-expr", + "expression": "`[0, 1, 2]`[1]", + "result": 1 + } + ] + }, + { + "comment": "Literals", + "given": {"type": "object"}, + "cases": [ + { + "comment": "Literal with leading whitespace", + "expression": "` {\"foo\": true}`", + "result": {"foo": true} + }, + { + "comment": "Literal with trailing whitespace", + "expression": "`{\"foo\": true} `", + "result": {"foo": true} + }, + { + "comment": "Literal on RHS of subexpr not allowed", + "expression": "foo.`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Raw String Literals", + "given": {}, + "cases": [ + { + "expression": "'foo'", + "result": "foo" + }, + { + "expression": "' foo '", + "result": " foo " + }, + { + "expression": "'0'", + "result": "0" + }, + { + "expression": "'newline\n'", + "result": "newline\n" + }, + { + "expression": "'\n'", + "result": "\n" + }, + { + "expression": "'✓'", + "result": "✓" + }, + { + "expression": "'𝄞'", + "result": "𝄞" + }, + { + "expression": "' [foo] '", + "result": " [foo] " + }, + { + "expression": "'[foo]'", + "result": "[foo]" + }, + { + "comment": "Do not interpret escaped unicode.", + 
"expression": "'\\u03a6'", + "result": "\\u03a6" + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/multiselect.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,393 @@ +[{ + "given": { + "foo": { + "bar": "bar", + "baz": "baz", + "qux": "qux", + "nested": { + "one": { + "a": "first", + "b": "second", + "c": "third" + }, + "two": { + "a": "first", + "b": "second", + "c": "third" + }, + "three": { + "a": "first", + "b": "second", + "c": {"inner": "third"} + } + } + }, + "bar": 1, + "baz": 2, + "qux\"": 3 + }, + "cases": [ + { + "expression": "foo.{bar: bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"bar\": bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"foo.bar\": bar}", + "result": {"foo.bar": "bar"} + }, + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{\"bar\": bar, \"baz\": baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "{\"baz\": baz, \"qux\\\"\": \"qux\\\"\"}", + "result": {"baz": 2, "qux\"": 3} + }, + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{bar: bar,qux: qux}", + "result": {"bar": "bar", "qux": "qux"} + }, + { + "expression": "foo.{bar: bar, noexist: noexist}", + "result": {"bar": "bar", "noexist": null} + }, + { + "expression": "foo.{noexist: noexist, alsonoexist: alsonoexist}", + 
"result": {"noexist": null, "alsonoexist": null} + }, + { + "expression": "foo.badkey.{nokey: nokey, alsonokey: alsonokey}", + "result": null + }, + { + "expression": "foo.nested.*.{a: a,b: b}", + "result": [{"a": "first", "b": "second"}, + {"a": "first", "b": "second"}, + {"a": "first", "b": "second"}] + }, + { + "expression": "foo.nested.three.{a: a, cinner: c.inner}", + "result": {"a": "first", "cinner": "third"} + }, + { + "expression": "foo.nested.three.{a: a, c: c.inner.bad.key}", + "result": {"a": "first", "c": null} + }, + { + "expression": "foo.{a: nested.one.a, b: nested.two.b}", + "result": {"a": "first", "b": "second"} + }, + { + "expression": "{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "{bar: bar}", + "result": {"bar": 1} + }, + { + "expression": "{otherkey: bar}", + "result": {"otherkey": 1} + }, + { + "expression": "{no: no, exist: exist}", + "result": {"no": null, "exist": null} + }, + { + "expression": "foo.[bar]", + "result": ["bar"] + }, + { + "expression": "foo.[bar,baz]", + "result": ["bar", "baz"] + }, + { + "expression": "foo.[bar,qux]", + "result": ["bar", "qux"] + }, + { + "expression": "foo.[bar,noexist]", + "result": ["bar", null] + }, + { + "expression": "foo.[noexist,alsonoexist]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": [2, 3, 4]} + }, + "cases": [ + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": 1, "baz": [2, 3, 4]} + }, + { + "expression": "foo.[bar,baz[0]]", + "result": [1, 2] + }, + { + "expression": "foo.[bar,baz[1]]", + "result": [1, 3] + }, + { + "expression": "foo.[bar,baz[2]]", + "result": [1, 4] + }, + { + "expression": "foo.[bar,baz[3]]", + "result": [1, null] + }, + { + "expression": "foo.[bar[0],baz[3]]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": 2} + }, + "cases": [ + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "foo.[bar,baz]", + 
"result": [1, 2] + } + ] +}, { + "given": { + "foo": { + "bar": {"baz": [{"common": "first", "one": 1}, + {"common": "second", "two": 2}]}, + "ignoreme": 1, + "includeme": true + } + }, + "cases": [ + { + "expression": "foo.{bar: bar.baz[1],includeme: includeme}", + "result": {"bar": {"common": "second", "two": 2}, "includeme": true} + }, + { + "expression": "foo.{\"bar.baz.two\": bar.baz[1].two, includeme: includeme}", + "result": {"bar.baz.two": 2, "includeme": true} + }, + { + "expression": "foo.[includeme, bar.baz[*].common]", + "result": [true, ["first", "second"]] + }, + { + "expression": "foo.[includeme, bar.baz[*].none]", + "result": [true, []] + }, + { + "expression": "foo.[includeme, bar.baz[].common]", + "result": [true, ["first", "second"]] + } + ] +}, { + "given": { + "reservations": [{ + "instances": [ + {"id": "id1", + "name": "first"}, + {"id": "id2", + "name": "second"} + ]}, { + "instances": [ + {"id": "id3", + "name": "third"}, + {"id": "id4", + "name": "fourth"} + ]} + ]}, + "cases": [ + { + "expression": "reservations[*].instances[*].{id: id, name: name}", + "result": [[{"id": "id1", "name": "first"}, {"id": "id2", "name": "second"}], + [{"id": "id3", "name": "third"}, {"id": "id4", "name": "fourth"}]] + }, + { + "expression": "reservations[].instances[].{id: id, name: name}", + "result": [{"id": "id1", "name": "first"}, + {"id": "id2", "name": "second"}, + {"id": "id3", "name": "third"}, + {"id": "id4", "name": "fourth"}] + }, + { + "expression": "reservations[].instances[].[id, name]", + "result": [["id1", "first"], + ["id2", "second"], + ["id3", "third"], + ["id4", "fourth"]] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, 
"baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].[baz, qux]", + "result": [[1, 2], [3, 4], [5, 6], [7, 8]] + }, + { + "expression": "foo[].bar[].[baz]", + "result": [[1], [3], [5], [7]] + }, + { + "expression": "foo[].bar[].[baz, qux][]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "abc" + }, { + "bar": "def" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].bar, qux[0]]", + "result": [["abc", "def"], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].[bar, boo], qux[0]]", + "result": [[["a", "c" ], ["d", "f" ]], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].not_there || baz[*].bar, qux[0]]", + "result": [["a", "d"], "zero"] + } + ] +}, +{ + "given": {"type": "object"}, + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*],*]", + "result": [null, ["object"]] + } + ] +}, +{ + "given": [], + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*]]", + "result": [[]] + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/ormatch.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,59 @@ +[{ + "given": + {"outer": {"foo": "foo", "bar": "bar", "baz": "baz"}}, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] +}, { + "given": + {"outer": {"foo": "foo", "bool": false, "empty_list": [], "empty_string": ""}}, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] +}] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/pipe.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,131 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "subkey" + }, + "other": { + "baz": "subkey" + }, + "other2": { + "baz": "subkey" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + } + } + }, + "cases": [ + { + "expression": "foo.*.baz | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [1]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [2]", + "result": "subkey" + }, + { + "expression": "foo.bar.* | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.notbaz | [*]", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | *.baz", + "result": ["subkey", "subkey"] + } + ] +}, { + "given": { + "foo": { + "bar": { + "baz": "one" + }, + "other": { + "baz": "two" + }, + "other2": { + "baz": "three" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["d", "e", "f"] + } + } + }, + "cases": [ + { + "expression": "foo | bar", + "result": {"baz": "one"} + }, + { + "expression": "foo | bar | baz", + "result": "one" + }, + { + "expression": "foo|bar| baz", + "result": "one" + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "[foo.bar, foo.other] | [0]", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | a", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | b", + "result": {"baz": "two"} + }, + { + "expression": "foo.bam || foo.bar | baz", + "result": "one" + }, + { + "expression": "foo | 
not_there || bar", + "result": {"baz": "one"} + } + ] +}, { + "given": { + "foo": [{ + "bar": [{ + "baz": "one" + }, { + "baz": "two" + }] + }, { + "bar": [{ + "baz": "three" + }, { + "baz": "four" + }] + }] + }, + "cases": [ + { + "expression": "foo[*].bar[*] | [0][0]", + "result": {"baz": "one"} + } + ] +}] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/slice.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/slice.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/slice.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/slice.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,187 @@ +[{ + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + "bar": { + "baz": 1 + } + }, + "cases": [ + { + "expression": "bar[0:10]", + "result": null + }, + { + "expression": "foo[0:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:]", + "result": [0, 1, 2, 3, 4, 5, 6, 
7, 8, 9] + }, + { + "expression": "foo[1:9]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + }, + { + "expression": "foo[0:10:2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[5:]", + "result": [5, 6, 7, 8, 9] + }, + { + "expression": "foo[5::2]", + "result": [5, 7, 9] + }, + { + "expression": "foo[::2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[::-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[1::2]", + "result": [1, 3, 5, 7, 9] + }, + { + "expression": "foo[10:0:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1] + }, + { + "expression": "foo[10:5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:-2]", + "result": [8, 6, 4] + }, + { + "expression": "foo[0:20]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[10:-20:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[10:-20]", + "result": [] + }, + { + "expression": "foo[-4:-1]", + "result": [6, 7, 8] + }, + { + "expression": "foo[:-5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:0]", + "error": "invalid-value" + }, + { + "expression": "foo[8:2:0:1]", + "error": "syntax" + }, + { + "expression": "foo[8:2&]", + "error": "syntax" + }, + { + "expression": "foo[2:a:3]", + "error": "syntax" + } + ] +}, { + "given": { + "foo": [{"a": 1}, {"a": 2}, {"a": 3}], + "bar": [{"a": {"b": 1}}, {"a": {"b": 2}}, + {"a": {"b": 3}}], + "baz": 50 + }, + "cases": [ + { + "expression": "foo[:2].a", + "result": [1, 2] + }, + { + "expression": "foo[:2].b", + "result": [] + }, + { + "expression": "foo[:2].a.b", + "result": [] + }, + { + "expression": "bar[::-1].a.b", + "result": [3, 2, 1] + }, + { + "expression": "bar[:2].a.b", + "result": [1, 2] + }, + { + "expression": "baz[:2].a", + "result": null + } + ] +}, { + "given": [{"a": 1}, {"a": 2}, {"a": 3}], + "cases": [ + { + "expression": "[:]", + "result": [{"a": 1}, {"a": 2}, {"a": 3}] + }, + { + "expression": "[:2].a", + "result": [1, 2] + }, + { + 
"expression": "[::-1].a", + "result": [3, 2, 1] + }, + { + "expression": "[:2].b", + "result": [] + } + ] +}] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/syntax.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,616 @@ +[{ + "comment": "Dot syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo.1", + "error": "syntax" + }, + { + "expression": "foo.-11", + "error": "syntax" + }, + { + "expression": "foo", + "result": null + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": ".foo", + "error": "syntax" + }, + { + "expression": "foo..bar", + "error": "syntax" + }, + { + "expression": "foo.bar.", + "error": "syntax" + }, + { + "expression": "foo[.]", + "error": "syntax" + } + ] +}, + { + "comment": "Simple token errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": ".", + "error": "syntax" + }, + { + "expression": ":", + "error": "syntax" + }, + { + "expression": ",", + "error": "syntax" + }, + { + "expression": "]", + "error": "syntax" + }, + { + "expression": "[", + "error": "syntax" + }, + { + "expression": "}", + "error": "syntax" + }, + { + "expression": "{", + "error": "syntax" + }, + { + "expression": ")", + "error": "syntax" + }, + { + "expression": "(", + "error": "syntax" + }, + { + "expression": "((&", + "error": "syntax" + }, + { + 
"expression": "a[", + "error": "syntax" + }, + { + "expression": "a]", + "error": "syntax" + }, + { + "expression": "a][", + "error": "syntax" + }, + { + "expression": "!", + "error": "syntax" + } + ] + }, + { + "comment": "Boolean syntax errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "![!(!", + "error": "syntax" + } + ] + }, + { + "comment": "Wildcard syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "*", + "result": ["object"] + }, + { + "expression": "*.*", + "result": [] + }, + { + "expression": "*.foo", + "result": [] + }, + { + "expression": "*[0]", + "result": [] + }, + { + "expression": ".*", + "error": "syntax" + }, + { + "expression": "*foo", + "error": "syntax" + }, + { + "expression": "*0", + "error": "syntax" + }, + { + "expression": "foo[*]bar", + "error": "syntax" + }, + { + "expression": "foo[*]*", + "error": "syntax" + } + ] + }, + { + "comment": "Flatten syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[]", + "result": null + } + ] + }, + { + "comment": "Simple bracket syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[0]", + "result": null + }, + { + "expression": "[*]", + "result": null + }, + { + "expression": "*.[0]", + "error": "syntax" + }, + { + "expression": "*.[\"0\"]", + "result": [[null]] + }, + { + "expression": "[*].bar", + "result": null + }, + { + "expression": "[*][0]", + "result": null + }, + { + "expression": "foo[#]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select list syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[0]", + "result": null + }, + { + "comment": "Valid multi-select of a list", + "expression": "foo[0, 1]", + "error": "syntax" + }, + { + "expression": "foo.[0]", + "error": "syntax" + }, + { + "expression": "foo.[*]", + "result": null + }, + { + "comment": "Multi-select of a list with trailing comma", + "expression": "foo[0, ]", + "error": "syntax" + }, + { + "comment": 
"Multi-select of a list with trailing comma and no close", + "expression": "foo[0,", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo.[a", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with extra comma", + "expression": "foo[0,, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using identifier indices", + "expression": "foo[abc, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index with trailing comma", + "expression": "foo[abc, ]", + "error": "syntax" + }, + { + "comment": "Valid multi-select of a hash using an identifier index", + "expression": "foo.[abc]", + "result": null + }, + { + "comment": "Valid multi-select of a hash", + "expression": "foo.[abc, def]", + "result": null + }, + { + "comment": "Multi-select of a hash using a numeric index", + "expression": "foo.[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with a trailing comma", + "expression": "foo.[abc, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with extra commas", + "expression": "foo.[abc,, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash using number indices", + "expression": "foo.[0, 1]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select hash syntax", + "given": {"type": "object"}, + "cases": [ + { + "comment": "No key or value", + "expression": "a{}", + "error": "syntax" + }, + { + "comment": "No closing token", + "expression": "a{", + "error": "syntax" + }, + { + "comment": "Not a key value pair", + "expression": "a{foo}", + "error": "syntax" + }, + { + "comment": "Missing value and closing character", + 
"expression": "a{foo:", + "error": "syntax" + }, + { + "comment": "Missing closing character", + "expression": "a{foo: 0", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a{foo:}", + "error": "syntax" + }, + { + "comment": "Trailing comma and no closing character", + "expression": "a{foo: 0, ", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a{foo: ,}", + "error": "syntax" + }, + { + "comment": "Accessing Array using an identifier", + "expression": "a{foo: bar}", + "error": "syntax" + }, + { + "expression": "a{foo: 0}", + "error": "syntax" + }, + { + "comment": "Missing key-value pair", + "expression": "a.{}", + "error": "syntax" + }, + { + "comment": "Not a key-value pair", + "expression": "a.{foo}", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a.{foo:}", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a.{foo: ,}", + "error": "syntax" + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar}", + "result": null + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar, baz: bam}", + "result": null + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, }", + "error": "syntax" + }, + { + "comment": "Missing key in second key-value pair", + "expression": "a.{foo: bar, baz}", + "error": "syntax" + }, + { + "comment": "Missing value in second key-value pair", + "expression": "a.{foo: bar, baz:}", + "error": "syntax" + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, baz: bam, }", + "error": "syntax" + }, + { + "comment": "Nested multi select", + "expression": "{\"\\\\\":{\" \":*}}", + "result": {"\\": {" ": ["object"]}} + } + ] + }, + { + "comment": "Or expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo || bar", + "result": null + }, + { + "expression": "foo ||", + "error": "syntax" + }, + { 
+ "expression": "foo.|| bar", + "error": "syntax" + }, + { + "expression": " || foo", + "error": "syntax" + }, + { + "expression": "foo || || foo", + "error": "syntax" + }, + { + "expression": "foo.[a || b]", + "result": null + }, + { + "expression": "foo.[a ||]", + "error": "syntax" + }, + { + "expression": "\"foo", + "error": "syntax" + } + ] + }, + { + "comment": "Filter expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[?bar==`\"baz\"`]", + "result": null + }, + { + "expression": "foo[? bar == `\"baz\"` ]", + "result": null + }, + { + "expression": "foo[ ?bar==`\"baz\"`]", + "error": "syntax" + }, + { + "expression": "foo[?bar==]", + "error": "syntax" + }, + { + "expression": "foo[?==]", + "error": "syntax" + }, + { + "expression": "foo[?==bar]", + "error": "syntax" + }, + { + "expression": "foo[?bar==baz?]", + "error": "syntax" + }, + { + "expression": "foo[?a.b.c==d.e.f]", + "result": null + }, + { + "expression": "foo[?bar==`[0, 1, 2]`]", + "result": null + }, + { + "expression": "foo[?bar==`[\"a\", \"b\", \"c\"]`]", + "result": null + }, + { + "comment": "Literal char not escaped", + "expression": "foo[?bar==`[\"foo`bar\"]`]", + "error": "syntax" + }, + { + "comment": "Literal char escaped", + "expression": "foo[?bar==`[\"foo\\`bar\"]`]", + "result": null + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar<>baz]", + "error": "syntax" + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar^baz]", + "error": "syntax" + }, + { + "expression": "foo[bar==baz]", + "error": "syntax" + }, + { + "comment": "Quoted identifier in filter expression no spaces", + "expression": "[?\"\\\\\">`\"foo\"`]", + "result": null + }, + { + "comment": "Quoted identifier in filter expression with spaces", + "expression": "[?\"\\\\\" > `\"foo\"`]", + "result": null + } + ] + }, + { + "comment": "Filter expression errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "bar.`\"anything\"`", + "error": 
"syntax" + }, + { + "expression": "bar.baz.noexists.`\"literal\"`", + "error": "syntax" + }, + { + "comment": "Literal wildcard projection", + "expression": "foo[*].`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[*].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`.`\"subliteral\"`", + "error": "syntax" + }, + { + "comment": "Projecting a literal onto an empty list", + "expression": "foo[*].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "twolen[*].`\"foo\"`", + "error": "syntax" + }, + { + "comment": "Two level projection of a literal", + "expression": "twolen[*].threelen[*].`\"bar\"`", + "error": "syntax" + }, + { + "comment": "Two level flattened projection of a literal", + "expression": "twolen[].threelen[].`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Identifiers", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo", + "result": null + }, + { + "expression": "\"foo\"", + "result": null + }, + { + "expression": "\"\\\\\"", + "result": null + } + ] + }, + { + "comment": "Combined syntax", + "given": [], + "cases": [ + { + "expression": "*||*|*|*", + "result": null + }, + { + "expression": "*[]||[*]", + "result": [] + }, + { + "expression": "[*.*]", + "result": [null] + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/unicode.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,38 @@ +[ + { + "given": {"foo": [{"✓": "✓"}, {"✓": "✗"}]}, + "cases": [ + { + "expression": "foo[].\"✓\"", + "result": ["✓", "✗"] + } + ] + }, + { + "given": {"☯": true}, + "cases": [ + { + "expression": "\"☯\"", + "result": true + } + ] + }, + { + "given": {"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪": true}, + "cases": [ + { + "expression": "\"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪\"", + "result": true + } + ] + }, + { + "given": {"☃": true}, + "cases": [ + { + "expression": "\"☃\"", + "result": true + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance/wildcard.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,460 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "val" + }, + "other": { + "baz": "val" + }, + "other2": { + "baz": "val" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + }, + "other5": { + "other": { + "a": 1, + "b": 1, + "c": 1 + } + } + } + }, + "cases": [ + { + "expression": "foo.*.baz", + "result": ["val", "val", "val"] + }, + { + "expression": "foo.bar.*", + "result": ["val"] + }, + { + "expression": "foo.*.notbaz", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "foo.*.notbaz[0]", + "result": ["a", "a"] + }, + { + "expression": "foo.*.notbaz[-1]", + "result": ["c", "c"] + 
} + ] +}, { + "given": { + "foo": { + "first-1": { + "second-1": "val" + }, + "first-2": { + "second-1": "val" + }, + "first-3": { + "second-1": "val" + } + } + }, + "cases": [ + { + "expression": "foo.*", + "result": [{"second-1": "val"}, {"second-1": "val"}, + {"second-1": "val"}] + }, + { + "expression": "foo.*.*", + "result": [["val"], ["val"], ["val"]] + }, + { + "expression": "foo.*.*.*", + "result": [[], [], []] + }, + { + "expression": "foo.*.*.*.*", + "result": [[], [], []] + } + ] +}, { + "given": { + "foo": { + "bar": "one" + }, + "other": { + "bar": "one" + }, + "nomatch": { + "notbar": "three" + } + }, + "cases": [ + { + "expression": "*.bar", + "result": ["one", "one"] + } + ] +}, { + "given": { + "top1": { + "sub1": {"foo": "one"} + }, + "top2": { + "sub1": {"foo": "one"} + } + }, + "cases": [ + { + "expression": "*", + "result": [{"sub1": {"foo": "one"}}, + {"sub1": {"foo": "one"}}] + }, + { + "expression": "*.sub1", + "result": [{"foo": "one"}, + {"foo": "one"}] + }, + { + "expression": "*.*", + "result": [[{"foo": "one"}], + [{"foo": "one"}]] + }, + { + "expression": "*.*.foo[]", + "result": ["one", "one"] + }, + { + "expression": "*.sub1.foo", + "result": ["one", "one"] + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "foo[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": + [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}], + "cases": [ + { + "expression": "[*]", + "result": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}] + }, + { + "expression": "[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": { + "foo": { + "bar": [ + {"baz": ["one", "two", "three"]}, + {"baz": ["four", "five", "six"]}, + {"baz": ["seven", "eight", "nine"]} + ] + } + }, + 
"cases": [ + { + "expression": "foo.bar[*].baz", + "result": [["one", "two", "three"], ["four", "five", "six"], ["seven", "eight", "nine"]] + }, + { + "expression": "foo.bar[*].baz[0]", + "result": ["one", "four", "seven"] + }, + { + "expression": "foo.bar[*].baz[1]", + "result": ["two", "five", "eight"] + }, + { + "expression": "foo.bar[*].baz[2]", + "result": ["three", "six", "nine"] + }, + { + "expression": "foo.bar[*].baz[3]", + "result": [] + } + ] +}, +{ + "given": { + "foo": { + "bar": [["one", "two"], ["three", "four"]] + } + }, + "cases": [ + { + "expression": "foo.bar[*]", + "result": [["one", "two"], ["three", "four"]] + }, + { + "expression": "foo.bar[0]", + "result": ["one", "two"] + }, + { + "expression": "foo.bar[0][0]", + "result": "one" + }, + { + "expression": "foo.bar[0][0][0]", + "result": null + }, + { + "expression": "foo.bar[0][0][0][0]", + "result": null + }, + { + "expression": "foo[0][0]", + "result": null + } + ] +}, +{ + "given": { + "foo": [ + {"bar": [{"kind": "basic"}, {"kind": "intermediate"}]}, + {"bar": [{"kind": "advanced"}, {"kind": "expert"}]}, + {"bar": "string"} + ] + + }, + "cases": [ + { + "expression": "foo[*].bar[*].kind", + "result": [["basic", "intermediate"], ["advanced", "expert"]] + }, + { + "expression": "foo[*].bar[0].kind", + "result": ["basic", "advanced"] + } + ] +}, +{ + "given": { + "foo": [ + {"bar": {"kind": "basic"}}, + {"bar": {"kind": "intermediate"}}, + {"bar": {"kind": "advanced"}}, + {"bar": {"kind": "expert"}}, + {"bar": "string"} + ] + }, + "cases": [ + { + "expression": "foo[*].bar.kind", + "result": ["basic", "intermediate", "advanced", "expert"] + } + ] +}, +{ + "given": { + "foo": [{"bar": ["one", "two"]}, {"bar": ["three", "four"]}, {"bar": ["five"]}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*].bar[1]", + "result": ["two", "four"] + }, + { + "expression": "foo[*].bar[2]", + "result": [] + } + ] +}, +{ + "given": { + 
"foo": [{"bar": []}, {"bar": []}, {"bar": []}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [["one", "two"], ["three", "four"], ["five"]] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*][1]", + "result": ["two", "four"] + } + ] +}, +{ + "given": { + "foo": [ + [ + ["one", "two"], ["three", "four"] + ], [ + ["five", "six"], ["seven", "eight"] + ], [ + ["nine"], ["ten"] + ] + ] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": [["one", "two"], ["five", "six"], ["nine"]] + }, + { + "expression": "foo[*][1]", + "result": [["three", "four"], ["seven", "eight"], ["ten"]] + }, + { + "expression": "foo[*][0][0]", + "result": ["one", "five", "nine"] + }, + { + "expression": "foo[*][1][0]", + "result": ["three", "seven", "ten"] + }, + { + "expression": "foo[*][0][1]", + "result": ["two", "six"] + }, + { + "expression": "foo[*][1][1]", + "result": ["four", "eight"] + }, + { + "expression": "foo[*][2]", + "result": [] + }, + { + "expression": "foo[*][2][2]", + "result": [] + }, + { + "expression": "bar[*]", + "result": null + }, + { + "expression": "bar[*].baz[*]", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[*]", + "result": null + }, + { + "expression": "hash[*]", + "result": null + }, + { + "expression": "number[*]", + "result": null + }, + { + "expression": "nullvalue[*]", + "result": null + }, + { + "expression": "string[*].foo", + "result": null + }, + { + "expression": "hash[*].foo", + "result": null + }, + { + "expression": "number[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo[*].bar", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "val", "bar": "val"}, + 
"number": 23, + "array": [1, 2, 3], + "nullvalue": null + }, + "cases": [ + { + "expression": "string.*", + "result": null + }, + { + "expression": "hash.*", + "result": ["val", "val"] + }, + { + "expression": "number.*", + "result": null + }, + { + "expression": "array.*", + "result": null + }, + { + "expression": "nullvalue.*", + "result": null + } + ] +}, +{ + "given": { + "a": [0, 1, 2], + "b": [0, 1, 2] + }, + "cases": [ + { + "expression": "*[0]", + "result": [0, 0] + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/compliance_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,123 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestSuite struct { + Given interface{} + TestCases []TestCase `json:"cases"` + Comment string +} +type TestCase struct { + Comment string + Expression string + Result interface{} + Error string +} + +var whiteListed = []string{ + "compliance/basic.json", + "compliance/current.json", + "compliance/escape.json", + "compliance/filters.json", + "compliance/functions.json", + "compliance/identifiers.json", + "compliance/indices.json", + "compliance/literal.json", + "compliance/multiselect.json", + "compliance/ormatch.json", + "compliance/pipe.json", + "compliance/slice.json", + "compliance/syntax.json", + "compliance/unicode.json", + "compliance/wildcard.json", + "compliance/boolean.json", +} + 
+func allowed(path string) bool { + for _, el := range whiteListed { + if el == path { + return true + } + } + return false +} + +func TestCompliance(t *testing.T) { + assert := assert.New(t) + + var complianceFiles []string + err := filepath.Walk("compliance", func(path string, _ os.FileInfo, _ error) error { + //if strings.HasSuffix(path, ".json") { + if allowed(path) { + complianceFiles = append(complianceFiles, path) + } + return nil + }) + if assert.Nil(err) { + for _, filename := range complianceFiles { + runComplianceTest(assert, filename) + } + } +} + +func runComplianceTest(assert *assert.Assertions, filename string) { + var testSuites []TestSuite + data, err := ioutil.ReadFile(filename) + if assert.Nil(err) { + err := json.Unmarshal(data, &testSuites) + if assert.Nil(err) { + for _, testsuite := range testSuites { + runTestSuite(assert, testsuite, filename) + } + } + } +} + +func runTestSuite(assert *assert.Assertions, testsuite TestSuite, filename string) { + for _, testcase := range testsuite.TestCases { + if testcase.Error != "" { + // This is a test case that verifies we error out properly. + runSyntaxTestCase(assert, testsuite.Given, testcase, filename) + } else { + runTestCase(assert, testsuite.Given, testcase, filename) + } + } +} + +func runSyntaxTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + // Anything with an .Error means that we expect that JMESPath should return + // an error when we try to evaluate the expression. 
+ _, err := Search(testcase.Expression, given) + assert.NotNil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) +} + +func runTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + lexer := NewLexer() + var err error + _, err = lexer.tokenize(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not lex expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + parser := NewParser() + _, err = parser.Parse(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not parse expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + actual, err := Search(testcase.Expression, given) + if assert.Nil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) { + assert.Equal(testcase.Result, actual, fmt.Sprintf("Expression: %s", testcase.Expression)) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + 
jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + 
name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } 
+ if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) 
+ } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), 
nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal 
{ + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = 
item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. 
+ items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/fuzz/jmespath.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,13 @@ +package jmespath + +import "github.com/jmespath/go-jmespath" + +// Fuzz will fuzz test the JMESPath parser. +func Fuzz(data []byte) int { + p := jmespath.NewParser() + _, err := p.Parse(string(data)) + if err != nil { + return 1 + } + return 0 +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". +func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } 
+ return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) 
+ } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. + rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + 
} + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + } + return nil, 
errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) 
+ elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,221 @@ +package jmespath + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +type scalars struct { + Foo string + Bar string +} + +type sliceType struct { + A string + B []scalars + C []*scalars +} + +type benchmarkStruct struct { + Fooasdfasdfasdfasdf string +} + +type benchmarkNested struct { + Fooasdfasdfasdfasdf nestedA +} + +type nestedA struct { + Fooasdfasdfasdfasdf nestedB +} + +type nestedB struct { + Fooasdfasdfasdfasdf nestedC +} + +type nestedC struct { + Fooasdfasdfasdfasdf string +} + +type nestedSlice struct { + A []sliceType +} + +func TestCanSupportEmptyInterface(t *testing.T) { + assert := assert.New(t) + data := make(map[string]interface{}) + data["foo"] = "bar" + result, err := Search("foo", data) + assert.Nil(err) + assert.Equal("bar", result) +} + +func TestCanSupportUserDefinedStructsValue(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportUserDefinedStructsRef(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceAll(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", 
"correct"}, result) +} + +func TestCanSupportStructWithSlicingExpression(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[:].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithFilterProjection(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[? `true` ].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithSlice(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithOrExpressions(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: nil} + result, err := Search("C || A", data) + assert.Nil(err) + assert.Equal("foo", result) +} + +func TestCanSupportStructWithSlicePointer(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: []*scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("C[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestWillAutomaticallyCapitalizeFieldNames(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + // Note that there's a lower cased "foo" instead of "Foo", + // but it should still correspond to the Foo field in the + // scalars struct + result, err := Search("foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceLowerCased(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("b[-1].foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func 
TestCanSupportStructWithNestedPointers(t *testing.T) { + assert := assert.New(t) + data := struct{ A *struct{ B int } }{} + result, err := Search("A.B", data) + assert.Nil(err) + assert.Nil(result) +} + +func TestCanSupportFlattenNestedSlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {B: []scalars{{Foo: "f1a"}, {Foo: "f1b"}}}, + {B: []scalars{{Foo: "f2a"}, {Foo: "f2b"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1a", "f1b", "f2a", "f2b"}, result) +} + +func TestCanSupportFlattenNestedEmptySlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {}, {B: []scalars{{Foo: "a"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"a"}, result) +} + +func TestCanSupportProjectionsWithStructs(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {A: "first"}, {A: "second"}, {A: "third"}, + }} + result, err := Search("A[*].A", data) + assert.Nil(err) + assert.Equal([]interface{}{"first", "second", "third"}, result) +} + +func TestCanSupportSliceOfStructsWithFunctions(t *testing.T) { + assert := assert.New(t) + data := []scalars{scalars{"a1", "b1"}, scalars{"a2", "b2"}} + result, err := Search("length(@)", data) + assert.Nil(err) + assert.Equal(result.(float64), 2.0) +} + +func BenchmarkInterpretSingleFieldStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf") + data := benchmarkStruct{"foobarbazqux"} + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + data := benchmarkNested{ + nestedA{ + nestedB{ + nestedC{"foobarbazqux"}, + }, + }, + } + for i := 0; i < b.N; i++ { + intr.Execute(ast, 
&data) + } +} + +func BenchmarkInterpretNestedMaps(b *testing.B) { + jsonData := []byte(`{"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": "foobarbazqux"}}}}`) + var data interface{} + json.Unmarshal(jsonData, &data) + + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + for i := 0; i < b.N; i++ { + intr.Execute(ast, data) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. 
+type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. 
+func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' 
{ + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. 
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,161 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var lexingTests = []struct { + expression string + expected []token +}{ + {"*", []token{{tStar, "*", 0, 1}}}, + {".", []token{{tDot, ".", 0, 1}}}, + {"[?", []token{{tFilter, "[?", 0, 2}}}, + {"[]", []token{{tFlatten, "[]", 0, 2}}}, + {"(", []token{{tLparen, "(", 0, 1}}}, + {")", []token{{tRparen, ")", 0, 1}}}, + {"[", []token{{tLbracket, "[", 0, 1}}}, + {"]", []token{{tRbracket, "]", 0, 1}}}, + {"{", []token{{tLbrace, "{", 0, 1}}}, + {"}", []token{{tRbrace, "}", 0, 1}}}, + {"||", 
[]token{{tOr, "||", 0, 2}}}, + {"|", []token{{tPipe, "|", 0, 1}}}, + {"29", []token{{tNumber, "29", 0, 2}}}, + {"2", []token{{tNumber, "2", 0, 1}}}, + {"0", []token{{tNumber, "0", 0, 1}}}, + {"-20", []token{{tNumber, "-20", 0, 3}}}, + {"foo", []token{{tUnquotedIdentifier, "foo", 0, 3}}}, + {`"bar"`, []token{{tQuotedIdentifier, "bar", 0, 3}}}, + // Escaping the delimiter + {`"bar\"baz"`, []token{{tQuotedIdentifier, `bar"baz`, 0, 7}}}, + {",", []token{{tComma, ",", 0, 1}}}, + {":", []token{{tColon, ":", 0, 1}}}, + {"<", []token{{tLT, "<", 0, 1}}}, + {"<=", []token{{tLTE, "<=", 0, 2}}}, + {">", []token{{tGT, ">", 0, 1}}}, + {">=", []token{{tGTE, ">=", 0, 2}}}, + {"==", []token{{tEQ, "==", 0, 2}}}, + {"!=", []token{{tNE, "!=", 0, 2}}}, + {"`[0, 1, 2]`", []token{{tJSONLiteral, "[0, 1, 2]", 1, 9}}}, + {"'foo'", []token{{tStringLiteral, "foo", 1, 3}}}, + {"'a'", []token{{tStringLiteral, "a", 1, 1}}}, + {`'foo\'bar'`, []token{{tStringLiteral, "foo'bar", 1, 7}}}, + {"@", []token{{tCurrent, "@", 0, 1}}}, + {"&", []token{{tExpref, "&", 0, 1}}}, + // Quoted identifier unicode escape sequences + {`"\u2713"`, []token{{tQuotedIdentifier, "✓", 0, 3}}}, + {`"\\"`, []token{{tQuotedIdentifier, `\`, 0, 1}}}, + {"`\"foo\"`", []token{{tJSONLiteral, "\"foo\"", 1, 5}}}, + // Combinations of tokens. + {"foo.bar", []token{ + {tUnquotedIdentifier, "foo", 0, 3}, + {tDot, ".", 3, 1}, + {tUnquotedIdentifier, "bar", 4, 3}, + }}, + {"foo[0]", []token{ + {tUnquotedIdentifier, "foo", 0, 3}, + {tLbracket, "[", 3, 1}, + {tNumber, "0", 4, 1}, + {tRbracket, "]", 5, 1}, + }}, + {"foo[?a' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." 
+ @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. 
+// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := 
p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, 
+ }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, 
right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions 
[]ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := 
p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) 
SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,136 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var parsingErrorTests = []struct { + expression string + msg string +}{ + {"foo.", "Incopmlete expression"}, + {"[foo", "Incopmlete expression"}, + {"]", "Invalid"}, + {")", "Invalid"}, + {"}", "Invalid"}, + {"foo..bar", "Invalid"}, + {`foo."bar`, "Forwards lexer errors"}, + {`{foo: bar`, "Incomplete expression"}, + {`{foo bar}`, "Invalid"}, + {`[foo bar]`, "Invalid"}, + {`foo@`, "Invalid"}, + {`&&&&&&&&&&&&t(`, "Invalid"}, + {`[*][`, "Invalid"}, +} + +func TestParsingErrors(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + for _, tt := range parsingErrorTests { + _, err := parser.Parse(tt.expression) + assert.NotNil(err, fmt.Sprintf("Expected parsing error: %s, for expression: %s", tt.msg, tt.expression)) + } +} + +var prettyPrinted = `ASTProjection { + children: { + ASTField { + value: "foo" + 
} + ASTSubexpression { + children: { + ASTSubexpression { + children: { + ASTField { + value: "bar" + } + ASTField { + value: "baz" + } + } + ASTField { + value: "qux" + } + } +} +` + +var prettyPrintedCompNode = `ASTFilterProjection { + children: { + ASTField { + value: "a" + } + ASTIdentity { + } + ASTComparator { + value: tLTE + children: { + ASTField { + value: "b" + } + ASTField { + value: "c" + } + } +} +` + +func TestPrettyPrintedAST(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + parsed, _ := parser.Parse("foo[*].bar.baz.qux") + assert.Equal(parsed.PrettyPrint(0), prettyPrinted) +} + +func TestPrettyPrintedCompNode(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + parsed, _ := parser.Parse("a[?b<=c]") + assert.Equal(parsed.PrettyPrint(0), prettyPrintedCompNode) +} + +func BenchmarkParseIdentifier(b *testing.B) { + runParseBenchmark(b, exprIdentifier) +} + +func BenchmarkParseSubexpression(b *testing.B) { + runParseBenchmark(b, exprSubexpr) +} + +func BenchmarkParseDeeplyNested50(b *testing.B) { + runParseBenchmark(b, deeplyNested50) +} + +func BenchmarkParseDeepNested50Pipe(b *testing.B) { + runParseBenchmark(b, deeplyNested50Pipe) +} + +func BenchmarkParseDeepNested50Index(b *testing.B) { + runParseBenchmark(b, deeplyNested50Index) +} + +func BenchmarkParseQuotedIdentifier(b *testing.B) { + runParseBenchmark(b, exprQuotedIdentifier) +} + +func BenchmarkParseQuotedIdentifierEscapes(b *testing.B) { + runParseBenchmark(b, quotedIdentifierEscapes) +} + +func BenchmarkParseRawStringLiteral(b *testing.B) { + runParseBenchmark(b, rawStringLiteral) +} + +func BenchmarkParseDeepProjection104(b *testing.B) { + runParseBenchmark(b, deepProjection104) +} + +func runParseBenchmark(b *testing.B, expression string) { + parser := NewParser() + for i := 0; i < b.N; i++ { + parser.Parse(expression) + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() 
string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... +script: make test diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. 
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? 
+ if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,73 @@ +package jmespath + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSlicePositiveStep(t *testing.T) { + assert := assert.New(t) + input := make([]interface{}, 5) + input[0] = 0 + input[1] = 1 + input[2] = 2 + input[3] = 3 + input[4] = 4 + result, err := slice(input, []sliceParam{{0, true}, {3, true}, {1, true}}) + 
assert.Nil(err) + assert.Equal(input[:3], result) +} + +func TestIsFalseJSONTypes(t *testing.T) { + assert := assert.New(t) + assert.True(isFalse(false)) + assert.True(isFalse("")) + var empty []interface{} + assert.True(isFalse(empty)) + m := make(map[string]interface{}) + assert.True(isFalse(m)) + assert.True(isFalse(nil)) + +} + +func TestIsFalseWithUserDefinedStructs(t *testing.T) { + assert := assert.New(t) + type nilStructType struct { + SliceOfPointers []*string + } + nilStruct := nilStructType{SliceOfPointers: nil} + assert.True(isFalse(nilStruct.SliceOfPointers)) + + // A user defined struct will never be false though, + // even if it's fields are the zero type. + assert.False(isFalse(nilStruct)) +} + +func TestIsFalseWithNilInterface(t *testing.T) { + assert := assert.New(t) + var a *int = nil + var nilInterface interface{} + nilInterface = a + assert.True(isFalse(nilInterface)) +} + +func TestIsFalseWithMapOfUserStructs(t *testing.T) { + assert := assert.New(t) + type foo struct { + Bar string + Baz string + } + m := make(map[int]foo) + assert.True(isFalse(m)) +} + +func TestObjsEqual(t *testing.T) { + assert := assert.New(t) + assert.True(objsEqual("foo", "foo")) + assert.True(objsEqual(20, 20)) + assert.True(objsEqual([]int{1, 2, 3}, []int{1, 2, 3})) + assert.True(objsEqual(nil, nil)) + assert.True(!objsEqual(nil, "foo")) + assert.True(objsEqual([]int{}, []int{})) + assert.True(!objsEqual([]int{}, nil)) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.yardopts aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.yardopts --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.yardopts 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/aws/aws-sdk-go/.yardopts 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,7 @@ +--plugin go +-e doc-src/plugin/plugin.rb +-m markdown +-o doc/api +--title "AWS SDK for Go" 
+aws/**/*.go +service/**/*.go diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/.gitignore aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/.gitignore --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/.gitignore 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/ini.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/ini.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/ini.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/ini.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1027 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. 
+ _DEPTH_VALUES = 99 + + _VERSION = "1.8.6" +) + +func Version() string { + return _VERSION +} + +var ( + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Write spaces around "=" to look better. + PrettyFormat = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is a interface that returns file content. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// ____ __. +// | |/ _|____ ___.__. +// | <_/ __ < | | +// | | \ ___/\___ | +// |____|__ \___ > ____| +// \/ \/\/ + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. 
+ noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. 
+func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. 
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. +func (k *Key) Float64s(delim string) []float64 { + strs := k.Strings(delim) + vals := make([]float64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseFloat(strs[i], 64) + } + return vals +} + +// Ints returns list of int divided by given delimiter. 
+func (k *Key) Ints(delim string) []int { + strs := k.Strings(delim) + vals := make([]int, len(strs)) + for i := range strs { + vals[i], _ = strconv.Atoi(strs[i]) + } + return vals +} + +// Int64s returns list of int64 divided by given delimiter. +func (k *Key) Int64s(delim string) []int64 { + strs := k.Strings(delim) + vals := make([]int64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseInt(strs[i], 10, 64) + } + return vals +} + +// Uints returns list of uint divided by given delimiter. +func (k *Key) Uints(delim string) []uint { + strs := k.Strings(delim) + vals := make([]uint, len(strs)) + for i := range strs { + u, _ := strconv.ParseUint(strs[i], 10, 0) + vals[i] = uint(u) + } + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. +func (k *Key) Uint64s(delim string) []uint64 { + strs := k.Strings(delim) + vals := make([]uint64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseUint(strs[i], 10, 64) + } + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) TimesFormat(format, delim string) []time.Time { + strs := k.Strings(delim) + vals := make([]time.Time, len(strs)) + for i := range strs { + vals[i], _ = time.Parse(format, strs[i]) + } + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} + +// _________ __ .__ +// / _____/ ____ _____/ |_|__| ____ ____ +// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ +// / \ ___/\ \___| | | ( <_> ) | \ +// /_______ /\___ >\___ >__| |__|\____/|___| / +// \/ \/ \/ \/ + +// Section represents a config section. 
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. 
+func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ___________.__.__ +// \_ _____/|__| | ____ +// | __) | | | _/ __ \ +// | \ | | |_\ ___/ +// \___ / |__|____/\___ > +// \/ \/ + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. 
+ dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +func Load(source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewSections creates a list of sections. 
+func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. 
+func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes file content into io.Writer with given value indention. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty. 
+ if len(sec.keyList) == 0 { + continue + } + } + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncr: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + val := key.value + // In case key value contains "\n", "`", "\"", "#" or ";". + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections. + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. 
+func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/ini_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/ini_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/ini_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/ini_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,574 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Version(t *testing.T) { + Convey("Get version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} + +const _CONF_DATA = ` +; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +# Information about package author +# Bio can be written in multiple lines. +[author] +NAME = Unknwon ; Succeeding comment +E-MAIL = fake@localhost +GITHUB = https://github.com/%(NAME)s +BIO = """Gopher. +Coding addict. +Good man. 
+""" # Succeeding comment + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +UNUSED_KEY = should be deleted + +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values + +[types] +STRING = str +BOOL = true +BOOL_FALSE = false +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 + +[array] +STRINGS = en, zh, de +FLOAT64S = 1.1, 2.2, 3.3 +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] +empty_lines = next line is empty\ + +; Comment before the section +[comments] ; This is a comment for the section too +; Comment before key +key = "value" +key2 = "value2" ; This is a comment for key2 +key3 = "one", "two", "three" + +[advance] +value with quotes = "some value" +value quote2 again = 'some value' +true = 2+3=5 +"1+1=2" = true +"""6+1=7""" = true +"""` + "`" + `5+5` + "`" + `""" = 10 +` + "`" + `"6+6"` + "`" + ` = 12 +` + "`" + `7-2=4` + "`" + ` = false +ADDRESS = ` + "`" + `404 road, +NotFound, State, 50000` + "`" + ` + +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 \ +` + +func Test_Load(t *testing.T) { + Convey("Load from data sources", t, func() { + + Convey("Load with empty data", func() { + So(Empty(), ShouldNotBeNil) + }) + + Convey("Load with multiple data sources", func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + }) + }) + + Convey("Bad load process", t, func() { + + Convey("Load from invalid data sources", func() { + _, err := Load(_CONF_DATA) + So(err, ShouldNotBeNil) + + f, err := Load("testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + + _, err = Load(1) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(""), 1) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad section name", func() { + _, err := Load([]byte("[]")) + So(err, ShouldNotBeNil) + + _, err = Load([]byte("[")) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad keys", func() { + _, err := Load([]byte(`"""name`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`"""name"""`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`""=1`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`=`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`name`)) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad values", func() { + _, err := Load([]byte(`name="""Unknwon`)) + So(err, ShouldNotBeNil) + }) + }) +} + +func Test_Values(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, 
"gopkg.in/ini.v1") + }) + + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") + + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) + + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) + + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + So(cfg.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) + + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) + + v1, err = sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) + + v2, err := sec.Key("FLOAT64").Float64() + So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) + + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) + + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) + + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) + + v6, 
err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 
10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) + + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + 
So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + for i, v := range []float64{1.1, 2.2, 3.3} { + So(vals1[i], ShouldEqual, v) + } + + vals2 := sec.Key("INTS").Ints(",") + for i, v := range []int{1, 2, 3} { + So(vals2[i], ShouldEqual, v) + } + + vals3 := sec.Key("INTS").Int64s(",") + for i, v := range []int64{1, 2, 3} { + So(vals3[i], ShouldEqual, v) + } + + vals4 := sec.Key("UINTS").Uints(",") + for i, v := range []uint{1, 2, 3} { + So(vals4[i], ShouldEqual, v) + } + + vals5 := sec.Key("UINTS").Uint64s(",") + for i, v := range []uint64{1, 2, 3} { + So(vals5[i], ShouldEqual, v) + } + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + for i, v := range []time.Time{t, t, t} { + So(vals6[i].String(), ShouldEqual, v.String()) + } + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := cfg.Section("author").Key("NAME") + k.SetValue("无闻") + So(k.String(), ShouldEqual, "无闻") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Has Key (backwards compatible)", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.Haskey("UNUSED_KEY") + haskey2 := sec.Haskey("CLONE_URL") + haskey3 := sec.Haskey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, 
ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Key", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.HasKey("UNUSED_KEY") + haskey2 := sec.HasKey("CLONE_URL") + haskey3 := sec.HasKey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Value", func() { + sec := cfg.Section("author") + hasvalue1 := sec.HasValue("Unknwon") + hasvalue2 := sec.HasValue("doc") + So(hasvalue1, ShouldBeTrue) + So(hasvalue2, ShouldBeFalse) + }) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,comments,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) + + Convey("Create new sections", func() { + cfg.NewSections("test", "test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") + So(err, ShouldBeNil) + }) + }) + + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), ShouldNotBeNil) + }) + + Convey("Get section that not exists", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) + + Convey("Test key hash clone", t, func() { + cfg, err := Load([]byte(strings.Replace("network=tcp,addr=127.0.0.1:6379,db=4,pool_size=100,idle_timeout=180", ",", "\n", -1))) + So(err, 
ShouldBeNil) + for _, v := range cfg.Section("").KeysHash() { + So(len(v), ShouldBeGreaterThan, 0) + } + }) + + Convey("Key has empty value", t, func() { + _conf := `key1= +key2= ; comment` + cfg, err := Load([]byte(_conf)) + So(err, ShouldBeNil) + So(cfg.Section("").Key("key1").Value(), ShouldBeEmpty) + }) +} + +func Test_File_Append(t *testing.T) { + Convey("Append data sources", t, func() { + cfg, err := Load([]byte("")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Append([]byte(""), []byte("")), ShouldBeNil) + + Convey("Append bad data sources", func() { + So(cfg.Append(1), ShouldNotBeNil) + So(cfg.Append([]byte(""), 1), ShouldNotBeNil) + }) + }) +} + +func Test_File_WriteTo(t *testing.T) { + Convey("Write to somewhere", t, func() { + var buf bytes.Buffer + cfg := Empty() + cfg.WriteTo(&buf) + }) +} + +func Test_File_SaveTo(t *testing.T) { + Convey("Save file", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.Section("").Key("NAME").Comment = "Package name" + cfg.Section("author").Comment = `Information about package author +# Bio can be written in multiple lines.` + cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") + So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) + + cfg.Section("author").Key("NAME").Comment = "This is author name" + So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) + }) +} + +func Benchmark_Key_Value(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_String(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_String_NonBlock(b *testing.B) { + c, _ 
:= Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_SetValue(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/LICENSE aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/LICENSE --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/LICENSE 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. 
+ +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/parser.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/parser.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/parser.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/parser.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,312 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines + if line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. 
+ p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.IndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + section, err = f.NewSection(string(line[1:closeIdx])) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + key, err := section.NewKey(kname, "") + if err != nil { + return err + } + key.isAutoIncr = isAutoIncr + + value, err := p.readValue(line[offset:]) + if err != nil { + return err + } + key.SetValue(value) + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/README.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,590 @@ +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. 
+ +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte` or file) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +To use a tagged revision: + + go get gopkg.in/ini.v1 + +To use with latest changes: + + go get github.com/go-ini/ini + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. 
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = 
cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. 
+ +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +To auto-split value into slice: + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... 
+err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%(<name>)s`, where `<name>` is the key name in same section or default section, and `%(<name>)s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! 
+Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. 
+ +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. 
+ +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/README_ZH.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/README_ZH.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/README_ZH.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/README_ZH.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,577 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + 
+```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = 
cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) 
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +自动分割键值为切片(slice): + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +### 高级用法 + +#### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +#### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... 
+``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! 
+ +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/struct.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/struct.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/struct.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/struct.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,351 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. 
+type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil || intVal == 0 { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < 
typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectWithProperType does the opposite thing with setWithProperType. 
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float64, + reflectTime: + key.SetValue(fmt.Sprint(field)) + case reflect.Slice: + vals := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + isTime := fmt.Sprint(field.Type()) == "[]time.Time" + for i := 0; i < field.Len(); i++ { + if isTime { + buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) + } else { + buf.WriteString(fmt.Sprint(vals.Index(i))) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct) { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. 
+ key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects section from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/struct_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/struct_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/struct_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/struct_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,239 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +type testNested struct { + Cities []string `delim:"|"` + Visits []time.Time + Note string + Unused int `ini:"-"` +} + +type testEmbeded struct { + GPA float64 +} + +type testStruct struct { + Name string `ini:"NAME"` + Age int + Male bool + Money float64 + Born time.Time + Time time.Duration `ini:"Duration"` + Others testNested + *testEmbeded `ini:"grade"` + Unused int `ini:"-"` + Unsigned uint +} + +const _CONF_DATA_STRUCT = ` +NAME = Unknwon +Age = 21 +Male = true +Money = 1.25 +Born = 1993-10-07T20:17:05Z +Duration = 2h45m +Unsigned = 3 + +[Others] +Cities = HangZhou|Boston +Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z +Note = Hello world! 
+ +[grade] +GPA = 2.8 + +[foo.bar] +Here = there +When = then +` + +type unsupport struct { + Byte byte +} + +type unsupport2 struct { + Others struct { + Cities byte + } +} + +type unsupport3 struct { + Cities byte +} + +type unsupport4 struct { + *unsupport3 `ini:"Others"` +} + +type defaultValue struct { + Name string + Age int + Male bool + Money float64 + Born time.Time + Cities []string +} + +type fooBar struct { + Here, When string +} + +const _INVALID_DATA_CONF_STRUCT = ` +Name = +Age = age +Male = 123 +Money = money +Born = nil +Cities = +` + +func Test_Struct(t *testing.T) { + Convey("Map to struct", t, func() { + Convey("Map file to struct", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Name, ShouldEqual, "Unknwon") + So(ts.Age, ShouldEqual, 21) + So(ts.Male, ShouldBeTrue) + So(ts.Money, ShouldEqual, 1.25) + So(ts.Unsigned, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + So(ts.Born.String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(ts.Time.Seconds(), ShouldEqual, dur.Seconds()) + + So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") + So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) + So(ts.Others.Note, ShouldEqual, "Hello world!") + So(ts.testEmbeded.GPA, ShouldEqual, 2.8) + }) + + Convey("Map section to struct", func() { + foobar := new(fooBar) + f, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + + So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil) + So(foobar.Here, ShouldEqual, "there") + So(foobar.When, ShouldEqual, "then") + }) + + Convey("Map to non-pointer struct", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.MapTo(testStruct{}), ShouldNotBeNil) + }) + + Convey("Map to unsupported type", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + 
So(cfg, ShouldNotBeNil) + + cfg.NameMapper = func(raw string) string { + if raw == "Byte" { + return "NAME" + } + return raw + } + So(cfg.MapTo(&unsupport{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport2{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) + }) + + Convey("Map from invalid data source", func() { + So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) + }) + + Convey("Map to wrong types and gain default values", func() { + cfg, err := Load([]byte(_INVALID_DATA_CONF_STRUCT)) + So(err, ShouldBeNil) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}} + So(cfg.MapTo(dv), ShouldBeNil) + So(dv.Name, ShouldEqual, "Joe") + So(dv.Age, ShouldEqual, 10) + So(dv.Male, ShouldBeTrue) + So(dv.Money, ShouldEqual, 1.25) + So(dv.Born.String(), ShouldEqual, t.String()) + So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston") + }) + }) + + Convey("Reflect from struct", t, func() { + type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int + } + type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded `ini:"infos"` + } + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := Empty() + So(ReflectFrom(cfg, a), ShouldBeNil) + cfg.SaveTo("testdata/conf_reflect.ini") + + Convey("Reflect from non-point struct", func() { + So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) + }) + }) +} + +type testMapper struct { + PackageName string +} + +func Test_NameGetter(t *testing.T) { + Convey("Test name mappers", t, func() { + So(MapToWithMapper(&testMapper{}, TitleUnderscore, []byte("packag_name=ini")), ShouldBeNil) + + cfg, err := Load([]byte("PACKAGE_NAME=ini")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = AllCapsUnderscore + tg := 
new(testMapper) + So(cfg.MapTo(tg), ShouldBeNil) + So(tg.PackageName, ShouldEqual, "ini") + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/testdata/conf.ini aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/testdata/conf.ini --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/testdata/conf.ini 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/go-ini/ini/testdata/conf.ini 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2 @@ +[author] +E-MAIL = u@gogs.io \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/.gitignore aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/.gitignore --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/.gitignore 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/.gitignore 2016-05-24 07:05:22.000000000 +0000 @@ -1,6 +1,5 @@ .DS_Store *.[568ao] -*.pb.go *.ao *.so *.pyc @@ -13,5 +12,4 @@ _obj _test _testmain.go -compiler/protoc-gen-go -compiler/testdata/extension_test +protoc-gen-go/testdata/multi/*.pb.go diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb.go 2016-05-24 07:05:22.000000000 +0000 @@ -41,12 +41,14 @@ import ( "bytes" "encoding/json" + "errors" "fmt" "io" "reflect" "sort" "strconv" "strings" + "time" "github.com/golang/protobuf/proto" ) @@ -69,12 +71,15 @@ // value, and for newlines to be appear between fields and array // elements. 
Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool } // Marshal marshals a protocol buffer into JSON. func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "") + return m.marshalObject(writer, pb, "", "") } // MarshalToString converts a protocol buffer object to JSON string. @@ -93,15 +98,83 @@ func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +type wkt interface { + XXX_WellKnownType() string +} + // marshalObject writes a struct to the Writer. -func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string) error { +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 3, 6, or 9 fractional digits, + // depending on required precision." + s, ns := s.Field(0).Int(), s.Field(1).Int() + d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond + x := fmt.Sprintf("%.9f", d.Seconds()) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct": + // Let marshalValue handle the `fields` map. + // TODO: pass the correct Properties if needed. 
+ return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + out.write("{") if m.Indent != "" { out.write("\n") } - s := reflect.ValueOf(v).Elem() firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + for i := 0; i < s.NumField(); i++ { value := s.Field(i) valueField := s.Type().Field(i) @@ -149,7 +222,7 @@ value = sv.Field(0) valueField = sv.Type().Field(0) } - prop := jsonProperties(valueField) + prop := jsonProperties(valueField, m.OrigName) if !firstField { m.writeSep(out) } @@ -182,7 +255,7 @@ value := reflect.ValueOf(ext) var prop proto.Properties prop.Parse(desc.Tag) - prop.OrigName = fmt.Sprintf("[%s]", desc.Name) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) if !firstField { m.writeSep(out) } @@ -210,6 +283,70 @@ } } +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. 
+ // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + // Only the part of type_url after the last slash is relevant. + mname := turl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return fmt.Errorf("unknown message type %q", mname) + } + msg := reflect.New(mt.Elem()).Interface().(proto.Message) + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(wkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + out.write(`"value":`) + if err := m.marshalObject(out, msg, indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + // marshalField writes field description and value to the Writer. func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { if m.Indent != "" { @@ -217,7 +354,7 @@ out.write(m.Indent) } out.write(`"`) - out.write(prop.OrigName) + out.write(prop.JSONName) out.write(`":`) if m.Indent != "" { out.write(" ") @@ -259,6 +396,19 @@ return out.err } + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). 
+ type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := v.Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + // Handle enumerations. if !m.EnumsAsInts && prop.Enum != "" { // Unknown enum values will are stringified by the proto library as their @@ -284,7 +434,7 @@ // Handle nested messages. if v.Kind() == reflect.Struct { - return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent) + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") } // Handle maps. @@ -354,15 +504,23 @@ return out.err } +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + return unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) +} + // Unmarshal unmarshals a JSON object stream into a protocol // buffer. This function is lenient and will decode any options // permutations of the related Marshaler. func Unmarshal(r io.Reader, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := json.NewDecoder(r).Decode(&inputValue); err != nil { - return err - } - return unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue) + dec := json.NewDecoder(r) + return UnmarshalNext(dec, pb) } // UnmarshalString will populate the fields of a protocol buffer based @@ -373,13 +531,83 @@ } // unmarshalValue converts/copies a value into the target. -func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { +// prop may be nil. +func unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { targetType := target.Type() // Allocate memory for pointer fields. 
if targetType.Kind() == reflect.Ptr { target.Set(reflect.New(targetType.Elem())) - return unmarshalValue(target.Elem(), inputValue) + return unmarshalValue(target.Elem(), inputValue, prop) + } + + // Handle well-known types. + type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := target.Addr().Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, except that null is allowed." + // encoding/json will turn JSON `null` into Go `nil`, + // so we don't have to do any extra work. + return unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + return fmt.Errorf("unmarshaling Any not supported yet") + case "Duration": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + ns := t.UnixNano() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. 
+ s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + target.SetInt(int64(n)) + return nil } // Handle nested messages. @@ -389,56 +617,56 @@ return err } + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + sprops := proto.GetProperties(targetType) for i := 0; i < target.NumField(); i++ { ft := target.Type().Field(i) if strings.HasPrefix(ft.Name, "XXX_") { continue } - fieldName := jsonProperties(ft).OrigName - valueForField, ok := jsonFields[fieldName] + valueForField, ok := consumeField(sprops.Prop[i]) if !ok { continue } - delete(jsonFields, fieldName) - - // Handle enums, which have an underlying type of int32, - // and may appear as strings. We do this while handling - // the struct so we have access to the enum info. - // The case of an enum appearing as a number is handled - // by the recursive call to unmarshalValue. - if enum := sprops.Prop[i].Enum; valueForField[0] == '"' && enum != "" { - vmap := proto.EnumValueMap(enum) - // Don't need to do unquoting; valid enum names - // are from a limited character set. 
- s := valueForField[1 : len(valueForField)-1] - n, ok := vmap[string(s)] - if !ok { - return fmt.Errorf("unknown value %q for enum %s", s, enum) - } - f := target.Field(i) - if f.Kind() == reflect.Ptr { // proto2 - f.Set(reflect.New(f.Type().Elem())) - f = f.Elem() - } - f.SetInt(int64(n)) - continue - } - if err := unmarshalValue(target.Field(i), valueForField); err != nil { + if err := unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { return err } } // Check for any oneof fields. - for fname, raw := range jsonFields { - if oop, ok := sprops.OneofTypes[fname]; ok { + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } nv := reflect.New(oop.Type.Elem()) target.Field(oop.Field).Set(nv) - if err := unmarshalValue(nv.Elem().Field(0), raw); err != nil { + if err := unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { return err } - delete(jsonFields, fname) } } if len(jsonFields) > 0 { @@ -462,7 +690,7 @@ len := len(slc) target.Set(reflect.MakeSlice(targetType, len, len)) for i := 0; i < len; i++ { - if err := unmarshalValue(target.Index(i), slc[i]); err != nil { + if err := unmarshalValue(target.Index(i), slc[i], prop); err != nil { return err } } @@ -476,6 +704,13 @@ return err } target.Set(reflect.MakeMap(targetType)) + var keyprop, valprop *proto.Properties + if prop != nil { + // These could still be nil if the protobuf metadata is broken somehow. + // TODO: This won't work because the fields are unexported. + // We should probably just reparse them. + //keyprop, valprop = prop.mkeyprop, prop.mvalprop + } for ks, raw := range mp { // Unmarshal map key. The core json library already decoded the key into a // string, so we handle that specially. Other types were quoted post-serialization. 
@@ -484,14 +719,14 @@ k = reflect.ValueOf(ks) } else { k = reflect.New(targetType.Key()).Elem() - if err := unmarshalValue(k, json.RawMessage(ks)); err != nil { + if err := unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { return err } } // Unmarshal map value. v := reflect.New(targetType.Elem()).Elem() - if err := unmarshalValue(v, raw); err != nil { + if err := unmarshalValue(v, raw, valprop); err != nil { return err } target.SetMapIndex(k, v) @@ -510,13 +745,28 @@ return json.Unmarshal(inputValue, target.Addr().Interface()) } -// jsonProperties returns parsed proto.Properties for the field. -func jsonProperties(f reflect.StructField) *proto.Properties { +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { var prop proto.Properties prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } return &prop } +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + // extendableProto is an interface implemented by any protocol buffer that may be extended. 
type extendableProto interface { proto.Message diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -32,12 +32,21 @@ package jsonpb import ( + "bytes" + "encoding/json" + "io" "reflect" "testing" - pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto" "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto" proto3pb "github.com/golang/protobuf/proto/proto3_proto" + anypb "github.com/golang/protobuf/ptypes/any" + durpb "github.com/golang/protobuf/ptypes/duration" + stpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + wpb "github.com/golang/protobuf/ptypes/wrappers" ) var ( @@ -62,31 +71,31 @@ } simpleObjectJSON = `{` + - `"o_bool":true,` + - `"o_int32":-32,` + - `"o_int64":"-6400000000",` + - `"o_uint32":32,` + - `"o_uint64":"6400000000",` + - `"o_sint32":-13,` + - `"o_sint64":"-2600000000",` + - `"o_float":3.14,` + - `"o_double":6.02214179e+23,` + - `"o_string":"hello \"there\"",` + - `"o_bytes":"YmVlcCBib29w"` + + `"oBool":true,` + + `"oInt32":-32,` + + `"oInt64":"-6400000000",` + + `"oUint32":32,` + + `"oUint64":"6400000000",` + + `"oSint32":-13,` + + `"oSint64":"-2600000000",` + + `"oFloat":3.14,` + + `"oDouble":6.02214179e+23,` + + `"oString":"hello \"there\"",` + + `"oBytes":"YmVlcCBib29w"` + `}` simpleObjectPrettyJSON = `{ - "o_bool": true, - "o_int32": -32, - "o_int64": "-6400000000", - "o_uint32": 32, - "o_uint64": "6400000000", - "o_sint32": -13, - "o_sint64": "-2600000000", - "o_float": 3.14, - "o_double": 6.02214179e+23, - 
"o_string": "hello \"there\"", - "o_bytes": "YmVlcCBib29w" + "oBool": true, + "oInt32": -32, + "oInt64": "-6400000000", + "oUint32": 32, + "oUint64": "6400000000", + "oSint32": -13, + "oSint64": "-2600000000", + "oFloat": 3.14, + "oDouble": 6.02214179e+23, + "oString": "hello \"there\"", + "oBytes": "YmVlcCBib29w" }` repeatsObject = &pb.Repeats{ @@ -104,65 +113,65 @@ } repeatsObjectJSON = `{` + - `"r_bool":[true,false,true],` + - `"r_int32":[-3,-4,-5],` + - `"r_int64":["-123456789","-987654321"],` + - `"r_uint32":[1,2,3],` + - `"r_uint64":["6789012345","3456789012"],` + - `"r_sint32":[-1,-2,-3],` + - `"r_sint64":["-6789012345","-3456789012"],` + - `"r_float":[3.14,6.28],` + - `"r_double":[2.99792458e+08,6.62606957e-34],` + - `"r_string":["happy","days"],` + - `"r_bytes":["c2tpdHRsZXM=","bSZtJ3M="]` + + `"rBool":[true,false,true],` + + `"rInt32":[-3,-4,-5],` + + `"rInt64":["-123456789","-987654321"],` + + `"rUint32":[1,2,3],` + + `"rUint64":["6789012345","3456789012"],` + + `"rSint32":[-1,-2,-3],` + + `"rSint64":["-6789012345","-3456789012"],` + + `"rFloat":[3.14,6.28],` + + `"rDouble":[2.99792458e+08,6.62606957e-34],` + + `"rString":["happy","days"],` + + `"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` + `}` repeatsObjectPrettyJSON = `{ - "r_bool": [ + "rBool": [ true, false, true ], - "r_int32": [ + "rInt32": [ -3, -4, -5 ], - "r_int64": [ + "rInt64": [ "-123456789", "-987654321" ], - "r_uint32": [ + "rUint32": [ 1, 2, 3 ], - "r_uint64": [ + "rUint64": [ "6789012345", "3456789012" ], - "r_sint32": [ + "rSint32": [ -1, -2, -3 ], - "r_sint64": [ + "rSint64": [ "-6789012345", "-3456789012" ], - "r_float": [ + "rFloat": [ 3.14, 6.28 ], - "r_double": [ + "rDouble": [ 2.99792458e+08, 6.62606957e-34 ], - "r_string": [ + "rString": [ "happy", "days" ], - "r_bytes": [ + "rBytes": [ "c2tpdHRsZXM=", "bSZtJ3M=" ] @@ -182,46 +191,46 @@ } complexObjectJSON = `{"color":"GREEN",` + - `"r_color":["RED","GREEN","BLUE"],` + - `"simple":{"o_int32":-32},` + - 
`"r_simple":[{"o_int32":-32},{"o_int64":"25"}],` + - `"repeats":{"r_string":["roses","red"]},` + - `"r_repeats":[{"r_string":["roses","red"]},{"r_string":["violets","blue"]}]` + + `"rColor":["RED","GREEN","BLUE"],` + + `"simple":{"oInt32":-32},` + + `"rSimple":[{"oInt32":-32},{"oInt64":"25"}],` + + `"repeats":{"rString":["roses","red"]},` + + `"rRepeats":[{"rString":["roses","red"]},{"rString":["violets","blue"]}]` + `}` complexObjectPrettyJSON = `{ "color": "GREEN", - "r_color": [ + "rColor": [ "RED", "GREEN", "BLUE" ], "simple": { - "o_int32": -32 + "oInt32": -32 }, - "r_simple": [ + "rSimple": [ { - "o_int32": -32 + "oInt32": -32 }, { - "o_int64": "25" + "oInt64": "25" } ], "repeats": { - "r_string": [ + "rString": [ "roses", "red" ] }, - "r_repeats": [ + "rRepeats": [ { - "r_string": [ + "rString": [ "roses", "red" ] }, { - "r_string": [ + "rString": [ "violets", "blue" ] @@ -235,7 +244,7 @@ colorListPrettyJSON = `{ "color": 1000, - "r_color": [ + "rColor": [ "RED" ] }` @@ -291,6 +300,18 @@ &pb.Widget{Color: pb.Widget_BLUE.Enum()}, colorPrettyJSON}, {"unknown enum value object", marshalerAllOptions, &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}, colorListPrettyJSON}, + {"repeated proto3 enum", Marshaler{}, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}, + `{"rFunny":["PUNS","SLAPSTICK"]}`}, + {"repeated proto3 enum as int", Marshaler{EnumsAsInts: true}, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}, + `{"rFunny":[1,2]}`}, {"empty value", marshaler, &pb.Simple3{}, `{}`}, {"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`}, {"map", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, `{"nummy":{"1":2,"3":4}}`}, @@ -305,14 +326,53 @@ {"map", marshaler, &pb.Mappy{Buggy: map[int64]string{1234: "yup"}}, `{"buggy":{"1234":"yup"}}`}, {"map", marshaler, 
&pb.Mappy{Booly: map[bool]bool{false: true}}, `{"booly":{"false":true}}`}, + // TODO: This is broken. + //{"map", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}`}, + {"map", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`}, {"proto2 map", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}}, - `{"m_int64_str":{"213":"cat"}}`}, + `{"mInt64Str":{"213":"cat"}}`}, {"proto2 map", marshaler, &pb.Maps{MBoolSimple: map[bool]*pb.Simple{true: &pb.Simple{OInt32: proto.Int32(1)}}}, - `{"m_bool_simple":{"true":{"o_int32":1}}}`}, + `{"mBoolSimple":{"true":{"oInt32":1}}}`}, {"oneof, not set", marshaler, &pb.MsgWithOneof{}, `{}`}, {"oneof, set", marshaler, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Title{"Grand Poobah"}}, `{"title":"Grand Poobah"}`}, + {"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)}, + `{"o_int32":4}`}, {"proto2 extension", marshaler, realNumber, realNumberJSON}, + + {"Any with message", marshaler, &pb.KnownTypes{An: &anypb.Any{ + TypeUrl: "something.example.com/jsonpb.Simple", + Value: []byte{ + // &pb.Simple{OBool:true} + 1 << 3, 1, + }, + }}, `{"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}`}, + {"Any with WKT", marshaler, &pb.KnownTypes{An: &anypb.Any{ + TypeUrl: "type.googleapis.com/google.protobuf.Duration", + Value: []byte{ + // &durpb.Duration{Seconds: 1, Nanos: 212000000 } + 1 << 3, 1, // seconds + 2 << 3, 0x80, 0xba, 0x8b, 0x65, // nanos + }, + }}, `{"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}`}, + {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`}, + {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{ + Fields: map[string]*stpb.Value{ + "one": &stpb.Value{Kind: &stpb.Value_StringValue{"loneliest number"}}, + "two": &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}, + }, + 
}}, `{"st":{"one":"loneliest number","two":null}}`}, + {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`}, + + {"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`}, + {"FloatValue", marshaler, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}, `{"flt":1.2}`}, + {"Int64Value", marshaler, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}, `{"i64":"-3"}`}, + {"UInt64Value", marshaler, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}, `{"u64":"3"}`}, + {"Int32Value", marshaler, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}, `{"i32":-4}`}, + {"UInt32Value", marshaler, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}, `{"u32":4}`}, + {"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`}, + {"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`}, + {"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`}, } func TestMarshaling(t *testing.T) { @@ -344,12 +404,49 @@ {"unknown enum value object", "{\n \"color\": 1000,\n \"r_color\": [\n \"RED\"\n ]\n}", &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}}, - {"unquoted int64 object", `{"o_int64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, - {"unquoted uint64 object", `{"o_uint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, + {"repeated proto3 enum", `{"rFunny":["PUNS","SLAPSTICK"]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"repeated proto3 enum as int", `{"rFunny":[1,2]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"repeated proto3 enum as mix of strings and ints", `{"rFunny":["PUNS",2]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + 
proto3pb.Message_SLAPSTICK, + }}}, + {"unquoted int64 object", `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, + {"unquoted uint64 object", `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, {"map", `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}}, {"map", `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}}, {"map", `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: &pb.Simple3{Dub: 1}}}}, + // TODO: This is broken. + //{"map", `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"map", `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, {"oneof", `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}}, + {"oneof spec name", `{"country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"oneof orig_name", `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"orig_name input", `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + {"camelName input", `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + + {"Duration", `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, + {"Timestamp", `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, + + {"DoubleValue", `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}}, + {"FloatValue", `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}}, + {"Int64Value", `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}}, + {"UInt64Value", `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}}, + {"Int32Value", `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}}, + {"UInt32Value", `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}}, + {"BoolValue", `{"bool":true}`, &pb.KnownTypes{Bool: 
&wpb.BoolValue{Value: true}}}, + {"StringValue", `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}}, + {"BytesValue", `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}}, + // `null` is also a permissible value. Let's just test one. + {"null DoubleValue", `{"dbl":null}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{}}}, } func TestUnmarshaling(t *testing.T) { @@ -359,7 +456,37 @@ err := UnmarshalString(tt.json, p) if err != nil { - t.Error(err) + t.Errorf("%s: %v", tt.desc, err) + continue + } + + // For easier diffs, compare text strings of the protos. + exp := proto.MarshalTextString(tt.pb) + act := proto.MarshalTextString(p) + if string(exp) != string(act) { + t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) + } + } +} + +func TestUnmarshalNext(t *testing.T) { + // We only need to check against a few, not all of them. + tests := unmarshalingTests[:5] + + // Create a buffer with many concatenated JSON objects. + var b bytes.Buffer + for _, tt := range tests { + b.WriteString(tt.json) + } + + dec := json.NewDecoder(&b) + for _, tt := range tests { + // Make a new instance of the type of our expected object. 
+ p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) + + err := UnmarshalNext(dec, p) + if err != nil { + t.Errorf("%s: %v", tt.desc, err) continue } @@ -370,6 +497,12 @@ t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) } } + + p := &pb.Simple{} + err := UnmarshalNext(dec, p) + if err != io.EOF { + t.Errorf("eof: got %v, expected io.EOF", err) + } } var unmarshalingShouldError = []struct { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile 2016-05-24 07:05:22.000000000 +0000 @@ -30,4 +30,4 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. regenerate: - protoc --go_out=. *.proto + protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. 
*.proto diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -19,6 +19,7 @@ MsgWithOneof Real Complex + KnownTypes */ package jsonpb @@ -35,6 +36,30 @@ // is compatible with the proto package it is being compiled against. const _ = proto.ProtoPackageIsVersion1 +type Numeral int32 + +const ( + Numeral_UNKNOWN Numeral = 0 + Numeral_ARABIC Numeral = 1 + Numeral_ROMAN Numeral = 2 +) + +var Numeral_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARABIC", + 2: "ROMAN", +} +var Numeral_value = map[string]int32{ + "UNKNOWN": 0, + "ARABIC": 1, + "ROMAN": 2, +} + +func (x Numeral) String() string { + return proto.EnumName(Numeral_name, int32(x)) +} +func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + type Simple3 struct { Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"` } @@ -50,6 +75,7 @@ Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" 
protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` } func (m *Mappy) Reset() { *m = Mappy{} } @@ -92,28 +118,42 @@ return nil } +func (m *Mappy) GetEnumy() map[string]Numeral { + if m != nil { + return m.Enumy + } + return nil +} + func init() { proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") + proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) } var fileDescriptor0 = []byte{ - // 261 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0xd1, 0xc1, 0x4a, 0xc3, 0x30, - 0x1c, 0x06, 0x70, 0xb2, 0x98, 0xb9, 0xfe, 0x8b, 0x08, 0x39, 0x68, 0xf0, 0x20, 0xb2, 0x8b, 0x3d, - 0x48, 0x0f, 0xdb, 0x45, 0x04, 0x2f, 0x82, 0x47, 0xf5, 0xb0, 0x07, 0x18, 0x8b, 0x86, 0x61, 0x6d, - 0x9b, 0x90, 0xa6, 0x42, 0x9e, 0xd1, 0x97, 0xb2, 0x89, 0xae, 0xfe, 0x0b, 0xf1, 0xfc, 0xfd, 0x42, - 0xbe, 0x2f, 0x81, 0xf3, 0x46, 0x5b, 0xb5, 0x75, 0xaa, 0x73, 0x5b, 0x2d, 0x2b, 0xf5, 0xea, 0xba, - 0xd2, 0x58, 0xed, 0x34, 0x9f, 0x57, 0x9d, 0x6e, 0x8d, 0x5c, 0x9e, 0xc1, 0xf1, 0xe6, 0xbd, 0x31, - 0xb5, 0x5a, 0xf3, 0x1c, 0xe8, 0x5b, 0x2f, 0x05, 0xb9, 0x22, 0x05, 0x59, 0x7e, 0x51, 0x60, 0x4f, - 0x3b, 0x63, 0x3c, 0xbf, 0x06, 0xd6, 0xf6, 0x4d, 0xe3, 0x87, 0x80, 0x16, 0xf9, 0x4a, 0x94, 0x3f, - 0x27, 0xcb, 0x98, 0x96, 0xcf, 0x21, 0x7a, 0x6c, 0x9d, 0x8d, 0xb0, 0x73, 0xd6, 0x7a, 0x31, 0x4b, - 0xc1, 0x4d, 0x88, 0x46, 0x38, 0x94, 0xa9, 0xbc, 0xa0, 0x29, 0xf8, 0x12, 0xa2, 0x11, 0xca, 0x7e, - 0xbf, 0xf7, 0xe2, 0x28, 0x05, 0x1f, 0x42, 0xf4, 0x07, 0xb5, 0xae, 0xbd, 0x60, 0x49, 0x18, 0xa2, - 0x08, 0x2f, 0x6e, 0x00, 0x50, 0xe3, 0x61, 0xf1, 0x87, 0xf2, 0x71, 0x31, 0xe5, 0x27, 0xc0, 0x3e, - 0x77, 0x75, 0xaf, 0x86, 0xfa, 0xa4, 0x60, 0x77, 0xb3, 0x5b, 0x12, 0x34, 0xaa, 0x8d, 0x74, 0x36, - 0xd5, 0x59, 0xd4, 0xf7, 0x00, 0xa8, 0x3b, 0xd2, 0x8c, 0x5f, 0x62, 0x9d, 0xaf, 0x4e, 0x0f, 0xfd, - 0x7e, 0x9f, 0xfe, 0x70, 0x19, 0x5a, 0xf4, 0x7f, 0xb5, 0x6c, 0xd4, 0xe3, 0x2c, 0xac, 0x17, 0x53, - 0xbd, 0x08, 0x5a, 
0xce, 0xe3, 0xa7, 0xaf, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd5, 0x3b, 0xe8, - 0xea, 0x0f, 0x02, 0x00, 0x00, + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x93, 0x4d, 0x4b, 0xc3, 0x40, + 0x10, 0x86, 0x4d, 0xe2, 0xa6, 0xcd, 0x14, 0x34, 0x2c, 0x82, 0x8b, 0x5e, 0x4a, 0x41, 0x28, 0x82, + 0x39, 0xb4, 0x97, 0xe2, 0xad, 0x95, 0x1e, 0x8a, 0x34, 0x85, 0x14, 0xf1, 0x58, 0x1a, 0x5d, 0x8a, + 0x31, 0xc9, 0x86, 0x7c, 0x08, 0xfb, 0x83, 0xfc, 0x9f, 0x32, 0x9b, 0xd4, 0xc4, 0xb2, 0xe0, 0x6d, + 0x92, 0xf7, 0x79, 0xc2, 0xec, 0x1b, 0x16, 0xae, 0x13, 0x91, 0xf3, 0x5d, 0xc9, 0x8b, 0x72, 0x27, + 0xc2, 0x88, 0xbf, 0x95, 0x85, 0x97, 0xe5, 0xa2, 0x14, 0xd4, 0x8e, 0x0a, 0x91, 0x66, 0xe1, 0xe8, + 0x16, 0x7a, 0xdb, 0x8f, 0x24, 0x8b, 0xf9, 0x94, 0xba, 0x60, 0xbd, 0x57, 0x21, 0x33, 0x86, 0xc6, + 0xd8, 0x08, 0x70, 0x1c, 0x7d, 0x13, 0x20, 0xeb, 0x7d, 0x96, 0x49, 0xea, 0x01, 0x49, 0xab, 0x24, + 0x91, 0xcc, 0x18, 0x5a, 0xe3, 0xc1, 0x84, 0x79, 0xb5, 0xee, 0xa9, 0xd4, 0xf3, 0x31, 0x5a, 0xa6, + 0x65, 0x2e, 0x83, 0x1a, 0x43, 0xbe, 0x28, 0xf3, 0x5c, 0x32, 0x53, 0xc7, 0x6f, 0x31, 0x6a, 0x78, + 0x85, 0x21, 0x2f, 0xc2, 0x28, 0x92, 0xcc, 0xd2, 0xf1, 0x1b, 0x8c, 0x1a, 0x5e, 0x61, 0xc8, 0x87, + 0xd5, 0xe1, 0x20, 0xd9, 0xb9, 0x8e, 0x5f, 0x60, 0xd4, 0xf0, 0x0a, 0x53, 0xbc, 0x10, 0xb1, 0x64, + 0x44, 0xcb, 0x63, 0x74, 0xe4, 0x71, 0x46, 0x9e, 0xa7, 0x55, 0x22, 0x99, 0xad, 0xe3, 0x97, 0x18, + 0x35, 0xbc, 0xc2, 0x6e, 0x66, 0x00, 0x6d, 0x09, 0xd8, 0xe4, 0x27, 0x97, 0xaa, 0x49, 0x2b, 0xc0, + 0x91, 0x5e, 0x01, 0xf9, 0xda, 0xc7, 0x15, 0x67, 0xe6, 0xd0, 0x18, 0x93, 0xa0, 0x7e, 0x78, 0x34, + 0x67, 0x06, 0x9a, 0x6d, 0x1d, 0x5d, 0xd3, 0xd1, 0x98, 0x4e, 0xd7, 0x5c, 0x01, 0xb4, 0xc5, 0x74, + 0x4d, 0x52, 0x9b, 0x77, 0x5d, 0x73, 0x30, 0xb9, 0x3c, 0x9e, 0xa1, 0xf9, 0xdf, 0x27, 0x4b, 0xb4, + 0x9d, 0xfd, 0xb7, 0xbe, 0x73, 0x6a, 0xfe, 0xb6, 0xd7, 0x35, 0xfb, 0x1a, 0xb3, 0x7f, 0xb2, 0x7e, + 0xdb, 0xa3, 0xe6, 0xe0, 0x7f, 0xd6, 0xbf, 0x68, 0xd7, 0xf7, 0xab, 0x84, 
0xe7, 0xfb, 0xb8, 0xf3, + 0xa9, 0xfb, 0x07, 0xe8, 0x35, 0x6f, 0xe9, 0x00, 0x7a, 0x2f, 0xfe, 0xb3, 0xbf, 0x79, 0xf5, 0xdd, + 0x33, 0x0a, 0x60, 0xcf, 0x83, 0xf9, 0x62, 0xf5, 0xe4, 0x1a, 0xd4, 0x01, 0x12, 0x6c, 0xd6, 0x73, + 0xdf, 0x35, 0x43, 0x5b, 0x5d, 0x81, 0xe9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3d, 0x04, 0xff, + 0x62, 0x1d, 0x03, 0x00, 0x00, } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto 2016-05-24 07:05:22.000000000 +0000 @@ -37,10 +37,17 @@ double dub = 1; } +enum Numeral { + UNKNOWN = 0; + ARABIC = 1; + ROMAN = 2; +} + message Mappy { map nummy = 1; map strry = 2; map objjy = 3; map buggy = 4; map booly = 5; + map enumy = 6; } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -7,6 +7,11 @@ import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 
"github.com/golang/protobuf/ptypes/struct" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -52,17 +57,17 @@ // Test message for holding primitive types. type Simple struct { - OBool *bool `protobuf:"varint,1,opt,name=o_bool" json:"o_bool,omitempty"` - OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32" json:"o_int32,omitempty"` - OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64" json:"o_int64,omitempty"` - OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32" json:"o_uint32,omitempty"` - OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64" json:"o_uint64,omitempty"` - OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32" json:"o_sint32,omitempty"` - OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64" json:"o_sint64,omitempty"` - OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float" json:"o_float,omitempty"` - ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double" json:"o_double,omitempty"` - OString *string `protobuf:"bytes,10,opt,name=o_string" json:"o_string,omitempty"` - OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes" json:"o_bytes,omitempty"` + OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` + OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` + OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` + OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` + OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` + OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` + OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` + OFloat *float32 
`protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` + ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` + OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"` + OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -150,17 +155,17 @@ // Test message for holding repeated primitives. type Repeats struct { - RBool []bool `protobuf:"varint,1,rep,name=r_bool" json:"r_bool,omitempty"` - RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32" json:"r_int32,omitempty"` - RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64" json:"r_int64,omitempty"` - RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32" json:"r_uint32,omitempty"` - RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64" json:"r_uint64,omitempty"` - RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32" json:"r_sint32,omitempty"` - RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64" json:"r_sint64,omitempty"` - RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float" json:"r_float,omitempty"` - RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double" json:"r_double,omitempty"` - RString []string `protobuf:"bytes,10,rep,name=r_string" json:"r_string,omitempty"` - RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes" json:"r_bytes,omitempty"` + RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` + RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` + RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` + RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` + RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` + RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` + RSint64 
[]int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` + RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` + RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` + RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` + RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -249,11 +254,11 @@ // Test message for holding enums and nested messages. type Widget struct { Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` - RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` + RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` - RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple" json:"r_simple,omitempty"` + RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` - RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats" json:"r_repeats,omitempty"` + RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -305,8 +310,8 @@ } type Maps struct { - MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MInt64Str map[int64]string 
`protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` } @@ -333,6 +338,7 @@ // Types that are valid to be assigned to Union: // *MsgWithOneof_Title // *MsgWithOneof_Salary + // *MsgWithOneof_Country Union isMsgWithOneof_Union `protobuf_oneof:"union"` XXX_unrecognized []byte `json:"-"` } @@ -352,9 +358,13 @@ type MsgWithOneof_Salary struct { Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"` } +type MsgWithOneof_Country struct { + Country string `protobuf:"bytes,3,opt,name=Country,json=country,oneof"` +} -func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} -func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union { if m != nil { @@ -377,11 +387,19 @@ return 0 } +func (m *MsgWithOneof) GetCountry() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok { + return x.Country + } + return "" +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ (*MsgWithOneof_Title)(nil), (*MsgWithOneof_Salary)(nil), + (*MsgWithOneof_Country)(nil), } } @@ -395,6 +413,9 @@ case *MsgWithOneof_Salary: b.EncodeVarint(2<<3 | proto.WireVarint) b.EncodeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Country) case nil: default: return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) @@ -419,6 +440,13 @@ x, err := b.DecodeVarint() m.Union = &MsgWithOneof_Salary{int64(x)} return true, err + case 3: // union.Country + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Country{x} + return true, err default: return false, nil } @@ -435,6 +463,10 @@ case *MsgWithOneof_Salary: n += proto.SizeVarint(2<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Country))) + n += len(x.Country) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -511,7 +543,120 @@ ExtensionType: (*Complex)(nil), Field: 123, Name: "jsonpb.Complex.real_extension", - Tag: "bytes,123,opt,name=real_extension", + Tag: "bytes,123,opt,name=real_extension,json=realExtension", +} + +type KnownTypes struct { + An *google_protobuf.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` + Dur *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` + St *google_protobuf2.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` + Ts *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` + Dbl 
*google_protobuf4.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` + Flt *google_protobuf4.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` + I64 *google_protobuf4.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` + U64 *google_protobuf4.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` + I32 *google_protobuf4.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` + U32 *google_protobuf4.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"` + Bool *google_protobuf4.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` + Str *google_protobuf4.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` + Bytes *google_protobuf4.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KnownTypes) Reset() { *m = KnownTypes{} } +func (m *KnownTypes) String() string { return proto.CompactTextString(m) } +func (*KnownTypes) ProtoMessage() {} +func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *KnownTypes) GetAn() *google_protobuf.Any { + if m != nil { + return m.An + } + return nil +} + +func (m *KnownTypes) GetDur() *google_protobuf1.Duration { + if m != nil { + return m.Dur + } + return nil +} + +func (m *KnownTypes) GetSt() *google_protobuf2.Struct { + if m != nil { + return m.St + } + return nil +} + +func (m *KnownTypes) GetTs() *google_protobuf3.Timestamp { + if m != nil { + return m.Ts + } + return nil +} + +func (m *KnownTypes) GetDbl() *google_protobuf4.DoubleValue { + if m != nil { + return m.Dbl + } + return nil +} + +func (m *KnownTypes) GetFlt() *google_protobuf4.FloatValue { + if m != nil { + return m.Flt + } + return nil +} + +func (m *KnownTypes) GetI64() *google_protobuf4.Int64Value { + if m != nil { + return m.I64 + } + return nil +} + +func (m *KnownTypes) GetU64() *google_protobuf4.UInt64Value { + if m != nil { + return m.U64 + } + 
return nil +} + +func (m *KnownTypes) GetI32() *google_protobuf4.Int32Value { + if m != nil { + return m.I32 + } + return nil +} + +func (m *KnownTypes) GetU32() *google_protobuf4.UInt32Value { + if m != nil { + return m.U32 + } + return nil +} + +func (m *KnownTypes) GetBool() *google_protobuf4.BoolValue { + if m != nil { + return m.Bool + } + return nil +} + +func (m *KnownTypes) GetStr() *google_protobuf4.StringValue { + if m != nil { + return m.Str + } + return nil +} + +func (m *KnownTypes) GetBytes() *google_protobuf4.BytesValue { + if m != nil { + return m.Bytes + } + return nil } var E_Name = &proto.ExtensionDesc{ @@ -530,49 +675,77 @@ proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") proto.RegisterType((*Real)(nil), "jsonpb.Real") proto.RegisterType((*Complex)(nil), "jsonpb.Complex") + proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes") proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) proto.RegisterExtension(E_Complex_RealExtension) proto.RegisterExtension(E_Name) } var fileDescriptor1 = []byte{ - // 598 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0x5f, 0x6b, 0x13, 0x4d, - 0x14, 0xc6, 0xbb, 0x3b, 0xfb, 0xf7, 0xa4, 0x4d, 0xb7, 0x43, 0x5f, 0x58, 0xfa, 0x52, 0x2d, 0x2b, - 0x05, 0xf1, 0x22, 0x94, 0x58, 0x45, 0x72, 0x99, 0x1a, 0xac, 0x60, 0x14, 0x52, 0xa4, 0x57, 0xb2, - 0x6c, 0x9a, 0x69, 0xdc, 0xba, 0xd9, 0x09, 0xb3, 0x13, 0x69, 0xd0, 0x0b, 0xc1, 0x2f, 0xa7, 0xdf, - 0xc3, 0x0f, 0xe2, 0xcc, 0x9c, 0x6c, 0xb6, 0x8d, 0x9a, 0xcb, 0x67, 0x9e, 0xf3, 0xe4, 0xfc, 0xce, - 0x39, 0x0b, 0x54, 0xb2, 0x4a, 0xa6, 0x7c, 0x7c, 0xc3, 0xae, 0x64, 0xd5, 0x99, 0x0b, 0x2e, 0x39, - 0xf5, 0x6e, 0x2a, 0x5e, 0xce, 0xc7, 0xc9, 0x0f, 0x0b, 0xbc, 0x8b, 0x7c, 0x36, 0x2f, 0x18, 0x6d, - 0x83, 0xc7, 0xd3, 0x31, 0xe7, 0x45, 0x6c, 0x1d, 0x59, 0x8f, 0x03, 0xba, 0x0b, 0x3e, 0x4f, 0xf3, - 0x52, 0x3e, 0xed, 0xc6, 0xb6, 0x12, 0xdc, 0xb5, 0xf0, 0xfc, 0x34, 0x26, 0x4a, 0x20, 0x34, 
0x82, - 0x80, 0xa7, 0x0b, 0xb4, 0x38, 0x4a, 0xd9, 0x69, 0x14, 0xe5, 0x71, 0x95, 0xe2, 0xa0, 0x52, 0xa1, - 0xc7, 0x53, 0xca, 0x5e, 0xa3, 0x28, 0x8f, 0xaf, 0x14, 0x8a, 0xc1, 0xd7, 0x05, 0xcf, 0x64, 0x1c, - 0x28, 0xc1, 0x46, 0xcb, 0x84, 0x2f, 0xc6, 0x05, 0x8b, 0x43, 0xa5, 0x58, 0xab, 0x22, 0x29, 0xf2, - 0x72, 0x1a, 0x83, 0x52, 0x42, 0x2c, 0x1a, 0x2f, 0x15, 0x5b, 0xdc, 0x52, 0xc2, 0x76, 0xf2, 0xd3, - 0x02, 0x7f, 0xc4, 0xe6, 0x2c, 0x93, 0x95, 0x66, 0x11, 0x35, 0x0b, 0x41, 0x16, 0xb1, 0x66, 0x21, - 0xc8, 0x22, 0xd6, 0x2c, 0x04, 0x59, 0x44, 0xc3, 0x42, 0x90, 0x45, 0x34, 0x2c, 0x04, 0x59, 0x44, - 0xc3, 0x42, 0x90, 0x45, 0x34, 0x2c, 0x04, 0x59, 0xc4, 0x9a, 0x85, 0x20, 0x8b, 0x68, 0x58, 0x08, - 0xb2, 0x88, 0x86, 0x85, 0x20, 0x8b, 0x58, 0xb3, 0x10, 0xc5, 0xf2, 0xdd, 0x06, 0xef, 0x32, 0x9f, - 0x4c, 0x99, 0xa4, 0x8f, 0xc0, 0xbd, 0xe2, 0x05, 0x17, 0x66, 0x2b, 0xed, 0xee, 0x7e, 0x07, 0x37, - 0xd7, 0xc1, 0xe7, 0xce, 0x99, 0x7e, 0xa3, 0xc7, 0x3a, 0x00, 0x6d, 0x9a, 0xef, 0x5f, 0xb6, 0x07, - 0xe0, 0x55, 0x66, 0xd9, 0x66, 0x86, 0xad, 0x6e, 0xbb, 0x76, 0xad, 0x4e, 0xe0, 0x08, 0x71, 0x8c, - 0x43, 0x37, 0xf2, 0x37, 0x87, 0x2f, 0x70, 0xc6, 0xf1, 0xbe, 0x89, 0xd8, 0xad, 0x0d, 0xf5, 0xe8, - 0x13, 0x08, 0x45, 0x5a, 0x7b, 0xfe, 0x33, 0x21, 0x9b, 0x9e, 0xe4, 0x18, 0x5c, 0x6c, 0xc8, 0x07, - 0x32, 0x1a, 0xbc, 0x8c, 0xb6, 0x68, 0x08, 0xee, 0xab, 0xd1, 0x60, 0xf0, 0x36, 0xb2, 0x68, 0x00, - 0x4e, 0xff, 0xcd, 0xfb, 0x41, 0x64, 0x27, 0xbf, 0x2c, 0x70, 0x86, 0xd9, 0xbc, 0xa2, 0x27, 0xd0, - 0x9a, 0xe1, 0xb6, 0xf4, 0xdc, 0xcc, 0x4e, 0x5b, 0xdd, 0xff, 0xeb, 0x54, 0x6d, 0xe9, 0x0c, 0x5f, - 0xeb, 0xe7, 0x0b, 0x29, 0x06, 0xa5, 0x14, 0x4b, 0x7a, 0x0a, 0x3b, 0x33, 0x73, 0x00, 0x35, 0x8e, - 0x6d, 0x6a, 0x0e, 0xef, 0xd7, 0xf4, 0x95, 0x01, 0xc1, 0x4c, 0xd5, 0xc1, 0x09, 0xb4, 0x37, 0x72, - 0x5a, 0x40, 0x3e, 0xb1, 0xa5, 0x99, 0x3d, 0xa1, 0x3b, 0xe0, 0x7e, 0xce, 0x8a, 0x05, 0x33, 0xdf, - 0x43, 0xd8, 0xb3, 0x5f, 0x58, 0x07, 0x7d, 0x88, 0x36, 0x53, 0xee, 0xd6, 0x04, 0xf4, 0xf0, 0x6e, - 0xcd, 0x1f, 0xf3, 0xd4, 0x19, 
0x49, 0x0f, 0xb6, 0x87, 0xd5, 0xf4, 0x32, 0x97, 0x1f, 0xdf, 0x95, - 0x8c, 0x5f, 0xab, 0x6b, 0x70, 0x65, 0x2e, 0x55, 0xcf, 0x3a, 0x21, 0x3c, 0xdf, 0x52, 0x07, 0xe3, - 0x55, 0x59, 0x91, 0x89, 0xa5, 0x09, 0x21, 0xe7, 0x5b, 0x7d, 0x1f, 0xdc, 0x45, 0x99, 0xf3, 0x32, - 0x79, 0x08, 0xce, 0x88, 0x65, 0x45, 0xd3, 0x9a, 0xae, 0xb1, 0x9e, 0x04, 0xc1, 0x24, 0xfa, 0xa6, - 0x7e, 0x76, 0xf2, 0x01, 0xfc, 0x33, 0xae, 0xff, 0xea, 0x96, 0xee, 0x41, 0x98, 0xcf, 0xb2, 0x69, - 0x5e, 0xea, 0xa4, 0x0d, 0x5f, 0xf7, 0x19, 0xb4, 0x85, 0x0a, 0x4a, 0xd9, 0xad, 0x64, 0x65, 0xa5, - 0xa2, 0xe9, 0x76, 0xb3, 0xb5, 0xac, 0x88, 0xbf, 0xdc, 0xdf, 0xf6, 0x2a, 0xb3, 0x77, 0x00, 0x4e, - 0x99, 0xcd, 0xd8, 0x86, 0xf9, 0xab, 0x6e, 0xfc, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfb, 0xc2, - 0xb2, 0xf6, 0x78, 0x04, 0x00, 0x00, + // 1031 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x95, 0xdf, 0x72, 0xdb, 0x44, + 0x14, 0xc6, 0x2b, 0xc9, 0x96, 0xec, 0x75, 0x12, 0xcc, 0x4e, 0x4a, 0x15, 0x13, 0x40, 0xe3, 0x29, + 0x45, 0x14, 0xea, 0x0e, 0x8a, 0xc7, 0xc3, 0x14, 0x6e, 0x9a, 0xc6, 0x50, 0x06, 0x52, 0x66, 0x36, + 0x0d, 0xbd, 0xf4, 0xc8, 0xf1, 0xc6, 0xa8, 0xc8, 0x5a, 0xcf, 0xee, 0x8a, 0xd4, 0x03, 0x17, 0x79, + 0x08, 0x5e, 0x01, 0x1e, 0x81, 0x27, 0xe2, 0x41, 0x98, 0x73, 0x56, 0x7f, 0x12, 0x3b, 0xbe, 0x8a, + 0x8f, 0xce, 0x77, 0xbe, 0xac, 0x7e, 0x7b, 0x74, 0x0e, 0xa1, 0x9a, 0x2b, 0x3d, 0x11, 0xd3, 0xb7, + 0xfc, 0x42, 0xab, 0xc1, 0x52, 0x0a, 0x2d, 0xa8, 0xfb, 0x56, 0x89, 0x6c, 0x39, 0xed, 0x1d, 0xcc, + 0x85, 0x98, 0xa7, 0xfc, 0x29, 0x3e, 0x9d, 0xe6, 0x97, 0x4f, 0xe3, 0x6c, 0x65, 0x24, 0xbd, 0x8f, + 0xd7, 0x53, 0xb3, 0x5c, 0xc6, 0x3a, 0x11, 0x59, 0x91, 0x3f, 0x5c, 0xcf, 0x2b, 0x2d, 0xf3, 0x0b, + 0x5d, 0x64, 0x3f, 0x59, 0xcf, 0xea, 0x64, 0xc1, 0x95, 0x8e, 0x17, 0xcb, 0x6d, 0xf6, 0x57, 0x32, + 0x5e, 0x2e, 0xb9, 0x2c, 0x4e, 0xd8, 0xff, 0xdb, 0x26, 0xee, 0x59, 0xb2, 0x58, 0xa6, 0x9c, 0xde, + 0x27, 0xae, 0x98, 0x4c, 0x85, 0x48, 0x7d, 0x2b, 0xb0, 0xc2, 0x16, 0x6b, 0x8a, 
0x63, 0x21, 0x52, + 0xfa, 0x80, 0x78, 0x62, 0x92, 0x64, 0xfa, 0x28, 0xf2, 0xed, 0xc0, 0x0a, 0x9b, 0xcc, 0x15, 0x3f, + 0x40, 0x54, 0x25, 0x46, 0x43, 0xdf, 0x09, 0xac, 0xd0, 0x31, 0x89, 0xd1, 0x90, 0x1e, 0x90, 0x96, + 0x98, 0xe4, 0xa6, 0xa4, 0x11, 0x58, 0xe1, 0x2e, 0xf3, 0xc4, 0x39, 0x86, 0x75, 0x6a, 0x34, 0xf4, + 0x9b, 0x81, 0x15, 0x36, 0x8a, 0x54, 0x59, 0xa5, 0x4c, 0x95, 0x1b, 0x58, 0xe1, 0xfb, 0xcc, 0x13, + 0x67, 0x37, 0xaa, 0x94, 0xa9, 0xf2, 0x02, 0x2b, 0xa4, 0x45, 0x6a, 0x34, 0x34, 0x87, 0xb8, 0x4c, + 0x45, 0xac, 0xfd, 0x56, 0x60, 0x85, 0x36, 0x73, 0xc5, 0x77, 0x10, 0x99, 0x9a, 0x99, 0xc8, 0xa7, + 0x29, 0xf7, 0xdb, 0x81, 0x15, 0x5a, 0xcc, 0x13, 0x27, 0x18, 0x16, 0x76, 0x5a, 0x26, 0xd9, 0xdc, + 0x27, 0x81, 0x15, 0xb6, 0xc1, 0x0e, 0x43, 0x63, 0x37, 0x5d, 0x69, 0xae, 0xfc, 0x4e, 0x60, 0x85, + 0x3b, 0xcc, 0x15, 0xc7, 0x10, 0xf5, 0xff, 0xb1, 0x89, 0xc7, 0xf8, 0x92, 0xc7, 0x5a, 0x01, 0x28, + 0x59, 0x82, 0x72, 0x00, 0x94, 0x2c, 0x41, 0xc9, 0x0a, 0x94, 0x03, 0xa0, 0x64, 0x05, 0x4a, 0x56, + 0xa0, 0x1c, 0x00, 0x25, 0x2b, 0x50, 0xb2, 0x06, 0xe5, 0x00, 0x28, 0x59, 0x83, 0x92, 0x35, 0x28, + 0x07, 0x40, 0xc9, 0x1a, 0x94, 0xac, 0x41, 0x39, 0x00, 0x4a, 0x9e, 0xdd, 0xa8, 0xaa, 0x40, 0x39, + 0x00, 0x4a, 0xd6, 0xa0, 0x64, 0x05, 0xca, 0x01, 0x50, 0xb2, 0x02, 0x25, 0x6b, 0x50, 0x0e, 0x80, + 0x92, 0x35, 0x28, 0x59, 0x83, 0x72, 0x00, 0x94, 0xac, 0x41, 0xc9, 0x0a, 0x94, 0x03, 0xa0, 0xa4, + 0x01, 0xf5, 0xaf, 0x4d, 0xdc, 0x37, 0xc9, 0x6c, 0xce, 0x35, 0x7d, 0x4c, 0x9a, 0x17, 0x22, 0x15, + 0x12, 0xfb, 0x69, 0x2f, 0xda, 0x1f, 0x98, 0xaf, 0x61, 0x60, 0xd2, 0x83, 0x17, 0x90, 0x63, 0x46, + 0x42, 0x9f, 0x80, 0x9f, 0x51, 0x03, 0xbc, 0x6d, 0x6a, 0x57, 0xe2, 0x5f, 0xfa, 0x88, 0xb8, 0x0a, + 0xbb, 0x16, 0x2f, 0xb0, 0x13, 0xed, 0x95, 0x6a, 0xd3, 0xcb, 0xac, 0xc8, 0xd2, 0xcf, 0x0d, 0x10, + 0x54, 0xc2, 0x39, 0x37, 0x95, 0x00, 0xa8, 0x90, 0x7a, 0xd2, 0x5c, 0xb0, 0xbf, 0x8f, 0x9e, 0xef, + 0x95, 0xca, 0xe2, 0xde, 0x59, 0x99, 0xa7, 0x5f, 0x92, 0xb6, 0x9c, 0x94, 0xe2, 0xfb, 0x68, 0xbb, + 0x21, 0x6e, 0xc9, 
0xe2, 0x57, 0xff, 0x53, 0xd2, 0x34, 0x87, 0xf6, 0x88, 0xc3, 0xc6, 0x27, 0xdd, + 0x7b, 0xb4, 0x4d, 0x9a, 0xdf, 0xb3, 0xf1, 0xf8, 0x55, 0xd7, 0xa2, 0x2d, 0xd2, 0x38, 0xfe, 0xe9, + 0x7c, 0xdc, 0xb5, 0xfb, 0x7f, 0xd9, 0xa4, 0x71, 0x1a, 0x2f, 0x15, 0xfd, 0x86, 0x74, 0x16, 0xa6, + 0x5d, 0x80, 0x3d, 0xf6, 0x58, 0x27, 0xfa, 0xb0, 0xf4, 0x07, 0xc9, 0xe0, 0x14, 0xfb, 0xe7, 0x4c, + 0xcb, 0x71, 0xa6, 0xe5, 0x8a, 0xb5, 0x17, 0x65, 0x4c, 0x9f, 0x93, 0xdd, 0x05, 0xf6, 0x66, 0xf9, + 0xd6, 0x36, 0x96, 0x7f, 0x74, 0xbb, 0x1c, 0xfa, 0xd5, 0xbc, 0xb6, 0x31, 0xe8, 0x2c, 0xea, 0x27, + 0xbd, 0x6f, 0xc9, 0xde, 0x6d, 0x7f, 0xda, 0x25, 0xce, 0x6f, 0x7c, 0x85, 0xd7, 0xe8, 0x30, 0xf8, + 0x49, 0xf7, 0x49, 0xf3, 0xf7, 0x38, 0xcd, 0x39, 0x8e, 0x84, 0x36, 0x33, 0xc1, 0x33, 0xfb, 0x6b, + 0xab, 0xf7, 0x8a, 0x74, 0xd7, 0xed, 0x6f, 0xd6, 0xb7, 0x4c, 0xfd, 0xc3, 0x9b, 0xf5, 0x9b, 0x97, + 0x52, 0xfb, 0xf5, 0x39, 0xd9, 0x39, 0x55, 0xf3, 0x37, 0x89, 0xfe, 0xf5, 0xe7, 0x8c, 0x8b, 0x4b, + 0xfa, 0x01, 0x69, 0xea, 0x44, 0xa7, 0x1c, 0xdd, 0xda, 0x2f, 0xef, 0x31, 0x13, 0x52, 0x9f, 0xb8, + 0x2a, 0x4e, 0x63, 0xb9, 0x42, 0x4b, 0xe7, 0xe5, 0x3d, 0x56, 0xc4, 0xb4, 0x47, 0xbc, 0x17, 0x22, + 0x87, 0x83, 0xe0, 0x9c, 0x82, 0x1a, 0xef, 0xc2, 0x3c, 0x38, 0xf6, 0x48, 0x33, 0xcf, 0x12, 0x91, + 0xf5, 0x1f, 0x91, 0x06, 0xe3, 0x71, 0x5a, 0xbf, 0x98, 0x85, 0x33, 0xc3, 0x04, 0x8f, 0x5b, 0xad, + 0x59, 0xf7, 0xfa, 0xfa, 0xfa, 0xda, 0xee, 0x5f, 0x81, 0x19, 0x9c, 0xf1, 0x1d, 0x3d, 0x24, 0xed, + 0x64, 0x11, 0xcf, 0x93, 0x0c, 0xfe, 0xa9, 0x91, 0xd7, 0x0f, 0xea, 0x92, 0xe8, 0x84, 0xec, 0x49, + 0x1e, 0xa7, 0x13, 0xfe, 0x4e, 0xf3, 0x4c, 0x25, 0x22, 0xa3, 0x3b, 0x75, 0xb3, 0xc4, 0xa9, 0xff, + 0xc7, 0xed, 0x6e, 0x2b, 0xec, 0xd9, 0x2e, 0x14, 0x8d, 0xcb, 0x9a, 0xfe, 0x7f, 0x0d, 0x42, 0x7e, + 0xcc, 0xc4, 0x55, 0xf6, 0x7a, 0xb5, 0xe4, 0x8a, 0x3e, 0x24, 0x76, 0x9c, 0xf9, 0x7b, 0x58, 0xba, + 0x3f, 0x30, 0x43, 0x7e, 0x50, 0x0e, 0xf9, 0xc1, 0xf3, 0x6c, 0xc5, 0xec, 0x38, 0xa3, 0x5f, 0x10, + 0x67, 0x96, 0x9b, 0xef, 0xaf, 0x13, 0x1d, 0x6c, 0xc8, 
0x4e, 0x8a, 0x55, 0xc3, 0x40, 0x45, 0x3f, + 0x23, 0xb6, 0xd2, 0xfe, 0x0e, 0x6a, 0x1f, 0x6c, 0x68, 0xcf, 0x70, 0xed, 0x30, 0x5b, 0xc1, 0x77, + 0x6d, 0x6b, 0x55, 0xdc, 0x5c, 0x6f, 0x43, 0xf8, 0xba, 0xdc, 0x40, 0xcc, 0xd6, 0x8a, 0x0e, 0x88, + 0x33, 0x9b, 0xa6, 0x08, 0xbe, 0x13, 0x1d, 0x6e, 0x9e, 0x00, 0x07, 0xcd, 0x2f, 0x00, 0x99, 0x81, + 0x90, 0x3e, 0x21, 0xce, 0x65, 0xaa, 0x71, 0x6d, 0x40, 0xd3, 0xaf, 0xeb, 0x71, 0x64, 0x15, 0xf2, + 0xcb, 0x54, 0x83, 0x3c, 0x29, 0x56, 0xc9, 0x5d, 0x72, 0x6c, 0xe3, 0x42, 0x9e, 0x8c, 0x86, 0x70, + 0x9a, 0x7c, 0x34, 0xc4, 0xf5, 0x72, 0xd7, 0x69, 0xce, 0x6f, 0xea, 0xf3, 0xd1, 0x10, 0xed, 0x8f, + 0x22, 0xdc, 0x39, 0x5b, 0xec, 0x8f, 0xa2, 0xd2, 0xfe, 0x28, 0x42, 0xfb, 0xa3, 0x08, 0x17, 0xd1, + 0x36, 0xfb, 0x4a, 0x9f, 0xa3, 0xbe, 0x81, 0x6b, 0xa4, 0xbd, 0x05, 0x25, 0x7c, 0x47, 0x46, 0x8e, + 0x3a, 0xf0, 0x87, 0x89, 0x40, 0xb6, 0xf8, 0x9b, 0xd1, 0x5c, 0xf8, 0x2b, 0x2d, 0xe9, 0x57, 0xa4, + 0x59, 0xef, 0xb2, 0xbb, 0x5e, 0x00, 0x47, 0xb6, 0x29, 0x30, 0xca, 0x67, 0x01, 0x69, 0x64, 0xf1, + 0x82, 0xaf, 0xb5, 0xe8, 0x9f, 0xf8, 0x95, 0x63, 0xe6, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xca, + 0xa2, 0x76, 0x34, 0xe8, 0x08, 0x00, 0x00, } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto 2016-05-24 07:05:22.000000000 +0000 @@ -31,6 +31,12 @@ syntax = "proto2"; +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + package 
jsonpb; // Test message for holding primitive types. @@ -89,6 +95,7 @@ oneof union { string title = 1; int64 salary = 2; + string Country = 3; } } @@ -108,3 +115,20 @@ optional double imaginary = 1; extensions 100 to max; } + +message KnownTypes { + optional google.protobuf.Any an = 14; + optional google.protobuf.Duration dur = 1; + optional google.protobuf.Struct st = 12; + optional google.protobuf.Timestamp ts = 2; + + optional google.protobuf.DoubleValue dbl = 3; + optional google.protobuf.FloatValue flt = 4; + optional google.protobuf.Int64Value i64 = 5; + optional google.protobuf.UInt64Value u64 = 6; + optional google.protobuf.Int32Value i32 = 7; + optional google.protobuf.UInt32Value u32 = 8; + optional google.protobuf.BoolValue bool = 9; + optional google.protobuf.StringValue str = 10; + optional google.protobuf.BytesValue bytes = 11; +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/Makefile aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/Makefile --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/Makefile 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/Makefile 2016-05-24 07:05:22.000000000 +0000 @@ -33,13 +33,11 @@ all: install install: - go install ./proto - go install ./jsonpb + go install ./proto ./jsonpb ./ptypes go install ./protoc-gen-go test: - go test ./proto - go test ./jsonpb + go test ./proto ./jsonpb ./ptypes make -C protoc-gen-go/testdata test clean: diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/all_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/all_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/all_test.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/all_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -1329,9 +1329,18 @@ func TestTypedNilMarshal(t *testing.T) { // A typed nil should return ErrNil and not crash. - _, err := Marshal((*GoEnum)(nil)) - if err != ErrNil { - t.Errorf("Marshal: got err %v, want ErrNil", err) + { + var m *GoEnum + if _, err := Marshal(m); err != ErrNil { + t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) + } + } + + { + m := &Communique{Union: &Communique_Msg{nil}} + if _, err := Marshal(m); err == nil || err == ErrNil { + t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) + } } } @@ -1958,6 +1967,40 @@ } } +func TestDecodeMapFieldMissingKey(t *testing.T) { + b := []byte{ + 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes + // no key + 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing key: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want) + } +} + +func TestDecodeMapFieldMissingValue(t *testing.T) { + b := []byte{ + 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes + 0x08, 0x01, // varint key, value 1 + // no value + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing value: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no value was not as expected. 
got: %v, want %v", got, want) + } +} + func TestOneof(t *testing.T) { m := &Communique{} b, err := Marshal(m) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/any_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/any_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/any_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/any_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,272 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/proto/proto3_proto" + testpb "github.com/golang/protobuf/proto/testdata" + anypb "github.com/golang/protobuf/ptypes/any" +) + +var ( + expandedMarshaler = proto.TextMarshaler{ExpandAny: true} + expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} +) + +// anyEqual reports whether two messages which may be google.protobuf.Any or may +// contain google.protobuf.Any fields are equal. We can't use proto.Equal for +// comparison, because semantically equivalent messages may be marshaled to +// binary in different tag order. Instead, trust that TextMarshaler with +// ExpandAny option works and compare the text marshaling results. +func anyEqual(got, want proto.Message) bool { + // if messages are proto.Equal, no need to marshal. 
+ if proto.Equal(got, want) { + return true + } + g := expandedMarshaler.Text(got) + w := expandedMarshaler.Text(want) + return g == w +} + +type golden struct { + m proto.Message + t, c string +} + +var goldenMessages = makeGolden() + +func makeGolden() []golden { + nested := &pb.Nested{Bunny: "Monty"} + nb, err := proto.Marshal(nested) + if err != nil { + panic(err) + } + m1 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m2 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m3 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, + } + m4 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, + } + m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} + + any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} + proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) + proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) + any1b, err := proto.Marshal(any1) + if err != nil { + panic(err) + } + any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} + proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) + any2b, err := proto.Marshal(any2) + if err != nil { + panic(err) + } + m6 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + ManyThings: []*anypb.Any{ + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, + 
&anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + }, + } + + const ( + m1Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m2Golden = ` +name: "David" +result_count: 47 +anything: < + ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m3Golden = ` +name: "David" +result_count: 47 +anything: < + ["type.googleapis.com/\"/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m4Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m5Golden = ` +[type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" +> +` + m6Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 42 + bikeshed: GREEN + rep_bytes: "roboto" + [testdata.Ext.more]: < + data: "baz" + > + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +` + ) + return []golden{ + {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, + {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, + {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "}, + {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, + {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, + {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, + } +} + +func TestMarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + if got, want := expandedMarshaler.Text(tt.m), tt.t; 
got != want { + t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) + } + if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { + t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) + } + } +} + +func TestUnmarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + want := tt.m + got := proto.Clone(tt.m) + got.Reset() + if err := proto.UnmarshalText(tt.t, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) + } + got.Reset() + if err := proto.UnmarshalText(tt.c, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) + } + } +} + +func TestMarsahlUnknownAny(t *testing.T) { + m := &pb.Message{ + Anything: &anypb.Any{ + TypeUrl: "foo", + Value: []byte("bar"), + }, + } + want := `anything: < + type_url: "foo" + value: "bar" +> +` + got := expandedMarshaler.Text(m) + if got != want { + t.Errorf("got\n`%s`\nwant\n`%s`", got, want) + } +} + +func TestAmbiguousAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + [type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" + > + type_url: "ttt/proto3_proto.Nested" + `, pb) + t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) + if err != nil { + t.Errorf("failed to parse ambiguous Any message: %v", err) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/decode.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/decode.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/decode.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/decode.go 2016-05-24 07:05:22.000000000 +0000 @@ -768,10 +768,11 @@ } } 
keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() || !valelem.IsValid() { - // We did not decode the key or the value in the map entry. - // Either way, it's an invalid map entry. - return fmt.Errorf("proto: bad map data: missing key/val") + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) } v.SetMapIndex(keyelem, valelem) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/encode.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/encode.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/encode.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/encode.go 2016-05-24 07:05:22.000000000 +0000 @@ -64,6 +64,10 @@ // a struct with a repeated field containing a nil element. errRepeatedHasNil = errors.New("proto: repeated field has nil element") + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + // ErrNil is the error returned if Marshal is called with nil. ErrNil = errors.New("proto: Marshal called with nil") ) @@ -1222,7 +1226,9 @@ // Do oneof fields. 
if prop.oneofMarshaler != nil { m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err != nil { + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { return err } } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/Makefile aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/Makefile --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/Makefile 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/Makefile 2016-05-24 07:05:22.000000000 +0000 @@ -39,5 +39,5 @@ generate-test-pbs: make install make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto make diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go 2016-05-24 07:05:22.000000000 +0000 @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine +// +build appengine js // This file contains an implementation of proto field accesses using package reflect. 
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go 2016-05-24 07:05:22.000000000 +0000 @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine +// +build !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/properties.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/properties.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/properties.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/properties.go 2016-05-24 07:05:22.000000000 +0000 @@ -173,6 +173,7 @@ type Properties struct { Name string // name of the field, for error messages OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc Wire string WireType int Tag int @@ -229,8 +230,9 @@ if p.Packed { s += ",packed" } - if p.OrigName != p.Name { - s += ",name=" + p.OrigName + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName } if p.proto3 { s += ",proto3" @@ -310,6 +312,8 @@ p.Packed = true case strings.HasPrefix(f, "name="): p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + 
p.JSONName = f[5:] case strings.HasPrefix(f, "enum="): p.Enum = f[5:] case f == "proto3": @@ -697,7 +701,11 @@ if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } - oneof := f.Tag.Get("protobuf_oneof") != "" // special case + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } prop.Prop[i] = p prop.order[i] = i if debug { @@ -707,7 +715,7 @@ } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -16,10 +16,19 @@ package proto3_proto import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" import testdata "github.com/golang/protobuf/proto/testdata" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 type Message_Humour int32 @@ -46,25 +55,30 @@ func (x Message_Humour) String() string { return proto.EnumName(Message_Humour_name, int32(x)) } +func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } type Message struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` - HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` - TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` - Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" 
protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` + ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *Message) GetNested() *Nested { if m != nil { @@ -94,21 +108,37 @@ return nil } +func (m *Message) GetAnything() *google_protobuf.Any { + if m != nil { + return m.Anything + } + return nil +} + +func (m *Message) GetManyThings() []*google_protobuf.Any { + if m != nil { + return m.ManyThings + } + return nil +} + type Nested struct { Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` } -func (m *Nested) Reset() { *m = Nested{} } -func (m *Nested) String() string { return proto.CompactTextString(m) } -func (*Nested) ProtoMessage() {} +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } type MessageWithMap struct { - ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" 
protobuf_val:"bytes,2,opt,name=value,proto3"` + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *MessageWithMap) GetByteMapping() map[bool][]byte { if m != nil { @@ -118,5 +148,51 @@ } func init() { + proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") + proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) } + +var fileDescriptor0 = []byte{ + // 617 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x92, 0x5d, 0x6b, 0xdb, 0x3c, + 0x14, 0xc7, 0x1f, 0xc5, 0xa9, 0x93, 0x1e, 0x3b, 0xad, 0xd1, 0xd3, 0x81, 0x1a, 0xc6, 0xf0, 0x32, + 0x18, 0x66, 0x2f, 0xee, 0xc8, 0x28, 0x94, 0x31, 0x36, 0xda, 0xae, 0x65, 0xa1, 0x69, 0x16, 0x9c, + 0x76, 0x65, 0x57, 0x46, 0x49, 0x95, 0xc4, 0x2c, 0x96, 0x83, 0x2d, 0x0f, 0xfc, 0x75, 0xf6, 0x29, + 0x77, 0x39, 0x24, 0x39, 0xa9, 0x5b, 0xb2, 0xed, 0xca, 0xd2, 0xf1, 0xef, 0xbc, 0xe8, 0xff, 0x3f, + 0xb0, 0xbf, 0x4c, 0x13, 0x91, 0xbc, 0x0d, 0xd5, 0xe7, 0x40, 0x5f, 0x7c, 0xf5, 0xc1, 0x76, 0xf5, + 0x57, 0x7b, 0x7f, 0x96, 0x24, 0xb3, 0x05, 0xd3, 0xc8, 0x38, 0x9f, 0x1e, 0x50, 0x5e, 0x68, 0xb0, + 0xfd, 0xbf, 0x60, 0x99, 0xb8, 0xa5, 0x82, 0x1e, 0xc8, 0x83, 0x0e, 0x76, 0x7e, 0x99, 0xd0, 0xb8, + 0x64, 0x59, 0x46, 0x67, 
0x0c, 0x63, 0xa8, 0x73, 0x1a, 0x33, 0x82, 0x5c, 0xe4, 0x6d, 0x07, 0xea, + 0x8c, 0x8f, 0xa0, 0x39, 0x8f, 0x16, 0x34, 0x8d, 0x44, 0x41, 0x6a, 0x2e, 0xf2, 0x76, 0xba, 0x8f, + 0xfd, 0x6a, 0x43, 0xbf, 0x4c, 0xf6, 0x3f, 0xe7, 0x71, 0x92, 0xa7, 0xc1, 0x9a, 0xc6, 0x2e, 0xd8, + 0x73, 0x16, 0xcd, 0xe6, 0x22, 0x8c, 0x78, 0x38, 0x89, 0x89, 0xe1, 0x22, 0xaf, 0x15, 0x80, 0x8e, + 0xf5, 0xf8, 0x69, 0x2c, 0xfb, 0xc9, 0x71, 0x48, 0xdd, 0x45, 0x9e, 0x1d, 0xa8, 0x33, 0x7e, 0x0a, + 0x76, 0xca, 0xb2, 0x7c, 0x21, 0xc2, 0x49, 0x92, 0x73, 0x41, 0x1a, 0x2e, 0xf2, 0x8c, 0xc0, 0xd2, + 0xb1, 0x53, 0x19, 0xc2, 0xcf, 0xa0, 0x25, 0xd2, 0x9c, 0x85, 0xd9, 0x24, 0x11, 0x59, 0x4c, 0x39, + 0x69, 0xba, 0xc8, 0x6b, 0x06, 0xb6, 0x0c, 0x8e, 0xca, 0x18, 0xde, 0x83, 0xad, 0x6c, 0x92, 0xa4, + 0x8c, 0x6c, 0xbb, 0xc8, 0xab, 0x05, 0xfa, 0x82, 0x1d, 0x30, 0xbe, 0xb3, 0x82, 0x6c, 0xb9, 0x86, + 0x57, 0x0f, 0xe4, 0x11, 0xbf, 0x02, 0x93, 0xb3, 0x4c, 0xb0, 0x5b, 0x62, 0xba, 0xc8, 0xb3, 0xba, + 0x7b, 0xf7, 0x5f, 0x37, 0x50, 0xff, 0x82, 0x92, 0xc1, 0x87, 0xd0, 0x48, 0xc3, 0x69, 0xce, 0x79, + 0x41, 0x1c, 0xd7, 0xf8, 0xa7, 0x18, 0x66, 0x7a, 0x2e, 0x59, 0xfc, 0x1e, 0x1a, 0x82, 0xa5, 0x29, + 0x8d, 0x38, 0x01, 0xd7, 0xf0, 0xac, 0x6e, 0x67, 0x73, 0xda, 0x95, 0x86, 0xce, 0xb8, 0x48, 0x8b, + 0x60, 0x95, 0x82, 0x8f, 0x40, 0x5b, 0xdc, 0x0d, 0xa7, 0x11, 0x5b, 0xdc, 0x12, 0x4b, 0x0d, 0xfa, + 0xc8, 0x5f, 0xd9, 0xe9, 0x8f, 0xf2, 0xf1, 0x27, 0x36, 0xa5, 0xf9, 0x42, 0x64, 0x81, 0xa5, 0xd1, + 0x73, 0x49, 0xe2, 0xde, 0x3a, 0xf3, 0x07, 0x5d, 0xe4, 0x8c, 0xb4, 0x54, 0xf3, 0xe7, 0x9b, 0x9b, + 0x0f, 0x15, 0xf9, 0x55, 0x82, 0x7a, 0x80, 0xb2, 0x94, 0x8a, 0xe0, 0x37, 0xd0, 0xa4, 0xbc, 0x10, + 0xf3, 0x88, 0xcf, 0xc8, 0x4e, 0xa9, 0x94, 0x5e, 0x35, 0x7f, 0xb5, 0x6a, 0xfe, 0x31, 0x2f, 0x82, + 0x35, 0x85, 0x0f, 0xc1, 0x8a, 0x29, 0x2f, 0x42, 0x75, 0xcb, 0xc8, 0xae, 0xea, 0xbd, 0x39, 0x09, + 0x24, 0x78, 0xa5, 0xb8, 0xf6, 0x10, 0xec, 0xaa, 0x0c, 0x2b, 0xcb, 0xf4, 0x4e, 0x2a, 0xcb, 0x5e, + 0xc0, 0x96, 0x7e, 0x4e, 0xed, 0x2f, 0x8e, 0x69, 0xe4, 0x5d, 
0xed, 0x08, 0xb5, 0xaf, 0xc1, 0x79, + 0xf8, 0xb6, 0x0d, 0x55, 0x5f, 0xde, 0xaf, 0xfa, 0x07, 0x79, 0xef, 0xca, 0x76, 0x3e, 0x82, 0xa9, + 0x6d, 0xc6, 0x16, 0x34, 0xae, 0x07, 0x17, 0x83, 0x2f, 0x37, 0x03, 0xe7, 0x3f, 0xdc, 0x84, 0xfa, + 0xf0, 0x7a, 0x30, 0x72, 0x10, 0x6e, 0xc1, 0xf6, 0xa8, 0x7f, 0x3c, 0x1c, 0x5d, 0xf5, 0x4e, 0x2f, + 0x9c, 0x1a, 0xde, 0x05, 0xeb, 0xa4, 0xd7, 0xef, 0x87, 0x27, 0xc7, 0xbd, 0xfe, 0xd9, 0x37, 0xc7, + 0xe8, 0x3c, 0x01, 0x53, 0x0f, 0x2b, 0x97, 0x75, 0xac, 0x96, 0x4a, 0xcf, 0xa3, 0x2f, 0x9d, 0x9f, + 0x08, 0x76, 0x4a, 0x73, 0x6e, 0x22, 0x31, 0xbf, 0xa4, 0x4b, 0x3c, 0x04, 0x7b, 0x5c, 0x08, 0x16, + 0xc6, 0x74, 0xb9, 0x94, 0x4e, 0x20, 0x25, 0xea, 0xeb, 0x8d, 0x86, 0x96, 0x39, 0xfe, 0x49, 0x21, + 0xd8, 0xa5, 0xe6, 0x4b, 0x5f, 0xc7, 0x77, 0x91, 0xf6, 0x07, 0x70, 0x1e, 0x02, 0x55, 0x71, 0x9a, + 0x5a, 0x9c, 0xbd, 0xaa, 0x38, 0x76, 0x45, 0x85, 0xb1, 0xa9, 0x5b, 0xff, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x54, 0x4a, 0xfa, 0x41, 0xa1, 0x04, 0x00, 0x00, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto 2016-05-24 07:05:22.000000000 +0000 @@ -31,6 +31,7 @@ syntax = "proto3"; +import "google/protobuf/any.proto"; import "testdata/test.proto"; package proto3_proto; @@ -53,10 +54,14 @@ repeated uint64 key = 5; Nested nested = 6; + repeated Humour r_funny = 16; map terrain = 10; testdata.SubDefaults proto2_field = 11; map proto2_value = 13; + + google.protobuf.Any anything = 14; + repeated google.protobuf.Any many_things = 15; } message Nested { diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -20,6 +20,7 @@ NewMessage InnerMessage OtherMessage + RequiredInnerMessage MyMessage Ext ComplexExtension @@ -192,7 +193,7 @@ *x = MyMessage_Color(value) return nil } -func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } +func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } type DefaultsMessage_DefaultsEnum int32 @@ -230,7 +231,7 @@ return nil } func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{14, 0} + return fileDescriptor0, []int{15, 0} } type Defaults_Color int32 @@ -268,7 +269,7 @@ *x = Defaults_Color(value) return nil } -func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } +func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{20, 0} } type RepeatedEnum_Color int32 @@ -299,7 +300,7 @@ *x = RepeatedEnum_Color(value) return nil } -func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} } +func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{22, 0} } type GoEnum struct { Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` @@ -319,8 +320,8 @@ } type GoTestField struct { - Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"` + Label *string 
`protobuf:"bytes,1,req,name=Label,json=label" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req,name=Type,json=type" json:"Type,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -345,84 +346,84 @@ type GoTest struct { // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"` + Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,json=kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=Table,json=table" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt,name=Param,json=param" json:"Param,omitempty"` // Required, repeated and optional foreign fields. - RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` + RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField,json=repeatedField" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField,json=optionalField" json:"OptionalField,omitempty"` // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" 
json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=fBoolRequired" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=fInt32Required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=fInt64Required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=fFixed32Required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=fFixed64Required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=fUint32Required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=fUint64Required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 
`protobuf:"fixed32,17,req,name=F_Float_required,json=fFloatRequired" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=fDoubleRequired" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=fStringRequired" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=fBytesRequired" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=fSint32Required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=fSint64Required" json:"F_Sint64_required,omitempty"` // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte 
`protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=fBoolRepeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=fInt32Repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=fInt64Repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=fFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=fFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=fUint32Repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=fUint64Repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=fFloatRepeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=fDoubleRepeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=fStringRepeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=fBytesRepeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=fSint32Repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 
`protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=fSint64Repeated" json:"F_Sint64_repeated,omitempty"` // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=fBoolOptional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=fInt32Optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=fInt64Optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional 
*uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=fFixed32Optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=fFixed64Optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=fUint32Optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=fUint64Optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=fFloatOptional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=fDoubleOptional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=fStringOptional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=fBytesOptional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=fSint32Optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=fSint64Optional" json:"F_Sint64_optional,omitempty"` // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 
`protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=fBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=fInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=fInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=fFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=fFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=fUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=fUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + 
F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=fFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=fDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=fStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=fBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=fSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=fSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` // Packed repeated fields (no string or bytes). - F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 
`protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=fBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=fInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=fInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=fFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=fFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=fUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=fUint64RepeatedPacked" 
json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=fFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=fDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=fSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=fSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -953,13 +954,14 @@ // Required, repeated, and optional groups. 
type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"` + RequiredField *string `protobuf:"bytes,71,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} +func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } func (m *GoTest_RequiredGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { @@ -969,13 +971,14 @@ } type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"` + RequiredField *string `protobuf:"bytes,81,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } func (m *GoTest_RepeatedGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { @@ -985,13 +988,14 @@ } type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"` + 
RequiredField *string `protobuf:"bytes,91,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} +func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} } func (m *GoTest_OptionalGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { @@ -1004,11 +1008,11 @@ // Numbers are all big, larger than tag numbers in GoTestField, // the message used in the corresponding test. type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` XXX_unrecognized []byte 
`json:"-"` } @@ -1053,14 +1057,15 @@ } type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { if m != nil && m.GroupInt32 != nil { @@ -1114,7 +1119,7 @@ type MaxTag struct { // Maximum possible tag number. 
- LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -1202,7 +1207,7 @@ type NewMessage_Nested struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -1316,18 +1321,36 @@ return nil } +type RequiredInnerMessage struct { + LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } +func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } +func (*RequiredInnerMessage) ProtoMessage() {} +func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { + if m != nil { + return m.LeoFinallyWonAnOscar + } + return nil +} + type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" 
json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` // This field becomes [][]byte in the generated code. 
- RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1336,7 +1359,7 @@ func (m *MyMessage) Reset() { *m = MyMessage{} } func (m *MyMessage) String() string { return proto.CompactTextString(m) } func (*MyMessage) ProtoMessage() {} -func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } var extRange_MyMessage = []proto.ExtensionRange{ {100, 536870911}, @@ -1394,6 +1417,13 @@ return nil } +func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { + if m != nil { + return m.WeMustGoDeeper + } + return nil +} + func (m *MyMessage) GetRepInner() []*InnerMessage { if m != nil { return m.RepInner @@ -1430,13 +1460,14 @@ } type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} +func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } func (m *MyMessage_SomeGroup) GetGroupField() int32 { if m != nil && m.GroupField != nil { @@ -1453,7 +1484,7 @@ func (m *Ext) Reset() { *m = Ext{} } func (m *Ext) 
String() string { return proto.CompactTextString(m) } func (*Ext) ProtoMessage() {} -func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *Ext) GetData() string { if m != nil && m.Data != nil { @@ -1496,7 +1527,7 @@ func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } func (*ComplexExtension) ProtoMessage() {} -func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *ComplexExtension) GetFirst() int32 { if m != nil && m.First != nil { @@ -1527,7 +1558,7 @@ func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } func (*DefaultsMessage) ProtoMessage() {} -func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } var extRange_DefaultsMessage = []proto.ExtensionRange{ {100, 536870911}, @@ -1551,7 +1582,7 @@ func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } func (*MyMessageSet) ProtoMessage() {} -func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *MyMessageSet) Marshal() ([]byte, error) { return proto.MarshalMessageSet(m.ExtensionMap()) @@ -1591,17 +1622,17 @@ func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*Empty) Descriptor() ([]byte, 
[]int) { return fileDescriptor0, []int{17} } type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep,name=Message" json:"message,omitempty"` + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MessageList) Reset() { *m = MessageList{} } func (m *MessageList) String() string { return proto.CompactTextString(m) } func (*MessageList) ProtoMessage() {} -func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *MessageList) GetMessage() []*MessageList_Message { if m != nil { @@ -1616,9 +1647,10 @@ XXX_unrecognized []byte `json:"-"` } -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} +func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } func (m *MessageList_Message) GetName() string { if m != nil && m.Name != nil { @@ -1635,15 +1667,15 @@ } type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Strings) Reset() { *m = Strings{} } func (m *Strings) String() string { return proto.CompactTextString(m) } func (*Strings) 
ProtoMessage() {} -func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *Strings) GetStringField() string { if m != nil && m.StringField != nil { @@ -1662,35 +1694,35 @@ type Defaults struct { // Default-valued fields of all basic types. // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,name=F_String,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 
`protobuf:"varint,3,opt,name=F_Int64,json=fInt64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,json=fString,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` // More fields with crazy defaults. - F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,def=nan" json:"F_Nan,omitempty"` + F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=fPinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=fNinf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=fNan,def=nan" json:"F_Nan,omitempty"` // Sub-message. 
Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` // Redundant but explicit defaults. - StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` + StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Defaults) Reset() { *m = Defaults{} } func (m *Defaults) String() string { return proto.CompactTextString(m) } func (*Defaults) ProtoMessage() {} -func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } const Default_Defaults_F_Bool bool = true const Default_Defaults_F_Int32 int32 = 32 @@ -1854,7 +1886,7 @@ func (m *SubDefaults) Reset() { *m = SubDefaults{} } func (m *SubDefaults) String() string { return proto.CompactTextString(m) } func (*SubDefaults) ProtoMessage() {} -func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } const Default_SubDefaults_N int64 = 7 @@ -1873,7 +1905,7 @@ func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } func (*RepeatedEnum) ProtoMessage() {} -func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { if m != nil { @@ -1884,10 +1916,10 @@ type MoreRepeated struct { Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` Ints []int32 `protobuf:"varint,3,rep,name=ints" 
json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -1896,7 +1928,7 @@ func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } func (*MoreRepeated) ProtoMessage() {} -func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } func (m *MoreRepeated) GetBools() []bool { if m != nil { @@ -1948,14 +1980,14 @@ } type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt,name=G" json:"g,omitempty"` + G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GroupOld) Reset() { *m = GroupOld{} } func (m *GroupOld) String() string { return proto.CompactTextString(m) } func (*GroupOld) ProtoMessage() {} -func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *GroupOld) GetG() *GroupOld_G { if m != nil { @@ -1969,9 +2001,10 @@ XXX_unrecognized []byte `json:"-"` } -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m 
*GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} +func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24, 0} } func (m *GroupOld_G) GetX() int32 { if m != nil && m.X != nil { @@ -1981,14 +2014,14 @@ } type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt,name=G" json:"g,omitempty"` + G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *GroupNew) Reset() { *m = GroupNew{} } func (m *GroupNew) String() string { return proto.CompactTextString(m) } func (*GroupNew) ProtoMessage() {} -func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } func (m *GroupNew) GetG() *GroupNew_G { if m != nil { @@ -2003,9 +2036,10 @@ XXX_unrecognized []byte `json:"-"` } -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} +func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} } func (m *GroupNew_G) GetX() int32 { if m != nil && m.X != nil { @@ -2029,7 +2063,7 @@ func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } func (*FloatingPoint) ProtoMessage() {} -func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *FloatingPoint) GetF() float64 { if m != nil && m.F != nil { @@ -2039,17 +2073,17 @@ } type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" 
json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_unrecognized []byte `json:"-"` } func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } func (*MessageWithMap) ProtoMessage() {} -func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } func (m *MessageWithMap) GetNameMapping() map[int32]string { if m != nil { @@ -2108,7 +2142,7 @@ func (m *Oneof) Reset() { *m = Oneof{} } func (m *Oneof) String() string { return 
proto.CompactTextString(m) } func (*Oneof) ProtoMessage() {} -func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } type isOneof_Union interface { isOneof_Union() @@ -2118,55 +2152,55 @@ } type Oneof_F_Bool struct { - F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,oneof"` + F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,oneof"` } type Oneof_F_Int32 struct { - F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,oneof"` + F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,oneof"` } type Oneof_F_Int64 struct { - F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,oneof"` + F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,oneof"` } type Oneof_F_Fixed32 struct { - F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,oneof"` + F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,oneof"` } type Oneof_F_Fixed64 struct { - F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,oneof"` + F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,oneof"` } type Oneof_F_Uint32 struct { - F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,oneof"` + F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,oneof"` } type Oneof_F_Uint64 struct { - F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,oneof"` + F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,oneof"` } type Oneof_F_Float struct { - F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,oneof"` + F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,oneof"` } type Oneof_F_Double struct { - F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,oneof"` + F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,oneof"` } type Oneof_F_String struct { - F_String string `protobuf:"bytes,10,opt,name=F_String,oneof"` + F_String string 
`protobuf:"bytes,10,opt,name=F_String,json=fString,oneof"` } type Oneof_F_Bytes struct { - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,oneof"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,oneof"` } type Oneof_F_Sint32 struct { - F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,oneof"` + F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,oneof"` } type Oneof_F_Sint64 struct { - F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,oneof"` + F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,oneof"` } type Oneof_F_Enum struct { - F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,enum=testdata.MyMessage_Color,oneof"` + F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.MyMessage_Color,oneof"` } type Oneof_F_Message struct { - F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,oneof"` + F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=fMessage,oneof"` } type Oneof_FGroup struct { - FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,oneof"` + FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` } type Oneof_F_Largest_Tag struct { - F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,oneof"` + F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=fLargestTag,oneof"` } type Oneof_Value struct { Value int32 `protobuf:"varint,100,opt,name=value,oneof"` @@ -2651,9 +2685,10 @@ XXX_unrecognized []byte `json:"-"` } -func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } -func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } -func (*Oneof_F_Group) ProtoMessage() {} +func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } +func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } +func (*Oneof_F_Group) ProtoMessage() {} +func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28, 0} } func 
(m *Oneof_F_Group) GetX() int32 { if m != nil && m.X != nil { @@ -2663,7 +2698,7 @@ } type Communique struct { - MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry" json:"make_me_cry,omitempty"` + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` // This is a oneof, called "union". // // Types that are valid to be assigned to Union: @@ -2680,7 +2715,7 @@ func (m *Communique) Reset() { *m = Communique{} } func (m *Communique) String() string { return proto.CompactTextString(m) } func (*Communique) ProtoMessage() {} -func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } type isCommunique_Union interface { isCommunique_Union() @@ -2696,7 +2731,7 @@ Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` } type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,oneof"` + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` } type Communique_Col struct { Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` @@ -2916,7 +2951,7 @@ ExtensionType: ([]*ComplexExtension)(nil), Field: 201, Name: "testdata.r_complex", - Tag: "bytes,201,rep,name=r_complex", + Tag: "bytes,201,rep,name=r_complex,json=rComplex", } var E_NoDefaultDouble = &proto.ExtensionDesc{ @@ -2924,7 +2959,7 @@ ExtensionType: (*float64)(nil), Field: 101, Name: "testdata.no_default_double", - Tag: "fixed64,101,opt,name=no_default_double", + Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", } var E_NoDefaultFloat = &proto.ExtensionDesc{ @@ -2932,7 +2967,7 @@ ExtensionType: (*float32)(nil), Field: 102, Name: "testdata.no_default_float", - Tag: "fixed32,102,opt,name=no_default_float", + Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", } var E_NoDefaultInt32 = &proto.ExtensionDesc{ @@ -2940,7 +2975,7 @@ ExtensionType: 
(*int32)(nil), Field: 103, Name: "testdata.no_default_int32", - Tag: "varint,103,opt,name=no_default_int32", + Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", } var E_NoDefaultInt64 = &proto.ExtensionDesc{ @@ -2948,7 +2983,7 @@ ExtensionType: (*int64)(nil), Field: 104, Name: "testdata.no_default_int64", - Tag: "varint,104,opt,name=no_default_int64", + Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", } var E_NoDefaultUint32 = &proto.ExtensionDesc{ @@ -2956,7 +2991,7 @@ ExtensionType: (*uint32)(nil), Field: 105, Name: "testdata.no_default_uint32", - Tag: "varint,105,opt,name=no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", } var E_NoDefaultUint64 = &proto.ExtensionDesc{ @@ -2964,7 +2999,7 @@ ExtensionType: (*uint64)(nil), Field: 106, Name: "testdata.no_default_uint64", - Tag: "varint,106,opt,name=no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", } var E_NoDefaultSint32 = &proto.ExtensionDesc{ @@ -2972,7 +3007,7 @@ ExtensionType: (*int32)(nil), Field: 107, Name: "testdata.no_default_sint32", - Tag: "zigzag32,107,opt,name=no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", } var E_NoDefaultSint64 = &proto.ExtensionDesc{ @@ -2980,7 +3015,7 @@ ExtensionType: (*int64)(nil), Field: 108, Name: "testdata.no_default_sint64", - Tag: "zigzag64,108,opt,name=no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", } var E_NoDefaultFixed32 = &proto.ExtensionDesc{ @@ -2988,7 +3023,7 @@ ExtensionType: (*uint32)(nil), Field: 109, Name: "testdata.no_default_fixed32", - Tag: "fixed32,109,opt,name=no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", } var E_NoDefaultFixed64 = &proto.ExtensionDesc{ @@ -2996,7 +3031,7 @@ ExtensionType: (*uint64)(nil), Field: 110, Name: "testdata.no_default_fixed64", - Tag: "fixed64,110,opt,name=no_default_fixed64", + Tag: 
"fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", } var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ @@ -3004,7 +3039,7 @@ ExtensionType: (*int32)(nil), Field: 111, Name: "testdata.no_default_sfixed32", - Tag: "fixed32,111,opt,name=no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", } var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ @@ -3012,7 +3047,7 @@ ExtensionType: (*int64)(nil), Field: 112, Name: "testdata.no_default_sfixed64", - Tag: "fixed64,112,opt,name=no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", } var E_NoDefaultBool = &proto.ExtensionDesc{ @@ -3020,7 +3055,7 @@ ExtensionType: (*bool)(nil), Field: 113, Name: "testdata.no_default_bool", - Tag: "varint,113,opt,name=no_default_bool", + Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", } var E_NoDefaultString = &proto.ExtensionDesc{ @@ -3028,7 +3063,7 @@ ExtensionType: (*string)(nil), Field: 114, Name: "testdata.no_default_string", - Tag: "bytes,114,opt,name=no_default_string", + Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", } var E_NoDefaultBytes = &proto.ExtensionDesc{ @@ -3036,7 +3071,7 @@ ExtensionType: ([]byte)(nil), Field: 115, Name: "testdata.no_default_bytes", - Tag: "bytes,115,opt,name=no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", } var E_NoDefaultEnum = &proto.ExtensionDesc{ @@ -3044,7 +3079,7 @@ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), Field: 116, Name: "testdata.no_default_enum", - Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", + Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum", } var E_DefaultDouble = &proto.ExtensionDesc{ @@ -3052,7 +3087,7 @@ ExtensionType: (*float64)(nil), Field: 201, Name: "testdata.default_double", - Tag: "fixed64,201,opt,name=default_double,def=3.1415", + Tag: 
"fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", } var E_DefaultFloat = &proto.ExtensionDesc{ @@ -3060,7 +3095,7 @@ ExtensionType: (*float32)(nil), Field: 202, Name: "testdata.default_float", - Tag: "fixed32,202,opt,name=default_float,def=3.14", + Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", } var E_DefaultInt32 = &proto.ExtensionDesc{ @@ -3068,7 +3103,7 @@ ExtensionType: (*int32)(nil), Field: 203, Name: "testdata.default_int32", - Tag: "varint,203,opt,name=default_int32,def=42", + Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", } var E_DefaultInt64 = &proto.ExtensionDesc{ @@ -3076,7 +3111,7 @@ ExtensionType: (*int64)(nil), Field: 204, Name: "testdata.default_int64", - Tag: "varint,204,opt,name=default_int64,def=43", + Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", } var E_DefaultUint32 = &proto.ExtensionDesc{ @@ -3084,7 +3119,7 @@ ExtensionType: (*uint32)(nil), Field: 205, Name: "testdata.default_uint32", - Tag: "varint,205,opt,name=default_uint32,def=44", + Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", } var E_DefaultUint64 = &proto.ExtensionDesc{ @@ -3092,7 +3127,7 @@ ExtensionType: (*uint64)(nil), Field: 206, Name: "testdata.default_uint64", - Tag: "varint,206,opt,name=default_uint64,def=45", + Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", } var E_DefaultSint32 = &proto.ExtensionDesc{ @@ -3100,7 +3135,7 @@ ExtensionType: (*int32)(nil), Field: 207, Name: "testdata.default_sint32", - Tag: "zigzag32,207,opt,name=default_sint32,def=46", + Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", } var E_DefaultSint64 = &proto.ExtensionDesc{ @@ -3108,7 +3143,7 @@ ExtensionType: (*int64)(nil), Field: 208, Name: "testdata.default_sint64", - Tag: "zigzag64,208,opt,name=default_sint64,def=47", + Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", } var E_DefaultFixed32 = &proto.ExtensionDesc{ @@ -3116,7 +3151,7 
@@ ExtensionType: (*uint32)(nil), Field: 209, Name: "testdata.default_fixed32", - Tag: "fixed32,209,opt,name=default_fixed32,def=48", + Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", } var E_DefaultFixed64 = &proto.ExtensionDesc{ @@ -3124,7 +3159,7 @@ ExtensionType: (*uint64)(nil), Field: 210, Name: "testdata.default_fixed64", - Tag: "fixed64,210,opt,name=default_fixed64,def=49", + Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", } var E_DefaultSfixed32 = &proto.ExtensionDesc{ @@ -3132,7 +3167,7 @@ ExtensionType: (*int32)(nil), Field: 211, Name: "testdata.default_sfixed32", - Tag: "fixed32,211,opt,name=default_sfixed32,def=50", + Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", } var E_DefaultSfixed64 = &proto.ExtensionDesc{ @@ -3140,7 +3175,7 @@ ExtensionType: (*int64)(nil), Field: 212, Name: "testdata.default_sfixed64", - Tag: "fixed64,212,opt,name=default_sfixed64,def=51", + Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", } var E_DefaultBool = &proto.ExtensionDesc{ @@ -3148,7 +3183,7 @@ ExtensionType: (*bool)(nil), Field: 213, Name: "testdata.default_bool", - Tag: "varint,213,opt,name=default_bool,def=1", + Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", } var E_DefaultString = &proto.ExtensionDesc{ @@ -3156,7 +3191,7 @@ ExtensionType: (*string)(nil), Field: 214, Name: "testdata.default_string", - Tag: "bytes,214,opt,name=default_string,def=Hello, string", + Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string", } var E_DefaultBytes = &proto.ExtensionDesc{ @@ -3164,7 +3199,7 @@ ExtensionType: ([]byte)(nil), Field: 215, Name: "testdata.default_bytes", - Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", + Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", } var E_DefaultEnum = &proto.ExtensionDesc{ @@ -3172,7 +3207,7 @@ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), Field: 216, Name: 
"testdata.default_enum", - Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", + Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", } var E_X201 = &proto.ExtensionDesc{ @@ -3593,6 +3628,7 @@ proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested") proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage") proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage") + proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage") proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage") proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup") proto.RegisterType((*Ext)(nil), "testdata.Ext") @@ -3713,214 +3749,281 @@ } var fileDescriptor0 = []byte{ - // 3329 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x58, 0xd9, 0x73, 0x1b, 0xc7, - 0xf1, 0xd6, 0xe2, 0xc6, 0x00, 0x24, 0x96, 0x4b, 0x1d, 0x10, 0xe5, 0x83, 0x5a, 0xd9, 0xfa, 0xc9, - 0x92, 0x0d, 0x93, 0x20, 0x48, 0x49, 0xfb, 0xab, 0x94, 0x23, 0x4a, 0x00, 0xcd, 0x98, 0x24, 0x18, - 0x92, 0x8a, 0xcb, 0x4e, 0x25, 0x28, 0x90, 0x5c, 0x82, 0x30, 0x01, 0x2c, 0x04, 0x2c, 0x62, 0x31, - 0x4f, 0x79, 0xcd, 0x43, 0x1e, 0x92, 0x54, 0xaa, 0x5c, 0xf9, 0x1f, 0x92, 0xbc, 0xe7, 0x2f, 0x88, - 0xef, 0xfb, 0xc8, 0x7d, 0x39, 0xf7, 0xed, 0x24, 0x76, 0x92, 0x97, 0xa4, 0xbb, 0x67, 0x6f, 0x60, - 0x87, 0xb4, 0x1e, 0x6c, 0x70, 0xbe, 0xfe, 0x7a, 0x66, 0x7a, 0xba, 0x7b, 0xbe, 0x59, 0xc6, 0x4c, - 0xbd, 0x6f, 0x16, 0xba, 0x3d, 0xc3, 0x34, 0x94, 0x14, 0xfe, 0xde, 0xad, 0x9b, 0x75, 0xf5, 0x01, - 0x96, 0x58, 0x32, 0xca, 0x9d, 0x41, 0x5b, 0x99, 0x62, 0xd1, 0x3d, 0xc3, 0xc8, 0x4b, 0xd3, 0x91, - 0x4b, 0xe3, 0xc5, 0xb1, 0x82, 0x6d, 0x51, 0xa8, 0x54, 0xab, 0xea, 0x65, 0x96, 0x59, 0x32, 0xb6, - 0x60, 0xa4, 0xd2, 0xd4, 0x5b, 0xbb, 0xca, 0x18, 0x8b, 0xaf, 0xd4, 0xb7, 0xf5, 0x16, 0x19, 0xa7, - 0x95, 0x2c, 0x8b, 0x6d, 0x1d, 0x76, 0xf5, 0x7c, 
0x04, 0xff, 0x52, 0xbf, 0x7c, 0x12, 0x5d, 0xa2, - 0xb1, 0x72, 0x81, 0xc5, 0x9e, 0x68, 0x76, 0x76, 0x2d, 0x9f, 0xa7, 0x5c, 0x9f, 0x1c, 0x2f, 0x3c, - 0xb1, 0xbc, 0x76, 0x0b, 0x9d, 0x6d, 0xd5, 0xb7, 0x5b, 0x48, 0x97, 0xc0, 0x19, 0xfc, 0xb9, 0x5e, - 0xef, 0xd5, 0xdb, 0xf9, 0x28, 0xfc, 0x19, 0x57, 0x1e, 0x66, 0x63, 0x1b, 0xfa, 0x9d, 0x41, 0xb3, - 0xa7, 0xef, 0xd2, 0xdc, 0xf9, 0x18, 0xf8, 0xca, 0x0c, 0xfb, 0xe2, 0x0b, 0x23, 0xeb, 0xae, 0x5e, - 0x37, 0x6d, 0xeb, 0xf8, 0x74, 0x54, 0x68, 0x5d, 0xed, 0x9a, 0x4d, 0xa3, 0x53, 0x6f, 0x71, 0xeb, - 0x04, 0x4c, 0x19, 0x6a, 0x7d, 0x86, 0xe5, 0x2a, 0xb5, 0x45, 0xc3, 0x68, 0xd5, 0x7a, 0xd6, 0x82, - 0xf2, 0x0c, 0xd6, 0x92, 0x52, 0xf2, 0x4c, 0xae, 0xd4, 0x96, 0x3b, 0xe6, 0x5c, 0xd1, 0x45, 0x32, - 0x80, 0xc4, 0x1d, 0x64, 0xa1, 0xe4, 0x22, 0x59, 0x40, 0xa2, 0x10, 0x6c, 0xa5, 0x52, 0xab, 0x34, - 0xef, 0xea, 0xbb, 0x5e, 0xd6, 0x18, 0x60, 0x49, 0x0f, 0xe6, 0xe5, 0x8d, 0x03, 0x96, 0x50, 0xce, - 0xb2, 0x89, 0x4a, 0xed, 0x76, 0xd3, 0x3f, 0x59, 0x0e, 0xa0, 0x31, 0x17, 0xf2, 0xb2, 0x64, 0x80, - 0x62, 0x7c, 0x1d, 0x95, 0x96, 0x51, 0x37, 0x5d, 0x64, 0x02, 0x90, 0x08, 0x27, 0xdd, 0x32, 0x06, - 0x10, 0x7f, 0x17, 0x52, 0x00, 0x92, 0x38, 0xb4, 0x69, 0xf6, 0x9a, 0x9d, 0x86, 0x0b, 0x4d, 0xd2, - 0x81, 0x93, 0xbf, 0xc5, 0x43, 0x08, 0x93, 0x8b, 0xe8, 0x80, 0x64, 0x2d, 0x52, 0x60, 0x7d, 0x7b, - 0x00, 0x4d, 0xb8, 0x90, 0x77, 0x7d, 0x0d, 0x80, 0x14, 0x5f, 0x68, 0xf9, 0xe9, 0xe5, 0x4f, 0xc2, - 0xc1, 0x05, 0x42, 0x6b, 0x21, 0xa7, 0x00, 0x09, 0x84, 0xd6, 0x42, 0x4e, 0x03, 0x32, 0x14, 0x5a, - 0x0b, 0x3b, 0x03, 0xd8, 0x50, 0x68, 0x2d, 0x2c, 0x0f, 0x58, 0x30, 0xb4, 0x16, 0x74, 0x16, 0xa0, - 0x60, 0x68, 0x2d, 0x68, 0x0a, 0xa0, 0x40, 0x68, 0x2d, 0xe4, 0x1c, 0x20, 0xc1, 0xd0, 0x5a, 0xd0, - 0x3d, 0x00, 0x05, 0x43, 0x6b, 0x41, 0xf7, 0x02, 0x94, 0x06, 0xc8, 0x13, 0x5a, 0x0b, 0x79, 0x5e, - 0x02, 0x28, 0x0b, 0x8b, 0xf7, 0xc6, 0xd6, 0xc2, 0x5e, 0x40, 0x6c, 0xc2, 0xc5, 0xbc, 0x2b, 0x7c, - 0x11, 0x31, 0x6f, 0x74, 0x0d, 0x2b, 0xdb, 0xf3, 0xf7, 0x41, 0xa2, 0xfb, 0xa2, 0xeb, 
0x20, 0xf7, - 0x53, 0xd5, 0x79, 0xa2, 0xeb, 0x20, 0xd3, 0x80, 0x04, 0xa2, 0xeb, 0x60, 0xe7, 0x01, 0x0b, 0x44, - 0xd7, 0xc1, 0x54, 0xc0, 0xfc, 0xd1, 0x75, 0xa0, 0x0b, 0x00, 0xf9, 0xa3, 0xeb, 0x40, 0x0f, 0x00, - 0xe4, 0x8b, 0xae, 0x83, 0x3c, 0x08, 0x88, 0x3f, 0xba, 0x0e, 0x74, 0x11, 0x20, 0x7f, 0x74, 0x1d, - 0xe8, 0xff, 0xa8, 0xb9, 0x78, 0xa2, 0xeb, 0x20, 0xdf, 0xc2, 0xbe, 0xe3, 0x8f, 0xae, 0x83, 0x7d, - 0x1b, 0x31, 0x7f, 0x74, 0x1d, 0xec, 0x3b, 0x88, 0x29, 0xca, 0x7d, 0xe4, 0x12, 0xa3, 0xbb, 0xab, - 0xef, 0xd5, 0x07, 0x2d, 0x0c, 0xfc, 0x25, 0x0c, 0xaf, 0x16, 0x33, 0x7b, 0x03, 0x5d, 0xb9, 0x17, - 0xb9, 0x3c, 0xc8, 0xae, 0xc1, 0x43, 0x18, 0x65, 0x2d, 0x32, 0x57, 0x74, 0x60, 0xf0, 0xec, 0xc2, - 0x97, 0x31, 0xd4, 0x5a, 0x64, 0xa1, 0xa4, 0x4c, 0xb3, 0x49, 0x37, 0xdc, 0xae, 0xc1, 0x15, 0x8c, - 0xb7, 0x16, 0x9d, 0x2b, 0xce, 0x78, 0x2c, 0x7c, 0x2e, 0x1e, 0xc6, 0xa8, 0x6b, 0xd1, 0x85, 0x12, - 0x5a, 0x28, 0x4e, 0xe8, 0x5d, 0x83, 0x47, 0x30, 0xf6, 0x5a, 0x0c, 0x5c, 0x78, 0x2c, 0x7c, 0x2e, - 0x0a, 0x78, 0x04, 0x5a, 0x0c, 0x5c, 0xcc, 0x28, 0xe7, 0x71, 0x99, 0xfc, 0x20, 0x5c, 0x83, 0x47, - 0xf1, 0x24, 0xb4, 0xc4, 0xdc, 0x6c, 0x69, 0x76, 0xfe, 0xba, 0xa2, 0xa2, 0x13, 0xeb, 0x44, 0x5c, - 0x9b, 0x19, 0x3c, 0x12, 0x2d, 0x51, 0xbc, 0x3a, 0x7b, 0xad, 0x78, 0x0d, 0x3a, 0xae, 0xe2, 0x1c, - 0x8d, 0x6b, 0x33, 0x8b, 0x67, 0xa3, 0xc9, 0xfb, 0x7a, 0xab, 0x65, 0x3c, 0x3c, 0xad, 0x3e, 0x6b, - 0xf4, 0x5a, 0xbb, 0xe7, 0xc1, 0xdd, 0x05, 0x9c, 0x94, 0x9f, 0x96, 0x6b, 0xfc, 0x55, 0xbc, 0x17, - 0xb2, 0x5a, 0x72, 0xb1, 0xd9, 0xe8, 0x18, 0x7d, 0x9d, 0xaf, 0x7d, 0x33, 0xb8, 0xbb, 0xaf, 0xa1, - 0xd5, 0x84, 0x16, 0x7d, 0x04, 0x42, 0xec, 0x58, 0xf8, 0x76, 0xf7, 0x75, 0xb4, 0x50, 0xc0, 0x02, - 0xa2, 0xac, 0xb2, 0xd3, 0x81, 0xfe, 0x53, 0xeb, 0xd6, 0x77, 0x0e, 0xc0, 0xaa, 0x88, 0x6d, 0x68, - 0x31, 0x22, 0x4b, 0xb0, 0x98, 0x33, 0xc1, 0x56, 0x64, 0x1b, 0xcd, 0x61, 0x47, 0xf2, 0x19, 0x79, - 0xaa, 0xd0, 0x36, 0x2a, 0x61, 0x73, 0x22, 0xa3, 0x07, 0xd9, 0xd9, 0xe1, 0x06, 0x65, 0x9b, 0xcd, - 0x63, 0x9f, 0x0a, 0x98, 
0x8d, 0xf0, 0xb6, 0x80, 0x2d, 0x8b, 0xcc, 0x1e, 0x60, 0xf9, 0xa1, 0xb6, - 0x65, 0x5b, 0x5d, 0xc5, 0xee, 0xe5, 0xb7, 0x1a, 0xe1, 0xeb, 0x1a, 0x36, 0x32, 0x77, 0xf9, 0xfe, - 0x66, 0x66, 0x1b, 0x5d, 0xc7, 0x9e, 0xe6, 0xba, 0x0a, 0xf4, 0x35, 0xdb, 0x4a, 0xc3, 0xf6, 0x66, - 0xad, 0x3e, 0x3f, 0xd4, 0xac, 0x6c, 0xab, 0x0f, 0xa3, 0xd8, 0xb3, 0xfc, 0x66, 0x23, 0xd6, 0xf5, - 0x11, 0x9a, 0x29, 0x64, 0x36, 0xcf, 0xc6, 0xec, 0x2b, 0xa3, 0xd1, 0x33, 0x06, 0xdd, 0x7c, 0x05, - 0xee, 0x0d, 0x56, 0xbc, 0x6f, 0x48, 0x51, 0xd8, 0x5a, 0x61, 0x09, 0xad, 0x38, 0x8d, 0x3b, 0xe5, - 0xb4, 0x75, 0xf0, 0x38, 0x9a, 0xc6, 0xad, 0x1c, 0x9a, 0x5d, 0xe5, 0x9c, 0xf6, 0x34, 0xa4, 0xca, - 0x28, 0x9a, 0xad, 0x1e, 0x88, 0x36, 0x75, 0xd1, 0x95, 0x2a, 0xdc, 0xcf, 0xa9, 0xa0, 0x76, 0x59, - 0xc2, 0xdb, 0x93, 0xdb, 0x79, 0xe7, 0x1b, 0xb2, 0xfb, 0xb4, 0x6d, 0xe7, 0x9b, 0x60, 0xd8, 0xee, - 0xb3, 0x24, 0xb8, 0x9e, 0x93, 0x40, 0x66, 0xa1, 0x92, 0x4a, 0xb1, 0xd8, 0x67, 0xaa, 0xcb, 0xb7, - 0xe4, 0x13, 0xf8, 0x6b, 0xb1, 0x5a, 0x5d, 0x81, 0xc8, 0xa5, 0x59, 0x7c, 0xf1, 0xa9, 0xad, 0xf2, - 0xa6, 0x1c, 0x51, 0x72, 0x2c, 0x53, 0x59, 0x5e, 0x5b, 0x2a, 0x6f, 0xac, 0x6f, 0x2c, 0xaf, 0x6d, - 0xc9, 0x51, 0xc4, 0x2a, 0x2b, 0xd5, 0x1b, 0x5b, 0x72, 0x4c, 0x49, 0xb2, 0x28, 0x8e, 0xc5, 0x15, - 0xc6, 0x12, 0x9b, 0x5b, 0x80, 0x2f, 0xc9, 0x09, 0xf4, 0xb2, 0xb5, 0xbc, 0x5a, 0x96, 0x93, 0x68, - 0xb9, 0x75, 0x7b, 0x7d, 0xa5, 0x2c, 0xa7, 0xf0, 0xe7, 0x8d, 0x8d, 0x8d, 0x1b, 0x4f, 0xc9, 0x69, - 0x24, 0xad, 0xde, 0x58, 0x97, 0x19, 0xc1, 0x37, 0x16, 0x01, 0xce, 0x80, 0x16, 0x4c, 0x55, 0x6e, - 0xaf, 0xdd, 0xdc, 0x5a, 0xae, 0xae, 0xc9, 0x59, 0xf5, 0x25, 0x89, 0xb1, 0x25, 0x63, 0xf3, 0xa0, - 0xd9, 0x25, 0x3d, 0x08, 0xde, 0xfb, 0xf0, 0xbb, 0x46, 0x69, 0x61, 0x69, 0xa4, 0x93, 0x2c, 0x4b, - 0x63, 0x7b, 0xbc, 0x20, 0x48, 0x1f, 0x25, 0xfd, 0xa3, 0x0b, 0x25, 0x52, 0x46, 0x09, 0x65, 0x92, - 0x65, 0x68, 0xb4, 0x4f, 0x1d, 0x84, 0x24, 0x51, 0x5a, 0x99, 0x65, 0x69, 0x1c, 0xe4, 0x27, 0x95, - 0x1b, 0xce, 0x0b, 0x7b, 0xf6, 0x02, 0xfe, 0xe0, 0x27, 0xb5, 
0xc0, 0xd2, 0xce, 0x1f, 0xe8, 0x94, - 0xb8, 0xd6, 0xaa, 0x64, 0x7b, 0x55, 0x7c, 0xd0, 0x9a, 0x6a, 0x82, 0x22, 0x3d, 0xc5, 0xc6, 0xd6, - 0x8c, 0xce, 0x3a, 0xa5, 0x27, 0x6d, 0x28, 0xcd, 0xa4, 0x7a, 0x1e, 0x6f, 0xd9, 0xb8, 0x7a, 0x8e, - 0x31, 0x0f, 0x30, 0xc6, 0xa4, 0x6d, 0x0e, 0x60, 0xfe, 0xaa, 0xd3, 0x2c, 0xb1, 0x5a, 0xbf, 0xbb, - 0x55, 0x6f, 0x28, 0xa7, 0x19, 0x6b, 0xd5, 0xfb, 0x26, 0x6c, 0x0c, 0x0f, 0xf0, 0xbf, 0xf0, 0x4f, - 0xc2, 0xee, 0xa7, 0x7e, 0x9e, 0xb1, 0x6a, 0x6b, 0x77, 0x55, 0xef, 0xf7, 0xeb, 0x0d, 0x5d, 0xb9, - 0xc2, 0x12, 0x1d, 0x70, 0xa3, 0xa3, 0x74, 0x46, 0x49, 0x7a, 0xce, 0xdd, 0x90, 0x6b, 0x55, 0x58, - 0x23, 0x13, 0x25, 0xc3, 0xa2, 0xa0, 0xdf, 0x49, 0x3e, 0xc7, 0xa7, 0x4e, 0xb3, 0x84, 0x35, 0x0c, - 0xaa, 0xbc, 0x53, 0x6f, 0xeb, 0x79, 0xee, 0xbf, 0xc7, 0xd8, 0x9a, 0xfe, 0xec, 0x31, 0xfc, 0xbb, - 0x56, 0x23, 0xfc, 0x47, 0xa7, 0x2e, 0x8f, 0xf6, 0x8f, 0x47, 0x0b, 0xaf, 0x87, 0xdd, 0x1a, 0x3f, - 0x06, 0x92, 0xf2, 0xea, 0x4d, 0x96, 0x5d, 0xee, 0x74, 0xf4, 0x9e, 0x3d, 0x2b, 0x30, 0xf6, 0x8d, - 0xbe, 0x69, 0xbd, 0x1a, 0x14, 0x16, 0xeb, 0x1a, 0x3d, 0x93, 0xaf, 0x5b, 0x8b, 0xc1, 0x2d, 0x33, - 0xa3, 0x4c, 0xb0, 0xf4, 0x8e, 0x01, 0x94, 0x1d, 0x5c, 0x1a, 0x36, 0xe8, 0x94, 0x7a, 0xc0, 0xb2, - 0x55, 0x73, 0xdf, 0x75, 0x02, 0xab, 0x39, 0xd0, 0x0f, 0x69, 0xd6, 0x28, 0x3e, 0x16, 0xbe, 0x50, - 0x6f, 0x0d, 0xf8, 0xdb, 0x21, 0xab, 0x8c, 0xb3, 0xc4, 0xb3, 0x7a, 0xb3, 0xb1, 0x6f, 0x12, 0x37, - 0x02, 0xdd, 0x25, 0xde, 0xc4, 0x05, 0xc0, 0xa3, 0x01, 0x77, 0x79, 0xda, 0xdd, 0xa5, 0x77, 0x5d, - 0x97, 0x53, 0xa9, 0x5d, 0xf9, 0x4b, 0xf0, 0x2f, 0xa2, 0x7e, 0x23, 0xca, 0xd2, 0xab, 0x87, 0xf6, - 0x54, 0xe0, 0x7d, 0xc7, 0x18, 0x74, 0xf8, 0x82, 0xe3, 0xce, 0x86, 0x9d, 0x77, 0xca, 0x9d, 0x81, - 0x61, 0xea, 0x34, 0x55, 0x1a, 0x97, 0xd5, 0xd5, 0x4d, 0x98, 0x08, 0x45, 0x9c, 0x33, 0x6f, 0x5c, - 0x34, 0xaf, 0x72, 0x91, 0x25, 0x0c, 0xdc, 0x5a, 0x1f, 0x1e, 0x1e, 0x51, 0xbf, 0x9d, 0x6f, 0xcb, - 0x0f, 0xb1, 0x34, 0xb4, 0xb1, 0x1a, 0x77, 0x99, 0x0d, 0x9a, 0xfa, 0x5c, 0x5e, 0x61, 0xa9, 0xed, - 
0xe6, 0x81, 0xde, 0xdf, 0x87, 0xf8, 0x25, 0x61, 0xf2, 0xf1, 0xe2, 0x59, 0xd7, 0xd2, 0xd9, 0x59, - 0xe1, 0xa6, 0xd1, 0x32, 0x7a, 0xca, 0x0c, 0x54, 0x8e, 0xd1, 0xd6, 0xf9, 0x91, 0xa5, 0xa8, 0xc7, - 0xdd, 0x3b, 0xca, 0x7a, 0x13, 0x8c, 0x78, 0xad, 0x4c, 0xf0, 0x95, 0x6c, 0xe3, 0x9d, 0x0c, 0xaf, - 0x1f, 0x54, 0xa5, 0x32, 0xce, 0xd8, 0xd8, 0xc3, 0x4b, 0x03, 0x2a, 0x1a, 0x2e, 0xfb, 0xa9, 0x69, - 0xa8, 0x2e, 0x87, 0xe1, 0x54, 0x17, 0x4f, 0xf8, 0x34, 0x1e, 0xb6, 0x0a, 0xf1, 0xe1, 0x2b, 0x80, - 0x0e, 0xb2, 0x51, 0xc6, 0x86, 0x05, 0x1d, 0x64, 0x69, 0xa3, 0x5c, 0x5e, 0x83, 0x8e, 0x85, 0xbd, - 0x6b, 0xe5, 0x76, 0x59, 0x8e, 0x78, 0xce, 0xe5, 0x2b, 0x12, 0x8b, 0x96, 0xef, 0x9a, 0x78, 0x04, - 0xb8, 0x36, 0x9e, 0x73, 0xc5, 0x19, 0x16, 0x6b, 0x1b, 0x3d, 0x5d, 0x99, 0x1c, 0xb1, 0x68, 0x78, - 0x59, 0x60, 0xe8, 0x3d, 0xef, 0x58, 0xe0, 0x17, 0xcf, 0xb3, 0x98, 0xa9, 0x83, 0x9f, 0x91, 0x8c, - 0x7d, 0x72, 0x7a, 0x01, 0x4a, 0x63, 0xd0, 0xde, 0xd6, 0x7b, 0xa3, 0x8d, 0x9a, 0xb4, 0x81, 0x4f, - 0x32, 0xf9, 0xa6, 0xd1, 0xee, 0xb6, 0xf4, 0xbb, 0xe0, 0x55, 0xef, 0xf4, 0xa1, 0x49, 0x63, 0x42, - 0xec, 0x35, 0x7b, 0x94, 0xde, 0x28, 0xa1, 0x21, 0x17, 0xfb, 0x3a, 0x24, 0xf3, 0x2e, 0x4f, 0x70, - 0x84, 0xcd, 0xfd, 0x66, 0x0f, 0xd3, 0x1a, 0xdb, 0xc5, 0x12, 0xcb, 0xdd, 0xe2, 0x5a, 0xa4, 0x6f, - 0xb9, 0x86, 0x47, 0x76, 0xd6, 0x1e, 0xa2, 0x07, 0x39, 0x04, 0xe2, 0xe9, 0xf2, 0x46, 0x15, 0xa2, - 0x03, 0x61, 0xaa, 0xae, 0x95, 0x21, 0x36, 0xf0, 0x63, 0xeb, 0xc9, 0xaa, 0x2f, 0x34, 0xf7, 0xb0, - 0xac, 0xb3, 0xba, 0x4d, 0xdd, 0x24, 0x04, 0xdb, 0x4a, 0x52, 0x8b, 0xa4, 0x24, 0x35, 0xc9, 0xe2, - 0xe5, 0x76, 0xd7, 0x3c, 0x54, 0x75, 0x96, 0xb1, 0x8c, 0x56, 0x9a, 0xd0, 0x9f, 0x0a, 0x2c, 0xd9, - 0xb6, 0x76, 0x24, 0xd1, 0x9d, 0xe8, 0x3d, 0x78, 0xd7, 0xce, 0xfe, 0x0d, 0x77, 0x51, 0xd2, 0x53, - 0xc5, 0x56, 0x19, 0x44, 0x78, 0x19, 0xf0, 0x1a, 0x89, 0x62, 0x8d, 0xa8, 0x25, 0x96, 0xe4, 0xf2, - 0xae, 0x4f, 0x2d, 0x9c, 0x2b, 0x3d, 0x7e, 0xf4, 0xbc, 0x4f, 0x40, 0x3e, 0x50, 0xf6, 0x58, 0x83, - 0x54, 0xb7, 0xea, 0x73, 0x31, 0x96, 
0xb2, 0xb7, 0x0e, 0xbc, 0x04, 0x17, 0x63, 0xc4, 0xb0, 0x65, - 0xf4, 0x24, 0x4b, 0x5a, 0xf2, 0xcb, 0x6a, 0x18, 0x28, 0x9e, 0xed, 0x41, 0xb8, 0x20, 0xa2, 0x8e, - 0x64, 0x3e, 0xcd, 0xd2, 0x8e, 0xbc, 0xa2, 0xc2, 0xb7, 0x84, 0xb2, 0x3b, 0x0e, 0xe6, 0x71, 0x57, - 0x1e, 0x9f, 0x86, 0x1b, 0xcb, 0x12, 0x50, 0xf4, 0x01, 0xc0, 0x16, 0xc5, 0xce, 0x38, 0x98, 0x27, - 0x3d, 0x52, 0xf8, 0x0c, 0x4e, 0x4a, 0x22, 0x89, 0x6a, 0xc6, 0x15, 0xc0, 0x79, 0x24, 0x70, 0x61, - 0x44, 0x79, 0xee, 0xca, 0x5e, 0x15, 0x11, 0x1e, 0x17, 0xa8, 0x9a, 0xd1, 0x62, 0x37, 0x8f, 0x6e, - 0x49, 0xec, 0x52, 0x19, 0x79, 0x14, 0xee, 0x29, 0x62, 0xf3, 0x05, 0x66, 0x5d, 0x59, 0xeb, 0x0c, - 0xd3, 0xf5, 0xe8, 0x68, 0xd9, 0x47, 0x31, 0x7c, 0x98, 0x3f, 0x70, 0x3d, 0x62, 0xfd, 0xe7, 0xdd, - 0x83, 0xb5, 0x43, 0xcc, 0xcb, 0x5f, 0xe3, 0x15, 0x07, 0x41, 0x04, 0xc2, 0x7a, 0xb3, 0xb3, 0x07, - 0x97, 0x27, 0x6e, 0x27, 0x0a, 0x3f, 0xf9, 0x21, 0xac, 0xe1, 0xa0, 0x4c, 0x83, 0xb1, 0x47, 0x70, - 0x54, 0x01, 0xc1, 0x50, 0x5b, 0xab, 0x77, 0xe0, 0x3a, 0x24, 0xcb, 0x4e, 0xbd, 0x03, 0x7b, 0x8b, - 0xf6, 0x07, 0xdb, 0x79, 0x25, 0xf8, 0xe9, 0x64, 0x73, 0xb0, 0xed, 0x1c, 0xa9, 0xc2, 0x52, 0x90, - 0x0a, 0xb5, 0x2f, 0xea, 0x3d, 0x23, 0x3f, 0x49, 0xfb, 0x3f, 0x71, 0xcc, 0x1e, 0x00, 0xd7, 0x6a, - 0xc6, 0xeb, 0x29, 0xcb, 0xa4, 0x0e, 0xef, 0xfd, 0x9a, 0x74, 0x55, 0x5d, 0x65, 0x59, 0x5b, 0x49, - 0x51, 0xc5, 0x5c, 0xc1, 0x64, 0x04, 0x9f, 0x94, 0xd3, 0xe3, 0xc5, 0x7b, 0xdc, 0xd5, 0x78, 0xcd, - 0xf8, 0xf6, 0x55, 0x39, 0xb0, 0x00, 0x49, 0xfd, 0xa6, 0x04, 0xb5, 0x04, 0x0d, 0xc5, 0x36, 0xc6, - 0xe4, 0xde, 0x86, 0x44, 0xec, 0x93, 0x3f, 0x7c, 0x2f, 0x67, 0xe9, 0x4f, 0x5b, 0xa0, 0x46, 0x9c, - 0xc7, 0x01, 0xd4, 0x04, 0x1c, 0x44, 0x9f, 0xd7, 0x36, 0x64, 0x48, 0x06, 0xff, 0xb2, 0xcd, 0x62, - 0xce, 0xf3, 0xe0, 0x2c, 0x1b, 0xa3, 0xf3, 0x72, 0xa0, 0xa4, 0xf3, 0x28, 0xc8, 0xb1, 0x24, 0xaf, - 0x96, 0x3e, 0x7d, 0xb3, 0x4a, 0x63, 0xff, 0x20, 0xf1, 0xc3, 0x2f, 0x87, 0xa4, 0xfa, 0xff, 0x2c, - 0x45, 0x1d, 0x15, 0xee, 0x7f, 0xe5, 0x7e, 0x26, 0x35, 0xf2, 0x3a, 0x35, 
0xec, 0x93, 0x1e, 0xa9, - 0x63, 0xc1, 0x85, 0xa5, 0xa9, 0x71, 0x26, 0x2d, 0xa1, 0x38, 0xb9, 0xcb, 0x8b, 0x45, 0xad, 0x58, - 0x64, 0xb8, 0xdc, 0x45, 0x64, 0x80, 0x81, 0x7c, 0xd6, 0x4f, 0xc6, 0x9f, 0x87, 0xfc, 0x6b, 0x1c, - 0x0a, 0x20, 0xca, 0x7c, 0x58, 0xe7, 0xba, 0x01, 0x5b, 0x41, 0x6c, 0x8f, 0xae, 0x47, 0x49, 0xfd, - 0x20, 0xca, 0xc6, 0xad, 0x1e, 0xf1, 0x64, 0xd3, 0xdc, 0x5f, 0xad, 0x77, 0x95, 0xc7, 0x58, 0x16, - 0x5b, 0x45, 0xad, 0x5d, 0xef, 0x76, 0x31, 0xf7, 0x25, 0xba, 0xbb, 0x1e, 0x1a, 0x6a, 0x35, 0x96, - 0x7d, 0x61, 0x0d, 0x8c, 0x57, 0xb9, 0x6d, 0xb9, 0x63, 0xf6, 0x0e, 0x95, 0x4f, 0xb0, 0x4c, 0xbb, - 0xdf, 0x70, 0xf8, 0x11, 0xe2, 0x5f, 0x0a, 0xe5, 0xaf, 0xf6, 0x1b, 0x3e, 0x3a, 0xcc, 0x8f, 0xcd, - 0xc6, 0xe1, 0x47, 0x8f, 0x98, 0x1f, 0xcb, 0xcf, 0xe7, 0x40, 0x03, 0xc1, 0x0a, 0x89, 0x6b, 0x1a, - 0xa8, 0x03, 0xe9, 0x20, 0x33, 0xc5, 0x8b, 0xa1, 0x74, 0xa8, 0xf0, 0x2d, 0x03, 0xfe, 0x43, 0xdc, - 0xa9, 0x22, 0x93, 0x87, 0xf6, 0xe3, 0x11, 0x2f, 0x71, 0xbf, 0x78, 0x49, 0x6b, 0x91, 0x6b, 0xd2, - 0xd4, 0xa7, 0x58, 0x2e, 0xb8, 0x07, 0x0f, 0x45, 0x01, 0xc5, 0xe0, 0xa1, 0x64, 0x8a, 0x67, 0x3c, - 0x5f, 0x69, 0xbd, 0xc7, 0x42, 0xbe, 0x60, 0xfe, 0xa1, 0xfd, 0x78, 0x9c, 0xa5, 0x02, 0xe2, 0x89, - 0x38, 0x8f, 0xb2, 0x31, 0xdf, 0x26, 0xbc, 0x84, 0xf4, 0x88, 0x05, 0xab, 0xef, 0x47, 0x59, 0xbc, - 0xda, 0xd1, 0x8d, 0x3d, 0xd0, 0x01, 0xbe, 0xb6, 0xfd, 0xf8, 0x09, 0x10, 0x0b, 0xfe, 0x96, 0xed, - 0x19, 0xb2, 0x1b, 0x36, 0x0c, 0x4d, 0x0e, 0xb5, 0x6b, 0xdf, 0xa0, 0xdd, 0xab, 0x61, 0x50, 0x09, - 0x36, 0x6a, 0xef, 0x98, 0xdd, 0xa4, 0xed, 0x49, 0x3c, 0x0d, 0xda, 0x36, 0xf3, 0xb6, 0x66, 0x7b, - 0xcc, 0xdb, 0x94, 0x6d, 0xaa, 0xa7, 0x09, 0x3b, 0x66, 0x9e, 0xee, 0xeb, 0x1d, 0xb3, 0x5b, 0x2f, - 0x8c, 0x5d, 0x09, 0xf4, 0xdd, 0x70, 0xdd, 0x05, 0xc6, 0x97, 0x71, 0x7f, 0xb6, 0xa4, 0xc8, 0x09, - 0xbe, 0x3a, 0x93, 0x6d, 0x72, 0xcf, 0x92, 0xd5, 0x32, 0x55, 0xad, 0xe7, 0xd4, 0x29, 0xf4, 0x85, - 0x4a, 0x8d, 0xaa, 0x17, 0x6c, 0xcf, 0x41, 0x7d, 0xd6, 0x56, 0xea, 0xbd, 0x06, 0x18, 0xd4, 0xe0, - 0xb9, 0xe1, 
0x3c, 0x30, 0x30, 0xf8, 0x39, 0xfb, 0xf8, 0x76, 0x69, 0x40, 0x9a, 0x3a, 0x89, 0xbb, - 0xe5, 0x32, 0x8d, 0xca, 0x1d, 0xbb, 0x7a, 0x7c, 0x11, 0x24, 0xc3, 0xa0, 0x03, 0x82, 0x66, 0x31, - 0xcd, 0x92, 0xa6, 0xd1, 0x6b, 0xd7, 0x4d, 0x43, 0xfd, 0x2e, 0xbc, 0xe3, 0x40, 0xf0, 0xb4, 0x01, - 0xb8, 0x43, 0x97, 0x71, 0xa6, 0x5d, 0x3f, 0x80, 0xba, 0xd2, 0x6b, 0x3b, 0x3d, 0x3b, 0x9d, 0x64, - 0x5b, 0x38, 0xd1, 0x81, 0xe1, 0x84, 0xe3, 0x96, 0x52, 0x48, 0x58, 0xd1, 0x1d, 0xb7, 0xd4, 0x5b, - 0xd2, 0x0a, 0x2d, 0x30, 0x4c, 0xbd, 0xdd, 0xad, 0xed, 0xd0, 0x39, 0xe1, 0x99, 0x5c, 0x62, 0x51, - 0x68, 0xe0, 0x74, 0x44, 0x47, 0x44, 0x70, 0x9a, 0x45, 0xa1, 0x33, 0xd0, 0xc1, 0x65, 0x8a, 0x13, - 0x9e, 0x6b, 0x87, 0x37, 0xd1, 0xc7, 0x4f, 0x38, 0xfb, 0xb8, 0x9c, 0x63, 0xd1, 0x4a, 0xb5, 0x8a, - 0x77, 0x0a, 0xfc, 0x6f, 0x56, 0x96, 0xb4, 0x07, 0x59, 0xaa, 0xd1, 0xd3, 0x75, 0x2c, 0x97, 0xd1, - 0x22, 0xef, 0x19, 0xec, 0xc0, 0xda, 0x2d, 0x96, 0xdc, 0xe1, 0x22, 0x4f, 0x09, 0x51, 0xe6, 0xf9, - 0xef, 0xf1, 0xe7, 0xd3, 0x94, 0x0b, 0x07, 0x65, 0xa1, 0x56, 0x01, 0xc9, 0x5c, 0x3b, 0xca, 0xcf, - 0xf3, 0xbc, 0x33, 0x8a, 0xfc, 0x94, 0xd8, 0x44, 0xc7, 0xb0, 0xbf, 0x5f, 0xd5, 0x76, 0x29, 0x97, - 0x95, 0xb3, 0xc3, 0x97, 0xbb, 0xed, 0x52, 0x27, 0x05, 0x32, 0xc7, 0x64, 0x0f, 0x8b, 0x54, 0xba, - 0x88, 0xb4, 0x47, 0xf7, 0xba, 0x9f, 0x44, 0xa9, 0x2f, 0x22, 0x35, 0x48, 0x8f, 0x0d, 0x91, 0x40, - 0x8b, 0x08, 0x48, 0xfb, 0x74, 0x87, 0xfb, 0x37, 0x35, 0x38, 0x72, 0xaa, 0x26, 0xe9, 0xb3, 0x61, - 0x96, 0x78, 0xae, 0x67, 0x48, 0xbd, 0xf9, 0x59, 0xfd, 0x23, 0xe7, 0x3a, 0x20, 0xa9, 0x35, 0xcc, - 0x12, 0xcf, 0xd5, 0x22, 0x25, 0x36, 0xcf, 0x14, 0x6f, 0xd8, 0x79, 0x77, 0x13, 0xd1, 0xda, 0xa4, - 0x53, 0x47, 0xd0, 0xc4, 0xb3, 0x75, 0x48, 0xc6, 0x2e, 0xb0, 0x49, 0xef, 0x1a, 0x8f, 0x31, 0x9d, - 0x01, 0xbc, 0xdc, 0x48, 0x9e, 0x78, 0xbe, 0x2e, 0xf0, 0x64, 0xad, 0xc8, 0x72, 0x1e, 0x1e, 0x4a, - 0x22, 0x11, 0xe7, 0x0e, 0xa9, 0xf8, 0x40, 0x1c, 0xa9, 0x4a, 0x45, 0xac, 0x1e, 0x5d, 0x31, 0xfe, - 0xa4, 0xa2, 0x87, 0x83, 0x88, 0xd4, 0xa7, 0x8b, 
0xec, 0x69, 0xdf, 0xf2, 0x74, 0x14, 0x86, 0x02, - 0x8e, 0x49, 0xbd, 0xe6, 0x62, 0xa8, 0x41, 0xc1, 0xfb, 0x26, 0xd3, 0x34, 0x36, 0x7e, 0xfc, 0x12, - 0x7c, 0x5e, 0xe2, 0xaf, 0x80, 0xb9, 0x02, 0x3e, 0x10, 0xb4, 0xab, 0x6c, 0xec, 0xd8, 0x85, 0xf8, - 0x82, 0xc4, 0x65, 0x37, 0x52, 0x21, 0x2d, 0xc6, 0x8e, 0x5d, 0x8c, 0x2f, 0x4a, 0xfc, 0x75, 0x54, - 0x2a, 0x06, 0x68, 0xe2, 0x83, 0x7d, 0x89, 0x6b, 0xea, 0x48, 0x69, 0x0e, 0xb2, 0x62, 0xfc, 0xf8, - 0x05, 0xf9, 0xb2, 0x44, 0x15, 0x19, 0x29, 0x95, 0x82, 0x3c, 0xf1, 0x7c, 0xaf, 0x48, 0x54, 0x93, - 0x91, 0xd2, 0xbc, 0x97, 0x77, 0x74, 0x51, 0xbe, 0x2a, 0x51, 0x55, 0x46, 0x4a, 0x0b, 0x41, 0x9e, - 0x78, 0xbe, 0xd7, 0x48, 0x3f, 0x01, 0xef, 0x2a, 0x1c, 0x43, 0xee, 0x63, 0x14, 0xe6, 0xeb, 0x12, - 0x55, 0x66, 0xa4, 0x74, 0x6d, 0x88, 0x28, 0x9e, 0xf1, 0x0d, 0x89, 0x6a, 0x33, 0x52, 0xba, 0xae, - 0x5d, 0x63, 0xf2, 0xc7, 0x29, 0xce, 0x37, 0x25, 0xaa, 0xce, 0xc8, 0xfc, 0xcc, 0x30, 0x53, 0x3c, - 0xe7, 0x5b, 0x12, 0xd5, 0x67, 0x64, 0x7e, 0x16, 0xa2, 0x93, 0x3d, 0x6e, 0x81, 0xbe, 0xed, 0x79, - 0x67, 0x6b, 0x37, 0x3c, 0x51, 0x3d, 0xb2, 0x48, 0xdf, 0x21, 0x5d, 0xa8, 0x8d, 0x3d, 0xce, 0x1f, - 0xb3, 0x9c, 0xa0, 0x3d, 0xe6, 0xe6, 0xdd, 0x91, 0x15, 0xfb, 0xae, 0x44, 0x25, 0x9b, 0xb5, 0x3c, - 0x90, 0xbd, 0xf6, 0x39, 0x77, 0xed, 0x47, 0x55, 0xef, 0x7b, 0xd2, 0xc7, 0x2a, 0x5f, 0xfc, 0x8c, - 0x02, 0xa1, 0x89, 0xdd, 0x2d, 0xce, 0xcc, 0x7a, 0x2f, 0x63, 0xef, 0x17, 0x14, 0x5e, 0xb6, 0x99, - 0x62, 0xce, 0xf3, 0xe9, 0x08, 0x3f, 0xa1, 0x58, 0xbc, 0x62, 0x28, 0xef, 0x05, 0x21, 0x6f, 0x2e, - 0x94, 0xf7, 0xa2, 0x90, 0x57, 0x0a, 0xe5, 0xbd, 0x24, 0xe4, 0xcd, 0x87, 0xf2, 0x5e, 0x16, 0xf2, - 0x16, 0x42, 0x79, 0xaf, 0x08, 0x79, 0x57, 0x43, 0x79, 0xaf, 0x0a, 0x79, 0xd7, 0x42, 0x79, 0xaf, - 0x09, 0x79, 0xd7, 0x43, 0x79, 0xaf, 0x8b, 0x78, 0xb3, 0x33, 0xa1, 0xbc, 0x37, 0x84, 0xbc, 0xf0, - 0x7c, 0x79, 0x53, 0xc8, 0x0b, 0xcf, 0x97, 0xb7, 0x84, 0xbc, 0xf0, 0x7c, 0x79, 0x5b, 0xc8, 0x0b, - 0xcf, 0x97, 0x77, 0x84, 0xbc, 0xf0, 0x7c, 0x79, 0x57, 0xc8, 0x0b, 0xcf, 0x97, 0xf7, 
0x84, 0xbc, - 0xf0, 0x7c, 0xf9, 0xbe, 0x90, 0x17, 0x9e, 0x2f, 0x3f, 0x10, 0xf2, 0xc2, 0xf3, 0xe5, 0x87, 0x22, - 0x5e, 0x31, 0x3c, 0x5f, 0x7e, 0x24, 0xe4, 0x85, 0xe7, 0xcb, 0x8f, 0x85, 0xbc, 0xf0, 0x7c, 0xf9, - 0x89, 0x90, 0x17, 0x9e, 0x2f, 0x3f, 0x15, 0xf2, 0xc2, 0xf3, 0xe5, 0x67, 0x42, 0x5e, 0x78, 0xbe, - 0xfc, 0x5c, 0xc8, 0x0b, 0xcf, 0x97, 0x5f, 0x08, 0x79, 0xe1, 0xf9, 0xf2, 0x4b, 0x21, 0x2f, 0x3c, - 0x5f, 0xde, 0x17, 0xf2, 0xc2, 0xf3, 0xe5, 0x57, 0x22, 0xde, 0x5c, 0x78, 0xbe, 0xfc, 0x5a, 0xc8, - 0x0b, 0xcf, 0x97, 0xdf, 0x08, 0x79, 0xe1, 0xf9, 0xf2, 0x5b, 0x21, 0x2f, 0x3c, 0x5f, 0x7e, 0x27, - 0xe4, 0x85, 0xe7, 0xcb, 0xef, 0x85, 0xbc, 0xf0, 0x7c, 0xf9, 0x83, 0x90, 0x17, 0x9e, 0x2f, 0x7f, - 0x14, 0xf2, 0xc2, 0xf3, 0xe5, 0x4f, 0x42, 0x5e, 0x78, 0xbe, 0xfc, 0x59, 0xc8, 0x0b, 0xcf, 0x97, - 0xbf, 0x88, 0x78, 0xa5, 0xf0, 0x7c, 0xf9, 0xab, 0x90, 0x17, 0x9e, 0x2f, 0x7f, 0x13, 0xf2, 0xc2, - 0xf3, 0xe5, 0x03, 0x21, 0x2f, 0x3c, 0x5f, 0xfe, 0x2e, 0xe4, 0x85, 0xe7, 0xcb, 0x3f, 0x84, 0xbc, - 0xf0, 0x7c, 0xf9, 0xa7, 0x90, 0x17, 0x9e, 0x2f, 0x1f, 0x0a, 0x79, 0xe1, 0xf9, 0xf2, 0x91, 0x90, - 0x17, 0x9e, 0x2f, 0xff, 0x12, 0xf2, 0xc2, 0xf3, 0xe5, 0xdf, 0x22, 0xde, 0x7c, 0x78, 0xbe, 0xfc, - 0x67, 0x34, 0xef, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xa4, 0xcf, 0x8d, 0xf9, 0x2b, 0x00, - 0x00, + // 4407 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x5a, 0x59, 0x77, 0xdb, 0x48, + 0x76, 0x36, 0xc0, 0xfd, 0x92, 0x12, 0xa1, 0xb2, 0xda, 0x4d, 0x4b, 0x5e, 0x60, 0xce, 0x74, 0x37, + 0xbd, 0x69, 0x24, 0x10, 0xa2, 0x6d, 0xba, 0xd3, 0xe7, 0x78, 0xa1, 0x64, 0x9d, 0xb1, 0x44, 0x05, + 0x52, 0x77, 0x9f, 0xe9, 0x3c, 0xf0, 0x50, 0x22, 0x48, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52, + 0xf2, 0xd2, 0x2f, 0xc9, 0x6b, 0xb6, 0x97, 0xbc, 0xe6, 0x29, 0x4f, 0x49, 0xce, 0xc9, 0x9f, 0x48, + 0xba, 0x7b, 0xd6, 0x9e, 0x35, 0xeb, 0x64, 0x5f, 0x26, 0xfb, 0x36, 0x93, 0xe4, 0xa5, 0xe7, 0xd4, + 0xad, 0x02, 0x50, 0x00, 0x09, 0x48, 0x7e, 0x12, 0x51, 0xf5, 0x7d, 
0xb7, 0x6e, 0x15, 0xbe, 0xba, + 0xb7, 0x6e, 0x41, 0x00, 0x8e, 0x39, 0x71, 0x56, 0x46, 0x63, 0xdb, 0xb1, 0x49, 0x96, 0xfe, 0xee, + 0xb4, 0x9d, 0x76, 0xf9, 0x3a, 0xa4, 0x37, 0xed, 0x86, 0x75, 0x34, 0x24, 0x57, 0x21, 0xd1, 0xb5, + 0xed, 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x6c, 0x34, 0x9b, 0x06, 0xed, + 0x29, 0xdf, 0x81, 0xfc, 0xa6, 0xbd, 0x6f, 0x4e, 0x9c, 0x8d, 0xbe, 0x39, 0xe8, 0x90, 0x45, 0x48, + 0x3d, 0x6d, 0x1f, 0x98, 0x03, 0x64, 0xe4, 0x8c, 0xd4, 0x80, 0x3e, 0x10, 0x02, 0xc9, 0xfd, 0x93, + 0x91, 0x59, 0x92, 0xb1, 0x31, 0xe9, 0x9c, 0x8c, 0xcc, 0xf2, 0xaf, 0x5c, 0xa1, 0x83, 0x50, 0x26, + 0xb9, 0x0e, 0xc9, 0x2f, 0xf7, 0xad, 0x0e, 0x1f, 0xe5, 0x35, 0x7f, 0x14, 0xd6, 0xbf, 0xf2, 0xe5, + 0xad, 0x9d, 0xc7, 0x46, 0xf2, 0x79, 0xdf, 0x42, 0xfb, 0xfb, 0xed, 0x83, 0x01, 0x35, 0x25, 0x51, + 0xfb, 0x0e, 0x7d, 0xa0, 0xad, 0xbb, 0xed, 0x71, 0x7b, 0x58, 0x4a, 0xa8, 0x52, 0x25, 0x65, 0xa4, + 0x46, 0xf4, 0x81, 0xdc, 0x87, 0x39, 0xc3, 0x7c, 0x71, 0xd4, 0x1f, 0x9b, 0x1d, 0x74, 0xae, 0x94, + 0x54, 0xe5, 0x4a, 0x7e, 0xda, 0x3e, 0x76, 0x1a, 0x73, 0x63, 0x11, 0xcb, 0xc8, 0x23, 0xb3, 0xed, + 0xb8, 0xe4, 0x94, 0x9a, 0x88, 0x25, 0x0b, 0x58, 0x4a, 0x6e, 0x8e, 0x9c, 0xbe, 0x6d, 0xb5, 0x07, + 0x8c, 0x9c, 0x56, 0xa5, 0x18, 0xb2, 0x2d, 0x62, 0xc9, 0x9b, 0x50, 0xdc, 0x68, 0x3d, 0xb4, 0xed, + 0x41, 0xcb, 0xf5, 0xa8, 0x04, 0xaa, 0x5c, 0xc9, 0x1a, 0x73, 0x5d, 0xda, 0xea, 0x4e, 0x89, 0x54, + 0x40, 0xd9, 0x68, 0x6d, 0x59, 0x4e, 0x55, 0xf3, 0x81, 0x79, 0x55, 0xae, 0xa4, 0x8c, 0xf9, 0x2e, + 0x36, 0x4f, 0x21, 0x6b, 0xba, 0x8f, 0x2c, 0xa8, 0x72, 0x25, 0xc1, 0x90, 0x35, 0xdd, 0x43, 0xde, + 0x02, 0xb2, 0xd1, 0xda, 0xe8, 0x1f, 0x9b, 0x1d, 0xd1, 0xea, 0x9c, 0x2a, 0x57, 0x32, 0x86, 0xd2, + 0xe5, 0x1d, 0x33, 0xd0, 0xa2, 0xe5, 0x79, 0x55, 0xae, 0xa4, 0x5d, 0xb4, 0x60, 0xfb, 0x06, 0x2c, + 0x6c, 0xb4, 0xde, 0xed, 0x07, 0x1d, 0x2e, 0xaa, 0x72, 0x65, 0xce, 0x28, 0x76, 0x59, 0xfb, 0x34, + 0x56, 0x34, 0xac, 0xa8, 0x72, 0x25, 0xc9, 0xb1, 0x82, 0x5d, 0x9c, 0xdd, 0xc6, 0xc0, 0x6e, 0x3b, + 0x3e, 
0x74, 0x41, 0x95, 0x2b, 0xb2, 0x31, 0xdf, 0xc5, 0xe6, 0xa0, 0xd5, 0xc7, 0xf6, 0xd1, 0xc1, + 0xc0, 0xf4, 0xa1, 0x44, 0x95, 0x2b, 0x92, 0x51, 0xec, 0xb2, 0xf6, 0x20, 0x76, 0xcf, 0x19, 0xf7, + 0xad, 0x9e, 0x8f, 0x3d, 0x8f, 0xfa, 0x2d, 0x76, 0x59, 0x7b, 0xd0, 0x83, 0x87, 0x27, 0x8e, 0x39, + 0xf1, 0xa1, 0xa6, 0x2a, 0x57, 0x0a, 0xc6, 0x7c, 0x17, 0x9b, 0x43, 0x56, 0x43, 0x6b, 0xd0, 0x55, + 0xe5, 0xca, 0x02, 0xb5, 0x3a, 0x63, 0x0d, 0xf6, 0x42, 0x6b, 0xd0, 0x53, 0xe5, 0x0a, 0xe1, 0x58, + 0x61, 0x0d, 0x44, 0xcd, 0x30, 0x21, 0x96, 0x16, 0xd5, 0x84, 0xa0, 0x19, 0xd6, 0x18, 0xd4, 0x0c, + 0x07, 0xbe, 0xa6, 0x26, 0x44, 0xcd, 0x84, 0x90, 0x38, 0x38, 0x47, 0x5e, 0x50, 0x13, 0xa2, 0x66, + 0x38, 0x32, 0xa4, 0x19, 0x8e, 0x7d, 0x5d, 0x4d, 0x04, 0x35, 0x33, 0x85, 0x16, 0x2d, 0x97, 0xd4, + 0x44, 0x50, 0x33, 0x1c, 0x1d, 0xd4, 0x0c, 0x07, 0x5f, 0x54, 0x13, 0x01, 0xcd, 0x84, 0xb1, 0xa2, + 0xe1, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x38, 0x3b, 0x57, 0x33, 0x1c, 0xba, 0xac, 0x26, 0x44, 0xcd, + 0x88, 0x56, 0x3d, 0xcd, 0x70, 0xe8, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x88, 0xf5, 0x34, 0xc3, 0xb1, + 0x97, 0xd5, 0x44, 0x40, 0x33, 0x1c, 0x7b, 0x5d, 0xd4, 0x0c, 0x87, 0x7e, 0x2c, 0xa9, 0x09, 0x51, + 0x34, 0x1c, 0x7a, 0x33, 0x20, 0x1a, 0x8e, 0xfd, 0x84, 0x62, 0x45, 0xd5, 0x84, 0xc1, 0xe2, 0x2a, + 0x7c, 0x4a, 0xc1, 0xa2, 0x6c, 0x38, 0xd8, 0x97, 0x8d, 0x1b, 0x82, 0x4a, 0x57, 0x54, 0xc9, 0x93, + 0x8d, 0x1b, 0xc3, 0x44, 0xd9, 0x78, 0xc0, 0xab, 0x18, 0x6a, 0xb9, 0x6c, 0xa6, 0x90, 0x35, 0xdd, + 0x47, 0xaa, 0xaa, 0xe4, 0xcb, 0xc6, 0x43, 0x06, 0x64, 0xe3, 0x61, 0xaf, 0xa9, 0x92, 0x28, 0x9b, + 0x19, 0x68, 0xd1, 0x72, 0x59, 0x95, 0x44, 0xd9, 0x78, 0x68, 0x51, 0x36, 0x1e, 0xf8, 0x0b, 0xaa, + 0x24, 0xc8, 0x66, 0x1a, 0x2b, 0x1a, 0xfe, 0xa2, 0x2a, 0x09, 0xb2, 0x09, 0xce, 0x8e, 0xc9, 0xc6, + 0x83, 0xbe, 0xa1, 0x4a, 0xbe, 0x6c, 0x82, 0x56, 0xb9, 0x6c, 0x3c, 0xe8, 0x9b, 0xaa, 0x24, 0xc8, + 0x26, 0x88, 0xe5, 0xb2, 0xf1, 0xb0, 0x6f, 0x61, 0x7e, 0x73, 0x65, 0xe3, 0x61, 0x05, 0xd9, 0x78, + 0xd0, 0xdf, 0xa1, 0xb9, 0xd0, 0x93, 0x8d, 
0x07, 0x15, 0x65, 0xe3, 0x61, 0x7f, 0x97, 0x62, 0x7d, + 0xd9, 0x4c, 0x83, 0xc5, 0x55, 0xf8, 0x3d, 0x0a, 0xf6, 0x65, 0xe3, 0x81, 0x57, 0xd0, 0x09, 0x2a, + 0x9b, 0x8e, 0xd9, 0x6d, 0x1f, 0x0d, 0xa8, 0xc4, 0x2a, 0x54, 0x37, 0xf5, 0xa4, 0x33, 0x3e, 0x32, + 0xa9, 0x27, 0xb6, 0x3d, 0x78, 0xec, 0xf6, 0x91, 0x15, 0x6a, 0x9c, 0xc9, 0xc7, 0x27, 0x5c, 0xa7, + 0xfa, 0xa9, 0xcb, 0x55, 0xcd, 0x28, 0x32, 0x0d, 0x4d, 0xe3, 0x6b, 0xba, 0x80, 0xbf, 0x41, 0x55, + 0x54, 0x97, 0x6b, 0x3a, 0xc3, 0xd7, 0x74, 0x1f, 0x5f, 0x85, 0xf3, 0xbe, 0x94, 0x7c, 0xc6, 0x4d, + 0xaa, 0xa5, 0x7a, 0xa2, 0xaa, 0xad, 0x1a, 0x0b, 0xae, 0xa0, 0x66, 0x91, 0x02, 0xc3, 0xdc, 0xa2, + 0x92, 0xaa, 0x27, 0x6a, 0xba, 0x47, 0x12, 0x47, 0xd2, 0xa8, 0x0c, 0xb9, 0xb0, 0x7c, 0xce, 0x6d, + 0xaa, 0xac, 0x7a, 0xb2, 0xaa, 0xad, 0xae, 0x1a, 0x0a, 0xd7, 0xd7, 0x0c, 0x4e, 0x60, 0x9c, 0x15, + 0xaa, 0xb0, 0x7a, 0xb2, 0xa6, 0x7b, 0x9c, 0xe0, 0x38, 0x0b, 0xae, 0xd0, 0x7c, 0xca, 0x97, 0xa8, + 0xd2, 0xea, 0xe9, 0xea, 0x9a, 0xbe, 0xb6, 0x7e, 0xcf, 0x28, 0x32, 0xc5, 0xf9, 0x1c, 0x9d, 0x8e, + 0xc3, 0x25, 0xe7, 0x93, 0x56, 0xa9, 0xe6, 0xea, 0x69, 0xed, 0xce, 0xda, 0x5d, 0xed, 0xae, 0xa1, + 0x70, 0xed, 0xf9, 0xac, 0x77, 0x28, 0x8b, 0x8b, 0xcf, 0x67, 0xad, 0x51, 0xf5, 0xd5, 0x95, 0x67, + 0xe6, 0x60, 0x60, 0xdf, 0x52, 0xcb, 0x2f, 0xed, 0xf1, 0xa0, 0x73, 0xad, 0x0c, 0x86, 0xc2, 0xf5, + 0x28, 0x8e, 0xba, 0xe0, 0x0a, 0xd2, 0xa7, 0xff, 0x1a, 0x3d, 0x87, 0x15, 0xea, 0x99, 0x87, 0xfd, + 0x9e, 0x65, 0x4f, 0x4c, 0xa3, 0xc8, 0xa4, 0x19, 0x5a, 0x93, 0xbd, 0xf0, 0x3a, 0xfe, 0x3a, 0xa5, + 0x2d, 0xd4, 0x13, 0xb7, 0xab, 0x1a, 0x1d, 0x69, 0xd6, 0x3a, 0xee, 0x85, 0xd7, 0xf1, 0x37, 0x28, + 0x87, 0xd4, 0x13, 0xb7, 0x6b, 0x3a, 0xe7, 0x88, 0xeb, 0x78, 0x07, 0x2e, 0x84, 0xf2, 0x62, 0x6b, + 0xd4, 0x3e, 0x7c, 0x6e, 0x76, 0x4a, 0x1a, 0x4d, 0x8f, 0x0f, 0x65, 0x45, 0x32, 0xce, 0x07, 0x52, + 0xe4, 0x2e, 0x76, 0x93, 0x7b, 0xf0, 0x7a, 0x38, 0x51, 0xba, 0xcc, 0x2a, 0xcd, 0x97, 0xc8, 0x5c, + 0x0c, 0xe6, 0xcc, 0x10, 0x55, 0x08, 0xc0, 0x2e, 0x55, 0xa7, 0x09, 0xd4, 0xa7, 
0xfa, 0x91, 0x98, + 0x53, 0x7f, 0x06, 0x2e, 0x4e, 0xa7, 0x52, 0x97, 0xbc, 0x4e, 0x33, 0x2a, 0x92, 0x2f, 0x84, 0xb3, + 0xea, 0x14, 0x7d, 0xc6, 0xd8, 0x35, 0x9a, 0x62, 0x45, 0xfa, 0xd4, 0xe8, 0xf7, 0xa1, 0x34, 0x95, + 0x6c, 0x5d, 0xf6, 0x1d, 0x9a, 0x73, 0x91, 0xfd, 0x5a, 0x28, 0xef, 0x86, 0xc9, 0x33, 0x86, 0xbe, + 0x4b, 0x93, 0xb0, 0x40, 0x9e, 0x1a, 0x19, 0x97, 0x2c, 0x98, 0x8e, 0x5d, 0xee, 0x3d, 0x9a, 0x95, + 0xf9, 0x92, 0x05, 0x32, 0xb3, 0x38, 0x6e, 0x28, 0x3f, 0xbb, 0xdc, 0x3a, 0x4d, 0xd3, 0x7c, 0xdc, + 0x60, 0xaa, 0xe6, 0xe4, 0xb7, 0x29, 0x79, 0x6f, 0xf6, 0x8c, 0x7f, 0x9c, 0xa0, 0x09, 0x96, 0xb3, + 0xf7, 0x66, 0x4d, 0xd9, 0x63, 0xcf, 0x98, 0xf2, 0x4f, 0x28, 0x9b, 0x08, 0xec, 0xa9, 0x39, 0x3f, + 0x06, 0xaf, 0xe2, 0xe8, 0x8d, 0xed, 0xa3, 0x51, 0x69, 0x43, 0x95, 0x2b, 0xa0, 0x5d, 0x99, 0xaa, + 0x7e, 0xdc, 0x43, 0xde, 0x26, 0x45, 0x19, 0x41, 0x12, 0xb3, 0xc2, 0xec, 0x32, 0x2b, 0xbb, 0x6a, + 0x22, 0xc2, 0x0a, 0x43, 0x79, 0x56, 0x04, 0x12, 0xb5, 0xe2, 0x06, 0x7d, 0x66, 0xe5, 0x03, 0x55, + 0x9a, 0x69, 0xc5, 0x4d, 0x01, 0xdc, 0x4a, 0x80, 0xb4, 0xb4, 0xee, 0xd7, 0x5b, 0xd8, 0x4f, 0xbe, + 0x18, 0x2e, 0xc0, 0x36, 0xf1, 0xfc, 0x1c, 0xac, 0xb4, 0x18, 0x4d, 0x70, 0x6e, 0x9a, 0xf6, 0xb3, + 0x11, 0xb4, 0x80, 0x37, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff, 0xa6, 0x04, 0x49, 0x5a, 0x4f, + 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6e, 0x3d, 0x56, 0xce, 0xd1, 0x5f, 0x0f, 0x9b, 0xcd, 0xa7, 0x8a, + 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xfd, 0xc6, 0x9e, 0x22, 0x93, 0x22, 0xe4, 0x37, 0xb6, 0x76, + 0x36, 0x1b, 0xc6, 0xae, 0xb1, 0xb5, 0xb3, 0xaf, 0x24, 0x68, 0xdf, 0xc6, 0xd3, 0xe6, 0x83, 0x7d, + 0x25, 0x49, 0x32, 0x90, 0xa0, 0x6d, 0x29, 0x02, 0x90, 0xde, 0xdb, 0x37, 0xb6, 0x76, 0x36, 0x95, + 0x34, 0xb5, 0xb2, 0xbf, 0xb5, 0xdd, 0x50, 0x32, 0x14, 0xb9, 0xff, 0xee, 0xee, 0xd3, 0x86, 0x92, + 0xa5, 0x3f, 0x1f, 0x18, 0xc6, 0x83, 0xaf, 0x28, 0x39, 0x4a, 0xda, 0x7e, 0xb0, 0xab, 0x00, 0x76, + 0x3f, 0x78, 0xf8, 0xb4, 0xa1, 0xe4, 0x49, 0x01, 0xb2, 0x1b, 0xef, 0xee, 0x3c, 0xda, 0xdf, 0x6a, + 0xee, 0x28, 0x85, 
0xf2, 0x6f, 0xc9, 0x00, 0x9b, 0xf6, 0xde, 0xf3, 0xfe, 0x08, 0xab, 0xe2, 0xcb, + 0x00, 0x93, 0xe7, 0xfd, 0x51, 0x0b, 0xa5, 0xc7, 0x2b, 0xbb, 0x1c, 0x6d, 0xc1, 0xa0, 0x43, 0xae, + 0x41, 0x01, 0xbb, 0xbb, 0x2c, 0x14, 0x60, 0x41, 0x97, 0x31, 0xf2, 0xb4, 0x8d, 0x47, 0x87, 0x20, + 0xa4, 0xa6, 0x63, 0x1d, 0x97, 0x16, 0x20, 0x35, 0x9d, 0x5c, 0x05, 0x7c, 0x6c, 0x4d, 0x30, 0xac, + 0x63, 0xed, 0x96, 0x33, 0x70, 0x5c, 0x16, 0xe8, 0xc9, 0xdb, 0x80, 0x63, 0x32, 0x59, 0x14, 0xa7, + 0x25, 0xea, 0xba, 0xbb, 0x42, 0x7f, 0x30, 0x59, 0xf8, 0x84, 0xa5, 0x26, 0xe4, 0xbc, 0x76, 0x3a, + 0x16, 0xb6, 0xf2, 0x19, 0x29, 0x38, 0x23, 0xc0, 0x26, 0x6f, 0x4a, 0x0c, 0xc0, 0xbd, 0x59, 0x40, + 0x6f, 0x18, 0x89, 0xb9, 0x53, 0xbe, 0x0c, 0x73, 0x3b, 0xb6, 0xc5, 0xb6, 0x10, 0xae, 0x52, 0x01, + 0xa4, 0x76, 0x49, 0xc2, 0x12, 0x46, 0x6a, 0x97, 0xaf, 0x00, 0x08, 0x7d, 0x0a, 0x48, 0x07, 0xac, + 0x0f, 0x37, 0xa2, 0x74, 0x50, 0xbe, 0x09, 0xe9, 0xed, 0xf6, 0xf1, 0x7e, 0xbb, 0x47, 0xae, 0x01, + 0x0c, 0xda, 0x13, 0xa7, 0xd5, 0x45, 0xa9, 0x7c, 0xfe, 0xf9, 0xe7, 0x9f, 0x4b, 0x78, 0xe2, 0xca, + 0xd1, 0x56, 0x26, 0x95, 0x17, 0x00, 0xcd, 0x41, 0x67, 0xdb, 0x9c, 0x4c, 0xda, 0x3d, 0x93, 0x54, + 0x21, 0x6d, 0x99, 0x13, 0x9a, 0x72, 0x24, 0x2c, 0xe6, 0x97, 0xfd, 0x55, 0xf0, 0x51, 0x2b, 0x3b, + 0x08, 0x31, 0x38, 0x94, 0x28, 0x90, 0xb0, 0x8e, 0x86, 0x78, 0x59, 0x91, 0x32, 0xe8, 0xcf, 0xa5, + 0x4b, 0x90, 0x66, 0x18, 0x42, 0x20, 0x69, 0xb5, 0x87, 0x66, 0x89, 0x8d, 0x8b, 0xbf, 0xcb, 0xbf, + 0x2a, 0x01, 0xec, 0x98, 0x2f, 0xcf, 0x30, 0xa6, 0x8f, 0x8a, 0x19, 0x33, 0xc1, 0xc6, 0xbc, 0x1f, + 0x37, 0x26, 0xd5, 0x59, 0xd7, 0xb6, 0x3b, 0x2d, 0xf6, 0x8a, 0xd9, 0xbd, 0x4a, 0x8e, 0xb6, 0xe0, + 0x5b, 0x2b, 0x7f, 0x00, 0x85, 0x2d, 0xcb, 0x32, 0xc7, 0xae, 0x4f, 0x04, 0x92, 0xcf, 0xec, 0x89, + 0xc3, 0x2f, 0x78, 0xf0, 0x37, 0x29, 0x41, 0x72, 0x64, 0x8f, 0x1d, 0x36, 0xcf, 0x7a, 0x52, 0x5f, + 0x5d, 0x5d, 0x35, 0xb0, 0x85, 0x5c, 0x82, 0xdc, 0xa1, 0x6d, 0x59, 0xe6, 0x21, 0x9d, 0x44, 0x02, + 0x6b, 0x0b, 0xbf, 0xa1, 0xfc, 0xcb, 0x12, 0x14, 0x9a, 
0xce, 0x33, 0xdf, 0xb8, 0x02, 0x89, 0xe7, + 0xe6, 0x09, 0xba, 0x97, 0x30, 0xe8, 0x4f, 0xb2, 0x08, 0xa9, 0x9f, 0x6f, 0x0f, 0x8e, 0xd8, 0x85, + 0x4f, 0xc1, 0x60, 0x0f, 0xe4, 0x02, 0xa4, 0x5f, 0x9a, 0xfd, 0xde, 0x33, 0x07, 0x6d, 0xca, 0x06, + 0x7f, 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0xce, 0x96, 0x92, 0xb8, 0x5e, 0x17, 0xfc, 0xf5, 0x12, 0xe7, + 0x60, 0x30, 0xd0, 0x8d, 0x6c, 0xb6, 0xa3, 0x7c, 0xf4, 0xd1, 0x47, 0x1f, 0xc9, 0xe5, 0x2e, 0x2c, + 0xba, 0xb1, 0x23, 0x30, 0xd9, 0x1d, 0x28, 0x0d, 0x4c, 0xbb, 0xd5, 0xed, 0x5b, 0xed, 0xc1, 0xe0, + 0xa4, 0xf5, 0xd2, 0xb6, 0x5a, 0x6d, 0xab, 0x65, 0x4f, 0x0e, 0xdb, 0x63, 0x5c, 0x80, 0xe8, 0x21, + 0x16, 0x07, 0xa6, 0xbd, 0xc1, 0x68, 0xef, 0xdb, 0xd6, 0x03, 0xab, 0x49, 0x39, 0xe5, 0x3f, 0x48, + 0x42, 0x6e, 0xfb, 0xc4, 0xb5, 0xbe, 0x08, 0xa9, 0x43, 0xfb, 0xc8, 0x62, 0x6b, 0x99, 0x32, 0xd8, + 0x83, 0xf7, 0x8e, 0x64, 0xe1, 0x1d, 0x2d, 0x42, 0xea, 0xc5, 0x91, 0xed, 0x98, 0x38, 0xdd, 0x9c, + 0xc1, 0x1e, 0xe8, 0x6a, 0x8d, 0x4c, 0xa7, 0x94, 0xc4, 0x0a, 0x93, 0xfe, 0xf4, 0xe7, 0x9f, 0x3a, + 0xc3, 0xfc, 0xc9, 0x0a, 0xa4, 0x6d, 0xba, 0xfa, 0x93, 0x52, 0x1a, 0x2f, 0xb7, 0x04, 0xb8, 0xf8, + 0x56, 0x0c, 0x8e, 0x22, 0x5b, 0xb0, 0xf0, 0xd2, 0x6c, 0x0d, 0x8f, 0x26, 0x4e, 0xab, 0x67, 0xb7, + 0x3a, 0xa6, 0x39, 0x32, 0xc7, 0xa5, 0x39, 0x1c, 0x49, 0x88, 0x09, 0xb3, 0x16, 0xd2, 0x98, 0x7f, + 0x69, 0x6e, 0x1f, 0x4d, 0x9c, 0x4d, 0xfb, 0x31, 0xb2, 0x48, 0x15, 0x72, 0x63, 0x93, 0x46, 0x02, + 0xea, 0x6c, 0x21, 0x3c, 0x7a, 0x80, 0x9a, 0x1d, 0x9b, 0x23, 0x6c, 0x20, 0xeb, 0x90, 0x3d, 0xe8, + 0x3f, 0x37, 0x27, 0xcf, 0xcc, 0x4e, 0x29, 0xa3, 0x4a, 0x95, 0x79, 0xed, 0xa2, 0xcf, 0xf1, 0x96, + 0x75, 0xe5, 0x91, 0x3d, 0xb0, 0xc7, 0x86, 0x07, 0x25, 0xf7, 0x21, 0x37, 0xb1, 0x87, 0x26, 0xd3, + 0x77, 0x16, 0x33, 0xdb, 0xe5, 0x59, 0xbc, 0x3d, 0x7b, 0x68, 0xba, 0x11, 0xcc, 0xc5, 0x93, 0x65, + 0xe6, 0xe8, 0x01, 0x3d, 0xbf, 0x96, 0x00, 0xeb, 0x73, 0xea, 0x10, 0x9e, 0x67, 0xc9, 0x12, 0x75, + 0xa8, 0xd7, 0xa5, 0xc7, 0x92, 0x52, 0x1e, 0x8b, 0x3b, 0xef, 0x79, 0xe9, 0x16, 0xe4, 0x3c, 
0x83, + 0x7e, 0xe8, 0x63, 0xe1, 0x26, 0x87, 0xf1, 0x80, 0x85, 0x3e, 0x16, 0x6b, 0xde, 0x80, 0x14, 0xba, + 0x4d, 0xd3, 0x84, 0xd1, 0xa0, 0x59, 0x29, 0x07, 0xa9, 0x4d, 0xa3, 0xd1, 0xd8, 0x51, 0x24, 0x4c, + 0x50, 0x4f, 0xdf, 0x6d, 0x28, 0xb2, 0xa0, 0xd8, 0xdf, 0x96, 0x20, 0xd1, 0x38, 0x46, 0xb5, 0xd0, + 0x69, 0xb8, 0x3b, 0x9a, 0xfe, 0xd6, 0x6a, 0x90, 0x1c, 0xda, 0x63, 0x93, 0x9c, 0x9f, 0x31, 0xcb, + 0x52, 0x0f, 0xdf, 0x97, 0x70, 0x95, 0xdb, 0x38, 0x76, 0x0c, 0xc4, 0x6b, 0x6f, 0x41, 0xd2, 0x31, + 0x8f, 0x9d, 0xd9, 0xbc, 0x67, 0x6c, 0x00, 0x0a, 0xd0, 0x6e, 0x42, 0xda, 0x3a, 0x1a, 0x1e, 0x98, + 0xe3, 0xd9, 0xd0, 0x3e, 0x4e, 0x8f, 0x43, 0xca, 0xef, 0x81, 0xf2, 0xc8, 0x1e, 0x8e, 0x06, 0xe6, + 0x71, 0xe3, 0xd8, 0x31, 0xad, 0x49, 0xdf, 0xb6, 0xa8, 0x9e, 0xbb, 0xfd, 0x31, 0x46, 0x11, 0xbc, + 0xb0, 0xc5, 0x07, 0xba, 0xab, 0x27, 0xe6, 0xa1, 0x6d, 0x75, 0x78, 0xc0, 0xe4, 0x4f, 0x14, 0xed, + 0x3c, 0xeb, 0x8f, 0x69, 0x00, 0xa1, 0x71, 0x9e, 0x3d, 0x94, 0x37, 0xa1, 0xc8, 0x0f, 0xfa, 0x13, + 0x3e, 0x70, 0xf9, 0x06, 0x14, 0xdc, 0x26, 0xbc, 0xbd, 0xce, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x54, + 0xce, 0xd1, 0x65, 0x6d, 0xee, 0x34, 0x14, 0x89, 0xfe, 0xd8, 0x7f, 0xbf, 0x19, 0x58, 0xca, 0x4b, + 0x50, 0xf0, 0x7c, 0xdf, 0x33, 0x1d, 0xec, 0xa1, 0x09, 0x21, 0x53, 0x97, 0xb3, 0x52, 0x39, 0x03, + 0xa9, 0xc6, 0x70, 0xe4, 0x9c, 0x94, 0x7f, 0x11, 0xf2, 0x1c, 0xf4, 0xb4, 0x3f, 0x71, 0xc8, 0x1d, + 0xc8, 0x0c, 0xf9, 0x7c, 0x25, 0x3c, 0x73, 0x89, 0x9a, 0xf2, 0x71, 0xee, 0x6f, 0xc3, 0x45, 0x2f, + 0x55, 0x21, 0x23, 0xc4, 0x52, 0xbe, 0xd5, 0x65, 0x71, 0xab, 0xb3, 0xa0, 0x90, 0x10, 0x82, 0x42, + 0x79, 0x1b, 0x32, 0x2c, 0x03, 0x4e, 0x30, 0xab, 0xb3, 0x7a, 0x8d, 0x89, 0x89, 0xbd, 0xf9, 0x3c, + 0x6b, 0x63, 0x57, 0xc8, 0x57, 0x21, 0x8f, 0x82, 0xe5, 0x08, 0x16, 0x3a, 0x01, 0x9b, 0x98, 0xdc, + 0x7e, 0x3f, 0x05, 0x59, 0x77, 0xa5, 0xc8, 0x32, 0xa4, 0x59, 0x91, 0x84, 0xa6, 0xdc, 0x22, 0x3e, + 0x85, 0x65, 0x11, 0x59, 0x86, 0x0c, 0x2f, 0x84, 0x78, 0x74, 0xa7, 0x15, 0x7b, 0x9a, 0x15, 0x3e, + 0x5e, 0x67, 0x4d, 0xc7, 0xc0, 
0xc4, 0xca, 0xf3, 0x34, 0x2b, 0x6d, 0x88, 0x0a, 0x39, 0xaf, 0x98, + 0xc1, 0x78, 0xcc, 0x6b, 0xf1, 0xac, 0x5b, 0xbd, 0x08, 0x88, 0x9a, 0x8e, 0x11, 0x8b, 0x17, 0xde, + 0xd9, 0xae, 0x7f, 0x3c, 0xc9, 0xba, 0x25, 0x09, 0xde, 0xa1, 0xbb, 0x55, 0x76, 0x86, 0x17, 0x21, + 0x3e, 0xa0, 0xa6, 0x63, 0x48, 0x70, 0x4b, 0xea, 0x0c, 0x2f, 0x34, 0xc8, 0x55, 0xea, 0x22, 0x16, + 0x0e, 0xb8, 0xf5, 0xfd, 0xfa, 0x39, 0xcd, 0xca, 0x09, 0x72, 0x8d, 0x5a, 0x60, 0xd5, 0x01, 0xee, + 0x4b, 0xbf, 0x58, 0xce, 0xf0, 0xa2, 0x81, 0xdc, 0xa4, 0x10, 0xb6, 0xfc, 0x25, 0x88, 0xa8, 0x8c, + 0x33, 0xbc, 0x32, 0x26, 0x2a, 0x1d, 0x10, 0xc3, 0x03, 0x86, 0x04, 0xa1, 0x0a, 0x4e, 0xb3, 0x2a, + 0x98, 0x5c, 0x41, 0x73, 0x6c, 0x52, 0x05, 0xbf, 0xe2, 0xcd, 0xf0, 0x2a, 0xc3, 0xef, 0xc7, 0x23, + 0x9b, 0x57, 0xdd, 0x66, 0x78, 0x1d, 0x41, 0x6a, 0xf4, 0x7d, 0x51, 0x7d, 0x97, 0xe6, 0x31, 0x08, + 0x96, 0x7c, 0xe1, 0xb9, 0xef, 0x94, 0xc5, 0xc0, 0x3a, 0x8b, 0x20, 0x46, 0xaa, 0x8b, 0xbb, 0x61, + 0x89, 0xf2, 0x76, 0xfb, 0x56, 0xb7, 0x54, 0xc4, 0x95, 0x48, 0xf4, 0xad, 0xae, 0x91, 0xea, 0xd2, + 0x16, 0xa6, 0x81, 0x1d, 0xda, 0xa7, 0x60, 0x5f, 0xf2, 0x36, 0xeb, 0xa4, 0x4d, 0xa4, 0x04, 0xa9, + 0x8d, 0xd6, 0x4e, 0xdb, 0x2a, 0x2d, 0x30, 0x9e, 0xd5, 0xb6, 0x8c, 0x64, 0x77, 0xa7, 0x6d, 0x91, + 0xb7, 0x20, 0x31, 0x39, 0x3a, 0x28, 0x91, 0xf0, 0xe7, 0x8d, 0xbd, 0xa3, 0x03, 0xd7, 0x15, 0x83, + 0x22, 0xc8, 0x32, 0x64, 0x27, 0xce, 0xb8, 0xf5, 0x0b, 0xe6, 0xd8, 0x2e, 0x9d, 0xc7, 0x25, 0x3c, + 0x67, 0x64, 0x26, 0xce, 0xf8, 0x03, 0x73, 0x6c, 0x9f, 0x31, 0xf8, 0x95, 0xaf, 0x40, 0x5e, 0xb0, + 0x4b, 0x8a, 0x20, 0x59, 0xec, 0xa4, 0x50, 0x97, 0xee, 0x18, 0x92, 0x55, 0xde, 0x87, 0x82, 0x5b, + 0x48, 0xe0, 0x7c, 0x35, 0xba, 0x93, 0x06, 0xf6, 0x18, 0xf7, 0xe7, 0xbc, 0x76, 0x49, 0x4c, 0x51, + 0x3e, 0x8c, 0xa7, 0x0b, 0x06, 0x2d, 0x2b, 0x21, 0x57, 0xa4, 0xf2, 0x0f, 0x25, 0x28, 0x6c, 0xdb, + 0x63, 0xff, 0x96, 0x77, 0x11, 0x52, 0x07, 0xb6, 0x3d, 0x98, 0xa0, 0xd9, 0xac, 0xc1, 0x1e, 0xc8, + 0x1b, 0x50, 0xc0, 0x1f, 0x6e, 0x01, 0x28, 0x7b, 0xf7, 0x0b, 0x79, 
0x6c, 0xe7, 0x55, 0x1f, 0x81, + 0x64, 0xdf, 0x72, 0x26, 0x3c, 0x92, 0xe1, 0x6f, 0xf2, 0x05, 0xc8, 0xd3, 0xbf, 0x2e, 0x33, 0xe9, + 0x1d, 0x58, 0x81, 0x36, 0x73, 0xe2, 0x5b, 0x30, 0x87, 0x6f, 0xdf, 0x83, 0x65, 0xbc, 0xbb, 0x84, + 0x02, 0xeb, 0xe0, 0xc0, 0x12, 0x64, 0x58, 0x28, 0x98, 0xe0, 0x27, 0xab, 0x9c, 0xe1, 0x3e, 0xd2, + 0xf0, 0x8a, 0x95, 0x00, 0x4b, 0xf7, 0x19, 0x83, 0x3f, 0x95, 0x1f, 0x40, 0x16, 0xb3, 0x54, 0x73, + 0xd0, 0x21, 0x65, 0x90, 0x7a, 0x25, 0x13, 0x73, 0xe4, 0xa2, 0x70, 0xcc, 0xe7, 0xdd, 0x2b, 0x9b, + 0x86, 0xd4, 0x5b, 0x5a, 0x00, 0x69, 0x93, 0x9e, 0xbb, 0x8f, 0x79, 0x98, 0x96, 0x8e, 0xcb, 0x4d, + 0x6e, 0x62, 0xc7, 0x7c, 0x19, 0x67, 0x62, 0xc7, 0x7c, 0xc9, 0x4c, 0x5c, 0x9d, 0x32, 0x41, 0x9f, + 0x4e, 0xf8, 0xf7, 0x3b, 0xe9, 0x84, 0x9e, 0xf3, 0x71, 0x7b, 0xf6, 0xad, 0xde, 0xae, 0xdd, 0xb7, + 0xf0, 0x9c, 0xdf, 0xc5, 0x73, 0x92, 0x64, 0x48, 0xdd, 0xf2, 0x67, 0x49, 0x98, 0xe7, 0x41, 0xf4, + 0xfd, 0xbe, 0xf3, 0x6c, 0xbb, 0x3d, 0x22, 0x4f, 0xa1, 0x40, 0xe3, 0x67, 0x6b, 0xd8, 0x1e, 0x8d, + 0xe8, 0x46, 0x95, 0xf0, 0x50, 0x71, 0x7d, 0x2a, 0x28, 0x73, 0xfc, 0xca, 0x4e, 0x7b, 0x68, 0x6e, + 0x33, 0x6c, 0xc3, 0x72, 0xc6, 0x27, 0x46, 0xde, 0xf2, 0x5b, 0xc8, 0x16, 0xe4, 0x87, 0x93, 0x9e, + 0x67, 0x4c, 0x46, 0x63, 0x95, 0x48, 0x63, 0xdb, 0x93, 0x5e, 0xc0, 0x16, 0x0c, 0xbd, 0x06, 0xea, + 0x18, 0x8d, 0xbc, 0x9e, 0xad, 0xc4, 0x29, 0x8e, 0xd1, 0x20, 0x11, 0x74, 0xec, 0xc0, 0x6f, 0x21, + 0x8f, 0x01, 0xe8, 0x46, 0x72, 0x6c, 0x5a, 0x24, 0xa1, 0x56, 0xf2, 0xda, 0x9b, 0x91, 0xb6, 0xf6, + 0x9c, 0xf1, 0xbe, 0xbd, 0xe7, 0x8c, 0x99, 0x21, 0xba, 0x05, 0xf1, 0x71, 0xe9, 0x1d, 0x50, 0xc2, + 0xf3, 0x17, 0xcf, 0xde, 0xa9, 0x19, 0x67, 0xef, 0x1c, 0x3f, 0x7b, 0xd7, 0xe5, 0xbb, 0xd2, 0xd2, + 0x7b, 0x50, 0x0c, 0x4d, 0x59, 0xa4, 0x13, 0x46, 0xbf, 0x2d, 0xd2, 0xf3, 0xda, 0xeb, 0xc2, 0xd7, + 0x63, 0xf1, 0xd5, 0x8a, 0x76, 0xdf, 0x01, 0x25, 0x3c, 0x7d, 0xd1, 0x70, 0x36, 0xa6, 0x26, 0x40, + 0xfe, 0x7d, 0x98, 0x0b, 0x4c, 0x59, 0x24, 0xe7, 0x4e, 0x99, 0x54, 0xf9, 0x97, 0x52, 0x90, 0x6a, + 0x5a, 
0xa6, 0xdd, 0x25, 0xaf, 0x07, 0x33, 0xe2, 0x93, 0x73, 0x6e, 0x36, 0xbc, 0x18, 0xca, 0x86, + 0x4f, 0xce, 0x79, 0xb9, 0xf0, 0x62, 0x28, 0x17, 0xba, 0x5d, 0x35, 0x9d, 0x5c, 0x9e, 0xca, 0x84, + 0x4f, 0xce, 0x09, 0x69, 0xf0, 0xf2, 0x54, 0x1a, 0xf4, 0xbb, 0x6b, 0x3a, 0x0d, 0x9d, 0xc1, 0x1c, + 0xf8, 0xe4, 0x9c, 0x9f, 0xff, 0x96, 0xc3, 0xf9, 0xcf, 0xeb, 0xac, 0xe9, 0xcc, 0x25, 0x21, 0xf7, + 0xa1, 0x4b, 0x2c, 0xeb, 0x2d, 0x87, 0xb3, 0x1e, 0xf2, 0x78, 0xbe, 0x5b, 0x0e, 0xe7, 0x3b, 0xec, + 0xe4, 0xf9, 0xed, 0x62, 0x28, 0xbf, 0xa1, 0x51, 0x96, 0xd8, 0x96, 0xc3, 0x89, 0x8d, 0xf1, 0x04, + 0x4f, 0xc5, 0xac, 0xe6, 0x75, 0xd6, 0x74, 0xa2, 0x85, 0x52, 0x5a, 0xf4, 0xb9, 0x1e, 0xdf, 0x05, + 0x86, 0x77, 0x9d, 0x2e, 0x9b, 0x7b, 0xe4, 0x2c, 0xc6, 0x7c, 0x60, 0xc7, 0xd5, 0x74, 0x8f, 0x5c, + 0x1a, 0x64, 0xba, 0xbc, 0xd4, 0x55, 0x30, 0x46, 0x09, 0xb2, 0xc4, 0x97, 0xbf, 0xb2, 0xd1, 0xc2, + 0x58, 0x85, 0xf3, 0x62, 0xa7, 0xf7, 0x0a, 0xcc, 0x6d, 0xb4, 0x9e, 0xb6, 0xc7, 0x3d, 0x73, 0xe2, + 0xb4, 0xf6, 0xdb, 0x3d, 0xef, 0xba, 0x80, 0xbe, 0xff, 0x7c, 0x97, 0xf7, 0xec, 0xb7, 0x7b, 0xe4, + 0x82, 0x2b, 0xae, 0x0e, 0xf6, 0x4a, 0x5c, 0x5e, 0x4b, 0xaf, 0xd3, 0x45, 0x63, 0xc6, 0x30, 0xea, + 0x2d, 0xf0, 0xa8, 0xf7, 0x30, 0x03, 0xa9, 0x23, 0xab, 0x6f, 0x5b, 0x0f, 0x73, 0x90, 0x71, 0xec, + 0xf1, 0xb0, 0xed, 0xd8, 0xe5, 0x1f, 0x49, 0x00, 0x8f, 0xec, 0xe1, 0xf0, 0xc8, 0xea, 0xbf, 0x38, + 0x32, 0xc9, 0x15, 0xc8, 0x0f, 0xdb, 0xcf, 0xcd, 0xd6, 0xd0, 0x6c, 0x1d, 0x8e, 0xdd, 0x7d, 0x90, + 0xa3, 0x4d, 0xdb, 0xe6, 0xa3, 0xf1, 0x09, 0x29, 0xb9, 0x87, 0x71, 0xd4, 0x0e, 0x4a, 0x92, 0x1f, + 0xce, 0x17, 0xf9, 0xf1, 0x32, 0xcd, 0xdf, 0xa1, 0x7b, 0xc0, 0x64, 0x15, 0x43, 0x86, 0xbf, 0x3d, + 0x7c, 0xa2, 0x92, 0x77, 0xcc, 0xe1, 0xa8, 0x75, 0x88, 0x52, 0xa1, 0x72, 0x48, 0xd1, 0xe7, 0x47, + 0xe4, 0x36, 0x24, 0x0e, 0xed, 0x01, 0x8a, 0xe4, 0x94, 0xf7, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, + 0x27, 0x4c, 0x36, 0x79, 0x6d, 0x41, 0x38, 0x11, 0xb0, 0x24, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, + 0xf7, 0x8d, 0x22, 0x24, 0x36, 0x9a, 0x4d, 
0x9a, 0xe5, 0x37, 0x9a, 0xcd, 0x35, 0x45, 0xaa, 0x7f, + 0x09, 0xb2, 0xbd, 0xb1, 0x69, 0xd2, 0xf0, 0x30, 0xbb, 0xba, 0xf8, 0x10, 0xb3, 0x9a, 0x07, 0xaa, + 0x6f, 0x43, 0xe6, 0x90, 0xd5, 0x17, 0x24, 0xa2, 0x80, 0x2d, 0xfd, 0x21, 0xbb, 0x3e, 0x59, 0xf2, + 0xbb, 0xc3, 0x15, 0x89, 0xe1, 0xda, 0xa8, 0xef, 0x42, 0x6e, 0xdc, 0x3a, 0xcd, 0xe0, 0xc7, 0x2c, + 0xbb, 0xc4, 0x19, 0xcc, 0x8e, 0x79, 0x53, 0xbd, 0x01, 0x0b, 0x96, 0xed, 0x7e, 0xb2, 0x68, 0x75, + 0xd8, 0x1e, 0xbb, 0x38, 0x7d, 0x68, 0x73, 0x8d, 0x9b, 0xec, 0x33, 0xa1, 0x65, 0xf3, 0x0e, 0xb6, + 0x2b, 0xeb, 0x8f, 0x40, 0x11, 0xcc, 0x60, 0x91, 0x19, 0x67, 0xa5, 0xcb, 0xbe, 0x4b, 0x7a, 0x56, + 0x70, 0xdf, 0x87, 0x8c, 0xb0, 0x9d, 0x19, 0x63, 0xa4, 0xc7, 0x3e, 0xf2, 0x7a, 0x46, 0x30, 0xd4, + 0x4d, 0x1b, 0xa1, 0xb1, 0x26, 0xda, 0xc8, 0x33, 0xf6, 0xfd, 0x57, 0x34, 0x52, 0xd3, 0x43, 0xab, + 0x72, 0x74, 0xaa, 0x2b, 0x7d, 0xf6, 0xf9, 0xd6, 0xb3, 0xc2, 0x02, 0xe0, 0x0c, 0x33, 0xf1, 0xce, + 0x7c, 0xc8, 0xbe, 0xec, 0x06, 0xcc, 0x4c, 0x79, 0x33, 0x39, 0xd5, 0x9b, 0xe7, 0xec, 0x33, 0xaa, + 0x67, 0x66, 0x6f, 0x96, 0x37, 0x93, 0x53, 0xbd, 0x19, 0xb0, 0x0f, 0xac, 0x01, 0x33, 0x35, 0xbd, + 0xbe, 0x09, 0x44, 0x7c, 0xd5, 0x3c, 0x4f, 0xc4, 0xd8, 0x19, 0xb2, 0xcf, 0xe6, 0xfe, 0xcb, 0x66, + 0x94, 0x59, 0x86, 0xe2, 0x1d, 0xb2, 0xd8, 0x17, 0xf5, 0xa0, 0xa1, 0x9a, 0x5e, 0xdf, 0x82, 0xf3, + 0xe2, 0xc4, 0xce, 0xe0, 0x92, 0xad, 0x4a, 0x95, 0xa2, 0xb1, 0xe0, 0x4f, 0x8d, 0x73, 0x66, 0x9a, + 0x8a, 0x77, 0x6a, 0xa4, 0x4a, 0x15, 0x65, 0xca, 0x54, 0x4d, 0xaf, 0x3f, 0x80, 0xa2, 0x60, 0xea, + 0x00, 0x33, 0x74, 0xb4, 0x99, 0x17, 0xec, 0x5f, 0x1b, 0x3c, 0x33, 0x34, 0xa3, 0x87, 0xdf, 0x18, + 0xcf, 0x71, 0xd1, 0x46, 0xc6, 0xec, 0xbb, 0xbc, 0xef, 0x0b, 0x32, 0x42, 0x5b, 0x02, 0x2b, 0xed, + 0x38, 0x2b, 0x13, 0xf6, 0xc5, 0xde, 0x77, 0x85, 0x12, 0xea, 0xfd, 0xc0, 0x74, 0x4c, 0x9a, 0xe4, + 0x62, 0x6c, 0x38, 0x18, 0x91, 0xdf, 0x8c, 0x04, 0xac, 0x88, 0x57, 0x21, 0xc2, 0xb4, 0xe9, 0x63, + 0x7d, 0x0b, 0xe6, 0xcf, 0x1e, 0x90, 0x3e, 0x96, 0x58, 0x5d, 0x5c, 0x5d, 0xa1, 
0xa5, 0xb3, 0x31, + 0xd7, 0x09, 0xc4, 0xa5, 0x06, 0xcc, 0x9d, 0x39, 0x28, 0x7d, 0x22, 0xb1, 0xea, 0x92, 0x5a, 0x32, + 0x0a, 0x9d, 0x60, 0x64, 0x9a, 0x3b, 0x73, 0x58, 0xfa, 0x54, 0x62, 0x57, 0x11, 0xba, 0xe6, 0x19, + 0x71, 0x23, 0xd3, 0xdc, 0x99, 0xc3, 0xd2, 0x57, 0x59, 0xed, 0x28, 0xeb, 0x55, 0xd1, 0x08, 0xc6, + 0x82, 0xf9, 0xb3, 0x87, 0xa5, 0xaf, 0x49, 0x78, 0x2d, 0x21, 0xeb, 0xba, 0xb7, 0x2e, 0x5e, 0x64, + 0x9a, 0x3f, 0x7b, 0x58, 0xfa, 0xba, 0x84, 0x97, 0x17, 0xb2, 0xbe, 0x1e, 0x30, 0x13, 0xf4, 0xe6, + 0xf4, 0xb0, 0xf4, 0x0d, 0x09, 0xef, 0x13, 0x64, 0xbd, 0xe6, 0x99, 0xd9, 0x9b, 0xf2, 0xe6, 0xf4, + 0xb0, 0xf4, 0x4d, 0x3c, 0xc5, 0xd7, 0x65, 0xfd, 0x4e, 0xc0, 0x0c, 0x46, 0xa6, 0xe2, 0x2b, 0x84, + 0xa5, 0x6f, 0x49, 0x78, 0xed, 0x23, 0xeb, 0x77, 0x0d, 0x77, 0x74, 0x3f, 0x32, 0x15, 0x5f, 0x21, + 0x2c, 0x7d, 0x26, 0xe1, 0xed, 0x90, 0xac, 0xdf, 0x0b, 0x1a, 0xc2, 0xc8, 0xa4, 0xbc, 0x4a, 0x58, + 0xfa, 0x36, 0xb5, 0x54, 0xac, 0xcb, 0xeb, 0xab, 0x86, 0xeb, 0x80, 0x10, 0x99, 0x94, 0x57, 0x09, + 0x4b, 0xdf, 0xa1, 0xa6, 0x94, 0xba, 0xbc, 0xbe, 0x16, 0x32, 0x55, 0xd3, 0xeb, 0x8f, 0xa0, 0x70, + 0xd6, 0xb0, 0xf4, 0x5d, 0xf1, 0xd6, 0x2d, 0xdf, 0x11, 0x62, 0xd3, 0xae, 0xf0, 0xce, 0x4e, 0x0d, + 0x4c, 0xdf, 0xc3, 0x1a, 0xa7, 0x3e, 0xf7, 0x84, 0xdd, 0x4c, 0x31, 0x82, 0xff, 0xfa, 0x58, 0x98, + 0xda, 0xf6, 0xf7, 0xc7, 0xa9, 0x31, 0xea, 0xfb, 0x12, 0x5e, 0x5f, 0x15, 0xb8, 0x41, 0xc4, 0x7b, + 0x3b, 0x85, 0x05, 0xac, 0x0f, 0xfd, 0x59, 0x9e, 0x16, 0xad, 0x7e, 0x20, 0xbd, 0x4a, 0xb8, 0xaa, + 0x27, 0x9a, 0x3b, 0x0d, 0x6f, 0x31, 0xb0, 0xe5, 0x6d, 0x48, 0x1e, 0x6b, 0xab, 0x6b, 0xe2, 0x91, + 0x4c, 0xbc, 0xb5, 0x65, 0x41, 0x2a, 0xaf, 0x15, 0x85, 0x8b, 0xed, 0xe1, 0xc8, 0x39, 0x31, 0x90, + 0xc5, 0xd9, 0x5a, 0x24, 0xfb, 0x93, 0x18, 0xb6, 0xc6, 0xd9, 0xd5, 0x48, 0xf6, 0xa7, 0x31, 0xec, + 0x2a, 0x67, 0xeb, 0x91, 0xec, 0xaf, 0xc6, 0xb0, 0x75, 0xce, 0x5e, 0x8f, 0x64, 0x7f, 0x2d, 0x86, + 0xbd, 0xce, 0xd9, 0xb5, 0x48, 0xf6, 0xd7, 0x63, 0xd8, 0x35, 0xce, 0xbe, 0x13, 0xc9, 0xfe, 0x46, + 0x0c, 0xfb, 0x0e, 
0x67, 0xdf, 0x8d, 0x64, 0x7f, 0x33, 0x86, 0x7d, 0x97, 0xb3, 0xef, 0x45, 0xb2, + 0xbf, 0x15, 0xc3, 0xbe, 0xc7, 0xd8, 0x6b, 0xab, 0x91, 0xec, 0xcf, 0xa2, 0xd9, 0x6b, 0xab, 0x9c, + 0x1d, 0xad, 0xb5, 0x6f, 0xc7, 0xb0, 0xb9, 0xd6, 0xd6, 0xa2, 0xb5, 0xf6, 0x9d, 0x18, 0x36, 0xd7, + 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x1b, 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0xf7, 0x62, 0xd8, + 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x7e, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x3f, 0x88, + 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x8f, 0x62, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, + 0xe3, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xfe, 0x24, 0x86, 0xcd, 0xb5, 0xa6, 0x45, 0x6b, + 0xed, 0x4f, 0xa3, 0xd9, 0x1a, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x3f, 0x8b, 0x61, 0x73, 0xad, 0x69, + 0xd1, 0x5a, 0xfb, 0xf3, 0x18, 0x36, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x1f, 0xc6, 0xb0, 0xb9, 0xd6, + 0xb4, 0x68, 0xad, 0xfd, 0x45, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0x5f, 0xc6, 0xb0, 0xb9, + 0xd6, 0xb4, 0x68, 0xad, 0xfd, 0x55, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0x5f, 0xc7, 0xb0, + 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd, 0x4d, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0xdf, 0xc6, + 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0xbb, 0x68, 0x76, 0x95, 0x6b, 0xad, 0x1a, 0xad, 0xb5, + 0xbf, 0x8f, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0x0f, 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, + 0xd6, 0xfe, 0x31, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0x8f, 0x62, 0xd8, 0x5c, 0x6b, 0xd5, + 0x68, 0xad, 0xfd, 0x53, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad, 0xb5, 0x7f, 0x8e, 0x61, 0x73, 0xad, + 0x55, 0xa3, 0xb5, 0xf6, 0x2f, 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x35, 0x86, 0xcd, + 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc5, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x7b, 0x34, + 0x5b, 0xe7, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x1f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0xff, + 0x19, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x5f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, + 0xff, 0x1d, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 
0x3f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, + 0x6b, 0x3f, 0x8e, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a, 0xfb, 0x49, 0x0c, 0x9b, 0x6b, 0x4d, 0x8f, + 0xd6, 0xda, 0xff, 0xc6, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x5f, 0x0c, 0x9b, 0x6b, 0x6d, + 0x3d, 0x5a, 0x6b, 0xff, 0x1f, 0xcd, 0x5e, 0x5f, 0xfd, 0x69, 0x00, 0x00, 0x00, 0xff, 0xff, 0x81, + 0x23, 0xc6, 0xe6, 0xc6, 0x38, 0x00, 0x00, } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/testdata/test.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/testdata/test.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/testdata/test.proto 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/testdata/test.proto 2016-05-24 07:05:22.000000000 +0000 @@ -237,6 +237,10 @@ extensions 100 to max; } +message RequiredInnerMessage { + required InnerMessage leo_finally_won_an_oscar = 1; +} + message MyMessage { required int32 count = 1; optional string name = 2; @@ -244,6 +248,7 @@ repeated string pet = 4; optional InnerMessage inner = 5; repeated OtherMessage others = 6; + optional RequiredInnerMessage we_must_go_deeper = 13; repeated InnerMessage rep_inner = 12; enum Color { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text.go 2016-05-24 07:05:22.000000000 +0000 @@ -175,7 +175,93 @@ Bytes() []byte } -func writeStruct(w *textWriter, sv reflect.Value) error { +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. 
+ for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err 
:= tm.writeProto3Any(w, sv); canExpand { + return err + } + } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -227,7 +313,7 @@ } continue } - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -269,7 +355,7 @@ return err } } - if err := writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -286,7 +372,7 @@ return err } } - if err := writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -358,7 +444,7 @@ } // Enums have a String method, so writeAny will work fine. - if err := writeAny(w, fv, props); err != nil { + if err := tm.writeAny(w, fv, props); err != nil { return err } @@ -370,7 +456,7 @@ // Extensions (the XXX_extensions field). pv := sv.Addr() if pv.Type().Implements(extendableProtoType) { - if err := writeExtensions(w, pv); err != nil { + if err := tm.writeExtensions(w, pv); err != nil { return err } } @@ -400,7 +486,7 @@ } // writeAny writes an arbitrary field. -func writeAny(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) // Floats have special cases. @@ -449,15 +535,15 @@ } } w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } - } else if err := writeStruct(w, v); err != nil { + } else if err := tm.writeStruct(w, v); err != nil { return err } w.unindent() @@ -601,7 +687,7 @@ // writeExtensions writes all the extensions in pv. 
// pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] ep := pv.Interface().(extendableProto) @@ -636,13 +722,13 @@ // Repeated extensions will appear as a slice. if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } @@ -651,7 +737,7 @@ return nil } -func writeExtension(w *textWriter, name string, pb interface{}) error { +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } @@ -660,7 +746,7 @@ return err } } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -685,7 +771,15 @@ w.complete = false } -func marshalText(w io.Writer, pb Message, compact bool) error { +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. 
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) @@ -700,11 +794,11 @@ aw := &textWriter{ w: ww, complete: true, - compact: compact, + compact: tm.Compact, } - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } @@ -718,7 +812,7 @@ } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { + if err := tm.writeStruct(aw, v); err != nil { return err } if bw != nil { @@ -727,25 +821,29 @@ return nil } +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + // MarshalText writes a given protocol buffer in text format. // The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { - return marshalText(w, pb, false) -} +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } // MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, false) - return buf.String() -} +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } // CompactText writes a given protocol buffer in compact text format (one line). 
-func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } // CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, true) - return buf.String() -} +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text_parser.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text_parser.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text_parser.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text_parser.go 2016-05-24 07:05:22.000000000 +0000 @@ -163,7 +163,7 @@ p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',': + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': @@ -451,7 +451,10 @@ fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be - // "[extension]". + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > for { tok := p.next() if tok.err != nil { @@ -461,33 +464,66 @@ break } if tok.value == "[" { - // Looks like an extension. + // Looks like an extension or an Any. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). 
- tok = p.next() - if tok.err != nil { - return tok.err + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + continue } + var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { + if d.Name == extName { desc = d break } } if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) + return p.errorf("unrecognized extension %q", extName) } props := &Properties{} @@ -627,7 +663,8 @@ return err } reqFieldErr = err - } else if props.Required { + } + if props.Required { reqCount-- } @@ -643,6 +680,35 @@ return reqFieldErr } +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. 
It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. func (p *textParser) consumeOptionalSeparator() error { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text_parser_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text_parser_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text_parser_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/proto/text_parser_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -336,6 +336,16 @@ }, }, + // Missing required field in a required submessage + { + in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, + err: `proto: required field "testdata.InnerMessage.host" not set`, + out: &MyMessage{ + Count: Int32(42), + WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}}, + }, + }, + // Repeated non-repeated field { in: `name: "Rob" name: "Russ"`, diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -3,7 +3,7 @@ // DO NOT EDIT! /* -Package google_protobuf is a generated protocol buffer package. +Package descriptor is a generated protocol buffer package. It is generated from these files: google/protobuf/descriptor.proto @@ -249,6 +249,46 @@ } func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} } + // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { @@ -275,13 +315,13 @@ // Names of files imported by this file. Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency" json:"public_dependency,omitempty"` + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency" json:"weak_dependency,omitempty"` + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` // All top-level definitions in this file. 
- MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type" json:"enum_type,omitempty"` + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` @@ -289,7 +329,7 @@ // You may safely remove this entire field without harming runtime // functionality of the descriptors -- the information is needed only by // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info" json:"source_code_info,omitempty"` + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. // The supported values are "proto2" and "proto3". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` @@ -387,15 +427,19 @@ // Describes a message type. 
type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } @@ -459,6 +503,20 @@ return nil } +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + type DescriptorProto_ExtensionRange struct { Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` @@ -486,6 +544,36 @@ return 0 } +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + // Describes a field within a message. type FieldDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -499,7 +587,7 @@ // rules are used to find the type (i.e. 
first the nested types within this // message are searched, then within the parent, on up to the root // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name" json:"type_name,omitempty"` + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` // For extensions, this is the name of the type being extended. It is // resolved in the same manner as type_name. Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` @@ -508,17 +596,15 @@ // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value" json:"default_value,omitempty"` + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. Extensions of a oneof should - // not set this since the oneof to which they belong will be inferred based - // on the extension range containing the extension's field number. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index" json:"oneof_index,omitempty"` + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` // JSON name of this field. The value is set by protocol compiler. If the // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name" json:"json_name,omitempty"` + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -723,13 +809,13 @@ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. - InputType *string `protobuf:"bytes,2,opt,name=input_type" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type" json:"output_type,omitempty"` + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,def=0" json:"client_streaming,omitempty"` + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,def=0" json:"server_streaming,omitempty"` + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -788,23 +874,25 @@ // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards // domain names. 
- JavaPackage *string `protobuf:"bytes,1,opt,name=java_package" json:"java_package,omitempty"` + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` // If set, all the classes from the .proto file are wrapped in a single // outer class with the given name. This applies to both Proto1 // (equivalent to the old "--one_java_file" option) and Proto2 (where // a .proto always translates to a single class, but you may want to // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname" json:"java_outer_classname,omitempty"` + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` // If set true, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto // file. Thus, these types will *not* be nested inside the outer class // named by java_outer_classname. However, the outer class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,def=0" json:"java_multiple_files,omitempty"` + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // If set true, then the Java code generator will generate equals() and // hashCode() methods for all messages defined in the .proto file. - // - In the full runtime, this is purely a speed optimization, as the + // This increases generated code size, potentially substantially for large + // protos, which may harm a memory-constrained application. + // - In the full runtime this is a speed optimization, as the // AbstractMessage base class includes reflection-based implementations of // these methods. 
// - In the lite runtime, setting this option changes the semantics of @@ -812,21 +900,21 @@ // the generated methods compute their results based on field values rather // than object identity. (Implementations should not assume that hashcodes // will be consistent across runtimes or versions of the protocol compiler.) - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,def=0" json:"java_generate_equals_and_hash,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. // Message reflection will do the same. // However, an extension field still accepts non-UTF-8 byte sequences. // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be // placed. If omitted, the Go package will be derived from the following: // - The basename of the package import path, if provided. // - Otherwise, the package statement in the .proto file, if present. // - Otherwise, the basename of the .proto file, without extension. 
- GoPackage *string `protobuf:"bytes,11,opt,name=go_package" json:"go_package,omitempty"` + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` // Should generic services be generated in each language? "Generic" services // are not specific to any particular RPC system. They are generated by the // main code generators in each language (without additional plugins). @@ -837,9 +925,9 @@ // that generate code specific to your particular RPC system. Therefore, // these default to false. Old code which depends on generic services should // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,def=0" json:"py_generic_services,omitempty"` + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -847,9 +935,17 @@ Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // Enables the use of arenas for the proto messages in this file. This applies // only to generated classes for C++. 
- CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,def=0" json:"cc_enable_arenas,omitempty"` + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // Whether the nano proto compiler should generate in the deprecated non-nano + // suffixed package. + JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"` // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -967,6 +1063,27 @@ return Default_FileOptions_CcEnableArenas } +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool { + if m != nil && m.JavananoUseDeprecatedPackage != nil { + return *m.JavananoUseDeprecatedPackage + } + return false +} + func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -993,11 +1110,11 @@ // // Because this is an option, the above two restrictions are not enforced by // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,def=0" json:"message_set_wire_format,omitempty"` + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` // Disables the generation of the standard "descriptor()" accessor, which can // conflict with a field of the same name. This is meant to make migration // from proto1 easier; new code should avoid fields named "descriptor". 
- NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` // Is this message deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the message, or it will be completely ignored; in the very least, @@ -1024,9 +1141,9 @@ // NOTE: Do not set the option in .proto files. Always use the maps syntax // instead. The option should only be implicitly set by the proto compiler // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry" json:"map_entry,omitempty"` + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -1098,8 +1215,19 @@ // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). By default these types are + // represented as JavaScript strings. This avoids loss of precision that can + // happen when a large value is converted to a floating point JavaScript + // numbers. Specifying JS_NUMBER for the jstype causes the generated + // JavaScript code to use the JavaScript "number" type instead of strings. + // This option is an enum to permit additional types to be added, + // e.g. goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` // Should this field be parsed lazily? Lazy applies only to message-type // fields. It means that when the outer message is initially parsed, the // inner message's contents will not be parsed but instead stored in encoded @@ -1137,7 +1265,7 @@ // For Google-internal migration only. Do not use. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -1162,6 +1290,7 @@ } const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL const Default_FieldOptions_Lazy bool = false const Default_FieldOptions_Deprecated bool = false const Default_FieldOptions_Weak bool = false @@ -1180,6 +1309,13 @@ return false } +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + func (m *FieldOptions) GetLazy() bool { if m != nil && m.Lazy != nil { return *m.Lazy @@ -1211,14 +1347,14 @@ type EnumOptions struct { // Set this option to true to allow mapping different tag names to the same // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias" json:"allow_alias,omitempty"` + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` // Is this enum deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -1272,7 +1408,7 @@ // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -1319,7 +1455,7 @@ // this is a formalization for deprecating services. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -1366,7 +1502,7 @@ // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -1416,12 +1552,12 @@ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. - IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value" json:"aggregate_value,omitempty"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" 
json:"aggregate_value,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -1485,8 +1621,8 @@ // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension" json:"is_extension,omitempty"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -1611,6 +1747,11 @@ // A series of line comments appearing on consecutive lines, with no other // tokens appearing on those lines, will be treated as a single comment. // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // // Only the comment content is provided; comment markers (e.g. //) are // stripped out. For block comments, leading whitespace and an asterisk // will be stripped from the beginning of each line other than the first. @@ -1631,6 +1772,12 @@ // // Another line attached to qux. // optional double qux = 4; // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // // optional string corge = 5; // /* Block comment attached // * to corge. Leading asterisks @@ -1638,9 +1785,12 @@ // /* Block comment attached to // * grault. 
*/ // optional int32 grault = 6; - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments" json:"trailing_comments,omitempty"` - XXX_unrecognized []byte `json:"-"` + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } @@ -1676,11 +1826,19 @@ return "" } +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + func init() { proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") @@ -1702,111 +1860,147 @@ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) 
proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) } var fileDescriptor0 = []byte{ - // 1635 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x72, 0xdb, 0xd4, - 0x17, 0xff, 0xfb, 0x43, 0xfe, 0x38, 0x76, 0x1c, 0x45, 0x49, 0x5b, 0x35, 0xff, 0x96, 0xb4, 0xa6, - 0x2d, 0x69, 0x69, 0x1d, 0x26, 0x2d, 0xa5, 0x84, 0x55, 0x3e, 0xd4, 0xd4, 0x33, 0x4e, 0x6c, 0x12, - 0x87, 0xa1, 0x6c, 0x34, 0x37, 0xf2, 0xb5, 0xa3, 0x56, 0x96, 0x8c, 0x24, 0xa7, 0x4d, 0x19, 0x66, - 0x78, 0x00, 0x98, 0xe1, 0x09, 0x18, 0x5e, 0x81, 0x0d, 0x2b, 0x36, 0xec, 0x79, 0x03, 0xb6, 0x0c, - 0xf0, 0x18, 0x9c, 0x7b, 0xaf, 0x25, 0x4b, 0x8a, 0x9d, 0xb6, 0x4c, 0x4b, 0x57, 0xee, 0xb9, 0xbf, - 0x73, 0xce, 0xef, 0x9e, 0xcf, 0xab, 0xc0, 0x95, 0x9e, 0xe3, 0xf4, 0x2c, 0xba, 0x32, 0x70, 0x1d, - 0xdf, 0x39, 0x1c, 0x76, 0x57, 0x3a, 0xd4, 0x33, 0x5c, 0x73, 0xe0, 0x3b, 0x6e, 0x8d, 0xcb, 0x94, - 0x59, 0x81, 0xa8, 0x05, 0x88, 0xea, 0x36, 0xcc, 0x3d, 0x34, 0x2d, 0xba, 0x15, 0x02, 0xf7, 0xa9, - 0xaf, 0xac, 0x42, 0xb6, 0x8b, 0x42, 0x35, 0x75, 0x25, 0xb3, 0x5c, 0x5a, 0xbd, 0x56, 0x4b, 0x28, - 0xd5, 0xe2, 0x1a, 0x2d, 0x26, 0xae, 0xfe, 0x9e, 0x81, 0xf9, 0x09, 0x72, 0xa5, 0x0c, 0x59, 0x9b, - 0xf4, 0x99, 0xad, 0xd4, 0x72, 0x51, 0x99, 0x85, 0xfc, 0x80, 0x18, 0x4f, 0x49, 0x8f, 0xaa, 0x69, - 0x2e, 0x50, 0x00, 0x3a, 0x74, 0x40, 0xed, 0x0e, 0xb5, 0x8d, 0x13, 0x35, 0x83, 0x0e, 0x8b, 0xca, - 0x45, 0x98, 0x1b, 0x0c, 0x0f, 0x2d, 0xd3, 0xd0, 0x23, 0x47, 0x80, 0x47, 0x92, 0x72, 0x01, 0x66, - 0x9f, 0x51, 0xf2, 0x34, 0x7a, 0x50, 0xe2, 0x07, 0xf7, 0xa1, 0xdc, 0xa7, 0x9e, 0x87, 0x86, 0x75, - 0xff, 0x64, 0x40, 0xd5, 0x2c, 0xa7, 0x7e, 0xe5, 0x14, 0xf5, 0x24, 0xbd, 0x8f, 0xa0, 0x48, 0xed, - 
0x61, 0x5f, 0x28, 0x49, 0x53, 0xee, 0xab, 0x21, 0x22, 0xa9, 0xf8, 0x00, 0xf2, 0x1e, 0x75, 0x8f, - 0x4d, 0x83, 0xaa, 0x39, 0xae, 0xf6, 0xde, 0x29, 0xb5, 0x7d, 0x71, 0x7e, 0x5a, 0xb3, 0x48, 0x9f, - 0xfb, 0xd4, 0xf6, 0x4c, 0xc7, 0x56, 0xf3, 0x5c, 0xf7, 0xfa, 0x84, 0x10, 0x53, 0xab, 0x93, 0xd4, - 0xbc, 0x03, 0x79, 0x67, 0xe0, 0xa3, 0x9a, 0xa7, 0x16, 0x30, 0x7a, 0xa5, 0xd5, 0x4b, 0x13, 0x53, - 0xd3, 0x14, 0x18, 0xe5, 0x63, 0x90, 0x3d, 0x67, 0xe8, 0x1a, 0x54, 0x37, 0x9c, 0x0e, 0xd5, 0x4d, - 0xbb, 0xeb, 0xa8, 0x45, 0xae, 0xb7, 0x74, 0x9a, 0x2b, 0x07, 0x6e, 0x22, 0xae, 0x8e, 0x30, 0xa5, - 0x02, 0x39, 0xef, 0xc4, 0xf6, 0xc9, 0x73, 0xb5, 0xcc, 0xd2, 0x54, 0xfd, 0x23, 0x03, 0xb3, 0x67, - 0x67, 0xf6, 0x1e, 0x48, 0x5d, 0xc6, 0x19, 0xf3, 0xfa, 0x1a, 0x37, 0x8a, 0xc5, 0x22, 0xf7, 0x3a, - 0x9a, 0x1f, 0x42, 0xc9, 0xa6, 0x9e, 0x4f, 0x3b, 0x22, 0x75, 0x99, 0x7f, 0x93, 0xef, 0xec, 0x6b, - 0xe4, 0xfb, 0x11, 0xcc, 0x86, 0x4c, 0x75, 0x97, 0xd8, 0xbd, 0xa0, 0x5c, 0x56, 0x5e, 0xe6, 0xb3, - 0xa6, 0x05, 0x7a, 0x7b, 0x4c, 0x0d, 0xd3, 0x02, 0x8e, 0x4d, 0x9d, 0x2e, 0x16, 0xb1, 0x61, 0x61, - 0x22, 0x27, 0x5f, 0xba, 0xc9, 0x20, 0x49, 0x12, 0x1f, 0x8c, 0x0b, 0x20, 0x3f, 0x25, 0x91, 0x3b, - 0xa2, 0x0b, 0x46, 0x35, 0xb0, 0x78, 0x1b, 0x2a, 0x09, 0xf7, 0x33, 0x20, 0x79, 0x3e, 0x71, 0x7d, - 0x9e, 0x37, 0x49, 0x29, 0x41, 0x06, 0x3b, 0x89, 0x77, 0xa3, 0x54, 0xfd, 0x45, 0x82, 0x85, 0x89, - 0xd1, 0x8e, 0xe7, 0x1a, 0xab, 0x03, 0x23, 0x74, 0x48, 0x5d, 0x0c, 0x3b, 0xb3, 0xb1, 0x06, 0x92, - 0x45, 0x0e, 0xa9, 0x85, 0x01, 0x4d, 0x2d, 0x57, 0x56, 0xdf, 0x7f, 0xa5, 0x0c, 0xd6, 0x1a, 0x4c, - 0x05, 0x2b, 0x20, 0x3b, 0xea, 0x3d, 0xa6, 0x7a, 0xeb, 0xd5, 0x54, 0xdb, 0xa8, 0xa1, 0xcc, 0x41, - 0x91, 0x69, 0xea, 0x9c, 0x58, 0x8e, 0x13, 0x93, 0xa1, 0xc0, 0x93, 0xd4, 0xa1, 0xc1, 0x7c, 0x39, - 0x07, 0x33, 0x1d, 0xda, 0x25, 0x43, 0xcb, 0xd7, 0x8f, 0x89, 0x35, 0xa4, 0x3c, 0x6e, 0x45, 0x65, - 0x1e, 0x4a, 0x22, 0x07, 0x26, 0x62, 0x9f, 0xf3, 0xae, 0x90, 0x98, 0xc1, 0x27, 0x1e, 0x66, 0x97, - 0x1b, 0x04, 0x8e, 0xab, 0x25, 0x3b, 
0xee, 0xf2, 0x64, 0x82, 0xa3, 0x70, 0x57, 0x7f, 0x4e, 0x43, - 0x96, 0x93, 0x9b, 0x85, 0x52, 0xfb, 0x71, 0x4b, 0xd3, 0xb7, 0x9a, 0x07, 0x1b, 0x0d, 0x4d, 0x4e, - 0x61, 0xcc, 0x80, 0x0b, 0x1e, 0x36, 0x9a, 0xeb, 0x6d, 0x39, 0x1d, 0xfe, 0xbf, 0xbe, 0xdb, 0xbe, - 0x7f, 0x4f, 0xce, 0x84, 0x0a, 0x07, 0x42, 0x90, 0x8d, 0x02, 0xee, 0xae, 0xca, 0x12, 0xde, 0xad, - 0x2c, 0x0c, 0xd4, 0x3f, 0xd7, 0xb6, 0x10, 0x91, 0x8b, 0x4b, 0x10, 0x93, 0xc7, 0xdc, 0x16, 0xb9, - 0x64, 0xa3, 0xd9, 0x6c, 0xc8, 0x85, 0xd0, 0xe6, 0x7e, 0x7b, 0xaf, 0xbe, 0xbb, 0x2d, 0x17, 0x43, - 0x9b, 0xdb, 0x7b, 0xcd, 0x83, 0x96, 0x0c, 0xa1, 0x85, 0x1d, 0x6d, 0x7f, 0x7f, 0x7d, 0x5b, 0x93, - 0x4b, 0x21, 0x62, 0xe3, 0x71, 0x5b, 0xdb, 0x97, 0xcb, 0x31, 0x5a, 0xe8, 0x62, 0x26, 0x74, 0xa1, - 0xed, 0x1e, 0xec, 0xc8, 0x15, 0x8c, 0xd9, 0x8c, 0x70, 0x11, 0x90, 0x98, 0x4d, 0x88, 0x90, 0xa9, - 0x3c, 0x26, 0x22, 0xac, 0xcc, 0xc5, 0x04, 0x88, 0x50, 0xaa, 0x9b, 0x20, 0x89, 0x7a, 0x50, 0xa0, - 0xd2, 0x58, 0xdf, 0xd0, 0x1a, 0x7a, 0xb3, 0xd5, 0xae, 0x37, 0x77, 0xd7, 0x1b, 0x18, 0xbb, 0x50, - 0xb6, 0xa7, 0x7d, 0x7a, 0x50, 0xdf, 0xd3, 0xb6, 0x30, 0x7e, 0x11, 0x59, 0x4b, 0x5b, 0x6f, 0xa3, - 0x2c, 0x53, 0xbd, 0x06, 0x0b, 0x13, 0xdb, 0x26, 0x56, 0xbd, 0xd5, 0x6f, 0x53, 0x30, 0x3f, 0xa9, - 0xc3, 0xe3, 0x35, 0xfe, 0x00, 0x24, 0x51, 0x30, 0x62, 0x9e, 0xdd, 0x9c, 0x38, 0x24, 0x3e, 0x63, - 0x88, 0x33, 0xa6, 0x74, 0x66, 0xca, 0x94, 0x66, 0xba, 0x41, 0xc9, 0x58, 0xa0, 0x4e, 0x35, 0x35, - 0xad, 0xed, 0x78, 0xb7, 0xe2, 0x9a, 0x4e, 0x38, 0xba, 0x3a, 0x9d, 0x64, 0xe0, 0xed, 0xfb, 0x14, - 0x9c, 0x9f, 0xb2, 0x97, 0xe2, 0xce, 0xee, 0x43, 0xae, 0x4f, 0xfd, 0x23, 0x27, 0x18, 0xe8, 0x37, - 0x26, 0x4c, 0x1a, 0x76, 0x7c, 0xc6, 0x88, 0xca, 0x4c, 0xdb, 0x35, 0xc2, 0x7f, 0x40, 0xe9, 0xd7, - 0x14, 0x9c, 0x9b, 0x6c, 0x2b, 0xce, 0x08, 0x9f, 0x0a, 0xa6, 0x3d, 0x18, 0xfa, 0x62, 0x76, 0xa7, - 0xc3, 0x3e, 0x1e, 0xfa, 0xa1, 0x30, 0xc3, 0x85, 0x2b, 0x63, 0x0a, 0x59, 0x4e, 0xe1, 0x9d, 0x29, - 0xdc, 0x83, 0x45, 0xb9, 0x04, 0xb2, 0x61, 0x99, 0xd4, 0xf6, 0x75, 0xcf, 
0x77, 0x29, 0xe9, 0x9b, - 0x76, 0x8f, 0xcf, 0xa3, 0xc2, 0x9a, 0xd4, 0x25, 0x96, 0x47, 0x19, 0x80, 0x2d, 0x7b, 0xea, 0x46, - 0x00, 0xb9, 0x08, 0xa0, 0xfa, 0x5b, 0x16, 0x4a, 0xd1, 0xd5, 0xbb, 0x00, 0xe5, 0x27, 0xe4, 0x98, - 0xe8, 0xc1, 0x63, 0x47, 0xdc, 0xe0, 0x12, 0x2c, 0x70, 0x29, 0x52, 0x46, 0x53, 0x86, 0x45, 0x3c, - 0x8f, 0xdf, 0xaf, 0xc0, 0x4f, 0xab, 0x30, 0xcf, 0x4f, 0xfb, 0x38, 0xac, 0xcc, 0x81, 0x45, 0x75, - 0xf6, 0x06, 0xf3, 0xf8, 0x20, 0x0a, 0x89, 0xdc, 0x86, 0xcb, 0x1c, 0xd3, 0xa3, 0x36, 0x75, 0x89, - 0x4f, 0x75, 0xfa, 0xe5, 0x10, 0x0f, 0x74, 0x62, 0x77, 0xf4, 0x23, 0xe2, 0x1d, 0xa9, 0x0b, 0x51, - 0xf4, 0x75, 0x38, 0xcf, 0xd1, 0x48, 0x1a, 0x19, 0xeb, 0xc6, 0x11, 0x35, 0x9e, 0xea, 0x43, 0xbf, - 0xfb, 0x40, 0xfd, 0x7f, 0x14, 0xf6, 0x10, 0xca, 0x2c, 0x5e, 0x7d, 0xf3, 0x05, 0xfa, 0x74, 0x5c, - 0x3e, 0x0d, 0x2b, 0x13, 0x2a, 0x3e, 0x72, 0xc1, 0x5a, 0x73, 0xa4, 0xb0, 0x83, 0x2f, 0x86, 0x35, - 0x69, 0xbf, 0xa5, 0x69, 0x5b, 0x2c, 0x41, 0x3d, 0x27, 0xbc, 0x72, 0x29, 0xb8, 0x94, 0x61, 0x08, - 0xba, 0xf8, 0x9e, 0x1b, 0xbd, 0x98, 0x3c, 0x55, 0x8e, 0xfa, 0xbf, 0x06, 0xe7, 0xc6, 0x97, 0x8a, - 0xa2, 0xe6, 0xa2, 0x28, 0xb4, 0x34, 0x38, 0x39, 0x8d, 0x51, 0xa2, 0x98, 0x8b, 0xfc, 0x35, 0xe9, - 0x52, 0x03, 0x43, 0xd3, 0x51, 0x2f, 0x24, 0x52, 0x88, 0x44, 0xa8, 0x4d, 0x0e, 0x31, 0xb2, 0xc4, - 0xc5, 0x1f, 0x9e, 0xba, 0x14, 0x05, 0x6c, 0xc2, 0xc2, 0xd0, 0x36, 0x6d, 0xcc, 0x0c, 0x1a, 0x60, - 0xef, 0x0a, 0x51, 0x43, 0xea, 0x5f, 0xf9, 0x29, 0xaf, 0x84, 0x83, 0x28, 0x5a, 0xc4, 0xa5, 0xba, - 0x06, 0xe5, 0x68, 0x64, 0x94, 0x22, 0x88, 0xd8, 0xe0, 0x10, 0xc3, 0xc1, 0xb9, 0xd9, 0xdc, 0x62, - 0x23, 0xef, 0x0b, 0x0d, 0xe7, 0x17, 0x8e, 0xde, 0x46, 0xbd, 0xad, 0xe9, 0x7b, 0x07, 0xbb, 0xed, - 0xfa, 0x8e, 0x26, 0x67, 0x6e, 0x15, 0x0b, 0x7f, 0xe7, 0xe5, 0x6f, 0xf0, 0x5f, 0xba, 0xfa, 0x67, - 0x0a, 0x2a, 0xf1, 0x45, 0xae, 0xdc, 0x80, 0x0b, 0xc1, 0x03, 0xd7, 0xa3, 0xbe, 0xfe, 0xcc, 0x74, - 0x79, 0xb2, 0xfa, 0x44, 0x2c, 0xf2, 0xf0, 0x1a, 0x35, 0x58, 0xb2, 0x1d, 0xcc, 0x38, 0x56, 0x04, - 0x71, 0x3b, 
0xfa, 0xf8, 0x0b, 0x40, 0x27, 0x06, 0xc6, 0xcb, 0x73, 0xc4, 0xf4, 0x98, 0x12, 0xb2, - 0x4c, 0xf4, 0x08, 0xf7, 0x61, 0x9f, 0x0c, 0x30, 0x66, 0xbe, 0x7b, 0xc2, 0xf7, 0x66, 0xe1, 0x8d, - 0x04, 0x29, 0x7a, 0xd1, 0x1f, 0xd3, 0x50, 0x8e, 0x2e, 0x50, 0xf6, 0x94, 0x30, 0x78, 0x2b, 0xa7, - 0x78, 0x11, 0xbe, 0x7b, 0xe6, 0xba, 0xad, 0x6d, 0xb2, 0x5d, 0xbb, 0x96, 0x13, 0xfb, 0x8d, 0xcd, - 0x47, 0x56, 0x7c, 0x54, 0xbc, 0x66, 0x0a, 0x38, 0x1c, 0xb2, 0x16, 0x79, 0x71, 0x12, 0x6f, 0xe5, - 0x33, 0xee, 0x8b, 0x78, 0xf6, 0x71, 0x11, 0xef, 0xb8, 0x37, 0x52, 0x16, 0x2b, 0x20, 0x71, 0xaa, - 0xd8, 0x21, 0x23, 0xb2, 0xf2, 0xff, 0x94, 0x02, 0x64, 0x37, 0x9b, 0x7b, 0xac, 0x34, 0xb0, 0x16, - 0x84, 0x54, 0x6f, 0xd5, 0xb5, 0x4d, 0xac, 0x8e, 0x68, 0x88, 0xbe, 0x4b, 0x41, 0x29, 0xb2, 0x2f, - 0xd8, 0xc8, 0x23, 0x96, 0xe5, 0x3c, 0xd3, 0x89, 0x65, 0x62, 0x0d, 0x8b, 0xab, 0x9e, 0x71, 0xab, - 0x37, 0x9d, 0xb2, 0xaf, 0x41, 0x4e, 0x6e, 0x95, 0x84, 0xfb, 0xd4, 0xdb, 0x74, 0xff, 0x15, 0x54, - 0xe2, 0xfb, 0x23, 0xe1, 0xfc, 0xea, 0xdb, 0x74, 0xfe, 0x02, 0x66, 0xe2, 0x9b, 0xe3, 0x3f, 0xf4, - 0xfd, 0x43, 0x1a, 0xe6, 0x27, 0x40, 0x94, 0x4f, 0x46, 0x4b, 0x52, 0xac, 0xe9, 0x3b, 0xaf, 0x62, - 0xb6, 0xb6, 0x8b, 0x0a, 0x2d, 0xfc, 0x06, 0x50, 0x54, 0x90, 0x4d, 0xfc, 0x8c, 0xf6, 0x4d, 0xfc, - 0x76, 0x73, 0x47, 0x2f, 0x64, 0xb1, 0x44, 0x17, 0x41, 0x19, 0x38, 0x9e, 0xe9, 0x9b, 0xc7, 0xec, - 0xcb, 0x31, 0x78, 0x3d, 0xb3, 0x7d, 0x9a, 0x65, 0x67, 0x36, 0xed, 0x91, 0xc4, 0x19, 0x6b, 0xb3, - 0x0c, 0xdb, 0x7c, 0x1d, 0x67, 0xc8, 0x86, 0xac, 0x90, 0xb2, 0x35, 0x99, 0x62, 0xd2, 0xd1, 0x12, - 0x1a, 0xbf, 0xc2, 0xcb, 0xec, 0x6b, 0x9e, 0xf4, 0x7a, 0x2e, 0x33, 0x15, 0xc0, 0xf9, 0x2a, 0x5c, - 0xbc, 0x0b, 0x85, 0x90, 0x22, 0x4e, 0x21, 0x76, 0x3f, 0xdc, 0x2b, 0xfc, 0x9b, 0x25, 0x8d, 0xdc, - 0xd0, 0x9a, 0xe9, 0xe9, 0xe3, 0x0f, 0xc7, 0x34, 0x4a, 0x0b, 0xd5, 0x9f, 0x70, 0x68, 0x26, 0x3e, - 0x63, 0xd7, 0xa0, 0x60, 0x39, 0x98, 0x1b, 0x06, 0x12, 0x7f, 0xcc, 0x58, 0x7e, 0xc9, 0x97, 0x6f, - 0xad, 0x31, 0xc2, 0x2f, 0x1a, 0x50, 0x08, 0x7e, 
0x63, 0x83, 0x66, 0x07, 0xc4, 0x3f, 0xe2, 0x36, - 0xa4, 0x8d, 0x34, 0x6f, 0xd9, 0xac, 0x37, 0x20, 0x36, 0x8f, 0xba, 0x90, 0x60, 0x28, 0x2d, 0x4a, - 0x3a, 0x7c, 0xd1, 0x3a, 0xfd, 0x3e, 0x06, 0xd5, 0x1b, 0x85, 0xf2, 0x22, 0xcc, 0xf9, 0x2e, 0x31, - 0xad, 0xd8, 0x11, 0x8b, 0x64, 0x71, 0xe3, 0x26, 0xae, 0x47, 0xa7, 0x9f, 0xe4, 0xb4, 0x21, 0x27, - 0x5e, 0x42, 0xde, 0xa3, 0xd4, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x3d, 0xe5, 0x5c, 0x96, 0xdd, - 0x11, 0x00, 0x00, + // 2199 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xd6, + 0x11, 0x2f, 0xff, 0x8a, 0x5c, 0x52, 0x24, 0xf4, 0xa4, 0xd8, 0xb4, 0x62, 0x47, 0x36, 0x63, 0xc7, + 0x8e, 0xd3, 0x52, 0x19, 0xb7, 0x49, 0x5c, 0xa5, 0x93, 0x0e, 0x45, 0xc2, 0x0a, 0x3d, 0x94, 0xc8, + 0x82, 0x64, 0xeb, 0xe4, 0x82, 0x81, 0xc0, 0x47, 0x0a, 0x36, 0x08, 0xb0, 0x00, 0x68, 0x5b, 0x39, + 0x75, 0xa6, 0xa7, 0x7e, 0x83, 0x4e, 0xdb, 0xe9, 0x21, 0x97, 0xcc, 0xf4, 0x03, 0xf4, 0xd0, 0x7b, + 0xaf, 0x3d, 0xf4, 0xdc, 0x63, 0x67, 0xda, 0x6f, 0xd0, 0x6b, 0xf7, 0xbd, 0x07, 0x80, 0x00, 0x09, + 0xc5, 0x6a, 0x66, 0x52, 0x47, 0x17, 0xf1, 0xed, 0xfe, 0x76, 0xb1, 0x6f, 0xdf, 0xef, 0xed, 0x2e, + 0x00, 0x37, 0xa7, 0xb6, 0x3d, 0x35, 0xe9, 0xfe, 0xdc, 0xb1, 0x3d, 0xfb, 0x74, 0x31, 0xd9, 0x1f, + 0x53, 0x57, 0x77, 0x8c, 0xb9, 0x67, 0x3b, 0x0d, 0x2e, 0x23, 0x55, 0x81, 0x68, 0x04, 0x88, 0xfa, + 0x31, 0x6c, 0x3d, 0x32, 0x4c, 0xda, 0x0e, 0x81, 0x03, 0xea, 0x91, 0x87, 0x90, 0x9d, 0xa0, 0xb0, + 0x96, 0xba, 0x99, 0xb9, 0x57, 0x7a, 0x70, 0xbb, 0xb1, 0x62, 0xd4, 0x88, 0x5b, 0xf4, 0x99, 0x58, + 0xe1, 0x16, 0xf5, 0x7f, 0x66, 0x61, 0x3b, 0x41, 0x4b, 0x08, 0x64, 0x2d, 0x6d, 0xc6, 0x3c, 0xa6, + 0xee, 0x15, 0x15, 0xfe, 0x9b, 0xd4, 0x60, 0x63, 0xae, 0xe9, 0xcf, 0xb4, 0x29, 0xad, 0xa5, 0xb9, + 0x38, 0x58, 0x92, 0xb7, 0x00, 0xc6, 0x74, 0x4e, 0xad, 0x31, 0xb5, 0xf4, 0xf3, 0x5a, 0x06, 0xa3, + 0x28, 0x2a, 0x11, 0x09, 0x79, 0x0f, 0xb6, 0xe6, 0x8b, 0x53, 0xd3, 0xd0, 0xd5, 0x08, 0x0c, 0x10, + 0x96, 0x53, 0x24, 
0xa1, 0x68, 0x2f, 0xc1, 0x77, 0xa1, 0xfa, 0x82, 0x6a, 0xcf, 0xa2, 0xd0, 0x12, + 0x87, 0x56, 0x98, 0x38, 0x02, 0x6c, 0x41, 0x79, 0x46, 0x5d, 0x17, 0x03, 0x50, 0xbd, 0xf3, 0x39, + 0xad, 0x65, 0xf9, 0xee, 0x6f, 0xae, 0xed, 0x7e, 0x75, 0xe7, 0x25, 0xdf, 0x6a, 0x88, 0x46, 0xa4, + 0x09, 0x45, 0x6a, 0x2d, 0x66, 0xc2, 0x43, 0xee, 0x82, 0xfc, 0xc9, 0x88, 0x58, 0xf5, 0x52, 0x60, + 0x66, 0xbe, 0x8b, 0x0d, 0x97, 0x3a, 0xcf, 0x0d, 0x9d, 0xd6, 0xf2, 0xdc, 0xc1, 0xdd, 0x35, 0x07, + 0x03, 0xa1, 0x5f, 0xf5, 0x11, 0xd8, 0xe1, 0x56, 0x8a, 0xf4, 0xa5, 0x47, 0x2d, 0xd7, 0xb0, 0xad, + 0xda, 0x06, 0x77, 0x72, 0x27, 0xe1, 0x14, 0xa9, 0x39, 0x5e, 0x75, 0xb1, 0xb4, 0x23, 0x1f, 0xc2, + 0x86, 0x3d, 0xf7, 0xf0, 0x97, 0x5b, 0x2b, 0xe0, 0xf9, 0x94, 0x1e, 0x5c, 0x4f, 0x24, 0x42, 0x4f, + 0x60, 0x94, 0x00, 0x4c, 0x3a, 0x20, 0xb9, 0xf6, 0xc2, 0xd1, 0xa9, 0xaa, 0xdb, 0x63, 0xaa, 0x1a, + 0xd6, 0xc4, 0xae, 0x15, 0xb9, 0x83, 0xbd, 0xf5, 0x8d, 0x70, 0x60, 0x0b, 0x71, 0x1d, 0x84, 0x29, + 0x15, 0x37, 0xb6, 0x26, 0x57, 0x20, 0xef, 0x9e, 0x5b, 0x9e, 0xf6, 0xb2, 0x56, 0xe6, 0x0c, 0xf1, + 0x57, 0xf5, 0xff, 0xe4, 0xa0, 0x7a, 0x19, 0x8a, 0x7d, 0x0c, 0xb9, 0x09, 0xdb, 0x25, 0x12, 0xec, + 0x7f, 0xc8, 0x81, 0xb0, 0x89, 0x27, 0x31, 0xff, 0x0d, 0x93, 0xd8, 0x84, 0x92, 0x45, 0x5d, 0x8f, + 0x8e, 0x05, 0x23, 0x32, 0x97, 0xe4, 0x14, 0x08, 0xa3, 0x75, 0x4a, 0x65, 0xbf, 0x11, 0xa5, 0x9e, + 0x40, 0x35, 0x0c, 0x49, 0x75, 0x34, 0x6b, 0x1a, 0x70, 0x73, 0xff, 0x55, 0x91, 0x34, 0xe4, 0xc0, + 0x4e, 0x61, 0x66, 0x4a, 0x85, 0xc6, 0xd6, 0xa4, 0x0d, 0x60, 0x5b, 0xd4, 0x9e, 0xe0, 0xf5, 0xd2, + 0x4d, 0xe4, 0x49, 0x72, 0x96, 0x7a, 0x0c, 0xb2, 0x96, 0x25, 0x5b, 0x48, 0x75, 0x93, 0xfc, 0x78, + 0x49, 0xb5, 0x8d, 0x0b, 0x98, 0x72, 0x2c, 0x2e, 0xd9, 0x1a, 0xdb, 0x46, 0x50, 0x71, 0x28, 0xe3, + 0x3d, 0xa6, 0x58, 0xec, 0xac, 0xc8, 0x83, 0x68, 0xbc, 0x72, 0x67, 0x8a, 0x6f, 0x26, 0x36, 0xb6, + 0xe9, 0x44, 0x97, 0xe4, 0x6d, 0x08, 0x05, 0x2a, 0xa7, 0x15, 0xf0, 0x2a, 0x54, 0x0e, 0x84, 0x27, + 0x28, 0xdb, 0x7d, 0x08, 0x95, 0x78, 0x7a, 0xc8, 0x0e, 
0xe4, 0x5c, 0x4f, 0x73, 0x3c, 0xce, 0xc2, + 0x9c, 0x22, 0x16, 0x44, 0x82, 0x0c, 0x16, 0x19, 0x5e, 0xe5, 0x72, 0x0a, 0xfb, 0xb9, 0xfb, 0x11, + 0x6c, 0xc6, 0x1e, 0x7f, 0x59, 0xc3, 0xfa, 0x6f, 0xf3, 0xb0, 0x93, 0xc4, 0xb9, 0x44, 0xfa, 0xe3, + 0xf5, 0x41, 0x06, 0x9c, 0x52, 0x07, 0x79, 0xc7, 0x3c, 0xf8, 0x2b, 0x64, 0x54, 0xce, 0xd4, 0x4e, + 0xa9, 0x89, 0x6c, 0x4a, 0xdd, 0xab, 0x3c, 0x78, 0xef, 0x52, 0xac, 0x6e, 0x74, 0x99, 0x89, 0x22, + 0x2c, 0xc9, 0x27, 0x90, 0xf5, 0x4b, 0x1c, 0xf3, 0x70, 0xff, 0x72, 0x1e, 0x18, 0x17, 0x15, 0x6e, + 0x47, 0xde, 0x84, 0x22, 0xfb, 0x2f, 0x72, 0x9b, 0xe7, 0x31, 0x17, 0x98, 0x80, 0xe5, 0x95, 0xec, + 0x42, 0x81, 0xd3, 0x6c, 0x4c, 0x83, 0xd6, 0x10, 0xae, 0xd9, 0xc1, 0x8c, 0xe9, 0x44, 0x5b, 0x98, + 0x9e, 0xfa, 0x5c, 0x33, 0x17, 0x94, 0x13, 0x06, 0x0f, 0xc6, 0x17, 0xfe, 0x9c, 0xc9, 0xc8, 0x1e, + 0x94, 0x04, 0x2b, 0x0d, 0xb4, 0x79, 0xc9, 0xab, 0x4f, 0x4e, 0x11, 0x44, 0xed, 0x30, 0x09, 0x7b, + 0xfc, 0x53, 0x17, 0xef, 0x82, 0x7f, 0xb4, 0xfc, 0x11, 0x4c, 0xc0, 0x1f, 0xff, 0xd1, 0x6a, 0xe1, + 0xbb, 0x91, 0xbc, 0xbd, 0x55, 0x2e, 0xd6, 0xff, 0x9c, 0x86, 0x2c, 0xbf, 0x6f, 0x55, 0x28, 0x0d, + 0x3f, 0xeb, 0xcb, 0x6a, 0xbb, 0x37, 0x3a, 0xec, 0xca, 0x52, 0x8a, 0x54, 0x00, 0xb8, 0xe0, 0x51, + 0xb7, 0xd7, 0x1c, 0x4a, 0xe9, 0x70, 0xdd, 0x39, 0x19, 0x7e, 0xf8, 0x23, 0x29, 0x13, 0x1a, 0x8c, + 0x84, 0x20, 0x1b, 0x05, 0xfc, 0xf0, 0x81, 0x94, 0x43, 0x26, 0x94, 0x85, 0x83, 0xce, 0x13, 0xb9, + 0x8d, 0x88, 0x7c, 0x5c, 0x82, 0x98, 0x0d, 0xb2, 0x09, 0x45, 0x2e, 0x39, 0xec, 0xf5, 0xba, 0x52, + 0x21, 0xf4, 0x39, 0x18, 0x2a, 0x9d, 0x93, 0x23, 0xa9, 0x18, 0xfa, 0x3c, 0x52, 0x7a, 0xa3, 0xbe, + 0x04, 0xa1, 0x87, 0x63, 0x79, 0x30, 0x68, 0x1e, 0xc9, 0x52, 0x29, 0x44, 0x1c, 0x7e, 0x36, 0x94, + 0x07, 0x52, 0x39, 0x16, 0x16, 0x3e, 0x62, 0x33, 0x7c, 0x84, 0x7c, 0x32, 0x3a, 0x96, 0x2a, 0x64, + 0x0b, 0x36, 0xc5, 0x23, 0x82, 0x20, 0xaa, 0x2b, 0x22, 0x8c, 0x54, 0x5a, 0x06, 0x22, 0xbc, 0x6c, + 0xc5, 0x04, 0x88, 0x20, 0xf5, 0x16, 0xe4, 0x38, 0xbb, 0x90, 0xc5, 0x95, 0x6e, 0xf3, 0x50, 
0xee, + 0xaa, 0xbd, 0xfe, 0xb0, 0xd3, 0x3b, 0x69, 0x76, 0x31, 0x77, 0xa1, 0x4c, 0x91, 0x7f, 0x36, 0xea, + 0x28, 0x72, 0x1b, 0xf3, 0x17, 0x91, 0xf5, 0xe5, 0xe6, 0x10, 0x65, 0x99, 0xfa, 0x7d, 0xd8, 0x49, + 0xaa, 0x33, 0x49, 0x37, 0xa3, 0xfe, 0x65, 0x0a, 0xb6, 0x13, 0x4a, 0x66, 0xe2, 0x2d, 0xfa, 0x29, + 0xe4, 0x04, 0xd3, 0x44, 0x13, 0x79, 0x37, 0xb1, 0xf6, 0x72, 0xde, 0xad, 0x35, 0x12, 0x6e, 0x17, + 0x6d, 0xa4, 0x99, 0x0b, 0x1a, 0x29, 0x73, 0xb1, 0x46, 0xa7, 0x5f, 0xa7, 0xa0, 0x76, 0x91, 0xef, + 0x57, 0xdc, 0xf7, 0x74, 0xec, 0xbe, 0x7f, 0xbc, 0x1a, 0xc0, 0xad, 0x8b, 0xf7, 0xb0, 0x16, 0xc5, + 0x57, 0x29, 0xb8, 0x92, 0x3c, 0x6f, 0x24, 0xc6, 0xf0, 0x09, 0xe4, 0x67, 0xd4, 0x3b, 0xb3, 0x83, + 0x9e, 0xfb, 0x4e, 0x42, 0x25, 0x67, 0xea, 0xd5, 0x5c, 0xf9, 0x56, 0xd1, 0x56, 0x90, 0xb9, 0x68, + 0x68, 0x10, 0xd1, 0xac, 0x45, 0xfa, 0x9b, 0x34, 0xbc, 0x91, 0xe8, 0x3c, 0x31, 0xd0, 0x1b, 0x00, + 0x86, 0x35, 0x5f, 0x78, 0xa2, 0xaf, 0x8a, 0x32, 0x53, 0xe4, 0x12, 0x7e, 0x85, 0x59, 0x09, 0x59, + 0x78, 0xa1, 0x3e, 0xc3, 0xf5, 0x20, 0x44, 0x1c, 0xf0, 0x70, 0x19, 0x68, 0x96, 0x07, 0xfa, 0xd6, + 0x05, 0x3b, 0x5d, 0x6b, 0x59, 0xef, 0x83, 0xa4, 0x9b, 0x06, 0xb5, 0x3c, 0xd5, 0xf5, 0x1c, 0xaa, + 0xcd, 0x0c, 0x6b, 0xca, 0xeb, 0x68, 0xe1, 0x20, 0x37, 0xd1, 0x4c, 0x97, 0x2a, 0x55, 0xa1, 0x1e, + 0x04, 0x5a, 0x66, 0xc1, 0x9b, 0x85, 0x13, 0xb1, 0xc8, 0xc7, 0x2c, 0x84, 0x3a, 0xb4, 0xa8, 0xff, + 0x7d, 0x03, 0x4a, 0x91, 0xe9, 0x8c, 0xdc, 0x82, 0xf2, 0x53, 0xed, 0xb9, 0xa6, 0x06, 0x13, 0xb7, + 0xc8, 0x44, 0x89, 0xc9, 0xfa, 0xfe, 0xd4, 0xfd, 0x3e, 0xec, 0x70, 0x08, 0xee, 0x11, 0x1f, 0xa4, + 0x9b, 0x9a, 0xeb, 0xf2, 0xa4, 0x15, 0x38, 0x94, 0x30, 0x5d, 0x8f, 0xa9, 0x5a, 0x81, 0x86, 0x7c, + 0x00, 0xdb, 0xdc, 0x62, 0x86, 0x85, 0xd7, 0x98, 0x9b, 0x54, 0x65, 0xef, 0x00, 0x2e, 0xaf, 0xa7, + 0x61, 0x64, 0x5b, 0x0c, 0x71, 0xec, 0x03, 0x58, 0x44, 0x2e, 0x39, 0x82, 0x1b, 0xdc, 0x6c, 0x4a, + 0x2d, 0xea, 0x68, 0x1e, 0x55, 0xe9, 0x2f, 0x17, 0x88, 0x55, 0x35, 0x6b, 0xac, 0x9e, 0x69, 0xee, + 0x59, 0x6d, 0x27, 0xea, 0xe0, 
0x1a, 0xc3, 0x1e, 0xf9, 0x50, 0x99, 0x23, 0x9b, 0xd6, 0xf8, 0x53, + 0xc4, 0x91, 0x03, 0xb8, 0xc2, 0x1d, 0x61, 0x52, 0x70, 0xcf, 0xaa, 0x7e, 0x46, 0xf5, 0x67, 0xea, + 0xc2, 0x9b, 0x3c, 0xac, 0xbd, 0x19, 0xf5, 0xc0, 0x83, 0x1c, 0x70, 0x4c, 0x8b, 0x41, 0x46, 0x88, + 0x20, 0x03, 0x28, 0xb3, 0xf3, 0x98, 0x19, 0x5f, 0x60, 0xd8, 0xb6, 0xc3, 0x7b, 0x44, 0x25, 0xe1, + 0x72, 0x47, 0x92, 0xd8, 0xe8, 0xf9, 0x06, 0xc7, 0x38, 0x9f, 0x1e, 0xe4, 0x06, 0x7d, 0x59, 0x6e, + 0x2b, 0xa5, 0xc0, 0xcb, 0x23, 0xdb, 0x61, 0x9c, 0x9a, 0xda, 0x61, 0x8e, 0x4b, 0x82, 0x53, 0x53, + 0x3b, 0xc8, 0x30, 0xe6, 0x4b, 0xd7, 0xc5, 0xb6, 0xf1, 0xdd, 0xc5, 0x1f, 0xd6, 0xdd, 0x9a, 0x14, + 0xcb, 0x97, 0xae, 0x1f, 0x09, 0x80, 0x4f, 0x73, 0x17, 0xaf, 0xc4, 0x1b, 0xcb, 0x7c, 0x45, 0x0d, + 0xb7, 0xd6, 0x76, 0xb9, 0x6a, 0x8a, 0x4f, 0x9c, 0x9f, 0xaf, 0x1b, 0x92, 0xd8, 0x13, 0xe7, 0xe7, + 0xab, 0x66, 0x77, 0xf8, 0x0b, 0x98, 0x43, 0x75, 0x4c, 0xf9, 0xb8, 0x76, 0x35, 0x8a, 0x8e, 0x28, + 0xc8, 0x3e, 0x12, 0x59, 0x57, 0xa9, 0xa5, 0x9d, 0xe2, 0xd9, 0x6b, 0x0e, 0xfe, 0x70, 0x6b, 0x7b, + 0x51, 0x70, 0x45, 0xd7, 0x65, 0xae, 0x6d, 0x72, 0x25, 0xb9, 0x0f, 0x5b, 0xf6, 0xe9, 0x53, 0x5d, + 0x90, 0x4b, 0x45, 0x3f, 0x13, 0xe3, 0x65, 0xed, 0x36, 0x4f, 0x53, 0x95, 0x29, 0x38, 0xb5, 0xfa, + 0x5c, 0x4c, 0xde, 0x45, 0xe7, 0xee, 0x99, 0xe6, 0xcc, 0x79, 0x93, 0x76, 0x31, 0xa9, 0xb4, 0x76, + 0x47, 0x40, 0x85, 0xfc, 0x24, 0x10, 0x13, 0x19, 0xf6, 0xd8, 0xe6, 0x2d, 0xcd, 0xb2, 0xd5, 0x85, + 0x4b, 0xd5, 0x65, 0x88, 0xe1, 0x59, 0xbc, 0xc3, 0xc2, 0x52, 0xae, 0x07, 0xb0, 0x91, 0x8b, 0xc5, + 0x2c, 0x00, 0x05, 0xc7, 0xf3, 0x04, 0x76, 0x16, 0x96, 0x61, 0x21, 0xc5, 0x51, 0xc3, 0x8c, 0xc5, + 0x85, 0xad, 0xfd, 0x6b, 0xe3, 0x82, 0xa1, 0x7b, 0x14, 0x45, 0x0b, 0x92, 0x28, 0xdb, 0x8b, 0x75, + 0x61, 0xfd, 0x00, 0xca, 0x51, 0xee, 0x90, 0x22, 0x08, 0xf6, 0x60, 0x77, 0xc3, 0x8e, 0xda, 0xea, + 0xb5, 0x59, 0x2f, 0xfc, 0x5c, 0xc6, 0xc6, 0x86, 0x3d, 0xb9, 0xdb, 0x19, 0xca, 0xaa, 0x32, 0x3a, + 0x19, 0x76, 0x8e, 0x65, 0x29, 0x73, 0xbf, 0x58, 0xf8, 0xf7, 0x86, 
0xf4, 0x2b, 0xfc, 0x4b, 0xd7, + 0xff, 0x9a, 0x86, 0x4a, 0x7c, 0x0e, 0x26, 0x3f, 0x81, 0xab, 0xc1, 0x4b, 0xab, 0x4b, 0x3d, 0xf5, + 0x85, 0xe1, 0x70, 0x3a, 0xcf, 0x34, 0x31, 0x49, 0x86, 0x27, 0xb1, 0xe3, 0xa3, 0xf0, 0xf5, 0xfe, + 0x17, 0x88, 0x79, 0xc4, 0x21, 0xa4, 0x0b, 0x7b, 0x98, 0x32, 0x9c, 0x35, 0xad, 0xb1, 0xe6, 0x8c, + 0xd5, 0xe5, 0xe7, 0x02, 0x55, 0xd3, 0x91, 0x07, 0xae, 0x2d, 0x3a, 0x49, 0xe8, 0xe5, 0xba, 0x65, + 0x0f, 0x7c, 0xf0, 0xb2, 0xc4, 0x36, 0x7d, 0xe8, 0x0a, 0x6b, 0x32, 0x17, 0xb1, 0x06, 0x67, 0xaf, + 0x99, 0x36, 0x47, 0xda, 0x78, 0xce, 0x39, 0x9f, 0xde, 0x0a, 0x4a, 0x01, 0x05, 0x32, 0x5b, 0x7f, + 0x7b, 0x67, 0x10, 0xcd, 0xe3, 0x3f, 0x32, 0x50, 0x8e, 0x4e, 0x70, 0x6c, 0x20, 0xd6, 0x79, 0x99, + 0x4f, 0xf1, 0x2a, 0xf0, 0xf6, 0xd7, 0xce, 0x7b, 0x8d, 0x16, 0xab, 0xff, 0x07, 0x79, 0x31, 0x57, + 0x29, 0xc2, 0x92, 0xf5, 0x5e, 0xc6, 0x35, 0x2a, 0xa6, 0xf5, 0x82, 0xe2, 0xaf, 0xb0, 0xd8, 0xe5, + 0x9f, 0xba, 0xdc, 0x77, 0x9e, 0xfb, 0xbe, 0xfd, 0xf5, 0xbe, 0x1f, 0x0f, 0xb8, 0xf3, 0xe2, 0xe3, + 0x81, 0x7a, 0xd2, 0x53, 0x8e, 0x9b, 0x5d, 0xc5, 0x37, 0x27, 0xd7, 0x20, 0x6b, 0x6a, 0x5f, 0x9c, + 0xc7, 0x3b, 0x05, 0x17, 0x5d, 0x36, 0xf1, 0xe8, 0x81, 0x7d, 0xf2, 0x88, 0xd7, 0x67, 0x2e, 0xfa, + 0x16, 0xa9, 0xbf, 0x0f, 0x39, 0x9e, 0x2f, 0x02, 0xe0, 0x67, 0x4c, 0xfa, 0x1e, 0x29, 0x40, 0xb6, + 0xd5, 0x53, 0x18, 0xfd, 0x91, 0xef, 0x42, 0xaa, 0xf6, 0x3b, 0x72, 0x0b, 0x6f, 0x40, 0xfd, 0x03, + 0xc8, 0x8b, 0x24, 0xb0, 0xab, 0x11, 0xa6, 0x01, 0x8d, 0xc4, 0xd2, 0xf7, 0x91, 0x0a, 0xb4, 0xa3, + 0xe3, 0x43, 0x59, 0x91, 0xd2, 0xd1, 0xe3, 0xfd, 0x4b, 0x0a, 0x4a, 0x91, 0x81, 0x8a, 0xb5, 0x72, + 0xcd, 0x34, 0xed, 0x17, 0xaa, 0x66, 0x1a, 0x58, 0xa1, 0xc4, 0xf9, 0x00, 0x17, 0x35, 0x99, 0xe4, + 0xb2, 0xf9, 0xfb, 0xbf, 0x70, 0xf3, 0x8f, 0x29, 0x90, 0x56, 0x87, 0xb1, 0x95, 0x00, 0x53, 0xaf, + 0x35, 0xc0, 0x3f, 0xa4, 0xa0, 0x12, 0x9f, 0xc0, 0x56, 0xc2, 0xbb, 0xf5, 0x5a, 0xc3, 0xfb, 0x7d, + 0x0a, 0x36, 0x63, 0x73, 0xd7, 0x77, 0x2a, 0xba, 0xdf, 0x65, 0x60, 0x3b, 0xc1, 0x0e, 0x0b, 0x90, + 0x18, 
0x50, 0xc5, 0xcc, 0xfc, 0x83, 0xcb, 0x3c, 0xab, 0xc1, 0xfa, 0x5f, 0x5f, 0x73, 0x3c, 0x7f, + 0x9e, 0xc5, 0x7e, 0x69, 0x8c, 0xb1, 0xa8, 0x1a, 0x13, 0x03, 0xc7, 0x37, 0xf1, 0xc6, 0x22, 0xa6, + 0xd6, 0xea, 0x52, 0x2e, 0x5e, 0x8f, 0xbf, 0x0f, 0x64, 0x6e, 0xbb, 0x86, 0x67, 0x3c, 0x67, 0x9f, + 0xe7, 0x82, 0x17, 0x69, 0x36, 0xc5, 0x66, 0x15, 0x29, 0xd0, 0x74, 0x2c, 0x2f, 0x44, 0x5b, 0x74, + 0xaa, 0xad, 0xa0, 0x59, 0x19, 0xca, 0x28, 0x52, 0xa0, 0x09, 0xd1, 0x38, 0x68, 0x8e, 0xed, 0x05, + 0x1b, 0x08, 0x04, 0x8e, 0x55, 0xbd, 0x94, 0x52, 0x12, 0xb2, 0x10, 0xe2, 0x4f, 0x6c, 0xcb, 0x37, + 0xf8, 0xb2, 0x52, 0x12, 0x32, 0x01, 0xb9, 0x0b, 0x55, 0x6d, 0x3a, 0x75, 0x98, 0xf3, 0xc0, 0x91, + 0x18, 0x43, 0x2b, 0xa1, 0x98, 0x03, 0x77, 0x1f, 0x43, 0x21, 0xc8, 0x03, 0x6b, 0x2c, 0x2c, 0x13, + 0xd8, 0xf3, 0xf9, 0x77, 0x94, 0x34, 0x7b, 0xa9, 0xb7, 0x02, 0x25, 0x3e, 0xd4, 0x70, 0xd5, 0xe5, + 0x07, 0xbd, 0x34, 0xea, 0x0b, 0x4a, 0xc9, 0x70, 0xc3, 0x2f, 0x38, 0xf5, 0xaf, 0xb0, 0xbd, 0xc6, + 0x3f, 0x48, 0x92, 0x36, 0x14, 0x4c, 0x1b, 0xf9, 0xc1, 0x2c, 0xc4, 0xd7, 0xf0, 0x7b, 0xaf, 0xf8, + 0x86, 0xd9, 0xe8, 0xfa, 0x78, 0x25, 0xb4, 0xdc, 0xfd, 0x5b, 0x0a, 0x0a, 0x81, 0x18, 0x1b, 0x45, + 0x76, 0xae, 0x79, 0x67, 0xdc, 0x5d, 0xee, 0x30, 0x2d, 0xa5, 0x14, 0xbe, 0x66, 0x72, 0x9c, 0x66, + 0x2c, 0x4e, 0x01, 0x5f, 0xce, 0xd6, 0xec, 0x5c, 0x4d, 0xaa, 0x8d, 0xf9, 0x80, 0x6b, 0xcf, 0x66, + 0x78, 0x92, 0x6e, 0x70, 0xae, 0xbe, 0xbc, 0xe5, 0x8b, 0xd9, 0x77, 0x71, 0xcf, 0xd1, 0x0c, 0x33, + 0x86, 0xcd, 0x72, 0xac, 0x14, 0x28, 0x42, 0xf0, 0x01, 0x5c, 0x0b, 0xfc, 0x8e, 0xa9, 0xa7, 0xe1, + 0xf0, 0x3c, 0x5e, 0x1a, 0xe5, 0xf9, 0xd7, 0xae, 0xab, 0x3e, 0xa0, 0xed, 0xeb, 0x03, 0xdb, 0xc3, + 0x27, 0x38, 0xc8, 0xda, 0xb3, 0xd5, 0x4c, 0x1c, 0x4a, 0x2b, 0xef, 0x5d, 0xee, 0xa7, 0xa9, 0xcf, + 0x61, 0x39, 0x54, 0x7c, 0x99, 0xce, 0x1c, 0xf5, 0x0f, 0xff, 0x94, 0xde, 0x3d, 0x12, 0x76, 0xfd, + 0x20, 0x83, 0x0a, 0x9d, 0x98, 0x54, 0x67, 0xd9, 0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, + 0xe8, 0xdf, 0x9c, 0xc3, 0x18, 0x00, 0x00, } 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/generator/generator.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/generator/generator.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/generator/generator.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/generator/generator.go 2016-05-24 07:05:22.000000000 +0000 @@ -264,6 +264,31 @@ // PackageName is the package name we'll use in the generated code to refer to this file. func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) } +// goPackageOption interprets the file's go_package option. +// If there is no go_package, it returns ("", "", false). +// If there's a simple name, it returns ("", pkg, true). +// If the option implies an import path, it returns (impPath, pkg, true). +func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) { + pkg = d.GetOptions().GetGoPackage() + if pkg == "" { + return + } + ok = true + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(pkg, "/") + if slash < 0 { + return + } + impPath, pkg = pkg, pkg[slash+1:] + // A semicolon-delimited suffix overrides the package name. + sc := strings.IndexByte(impPath, ';') + if sc < 0 { + return + } + impPath, pkg = impPath[:sc], impPath[sc+1:] + return +} + // goPackageName returns the Go package name to use in the // generated Go file. The result explicit reports whether the name // came from an option go_package statement. If explicit is false, @@ -271,10 +296,8 @@ // or the input file name. func (d *FileDescriptor) goPackageName() (name string, explicit bool) { // Does the file have a "go_package" option? 
- if opts := d.Options; opts != nil { - if pkg := opts.GetGoPackage(); pkg != "" { - return pkg, true - } + if _, pkg, ok := d.goPackageOption(); ok { + return pkg, true } // Does the file have a package clause? @@ -285,6 +308,26 @@ return baseName(d.GetName()), false } +// goFileName returns the output name for the generated Go file. +func (d *FileDescriptor) goFileName() string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(impPath, name) + return name + } + + return name +} + func (d *FileDescriptor) addExport(obj Object, sym symbol) { d.exported[obj] = append(d.exported[obj], sym) } @@ -512,7 +555,7 @@ Param map[string]string // Command-line parameters. PackageImportPath string // Go import path of the package we're generating code for ImportPrefix string // String to prefix to imported package file names. - ImportMap map[string]string // Mapping from import name to generated name + ImportMap map[string]string // Mapping from .proto file name to import path Pkg map[string]string // The names under which we import support packages @@ -760,9 +803,9 @@ // and FileDescriptorProtos into file-referenced objects within the Generator. // It also creates the list of files to generate and so should be called before GenerateAllFiles. 
func (g *Generator) WrapTypes() { - g.allFiles = make([]*FileDescriptor, len(g.Request.ProtoFile)) + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) - for i, f := range g.Request.ProtoFile { + for _, f := range g.Request.ProtoFile { // We must wrap the descriptors before we wrap the enums descs := wrapDescriptors(f) g.buildNestedDescriptors(descs) @@ -778,22 +821,22 @@ proto3: fileIsProto3(f), } extractComments(fd) - g.allFiles[i] = fd + g.allFiles = append(g.allFiles, fd) g.allFilesByName[f.GetName()] = fd } for _, fd := range g.allFiles { fd.imp = wrapImported(fd.FileDescriptorProto, g) } - g.genFiles = make([]*FileDescriptor, len(g.Request.FileToGenerate)) - for i, fileName := range g.Request.FileToGenerate { - g.genFiles[i] = g.allFilesByName[fileName] - if g.genFiles[i] == nil { + g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) + for _, fileName := range g.Request.FileToGenerate { + fd := g.allFilesByName[fileName] + if fd == nil { g.Fail("could not find file named", fileName) } - g.genFiles[i].index = i + fd.index = len(g.genFiles) + g.genFiles = append(g.genFiles, fd) } - g.Response.File = make([]*plugin.CodeGeneratorResponse_File, len(g.genFiles)) } // Scan the descriptors in this file. For each one, build the slice of nested descriptors @@ -857,9 +900,8 @@ } } - d.ext = make([]*ExtensionDescriptor, len(desc.Extension)) - for i, field := range desc.Extension { - d.ext[i] = &ExtensionDescriptor{common{file}, field, d} + for _, field := range desc.Extension { + d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) } return d @@ -918,9 +960,9 @@ // Return a slice of all the top-level ExtensionDescriptors defined within this file. 
func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor { - sl := make([]*ExtensionDescriptor, len(file.Extension)) - for i, field := range file.Extension { - sl[i] = &ExtensionDescriptor{common{file}, field, nil} + var sl []*ExtensionDescriptor + for _, field := range file.Extension { + sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) } return sl } @@ -1089,7 +1131,6 @@ for _, file := range g.genFiles { genFileMap[file] = true } - i := 0 for _, file := range g.allFiles { g.Reset() g.writeOutput = genFileMap[file] @@ -1097,10 +1138,10 @@ if !g.writeOutput { continue } - g.Response.File[i] = new(plugin.CodeGeneratorResponse_File) - g.Response.File[i].Name = proto.String(goFileName(*file.Name)) - g.Response.File[i].Content = proto.String(g.String()) - i++ + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(file.goFileName()), + Content: proto.String(g.String()), + }) } } @@ -1285,7 +1326,7 @@ if fd.PackageName() == g.packageName { continue } - filename := goFileName(s) + filename := fd.goFileName() // By default, import path is the dirname of the Go filename. importPath := path.Dir(filename) if substitution, ok := g.ImportMap[s]; ok { @@ -1426,6 +1467,9 @@ } indexes = append(indexes, strconv.Itoa(enum.index)) g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return fileDescriptor", g.file.index, ", []int{", strings.Join(indexes, ", "), "} }") + if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { + g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) + } g.P() } @@ -1517,6 +1561,11 @@ name = name[i+1:] } } + if json := field.GetJsonName(); json != "" && json != name { + // TODO: escaping might be needed, in which case + // perhaps this should be in its own "json" tag. 
+ name += ",json=" + json + } name = ",name=" + name if message.proto3() { // We only need the extra tag for []byte fields; @@ -1647,6 +1696,28 @@ "Descriptor", } +// Names of messages in the `google.protobuf` package for which +// we will generate XXX_WellKnownType methods. +var wellKnownTypes = map[string]bool{ + "Any": true, + "Duration": true, + "Empty": true, + "Struct": true, + "Timestamp": true, + + "Value": true, + "ListValue": true, + "DoubleValue": true, + "FloatValue": true, + "Int64Value": true, + "UInt64Value": true, + "Int32Value": true, + "UInt32Value": true, + "BoolValue": true, + "StringValue": true, + "BytesValue": true, +} + // Generate the type and default constant definitions for this Descriptor. func (g *Generator) generateMessage(message *Descriptor) { // The full type name @@ -1827,13 +1898,15 @@ g.P("func (m *", ccTypeName, ") Reset() { *m = ", ccTypeName, "{} }") g.P("func (m *", ccTypeName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") g.P("func (*", ccTypeName, ") ProtoMessage() {}") - if !message.group { - var indexes []string - for m := message; m != nil; m = m.parent { - // XXX: skip groups? - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return fileDescriptor", g.file.index, ", []int{", strings.Join(indexes, ", "), "} }") + var indexes []string + for m := message; m != nil; m = m.parent { + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return fileDescriptor", g.file.index, ", []int{", strings.Join(indexes, ", "), "} }") + // TODO: Revisit the decision to use a XXX_WellKnownType method + // if we change proto.MessageName to work with multiple equivalents. 
+ if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] { + g.P("func (*", ccTypeName, `) XXX_WellKnownType() string { return "`, message.GetName(), `" }`) } // Extension support methods @@ -2647,15 +2720,6 @@ // dottedSlice turns a sliced name into a dotted name. func dottedSlice(elem []string) string { return strings.Join(elem, ".") } -// Given a .proto file name, return the output name for the generated Go program. -func goFileName(name string) string { - ext := path.Ext(name) - if ext == ".proto" || ext == ".protodevel" { - name = name[0 : len(name)-len(ext)] - } - return name + ".pb.go" -} - // Is this field optional? func isOptional(field *descriptor.FieldDescriptorProto) bool { return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -33,6 +33,8 @@ import ( "testing" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" ) func TestCamelCase(t *testing.T) { @@ -54,3 +56,30 @@ } } } + +func TestGoPackageOption(t *testing.T) { + tests := []struct { + in string + impPath, pkg string + ok bool + }{ + {"", "", "", false}, + {"foo", "", "foo", true}, + {"github.com/golang/bar", "github.com/golang/bar", "bar", true}, + {"github.com/golang/bar;baz", "github.com/golang/bar", "baz", true}, + } + for _, tc := range tests { + d := &FileDescriptor{ + FileDescriptorProto: &descriptor.FileDescriptorProto{ + Options: &descriptor.FileOptions{ + GoPackage: &tc.in, 
+ }, + }, + } + impPath, pkg, ok := d.goPackageOption() + if impPath != tc.impPath || pkg != tc.pkg || ok != tc.ok { + t.Errorf("go_package = %q => (%q, %q, %t), want (%q, %q, %t)", tc.in, + impPath, pkg, ok, tc.impPath, tc.pkg, tc.ok) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,462 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package grpc outputs gRPC service descriptions in Go code. +// It runs as a plugin for the Go protocol buffer compiler plugin. +// It is linked in to protoc-gen-go. +package grpc + +import ( + "fmt" + "path" + "strconv" + "strings" + + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/protoc-gen-go/generator" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// the grpc package is introduced; the generated code references +// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 2 + +// Paths for packages used by code generated in this file, +// relative to the import_prefix of the generator.Generator. +const ( + contextPkgPath = "golang.org/x/net/context" + grpcPkgPath = "google.golang.org/grpc" +) + +func init() { + generator.RegisterPlugin(new(grpc)) +} + +// grpc is an implementation of the Go protocol buffer compiler's +// plugin architecture. It generates bindings for gRPC support. +type grpc struct { + gen *generator.Generator +} + +// Name returns the name of this plugin, "grpc". +func (g *grpc) Name() string { + return "grpc" +} + +// The names for packages imported in the generated code. +// They may vary from the final path component of the import path +// if the name is used by other packages. 
+var ( + contextPkg string + grpcPkg string +) + +// Init initializes the plugin. +func (g *grpc) Init(gen *generator.Generator) { + g.gen = gen + contextPkg = generator.RegisterUniquePackageName("context", nil) + grpcPkg = generator.RegisterUniquePackageName("grpc", nil) +} + +// Given a type name defined in a .proto, return its object. +// Also record that we're using it, to guarantee the associated import. +func (g *grpc) objectNamed(name string) generator.Object { + g.gen.RecordTypeUse(name) + return g.gen.ObjectNamed(name) +} + +// Given a type name defined in a .proto, return its name as we will print it. +func (g *grpc) typeName(str string) string { + return g.gen.TypeName(g.objectNamed(str)) +} + +// P forwards to g.gen.P. +func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } + +// Generate generates code for the services in the given file. +func (g *grpc) Generate(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPkg, ".Context") + g.P("var _ ", grpcPkg, ".ClientConn") + g.P() + + // Assert version compatibility. + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion) + g.P() + + for i, service := range file.FileDescriptorProto.Service { + g.generateService(file, service, i) + } +} + +// GenerateImports generates the import declaration for this file. 
+func (g *grpc) GenerateImports(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + g.P("import (") + g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) + g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) + g.P(")") + g.P() +} + +// reservedClientName records whether a client name is reserved on the client side. +var reservedClientName = map[string]bool{ +// TODO: do we need any in gRPC? +} + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } + +// generateService generates all the code for the named service. +func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { + path := fmt.Sprintf("6,%d", index) // 6 means service. + + origServName := service.GetName() + fullServName := origServName + if pkg := file.GetPackage(); pkg != "" { + fullServName = pkg + "." + fullServName + } + servName := generator.CamelCase(origServName) + + g.P() + g.P("// Client API for ", servName, " service") + g.P() + + // Client interface. + g.P("type ", servName, "Client interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateClientSignature(servName, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(servName), "Client struct {") + g.P("cc *", grpcPkg, ".ClientConn") + g.P("}") + g.P() + + // NewClient factory. + g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") + g.P("return &", unexport(servName), "Client{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + serviceDescVar := "_" + servName + "_serviceDesc" + // Client method implementations. 
+ for _, method := range service.Method { + var descExpr string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + // Unary RPC method + descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) + streamIndex++ + } + g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) + } + + g.P("// Server API for ", servName, " service") + g.P() + + // Server interface. + serverType := servName + "Server" + g.P("type ", serverType, " interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateServerSignature(servName, method)) + } + g.P("}") + g.P() + + // Server registration. + g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Method { + hname := g.generateServerMethod(servName, fullServName, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. 
+ g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") + g.P("ServiceName: ", strconv.Quote(fullServName), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPkg, ".MethodDesc{") + for i, method := range service.Method { + if method.GetServerStreaming() || method.GetClientStreaming() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPkg, ".StreamDesc{") + for i, method := range service.Method { + if !method.GetServerStreaming() && !method.GetClientStreaming() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.GetServerStreaming() { + g.P("ServerStreams: true,") + } + if method.GetClientStreaming() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("}") + g.P() +} + +// generateClientSignature returns the client-side signature for a method. 
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + reqArg := ", in *" + g.typeName(method.GetInputType()) + if method.GetClientStreaming() { + reqArg = "" + } + respName := "*" + g.typeName(method.GetOutputType()) + if method.GetServerStreaming() || method.GetClientStreaming() { + respName = servName + "_" + generator.CamelCase(origMethName) + "Client" + } + return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) +} + +func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { + sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) + methName := generator.CamelCase(method.GetName()) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("out := new(", outType, ")") + // TODO: Pass descExpr to Invoke. 
+ g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(servName) + methName + "Client" + g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.GetClientStreaming() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.GetClientStreaming() + genRecv := method.GetServerStreaming() + genCloseAndRecv := !method.GetServerStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Client interface {") + if genSend { + g.P("Send(*", inType, ") error") + } + if genRecv { + g.P("Recv() (*", outType, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", outType, ", error)") + } + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", inType, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +// generateServerSignature returns 
the server-side signature for a method. +func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string { + methName := generator.CamelCase(method.GetName()) + hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {") + g.P("in := new(", inType, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }") + g.P("info := &grpc.UnaryServerInfo{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {") + g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + 
g.P("}") + g.P() + return hname + } + streamType := unexport(servName) + methName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") + if !method.GetClientStreaming() { + g.P("m := new(", inType, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})") + } else { + g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.GetServerStreaming() + genSendAndClose := !method.GetServerStreaming() + genRecv := method.GetClientStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Server interface {") + if genSend { + g.P("Send(*", outType, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", outType, ") error") + } + if genRecv { + g.P("Recv() (*", inType, ", error)") + } + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") + g.P("m := new(", inType, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,442 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package grpc outputs gRPC service descriptions in Go code. -// It runs as a plugin for the Go protocol buffer compiler plugin. -// It is linked in to protoc-gen-go. -package grpc - -import ( - "fmt" - "path" - "strconv" - "strings" - - pb "github.com/golang/protobuf/protoc-gen-go/descriptor" - "github.com/golang/protobuf/protoc-gen-go/generator" -) - -// Paths for packages used by code generated in this file, -// relative to the import_prefix of the generator.Generator. -const ( - contextPkgPath = "golang.org/x/net/context" - grpcPkgPath = "google.golang.org/grpc" -) - -func init() { - generator.RegisterPlugin(new(grpc)) -} - -// grpc is an implementation of the Go protocol buffer compiler's -// plugin architecture. It generates bindings for gRPC support. -type grpc struct { - gen *generator.Generator -} - -// Name returns the name of this plugin, "grpc". -func (g *grpc) Name() string { - return "grpc" -} - -// The names for packages imported in the generated code. -// They may vary from the final path component of the import path -// if the name is used by other packages. -var ( - contextPkg string - grpcPkg string -) - -// Init initializes the plugin. -func (g *grpc) Init(gen *generator.Generator) { - g.gen = gen - contextPkg = generator.RegisterUniquePackageName("context", nil) - grpcPkg = generator.RegisterUniquePackageName("grpc", nil) -} - -// Given a type name defined in a .proto, return its object. 
-// Also record that we're using it, to guarantee the associated import. -func (g *grpc) objectNamed(name string) generator.Object { - g.gen.RecordTypeUse(name) - return g.gen.ObjectNamed(name) -} - -// Given a type name defined in a .proto, return its name as we will print it. -func (g *grpc) typeName(str string) string { - return g.gen.TypeName(g.objectNamed(str)) -} - -// P forwards to g.gen.P. -func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } - -// Generate generates code for the services in the given file. -func (g *grpc) Generate(file *generator.FileDescriptor) { - if len(file.FileDescriptorProto.Service) == 0 { - return - } - g.P("// Reference imports to suppress errors if they are not otherwise used.") - g.P("var _ ", contextPkg, ".Context") - g.P("var _ ", grpcPkg, ".ClientConn") - g.P() - for i, service := range file.FileDescriptorProto.Service { - g.generateService(file, service, i) - } -} - -// GenerateImports generates the import declaration for this file. -func (g *grpc) GenerateImports(file *generator.FileDescriptor) { - if len(file.FileDescriptorProto.Service) == 0 { - return - } - g.P("import (") - g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) - g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) - g.P(")") - g.P() -} - -// reservedClientName records whether a client name is reserved on the client side. -var reservedClientName = map[string]bool{ -// TODO: do we need any in gRPC? -} - -func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } - -// generateService generates all the code for the named service. -func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { - path := fmt.Sprintf("6,%d", index) // 6 means service. - - origServName := service.GetName() - fullServName := origServName - if pkg := file.GetPackage(); pkg != "" { - fullServName = pkg + "." 
+ fullServName - } - servName := generator.CamelCase(origServName) - - g.P() - g.P("// Client API for ", servName, " service") - g.P() - - // Client interface. - g.P("type ", servName, "Client interface {") - for i, method := range service.Method { - g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. - g.P(g.generateClientSignature(servName, method)) - } - g.P("}") - g.P() - - // Client structure. - g.P("type ", unexport(servName), "Client struct {") - g.P("cc *", grpcPkg, ".ClientConn") - g.P("}") - g.P() - - // NewClient factory. - g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") - g.P("return &", unexport(servName), "Client{cc}") - g.P("}") - g.P() - - var methodIndex, streamIndex int - serviceDescVar := "_" + servName + "_serviceDesc" - // Client method implementations. - for _, method := range service.Method { - var descExpr string - if !method.GetServerStreaming() && !method.GetClientStreaming() { - // Unary RPC method - descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) - methodIndex++ - } else { - // Streaming RPC method - descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) - streamIndex++ - } - g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) - } - - g.P("// Server API for ", servName, " service") - g.P() - - // Server interface. - serverType := servName + "Server" - g.P("type ", serverType, " interface {") - for i, method := range service.Method { - g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. - g.P(g.generateServerSignature(servName, method)) - } - g.P("}") - g.P() - - // Server registration. - g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") - g.P("s.RegisterService(&", serviceDescVar, `, srv)`) - g.P("}") - g.P() - - // Server handler implementations. 
- var handlerNames []string - for _, method := range service.Method { - hname := g.generateServerMethod(servName, method) - handlerNames = append(handlerNames, hname) - } - - // Service descriptor. - g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") - g.P("ServiceName: ", strconv.Quote(fullServName), ",") - g.P("HandlerType: (*", serverType, ")(nil),") - g.P("Methods: []", grpcPkg, ".MethodDesc{") - for i, method := range service.Method { - if method.GetServerStreaming() || method.GetClientStreaming() { - continue - } - g.P("{") - g.P("MethodName: ", strconv.Quote(method.GetName()), ",") - g.P("Handler: ", handlerNames[i], ",") - g.P("},") - } - g.P("},") - g.P("Streams: []", grpcPkg, ".StreamDesc{") - for i, method := range service.Method { - if !method.GetServerStreaming() && !method.GetClientStreaming() { - continue - } - g.P("{") - g.P("StreamName: ", strconv.Quote(method.GetName()), ",") - g.P("Handler: ", handlerNames[i], ",") - if method.GetServerStreaming() { - g.P("ServerStreams: true,") - } - if method.GetClientStreaming() { - g.P("ClientStreams: true,") - } - g.P("},") - } - g.P("},") - g.P("}") - g.P() -} - -// generateClientSignature returns the client-side signature for a method. 
-func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { - origMethName := method.GetName() - methName := generator.CamelCase(origMethName) - if reservedClientName[methName] { - methName += "_" - } - reqArg := ", in *" + g.typeName(method.GetInputType()) - if method.GetClientStreaming() { - reqArg = "" - } - respName := "*" + g.typeName(method.GetOutputType()) - if method.GetServerStreaming() || method.GetClientStreaming() { - respName = servName + "_" + generator.CamelCase(origMethName) + "Client" - } - return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) -} - -func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { - sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) - methName := generator.CamelCase(method.GetName()) - inType := g.typeName(method.GetInputType()) - outType := g.typeName(method.GetOutputType()) - - g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") - if !method.GetServerStreaming() && !method.GetClientStreaming() { - g.P("out := new(", outType, ")") - // TODO: Pass descExpr to Invoke. 
- g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`) - g.P("if err != nil { return nil, err }") - g.P("return out, nil") - g.P("}") - g.P() - return - } - streamType := unexport(servName) + methName + "Client" - g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`) - g.P("if err != nil { return nil, err }") - g.P("x := &", streamType, "{stream}") - if !method.GetClientStreaming() { - g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") - g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") - } - g.P("return x, nil") - g.P("}") - g.P() - - genSend := method.GetClientStreaming() - genRecv := method.GetServerStreaming() - genCloseAndRecv := !method.GetServerStreaming() - - // Stream auxiliary types and methods. - g.P("type ", servName, "_", methName, "Client interface {") - if genSend { - g.P("Send(*", inType, ") error") - } - if genRecv { - g.P("Recv() (*", outType, ", error)") - } - if genCloseAndRecv { - g.P("CloseAndRecv() (*", outType, ", error)") - } - g.P(grpcPkg, ".ClientStream") - g.P("}") - g.P() - - g.P("type ", streamType, " struct {") - g.P(grpcPkg, ".ClientStream") - g.P("}") - g.P() - - if genSend { - g.P("func (x *", streamType, ") Send(m *", inType, ") error {") - g.P("return x.ClientStream.SendMsg(m)") - g.P("}") - g.P() - } - if genRecv { - g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") - g.P("m := new(", outType, ")") - g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } - if genCloseAndRecv { - g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") - g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") - g.P("m := new(", outType, ")") - g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } -} - -// generateServerSignature returns 
the server-side signature for a method. -func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { - origMethName := method.GetName() - methName := generator.CamelCase(origMethName) - if reservedClientName[methName] { - methName += "_" - } - - var reqArgs []string - ret := "error" - if !method.GetServerStreaming() && !method.GetClientStreaming() { - reqArgs = append(reqArgs, contextPkg+".Context") - ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" - } - if !method.GetClientStreaming() { - reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) - } - if method.GetServerStreaming() || method.GetClientStreaming() { - reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") - } - - return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret -} - -func (g *grpc) generateServerMethod(servName string, method *pb.MethodDescriptorProto) string { - methName := generator.CamelCase(method.GetName()) - hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) - inType := g.typeName(method.GetInputType()) - outType := g.typeName(method.GetOutputType()) - - if !method.GetServerStreaming() && !method.GetClientStreaming() { - g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error) (interface{}, error) {") - g.P("in := new(", inType, ")") - g.P("if err := dec(in); err != nil { return nil, err }") - g.P("out, err := srv.(", servName, "Server).", methName, "(ctx, in)") - g.P("if err != nil { return nil, err }") - g.P("return out, nil") - g.P("}") - g.P() - return hname - } - streamType := unexport(servName) + methName + "Server" - g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") - if !method.GetClientStreaming() { - g.P("m := new(", inType, ")") - g.P("if err := stream.RecvMsg(m); err != nil { return err }") - g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})") - } else { - 
g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") - } - g.P("}") - g.P() - - genSend := method.GetServerStreaming() - genSendAndClose := !method.GetServerStreaming() - genRecv := method.GetClientStreaming() - - // Stream auxiliary types and methods. - g.P("type ", servName, "_", methName, "Server interface {") - if genSend { - g.P("Send(*", outType, ") error") - } - if genSendAndClose { - g.P("SendAndClose(*", outType, ") error") - } - if genRecv { - g.P("Recv() (*", inType, ", error)") - } - g.P(grpcPkg, ".ServerStream") - g.P("}") - g.P() - - g.P("type ", streamType, " struct {") - g.P(grpcPkg, ".ServerStream") - g.P("}") - g.P() - - if genSend { - g.P("func (x *", streamType, ") Send(m *", outType, ") error {") - g.P("return x.ServerStream.SendMsg(m)") - g.P("}") - g.P() - } - if genSendAndClose { - g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") - g.P("return x.ServerStream.SendMsg(m)") - g.P("}") - g.P() - } - if genRecv { - g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") - g.P("m := new(", inType, ")") - g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } - - return hname -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/link_grpc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/link_grpc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/link_grpc.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/link_grpc.go 2016-05-24 07:05:22.000000000 +0000 @@ -31,4 +31,4 @@ package main -import _ "github.com/golang/protobuf/protoc-gen-go/internal/grpc" +import _ "github.com/golang/protobuf/protoc-gen-go/grpc" diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -3,7 +3,7 @@ // DO NOT EDIT! /* -Package google_protobuf_compiler is a generated protocol buffer package. +Package plugin_go is a generated protocol buffer package. It is generated from these files: google/protobuf/compiler/plugin.proto @@ -12,7 +12,7 @@ CodeGeneratorRequest CodeGeneratorResponse */ -package google_protobuf_compiler +package plugin_go import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -33,7 +33,7 @@ // The .proto files that were explicitly listed on the command-line. The // code generator should generate code only for these files. Each file's // descriptor will be included in proto_file, below. - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` // The generator parameter passed on the command-line. Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` // FileDescriptorProtos for all files in files_to_generate and everything @@ -47,7 +47,7 @@ // the entire set into memory at once. However, as of this writing, this // is not similarly optimized on protoc's end -- it will store all fields in // memory at once before sending them to the plugin. 
- ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -162,7 +162,7 @@ // command line. // // If |insertion_point| is present, |name| must also be present. - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` // The file contents. Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -201,22 +201,25 @@ } var fileDescriptor0 = []byte{ - // 269 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4a, 0xc3, 0x40, - 0x10, 0x86, 0xa9, 0x46, 0xa4, 0x63, 0x25, 0x1a, 0x14, 0x43, 0xf1, 0x10, 0x44, 0xc1, 0x83, 0x6c, - 0x40, 0x3c, 0x78, 0xf2, 0x10, 0x45, 0xaf, 0xc5, 0x17, 0x08, 0x31, 0x9d, 0x86, 0x85, 0x74, 0x67, - 0x9d, 0xdd, 0x1c, 0x7d, 0x21, 0x9f, 0xd2, 0xc9, 0xa6, 0x15, 0x09, 0xf6, 0x14, 0xf8, 0xe7, 0xcf, - 0xf7, 0xcd, 0xb0, 0x70, 0xd3, 0x10, 0x35, 0x2d, 0xe6, 0x96, 0xc9, 0xd3, 0x47, 0xb7, 0xca, 0x6b, - 0x5a, 0x5b, 0xdd, 0x22, 0xe7, 0xb6, 0xed, 0x1a, 0x6d, 0x54, 0x18, 0x24, 0xe9, 0x50, 0x53, 0xdb, - 0x9a, 0xda, 0xd6, 0xe6, 0xd9, 0x18, 0xb0, 0x44, 0x57, 0xb3, 0xb6, 0x9e, 0x78, 0x68, 0x5f, 0x7d, - 0xc1, 0xd9, 0x33, 0x2d, 0xf1, 0x0d, 0x0d, 0x72, 0x25, 0xf1, 0x3b, 0x7e, 0x76, 0xe8, 0x7c, 0x92, - 0xc2, 0xc9, 0x4a, 0x10, 0xa5, 0xa7, 0xb2, 0x19, 0x66, 0x98, 0x4e, 0xb2, 0xfd, 0xdb, 0x69, 0x72, - 0x0a, 0x53, 0x5b, 0x71, 0xb5, 0x46, 0x8f, 0x9c, 0xee, 0x65, 0x13, 0x89, 0x1e, 0x01, 0x02, 0xad, - 0xec, 0x7f, 0x49, 0x63, 0xa9, 0x1d, 0xdd, 0x5f, 0xab, 0xf1, 0x56, 0xaf, 0x32, 0x7c, 0xf9, 0xf5, - 0x2f, 0x82, 0xfe, 0x7b, 0x02, 0xe7, 0x23, 0xbf, 0xb3, 
0x64, 0x1c, 0x26, 0xc7, 0x70, 0x80, 0xcc, - 0xc4, 0x62, 0xed, 0x15, 0x05, 0x44, 0x7f, 0xe0, 0x0f, 0x6a, 0xd7, 0xc9, 0xea, 0x5f, 0x5a, 0x70, - 0xcf, 0x9f, 0x20, 0xea, 0xbf, 0xc9, 0x0c, 0x22, 0x23, 0xfb, 0x6f, 0xc8, 0x17, 0x10, 0x6b, 0xa9, - 0xb0, 0xd7, 0x64, 0x4a, 0x4b, 0xda, 0xf8, 0xcd, 0x55, 0x31, 0x1c, 0xd6, 0x64, 0x3c, 0x4a, 0x10, - 0xf7, 0x41, 0x71, 0x07, 0x97, 0xa2, 0xd9, 0xa9, 0x2e, 0x66, 0x8b, 0xf0, 0x2a, 0xe1, 0x32, 0xf7, - 0x13, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x18, 0x2a, 0x2a, 0xbd, 0x01, 0x00, 0x00, + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0xd1, 0x4a, 0xfb, 0x30, + 0x14, 0xc6, 0xe9, 0xff, 0x3f, 0x91, 0x1d, 0x65, 0x93, 0x30, 0xa1, 0x8c, 0x5d, 0x94, 0xa1, 0xb8, + 0xab, 0x14, 0x44, 0xf0, 0x7e, 0x13, 0xf5, 0xb2, 0x14, 0xaf, 0x04, 0x29, 0xb5, 0x3b, 0x2b, 0x81, + 0x2e, 0x27, 0xa6, 0xe9, 0x13, 0xf9, 0x4e, 0x3e, 0x8f, 0x49, 0xda, 0x4e, 0x29, 0xee, 0xaa, 0x3d, + 0xdf, 0xf9, 0xe5, 0x3b, 0x5f, 0x72, 0xe0, 0xba, 0x24, 0x2a, 0x2b, 0x8c, 0x95, 0x26, 0x43, 0xef, + 0xcd, 0x2e, 0x2e, 0x68, 0xaf, 0x44, 0x85, 0x3a, 0x56, 0x55, 0x53, 0x0a, 0xc9, 0x7d, 0x83, 0x85, + 0x2d, 0xc6, 0x7b, 0x8c, 0xf7, 0xd8, 0x3c, 0x1a, 0x1a, 0x6c, 0xb1, 0x2e, 0xb4, 0x50, 0x86, 0x74, + 0x4b, 0x2f, 0x3f, 0x03, 0x98, 0x6d, 0x68, 0x8b, 0x4f, 0x28, 0x51, 0xe7, 0x56, 0x4f, 0xf1, 0xa3, + 0xc1, 0xda, 0xb0, 0x15, 0x5c, 0xec, 0xac, 0x47, 0x66, 0x28, 0x2b, 0xdb, 0x1e, 0x86, 0x41, 0xf4, + 0x7f, 0x35, 0x4e, 0x27, 0x4e, 0x7f, 0xa1, 0xee, 0x04, 0xb2, 0x05, 0x8c, 0x55, 0xae, 0xf3, 0x3d, + 0x1a, 0xd4, 0xe1, 0xbf, 0x28, 0xb0, 0xc8, 0x8f, 0xc0, 0x36, 0x00, 0x7e, 0x52, 0xe6, 0x4e, 0x85, + 0x53, 0xeb, 0x70, 0x76, 0x7b, 0xc5, 0x87, 0x89, 0x1f, 0x6d, 0xf3, 0xe1, 0x90, 0x2d, 0x71, 0xb2, + 0x35, 0x71, 0x1f, 0xd7, 0x59, 0x7e, 0x05, 0x70, 0x39, 0x48, 0x59, 0x2b, 0x92, 0x35, 0xb2, 0x19, + 0x9c, 0xa0, 0xd6, 0xa4, 0x6d, 0x36, 0x37, 0xb8, 0x2d, 0xd8, 0x33, 0x8c, 0x7e, 0x8d, 0xbb, 0xe3, + 0xc7, 0x1e, 0x88, 0xff, 0x69, 0xea, 0xd3, 0xa4, 0xde, 0x61, 
0xfe, 0x06, 0x23, 0x57, 0x31, 0x06, + 0x23, 0x69, 0x6f, 0xd4, 0x8d, 0xf1, 0xff, 0xec, 0x06, 0xa6, 0xc2, 0xe2, 0xda, 0x08, 0x92, 0x99, + 0x22, 0x21, 0x4d, 0x77, 0xfd, 0xc9, 0x41, 0x4e, 0x9c, 0xca, 0x42, 0x38, 0x2d, 0x48, 0x1a, 0xb4, + 0xc0, 0xd4, 0x03, 0x7d, 0xb9, 0xbe, 0x87, 0x85, 0xcd, 0x72, 0x34, 0xdf, 0xfa, 0x3c, 0xf1, 0x8b, + 0xf6, 0x0f, 0x52, 0xbf, 0x8e, 0xdb, 0xb5, 0x67, 0x25, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0x83, + 0x7b, 0x5c, 0x7c, 0x1b, 0x02, 0x00, 0x00, } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -183,14 +183,14 @@ Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` // optional imp.ImportedMessage.Owner owner = 6; Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` - Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` // This is a map field. It will generate map[int32]string. 
- NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // This is a map field whose value type is a message. - MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` // This field should not conflict with any getters. - GetKey_ *string `protobuf:"bytes,16,opt,name=get_key" json:"get_key,omitempty"` + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -266,7 +266,7 @@ } type Request_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -283,7 +283,7 @@ type Reply struct { Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` - CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys" json:"compact_keys,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -321,9 +321,9 @@ } type Reply_Entry struct { - KeyThatNeeds_1234Camel_CasIng *int64 
`protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` - XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2" json:"_my_field_name_2,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -474,7 +474,7 @@ } type Communique struct { - MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry" json:"make_me_cry,omitempty"` + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` // This is a oneof, called "union". // // Types that are valid to be assigned to Union: @@ -510,7 +510,7 @@ Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` } type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,oneof"` + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` } type Communique_Height struct { Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` @@ -528,7 +528,7 @@ Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` } type Communique_Somegroup struct { - Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,oneof"` + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` } func (*Communique_Number) isCommunique_Union() {} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden 2016-05-24 07:05:22.000000000 +0000 @@ -183,14 +183,14 @@ Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` // optional imp.ImportedMessage.Owner owner = 6; Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` - Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` // This is a map field. It will generate map[int32]string. - NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // This is a map field whose value type is a message. - MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` // This field should not conflict with any getters. 
- GetKey_ *string `protobuf:"bytes,16,opt,name=get_key" json:"get_key,omitempty"` + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -266,7 +266,7 @@ } type Request_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -283,7 +283,7 @@ type Reply struct { Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` - CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys" json:"compact_keys,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` XXX_extensions map[int32]proto.Extension `json:"-"` XXX_unrecognized []byte `json:"-"` } @@ -321,9 +321,9 @@ } type Reply_Entry struct { - KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` - XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2" json:"_my_field_name_2,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -474,7 +474,7 @@ } type Communique struct { - MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry" json:"make_me_cry,omitempty"` + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` // This is a oneof, called "union". 
// // Types that are valid to be assigned to Union: @@ -510,7 +510,7 @@ Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` } type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,oneof"` + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` } type Communique_Height struct { Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` @@ -528,7 +528,7 @@ Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` } type Communique_Somegroup struct { - Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,oneof"` + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` } func (*Communique_Number) isCommunique_Union() {} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any/any.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any/any.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any/any.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any/any.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,111 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/any/any.proto +// DO NOT EDIT! + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 + +// `Any` contains an arbitrary serialized message along with a URL +// that describes the type of the serialized message. +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized message. + // + // For URLs which use the schema `http`, `https`, or no schema, the + // following restrictions and interpretations apply: + // + // * If no schema is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemas other than `http`, `https` (or the empty schema) might be + // used with implementation specific semantics. 
+ // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be valid serialized data of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +var fileDescriptor0 = []byte{ + // 184 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0x04, 0x14, 0xe7, 0x09, 0x82, 0x70, 0x9c, + 0x8a, 0xb8, 0x84, 0x81, 0x96, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0x01, 0x34, 0x2c, 0x00, 0xc4, 0x09, + 0x60, 0x8c, 0x52, 0x25, 0xca, 0x71, 0x0b, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, + 0x62, 0x92, 0x73, 0x87, 0x98, 0x16, 0x00, 0x55, 0xa5, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, + 0x5f, 0x9e, 0x17, 0x02, 0x52, 0x9d, 0xc4, 0x06, 0xd6, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, + 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any/any.proto 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any/any.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any/any.proto 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any/any.proto 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,100 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/ptypes/any"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized message along with a URL +// that describes the type of the serialized message. +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized message. 
+ // + // For URLs which use the schema `http`, `https`, or no schema, the + // following restrictions and interpretations apply: + // + // * If no schema is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemas other than `http`, `https` (or the empty schema) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be valid serialized data of the above specified type. + bytes value = 2; +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,136 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. 
+func AnyMessageName(any *any.Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. 
+func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/any_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/ptypes/any" +) + +func TestMarshalUnmarshal(t *testing.T) { + orig := &any.Any{Value: []byte("test")} + + packed, err := MarshalAny(orig) + if err != nil { + t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err) + } + + unpacked := &any.Any{} + err = UnmarshalAny(packed, unpacked) + if err != nil || !proto.Equal(unpacked, orig) { + t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig) + } +} + +func TestIs(t *testing.T) { + a, err := MarshalAny(&pb.FileDescriptorProto{}) + if err != nil { + t.Fatal(err) + } + if Is(a, &pb.DescriptorProto{}) { + t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") + } + if !Is(a, &pb.FileDescriptorProto{}) { + t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") + } +} + +func TestIsDifferentUrlPrefixes(t *testing.T) { + m := &pb.FileDescriptorProto{} + a 
:= &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)} + if !Is(a, m) { + t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m)) + } +} + +func TestUnmarshalDynamic(t *testing.T) { + want := &pb.FileDescriptorProto{Name: proto.String("foo")} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + var got DynamicAny + if err := UnmarshalAny(a, &got); err != nil { + t.Fatal(err) + } + if !proto.Equal(got.Message, want) { + t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want) + } +} + +func TestEmpty(t *testing.T) { + want := &pb.FileDescriptorProto{} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + got, err := Empty(a) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, want) { + t.Errorf("unequal empty message, got %q, want %q", got, want) + } + + // that's a valid type_url for a message which shouldn't be linked into this + // test binary. We want an error. + a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask" + if _, err := Empty(a); err == nil { + t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/doc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/doc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/doc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. 
+*/ +package ptypes diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration/duration.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration/duration.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration/duration.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration/duration.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,107 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto +// DO NOT EDIT! + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/duration/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.ProtoPackageIsVersion1 + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +var fileDescriptor0 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, + 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, + 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, + 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0xa0, 0x38, 0x6b, 0x10, + 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x0c, 0x74, 0x82, 0x1e, 0x9a, 0x91, 0x4e, 0xbc, 0x30, 0x03, 0x03, + 0x40, 0x22, 0x01, 0x8c, 0x51, 0x5a, 0xc4, 0xbb, 0x77, 0x01, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, + 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0x73, 0x03, 0xa0, 0x4a, 0xf5, 0xc2, 0x53, 0x73, 0x72, + 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x5a, 0x92, 0xd8, 0xc0, 0x66, 0x18, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration/duration.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration/duration.proto --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration/duration.proto 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration/duration.proto 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,97 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/ptypes/duration"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. 
+ int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. 
+ if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/duration_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,121 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + minGoSeconds = math.MinInt64 / int64(1e9) + maxGoSeconds = math.MaxInt64 / int64(1e9) +) + +var durationTests = []struct { + proto *durpb.Duration + isValid bool + inRange bool + dur time.Duration +}{ + // The zero duration. 
+ {&durpb.Duration{0, 0}, true, true, 0}, + // Some ordinary non-zero durations. + {&durpb.Duration{100, 0}, true, true, 100 * time.Second}, + {&durpb.Duration{-100, 0}, true, true, -100 * time.Second}, + {&durpb.Duration{100, 987}, true, true, 100*time.Second + 987}, + {&durpb.Duration{-100, -987}, true, true, -(100*time.Second + 987)}, + // The largest duration representable in Go. + {&durpb.Duration{maxGoSeconds, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64}, + // The smallest duration representable in Go. + {&durpb.Duration{minGoSeconds, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64}, + {nil, false, false, 0}, + {&durpb.Duration{-100, 987}, false, false, 0}, + {&durpb.Duration{100, -987}, false, false, 0}, + {&durpb.Duration{math.MinInt64, 0}, false, false, 0}, + {&durpb.Duration{math.MaxInt64, 0}, false, false, 0}, + // The largest valid duration. + {&durpb.Duration{maxSeconds, 1e9 - 1}, true, false, 0}, + // The smallest valid duration. + {&durpb.Duration{minSeconds, -(1e9 - 1)}, true, false, 0}, + // The smallest invalid duration above the valid range. + {&durpb.Duration{maxSeconds + 1, 0}, false, false, 0}, + // The largest invalid duration below the valid range. + {&durpb.Duration{minSeconds - 1, -(1e9 - 1)}, false, false, 0}, + // One nanosecond past the largest duration representable in Go. + {&durpb.Duration{maxGoSeconds, int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0}, + // One nanosecond past the smallest duration representable in Go. + {&durpb.Duration{minGoSeconds, int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0}, + // One second past the largest duration representable in Go. + {&durpb.Duration{maxGoSeconds + 1, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0}, + // One second past the smallest duration representable in Go. 
+ {&durpb.Duration{minGoSeconds - 1, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0}, +} + +func TestValidateDuration(t *testing.T) { + for _, test := range durationTests { + err := validateDuration(test.proto) + gotValid := (err == nil) + if gotValid != test.isValid { + t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid) + } + } +} + +func TestDuration(t *testing.T) { + for _, test := range durationTests { + got, err := Duration(test.proto) + gotOK := (err == nil) + wantOK := test.isValid && test.inRange + if gotOK != wantOK { + t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK) + } + if err == nil && got != test.dur { + t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur) + } + } +} + +func TestDurationProto(t *testing.T) { + for _, test := range durationTests { + if test.isValid && test.inRange { + got := DurationProto(test.dur) + if !proto.Equal(got, test.proto) { + t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto) + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/empty/empty.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/empty/empty.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/empty/empty.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/empty/empty.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto +// DO NOT EDIT! + +/* +Package empty is a generated protocol buffer package. 
+ +It is generated from these files: + github.com/golang/protobuf/ptypes/empty/empty.proto + +It has these top-level messages: + Empty +*/ +package empty + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.ProtoPackageIsVersion1 + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +var fileDescriptor0 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd, + 0x05, 0x32, 0x20, 0xa4, 0x1e, 0x58, 0x4e, 0x88, 0x3f, 0x3d, 0x3f, 0x3f, 0x3d, 0x27, 0x55, 0x0f, + 0xa6, 0x52, 0x89, 0x9d, 0x8b, 0xd5, 0x15, 0x24, 0xef, 0x54, 0xc9, 0x25, 0x0c, 0x34, 0x49, 0x0f, + 0x4d, 0xde, 0x89, 0x0b, 0x2c, 0x1b, 0x00, 0xe2, 0x06, 0x30, 0x46, 0xa9, 0x13, 0x69, 
0xe7, 0x02, + 0x46, 0xc6, 0x1f, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x0c, 0x0d, 0x80, 0x2a, 0xd5, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, + 0x69, 0x49, 0x62, 0x03, 0x9b, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb, 0xf4, 0x0e, + 0xd2, 0x00, 0x00, 0x00, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/empty/empty.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/empty/empty.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/empty/empty.proto 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/empty/empty.proto 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,53 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/ptypes/empty"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/regen.sh aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/regen.sh --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/regen.sh 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/regen.sh 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,72 @@ +#!/bin/bash -e +# +# This script fetches and rebuilds the "well-known types" protocol buffers. 
+# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed. + +PKG=github.com/golang/protobuf/ptypes +UPSTREAM=https://github.com/google/protobuf +UPSTREAM_SUBDIR=src/google/protobuf +PROTO_FILES=' + any.proto + duration.proto + empty.proto + struct.proto + timestamp.proto + wrappers.proto +' + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +echo 1>&2 "fetching latest protos... " +git clone -q $UPSTREAM $tmpdir +# Pass 1: build mapping from upstream filename to our filename. +declare -A filename_map +for f in $(cd $PKG && find * -name '*.proto'); do + echo -n 1>&2 "looking for latest version of $f... " + up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/) + echo 1>&2 $up + if [ $(echo $up | wc -w) != "1" ]; then + die "not exactly one match" + fi + filename_map[$up]=$f +done +# Pass 2: copy files, making necessary adjustments. +for up in "${!filename_map[@]}"; do + f=${filename_map[$up]} + shortname=$(basename $f | sed 's,\.proto$,,') + cat $tmpdir/$UPSTREAM_SUBDIR/$up | + # Adjust proto package. + # TODO(dsymonds): Remove when the right go_package options are upstream. + sed '/^package /a option go_package = "github.com\/golang\/protobuf\/ptypes\/'${shortname}'";' | + # Unfortunately "package struct" and "package type" don't work. + sed '/option go_package/s,struct",struct;structpb",' | + cat > $PKG/$f +done + +# Run protoc once per package. 
+for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done +echo 1>&2 "All OK" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/struct/struct.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/struct/struct.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/struct/struct.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/struct/struct.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,376 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/struct/struct.proto +// DO NOT EDIT! + +/* +Package structpb is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/struct/struct.proto + +It has these top-level messages: + Struct + Value + ListValue +*/ +package structpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.ProtoPackageIsVersion1 + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. 
+ NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + 
+func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + 
return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += 
proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +var fileDescriptor0 = []byte{ + // 412 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0x49, 0x1b, 0xcc, 0x8b, 0xd4, 0x12, 0x41, 0x4b, 0x05, 0x95, 0xf6, 0x52, 0x44, + 0x12, 0xac, 0x08, 0x62, 0xbd, 0x18, 0xa8, 0x15, 0x0c, 0x25, 0x46, 0x5b, 0xc1, 0x4b, 0x69, 0xda, + 0x34, 0x86, 0x4e, 0x67, 0x42, 0x7e, 0x28, 0x3d, 0xfa, 0x5f, 0x78, 0x5c, 0xf6, 0xb8, 0xc7, 0xfd, + 0x0b, 0x77, 0x7e, 0x24, 0xd9, 0xa5, 0xa5, 0xb0, 0xa7, 0x99, 0xf7, 0x9d, 
0xcf, 0xfb, 0xce, 0x7b, + 0x6f, 0x06, 0xde, 0x45, 0x71, 0xfe, 0xbb, 0x08, 0xac, 0x35, 0xdd, 0xdb, 0x11, 0xc5, 0x2b, 0x12, + 0xd9, 0x49, 0x4a, 0x73, 0x1a, 0x14, 0x5b, 0x3b, 0xc9, 0x0f, 0x49, 0x98, 0xd9, 0x59, 0x9e, 0x16, + 0xeb, 0xbc, 0x5c, 0x2c, 0x71, 0x6a, 0x3e, 0x8a, 0x28, 0x8d, 0x70, 0x68, 0x55, 0x6c, 0xff, 0x3f, + 0x02, 0xed, 0xbb, 0x20, 0xcc, 0x31, 0x68, 0xdb, 0x38, 0xc4, 0x9b, 0xac, 0x8b, 0x5e, 0xaa, 0x43, + 0x63, 0x34, 0xb0, 0x8e, 0x60, 0x4b, 0x82, 0xd6, 0x67, 0x41, 0x4d, 0x48, 0x9e, 0x1e, 0xfc, 0x32, + 0xa5, 0xf7, 0x0d, 0x8c, 0x3b, 0xb2, 0xd9, 0x01, 0x75, 0x17, 0x1e, 0x98, 0x11, 0x1a, 0xea, 0x3e, + 0xdf, 0x9a, 0xaf, 0xa1, 0xf5, 0x67, 0x85, 0x8b, 0xb0, 0xab, 0x30, 0xcd, 0x18, 0x3d, 0x39, 0x31, + 0x5f, 0xf0, 0x53, 0x5f, 0x42, 0x1f, 0x94, 0xf7, 0xa8, 0x7f, 0xad, 0x40, 0x4b, 0x88, 0xac, 0x32, + 0x20, 0x05, 0xc6, 0x4b, 0x69, 0xc0, 0x4d, 0xdb, 0xa3, 0xde, 0x89, 0xc1, 0x8c, 0x21, 0x82, 0xff, + 0xd2, 0xf0, 0x75, 0x52, 0x05, 0xe6, 0x00, 0x1e, 0x92, 0x62, 0x1f, 0x84, 0xe9, 0xf2, 0xf6, 0x7e, + 0xc4, 0x10, 0x43, 0xaa, 0x35, 0xc4, 0xe6, 0x14, 0x93, 0xa8, 0x84, 0x54, 0x5e, 0x38, 0x87, 0xa4, + 0x2a, 0xa1, 0x17, 0x00, 0x01, 0xa5, 0x55, 0x19, 0x4d, 0x86, 0x3c, 0xe0, 0x57, 0x71, 0x4d, 0x02, + 0x1f, 0x85, 0x0b, 0x1b, 0x51, 0x89, 0xb4, 0x44, 0xab, 0x4f, 0xcf, 0xcc, 0xb1, 0xb4, 0x67, 0xbb, + 0xba, 0x4b, 0x1c, 0x67, 0x55, 0xae, 0x26, 0x72, 0x4f, 0xbb, 0x74, 0x19, 0x52, 0x77, 0x89, 0xab, + 0xc0, 0xd1, 0xa0, 0xb9, 0x8b, 0xc9, 0xa6, 0x3f, 0x06, 0xbd, 0x26, 0x4c, 0x0b, 0x34, 0x61, 0x56, + 0xbd, 0xe8, 0xb9, 0xa1, 0x97, 0xd4, 0xab, 0x67, 0xa0, 0xd7, 0x43, 0x34, 0xdb, 0x00, 0xb3, 0xb9, + 0xeb, 0x2e, 0x17, 0x9f, 0xdc, 0xf9, 0xa4, 0xd3, 0x70, 0xfe, 0x21, 0x78, 0xcc, 0x7e, 0xdb, 0xb1, + 0x85, 0x63, 0xc8, 0x6e, 0x3c, 0x1e, 0x7b, 0xe8, 0xd7, 0x9b, 0xfb, 0x7e, 0xcc, 0xb1, 0x5c, 0x92, + 0xe0, 0x02, 0xa1, 0x4b, 0x45, 0x9d, 0x7a, 0xce, 0x95, 0xf2, 0x7c, 0x2a, 0xcd, 0xbd, 0xaa, 0xbe, + 0x9f, 0x21, 0xc6, 0x5f, 0x09, 0xfd, 0x4b, 0x7e, 0xf0, 0xcc, 0x40, 0x13, 0x56, 0x6f, 0x6f, 0x02, + 0x00, 0x00, 
0xff, 0xff, 0xbc, 0xcf, 0x6d, 0x50, 0xfe, 0x02, 0x00, 0x00, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/struct/struct.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/struct/struct.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/struct/struct.proto 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/struct/struct.proto 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. 
+ string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,120 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +// DO NOT EDIT! + +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 
+// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +var fileDescriptor0 = []byte{ + // 192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, + 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 
0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0xa0, 0x38, 0x6b, + 0x10, 0x84, 0xe3, 0xd4, 0xc8, 0xc8, 0x25, 0x0c, 0x74, 0x86, 0x1e, 0x9a, 0xa1, 0x4e, 0x7c, 0x70, + 0x23, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, 0xda, 0x24, 0x38, 0x7a, 0x01, 0x23, 0xe3, 0x0f, 0x46, + 0xc6, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x86, 0x07, 0x40, 0x95, + 0xeb, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0xb4, 0x25, 0xb1, 0x81, + 0xcd, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, 0x00, 0x00, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,111 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
+// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. 
Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. 
+// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. 
+func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/timestamp_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,138 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +var tests = []struct { + ts *tspb.Timestamp + valid bool + t time.Time +}{ + // The timestamp representing the Unix epoch date. + {&tspb.Timestamp{0, 0}, true, utcDate(1970, 1, 1)}, + // The smallest representable timestamp. + {&tspb.Timestamp{math.MinInt64, math.MinInt32}, false, + time.Unix(math.MinInt64, math.MinInt32).UTC()}, + // The smallest representable timestamp with non-negative nanos. + {&tspb.Timestamp{math.MinInt64, 0}, false, time.Unix(math.MinInt64, 0).UTC()}, + // The earliest valid timestamp. + {&tspb.Timestamp{minValidSeconds, 0}, true, utcDate(1, 1, 1)}, + //"0001-01-01T00:00:00Z"}, + // The largest representable timestamp. + {&tspb.Timestamp{math.MaxInt64, math.MaxInt32}, false, + time.Unix(math.MaxInt64, math.MaxInt32).UTC()}, + // The largest representable timestamp with nanos in range. + {&tspb.Timestamp{math.MaxInt64, 1e9 - 1}, false, + time.Unix(math.MaxInt64, 1e9-1).UTC()}, + // The largest valid timestamp. + {&tspb.Timestamp{maxValidSeconds - 1, 1e9 - 1}, true, + time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, + // The smallest invalid timestamp that is larger than the valid range. + {&tspb.Timestamp{maxValidSeconds, 0}, false, time.Unix(maxValidSeconds, 0).UTC()}, + // A date before the epoch. 
+ {&tspb.Timestamp{-281836800, 0}, true, utcDate(1961, 1, 26)}, + // A date after the epoch. + {&tspb.Timestamp{1296000000, 0}, true, utcDate(2011, 1, 26)}, + // A date after the epoch, in the middle of the day. + {&tspb.Timestamp{1296012345, 940483}, true, + time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, +} + +func TestValidateTimestamp(t *testing.T) { + for _, s := range tests { + got := validateTimestamp(s.ts) + if (got == nil) != s.valid { + t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid) + } + } +} + +func TestTimestamp(t *testing.T) { + for _, s := range tests { + got, err := Timestamp(s.ts) + if (err == nil) != s.valid { + t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid) + } else if s.valid && got != s.t { + t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t) + } + } + // Special case: a nil Timestamp is an error, but returns the 0 Unix time. + got, err := Timestamp(nil) + want := time.Unix(0, 0).UTC() + if got != want { + t.Errorf("Timestamp(nil) = %v, want %v", got, want) + } + if err == nil { + t.Errorf("Timestamp(nil) error = nil, expected error") + } +} + +func TestTimestampProto(t *testing.T) { + for _, s := range tests { + got, err := TimestampProto(s.t) + if (err == nil) != s.valid { + t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid) + } else if s.valid && !proto.Equal(got, s.ts) { + t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts) + } + } + // No corresponding special case here: no time.Time results in a nil Timestamp. +} + +func TestTimestampString(t *testing.T) { + for _, test := range []struct { + ts *tspb.Timestamp + want string + }{ + // Not much testing needed because presumably time.Format is + // well-tested. 
+ {&tspb.Timestamp{0, 0}, "1970-01-01T00:00:00Z"}, + {&tspb.Timestamp{minValidSeconds - 1, 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"}, + } { + got := TimestampString(test.ts) + if got != test.want { + t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want) + } + } +} + +func utcDate(year, month, day int) time.Time { + return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto +// DO NOT EDIT! + +/* +Package wrappers is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/wrappers/wrappers.proto + +It has these top-level messages: + DoubleValue + FloatValue + Int64Value + UInt64Value + Int32Value + UInt32Value + BoolValue + StringValue + BytesValue +*/ +package wrappers + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.ProtoPackageIsVersion1 + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. 
+ Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +var fileDescriptor0 = []byte{ + // 258 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f, + 0x4a, 0x2c, 0x28, 0x48, 0x2d, 0x42, 0x30, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3, + 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3, + 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26, + 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x00, 0x2d, 0x0b, 0xc5, + 0xa5, 0x88, 0x05, 0xd5, 0x20, 0x63, 0x23, 0x2c, 0x6a, 
0x58, 0xd1, 0x0c, 0xc2, 0xaa, 0x88, 0x17, + 0xa6, 0x48, 0x91, 0x8b, 0xd3, 0x29, 0x3f, 0x3f, 0x07, 0x8b, 0x12, 0x0e, 0x24, 0x73, 0x82, 0x4b, + 0x8a, 0x32, 0xf3, 0xd2, 0xb1, 0x28, 0xe2, 0x44, 0x72, 0x90, 0x53, 0x65, 0x49, 0x6a, 0x31, 0x16, + 0x35, 0x3c, 0x50, 0x35, 0x4e, 0xf5, 0x5c, 0xc2, 0xc0, 0xd8, 0xd0, 0x43, 0x0b, 0x5d, 0x27, 0xde, + 0x70, 0x68, 0xf0, 0x07, 0x80, 0x44, 0x02, 0x18, 0xa3, 0xb4, 0x88, 0x8f, 0xba, 0x05, 0x8c, 0x8c, + 0x3f, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x18, 0x1d, + 0x00, 0x55, 0xad, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, 0x17, 0x02, 0xd2, 0x95, + 0xc4, 0x06, 0x36, 0xc6, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xdf, 0x64, 0x4b, 0x1c, 0x02, + 0x00, 0x00, +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,119 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. 
+ float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/README.md 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/golang/protobuf/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -191,3 +191,9 @@ the --go_out argument to protoc: protoc --go_out=plugins=grpc:. *.proto + +## Plugins ## + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/api.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/api.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/api.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/api.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. 
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/api_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/api_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/api_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,32 @@ +package jmespath + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidPrecompiledExpressionSearches(t *testing.T) { + assert := assert.New(t) + data := make(map[string]interface{}) + data["foo"] = "bar" + precompiled, err := Compile("foo") + assert.Nil(err) + result, err := precompiled.Search(data) + assert.Nil(err) + assert.Equal("bar", result) +} + +func TestInvalidPrecompileErrors(t *testing.T) { + assert := assert.New(t) + _, err := Compile("not a valid expression") + assert.NotNil(err) +} + +func TestInvalidMustCompilePanics(t *testing.T) { + defer func() { + r := recover() + assert.NotNil(t, r) + }() + MustCompile("not a valid expression") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/astnodetype_string.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/astnodetype_string.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/astnodetype_string.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/astnodetype_string.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT 
+ +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/cmd/jpgo/main.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/cmd/jpgo/main.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/cmd/jpgo/main.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/cmd/jpgo/main.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +/*Basic command line interface for debug and testing purposes. + +Examples: + +Only print the AST for the expression: + + jp.go -ast "foo.bar.baz" + +Evaluate the JMESPath expression against JSON data from a file: + + jp.go -input /tmp/data.json "foo.bar.baz" + +This program can also be used as an executable to the jp-compliance +runner (github.com/jmespath/jmespath.test). + +*/ +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "os" +) + +import ( + "encoding/json" + + "github.com/jmespath/go-jmespath" +) + +func errMsg(msg string, a ...interface{}) int { + fmt.Fprintf(os.Stderr, msg, a...) 
+ fmt.Fprintln(os.Stderr) + return 1 +} + +func run() int { + + astOnly := flag.Bool("ast", false, "Print the AST for the input expression and exit.") + inputFile := flag.String("input", "", "Filename containing JSON data to search. If not provided, data is read from stdin.") + + flag.Parse() + args := flag.Args() + if len(args) != 1 { + fmt.Fprintf(os.Stderr, "Usage:\n\n") + flag.PrintDefaults() + return errMsg("\nError: expected a single argument (the JMESPath expression).") + } + + expression := args[0] + parser := jmespath.NewParser() + parsed, err := parser.Parse(expression) + if err != nil { + if syntaxError, ok := err.(jmespath.SyntaxError); ok { + return errMsg("%s\n%s\n", syntaxError, syntaxError.HighlightLocation()) + } + return errMsg("%s", err) + } + if *astOnly { + fmt.Println("") + fmt.Printf("%s\n", parsed) + return 0 + } + + var inputData []byte + if *inputFile != "" { + inputData, err = ioutil.ReadFile(*inputFile) + if err != nil { + return errMsg("Error loading file %s: %s", *inputFile, err) + } + } else { + // If an input data file is not provided then we read the + // data from stdin. 
+ inputData, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return errMsg("Error reading from stdin: %s", err) + } + } + var data interface{} + json.Unmarshal(inputData, &data) + result, err := jmespath.Search(expression, data) + if err != nil { + return errMsg("Error executing expression: %s", err) + } + toJSON, err := json.MarshalIndent(result, "", " ") + if err != nil { + return errMsg("Error serializing result to JSON: %s", err) + } + fmt.Println(string(toJSON)) + return 0 +} + +func main() { + os.Exit(run()) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/basic.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/basic.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/basic.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/basic.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,96 @@ +[{ + "given": + {"foo": {"bar": {"baz": "correct"}}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": {"baz": "correct"}} + }, + { + "expression": "foo.bar", + "result": {"baz": "correct"} + }, + { + "expression": "foo.bar.baz", + "result": "correct" + }, + { + "expression": "foo\n.\nbar\n.baz", + "result": "correct" + }, + { + "expression": "foo.bar.baz.bad", + "result": null + }, + { + "expression": "foo.bar.bad", + "result": null + }, + { + "expression": "foo.bad", + "result": null + }, + { + "expression": "bad", + "result": null + }, + { + "expression": "bad.morebad.morebad", + "result": null + } + ] +}, +{ + "given": + {"foo": {"bar": ["one", "two", "three"]}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": ["one", "two", "three"]} + }, + { + "expression": "foo.bar", + "result": ["one", "two", "three"] + } + ] +}, +{ + "given": ["one", "two", "three"], + "cases": [ + { + "expression": "one", + 
"result": null + }, + { + "expression": "two", + "result": null + }, + { + "expression": "three", + "result": null + }, + { + "expression": "one.two", + "result": null + } + ] +}, +{ + "given": + {"foo": {"1": ["one", "two", "three"], "-1": "bar"}}, + "cases": [ + { + "expression": "foo.\"1\"", + "result": ["one", "two", "three"] + }, + { + "expression": "foo.\"1\"[0]", + "result": "one" + }, + { + "expression": "foo.\"-1\"", + "result": "bar" + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/boolean.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/boolean.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/boolean.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/boolean.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,257 @@ +[ + { + "given": { + "outer": { + "foo": "foo", + "bar": "bar", + "baz": "baz" + } + }, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] + }, + { + "given": { + "outer": { + "foo": "foo", + "bool": false, + "empty_list": [], + "empty_string": "" + } + }, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + 
{ + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] + }, + { + "given": { + "True": true, + "False": false, + "Number": 5, + "EmptyList": [], + "Zero": 0 + }, + "cases": [ + { + "expression": "True && False", + "result": false + }, + { + "expression": "False && True", + "result": false + }, + { + "expression": "True && True", + "result": true + }, + { + "expression": "False && False", + "result": false + }, + { + "expression": "True && Number", + "result": 5 + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "Number && False", + "result": false + }, + { + "expression": "Number && EmptyList", + "result": [] + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "EmptyList && True", + "result": [] + }, + { + "expression": "EmptyList && False", + "result": [] + }, + { + "expression": "True || False", + "result": true + }, + { + "expression": "True || True", + "result": true + }, + { + "expression": "False || True", + "result": true + }, + { + "expression": "False || False", + "result": false + }, + { + "expression": "Number || EmptyList", + "result": 5 + }, + { + "expression": "Number || True", + "result": 5 + }, + { + "expression": "Number || True && False", + "result": 5 + }, + { + "expression": "(Number || True) && False", + "result": false + }, + { + "expression": "Number || (True && False)", + "result": 5 + }, + { + "expression": "!True", + "result": false + }, + { + "expression": "!False", + "result": true + }, + { + "expression": "!Number", + "result": false + }, + { + "expression": "!EmptyList", + "result": true + }, + { + "expression": "True && !False", + "result": true + }, + { + "expression": "True && !EmptyList", + "result": true + }, + { + "expression": "!False && !EmptyList", + "result": true + }, + { + "expression": "!(True && False)", + "result": true + }, + { + "expression": "!Zero", + "result": false + }, + { + 
"expression": "!!Zero", + "result": true + } + ] + }, + { + "given": { + "one": 1, + "two": 2, + "three": 3 + }, + "cases": [ + { + "expression": "one < two", + "result": true + }, + { + "expression": "one <= two", + "result": true + }, + { + "expression": "one == one", + "result": true + }, + { + "expression": "one == two", + "result": false + }, + { + "expression": "one > two", + "result": false + }, + { + "expression": "one >= two", + "result": false + }, + { + "expression": "one != two", + "result": true + }, + { + "expression": "one < two && three > one", + "result": true + }, + { + "expression": "one < two || three > one", + "result": true + }, + { + "expression": "one < two || three < one", + "result": true + }, + { + "expression": "two < one || three < one", + "result": false + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/current.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/current.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/current.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/current.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,25 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "@", + "result": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + } + }, + { + "expression": "@.bar", + "result": {"baz": "qux"} + }, + { + "expression": "@.foo[0]", + "result": {"name": "a"} + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/escape.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/escape.json --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/escape.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/escape.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,46 @@ +[{ + "given": { + "foo.bar": "dot", + "foo bar": "space", + "foo\nbar": "newline", + "foo\"bar": "doublequote", + "c:\\\\windows\\path": "windows", + "/unix/path": "unix", + "\"\"\"": "threequotes", + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "\"foo.bar\"", + "result": "dot" + }, + { + "expression": "\"foo bar\"", + "result": "space" + }, + { + "expression": "\"foo\\nbar\"", + "result": "newline" + }, + { + "expression": "\"foo\\\"bar\"", + "result": "doublequote" + }, + { + "expression": "\"c:\\\\\\\\windows\\\\path\"", + "result": "windows" + }, + { + "expression": "\"/unix/path\"", + "result": "unix" + }, + { + "expression": "\"\\\"\\\"\\\"\"", + "result": "threequotes" + }, + { + "expression": "\"bar\".\"baz\"", + "result": "qux" + } + ] +}] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/filters.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/filters.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/filters.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/filters.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,468 @@ +[ + { + "given": {"foo": [{"name": "a"}, {"name": "b"}]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "foo[?name == 'a']", + "result": [{"name": "a"}] + } + ] + }, + { + "given": {"foo": [0, 1], "bar": [2, 3]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "*[?[0] == `0`]", + "result": [[], []] + } + ] + }, + { + "given": {"foo": 
[{"first": "foo", "last": "bar"}, + {"first": "foo", "last": "foo"}, + {"first": "foo", "last": "baz"}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?first == last]", + "result": [{"first": "foo", "last": "foo"}] + }, + { + "comment": "Verify projection created from filter", + "expression": "foo[?first == last].first", + "result": ["foo"] + } + ] + }, + { + "given": {"foo": [{"age": 20}, + {"age": 25}, + {"age": 30}]}, + "cases": [ + { + "comment": "Greater than with a number", + "expression": "foo[?age > `25`]", + "result": [{"age": 30}] + }, + { + "expression": "foo[?age >= `25`]", + "result": [{"age": 25}, {"age": 30}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age > `30`]", + "result": [] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `25`]", + "result": [{"age": 20}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age <= `25`]", + "result": [{"age": 20}, {"age": 25}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `20`]", + "result": [] + }, + { + "expression": "foo[?age == `20`]", + "result": [{"age": 20}] + }, + { + "expression": "foo[?age != `20`]", + "result": [{"age": 25}, {"age": 30}] + } + ] + }, + { + "given": {"foo": [{"top": {"name": "a"}}, + {"top": {"name": "b"}}]}, + "cases": [ + { + "comment": "Filter with subexpression", + "expression": "foo[?top.name == 'a']", + "result": [{"top": {"name": "a"}}] + } + ] + }, + { + "given": {"foo": [{"top": {"first": "foo", "last": "bar"}}, + {"top": {"first": "foo", "last": "foo"}}, + {"top": {"first": "foo", "last": "baz"}}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?top.first == top.last]", + "result": [{"top": {"first": "foo", "last": "foo"}}] + }, + { + "comment": "Matching a JSON array", + "expression": "foo[?top == `{\"first\": \"foo\", \"last\": \"bar\"}`]", + "result": [{"top": {"first": "foo", "last": "bar"}}] + 
} + ] + }, + { + "given": {"foo": [ + {"key": true}, + {"key": false}, + {"key": 0}, + {"key": 1}, + {"key": [0]}, + {"key": {"bar": [0]}}, + {"key": null}, + {"key": [1]}, + {"key": {"a":2}} + ]}, + "cases": [ + { + "expression": "foo[?key == `true`]", + "result": [{"key": true}] + }, + { + "expression": "foo[?key == `false`]", + "result": [{"key": false}] + }, + { + "expression": "foo[?key == `0`]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?key == `1`]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?key == `[0]`]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?key == `{\"bar\": [0]}`]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?key == `null`]", + "result": [{"key": null}] + }, + { + "expression": "foo[?key == `[1]`]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?key == `{\"a\":2}`]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?`true` == key]", + "result": [{"key": true}] + }, + { + "expression": "foo[?`false` == key]", + "result": [{"key": false}] + }, + { + "expression": "foo[?`0` == key]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?`1` == key]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?`[0]` == key]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?`{\"bar\": [0]}` == key]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?`null` == key]", + "result": [{"key": null}] + }, + { + "expression": "foo[?`[1]` == key]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?`{\"a\":2}` == key]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?key != `true`]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `false`]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": 
"foo[?key != `0`]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `1`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `null`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `[1]`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `{\"a\":2}`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + }, + { + "expression": "foo[?`true` != key]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`false` != key]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`0` != key]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`1` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`null` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`[1]` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": 
[0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`{\"a\":2}` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + } + ] + }, + { + "given": {"reservations": [ + {"instances": [ + {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}, + {"foo": 1, "bar": 2}, {"foo": 2, "bar": 1}]}]}, + "cases": [ + { + "expression": "reservations[].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[*].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[].instances[?bar==`1`][]", + "result": [{"foo": 2, "bar": 1}] + } + ] + }, + { + "given": { + "baz": "other", + "foo": [ + {"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?bar==`1`].bar[0]", + "result": [] + } + ] + }, + { + "given": { + "foo": [ + {"a": 1, "b": {"c": "x"}}, + {"a": 1, "b": {"c": "y"}}, + {"a": 1, "b": {"c": "z"}}, + {"a": 2, "b": {"c": "z"}}, + {"a": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?a==`1`].b.c", + "result": ["x", "y", "z"] + } + ] + }, + { + "given": {"foo": [{"name": "a"}, {"name": "b"}, {"name": "c"}]}, + "cases": [ + { + "comment": "Filter with or expression", + "expression": "foo[?name == 'a' || name == 'b']", + "result": [{"name": "a"}, {"name": "b"}] + }, + { + "expression": "foo[?name == 'a' || name == 'e']", + "result": [{"name": "a"}] + }, + { + "expression": "foo[?name == 'a' || name == 'b' || name == 'c']", + "result": [{"name": "a"}, {"name": "b"}, {"name": "c"}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2}, {"a": 1, "b": 3}]}, + "cases": [ + { + "comment": "Filter with and expression", + "expression": "foo[?a == `1` && b == `2`]", + "result": [{"a": 1, "b": 2}] + }, + { + "expression": "foo[?a == `1` && b == `4`]", + "result": [] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, 
"b": 4}]}, + "cases": [ + { + "comment": "Filter with Or and And expressions", + "expression": "foo[?c == `3` || a == `1` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "expression": "foo[?b == `2` || a == `3` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && b == `4` || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?(a == `3` && b == `4`) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?((a == `3` && b == `4`)) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && (b == `4` || b == `2`)]", + "result": [{"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && ((b == `4` || b == `2`))]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Verify precedence of or/and expressions", + "expression": "foo[?a == `1` || b ==`2` && c == `5`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "comment": "Parentheses can alter precedence", + "expression": "foo[?(a == `1` || b ==`2`) && c == `5`]", + "result": [] + }, + { + "comment": "Not expressions combined with and/or", + "expression": "foo[?!(a == `1` || b ==`2`)]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": { + "foo": [ + {"key": true}, + {"key": false}, + {"key": []}, + {"key": {}}, + {"key": [0]}, + {"key": {"a": "b"}}, + {"key": 0}, + {"key": 1}, + {"key": null}, + {"notkey": true} + ] + }, + "cases": [ + { + "comment": "Unary filter expression", + "expression": "foo[?key]", + "result": [ + {"key": true}, {"key": [0]}, {"key": {"a": "b"}}, + {"key": 0}, {"key": 1} + ] + }, + { + "comment": "Unary not filter expression", + "expression": "foo[?!key]", + "result": [ + {"key": false}, {"key": []}, {"key": {}}, + {"key": null}, {"notkey": true} + ] + }, + { + 
"comment": "Equality with null RHS", + "expression": "foo[?key == `null`]", + "result": [ + {"key": null}, {"notkey": true} + ] + } + ] + }, + { + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + "cases": [ + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ < `5`]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?`5` > @]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ == @]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/functions.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/functions.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/functions.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/functions.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,825 @@ +[{ + "given": + { + "foo": -1, + "zero": 0, + "numbers": [-1, 3, 4, 5], + "array": [-1, 3, 4, 5, "a", "100"], + "strings": ["a", "b", "c"], + "decimals": [1.01, 1.2, -1.5], + "str": "Str", + "false": false, + "empty_list": [], + "empty_hash": {}, + "objects": {"foo": "bar", "bar": "baz"}, + "null_key": null + }, + "cases": [ + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": "abs(str)", + "error": "invalid-type" + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(`false`)", + "error": "invalid-type" + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`1`, `2`)", + "error": "invalid-arity" + }, + { + 
"expression": "abs()", + "error": "invalid-arity" + }, + { + "expression": "unknown_function(`1`, `2`)", + "error": "unknown-function" + }, + { + "expression": "avg(numbers)", + "result": 2.75 + }, + { + "expression": "avg(array)", + "error": "invalid-type" + }, + { + "expression": "avg('abc')", + "error": "invalid-type" + }, + { + "expression": "avg(foo)", + "error": "invalid-type" + }, + { + "expression": "avg(@)", + "error": "invalid-type" + }, + { + "expression": "avg(strings)", + "error": "invalid-type" + }, + { + "expression": "ceil(`1.2`)", + "result": 2 + }, + { + "expression": "ceil(decimals[0])", + "result": 2 + }, + { + "expression": "ceil(decimals[1])", + "result": 2 + }, + { + "expression": "ceil(decimals[2])", + "result": -1 + }, + { + "expression": "ceil('string')", + "error": "invalid-type" + }, + { + "expression": "contains('abc', 'a')", + "result": true + }, + { + "expression": "contains('abc', 'd')", + "result": false + }, + { + "expression": "contains(`false`, 'd')", + "error": "invalid-type" + }, + { + "expression": "contains(strings, 'a')", + "result": true + }, + { + "expression": "contains(decimals, `1.2`)", + "result": true + }, + { + "expression": "contains(decimals, `false`)", + "result": false + }, + { + "expression": "ends_with(str, 'r')", + "result": true + }, + { + "expression": "ends_with(str, 'tr')", + "result": true + }, + { + "expression": "ends_with(str, 'Str')", + "result": true + }, + { + "expression": "ends_with(str, 'SStr')", + "result": false + }, + { + "expression": "ends_with(str, 'foo')", + "result": false + }, + { + "expression": "ends_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "floor(`1.2`)", + "result": 1 + }, + { + "expression": "floor('string')", + "error": "invalid-type" + }, + { + "expression": "floor(decimals[0])", + "result": 1 + }, + { + "expression": "floor(foo)", + "result": -1 + }, + { + "expression": "floor(str)", + "error": "invalid-type" + }, + { + "expression": "length('abc')", + 
"result": 3 + }, + { + "expression": "length('✓foo')", + "result": 4 + }, + { + "expression": "length('')", + "result": 0 + }, + { + "expression": "length(@)", + "result": 12 + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "length(str)", + "result": 3 + }, + { + "expression": "length(array)", + "result": 6 + }, + { + "expression": "length(objects)", + "result": 2 + }, + { + "expression": "length(`false`)", + "error": "invalid-type" + }, + { + "expression": "length(foo)", + "error": "invalid-type" + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "max(numbers)", + "result": 5 + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(strings)", + "result": "c" + }, + { + "expression": "max(abc)", + "error": "invalid-type" + }, + { + "expression": "max(array)", + "error": "invalid-type" + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(empty_list)", + "result": null + }, + { + "expression": "merge(`{}`)", + "result": {} + }, + { + "expression": "merge(`{}`, `{}`)", + "result": {} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"b\": 2}`)", + "result": {"a": 1, "b": 2} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"a\": 2}`)", + "result": {"a": 2} + }, + { + "expression": "merge(`{\"a\": 1, \"b\": 2}`, `{\"a\": 2, \"c\": 3}`, `{\"d\": 4}`)", + "result": {"a": 2, "b": 2, "c": 3, "d": 4} + }, + { + "expression": "min(numbers)", + "result": -1 + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(abc)", + "error": "invalid-type" + }, + { + "expression": "min(array)", + "error": "invalid-type" + }, + { + "expression": "min(empty_list)", + "result": null + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(strings)", + "result": "a" + }, + { + "expression": "type('abc')", + "result": "string" + }, + { + "expression": "type(`1.0`)", + "result": "number" + }, + { 
+ "expression": "type(`2`)", + "result": "number" + }, + { + "expression": "type(`true`)", + "result": "boolean" + }, + { + "expression": "type(`false`)", + "result": "boolean" + }, + { + "expression": "type(`null`)", + "result": "null" + }, + { + "expression": "type(`[0]`)", + "result": "array" + }, + { + "expression": "type(`{\"a\": \"b\"}`)", + "result": "object" + }, + { + "expression": "type(@)", + "result": "object" + }, + { + "expression": "sort(keys(objects))", + "result": ["bar", "foo"] + }, + { + "expression": "keys(foo)", + "error": "invalid-type" + }, + { + "expression": "keys(strings)", + "error": "invalid-type" + }, + { + "expression": "keys(`false`)", + "error": "invalid-type" + }, + { + "expression": "sort(values(objects))", + "result": ["bar", "baz"] + }, + { + "expression": "keys(empty_hash)", + "result": [] + }, + { + "expression": "values(foo)", + "error": "invalid-type" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(',', `[\"a\", \"b\"]`)", + "result": "a,b" + }, + { + "expression": "join(',', `[\"a\", 0]`)", + "error": "invalid-type" + }, + { + "expression": "join(', ', str)", + "error": "invalid-type" + }, + { + "expression": "join('|', strings)", + "result": "a|b|c" + }, + { + "expression": "join(`2`, strings)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals[].to_string(@))", + "result": "1.01|1.2|-1.5" + }, + { + "expression": "join('|', empty_list)", + "result": "" + }, + { + "expression": "reverse(numbers)", + "result": [5, 4, 3, -1] + }, + { + "expression": "reverse(array)", + "result": ["100", "a", 5, 4, 3, -1] + }, + { + "expression": "reverse(`[]`)", + "result": [] + }, + { + "expression": "reverse('')", + "result": "" + }, + { + "expression": "reverse('hello world')", + "result": "dlrow olleh" + }, + { + 
"expression": "starts_with(str, 'S')", + "result": true + }, + { + "expression": "starts_with(str, 'St')", + "result": true + }, + { + "expression": "starts_with(str, 'Str')", + "result": true + }, + { + "expression": "starts_with(str, 'String')", + "result": false + }, + { + "expression": "starts_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "sum(numbers)", + "result": 11 + }, + { + "expression": "sum(decimals)", + "result": 0.71 + }, + { + "expression": "sum(array)", + "error": "invalid-type" + }, + { + "expression": "sum(array[].to_number(@))", + "result": 111 + }, + { + "expression": "sum(`[]`)", + "result": 0 + }, + { + "expression": "to_array('foo')", + "result": ["foo"] + }, + { + "expression": "to_array(`0`)", + "result": [0] + }, + { + "expression": "to_array(objects)", + "result": [{"foo": "bar", "bar": "baz"}] + }, + { + "expression": "to_array(`[1, 2, 3]`)", + "result": [1, 2, 3] + }, + { + "expression": "to_array(false)", + "result": [false] + }, + { + "expression": "to_string('foo')", + "result": "foo" + }, + { + "expression": "to_string(`1.2`)", + "result": "1.2" + }, + { + "expression": "to_string(`[0, 1]`)", + "result": "[0,1]" + }, + { + "expression": "to_number('1.0')", + "result": 1.0 + }, + { + "expression": "to_number('1.1')", + "result": 1.1 + }, + { + "expression": "to_number('4')", + "result": 4 + }, + { + "expression": "to_number('notanumber')", + "result": null + }, + { + "expression": "to_number(`false`)", + "result": null + }, + { + "expression": "to_number(`null`)", + "result": null + }, + { + "expression": "to_number(`[0]`)", + "result": null + }, + { + "expression": "to_number(`{\"foo\": 0}`)", + "result": null + }, + { + "expression": "\"to_string\"(`1.0`)", + "error": "syntax" + }, + { + "expression": "sort(numbers)", + "result": [-1, 3, 4, 5] + }, + { + "expression": "sort(strings)", + "result": ["a", "b", "c"] + }, + { + "expression": "sort(decimals)", + "result": [-1.5, 1.01, 1.2] + }, + { + "expression": 
"sort(array)", + "error": "invalid-type" + }, + { + "expression": "sort(abc)", + "error": "invalid-type" + }, + { + "expression": "sort(empty_list)", + "result": [] + }, + { + "expression": "sort(@)", + "error": "invalid-type" + }, + { + "expression": "not_null(unknown_key, str)", + "result": "Str" + }, + { + "expression": "not_null(unknown_key, foo.bar, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(unknown_key, null_key, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(all, expressions, are_null)", + "result": null + }, + { + "expression": "not_null()", + "error": "invalid-arity" + }, + { + "description": "function projection on single arg function", + "expression": "numbers[].to_string(@)", + "result": ["-1", "3", "4", "5"] + }, + { + "description": "function projection on single arg function", + "expression": "array[].to_number(@)", + "result": [-1, 3, 4, 5, 100] + } + ] +}, { + "given": + { + "foo": [ + {"b": "b", "a": "a"}, + {"c": "c", "b": "b"}, + {"d": "d", "c": "c"}, + {"e": "e", "d": "d"}, + {"f": "f", "e": "e"} + ] + }, + "cases": [ + { + "description": "function projection on variadic function", + "expression": "foo[].not_null(f, e, d, c, b, a)", + "result": ["b", "c", "d", "e", "f"] + } + ] +}, { + "given": + { + "people": [ + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"}, + {"age": 10, "age_str": "10", "bool": true, "name": 3} + ] + }, + "cases": [ + { + "description": "sort by field expression", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": 
"b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "expression": "sort_by(people, &age_str)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "sort by function expression", + "expression": "sort_by(people, &to_number(age_str))", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "function projection on sort_by function", + "expression": "sort_by(people, &age)[].name", + "result": [3, "a", "c", "b", "d"] + }, + { + "expression": "sort_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &age)[].extra", + "result": ["foo", "bar"] + }, + { + "expression": "sort_by(`[]`, &age)", + "result": [] + }, + { + "expression": "max_by(people, &age)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &age_str)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, 
&to_number(age_str))", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "min_by(people, &age)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &age_str)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &to_number(age_str))", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + } + ] +}, { + "given": + { + "people": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + }, + "cases": [ + { + "description": "stable sort order", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + } + ] +}, { + "given": + { + "people": [ + {"a": 10, "b": 1, "c": "z"}, + {"a": 10, "b": 2, "c": null}, + {"a": 10, "b": 3}, + {"a": 10, "b": 4, "c": "z"}, + {"a": 10, "b": 5, "c": null}, + {"a": 10, "b": 6}, + {"a": 10, "b": 7, "c": "z"}, + {"a": 10, "b": 8, "c": null}, + {"a": 10, "b": 9} + ], + "empty": [] + }, + "cases": [ + { + "expression": "map(&a, people)", + "result": [10, 10, 10, 10, 10, 10, 10, 10, 10] + }, + { + "expression": "map(&c, people)", + "result": ["z", null, null, "z", null, null, "z", null, null] + }, + { + "expression": "map(&a, badkey)", + "error": 
"invalid-type" + }, + { + "expression": "map(&foo, empty)", + "result": [] + } + ] +}, { + "given": { + "array": [ + { + "foo": {"bar": "yes1"} + }, + { + "foo": {"bar": "yes2"} + }, + { + "foo1": {"bar": "no"} + } + ]}, + "cases": [ + { + "expression": "map(&foo.bar, array)", + "result": ["yes1", "yes2", null] + }, + { + "expression": "map(&foo1.bar, array)", + "result": [null, null, "no"] + }, + { + "expression": "map(&foo.bar.baz, array)", + "result": [null, null, null] + } + ] +}, { + "given": { + "array": [[1, 2, 3, [4]], [5, 6, 7, [8, 9]]] + }, + "cases": [ + { + "expression": "map(&[], array)", + "result": [[1, 2, 3, 4], [5, 6, 7, 8, 9]] + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/identifiers.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/identifiers.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/identifiers.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/identifiers.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1377 @@ +[ + { + "given": { + "__L": true + }, + "cases": [ + { + "expression": "__L", + "result": true + } + ] + }, + { + "given": { + "!\r": true + }, + "cases": [ + { + "expression": "\"!\\r\"", + "result": true + } + ] + }, + { + "given": { + "Y_1623": true + }, + "cases": [ + { + "expression": "Y_1623", + "result": true + } + ] + }, + { + "given": { + "x": true + }, + "cases": [ + { + "expression": "x", + "result": true + } + ] + }, + { + "given": { + "\tF\uCebb": true + }, + "cases": [ + { + "expression": "\"\\tF\\uCebb\"", + "result": true + } + ] + }, + { + "given": { + " \t": true + }, + "cases": [ + { + "expression": "\" \\t\"", + "result": true + } + ] + }, + { + "given": { + " ": true + }, + "cases": [ + { + "expression": "\" \"", + "result": true + } + ] + }, + { 
+ "given": { + "v2": true + }, + "cases": [ + { + "expression": "v2", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "_X": true + }, + "cases": [ + { + "expression": "_X", + "result": true + } + ] + }, + { + "given": { + "\t4\ud9da\udd15": true + }, + "cases": [ + { + "expression": "\"\\t4\\ud9da\\udd15\"", + "result": true + } + ] + }, + { + "given": { + "v24_W": true + }, + "cases": [ + { + "expression": "v24_W", + "result": true + } + ] + }, + { + "given": { + "H": true + }, + "cases": [ + { + "expression": "\"H\"", + "result": true + } + ] + }, + { + "given": { + "\f": true + }, + "cases": [ + { + "expression": "\"\\f\"", + "result": true + } + ] + }, + { + "given": { + "E4": true + }, + "cases": [ + { + "expression": "\"E4\"", + "result": true + } + ] + }, + { + "given": { + "!": true + }, + "cases": [ + { + "expression": "\"!\"", + "result": true + } + ] + }, + { + "given": { + "tM": true + }, + "cases": [ + { + "expression": "tM", + "result": true + } + ] + }, + { + "given": { + " [": true + }, + "cases": [ + { + "expression": "\" [\"", + "result": true + } + ] + }, + { + "given": { + "R!": true + }, + "cases": [ + { + "expression": "\"R!\"", + "result": true + } + ] + }, + { + "given": { + "_6W": true + }, + "cases": [ + { + "expression": "_6W", + "result": true + } + ] + }, + { + "given": { + "\uaBA1\r": true + }, + "cases": [ + { + "expression": "\"\\uaBA1\\r\"", + "result": true + } + ] + }, + { + "given": { + "tL7": true + }, + "cases": [ + { + "expression": "tL7", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\">\"", + "result": true + } + ] + }, + { + "given": { + "hvu": true + }, + "cases": [ + { + "expression": "hvu", + "result": true + } + ] + }, + { + "given": { + "; !": true + }, + "cases": [ + { + "expression": "\"; !\"", + "result": true + } + ] + }, + { + "given": { + "hU": true + }, 
+ "cases": [ + { + "expression": "hU", + "result": true + } + ] + }, + { + "given": { + "!I\n\/": true + }, + "cases": [ + { + "expression": "\"!I\\n\\/\"", + "result": true + } + ] + }, + { + "given": { + "\uEEbF": true + }, + "cases": [ + { + "expression": "\"\\uEEbF\"", + "result": true + } + ] + }, + { + "given": { + "U)\t": true + }, + "cases": [ + { + "expression": "\"U)\\t\"", + "result": true + } + ] + }, + { + "given": { + "fa0_9": true + }, + "cases": [ + { + "expression": "fa0_9", + "result": true + } + ] + }, + { + "given": { + "/": true + }, + "cases": [ + { + "expression": "\"/\"", + "result": true + } + ] + }, + { + "given": { + "Gy": true + }, + "cases": [ + { + "expression": "Gy", + "result": true + } + ] + }, + { + "given": { + "\b": true + }, + "cases": [ + { + "expression": "\"\\b\"", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\"<\"", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "\t&\\\r": true + }, + "cases": [ + { + "expression": "\"\\t&\\\\\\r\"", + "result": true + } + ] + }, + { + "given": { + "#": true + }, + "cases": [ + { + "expression": "\"#\"", + "result": true + } + ] + }, + { + "given": { + "B__": true + }, + "cases": [ + { + "expression": "B__", + "result": true + } + ] + }, + { + "given": { + "\nS \n": true + }, + "cases": [ + { + "expression": "\"\\nS \\n\"", + "result": true + } + ] + }, + { + "given": { + "Bp": true + }, + "cases": [ + { + "expression": "Bp", + "result": true + } + ] + }, + { + "given": { + ",\t;": true + }, + "cases": [ + { + "expression": "\",\\t;\"", + "result": true + } + ] + }, + { + "given": { + "B_q": true + }, + "cases": [ + { + "expression": "B_q", + "result": true + } + ] + }, + { + "given": { + "\/+\t\n\b!Z": true + }, + "cases": [ + { + "expression": "\"\\/+\\t\\n\\b!Z\"", + "result": true + } + ] + }, + { + "given": { + 
"\udadd\udfc7\\ueFAc": true + }, + "cases": [ + { + "expression": "\"\udadd\udfc7\\\\ueFAc\"", + "result": true + } + ] + }, + { + "given": { + ":\f": true + }, + "cases": [ + { + "expression": "\":\\f\"", + "result": true + } + ] + }, + { + "given": { + "\/": true + }, + "cases": [ + { + "expression": "\"\\/\"", + "result": true + } + ] + }, + { + "given": { + "_BW_6Hg_Gl": true + }, + "cases": [ + { + "expression": "_BW_6Hg_Gl", + "result": true + } + ] + }, + { + "given": { + "\udbcf\udc02": true + }, + "cases": [ + { + "expression": "\"\udbcf\udc02\"", + "result": true + } + ] + }, + { + "given": { + "zs1DC": true + }, + "cases": [ + { + "expression": "zs1DC", + "result": true + } + ] + }, + { + "given": { + "__434": true + }, + "cases": [ + { + "expression": "__434", + "result": true + } + ] + }, + { + "given": { + "\udb94\udd41": true + }, + "cases": [ + { + "expression": "\"\udb94\udd41\"", + "result": true + } + ] + }, + { + "given": { + "Z_5": true + }, + "cases": [ + { + "expression": "Z_5", + "result": true + } + ] + }, + { + "given": { + "z_M_": true + }, + "cases": [ + { + "expression": "z_M_", + "result": true + } + ] + }, + { + "given": { + "YU_2": true + }, + "cases": [ + { + "expression": "YU_2", + "result": true + } + ] + }, + { + "given": { + "_0": true + }, + "cases": [ + { + "expression": "_0", + "result": true + } + ] + }, + { + "given": { + "\b+": true + }, + "cases": [ + { + "expression": "\"\\b+\"", + "result": true + } + ] + }, + { + "given": { + "\"": true + }, + "cases": [ + { + "expression": "\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "D7": true + }, + "cases": [ + { + "expression": "D7", + "result": true + } + ] + }, + { + "given": { + "_62L": true + }, + "cases": [ + { + "expression": "_62L", + "result": true + } + ] + }, + { + "given": { + "\tK\t": true + }, + "cases": [ + { + "expression": "\"\\tK\\t\"", + "result": true + } + ] + }, + { + "given": { + "\n\\\f": true + }, + "cases": [ + { + "expression": 
"\"\\n\\\\\\f\"", + "result": true + } + ] + }, + { + "given": { + "I_": true + }, + "cases": [ + { + "expression": "I_", + "result": true + } + ] + }, + { + "given": { + "W_a0_": true + }, + "cases": [ + { + "expression": "W_a0_", + "result": true + } + ] + }, + { + "given": { + "BQ": true + }, + "cases": [ + { + "expression": "BQ", + "result": true + } + ] + }, + { + "given": { + "\tX$\uABBb": true + }, + "cases": [ + { + "expression": "\"\\tX$\\uABBb\"", + "result": true + } + ] + }, + { + "given": { + "Z9": true + }, + "cases": [ + { + "expression": "Z9", + "result": true + } + ] + }, + { + "given": { + "\b%\"\uda38\udd0f": true + }, + "cases": [ + { + "expression": "\"\\b%\\\"\uda38\udd0f\"", + "result": true + } + ] + }, + { + "given": { + "_F": true + }, + "cases": [ + { + "expression": "_F", + "result": true + } + ] + }, + { + "given": { + "!,": true + }, + "cases": [ + { + "expression": "\"!,\"", + "result": true + } + ] + }, + { + "given": { + "\"!": true + }, + "cases": [ + { + "expression": "\"\\\"!\"", + "result": true + } + ] + }, + { + "given": { + "Hh": true + }, + "cases": [ + { + "expression": "Hh", + "result": true + } + ] + }, + { + "given": { + "&": true + }, + "cases": [ + { + "expression": "\"&\"", + "result": true + } + ] + }, + { + "given": { + "9\r\\R": true + }, + "cases": [ + { + "expression": "\"9\\r\\\\R\"", + "result": true + } + ] + }, + { + "given": { + "M_k": true + }, + "cases": [ + { + "expression": "M_k", + "result": true + } + ] + }, + { + "given": { + "!\b\n\udb06\ude52\"\"": true + }, + "cases": [ + { + "expression": "\"!\\b\\n\udb06\ude52\\\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "6": true + }, + "cases": [ + { + "expression": "\"6\"", + "result": true + } + ] + }, + { + "given": { + "_7": true + }, + "cases": [ + { + "expression": "_7", + "result": true + } + ] + }, + { + "given": { + "0": true + }, + "cases": [ + { + "expression": "\"0\"", + "result": true + } + ] + }, + { + "given": { + "\\8\\": true + 
}, + "cases": [ + { + "expression": "\"\\\\8\\\\\"", + "result": true + } + ] + }, + { + "given": { + "b7eo": true + }, + "cases": [ + { + "expression": "b7eo", + "result": true + } + ] + }, + { + "given": { + "xIUo9": true + }, + "cases": [ + { + "expression": "xIUo9", + "result": true + } + ] + }, + { + "given": { + "5": true + }, + "cases": [ + { + "expression": "\"5\"", + "result": true + } + ] + }, + { + "given": { + "?": true + }, + "cases": [ + { + "expression": "\"?\"", + "result": true + } + ] + }, + { + "given": { + "sU": true + }, + "cases": [ + { + "expression": "sU", + "result": true + } + ] + }, + { + "given": { + "VH2&H\\\/": true + }, + "cases": [ + { + "expression": "\"VH2&H\\\\\\/\"", + "result": true + } + ] + }, + { + "given": { + "_C": true + }, + "cases": [ + { + "expression": "_C", + "result": true + } + ] + }, + { + "given": { + "_": true + }, + "cases": [ + { + "expression": "_", + "result": true + } + ] + }, + { + "given": { + "<\t": true + }, + "cases": [ + { + "expression": "\"<\\t\"", + "result": true + } + ] + }, + { + "given": { + "\uD834\uDD1E": true + }, + "cases": [ + { + "expression": "\"\\uD834\\uDD1E\"", + "result": true + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/indices.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/indices.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/indices.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/indices.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,346 @@ +[{ + "given": + {"foo": {"bar": ["zero", "one", "two"]}}, + "cases": [ + { + "expression": "foo.bar[0]", + "result": "zero" + }, + { + "expression": "foo.bar[1]", + "result": "one" + }, + { + "expression": "foo.bar[2]", + "result": "two" + }, + { + "expression": 
"foo.bar[3]", + "result": null + }, + { + "expression": "foo.bar[-1]", + "result": "two" + }, + { + "expression": "foo.bar[-2]", + "result": "one" + }, + { + "expression": "foo.bar[-3]", + "result": "zero" + }, + { + "expression": "foo.bar[-4]", + "result": null + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo[0].bar", + "result": "one" + }, + { + "expression": "foo[1].bar", + "result": "two" + }, + { + "expression": "foo[2].bar", + "result": "three" + }, + { + "expression": "foo[3].notbar", + "result": "four" + }, + { + "expression": "foo[3].bar", + "result": null + }, + { + "expression": "foo[0]", + "result": {"bar": "one"} + }, + { + "expression": "foo[1]", + "result": {"bar": "two"} + }, + { + "expression": "foo[2]", + "result": {"bar": "three"} + }, + { + "expression": "foo[3]", + "result": {"notbar": "four"} + }, + { + "expression": "foo[4]", + "result": null + } + ] +}, +{ + "given": [ + "one", "two", "three" + ], + "cases": [ + { + "expression": "[0]", + "result": "one" + }, + { + "expression": "[1]", + "result": "two" + }, + { + "expression": "[2]", + "result": "three" + }, + { + "expression": "[-1]", + "result": "three" + }, + { + "expression": "[-2]", + "result": "two" + }, + { + "expression": "[-3]", + "result": "one" + } + ] +}, +{ + "given": {"reservations": [ + {"instances": [{"foo": 1}, {"foo": 2}]} + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo", + "result": [1, 2] + }, + { + "expression": "reservations[].instances[].bar", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + } + ] +}, +{ + "given": {"reservations": [{ + "instances": [ + {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"foo": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 
8}]}, + {"foo": "bar"}, + {"notfoo": [{"bar": 20}, {"bar": 21}, {"notbar": [7]}, {"bar": 22}]}, + {"bar": [{"baz": [1]}, {"baz": [2]}, {"baz": [3]}, {"baz": [4]}]}, + {"baz": [{"baz": [1, 2]}, {"baz": []}, {"baz": []}, {"baz": [3, 4]}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + }, { + "instances": [ + {"a": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"b": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"c": "bar"}, + {"notfoo": [{"bar": 23}, {"bar": 24}, {"notbar": [7]}, {"bar": 25}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + } + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo[].bar", + "result": [1, 2, 4, 5, 6, 8] + }, + { + "expression": "reservations[].instances[].foo[].baz", + "result": [] + }, + { + "expression": "reservations[].instances[].notfoo[].bar", + "result": [20, 21, 22, 23, 24, 25] + }, + { + "expression": "reservations[].instances[].notfoo[].notbar", + "result": [[7], [7]] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].instances[].foo[].notbar", + "result": [3, [7]] + }, + { + "expression": "reservations[].instances[].bar[].baz", + "result": [[1], [2], [3], [4]] + }, + { + "expression": "reservations[].instances[].baz[].baz", + "result": [[1, 2], [], [], [3, 4]] + }, + { + "expression": "reservations[].instances[].qux[].baz", + "result": [[], [1, 2, 3], [4], [], [], [1, 2, 3], [4], []] + }, + { + "expression": "reservations[].instances[].qux[].baz[]", + "result": [1, 2, 3, 4, 1, 2, 3, 4] + } + ] +}, +{ + "given": { + "foo": [ + [["one", "two"], ["three", "four"]], + [["five", "six"], ["seven", "eight"]], + [["nine"], ["ten"]] + ] + }, + "cases": [ + { + "expression": "foo[]", + "result": [["one", "two"], ["three", 
"four"], ["five", "six"], + ["seven", "eight"], ["nine"], ["ten"]] + }, + { + "expression": "foo[][0]", + "result": ["one", "three", "five", "seven", "nine", "ten"] + }, + { + "expression": "foo[][1]", + "result": ["two", "four", "six", "eight"] + }, + { + "expression": "foo[][0][0]", + "result": [] + }, + { + "expression": "foo[][2][2]", + "result": [] + }, + { + "expression": "foo[][0][0][100]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].baz", + "result": [1, 3, 5, 7] + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[]", + "result": null + }, + { + "expression": "hash[]", + "result": null + }, + { + "expression": "number[]", + "result": null + }, + { + "expression": "nullvalue[]", + "result": null + }, + { + "expression": "string[].foo", + "result": null + }, + { + "expression": "hash[].foo", + "result": null + }, + { + "expression": "number[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo[].bar", + "result": null + } + ] +} +] diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/literal.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/literal.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/literal.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/literal.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,185 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "`\"foo\"`", + "result": "foo" + }, + { + "comment": "Interpret escaped unicode.", + "expression": "`\"\\u03a6\"`", + "result": "Φ" + }, + { + "expression": "`\"✓\"`", + "result": "✓" + }, + { + "expression": "`[1, 2, 3]`", + "result": [1, 2, 3] + }, + { + "expression": "`{\"a\": \"b\"}`", + "result": {"a": "b"} + }, + { + "expression": "`true`", + "result": true + }, + { + "expression": "`false`", + "result": false + }, + { + "expression": "`null`", + "result": null + }, + { + "expression": "`0`", + "result": 0 + }, + { + "expression": "`1`", + "result": 1 + }, + { + "expression": "`2`", + "result": 2 + }, + { + "expression": "`3`", + "result": 3 + }, + { + "expression": "`4`", + "result": 4 + }, + { + "expression": "`5`", + "result": 5 + }, + { + "expression": "`6`", + "result": 6 + }, + { + "expression": "`7`", + "result": 7 + }, + { + "expression": "`8`", + "result": 8 + }, + { + "expression": "`9`", + "result": 9 + }, + { + "comment": "Escaping a backtick in quotes", + "expression": "`\"foo\\`bar\"`", + "result": "foo`bar" + }, + { + "comment": "Double quote in literal", + "expression": "`\"foo\\\"bar\"`", + "result": "foo\"bar" + }, + { + "expression": "`\"1\\`\"`", + "result": "1`" + }, + { + "comment": "Multiple literal expressions with escapes", + "expression": "`\"\\\\\"`.{a:`\"b\"`}", + "result": {"a": "b"} + }, + { + 
"comment": "literal . identifier", + "expression": "`{\"a\": \"b\"}`.a", + "result": "b" + }, + { + "comment": "literal . identifier . identifier", + "expression": "`{\"a\": {\"b\": \"c\"}}`.a.b", + "result": "c" + }, + { + "comment": "literal . identifier bracket-expr", + "expression": "`[0, 1, 2]`[1]", + "result": 1 + } + ] + }, + { + "comment": "Literals", + "given": {"type": "object"}, + "cases": [ + { + "comment": "Literal with leading whitespace", + "expression": "` {\"foo\": true}`", + "result": {"foo": true} + }, + { + "comment": "Literal with trailing whitespace", + "expression": "`{\"foo\": true} `", + "result": {"foo": true} + }, + { + "comment": "Literal on RHS of subexpr not allowed", + "expression": "foo.`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Raw String Literals", + "given": {}, + "cases": [ + { + "expression": "'foo'", + "result": "foo" + }, + { + "expression": "' foo '", + "result": " foo " + }, + { + "expression": "'0'", + "result": "0" + }, + { + "expression": "'newline\n'", + "result": "newline\n" + }, + { + "expression": "'\n'", + "result": "\n" + }, + { + "expression": "'✓'", + "result": "✓" + }, + { + "expression": "'𝄞'", + "result": "𝄞" + }, + { + "expression": "' [foo] '", + "result": " [foo] " + }, + { + "expression": "'[foo]'", + "result": "[foo]" + }, + { + "comment": "Do not interpret escaped unicode.", + "expression": "'\\u03a6'", + "result": "\\u03a6" + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/multiselect.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/multiselect.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/multiselect.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/multiselect.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,393 @@ +[{ 
+ "given": { + "foo": { + "bar": "bar", + "baz": "baz", + "qux": "qux", + "nested": { + "one": { + "a": "first", + "b": "second", + "c": "third" + }, + "two": { + "a": "first", + "b": "second", + "c": "third" + }, + "three": { + "a": "first", + "b": "second", + "c": {"inner": "third"} + } + } + }, + "bar": 1, + "baz": 2, + "qux\"": 3 + }, + "cases": [ + { + "expression": "foo.{bar: bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"bar\": bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"foo.bar\": bar}", + "result": {"foo.bar": "bar"} + }, + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{\"bar\": bar, \"baz\": baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "{\"baz\": baz, \"qux\\\"\": \"qux\\\"\"}", + "result": {"baz": 2, "qux\"": 3} + }, + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{bar: bar,qux: qux}", + "result": {"bar": "bar", "qux": "qux"} + }, + { + "expression": "foo.{bar: bar, noexist: noexist}", + "result": {"bar": "bar", "noexist": null} + }, + { + "expression": "foo.{noexist: noexist, alsonoexist: alsonoexist}", + "result": {"noexist": null, "alsonoexist": null} + }, + { + "expression": "foo.badkey.{nokey: nokey, alsonokey: alsonokey}", + "result": null + }, + { + "expression": "foo.nested.*.{a: a,b: b}", + "result": [{"a": "first", "b": "second"}, + {"a": "first", "b": "second"}, + {"a": "first", "b": "second"}] + }, + { + "expression": "foo.nested.three.{a: a, cinner: c.inner}", + "result": {"a": "first", "cinner": "third"} + }, + { + "expression": "foo.nested.three.{a: a, c: c.inner.bad.key}", + "result": {"a": "first", "c": null} + }, + { + "expression": "foo.{a: nested.one.a, b: nested.two.b}", + "result": {"a": "first", "b": "second"} + }, + { + "expression": "{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "{bar: 
bar}", + "result": {"bar": 1} + }, + { + "expression": "{otherkey: bar}", + "result": {"otherkey": 1} + }, + { + "expression": "{no: no, exist: exist}", + "result": {"no": null, "exist": null} + }, + { + "expression": "foo.[bar]", + "result": ["bar"] + }, + { + "expression": "foo.[bar,baz]", + "result": ["bar", "baz"] + }, + { + "expression": "foo.[bar,qux]", + "result": ["bar", "qux"] + }, + { + "expression": "foo.[bar,noexist]", + "result": ["bar", null] + }, + { + "expression": "foo.[noexist,alsonoexist]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": [2, 3, 4]} + }, + "cases": [ + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": 1, "baz": [2, 3, 4]} + }, + { + "expression": "foo.[bar,baz[0]]", + "result": [1, 2] + }, + { + "expression": "foo.[bar,baz[1]]", + "result": [1, 3] + }, + { + "expression": "foo.[bar,baz[2]]", + "result": [1, 4] + }, + { + "expression": "foo.[bar,baz[3]]", + "result": [1, null] + }, + { + "expression": "foo.[bar[0],baz[3]]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": 2} + }, + "cases": [ + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "foo.[bar,baz]", + "result": [1, 2] + } + ] +}, { + "given": { + "foo": { + "bar": {"baz": [{"common": "first", "one": 1}, + {"common": "second", "two": 2}]}, + "ignoreme": 1, + "includeme": true + } + }, + "cases": [ + { + "expression": "foo.{bar: bar.baz[1],includeme: includeme}", + "result": {"bar": {"common": "second", "two": 2}, "includeme": true} + }, + { + "expression": "foo.{\"bar.baz.two\": bar.baz[1].two, includeme: includeme}", + "result": {"bar.baz.two": 2, "includeme": true} + }, + { + "expression": "foo.[includeme, bar.baz[*].common]", + "result": [true, ["first", "second"]] + }, + { + "expression": "foo.[includeme, bar.baz[*].none]", + "result": [true, []] + }, + { + "expression": "foo.[includeme, bar.baz[].common]", + "result": [true, ["first", 
"second"]] + } + ] +}, { + "given": { + "reservations": [{ + "instances": [ + {"id": "id1", + "name": "first"}, + {"id": "id2", + "name": "second"} + ]}, { + "instances": [ + {"id": "id3", + "name": "third"}, + {"id": "id4", + "name": "fourth"} + ]} + ]}, + "cases": [ + { + "expression": "reservations[*].instances[*].{id: id, name: name}", + "result": [[{"id": "id1", "name": "first"}, {"id": "id2", "name": "second"}], + [{"id": "id3", "name": "third"}, {"id": "id4", "name": "fourth"}]] + }, + { + "expression": "reservations[].instances[].{id: id, name: name}", + "result": [{"id": "id1", "name": "first"}, + {"id": "id2", "name": "second"}, + {"id": "id3", "name": "third"}, + {"id": "id4", "name": "fourth"}] + }, + { + "expression": "reservations[].instances[].[id, name]", + "result": [["id1", "first"], + ["id2", "second"], + ["id3", "third"], + ["id4", "fourth"]] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].[baz, qux]", + "result": [[1, 2], [3, 4], [5, 6], [7, 8]] + }, + { + "expression": "foo[].bar[].[baz]", + "result": [[1], [3], [5], [7]] + }, + { + "expression": "foo[].bar[].[baz, qux][]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + } + ] +}, +{ + "given": { + 
"foo": { + "baz": [ + { + "bar": "abc" + }, { + "bar": "def" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].bar, qux[0]]", + "result": [["abc", "def"], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].[bar, boo], qux[0]]", + "result": [[["a", "c" ], ["d", "f" ]], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].not_there || baz[*].bar, qux[0]]", + "result": [["a", "d"], "zero"] + } + ] +}, +{ + "given": {"type": "object"}, + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*],*]", + "result": [null, ["object"]] + } + ] +}, +{ + "given": [], + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*]]", + "result": [[]] + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/ormatch.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/ormatch.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/ormatch.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/ormatch.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,59 @@ +[{ + "given": + {"outer": {"foo": "foo", "bar": "bar", "baz": "baz"}}, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": 
"outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] +}, { + "given": + {"outer": {"foo": "foo", "bool": false, "empty_list": [], "empty_string": ""}}, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] +}] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/pipe.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/pipe.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/pipe.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/pipe.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,131 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "subkey" + }, + "other": { + "baz": "subkey" + }, + "other2": { + "baz": "subkey" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + } + } + }, + "cases": [ + { + "expression": "foo.*.baz | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [1]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [2]", + "result": "subkey" + }, + { + "expression": "foo.bar.* | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.notbaz | [*]", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | *.baz", + "result": ["subkey", "subkey"] + } + ] +}, { + "given": { + "foo": { + "bar": { + 
"baz": "one" + }, + "other": { + "baz": "two" + }, + "other2": { + "baz": "three" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["d", "e", "f"] + } + } + }, + "cases": [ + { + "expression": "foo | bar", + "result": {"baz": "one"} + }, + { + "expression": "foo | bar | baz", + "result": "one" + }, + { + "expression": "foo|bar| baz", + "result": "one" + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "[foo.bar, foo.other] | [0]", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | a", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | b", + "result": {"baz": "two"} + }, + { + "expression": "foo.bam || foo.bar | baz", + "result": "one" + }, + { + "expression": "foo | not_there || bar", + "result": {"baz": "one"} + } + ] +}, { + "given": { + "foo": [{ + "bar": [{ + "baz": "one" + }, { + "baz": "two" + }] + }, { + "bar": [{ + "baz": "three" + }, { + "baz": "four" + }] + }] + }, + "cases": [ + { + "expression": "foo[*].bar[*] | [0][0]", + "result": {"baz": "one"} + } + ] +}] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/slice.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/slice.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/slice.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/slice.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,187 @@ +[{ + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + "bar": { + "baz": 1 + } + }, + "cases": [ + { + "expression": "bar[0:10]", + "result": null + }, + { + "expression": "foo[0:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10]", + 
"result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[1:9]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + }, + { + "expression": "foo[0:10:2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[5:]", + "result": [5, 6, 7, 8, 9] + }, + { + "expression": "foo[5::2]", + "result": [5, 7, 9] + }, + { + "expression": "foo[::2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[::-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[1::2]", + "result": [1, 3, 5, 7, 9] + }, + { + "expression": "foo[10:0:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1] + }, + { + "expression": "foo[10:5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:-2]", + "result": [8, 6, 4] + }, + { + "expression": "foo[0:20]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[10:-20:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[10:-20]", + "result": [] + }, + { + "expression": "foo[-4:-1]", + "result": [6, 7, 8] + }, + { + "expression": "foo[:-5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:0]", + "error": "invalid-value" + }, + { + "expression": "foo[8:2:0:1]", + "error": "syntax" + }, + { + "expression": "foo[8:2&]", + "error": "syntax" + }, + { + 
"expression": "foo[2:a:3]", + "error": "syntax" + } + ] +}, { + "given": { + "foo": [{"a": 1}, {"a": 2}, {"a": 3}], + "bar": [{"a": {"b": 1}}, {"a": {"b": 2}}, + {"a": {"b": 3}}], + "baz": 50 + }, + "cases": [ + { + "expression": "foo[:2].a", + "result": [1, 2] + }, + { + "expression": "foo[:2].b", + "result": [] + }, + { + "expression": "foo[:2].a.b", + "result": [] + }, + { + "expression": "bar[::-1].a.b", + "result": [3, 2, 1] + }, + { + "expression": "bar[:2].a.b", + "result": [1, 2] + }, + { + "expression": "baz[:2].a", + "result": null + } + ] +}, { + "given": [{"a": 1}, {"a": 2}, {"a": 3}], + "cases": [ + { + "expression": "[:]", + "result": [{"a": 1}, {"a": 2}, {"a": 3}] + }, + { + "expression": "[:2].a", + "result": [1, 2] + }, + { + "expression": "[::-1].a", + "result": [3, 2, 1] + }, + { + "expression": "[:2].b", + "result": [] + } + ] +}] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/syntax.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/syntax.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/syntax.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/syntax.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,616 @@ +[{ + "comment": "Dot syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo.1", + "error": "syntax" + }, + { + "expression": "foo.-11", + "error": "syntax" + }, + { + "expression": "foo", + "result": null + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": ".foo", + "error": "syntax" + }, + { + "expression": "foo..bar", + "error": "syntax" + }, + { + "expression": "foo.bar.", + "error": "syntax" + }, + { + "expression": "foo[.]", + "error": 
"syntax" + } + ] +}, + { + "comment": "Simple token errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": ".", + "error": "syntax" + }, + { + "expression": ":", + "error": "syntax" + }, + { + "expression": ",", + "error": "syntax" + }, + { + "expression": "]", + "error": "syntax" + }, + { + "expression": "[", + "error": "syntax" + }, + { + "expression": "}", + "error": "syntax" + }, + { + "expression": "{", + "error": "syntax" + }, + { + "expression": ")", + "error": "syntax" + }, + { + "expression": "(", + "error": "syntax" + }, + { + "expression": "((&", + "error": "syntax" + }, + { + "expression": "a[", + "error": "syntax" + }, + { + "expression": "a]", + "error": "syntax" + }, + { + "expression": "a][", + "error": "syntax" + }, + { + "expression": "!", + "error": "syntax" + } + ] + }, + { + "comment": "Boolean syntax errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "![!(!", + "error": "syntax" + } + ] + }, + { + "comment": "Wildcard syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "*", + "result": ["object"] + }, + { + "expression": "*.*", + "result": [] + }, + { + "expression": "*.foo", + "result": [] + }, + { + "expression": "*[0]", + "result": [] + }, + { + "expression": ".*", + "error": "syntax" + }, + { + "expression": "*foo", + "error": "syntax" + }, + { + "expression": "*0", + "error": "syntax" + }, + { + "expression": "foo[*]bar", + "error": "syntax" + }, + { + "expression": "foo[*]*", + "error": "syntax" + } + ] + }, + { + "comment": "Flatten syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[]", + "result": null + } + ] + }, + { + "comment": "Simple bracket syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[0]", + "result": null + }, + { + "expression": "[*]", + "result": null + }, + { + "expression": "*.[0]", + "error": "syntax" + }, + { + "expression": "*.[\"0\"]", + "result": [[null]] + }, + { + "expression": "[*].bar", + "result": 
null + }, + { + "expression": "[*][0]", + "result": null + }, + { + "expression": "foo[#]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select list syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[0]", + "result": null + }, + { + "comment": "Valid multi-select of a list", + "expression": "foo[0, 1]", + "error": "syntax" + }, + { + "expression": "foo.[0]", + "error": "syntax" + }, + { + "expression": "foo.[*]", + "result": null + }, + { + "comment": "Multi-select of a list with trailing comma", + "expression": "foo[0, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo[0,", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo.[a", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with extra comma", + "expression": "foo[0,, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using identifier indices", + "expression": "foo[abc, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index with trailing comma", + "expression": "foo[abc, ]", + "error": "syntax" + }, + { + "comment": "Valid multi-select of a hash using an identifier index", + "expression": "foo.[abc]", + "result": null + }, + { + "comment": "Valid multi-select of a hash", + "expression": "foo.[abc, def]", + "result": null + }, + { + "comment": "Multi-select of a hash using a numeric index", + "expression": "foo.[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with a trailing comma", + "expression": "foo.[abc, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with 
extra commas", + "expression": "foo.[abc,, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash using number indices", + "expression": "foo.[0, 1]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select hash syntax", + "given": {"type": "object"}, + "cases": [ + { + "comment": "No key or value", + "expression": "a{}", + "error": "syntax" + }, + { + "comment": "No closing token", + "expression": "a{", + "error": "syntax" + }, + { + "comment": "Not a key value pair", + "expression": "a{foo}", + "error": "syntax" + }, + { + "comment": "Missing value and closing character", + "expression": "a{foo:", + "error": "syntax" + }, + { + "comment": "Missing closing character", + "expression": "a{foo: 0", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a{foo:}", + "error": "syntax" + }, + { + "comment": "Trailing comma and no closing character", + "expression": "a{foo: 0, ", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a{foo: ,}", + "error": "syntax" + }, + { + "comment": "Accessing Array using an identifier", + "expression": "a{foo: bar}", + "error": "syntax" + }, + { + "expression": "a{foo: 0}", + "error": "syntax" + }, + { + "comment": "Missing key-value pair", + "expression": "a.{}", + "error": "syntax" + }, + { + "comment": "Not a key-value pair", + "expression": "a.{foo}", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a.{foo:}", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a.{foo: ,}", + "error": "syntax" + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar}", + "result": null + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar, baz: bam}", + "result": null + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, }", + "error": "syntax" + }, + { + "comment": "Missing key in second key-value pair", + 
"expression": "a.{foo: bar, baz}", + "error": "syntax" + }, + { + "comment": "Missing value in second key-value pair", + "expression": "a.{foo: bar, baz:}", + "error": "syntax" + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, baz: bam, }", + "error": "syntax" + }, + { + "comment": "Nested multi select", + "expression": "{\"\\\\\":{\" \":*}}", + "result": {"\\": {" ": ["object"]}} + } + ] + }, + { + "comment": "Or expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo || bar", + "result": null + }, + { + "expression": "foo ||", + "error": "syntax" + }, + { + "expression": "foo.|| bar", + "error": "syntax" + }, + { + "expression": " || foo", + "error": "syntax" + }, + { + "expression": "foo || || foo", + "error": "syntax" + }, + { + "expression": "foo.[a || b]", + "result": null + }, + { + "expression": "foo.[a ||]", + "error": "syntax" + }, + { + "expression": "\"foo", + "error": "syntax" + } + ] + }, + { + "comment": "Filter expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[?bar==`\"baz\"`]", + "result": null + }, + { + "expression": "foo[? 
bar == `\"baz\"` ]", + "result": null + }, + { + "expression": "foo[ ?bar==`\"baz\"`]", + "error": "syntax" + }, + { + "expression": "foo[?bar==]", + "error": "syntax" + }, + { + "expression": "foo[?==]", + "error": "syntax" + }, + { + "expression": "foo[?==bar]", + "error": "syntax" + }, + { + "expression": "foo[?bar==baz?]", + "error": "syntax" + }, + { + "expression": "foo[?a.b.c==d.e.f]", + "result": null + }, + { + "expression": "foo[?bar==`[0, 1, 2]`]", + "result": null + }, + { + "expression": "foo[?bar==`[\"a\", \"b\", \"c\"]`]", + "result": null + }, + { + "comment": "Literal char not escaped", + "expression": "foo[?bar==`[\"foo`bar\"]`]", + "error": "syntax" + }, + { + "comment": "Literal char escaped", + "expression": "foo[?bar==`[\"foo\\`bar\"]`]", + "result": null + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar<>baz]", + "error": "syntax" + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar^baz]", + "error": "syntax" + }, + { + "expression": "foo[bar==baz]", + "error": "syntax" + }, + { + "comment": "Quoted identifier in filter expression no spaces", + "expression": "[?\"\\\\\">`\"foo\"`]", + "result": null + }, + { + "comment": "Quoted identifier in filter expression with spaces", + "expression": "[?\"\\\\\" > `\"foo\"`]", + "result": null + } + ] + }, + { + "comment": "Filter expression errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "bar.`\"anything\"`", + "error": "syntax" + }, + { + "expression": "bar.baz.noexists.`\"literal\"`", + "error": "syntax" + }, + { + "comment": "Literal wildcard projection", + "expression": "foo[*].`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[*].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`.`\"subliteral\"`", + "error": "syntax" + }, + { + "comment": "Projecting a literal onto an empty list", + "expression": 
"foo[*].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "twolen[*].`\"foo\"`", + "error": "syntax" + }, + { + "comment": "Two level projection of a literal", + "expression": "twolen[*].threelen[*].`\"bar\"`", + "error": "syntax" + }, + { + "comment": "Two level flattened projection of a literal", + "expression": "twolen[].threelen[].`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Identifiers", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo", + "result": null + }, + { + "expression": "\"foo\"", + "result": null + }, + { + "expression": "\"\\\\\"", + "result": null + } + ] + }, + { + "comment": "Combined syntax", + "given": [], + "cases": [ + { + "expression": "*||*|*|*", + "result": null + }, + { + "expression": "*[]||[*]", + "result": [] + }, + { + "expression": "[*.*]", + "result": [null] + } + ] + } +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/unicode.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/unicode.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/unicode.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/unicode.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,38 @@ +[ + { + "given": {"foo": [{"✓": "✓"}, {"✓": "✗"}]}, + "cases": [ + { + "expression": "foo[].\"✓\"", + "result": ["✓", "✗"] + } + ] + }, + { + "given": {"☯": true}, + "cases": [ + { + "expression": "\"☯\"", + "result": true + } + ] + }, + { + "given": {"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪": true}, + "cases": [ + { + "expression": "\"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪\"", + "result": true + } + ] + }, + { + "given": {"☃": true}, + "cases": [ + { + "expression": "\"☃\"", + "result": true + } + ] + } +] diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/wildcard.json aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/wildcard.json --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/wildcard.json 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance/wildcard.json 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,460 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "val" + }, + "other": { + "baz": "val" + }, + "other2": { + "baz": "val" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + }, + "other5": { + "other": { + "a": 1, + "b": 1, + "c": 1 + } + } + } + }, + "cases": [ + { + "expression": "foo.*.baz", + "result": ["val", "val", "val"] + }, + { + "expression": "foo.bar.*", + "result": ["val"] + }, + { + "expression": "foo.*.notbaz", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "foo.*.notbaz[0]", + "result": ["a", "a"] + }, + { + "expression": "foo.*.notbaz[-1]", + "result": ["c", "c"] + } + ] +}, { + "given": { + "foo": { + "first-1": { + "second-1": "val" + }, + "first-2": { + "second-1": "val" + }, + "first-3": { + "second-1": "val" + } + } + }, + "cases": [ + { + "expression": "foo.*", + "result": [{"second-1": "val"}, {"second-1": "val"}, + {"second-1": "val"}] + }, + { + "expression": "foo.*.*", + "result": [["val"], ["val"], ["val"]] + }, + { + "expression": "foo.*.*.*", + "result": [[], [], []] + }, + { + "expression": "foo.*.*.*.*", + "result": [[], [], []] + } + ] +}, { + "given": { + "foo": { + "bar": "one" + }, + "other": { + "bar": "one" + }, + "nomatch": { + "notbar": "three" + } + }, + "cases": [ + { + "expression": "*.bar", + "result": ["one", "one"] + } + ] +}, { + "given": { + "top1": { + "sub1": {"foo": "one"} + }, + "top2": { + "sub1": {"foo": "one"} + } + }, + 
"cases": [ + { + "expression": "*", + "result": [{"sub1": {"foo": "one"}}, + {"sub1": {"foo": "one"}}] + }, + { + "expression": "*.sub1", + "result": [{"foo": "one"}, + {"foo": "one"}] + }, + { + "expression": "*.*", + "result": [[{"foo": "one"}], + [{"foo": "one"}]] + }, + { + "expression": "*.*.foo[]", + "result": ["one", "one"] + }, + { + "expression": "*.sub1.foo", + "result": ["one", "one"] + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "foo[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": + [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}], + "cases": [ + { + "expression": "[*]", + "result": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}] + }, + { + "expression": "[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": { + "foo": { + "bar": [ + {"baz": ["one", "two", "three"]}, + {"baz": ["four", "five", "six"]}, + {"baz": ["seven", "eight", "nine"]} + ] + } + }, + "cases": [ + { + "expression": "foo.bar[*].baz", + "result": [["one", "two", "three"], ["four", "five", "six"], ["seven", "eight", "nine"]] + }, + { + "expression": "foo.bar[*].baz[0]", + "result": ["one", "four", "seven"] + }, + { + "expression": "foo.bar[*].baz[1]", + "result": ["two", "five", "eight"] + }, + { + "expression": "foo.bar[*].baz[2]", + "result": ["three", "six", "nine"] + }, + { + "expression": "foo.bar[*].baz[3]", + "result": [] + } + ] +}, +{ + "given": { + "foo": { + "bar": [["one", "two"], ["three", "four"]] + } + }, + "cases": [ + { + "expression": "foo.bar[*]", + "result": [["one", "two"], ["three", "four"]] + }, + { + "expression": "foo.bar[0]", + "result": ["one", "two"] + }, + { + "expression": "foo.bar[0][0]", + "result": "one" + }, + { + "expression": "foo.bar[0][0][0]", + 
"result": null + }, + { + "expression": "foo.bar[0][0][0][0]", + "result": null + }, + { + "expression": "foo[0][0]", + "result": null + } + ] +}, +{ + "given": { + "foo": [ + {"bar": [{"kind": "basic"}, {"kind": "intermediate"}]}, + {"bar": [{"kind": "advanced"}, {"kind": "expert"}]}, + {"bar": "string"} + ] + + }, + "cases": [ + { + "expression": "foo[*].bar[*].kind", + "result": [["basic", "intermediate"], ["advanced", "expert"]] + }, + { + "expression": "foo[*].bar[0].kind", + "result": ["basic", "advanced"] + } + ] +}, +{ + "given": { + "foo": [ + {"bar": {"kind": "basic"}}, + {"bar": {"kind": "intermediate"}}, + {"bar": {"kind": "advanced"}}, + {"bar": {"kind": "expert"}}, + {"bar": "string"} + ] + }, + "cases": [ + { + "expression": "foo[*].bar.kind", + "result": ["basic", "intermediate", "advanced", "expert"] + } + ] +}, +{ + "given": { + "foo": [{"bar": ["one", "two"]}, {"bar": ["three", "four"]}, {"bar": ["five"]}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*].bar[1]", + "result": ["two", "four"] + }, + { + "expression": "foo[*].bar[2]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{"bar": []}, {"bar": []}, {"bar": []}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [["one", "two"], ["three", "four"], ["five"]] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*][1]", + "result": ["two", "four"] + } + ] +}, +{ + "given": { + "foo": [ + [ + ["one", "two"], ["three", "four"] + ], [ + ["five", "six"], ["seven", "eight"] + ], [ + ["nine"], ["ten"] + ] + ] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": [["one", "two"], ["five", "six"], ["nine"]] + }, + { + "expression": "foo[*][1]", + "result": [["three", "four"], ["seven", "eight"], ["ten"]] + }, + { + "expression": "foo[*][0][0]", + "result": ["one", "five", "nine"] + }, + { + 
"expression": "foo[*][1][0]", + "result": ["three", "seven", "ten"] + }, + { + "expression": "foo[*][0][1]", + "result": ["two", "six"] + }, + { + "expression": "foo[*][1][1]", + "result": ["four", "eight"] + }, + { + "expression": "foo[*][2]", + "result": [] + }, + { + "expression": "foo[*][2][2]", + "result": [] + }, + { + "expression": "bar[*]", + "result": null + }, + { + "expression": "bar[*].baz[*]", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[*]", + "result": null + }, + { + "expression": "hash[*]", + "result": null + }, + { + "expression": "number[*]", + "result": null + }, + { + "expression": "nullvalue[*]", + "result": null + }, + { + "expression": "string[*].foo", + "result": null + }, + { + "expression": "hash[*].foo", + "result": null + }, + { + "expression": "number[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo[*].bar", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "val", "bar": "val"}, + "number": 23, + "array": [1, 2, 3], + "nullvalue": null + }, + "cases": [ + { + "expression": "string.*", + "result": null + }, + { + "expression": "hash.*", + "result": ["val", "val"] + }, + { + "expression": "number.*", + "result": null + }, + { + "expression": "array.*", + "result": null + }, + { + "expression": "nullvalue.*", + "result": null + } + ] +}, +{ + "given": { + "a": [0, 1, 2], + "b": [0, 1, 2] + }, + "cases": [ + { + "expression": "*[0]", + "result": [0, 0] + } + ] +} +] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance_test.go 
1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/compliance_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,123 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestSuite struct { + Given interface{} + TestCases []TestCase `json:"cases"` + Comment string +} +type TestCase struct { + Comment string + Expression string + Result interface{} + Error string +} + +var whiteListed = []string{ + "compliance/basic.json", + "compliance/current.json", + "compliance/escape.json", + "compliance/filters.json", + "compliance/functions.json", + "compliance/identifiers.json", + "compliance/indices.json", + "compliance/literal.json", + "compliance/multiselect.json", + "compliance/ormatch.json", + "compliance/pipe.json", + "compliance/slice.json", + "compliance/syntax.json", + "compliance/unicode.json", + "compliance/wildcard.json", + "compliance/boolean.json", +} + +func allowed(path string) bool { + for _, el := range whiteListed { + if el == path { + return true + } + } + return false +} + +func TestCompliance(t *testing.T) { + assert := assert.New(t) + + var complianceFiles []string + err := filepath.Walk("compliance", func(path string, _ os.FileInfo, _ error) error { + //if strings.HasSuffix(path, ".json") { + if allowed(path) { + complianceFiles = append(complianceFiles, path) + } + return nil + }) + if assert.Nil(err) { + for _, filename := range complianceFiles { + runComplianceTest(assert, filename) + } + } +} + +func runComplianceTest(assert *assert.Assertions, filename string) { + var testSuites []TestSuite + data, err := ioutil.ReadFile(filename) + if assert.Nil(err) { + err := json.Unmarshal(data, &testSuites) + if assert.Nil(err) { + for _, testsuite := range testSuites { + runTestSuite(assert, testsuite, filename) + } + } + } +} + +func runTestSuite(assert *assert.Assertions, 
testsuite TestSuite, filename string) { + for _, testcase := range testsuite.TestCases { + if testcase.Error != "" { + // This is a test case that verifies we error out properly. + runSyntaxTestCase(assert, testsuite.Given, testcase, filename) + } else { + runTestCase(assert, testsuite.Given, testcase, filename) + } + } +} + +func runSyntaxTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + // Anything with an .Error means that we expect that JMESPath should return + // an error when we try to evaluate the expression. + _, err := Search(testcase.Expression, given) + assert.NotNil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) +} + +func runTestCase(assert *assert.Assertions, given interface{}, testcase TestCase, filename string) { + lexer := NewLexer() + var err error + _, err = lexer.tokenize(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not lex expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + parser := NewParser() + _, err = parser.Parse(testcase.Expression) + if err != nil { + errMsg := fmt.Sprintf("(%s) Could not parse expression: %s -- %s", filename, testcase.Expression, err.Error()) + assert.Fail(errMsg) + return + } + actual, err := Search(testcase.Expression, given) + if assert.Nil(err, fmt.Sprintf("Expression: %s", testcase.Expression)) { + assert.Equal(testcase.Result, actual, fmt.Sprintf("Expression: %s", testcase.Expression)) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/functions.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/functions.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/functions.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/functions.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,842 @@ 
+package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: 
true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, 
variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) 
+ } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), 
nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal 
{ + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = 
item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. 
+ items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,13 @@ +package jmespath + +import "github.com/jmespath/go-jmespath" + +// Fuzz will fuzz test the JMESPath parser. +func Fuzz(data []byte) int { + p := jmespath.NewParser() + _, err := p.Parse(string(data)) + if err != nil { + return 1 + } + return 0 +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-1 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-1 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-1 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-1 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-10 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-10 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-10 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-10 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-100 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-100 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-100 1970-01-01 
00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-100 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +ends_with(str, 'SStr') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-101 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-101 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-101 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-101 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +ends_with(str, 'foo') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-102 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-102 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-102 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-102 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +floor(`1.2`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-103 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-103 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-103 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-103 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +floor(decimals[0]) \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-104 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-104 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-104 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-104 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +floor(foo) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-105 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-105 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-105 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-105 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +length('abc') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-106 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-106 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-106 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-106 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +length('') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-107 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-107 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-107 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-107 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +length(@) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-108 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-108 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-108 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-108 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +length(strings[0]) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-109 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-109 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-109 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-109 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +length(str) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-110 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-110 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-110 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-110 2016-05-24 07:05:22.000000000 +0000 
@@ -0,0 +1 @@ +length(array) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-112 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-112 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-112 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-112 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +length(strings[0]) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-115 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-115 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-115 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-115 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +max(strings) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-118 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-118 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-118 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-118 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +merge(`{}`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-119 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-119 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-119 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-119 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +merge(`{}`, `{}`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-12 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-12 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-12 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-12 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +two \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-120 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-120 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-120 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-120 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +merge(`{"a": 1}`, `{"b": 2}`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-121 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-121 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-121 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-121 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +merge(`{"a": 1}`, `{"a": 2}`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-122 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-122 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-122 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-122 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +merge(`{"a": 1, "b": 2}`, `{"a": 2, "c": 3}`, `{"d": 4}`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-123 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-123 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-123 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-123 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +min(numbers) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-126 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-126 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-126 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-126 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +min(decimals) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-128 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-128 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-128 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-128 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type('abc') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-129 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-129 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-129 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-129 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type(`1.0`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-13 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-13 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-13 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-13 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +three \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-130 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-130 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-130 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-130 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type(`2`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-131 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-131 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-131 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-131 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type(`true`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-132 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-132 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-132 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-132 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type(`false`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-133 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-133 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-133 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-133 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type(`null`) \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-134 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-134 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-134 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-134 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type(`[0]`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-135 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-135 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-135 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-135 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type(`{"a": "b"}`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-136 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-136 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-136 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-136 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +type(@) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-137 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-137 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-137 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-137 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +keys(objects) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-138 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-138 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-138 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-138 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +values(objects) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-139 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-139 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-139 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-139 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +keys(empty_hash) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-14 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-14 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-14 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-14 2016-05-24 07:05:22.000000000 
+0000 @@ -0,0 +1 @@ +one.two \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-140 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-140 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-140 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-140 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +join(', ', strings) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-141 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-141 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-141 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-141 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +join(', ', strings) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-142 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-142 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-142 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-142 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +join(',', `["a", "b"]`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-143 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-143 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-143 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-143 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +join('|', strings) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-144 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-144 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-144 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-144 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +join('|', decimals[].to_string(@)) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-145 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-145 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-145 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-145 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +join('|', empty_list) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-146 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-146 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-146 1970-01-01 00:00:00.000000000 +0000 
+++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-146 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reverse(numbers) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-147 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-147 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-147 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-147 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reverse(array) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-148 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-148 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-148 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-148 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reverse(`[]`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-149 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-149 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-149 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-149 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reverse('') \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-15 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-15 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-15 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-15 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo."1" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-150 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-150 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-150 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-150 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reverse('hello world') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-151 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-151 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-151 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-151 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +starts_with(str, 'S') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-152 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-152 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-152 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-152 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +starts_with(str, 'St') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-153 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-153 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-153 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-153 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +starts_with(str, 'Str') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-155 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-155 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-155 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-155 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sum(numbers) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-156 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-156 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-156 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-156 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sum(decimals) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-157 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-157 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-157 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-157 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sum(array[].to_number(@)) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-158 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-158 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-158 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-158 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sum(`[]`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-159 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-159 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-159 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-159 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_array('foo') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-16 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-16 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-16 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-16 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo."1"[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-160 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-160 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-160 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-160 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_array(`0`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-161 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-161 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-161 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-161 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_array(objects) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-162 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-162 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-162 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-162 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_array(`[1, 2, 3]`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-163 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-163 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-163 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-163 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_array(false) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-164 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-164 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-164 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-164 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_string('foo') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-165 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-165 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-165 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-165 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_string(`1.2`) \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-166 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-166 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-166 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-166 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_string(`[0, 1]`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-167 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-167 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-167 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-167 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_number('1.0') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-168 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-168 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-168 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-168 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_number('1.1') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-169 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-169 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-169 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-169 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_number('4') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-17 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-17 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-17 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-17 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo."-1" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-170 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-170 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-170 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-170 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_number('notanumber') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-171 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-171 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-171 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-171 2016-05-24 07:05:22.000000000 
+0000 @@ -0,0 +1 @@ +to_number(`false`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-172 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-172 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-172 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-172 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_number(`null`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-173 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-173 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-173 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-173 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_number(`[0]`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-174 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-174 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-174 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-174 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +to_number(`{"foo": 0}`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-175 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-175 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-175 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-175 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sort(numbers) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-178 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-178 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-178 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-178 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sort(empty_list) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-179 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-179 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-179 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-179 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +not_null(unknown_key, str) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-18 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-18 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-18 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-18 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +@ \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-180 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-180 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-180 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-180 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +not_null(unknown_key, foo.bar, empty_list, str) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-181 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-181 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-181 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-181 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +not_null(unknown_key, null_key, empty_list, str) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-182 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-182 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-182 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-182 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +not_null(all, expressions, are_null) \ No newline at end of file diff 
-Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-183 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-183 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-183 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-183 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +numbers[].to_string(@) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-184 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-184 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-184 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-184 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +array[].to_number(@) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-185 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-185 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-185 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-185 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[].not_null(f, e, d, c, b, a) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-186 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-186 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-186 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-186 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sort_by(people, &age) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-187 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-187 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-187 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-187 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sort_by(people, &to_number(age_str)) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-188 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-188 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-188 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-188 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sort_by(people, &age)[].name \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-189 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-189 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-189 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-189 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sort_by(people, &age)[].extra \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-19 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-19 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-19 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-19 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +@.bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-190 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-190 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-190 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-190 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sort_by(`[]`, &age) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-191 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-191 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-191 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-191 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +max_by(people, &age) \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-192 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-192 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-192 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-192 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +max_by(people, &age_str) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-193 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-193 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-193 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-193 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +max_by(people, &to_number(age_str)) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-194 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-194 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-194 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-194 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +min_by(people, &age) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-195 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-195 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-195 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-195 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +min_by(people, &age_str) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-196 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-196 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-196 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-196 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +min_by(people, &to_number(age_str)) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-198 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-198 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-198 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-198 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +__L \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-199 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-199 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-199 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-199 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"!\r" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-2 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-2 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-2 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-2 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-20 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-20 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-20 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-20 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +@.foo[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-200 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-200 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-200 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-200 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +Y_1623 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-201 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-201 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-201 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-201 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +x \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-202 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-202 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-202 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-202 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\tF\uCebb" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-203 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-203 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-203 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-203 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +" \t" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-204 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-204 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-204 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-204 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +" " \ 
No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-205 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-205 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-205 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-205 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +v2 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-206 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-206 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-206 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-206 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\t" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-207 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-207 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-207 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-207 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +_X \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-208 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-208 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-208 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-208 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\t4\ud9da\udd15" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-209 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-209 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-209 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-209 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +v24_W \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-21 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-21 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-21 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-21 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"foo.bar" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-210 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-210 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-210 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-210 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 
@@ +"H" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-211 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-211 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-211 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-211 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\f" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-212 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-212 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-212 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-212 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"E4" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-213 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-213 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-213 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-213 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"!" 
\ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-214 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-214 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-214 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-214 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +tM \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-215 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-215 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-215 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-215 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +" [" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-216 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-216 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-216 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-216 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"R!" 
\ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-217 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-217 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-217 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-217 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +_6W \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-218 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-218 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-218 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-218 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\uaBA1\r" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-219 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-219 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-219 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-219 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +tL7 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-22 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-22 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-22 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-22 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"foo bar" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-220 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-220 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-220 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-220 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"<" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-257 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-257 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-257 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-257 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +hvu \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-258 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-258 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-258 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-258 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"; !" 
\ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-259 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-259 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-259 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-259 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +hU \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-26 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-26 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-26 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-26 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"/unix/path" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-260 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-260 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-260 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-260 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"!I\n\/" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-261 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-261 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-261 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-261 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\uEEbF" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-262 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-262 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-262 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-262 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"U)\t" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-263 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-263 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-263 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-263 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +fa0_9 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-264 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-264 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-264 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-264 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"/" 
\ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-265 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-265 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-265 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-265 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +Gy \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-266 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-266 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-266 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-266 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\b" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-267 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-267 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-267 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-267 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"<" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-268 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-268 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-268 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-268 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\t" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-269 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-269 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-269 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-269 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\t&\\\r" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-27 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-27 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-27 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-27 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\"\"\"" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-270 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-270 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-270 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-270 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"#" \ 
No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-271 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-271 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-271 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-271 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +B__ \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-272 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-272 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-272 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-272 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\nS \n" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-273 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-273 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-273 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-273 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +Bp \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-274 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-274 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-274 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-274 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +",\t;" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-275 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-275 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-275 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-275 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +B_q \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-276 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-276 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-276 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-276 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\/+\t\n\b!Z" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-277 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-277 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-277 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-277 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+"󇟇\\ueFAc" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-278 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-278 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-278 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-278 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +":\f" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-279 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-279 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-279 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-279 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\/" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-28 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-28 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-28 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-28 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"bar"."baz" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-280 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-280 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-280 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-280 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +_BW_6Hg_Gl \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-281 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-281 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-281 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-281 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"􃰂" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-282 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-282 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-282 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-282 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +zs1DC \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-283 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-283 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-283 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-283 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+__434 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-284 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-284 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-284 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-284 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"󵅁" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-285 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-285 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-285 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-285 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +Z_5 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-286 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-286 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-286 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-286 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +z_M_ \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-287 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-287 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-287 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-287 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +YU_2 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-288 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-288 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-288 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-288 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +_0 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-289 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-289 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-289 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-289 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\b+" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-29 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-29 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-29 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-29 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?name == 
'a'] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-290 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-290 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-290 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-290 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\"" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-291 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-291 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-291 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-291 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +D7 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-292 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-292 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-292 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-292 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +_62L \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-293 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-293 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-293 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-293 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\tK\t" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-294 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-294 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-294 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-294 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\n\\\f" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-295 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-295 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-295 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-295 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +I_ \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-296 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-296 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-296 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-296 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +W_a0_ 
\ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-297 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-297 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-297 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-297 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +BQ \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-298 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-298 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-298 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-298 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\tX$\uABBb" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-299 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-299 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-299 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-299 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +Z9 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-3 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-3 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-3 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-3 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar.baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-30 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-30 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-30 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-30 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*[?[0] == `0`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-300 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-300 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-300 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-300 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\b%\"򞄏" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-301 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-301 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-301 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-301 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 
@@ +_F \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-302 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-302 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-302 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-302 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"!," \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-303 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-303 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-303 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-303 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\"!" 
\ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-304 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-304 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-304 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-304 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +Hh \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-305 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-305 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-305 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-305 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"&" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-306 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-306 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-306 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-306 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"9\r\\R" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-307 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-307 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-307 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-307 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +M_k \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-308 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-308 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-308 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-308 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"!\b\n󑩒\"\"" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-309 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-309 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-309 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-309 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"6" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-31 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-31 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-31 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-31 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+foo[?first == last] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-310 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-310 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-310 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-310 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +_7 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-311 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-311 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-311 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-311 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"0" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-312 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-312 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-312 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-312 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\\8\\" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-313 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-313 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-313 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-313 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +b7eo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-314 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-314 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-314 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-314 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +xIUo9 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-315 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-315 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-315 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-315 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"5" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-316 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-316 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-316 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-316 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"?" 
\ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-317 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-317 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-317 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-317 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +sU \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-318 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-318 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-318 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-318 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"VH2&H\\\/" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-319 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-319 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-319 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-319 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +_C \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-32 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-32 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-32 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-32 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?first == last].first \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-320 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-320 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-320 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-320 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +_ \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-321 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-321 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-321 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-321 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"<\t" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-322 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-322 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-322 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-322 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 
+1 @@ +"\uD834\uDD1E" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-323 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-323 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-323 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-323 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-324 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-324 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-324 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-324 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-325 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-325 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-325 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-325 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-326 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-326 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-326 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-326 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[3] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-327 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-327 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-327 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-327 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-328 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-328 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-328 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-328 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[-2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-329 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-329 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-329 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-329 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1 @@ +foo.bar[-3] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-33 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-33 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-33 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-33 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?age > `25`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-330 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-330 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-330 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-330 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[-4] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-331 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-331 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-331 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-331 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-332 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-332 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-332 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-332 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-333 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-333 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-333 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-333 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[1].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-334 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-334 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-334 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-334 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[2].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-335 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-335 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-335 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-335 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 
+1 @@ +foo[3].notbar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-336 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-336 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-336 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-336 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[3].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-337 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-337 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-337 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-337 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-338 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-338 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-338 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-338 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-339 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-339 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-339 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-339 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-34 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-34 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-34 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-34 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?age >= `25`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-340 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-340 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-340 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-340 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[3] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-341 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-341 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-341 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-341 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 
@@ +foo[4] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-342 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-342 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-342 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-342 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-343 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-343 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-343 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-343 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-344 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-344 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-344 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-344 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-345 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-345 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-345 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-345 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-346 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-346 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-346 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-346 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[-2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-347 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-347 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-347 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-347 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[-3] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-348 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-348 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-348 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-348 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+reservations[].instances[].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-349 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-349 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-349 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-349 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-35 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-35 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-35 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-35 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?age > `30`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-350 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-350 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-350 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-350 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-351 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-351 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-351 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-351 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-352 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-352 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-352 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-352 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].foo[].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-353 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-353 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-353 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-353 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].foo[].baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-354 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-354 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-354 
1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-354 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].notfoo[].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-355 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-355 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-355 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-355 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].notfoo[].notbar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-356 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-356 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-356 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-356 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].notinstances[].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-357 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-357 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-357 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-357 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+reservations[].instances[].foo[].notbar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-358 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-358 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-358 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-358 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].bar[].baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-359 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-359 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-359 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-359 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].baz[].baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-36 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-36 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-36 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-36 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?age < `25`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-360 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-360 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-360 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-360 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].qux[].baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-361 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-361 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-361 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-361 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].qux[].baz[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-362 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-362 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-362 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-362 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-363 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-363 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-363 1970-01-01 00:00:00.000000000 
+0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-363 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-364 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-364 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-364 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-364 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[][1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-365 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-365 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-365 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-365 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[][0][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-366 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-366 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-366 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-366 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[][2][2] \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-367 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-367 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-367 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-367 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[][0][0][100] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-368 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-368 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-368 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-368 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-369 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-369 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-369 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-369 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-37 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-37 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-37 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-37 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?age <= `25`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-370 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-370 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-370 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-370 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-371 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-371 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-371 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-371 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[].bar[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-372 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-372 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-372 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-372 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1 @@ +foo[].bar[].baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-373 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-373 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-373 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-373 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +string[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-374 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-374 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-374 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-374 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +hash[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-375 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-375 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-375 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-375 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +number[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-376 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-376 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-376 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-376 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +nullvalue[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-377 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-377 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-377 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-377 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +string[].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-378 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-378 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-378 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-378 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +hash[].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-379 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-379 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-379 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-379 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1 @@ +number[].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-38 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-38 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-38 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-38 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?age < `20`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-380 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-380 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-380 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-380 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +nullvalue[].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-381 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-381 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-381 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-381 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +nullvalue[].foo[].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-382 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-382 
--- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-382 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-382 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`"foo"` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-383 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-383 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-383 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-383 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`"\u03a6"` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-384 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-384 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-384 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-384 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`"✓"` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-385 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-385 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-385 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-385 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 
@@ +`[1, 2, 3]` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-386 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-386 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-386 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-386 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`{"a": "b"}` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-387 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-387 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-387 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-387 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`true` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-388 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-388 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-388 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-388 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`false` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-389 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-389 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-389 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-389 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`null` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-39 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-39 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-39 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-39 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?age == `20`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-390 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-390 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-390 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-390 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`0` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-391 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-391 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-391 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-391 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+`1` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-392 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-392 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-392 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-392 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`2` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-393 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-393 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-393 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-393 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`3` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-394 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-394 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-394 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-394 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`4` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-395 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-395 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-395 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-395 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`5` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-396 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-396 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-396 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-396 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`6` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-397 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-397 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-397 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-397 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`7` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-398 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-398 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-398 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-398 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`8` \ No 
newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-399 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-399 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-399 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-399 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`9` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-4 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-4 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-4 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-4 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar.baz.bad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-40 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-40 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-40 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-40 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?age != `20`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-400 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-400 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-400 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-400 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`"foo\`bar"` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-401 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-401 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-401 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-401 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`"foo\"bar"` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-402 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-402 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-402 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-402 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`"1\`"` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-403 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-403 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-403 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-403 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1 @@ +`"\\"`.{a:`"b"`} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-404 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-404 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-404 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-404 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`{"a": "b"}`.a \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-405 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-405 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-405 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-405 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`{"a": {"b": "c"}}`.a.b \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-406 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-406 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-406 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-406 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`[0, 1, 2]`[1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-407 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-407 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-407 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-407 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +` {"foo": true}` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-408 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-408 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-408 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-408 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`{"foo": true} ` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-409 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-409 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-409 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-409 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +'foo' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-41 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-41 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-41 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-41 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?top.name == 'a'] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-410 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-410 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-410 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-410 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +' foo ' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-411 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-411 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-411 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-411 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +'0' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-412 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-412 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-412 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-412 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2 @@ +'newline +' \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-413 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-413 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-413 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-413 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2 @@ +' +' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-414 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-414 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-414 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-414 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +'✓' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-415 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-415 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-415 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-415 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +'𝄞' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-416 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-416 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-416 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-416 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +' [foo] ' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-417 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-417 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-417 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-417 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +'[foo]' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-418 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-418 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-418 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-418 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +'\u03a6' \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-419 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-419 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-419 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-419 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+foo.{bar: bar} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-42 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-42 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-42 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-42 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?top.first == top.last] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-420 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-420 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-420 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-420 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{"bar": bar} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-421 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-421 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-421 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-421 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{"foo.bar": bar} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-422 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-422 
--- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-422 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-422 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{bar: bar, baz: baz} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-423 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-423 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-423 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-423 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{"bar": bar, "baz": baz} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-424 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-424 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-424 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-424 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{"baz": baz, "qux\"": "qux\""} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-425 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-425 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-425 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-425 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{bar:bar,baz:baz} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-426 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-426 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-426 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-426 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{bar: bar,qux: qux} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-427 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-427 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-427 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-427 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{bar: bar, noexist: noexist} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-428 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-428 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-428 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-428 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{noexist: noexist, alsonoexist: alsonoexist} \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-429 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-429 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-429 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-429 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.badkey.{nokey: nokey, alsonokey: alsonokey} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-43 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-43 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-43 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-43 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?top == `{"first": "foo", "last": "bar"}`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-430 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-430 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-430 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-430 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.nested.*.{a: a,b: b} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-431 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-431 
--- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-431 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-431 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.nested.three.{a: a, cinner: c.inner} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-432 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-432 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-432 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-432 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.nested.three.{a: a, c: c.inner.bad.key} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-433 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-433 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-433 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-433 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{a: nested.one.a, b: nested.two.b} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-434 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-434 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-434 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-434 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{bar: bar, baz: baz} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-435 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-435 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-435 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-435 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{bar: bar} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-436 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-436 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-436 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-436 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{otherkey: bar} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-437 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-437 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-437 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-437 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{no: no, exist: exist} \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-438 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-438 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-438 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-438 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-439 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-439 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-439 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-439 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar,baz] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-44 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-44 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-44 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-44 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key == `true`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-440 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-440 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-440 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-440 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar,qux] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-441 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-441 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-441 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-441 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar,noexist] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-442 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-442 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-442 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-442 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[noexist,alsonoexist] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-443 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-443 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-443 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-443 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{bar:bar,baz:baz} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-444 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-444 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-444 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-444 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar,baz[0]] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-445 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-445 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-445 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-445 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar,baz[1]] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-446 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-446 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-446 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-446 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar,baz[2]] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-447 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-447 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-447 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-447 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar,baz[3]] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-448 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-448 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-448 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-448 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar[0],baz[3]] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-449 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-449 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-449 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-449 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{bar: bar, baz: baz} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-45 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-45 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-45 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-45 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key == `false`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-450 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-450 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-450 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-450 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[bar,baz] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-451 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-451 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-451 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-451 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{bar: bar.baz[1],includeme: includeme} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-452 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-452 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-452 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-452 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.{"bar.baz.two": bar.baz[1].two, includeme: includeme} \ No newline at end of file diff 
-Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-453 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-453 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-453 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-453 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[*].common] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-454 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-454 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-454 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-454 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[*].none] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-455 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-455 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-455 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-455 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[includeme, bar.baz[].common] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-456 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-456 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-456 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-456 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[*].instances[*].{id: id, name: name} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-457 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-457 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-457 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-457 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].{id: id, name: name} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-458 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-458 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-458 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-458 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[].[id, name] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-459 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-459 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-459 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-459 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-46 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-46 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-46 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-46 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key == `0`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-460 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-460 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-460 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-460 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-461 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-461 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-461 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-461 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[].bar \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-462 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-462 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-462 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-462 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[].bar[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-463 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-463 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-463 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-463 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[].bar[].[baz, qux] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-464 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-464 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-464 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-464 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[].bar[].[baz] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-465 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-465 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-465 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-465 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[].bar[].[baz, qux][] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-466 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-466 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-466 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-466 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[baz[*].bar, qux[0]] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-467 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-467 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-467 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-467 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[baz[*].[bar, boo], qux[0]] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-468 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-468 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-468 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-468 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[baz[*].not_there || baz[*].bar, qux[0]] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-469 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-469 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-469 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-469 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[[*],*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-47 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-47 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-47 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-47 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key == `1`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-470 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-470 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-470 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-470 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[[*]] \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-471 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-471 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-471 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-471 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.foo || outer.bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-472 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-472 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-472 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-472 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.foo||outer.bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-473 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-473 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-473 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-473 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.bar || outer.baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-474 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-474 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-474 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-474 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.bar||outer.baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-475 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-475 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-475 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-475 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.bad || outer.foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-476 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-476 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-476 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-476 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.bad||outer.foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-477 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-477 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-477 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-477 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.foo || outer.bad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-478 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-478 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-478 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-478 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.foo||outer.bad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-479 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-479 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-479 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-479 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.bad || outer.alsobad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-48 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-48 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-48 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-48 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key == `[0]`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-480 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-480 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-480 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-480 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.bad||outer.alsobad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-481 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-481 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-481 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-481 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.empty_string || outer.foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-482 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-482 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-482 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-482 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-483 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-483 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-483 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-483 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.baz | [0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-484 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-484 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-484 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-484 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.baz | [1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-485 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-485 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-485 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-485 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.baz | [2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-486 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-486 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-486 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-486 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar.* | [0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-487 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-487 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-487 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-487 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.notbaz | [*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-488 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-488 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-488 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-488 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo | bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-489 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-489 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-489 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-489 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo | bar | baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-49 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-49 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-49 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-49 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key == `{"bar": [0]}`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-490 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-490 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-490 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-490 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo|bar| baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-491 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-491 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-491 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-491 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +not_there | [0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-492 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-492 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-492 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-492 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +not_there | [0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-493 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-493 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-493 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-493 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[foo.bar, foo.other] | [0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-494 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-494 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-494 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-494 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | a \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-495 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-495 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-495 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-495 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | b \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-496 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-496 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-496 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-496 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{"a": foo.bar, "b": foo.other} | *.baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-497 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-497 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-497 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-497 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bam || foo.bar | baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-498 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-498 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-498 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-498 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo | not_there || bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-499 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-499 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-499 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-499 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*].bar[*] | [0][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-5 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-5 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-5 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-5 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar.bad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-50 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-50 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-50 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-50 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key == `null`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-500 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-500 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-500 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-500 2016-05-24 07:05:22.000000000 
+0000 @@ -0,0 +1 @@ +bar[0:10] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-501 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-501 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-501 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-501 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0:10:1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-502 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-502 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-502 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-502 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0:10] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-503 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-503 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-503 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-503 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0:10:] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-504 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-504 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-504 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-504 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0::1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-505 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-505 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-505 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-505 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0::] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-506 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-506 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-506 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-506 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0:] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-507 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-507 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-507 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-507 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+foo[:10:1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-508 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-508 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-508 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-508 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[::1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-509 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-509 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-509 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-509 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[:10:] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-51 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-51 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-51 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-51 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key == `[1]`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-510 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-510 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-510 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-510 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[::] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-511 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-511 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-511 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-511 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[:] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-512 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-512 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-512 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-512 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[1:9] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-513 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-513 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-513 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-513 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+foo[0:10:2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-514 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-514 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-514 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-514 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[5:] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-515 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-515 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-515 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-515 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[5::2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-516 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-516 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-516 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-516 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[::2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-517 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-517 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-517 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-517 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[::-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-518 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-518 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-518 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-518 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[1::2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-519 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-519 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-519 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-519 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[10:0:-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-52 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-52 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-52 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-52 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 
@@ +foo[?key == `{"a":2}`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-520 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-520 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-520 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-520 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[10:5:-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-521 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-521 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-521 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-521 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[8:2:-2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-522 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-522 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-522 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-522 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0:20] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-523 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-523 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-523 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-523 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[10:-20:-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-524 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-524 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-524 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-524 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[10:-20] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-525 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-525 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-525 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-525 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[-4:-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-526 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-526 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-526 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-526 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1 @@ +foo[:-5:-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-527 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-527 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-527 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-527 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[:2].a \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-528 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-528 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-528 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-528 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[:2].b \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-529 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-529 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-529 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-529 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[:2].a.b \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-53 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-53 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-53 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-53 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`true` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-530 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-530 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-530 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-530 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +bar[::-1].a.b \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-531 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-531 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-531 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-531 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +bar[:2].a.b \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-532 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-532 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-532 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-532 2016-05-24 07:05:22.000000000 
+0000 @@ -0,0 +1 @@ +baz[:2].a \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-533 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-533 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-533 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-533 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[:] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-534 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-534 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-534 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-534 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[:2].a \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-535 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-535 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-535 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-535 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[::-1].a \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-536 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-536 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-536 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-536 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[:2].b \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-537 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-537 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-537 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-537 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-538 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-538 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-538 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-538 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-539 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-539 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-539 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-539 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +* \ No 
newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-54 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-54 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-54 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-54 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`false` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-540 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-540 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-540 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-540 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-541 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-541 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-541 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-541 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*.foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-542 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-542 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-542 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-542 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-543 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-543 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-543 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-543 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-544 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-544 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-544 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-544 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-545 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-545 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-545 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-545 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[*] \ No 
newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-546 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-546 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-546 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-546 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*.["0"] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-547 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-547 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-547 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-547 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[*].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-548 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-548 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-548 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-548 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[*][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-549 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-549 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-549 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-549 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-55 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-55 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-55 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-55 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`0` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-550 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-550 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-550 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-550 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-551 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-551 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-551 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-551 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 
@@ +foo.[abc] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-552 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-552 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-552 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-552 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[abc, def] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-553 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-553 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-553 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-553 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +a.{foo: bar} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-554 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-554 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-554 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-554 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +a.{foo: bar, baz: bam} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-555 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-555 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-555 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-555 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +{"\\":{" ":*}} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-556 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-556 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-556 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-556 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo || bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-557 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-557 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-557 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-557 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.[a || b] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-558 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-558 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-558 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-558 2016-05-24 07:05:22.000000000 +0000 
@@ -0,0 +1 @@ +foo[?bar==`"baz"`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-559 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-559 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-559 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-559 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[? bar == `"baz"` ] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-56 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-56 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-56 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-56 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`1` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-560 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-560 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-560 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-560 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?a.b.c==d.e.f] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-561 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-561 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-561 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-561 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?bar==`[0, 1, 2]`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-562 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-562 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-562 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-562 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?bar==`["a", "b", "c"]`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-563 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-563 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-563 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-563 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?bar==`["foo\`bar"]`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-564 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-564 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-564 1970-01-01 00:00:00.000000000 
+0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-564 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[?"\\">`"foo"`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-565 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-565 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-565 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-565 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[?"\\" > `"foo"`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-566 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-566 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-566 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-566 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-567 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-567 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-567 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-567 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"foo" \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-568 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-568 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-568 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-568 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"\\" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-569 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-569 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-569 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-569 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*||*|*|* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-57 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-57 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-57 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-57 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`[0]` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-570 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-570 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-570 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-570 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*[]||[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-571 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-571 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-571 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-571 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[*.*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-572 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-572 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-572 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-572 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[]."✓" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-573 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-573 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-573 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-573 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+"☯" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-574 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-574 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-574 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-574 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-575 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-575 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-575 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-575 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +"☃" \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-576 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-576 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-576 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-576 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-577 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-577 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-577 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-577 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-578 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-578 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-578 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-578 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.notbaz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-579 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-579 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-579 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-579 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.notbaz[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-58 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-58 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-58 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-58 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1 @@ +foo[?`{"bar": [0]}` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-580 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-580 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-580 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-580 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.notbaz[-1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-581 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-581 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-581 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-581 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-582 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-582 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-582 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-582 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-583 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-583 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-583 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-583 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.*.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-584 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-584 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-584 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-584 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.*.*.*.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-585 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-585 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-585 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-585 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*.bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-586 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-586 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-586 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-586 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 
@@ +* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-587 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-587 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-587 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-587 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*.sub1 \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-588 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-588 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-588 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-588 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-589 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-589 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-589 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-589 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*.*.foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-59 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-59 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-59 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-59 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`null` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-590 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-590 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-590 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-590 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*.sub1.foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-591 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-591 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-591 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-591 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-592 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-592 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-592 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-592 2016-05-24 07:05:22.000000000 +0000 
@@ -0,0 +1 @@ +foo[*].notbar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-593 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-593 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-593 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-593 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-594 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-594 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-594 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-594 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[*].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-595 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-595 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-595 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-595 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +[*].notbar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-596 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-596 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-596 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-596 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[*].baz \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-597 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-597 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-597 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-597 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[*].baz[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-598 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-598 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-598 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-598 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[*].baz[1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-599 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-599 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-599 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-599 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[*].baz[2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-6 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-6 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-6 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-6 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-60 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-60 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-60 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-60 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`[1]` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-600 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-600 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-600 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-600 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[*].baz[3] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-601 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-601 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-601 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-601 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-602 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-602 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-602 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-602 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-603 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-603 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-603 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-603 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[0][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-604 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-604 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-604 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-604 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[0][0][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-605 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-605 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-605 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-605 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo.bar[0][0][0][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-606 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-606 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-606 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-606 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[0][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-607 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-607 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-607 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-607 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*].bar[*].kind \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-608 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-608 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-608 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-608 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*].bar[0].kind \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-609 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-609 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-609 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-609 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*].bar.kind \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-61 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-61 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-61 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-61 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`{"a":2}` == key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-610 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-610 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-610 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-610 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*].bar[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-611 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-611 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-611 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-611 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*].bar[1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-612 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-612 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-612 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-612 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*].bar[2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-613 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-613 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-613 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-613 2016-05-24 07:05:22.000000000 
+0000 @@ -0,0 +1 @@ +foo[*].bar[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-614 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-614 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-614 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-614 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-615 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-615 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-615 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-615 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-616 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-616 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-616 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-616 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-617 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-617 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-617 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-617 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-618 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-618 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-618 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-618 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][0][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-619 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-619 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-619 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-619 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][1][0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-62 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-62 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-62 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-62 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 
+1 @@ +foo[?key != `true`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-620 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-620 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-620 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-620 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][0][1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-621 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-621 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-621 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-621 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][1][1] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-622 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-622 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-622 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-622 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-623 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-623 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-623 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-623 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[*][2][2] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-624 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-624 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-624 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-624 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +bar[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-625 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-625 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-625 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-625 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +bar[*].baz[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-626 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-626 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-626 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-626 2016-05-24 07:05:22.000000000 +0000 @@ 
-0,0 +1 @@ +string[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-627 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-627 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-627 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-627 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +hash[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-628 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-628 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-628 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-628 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +number[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-629 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-629 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-629 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-629 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +nullvalue[*] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-63 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-63 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-63 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-63 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key != `false`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-630 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-630 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-630 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-630 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +string[*].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-631 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-631 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-631 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-631 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +hash[*].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-632 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-632 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-632 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-632 2016-05-24 07:05:22.000000000 
+0000 @@ -0,0 +1 @@ +number[*].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-633 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-633 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-633 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-633 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +nullvalue[*].foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-634 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-634 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-634 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-634 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +nullvalue[*].foo[*].bar \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-635 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-635 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-635 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-635 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +string.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-636 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-636 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-636 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-636 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +hash.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-637 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-637 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-637 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-637 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +number.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-638 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-638 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-638 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-638 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +array.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-639 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-639 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-639 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-639 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +nullvalue.* \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-64 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-64 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-64 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-64 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key != `0`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-640 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-640 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-640 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-640 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +*[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-641 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-641 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-641 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-641 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-642 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-642 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-642 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-642 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`foo\"quote` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-643 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-643 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-643 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-643 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`✓` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-644 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-644 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-644 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-644 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`foo\"bar` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-645 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-645 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-645 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-645 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`1\`` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-646 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-646 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-646 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-646 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`\\`.{a:`b`} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-647 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-647 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-647 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-647 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-648 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-648 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-648 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-648 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+` foo` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-649 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-649 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-649 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-649 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-65 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-65 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-65 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-65 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key != `1`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-650 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-650 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-650 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-650 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`foo\"quote` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-651 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-651 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-651 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-651 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`✓` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-652 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-652 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-652 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-652 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`foo\"bar` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-653 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-653 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-653 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-653 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`1\`` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-654 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-654 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-654 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-654 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+`\\`.{a:`b`} \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-655 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-655 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-655 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-655 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +`foo` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-656 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-656 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-656 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-656 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +` foo` \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-66 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-66 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-66 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-66 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key != `null`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-67 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-67 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-67 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-67 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key != `[1]`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-68 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-68 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-68 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-68 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?key != `{"a":2}`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-69 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-69 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-69 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-69 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`true` != key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-7 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-7 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-7 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-7 2016-05-24 07:05:22.000000000 
+0000 @@ -0,0 +1 @@ +bad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-70 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-70 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-70 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-70 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`false` != key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-71 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-71 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-71 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-71 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`0` != key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-72 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-72 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-72 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-72 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`1` != key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-73 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-73 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-73 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-73 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`null` != key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-74 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-74 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-74 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-74 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`[1]` != key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-75 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-75 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-75 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-75 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?`{"a":2}` != key] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-76 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-76 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-76 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-76 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[?bar==`1`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-77 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-77 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-77 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-77 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[*].instances[?bar==`1`] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-78 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-78 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-78 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-78 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +reservations[].instances[?bar==`1`][] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-79 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-79 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-79 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-79 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?bar==`1`].bar[0] \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-8 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-8 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-8 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-8 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +bad.morebad.morebad \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-80 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-80 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-80 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-80 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo[?a==`1`].b.c \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-81 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-81 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-81 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-81 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +abs(foo) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-82 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-82 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-82 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-82 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +abs(foo) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-83 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-83 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-83 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-83 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +abs(array[1]) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-84 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-84 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-84 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-84 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +abs(array[1]) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-85 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-85 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-85 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-85 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +abs(`-24`) \ No newline at end of file diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-86 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-86 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-86 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-86 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +abs(`-24`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-87 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-87 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-87 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-87 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +avg(numbers) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-88 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-88 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-88 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-88 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +ceil(`1.2`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-89 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-89 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-89 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-89 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +ceil(decimals[0]) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-9 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-9 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-9 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-9 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +foo \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-90 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-90 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-90 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-90 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +ceil(decimals[1]) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-91 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-91 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-91 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-91 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ 
+ceil(decimals[2]) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-92 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-92 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-92 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-92 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +contains('abc', 'a') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-93 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-93 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-93 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-93 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +contains('abc', 'd') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-94 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-94 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-94 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-94 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +contains(strings, 'a') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-95 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-95 --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-95 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-95 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +contains(decimals, `1.2`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-96 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-96 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-96 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-96 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +contains(decimals, `false`) \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-97 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-97 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-97 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-97 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +ends_with(str, 'r') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-98 aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-98 --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-98 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/fuzz/testdata/expr-98 2016-05-24 
07:05:22.000000000 +0000 @@ -0,0 +1 @@ +ends_with(str, 'tr') \ No newline at end of file diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/.gitignore aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/.gitignore --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/.gitignore 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,4 @@ +jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/interpreter.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/interpreter.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/interpreter.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/interpreter.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, 
current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := 
left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + 
return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, 
current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/interpreter_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/interpreter_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/interpreter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/interpreter_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,221 @@ +package jmespath + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +type scalars struct { + Foo string + Bar string +} + +type sliceType struct { + A string + B []scalars + C []*scalars +} + +type benchmarkStruct struct { + Fooasdfasdfasdfasdf string +} + +type benchmarkNested struct { + Fooasdfasdfasdfasdf nestedA +} + +type nestedA struct { + Fooasdfasdfasdfasdf nestedB +} + +type nestedB struct { + Fooasdfasdfasdfasdf nestedC +} + +type nestedC struct { + Fooasdfasdfasdfasdf string +} + +type nestedSlice struct { + A []sliceType +} + +func TestCanSupportEmptyInterface(t *testing.T) { + assert := assert.New(t) + data := make(map[string]interface{}) + data["foo"] = "bar" + result, err := Search("foo", data) + assert.Nil(err) + assert.Equal("bar", result) +} + +func TestCanSupportUserDefinedStructsValue(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", s) + assert.Nil(err) + 
assert.Equal("one", result) +} + +func TestCanSupportUserDefinedStructsRef(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + result, err := Search("Foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceAll(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithSlicingExpression(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[:].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithFilterProjection(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[? 
`true` ].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1", "correct"}, result) +} + +func TestCanSupportStructWithSlice(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("B[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithOrExpressions(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: nil} + result, err := Search("C || A", data) + assert.Nil(err) + assert.Equal("foo", result) +} + +func TestCanSupportStructWithSlicePointer(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", C: []*scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("C[-1].Foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestWillAutomaticallyCapitalizeFieldNames(t *testing.T) { + assert := assert.New(t) + s := scalars{Foo: "one", Bar: "bar"} + // Note that there's a lower cased "foo" instead of "Foo", + // but it should still correspond to the Foo field in the + // scalars struct + result, err := Search("foo", &s) + assert.Nil(err) + assert.Equal("one", result) +} + +func TestCanSupportStructWithSliceLowerCased(t *testing.T) { + assert := assert.New(t) + data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + result, err := Search("b[-1].foo", data) + assert.Nil(err) + assert.Equal("correct", result) +} + +func TestCanSupportStructWithNestedPointers(t *testing.T) { + assert := assert.New(t) + data := struct{ A *struct{ B int } }{} + result, err := Search("A.B", data) + assert.Nil(err) + assert.Nil(result) +} + +func TestCanSupportFlattenNestedSlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {B: []scalars{{Foo: "f1a"}, {Foo: "f1b"}}}, + {B: []scalars{{Foo: "f2a"}, {Foo: "f2b"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"f1a", "f1b", "f2a", 
"f2b"}, result) +} + +func TestCanSupportFlattenNestedEmptySlice(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {}, {B: []scalars{{Foo: "a"}}}, + }} + result, err := Search("A[].B[].Foo", data) + assert.Nil(err) + assert.Equal([]interface{}{"a"}, result) +} + +func TestCanSupportProjectionsWithStructs(t *testing.T) { + assert := assert.New(t) + data := nestedSlice{A: []sliceType{ + {A: "first"}, {A: "second"}, {A: "third"}, + }} + result, err := Search("A[*].A", data) + assert.Nil(err) + assert.Equal([]interface{}{"first", "second", "third"}, result) +} + +func TestCanSupportSliceOfStructsWithFunctions(t *testing.T) { + assert := assert.New(t) + data := []scalars{scalars{"a1", "b1"}, scalars{"a2", "b2"}} + result, err := Search("length(@)", data) + assert.Nil(err) + assert.Equal(result.(float64), 2.0) +} + +func BenchmarkInterpretSingleFieldStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf") + data := benchmarkStruct{"foobarbazqux"} + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedStruct(b *testing.B) { + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + data := benchmarkNested{ + nestedA{ + nestedB{ + nestedC{"foobarbazqux"}, + }, + }, + } + for i := 0; i < b.N; i++ { + intr.Execute(ast, &data) + } +} + +func BenchmarkInterpretNestedMaps(b *testing.B) { + jsonData := []byte(`{"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": {"fooasdfasdfasdfasdf": "foobarbazqux"}}}}`) + var data interface{} + json.Unmarshal(jsonData, &data) + + intr := newInterpreter() + parser := NewParser() + ast, _ := parser.Parse("fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf.fooasdfasdfasdfasdf") + for i := 0; i < b.N; i++ { + intr.Execute(ast, data) + } +} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/lexer.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/lexer.go
--- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/lexer.go	1970-01-01 00:00:00.000000000 +0000
+++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/lexer.go	2016-05-24 07:05:22.000000000 +0000
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// token is a single lexed token: its type, raw value, and the position
+// and length (in bytes) of that value within the source expression.
+type token struct {
+	tokenType tokType
+	value     string
+	position  int
+	length    int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+	expression string       // The expression provided by the user.
+	currentPos int          // The current position in the string.
+	lastWidth  int          // The width of the current rune; lets back() step back exactly one rune.
+	buf        bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+	msg        string // Error message displayed to user
+	Expression string // Expression that generated a SyntaxError
+	Offset     int    // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+	// In the future, it would be good to underline the specific
+	// location where the error occurred.
+	return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. 
+func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' 
{ + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. 
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/lexer_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/lexer_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/lexer_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/lexer_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,161 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var lexingTests = []struct { + expression string + expected []token +}{ + {"*", []token{{tStar, "*", 0, 1}}}, + {".", []token{{tDot, ".", 0, 1}}}, + {"[?", []token{{tFilter, "[?", 0, 2}}}, + {"[]", []token{{tFlatten, "[]", 0, 2}}}, + {"(", []token{{tLparen, "(", 0, 1}}}, + {")", []token{{tRparen, ")", 0, 1}}}, + {"[", []token{{tLbracket, "[", 0, 1}}}, + {"]", []token{{tRbracket, "]", 0, 1}}}, + {"{", []token{{tLbrace, "{", 0, 1}}}, + {"}", []token{{tRbrace, "}", 0, 1}}}, + {"||", []token{{tOr, "||", 0, 2}}}, + {"|", []token{{tPipe, "|", 0, 1}}}, + {"29", []token{{tNumber, "29", 0, 2}}}, + {"2", []token{{tNumber, 
"2", 0, 1}}}, + {"0", []token{{tNumber, "0", 0, 1}}}, + {"-20", []token{{tNumber, "-20", 0, 3}}}, + {"foo", []token{{tUnquotedIdentifier, "foo", 0, 3}}}, + {`"bar"`, []token{{tQuotedIdentifier, "bar", 0, 3}}}, + // Escaping the delimiter + {`"bar\"baz"`, []token{{tQuotedIdentifier, `bar"baz`, 0, 7}}}, + {",", []token{{tComma, ",", 0, 1}}}, + {":", []token{{tColon, ":", 0, 1}}}, + {"<", []token{{tLT, "<", 0, 1}}}, + {"<=", []token{{tLTE, "<=", 0, 2}}}, + {">", []token{{tGT, ">", 0, 1}}}, + {">=", []token{{tGTE, ">=", 0, 2}}}, + {"==", []token{{tEQ, "==", 0, 2}}}, + {"!=", []token{{tNE, "!=", 0, 2}}}, + {"`[0, 1, 2]`", []token{{tJSONLiteral, "[0, 1, 2]", 1, 9}}}, + {"'foo'", []token{{tStringLiteral, "foo", 1, 3}}}, + {"'a'", []token{{tStringLiteral, "a", 1, 1}}}, + {`'foo\'bar'`, []token{{tStringLiteral, "foo'bar", 1, 7}}}, + {"@", []token{{tCurrent, "@", 0, 1}}}, + {"&", []token{{tExpref, "&", 0, 1}}}, + // Quoted identifier unicode escape sequences + {`"\u2713"`, []token{{tQuotedIdentifier, "✓", 0, 3}}}, + {`"\\"`, []token{{tQuotedIdentifier, `\`, 0, 1}}}, + {"`\"foo\"`", []token{{tJSONLiteral, "\"foo\"", 1, 5}}}, + // Combinations of tokens. + {"foo.bar", []token{ + {tUnquotedIdentifier, "foo", 0, 3}, + {tDot, ".", 3, 1}, + {tUnquotedIdentifier, "bar", 4, 3}, + }}, + {"foo[0]", []token{ + {tUnquotedIdentifier, "foo", 0, 3}, + {tLbracket, "[", 3, 1}, + {tNumber, "0", 4, 1}, + {tRbracket, "]", 5, 1}, + }}, + {"foo[?a' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." 
+ @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/parser.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/parser.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/parser.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/parser.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. 
You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := 
p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, 
+ }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, 
right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions 
[]ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := 
p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) 
SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/parser_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/parser_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/parser_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/parser_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,136 @@ +package jmespath + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var parsingErrorTests = []struct { + expression string + msg string +}{ + {"foo.", "Incopmlete expression"}, + {"[foo", "Incopmlete expression"}, + {"]", "Invalid"}, + {")", "Invalid"}, + {"}", "Invalid"}, + {"foo..bar", "Invalid"}, + {`foo."bar`, "Forwards lexer errors"}, + {`{foo: bar`, "Incomplete expression"}, + {`{foo bar}`, "Invalid"}, + {`[foo bar]`, "Invalid"}, + {`foo@`, "Invalid"}, + {`&&&&&&&&&&&&t(`, "Invalid"}, + {`[*][`, "Invalid"}, +} + +func TestParsingErrors(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + for _, tt := range parsingErrorTests { + _, err := parser.Parse(tt.expression) + assert.NotNil(err, fmt.Sprintf("Expected parsing error: %s, for expression: %s", tt.msg, tt.expression)) + } +} + +var prettyPrinted = `ASTProjection { + children: { + ASTField { + value: "foo" + } + ASTSubexpression { + children: { + ASTSubexpression { + children: { + ASTField { + value: "bar" + } + ASTField { + value: "baz" 
+ } + } + ASTField { + value: "qux" + } + } +} +` + +var prettyPrintedCompNode = `ASTFilterProjection { + children: { + ASTField { + value: "a" + } + ASTIdentity { + } + ASTComparator { + value: tLTE + children: { + ASTField { + value: "b" + } + ASTField { + value: "c" + } + } +} +` + +func TestPrettyPrintedAST(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + parsed, _ := parser.Parse("foo[*].bar.baz.qux") + assert.Equal(parsed.PrettyPrint(0), prettyPrinted) +} + +func TestPrettyPrintedCompNode(t *testing.T) { + assert := assert.New(t) + parser := NewParser() + parsed, _ := parser.Parse("a[?b<=c]") + assert.Equal(parsed.PrettyPrint(0), prettyPrintedCompNode) +} + +func BenchmarkParseIdentifier(b *testing.B) { + runParseBenchmark(b, exprIdentifier) +} + +func BenchmarkParseSubexpression(b *testing.B) { + runParseBenchmark(b, exprSubexpr) +} + +func BenchmarkParseDeeplyNested50(b *testing.B) { + runParseBenchmark(b, deeplyNested50) +} + +func BenchmarkParseDeepNested50Pipe(b *testing.B) { + runParseBenchmark(b, deeplyNested50Pipe) +} + +func BenchmarkParseDeepNested50Index(b *testing.B) { + runParseBenchmark(b, deeplyNested50Index) +} + +func BenchmarkParseQuotedIdentifier(b *testing.B) { + runParseBenchmark(b, exprQuotedIdentifier) +} + +func BenchmarkParseQuotedIdentifierEscapes(b *testing.B) { + runParseBenchmark(b, quotedIdentifierEscapes) +} + +func BenchmarkParseRawStringLiteral(b *testing.B) { + runParseBenchmark(b, rawStringLiteral) +} + +func BenchmarkParseDeepProjection104(b *testing.B) { + runParseBenchmark(b, deepProjection104) +} + +func runParseBenchmark(b *testing.B, expression string) { + parser := NewParser() + for i := 0; i < b.N; i++ { + parser.Parse(expression) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/jmespath/go-jmespath/README.md --- 
// generated by stringer -type=tokType; DO NOT EDIT

package jmespath

import "fmt"

// _tokType_name concatenates all token-type names; _tokType_index
// holds the start offset of each name within it.
const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"

var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}

// String returns the name of the token type, or a "tokType(N)"
// fallback for values outside the generated table.
func (i tokType) String() string {
	if i < 0 || i >= tokType(len(_tokType_index)-1) {
		return fmt.Sprintf("tokType(%d)", i)
	}
	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
}
// isFalse reports whether value is "false-like" under the JMESPath
// spec: the boolean false, nil, an empty string, or an empty
// array/hash. Values outside the JSON types are examined via
// reflection: empty slices/maps and nil pointers are false, non-nil
// pointers are dereferenced and re-evaluated, and structs are never
// false (even when all fields hold their zero value).
func isFalse(value interface{}) bool {
	switch v := value.(type) {
	case nil:
		return true
	case bool:
		return v == false
	case string:
		return v == ""
	case []interface{}:
		return len(v) == 0
	case map[string]interface{}:
		return len(v) == 0
	}

	// Fall back to reflection for non-JSON Go values.
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Struct:
		return false
	case reflect.Slice, reflect.Map:
		return rv.Len() == 0
	case reflect.Ptr:
		if rv.IsNil() {
			return true
		}
		return isFalse(rv.Elem().Interface())
	}
	return false
}
// objsEqual is a generic deep-equality check over two arbitrary
// values.
func objsEqual(left interface{}, right interface{}) bool {
	return reflect.DeepEqual(left, right)
}

// sliceParam is a single component (start, stop, or step) of a slice
// expression. Specified distinguishes an explicit value from an
// omitted one.
type sliceParam struct {
	N         int
	Specified bool
}

// slice implements JMESPath [start:stop:step] slicing over slice,
// mirroring Python slice semantics (negative indices and steps).
func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
	computed, err := computeSliceParams(len(slice), parts)
	if err != nil {
		return nil, err
	}
	start, stop, step := computed[0], computed[1], computed[2]
	result := []interface{}{}
	if step > 0 {
		for i := start; i < stop; i += step {
			result = append(result, slice[i])
		}
	} else {
		for i := start; i > stop; i += step {
			result = append(result, slice[i])
		}
	}
	return result, nil
}

// computeSliceParams resolves the three slice components against a
// sequence of the given length, filling in Python-style defaults for
// unspecified components and clamping explicit ones.
func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
	step := 1
	if parts[2].Specified {
		if parts[2].N == 0 {
			return nil, errors.New("Invalid slice, step cannot be 0")
		}
		step = parts[2].N
	}
	negativeStep := step < 0

	// Default start is the first element for forward steps, the last
	// for backward steps.
	start := 0
	if parts[0].Specified {
		start = capSlice(length, parts[0].N, step)
	} else if negativeStep {
		start = length - 1
	}

	// Default stop is one past the end for forward steps, one before
	// the beginning for backward steps.
	stop := length
	if parts[1].Specified {
		stop = capSlice(length, parts[1].N, step)
	} else if negativeStep {
		stop = -1
	}
	return []int{start, stop, step}, nil
}

// capSlice clamps an index to the valid range for a sequence of the
// given length, translating negative indices relative to the end. The
// out-of-range replacement depends on the step direction.
func capSlice(length int, actual int, step int) int {
	switch {
	case actual < 0:
		actual += length
		if actual < 0 {
			if step < 0 {
				return -1
			}
			return 0
		}
	case actual >= length:
		if step < 0 {
			return length - 1
		}
		return length
	}
	return actual
}
// toArrayNum converts data to a []float64. The second result is false
// (and the slice nil) unless data is a []interface{} whose elements
// are all float64.
func toArrayNum(data interface{}) ([]float64, bool) {
	arr, ok := data.([]interface{})
	if !ok {
		return nil, false
	}
	result := make([]float64, len(arr))
	for i, el := range arr {
		num, isNum := el.(float64)
		if !isNum {
			return nil, false
		}
		result[i] = num
	}
	return result, true
}

// toArrayStr converts data to a []string. The second result is false
// (and the slice nil) unless data is a []interface{} whose elements
// are all strings; when the conversion succeeds the converted slice
// and true are returned.
func toArrayStr(data interface{}) ([]string, bool) {
	arr, ok := data.([]interface{})
	if !ok {
		return nil, false
	}
	result := make([]string, len(arr))
	for i, el := range arr {
		str, isStr := el.(string)
		if !isStr {
			return nil, false
		}
		result[i] = str
	}
	return result, true
}

// isSliceType reports whether v is a (non-nil) slice of any element
// type.
func isSliceType(v interface{}) bool {
	return v != nil && reflect.TypeOf(v).Kind() == reflect.Slice
}
input[0] = 0 + input[1] = 1 + input[2] = 2 + input[3] = 3 + input[4] = 4 + result, err := slice(input, []sliceParam{{0, true}, {3, true}, {1, true}}) + assert.Nil(err) + assert.Equal(input[:3], result) +} + +func TestIsFalseJSONTypes(t *testing.T) { + assert := assert.New(t) + assert.True(isFalse(false)) + assert.True(isFalse("")) + var empty []interface{} + assert.True(isFalse(empty)) + m := make(map[string]interface{}) + assert.True(isFalse(m)) + assert.True(isFalse(nil)) + +} + +func TestIsFalseWithUserDefinedStructs(t *testing.T) { + assert := assert.New(t) + type nilStructType struct { + SliceOfPointers []*string + } + nilStruct := nilStructType{SliceOfPointers: nil} + assert.True(isFalse(nilStruct.SliceOfPointers)) + + // A user defined struct will never be false though, + // even if it's fields are the zero type. + assert.False(isFalse(nilStruct)) +} + +func TestIsFalseWithNilInterface(t *testing.T) { + assert := assert.New(t) + var a *int = nil + var nilInterface interface{} + nilInterface = a + assert.True(isFalse(nilInterface)) +} + +func TestIsFalseWithMapOfUserStructs(t *testing.T) { + assert := assert.New(t) + type foo struct { + Bar string + Baz string + } + m := make(map[int]foo) + assert.True(isFalse(m)) +} + +func TestObjsEqual(t *testing.T) { + assert := assert.New(t) + assert.True(objsEqual("foo", "foo")) + assert.True(objsEqual(20, 20)) + assert.True(objsEqual([]int{1, 2, 3}, []int{1, 2, 3})) + assert.True(objsEqual(nil, nil)) + assert.True(!objsEqual(nil, "foo")) + assert.True(objsEqual([]int{}, []int{})) + assert.True(!objsEqual([]int{}, nil)) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/autoscaling.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/autoscaling.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/autoscaling.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/autoscaling.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,592 +0,0 @@ -// The autoscaling package provides types and functions for interaction with the AWS -// AutoScaling service (autoscaling) -package autoscaling - -import ( - "encoding/xml" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/mitchellh/goamz/aws" -) - -// The AutoScaling type encapsulates operations operations with the autoscaling endpoint. -type AutoScaling struct { - aws.Auth - aws.Region - httpClient *http.Client -} - -const APIVersion = "2011-01-01" - -// New creates a new AutoScaling instance. -func New(auth aws.Auth, region aws.Region) *AutoScaling { - return NewWithClient(auth, region, aws.RetryingClient) -} - -func NewWithClient(auth aws.Auth, region aws.Region, httpClient *http.Client) *AutoScaling { - return &AutoScaling{auth, region, httpClient} -} - -func (autoscaling *AutoScaling) query(params map[string]string, resp interface{}) error { - params["Version"] = APIVersion - params["Timestamp"] = time.Now().In(time.UTC).Format(time.RFC3339) - - endpoint, err := url.Parse(autoscaling.Region.AutoScalingEndpoint) - if err != nil { - return err - } - - sign(autoscaling.Auth, "GET", "/", params, endpoint.Host) - endpoint.RawQuery = multimap(params).Encode() - r, err := autoscaling.httpClient.Get(endpoint.String()) - - if err != nil { - return err - } - defer r.Body.Close() - if r.StatusCode > 200 { - return buildError(r) - } - - decoder := xml.NewDecoder(r.Body) - decodedBody := decoder.Decode(resp) - - return decodedBody -} - -func buildError(r *http.Response) error { - var ( - err Error - errors xmlErrors - ) - xml.NewDecoder(r.Body).Decode(&errors) - if len(errors.Errors) > 0 { - err = errors.Errors[0] - } - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - return &err -} - -func multimap(p map[string]string) url.Values { - q := 
make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -func makeParams(action string) map[string]string { - params := make(map[string]string) - params["Action"] = action - return params -} - -func addBlockDeviceParams(prename string, params map[string]string, blockdevices []BlockDeviceMapping) { - for i, k := range blockdevices { - // Fixup index since Amazon counts these from 1 - prefix := prename + "BlockDeviceMappings.member." + strconv.Itoa(i+1) + "." - - if k.DeviceName != "" { - params[prefix+"DeviceName"] = k.DeviceName - } - - if k.VirtualName != "" { - params[prefix+"VirtualName"] = k.VirtualName - } else if k.NoDevice { - params[prefix+"NoDevice"] = "" - } else { - if k.SnapshotId != "" { - params[prefix+"Ebs.SnapshotId"] = k.SnapshotId - } - if k.VolumeType != "" { - params[prefix+"Ebs.VolumeType"] = k.VolumeType - } - if k.IOPS != 0 { - params[prefix+"Ebs.Iops"] = strconv.FormatInt(k.IOPS, 10) - } - if k.VolumeSize != 0 { - params[prefix+"Ebs.VolumeSize"] = strconv.FormatInt(k.VolumeSize, 10) - } - if k.DeleteOnTermination { - params[prefix+"Ebs.DeleteOnTermination"] = "true" - } else { - params[prefix+"Ebs.DeleteOnTermination"] = "false" - } - if k.Encrypted { - params[prefix+"Ebs.Encrypted"] = "true" - } - } - } -} - -// ---------------------------------------------------------------------------- -// AutoScaling objects - -type Tag struct { - Key string `xml:"Key"` - Value string `xml:"Value"` - PropagateAtLaunch bool `xml:"PropagateAtLaunch"` -} - -type LaunchConfiguration struct { - AssociatePublicIpAddress bool `xml:"AssociatePublicIpAddress"` - IamInstanceProfile string `xml:"IamInstanceProfile"` - ImageId string `xml:"ImageId"` - InstanceType string `xml:"InstanceType"` - KernelId string `xml:"KernelId"` - KeyName string `xml:"KeyName"` - SpotPrice string `xml:"SpotPrice"` - Name string `xml:"LaunchConfigurationName"` - SecurityGroups []string `xml:"SecurityGroups>member"` - UserData []byte `xml:"UserData"` - 
BlockDevices []BlockDeviceMapping `xml:"BlockDeviceMappings>member"` -} - -type Instance struct { - AvailabilityZone string `xml:"AvailabilityZone"` - HealthStatus string `xml:"HealthStatus"` - InstanceId string `xml:"InstanceId"` - LaunchConfigurationName string `xml:"LaunchConfigurationName"` - LifecycleState string `xml:"LifecycleState"` -} - -type AutoScalingGroup struct { - AvailabilityZones []string `xml:"AvailabilityZones>member"` - CreatedTime time.Time `xml:"CreatedTime"` - DefaultCooldown int `xml:"DefaultCooldown"` - DesiredCapacity int `xml:"DesiredCapacity"` - HealthCheckGracePeriod int `xml:"HealthCheckGracePeriod"` - HealthCheckType string `xml:"HealthCheckType"` - InstanceId string `xml:"InstanceId"` - Instances []Instance `xml:"Instances>member"` - LaunchConfigurationName string `xml:"LaunchConfigurationName"` - LoadBalancerNames []string `xml:"LoadBalancerNames>member"` - MaxSize int `xml:"MaxSize"` - MinSize int `xml:"MinSize"` - Name string `xml:"AutoScalingGroupName"` - Status string `xml:"Status"` - Tags []Tag `xml:"Tags>member"` - VPCZoneIdentifier string `xml:"VPCZoneIdentifier"` - TerminationPolicies []string `xml:"TerminationPolicies>member"` -} - -// BlockDeviceMapping represents the association of a block device with an image. -// -// See http://goo.gl/wnDBf for more details. -type BlockDeviceMapping struct { - DeviceName string `xml:"DeviceName"` - VirtualName string `xml:"VirtualName"` - SnapshotId string `xml:"Ebs>SnapshotId"` - VolumeType string `xml:"Ebs>VolumeType"` - VolumeSize int64 `xml:"Ebs>VolumeSize"` - DeleteOnTermination bool `xml:"Ebs>DeleteOnTermination"` - Encrypted bool `xml:"Ebs>Encrypted"` - NoDevice bool `xml:"NoDevice"` - - // The number of I/O operations per second (IOPS) that the volume supports. 
- IOPS int64 `xml:"ebs>iops"` -} - -// ---------------------------------------------------------------------------- -// Create - -// The CreateAutoScalingGroup request parameters -type CreateAutoScalingGroup struct { - AvailZone []string - DefaultCooldown int - DesiredCapacity int - HealthCheckGracePeriod int - HealthCheckType string - InstanceId string - LaunchConfigurationName string - LoadBalancerNames []string - MaxSize int - MinSize int - PlacementGroup string - TerminationPolicies []string - Name string - Tags []Tag - VPCZoneIdentifier []string - - SetDefaultCooldown bool - SetDesiredCapacity bool - SetHealthCheckGracePeriod bool - SetMaxSize bool - SetMinSize bool -} - -func (autoscaling *AutoScaling) CreateAutoScalingGroup(options *CreateAutoScalingGroup) (resp *SimpleResp, err error) { - params := makeParams("CreateAutoScalingGroup") - - params["AutoScalingGroupName"] = options.Name - - if options.SetDefaultCooldown { - params["DefaultCooldown"] = strconv.Itoa(options.DefaultCooldown) - } - - if options.SetDesiredCapacity { - params["DesiredCapacity"] = strconv.Itoa(options.DesiredCapacity) - } - - if options.SetHealthCheckGracePeriod { - params["HealthCheckGracePeriod"] = strconv.Itoa(options.HealthCheckGracePeriod) - } - - if options.HealthCheckType != "" { - params["HealthCheckType"] = options.HealthCheckType - } - - if options.InstanceId != "" { - params["InstanceId"] = options.InstanceId - } - - if options.LaunchConfigurationName != "" { - params["LaunchConfigurationName"] = options.LaunchConfigurationName - } - - for i, v := range options.AvailZone { - params["AvailabilityZones.member."+strconv.Itoa(i+1)] = v - } - - for i, v := range options.LoadBalancerNames { - params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v - } - - if options.SetMaxSize { - params["MaxSize"] = strconv.Itoa(options.MaxSize) - } - - if options.SetMinSize { - params["MinSize"] = strconv.Itoa(options.MinSize) - } - - if options.PlacementGroup != "" { - 
params["PlacementGroup"] = options.PlacementGroup - } - - for j, tag := range options.Tags { - params["Tags.member."+strconv.Itoa(j+1)+".Key"] = tag.Key - params["Tags.member."+strconv.Itoa(j+1)+".Value"] = tag.Value - params["Tags.member."+strconv.Itoa(j+1)+".PropagateAtLaunch"] = strconv.FormatBool(tag.PropagateAtLaunch) - } - - for i, v := range options.TerminationPolicies { - params["TerminationPolicies.member."+strconv.Itoa(i+1)] = v - } - - if options.VPCZoneIdentifier != nil { - params["VPCZoneIdentifier"] = strings.Join(options.VPCZoneIdentifier, ",") - } - - resp = &SimpleResp{} - - err = autoscaling.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// The CreateLaunchConfiguration request parameters -type CreateLaunchConfiguration struct { - AssociatePublicIpAddress bool - IamInstanceProfile string - ImageId string - InstanceId string - InstanceType string - KernelId string - KeyName string - SpotPrice string - Name string - SecurityGroups []string - UserData string - BlockDevices []BlockDeviceMapping -} - -func (autoscaling *AutoScaling) CreateLaunchConfiguration(options *CreateLaunchConfiguration) (resp *SimpleResp, err error) { - params := makeParams("CreateLaunchConfiguration") - - params["LaunchConfigurationName"] = options.Name - - if options.AssociatePublicIpAddress { - params["AssociatePublicIpAddress"] = "true" - } - - if options.IamInstanceProfile != "" { - params["IamInstanceProfile"] = options.IamInstanceProfile - } - - if options.ImageId != "" { - params["ImageId"] = options.ImageId - } - if options.InstanceType != "" { - params["InstanceType"] = options.InstanceType - } - if options.InstanceId != "" { - params["InstanceId"] = options.InstanceId - } - if options.KernelId != "" { - params["KernelId"] = options.KernelId - } - - if options.KeyName != "" { - params["KeyName"] = options.KeyName - } - - if options.SpotPrice != "" { - params["SpotPrice"] = options.SpotPrice - } - - for i, v := range options.SecurityGroups { - 
params["SecurityGroups.member."+strconv.Itoa(i+1)] = v - } - - if options.UserData != "" { - userData := make([]byte, b64.EncodedLen(len(options.UserData))) - b64.Encode(userData, []byte(options.UserData)) - params["UserData"] = string(userData) - } - addBlockDeviceParams("", params, options.BlockDevices) - - resp = &SimpleResp{} - - err = autoscaling.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// Describe - -// DescribeAutoScalingGroups request params -type DescribeAutoScalingGroups struct { - Names []string -} - -type DescribeAutoScalingGroupsResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - AutoScalingGroups []AutoScalingGroup `xml:"DescribeAutoScalingGroupsResult>AutoScalingGroups>member"` -} - -func (autoscaling *AutoScaling) DescribeAutoScalingGroups(options *DescribeAutoScalingGroups) (resp *DescribeAutoScalingGroupsResp, err error) { - params := makeParams("DescribeAutoScalingGroups") - - for i, v := range options.Names { - params["AutoScalingGroupNames.member."+strconv.Itoa(i+1)] = v - } - - resp = &DescribeAutoScalingGroupsResp{} - - err = autoscaling.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DescribeLaunchConfigurations request params -type DescribeLaunchConfigurations struct { - Names []string -} - -type DescribeLaunchConfigurationsResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - LaunchConfigurations []LaunchConfiguration `xml:"DescribeLaunchConfigurationsResult>LaunchConfigurations>member"` -} - -func (autoscaling *AutoScaling) DescribeLaunchConfigurations(options *DescribeLaunchConfigurations) (resp *DescribeLaunchConfigurationsResp, err error) { - params := makeParams("DescribeLaunchConfigurations") - - for i, v := range options.Names { - params["LaunchConfigurationNames.member."+strconv.Itoa(i+1)] = v - } - - resp = &DescribeLaunchConfigurationsResp{} - - err = autoscaling.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - 
-// ---------------------------------------------------------------------------- -// Destroy - -// The DeleteLaunchConfiguration request parameters -type DeleteLaunchConfiguration struct { - Name string -} - -func (autoscaling *AutoScaling) DeleteLaunchConfiguration(options *DeleteLaunchConfiguration) (resp *SimpleResp, err error) { - params := makeParams("DeleteLaunchConfiguration") - - params["LaunchConfigurationName"] = options.Name - - resp = &SimpleResp{} - - err = autoscaling.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// The DeleteLaunchConfiguration request parameters -type DeleteAutoScalingGroup struct { - Name string - ForceDelete bool -} - -func (autoscaling *AutoScaling) DeleteAutoScalingGroup(options *DeleteAutoScalingGroup) (resp *SimpleResp, err error) { - params := makeParams("DeleteAutoScalingGroup") - - params["AutoScalingGroupName"] = options.Name - params["ForceDelete"] = strconv.FormatBool(options.ForceDelete) - - resp = &SimpleResp{} - - err = autoscaling.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ---------------------------------------------------------------------------- -// Destroy -// The UpdateAutoScalingGroup request parameters -type UpdateAutoScalingGroup struct { - AvailZone []string - DefaultCooldown int - DesiredCapacity int - HealthCheckGracePeriod int - HealthCheckType string - LaunchConfigurationName string - MaxSize int - MinSize int - PlacementGroup string - TerminationPolicies []string - Name string - VPCZoneIdentifier []string - - SetDefaultCooldown bool - SetDesiredCapacity bool - SetHealthCheckGracePeriod bool - SetMaxSize bool - SetMinSize bool -} - -func (autoscaling *AutoScaling) UpdateAutoScalingGroup(options *UpdateAutoScalingGroup) (resp *SimpleResp, err error) { - params := makeParams("UpdateAutoScalingGroup") - - if options.Name != "" { - params["AutoScalingGroupName"] = options.Name - } - - if options.SetDefaultCooldown { - params["DefaultCooldown"] = 
strconv.Itoa(options.DefaultCooldown) - } - - if options.SetDesiredCapacity { - params["DesiredCapacity"] = strconv.Itoa(options.DesiredCapacity) - } - - if options.SetHealthCheckGracePeriod { - params["HealthCheckGracePeriod"] = strconv.Itoa(options.HealthCheckGracePeriod) - } - - if options.HealthCheckType != "" { - params["HealthCheckType"] = options.HealthCheckType - } - - if options.LaunchConfigurationName != "" { - params["LaunchConfigurationName"] = options.LaunchConfigurationName - } - - for i, v := range options.AvailZone { - params["AvailabilityZones.member."+strconv.Itoa(i+1)] = v - } - - if options.SetMaxSize { - params["MaxSize"] = strconv.Itoa(options.MaxSize) - } - - if options.SetMinSize { - params["MinSize"] = strconv.Itoa(options.MinSize) - } - - if options.PlacementGroup != "" { - params["PlacementGroup"] = options.PlacementGroup - } - for i, v := range options.TerminationPolicies { - params["TerminationPolicies.member."+strconv.Itoa(i+1)] = v - } - - if options.VPCZoneIdentifier != nil { - params["VPCZoneIdentifier"] = strings.Join(options.VPCZoneIdentifier, ",") - } - - resp = &SimpleResp{} - - err = autoscaling.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// Responses - -type SimpleResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -type xmlErrors struct { - Errors []Error `xml:"Error"` -} - -// Error encapsulates an autoscaling error. -type Error struct { - // HTTP status code of the error. - StatusCode int - - // AWS code of the error. - Code string - - // Message explaining the error. 
- Message string -} - -func (e *Error) Error() string { - var prefix string - if e.Code != "" { - prefix = e.Code + ": " - } - if prefix == "" && e.StatusCode > 0 { - prefix = strconv.Itoa(e.StatusCode) + ": " - } - return prefix + e.Message -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/autoscaling_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/autoscaling_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/autoscaling_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/autoscaling_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,194 +0,0 @@ -package autoscaling_test - -import ( - "testing" - - "github.com/mitchellh/goamz/autoscaling" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/testutil" - . "github.com/motain/gocheck" -) - -func Test(t *testing.T) { - TestingT(t) -} - -type S struct { - autoscaling *autoscaling.AutoScaling -} - -var _ = Suite(&S{}) - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - s.autoscaling = autoscaling.NewWithClient(auth, aws.Region{AutoScalingEndpoint: testServer.URL}, testutil.DefaultClient) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) Test_CreateAutoScalingGroup(c *C) { - testServer.Response(200, nil, CreateAutoScalingGroupExample) - - options := autoscaling.CreateAutoScalingGroup{ - AvailZone: []string{"us-east-1a"}, - DefaultCooldown: 30, - DesiredCapacity: 2, - HealthCheckGracePeriod: 30, - HealthCheckType: "elb", - InstanceId: "i-foo", - LaunchConfigurationName: "foobar", - MinSize: 2, - MaxSize: 2, - PlacementGroup: "foobar", - TerminationPolicies: []string{"ClosestToNextInstanceHour", "OldestInstance"}, - Name: "foobar", - Tags: []autoscaling.Tag{ - 
autoscaling.Tag{ - Key: "foo", - Value: "bar", - }, - }, - VPCZoneIdentifier: []string{"foo", "bar"}, - } - - resp, err := s.autoscaling.CreateAutoScalingGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateAutoScalingGroup"}) - c.Assert(req.Form["InstanceId"], DeepEquals, []string{"i-foo"}) - c.Assert(req.Form["VPCZoneIdentifier"], DeepEquals, []string{"foo,bar"}) - c.Assert(req.Form["TerminationPolicies.member.1"], DeepEquals, []string{"ClosestToNextInstanceHour"}) - c.Assert(req.Form["TerminationPolicies.member.2"], DeepEquals, []string{"OldestInstance"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "8d798a29-f083-11e1-bdfb-cb223EXAMPLE") -} - -func (s *S) Test_CreateLaunchConfiguration(c *C) { - testServer.Response(200, nil, CreateLaunchConfigurationExample) - - options := autoscaling.CreateLaunchConfiguration{ - SecurityGroups: []string{"sg-1111"}, - ImageId: "i-141421", - InstanceId: "i-141421", - InstanceType: "m1.small", - KeyName: "foobar", - Name: "i-141421", - UserData: "#!/bin/bash\necho Hello\n", - BlockDevices: []autoscaling.BlockDeviceMapping{ - {DeviceName: "/dev/sdb", VirtualName: "ephemeral0"}, - {DeviceName: "/dev/sdc", SnapshotId: "snap-a08912c9", DeleteOnTermination: true}, - }, - } - - resp, err := s.autoscaling.CreateLaunchConfiguration(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateLaunchConfiguration"}) - c.Assert(req.Form["InstanceType"], DeepEquals, []string{"m1.small"}) - c.Assert(req.Form["SecurityGroups.member.1"], DeepEquals, []string{"sg-1111"}) - c.Assert(req.Form["UserData"], DeepEquals, []string{"IyEvYmluL2Jhc2gKZWNobyBIZWxsbwo="}) - c.Assert(req.Form["BlockDeviceMappings.member.1.DeviceName"], DeepEquals, []string{"/dev/sdb"}) - c.Assert(req.Form["BlockDeviceMappings.member.1.VirtualName"], DeepEquals, []string{"ephemeral0"}) - c.Assert(req.Form["BlockDeviceMappings.member.2.Ebs.SnapshotId"], 
DeepEquals, []string{"snap-a08912c9"}) - c.Assert(req.Form["BlockDeviceMappings.member.2.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7c6e177f-f082-11e1-ac58-3714bEXAMPLE") -} - -func (s *S) Test_DescribeAutoScalingGroups(c *C) { - testServer.Response(200, nil, DescribeAutoScalingGroupsExample) - - options := autoscaling.DescribeAutoScalingGroups{ - Names: []string{"foobar"}, - } - - resp, err := s.autoscaling.DescribeAutoScalingGroups(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeAutoScalingGroups"}) - c.Assert(req.Form["AutoScalingGroupNames.member.1"], DeepEquals, []string{"foobar"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "0f02a07d-b677-11e2-9eb0-dd50EXAMPLE") - c.Assert(resp.AutoScalingGroups[0].Name, Equals, "my-test-asg-lbs") - c.Assert(resp.AutoScalingGroups[0].LaunchConfigurationName, Equals, "my-test-lc") - c.Assert(resp.AutoScalingGroups[0].TerminationPolicies[0], Equals, "Default") -} - -func (s *S) Test_DescribeLaunchConfigurations(c *C) { - testServer.Response(200, nil, DescribeLaunchConfigurationsExample) - - options := autoscaling.DescribeLaunchConfigurations{ - Names: []string{"foobar"}, - } - - resp, err := s.autoscaling.DescribeLaunchConfigurations(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeLaunchConfigurations"}) - c.Assert(req.Form["LaunchConfigurationNames.member.1"], DeepEquals, []string{"foobar"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "d05a22f8-b690-11e2-bf8e-2113fEXAMPLE") - c.Assert(resp.LaunchConfigurations[0].InstanceType, Equals, "m1.small") -} - -func (s *S) TestDeleteAutoScalingGroup(c *C) { - testServer.Response(200, nil, DeleteAutoScalingGroupExample) - - options := autoscaling.DeleteAutoScalingGroup{ - Name: "foobar", - ForceDelete: true, - } - - resp, err := 
s.autoscaling.DeleteAutoScalingGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteAutoScalingGroup"}) - c.Assert(req.Form["AutoScalingGroupName"], DeepEquals, []string{"foobar"}) - c.Assert(req.Form["ForceDelete"], DeepEquals, []string{"true"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "70a76d42-9665-11e2-9fdf-211deEXAMPLE") -} - -func (s *S) TestDeleteLaunchConfiguration(c *C) { - testServer.Response(200, nil, DeleteLaunchConfigurationExample) - - options := autoscaling.DeleteLaunchConfiguration{ - Name: "foobar", - } - - resp, err := s.autoscaling.DeleteLaunchConfiguration(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteLaunchConfiguration"}) - c.Assert(req.Form["LaunchConfigurationName"], DeepEquals, []string{"foobar"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7347261f-97df-11e2-8756-35eEXAMPLE") -} - -func (s *S) Test_UpdateAutoScalingGroup(c *C) { - testServer.Response(200, nil, UpdateAutoScalingGroupExample) - - options := autoscaling.UpdateAutoScalingGroup{ - AvailZone: []string{"us-east-1a"}, - DefaultCooldown: 30, - Name: "bar", - - SetDefaultCooldown: true, - } - - resp, err := s.autoscaling.UpdateAutoScalingGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"UpdateAutoScalingGroup"}) - c.Assert(req.Form["AutoScalingGroupName"], DeepEquals, []string{"bar"}) - c.Assert(req.Form["DefaultCooldown"], DeepEquals, []string{"30"}) - c.Assert(req.Form["MinSize"], IsNil) - c.Assert(req.Form["MaxSize"], IsNil) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "adafead0-ab8a-11e2-ba13-ab0ccEXAMPLE") -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/responses_test.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,117 +0,0 @@ -package autoscaling_test - -var ErrorDump = ` - -UnsupportedOperation - -0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4 -` - -// http://goo.gl/gQRD2H -var CreateAutoScalingGroupExample = ` - - -8d798a29-f083-11e1-bdfb-cb223EXAMPLE - - -` - -var CreateLaunchConfigurationExample = ` - - - 7c6e177f-f082-11e1-ac58-3714bEXAMPLE - - -` - -var DescribeLaunchConfigurationsExample = ` - - - - - true - - dedicated - 2013-01-21T23:04:42.200Z - - my-test-lc - - m1.small - arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration: - 9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc - - ami-514ac838 - - - - true - - false - - - - - d05a22f8-b690-11e2-bf8e-2113fEXAMPLE - - -` - -var DescribeAutoScalingGroupsExample = ` - - - - - - - my-test-asg-lbs - ELB - 2013-05-06T17:47:15.107Z - - my-test-lc - - 2 - - us-east-1b - us-east-1a - - - my-test-asg-loadbalancer - - 2 - - 120 - 300 - arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb - :autoScalingGroupName/my-test-asg-lbs - - Default - - 10 - - - - - 0f02a07d-b677-11e2-9eb0-dd50EXAMPLE - - -` - -var DeleteLaunchConfigurationExample = ` - - 7347261f-97df-11e2-8756-35eEXAMPLE - -` - -var DeleteAutoScalingGroupExample = ` - - 70a76d42-9665-11e2-9fdf-211deEXAMPLE - -` - -var UpdateAutoScalingGroupExample = ` - - - adafead0-ab8a-11e2-ba13-ab0ccEXAMPLE - - -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/sign.go 
2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/autoscaling/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -package autoscaling - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "github.com/mitchellh/goamz/aws" - "sort" - "strings" -) - -// ---------------------------------------------------------------------------- -// Version 2 signing (http://goo.gl/RSRp5) - -var b64 = base64.StdEncoding - -func sign(auth aws.Auth, method, path string, params map[string]string, host string) { - params["AWSAccessKeyId"] = auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - if auth.Token != "" { - params["SecurityToken"] = auth.Token - } - - var sarray []string - for k, v := range params { - sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(v)) - } - sort.StringSlice(sarray).Sort() - joined := strings.Join(sarray, "&") - payload := method + "\n" + host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/attempt.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/attempt.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/attempt.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/attempt.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -package aws - -import ( - "time" -) - -// AttemptStrategy represents a strategy for waiting for an action -// to complete successfully. This is an internal type used by the -// implementation of other goamz packages. 
-type AttemptStrategy struct { - Total time.Duration // total duration of attempt. - Delay time.Duration // interval between each try in the burst. - Min int // minimum number of retries; overrides Total -} - -type Attempt struct { - strategy AttemptStrategy - last time.Time - end time.Time - force bool - count int -} - -// Start begins a new sequence of attempts for the given strategy. -func (s AttemptStrategy) Start() *Attempt { - now := time.Now() - return &Attempt{ - strategy: s, - last: now, - end: now.Add(s.Total), - force: true, - } -} - -// Next waits until it is time to perform the next attempt or returns -// false if it is time to stop trying. -func (a *Attempt) Next() bool { - now := time.Now() - sleep := a.nextSleep(now) - if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { - return false - } - a.force = false - if sleep > 0 && a.count > 0 { - time.Sleep(sleep) - now = time.Now() - } - a.count++ - a.last = now - return true -} - -func (a *Attempt) nextSleep(now time.Time) time.Duration { - sleep := a.strategy.Delay - now.Sub(a.last) - if sleep < 0 { - return 0 - } - return sleep -} - -// HasNext returns whether another attempt will be made if the current -// one fails. If it returns true, the following call to Next is -// guaranteed to return true. 
-func (a *Attempt) HasNext() bool { - if a.force || a.strategy.Min > a.count { - return true - } - now := time.Now() - if now.Add(a.nextSleep(now)).Before(a.end) { - a.force = true - return true - } - return false -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/attempt_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/attempt_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/attempt_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/attempt_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -package aws_test - -import ( - "github.com/mitchellh/goamz/aws" - . "github.com/motain/gocheck" - "time" -) - -func (S) TestAttemptTiming(c *C) { - testAttempt := aws.AttemptStrategy{ - Total: 0.25e9, - Delay: 0.1e9, - } - want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9} - got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing - t0 := time.Now() - for a := testAttempt.Start(); a.Next(); { - got = append(got, time.Now().Sub(t0)) - } - got = append(got, time.Now().Sub(t0)) - c.Assert(got, HasLen, len(want)) - const margin = 0.01e9 - for i, got := range want { - lo := want[i] - margin - hi := want[i] + margin - if got < lo || got > hi { - c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds()) - } - } -} - -func (S) TestAttemptNextHasNext(c *C) { - a := aws.AttemptStrategy{}.Start() - c.Assert(a.Next(), Equals, true) - c.Assert(a.Next(), Equals, false) - - a = aws.AttemptStrategy{}.Start() - c.Assert(a.Next(), Equals, true) - c.Assert(a.HasNext(), Equals, false) - c.Assert(a.Next(), Equals, false) - - a = aws.AttemptStrategy{Total: 2e8}.Start() - c.Assert(a.Next(), Equals, true) - c.Assert(a.HasNext(), Equals, true) - time.Sleep(2e8) - c.Assert(a.HasNext(), Equals, true) - c.Assert(a.Next(), Equals, true) - 
c.Assert(a.Next(), Equals, false) - - a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start() - time.Sleep(1e8) - c.Assert(a.Next(), Equals, true) - c.Assert(a.HasNext(), Equals, true) - c.Assert(a.Next(), Equals, true) - c.Assert(a.HasNext(), Equals, false) - c.Assert(a.Next(), Equals, false) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/aws.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/aws.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/aws.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/aws.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,445 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd. -// -// Written by Gustavo Niemeyer -// -package aws - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - - "github.com/vaughan0/go-ini" -) - -// Region defines the URLs where AWS services may be accessed. -// -// See http://goo.gl/d8BP1 for more details. -type Region struct { - Name string // the canonical name of this region. - EC2Endpoint string - S3Endpoint string - S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. - S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. - S3LowercaseBucket bool // true if the region requires bucket names to be lower case. 
- SDBEndpoint string - SNSEndpoint string - SQSEndpoint string - IAMEndpoint string - ELBEndpoint string - AutoScalingEndpoint string - RdsEndpoint string - Route53Endpoint string -} - -var USGovWest = Region{ - "us-gov-west-1", - "https://ec2.us-gov-west-1.amazonaws.com", - "https://s3-fips-us-gov-west-1.amazonaws.com", - "", - true, - true, - "", - "https://sns.us-gov-west-1.amazonaws.com", - "https://sqs.us-gov-west-1.amazonaws.com", - "https://iam.us-gov.amazonaws.com", - "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", - "https://autoscaling.us-gov-west-1.amazonaws.com", - "https://rds.us-gov-west-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var USEast = Region{ - "us-east-1", - "https://ec2.us-east-1.amazonaws.com", - "https://s3.amazonaws.com", - "", - false, - false, - "https://sdb.amazonaws.com", - "https://sns.us-east-1.amazonaws.com", - "https://sqs.us-east-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-east-1.amazonaws.com", - "https://autoscaling.us-east-1.amazonaws.com", - "https://rds.us-east-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var USWest = Region{ - "us-west-1", - "https://ec2.us-west-1.amazonaws.com", - "https://s3-us-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-1.amazonaws.com", - "https://sns.us-west-1.amazonaws.com", - "https://sqs.us-west-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-west-1.amazonaws.com", - "https://autoscaling.us-west-1.amazonaws.com", - "https://rds.us-west-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var USWest2 = Region{ - "us-west-2", - "https://ec2.us-west-2.amazonaws.com", - "https://s3-us-west-2.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-2.amazonaws.com", - "https://sns.us-west-2.amazonaws.com", - "https://sqs.us-west-2.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-west-2.amazonaws.com", - 
"https://autoscaling.us-west-2.amazonaws.com", - "https://rds.us-west-2.amazonaws.com", - "https://route53.amazonaws.com", -} - -var EUWest = Region{ - "eu-west-1", - "https://ec2.eu-west-1.amazonaws.com", - "https://s3-eu-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.eu-west-1.amazonaws.com", - "https://sns.eu-west-1.amazonaws.com", - "https://sqs.eu-west-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.eu-west-1.amazonaws.com", - "https://autoscaling.eu-west-1.amazonaws.com", - "https://rds.eu-west-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var EUCentral = Region{ - "eu-central-1", - "https://ec2.eu-central-1.amazonaws.com", - "https://s3-eu-central-1.amazonaws.com", - "", - true, - true, - "", - "https://sns.eu-central-1.amazonaws.com", - "https://sqs.eu-central-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.eu-central-1.amazonaws.com", - "https://autoscaling.eu-central-1.amazonaws.com", - "https://rds.eu-central-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var APSoutheast = Region{ - "ap-southeast-1", - "https://ec2.ap-southeast-1.amazonaws.com", - "https://s3-ap-southeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-1.amazonaws.com", - "https://sns.ap-southeast-1.amazonaws.com", - "https://sqs.ap-southeast-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", - "https://autoscaling.ap-southeast-1.amazonaws.com", - "https://rds.ap-southeast-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var APSoutheast2 = Region{ - "ap-southeast-2", - "https://ec2.ap-southeast-2.amazonaws.com", - "https://s3-ap-southeast-2.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-2.amazonaws.com", - "https://sns.ap-southeast-2.amazonaws.com", - "https://sqs.ap-southeast-2.amazonaws.com", - "https://iam.amazonaws.com", - 
"https://elasticloadbalancing.ap-southeast-2.amazonaws.com", - "https://autoscaling.ap-southeast-2.amazonaws.com", - "https://rds.ap-southeast-2.amazonaws.com", - "https://route53.amazonaws.com", -} - -var APNortheast = Region{ - "ap-northeast-1", - "https://ec2.ap-northeast-1.amazonaws.com", - "https://s3-ap-northeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-northeast-1.amazonaws.com", - "https://sns.ap-northeast-1.amazonaws.com", - "https://sqs.ap-northeast-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", - "https://autoscaling.ap-northeast-1.amazonaws.com", - "https://rds.ap-northeast-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var SAEast = Region{ - "sa-east-1", - "https://ec2.sa-east-1.amazonaws.com", - "https://s3-sa-east-1.amazonaws.com", - "", - true, - true, - "https://sdb.sa-east-1.amazonaws.com", - "https://sns.sa-east-1.amazonaws.com", - "https://sqs.sa-east-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.sa-east-1.amazonaws.com", - "https://autoscaling.sa-east-1.amazonaws.com", - "https://rds.sa-east-1.amazonaws.com", - "https://route53.amazonaws.com", -} - -var CNNorth = Region{ - "cn-north-1", - "https://ec2.cn-north-1.amazonaws.com.cn", - "https://s3.cn-north-1.amazonaws.com.cn", - "", - true, - true, - "", - "https://sns.cn-north-1.amazonaws.com.cn", - "https://sqs.cn-north-1.amazonaws.com.cn", - "https://iam.cn-north-1.amazonaws.com.cn", - "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn", - "https://autoscaling.cn-north-1.amazonaws.com.cn", - "https://rds.cn-north-1.amazonaws.com.cn", - "https://route53.amazonaws.com", -} - -var Regions = map[string]Region{ - APNortheast.Name: APNortheast, - APSoutheast.Name: APSoutheast, - APSoutheast2.Name: APSoutheast2, - EUWest.Name: EUWest, - EUCentral.Name: EUCentral, - USEast.Name: USEast, - USWest.Name: USWest, - USWest2.Name: USWest2, - SAEast.Name: SAEast, - 
USGovWest.Name: USGovWest, - CNNorth.Name: CNNorth, -} - -type Auth struct { - AccessKey, SecretKey, Token string -} - -var unreserved = make([]bool, 128) -var hex = "0123456789ABCDEF" - -func init() { - // RFC3986 - u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~" - for _, c := range u { - unreserved[c] = true - } -} - -type credentials struct { - Code string - LastUpdated string - Type string - AccessKeyId string - SecretAccessKey string - Token string - Expiration string -} - -// GetMetaData retrieves instance metadata about the current machine. -// -// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details. -func GetMetaData(path string) (contents []byte, err error) { - url := "http://169.254.169.254/latest/meta-data/" + path - - resp, err := RetryingClient.Get(url) - if err != nil { - return - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url) - return - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - return []byte(body), err -} - -func getInstanceCredentials() (cred credentials, err error) { - credentialPath := "iam/security-credentials/" - - // Get the instance role - role, err := GetMetaData(credentialPath) - if err != nil { - return - } - - // Get the instance role credentials - credentialJSON, err := GetMetaData(credentialPath + string(role)) - if err != nil { - return - } - - err = json.Unmarshal([]byte(credentialJSON), &cred) - return -} - -// GetAuth creates an Auth based on either passed in credentials, -// environment information or instance based role credentials. 
-func GetAuth(accessKey string, secretKey string) (auth Auth, err error) { - // First try passed in credentials - if accessKey != "" && secretKey != "" { - return Auth{accessKey, secretKey, ""}, nil - } - - // Next try to get auth from the environment - auth, err = SharedAuth() - if err == nil { - // Found auth, return - return - } - - // Next try to get auth from the environment - auth, err = EnvAuth() - if err == nil { - // Found auth, return - return - } - - // Next try getting auth from the instance role - cred, err := getInstanceCredentials() - if err == nil { - // Found auth, return - auth.AccessKey = cred.AccessKeyId - auth.SecretKey = cred.SecretAccessKey - auth.Token = cred.Token - return - } - err = errors.New("No valid AWS authentication found") - return -} - -// SharedAuth creates an Auth based on shared credentials stored in -// $HOME/.aws/credentials. The AWS_PROFILE environment variables is used to -// select the profile. -func SharedAuth() (auth Auth, err error) { - var profileName = os.Getenv("AWS_PROFILE") - - if profileName == "" { - profileName = "default" - } - - var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE") - if credentialsFile == "" { - var homeDir = os.Getenv("HOME") - if homeDir == "" { - err = errors.New("Could not get HOME") - return - } - credentialsFile = homeDir + "/.aws/credentials" - } - - file, err := ini.LoadFile(credentialsFile) - if err != nil { - err = errors.New("Couldn't parse AWS credentials file") - return - } - - var profile = file[profileName] - if profile == nil { - err = errors.New("Couldn't find profile in AWS credentials file") - return - } - - auth.AccessKey = profile["aws_access_key_id"] - auth.SecretKey = profile["aws_secret_access_key"] - - if auth.AccessKey == "" { - err = errors.New("AWS_ACCESS_KEY_ID not found in environment in credentials file") - } - if auth.SecretKey == "" { - err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file") - } - return -} - -// EnvAuth creates an Auth based 
on environment information. -// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment -// For accounts that require a security token, it is read from AWS_SECURITY_TOKEN -// variables are used. -func EnvAuth() (auth Auth, err error) { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") - if auth.AccessKey == "" { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") - } - - auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") - if auth.SecretKey == "" { - auth.SecretKey = os.Getenv("AWS_SECRET_KEY") - } - - auth.Token = os.Getenv("AWS_SECURITY_TOKEN") - - if auth.AccessKey == "" { - err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") - } - if auth.SecretKey == "" { - err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") - } - return -} - -// Encode takes a string and URI-encodes it in a way suitable -// to be used in AWS signatures. -func Encode(s string) string { - encode := false - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - encode = true - break - } - } - if !encode { - return s - } - e := make([]byte, len(s)*3) - ei := 0 - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - e[ei] = '%' - e[ei+1] = hex[c>>4] - e[ei+2] = hex[c&0xF] - ei += 3 - } else { - e[ei] = c - ei += 1 - } - } - return string(e[:ei]) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/aws_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/aws_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/aws_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/aws_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -package aws_test - -import ( - "github.com/mitchellh/goamz/aws" - . 
"github.com/motain/gocheck" - "io/ioutil" - "os" - "strings" - "testing" -) - -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&S{}) - -type S struct { - environ []string -} - -func (s *S) SetUpSuite(c *C) { - s.environ = os.Environ() -} - -func (s *S) TearDownTest(c *C) { - os.Clearenv() - for _, kv := range s.environ { - l := strings.SplitN(kv, "=", 2) - os.Setenv(l[0], l[1]) - } -} - -func (s *S) TestSharedAuthNoHome(c *C) { - os.Clearenv() - os.Setenv("AWS_PROFILE", "foo") - _, err := aws.SharedAuth() - c.Assert(err, ErrorMatches, "Could not get HOME") -} - -func (s *S) TestSharedAuthNoCredentialsFile(c *C) { - os.Clearenv() - os.Setenv("AWS_PROFILE", "foo") - os.Setenv("HOME", "/tmp") - _, err := aws.SharedAuth() - c.Assert(err, ErrorMatches, "Couldn't parse AWS credentials file") -} - -func (s *S) TestSharedAuthNoProfileInFile(c *C) { - os.Clearenv() - os.Setenv("AWS_PROFILE", "foo") - - d, err := ioutil.TempDir("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(d) - - err = os.Mkdir(d+"/.aws", 0755) - if err != nil { - panic(err) - } - - ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\n"), 0644) - os.Setenv("HOME", d) - - _, err = aws.SharedAuth() - c.Assert(err, ErrorMatches, "Couldn't find profile in AWS credentials file") -} - -func (s *S) TestSharedAuthNoKeysInProfile(c *C) { - os.Clearenv() - os.Setenv("AWS_PROFILE", "bar") - - d, err := ioutil.TempDir("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(d) - - err = os.Mkdir(d+"/.aws", 0755) - if err != nil { - panic(err) - } - - ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\nawsaccesskeyid = AK.."), 0644) - os.Setenv("HOME", d) - - _, err = aws.SharedAuth() - c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in credentials file") -} - -func (s *S) TestSharedAuthDefaultCredentials(c *C) { - os.Clearenv() - - d, err := ioutil.TempDir("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(d) - - err = os.Mkdir(d+"/.aws", 0755) - if err 
!= nil { - panic(err) - } - - ioutil.WriteFile(d+"/.aws/credentials", []byte("[default]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644) - os.Setenv("HOME", d) - - auth, err := aws.SharedAuth() - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestSharedAuth(c *C) { - os.Clearenv() - os.Setenv("AWS_PROFILE", "bar") - - d, err := ioutil.TempDir("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(d) - - err = os.Mkdir(d+"/.aws", 0755) - if err != nil { - panic(err) - } - - ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644) - os.Setenv("HOME", d) - - auth, err := aws.SharedAuth() - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestEnvAuthNoSecret(c *C) { - os.Clearenv() - _, err := aws.EnvAuth() - c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") -} - -func (s *S) TestEnvAuthNoAccess(c *C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "foo") - _, err := aws.EnvAuth() - c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") -} - -func (s *S) TestEnvAuth(c *C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - os.Setenv("AWS_ACCESS_KEY_ID", "access") - auth, err := aws.EnvAuth() - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestEnvAuthWithToken(c *C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - os.Setenv("AWS_ACCESS_KEY_ID", "access") - os.Setenv("AWS_SECURITY_TOKEN", "token") - auth, err := aws.EnvAuth() - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access", Token: "token"}) -} - -func (s *S) TestEnvAuthAlt(c *C) { - os.Clearenv() - os.Setenv("AWS_SECRET_KEY", "secret") - 
os.Setenv("AWS_ACCESS_KEY", "access") - auth, err := aws.EnvAuth() - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestGetAuthStatic(c *C) { - auth, err := aws.GetAuth("access", "secret") - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestGetAuthEnv(c *C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - os.Setenv("AWS_ACCESS_KEY_ID", "access") - auth, err := aws.GetAuth("", "") - c.Assert(err, IsNil) - c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestEncode(c *C) { - c.Assert(aws.Encode("foo"), Equals, "foo") - c.Assert(aws.Encode("/"), Equals, "%2F") -} - -func (s *S) TestRegionsAreNamed(c *C) { - for n, r := range aws.Regions { - c.Assert(n, Equals, r.Name) - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/client.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/client.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/client.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/client.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -package aws - -import ( - "math" - "net" - "net/http" - "time" -) - -type RetryableFunc func(*http.Request, *http.Response, error) bool -type WaitFunc func(try int) -type DeadlineFunc func() time.Time - -type ResilientTransport struct { - // Timeout is the maximum amount of time a dial will wait for - // a connect to complete. - // - // The default is no timeout. - // - // With or without a timeout, the operating system may impose - // its own earlier timeout. For instance, TCP timeouts are - // often around 3 minutes. 
- DialTimeout time.Duration - - // MaxTries, if non-zero, specifies the number of times we will retry on - // failure. Retries are only attempted for temporary network errors or known - // safe failures. - MaxTries int - Deadline DeadlineFunc - ShouldRetry RetryableFunc - Wait WaitFunc - transport *http.Transport -} - -// Convenience method for creating an http client -func NewClient(rt *ResilientTransport) *http.Client { - rt.transport = &http.Transport{ - Dial: func(netw, addr string) (net.Conn, error) { - c, err := net.DialTimeout(netw, addr, rt.DialTimeout) - if err != nil { - return nil, err - } - c.SetDeadline(rt.Deadline()) - return c, nil - }, - DisableKeepAlives: true, - Proxy: http.ProxyFromEnvironment, - } - // TODO: Would be nice is ResilientTransport allowed clients to initialize - // with http.Transport attributes. - return &http.Client{ - Transport: rt, - } -} - -var retryingTransport = &ResilientTransport{ - Deadline: func() time.Time { - return time.Now().Add(5 * time.Second) - }, - DialTimeout: 10 * time.Second, - MaxTries: 3, - ShouldRetry: awsRetry, - Wait: ExpBackoff, -} - -// Exported default client -var RetryingClient = NewClient(retryingTransport) - -func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.tries(req) -} - -// Retry a request a maximum of t.MaxTries times. -// We'll only retry if the proper criteria are met. -// If a wait function is specified, wait that amount of time -// In between requests. 
-func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { - for try := 0; try < t.MaxTries; try += 1 { - res, err = t.transport.RoundTrip(req) - - if !t.ShouldRetry(req, res, err) { - break - } - if res != nil { - res.Body.Close() - } - if t.Wait != nil { - t.Wait(try) - } - } - - return -} - -func ExpBackoff(try int) { - time.Sleep(100 * time.Millisecond * - time.Duration(math.Exp2(float64(try)))) -} - -func LinearBackoff(try int) { - time.Sleep(time.Duration(try*100) * time.Millisecond) -} - -// Decide if we should retry a request. -// In general, the criteria for retrying a request is described here -// http://docs.aws.amazon.com/general/latest/gr/api-retries.html -func awsRetry(req *http.Request, res *http.Response, err error) bool { - retry := false - - // Retry if there's a temporary network error. - if neterr, ok := err.(net.Error); ok { - if neterr.Temporary() { - retry = true - } - } - - // Retry if we get a 5xx series error. - if res != nil { - if res.StatusCode >= 500 && res.StatusCode < 600 { - retry = true - } - } - - return retry -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/client_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/client_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/client_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/aws/client_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -package aws_test - -import ( - "fmt" - "github.com/mitchellh/goamz/aws" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" -) - -// Retrieve the response from handler using aws.RetryingClient -func serveAndGet(handler http.HandlerFunc) (body string, err error) { - ts := httptest.NewServer(handler) - defer ts.Close() - resp, err := aws.RetryingClient.Get(ts.URL) - if err != 
nil { - return - } - if resp.StatusCode != 200 { - return "", fmt.Errorf("Bad status code: %d", resp.StatusCode) - } - greeting, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return - } - return strings.TrimSpace(string(greeting)), nil -} - -func TestClient_expected(t *testing.T) { - body := "foo bar" - - resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, body) - }) - if err != nil { - t.Fatal(err) - } - if resp != body { - t.Fatal("Body not as expected.") - } -} - -func TestClient_delay(t *testing.T) { - body := "baz" - wait := 4 - resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { - if wait < 0 { - // If we dipped to zero delay and still failed. - t.Fatal("Never succeeded.") - } - wait -= 1 - time.Sleep(time.Second * time.Duration(wait)) - fmt.Fprintln(w, body) - }) - if err != nil { - t.Fatal(err) - } - if resp != body { - t.Fatal("Body not as expected.", resp) - } -} - -func TestClient_no4xxRetry(t *testing.T) { - tries := 0 - - // Fail once before succeeding. - _, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { - tries += 1 - http.Error(w, "error", 404) - }) - - if err == nil { - t.Fatal("should have error") - } - - if tries != 1 { - t.Fatalf("should only try once: %d", tries) - } -} - -func TestClient_retries(t *testing.T) { - body := "biz" - failed := false - // Fail once before succeeding. - resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { - if !failed { - http.Error(w, "error", 500) - failed = true - } else { - fmt.Fprintln(w, body) - } - }) - if failed != true { - t.Error("We didn't retry!") - } - if err != nil { - t.Fatal(err) - } - if resp != body { - t.Fatal("Body not as expected.") - } -} - -func TestClient_fails(t *testing.T) { - tries := 0 - // Fail 3 times and return the last error. 
- _, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) { - tries += 1 - http.Error(w, "error", 500) - }) - if err == nil { - t.Fatal(err) - } - if tries != 3 { - t.Fatal("Didn't retry enough") - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/CHANGES.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/CHANGES.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/CHANGES.md 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/CHANGES.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -# Changes in this fork - -* Added EC2.CreateImage() -* Added EC2.CreateKeyPair() -* Added EC2.DeleteKeyPair() diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,3390 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd. -// -// Written by Gustavo Niemeyer -// - -package ec2 - -import ( - "crypto/rand" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "fmt" - "log" - "net/http" - "net/http/httputil" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/mitchellh/goamz/aws" -) - -const debug = false - -// The EC2 type encapsulates operations with a specific EC2 region. -type EC2 struct { - aws.Auth - aws.Region - httpClient *http.Client - private byte // Reserve the right of using private data. -} - -// New creates a new EC2. 
-func NewWithClient(auth aws.Auth, region aws.Region, client *http.Client) *EC2 { - return &EC2{auth, region, client, 0} -} - -func New(auth aws.Auth, region aws.Region) *EC2 { - return NewWithClient(auth, region, aws.RetryingClient) -} - -// ---------------------------------------------------------------------------- -// Filtering helper. - -// Filter builds filtering parameters to be used in an EC2 query which supports -// filtering. For example: -// -// filter := NewFilter() -// filter.Add("architecture", "i386") -// filter.Add("launch-index", "0") -// resp, err := ec2.Instances(nil, filter) -// -type Filter struct { - m map[string][]string -} - -// NewFilter creates a new Filter. -func NewFilter() *Filter { - return &Filter{make(map[string][]string)} -} - -// Add appends a filtering parameter with the given name and value(s). -func (f *Filter) Add(name string, value ...string) { - f.m[name] = append(f.m[name], value...) -} - -func (f *Filter) addParams(params map[string]string) { - if f != nil { - a := make([]string, len(f.m)) - i := 0 - for k := range f.m { - a[i] = k - i++ - } - sort.StringSlice(a).Sort() - for i, k := range a { - prefix := "Filter." + strconv.Itoa(i+1) - params[prefix+".Name"] = k - for j, v := range f.m[k] { - params[prefix+".Value."+strconv.Itoa(j+1)] = v - } - } - } -} - -// ---------------------------------------------------------------------------- -// Request dispatching logic. - -// Error encapsulates an error returned by EC2. -// -// See http://goo.gl/VZGuC for more details. -type Error struct { - // HTTP status code (200, 403, ...) - StatusCode int - // EC2 error code ("UnsupportedOperation", ...) - Code string - // The human-oriented error message - Message string - RequestId string `xml:"RequestID"` -} - -func (err *Error) Error() string { - if err.Code == "" { - return err.Message - } - - return fmt.Sprintf("%s (%s)", err.Message, err.Code) -} - -// For now a single error inst is being exposed. 
In the future it may be useful -// to provide access to all of them, but rather than doing it as an array/slice, -// use a *next pointer, so that it's backward compatible and it continues to be -// easy to handle the first error, which is what most people will want. -type xmlErrors struct { - RequestId string `xml:"RequestID"` - Errors []Error `xml:"Errors>Error"` -} - -var timeNow = time.Now - -func (ec2 *EC2) query(params map[string]string, resp interface{}) error { - params["Version"] = "2014-06-15" - params["Timestamp"] = timeNow().In(time.UTC).Format(time.RFC3339) - endpoint, err := url.Parse(ec2.Region.EC2Endpoint) - if err != nil { - return err - } - if endpoint.Path == "" { - endpoint.Path = "/" - } - sign(ec2.Auth, "GET", endpoint.Path, params, endpoint.Host) - endpoint.RawQuery = multimap(params).Encode() - if debug { - log.Printf("get { %v } -> {\n", endpoint.String()) - } - - r, err := ec2.httpClient.Get(endpoint.String()) - if err != nil { - return err - } - defer r.Body.Close() - - if debug { - dump, _ := httputil.DumpResponse(r, true) - log.Printf("response:\n") - log.Printf("%v\n}\n", string(dump)) - } - if r.StatusCode != 200 { - return buildError(r) - } - err = xml.NewDecoder(r.Body).Decode(resp) - return err -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -func buildError(r *http.Response) error { - errors := xmlErrors{} - xml.NewDecoder(r.Body).Decode(&errors) - var err Error - if len(errors.Errors) > 0 { - err = errors.Errors[0] - } - err.RequestId = errors.RequestId - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = err.Code - } - return &err -} - -func makeParams(action string) map[string]string { - params := make(map[string]string) - params["Action"] = action - return params -} - -func addParamsList(params map[string]string, label string, ids []string) { - for i, id := range ids { - params[label+"."+strconv.Itoa(i+1)] 
= id - } -} - -func addBlockDeviceParams(prename string, params map[string]string, blockdevices []BlockDeviceMapping) { - for i, k := range blockdevices { - // Fixup index since Amazon counts these from 1 - prefix := prename + "BlockDeviceMapping." + strconv.Itoa(i+1) + "." - - if k.DeviceName != "" { - params[prefix+"DeviceName"] = k.DeviceName - } - - if k.VirtualName != "" { - params[prefix+"VirtualName"] = k.VirtualName - } else if k.NoDevice { - params[prefix+"NoDevice"] = "" - } else { - if k.SnapshotId != "" { - params[prefix+"Ebs.SnapshotId"] = k.SnapshotId - } - if k.VolumeType != "" { - params[prefix+"Ebs.VolumeType"] = k.VolumeType - } - if k.IOPS != 0 { - params[prefix+"Ebs.Iops"] = strconv.FormatInt(k.IOPS, 10) - } - if k.VolumeSize != 0 { - params[prefix+"Ebs.VolumeSize"] = strconv.FormatInt(k.VolumeSize, 10) - } - if k.DeleteOnTermination { - params[prefix+"Ebs.DeleteOnTermination"] = "true" - } else { - params[prefix+"Ebs.DeleteOnTermination"] = "false" - } - if k.Encrypted { - params[prefix+"Ebs.Encrypted"] = "true" - } - } - } -} - -// ---------------------------------------------------------------------------- -// Instance management functions and types. - -// The RunInstances type encapsulates options for the respective request in EC2. -// -// See http://goo.gl/Mcm3b for more details. -type RunInstances struct { - ImageId string - MinCount int - MaxCount int - KeyName string - InstanceType string - SecurityGroups []SecurityGroup - IamInstanceProfile string - KernelId string - RamdiskId string - UserData []byte - AvailZone string - PlacementGroupName string - Monitoring bool - SubnetId string - AssociatePublicIpAddress bool - DisableAPITermination bool - EbsOptimized bool - ShutdownBehavior string - PrivateIPAddress string - BlockDevices []BlockDeviceMapping - Tenancy string -} - -// Response to a RunInstances request. -// -// See http://goo.gl/Mcm3b for more details. 
-type RunInstancesResp struct { - RequestId string `xml:"requestId"` - ReservationId string `xml:"reservationId"` - OwnerId string `xml:"ownerId"` - SecurityGroups []SecurityGroup `xml:"groupSet>item"` - Instances []Instance `xml:"instancesSet>item"` -} - -// BlockDevice represents the association of a block device with an instance. -type BlockDevice struct { - DeviceName string `xml:"deviceName"` - VolumeId string `xml:"ebs>volumeId"` - Status string `xml:"ebs>status"` - AttachTime string `xml:"ebs>attachTime"` - DeleteOnTermination bool `xml:"ebs>deleteOnTermination"` -} - -// Instance encapsulates a running instance in EC2. -// -// See http://goo.gl/OCH8a for more details. -type Instance struct { - InstanceId string `xml:"instanceId"` - InstanceType string `xml:"instanceType"` - ImageId string `xml:"imageId"` - PrivateDNSName string `xml:"privateDnsName"` - DNSName string `xml:"dnsName"` - KeyName string `xml:"keyName"` - AMILaunchIndex int `xml:"amiLaunchIndex"` - Hypervisor string `xml:"hypervisor"` - VirtType string `xml:"virtualizationType"` - Monitoring string `xml:"monitoring>state"` - AvailZone string `xml:"placement>availabilityZone"` - Tenancy string `xml:"placement>tenancy"` - PlacementGroupName string `xml:"placement>groupName"` - State InstanceState `xml:"instanceState"` - Tags []Tag `xml:"tagSet>item"` - VpcId string `xml:"vpcId"` - SubnetId string `xml:"subnetId"` - IamInstanceProfile string `xml:"iamInstanceProfile"` - PrivateIpAddress string `xml:"privateIpAddress"` - PublicIpAddress string `xml:"ipAddress"` - Architecture string `xml:"architecture"` - LaunchTime time.Time `xml:"launchTime"` - SourceDestCheck bool `xml:"sourceDestCheck"` - SecurityGroups []SecurityGroup `xml:"groupSet>item"` - EbsOptimized string `xml:"ebsOptimized"` - BlockDevices []BlockDevice `xml:"blockDeviceMapping>item"` - RootDeviceName string `xml:"rootDeviceName"` -} - -// RunInstances starts new instances in EC2. 
-// If options.MinCount and options.MaxCount are both zero, a single instance -// will be started; otherwise if options.MaxCount is zero, options.MinCount -// will be used insteead. -// -// See http://goo.gl/Mcm3b for more details. -func (ec2 *EC2) RunInstances(options *RunInstances) (resp *RunInstancesResp, err error) { - params := makeParams("RunInstances") - params["ImageId"] = options.ImageId - params["InstanceType"] = options.InstanceType - var min, max int - if options.MinCount == 0 && options.MaxCount == 0 { - min = 1 - max = 1 - } else if options.MaxCount == 0 { - min = options.MinCount - max = min - } else { - min = options.MinCount - max = options.MaxCount - } - params["MinCount"] = strconv.Itoa(min) - params["MaxCount"] = strconv.Itoa(max) - token, err := clientToken() - if err != nil { - return nil, err - } - params["ClientToken"] = token - - if options.KeyName != "" { - params["KeyName"] = options.KeyName - } - if options.KernelId != "" { - params["KernelId"] = options.KernelId - } - if options.RamdiskId != "" { - params["RamdiskId"] = options.RamdiskId - } - if options.UserData != nil { - userData := make([]byte, b64.EncodedLen(len(options.UserData))) - b64.Encode(userData, options.UserData) - params["UserData"] = string(userData) - } - if options.AvailZone != "" { - params["Placement.AvailabilityZone"] = options.AvailZone - } - if options.PlacementGroupName != "" { - params["Placement.GroupName"] = options.PlacementGroupName - } - if options.Monitoring { - params["Monitoring.Enabled"] = "true" - } - if options.Tenancy != "" { - params["Placement.Tenancy"] = options.Tenancy - } - if options.SubnetId != "" && options.AssociatePublicIpAddress { - // If we have a non-default VPC / Subnet specified, we can flag - // AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided. 
- // You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise - // you get: Network interfaces and an instance-level subnet ID may not be specified on the same request - // You also need to attach Security Groups to the NetworkInterface instead of the instance, - // to avoid: Network interfaces and an instance-level security groups may not be specified on - // the same request - params["NetworkInterface.0.DeviceIndex"] = "0" - params["NetworkInterface.0.AssociatePublicIpAddress"] = "true" - params["NetworkInterface.0.SubnetId"] = options.SubnetId - - if options.PrivateIPAddress != "" { - params["NetworkInterface.0.PrivateIpAddress"] = options.PrivateIPAddress - } - - i := 1 - for _, g := range options.SecurityGroups { - // We only have SecurityGroupId's on NetworkInterface's, no SecurityGroup params. - if g.Id != "" { - params["NetworkInterface.0.SecurityGroupId."+strconv.Itoa(i)] = g.Id - i++ - } - } - } else { - if options.SubnetId != "" { - params["SubnetId"] = options.SubnetId - } - - if options.PrivateIPAddress != "" { - params["PrivateIpAddress"] = options.PrivateIPAddress - } - - i, j := 1, 1 - for _, g := range options.SecurityGroups { - if g.Id != "" { - params["SecurityGroupId."+strconv.Itoa(i)] = g.Id - i++ - } else { - params["SecurityGroup."+strconv.Itoa(j)] = g.Name - j++ - } - } - } - if options.IamInstanceProfile != "" { - params["IamInstanceProfile.Name"] = options.IamInstanceProfile - } - if options.DisableAPITermination { - params["DisableApiTermination"] = "true" - } - if options.EbsOptimized { - params["EbsOptimized"] = "true" - } - if options.ShutdownBehavior != "" { - params["InstanceInitiatedShutdownBehavior"] = options.ShutdownBehavior - } - addBlockDeviceParams("", params, options.BlockDevices) - - resp = &RunInstancesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -func clientToken() (string, error) { - // Maximum EC2 client token size is 64 bytes. 
- // Each byte expands to two when hex encoded. - buf := make([]byte, 32) - _, err := rand.Read(buf) - if err != nil { - return "", err - } - return hex.EncodeToString(buf), nil -} - -// The GetConsoleOutput type encapsulates options for the respective request in EC2. -// -// See http://goo.gl/EY70zb for more details. -type GetConsoleOutput struct { - InstanceId string -} - -// Response to a GetConsoleOutput request. Note that Output is base64-encoded, -// as in the underlying AWS API. -// -// See http://goo.gl/EY70zb for more details. -type GetConsoleOutputResp struct { - RequestId string `xml:"requestId"` - InstanceId string `xml:"instanceId"` - Timestamp time.Time `xml:"timestamp"` - Output string `xml:"output"` -} - -// GetConsoleOutput returns the console output for the sepcified instance. Note -// that console output is base64-encoded, as in the underlying AWS API. -// -// See http://goo.gl/EY70zb for more details. -func (ec2 *EC2) GetConsoleOutput(options *GetConsoleOutput) (resp *GetConsoleOutputResp, err error) { - params := makeParams("GetConsoleOutput") - params["InstanceId"] = options.InstanceId - resp = &GetConsoleOutputResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ---------------------------------------------------------------------------- -// Instance events and status functions and types. - -// The DescribeInstanceStatus type encapsulates options for the respective request in EC2. -// -// See http://goo.gl/DFySJY for more details. 
-type EventsSet struct { - Code string `xml:"code"` - Description string `xml:"description"` - NotBefore string `xml:"notBefore"` - NotAfter string `xml:"notAfter"` -} - -type StatusDetails struct { - Name string `xml:"name"` - Status string `xml:"status"` - ImpairedSince string `xml:"impairedSince"` -} - -type Status struct { - Status string `xml:"status"` - Details []StatusDetails `xml:"details>item"` -} - -type InstanceStatusSet struct { - InstanceId string `xml:"instanceId"` - AvailabilityZone string `xml:"availabilityZone"` - InstanceState InstanceState `xml:"instanceState"` - SystemStatus Status `xml:"systemStatus"` - InstanceStatus Status `xml:"instanceStatus"` - Events []EventsSet `xml:"eventsSet>item"` -} - -type DescribeInstanceStatusResp struct { - RequestId string `xml:"requestId"` - InstanceStatus []InstanceStatusSet `xml:"instanceStatusSet>item"` -} - -type DescribeInstanceStatus struct { - InstanceIds []string - IncludeAllInstances bool - MaxResults int64 - NextToken string -} - -func (ec2 *EC2) DescribeInstanceStatus(options *DescribeInstanceStatus, filter *Filter) (resp *DescribeInstanceStatusResp, err error) { - params := makeParams("DescribeInstanceStatus") - if options.IncludeAllInstances { - params["IncludeAllInstances"] = "true" - } - if len(options.InstanceIds) > 0 { - addParamsList(params, "InstanceId", options.InstanceIds) - } - if options.MaxResults > 0 { - params["MaxResults"] = strconv.FormatInt(options.MaxResults, 10) - } - if options.NextToken != "" { - params["NextToken"] = options.NextToken - } - if filter != nil { - filter.addParams(params) - } - - resp = &DescribeInstanceStatusResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// ---------------------------------------------------------------------------- -// Spot Instance management functions and types. - -// The RequestSpotInstances type encapsulates options for the respective request in EC2. 
-// -// See http://goo.gl/GRZgCD for more details. -type RequestSpotInstances struct { - SpotPrice string - InstanceCount int - Type string - ImageId string - KeyName string - InstanceType string - SecurityGroups []SecurityGroup - IamInstanceProfile string - KernelId string - RamdiskId string - UserData []byte - AvailZone string - PlacementGroupName string - Monitoring bool - SubnetId string - AssociatePublicIpAddress bool - PrivateIPAddress string - BlockDevices []BlockDeviceMapping -} - -type SpotInstanceSpec struct { - ImageId string - KeyName string - InstanceType string - SecurityGroups []SecurityGroup - IamInstanceProfile string - KernelId string - RamdiskId string - UserData []byte - AvailZone string - PlacementGroupName string - Monitoring bool - SubnetId string - AssociatePublicIpAddress bool - PrivateIPAddress string - BlockDevices []BlockDeviceMapping -} - -type SpotLaunchSpec struct { - ImageId string `xml:"imageId"` - KeyName string `xml:"keyName"` - InstanceType string `xml:"instanceType"` - SecurityGroups []SecurityGroup `xml:"groupSet>item"` - IamInstanceProfile string `xml:"iamInstanceProfile"` - KernelId string `xml:"kernelId"` - RamdiskId string `xml:"ramdiskId"` - PlacementGroupName string `xml:"placement>groupName"` - Monitoring bool `xml:"monitoring>enabled"` - SubnetId string `xml:"subnetId"` - BlockDevices []BlockDeviceMapping `xml:"blockDeviceMapping>item"` -} - -type SpotStatus struct { - Code string `xml:"code"` - UpdateTime string `xml:"updateTime"` - Message string `xml:"message"` -} - -type SpotRequestResult struct { - SpotRequestId string `xml:"spotInstanceRequestId"` - SpotPrice string `xml:"spotPrice"` - Type string `xml:"type"` - AvailZone string `xml:"launchedAvailabilityZone"` - InstanceId string `xml:"instanceId"` - State string `xml:"state"` - Status SpotStatus `xml:"status"` - SpotLaunchSpec SpotLaunchSpec `xml:"launchSpecification"` - CreateTime string `xml:"createTime"` - Tags []Tag `xml:"tagSet>item"` -} - -// Response to a 
RequestSpotInstances request. -// -// See http://goo.gl/GRZgCD for more details. -type RequestSpotInstancesResp struct { - RequestId string `xml:"requestId"` - SpotRequestResults []SpotRequestResult `xml:"spotInstanceRequestSet>item"` -} - -// RequestSpotInstances requests a new spot instances in EC2. -func (ec2 *EC2) RequestSpotInstances(options *RequestSpotInstances) (resp *RequestSpotInstancesResp, err error) { - params := makeParams("RequestSpotInstances") - prefix := "LaunchSpecification" + "." - - params["SpotPrice"] = options.SpotPrice - params[prefix+"ImageId"] = options.ImageId - params[prefix+"InstanceType"] = options.InstanceType - - if options.InstanceCount != 0 { - params["InstanceCount"] = strconv.Itoa(options.InstanceCount) - } - if options.KeyName != "" { - params[prefix+"KeyName"] = options.KeyName - } - if options.KernelId != "" { - params[prefix+"KernelId"] = options.KernelId - } - if options.RamdiskId != "" { - params[prefix+"RamdiskId"] = options.RamdiskId - } - if options.UserData != nil { - userData := make([]byte, b64.EncodedLen(len(options.UserData))) - b64.Encode(userData, options.UserData) - params[prefix+"UserData"] = string(userData) - } - if options.AvailZone != "" { - params[prefix+"Placement.AvailabilityZone"] = options.AvailZone - } - if options.PlacementGroupName != "" { - params[prefix+"Placement.GroupName"] = options.PlacementGroupName - } - if options.Monitoring { - params[prefix+"Monitoring.Enabled"] = "true" - } - if options.SubnetId != "" && options.AssociatePublicIpAddress { - // If we have a non-default VPC / Subnet specified, we can flag - // AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided. 
- // You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise - // you get: Network interfaces and an instance-level subnet ID may not be specified on the same request - // You also need to attach Security Groups to the NetworkInterface instead of the instance, - // to avoid: Network interfaces and an instance-level security groups may not be specified on - // the same request - params[prefix+"NetworkInterface.0.DeviceIndex"] = "0" - params[prefix+"NetworkInterface.0.AssociatePublicIpAddress"] = "true" - params[prefix+"NetworkInterface.0.SubnetId"] = options.SubnetId - - i := 1 - for _, g := range options.SecurityGroups { - // We only have SecurityGroupId's on NetworkInterface's, no SecurityGroup params. - if g.Id != "" { - params[prefix+"NetworkInterface.0.SecurityGroupId."+strconv.Itoa(i)] = g.Id - i++ - } - } - } else { - if options.SubnetId != "" { - params[prefix+"SubnetId"] = options.SubnetId - } - - i, j := 1, 1 - for _, g := range options.SecurityGroups { - if g.Id != "" { - params[prefix+"SecurityGroupId."+strconv.Itoa(i)] = g.Id - i++ - } else { - params[prefix+"SecurityGroup."+strconv.Itoa(j)] = g.Name - j++ - } - } - } - if options.IamInstanceProfile != "" { - params[prefix+"IamInstanceProfile.Name"] = options.IamInstanceProfile - } - if options.PrivateIPAddress != "" { - params[prefix+"PrivateIpAddress"] = options.PrivateIPAddress - } - addBlockDeviceParams(prefix, params, options.BlockDevices) - - resp = &RequestSpotInstancesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Response to a DescribeSpotInstanceRequests request. -// -// See http://goo.gl/KsKJJk for more details. -type SpotRequestsResp struct { - RequestId string `xml:"requestId"` - SpotRequestResults []SpotRequestResult `xml:"spotInstanceRequestSet>item"` -} - -// DescribeSpotInstanceRequests returns details about spot requests in EC2. 
Both parameters -// are optional, and if provided will limit the spot requests returned to those -// matching the given spot request ids or filtering rules. -// -// See http://goo.gl/KsKJJk for more details. -func (ec2 *EC2) DescribeSpotRequests(spotrequestIds []string, filter *Filter) (resp *SpotRequestsResp, err error) { - params := makeParams("DescribeSpotInstanceRequests") - addParamsList(params, "SpotInstanceRequestId", spotrequestIds) - filter.addParams(params) - resp = &SpotRequestsResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Response to a CancelSpotInstanceRequests request. -// -// See http://goo.gl/3BKHj for more details. -type CancelSpotRequestResult struct { - SpotRequestId string `xml:"spotInstanceRequestId"` - State string `xml:"state"` -} -type CancelSpotRequestsResp struct { - RequestId string `xml:"requestId"` - CancelSpotRequestResults []CancelSpotRequestResult `xml:"spotInstanceRequestSet>item"` -} - -// CancelSpotRequests requests the cancellation of spot requests when the given ids. -// -// See http://goo.gl/3BKHj for more details. -func (ec2 *EC2) CancelSpotRequests(spotrequestIds []string) (resp *CancelSpotRequestsResp, err error) { - params := makeParams("CancelSpotInstanceRequests") - addParamsList(params, "SpotInstanceRequestId", spotrequestIds) - resp = &CancelSpotRequestsResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -type DescribeSpotPriceHistory struct { - InstanceType []string - ProductDescription []string - AvailabilityZone string - StartTime, EndTime time.Time -} - -// Response to a DescribeSpotPriceHisotyr request. -// -// See http://goo.gl/3BKHj for more details. 
-type DescribeSpotPriceHistoryResp struct { - RequestId string `xml:"requestId"` - History []SpotPriceHistory `xml:"spotPriceHistorySet>item"` -} - -type SpotPriceHistory struct { - InstanceType string `xml:"instanceType"` - ProductDescription string `xml:"productDescription"` - SpotPrice string `xml:"spotPrice"` - Timestamp time.Time `xml:"timestamp"` - AvailabilityZone string `xml:"availabilityZone"` -} - -// DescribeSpotPriceHistory gets the spot pricing history. -// -// See http://goo.gl/3BKHj for more details. -func (ec2 *EC2) DescribeSpotPriceHistory(o *DescribeSpotPriceHistory) (resp *DescribeSpotPriceHistoryResp, err error) { - params := makeParams("DescribeSpotPriceHistory") - if o.AvailabilityZone != "" { - params["AvailabilityZone"] = o.AvailabilityZone - } - - if !o.StartTime.IsZero() { - params["StartTime"] = o.StartTime.In(time.UTC).Format(time.RFC3339) - } - if !o.EndTime.IsZero() { - params["EndTime"] = o.EndTime.In(time.UTC).Format(time.RFC3339) - } - - if len(o.InstanceType) > 0 { - addParamsList(params, "InstanceType", o.InstanceType) - } - if len(o.ProductDescription) > 0 { - addParamsList(params, "ProductDescription", o.ProductDescription) - } - - resp = &DescribeSpotPriceHistoryResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Response to a TerminateInstances request. -// -// See http://goo.gl/3BKHj for more details. -type TerminateInstancesResp struct { - RequestId string `xml:"requestId"` - StateChanges []InstanceStateChange `xml:"instancesSet>item"` -} - -// InstanceState encapsulates the state of an instance in EC2. -// -// See http://goo.gl/y3ZBq for more details. -type InstanceState struct { - Code int `xml:"code"` // Watch out, bits 15-8 have unpublished meaning. - Name string `xml:"name"` -} - -// InstanceStateChange informs of the previous and current states -// for an instance when a state change is requested. 
-type InstanceStateChange struct { - InstanceId string `xml:"instanceId"` - CurrentState InstanceState `xml:"currentState"` - PreviousState InstanceState `xml:"previousState"` -} - -// TerminateInstances requests the termination of instances when the given ids. -// -// See http://goo.gl/3BKHj for more details. -func (ec2 *EC2) TerminateInstances(instIds []string) (resp *TerminateInstancesResp, err error) { - params := makeParams("TerminateInstances") - addParamsList(params, "InstanceId", instIds) - resp = &TerminateInstancesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Response to a DescribeInstances request. -// -// See http://goo.gl/mLbmw for more details. -type InstancesResp struct { - RequestId string `xml:"requestId"` - Reservations []Reservation `xml:"reservationSet>item"` -} - -// Reservation represents details about a reservation in EC2. -// -// See http://goo.gl/0ItPT for more details. -type Reservation struct { - ReservationId string `xml:"reservationId"` - OwnerId string `xml:"ownerId"` - RequesterId string `xml:"requesterId"` - SecurityGroups []SecurityGroup `xml:"groupSet>item"` - Instances []Instance `xml:"instancesSet>item"` -} - -// Instances returns details about instances in EC2. Both parameters -// are optional, and if provided will limit the instances returned to those -// matching the given instance ids or filtering rules. -// -// See http://goo.gl/4No7c for more details. 
-func (ec2 *EC2) Instances(instIds []string, filter *Filter) (resp *InstancesResp, err error) { - params := makeParams("DescribeInstances") - addParamsList(params, "InstanceId", instIds) - filter.addParams(params) - resp = &InstancesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ---------------------------------------------------------------------------- -// Volume management - -// The CreateVolume request parameters -// -// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVolume.html -type CreateVolume struct { - AvailZone string - Size int64 - SnapshotId string - VolumeType string - IOPS int64 - Encrypted bool -} - -// Response to an AttachVolume request -type AttachVolumeResp struct { - RequestId string `xml:"requestId"` - VolumeId string `xml:"volumeId"` - InstanceId string `xml:"instanceId"` - Device string `xml:"device"` - Status string `xml:"status"` - AttachTime string `xml:"attachTime"` -} - -// Response to a CreateVolume request -type CreateVolumeResp struct { - RequestId string `xml:"requestId"` - VolumeId string `xml:"volumeId"` - Size int64 `xml:"size"` - SnapshotId string `xml:"snapshotId"` - AvailZone string `xml:"availabilityZone"` - Status string `xml:"status"` - CreateTime string `xml:"createTime"` - VolumeType string `xml:"volumeType"` - IOPS int64 `xml:"iops"` - Encrypted bool `xml:"encrypted"` -} - -// Volume is a single volume. 
-type Volume struct { - VolumeId string `xml:"volumeId"` - Size string `xml:"size"` - SnapshotId string `xml:"snapshotId"` - AvailZone string `xml:"availabilityZone"` - Status string `xml:"status"` - Attachments []VolumeAttachment `xml:"attachmentSet>item"` - VolumeType string `xml:"volumeType"` - IOPS int64 `xml:"iops"` - Encrypted bool `xml:"encrypted"` - Tags []Tag `xml:"tagSet>item"` -} - -type VolumeAttachment struct { - VolumeId string `xml:"volumeId"` - InstanceId string `xml:"instanceId"` - Device string `xml:"device"` - Status string `xml:"status"` -} - -// Response to a DescribeVolumes request -type VolumesResp struct { - RequestId string `xml:"requestId"` - Volumes []Volume `xml:"volumeSet>item"` -} - -// Attach a volume. -func (ec2 *EC2) AttachVolume(volumeId string, instanceId string, device string) (resp *AttachVolumeResp, err error) { - params := makeParams("AttachVolume") - params["VolumeId"] = volumeId - params["InstanceId"] = instanceId - params["Device"] = device - - resp = &AttachVolumeResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Create a new volume. -func (ec2 *EC2) CreateVolume(options *CreateVolume) (resp *CreateVolumeResp, err error) { - params := makeParams("CreateVolume") - params["AvailabilityZone"] = options.AvailZone - if options.Size > 0 { - params["Size"] = strconv.FormatInt(options.Size, 10) - } - - if options.SnapshotId != "" { - params["SnapshotId"] = options.SnapshotId - } - - if options.VolumeType != "" { - params["VolumeType"] = options.VolumeType - } - - if options.IOPS > 0 { - params["Iops"] = strconv.FormatInt(options.IOPS, 10) - } - - if options.Encrypted { - params["Encrypted"] = "true" - } - - resp = &CreateVolumeResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Delete an EBS volume. 
-func (ec2 *EC2) DeleteVolume(id string) (resp *SimpleResp, err error) { - params := makeParams("DeleteVolume") - params["VolumeId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Detaches an EBS volume. -func (ec2 *EC2) DetachVolume(id string) (resp *SimpleResp, err error) { - params := makeParams("DetachVolume") - params["VolumeId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Finds or lists all volumes. -func (ec2 *EC2) Volumes(volIds []string, filter *Filter) (resp *VolumesResp, err error) { - params := makeParams("DescribeVolumes") - addParamsList(params, "VolumeId", volIds) - filter.addParams(params) - resp = &VolumesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ---------------------------------------------------------------------------- -// Availability zone management functions and types. -// See http://goo.gl/ylxT4R for more details. - -// DescribeAvailabilityZonesResp represents a response to a DescribeAvailabilityZones -// request in EC2. -type DescribeAvailabilityZonesResp struct { - RequestId string `xml:"requestId"` - Zones []AvailabilityZoneInfo `xml:"availabilityZoneInfo>item"` -} - -// AvailabilityZoneInfo encapsulates details for an availability zone in EC2. -type AvailabilityZoneInfo struct { - AvailabilityZone - State string `xml:"zoneState"` - MessageSet []string `xml:"messageSet>item"` -} - -// AvailabilityZone represents an EC2 availability zone. -type AvailabilityZone struct { - Name string `xml:"zoneName"` - Region string `xml:"regionName"` -} - -// DescribeAvailabilityZones returns details about availability zones in EC2. -// The filter parameter is optional, and if provided will limit the -// availability zones returned to those matching the given filtering -// rules. -// -// See http://goo.gl/ylxT4R for more details. 
-func (ec2 *EC2) DescribeAvailabilityZones(filter *Filter) (resp *DescribeAvailabilityZonesResp, err error) { - params := makeParams("DescribeAvailabilityZones") - filter.addParams(params) - resp = &DescribeAvailabilityZonesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ---------------------------------------------------------------------------- -// ElasticIp management (for VPC) - -// The AllocateAddress request parameters -// -// see http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-AllocateAddress.html -type AllocateAddress struct { - Domain string -} - -// Response to an AllocateAddress request -type AllocateAddressResp struct { - RequestId string `xml:"requestId"` - PublicIp string `xml:"publicIp"` - Domain string `xml:"domain"` - AllocationId string `xml:"allocationId"` -} - -// The AssociateAddress request parameters -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-AssociateAddress.html -type AssociateAddress struct { - InstanceId string - PublicIp string - AllocationId string - AllowReassociation bool -} - -// Response to an AssociateAddress request -type AssociateAddressResp struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` - AssociationId string `xml:"associationId"` -} - -// Address represents an Elastic IP Address -// See http://goo.gl/uxCjp7 for more details -type Address struct { - PublicIp string `xml:"publicIp"` - AllocationId string `xml:"allocationId"` - Domain string `xml:"domain"` - InstanceId string `xml:"instanceId"` - AssociationId string `xml:"associationId"` - NetworkInterfaceId string `xml:"networkInterfaceId"` - NetworkInterfaceOwnerId string `xml:"networkInterfaceOwnerId"` - PrivateIpAddress string `xml:"privateIpAddress"` -} - -type DescribeAddressesResp struct { - RequestId string `xml:"requestId"` - Addresses []Address `xml:"addressesSet>item"` -} - -// Allocate a new Elastic IP. 
-func (ec2 *EC2) AllocateAddress(options *AllocateAddress) (resp *AllocateAddressResp, err error) { - params := makeParams("AllocateAddress") - params["Domain"] = options.Domain - - resp = &AllocateAddressResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Release an Elastic IP (VPC). -func (ec2 *EC2) ReleaseAddress(id string) (resp *SimpleResp, err error) { - params := makeParams("ReleaseAddress") - params["AllocationId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Release an Elastic IP (Public) -func (ec2 *EC2) ReleasePublicAddress(publicIp string) (resp *SimpleResp, err error) { - params := makeParams("ReleaseAddress") - params["PublicIp"] = publicIp - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Associate an address with a VPC instance. -func (ec2 *EC2) AssociateAddress(options *AssociateAddress) (resp *AssociateAddressResp, err error) { - params := makeParams("AssociateAddress") - params["InstanceId"] = options.InstanceId - if options.PublicIp != "" { - params["PublicIp"] = options.PublicIp - } - if options.AllocationId != "" { - params["AllocationId"] = options.AllocationId - } - if options.AllowReassociation { - params["AllowReassociation"] = "true" - } - - resp = &AssociateAddressResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Disassociate an address from a VPC instance. -func (ec2 *EC2) DisassociateAddress(id string) (resp *SimpleResp, err error) { - params := makeParams("DisassociateAddress") - params["AssociationId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Disassociate an address from a VPC instance. 
-func (ec2 *EC2) DisassociateAddressClassic(ip string) (resp *SimpleResp, err error) { - params := makeParams("DisassociateAddress") - params["PublicIp"] = ip - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// DescribeAddresses returns details about one or more -// Elastic IP Addresses. Returned addresses can be -// filtered by Public IP, Allocation ID or multiple filters -// -// See http://goo.gl/zW7J4p for more details. -func (ec2 *EC2) Addresses(publicIps []string, allocationIds []string, filter *Filter) (resp *DescribeAddressesResp, err error) { - params := makeParams("DescribeAddresses") - addParamsList(params, "PublicIp", publicIps) - addParamsList(params, "AllocationId", allocationIds) - filter.addParams(params) - resp = &DescribeAddressesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ---------------------------------------------------------------------------- -// Image and snapshot management functions and types. - -// The CreateImage request parameters. -// -// See http://goo.gl/cxU41 for more details. -type CreateImage struct { - InstanceId string - Name string - Description string - NoReboot bool - BlockDevices []BlockDeviceMapping -} - -// Response to a CreateImage request. -// -// See http://goo.gl/cxU41 for more details. -type CreateImageResp struct { - RequestId string `xml:"requestId"` - ImageId string `xml:"imageId"` -} - -// Response to a DescribeImages request. -// -// See http://goo.gl/hLnyg for more details. -type ImagesResp struct { - RequestId string `xml:"requestId"` - Images []Image `xml:"imagesSet>item"` -} - -// Response to a DescribeImageAttribute request. -// -// See http://goo.gl/bHO3zT for more details. 
-type ImageAttributeResp struct { - RequestId string `xml:"requestId"` - ImageId string `xml:"imageId"` - Kernel string `xml:"kernel>value"` - RamDisk string `xml:"ramdisk>value"` - Description string `xml:"description>value"` - Group string `xml:"launchPermission>item>group"` - UserIds []string `xml:"launchPermission>item>userId"` - ProductCodes []string `xml:"productCodes>item>productCode"` - BlockDevices []BlockDeviceMapping `xml:"blockDeviceMapping>item"` -} - -// The RegisterImage request parameters. -type RegisterImage struct { - ImageLocation string - Name string - Description string - Architecture string - KernelId string - RamdiskId string - RootDeviceName string - VirtType string - SriovNetSupport string - BlockDevices []BlockDeviceMapping -} - -// Response to a RegisterImage request. -type RegisterImageResp struct { - RequestId string `xml:"requestId"` - ImageId string `xml:"imageId"` -} - -// Response to a DegisterImage request. -// -// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeregisterImage.html -type DeregisterImageResp struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` -} - -// BlockDeviceMapping represents the association of a block device with an image. -// -// See http://goo.gl/wnDBf for more details. -type BlockDeviceMapping struct { - DeviceName string `xml:"deviceName"` - VirtualName string `xml:"virtualName"` - SnapshotId string `xml:"ebs>snapshotId"` - VolumeType string `xml:"ebs>volumeType"` - VolumeSize int64 `xml:"ebs>volumeSize"` - DeleteOnTermination bool `xml:"ebs>deleteOnTermination"` - Encrypted bool `xml:"ebs>encrypted"` - NoDevice bool `xml:"noDevice"` - - // The number of I/O operations per second (IOPS) that the volume supports. - IOPS int64 `xml:"ebs>iops"` -} - -// Image represents details about an image. -// -// See http://goo.gl/iSqJG for more details. 
-type Image struct { - Id string `xml:"imageId"` - Name string `xml:"name"` - Description string `xml:"description"` - Type string `xml:"imageType"` - State string `xml:"imageState"` - Location string `xml:"imageLocation"` - Public bool `xml:"isPublic"` - Architecture string `xml:"architecture"` - Platform string `xml:"platform"` - ProductCodes []string `xml:"productCode>item>productCode"` - KernelId string `xml:"kernelId"` - RamdiskId string `xml:"ramdiskId"` - StateReason string `xml:"stateReason"` - OwnerId string `xml:"imageOwnerId"` - OwnerAlias string `xml:"imageOwnerAlias"` - RootDeviceType string `xml:"rootDeviceType"` - RootDeviceName string `xml:"rootDeviceName"` - VirtualizationType string `xml:"virtualizationType"` - Hypervisor string `xml:"hypervisor"` - BlockDevices []BlockDeviceMapping `xml:"blockDeviceMapping>item"` - Tags []Tag `xml:"tagSet>item"` -} - -// The ModifyImageAttribute request parameters. -type ModifyImageAttribute struct { - AddUsers []string - RemoveUsers []string - AddGroups []string - RemoveGroups []string - ProductCodes []string - Description string -} - -// The CopyImage request parameters. -// -// See http://goo.gl/hQwPCK for more details. -type CopyImage struct { - SourceRegion string - SourceImageId string - Name string - Description string - ClientToken string -} - -// Response to a CopyImage request. -// -// See http://goo.gl/hQwPCK for more details. -type CopyImageResp struct { - RequestId string `xml:"requestId"` - ImageId string `xml:"imageId"` -} - -// Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance -// that is either running or stopped. -// -// See http://goo.gl/cxU41 for more details. 
-func (ec2 *EC2) CreateImage(options *CreateImage) (resp *CreateImageResp, err error) { - params := makeParams("CreateImage") - params["InstanceId"] = options.InstanceId - params["Name"] = options.Name - if options.Description != "" { - params["Description"] = options.Description - } - if options.NoReboot { - params["NoReboot"] = "true" - } - addBlockDeviceParams("", params, options.BlockDevices) - - resp = &CreateImageResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Images returns details about available images. -// The ids and filter parameters, if provided, will limit the images returned. -// For example, to get all the private images associated with this account set -// the boolean filter "is-public" to 0. -// For list of filters: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeImages.html -// -// Note: calling this function with nil ids and filter parameters will result in -// a very large number of images being returned. -// -// See http://goo.gl/SRBhW for more details. -func (ec2 *EC2) Images(ids []string, filter *Filter) (resp *ImagesResp, err error) { - params := makeParams("DescribeImages") - for i, id := range ids { - params["ImageId."+strconv.Itoa(i+1)] = id - } - filter.addParams(params) - - resp = &ImagesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ImagesByOwners returns details about available images. -// The ids, owners, and filter parameters, if provided, will limit the images returned. -// For example, to get all the private images associated with this account set -// the boolean filter "is-public" to 0. -// For list of filters: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeImages.html -// -// Note: calling this function with nil ids and filter parameters will result in -// a very large number of images being returned. -// -// See http://goo.gl/SRBhW for more details. 
-func (ec2 *EC2) ImagesByOwners(ids []string, owners []string, filter *Filter) (resp *ImagesResp, err error) { - params := makeParams("DescribeImages") - for i, id := range ids { - params["ImageId."+strconv.Itoa(i+1)] = id - } - for i, owner := range owners { - params[fmt.Sprintf("Owner.%d", i+1)] = owner - } - - filter.addParams(params) - - resp = &ImagesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ImageAttribute describes an attribute of an AMI. -// You can specify only one attribute at a time. -// Valid attributes are: -// description | kernel | ramdisk | launchPermission | productCodes | blockDeviceMapping -// -// See http://goo.gl/bHO3zT for more details. -func (ec2 *EC2) ImageAttribute(imageId, attribute string) (resp *ImageAttributeResp, err error) { - params := makeParams("DescribeImageAttribute") - params["ImageId"] = imageId - params["Attribute"] = attribute - - resp = &ImageAttributeResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ModifyImageAttribute sets attributes for an image. -// -// See http://goo.gl/YUjO4G for more details. 
-func (ec2 *EC2) ModifyImageAttribute(imageId string, options *ModifyImageAttribute) (resp *SimpleResp, err error) { - params := makeParams("ModifyImageAttribute") - params["ImageId"] = imageId - if options.Description != "" { - params["Description.Value"] = options.Description - } - - if options.AddUsers != nil { - for i, user := range options.AddUsers { - p := fmt.Sprintf("LaunchPermission.Add.%d.UserId", i+1) - params[p] = user - } - } - - if options.RemoveUsers != nil { - for i, user := range options.RemoveUsers { - p := fmt.Sprintf("LaunchPermission.Remove.%d.UserId", i+1) - params[p] = user - } - } - - if options.AddGroups != nil { - for i, group := range options.AddGroups { - p := fmt.Sprintf("LaunchPermission.Add.%d.Group", i+1) - params[p] = group - } - } - - if options.RemoveGroups != nil { - for i, group := range options.RemoveGroups { - p := fmt.Sprintf("LaunchPermission.Remove.%d.Group", i+1) - params[p] = group - } - } - - if options.ProductCodes != nil { - addParamsList(params, "ProductCode", options.ProductCodes) - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - resp = nil - } - - return -} - -// Registers a new AMI with EC2. 
-// -// See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-RegisterImage.html -func (ec2 *EC2) RegisterImage(options *RegisterImage) (resp *RegisterImageResp, err error) { - params := makeParams("RegisterImage") - params["Name"] = options.Name - if options.ImageLocation != "" { - params["ImageLocation"] = options.ImageLocation - } - - if options.Description != "" { - params["Description"] = options.Description - } - - if options.Architecture != "" { - params["Architecture"] = options.Architecture - } - - if options.KernelId != "" { - params["KernelId"] = options.KernelId - } - - if options.RamdiskId != "" { - params["RamdiskId"] = options.RamdiskId - } - - if options.RootDeviceName != "" { - params["RootDeviceName"] = options.RootDeviceName - } - - if options.VirtType != "" { - params["VirtualizationType"] = options.VirtType - } - - if options.SriovNetSupport != "" { - params["SriovNetSupport"] = "simple" - } - - addBlockDeviceParams("", params, options.BlockDevices) - - resp = &RegisterImageResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Degisters an image. Note that this does not delete the backing stores of the AMI. -// -// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeregisterImage.html -func (ec2 *EC2) DeregisterImage(imageId string) (resp *DeregisterImageResp, err error) { - params := makeParams("DeregisterImage") - params["ImageId"] = imageId - - resp = &DeregisterImageResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Copy and Image from one region to another. -// -// See http://goo.gl/hQwPCK for more details. 
-func (ec2 *EC2) CopyImage(options *CopyImage) (resp *CopyImageResp, err error) { - params := makeParams("CopyImage") - - if options.SourceRegion != "" { - params["SourceRegion"] = options.SourceRegion - } - - if options.SourceImageId != "" { - params["SourceImageId"] = options.SourceImageId - } - - if options.Name != "" { - params["Name"] = options.Name - } - - if options.Description != "" { - params["Description"] = options.Description - } - - if options.ClientToken != "" { - params["ClientToken"] = options.ClientToken - } - - resp = &CopyImageResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Response to a CreateSnapshot request. -// -// See http://goo.gl/ttcda for more details. -type CreateSnapshotResp struct { - RequestId string `xml:"requestId"` - Snapshot -} - -// CreateSnapshot creates a volume snapshot and stores it in S3. -// -// See http://goo.gl/ttcda for more details. -func (ec2 *EC2) CreateSnapshot(volumeId, description string) (resp *CreateSnapshotResp, err error) { - params := makeParams("CreateSnapshot") - params["VolumeId"] = volumeId - params["Description"] = description - - resp = &CreateSnapshotResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// DeleteSnapshots deletes the volume snapshots with the given ids. -// -// Note: If you make periodic snapshots of a volume, the snapshots are -// incremental so that only the blocks on the device that have changed -// since your last snapshot are incrementally saved in the new snapshot. -// Even though snapshots are saved incrementally, the snapshot deletion -// process is designed so that you need to retain only the most recent -// snapshot in order to restore the volume. -// -// See http://goo.gl/vwU1y for more details. 
-func (ec2 *EC2) DeleteSnapshots(ids []string) (resp *SimpleResp, err error) { - params := makeParams("DeleteSnapshot") - for i, id := range ids { - params["SnapshotId."+strconv.Itoa(i+1)] = id - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Response to a DescribeSnapshots request. -// -// See http://goo.gl/nClDT for more details. -type SnapshotsResp struct { - RequestId string `xml:"requestId"` - Snapshots []Snapshot `xml:"snapshotSet>item"` -} - -// Snapshot represents details about a volume snapshot. -// -// See http://goo.gl/nkovs for more details. -type Snapshot struct { - Id string `xml:"snapshotId"` - VolumeId string `xml:"volumeId"` - VolumeSize string `xml:"volumeSize"` - Status string `xml:"status"` - StartTime string `xml:"startTime"` - Description string `xml:"description"` - Progress string `xml:"progress"` - OwnerId string `xml:"ownerId"` - OwnerAlias string `xml:"ownerAlias"` - Encrypted bool `xml:"encrypted"` - Tags []Tag `xml:"tagSet>item"` -} - -// Snapshots returns details about volume snapshots available to the user. -// The ids and filter parameters, if provided, limit the snapshots returned. -// -// See http://goo.gl/ogJL4 for more details. -func (ec2 *EC2) Snapshots(ids []string, filter *Filter) (resp *SnapshotsResp, err error) { - params := makeParams("DescribeSnapshots") - for i, id := range ids { - params["SnapshotId."+strconv.Itoa(i+1)] = id - } - filter.addParams(params) - - resp = &SnapshotsResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ---------------------------------------------------------------------------- -// KeyPair management functions and types. 
- -type KeyPair struct { - Name string `xml:"keyName"` - Fingerprint string `xml:"keyFingerprint"` -} - -type KeyPairsResp struct { - RequestId string `xml:"requestId"` - Keys []KeyPair `xml:"keySet>item"` -} - -type CreateKeyPairResp struct { - RequestId string `xml:"requestId"` - KeyName string `xml:"keyName"` - KeyFingerprint string `xml:"keyFingerprint"` - KeyMaterial string `xml:"keyMaterial"` -} - -type ImportKeyPairResponse struct { - RequestId string `xml:"requestId"` - KeyName string `xml:"keyName"` - KeyFingerprint string `xml:"keyFingerprint"` -} - -// CreateKeyPair creates a new key pair and returns the private key contents. -// -// See http://goo.gl/0S6hV -func (ec2 *EC2) CreateKeyPair(keyName string) (resp *CreateKeyPairResp, err error) { - params := makeParams("CreateKeyPair") - params["KeyName"] = keyName - - resp = &CreateKeyPairResp{} - err = ec2.query(params, resp) - if err == nil { - resp.KeyFingerprint = strings.TrimSpace(resp.KeyFingerprint) - } - return -} - -// DeleteKeyPair deletes a key pair. 
-// -// See http://goo.gl/0bqok -func (ec2 *EC2) DeleteKeyPair(name string) (resp *SimpleResp, err error) { - params := makeParams("DeleteKeyPair") - params["KeyName"] = name - - resp = &SimpleResp{} - err = ec2.query(params, resp) - return -} - -// KeyPairs returns list of key pairs for this account -// -// See http://goo.gl/Apzsfz -func (ec2 *EC2) KeyPairs(keynames []string, filter *Filter) (resp *KeyPairsResp, err error) { - params := makeParams("DescribeKeyPairs") - for i, name := range keynames { - params["KeyName."+strconv.Itoa(i)] = name - } - filter.addParams(params) - - resp = &KeyPairsResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return resp, nil -} - -// ImportKeyPair imports a key into AWS -// -// See http://goo.gl/NbZUvw -func (ec2 *EC2) ImportKeyPair(keyname string, key string) (resp *ImportKeyPairResponse, err error) { - params := makeParams("ImportKeyPair") - params["KeyName"] = keyname - - // Oddly, AWS requires the key material to be base64-encoded, even if it was - // already encoded. So, we force another round of encoding... - // c.f. https://groups.google.com/forum/?fromgroups#!topic/boto-dev/IczrStO9Q8M - params["PublicKeyMaterial"] = base64.StdEncoding.EncodeToString([]byte(key)) - - resp = &ImportKeyPairResponse{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// ---------------------------------------------------------------------------- -// Security group management functions and types. - -// SimpleResp represents a response to an EC2 request which on success will -// return no other information besides a request id. -type SimpleResp struct { - XMLName xml.Name - RequestId string `xml:"requestId"` -} - -// CreateSecurityGroupResp represents a response to a CreateSecurityGroup request. 
-type CreateSecurityGroupResp struct { - SecurityGroup - RequestId string `xml:"requestId"` -} - -// CreateSecurityGroup run a CreateSecurityGroup request in EC2, with the provided -// name and description. -// -// See http://goo.gl/Eo7Yl for more details. -func (ec2 *EC2) CreateSecurityGroup(group SecurityGroup) (resp *CreateSecurityGroupResp, err error) { - params := makeParams("CreateSecurityGroup") - params["GroupName"] = group.Name - params["GroupDescription"] = group.Description - if group.VpcId != "" { - params["VpcId"] = group.VpcId - } - - resp = &CreateSecurityGroupResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - resp.Name = group.Name - return resp, nil -} - -// SecurityGroupsResp represents a response to a DescribeSecurityGroups -// request in EC2. -// -// See http://goo.gl/k12Uy for more details. -type SecurityGroupsResp struct { - RequestId string `xml:"requestId"` - Groups []SecurityGroupInfo `xml:"securityGroupInfo>item"` -} - -// SecurityGroup encapsulates details for a security group in EC2. -// -// See http://goo.gl/CIdyP for more details. -type SecurityGroupInfo struct { - SecurityGroup - OwnerId string `xml:"ownerId"` - Description string `xml:"groupDescription"` - IPPerms []IPPerm `xml:"ipPermissions>item"` - IPPermsEgress []IPPerm `xml:"ipPermissionsEgress>item"` -} - -// IPPerm represents an allowance within an EC2 security group. -// -// See http://goo.gl/4oTxv for more details. -type IPPerm struct { - Protocol string `xml:"ipProtocol"` - FromPort int `xml:"fromPort"` - ToPort int `xml:"toPort"` - SourceIPs []string `xml:"ipRanges>item>cidrIp"` - SourceGroups []UserSecurityGroup `xml:"groups>item"` -} - -// UserSecurityGroup holds a security group and the owner -// of that group. -type UserSecurityGroup struct { - Id string `xml:"groupId"` - Name string `xml:"groupName"` - OwnerId string `xml:"userId"` -} - -// SecurityGroup represents an EC2 security group. 
-// If SecurityGroup is used as a parameter, then one of Id or Name -// may be empty. If both are set, then Id is used. -type SecurityGroup struct { - Id string `xml:"groupId"` - Name string `xml:"groupName"` - Description string `xml:"groupDescription"` - VpcId string `xml:"vpcId"` - Tags []Tag `xml:"tagSet>item"` -} - -// SecurityGroupNames is a convenience function that -// returns a slice of security groups with the given names. -func SecurityGroupNames(names ...string) []SecurityGroup { - g := make([]SecurityGroup, len(names)) - for i, name := range names { - g[i] = SecurityGroup{Name: name} - } - return g -} - -// SecurityGroupNames is a convenience function that -// returns a slice of security groups with the given ids. -func SecurityGroupIds(ids ...string) []SecurityGroup { - g := make([]SecurityGroup, len(ids)) - for i, id := range ids { - g[i] = SecurityGroup{Id: id} - } - return g -} - -// SecurityGroups returns details about security groups in EC2. Both parameters -// are optional, and if provided will limit the security groups returned to those -// matching the given groups or filtering rules. -// -// See http://goo.gl/k12Uy for more details. -func (ec2 *EC2) SecurityGroups(groups []SecurityGroup, filter *Filter) (resp *SecurityGroupsResp, err error) { - params := makeParams("DescribeSecurityGroups") - i, j := 1, 1 - for _, g := range groups { - if g.Id != "" { - params["GroupId."+strconv.Itoa(i)] = g.Id - i++ - } else { - params["GroupName."+strconv.Itoa(j)] = g.Name - j++ - } - } - filter.addParams(params) - - resp = &SecurityGroupsResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteSecurityGroup removes the given security group in EC2. -// -// See http://goo.gl/QJJDO for more details. 
-func (ec2 *EC2) DeleteSecurityGroup(group SecurityGroup) (resp *SimpleResp, err error) { - params := makeParams("DeleteSecurityGroup") - if group.Id != "" { - params["GroupId"] = group.Id - } else { - params["GroupName"] = group.Name - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// AuthorizeSecurityGroup creates an allowance for clients matching the provided -// rules to access instances within the given security group. -// -// See http://goo.gl/u2sDJ for more details. -func (ec2 *EC2) AuthorizeSecurityGroup(group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) { - return ec2.authOrRevoke("AuthorizeSecurityGroupIngress", group, perms) -} - -// AuthorizeSecurityGroupEgress creates an allowance for clients matching the provided -// rules for egress access. -// -// See http://goo.gl/UHnH4L for more details. -func (ec2 *EC2) AuthorizeSecurityGroupEgress(group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) { - return ec2.authOrRevoke("AuthorizeSecurityGroupEgress", group, perms) -} - -// RevokeSecurityGroup revokes permissions from a group. -// -// See http://goo.gl/ZgdxA for more details. -func (ec2 *EC2) RevokeSecurityGroup(group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) { - return ec2.authOrRevoke("RevokeSecurityGroupIngress", group, perms) -} - -// RevokeSecurityGroupEgress revokes egress permissions from a group -// -// see http://goo.gl/Zv4wh8 -func (ec2 *EC2) RevokeSecurityGroupEgress(group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) { - return ec2.authOrRevoke("RevokeSecurityGroupEgress", group, perms) -} - -func (ec2 *EC2) authOrRevoke(op string, group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) { - params := makeParams(op) - if group.Id != "" { - params["GroupId"] = group.Id - } else { - params["GroupName"] = group.Name - } - - for i, perm := range perms { - prefix := "IpPermissions." 
+ strconv.Itoa(i+1) - params[prefix+".IpProtocol"] = perm.Protocol - params[prefix+".FromPort"] = strconv.Itoa(perm.FromPort) - params[prefix+".ToPort"] = strconv.Itoa(perm.ToPort) - for j, ip := range perm.SourceIPs { - params[prefix+".IpRanges."+strconv.Itoa(j+1)+".CidrIp"] = ip - } - for j, g := range perm.SourceGroups { - subprefix := prefix + ".Groups." + strconv.Itoa(j+1) - if g.OwnerId != "" { - params[subprefix+".UserId"] = g.OwnerId - } - if g.Id != "" { - params[subprefix+".GroupId"] = g.Id - } else { - params[subprefix+".GroupName"] = g.Name - } - } - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// ResourceTag represents key-value metadata used to classify and organize -// EC2 instances. -// -// See http://goo.gl/bncl3 for more details -type Tag struct { - Key string `xml:"key"` - Value string `xml:"value"` -} - -// CreateTags adds or overwrites one or more tags for the specified taggable resources. -// For a list of tagable resources, see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html -// -// See http://goo.gl/Vmkqc for more details -func (ec2 *EC2) CreateTags(resourceIds []string, tags []Tag) (resp *SimpleResp, err error) { - params := makeParams("CreateTags") - addParamsList(params, "ResourceId", resourceIds) - - for j, tag := range tags { - params["Tag."+strconv.Itoa(j+1)+".Key"] = tag.Key - params["Tag."+strconv.Itoa(j+1)+".Value"] = tag.Value - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// DeleteTags deletes tags. 
-func (ec2 *EC2) DeleteTags(resourceIds []string, tags []Tag) (resp *SimpleResp, err error) { - params := makeParams("DeleteTags") - addParamsList(params, "ResourceId", resourceIds) - - for j, tag := range tags { - params["Tag."+strconv.Itoa(j+1)+".Key"] = tag.Key - - if tag.Value != "" { - params["Tag."+strconv.Itoa(j+1)+".Value"] = tag.Value - } - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return resp, nil -} - -type TagsResp struct { - RequestId string `xml:"requestId"` - Tags []ResourceTag `xml:"tagSet>item"` -} - -type ResourceTag struct { - Tag - ResourceId string `xml:"resourceId"` - ResourceType string `xml:"resourceType"` -} - -func (ec2 *EC2) Tags(filter *Filter) (*TagsResp, error) { - params := makeParams("DescribeTags") - filter.addParams(params) - - resp := &TagsResp{} - if err := ec2.query(params, resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Response to a StartInstances request. -// -// See http://goo.gl/awKeF for more details. -type StartInstanceResp struct { - RequestId string `xml:"requestId"` - StateChanges []InstanceStateChange `xml:"instancesSet>item"` -} - -// Response to a StopInstances request. -// -// See http://goo.gl/436dJ for more details. -type StopInstanceResp struct { - RequestId string `xml:"requestId"` - StateChanges []InstanceStateChange `xml:"instancesSet>item"` -} - -// StartInstances starts an Amazon EBS-backed AMI that you've previously stopped. -// -// See http://goo.gl/awKeF for more details. -func (ec2 *EC2) StartInstances(ids ...string) (resp *StartInstanceResp, err error) { - params := makeParams("StartInstances") - addParamsList(params, "InstanceId", ids) - resp = &StartInstanceResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// StopInstances requests stopping one or more Amazon EBS-backed instances. -// -// See http://goo.gl/436dJ for more details. 
-func (ec2 *EC2) StopInstances(ids ...string) (resp *StopInstanceResp, err error) { - params := makeParams("StopInstances") - addParamsList(params, "InstanceId", ids) - resp = &StopInstanceResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// RebootInstance requests a reboot of one or more instances. This operation is asynchronous; -// it only queues a request to reboot the specified instance(s). The operation will succeed -// if the instances are valid and belong to you. -// -// Requests to reboot terminated instances are ignored. -// -// See http://goo.gl/baoUf for more details. -func (ec2 *EC2) RebootInstances(ids ...string) (resp *SimpleResp, err error) { - params := makeParams("RebootInstances") - addParamsList(params, "InstanceId", ids) - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// The ModifyInstanceAttribute request parameters. -type ModifyInstance struct { - InstanceType string - BlockDevices []BlockDeviceMapping - DisableAPITermination bool - EbsOptimized bool - SecurityGroups []SecurityGroup - ShutdownBehavior string - KernelId string - RamdiskId string - SourceDestCheck bool - SriovNetSupport bool - UserData []byte - - SetSourceDestCheck bool -} - -// Response to a ModifyInstanceAttribute request. -// -// http://goo.gl/icuXh5 for more details. -type ModifyInstanceResp struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` -} - -// ModifyImageAttribute modifies the specified attribute of the specified instance. -// You can specify only one attribute at a time. To modify some attributes, the -// instance must be stopped. -// -// See http://goo.gl/icuXh5 for more details. 
-func (ec2 *EC2) ModifyInstance(instId string, options *ModifyInstance) (resp *ModifyInstanceResp, err error) { - params := makeParams("ModifyInstanceAttribute") - params["InstanceId"] = instId - addBlockDeviceParams("", params, options.BlockDevices) - - if options.InstanceType != "" { - params["InstanceType.Value"] = options.InstanceType - } - - if options.DisableAPITermination { - params["DisableApiTermination.Value"] = "true" - } - - if options.EbsOptimized { - params["EbsOptimized"] = "true" - } - - if options.ShutdownBehavior != "" { - params["InstanceInitiatedShutdownBehavior.Value"] = options.ShutdownBehavior - } - - if options.KernelId != "" { - params["Kernel.Value"] = options.KernelId - } - - if options.RamdiskId != "" { - params["Ramdisk.Value"] = options.RamdiskId - } - - if options.SourceDestCheck || options.SetSourceDestCheck { - if options.SourceDestCheck { - params["SourceDestCheck.Value"] = "true" - } else { - params["SourceDestCheck.Value"] = "false" - } - } - - if options.SriovNetSupport { - params["SriovNetSupport.Value"] = "simple" - } - - if options.UserData != nil { - userData := make([]byte, b64.EncodedLen(len(options.UserData))) - b64.Encode(userData, options.UserData) - params["UserData"] = string(userData) - } - - i := 1 - for _, g := range options.SecurityGroups { - if g.Id != "" { - params["GroupId."+strconv.Itoa(i)] = g.Id - i++ - } - } - - resp = &ModifyInstanceResp{} - err = ec2.query(params, resp) - if err != nil { - resp = nil - } - return -} - -// ---------------------------------------------------------------------------- -// VPC management functions and types. 
- -// The CreateVpc request parameters -// -// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpc.html -type CreateVpc struct { - CidrBlock string - InstanceTenancy string -} - -// Response to a CreateVpc request -type CreateVpcResp struct { - RequestId string `xml:"requestId"` - VPC VPC `xml:"vpc"` -} - -// The ModifyVpcAttribute request parameters. -// -// See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-DescribeVpcAttribute.html for more details. -type ModifyVpcAttribute struct { - EnableDnsSupport bool - EnableDnsHostnames bool - - SetEnableDnsSupport bool - SetEnableDnsHostnames bool -} - -// Response to a DescribeVpcAttribute request. -// -// See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-DescribeVpcAttribute.html for more details. -type VpcAttributeResp struct { - RequestId string `xml:"requestId"` - VpcId string `xml:"vpcId"` - EnableDnsSupport bool `xml:"enableDnsSupport>value"` - EnableDnsHostnames bool `xml:"enableDnsHostnames>value"` -} - -// CreateInternetGateway request parameters. 
-// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateInternetGateway.html -type CreateInternetGateway struct{} - -// CreateInternetGateway response -type CreateInternetGatewayResp struct { - RequestId string `xml:"requestId"` - InternetGateway InternetGateway `xml:"internetGateway"` -} - -// The CreateVpcPeeringConnection request parameters -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVpcPeeringConnection.html -type CreateVpcPeeringConnection struct { - PeerOwnerId string - PeerVpcId string - VpcId string -} - -// Response to a CreateVpcPeeringConnection -type CreateVpcPeeringConnectionResp struct { - RequestId string `xml:"requestId"` - VpcPeeringConnection VpcPeeringConnection `xml:"vpcPeeringConnection"` -} - -// The AcceptVpcPeeringConnection request parameters -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AcceptVpcPeeringConnection.html -type AcceptVpcPeeringConnection struct { - VpcPeeringConnectionId string `xml:"vpcPeeringConnectionId"` -} - -// Response to a AcceptVpcPeeringConnection request. -type AcceptVpcPeeringConnectionResp struct { - RequestId string `xml:"requestId"` - VpcPeeringConnection VpcPeeringConnection `xml:"vpcPeeringConnection"` -} - -// The DeleteVpcPeeringConnection request -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteVpcPeeringConnection.html -type DeleteVpcPeeringConnection struct { - VpcPeeringConnectionId string `xml:"vpcPeeringConnectionId"` -} - -// Response to a DeleteVpcPeeringConnection request. -type DeleteVpcPeeringConnectionResp struct { - RequestId string `xml:"requestId"` -} - -// Response to a DescribeVpcPeeringConnection request. 
-type DescribeVpcPeeringConnectionResp struct { - RequestId string `xml:"requestId"` - VpcPeeringConnections []VpcPeeringConnection `xml:"vpcPeeringConnectionSet>item"` -} - -// The RejectVpcPeeringConnection request -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RejectVpcPeeringConnection.html -type RejectVpcPeeringConnection struct { - VpcPeeringConnectionId string `xml:"vpcPeeringConnectionId"` -} - -// Response to a RejectVpcPeeringConnection request. -type RejectVpcPeeringConnectionResp struct { - RequestId string `xml:"requestId"` -} - -// The CreateRouteTable request parameters. -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateRouteTable.html -type CreateRouteTable struct { - VpcId string -} - -// Response to a CreateRouteTable request. -type CreateRouteTableResp struct { - RequestId string `xml:"requestId"` - RouteTable RouteTable `xml:"routeTable"` -} - -// CreateRoute request parameters -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateRoute.html -type CreateRoute struct { - RouteTableId string - DestinationCidrBlock string - GatewayId string - InstanceId string - NetworkInterfaceId string - VpcPeeringConnectionId string -} -type ReplaceRoute struct { - RouteTableId string - DestinationCidrBlock string - GatewayId string - InstanceId string - NetworkInterfaceId string - VpcPeeringConnectionId string -} - -type AssociateRouteTableResp struct { - RequestId string `xml:"requestId"` - AssociationId string `xml:"associationId"` -} -type ReassociateRouteTableResp struct { - RequestId string `xml:"requestId"` - AssociationId string `xml:"newAssociationId"` -} - -// The CreateDhcpOptions request parameters -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateDhcpOptions.html -type CreateDhcpOptions struct { - DomainNameServers string - DomainName string - NtpServers string - NetbiosNameServers string - NetbiosNodeType string -} - -// Response to a 
CreateDhcpOptions request -type CreateDhcpOptionsResp struct { - RequestId string `xml:"requestId"` - DhcpOptions DhcpOptions `xml:"dhcpOptions"` -} - -// The CreateSubnet request parameters -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateSubnet.html -type CreateSubnet struct { - VpcId string - CidrBlock string - AvailabilityZone string -} - -// Response to a CreateSubnet request -type CreateSubnetResp struct { - RequestId string `xml:"requestId"` - Subnet Subnet `xml:"subnet"` -} - -// The ModifySubnetAttribute request parameters -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ModifySubnetAttribute.html -type ModifySubnetAttribute struct { - SubnetId string - MapPublicIpOnLaunch bool -} - -type ModifySubnetAttributeResp struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` -} - -// The CreateNetworkAcl request parameters -// -// http://goo.gl/BZmCRF -type CreateNetworkAcl struct { - VpcId string -} - -// Response to a CreateNetworkAcl request -type CreateNetworkAclResp struct { - RequestId string `xml:"requestId"` - NetworkAcl NetworkAcl `xml:"networkAcl"` -} - -// Response to CreateNetworkAclEntry request -type CreateNetworkAclEntryResp struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` -} - -// Response to a DescribeInternetGateways request. -type InternetGatewaysResp struct { - RequestId string `xml:"requestId"` - InternetGateways []InternetGateway `xml:"internetGatewaySet>item"` -} - -// Response to a DescribeRouteTables request. -type RouteTablesResp struct { - RequestId string `xml:"requestId"` - RouteTables []RouteTable `xml:"routeTableSet>item"` -} - -// Response to a DescribeVpcs request. 
-type VpcsResp struct { - RequestId string `xml:"requestId"` - VPCs []VPC `xml:"vpcSet>item"` -} - -// Internet Gateway -type InternetGateway struct { - InternetGatewayId string `xml:"internetGatewayId"` - Attachments []InternetGatewayAttachment `xml:"attachmentSet>item"` - Tags []Tag `xml:"tagSet>item"` -} - -type InternetGatewayAttachment struct { - VpcId string `xml:"vpcId"` - State string `xml:"state"` -} - -// vpc peering -type VpcPeeringConnection struct { - AccepterVpcInfo VpcPeeringConnectionVpcInfo `xml:"accepterVpcInfo"` - ExpirationTime string `xml:"expirationTime"` - RequesterVpcInfo VpcPeeringConnectionVpcInfo `xml:"requesterVpcInfo"` - Status VpcPeeringConnectionStateReason `xml:"status"` - Tags []Tag `xml:"tagSet>item"` - VpcPeeringConnectionId string `xml:"vpcPeeringConnectionId"` -} - -type VpcPeeringConnectionVpcInfo struct { - CidrBlock string `xml:"cidrBlock"` - OwnerId string `xml:"ownerId"` - VpcId string `xml:"vpcId"` -} - -type VpcPeeringConnectionStateReason struct { - Code string `xml:"code"` - Message string `xml:"message"` -} - -// Routing Table -type RouteTable struct { - RouteTableId string `xml:"routeTableId"` - VpcId string `xml:"vpcId"` - Associations []RouteTableAssociation `xml:"associationSet>item"` - Routes []Route `xml:"routeSet>item"` - Tags []Tag `xml:"tagSet>item"` -} - -type RouteTableAssociation struct { - AssociationId string `xml:"routeTableAssociationId"` - RouteTableId string `xml:"routeTableId"` - SubnetId string `xml:"subnetId"` - Main bool `xml:"main"` -} - -type Route struct { - DestinationCidrBlock string `xml:"destinationCidrBlock"` - GatewayId string `xml:"gatewayId"` - InstanceId string `xml:"instanceId"` - InstanceOwnerId string `xml:"instanceOwnerId"` - NetworkInterfaceId string `xml:"networkInterfaceId"` - State string `xml:"state"` - Origin string `xml:"origin"` - VpcPeeringConnectionId string `xml:"vpcPeeringConnectionId"` -} - -// Subnet -type Subnet struct { - SubnetId string `xml:"subnetId"` - State 
string `xml:"state"` - VpcId string `xml:"vpcId"` - CidrBlock string `xml:"cidrBlock"` - AvailableIpAddressCount int `xml:"availableIpAddressCount"` - AvailabilityZone string `xml:"availabilityZone"` - DefaultForAZ bool `xml:"defaultForAz"` - MapPublicIpOnLaunch bool `xml:"mapPublicIpOnLaunch"` - Tags []Tag `xml:"tagSet>item"` -} - -// DhcpOptions -type DhcpOptions struct { - DhcpOptionsId string `xml:"dhcpOptionsId"` - DhcpConfigurationSets DhcpConfigurationSet `xml:"dhcpConfigurationSet"` -} - -type DhcpConfigurationSet struct { - Tags []Tag `xml:"dhcpConfigurationSet>item"` -} - -// NetworkAcl represent network acl -type NetworkAcl struct { - NetworkAclId string `xml:"networkAclId"` - VpcId string `xml:"vpcId"` - Default string `xml:"default"` - EntrySet []NetworkAclEntry `xml:"entrySet>item"` - AssociationSet []NetworkAclAssociation `xml:"associationSet>item"` - Tags []Tag `xml:"tagSet>item"` -} - -// NetworkAclAssociation -type NetworkAclAssociation struct { - NetworkAclAssociationId string `xml:"networkAclAssociationId"` - NetworkAclId string `xml:"networkAclId"` - SubnetId string `xml:"subnetId"` -} - -// NetworkAclEntry represent a rule within NetworkAcl -type NetworkAclEntry struct { - RuleNumber int `xml:"ruleNumber"` - Protocol int `xml:"protocol"` - RuleAction string `xml:"ruleAction"` - Egress bool `xml:"egress"` - CidrBlock string `xml:"cidrBlock"` - IcmpCode IcmpCode `xml:"icmpTypeCode"` - PortRange PortRange `xml:"portRange"` -} - -// IcmpCode -type IcmpCode struct { - Code int `xml:"code"` - Type int `xml:"type"` -} - -// PortRange -type PortRange struct { - From int `xml:"from"` - To int `xml:"to"` -} - -// Response to describe NetworkAcls -type NetworkAclsResp struct { - RequestId string `xml:"requestId"` - NetworkAcls []NetworkAcl `xml:"networkAclSet>item"` -} - -// VPC represents a single VPC. 
-type VPC struct { - VpcId string `xml:"vpcId"` - State string `xml:"state"` - CidrBlock string `xml:"cidrBlock"` - DHCPOptionsID string `xml:"dhcpOptionsId"` - InstanceTenancy string `xml:"instanceTenancy"` - IsDefault bool `xml:"isDefault"` - Tags []Tag `xml:"tagSet>item"` -} - -// Response to a DescribeSubnets request. -type SubnetsResp struct { - RequestId string `xml:"requestId"` - Subnets []Subnet `xml:"subnetSet>item"` -} - -// Create a new VPC. -func (ec2 *EC2) CreateVpc(options *CreateVpc) (resp *CreateVpcResp, err error) { - params := makeParams("CreateVpc") - params["CidrBlock"] = options.CidrBlock - - if options.InstanceTenancy != "" { - params["InstanceTenancy"] = options.InstanceTenancy - } - - resp = &CreateVpcResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Delete a VPC. -func (ec2 *EC2) DeleteVpc(id string) (resp *SimpleResp, err error) { - params := makeParams("DeleteVpc") - params["VpcId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// DescribeVpcs -// -// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVpcs.html -func (ec2 *EC2) DescribeVpcs(ids []string, filter *Filter) (resp *VpcsResp, err error) { - params := makeParams("DescribeVpcs") - addParamsList(params, "VpcId", ids) - filter.addParams(params) - resp = &VpcsResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// VpcAttribute describes an attribute of a VPC. -// You can specify only one attribute at a time. -// Valid attributes are: -// enableDnsSupport | enableDnsHostnames -// -// See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-DescribeVpcAttribute.html for more details. 
-func (ec2 *EC2) VpcAttribute(vpcId, attribute string) (resp *VpcAttributeResp, err error) { - params := makeParams("DescribeVpcAttribute") - params["VpcId"] = vpcId - params["Attribute"] = attribute - - resp = &VpcAttributeResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ModifyVpcAttribute modifies the specified attribute of the specified VPC. -// -// See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-ModifyVpcAttribute.html for more details. -func (ec2 *EC2) ModifyVpcAttribute(vpcId string, options *ModifyVpcAttribute) (*SimpleResp, error) { - params := makeParams("ModifyVpcAttribute") - - params["VpcId"] = vpcId - - if options.SetEnableDnsSupport { - params["EnableDnsSupport.Value"] = strconv.FormatBool(options.EnableDnsSupport) - } - - if options.SetEnableDnsHostnames { - params["EnableDnsHostnames.Value"] = strconv.FormatBool(options.EnableDnsHostnames) - } - - resp := &SimpleResp{} - if err := ec2.query(params, resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Create a new subnet. -func (ec2 *EC2) CreateSubnet(options *CreateSubnet) (resp *CreateSubnetResp, err error) { - params := makeParams("CreateSubnet") - params["AvailabilityZone"] = options.AvailabilityZone - params["CidrBlock"] = options.CidrBlock - params["VpcId"] = options.VpcId - - resp = &CreateSubnetResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Delete a Subnet. 
-func (ec2 *EC2) DeleteSubnet(id string) (resp *SimpleResp, err error) { - params := makeParams("DeleteSubnet") - params["SubnetId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// ModifySubnetAttribute -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ModifySubnetAttribute.html -func (ec2 *EC2) ModifySubnetAttribute(options *ModifySubnetAttribute) (resp *ModifySubnetAttributeResp, err error) { - params := makeParams("ModifySubnetAttribute") - params["SubnetId"] = options.SubnetId - if options.MapPublicIpOnLaunch { - params["MapPublicIpOnLaunch.Value"] = "true" - } else { - params["MapPublicIpOnLaunch.Value"] = "false" - } - - resp = &ModifySubnetAttributeResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// DescribeSubnets -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSubnets.html -func (ec2 *EC2) DescribeSubnets(ids []string, filter *Filter) (resp *SubnetsResp, err error) { - params := makeParams("DescribeSubnets") - addParamsList(params, "SubnetId", ids) - filter.addParams(params) - - resp = &SubnetsResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Create DhcpOptions. -func (ec2 *EC2) CreateDhcpOptions(options *CreateDhcpOptions) (resp *CreateDhcpOptionsResp, err error) { - params := makeParams("CreateDhcpOptions") - params["DomainNameServers"] = options.DomainNameServers - params["DomainName"] = options.DomainName - params["NtpServers"] = options.NtpServers - params["NetbiosNameServers"] = options.NetbiosNameServers - params["NetbiosNodeType"] = options.NetbiosNodeType - - resp = &CreateDhcpOptionsResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Delete DhcpOptions. 
-func (ec2 *EC2) DeleteDhcpOptions(id string) (resp *SimpleResp, err error) { - params := makeParams("DeleteDhcpOptions") - params["DhcpOptionsId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Associate DhcpOptions to a VPC. -func (ec2 *EC2) AssociateDhcpOptions(dhcpOptionsId string, vpcId string) (resp *SimpleResp, err error) { - params := makeParams("AssociateDhcpOptions") - params["DhcpOptionsId"] = dhcpOptionsId - params["VpcId"] = vpcId - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// CreateNetworkAcl creates a network ACL in a VPC. -// -// http://goo.gl/51X7db -func (ec2 *EC2) CreateNetworkAcl(options *CreateNetworkAcl) (resp *CreateNetworkAclResp, err error) { - params := makeParams("CreateNetworkAcl") - params["VpcId"] = options.VpcId - - resp = &CreateNetworkAclResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// CreateNetworkAclEntry creates an entry (a rule) in a network ACL with the specified rule number. 
-// -// http://goo.gl/BtXhtj -func (ec2 *EC2) CreateNetworkAclEntry(networkAclId string, options *NetworkAclEntry) (resp *CreateNetworkAclEntryResp, err error) { - - params := makeParams("CreateNetworkAclEntry") - params["NetworkAclId"] = networkAclId - params["RuleNumber"] = strconv.Itoa(options.RuleNumber) - params["Protocol"] = strconv.Itoa(options.Protocol) - params["RuleAction"] = options.RuleAction - params["Egress"] = strconv.FormatBool(options.Egress) - params["CidrBlock"] = options.CidrBlock - if params["Protocol"] == "-1" { - params["Icmp.Type"] = strconv.Itoa(options.IcmpCode.Type) - params["Icmp.Code"] = strconv.Itoa(options.IcmpCode.Code) - } - params["PortRange.From"] = strconv.Itoa(options.PortRange.From) - params["PortRange.To"] = strconv.Itoa(options.PortRange.To) - - resp = &CreateNetworkAclEntryResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return resp, nil -} - -// NetworkAcls describes one or more of your network ACLs for given filter. -// -// http://goo.gl/mk9RsV -func (ec2 *EC2) NetworkAcls(networkAclIds []string, filter *Filter) (resp *NetworkAclsResp, err error) { - params := makeParams("DescribeNetworkAcls") - addParamsList(params, "NetworkAclId", networkAclIds) - filter.addParams(params) - resp = &NetworkAclsResp{} - if err = ec2.query(params, resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Response to a DeleteNetworkAcl request. -type DeleteNetworkAclResp struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` -} - -// DeleteNetworkAcl deletes the network ACL with specified id. -// -// http://goo.gl/nC78Wx -func (ec2 *EC2) DeleteNetworkAcl(id string) (resp *DeleteNetworkAclResp, err error) { - params := makeParams("DeleteNetworkAcl") - params["NetworkAclId"] = id - - resp = &DeleteNetworkAclResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// Response to a DeleteNetworkAclEntry request. 
-type DeleteNetworkAclEntryResp struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` -} - -// DeleteNetworkAclEntry deletes the specified ingress or egress entry (rule) from the specified network ACL. -// -// http://goo.gl/moQbE2 -func (ec2 *EC2) DeleteNetworkAclEntry(id string, ruleNumber int, egress bool) (resp *DeleteNetworkAclEntryResp, err error) { - params := makeParams("DeleteNetworkAclEntry") - params["NetworkAclId"] = id - params["RuleNumber"] = strconv.Itoa(ruleNumber) - params["Egress"] = strconv.FormatBool(egress) - - resp = &DeleteNetworkAclEntryResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -type ReplaceNetworkAclAssociationResponse struct { - RequestId string `xml:"requestId"` - NewAssociationId string `xml:"newAssociationId"` -} - -// ReplaceNetworkAclAssociation changes which network ACL a subnet is associated with. -// -// http://goo.gl/ar0MH5 -func (ec2 *EC2) ReplaceNetworkAclAssociation(associationId string, networkAclId string) (resp *ReplaceNetworkAclAssociationResponse, err error) { - params := makeParams("ReplaceNetworkAclAssociation") - params["NetworkAclId"] = networkAclId - params["AssociationId"] = associationId - - resp = &ReplaceNetworkAclAssociationResponse{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// Create a new internet gateway. -func (ec2 *EC2) CreateInternetGateway( - options *CreateInternetGateway) (resp *CreateInternetGatewayResp, err error) { - params := makeParams("CreateInternetGateway") - - resp = &CreateInternetGatewayResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Attach an InternetGateway. 
-func (ec2 *EC2) AttachInternetGateway(id, vpcId string) (resp *SimpleResp, err error) { - params := makeParams("AttachInternetGateway") - params["InternetGatewayId"] = id - params["VpcId"] = vpcId - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Detach an InternetGateway. -func (ec2 *EC2) DetachInternetGateway(id, vpcId string) (resp *SimpleResp, err error) { - params := makeParams("DetachInternetGateway") - params["InternetGatewayId"] = id - params["VpcId"] = vpcId - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Delete an InternetGateway. -func (ec2 *EC2) DeleteInternetGateway(id string) (resp *SimpleResp, err error) { - params := makeParams("DeleteInternetGateway") - params["InternetGatewayId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// DescribeInternetGateways -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInternetGateways.html -func (ec2 *EC2) DescribeInternetGateways(ids []string, filter *Filter) (resp *InternetGatewaysResp, err error) { - params := makeParams("DescribeInternetGateways") - addParamsList(params, "InternetGatewayId", ids) - filter.addParams(params) - - resp = &InternetGatewaysResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Create a new routing table. -func (ec2 *EC2) CreateRouteTable( - options *CreateRouteTable) (resp *CreateRouteTableResp, err error) { - params := makeParams("CreateRouteTable") - params["VpcId"] = options.VpcId - - resp = &CreateRouteTableResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Delete a RouteTable. 
-func (ec2 *EC2) DeleteRouteTable(id string) (resp *SimpleResp, err error) { - params := makeParams("DeleteRouteTable") - params["RouteTableId"] = id - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// DescribeRouteTables -// -// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeRouteTables.html -func (ec2 *EC2) DescribeRouteTables(ids []string, filter *Filter) (resp *RouteTablesResp, err error) { - params := makeParams("DescribeRouteTables") - addParamsList(params, "RouteTableId", ids) - filter.addParams(params) - - resp = &RouteTablesResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// Associate a routing table. -func (ec2 *EC2) AssociateRouteTable(id, subnetId string) (*AssociateRouteTableResp, error) { - params := makeParams("AssociateRouteTable") - params["RouteTableId"] = id - params["SubnetId"] = subnetId - - resp := &AssociateRouteTableResp{} - err := ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// Disassociate a routing table. -func (ec2 *EC2) DisassociateRouteTable(id string) (*SimpleResp, error) { - params := makeParams("DisassociateRouteTable") - params["AssociationId"] = id - - resp := &SimpleResp{} - err := ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// Re-associate a routing table. -func (ec2 *EC2) ReassociateRouteTable(id, routeTableId string) (*ReassociateRouteTableResp, error) { - params := makeParams("ReplaceRouteTableAssociation") - params["AssociationId"] = id - params["RouteTableId"] = routeTableId - - resp := &ReassociateRouteTableResp{} - err := ec2.query(params, resp) - if err != nil { - return nil, err - } - return resp, nil -} - -// Create a new route. 
-func (ec2 *EC2) CreateRoute(options *CreateRoute) (resp *SimpleResp, err error) { - params := makeParams("CreateRoute") - params["RouteTableId"] = options.RouteTableId - params["DestinationCidrBlock"] = options.DestinationCidrBlock - - if v := options.GatewayId; v != "" { - params["GatewayId"] = v - } - if v := options.InstanceId; v != "" { - params["InstanceId"] = v - } - if v := options.NetworkInterfaceId; v != "" { - params["NetworkInterfaceId"] = v - } - if v := options.VpcPeeringConnectionId; v != "" { - params["VpcPeeringConnectionId"] = v - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Delete a Route. -func (ec2 *EC2) DeleteRoute(routeTableId, cidr string) (resp *SimpleResp, err error) { - params := makeParams("DeleteRoute") - params["RouteTableId"] = routeTableId - params["DestinationCidrBlock"] = cidr - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Replace a new route. 
-func (ec2 *EC2) ReplaceRoute(options *ReplaceRoute) (resp *SimpleResp, err error) { - params := makeParams("ReplaceRoute") - params["RouteTableId"] = options.RouteTableId - params["DestinationCidrBlock"] = options.DestinationCidrBlock - - if v := options.GatewayId; v != "" { - params["GatewayId"] = v - } - if v := options.InstanceId; v != "" { - params["InstanceId"] = v - } - if v := options.NetworkInterfaceId; v != "" { - params["NetworkInterfaceId"] = v - } - if v := options.VpcPeeringConnectionId; v != "" { - params["VpcPeeringConnectionId"] = v - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// Create a vpc peering connection -func (ec2 *EC2) CreateVpcPeeringConnection( - options *CreateVpcPeeringConnection) (resp *CreateVpcPeeringConnectionResp, err error) { - params := makeParams("CreateVpcPeeringConnection") - params["PeerOwnerId"] = options.PeerOwnerId - params["PeerVpcId"] = options.PeerVpcId - params["VpcId"] = options.VpcId - - resp = &CreateVpcPeeringConnectionResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// AcceptVpcPeeringConnection -func (ec2 *EC2) AcceptVpcPeeringConnection(id string) (resp *AcceptVpcPeeringConnectionResp, err error) { - params := makeParams("AcceptVpcPeeringConnection") - params["VpcPeeringConnectionId"] = id - - resp = &AcceptVpcPeeringConnectionResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -func (ec2 *EC2) DeleteVpcPeeringConnection(id string) (resp *DeleteVpcPeeringConnectionResp, err error) { - params := makeParams("DeleteVpcPeeringConnection") - params["VpcPeeringConnectionId"] = id - - resp = &DeleteVpcPeeringConnectionResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// DescribeVpcPeeringConnection -func (ec2 *EC2) DescribeVpcPeeringConnection(ids []string, filter *Filter) (resp *DescribeVpcPeeringConnectionResp, err 
error) { - params := makeParams("DescribeVpcPeeringConnections") - addParamsList(params, "VpcPeeringConnectionId", ids) - filter.addParams(params) - resp = &DescribeVpcPeeringConnectionResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -// RejectVpcPeeringConnection -func (ec2 *EC2) RejectVpcPeeringConnection(id string) (resp *RejectVpcPeeringConnectionResp, err error) { - params := makeParams("RejectVpcPeeringConnection") - params["VpcPeeringConnectionId"] = id - - resp = &RejectVpcPeeringConnectionResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -// The ResetImageAttribute request parameters. -type ResetImageAttribute struct { - Attribute string -} - -// ResetImageAttribute resets an attribute of an AMI to its default value. -// -// http://goo.gl/r6ZCPm for more details. -func (ec2 *EC2) ResetImageAttribute(imageId string, options *ResetImageAttribute) (resp *SimpleResp, err error) { - params := makeParams("ResetImageAttribute") - params["ImageId"] = imageId - - if options.Attribute != "" { - params["Attribute"] = options.Attribute - } - - resp = &SimpleResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -type CreateCustomerGateway struct { - Type string - IpAddress string - BgpAsn int -} - -// Response to a CreateCustomerGateway request -type CreateCustomerGatewayResp struct { - RequestId string `xml:"requestId"` - CustomerGateway CustomerGateway `xml:"customerGateway"` -} - -type CustomerGateway struct { - CustomerGatewayId string `xml:"customerGatewayId"` - State string `xml:"state"` - Type string `xml:"type"` - IpAddress string `xml:"ipAddress"` - BgpAsn int `xml:"bgpAsn"` - Tags []Tag `xml:"tagSet>item"` -} - -type DescribeCustomerGatewaysResp struct { - RequestId string `xml:"requestId"` - CustomerGateways []CustomerGateway `xml:"customerGatewaySet>item"` -} - -//Create a customer gateway -func (ec2 *EC2) 
CreateCustomerGateway(options *CreateCustomerGateway) (resp *CreateCustomerGatewayResp, err error) { - params := makeParams("CreateCustomerGateway") - params["Type"] = options.Type - params["IpAddress"] = options.IpAddress - if options.BgpAsn != 0 { - params["BgpAsn"] = strconv.Itoa(options.BgpAsn) - } - - resp = &CreateCustomerGatewayResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - return -} - -func (ec2 *EC2) DescribeCustomerGateways(ids []string, filter *Filter) (resp *DescribeCustomerGatewaysResp, err error) { - params := makeParams("DescribeCustomerGateways") - addParamsList(params, "CustomerGatewayId", ids) - filter.addParams(params) - - resp = &DescribeCustomerGatewaysResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} - -type DeleteCustomerGatewayResp struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` -} - -func (ec2 *EC2) DeleteCustomerGateway(customerGatewayId string) (resp *DeleteCustomerGatewayResp, err error) { - params := makeParams("DeleteCustomerGateway") - params["CustomerGatewayId"] = customerGatewayId - - resp = &DeleteCustomerGatewayResp{} - err = ec2.query(params, resp) - if err != nil { - return nil, err - } - - return -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2i_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2i_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2i_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2i_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,204 +0,0 @@ -package ec2_test - -import ( - "crypto/rand" - "fmt" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/goamz/testutil" - . "github.com/motain/gocheck" -) - -// AmazonServer represents an Amazon EC2 server. 
-type AmazonServer struct { - auth aws.Auth -} - -func (s *AmazonServer) SetUp(c *C) { - auth, err := aws.EnvAuth() - if err != nil { - c.Fatal(err.Error()) - } - s.auth = auth -} - -// Suite cost per run: 0.02 USD -var _ = Suite(&AmazonClientSuite{}) - -// AmazonClientSuite tests the client against a live EC2 server. -type AmazonClientSuite struct { - srv AmazonServer - ClientTests -} - -func (s *AmazonClientSuite) SetUpSuite(c *C) { - if !testutil.Amazon { - c.Skip("AmazonClientSuite tests not enabled") - } - s.srv.SetUp(c) - s.ec2 = ec2.NewWithClient(s.srv.auth, aws.USEast, testutil.DefaultClient) -} - -// ClientTests defines integration tests designed to test the client. -// It is not used as a test suite in itself, but embedded within -// another type. -type ClientTests struct { - ec2 *ec2.EC2 -} - -var imageId = "ami-ccf405a5" // Ubuntu Maverick, i386, EBS store - -// Cost: 0.00 USD -func (s *ClientTests) TestRunInstancesError(c *C) { - options := ec2.RunInstances{ - ImageId: "ami-a6f504cf", // Ubuntu Maverick, i386, instance store - InstanceType: "t1.micro", // Doesn't work with micro, results in 400. 
- } - - resp, err := s.ec2.RunInstances(&options) - - c.Assert(resp, IsNil) - c.Assert(err, ErrorMatches, "AMI.*root device.*not supported.*") - - ec2err, ok := err.(*ec2.Error) - c.Assert(ok, Equals, true) - c.Assert(ec2err.StatusCode, Equals, 400) - c.Assert(ec2err.Code, Equals, "UnsupportedOperation") - c.Assert(ec2err.Message, Matches, "AMI.*root device.*not supported.*") - c.Assert(ec2err.RequestId, Matches, ".+") -} - -// Cost: 0.02 USD -func (s *ClientTests) TestRunAndTerminate(c *C) { - options := ec2.RunInstances{ - ImageId: imageId, - InstanceType: "t1.micro", - } - resp1, err := s.ec2.RunInstances(&options) - c.Assert(err, IsNil) - c.Check(resp1.ReservationId, Matches, "r-[0-9a-f]*") - c.Check(resp1.OwnerId, Matches, "[0-9]+") - c.Check(resp1.Instances, HasLen, 1) - c.Check(resp1.Instances[0].InstanceType, Equals, "t1.micro") - - instId := resp1.Instances[0].InstanceId - - resp2, err := s.ec2.Instances([]string{instId}, nil) - c.Assert(err, IsNil) - if c.Check(resp2.Reservations, HasLen, 1) && c.Check(len(resp2.Reservations[0].Instances), Equals, 1) { - inst := resp2.Reservations[0].Instances[0] - c.Check(inst.InstanceId, Equals, instId) - } - - resp3, err := s.ec2.TerminateInstances([]string{instId}) - c.Assert(err, IsNil) - c.Check(resp3.StateChanges, HasLen, 1) - c.Check(resp3.StateChanges[0].InstanceId, Equals, instId) - c.Check(resp3.StateChanges[0].CurrentState.Name, Equals, "shutting-down") - c.Check(resp3.StateChanges[0].CurrentState.Code, Equals, 32) -} - -// Cost: 0.00 USD -func (s *ClientTests) TestSecurityGroups(c *C) { - name := "goamz-test" - descr := "goamz security group for tests" - - // Clean it up, if a previous test left it around and avoid leaving it around. 
- s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name}) - defer s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name}) - - resp1, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr}) - c.Assert(err, IsNil) - c.Assert(resp1.RequestId, Matches, ".+") - c.Assert(resp1.Name, Equals, name) - c.Assert(resp1.Id, Matches, ".+") - - resp1, err = s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr}) - ec2err, _ := err.(*ec2.Error) - c.Assert(resp1, IsNil) - c.Assert(ec2err, NotNil) - c.Assert(ec2err.Code, Equals, "InvalidGroup.Duplicate") - - perms := []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 0, - ToPort: 1024, - SourceIPs: []string{"127.0.0.1/24"}, - }} - - resp2, err := s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms) - c.Assert(err, IsNil) - c.Assert(resp2.RequestId, Matches, ".+") - - resp3, err := s.ec2.SecurityGroups(ec2.SecurityGroupNames(name), nil) - c.Assert(err, IsNil) - c.Assert(resp3.RequestId, Matches, ".+") - c.Assert(resp3.Groups, HasLen, 1) - - g0 := resp3.Groups[0] - c.Assert(g0.Name, Equals, name) - c.Assert(g0.Description, Equals, descr) - c.Assert(g0.IPPerms, HasLen, 1) - c.Assert(g0.IPPerms[0].Protocol, Equals, "tcp") - c.Assert(g0.IPPerms[0].FromPort, Equals, 0) - c.Assert(g0.IPPerms[0].ToPort, Equals, 1024) - c.Assert(g0.IPPerms[0].SourceIPs, DeepEquals, []string{"127.0.0.1/24"}) - - resp2, err = s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name}) - c.Assert(err, IsNil) - c.Assert(resp2.RequestId, Matches, ".+") -} - -var sessionId = func() string { - buf := make([]byte, 8) - // if we have no randomness, we'll just make do, so ignore the error. - rand.Read(buf) - return fmt.Sprintf("%x", buf) -}() - -// sessionName reutrns a name that is probably -// unique to this test session. 
-func sessionName(prefix string) string { - return prefix + "-" + sessionId -} - -var allRegions = []aws.Region{ - aws.USEast, - aws.USWest, - aws.EUWest, - aws.EUCentral, - aws.APSoutheast, - aws.APNortheast, -} - -// Communicate with all EC2 endpoints to see if they are alive. -func (s *ClientTests) TestRegions(c *C) { - name := sessionName("goamz-region-test") - perms := []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 80, - ToPort: 80, - SourceIPs: []string{"127.0.0.1/32"}, - }} - errs := make(chan error, len(allRegions)) - for _, region := range allRegions { - go func(r aws.Region) { - e := ec2.NewWithClient(s.ec2.Auth, r, testutil.DefaultClient) - _, err := e.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms) - errs <- err - }(region) - } - for _ = range allRegions { - err := <-errs - if err != nil { - ec2_err, ok := err.(*ec2.Error) - if ok { - c.Check(ec2_err.Code, Matches, "InvalidGroup.NotFound") - } else { - c.Errorf("Non-EC2 error: %s", err) - } - } else { - c.Errorf("Test should have errored but it seems to have succeeded") - } - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2test/filter.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2test/filter.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2test/filter.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2test/filter.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -package ec2test - -import ( - "fmt" - "net/url" - "strings" -) - -// filter holds an ec2 filter. A filter maps an attribute to a set of -// possible values for that attribute. For an item to pass through the -// filter, every attribute of the item mentioned in the filter must match -// at least one of its given values. 
-type filter map[string][]string - -// newFilter creates a new filter from the Filter fields in the url form. -// -// The filtering is specified through a map of name=>values, where the -// name is a well-defined key identifying the data to be matched, -// and the list of values holds the possible values the filtered -// item can take for the key to be included in the -// result set. For example: -// -// Filter.1.Name=instance-type -// Filter.1.Value.1=m1.small -// Filter.1.Value.2=m1.large -// -func newFilter(form url.Values) filter { - // TODO return an error if the fields are not well formed? - names := make(map[int]string) - values := make(map[int][]string) - maxId := 0 - for name, fvalues := range form { - var rest string - var id int - if x, _ := fmt.Sscanf(name, "Filter.%d.%s", &id, &rest); x != 2 { - continue - } - if id > maxId { - maxId = id - } - if rest == "Name" { - names[id] = fvalues[0] - continue - } - if !strings.HasPrefix(rest, "Value.") { - continue - } - values[id] = append(values[id], fvalues[0]) - } - - f := make(filter) - for id, name := range names { - f[name] = values[id] - } - return f -} - -func notDigit(r rune) bool { - return r < '0' || r > '9' -} - -// filterable represents an object that can be passed through a filter. -type filterable interface { - // matchAttr returns true if given attribute of the - // object matches value. It returns an error if the - // attribute is not recognised or the value is malformed. - matchAttr(attr, value string) (bool, error) -} - -// ok returns true if x passes through the filter. 
-func (f filter) ok(x filterable) (bool, error) { -next: - for a, vs := range f { - for _, v := range vs { - if ok, err := x.matchAttr(a, v); ok { - continue next - } else if err != nil { - return false, fmt.Errorf("bad attribute or value %q=%q for type %T: %v", a, v, x, err) - } - } - return false, nil - } - return true, nil -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2test/server.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2test/server.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2test/server.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2test/server.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,993 +0,0 @@ -// The ec2test package implements a fake EC2 provider with -// the capability of inducing errors on any given operation, -// and retrospectively determining what operations have been -// carried out. -package ec2test - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "github.com/mitchellh/goamz/ec2" - "io" - "net" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" - "sync" -) - -var b64 = base64.StdEncoding - -// Action represents a request that changes the ec2 state. -type Action struct { - RequestId string - - // Request holds the requested action as a url.Values instance - Request url.Values - - // If the action succeeded, Response holds the value that - // was marshalled to build the XML response for the request. - Response interface{} - - // If the action failed, Err holds an error giving details of the failure. - Err *ec2.Error -} - -// TODO possible other things: -// - some virtual time stamp interface, so a client -// can ask for all actions after a certain virtual time. - -// Server implements an EC2 simulator for use in testing. 
-type Server struct { - url string - listener net.Listener - mu sync.Mutex - reqs []*Action - - instances map[string]*Instance // id -> instance - reservations map[string]*reservation // id -> reservation - groups map[string]*securityGroup // id -> group - maxId counter - reqId counter - reservationId counter - groupId counter - initialInstanceState ec2.InstanceState -} - -// reservation holds a simulated ec2 reservation. -type reservation struct { - id string - instances map[string]*Instance - groups []*securityGroup -} - -// instance holds a simulated ec2 instance -type Instance struct { - // UserData holds the data that was passed to the RunInstances request - // when the instance was started. - UserData []byte - id string - imageId string - reservation *reservation - instType string - state ec2.InstanceState -} - -// permKey represents permission for a given security -// group or IP address (but not both) to access a given range of -// ports. Equality of permKeys is used in the implementation of -// permission sets, relying on the uniqueness of securityGroup -// instances. -type permKey struct { - protocol string - fromPort int - toPort int - group *securityGroup - ipAddr string -} - -// securityGroup holds a simulated ec2 security group. -// Instances of securityGroup should only be created through -// Server.createSecurityGroup to ensure that groups can be -// compared by pointer value. 
-type securityGroup struct { - id string - name string - description string - - perms map[permKey]bool -} - -func (g *securityGroup) ec2SecurityGroup() ec2.SecurityGroup { - return ec2.SecurityGroup{ - Name: g.name, - Id: g.id, - } -} - -func (g *securityGroup) matchAttr(attr, value string) (ok bool, err error) { - switch attr { - case "description": - return g.description == value, nil - case "group-id": - return g.id == value, nil - case "group-name": - return g.name == value, nil - case "ip-permission.cidr": - return g.hasPerm(func(k permKey) bool { return k.ipAddr == value }), nil - case "ip-permission.group-name": - return g.hasPerm(func(k permKey) bool { - return k.group != nil && k.group.name == value - }), nil - case "ip-permission.from-port": - port, err := strconv.Atoi(value) - if err != nil { - return false, err - } - return g.hasPerm(func(k permKey) bool { return k.fromPort == port }), nil - case "ip-permission.to-port": - port, err := strconv.Atoi(value) - if err != nil { - return false, err - } - return g.hasPerm(func(k permKey) bool { return k.toPort == port }), nil - case "ip-permission.protocol": - return g.hasPerm(func(k permKey) bool { return k.protocol == value }), nil - case "owner-id": - return value == ownerId, nil - } - return false, fmt.Errorf("unknown attribute %q", attr) -} - -func (g *securityGroup) hasPerm(test func(k permKey) bool) bool { - for k := range g.perms { - if test(k) { - return true - } - } - return false -} - -// ec2Perms returns the list of EC2 permissions granted -// to g. It groups permissions by port range and protocol. -func (g *securityGroup) ec2Perms() (perms []ec2.IPPerm) { - // The grouping is held in result. We use permKey for convenience, - // (ensuring that the group and ipAddr of each key is zero). For - // each protocol/port range combination, we build up the permission - // set in the associated value. 
- result := make(map[permKey]*ec2.IPPerm) - for k := range g.perms { - groupKey := k - groupKey.group = nil - groupKey.ipAddr = "" - - ec2p := result[groupKey] - if ec2p == nil { - ec2p = &ec2.IPPerm{ - Protocol: k.protocol, - FromPort: k.fromPort, - ToPort: k.toPort, - } - result[groupKey] = ec2p - } - if k.group != nil { - ec2p.SourceGroups = append(ec2p.SourceGroups, - ec2.UserSecurityGroup{ - Id: k.group.id, - Name: k.group.name, - OwnerId: ownerId, - }) - } else { - ec2p.SourceIPs = append(ec2p.SourceIPs, k.ipAddr) - } - } - for _, ec2p := range result { - perms = append(perms, *ec2p) - } - return -} - -var actions = map[string]func(*Server, http.ResponseWriter, *http.Request, string) interface{}{ - "RunInstances": (*Server).runInstances, - "TerminateInstances": (*Server).terminateInstances, - "DescribeInstances": (*Server).describeInstances, - "CreateSecurityGroup": (*Server).createSecurityGroup, - "DescribeSecurityGroups": (*Server).describeSecurityGroups, - "DeleteSecurityGroup": (*Server).deleteSecurityGroup, - "AuthorizeSecurityGroupIngress": (*Server).authorizeSecurityGroupIngress, - "RevokeSecurityGroupIngress": (*Server).revokeSecurityGroupIngress, -} - -const ownerId = "9876" - -// newAction allocates a new action and adds it to the -// recorded list of server actions. -func (srv *Server) newAction() *Action { - srv.mu.Lock() - defer srv.mu.Unlock() - - a := new(Action) - srv.reqs = append(srv.reqs, a) - return a -} - -// NewServer returns a new server. -func NewServer() (*Server, error) { - srv := &Server{ - instances: make(map[string]*Instance), - groups: make(map[string]*securityGroup), - reservations: make(map[string]*reservation), - initialInstanceState: Pending, - } - - // Add default security group. 
- g := &securityGroup{ - name: "default", - description: "default group", - id: fmt.Sprintf("sg-%d", srv.groupId.next()), - } - g.perms = map[permKey]bool{ - permKey{ - protocol: "icmp", - fromPort: -1, - toPort: -1, - group: g, - }: true, - permKey{ - protocol: "tcp", - fromPort: 0, - toPort: 65535, - group: g, - }: true, - permKey{ - protocol: "udp", - fromPort: 0, - toPort: 65535, - group: g, - }: true, - } - srv.groups[g.id] = g - - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, fmt.Errorf("cannot listen on localhost: %v", err) - } - srv.listener = l - - srv.url = "http://" + l.Addr().String() - - // we use HandlerFunc rather than *Server directly so that we - // can avoid exporting HandlerFunc from *Server. - go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - srv.serveHTTP(w, req) - })) - return srv, nil -} - -// Quit closes down the server. -func (srv *Server) Quit() { - srv.listener.Close() -} - -// SetInitialInstanceState sets the state that any new instances will be started in. -func (srv *Server) SetInitialInstanceState(state ec2.InstanceState) { - srv.mu.Lock() - srv.initialInstanceState = state - srv.mu.Unlock() -} - -// URL returns the URL of the server. -func (srv *Server) URL() string { - return srv.url -} - -// serveHTTP serves the EC2 protocol. -func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) { - req.ParseForm() - - a := srv.newAction() - a.RequestId = fmt.Sprintf("req%d", srv.reqId.next()) - a.Request = req.Form - - // Methods on Server that deal with parsing user data - // may fail. To save on error handling code, we allow these - // methods to call fatalf, which will panic with an *ec2.Error - // which will be caught here and returned - // to the client as a properly formed EC2 error. 
- defer func() { - switch err := recover().(type) { - case *ec2.Error: - a.Err = err - err.RequestId = a.RequestId - writeError(w, err) - case nil: - default: - panic(err) - } - }() - - f := actions[req.Form.Get("Action")] - if f == nil { - fatalf(400, "InvalidParameterValue", "Unrecognized Action") - } - - response := f(srv, w, req, a.RequestId) - a.Response = response - - w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`) - xmlMarshal(w, response) -} - -// Instance returns the instance for the given instance id. -// It returns nil if there is no such instance. -func (srv *Server) Instance(id string) *Instance { - srv.mu.Lock() - defer srv.mu.Unlock() - return srv.instances[id] -} - -// writeError writes an appropriate error response. -// TODO how should we deal with errors when the -// error itself is potentially generated by backend-agnostic -// code? -func writeError(w http.ResponseWriter, err *ec2.Error) { - // Error encapsulates an error returned by EC2. - // TODO merge with ec2.Error when xml supports ignoring a field. - type ec2error struct { - Code string // EC2 error code ("UnsupportedOperation", ...) - Message string // The human-oriented error message - RequestId string - } - - type Response struct { - RequestId string - Errors []ec2error `xml:"Errors>Error"` - } - - w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`) - w.WriteHeader(err.StatusCode) - xmlMarshal(w, Response{ - RequestId: err.RequestId, - Errors: []ec2error{{ - Code: err.Code, - Message: err.Message, - }}, - }) -} - -// xmlMarshal is the same as xml.Marshal except that -// it panics on error. The marshalling should not fail, -// but we want to know if it does. 
-func xmlMarshal(w io.Writer, x interface{}) { - if err := xml.NewEncoder(w).Encode(x); err != nil { - panic(fmt.Errorf("error marshalling %#v: %v", x, err)) - } -} - -// formToGroups parses a set of SecurityGroup form values -// as found in a RunInstances request, and returns the resulting -// slice of security groups. -// It calls fatalf if a group is not found. -func (srv *Server) formToGroups(form url.Values) []*securityGroup { - var groups []*securityGroup - for name, values := range form { - switch { - case strings.HasPrefix(name, "SecurityGroupId."): - if g := srv.groups[values[0]]; g != nil { - groups = append(groups, g) - } else { - fatalf(400, "InvalidGroup.NotFound", "unknown group id %q", values[0]) - } - case strings.HasPrefix(name, "SecurityGroup."): - var found *securityGroup - for _, g := range srv.groups { - if g.name == values[0] { - found = g - } - } - if found == nil { - fatalf(400, "InvalidGroup.NotFound", "unknown group name %q", values[0]) - } - groups = append(groups, found) - } - } - return groups -} - -// runInstances implements the EC2 RunInstances entry point. -func (srv *Server) runInstances(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - min := atoi(req.Form.Get("MinCount")) - max := atoi(req.Form.Get("MaxCount")) - if min < 0 || max < 1 { - fatalf(400, "InvalidParameterValue", "bad values for MinCount or MaxCount") - } - if min > max { - fatalf(400, "InvalidParameterCombination", "MinCount is greater than MaxCount") - } - var userData []byte - if data := req.Form.Get("UserData"); data != "" { - var err error - userData, err = b64.DecodeString(data) - if err != nil { - fatalf(400, "InvalidParameterValue", "bad UserData value: %v", err) - } - } - - // TODO attributes still to consider: - // ImageId: accept anything, we can verify later - // KeyName ? - // InstanceType ? - // KernelId ? - // RamdiskId ? - // AvailZone ? - // GroupName tag - // Monitoring ignore? - // SubnetId ? 
- // DisableAPITermination bool - // ShutdownBehavior string - // PrivateIPAddress string - - srv.mu.Lock() - defer srv.mu.Unlock() - - // make sure that form fields are correct before creating the reservation. - instType := req.Form.Get("InstanceType") - imageId := req.Form.Get("ImageId") - - r := srv.newReservation(srv.formToGroups(req.Form)) - - var resp ec2.RunInstancesResp - resp.RequestId = reqId - resp.ReservationId = r.id - resp.OwnerId = ownerId - - for i := 0; i < max; i++ { - inst := srv.newInstance(r, instType, imageId, srv.initialInstanceState) - inst.UserData = userData - resp.Instances = append(resp.Instances, inst.ec2instance()) - } - return &resp -} - -func (srv *Server) group(group ec2.SecurityGroup) *securityGroup { - if group.Id != "" { - return srv.groups[group.Id] - } - for _, g := range srv.groups { - if g.name == group.Name { - return g - } - } - return nil -} - -// NewInstances creates n new instances in srv with the given instance type, -// image ID, initial state and security groups. If any group does not already -// exist, it will be created. NewInstances returns the ids of the new instances. 
-func (srv *Server) NewInstances(n int, instType string, imageId string, state ec2.InstanceState, groups []ec2.SecurityGroup) []string { - srv.mu.Lock() - defer srv.mu.Unlock() - - rgroups := make([]*securityGroup, len(groups)) - for i, group := range groups { - g := srv.group(group) - if g == nil { - fatalf(400, "InvalidGroup.NotFound", "no such group %v", g) - } - rgroups[i] = g - } - r := srv.newReservation(rgroups) - - ids := make([]string, n) - for i := 0; i < n; i++ { - inst := srv.newInstance(r, instType, imageId, state) - ids[i] = inst.id - } - return ids -} - -func (srv *Server) newInstance(r *reservation, instType string, imageId string, state ec2.InstanceState) *Instance { - inst := &Instance{ - id: fmt.Sprintf("i-%d", srv.maxId.next()), - instType: instType, - imageId: imageId, - state: state, - reservation: r, - } - srv.instances[inst.id] = inst - r.instances[inst.id] = inst - return inst -} - -func (srv *Server) newReservation(groups []*securityGroup) *reservation { - r := &reservation{ - id: fmt.Sprintf("r-%d", srv.reservationId.next()), - instances: make(map[string]*Instance), - groups: groups, - } - - srv.reservations[r.id] = r - return r -} - -func (srv *Server) terminateInstances(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - srv.mu.Lock() - defer srv.mu.Unlock() - var resp ec2.TerminateInstancesResp - resp.RequestId = reqId - var insts []*Instance - for attr, vals := range req.Form { - if strings.HasPrefix(attr, "InstanceId.") { - id := vals[0] - inst := srv.instances[id] - if inst == nil { - fatalf(400, "InvalidInstanceID.NotFound", "no such instance id %q", id) - } - insts = append(insts, inst) - } - } - for _, inst := range insts { - resp.StateChanges = append(resp.StateChanges, inst.terminate()) - } - return &resp -} - -func (inst *Instance) terminate() (d ec2.InstanceStateChange) { - d.PreviousState = inst.state - inst.state = ShuttingDown - d.CurrentState = inst.state - d.InstanceId = inst.id - return d -} - -func 
(inst *Instance) ec2instance() ec2.Instance { - return ec2.Instance{ - InstanceId: inst.id, - InstanceType: inst.instType, - ImageId: inst.imageId, - DNSName: fmt.Sprintf("%s.example.com", inst.id), - // TODO the rest - } -} - -func (inst *Instance) matchAttr(attr, value string) (ok bool, err error) { - switch attr { - case "architecture": - return value == "i386", nil - case "instance-id": - return inst.id == value, nil - case "group-id": - for _, g := range inst.reservation.groups { - if g.id == value { - return true, nil - } - } - return false, nil - case "group-name": - for _, g := range inst.reservation.groups { - if g.name == value { - return true, nil - } - } - return false, nil - case "image-id": - return value == inst.imageId, nil - case "instance-state-code": - code, err := strconv.Atoi(value) - if err != nil { - return false, err - } - return code&0xff == inst.state.Code, nil - case "instance-state-name": - return value == inst.state.Name, nil - } - return false, fmt.Errorf("unknown attribute %q", attr) -} - -var ( - Pending = ec2.InstanceState{0, "pending"} - Running = ec2.InstanceState{16, "running"} - ShuttingDown = ec2.InstanceState{32, "shutting-down"} - Terminated = ec2.InstanceState{16, "terminated"} - Stopped = ec2.InstanceState{16, "stopped"} -) - -func (srv *Server) createSecurityGroup(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - name := req.Form.Get("GroupName") - if name == "" { - fatalf(400, "InvalidParameterValue", "empty security group name") - } - srv.mu.Lock() - defer srv.mu.Unlock() - if srv.group(ec2.SecurityGroup{Name: name}) != nil { - fatalf(400, "InvalidGroup.Duplicate", "group %q already exists", name) - } - g := &securityGroup{ - name: name, - description: req.Form.Get("GroupDescription"), - id: fmt.Sprintf("sg-%d", srv.groupId.next()), - perms: make(map[permKey]bool), - } - srv.groups[g.id] = g - // we define a local type for this because ec2.CreateSecurityGroupResp - // contains SecurityGroup, but the 
response to this request - // should not contain the security group name. - type CreateSecurityGroupResponse struct { - RequestId string `xml:"requestId"` - Return bool `xml:"return"` - GroupId string `xml:"groupId"` - } - r := &CreateSecurityGroupResponse{ - RequestId: reqId, - Return: true, - GroupId: g.id, - } - return r -} - -func (srv *Server) notImplemented(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - fatalf(500, "InternalError", "not implemented") - panic("not reached") -} - -func (srv *Server) describeInstances(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - srv.mu.Lock() - defer srv.mu.Unlock() - insts := make(map[*Instance]bool) - for name, vals := range req.Form { - if !strings.HasPrefix(name, "InstanceId.") { - continue - } - inst := srv.instances[vals[0]] - if inst == nil { - fatalf(400, "InvalidInstanceID.NotFound", "instance %q not found", vals[0]) - } - insts[inst] = true - } - - f := newFilter(req.Form) - - var resp ec2.InstancesResp - resp.RequestId = reqId - for _, r := range srv.reservations { - var instances []ec2.Instance - for _, inst := range r.instances { - if len(insts) > 0 && !insts[inst] { - continue - } - ok, err := f.ok(inst) - if ok { - instances = append(instances, inst.ec2instance()) - } else if err != nil { - fatalf(400, "InvalidParameterValue", "describe instances: %v", err) - } - } - if len(instances) > 0 { - var groups []ec2.SecurityGroup - for _, g := range r.groups { - groups = append(groups, g.ec2SecurityGroup()) - } - resp.Reservations = append(resp.Reservations, ec2.Reservation{ - ReservationId: r.id, - OwnerId: ownerId, - Instances: instances, - SecurityGroups: groups, - }) - } - } - return &resp -} - -func (srv *Server) describeSecurityGroups(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - // BUG similar bug to describeInstances, but for GroupName and GroupId - srv.mu.Lock() - defer srv.mu.Unlock() - - var groups []*securityGroup - for name, vals 
:= range req.Form { - var g ec2.SecurityGroup - switch { - case strings.HasPrefix(name, "GroupName."): - g.Name = vals[0] - case strings.HasPrefix(name, "GroupId."): - g.Id = vals[0] - default: - continue - } - sg := srv.group(g) - if sg == nil { - fatalf(400, "InvalidGroup.NotFound", "no such group %v", g) - } - groups = append(groups, sg) - } - if len(groups) == 0 { - for _, g := range srv.groups { - groups = append(groups, g) - } - } - - f := newFilter(req.Form) - var resp ec2.SecurityGroupsResp - resp.RequestId = reqId - for _, group := range groups { - ok, err := f.ok(group) - if ok { - resp.Groups = append(resp.Groups, ec2.SecurityGroupInfo{ - OwnerId: ownerId, - SecurityGroup: group.ec2SecurityGroup(), - Description: group.description, - IPPerms: group.ec2Perms(), - }) - } else if err != nil { - fatalf(400, "InvalidParameterValue", "describe security groups: %v", err) - } - } - return &resp -} - -func (srv *Server) authorizeSecurityGroupIngress(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - srv.mu.Lock() - defer srv.mu.Unlock() - g := srv.group(ec2.SecurityGroup{ - Name: req.Form.Get("GroupName"), - Id: req.Form.Get("GroupId"), - }) - if g == nil { - fatalf(400, "InvalidGroup.NotFound", "group not found") - } - perms := srv.parsePerms(req) - - for _, p := range perms { - if g.perms[p] { - fatalf(400, "InvalidPermission.Duplicate", "Permission has already been authorized on the specified group") - } - } - for _, p := range perms { - g.perms[p] = true - } - return &ec2.SimpleResp{ - XMLName: xml.Name{"", "AuthorizeSecurityGroupIngressResponse"}, - RequestId: reqId, - } -} - -func (srv *Server) revokeSecurityGroupIngress(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - srv.mu.Lock() - defer srv.mu.Unlock() - g := srv.group(ec2.SecurityGroup{ - Name: req.Form.Get("GroupName"), - Id: req.Form.Get("GroupId"), - }) - if g == nil { - fatalf(400, "InvalidGroup.NotFound", "group not found") - } - perms := 
srv.parsePerms(req) - - // Note EC2 does not give an error if asked to revoke an authorization - // that does not exist. - for _, p := range perms { - delete(g.perms, p) - } - return &ec2.SimpleResp{ - XMLName: xml.Name{"", "RevokeSecurityGroupIngressResponse"}, - RequestId: reqId, - } -} - -var secGroupPat = regexp.MustCompile(`^sg-[a-z0-9]+$`) -var ipPat = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+/[0-9]+$`) -var ownerIdPat = regexp.MustCompile(`^[0-9]+$`) - -// parsePerms returns a slice of permKey values extracted -// from the permission fields in req. -func (srv *Server) parsePerms(req *http.Request) []permKey { - // perms maps an index found in the form to its associated - // IPPerm. For instance, the form value with key - // "IpPermissions.3.FromPort" will be stored in perms[3].FromPort - perms := make(map[int]ec2.IPPerm) - - type subgroupKey struct { - id1, id2 int - } - // Each IPPerm can have many source security groups. The form key - // for a source security group contains two indices: the index - // of the IPPerm and the sub-index of the security group. The - // sourceGroups map maps from a subgroupKey containing these - // two indices to the associated security group. For instance, - // the form value with key "IPPermissions.3.Groups.2.GroupName" - // will be stored in sourceGroups[subgroupKey{3, 2}].Name. - sourceGroups := make(map[subgroupKey]ec2.UserSecurityGroup) - - // For each value in the form we store its associated information in the - // above maps. The maps are necessary because the form keys may - // arrive in any order, and the indices are not - // necessarily sequential or even small. 
- for name, vals := range req.Form { - val := vals[0] - var id1 int - var rest string - if x, _ := fmt.Sscanf(name, "IpPermissions.%d.%s", &id1, &rest); x != 2 { - continue - } - ec2p := perms[id1] - switch { - case rest == "FromPort": - ec2p.FromPort = atoi(val) - case rest == "ToPort": - ec2p.ToPort = atoi(val) - case rest == "IpProtocol": - switch val { - case "tcp", "udp", "icmp": - ec2p.Protocol = val - default: - // check it's a well formed number - atoi(val) - ec2p.Protocol = val - } - case strings.HasPrefix(rest, "Groups."): - k := subgroupKey{id1: id1} - if x, _ := fmt.Sscanf(rest[len("Groups."):], "%d.%s", &k.id2, &rest); x != 2 { - continue - } - g := sourceGroups[k] - switch rest { - case "UserId": - // BUG if the user id is blank, this does not conform to the - // way that EC2 handles it - a specified but blank owner id - // can cause RevokeSecurityGroupIngress to fail with - // "group not found" even if the security group id has been - // correctly specified. - // By failing here, we ensure that we fail early in this case. 
- if !ownerIdPat.MatchString(val) { - fatalf(400, "InvalidUserID.Malformed", "Invalid user ID: %q", val) - } - g.OwnerId = val - case "GroupName": - g.Name = val - case "GroupId": - if !secGroupPat.MatchString(val) { - fatalf(400, "InvalidGroupId.Malformed", "Invalid group ID: %q", val) - } - g.Id = val - default: - fatalf(400, "UnknownParameter", "unknown parameter %q", name) - } - sourceGroups[k] = g - case strings.HasPrefix(rest, "IpRanges."): - var id2 int - if x, _ := fmt.Sscanf(rest[len("IpRanges."):], "%d.%s", &id2, &rest); x != 2 { - continue - } - switch rest { - case "CidrIp": - if !ipPat.MatchString(val) { - fatalf(400, "InvalidPermission.Malformed", "Invalid IP range: %q", val) - } - ec2p.SourceIPs = append(ec2p.SourceIPs, val) - default: - fatalf(400, "UnknownParameter", "unknown parameter %q", name) - } - default: - fatalf(400, "UnknownParameter", "unknown parameter %q", name) - } - perms[id1] = ec2p - } - // Associate each set of source groups with its IPPerm. - for k, g := range sourceGroups { - p := perms[k.id1] - p.SourceGroups = append(p.SourceGroups, g) - perms[k.id1] = p - } - - // Now that we have built up the IPPerms we need, we check for - // parameter errors and build up a permKey for each permission, - // looking up security groups from srv as we do so. 
- var result []permKey - for _, p := range perms { - if p.FromPort > p.ToPort { - fatalf(400, "InvalidParameterValue", "invalid port range") - } - k := permKey{ - protocol: p.Protocol, - fromPort: p.FromPort, - toPort: p.ToPort, - } - for _, g := range p.SourceGroups { - if g.OwnerId != "" && g.OwnerId != ownerId { - fatalf(400, "InvalidGroup.NotFound", "group %q not found", g.Name) - } - var ec2g ec2.SecurityGroup - switch { - case g.Id != "": - ec2g.Id = g.Id - case g.Name != "": - ec2g.Name = g.Name - } - k.group = srv.group(ec2g) - if k.group == nil { - fatalf(400, "InvalidGroup.NotFound", "group %v not found", g) - } - result = append(result, k) - } - k.group = nil - for _, ip := range p.SourceIPs { - k.ipAddr = ip - result = append(result, k) - } - } - return result -} - -func (srv *Server) deleteSecurityGroup(w http.ResponseWriter, req *http.Request, reqId string) interface{} { - srv.mu.Lock() - defer srv.mu.Unlock() - g := srv.group(ec2.SecurityGroup{ - Name: req.Form.Get("GroupName"), - Id: req.Form.Get("GroupId"), - }) - if g == nil { - fatalf(400, "InvalidGroup.NotFound", "group not found") - } - for _, r := range srv.reservations { - for _, h := range r.groups { - if h == g && r.hasRunningMachine() { - fatalf(500, "InvalidGroup.InUse", "group is currently in use by a running instance") - } - } - } - for _, sg := range srv.groups { - // If a group refers to itself, it's ok to delete it. 
- if sg == g { - continue - } - for k := range sg.perms { - if k.group == g { - fatalf(500, "InvalidGroup.InUse", "group is currently in use by group %q", sg.id) - } - } - } - - delete(srv.groups, g.id) - return &ec2.SimpleResp{ - XMLName: xml.Name{"", "DeleteSecurityGroupResponse"}, - RequestId: reqId, - } -} - -func (r *reservation) hasRunningMachine() bool { - for _, inst := range r.instances { - if inst.state.Code != ShuttingDown.Code && inst.state.Code != Terminated.Code { - return true - } - } - return false -} - -type counter int - -func (c *counter) next() (i int) { - i = int(*c) - (*c)++ - return -} - -// atoi is like strconv.Atoi but is fatal if the -// string is not well formed. -func atoi(s string) int { - i, err := strconv.Atoi(s) - if err != nil { - fatalf(400, "InvalidParameterValue", "bad number: %v", err) - } - return i -} - -func fatalf(statusCode int, code string, f string, a ...interface{}) { - panic(&ec2.Error{ - StatusCode: statusCode, - Code: code, - Message: fmt.Sprintf(f, a...), - }) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1501 +0,0 @@ -package ec2_test - -import ( - "testing" - - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/goamz/testutil" - . 
"github.com/motain/gocheck" -) - -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&S{}) - -type S struct { - ec2 *ec2.EC2 -} - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - s.ec2 = ec2.NewWithClient( - auth, - aws.Region{EC2Endpoint: testServer.URL}, - testutil.DefaultClient, - ) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) TestRunInstancesErrorDump(c *C) { - testServer.Response(400, nil, ErrorDump) - - options := ec2.RunInstances{ - ImageId: "ami-a6f504cf", // Ubuntu Maverick, i386, instance store - InstanceType: "t1.micro", // Doesn't work with micro, results in 400. - } - - msg := `AMIs with an instance-store root device are not supported for the instance type 't1\.micro'\.` - - resp, err := s.ec2.RunInstances(&options) - - testServer.WaitRequest() - - c.Assert(resp, IsNil) - c.Assert(err, ErrorMatches, msg+` \(UnsupportedOperation\)`) - - ec2err, ok := err.(*ec2.Error) - c.Assert(ok, Equals, true) - c.Assert(ec2err.StatusCode, Equals, 400) - c.Assert(ec2err.Code, Equals, "UnsupportedOperation") - c.Assert(ec2err.Message, Matches, msg) - c.Assert(ec2err.RequestId, Equals, "0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4") -} - -func (s *S) TestRequestSpotInstancesErrorDump(c *C) { - testServer.Response(400, nil, ErrorDump) - - options := ec2.RequestSpotInstances{ - SpotPrice: "0.01", - ImageId: "ami-a6f504cf", // Ubuntu Maverick, i386, instance store - InstanceType: "t1.micro", // Doesn't work with micro, results in 400. 
- } - - msg := `AMIs with an instance-store root device are not supported for the instance type 't1\.micro'\.` - - resp, err := s.ec2.RequestSpotInstances(&options) - - testServer.WaitRequest() - - c.Assert(resp, IsNil) - c.Assert(err, ErrorMatches, msg+` \(UnsupportedOperation\)`) - - ec2err, ok := err.(*ec2.Error) - c.Assert(ok, Equals, true) - c.Assert(ec2err.StatusCode, Equals, 400) - c.Assert(ec2err.Code, Equals, "UnsupportedOperation") - c.Assert(ec2err.Message, Matches, msg) - c.Assert(ec2err.RequestId, Equals, "0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4") -} - -func (s *S) TestRunInstancesErrorWithoutXML(c *C) { - testServer.Responses(5, 500, nil, "") - options := ec2.RunInstances{ImageId: "image-id"} - - resp, err := s.ec2.RunInstances(&options) - - testServer.WaitRequest() - - c.Assert(resp, IsNil) - c.Assert(err, ErrorMatches, "") - - ec2err, ok := err.(*ec2.Error) - c.Assert(ok, Equals, true) - c.Assert(ec2err.StatusCode, Equals, 500) - c.Assert(ec2err.Code, Equals, "") - c.Assert(ec2err.Message, Equals, "") - c.Assert(ec2err.RequestId, Equals, "") -} - -func (s *S) TestRequestSpotInstancesErrorWithoutXML(c *C) { - testServer.Responses(5, 500, nil, "") - options := ec2.RequestSpotInstances{SpotPrice: "spot-price", ImageId: "image-id"} - - resp, err := s.ec2.RequestSpotInstances(&options) - - testServer.WaitRequest() - - c.Assert(resp, IsNil) - c.Assert(err, ErrorMatches, "") - - ec2err, ok := err.(*ec2.Error) - c.Assert(ok, Equals, true) - c.Assert(ec2err.StatusCode, Equals, 500) - c.Assert(ec2err.Code, Equals, "") - c.Assert(ec2err.Message, Equals, "") - c.Assert(ec2err.RequestId, Equals, "") -} - -func (s *S) TestRunInstancesExample(c *C) { - testServer.Response(200, nil, RunInstancesExample) - - options := ec2.RunInstances{ - KeyName: "my-keys", - ImageId: "image-id", - InstanceType: "inst-type", - SecurityGroups: []ec2.SecurityGroup{{Name: "g1"}, {Id: "g2"}, {Name: "g3"}, {Id: "g4"}}, - UserData: []byte("1234"), - KernelId: "kernel-id", - RamdiskId: 
"ramdisk-id", - AvailZone: "zone", - Tenancy: "dedicated", - PlacementGroupName: "group", - Monitoring: true, - SubnetId: "subnet-id", - DisableAPITermination: true, - EbsOptimized: true, - ShutdownBehavior: "terminate", - PrivateIPAddress: "10.0.0.25", - BlockDevices: []ec2.BlockDeviceMapping{ - {DeviceName: "/dev/sdb", VirtualName: "ephemeral0"}, - {DeviceName: "/dev/sdc", SnapshotId: "snap-a08912c9", DeleteOnTermination: true}, - }, - } - resp, err := s.ec2.RunInstances(&options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"RunInstances"}) - c.Assert(req.Form["ImageId"], DeepEquals, []string{"image-id"}) - c.Assert(req.Form["MinCount"], DeepEquals, []string{"1"}) - c.Assert(req.Form["MaxCount"], DeepEquals, []string{"1"}) - c.Assert(req.Form["KeyName"], DeepEquals, []string{"my-keys"}) - c.Assert(req.Form["InstanceType"], DeepEquals, []string{"inst-type"}) - c.Assert(req.Form["SecurityGroup.1"], DeepEquals, []string{"g1"}) - c.Assert(req.Form["SecurityGroup.2"], DeepEquals, []string{"g3"}) - c.Assert(req.Form["SecurityGroupId.1"], DeepEquals, []string{"g2"}) - c.Assert(req.Form["SecurityGroupId.2"], DeepEquals, []string{"g4"}) - c.Assert(req.Form["UserData"], DeepEquals, []string{"MTIzNA=="}) - c.Assert(req.Form["KernelId"], DeepEquals, []string{"kernel-id"}) - c.Assert(req.Form["RamdiskId"], DeepEquals, []string{"ramdisk-id"}) - c.Assert(req.Form["Placement.AvailabilityZone"], DeepEquals, []string{"zone"}) - c.Assert(req.Form["Placement.GroupName"], DeepEquals, []string{"group"}) - c.Assert(req.Form["Monitoring.Enabled"], DeepEquals, []string{"true"}) - c.Assert(req.Form["SubnetId"], DeepEquals, []string{"subnet-id"}) - c.Assert(req.Form["DisableApiTermination"], DeepEquals, []string{"true"}) - c.Assert(req.Form["EbsOptimized"], DeepEquals, []string{"true"}) - c.Assert(req.Form["InstanceInitiatedShutdownBehavior"], DeepEquals, []string{"terminate"}) - c.Assert(req.Form["PrivateIpAddress"], DeepEquals, 
[]string{"10.0.0.25"}) - c.Assert(req.Form["BlockDeviceMapping.1.DeviceName"], DeepEquals, []string{"/dev/sdb"}) - c.Assert(req.Form["BlockDeviceMapping.1.VirtualName"], DeepEquals, []string{"ephemeral0"}) - c.Assert(req.Form["BlockDeviceMapping.2.Ebs.SnapshotId"], DeepEquals, []string{"snap-a08912c9"}) - c.Assert(req.Form["BlockDeviceMapping.2.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.ReservationId, Equals, "r-47a5402e") - c.Assert(resp.OwnerId, Equals, "999988887777") - c.Assert(resp.SecurityGroups, DeepEquals, []ec2.SecurityGroup{{Name: "default", Id: "sg-67ad940e"}}) - c.Assert(resp.Instances, HasLen, 3) - - i0 := resp.Instances[0] - c.Assert(i0.InstanceId, Equals, "i-2ba64342") - c.Assert(i0.InstanceType, Equals, "m1.small") - c.Assert(i0.ImageId, Equals, "ami-60a54009") - c.Assert(i0.Monitoring, Equals, "enabled") - c.Assert(i0.KeyName, Equals, "example-key-name") - c.Assert(i0.AMILaunchIndex, Equals, 0) - c.Assert(i0.VirtType, Equals, "paravirtual") - c.Assert(i0.Hypervisor, Equals, "xen") - - i1 := resp.Instances[1] - c.Assert(i1.InstanceId, Equals, "i-2bc64242") - c.Assert(i1.InstanceType, Equals, "m1.small") - c.Assert(i1.ImageId, Equals, "ami-60a54009") - c.Assert(i1.Monitoring, Equals, "enabled") - c.Assert(i1.KeyName, Equals, "example-key-name") - c.Assert(i1.AMILaunchIndex, Equals, 1) - c.Assert(i1.VirtType, Equals, "paravirtual") - c.Assert(i1.Hypervisor, Equals, "xen") - - i2 := resp.Instances[2] - c.Assert(i2.InstanceId, Equals, "i-2be64332") - c.Assert(i2.InstanceType, Equals, "m1.small") - c.Assert(i2.ImageId, Equals, "ami-60a54009") - c.Assert(i2.Monitoring, Equals, "enabled") - c.Assert(i2.KeyName, Equals, "example-key-name") - c.Assert(i2.AMILaunchIndex, Equals, 2) - c.Assert(i2.VirtType, Equals, "paravirtual") - c.Assert(i2.Hypervisor, Equals, "xen") -} - -func (s *S) TestRequestSpotInstancesExample(c *C) { - 
testServer.Response(200, nil, RequestSpotInstancesExample) - - options := ec2.RequestSpotInstances{ - SpotPrice: "0.5", - KeyName: "my-keys", - ImageId: "image-id", - InstanceType: "inst-type", - SecurityGroups: []ec2.SecurityGroup{{Name: "g1"}, {Id: "g2"}, {Name: "g3"}, {Id: "g4"}}, - UserData: []byte("1234"), - KernelId: "kernel-id", - RamdiskId: "ramdisk-id", - AvailZone: "zone", - PlacementGroupName: "group", - Monitoring: true, - SubnetId: "subnet-id", - PrivateIPAddress: "10.0.0.25", - BlockDevices: []ec2.BlockDeviceMapping{ - {DeviceName: "/dev/sdb", VirtualName: "ephemeral0"}, - {DeviceName: "/dev/sdc", SnapshotId: "snap-a08912c9", DeleteOnTermination: true}, - }, - } - resp, err := s.ec2.RequestSpotInstances(&options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"RequestSpotInstances"}) - c.Assert(req.Form["SpotPrice"], DeepEquals, []string{"0.5"}) - c.Assert(req.Form["LaunchSpecification.ImageId"], DeepEquals, []string{"image-id"}) - c.Assert(req.Form["LaunchSpecification.KeyName"], DeepEquals, []string{"my-keys"}) - c.Assert(req.Form["LaunchSpecification.InstanceType"], DeepEquals, []string{"inst-type"}) - c.Assert(req.Form["LaunchSpecification.SecurityGroup.1"], DeepEquals, []string{"g1"}) - c.Assert(req.Form["LaunchSpecification.SecurityGroup.2"], DeepEquals, []string{"g3"}) - c.Assert(req.Form["LaunchSpecification.SecurityGroupId.1"], DeepEquals, []string{"g2"}) - c.Assert(req.Form["LaunchSpecification.SecurityGroupId.2"], DeepEquals, []string{"g4"}) - c.Assert(req.Form["LaunchSpecification.UserData"], DeepEquals, []string{"MTIzNA=="}) - c.Assert(req.Form["LaunchSpecification.KernelId"], DeepEquals, []string{"kernel-id"}) - c.Assert(req.Form["LaunchSpecification.RamdiskId"], DeepEquals, []string{"ramdisk-id"}) - c.Assert(req.Form["LaunchSpecification.Placement.AvailabilityZone"], DeepEquals, []string{"zone"}) - c.Assert(req.Form["LaunchSpecification.Placement.GroupName"], DeepEquals, []string{"group"}) - 
c.Assert(req.Form["LaunchSpecification.Monitoring.Enabled"], DeepEquals, []string{"true"}) - c.Assert(req.Form["LaunchSpecification.SubnetId"], DeepEquals, []string{"subnet-id"}) - c.Assert(req.Form["LaunchSpecification.PrivateIpAddress"], DeepEquals, []string{"10.0.0.25"}) - c.Assert(req.Form["LaunchSpecification.BlockDeviceMapping.1.DeviceName"], DeepEquals, []string{"/dev/sdb"}) - c.Assert(req.Form["LaunchSpecification.BlockDeviceMapping.1.VirtualName"], DeepEquals, []string{"ephemeral0"}) - c.Assert(req.Form["LaunchSpecification.BlockDeviceMapping.2.Ebs.SnapshotId"], DeepEquals, []string{"snap-a08912c9"}) - c.Assert(req.Form["LaunchSpecification.BlockDeviceMapping.2.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.SpotRequestResults[0].SpotRequestId, Equals, "sir-1a2b3c4d") - c.Assert(resp.SpotRequestResults[0].SpotPrice, Equals, "0.5") - c.Assert(resp.SpotRequestResults[0].State, Equals, "open") - c.Assert(resp.SpotRequestResults[0].SpotLaunchSpec.ImageId, Equals, "ami-1a2b3c4d") - c.Assert(resp.SpotRequestResults[0].Status.Code, Equals, "pending-evaluation") - c.Assert(resp.SpotRequestResults[0].Status.UpdateTime, Equals, "2008-05-07T12:51:50.000Z") - c.Assert(resp.SpotRequestResults[0].Status.Message, Equals, "Your Spot request has been submitted for review, and is pending evaluation.") -} - -func (s *S) TestCancelSpotRequestsExample(c *C) { - testServer.Response(200, nil, CancelSpotRequestsExample) - - resp, err := s.ec2.CancelSpotRequests([]string{"s-1", "s-2"}) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"CancelSpotInstanceRequests"}) - c.Assert(req.Form["SpotInstanceRequestId.1"], DeepEquals, []string{"s-1"}) - c.Assert(req.Form["SpotInstanceRequestId.2"], DeepEquals, []string{"s-2"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - 
c.Assert(resp.CancelSpotRequestResults[0].SpotRequestId, Equals, "sir-1a2b3c4d") - c.Assert(resp.CancelSpotRequestResults[0].State, Equals, "cancelled") -} - -func (s *S) TestTerminateInstancesExample(c *C) { - testServer.Response(200, nil, TerminateInstancesExample) - - resp, err := s.ec2.TerminateInstances([]string{"i-1", "i-2"}) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"TerminateInstances"}) - c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-1"}) - c.Assert(req.Form["InstanceId.2"], DeepEquals, []string{"i-2"}) - c.Assert(req.Form["UserData"], IsNil) - c.Assert(req.Form["KernelId"], IsNil) - c.Assert(req.Form["RamdiskId"], IsNil) - c.Assert(req.Form["Placement.AvailabilityZone"], IsNil) - c.Assert(req.Form["Placement.GroupName"], IsNil) - c.Assert(req.Form["Monitoring.Enabled"], IsNil) - c.Assert(req.Form["SubnetId"], IsNil) - c.Assert(req.Form["DisableApiTermination"], IsNil) - c.Assert(req.Form["EbsOptimized"], IsNil) - c.Assert(req.Form["InstanceInitiatedShutdownBehavior"], IsNil) - c.Assert(req.Form["PrivateIpAddress"], IsNil) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.StateChanges, HasLen, 1) - c.Assert(resp.StateChanges[0].InstanceId, Equals, "i-3ea74257") - c.Assert(resp.StateChanges[0].CurrentState.Code, Equals, 32) - c.Assert(resp.StateChanges[0].CurrentState.Name, Equals, "shutting-down") - c.Assert(resp.StateChanges[0].PreviousState.Code, Equals, 16) - c.Assert(resp.StateChanges[0].PreviousState.Name, Equals, "running") -} - -func (s *S) TestDescribeSpotRequestsExample(c *C) { - testServer.Response(200, nil, DescribeSpotRequestsExample) - - filter := ec2.NewFilter() - filter.Add("key1", "value1") - filter.Add("key2", "value2", "value3") - - resp, err := s.ec2.DescribeSpotRequests([]string{"s-1", "s-2"}, filter) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, 
[]string{"DescribeSpotInstanceRequests"}) - c.Assert(req.Form["SpotInstanceRequestId.1"], DeepEquals, []string{"s-1"}) - c.Assert(req.Form["SpotInstanceRequestId.2"], DeepEquals, []string{"s-2"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "b1719f2a-5334-4479-b2f1-26926EXAMPLE") - c.Assert(resp.SpotRequestResults[0].SpotRequestId, Equals, "sir-1a2b3c4d") - c.Assert(resp.SpotRequestResults[0].State, Equals, "active") - c.Assert(resp.SpotRequestResults[0].SpotPrice, Equals, "0.5") - c.Assert(resp.SpotRequestResults[0].SpotLaunchSpec.ImageId, Equals, "ami-1a2b3c4d") - c.Assert(resp.SpotRequestResults[0].Status.Code, Equals, "fulfilled") - c.Assert(resp.SpotRequestResults[0].Status.UpdateTime, Equals, "2008-05-07T12:51:50.000Z") - c.Assert(resp.SpotRequestResults[0].Status.Message, Equals, "Your Spot request is fulfilled.") -} - -func (s *S) TestDescribeInstancesExample1(c *C) { - testServer.Response(200, nil, DescribeInstancesExample1) - - filter := ec2.NewFilter() - filter.Add("key1", "value1") - filter.Add("key2", "value2", "value3") - - resp, err := s.ec2.Instances([]string{"i-1", "i-2"}, nil) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeInstances"}) - c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-1"}) - c.Assert(req.Form["InstanceId.2"], DeepEquals, []string{"i-2"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "98e3c9a4-848c-4d6d-8e8a-b1bdEXAMPLE") - c.Assert(resp.Reservations, HasLen, 2) - - r0 := resp.Reservations[0] - c.Assert(r0.ReservationId, Equals, "r-b27e30d9") - c.Assert(r0.OwnerId, Equals, "999988887777") - c.Assert(r0.RequesterId, Equals, "854251627541") - c.Assert(r0.SecurityGroups, DeepEquals, []ec2.SecurityGroup{{Name: "default", Id: "sg-67ad940e"}}) - c.Assert(r0.Instances, HasLen, 1) - - r0i := r0.Instances[0] - c.Assert(r0i.InstanceId, Equals, "i-c5cd56af") - c.Assert(r0i.PrivateDNSName, Equals, "domU-12-31-39-10-56-34.compute-1.internal") - 
c.Assert(r0i.DNSName, Equals, "ec2-174-129-165-232.compute-1.amazonaws.com") - c.Assert(r0i.AvailZone, Equals, "us-east-1b") - c.Assert(r0i.RootDeviceName, Equals, "/dev/sda1") - - b0 := r0i.BlockDevices[0] - c.Assert(b0.DeviceName, Equals, "/dev/sda1") - c.Assert(b0.VolumeId, Equals, "vol-a082c1c9") - c.Assert(b0.Status, Equals, "attached") - c.Assert(b0.AttachTime, Equals, "2010-08-17T01:15:21.000Z") - c.Assert(b0.DeleteOnTermination, Equals, false) -} - -func (s *S) TestDescribeInstancesExample2(c *C) { - testServer.Response(200, nil, DescribeInstancesExample2) - - filter := ec2.NewFilter() - filter.Add("key1", "value1") - filter.Add("key2", "value2", "value3") - - resp, err := s.ec2.Instances([]string{"i-1", "i-2"}, filter) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeInstances"}) - c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-1"}) - c.Assert(req.Form["InstanceId.2"], DeepEquals, []string{"i-2"}) - c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"key1"}) - c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"}) - c.Assert(req.Form["Filter.1.Value.2"], IsNil) - c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"key2"}) - c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"}) - c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.Reservations, HasLen, 1) - - r0 := resp.Reservations[0] - r0i := r0.Instances[0] - c.Assert(r0i.State.Code, Equals, 16) - c.Assert(r0i.State.Name, Equals, "running") - - r0t0 := r0i.Tags[0] - r0t1 := r0i.Tags[1] - c.Assert(r0t0.Key, Equals, "webserver") - c.Assert(r0t0.Value, Equals, "") - c.Assert(r0t1.Key, Equals, "stack") - c.Assert(r0t1.Value, Equals, "Production") -} - -func (s *S) TestCreateImageExample(c *C) { - testServer.Response(200, nil, CreateImageExample) - - options := 
&ec2.CreateImage{ - InstanceId: "i-123456", - Name: "foo", - Description: "Test CreateImage", - NoReboot: true, - BlockDevices: []ec2.BlockDeviceMapping{ - {DeviceName: "/dev/sdb", VirtualName: "ephemeral0"}, - {DeviceName: "/dev/sdc", SnapshotId: "snap-a08912c9", DeleteOnTermination: true}, - }, - } - - resp, err := s.ec2.CreateImage(options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateImage"}) - c.Assert(req.Form["InstanceId"], DeepEquals, []string{options.InstanceId}) - c.Assert(req.Form["Name"], DeepEquals, []string{options.Name}) - c.Assert(req.Form["Description"], DeepEquals, []string{options.Description}) - c.Assert(req.Form["NoReboot"], DeepEquals, []string{"true"}) - c.Assert(req.Form["BlockDeviceMapping.1.DeviceName"], DeepEquals, []string{"/dev/sdb"}) - c.Assert(req.Form["BlockDeviceMapping.1.VirtualName"], DeepEquals, []string{"ephemeral0"}) - c.Assert(req.Form["BlockDeviceMapping.2.DeviceName"], DeepEquals, []string{"/dev/sdc"}) - c.Assert(req.Form["BlockDeviceMapping.2.Ebs.SnapshotId"], DeepEquals, []string{"snap-a08912c9"}) - c.Assert(req.Form["BlockDeviceMapping.2.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.ImageId, Equals, "ami-4fa54026") -} - -func (s *S) TestDescribeImagesExample(c *C) { - testServer.Response(200, nil, DescribeImagesExample) - - filter := ec2.NewFilter() - filter.Add("key1", "value1") - filter.Add("key2", "value2", "value3") - - resp, err := s.ec2.Images([]string{"ami-1", "ami-2"}, filter) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeImages"}) - c.Assert(req.Form["ImageId.1"], DeepEquals, []string{"ami-1"}) - c.Assert(req.Form["ImageId.2"], DeepEquals, []string{"ami-2"}) - c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"key1"}) - c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, 
[]string{"value1"}) - c.Assert(req.Form["Filter.1.Value.2"], IsNil) - c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"key2"}) - c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"}) - c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "4a4a27a2-2e7c-475d-b35b-ca822EXAMPLE") - c.Assert(resp.Images, HasLen, 1) - - i0 := resp.Images[0] - c.Assert(i0.Id, Equals, "ami-a2469acf") - c.Assert(i0.Type, Equals, "machine") - c.Assert(i0.Name, Equals, "example-marketplace-amzn-ami.1") - c.Assert(i0.Description, Equals, "Amazon Linux AMI i386 EBS") - c.Assert(i0.Location, Equals, "aws-marketplace/example-marketplace-amzn-ami.1") - c.Assert(i0.State, Equals, "available") - c.Assert(i0.Public, Equals, true) - c.Assert(i0.OwnerId, Equals, "123456789999") - c.Assert(i0.OwnerAlias, Equals, "aws-marketplace") - c.Assert(i0.Architecture, Equals, "i386") - c.Assert(i0.KernelId, Equals, "aki-805ea7e9") - c.Assert(i0.RootDeviceType, Equals, "ebs") - c.Assert(i0.RootDeviceName, Equals, "/dev/sda1") - c.Assert(i0.VirtualizationType, Equals, "paravirtual") - c.Assert(i0.Hypervisor, Equals, "xen") - - c.Assert(i0.BlockDevices, HasLen, 1) - c.Assert(i0.BlockDevices[0].DeviceName, Equals, "/dev/sda1") - c.Assert(i0.BlockDevices[0].SnapshotId, Equals, "snap-787e9403") - c.Assert(i0.BlockDevices[0].VolumeSize, Equals, int64(8)) - c.Assert(i0.BlockDevices[0].DeleteOnTermination, Equals, true) - - testServer.Response(200, nil, DescribeImagesExample) - resp2, err := s.ec2.ImagesByOwners([]string{"ami-1", "ami-2"}, []string{"123456789999", "id2"}, filter) - - req2 := testServer.WaitRequest() - c.Assert(req2.Form["Action"], DeepEquals, []string{"DescribeImages"}) - c.Assert(req2.Form["ImageId.1"], DeepEquals, []string{"ami-1"}) - c.Assert(req2.Form["ImageId.2"], DeepEquals, []string{"ami-2"}) - c.Assert(req2.Form["Owner.1"], DeepEquals, []string{"123456789999"}) - c.Assert(req2.Form["Owner.2"], 
DeepEquals, []string{"id2"}) - c.Assert(req2.Form["Filter.1.Name"], DeepEquals, []string{"key1"}) - c.Assert(req2.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"}) - c.Assert(req2.Form["Filter.1.Value.2"], IsNil) - c.Assert(req2.Form["Filter.2.Name"], DeepEquals, []string{"key2"}) - c.Assert(req2.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"}) - c.Assert(req2.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"}) - - c.Assert(err, IsNil) - c.Assert(resp2.RequestId, Equals, "4a4a27a2-2e7c-475d-b35b-ca822EXAMPLE") - c.Assert(resp2.Images, HasLen, 1) - - i1 := resp2.Images[0] - c.Assert(i1.Id, Equals, "ami-a2469acf") - c.Assert(i1.Type, Equals, "machine") - c.Assert(i1.Name, Equals, "example-marketplace-amzn-ami.1") - c.Assert(i1.Description, Equals, "Amazon Linux AMI i386 EBS") - c.Assert(i1.Location, Equals, "aws-marketplace/example-marketplace-amzn-ami.1") - c.Assert(i1.State, Equals, "available") - c.Assert(i1.Public, Equals, true) - c.Assert(i1.OwnerId, Equals, "123456789999") - c.Assert(i1.OwnerAlias, Equals, "aws-marketplace") - c.Assert(i1.Architecture, Equals, "i386") - c.Assert(i1.KernelId, Equals, "aki-805ea7e9") - c.Assert(i1.RootDeviceType, Equals, "ebs") - c.Assert(i1.RootDeviceName, Equals, "/dev/sda1") - c.Assert(i1.VirtualizationType, Equals, "paravirtual") - c.Assert(i1.Hypervisor, Equals, "xen") - - c.Assert(i1.BlockDevices, HasLen, 1) - c.Assert(i1.BlockDevices[0].DeviceName, Equals, "/dev/sda1") - c.Assert(i1.BlockDevices[0].SnapshotId, Equals, "snap-787e9403") - c.Assert(i1.BlockDevices[0].VolumeSize, Equals, int64(8)) - c.Assert(i1.BlockDevices[0].DeleteOnTermination, Equals, true) -} - -func (s *S) TestImageAttributeExample(c *C) { - testServer.Response(200, nil, ImageAttributeExample) - - resp, err := s.ec2.ImageAttribute("ami-61a54008", "launchPermission") - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeImageAttribute"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, 
Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.ImageId, Equals, "ami-61a54008") - c.Assert(resp.Group, Equals, "all") - c.Assert(resp.UserIds[0], Equals, "495219933132") -} - -func (s *S) TestCreateSnapshotExample(c *C) { - testServer.Response(200, nil, CreateSnapshotExample) - - resp, err := s.ec2.CreateSnapshot("vol-4d826724", "Daily Backup") - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateSnapshot"}) - c.Assert(req.Form["VolumeId"], DeepEquals, []string{"vol-4d826724"}) - c.Assert(req.Form["Description"], DeepEquals, []string{"Daily Backup"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.Snapshot.Id, Equals, "snap-78a54011") - c.Assert(resp.Snapshot.VolumeId, Equals, "vol-4d826724") - c.Assert(resp.Snapshot.Status, Equals, "pending") - c.Assert(resp.Snapshot.StartTime, Equals, "2008-05-07T12:51:50.000Z") - c.Assert(resp.Snapshot.Progress, Equals, "60%") - c.Assert(resp.Snapshot.OwnerId, Equals, "111122223333") - c.Assert(resp.Snapshot.VolumeSize, Equals, "10") - c.Assert(resp.Snapshot.Description, Equals, "Daily Backup") -} - -func (s *S) TestDeleteSnapshotsExample(c *C) { - testServer.Response(200, nil, DeleteSnapshotExample) - - resp, err := s.ec2.DeleteSnapshots([]string{"snap-78a54011"}) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteSnapshot"}) - c.Assert(req.Form["SnapshotId.1"], DeepEquals, []string{"snap-78a54011"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestDescribeSnapshotsExample(c *C) { - testServer.Response(200, nil, DescribeSnapshotsExample) - - filter := ec2.NewFilter() - filter.Add("key1", "value1") - filter.Add("key2", "value2", "value3") - - resp, err := s.ec2.Snapshots([]string{"snap-1", "snap-2"}, filter) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, 
[]string{"DescribeSnapshots"}) - c.Assert(req.Form["SnapshotId.1"], DeepEquals, []string{"snap-1"}) - c.Assert(req.Form["SnapshotId.2"], DeepEquals, []string{"snap-2"}) - c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"key1"}) - c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"}) - c.Assert(req.Form["Filter.1.Value.2"], IsNil) - c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"key2"}) - c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"}) - c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.Snapshots, HasLen, 1) - - s0 := resp.Snapshots[0] - c.Assert(s0.Id, Equals, "snap-1a2b3c4d") - c.Assert(s0.VolumeId, Equals, "vol-8875daef") - c.Assert(s0.VolumeSize, Equals, "15") - c.Assert(s0.Status, Equals, "pending") - c.Assert(s0.StartTime, Equals, "2010-07-29T04:12:01.000Z") - c.Assert(s0.Progress, Equals, "30%") - c.Assert(s0.OwnerId, Equals, "111122223333") - c.Assert(s0.Description, Equals, "Daily Backup") - - c.Assert(s0.Tags, HasLen, 1) - c.Assert(s0.Tags[0].Key, Equals, "Purpose") - c.Assert(s0.Tags[0].Value, Equals, "demo_db_14_backup") -} - -func (s *S) TestModifyImageAttributeExample(c *C) { - testServer.Response(200, nil, ModifyImageAttributeExample) - - options := ec2.ModifyImageAttribute{ - Description: "Test Description", - } - - resp, err := s.ec2.ModifyImageAttribute("ami-4fa54026", &options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"ModifyImageAttribute"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestModifyImageAttributeExample_complex(c *C) { - testServer.Response(200, nil, ModifyImageAttributeExample) - - options := ec2.ModifyImageAttribute{ - AddUsers: []string{"u1", "u2"}, - RemoveUsers: []string{"u3"}, - AddGroups: []string{"g1", "g3"}, - 
RemoveGroups: []string{"g2"}, - Description: "Test Description", - } - - resp, err := s.ec2.ModifyImageAttribute("ami-4fa54026", &options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"ModifyImageAttribute"}) - c.Assert(req.Form["LaunchPermission.Add.1.UserId"], DeepEquals, []string{"u1"}) - c.Assert(req.Form["LaunchPermission.Add.2.UserId"], DeepEquals, []string{"u2"}) - c.Assert(req.Form["LaunchPermission.Remove.1.UserId"], DeepEquals, []string{"u3"}) - c.Assert(req.Form["LaunchPermission.Add.1.Group"], DeepEquals, []string{"g1"}) - c.Assert(req.Form["LaunchPermission.Add.2.Group"], DeepEquals, []string{"g3"}) - c.Assert(req.Form["LaunchPermission.Remove.1.Group"], DeepEquals, []string{"g2"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestCopyImageExample(c *C) { - testServer.Response(200, nil, CopyImageExample) - - options := ec2.CopyImage{ - SourceRegion: "us-west-2", - SourceImageId: "ami-1a2b3c4d", - Description: "Test Description", - } - - resp, err := s.ec2.CopyImage(&options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"CopyImage"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "60bc441d-fa2c-494d-b155-5d6a3EXAMPLE") -} - -func (s *S) TestCreateKeyPairExample(c *C) { - testServer.Response(200, nil, CreateKeyPairExample) - - resp, err := s.ec2.CreateKeyPair("foo") - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateKeyPair"}) - c.Assert(req.Form["KeyName"], DeepEquals, []string{"foo"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.KeyName, Equals, "foo") - c.Assert(resp.KeyFingerprint, Equals, "00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00") -} - -func (s *S) TestDeleteKeyPairExample(c *C) { - testServer.Response(200, nil, DeleteKeyPairExample) - - resp, err 
:= s.ec2.DeleteKeyPair("foo") - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteKeyPair"}) - c.Assert(req.Form["KeyName"], DeepEquals, []string{"foo"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestCreateSecurityGroupExample(c *C) { - testServer.Response(200, nil, CreateSecurityGroupExample) - - resp, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: "websrv", Description: "Web Servers"}) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateSecurityGroup"}) - c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"}) - c.Assert(req.Form["GroupDescription"], DeepEquals, []string{"Web Servers"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.Name, Equals, "websrv") - c.Assert(resp.Id, Equals, "sg-67ad940e") -} - -func (s *S) TestDescribeSecurityGroupsExample(c *C) { - testServer.Response(200, nil, DescribeSecurityGroupsExample) - - resp, err := s.ec2.SecurityGroups([]ec2.SecurityGroup{{Name: "WebServers"}, {Name: "RangedPortsBySource"}}, nil) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeSecurityGroups"}) - c.Assert(req.Form["GroupName.1"], DeepEquals, []string{"WebServers"}) - c.Assert(req.Form["GroupName.2"], DeepEquals, []string{"RangedPortsBySource"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.Groups, HasLen, 2) - - g0 := resp.Groups[0] - c.Assert(g0.OwnerId, Equals, "999988887777") - c.Assert(g0.Name, Equals, "WebServers") - c.Assert(g0.Id, Equals, "sg-67ad940e") - c.Assert(g0.Description, Equals, "Web Servers") - c.Assert(g0.IPPerms, HasLen, 1) - c.Assert(g0.IPPermsEgress, HasLen, 1) - - g0ipp := g0.IPPerms[0] - c.Assert(g0ipp.Protocol, Equals, "tcp") - c.Assert(g0ipp.FromPort, Equals, 80) - 
c.Assert(g0ipp.ToPort, Equals, 80) - c.Assert(g0ipp.SourceIPs, DeepEquals, []string{"0.0.0.0/0"}) - - g0ippe := g0.IPPermsEgress[0] - c.Assert(g0ippe.Protocol, Equals, "tcp") - c.Assert(g0ippe.FromPort, Equals, 80) - c.Assert(g0ippe.ToPort, Equals, 80) - c.Assert(g0ippe.SourceIPs, DeepEquals, []string{"0.0.0.0/0"}) - - g1 := resp.Groups[1] - c.Assert(g1.OwnerId, Equals, "999988887777") - c.Assert(g1.Name, Equals, "RangedPortsBySource") - c.Assert(g1.Id, Equals, "sg-76abc467") - c.Assert(g1.Description, Equals, "Group A") - c.Assert(g1.IPPerms, HasLen, 1) - - g1ipp := g1.IPPerms[0] - c.Assert(g1ipp.Protocol, Equals, "tcp") - c.Assert(g1ipp.FromPort, Equals, 6000) - c.Assert(g1ipp.ToPort, Equals, 7000) - c.Assert(g1ipp.SourceIPs, IsNil) -} - -func (s *S) TestDescribeSecurityGroupsExampleWithFilter(c *C) { - testServer.Response(200, nil, DescribeSecurityGroupsExample) - - filter := ec2.NewFilter() - filter.Add("ip-permission.protocol", "tcp") - filter.Add("ip-permission.from-port", "22") - filter.Add("ip-permission.to-port", "22") - filter.Add("ip-permission.group-name", "app_server_group", "database_group") - - _, err := s.ec2.SecurityGroups(nil, filter) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeSecurityGroups"}) - c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"ip-permission.from-port"}) - c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"22"}) - c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"ip-permission.group-name"}) - c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"app_server_group"}) - c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"database_group"}) - c.Assert(req.Form["Filter.3.Name"], DeepEquals, []string{"ip-permission.protocol"}) - c.Assert(req.Form["Filter.3.Value.1"], DeepEquals, []string{"tcp"}) - c.Assert(req.Form["Filter.4.Name"], DeepEquals, []string{"ip-permission.to-port"}) - c.Assert(req.Form["Filter.4.Value.1"], DeepEquals, 
[]string{"22"}) - - c.Assert(err, IsNil) -} - -func (s *S) TestDescribeSecurityGroupsDumpWithGroup(c *C) { - testServer.Response(200, nil, DescribeSecurityGroupsDump) - - resp, err := s.ec2.SecurityGroups(nil, nil) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeSecurityGroups"}) - c.Assert(err, IsNil) - c.Check(resp.Groups, HasLen, 1) - c.Check(resp.Groups[0].IPPerms, HasLen, 2) - - ipp0 := resp.Groups[0].IPPerms[0] - c.Assert(ipp0.SourceIPs, IsNil) - c.Check(ipp0.Protocol, Equals, "icmp") - c.Assert(ipp0.SourceGroups, HasLen, 1) - c.Check(ipp0.SourceGroups[0].OwnerId, Equals, "12345") - c.Check(ipp0.SourceGroups[0].Name, Equals, "default") - c.Check(ipp0.SourceGroups[0].Id, Equals, "sg-67ad940e") - - ipp1 := resp.Groups[0].IPPerms[1] - c.Check(ipp1.Protocol, Equals, "tcp") - c.Assert(ipp0.SourceIPs, IsNil) - c.Assert(ipp0.SourceGroups, HasLen, 1) - c.Check(ipp1.SourceGroups[0].Id, Equals, "sg-76abc467") - c.Check(ipp1.SourceGroups[0].OwnerId, Equals, "12345") - c.Check(ipp1.SourceGroups[0].Name, Equals, "other") -} - -func (s *S) TestDeleteSecurityGroupExample(c *C) { - testServer.Response(200, nil, DeleteSecurityGroupExample) - - resp, err := s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: "websrv"}) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteSecurityGroup"}) - c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"}) - c.Assert(req.Form["GroupId"], IsNil) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestDeleteSecurityGroupExampleWithId(c *C) { - testServer.Response(200, nil, DeleteSecurityGroupExample) - - // ignore return and error - we're only want to check the parameter handling. 
- s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Id: "sg-67ad940e", Name: "ignored"}) - req := testServer.WaitRequest() - - c.Assert(req.Form["GroupName"], IsNil) - c.Assert(req.Form["GroupId"], DeepEquals, []string{"sg-67ad940e"}) -} - -func (s *S) TestAuthorizeSecurityGroupExample1(c *C) { - testServer.Response(200, nil, AuthorizeSecurityGroupIngressExample) - - perms := []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 80, - ToPort: 80, - SourceIPs: []string{"205.192.0.0/16", "205.159.0.0/16"}, - }} - resp, err := s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: "websrv"}, perms) - - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"AuthorizeSecurityGroupIngress"}) - c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"}) - c.Assert(req.Form["IpPermissions.1.IpProtocol"], DeepEquals, []string{"tcp"}) - c.Assert(req.Form["IpPermissions.1.FromPort"], DeepEquals, []string{"80"}) - c.Assert(req.Form["IpPermissions.1.ToPort"], DeepEquals, []string{"80"}) - c.Assert(req.Form["IpPermissions.1.IpRanges.1.CidrIp"], DeepEquals, []string{"205.192.0.0/16"}) - c.Assert(req.Form["IpPermissions.1.IpRanges.2.CidrIp"], DeepEquals, []string{"205.159.0.0/16"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestAuthorizeSecurityGroupEgress(c *C) { - testServer.Response(200, nil, AuthorizeSecurityGroupEgressExample) - - perms := []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 80, - ToPort: 80, - SourceIPs: []string{"205.192.0.0/16", "205.159.0.0/16"}, - }} - resp, err := s.ec2.AuthorizeSecurityGroupEgress(ec2.SecurityGroup{Name: "websrv"}, perms) - - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"AuthorizeSecurityGroupEgress"}) - c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"}) - c.Assert(req.Form["IpPermissions.1.IpProtocol"], DeepEquals, []string{"tcp"}) - c.Assert(req.Form["IpPermissions.1.FromPort"], DeepEquals, 
[]string{"80"}) - c.Assert(req.Form["IpPermissions.1.ToPort"], DeepEquals, []string{"80"}) - c.Assert(req.Form["IpPermissions.1.IpRanges.1.CidrIp"], DeepEquals, []string{"205.192.0.0/16"}) - c.Assert(req.Form["IpPermissions.1.IpRanges.2.CidrIp"], DeepEquals, []string{"205.159.0.0/16"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestAuthorizeSecurityGroupExample1WithId(c *C) { - testServer.Response(200, nil, AuthorizeSecurityGroupIngressExample) - - perms := []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 80, - ToPort: 80, - SourceIPs: []string{"205.192.0.0/16", "205.159.0.0/16"}, - }} - // ignore return and error - we're only want to check the parameter handling. - s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Id: "sg-67ad940e", Name: "ignored"}, perms) - - req := testServer.WaitRequest() - - c.Assert(req.Form["GroupName"], IsNil) - c.Assert(req.Form["GroupId"], DeepEquals, []string{"sg-67ad940e"}) -} - -func (s *S) TestAuthorizeSecurityGroupExample2(c *C) { - testServer.Response(200, nil, AuthorizeSecurityGroupIngressExample) - - perms := []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 80, - ToPort: 81, - SourceGroups: []ec2.UserSecurityGroup{ - {OwnerId: "999988887777", Name: "OtherAccountGroup"}, - {Id: "sg-67ad940e"}, - }, - }} - resp, err := s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: "websrv"}, perms) - - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"AuthorizeSecurityGroupIngress"}) - c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"}) - c.Assert(req.Form["IpPermissions.1.IpProtocol"], DeepEquals, []string{"tcp"}) - c.Assert(req.Form["IpPermissions.1.FromPort"], DeepEquals, []string{"80"}) - c.Assert(req.Form["IpPermissions.1.ToPort"], DeepEquals, []string{"81"}) - c.Assert(req.Form["IpPermissions.1.Groups.1.UserId"], DeepEquals, []string{"999988887777"}) - c.Assert(req.Form["IpPermissions.1.Groups.1.GroupName"], DeepEquals, 
[]string{"OtherAccountGroup"}) - c.Assert(req.Form["IpPermissions.1.Groups.2.UserId"], IsNil) - c.Assert(req.Form["IpPermissions.1.Groups.2.GroupName"], IsNil) - c.Assert(req.Form["IpPermissions.1.Groups.2.GroupId"], DeepEquals, []string{"sg-67ad940e"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestRevokeSecurityGroupExample(c *C) { - // RevokeSecurityGroup is implemented by the same code as AuthorizeSecurityGroup - // so there's no need to duplicate all the tests. - testServer.Response(200, nil, RevokeSecurityGroupIngressExample) - - resp, err := s.ec2.RevokeSecurityGroup(ec2.SecurityGroup{Name: "websrv"}, nil) - - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"RevokeSecurityGroupIngress"}) - c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestCreateTags(c *C) { - testServer.Response(200, nil, CreateTagsExample) - - resp, err := s.ec2.CreateTags([]string{"ami-1a2b3c4d", "i-7f4d3a2b"}, []ec2.Tag{{"webserver", ""}, {"stack", "Production"}}) - - req := testServer.WaitRequest() - c.Assert(req.Form["ResourceId.1"], DeepEquals, []string{"ami-1a2b3c4d"}) - c.Assert(req.Form["ResourceId.2"], DeepEquals, []string{"i-7f4d3a2b"}) - c.Assert(req.Form["Tag.1.Key"], DeepEquals, []string{"webserver"}) - c.Assert(req.Form["Tag.1.Value"], DeepEquals, []string{""}) - c.Assert(req.Form["Tag.2.Key"], DeepEquals, []string{"stack"}) - c.Assert(req.Form["Tag.2.Value"], DeepEquals, []string{"Production"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestStartInstances(c *C) { - testServer.Response(200, nil, StartInstancesExample) - - resp, err := s.ec2.StartInstances("i-10a64379") - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, 
[]string{"StartInstances"}) - c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-10a64379"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - - s0 := resp.StateChanges[0] - c.Assert(s0.InstanceId, Equals, "i-10a64379") - c.Assert(s0.CurrentState.Code, Equals, 0) - c.Assert(s0.CurrentState.Name, Equals, "pending") - c.Assert(s0.PreviousState.Code, Equals, 80) - c.Assert(s0.PreviousState.Name, Equals, "stopped") -} - -func (s *S) TestStopInstances(c *C) { - testServer.Response(200, nil, StopInstancesExample) - - resp, err := s.ec2.StopInstances("i-10a64379") - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"StopInstances"}) - c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-10a64379"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - - s0 := resp.StateChanges[0] - c.Assert(s0.InstanceId, Equals, "i-10a64379") - c.Assert(s0.CurrentState.Code, Equals, 64) - c.Assert(s0.CurrentState.Name, Equals, "stopping") - c.Assert(s0.PreviousState.Code, Equals, 16) - c.Assert(s0.PreviousState.Name, Equals, "running") -} - -func (s *S) TestRebootInstances(c *C) { - testServer.Response(200, nil, RebootInstancesExample) - - resp, err := s.ec2.RebootInstances("i-10a64379") - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"RebootInstances"}) - c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-10a64379"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestSignatureWithEndpointPath(c *C) { - ec2.FakeTime(true) - defer ec2.FakeTime(false) - - testServer.Response(200, nil, RebootInstancesExample) - - // https://bugs.launchpad.net/goamz/+bug/1022749 - ec2 := ec2.NewWithClient(s.ec2.Auth, aws.Region{EC2Endpoint: testServer.URL + "/services/Cloud"}, testutil.DefaultClient) - - _, err := 
ec2.RebootInstances("i-10a64379") - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Form["Signature"], DeepEquals, []string{"tyOTQ0c0T5ujskCPTWa5ATMtv7UyErgT339cU8O2+Q8="}) -} - -func (s *S) TestDescribeInstanceStatusExample(c *C) { - testServer.Response(200, nil, DescribeInstanceStatusExample) - options := &ec2.DescribeInstanceStatus{} - resp, err := s.ec2.DescribeInstanceStatus(options, nil) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeInstanceStatus"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE") - c.Assert(resp.InstanceStatus[0].InstanceId, Equals, "i-1a2b3c4d") - c.Assert(resp.InstanceStatus[0].InstanceState.Code, Equals, 16) - c.Assert(resp.InstanceStatus[0].SystemStatus.Status, Equals, "impaired") - c.Assert(resp.InstanceStatus[0].SystemStatus.Details[0].Name, Equals, "reachability") - c.Assert(resp.InstanceStatus[0].SystemStatus.Details[0].Status, Equals, "failed") - c.Assert(resp.InstanceStatus[0].SystemStatus.Details[0].ImpairedSince, Equals, "YYYY-MM-DDTHH:MM:SS.000Z") - c.Assert(resp.InstanceStatus[0].InstanceStatus.Details[0].Name, Equals, "reachability") - c.Assert(resp.InstanceStatus[0].InstanceStatus.Details[0].Status, Equals, "failed") - c.Assert(resp.InstanceStatus[0].InstanceStatus.Details[0].ImpairedSince, Equals, "YYYY-MM-DDTHH:MM:SS.000Z") - c.Assert(resp.InstanceStatus[0].Events[0].Code, Equals, "instance-retirement") - c.Assert(resp.InstanceStatus[0].Events[0].Description, Equals, "The instance is running on degraded hardware") - c.Assert(resp.InstanceStatus[0].Events[0].NotBefore, Equals, "YYYY-MM-DDTHH:MM:SS+0000") - c.Assert(resp.InstanceStatus[0].Events[0].NotAfter, Equals, "YYYY-MM-DDTHH:MM:SS+0000") -} - -func (s *S) TestAllocateAddressExample(c *C) { - testServer.Response(200, nil, AllocateAddressExample) - - options := &ec2.AllocateAddress{ - Domain: "vpc", - } - - resp, err := 
s.ec2.AllocateAddress(options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"AllocateAddress"}) - c.Assert(req.Form["Domain"], DeepEquals, []string{"vpc"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.PublicIp, Equals, "198.51.100.1") - c.Assert(resp.Domain, Equals, "vpc") - c.Assert(resp.AllocationId, Equals, "eipalloc-5723d13e") -} - -func (s *S) TestReleaseAddressExample(c *C) { - testServer.Response(200, nil, ReleaseAddressExample) - - resp, err := s.ec2.ReleaseAddress("eipalloc-5723d13e") - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"ReleaseAddress"}) - c.Assert(req.Form["AllocationId"], DeepEquals, []string{"eipalloc-5723d13e"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestAssociateAddressExample(c *C) { - testServer.Response(200, nil, AssociateAddressExample) - - options := &ec2.AssociateAddress{ - InstanceId: "i-4fd2431a", - AllocationId: "eipalloc-5723d13e", - AllowReassociation: true, - } - - resp, err := s.ec2.AssociateAddress(options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"AssociateAddress"}) - c.Assert(req.Form["InstanceId"], DeepEquals, []string{"i-4fd2431a"}) - c.Assert(req.Form["AllocationId"], DeepEquals, []string{"eipalloc-5723d13e"}) - c.Assert(req.Form["AllowReassociation"], DeepEquals, []string{"true"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.AssociationId, Equals, "eipassoc-fc5ca095") -} - -func (s *S) TestDisassociateAddressExample(c *C) { - testServer.Response(200, nil, DisassociateAddressExample) - - resp, err := s.ec2.DisassociateAddress("eipassoc-aa7486c3") - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DisassociateAddress"}) - 
c.Assert(req.Form["AssociationId"], DeepEquals, []string{"eipassoc-aa7486c3"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestModifyInstance(c *C) { - testServer.Response(200, nil, ModifyInstanceExample) - - options := ec2.ModifyInstance{ - InstanceType: "m1.small", - DisableAPITermination: true, - EbsOptimized: true, - SecurityGroups: []ec2.SecurityGroup{{Id: "g1"}, {Id: "g2"}}, - ShutdownBehavior: "terminate", - KernelId: "kernel-id", - RamdiskId: "ramdisk-id", - SourceDestCheck: true, - SriovNetSupport: true, - UserData: []byte("1234"), - BlockDevices: []ec2.BlockDeviceMapping{ - {DeviceName: "/dev/sda1", SnapshotId: "snap-a08912c9", DeleteOnTermination: true}, - }, - } - - resp, err := s.ec2.ModifyInstance("i-2ba64342", &options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"ModifyInstanceAttribute"}) - c.Assert(req.Form["InstanceId"], DeepEquals, []string{"i-2ba64342"}) - c.Assert(req.Form["InstanceType.Value"], DeepEquals, []string{"m1.small"}) - c.Assert(req.Form["BlockDeviceMapping.1.DeviceName"], DeepEquals, []string{"/dev/sda1"}) - c.Assert(req.Form["BlockDeviceMapping.1.Ebs.SnapshotId"], DeepEquals, []string{"snap-a08912c9"}) - c.Assert(req.Form["BlockDeviceMapping.1.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"}) - c.Assert(req.Form["DisableApiTermination.Value"], DeepEquals, []string{"true"}) - c.Assert(req.Form["EbsOptimized"], DeepEquals, []string{"true"}) - c.Assert(req.Form["GroupId.1"], DeepEquals, []string{"g1"}) - c.Assert(req.Form["GroupId.2"], DeepEquals, []string{"g2"}) - c.Assert(req.Form["InstanceInitiatedShutdownBehavior.Value"], DeepEquals, []string{"terminate"}) - c.Assert(req.Form["Kernel.Value"], DeepEquals, []string{"kernel-id"}) - c.Assert(req.Form["Ramdisk.Value"], DeepEquals, []string{"ramdisk-id"}) - c.Assert(req.Form["SourceDestCheck.Value"], DeepEquals, []string{"true"}) - 
c.Assert(req.Form["SriovNetSupport.Value"], DeepEquals, []string{"simple"}) - c.Assert(req.Form["UserData"], DeepEquals, []string{"MTIzNA=="}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestCreateVpc(c *C) { - testServer.Response(200, nil, CreateVpcExample) - - options := &ec2.CreateVpc{ - CidrBlock: "foo", - } - - resp, err := s.ec2.CreateVpc(options) - - req := testServer.WaitRequest() - c.Assert(req.Form["CidrBlock"], DeepEquals, []string{"foo"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - c.Assert(resp.VPC.VpcId, Equals, "vpc-1a2b3c4d") - c.Assert(resp.VPC.State, Equals, "pending") - c.Assert(resp.VPC.CidrBlock, Equals, "10.0.0.0/16") - c.Assert(resp.VPC.DHCPOptionsID, Equals, "dopt-1a2b3c4d2") - c.Assert(resp.VPC.InstanceTenancy, Equals, "default") -} - -func (s *S) TestDescribeVpcs(c *C) { - testServer.Response(200, nil, DescribeVpcsExample) - - filter := ec2.NewFilter() - filter.Add("key1", "value1") - filter.Add("key2", "value2", "value3") - - resp, err := s.ec2.DescribeVpcs([]string{"id1", "id2"}, filter) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeVpcs"}) - c.Assert(req.Form["VpcId.1"], DeepEquals, []string{"id1"}) - c.Assert(req.Form["VpcId.2"], DeepEquals, []string{"id2"}) - c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"key1"}) - c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"}) - c.Assert(req.Form["Filter.1.Value.2"], IsNil) - c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"key2"}) - c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"}) - c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - c.Assert(resp.VPCs, HasLen, 1) -} - -func (s *S) TestCreateSubnet(c *C) { - testServer.Response(200, nil, 
CreateSubnetExample) - - options := &ec2.CreateSubnet{ - AvailabilityZone: "baz", - CidrBlock: "foo", - VpcId: "bar", - } - - resp, err := s.ec2.CreateSubnet(options) - - req := testServer.WaitRequest() - c.Assert(req.Form["VpcId"], DeepEquals, []string{"bar"}) - c.Assert(req.Form["CidrBlock"], DeepEquals, []string{"foo"}) - c.Assert(req.Form["AvailabilityZone"], DeepEquals, []string{"baz"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - c.Assert(resp.Subnet.SubnetId, Equals, "subnet-9d4a7b6c") - c.Assert(resp.Subnet.State, Equals, "pending") - c.Assert(resp.Subnet.VpcId, Equals, "vpc-1a2b3c4d") - c.Assert(resp.Subnet.CidrBlock, Equals, "10.0.1.0/24") - c.Assert(resp.Subnet.AvailableIpAddressCount, Equals, 251) -} - -func (s *S) TestModifySubnetAttribute(c *C) { - testServer.Response(200, nil, ModifySubnetAttributeExample) - - options := &ec2.ModifySubnetAttribute{ - SubnetId: "foo", - MapPublicIpOnLaunch: true, - } - - resp, err := s.ec2.ModifySubnetAttribute(options) - - req := testServer.WaitRequest() - c.Assert(req.Form["SubnetId"], DeepEquals, []string{"foo"}) - c.Assert(req.Form["MapPublicIpOnLaunch.Value"], DeepEquals, []string{"true"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestResetImageAttribute(c *C) { - testServer.Response(200, nil, ResetImageAttributeExample) - - options := ec2.ResetImageAttribute{Attribute: "launchPermission"} - resp, err := s.ec2.ResetImageAttribute("i-2ba64342", &options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"ResetImageAttribute"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestDescribeAvailabilityZonesExample1(c *C) { - testServer.Response(200, nil, DescribeAvailabilityZonesExample1) - - resp, err := s.ec2.DescribeAvailabilityZones(nil) - - req := testServer.WaitRequest() - 
c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeAvailabilityZones"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.Zones, HasLen, 4) - - z0 := resp.Zones[0] - c.Assert(z0.Name, Equals, "us-east-1a") - c.Assert(z0.Region, Equals, "us-east-1") - c.Assert(z0.State, Equals, "available") - c.Assert(z0.MessageSet, HasLen, 0) - - z1 := resp.Zones[1] - c.Assert(z1.Name, Equals, "us-east-1b") - c.Assert(z1.Region, Equals, "us-east-1") - c.Assert(z1.State, Equals, "available") - c.Assert(z1.MessageSet, HasLen, 0) - - z2 := resp.Zones[2] - c.Assert(z2.Name, Equals, "us-east-1c") - c.Assert(z2.Region, Equals, "us-east-1") - c.Assert(z2.State, Equals, "available") - c.Assert(z2.MessageSet, HasLen, 0) - - z3 := resp.Zones[3] - c.Assert(z3.Name, Equals, "us-east-1d") - c.Assert(z3.Region, Equals, "us-east-1") - c.Assert(z3.State, Equals, "available") - c.Assert(z3.MessageSet, HasLen, 0) -} - -func (s *S) TestDescribeAvailabilityZonesExample2(c *C) { - testServer.Response(200, nil, DescribeAvailabilityZonesExample2) - - resp, err := s.ec2.DescribeAvailabilityZones(nil) - - req := testServer.WaitRequest() - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeAvailabilityZones"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.Zones, HasLen, 2) - - z0 := resp.Zones[0] - c.Assert(z0.Name, Equals, "us-east-1a") - c.Assert(z0.Region, Equals, "us-east-1") - c.Assert(z0.State, Equals, "impaired") - c.Assert(z0.MessageSet, HasLen, 0) - - z1 := resp.Zones[1] - c.Assert(z1.Name, Equals, "us-east-1b") - c.Assert(z1.Region, Equals, "us-east-1") - c.Assert(z1.State, Equals, "unavailable") - c.Assert(z1.MessageSet, DeepEquals, []string{"us-east-1b is currently down for maintenance."}) -} - -func (s *S) TestCreateNetworkAcl(c *C) { - testServer.Response(200, nil, CreateNetworkAclExample) - - options := &ec2.CreateNetworkAcl{ - VpcId: 
"vpc-11ad4878", - } - - resp, err := s.ec2.CreateNetworkAcl(options) - - req := testServer.WaitRequest() - c.Assert(req.Form["VpcId"], DeepEquals, []string{"vpc-11ad4878"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.NetworkAcl.VpcId, Equals, "vpc-11ad4878") - c.Assert(resp.NetworkAcl.NetworkAclId, Equals, "acl-5fb85d36") - c.Assert(resp.NetworkAcl.Default, Equals, "false") - c.Assert(resp.NetworkAcl.EntrySet, HasLen, 2) - c.Assert(resp.NetworkAcl.EntrySet[0].RuleNumber, Equals, 32767) - c.Assert(resp.NetworkAcl.EntrySet[0].Protocol, Equals, -1) - c.Assert(resp.NetworkAcl.EntrySet[0].RuleAction, Equals, "deny") - c.Assert(resp.NetworkAcl.EntrySet[0].Egress, Equals, true) - c.Assert(resp.NetworkAcl.EntrySet[0].CidrBlock, Equals, "0.0.0.0/0") -} - -func (s *S) TestCreateNetworkAclEntry(c *C) { - testServer.Response(200, nil, CreateNetworkAclEntryRespExample) - - options := &ec2.NetworkAclEntry{ - RuleNumber: 32767, - Protocol: 6, - RuleAction: "deny", - Egress: true, - CidrBlock: "0.0.0.0/0", - PortRange: ec2.PortRange{ - To: 22, - From: 22, - }, - } - - resp, err := s.ec2.CreateNetworkAclEntry("acl-11ad4878", options) - - req := testServer.WaitRequest() - - c.Assert(req.Form["NetworkAclId"], DeepEquals, []string{"acl-11ad4878"}) - c.Assert(req.Form["RuleNumber"], DeepEquals, []string{"32767"}) - c.Assert(req.Form["Protocol"], DeepEquals, []string{"6"}) - c.Assert(req.Form["RuleAction"], DeepEquals, []string{"deny"}) - c.Assert(req.Form["Egress"], DeepEquals, []string{"true"}) - c.Assert(req.Form["CidrBlock"], DeepEquals, []string{"0.0.0.0/0"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") -} - -func (s *S) TestDescribeNetworkAcls(c *C) { - testServer.Response(200, nil, DescribeNetworkAclsExample) - - filter := ec2.NewFilter() - filter.Add("vpc-id", "vpc-5266953b") - - resp, err := s.ec2.NetworkAcls([]string{"acl-5566953c", "acl-5d659634"}, 
filter) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.NetworkAcls, HasLen, 2) - c.Assert(resp.NetworkAcls[1].AssociationSet, HasLen, 2) - c.Assert(resp.NetworkAcls[1].AssociationSet[0].NetworkAclAssociationId, Equals, "aclassoc-5c659635") - c.Assert(resp.NetworkAcls[1].AssociationSet[0].NetworkAclId, Equals, "acl-5d659634") - c.Assert(resp.NetworkAcls[1].AssociationSet[0].SubnetId, Equals, "subnet-ff669596") -} - -func (s *S) TestReplaceNetworkAclAssociation(c *C) { - testServer.Response(200, nil, ReplaceNetworkAclAssociationResponseExample) - - resp, err := s.ec2.ReplaceNetworkAclAssociation("aclassoc-e5b95c8c", "acl-5fb85d36") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE") - c.Assert(resp.NewAssociationId, Equals, "aclassoc-17b85d7e") -} - -func (s *S) TestCreateCustomerGateway(c *C) { - testServer.Response(200, nil, CreateCustomerGatewayResponseExample) - - options := &ec2.CreateCustomerGateway{ - Type: "ipsec.1", - IpAddress: "10.0.0.20", - BgpAsn: 65534, - } - - resp, err := s.ec2.CreateCustomerGateway(options) - - req := testServer.WaitRequest() - c.Assert(req.Form["Type"], DeepEquals, []string{"ipsec.1"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - c.Assert(resp.CustomerGateway.Type, Equals, "ipsec.1") - c.Assert(resp.CustomerGateway.State, Equals, "pending") - c.Assert(resp.CustomerGateway.BgpAsn, Equals, 65534) - c.Assert(resp.CustomerGateway.IpAddress, Equals, "10.0.0.20") -} - -func (s *S) TestDescribeCustomerGateways(c *C) { - testServer.Response(200, nil, DescribeCustomerGatewaysResponseExample) - - filter := ec2.NewFilter() - filter.Add("state", "pending") - - resp, err := s.ec2.DescribeCustomerGateways([]string{"cgw-b4dc3961", "cgw-b4dc3962"}, filter) - - req := testServer.WaitRequest() - c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"state"}) - 
c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"pending"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - c.Assert(resp.CustomerGateways, HasLen, 2) - c.Assert(resp.CustomerGateways[0].CustomerGatewayId, Equals, "cgw-b4dc3961") - c.Assert(resp.CustomerGateways[1].CustomerGatewayId, Equals, "cgw-b4dc3962") -} - -func (s *S) TestDeleteCustomerGateway(c *C) { - testServer.Response(200, nil, DeleteCustomerGatewayResponseExample) - - resp, err := s.ec2.DeleteCustomerGateway("cgw-b4dc3961") - - req := testServer.WaitRequest() - c.Assert(req.Form["CustomerGatewayId"], DeepEquals, []string{"cgw-b4dc3961"}) - - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - c.Assert(resp.Return, Equals, true) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2t_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2t_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2t_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/ec2t_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,580 +0,0 @@ -package ec2_test - -import ( - "fmt" - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/goamz/ec2/ec2test" - "github.com/mitchellh/goamz/testutil" - . "github.com/motain/gocheck" - "regexp" - "sort" -) - -// LocalServer represents a local ec2test fake server. -type LocalServer struct { - auth aws.Auth - region aws.Region - srv *ec2test.Server -} - -func (s *LocalServer) SetUp(c *C) { - srv, err := ec2test.NewServer() - c.Assert(err, IsNil) - c.Assert(srv, NotNil) - - s.srv = srv - s.region = aws.Region{EC2Endpoint: srv.URL()} -} - -// LocalServerSuite defines tests that will run -// against the local ec2test server. 
It includes -// selected tests from ClientTests; -// when the ec2test functionality is sufficient, it should -// include all of them, and ClientTests can be simply embedded. -type LocalServerSuite struct { - srv LocalServer - ServerTests - clientTests ClientTests -} - -var _ = Suite(&LocalServerSuite{}) - -func (s *LocalServerSuite) SetUpSuite(c *C) { - s.srv.SetUp(c) - s.ServerTests.ec2 = ec2.NewWithClient(s.srv.auth, s.srv.region, testutil.DefaultClient) - s.clientTests.ec2 = ec2.NewWithClient(s.srv.auth, s.srv.region, testutil.DefaultClient) -} - -func (s *LocalServerSuite) TestRunAndTerminate(c *C) { - s.clientTests.TestRunAndTerminate(c) -} - -func (s *LocalServerSuite) TestSecurityGroups(c *C) { - s.clientTests.TestSecurityGroups(c) -} - -// TestUserData is not defined on ServerTests because it -// requires the ec2test server to function. -func (s *LocalServerSuite) TestUserData(c *C) { - data := make([]byte, 256) - for i := range data { - data[i] = byte(i) - } - inst, err := s.ec2.RunInstances(&ec2.RunInstances{ - ImageId: imageId, - InstanceType: "t1.micro", - UserData: data, - }) - c.Assert(err, IsNil) - c.Assert(inst, NotNil) - c.Assert(inst.Instances[0].DNSName, Equals, inst.Instances[0].InstanceId+".example.com") - - id := inst.Instances[0].InstanceId - - defer s.ec2.TerminateInstances([]string{id}) - - tinst := s.srv.srv.Instance(id) - c.Assert(tinst, NotNil) - c.Assert(tinst.UserData, DeepEquals, data) -} - -// AmazonServerSuite runs the ec2test server tests against a live EC2 server. -// It will only be activated if the -all flag is specified. 
-type AmazonServerSuite struct { - srv AmazonServer - ServerTests -} - -var _ = Suite(&AmazonServerSuite{}) - -func (s *AmazonServerSuite) SetUpSuite(c *C) { - if !testutil.Amazon { - c.Skip("AmazonServerSuite tests not enabled") - } - s.srv.SetUp(c) - s.ServerTests.ec2 = ec2.NewWithClient(s.srv.auth, aws.USEast, testutil.DefaultClient) -} - -// ServerTests defines a set of tests designed to test -// the ec2test local fake ec2 server. -// It is not used as a test suite in itself, but embedded within -// another type. -type ServerTests struct { - ec2 *ec2.EC2 -} - -func terminateInstances(c *C, e *ec2.EC2, insts []*ec2.Instance) { - var ids []string - for _, inst := range insts { - if inst != nil { - ids = append(ids, inst.InstanceId) - } - } - _, err := e.TerminateInstances(ids) - c.Check(err, IsNil, Commentf("%d INSTANCES LEFT RUNNING!!!", len(ids))) -} - -func (s *ServerTests) makeTestGroup(c *C, name, descr string) ec2.SecurityGroup { - // Clean it up if a previous test left it around. 
- _, err := s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name}) - if err != nil && err.(*ec2.Error).Code != "InvalidGroup.NotFound" { - c.Fatalf("delete security group: %v", err) - } - - resp, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr}) - c.Assert(err, IsNil) - c.Assert(resp.Name, Equals, name) - return resp.SecurityGroup -} - -func (s *ServerTests) TestIPPerms(c *C) { - g0 := s.makeTestGroup(c, "goamz-test0", "ec2test group 0") - defer s.ec2.DeleteSecurityGroup(g0) - - g1 := s.makeTestGroup(c, "goamz-test1", "ec2test group 1") - defer s.ec2.DeleteSecurityGroup(g1) - - resp, err := s.ec2.SecurityGroups([]ec2.SecurityGroup{g0, g1}, nil) - c.Assert(err, IsNil) - c.Assert(resp.Groups, HasLen, 2) - c.Assert(resp.Groups[0].IPPerms, HasLen, 0) - c.Assert(resp.Groups[1].IPPerms, HasLen, 0) - - ownerId := resp.Groups[0].OwnerId - - // test some invalid parameters - // TODO more - _, err = s.ec2.AuthorizeSecurityGroup(g0, []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 0, - ToPort: 1024, - SourceIPs: []string{"z127.0.0.1/24"}, - }}) - c.Assert(err, NotNil) - c.Check(err.(*ec2.Error).Code, Equals, "InvalidPermission.Malformed") - - // Check that AuthorizeSecurityGroup adds the correct authorizations. - _, err = s.ec2.AuthorizeSecurityGroup(g0, []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 2000, - ToPort: 2001, - SourceIPs: []string{"127.0.0.0/24"}, - SourceGroups: []ec2.UserSecurityGroup{{ - Name: g1.Name, - }, { - Id: g0.Id, - }}, - }, { - Protocol: "tcp", - FromPort: 2000, - ToPort: 2001, - SourceIPs: []string{"200.1.1.34/32"}, - }}) - c.Assert(err, IsNil) - - resp, err = s.ec2.SecurityGroups([]ec2.SecurityGroup{g0}, nil) - c.Assert(err, IsNil) - c.Assert(resp.Groups, HasLen, 1) - c.Assert(resp.Groups[0].IPPerms, HasLen, 1) - - perm := resp.Groups[0].IPPerms[0] - srcg := perm.SourceGroups - c.Assert(srcg, HasLen, 2) - - // Normalize so we don't care about returned order. 
- if srcg[0].Name == g1.Name { - srcg[0], srcg[1] = srcg[1], srcg[0] - } - c.Check(srcg[0].Name, Equals, g0.Name) - c.Check(srcg[0].Id, Equals, g0.Id) - c.Check(srcg[0].OwnerId, Equals, ownerId) - c.Check(srcg[1].Name, Equals, g1.Name) - c.Check(srcg[1].Id, Equals, g1.Id) - c.Check(srcg[1].OwnerId, Equals, ownerId) - - sort.Strings(perm.SourceIPs) - c.Check(perm.SourceIPs, DeepEquals, []string{"127.0.0.0/24", "200.1.1.34/32"}) - - // Check that we can't delete g1 (because g0 is using it) - _, err = s.ec2.DeleteSecurityGroup(g1) - c.Assert(err, NotNil) - c.Check(err.(*ec2.Error).Code, Equals, "InvalidGroup.InUse") - - _, err = s.ec2.RevokeSecurityGroup(g0, []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 2000, - ToPort: 2001, - SourceGroups: []ec2.UserSecurityGroup{{Id: g1.Id}}, - }, { - Protocol: "tcp", - FromPort: 2000, - ToPort: 2001, - SourceIPs: []string{"200.1.1.34/32"}, - }}) - c.Assert(err, IsNil) - - resp, err = s.ec2.SecurityGroups([]ec2.SecurityGroup{g0}, nil) - c.Assert(err, IsNil) - c.Assert(resp.Groups, HasLen, 1) - c.Assert(resp.Groups[0].IPPerms, HasLen, 1) - - perm = resp.Groups[0].IPPerms[0] - srcg = perm.SourceGroups - c.Assert(srcg, HasLen, 1) - c.Check(srcg[0].Name, Equals, g0.Name) - c.Check(srcg[0].Id, Equals, g0.Id) - c.Check(srcg[0].OwnerId, Equals, ownerId) - - c.Check(perm.SourceIPs, DeepEquals, []string{"127.0.0.0/24"}) - - // We should be able to delete g1 now because we've removed its only use. - _, err = s.ec2.DeleteSecurityGroup(g1) - c.Assert(err, IsNil) - - _, err = s.ec2.DeleteSecurityGroup(g0) - c.Assert(err, IsNil) - - f := ec2.NewFilter() - f.Add("group-id", g0.Id, g1.Id) - resp, err = s.ec2.SecurityGroups(nil, f) - c.Assert(err, IsNil) - c.Assert(resp.Groups, HasLen, 0) -} - -func (s *ServerTests) TestDuplicateIPPerm(c *C) { - name := "goamz-test" - descr := "goamz security group for tests" - - // Clean it up, if a previous test left it around and avoid leaving it around. 
- s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name}) - defer s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name}) - - resp1, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr}) - c.Assert(err, IsNil) - c.Assert(resp1.Name, Equals, name) - - perms := []ec2.IPPerm{{ - Protocol: "tcp", - FromPort: 200, - ToPort: 1024, - SourceIPs: []string{"127.0.0.1/24"}, - }, { - Protocol: "tcp", - FromPort: 0, - ToPort: 100, - SourceIPs: []string{"127.0.0.1/24"}, - }} - - _, err = s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms[0:1]) - c.Assert(err, IsNil) - - _, err = s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms[0:2]) - c.Assert(err, ErrorMatches, `.*\(InvalidPermission.Duplicate\)`) -} - -type filterSpec struct { - name string - values []string -} - -func (s *ServerTests) TestInstanceFiltering(c *C) { - groupResp, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: sessionName("testgroup1"), Description: "testgroup one description"}) - c.Assert(err, IsNil) - group1 := groupResp.SecurityGroup - defer s.ec2.DeleteSecurityGroup(group1) - - groupResp, err = s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: sessionName("testgroup2"), Description: "testgroup two description"}) - c.Assert(err, IsNil) - group2 := groupResp.SecurityGroup - defer s.ec2.DeleteSecurityGroup(group2) - - insts := make([]*ec2.Instance, 3) - inst, err := s.ec2.RunInstances(&ec2.RunInstances{ - MinCount: 2, - ImageId: imageId, - InstanceType: "t1.micro", - SecurityGroups: []ec2.SecurityGroup{group1}, - }) - c.Assert(err, IsNil) - insts[0] = &inst.Instances[0] - insts[1] = &inst.Instances[1] - defer terminateInstances(c, s.ec2, insts) - - imageId2 := "ami-e358958a" // Natty server, i386, EBS store - inst, err = s.ec2.RunInstances(&ec2.RunInstances{ - ImageId: imageId2, - InstanceType: "t1.micro", - SecurityGroups: []ec2.SecurityGroup{group2}, - }) - c.Assert(err, IsNil) - insts[2] = &inst.Instances[0] - - ids := func(indices 
...int) (instIds []string) { - for _, index := range indices { - instIds = append(instIds, insts[index].InstanceId) - } - return - } - - tests := []struct { - about string - instanceIds []string // instanceIds argument to Instances method. - filters []filterSpec // filters argument to Instances method. - resultIds []string // set of instance ids of expected results. - allowExtra bool // resultIds may be incomplete. - err string // expected error. - }{ - { - about: "check that Instances returns all instances", - resultIds: ids(0, 1, 2), - allowExtra: true, - }, { - about: "check that specifying two instance ids returns them", - instanceIds: ids(0, 2), - resultIds: ids(0, 2), - }, { - about: "check that specifying a non-existent instance id gives an error", - instanceIds: append(ids(0), "i-deadbeef"), - err: `.*\(InvalidInstanceID\.NotFound\)`, - }, { - about: "check that a filter allowed both instances returns both of them", - filters: []filterSpec{ - {"instance-id", ids(0, 2)}, - }, - resultIds: ids(0, 2), - }, { - about: "check that a filter allowing only one instance returns it", - filters: []filterSpec{ - {"instance-id", ids(1)}, - }, - resultIds: ids(1), - }, { - about: "check that a filter allowing no instances returns none", - filters: []filterSpec{ - {"instance-id", []string{"i-deadbeef12345"}}, - }, - }, { - about: "check that filtering on group id works", - filters: []filterSpec{ - {"group-id", []string{group1.Id}}, - }, - resultIds: ids(0, 1), - }, { - about: "check that filtering on group name works", - filters: []filterSpec{ - {"group-name", []string{group1.Name}}, - }, - resultIds: ids(0, 1), - }, { - about: "check that filtering on image id works", - filters: []filterSpec{ - {"image-id", []string{imageId}}, - }, - resultIds: ids(0, 1), - allowExtra: true, - }, { - about: "combination filters 1", - filters: []filterSpec{ - {"image-id", []string{imageId, imageId2}}, - {"group-name", []string{group1.Name}}, - }, - resultIds: ids(0, 1), - }, { - about: 
"combination filters 2", - filters: []filterSpec{ - {"image-id", []string{imageId2}}, - {"group-name", []string{group1.Name}}, - }, - }, - } - for i, t := range tests { - c.Logf("%d. %s", i, t.about) - var f *ec2.Filter - if t.filters != nil { - f = ec2.NewFilter() - for _, spec := range t.filters { - f.Add(spec.name, spec.values...) - } - } - resp, err := s.ec2.Instances(t.instanceIds, f) - if t.err != "" { - c.Check(err, ErrorMatches, t.err) - continue - } - c.Assert(err, IsNil) - insts := make(map[string]*ec2.Instance) - for _, r := range resp.Reservations { - for j := range r.Instances { - inst := &r.Instances[j] - c.Check(insts[inst.InstanceId], IsNil, Commentf("duplicate instance id: %q", inst.InstanceId)) - insts[inst.InstanceId] = inst - } - } - if !t.allowExtra { - c.Check(insts, HasLen, len(t.resultIds), Commentf("expected %d instances got %#v", len(t.resultIds), insts)) - } - for j, id := range t.resultIds { - c.Check(insts[id], NotNil, Commentf("instance id %d (%q) not found; got %#v", j, id, insts)) - } - } -} - -func idsOnly(gs []ec2.SecurityGroup) []ec2.SecurityGroup { - for i := range gs { - gs[i].Name = "" - } - return gs -} - -func namesOnly(gs []ec2.SecurityGroup) []ec2.SecurityGroup { - for i := range gs { - gs[i].Id = "" - } - return gs -} - -func (s *ServerTests) TestGroupFiltering(c *C) { - g := make([]ec2.SecurityGroup, 4) - for i := range g { - resp, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: sessionName(fmt.Sprintf("testgroup%d", i)), Description: fmt.Sprintf("testdescription%d", i)}) - c.Assert(err, IsNil) - g[i] = resp.SecurityGroup - c.Logf("group %d: %v", i, g[i]) - defer s.ec2.DeleteSecurityGroup(g[i]) - } - - perms := [][]ec2.IPPerm{ - {{ - Protocol: "tcp", - FromPort: 100, - ToPort: 200, - SourceIPs: []string{"1.2.3.4/32"}, - }}, - {{ - Protocol: "tcp", - FromPort: 200, - ToPort: 300, - SourceGroups: []ec2.UserSecurityGroup{{Id: g[1].Id}}, - }}, - {{ - Protocol: "udp", - FromPort: 200, - ToPort: 400, - SourceGroups: 
[]ec2.UserSecurityGroup{{Id: g[1].Id}}, - }}, - } - for i, ps := range perms { - _, err := s.ec2.AuthorizeSecurityGroup(g[i], ps) - c.Assert(err, IsNil) - } - - groups := func(indices ...int) (gs []ec2.SecurityGroup) { - for _, index := range indices { - gs = append(gs, g[index]) - } - return - } - - type groupTest struct { - about string - groups []ec2.SecurityGroup // groupIds argument to SecurityGroups method. - filters []filterSpec // filters argument to SecurityGroups method. - results []ec2.SecurityGroup // set of expected result groups. - allowExtra bool // specified results may be incomplete. - err string // expected error. - } - filterCheck := func(name, val string, gs []ec2.SecurityGroup) groupTest { - return groupTest{ - about: "filter check " + name, - filters: []filterSpec{{name, []string{val}}}, - results: gs, - allowExtra: true, - } - } - tests := []groupTest{ - { - about: "check that SecurityGroups returns all groups", - results: groups(0, 1, 2, 3), - allowExtra: true, - }, { - about: "check that specifying two group ids returns them", - groups: idsOnly(groups(0, 2)), - results: groups(0, 2), - }, { - about: "check that specifying names only works", - groups: namesOnly(groups(0, 2)), - results: groups(0, 2), - }, { - about: "check that specifying a non-existent group id gives an error", - groups: append(groups(0), ec2.SecurityGroup{Id: "sg-eeeeeeeee"}), - err: `.*\(InvalidGroup\.NotFound\)`, - }, { - about: "check that a filter allowed two groups returns both of them", - filters: []filterSpec{ - {"group-id", []string{g[0].Id, g[2].Id}}, - }, - results: groups(0, 2), - }, - { - about: "check that the previous filter works when specifying a list of ids", - groups: groups(1, 2), - filters: []filterSpec{ - {"group-id", []string{g[0].Id, g[2].Id}}, - }, - results: groups(2), - }, { - about: "check that a filter allowing no groups returns none", - filters: []filterSpec{ - {"group-id", []string{"sg-eeeeeeeee"}}, - }, - }, - filterCheck("description", 
"testdescription1", groups(1)), - filterCheck("group-name", g[2].Name, groups(2)), - filterCheck("ip-permission.cidr", "1.2.3.4/32", groups(0)), - filterCheck("ip-permission.group-name", g[1].Name, groups(1, 2)), - filterCheck("ip-permission.protocol", "udp", groups(2)), - filterCheck("ip-permission.from-port", "200", groups(1, 2)), - filterCheck("ip-permission.to-port", "200", groups(0)), - // TODO owner-id - } - for i, t := range tests { - c.Logf("%d. %s", i, t.about) - var f *ec2.Filter - if t.filters != nil { - f = ec2.NewFilter() - for _, spec := range t.filters { - f.Add(spec.name, spec.values...) - } - } - resp, err := s.ec2.SecurityGroups(t.groups, f) - if t.err != "" { - c.Check(err, ErrorMatches, t.err) - continue - } - c.Assert(err, IsNil) - groups := make(map[string]*ec2.SecurityGroup) - for j := range resp.Groups { - group := &resp.Groups[j].SecurityGroup - c.Check(groups[group.Id], IsNil, Commentf("duplicate group id: %q", group.Id)) - - groups[group.Id] = group - } - // If extra groups may be returned, eliminate all groups that - // we did not create in this session apart from the default group. 
- if t.allowExtra { - namePat := regexp.MustCompile(sessionName("testgroup[0-9]")) - for id, g := range groups { - if !namePat.MatchString(g.Name) { - delete(groups, id) - } - } - } - c.Check(groups, HasLen, len(t.results)) - for j, g := range t.results { - rg := groups[g.Id] - c.Assert(rg, NotNil, Commentf("group %d (%v) not found; got %#v", j, g, groups)) - c.Check(rg.Name, Equals, g.Name, Commentf("group %d (%v)", j, g)) - } - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/export_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/export_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/export_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -package ec2 - -import ( - "github.com/mitchellh/goamz/aws" - "time" -) - -func Sign(auth aws.Auth, method, path string, params map[string]string, host string) { - sign(auth, method, path, params, host) -} - -func fixedTime() time.Time { - return time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC) -} - -func FakeTime(fakeIt bool) { - if fakeIt { - timeNow = fixedTime - } else { - timeNow = time.Now - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1263 +0,0 @@ -package ec2_test - -var ErrorDump = ` - -UnsupportedOperation -AMIs with an instance-store root device are not supported for the instance type 't1.micro'. 
-0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4 -` - -// http://goo.gl/Mcm3b -var RunInstancesExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - r-47a5402e - 999988887777 - - - sg-67ad940e - default - - - - - i-2ba64342 - ami-60a54009 - - 0 - pending - - - - example-key-name - 0 - m1.small - 2007-08-07T11:51:50.000Z - - us-east-1b - - - enabled - - paravirtual - - - xen - - - i-2bc64242 - ami-60a54009 - - 0 - pending - - - - example-key-name - 1 - m1.small - 2007-08-07T11:51:50.000Z - - us-east-1b - - - enabled - - paravirtual - - - xen - - - i-2be64332 - ami-60a54009 - - 0 - pending - - - - example-key-name - 2 - m1.small - 2007-08-07T11:51:50.000Z - - us-east-1b - - - enabled - - paravirtual - - - xen - - - -` - -// http://goo.gl/GRZgCD -var RequestSpotInstancesExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - sir-1a2b3c4d - 0.5 - one-time - open - - pending-evaluation - 2008-05-07T12:51:50.000Z - Your Spot request has been submitted for review, and is pending evaluation. - - MyAzGroup - - ami-1a2b3c4d - gsg-keypair - - - sg-1a2b3c4d - websrv - - - m1.small - - - false - - false - - YYYY-MM-DDTHH:MM:SS.000Z - Linux/UNIX - - - -` - -// http://goo.gl/KsKJJk -var DescribeSpotRequestsExample = ` - - b1719f2a-5334-4479-b2f1-26926EXAMPLE - - - sir-1a2b3c4d - 0.5 - one-time - active - - fulfilled - 2008-05-07T12:51:50.000Z - Your Spot request is fulfilled. 
- - - ami-1a2b3c4d - gsg-keypair - - - sg-1a2b3c4d - websrv - - - m1.small - - false - - false - - i-1a2b3c4d - YYYY-MM-DDTHH:MM:SS.000Z - Linux/UNIX - us-east-1a - - - -` - -// http://goo.gl/DcfFgJ -var CancelSpotRequestsExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - sir-1a2b3c4d - cancelled - - - -` - -// http://goo.gl/3BKHj -var TerminateInstancesExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - i-3ea74257 - - 32 - shutting-down - - - 16 - running - - - - -` - -// http://goo.gl/mLbmw -var DescribeInstancesExample1 = ` - - 98e3c9a4-848c-4d6d-8e8a-b1bdEXAMPLE - - - r-b27e30d9 - 999988887777 - - - sg-67ad940e - default - - - - - i-c5cd56af - ami-1a2b3c4d - - 16 - running - - domU-12-31-39-10-56-34.compute-1.internal - ec2-174-129-165-232.compute-1.amazonaws.com - - GSG_Keypair - 0 - - m1.small - 2010-08-17T01:15:18.000Z - - us-east-1b - - - aki-94c527fd - ari-96c527ff - - disabled - - 10.198.85.190 - 174.129.165.232 - i386 - ebs - /dev/sda1 - - - /dev/sda1 - - vol-a082c1c9 - attached - 2010-08-17T01:15:21.000Z - false - - - - spot - sir-7a688402 - paravirtual - - - xen - - - 854251627541 - - - r-b67e30dd - 999988887777 - - - sg-67ad940e - default - - - - - i-d9cd56b3 - ami-1a2b3c4d - - 16 - running - - domU-12-31-39-10-54-E5.compute-1.internal - ec2-184-73-58-78.compute-1.amazonaws.com - - GSG_Keypair - 0 - - m1.large - 2010-08-17T01:15:19.000Z - - us-east-1b - - - aki-94c527fd - ari-96c527ff - - disabled - - 10.198.87.19 - 184.73.58.78 - i386 - ebs - /dev/sda1 - - - /dev/sda1 - - vol-a282c1cb - attached - 2010-08-17T01:15:23.000Z - false - - - - spot - sir-55a3aa02 - paravirtual - - - xen - - - 854251627541 - - - -` - -// http://goo.gl/mLbmw -var DescribeInstancesExample2 = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - r-bc7e30d7 - 999988887777 - - - sg-67ad940e - default - - - - - i-c7cd56ad - ami-b232d0db - - 16 - running - - domU-12-31-39-01-76-06.compute-1.internal - ec2-72-44-52-124.compute-1.amazonaws.com - GSG_Keypair - 0 - - 
m1.small - 2010-08-17T01:15:16.000Z - - us-east-1b - - aki-94c527fd - ari-96c527ff - - disabled - - 10.255.121.240 - 72.44.52.124 - i386 - ebs - /dev/sda1 - - - /dev/sda1 - - vol-a482c1cd - attached - 2010-08-17T01:15:26.000Z - true - - - - paravirtual - - - - webserver - - - - stack - Production - - - xen - - - - - -` - -// http://goo.gl/cxU41 -var CreateImageExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - ami-4fa54026 - -` - -// http://goo.gl/V0U25 -var DescribeImagesExample = ` - - 4a4a27a2-2e7c-475d-b35b-ca822EXAMPLE - - - ami-a2469acf - aws-marketplace/example-marketplace-amzn-ami.1 - available - 123456789999 - true - - - a1b2c3d4e5f6g7h8i9j10k11 - marketplace - - - i386 - machine - aki-805ea7e9 - aws-marketplace - example-marketplace-amzn-ami.1 - Amazon Linux AMI i386 EBS - ebs - /dev/sda1 - - - /dev/sda1 - - snap-787e9403 - 8 - true - - - - paravirtual - xen - - - -` - -// http://goo.gl/bHO3z -var ImageAttributeExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - ami-61a54008 - - - all - - - 495219933132 - - - -` - -// http://goo.gl/ttcda -var CreateSnapshotExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - snap-78a54011 - vol-4d826724 - pending - 2008-05-07T12:51:50.000Z - 60% - 111122223333 - 10 - Daily Backup - -` - -// http://goo.gl/vwU1y -var DeleteSnapshotExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/nkovs -var DescribeSnapshotsExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - snap-1a2b3c4d - vol-8875daef - pending - 2010-07-29T04:12:01.000Z - 30% - 111122223333 - 15 - Daily Backup - - - Purpose - demo_db_14_backup - - - - - -` - -// http://goo.gl/YUjO4G -var ModifyImageAttributeExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/hQwPCK -var CopyImageExample = ` - - 60bc441d-fa2c-494d-b155-5d6a3EXAMPLE - ami-4d3c2b1a - -` - -var CreateKeyPairExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - foo - - 
00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 - - ---- BEGIN RSA PRIVATE KEY ---- -MIICiTCCAfICCQD6m7oRw0uXOjANBgkqhkiG9w0BAQUFADCBiDELMAkGA1UEBhMC -VVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6 -b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAd -BgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wHhcNMTEwNDI1MjA0NTIxWhcN -MTIwNDI0MjA0NTIxWjCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYD -VQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25z -b2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFt -YXpvbi5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMaK0dn+a4GmWIWJ -21uUSfwfEvySWtC2XADZ4nB+BLYgVIk60CpiwsZ3G93vUEIO3IyNoH/f0wYK8m9T -rDHudUZg3qX4waLG5M43q7Wgc/MbQITxOUSQv7c7ugFFDzQGBzZswY6786m86gpE -Ibb3OhjZnzcvQAaRHhdlQWIMm2nrAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAtCu4 -nUhVVxYUntneD9+h8Mg9q6q+auNKyExzyLwaxlAoo7TJHidbtS4J5iNmZgXL0Fkb -FFBjvSfpJIlJ00zbhNYS5f6GuoEDmFJl0ZxBHjJnyp378OD8uTs7fLvjx79LjSTb -NYiytVbZPQUQ5Yaxu2jXnimvw3rrszlaEXAMPLE= ------END RSA PRIVATE KEY----- - - -` - -var DeleteKeyPairExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/Eo7Yl -var CreateSecurityGroupExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - sg-67ad940e - -` - -// http://goo.gl/k12Uy -var DescribeSecurityGroupsExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - 999988887777 - WebServers - sg-67ad940e - Web Servers - - - tcp - 80 - 80 - - - - 0.0.0.0/0 - - - - - - - tcp - 80 - 80 - - - - 0.0.0.0/0 - - - - - - - 999988887777 - RangedPortsBySource - sg-76abc467 - Group A - - - tcp - 6000 - 7000 - - - - - - - -` - -// A dump which includes groups within ip permissions. 
-var DescribeSecurityGroupsDump = ` - - - 87b92b57-cc6e-48b2-943f-f6f0e5c9f46c - - - 12345 - default - default group - - - icmp - -1 - -1 - - - 12345 - default - sg-67ad940e - - - - - - tcp - 0 - 65535 - - - 12345 - other - sg-76abc467 - - - - - - - - -` - -// http://goo.gl/QJJDO -var DeleteSecurityGroupExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/u2sDJ -var AuthorizeSecurityGroupIngressExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/u2sDJ -var AuthorizeSecurityGroupEgressExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/Mz7xr -var RevokeSecurityGroupIngressExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/Vmkqc -var CreateTagsExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/awKeF -var StartInstancesExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - i-10a64379 - - 0 - pending - - - 80 - stopped - - - - -` - -// http://goo.gl/436dJ -var StopInstancesExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - i-10a64379 - - 64 - stopping - - - 16 - running - - - - -` - -// http://goo.gl/baoUf -var RebootInstancesExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/9rprDN -var AllocateAddressExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - 198.51.100.1 - vpc - eipalloc-5723d13e - -` - -// http://goo.gl/DFySJY -var DescribeInstanceStatusExample = ` - - 3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE - - - i-1a2b3c4d - us-east-1d - - 16 - running - - - impaired -
    - - reachability - failed - YYYY-MM-DDTHH:MM:SS.000Z - -
    -
    - - impaired -
    - - reachability - failed - YYYY-MM-DDTHH:MM:SS.000Z - -
    -
    - - - instance-retirement - The instance is running on degraded hardware - YYYY-MM-DDTHH:MM:SS+0000 - YYYY-MM-DDTHH:MM:SS+0000 - - -
    - - i-2a2b3c4d - us-east-1d - - 16 - running - - - ok -
    - - reachability - passed - -
    -
    - - ok -
    - - reachability - passed - -
    -
    - - - instance-reboot - The instance is scheduled for a reboot - YYYY-MM-DDTHH:MM:SS+0000 - YYYY-MM-DDTHH:MM:SS+0000 - - -
    - - i-3a2b3c4d - us-east-1c - - 16 - running - - - ok -
    - - reachability - passed - -
    -
    - - ok -
    - - reachability - passed - -
    -
    -
    - - i-4a2b3c4d - us-east-1c - - 16 - running - - - ok -
    - - reachability - passed - -
    -
    - - insufficient-data -
    - - reachability - insufficient-data - -
    -
    -
    -
    -
    -` - -// http://goo.gl/3Q0oCc -var ReleaseAddressExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/uOSQE -var AssociateAddressExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - eipassoc-fc5ca095 - -` - -// http://goo.gl/LrOa0 -var DisassociateAddressExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/icuXh5 -var ModifyInstanceExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -var CreateVpcExample = ` - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - vpc-1a2b3c4d - pending - 10.0.0.0/16 - dopt-1a2b3c4d2 - default - - - -` - -var DescribeVpcsExample = ` - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - - vpc-1a2b3c4d - available - 10.0.0.0/23 - dopt-7a8b9c2d - default - false - - - - -` - -var CreateSubnetExample = ` - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - subnet-9d4a7b6c - pending - vpc-1a2b3c4d - 10.0.1.0/24 - 251 - us-east-1a - - - -` - -// http://goo.gl/tu2Kxm -var ModifySubnetAttributeExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/r6ZCPm -var ResetImageAttributeExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/ylxT4R -var DescribeAvailabilityZonesExample1 = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - us-east-1a - available - us-east-1 - - - - us-east-1b - available - us-east-1 - - - - us-east-1c - available - us-east-1 - - - - us-east-1d - available - us-east-1 - - - - -` - -// http://goo.gl/ylxT4R -var DescribeAvailabilityZonesExample2 = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - us-east-1a - impaired - us-east-1 - - - - us-east-1b - unavailable - us-east-1 - - us-east-1b is currently down for maintenance. 
- - - - -` - -// http://goo.gl/sdomyE -var CreateNetworkAclExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - acl-5fb85d36 - vpc-11ad4878 - false - - - 32767 - -1 - deny - true - 0.0.0.0/0 - - - 32767 - -1 - deny - false - 0.0.0.0/0 - - - - - - -` - -// http://goo.gl/6sYloC -var CreateNetworkAclEntryRespExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - true - -` - -// http://goo.gl/5tqceF -var DescribeNetworkAclsExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - - - acl-5566953c - vpc-5266953b - true - - - 100 - -1 - allow - true - 0.0.0.0/0 - - - 32767 - -1 - deny - true - 0.0.0.0/0 - - - 100 - -1 - allow - false - 0.0.0.0/0 - - - 32767 - -1 - deny - false - 0.0.0.0/0 - - - - - - - acl-5d659634 - vpc-5266953b - false - - - 110 - 6 - allow - true - 0.0.0.0/0 - - 49152 - 65535 - - - - 32767 - -1 - deny - true - 0.0.0.0/0 - - - 110 - 6 - allow - false - 0.0.0.0/0 - - 80 - 80 - - - - 120 - 6 - allow - false - 0.0.0.0/0 - - 443 - 443 - - - - 32767 - -1 - deny - false - 0.0.0.0/0 - - - - - aclassoc-5c659635 - acl-5d659634 - subnet-ff669596 - - - aclassoc-c26596ab - acl-5d659634 - subnet-f0669599 - - - - - - -` - -var ReplaceNetworkAclAssociationResponseExample = ` - - 59dbff89-35bd-4eac-99ed-be587EXAMPLE - aclassoc-17b85d7e - -` - -var CreateCustomerGatewayResponseExample = ` - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - cgw-b4dc3961 - pending - ipsec.1 - 10.0.0.20 - 65534 - - - -` - -var DescribeCustomerGatewaysResponseExample = ` - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - - cgw-b4dc3961 - available - ipsec.1 - 12.1.2.3 - 65534 - - - - cgw-b4dc3962 - pending - ipsec.1 - 12.1.2.4 - 65500 - - - - -` -var DeleteCustomerGatewayResponseExample = ` - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - true -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/sign.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/sign.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -package ec2 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "github.com/mitchellh/goamz/aws" - "sort" - "strings" -) - -// ---------------------------------------------------------------------------- -// EC2 signing (http://goo.gl/fQmAN) - -var b64 = base64.StdEncoding - -func sign(auth aws.Auth, method, path string, params map[string]string, host string) { - params["AWSAccessKeyId"] = auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - if auth.Token != "" { - params["SecurityToken"] = auth.Token - } - - // AWS specifies that the parameters in a signed request must - // be provided in the natural order of the keys. This is distinct - // from the natural order of the encoded value of key=value. - // Percent and equals affect the sorting order. 
- var keys, sarray []string - for k, _ := range params { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(params[k])) - } - joined := strings.Join(sarray, "&") - payload := method + "\n" + host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/sign_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/sign_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/sign_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/ec2/sign_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -package ec2_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/ec2" - . 
"github.com/motain/gocheck" -) - -// EC2 ReST authentication docs: http://goo.gl/fQmAN - -var testAuth = aws.Auth{"user", "secret", ""} - -func (s *S) TestBasicSignature(c *C) { - params := map[string]string{} - ec2.Sign(testAuth, "GET", "/path", params, "localhost") - c.Assert(params["SignatureVersion"], Equals, "2") - c.Assert(params["SignatureMethod"], Equals, "HmacSHA256") - expected := "6lSe5QyXum0jMVc7cOUz32/52ZnL7N5RyKRk/09yiK4=" - c.Assert(params["Signature"], Equals, expected) -} - -func (s *S) TestParamSignature(c *C) { - params := map[string]string{ - "param1": "value1", - "param2": "value2", - "param3": "value3", - } - ec2.Sign(testAuth, "GET", "/path", params, "localhost") - expected := "XWOR4+0lmK8bD8CGDGZ4kfuSPbb2JibLJiCl/OPu1oU=" - c.Assert(params["Signature"], Equals, expected) -} - -func (s *S) TestManyParams(c *C) { - params := map[string]string{ - "param1": "value10", - "param2": "value2", - "param3": "value3", - "param4": "value4", - "param5": "value5", - "param6": "value6", - "param7": "value7", - "param8": "value8", - "param9": "value9", - "param10": "value1", - } - ec2.Sign(testAuth, "GET", "/path", params, "localhost") - expected := "di0sjxIvezUgQ1SIL6i+C/H8lL+U0CQ9frLIak8jkVg=" - c.Assert(params["Signature"], Equals, expected) -} - -func (s *S) TestEscaping(c *C) { - params := map[string]string{"Nonce": "+ +"} - ec2.Sign(testAuth, "GET", "/path", params, "localhost") - c.Assert(params["Nonce"], Equals, "+ +") - expected := "bqffDELReIqwjg/W0DnsnVUmfLK4wXVLO4/LuG+1VFA=" - c.Assert(params["Signature"], Equals, expected) -} - -func (s *S) TestSignatureExample1(c *C) { - params := map[string]string{ - "Timestamp": "2009-02-01T12:53:20+00:00", - "Version": "2007-11-07", - "Action": "ListDomains", - } - ec2.Sign(aws.Auth{"access", "secret", ""}, "GET", "/", params, "sdb.amazonaws.com") - expected := "okj96/5ucWBSc1uR2zXVfm6mDHtgfNv657rRtt/aunQ=" - c.Assert(params["Signature"], Equals, expected) -} diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/elb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/elb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/elb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/elb.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,575 +0,0 @@ -// The elb package provides types and functions for interaction with the AWS -// Elastic Load Balancing service (ELB) -package elb - -import ( - "encoding/xml" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/mitchellh/goamz/aws" -) - -// The ELB type encapsulates operations operations with the elb endpoint. -type ELB struct { - aws.Auth - aws.Region - httpClient *http.Client -} - -const APIVersion = "2012-06-01" - -// New creates a new ELB instance. -func New(auth aws.Auth, region aws.Region) *ELB { - return NewWithClient(auth, region, aws.RetryingClient) -} - -func NewWithClient(auth aws.Auth, region aws.Region, httpClient *http.Client) *ELB { - return &ELB{auth, region, httpClient} -} - -func (elb *ELB) query(params map[string]string, resp interface{}) error { - params["Version"] = APIVersion - params["Timestamp"] = time.Now().In(time.UTC).Format(time.RFC3339) - - endpoint, err := url.Parse(elb.Region.ELBEndpoint) - if err != nil { - return err - } - - sign(elb.Auth, "GET", "/", params, endpoint.Host) - endpoint.RawQuery = multimap(params).Encode() - r, err := elb.httpClient.Get(endpoint.String()) - - if err != nil { - return err - } - defer r.Body.Close() - if r.StatusCode > 200 { - return buildError(r) - } - - decoder := xml.NewDecoder(r.Body) - decodedBody := decoder.Decode(resp) - - return decodedBody -} - -func buildError(r *http.Response) error { - var ( - err Error - errors xmlErrors - ) - xml.NewDecoder(r.Body).Decode(&errors) - if len(errors.Errors) > 0 { - err = errors.Errors[0] - } - 
err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - return &err -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -func makeParams(action string) map[string]string { - params := make(map[string]string) - params["Action"] = action - return params -} - -// ---------------------------------------------------------------------------- -// ELB objects - -// A listener attaches to an elb -type Listener struct { - InstancePort int64 `xml:"Listener>InstancePort"` - InstanceProtocol string `xml:"Listener>InstanceProtocol"` - SSLCertificateId string `xml:"Listener>SSLCertificateId"` - LoadBalancerPort int64 `xml:"Listener>LoadBalancerPort"` - Protocol string `xml:"Listener>Protocol"` -} - -// An Instance attaches to an elb -type Instance struct { - InstanceId string `xml:"InstanceId"` -} - -// A tag attached to an elb -type Tag struct { - Key string `xml:"Key"` - Value string `xml:"Value"` -} - -// An InstanceState from an elb health query -type InstanceState struct { - InstanceId string `xml:"InstanceId"` - Description string `xml:"Description"` - State string `xml:"State"` - ReasonCode string `xml:"ReasonCode"` -} - -// ---------------------------------------------------------------------------- -// AddTags - -type AddTags struct { - LoadBalancerNames []string - Tags []Tag -} - -type AddTagsResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -func (elb *ELB) AddTags(options *AddTags) (resp *AddTagsResp, err error) { - params := makeParams("AddTags") - - for i, v := range options.LoadBalancerNames { - params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v - } - - for i, v := range options.Tags { - params["Tags.member."+strconv.Itoa(i+1)+".Key"] = v.Key - params["Tags.member."+strconv.Itoa(i+1)+".Value"] = v.Value - } - - resp = &AddTagsResp{} - - err = elb.query(params, resp) - - return resp, err -} - -// 
---------------------------------------------------------------------------- -// RemoveTags - -type RemoveTags struct { - LoadBalancerNames []string - TagKeys []string -} - -type RemoveTagsResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -func (elb *ELB) RemoveTags(options *RemoveTags) (resp *RemoveTagsResp, err error) { - params := makeParams("RemoveTags") - - for i, v := range options.LoadBalancerNames { - params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v - } - - for i, v := range options.TagKeys { - params["Tags.member."+strconv.Itoa(i+1)+".Key"] = v - } - - resp = &RemoveTagsResp{} - - err = elb.query(params, resp) - - return resp, err -} - -// ---------------------------------------------------------------------------- -// Create - -// The CreateLoadBalancer request parameters -type CreateLoadBalancer struct { - AvailZone []string - Listeners []Listener - LoadBalancerName string - Internal bool // true for vpc elbs - SecurityGroups []string - Subnets []string - Tags []Tag -} - -type CreateLoadBalancerResp struct { - DNSName string `xml:"CreateLoadBalancerResult>DNSName"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -func (elb *ELB) CreateLoadBalancer(options *CreateLoadBalancer) (resp *CreateLoadBalancerResp, err error) { - params := makeParams("CreateLoadBalancer") - - params["LoadBalancerName"] = options.LoadBalancerName - - for i, v := range options.AvailZone { - params["AvailabilityZones.member."+strconv.Itoa(i+1)] = v - } - - for i, v := range options.SecurityGroups { - params["SecurityGroups.member."+strconv.Itoa(i+1)] = v - } - - for i, v := range options.Subnets { - params["Subnets.member."+strconv.Itoa(i+1)] = v - } - - for i, v := range options.Listeners { - params["Listeners.member."+strconv.Itoa(i+1)+".LoadBalancerPort"] = strconv.FormatInt(v.LoadBalancerPort, 10) - params["Listeners.member."+strconv.Itoa(i+1)+".InstancePort"] = strconv.FormatInt(v.InstancePort, 10) - 
params["Listeners.member."+strconv.Itoa(i+1)+".Protocol"] = v.Protocol - params["Listeners.member."+strconv.Itoa(i+1)+".InstanceProtocol"] = v.InstanceProtocol - params["Listeners.member."+strconv.Itoa(i+1)+".SSLCertificateId"] = v.SSLCertificateId - } - - for i, v := range options.Tags { - params["Tags.member."+strconv.Itoa(i+1)+".Key"] = v.Key - params["Tags.member."+strconv.Itoa(i+1)+".Value"] = v.Value - } - - if options.Internal { - params["Scheme"] = "internal" - } - - resp = &CreateLoadBalancerResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ---------------------------------------------------------------------------- -// Destroy - -// The DestroyLoadBalancer request parameters -type DeleteLoadBalancer struct { - LoadBalancerName string -} - -func (elb *ELB) DeleteLoadBalancer(options *DeleteLoadBalancer) (resp *SimpleResp, err error) { - params := makeParams("DeleteLoadBalancer") - - params["LoadBalancerName"] = options.LoadBalancerName - - resp = &SimpleResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ---------------------------------------------------------------------------- -// Describe - -// An individual load balancer -type LoadBalancer struct { - LoadBalancerName string `xml:"LoadBalancerName"` - Listeners []Listener `xml:"ListenerDescriptions>member"` - Instances []Instance `xml:"Instances>member"` - HealthCheck HealthCheck `xml:"HealthCheck"` - AvailabilityZones []string `xml:"AvailabilityZones>member"` - HostedZoneNameID string `xml:"CanonicalHostedZoneNameID"` - DNSName string `xml:"DNSName"` - SecurityGroups []string `xml:"SecurityGroups>member"` - Scheme string `xml:"Scheme"` - Subnets []string `xml:"Subnets>member"` -} - -// DescribeLoadBalancer request params -type DescribeLoadBalancer struct { - Names []string -} - -type DescribeLoadBalancersResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - LoadBalancers []LoadBalancer 
`xml:"DescribeLoadBalancersResult>LoadBalancerDescriptions>member"` -} - -func (elb *ELB) DescribeLoadBalancers(options *DescribeLoadBalancer) (resp *DescribeLoadBalancersResp, err error) { - params := makeParams("DescribeLoadBalancers") - - for i, v := range options.Names { - params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v - } - - resp = &DescribeLoadBalancersResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ---------------------------------------------------------------------------- -// Attributes - -type AccessLog struct { - EmitInterval int64 - Enabled bool - S3BucketName string - S3BucketPrefix string -} - -type ConnectionDraining struct { - Enabled bool - Timeout int64 -} - -type LoadBalancerAttributes struct { - CrossZoneLoadBalancingEnabled bool - ConnectionSettingsIdleTimeout int64 - ConnectionDraining ConnectionDraining - AccessLog AccessLog -} - -type ModifyLoadBalancerAttributes struct { - LoadBalancerName string - LoadBalancerAttributes LoadBalancerAttributes -} - -func (elb *ELB) ModifyLoadBalancerAttributes(options *ModifyLoadBalancerAttributes) (resp *SimpleResp, err error) { - params := makeParams("ModifyLoadBalancerAttributes") - - params["LoadBalancerName"] = options.LoadBalancerName - params["LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled"] = strconv.FormatBool(options.LoadBalancerAttributes.CrossZoneLoadBalancingEnabled) - if options.LoadBalancerAttributes.ConnectionSettingsIdleTimeout > 0 { - params["LoadBalancerAttributes.ConnectionSettings.IdleTimeout"] = strconv.Itoa(int(options.LoadBalancerAttributes.ConnectionSettingsIdleTimeout)) - } - if options.LoadBalancerAttributes.ConnectionDraining.Timeout > 0 { - params["LoadBalancerAttributes.ConnectionDraining.Timeout"] = strconv.Itoa(int(options.LoadBalancerAttributes.ConnectionDraining.Timeout)) - } - params["LoadBalancerAttributes.ConnectionDraining.Enabled"] = 
strconv.FormatBool(options.LoadBalancerAttributes.ConnectionDraining.Enabled) - params["LoadBalancerAttributes.AccessLog.Enabled"] = strconv.FormatBool(options.LoadBalancerAttributes.AccessLog.Enabled) - if options.LoadBalancerAttributes.AccessLog.Enabled { - params["LoadBalancerAttributes.AccessLog.EmitInterval"] = strconv.Itoa(int(options.LoadBalancerAttributes.AccessLog.EmitInterval)) - params["LoadBalancerAttributes.AccessLog.S3BucketName"] = options.LoadBalancerAttributes.AccessLog.S3BucketName - params["LoadBalancerAttributes.AccessLog.S3BucketPrefix"] = options.LoadBalancerAttributes.AccessLog.S3BucketPrefix - } - - resp = &SimpleResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ---------------------------------------------------------------------------- -// Instance Registration / deregistration - -// The RegisterInstancesWithLoadBalancer request parameters -type RegisterInstancesWithLoadBalancer struct { - LoadBalancerName string - Instances []string -} - -type RegisterInstancesWithLoadBalancerResp struct { - Instances []Instance `xml:"RegisterInstancesWithLoadBalancerResult>Instances>member"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -func (elb *ELB) RegisterInstancesWithLoadBalancer(options *RegisterInstancesWithLoadBalancer) (resp *RegisterInstancesWithLoadBalancerResp, err error) { - params := makeParams("RegisterInstancesWithLoadBalancer") - - params["LoadBalancerName"] = options.LoadBalancerName - - for i, v := range options.Instances { - params["Instances.member."+strconv.Itoa(i+1)+".InstanceId"] = v - } - - resp = &RegisterInstancesWithLoadBalancerResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// The DeregisterInstancesFromLoadBalancer request parameters -type DeregisterInstancesFromLoadBalancer struct { - LoadBalancerName string - Instances []string -} - -type DeregisterInstancesFromLoadBalancerResp struct { - Instances []Instance 
`xml:"DeregisterInstancesFromLoadBalancerResult>Instances>member"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -func (elb *ELB) DeregisterInstancesFromLoadBalancer(options *DeregisterInstancesFromLoadBalancer) (resp *DeregisterInstancesFromLoadBalancerResp, err error) { - params := makeParams("DeregisterInstancesFromLoadBalancer") - - params["LoadBalancerName"] = options.LoadBalancerName - - for i, v := range options.Instances { - params["Instances.member."+strconv.Itoa(i+1)+".InstanceId"] = v - } - - resp = &DeregisterInstancesFromLoadBalancerResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ---------------------------------------------------------------------------- -// DescribeTags - -type DescribeTags struct { - LoadBalancerNames []string -} - -type LoadBalancerTag struct { - Tags []Tag `xml:"Tags>member"` - LoadBalancerName string `xml:"LoadBalancerName"` -} - -type DescribeTagsResp struct { - LoadBalancerTags []LoadBalancerTag `xml:"DescribeTagsResult>TagDescriptions>member"` - NextToken string `xml:"DescribeTagsResult>NextToken"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -func (elb *ELB) DescribeTags(options *DescribeTags) (resp *DescribeTagsResp, err error) { - params := makeParams("DescribeTags") - - for i, v := range options.LoadBalancerNames { - params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v - } - - resp = &DescribeTagsResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ---------------------------------------------------------------------------- -// Health Checks - -type HealthCheck struct { - HealthyThreshold int64 `xml:"HealthyThreshold"` - UnhealthyThreshold int64 `xml:"UnhealthyThreshold"` - Interval int64 `xml:"Interval"` - Target string `xml:"Target"` - Timeout int64 `xml:"Timeout"` -} - -type ConfigureHealthCheck struct { - LoadBalancerName string - Check HealthCheck -} - -type ConfigureHealthCheckResp struct { - 
Check HealthCheck `xml:"ConfigureHealthCheckResult>HealthCheck"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -func (elb *ELB) ConfigureHealthCheck(options *ConfigureHealthCheck) (resp *ConfigureHealthCheckResp, err error) { - params := makeParams("ConfigureHealthCheck") - - params["LoadBalancerName"] = options.LoadBalancerName - params["HealthCheck.HealthyThreshold"] = strconv.Itoa(int(options.Check.HealthyThreshold)) - params["HealthCheck.UnhealthyThreshold"] = strconv.Itoa(int(options.Check.UnhealthyThreshold)) - params["HealthCheck.Interval"] = strconv.Itoa(int(options.Check.Interval)) - params["HealthCheck.Target"] = options.Check.Target - params["HealthCheck.Timeout"] = strconv.Itoa(int(options.Check.Timeout)) - - resp = &ConfigureHealthCheckResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ---------------------------------------------------------------------------- -// Instance Health - -// The DescribeInstanceHealth request parameters -type DescribeInstanceHealth struct { - LoadBalancerName string -} - -type DescribeInstanceHealthResp struct { - InstanceStates []InstanceState `xml:"DescribeInstanceHealthResult>InstanceStates>member"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -func (elb *ELB) DescribeInstanceHealth(options *DescribeInstanceHealth) (resp *DescribeInstanceHealthResp, err error) { - params := makeParams("DescribeInstanceHealth") - - params["LoadBalancerName"] = options.LoadBalancerName - - resp = &DescribeInstanceHealthResp{} - - err = elb.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// Responses - -type SimpleResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -type xmlErrors struct { - Errors []Error `xml:"Error"` -} - -// Error encapsulates an elb error. -type Error struct { - // HTTP status code of the error. - StatusCode int - - // AWS code of the error. - Code string - - // Message explaining the error. 
- Message string -} - -func (e *Error) Error() string { - var prefix string - if e.Code != "" { - prefix = e.Code + ": " - } - if prefix == "" && e.StatusCode > 0 { - prefix = strconv.Itoa(e.StatusCode) + ": " - } - return prefix + e.Message -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/elb_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/elb_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/elb_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/elb_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,235 +0,0 @@ -package elb_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/elb" - "github.com/mitchellh/goamz/testutil" - . "github.com/motain/gocheck" - "testing" -) - -func Test(t *testing.T) { - TestingT(t) -} - -type S struct { - elb *elb.ELB -} - -var _ = Suite(&S{}) - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - s.elb = elb.NewWithClient(auth, aws.Region{ELBEndpoint: testServer.URL}, testutil.DefaultClient) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) TestAddTags(c *C) { - testServer.Response(200, nil, AddTagsExample) - - options := elb.AddTags{ - LoadBalancerNames: []string{"foobar"}, - Tags: []elb.Tag{ - { - Key: "hello", - Value: "world", - }, - }, - } - - resp, err := s.elb.AddTags(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"AddTags"}) - c.Assert(req.Form["LoadBalancerNames.member.1"], DeepEquals, []string{"foobar"}) - c.Assert(req.Form["Tags.member.1.Key"], DeepEquals, []string{"hello"}) - c.Assert(req.Form["Tags.member.1.Value"], DeepEquals, []string{"world"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, 
"360e81f7-1100-11e4-b6ed-0f30EXAMPLE") -} - -func (s *S) TestRemoveTags(c *C) { - testServer.Response(200, nil, RemoveTagsExample) - - options := elb.RemoveTags{ - LoadBalancerNames: []string{"foobar"}, - TagKeys: []string{"hello"}, - } - - resp, err := s.elb.RemoveTags(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"RemoveTags"}) - c.Assert(req.Form["LoadBalancerNames.member.1"], DeepEquals, []string{"foobar"}) - c.Assert(req.Form["Tags.member.1.Key"], DeepEquals, []string{"hello"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE") -} - -func (s *S) TestCreateLoadBalancer(c *C) { - testServer.Response(200, nil, CreateLoadBalancerExample) - - options := elb.CreateLoadBalancer{ - AvailZone: []string{"us-east-1a"}, - Listeners: []elb.Listener{elb.Listener{ - InstancePort: 80, - InstanceProtocol: "http", - SSLCertificateId: "needToAddASSLCertToYourAWSAccount", - LoadBalancerPort: 80, - Protocol: "http", - }, - }, - LoadBalancerName: "foobar", - Internal: false, - SecurityGroups: []string{"sg1"}, - Subnets: []string{"sn1"}, - } - - resp, err := s.elb.CreateLoadBalancer(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateLoadBalancer"}) - c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "1549581b-12b7-11e3-895e-1334aEXAMPLE") -} - -func (s *S) TestDeleteLoadBalancer(c *C) { - testServer.Response(200, nil, DeleteLoadBalancerExample) - - options := elb.DeleteLoadBalancer{ - LoadBalancerName: "foobar", - } - - resp, err := s.elb.DeleteLoadBalancer(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteLoadBalancer"}) - c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "1549581b-12b7-11e3-895e-1334aEXAMPLE") -} - -func 
(s *S) TestDescribeLoadBalancers(c *C) { - testServer.Response(200, nil, DescribeLoadBalancersExample) - - options := elb.DescribeLoadBalancer{ - Names: []string{"foobar"}, - } - - resp, err := s.elb.DescribeLoadBalancers(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeLoadBalancers"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE") - c.Assert(resp.LoadBalancers[0].LoadBalancerName, Equals, "MyLoadBalancer") - c.Assert(resp.LoadBalancers[0].Listeners[0].Protocol, Equals, "HTTP") - c.Assert(resp.LoadBalancers[0].Instances[0].InstanceId, Equals, "i-e4cbe38d") - c.Assert(resp.LoadBalancers[0].AvailabilityZones[0].AvailabilityZone, Equals, "us-east-1a") - c.Assert(resp.LoadBalancers[0].Scheme, Equals, "internet-facing") - c.Assert(resp.LoadBalancers[0].DNSName, Equals, "MyLoadBalancer-123456789.us-east-1.elb.amazonaws.com") - c.Assert(resp.LoadBalancers[0].HealthCheck.HealthyThreshold, Equals, int64(2)) - c.Assert(resp.LoadBalancers[0].HealthCheck.UnhealthyThreshold, Equals, int64(10)) - c.Assert(resp.LoadBalancers[0].HealthCheck.Interval, Equals, int64(90)) - c.Assert(resp.LoadBalancers[0].HealthCheck.Target, Equals, "HTTP:80/") - c.Assert(resp.LoadBalancers[0].HealthCheck.Timeout, Equals, int64(60)) -} - -func (s *S) TestRegisterInstancesWithLoadBalancer(c *C) { - testServer.Response(200, nil, RegisterInstancesWithLoadBalancerExample) - - options := elb.RegisterInstancesWithLoadBalancer{ - LoadBalancerName: "foobar", - Instances: []string{"instance-1", "instance-2"}, - } - - resp, err := s.elb.RegisterInstancesWithLoadBalancer(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"RegisterInstancesWithLoadBalancer"}) - c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"}) - c.Assert(req.Form["Instances.member.1.InstanceId"], DeepEquals, []string{"instance-1"}) - 
c.Assert(req.Form["Instances.member.2.InstanceId"], DeepEquals, []string{"instance-2"}) - c.Assert(err, IsNil) - - c.Assert(resp.Instances[0].InstanceId, Equals, "i-315b7e51") - c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE") -} - -func (s *S) TestDeregisterInstancesFromLoadBalancer(c *C) { - testServer.Response(200, nil, DeregisterInstancesFromLoadBalancerExample) - - options := elb.DeregisterInstancesFromLoadBalancer{ - LoadBalancerName: "foobar", - Instances: []string{"instance-1", "instance-2"}, - } - - resp, err := s.elb.DeregisterInstancesFromLoadBalancer(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeregisterInstancesFromLoadBalancer"}) - c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"}) - c.Assert(req.Form["Instances.member.1.InstanceId"], DeepEquals, []string{"instance-1"}) - c.Assert(req.Form["Instances.member.2.InstanceId"], DeepEquals, []string{"instance-2"}) - c.Assert(err, IsNil) - - c.Assert(resp.Instances[0].InstanceId, Equals, "i-6ec63d59") - c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE") -} - -func (s *S) TestConfigureHealthCheck(c *C) { - testServer.Response(200, nil, ConfigureHealthCheckExample) - - options := elb.ConfigureHealthCheck{ - LoadBalancerName: "foobar", - Check: elb.HealthCheck{ - HealthyThreshold: 2, - UnhealthyThreshold: 2, - Interval: 30, - Target: "HTTP:80/ping", - Timeout: 3, - }, - } - - resp, err := s.elb.ConfigureHealthCheck(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"ConfigureHealthCheck"}) - c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"}) - c.Assert(err, IsNil) - - c.Assert(resp.Check.HealthyThreshold, Equals, int64(2)) - c.Assert(resp.Check.UnhealthyThreshold, Equals, int64(2)) - c.Assert(resp.Check.Interval, Equals, int64(30)) - c.Assert(resp.Check.Target, Equals, "HTTP:80/ping") - c.Assert(resp.Check.Timeout, Equals, 
int64(3)) - c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE") -} - -func (s *S) TestDescribeInstanceHealth(c *C) { - testServer.Response(200, nil, DescribeInstanceHealthExample) - - options := elb.DescribeInstanceHealth{ - LoadBalancerName: "foobar", - } - - resp, err := s.elb.DescribeInstanceHealth(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeInstanceHealth"}) - c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"}) - c.Assert(err, IsNil) - - c.Assert(resp.InstanceStates[0].InstanceId, Equals, "i-90d8c2a5") - c.Assert(resp.InstanceStates[0].State, Equals, "InService") - c.Assert(resp.InstanceStates[1].InstanceId, Equals, "i-06ea3e60") - c.Assert(resp.InstanceStates[1].State, Equals, "OutOfService") - c.Assert(resp.RequestId, Equals, "1549581b-12b7-11e3-895e-1334aEXAMPLE") -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,182 +0,0 @@ -package elb_test - -var ErrorDump = ` - -UnsupportedOperation - -0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4 -` - -// http://goo.gl/OkMdtJ -var AddTagsExample = ` - - - - 360e81f7-1100-11e4-b6ed-0f30EXAMPLE - - -` - -// http://goo.gl/nT2E89 -var RemoveTagsExample = ` - - - - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE - - -` - -// http://goo.gl/gQRD2H -var CreateLoadBalancerExample = ` - - - MyLoadBalancer-1234567890.us-east-1.elb.amazonaws.com - - - 1549581b-12b7-11e3-895e-1334aEXAMPLE - - -` - -// http://goo.gl/GLZeBN -var DeleteLoadBalancerExample = ` - - - 1549581b-12b7-11e3-895e-1334aEXAMPLE - - -` - -// 
http://goo.gl/8UgpQ8 -var DescribeLoadBalancersExample = ` - - - - - - MyLoadBalancer - 2013-05-24T21:15:31.280Z - - 90 - HTTP:80/ - 2 - 60 - 10 - - - - - - HTTP - 80 - HTTP - needToAddASSLCertToYourAWSAccount - 80 - - - - - - i-e4cbe38d - - - - - - - - - us-east-1a - - ZZZZZZZZZZZ123X - MyLoadBalancer-123456789.us-east-1.elb.amazonaws.com - internet-facing - - amazon-elb - amazon-elb-sg - - MyLoadBalancer-123456789.us-east-1.elb.amazonaws.com - - - - - - - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE - - -` - -// http://goo.gl/Uz1N66 -var RegisterInstancesWithLoadBalancerExample = ` - - - - - i-315b7e51 - - - - - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE - - - ` - -// http://goo.gl/5OMv62 -var DeregisterInstancesFromLoadBalancerExample = ` - - - - - i-6ec63d59 - - - - - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE - - -` - -// http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_ConfigureHealthCheck.html -var ConfigureHealthCheckExample = ` - - - - 30 - HTTP:80/ping - 2 - 3 - 2 - - - - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE - -` - -// http://goo.gl/cGNxfj -var DescribeInstanceHealthExample = ` - - - - - N/A - i-90d8c2a5 - InService - N/A - - - N/A - i-06ea3e60 - OutOfService - N/A - - - - - 1549581b-12b7-11e3-895e-1334aEXAMPLE - -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/sign.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/elb/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -package elb - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "github.com/mitchellh/goamz/aws" - "sort" - "strings" -) - -// ---------------------------------------------------------------------------- -// Version 2 signing (http://goo.gl/RSRp5) - -var b64 = 
base64.StdEncoding - -func sign(auth aws.Auth, method, path string, params map[string]string, host string) { - params["AWSAccessKeyId"] = auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - if auth.Token != "" { - params["SecurityToken"] = auth.Token - } - - var sarray []string - for k, v := range params { - sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(v)) - } - sort.StringSlice(sarray).Sort() - joined := strings.Join(sarray, "&") - payload := method + "\n" + host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/export_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/export_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/export_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -package mturk - -import ( - "github.com/mitchellh/goamz/aws" -) - -func Sign(auth aws.Auth, service, method, timestamp string, params map[string]string) { - sign(auth, service, method, timestamp, params) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/mturk.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/mturk.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/mturk.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/mturk.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,281 +0,0 @@ 
-// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd. -// -// Written by Graham Miller - -// This package is in an experimental state, and does not currently -// follow conventions and style of the rest of goamz or common -// Go conventions. It must be polished before it's considered a -// first-class package in goamz. -package mturk - -import ( - "encoding/xml" - "errors" - "fmt" - "github.com/mitchellh/goamz/aws" - "net/http" - //"net/http/httputil" - "net/url" - "strconv" - "time" -) - -type MTurk struct { - aws.Auth - URL *url.URL -} - -func New(auth aws.Auth) *MTurk { - mt := &MTurk{Auth: auth} - var err error - mt.URL, err = url.Parse("http://mechanicalturk.amazonaws.com/") - if err != nil { - panic(err.Error()) - } - return mt -} - -// ---------------------------------------------------------------------------- -// Request dispatching logic. - -// Error encapsulates an error returned by MTurk. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - Code string // EC2 error code ("UnsupportedOperation", ...) - Message string // The human-oriented error message - RequestId string -} - -func (err *Error) Error() string { - return err.Message -} - -// The request stanza included in several response types, for example -// in a "CreateHITResponse". 
http://goo.gl/qGeKf -type xmlRequest struct { - RequestId string - IsValid string - Errors []Error `xml:"Errors>Error"` -} - -// Common price structure used in requests and responses -// http://goo.gl/tE4AV -type Price struct { - Amount string - CurrencyCode string - FormattedPrice string -} - -// Really just a country string -// http://goo.gl/mU4uG -type Locale string - -// Data structure used to specify requirements for the worker -// used in CreateHIT, for example -// http://goo.gl/LvRo9 -type QualificationRequirement struct { - QualificationTypeId string - Comparator string - IntegerValue int - LocaleValue Locale - RequiredToPreview string -} - -// Data structure holding the contents of an "external" -// question. http://goo.gl/NP8Aa -type ExternalQuestion struct { - XMLName xml.Name `xml:"http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd ExternalQuestion"` - ExternalURL string - FrameHeight int -} - -// The data structure representing a "human interface task" (HIT) -// Currently only supports "external" questions, because Go -// structs don't support union types. 
http://goo.gl/NP8Aa -// This type is returned, for example, from SearchHITs -// http://goo.gl/PskcX -type HIT struct { - Request xmlRequest - - HITId string - HITTypeId string - CreationTime string - Title string - Description string - Keywords string - HITStatus string - Reward Price - LifetimeInSeconds uint - AssignmentDurationInSeconds uint - MaxAssignments uint - AutoApprovalDelayInSeconds uint - QualificationRequirement QualificationRequirement - Question ExternalQuestion - RequesterAnnotation string - NumberofSimilarHITs uint - HITReviewStatus string - NumberOfAssignmentsPending uint - NumberOfAssignmentsAvailable uint - NumberOfAssignmentsCompleted uint -} - -// The main data structure returned by SearchHITs -// http://goo.gl/PskcX -type SearchHITsResult struct { - NumResults uint - PageNumber uint - TotalNumResults uint - HITs []HIT `xml:"HIT"` -} - -// The wrapper data structure returned by SearchHITs -// http://goo.gl/PskcX -type SearchHITsResponse struct { - RequestId string `xml:"OperationRequest>RequestId"` - SearchHITsResult SearchHITsResult -} - -// The wrapper data structure returned by CreateHIT -// http://goo.gl/PskcX -type CreateHITResponse struct { - RequestId string `xml:"OperationRequest>RequestId"` - HIT HIT -} - -// Corresponds to the "CreateHIT" operation of the Mechanical Turk -// API. http://goo.gl/cDBRc Currently only supports "external" -// questions (see "HIT" struct above). If "keywords", "maxAssignments", -// "qualificationRequirement" or "requesterAnnotation" are the zero -// value for their types, they will not be included in the request. 
-func (mt *MTurk) CreateHIT(title, description string, question ExternalQuestion, reward Price, assignmentDurationInSeconds, lifetimeInSeconds uint, keywords string, maxAssignments uint, qualificationRequirement *QualificationRequirement, requesterAnnotation string) (h *HIT, err error) { - params := make(map[string]string) - params["Title"] = title - params["Description"] = description - params["Question"], err = xmlEncode(&question) - if err != nil { - return - } - params["Reward.1.Amount"] = reward.Amount - params["Reward.1.CurrencyCode"] = reward.CurrencyCode - params["AssignmentDurationInSeconds"] = strconv.FormatUint(uint64(assignmentDurationInSeconds), 10) - - params["LifetimeInSeconds"] = strconv.FormatUint(uint64(lifetimeInSeconds), 10) - if keywords != "" { - params["Keywords"] = keywords - } - if maxAssignments != 0 { - params["MaxAssignments"] = strconv.FormatUint(uint64(maxAssignments), 10) - } - if qualificationRequirement != nil { - params["QualificationRequirement"], err = xmlEncode(qualificationRequirement) - if err != nil { - return - } - } - if requesterAnnotation != "" { - params["RequesterAnnotation"] = requesterAnnotation - } - - var response CreateHITResponse - err = mt.query(params, "CreateHIT", &response) - if err == nil { - h = &response.HIT - } - return -} - -// Corresponds to the "CreateHIT" operation of the Mechanical Turk -// API, using an existing "hit type". http://goo.gl/cDBRc Currently only -// supports "external" questions (see "HIT" struct above). If -// "maxAssignments" or "requesterAnnotation" are the zero value for -// their types, they will not be included in the request. 
-func (mt *MTurk) CreateHITOfType(hitTypeId string, q ExternalQuestion, lifetimeInSeconds uint, maxAssignments uint, requesterAnnotation string) (h *HIT, err error) { - params := make(map[string]string) - params["HITTypeId"] = hitTypeId - params["Question"], err = xmlEncode(&q) - if err != nil { - return - } - params["LifetimeInSeconds"] = strconv.FormatUint(uint64(lifetimeInSeconds), 10) - if maxAssignments != 0 { - params["MaxAssignments"] = strconv.FormatUint(uint64(maxAssignments), 10) - } - if requesterAnnotation != "" { - params["RequesterAnnotation"] = requesterAnnotation - } - - var response CreateHITResponse - err = mt.query(params, "CreateHIT", &response) - if err == nil { - h = &response.HIT - } - return -} - -// Corresponds to "SearchHITs" operation of Mechanical Turk. http://goo.gl/PskcX -// Currenlty supports none of the optional parameters. -func (mt *MTurk) SearchHITs() (s *SearchHITsResult, err error) { - params := make(map[string]string) - var response SearchHITsResponse - err = mt.query(params, "SearchHITs", &response) - if err == nil { - s = &response.SearchHITsResult - } - return -} - -// Adds common parameters to the "params" map, signs the request, -// adds the signature to the "params" map and sends the request -// to the server. 
It then unmarshals the response in to the "resp" -// parameter using xml.Unmarshal() -func (mt *MTurk) query(params map[string]string, operation string, resp interface{}) error { - service := "AWSMechanicalTurkRequester" - timestamp := time.Now().UTC().Format("2006-01-02T15:04:05Z") - - params["AWSAccessKeyId"] = mt.Auth.AccessKey - params["Service"] = service - params["Timestamp"] = timestamp - params["Operation"] = operation - - // make a copy - url := *mt.URL - - sign(mt.Auth, service, operation, timestamp, params) - url.RawQuery = multimap(params).Encode() - r, err := http.Get(url.String()) - if err != nil { - return err - } - //dump, _ := httputil.DumpResponse(r, true) - //println("DUMP:\n", string(dump)) - if r.StatusCode != 200 { - return errors.New(fmt.Sprintf("%d: unexpected status code", r.StatusCode)) - } - dec := xml.NewDecoder(r.Body) - err = dec.Decode(resp) - r.Body.Close() - return err -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -func xmlEncode(i interface{}) (s string, err error) { - var buf []byte - buf, err = xml.Marshal(i) - if err != nil { - return - } - s = string(buf) - return -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/mturk_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/mturk_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/mturk_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/mturk_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -package mturk_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/exp/mturk" - "github.com/mitchellh/goamz/testutil" - . 
"github.com/motain/gocheck" - "net/url" - "testing" -) - -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&S{}) - -type S struct { - mturk *mturk.MTurk -} - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - u, err := url.Parse(testServer.URL) - if err != nil { - panic(err.Error()) - } - - s.mturk = &mturk.MTurk{ - Auth: auth, - URL: u, - } -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) TestCreateHIT(c *C) { - testServer.Response(200, nil, BasicHitResponse) - - question := mturk.ExternalQuestion{ - ExternalURL: "http://www.amazon.com", - FrameHeight: 200, - } - reward := mturk.Price{ - Amount: "0.01", - CurrencyCode: "USD", - } - hit, err := s.mturk.CreateHIT("title", "description", question, reward, 1, 2, "key1,key2", 3, nil, "annotation") - - testServer.WaitRequest() - - c.Assert(err, IsNil) - c.Assert(hit, NotNil) - - c.Assert(hit.HITId, Equals, "28J4IXKO2L927XKJTHO34OCDNASCDW") - c.Assert(hit.HITTypeId, Equals, "2XZ7D1X3V0FKQVW7LU51S7PKKGFKDF") -} - -func (s *S) TestSearchHITs(c *C) { - testServer.Response(200, nil, SearchHITResponse) - - hitResult, err := s.mturk.SearchHITs() - - c.Assert(err, IsNil) - c.Assert(hitResult, NotNil) - - c.Assert(hitResult.NumResults, Equals, uint(1)) - c.Assert(hitResult.PageNumber, Equals, uint(1)) - c.Assert(hitResult.TotalNumResults, Equals, uint(1)) - - c.Assert(len(hitResult.HITs), Equals, 1) - c.Assert(hitResult.HITs[0].HITId, Equals, "2BU26DG67D1XTE823B3OQ2JF2XWF83") - c.Assert(hitResult.HITs[0].HITTypeId, Equals, "22OWJ5OPB0YV6IGL5727KP9U38P5XR") - c.Assert(hitResult.HITs[0].CreationTime, Equals, "2011-12-28T19:56:20Z") - c.Assert(hitResult.HITs[0].Title, Equals, "test hit") - c.Assert(hitResult.HITs[0].Description, Equals, "please disregard, testing only") - c.Assert(hitResult.HITs[0].HITStatus, Equals, "Reviewable") - c.Assert(hitResult.HITs[0].MaxAssignments, Equals, uint(1)) - 
c.Assert(hitResult.HITs[0].Reward.Amount, Equals, "0.01") - c.Assert(hitResult.HITs[0].Reward.CurrencyCode, Equals, "USD") - c.Assert(hitResult.HITs[0].AutoApprovalDelayInSeconds, Equals, uint(2592000)) - c.Assert(hitResult.HITs[0].AssignmentDurationInSeconds, Equals, uint(30)) - c.Assert(hitResult.HITs[0].NumberOfAssignmentsPending, Equals, uint(0)) - c.Assert(hitResult.HITs[0].NumberOfAssignmentsAvailable, Equals, uint(1)) - c.Assert(hitResult.HITs[0].NumberOfAssignmentsCompleted, Equals, uint(0)) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -package mturk_test - -var BasicHitResponse = ` -643b794b-66b6-4427-bb8a-4d3df5c9a20eTrue28J4IXKO2L927XKJTHO34OCDNASCDW2XZ7D1X3V0FKQVW7LU51S7PKKGFKDF -` - -var SearchHITResponse = ` -38862d9c-f015-4177-a2d3-924110a9d6f2True1112BU26DG67D1XTE823B3OQ2JF2XWF8322OWJ5OPB0YV6IGL5727KP9U38P5XR2011-12-28T19:56:20Ztest hitplease disregard, testing onlyReviewable10.01USD$0.0125920002011-12-28T19:56:50Z30010 -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/sign.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -package mturk - -import ( - "crypto/hmac" - "crypto/sha1" - 
"encoding/base64" - "github.com/mitchellh/goamz/aws" -) - -var b64 = base64.StdEncoding - -// ---------------------------------------------------------------------------- -// Mechanical Turk signing (http://goo.gl/wrzfn) -func sign(auth aws.Auth, service, method, timestamp string, params map[string]string) { - payload := service + method + timestamp - hash := hmac.New(sha1.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/sign_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/sign_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/sign_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/mturk/sign_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -package mturk_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/exp/mturk" - . 
"github.com/motain/gocheck" -) - -// Mechanical Turk REST authentication docs: http://goo.gl/wrzfn - -var testAuth = aws.Auth{"user", "secret", ""} - -// == fIJy9wCApBNL2R4J2WjJGtIBFX4= -func (s *S) TestBasicSignature(c *C) { - params := map[string]string{} - mturk.Sign(testAuth, "AWSMechanicalTurkRequester", "CreateHIT", "2012-02-16T20:30:47Z", params) - expected := "b/TnvzrdeD/L/EyzdFrznPXhido=" - c.Assert(params["Signature"], Equals, expected) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/export_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/export_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/export_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -package sdb - -import ( - "github.com/mitchellh/goamz/aws" -) - -func Sign(auth aws.Auth, method, path string, params map[string][]string, headers map[string][]string) { - sign(auth, method, path, params, headers) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -package sdb_test - -var TestCreateDomainXmlOK = ` - - - - 63264005-7a5f-e01a-a224-395c63b89f6d - 0.0055590279 - - -` - -var TestListDomainsXmlOK = ` - - - - Account - Domain - Record - - - 15fcaf55-9914-63c2-21f3-951e31193790 - 0.0000071759 - - -` - -var TestListDomainsWithNextTokenXmlOK = ` 
- - - - Domain1-200706011651 - Domain2-200706011652 - TWV0ZXJpbmdUZXN0RG9tYWluMS0yMDA3MDYwMTE2NTY= - - - eb13162f-1b95-4511-8b12-489b86acfd28 - 0.0000219907 - - -` - -var TestDeleteDomainXmlOK = ` - - - - 039e1e25-9a64-2a74-93da-2fda36122a97 - 0.0055590278 - - -` - -var TestDomainMetadataXmlNoSuchDomain = ` - - - - - NoSuchDomain - The specified domain does not exist. - 0.0000071759 - - - e050cea2-a772-f90e-2cb0-98ebd42c2898 - -` - -var TestPutAttrsXmlOK = ` - - - - 490206ce-8292-456c-a00f-61b335eb202b - 0.0000219907 - - -` - -var TestAttrsXmlOK = ` - - - - ColorBlue - SizeMed - - - b1e8f1f7-42e9-494c-ad09-2674e557526d - 0.0000219942 - - -` - -var TestSelectXmlOK = ` - - - - - Item_03 - CategoryClothes - SubcategoryPants - NameSweatpants - ColorBlue - ColorYellow - ColorPink - SizeLarge - - - Item_06 - CategoryMotorcycle Parts - SubcategoryBodywork - NameFender Eliminator - ColorBlue - MakeYamaha - ModelR1 - - - - b1e8f1f7-42e9-494c-ad09-2674e557526d - 0.0000219907 - - -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sdb.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sdb.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sdb.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sdb.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,413 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 AppsAttic Ltd. -// -// sdb package written by: -// -// Andrew Chilton -// Brad Rydzewski - -// This package is in an experimental state, and does not currently -// follow conventions and style of the rest of goamz or common -// Go conventions. It must be polished before it's considered a -// first-class package in goamz. -package sdb - -// BUG: SelectResp isn't properly organized. 
It must change. - -// - -import ( - "encoding/xml" - "github.com/mitchellh/goamz/aws" - "log" - "net/http" - "net/http/httputil" - "net/url" - "strconv" - "time" -) - -const debug = false - -// The SDB type encapsulates operations with a specific SimpleDB region. -type SDB struct { - aws.Auth - aws.Region - private byte // Reserve the right of using private data. -} - -// New creates a new SDB. -func New(auth aws.Auth, region aws.Region) *SDB { - return &SDB{auth, region, 0} -} - -// The Domain type represents a collection of items that are described -// by name-value attributes. -type Domain struct { - *SDB - Name string -} - -// Domain returns a Domain with the given name. -func (sdb *SDB) Domain(name string) *Domain { - return &Domain{sdb, name} -} - -// The Item type represent individual objects that contain one or more -// name-value attributes stored within a SDB Domain as rows. -type Item struct { - *SDB - *Domain - Name string -} - -// Item returns an Item with the given name. -func (domain *Domain) Item(name string) *Item { - return &Item{domain.SDB, domain, name} -} - -// The Attr type represent categories of data that can be assigned to items. -type Attr struct { - Name string - Value string -} - -// ---------------------------------------------------------------------------- -// Service-level operations. - -// --- ListDomains - -// Response to a ListDomains request. -// -// See http://goo.gl/3u0Cf for more details. -type ListDomainsResp struct { - Domains []string `xml:"ListDomainsResult>DomainName"` - NextToken string `xml:"ListDomainsResult>NextToken"` - ResponseMetadata ResponseMetadata -} - -// ListDomains lists all domains in sdb. -// -// See http://goo.gl/Dsw15 for more details. -func (sdb *SDB) ListDomains() (resp *ListDomainsResp, err error) { - return sdb.ListDomainsN(0, "") -} - -// ListDomainsN lists domains in sdb up to maxDomains. -// If nextToken is not empty, domains listed will start at the given token. 
-// -// See http://goo.gl/Dsw15 for more details. -func (sdb *SDB) ListDomainsN(maxDomains int, nextToken string) (resp *ListDomainsResp, err error) { - params := makeParams("ListDomains") - if maxDomains != 0 { - params["MaxNumberOfDomains"] = []string{strconv.Itoa(maxDomains)} - } - if nextToken != "" { - params["NextToken"] = []string{nextToken} - } - resp = &ListDomainsResp{} - err = sdb.query(nil, nil, params, nil, resp) - return -} - -// --- SelectExpression - -// Response to a Select request. -// -// See http://goo.gl/GTsSZ for more details. -type SelectResp struct { - Items []struct { - Name string - Attrs []Attr `xml:"Attribute"` - } `xml:"SelectResult>Item"` - ResponseMetadata ResponseMetadata -} - -// Select returns a set of items and attributes that match expr. -// Select is similar to the standard SQL SELECT statement. -// -// See http://goo.gl/GTsSZ for more details. -func (sdb *SDB) Select(expr string, consistent bool) (resp *SelectResp, err error) { - resp = &SelectResp{} - params := makeParams("Select") - params["SelectExpression"] = []string{expr} - if consistent { - params["ConsistentRead"] = []string{"true"} - } - err = sdb.query(nil, nil, params, nil, resp) - return -} - -// ---------------------------------------------------------------------------- -// Domain-level operations. - -// --- CreateDomain - -// CreateDomain creates a new domain. -// -// See http://goo.gl/jDjGH for more details. -func (domain *Domain) CreateDomain() (resp *SimpleResp, err error) { - params := makeParams("CreateDomain") - resp = &SimpleResp{} - err = domain.SDB.query(domain, nil, params, nil, resp) - return -} - -// DeleteDomain deletes an existing domain. -// -// See http://goo.gl/S0dCL for more details. 
-func (domain *Domain) DeleteDomain() (resp *SimpleResp, err error) { - params := makeParams("DeleteDomain") - resp = &SimpleResp{} - err = domain.SDB.query(domain, nil, params, nil, resp) - return -} - -// ---------------------------------------------------------------------------- -// Item-level operations. - -type PutAttrs struct { - attrs []Attr - expected []Attr - replace map[string]bool - missing map[string]bool -} - -func (pa *PutAttrs) Add(name, value string) { - pa.attrs = append(pa.attrs, Attr{name, value}) -} - -func (pa *PutAttrs) Replace(name, value string) { - pa.Add(name, value) - if pa.replace == nil { - pa.replace = make(map[string]bool) - } - pa.replace[name] = true -} - -// The PutAttrs request will only succeed if the existing -// item in SimpleDB contains a matching name / value pair. -func (pa *PutAttrs) IfValue(name, value string) { - pa.expected = append(pa.expected, Attr{name, value}) -} - -// Flag to test the existence of an attribute while performing -// conditional updates. X can be any positive integer or 0. -// -// This should set Expected.N.Name=name and Expected.N.Exists=false -func (pa *PutAttrs) IfMissing(name string) { - if pa.missing == nil { - pa.missing = make(map[string]bool) - } - pa.missing[name] = true -} - -// PutAttrs adds attrs to item. -// -// See http://goo.gl/yTAV4 for more details. 
-func (item *Item) PutAttrs(attrs *PutAttrs) (resp *SimpleResp, err error) { - params := makeParams("PutAttributes") - resp = &SimpleResp{} - - // copy these attrs over to the parameters - itemNum := 1 - for _, attr := range attrs.attrs { - itemNumStr := strconv.Itoa(itemNum) - - // do the name, value and replace - params["Attribute."+itemNumStr+".Name"] = []string{attr.Name} - params["Attribute."+itemNumStr+".Value"] = []string{attr.Value} - - if _, ok := attrs.replace[attr.Name]; ok { - params["Attribute."+itemNumStr+".Replace"] = []string{"true"} - } - - itemNum++ - } - - //append expected values to params - expectedNum := 1 - for _, attr := range attrs.expected { - expectedNumStr := strconv.Itoa(expectedNum) - params["Expected."+expectedNumStr+".Name"] = []string{attr.Name} - params["Expected."+expectedNumStr+".Value"] = []string{attr.Value} - - if attrs.missing[attr.Name] { - params["Expected."+expectedNumStr+".Exists"] = []string{"false"} - } - expectedNum++ - } - - err = item.query(params, nil, resp) - if err != nil { - return nil, err - } - return -} - -// Response to an Attrs request. -// -// See http://goo.gl/45X1M for more details. -type AttrsResp struct { - Attrs []Attr `xml:"GetAttributesResult>Attribute"` - ResponseMetadata ResponseMetadata -} - -// Attrs returns one or more of the named attributes, or -// all of item's attributes if names is nil. -// If consistent is true, previous writes will necessarily -// be observed. -// -// See http://goo.gl/45X1M for more details. 
-func (item *Item) Attrs(names []string, consistent bool) (resp *AttrsResp, err error) { - params := makeParams("GetAttributes") - params["ItemName"] = []string{item.Name} - if consistent { - params["ConsistentRead"] = []string{"true"} - } - - // Copy these attributes over to the parameters - for i, name := range names { - params["AttributeName."+strconv.Itoa(i+1)] = []string{name} - } - - resp = &AttrsResp{} - err = item.query(params, nil, resp) - if err != nil { - return nil, err - } - return -} - -// ---------------------------------------------------------------------------- -// Generic data structures for all requests/responses. - -// Error encapsulates an error returned by SDB. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - StatusMsg string // HTTP status message ("Service Unavailable", "Bad Request", ...) - Code string // SimpleDB error code ("InvalidParameterValue", ...) - Message string // The human-oriented error message - RequestId string // A unique ID for this request - BoxUsage float64 // The measure of machine utilization for this request. -} - -func (err *Error) Error() string { - return err.Message -} - -// SimpleResp represents a response to an SDB request which on success -// will return no other information besides ResponseMetadata. -type SimpleResp struct { - ResponseMetadata ResponseMetadata -} - -// ResponseMetadata -type ResponseMetadata struct { - RequestId string // A unique ID for tracking the request - BoxUsage float64 // The measure of machine utilization for this request. -} - -func buildError(r *http.Response) error { - err := Error{} - err.StatusCode = r.StatusCode - err.StatusMsg = r.Status - xml.NewDecoder(r.Body).Decode(&err) - return &err -} - -// ---------------------------------------------------------------------------- -// Request dispatching logic. 
- -func (item *Item) query(params url.Values, headers http.Header, resp interface{}) error { - return item.Domain.SDB.query(item.Domain, item, params, headers, resp) -} - -func (domain *Domain) query(item *Item, params url.Values, headers http.Header, resp interface{}) error { - return domain.SDB.query(domain, item, params, headers, resp) -} - -func (sdb *SDB) query(domain *Domain, item *Item, params url.Values, headers http.Header, resp interface{}) error { - // all SimpleDB operations have path="/" - method := "GET" - path := "/" - - // if we have been given no headers or params, create them - if headers == nil { - headers = map[string][]string{} - } - if params == nil { - params = map[string][]string{} - } - - // setup some default parameters - params["Version"] = []string{"2009-04-15"} - params["Timestamp"] = []string{time.Now().UTC().Format(time.RFC3339)} - - // set the DomainName param (every request must have one) - if domain != nil { - params["DomainName"] = []string{domain.Name} - } - - // set the ItemName if we have one - if item != nil { - params["ItemName"] = []string{item.Name} - } - - // check the endpoint URL - u, err := url.Parse(sdb.Region.SDBEndpoint) - if err != nil { - return err - } - headers["Host"] = []string{u.Host} - sign(sdb.Auth, method, path, params, headers) - - u.Path = path - if len(params) > 0 { - u.RawQuery = params.Encode() - } - req := http.Request{ - URL: u, - Method: method, - ProtoMajor: 1, - ProtoMinor: 1, - Close: true, - Header: headers, - } - - if v, ok := headers["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(v[0], 10, 64) - delete(headers, "Content-Length") - } - - r, err := http.DefaultClient.Do(&req) - if err != nil { - return err - } - defer r.Body.Close() - - if debug { - dump, _ := httputil.DumpResponse(r, true) - log.Printf("response:\n") - log.Printf("%v\n}\n", string(dump)) - } - - // status code is always 200 when successful (since we're always doing a GET) - if r.StatusCode != 200 { - return 
buildError(r) - } - - // everything was fine, so unmarshal the XML and return what it's err is (if any) - err = xml.NewDecoder(r.Body).Decode(resp) - return err -} - -func makeParams(action string) map[string][]string { - params := make(map[string][]string) - params["Action"] = []string{action} - return params -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sdb_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sdb_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sdb_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sdb_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,218 +0,0 @@ -package sdb_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/exp/sdb" - "github.com/mitchellh/goamz/testutil" - . "github.com/motain/gocheck" - "testing" -) - -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&S{}) - -type S struct { - sdb *sdb.SDB -} - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - s.sdb = sdb.New(auth, aws.Region{SDBEndpoint: testServer.URL}) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) TestCreateDomainOK(c *C) { - testServer.Response(200, nil, TestCreateDomainXmlOK) - - domain := s.sdb.Domain("domain") - resp, err := domain.CreateDomain() - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.ResponseMetadata.RequestId, Equals, "63264005-7a5f-e01a-a224-395c63b89f6d") - c.Assert(resp.ResponseMetadata.BoxUsage, Equals, 0.0055590279) - - c.Assert(err, IsNil) -} - -func (s *S) TestListDomainsOK(c *C) { - testServer.Response(200, nil, TestListDomainsXmlOK) - - resp, 
err := s.sdb.ListDomains() - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.ResponseMetadata.RequestId, Equals, "15fcaf55-9914-63c2-21f3-951e31193790") - c.Assert(resp.ResponseMetadata.BoxUsage, Equals, 0.0000071759) - c.Assert(resp.Domains, DeepEquals, []string{"Account", "Domain", "Record"}) - - c.Assert(err, IsNil) -} - -func (s *S) TestListDomainsWithNextTokenXmlOK(c *C) { - testServer.Response(200, nil, TestListDomainsWithNextTokenXmlOK) - - resp, err := s.sdb.ListDomains() - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.ResponseMetadata.RequestId, Equals, "eb13162f-1b95-4511-8b12-489b86acfd28") - c.Assert(resp.ResponseMetadata.BoxUsage, Equals, 0.0000219907) - c.Assert(resp.Domains, DeepEquals, []string{"Domain1-200706011651", "Domain2-200706011652"}) - c.Assert(resp.NextToken, Equals, "TWV0ZXJpbmdUZXN0RG9tYWluMS0yMDA3MDYwMTE2NTY=") - - c.Assert(err, IsNil) -} - -func (s *S) TestDeleteDomainOK(c *C) { - testServer.Response(200, nil, TestDeleteDomainXmlOK) - - domain := s.sdb.Domain("domain") - resp, err := domain.DeleteDomain() - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.ResponseMetadata.RequestId, Equals, "039e1e25-9a64-2a74-93da-2fda36122a97") - c.Assert(resp.ResponseMetadata.BoxUsage, Equals, 0.0055590278) - - c.Assert(err, IsNil) -} - -func (s *S) TestPutAttrsOK(c *C) { - testServer.Response(200, nil, TestPutAttrsXmlOK) - - domain := s.sdb.Domain("MyDomain") - item := domain.Item("Item123") - - putAttrs := new(sdb.PutAttrs) - putAttrs.Add("FirstName", "john") - putAttrs.Add("LastName", "smith") - putAttrs.Replace("MiddleName", "jacob") - - putAttrs.IfValue("FirstName", 
"john") - putAttrs.IfMissing("FirstName") - - resp, err := item.PutAttrs(putAttrs) - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Form["Action"], DeepEquals, []string{"PutAttributes"}) - c.Assert(req.Form["ItemName"], DeepEquals, []string{"Item123"}) - c.Assert(req.Form["DomainName"], DeepEquals, []string{"MyDomain"}) - c.Assert(req.Form["Attribute.1.Name"], DeepEquals, []string{"FirstName"}) - c.Assert(req.Form["Attribute.1.Value"], DeepEquals, []string{"john"}) - c.Assert(req.Form["Attribute.2.Name"], DeepEquals, []string{"LastName"}) - c.Assert(req.Form["Attribute.2.Value"], DeepEquals, []string{"smith"}) - c.Assert(req.Form["Attribute.3.Name"], DeepEquals, []string{"MiddleName"}) - c.Assert(req.Form["Attribute.3.Value"], DeepEquals, []string{"jacob"}) - c.Assert(req.Form["Attribute.3.Replace"], DeepEquals, []string{"true"}) - - c.Assert(req.Form["Expected.1.Name"], DeepEquals, []string{"FirstName"}) - c.Assert(req.Form["Expected.1.Value"], DeepEquals, []string{"john"}) - c.Assert(req.Form["Expected.1.Exists"], DeepEquals, []string{"false"}) - - c.Assert(err, IsNil) - c.Assert(resp.ResponseMetadata.RequestId, Equals, "490206ce-8292-456c-a00f-61b335eb202b") - c.Assert(resp.ResponseMetadata.BoxUsage, Equals, 0.0000219907) - -} - -func (s *S) TestAttrsOK(c *C) { - testServer.Response(200, nil, TestAttrsXmlOK) - - domain := s.sdb.Domain("MyDomain") - item := domain.Item("Item123") - - resp, err := item.Attrs(nil, true) - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - c.Assert(req.Form["Action"], DeepEquals, []string{"GetAttributes"}) - c.Assert(req.Form["ItemName"], DeepEquals, []string{"Item123"}) - c.Assert(req.Form["DomainName"], DeepEquals, []string{"MyDomain"}) - c.Assert(req.Form["ConsistentRead"], DeepEquals, []string{"true"}) - - c.Assert(resp.Attrs[0].Name, Equals, 
"Color") - c.Assert(resp.Attrs[0].Value, Equals, "Blue") - c.Assert(resp.Attrs[1].Name, Equals, "Size") - c.Assert(resp.Attrs[1].Value, Equals, "Med") - c.Assert(resp.ResponseMetadata.RequestId, Equals, "b1e8f1f7-42e9-494c-ad09-2674e557526d") - c.Assert(resp.ResponseMetadata.BoxUsage, Equals, 0.0000219942) - - c.Assert(err, IsNil) -} - -func (s *S) TestAttrsSelectOK(c *C) { - testServer.Response(200, nil, TestAttrsXmlOK) - - domain := s.sdb.Domain("MyDomain") - item := domain.Item("Item123") - - resp, err := item.Attrs([]string{"Color", "Size"}, true) - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - c.Assert(req.Form["Action"], DeepEquals, []string{"GetAttributes"}) - c.Assert(req.Form["ItemName"], DeepEquals, []string{"Item123"}) - c.Assert(req.Form["DomainName"], DeepEquals, []string{"MyDomain"}) - c.Assert(req.Form["ConsistentRead"], DeepEquals, []string{"true"}) - c.Assert(req.Form["AttributeName.1"], DeepEquals, []string{"Color"}) - c.Assert(req.Form["AttributeName.2"], DeepEquals, []string{"Size"}) - - c.Assert(resp.Attrs[0].Name, Equals, "Color") - c.Assert(resp.Attrs[0].Value, Equals, "Blue") - c.Assert(resp.Attrs[1].Name, Equals, "Size") - c.Assert(resp.Attrs[1].Value, Equals, "Med") - c.Assert(resp.ResponseMetadata.RequestId, Equals, "b1e8f1f7-42e9-494c-ad09-2674e557526d") - c.Assert(resp.ResponseMetadata.BoxUsage, Equals, 0.0000219942) - - c.Assert(err, IsNil) -} - -func (s *S) TestSelectOK(c *C) { - testServer.Response(200, nil, TestSelectXmlOK) - - resp, err := s.sdb.Select("select Color from MyDomain where Color like 'Blue%'", true) - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - c.Assert(req.Form["Action"], DeepEquals, []string{"Select"}) - c.Assert(req.Form["ConsistentRead"], DeepEquals, []string{"true"}) - - 
c.Assert(resp.ResponseMetadata.RequestId, Equals, "b1e8f1f7-42e9-494c-ad09-2674e557526d") - c.Assert(resp.ResponseMetadata.BoxUsage, Equals, 0.0000219907) - c.Assert(len(resp.Items), Equals, 2) - c.Assert(resp.Items[0].Name, Equals, "Item_03") - c.Assert(resp.Items[1].Name, Equals, "Item_06") - c.Assert(resp.Items[0].Attrs[0].Name, Equals, "Category") - c.Assert(resp.Items[0].Attrs[0].Value, Equals, "Clothes") - - c.Assert(err, IsNil) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sign.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -package sdb - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "net/http" - "net/url" - "sort" - "strings" - - "github.com/mitchellh/goamz/aws" -) - -var b64 = base64.StdEncoding - -// ---------------------------------------------------------------------------- -// SimpleDB signing (http://goo.gl/CaY81) - -func sign(auth aws.Auth, method, path string, params url.Values, headers http.Header) { - var host string - for k, v := range headers { - k = strings.ToLower(k) - switch k { - case "host": - host = v[0] - } - } - - // set up some defaults used for signing the request - params["AWSAccessKeyId"] = []string{auth.AccessKey} - params["SignatureVersion"] = []string{"2"} - params["SignatureMethod"] = []string{"HmacSHA256"} - if auth.Token != "" { - params["SecurityToken"] = []string{auth.Token} - } - - // join up all the incoming params - var sarray []string - for k, v := range params { - sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(v[0])) - } - sort.StringSlice(sarray).Sort() - joined := strings.Join(sarray, "&") - - // create the 
payload, sign it and create the signature - payload := strings.Join([]string{method, host, "/", joined}, "\n") - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - // add the signature to the outgoing params - params["Signature"] = []string{string(signature)} -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sign_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sign_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sign_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sdb/sign_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -package sdb_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/exp/sdb" - . 
"github.com/motain/gocheck" -) - -// SimpleDB ReST authentication docs: http://goo.gl/CaY81 - -var testAuth = aws.Auth{"access-key-id-s8eBOWuU", "secret-access-key-UkQjTLd9", ""} - -func (s *S) TestSignExampleDomainCreate(c *C) { - method := "GET" - params := map[string][]string{ - "Action": {"CreateDomain"}, - "DomainName": {"MyDomain"}, - "Timestamp": {"2011-08-20T07:23:57+12:00"}, - "Version": {"2009-04-15"}, - } - headers := map[string][]string{ - "Host": {"sdb.amazonaws.com"}, - } - sdb.Sign(testAuth, method, "", params, headers) - expected := "ot2JaeeqMRJqgAqW67hkzUlffgxdOz4RykbrECB+tDU=" - c.Assert(params["Signature"], DeepEquals, []string{expected}) -} - -// Do a few test methods which takes combinations of params diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/Makefile aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/Makefile --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/Makefile 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=launchpad.net/goamz/sns - -GOFILES=\ - sns.go\ - sign.go\ - -include $(GOROOT)/src/Make.pkg - -GOFMT=gofmt -BADFMT=$(shell $(GOFMT) -l $(GOFILES) 2> /dev/null) - -gofmt: $(BADFMT) - @for F in $(BADFMT); do $(GOFMT) -w $$F && echo $$F; done - -ifneq ($(BADFMT),) -ifneq ($(MAKECMDGOALS), gofmt) -#$(warning WARNING: make gofmt: $(BADFMT)) -endif -endif diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/permissions.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/permissions.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/permissions.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/permissions.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -package sns - -import ( - "strconv" -) - -type Permission struct { - ActionName string - AccountId string -} - -type AddPermissionResponse struct { - ResponseMetadata -} - -// AddPermission -// -// See http://goo.gl/mbY4a for more details. -func (sns *SNS) AddPermission(permissions []Permission, Label, TopicArn string) (resp *AddPermissionResponse, err error) { - resp = &AddPermissionResponse{} - params := makeParams("AddPermission") - - for i, p := range permissions { - params["AWSAccountId.member."+strconv.Itoa(i+1)] = p.AccountId - params["ActionName.member."+strconv.Itoa(i+1)] = p.ActionName - } - - params["Label"] = Label - params["TopicArn"] = TopicArn - - err = sns.query(params, resp) - return -} - -type RemovePermissionResponse struct { - ResponseMetadata -} - -// RemovePermission -// -// See http://goo.gl/wGl5j for more details. -func (sns *SNS) RemovePermission(Label, TopicArn string) (resp *RemovePermissionResponse, err error) { - resp = &RemovePermissionResponse{} - params := makeParams("RemovePermission") - - params["Label"] = Label - params["TopicArn"] = TopicArn - - err = sns.query(params, resp) - return -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/README aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/README --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/README 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/README 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -Amazon Simple Notification Service API for Golang. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -package sns_test - -var TestListTopicsXmlOK = ` - - - - - - arn:aws:sns:us-west-1:331995417492:Transcoding - - - - - bd10b26c-e30e-11e0-ba29-93c3aca2f103 - - -` - -var TestCreateTopicXmlOK = ` - - - - arn:aws:sns:us-east-1:123456789012:My-Topic - - - a8dec8b3-33a4-11df-8963-01868b7c937a - - -` - -var TestDeleteTopicXmlOK = ` - - - f3aa9ac9-3c3d-11df-8235-9dab105e9c32 - - -` - -var TestListSubscriptionsXmlOK = ` - - - - - arn:aws:sns:us-east-1:698519295917:My-Topic - email - arn:aws:sns:us-east-1:123456789012:My-Topic:80289ba6-0fd4-4079-afb4-ce8c8260f0ca - 123456789012 - example@amazon.com - - - - - 384ac68d-3775-11df-8963-01868b7c937a - - -` - -var TestGetTopicAttributesXmlOK = ` - - - - - Owner - 123456789012 - - - Policy - {"Version":"2008-10-17","Id":"us-east-1/698519295917/test__default_policy_ID","Statement" : [{"Effect":"Allow","Sid":"us-east-1/698519295917/test__default_statement_ID","Principal" : {"AWS": "*"},"Action":["SNS:GetTopicAttributes","SNS:SetTopicAttributes","SNS:AddPermission","SNS:RemovePermission","SNS:DeleteTopic","SNS:Subscribe","SNS:ListSubscriptionsByTopic","SNS:Publish","SNS:Receive"],"Resource":"arn:aws:sns:us-east-1:698519295917:test","Condition" : {"StringLike" : {"AWS:SourceArn": "arn:aws:*:*:698519295917:*"}}}]} - - - TopicArn - arn:aws:sns:us-east-1:123456789012:My-Topic - - - - - 057f074c-33a7-11df-9540-99d0768312d3 - - -` - -var TestPublishXmlOK = ` - - - 94f20ce6-13c5-43a0-9a9e-ca52d816e90b - - - 
f187a3c1-376f-11df-8963-01868b7c937a - - -` - -var TestSetTopicAttributesXmlOK = ` - - - a8763b99-33a7-11df-a9b7-05d48da6f042 - - -` - -var TestSubscribeXmlOK = ` - - - pending confirmation - - - a169c740-3766-11df-8963-01868b7c937a - - -` - -var TestUnsubscribeXmlOK = ` - - - 18e0ac39-3776-11df-84c0-b93cc1666b84 - - -` - -var TestConfirmSubscriptionXmlOK = ` - - - arn:aws:sns:us-east-1:123456789012:My-Topic:80289ba6-0fd4-4079-afb4-ce8c8260f0ca - - - 7a50221f-3774-11df-a9b7-05d48da6f042 - - -` - -var TestAddPermissionXmlOK = ` - - - 6a213e4e-33a8-11df-9540-99d0768312d3 - - -` - -var TestRemovePermissionXmlOK = ` - - - d170b150-33a8-11df-995a-2d6fbe836cc1 - - -` - -var TestListSubscriptionsByTopicXmlOK = ` - - - - - arn:aws:sns:us-east-1:123456789012:My-Topic - email - arn:aws:sns:us-east-1:123456789012:My-Topic:80289ba6-0fd4-4079-afb4-ce8c8260f0ca - 123456789012 - example@amazon.com - - - - - b9275252-3774-11df-9540-99d0768312d3 - - -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sign.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -package sns - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "github.com/mitchellh/goamz/aws" - "sort" - "strings" -) - -var b64 = base64.StdEncoding - -/* -func sign(auth aws.Auth, method, path string, params url.Values, headers http.Header) { - var host string - for k, v := range headers { - k = strings.ToLower(k) - switch k { - case "host": - host = v[0] - } - } - - params["AWSAccessKeyId"] = []string{auth.AccessKey} - params["SignatureVersion"] = []string{"2"} - params["SignatureMethod"] = []string{"HmacSHA256"} - if auth.Token != "" { 
- params["SecurityToken"] = auth.Token - } - - var sarry []string - for k, v := range params { - sarry = append(sarry, aws.Encode(k) + "=" + aws.Encode(v[0])) - } - - sort.StringSlice(sarry).Sort() - joined := strings.Join(sarry, "&") - - payload := strings.Join([]string{method, host, "/", joined}, "\n") - hash := hmac.NewSHA256([]byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum()) - - params["Signature"] = []string{"AWS " + string(signature)} - println("Payload:", payload) - println("Signature:", strings.Join(params["Signature"], "|")) -}*/ - -func sign(auth aws.Auth, method, path string, params map[string]string, host string) { - params["AWSAccessKeyId"] = auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - - var sarray []string - for k, v := range params { - sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(v)) - } - sort.StringSlice(sarray).Sort() - joined := strings.Join(sarray, "&") - payload := method + "\n" + host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sns.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sns.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sns.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sns.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,115 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Memeo Inc. 
-// -// Written by Prudhvi Krishna Surapaneni - -// This package is in an experimental state, and does not currently -// follow conventions and style of the rest of goamz or common -// Go conventions. It must be polished before it's considered a -// first-class package in goamz. -package sns - -// BUG(niemeyer): Topic values in responses are not being initialized -// properly, since they're supposed to reference *SNS. - -// BUG(niemeyer): Package needs documentation. - -import ( - "encoding/xml" - "github.com/mitchellh/goamz/aws" - "net/http" - "net/url" - "time" -) - -// The SNS type encapsulates operation with an SNS region. -type SNS struct { - aws.Auth - aws.Region - private byte // Reserve the right of using private data. -} - -type AttributeEntry struct { - Key string `xml:"key"` - Value string `xml:"value"` -} - -type ResponseMetadata struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - BoxUsage float64 `xml:"ResponseMetadata>BoxUsage"` -} - -func New(auth aws.Auth, region aws.Region) *SNS { - return &SNS{auth, region, 0} -} - -func makeParams(action string) map[string]string { - params := make(map[string]string) - params["Action"] = action - return params -} - -type Error struct { - StatusCode int - Code string - Message string - RequestId string -} - -func (err *Error) Error() string { - return err.Message -} - -type xmlErrors struct { - RequestId string - Errors []Error `xml:"Errors>Error"` -} - -func (sns *SNS) query(params map[string]string, resp interface{}) error { - params["Timestamp"] = time.Now().UTC().Format(time.RFC3339) - u, err := url.Parse(sns.Region.SNSEndpoint) - if err != nil { - return err - } - - sign(sns.Auth, "GET", "/", params, u.Host) - u.RawQuery = multimap(params).Encode() - r, err := http.Get(u.String()) - if err != nil { - return err - } - defer r.Body.Close() - - if r.StatusCode != 200 { - return buildError(r) - } - err = xml.NewDecoder(r.Body).Decode(resp) - return err -} - -func buildError(r *http.Response) error { 
- errors := xmlErrors{} - xml.NewDecoder(r.Body).Decode(&errors) - var err Error - if len(errors.Errors) > 0 { - err = errors.Errors[0] - } - err.RequestId = errors.RequestId - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - return &err -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sns_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sns_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sns_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/sns_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,250 +0,0 @@ -package sns_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/exp/sns" - "github.com/mitchellh/goamz/testutil" - . 
"github.com/motain/gocheck" - "testing" -) - -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&S{}) - -type S struct { - sns *sns.SNS -} - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - s.sns = sns.New(auth, aws.Region{SNSEndpoint: testServer.URL}) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) TestListTopicsOK(c *C) { - testServer.Response(200, nil, TestListTopicsXmlOK) - - resp, err := s.sns.ListTopics(nil) - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.Topics[0].SNS, Equals, s.sns) - c.Assert(resp.ResponseMetadata.RequestId, Equals, "bd10b26c-e30e-11e0-ba29-93c3aca2f103") - c.Assert(err, IsNil) -} - -func (s *S) TestCreateTopic(c *C) { - testServer.Response(200, nil, TestCreateTopicXmlOK) - - resp, err := s.sns.CreateTopic("My-Topic") - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.Topic.SNS, Equals, s.sns) - c.Assert(resp.Topic.TopicArn, Equals, "arn:aws:sns:us-east-1:123456789012:My-Topic") - c.Assert(resp.ResponseMetadata.RequestId, Equals, "a8dec8b3-33a4-11df-8963-01868b7c937a") - c.Assert(err, IsNil) -} - -func (s *S) TestDeleteTopic(c *C) { - testServer.Response(200, nil, TestDeleteTopicXmlOK) - - t := sns.Topic{s.sns, "arn:aws:sns:us-east-1:123456789012:My-Topic"} - resp, err := t.Delete() - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.ResponseMetadata.RequestId, Equals, "f3aa9ac9-3c3d-11df-8235-9dab105e9c32") - c.Assert(err, IsNil) -} - -func (s *S) TestListSubscriptions(c *C) { - testServer.Response(200, nil, TestListSubscriptionsXmlOK) - 
- resp, err := s.sns.ListSubscriptions(nil) - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(len(resp.Subscriptions), Not(Equals), 0) - c.Assert(resp.Subscriptions[0].Protocol, Equals, "email") - c.Assert(resp.Subscriptions[0].Endpoint, Equals, "example@amazon.com") - c.Assert(resp.Subscriptions[0].SubscriptionArn, Equals, "arn:aws:sns:us-east-1:123456789012:My-Topic:80289ba6-0fd4-4079-afb4-ce8c8260f0ca") - c.Assert(resp.Subscriptions[0].TopicArn, Equals, "arn:aws:sns:us-east-1:698519295917:My-Topic") - c.Assert(resp.Subscriptions[0].Owner, Equals, "123456789012") - c.Assert(err, IsNil) -} - -func (s *S) TestGetTopicAttributes(c *C) { - testServer.Response(200, nil, TestGetTopicAttributesXmlOK) - - resp, err := s.sns.GetTopicAttributes("arn:aws:sns:us-east-1:123456789012:My-Topic") - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(len(resp.Attributes), Not(Equals), 0) - c.Assert(resp.Attributes[0].Key, Equals, "Owner") - c.Assert(resp.Attributes[0].Value, Equals, "123456789012") - c.Assert(resp.Attributes[1].Key, Equals, "Policy") - c.Assert(resp.Attributes[1].Value, Equals, `{"Version":"2008-10-17","Id":"us-east-1/698519295917/test__default_policy_ID","Statement" : [{"Effect":"Allow","Sid":"us-east-1/698519295917/test__default_statement_ID","Principal" : {"AWS": "*"},"Action":["SNS:GetTopicAttributes","SNS:SetTopicAttributes","SNS:AddPermission","SNS:RemovePermission","SNS:DeleteTopic","SNS:Subscribe","SNS:ListSubscriptionsByTopic","SNS:Publish","SNS:Receive"],"Resource":"arn:aws:sns:us-east-1:698519295917:test","Condition" : {"StringLike" : {"AWS:SourceArn": "arn:aws:*:*:698519295917:*"}}}]}`) - c.Assert(resp.ResponseMetadata.RequestId, Equals, "057f074c-33a7-11df-9540-99d0768312d3") - c.Assert(err, IsNil) -} - -func (s 
*S) TestPublish(c *C) { - testServer.Response(200, nil, TestPublishXmlOK) - - pubOpt := &sns.PublishOpt{ - Message: "foobar", - MessageStructure: "", - Subject: "subject", - TopicArn: "arn:aws:sns:us-east-1:123456789012:My-Topic", - TargetArn: "arn:aws:sns:us-east-1:123456789012:My-Other-Topic", - } - - resp, err := s.sns.Publish(pubOpt) - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.MessageId, Equals, "94f20ce6-13c5-43a0-9a9e-ca52d816e90b") - c.Assert(resp.ResponseMetadata.RequestId, Equals, "f187a3c1-376f-11df-8963-01868b7c937a") - c.Assert(err, IsNil) -} - -func (s *S) TestSetTopicAttributes(c *C) { - testServer.Response(200, nil, TestSetTopicAttributesXmlOK) - - resp, err := s.sns.SetTopicAttributes("DisplayName", "MyTopicName", "arn:aws:sns:us-east-1:123456789012:My-Topic") - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.ResponseMetadata.RequestId, Equals, "a8763b99-33a7-11df-a9b7-05d48da6f042") - c.Assert(err, IsNil) -} - -func (s *S) TestSubscribe(c *C) { - testServer.Response(200, nil, TestSubscribeXmlOK) - - resp, err := s.sns.Subscribe("example@amazon.com", "email", "arn:aws:sns:us-east-1:123456789012:My-Topic") - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.SubscriptionArn, Equals, "pending confirmation") - c.Assert(resp.ResponseMetadata.RequestId, Equals, "a169c740-3766-11df-8963-01868b7c937a") - c.Assert(err, IsNil) -} - -func (s *S) TestUnsubscribe(c *C) { - testServer.Response(200, nil, TestUnsubscribeXmlOK) - - resp, err := s.sns.Unsubscribe("arn:aws:sns:us-east-1:123456789012:My-Topic:a169c740-3766-11df-8963-01868b7c937a") - req := testServer.WaitRequest() - - 
c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.ResponseMetadata.RequestId, Equals, "18e0ac39-3776-11df-84c0-b93cc1666b84") - c.Assert(err, IsNil) -} - -func (s *S) TestConfirmSubscription(c *C) { - testServer.Response(200, nil, TestConfirmSubscriptionXmlOK) - - opt := &sns.ConfirmSubscriptionOpt{"", "51b2ff3edb475b7d91550e0ab6edf0c1de2a34e6ebaf6", "arn:aws:sns:us-east-1:123456789012:My-Topic"} - resp, err := s.sns.ConfirmSubscription(opt) - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.SubscriptionArn, Equals, "arn:aws:sns:us-east-1:123456789012:My-Topic:80289ba6-0fd4-4079-afb4-ce8c8260f0ca") - c.Assert(resp.ResponseMetadata.RequestId, Equals, "7a50221f-3774-11df-a9b7-05d48da6f042") - c.Assert(err, IsNil) -} - -func (s *S) TestAddPermission(c *C) { - testServer.Response(200, nil, TestAddPermissionXmlOK) - perm := make([]sns.Permission, 2) - perm[0].ActionName = "Publish" - perm[1].ActionName = "GetTopicAttributes" - perm[0].AccountId = "987654321000" - perm[1].AccountId = "876543210000" - - resp, err := s.sns.AddPermission(perm, "NewPermission", "arn:aws:sns:us-east-1:123456789012:My-Topic") - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.RequestId, Equals, "6a213e4e-33a8-11df-9540-99d0768312d3") - c.Assert(err, IsNil) -} - -func (s *S) TestRemovePermission(c *C) { - testServer.Response(200, nil, TestRemovePermissionXmlOK) - - resp, err := s.sns.RemovePermission("NewPermission", "arn:aws:sns:us-east-1:123456789012:My-Topic") - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(resp.RequestId, Equals, 
"d170b150-33a8-11df-995a-2d6fbe836cc1") - c.Assert(err, IsNil) -} - -func (s *S) TestListSubscriptionByTopic(c *C) { - testServer.Response(200, nil, TestListSubscriptionsByTopicXmlOK) - - opt := &sns.ListSubscriptionByTopicOpt{"", "arn:aws:sns:us-east-1:123456789012:My-Topic"} - resp, err := s.sns.ListSubscriptionByTopic(opt) - req := testServer.WaitRequest() - - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(len(resp.Subscriptions), Not(Equals), 0) - c.Assert(resp.Subscriptions[0].TopicArn, Equals, "arn:aws:sns:us-east-1:123456789012:My-Topic") - c.Assert(resp.Subscriptions[0].SubscriptionArn, Equals, "arn:aws:sns:us-east-1:123456789012:My-Topic:80289ba6-0fd4-4079-afb4-ce8c8260f0ca") - c.Assert(resp.Subscriptions[0].Owner, Equals, "123456789012") - c.Assert(resp.Subscriptions[0].Endpoint, Equals, "example@amazon.com") - c.Assert(resp.Subscriptions[0].Protocol, Equals, "email") - c.Assert(err, IsNil) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/subscription.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/subscription.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/subscription.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/subscription.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,161 +0,0 @@ -package sns - -type Subscription struct { - Endpoint string - Owner string - Protocol string - SubscriptionArn string - TopicArn string -} - -type ListSubscriptionsResp struct { - Subscriptions []Subscription `xml:"ListSubscriptionsResult>Subscriptions>member"` - NextToken string - ResponseMetadata -} - -type PublishOpt struct { - Message string - MessageStructure string - Subject string - TopicArn string - TargetArn string -} - -type PublishResp struct { - MessageId 
string `xml:"PublishResult>MessageId"` - ResponseMetadata -} - -type SubscribeResponse struct { - SubscriptionArn string `xml:"SubscribeResult>SubscriptionArn"` - ResponseMetadata -} - -type UnsubscribeResponse struct { - ResponseMetadata -} - -type ConfirmSubscriptionResponse struct { - SubscriptionArn string `xml:"ConfirmSubscriptionResult>SubscriptionArn"` - ResponseMetadata -} - -type ConfirmSubscriptionOpt struct { - AuthenticateOnUnsubscribe string - Token string - TopicArn string -} - -type ListSubscriptionByTopicResponse struct { - Subscriptions []Subscription `xml:"ListSubscriptionsByTopicResult>Subscriptions>member"` - ResponseMetadata -} - -type ListSubscriptionByTopicOpt struct { - NextToken string - TopicArn string -} - -// Publish -// -// See http://goo.gl/AY2D8 for more details. -func (sns *SNS) Publish(options *PublishOpt) (resp *PublishResp, err error) { - resp = &PublishResp{} - params := makeParams("Publish") - - if options.Subject != "" { - params["Subject"] = options.Subject - } - - if options.MessageStructure != "" { - params["MessageStructure"] = options.MessageStructure - } - - if options.Message != "" { - params["Message"] = options.Message - } - - if options.TopicArn != "" { - params["TopicArn"] = options.TopicArn - } - - err = sns.query(params, resp) - return -} - -// Subscribe -// -// See http://goo.gl/c3iGS for more details. -func (sns *SNS) Subscribe(Endpoint, Protocol, TopicArn string) (resp *SubscribeResponse, err error) { - resp = &SubscribeResponse{} - params := makeParams("Subscribe") - - params["Endpoint"] = Endpoint - params["Protocol"] = Protocol - params["TopicArn"] = TopicArn - - err = sns.query(params, resp) - return -} - -// Unsubscribe -// -// See http://goo.gl/4l5Ge for more details. 
-func (sns *SNS) Unsubscribe(SubscriptionArn string) (resp *UnsubscribeResponse, err error) { - resp = &UnsubscribeResponse{} - params := makeParams("Unsubscribe") - - params["SubscriptionArn"] = SubscriptionArn - - err = sns.query(params, resp) - return -} - -// ConfirmSubscription -// -// See http://goo.gl/3hXzH for more details. -func (sns *SNS) ConfirmSubscription(options *ConfirmSubscriptionOpt) (resp *ConfirmSubscriptionResponse, err error) { - resp = &ConfirmSubscriptionResponse{} - params := makeParams("ConfirmSubscription") - - if options.AuthenticateOnUnsubscribe != "" { - params["AuthenticateOnUnsubscribe"] = options.AuthenticateOnUnsubscribe - } - - params["Token"] = options.Token - params["TopicArn"] = options.TopicArn - - err = sns.query(params, resp) - return -} - -// ListSubscriptions -// -// See http://goo.gl/k3aGn for more details. -func (sns *SNS) ListSubscriptions(NextToken *string) (resp *ListSubscriptionsResp, err error) { - resp = &ListSubscriptionsResp{} - params := makeParams("ListSubscriptions") - if NextToken != nil { - params["NextToken"] = *NextToken - } - err = sns.query(params, resp) - return -} - -// ListSubscriptionByTopic -// -// See http://goo.gl/LaVcC for more details. 
-func (sns *SNS) ListSubscriptionByTopic(options *ListSubscriptionByTopicOpt) (resp *ListSubscriptionByTopicResponse, err error) { - resp = &ListSubscriptionByTopicResponse{} - params := makeParams("ListSbubscriptionByTopic") - - if options.NextToken != "" { - params["NextToken"] = options.NextToken - } - - params["TopicArn"] = options.TopicArn - - err = sns.query(params, resp) - return -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/topic.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/topic.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/topic.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/exp/sns/topic.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ -package sns - -import ( - "errors" -) - -type Topic struct { - SNS *SNS - TopicArn string -} - -type ListTopicsResp struct { - Topics []Topic `xml:"ListTopicsResult>Topics>member"` - NextToken string - ResponseMetadata -} - -type CreateTopicResp struct { - Topic Topic `xml:"CreateTopicResult"` - ResponseMetadata -} - -type DeleteTopicResp struct { - ResponseMetadata -} - -type GetTopicAttributesResp struct { - Attributes []AttributeEntry `xml:"GetTopicAttributesResult>Attributes>entry"` - ResponseMetadata -} - -type SetTopicAttributesResponse struct { - ResponseMetadata -} - -// ListTopics -// -// See http://goo.gl/lfrMK for more details. -func (sns *SNS) ListTopics(NextToken *string) (resp *ListTopicsResp, err error) { - resp = &ListTopicsResp{} - params := makeParams("ListTopics") - if NextToken != nil { - params["NextToken"] = *NextToken - } - err = sns.query(params, resp) - for i, _ := range resp.Topics { - resp.Topics[i].SNS = sns - } - return -} - -// CreateTopic -// -// See http://goo.gl/m9aAt for more details. 
-func (sns *SNS) CreateTopic(Name string) (resp *CreateTopicResp, err error) { - resp = &CreateTopicResp{} - params := makeParams("CreateTopic") - params["Name"] = Name - err = sns.query(params, resp) - resp.Topic.SNS = sns - return -} - -// Delete -// -// Helper function for deleting a topic -func (topic *Topic) Delete() (resp *DeleteTopicResp, err error) { - resp = &DeleteTopicResp{} - params := makeParams("DeleteTopic") - params["TopicArn"] = topic.TopicArn - err = topic.SNS.query(params, resp) - return -} - -// GetTopicAttributes -// -// See http://goo.gl/WXRoX for more details. -func (sns *SNS) GetTopicAttributes(TopicArn string) (resp *GetTopicAttributesResp, err error) { - resp = &GetTopicAttributesResp{} - params := makeParams("GetTopicAttributes") - params["TopicArn"] = TopicArn - err = sns.query(params, resp) - return -} - -// SetTopicAttributes -// -// See http://goo.gl/oVYW7 for more details. -func (sns *SNS) SetTopicAttributes(AttributeName, AttributeValue, TopicArn string) (resp *SetTopicAttributesResponse, err error) { - resp = &SetTopicAttributesResponse{} - params := makeParams("SetTopicAttributes") - - if AttributeName == "" || TopicArn == "" { - return nil, errors.New("Invalid Attribute Name or TopicArn") - } - - params["AttributeName"] = AttributeName - params["AttributeValue"] = AttributeValue - params["TopicArn"] = TopicArn - - err = sns.query(params, resp) - return -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iam.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iam.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iam.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iam.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,460 +0,0 @@ -// The iam package provides types and functions for interaction with the AWS -// Identity and Access Management (IAM) 
service. -package iam - -import ( - "encoding/xml" - "github.com/mitchellh/goamz/aws" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// The IAM type encapsulates operations operations with the IAM endpoint. -type IAM struct { - aws.Auth - aws.Region - httpClient *http.Client -} - -// New creates a new IAM instance. -func New(auth aws.Auth, region aws.Region) *IAM { - return NewWithClient(auth, region, aws.RetryingClient) -} - -func NewWithClient(auth aws.Auth, region aws.Region, httpClient *http.Client) *IAM { - return &IAM{auth, region, httpClient} -} - -func (iam *IAM) query(params map[string]string, resp interface{}) error { - params["Version"] = "2010-05-08" - params["Timestamp"] = time.Now().In(time.UTC).Format(time.RFC3339) - endpoint, err := url.Parse(iam.IAMEndpoint) - if err != nil { - return err - } - sign(iam.Auth, "GET", "/", params, endpoint.Host) - endpoint.RawQuery = multimap(params).Encode() - r, err := iam.httpClient.Get(endpoint.String()) - if err != nil { - return err - } - defer r.Body.Close() - if r.StatusCode > 200 { - return buildError(r) - } - return xml.NewDecoder(r.Body).Decode(resp) -} - -func (iam *IAM) postQuery(params map[string]string, resp interface{}) error { - endpoint, err := url.Parse(iam.IAMEndpoint) - if err != nil { - return err - } - params["Version"] = "2010-05-08" - params["Timestamp"] = time.Now().In(time.UTC).Format(time.RFC3339) - sign(iam.Auth, "POST", "/", params, endpoint.Host) - encoded := multimap(params).Encode() - body := strings.NewReader(encoded) - req, err := http.NewRequest("POST", endpoint.String(), body) - if err != nil { - return err - } - req.Header.Set("Host", endpoint.Host) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - req.Header.Set("Content-Length", strconv.Itoa(len(encoded))) - r, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer r.Body.Close() - if r.StatusCode > 200 { - return buildError(r) - } - return 
xml.NewDecoder(r.Body).Decode(resp) -} - -func buildError(r *http.Response) error { - var ( - err Error - errors xmlErrors - ) - xml.NewDecoder(r.Body).Decode(&errors) - if len(errors.Errors) > 0 { - err = errors.Errors[0] - } - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - return &err -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -// Response to a CreateUser request. -// -// See http://goo.gl/JS9Gz for more details. -type CreateUserResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - User User `xml:"CreateUserResult>User"` -} - -// User encapsulates a user managed by IAM. -// -// See http://goo.gl/BwIQ3 for more details. -type User struct { - Arn string - Path string - Id string `xml:"UserId"` - Name string `xml:"UserName"` -} - -// CreateUser creates a new user in IAM. -// -// See http://goo.gl/JS9Gz for more details. -func (iam *IAM) CreateUser(name, path string) (*CreateUserResp, error) { - params := map[string]string{ - "Action": "CreateUser", - "Path": path, - "UserName": name, - } - resp := new(CreateUserResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// Response for GetUser requests. -// -// See http://goo.gl/ZnzRN for more details. -type GetUserResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - User User `xml:"GetUserResult>User"` -} - -// GetUser gets a user from IAM. -// -// See http://goo.gl/ZnzRN for more details. -func (iam *IAM) GetUser(name string) (*GetUserResp, error) { - params := map[string]string{ - "Action": "GetUser", - "UserName": name, - } - resp := new(GetUserResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// DeleteUser deletes a user from IAM. -// -// See http://goo.gl/jBuCG for more details. 
-func (iam *IAM) DeleteUser(name string) (*SimpleResp, error) { - params := map[string]string{ - "Action": "DeleteUser", - "UserName": name, - } - resp := new(SimpleResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// Response to a CreateGroup request. -// -// See http://goo.gl/n7NNQ for more details. -type CreateGroupResp struct { - Group Group `xml:"CreateGroupResult>Group"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -// Group encapsulates a group managed by IAM. -// -// See http://goo.gl/ae7Vs for more details. -type Group struct { - Arn string - Id string `xml:"GroupId"` - Name string `xml:"GroupName"` - Path string -} - -// CreateGroup creates a new group in IAM. -// -// The path parameter can be used to identify which division or part of the -// organization the user belongs to. -// -// If path is unset ("") it defaults to "/". -// -// See http://goo.gl/n7NNQ for more details. -func (iam *IAM) CreateGroup(name string, path string) (*CreateGroupResp, error) { - params := map[string]string{ - "Action": "CreateGroup", - "GroupName": name, - } - if path != "" { - params["Path"] = path - } - resp := new(CreateGroupResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// Response to a ListGroups request. -// -// See http://goo.gl/W2TRj for more details. -type GroupsResp struct { - Groups []Group `xml:"ListGroupsResult>Groups>member"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -// Groups list the groups that have the specified path prefix. -// -// The parameter pathPrefix is optional. If pathPrefix is "", all groups are -// returned. -// -// See http://goo.gl/W2TRj for more details. 
-func (iam *IAM) Groups(pathPrefix string) (*GroupsResp, error) { - params := map[string]string{ - "Action": "ListGroups", - } - if pathPrefix != "" { - params["PathPrefix"] = pathPrefix - } - resp := new(GroupsResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// DeleteGroup deletes a group from IAM. -// -// See http://goo.gl/d5i2i for more details. -func (iam *IAM) DeleteGroup(name string) (*SimpleResp, error) { - params := map[string]string{ - "Action": "DeleteGroup", - "GroupName": name, - } - resp := new(SimpleResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// Response to a CreateAccessKey request. -// -// See http://goo.gl/L46Py for more details. -type CreateAccessKeyResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - AccessKey AccessKey `xml:"CreateAccessKeyResult>AccessKey"` -} - -// AccessKey encapsulates an access key generated for a user. -// -// See http://goo.gl/LHgZR for more details. -type AccessKey struct { - UserName string - Id string `xml:"AccessKeyId"` - Secret string `xml:"SecretAccessKey,omitempty"` - Status string -} - -// CreateAccessKey creates a new access key in IAM. -// -// See http://goo.gl/L46Py for more details. -func (iam *IAM) CreateAccessKey(userName string) (*CreateAccessKeyResp, error) { - params := map[string]string{ - "Action": "CreateAccessKey", - "UserName": userName, - } - resp := new(CreateAccessKeyResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// Response to AccessKeys request. -// -// See http://goo.gl/Vjozx for more details. -type AccessKeysResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - AccessKeys []AccessKey `xml:"ListAccessKeysResult>AccessKeyMetadata>member"` -} - -// AccessKeys lists all acccess keys associated with a user. -// -// The userName parameter is optional. 
If set to "", the userName is determined -// implicitly based on the AWS Access Key ID used to sign the request. -// -// See http://goo.gl/Vjozx for more details. -func (iam *IAM) AccessKeys(userName string) (*AccessKeysResp, error) { - params := map[string]string{ - "Action": "ListAccessKeys", - } - if userName != "" { - params["UserName"] = userName - } - resp := new(AccessKeysResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// DeleteAccessKey deletes an access key from IAM. -// -// The userName parameter is optional. If set to "", the userName is determined -// implicitly based on the AWS Access Key ID used to sign the request. -// -// See http://goo.gl/hPGhw for more details. -func (iam *IAM) DeleteAccessKey(id, userName string) (*SimpleResp, error) { - params := map[string]string{ - "Action": "DeleteAccessKey", - "AccessKeyId": id, - } - if userName != "" { - params["UserName"] = userName - } - resp := new(SimpleResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// Response to a GetUserPolicy request. -// -// See http://goo.gl/BH04O for more details. -type GetUserPolicyResp struct { - Policy UserPolicy `xml:"GetUserPolicyResult"` - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -// UserPolicy encapsulates an IAM group policy. -// -// See http://goo.gl/C7hgS for more details. -type UserPolicy struct { - Name string `xml:"PolicyName"` - UserName string `xml:"UserName"` - Document string `xml:"PolicyDocument"` -} - -// GetUserPolicy gets a user policy in IAM. -// -// See http://goo.gl/BH04O for more details. 
-func (iam *IAM) GetUserPolicy(userName, policyName string) (*GetUserPolicyResp, error) { - params := map[string]string{ - "Action": "GetUserPolicy", - "UserName": userName, - "PolicyName": policyName, - } - resp := new(GetUserPolicyResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil - return nil, nil -} - -// PutUserPolicy creates a user policy in IAM. -// -// See http://goo.gl/ldCO8 for more details. -func (iam *IAM) PutUserPolicy(userName, policyName, policyDocument string) (*SimpleResp, error) { - params := map[string]string{ - "Action": "PutUserPolicy", - "UserName": userName, - "PolicyName": policyName, - "PolicyDocument": policyDocument, - } - resp := new(SimpleResp) - if err := iam.postQuery(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// DeleteUserPolicy deletes a user policy from IAM. -// -// See http://goo.gl/7Jncn for more details. -func (iam *IAM) DeleteUserPolicy(userName, policyName string) (*SimpleResp, error) { - params := map[string]string{ - "Action": "DeleteUserPolicy", - "PolicyName": policyName, - "UserName": userName, - } - resp := new(SimpleResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -// Response for AddUserToGroup requests. -// -// See http://goo.gl/ZnzRN for more details. -type AddUserToGroupResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -// AddUserToGroup adds a user to a specific group -// -// See http://goo.gl/ZnzRN for more details. 
-func (iam *IAM) AddUserToGroup(name, group string) (*AddUserToGroupResp, error) { - - params := map[string]string{ - "Action": "AddUserToGroup", - "GroupName": group, - "UserName": name} - resp := new(AddUserToGroupResp) - if err := iam.query(params, resp); err != nil { - return nil, err - } - return resp, nil -} - -type SimpleResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -type xmlErrors struct { - Errors []Error `xml:"Error"` -} - -// Error encapsulates an IAM error. -type Error struct { - // HTTP status code of the error. - StatusCode int - - // AWS code of the error. - Code string - - // Message explaining the error. - Message string -} - -func (e *Error) Error() string { - var prefix string - if e.Code != "" { - prefix = e.Code + ": " - } - if prefix == "" && e.StatusCode > 0 { - prefix = strconv.Itoa(e.StatusCode) + ": " - } - return prefix + e.Message -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iami_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iami_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iami_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iami_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,208 +0,0 @@ -package iam_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/iam" - "github.com/mitchellh/goamz/testutil" - . "github.com/motain/gocheck" - "net/url" -) - -// AmazonServer represents an Amazon AWS server. -type AmazonServer struct { - auth aws.Auth -} - -func (s *AmazonServer) SetUp(c *C) { - auth, err := aws.EnvAuth() - if err != nil { - c.Fatal(err) - } - s.auth = auth -} - -var _ = Suite(&AmazonClientSuite{}) - -// AmazonClientSuite tests the client against a live AWS server. 
-type AmazonClientSuite struct { - srv AmazonServer - ClientTests -} - -func (s *AmazonClientSuite) SetUpSuite(c *C) { - if !testutil.Amazon { - c.Skip("AmazonClientSuite tests not enabled") - } - s.srv.SetUp(c) - s.iam = iam.New(s.srv.auth, aws.USEast) -} - -// ClientTests defines integration tests designed to test the client. -// It is not used as a test suite in itself, but embedded within -// another type. -type ClientTests struct { - iam *iam.IAM -} - -func (s *ClientTests) TestCreateAndDeleteUser(c *C) { - createResp, err := s.iam.CreateUser("gopher", "/gopher/") - c.Assert(err, IsNil) - getResp, err := s.iam.GetUser("gopher") - c.Assert(err, IsNil) - c.Assert(createResp.User, DeepEquals, getResp.User) - _, err = s.iam.DeleteUser("gopher") - c.Assert(err, IsNil) -} - -func (s *ClientTests) TestCreateUserError(c *C) { - _, err := s.iam.CreateUser("gopher", "/gopher/") - c.Assert(err, IsNil) - defer s.iam.DeleteUser("gopher") - _, err = s.iam.CreateUser("gopher", "/") - iamErr, ok := err.(*iam.Error) - c.Assert(ok, Equals, true) - c.Assert(iamErr.StatusCode, Equals, 409) - c.Assert(iamErr.Code, Equals, "EntityAlreadyExists") - c.Assert(iamErr.Message, Equals, "User with name gopher already exists.") -} - -func (s *ClientTests) TestDeleteUserError(c *C) { - _, err := s.iam.DeleteUser("gopher") - iamErr, ok := err.(*iam.Error) - c.Assert(ok, Equals, true) - c.Assert(iamErr.StatusCode, Equals, 404) - c.Assert(iamErr.Code, Equals, "NoSuchEntity") - c.Assert(iamErr.Message, Equals, "The user with name gopher cannot be found.") -} - -func (s *ClientTests) TestGetUserError(c *C) { - _, err := s.iam.GetUser("gopher") - iamErr, ok := err.(*iam.Error) - c.Assert(ok, Equals, true) - c.Assert(iamErr.StatusCode, Equals, 404) - c.Assert(iamErr.Code, Equals, "NoSuchEntity") - c.Assert(iamErr.Message, Equals, "The user with name gopher cannot be found.") -} - -func (s *ClientTests) TestCreateListAndDeleteAccessKey(c *C) { - createUserResp, err := s.iam.CreateUser("gopher", 
"/gopher/") - c.Assert(err, IsNil) - defer s.iam.DeleteUser(createUserResp.User.Name) - createKeyResp, err := s.iam.CreateAccessKey(createUserResp.User.Name) - c.Assert(err, IsNil) - listKeyResp, err := s.iam.AccessKeys(createUserResp.User.Name) - c.Assert(err, IsNil) - c.Assert(listKeyResp.AccessKeys, HasLen, 1) - createKeyResp.AccessKey.Secret = "" - c.Assert(listKeyResp.AccessKeys[0], DeepEquals, createKeyResp.AccessKey) - _, err = s.iam.DeleteAccessKey(createKeyResp.AccessKey.Id, createUserResp.User.Name) - c.Assert(err, IsNil) -} - -func (s *ClientTests) TestCreateAccessKeyError(c *C) { - _, err := s.iam.CreateAccessKey("unknowngopher") - c.Assert(err, NotNil) - iamErr, ok := err.(*iam.Error) - c.Assert(ok, Equals, true) - c.Assert(iamErr.StatusCode, Equals, 404) - c.Assert(iamErr.Code, Equals, "NoSuchEntity") - c.Assert(iamErr.Message, Equals, "The user with name unknowngopher cannot be found.") -} - -func (s *ClientTests) TestListAccessKeysUserNotFound(c *C) { - _, err := s.iam.AccessKeys("unknowngopher") - c.Assert(err, NotNil) - iamErr, ok := err.(*iam.Error) - c.Assert(ok, Equals, true) - c.Assert(iamErr.StatusCode, Equals, 404) - c.Assert(iamErr.Code, Equals, "NoSuchEntity") - c.Assert(iamErr.Message, Equals, "The user with name unknowngopher cannot be found.") -} - -func (s *ClientTests) TestListAccessKeysUserWithoutKeys(c *C) { - createUserResp, err := s.iam.CreateUser("gopher", "/") - c.Assert(err, IsNil) - defer s.iam.DeleteUser(createUserResp.User.Name) - resp, err := s.iam.AccessKeys(createUserResp.User.Name) - c.Assert(err, IsNil) - c.Assert(resp.AccessKeys, HasLen, 0) -} - -func (s *ClientTests) TestCreateListAndDeleteGroup(c *C) { - cResp1, err := s.iam.CreateGroup("Finances", "/finances/") - c.Assert(err, IsNil) - cResp2, err := s.iam.CreateGroup("DevelopmentManagers", "/development/managers/") - c.Assert(err, IsNil) - lResp, err := s.iam.Groups("/development/") - c.Assert(err, IsNil) - c.Assert(lResp.Groups, HasLen, 1) - c.Assert(cResp2.Group, 
DeepEquals, lResp.Groups[0]) - lResp, err = s.iam.Groups("") - c.Assert(err, IsNil) - c.Assert(lResp.Groups, HasLen, 2) - if lResp.Groups[0].Name == cResp1.Group.Name { - c.Assert([]iam.Group{cResp1.Group, cResp2.Group}, DeepEquals, lResp.Groups) - } else { - c.Assert([]iam.Group{cResp2.Group, cResp1.Group}, DeepEquals, lResp.Groups) - } - _, err = s.iam.DeleteGroup("DevelopmentManagers") - c.Assert(err, IsNil) - lResp, err = s.iam.Groups("/development/") - c.Assert(err, IsNil) - c.Assert(lResp.Groups, HasLen, 0) - _, err = s.iam.DeleteGroup("Finances") - c.Assert(err, IsNil) -} - -func (s *ClientTests) TestCreateGroupError(c *C) { - _, err := s.iam.CreateGroup("Finances", "/finances/") - c.Assert(err, IsNil) - defer s.iam.DeleteGroup("Finances") - _, err = s.iam.CreateGroup("Finances", "/something-else/") - iamErr, ok := err.(*iam.Error) - c.Assert(ok, Equals, true) - c.Assert(iamErr.StatusCode, Equals, 409) - c.Assert(iamErr.Code, Equals, "EntityAlreadyExists") - c.Assert(iamErr.Message, Equals, "Group with name Finances already exists.") -} - -func (s *ClientTests) TestDeleteGroupError(c *C) { - _, err := s.iam.DeleteGroup("Finances") - iamErr, ok := err.(*iam.Error) - c.Assert(ok, Equals, true) - c.Assert(iamErr.StatusCode, Equals, 404) - c.Assert(iamErr.Code, Equals, "NoSuchEntity") - c.Assert(iamErr.Message, Equals, "The group with name Finances cannot be found.") -} - -func (s *ClientTests) TestPutGetAndDeleteUserPolicy(c *C) { - userResp, err := s.iam.CreateUser("gopher", "/gopher/") - c.Assert(err, IsNil) - defer s.iam.DeleteUser(userResp.User.Name) - document := `{ - "Statement": [ - { - "Action": [ - "s3:*" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::8shsns19s90ajahadsj/*", - "arn:aws:s3:::8shsns19s90ajahadsj" - ] - }] - }` - _, err = s.iam.PutUserPolicy(userResp.User.Name, "EverythingS3", document) - c.Assert(err, IsNil) - resp, err := s.iam.GetUserPolicy(userResp.User.Name, "EverythingS3") - c.Assert(err, IsNil) - 
c.Assert(resp.Policy.Name, Equals, "EverythingS3") - c.Assert(resp.Policy.UserName, Equals, userResp.User.Name) - gotDocument, err := url.QueryUnescape(resp.Policy.Document) - c.Assert(err, IsNil) - c.Assert(gotDocument, Equals, document) - _, err = s.iam.DeleteUserPolicy(userResp.User.Name, "EverythingS3") - c.Assert(err, IsNil) - _, err = s.iam.GetUserPolicy(userResp.User.Name, "EverythingS3") - c.Assert(err, NotNil) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iamtest/server.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iamtest/server.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iamtest/server.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iamtest/server.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,432 +0,0 @@ -// Package iamtest implements a fake IAM provider with the capability of -// inducing errors on any given operation, and retrospectively determining what -// operations have been carried out. -package iamtest - -import ( - "encoding/json" - "encoding/xml" - "fmt" - "github.com/mitchellh/goamz/iam" - "net" - "net/http" - "strings" - "sync" -) - -type action struct { - srv *Server - w http.ResponseWriter - req *http.Request - reqId string -} - -// Server implements an IAM simulator for use in tests. 
-type Server struct { - reqId int - url string - listener net.Listener - users []iam.User - groups []iam.Group - accessKeys []iam.AccessKey - userPolicies []iam.UserPolicy - mutex sync.Mutex -} - -func NewServer() (*Server, error) { - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, fmt.Errorf("cannot listen on localhost: %v", err) - } - srv := &Server{ - listener: l, - url: "http://" + l.Addr().String(), - } - go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - srv.serveHTTP(w, req) - })) - return srv, nil -} - -// Quit closes down the server. -func (srv *Server) Quit() error { - return srv.listener.Close() -} - -// URL returns a URL for the server. -func (srv *Server) URL() string { - return srv.url -} - -type xmlErrors struct { - XMLName string `xml:"ErrorResponse"` - Error iam.Error -} - -func (srv *Server) error(w http.ResponseWriter, err *iam.Error) { - w.WriteHeader(err.StatusCode) - xmlErr := xmlErrors{Error: *err} - if e := xml.NewEncoder(w).Encode(xmlErr); e != nil { - panic(e) - } -} - -func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) { - req.ParseForm() - srv.mutex.Lock() - defer srv.mutex.Unlock() - action := req.FormValue("Action") - if action == "" { - srv.error(w, &iam.Error{ - StatusCode: 400, - Code: "MissingAction", - Message: "Missing action", - }) - } - if a, ok := actions[action]; ok { - reqId := fmt.Sprintf("req%0X", srv.reqId) - srv.reqId++ - if resp, err := a(srv, w, req, reqId); err == nil { - if err := xml.NewEncoder(w).Encode(resp); err != nil { - panic(err) - } - } else { - switch err.(type) { - case *iam.Error: - srv.error(w, err.(*iam.Error)) - default: - panic(err) - } - } - } else { - srv.error(w, &iam.Error{ - StatusCode: 400, - Code: "InvalidAction", - Message: "Invalid action: " + action, - }) - } -} - -func (srv *Server) createUser(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, 
[]string{"UserName"}); err != nil { - return nil, err - } - path := req.FormValue("Path") - if path == "" { - path = "/" - } - name := req.FormValue("UserName") - for _, user := range srv.users { - if user.Name == name { - return nil, &iam.Error{ - StatusCode: 409, - Code: "EntityAlreadyExists", - Message: fmt.Sprintf("User with name %s already exists.", name), - } - } - } - user := iam.User{ - Id: "USER" + reqId + "EXAMPLE", - Arn: fmt.Sprintf("arn:aws:iam:::123456789012:user%s%s", path, name), - Name: name, - Path: path, - } - srv.users = append(srv.users, user) - return iam.CreateUserResp{ - RequestId: reqId, - User: user, - }, nil -} - -func (srv *Server) getUser(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"UserName"}); err != nil { - return nil, err - } - name := req.FormValue("UserName") - index, err := srv.findUser(name) - if err != nil { - return nil, err - } - return iam.GetUserResp{RequestId: reqId, User: srv.users[index]}, nil -} - -func (srv *Server) deleteUser(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"UserName"}); err != nil { - return nil, err - } - name := req.FormValue("UserName") - index, err := srv.findUser(name) - if err != nil { - return nil, err - } - copy(srv.users[index:], srv.users[index+1:]) - srv.users = srv.users[:len(srv.users)-1] - return iam.SimpleResp{RequestId: reqId}, nil -} - -func (srv *Server) createAccessKey(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"UserName"}); err != nil { - return nil, err - } - userName := req.FormValue("UserName") - if _, err := srv.findUser(userName); err != nil { - return nil, err - } - key := iam.AccessKey{ - Id: fmt.Sprintf("%s%d", userName, len(srv.accessKeys)), - Secret: "", - UserName: userName, - Status: "Active", - } - srv.accessKeys = append(srv.accessKeys, key) - return 
iam.CreateAccessKeyResp{RequestId: reqId, AccessKey: key}, nil -} - -func (srv *Server) deleteAccessKey(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"AccessKeyId", "UserName"}); err != nil { - return nil, err - } - key := req.FormValue("AccessKeyId") - index := -1 - for i, ak := range srv.accessKeys { - if ak.Id == key { - index = i - break - } - } - if index < 0 { - return nil, &iam.Error{ - StatusCode: 404, - Code: "NoSuchEntity", - Message: "No such key.", - } - } - copy(srv.accessKeys[index:], srv.accessKeys[index+1:]) - srv.accessKeys = srv.accessKeys[:len(srv.accessKeys)-1] - return iam.SimpleResp{RequestId: reqId}, nil -} - -func (srv *Server) listAccessKeys(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"UserName"}); err != nil { - return nil, err - } - userName := req.FormValue("UserName") - if _, err := srv.findUser(userName); err != nil { - return nil, err - } - var keys []iam.AccessKey - for _, k := range srv.accessKeys { - if k.UserName == userName { - keys = append(keys, k) - } - } - return iam.AccessKeysResp{ - RequestId: reqId, - AccessKeys: keys, - }, nil -} - -func (srv *Server) createGroup(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"GroupName"}); err != nil { - return nil, err - } - name := req.FormValue("GroupName") - path := req.FormValue("Path") - for _, group := range srv.groups { - if group.Name == name { - return nil, &iam.Error{ - StatusCode: 409, - Code: "EntityAlreadyExists", - Message: fmt.Sprintf("Group with name %s already exists.", name), - } - } - } - group := iam.Group{ - Id: "GROUP " + reqId + "EXAMPLE", - Arn: fmt.Sprintf("arn:aws:iam:::123456789012:group%s%s", path, name), - Name: name, - Path: path, - } - srv.groups = append(srv.groups, group) - return iam.CreateGroupResp{ - RequestId: reqId, - Group: group, - }, 
nil -} - -func (srv *Server) listGroups(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - pathPrefix := req.FormValue("PathPrefix") - if pathPrefix == "" { - return iam.GroupsResp{ - RequestId: reqId, - Groups: srv.groups, - }, nil - } - var groups []iam.Group - for _, group := range srv.groups { - if strings.HasPrefix(group.Path, pathPrefix) { - groups = append(groups, group) - } - } - return iam.GroupsResp{ - RequestId: reqId, - Groups: groups, - }, nil -} - -func (srv *Server) deleteGroup(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"GroupName"}); err != nil { - return nil, err - } - name := req.FormValue("GroupName") - index := -1 - for i, group := range srv.groups { - if group.Name == name { - index = i - break - } - } - if index == -1 { - return nil, &iam.Error{ - StatusCode: 404, - Code: "NoSuchEntity", - Message: fmt.Sprintf("The group with name %s cannot be found.", name), - } - } - copy(srv.groups[index:], srv.groups[index+1:]) - srv.groups = srv.groups[:len(srv.groups)-1] - return iam.SimpleResp{RequestId: reqId}, nil -} - -func (srv *Server) putUserPolicy(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"UserName", "PolicyDocument", "PolicyName"}); err != nil { - return nil, err - } - var exists bool - policyName := req.FormValue("PolicyName") - userName := req.FormValue("UserName") - for _, policy := range srv.userPolicies { - if policyName == policy.Name && userName == policy.UserName { - exists = true - break - } - } - if !exists { - policy := iam.UserPolicy{ - Name: policyName, - UserName: userName, - Document: req.FormValue("PolicyDocument"), - } - var dumb interface{} - if err := json.Unmarshal([]byte(policy.Document), &dumb); err != nil { - return nil, &iam.Error{ - StatusCode: 400, - Code: "MalformedPolicyDocument", - Message: "Malformed policy document", - } - } - 
srv.userPolicies = append(srv.userPolicies, policy) - } - return iam.SimpleResp{RequestId: reqId}, nil -} - -func (srv *Server) deleteUserPolicy(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"UserName", "PolicyName"}); err != nil { - return nil, err - } - policyName := req.FormValue("PolicyName") - userName := req.FormValue("UserName") - index := -1 - for i, policy := range srv.userPolicies { - if policyName == policy.Name && userName == policy.UserName { - index = i - break - } - } - if index < 0 { - return nil, &iam.Error{ - StatusCode: 404, - Code: "NoSuchEntity", - Message: "No such user policy", - } - } - copy(srv.userPolicies[index:], srv.userPolicies[index+1:]) - srv.userPolicies = srv.userPolicies[:len(srv.userPolicies)-1] - return iam.SimpleResp{RequestId: reqId}, nil -} - -func (srv *Server) getUserPolicy(w http.ResponseWriter, req *http.Request, reqId string) (interface{}, error) { - if err := srv.validate(req, []string{"UserName", "PolicyName"}); err != nil { - return nil, err - } - policyName := req.FormValue("PolicyName") - userName := req.FormValue("UserName") - index := -1 - for i, policy := range srv.userPolicies { - if policyName == policy.Name && userName == policy.UserName { - index = i - break - } - } - if index < 0 { - return nil, &iam.Error{ - StatusCode: 404, - Code: "NoSuchEntity", - Message: "No such user policy", - } - } - return iam.GetUserPolicyResp{ - Policy: srv.userPolicies[index], - RequestId: reqId, - }, nil -} - -func (srv *Server) findUser(userName string) (int, error) { - var ( - err error - index = -1 - ) - for i, user := range srv.users { - if user.Name == userName { - index = i - break - } - } - if index < 0 { - err = &iam.Error{ - StatusCode: 404, - Code: "NoSuchEntity", - Message: fmt.Sprintf("The user with name %s cannot be found.", userName), - } - } - return index, err -} - -// Validates the presence of required request parameters. 
-func (srv *Server) validate(req *http.Request, required []string) error { - for _, r := range required { - if req.FormValue(r) == "" { - return &iam.Error{ - StatusCode: 400, - Code: "InvalidParameterCombination", - Message: fmt.Sprintf("%s is required.", r), - } - } - } - return nil -} - -var actions = map[string]func(*Server, http.ResponseWriter, *http.Request, string) (interface{}, error){ - "CreateUser": (*Server).createUser, - "DeleteUser": (*Server).deleteUser, - "GetUser": (*Server).getUser, - "CreateAccessKey": (*Server).createAccessKey, - "DeleteAccessKey": (*Server).deleteAccessKey, - "ListAccessKeys": (*Server).listAccessKeys, - "PutUserPolicy": (*Server).putUserPolicy, - "DeleteUserPolicy": (*Server).deleteUserPolicy, - "GetUserPolicy": (*Server).getUserPolicy, - "CreateGroup": (*Server).createGroup, - "DeleteGroup": (*Server).deleteGroup, - "ListGroups": (*Server).listGroups, -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iam_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iam_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iam_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iam_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,289 +0,0 @@ -package iam_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/iam" - "github.com/mitchellh/goamz/testutil" - . 
"github.com/motain/gocheck" - "strings" - "testing" -) - -func Test(t *testing.T) { - TestingT(t) -} - -type S struct { - iam *iam.IAM -} - -var _ = Suite(&S{}) - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - s.iam = iam.NewWithClient(auth, aws.Region{IAMEndpoint: testServer.URL}, testutil.DefaultClient) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) TestCreateUser(c *C) { - testServer.Response(200, nil, CreateUserExample) - resp, err := s.iam.CreateUser("Bob", "/division_abc/subdivision_xyz/") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "CreateUser") - c.Assert(values.Get("UserName"), Equals, "Bob") - c.Assert(values.Get("Path"), Equals, "/division_abc/subdivision_xyz/") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - expected := iam.User{ - Path: "/division_abc/subdivision_xyz/", - Name: "Bob", - Id: "AIDACKCEVSQ6C2EXAMPLE", - Arn: "arn:aws:iam::123456789012:user/division_abc/subdivision_xyz/Bob", - } - c.Assert(resp.User, DeepEquals, expected) -} - -func (s *S) TestCreateUserConflict(c *C) { - testServer.Response(409, nil, DuplicateUserExample) - resp, err := s.iam.CreateUser("Bob", "/division_abc/subdivision_xyz/") - testServer.WaitRequest() - c.Assert(resp, IsNil) - c.Assert(err, NotNil) - e, ok := err.(*iam.Error) - c.Assert(ok, Equals, true) - c.Assert(e.Message, Equals, "User with name Bob already exists.") - c.Assert(e.Code, Equals, "EntityAlreadyExists") -} - -func (s *S) TestGetUser(c *C) { - testServer.Response(200, nil, GetUserExample) - resp, err := s.iam.GetUser("Bob") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "GetUser") - c.Assert(values.Get("UserName"), Equals, "Bob") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - expected := iam.User{ 
- Path: "/division_abc/subdivision_xyz/", - Name: "Bob", - Id: "AIDACKCEVSQ6C2EXAMPLE", - Arn: "arn:aws:iam::123456789012:user/division_abc/subdivision_xyz/Bob", - } - c.Assert(resp.User, DeepEquals, expected) -} - -func (s *S) TestDeleteUser(c *C) { - testServer.Response(200, nil, RequestIdExample) - resp, err := s.iam.DeleteUser("Bob") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "DeleteUser") - c.Assert(values.Get("UserName"), Equals, "Bob") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") -} - -func (s *S) TestCreateGroup(c *C) { - testServer.Response(200, nil, CreateGroupExample) - resp, err := s.iam.CreateGroup("Admins", "/admins/") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "CreateGroup") - c.Assert(values.Get("GroupName"), Equals, "Admins") - c.Assert(values.Get("Path"), Equals, "/admins/") - c.Assert(err, IsNil) - c.Assert(resp.Group.Path, Equals, "/admins/") - c.Assert(resp.Group.Name, Equals, "Admins") - c.Assert(resp.Group.Id, Equals, "AGPACKCEVSQ6C2EXAMPLE") - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") -} - -func (s *S) TestCreateGroupWithoutPath(c *C) { - testServer.Response(200, nil, CreateGroupExample) - _, err := s.iam.CreateGroup("Managers", "") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "CreateGroup") - c.Assert(err, IsNil) - _, ok := map[string][]string(values)["Path"] - c.Assert(ok, Equals, false) -} - -func (s *S) TestDeleteGroup(c *C) { - testServer.Response(200, nil, RequestIdExample) - resp, err := s.iam.DeleteGroup("Admins") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "DeleteGroup") - c.Assert(values.Get("GroupName"), Equals, "Admins") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") -} - -func (s *S) TestListGroups(c *C) { - 
testServer.Response(200, nil, ListGroupsExample) - resp, err := s.iam.Groups("/division_abc/") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "ListGroups") - c.Assert(values.Get("PathPrefix"), Equals, "/division_abc/") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - expected := []iam.Group{ - { - Path: "/division_abc/subdivision_xyz/", - Name: "Admins", - Id: "AGPACKCEVSQ6C2EXAMPLE", - Arn: "arn:aws:iam::123456789012:group/Admins", - }, - { - Path: "/division_abc/subdivision_xyz/product_1234/engineering/", - Name: "Test", - Id: "AGP2MAB8DPLSRHEXAMPLE", - Arn: "arn:aws:iam::123456789012:group/division_abc/subdivision_xyz/product_1234/engineering/Test", - }, - { - Path: "/division_abc/subdivision_xyz/product_1234/", - Name: "Managers", - Id: "AGPIODR4TAW7CSEXAMPLE", - Arn: "arn:aws:iam::123456789012:group/division_abc/subdivision_xyz/product_1234/Managers", - }, - } - c.Assert(resp.Groups, DeepEquals, expected) -} - -func (s *S) TestListGroupsWithoutPathPrefix(c *C) { - testServer.Response(200, nil, ListGroupsExample) - _, err := s.iam.Groups("") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "ListGroups") - c.Assert(err, IsNil) - _, ok := map[string][]string(values)["PathPrefix"] - c.Assert(ok, Equals, false) -} - -func (s *S) TestCreateAccessKey(c *C) { - testServer.Response(200, nil, CreateAccessKeyExample) - resp, err := s.iam.CreateAccessKey("Bob") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "CreateAccessKey") - c.Assert(values.Get("UserName"), Equals, "Bob") - c.Assert(err, IsNil) - c.Assert(resp.AccessKey.UserName, Equals, "Bob") - c.Assert(resp.AccessKey.Id, Equals, "AKIAIOSFODNN7EXAMPLE") - c.Assert(resp.AccessKey.Secret, Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") - c.Assert(resp.AccessKey.Status, Equals, "Active") -} - -func (s *S) TestDeleteAccessKey(c *C) { - 
testServer.Response(200, nil, RequestIdExample) - resp, err := s.iam.DeleteAccessKey("ysa8hasdhasdsi", "Bob") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "DeleteAccessKey") - c.Assert(values.Get("AccessKeyId"), Equals, "ysa8hasdhasdsi") - c.Assert(values.Get("UserName"), Equals, "Bob") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") -} - -func (s *S) TestDeleteAccessKeyBlankUserName(c *C) { - testServer.Response(200, nil, RequestIdExample) - _, err := s.iam.DeleteAccessKey("ysa8hasdhasdsi", "") - c.Assert(err, IsNil) - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "DeleteAccessKey") - c.Assert(values.Get("AccessKeyId"), Equals, "ysa8hasdhasdsi") - _, ok := map[string][]string(values)["UserName"] - c.Assert(ok, Equals, false) -} - -func (s *S) TestAccessKeys(c *C) { - testServer.Response(200, nil, ListAccessKeyExample) - resp, err := s.iam.AccessKeys("Bob") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "ListAccessKeys") - c.Assert(values.Get("UserName"), Equals, "Bob") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") - c.Assert(resp.AccessKeys, HasLen, 2) - c.Assert(resp.AccessKeys[0].Id, Equals, "AKIAIOSFODNN7EXAMPLE") - c.Assert(resp.AccessKeys[0].UserName, Equals, "Bob") - c.Assert(resp.AccessKeys[0].Status, Equals, "Active") - c.Assert(resp.AccessKeys[1].Id, Equals, "AKIAI44QH8DHBEXAMPLE") - c.Assert(resp.AccessKeys[1].UserName, Equals, "Bob") - c.Assert(resp.AccessKeys[1].Status, Equals, "Inactive") -} - -func (s *S) TestAccessKeysBlankUserName(c *C) { - testServer.Response(200, nil, ListAccessKeyExample) - _, err := s.iam.AccessKeys("") - c.Assert(err, IsNil) - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "ListAccessKeys") - _, ok := map[string][]string(values)["UserName"] - c.Assert(ok, 
Equals, false) -} - -func (s *S) TestGetUserPolicy(c *C) { - testServer.Response(200, nil, GetUserPolicyExample) - resp, err := s.iam.GetUserPolicy("Bob", "AllAccessPolicy") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "GetUserPolicy") - c.Assert(values.Get("UserName"), Equals, "Bob") - c.Assert(values.Get("PolicyName"), Equals, "AllAccessPolicy") - c.Assert(err, IsNil) - c.Assert(resp.Policy.UserName, Equals, "Bob") - c.Assert(resp.Policy.Name, Equals, "AllAccessPolicy") - c.Assert(strings.TrimSpace(resp.Policy.Document), Equals, `{"Statement":[{"Effect":"Allow","Action":"*","Resource":"*"}]}`) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") -} - -func (s *S) TestPutUserPolicy(c *C) { - document := `{ - "Statement": [ - { - "Action": [ - "s3:*" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::8shsns19s90ajahadsj/*", - "arn:aws:s3:::8shsns19s90ajahadsj" - ] - }] - }` - testServer.Response(200, nil, RequestIdExample) - resp, err := s.iam.PutUserPolicy("Bob", "AllAccessPolicy", document) - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "POST") - c.Assert(req.FormValue("Action"), Equals, "PutUserPolicy") - c.Assert(req.FormValue("PolicyName"), Equals, "AllAccessPolicy") - c.Assert(req.FormValue("UserName"), Equals, "Bob") - c.Assert(req.FormValue("PolicyDocument"), Equals, document) - c.Assert(req.FormValue("Version"), Equals, "2010-05-08") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") -} - -func (s *S) TestDeleteUserPolicy(c *C) { - testServer.Response(200, nil, RequestIdExample) - resp, err := s.iam.DeleteUserPolicy("Bob", "AllAccessPolicy") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "DeleteUserPolicy") - c.Assert(values.Get("PolicyName"), Equals, "AllAccessPolicy") - c.Assert(values.Get("UserName"), Equals, "Bob") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, 
"7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") -} - -func (s *S) TestAddUserToGroup(c *C) { - testServer.Response(200, nil, AddUserToGroupExample) - resp, err := s.iam.AddUserToGroup("admin1", "Admins") - values := testServer.WaitRequest().URL.Query() - c.Assert(values.Get("Action"), Equals, "AddUserToGroup") - c.Assert(values.Get("GroupName"), Equals, "Admins") - c.Assert(values.Get("UserName"), Equals, "admin1") - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE") -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iamt_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iamt_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iamt_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/iamt_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -package iam_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/iam" - "github.com/mitchellh/goamz/iam/iamtest" - . "github.com/motain/gocheck" -) - -// LocalServer represents a local ec2test fake server. -type LocalServer struct { - auth aws.Auth - region aws.Region - srv *iamtest.Server -} - -func (s *LocalServer) SetUp(c *C) { - srv, err := iamtest.NewServer() - c.Assert(err, IsNil) - c.Assert(srv, NotNil) - - s.srv = srv - s.region = aws.Region{IAMEndpoint: srv.URL()} -} - -// LocalServerSuite defines tests that will run -// against the local iamtest server. It includes -// tests from ClientTests. 
-type LocalServerSuite struct { - srv LocalServer - ClientTests -} - -var _ = Suite(&LocalServerSuite{}) - -func (s *LocalServerSuite) SetUpSuite(c *C) { - s.srv.SetUp(c) - s.ClientTests.iam = iam.New(s.srv.auth, s.srv.region) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,163 +0,0 @@ -package iam_test - -// http://goo.gl/EUIvl -var CreateUserExample = ` - - - - /division_abc/subdivision_xyz/ - Bob - AIDACKCEVSQ6C2EXAMPLE - arn:aws:iam::123456789012:user/division_abc/subdivision_xyz/Bob - - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` - -var DuplicateUserExample = ` - - - Sender - EntityAlreadyExists - User with name Bob already exists. 
- - 1d5f5000-1316-11e2-a60f-91a8e6fb6d21 - -` - -var GetUserExample = ` - - - - /division_abc/subdivision_xyz/ - Bob - AIDACKCEVSQ6C2EXAMPLE - arn:aws:iam::123456789012:user/division_abc/subdivision_xyz/Bob - - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` - -var CreateGroupExample = ` - - - - /admins/ - Admins - AGPACKCEVSQ6C2EXAMPLE - arn:aws:iam::123456789012:group/Admins - - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` - -var ListGroupsExample = ` - - - - - /division_abc/subdivision_xyz/ - Admins - AGPACKCEVSQ6C2EXAMPLE - arn:aws:iam::123456789012:group/Admins - - - /division_abc/subdivision_xyz/product_1234/engineering/ - Test - AGP2MAB8DPLSRHEXAMPLE - arn:aws:iam::123456789012:group/division_abc/subdivision_xyz/product_1234/engineering/Test - - - /division_abc/subdivision_xyz/product_1234/ - Managers - AGPIODR4TAW7CSEXAMPLE - arn:aws:iam::123456789012:group/division_abc/subdivision_xyz/product_1234/Managers - - - false - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` - -var RequestIdExample = ` - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` - -var CreateAccessKeyExample = ` - - - - Bob - AKIAIOSFODNN7EXAMPLE - Active - wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY - - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` - -var ListAccessKeyExample = ` - - - Bob - - - Bob - AKIAIOSFODNN7EXAMPLE - Active - - - Bob - AKIAI44QH8DHBEXAMPLE - Inactive - - - false - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` - -var GetUserPolicyExample = ` - - - Bob - AllAccessPolicy - - {"Statement":[{"Effect":"Allow","Action":"*","Resource":"*"}]} - - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` - -var AddUserToGroupExample = ` - - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/sign.go 2016-02-10 
14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/iam/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -package iam - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "github.com/mitchellh/goamz/aws" - "sort" - "strings" -) - -// ---------------------------------------------------------------------------- -// Version 2 signing (http://goo.gl/RSRp5) - -var b64 = base64.StdEncoding - -func sign(auth aws.Auth, method, path string, params map[string]string, host string) { - params["AWSAccessKeyId"] = auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - if auth.Token != "" { - params["SecurityToken"] = auth.Token - } - - var sarray []string - for k, v := range params { - sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(v)) - } - sort.StringSlice(sarray).Sort() - joined := strings.Join(sarray, "&") - payload := method + "\n" + host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/LICENSE aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/LICENSE --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/LICENSE 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,185 +0,0 @@ -This software is licensed under the LGPLv3, included below. 
- -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. 
-Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. 
A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. 
- - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/rds.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/rds.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/rds.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/rds.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,835 +0,0 @@ -// The rds package provides types and functions for interaction with the AWS -// Relational Database service (rds) -package rds - -import ( - "encoding/xml" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/mitchellh/goamz/aws" -) - -// The Rds type encapsulates operations operations with the Rds endpoint. -type Rds struct { - aws.Auth - aws.Region - httpClient *http.Client -} - -const APIVersion = "2014-10-31" - -// New creates a new Rds instance. 
-func New(auth aws.Auth, region aws.Region) *Rds { - return NewWithClient(auth, region, aws.RetryingClient) -} - -func NewWithClient(auth aws.Auth, region aws.Region, httpClient *http.Client) *Rds { - return &Rds{auth, region, httpClient} -} - -func (rds *Rds) query(params map[string]string, resp interface{}) error { - params["Version"] = APIVersion - params["Timestamp"] = time.Now().In(time.UTC).Format(time.RFC3339) - - endpoint, err := url.Parse(rds.Region.RdsEndpoint) - if err != nil { - return err - } - - sign(rds.Auth, "GET", "/", params, endpoint.Host) - endpoint.RawQuery = multimap(params).Encode() - r, err := rds.httpClient.Get(endpoint.String()) - - if err != nil { - return err - } - defer r.Body.Close() - if r.StatusCode > 200 { - return buildError(r) - } - - decoder := xml.NewDecoder(r.Body) - decodedBody := decoder.Decode(resp) - - return decodedBody -} - -func buildError(r *http.Response) error { - var ( - err Error - errors xmlErrors - ) - xml.NewDecoder(r.Body).Decode(&errors) - if len(errors.Errors) > 0 { - err = errors.Errors[0] - } - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - return &err -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -func makeParams(action string) map[string]string { - params := make(map[string]string) - params["Action"] = action - return params -} - -// ---------------------------------------------------------------------------- -// Rds objects - -type DBInstance struct { - Address string `xml:"Endpoint>Address"` - AllocatedStorage int `xml:"AllocatedStorage"` - StorageType string `xml:"StorageType"` - AvailabilityZone string `xml:"AvailabilityZone"` - BackupRetentionPeriod int `xml:"BackupRetentionPeriod"` - DBInstanceClass string `xml:"DBInstanceClass"` - DBInstanceIdentifier string `xml:"DBInstanceIdentifier"` - DBInstanceStatus string `xml:"DBInstanceStatus"` - DBName string 
`xml:"DBName"` - Engine string `xml:"Engine"` - EngineVersion string `xml:"EngineVersion"` - StorageEncrypted bool `xml:"StorageEncrypted"` - MasterUsername string `xml:"MasterUsername"` - MultiAZ bool `xml:"MultiAZ"` - Port int `xml:"Endpoint>Port"` - PreferredBackupWindow string `xml:"PreferredBackupWindow"` - PreferredMaintenanceWindow string `xml:"PreferredMaintenanceWindow"` - VpcSecurityGroupIds []string `xml:"VpcSecurityGroups>VpcSecurityGroupMembership>VpcSecurityGroupId"` - DBSecurityGroupNames []string `xml:"DBSecurityGroups>DBSecurityGroup>DBSecurityGroupName"` - DBSubnetGroup DBSubnetGroup `xml:"DBSubnetGroup"` - DBParameterGroupName string `xml:"DBParameterGroups>DBParameterGroup>DBParameterGroupName"` -} - -type DBSecurityGroup struct { - Description string `xml:"DBSecurityGroupDescription"` - Name string `xml:"DBSecurityGroupName"` - EC2SecurityGroupNames []string `xml:"EC2SecurityGroups>EC2SecurityGroup>EC2SecurityGroupName"` - EC2SecurityGroupIds []string `xml:"EC2SecurityGroups>EC2SecurityGroup>EC2SecurityGroupId"` - EC2SecurityGroupOwnerIds []string `xml:"EC2SecurityGroups>EC2SecurityGroup>EC2SecurityGroupOwnerId"` - EC2SecurityGroupStatuses []string `xml:"EC2SecurityGroups>EC2SecurityGroup>Status"` - CidrIps []string `xml:"IPRanges>IPRange>CIDRIP"` - CidrStatuses []string `xml:"IPRanges>IPRange>Status"` -} - -type DBSubnetGroup struct { - Description string `xml:"DBSubnetGroupDescription"` - Name string `xml:"DBSubnetGroupName"` - Status string `xml:"SubnetGroupStatus"` - SubnetIds []string `xml:"Subnets>Subnet>SubnetIdentifier"` - VpcId string `xml:"VpcId"` -} - -type DBSnapshot struct { - AllocatedStorage int `xml:"AllocatedStorage"` - AvailabilityZone string `xml:"AvailabilityZone"` - DBInstanceIdentifier string `xml:"DBInstanceIdentifier"` - DBSnapshotIdentifier string `xml:"DBSnapshotIdentifier"` - Engine string `xml:"Engine"` - EngineVersion string `xml:"EngineVersion"` - InstanceCreateTime string `xml:"InstanceCreateTime"` - Iops int 
`xml:"Iops"` - LicenseModel string `xml:"LicenseModel"` - MasterUsername string `xml:"MasterUsername"` - OptionGroupName string `xml:"OptionGroupName"` - PercentProgress int `xml:"PercentProgress"` - Port int `xml:"Port"` - SnapshotCreateTime string `xml:"SnapshotCreateTime"` - SnapshotType string `xml:"SnapshotType"` - SourceRegion string `xml:"SourceRegion"` - Status string `xml:"Status"` - VpcId string `xml:"VpcId"` -} - -type DBParameterGroup struct { - DBParameterGroupFamily string `xml:"DBParameterGroupFamily"` - DBParameterGroupName string `xml:"DBParameterGroupName"` - Description string `xml:"Description"` -} - -type Parameter struct { - ApplyMethod string `xml:"ApplyMethod"` - ParameterName string `xml:"ParameterName"` - ParameterValue string `xml:"ParameterValue"` -} - -// ---------------------------------------------------------------------------- -// Create - -// The CreateDBInstance request parameters -type CreateDBInstance struct { - AllocatedStorage int - StorageType string - AvailabilityZone string - BackupRetentionPeriod int - DBInstanceClass string - DBInstanceIdentifier string - DBName string - DBSubnetGroupName string - Engine string - EngineVersion string - StorageEncrypted bool - Iops int - MasterUsername string - MasterUserPassword string - MultiAZ bool - Port int - PreferredBackupWindow string // hh24:mi-hh24:mi - PreferredMaintenanceWindow string // ddd:hh24:mi-ddd:hh24:mi - PubliclyAccessible bool - VpcSecurityGroupIds []string - DBSecurityGroupNames []string - DBParameterGroupName string - - SetAllocatedStorage bool - SetBackupRetentionPeriod bool - SetIops bool - SetPort bool -} - -func (rds *Rds) CreateDBInstance(options *CreateDBInstance) (resp *SimpleResp, err error) { - params := makeParams("CreateDBInstance") - - if options.SetAllocatedStorage { - params["AllocatedStorage"] = strconv.Itoa(options.AllocatedStorage) - } - - if options.StorageType != "" { - params["StorageType"] = options.StorageType - } - - if 
options.SetBackupRetentionPeriod { - params["BackupRetentionPeriod"] = strconv.Itoa(options.BackupRetentionPeriod) - } - - if options.SetIops { - params["Iops"] = strconv.Itoa(options.Iops) - } - - if options.SetPort { - params["Port"] = strconv.Itoa(options.Port) - } - - if options.AvailabilityZone != "" { - params["AvailabilityZone"] = options.AvailabilityZone - } - - if options.DBInstanceClass != "" { - params["DBInstanceClass"] = options.DBInstanceClass - } - - if options.DBInstanceIdentifier != "" { - params["DBInstanceIdentifier"] = options.DBInstanceIdentifier - } - - if options.DBName != "" { - params["DBName"] = options.DBName - } - - if options.DBSubnetGroupName != "" { - params["DBSubnetGroupName"] = options.DBSubnetGroupName - } - - if options.Engine != "" { - params["Engine"] = options.Engine - } - - if options.EngineVersion != "" { - params["EngineVersion"] = options.EngineVersion - } - - if options.StorageEncrypted { - params["StorageEncrypted"] = "true" - } - - if options.MasterUsername != "" { - params["MasterUsername"] = options.MasterUsername - } - - if options.MasterUserPassword != "" { - params["MasterUserPassword"] = options.MasterUserPassword - } - - if options.MultiAZ { - params["MultiAZ"] = "true" - } - - if options.PreferredBackupWindow != "" { - params["PreferredBackupWindow"] = options.PreferredBackupWindow - } - - if options.PreferredMaintenanceWindow != "" { - params["PreferredMaintenanceWindow"] = options.PreferredMaintenanceWindow - } - - if options.PubliclyAccessible { - params["PubliclyAccessible"] = "true" - } - - for j, group := range options.VpcSecurityGroupIds { - params["VpcSecurityGroupIds.member."+strconv.Itoa(j+1)] = group - } - - for j, group := range options.DBSecurityGroupNames { - params["DBSecurityGroups.member."+strconv.Itoa(j+1)] = group - } - - if options.DBParameterGroupName != "" { - params["DBParameterGroupName"] = options.DBParameterGroupName - } - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if 
err != nil { - resp = nil - } - - return -} - -// The CreateDBSecurityGroup request parameters -type CreateDBSecurityGroup struct { - DBSecurityGroupName string - DBSecurityGroupDescription string -} - -func (rds *Rds) CreateDBSecurityGroup(options *CreateDBSecurityGroup) (resp *SimpleResp, err error) { - params := makeParams("CreateDBSecurityGroup") - - params["DBSecurityGroupName"] = options.DBSecurityGroupName - params["DBSecurityGroupDescription"] = options.DBSecurityGroupDescription - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// The CreateDBSubnetGroup request parameters -type CreateDBSubnetGroup struct { - DBSubnetGroupName string - DBSubnetGroupDescription string - SubnetIds []string -} - -func (rds *Rds) CreateDBSubnetGroup(options *CreateDBSubnetGroup) (resp *SimpleResp, err error) { - params := makeParams("CreateDBSubnetGroup") - - params["DBSubnetGroupName"] = options.DBSubnetGroupName - params["DBSubnetGroupDescription"] = options.DBSubnetGroupDescription - - for j, group := range options.SubnetIds { - params["SubnetIds.member."+strconv.Itoa(j+1)] = group - } - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// The CreateDBSecurityGroup request parameters -type AuthorizeDBSecurityGroupIngress struct { - Cidr string - DBSecurityGroupName string - EC2SecurityGroupId string - EC2SecurityGroupName string - EC2SecurityGroupOwnerId string -} - -func (rds *Rds) AuthorizeDBSecurityGroupIngress(options *AuthorizeDBSecurityGroupIngress) (resp *SimpleResp, err error) { - params := makeParams("AuthorizeDBSecurityGroupIngress") - - if attr := options.Cidr; attr != "" { - params["CIDRIP"] = attr - } - - if attr := options.EC2SecurityGroupId; attr != "" { - params["EC2SecurityGroupId"] = attr - } - - if attr := options.EC2SecurityGroupOwnerId; attr != "" { - params["EC2SecurityGroupOwnerId"] = attr - } - - if attr := 
options.EC2SecurityGroupName; attr != "" { - params["EC2SecurityGroupName"] = attr - } - - params["DBSecurityGroupName"] = options.DBSecurityGroupName - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// The CreateDBParameterGroup request parameters -type CreateDBParameterGroup struct { - DBParameterGroupFamily string - DBParameterGroupName string - Description string -} - -func (rds *Rds) CreateDBParameterGroup(options *CreateDBParameterGroup) (resp *SimpleResp, err error) { - params := makeParams("CreateDBParameterGroup") - - params["DBParameterGroupFamily"] = options.DBParameterGroupFamily - params["DBParameterGroupName"] = options.DBParameterGroupName - params["Description"] = options.Description - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// Describe - -// DescribeDBInstances request params -type DescribeDBInstances struct { - DBInstanceIdentifier string -} - -type DescribeDBInstancesResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - DBInstances []DBInstance `xml:"DescribeDBInstancesResult>DBInstances>DBInstance"` -} - -func (rds *Rds) DescribeDBInstances(options *DescribeDBInstances) (resp *DescribeDBInstancesResp, err error) { - params := makeParams("DescribeDBInstances") - - params["DBInstanceIdentifier"] = options.DBInstanceIdentifier - - resp = &DescribeDBInstancesResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DescribeDBSecurityGroups request params -type DescribeDBSecurityGroups struct { - DBSecurityGroupName string -} - -type DescribeDBSecurityGroupsResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - DBSecurityGroups []DBSecurityGroup `xml:"DescribeDBSecurityGroupsResult>DBSecurityGroups>DBSecurityGroup"` -} - -func (rds *Rds) DescribeDBSecurityGroups(options *DescribeDBSecurityGroups) (resp *DescribeDBSecurityGroupsResp, err error) { - 
params := makeParams("DescribeDBSecurityGroups") - - params["DBSecurityGroupName"] = options.DBSecurityGroupName - - resp = &DescribeDBSecurityGroupsResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DescribeDBSubnetGroups request params -type DescribeDBSubnetGroups struct { - DBSubnetGroupName string -} - -type DescribeDBSubnetGroupsResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - DBSubnetGroups []DBSubnetGroup `xml:"DescribeDBSubnetGroupsResult>DBSubnetGroups>DBSubnetGroup"` -} - -func (rds *Rds) DescribeDBSubnetGroups(options *DescribeDBSubnetGroups) (resp *DescribeDBSubnetGroupsResp, err error) { - params := makeParams("DescribeDBSubnetGroups") - - params["DBSubnetGroupName"] = options.DBSubnetGroupName - - resp = &DescribeDBSubnetGroupsResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DescribeDBSnapshots request params -type DescribeDBSnapshots struct { - DBInstanceIdentifier string - DBSnapshotIdentifier string - SnapshotType string -} - -type DescribeDBSnapshotsResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - DBSnapshots []DBSnapshot `xml:"DescribeDBSnapshotsResult>DBSnapshots>DBSnapshot"` -} - -func (rds *Rds) DescribeDBSnapshots(options *DescribeDBSnapshots) (resp *DescribeDBSnapshotsResp, err error) { - params := makeParams("DescribeDBSnapshots") - - if options.DBInstanceIdentifier != "" { - params["DBInstanceIdentifier"] = options.DBInstanceIdentifier - } - - if options.DBSnapshotIdentifier != "" { - params["DBSnapshotIdentifier"] = options.DBSnapshotIdentifier - } - - if options.SnapshotType != "" { - params["SnapshotType"] = options.SnapshotType - } - - resp = &DescribeDBSnapshotsResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DescribeDBParameterGroups request params -type DescribeDBParameterGroups struct { - DBParameterGroupName string -} - -type 
DescribeDBParameterGroupsResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - DBParameterGroups []DBParameterGroup `xml:"DescribeDBParameterGroupsResult>DBParameterGroups>DBParameterGroup"` -} - -func (rds *Rds) DescribeDBParameterGroups(options *DescribeDBParameterGroups) (resp *DescribeDBParameterGroupsResp, err error) { - params := makeParams("DescribeDBParameterGroups") - - params["DBParameterGroupName"] = options.DBParameterGroupName - - resp = &DescribeDBParameterGroupsResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DescribeDBParameters request params -type DescribeDBParameters struct { - DBParameterGroupName string - Source string -} - -type DescribeDBParametersResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` - Parameters []Parameter `xml:"DescribeDBParametersResult>Parameters>Parameter"` -} - -func (rds *Rds) DescribeDBParameters(options *DescribeDBParameters) (resp *DescribeDBParametersResp, err error) { - params := makeParams("DescribeDBParameters") - - params["DBParameterGroupName"] = options.DBParameterGroupName - - if attr := options.Source; attr != "" { - params["Source"] = attr - } - - resp = &DescribeDBParametersResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DeleteDBInstance request params -type DeleteDBInstance struct { - FinalDBSnapshotIdentifier string - DBInstanceIdentifier string - SkipFinalSnapshot bool -} - -func (rds *Rds) DeleteDBInstance(options *DeleteDBInstance) (resp *SimpleResp, err error) { - params := makeParams("DeleteDBInstance") - - params["DBInstanceIdentifier"] = options.DBInstanceIdentifier - - // If we don't skip the final snapshot, we need to specify a final - // snapshot identifier - if options.SkipFinalSnapshot { - params["SkipFinalSnapshot"] = "true" - } else { - params["FinalDBSnapshotIdentifier"] = options.FinalDBSnapshotIdentifier - } - - resp = &SimpleResp{} - - err = 
rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DeleteDBSecurityGroup request params -type DeleteDBSecurityGroup struct { - DBSecurityGroupName string -} - -func (rds *Rds) DeleteDBSecurityGroup(options *DeleteDBSecurityGroup) (resp *SimpleResp, err error) { - params := makeParams("DeleteDBSecurityGroup") - - params["DBSecurityGroupName"] = options.DBSecurityGroupName - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DeleteDBSubnetGroup request params -type DeleteDBSubnetGroup struct { - DBSubnetGroupName string -} - -func (rds *Rds) DeleteDBSubnetGroup(options *DeleteDBSubnetGroup) (resp *SimpleResp, err error) { - params := makeParams("DeleteDBSubnetGroup") - - params["DBSubnetGroupName"] = options.DBSubnetGroupName - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// DeleteDBParameterGroup request params -type DeleteDBParameterGroup struct { - DBParameterGroupName string -} - -func (rds *Rds) DeleteDBParameterGroup(options *DeleteDBParameterGroup) (resp *SimpleResp, err error) { - params := makeParams("DeleteDBParameterGroup") - - params["DBParameterGroupName"] = options.DBParameterGroupName - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -type RestoreDBInstanceFromDBSnapshot struct { - DBInstanceIdentifier string - DBSnapshotIdentifier string - AutoMinorVersionUpgrade bool - AvailabilityZone string - DBInstanceClass string - DBName string - DBSubnetGroupName string - Engine string - Iops int - LicenseModel string - MultiAZ bool - OptionGroupName string - Port int - PubliclyAccessible bool - - SetIops bool - SetPort bool -} - -func (rds *Rds) RestoreDBInstanceFromDBSnapshot(options *RestoreDBInstanceFromDBSnapshot) (resp *SimpleResp, err error) { - params := makeParams("RestoreDBInstanceFromDBSnapshot") - - params["DBInstanceIdentifier"] = 
options.DBInstanceIdentifier - params["DBSnapshotIdentifier"] = options.DBSnapshotIdentifier - - if options.AutoMinorVersionUpgrade { - params["AutoMinorVersionUpgrade"] = "true" - } - - if options.AvailabilityZone != "" { - params["AvailabilityZone"] = options.AvailabilityZone - } - - if options.DBInstanceClass != "" { - params["DBInstanceClass"] = options.DBInstanceClass - } - - if options.DBName != "" { - params["DBName"] = options.DBName - } - - if options.DBSubnetGroupName != "" { - params["DBSubnetGroupName"] = options.DBSubnetGroupName - } - - if options.Engine != "" { - params["Engine"] = options.Engine - } - - if options.SetIops { - params["Iops"] = strconv.Itoa(options.Iops) - } - - if options.LicenseModel != "" { - params["LicenseModel"] = options.LicenseModel - } - - if options.MultiAZ { - params["MultiAZ"] = "true" - } - - if options.OptionGroupName != "" { - params["OptionGroupName"] = options.OptionGroupName - } - - if options.SetPort { - params["Port"] = strconv.Itoa(options.Port) - } - - if options.PubliclyAccessible { - params["PubliclyAccessible"] = "true" - } - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - -// ModifyDBParameterGroup request parameters -type ModifyDBParameterGroup struct { - DBParameterGroupName string - Parameters []Parameter -} - -func (rds *Rds) ModifyDBParameterGroup(options *ModifyDBParameterGroup) (resp *SimpleResp, err error) { - params := makeParams("ModifyDBParameterGroup") - - params["DBParameterGroupName"] = options.DBParameterGroupName - - for j, group := range options.Parameters { - params["Parameters.member."+strconv.Itoa(j+1)+".ApplyMethod"] = group.ApplyMethod - params["Parameters.member."+strconv.Itoa(j+1)+".ParameterName"] = group.ParameterName - params["Parameters.member."+strconv.Itoa(j+1)+".ParameterValue"] = group.ParameterValue - } - - resp = &SimpleResp{} - - err = rds.query(params, resp) - - if err != nil { - resp = nil - } - - return -} - 
-// Responses - -type SimpleResp struct { - RequestId string `xml:"ResponseMetadata>RequestId"` -} - -type xmlErrors struct { - Errors []Error `xml:"Error"` -} - -// Error encapsulates an Rds error. -type Error struct { - // HTTP status code of the error. - StatusCode int - - // AWS code of the error. - Code string - - // Message explaining the error. - Message string -} - -func (e *Error) Error() string { - var prefix string - if e.Code != "" { - prefix = e.Code + ": " - } - if prefix == "" && e.StatusCode > 0 { - prefix = strconv.Itoa(e.StatusCode) + ": " - } - return prefix + e.Message -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/rds_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/rds_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/rds_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/rds_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,436 +0,0 @@ -package rds_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/rds" - "github.com/mitchellh/goamz/testutil" - . 
"github.com/motain/gocheck" - "testing" -) - -func Test(t *testing.T) { - TestingT(t) -} - -type S struct { - rds *rds.Rds -} - -var _ = Suite(&S{}) - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - s.rds = rds.NewWithClient(auth, aws.Region{RdsEndpoint: testServer.URL}, testutil.DefaultClient) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) Test_CreateDBInstance(c *C) { - testServer.Response(200, nil, CreateDBInstanceExample) - - options := rds.CreateDBInstance{ - BackupRetentionPeriod: 30, - MultiAZ: false, - DBInstanceIdentifier: "foobarbaz", - PreferredBackupWindow: "10:07-10:37", - PreferredMaintenanceWindow: "sun:06:13-sun:06:43", - AvailabilityZone: "us-west-2b", - Engine: "mysql", - EngineVersion: "", - DBName: "5.6.13", - AllocatedStorage: 10, - StorageType: "gp2", - MasterUsername: "foobar", - MasterUserPassword: "bazbarbaz", - DBInstanceClass: "db.m1.small", - DBSecurityGroupNames: []string{"foo", "bar"}, - DBParameterGroupName: "default.mysql5.6", - - SetBackupRetentionPeriod: true, - } - - resp, err := s.rds.CreateDBInstance(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateDBInstance"}) - c.Assert(req.Form["Engine"], DeepEquals, []string{"mysql"}) - c.Assert(req.Form["StorageType"], DeepEquals, []string{"gp2"}) - c.Assert(req.Form["DBSecurityGroups.member.1"], DeepEquals, []string{"foo"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "523e3218-afc7-11c3-90f5-f90431260ab4") -} - -func (s *S) Test_CreateDBSecurityGroup(c *C) { - testServer.Response(200, nil, CreateDBSecurityGroupExample) - - options := rds.CreateDBSecurityGroup{ - DBSecurityGroupName: "foobarbaz", - DBSecurityGroupDescription: "test description", - } - - resp, err := s.rds.CreateDBSecurityGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, 
[]string{"CreateDBSecurityGroup"}) - c.Assert(req.Form["DBSecurityGroupName"], DeepEquals, []string{"foobarbaz"}) - c.Assert(req.Form["DBSecurityGroupDescription"], DeepEquals, []string{"test description"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "e68ef6fa-afc1-11c3-845a-476777009d19") -} - -func (s *S) Test_CreateDBSubnetGroup(c *C) { - testServer.Response(200, nil, CreateDBSubnetGroupExample) - - options := rds.CreateDBSubnetGroup{ - DBSubnetGroupName: "foobarbaz", - DBSubnetGroupDescription: "test description", - SubnetIds: []string{"subnet-e4d398a1", "subnet-c2bdb6ba"}, - } - - resp, err := s.rds.CreateDBSubnetGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateDBSubnetGroup"}) - c.Assert(req.Form["DBSubnetGroupName"], DeepEquals, []string{"foobarbaz"}) - c.Assert(req.Form["DBSubnetGroupDescription"], DeepEquals, []string{"test description"}) - c.Assert(req.Form["SubnetIds.member.1"], DeepEquals, []string{"subnet-e4d398a1"}) - c.Assert(req.Form["SubnetIds.member.2"], DeepEquals, []string{"subnet-c2bdb6ba"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "3a401b3f-bb9e-11d3-f4c6-37db295f7674") -} - -func (s *S) Test_CreateDBParameterGroup(c *C) { - testServer.Response(200, nil, CreateDBParameterGroupExample) - - options := rds.CreateDBParameterGroup{ - DBParameterGroupFamily: "mysql5.6", - DBParameterGroupName: "mydbparamgroup3", - Description: "My new DB Parameter Group", - } - - resp, err := s.rds.CreateDBParameterGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"CreateDBParameterGroup"}) - c.Assert(req.Form["DBParameterGroupFamily"], DeepEquals, []string{"mysql5.6"}) - c.Assert(req.Form["DBParameterGroupName"], DeepEquals, []string{"mydbparamgroup3"}) - c.Assert(req.Form["Description"], DeepEquals, []string{"My new DB Parameter Group"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, 
"7805c127-af22-11c3-96ac-6999cc5f7e72") -} - -func (s *S) Test_DescribeDBInstances(c *C) { - testServer.Response(200, nil, DescribeDBInstancesExample) - - options := rds.DescribeDBInstances{ - DBInstanceIdentifier: "foobarbaz", - } - - resp, err := s.rds.DescribeDBInstances(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeDBInstances"}) - c.Assert(req.Form["DBInstanceIdentifier"], DeepEquals, []string{"foobarbaz"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "01b2685a-b978-11d3-f272-7cd6cce12cc5") - c.Assert(resp.DBInstances[0].DBName, Equals, "mysampledb") - c.Assert(resp.DBInstances[0].DBSecurityGroupNames, DeepEquals, []string{"my-db-secgroup"}) - c.Assert(resp.DBInstances[0].DBParameterGroupName, Equals, "default.mysql5.6") - c.Assert(resp.DBInstances[0].StorageType, Equals, "gp2") - c.Assert(resp.DBInstances[1].VpcSecurityGroupIds, DeepEquals, []string{"my-vpc-secgroup"}) -} - -func (s *S) Test_DescribeDBSecurityGroups(c *C) { - testServer.Response(200, nil, DescribeDBSecurityGroupsExample) - - options := rds.DescribeDBSecurityGroups{ - DBSecurityGroupName: "foobarbaz", - } - - resp, err := s.rds.DescribeDBSecurityGroups(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeDBSecurityGroups"}) - c.Assert(req.Form["DBSecurityGroupName"], DeepEquals, []string{"foobarbaz"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "b76e692c-b98c-11d3-a907-5a2c468b9cb0") - c.Assert(resp.DBSecurityGroups[0].EC2SecurityGroupIds, DeepEquals, []string{"sg-7f476617"}) - c.Assert(resp.DBSecurityGroups[0].EC2SecurityGroupOwnerIds, DeepEquals, []string{"803#########"}) - c.Assert(resp.DBSecurityGroups[0].EC2SecurityGroupStatuses, DeepEquals, []string{"authorized"}) - c.Assert(resp.DBSecurityGroups[0].CidrIps, DeepEquals, []string{"192.0.0.0/24", "190.0.1.0/29", "190.0.2.0/29", "10.0.0.0/8"}) - c.Assert(resp.DBSecurityGroups[0].CidrStatuses, 
DeepEquals, []string{"authorized", "authorized", "authorized", "authorized"}) -} - -func (s *S) Test_DescribeDBSubnetGroups(c *C) { - testServer.Response(200, nil, DescribeDBSubnetGroupsExample) - - options := rds.DescribeDBSubnetGroups{ - DBSubnetGroupName: "foobarbaz", - } - - resp, err := s.rds.DescribeDBSubnetGroups(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeDBSubnetGroups"}) - c.Assert(req.Form["DBSubnetGroupName"], DeepEquals, []string{"foobarbaz"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "b783db3b-b98c-11d3-fbc7-5c0aad74da7c") - c.Assert(resp.DBSubnetGroups[0].Status, DeepEquals, "Complete") - c.Assert(resp.DBSubnetGroups[0].SubnetIds, DeepEquals, []string{"subnet-e8b3e5b1", "subnet-44b2f22e"}) - c.Assert(resp.DBSubnetGroups[0].VpcId, DeepEquals, "vpc-e7abbdce") -} - -func (s *S) Test_DescribeDBParameterGroups(c *C) { - testServer.Response(200, nil, DescribeDBParameterGroupsExample) - - options := rds.DescribeDBParameterGroups{ - DBParameterGroupName: "mydbparamgroup3", - } - - resp, err := s.rds.DescribeDBParameterGroups(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeDBParameterGroups"}) - c.Assert(req.Form["DBParameterGroupName"], DeepEquals, []string{"mydbparamgroup3"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "b75d527a-b98c-11d3-f272-7cd6cce12cc5") - c.Assert(resp.DBParameterGroups[0].DBParameterGroupFamily, Equals, "mysql5.6") - c.Assert(resp.DBParameterGroups[0].Description, Equals, "My new DB Parameter Group") - c.Assert(resp.DBParameterGroups[0].DBParameterGroupName, Equals, "mydbparamgroup3") -} - -func (s *S) Test_DescribeDBParameters(c *C) { - testServer.Response(200, nil, DescribeDBParametersExample) - - options := rds.DescribeDBParameters{ - DBParameterGroupName: "mydbparamgroup3", - Source: "user", - } - - resp, err := s.rds.DescribeDBParameters(&options) - req := testServer.WaitRequest() 
- - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeDBParameters"}) - c.Assert(req.Form["DBParameterGroupName"], DeepEquals, []string{"mydbparamgroup3"}) - c.Assert(req.Form["Source"], DeepEquals, []string{"user"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "8c40488f-b9ff-11d3-a15e-7ac49293f4fa") - c.Assert(resp.Parameters[0].ParameterName, Equals, "character_set_server") - c.Assert(resp.Parameters[0].ParameterValue, Equals, "utf8") - c.Assert(resp.Parameters[1].ParameterName, Equals, "character_set_client") - c.Assert(resp.Parameters[1].ParameterValue, Equals, "utf8") - c.Assert(resp.Parameters[2].ParameterName, Equals, "character_set_results") - c.Assert(resp.Parameters[2].ParameterValue, Equals, "utf8") - c.Assert(resp.Parameters[3].ParameterName, Equals, "collation_server") - c.Assert(resp.Parameters[3].ParameterValue, Equals, "utf8_unicode_ci") - c.Assert(resp.Parameters[4].ParameterName, Equals, "collation_connection") - c.Assert(resp.Parameters[4].ParameterValue, Equals, "utf8_unicode_ci") -} - -func (s *S) Test_DeleteDBInstance(c *C) { - testServer.Response(200, nil, DeleteDBInstanceExample) - - options := rds.DeleteDBInstance{ - DBInstanceIdentifier: "foobarbaz", - SkipFinalSnapshot: true, - } - - resp, err := s.rds.DeleteDBInstance(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteDBInstance"}) - c.Assert(req.Form["DBInstanceIdentifier"], DeepEquals, []string{"foobarbaz"}) - c.Assert(req.Form["SkipFinalSnapshot"], DeepEquals, []string{"true"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7369556f-b70d-11c3-faca-6ba18376ea1b") -} - -func (s *S) Test_DeleteDBInstance_SnapshotIdentifier(c *C) { - testServer.Response(200, nil, DeleteDBInstanceExample) - - options := rds.DeleteDBInstance{ - DBInstanceIdentifier: "foobarbaz", - SkipFinalSnapshot: false, - FinalDBSnapshotIdentifier: "bar", - } - - resp, err := s.rds.DeleteDBInstance(&options) - req := 
testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteDBInstance"}) - c.Assert(req.Form["DBInstanceIdentifier"], DeepEquals, []string{"foobarbaz"}) - c.Assert(req.Form["FinalDBSnapshotIdentifier"], DeepEquals, []string{"bar"}) - c.Assert(req.Form["SkipFinalSnapshot"], IsNil) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7369556f-b70d-11c3-faca-6ba18376ea1b") -} - -func (s *S) Test_DeleteDBSecurityGroup(c *C) { - testServer.Response(200, nil, DeleteDBSecurityGroupExample) - - options := rds.DeleteDBSecurityGroup{ - DBSecurityGroupName: "foobarbaz", - } - - resp, err := s.rds.DeleteDBSecurityGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteDBSecurityGroup"}) - c.Assert(req.Form["DBSecurityGroupName"], DeepEquals, []string{"foobarbaz"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "7aec7454-ba25-11d3-855b-576787000e19") -} - -func (s *S) Test_DeleteDBSubnetGroup(c *C) { - testServer.Response(200, nil, DeleteDBSubnetGroupExample) - - options := rds.DeleteDBSubnetGroup{ - DBSubnetGroupName: "foobarbaz", - } - - resp, err := s.rds.DeleteDBSubnetGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteDBSubnetGroup"}) - c.Assert(req.Form["DBSubnetGroupName"], DeepEquals, []string{"foobarbaz"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "6295e5ab-bbf3-11d3-f4c6-37db295f7674") -} - -func (s *S) Test_DeleteDBParameterGroup(c *C) { - testServer.Response(200, nil, DeleteDBParameterGroupExample) - - options := rds.DeleteDBParameterGroup{ - DBParameterGroupName: "mydbparamgroup3", - } - - resp, err := s.rds.DeleteDBParameterGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteDBParameterGroup"}) - c.Assert(req.Form["DBParameterGroupName"], DeepEquals, []string{"mydbparamgroup3"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, 
"cad6c267-ba25-11d3-fe11-33d33a9bb7e3") -} - -func (s *S) Test_AuthorizeDBSecurityGroupIngress(c *C) { - testServer.Response(200, nil, AuthorizeDBSecurityGroupIngressExample) - - options := rds.AuthorizeDBSecurityGroupIngress{ - DBSecurityGroupName: "foobarbaz", - EC2SecurityGroupOwnerId: "bar", - } - - resp, err := s.rds.AuthorizeDBSecurityGroupIngress(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"AuthorizeDBSecurityGroupIngress"}) - c.Assert(req.Form["DBSecurityGroupName"], DeepEquals, []string{"foobarbaz"}) - c.Assert(req.Form["EC2SecurityGroupOwnerId"], DeepEquals, []string{"bar"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "6176b5f8-bfed-11d3-f92b-31fa5e8dbc99") -} - -func (s *S) Test_DescribeDBSnapshots(c *C) { - testServer.Response(200, nil, DescribeDBSnapshotsExample) - - options := rds.DescribeDBSnapshots{ - DBInstanceIdentifier: "foobar", - DBSnapshotIdentifier: "baz", - SnapshotType: "manual", - } - - resp, err := s.rds.DescribeDBSnapshots(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeDBSnapshots"}) - c.Assert(req.Form["DBInstanceIdentifier"], DeepEquals, []string{"foobar"}) - c.Assert(req.Form["DBSnapshotIdentifier"], DeepEquals, []string{"baz"}) - c.Assert(req.Form["SnapshotType"], DeepEquals, []string{"manual"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "b7769930-b98c-11d3-f272-7cd6cce12cc5") - c.Assert(resp.DBSnapshots[0].OptionGroupName, Equals, "default:mysql-5-6") - c.Assert(resp.DBSnapshots[0].Engine, Equals, "mysql") - c.Assert(resp.DBSnapshots[0].SnapshotType, Equals, "manual") -} - -func (s *S) Test_RestoreDBInstanceFromDBSnapshot(c *C) { - testServer.Response(200, nil, RestoreDBInstanceFromDBSnapshotExample) - - options := rds.RestoreDBInstanceFromDBSnapshot{ - DBInstanceIdentifier: "foo", - DBSnapshotIdentifier: "bar", - } - - resp, err := s.rds.RestoreDBInstanceFromDBSnapshot(&options) - req := 
testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"RestoreDBInstanceFromDBSnapshot"}) - c.Assert(req.Form["DBInstanceIdentifier"], DeepEquals, []string{"foo"}) - c.Assert(req.Form["DBSnapshotIdentifier"], DeepEquals, []string{"bar"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "863fd73e-be2b-11d3-855b-576787000e19") -} - -func (s *S) Test_ModifyDBParameterGroup(c *C) { - testServer.Response(200, nil, ModifyDBParameterGroupExample) - - options := rds.ModifyDBParameterGroup{ - DBParameterGroupName: "mydbparamgroup3", - Parameters: []rds.Parameter{ - rds.Parameter{ - ApplyMethod: "immediate", - ParameterName: "character_set_server", - ParameterValue: "utf8", - }, - rds.Parameter{ - ApplyMethod: "immediate", - ParameterName: "character_set_client", - ParameterValue: "utf8", - }, - rds.Parameter{ - ApplyMethod: "immediate", - ParameterName: "character_set_results", - ParameterValue: "utf8", - }, - rds.Parameter{ - ApplyMethod: "immediate", - ParameterName: "collation_server", - ParameterValue: "utf8_unicode_ci", - }, - rds.Parameter{ - ApplyMethod: "immediate", - ParameterName: "collation_connection", - ParameterValue: "utf8_unicode_ci", - }, - }, - } - - resp, err := s.rds.ModifyDBParameterGroup(&options) - req := testServer.WaitRequest() - - c.Assert(req.Form["Action"], DeepEquals, []string{"ModifyDBParameterGroup"}) - c.Assert(req.Form["DBParameterGroupName"], DeepEquals, []string{"mydbparamgroup3"}) - c.Assert(req.Form["Parameters.member.1.ApplyMethod"], DeepEquals, []string{"immediate"}) - c.Assert(req.Form["Parameters.member.1.ParameterName"], DeepEquals, []string{"character_set_server"}) - c.Assert(req.Form["Parameters.member.1.ParameterValue"], DeepEquals, []string{"utf8"}) - c.Assert(req.Form["Parameters.member.2.ApplyMethod"], DeepEquals, []string{"immediate"}) - c.Assert(req.Form["Parameters.member.2.ParameterName"], DeepEquals, []string{"character_set_client"}) - 
c.Assert(req.Form["Parameters.member.2.ParameterValue"], DeepEquals, []string{"utf8"}) - c.Assert(req.Form["Parameters.member.3.ApplyMethod"], DeepEquals, []string{"immediate"}) - c.Assert(req.Form["Parameters.member.3.ParameterName"], DeepEquals, []string{"character_set_results"}) - c.Assert(req.Form["Parameters.member.3.ParameterValue"], DeepEquals, []string{"utf8"}) - c.Assert(req.Form["Parameters.member.4.ApplyMethod"], DeepEquals, []string{"immediate"}) - c.Assert(req.Form["Parameters.member.4.ParameterName"], DeepEquals, []string{"collation_server"}) - c.Assert(req.Form["Parameters.member.4.ParameterValue"], DeepEquals, []string{"utf8_unicode_ci"}) - c.Assert(req.Form["Parameters.member.5.ApplyMethod"], DeepEquals, []string{"immediate"}) - c.Assert(req.Form["Parameters.member.5.ParameterName"], DeepEquals, []string{"collation_connection"}) - c.Assert(req.Form["Parameters.member.5.ParameterValue"], DeepEquals, []string{"utf8_unicode_ci"}) - c.Assert(err, IsNil) - c.Assert(resp.RequestId, Equals, "12d7435e-bba0-11d3-fe11-33d33a9bb7e3") -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,639 +0,0 @@ -package rds_test - -var ErrorDump = ` - -UnsupportedOperation - -0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4 -` - -// http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html -var DescribeDBInstancesExample = ` - - - - - 7 - false - available - - mysqlexampledb - 10:07-10:37 - sun:06:13-sun:06:43 - us-west-2b - 2014-04-21T17:15:00Z - - mysql - - general-public-license - - - in-sync - default.mysql5.6 - - - 
- 3306 -
    mysqlexampledb.c6c1rntzufv0.us-west-2.rds.amazonaws.com
    -
    - 5.6.13 - - - default:mysql-5-6 - in-sync - - - - - active - my-db-secgroup - - - true - mysampledb - true - 2014-01-29T22:58:24.231Z - 5 - gp2 - myawsuser - db.t1.micro -
    - - 7 - false - available - - - active - my-vpc-secgroup - - - mysqlexampledb-restore - 10:07-10:37 - sun:06:13-sun:06:43 - us-west-2b - 2014-04-21T17:15:00Z - - mysql - - general-public-license - - - in-sync - default.mysql5.6 - - - - 3306 -
    mysqlexampledb-restore.c6c2mntzugv0.us-west-2.rds.amazonaws.com
    -
    - 5.6.13 - - - default:mysql-5-6 - in-sync - - - - true - mysampledb - true - 2014-03-28T20:14:17.296Z - 5 - myawsuser - db.t1.micro -
    -
    -
    - - 01b2685a-b978-11d3-f272-7cd6cce12cc5 - -
    -` - -var CreateDBInstanceExample = ` - - - - 7 - creating - false - - myawsuser-dbi01 - - 03:50-04:20 - wed:06:38-wed:07:08 - - mysql - - **** - - general-public-license - 5.6.13 - - - in-sync - default.mysql5.6 - - - - - default:mysql-5-6 - in-sync - - - - - active - default - - - true - true - 15 - db.m1.large - myawsuser - - - - 523e3218-afc7-11c3-90f5-f90431260ab4 - - -` - -var DeleteDBInstanceExample = ` - - - - 7 - deleting - false - - mydatabase - 08:14-08:44 - fri:04:50-fri:05:20 - us-east-1a - - 2013-11-09T00:15:00Z - mysql - - general-public-license - 5.6.13 - - 3306 -
    mydatabase.cf037hpkuvjt.us-east-1.rds.amazonaws.com
    -
    - - - in-sync - default.mysql5.6 - - - - - default:mysql-5-6 - in-sync - - - true - - - active - default - - - mysqldb - true - 2011-04-28T23:33:54.909Z - 100 - myawsuser - db.m1.medium -
    -
    - - 7369556f-b70d-11c3-faca-6ba18376ea1b - -
    -` - -var DescribeDBSecurityGroupsExample = ` - - - - - - - authorized - elasticbeanstalk-windows - 803######### - sg-7f476617 - - - My security group - - - 192.0.0.0/24 - authorized - - - 190.0.1.0/29 - authorized - - - 190.0.2.0/29 - authorized - - - 10.0.0.0/8 - authorized - - - 803######### - my-secgrp - - - - default - - 803######### - default - - - - - b76e692c-b98c-11d3-a907-5a2c468b9cb0 - - -` - -var DeleteDBSecurityGroupExample = ` - - - 7aec7454-ba25-11d3-855b-576787000e19 - - -` - -var CreateDBSecurityGroupExample = ` - - - - - My new DB Security Group - - 803######### - mydbsecuritygroup00 - - - - e68ef6fa-afc1-11c3-845a-476777009d19 - - -` - -var AuthorizeDBSecurityGroupIngressExample = ` - - - - - - authorized - elasticbeanstalk-windows - 803######### - sg-7f476617 - - - default - - - 192.0.0.0/24 - authorized - - - 190.0.1.0/29 - authorized - - - 190.0.2.0/29 - authorized - - - 10.0.0.0/8 - authorized - - - 803######### - default - - - - 6176b5f8-bfed-11d3-f92b-31fa5e8dbc99 - - -` - -var DescribeDBSubnetGroupsExample = ` - - - - - vpc-e7abbdce - Complete - DB subnet group 1 - mydbsubnetgroup1 - - - Active - subnet-e8b3e5b1 - - us-west-2a - false - - - - Active - subnet-44b2f22e - - us-west-2b - false - - - - - - vpc-c1e17bb8 - Complete - My DB Subnet Group 2 - sub-grp-2 - - - Active - subnet-d281ef8a - - us-west-2a - false - - - - Active - subnet-b381ef9f - - us-west-2c - false - - - - Active - subnet-e1e17ebd - - us-west-2b - false - - - - - - - - b783db3b-b98c-11d3-fbc7-5c0aad74da7c - - -` - -var DeleteDBSubnetGroupExample = ` - - - 6295e5ab-bbf3-11d3-f4c6-37db295f7674 - - -` - -var CreateDBSubnetGroupExample = ` - - - - vpc-33dc97ea - Complete - My new DB Subnet Group - myawsuser-dbsubnetgroup - - - Active - subnet-e4d398a1 - - us-east-1b - false - - - - Active - subnet-c2bdb6ba - - us-east-1c - false - - - - - - - 3a401b3f-bb9e-11d3-f4c6-37db295f7674 - - -` - -var DescribeDBSnapshotsExample = ` - - - - - 3306 - default:mysql-5-6 - mysql - 
available - manual - general-public-license - 5.6.13 - my-mysqlexampledb - my-test-restore-snapshot - 2014-03-28T19:57:16.707Z - us-west-2b - 2014-01-29T22:58:24.231Z - 100 - 5 - awsmyuser - - - 3306 - default:mysql-5-6 - mysql - available - automated - general-public-license - 5.6.13 - my-mysqlexampledb - rds:my-mysqlexampledb-2014-04-19-10-08 - 2014-04-19T10:09:09.790Z - us-west-2b - 2014-01-29T22:58:24.231Z - 100 - 5 - awsmyuser - - - 3306 - default:mysql-5-6 - mysql - available - automated - general-public-license - 5.6.13 - my-mysqlexampledb - rds:my-mysqlexampledb-2014-04-20-10-09 - 2014-04-20T10:09:15.446Z - us-west-2b - 2014-01-29T22:58:24.231Z - 100 - 5 - awsmyuser - - - - - b7769930-b98c-11d3-f272-7cd6cce12cc5 - - -` - -var RestoreDBInstanceFromDBSnapshotExample = ` - - - - 7 - false - creating - - mysqldb-restored - 08:14-08:44 - fri:04:50-fri:05:20 - - mysql - - general-public-license - 5.6.13 - - - in-sync - default.mysql5.6 - - - - - default:mysql-5-6 - pending-apply - - - true - - - active - default - - - mysqldb - true - 100 - myawsuser - db.m1.medium - - - - 863fd73e-be2b-11d3-855b-576787000e19 - - -` - -var CreateDBParameterGroupExample = ` - - - - mysql5.1 - My new DB Parameter Group - mydbparamgroup3 - - - - 7805c127-af22-11c3-96ac-6999cc5f7e72 - - -` - -var DescribeDBParameterGroupsExample = ` - - - - - mysql5.6 - My new DB Parameter Group - mydbparamgroup3 - - - - - b75d527a-b98c-11d3-f272-7cd6cce12cc5 - - -` - - -var DeleteDBParameterGroupExample = ` - - - cad6c267-ba25-11d3-fe11-33d33a9bb7e3 - - -` - -var ModifyDBParameterGroupExample = ` - - - mydbparamgroup3 - - - 12d7435e-bba0-11d3-fe11-33d33a9bb7e3 - - -` - -var DescribeDBParametersExample = ` - - - bGlzdGVuZXJfbmV0d29ya3M= - - - utf8 - character_set_server - - - utf8 - character_set_client - - - utf8 - character_set_results - - - utf8_unicode_ci - collation_server - - - utf8_unicode_ci - collation_connection - - - - - 8c40488f-b9ff-11d3-a15e-7ac49293f4fa - - -` diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/sign.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/rds/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -package rds - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "github.com/mitchellh/goamz/aws" - "sort" - "strings" -) - -// ---------------------------------------------------------------------------- -// Version 2 signing (http://goo.gl/RSRp5) - -var b64 = base64.StdEncoding - -func sign(auth aws.Auth, method, path string, params map[string]string, host string) { - params["AWSAccessKeyId"] = auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - if auth.Token != "" { - params["SecurityToken"] = auth.Token - } - - var sarray []string - for k, v := range params { - sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(v)) - } - sort.StringSlice(sarray).Sort() - joined := strings.Join(sarray, "&") - payload := method + "\n" + host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/README.md 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -# goamz - An Amazon 
Library for Go - -Current API documentation: [![GoDoc](https://godoc.org/github.com/mitchellh/goamz?status.svg)](https://godoc.org/github.com/mitchellh/goamz) - -This is a fork of [https://launchpad.net/goamz](https://launchpad.net/goamz) -that adds some missing API calls to certain packages. - -This library is *incomplete*, but implements a large amount of the AWS API. -It is heavily used in projects such as -[Terraform](https://github.com/hashicorp/terraform) and -[Packer](https://github.com/mitchellh/packer). -If you find anything missing from this library, -please [file an issue](https://github.com/mitchellh/goamz). - -## Example Usage - -```go -package main - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/s3" - "log" - "fmt" -) - -func main() { - auth, err := aws.EnvAuth() - if err != nil { - log.Fatal(err) - } - client := s3.New(auth, aws.USEast) - resp, err := client.ListBuckets() - - if err != nil { - log.Fatal(err) - } - - log.Print(fmt.Sprintf("%T %+v", resp.Buckets[0], resp.Buckets[0])) -} -``` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,122 +0,0 @@ -package route53 - -var CreateHostedZoneExample = ` - - - /hostedzone/Z1PA6795UKMFR9 - example.com. - myUniqueIdentifier - - This is my first hosted zone. 
- - 2 - - - /change/C1PA6795UKMFR9 - PENDING - 2012-03-15T01:36:41.958Z - - - - ns-2048.awsdns-64.com - ns-2049.awsdns-65.net - ns-2050.awsdns-66.org - ns-2051.awsdns-67.co.uk - - -` - -var DeleteHostedZoneExample = ` - - - /change/C1PA6795UKMFR9 - PENDING - 2012-03-10T01:36:41.958Z - -` - -var GetHostedZoneExample = ` - - - /hostedzone/Z1PA6795UKMFR9 - example.com. - myUniqueIdentifier - - This is my first hosted zone. - - 17 - - - - ns-2048.awsdns-64.com - ns-2049.awsdns-65.net - ns-2050.awsdns-66.org - ns-2051.awsdns-67.co.uk - - -` - -var GetChangeExample = ` - - - C2682N5HXP0BZ4 - INSYNC - 2011-09-10T01:36:41.958Z - -` - -var ChangeResourceRecordSetsExample = ` - - - /change/asdf - PENDING - 2014 - -` - -var ListResourceRecordSetsExample = ` - - - - example.com. - SOA - 900 - - - ns-2048.awsdns-64.net. hostmaster.awsdns.com. 1 7200 900 1209600 86400 - - - - - true - 1 - testdoc2.example.com - NS -` - -var ListHostedZonesExample = ` - - - - /hostedzone/Z2K123214213123 - example.com. - D2224C5B-684A-DB4A-BB9A-E09E3BAFEA7A - - Test comment - - 10 - - - /hostedzone/ZLT12321321124 - sub.example.com. 
- A970F076-FCB1-D959-B395-96474CC84EB8 - - Test comment for subdomain host - - 4 - - - false - 100 -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/route53.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/route53.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/route53.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/route53.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,377 +0,0 @@ -// The route53 package provides types and functions for interaction with the AWS -// Route53 service -package route53 - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "time" - - "github.com/mitchellh/goamz/aws" -) - -// The Route53 type encapsulates operations operations with the route53 endpoint. -type Route53 struct { - aws.Auth - aws.Region - httpClient *http.Client -} - -const APIVersion = "2013-04-01" - -// New creates a new ELB instance. 
-func New(auth aws.Auth, region aws.Region) *Route53 { - return NewWithClient(auth, region, aws.RetryingClient) -} - -func NewWithClient(auth aws.Auth, region aws.Region, httpClient *http.Client) *Route53 { - return &Route53{auth, region, httpClient} -} - -type CreateHostedZoneRequest struct { - Name string `xml:"Name"` - CallerReference string `xml:"CallerReference"` - Comment string `xml:"HostedZoneConfig>Comment"` -} - -type CreateHostedZoneResponse struct { - HostedZone HostedZone `xml:"HostedZone"` - ChangeInfo ChangeInfo `xml:"ChangeInfo"` - DelegationSet DelegationSet `xml:"DelegationSet"` -} - -type HostedZone struct { - ID string `xml:"Id"` - Name string `xml:"Name"` - CallerReference string `xml:"CallerReference"` - Comment string `xml:"Config>Comment"` - ResourceCount int `xml:"ResourceRecordSetCount"` -} - -type ChangeInfo struct { - ID string `xml:"Id"` - Status string `xml:"Status"` - SubmittedAt string `xml:"SubmittedAt"` -} - -type DelegationSet struct { - NameServers []string `xml:"NameServers>NameServer"` -} - -func (r *Route53) query(method, path string, req, resp interface{}) error { - params := make(map[string]string) - endpoint, err := url.Parse(r.Region.Route53Endpoint) - if err != nil { - return err - } - endpoint.Path = path - sign(r.Auth, endpoint.Path, params) - - // If they look like url.Values, just encode... 
- if queryArgs, ok := req.(url.Values); ok { - endpoint.RawQuery = queryArgs.Encode() - req = nil - } - - // Encode the body - var body io.ReadWriter - if req != nil { - bodyBuf := bytes.NewBuffer(nil) - enc := xml.NewEncoder(bodyBuf) - start := xml.StartElement{ - Name: xml.Name{ - Space: "", - Local: reflect.Indirect(reflect.ValueOf(req)).Type().Name(), - }, - Attr: []xml.Attr{{xml.Name{"", "xmlns"}, "https://route53.amazonaws.com/doc/2013-04-01/"}}, - } - if err := enc.EncodeElement(req, start); err != nil { - return err - } - - // This is really a sadness, but can't think of a better way to - // do this for now in Go's constructs. - replace := "" - if strings.Contains(bodyBuf.String(), replace) { - var newBuf bytes.Buffer - newBuf.WriteString(strings.Replace(bodyBuf.String(), replace, "", -1)) - bodyBuf = &newBuf - } - - // http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html - if reflect.Indirect(reflect.ValueOf(req)).Type().Name() == "ChangeResourceRecordSetsRequest" { - for _, change := range req.(ChangeResourceRecordSetsRequest).Changes { - if change.Record.AliasTarget != nil { - replace := change.Record.Type + "
    0" - var newBuf bytes.Buffer - newBuf.WriteString(strings.Replace(bodyBuf.String(), replace, change.Record.Type+"
    ", -1)) - bodyBuf = &newBuf - } - } - } - - body = bodyBuf - } - - // Make the http request - hReq, err := http.NewRequest(method, endpoint.String(), body) - if err != nil { - return err - } - for k, v := range params { - hReq.Header.Set(k, v) - } - re, err := r.httpClient.Do(hReq) - if err != nil { - return err - } - defer re.Body.Close() - - // Check the status code - switch re.StatusCode { - case 200: - case 201: - default: - var body bytes.Buffer - io.Copy(&body, re.Body) - return fmt.Errorf("Request failed, got status code: %d. Response: %s", - re.StatusCode, body.Bytes()) - } - - // Decode the response - decoder := xml.NewDecoder(re.Body) - return decoder.Decode(resp) -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -// CreateHostedZone is used to create a new hosted zone -func (r *Route53) CreateHostedZone(req *CreateHostedZoneRequest) (*CreateHostedZoneResponse, error) { - // Generate a unique caller reference if none provided - if req.CallerReference == "" { - req.CallerReference = time.Now().Format(time.RFC3339Nano) - } - out := &CreateHostedZoneResponse{} - if err := r.query("POST", fmt.Sprintf("/%s/hostedzone", APIVersion), req, out); err != nil { - return nil, err - } - return out, nil -} - -type DeleteHostedZoneResponse struct { - ChangeInfo ChangeInfo `xml:"ChangeInfo"` -} - -func (r *Route53) DeleteHostedZone(ID string) (*DeleteHostedZoneResponse, error) { - // Remove the hostedzone prefix if given - ID = CleanZoneID(ID) - out := &DeleteHostedZoneResponse{} - err := r.query("DELETE", fmt.Sprintf("/%s/hostedzone/%s", APIVersion, ID), nil, out) - if err != nil { - return nil, err - } - return out, err -} - -// CleanZoneID is used to remove the leading /hostedzone/ -func CleanZoneID(ID string) string { - if strings.HasPrefix(ID, "/hostedzone/") { - ID = strings.TrimPrefix(ID, "/hostedzone/") - } - return ID -} - -// CleanChangeID is used to remove 
the leading /change/ -func CleanChangeID(ID string) string { - if strings.HasPrefix(ID, "/change/") { - ID = strings.TrimPrefix(ID, "/change/") - } - return ID -} - -type GetHostedZoneResponse struct { - HostedZone HostedZone `xml:"HostedZone"` - DelegationSet DelegationSet `xml:"DelegationSet"` -} - -func (r *Route53) GetHostedZone(ID string) (*GetHostedZoneResponse, error) { - // Remove the hostedzone prefix if given - ID = CleanZoneID(ID) - out := &GetHostedZoneResponse{} - err := r.query("GET", fmt.Sprintf("/%s/hostedzone/%s", APIVersion, ID), nil, out) - if err != nil { - return nil, err - } - return out, err -} - -type ListHostedZonesResponse struct { - HostedZones []HostedZone `xml:"HostedZones>HostedZone"` - Marker string `xml:"Marker"` - IsTruncated bool `xml:"IsTruncated"` - NextMarker string `xml:"NextMarker"` - MaxItems int `xml:"MaxItems"` -} - -func (r *Route53) ListHostedZones(marker string, maxItems int) (*ListHostedZonesResponse, error) { - values := url.Values{} - - if marker != "" { - values.Add("marker", marker) - } - - if maxItems != 0 { - values.Add("maxItems", strconv.Itoa(maxItems)) - } - - out := &ListHostedZonesResponse{} - err := r.query("GET", fmt.Sprintf("/%s/hostedzone/", APIVersion), values, out) - if err != nil { - return nil, err - } - return out, err -} - -type GetChangeResponse struct { - ChangeInfo ChangeInfo `xml:"ChangeInfo"` -} - -func (r *Route53) GetChange(ID string) (string, error) { - ID = CleanChangeID(ID) - out := &GetChangeResponse{} - err := r.query("GET", fmt.Sprintf("/%s/change/%s", APIVersion, ID), nil, out) - if err != nil { - return "", err - } - return out.ChangeInfo.Status, err -} - -type ChangeResourceRecordSetsRequest struct { - Comment string `xml:"ChangeBatch>Comment,omitempty"` - Changes []Change `xml:"ChangeBatch>Changes>Change"` -} - -type Change struct { - Action string `xml:"Action"` - Record ResourceRecordSet `xml:"ResourceRecordSet"` -} - -type AliasTarget struct { - HostedZoneId string - DNSName 
string - EvaluateTargetHealth bool -} - -type ChangeResourceRecordSetsResponse struct { - ChangeInfo ChangeInfo `xml:"ChangeInfo"` -} - -func (r *Route53) ChangeResourceRecordSets(zone string, - req *ChangeResourceRecordSetsRequest) (*ChangeResourceRecordSetsResponse, error) { - // This is really sad, but we have to format this differently - // for Route53 to make them happy. - reqCopy := *req - for i, change := range reqCopy.Changes { - if len(change.Record.Records) > 1 { - var buf bytes.Buffer - for _, r := range change.Record.Records { - buf.WriteString(fmt.Sprintf( - "%s", - r)) - } - - change.Record.Records = nil - change.Record.RecordsXML = fmt.Sprintf( - "%s", buf.String()) - reqCopy.Changes[i] = change - } - } - - zone = CleanZoneID(zone) - out := &ChangeResourceRecordSetsResponse{} - if err := r.query("POST", fmt.Sprintf("/%s/hostedzone/%s/rrset", APIVersion, - zone), reqCopy, out); err != nil { - return nil, err - } - return out, nil -} - -type ListOpts struct { - Name string - Type string - Identifier string - MaxItems int -} - -type ListResourceRecordSetsResponse struct { - Records []ResourceRecordSet `xml:"ResourceRecordSets>ResourceRecordSet"` - IsTruncated bool `xml:"IsTruncated"` - MaxItems int `xml:"MaxItems"` - NextRecordName string `xml:"NextRecordName"` - NextRecordType string `xml:"NextRecordType"` - NextRecordIdentifier string `xml:"NextRecordIdentifier"` -} - -type ResourceRecordSet struct { - Name string `xml:"Name"` - Type string `xml:"Type"` - TTL int `xml:"TTL"` - Records []string `xml:"ResourceRecords>ResourceRecord>Value,omitempty"` - SetIdentifier string `xml:"SetIdentifier,omitempty"` - Weight int `xml:"Weight,omitempty"` - HealthCheckId string `xml:"HealthCheckId,omitempty"` - Region string `xml:"Region,omitempty"` - Failover string `xml:"Failover,omitempty"` - AliasTarget *AliasTarget `xml:"AliasTarget,omitempty"` - - RecordsXML string `xml:",innerxml"` -} - -func (r *Route53) ListResourceRecordSets(zone string, lopts *ListOpts) 
(*ListResourceRecordSetsResponse, error) { - if lopts == nil { - lopts = &ListOpts{} - } - params := make(map[string]string) - if lopts.Name != "" { - params["name"] = lopts.Name - } - if lopts.Type != "" { - params["type"] = lopts.Type - } - if lopts.Identifier != "" { - params["identifier"] = lopts.Identifier - } - if lopts.MaxItems != 0 { - params["maxitems"] = strconv.FormatInt(int64(lopts.MaxItems), 10) - } - - req := multimap(params) - zone = CleanZoneID(zone) - out := &ListResourceRecordSetsResponse{} - if err := r.query("GET", fmt.Sprintf("/%s/hostedzone/%s/rrset", APIVersion, zone), req, out); err != nil { - return nil, err - } - return out, nil -} - -func FQDN(name string) string { - n := len(name) - if n == 0 || name[n-1] == '.' { - return name - } else { - return name + "." - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/route53_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/route53_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/route53_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/route53_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,194 +0,0 @@ -package route53 - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "log" - "testing" - - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/testutil" -) - -var testServer *testutil.HTTPServer - -func init() { - testServer = testutil.NewHTTPServer() - testServer.Start() -} - -func makeTestServer() *testutil.HTTPServer { - testServer.Flush() - log.Printf("Flush") - return testServer -} - -func makeClient(server *testutil.HTTPServer) *Route53 { - auth := aws.Auth{"abc", "123", ""} - return NewWithClient(auth, aws.Region{Route53Endpoint: server.URL}, testutil.DefaultClient) -} - -func TestCreateHostedZone(t *testing.T) { - testServer := makeTestServer() - 
client := makeClient(testServer) - testServer.Response(201, nil, CreateHostedZoneExample) - - req := &CreateHostedZoneRequest{ - Name: "example.com", - Comment: "Testing", - } - - resp, err := client.CreateHostedZone(req) - if err != nil { - t.Fatalf("err: %v", err) - } - - if resp.HostedZone.ID != "/hostedzone/Z1PA6795UKMFR9" { - t.Fatalf("bad: %v", resp) - } - if resp.ChangeInfo.ID != "/change/C1PA6795UKMFR9" { - t.Fatalf("bad: %v", resp) - } - if resp.DelegationSet.NameServers[3] != "ns-2051.awsdns-67.co.uk" { - t.Fatalf("bad: %v", resp) - } - - httpReq := testServer.WaitRequest() - if httpReq.URL.Path != "/2013-04-01/hostedzone" { - t.Fatalf("bad: %#v", httpReq) - } - if httpReq.Method != "POST" { - t.Fatalf("bad: %#v", httpReq) - } - if httpReq.ContentLength == 0 { - t.Fatalf("bad: %#v", httpReq) - } -} - -func TestDeleteHostedZone(t *testing.T) { - testServer := makeTestServer() - client := makeClient(testServer) - testServer.Response(200, nil, DeleteHostedZoneExample) - - resp, err := client.DeleteHostedZone("/hostedzone/foobarbaz") - if err != nil { - t.Fatalf("err: %v", err) - } - - if resp.ChangeInfo.ID != "/change/C1PA6795UKMFR9" { - t.Fatalf("bad: %v", resp) - } -} - -func TestGetHostedZone(t *testing.T) { - testServer := makeTestServer() - client := makeClient(testServer) - testServer.Response(200, nil, GetHostedZoneExample) - - resp, err := client.GetHostedZone("/hostedzone/foobarbaz") - if err != nil { - t.Fatalf("err: %v", err) - } - - if resp.HostedZone.CallerReference != "myUniqueIdentifier" { - t.Fatalf("bad: %v", resp) - } -} - -func TestGetChange(t *testing.T) { - testServer := makeTestServer() - client := makeClient(testServer) - testServer.Response(200, nil, GetChangeExample) - - status, err := client.GetChange("/change/abcd") - if err != nil { - t.Fatalf("err: %v", err) - } - - if status != "INSYNC" { - t.Fatalf("bad: %v", status) - } -} - -func TestChangeResourceRecordSets(t *testing.T) { - testServer := makeTestServer() - client := 
makeClient(testServer) - testServer.Response(200, nil, ChangeResourceRecordSetsExample) - - req := &ChangeResourceRecordSetsRequest{ - Comment: "Test", - Changes: []Change{ - Change{ - Action: "CREATE", - Record: ResourceRecordSet{ - Name: "foo.hashicorp.com", - Type: "A", - TTL: 300, - Records: []string{"127.0.0.1"}, - }, - }, - }, - } - - resp, err := client.ChangeResourceRecordSets("Z1234", req) - if err != nil { - t.Fatalf("err: %v", err) - } - - if resp.ChangeInfo.ID != "/change/asdf" { - t.Fatalf("bad: %v", resp) - } -} - -func TestListResourceRecordSets(t *testing.T) { - testServer := makeTestServer() - client := makeClient(testServer) - testServer.Response(200, nil, ListResourceRecordSetsExample) - - resp, err := client.ListResourceRecordSets("Z1234", nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if resp.Records[0].Name != "example.com." { - t.Fatalf("bad: %v", resp) - } -} - -func TestListHostedZones(t *testing.T) { - testServer := makeTestServer() - client := makeClient(testServer) - testServer.Response(200, nil, ListHostedZonesExample) - - resp, err := client.ListHostedZones("", 0) - if err != nil { - t.Fatalf("err: %v", err) - } - - if resp.HostedZones[0].Name != "example.com." { - t.Fatalf("bad: %v", resp) - } - - if resp.HostedZones[1].Name != "sub.example.com." 
{ - t.Fatalf("bad: %v", resp) - } -} - -func decode(t *testing.T, r io.Reader, out interface{}) { - var buf1 bytes.Buffer - var buf2 bytes.Buffer - b, err := io.Copy(io.MultiWriter(&buf1, &buf2), r) - if err != nil { - panic(fmt.Errorf("copy failed: %v", err)) - } - if b == 0 { - panic(fmt.Errorf("copy failed: zero bytes")) - } - dec := xml.NewDecoder(&buf1) - if err := dec.Decode(out); err != nil { - t.Errorf("body: %s||", buf2.Bytes()) - panic(fmt.Errorf("decode failed: %v", err)) - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/sign.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/route53/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -package route53 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "time" - - "github.com/mitchellh/goamz/aws" -) - -var b64 = base64.StdEncoding - -func sign(auth aws.Auth, path string, params map[string]string) { - date := time.Now().In(time.UTC).Format(time.RFC1123) - params["Date"] = date - hash := hmac.New(sha256.New, []byte(auth.SecretKey)) - hash.Write([]byte(date)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - header := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s", - auth.AccessKey, signature) - params["X-Amzn-Authorization"] = string(header) - if auth.Token != "" { - params["X-Amz-Security-Token"] = auth.Token - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/export_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/export_test.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/export_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -package s3 - -import ( - "github.com/mitchellh/goamz/aws" -) - -var originalStrategy = attempts - -func SetAttemptStrategy(s *aws.AttemptStrategy) { - if s == nil { - attempts = originalStrategy - } else { - attempts = *s - } -} - -func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) { - sign(auth, method, path, params, headers) -} - -func SetListPartsMax(n int) { - listPartsMax = n -} - -func SetListMultiMax(n int) { - listMultiMax = n -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/multi.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/multi.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/multi.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/multi.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,409 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "errors" - "io" - "sort" - "strconv" -) - -// Multi represents an unfinished multipart upload. -// -// Multipart uploads allow sending big objects in smaller chunks. -// After all parts have been sent, the upload must be explicitly -// completed by calling Complete with the list of parts. -// -// See http://goo.gl/vJfTG for an overview of multipart uploads. -type Multi struct { - Bucket *Bucket - Key string - UploadId string -} - -// That's the default. Here just for testing. 
-var listMultiMax = 1000 - -type listMultiResp struct { - NextKeyMarker string - NextUploadIdMarker string - IsTruncated bool - Upload []Multi - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` -} - -// ListMulti returns the list of unfinished multipart uploads in b. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. You can use prefixes to separate a bucket into different -// groupings of keys (to get the feeling of folders, for example). -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// See http://goo.gl/ePioY for details. -func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { - params := map[string][]string{ - "uploads": {""}, - "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, - "prefix": {prefix}, - "delimiter": {delim}, - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: b.Name, - params: params, - } - var resp listMultiResp - err := b.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, nil, err - } - for i := range resp.Upload { - multi := &resp.Upload[i] - multi.Bucket = b - multis = append(multis, multi) - } - prefixes = append(prefixes, resp.CommonPrefixes...) - if !resp.IsTruncated { - return multis, prefixes, nil - } - params["key-marker"] = []string{resp.NextKeyMarker} - params["upload-id-marker"] = []string{resp.NextUploadIdMarker} - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -// Multi returns a multipart upload handler for the provided key -// inside b. 
If a multipart upload exists for key, it is returned, -// otherwise a new multipart upload is initiated with contType and perm. -func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) { - multis, _, err := b.ListMulti(key, "") - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - for _, m := range multis { - if m.Key == key { - return m, nil - } - } - return b.InitMulti(key, contType, perm) -} - -// InitMulti initializes a new multipart upload at the provided -// key inside b and returns a value for manipulating it. -// -// See http://goo.gl/XP8kL for details. -func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) { - headers := map[string][]string{ - "Content-Type": {contType}, - "Content-Length": {"0"}, - "x-amz-acl": {string(perm)}, - } - params := map[string][]string{ - "uploads": {""}, - } - req := &request{ - method: "POST", - bucket: b.Name, - path: key, - headers: headers, - params: params, - } - var err error - var resp struct { - UploadId string `xml:"UploadId"` - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, &resp) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil -} - -// PutPart sends part n of the multipart upload, reading all the content from r. -// Each part, except for the last one, must be at least 5MB in size. -// -// See http://goo.gl/pqZer for details. 
-func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64) -} - -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(partSize, 10)}, - "Content-MD5": {md5b64}, - } - params := map[string][]string{ - "uploadId": {m.UploadId}, - "partNumber": {strconv.FormatInt(int64(n), 10)}, - } - for attempt := attempts.Start(); attempt.Next(); { - _, err := r.Seek(0, 0) - if err != nil { - return Part{}, err - } - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - payload: r, - } - err = m.Bucket.S3.prepare(req) - if err != nil { - return Part{}, err - } - resp, err := m.Bucket.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return Part{}, err - } - etag := resp.Header.Get("ETag") - if etag == "" { - return Part{}, errors.New("part upload succeeded with no ETag") - } - return Part{n, etag, partSize}, nil - } - panic("unreachable") -} - -func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { - _, err = r.Seek(0, 0) - if err != nil { - return 0, "", "", err - } - digest := md5.New() - size, err = io.Copy(digest, r) - if err != nil { - return 0, "", "", err - } - sum := digest.Sum(nil) - md5hex = hex.EncodeToString(sum) - md5b64 = base64.StdEncoding.EncodeToString(sum) - return size, md5hex, md5b64, nil -} - -type Part struct { - N int `xml:"PartNumber"` - ETag string - Size int64 -} - -type partSlice []Part - -func (s partSlice) Len() int { return len(s) } -func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } -func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type listPartsResp struct { - NextPartNumberMarker string - IsTruncated bool - Part []Part -} - -// That's 
the default. Here just for testing. -var listPartsMax = 1000 - -// ListParts returns the list of previously uploaded parts in m, -// ordered by part number. -// -// See http://goo.gl/ePioY for details. -func (m *Multi) ListParts() ([]Part, error) { - params := map[string][]string{ - "uploadId": {m.UploadId}, - "max-parts": {strconv.FormatInt(int64(listPartsMax), 10)}, - } - var parts partSlice - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - var resp listPartsResp - err := m.Bucket.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - parts = append(parts, resp.Part...) - if !resp.IsTruncated { - sort.Sort(parts) - return parts, nil - } - params["part-number-marker"] = []string{resp.NextPartNumberMarker} - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -type ReaderAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// PutAll sends all of r via a multipart upload with parts no larger -// than partSize bytes, which must be set to at least 5MB. -// Parts previously uploaded are either reused if their checksum -// and size match the new part, or otherwise overwritten with the -// new content. -// PutAll returns all the parts of m (reused or not). -func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { - old, err := m.ListParts() - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - reuse := 0 // Index of next old part to consider reusing. - current := 1 // Part number of latest good part handled. - totalSize, err := r.Seek(0, 2) - if err != nil { - return nil, err - } - first := true // Must send at least one empty part if the file is empty. 
- var result []Part -NextSection: - for offset := int64(0); offset < totalSize || first; offset += partSize { - first = false - if offset+partSize > totalSize { - partSize = totalSize - offset - } - section := io.NewSectionReader(r, offset, partSize) - _, md5hex, md5b64, err := seekerInfo(section) - if err != nil { - return nil, err - } - for reuse < len(old) && old[reuse].N <= current { - // Looks like this part was already sent. - part := &old[reuse] - etag := `"` + md5hex + `"` - if part.N == current && part.Size == partSize && part.ETag == etag { - // Checksum matches. Reuse the old part. - result = append(result, *part) - current++ - continue NextSection - } - reuse++ - } - - // Part wasn't found or doesn't match. Send it. - part, err := m.putPart(current, section, partSize, md5b64) - if err != nil { - return nil, err - } - result = append(result, part) - current++ - } - return result, nil -} - -type completeUpload struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts completeParts `xml:"Part"` -} - -type completePart struct { - PartNumber int - ETag string -} - -type completeParts []completePart - -func (p completeParts) Len() int { return len(p) } -func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } -func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Complete assembles the given previously uploaded parts into the -// final object. This operation may take several minutes. -// -// See http://goo.gl/2Z7Tw for details. 
-func (m *Multi) Complete(parts []Part) error { - params := map[string][]string{ - "uploadId": {m.UploadId}, - } - c := completeUpload{} - for _, p := range parts { - c.Parts = append(c.Parts, completePart{p.N, p.ETag}) - } - sort.Sort(c.Parts) - data, err := xml.Marshal(&c) - if err != nil { - return err - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "POST", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - payload: bytes.NewReader(data), - } - err := m.Bucket.S3.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} - -// Abort deletes an unifinished multipart upload and any previously -// uploaded parts for it. -// -// After a multipart upload is aborted, no additional parts can be -// uploaded using it. However, if any part uploads are currently in -// progress, those part uploads might or might not succeed. As a result, -// it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. -// -// NOTE: If the described scenario happens to you, please report back to -// the goamz authors with details. In the future such retrying should be -// handled internally, but it's not clear what happens precisely (Is an -// error returned? Is the issue completely undetectable?). -// -// See http://goo.gl/dnyJw for details. 
-func (m *Multi) Abort() error { - params := map[string][]string{ - "uploadId": {m.UploadId}, - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "DELETE", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - err := m.Bucket.S3.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/multi_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/multi_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/multi_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/multi_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,370 +0,0 @@ -package s3_test - -import ( - "encoding/xml" - "github.com/mitchellh/goamz/s3" - . "github.com/motain/gocheck" - "io" - "io/ioutil" - "strings" -) - -func (s *S) TestInitMulti(c *C) { - testServer.Response(200, nil, InitMultiResultDump) - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "POST") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"}) - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - - c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--") -} - -func (s *S) TestMultiNoPreviousUpload(c *C) { - // Don't retry the NoSuchUpload error. 
- s.DisableRetries() - - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, nil, InitMultiResultDump) - - b := s.s3.Bucket("sample") - - multi, err := b.Multi("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/") - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"}) - - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "POST") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - - c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--") -} - -func (s *S) TestMultiReturnOld(c *C) { - testServer.Response(200, nil, ListMultiResultDump) - - b := s.s3.Bucket("sample") - - multi, err := b.Multi("multi1", "text/plain", s3.Private) - c.Assert(err, IsNil) - c.Assert(multi.Key, Equals, "multi1") - c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/") - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"}) -} - -func (s *S) TestListParts(c *C) { - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, nil, ListPartsResultDump1) - testServer.Response(404, nil, NoSuchUploadErrorDump) // :-( - testServer.Response(200, nil, ListPartsResultDump2) - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - parts, err := multi.ListParts() - c.Assert(err, IsNil) - c.Assert(parts, HasLen, 3) - c.Assert(parts[0].N, Equals, 1) - c.Assert(parts[0].Size, Equals, int64(5)) - c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) - c.Assert(parts[1].N, Equals, 2) - c.Assert(parts[1].Size, Equals, int64(5)) - 
c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`) - c.Assert(parts[2].N, Equals, 3) - c.Assert(parts[2].Size, Equals, int64(5)) - c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`) - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"}) - - testServer.WaitRequest() // The internal error. - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"}) - c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"}) -} - -func (s *S) TestPutPart(c *C) { - headers := map[string]string{ - "ETag": `"26f90efd10d614f100252ff56d88dad8"`, - } - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, headers, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - part, err := multi.PutPart(1, strings.NewReader("")) - c.Assert(err, IsNil) - c.Assert(part.N, Equals, 1) - c.Assert(part.Size, Equals, int64(8)) - c.Assert(part.ETag, Equals, headers["ETag"]) - - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"}) - c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="}) -} - -func readAll(r io.Reader) string { - data, err := ioutil.ReadAll(r) - if err != nil { - panic(err) - } - return string(data) -} - -func 
(s *S) TestPutAllNoPreviousUpload(c *C) { - // Don't retry the NoSuchUpload error. - s.DisableRetries() - - etag1 := map[string]string{"ETag": `"etag1"`} - etag2 := map[string]string{"ETag": `"etag2"`} - etag3 := map[string]string{"ETag": `"etag3"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, etag1, "") - testServer.Response(200, etag2, "") - testServer.Response(200, etag3, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5) - c.Assert(parts, HasLen, 3) - c.Assert(parts[0].ETag, Equals, `"etag1"`) - c.Assert(parts[1].ETag, Equals, `"etag2"`) - c.Assert(parts[2].ETag, Equals, `"etag3"`) - c.Assert(err, IsNil) - - // Init - testServer.WaitRequest() - - // List old parts. Won't find anything. - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - - // Send part 1. - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), Equals, "part1") - - // Send part 2. - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), Equals, "part2") - - // Send part 3 with shorter body. 
- req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"}) - c.Assert(readAll(req.Body), Equals, "last") -} - -func (s *S) TestPutAllZeroSizeFile(c *C) { - // Don't retry the NoSuchUpload error. - s.DisableRetries() - - etag1 := map[string]string{"ETag": `"etag1"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, etag1, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - // Must send at least one part, so that completing it will work. - parts, err := multi.PutAll(strings.NewReader(""), 5) - c.Assert(parts, HasLen, 1) - c.Assert(parts[0].ETag, Equals, `"etag1"`) - c.Assert(err, IsNil) - - // Init - testServer.WaitRequest() - - // List old parts. Won't find anything. - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - - // Send empty part. - req = testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"}) - c.Assert(readAll(req.Body), Equals, "") -} - -func (s *S) TestPutAllResume(c *C) { - etag2 := map[string]string{"ETag": `"etag2"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, nil, ListPartsResultDump1) - testServer.Response(200, nil, ListPartsResultDump2) - testServer.Response(200, etag2, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - // "part1" and "part3" match the checksums in ResultDump1. - // The middle one is a mismatch (it refers to "part2"). 
- parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5) - c.Assert(parts, HasLen, 3) - c.Assert(parts[0].N, Equals, 1) - c.Assert(parts[0].Size, Equals, int64(5)) - c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) - c.Assert(parts[1].N, Equals, 2) - c.Assert(parts[1].Size, Equals, int64(5)) - c.Assert(parts[1].ETag, Equals, `"etag2"`) - c.Assert(parts[2].N, Equals, 3) - c.Assert(parts[2].Size, Equals, int64(5)) - c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`) - c.Assert(err, IsNil) - - // Init - testServer.WaitRequest() - - // List old parts, broken in two requests. - for i := 0; i < 2; i++ { - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/multi") - } - - // Send part 2, as it didn't match the checksum. - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), Equals, "partX") -} - -func (s *S) TestMultiComplete(c *C) { - testServer.Response(200, nil, InitMultiResultDump) - // Note the 200 response. Completing will hold the connection on some - // kind of long poll, and may return a late error even after a 200. 
- testServer.Response(200, nil, InternalErrorDump) - testServer.Response(200, nil, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}}) - c.Assert(err, IsNil) - - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "POST") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") - - var payload struct { - XMLName xml.Name - Part []struct { - PartNumber int - ETag string - } - } - - dec := xml.NewDecoder(req.Body) - err = dec.Decode(&payload) - c.Assert(err, IsNil) - - c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload") - c.Assert(len(payload.Part), Equals, 2) - c.Assert(payload.Part[0].PartNumber, Equals, 1) - c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`) - c.Assert(payload.Part[1].PartNumber, Equals, 2) - c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`) -} - -func (s *S) TestMultiAbort(c *C) { - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, nil, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - - err = multi.Abort() - c.Assert(err, IsNil) - - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "DELETE") - c.Assert(req.URL.Path, Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--") -} - -func (s *S) TestListMulti(c *C) { - testServer.Response(200, nil, ListMultiResultDump) - - b := s.s3.Bucket("sample") - - multis, prefixes, err := b.ListMulti("", "/") - c.Assert(err, IsNil) - c.Assert(prefixes, DeepEquals, []string{"a/", "b/"}) - c.Assert(multis, HasLen, 2) - c.Assert(multis[0].Key, Equals, "multi1") - c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD") - c.Assert(multis[1].Key, Equals, "multi2") - 
c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/sample/") - c.Assert(req.Form["uploads"], DeepEquals, []string{""}) - c.Assert(req.Form["prefix"], DeepEquals, []string{""}) - c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"}) - c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"}) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/responses_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/responses_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/responses_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/responses_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,241 +0,0 @@ -package s3_test - -var GetObjectErrorDump = ` - -NoSuchBucketThe specified bucket does not exist -non-existent-bucket3F1B667FAD71C3D8 -L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D -` - -var GetListResultDump1 = ` - - - quotes - N - false - - Nelson - 2006-01-01T12:00:00.000Z - "828ef3fdfa96f00ad9f27c383fc9ac7f" - 5 - STANDARD - - bcaf161ca5fb16fd081034f - webfile - - - - Neo - 2006-01-01T12:00:00.000Z - "828ef3fdfa96f00ad9f27c383fc9ac7f" - 4 - STANDARD - - bcaf1ffd86a5fb16fd081034f - webfile - - - -` - -var GetListResultDump2 = ` - - example-bucket - photos/2006/ - some-marker - 1000 - / - false - - - photos/2006/feb/ - - - photos/2006/jan/ - - -` - -var InitMultiResultDump = ` - - - sample - multi - JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ-- - -` - -var ListPartsResultDump1 = ` - - - sample - multi - JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ-- - - bb5c0f63b0b25f2d099c - joe - - - bb5c0f63b0b25f2d099c - joe - - STANDARD - 0 - 2 - 2 - true - - 1 - 
2013-01-30T13:45:51.000Z - "ffc88b4ca90a355f8ddba6b2c3b2af5c" - 5 - - - 2 - 2013-01-30T13:45:52.000Z - "d067a0fa9dc61a6e7195ca99696b5a89" - 5 - - -` - -var ListPartsResultDump2 = ` - - - sample - multi - JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ-- - - bb5c0f63b0b25f2d099c - joe - - - bb5c0f63b0b25f2d099c - joe - - STANDARD - 2 - 3 - 2 - false - - 3 - 2013-01-30T13:46:50.000Z - "49dcd91231f801159e893fb5c6674985" - 5 - - -` - -var ListMultiResultDump = ` - - - goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a - - - multi1 - iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ-- - / - 1000 - false - - multi1 - iUVug89pPvSswrikD - - bb5c0f63b0b25f2d0 - gustavoniemeyer - - - bb5c0f63b0b25f2d0 - gustavoniemeyer - - STANDARD - 2013-01-30T18:15:47.000Z - - - multi2 - DkirwsSvPp98guVUi - - bb5c0f63b0b25f2d0 - joe - - - bb5c0f63b0b25f2d0 - joe - - STANDARD - 2013-01-30T18:15:47.000Z - - - a/ - - - b/ - - -` - -var NoSuchUploadErrorDump = ` - - - NoSuchUpload - Not relevant - sample - 3F1B667FAD71C3D8 - kjhwqk - -` - -var InternalErrorDump = ` - - - InternalError - Not relevant - sample - 3F1B667FAD71C3D8 - kjhwqk - -` - -var GetKeyHeaderDump = map[string]string{ - "x-amz-id-2": "ef8yU9AS1ed4OpIszj7UDNEHGran", - "x-amz-request-id": "318BC8BC143432E5", - "x-amz-version-id": "3HL4kqtJlcpXroDTDmjVBH40Nrjfkd", - "Date": "Wed, 28 Oct 2009 22:32:00 GMT", - "Last-Modified": "Sun, 1 Jan 2006 12:00:00 GMT", - "ETag": "fba9dede5f27731c9771645a39863328", - "Content-Length": "434234", - "Content-Type": "text/plain", -} - -var GetListBucketsDump = ` - - - - bb5c0f63b0b25f2d0 - joe - - - - bucket1 - 2012-01-01T02:03:04.000Z - - - bucket2 - 2014-01-11T02:03:04.000Z - - - -` - -var MultiDelDump = ` - - - - a.go - - - b.go - - -` diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,893 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd. -// -// Written by Gustavo Niemeyer -// - -package s3 - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/xml" - "fmt" - "github.com/mitchellh/goamz/aws" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/httputil" - "net/url" - "strconv" - "strings" - "time" -) - -const debug = false - -// The S3 type encapsulates operations with an S3 region. -type S3 struct { - aws.Auth - aws.Region - HTTPClient func() *http.Client - - private byte // Reserve the right of using private data. -} - -// The Bucket type encapsulates operations with an S3 bucket. -type Bucket struct { - *S3 - Name string -} - -// The Owner type represents the owner of the object in an S3 bucket. -type Owner struct { - ID string - DisplayName string -} - -var attempts = aws.AttemptStrategy{ - Min: 5, - Total: 5 * time.Second, - Delay: 200 * time.Millisecond, -} - -// New creates a new S3. -func New(auth aws.Auth, region aws.Region) *S3 { - return &S3{ - Auth: auth, - Region: region, - HTTPClient: func() *http.Client { - return http.DefaultClient - }, - private: 0} -} - -// Bucket returns a Bucket with the given name. -func (s3 *S3) Bucket(name string) *Bucket { - if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket { - name = strings.ToLower(name) - } - return &Bucket{s3, name} -} - -var createBucketConfiguration = ` - %s -` - -// locationConstraint returns an io.Reader specifying a LocationConstraint if -// required for the region. -// -// See http://goo.gl/bh9Kq for details. 
-func (s3 *S3) locationConstraint() io.Reader { - constraint := "" - if s3.Region.S3LocationConstraint { - constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name) - } - return strings.NewReader(constraint) -} - -type ACL string - -const ( - Private = ACL("private") - PublicRead = ACL("public-read") - PublicReadWrite = ACL("public-read-write") - AuthenticatedRead = ACL("authenticated-read") - BucketOwnerRead = ACL("bucket-owner-read") - BucketOwnerFull = ACL("bucket-owner-full-control") -) - -// The ListBucketsResp type holds the results of a List buckets operation. -type ListBucketsResp struct { - Buckets []Bucket `xml:">Bucket"` -} - -// ListBuckets lists all buckets -// -// See: http://goo.gl/NqlyMN -func (s3 *S3) ListBuckets() (result *ListBucketsResp, err error) { - req := &request{ - path: "/", - } - result = &ListBucketsResp{} - for attempt := attempts.Start(); attempt.Next(); { - err = s3.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - // set S3 instance on buckets - for i := range result.Buckets { - result.Buckets[i].S3 = s3 - } - return result, nil -} - -// PutBucket creates a new bucket. -// -// See http://goo.gl/ndjnR for details. -func (b *Bucket) PutBucket(perm ACL) error { - headers := map[string][]string{ - "x-amz-acl": {string(perm)}, - } - req := &request{ - method: "PUT", - bucket: b.Name, - path: "/", - headers: headers, - payload: b.locationConstraint(), - } - return b.S3.query(req, nil) -} - -// DelBucket removes an existing S3 bucket. All objects in the bucket must -// be removed before the bucket itself can be removed. -// -// See http://goo.gl/GoBrY for details. -func (b *Bucket) DelBucket() (err error) { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, nil) - if !shouldRetry(err) { - break - } - } - return err -} - -// Get retrieves an object from an S3 bucket. 
-// -// See http://goo.gl/isCO7 for details. -func (b *Bucket) Get(path string) (data []byte, err error) { - body, err := b.GetReader(path) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(body) - body.Close() - return data, err -} - -// GetReader retrieves an object from an S3 bucket. -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { - resp, err := b.GetResponse(path) - if resp != nil { - return resp.Body, err - } - return nil, err -} - -// GetResponse retrieves an object from an S3 bucket returning the http response -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetResponse(path string) (*http.Response, error) { - return b.getResponseParams(path, nil) -} - -// GetTorrent retrieves an Torrent object from an S3 bucket an io.ReadCloser. -// It is the caller's responsibility to call Close on rc when finished reading. -func (b *Bucket) GetTorrentReader(path string) (io.ReadCloser, error) { - resp, err := b.getResponseParams(path, url.Values{"torrent": {""}}) - if err != nil { - return nil, err - } - return resp.Body, nil -} - -// GetTorrent retrieves an Torrent object from an S3, returning -// the torrent as a []byte. 
-func (b *Bucket) GetTorrent(path string) ([]byte, error) { - body, err := b.GetTorrentReader(path) - if err != nil { - return nil, err - } - defer body.Close() - - return ioutil.ReadAll(body) -} - -func (b *Bucket) getResponseParams(path string, params url.Values) (*http.Response, error) { - req := &request{ - bucket: b.Name, - path: path, - params: params, - } - err := b.S3.prepare(req) - if err != nil { - return nil, err - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -func (b *Bucket) Head(path string) (*http.Response, error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - } - err := b.S3.prepare(req) - if err != nil { - return nil, err - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// Put inserts an object into the S3 bucket. -// -// See http://goo.gl/FEBPD for details. -func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error { - body := bytes.NewBuffer(data) - return b.PutReader(path, body, int64(len(data)), contType, perm) -} - -/* -PutHeader - like Put, inserts an object into the S3 bucket. -Instead of Content-Type string, pass in custom headers to override defaults. -*/ -func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error { - body := bytes.NewBuffer(data) - return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm) -} - -// PutReader inserts an object into the S3 bucket by consuming data -// from r until EOF. 
-func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(length, 10)}, - "Content-Type": {contType}, - "x-amz-acl": {string(perm)}, - } - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - payload: r, - } - return b.S3.query(req, nil) -} - -/* -PutReaderHeader - like PutReader, inserts an object into S3 from a reader. -Instead of Content-Type string, pass in custom headers to override defaults. -*/ -func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error { - // Default headers - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(length, 10)}, - "Content-Type": {"application/text"}, - "x-amz-acl": {string(perm)}, - } - - // Override with custom headers - for key, value := range customHeaders { - headers[key] = value - } - - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - payload: r, - } - return b.S3.query(req, nil) -} - -/* -Copy - copy objects inside bucket -*/ -func (b *Bucket) Copy(oldPath, newPath string, perm ACL) error { - if !strings.HasPrefix(oldPath, "/") { - oldPath = "/" + oldPath - } - - req := &request{ - method: "PUT", - bucket: b.Name, - path: newPath, - headers: map[string][]string{ - "x-amz-copy-source": {amazonEscape("/" + b.Name + oldPath)}, - "x-amz-acl": {string(perm)}, - }, - } - - err := b.S3.prepare(req) - if err != nil { - return err - } - - for attempt := attempts.Start(); attempt.Next(); { - _, err = b.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return err - } - return nil - } - panic("unreachable") -} - -// Del removes an object from the S3 bucket. -// -// See http://goo.gl/APeTt for details. 
-func (b *Bucket) Del(path string) error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: path, - } - return b.S3.query(req, nil) -} - -type Object struct { - Key string -} - -type MultiObjectDeleteBody struct { - XMLName xml.Name `xml:"Delete"` - Quiet bool - Object []Object -} - -func base64md5(data []byte) string { - h := md5.New() - h.Write(data) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -// MultiDel removes multiple objects from the S3 bucket efficiently. -// A maximum of 1000 keys at once may be specified. -// -// See http://goo.gl/WvA5sj for details. -func (b *Bucket) MultiDel(paths []string) error { - // create XML payload - v := MultiObjectDeleteBody{} - v.Object = make([]Object, len(paths)) - for i, path := range paths { - v.Object[i] = Object{path} - } - data, _ := xml.Marshal(v) - - // Content-MD5 is required - md5hash := base64md5(data) - req := &request{ - method: "POST", - bucket: b.Name, - path: "/", - params: url.Values{"delete": {""}}, - headers: http.Header{"Content-MD5": {md5hash}}, - payload: bytes.NewReader(data), - } - - return b.S3.query(req, nil) -} - -// The ListResp type holds the results of a List bucket operation. -type ListResp struct { - Name string - Prefix string - Delimiter string - Marker string - NextMarker string - MaxKeys int - // IsTruncated is true if the results have been truncated because - // there are more keys and prefixes than can fit in MaxKeys. - // N.B. this is the opposite sense to that documented (incorrectly) in - // http://goo.gl/YjQTc - IsTruncated bool - Contents []Key - CommonPrefixes []string `xml:">Prefix"` -} - -// The Key type represents an item stored in an S3 bucket. -type Key struct { - Key string - LastModified string - Size int64 - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - StorageClass string - Owner Owner -} - -// List returns information about objects in an S3 bucket. 
-// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// The marker parameter specifies the key to start with when listing objects -// in a bucket. Amazon S3 lists objects in alphabetical order and -// will return keys alphabetically greater than the marker. -// -// The max parameter specifies how many keys + common prefixes to return in -// the response. The default is 1000. -// -// For example, given these keys in a bucket: -// -// index.html -// index2.html -// photos/2006/January/sample.jpg -// photos/2006/February/sample2.jpg -// photos/2006/February/sample3.jpg -// photos/2006/February/sample4.jpg -// -// Listing this bucket with delimiter set to "/" would yield the -// following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Contents: []Key{ -// {Key: "index.html", "index2.html"}, -// }, -// CommonPrefixes: []string{ -// "photos/", -// }, -// } -// -// Listing the same bucket with delimiter set to "/" and prefix set to -// "photos/2006/" would yield the following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Prefix: "photos/2006/", -// CommonPrefixes: []string{ -// "photos/2006/February/", -// "photos/2006/January/", -// }, -// } -// -// See http://goo.gl/YjQTc for details. 
-func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { - params := map[string][]string{ - "prefix": {prefix}, - "delimiter": {delim}, - "marker": {marker}, - } - if max != 0 { - params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} - } - req := &request{ - bucket: b.Name, - params: params, - } - result = &ListResp{} - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return result, nil -} - -// Returns a mapping of all key names in this bucket to Key objects -func (b *Bucket) GetBucketContents() (*map[string]Key, error) { - bucket_contents := map[string]Key{} - prefix := "" - path_separator := "" - marker := "" - for { - contents, err := b.List(prefix, path_separator, marker, 1000) - if err != nil { - return &bucket_contents, err - } - last_key := "" - for _, key := range contents.Contents { - bucket_contents[key.Key] = key - last_key = key.Key - } - if contents.IsTruncated { - marker = contents.NextMarker - if marker == "" { - // From the s3 docs: If response does not include the - // NextMarker and it is truncated, you can use the value of the - // last Key in the response as the marker in the subsequent - // request to get the next set of object keys. 
- marker = last_key - } - } else { - break - } - } - - return &bucket_contents, nil -} - -// Get metadata from the key without returning the key content -func (b *Bucket) GetKey(path string) (*Key, error) { - req := &request{ - bucket: b.Name, - path: path, - method: "HEAD", - } - err := b.S3.prepare(req) - if err != nil { - return nil, err - } - key := &Key{} - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - key.Key = path - key.LastModified = resp.Header.Get("Last-Modified") - key.ETag = resp.Header.Get("ETag") - contentLength := resp.Header.Get("Content-Length") - size, err := strconv.ParseInt(contentLength, 10, 64) - if err != nil { - return key, fmt.Errorf("bad s3 content-length %v: %v", - contentLength, err) - } - key.Size = size - return key, nil - } - panic("unreachable") -} - -// URL returns a non-signed URL that allows retriving the -// object at path. It only works if the object is publicly -// readable (see SignedURL). -func (b *Bucket) URL(path string) string { - req := &request{ - bucket: b.Name, - path: path, - } - err := b.S3.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url(true) - if err != nil { - panic(err) - } - u.RawQuery = "" - return u.String() -} - -// SignedURL returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. 
-func (b *Bucket) SignedURL(path string, expires time.Time) string { - req := &request{ - bucket: b.Name, - path: path, - params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}}, - } - err := b.S3.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url(true) - if err != nil { - panic(err) - } - return u.String() -} - -type request struct { - method string - bucket string - path string - signpath string - params url.Values - headers http.Header - baseurl string - payload io.Reader - prepared bool -} - -// amazonShouldEscape returns true if byte should be escaped -func amazonShouldEscape(c byte) bool { - return !((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || - (c >= '0' && c <= '9') || c == '_' || c == '-' || c == '~' || c == '.' || c == '/' || c == ':') -} - -// amazonEscape does uri escaping exactly as Amazon does -func amazonEscape(s string) string { - hexCount := 0 - - for i := 0; i < len(s); i++ { - if amazonShouldEscape(s[i]) { - hexCount++ - } - } - - if hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - if c := s[i]; amazonShouldEscape(c) { - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - } else { - t[j] = s[i] - j++ - } - } - return string(t) -} - -// url returns url to resource, either full (with host/scheme) or -// partial for HTTP request -func (req *request) url(full bool) (*url.URL, error) { - u, err := url.Parse(req.baseurl) - if err != nil { - return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) - } - - u.Opaque = amazonEscape(req.path) - if full { - u.Opaque = "//" + u.Host + u.Opaque - } - u.RawQuery = req.params.Encode() - - return u, nil -} - -// query prepares and runs the req request. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. 
-func (s3 *S3) query(req *request, resp interface{}) error { - err := s3.prepare(req) - if err == nil { - var httpResponse *http.Response - httpResponse, err = s3.run(req, resp) - if resp == nil && httpResponse != nil { - httpResponse.Body.Close() - } - } - return err -} - -// prepare sets up req to be delivered to S3. -func (s3 *S3) prepare(req *request) error { - if !req.prepared { - req.prepared = true - if req.method == "" { - req.method = "GET" - } - // Copy so they can be mutated without affecting on retries. - params := make(url.Values) - headers := make(http.Header) - for k, v := range req.params { - params[k] = v - } - for k, v := range req.headers { - headers[k] = v - } - req.params = params - req.headers = headers - if !strings.HasPrefix(req.path, "/") { - req.path = "/" + req.path - } - req.signpath = req.path - - if req.bucket != "" { - req.baseurl = s3.Region.S3BucketEndpoint - if req.baseurl == "" { - // Use the path method to address the bucket. - req.baseurl = s3.Region.S3Endpoint - req.path = "/" + req.bucket + req.path - } else { - // Just in case, prevent injection. - if strings.IndexAny(req.bucket, "/:@") >= 0 { - return fmt.Errorf("bad S3 bucket: %q", req.bucket) - } - req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1) - } - req.signpath = "/" + req.bucket + req.signpath - } else { - req.baseurl = s3.Region.S3Endpoint - } - } - - // Always sign again as it's not clear how far the - // server has handled a previous attempt. - u, err := url.Parse(req.baseurl) - if err != nil { - return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) - } - req.headers["Host"] = []string{u.Host} - req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)} - sign(s3.Auth, req.method, amazonEscape(req.signpath), req.params, req.headers) - return nil -} - -// run sends req and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. 
-func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) { - if debug { - log.Printf("Running S3 request: %#v", req) - } - - u, err := req.url(false) - if err != nil { - return nil, err - } - - hreq := http.Request{ - URL: u, - Method: req.method, - ProtoMajor: 1, - ProtoMinor: 1, - Close: true, - Header: req.headers, - } - - if v, ok := req.headers["Content-Length"]; ok { - hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64) - delete(req.headers, "Content-Length") - } - if req.payload != nil { - hreq.Body = ioutil.NopCloser(req.payload) - } - - hresp, err := s3.HTTPClient().Do(&hreq) - if err != nil { - return nil, err - } - if debug { - dump, _ := httputil.DumpResponse(hresp, true) - log.Printf("} -> %s\n", dump) - } - if hresp.StatusCode != 200 && hresp.StatusCode != 204 { - defer hresp.Body.Close() - return nil, buildError(hresp) - } - if resp != nil { - err = xml.NewDecoder(hresp.Body).Decode(resp) - hresp.Body.Close() - } - return hresp, err -} - -// Error represents an error in an operation with S3. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - Code string // EC2 error code ("UnsupportedOperation", ...) - Message string // The human-oriented error message - BucketName string - RequestId string - HostId string -} - -func (e *Error) Error() string { - return e.Message -} - -func buildError(r *http.Response) error { - if debug { - log.Printf("got error (status code %v)", r.StatusCode) - data, err := ioutil.ReadAll(r.Body) - if err != nil { - log.Printf("\tread error: %v", err) - } else { - log.Printf("\tdata:\n%s\n\n", data) - } - r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - } - - err := Error{} - // TODO return error if Unmarshal fails? 
- xml.NewDecoder(r.Body).Decode(&err) - r.Body.Close() - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - if debug { - log.Printf("err: %#v\n", err) - } - return &err -} - -func shouldRetry(err error) bool { - if err == nil { - return false - } - switch err { - case io.ErrUnexpectedEOF, io.EOF: - return true - } - switch e := err.(type) { - case *net.DNSError: - return true - case *net.OpError: - switch e.Op { - case "read", "write": - return true - } - case *Error: - switch e.Code { - case "InternalError", "NoSuchUpload", "NoSuchBucket": - return true - } - } - return false -} - -func hasCode(err error, code string) bool { - s3err, ok := err.(*Error) - return ok && s3err.Code == code -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3i_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3i_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3i_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3i_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,616 +0,0 @@ -package s3_test - -import ( - "bytes" - "crypto/md5" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/s3" - "github.com/mitchellh/goamz/testutil" - . "github.com/motain/gocheck" - "net" - "sort" - "time" -) - -// AmazonServer represents an Amazon S3 server. 
-type AmazonServer struct { - auth aws.Auth -} - -func (s *AmazonServer) SetUp(c *C) { - auth, err := aws.EnvAuth() - if err != nil { - c.Fatal(err.Error()) - } - s.auth = auth -} - -var _ = Suite(&AmazonClientSuite{Region: aws.USEast}) -var _ = Suite(&AmazonClientSuite{Region: aws.EUWest}) -var _ = Suite(&AmazonClientSuite{Region: aws.EUCentral}) -var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast}) - -// AmazonClientSuite tests the client against a live S3 server. -type AmazonClientSuite struct { - aws.Region - srv AmazonServer - ClientTests -} - -func (s *AmazonClientSuite) SetUpSuite(c *C) { - if !testutil.Amazon { - c.Skip("live tests against AWS disabled (no -amazon)") - } - s.srv.SetUp(c) - s.s3 = s3.New(s.srv.auth, s.Region) - // In case tests were interrupted in the middle before. - s.ClientTests.Cleanup() -} - -func (s *AmazonClientSuite) TearDownTest(c *C) { - s.ClientTests.Cleanup() -} - -// AmazonDomainClientSuite tests the client against a live S3 -// server using bucket names in the endpoint domain name rather -// than the request path. -type AmazonDomainClientSuite struct { - aws.Region - srv AmazonServer - ClientTests -} - -func (s *AmazonDomainClientSuite) SetUpSuite(c *C) { - if !testutil.Amazon { - c.Skip("live tests against AWS disabled (no -amazon)") - } - s.srv.SetUp(c) - region := s.Region - region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com" - s.s3 = s3.New(s.srv.auth, region) - s.ClientTests.Cleanup() -} - -func (s *AmazonDomainClientSuite) TearDownTest(c *C) { - s.ClientTests.Cleanup() -} - -// ClientTests defines integration tests designed to test the client. -// It is not used as a test suite in itself, but embedded within -// another type. -type ClientTests struct { - s3 *s3.S3 - authIsBroken bool -} - -func (s *ClientTests) Cleanup() { - killBucket(testBucket(s.s3)) -} - -func testBucket(s *s3.S3) *s3.Bucket { - // Watch out! 
If this function is corrupted and made to match with something - // people own, killBucket will happily remove *everything* inside the bucket. - key := s.Auth.AccessKey - if len(key) >= 8 { - key = s.Auth.AccessKey[:8] - } - return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key)) -} - -var attempts = aws.AttemptStrategy{ - Min: 5, - Total: 20 * time.Second, - Delay: 100 * time.Millisecond, -} - -func killBucket(b *s3.Bucket) { - var err error - for attempt := attempts.Start(); attempt.Next(); { - err = b.DelBucket() - if err == nil { - return - } - if _, ok := err.(*net.DNSError); ok { - return - } - e, ok := err.(*s3.Error) - if ok && e.Code == "NoSuchBucket" { - return - } - if ok && e.Code == "BucketNotEmpty" { - // Errors are ignored here. Just retry. - resp, err := b.List("", "", "", 1000) - if err == nil { - for _, key := range resp.Contents { - _ = b.Del(key.Key) - } - } - multis, _, _ := b.ListMulti("", "") - for _, m := range multis { - _ = m.Abort() - } - } - } - message := "cannot delete test bucket" - if err != nil { - message += ": " + err.Error() - } - panic(message) -} - -func get(url string) ([]byte, error) { - for attempt := attempts.Start(); attempt.Next(); { - resp, err := http.Get(url) - if err != nil { - if attempt.HasNext() { - continue - } - return nil, err - } - data, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - if attempt.HasNext() { - continue - } - return nil, err - } - return data, err - } - panic("unreachable") -} - -func (s *ClientTests) TestBasicFunctionality(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.PublicRead) - c.Assert(err, IsNil) - - err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead) - c.Assert(err, IsNil) - defer b.Del("name") - - data, err := b.Get("name") - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "yo!") - - data, err = get(b.URL("name")) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "yo!") - - buf := bytes.NewBufferString("hey!") - err = 
b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private) - c.Assert(err, IsNil) - defer b.Del("name2") - - rc, err := b.GetReader("name2") - c.Assert(err, IsNil) - data, err = ioutil.ReadAll(rc) - c.Check(err, IsNil) - c.Check(string(data), Equals, "hey!") - rc.Close() - - data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour))) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "hey!") - - if !s.authIsBroken { - data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour))) - c.Assert(err, IsNil) - c.Assert(string(data), Matches, "(?s).*AccessDenied.*") - } - - err = b.DelBucket() - c.Assert(err, NotNil) - - s3err, ok := err.(*s3.Error) - c.Assert(ok, Equals, true) - c.Assert(s3err.Code, Equals, "BucketNotEmpty") - c.Assert(s3err.BucketName, Equals, b.Name) - c.Assert(s3err.Message, Equals, "The bucket you tried to delete is not empty") - - err = b.Del("name") - c.Assert(err, IsNil) - err = b.Del("name2") - c.Assert(err, IsNil) - - err = b.DelBucket() - c.Assert(err, IsNil) -} - -func (s *ClientTests) TestCopy(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.PublicRead) - - err = b.Put("name+1", []byte("yo!"), "text/plain", s3.PublicRead) - c.Assert(err, IsNil) - defer b.Del("name+1") - - err = b.Copy("name+1", "name+2", s3.PublicRead) - c.Assert(err, IsNil) - defer b.Del("name+2") - - data, err := b.Get("name+2") - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "yo!") - - err = b.Del("name+1") - c.Assert(err, IsNil) - err = b.Del("name+2") - c.Assert(err, IsNil) - - err = b.DelBucket() - c.Assert(err, IsNil) -} - -func (s *ClientTests) TestGetNotFound(c *C) { - b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey) - data, err := b.Get("non-existent") - - s3err, _ := err.(*s3.Error) - c.Assert(s3err, NotNil) - c.Assert(s3err.StatusCode, Equals, 404) - c.Assert(s3err.Code, Equals, "NoSuchBucket") - c.Assert(s3err.Message, Equals, "The specified bucket does not exist") - c.Assert(data, IsNil) -} - -// Communicate with all 
endpoints to see if they are alive. -func (s *ClientTests) TestRegions(c *C) { - errs := make(chan error, len(aws.Regions)) - for _, region := range aws.Regions { - go func(r aws.Region) { - s := s3.New(s.s3.Auth, r) - b := s.Bucket("goamz-" + s.Auth.AccessKey) - _, err := b.Get("non-existent") - errs <- err - }(region) - } - for _ = range aws.Regions { - err := <-errs - if err != nil { - s3_err, ok := err.(*s3.Error) - if ok { - c.Check(s3_err.Code, Matches, "NoSuchBucket") - } else if _, ok = err.(*net.DNSError); ok { - // Okay as well. - } else { - c.Errorf("Non-S3 error: %s", err) - } - } else { - c.Errorf("Test should have errored but it seems to have succeeded") - } - } -} - -var objectNames = []string{ - "index.html", - "index2.html", - "photos/2006/February/sample2.jpg", - "photos/2006/February/sample3.jpg", - "photos/2006/February/sample4.jpg", - "photos/2006/January/sample.jpg", - "test/bar", - "test/foo", -} - -func keys(names ...string) []s3.Key { - ks := make([]s3.Key, len(names)) - for i, name := range names { - ks[i].Key = name - } - return ks -} - -// As the ListResp specifies all the parameters to the -// request too, we use it to specify request parameters -// and expected results. The Contents field is -// used only for the key names inside it. -var listTests = []s3.ListResp{ - // normal list. - { - Contents: keys(objectNames...), - }, { - Marker: objectNames[0], - Contents: keys(objectNames[1:]...), - }, { - Marker: objectNames[0] + "a", - Contents: keys(objectNames[1:]...), - }, { - Marker: "z", - }, - - // limited results. 
- { - MaxKeys: 2, - Contents: keys(objectNames[0:2]...), - IsTruncated: true, - }, { - MaxKeys: 2, - Marker: objectNames[0], - Contents: keys(objectNames[1:3]...), - IsTruncated: true, - }, { - MaxKeys: 2, - Marker: objectNames[len(objectNames)-2], - Contents: keys(objectNames[len(objectNames)-1:]...), - }, - - // with delimiter - { - Delimiter: "/", - CommonPrefixes: []string{"photos/", "test/"}, - Contents: keys("index.html", "index2.html"), - }, { - Delimiter: "/", - Prefix: "photos/2006/", - CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"}, - }, { - Delimiter: "/", - Prefix: "t", - CommonPrefixes: []string{"test/"}, - }, { - Delimiter: "/", - MaxKeys: 1, - Contents: keys("index.html"), - IsTruncated: true, - }, { - Delimiter: "/", - MaxKeys: 1, - Marker: "index2.html", - CommonPrefixes: []string{"photos/"}, - IsTruncated: true, - }, { - Delimiter: "/", - MaxKeys: 1, - Marker: "photos/", - CommonPrefixes: []string{"test/"}, - IsTruncated: false, - }, { - Delimiter: "Feb", - CommonPrefixes: []string{"photos/2006/Feb"}, - Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"), - }, -} - -func (s *ClientTests) TestDoublePutBucket(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.PublicRead) - c.Assert(err, IsNil) - - err = b.PutBucket(s3.PublicRead) - if err != nil { - c.Assert(err, FitsTypeOf, new(s3.Error)) - c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou") - } -} - -func (s *ClientTests) TestBucketList(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, IsNil) - - objData := make(map[string][]byte) - for i, path := range objectNames { - data := []byte(strings.Repeat("a", i)) - err := b.Put(path, data, "text/plain", s3.Private) - c.Assert(err, IsNil) - defer b.Del(path) - objData[path] = data - } - - for i, t := range listTests { - c.Logf("test %d", i) - resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys) - c.Assert(err, IsNil) - 
c.Check(resp.Name, Equals, b.Name) - c.Check(resp.Delimiter, Equals, t.Delimiter) - c.Check(resp.IsTruncated, Equals, t.IsTruncated) - c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes) - checkContents(c, resp.Contents, objData, t.Contents) - } -} - -func etag(data []byte) string { - sum := md5.New() - sum.Write(data) - return fmt.Sprintf(`"%x"`, sum.Sum(nil)) -} - -func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) { - c.Assert(contents, HasLen, len(expected)) - for i, k := range contents { - c.Check(k.Key, Equals, expected[i].Key) - // TODO mtime - c.Check(k.Size, Equals, int64(len(data[k.Key]))) - c.Check(k.ETag, Equals, etag(data[k.Key])) - } -} - -func (s *ClientTests) TestMultiInitPutList(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - c.Assert(multi.UploadId, Matches, ".+") - defer multi.Abort() - - var sent []s3.Part - - for i := 0; i < 5; i++ { - p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("", i+1))) - c.Assert(err, IsNil) - c.Assert(p.N, Equals, i+1) - c.Assert(p.Size, Equals, int64(8)) - c.Assert(p.ETag, Matches, ".+") - sent = append(sent, p) - } - - s3.SetListPartsMax(2) - - parts, err := multi.ListParts() - c.Assert(err, IsNil) - c.Assert(parts, HasLen, len(sent)) - for i := range parts { - c.Assert(parts[i].N, Equals, sent[i].N) - c.Assert(parts[i].Size, Equals, sent[i].Size) - c.Assert(parts[i].ETag, Equals, sent[i].ETag) - } - - err = multi.Complete(parts) - s3err, failed := err.(*s3.Error) - c.Assert(failed, Equals, true) - c.Assert(s3err.Code, Equals, "EntityTooSmall") - - err = multi.Abort() - c.Assert(err, IsNil) - _, err = multi.ListParts() - s3err, ok := err.(*s3.Error) - c.Assert(ok, Equals, true) - c.Assert(s3err.Code, Equals, "NoSuchUpload") -} - -// This may take a minute or more due to the minimum size accepted S3 -// on multipart upload parts. 
-func (s *ClientTests) TestMultiComplete(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - c.Assert(multi.UploadId, Matches, ".+") - defer multi.Abort() - - // Minimum size S3 accepts for all but the last part is 5MB. - data1 := make([]byte, 5*1024*1024) - data2 := []byte("") - - part1, err := multi.PutPart(1, bytes.NewReader(data1)) - c.Assert(err, IsNil) - part2, err := multi.PutPart(2, bytes.NewReader(data2)) - c.Assert(err, IsNil) - - // Purposefully reversed. The order requirement must be handled. - err = multi.Complete([]s3.Part{part2, part1}) - c.Assert(err, IsNil) - - data, err := b.Get("multi") - c.Assert(err, IsNil) - - c.Assert(len(data), Equals, len(data1)+len(data2)) - for i := range data1 { - if data[i] != data1[i] { - c.Fatalf("uploaded object at byte %d: want %d, got %d", data1[i], data[i]) - } - } - c.Assert(string(data[len(data1):]), Equals, string(data2)) -} - -type multiList []*s3.Multi - -func (l multiList) Len() int { return len(l) } -func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key } -func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } - -func (s *ClientTests) TestListMulti(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, IsNil) - - // Ensure an empty state before testing its behavior. - multis, _, err := b.ListMulti("", "") - for _, m := range multis { - err := m.Abort() - c.Assert(err, IsNil) - } - - keys := []string{ - "a/multi2", - "a/multi3", - "b/multi4", - "multi1", - } - for _, key := range keys { - m, err := b.InitMulti(key, "", s3.Private) - c.Assert(err, IsNil) - defer m.Abort() - } - - // Amazon's implementation of the multiple-request listing for - // multipart uploads in progress seems broken in multiple ways. - // (next tokens are not provided, etc). 
- //s3.SetListMultiMax(2) - - multis, prefixes, err := b.ListMulti("", "") - c.Assert(err, IsNil) - for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); { - multis, prefixes, err = b.ListMulti("", "") - c.Assert(err, IsNil) - } - sort.Sort(multiList(multis)) - c.Assert(prefixes, IsNil) - var gotKeys []string - for _, m := range multis { - gotKeys = append(gotKeys, m.Key) - } - c.Assert(gotKeys, DeepEquals, keys) - for _, m := range multis { - c.Assert(m.Bucket, Equals, b) - c.Assert(m.UploadId, Matches, ".+") - } - - multis, prefixes, err = b.ListMulti("", "/") - for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; { - multis, prefixes, err = b.ListMulti("", "") - c.Assert(err, IsNil) - } - c.Assert(err, IsNil) - c.Assert(prefixes, DeepEquals, []string{"a/", "b/"}) - c.Assert(multis, HasLen, 1) - c.Assert(multis[0].Bucket, Equals, b) - c.Assert(multis[0].Key, Equals, "multi1") - c.Assert(multis[0].UploadId, Matches, ".+") - - for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; { - multis, prefixes, err = b.ListMulti("", "") - c.Assert(err, IsNil) - } - multis, prefixes, err = b.ListMulti("a/", "/") - c.Assert(err, IsNil) - c.Assert(prefixes, IsNil) - c.Assert(multis, HasLen, 2) - c.Assert(multis[0].Bucket, Equals, b) - c.Assert(multis[0].Key, Equals, "a/multi2") - c.Assert(multis[0].UploadId, Matches, ".+") - c.Assert(multis[1].Bucket, Equals, b) - c.Assert(multis[1].Key, Equals, "a/multi3") - c.Assert(multis[1].UploadId, Matches, ".+") -} - -func (s *ClientTests) TestMultiPutAllZeroLength(c *C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private) - c.Assert(err, IsNil) - defer multi.Abort() - - // This tests an edge case. Amazon requires at least one - // part for multiprat uploads to work, even the part is empty. 
- parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024) - c.Assert(err, IsNil) - c.Assert(parts, HasLen, 1) - c.Assert(parts[0].Size, Equals, int64(0)) - c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`) - - err = multi.Complete(parts) - c.Assert(err, IsNil) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3test/server.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3test/server.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3test/server.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3test/server.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,666 +0,0 @@ -package s3test - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "encoding/xml" - "fmt" - "github.com/mitchellh/goamz/s3" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -const debug = false - -type s3Error struct { - statusCode int - XMLName struct{} `xml:"Error"` - Code string - Message string - BucketName string - RequestId string - HostId string -} - -type action struct { - srv *Server - w http.ResponseWriter - req *http.Request - reqId string -} - -// Config controls the internal behaviour of the Server. A nil config is the default -// and behaves as if all configurations assume their default behaviour. Once passed -// to NewServer, the configuration must not be modified. -type Config struct { - // Send409Conflict controls how the Server will respond to calls to PUT on a - // previously existing bucket. The default is false, and corresponds to the - // us-east-1 s3 enpoint. Setting this value to true emulates the behaviour of - // all other regions. 
- // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html - Send409Conflict bool -} - -func (c *Config) send409Conflict() bool { - if c != nil { - return c.Send409Conflict - } - return false -} - -// Server is a fake S3 server for testing purposes. -// All of the data for the server is kept in memory. -type Server struct { - url string - reqId int - listener net.Listener - mu sync.Mutex - buckets map[string]*bucket - config *Config -} - -type bucket struct { - name string - acl s3.ACL - ctime time.Time - objects map[string]*object -} - -type object struct { - name string - mtime time.Time - meta http.Header // metadata to return with requests. - checksum []byte // also held as Content-MD5 in meta. - data []byte -} - -// A resource encapsulates the subject of an HTTP request. -// The resource referred to may or may not exist -// when the request is made. -type resource interface { - put(a *action) interface{} - get(a *action) interface{} - post(a *action) interface{} - delete(a *action) interface{} -} - -func NewServer(config *Config) (*Server, error) { - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, fmt.Errorf("cannot listen on localhost: %v", err) - } - srv := &Server{ - listener: l, - url: "http://" + l.Addr().String(), - buckets: make(map[string]*bucket), - config: config, - } - go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - srv.serveHTTP(w, req) - })) - return srv, nil -} - -// Quit closes down the server. -func (srv *Server) Quit() { - srv.listener.Close() -} - -// URL returns a URL for the server. -func (srv *Server) URL() string { - return srv.url -} - -func fatalf(code int, codeStr string, errf string, a ...interface{}) { - panic(&s3Error{ - statusCode: code, - Code: codeStr, - Message: fmt.Sprintf(errf, a...), - }) -} - -// serveHTTP serves the S3 protocol. 
-func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) { - // ignore error from ParseForm as it's usually spurious. - req.ParseForm() - - srv.mu.Lock() - defer srv.mu.Unlock() - - if debug { - log.Printf("s3test %q %q", req.Method, req.URL) - } - a := &action{ - srv: srv, - w: w, - req: req, - reqId: fmt.Sprintf("%09X", srv.reqId), - } - srv.reqId++ - - var r resource - defer func() { - switch err := recover().(type) { - case *s3Error: - switch r := r.(type) { - case objectResource: - err.BucketName = r.bucket.name - case bucketResource: - err.BucketName = r.name - } - err.RequestId = a.reqId - // TODO HostId - w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`) - w.WriteHeader(err.statusCode) - xmlMarshal(w, err) - case nil: - default: - panic(err) - } - }() - - r = srv.resourceForURL(req.URL) - - var resp interface{} - switch req.Method { - case "PUT": - resp = r.put(a) - case "GET", "HEAD": - resp = r.get(a) - case "DELETE": - resp = r.delete(a) - case "POST": - resp = r.post(a) - default: - fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) - } - if resp != nil && req.Method != "HEAD" { - xmlMarshal(w, resp) - } -} - -// xmlMarshal is the same as xml.Marshal except that -// it panics on error. The marshalling should not fail, -// but we want to know if it does. -func xmlMarshal(w io.Writer, x interface{}) { - if err := xml.NewEncoder(w).Encode(x); err != nil { - panic(fmt.Errorf("error marshalling %#v: %v", x, err)) - } -} - -// In a fully implemented test server, each of these would have -// its own resource type. 
-var unimplementedBucketResourceNames = map[string]bool{ - "acl": true, - "lifecycle": true, - "policy": true, - "location": true, - "logging": true, - "notification": true, - "versions": true, - "requestPayment": true, - "versioning": true, - "website": true, - "uploads": true, -} - -var unimplementedObjectResourceNames = map[string]bool{ - "uploadId": true, - "acl": true, - "torrent": true, - "uploads": true, -} - -var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?") - -// resourceForURL returns a resource object for the given URL. -func (srv *Server) resourceForURL(u *url.URL) (r resource) { - - if u.Path == "/" { - return serviceResource{ - buckets: srv.buckets, - } - } - - m := pathRegexp.FindStringSubmatch(u.Path) - if m == nil { - fatalf(404, "InvalidURI", "Couldn't parse the specified URI") - } - bucketName := m[2] - objectName := m[4] - if bucketName == "" { - return nullResource{} // root - } - b := bucketResource{ - name: bucketName, - bucket: srv.buckets[bucketName], - } - q := u.Query() - if objectName == "" { - for name := range q { - if unimplementedBucketResourceNames[name] { - return nullResource{} - } - } - return b - - } - if b.bucket == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - objr := objectResource{ - name: objectName, - version: q.Get("versionId"), - bucket: b.bucket, - } - for name := range q { - if unimplementedObjectResourceNames[name] { - return nullResource{} - } - } - if obj := objr.bucket.objects[objr.name]; obj != nil { - objr.object = obj - } - return objr -} - -// nullResource has error stubs for all resource methods. 
-type nullResource struct{} - -func notAllowed() interface{} { - fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") - return nil -} - -func (nullResource) put(a *action) interface{} { return notAllowed() } -func (nullResource) get(a *action) interface{} { return notAllowed() } -func (nullResource) post(a *action) interface{} { return notAllowed() } -func (nullResource) delete(a *action) interface{} { return notAllowed() } - -const timeFormat = "2006-01-02T15:04:05.000Z07:00" - -type serviceResource struct { - buckets map[string]*bucket -} - -func (serviceResource) put(a *action) interface{} { return notAllowed() } -func (serviceResource) post(a *action) interface{} { return notAllowed() } -func (serviceResource) delete(a *action) interface{} { return notAllowed() } - -// GET on an s3 service lists the buckets. -// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html -func (r serviceResource) get(a *action) interface{} { - type respBucket struct { - Name string - } - - type response struct { - Buckets []respBucket `xml:">Bucket"` - } - - resp := response{} - - for _, bucketPtr := range r.buckets { - bkt := respBucket{ - Name: bucketPtr.name, - } - resp.Buckets = append(resp.Buckets, bkt) - } - - return &resp -} - -type bucketResource struct { - name string - bucket *bucket // non-nil if the bucket already exists. -} - -// GET on a bucket lists the objects in the bucket. 
-// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html -func (r bucketResource) get(a *action) interface{} { - if r.bucket == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - delimiter := a.req.Form.Get("delimiter") - marker := a.req.Form.Get("marker") - maxKeys := -1 - if s := a.req.Form.Get("max-keys"); s != "" { - i, err := strconv.Atoi(s) - if err != nil || i < 0 { - fatalf(400, "invalid value for max-keys: %q", s) - } - maxKeys = i - } - prefix := a.req.Form.Get("prefix") - a.w.Header().Set("Content-Type", "application/xml") - - if a.req.Method == "HEAD" { - return nil - } - - var objs orderedObjects - - // first get all matching objects and arrange them in alphabetical order. - for name, obj := range r.bucket.objects { - if strings.HasPrefix(name, prefix) { - objs = append(objs, obj) - } - } - sort.Sort(objs) - - if maxKeys <= 0 { - maxKeys = 1000 - } - resp := &s3.ListResp{ - Name: r.bucket.name, - Prefix: prefix, - Delimiter: delimiter, - Marker: marker, - MaxKeys: maxKeys, - } - - var prefixes []string - for _, obj := range objs { - if !strings.HasPrefix(obj.name, prefix) { - continue - } - name := obj.name - isPrefix := false - if delimiter != "" { - if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { - name = obj.name[:len(prefix)+i+len(delimiter)] - if prefixes != nil && prefixes[len(prefixes)-1] == name { - continue - } - isPrefix = true - } - } - if name <= marker { - continue - } - if len(resp.Contents)+len(prefixes) >= maxKeys { - resp.IsTruncated = true - break - } - if isPrefix { - prefixes = append(prefixes, name) - } else { - // Contents contains only keys not found in CommonPrefixes - resp.Contents = append(resp.Contents, obj.s3Key()) - } - } - resp.CommonPrefixes = prefixes - return resp -} - -// orderedObjects holds a slice of objects that can be sorted -// by name. 
-type orderedObjects []*object - -func (s orderedObjects) Len() int { - return len(s) -} -func (s orderedObjects) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s orderedObjects) Less(i, j int) bool { - return s[i].name < s[j].name -} - -func (obj *object) s3Key() s3.Key { - return s3.Key{ - Key: obj.name, - LastModified: obj.mtime.Format(timeFormat), - Size: int64(len(obj.data)), - ETag: fmt.Sprintf(`"%x"`, obj.checksum), - // TODO StorageClass - // TODO Owner - } -} - -// DELETE on a bucket deletes the bucket if it's not empty. -func (r bucketResource) delete(a *action) interface{} { - b := r.bucket - if b == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - if len(b.objects) > 0 { - fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty") - } - delete(a.srv.buckets, b.name) - return nil -} - -// PUT on a bucket creates the bucket. -// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html -func (r bucketResource) put(a *action) interface{} { - var created bool - if r.bucket == nil { - if !validBucketName(r.name) { - fatalf(400, "InvalidBucketName", "The specified bucket is not valid") - } - if loc := locationConstraint(a); loc == "" { - fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.") - } - // TODO validate acl - r.bucket = &bucket{ - name: r.name, - // TODO default acl - objects: make(map[string]*object), - } - a.srv.buckets[r.name] = r.bucket - created = true - } - if !created && a.srv.config.send409Conflict() { - fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.") - } - r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl")) - return nil -} - -func (bucketResource) post(a *action) interface{} { - fatalf(400, "Method", "bucket POST method not available") - return nil -} - -// validBucketName returns whether name is a valid 
bucket name. -// Here are the rules, from: -// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html -// -// Can contain lowercase letters, numbers, periods (.), underscores (_), -// and dashes (-). You can use uppercase letters for buckets only in the -// US Standard region. -// -// Must start with a number or letter -// -// Must be between 3 and 255 characters long -// -// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4) -// but the real S3 server does not seem to check that rule, so we will not -// check it either. -// -func validBucketName(name string) bool { - if len(name) < 3 || len(name) > 255 { - return false - } - r := name[0] - if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') { - return false - } - for _, r := range name { - switch { - case r >= '0' && r <= '9': - case r >= 'a' && r <= 'z': - case r == '_' || r == '-': - case r == '.': - default: - return false - } - } - return true -} - -var responseParams = map[string]bool{ - "content-type": true, - "content-language": true, - "expires": true, - "cache-control": true, - "content-disposition": true, - "content-encoding": true, -} - -type objectResource struct { - name string - version string - bucket *bucket // always non-nil. - object *object // may be nil. -} - -// GET on an object gets the contents of the object. -// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html -func (objr objectResource) get(a *action) interface{} { - obj := objr.object - if obj == nil { - fatalf(404, "NoSuchKey", "The specified key does not exist.") - } - h := a.w.Header() - // add metadata - for name, d := range obj.meta { - h[name] = d - } - // override header values in response to request parameters. 
- for name, vals := range a.req.Form { - if strings.HasPrefix(name, "response-") { - name = name[len("response-"):] - if !responseParams[name] { - continue - } - h.Set(name, vals[0]) - } - } - if r := a.req.Header.Get("Range"); r != "" { - fatalf(400, "NotImplemented", "range unimplemented") - } - // TODO Last-Modified-Since - // TODO If-Modified-Since - // TODO If-Unmodified-Since - // TODO If-Match - // TODO If-None-Match - // TODO Connection: close ?? - // TODO x-amz-request-id - h.Set("Content-Length", fmt.Sprint(len(obj.data))) - h.Set("ETag", hex.EncodeToString(obj.checksum)) - h.Set("Last-Modified", obj.mtime.Format(time.RFC1123)) - if a.req.Method == "HEAD" { - return nil - } - // TODO avoid holding the lock when writing data. - _, err := a.w.Write(obj.data) - if err != nil { - // we can't do much except just log the fact. - log.Printf("error writing data: %v", err) - } - return nil -} - -var metaHeaders = map[string]bool{ - "Content-MD5": true, - "x-amz-acl": true, - "Content-Type": true, - "Content-Encoding": true, - "Content-Disposition": true, -} - -// PUT on an object creates the object. -func (objr objectResource) put(a *action) interface{} { - // TODO Cache-Control header - // TODO Expires header - // TODO x-amz-server-side-encryption - // TODO x-amz-storage-class - - // TODO is this correct, or should we erase all previous metadata? - obj := objr.object - if obj == nil { - obj = &object{ - name: objr.name, - meta: make(http.Header), - } - } - - var expectHash []byte - if c := a.req.Header.Get("Content-MD5"); c != "" { - var err error - expectHash, err = hex.DecodeString(c) - if err != nil || len(expectHash) != md5.Size { - fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid") - } - } - sum := md5.New() - // TODO avoid holding lock while reading data. 
- data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) - if err != nil { - fatalf(400, "TODO", "read error") - } - gotHash := sum.Sum(nil) - if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { - fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received") - } - if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { - fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") - } - - // PUT request has been successful - save data and metadata - for key, values := range a.req.Header { - key = http.CanonicalHeaderKey(key) - if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") { - obj.meta[key] = values - } - } - obj.data = data - obj.checksum = gotHash - obj.mtime = time.Now() - objr.bucket.objects[objr.name] = obj - return nil -} - -func (objr objectResource) delete(a *action) interface{} { - delete(objr.bucket.objects, objr.name) - return nil -} - -func (objr objectResource) post(a *action) interface{} { - fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") - return nil -} - -type CreateBucketConfiguration struct { - LocationConstraint string -} - -// locationConstraint parses the request body (if present). -// If there is no body, an empty string will be returned. 
-func locationConstraint(a *action) string { - var body bytes.Buffer - if _, err := io.Copy(&body, a.req.Body); err != nil { - fatalf(400, "InvalidRequest", err.Error()) - } - if body.Len() == 0 { - return "" - } - var loc CreateBucketConfiguration - if err := xml.NewDecoder(&body).Decode(&loc); err != nil { - fatalf(400, "InvalidRequest", err.Error()) - } - return loc.LocationConstraint -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,435 +0,0 @@ -package s3_test - -import ( - "bytes" - "io/ioutil" - "net/http" - "testing" - - "time" - - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/s3" - "github.com/mitchellh/goamz/testutil" - . 
"github.com/motain/gocheck" -) - -func Test(t *testing.T) { - TestingT(t) -} - -type S struct { - s3 *s3.S3 -} - -var _ = Suite(&S{}) - -var testServer = testutil.NewHTTPServer() - -func (s *S) SetUpSuite(c *C) { - testServer.Start() - auth := aws.Auth{"abc", "123", ""} - s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL}) -} - -func (s *S) TearDownSuite(c *C) { - s3.SetAttemptStrategy(nil) -} - -func (s *S) SetUpTest(c *C) { - attempts := aws.AttemptStrategy{ - Total: 300 * time.Millisecond, - Delay: 100 * time.Millisecond, - } - s3.SetAttemptStrategy(&attempts) -} - -func (s *S) TearDownTest(c *C) { - testServer.Flush() -} - -func (s *S) DisableRetries() { - s3.SetAttemptStrategy(&aws.AttemptStrategy{}) -} - -// PutBucket docs: http://goo.gl/kBTCu - -func (s *S) TestPutBucket(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.PutBucket(s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/") - c.Assert(req.Header["Date"], Not(Equals), "") -} - -// DeleteBucket docs: http://goo.gl/GoBrY - -func (s *S) TestDelBucket(c *C) { - testServer.Response(204, nil, "") - - b := s.s3.Bucket("bucket") - err := b.DelBucket() - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "DELETE") - c.Assert(req.URL.Path, Equals, "/bucket/") - c.Assert(req.Header["Date"], Not(Equals), "") -} - -// ListBuckets: http://goo.gl/NqlyMN - -func (s *S) TestListBuckets(c *C) { - testServer.Response(200, nil, GetListBucketsDump) - - buckets, err := s.s3.ListBuckets() - c.Assert(err, IsNil) - c.Assert(len(buckets.Buckets), Equals, 2) - c.Assert(buckets.Buckets[0].Name, Equals, "bucket1") - c.Assert(buckets.Buckets[1].Name, Equals, "bucket2") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/") -} - -// GetObject docs: http://goo.gl/isCO7 - 
-func (s *S) TestGet(c *C) { - testServer.Response(200, nil, "content") - - b := s.s3.Bucket("bucket") - data, err := b.Get("name") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "content") -} - -func (s *S) TestHead(c *C) { - testServer.Response(200, nil, "") - b := s.s3.Bucket("bucket") - resp, err := b.Head("name") - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "HEAD") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(err, IsNil) - body, err := ioutil.ReadAll(resp.Body) - c.Assert(err, IsNil) - c.Assert(len(body), Equals, 0) -} - -func (s *S) TestURL(c *C) { - testServer.Response(200, nil, "content") - - b := s.s3.Bucket("bucket") - url := b.URL("name") - r, err := http.Get(url) - c.Assert(err, IsNil) - data, err := ioutil.ReadAll(r.Body) - r.Body.Close() - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "content") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/bucket/name") -} - -func (s *S) TestGetReader(c *C) { - testServer.Response(200, nil, "content") - - b := s.s3.Bucket("bucket") - rc, err := b.GetReader("name") - c.Assert(err, IsNil) - data, err := ioutil.ReadAll(rc) - rc.Close() - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "content") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(Equals), "") -} - -func (s *S) TestGetNotFound(c *C) { - for i := 0; i < 10; i++ { - testServer.Response(404, nil, GetObjectErrorDump) - } - - b := s.s3.Bucket("non-existent-bucket") - data, err := b.Get("non-existent") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, 
"/non-existent-bucket/non-existent") - c.Assert(req.Header["Date"], Not(Equals), "") - - s3err, _ := err.(*s3.Error) - c.Assert(s3err, NotNil) - c.Assert(s3err.StatusCode, Equals, 404) - c.Assert(s3err.BucketName, Equals, "non-existent-bucket") - c.Assert(s3err.RequestId, Equals, "3F1B667FAD71C3D8") - c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D") - c.Assert(s3err.Code, Equals, "NoSuchBucket") - c.Assert(s3err.Message, Equals, "The specified bucket does not exist") - c.Assert(s3err.Error(), Equals, "The specified bucket does not exist") - c.Assert(data, IsNil) -} - -// PutObject docs: http://goo.gl/FEBPD - -func (s *S) TestPutObject(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.Put("name", []byte("content"), "content-type", s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) - //c.Assert(req.Header["Content-MD5"], DeepEquals, "...") - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) -} - -func (s *S) TestPutObjectHeader(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.PutHeader( - "name", - []byte("content"), - map[string][]string{"Content-Type": {"content-type"}}, - s3.Private, - ) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) - //c.Assert(req.Header["Content-MD5"], DeepEquals, "...") - 
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) -} - -func (s *S) TestPutReader(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - buf := bytes.NewBufferString("content") - err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) - //c.Assert(req.Header["Content-MD5"], Equals, "...") - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) -} - -func (s *S) TestPutReaderHeader(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - buf := bytes.NewBufferString("content") - err := b.PutReaderHeader( - "name", - buf, - int64(buf.Len()), - map[string][]string{"Content-Type": {"content-type"}}, - s3.Private, - ) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"}) - //c.Assert(req.Header["Content-MD5"], Equals, "...") - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) -} - -func (s *S) TestCopy(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.Copy( - "old/file", - "new/file", - s3.Private, - ) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.URL.Path, Equals, "/bucket/new/file") - c.Assert(req.Header["X-Amz-Copy-Source"], DeepEquals, []string{"/bucket/old/file"}) - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, 
[]string{"private"}) -} - -func (s *S) TestPlusInURL(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.Copy( - "dir/old+f?le", - "dir/new+f?le", - s3.Private, - ) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "PUT") - c.Assert(req.RequestURI, Equals, "/bucket/dir/new%2Bf%3Fle") - c.Assert(req.Header["X-Amz-Copy-Source"], DeepEquals, []string{"/bucket/dir/old%2Bf%3Fle"}) - c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"}) -} - -// DelObject docs: http://goo.gl/APeTt - -func (s *S) TestDelObject(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.Del("name") - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "DELETE") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(Equals), "") -} - -// Delete Multiple Objects docs: http://goo.gl/WvA5sj - -func (s *S) TestMultiDelObject(c *C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.MultiDel([]string{"a", "b"}) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "POST") - c.Assert(req.URL.Path, Equals, "/bucket/") - c.Assert(req.RequestURI, Equals, "/bucket/?delete=") - c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"nos/vZNvjGs17xIyjEFlwQ=="}) - data, err := ioutil.ReadAll(req.Body) - req.Body.Close() - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "falseab") -} - -// Bucket List Objects docs: http://goo.gl/YjQTc - -func (s *S) TestList(c *C) { - testServer.Response(200, nil, GetListResultDump1) - - b := s.s3.Bucket("quotes") - - data, err := b.List("N", "", "", 0) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/quotes/") - c.Assert(req.Header["Date"], Not(Equals), "") - c.Assert(req.Form["prefix"], DeepEquals, []string{"N"}) - 
c.Assert(req.Form["delimiter"], DeepEquals, []string{""}) - c.Assert(req.Form["marker"], DeepEquals, []string{""}) - c.Assert(req.Form["max-keys"], DeepEquals, []string(nil)) - - c.Assert(data.Name, Equals, "quotes") - c.Assert(data.Prefix, Equals, "N") - c.Assert(data.IsTruncated, Equals, false) - c.Assert(len(data.Contents), Equals, 2) - - c.Assert(data.Contents[0].Key, Equals, "Nelson") - c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z") - c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`) - c.Assert(data.Contents[0].Size, Equals, int64(5)) - c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD") - c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f") - c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile") - - c.Assert(data.Contents[1].Key, Equals, "Neo") - c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z") - c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`) - c.Assert(data.Contents[1].Size, Equals, int64(4)) - c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD") - c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f") - c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile") -} - -func (s *S) TestListWithDelimiter(c *C) { - testServer.Response(200, nil, GetListResultDump2) - - b := s.s3.Bucket("quotes") - - data, err := b.List("photos/2006/", "/", "some-marker", 1000) - c.Assert(err, IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "GET") - c.Assert(req.URL.Path, Equals, "/quotes/") - c.Assert(req.Header["Date"], Not(Equals), "") - c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"}) - c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"}) - c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"}) - c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"}) - - c.Assert(data.Name, Equals, "example-bucket") - c.Assert(data.Prefix, Equals, 
"photos/2006/") - c.Assert(data.Delimiter, Equals, "/") - c.Assert(data.Marker, Equals, "some-marker") - c.Assert(data.IsTruncated, Equals, false) - c.Assert(len(data.Contents), Equals, 0) - c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"}) -} - -func (s *S) TestGetKey(c *C) { - testServer.Response(200, GetKeyHeaderDump, "") - - b := s.s3.Bucket("bucket") - key, err := b.GetKey("name") - - req := testServer.WaitRequest() - c.Assert(req.Method, Equals, "HEAD") - c.Assert(req.URL.Path, Equals, "/bucket/name") - c.Assert(req.Header["Date"], Not(Equals), "") - - c.Assert(err, IsNil) - c.Assert(key.Key, Equals, "name") - c.Assert(key.LastModified, Equals, GetKeyHeaderDump["Last-Modified"]) - c.Assert(key.Size, Equals, int64(434234)) - c.Assert(key.ETag, Equals, GetKeyHeaderDump["ETag"]) -} - -func (s *S) TestUnescapedColon(c *C) { - b := s.s3.Bucket("bucket") - u := b.URL("foo:bar") - c.Assert(u, Equals, "http://localhost:4444/bucket/foo:bar") -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3t_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3t_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3t_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/s3t_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,79 +0,0 @@ -package s3_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/s3" - "github.com/mitchellh/goamz/s3/s3test" - . 
"github.com/motain/gocheck" -) - -type LocalServer struct { - auth aws.Auth - region aws.Region - srv *s3test.Server - config *s3test.Config -} - -func (s *LocalServer) SetUp(c *C) { - srv, err := s3test.NewServer(s.config) - c.Assert(err, IsNil) - c.Assert(srv, NotNil) - - s.srv = srv - s.region = aws.Region{ - Name: "faux-region-1", - S3Endpoint: srv.URL(), - S3LocationConstraint: true, // s3test server requires a LocationConstraint - } -} - -// LocalServerSuite defines tests that will run -// against the local s3test server. It includes -// selected tests from ClientTests; -// when the s3test functionality is sufficient, it should -// include all of them, and ClientTests can be simply embedded. -type LocalServerSuite struct { - srv LocalServer - clientTests ClientTests -} - -var ( - // run tests twice, once in us-east-1 mode, once not. - _ = Suite(&LocalServerSuite{}) - _ = Suite(&LocalServerSuite{ - srv: LocalServer{ - config: &s3test.Config{ - Send409Conflict: true, - }, - }, - }) -) - -func (s *LocalServerSuite) SetUpSuite(c *C) { - s.srv.SetUp(c) - s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region) - - // TODO Sadly the fake server ignores auth completely right now. 
:-( - s.clientTests.authIsBroken = true - s.clientTests.Cleanup() -} - -func (s *LocalServerSuite) TearDownTest(c *C) { - s.clientTests.Cleanup() -} - -func (s *LocalServerSuite) TestBasicFunctionality(c *C) { - s.clientTests.TestBasicFunctionality(c) -} - -func (s *LocalServerSuite) TestGetNotFound(c *C) { - s.clientTests.TestGetNotFound(c) -} - -func (s *LocalServerSuite) TestBucketList(c *C) { - s.clientTests.TestBucketList(c) -} - -func (s *LocalServerSuite) TestDoublePutBucket(c *C) { - s.clientTests.TestDoublePutBucket(c) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/sign.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/sign.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/sign.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/sign.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -package s3 - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "log" - "sort" - "strings" - - "github.com/mitchellh/goamz/aws" -) - -var b64 = base64.StdEncoding - -// ---------------------------------------------------------------------------- -// S3 signing (http://goo.gl/G1LrK) - -var s3ParamsToSign = map[string]bool{ - "acl": true, - "delete": true, - "location": true, - "logging": true, - "notification": true, - "partNumber": true, - "policy": true, - "requestPayment": true, - "torrent": true, - "uploadId": true, - "uploads": true, - "versionId": true, - "versioning": true, - "versions": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, - "response-cache-control": true, - "response-content-disposition": true, - "response-content-encoding": true, -} - -func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) { - var md5, ctype, date, xamz string - var xamzDate bool - var sarray 
[]string - - // add security token - if auth.Token != "" { - headers["x-amz-security-token"] = []string{auth.Token} - } - - if auth.SecretKey == "" { - // no auth secret; skip signing, e.g. for public read-only buckets. - return - } - - for k, v := range headers { - k = strings.ToLower(k) - switch k { - case "content-md5": - md5 = v[0] - case "content-type": - ctype = v[0] - case "date": - if !xamzDate { - date = v[0] - } - default: - if strings.HasPrefix(k, "x-amz-") { - vall := strings.Join(v, ",") - sarray = append(sarray, k+":"+vall) - if k == "x-amz-date" { - xamzDate = true - date = "" - } - } - } - } - if len(sarray) > 0 { - sort.StringSlice(sarray).Sort() - xamz = strings.Join(sarray, "\n") + "\n" - } - - expires := false - if v, ok := params["Expires"]; ok { - // Query string request authentication alternative. - expires = true - date = v[0] - params["AWSAccessKeyId"] = []string{auth.AccessKey} - } - - sarray = sarray[0:0] - for k, v := range params { - if s3ParamsToSign[k] { - for _, vi := range v { - if vi == "" { - sarray = append(sarray, k) - } else { - // "When signing you do not encode these values." - sarray = append(sarray, k+"="+vi) - } - } - } - } - if len(sarray) > 0 { - sort.StringSlice(sarray).Sort() - canonicalPath = canonicalPath + "?" 
+ strings.Join(sarray, "&") - } - - payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath - hash := hmac.New(sha1.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - if expires { - params["Signature"] = []string{string(signature)} - } else { - headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)} - } - - if debug { - log.Printf("Signature payload: %q", payload) - log.Printf("Signature: %q", signature) - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/sign_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/sign_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/sign_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/s3/sign_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,194 +0,0 @@ -package s3_test - -import ( - "github.com/mitchellh/goamz/aws" - "github.com/mitchellh/goamz/s3" - . 
"github.com/motain/gocheck" -) - -// S3 ReST authentication docs: http://goo.gl/G1LrK - -var testAuth = aws.Auth{"0PN5J17HBGZHT7JJ3X82", "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o", ""} -var emptyAuth = aws.Auth{"", "", ""} - -func (s *S) TestSignExampleObjectGet(c *C) { - method := "GET" - path := "/johnsmith/photos/puppy.jpg" - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:36:42 +0000"}, - } - s3.Sign(testAuth, method, path, nil, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=" - c.Assert(headers["Authorization"], DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleObjectGetNoAuth(c *C) { - method := "GET" - path := "/johnsmith/photos/puppy.jpg" - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:36:42 +0000"}, - } - s3.Sign(emptyAuth, method, path, nil, headers) - c.Assert(headers["Authorization"], IsNil) -} - -func (s *S) TestSignExampleObjectPut(c *C) { - method := "PUT" - path := "/johnsmith/photos/puppy.jpg" - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 21:15:45 +0000"}, - "Content-Type": {"image/jpeg"}, - "Content-Length": {"94328"}, - } - s3.Sign(testAuth, method, path, nil, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=" - c.Assert(headers["Authorization"], DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleList(c *C) { - method := "GET" - path := "/johnsmith/" - params := map[string][]string{ - "prefix": {"photos"}, - "max-keys": {"50"}, - "marker": {"puppy"}, - } - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:42:41 +0000"}, - "User-Agent": {"Mozilla/5.0"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=" - c.Assert(headers["Authorization"], DeepEquals, 
[]string{expected}) -} - -func (s *S) TestSignExampleListNoAuth(c *C) { - method := "GET" - path := "/johnsmith/" - params := map[string][]string{ - "prefix": {"photos"}, - "max-keys": {"50"}, - "marker": {"puppy"}, - } - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:42:41 +0000"}, - "User-Agent": {"Mozilla/5.0"}, - } - s3.Sign(emptyAuth, method, path, params, headers) - c.Assert(headers["Authorization"], IsNil) -} - -func (s *S) TestSignExampleFetch(c *C) { - method := "GET" - path := "/johnsmith/" - params := map[string][]string{ - "acl": {""}, - } - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:44:46 +0000"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=" - c.Assert(headers["Authorization"], DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleFetchNoAuth(c *C) { - method := "GET" - path := "/johnsmith/" - params := map[string][]string{ - "acl": {""}, - } - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:44:46 +0000"}, - } - s3.Sign(emptyAuth, method, path, params, headers) - c.Assert(headers["Authorization"], IsNil) -} - -func (s *S) TestSignExampleDelete(c *C) { - method := "DELETE" - path := "/johnsmith/photos/puppy.jpg" - params := map[string][]string{} - headers := map[string][]string{ - "Host": {"s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 21:20:27 +0000"}, - "User-Agent": {"dotnet"}, - "x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=" - c.Assert(headers["Authorization"], DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleUpload(c *C) { - method := "PUT" - path := "/static.johnsmith.net/db-backup.dat.gz" - params := map[string][]string{} - headers := 
map[string][]string{ - "Host": {"static.johnsmith.net:8080"}, - "Date": {"Tue, 27 Mar 2007 21:06:08 +0000"}, - "User-Agent": {"curl/7.15.5"}, - "x-amz-acl": {"public-read"}, - "content-type": {"application/x-download"}, - "Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="}, - "X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"}, - "X-Amz-Meta-FileChecksum": {"0x02661779"}, - "X-Amz-Meta-ChecksumAlgorithm": {"crc32"}, - "Content-Disposition": {"attachment; filename=database.dat"}, - "Content-Encoding": {"gzip"}, - "Content-Length": {"5913339"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=" - c.Assert(headers["Authorization"], DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleListAllMyBuckets(c *C) { - method := "GET" - path := "/" - headers := map[string][]string{ - "Host": {"s3.amazonaws.com"}, - "Date": {"Wed, 28 Mar 2007 01:29:59 +0000"}, - } - s3.Sign(testAuth, method, path, nil, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA=" - c.Assert(headers["Authorization"], DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleUnicodeKeys(c *C) { - method := "GET" - path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re" - headers := map[string][]string{ - "Host": {"s3.amazonaws.com"}, - "Date": {"Wed, 28 Mar 2007 01:49:49 +0000"}, - } - s3.Sign(testAuth, method, path, nil, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY=" - c.Assert(headers["Authorization"], DeepEquals, []string{expected}) -} - -// Not included in AWS documentation - -func (s *S) TestSignWithIAMToken(c *C) { - method := "GET" - path := "/" - headers := map[string][]string{ - "Host": {"s3.amazonaws.com"}, - "Date": {"Wed, 28 Mar 2007 01:29:59 +0000"}, - } - - authWithToken := testAuth - authWithToken.Token = "totallysecret" - - s3.Sign(authWithToken, method, path, nil, headers) - expected := "AWS 
0PN5J17HBGZHT7JJ3X82:SJ0yQO7NpHyXJ7zkxY+/fGQ6aUw=" - c.Assert(headers["Authorization"], DeepEquals, []string{expected}) - c.Assert(headers["x-amz-security-token"], DeepEquals, []string{authWithToken.Token}) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/testutil/http.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/testutil/http.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/testutil/http.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/testutil/http.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -package testutil - -import ( - "bytes" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "time" -) - -type HTTPServer struct { - URL string - Timeout time.Duration - started bool - request chan *http.Request - response chan ResponseFunc -} - -type Response struct { - Status int - Headers map[string]string - Body string -} - -var DefaultClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - }, -} - -func NewHTTPServer() *HTTPServer { - return &HTTPServer{URL: "http://localhost:4444", Timeout: 5 * time.Second} -} - -type ResponseFunc func(path string) Response - -func (s *HTTPServer) Start() { - if s.started { - return - } - s.started = true - s.request = make(chan *http.Request, 1024) - s.response = make(chan ResponseFunc, 1024) - u, err := url.Parse(s.URL) - if err != nil { - panic(err) - } - l, err := net.Listen("tcp", u.Host) - if err != nil { - panic(err) - } - go http.Serve(l, s) - - s.Response(203, nil, "") - for { - // Wait for it to be up. - resp, err := http.Get(s.URL) - if err == nil && resp.StatusCode == 203 { - break - } - time.Sleep(1e8) - } - s.WaitRequest() // Consume dummy request. -} - -// Flush discards all pending requests and responses. 
-func (s *HTTPServer) Flush() { - for { - select { - case <-s.request: - case <-s.response: - default: - return - } - } -} - -func body(req *http.Request) string { - data, err := ioutil.ReadAll(req.Body) - if err != nil { - panic(err) - } - return string(data) -} - -func (s *HTTPServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { - req.ParseMultipartForm(1e6) - data, err := ioutil.ReadAll(req.Body) - if err != nil { - panic(err) - } - req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - s.request <- req - var resp Response - select { - case respFunc := <-s.response: - resp = respFunc(req.URL.Path) - case <-time.After(s.Timeout): - const msg = "ERROR: Timeout waiting for test to prepare a response\n" - fmt.Fprintf(os.Stderr, msg) - resp = Response{500, nil, msg} - } - if resp.Headers != nil { - h := w.Header() - for k, v := range resp.Headers { - h.Set(k, v) - } - } - if resp.Status != 0 { - w.WriteHeader(resp.Status) - } - w.Write([]byte(resp.Body)) -} - -// WaitRequests returns the next n requests made to the http server from -// the queue. If not enough requests were previously made, it waits until -// the timeout value for them to be made. -func (s *HTTPServer) WaitRequests(n int) []*http.Request { - reqs := make([]*http.Request, 0, n) - for i := 0; i < n; i++ { - select { - case req := <-s.request: - reqs = append(reqs, req) - case <-time.After(s.Timeout): - panic("Timeout waiting for request") - } - } - return reqs -} - -// WaitRequest returns the next request made to the http server from -// the queue. If no requests were previously made, it waits until the -// timeout value for one to be made. -func (s *HTTPServer) WaitRequest() *http.Request { - return s.WaitRequests(1)[0] -} - -// ResponseFunc prepares the test server to respond the following n -// requests using f to build each response. 
-func (s *HTTPServer) ResponseFunc(n int, f ResponseFunc) { - for i := 0; i < n; i++ { - s.response <- f - } -} - -// ResponseMap maps request paths to responses. -type ResponseMap map[string]Response - -// ResponseMap prepares the test server to respond the following n -// requests using the m to obtain the responses. -func (s *HTTPServer) ResponseMap(n int, m ResponseMap) { - f := func(path string) Response { - for rpath, resp := range m { - if rpath == path { - return resp - } - } - body := "Path not found in response map: " + path - return Response{Status: 500, Body: body} - } - s.ResponseFunc(n, f) -} - -// Responses prepares the test server to respond the following n requests -// using the provided response parameters. -func (s *HTTPServer) Responses(n int, status int, headers map[string]string, body string) { - f := func(path string) Response { - return Response{status, headers, body} - } - s.ResponseFunc(n, f) -} - -// Response prepares the test server to respond the following request -// using the provided response parameters. -func (s *HTTPServer) Response(status int, headers map[string]string, body string) { - s.Responses(1, status, headers, body) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/testutil/suite.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/testutil/suite.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/testutil/suite.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/mitchellh/goamz/testutil/suite.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -package testutil - -import ( - "flag" - "github.com/mitchellh/goamz/aws" - . "github.com/motain/gocheck" -) - -// Amazon must be used by all tested packages to determine whether to -// run functional tests against the real AWS servers. 
-var Amazon bool - -func init() { - flag.BoolVar(&Amazon, "amazon", false, "Enable tests against amazon server") -} - -type LiveSuite struct { - auth aws.Auth -} - -func (s *LiveSuite) SetUpSuite(c *C) { - if !Amazon { - c.Skip("amazon tests not enabled (-amazon flag)") - } - auth, err := aws.EnvAuth() - if err != nil { - c.Fatal(err.Error()) - } - s.auth = auth -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/awsauth.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/awsauth.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/awsauth.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/awsauth.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,219 @@ +// Package awsauth implements AWS request signing using Signed Signature Version 2, +// Signed Signature Version 3, and Signed Signature Version 4. Supports S3 and STS. +package awsauth + +import ( + "net/http" + "net/url" + "time" +) + +// Credentials stores the information necessary to authorize with AWS and it +// is from this information that requests are signed. +type Credentials struct { + AccessKeyID string + SecretAccessKey string + SecurityToken string `json:"Token"` + Expiration time.Time +} + +// Sign signs a request bound for AWS. It automatically chooses the best +// authentication scheme based on the service the request is going to. +func Sign(request *http.Request, credentials ...Credentials) *http.Request { + service, _ := serviceAndRegion(request.URL.Host) + signVersion := awsSignVersion[service] + + switch signVersion { + case 2: + return Sign2(request, credentials...) + case 3: + return Sign3(request, credentials...) + case 4: + return Sign4(request, credentials...) + case -1: + return SignS3(request, credentials...) + } + + return nil +} + +// Sign4 signs a request with Signed Signature Version 4. 
+func Sign4(request *http.Request, credentials ...Credentials) *http.Request { + keys := chooseKeys(credentials) + + // Add the X-Amz-Security-Token header when using STS + if keys.SecurityToken != "" { + request.Header.Set("X-Amz-Security-Token", keys.SecurityToken) + } + + prepareRequestV4(request) + meta := new(metadata) + + // Task 1 + hashedCanonReq := hashedCanonicalRequestV4(request, meta) + + // Task 2 + stringToSign := stringToSignV4(request, hashedCanonReq, meta) + + // Task 3 + signingKey := signingKeyV4(keys.SecretAccessKey, meta.date, meta.region, meta.service) + signature := signatureV4(signingKey, stringToSign) + + request.Header.Set("Authorization", buildAuthHeaderV4(signature, meta, keys)) + + return request +} + +// Sign3 signs a request with Signed Signature Version 3. +// If the service you're accessing supports Version 4, use that instead. +func Sign3(request *http.Request, credentials ...Credentials) *http.Request { + keys := chooseKeys(credentials) + + // Add the X-Amz-Security-Token header when using STS + if keys.SecurityToken != "" { + request.Header.Set("X-Amz-Security-Token", keys.SecurityToken) + } + + prepareRequestV3(request) + + // Task 1 + stringToSign := stringToSignV3(request) + + // Task 2 + signature := signatureV3(stringToSign, keys) + + // Task 3 + request.Header.Set("X-Amzn-Authorization", buildAuthHeaderV3(signature, keys)) + + return request +} + +// Sign2 signs a request with Signed Signature Version 2. +// If the service you're accessing supports Version 4, use that instead. 
+func Sign2(request *http.Request, credentials ...Credentials) *http.Request { + keys := chooseKeys(credentials) + + // Add the SecurityToken parameter when using STS + // This must be added before the signature is calculated + if keys.SecurityToken != "" { + values := url.Values{} + values.Set("SecurityToken", keys.SecurityToken) + augmentRequestQuery(request, values) + } + + prepareRequestV2(request, keys) + + stringToSign := stringToSignV2(request) + signature := signatureV2(stringToSign, keys) + + values := url.Values{} + values.Set("Signature", signature) + + augmentRequestQuery(request, values) + + return request +} + +// SignS3 signs a request bound for Amazon S3 using their custom +// HTTP authentication scheme. +func SignS3(request *http.Request, credentials ...Credentials) *http.Request { + keys := chooseKeys(credentials) + + // Add the X-Amz-Security-Token header when using STS + if keys.SecurityToken != "" { + request.Header.Set("X-Amz-Security-Token", keys.SecurityToken) + } + + prepareRequestS3(request) + + stringToSign := stringToSignS3(request) + signature := signatureS3(stringToSign, keys) + + authHeader := "AWS " + keys.AccessKeyID + ":" + signature + request.Header.Set("Authorization", authHeader) + + return request +} + +// SignS3Url signs a GET request for a resource on Amazon S3 by appending +// query string parameters containing credentials and signature. You must +// specify an expiration date for these signed requests. After that date, +// a request signed with this method will be rejected by S3. 
+func SignS3Url(request *http.Request, expire time.Time, credentials ...Credentials) *http.Request { + keys := chooseKeys(credentials) + + stringToSign := stringToSignS3Url("GET", expire, request.URL.Path) + signature := signatureS3(stringToSign, keys) + + query := request.URL.Query() + query.Set("AWSAccessKeyId", keys.AccessKeyID) + query.Set("Signature", signature) + query.Set("Expires", timeToUnixEpochString(expire)) + request.URL.RawQuery = query.Encode() + + return request +} + +// expired checks to see if the temporary credentials from an IAM role are +// within 4 minutes of expiration (The IAM documentation says that new keys +// will be provisioned 5 minutes before the old keys expire). Credentials +// that do not have an Expiration cannot expire. +func (this *Credentials) expired() bool { + if this.Expiration.IsZero() { + // Credentials with no expiration can't expire + return false + } + expireTime := this.Expiration.Add(-4 * time.Minute) + // if t - 4 mins is before now, true + if expireTime.Before(time.Now()) { + return true + } else { + return false + } +} + +type metadata struct { + algorithm string + credentialScope string + signedHeaders string + date string + region string + service string +} + +const ( + envAccessKey = "AWS_ACCESS_KEY" + envAccessKeyID = "AWS_ACCESS_KEY_ID" + envSecretKey = "AWS_SECRET_KEY" + envSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + envSecurityToken = "AWS_SECURITY_TOKEN" +) + +var ( + awsSignVersion = map[string]int{ + "autoscaling": 4, + "cloudfront": 4, + "cloudformation": 4, + "cloudsearch": 4, + "monitoring": 4, + "dynamodb": 4, + "ec2": 2, + "elasticmapreduce": 4, + "elastictranscoder": 4, + "elasticache": 2, + "glacier": 4, + "kinesis": 4, + "redshift": 4, + "rds": 4, + "sdb": 2, + "sns": 4, + "sqs": 4, + "s3": 4, + "elasticbeanstalk": 4, + "importexport": 2, + "iam": 4, + "route53": 3, + "elasticloadbalancing": 4, + "email": 3, + } +) diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/awsauth_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/awsauth_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/awsauth_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/awsauth_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,289 @@ +package awsauth + +import ( + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long-running integration test.") + } + + Convey("Given real credentials from environment variables", t, func() { + Convey("A request (with out-of-order query string) with to IAM should succeed (assuming Administrator Access policy)", func() { + request := newRequest("GET", "https://iam.amazonaws.com/?Version=2010-05-08&Action=ListRoles", nil) + + if !credentialsSet() { + SkipSo(http.StatusOK, ShouldEqual, http.StatusOK) + } else { + response := sign4AndDo(request) + if response.StatusCode != http.StatusOK { + message, _ := ioutil.ReadAll(response.Body) + t.Error(string(message)) + } + So(response.StatusCode, ShouldEqual, http.StatusOK) + } + }) + + Convey("A request to S3 should succeed", func() { + request, _ := http.NewRequest("GET", "https://s3.amazonaws.com", nil) + + if !credentialsSet() { + SkipSo(http.StatusOK, ShouldEqual, http.StatusOK) + } else { + response := sign4AndDo(request) + if response.StatusCode != http.StatusOK { + message, _ := ioutil.ReadAll(response.Body) + t.Error(string(message)) + } + So(response.StatusCode, ShouldEqual, http.StatusOK) + } + }) + + Convey("A request to EC2 should succeed", func() { + request := newRequest("GET", "https://ec2.amazonaws.com/?Version=2013-10-15&Action=DescribeInstances", nil) + + if 
!credentialsSet() { + SkipSo(http.StatusOK, ShouldEqual, http.StatusOK) + } else { + response := sign2AndDo(request) + if response.StatusCode != http.StatusOK { + message, _ := ioutil.ReadAll(response.Body) + t.Error(string(message)) + } + So(response.StatusCode, ShouldEqual, http.StatusOK) + } + }) + + Convey("A request to SQS should succeed", func() { + request := newRequest("POST", "https://sqs.us-west-2.amazonaws.com", url.Values{ + "Action": []string{"ListQueues"}, + }) + + if !credentialsSet() { + SkipSo(http.StatusOK, ShouldEqual, http.StatusOK) + } else { + response := sign4AndDo(request) + if response.StatusCode != http.StatusOK { + message, _ := ioutil.ReadAll(response.Body) + t.Error(string(message)) + } + So(response.StatusCode, ShouldEqual, http.StatusOK) + } + }) + + Convey("A request to SES should succeed", func() { + request := newRequest("GET", "https://email.us-east-1.amazonaws.com/?Action=GetSendStatistics", nil) + + if !credentialsSet() { + SkipSo(http.StatusOK, ShouldEqual, http.StatusOK) + } else { + response := sign3AndDo(request) + if response.StatusCode != http.StatusOK { + message, _ := ioutil.ReadAll(response.Body) + t.Error(string(message)) + } + So(response.StatusCode, ShouldEqual, http.StatusOK) + } + }) + + Convey("A request to Route 53 should succeed", func() { + request := newRequest("GET", "https://route53.amazonaws.com/2013-04-01/hostedzone?maxitems=1", nil) + + if !credentialsSet() { + SkipSo(http.StatusOK, ShouldEqual, http.StatusOK) + } else { + response := sign3AndDo(request) + if response.StatusCode != http.StatusOK { + message, _ := ioutil.ReadAll(response.Body) + t.Error(string(message)) + } + So(response.StatusCode, ShouldEqual, http.StatusOK) + } + }) + + Convey("A request to SimpleDB should succeed", func() { + request := newRequest("GET", "https://sdb.amazonaws.com/?Action=ListDomains&Version=2009-04-15", nil) + + if !credentialsSet() { + SkipSo(http.StatusOK, ShouldEqual, http.StatusOK) + } else { + response := 
sign2AndDo(request) + if response.StatusCode != http.StatusOK { + message, _ := ioutil.ReadAll(response.Body) + t.Error(string(message)) + } + So(response.StatusCode, ShouldEqual, http.StatusOK) + } + }) + + Convey("If S3Resource env variable is set", func() { + s3res := os.Getenv("S3Resource") + + Convey("A URL-signed request to that S3 resource should succeed", func() { + request, _ := http.NewRequest("GET", s3res, nil) + + if !credentialsSet() || s3res == "" { + SkipSo(http.StatusOK, ShouldEqual, http.StatusOK) + } else { + response := signS3UrlAndDo(request) + if response.StatusCode != http.StatusOK { + message, _ := ioutil.ReadAll(response.Body) + t.Error(string(message)) + } + So(response.StatusCode, ShouldEqual, http.StatusOK) + } + }) + }) + }) +} + +func TestSign(t *testing.T) { + Convey("Requests to services using Version 2 should be signed accordingly", t, func() { + reqs := []*http.Request{ + newRequest("GET", "https://ec2.amazonaws.com", url.Values{}), + newRequest("GET", "https://elasticache.amazonaws.com/", url.Values{}), + } + for _, request := range reqs { + signedReq := Sign(request) + So(signedReq.URL.Query().Get("SignatureVersion"), ShouldEqual, "2") + } + }) + + Convey("Requests to services using Version 3 should be signed accordingly", t, func() { + reqs := []*http.Request{ + newRequest("GET", "https://route53.amazonaws.com", url.Values{}), + newRequest("GET", "https://email.us-east-1.amazonaws.com/", url.Values{}), + } + for _, request := range reqs { + signedReq := Sign(request) + So(signedReq.Header.Get("X-Amzn-Authorization"), ShouldNotBeBlank) + } + }) + + Convey("Requests to services using Version 4 should be signed accordingly", t, func() { + reqs := []*http.Request{ + newRequest("POST", "https://sqs.amazonaws.com/", url.Values{}), + newRequest("GET", "https://iam.amazonaws.com", url.Values{}), + newRequest("GET", "https://s3.amazonaws.com", url.Values{}), + } + for _, request := range reqs { + signedReq := Sign(request) + 
So(signedReq.Header.Get("Authorization"), ShouldContainSubstring, ", Signature=") + } + }) + + var keys Credentials + keys = newKeys() + Convey("Requests to services using existing credentials Version 2 should be signed accordingly", t, func() { + reqs := []*http.Request{ + newRequest("GET", "https://ec2.amazonaws.com", url.Values{}), + newRequest("GET", "https://elasticache.amazonaws.com/", url.Values{}), + } + for _, request := range reqs { + signedReq := Sign(request, keys) + So(signedReq.URL.Query().Get("SignatureVersion"), ShouldEqual, "2") + } + }) + + Convey("Requests to services using existing credentials Version 3 should be signed accordingly", t, func() { + reqs := []*http.Request{ + newRequest("GET", "https://route53.amazonaws.com", url.Values{}), + newRequest("GET", "https://email.us-east-1.amazonaws.com/", url.Values{}), + } + for _, request := range reqs { + signedReq := Sign(request, keys) + So(signedReq.Header.Get("X-Amzn-Authorization"), ShouldNotBeBlank) + } + }) + + Convey("Requests to services using existing credentials Version 4 should be signed accordingly", t, func() { + reqs := []*http.Request{ + newRequest("POST", "https://sqs.amazonaws.com/", url.Values{}), + newRequest("GET", "https://iam.amazonaws.com", url.Values{}), + newRequest("GET", "https://s3.amazonaws.com", url.Values{}), + } + for _, request := range reqs { + signedReq := Sign(request, keys) + So(signedReq.Header.Get("Authorization"), ShouldContainSubstring, ", Signature=") + } + }) +} + +func TestExpiration(t *testing.T) { + var credentials = &Credentials{} + + Convey("Credentials without an expiration can't expire", t, func() { + So(credentials.expired(), ShouldBeFalse) + }) + + Convey("Credentials that expire in 5 minutes aren't expired", t, func() { + credentials.Expiration = time.Now().Add(5 * time.Minute) + So(credentials.expired(), ShouldBeFalse) + }) + + Convey("Credentials that expire in 1 minute are expired", t, func() { + credentials.Expiration = time.Now().Add(1 * 
time.Minute) + So(credentials.expired(), ShouldBeTrue) + }) + + Convey("Credentials that expired 2 hours ago are expired", t, func() { + credentials.Expiration = time.Now().Add(-2 * time.Hour) + So(credentials.expired(), ShouldBeTrue) + }) +} + +func credentialsSet() bool { + var keys Credentials + keys = newKeys() + if keys.AccessKeyID == "" { + return false + } else { + return true + } +} + +func newRequest(method string, url string, v url.Values) *http.Request { + request, _ := http.NewRequest(method, url, strings.NewReader(v.Encode())) + return request +} + +func sign2AndDo(request *http.Request) *http.Response { + Sign2(request) + response, _ := client.Do(request) + return response +} + +func sign3AndDo(request *http.Request) *http.Response { + Sign3(request) + response, _ := client.Do(request) + return response +} + +func sign4AndDo(request *http.Request) *http.Response { + Sign4(request) + response, _ := client.Do(request) + return response +} + +func signS3AndDo(request *http.Request) *http.Response { + SignS3(request) + response, _ := client.Do(request) + return response +} + +func signS3UrlAndDo(request *http.Request) *http.Response { + SignS3Url(request, time.Now().AddDate(0, 0, 1)) + response, _ := client.Do(request) + return response +} + +var client = &http.Client{} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/common.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/common.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/common.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/common.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,311 @@ +package awsauth + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + 
"time" +) + +type location struct { + ec2 bool + checked bool +} + +var loc *location + +// serviceAndRegion parsers a hostname to find out which ones it is. +// http://docs.aws.amazon.com/general/latest/gr/rande.html +func serviceAndRegion(host string) (service string, region string) { + // These are the defaults if the hostname doesn't suggest something else + region = "us-east-1" + service = "s3" + + parts := strings.Split(host, ".") + if len(parts) == 4 { + // Either service.region.amazonaws.com or virtual-host.region.amazonaws.com + if parts[1] == "s3" { + service = "s3" + } else if strings.HasPrefix(parts[1], "s3-") { + region = parts[1][3:] + service = "s3" + } else { + service = parts[0] + region = parts[1] + } + } else if len(parts) == 5 { + service = parts[2] + region = parts[1] + } else { + // Either service.amazonaws.com or s3-region.amazonaws.com + if strings.HasPrefix(parts[0], "s3-") { + region = parts[0][3:] + } else { + service = parts[0] + } + } + + if region == "external-1" { + region = "us-east-1" + } + + return +} + +// newKeys produces a set of credentials based on the environment +func newKeys() (newCredentials Credentials) { + // First use credentials from environment variables + newCredentials.AccessKeyID = os.Getenv(envAccessKeyID) + if newCredentials.AccessKeyID == "" { + newCredentials.AccessKeyID = os.Getenv(envAccessKey) + } + + newCredentials.SecretAccessKey = os.Getenv(envSecretAccessKey) + if newCredentials.SecretAccessKey == "" { + newCredentials.SecretAccessKey = os.Getenv(envSecretKey) + } + + newCredentials.SecurityToken = os.Getenv(envSecurityToken) + + // If there is no Access Key and you are on EC2, get the key from the role + if (newCredentials.AccessKeyID == "" || newCredentials.SecretAccessKey == "") && onEC2() { + newCredentials = *getIAMRoleCredentials() + } + + // If the key is expiring, get a new key + if newCredentials.expired() && onEC2() { + newCredentials = *getIAMRoleCredentials() + } + + return newCredentials +} 
+ +// checkKeys gets credentials depending on if any were passed in as an argument +// or it makes new ones based on the environment. +func chooseKeys(cred []Credentials) Credentials { + if len(cred) == 0 { + return newKeys() + } else { + return cred[0] + } +} + +// onEC2 checks to see if the program is running on an EC2 instance. +// It does this by looking for the EC2 metadata service. +// This caches that information in a struct so that it doesn't waste time. +func onEC2() bool { + if loc == nil { + loc = &location{} + } + if !(loc.checked) { + c, err := net.DialTimeout("tcp", "169.254.169.254:80", time.Millisecond*100) + + if err != nil { + loc.ec2 = false + } else { + c.Close() + loc.ec2 = true + } + loc.checked = true + } + + return loc.ec2 +} + +// getIAMRoleList gets a list of the roles that are available to this instance +func getIAMRoleList() []string { + + var roles []string + url := "http://169.254.169.254/latest/meta-data/iam/security-credentials/" + + client := &http.Client{} + + request, err := http.NewRequest("GET", url, nil) + + if err != nil { + return roles + } + + response, err := client.Do(request) + + if err != nil { + return roles + } + defer response.Body.Close() + + scanner := bufio.NewScanner(response.Body) + for scanner.Scan() { + roles = append(roles, scanner.Text()) + } + return roles +} + +func getIAMRoleCredentials() *Credentials { + + roles := getIAMRoleList() + + if len(roles) < 1 { + return &Credentials{} + } + + // Use the first role in the list + role := roles[0] + + url := "http://169.254.169.254/latest/meta-data/iam/security-credentials/" + + // Create the full URL of the role + var buffer bytes.Buffer + buffer.WriteString(url) + buffer.WriteString(role) + roleURL := buffer.String() + + // Get the role + roleRequest, err := http.NewRequest("GET", roleURL, nil) + + if err != nil { + return &Credentials{} + } + + client := &http.Client{} + roleResponse, err := client.Do(roleRequest) + + if err != nil { + return &Credentials{} + } 
+ defer roleResponse.Body.Close() + + roleBuffer := new(bytes.Buffer) + roleBuffer.ReadFrom(roleResponse.Body) + + credentials := Credentials{} + + err = json.Unmarshal(roleBuffer.Bytes(), &credentials) + + if err != nil { + return &Credentials{} + } + + return &credentials + +} + +func augmentRequestQuery(request *http.Request, values url.Values) *http.Request { + for key, array := range request.URL.Query() { + for _, value := range array { + values.Set(key, value) + } + } + + request.URL.RawQuery = values.Encode() + + return request +} + +func hmacSHA256(key []byte, content string) []byte { + mac := hmac.New(sha256.New, key) + mac.Write([]byte(content)) + return mac.Sum(nil) +} + +func hmacSHA1(key []byte, content string) []byte { + mac := hmac.New(sha1.New, key) + mac.Write([]byte(content)) + return mac.Sum(nil) +} + +func hashSHA256(content []byte) string { + h := sha256.New() + h.Write(content) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func hashMD5(content []byte) string { + h := md5.New() + h.Write(content) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func readAndReplaceBody(request *http.Request) []byte { + if request.Body == nil { + return []byte{} + } + payload, _ := ioutil.ReadAll(request.Body) + request.Body = ioutil.NopCloser(bytes.NewReader(payload)) + return payload +} + +func concat(delim string, str ...string) string { + return strings.Join(str, delim) +} + +var now = func() time.Time { + return time.Now().UTC() +} + +func normuri(uri string) string { + parts := strings.Split(uri, "/") + for i := range parts { + parts[i] = encodePathFrag(parts[i]) + } + return strings.Join(parts, "/") +} + +func encodePathFrag(s string) string { + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + hexCount++ + } + } + t := make([]byte, len(s)+2*hexCount) + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + t[j] = '%' + t[j+1] = "0123456789ABCDEF"[c>>4] + t[j+2] = "0123456789ABCDEF"[c&15] + j 
+= 3 + } else { + t[j] = c + j++ + } + } + return string(t) +} + +func shouldEscape(c byte) bool { + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + return false + } + if '0' <= c && c <= '9' { + return false + } + if c == '-' || c == '_' || c == '.' || c == '~' { + return false + } + return true +} + +func normquery(v url.Values) string { + queryString := v.Encode() + + // Go encodes a space as '+' but Amazon requires '%20'. Luckily any '+' in the + // original query string has been percent escaped so all '+' chars that are left + // were originally spaces. + + return strings.Replace(queryString, "+", "%20", -1) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/common_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/common_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/common_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/common_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,89 @@ +package awsauth + +import ( + "net/url" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestCommonFunctions(t *testing.T) { + Convey("Service and region should be properly extracted from host strings", t, func() { + service, region := serviceAndRegion("sqs.us-west-2.amazonaws.com") + So(service, ShouldEqual, "sqs") + So(region, ShouldEqual, "us-west-2") + + service, region = serviceAndRegion("iam.amazonaws.com") + So(service, ShouldEqual, "iam") + So(region, ShouldEqual, "us-east-1") + + service, region = serviceAndRegion("sns.us-west-2.amazonaws.com") + So(service, ShouldEqual, "sns") + So(region, ShouldEqual, "us-west-2") + + service, region = serviceAndRegion("bucketname.s3.amazonaws.com") + So(service, ShouldEqual, "s3") + So(region, ShouldEqual, "us-east-1") + + service, region = serviceAndRegion("s3.amazonaws.com") + So(service, ShouldEqual, "s3") + So(region, ShouldEqual, "us-east-1") + + service, region = serviceAndRegion("s3-us-west-1.amazonaws.com") + So(service, ShouldEqual, "s3") + So(region, ShouldEqual, "us-west-1") + + service, region = serviceAndRegion("s3-external-1.amazonaws.com") + So(service, ShouldEqual, "s3") + So(region, ShouldEqual, "us-east-1") + }) + + Convey("MD5 hashes should be properly computed and base-64 encoded", t, func() { + input := []byte("Pretend this is a REALLY long byte array...") + actual := hashMD5(input) + + So(actual, ShouldEqual, "KbVTY8Vl6VccnzQf1AGOFw==") + }) + + Convey("SHA-256 hashes should be properly hex-encoded (base 16)", t, func() { + input := []byte("This is... 
Sparta!!") + actual := hashSHA256(input) + + So(actual, ShouldEqual, "5c81a4ef1172e89b1a9d575f4cd82f4ed20ea9137e61aa7f1ab936291d24e79a") + }) + + Convey("Given a key and contents", t, func() { + key := []byte("asdf1234") + contents := "SmartyStreets was here" + + Convey("HMAC-SHA256 should be properly computed", func() { + expected := []byte{65, 46, 186, 78, 2, 155, 71, 104, 49, 37, 5, 66, 195, 129, 159, 227, 239, 53, 240, 107, 83, 21, 235, 198, 238, 216, 108, 149, 143, 222, 144, 94} + actual := hmacSHA256(key, contents) + + So(actual, ShouldResemble, expected) + }) + + Convey("HMAC-SHA1 should be properly computed", func() { + expected := []byte{164, 77, 252, 0, 87, 109, 207, 110, 163, 75, 228, 122, 83, 255, 233, 237, 125, 206, 85, 70} + actual := hmacSHA1(key, contents) + + So(actual, ShouldResemble, expected) + }) + }) + + Convey("Strings should be properly concatenated with a delimiter", t, func() { + So(concat("\n", "Test1", "Test2"), ShouldEqual, "Test1\nTest2") + So(concat(".", "Test1"), ShouldEqual, "Test1") + So(concat("\t", "1", "2", "3", "4"), ShouldEqual, "1\t2\t3\t4") + }) + + Convey("URI components should be properly encoded", t, func() { + So(normuri("/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"), ShouldEqual, "/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz") + So(normuri("/ /foo"), ShouldEqual, "/%20/foo") + So(normuri("/(foo)"), ShouldEqual, "/%28foo%29") + }) + + Convey("URI query strings should be properly encoded", t, func() { + So(normquery(url.Values{"p": []string{" +&;-=._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"}}), ShouldEqual, "p=%20%2B%26%3B-%3D._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz") + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/LICENSE aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/LICENSE --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/LICENSE 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 SmartyStreets + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/README.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,86 @@ +go-aws-auth +=========== + +[![GoDoc](https://godoc.org/github.com/smartystreets/go-aws-auth?status.svg)](http://godoc.org/github.com/smartystreets/go-aws-auth) + +Go-AWS-Auth is a comprehensive, lightweight library for signing requests to Amazon Web Services. + +It's easy to use: simply build your HTTP request and call `awsauth.Sign(req)` before sending your request over the wire. + + + +### Supported signing mechanisms + +- [Signed Signature Version 2](http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) +- [Signed Signature Version 3](http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +- [Signed Signature Version 4](http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +- [Custom S3 Authentication Scheme](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) +- [Security Token Service](http://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html) +- [S3 Query String Authentication](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +- [IAM Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials) + +For more info about AWS authentication, see the [comprehensive docs](http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) at AWS. 
+ + +### Install + +Go get it: + + $ go get github.com/smartystreets/go-aws-auth + +Then import it: + + import "github.com/smartystreets/go-aws-auth" + + +### Using your AWS Credentials + +The library looks for credentials in this order: + +1. **Hard-code:** You can manually pass in an instance of `awsauth.Credentials` to any call to a signing function as a second argument: + + ```go + awsauth.Sign(req, awsauth.Credentials{ + AccessKeyID: "Access Key ID", + SecretAccessKey: "Secret Access Key", + SecurityToken: "Security Token", // STS (optional) + }) + ``` + + +2. **Environment variables:** Set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables with your credentials. The library will automatically detect and use them. Optionally, you may also set the `AWS_SECURITY_TOKEN` environment variable if you are using temporary credentials from [STS](http://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html). + +3. **IAM Role:** If running on EC2 and the credentials are neither hard-coded nor in the environment, go-aws-auth will detect the first IAM role assigned to the current EC2 instance and use those credentials. + +(Be especially careful hard-coding credentials into your application if the code is committed to source control.) + + + +### Signing requests + +Just make the request, have it signed, and perform the request as you normally would. 
+ +```go +url := "https://iam.amazonaws.com/?Action=ListRoles&Version=2010-05-08" +client := new(http.Client) + +req, err := http.NewRequest("GET", url, nil) + +awsauth.Sign(req) // Automatically chooses the best signing mechanism for the service + +resp, err := client.Do(req) +``` + +You can use `Sign` to have the library choose the best signing algorithm depending on the service, or you can specify it manually if you know what you need: + +- `Sign2` +- `Sign3` +- `Sign4` +- `SignS3` (deprecated for Sign4) +- `SignS3Url` (for pre-signed S3 URLs; GETs only) + + + +### Contributing + +Please feel free to contribute! Bug fixes are more than welcome any time, as long as tests assert correct behavior. If you'd like to change an existing implementation or see a new feature, open an issue first so we can discuss it. Thanks to all contributors! diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/s3.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/s3.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/s3.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/s3.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,126 @@ +package awsauth + +import ( + "encoding/base64" + "net/http" + "sort" + "strconv" + "strings" + "time" +) + +func signatureS3(stringToSign string, keys Credentials) string { + hashed := hmacSHA1([]byte(keys.SecretAccessKey), stringToSign) + return base64.StdEncoding.EncodeToString(hashed) +} + +func stringToSignS3(request *http.Request) string { + str := request.Method + "\n" + + if request.Header.Get("Content-Md5") != "" { + str += request.Header.Get("Content-Md5") + } + str += "\n" + + str += request.Header.Get("Content-Type") + "\n" + + if request.Header.Get("Date") != "" { + str += request.Header.Get("Date") + } else { + str += timestampS3() + } + + str += "\n" + + canonicalHeaders 
:= canonicalAmzHeadersS3(request) + if canonicalHeaders != "" { + str += canonicalHeaders + } + + str += canonicalResourceS3(request) + + return str +} + +func stringToSignS3Url(method string, expire time.Time, path string) string { + return method + "\n\n\n" + timeToUnixEpochString(expire) + "\n" + path +} + +func timeToUnixEpochString(t time.Time) string { + return strconv.FormatInt(t.Unix(), 10) +} + +func canonicalAmzHeadersS3(request *http.Request) string { + var headers []string + + for header := range request.Header { + standardized := strings.ToLower(strings.TrimSpace(header)) + if strings.HasPrefix(standardized, "x-amz") { + headers = append(headers, standardized) + } + } + + sort.Strings(headers) + + for i, header := range headers { + headers[i] = header + ":" + strings.Replace(request.Header.Get(header), "\n", " ", -1) + } + + if len(headers) > 0 { + return strings.Join(headers, "\n") + "\n" + } else { + return "" + } +} + +func canonicalResourceS3(request *http.Request) string { + res := "" + + if isS3VirtualHostedStyle(request) { + bucketname := strings.Split(request.Host, ".")[0] + res += "/" + bucketname + } + + uri := request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = request.URL.Path + } + if uri == "" { + uri = "/" + } + + res += uri + + for _, subres := range strings.Split(subresourcesS3, ",") { + if strings.HasPrefix(request.URL.RawQuery, subres) { + res += "?" 
+ subres + } + } + + return res +} + +func prepareRequestS3(request *http.Request) *http.Request { + request.Header.Set("Date", timestampS3()) + if request.URL.Path == "" { + request.URL.Path += "/" + } + return request +} + +// Info: http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html +func isS3VirtualHostedStyle(request *http.Request) bool { + service, _ := serviceAndRegion(request.Host) + return service == "s3" && strings.Count(request.Host, ".") == 3 +} + +func timestampS3() string { + return now().Format(timeFormatS3) +} + +const ( + timeFormatS3 = time.RFC1123Z + subresourcesS3 = "acl,lifecycle,location,logging,notification,partNumber,policy,requestPayment,torrent,uploadId,uploads,versionId,versioning,versions,website" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/s3_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/s3_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/s3_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/s3_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,153 @@ +package awsauth + +import ( + "fmt" + "net/http" + "net/url" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestSignatureS3(t *testing.T) { + // http://docs.aws.amazon.com/AmazonS3/2006-03-01/dev/RESTAuthentication.html + // Note: S3 now supports signed signature version 4 + // (but signed URL requests still utilize a lot of the same functionality) + + Convey("Given a GET request to Amazon S3", t, func() { + keys := *testCredS3 + request := test_plainRequestS3() + + // Mock time + now = func() time.Time { + parsed, _ := time.Parse(timeFormatS3, exampleReqTsS3) + return parsed + } + + Convey("The request should be prepared with a Date header", func() { + prepareRequestS3(request) + So(request.Header.Get("Date"), ShouldEqual, exampleReqTsS3) + }) + + Convey("The CanonicalizedAmzHeaders should be built properly", func() { + req2 := test_headerRequestS3() + actual := canonicalAmzHeadersS3(req2) + So(actual, ShouldEqual, expectedCanonAmzHeadersS3) + }) + + Convey("The CanonicalizedResource should be built properly", func() { + actual := canonicalResourceS3(request) + So(actual, ShouldEqual, expectedCanonResourceS3) + }) + + Convey("The string to sign should be correct", func() { + actual := stringToSignS3(request) + So(actual, ShouldEqual, expectedStringToSignS3) + }) + + Convey("The final signature string should be exactly correct", func() { + actual := signatureS3(stringToSignS3(request), keys) + So(actual, ShouldEqual, "bWq2s1WEIj+Ydj0vQ697zp+IXMU=") + }) + }) + + Convey("Given a GET request for a resource on S3 for query string authentication", t, func() { + keys := *testCredS3 + request, _ := http.NewRequest("GET", "https://johnsmith.s3.amazonaws.com/johnsmith/photos/puppy.jpg", nil) + + now = func() time.Time { + parsed, _ := time.Parse(timeFormatS3, exampleReqTsS3) + return parsed + } + + Convey("The string to sign should be correct", func() { + actual := stringToSignS3Url("GET", now(), request.URL.Path) + So(actual, ShouldEqual, expectedStringToSignS3Url) + }) + + Convey("The signature of string to sign should be 
correct", func() { + actual := signatureS3(expectedStringToSignS3Url, keys) + So(actual, ShouldEqual, "R2K/+9bbnBIbVDCs7dqlz3XFtBQ=") + }) + + Convey("The finished signed URL should be correct", func() { + expiry := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + So(SignS3Url(request, expiry, keys).URL.String(), ShouldEqual, expectedSignedS3Url) + }) + }) +} + +func TestS3STSRequestPreparer(t *testing.T) { + Convey("Given a plain request with no custom headers", t, func() { + request := test_plainRequestS3() + + Convey("And a set of credentials with an STS token", func() { + keys := *testCredS3WithSTS + + Convey("It should include an X-Amz-Security-Token when the request is signed", func() { + actualSigned := SignS3(request, keys) + actual := actualSigned.Header.Get("X-Amz-Security-Token") + + So(actual, ShouldNotBeBlank) + So(actual, ShouldEqual, testCredS3WithSTS.SecurityToken) + + }) + }) + }) +} + +func test_plainRequestS3() *http.Request { + request, _ := http.NewRequest("GET", "https://johnsmith.s3.amazonaws.com/photos/puppy.jpg", nil) + return request +} + +func test_headerRequestS3() *http.Request { + request := test_plainRequestS3() + request.Header.Set("X-Amz-Meta-Something", "more foobar") + request.Header.Set("X-Amz-Date", "foobar") + request.Header.Set("X-Foobar", "nanoo-nanoo") + return request +} + +func TestCanonical(t *testing.T) { + expectedCanonicalString := "PUT\nc8fdb181845a4ca6b8fec737b3581d76\ntext/html\nThu, 17 Nov 2005 18:49:58 GMT\nx-amz-magic:abracadabra\nx-amz-meta-author:foo@bar.com\n/quotes/nelson" + + origUrl := "https://s3.amazonaws.com/" + resource := "/quotes/nelson" + + u, _ := url.ParseRequestURI(origUrl) + u.Path = resource + urlStr := fmt.Sprintf("%v", u) + + request, _ := http.NewRequest("PUT", urlStr, nil) + request.Header.Add("Content-Md5", "c8fdb181845a4ca6b8fec737b3581d76") + request.Header.Add("Content-Type", "text/html") + request.Header.Add("Date", "Thu, 17 Nov 2005 18:49:58 GMT") + 
request.Header.Add("X-Amz-Meta-Author", "foo@bar.com") + request.Header.Add("X-Amz-Magic", "abracadabra") + + if stringToSignS3(request) != expectedCanonicalString { + t.Errorf("----Got\n***%s***\n----Expected\n***%s***", stringToSignS3(request), expectedCanonicalString) + } +} + +var ( + testCredS3 = &Credentials{ + AccessKeyID: "AKIAIOSFODNN7EXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + } + + testCredS3WithSTS = &Credentials{ + AccessKeyID: "AKIDEXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + SecurityToken: "AQoDYXdzEHcaoAJ1Aqwx1Sum0iW2NQjXJcWlKR7vuB6lnAeGBaQnjDRZPVyniwc48ml5hx+0qiXenVJdfusMMl9XLhSncfhx9Rb1UF8IAOaQ+CkpWXvoH67YYN+93dgckSVgVEBRByTl/BvLOZhe0ii/pOWkuQtBm5T7lBHRe4Dfmxy9X6hd8L3FrWxgnGV3fWZ3j0gASdYXaa+VBJlU0E2/GmCzn3T+t2mjYaeoInAnYVKVpmVMOrh6lNAeETTOHElLopblSa7TAmROq5xHIyu4a9i2qwjERTwa3Yk4Jk6q7JYVA5Cu7kS8wKVml8LdzzCTsy+elJgvH+Jf6ivpaHt/En0AJ5PZUJDev2+Y5+9j4AYfrmXfm4L73DC1ZJFJrv+Yh+EXAMPLE=", + } + + expectedCanonAmzHeadersS3 = "x-amz-date:foobar\nx-amz-meta-something:more foobar\n" + expectedCanonResourceS3 = "/johnsmith/photos/puppy.jpg" + expectedStringToSignS3 = "GET\n\n\nTue, 27 Mar 2007 19:36:42 +0000\n/johnsmith/photos/puppy.jpg" + expectedStringToSignS3Url = "GET\n\n\n1175024202\n/johnsmith/photos/puppy.jpg" + expectedSignedS3Url = "https://johnsmith.s3.amazonaws.com/johnsmith/photos/puppy.jpg?AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&Expires=1257894000&Signature=X%2FarTLAJP08uP1Bsap52rwmsVok%3D" + exampleReqTsS3 = "Tue, 27 Mar 2007 19:36:42 +0000" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign2.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign2.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign2.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign2.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 
+1,50 @@ +package awsauth + +import ( + "encoding/base64" + "net/http" + "net/url" + "strings" +) + +func prepareRequestV2(request *http.Request, keys Credentials) *http.Request { + + keyID := keys.AccessKeyID + + values := url.Values{} + values.Set("AWSAccessKeyId", keyID) + values.Set("SignatureVersion", "2") + values.Set("SignatureMethod", "HmacSHA256") + values.Set("Timestamp", timestampV2()) + + augmentRequestQuery(request, values) + + if request.URL.Path == "" { + request.URL.Path += "/" + } + + return request +} + +func stringToSignV2(request *http.Request) string { + str := request.Method + "\n" + str += strings.ToLower(request.URL.Host) + "\n" + str += request.URL.Path + "\n" + str += canonicalQueryStringV2(request) + return str +} + +func signatureV2(strToSign string, keys Credentials) string { + hashed := hmacSHA256([]byte(keys.SecretAccessKey), strToSign) + return base64.StdEncoding.EncodeToString(hashed) +} + +func canonicalQueryStringV2(request *http.Request) string { + return request.URL.RawQuery +} + +func timestampV2() string { + return now().Format(timeFormatV2) +} + +const timeFormatV2 = "2006-01-02T15:04:05" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign2_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign2_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign2_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign2_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,126 @@ +package awsauth + +import ( + "net/http" + "net/url" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestSignature2(t *testing.T) { + // http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html + + Convey("Given bogus credentials", t, func() { + keys := *testCredV2 + + // Mock time + now = func() time.Time { + parsed, _ := time.Parse(timeFormatV2, exampleReqTsV2) + return parsed + } + + Convey("Given a plain request that is unprepared", func() { + request := test_plainRequestV2() + + Convey("The request should be prepared to be signed", func() { + expectedUnsigned := test_unsignedRequestV2() + prepareRequestV2(request, keys) + So(request, ShouldResemble, expectedUnsigned) + }) + }) + + Convey("Given a prepared, but unsigned, request", func() { + request := test_unsignedRequestV2() + + Convey("The canonical query string should be correct", func() { + actual := canonicalQueryStringV2(request) + expected := canonicalQsV2 + So(actual, ShouldEqual, expected) + }) + + Convey("The absolute path should be extracted correctly", func() { + So(request.URL.Path, ShouldEqual, "/") + }) + + Convey("The string to sign should be well-formed", func() { + actual := stringToSignV2(request) + So(actual, ShouldEqual, expectedStringToSignV2) + }) + + Convey("The resulting signature should be correct", func() { + actual := signatureV2(stringToSignV2(request), keys) + So(actual, ShouldEqual, "i91nKc4PWAt0JJIdXwz9HxZCJDdiy6cf/Mj6vPxyYIs=") + }) + + Convey("The final signed request should be correctly formed", func() { + Sign2(request, keys) + actual := request.URL.String() + So(actual, ShouldResemble, expectedFinalUrlV2) + }) + }) + }) +} + +func TestVersion2STSRequestPreparer(t *testing.T) { + Convey("Given a plain request ", t, func() { + request := test_plainRequestV2() + + Convey("And a set of credentials with an STS token", func() { + var keys Credentials + keys = *testCredV2WithSTS + + Convey("It should include the SecurityToken parameter when the request is signed", func() { + actualSigned := Sign2(request, keys) + 
actual := actualSigned.URL.Query()["SecurityToken"][0] + + So(actual, ShouldNotBeBlank) + So(actual, ShouldEqual, testCredV2WithSTS.SecurityToken) + + }) + }) + }) + +} + +func test_plainRequestV2() *http.Request { + values := url.Values{} + values.Set("Action", "DescribeJobFlows") + values.Set("Version", "2009-03-31") + + url := baseUrlV2 + "?" + values.Encode() + + request, err := http.NewRequest("GET", url, nil) + if err != nil { + panic(err) + } + + return request +} + +func test_unsignedRequestV2() *http.Request { + request := test_plainRequestV2() + newUrl, _ := url.Parse(baseUrlV2 + "/?" + canonicalQsV2) + request.URL = newUrl + return request +} + +var ( + testCredV2 = &Credentials{ + AccessKeyID: "AKIAIOSFODNN7EXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + } + + testCredV2WithSTS = &Credentials{ + AccessKeyID: "AKIDEXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + SecurityToken: "AQoDYXdzEHcaoAJ1Aqwx1Sum0iW2NQjXJcWlKR7vuB6lnAeGBaQnjDRZPVyniwc48ml5hx+0qiXenVJdfusMMl9XLhSncfhx9Rb1UF8IAOaQ+CkpWXvoH67YYN+93dgckSVgVEBRByTl/BvLOZhe0ii/pOWkuQtBm5T7lBHRe4Dfmxy9X6hd8L3FrWxgnGV3fWZ3j0gASdYXaa+VBJlU0E2/GmCzn3T+t2mjYaeoInAnYVKVpmVMOrh6lNAeETTOHElLopblSa7TAmROq5xHIyu4a9i2qwjERTwa3Yk4Jk6q7JYVA5Cu7kS8wKVml8LdzzCTsy+elJgvH+Jf6ivpaHt/En0AJ5PZUJDev2+Y5+9j4AYfrmXfm4L73DC1ZJFJrv+Yh+EXAMPLE=", + } + + exampleReqTsV2 = "2011-10-03T15:19:30" + baseUrlV2 = "https://elasticmapreduce.amazonaws.com" + canonicalQsV2 = "AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&Action=DescribeJobFlows&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=2011-10-03T15%3A19%3A30&Version=2009-03-31" + expectedStringToSignV2 = "GET\nelasticmapreduce.amazonaws.com\n/\n" + canonicalQsV2 + expectedFinalUrlV2 = baseUrlV2 + "/?AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&Action=DescribeJobFlows&Signature=i91nKc4PWAt0JJIdXwz9HxZCJDdiy6cf%2FMj6vPxyYIs%3D&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=2011-10-03T15%3A19%3A30&Version=2009-03-31" +) diff -Nru 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign3.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign3.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign3.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign3.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,58 @@ +// Thanks to Michael Vierling for contributing sign3.go + +package awsauth + +import ( + "encoding/base64" + "net/http" + "time" +) + +func stringToSignV3(request *http.Request) string { + // TASK 1. http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/RESTAuthentication.html#StringToSign + + return request.Header.Get("Date") + request.Header.Get("x-amz-nonce") +} + +func signatureV3(stringToSign string, keys Credentials) string { + // TASK 2. http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/RESTAuthentication.html#Signature + + hash := hmacSHA256([]byte(keys.SecretAccessKey), stringToSign) + return base64.StdEncoding.EncodeToString(hash) +} + +func buildAuthHeaderV3(signature string, keys Credentials) string { + // TASK 3. 
http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/RESTAuthentication.html#AuthorizationHeader + + return "AWS3-HTTPS AWSAccessKeyId=" + keys.AccessKeyID + + ", Algorithm=HmacSHA256" + + ", Signature=" + signature +} + +func prepareRequestV3(request *http.Request) *http.Request { + ts := timestampV3() + necessaryDefaults := map[string]string{ + "Content-Type": "application/x-www-form-urlencoded; charset=utf-8", + "x-amz-date": ts, + "Date": ts, + "x-amz-nonce": "", + } + + for header, value := range necessaryDefaults { + if request.Header.Get(header) == "" { + request.Header.Set(header, value) + } + } + + if request.URL.Path == "" { + request.URL.Path += "/" + } + + return request +} + +func timestampV3() string { + return now().Format(timeFormatV3) +} + +const timeFormatV3 = time.RFC1123 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign3_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign3_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign3_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign3_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,122 @@ +package awsauth + +import ( + "net/http" + "net/url" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestSignature3(t *testing.T) { + // http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/RESTAuthentication.html + // http://docs.aws.amazon.com/ses/latest/DeveloperGuide/query-interface-authentication.html + + Convey("Given bogus credentials", t, func() { + keys := *testCredV3 + + // Mock time + now = func() time.Time { + parsed, _ := time.Parse(timeFormatV3, exampleReqTsV3) + return parsed + } + + Convey("Given a plain request that is unprepared", func() { + request := test_plainRequestV3() + + Convey("The request should be prepared to be signed", func() { + expectedUnsigned := test_unsignedRequestV3() + prepareRequestV3(request) + So(request, ShouldResemble, expectedUnsigned) + }) + }) + + Convey("Given a prepared, but unsigned, request", func() { + request := test_unsignedRequestV3() + + Convey("The absolute path should be extracted correctly", func() { + So(request.URL.Path, ShouldEqual, "/") + }) + + Convey("The string to sign should be well-formed", func() { + actual := stringToSignV3(request) + So(actual, ShouldEqual, expectedStringToSignV3) + }) + + Convey("The resulting signature should be correct", func() { + actual := signatureV3(stringToSignV3(request), keys) + So(actual, ShouldEqual, "PjAJ6buiV6l4WyzmmuwtKE59NJXVg5Dr3Sn4PCMZ0Yk=") + }) + + Convey("The final signed request should be correctly formed", func() { + Sign3(request, keys) + actual := request.Header.Get("X-Amzn-Authorization") + So(actual, ShouldResemble, expectedAuthHeaderV3) + }) + }) + }) +} + +func test_plainRequestV3() *http.Request { + values := url.Values{} + values.Set("Action", "GetSendStatistics") + values.Set("Version", "2010-12-01") + + url := baseUrlV3 + "/?" 
+ values.Encode() + + request, err := http.NewRequest("GET", url, nil) + if err != nil { + panic(err) + } + + return request +} + +func test_unsignedRequestV3() *http.Request { + request := test_plainRequestV3() + request.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + request.Header.Set("x-amz-date", exampleReqTsV3) + request.Header.Set("Date", exampleReqTsV3) + request.Header.Set("x-amz-nonce", "") + return request +} + +func TestVersion3STSRequestPreparer(t *testing.T) { + Convey("Given a plain request with no custom headers", t, func() { + request := test_plainRequestV3() + + Convey("And a set of credentials with an STS token", func() { + var keys Credentials + keys = *testCredV3WithSTS + + Convey("It should include an X-Amz-Security-Token when the request is signed", func() { + actualSigned := Sign3(request, keys) + actual := actualSigned.Header.Get("X-Amz-Security-Token") + + So(actual, ShouldNotBeBlank) + So(actual, ShouldEqual, testCredV4WithSTS.SecurityToken) + + }) + }) + }) + +} + +var ( + testCredV3 = &Credentials{ + AccessKeyID: "AKIAIOSFODNN7EXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + } + + testCredV3WithSTS = &Credentials{ + AccessKeyID: "AKIDEXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + SecurityToken: "AQoDYXdzEHcaoAJ1Aqwx1Sum0iW2NQjXJcWlKR7vuB6lnAeGBaQnjDRZPVyniwc48ml5hx+0qiXenVJdfusMMl9XLhSncfhx9Rb1UF8IAOaQ+CkpWXvoH67YYN+93dgckSVgVEBRByTl/BvLOZhe0ii/pOWkuQtBm5T7lBHRe4Dfmxy9X6hd8L3FrWxgnGV3fWZ3j0gASdYXaa+VBJlU0E2/GmCzn3T+t2mjYaeoInAnYVKVpmVMOrh6lNAeETTOHElLopblSa7TAmROq5xHIyu4a9i2qwjERTwa3Yk4Jk6q7JYVA5Cu7kS8wKVml8LdzzCTsy+elJgvH+Jf6ivpaHt/En0AJ5PZUJDev2+Y5+9j4AYfrmXfm4L73DC1ZJFJrv+Yh+EXAMPLE=", + } + + exampleReqTsV3 = "Thu, 14 Aug 2008 17:08:48 GMT" + baseUrlV3 = "https://email.us-east-1.amazonaws.com" + expectedStringToSignV3 = exampleReqTsV3 + expectedAuthHeaderV3 = "AWS3-HTTPS AWSAccessKeyId=" + testCredV3.AccessKeyID + ", Algorithm=HmacSHA256, 
Signature=PjAJ6buiV6l4WyzmmuwtKE59NJXVg5Dr3Sn4PCMZ0Yk=" +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign4.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign4.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign4.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign4.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,117 @@ +package awsauth + +import ( + "encoding/hex" + "net/http" + "sort" + "strings" +) + +func hashedCanonicalRequestV4(request *http.Request, meta *metadata) string { + // TASK 1. http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + + payload := readAndReplaceBody(request) + payloadHash := hashSHA256(payload) + request.Header.Set("X-Amz-Content-Sha256", payloadHash) + + // Set this in header values to make it appear in the range of headers to sign + request.Header.Set("Host", request.Host) + + var sortedHeaderKeys []string + for key, _ := range request.Header { + switch key { + case "Content-Type", "Content-Md5", "Host": + default: + if !strings.HasPrefix(key, "X-Amz-") { + continue + } + } + sortedHeaderKeys = append(sortedHeaderKeys, strings.ToLower(key)) + } + sort.Strings(sortedHeaderKeys) + + var headersToSign string + for _, key := range sortedHeaderKeys { + value := strings.TrimSpace(request.Header.Get(key)) + if key == "host" { + //AWS does not include port in signing request. + if strings.Contains(value, ":") { + split := strings.Split(value, ":") + port := split[1] + if port == "80" || port == "443" { + value = split[0] + } + } + } + headersToSign += key + ":" + value + "\n" + } + meta.signedHeaders = concat(";", sortedHeaderKeys...) 
+ canonicalRequest := concat("\n", request.Method, normuri(request.URL.Path), normquery(request.URL.Query()), headersToSign, meta.signedHeaders, payloadHash) + + return hashSHA256([]byte(canonicalRequest)) +} + +func stringToSignV4(request *http.Request, hashedCanonReq string, meta *metadata) string { + // TASK 2. http://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + + requestTs := request.Header.Get("X-Amz-Date") + + meta.algorithm = "AWS4-HMAC-SHA256" + meta.service, meta.region = serviceAndRegion(request.Host) + meta.date = tsDateV4(requestTs) + meta.credentialScope = concat("/", meta.date, meta.region, meta.service, "aws4_request") + + return concat("\n", meta.algorithm, requestTs, meta.credentialScope, hashedCanonReq) +} + +func signatureV4(signingKey []byte, stringToSign string) string { + // TASK 3. http://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html + + return hex.EncodeToString(hmacSHA256(signingKey, stringToSign)) +} + +func prepareRequestV4(request *http.Request) *http.Request { + necessaryDefaults := map[string]string{ + "Content-Type": "application/x-www-form-urlencoded; charset=utf-8", + "X-Amz-Date": timestampV4(), + } + + for header, value := range necessaryDefaults { + if request.Header.Get(header) == "" { + request.Header.Set(header, value) + } + } + + if request.URL.Path == "" { + request.URL.Path += "/" + } + + return request +} + +func signingKeyV4(secretKey, date, region, service string) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), date) + kRegion := hmacSHA256(kDate, region) + kService := hmacSHA256(kRegion, service) + kSigning := hmacSHA256(kService, "aws4_request") + return kSigning +} + +func buildAuthHeaderV4(signature string, meta *metadata, keys Credentials) string { + credential := keys.AccessKeyID + "/" + meta.credentialScope + + return meta.algorithm + + " Credential=" + credential + + ", SignedHeaders=" + meta.signedHeaders + + ", Signature=" + signature +} + +func 
timestampV4() string { + return now().Format(timeFormatV4) +} + +func tsDateV4(timestamp string) string { + return timestamp[:8] +} + +const timeFormatV4 = "20060102T150405Z" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign4_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign4_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign4_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/go-aws-auth/sign4_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,217 @@ +package awsauth + +import ( + "net/http" + "net/url" + "strings" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestVersion4RequestPreparer(t *testing.T) { + Convey("Given a plain request with no custom headers", t, func() { + request := test_plainRequestV4(false) + + expectedUnsigned := test_unsignedRequestV4(true, false) + expectedUnsigned.Header.Set("X-Amz-Date", timestampV4()) + + Convey("The necessary, default headers should be appended", func() { + prepareRequestV4(request) + So(request, ShouldResemble, expectedUnsigned) + }) + + Convey("Forward-slash should be appended to URI if not present", func() { + prepareRequestV4(request) + So(request.URL.Path, ShouldEqual, "/") + }) + + Convey("And a set of credentials", func() { + var keys Credentials + keys = *testCredV4 + + Convey("It should be signed with an Authorization header", func() { + actualSigned := Sign4(request, keys) + actual := actualSigned.Header.Get("Authorization") + + So(actual, ShouldNotBeBlank) + So(actual, ShouldContainSubstring, "Credential="+testCredV4.AccessKeyID) + So(actual, ShouldContainSubstring, "SignedHeaders=") + So(actual, ShouldContainSubstring, "Signature=") + So(actual, ShouldContainSubstring, "AWS4") + }) + }) + }) + + Convey("Given a request with custom, necessary headers", t, func() { + Convey("The custom, 
necessary headers must not be changed", func() { + request := test_unsignedRequestV4(true, false) + prepareRequestV4(request) + So(request, ShouldResemble, test_unsignedRequestV4(true, false)) + }) + }) +} + +func TestVersion4STSRequestPreparer(t *testing.T) { + Convey("Given a plain request with no custom headers", t, func() { + request := test_plainRequestV4(false) + + Convey("And a set of credentials with an STS token", func() { + var keys Credentials + keys = *testCredV4WithSTS + + Convey("It should include an X-Amz-Security-Token when the request is signed", func() { + actualSigned := Sign4(request, keys) + actual := actualSigned.Header.Get("X-Amz-Security-Token") + + So(actual, ShouldNotBeBlank) + So(actual, ShouldEqual, testCredV4WithSTS.SecurityToken) + + }) + }) + }) + +} + +func TestVersion4SigningTasks(t *testing.T) { + // http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html + + Convey("Given a bogus request and credentials from AWS documentation with an additional meta tag", t, func() { + request := test_unsignedRequestV4(true, true) + meta := new(metadata) + + Convey("(Task 1) The canonical request should be built correctly", func() { + hashedCanonReq := hashedCanonicalRequestV4(request, meta) + + So(hashedCanonReq, ShouldEqual, expectingV4["CanonicalHash"]) + }) + + Convey("(Task 2) The string to sign should be built correctly", func() { + hashedCanonReq := hashedCanonicalRequestV4(request, meta) + stringToSign := stringToSignV4(request, hashedCanonReq, meta) + + So(stringToSign, ShouldEqual, expectingV4["StringToSign"]) + }) + + Convey("(Task 3) The version 4 signed signature should be correct", func() { + hashedCanonReq := hashedCanonicalRequestV4(request, meta) + stringToSign := stringToSignV4(request, hashedCanonReq, meta) + signature := signatureV4(test_signingKeyV4(), stringToSign) + + So(signature, ShouldEqual, expectingV4["SignatureV4"]) + }) + }) +} + +func TestSignature4Helpers(t *testing.T) { + + keys := *testCredV4 + + 
Convey("The signing key should be properly generated", t, func() { + expected := []byte{152, 241, 216, 137, 254, 196, 244, 66, 26, 220, 82, 43, 171, 12, 225, 248, 46, 105, 41, 194, 98, 237, 21, 229, 169, 76, 144, 239, 209, 227, 176, 231} + actual := test_signingKeyV4() + + So(actual, ShouldResemble, expected) + }) + + Convey("Authorization headers should be built properly", t, func() { + meta := &metadata{ + algorithm: "AWS4-HMAC-SHA256", + credentialScope: "20110909/us-east-1/iam/aws4_request", + signedHeaders: "content-type;host;x-amz-date", + } + expected := expectingV4["AuthHeader"] + expectingV4["SignatureV4"] + actual := buildAuthHeaderV4(expectingV4["SignatureV4"], meta, keys) + + So(actual, ShouldEqual, expected) + }) + + Convey("Timestamps should be in the correct format, in UTC time", t, func() { + actual := timestampV4() + + So(len(actual), ShouldEqual, 16) + So(actual, ShouldNotContainSubstring, ":") + So(actual, ShouldNotContainSubstring, "-") + So(actual, ShouldNotContainSubstring, " ") + So(actual, ShouldEndWith, "Z") + So(actual, ShouldContainSubstring, "T") + }) + + Convey("Given an Version 4 AWS-formatted timestamp", t, func() { + ts := "20110909T233600Z" + + Convey("The date string should be extracted properly", func() { + So(tsDateV4(ts), ShouldEqual, "20110909") + }) + }) + + Convey("Given any request with a body", t, func() { + request := test_plainRequestV4(false) + + Convey("Its body should be read and replaced without differences", func() { + expected := []byte(requestValuesV4.Encode()) + + actual1 := readAndReplaceBody(request) + So(actual1, ShouldResemble, expected) + + actual2 := readAndReplaceBody(request) + So(actual2, ShouldResemble, expected) + }) + }) +} + +func test_plainRequestV4(trailingSlash bool) *http.Request { + url := "http://iam.amazonaws.com" + body := strings.NewReader(requestValuesV4.Encode()) + + if trailingSlash { + url += "/" + } + + request, err := http.NewRequest("POST", url, body) + + if err != nil { + panic(err) + 
} + + return request +} + +func test_unsignedRequestV4(trailingSlash, tag bool) *http.Request { + request := test_plainRequestV4(trailingSlash) + request.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + request.Header.Set("X-Amz-Date", "20110909T233600Z") + if tag { + request.Header.Set("X-Amz-Meta-Foo", "Bar!") + } + return request +} + +func test_signingKeyV4() []byte { + return signingKeyV4(testCredV4.SecretAccessKey, "20110909", "us-east-1", "iam") +} + +var ( + testCredV4 = &Credentials{ + AccessKeyID: "AKIDEXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + } + + testCredV4WithSTS = &Credentials{ + AccessKeyID: "AKIDEXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", + SecurityToken: "AQoDYXdzEHcaoAJ1Aqwx1Sum0iW2NQjXJcWlKR7vuB6lnAeGBaQnjDRZPVyniwc48ml5hx+0qiXenVJdfusMMl9XLhSncfhx9Rb1UF8IAOaQ+CkpWXvoH67YYN+93dgckSVgVEBRByTl/BvLOZhe0ii/pOWkuQtBm5T7lBHRe4Dfmxy9X6hd8L3FrWxgnGV3fWZ3j0gASdYXaa+VBJlU0E2/GmCzn3T+t2mjYaeoInAnYVKVpmVMOrh6lNAeETTOHElLopblSa7TAmROq5xHIyu4a9i2qwjERTwa3Yk4Jk6q7JYVA5Cu7kS8wKVml8LdzzCTsy+elJgvH+Jf6ivpaHt/En0AJ5PZUJDev2+Y5+9j4AYfrmXfm4L73DC1ZJFJrv+Yh+EXAMPLE=", + } + + expectingV4 = map[string]string{ + "CanonicalHash": "41c56ed0df12052f7c10407a809e64cd61a4b0471956cdea28d6d1bb904f5d92", + "StringToSign": "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/iam/aws4_request\n41c56ed0df12052f7c10407a809e64cd61a4b0471956cdea28d6d1bb904f5d92", + "SignatureV4": "08292a4b86aae1a6f80f1988182a33cbf73ccc70c5da505303e355a67cc64cb4", + "AuthHeader": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=", + } + + requestValuesV4 = &url.Values{ + "Action": []string{"ListUsers"}, + "Version": []string{"2010-05-08"}, + } +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/lzma/lzma_go/main.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/lzma/lzma_go/main.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/smira/lzma/lzma_go/main.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/smira/lzma/lzma_go/main.go 2016-05-24 07:05:22.000000000 +0000 @@ -6,7 +6,7 @@ import ( //"compress/lzma" - "code.google.com/p/lzma" + "github.com/smira/lzma" "flag" "fmt" "io" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/batch.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/batch.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/batch.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/batch.go 2016-05-24 07:05:22.000000000 +0000 @@ -12,8 +12,10 @@ "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" ) +// ErrBatchCorrupted records reason of batch corruption. type ErrBatchCorrupted struct { Reason string } @@ -23,7 +25,7 @@ } func newErrBatchCorrupted(reason string) error { - return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason}) + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason}) } const ( @@ -31,6 +33,7 @@ batchGrowRec = 3000 ) +// BatchReplay wraps basic batch operations. 
type BatchReplay interface { Put(key, value []byte) Delete(key []byte) @@ -67,20 +70,20 @@ } } -func (b *Batch) appendRec(kt kType, key, value []byte) { +func (b *Batch) appendRec(kt keyType, key, value []byte) { n := 1 + binary.MaxVarintLen32 + len(key) - if kt == ktVal { + if kt == keyTypeVal { n += binary.MaxVarintLen32 + len(value) } b.grow(n) off := len(b.data) data := b.data[:off+n] data[off] = byte(kt) - off += 1 + off++ off += binary.PutUvarint(data[off:], uint64(len(key))) copy(data[off:], key) off += len(key) - if kt == ktVal { + if kt == keyTypeVal { off += binary.PutUvarint(data[off:], uint64(len(value))) copy(data[off:], value) off += len(value) @@ -94,13 +97,13 @@ // Put appends 'put operation' of the given key/value pair to the batch. // It is safe to modify the contents of the argument after Put returns. func (b *Batch) Put(key, value []byte) { - b.appendRec(ktVal, key, value) + b.appendRec(keyTypeVal, key, value) } // Delete appends 'delete operation' of the given key to the batch. // It is safe to modify the contents of the argument after Delete returns. func (b *Batch) Delete(key []byte) { - b.appendRec(ktDel, key, nil) + b.appendRec(keyTypeDel, key, nil) } // Dump dumps batch contents. The returned slice can be loaded into the @@ -121,13 +124,14 @@ // Replay replays batch contents. 
func (b *Batch) Replay(r BatchReplay) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { + return b.decodeRec(func(i int, kt keyType, key, value []byte) error { switch kt { - case ktVal: + case keyTypeVal: r.Put(key, value) - case ktDel: + case keyTypeDel: r.Delete(key) } + return nil }) } @@ -193,18 +197,18 @@ return nil } -func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) { +func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error { off := batchHdrLen for i := 0; i < b.rLen; i++ { if off >= len(b.data) { return newErrBatchCorrupted("invalid records length") } - kt := kType(b.data[off]) - if kt > ktVal { + kt := keyType(b.data[off]) + if kt > keyTypeVal { return newErrBatchCorrupted("bad record: invalid type") } - off += 1 + off++ x, n := binary.Uvarint(b.data[off:]) off += n @@ -214,7 +218,7 @@ key := b.data[off : off+int(x)] off += int(x) var value []byte - if kt == ktVal { + if kt == keyTypeVal { x, n := binary.Uvarint(b.data[off:]) off += n if n <= 0 || off+int(x) > len(b.data) { @@ -224,16 +228,19 @@ off += int(x) } - f(i, kt, key, value) + if err := f(i, kt, key, value); err != nil { + return err + } } return nil } func (b *Batch) memReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Put(ikey, value) + var ikScratch []byte + return b.decodeRec(func(i int, kt keyType, key, value []byte) error { + ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt) + return to.Put(ikScratch, value) }) } @@ -245,8 +252,9 @@ } func (b *Batch) revertMemReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Delete(ikey) + var ikScratch []byte + return b.decodeRec(func(i int, kt keyType, key, value []byte) error { + ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt) + return 
to.Delete(ikScratch) }) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/batch_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/batch_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/batch_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/batch_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -15,7 +15,7 @@ ) type tbRec struct { - kt kType + kt keyType key, value []byte } @@ -24,11 +24,11 @@ } func (p *testBatch) Put(key, value []byte) { - p.rec = append(p.rec, &tbRec{ktVal, key, value}) + p.rec = append(p.rec, &tbRec{keyTypeVal, key, value}) } func (p *testBatch) Delete(key []byte) { - p.rec = append(p.rec, &tbRec{ktDel, key, nil}) + p.rec = append(p.rec, &tbRec{keyTypeDel, key, nil}) } func compareBatch(t *testing.T, b1, b2 *Batch) { @@ -55,7 +55,7 @@ if !bytes.Equal(r1.key, r2.key) { t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) } - if r1.kt == ktVal { + if r1.kt == keyTypeVal { if !bytes.Equal(r1.value, r2.value) { t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build !go1.2 - -package leveldb - -import ( - "sync/atomic" - "testing" -) - -func BenchmarkDBReadConcurrent(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - for pb.Next() && iter.Next() { - } - }) -} - -func BenchmarkDBReadConcurrent2(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - var dir uint32 - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - if atomic.AddUint32(&dir, 1)%2 == 0 { - for pb.Next() && iter.Next() { - } - } else { - if pb.Next() && iter.Last() { - for pb.Next() && iter.Prev() { - } - } - } - }) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/bench_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/bench_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/bench_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/bench_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -13,6 +13,7 @@ "os" "path/filepath" "runtime" + "sync/atomic" "testing" "github.com/syndtr/goleveldb/leveldb/iterator" @@ -90,7 +91,7 @@ ro: &opt.ReadOptions{}, wo: &opt.WriteOptions{}, } - p.stor, err = storage.OpenFile(benchDB) + p.stor, err = storage.OpenFile(benchDB, false) if err != nil { b.Fatal("cannot open stor: ", err) } @@ -462,3 +463,47 @@ p.gets() p.close() } + +func BenchmarkDBReadConcurrent(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + defer p.close() + + b.ResetTimer() + b.SetBytes(116) + + b.RunParallel(func(pb *testing.PB) { + iter := p.newIter() + defer iter.Release() + for pb.Next() && iter.Next() { + } 
+ }) +} + +func BenchmarkDBReadConcurrent2(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + defer p.close() + + b.ResetTimer() + b.SetBytes(116) + + var dir uint32 + b.RunParallel(func(pb *testing.PB) { + iter := p.newIter() + defer iter.Release() + if atomic.AddUint32(&dir, 1)%2 == 0 { + for pb.Next() && iter.Next() { + } + } else { + if pb.Next() && iter.Last() { + for pb.Next() && iter.Prev() { + } + } + } + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build !go1.2 - -package cache - -import ( - "math/rand" - "testing" -) - -func BenchmarkLRUCache(b *testing.B) { - c := NewCache(NewLRU(10000)) - - b.SetParallelism(10) - b.RunParallel(func(pb *testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for pb.Next() { - key := uint64(r.Intn(1000000)) - c.Get(0, key, func() (int, Value) { - return 1, key - }).Release() - } - }) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,29 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package cache + +import ( + "math/rand" + "testing" + "time" +) + +func BenchmarkLRUCache(b *testing.B) { + c := NewCache(NewLRU(10000)) + + b.SetParallelism(10) + b.RunParallel(func(pb *testing.PB) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + for pb.Next() { + key := uint64(r.Intn(1000000)) + c.Get(0, key, func() (int, Value) { + return 1, key + }).Release() + } + }) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go 2016-05-24 07:05:22.000000000 +0000 @@ -47,17 +47,21 @@ // so the the Release method will be called once object is released. type Value interface{} -type CacheGetter struct { +// NamespaceGetter provides convenient wrapper for namespace. +type NamespaceGetter struct { Cache *Cache NS uint64 } -func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { +// Get simply calls Cache.Get() method. +func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { return g.Cache.Get(g.NS, key, setFunc) } // The hash tables implementation is based on: -// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014. +// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, +// Kunlong Zhang, and Michael Spear. +// ACM Symposium on Principles of Distributed Computing, Jul 2014. const ( mInitialSize = 1 << 4 @@ -610,10 +614,12 @@ } } +// Handle is a 'cache handle' of a 'cache node'. type Handle struct { n unsafe.Pointer // *Node } +// Value returns the value of the 'cache node'. 
func (h *Handle) Value() Value { n := (*Node)(atomic.LoadPointer(&h.n)) if n != nil { @@ -622,6 +628,8 @@ return nil } +// Release releases this 'cache handle'. +// It is safe to call release multiple times. func (h *Handle) Release() { nPtr := atomic.LoadPointer(&h.n) if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -45,9 +45,8 @@ return c.Get(ns, key, func() (int, Value) { if relf != nil { return charge, releaserFunc{relf, value} - } else { - return charge, value } + return charge, value }) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/comparer.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/comparer.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/comparer.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/comparer.go 2016-05-24 07:05:22.000000000 +0000 @@ -33,9 +33,9 @@ } func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey()) + x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey()) if x == 0 { - if m, n := iKey(a).num(), iKey(b).num(); m > n { + if m, n := internalKey(a).num(), internalKey(b).num(); m > n { x = -1 } else if m < n { x = 1 @@ -45,13 +45,13 @@ } func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := iKey(a).ukey(), 
iKey(b).ukey() + ua, ub := internalKey(a).ukey(), internalKey(b).ukey() dst = icmp.ucmp.Separator(dst, ua, ub) if dst == nil { return nil } if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - dst = append(dst, kMaxNumBytes...) + dst = append(dst, keyMaxNumBytes...) } else { // Did not close possibilities that n maybe longer than len(ub). dst = append(dst, a[len(a)-8:]...) @@ -60,13 +60,13 @@ } func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := iKey(b).ukey() + ub := internalKey(b).ukey() dst = icmp.ucmp.Successor(dst, ub) if dst == nil { return nil } if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - dst = append(dst, kMaxNumBytes...) + dst = append(dst, keyMaxNumBytes...) } else { // Did not close possibilities that n maybe longer than len(ub). dst = append(dst, b[len(b)-8:]...) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -9,12 +9,13 @@ import ( "bytes" "fmt" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" "io" "math/rand" "testing" + + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" ) const ctValSize = 1000 @@ -99,19 +100,17 @@ p := &h.dbHarness t := p.t - ff, _ := p.stor.GetFiles(ft) - sff := files(ff) - sff.sort() + fds, _ := p.stor.List(ft) + sortFds(fds) if fi < 0 { - fi = len(sff) - 1 + fi = len(fds) - 1 } - if fi >= len(sff) { + if fi >= len(fds) { t.Fatalf("no such file with type %q with index %d", ft, 
fi) } - file := sff[fi] - - r, err := file.Open() + fd := fds[fi] + r, err := h.stor.Open(fd) if err != nil { t.Fatal("cannot open file: ", err) } @@ -149,11 +148,11 @@ buf[offset+i] ^= 0x80 } - err = file.Remove() + err = h.stor.Remove(fd) if err != nil { t.Fatal("cannot remove old file: ", err) } - w, err := file.Create() + w, err := h.stor.Create(fd) if err != nil { t.Fatal("cannot create new file: ", err) } @@ -165,25 +164,37 @@ } func (h *dbCorruptHarness) removeAll(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) + fds, err := h.stor.List(ft) if err != nil { h.t.Fatal("get files: ", err) } - for _, f := range ff { - if err := f.Remove(); err != nil { + for _, fd := range fds { + if err := h.stor.Remove(fd); err != nil { + h.t.Error("remove file: ", err) + } + } +} + +func (h *dbCorruptHarness) forceRemoveAll(ft storage.FileType) { + fds, err := h.stor.List(ft) + if err != nil { + h.t.Fatal("get files: ", err) + } + for _, fd := range fds { + if err := h.stor.ForceRemove(fd); err != nil { h.t.Error("remove file: ", err) } } } func (h *dbCorruptHarness) removeOne(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) + fds, err := h.stor.List(ft) if err != nil { h.t.Fatal("get files: ", err) } - f := ff[rand.Intn(len(ff))] - h.t.Logf("removing file @%d", f.Num()) - if err := f.Remove(); err != nil { + fd := fds[rand.Intn(len(fds))] + h.t.Logf("removing file @%d", fd.Num) + if err := h.stor.Remove(fd); err != nil { h.t.Error("remove file: ", err) } } @@ -221,6 +232,7 @@ func TestCorruptDB_Journal(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.build(100) h.check(100, 100) @@ -230,12 +242,11 @@ h.openDB() h.check(36, 36) - - h.close() } func TestCorruptDB_Table(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.build(100) h.compactMem() @@ -246,12 +257,11 @@ h.openDB() h.check(99, 99) - - h.close() } func TestCorruptDB_TableIndex(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.build(10000) h.compactMem() @@ 
-260,8 +270,6 @@ h.openDB() h.check(5000, 9999) - - h.close() } func TestCorruptDB_MissingManifest(t *testing.T) { @@ -271,6 +279,7 @@ Strict: opt.StrictJournalChecksum, WriteBuffer: 1000 * 60, }) + defer h.close() h.build(1000) h.compactMem() @@ -286,10 +295,8 @@ h.compactMem() h.closeDB() - h.stor.SetIgnoreOpenErr(storage.TypeManifest) - h.removeAll(storage.TypeManifest) + h.forceRemoveAll(storage.TypeManifest) h.openAssert(false) - h.stor.SetIgnoreOpenErr(0) h.recover() h.check(1000, 1000) @@ -300,12 +307,11 @@ h.recover() h.check(1000, 1000) - - h.close() } func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.put("foo", "v1") h.put("foo", "v2") @@ -321,12 +327,11 @@ h.reopenDB() h.getVal("foo", "v6") - - h.close() } func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.put("foo", "v1") h.put("foo", "v2") @@ -344,12 +349,11 @@ h.reopenDB() h.getVal("foo", "v6") - - h.close() } func TestCorruptDB_CorruptedManifest(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.put("foo", "hello") h.compactMem() @@ -360,12 +364,11 @@ h.recover() h.getVal("foo", "hello") - - h.close() } func TestCorruptDB_CompactionInputError(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.build(10) h.compactMem() @@ -377,12 +380,11 @@ h.build(10000) h.check(10000, 10000) - - h.close() } func TestCorruptDB_UnrelatedKeys(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.build(10) h.compactMem() @@ -394,12 +396,11 @@ h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) h.compactMem() h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - - h.close() } func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.put("a", "v1") h.put("b", "v1") @@ -421,12 +422,11 @@ h.getVal("b", "v3") h.getVal("c", "v0") h.getVal("d", "v0") - - h.close() } func 
TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.put("a", "v1") h.put("b", "v1") @@ -448,12 +448,11 @@ h.getVal("b", "v3") h.getVal("c", "v0") h.getVal("d", "v0") - - h.close() } func TestCorruptDB_MissingTableFiles(t *testing.T) { h := newDbCorruptHarness(t) + defer h.close() h.put("a", "v1") h.put("b", "v1") @@ -467,8 +466,6 @@ h.removeOne(storage.TypeTable) h.openAssert(false) - - h.close() } func TestCorruptDB_RecoverTable(t *testing.T) { @@ -477,6 +474,7 @@ CompactionTableSize: 90 * opt.KiB, Filter: filter.NewBloomFilter(10), }) + defer h.close() h.build(1000) h.compactMem() @@ -495,6 +493,4 @@ t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq) } h.check(985, 985) - - h.close() } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go 2016-05-24 07:05:22.000000000 +0000 @@ -12,55 +12,76 @@ "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" ) var ( errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") ) -type cStats struct { - sync.Mutex +type cStat struct { duration time.Duration - read uint64 - write uint64 + read int64 + write int64 } -func (p *cStats) add(n *cStatsStaging) { - p.Lock() +func (p *cStat) add(n *cStatStaging) { p.duration += n.duration p.read += n.read p.write += n.write - p.Unlock() } -func (p *cStats) get() (duration time.Duration, read, write uint64) { - p.Lock() - defer p.Unlock() +func (p *cStat) get() (duration time.Duration, read, write int64) { return 
p.duration, p.read, p.write } -type cStatsStaging struct { +type cStatStaging struct { start time.Time duration time.Duration on bool - read uint64 - write uint64 + read int64 + write int64 } -func (p *cStatsStaging) startTimer() { +func (p *cStatStaging) startTimer() { if !p.on { p.start = time.Now() p.on = true } } -func (p *cStatsStaging) stopTimer() { +func (p *cStatStaging) stopTimer() { if p.on { p.duration += time.Since(p.start) p.on = false } } +type cStats struct { + lk sync.Mutex + stats []cStat +} + +func (p *cStats) addStat(level int, n *cStatStaging) { + p.lk.Lock() + if level >= len(p.stats) { + newStats := make([]cStat, level+1) + copy(newStats, p.stats) + p.stats = newStats + } + p.stats[level].add(n) + p.lk.Unlock() +} + +func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) { + p.lk.Lock() + defer p.lk.Unlock() + if level < len(p.stats) { + return p.stats[level].get() + } + return +} + func (db *DB) compactionError() { var err error noerr: @@ -235,6 +256,14 @@ panic(errCompactionTransactExiting) } +func (db *DB) compactionCommit(name string, rec *sessionRecord) { + db.compCommitLk.Lock() + defer db.compCommitLk.Unlock() // Defer is necessary. + db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error { + return db.s.commit(rec) + }, nil) +} + func (db *DB) memCompaction() { mdb := db.getFrozenMem() if mdb == nil { @@ -265,41 +294,40 @@ var ( rec = &sessionRecord{} - stats = &cStatsStaging{} + stats = &cStatStaging{} flushLevel int ) + // Generate tables. 
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) { stats.startTimer() - flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1) + flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel) stats.stopTimer() return }, func() error { for _, r := range rec.addedTables { db.logf("memdb@flush revert @%d", r.num) - f := db.s.getTableFile(r.num) - if err := f.Remove(); err != nil { + if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil { return err } } return nil }) - db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - rec.setJournalNum(db.journalFile.Num()) - rec.setSeqNum(db.frozenSeq) - err = db.s.commit(rec) - stats.stopTimer() - return - }, nil) + rec.setJournalNum(db.journalFd.Num) + rec.setSeqNum(db.frozenSeq) + + // Commit. + stats.startTimer() + db.compactionCommit("memdb", rec) + stats.stopTimer() db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration) for _, r := range rec.addedTables { stats.write += r.size } - db.compStats[flushLevel].add(stats) + db.compStats.addStat(flushLevel, stats) // Drop frozen memdb. db.dropFrozenMem() @@ -315,7 +343,7 @@ } // Trigger table compaction. 
- db.compSendTrigger(db.tcompCmdC) + db.compTrigger(db.tcompCmdC) } type tableCompactionBuilder struct { @@ -323,7 +351,7 @@ s *session c *compaction rec *sessionRecord - stat0, stat1 *cStatsStaging + stat0, stat1 *cStatStaging snapHasLastUkey bool snapLastUkey []byte @@ -377,9 +405,9 @@ if err != nil { return err } - b.rec.addTableFile(b.c.level+1, t) + b.rec.addTableFile(b.c.sourceLevel+1, t) b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) + b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) b.tw = nil return nil } @@ -424,7 +452,7 @@ } ikey := iter.Key() - ukey, seq, kt, kerr := parseIkey(ikey) + ukey, seq, kt, kerr := parseInternalKey(ikey) if kerr == nil { shouldStop := !resumed && b.c.shouldStopBefore(ikey) @@ -450,14 +478,14 @@ hasLastUkey = true lastUkey = append(lastUkey[:0], ukey...) - lastSeq = kMaxSeq + lastSeq = keyMaxSeq } switch { case lastSeq <= b.minSeq: // Dropped because newer entry for same user key exist fallthrough // (A) - case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): + case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): // For this user key: // (1) there is no data in higher levels // (2) data in lower levels will have larger seq numbers @@ -479,7 +507,7 @@ // Don't drop corrupted keys. 
hasLastUkey = false lastUkey = lastUkey[:0] - lastSeq = kMaxSeq + lastSeq = keyMaxSeq b.kerrCnt++ } @@ -502,8 +530,7 @@ func (b *tableCompactionBuilder) revert() error { for _, at := range b.rec.addedTables { b.s.logf("table@build revert @%d", at.num) - f := b.s.getTableFile(at.num) - if err := f.Remove(); err != nil { + if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil { return err } } @@ -514,30 +541,28 @@ defer c.release() rec := &sessionRecord{} - rec.addCompPtr(c.level, c.imax) + rec.addCompPtr(c.sourceLevel, c.imax) if !noTrivial && c.trivial() { - t := c.tables[0][0] - db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) - rec.delTable(c.level, t.file.Num()) - rec.addTableFile(c.level+1, t) - db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) { - return db.s.commit(rec) - }, nil) + t := c.levels[0][0] + db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1) + rec.delTable(c.sourceLevel, t.fd.Num) + rec.addTableFile(c.sourceLevel+1, t) + db.compactionCommit("table-move", rec) return } - var stats [2]cStatsStaging - for i, tables := range c.tables { + var stats [2]cStatStaging + for i, tables := range c.levels { for _, t := range tables { stats[i].read += t.size // Insert deleted tables into record - rec.delTable(c.level+i, t.file.Num()) + rec.delTable(c.sourceLevel+i, t.fd.Num) } } sourceSize := int(stats[0].read + stats[1].read) minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) + db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq) b := &tableCompactionBuilder{ db: db, @@ -547,49 +572,60 @@ stat1: &stats[1], minSeq: minSeq, strict: db.s.o.GetStrict(opt.StrictCompaction), - tableSize: db.s.o.GetCompactionTableSize(c.level + 1), 
+ tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1), } db.compactionTransact("table@build", b) - // Commit changes - db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) { - stats[1].startTimer() - defer stats[1].stopTimer() - return db.s.commit(rec) - }, nil) + // Commit. + stats[1].startTimer() + db.compactionCommit("table", rec) + stats[1].stopTimer() resultSize := int(stats[1].write) db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) // Save compaction stats for i := range stats { - db.compStats[c.level+1].add(&stats[i]) + db.compStats.addStat(c.sourceLevel+1, &stats[i]) } } -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { +func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error { db.logf("table@compaction range L%d %q:%q", level, umin, umax) - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { + if c := db.s.getCompactionRange(level, umin, umax, true); c != nil { db.tableCompaction(c, true) } } else { - v := db.s.version() - m := 1 - for i, t := range v.tables[1:] { - if t.overlaps(db.s.icmp, umin, umax, false) { - m = i + 1 + // Retry until nothing to compact. + for { + compacted := false + + // Scan for maximum level with overlapped tables. 
+ v := db.s.version() + m := 1 + for i := m; i < len(v.levels); i++ { + tables := v.levels[i] + if tables.overlaps(db.s.icmp, umin, umax, false) { + m = i + } } - } - v.release() - - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) + v.release() + + for level := 0; level < m; level++ { + if c := db.s.getCompactionRange(level, umin, umax, false); c != nil { + db.tableCompaction(c, true) + compacted = true + } + } + + if !compacted { + break } } } + + return nil } func (db *DB) tableAutoCompaction() { @@ -616,11 +652,11 @@ ack(err error) } -type cIdle struct { +type cAuto struct { ackC chan<- error } -func (r cIdle) ack(err error) { +func (r cAuto) ack(err error) { if r.ackC != nil { defer func() { recover() @@ -644,13 +680,21 @@ } } +// This will trigger auto compaction but will not wait for it. +func (db *DB) compTrigger(compC chan<- cCmd) { + select { + case compC <- cAuto{}: + default: + } +} + // This will trigger auto compation and/or wait for all compaction to be done. -func (db *DB) compSendIdle(compC chan<- cCmd) (err error) { +func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) { ch := make(chan error) defer close(ch) // Send cmd. select { - case compC <- cIdle{ch}: + case compC <- cAuto{ch}: case err = <-db.compErrC: return case _, _ = <-db.closeC: @@ -666,16 +710,8 @@ return err } -// This will trigger auto compaction but will not wait for it. -func (db *DB) compSendTrigger(compC chan<- cCmd) { - select { - case compC <- cIdle{}: - default: - } -} - // Send range compaction request. -func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { +func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) { ch := make(chan error) defer close(ch) // Send cmd. 
@@ -715,7 +751,7 @@ select { case x = <-db.mcompCmdC: switch x.(type) { - case cIdle: + case cAuto: db.memCompaction() x.ack(nil) x = nil @@ -776,11 +812,10 @@ } if x != nil { switch cmd := x.(type) { - case cIdle: + case cAuto: ackQ = append(ackQ, x) case cRange: - db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) - x.ack(nil) + x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) default: panic("leveldb: unknown command") } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db.go 2016-05-24 07:05:22.000000000 +0000 @@ -36,14 +36,14 @@ s *session // MemDB. - memMu sync.RWMutex - memPool chan *memdb.DB - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFile storage.File - frozenJournalFile storage.File - frozenSeq uint64 + memMu sync.RWMutex + memPool chan *memdb.DB + mem, frozenMem *memDB + journal *journal.Writer + journalWriter storage.Writer + journalFd storage.FileDesc + frozenJournalFd storage.FileDesc + frozenSeq uint64 // Snapshot. snapsMu sync.Mutex @@ -61,8 +61,10 @@ writeDelayN int journalC chan *Batch journalAckC chan error + tr *Transaction // Compaction. + compCommitLk sync.Mutex tcompCmdC chan cCmd tcompPauseC chan chan<- struct{} mcompCmdC chan cCmd @@ -70,7 +72,8 @@ compPerErrC chan error compErrSetC chan error compWriteLocking bool - compStats []cStats + compStats cStats + memdbMaxLevel int // For testing. // Close. 
closeW sync.WaitGroup @@ -104,7 +107,6 @@ compErrC: make(chan error), compPerErrC: make(chan error), compErrSetC: make(chan error), - compStats: make([]cStats, s.o.GetNumLevel()), // Close closeC: make(chan struct{}), } @@ -209,7 +211,7 @@ // The returned DB instance is goroutine-safe. // The DB must be closed after use, by calling Close method. func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) + stor, err := storage.OpenFile(path, o.GetReadOnly()) if err != nil { return } @@ -259,7 +261,7 @@ // The returned DB instance is goroutine-safe. // The DB must be closed after use, by calling Close method. func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) + stor, err := storage.OpenFile(path, false) if err != nil { return } @@ -278,12 +280,11 @@ o.Strict &= ^opt.StrictReader // Get all tables and sort it by file number. - tableFiles_, err := s.getFiles(storage.TypeTable) + fds, err := s.stor.List(storage.TypeTable) if err != nil { return err } - tableFiles := files(tableFiles_) - tableFiles.sort() + sortFds(fds) var ( maxSeq uint64 @@ -296,17 +297,17 @@ rec = &sessionRecord{} bpool = util.NewBufferPool(o.GetBlockSize() + 5) ) - buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { - tmp = s.newTemp() - writer, err := tmp.Create() + buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) { + tmpFd = s.newTemp() + writer, err := s.stor.Create(tmpFd) if err != nil { return } defer func() { writer.Close() if err != nil { - tmp.Remove() - tmp = nil + s.stor.Remove(tmpFd) + tmpFd = storage.FileDesc{} } }() @@ -314,7 +315,7 @@ tw := table.NewWriter(writer, o) for iter.Next() { key := iter.Key() - if validIkey(key) { + if validInternalKey(key) { err = tw.Append(key, iter.Value()) if err != nil { return @@ -338,9 +339,9 @@ size = int64(tw.BytesLen()) return } - recoverTable := func(file storage.File) error { 
- s.logf("table@recovery recovering @%d", file.Num()) - reader, err := file.Open() + recoverTable := func(fd storage.FileDesc) error { + s.logf("table@recovery recovering @%d", fd.Num) + reader, err := s.stor.Open(fd) if err != nil { return err } @@ -362,7 +363,7 @@ tgoodKey, tcorruptedKey, tcorruptedBlock int imin, imax []byte ) - tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o) + tr, err := table.NewReader(reader, size, fd, nil, bpool, o) if err != nil { return err } @@ -370,7 +371,7 @@ if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { itererr.SetErrorCallback(func(err error) { if errors.IsCorrupted(err) { - s.logf("table@recovery block corruption @%d %q", file.Num(), err) + s.logf("table@recovery block corruption @%d %q", fd.Num, err) tcorruptedBlock++ } }) @@ -379,7 +380,7 @@ // Scan the table. for iter.Next() { key := iter.Key() - _, seq, _, kerr := parseIkey(key) + _, seq, _, kerr := parseInternalKey(key) if kerr != nil { tcorruptedKey++ continue @@ -405,23 +406,23 @@ if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { droppedTable++ - s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) return nil } if tgoodKey > 0 { if tcorruptedKey > 0 || tcorruptedBlock > 0 { // Rebuild the table. - s.logf("table@recovery rebuilding @%d", file.Num()) + s.logf("table@recovery rebuilding @%d", fd.Num) iter := tr.NewIterator(nil, nil) - tmp, newSize, err := buildTable(iter) + tmpFd, newSize, err := buildTable(iter) iter.Release() if err != nil { return err } closed = true reader.Close() - if err := file.Replace(tmp); err != nil { + if err := s.stor.Rename(tmpFd, fd); err != nil { return err } size = newSize @@ -431,30 +432,30 @@ } recoveredKey += tgoodKey // Add table to level 0. 
- rec.addTable(0, file.Num(), uint64(size), imin, imax) - s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + rec.addTable(0, fd.Num, size, imin, imax) + s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) } else { droppedTable++ - s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size) + s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size) } return nil } // Recover all tables. - if len(tableFiles) > 0 { - s.logf("table@recovery F·%d", len(tableFiles)) + if len(fds) > 0 { + s.logf("table@recovery F·%d", len(fds)) // Mark file number as used. - s.markFileNum(tableFiles[len(tableFiles)-1].Num()) + s.markFileNum(fds[len(fds)-1].Num) - for _, file := range tableFiles { - if err := recoverTable(file); err != nil { + for _, fd := range fds { + if err := recoverTable(fd); err != nil { return err } } - s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq) + s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq) } // Set sequence number. @@ -471,31 +472,31 @@ func (db *DB) recoverJournal() error { // Get all journals and sort it by file number. - allJournalFiles, err := db.s.getFiles(storage.TypeJournal) + rawFds, err := db.s.stor.List(storage.TypeJournal) if err != nil { return err } - files(allJournalFiles).sort() + sortFds(rawFds) // Journals that will be recovered. 
- var recJournalFiles []storage.File - for _, jf := range allJournalFiles { - if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum { - recJournalFiles = append(recJournalFiles, jf) + var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) } } var ( - of storage.File // Obsolete file. + ofd storage.FileDesc // Obsolete file. rec = &sessionRecord{} ) // Recover journals. - if len(recJournalFiles) > 0 { - db.logf("journal@recovery F·%d", len(recJournalFiles)) + if len(fds) > 0 { + db.logf("journal@recovery F·%d", len(fds)) // Mark file number as used. - db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num()) + db.s.markFileNum(fds[len(fds)-1].Num) var ( // Options. @@ -509,31 +510,31 @@ batch = &Batch{} ) - for _, jf := range recJournalFiles { - db.logf("journal@recovery recovering @%d", jf.Num()) + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) - fr, err := jf.Open() + fr, err := db.s.stor.Open(fd) if err != nil { return err } // Create or reset journal reader instance. if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum) + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) } else { - jr.Reset(fr, dropper{db.s, jf}, strict, checksum) + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) } // Flush memdb and remove obsolete journal file. - if of != nil { + if !ofd.Nil() { if mdb.Len() > 0 { - if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { fr.Close() return err } } - rec.setJournalNum(jf.Num()) + rec.setJournalNum(fd.Num) rec.setSeqNum(db.seq) if err := db.s.commit(rec); err != nil { fr.Close() @@ -541,8 +542,8 @@ } rec.resetAddedTables() - of.Remove() - of = nil + db.s.stor.Remove(ofd) + ofd = storage.FileDesc{} } // Replay journal to memdb. 
@@ -555,7 +556,7 @@ } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } buf.Reset() @@ -566,7 +567,7 @@ } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil { if !strict && errors.IsCorrupted(err) { @@ -576,7 +577,7 @@ } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } // Save sequence number. @@ -594,7 +595,7 @@ } fr.Close() - of = jf + ofd = fd } // Flush the last memdb. @@ -611,7 +612,7 @@ } // Commit. - rec.setJournalNum(db.journalFile.Num()) + rec.setJournalNum(db.journalFd.Num) rec.setSeqNum(db.seq) if err := db.s.commit(rec); err != nil { // Close journal on error. @@ -623,8 +624,8 @@ } // Remove the last obsolete journal file. - if of != nil { - of.Remove() + if !ofd.Nil() { + db.s.stor.Remove(ofd) } return nil @@ -632,17 +633,17 @@ func (db *DB) recoverJournalRO() error { // Get all journals and sort it by file number. - allJournalFiles, err := db.s.getFiles(storage.TypeJournal) + rawFds, err := db.s.stor.List(storage.TypeJournal) if err != nil { return err } - files(allJournalFiles).sort() + sortFds(rawFds) // Journals that will be recovered. - var recJournalFiles []storage.File - for _, jf := range allJournalFiles { - if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum { - recJournalFiles = append(recJournalFiles, jf) + var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) } } @@ -656,8 +657,8 @@ ) // Recover journals. 
- if len(recJournalFiles) > 0 { - db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles)) + if len(fds) > 0 { + db.logf("journal@recovery RO·Mode F·%d", len(fds)) var ( jr *journal.Reader @@ -665,19 +666,19 @@ batch = &Batch{} ) - for _, jf := range recJournalFiles { - db.logf("journal@recovery recovering @%d", jf.Num()) + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) - fr, err := jf.Open() + fr, err := db.s.stor.Open(fd) if err != nil { return err } // Create or reset journal reader instance. if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum) + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) } else { - jr.Reset(fr, dropper{db.s, jf}, strict, checksum) + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) } // Replay journal to memdb. @@ -689,7 +690,7 @@ } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } buf.Reset() @@ -700,7 +701,7 @@ } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil { if !strict && errors.IsCorrupted(err) { @@ -710,7 +711,7 @@ } fr.Close() - return errors.SetFile(err, jf) + return errors.SetFd(err, fd) } // Save sequence number. @@ -727,8 +728,35 @@ return nil } -func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { - ikey := newIkey(key, seq, ktSeek) +func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) { + mk, mv, err := mdb.Find(ikey) + if err == nil { + ukey, _, kt, kerr := parseInternalKey(mk) + if kerr != nil { + // Shouldn't have had happen. 
+ panic(kerr) + } + if icmp.uCompare(ukey, ikey.ukey()) == 0 { + if kt == keyTypeDel { + return true, nil, ErrNotFound + } + return true, mv, nil + + } + } else if err != ErrNotFound { + return true, nil, err + } + return +} + +func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { + ikey := makeInternalKey(nil, key, seq, keyTypeSeek) + + if auxm != nil { + if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me + } + } em, fm := db.getMems() for _, m := range [...]*memDB{em, fm} { @@ -737,36 +765,36 @@ } defer m.decref() - mk, mv, me := m.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return nil, ErrNotFound - } - return append([]byte{}, mv...), nil - } - } else if me != ErrNotFound { - return nil, me + if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me } } v := db.s.version() - value, cSched, err := v.get(ikey, ro, false) + value, cSched, err := v.get(auxt, ikey, ro, false) v.release() if cSched { // Trigger table compaction. 
- db.compSendTrigger(db.tcompCmdC) + db.compTrigger(db.tcompCmdC) } return } -func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { - ikey := newIkey(key, seq, ktSeek) +func nilIfNotFound(err error) error { + if err == ErrNotFound { + return nil + } + return err +} + +func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { + ikey := makeInternalKey(nil, key, seq, keyTypeSeek) + + if auxm != nil { + if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok { + return me == nil, nilIfNotFound(me) + } + } em, fm := db.getMems() for _, m := range [...]*memDB{em, fm} { @@ -775,30 +803,17 @@ } defer m.decref() - mk, _, me := m.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return false, nil - } - return true, nil - } - } else if me != ErrNotFound { - return false, me + if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok { + return me == nil, nilIfNotFound(me) } } v := db.s.version() - _, cSched, err := v.get(ikey, ro, true) + _, cSched, err := v.get(auxt, ikey, ro, true) v.release() if cSched { // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) + db.compTrigger(db.tcompCmdC) } if err == nil { ret = true @@ -822,7 +837,7 @@ se := db.acquireSnapshot() defer db.releaseSnapshot(se) - return db.get(key, se.seq, ro) + return db.get(nil, nil, key, se.seq, ro) } // Has returns true if the DB does contains the given key. @@ -836,11 +851,11 @@ se := db.acquireSnapshot() defer db.releaseSnapshot(se) - return db.has(key, se.seq, ro) + return db.has(nil, nil, key, se.seq, ro) } // NewIterator returns an iterator for the latest snapshot of the -// uderlying DB. +// underlying DB. // The returned iterator is not goroutine-safe, but it is safe to use // multiple iterators concurrently, with each in a dedicated goroutine. 
// It is also safe to use an iterator concurrently with modifying its @@ -864,7 +879,7 @@ defer db.releaseSnapshot(se) // Iterator holds 'version' lock, 'version' is immutable so snapshot // can be released after iterator created. - return db.newIterator(se.seq, slice, ro) + return db.newIterator(nil, nil, se.seq, slice, ro) } // GetSnapshot returns a latest snapshot of the underlying DB. A snapshot @@ -920,7 +935,7 @@ var level uint var rest string n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) - if n != 1 || int(level) >= db.s.o.GetNumLevel() { + if n != 1 { err = ErrNotFound } else { value = fmt.Sprint(v.tLen(int(level))) @@ -929,8 +944,8 @@ value = "Compactions\n" + " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.tables { - duration, read, write := db.compStats[level].get() + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) if len(tables) == 0 && duration == 0 { continue } @@ -939,10 +954,10 @@ float64(read)/1048576.0, float64(write)/1048576.0) } case p == "sstables": - for level, tables := range v.tables { + for level, tables := range v.levels { value += fmt.Sprintf("--- level %d ---\n", level) for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax) + value += fmt.Sprintf("%d:%d[%q .. 
%q]\n", t.fd.Num, t.size, t.imin, t.imax) } } case p == "blockpool": @@ -982,8 +997,8 @@ sizes := make(Sizes, 0, len(ranges)) for _, r := range ranges { - imin := newIkey(r.Start, kMaxSeq, ktSeek) - imax := newIkey(r.Limit, kMaxSeq, ktSeek) + imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek) + imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek) start, err := v.offsetOf(imin) if err != nil { return nil, err @@ -992,7 +1007,7 @@ if err != nil { return nil, err } - var size uint64 + var size int64 if limit >= start { size = limit - start } @@ -1002,8 +1017,8 @@ return sizes, nil } -// Close closes the DB. This will also releases any outstanding snapshot and -// abort any in-flight compaction. +// Close closes the DB. This will also releases any outstanding snapshot, +// abort any in-flight compaction and discard open transaction. // // It is not safe to close a DB until all outstanding iterators are released. // It is valid to call Close multiple times. Other methods should not be @@ -1032,11 +1047,18 @@ // Signal all goroutines. close(db.closeC) + // Discard open transaction. + if db.tr != nil { + db.tr.Discard() + } + + // Acquire writer lock. + db.writeLockC <- struct{}{} + // Wait for all gorotines to exit. db.closeW.Wait() - // Lock writer and closes journal. - db.writeLockC <- struct{}{} + // Closes journal. 
if db.journal != nil { db.journal.Close() db.journalWriter.Close() @@ -1063,8 +1085,6 @@ db.frozenMem = nil db.journal = nil db.journalWriter = nil - db.journalFile = nil - db.frozenJournalFile = nil db.closer = nil return err diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_iter.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_iter.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_iter.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_iter.go 2016-05-24 07:05:22.000000000 +0000 @@ -19,7 +19,7 @@ ) var ( - errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") + errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key") ) type memdbReleaser struct { @@ -33,40 +33,50 @@ }) } -func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { +func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) em, fm := db.getMems() v := db.s.version() - ti := v.getIterators(slice, ro) - n := len(ti) + 2 - i := make([]iterator.Iterator, 0, n) + tableIts := v.getIterators(slice, ro) + n := len(tableIts) + len(auxt) + 3 + its := make([]iterator.Iterator, 0, n) + + if auxm != nil { + ami := auxm.NewIterator(slice) + ami.SetReleaser(&memdbReleaser{m: auxm}) + its = append(its, ami) + } + for _, t := range auxt { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + emi := em.NewIterator(slice) emi.SetReleaser(&memdbReleaser{m: em}) - i = append(i, emi) + its = append(its, emi) if fm != nil { fmi := fm.NewIterator(slice) fmi.SetReleaser(&memdbReleaser{m: fm}) - i = append(i, fmi) + its = append(its, fmi) } - i = append(i, ti...) 
- strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) - mi := iterator.NewMergedIterator(i, db.s.icmp, strict) + its = append(its, tableIts...) + mi := iterator.NewMergedIterator(its, db.s.icmp, strict) mi.SetReleaser(&versionReleaser{v: v}) return mi } -func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { +func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { var islice *util.Range if slice != nil { islice = &util.Range{} if slice.Start != nil { - islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek) + islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek) } if slice.Limit != nil { - islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek) + islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek) } } - rawIter := db.newRawIterator(islice, ro) + rawIter := db.newRawIterator(auxm, auxt, islice, ro) iter := &dbIter{ db: db, icmp: db.s.icmp, @@ -177,7 +187,7 @@ return false } - ikey := newIkey(key, i.seq, ktSeek) + ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek) if i.iter.Seek(ikey) { i.dir = dirSOI return i.next() @@ -189,15 +199,15 @@ func (i *dbIter) next() bool { for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { i.sampleSeek() if seq <= i.seq { switch kt { - case ktDel: + case keyTypeDel: // Skip deleted key. i.key = append(i.key[:0], ukey...) i.dir = dirForward - case ktVal: + case keyTypeVal: if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { i.key = append(i.key[:0], ukey...) i.value = append(i.value[:0], i.iter.Value()...) 
@@ -240,13 +250,13 @@ del := true if i.iter.Valid() { for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { i.sampleSeek() if seq <= i.seq { if !del && i.icmp.uCompare(ukey, i.key) < 0 { return true } - del = (kt == ktDel) + del = (kt == keyTypeDel) if !del { i.key = append(i.key[:0], ukey...) i.value = append(i.value[:0], i.iter.Value()...) @@ -282,7 +292,7 @@ return i.Last() case dirForward: for i.iter.Prev() { - if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil { + if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil { i.sampleSeek() if i.icmp.uCompare(ukey, i.key) < 0 { goto cont diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go 2016-05-24 07:05:22.000000000 +0000 @@ -110,7 +110,7 @@ err = ErrSnapshotReleased return } - return snap.db.get(key, snap.elem.seq, ro) + return snap.db.get(nil, nil, key, snap.elem.seq, ro) } // Has returns true if the DB does contains the given key. @@ -127,10 +127,10 @@ err = ErrSnapshotReleased return } - return snap.db.has(key, snap.elem.seq, ro) + return snap.db.has(nil, nil, key, snap.elem.seq, ro) } -// NewIterator returns an iterator for the snapshot of the uderlying DB. +// NewIterator returns an iterator for the snapshot of the underlying DB. // The returned iterator is not goroutine-safe, but it is safe to use // multiple iterators concurrently, with each in a dedicated goroutine. 
// It is also safe to use an iterator concurrently with modifying its @@ -158,7 +158,7 @@ } // Since iterator already hold version ref, it doesn't need to // hold snapshot ref. - return snap.db.newIterator(snap.elem.seq, slice, ro) + return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro) } // Release releases the snapshot. This will not release any returned diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_state.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_state.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_state.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_state.go 2016-05-24 07:05:22.000000000 +0000 @@ -12,6 +12,7 @@ "github.com/syndtr/goleveldb/leveldb/journal" "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" ) type memDB struct { @@ -20,6 +21,10 @@ ref int32 } +func (m *memDB) getref() int32 { + return atomic.LoadInt32(&m.ref) +} + func (m *memDB) incref() { atomic.AddInt32(&m.ref, 1) } @@ -48,11 +53,15 @@ atomic.AddUint64(&db.seq, delta) } -func (db *DB) sampleSeek(ikey iKey) { +func (db *DB) setSeq(seq uint64) { + atomic.StoreUint64(&db.seq, seq) +} + +func (db *DB) sampleSeek(ikey internalKey) { v := db.s.version() if v.sampleSeek(ikey) { // Trigger table compaction. 
- db.compSendTrigger(db.tcompCmdC) + db.compTrigger(db.tcompCmdC) } v.release() } @@ -67,12 +76,18 @@ } } -func (db *DB) mpoolGet() *memdb.DB { +func (db *DB) mpoolGet(n int) *memDB { + var mdb *memdb.DB select { - case mem := <-db.memPool: - return mem + case mdb = <-db.memPool: default: - return nil + } + if mdb == nil || mdb.Capacity() < n { + mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) + } + return &memDB{ + db: db, + DB: mdb, } } @@ -95,11 +110,10 @@ // Create new memdb and froze the old one; need external synchronization. // newMem only called synchronously by the writer. func (db *DB) newMem(n int) (mem *memDB, err error) { - num := db.s.allocFileNum() - file := db.s.getJournalFile(num) - w, err := file.Create() + fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()} + w, err := db.s.stor.Create(fd) if err != nil { - db.s.reuseFileNum(num) + db.s.reuseFileNum(fd.Num) return } @@ -115,20 +129,14 @@ } else { db.journal.Reset(w) db.journalWriter.Close() - db.frozenJournalFile = db.journalFile + db.frozenJournalFd = db.journalFd } db.journalWriter = w - db.journalFile = file + db.journalFd = fd db.frozenMem = db.mem - mdb := db.mpoolGet() - if mdb == nil || mdb.Capacity() < n { - mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) - } - mem = &memDB{ - db: db, - DB: mdb, - ref: 2, - } + mem = db.mpoolGet(n) + mem.incref() // for self + mem.incref() // for caller db.mem = mem // The seq only incremented by the writer. And whoever called newMem // should hold write lock, so no need additional synchronization here. @@ -181,12 +189,12 @@ // Drop frozen memdb; assume that frozen memdb isn't nil. 
func (db *DB) dropFrozenMem() { db.memMu.Lock() - if err := db.frozenJournalFile.Remove(); err != nil { - db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err) + if err := db.s.stor.Remove(db.frozenJournalFd); err != nil { + db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err) } else { - db.logf("journal@remove removed @%d", db.frozenJournalFile.Num()) + db.logf("journal@remove removed @%d", db.frozenJournalFd.Num) } - db.frozenJournalFile = nil + db.frozenJournalFd = storage.FileDesc{} db.frozenMem.decref() db.frozenMem = nil db.memMu.Unlock() diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -23,12 +23,15 @@ "time" "unsafe" + "github.com/onsi/gomega" + "github.com/syndtr/goleveldb/leveldb/comparer" "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/testutil" "github.com/syndtr/goleveldb/leveldb/util" ) @@ -41,10 +44,23 @@ return randomString(r, n) } +func testingLogger(t *testing.T) func(log string) { + return func(log string) { + t.Log(log) + } +} + +func testingPreserveOnFailed(t *testing.T) func() (preserve bool, err error) { + return func() (preserve bool, err error) { + preserve = t.Failed() + return + } +} + type dbHarness struct { t *testing.T - stor *testStorage + stor *testutil.Storage db *DB o *opt.Options ro *opt.ReadOptions @@ -58,12 +74,15 @@ } func newDbHarness(t *testing.T) 
*dbHarness { - return newDbHarnessWopt(t, &opt.Options{}) + return newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true}) } func (h *dbHarness) init(t *testing.T, o *opt.Options) { + gomega.RegisterTestingT(t) h.t = t - h.stor = newTestStorage(t) + h.stor = testutil.NewStorage() + h.stor.OnLog(testingLogger(t)) + h.stor.OnClose(testingPreserveOnFailed(t)) h.o = o h.ro = nil h.wo = nil @@ -93,21 +112,28 @@ } func (h *dbHarness) closeDB() { - if err := h.closeDB0(); err != nil { - h.t.Error("Close: got error: ", err) + if h.db != nil { + if err := h.closeDB0(); err != nil { + h.t.Error("Close: got error: ", err) + } + h.db = nil } h.stor.CloseCheck() runtime.GC() } func (h *dbHarness) reopenDB() { - h.closeDB() + if h.db != nil { + h.closeDB() + } h.openDB() } func (h *dbHarness) close() { - h.closeDB0() - h.db = nil + if h.db != nil { + h.closeDB0() + h.db = nil + } h.stor.Close() h.stor = nil runtime.GC() @@ -149,24 +175,26 @@ } } -func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { +func (h *dbHarness) maxNextLevelOverlappingBytes(want int64) { t := h.t db := h.db var ( - maxOverlaps uint64 + maxOverlaps int64 maxLevel int ) v := db.s.version() - for i, tt := range v.tables[1 : len(v.tables)-1] { - level := i + 1 - next := v.tables[level+1] - for _, t := range tt { - r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) - sum := r.size() - if sum > maxOverlaps { - maxOverlaps = sum - maxLevel = level + if len(v.levels) > 2 { + for i, tt := range v.levels[1 : len(v.levels)-1] { + level := i + 1 + next := v.levels[level+1] + for _, t := range tt { + r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) + sum := r.size() + if sum > maxOverlaps { + maxOverlaps = sum + maxLevel = level + } } } } @@ -248,8 +276,8 @@ db := h.db s := db.s - ikey := newIkey([]byte(key), kMaxSeq, ktVal) - iter := db.newRawIterator(nil, nil) + ikey := makeInternalKey(nil, []byte(key), keyMaxSeq, keyTypeVal) + iter := 
db.newRawIterator(nil, nil, nil, nil) if !iter.Seek(ikey) && iter.Error() != nil { t.Error("AllEntries: error during seek, err: ", iter.Error()) return @@ -257,7 +285,7 @@ res := "[ " first := true for iter.Valid() { - if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil { + if ukey, _, kt, kerr := parseInternalKey(iter.Key()); kerr == nil { if s.icmp.uCompare(ikey.ukey(), ukey) != 0 { break } @@ -266,9 +294,9 @@ } first = false switch kt { - case ktVal: + case keyTypeVal: res += string(iter.Value()) - case ktDel: + case keyTypeDel: res += "DEL" } } else { @@ -315,7 +343,7 @@ func (h *dbHarness) waitCompaction() { t := h.t db := h.db - if err := db.compSendIdle(db.tcompCmdC); err != nil { + if err := db.compTriggerWait(db.tcompCmdC); err != nil { t.Error("compaction error: ", err) } } @@ -324,7 +352,7 @@ t := h.t db := h.db - if err := db.compSendIdle(db.mcompCmdC); err != nil { + if err := db.compTriggerWait(db.mcompCmdC); err != nil { t.Error("compaction error: ", err) } } @@ -340,10 +368,7 @@ <-db.writeLockC }() - if _, err := db.rotateMem(0); err != nil { - t.Error("compaction error: ", err) - } - if err := db.compSendIdle(db.mcompCmdC); err != nil { + if _, err := db.rotateMem(0, true); err != nil { t.Error("compaction error: ", err) } @@ -368,7 +393,7 @@ t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max) - if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil { + if err := db.compTriggerRange(db.tcompCmdC, level, _min, _max); err != nil { if wanterr { t.Log("CompactRangeAt: got error (expected): ", err) } else { @@ -405,7 +430,7 @@ t.Log("DB range compaction done") } -func (h *dbHarness) sizeOf(start, limit string) uint64 { +func (h *dbHarness) sizeOf(start, limit string) int64 { sz, err := h.db.SizeOf([]util.Range{ {[]byte(start), []byte(limit)}, }) @@ -415,7 +440,7 @@ return sz.Sum() } -func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) { +func (h *dbHarness) sizeAssert(start, limit 
string, low, hi int64) { sz := h.sizeOf(start, limit) if sz < low || sz > hi { h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d", @@ -430,21 +455,26 @@ } return } -func (h *dbHarness) tablesPerLevel(want string) { + +func (h *dbHarness) getTablesPerLevel() string { res := "" nz := 0 v := h.db.s.version() - for level, tt := range v.tables { + for level, tables := range v.levels { if level > 0 { res += "," } - res += fmt.Sprint(len(tt)) - if len(tt) > 0 { + res += fmt.Sprint(len(tables)) + if len(tables) > 0 { nz = len(res) } } v.release() - res = res[:nz] + return res[:nz] +} + +func (h *dbHarness) tablesPerLevel(want string) { + res := h.getTablesPerLevel() if res != want { h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) } @@ -452,8 +482,8 @@ func (h *dbHarness) totalTables() (n int) { v := h.db.s.version() - for _, tt := range v.tables { - n += len(tt) + for _, tables := range v.levels { + n += len(tables) } v.release() return @@ -475,7 +505,7 @@ return fmt.Sprintf("key%06d", num) } -var _bloom_filter = filter.NewBloomFilter(10) +var testingBloomFilter = filter.NewBloomFilter(10) func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { for i := 0; i < 4; i++ { @@ -484,16 +514,22 @@ case 0: case 1: if o == nil { - o = &opt.Options{Filter: _bloom_filter} + o = &opt.Options{ + DisableLargeBatchTransaction: true, + Filter: testingBloomFilter, + } } else { old := o o = &opt.Options{} *o = *old - o.Filter = _bloom_filter + o.Filter = testingBloomFilter } case 2: if o == nil { - o = &opt.Options{Compression: opt.NoCompression} + o = &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + } } else { old := o o = &opt.Options{} @@ -591,24 +627,27 @@ } func TestDB_GetFromFrozen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100}) + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 100100, + }) defer h.close() h.put("foo", "v1") 
h.getVal("foo", "v1") - h.stor.DelaySync(storage.TypeTable) // Block sync calls - h.put("k1", strings.Repeat("x", 100000)) // Fill memtable - h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction + h.stor.Stall(testutil.ModeSync, storage.TypeTable) // Block sync calls + h.put("k1", strings.Repeat("x", 100000)) // Fill memtable + h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { time.Sleep(10 * time.Microsecond) } if h.db.getFrozenMem() == nil { - h.stor.ReleaseSync(storage.TypeTable) + h.stor.Release(testutil.ModeSync, storage.TypeTable) t.Fatal("No frozen mem") } h.getVal("foo", "v1") - h.stor.ReleaseSync(storage.TypeTable) // Release sync calls + h.stor.Release(testutil.ModeSync, storage.TypeTable) // Release sync calls h.reopenDB() h.getVal("foo", "v1") @@ -660,6 +699,8 @@ func TestDB_GetLevel0Ordering(t *testing.T) { trun(t, func(h *dbHarness) { + h.db.memdbMaxLevel = 2 + for i := 0; i < 4; i++ { h.put("bar", fmt.Sprintf("b%d", i)) h.put("foo", fmt.Sprintf("v%d", i)) @@ -719,6 +760,8 @@ func TestDB_GetEncountersEmptyLevel(t *testing.T) { trun(t, func(h *dbHarness) { + h.db.memdbMaxLevel = 2 + // Arrange for the following to happen: // * sstable A in level 0 // * nothing in level 1 @@ -859,13 +902,13 @@ } func TestDB_RecoverDuringMemtableCompaction(t *testing.T) { - truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) { + truno(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 1000000}, func(h *dbHarness) { - h.stor.DelaySync(storage.TypeTable) + h.stor.Stall(testutil.ModeSync, storage.TypeTable) h.put("big1", strings.Repeat("x", 10000000)) h.put("big2", strings.Repeat("y", 1000)) h.put("bar", "v2") - h.stor.ReleaseSync(storage.TypeTable) + h.stor.Release(testutil.ModeSync, storage.TypeTable) h.reopenDB() h.getVal("bar", "v2") @@ -875,7 +918,7 @@ } func TestDB_MinorCompactionsHappen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000}) 
+ h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 10000}) defer h.close() n := 500 @@ -925,8 +968,9 @@ func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) { h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 10000000, - Compression: opt.NoCompression, + DisableLargeBatchTransaction: true, + WriteBuffer: 10000000, + Compression: opt.NoCompression, }) defer h.close() @@ -962,10 +1006,10 @@ } func TestDB_RepeatedWritesToSameKey(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) + h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 100000}) defer h.close() - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() + maxTables := h.o.GetWriteL0PauseTrigger() + 7 value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) for i := 0; i < 5*maxTables; i++ { @@ -978,12 +1022,15 @@ } func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 100000, + }) defer h.close() h.reopenDB() - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() + maxTables := h.o.GetWriteL0PauseTrigger() + 7 value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) for i := 0; i < 5*maxTables; i++ { @@ -996,10 +1043,10 @@ } func TestDB_SparseMerge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, Compression: opt.NoCompression}) defer h.close() - h.putMulti(h.o.GetNumLevel(), "A", "Z") + h.putMulti(7, "A", "Z") // Suppose there is: // small amount of data with prefix A @@ -1035,8 +1082,9 @@ func TestDB_SizeOf(t *testing.T) { h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - WriteBuffer: 10000000, + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + WriteBuffer: 10000000, 
}) defer h.close() @@ -1061,13 +1109,13 @@ for cs := 0; cs < n; cs += 10 { for i := 0; i < n; i += 10 { - h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) - h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) - h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) + h.sizeAssert("", numKey(i), int64(s1*i), int64(s2*i)) + h.sizeAssert("", numKey(i)+".suffix", int64(s1*(i+1)), int64(s2*(i+1))) + h.sizeAssert(numKey(i), numKey(i+10), int64(s1*10), int64(s2*10)) } - h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) - h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) + h.sizeAssert("", numKey(50), int64(s1*50), int64(s2*50)) + h.sizeAssert("", numKey(50)+".suffix", int64(s1*50), int64(s2*50)) h.compactRangeAt(0, numKey(cs), numKey(cs+9)) } @@ -1084,10 +1132,13 @@ } func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + }) defer h.close() - sizes := []uint64{ + sizes := []int64{ 10000, 10000, 100000, @@ -1105,7 +1156,7 @@ for r := 0; r < 3; r++ { h.reopenDB() - var x uint64 + var x int64 for i, n := range sizes { y := x if i > 0 { @@ -1192,9 +1243,11 @@ trun(t, func(h *dbHarness) { s := h.db.s + m := 2 + h.db.memdbMaxLevel = m + h.put("foo", "v1") h.compactMem() - m := h.o.GetMaxMemCompationLevel() v := s.version() num := v.tLen(m) v.release() @@ -1236,9 +1289,11 @@ defer h.close() s := h.db.s + m := 2 + h.db.memdbMaxLevel = m + h.put("foo", "v1") h.compactMem() - m := h.o.GetMaxMemCompationLevel() v := s.version() num := v.tLen(m) v.release() @@ -1273,9 +1328,14 @@ } func TestDB_CompactionTableOpenError(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1}) + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + OpenFilesCacheCapacity: -1, + }) defer h.close() + 
h.db.memdbMaxLevel = 2 + im := 10 jm := 10 for r := 0; r < 2; r++ { @@ -1288,17 +1348,17 @@ } if n := h.totalTables(); n != im*2 { - t.Errorf("total tables is %d, want %d", n, im) + t.Errorf("total tables is %d, want %d", n, im*2) } - h.stor.SetEmuErr(storage.TypeTable, tsOpOpen) + h.stor.EmulateError(testutil.ModeOpen, storage.TypeTable, errors.New("open error during table compaction")) go h.db.CompactRange(util.Range{}) - if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil { + if err := h.db.compTriggerWait(h.db.tcompCmdC); err != nil { t.Log("compaction error: ", err) } h.closeDB0() h.openDB() - h.stor.SetEmuErr(0, tsOpOpen) + h.stor.EmulateError(testutil.ModeOpen, storage.TypeTable, nil) for i := 0; i < im; i++ { for j := 0; j < jm; j++ { @@ -1309,9 +1369,7 @@ func TestDB_OverlapInLevel0(t *testing.T) { trun(t, func(h *dbHarness) { - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } + h.db.memdbMaxLevel = 2 // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. 
h.put("100", "v100") @@ -1429,23 +1487,23 @@ h.compactMem() h.getVal("foo", "bar") v := h.db.s.version() - if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 { + if n := v.tLen(0); n != 1 { t.Errorf("invalid total tables, want=1 got=%d", n) } v.release() if i == 0 { - h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite) + h.stor.EmulateError(testutil.ModeWrite, storage.TypeManifest, errors.New("manifest write error")) } else { - h.stor.SetEmuErr(storage.TypeManifest, tsOpSync) + h.stor.EmulateError(testutil.ModeSync, storage.TypeManifest, errors.New("manifest sync error")) } // Merging compaction (will fail) - h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true) + h.compactRangeAtErr(0, "", "", true) h.db.Close() - h.stor.SetEmuErr(0, tsOpWrite) - h.stor.SetEmuErr(0, tsOpSync) + h.stor.EmulateError(testutil.ModeWrite, storage.TypeManifest, nil) + h.stor.EmulateError(testutil.ModeSync, storage.TypeManifest, nil) // Should not lose data h.openDB() @@ -1563,8 +1621,9 @@ func TestDB_CustomComparer(t *testing.T) { h := newDbHarnessWopt(t, &opt.Options{ - Comparer: numberComparer{}, - WriteBuffer: 1000, + DisableLargeBatchTransaction: true, + Comparer: numberComparer{}, + WriteBuffer: 1000, }) defer h.close() @@ -1595,9 +1654,7 @@ h := newDbHarness(t) defer h.close() - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } + h.db.memdbMaxLevel = 2 h.putMulti(3, "p", "q") h.tablesPerLevel("1,1,1") @@ -1631,8 +1688,9 @@ func TestDB_BloomFilter(t *testing.T) { h := newDbHarnessWopt(t, &opt.Options{ - DisableBlockCache: true, - Filter: filter.NewBloomFilter(10), + DisableLargeBatchTransaction: true, + DisableBlockCache: true, + Filter: filter.NewBloomFilter(10), }) defer h.close() @@ -1654,168 +1712,193 @@ h.compactMem() // Prevent auto compactions triggered by seeks - h.stor.DelaySync(storage.TypeTable) + h.stor.Stall(testutil.ModeSync, storage.TypeTable) // Lookup present keys. Should rarely read from small sstable. 
- h.stor.SetReadCounter(storage.TypeTable) + h.stor.ResetCounter(testutil.ModeRead, storage.TypeTable) for i := 0; i < n; i++ { h.getVal(key(i), key(i)) } - cnt := int(h.stor.ReadCounter()) + cnt, _ := h.stor.Counter(testutil.ModeRead, storage.TypeTable) t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) - if min, max := n, n+2*n/100; cnt < min || cnt > max { t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) } // Lookup missing keys. Should rarely read from either sstable. - h.stor.ResetReadCounter() + h.stor.ResetCounter(testutil.ModeRead, storage.TypeTable) for i := 0; i < n; i++ { h.get(key(i)+".missing", false) } - cnt = int(h.stor.ReadCounter()) + cnt, _ = h.stor.Counter(testutil.ModeRead, storage.TypeTable) t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) if max := 3 * n / 100; cnt > max { t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) } - h.stor.ReleaseSync(storage.TypeTable) + h.stor.Release(testutil.ModeSync, storage.TypeTable) } func TestDB_Concurrent(t *testing.T) { - const n, secs, maxkey = 4, 2, 1000 + const n, secs, maxkey = 4, 6, 1000 + h := newDbHarness(t) + defer h.close() - runtime.GOMAXPROCS(n) - trun(t, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - var cnt [n]uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - var put, get, found uint - defer func() { - t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", - i, cnt[i], put, get, found, get-found) - closeWg.Done() - }() - - rnd := rand.New(rand.NewSource(int64(1000 + i))) - for atomic.LoadUint32(&stop) == 0 { - x := cnt[i] - - k := rnd.Intn(maxkey) - kstr := fmt.Sprintf("%016d", k) - - if (rnd.Int() % 2) > 0 { - put++ - h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) - } else { - get++ - v, err := h.db.Get([]byte(kstr), h.ro) - if err == nil { - found++ - rk, ri, rx := 0, -1, uint32(0) - 
fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) - if rk != k { - t.Errorf("invalid key want=%d got=%d", k, rk) - } - if ri < 0 || ri >= n { - t.Error("invalid goroutine number: ", ri) - } else { - tx := atomic.LoadUint32(&(cnt[ri])) - if rx > tx { - t.Errorf("invalid seq number, %d > %d ", rx, tx) - } + runtime.GOMAXPROCS(runtime.NumCPU()) + + var ( + closeWg sync.WaitGroup + stop uint32 + cnt [n]uint32 + ) + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + var put, get, found uint + defer func() { + t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", + i, cnt[i], put, get, found, get-found) + closeWg.Done() + }() + + rnd := rand.New(rand.NewSource(int64(1000 + i))) + for atomic.LoadUint32(&stop) == 0 { + x := cnt[i] + + k := rnd.Intn(maxkey) + kstr := fmt.Sprintf("%016d", k) + + if (rnd.Int() % 2) > 0 { + put++ + h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) + } else { + get++ + v, err := h.db.Get([]byte(kstr), h.ro) + if err == nil { + found++ + rk, ri, rx := 0, -1, uint32(0) + fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) + if rk != k { + t.Errorf("invalid key want=%d got=%d", k, rk) + } + if ri < 0 || ri >= n { + t.Error("invalid goroutine number: ", ri) + } else { + tx := atomic.LoadUint32(&(cnt[ri])) + if rx > tx { + t.Errorf("invalid seq number, %d > %d ", rx, tx) } - } else if err != ErrNotFound { - t.Error("Get: got error: ", err) - return } + } else if err != ErrNotFound { + t.Error("Get: got error: ", err) + return } - atomic.AddUint32(&cnt[i], 1) } - }(i) - } - - time.Sleep(secs * time.Second) - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) + atomic.AddUint32(&cnt[i], 1) + } + }(i) + } - runtime.GOMAXPROCS(1) + time.Sleep(secs * time.Second) + atomic.StoreUint32(&stop, 1) + closeWg.Wait() } -func TestDB_Concurrent2(t *testing.T) { - const n, n2 = 4, 4000 +func TestDB_ConcurrentIterator(t *testing.T) { + const n, n2 = 4, 1000 + h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: 
true, WriteBuffer: 30}) + defer h.close() - runtime.GOMAXPROCS(n*2 + 2) - truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 + runtime.GOMAXPROCS(runtime.NumCPU()) - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 0; atomic.LoadUint32(&stop) == 0; k++ { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } + var ( + closeWg sync.WaitGroup + stop uint32 + ) - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + for k := 0; atomic.LoadUint32(&stop) == 0; k++ { + h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) + } + closeWg.Done() + }(i) + } - cmp := comparer.DefaultComparer - for i := 0; i < n2; i++ { - closeWg.Add(1) - go func(i int) { - it := h.db.NewIterator(nil, nil) - var pk []byte - for it.Next() { - kk := it.Key() - if cmp.Compare(kk, pk) <= 0 { - t.Errorf("iter %d: %q is successor of %q", i, pk, kk) - } - pk = append(pk[:0], kk...) 
- var k, vk, vi int - if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { - t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) - } else if n < 1 { - t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) - } - if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { - t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) - } else if n < 2 { - t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) - } + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { + h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) + } + closeWg.Done() + }(i) + } - if vk != k { - t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) - } + cmp := comparer.DefaultComparer + for i := 0; i < n2; i++ { + closeWg.Add(1) + go func(i int) { + it := h.db.NewIterator(nil, nil) + var pk []byte + for it.Next() { + kk := it.Key() + if cmp.Compare(kk, pk) <= 0 { + t.Errorf("iter %d: %q is successor of %q", i, pk, kk) } - if err := it.Error(); err != nil { - t.Errorf("iter %d: Got error: %v", i, err) + pk = append(pk[:0], kk...) 
+ var k, vk, vi int + if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { + t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) + } else if n < 1 { + t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) + } + if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { + t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) + } else if n < 2 { + t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) } - it.Release() - closeWg.Done() - }(i) - } - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) + if vk != k { + t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) + } + } + if err := it.Error(); err != nil { + t.Errorf("iter %d: Got error: %v", i, err) + } + it.Release() + closeWg.Done() + }(i) + } + + atomic.StoreUint32(&stop, 1) + closeWg.Wait() +} + +func TestDB_ConcurrentWrite(t *testing.T) { + const n, niter = 10, 10000 + h := newDbHarness(t) + defer h.close() + + runtime.GOMAXPROCS(runtime.NumCPU()) - runtime.GOMAXPROCS(1) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + for k := 0; k < niter; k++ { + kstr := fmt.Sprintf("%d.%d", i, k) + vstr := fmt.Sprintf("v%d", k) + h.put(kstr, vstr) + // Key should immediately available after put returns. + h.getVal(kstr, vstr) + } + }(i) + } + wg.Wait() } func TestDB_CreateReopenDbOnFile(t *testing.T) { @@ -1826,7 +1909,7 @@ defer os.RemoveAll(dbpath) for i := 0; i < 3; i++ { - stor, err := storage.OpenFile(dbpath) + stor, err := storage.OpenFile(dbpath, false) if err != nil { t.Fatalf("(%d) cannot open storage: %s", i, err) } @@ -1889,7 +1972,10 @@ // Disable compression since it affects the creation of layers and the // code below is trying to test against a very specific scenario. 
- h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + }) defer h.close() // Create first key range. @@ -1950,7 +2036,8 @@ func TestDB_GoleveldbIssue74(t *testing.T) { h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, + DisableLargeBatchTransaction: true, + WriteBuffer: 1 * opt.MiB, }) defer h.close() @@ -2068,8 +2155,9 @@ func TestDB_GoleveldbIssue72and83(t *testing.T) { h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - OpenFilesCacheCapacity: 3, + DisableLargeBatchTransaction: true, + WriteBuffer: 1 * opt.MiB, + OpenFilesCacheCapacity: 3, }) defer h.close() @@ -2201,9 +2289,10 @@ func TestDB_TransientError(t *testing.T) { h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 128 * opt.KiB, - OpenFilesCacheCapacity: 3, - DisableCompactionBackoff: true, + DisableLargeBatchTransaction: true, + WriteBuffer: 128 * opt.KiB, + OpenFilesCacheCapacity: 3, + DisableCompactionBackoff: true, }) defer h.close() @@ -2223,10 +2312,10 @@ key := fmt.Sprintf("KEY%8d", k) b.Put([]byte(key), []byte(key+vtail)) } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, errors.New("table transient read error")) if err := h.db.Write(b, nil); err != nil { t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite) + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) for { if err := h.db.Write(b, nil); err == nil { break @@ -2242,10 +2331,10 @@ key := fmt.Sprintf("KEY%8d", k) b.Delete([]byte(key)) } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, errors.New("table transient read error")) if err := h.db.Write(b, nil); err != nil { t.Logf("WRITE #%d 
error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) for { if err := h.db.Write(b, nil); err == nil { break @@ -2255,7 +2344,7 @@ } } } - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) runtime.GOMAXPROCS(runtime.NumCPU()) @@ -2314,9 +2403,10 @@ func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) { h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - CompactionExpandLimitFactor: 1, + DisableLargeBatchTransaction: true, + WriteBuffer: 112 * opt.KiB, + CompactionTableSize: 90 * opt.KiB, + CompactionExpandLimitFactor: 1, }) defer h.close() @@ -2354,24 +2444,24 @@ h.compactMem() h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { + for level, tables := range h.db.s.stVersion.levels { for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) + t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) } } h.compactRangeAt(0, "", "") h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { + for level, tables := range h.db.s.stVersion.levels { for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) + t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) } } h.compactRangeAt(1, "", "") h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { + for level, tables := range h.db.s.stVersion.levels { for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) + t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) } } runtime.GOMAXPROCS(runtime.NumCPU()) @@ -2402,17 +2492,21 @@ } func TestDB_TableCompactionBuilder(t *testing.T) { - stor := newTestStorage(t) + gomega.RegisterTestingT(t) + stor := 
testutil.NewStorage() + stor.OnLog(testingLogger(t)) + stor.OnClose(testingPreserveOnFailed(t)) defer stor.Close() const nSeq = 99 o := &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 43 * opt.KiB, - CompactionExpandLimitFactor: 1, - CompactionGPOverlapsFactor: 1, - DisableBlockCache: true, + DisableLargeBatchTransaction: true, + WriteBuffer: 112 * opt.KiB, + CompactionTableSize: 43 * opt.KiB, + CompactionExpandLimitFactor: 1, + CompactionGPOverlapsFactor: 1, + DisableBlockCache: true, } s, err := newSession(stor, o) if err != nil { @@ -2436,7 +2530,7 @@ key := []byte(fmt.Sprintf("%09d", k)) seq += nSeq - 1 for x := uint64(0); x < nSeq; x++ { - if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil { + if err := tw.append(makeInternalKey(nil, key, seq-x, keyTypeVal), value); err != nil { t.Fatal(err) } } @@ -2454,13 +2548,13 @@ // Build grandparent. v := s.version() - c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) + c := newCompaction(s, v, 1, append(tFiles{}, v.levels[1]...)) rec := &sessionRecord{} b := &tableCompactionBuilder{ s: s, c: c, rec: rec, - stat1: new(cStatsStaging), + stat1: new(cStatStaging), minSeq: 0, strict: true, tableSize: o.CompactionTableSize/3 + 961, @@ -2468,8 +2562,8 @@ if err := b.run(new(compactionTransactCounter)); err != nil { t.Fatal(err) } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) + for _, t := range c.levels[0] { + rec.delTable(c.sourceLevel, t.fd.Num) } if err := s.commit(rec); err != nil { t.Fatal(err) @@ -2478,13 +2572,13 @@ // Build level-1. 
v = s.version() - c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...)) + c = newCompaction(s, v, 0, append(tFiles{}, v.levels[0]...)) rec = &sessionRecord{} b = &tableCompactionBuilder{ s: s, c: c, rec: rec, - stat1: new(cStatsStaging), + stat1: new(cStatStaging), minSeq: 0, strict: true, tableSize: o.CompactionTableSize, @@ -2492,12 +2586,12 @@ if err := b.run(new(compactionTransactCounter)); err != nil { t.Fatal(err) } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) + for _, t := range c.levels[0] { + rec.delTable(c.sourceLevel, t.fd.Num) } // Move grandparent to level-3 - for _, t := range v.tables[2] { - rec.delTable(2, t.file.Num()) + for _, t := range v.levels[2] { + rec.delTable(2, t.fd.Num) rec.addTableFile(3, t) } if err := s.commit(rec); err != nil { @@ -2506,36 +2600,35 @@ c.release() v = s.version() - for level, want := range []bool{false, true, false, true, false} { - got := len(v.tables[level]) > 0 + for level, want := range []bool{false, true, false, true} { + got := len(v.levels[level]) > 0 if want != got { t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got) } } - for i, f := range v.tables[1][:len(v.tables[1])-1] { - nf := v.tables[1][i+1] + for i, f := range v.levels[1][:len(v.levels[1])-1] { + nf := v.levels[1][i+1] if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) { - t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num()) + t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.fd.Num, nf.fd.Num) } } v.release() // Compaction with transient error. 
v = s.version() - c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) + c = newCompaction(s, v, 1, append(tFiles{}, v.levels[1]...)) rec = &sessionRecord{} b = &tableCompactionBuilder{ s: s, c: c, rec: rec, - stat1: new(cStatsStaging), + stat1: new(cStatStaging), minSeq: 0, strict: true, tableSize: o.CompactionTableSize, } - stor.SetEmuErrOnce(storage.TypeTable, tsOpSync) - stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite) - stor.SetEmuRandErrProb(0xf0) + stor.EmulateErrorOnce(testutil.ModeSync, storage.TypeTable, errors.New("table sync error (once)")) + stor.EmulateRandomError(testutil.ModeRead|testutil.ModeWrite, storage.TypeTable, 0.01, errors.New("table random IO error")) for { if err := b.run(new(compactionTransactCounter)); err != nil { t.Logf("(expected) b.run: %v", err) @@ -2548,15 +2641,15 @@ } c.release() - stor.SetEmuErrOnce(0, tsOpSync) - stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite) + stor.EmulateErrorOnce(testutil.ModeSync, storage.TypeTable, nil) + stor.EmulateRandomError(testutil.ModeRead|testutil.ModeWrite, storage.TypeTable, 0, nil) v = s.version() - if len(v.tables[1]) != len(v.tables[2]) { - t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2])) + if len(v.levels[1]) != len(v.levels[2]) { + t.Fatalf("invalid tables length, want %d, got %d", len(v.levels[1]), len(v.levels[2])) } - for i, f0 := range v.tables[1] { - f1 := v.tables[2][i] + for i, f0 := range v.levels[1] { + f1 := v.levels[2][i] iter0 := s.tops.newIterator(f0, nil, nil) iter1 := s.tops.newIterator(f1, nil, nil) for j := 0; true; j++ { @@ -2589,11 +2682,14 @@ ) h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - DisableBlockCache: true, + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + DisableBlockCache: true, }) defer h.close() + h.db.memdbMaxLevel = 2 + key := func(x int) string { return fmt.Sprintf("v%06d", x) } @@ -2681,7 +2777,8 @@ t.Fatalf("SetReadOnly error: 
%v", err) } - h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync) + mode := testutil.ModeCreate | testutil.ModeRemove | testutil.ModeRename | testutil.ModeWrite | testutil.ModeSync + h.stor.EmulateError(mode, storage.TypeAll, errors.New("read-only DB shouldn't writes")) ro := func(key, value, wantValue string) { if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly { @@ -2699,3 +2796,34 @@ ro("bar", "vx", "v2") h.assertNumKeys(4) } + +func TestDB_BulkInsertDelete(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + CompactionTableSize: 128 * opt.KiB, + CompactionTotalSize: 1 * opt.MiB, + WriteBuffer: 256 * opt.KiB, + }) + defer h.close() + + const R = 100 + const N = 2500 + key := make([]byte, 4) + value := make([]byte, 256) + for i := 0; i < R; i++ { + offset := N * i + for j := 0; j < N; j++ { + binary.BigEndian.PutUint32(key, uint32(offset+j)) + h.db.Put(key, value, nil) + } + for j := 0; j < N; j++ { + binary.BigEndian.PutUint32(key, uint32(offset+j)) + h.db.Delete(key, nil) + } + } + + if tot := h.totalTables(); tot > 10 { + t.Fatalf("too many uncompacted tables: %d (%s)", tot, h.getTablesPerLevel()) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,289 @@ +// Copyright (c) 2016, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package leveldb + +import ( + "errors" + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var errTransactionDone = errors.New("leveldb: transaction already closed") + +// Transaction is the transaction handle. +type Transaction struct { + db *DB + lk sync.RWMutex + seq uint64 + mem *memDB + tables tFiles + ikScratch []byte + rec sessionRecord + stats cStatStaging + closed bool +} + +// Get gets the value for the given key. It returns ErrNotFound if the +// DB does not contains the key. +// +// The returned slice is its own copy, it is safe to modify the contents +// of the returned slice. +// It is safe to modify the contents of the argument after Get returns. +func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) { + tr.lk.RLock() + defer tr.lk.RUnlock() + if tr.closed { + return nil, errTransactionDone + } + return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro) +} + +// Has returns true if the DB does contains the given key. +// +// It is safe to modify the contents of the argument after Has returns. +func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) { + tr.lk.RLock() + defer tr.lk.RUnlock() + if tr.closed { + return false, errTransactionDone + } + return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro) +} + +// NewIterator returns an iterator for the latest snapshot of the transaction. +// The returned iterator is not goroutine-safe, but it is safe to use multiple +// iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently while writes to the +// transaction. The resultant key/value pairs are guaranteed to be consistent. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. 
+// +// The iterator must be released after use, by calling Release method. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + tr.lk.RLock() + defer tr.lk.RUnlock() + if tr.closed { + return iterator.NewEmptyIterator(errTransactionDone) + } + tr.mem.incref() + return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro) +} + +func (tr *Transaction) flush() error { + // Flush memdb. + if tr.mem.Len() != 0 { + tr.stats.startTimer() + iter := tr.mem.NewIterator(nil) + t, n, err := tr.db.s.tops.createFrom(iter) + iter.Release() + tr.stats.stopTimer() + if err != nil { + return err + } + if tr.mem.getref() == 1 { + tr.mem.Reset() + } else { + tr.mem.decref() + tr.mem = tr.db.mpoolGet(0) + tr.mem.incref() + } + tr.tables = append(tr.tables, t) + tr.rec.addTableFile(0, t) + tr.stats.write += t.size + tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + } + return nil +} + +func (tr *Transaction) put(kt keyType, key, value []byte) error { + tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt) + if tr.mem.Free() < len(tr.ikScratch)+len(value) { + if err := tr.flush(); err != nil { + return err + } + } + if err := tr.mem.Put(tr.ikScratch, value); err != nil { + return err + } + tr.seq++ + return nil +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// Please note that the transaction is not compacted until committed, so if you +// writes 10 same keys, then those 10 same keys are in the transaction. +// +// It is safe to modify the contents of the arguments after Put returns. 
+func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error { + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + return tr.put(keyTypeVal, key, value) +} + +// Delete deletes the value for the given key. +// Please note that the transaction is not compacted until committed, so if you +// writes 10 same keys, then those 10 same keys are in the transaction. +// +// It is safe to modify the contents of the arguments after Delete returns. +func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error { + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + return tr.put(keyTypeDel, key, nil) +} + +// Write apply the given batch to the transaction. The batch will be applied +// sequentially. +// Please note that the transaction is not compacted until committed, so if you +// writes 10 same keys, then those 10 same keys are in the transaction. +// +// It is safe to modify the contents of the arguments after Write returns. +func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error { + if b == nil || b.Len() == 0 { + return nil + } + + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + return b.decodeRec(func(i int, kt keyType, key, value []byte) error { + return tr.put(kt, key, value) + }) +} + +func (tr *Transaction) setDone() { + tr.closed = true + tr.db.tr = nil + tr.mem.decref() + <-tr.db.writeLockC +} + +// Commit commits the transaction. +// +// Other methods should not be called after transaction has been committed. +func (tr *Transaction) Commit() error { + if err := tr.db.ok(); err != nil { + return err + } + + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + defer tr.setDone() + if err := tr.flush(); err != nil { + tr.discard() + return err + } + if len(tr.tables) != 0 { + // Committing transaction. 
+ tr.rec.setSeqNum(tr.seq) + tr.db.compCommitLk.Lock() + defer tr.db.compCommitLk.Unlock() + for retry := 0; retry < 3; retry++ { + if err := tr.db.s.commit(&tr.rec); err != nil { + tr.db.logf("transaction@commit error R·%d %q", retry, err) + select { + case <-time.After(time.Second): + case _, _ = <-tr.db.closeC: + tr.db.logf("transaction@commit exiting") + return err + } + } else { + // Success. Set db.seq. + tr.db.setSeq(tr.seq) + break + } + } + // Trigger table auto-compaction. + tr.db.compTrigger(tr.db.tcompCmdC) + } + return nil +} + +func (tr *Transaction) discard() { + // Discard transaction. + for _, t := range tr.tables { + tr.db.logf("transaction@discard @%d", t.fd.Num) + if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil { + tr.db.s.reuseFileNum(t.fd.Num) + } + } +} + +// Discard discards the transaction. +// +// Other methods should not be called after transaction has been discarded. +func (tr *Transaction) Discard() { + tr.lk.Lock() + if !tr.closed { + tr.discard() + tr.setDone() + } + tr.lk.Unlock() +} + +// OpenTransaction opens an atomic DB transaction. Only one transaction can be +// opened at a time. Write will be blocked until the transaction is committed or +// discarded. +// The returned transaction handle is goroutine-safe. +// +// The transaction must be closed once done, either by committing or discarding +// the transaction. +// Closing the DB will discard open transaction. +func (db *DB) OpenTransaction() (*Transaction, error) { + if err := db.ok(); err != nil { + return nil, err + } + + // The write happen synchronously. + select { + case db.writeLockC <- struct{}{}: + case err := <-db.compPerErrC: + return nil, err + case _, _ = <-db.closeC: + return nil, ErrClosed + } + + if db.tr != nil { + panic("leveldb: has open transaction") + } + + // Flush current memdb. 
+ if db.mem != nil && db.mem.Len() != 0 { + if _, err := db.rotateMem(0, true); err != nil { + return nil, err + } + } + + tr := &Transaction{ + db: db, + seq: db.seq, + mem: db.mpoolGet(0), + } + tr.mem.incref() + db.tr = tr + return tr, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_util.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_util.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_util.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_util.go 2016-05-24 07:05:22.000000000 +0000 @@ -21,14 +21,16 @@ NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator } -type Sizes []uint64 +// Sizes is list of size. +type Sizes []int64 // Sum returns sum of the sizes. -func (p Sizes) Sum() (n uint64) { - for _, s := range p { - n += s +func (sizes Sizes) Sum() int64 { + var sum int64 + for _, size := range sizes { + sum += size } - return n + return sum } // Logging. 
@@ -40,59 +42,59 @@ v := db.s.version() defer v.release() - tablesMap := make(map[uint64]bool) - for _, tables := range v.tables { + tmap := make(map[int64]bool) + for _, tables := range v.levels { for _, t := range tables { - tablesMap[t.file.Num()] = false + tmap[t.fd.Num] = false } } - files, err := db.s.getFiles(storage.TypeAll) + fds, err := db.s.stor.List(storage.TypeAll) if err != nil { return err } - var nTables int - var rem []storage.File - for _, f := range files { + var nt int + var rem []storage.FileDesc + for _, fd := range fds { keep := true - switch f.Type() { + switch fd.Type { case storage.TypeManifest: - keep = f.Num() >= db.s.manifestFile.Num() + keep = fd.Num >= db.s.manifestFd.Num case storage.TypeJournal: - if db.frozenJournalFile != nil { - keep = f.Num() >= db.frozenJournalFile.Num() + if !db.frozenJournalFd.Nil() { + keep = fd.Num >= db.frozenJournalFd.Num } else { - keep = f.Num() >= db.journalFile.Num() + keep = fd.Num >= db.journalFd.Num } case storage.TypeTable: - _, keep = tablesMap[f.Num()] + _, keep = tmap[fd.Num] if keep { - tablesMap[f.Num()] = true - nTables++ + tmap[fd.Num] = true + nt++ } } if !keep { - rem = append(rem, f) + rem = append(rem, fd) } } - if nTables != len(tablesMap) { - var missing []*storage.FileInfo - for num, present := range tablesMap { + if nt != len(tmap) { + var mfds []storage.FileDesc + for num, present := range tmap { if !present { - missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num}) + mfds = append(mfds, storage.FileDesc{storage.TypeTable, num}) db.logf("db@janitor table missing @%d", num) } } - return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing}) + return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds}) } - db.logf("db@janitor F·%d G·%d", len(files), len(rem)) - for _, f := range rem { - db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) - if err := f.Remove(); err != nil { + db.logf("db@janitor F·%d G·%d", 
len(fds), len(rem)) + for _, fd := range rem { + db.logf("db@janitor removing %s-%d", fd.Type, fd.Num) + if err := db.s.stor.Remove(fd); err != nil { return err } } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_write.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_write.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_write.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/db_write.go 2016-05-24 07:05:22.000000000 +0000 @@ -45,9 +45,9 @@ } } -func (db *DB) rotateMem(n int) (mem *memDB, err error) { +func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) { // Wait for pending memdb compaction. - err = db.compSendIdle(db.mcompCmdC) + err = db.compTriggerWait(db.mcompCmdC) if err != nil { return } @@ -59,7 +59,11 @@ } // Schedule memdb compaction. - db.compSendTrigger(db.mcompCmdC) + if wait { + err = db.compTriggerWait(db.mcompCmdC) + } else { + db.compTrigger(db.mcompCmdC) + } return } @@ -84,7 +88,7 @@ return false case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger(): delayed = true - err = db.compSendIdle(db.tcompCmdC) + err = db.compTriggerWait(db.tcompCmdC) if err != nil { return false } @@ -94,7 +98,7 @@ mdbFree = n } else { mdb.decref() - mdb, err = db.rotateMem(n) + mdb, err = db.rotateMem(n, false) if err == nil { mdbFree = mdb.Free() } else { @@ -131,12 +135,27 @@ b.init(wo.GetSync() && !db.s.o.GetNoSync()) + if b.size() > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() { + // Writes using transaction. + tr, err1 := db.OpenTransaction() + if err1 != nil { + return err1 + } + if err1 := tr.Write(b, wo); err1 != nil { + tr.Discard() + return err1 + } + return tr.Commit() + } + // The write happen synchronously. 
select { case db.writeC <- b: if <-db.writeMergedC { return <-db.writeAckC } + // Continue, the write lock already acquired by previous writer + // and handed out to us. case db.writeLockC <- struct{}{}: case err = <-db.compPerErrC: return @@ -147,14 +166,15 @@ merged := 0 danglingMerge := false defer func() { + for i := 0; i < merged; i++ { + db.writeAckC <- err + } if danglingMerge { + // Only one dangling merge at most, so this is safe. db.writeMergedC <- false } else { <-db.writeLockC } - for i := 0; i < merged; i++ { - db.writeAckC <- err - } }() mdb, mdbFree, err := db.flush(b.size()) @@ -234,7 +254,7 @@ db.addSeq(uint64(b.Len())) if b.size() >= mdbFree { - db.rotateMem(0) + db.rotateMem(0, false) } return } @@ -261,8 +281,8 @@ func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { iter := mem.NewIterator(nil) defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0)) + return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) && + (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0)) } // CompactRange compacts the underlying DB for the given key range. @@ -293,12 +313,12 @@ defer mdb.decref() if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) { // Memdb compaction. - if _, err := db.rotateMem(0); err != nil { + if _, err := db.rotateMem(0, false); err != nil { <-db.writeLockC return err } <-db.writeLockC - if err := db.compSendIdle(db.mcompCmdC); err != nil { + if err := db.compTriggerWait(db.mcompCmdC); err != nil { return err } } else { @@ -306,7 +326,7 @@ } // Table compaction. - return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit) + return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit) } // SetReadOnly makes DB read-only. It will stay read-only until reopened. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go 2016-05-24 07:05:22.000000000 +0000 @@ -29,21 +29,21 @@ // ErrCorrupted is the type that wraps errors that indicate corruption in // the database. type ErrCorrupted struct { - File *storage.FileInfo - Err error + Fd storage.FileDesc + Err error } func (e *ErrCorrupted) Error() string { - if e.File != nil { - return fmt.Sprintf("%v [file=%v]", e.Err, e.File) + if !e.Fd.Nil() { + return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) } else { return e.Err.Error() } } // NewErrCorrupted creates new ErrCorrupted error. -func NewErrCorrupted(f storage.File, err error) error { - return &ErrCorrupted{storage.NewFileInfo(f), err} +func NewErrCorrupted(fd storage.FileDesc, err error) error { + return &ErrCorrupted{fd, err} } // IsCorrupted returns a boolean indicating whether the error is indicating @@ -61,17 +61,17 @@ // ErrMissingFiles is the type that indicating a corruption due to missing // files. ErrMissingFiles always wrapped with ErrCorrupted. type ErrMissingFiles struct { - Files []*storage.FileInfo + Fds []storage.FileDesc } func (e *ErrMissingFiles) Error() string { return "file missing" } -// SetFile sets 'file info' of the given error with the given file. +// SetFd sets 'file info' of the given error with the given file. // Currently only ErrCorrupted is supported, otherwise will do nothing. 
-func SetFile(err error, f storage.File) error { +func SetFd(err error, fd storage.FileDesc) error { switch x := err.(type) { case *ErrCorrupted: - x.File = storage.NewFileInfo(f) + x.Fd = fd return x } return err diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/external_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/external_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/external_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/external_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -54,5 +54,64 @@ db.(*testingDB).TestClose() }) }) + + Describe("transaction test", func() { + It("should do transaction correctly", func(done Done) { + db := newTestingDB(o, nil, nil) + + By("creating first transaction") + var err error + tr := &testingTransaction{} + tr.Transaction, err = db.OpenTransaction() + Expect(err).NotTo(HaveOccurred()) + t0 := &testutil.DBTesting{ + DB: tr, + Deleted: testutil.KeyValue_Generate(nil, 200, 1, 50, 5, 5).Clone(), + } + testutil.DoDBTesting(t0) + testutil.TestGet(tr, t0.Present) + testutil.TestHas(tr, t0.Present) + + By("committing first transaction") + err = tr.Commit() + Expect(err).NotTo(HaveOccurred()) + testutil.TestIter(db, nil, t0.Present) + testutil.TestGet(db, t0.Present) + testutil.TestHas(db, t0.Present) + + By("manipulating DB without transaction") + t0.DB = db + testutil.DoDBTesting(t0) + + By("creating second transaction") + tr.Transaction, err = db.OpenTransaction() + Expect(err).NotTo(HaveOccurred()) + t1 := &testutil.DBTesting{ + DB: tr, + Deleted: t0.Deleted.Clone(), + Present: t0.Present.Clone(), + } + testutil.DoDBTesting(t1) + testutil.TestIter(db, nil, t0.Present) + + By("discarding second transaction") + tr.Discard() + testutil.TestIter(db, nil, t0.Present) + + By("creating third transaction") + 
tr.Transaction, err = db.OpenTransaction() + Expect(err).NotTo(HaveOccurred()) + t0.DB = tr + testutil.DoDBTesting(t0) + + By("committing third transaction") + err = tr.Commit() + Expect(err).NotTo(HaveOccurred()) + testutil.TestIter(db, nil, t0.Present) + + db.TestClose() + done <- true + }, 30.0) + }) }) }) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/filter.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/filter.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/filter.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/filter.go 2016-05-24 07:05:22.000000000 +0000 @@ -15,7 +15,7 @@ } func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, iKey(key).ukey()) + return f.Filter.Contains(filter, internalKey(key).ukey()) } func (f iFilter) NewGenerator() filter.FilterGenerator { @@ -27,5 +27,5 @@ } func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(iKey(key).ukey()) + g.FilterGenerator.Add(internalKey(key).ukey()) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go 2016-05-24 07:05:22.000000000 +0000 @@ -83,6 +83,7 @@ "io" "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/util" ) @@ -165,7 +166,7 @@ r.dropper.Drop(&ErrCorrupted{n, reason}) } if r.strict && !skip { - r.err = errors.NewErrCorrupted(nil, 
&ErrCorrupted{n, reason}) + r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason}) return r.err } return errSkip diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/key.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/key.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/key.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/key.go 2016-05-24 07:05:22.000000000 +0000 @@ -11,28 +11,30 @@ "fmt" "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/storage" ) -type ErrIkeyCorrupted struct { +// ErrInternalKeyCorrupted records internal key corruption. +type ErrInternalKeyCorrupted struct { Ikey []byte Reason string } -func (e *ErrIkeyCorrupted) Error() string { - return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason) +func (e *ErrInternalKeyCorrupted) Error() string { + return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason) } -func newErrIkeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason}) +func newErrInternalKeyCorrupted(ikey []byte, reason string) error { + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason}) } -type kType int +type keyType uint -func (kt kType) String() string { +func (kt keyType) String() string { switch kt { - case ktDel: + case keyTypeDel: return "d" - case ktVal: + case keyTypeVal: return "v" } return "x" @@ -41,102 +43,105 @@ // Value types encoded as the last component of internal keys. // Don't modify; this value are saved to disk. 
const ( - ktDel kType = iota - ktVal + keyTypeDel keyType = iota + keyTypeVal ) -// ktSeek defines the kType that should be passed when constructing an +// keyTypeSeek defines the keyType that should be passed when constructing an // internal key for seeking to a particular sequence number (since we // sort sequence numbers in decreasing order and the value type is // embedded as the low 8 bits in the sequence number in internal keys, // we need to use the highest-numbered ValueType, not the lowest). -const ktSeek = ktVal +const keyTypeSeek = keyTypeVal const ( // Maximum value possible for sequence number; the 8-bits are // used by value type, so its can packed together in single // 64-bit integer. - kMaxSeq uint64 = (uint64(1) << 56) - 1 + keyMaxSeq = (uint64(1) << 56) - 1 // Maximum value possible for packed sequence number and type. - kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek) + keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek) ) // Maximum number encoded in bytes. -var kMaxNumBytes = make([]byte, 8) +var keyMaxNumBytes = make([]byte, 8) func init() { - binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) + binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum) } -type iKey []byte +type internalKey []byte -func newIkey(ukey []byte, seq uint64, kt kType) iKey { - if seq > kMaxSeq { +func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey { + if seq > keyMaxSeq { panic("leveldb: invalid sequence number") - } else if kt > ktVal { + } else if kt > keyTypeVal { panic("leveldb: invalid type") } - ik := make(iKey, len(ukey)+8) - copy(ik, ukey) - binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt)) - return ik + if n := len(ukey) + 8; cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + copy(dst, ukey) + binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt)) + return internalKey(dst) } -func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) { +func parseInternalKey(ik []byte) 
(ukey []byte, seq uint64, kt keyType, err error) { if len(ik) < 8 { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length") + return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length") } num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type") + seq, kt = uint64(num>>8), keyType(num&0xff) + if kt > keyTypeVal { + return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type") } ukey = ik[:len(ik)-8] return } -func validIkey(ik []byte) bool { - _, _, _, err := parseIkey(ik) +func validInternalKey(ik []byte) bool { + _, _, _, err := parseInternalKey(ik) return err == nil } -func (ik iKey) assert() { +func (ik internalKey) assert() { if ik == nil { - panic("leveldb: nil iKey") + panic("leveldb: nil internalKey") } if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik))) + panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik))) } } -func (ik iKey) ukey() []byte { +func (ik internalKey) ukey() []byte { ik.assert() return ik[:len(ik)-8] } -func (ik iKey) num() uint64 { +func (ik internalKey) num() uint64 { ik.assert() return binary.LittleEndian.Uint64(ik[len(ik)-8:]) } -func (ik iKey) parseNum() (seq uint64, kt kType) { +func (ik internalKey) parseNum() (seq uint64, kt keyType) { num := ik.num() - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) + seq, kt = uint64(num>>8), keyType(num&0xff) + if kt > keyTypeVal { + panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) } return } -func (ik iKey) String() string { +func (ik internalKey) String() string { if ik == nil { return "" } - if ukey, seq, kt, err := parseIkey(ik); err == nil { + if ukey, seq, kt, err := parseInternalKey(ik); err == nil { return 
fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) - } else { - return "" } + return "" } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/key_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/key_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/key_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/key_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -15,8 +15,8 @@ var defaultIComparer = &iComparer{comparer.DefaultComparer} -func ikey(key string, seq uint64, kt kType) iKey { - return newIkey([]byte(key), uint64(seq), kt) +func ikey(key string, seq uint64, kt keyType) internalKey { + return makeInternalKey(nil, []byte(key), uint64(seq), kt) } func shortSep(a, b []byte) []byte { @@ -37,7 +37,7 @@ return dst } -func testSingleKey(t *testing.T, key string, seq uint64, kt kType) { +func testSingleKey(t *testing.T, key string, seq uint64, kt keyType) { ik := ikey(key, seq, kt) if !bytes.Equal(ik.ukey(), []byte(key)) { @@ -52,7 +52,7 @@ t.Errorf("type does not equal, got %v, want %v", rt, kt) } - if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil { + if rukey, rseq, rt, kerr := parseInternalKey(ik); kerr == nil { if !bytes.Equal(rukey, []byte(key)) { t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) } @@ -67,7 +67,7 @@ } } -func TestIkey_EncodeDecode(t *testing.T) { +func TestInternalKey_EncodeDecode(t *testing.T) { keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} seqs := []uint64{ 1, 2, 3, @@ -77,8 +77,8 @@ } for _, key := range keys { for _, seq := range seqs { - testSingleKey(t, key, seq, ktVal) - testSingleKey(t, "hello", 1, ktDel) + testSingleKey(t, key, seq, keyTypeVal) + testSingleKey(t, "hello", 1, keyTypeDel) } } } @@ -89,45 +89,45 @@ } } -func TestIkeyShortSeparator(t *testing.T) { +func 
TestInternalKeyShortSeparator(t *testing.T) { // When user keys are same - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 99, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 101, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktDel))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foo", 99, keyTypeVal))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foo", 101, keyTypeVal))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foo", 100, keyTypeVal))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foo", 100, keyTypeDel))) // When user keys are misordered - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("bar", 99, ktVal))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("bar", 99, keyTypeVal))) // When user keys are different, but correctly ordered - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSep(ikey("foo", 100, ktVal), - ikey("hello", 200, ktVal))) + assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("hello", 200, keyTypeVal))) // When start user key is prefix of limit user key - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foobar", 200, ktVal))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foobar", 200, keyTypeVal))) // When limit user key is prefix of start user key - assertBytes(t, ikey("foobar", 100, ktVal), - shortSep(ikey("foobar", 100, ktVal), - ikey("foo", 200, ktVal))) + 
assertBytes(t, ikey("foobar", 100, keyTypeVal), + shortSep(ikey("foobar", 100, keyTypeVal), + ikey("foo", 200, keyTypeVal))) } -func TestIkeyShortestSuccessor(t *testing.T) { - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSuccessor(ikey("foo", 100, ktVal))) - assertBytes(t, ikey("\xff\xff", 100, ktVal), - shortSuccessor(ikey("\xff\xff", 100, ktVal))) +func TestInternalKeyShortestSuccessor(t *testing.T) { + assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek), + shortSuccessor(ikey("foo", 100, keyTypeVal))) + assertBytes(t, ikey("\xff\xff", 100, keyTypeVal), + shortSuccessor(ikey("\xff\xff", 100, keyTypeVal))) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/opt/options.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/opt/options.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/opt/options.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/opt/options.go 2016-05-24 07:05:22.000000000 +0000 @@ -8,10 +8,11 @@ package opt import ( + "math" + "github.com/syndtr/goleveldb/leveldb/cache" "github.com/syndtr/goleveldb/leveldb/comparer" "github.com/syndtr/goleveldb/leveldb/filter" - "math" ) const ( @@ -35,8 +36,6 @@ DefaultCompactionTotalSizeMultiplier = 10.0 DefaultCompressionType = SnappyCompression DefaultIteratorSamplingRate = 1 * MiB - DefaultMaxMemCompationLevel = 2 - DefaultNumLevel = 7 DefaultOpenFilesCacher = LRUCacher DefaultOpenFilesCacheCapacity = 500 DefaultWriteBuffer = 4 * MiB @@ -266,6 +265,13 @@ // The default value is false. DisableCompactionBackoff bool + // DisableLargeBatchTransaction allows disabling switch-to-transaction mode + // on large batch write. If enable batch writes large than WriteBuffer will + // use transaction. + // + // The default is false. 
+ DisableLargeBatchTransaction bool + // ErrorIfExist defines whether an error should returned if the DB already // exist. // @@ -301,24 +307,11 @@ // The default is 1MiB. IteratorSamplingRate int - // MaxMemCompationLevel defines maximum level a newly compacted 'memdb' - // will be pushed into if doesn't creates overlap. This should less than - // NumLevel. Use -1 for level-0. - // - // The default is 2. - MaxMemCompationLevel int - // NoSync allows completely disable fsync. // // The default is false. NoSync bool - // NumLevel defines number of database level. The level shouldn't changed - // between opens, or the database will panic. - // - // The default is 7. - NumLevel int - // OpenFilesCacher provides cache algorithm for open files caching. // Specify NoCacher to disable caching algorithm. // @@ -440,7 +433,7 @@ if o.CompactionTableSize > 0 { base = o.CompactionTableSize } - if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { + if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { mult = o.CompactionTableSizeMultiplierPerLevel[level] } else if o.CompactionTableSizeMultiplier > 0 { mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) @@ -461,7 +454,7 @@ if o.CompactionTotalSize > 0 { base = o.CompactionTotalSize } - if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { + if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { mult = o.CompactionTotalSizeMultiplierPerLevel[level] } else if o.CompactionTotalSizeMultiplier > 0 { mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) @@ -508,6 +501,13 @@ return o.DisableCompactionBackoff } +func (o *Options) GetDisableLargeBatchTransaction() bool { + if o == nil { + return false + } + return o.DisableLargeBatchTransaction +} + func (o *Options) GetErrorIfExist() 
bool { if o == nil { return false @@ -536,21 +536,6 @@ return o.IteratorSamplingRate } -func (o *Options) GetMaxMemCompationLevel() int { - level := DefaultMaxMemCompationLevel - if o != nil { - if o.MaxMemCompationLevel > 0 { - level = o.MaxMemCompationLevel - } else if o.MaxMemCompationLevel < 0 { - level = 0 - } - } - if level >= o.GetNumLevel() { - return o.GetNumLevel() - 1 - } - return level -} - func (o *Options) GetNoSync() bool { if o == nil { return false @@ -558,13 +543,6 @@ return o.NoSync } -func (o *Options) GetNumLevel() int { - if o == nil || o.NumLevel <= 0 { - return DefaultNumLevel - } - return o.NumLevel -} - func (o *Options) GetOpenFilesCacher() Cacher { if o == nil || o.OpenFilesCacher == nil { return DefaultOpenFilesCacher diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/options.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/options.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/options.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/options.go 2016-05-24 07:05:22.000000000 +0000 @@ -43,6 +43,8 @@ s.o.cache() } +const optCachedLevel = 7 + type cachedOptions struct { *opt.Options @@ -54,15 +56,13 @@ } func (co *cachedOptions) cache() { - numLevel := co.Options.GetNumLevel() - - co.compactionExpandLimit = make([]int, numLevel) - co.compactionGPOverlaps = make([]int, numLevel) - co.compactionSourceLimit = make([]int, numLevel) - co.compactionTableSize = make([]int, numLevel) - co.compactionTotalSize = make([]int64, numLevel) + co.compactionExpandLimit = make([]int, optCachedLevel) + co.compactionGPOverlaps = make([]int, optCachedLevel) + co.compactionSourceLimit = make([]int, optCachedLevel) + co.compactionTableSize = make([]int, optCachedLevel) + co.compactionTotalSize = make([]int64, optCachedLevel) - for level := 0; level < 
numLevel; level++ { + for level := 0; level < optCachedLevel; level++ { co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) @@ -72,21 +72,36 @@ } func (co *cachedOptions) GetCompactionExpandLimit(level int) int { - return co.compactionExpandLimit[level] + if level < optCachedLevel { + return co.compactionExpandLimit[level] + } + return co.Options.GetCompactionExpandLimit(level) } func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { - return co.compactionGPOverlaps[level] + if level < optCachedLevel { + return co.compactionGPOverlaps[level] + } + return co.Options.GetCompactionGPOverlaps(level) } func (co *cachedOptions) GetCompactionSourceLimit(level int) int { - return co.compactionSourceLimit[level] + if level < optCachedLevel { + return co.compactionSourceLimit[level] + } + return co.Options.GetCompactionSourceLimit(level) } func (co *cachedOptions) GetCompactionTableSize(level int) int { - return co.compactionTableSize[level] + if level < optCachedLevel { + return co.compactionTableSize[level] + } + return co.Options.GetCompactionTableSize(level) } func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { - return co.compactionTotalSize[level] + if level < optCachedLevel { + return co.compactionTotalSize[level] + } + return co.Options.GetCompactionTotalSize(level) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go 2016-05-24 
07:05:22.000000000 +0000 @@ -14,41 +14,46 @@ "github.com/syndtr/goleveldb/leveldb/opt" ) -func (s *session) pickMemdbLevel(umin, umax []byte) int { +func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int { v := s.version() defer v.release() - return v.pickMemdbLevel(umin, umax) + return v.pickMemdbLevel(umin, umax, maxLevel) } -func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) { +func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) { // Create sorted table. iter := mdb.NewIterator(nil) defer iter.Release() t, n, err := s.tops.createFrom(iter) if err != nil { - return level, err + return 0, err } - // Pick level and add to record. - if level < 0 { - level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey()) - } - rec.addTableFile(level, t) + // Pick level other than zero can cause compaction issue with large + // bulk insert and delete on strictly incrementing key-space. The + // problem is that the small deletion markers trapped at lower level, + // while key/value entries keep growing at higher level. Since the + // key-space is strictly incrementing it will not overlaps with + // higher level, thus maximum possible level is always picked, while + // overlapping deletion marker pushed into lower level. + // See: https://github.com/syndtr/goleveldb/issues/127. + flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel) + rec.addTableFile(flushLevel, t) - s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) - return level, nil + s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + return flushLevel, nil } // Pick a compaction based on current state; need external synchronization. 
func (s *session) pickCompaction() *compaction { v := s.version() - var level int + var sourceLevel int var t0 tFiles if v.cScore >= 1 { - level = v.cLevel - cptr := s.stCompPtrs[level] - tables := v.tables[level] + sourceLevel = v.cLevel + cptr := s.getCompPtr(sourceLevel) + tables := v.levels[sourceLevel] for _, t := range tables { if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { t0 = append(t0, t) @@ -61,7 +66,7 @@ } else { if p := atomic.LoadPointer(&v.cSeek); p != nil { ts := (*tSet)(p) - level = ts.level + sourceLevel = ts.level t0 = append(t0, ts.table) } else { v.release() @@ -69,14 +74,19 @@ } } - return newCompaction(s, v, level, t0) + return newCompaction(s, v, sourceLevel, t0) } // Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { +func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction { v := s.version() - t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) + if sourceLevel >= len(v.levels) { + v.release() + return nil + } + + t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0) if len(t0) == 0 { v.release() return nil @@ -86,9 +96,9 @@ // But we cannot do this for level-0 since level-0 files can overlap // and we must not pick one file and drop another older file if the // two files overlap. 
- if level > 0 { - limit := uint64(v.s.o.GetCompactionSourceLimit(level)) - total := uint64(0) + if !noLimit && sourceLevel > 0 { + limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel)) + total := int64(0) for i, t := range t0 { total += t.size if total >= limit { @@ -99,17 +109,17 @@ } } - return newCompaction(s, v, level, t0) + return newCompaction(s, v, sourceLevel, t0) } -func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction { +func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction { c := &compaction{ s: s, v: v, - level: level, - tables: [2]tFiles{t0, nil}, - maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)), - tPtrs: make([]int, s.o.GetNumLevel()), + sourceLevel: sourceLevel, + levels: [2]tFiles{t0, nil}, + maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)), + tPtrs: make([]int, len(v.levels)), } c.expand() c.save() @@ -121,21 +131,21 @@ s *session v *version - level int - tables [2]tFiles - maxGPOverlaps uint64 + sourceLevel int + levels [2]tFiles + maxGPOverlaps int64 gp tFiles gpi int seenKey bool - gpOverlappedBytes uint64 - imin, imax iKey + gpOverlappedBytes int64 + imin, imax internalKey tPtrs []int released bool snapGPI int snapSeenKey bool - snapGPOverlappedBytes uint64 + snapGPOverlappedBytes int64 snapTPtrs []int } @@ -162,30 +172,34 @@ // Expand compacted tables; need external synchronization. func (c *compaction) expand() { - limit := uint64(c.s.o.GetCompactionExpandLimit(c.level)) - vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1] + limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel)) + vt0 := c.v.levels[c.sourceLevel] + vt1 := tFiles{} + if level := c.sourceLevel + 1; level < len(c.v.levels) { + vt1 = c.v.levels[level] + } - t0, t1 := c.tables[0], c.tables[1] + t0, t1 := c.levels[0], c.levels[1] imin, imax := t0.getRange(c.s.icmp) // We expand t0 here just incase ukey hop across tables. 
- t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0) - if len(t0) != len(c.tables[0]) { + t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0) + if len(t0) != len(c.levels[0]) { imin, imax = t0.getRange(c.s.icmp) } t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) // Get entire range covered by compaction. amin, amax := append(t0, t1...).getRange(c.s.icmp) - // See if we can grow the number of inputs in "level" without - // changing the number of "level+1" files we pick up. + // See if we can grow the number of inputs in "sourceLevel" without + // changing the number of "sourceLevel+1" files we pick up. if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0) + exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0) if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { xmin, xmax := exp0.getRange(c.s.icmp) exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) if len(exp1) == len(t1) { c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), + c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) imin, imax = xmin, xmax t0, t1 = exp0, exp1 @@ -195,22 +209,23 @@ } // Compute the set of grandparent files that overlap this compaction - // (parent == level+1; grandparent == level+2) - if c.level+2 < c.s.o.GetNumLevel() { - c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) + // (parent == sourceLevel+1; grandparent == sourceLevel+2) + if level := c.sourceLevel + 2; level < len(c.v.levels) { + c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) } - c.tables[0], c.tables[1] = t0, t1 + 
c.levels[0], c.levels[1] = t0, t1 c.imin, c.imax = imin, imax } // Check whether compaction is trivial. func (c *compaction) trivial() bool { - return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps + return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps } func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level, tables := range c.v.tables[c.level+2:] { + for level := c.sourceLevel + 2; level < len(c.v.levels); level++ { + tables := c.v.levels[level] for c.tPtrs[level] < len(tables) { t := tables[c.tPtrs[level]] if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { @@ -227,7 +242,7 @@ return true } -func (c *compaction) shouldStopBefore(ikey iKey) bool { +func (c *compaction) shouldStopBefore(ikey internalKey) bool { for ; c.gpi < len(c.gp); c.gpi++ { gp := c.gp[c.gpi] if c.s.icmp.Compare(ikey, gp.imax) <= 0 { @@ -250,10 +265,10 @@ // Creates an iterator. func (c *compaction) newIterator() iterator.Iterator { // Creates iterator slice. - icap := len(c.tables) - if c.level == 0 { + icap := len(c.levels) + if c.sourceLevel == 0 { // Special case for level-0. - icap = len(c.tables[0]) + 1 + icap = len(c.levels[0]) + 1 } its := make([]iterator.Iterator, 0, icap) @@ -267,13 +282,13 @@ ro.Strict |= opt.StrictReader } - for i, tables := range c.tables { + for i, tables := range c.levels { if len(tables) == 0 { continue } // Level-0 is not sorted and may overlaps each other. 
- if c.level+i == 0 { + if c.sourceLevel+i == 0 { for _, t := range tables { its = append(its, c.s.tops.newIterator(t, nil, ro)) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session.go 2016-05-24 07:05:22.000000000 +0000 @@ -16,9 +16,9 @@ "github.com/syndtr/goleveldb/leveldb/journal" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" ) +// ErrManifestCorrupted records manifest corruption. type ErrManifestCorrupted struct { Field string Reason string @@ -28,31 +28,31 @@ return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) } -func newErrManifestCorrupted(f storage.File, field, reason string) error { - return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason}) +func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error { + return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason}) } // session represent a persistent database session. type session struct { // Need 64-bit alignment. 
- stNextFileNum uint64 // current unused file number - stJournalNum uint64 // current journal file number; need external synchronization - stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stNextFileNum int64 // current unused file number + stJournalNum int64 // current journal file number; need external synchronization + stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stTempFileNum int64 stSeqNum uint64 // last mem compacted seq; need external synchronization - stTempFileNum uint64 stor storage.Storage - storLock util.Releaser + storLock storage.Lock o *cachedOptions icmp *iComparer tops *tOps manifest *journal.Writer manifestWriter storage.Writer - manifestFile storage.File + manifestFd storage.FileDesc - stCompPtrs []iKey // compaction pointers; need external synchronization - stVersion *version // current version + stCompPtrs []internalKey // compaction pointers; need external synchronization + stVersion *version // current version vmu sync.Mutex } @@ -66,9 +66,8 @@ return } s = &session{ - stor: stor, - storLock: storLock, - stCompPtrs: make([]iKey, o.GetNumLevel()), + stor: stor, + storLock: storLock, } s.setOptions(o) s.tops = newTableOps(s) @@ -88,7 +87,6 @@ } s.manifest = nil s.manifestWriter = nil - s.manifestFile = nil s.stVersion = nil } @@ -109,18 +107,18 @@ if os.IsNotExist(err) { // Don't return os.ErrNotExist if the underlying storage contains // other files that belong to LevelDB. So the DB won't get trashed. 
- if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { - err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} + if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 { + err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} } } }() - m, err := s.stor.GetManifest() + fd, err := s.stor.GetMeta() if err != nil { return } - reader, err := m.Open() + reader, err := s.stor.Open(fd) if err != nil { return } @@ -128,10 +126,9 @@ var ( // Options. - numLevel = s.o.GetNumLevel() - strict = s.o.GetStrict(opt.StrictManifest) + strict = s.o.GetStrict(opt.StrictManifest) - jr = journal.NewReader(reader, dropper{s, m}, strict, true) + jr = journal.NewReader(reader, dropper{s, fd}, strict, true) rec = &sessionRecord{} staging = s.stVersion.newStaging() ) @@ -143,24 +140,23 @@ err = nil break } - return errors.SetFile(err, m) + return errors.SetFd(err, fd) } - err = rec.decode(r, numLevel) + err = rec.decode(r) if err == nil { // save compact pointers for _, r := range rec.compPtrs { - s.stCompPtrs[r.level] = iKey(r.ikey) + s.setCompPtr(r.level, internalKey(r.ikey)) } // commit record to version staging staging.commit(rec) } else { - err = errors.SetFile(err, m) + err = errors.SetFd(err, fd) if strict || !errors.IsCorrupted(err) { return - } else { - s.logf("manifest error: %v (skipped)", errors.SetFile(err, m)) } + s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd)) } rec.resetCompPtrs() rec.resetAddedTables() @@ -169,18 +165,18 @@ switch { case !rec.has(recComparer): - return newErrManifestCorrupted(m, "comparer", "missing") + return newErrManifestCorrupted(fd, "comparer", "missing") case rec.comparer != s.icmp.uName(): - return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) + return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", 
s.icmp.uName(), rec.comparer)) case !rec.has(recNextFileNum): - return newErrManifestCorrupted(m, "next-file-num", "missing") + return newErrManifestCorrupted(fd, "next-file-num", "missing") case !rec.has(recJournalNum): - return newErrManifestCorrupted(m, "journal-file-num", "missing") + return newErrManifestCorrupted(fd, "journal-file-num", "missing") case !rec.has(recSeqNum): - return newErrManifestCorrupted(m, "seq-num", "missing") + return newErrManifestCorrupted(fd, "seq-num", "missing") } - s.manifestFile = m + s.manifestFd = fd s.setVersion(staging.finish()) s.setNextFileNum(rec.nextFileNum) s.recordCommited(rec) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_record.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_record.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_record.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_record.go 2016-05-24 07:05:22.000000000 +0000 @@ -13,6 +13,7 @@ "strings" "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/storage" ) type byteReader interface { @@ -35,28 +36,28 @@ type cpRecord struct { level int - ikey iKey + ikey internalKey } type atRecord struct { level int - num uint64 - size uint64 - imin iKey - imax iKey + num int64 + size int64 + imin internalKey + imax internalKey } type dtRecord struct { level int - num uint64 + num int64 } type sessionRecord struct { hasRec int comparer string - journalNum uint64 - prevJournalNum uint64 - nextFileNum uint64 + journalNum int64 + prevJournalNum int64 + nextFileNum int64 seqNum uint64 compPtrs []cpRecord addedTables []atRecord @@ -75,17 +76,17 @@ p.comparer = name } -func (p *sessionRecord) setJournalNum(num uint64) { +func (p *sessionRecord) setJournalNum(num int64) { p.hasRec |= 1 << recJournalNum 
p.journalNum = num } -func (p *sessionRecord) setPrevJournalNum(num uint64) { +func (p *sessionRecord) setPrevJournalNum(num int64) { p.hasRec |= 1 << recPrevJournalNum p.prevJournalNum = num } -func (p *sessionRecord) setNextFileNum(num uint64) { +func (p *sessionRecord) setNextFileNum(num int64) { p.hasRec |= 1 << recNextFileNum p.nextFileNum = num } @@ -95,7 +96,7 @@ p.seqNum = num } -func (p *sessionRecord) addCompPtr(level int, ikey iKey) { +func (p *sessionRecord) addCompPtr(level int, ikey internalKey) { p.hasRec |= 1 << recCompPtr p.compPtrs = append(p.compPtrs, cpRecord{level, ikey}) } @@ -105,13 +106,13 @@ p.compPtrs = p.compPtrs[:0] } -func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) { +func (p *sessionRecord) addTable(level int, num, size int64, imin, imax internalKey) { p.hasRec |= 1 << recAddTable p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax}) } func (p *sessionRecord) addTableFile(level int, t *tFile) { - p.addTable(level, t.file.Num(), t.size, t.imin, t.imax) + p.addTable(level, t.fd.Num, t.size, t.imin, t.imax) } func (p *sessionRecord) resetAddedTables() { @@ -119,7 +120,7 @@ p.addedTables = p.addedTables[:0] } -func (p *sessionRecord) delTable(level int, num uint64) { +func (p *sessionRecord) delTable(level int, num int64) { p.hasRec |= 1 << recDelTable p.deletedTables = append(p.deletedTables, dtRecord{level, num}) } @@ -137,6 +138,13 @@ _, p.err = w.Write(p.scratch[:n]) } +func (p *sessionRecord) putVarint(w io.Writer, x int64) { + if x < 0 { + panic("invalid negative value") + } + p.putUvarint(w, uint64(x)) +} + func (p *sessionRecord) putBytes(w io.Writer, x []byte) { if p.err != nil { return @@ -156,11 +164,11 @@ } if p.has(recJournalNum) { p.putUvarint(w, recJournalNum) - p.putUvarint(w, p.journalNum) + p.putVarint(w, p.journalNum) } if p.has(recNextFileNum) { p.putUvarint(w, recNextFileNum) - p.putUvarint(w, p.nextFileNum) + p.putVarint(w, p.nextFileNum) } if p.has(recSeqNum) { 
p.putUvarint(w, recSeqNum) @@ -174,13 +182,13 @@ for _, r := range p.deletedTables { p.putUvarint(w, recDelTable) p.putUvarint(w, uint64(r.level)) - p.putUvarint(w, r.num) + p.putVarint(w, r.num) } for _, r := range p.addedTables { p.putUvarint(w, recAddTable) p.putUvarint(w, uint64(r.level)) - p.putUvarint(w, r.num) - p.putUvarint(w, r.size) + p.putVarint(w, r.num) + p.putVarint(w, r.size) p.putBytes(w, r.imin) p.putBytes(w, r.imax) } @@ -194,9 +202,9 @@ x, err := binary.ReadUvarint(r) if err != nil { if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"}) + p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"}) } else if strings.HasPrefix(err.Error(), "binary:") { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()}) + p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, err.Error()}) } else { p.err = err } @@ -209,6 +217,14 @@ return p.readUvarintMayEOF(field, r, false) } +func (p *sessionRecord) readVarint(field string, r io.ByteReader) int64 { + x := int64(p.readUvarintMayEOF(field, r, false)) + if x < 0 { + p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "invalid negative value"}) + } + return x +} + func (p *sessionRecord) readBytes(field string, r byteReader) []byte { if p.err != nil { return nil @@ -221,14 +237,14 @@ _, p.err = io.ReadFull(r, x) if p.err != nil { if p.err == io.ErrUnexpectedEOF { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"}) + p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"}) } return nil } return x } -func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int { +func (p *sessionRecord) readLevel(field string, r io.ByteReader) int { if p.err != nil { return 0 } @@ -236,14 +252,10 @@ if p.err != nil { return 0 } - if x 
>= uint64(numLevel) { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"}) - return 0 - } return int(x) } -func (p *sessionRecord) decode(r io.Reader, numLevel int) error { +func (p *sessionRecord) decode(r io.Reader) error { br, ok := r.(byteReader) if !ok { br = bufio.NewReader(r) @@ -264,17 +276,17 @@ p.setComparer(string(x)) } case recJournalNum: - x := p.readUvarint("journal-num", br) + x := p.readVarint("journal-num", br) if p.err == nil { p.setJournalNum(x) } case recPrevJournalNum: - x := p.readUvarint("prev-journal-num", br) + x := p.readVarint("prev-journal-num", br) if p.err == nil { p.setPrevJournalNum(x) } case recNextFileNum: - x := p.readUvarint("next-file-num", br) + x := p.readVarint("next-file-num", br) if p.err == nil { p.setNextFileNum(x) } @@ -284,23 +296,23 @@ p.setSeqNum(x) } case recCompPtr: - level := p.readLevel("comp-ptr.level", br, numLevel) + level := p.readLevel("comp-ptr.level", br) ikey := p.readBytes("comp-ptr.ikey", br) if p.err == nil { - p.addCompPtr(level, iKey(ikey)) + p.addCompPtr(level, internalKey(ikey)) } case recAddTable: - level := p.readLevel("add-table.level", br, numLevel) - num := p.readUvarint("add-table.num", br) - size := p.readUvarint("add-table.size", br) + level := p.readLevel("add-table.level", br) + num := p.readVarint("add-table.num", br) + size := p.readVarint("add-table.size", br) imin := p.readBytes("add-table.imin", br) imax := p.readBytes("add-table.imax", br) if p.err == nil { p.addTable(level, num, size, imin, imax) } case recDelTable: - level := p.readLevel("del-table.level", br, numLevel) - num := p.readUvarint("del-table.num", br) + level := p.readLevel("del-table.level", br) + num := p.readVarint("del-table.num", br) if p.err == nil { p.delTable(level, num) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -9,8 +9,6 @@ import ( "bytes" "testing" - - "github.com/syndtr/goleveldb/leveldb/opt" ) func decodeEncode(v *sessionRecord) (res bool, err error) { @@ -20,7 +18,7 @@ return } v2 := &sessionRecord{} - err = v.decode(b, opt.DefaultNumLevel) + err = v.decode(b) if err != nil { return } @@ -33,9 +31,9 @@ } func TestSessionRecord_EncodeDecode(t *testing.T) { - big := uint64(1) << 50 + big := int64(1) << 50 v := &sessionRecord{} - i := uint64(0) + i := int64(0) test := func() { res, err := decodeEncode(v) if err != nil { @@ -49,16 +47,16 @@ for ; i < 4; i++ { test() v.addTable(3, big+300+i, big+400+i, - newIkey([]byte("foo"), big+500+1, ktVal), - newIkey([]byte("zoo"), big+600+1, ktDel)) + makeInternalKey(nil, []byte("foo"), uint64(big+500+1), keyTypeVal), + makeInternalKey(nil, []byte("zoo"), uint64(big+600+1), keyTypeDel)) v.delTable(4, big+700+i) - v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal)) + v.addCompPtr(int(i), makeInternalKey(nil, []byte("x"), uint64(big+900+1), keyTypeVal)) } v.setComparer("foo") v.setJournalNum(big + 100) v.setPrevJournalNum(big + 99) v.setNextFileNum(big + 200) - v.setSeqNum(big + 1000) + v.setSeqNum(uint64(big + 1000)) test() } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_util.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_util.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_util.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/session_util.go 2016-05-24 07:05:22.000000000 +0000 @@ -17,15 +17,15 @@ // Logging. type dropper struct { - s *session - file storage.File + s *session + fd storage.FileDesc } func (d dropper) Drop(err error) { if e, ok := err.(*journal.ErrCorrupted); ok { - d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason) + d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason) } else { - d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err) + d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err) } } @@ -34,25 +34,9 @@ // File utils. -func (s *session) getJournalFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeJournal) -} - -func (s *session) getTableFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeTable) -} - -func (s *session) getFiles(t storage.FileType) ([]storage.File, error) { - return s.stor.GetFiles(t) -} - -func (s *session) newTemp() storage.File { - num := atomic.AddUint64(&s.stTempFileNum, 1) - 1 - return s.stor.GetFile(num, storage.TypeTemp) -} - -func (s *session) tableFileFromRecord(r atRecord) *tFile { - return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax) +func (s *session) newTemp() storage.FileDesc { + num := atomic.AddInt64(&s.stTempFileNum, 1) - 1 + return storage.FileDesc{storage.TypeTemp, num} } // Session state. @@ -80,47 +64,65 @@ } // Get current unused file number. -func (s *session) nextFileNum() uint64 { - return atomic.LoadUint64(&s.stNextFileNum) +func (s *session) nextFileNum() int64 { + return atomic.LoadInt64(&s.stNextFileNum) } // Set current unused file number to num. -func (s *session) setNextFileNum(num uint64) { - atomic.StoreUint64(&s.stNextFileNum, num) +func (s *session) setNextFileNum(num int64) { + atomic.StoreInt64(&s.stNextFileNum, num) } // Mark file number as used. 
-func (s *session) markFileNum(num uint64) { +func (s *session) markFileNum(num int64) { nextFileNum := num + 1 for { old, x := s.stNextFileNum, nextFileNum if old > x { x = old } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { break } } } // Allocate a file number. -func (s *session) allocFileNum() uint64 { - return atomic.AddUint64(&s.stNextFileNum, 1) - 1 +func (s *session) allocFileNum() int64 { + return atomic.AddInt64(&s.stNextFileNum, 1) - 1 } // Reuse given file number. -func (s *session) reuseFileNum(num uint64) { +func (s *session) reuseFileNum(num int64) { for { old, x := s.stNextFileNum, num if old != x+1 { x = old } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { break } } } +// Set compaction ptr at given level; need external synchronization. +func (s *session) setCompPtr(level int, ik internalKey) { + if level >= len(s.stCompPtrs) { + newCompPtrs := make([]internalKey, level+1) + copy(newCompPtrs, s.stCompPtrs) + s.stCompPtrs = newCompPtrs + } + s.stCompPtrs[level] = append(internalKey{}, ik...) +} + +// Get compaction ptr at given level; need external synchronization. +func (s *session) getCompPtr(level int) internalKey { + if level >= len(s.stCompPtrs) { + return nil + } + return s.stCompPtrs[level] +} + // Manifest related utils. // Fill given session record obj with current states; need external @@ -149,29 +151,28 @@ // Mark if record has been committed, this will update session state; // need external synchronization. 
-func (s *session) recordCommited(r *sessionRecord) { - if r.has(recJournalNum) { - s.stJournalNum = r.journalNum +func (s *session) recordCommited(rec *sessionRecord) { + if rec.has(recJournalNum) { + s.stJournalNum = rec.journalNum } - if r.has(recPrevJournalNum) { - s.stPrevJournalNum = r.prevJournalNum + if rec.has(recPrevJournalNum) { + s.stPrevJournalNum = rec.prevJournalNum } - if r.has(recSeqNum) { - s.stSeqNum = r.seqNum + if rec.has(recSeqNum) { + s.stSeqNum = rec.seqNum } - for _, p := range r.compPtrs { - s.stCompPtrs[p.level] = iKey(p.ikey) + for _, r := range rec.compPtrs { + s.setCompPtr(r.level, internalKey(r.ikey)) } } // Create a new manifest file; need external synchronization. func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - num := s.allocFileNum() - file := s.stor.GetFile(num, storage.TypeManifest) - writer, err := file.Create() + fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()} + writer, err := s.stor.Create(fd) if err != nil { return } @@ -196,16 +197,16 @@ if s.manifestWriter != nil { s.manifestWriter.Close() } - if s.manifestFile != nil { - s.manifestFile.Remove() + if !s.manifestFd.Nil() { + s.stor.Remove(s.manifestFd) } - s.manifestFile = file + s.manifestFd = fd s.manifestWriter = writer s.manifest = jw } else { writer.Close() - file.Remove() - s.reuseFileNum(num) + s.stor.Remove(fd) + s.reuseFileNum(fd.Num) } }() @@ -221,7 +222,7 @@ if err != nil { return } - err = s.stor.SetManifest(file) + err = s.stor.SetMeta(fd) return } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go 2016-05-24 07:05:22.000000000 +0000 @@ -17,11 +17,12 @@ "strings" "sync" "time" - - "github.com/syndtr/goleveldb/leveldb/util" ) -var errFileOpen = errors.New("leveldb/storage: file still open") +var ( + errFileOpen = errors.New("leveldb/storage: file still open") + errReadOnly = errors.New("leveldb/storage: storage is read-only") +) type fileLock interface { release() error @@ -32,40 +33,52 @@ } func (lock *fileStorageLock) Release() { - fs := lock.fs - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.slock == lock { - fs.slock = nil + if lock.fs != nil { + lock.fs.mu.Lock() + defer lock.fs.mu.Unlock() + if lock.fs.slock == lock { + lock.fs.slock = nil + } } - return } +const logSizeThreshold = 1024 * 1024 // 1 MiB + // fileStorage is a file-system backed storage. type fileStorage struct { - path string + path string + readOnly bool - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - buf []byte + mu sync.Mutex + flock fileLock + slock *fileStorageLock + logw *os.File + logSize int64 + buf []byte // Opened file counter; if open < 0 means closed. open int day int } // OpenFile returns a new filesytem-backed storage implementation with the given -// path. This also hold a file lock, so any subsequent attempt to open the same -// path will fail. +// path. This also acquire a file lock, so any subsequent attempt to open the +// same path will fail. // // The storage must be closed after use, by calling Close method. 
-func OpenFile(path string) (Storage, error) { - if err := os.MkdirAll(path, 0755); err != nil { +func OpenFile(path string, readOnly bool) (Storage, error) { + if fi, err := os.Stat(path); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path) + } + } else if os.IsNotExist(err) && !readOnly { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + } else { return nil, err } - flock, err := newFileLock(filepath.Join(path, "LOCK")) + flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly) if err != nil { return nil, err } @@ -76,23 +89,42 @@ } }() - rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) - logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err + var ( + logw *os.File + logSize int64 + ) + if !readOnly { + logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + logSize, err = logw.Seek(0, os.SEEK_END) + if err != nil { + logw.Close() + return nil, err + } } - fs := &fileStorage{path: path, flock: flock, logw: logw} + fs := &fileStorage{ + path: path, + readOnly: readOnly, + flock: flock, + logw: logw, + logSize: logSize, + } runtime.SetFinalizer(fs, (*fileStorage).Close) return fs, nil } -func (fs *fileStorage) Lock() (util.Releaser, error) { +func (fs *fileStorage) Lock() (Lock, error) { fs.mu.Lock() defer fs.mu.Unlock() if fs.open < 0 { return nil, ErrClosed } + if fs.readOnly { + return &fileStorageLock{}, nil + } if fs.slock != nil { return nil, ErrLocked } @@ -101,7 +133,7 @@ } func itoa(buf []byte, i int, wid int) []byte { - var u uint = uint(i) + u := uint(i) if u == 0 && wid <= 1 { return append(buf, '0') } @@ -126,6 +158,22 @@ } func (fs *fileStorage) doLog(t time.Time, str string) { + if fs.logSize > logSizeThreshold { + // Rotate log file. 
+ fs.logw.Close() + fs.logw = nil + fs.logSize = 0 + rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")) + } + if fs.logw == nil { + var err error + fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return + } + // Force printDay on new log file. + fs.day = 0 + } fs.printDay(t) hour, min, sec := t.Clock() msec := t.Nanosecond() / 1e3 @@ -145,65 +193,71 @@ } func (fs *fileStorage) Log(str string) { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return + if !fs.readOnly { + t := time.Now() + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return + } + fs.doLog(t, str) } - fs.doLog(t, str) } func (fs *fileStorage) log(str string) { - fs.doLog(time.Now(), str) + if !fs.readOnly { + fs.doLog(time.Now(), str) + } } -func (fs *fileStorage) GetFile(num uint64, t FileType) File { - return &file{fs: fs, num: num, t: t} -} +func (fs *fileStorage) SetMeta(fd FileDesc) (err error) { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } -func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { fs.mu.Lock() defer fs.mu.Unlock() if fs.open < 0 { - return nil, ErrClosed + return ErrClosed } - dir, err := os.Open(fs.path) + defer func() { + if err != nil { + fs.log(fmt.Sprintf("CURRENT: %v", err)) + } + }() + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { return } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) + _, err = fmt.Fprintln(w, fsGenName(fd)) + // Close the file first. 
+ if cerr := w.Close(); cerr != nil { + fs.log(fmt.Sprintf("close CURRENT.%d: %v", fd.Num, cerr)) } if err != nil { return } - f := &file{fs: fs} - for _, fn := range fnn { - if f.parse(fn) && (f.t&t) != 0 { - ff = append(ff, f) - f = &file{fs: fs} - } - } - return + return rename(path, filepath.Join(fs.path, "CURRENT")) } -func (fs *fileStorage) GetManifest() (f File, err error) { +func (fs *fileStorage) GetMeta() (fd FileDesc, err error) { fs.mu.Lock() defer fs.mu.Unlock() if fs.open < 0 { - return nil, ErrClosed + return FileDesc{}, ErrClosed } dir, err := os.Open(fs.path) if err != nil { return } - fnn, err := dir.Readdirnames(0) + names, err := dir.Readdirnames(0) // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) + if ce := dir.Close(); ce != nil { + fs.log(fmt.Sprintf("close dir: %v", ce)) } if err != nil { return @@ -212,58 +266,64 @@ var rem []string var pend bool var cerr error - for _, fn := range fnn { - if strings.HasPrefix(fn, "CURRENT") { - pend1 := len(fn) > 7 + for _, name := range names { + if strings.HasPrefix(name, "CURRENT") { + pend1 := len(name) > 7 + var pendNum int64 // Make sure it is valid name for a CURRENT file, otherwise skip it. if pend1 { - if fn[7] != '.' || len(fn) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) + if name[7] != '.' 
|| len(name) < 9 { + fs.log(fmt.Sprintf("skipping %s: invalid file name", name)) continue } - if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) + var e1 error + if pendNum, e1 = strconv.ParseInt(name[8:], 10, 0); e1 != nil { + fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", name, e1)) continue } } - path := filepath.Join(fs.path, fn) + path := filepath.Join(fs.path, name) r, e1 := os.OpenFile(path, os.O_RDONLY, 0) if e1 != nil { - return nil, e1 + return FileDesc{}, e1 } b, e1 := ioutil.ReadAll(r) if e1 != nil { r.Close() - return nil, e1 + return FileDesc{}, e1 } - f1 := &file{fs: fs} - if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) + var fd1 FileDesc + if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd1) { + fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", name)) if pend1 { - rem = append(rem, fn) + rem = append(rem, name) } if !pend1 || cerr == nil { + metaFd, _ := fsParseName(name) cerr = &ErrCorrupted{ - File: fsParseName(filepath.Base(fn)), - Err: errors.New("leveldb/storage: corrupted or incomplete manifest file"), + Fd: metaFd, + Err: errors.New("leveldb/storage: corrupted or incomplete meta file"), } } - } else if f != nil && f1.Num() < f.Num() { - fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) + } else if pend1 && pendNum != fd1.Num { + fs.log(fmt.Sprintf("skipping %s: inconsistent pending-file num: %d vs %d", name, pendNum, fd1.Num)) + rem = append(rem, name) + } else if fd1.Num < fd.Num { + fs.log(fmt.Sprintf("skipping %s: obsolete", name)) if pend1 { - rem = append(rem, fn) + rem = append(rem, name) } } else { - f = f1 + fd = fd1 pend = pend1 } if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", fn, err)) + fs.log(fmt.Sprintf("close %s: %v", name, err)) } } } // Don't remove any files if there is no valid CURRENT file. 
- if f == nil { + if fd.Nil() { if cerr != nil { err = cerr } else { @@ -271,273 +331,253 @@ } return } - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) + if !fs.readOnly { + // Rename pending CURRENT file to an effective CURRENT. + if pend { + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { + fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", fd.Num, err)) + } } - } - // Remove obsolete or incomplete pending CURRENT files. - for _, fn := range rem { - path := filepath.Join(fs.path, fn) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", fn, err)) + // Remove obsolete or incomplete pending CURRENT files. + for _, name := range rem { + path := filepath.Join(fs.path, name) + if err := os.Remove(path); err != nil { + fs.log(fmt.Sprintf("remove %s: %v", name, err)) + } } } return } -func (fs *fileStorage) SetManifest(f File) (err error) { +func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) { fs.mu.Lock() defer fs.mu.Unlock() if fs.open < 0 { - return ErrClosed - } - f2, ok := f.(*file) - if !ok || f2.t != TypeManifest { - return ErrInvalidFile + return nil, ErrClosed } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + dir, err := os.Open(fs.path) if err != nil { - return err + return } - _, err = fmt.Fprintln(w, f2.name()) - // Close the file first. 
- if err := w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) + names, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if cerr := dir.Close(); cerr != nil { + fs.log(fmt.Sprintf("close dir: %v", cerr)) } - if err != nil { - return err + if err == nil { + for _, name := range names { + if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 { + fds = append(fds, fd) + } + } } - return rename(path, filepath.Join(fs.path, "CURRENT")) + return } -func (fs *fileStorage) Close() error { +func (fs *fileStorage) Open(fd FileDesc) (Reader, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + fs.mu.Lock() defer fs.mu.Unlock() if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) + return nil, ErrClosed } - fs.open = -1 - e1 := fs.logw.Close() - err := fs.flock.release() - if err == nil { - err = e1 + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0) + if err == nil { + goto ok + } + } + return nil, err } - return err -} - -type fileWrap struct { - *os.File - f *file +ok: + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil } -func (fw fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err +func (fs *fileStorage) Create(fd FileDesc) (Writer, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile } - if fw.f.Type() == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
- if err := syncDir(fw.f.fs.path); err != nil { - return err - } + if fs.readOnly { + return nil, errReadOnly } - return nil -} -func (fw fileWrap) Close() error { - f := fw.f - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if !f.open { - return ErrClosed + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed } - f.open = false - f.fs.open-- - err := fw.File.Close() + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { - f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) + return nil, err } - return err + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil } -type file struct { - fs *fileStorage - num uint64 - t FileType - open bool -} - -func (f *file) Open() (Reader, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed +func (fs *fileStorage) Remove(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile } - if f.open { - return nil, errFileOpen + if fs.readOnly { + return errReadOnly } - of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + err := os.Remove(filepath.Join(fs.path, fsGenName(fd))) if err != nil { - if f.hasOldName() && os.IsNotExist(err) { - of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) - if err == nil { - goto ok + if fsHasOldName(fd) && os.IsNotExist(err) { + if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) { + fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err)) + err = e1 } + } else { + fs.log(fmt.Sprintf("remove %s: %v", fd, err)) } - return nil, err } -ok: - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil + return err } -func (f *file) Create() (Writer, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed +func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error { + if !FileDescOk(oldfd) || 
!FileDescOk(newfd) { + return ErrInvalidFile } - if f.open { - return nil, errFileOpen + if oldfd == newfd { + return nil } - of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err + if fs.readOnly { + return errReadOnly } - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd))) } -func (f *file) Replace(newfile File) error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { +func (fs *fileStorage) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { return ErrClosed } - newfile2, ok := newfile.(*file) - if !ok { - return ErrInvalidFile + // Clear the finalizer. + runtime.SetFinalizer(fs, nil) + + if fs.open > 0 { + fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) } - if f.open || newfile2.open { - return errFileOpen + fs.open = -1 + if fs.logw != nil { + fs.logw.Close() } - return rename(newfile2.path(), f.path()) + return fs.flock.release() } -func (f *file) Type() FileType { - return f.t +type fileWrap struct { + *os.File + fs *fileStorage + fd FileDesc + closed bool } -func (f *file) Num() uint64 { - return f.num +func (fw *fileWrap) Sync() error { + if err := fw.File.Sync(); err != nil { + return err + } + if fw.fd.Type == TypeManifest { + // Also sync parent directory if file type is manifest. + // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
+ if err := syncDir(fw.fs.path); err != nil { + fw.fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + } + return nil } -func (f *file) Remove() error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { +func (fw *fileWrap) Close() error { + fw.fs.mu.Lock() + defer fw.fs.mu.Unlock() + if fw.closed { return ErrClosed } - if f.open { - return errFileOpen - } - err := os.Remove(f.path()) + fw.closed = true + fw.fs.open-- + err := fw.File.Close() if err != nil { - f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) - } - // Also try remove file with old name, just in case. - if f.hasOldName() { - if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) { - f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err)) - err = e1 - } + fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err)) } return err } -func (f *file) hasOldName() bool { - return f.t == TypeTable -} - -func (f *file) oldName() string { - switch f.t { - case TypeTable: - return fmt.Sprintf("%06d.sst", f.num) - } - return f.name() -} - -func (f *file) oldPath() string { - return filepath.Join(f.fs.path, f.oldName()) -} - -func (f *file) name() string { - switch f.t { +func fsGenName(fd FileDesc) string { + switch fd.Type { case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", f.num) + return fmt.Sprintf("MANIFEST-%06d", fd.Num) case TypeJournal: - return fmt.Sprintf("%06d.log", f.num) + return fmt.Sprintf("%06d.log", fd.Num) case TypeTable: - return fmt.Sprintf("%06d.ldb", f.num) + return fmt.Sprintf("%06d.ldb", fd.Num) case TypeTemp: - return fmt.Sprintf("%06d.tmp", f.num) + return fmt.Sprintf("%06d.tmp", fd.Num) default: panic("invalid file type") } } -func (f *file) path() string { - return filepath.Join(f.fs.path, f.name()) +func fsHasOldName(fd FileDesc) bool { + return fd.Type == TypeTable } -func fsParseName(name string) *FileInfo { - fi := &FileInfo{} +func fsGenOldName(fd FileDesc) string { + switch fd.Type { + case TypeTable: + return 
fmt.Sprintf("%06d.sst", fd.Num) + } + return fsGenName(fd) +} + +func fsParseName(name string) (fd FileDesc, ok bool) { var tail string - _, err := fmt.Sscanf(name, "%d.%s", &fi.Num, &tail) + _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail) if err == nil { switch tail { case "log": - fi.Type = TypeJournal + fd.Type = TypeJournal case "ldb", "sst": - fi.Type = TypeTable + fd.Type = TypeTable case "tmp": - fi.Type = TypeTemp + fd.Type = TypeTemp default: - return nil + return } - return fi + return fd, true } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fi.Num, &tail) + n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail) if n == 1 { - fi.Type = TypeManifest - return fi + fd.Type = TypeManifest + return fd, true } - return nil + return } -func (f *file) parse(name string) bool { - fi := fsParseName(name) - if fi == nil { - return false - } - f.t = fi.Type - f.num = fi.Num - return true +func fsParseNamePtr(name string, fd *FileDesc) bool { + _fd, ok := fsParseName(name) + if fd != nil { + *fd = _fd + } + return ok } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go 2016-05-24 07:05:22.000000000 +0000 @@ -19,8 +19,21 @@ return fl.f.Close() } -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var ( + flag int + perm os.FileMode + ) + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + perm = os.ModeExclusive + } + f, err := 
os.OpenFile(path, flag, perm) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644) + } if err != nil { return } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go 2016-05-24 07:05:22.000000000 +0000 @@ -18,18 +18,27 @@ } func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { + if err := setFileLock(fl.f, false, false); err != nil { return err } return fl.f.Close() } -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } if err != nil { return } - err = setFileLock(f, true) + err = setFileLock(f, readOnly, true) if err != nil { f.Close() return @@ -38,7 +47,7 @@ return } -func setFileLock(f *os.File, lock bool) error { +func setFileLock(f *os.File, readOnly, lock bool) error { flock := syscall.Flock_t{ Type: syscall.F_UNLCK, Start: 0, @@ -46,7 +55,11 @@ Whence: 1, } if lock { - flock.Type = syscall.F_WRLCK + if readOnly { + flock.Type = syscall.F_RDLCK + } else { + flock.Type = syscall.F_WRLCK + } } return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -17,14 +17,14 @@ oldName []string name string ftype FileType - num uint64 + num int64 }{ {nil, "000100.log", TypeJournal, 100}, {nil, "000000.log", TypeJournal, 0}, {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, {nil, "MANIFEST-000002", TypeManifest, 2}, {nil, "MANIFEST-000007", TypeManifest, 7}, - {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, + {nil, "9223372036854775807.log", TypeJournal, 9223372036854775807}, {nil, "000100.tmp", TypeTemp, 100}, } @@ -55,9 +55,8 @@ func TestFileStorage_CreateFileName(t *testing.T) { for _, c := range cases { - f := &file{num: c.num, t: c.ftype} - if f.name() != c.name { - t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) + if name := fsGenName(FileDesc{c.ftype, c.num}); name != c.name { + t.Errorf("invalid filename got '%s', want '%s'", name, c.name) } } } @@ -65,16 +64,16 @@ func TestFileStorage_ParseFileName(t *testing.T) { for _, c := range cases { for _, name := range append([]string{c.name}, c.oldName...) 
{ - f := new(file) - if !f.parse(name) { + fd, ok := fsParseName(name) + if !ok { t.Errorf("cannot parse filename '%s'", name) continue } - if f.Type() != c.ftype { - t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) + if fd.Type != c.ftype { + t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, fd.Type, c.ftype) } - if f.Num() != c.num { - t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) + if fd.Num != c.num { + t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, fd.Num, c.num) } } } @@ -82,32 +81,25 @@ func TestFileStorage_InvalidFileName(t *testing.T) { for _, name := range invalidCases { - f := new(file) - if f.parse(name) { + if fsParseNamePtr(name, nil) { t.Errorf("filename '%s' should be invalid", name) } } } func TestFileStorage_Locking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) - - _, err := os.Stat(path) - if err == nil { - err = os.RemoveAll(path) - if err != nil { - t.Fatal("RemoveAll: got error: ", err) - } + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrwlock-%d", os.Getuid())) + if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) { + t.Fatal("RemoveAll: got error: ", err) } + defer os.RemoveAll(path) - p1, err := OpenFile(path) + p1, err := OpenFile(path, false) if err != nil { t.Fatal("OpenFile(1): got error: ", err) } - defer os.RemoveAll(path) - - p2, err := OpenFile(path) + p2, err := OpenFile(path, false) if err != nil { t.Logf("OpenFile(2): got error: %s (expected)", err) } else { @@ -118,7 +110,7 @@ p1.Close() - p3, err := OpenFile(path) + p3, err := OpenFile(path, false) if err != nil { t.Fatal("OpenFile(3): got error: ", err) } @@ -140,3 +132,45 @@ t.Fatal("storage lock failed(2): ", err) } } + +func TestFileStorage_ReadOnlyLocking(t *testing.T) { + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrolock-%d", os.Getuid())) + if err := 
os.RemoveAll(path); err != nil && !os.IsNotExist(err) { + t.Fatal("RemoveAll: got error: ", err) + } + defer os.RemoveAll(path) + + p1, err := OpenFile(path, false) + if err != nil { + t.Fatal("OpenFile(1): got error: ", err) + } + + _, err = OpenFile(path, true) + if err != nil { + t.Logf("OpenFile(2): got error: %s (expected)", err) + } else { + t.Fatal("OpenFile(2): expect error") + } + + p1.Close() + + p3, err := OpenFile(path, true) + if err != nil { + t.Fatal("OpenFile(3): got error: ", err) + } + + p4, err := OpenFile(path, true) + if err != nil { + t.Fatal("OpenFile(4): got error: ", err) + } + + _, err = OpenFile(path, false) + if err != nil { + t.Logf("OpenFile(5): got error: %s (expected)", err) + } else { + t.Fatal("OpenFile(2): expect error") + } + + p3.Close() + p4.Close() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -18,18 +18,27 @@ } func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { + if err := setFileLock(fl.f, false, false); err != nil { return err } return fl.f.Close() } -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } if err != nil { return } - err = 
setFileLock(f, true) + err = setFileLock(f, readOnly, true) if err != nil { f.Close() return @@ -38,10 +47,14 @@ return } -func setFileLock(f *os.File, lock bool) error { +func setFileLock(f *os.File, readOnly, lock bool) error { how := syscall.LOCK_UN if lock { - how = syscall.LOCK_EX + if readOnly { + how = syscall.LOCK_SH + } else { + how = syscall.LOCK_EX + } } return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go 2016-05-24 07:05:22.000000000 +0000 @@ -29,12 +29,22 @@ return syscall.Close(fl.fd) } -func newFileLock(path string) (fl fileLock, err error) { +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { pathp, err := syscall.UTF16PtrFromString(path) if err != nil { return } - fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + var access, shareMode uint32 + if readOnly { + access = syscall.GENERIC_READ + shareMode = syscall.FILE_SHARE_READ + } else { + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if err == syscall.ERROR_FILE_NOT_FOUND { + fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + } if err != nil { return } @@ -47,9 +57,8 @@ if r1 == 0 { if e1 != 0 { return error(e1) - } else { - return syscall.EINVAL } + return 
syscall.EINVAL } return nil } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go 2016-05-24 07:05:22.000000000 +0000 @@ -10,8 +10,6 @@ "bytes" "os" "sync" - - "github.com/syndtr/goleveldb/leveldb/util" ) const typeShift = 3 @@ -32,10 +30,10 @@ // memStorage is a memory-backed storage. type memStorage struct { - mu sync.Mutex - slock *memStorageLock - files map[uint64]*memFile - manifest *memFilePtr + mu sync.Mutex + slock *memStorageLock + files map[uint64]*memFile + meta FileDesc } // NewMemStorage returns a new memory-backed storage implementation. @@ -45,7 +43,7 @@ } } -func (ms *memStorage) Lock() (util.Releaser, error) { +func (ms *memStorage) Lock() (Lock, error) { ms.mu.Lock() defer ms.mu.Unlock() if ms.slock != nil { @@ -57,147 +55,164 @@ func (*memStorage) Log(str string) {} -func (ms *memStorage) GetFile(num uint64, t FileType) File { - return &memFilePtr{ms: ms, num: num, t: t} -} +func (ms *memStorage) SetMeta(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } -func (ms *memStorage) GetFiles(t FileType) ([]File, error) { ms.mu.Lock() - var ff []File - for x, _ := range ms.files { - num, mt := x>>typeShift, FileType(x)&TypeAll - if mt&t == 0 { - continue - } - ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt}) - } + ms.meta = fd ms.mu.Unlock() - return ff, nil + return nil } -func (ms *memStorage) GetManifest() (File, error) { +func (ms *memStorage) GetMeta() (FileDesc, error) { ms.mu.Lock() defer ms.mu.Unlock() - if ms.manifest == nil { - return nil, os.ErrNotExist + if ms.meta.Nil() { + return 
FileDesc{}, os.ErrNotExist } - return ms.manifest, nil + return ms.meta, nil } -func (ms *memStorage) SetManifest(f File) error { - fm, ok := f.(*memFilePtr) - if !ok || fm.t != TypeManifest { - return ErrInvalidFile - } +func (ms *memStorage) List(ft FileType) ([]FileDesc, error) { ms.mu.Lock() - ms.manifest = fm + var fds []FileDesc + for x, _ := range ms.files { + fd := unpackFile(x) + if fd.Type&ft != 0 { + fds = append(fds, fd) + } + } ms.mu.Unlock() - return nil + return fds, nil } -func (*memStorage) Close() error { return nil } - -type memReader struct { - *bytes.Reader - m *memFile -} - -func (mr *memReader) Close() error { - return mr.m.Close() -} - -type memFile struct { - bytes.Buffer - ms *memStorage - open bool -} - -func (*memFile) Sync() error { return nil } -func (m *memFile) Close() error { - m.ms.mu.Lock() - m.open = false - m.ms.mu.Unlock() - return nil -} - -type memFilePtr struct { - ms *memStorage - num uint64 - t FileType -} - -func (p *memFilePtr) x() uint64 { - return p.Num()<> typeShift)} } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -30,18 +30,17 @@ t.Fatal("storage lock failed(2): ", err) } - f := m.GetFile(1, TypeTable) - if f.Num() != 1 && f.Type() != TypeTable { - t.Fatal("invalid file number and type") + w, err := m.Create(FileDesc{TypeTable, 1}) + if err != nil { + t.Fatal("Storage.Create: ", err) } - w, _ := f.Create() w.Write([]byte("abc")) w.Close() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 { + if fds, _ := m.List(TypeAll); 
len(fds) != 1 { t.Fatal("invalid GetFiles len") } buf := new(bytes.Buffer) - r, err := f.Open() + r, err := m.Open(FileDesc{TypeTable, 1}) if err != nil { t.Fatal("Open: got error: ", err) } @@ -50,17 +49,17 @@ if got := buf.String(); got != "abc" { t.Fatalf("Read: invalid value, want=abc got=%s", got) } - if _, err := f.Open(); err != nil { + if _, err := m.Open(FileDesc{TypeTable, 1}); err != nil { t.Fatal("Open: got error: ", err) } - if _, err := m.GetFile(1, TypeTable).Open(); err == nil { + if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil { t.Fatal("expecting error") } - f.Remove() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 { - t.Fatal("invalid GetFiles len", len(ff)) + m.Remove(FileDesc{TypeTable, 1}) + if fds, _ := m.List(TypeAll); len(fds) != 0 { + t.Fatal("invalid GetFiles len", len(fds)) } - if _, err := f.Open(); err == nil { + if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil { t.Fatal("expecting error") } } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go 2016-05-24 07:05:22.000000000 +0000 @@ -15,7 +15,7 @@ "github.com/syndtr/goleveldb/leveldb/util" ) -type FileType uint32 +type FileType int const ( TypeManifest FileType = 1 << iota @@ -50,13 +50,13 @@ // a file. Package storage has its own type instead of using // errors.ErrCorrupted to prevent circular import. 
type ErrCorrupted struct { - File *FileInfo - Err error + Fd FileDesc + Err error } func (e *ErrCorrupted) Error() string { - if e.File != nil { - return fmt.Sprintf("%v [file=%v]", e.Err, e.File) + if !e.Fd.Nil() { + return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) } else { return e.Err.Error() } @@ -83,31 +83,47 @@ Syncer } -// File is the file. A file instance must be goroutine-safe. -type File interface { - // Open opens the file for read. Returns os.ErrNotExist error - // if the file does not exist. - // Returns ErrClosed if the underlying storage is closed. - Open() (r Reader, err error) - - // Create creates the file for writting. Truncate the file if - // already exist. - // Returns ErrClosed if the underlying storage is closed. - Create() (w Writer, err error) +type Lock interface { + util.Releaser +} - // Replace replaces file with newfile. - // Returns ErrClosed if the underlying storage is closed. - Replace(newfile File) error +// FileDesc is a file descriptor. +type FileDesc struct { + Type FileType + Num int64 +} - // Type returns the file type - Type() FileType +func (fd FileDesc) String() string { + switch fd.Type { + case TypeManifest: + return fmt.Sprintf("MANIFEST-%06d", fd.Num) + case TypeJournal: + return fmt.Sprintf("%06d.log", fd.Num) + case TypeTable: + return fmt.Sprintf("%06d.ldb", fd.Num) + case TypeTemp: + return fmt.Sprintf("%06d.tmp", fd.Num) + default: + return fmt.Sprintf("%#x-%d", fd.Type, fd.Num) + } +} - // Num returns the file number. - Num() uint64 +// Nil returns true if fd == (FileDesc{}). +func (fd FileDesc) Nil() bool { + return fd == (FileDesc{}) +} - // Remove removes the file. - // Returns ErrClosed if the underlying storage is closed. - Remove() error +// FileDescOk returns true if fd is a valid file descriptor. +func FileDescOk(fd FileDesc) bool { + switch fd.Type { + case TypeManifest: + case TypeJournal: + case TypeTable: + case TypeTemp: + default: + return false + } + return fd.Num >= 0 } // Storage is the storage. 
A storage instance must be goroutine-safe. @@ -115,59 +131,47 @@ // Lock locks the storage. Any subsequent attempt to call Lock will fail // until the last lock released. // After use the caller should call the Release method. - Lock() (l util.Releaser, err error) + Lock() (Lock, error) - // Log logs a string. This is used for logging. An implementation - // may write to a file, stdout or simply do nothing. + // Log logs a string. This is used for logging. + // An implementation may write to a file, stdout or simply do nothing. Log(str string) - // GetFile returns a file for the given number and type. GetFile will never - // returns nil, even if the underlying storage is closed. - GetFile(num uint64, t FileType) File + // SetMeta sets to point to the given fd, which then can be acquired using + // GetMeta method. + // SetMeta should be implemented in such way that changes should happened + // atomically. + SetMeta(fd FileDesc) error + + // GetManifest returns a manifest file. + // Returns os.ErrNotExist if meta doesn't point to any fd, or point to fd + // that doesn't exist. + GetMeta() (FileDesc, error) - // GetFiles returns a slice of files that match the given file types. + // List returns fds that match the given file types. // The file types may be OR'ed together. - GetFiles(t FileType) ([]File, error) + List(ft FileType) ([]FileDesc, error) - // GetManifest returns a manifest file. Returns os.ErrNotExist if manifest - // file does not exist. - GetManifest() (File, error) - - // SetManifest sets the given file as manifest file. The given file should - // be a manifest file type or error will be returned. - SetManifest(f File) error + // Open opens file with the given fd read-only. + // Returns os.ErrNotExist error if the file does not exist. + // Returns ErrClosed if the underlying storage is closed. + Open(fd FileDesc) (Reader, error) - // Close closes the storage. It is valid to call Close multiple times. 
- // Other methods should not be called after the storage has been closed. - Close() error -} + // Create creates file with the given fd, truncate if already exist and + // opens write-only. + // Returns ErrClosed if the underlying storage is closed. + Create(fd FileDesc) (Writer, error) -// FileInfo wraps basic file info. -type FileInfo struct { - Type FileType - Num uint64 -} + // Remove removes file with the given fd. + // Returns ErrClosed if the underlying storage is closed. + Remove(fd FileDesc) error -func (fi FileInfo) String() string { - switch fi.Type { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", fi.Num) - case TypeJournal: - return fmt.Sprintf("%06d.log", fi.Num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", fi.Num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", fi.Num) - default: - return fmt.Sprintf("%#x-%d", fi.Type, fi.Num) - } -} + // Rename renames file from oldfd to newfd. + // Returns ErrClosed if the underlying storage is closed. + Rename(oldfd, newfd FileDesc) error -// NewFileInfo creates new FileInfo from the given File. It will returns nil -// if File is nil. -func NewFileInfo(f File) *FileInfo { - if f == nil { - return nil - } - return &FileInfo{f.Type(), f.Num()} + // Close closes the storage. + // It is valid to call Close multiple times. Other methods should not be + // called after the storage has been closed. + Close() error } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/storage_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,549 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENE file. - -package leveldb - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 4 - -var ( - tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument") - tsErrFileOpen = errors.New("leveldb.testStorage: file still open") -) - -var ( - tsFSEnv = os.Getenv("GOLEVELDB_USEFS") - tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR") - tsKeepFS = tsFSEnv == "2" - tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1" - tsMU = &sync.Mutex{} - tsNum = 0 -) - -type tsOp uint - -const ( - tsOpOpen tsOp = iota - tsOpCreate - tsOpReplace - tsOpRemove - tsOpRead - tsOpReadAt - tsOpWrite - tsOpSync - - tsOpNum -) - -type tsLock struct { - ts *testStorage - r util.Releaser -} - -func (l tsLock) Release() { - l.r.Release() - l.ts.t.Log("I: storage lock released") -} - -type tsReader struct { - tf tsFile - storage.Reader -} - -func (tr tsReader) Read(b []byte) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - if tr.tf.shouldErrLocked(tsOpRead) { - return 0, errors.New("leveldb.testStorage: emulated read error") - } - n, err = tr.Reader.Read(b) - if err != nil && err != io.EOF { - ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err) - } - return -} - -func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - if tr.tf.shouldErrLocked(tsOpReadAt) { - return 0, errors.New("leveldb.testStorage: emulated readAt error") - } - n, err = tr.Reader.ReadAt(b, off) - if err != nil && err != io.EOF { - ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err) - } - return -} - -func (tr tsReader) Close() (err error) { - err = tr.Reader.Close() - 
tr.tf.close("reader", err) - return -} - -type tsWriter struct { - tf tsFile - storage.Writer -} - -func (tw tsWriter) Write(b []byte) (n int, err error) { - if tw.tf.shouldErrLocked(tsOpWrite) { - return 0, errors.New("leveldb.testStorage: emulated write error") - } - n, err = tw.Writer.Write(b) - if err != nil { - tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err) - } - return -} - -func (tw tsWriter) Sync() (err error) { - ts := tw.tf.ts - ts.mu.Lock() - for ts.emuDelaySync&tw.tf.Type() != 0 { - ts.cond.Wait() - } - ts.mu.Unlock() - if tw.tf.shouldErrLocked(tsOpSync) { - return errors.New("leveldb.testStorage: emulated sync error") - } - err = tw.Writer.Sync() - if err != nil { - tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err) - } - return -} - -func (tw tsWriter) Close() (err error) { - err = tw.Writer.Close() - tw.tf.close("writer", err) - return -} - -type tsFile struct { - ts *testStorage - storage.File -} - -func (tf tsFile) x() uint64 { - return tf.Num()<>typeShift, storage.FileType(x)&storage.TypeAll - ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer) - } - } - ts.mu.Unlock() -} - -func newTestStorage(t *testing.T) *testStorage { - var stor storage.Storage - var closeFn func() error - if tsFS { - for { - tsMU.Lock() - num := tsNum - tsNum++ - tsMU.Unlock() - tempdir := tsTempdir - if tempdir == "" { - tempdir = os.TempDir() - } - path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); err != nil { - stor, err = storage.OpenFile(path) - if err != nil { - t.Fatalf("F: cannot create storage: %v", err) - } - t.Logf("I: storage created: %s", path) - closeFn = func() error { - for _, name := range []string{"LOG.old", "LOG"} { - f, err := os.Open(filepath.Join(path, name)) - if err != nil { - continue - } - if log, err := ioutil.ReadAll(f); err != nil { - t.Logf("---------------------- %s 
----------------------", name) - t.Logf("cannot read log: %v", err) - t.Logf("---------------------- %s ----------------------", name) - } else if len(log) > 0 { - t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) - t.Logf("---------------------- %s ----------------------", name) - } - f.Close() - } - if t.Failed() { - t.Logf("testing failed, test DB preserved at %s", path) - return nil - } - if tsKeepFS { - return nil - } - return os.RemoveAll(path) - } - - break - } - } - } else { - stor = storage.NewMemStorage() - } - ts := &testStorage{ - t: t, - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - emuErrOnceMap: make(map[uint64]uint), - emuRandErrProb: 0x999, - emuRandRand: rand.New(rand.NewSource(0xfacedead)), - } - ts.cond.L = &ts.mu - return ts -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table/reader.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table/reader.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table/reader.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table/reader.go 2016-05-24 07:05:22.000000000 +0000 @@ -507,9 +507,9 @@ // Reader is a table reader. 
type Reader struct { mu sync.RWMutex - fi *storage.FileInfo + fd storage.FileDesc reader io.ReaderAt - cache *cache.CacheGetter + cache *cache.NamespaceGetter err error bpool *util.BufferPool // Options @@ -539,7 +539,7 @@ } func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { - return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} + return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} } func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { @@ -551,7 +551,7 @@ cerr.Pos = int64(bh.offset) cerr.Size = int64(bh.length) cerr.Kind = r.blockKind(bh) - return &errors.ErrCorrupted{File: r.fi, Err: cerr} + return &errors.ErrCorrupted{Fd: r.fd, Err: cerr} } return err } @@ -988,13 +988,13 @@ // The fi, cache and bpool is optional and can be nil. // // The returned table reader instance is goroutine-safe. -func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { +func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { if f == nil { return nil, errors.New("leveldb/table: nil file") } r := &Reader{ - fi: fi, + fd: fd, reader: f, cache: cache, bpool: bpool, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -14,6 +14,7 @@ "github.com/syndtr/goleveldb/leveldb/iterator" 
"github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/testutil" "github.com/syndtr/goleveldb/leveldb/util" ) @@ -59,7 +60,7 @@ It("Should be able to approximate offset of a key correctly", func() { Expect(err).ShouldNot(HaveOccurred()) - tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) + tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o) Expect(err).ShouldNot(HaveOccurred()) CheckOffset := func(key string, expect, threshold int) { offset, err := tr.OffsetOf([]byte(key)) @@ -96,7 +97,7 @@ tw.Close() // Opening the table. - tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) + tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o) return tableWrapper{tr} } Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/table.go 2016-05-24 07:05:22.000000000 +0000 @@ -21,10 +21,10 @@ // tFile holds basic information about a table. type tFile struct { - file storage.File + fd storage.FileDesc seekLeft int32 - size uint64 - imin, imax iKey + size int64 + imin, imax internalKey } // Returns true if given key is after largest key of this table. @@ -48,9 +48,9 @@ } // Creates new tFile. 
-func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { +func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { f := &tFile{ - file: file, + fd: fd, size: size, imin: imin, imax: imax, @@ -77,6 +77,10 @@ return f } +func tableFileFromRecord(r atRecord) *tFile { + return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax) +} + // tFiles hold multiple tFile. type tFiles []*tFile @@ -89,7 +93,7 @@ if i != 0 { x += ", " } - x += fmt.Sprint(f.file.Num()) + x += fmt.Sprint(f.fd.Num) } x += " ]" return x @@ -101,7 +105,7 @@ a, b := tf[i], tf[j] n := icmp.Compare(a.imin, b.imin) if n == 0 { - return a.file.Num() < b.file.Num() + return a.fd.Num < b.fd.Num } return n < 0 } @@ -109,7 +113,7 @@ // Returns true if i file number is greater than j. // This used for sort by file number in descending order. func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].file.Num() > tf[j].file.Num() + return tf[i].fd.Num > tf[j].fd.Num } // Sorts tables by key in ascending order. @@ -123,7 +127,7 @@ } // Returns sum of all tables size. -func (tf tFiles) size() (sum uint64) { +func (tf tFiles) size() (sum int64) { for _, t := range tf { sum += t.size } @@ -132,7 +136,7 @@ // Searches smallest index of tables whose its smallest // key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { +func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imin, ikey) >= 0 }) @@ -140,7 +144,7 @@ // Searches smallest index of tables whose its largest // key is after or equal with given key. 
-func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { +func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imax, ikey) >= 0 }) @@ -162,7 +166,7 @@ i := 0 if len(umin) > 0 { // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek)) + i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) } if i >= len(tf) { // Beginning of range is after all files, so no overlap. @@ -205,7 +209,7 @@ } // Returns tables key range. -func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) { +func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { for i, t := range tf { if i == 0 { imin, imax = t.imin, t.imax @@ -227,10 +231,10 @@ if slice != nil { var start, limit int if slice.Start != nil { - start = tf.searchMax(icmp, iKey(slice.Start)) + start = tf.searchMax(icmp, internalKey(slice.Start)) } if slice.Limit != nil { - limit = tf.searchMin(icmp, iKey(slice.Limit)) + limit = tf.searchMin(icmp, internalKey(slice.Limit)) } else { limit = tf.Len() } @@ -255,7 +259,7 @@ } func (a *tFilesArrayIndexer) Search(key []byte) int { - return a.searchMax(a.icmp, iKey(key)) + return a.searchMax(a.icmp, internalKey(key)) } func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { @@ -295,16 +299,16 @@ // Creates an empty table and returns table writer. func (t *tOps) create() (*tWriter, error) { - file := t.s.getTableFile(t.s.allocFileNum()) - fw, err := file.Create() + fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()} + fw, err := t.s.stor.Create(fd) if err != nil { return nil, err } return &tWriter{ - t: t, - file: file, - w: fw, - tw: table.NewWriter(fw, t.s.o.Options), + t: t, + fd: fd, + w: fw, + tw: table.NewWriter(fw, t.s.o.Options), }, nil } @@ -340,21 +344,20 @@ // Opens table. It returns a cache handle, which should // be released after use. 
func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { - num := f.file.Num() - ch = t.cache.Get(0, num, func() (size int, value cache.Value) { + ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { var r storage.Reader - r, err = f.file.Open() + r, err = t.s.stor.Open(f.fd) if err != nil { return 0, nil } - var bcache *cache.CacheGetter + var bcache *cache.NamespaceGetter if t.bcache != nil { - bcache = &cache.CacheGetter{Cache: t.bcache, NS: num} + bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} } var tr *table.Reader - tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options) + tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) if err != nil { r.Close() return 0, nil @@ -390,14 +393,13 @@ } // Returns approximate offset of the given key. -func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) { +func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { ch, err := t.open(f) if err != nil { return } defer ch.Release() - offset_, err := ch.Value().(*table.Reader).OffsetOf(key) - return uint64(offset_), err + return ch.Value().(*table.Reader).OffsetOf(key) } // Creates an iterator from the given table. @@ -414,15 +416,14 @@ // Removes table from persistent storage. It waits until // no one use the the table. 
func (t *tOps) remove(f *tFile) { - num := f.file.Num() - t.cache.Delete(0, num, func() { - if err := f.file.Remove(); err != nil { - t.s.logf("table@remove removing @%d %q", num, err) + t.cache.Delete(0, uint64(f.fd.Num), func() { + if err := t.s.stor.Remove(f.fd); err != nil { + t.s.logf("table@remove removing @%d %q", f.fd.Num, err) } else { - t.s.logf("table@remove removed @%d", num) + t.s.logf("table@remove removed @%d", f.fd.Num) } if t.bcache != nil { - t.bcache.EvictNS(num) + t.bcache.EvictNS(uint64(f.fd.Num)) } }) } @@ -471,9 +472,9 @@ type tWriter struct { t *tOps - file storage.File - w storage.Writer - tw *table.Writer + fd storage.FileDesc + w storage.Writer + tw *table.Writer first, last []byte } @@ -513,16 +514,15 @@ return } } - f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last)) + f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) return } // Drops the table. func (w *tWriter) drop() { w.close() - w.file.Remove() - w.t.s.reuseFileNum(w.file.Num()) - w.file = nil + w.t.s.stor.Remove(w.fd) + w.t.s.reuseFileNum(w.fd.Num) w.tw = nil w.first = nil w.last = nil diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go 2016-05-24 07:05:22.000000000 +0000 @@ -17,6 +17,84 @@ "github.com/syndtr/goleveldb/leveldb/util" ) +func TestFind(db Find, kv KeyValue) { + ShuffledIndex(nil, kv.Len(), 1, func(i int) { + key_, key, value := kv.IndexInexact(i) + + // Using exact key. 
+ rkey, rvalue, err := db.TestFind(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) + Expect(rkey).Should(Equal(key), "Key") + Expect(rvalue).Should(Equal(value), "Value for key %q", key) + + // Using inexact key. + rkey, rvalue, err = db.TestFind(key_) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) + Expect(rkey).Should(Equal(key)) + Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) + }) +} + +func TestFindAfterLast(db Find, kv KeyValue) { + var key []byte + if kv.Len() > 0 { + key_, _ := kv.Index(kv.Len() - 1) + key = BytesAfter(key_) + } + rkey, _, err := db.TestFind(key) + Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) + Expect(err).Should(Equal(errors.ErrNotFound)) +} + +func TestGet(db Get, kv KeyValue) { + ShuffledIndex(nil, kv.Len(), 1, func(i int) { + key_, key, value := kv.IndexInexact(i) + + // Using exact key. + rvalue, err := db.TestGet(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) + Expect(rvalue).Should(Equal(value), "Value for key %q", key) + + // Using inexact key. + if len(key_) > 0 { + _, err = db.TestGet(key_) + Expect(err).Should(HaveOccurred(), "Error for key %q", key_) + Expect(err).Should(Equal(errors.ErrNotFound)) + } + }) +} + +func TestHas(db Has, kv KeyValue) { + ShuffledIndex(nil, kv.Len(), 1, func(i int) { + key_, key, _ := kv.IndexInexact(i) + + // Using exact key. + ret, err := db.TestHas(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) + Expect(ret).Should(BeTrue(), "False for key %q", key) + + // Using inexact key. 
+ if len(key_) > 0 { + ret, err = db.TestHas(key_) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_) + Expect(ret).ShouldNot(BeTrue(), "True for key %q", key) + } + }) +} + +func TestIter(db NewIterator, r *util.Range, kv KeyValue) { + iter := db.TestNewIterator(r) + Expect(iter.Error()).ShouldNot(HaveOccurred()) + + t := IteratorTesting{ + KeyValue: kv, + Iter: iter, + } + + DoIteratorTesting(&t) + iter.Release() +} + func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) { if rnd == nil { rnd = NewRand() @@ -35,122 +113,68 @@ It("Should find all keys with Find", func() { if db, ok := p.(Find); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rkey, rvalue, err := db.TestFind(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rkey).Should(Equal(key), "Key") - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - rkey, rvalue, err = db.TestFind(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) - }) + TestFind(db, kv) } }) - It("Should return error if the key is not present", func() { + It("Should return error if Find on key after the last", func() { if db, ok := p.(Find); ok { - var key []byte - if kv.Len() > 0 { - key_, _ := kv.Index(kv.Len() - 1) - key = BytesAfter(key_) - } - rkey, _, err := db.TestFind(key) - Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) - Expect(err).Should(Equal(errors.ErrNotFound)) + TestFindAfterLast(db, kv) } }) It("Should only find exact key with Get", func() { if db, ok := p.(Get); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. 
- rvalue, err := db.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - _, err = db.TestGet(key_) - Expect(err).Should(HaveOccurred(), "Error for key %q", key_) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) + TestGet(db, kv) } }) It("Should only find present key with Has", func() { if db, ok := p.(Has); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, _ := kv.IndexInexact(i) - - // Using exact key. - ret, err := db.TestHas(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(ret).Should(BeTrue(), "False for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - ret, err = db.TestHas(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_) - Expect(ret).ShouldNot(BeTrue(), "True for key %q", key) - } - }) + TestHas(db, kv) } }) - TestIter := func(r *util.Range, _kv KeyValue) { + It("Should iterates and seeks correctly", func(done Done) { if db, ok := p.(NewIterator); ok { - iter := db.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := IteratorTesting{ - KeyValue: _kv, - Iter: iter, - } - - DoIteratorTesting(&t) - iter.Release() + TestIter(db, nil, kv.Clone()) } - } - - It("Should iterates and seeks correctly", func(done Done) { - TestIter(nil, kv.Clone()) done <- true }, 3.0) - RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) { - type slice struct { - r *util.Range - start, limit int - } - - key_, _, _ := kv.IndexInexact(i) - for _, x := range []slice{ - {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, - {&util.Range{Start: nil, Limit: key_}, 0, i}, - } { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. 
%d", x.start, x.limit), func(done Done) { - TestIter(x.r, kv.Slice(x.start, x.limit)) - done <- true - }, 3.0) + It("Should iterates and seeks slice correctly", func(done Done) { + if db, ok := p.(NewIterator); ok { + RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) { + type slice struct { + r *util.Range + start, limit int + } + + key_, _, _ := kv.IndexInexact(i) + for _, x := range []slice{ + {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, + {&util.Range{Start: nil, Limit: key_}, 0, i}, + } { + By(fmt.Sprintf("Random index of %d .. %d", x.start, x.limit), func() { + TestIter(db, x.r, kv.Slice(x.start, x.limit)) + }) + } + }) } - }) + done <- true + }, 50.0) - RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) { - r := kv.Range(start, limit) - TestIter(&r, kv.Slice(start, limit)) - done <- true - }, 3.0) - }) + It("Should iterates and seeks slice correctly", func(done Done) { + if db, ok := p.(NewIterator); ok { + RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) { + By(fmt.Sprintf("Random range of %d .. 
%d", start, limit), func() { + r := kv.Range(start, limit) + TestIter(db, &r, kv.Slice(start, limit)) + }) + }) + } + done <- true + }, 50.0) } func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go 2016-05-24 07:05:22.000000000 +0000 @@ -10,6 +10,7 @@ "bytes" "fmt" "io" + "math/rand" "os" "path/filepath" "runtime" @@ -24,8 +25,8 @@ var ( storageMu sync.Mutex - storageUseFS bool = true - storageKeepFS bool = false + storageUseFS = true + storageKeepFS = false storageNum int ) @@ -35,6 +36,7 @@ ModeOpen StorageMode = 1 << iota ModeCreate ModeRemove + ModeRename ModeRead ModeWrite ModeSync @@ -45,6 +47,7 @@ modeOpen = iota modeCreate modeRemove + modeRename modeRead modeWrite modeSync @@ -73,6 +76,8 @@ x = modeCreate case ModeRemove: x = modeRemove + case ModeRename: + x = modeRename case ModeRead: x = modeRead case ModeWrite: @@ -121,6 +126,8 @@ add(modeCreate) case m&ModeRemove != 0: add(modeRemove) + case m&ModeRename != 0: + add(modeRename) case m&ModeRead != 0: add(modeRead) case m&ModeWrite != 0: @@ -133,15 +140,15 @@ return ret } -func packFile(num uint64, t storage.FileType) uint64 { - if num>>(64-typeCount) != 0 { +func packFile(fd storage.FileDesc) uint64 { + if fd.Num>>(63-typeCount) != 0 { panic("overflow") } - return num<> typeCount, storage.FileType(x) & storage.TypeAll +func unpackFile(x uint64) storage.FileDesc { + return storage.FileDesc{storage.FileType(x) & storage.TypeAll, int64(x >> typeCount)} } type emulatedError struct { @@ -163,189 
+170,98 @@ } type reader struct { - f *file + s *Storage + fd storage.FileDesc storage.Reader } func (r *reader) Read(p []byte) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) + err = r.s.emulateError(ModeRead, r.fd.Type) if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) + r.s.stall(ModeRead, r.fd.Type) n, err = r.Reader.Read(p) } - r.f.s.count(ModeRead, r.f.Type(), n) + r.s.count(ModeRead, r.fd.Type, n) if err != nil && err != io.EOF { - r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) + r.s.logI("read error, fd=%s n=%d err=%v", r.fd, n, err) } return } func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) + err = r.s.emulateError(ModeRead, r.fd.Type) if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) + r.s.stall(ModeRead, r.fd.Type) n, err = r.Reader.ReadAt(p, off) } - r.f.s.count(ModeRead, r.f.Type(), n) + r.s.count(ModeRead, r.fd.Type, n) if err != nil && err != io.EOF { - r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) + r.s.logI("readAt error, fd=%s offset=%d n=%d err=%v", r.fd, off, n, err) } return } func (r *reader) Close() (err error) { - return r.f.doClose(r.Reader) + return r.s.fileClose(r.fd, r.Reader) } type writer struct { - f *file + s *Storage + fd storage.FileDesc storage.Writer } func (w *writer) Write(p []byte) (n int, err error) { - err = w.f.s.emulateError(ModeWrite, w.f.Type()) + err = w.s.emulateError(ModeWrite, w.fd.Type) if err == nil { - w.f.s.stall(ModeWrite, w.f.Type()) + w.s.stall(ModeWrite, w.fd.Type) n, err = w.Writer.Write(p) } - w.f.s.count(ModeWrite, w.f.Type(), n) + w.s.count(ModeWrite, w.fd.Type, n) if err != nil && err != io.EOF { - w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) + w.s.logI("write error, fd=%s n=%d err=%v", w.fd, n, err) } return } func (w *writer) Sync() (err error) { - err = 
w.f.s.emulateError(ModeSync, w.f.Type()) + err = w.s.emulateError(ModeSync, w.fd.Type) if err == nil { - w.f.s.stall(ModeSync, w.f.Type()) + w.s.stall(ModeSync, w.fd.Type) err = w.Writer.Sync() } - w.f.s.count(ModeSync, w.f.Type(), 0) + w.s.count(ModeSync, w.fd.Type, 0) if err != nil { - w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) + w.s.logI("sync error, fd=%s err=%v", w.fd, err) } return } func (w *writer) Close() (err error) { - return w.f.doClose(w.Writer) -} - -type file struct { - s *Storage - storage.File -} - -func (f *file) pack() uint64 { - return packFile(f.Num(), f.Type()) -} - -func (f *file) assertOpen() { - ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) -} - -func (f *file) doClose(closer io.Closer) (err error) { - err = f.s.emulateError(ModeClose, f.Type()) - if err == nil { - f.s.stall(ModeClose, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) - err = closer.Close() - } - f.s.countNB(ModeClose, f.Type(), 0) - writer := f.s.opens[f.pack()] - if err != nil { - f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) - } else { - f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) - delete(f.s.opens, f.pack()) - } - return -} - -func (f *file) Open() (r storage.Reader, err error) { - err = f.s.emulateError(ModeOpen, f.Type()) - if err == nil { - f.s.stall(ModeOpen, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeOpen, f.Type(), 0) - r, err = f.File.Open() - } - if err != nil { - f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = false - r = &reader{f, r} - 
} - return -} - -func (f *file) Create() (w storage.Writer, err error) { - err = f.s.emulateError(ModeCreate, f.Type()) - if err == nil { - f.s.stall(ModeCreate, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeCreate, f.Type(), 0) - w, err = f.File.Create() - } - if err != nil { - f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = true - w = &writer{f, w} - } - return -} - -func (f *file) Remove() (err error) { - err = f.s.emulateError(ModeRemove, f.Type()) - if err == nil { - f.s.stall(ModeRemove, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeRemove, f.Type(), 0) - err = f.File.Remove() - } - if err != nil { - f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) - } - return + return w.s.fileClose(w.fd, w.Writer) } type Storage struct { storage.Storage - closeFn func() error + path string + onClose func() (preserve bool, err error) + onLog func(str string) lmu sync.Mutex lb bytes.Buffer - mu sync.Mutex + mu sync.Mutex + rand *rand.Rand // Open files, true=writer, false=reader - opens map[uint64]bool - counters [flattenCount]int - bytesCounter [flattenCount]int64 - emulatedError [flattenCount]error - stallCond sync.Cond - stalled [flattenCount]bool + opens map[uint64]bool + counters [flattenCount]int + bytesCounter [flattenCount]int64 + emulatedError [flattenCount]error + emulatedErrorOnce [flattenCount]bool + emulatedRandomError [flattenCount]error + emulatedRandomErrorProb [flattenCount]float64 + stallCond sync.Cond + stalled [flattenCount]bool } func (s *Storage) log(skip int, str string) { @@ -374,7 +290,12 @@ } s.lb.WriteString(line) } - s.lb.WriteByte('\n') + if s.onLog != nil { + s.onLog(s.lb.String()) + s.lb.Reset() + } 
else { + s.lb.WriteByte('\n') + } } func (s *Storage) logISkip(skip int, format string, args ...interface{}) { @@ -395,74 +316,220 @@ s.logISkip(1, format, args...) } +func (s *Storage) OnLog(onLog func(log string)) { + s.lmu.Lock() + s.onLog = onLog + if s.lb.Len() != 0 { + log := s.lb.String() + s.onLog(log[:len(log)-1]) + s.lb.Reset() + } + s.lmu.Unlock() +} + func (s *Storage) Log(str string) { s.log(1, "Log: "+str) s.Storage.Log(str) } -func (s *Storage) Lock() (r util.Releaser, err error) { - r, err = s.Storage.Lock() +func (s *Storage) Lock() (l storage.Lock, err error) { + l, err = s.Storage.Lock() if err != nil { s.logI("storage locking failed, err=%v", err) } else { s.logI("storage locked") - r = storageLock{s, r} + l = storageLock{s, l} } return } -func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { - return &file{s, s.Storage.GetFile(num, t)} -} - -func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { - rfiles, err := s.Storage.GetFiles(t) +func (s *Storage) List(t storage.FileType) (fds []storage.FileDesc, err error) { + fds, err = s.Storage.List(t) if err != nil { - s.logI("get files failed, err=%v", err) + s.logI("list failed, err=%v", err) return } - files = make([]storage.File, len(rfiles)) - for i, f := range rfiles { - files[i] = &file{s, f} - } - s.logI("get files, type=0x%x count=%d", int(t), len(files)) + s.logI("list, type=0x%x count=%d", int(t), len(fds)) return } -func (s *Storage) GetManifest() (f storage.File, err error) { - manifest, err := s.Storage.GetManifest() +func (s *Storage) GetMeta() (fd storage.FileDesc, err error) { + fd, err = s.Storage.GetMeta() if err != nil { if !os.IsNotExist(err) { - s.logI("get manifest failed, err=%v", err) + s.logI("get meta failed, err=%v", err) } return } - s.logI("get manifest, num=%d", manifest.Num()) - return &file{s, manifest}, nil + s.logI("get meta, fd=%s", fd) + return } -func (s *Storage) SetManifest(f storage.File) error { - f_, ok := 
f.(*file) - ExpectWithOffset(1, ok).To(BeTrue()) - ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) - err := s.Storage.SetManifest(f_.File) +func (s *Storage) SetMeta(fd storage.FileDesc) error { + ExpectWithOffset(1, fd.Type).To(Equal(storage.TypeManifest)) + err := s.Storage.SetMeta(fd) if err != nil { - s.logI("set manifest failed, err=%v", err) + s.logI("set meta failed, fd=%s err=%v", fd, err) } else { - s.logI("set manifest, num=%d", f_.Num()) + s.logI("set meta, fd=%s", fd) } return err } +func (s *Storage) fileClose(fd storage.FileDesc, closer io.Closer) (err error) { + err = s.emulateError(ModeClose, fd.Type) + if err == nil { + s.stall(ModeClose, fd.Type) + } + x := packFile(fd) + s.mu.Lock() + defer s.mu.Unlock() + if err == nil { + ExpectWithOffset(2, s.opens).To(HaveKey(x), "File closed, fd=%s", fd) + err = closer.Close() + } + s.countNB(ModeClose, fd.Type, 0) + writer := s.opens[x] + if err != nil { + s.logISkip(1, "file close failed, fd=%s writer=%v err=%v", fd, writer, err) + } else { + s.logISkip(1, "file closed, fd=%s writer=%v", fd, writer) + delete(s.opens, x) + } + return +} + +func (s *Storage) assertOpen(fd storage.FileDesc) { + x := packFile(fd) + ExpectWithOffset(2, s.opens).NotTo(HaveKey(x), "File open, fd=%s writer=%v", fd, s.opens[x]) +} + +func (s *Storage) Open(fd storage.FileDesc) (r storage.Reader, err error) { + err = s.emulateError(ModeOpen, fd.Type) + if err == nil { + s.stall(ModeOpen, fd.Type) + } + s.mu.Lock() + defer s.mu.Unlock() + if err == nil { + s.assertOpen(fd) + s.countNB(ModeOpen, fd.Type, 0) + r, err = s.Storage.Open(fd) + } + if err != nil { + s.logI("file open failed, fd=%s err=%v", fd, err) + } else { + s.logI("file opened, fd=%s", fd) + s.opens[packFile(fd)] = false + r = &reader{s, fd, r} + } + return +} + +func (s *Storage) Create(fd storage.FileDesc) (w storage.Writer, err error) { + err = s.emulateError(ModeCreate, fd.Type) + if err == nil { + s.stall(ModeCreate, fd.Type) + } + s.mu.Lock() + 
defer s.mu.Unlock() + if err == nil { + s.assertOpen(fd) + s.countNB(ModeCreate, fd.Type, 0) + w, err = s.Storage.Create(fd) + } + if err != nil { + s.logI("file create failed, fd=%s err=%v", fd, err) + } else { + s.logI("file created, fd=%s", fd) + s.opens[packFile(fd)] = true + w = &writer{s, fd, w} + } + return +} + +func (s *Storage) Remove(fd storage.FileDesc) (err error) { + err = s.emulateError(ModeRemove, fd.Type) + if err == nil { + s.stall(ModeRemove, fd.Type) + } + s.mu.Lock() + defer s.mu.Unlock() + if err == nil { + s.assertOpen(fd) + s.countNB(ModeRemove, fd.Type, 0) + err = s.Storage.Remove(fd) + } + if err != nil { + s.logI("file remove failed, fd=%s err=%v", fd, err) + } else { + s.logI("file removed, fd=%s", fd) + } + return +} + +func (s *Storage) ForceRemove(fd storage.FileDesc) (err error) { + s.countNB(ModeRemove, fd.Type, 0) + if err = s.Storage.Remove(fd); err != nil { + s.logI("file remove failed (forced), fd=%s err=%v", fd, err) + } else { + s.logI("file removed (forced), fd=%s", fd) + } + return +} + +func (s *Storage) Rename(oldfd, newfd storage.FileDesc) (err error) { + err = s.emulateError(ModeRename, oldfd.Type) + if err == nil { + s.stall(ModeRename, oldfd.Type) + } + s.mu.Lock() + defer s.mu.Unlock() + if err == nil { + s.assertOpen(oldfd) + s.assertOpen(newfd) + s.countNB(ModeRename, oldfd.Type, 0) + err = s.Storage.Rename(oldfd, newfd) + } + if err != nil { + s.logI("file rename failed, oldfd=%s newfd=%s err=%v", oldfd, newfd, err) + } else { + s.logI("file renamed, oldfd=%s newfd=%s", oldfd, newfd) + } + return +} + +func (s *Storage) ForceRename(oldfd, newfd storage.FileDesc) (err error) { + s.countNB(ModeRename, oldfd.Type, 0) + if err = s.Storage.Rename(oldfd, newfd); err != nil { + s.logI("file rename failed (forced), oldfd=%s newfd=%s err=%v", oldfd, newfd, err) + } else { + s.logI("file renamed (forced), oldfd=%s newfd=%s", oldfd, newfd) + } + return +} + func (s *Storage) openFiles() string { out := "Open files:" for x, 
writer := range s.opens { - num, t := unpackFile(x) - out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) + fd := unpackFile(x) + out += fmt.Sprintf("\n · fd=%s writer=%v", fd, writer) } return out } +func (s *Storage) CloseCheck() { + s.mu.Lock() + defer s.mu.Unlock() + ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) +} + +func (s *Storage) OnClose(onClose func() (preserve bool, err error)) { + s.mu.Lock() + s.onClose = onClose + s.mu.Unlock() +} + func (s *Storage) Close() error { s.mu.Lock() defer s.mu.Unlock() @@ -473,9 +540,22 @@ } else { s.logI("storage closed") } - if s.closeFn != nil { - if err1 := s.closeFn(); err1 != nil { - s.logI("close func error, err=%v", err1) + var preserve bool + if s.onClose != nil { + var err0 error + if preserve, err0 = s.onClose(); err0 != nil { + s.logI("onClose error, err=%v", err0) + } + } + if s.path != "" { + if storageKeepFS || preserve { + s.logI("storage is preserved, path=%v", s.path) + } else { + if err1 := os.RemoveAll(s.path); err1 != nil { + s.logI("cannot remove storage, err=%v", err1) + } else { + s.logI("storage has been removed") + } } } return err @@ -510,8 +590,14 @@ func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { s.mu.Lock() defer s.mu.Unlock() - err := s.emulatedError[flattenType(m, t)] - if err != nil { + x := flattenType(m, t) + if err := s.emulatedError[x]; err != nil { + if s.emulatedErrorOnce[x] { + s.emulatedError[x] = nil + } + return emulatedError{err} + } + if err := s.emulatedRandomError[x]; err != nil && s.rand.Float64() < s.emulatedRandomErrorProb[x] { return emulatedError{err} } return nil @@ -522,6 +608,25 @@ defer s.mu.Unlock() for _, x := range listFlattenType(m, t) { s.emulatedError[x] = err + s.emulatedErrorOnce[x] = false + } +} + +func (s *Storage) EmulateErrorOnce(m StorageMode, t storage.FileType, err error) { + s.mu.Lock() + defer s.mu.Unlock() + for _, x := range listFlattenType(m, t) { + s.emulatedError[x] = err + 
s.emulatedErrorOnce[x] = true + } +} + +func (s *Storage) EmulateRandomError(m StorageMode, t storage.FileType, prob float64, err error) { + s.mu.Lock() + defer s.mu.Unlock() + for _, x := range listFlattenType(m, t) { + s.emulatedRandomError[x] = err + s.emulatedRandomErrorProb[x] = prob } } @@ -552,24 +657,20 @@ } func NewStorage() *Storage { - var stor storage.Storage - var closeFn func() error + var ( + stor storage.Storage + path string + ) if storageUseFS { for { storageMu.Lock() num := storageNum storageNum++ storageMu.Unlock() - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) + path = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) if _, err := os.Stat(path); os.IsNotExist(err) { - stor, err = storage.OpenFile(path) + stor, err = storage.OpenFile(path, false) ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) - closeFn = func() error { - if storageKeepFS { - return nil - } - return os.RemoveAll(path) - } break } } @@ -578,9 +679,16 @@ } s := &Storage{ Storage: stor, - closeFn: closeFn, + path: path, + rand: NewRand(), opens: make(map[uint64]bool), } s.stallCond.L = &s.mu + if s.path != "" { + s.logI("using FS storage") + s.logI("storage path: %s", s.path) + } else { + s.logI("using MEM storage") + } return s } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -61,3 +61,31 @@ stor: stor, } } + +type testingTransaction struct { + *Transaction + ro *opt.ReadOptions + wo 
*opt.WriteOptions +} + +func (t *testingTransaction) TestPut(key []byte, value []byte) error { + return t.Put(key, value, t.wo) +} + +func (t *testingTransaction) TestDelete(key []byte) error { + return t.Delete(key, t.wo) +} + +func (t *testingTransaction) TestGet(key []byte) (value []byte, err error) { + return t.Get(key, t.ro) +} + +func (t *testingTransaction) TestHas(key []byte) (ret bool, err error) { + return t.Has(key, t.ro) +} + +func (t *testingTransaction) TestNewIterator(slice *util.Range) iterator.Iterator { + return t.NewIterator(slice, t.ro) +} + +func (t *testingTransaction) TestClose() {} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util/pool.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util/pool.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util/pool.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util/pool.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build go1.3 - -package util - -import ( - "sync" -) - -type Pool struct { - sync.Pool -} - -func NewPool(cap int) *Pool { - return &Pool{} -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.3 - -package util - -type Pool struct { - pool chan interface{} -} - -func (p *Pool) Get() interface{} { - select { - case x := <-p.pool: - return x - default: - return nil - } -} - -func (p *Pool) Put(x interface{}) { - select { - case p.pool <- x: - default: - } -} - -func NewPool(cap int) *Pool { - return &Pool{pool: make(chan interface{}, cap)} -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/util.go 2016-05-24 07:05:22.000000000 +0000 @@ -72,20 +72,20 @@ return b } -type files []storage.File +type fdSorter []storage.FileDesc -func (p files) Len() int { +func (p fdSorter) Len() int { return len(p) } -func (p files) Less(i, j int) bool { - return p[i].Num() < p[j].Num() +func (p fdSorter) Less(i, j int) bool { + return 
p[i].Num < p[j].Num } -func (p files) Swap(i, j int) { +func (p fdSorter) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p files) sort() { - sort.Sort(p) +func sortFds(fds []storage.FileDesc) { + sort.Sort(fdSorter(fds)) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/version.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/version.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/version.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/version.go 2016-05-24 07:05:22.000000000 +0000 @@ -7,6 +7,7 @@ package leveldb import ( + "fmt" "sync/atomic" "unsafe" @@ -23,7 +24,7 @@ type version struct { s *session - tables []tFiles + levels []tFiles // Level that should be compacted next and its compaction score. // Score < 1 means compaction is not strictly needed. These fields @@ -39,7 +40,7 @@ } func newVersion(s *session) *version { - return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())} + return &version{s: s} } func (v *version) releaseNB() { @@ -51,18 +52,18 @@ panic("negative version ref") } - tables := make(map[uint64]bool) - for _, tt := range v.next.tables { + nextTables := make(map[int64]bool) + for _, tt := range v.next.levels { for _, t := range tt { - num := t.file.Num() - tables[num] = true + num := t.fd.Num + nextTables[num] = true } } - for _, tt := range v.tables { + for _, tt := range v.levels { for _, t := range tt { - num := t.file.Num() - if _, ok := tables[num]; !ok { + num := t.fd.Num + if _, ok := nextTables[num]; !ok { v.s.tops.remove(t) } } @@ -78,11 +79,26 @@ v.s.vmu.Unlock() } -func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { +func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) { ukey := 
ikey.ukey() + // Aux level. + if aux != nil { + for _, t := range aux { + if t.overlaps(v.s.icmp, ukey, ukey) { + if !f(-1, t) { + return + } + } + } + + if lf != nil && !lf(-1) { + return + } + } + // Walk tables level-by-level. - for level, tables := range v.tables { + for level, tables := range v.levels { if len(tables) == 0 { continue } @@ -114,7 +130,7 @@ } } -func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { +func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { ukey := ikey.ukey() var ( @@ -124,16 +140,16 @@ // Level-0. zfound bool zseq uint64 - zkt kType + zkt keyType zval []byte ) err = ErrNotFound - // Since entries never hope across level, finding key/value + // Since entries never hop across level, finding key/value // in smaller level make later levels irrelevant. - v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if !tseek { + v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool { + if level >= 0 && !tseek { if tset == nil { tset = &tSet{level, t} } else { @@ -150,6 +166,7 @@ } else { fikey, fval, ferr = v.s.tops.find(t, ikey, ro) } + switch ferr { case nil: case ErrNotFound: @@ -159,9 +176,10 @@ return false } - if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil { + if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil { if v.s.icmp.uCompare(ukey, fukey) == 0 { - if level == 0 { + // Level <= 0 may overlaps each-other. 
+ if level <= 0 { if fseq >= zseq { zfound = true zseq = fseq @@ -170,12 +188,12 @@ } } else { switch fkt { - case ktVal: + case keyTypeVal: value = fval err = nil - case ktDel: + case keyTypeDel: default: - panic("leveldb: invalid iKey type") + panic("leveldb: invalid internalKey type") } return false } @@ -189,12 +207,12 @@ }, func(level int) bool { if zfound { switch zkt { - case ktVal: + case keyTypeVal: value = zval err = nil - case ktDel: + case keyTypeDel: default: - panic("leveldb: invalid iKey type") + panic("leveldb: invalid internalKey type") } return false } @@ -209,46 +227,40 @@ return } -func (v *version) sampleSeek(ikey iKey) (tcomp bool) { +func (v *version) sampleSeek(ikey internalKey) (tcomp bool) { var tset *tSet - v.walkOverlapping(ikey, func(level int, t *tFile) bool { + v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool { if tset == nil { tset = &tSet{level, t} return true - } else { - if tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - return false } + if tset.table.consumeSeek() <= 0 { + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + return false }, nil) return } func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - // Merge all level zero files together since they may overlap - for _, t := range v.tables[0] { - it := v.s.tops.newIterator(t, slice, ro) - its = append(its, it) - } - strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) - for _, tables := range v.tables[1:] { - if len(tables) == 0 { - continue + for level, tables := range v.levels { + if level == 0 { + // Merge all level zero files together since they may overlap. 
+ for _, t := range tables { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + } else if len(tables) != 0 { + its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)) } - - it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict) - its = append(its, it) } - return } func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())} + return &versionStaging{base: v} } // Spawn a new version based on this version. @@ -259,19 +271,22 @@ } func (v *version) fillRecord(r *sessionRecord) { - for level, ts := range v.tables { - for _, t := range ts { + for level, tables := range v.levels { + for _, t := range tables { r.addTableFile(level, t) } } } func (v *version) tLen(level int) int { - return len(v.tables[level]) + if level < len(v.levels) { + return len(v.levels[level]) + } + return 0 } -func (v *version) offsetOf(ikey iKey) (n uint64, err error) { - for level, tables := range v.tables { +func (v *version) offsetOf(ikey internalKey) (n int64, err error) { + for level, tables := range v.levels { for _, t := range tables { if v.s.icmp.Compare(t.imax, ikey) <= 0 { // Entire file is before "ikey", so just add the file size @@ -287,12 +302,11 @@ } else { // "ikey" falls in the range for this table. Add the // approximate offset of "ikey" within the table. 
- var nn uint64 - nn, err = v.s.tops.offsetOf(t, ikey) - if err != nil { + if m, err := v.s.tops.offsetOf(t, ikey); err == nil { + n += m + } else { return 0, err } - n += nn } } } @@ -300,37 +314,50 @@ return } -func (v *version) pickMemdbLevel(umin, umax []byte) (level int) { - if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - maxLevel := v.s.o.GetMaxMemCompationLevel() - for ; level < maxLevel; level++ { - if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { - break - } - overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) { - break +func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) { + if maxLevel > 0 { + if len(v.levels) == 0 { + return maxLevel + } + if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) { + var overlaps tFiles + for ; level < maxLevel; level++ { + if pLevel := level + 1; pLevel >= len(v.levels) { + return maxLevel + } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) { + break + } + if gpLevel := level + 2; gpLevel < len(v.levels) { + overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false) + if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) { + break + } + } } } } - return } func (v *version) computeCompaction() { // Precomputed best level for next compaction - var bestLevel int = -1 - var bestScore float64 = -1 + bestLevel := int(-1) + bestScore := float64(-1) - for level, tables := range v.tables { + statFiles := make([]int, len(v.levels)) + statSizes := make([]string, len(v.levels)) + statScore := make([]string, len(v.levels)) + statTotSize := int64(0) + + for level, tables := range v.levels { var score float64 + size := tables.size() if level == 0 { // We treat level-0 specially by bounding the number of files // instead of number of bytes for two reasons: // // (1) With larger write-buffer sizes, it is nice not to do too - // 
many level-0 compactions. + // many level-0 compaction. // // (2) The files in level-0 are merged on every read and // therefore we wish to avoid too many files when the individual @@ -339,17 +366,24 @@ // overwrites/deletions). score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) } else { - score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level)) + score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level)) } if score > bestScore { bestLevel = level bestScore = score } + + statFiles[level] = len(tables) + statSizes[level] = shortenb(int(size)) + statScore[level] = fmt.Sprintf("%.2f", score) + statTotSize += size } v.cLevel = bestLevel v.cScore = bestScore + + v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore) } func (v *version) needCompaction() bool { @@ -357,43 +391,48 @@ } type tablesScratch struct { - added map[uint64]atRecord - deleted map[uint64]struct{} + added map[int64]atRecord + deleted map[int64]struct{} } type versionStaging struct { base *version - tables []tablesScratch + levels []tablesScratch +} + +func (p *versionStaging) getScratch(level int) *tablesScratch { + if level >= len(p.levels) { + newLevels := make([]tablesScratch, level+1) + copy(newLevels, p.levels) + p.levels = newLevels + } + return &(p.levels[level]) } func (p *versionStaging) commit(r *sessionRecord) { // Deleted tables. for _, r := range r.deletedTables { - tm := &(p.tables[r.level]) - - if len(p.base.tables[r.level]) > 0 { - if tm.deleted == nil { - tm.deleted = make(map[uint64]struct{}) + scratch := p.getScratch(r.level) + if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 { + if scratch.deleted == nil { + scratch.deleted = make(map[int64]struct{}) } - tm.deleted[r.num] = struct{}{} + scratch.deleted[r.num] = struct{}{} } - - if tm.added != nil { - delete(tm.added, r.num) + if scratch.added != nil { + delete(scratch.added, r.num) } } // New tables. 
for _, r := range r.addedTables { - tm := &(p.tables[r.level]) - - if tm.added == nil { - tm.added = make(map[uint64]atRecord) - } - tm.added[r.num] = r - - if tm.deleted != nil { - delete(tm.deleted, r.num) + scratch := p.getScratch(r.level) + if scratch.added == nil { + scratch.added = make(map[int64]atRecord) + } + scratch.added[r.num] = r + if scratch.deleted != nil { + delete(scratch.deleted, r.num) } } } @@ -401,40 +440,63 @@ func (p *versionStaging) finish() *version { // Build new version. nv := newVersion(p.base.s) - for level, tm := range p.tables { - btables := p.base.tables[level] - - n := len(btables) + len(tm.added) - len(tm.deleted) - if n < 0 { - n = 0 - } - nt := make(tFiles, 0, n) - - // Base tables. - for _, t := range btables { - if _, ok := tm.deleted[t.file.Num()]; ok { - continue + numLevel := len(p.levels) + if len(p.base.levels) > numLevel { + numLevel = len(p.base.levels) + } + nv.levels = make([]tFiles, numLevel) + for level := 0; level < numLevel; level++ { + var baseTabels tFiles + if level < len(p.base.levels) { + baseTabels = p.base.levels[level] + } + + if level < len(p.levels) { + scratch := p.levels[level] + + var nt tFiles + // Prealloc list if possible. + if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 { + nt = make(tFiles, 0, n) + } + + // Base tables. + for _, t := range baseTabels { + if _, ok := scratch.deleted[t.fd.Num]; ok { + continue + } + if _, ok := scratch.added[t.fd.Num]; ok { + continue + } + nt = append(nt, t) } - if _, ok := tm.added[t.file.Num()]; ok { - continue + + // New tables. + for _, r := range scratch.added { + nt = append(nt, tableFileFromRecord(r)) } - nt = append(nt, t) - } - // New tables. - for _, r := range tm.added { - nt = append(nt, p.base.s.tableFileFromRecord(r)) - } + if len(nt) != 0 { + // Sort tables. + if level == 0 { + nt.sortByNum() + } else { + nt.sortByKey(p.base.s.icmp) + } - // Sort tables. 
- if level == 0 { - nt.sortByNum() + nv.levels[level] = nt + } } else { - nt.sortByKey(p.base.s.icmp) + nv.levels[level] = baseTabels } - nv.tables[level] = nt } + // Trim levels. + n := len(nv.levels) + for ; n > 0 && nv.levels[n-1] == nil; n-- { + } + nv.levels = nv.levels[:n] + // Compute compaction score for new version. nv.computeCompaction() diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/version_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/version_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/version_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/leveldb/version_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,181 @@ +package leveldb + +import ( + "encoding/binary" + "reflect" + "testing" + + "github.com/onsi/gomega" + + "github.com/syndtr/goleveldb/leveldb/testutil" +) + +type testFileRec struct { + level int + num int64 +} + +func TestVersionStaging(t *testing.T) { + gomega.RegisterTestingT(t) + stor := testutil.NewStorage() + defer stor.Close() + s, err := newSession(stor, nil) + if err != nil { + t.Fatal(err) + } + + v := newVersion(s) + v.newStaging() + + tmp := make([]byte, 4) + mik := func(i uint64) []byte { + binary.BigEndian.PutUint32(tmp, uint32(i)) + return []byte(makeInternalKey(nil, tmp, 0, keyTypeVal)) + } + + for i, x := range []struct { + add, del []testFileRec + levels [][]int64 + }{ + { + add: []testFileRec{ + {1, 1}, + }, + levels: [][]int64{ + {}, + {1}, + }, + }, + { + add: []testFileRec{ + {1, 1}, + }, + levels: [][]int64{ + {}, + {1}, + }, + }, + { + del: []testFileRec{ + {1, 1}, + }, + levels: [][]int64{}, + }, + { + add: []testFileRec{ + {0, 1}, + {0, 3}, + {0, 2}, + {2, 5}, + {1, 4}, + }, + levels: [][]int64{ + {3, 2, 1}, + {4}, + {5}, + }, + }, + { + add: []testFileRec{ + {1, 6}, + {2, 5}, + }, + 
del: []testFileRec{ + {0, 1}, + {0, 4}, + }, + levels: [][]int64{ + {3, 2}, + {4, 6}, + {5}, + }, + }, + { + del: []testFileRec{ + {0, 3}, + {0, 2}, + {1, 4}, + {1, 6}, + {2, 5}, + }, + levels: [][]int64{}, + }, + { + add: []testFileRec{ + {0, 1}, + }, + levels: [][]int64{ + {1}, + }, + }, + { + add: []testFileRec{ + {1, 2}, + }, + levels: [][]int64{ + {1}, + {2}, + }, + }, + { + add: []testFileRec{ + {0, 3}, + }, + levels: [][]int64{ + {3, 1}, + {2}, + }, + }, + { + add: []testFileRec{ + {6, 9}, + }, + levels: [][]int64{ + {3, 1}, + {2}, + {}, + {}, + {}, + {}, + {9}, + }, + }, + { + del: []testFileRec{ + {6, 9}, + }, + levels: [][]int64{ + {3, 1}, + {2}, + }, + }, + } { + rec := &sessionRecord{} + for _, f := range x.add { + ik := mik(uint64(f.num)) + rec.addTable(f.level, f.num, 1, ik, ik) + } + for _, f := range x.del { + rec.delTable(f.level, f.num) + } + vs := v.newStaging() + vs.commit(rec) + v = vs.finish() + if len(v.levels) != len(x.levels) { + t.Fatalf("#%d: invalid level count: want=%d got=%d", i, len(x.levels), len(v.levels)) + } + for j, want := range x.levels { + tables := v.levels[j] + if len(want) != len(tables) { + t.Fatalf("#%d.%d: invalid tables count: want=%d got=%d", i, j, len(want), len(tables)) + } + got := make([]int64, len(tables)) + for k, t := range tables { + got[k] = t.fd.Num + } + if !reflect.DeepEqual(want, got) { + t.Fatalf("#%d.%d: invalid tables: want=%v got=%v", i, j, want, got) + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/dbstress/key.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/dbstress/key.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/dbstress/key.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/dbstress/key.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,6 +5,7 @@ "fmt" 
"github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/storage" ) type ErrIkeyCorrupted struct { @@ -17,7 +18,7 @@ } func newErrIkeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason}) + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason}) } type kType int diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/dbstress/main.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/dbstress/main.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/dbstress/main.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/dbstress/main.go 2016-05-24 07:05:22.000000000 +0000 @@ -30,9 +30,11 @@ var ( dbPath = path.Join(os.TempDir(), "goleveldb-testdb") openFilesCacheCapacity = 500 - dataLen = 63 + keyLen = 63 + valueLen = 256 numKeys = arrayInt{100000, 1332, 531, 1234, 9553, 1024, 35743} httpProf = "127.0.0.1:5454" + transactionProb = 0.5 enableBlockCache = false enableCompression = false enableBufferPool = false @@ -75,32 +77,42 @@ func init() { flag.StringVar(&dbPath, "db", dbPath, "testdb path") flag.IntVar(&openFilesCacheCapacity, "openfilescachecap", openFilesCacheCapacity, "open files cache capacity") - flag.IntVar(&dataLen, "datalen", dataLen, "data length") + flag.IntVar(&keyLen, "keylen", keyLen, "key length") + flag.IntVar(&valueLen, "valuelen", valueLen, "value length") flag.Var(&numKeys, "numkeys", "num keys") flag.StringVar(&httpProf, "httpprof", httpProf, "http pprof listen addr") + flag.Float64Var(&transactionProb, "transactionprob", transactionProb, "probablity of writes using transaction") flag.BoolVar(&enableBufferPool, "enablebufferpool", enableBufferPool, "enable buffer pool") 
flag.BoolVar(&enableBlockCache, "enableblockcache", enableBlockCache, "enable block cache") flag.BoolVar(&enableCompression, "enablecompression", enableCompression, "enable block compression") } -func randomData(dst []byte, ns, prefix byte, i uint32) []byte { - n := 2 + dataLen + 4 + 4 - n2 := n*2 + 4 - if cap(dst) < n2 { - dst = make([]byte, n2) +func randomData(dst []byte, ns, prefix byte, i uint32, dataLen int) []byte { + if dataLen < (2+4+4)*2+4 { + panic("dataLen is too small") + } + if cap(dst) < dataLen { + dst = make([]byte, dataLen) } else { - dst = dst[:n2] + dst = dst[:dataLen] } - _, err := rand.Reader.Read(dst[2 : n-8]) - if err != nil { + half := (dataLen - 4) / 2 + if _, err := rand.Reader.Read(dst[2 : half-8]); err != nil { panic(err) } dst[0] = ns dst[1] = prefix - binary.LittleEndian.PutUint32(dst[n-8:], i) - binary.LittleEndian.PutUint32(dst[n-4:], util.NewCRC(dst[:n-4]).Value()) - copy(dst[n:n+n], dst[:n]) - binary.LittleEndian.PutUint32(dst[n2-4:], util.NewCRC(dst[:n2-4]).Value()) + binary.LittleEndian.PutUint32(dst[half-8:], i) + binary.LittleEndian.PutUint32(dst[half-8:], i) + binary.LittleEndian.PutUint32(dst[half-4:], util.NewCRC(dst[:half-4]).Value()) + full := half * 2 + copy(dst[half:full], dst[:half]) + if full < dataLen-4 { + if _, err := rand.Reader.Read(dst[full : dataLen-4]); err != nil { + panic(err) + } + } + binary.LittleEndian.PutUint32(dst[dataLen-4:], util.NewCRC(dst[:dataLen-4]).Value()) return dst } @@ -118,7 +130,7 @@ } func dataI(data []byte) uint32 { - return binary.LittleEndian.Uint32(data[len(data)-12:]) + return binary.LittleEndian.Uint32(data[(len(data)-4)/2-8:]) } func dataChecksum(data []byte) (uint32, uint32) { @@ -135,110 +147,12 @@ return util.BytesPrefix([]byte{ns}) } -type testingFile struct { - storage.File -} - -func (tf *testingFile) Remove() error { - if atomic.LoadUint32(&fail) == 1 { - return nil - } - - if tf.Type() == storage.TypeTable { - if scanTable(tf, true) { - return nil - } - } - return 
tf.File.Remove() -} - type testingStorage struct { storage.Storage } -func (ts *testingStorage) GetFile(num uint64, t storage.FileType) storage.File { - return &testingFile{ts.Storage.GetFile(num, t)} -} - -func (ts *testingStorage) GetFiles(t storage.FileType) ([]storage.File, error) { - files, err := ts.Storage.GetFiles(t) - if err != nil { - return nil, err - } - for i := range files { - files[i] = &testingFile{files[i]} - } - return files, nil -} - -func (ts *testingStorage) GetManifest() (storage.File, error) { - f, err := ts.Storage.GetManifest() - if err == nil { - f = &testingFile{f} - } - return f, err -} - -func (ts *testingStorage) SetManifest(f storage.File) error { - return ts.Storage.SetManifest(f.(*testingFile).File) -} - -type latencyStats struct { - mark time.Time - dur, min, max time.Duration - num int -} - -func (s *latencyStats) start() { - s.mark = time.Now() -} - -func (s *latencyStats) record(n int) { - if s.mark.IsZero() { - panic("not started") - } - dur := time.Now().Sub(s.mark) - dur1 := dur / time.Duration(n) - if dur1 < s.min || s.min == 0 { - s.min = dur1 - } - if dur1 > s.max { - s.max = dur1 - } - s.dur += dur - s.num += n - s.mark = time.Time{} -} - -func (s *latencyStats) ratePerSec() int { - durSec := s.dur / time.Second - if durSec > 0 { - return s.num / int(durSec) - } - return s.num -} - -func (s *latencyStats) avg() time.Duration { - if s.num > 0 { - return s.dur / time.Duration(s.num) - } - return 0 -} - -func (s *latencyStats) add(x *latencyStats) { - if x.min < s.min || s.min == 0 { - s.min = x.min - } - if x.max > s.max { - s.max = x.max - } - s.dur += x.dur - s.num += x.num -} - -func scanTable(f storage.File, checksum bool) (corrupted bool) { - fi := storage.NewFileInfo(f) - r, err := f.Open() +func (ts *testingStorage) scanTable(fd storage.FileDesc, checksum bool) (corrupted bool) { + r, err := ts.Open(fd) if err != nil { log.Fatal(err) } @@ -249,11 +163,14 @@ log.Fatal(err) } - o := &opt.Options{Strict: opt.NoStrict} + 
o := &opt.Options{ + DisableLargeBatchTransaction: true, + Strict: opt.NoStrict, + } if checksum { o.Strict = opt.StrictBlockChecksum | opt.StrictReader } - tr, err := table.NewReader(r, size, fi, nil, bpool, o) + tr, err := table.NewReader(r, size, fd, nil, bpool, o) if err != nil { log.Fatal(err) } @@ -261,7 +178,7 @@ checkData := func(i int, t string, data []byte) bool { if len(data) == 0 { - panic(fmt.Sprintf("[%v] nil data: i=%d t=%s", fi, i, t)) + panic(fmt.Sprintf("[%v] nil data: i=%d t=%s", fd, i, t)) } checksum0, checksum1 := dataChecksum(data) @@ -274,7 +191,7 @@ data0c0, data0c1 := dataChecksum(data0) data1c0, data1c1 := dataChecksum(data1) log.Printf("FATAL: [%v] Corrupted data i=%d t=%s (%#x != %#x): %x(%v) vs %x(%v)", - fi, i, t, checksum0, checksum1, data0, data0c0 == data0c1, data1, data1c0 == data1c1) + fd, i, t, checksum0, checksum1, data0, data0c0 == data0c1, data1, data1c0 == data1c1) return true } return false @@ -289,7 +206,7 @@ atomic.StoreUint32(&done, 1) corrupted = true - log.Printf("FATAL: [%v] Corrupted ikey i=%d: %v", fi, i, kerr) + log.Printf("FATAL: [%v] Corrupted ikey i=%d: %v", fd, i, kerr) return } if checkData(i, "key", ukey) { @@ -305,7 +222,7 @@ atomic.StoreUint32(&done, 1) corrupted = true - log.Printf("FATAL: [%v] Corruption detected: %v", fi, err) + log.Printf("FATAL: [%v] Corruption detected: %v", fd, err) } else { log.Fatal(err) } @@ -314,6 +231,72 @@ return } +func (ts *testingStorage) Remove(fd storage.FileDesc) error { + if atomic.LoadUint32(&fail) == 1 { + return nil + } + + if fd.Type == storage.TypeTable { + if ts.scanTable(fd, true) { + return nil + } + } + return ts.Storage.Remove(fd) +} + +type latencyStats struct { + mark time.Time + dur, min, max time.Duration + num int +} + +func (s *latencyStats) start() { + s.mark = time.Now() +} + +func (s *latencyStats) record(n int) { + if s.mark.IsZero() { + panic("not started") + } + dur := time.Now().Sub(s.mark) + dur1 := dur / time.Duration(n) + if dur1 < s.min || s.min 
== 0 { + s.min = dur1 + } + if dur1 > s.max { + s.max = dur1 + } + s.dur += dur + s.num += n + s.mark = time.Time{} +} + +func (s *latencyStats) ratePerSec() int { + durSec := s.dur / time.Second + if durSec > 0 { + return s.num / int(durSec) + } + return s.num +} + +func (s *latencyStats) avg() time.Duration { + if s.num > 0 { + return s.dur / time.Duration(s.num) + } + return 0 +} + +func (s *latencyStats) add(x *latencyStats) { + if x.min < s.min || s.min == 0 { + s.min = x.min + } + if x.max > s.max { + s.max = x.max + } + s.dur += x.dur + s.num += x.num +} + func main() { flag.Parse() @@ -335,12 +318,12 @@ runtime.GOMAXPROCS(runtime.NumCPU()) os.RemoveAll(dbPath) - stor, err := storage.OpenFile(dbPath) + stor, err := storage.OpenFile(dbPath, false) if err != nil { log.Fatal(err) } - stor = &testingStorage{stor} - defer stor.Close() + tstor := &testingStorage{stor} + defer tstor.Close() fatalf := func(err error, format string, v ...interface{}) { atomic.StoreUint32(&fail, 1) @@ -348,10 +331,10 @@ log.Printf("FATAL: "+format, v...) 
if err != nil && errors.IsCorrupted(err) { cerr := err.(*errors.ErrCorrupted) - if cerr.File != nil && cerr.File.Type == storage.TypeTable { + if !cerr.Fd.Nil() && cerr.Fd.Type == storage.TypeTable { log.Print("FATAL: corruption detected, scanning...") - if !scanTable(stor.GetFile(cerr.File.Num, cerr.File.Type), false) { - log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.File) + if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) { + log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd) } } } @@ -372,18 +355,19 @@ o.Compression = opt.DefaultCompression } - db, err := leveldb.Open(stor, o) + db, err := leveldb.Open(tstor, o) if err != nil { log.Fatal(err) } defer db.Close() var ( - mu = &sync.Mutex{} - gGetStat = &latencyStats{} - gIterStat = &latencyStats{} - gWriteStat = &latencyStats{} - startTime = time.Now() + mu = &sync.Mutex{} + gGetStat = &latencyStats{} + gIterStat = &latencyStats{} + gWriteStat = &latencyStats{} + gTrasactionStat = &latencyStats{} + startTime = time.Now() writeReq = make(chan *leveldb.Batch) writeAck = make(chan error) @@ -392,10 +376,26 @@ go func() { for b := range writeReq { - gWriteStat.start() - err := db.Write(b, nil) - if err == nil { - gWriteStat.record(b.Len()) + + var err error + if mrand.Float64() < transactionProb { + log.Print("> Write using transaction") + gTrasactionStat.start() + var tr *leveldb.Transaction + if tr, err = db.OpenTransaction(); err == nil { + if err = tr.Write(b, nil); err == nil { + if err = tr.Commit(); err == nil { + gTrasactionStat.record(b.Len()) + } + } else { + tr.Discard() + } + } + } else { + gWriteStat.start() + if err = db.Write(b, nil); err == nil { + gWriteStat.record(b.Len()) + } } writeAck <- err <-writeAckAck @@ -416,6 +416,8 @@ gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec()) log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d", 
gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec()) + log.Printf("> TransactionLatencyMin=%v TransactionLatencyMax=%v TransactionLatencyAvg=%v TransactionRatePerSec=%d", + gTrasactionStat.min, gTrasactionStat.max, gTrasactionStat.avg(), gTrasactionStat.ratePerSec()) mu.Unlock() cachedblock, _ := db.GetProperty("leveldb.cachedblock") @@ -436,7 +438,7 @@ keys := make([][]byte, numKey) for i := range keys { - keys[i] = randomData(nil, byte(ns), 1, uint32(i)) + keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen) } wg.Add(1) @@ -457,8 +459,8 @@ b.Reset() for _, k1 := range keys { - k2 = randomData(k2, byte(ns), 2, wi) - v2 = randomData(v2, byte(ns), 3, wi) + k2 = randomData(k2, byte(ns), 2, wi, keyLen) + v2 = randomData(v2, byte(ns), 3, wi, valueLen) b.Put(k2, v2) b.Put(k1, k2) } @@ -516,12 +518,17 @@ } getStat.start() - _, err := snap.Get(k2, nil) + v2, err := snap.Get(k2, nil) if err != nil { fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2) } getStat.record(1) + if checksum0, checksum1 := dataChecksum(v2); checksum0 != checksum1 { + err := &errors.ErrCorrupted{Fd: storage.FileDesc{0xff, 0}, Err: fmt.Errorf("v2: %x: checksum mismatch: %v vs %v", v2, checksum0, checksum1)} + fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2) + } + n++ iterStat.start() } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/filelock/main.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/filelock/main.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/filelock/main.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/manualtest/filelock/main.go 2016-05-24 07:05:22.000000000 +0000 @@ -49,7 +49,7 @@ fmt.Println("Child flag set.") } - stor, err := 
storage.OpenFile(filename) + stor, err := storage.OpenFile(filename, false) if err != nil { fmt.Printf("Could not open storage: %s", err) os.Exit(10) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/README.md 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -10,7 +10,7 @@ Requirements ----------- -* Need at least `go1.2` or newer. +* Need at least `go1.4` or newer. Usage ----------- diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/.travis.yml aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/.travis.yml --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/.travis.yml 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/github.com/syndtr/goleveldb/.travis.yml 2016-05-24 07:05:22.000000000 +0000 @@ -1,9 +1,11 @@ language: go go: - - 1.2 - - 1.3 - 1.4 + - 1.5 + - 1.6 - tip -script: go test -timeout 1h ./... +script: + - go test -timeout 1h ./... + - go test -timeout 30m -race -run "TestDB_(Concurrent|GoleveldbIssue74)" ./leveldb diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/asm.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/asm.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/asm.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/asm.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// Assemble converts insts into raw instructions suitable for loading +// into a BPF virtual machine. +// +// Currently, no optimization is attempted, the assembled program flow +// is exactly as provided. +func Assemble(insts []Instruction) ([]RawInstruction, error) { + ret := make([]RawInstruction, len(insts)) + var err error + for i, inst := range insts { + ret[i], err = inst.Assemble() + if err != nil { + return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) + } + } + return ret, nil +} + +// Disassemble attempts to parse raw back into +// Instructions. Unrecognized RawInstructions are assumed to be an +// extension not implemented by this package, and are passed through +// unchanged to the output. The allDecoded value reports whether insts +// contains no RawInstructions. +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { + insts = make([]Instruction, len(raw)) + allDecoded = true + for i, r := range raw { + insts[i] = r.Disassemble() + if _, ok := insts[i].(RawInstruction); ok { + allDecoded = false + } + } + return insts, allDecoded +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/constants.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/constants.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/constants.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/constants.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,215 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Register is a register of the BPF virtual machine. +type Register uint16 + +const ( + // RegA is the accumulator register. 
RegA is always the + // destination register of ALU operations. + RegA Register = iota + // RegX is the indirection register, used by LoadIndirect + // operations. + RegX +) + +// An ALUOp is an arithmetic or logic operation. +type ALUOp uint16 + +// ALU binary operation types. +const ( + ALUOpAdd ALUOp = iota << 4 + ALUOpSub + ALUOpMul + ALUOpDiv + ALUOpOr + ALUOpAnd + ALUOpShiftLeft + ALUOpShiftRight + aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. + ALUOpMod + ALUOpXor +) + +// A JumpTest is a comparison operator used in conditional jumps. +type JumpTest uint16 + +// Supported operators for conditional jumps. +const ( + // K == A + JumpEqual JumpTest = iota + // K != A + JumpNotEqual + // K > A + JumpGreaterThan + // K < A + JumpLessThan + // K >= A + JumpGreaterOrEqual + // K <= A + JumpLessOrEqual + // K & A != 0 + JumpBitsSet + // K & A == 0 + JumpBitsNotSet +) + +// An Extension is a function call provided by the kernel that +// performs advanced operations that are expensive or impossible +// within the BPF virtual machine. +// +// Extensions are only implemented by the Linux kernel. +// +// TODO: should we prune this list? Some of these extensions seem +// either broken or near-impossible to use correctly, whereas other +// (len, random, ifindex) are quite useful. +type Extension int + +// Extension functions available in the Linux kernel. +const ( + // ExtLen returns the length of the packet. + ExtLen Extension = 1 + // ExtProto returns the packet's L3 protocol type. + ExtProto = 0 + // ExtType returns the packet's type (skb->pkt_type in the kernel) + // + // TODO: better documentation. How nice an API do we want to + // provide for these esoteric extensions? + ExtType = 4 + // ExtPayloadOffset returns the offset of the packet payload, or + // the first protocol header that the kernel does not know how to + // parse. 
+ ExtPayloadOffset = 52 + // ExtInterfaceIndex returns the index of the interface on which + // the packet was received. + ExtInterfaceIndex = 8 + // ExtNetlinkAttr returns the netlink attribute of type X at + // offset A. + ExtNetlinkAttr = 12 + // ExtNetlinkAttrNested returns the nested netlink attribute of + // type X at offset A. + ExtNetlinkAttrNested = 16 + // ExtMark returns the packet's mark value. + ExtMark = 20 + // ExtQueue returns the packet's assigned hardware queue. + ExtQueue = 24 + // ExtLinkLayerType returns the packet's hardware address type + // (e.g. Ethernet, Infiniband). + ExtLinkLayerType = 28 + // ExtRXHash returns the packets receive hash. + // + // TODO: figure out what this rxhash actually is. + ExtRXHash = 32 + // ExtCPUID returns the ID of the CPU processing the current + // packet. + ExtCPUID = 36 + // ExtVLANTag returns the packet's VLAN tag. + ExtVLANTag = 44 + // ExtVLANTagPresent returns non-zero if the packet has a VLAN + // tag. + // + // TODO: I think this might be a lie: it reads bit 0x1000 of the + // VLAN header, which changed meaning in recent revisions of the + // spec - this extension may now return meaningless information. + ExtVLANTagPresent = 48 + // ExtVLANProto returns 0x8100 if the frame has a VLAN header, + // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some + // other value if no VLAN information is present. + ExtVLANProto = 60 + // ExtRand returns a uniformly random uint32. + ExtRand = 56 +) + +// The following gives names to various bit patterns used in opcode construction. 
+ +const ( + opMaskCls uint16 = 0x7 + // opClsLoad masks + opMaskLoadDest = 0x01 + opMaskLoadWidth = 0x18 + opMaskLoadMode = 0xe0 + // opClsALU + opMaskOperandSrc = 0x08 + opMaskOperator = 0xf0 + // opClsJump + opMaskJumpConst = 0x0f + opMaskJumpCond = 0xf0 +) + +const ( + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsLoadA uint16 = iota + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | + // +---------------+-----------------+---+---+---+ + opClsLoadX + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | + // +---+---+---+---+---+---+---+---+ + opClsStoreA + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | + // +---+---+---+---+---+---+---+---+ + opClsStoreX + // +---------------+-----------------+---+---+---+ + // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsALU + // +-----------------------------+---+---+---+---+ + // | TestOperator (4b) | 0 | 1 | 0 | 1 | + // +-----------------------------+---+---+---+---+ + opClsJump + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | + // +---+-------------------------+---+---+---+---+ + opClsReturn + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | + // +---+-------------------------+---+---+---+---+ + opClsMisc +) + +const ( + opAddrModeImmediate uint16 = iota << 5 + opAddrModeAbsolute + opAddrModeIndirect + opAddrModeScratch + opAddrModePacketLen // actually an extension, not an addressing mode. 
+ opAddrModeMemShift +) + +const ( + opLoadWidth4 uint16 = iota << 3 + opLoadWidth2 + opLoadWidth1 +) + +// Operator defined by ALUOp* + +const ( + opALUSrcConstant uint16 = iota << 3 + opALUSrcX +) + +const ( + opJumpAlways = iota << 4 + opJumpEqual + opJumpGT + opJumpGE + opJumpSet +) + +const ( + opRetSrcConstant uint16 = iota << 4 + opRetSrcA +) + +const ( + opMiscTAX = 0x00 + opMiscTXA = 0x80 +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/doc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/doc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/doc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package bpf implements marshaling and unmarshaling of programs for the +Berkeley Packet Filter virtual machine. + +BPF's main use is to specify a packet filter for network taps, so that +the kernel doesn't have to expensively copy every packet it sees to +userspace. However, it's been repurposed to other areas where running +user code in-kernel is needed. For example, Linux's seccomp uses BPF +to apply security policies to system calls. For simplicity, this +documentation refers only to packets, but other uses of BPF have their +own data payloads. + +BPF programs run in a restricted virtual machine. It has almost no +access to kernel functions, and while conditional branches are +allowed, they can only jump forwards, to guarantee that there are no +infinite loops. + +The virtual machine + +The BPF VM is an accumulator machine. Its main register, called +register A, is an implicit source and destination in all arithmetic +and logic operations. 
The machine also has 16 scratch registers for +temporary storage, and an indirection register (register X) for +indirect memory access. All registers are 32 bits wide. + +Each run of a BPF program is given one packet, which is placed in the +VM's read-only "main memory". LoadAbsolute and LoadIndirect +instructions can fetch up to 32 bits at a time into register A for +examination. + +The goal of a BPF program is to produce and return a verdict (uint32), +which tells the kernel what to do with the packet. In the context of +packet filtering, the returned value is the number of bytes of the +packet to forward to userspace, or 0 to ignore the packet. Other +contexts like seccomp define their own return values. + +In order to simplify programs, attempts to read past the end of the +packet terminate the program execution with a verdict of 0 (ignore +packet). This means that the vast majority of BPF programs don't need +to do any explicit bounds checking. + +In addition to the bytes of the packet, some BPF programs have access +to extensions, which are essentially calls to kernel utility +functions. Currently, the only extensions supported by this package +are the Linux packet filter extensions. + +Examples + +This packet filter selects all ARP packets. + + bpf.Assemble([]bpf.Instruction{ + // Load "EtherType" field from the ethernet header. + bpf.LoadAbsolute{Off: 12, Size: 2}, + // Skip over the next instruction if EtherType is not ARP. + bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, + // Verdict is "send up to 4k of the packet to userspace." + bpf.RetConstant{Val: 4096}, + // Verdict is "ignore packet." + bpf.RetConstant{Val: 0}, + }) + +This packet filter captures a random 1% sample of traffic. + + bpf.Assemble([]bpf.Instruction{ + // Get a 32-bit random number from the Linux kernel. + bpf.LoadExtension{Num: bpf.ExtRand}, + // 1% dice roll? + bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, + // Capture. 
+ bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + +*/ +package bpf // import "golang.org/x/net/bpf" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/instructions.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/instructions.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/instructions.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/instructions.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,434 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// An Instruction is one instruction executed by the BPF virtual +// machine. +type Instruction interface { + // Assemble assembles the Instruction into a RawInstruction. + Assemble() (RawInstruction, error) +} + +// A RawInstruction is a raw BPF virtual machine instruction. +type RawInstruction struct { + // Operation to execute. + Op uint16 + // For conditional jump instructions, the number of instructions + // to skip if the condition is true/false. + Jt uint8 + Jf uint8 + // Constant parameter. The meaning depends on the Op. + K uint32 +} + +// Assemble implements the Instruction Assemble method. +func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } + +// Disassemble parses ri into an Instruction and returns it. If ri is +// not recognized by this package, ri itself is returned. 
+func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + if ri.Op&opMaskOperandSrc != 0 { + return ALUOpX{Op: op} + } + return ALUOpConstant{Op: op, Val: ri.K} + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + if ri.Op&opMaskJumpConst != opClsJump { + return ri + } + switch ri.Op & opMaskJumpCond { + case opJumpAlways: + return Jump{Skip: ri.K} + case opJumpEqual: + return JumpIf{ + Cond: JumpEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGT: + return JumpIf{ + Cond: JumpGreaterThan, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGE: + return JumpIf{ + Cond: JumpGreaterOrEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, 
+ } + case opJumpSet: + return JumpIf{ + Cond: JumpBitsSet, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +// LoadConstant loads Val into register Dst. +type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// LoadScratch loads scratch[N] into register Dst. +type LoadScratch struct { + Dst Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) +} + +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into +// register A. +type LoadAbsolute struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadAbsolute) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) +} + +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value +// into register A. +type LoadIndirect struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. 
+func (a LoadIndirect) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) +} + +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] +// by 4 and stores the result in register X. +// +// This instruction is mainly useful to load into X the length of an +// IPv4 packet header in a single instruction, rather than have to do +// the arithmetic on the header's first byte by hand. +type LoadMemShift struct { + Off uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadMemShift) Assemble() (RawInstruction, error) { + return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) +} + +// LoadExtension invokes a linux-specific extension and stores the +// result in register A. +type LoadExtension struct { + Num Extension +} + +// Assemble implements the Instruction Assemble method. +func (a LoadExtension) Assemble() (RawInstruction, error) { + if a.Num == ExtLen { + return assembleLoad(RegA, 4, opAddrModePacketLen, 0) + } + return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(-0x1000+a.Num)) +} + +// StoreScratch stores register Src into scratch[N]. +type StoreScratch struct { + Src Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a StoreScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + var op uint16 + switch a.Src { + case RegA: + op = opClsStoreA + case RegX: + op = opClsStoreX + default: + return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) + } + + return RawInstruction{ + Op: op, + K: uint32(a.N), + }, nil +} + +// ALUOpConstant executes A = A Val. +type ALUOpConstant struct { + Op ALUOp + Val uint32 +} + +// Assemble implements the Instruction Assemble method. 
+func (a ALUOpConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | opALUSrcConstant | uint16(a.Op), + K: a.Val, + }, nil +} + +// ALUOpX executes A = A X +type ALUOpX struct { + Op ALUOp +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | opALUSrcX | uint16(a.Op), + }, nil +} + +// NegateA executes A = -A. +type NegateA struct{} + +// Assemble implements the Instruction Assemble method. +func (a NegateA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(aluOpNeg), + }, nil +} + +// Jump skips the following Skip instructions in the program. +type Jump struct { + Skip uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a Jump) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsJump | opJumpAlways, + K: a.Skip, + }, nil +} + +// JumpIf skips the following Skip instructions in the program if A +// Val is true. +type JumpIf struct { + Cond JumpTest + Val uint32 + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIf) Assemble() (RawInstruction, error) { + var ( + cond uint16 + flip bool + ) + switch a.Cond { + case JumpEqual: + cond = opJumpEqual + case JumpNotEqual: + cond, flip = opJumpEqual, true + case JumpGreaterThan: + cond = opJumpGT + case JumpLessThan: + cond, flip = opJumpGE, true + case JumpGreaterOrEqual: + cond = opJumpGE + case JumpLessOrEqual: + cond, flip = opJumpGT, true + case JumpBitsSet: + cond = opJumpSet + case JumpBitsNotSet: + cond, flip = opJumpSet, true + default: + return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond) + } + jt, jf := a.SkipTrue, a.SkipFalse + if flip { + jt, jf = jf, jt + } + return RawInstruction{ + Op: opClsJump | cond, + Jt: jt, + Jf: jf, + K: a.Val, + }, nil +} + +// RetA exits the BPF program, returning the value of register A. 
+type RetA struct{} + +// Assemble implements the Instruction Assemble method. +func (a RetA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcA, + }, nil +} + +// RetConstant exits the BPF program, returning a constant value. +type RetConstant struct { + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a RetConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcConstant, + K: a.Val, + }, nil +} + +// TXA copies the value of register X to register A. +type TXA struct{} + +// Assemble implements the Instruction Assemble method. +func (a TXA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTXA, + }, nil +} + +// TAX copies the value of register A to register X. +type TAX struct{} + +// Assemble implements the Instruction Assemble method. +func (a TAX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTAX, + }, nil +} + +func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { + var ( + cls uint16 + sz uint16 + ) + switch dst { + case RegA: + cls = opClsLoadA + case RegX: + cls = opClsLoadX + default: + return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) + } + switch loadSize { + case 1: + sz = opLoadWidth1 + case 2: + sz = opLoadWidth2 + case 4: + sz = opLoadWidth4 + default: + return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) + } + return RawInstruction{ + Op: cls | sz | mode, + K: k, + }, nil +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/instructions_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/instructions_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/instructions_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/instructions_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "io/ioutil" + "reflect" + "strconv" + "strings" + "testing" +) + +// This is a direct translation of the program in +// testdata/all_instructions.txt. +var allInstructions = []Instruction{ + LoadConstant{Dst: RegA, Val: 42}, + LoadConstant{Dst: RegX, Val: 42}, + + LoadScratch{Dst: RegA, N: 3}, + LoadScratch{Dst: RegX, N: 3}, + + LoadAbsolute{Off: 42, Size: 1}, + LoadAbsolute{Off: 42, Size: 2}, + LoadAbsolute{Off: 42, Size: 4}, + + LoadIndirect{Off: 42, Size: 1}, + LoadIndirect{Off: 42, Size: 2}, + LoadIndirect{Off: 42, Size: 4}, + + LoadMemShift{Off: 42}, + + LoadExtension{Num: ExtLen}, + LoadExtension{Num: ExtProto}, + LoadExtension{Num: ExtType}, + LoadExtension{Num: ExtRand}, + + StoreScratch{Src: RegA, N: 3}, + StoreScratch{Src: RegX, N: 3}, + + ALUOpConstant{Op: ALUOpAdd, Val: 42}, + ALUOpConstant{Op: ALUOpSub, Val: 42}, + ALUOpConstant{Op: ALUOpMul, Val: 42}, + ALUOpConstant{Op: ALUOpDiv, Val: 42}, + ALUOpConstant{Op: ALUOpOr, Val: 42}, + ALUOpConstant{Op: ALUOpAnd, Val: 42}, + ALUOpConstant{Op: ALUOpShiftLeft, Val: 42}, + ALUOpConstant{Op: ALUOpShiftRight, Val: 42}, + ALUOpConstant{Op: ALUOpMod, Val: 42}, + ALUOpConstant{Op: ALUOpXor, Val: 42}, + + ALUOpX{Op: ALUOpAdd}, + ALUOpX{Op: ALUOpSub}, + ALUOpX{Op: ALUOpMul}, + ALUOpX{Op: ALUOpDiv}, + ALUOpX{Op: ALUOpOr}, + ALUOpX{Op: ALUOpAnd}, + ALUOpX{Op: ALUOpShiftLeft}, + ALUOpX{Op: ALUOpShiftRight}, + ALUOpX{Op: ALUOpMod}, + ALUOpX{Op: ALUOpXor}, + + NegateA{}, + + Jump{Skip: 10}, + JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9}, + JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8}, + JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7}, + JumpIf{Cond: JumpLessOrEqual, 
Val: 42, SkipTrue: 6}, + JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5}, + JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4}, + JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3}, + + TAX{}, + TXA{}, + + RetA{}, + RetConstant{Val: 42}, +} +var allInstructionsExpected = "testdata/all_instructions.bpf" + +// Check that we produce the same output as the canonical bpf_asm +// linux kernel tool. +func TestInterop(t *testing.T) { + out, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(out)) + + bs, err := ioutil.ReadFile(allInstructionsExpected) + if err != nil { + t.Fatalf("reading %s: %s", allInstructionsExpected, err) + } + // First statement is the number of statements, last statement is + // empty. We just ignore both and rely on slice length. + stmts := strings.Split(string(bs), ",") + if len(stmts)-2 != len(out) { + t.Fatalf("test program lengths don't match: %s has %d, Go implementation has %d", allInstructionsExpected, len(stmts)-2, len(allInstructions)) + } + + for i, stmt := range stmts[1 : len(stmts)-2] { + nums := strings.Split(stmt, " ") + if len(nums) != 4 { + t.Fatalf("malformed instruction %d in %s: %s", i+1, allInstructionsExpected, stmt) + } + + actual := out[i] + + op, err := strconv.ParseUint(nums[0], 10, 16) + if err != nil { + t.Fatalf("malformed opcode %s in instruction %d of %s", nums[0], i+1, allInstructionsExpected) + } + if actual.Op != uint16(op) { + t.Errorf("opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x", i+1, allInstructions[i], actual.Op, op) + } + + jt, err := strconv.ParseUint(nums[1], 10, 8) + if err != nil { + t.Fatalf("malformed jt offset %s in instruction %d of %s", nums[1], i+1, allInstructionsExpected) + } + if actual.Jt != uint8(jt) { + t.Errorf("jt mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jt, 
jt) + } + + jf, err := strconv.ParseUint(nums[2], 10, 8) + if err != nil { + t.Fatalf("malformed jf offset %s in instruction %d of %s", nums[2], i+1, allInstructionsExpected) + } + if actual.Jf != uint8(jf) { + t.Errorf("jf mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.Jf, jf) + } + + k, err := strconv.ParseUint(nums[3], 10, 32) + if err != nil { + t.Fatalf("malformed constant %s in instruction %d of %s", nums[3], i+1, allInstructionsExpected) + } + if actual.K != uint32(k) { + t.Errorf("constant mismatch on instruction %d (%#v): got %d, want %d", i+1, allInstructions[i], actual.K, k) + } + } +} + +// Check that assembly and disassembly match each other. +// +// Because we offer "fake" jump conditions that don't appear in the +// machine code, disassembly won't be a 1:1 match with the original +// source, although the behavior will be identical. However, +// reassembling the disassembly should produce an identical program. +func TestAsmDisasm(t *testing.T) { + prog1, err := Assemble(allInstructions) + if err != nil { + t.Fatalf("assembly of allInstructions program failed: %s", err) + } + t.Logf("Assembled program is %d instructions long", len(prog1)) + + src, allDecoded := Disassemble(prog1) + if !allDecoded { + t.Errorf("Disassemble(Assemble(allInstructions)) produced unrecognized instructions:") + for i, inst := range src { + if r, ok := inst.(RawInstruction); ok { + t.Logf(" insn %d, %#v --> %#v", i+1, allInstructions[i], r) + } + } + } + + prog2, err := Assemble(src) + if err != nil { + t.Fatalf("assembly of Disassemble(Assemble(allInstructions)) failed: %s", err) + } + + if len(prog2) != len(prog1) { + t.Fatalf("disassembly changed program size: %d insns before, %d insns after", len(prog1), len(prog2)) + } + if !reflect.DeepEqual(prog1, prog2) { + t.Errorf("program mutated by disassembly:") + for i := range prog2 { + if !reflect.DeepEqual(prog1[i], prog2[i]) { + t.Logf(" insn %d, s: %#v, p1: %#v, p2: %#v", i+1, 
allInstructions[i], prog1[i], prog2[i]) + } + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/testdata/all_instructions.bpf aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/testdata/all_instructions.bpf --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/testdata/all_instructions.bpf 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/testdata/all_instructions.bpf 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1 @@ +50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/testdata/all_instructions.txt aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/testdata/all_instructions.txt --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/testdata/all_instructions.txt 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/bpf/testdata/all_instructions.txt 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,79 @@ +# This filter is compiled to all_instructions.bpf by the `bpf_asm` +# tool, which can be found in the linux kernel source tree under +# tools/net. 
+ +# Load immediate +ld #42 +ldx #42 + +# Load scratch +ld M[3] +ldx M[3] + +# Load absolute +ldb [42] +ldh [42] +ld [42] + +# Load indirect +ldb [x + 42] +ldh [x + 42] +ld [x + 42] + +# Load IPv4 header length +ldx 4*([42]&0xf) + +# Run extension function +ld #len +ld #proto +ld #type +ld #rand + +# Store scratch +st M[3] +stx M[3] + +# A constant +add #42 +sub #42 +mul #42 +div #42 +or #42 +and #42 +lsh #42 +rsh #42 +mod #42 +xor #42 + +# A X +add x +sub x +mul x +div x +or x +and x +lsh x +rsh x +mod x +xor x + +# !A +neg + +# Jumps +ja end +jeq #42,prev,end +jne #42,end +jlt #42,end +jle #42,end +jgt #42,prev,end +jge #42,prev,end +jset #42,prev,end + +# Register transfers +tax +txa + +# Returns +prev: ret a +end: ret #42 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/context.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/context.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/context.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/context.go 2016-05-24 07:05:22.000000000 +0000 @@ -36,12 +36,7 @@ // Contexts. package context // import "golang.org/x/net/context" -import ( - "errors" - "fmt" - "sync" - "time" -) +import "time" // A Context carries a deadline, a cancelation signal, and other values across // API boundaries. @@ -138,48 +133,6 @@ Value(key interface{}) interface{} } -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming @@ -201,247 +154,3 @@ // A CancelFunc does not wait for the work to stop. // After the first call, subsequent calls to a CancelFunc do nothing. type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. 
-func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/context_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/context_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/context_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/context_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// +build !go1.7 + package context import ( @@ -375,7 +377,7 @@ <-c.Done() }, limit: 8, - gccgoLimit: 15, + gccgoLimit: 16, }, { desc: "WithCancel(bg)", diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package ctxhttp - -import "net/http" - -func canceler(client *http.Client, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.5 - -package ctxhttp - -import "net/http" - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client *http.Client, req *http.Request) func() { - rc, ok := client.Transport.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go 2016-05-24 07:05:22.000000000 +0000 @@ -30,8 +30,9 @@ client = http.DefaultClient } - // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. - cancel := canceler(client, req) + // TODO(djd): Respect any existing value of req.Cancel. + cancel := make(chan struct{}) + req.Cancel = cancel type responseAndError struct { resp *http.Response @@ -39,6 +40,11 @@ } result := make(chan responseAndError, 1) + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + go func() { resp, err := client.Do(req) testHookDoReturned() @@ -50,7 +56,7 @@ select { case <-ctx.Done(): testHookContextDoneBeforeHeaders() - cancel() + close(cancel) // Clean up after the goroutine calling client.Do: go func() { if r := <-result; r.resp != nil { @@ -71,7 +77,7 @@ go func() { select { case <-ctx.Done(): - cancel() + close(cancel) case <-c: // The response's Body is closed. 
} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/go17.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/go17.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/go17.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/go17.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. 
+func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/pre_go17.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/pre_go17.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/pre_go17.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/context/pre_go17.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. 
The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. 
+func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/errors.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/errors.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/errors.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/errors.go 2016-05-24 07:05:22.000000000 +0000 @@ -4,7 +4,10 @@ package http2 -import "fmt" +import ( + "errors" + "fmt" +) // An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. 
type ErrCode uint32 @@ -88,3 +91,32 @@ func (e connError) Error() string { return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) } + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/frame.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/frame.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/frame.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/frame.go 2016-05-24 07:05:22.000000000 +0000 @@ -11,7 +11,10 @@ "fmt" "io" "log" + "strings" "sync" + + "golang.org/x/net/http2/hpack" ) const frameHeaderLen = 9 @@ -261,7 +264,7 @@ type Framer struct { r io.Reader lastFrame Frame - errReason string + errDetail error // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. @@ -293,8 +296,20 @@ // to return non-compliant frames or frame orders. // This is for testing and permits using the Framer to test // other HTTP/2 implementations' conformance to the spec. + // It is not compatible with ReadMetaHeaders. 
AllowIllegalReads bool + // ReadMetaHeaders if non-nil causes ReadFrame to merge + // HEADERS and CONTINUATION frames together and return + // MetaHeadersFrame instead. + ReadMetaHeaders *hpack.Decoder + + // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE. + // It's used only if ReadMetaHeaders is set; 0 means a sane default + // (currently 16MB) + // If the limit is hit, MetaHeadersFrame.Truncated is set true. + MaxHeaderListSize uint32 + // TODO: track which type of frame & with which flags was sent // last. Then return an error (unless AllowIllegalWrites) if // we're in the middle of a header block and a @@ -307,6 +322,13 @@ debugFramerBuf *bytes.Buffer } +func (fr *Framer) maxHeaderListSize() uint32 { + if fr.MaxHeaderListSize == 0 { + return 16 << 20 // sane default, per docs + } + return fr.MaxHeaderListSize +} + func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], @@ -402,6 +424,17 @@ fr.maxReadSize = v } +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + // ErrFrameTooLarge is returned from Framer.ReadFrame when the peer // sends a frame that is larger than declared with SetMaxReadFrameSize. var ErrFrameTooLarge = errors.New("http2: frame too large") @@ -423,6 +456,7 @@ // ConnectionError, StreamError, or anything else from from the underlying // reader. 
func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil if fr.lastFrame != nil { fr.lastFrame.invalidate() } @@ -450,6 +484,9 @@ if fr.logReads { log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } return f, nil } @@ -458,7 +495,7 @@ // to the peer before hanging up on them. This might help others debug // their implementations. func (fr *Framer) connError(code ErrCode, reason string) error { - fr.errReason = reason + fr.errDetail = errors.New(reason) return ConnectionError(code) } @@ -1225,6 +1262,196 @@ HeadersEnded() bool } +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. 
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. 
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if !validHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + return mh, nil +} + func summarizeFrame(f Frame) string 
{ var buf bytes.Buffer f.Header().writeDebug(&buf) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/frame_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/frame_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/frame_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/frame_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -12,6 +12,8 @@ "strings" "testing" "unsafe" + + "golang.org/x/net/http2/hpack" ) func testFramer() (*Framer, *bytes.Buffer) { @@ -725,11 +727,249 @@ t.Errorf("%d. after %d good frames, ReadFrame = %v; want ConnectionError(ErrCodeProtocol)\n%s", i, n, err, log.Bytes()) continue } - if f.errReason != tt.wantErr { - t.Errorf("%d. framer eror = %q; want %q\n%s", i, f.errReason, tt.wantErr, log.Bytes()) + if !((f.errDetail == nil && tt.wantErr == "") || (fmt.Sprint(f.errDetail) == tt.wantErr)) { + t.Errorf("%d. framer eror = %q; want %q\n%s", i, f.errDetail, tt.wantErr, log.Bytes()) } if n < tt.atLeast { t.Errorf("%d. 
framer only read %d frames; want at least %d\n%s", i, n, tt.atLeast, log.Bytes()) } } } + +func TestMetaFrameHeader(t *testing.T) { + write := func(f *Framer, frags ...[]byte) { + for i, frag := range frags { + end := (i == len(frags)-1) + if i == 0 { + f.WriteHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: frag, + EndHeaders: end, + }) + } else { + f.WriteContinuation(1, end, frag) + } + } + } + + want := func(flags Flags, length uint32, pairs ...string) *MetaHeadersFrame { + mh := &MetaHeadersFrame{ + HeadersFrame: &HeadersFrame{ + FrameHeader: FrameHeader{ + Type: FrameHeaders, + Flags: flags, + Length: length, + StreamID: 1, + }, + }, + Fields: []hpack.HeaderField(nil), + } + for len(pairs) > 0 { + mh.Fields = append(mh.Fields, hpack.HeaderField{ + Name: pairs[0], + Value: pairs[1], + }) + pairs = pairs[2:] + } + return mh + } + truncated := func(mh *MetaHeadersFrame) *MetaHeadersFrame { + mh.Truncated = true + return mh + } + + const noFlags Flags = 0 + + oneKBString := strings.Repeat("a", 1<<10) + + tests := [...]struct { + name string + w func(*Framer) + want interface{} // *MetaHeaderFrame or error + wantErrReason string + maxHeaderListSize uint32 + }{ + 0: { + name: "single_headers", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/") + write(f, all) + }, + want: want(FlagHeadersEndHeaders, 2, ":method", "GET", ":path", "/"), + }, + 1: { + name: "with_continuation", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", "bar") + write(f, all[:1], all[1:]) + }, + want: want(noFlags, 1, ":method", "GET", ":path", "/", "foo", "bar"), + }, + 2: { + name: "with_two_continuation", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", "bar") + write(f, all[:2], all[2:4], all[4:]) + }, + want: want(noFlags, 2, ":method", "GET", ":path", "/", "foo", "bar"), + }, + 3: { + name: 
"big_string_okay", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", oneKBString) + write(f, all[:2], all[2:]) + }, + want: want(noFlags, 2, ":method", "GET", ":path", "/", "foo", oneKBString), + }, + 4: { + name: "big_string_error", + w: func(f *Framer) { + var he hpackEncoder + all := he.encodeHeaderRaw(t, ":method", "GET", ":path", "/", "foo", oneKBString) + write(f, all[:2], all[2:]) + }, + maxHeaderListSize: (1 << 10) / 2, + want: ConnectionError(ErrCodeCompression), + }, + 5: { + name: "max_header_list_truncated", + w: func(f *Framer) { + var he hpackEncoder + var pairs = []string{":method", "GET", ":path", "/"} + for i := 0; i < 100; i++ { + pairs = append(pairs, "foo", "bar") + } + all := he.encodeHeaderRaw(t, pairs...) + write(f, all[:2], all[2:]) + }, + maxHeaderListSize: (1 << 10) / 2, + want: truncated(want(noFlags, 2, + ":method", "GET", + ":path", "/", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", + "foo", "bar", // 11 + )), + }, + 6: { + name: "pseudo_order", + w: func(f *Framer) { + write(f, encodeHeaderRaw(t, + ":method", "GET", + "foo", "bar", + ":path", "/", // bogus + )) + }, + want: StreamError{1, ErrCodeProtocol}, + wantErrReason: "pseudo header field after regular", + }, + 7: { + name: "pseudo_unknown", + w: func(f *Framer) { + write(f, encodeHeaderRaw(t, + ":unknown", "foo", // bogus + "foo", "bar", + )) + }, + want: StreamError{1, ErrCodeProtocol}, + wantErrReason: "invalid pseudo-header \":unknown\"", + }, + 8: { + name: "pseudo_mix_request_response", + w: func(f *Framer) { + write(f, encodeHeaderRaw(t, + ":method", "GET", + ":status", "100", + )) + }, + want: StreamError{1, ErrCodeProtocol}, + wantErrReason: "mix of request and response pseudo headers", + }, + 9: { + name: "pseudo_dup", + w: func(f *Framer) { + write(f, encodeHeaderRaw(t, + ":method", "GET", + 
":method", "POST", + )) + }, + want: StreamError{1, ErrCodeProtocol}, + wantErrReason: "duplicate pseudo-header \":method\"", + }, + 10: { + name: "trailer_okay_no_pseudo", + w: func(f *Framer) { write(f, encodeHeaderRaw(t, "foo", "bar")) }, + want: want(FlagHeadersEndHeaders, 8, "foo", "bar"), + }, + 11: { + name: "invalid_field_name", + w: func(f *Framer) { write(f, encodeHeaderRaw(t, "CapitalBad", "x")) }, + want: StreamError{1, ErrCodeProtocol}, + wantErrReason: "invalid header field name \"CapitalBad\"", + }, + 12: { + name: "invalid_field_value", + w: func(f *Framer) { write(f, encodeHeaderRaw(t, "key", "bad_null\x00")) }, + want: StreamError{1, ErrCodeProtocol}, + wantErrReason: "invalid header field value \"bad_null\\x00\"", + }, + } + for i, tt := range tests { + buf := new(bytes.Buffer) + f := NewFramer(buf, buf) + f.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + f.MaxHeaderListSize = tt.maxHeaderListSize + tt.w(f) + + name := tt.name + if name == "" { + name = fmt.Sprintf("test index %d", i) + } + + var got interface{} + var err error + got, err = f.ReadFrame() + if err != nil { + got = err + } + if !reflect.DeepEqual(got, tt.want) { + if mhg, ok := got.(*MetaHeadersFrame); ok { + if mhw, ok := tt.want.(*MetaHeadersFrame); ok { + hg := mhg.HeadersFrame + hw := mhw.HeadersFrame + if hg != nil && hw != nil && !reflect.DeepEqual(*hg, *hw) { + t.Errorf("%s: headers differ:\n got: %+v\nwant: %+v\n", name, *hg, *hw) + } + } + } + str := func(v interface{}) string { + if _, ok := v.(error); ok { + return fmt.Sprintf("error %v", v) + } else { + return fmt.Sprintf("value %#v", v) + } + } + t.Errorf("%s:\n got: %v\nwant: %s", name, str(got), str(tt.want)) + } + if tt.wantErrReason != "" && tt.wantErrReason != fmt.Sprint(f.errDetail) { + t.Errorf("%s: got error reason %q; want %q", name, f.errDetail, tt.wantErrReason) + } + } +} + +func encodeHeaderRaw(t *testing.T, pairs ...string) []byte { + var he hpackEncoder + return he.encodeHeaderRaw(t, 
pairs...) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/h2demo/launch.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/h2demo/launch.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/h2demo/launch.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/h2demo/launch.go 2016-05-24 07:05:22.000000000 +0000 @@ -170,9 +170,9 @@ }, }, NetworkInterfaces: []*compute.NetworkInterface{ - &compute.NetworkInterface{ + { AccessConfigs: []*compute.AccessConfig{ - &compute.AccessConfig{ + { Type: "ONE_TO_ONE_NAT", Name: "External NAT", NatIP: natIP, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/h2i/h2i.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/h2i/h2i.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/h2i/h2i.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/h2i/h2i.go 2016-05-24 07:05:22.000000000 +0000 @@ -56,8 +56,8 @@ } var commands = map[string]command{ - "ping": command{run: (*h2i).cmdPing}, - "settings": command{ + "ping": {run: (*h2i).cmdPing}, + "settings": { run: (*h2i).cmdSettings, complete: func() []string { return []string{ @@ -71,14 +71,13 @@ } }, }, - "quit": command{run: (*h2i).cmdQuit}, - "headers": command{run: (*h2i).cmdHeaders}, + "quit": {run: (*h2i).cmdQuit}, + "headers": {run: (*h2i).cmdHeaders}, } func usage() { fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") flag.PrintDefaults() - os.Exit(1) } // withPort adds ":443" if another port isn't already present. 
@@ -111,6 +110,7 @@ flag.Parse() if flag.NArg() != 1 { usage() + os.Exit(2) } log.SetFlags(0) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/hpack/encode.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/hpack/encode.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/hpack/encode.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/hpack/encode.go 2016-05-24 07:05:22.000000000 +0000 @@ -144,7 +144,7 @@ // shouldIndex reports whether f should be indexed. func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.size() <= e.dynTab.maxSize + return !f.Sensitive && f.Size() <= e.dynTab.maxSize } // appendIndexed appends index i, as encoded in "Indexed Header Field" diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/hpack/hpack.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/hpack/hpack.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/hpack/hpack.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/hpack/hpack.go 2016-05-24 07:05:22.000000000 +0000 @@ -41,6 +41,14 @@ Sensitive bool } +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + func (hf HeaderField) String() string { var suffix string if hf.Sensitive { @@ -49,7 +57,8 @@ return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) } -func (hf *HeaderField) size() uint32 { +// Size returns the size of an entry per RFC 7540 section 5.2. 
+func (hf HeaderField) Size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. The size of an entry is the sum of its name's @@ -171,7 +180,7 @@ func (dt *dynamicTable) add(f HeaderField) { dt.ents = append(dt.ents, f) - dt.size += f.size() + dt.size += f.Size() dt.evict() } @@ -179,7 +188,7 @@ func (dt *dynamicTable) evict() { base := dt.ents // keep base pointer of slice for dt.size > dt.maxSize { - dt.size -= dt.ents[0].size() + dt.size -= dt.ents[0].Size() dt.ents = dt.ents[1:] } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/http2.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/http2.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/http2.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/http2.go 2016-05-24 07:05:22.000000000 +0000 @@ -23,6 +23,7 @@ "io" "net/http" "os" + "sort" "strconv" "strings" "sync" @@ -169,8 +170,9 @@ // RFC 7230 says: // header-field = field-name ":" OWS field-value OWS // field-name = token +// token = 1*tchar // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / " +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA // Further, http2 says: // "Just as in HTTP/1.x, header field names are strings of ASCII // characters that are compared in a case-insensitive @@ -320,7 +322,7 @@ } // bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC2616, section 4.4. +// permits a body. See RFC 2616, section 4.4. 
func bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: @@ -427,3 +429,36 @@ type connectionStater interface { ConnectionState() tls.ConnectionState } + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owners, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/http2_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/http2_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/http2_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/http2_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -65,7 +65,7 @@ return len(p), nil } -// like encodeHeader, but don't add implicit psuedo headers. +// like encodeHeader, but don't add implicit pseudo headers. 
func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { var buf bytes.Buffer enc := hpack.NewEncoder(&buf) @@ -172,3 +172,27 @@ d[0] = "XXX" } } + +func TestSorterPoolAllocs(t *testing.T) { + ss := []string{"a", "b", "c"} + h := http.Header{ + "a": nil, + "b": nil, + "c": nil, + } + sorter := new(sorter) + + if allocs := testing.AllocsPerRun(100, func() { + sorter.SortStrings(ss) + }); allocs >= 1 { + t.Logf("SortStrings allocs = %v; want <1", allocs) + } + + if allocs := testing.AllocsPerRun(5, func() { + if len(sorter.Keys(h)) != 3 { + t.Fatal("wrong result") + } + }); allocs > 0 { + t.Logf("Keys allocs = %v; want <1", allocs) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/server.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/server.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/server.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/server.go 2016-05-24 07:05:22.000000000 +0000 @@ -51,7 +51,6 @@ "os" "reflect" "runtime" - "sort" "strconv" "strings" "sync" @@ -276,10 +275,10 @@ sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, nil) - sc.hpackDecoder.SetMaxStringLength(sc.maxHeaderStringLen()) fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr @@ -375,7 +374,6 @@ bw *bufferedWriter // writing to conn handler http.Handler framer *Framer - hpackDecoder *hpack.Decoder doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan readFrameResult // written by serverConn.readFrames wantWriteFrameCh chan frameWriteMsg // from handlers -> serve @@ -402,7 +400,6 @@ headerTableSize uint32 
peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - req requestParam // non-zero while reading request headers writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush writeSched writeScheduler @@ -411,22 +408,13 @@ goAwayCode ErrCode shutdownTimerCh <-chan time.Time // nil until used shutdownTimer *time.Timer // nil until used + freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder } -func (sc *serverConn) maxHeaderStringLen() int { - v := sc.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - func (sc *serverConn) maxHeaderListSize() uint32 { n := sc.hs.MaxHeaderBytes if n <= 0 { @@ -439,21 +427,6 @@ return uint32(n + typicalHeaders*perFieldOverhead) } -// requestParam is the state of the next request, initialized over -// potentially several frames HEADERS + zero or more CONTINUATION -// frames. -type requestParam struct { - // stream is non-nil if we're reading (HEADER or CONTINUATION) - // frames for a request (but not DATA). - stream *stream - header http.Header - method, path string - scheme, authority string - sawRegularHeader bool // saw a non-pseudo header already - invalidHeader bool // an invalid header was seen - headerListSize int64 // actually uint32, but easier math this way -} - // stream represents a stream. This is the minimal metadata needed by // the serve goroutine. Most of the actual stream state is owned by // the http.Handler's goroutine in the responseWriter. 
Because the @@ -480,6 +453,7 @@ sentReset bool // only true once detached from streams map gotReset bool // only true once detacted from streams map gotTrailerHeader bool // HEADER frame for trailers was seen + reqBuf []byte trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -589,87 +563,6 @@ } } -func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) { - sc.serveG.check() - if VerboseLogs { - sc.vlogf("http2: server decoded %v", f) - } - switch { - case !validHeaderFieldValue(f.Value): // f.Name checked _after_ pseudo check, since ':' is invalid - sc.req.invalidHeader = true - case strings.HasPrefix(f.Name, ":"): - if sc.req.sawRegularHeader { - sc.logf("pseudo-header after regular header") - sc.req.invalidHeader = true - return - } - var dst *string - switch f.Name { - case ":method": - dst = &sc.req.method - case ":path": - dst = &sc.req.path - case ":scheme": - dst = &sc.req.scheme - case ":authority": - dst = &sc.req.authority - default: - // 8.1.2.1 Pseudo-Header Fields - // "Endpoints MUST treat a request or response - // that contains undefined or invalid - // pseudo-header fields as malformed (Section - // 8.1.2.6)." 
- sc.logf("invalid pseudo-header %q", f.Name) - sc.req.invalidHeader = true - return - } - if *dst != "" { - sc.logf("duplicate pseudo-header %q sent", f.Name) - sc.req.invalidHeader = true - return - } - *dst = f.Value - case !validHeaderFieldName(f.Name): - sc.req.invalidHeader = true - default: - sc.req.sawRegularHeader = true - sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value) - const headerFieldOverhead = 32 // per spec - sc.req.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead - if sc.req.headerListSize > int64(sc.maxHeaderListSize()) { - sc.hpackDecoder.SetEmitEnabled(false) - } - } -} - -func (st *stream) onNewTrailerField(f hpack.HeaderField) { - sc := st.sc - sc.serveG.check() - if VerboseLogs { - sc.vlogf("http2: server decoded trailer %v", f) - } - switch { - case strings.HasPrefix(f.Name, ":"): - sc.req.invalidHeader = true - return - case !validHeaderFieldName(f.Name) || !validHeaderFieldValue(f.Value): - sc.req.invalidHeader = true - return - default: - key := sc.canonicalHeader(f.Name) - if st.trailer != nil { - vv := append(st.trailer[key], f.Value) - st.trailer[key] = vv - - // arbitrary; TODO: read spec about header list size limits wrt trailers - const tooBig = 1000 - if len(vv) >= tooBig { - sc.hpackDecoder.SetEmitEnabled(false) - } - } - } -} - func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() cv, ok := commonCanonHeader[v] @@ -704,10 +597,11 @@ // It's run on its own goroutine. 
func (sc *serverConn) readFrames() { gate := make(gate) + gateDone := gate.Done for { f, err := sc.framer.ReadFrame() select { - case sc.readFrameCh <- readFrameResult{f, err, gate.Done}: + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: case <-sc.doneServing: return } @@ -1183,10 +1077,8 @@ switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) - case *HeadersFrame: + case *MetaHeadersFrame: return sc.processHeaders(f) - case *ContinuationFrame: - return sc.processContinuation(f) case *WindowUpdateFrame: return sc.processWindowUpdate(f) case *PingFrame: @@ -1286,6 +1178,18 @@ } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.forgetStream(st.id) + if st.reqBuf != nil { + // Stash this request body buffer (64k) away for reuse + // by a future POST/PUT/etc. + // + // TODO(bradfitz): share on the server? sync.Pool? + // Server requires locks and might hurt contention. + // sync.Pool might work, or might be worse, depending + // on goroutine CPU migrations. (get and put on + // separate CPUs). Maybe a mix of strategies. But + // this is an easy win for now. + sc.freeRequestBodyBuf = st.reqBuf + } } func (sc *serverConn) processSettings(f *SettingsFrame) error { @@ -1442,7 +1346,7 @@ } } -func (sc *serverConn) processHeaders(f *HeadersFrame) error { +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.Header().StreamID if sc.inGoAway { @@ -1471,13 +1375,11 @@ // endpoint has opened or reserved. [...] An endpoint that // receives an unexpected stream identifier MUST respond with // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. 
- if id <= sc.maxStreamID || sc.req.stream != nil { + if id <= sc.maxStreamID { return ConnectionError(ErrCodeProtocol) } + sc.maxStreamID = id - if id > sc.maxStreamID { - sc.maxStreamID = id - } st = &stream{ sc: sc, id: id, @@ -1501,50 +1403,6 @@ if sc.curOpenStreams == 1 { sc.setConnState(http.StateActive) } - sc.req = requestParam{ - stream: st, - header: make(http.Header), - } - sc.hpackDecoder.SetEmitFunc(sc.onNewHeaderField) - sc.hpackDecoder.SetEmitEnabled(true) - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (st *stream) processTrailerHeaders(f *HeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return StreamError{st.id, ErrCodeProtocol} - } - sc.resetPendingRequest() // we use invalidHeader from it for trailers - return st.processTrailerHeaderBlockFragment(f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processContinuation(f *ContinuationFrame) error { - sc.serveG.check() - st := sc.streams[f.Header().StreamID] - if st.gotTrailerHeader { - return st.processTrailerHeaderBlockFragment(f.HeaderBlockFragment(), f.HeadersEnded()) - } - return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded()) -} - -func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error { - sc.serveG.check() - if _, err := sc.hpackDecoder.Write(frag); err != nil { - return ConnectionError(ErrCodeCompression) - } - if !end { - return nil - } - if err := sc.hpackDecoder.Close(); err != nil { - return ConnectionError(ErrCodeCompression) - } - defer sc.resetPendingRequest() if sc.curOpenStreams > sc.advMaxStreams { // "Endpoints MUST NOT exceed the limit set by their // peer. 
An endpoint that receives a HEADERS frame @@ -1564,7 +1422,7 @@ return StreamError{st.id, ErrCodeRefusedStream} } - rw, req, err := sc.newWriterAndRequest() + rw, req, err := sc.newWriterAndRequest(st, f) if err != nil { return err } @@ -1576,36 +1434,38 @@ st.declBodyBytes = req.ContentLength handler := sc.handler.ServeHTTP - if !sc.hpackDecoder.EmitEnabled() { + if f.Truncated { // Their header list was too long. Send a 431 error. handler = handleHeaderListTooLong + } else if err := checkValidHTTP2Request(req); err != nil { + handler = new400Handler(err) } go sc.runHandler(rw, req, handler) return nil } -func (st *stream) processTrailerHeaderBlockFragment(frag []byte, end bool) error { +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() - sc.hpackDecoder.SetEmitFunc(st.onNewTrailerField) - if _, err := sc.hpackDecoder.Write(frag); err != nil { - return ConnectionError(ErrCodeCompression) + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) } - if !end { - return nil + st.gotTrailerHeader = true + if !f.StreamEnded() { + return StreamError{st.id, ErrCodeProtocol} } - rp := &sc.req - if rp.invalidHeader { - return StreamError{rp.stream.id, ErrCodeProtocol} + if len(f.PseudoFields()) > 0 { + return StreamError{st.id, ErrCodeProtocol} } - - err := sc.hpackDecoder.Close() - st.endStream() - if err != nil { - return ConnectionError(ErrCodeCompression) + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + st.trailer[key] = append(st.trailer[key], hf.Value) + } } + st.endStream() return nil } @@ -1650,29 +1510,21 @@ } } -// resetPendingRequest zeros out all state related to a HEADERS frame -// and its zero or more CONTINUATION frames sent to start a new -// request. 
-func (sc *serverConn) resetPendingRequest() { - sc.serveG.check() - sc.req = requestParam{} -} - -func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := &sc.req - if rp.invalidHeader { - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} - } + method := f.PseudoValue("method") + path := f.PseudoValue("path") + scheme := f.PseudoValue("scheme") + authority := f.PseudoValue("authority") - isConnect := rp.method == "CONNECT" + isConnect := method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + if path != "" || scheme != "" || authority == "" { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} } - } else if rp.method == "" || rp.path == "" || - (rp.scheme != "https" && rp.scheme != "http") { + } else if method == "" || path == "" || + (scheme != "https" && scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -1683,35 +1535,40 @@ // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} } - bodyOpen := rp.stream.state == stateOpen - if rp.method == "HEAD" && bodyOpen { + bodyOpen := !f.StreamEnded() + if method == "HEAD" && bodyOpen { // HEAD requests can't have bodies - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} } var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if scheme == "https" { tlsState = sc.tlsState } - authority := rp.authority + + header := make(http.Header) + for _, hf := range 
f.RegularFields() { + header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if authority == "" { - authority = rp.header.Get("Host") + authority = header.Get("Host") } - needsContinue := rp.header.Get("Expect") == "100-continue" + needsContinue := header.Get("Expect") == "100-continue" if needsContinue { - rp.header.Del("Expect") + header.Del("Expect") } // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) + if cookies := header["Cookie"]; len(cookies) > 1 { + header.Set("Cookie", strings.Join(cookies, "; ")) } // Setup Trailers var trailer http.Header - for _, v := range rp.header["Trailer"] { + for _, v := range header["Trailer"] { for _, key := range strings.Split(v, ",") { key = http.CanonicalHeaderKey(strings.TrimSpace(key)) switch key { @@ -1726,31 +1583,31 @@ } } } - delete(rp.header, "Trailer") + delete(header, "Trailer") body := &requestBody{ conn: sc, - stream: rp.stream, + stream: st, needsContinue: needsContinue, } var url_ *url.URL var requestURI string if isConnect { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior + url_ = &url.URL{Host: authority} + requestURI = authority // mimic HTTP/1 server behavior } else { var err error - url_, err = url.ParseRequestURI(rp.path) + url_, err = url.ParseRequestURI(path) if err != nil { - return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol} + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} } - requestURI = rp.path + requestURI = path } req := &http.Request{ - Method: rp.method, + Method: method, URL: url_, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, + Header: header, RequestURI: requestURI, Proto: "HTTP/2.0", ProtoMajor: 2, @@ -1761,11 +1618,16 @@ Trailer: trailer, } if bodyOpen { + // Disabled, per golang.org/issue/14960: + // st.reqBuf = sc.getRequestBodyBuf() + // TODO: remove this 64k of garbage per request (again, but 
without a data race): + buf := make([]byte, initialWindowSize) + body.pipe = &pipe{ - b: &fixedBuffer{buf: make([]byte, initialWindowSize)}, // TODO: garbage + b: &fixedBuffer{buf: buf}, } - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 @@ -1778,7 +1640,7 @@ rws.conn = sc rws.bw = bwSave rws.bw.Reset(chunkWriter{rws}) - rws.stream = rp.stream + rws.stream = st rws.req = req rws.body = body @@ -1786,6 +1648,15 @@ return rw, req, nil } +func (sc *serverConn) getRequestBodyBuf() []byte { + sc.serveG.check() + if buf := sc.freeRequestBodyBuf; buf != nil { + sc.freeRequestBodyBuf = nil + return buf + } + return make([]byte, initialWindowSize) +} + // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { didPanic := true @@ -2160,7 +2031,12 @@ rws.declareTrailer(trailerKey) rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv } - sort.Strings(rws.trailers) + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } } func (w *responseWriter) Flush() { @@ -2306,3 +2182,34 @@ } } } + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2Request checks whether req is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. 
+func checkValidHTTP2Request(req *http.Request) error { + for _, h := range connHeaders { + if _, ok := req.Header[h]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", h) + } + } + te := req.Header["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/server_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/server_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/server_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/server_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -188,7 +188,7 @@ // awaitIdle heuristically awaits for the server conn's select loop to be idle. // The heuristic is that the server connection's serve loop must schedule -// 50 times in a row without any channel sends or receives occuring. +// 50 times in a row without any channel sends or receives occurring. func (st *serverTester) awaitIdle() { remain := 50 last := st.loopNum() @@ -204,6 +204,13 @@ } func (st *serverTester) Close() { + if st.t.Failed() { + // If we failed already (and are likely in a Fatal, + // unwindowing), force close the connection, so the + // httptest.Server doesn't wait forever for the conn + // to close. 
+ st.cc.Close() + } st.ts.Close() if st.cc != nil { st.cc.Close() @@ -2515,7 +2522,7 @@ defer st.Close() st.greet() - maxAllowed := st.sc.maxHeaderStringLen() + maxAllowed := st.sc.framer.maxHeaderStringLen() // Crank this up, now that we have a conn connected with the // hpack.Decoder's max string length set has been initialized @@ -2524,8 +2531,12 @@ // the max string size. serverConfig.MaxHeaderBytes = 1 << 20 - // First a request with a header that's exactly the max allowed size. + // First a request with a header that's exactly the max allowed size + // for the hpack compression. It's still too long for the header list + // size, so we'll get the 431 error, but that keeps the compression + // context still valid. hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed)) + st.writeHeaders(HeadersFrameParam{ StreamID: 1, BlockFragment: hbf, @@ -2533,8 +2544,24 @@ EndHeaders: true, }) h := st.wantHeaders() - if !h.HeadersEnded() || !h.StreamEnded() { - t.Errorf("Unexpected HEADER frame %v", h) + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } + df := st.wantData() + if !strings.Contains(string(df.Data()), "HTTP Error 431") { + t.Errorf("Unexpected data body: %q", df.Data()) + } + if !df.StreamEnded() { + t.Fatalf("expect data stream end") } // And now send one that's just one byte too big. 
@@ -2739,6 +2766,7 @@ } func BenchmarkServerGets(b *testing.B) { + defer disableGoroutineTracking()() b.ReportAllocs() const msg = "Hello, world" @@ -2770,6 +2798,7 @@ } func BenchmarkServerPosts(b *testing.B) { + defer disableGoroutineTracking()() b.ReportAllocs() const msg = "Hello, world" @@ -2982,6 +3011,76 @@ } } +func disableGoroutineTracking() (restore func()) { + old := DebugGoroutines + DebugGoroutines = false + return func() { DebugGoroutines = old } +} + +func BenchmarkServer_GetRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Error("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + + st.greet() + // Give the server quota to reply. (plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "GET") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + st.wantData() + } +} + +func BenchmarkServer_PostRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Error("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + // Give the server quota to reply. 
(plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "POST") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: false, + EndHeaders: true, + }) + st.writeData(streamID, true, nil) + st.wantHeaders() + st.wantData() + } +} + type connStateConn struct { net.Conn cs tls.ConnectionState @@ -3057,6 +3156,27 @@ } } +// golang.org/issue/14214 +func TestServer_Rejects_ConnHeaders(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + t.Errorf("should not get to Handler") + return nil + }, func(st *serverTester) { + st.bodylessReq1("connection", "foo") + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "400"}, + {"content-type", "text/plain; charset=utf-8"}, + {"x-content-type-options", "nosniff"}, + {"content-length", "51"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + type hpackEncoder struct { enc *hpack.Encoder buf bytes.Buffer @@ -3080,3 +3200,45 @@ } return he.buf.Bytes() } + +func TestCheckValidHTTP2Request(t *testing.T) { + tests := []struct { + req *http.Request + want error + }{ + { + req: &http.Request{Header: http.Header{"Te": {"trailers"}}}, + want: nil, + }, + { + req: &http.Request{Header: http.Header{"Te": {"trailers", "bogus"}}}, + want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`), + }, + { + req: &http.Request{Header: http.Header{"Foo": {""}}}, + want: nil, + }, + { + req: &http.Request{Header: http.Header{"Connection": {""}}}, + want: errors.New(`request header "Connection" is not valid in HTTP/2`), + }, + { + req: &http.Request{Header: http.Header{"Proxy-Connection": {""}}}, + want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`), + }, + { + 
req: &http.Request{Header: http.Header{"Keep-Alive": {""}}}, + want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`), + }, + { + req: &http.Request{Header: http.Header{"Upgrade": {""}}}, + want: errors.New(`request header "Upgrade" is not valid in HTTP/2`), + }, + } + for i, tt := range tests { + got := checkValidHTTP2Request(tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/transport.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/transport.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/transport.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/transport.go 2016-05-24 07:05:22.000000000 +0000 @@ -187,8 +187,8 @@ done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu // owned by clientConnReadLoop: - pastHeaders bool // got HEADERS w/ END_HEADERS - pastTrailers bool // got second HEADERS frame w/ END_HEADERS + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -333,8 +333,12 @@ if t.TLSClientConfig != nil { *cfg = *t.TLSClientConfig } - cfg.NextProtos = []string{NextProtoTLS} // TODO: don't override if already in list - cfg.ServerName = host + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 
+ } + if cfg.ServerName == "" { + cfg.ServerName = host + } return cfg } @@ -401,6 +405,8 @@ cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on // henc in response to SETTINGS frames? @@ -412,8 +418,8 @@ } initialSettings := []Setting{ - Setting{ID: SettingEnablePush, Val: 0}, - Setting{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) @@ -939,14 +945,11 @@ // Host is :authority, already sent. // Content-Length is automatic, set below. continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade": + case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": // Per 8.1.2.2 Connection-Specific Header // Fields, don't send connection-specific - // fields. We deal with these earlier in - // RoundTrip, deciding whether they're - // error-worthy, but we don't want to mutate - // the user's *Request so at this point, just - // skip over them at this point. + // fields. We have already checked if any + // are error-worthy so just ignore the rest. 
continue case "user-agent": // Match Go's http1 behavior: at most one @@ -1064,15 +1067,6 @@ cc *ClientConn activeRes map[uint32]*clientStream // keyed by streamID closeWhenIdle bool - - hdec *hpack.Decoder - - // Fields reset on each HEADERS: - nextRes *http.Response - sawRegHeader bool // saw non-pseudo header - reqMalformed error // non-nil once known to be malformed - lastHeaderEndsStream bool - headerListSize int64 // actually uint32, but easier math this way } // readLoop runs in its own goroutine and reads and dispatches frames. @@ -1081,7 +1075,6 @@ cc: cc, activeRes: make(map[uint32]*clientStream), } - rl.hdec = hpack.NewDecoder(initialHeaderTableSize, rl.onNewHeaderField) defer rl.cleanup() cc.readerErr = rl.run() @@ -1131,8 +1124,10 @@ cc.vlogf("Transport readFrame error: (%T) %v", err, err) } if se, ok := err.(StreamError); ok { - // TODO: deal with stream errors from the framer. - return se + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + rl.endStreamError(cs, cc.fr.errDetail) + } + continue } else if err != nil { return err } @@ -1142,13 +1137,10 @@ maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { - case *HeadersFrame: + case *MetaHeadersFrame: err = rl.processHeaders(f) maybeIdle = true gotReply = true - case *ContinuationFrame: - err = rl.processContinuation(f) - maybeIdle = true case *DataFrame: err = rl.processData(f) maybeIdle = true @@ -1178,92 +1170,98 @@ } } -func (rl *clientConnReadLoop) processHeaders(f *HeadersFrame) error { - rl.sawRegHeader = false - rl.reqMalformed = nil - rl.lastHeaderEndsStream = f.StreamEnded() - rl.headerListSize = 0 - rl.nextRes = &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: make(http.Header), - } - rl.hdec.SetEmitEnabled(true) - return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded()) -} - -func (rl *clientConnReadLoop) processContinuation(f *ContinuationFrame) error { - return 
rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded()) -} - -func (rl *clientConnReadLoop) processHeaderBlockFragment(frag []byte, streamID uint32, finalFrag bool) error { +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { cc := rl.cc - streamEnded := rl.lastHeaderEndsStream - cs := cc.streamByID(streamID, streamEnded && finalFrag) + cs := cc.streamByID(f.StreamID, f.StreamEnded()) if cs == nil { // We'd get here if we canceled a request while the - // server was mid-way through replying with its - // headers. (The case of a CONTINUATION arriving - // without HEADERS would be rejected earlier by the - // Framer). So if this was just something we canceled, - // ignore it. + // server had its response still in flight. So if this + // was just something we canceled, ignore it. return nil } - if cs.pastHeaders { - rl.hdec.SetEmitFunc(func(f hpack.HeaderField) { rl.onNewTrailerField(cs, f) }) + if !cs.pastHeaders { + cs.pastHeaders = true } else { - rl.hdec.SetEmitFunc(rl.onNewHeaderField) + return rl.processTrailers(cs, f) } - _, err := rl.hdec.Write(frag) + + res, err := rl.handleResponse(cs, f) if err != nil { - return ConnectionError(ErrCodeCompression) - } - if finalFrag { - if err := rl.hdec.Close(); err != nil { - return ConnectionError(ErrCodeCompression) + if _, ok := err.(ConnectionError); ok { + return err } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive } - - if !finalFrag { + if res == nil { + // (nil, nil) special case. See handleResponse docs. return nil } - - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - // We're dealing with trailers. (and specifically the - // final frame of headers) - if cs.pastTrailers { - // Too many HEADERS frames for this stream. 
- return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !streamEnded { - // We expect that any header block fragment - // frame for trailers with END_HEADERS also - // has END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - rl.endStream(cs) - return nil + if res.Body != noBody { + rl.activeRes[cs.ID] = cs } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} - if rl.reqMalformed != nil { - cs.resc <- resAndError{err: rl.reqMalformed} - rl.cc.writeStreamReset(cs.ID, ErrCodeProtocol, rl.reqMalformed) - return nil +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") } - res := rl.nextRes - - if res.StatusCode == 100 { + if statusCode == 100 { // Just skip 100-continue response headers for now. // TODO: golang.org/issue/13851 for doing it properly. 
cs.pastHeaders = false // do it all again - return nil + return nil, nil } - if !streamEnded || cs.req.Method == "HEAD" { + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { res.ContentLength = -1 if clens := res.Header["Content-Length"]; len(clens) == 1 { if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { @@ -1278,27 +1276,51 @@ } } - if streamEnded { + if streamEnded || isHead { res.Body = noBody - } else { - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(requestCancel(cs.req)) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - } - rl.activeRes[cs.ID] = cs + return res, nil } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - rl.nextRes = nil // unused now; will be reset next HEADERS frame + buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage + cs.bufPipe = pipe{b: buf} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(requestCancel(cs.req)) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + 
res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) return nil } @@ -1416,6 +1438,7 @@ cc.mu.Unlock() if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) return err } } @@ -1431,11 +1454,14 @@ func (rl *clientConnReadLoop) endStream(cs *clientStream) { // TODO: check that any declared content-length matches, like // server.go's (*stream).endStream method. 
- err := io.EOF - code := cs.copyTrailers - if rl.reqMalformed != nil { - err = rl.reqMalformed - code = nil + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers } cs.bufPipe.closeWithErrorAndCode(err, code) delete(rl.activeRes, cs.ID) @@ -1574,118 +1600,6 @@ errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) -func (rl *clientConnReadLoop) checkHeaderField(f hpack.HeaderField) bool { - if rl.reqMalformed != nil { - return false - } - - const headerFieldOverhead = 32 // per spec - rl.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead - if max := rl.cc.t.maxHeaderListSize(); max != 0 && rl.headerListSize > int64(max) { - rl.hdec.SetEmitEnabled(false) - rl.reqMalformed = errResponseHeaderListSize - return false - } - - if !validHeaderFieldValue(f.Value) { - rl.reqMalformed = errInvalidHeaderFieldValue - return false - } - - isPseudo := strings.HasPrefix(f.Name, ":") - if isPseudo { - if rl.sawRegHeader { - rl.reqMalformed = errors.New("http2: invalid pseudo header after regular header") - return false - } - } else { - if !validHeaderFieldName(f.Name) { - rl.reqMalformed = errInvalidHeaderFieldName - return false - } - rl.sawRegHeader = true - } - - return true -} - -// onNewHeaderField runs on the readLoop goroutine whenever a new -// hpack header field is decoded. 
-func (rl *clientConnReadLoop) onNewHeaderField(f hpack.HeaderField) { - cc := rl.cc - if VerboseLogs { - cc.logf("http2: Transport decoded %v", f) - } - - if !rl.checkHeaderField(f) { - return - } - - isPseudo := strings.HasPrefix(f.Name, ":") - if isPseudo { - switch f.Name { - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - rl.reqMalformed = errors.New("http2: invalid :status") - return - } - rl.nextRes.Status = f.Value + " " + http.StatusText(code) - rl.nextRes.StatusCode = code - default: - // "Endpoints MUST NOT generate pseudo-header - // fields other than those defined in this - // document." - rl.reqMalformed = fmt.Errorf("http2: unknown response pseudo header %q", f.Name) - } - return - } - - key := http.CanonicalHeaderKey(f.Name) - if key == "Trailer" { - t := rl.nextRes.Trailer - if t == nil { - t = make(http.Header) - rl.nextRes.Trailer = t - } - foreachHeaderElement(f.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - rl.nextRes.Header.Add(key, f.Value) - } -} - -func (rl *clientConnReadLoop) onNewTrailerField(cs *clientStream, f hpack.HeaderField) { - if VerboseLogs { - rl.cc.logf("http2: Transport decoded trailer %v", f) - } - if !rl.checkHeaderField(f) { - return - } - if strings.HasPrefix(f.Name, ":") { - // Pseudo-header fields MUST NOT appear in - // trailers. Endpoints MUST treat a request or - // response that contains undefined or invalid - // pseudo-header fields as malformed. - rl.reqMalformed = errPseudoTrailers - return - } - - key := http.CanonicalHeaderKey(f.Name) - - // The spec says one must predeclare their trailers but in practice - // popular users (which is to say the only user we found) do not so we - // violate the spec and accept all of them. 
- const acceptAllTrailers = true - if _, ok := (*cs.resTrailer)[key]; ok || acceptAllTrailers { - if cs.trailer == nil { - cs.trailer = make(http.Header) - } - cs.trailer[key] = append(cs.trailer[key], f.Value) - } -} - func (cc *ClientConn) logf(format string, args ...interface{}) { cc.t.logf(format, args...) } @@ -1723,13 +1637,18 @@ // call gzip.NewReader on the first call to Read type gzipReader struct { body io.ReadCloser // underlying Response.Body - zr io.Reader // lazily-initialized gzip reader + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error } func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } if gz.zr == nil { gz.zr, err = gzip.NewReader(gz.body) if err != nil { + gz.zerr = err return 0, err } } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/transport_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/transport_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/transport_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/transport_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -326,28 +326,28 @@ return string(b) } -var bodyTests = []struct { - body string - noContentLen bool -}{ - {body: "some message"}, - {body: "some message", noContentLen: true}, - {body: ""}, - {body: "", noContentLen: true}, - {body: strings.Repeat("a", 1<<20), noContentLen: true}, - {body: strings.Repeat("a", 1<<20)}, - {body: randString(16<<10 - 1)}, - {body: randString(16 << 10)}, - {body: randString(16<<10 + 1)}, - {body: randString(512<<10 - 1)}, - {body: randString(512 << 10)}, - {body: randString(512<<10 + 1)}, - {body: randString(1<<20 - 1)}, - {body: randString(1 << 20)}, - {body: randString(1<<20 + 2)}, -} - func TestTransportBody(t *testing.T) { + bodyTests := []struct { + body string + noContentLen bool + }{ + {body: 
"some message"}, + {body: "some message", noContentLen: true}, + {body: ""}, + {body: "", noContentLen: true}, + {body: strings.Repeat("a", 1<<20), noContentLen: true}, + {body: strings.Repeat("a", 1<<20)}, + {body: randString(16<<10 - 1)}, + {body: randString(16 << 10)}, + {body: randString(16<<10 + 1)}, + {body: randString(512<<10 - 1)}, + {body: randString(512 << 10)}, + {body: randString(512<<10 + 1)}, + {body: randString(1<<20 - 1)}, + {body: randString(1 << 20)}, + {body: randString(1<<20 + 2)}, + } + type reqInfo struct { req *http.Request slurp []byte @@ -1104,7 +1104,7 @@ testTransportInvalidTrailer_Pseudo(t, splitHeader) } func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { - testInvalidTrailer(t, trailers, errPseudoTrailers, func(enc *hpack.Encoder) { + testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) }) @@ -1117,19 +1117,19 @@ testTransportInvalidTrailer_Capital(t, splitHeader) } func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { - testInvalidTrailer(t, trailers, errInvalidHeaderFieldName, func(enc *hpack.Encoder) { + testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) }) } func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { - testInvalidTrailer(t, oneHeader, errInvalidHeaderFieldName, func(enc *hpack.Encoder) { + testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"}) }) } func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { - testInvalidTrailer(t, oneHeader, errInvalidHeaderFieldValue, func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: "", Value: 
"has\nnewline"}) + testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) }) } @@ -1147,7 +1147,7 @@ } slurp, err := ioutil.ReadAll(res.Body) if err != wantErr { - return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, wantErr) + return fmt.Errorf("res.Body ReadAll error = %q, %#v; want %T of %#v", slurp, err, wantErr, wantErr) } if len(slurp) > 0 { return fmt.Errorf("body = %q; want nothing", slurp) @@ -1642,6 +1642,11 @@ value: []string{"123"}, want: "Accept-Encoding,User-Agent", }, + { + key: "Keep-Alive", + value: []string{"doop"}, + want: "Accept-Encoding,User-Agent", + }, } for _, tt := range tests { @@ -1660,3 +1665,138 @@ } } } + +// Tests that gzipReader doesn't crash on a second Read call following +// the first Read call's gzip.NewReader returning an error. +func TestGzipReader_DoubleReadCrash(t *testing.T) { + gz := &gzipReader{ + body: ioutil.NopCloser(strings.NewReader("0123456789")), + } + var buf [1]byte + n, err1 := gz.Read(buf[:]) + if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") { + t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1) + } + n, err2 := gz.Read(buf[:]) + if n != 0 || err2 != err1 { + t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1) + } +} + +func TestTransportNewTLSConfig(t *testing.T) { + tests := [...]struct { + conf *tls.Config + host string + want *tls.Config + }{ + // Normal case. 
+ 0: { + conf: nil, + host: "foo.com", + want: &tls.Config{ + ServerName: "foo.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // User-provided name (bar.com) takes precedence: + 1: { + conf: &tls.Config{ + ServerName: "bar.com", + }, + host: "foo.com", + want: &tls.Config{ + ServerName: "bar.com", + NextProtos: []string{NextProtoTLS}, + }, + }, + + // NextProto is prepended: + 2: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar"}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{NextProtoTLS, "foo", "bar"}, + }, + }, + + // NextProto is not duplicated: + 3: { + conf: &tls.Config{ + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + host: "example.com", + want: &tls.Config{ + ServerName: "example.com", + NextProtos: []string{"foo", "bar", NextProtoTLS}, + }, + }, + } + for i, tt := range tests { + tr := &Transport{TLSClientConfig: tt.conf} + got := tr.newTLSConfig(tt.host) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. got %#v; want %#v", i, got, tt.want) + } + } +} + +// The Google GFE responds to HEAD requests with a HEADERS frame +// without END_STREAM, followed by a 0-length DATA frame with +// END_STREAM. Make sure we don't get confused by that. (We did.) 
+func TestTransportReadHeadResponse(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != 123 { + return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, // as the GFE does + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, nil) + + <-clientDone + return nil + } + return nil + } + ct.run() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/write.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/write.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/write.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/write.go 2016-05-24 07:05:22.000000000 +0000 @@ -9,7 +9,6 @@ "fmt" "log" "net/http" - "sort" "time" "golang.org/x/net/http2/hpack" @@ -230,13 +229,13 @@ } func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - // TODO: garbage. pool sorters like http1? hot path for 1 key? 
if keys == nil { - keys = make([]string, 0, len(h)) - for k := range h { - keys = append(keys, k) - } - sort.Strings(keys) + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) } for _, k := range keys { vv := h[k] diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/z_spec_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/z_spec_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/z_spec_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/http2/z_spec_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -37,7 +37,7 @@ } // covers marks all sentences for section sec in defaultSpecCoverage. Sentences not -// "covered" will be included in report outputed by TestSpecCoverage. +// "covered" will be included in report outputted by TestSpecCoverage. func covers(sec, sentences string) { loadSpecOnce.Do(loadSpec) defaultSpecCoverage.cover(sec, sentences) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/echo.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/echo.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/echo.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/echo.go 2016-05-24 07:05:22.000000000 +0000 @@ -4,6 +4,8 @@ package icmp +import "encoding/binary" + // An Echo represents an ICMP echo request or reply message body. type Echo struct { ID int // identifier @@ -22,8 +24,8 @@ // Marshal implements the Marshal method of MessageBody interface. 
func (p *Echo) Marshal(proto int) ([]byte, error) { b := make([]byte, 4+len(p.Data)) - b[0], b[1] = byte(p.ID>>8), byte(p.ID) - b[2], b[3] = byte(p.Seq>>8), byte(p.Seq) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) copy(b[4:], p.Data) return b, nil } @@ -34,7 +36,7 @@ if bodyLen < 4 { return nil, errMessageTooShort } - p := &Echo{ID: int(b[0])<<8 | int(b[1]), Seq: int(b[2])<<8 | int(b[3])} + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} if bodyLen > 4 { p.Data = make([]byte, bodyLen-4) copy(p.Data, b[4:]) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/endpoint.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/endpoint.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/endpoint.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/endpoint.go 2016-05-24 07:05:22.000000000 +0000 @@ -51,7 +51,7 @@ } // Please be informed that ipv4.NewPacketConn enables // IP_STRIPHDR option by default on Darwin. - // See golang.org/issue/9395 for futher information. + // See golang.org/issue/9395 for further information. if runtime.GOOS == "darwin" && c.p4 != nil { n, _, peer, err := c.p4.ReadFrom(b) return n, peer, err diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/extension.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/extension.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/extension.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/extension.go 2016-05-24 07:05:22.000000000 +0000 @@ -4,6 +4,8 @@ package icmp +import "encoding/binary" + // An Extension represents an ICMP extension. type Extension interface { // Len returns the length of ICMP extension. 
@@ -19,7 +21,7 @@ func validExtensionHeader(b []byte) bool { v := int(b[0]&0xf0) >> 4 - s := uint16(b[2])<<8 | uint16(b[3]) + s := binary.BigEndian.Uint16(b[2:4]) if s != 0 { s = checksum(b) } @@ -63,7 +65,7 @@ } var exts []Extension for b = b[l+4:]; len(b) >= 4; { - ol := int(b[0])<<8 | int(b[1]) + ol := int(binary.BigEndian.Uint16(b[:2])) if 4 > ol || ol > len(b) { break } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/helper.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/helper.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/helper.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/helper.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. 
+ freebsdVersion uint32 + + nativeEndian binary.ByteOrder +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/interface.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/interface.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/interface.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/interface.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,6 +5,7 @@ package icmp import ( + "encoding/binary" "net" "strings" @@ -89,7 +90,7 @@ } func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { - b[0], b[1] = byte(l>>8), byte(l) + binary.BigEndian.PutUint16(b[:2], uint16(l)) b[2], b[3] = classInterfaceInfo, byte(ifi.Type) for b = b[4:]; len(b) > 0 && attrs != 0; { switch { @@ -111,7 +112,7 @@ } func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { - b[0], b[1], b[2], b[3] = byte(ifi.Interface.Index>>24), byte(ifi.Interface.Index>>16), byte(ifi.Interface.Index>>8), byte(ifi.Interface.Index) + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) return b[4:] } @@ -119,18 +120,18 @@ if len(b) < 4 { return nil, errMessageTooShort } - ifi.Interface.Index = int(b[0])<<24 | int(b[1])<<16 | int(b[2])<<8 | int(b[3]) + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) return b[4:], nil } func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { switch proto { case iana.ProtocolICMP: - b[0], b[1] = byte(afiIPv4>>8), byte(afiIPv4) + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) b = b[4+net.IPv4len:] case iana.ProtocolIPv6ICMP: - b[0], b[1] = byte(afiIPv6>>8), byte(afiIPv6) + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) 
copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) b = b[4+net.IPv6len:] } @@ -141,7 +142,7 @@ if len(b) < 4 { return nil, errMessageTooShort } - afi := int(b[0])<<8 | int(b[1]) + afi := int(binary.BigEndian.Uint16(b[:2])) b = b[4:] switch afi { case afiIPv4: @@ -184,7 +185,7 @@ } func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { - b[0], b[1], b[2], b[3] = byte(ifi.Interface.MTU>>24), byte(ifi.Interface.MTU>>16), byte(ifi.Interface.MTU>>8), byte(ifi.Interface.MTU) + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) return b[4:] } @@ -192,7 +193,7 @@ if len(b) < 4 { return nil, errMessageTooShort } - ifi.Interface.MTU = int(b[0])<<24 | int(b[1])<<16 | int(b[2])<<8 | int(b[3]) + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) return b[4:], nil } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/ipv4.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/ipv4.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/ipv4.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/ipv4.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,16 +5,13 @@ package icmp import ( + "encoding/binary" "net" "runtime" - "unsafe" "golang.org/x/net/ipv4" ) -// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. -var freebsdVersion uint32 - // ParseIPv4Header parses b as an IPv4 header of ICMP error message // invoking packet, which is contained in ICMP error message. 
func ParseIPv4Header(b []byte) (*ipv4.Header, error) { @@ -29,27 +26,25 @@ Version: int(b[0] >> 4), Len: hdrlen, TOS: int(b[1]), - ID: int(b[4])<<8 | int(b[5]), - FragOff: int(b[6])<<8 | int(b[7]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), TTL: int(b[8]), Protocol: int(b[9]), - Checksum: int(b[10])<<8 | int(b[11]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), Src: net.IPv4(b[12], b[13], b[14], b[15]), Dst: net.IPv4(b[16], b[17], b[18], b[19]), } switch runtime.GOOS { case "darwin": - // TODO(mikio): fix potential misaligned memory access - h.TotalLen = int(*(*uint16)(unsafe.Pointer(&b[2:3][0]))) + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) case "freebsd": if freebsdVersion >= 1000000 { - h.TotalLen = int(b[2])<<8 | int(b[3]) + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) } else { - // TODO(mikio): fix potential misaligned memory access - h.TotalLen = int(*(*uint16)(unsafe.Pointer(&b[2:3][0]))) + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) } default: - h.TotalLen = int(b[2])<<8 | int(b[3]) + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) } h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 h.FragOff = h.FragOff & 0x1fff diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/message.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/message.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/message.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/message.go 2016-05-24 07:05:22.000000000 +0000 @@ -14,6 +14,7 @@ package icmp // import "golang.org/x/net/icmp" import ( + "encoding/binary" "errors" "net" "syscall" @@ -94,7 +95,7 @@ return b, nil } off, l := 2*net.IPv6len, len(b)-len(psh) - b[off], b[off+1], b[off+2], b[off+3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) } s := checksum(b) // Place 
checksum back in header; using ^= avoids the @@ -128,7 +129,7 @@ return nil, errMessageTooShort } var err error - m := &Message{Code: int(b[1]), Checksum: int(b[2])<<8 | int(b[3])} + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} switch proto { case iana.ProtocolICMP: m.Type = ipv4.ICMPType(b[0]) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/mpls.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/mpls.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/mpls.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/mpls.go 2016-05-24 07:05:22.000000000 +0000 @@ -4,6 +4,8 @@ package icmp +import "encoding/binary" + // A MPLSLabel represents a MPLS label stack entry. type MPLSLabel struct { Label int // label value @@ -40,7 +42,7 @@ func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { l := ls.Len(proto) - b[0], b[1] = byte(l>>8), byte(l) + binary.BigEndian.PutUint16(b[:2], uint16(l)) b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack off := 4 for _, ll := range ls.Labels { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/packettoobig.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/packettoobig.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/packettoobig.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/packettoobig.go 2016-05-24 07:05:22.000000000 +0000 @@ -4,6 +4,8 @@ package icmp +import "encoding/binary" + // A PacketTooBig represents an ICMP packet too big message body. type PacketTooBig struct { MTU int // maximum transmission unit of the nexthop link @@ -21,7 +23,7 @@ // Marshal implements the Marshal method of MessageBody interface. 
func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { b := make([]byte, 4+len(p.Data)) - b[0], b[1], b[2], b[3] = byte(p.MTU>>24), byte(p.MTU>>16), byte(p.MTU>>8), byte(p.MTU) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) copy(b[4:], p.Data) return b, nil } @@ -32,7 +34,7 @@ if bodyLen < 4 { return nil, errMessageTooShort } - p := &PacketTooBig{MTU: int(b[0])<<24 | int(b[1])<<16 | int(b[2])<<8 | int(b[3])} + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} if bodyLen > 4 { p.Data = make([]byte, bodyLen-4) copy(p.Data, b[4:]) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/paramprob.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/paramprob.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/paramprob.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/icmp/paramprob.go 2016-05-24 07:05:22.000000000 +0000 @@ -4,7 +4,10 @@ package icmp -import "golang.org/x/net/internal/iana" +import ( + "encoding/binary" + "golang.org/x/net/internal/iana" +) // A ParamProb represents an ICMP parameter problem message body. 
type ParamProb struct { @@ -26,7 +29,7 @@ func (p *ParamProb) Marshal(proto int) ([]byte, error) { if proto == iana.ProtocolIPv6ICMP { b := make([]byte, p.Len(proto)) - b[0], b[1], b[2], b[3] = byte(p.Pointer>>24), byte(p.Pointer>>16), byte(p.Pointer>>8), byte(p.Pointer) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) copy(b[4:], p.Data) return b, nil } @@ -45,7 +48,7 @@ } p := &ParamProb{} if proto == iana.ProtocolIPv6ICMP { - p.Pointer = uintptr(b[0])<<24 | uintptr(b[1])<<16 | uintptr(b[2])<<8 | uintptr(b[3]) + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) p.Data = make([]byte, len(b)-4) copy(p.Data, b[4:]) return p, nil diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/internal/iana/const.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/internal/iana/const.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/internal/iana/const.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/internal/iana/const.go 2016-05-24 07:05:22.000000000 +0000 @@ -38,7 +38,7 @@ CongestionExperienced = 0x3 // CE (Congestion Experienced) ) -// Protocol Numbers, Updated: 2015-06-23 +// Protocol Numbers, Updated: 2015-10-06 const ( ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option @@ -54,7 +54,6 @@ ProtocolBBNRCCMON = 10 // BBN RCC Monitoring ProtocolNVPII = 11 // Network Voice Protocol ProtocolPUP = 12 // PUP - ProtocolARGUS = 13 // ARGUS ProtocolEMCON = 14 // EMCON ProtocolXNET = 15 // Cross Net Debugger ProtocolCHAOS = 16 // Chaos diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/gen.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/gen.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/gen.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/gen.go 2016-05-24 07:05:22.000000000 +0000 @@ -52,7 +52,7 @@ if err != nil { return err } - // The ipv4 pacakge still supports go1.2, and so we need to + // The ipv4 package still supports go1.2, and so we need to // take care of additional platforms in go1.3 and above for // working with go1.2. switch { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/header.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/header.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/header.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/header.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,11 +5,11 @@ package ipv4 import ( + "encoding/binary" "fmt" "net" "runtime" "syscall" - "unsafe" ) const ( @@ -64,17 +64,16 @@ flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) switch runtime.GOOS { case "darwin", "dragonfly", "freebsd", "netbsd": - // TODO(mikio): fix potential misaligned memory access - *(*uint16)(unsafe.Pointer(&b[2:3][0])) = uint16(h.TotalLen) - *(*uint16)(unsafe.Pointer(&b[6:7][0])) = uint16(flagsAndFragOff) + nativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + nativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) default: - b[2], b[3] = byte(h.TotalLen>>8), byte(h.TotalLen) - b[6], b[7] = byte(flagsAndFragOff>>8), byte(flagsAndFragOff) + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) } - b[4], b[5] = byte(h.ID>>8), byte(h.ID) + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) b[8] = byte(h.TTL) b[9] = byte(h.Protocol) - b[10], b[11] = byte(h.Checksum>>8), byte(h.Checksum) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) if ip := h.Src.To4(); ip != nil { copy(b[12:16], ip[:net.IPv4len]) } @@ -89,9 +88,6 @@ return b, nil } -// See 
http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. -var freebsdVersion uint32 - // ParseHeader parses b as an IPv4 header. func ParseHeader(b []byte) (*Header, error) { if len(b) < HeaderLen { @@ -105,30 +101,26 @@ Version: int(b[0] >> 4), Len: hdrlen, TOS: int(b[1]), - ID: int(b[4])<<8 | int(b[5]), + ID: int(binary.BigEndian.Uint16(b[4:6])), TTL: int(b[8]), Protocol: int(b[9]), - Checksum: int(b[10])<<8 | int(b[11]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), Src: net.IPv4(b[12], b[13], b[14], b[15]), Dst: net.IPv4(b[16], b[17], b[18], b[19]), } switch runtime.GOOS { case "darwin", "dragonfly", "netbsd": - // TODO(mikio): fix potential misaligned memory access - h.TotalLen = int(*(*uint16)(unsafe.Pointer(&b[2:3][0]))) + hdrlen - // TODO(mikio): fix potential misaligned memory access - h.FragOff = int(*(*uint16)(unsafe.Pointer(&b[6:7][0]))) + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(nativeEndian.Uint16(b[6:8])) case "freebsd": - // TODO(mikio): fix potential misaligned memory access - h.TotalLen = int(*(*uint16)(unsafe.Pointer(&b[2:3][0]))) + h.TotalLen = int(nativeEndian.Uint16(b[2:4])) if freebsdVersion < 1000000 { h.TotalLen += hdrlen } - // TODO(mikio): fix potential misaligned memory access - h.FragOff = int(*(*uint16)(unsafe.Pointer(&b[6:7][0]))) + h.FragOff = int(nativeEndian.Uint16(b[6:8])) default: - h.TotalLen = int(b[2])<<8 | int(b[3]) - h.FragOff = int(b[6])<<8 | int(b[7]) + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) } h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 h.FragOff = h.FragOff & 0x1fff diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/helper.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/helper.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/helper.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/helper.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,8 +5,10 @@ package ipv4 import ( + "encoding/binary" "errors" "net" + "unsafe" ) var ( @@ -18,8 +20,23 @@ errOpNoSupport = errors.New("operation not supported") errNoSuchInterface = errors.New("no such interface") errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 + + nativeEndian binary.ByteOrder ) +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} + func boolint(b bool) int { if b { return 1 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -16,7 +16,7 @@ func getsockoptIPMreqn(fd, name int) (*net.Interface, error) { var mreqn sysIPMreqn - l := sysSockoptLen(sysSizeofIPMreqn) + l := uint32(sysSizeofIPMreqn) if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), &l); err != nil { return nil, os.NewSyscallError("getsockopt", err) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_asmreq_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_asmreq_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_asmreq_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_asmreq_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -24,7 +24,7 @@ func getsockoptInterface(fd, name int) (*net.Interface, error) { var b [4]byte - l := sysSockoptLen(4) + l := uint32(4) if err := getsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), &l); err != nil { return nil, os.NewSyscallError("getsockopt", err) } @@ -42,5 +42,5 @@ } var b [4]byte copy(b[:], ip) - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), sysSockoptLen(4))) + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), uint32(4))) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -23,7 +23,7 @@ } gr.setGroup(grp) var p unsafe.Pointer - var l sysSockoptLen + var l uint32 if freebsd32o64 { var d [sysSizeofGroupReq + 4]byte s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) @@ -45,7 +45,7 @@ } gsr.setSourceGroup(grp, src) var p unsafe.Pointer - var l sysSockoptLen + var l uint32 if freebsd32o64 { var d [sysSizeofGroupSourceReq + 4]byte s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sockopt_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -21,10 +21,10 @@ var i int32 var b byte p := unsafe.Pointer(&i) - l := sysSockoptLen(4) + l := uint32(4) if opt.typ == ssoTypeByte { p = unsafe.Pointer(&b) - l = sysSockoptLen(1) + l = 1 } if err := getsockopt(fd, iana.ProtocolIP, opt.name, p, &l); err != nil { return 0, os.NewSyscallError("getsockopt", err) @@ -42,11 +42,11 @@ i := int32(v) var b byte p := unsafe.Pointer(&i) - l := sysSockoptLen(4) + l := uint32(4) if opt.typ == ssoTypeByte { b = byte(v) p = unsafe.Pointer(&b) - l = sysSockoptLen(1) + l = 1 } return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, opt.name, p, l)) } @@ -84,7 +84,7 @@ return nil, errOpNoSupport } var f ICMPFilter - l := sysSockoptLen(sysSizeofICMPFilter) + l := uint32(sysSizeofICMPFilter) if err := getsockopt(fd, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.sysICMPFilter), &l); err != nil { return nil, os.NewSyscallError("getsockopt", err) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_bsd.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_bsd.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_bsd.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_bsd.go 2016-05-24 07:05:22.000000000 +0000 @@ -11,8 +11,6 @@ "syscall" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/syscall_linux_386.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/syscall_linux_386.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/syscall_linux_386.go 2016-02-10 14:34:39.000000000 +0000 +++ 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/syscall_linux_386.go 2016-05-24 07:05:22.000000000 +0000 @@ -16,14 +16,14 @@ func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) -func getsockopt(fd, level, name int, v unsafe.Pointer, l *sysSockoptLen) error { +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { return error(errno) } return nil } -func setsockopt(fd, level, name int, v unsafe.Pointer, l sysSockoptLen) error { +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { return error(errno) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/syscall_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/syscall_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/syscall_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/syscall_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -11,14 +11,14 @@ "unsafe" ) -func getsockopt(fd, level, name int, v unsafe.Pointer, l *sysSockoptLen) error { +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { return error(errno) } return nil } -func setsockopt(fd, level, name int, v unsafe.Pointer, l sysSockoptLen) error { +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { 
return error(errno) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_darwin.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_darwin.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_darwin.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_darwin.go 2016-05-24 07:05:22.000000000 +0000 @@ -10,8 +10,6 @@ "unsafe" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_freebsd.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_freebsd.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_freebsd.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_freebsd.go 2016-05-24 07:05:22.000000000 +0000 @@ -12,8 +12,6 @@ "unsafe" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_linux.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_linux.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_linux.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_linux.go 2016-05-24 07:05:22.000000000 +0000 @@ -10,8 +10,6 @@ "unsafe" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_openbsd.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_openbsd.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_openbsd.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_openbsd.go 2016-05-24 07:05:22.000000000 +0000 @@ -9,8 +9,6 @@ "syscall" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_stub.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_stub.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_stub.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv4/sys_stub.go 2016-05-24 07:05:22.000000000 +0000 @@ -6,8 +6,6 @@ package ipv4 -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/control_rfc2292_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/control_rfc2292_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/control_rfc2292_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/control_rfc2292_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -20,8 +20,7 @@ m.SetLen(syscall.CmsgLen(4)) if cm != nil { data := b[syscall.CmsgLen(0):] - // TODO(mikio): fix potential misaligned memory access - *(*int32)(unsafe.Pointer(&data[:4][0])) = int32(cm.HopLimit) + nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit)) } return b[syscall.CmsgSpace(4):] } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/control_rfc3542_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/control_rfc3542_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/control_rfc3542_unix.go 
2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/control_rfc3542_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -20,15 +20,13 @@ m.SetLen(syscall.CmsgLen(4)) if cm != nil { data := b[syscall.CmsgLen(0):] - // TODO(mikio): fix potential misaligned memory access - *(*int32)(unsafe.Pointer(&data[:4][0])) = int32(cm.TrafficClass) + nativeEndian.PutUint32(data[:4], uint32(cm.TrafficClass)) } return b[syscall.CmsgSpace(4):] } func parseTrafficClass(cm *ControlMessage, b []byte) { - // TODO(mikio): fix potential misaligned memory access - cm.TrafficClass = int(*(*int32)(unsafe.Pointer(&b[:4][0]))) + cm.TrafficClass = int(nativeEndian.Uint32(b[:4])) } func marshalHopLimit(b []byte, cm *ControlMessage) []byte { @@ -38,15 +36,13 @@ m.SetLen(syscall.CmsgLen(4)) if cm != nil { data := b[syscall.CmsgLen(0):] - // TODO(mikio): fix potential misaligned memory access - *(*int32)(unsafe.Pointer(&data[:4][0])) = int32(cm.HopLimit) + nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit)) } return b[syscall.CmsgSpace(4):] } func parseHopLimit(cm *ControlMessage, b []byte) { - // TODO(mikio): fix potential misaligned memory access - cm.HopLimit = int(*(*int32)(unsafe.Pointer(&b[:4][0]))) + cm.HopLimit = int(nativeEndian.Uint32(b[:4])) } func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/gen.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/gen.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/gen.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/gen.go 2016-05-24 07:05:22.000000000 +0000 @@ -52,7 +52,7 @@ if err != nil { return err } - // The ipv6 pacakge still supports go1.2, and so we need to + // The ipv6 package still supports go1.2, and so we need to // take care of additional platforms in go1.3 and above 
for // working with go1.2. switch { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/header.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/header.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/header.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/header.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,6 +5,7 @@ package ipv6 import ( + "encoding/binary" "fmt" "net" ) @@ -42,7 +43,7 @@ Version: int(b[0]) >> 4, TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), - PayloadLen: int(b[4])<<8 | int(b[5]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), NextHeader: int(b[6]), HopLimit: int(b[7]), } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/helper.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/helper.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/helper.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/helper.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,8 +5,10 @@ package ipv6 import ( + "encoding/binary" "errors" "net" + "unsafe" ) var ( @@ -15,8 +17,20 @@ errInvalidConnType = errors.New("invalid conn type") errOpNoSupport = errors.New("operation not supported") errNoSuchInterface = errors.New("no such interface") + + nativeEndian binary.ByteOrder ) +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = binary.LittleEndian + } else { + nativeEndian = binary.BigEndian + } +} + func boolint(b bool) int { if b { return 1 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -21,7 +21,7 @@ } gr.setGroup(grp) var p unsafe.Pointer - var l sysSockoptLen + var l uint32 if freebsd32o64 { var d [sysSizeofGroupReq + 4]byte s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) @@ -43,7 +43,7 @@ } gsr.setSourceGroup(grp, src) var p unsafe.Pointer - var l sysSockoptLen + var l uint32 if freebsd32o64 { var d [sysSizeofGroupSourceReq + 4]byte s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sockopt_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sockopt_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sockopt_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sockopt_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -17,7 +17,7 @@ return 0, errOpNoSupport } var i int32 - l := sysSockoptLen(4) + l := uint32(4) if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { return 0, os.NewSyscallError("getsockopt", err) } @@ -29,7 +29,7 @@ return errOpNoSupport } i := int32(v) - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), sysSockoptLen(4))) + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), 4)) } func getInterface(fd int, opt *sockOpt) (*net.Interface, error) { @@ -37,7 +37,7 @@ return nil, errOpNoSupport } var i int32 - l := sysSockoptLen(4) + l := uint32(4) if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil { return nil, os.NewSyscallError("getsockopt", err) } @@ -59,7 +59,7 @@ if ifi != nil { i = 
int32(ifi.Index) } - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), sysSockoptLen(4))) + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&i), 4)) } func getICMPFilter(fd int, opt *sockOpt) (*ICMPFilter, error) { @@ -67,7 +67,7 @@ return nil, errOpNoSupport } var f ICMPFilter - l := sysSockoptLen(sysSizeofICMPv6Filter) + l := uint32(sysSizeofICMPv6Filter) if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&f.sysICMPv6Filter), &l); err != nil { return nil, os.NewSyscallError("getsockopt", err) } @@ -86,7 +86,7 @@ return nil, 0, errOpNoSupport } var mi sysIPv6Mtuinfo - l := sysSockoptLen(sysSizeofIPv6Mtuinfo) + l := uint32(sysSizeofIPv6Mtuinfo) if err := getsockopt(fd, opt.level, opt.name, unsafe.Pointer(&mi), &l); err != nil { return nil, 0, os.NewSyscallError("getsockopt", err) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_bsd.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_bsd.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_bsd.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_bsd.go 2016-05-24 07:05:22.000000000 +0000 @@ -13,8 +13,6 @@ "golang.org/x/net/internal/iana" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/syscall_linux_386.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/syscall_linux_386.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/syscall_linux_386.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/syscall_linux_386.go 2016-05-24 07:05:22.000000000 +0000 @@ -16,14 +16,14 @@ func 
socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) -func getsockopt(fd, level, name int, v unsafe.Pointer, l *sysSockoptLen) error { +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { if _, errno := socketcall(sysGETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { return error(errno) } return nil } -func setsockopt(fd, level, name int, v unsafe.Pointer, l sysSockoptLen) error { +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { if _, errno := socketcall(sysSETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { return error(errno) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/syscall_unix.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/syscall_unix.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/syscall_unix.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/syscall_unix.go 2016-05-24 07:05:22.000000000 +0000 @@ -11,14 +11,14 @@ "unsafe" ) -func getsockopt(fd, level, name int, v unsafe.Pointer, l *sysSockoptLen) error { +func getsockopt(fd, level, name int, v unsafe.Pointer, l *uint32) error { if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 { return error(errno) } return nil } -func setsockopt(fd, level, name int, v unsafe.Pointer, l sysSockoptLen) error { +func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error { if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 { return error(errno) } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_darwin.go 
aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_darwin.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_darwin.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_darwin.go 2016-05-24 07:05:22.000000000 +0000 @@ -12,8 +12,6 @@ "golang.org/x/net/internal/iana" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_freebsd.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_freebsd.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_freebsd.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_freebsd.go 2016-05-24 07:05:22.000000000 +0000 @@ -14,8 +14,6 @@ "golang.org/x/net/internal/iana" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_linux.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_linux.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_linux.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_linux.go 2016-05-24 07:05:22.000000000 +0000 @@ -12,8 +12,6 @@ "golang.org/x/net/internal/iana" ) -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{ ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_stub.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_stub.go --- 
aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_stub.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/ipv6/sys_stub.go 2016-05-24 07:05:22.000000000 +0000 @@ -6,8 +6,6 @@ package ipv6 -type sysSockoptLen int32 - var ( ctlOpts = [ctlMax]ctlOpt{} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/gen.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/gen.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/gen.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/gen.go 2016-05-24 07:05:22.000000000 +0000 @@ -12,6 +12,8 @@ // go run gen.go -version "xxx" >table.go // go run gen.go -version "xxx" -test >table_test.go // +// Pass -v to print verbose progress information. +// // The version is derived from information found at // https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat // @@ -35,11 +37,13 @@ ) const ( + // The sum of these four values must be no greater than 32. nodesBitsChildren = 9 nodesBitsICANN = 1 nodesBitsTextOffset = 15 nodesBitsTextLength = 6 + // The sum of these four values must be no greater than 32. childrenBitsWildcard = 1 childrenBitsNodeType = 2 childrenBitsHi = 14 @@ -98,7 +102,6 @@ // letters are not allowed.
validSuffix = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - crush = flag.Bool("crush", true, "make the generated node text as small as possible") subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") url = flag.String("url", "https://publicsuffix.org/list/effective_tld_names.dat", @@ -289,7 +292,7 @@ childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) - text := makeText() + text := combineText(labelsList) if text == "" { return fmt.Errorf("internal error: makeText returned no text") } @@ -299,8 +302,11 @@ return fmt.Errorf("internal error: could not find %q in text %q", label, text) } maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) - if offset >= 1<= 1<= 1<= 1<= 1<= 1<= 1<= 1<= 1< 0 && ss[0] == "" { ss = ss[1:] } + return ss +} - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that ss[i][len-k:] == ss[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) for i, s := range ss { - if s == "" { + if len(s) <= prefixLen { continue } - for j, t := range ss { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } + mergeLabel(ss, i, prefixLen, prefixes) } - if bestk > 0 { - if *v { - fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d) out of (%4d,%4d): %q and %q\n", - bestk, besti, bestj, len(ss), len(ss), ss[besti], ss[bestj]) - } - ss[besti] += ss[bestj][bestk:] - ss[bestj] = "" + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { continue } - break + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? 
+ // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return } +} - text := strings.Join(ss, "") - if *v { - fmt.Fprintf(os.Stderr, "crushed %d bytes to become %d bytes\n", beforeLength, len(text)) +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. 
+ if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } } - return text + + return prefixes } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/table.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/table.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/table.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/table.go 2016-05-24 07:05:22.000000000 +0000 @@ -2,7 +2,7 @@ package publicsuffix -const version = "publicsuffix.org's public_suffix_list.dat, git revision 24caf4f (2016-01-30)" +const version = "publicsuffix.org's public_suffix_list.dat, git revision bade64c (2016-03-01)" const ( nodesBitsChildren = 9 @@ -23,431 +23,432 @@ ) // numTLD is the number of top level domains. -const numTLD = 1543 +const numTLD = 1545 // Text is the combined text of all labels. 
-const text = "bieszczadygeyachimataipeigersundurbanamexeterbievatmallorcadaque" + - "sanjotateshinanomachintaijinfinitinfoggiabifukagawalmartateyamab" + - "ihorologyusuharabikedagestangebilbaogakievenesannanikkoebenhavni" + - "kolaeverbankashiwazakiyokawarabillustrationikonantanangerbiomuta" + - "shinainuyamanouchikuhokuryugasakitashiobarabirdartcenterprisesak" + - "ikonaircraftraeumtgeradealstahaugesundurhamburgliwicebirkenesodd" + - "tangenovaravennaharimalvikasukabedzin-the-bandaioirasebastopolog" + - "yeongnamegawakembuchikumagayagawakkanaibetsubamericanfamilydsclo" + - "udappspotenzachpomorskienebakkeshibechambagriculturennebudapest-" + - "a-la-masioninohelplfinancialinzainvestmentsannohelsinkitahiroshi" + - "marshallstatebankasumigaurawa-mazowszextraspace-to-rentalstomako" + - "maibarabirthplacebjarkoyusuisservicesanokasuyakutiabjerkreimmobi" + - "lieninomiyakonojoshkar-olayangroupaleostrowiecartoonartdecoffeed" + - "backaszubyuudmurtiabjugnirasakis-a-candidateblockbusternidvrdnsa" + - "ntabarbarabloombergbauernrtatsunostrowwlkpmglobalashovhachinoheg" + - "uris-a-catererbluedatingloboehringerikebmoattachmentsantacruzsan" + - "tafedexhibitionishiazais-a-celticsfanishigotpantheonishiharabmsa" + - "nukis-a-chefarsundwglogowegroweibolzanore-og-uvdalipetskatowiceb" + - "mweirbnpparibaselburgloppenzaogashimadachicagobododgemologically" + - "ngenglandyndns-homednsaotomeldalivornobomloansapodhalewismillerb" + - "ondyndns-ip6bonnishiizunazukis-a-conservativefsncfailomzansimagi" + - "casadelamonedavvesiidazaifudaigodoesntexistanbullensakerbookingm" + - "inakamichigangwonishikatakazakis-a-cpadoval-daostavalleyuzawaboo" + - "tsapporoboschaefflerdalorenskogmodenakatombetsumidatlanticaseihi" + - "chisobetsuitairabostikatsushikabeeldengeluidyndns-mailotenkawabo" + - "stonakijinsekikogentingmxboxenishikatsuragivestbytomaritimekeepi" + - "ngretakamoriokamchatkameokameyamashinatsukigatakanabeatsaratoval" + - "leaostavernishikawazukanazawabotanicalgardenishimerabotanicgarde" + 
- "nishinomiyashironobotanyuzhno-sakhalinskatsuyamasfjordenishinoom" + - "otegotsukisosakitagatakamatsukawaboutiquebecngrimstadyndns-offic" + - "e-on-the-webcambridgestonewspaperbozentsujiiebradescorporationis" + - "hinoshimatta-varjjatattoolsztynsettlersardegnamsskoganeis-a-cubi" + - "cle-slavellinowtvalled-aostavropolicebrandywinevalleybrasiljan-m" + - "ayenishiokoppegardyndns-picsardiniabresciabrindisibenikebristolg" + - "alsacebritishcolumbialowiezagannakadomari-elasticbeanstalkaufeni" + - "shitosashimizunaminamiashigarabroadcastlebtimnetzgorabroadwaybro" + - "ke-itaxihuanishiwakis-a-democratgorybrokerrypropertiesarlottebro" + - "nnoysundyndns-remotegildeskalmykiabrothermesaverdefensejnybrumun" + - "ddalottokigawabrunelblagdenesnaaseralingenkainanaejrietisalatina" + - "benogatachikawakayamagadancebetsukubabia-goracleaningatlantagajo" + - "bojis-a-designerbrusselsarpsborgripebruxellesarufutsunomiyawakas" + - "aikaitakoelnissandoybryanskjakdnepropetrovskiervaapsteiermarkaut" + - "okeinobryneustarhubalestrandabergamoarekembroideryonabaruconnect" + - "arnobrzegjesdalimanowarudasnesoddenmarkets3-eu-central-1buskerud" + - "inewhampshirecipesaro-urbino-pesarourbinopesaromaniwakuratelekom" + - "munikationissayokoshibahikariwanumataketomisatokuyamatteledataba" + - "seballooningriwataraidyndns-serverbaniabuzenissedalouvrepbodyndn" + - "s-blogdnsasayamabuzzgorzeleccollegersundyndns-weberlincolnisshin" + - "guernseybwfashioniyodogawabzhitomirkutskjervoyagecloudfunctionsb" + - "schokoladencntjxn--0trq7p7nncolognewmexicoldwarmiamiastapleschol" + - "arshipschooluroycolonialwilliamsburgujolstercoloradoplateaudioco" + - "lumbusheycommunitysneschulezajskfhskhabarovskhakassiacomobaracom" + - "paremarkerryhotelschwarzgwangjuifminamibosogndalutskharkivguccip" + - "rianiigataitogitsuldaluxembourgulencompute-1computerhistoryofsci" + - "ence-fictioncomsecuritysvardoharuhrcondoshichinohedmarkhangelsky" + - "pescaravantaaconferenceconstructionconsuladollschweizgradconsult" + - 
"anthropologyconsultingvolluxurycontactkmaxxn--11b4c3dcontemporar" + - "yarteducationalchikugojomedicaltanissettaiwanairguardcontractors" + - "kenconventureshinodesashibetsuikimobetsuliguriacookingchannelver" + - "uminamidaitomangotembaixadacoolkuszippodlasiellakasamatsudoosand" + - "iegokaseljordcoopocznorthwesternmutualuzerncopenhagencyclopedica" + - "tholicasertaishinomakikuchikuseikarugapartmentsaseboknowsitallov" + - "egaskimitsubatamicabbottjeldsundyndns-wikindlegnicamerakershus-e" + - "ast-1corsicagliaridagawarszawashingtondclkharkovalledaostakkofue" + - "lvivano-frankivskhersoncorvettemasekhmelnitskiyamashikecosenzama" + - "mibuildersciencecentersciencehistorycostumedio-campidano-medioca" + - "mpidanomediocouncilcouponscientistor-elvdalcoursescjohnsoncq-acr" + - "anbrookuwanalyticscrapper-sitecreditcardcreditunioncremonashorok" + - "anaiecrewiiheyaizuwakamatsubushikusakadogawacricketrzyncrimeacro" + - "tonewportlligatewaycrowncrscrappingunmarriottmpalmspringsakercru" + - "isesettsurfastlycuisinellajollamericanexpressexyzjcbnlculturalce" + - "ntertainmentoyokawacuneocupcakecxn--1ck2e1balsanagochihayaakasak" + - "awaharaumakeupowiathletajimabariakepnordkappgjovikaruizawastrono" + - "mydstvedestrandgcahcesuolocalhistoryazannefrankfurtargets-itargi" + - "234cymruovatoyonakagyokutoshimacyouthdfcbankhmelnytskyivallee-ao" + - "steroyfilminamifuranofinalfinancefineartsfranziskanerimamateramo" + - "chizukirafinlandfinnoyfirebaseappamperedchefauskedsmokorsetagaya" + - "sells-for-lessevastopolefirenzefirestonextdirectoryfirmdalegolfe" + - "djejuegoshikiminokamoenairlinebraskaunbieidsvollfishingonohejis-" + - "a-geekhvalleeaosteigenfitjarqhachiojiyahikobeautydalfitnessettle" + - "mentoyonofjalerflickragerotikalugansklabudhabikinokawabarthachir" + - "ogatakanezawaflightshangrilangevagrarboretumbriaflirumansionshar" + - "is-a-greenfloguchikuzenfloraflorencefloridafloristanohatakaharus" + - "siafloromskogxn--1ctwolominamatamayukis-a-gurulsandvikcoromantov" + - 
"alle-daostavangerflowersharpanamaflsmidthruhereggiocalabriaflynn" + - "hubalsfjordiskstationaval-d-aosta-valleyonagoyaugustowadaegubs3-" + - "eu-west-1fndfolldalfoodnetworkangerfor-better-thandafor-ourfor-s" + - "omedizinhistorischeshawaiijimarylandfor-theaterforexrothadanotog" + - "awaforgotdnshellaspeziaforli-cesena-forlicesenaforlikes-piedmont" + - "blancomeereshimokawaforsaleikangerforsandasuolodingenfortmissoul" + - "an-udell-ogliastrakhanawawilliamhillfortworthadselfipirangaforum" + - "inamiiselectoyookarasjohkaminoyamatsuris-a-hard-workerfosneshimo" + - "kitayamafotoyosatotalfoxn--1lqs03nfreiburgzlgfreightcmwinbaltimo" + - "re-og-romsdalimitedunetflixilimoliserniaukraanghke164freseniusde" + - "corativeartshimonitayanagivingfribourgfriuli-v-giuliafriuli-ve-g" + - "iuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafri" + - "uli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivene" + - "zia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganshimonosekika" + - "wafrognfrolandfrom-akrehamnfrom-alfrom-arfrom-azpanasonicdn77-ss" + - "lattumetlifeinsurancefrom-canonoichikawamisatodayfrom-collection" + - "from-ctoyotaris-a-hunterfrom-dcheltenham-radio-operaunitelemarka" + - "zimierz-dolnyfrom-dellogliastraderfrom-flandershimosuwalkis-a-kn" + - "ightoyotomiyazakis-a-landscaperugiafrom-gaulardalfrom-higashiaga" + - "tsumagoirmitakeharafrom-iafrom-idfrom-ilfrom-incheonfrom-kshimot" + - "sukefrom-kyknetoyotsukaidovre-eikerfrom-lanbibaidarfrom-manxn--1" + - "lqs71dfrom-mdfrom-meetoyourafrom-microsoftbankmpspbambleborkarum" + - "aifarmsteadivtasvuodnakaiwamizawaurskog-holandroverhalla-speziae" + - "tnagahamaroygardendoftheinternetcimdbalatinordre-landds3-ap-sout" + - "heast-1kappleangaviikadenaamesjevuemielnoboribetsucks3-ap-northe" + - "ast-1from-mnfrom-modalenfrom-mshimotsumafrom-mtnfrom-nchelyabins" + - "kodjeffersonrwhalingrongausdalucaniafrom-ndfrom-nexusgardenfrom-" + - "nhktoystre-slidrettozawafrom-njcparaglidingfrom-nminamiizukamito" + - 
"ndabayashiogamagoriziafrom-nvanylvenicefrom-nyfrom-ohkurafrom-ok" + - "etogurafrom-orfrom-paderbornfrom-pratohmaoris-a-lawyerfrom-ris-a" + - "-liberalfrom-schoenbrunnfrom-sdnipropetrovskmshinichinanfrom-tnf" + - "rom-txn--1qqw23afrom-utazuerichardlikescandyndns-at-homedepotaru" + - "is-a-libertarianfrom-vadsochildrensgardenfrom-vtozsdefrom-wafrom" + - "-wielunnerfrom-wvaolbia-tempio-olbiatempioolbialystokkemerovodka" + - "goshimaintenancefrom-wyfrosinonefrostalowa-wolawafroyahabadajozo" + - "rahkkeravjudygarlandfstcgrouparisor-fronfujiiderafujikawaguchiko" + - "nefujiminohtawaramotoineppugliafujinomiyadafujiokayamarburgfujis" + - "atoshonairportland-4-salernogiessengerdalaskanittedallasalleasee" + - "klogesquarezzoologyfujisawafujishiroishidakabiratoridelmenhorsta" + - "lbanshinjournalismailillehammerfest-mon-blogueurovisionfujitsuru" + - "gashimarinefujixeroxn--2m4a15efujiyoshidafukayabeardubaiduckdnsd" + - "ojoburgfukuchiyamadafukudominichernigovernmentjmaxxxfinityfukuis" + - "-a-linux-useranishiaritabashikaoizumizakitaurayasudafukumitsubis" + - "higakirkeneshinjukumanofukuokazakirovogradoyfukuroishikarikaturi" + - "ndalfukusakiryuohaebaruminamimakis-a-llamarylhursteinkjerusalemb" + - "etsukuis-a-musicianfukuyamagatakahashimamakisarazure-mobileirfjo" + - "rdfunabashiriuchinadafunagatakahatakaishimoichinosekigaharafunah" + - "ashikamiamakusatsumasendaisennangoodyearthagakhanamigawafundacio" + - "fuoiskujukuriyamarcheaparliamentranbyfuosskoczowindmillfurniture" + - "ggioemiliaromagnakasatsunairtelecityeatshinkamigotoyohashimotomo" + - "bellunordreisa-geekokonoefurubiraquarelleasingleshinshinotsurgeo" + - "nshalloffamelhustkamisunagawafurudonostiafurukawairtraffichernih" + - "ivanovosibirskydivingrossetouchijiwadeltajimicrolightingroundhan" + - "dlingroznyfusodegaurafussaikishiwadafutabayamaguchinomigawafutbo" + - "ldlygoingnowhere-for-moregontrailroadfuttsurugiminamiminowafvgfy" + - "is-a-nascarfanfylkesbiblackfridayfyresdalhannovareserveftparoche" + - 
"rkasyzrankoshigayaltaikis-a-painteractivegarsheis-a-patsfanhanyu" + - "zenhapmirhappoulvikomaganehareidsbergenharstadharvestcelebration" + - "hasamarahasaminami-alpssells-for-unzenhashbanghasudahasvikomakiy" + - "osatokamachippubetsubetsugaruhatogayahoooshikamaishimofusartshin" + - "tokushimahatoyamazakitahatakaokamikitayamatotakadahatsukaichihar" + - "ahattfjelldalhayashimamotobuildinghazuminobusells-itrani-andria-" + - "barletta-trani-andriahbofagehembygdsforbundhemneshintomikasahara" + - "hemsedalherokussldheroyhgtvarggatraniandriabarlettatraniandriahi" + - "gashichichibungotakadatsunanjoetsuwanouchikujogaszkoladbrokesenn" + - "umamurogawalterhigashihiroshimanehigashiizumozakitakamiizumisano" + - "fiatranoyhigashikagawahigashikagurasoedahigashikawakitaaikitakat" + - "akarazukamikoaniikappulawyhigashikurumeguroroskoleitungsenhigash" + - "imatsushimarugame-hostinghigashimatsuyamakitaakitadaitoigawahiga" + - "shimurayamalatvuopmidoris-a-personaltrainerhigashinarusellsyourh" + - "omegoodshinyoshitomiokaniepcehigashinehigashiomihachimanchesterh" + - "igashiosakasayamamotorcycleshiojirishirifujiedahigashishirakawam" + - "atakasagooglecodespotransportrapaniimimatakatoris-a-photographer" + - "okuapparshioyameloyalistockholmestrandhigashisumiyoshikawaminami" + - "aikitakyushuaiahigashitsunowruzhgorodoyhigashiurausukitamidsundh" + - "igashiyamatokoriyamanakakogawahigashiyodogawahigashiyoshinogaris" + - "-a-playerhiraizumisatohnoshoohirakatashinagawahiranais-a-republi" + - "cancerresearchaeologicaliforniahirarahiratsukagawahirayaitakasak" + - "itamotosumitakaginankokubunjis-a-rockstarachowicehisayamanashiib" + - "aghdadultravelchannelhistorichouseshirahamatonbetsurgeryhitachio" + - "miyaginowaniihamatamakawajimaritimodellinghitachiotagopartis-a-s" + - "ocialistmeindianapolis-a-bloggerhitoyoshimifunehitradinghjartdal" + - "hjelmelandholeckobierzyceholidayhomeipartnershirakoenighomelinux" + - "n--30rr7yhomesensembokukitanakagusukumoduminamiogunicomcastresis" + - 
"tancehomeunixn--32vp30hagebostadhondahonefosshiranukanmakiwakuni" + - "gamihamadahoneywellhongorgehonjyoitakashimarumorimachidahorninda" + - "lhorseminehortendofinternetravelersinsurancehoteleshiraois-a-sox" + - "fanhotmailhoyangerhoylandetroitskomatsushimashikiyosemitehumanit" + - "ieshiraokannamiharuhurdalhurumajis-a-studentalhyllestadhyogoris-" + - "a-teacherkassymantechnologyhyugawarahyundaiwafunejgorajlchiryuky" + - "uragifuefukihaborokunohealthcareersaskatchewanggouvicenzajlljmpa" + - "rtshizukuishimogosenjnjelenia-gorajoyokaichibahcavuotnagaraholta" + - "lenjpmorganichitachinakagawatchandclockazojpnchitosetogakushimot" + - "oganewjerseyjprshizuokanoyakagejuniperjurkristiansandcatshoujis-" + - "bykristiansundkrodsheradkrokstadelvaldaostarostwodzislawinnersho" + - "wakryminamitanekumatorinokumejimasudakumenanyokkaichirurgiens-de" + - "ntisteshowtimemerckomonokunisakis-certifiedekakegawakunitachiara" + - "ilwaykunitomigusukumamotoyamassa-carrara-massacarraramassabunkyo" + - "nanaoshimageandsoundandvisionkunneppupartykunstsammlungkunstundd" + - "esignkuokgroupasadenamsosnowiechloekurepairbusantiquest-a-la-mai" + - "sondre-landebusinessebykleclerchocolatelevisionkurgankurobelaudi" + - "blebesbyglandkurogimilitarykuroisoftwarendalenugkuromatsunais-fo" + - "undationkurotakikawasakis-gonekurskomorotsukamishihoronobeokamin" + - "okawanishiaizubangekushirogawakustanais-into-animeiwamaseratis-a" + - "n-actorkusupersportrentino-stirolkutchanelkutnokuzbassnillfjordk" + - "uzumakis-into-carshisognekvafjordkvalsundkvamlidlugolekagaminord" + - "-aurdalvdalipayufuchukotkafjordkvanangenkvinesdalkvinnheradkvite" + - "seidskogkvitsoykwpspjelkavikomvuxn--3bst00minamisanrikubetsupply" + - "kyotobetsupplieshriramsterdambulanceokyowariasahikawamishimatsum" + - "otofukemissilelmisugitokonamegatakayamatsunomitourismolanciamito" + - "yoakemiuramiyazurewebsiteshikagamiishibukawamiyotamanomjondalenm" + - "lbarclaycards3-us-west-2monmouthaibarakitagawamonstermonticellol" + - 
"montrealestatefarmequipmentrentino-sud-tirolmonza-brianzaporizhz" + - "hekinannestadmonza-e-della-brianzaporizhzhiamonzabrianzapposlomb" + - "ardiamondsigdalmonzaebrianzaramonzaedellabrianzamoparachutingmor" + - "doviajessheiminamiuonumatsumaebashimodatemoriyamatsusakahoginoza" + - "waonsenmoriyoshiokamitsuemormoneymoroyamatsushigemortgagemoscowi" + - "ostrolekaneyamaxunjargamoseushistorymosjoenmoskenesimbirskongsvi" + - "ngermossimple-urlmosvikoninjamisonmoviemovistargardmtpccwitdkons" + - "kowolancashireisenmtranakayamatsuuramuenstermugithubusercontentr" + - "entino-sudtirolmuikamogawamukochikushinonsenergymulhouservebbsir" + - "dalmultichoicemunakatanemuncieszynmuosattemupassagenslingmurmans" + - "konsulatrobeermurotorcraftrentino-sued-tirolmusashimurayamatsuza" + - "kis-lostre-toteneis-an-actresshishikuis-an-accountantshiratakaha" + - "gis-a-techietis-a-therapistoiamusashinoharamuseetrentino-suedtir" + - "olmuseumverenigingmutsuzawamutuellevangermyokohamamatsudamypetsl" + - "upskonyveloftrentino-altoadigemyphotoshibahccavuotnagareyamaizur" + - "ubtsovskiptveterinairebungoonomichinomiyakemytis-a-bookkeepermin" + - "amiyamashirokawanabelgorodeophiladelphiaareadmyblogsitephilately" + - "philipsyphoenixn--3ds443gphotographysiopiagetmyipassenger-associ" + - "ationpictetrentinoa-adigepicturesnzpiemontepilotsokanrapinkoperv" + - "ikommunalforbundpioneerpippupiszpittsburghofermobilypiwatepizzap" + - "koryolasiteplanetariuminanoplantationplantsokndalplatformincommb" + - "ankongsbergplaystationplazaplchofunatorientexpressassaris-a-fina" + - "ncialadvisor-aurdaluccapebretonamiasakuchinotsuchiurakawassamuka" + - "wataricohdavvenjargamvikazunoplombardyndns-at-workinggroupavianc" + - "apetownplumbingotvbarclaysakuraibigawaustinnaturalsciencesnature" + - "lles3-external-1plusterpmnpodzonepohlpokerpokrovskosaigawapoliti" + - "endapolkowicepoltavalle-aostathellexuslivinghistorypomorzeszowit" + - "hgoogleapisa-hockeynutrentinoaadigepordenonepornporsangerporsang" + - 
"ugeporsgrunnanpoznanpraxis-a-bruinsfansolarssonprdpreservationpr" + - "esidioprgmrprimelbourneprincipeprivneprochowiceproductionsologne" + - "proferraraprogressivenneslaskerrylogisticsolundbeckosakaerodrome" + - "gallupinbarcelonagasakijobservercellierneues3-us-west-1projectre" + - "ntinoalto-adigepromombetsupportrentinoaltoadigepropertyprotectio" + - "nprudentialpruszkowithyoutubeneventochiokinoshimalselvendrellprz" + - "eworskogptzpvtrentinos-tirolpwchonanbugattipschmidtre-gauldaluce" + - "rnepzqldqponqslgbtrentinostirolqvchoseiroumuenchenstudiostudyndn" + - "s-freemasonryokamikawanehonbetsurutaharastuff-4-salestuttgartren" + - "tinosued-tirolsurnadalsurreysusakis-uberleetrentino-a-adigesuson" + - "osuzakanumazurysuzukanzakiwiensuzukis-very-badaddjamalborkdalsva" + - "lbardudinkakamigaharasveiosvelvikosherbrookegawasvizzeraswedensw" + - "idnicapitalonewhollandswiebodzindianmarketingswiftcoverisignswin" + - "oujscienceandhistoryswisshikis-very-evillagesxn--3e0b707etunesoo" + - "turystykarasjoksnesopotrentinosud-tiroltuscanytushuissier-justic" + - "etuvalle-d-aostatoilvestnesor-varangervestre-slidreamhostersorfo" + - "ldvestre-totennishiawakuravestvagoyvevelstadvibo-valentiaviboval" + - "entiavideovillaskoyabearalvahkihokumakogeniwaizumiotsukumiyamazo" + - "nawsabaerobaticketsorreisahayakawakamiichikaiseis-slickomforbana" + - "narepublicargodaddynathomebuiltarumizusawaustevollavangenaturalh" + - "istorymuseumcenterhcloudcontrolledigitalaziobiragroks-thisamitsu" + - "keisenbahnativeamericanantiques3-ap-southeast-2vinnicarbonia-igl" + - "esias-carboniaiglesiascarboniavinnytsiavipsinaappfizervirginiavi" + - "rtualvirtuelvisakatakinouevistaprintuitrentottoris-very-goodhand" + - "sonviterboltrevisohughesolutionsomavivoldavladikavkazanvladimirv" + - "ladivostokaizukarasuyamazoevlogvolkenkunderseaportroandinosaurep" + - "ortrentinosuedtirolvolkswagentsortlandvologdanskoshunantokashiki" + - "zunokunimilanovolvolgogradvolyngdalvoronezhytomyrvossevangenvote" + - 
"votingvotoursoruminnesotaketakatsukis-into-cartoonshisuifuettert" + - "dasnetzwindowshitaramavrnworse-thangglidingwowiwatsukiyonowrites" + - "thisblogspotrogstadwroclawloclawekostromahachijorpelandwtchoshib" + - "uyachiyodawtferrarittogoldpointelligencewuozuwwworldwzmiuwajimax" + - "n--4gbriminingxn--4gq48lf9jeonnamerikawauexn--4it168dxn--4it797k" + - "otohiradomainsureitrentino-s-tirollagrigentomologyeonggiehtavuoa" + - "tnagaivuotnagaokakyotambabydgoszczecinemailxn--4pvxsouthcarolina" + - "zawaxn--54b7fta0cchromediaxn--55qw42gxn--55qx5dxn--5js045dxn--5r" + - "tp49chryslerxn--5rtq34kotouraxn--5su34j936bgsgxn--5tzm5gxn--6btw" + - "5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264chtrainingrpaler" + - "momasvuotnakatsugawaxn--80adxhksouthwestfalenxn--80ao21axn--80aq" + - "ecdr1axn--80asehdbarefootballangenoamishirasatobishimalopolskanl" + - "andivttasvuotnakamagayachtsakyotanabellevuelosangelesjaguarchite" + - "cturealtychyattorneyagawalbrzycharternopilawalesundiyonagunivers" + - "ityoriikasaokamiokamiminersalangenavigationavuotnakhodkanagawaus" + - "traliaisondriodejaneirochesterxn--80aswgxn--80audnedalnxn--8ltr6" + - "2kouhokutamakis-an-engineeringxn--8pvr4uxn--8y0a063axn--90a3acad" + - "emydroboatsaritsynologyeongbukounosunndalxn--90aishobaraomoriguc" + - "hiharagusaarlandxn--90azhair-surveillancexn--9dbhblg6diethnology" + - "xn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byanagawa" + - "xn--asky-iraxn--aurskog-hland-jnbargainstitutelefonicafederation" + - "ayoroceanographicsalondonetskashibatakasugaibmdnpalacemergencybe" + - "rlevagangaviikanonjiinetatamotorsaltdalindasiaustrheimatunduhren" + - "nesoyekaterinburgjemnes3-external-2xn--avery-yuasakegawaxn--b-5g" + - "axn--b4w605ferdxn--bck1b9a5dre4chungbukchristiansburgruexn--bddd" + - "j-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bh" + - "ccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fya" + - "naizuxn--bjddar-ptamboversaillesor-odalxn--blt-elaborxn--bmlo-gr" + - 
"aingerxn--bod-2naroyxn--brnny-wuaccident-investigationjukudoyama" + - "ceratabuseat-band-campaniamallamadridvagsoyericssonlineat-urlxn-" + - "-brnnysund-m8accident-preventionxn--brum-voagatromsakakinokiaxn-" + - "-btsfjord-9zaxn--c1avgxn--c2br7gxn--c3s14minternationalfirearmsi" + - "enarashinoxn--cck2b3barreauctionflatangerxn--cg4bkis-very-nicexn" + - "--ciqpnxn--clchc0ea0b2g2a9gcdn77-securecreationxn--comunicaes-v6" + - "a2oxn--correios-e-telecomunicaes-ghc29axn--czr694barrel-of-knowl" + - "edgeometre-experts-comptablesalvadordalibabaikaliszczytnordlandr" + - "angedalindesnesalzburgladeloittenrightathomeftpaccessamegawautho" + - "rdalandroidiscountyumenaturbruksgymnaturhistorisches3-fips-us-go" + - "v-west-1xn--czrs0tromsojavald-aostarnbergxn--czru2dxn--czrw28bar" + - "rell-of-knowledgeorgeorgiautomotivecodyn-o-saurlandes3-sa-east-1" + - "xn--d1acj3bashkiriautoscanadaejeonbukariyakumoldebinagisoccertif" + - "icationatuurwetenschappenaumburgjerdrumckinseyokosukareliancebin" + - "osegawakunedre-eikereviewskrakowebhopagefrontappagespeedmobilize" + - "robihirosakikamijimaeroportalabamagasakishimabarackmaze-burggfar" + - "merseinewyorkshireggio-emilia-romagnakanotoddenasushiobarabruzzo" + - "ologicalvinklein-addrammenuernbergdyniabogadocscbg12000xn--d1alf" + - "aromeoxn--d1atrusteexn--d5qv7z876chungnamdalseidfjordyroyrviking" + - "uideventsaudaxn--davvenjrga-y4axn--djrs72d6uyxn--djty4kouyamashi" + - "kokuchuoxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--" + - "eckvdtc9dxn--efvn9sowaxn--efvy88hakatanotteroyxn--ehqz56nxn--elq" + - "q16hakodatevaksdalxn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct" + - "429kouzushimasoyxn--fhbeiarnxn--finny-yuaxn--fiq228c5hspreadbett" + - "ingxn--fiq64basilicataniaveroykenvironmentalconservationaustdali" + - "llesandefjordiscoveryggeelvinckarlsoyokotebizenakaniikawatanagur" + - "amusementarantomsk-uralsk12xn--fiqs8spydebergxn--fiqz9srlxn--fjo" + - "rd-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dx" + - 
"n--frde-grandrapidsrtrentinosudtirolxn--frna-woaraisaijosoyrovig" + - "orlicexn--frya-hraxn--fzc2c9e2churchaseljeepilepsydneyxn--fzys8d" + - "69uvgmailxn--g2xx48chuvashiaxn--gckr3f0ferreroticampobassociates" + - "evenassisicilyxn--gecrj9circlegallocuscountryestateofdelawaredum" + - "brellahppiacenzakopanerairforcechirealtorlandxn--ggaviika-8ya47h" + - "akonexn--gildeskl-g0axn--givuotna-8yandexn--3oq18vl8pn36axn--gjv" + - "ik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-very-sweetrentino-" + - "aadigexn--gmqw5axn--h-2fairwindsrvdonskoseis-an-artistjohnxn--h1" + - "aeghakubankolobrzegyptianpachigasakidsmynasperschlesischesurance" + - "xn--h2brj9circuscultureggio-calabriaxn--hbmer-xqaxn--hcesuolo-7y" + - "a35basketballfinanz-2xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta" + - "-s4acoachampionshiphopenair-traffic-controlleyxn--hnefoss-q1axn-" + - "-hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1ax" + - "n--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyaotsurguts" + - "iracusaitokyotangovtrverranzanxn--io0a7is-with-thebandoomdnsalia" + - "scolipicenord-odalxn--j1aefetsundxn--j1amhakuis-a-nurseoullensva" + - "nguardxn--j6w193gxn--jlq61u9w7batochigiftsamnangerxn--jlster-bya" + - "roslavlaanderenxn--jrpeland-54axn--jvr189misakis-into-gamessinas" + - "hikitchenxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kf" + - "jord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--" + - "3pxu8kosugexn--koluokta-7ya57hakusandnessjoenxn--kprw13dxn--kpry" + - "57dxn--kpu716fguovdageaidnulminamiechizenxn--kput3isleofmandalxn" + - "--krager-gyasakaiminatoyakokamisatohobby-sitexasdaburyatiaarphar" + - "maciensmolenskooris-an-anarchistoricalsocietyxn--kranghke-b0axn-" + - "-krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-alto" + - "-adigexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugissmarterthan" + - "youxn--kvnangen-k0axn--l-1faitheguardianquanconagawakuyabukicks-" + - "assedicitadeliverybnikahokutogliattiresauheradxn--l1accenturekla" + - 
"mborghiniizaxn--laheadju-7yasuokaratexn--langevg-jxaxn--lcvr32dx" + - "n--ldingen-q1axn--leagaviika-52batsfjordrivelandrobaknoluoktaina" + - "ikawachinaganoharamcoalaheadjudaicaaarborteaches-yogasawaracingr" + - "oks-theatreemersongdalenviknakanojohanamakinoharavocatanzarowedd" + - "ingjerstadotsuruokamakurazakisofukushimarnardalillyokozehimejibe" + - "stadishakotankarmoyomitanobninskarpaczeladz-1xn--lesund-huaxn--l" + - "gbbat1ad8jevnakerxn--lgrd-poaciticasinorfolkebiblefrakkestadyndn" + - "s-workshoppdalowiczest-le-patrondheimperiaxn--lhppi-xqaxn--linds" + - "-pramericanartrysilkoshimizumakiyosumykolaivaroyxn--lns-qlanxess" + - "toragexn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacivila" + - "viationxn--lten-granexn--lury-iraxn--mely-iraxn--merker-kuaxn--m" + - "gb2ddestordalxn--mgb9awbfidelityxn--mgba3a3ejtulansomnaritakuras" + - "hikis-saveducatorahimeshimakanegasakinkobayashikshacknetnedalxn-" + - "-mgba3a4f16axn--mgba3a4franamizuholdingsmileirvikozagawaxn--mgba" + - "7c0bbn0axn--mgbaakc7dvfidonnakamuratakahamannortonsbergushikamif" + - "uranotairesewildlifestylexn--mgbaam7a8haldenxn--mgbab2bdxn--mgba" + - "i9a5eva00bauhausposts-and-telecommunicationsnasadoes-itveronagas" + - "ukemrxn--mgbai9azgqp6jewelryxn--mgbayh7gpaduaxn--mgbb9fbpobanaza" + - "waxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdownloadxn--mgberp4a5" + - "d4a87gxn--mgberp4a5d4arxn--mgbi4ecexposedxn--mgbpl2fhvalerxn--mg" + - "bqly7c0a67fbcivilisationxn--mgbqly7cvafredrikstadtvstorenburgxn-" + - "-mgbt3dhdxn--mgbtf8flekkefjordxn--mgbtx2bbcarrierxn--mgbx4cd0abb" + - "vieeexn--mix082fieldxn--mix891figuerestaurantoyonezawaxn--mjndal" + - "en-64axn--mk0axindustriesteamfamberkeleyxn--mk1bu44civilizationx" + - "n--mkru45iwchernovtsykkylvenetoeiheijis-a-doctorayxn--mlatvuopmi" + - "-s4axn--mli-tlapyatigorskozakis-an-entertainerxn--mlselv-iuaxn--" + - "moreke-juaxn--mori-qsakuhokkaidontexisteingeekpnxn--mosjen-eyato" + - "minamiawajikixn--mot-tlaquilancasterxn--mre-og-romsdal-qqbbtatar" + - 
"stanfshostrodawaravoues3-us-gov-west-1xn--msy-ula0halsaintlouis-" + - "a-anarchistoirehabmerxn--mtta-vrjjat-k7afamilycompanycivilwarman" + - "agementjomemorialukowhoswhokksundxn--muost-0qaxn--mxtq1misasagur" + - "is-leetrdxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--42c2d9axn--nit225kp" + - "pspiegelxn--nmesjevuemie-tcbajddarchaeologyxn--nnx388axn--nodess" + - "akuragawaxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn" + - "--nttery-byaeservegame-serverdalxn--nvuotna-hwaxn--nyqy26axn--o1" + - "achattanooganorilskleppharmacysnoasaitamatsukuris-not-certifiedo" + - "gawarabikomaezakirunoshiroomuraxn--o3cw4hammarfeastafricamagiche" + - "rnivtsiciliaxn--od0algxn--od0aq3bbvacationswatch-and-clockerxn--" + - "ogbpf8flesbergxn--oppegrd-ixaxn--ostery-fyatsukaratsuginamikatag" + - "amihoboleslawieclaimsavannahgaxn--osyro-wuaxn--p1acfdxn--p1aixn-" + - "-pbt977clickddielddanuorrikuzentakatajirissagaeroclubmedecincinn" + - "ationwidealerimo-i-ranadexchangeiseiyoichiropracticbcn-north-1xn" + - "--pgbs0dhlxn--porsgu-sta26filateliaxn--pssu33lxn--pssy2uxn--q9jy" + - "b4clinicateringebudejjuedischesapeakebayernurembergrondarxn--qck" + - "a1pmcdonaldstorfjordxn--qqqt11misawaxn--qxamurskinderoyxn--rady-" + - "iraxn--rdal-poaxn--rde-ularvikrasnodarxn--rdy-0nabarixn--rennesy" + - "-v1axn--rhkkervju-01aflakstadaokagakibichuoxn--rholt-mragowoodsi" + - "dexn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5narusawaxn--" + - "risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byatsushiroxn--rn" + - "y31hamurakamigoriginshinshiroxn--rovu88bentleyukuhashimojiitateb" + - "ayashijonawatextileksvikashiharaxasmatartanddesignieznorddalavag" + - "iske12xn--rros-granvindafjordxn--rskog-uuaxn--rst-0narutokorozaw" + - "axn--rsta-francaiseharaxn--ryken-vuaxn--ryrvik-byawaraxn--s-1far" + - "eastcoastaldefencexn--s9brj9cliniquenoharaxn--sandnessjen-ogbizh" + - "evskrasnoyarskommunexn--sandy-yuaxn--seral-lraxn--ses554gxn--sgn" + - "e-gratangenxn--skierv-utazaskvolloabathsbclintonoshoesaves-the-w" + - 
"halessandria-trani-barletta-andriatranibarlettaandriaxn--skjervy" + - "-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5narviikana" + - "nporovnoxn--slt-elabourxn--smla-hraxn--smna-gratis-a-bulls-fanxn" + - "--snase-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn-" + - "-sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbe" + - "ppubolognagatorockartuzyurihonjournalistjordalshalsenhsamsclubin" + - "dalinkashiwaraxn--srfold-byawatahamaxn--srreisa-q1axn--srum-graz" + - "xn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqberndunloppaci" + - "ficartierxn--stre-toten-zcbstpetersburgxn--t60b56axn--tckweather" + - "channelxn--tiq49xqyjewishartgalleryxn--tjme-hraxn--tn0agrinetban" + - "kzxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgstad-r1axn--trna" + - "-woaxn--troms-zuaxn--tysvr-vraxn--uc0atversicherungxn--uc0ay4axn" + - "--uist22hangoutsystemscloudcontrolapparmaxn--uisz3gxn--unjrga-rt" + - "aobaokinawashirosatobamagazinemurorangeologyxn--unup4yxn--uuwu58" + - "axn--vads-jraxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb" + - "eskidynaliascoli-picenord-frontierxn--vermgensberatung-pwbestbuy" + - "shousesamsunglassassinationalheritagematsubarakawagoepostfoldnav" + - "yatkakudamatsuepsonyoursidegreevje-og-hornnesanfranciscotlanduns" + - "agamiharaxn--vestvgy-ixa6oxn--vg-yiabcgxn--vgan-qoaxn--vgsy-qoa0" + - "jfkomitamamuraxn--vgu402clothinguitarsavonaplesaxoxn--vhquvestfo" + - "ldxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vu" + - "q861betainaboxfordeatnuorogersvpalanaklodzkodairaxn--w4r85el8fhu" + - "5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cloudfrontdoorxn--wgbl6axn--" + - "xhq521bhartiffanynysafetysfjordupontarioceanographiquexn--xkc2al" + - "3hye2axn--xkc2dl3a5ee0hannanmokuizumodernxn--y9a3aquariumisconfu" + - "sedxn--yer-znarvikredstonexn--yfro4i67oxn--ygarden-p1axn--ygbi2a" + - "mmxn--45brj9choyodobashichikashukujitawaraxn--ystre-slidre-ujbie" + - "lawallonieruchomoscienceandindustrynikiiyamanobeauxartsandcrafts" + - 
"angoddaxn--zbx025dxn--zf0ao64axn--zf0avxn--45q11christmasakikuga" + - "watchesatxjaworznoxn--zfr164biellaakesvuemieleccexperiaxz" +const text = "bievatmallorcadaquesanfranciscotlandupontarioceanographiquebifuk" + + "agawalmartateshinanomachintaijinuyamanouchikuhokuryugasakitashio" + + "barabihorologyusuharabikedagestangebilbaogakievenesangoddabillus" + + "trationikkoebenhavnikolaeverbankashiwarabiomutashinainvestmentsa" + + "njotateyamabirdartcenterprisesakikonaircraftraeumtgeradealstahau" + + "gesundurbanamexeterbirkenesoddtangenovaravennaharimalvikashiwaza" + + "kiyokawarabirthplacebjarkoyusuisservicesannanikonantanangerbjerk" + + "reimmobilieninohelplfinancialipetskasukabedzin-the-bandaioiraseb" + + "astopologyeongnamegawakembuchikumagayagawakkanaibetsubamericanfa" + + "milydscloudappspotenzachpomorskienebakkeshibechambagriculturenne" + + "budapest-a-la-masioninomiyakonojoshkar-olayangroupaleostrowiecar" + + "toonartdecoffeedbackasumigaurawa-mazowszextraspace-to-rentalstom" + + "akomaibarabjugnirasakis-a-candidateblockbusternidurhamburgliwice" + + "bloombergbauernrtatsunostrowwlkpmglobalashovhachinoheguris-a-cat" + + "ererbluedatingloboehringerikebmoattachmentsannohelsinkitahiroshi" + + "marshallstatebankasuyakutiabmsanokaszubyuudmurtiabmwegroweibolza" + + "nore-og-uvdalivornobnpparibaselburglogoweirbomloansantabarbarabo" + + "ndvrdnsantacruzsantafedexhibitionishiazais-a-celticsfanishigotpa" + + "ntheonishiharabonnishiizunazukis-a-chefarsundwgloppenzaogashimad" + + "achicagobododgemologicallyngenglandyndns-homednsanukis-a-conserv" + + "ativefsncfailomzansimagicasadelamonedavvesiidazaifudaigodoesntex" + + "istanbullensakerbookingmbhartiffanynysafetysfjordyndns-ip6bootsa" + + "otomeldalorenskogminakamichigangwonishikatakazakis-a-cpadoval-da" + + "ostavalleyuzawaboschaefflerdalotenkawabostikatowicebostonakijins" + + "ekikogentingmodenakasatsunairtrafficaseihichisobetsuitairabotani" + + "calgardenishikatsuragithubusercontentattoolsztynsettlersapodhale" + + 
"vangerbotanicgardenishikawazukanazawabotanyuzhno-sakhalinskatsus" + + "hikabeeldengeluidyndns-mailotteboutiquebecngmxboxenapponazure-mo" + + "bilebozentsujiiebradescorporationishimerabrandywinevalleybrasilj" + + "an-mayenishinomiyashironobresciabrindisibenikebristolgalsacebrit" + + "ishcolumbialowiezagannakadomari-elasticbeanstalkatsuyamasfjorden" + + "ishinoomotegotsukisosakitagatakamatsukawabroadcastlebtimnetzgora" + + "broadwaybroke-itaxihuanishinoshimatta-varjjatgorybrokerrypropert" + + "iesapporobronnoysundyndns-office-on-the-webcambridgestonewspaper" + + "brothermesaverdefensejnybrumunddalottokigawabrunelblagdenesnaase" + + "ralingenkainanaejrietisalatinabenogatachikawakayamagadancebetsuk" + + "ubabia-goracleaningatlantagajobojis-a-cubicle-slavellinowtvallea" + + "ostavernishiokoppegardyndns-picsaratovalled-aostavropolicebrusse" + + "lsardegnamsskoganeis-a-democratjeldsundyndns-remotegildeskalmyki" + + "abruxellesardiniabryanskjakdnepropetrovskiervaapsteiermarkaufeni" + + "shitosashimizunaminamiashigarabryneustarhubalestrandabergamoarek" + + "ehimejibestadishakotankarmoyokozembroideryomitanobninskarpaczela" + + "dz-1buskerudinewhampshirechtrainingretakamoriokamchatkameokameya" + + "mashinatsukigatakanabeatsarlouvrepairbusantiquest-a-la-maisondre" + + "-landebusinessebykleclercasertaishinomakikuchikuseikarugapartmen" + + "tsarpsborgrimstadyndns-serverbaniabuzenishiwakis-a-designerbuzzg" + + "orzeleccollegersundyndns-weberlincolnissandnessjoenissayokoshiba" + + "hikariwanumataketomisatokuyamatteledatabaseballooningripebwfashi" + + "onissedalovegaskimitsubatamicabbottjmaxxxfinitybzhitomirkutskjer" + + "voyagecloudfunctionsaudacntkmaxxn--11b4c3dcolognewmexicoldwarmia" + + "miastaplesauheradcolonialwilliamsburguideventsavannahgacoloradop" + + "lateaudiocolumbusheycommunitysnesaves-the-whalessandria-trani-ba" + + "rletta-andriatranibarlettaandriacomobaracomparemarkerryhotelsavo" + + "naplesaxocompute-1computerhistoryofscience-fictioncomsecuritysva" + + 
"rdoharuhrcondoshichinohedmarkhangelskypescaravantaaconferencecon" + + "structionconsuladollsbschokoladenconsultanthropologyconsultingvo" + + "llutskddielddanuorrikuzentakatajirissagaeroclubmedecincinnationw" + + "idealerimo-i-ranadexchangeiseiyoichiropracticbcn-north-1contactm" + + "palmspringsakercontemporaryarteducationalchikugojomedicaltanisse" + + "ttaiwanairguardcontractorskenconventureshinodesashibetsuikimobet" + + "suliguriacookingchannelveruminamibosogndaluxembourguitarscholars" + + "hipschooluxurycoolkuszgradcoopocznorthwesternmutualuzerncopenhag" + + "encyclopedicdn77-sslattumetlifeinsurancecorsicagliaridagawarszaw" + + "ashingtondclkfhskhabarovskhakassiacorvettemasekharkivguccipriani" + + "igataitogitsuldalvivano-frankivskharkovalledaostakkofuelcosenzam" + + "amibuilderschulexuslivinghistorycostumedio-campidano-mediocampid" + + "anomediocouncilcouponschwarzgwangjuifminamidaitomangotembaixadac" + + "ourseschweizippodlasiellakasamatsudovre-eikercq-acranbrookuwanal" + + "yticsciencecentersciencehistorycreditcardcreditunioncremonashoro" + + "kanaiecrewiiheyaizuwakamatsubushikusakadogawacricketrzyncrimeacr" + + "otonewportlligatewaycrowncrscientistor-elvdalcruisescjohnsoncuis" + + "inellajollamericanexpressexyzjcbnlculturalcentertainmentoyokawac" + + "uneocupcakecxn--1ck2e1balsanagochihayaakasakawaharaumakeupowiath" + + "letajimabariakepnordkappgjesdalillyonabaruconnectarnobrzegjovika" + + "ruizawaugustowadaegubs3-ap-southeast-2cymruovatoyonakagyokutoshi" + + "macyouthdfcbankhersonfilateliafilminamiechizenfinalfinancefinear" + + "tsettsurfastlyfinlandfinnoyfirebaseappamperedchefauskedsmokorset" + + "agayaseljordfirenzefirestonextdirectoryfirmdalegoldpointelligenc" + + "efishingolfbsbxn--1ctwolominamatamayukis-a-geekhmelnitskiyamashi" + + "kefitjarqhachiojiyahikobeautydalfitnessettlementoyookarasjohkami" + + "noyamatsuris-a-greenfjalerflickragerotikaluganskhmelnytskyivalle" + + "e-aosteroyflightsevastopolezajskhvalleeaosteigenflirumansionseve" + + 
"nassisicilyfloguchikuzenfloraflorencefloridafloristanohatakaharu" + + "ssiafloromskoguovdageaidnulminamifuranoflowersewildlifestyleflsm" + + "idthruhereggio-emilia-romagnakanotoddenflynnhubalsfjordiskstatio" + + "naustdalimanowarudaukraanghke164fndfolldalfoodnetworkangerfor-be" + + "tter-thandafor-ourfor-somedizinhistorischesfranziskanerimamatera" + + "mochizukirafor-theaterforexrothachirogatakanezawaforgotdnshangri" + + "langevagrarboretumbriaforli-cesena-forlicesenaforlikes-piedmontb" + + "lancomeeresharis-a-gurulsandoyforsaleikangerforsandasuolodingenf" + + "ortmissoulan-udell-ogliastrakhanawawilliamhillfortworthadanotoga" + + "waforuminamiiselectoyosatotalfosnesharpanamafotoyotaris-a-hard-w" + + "orkerfoxn--1lqs03nfreiburgushikamifuranotaireshawaiijimarylandfr" + + "eightcmwinbaltimore-og-romsdalimitedunetflixilimoliserniaurskog-" + + "holandroverhalla-speziaetnagahamaroygardendoftheinternetcimdbala" + + "tinordre-landds3-ap-northeast-2freseniusdecorativeartshellaspezi" + + "afribourgxn--1lqs71dfriuli-v-giuliafriuli-ve-giuliafriuli-vegiul" + + "iafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv" + + "-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriulive" + + "neziagiuliafriulivgiuliafrlfroganshimokawafrognfrolandfrom-akreh" + + "amnfrom-alfrom-arfrom-azlgzpanasonicheltenham-radio-operaunitele" + + "markautokeinofrom-canonoichikawamisatodayfrom-collectionfrom-cto" + + "yotomiyazakis-a-hunterfrom-dchelyabinskodjeffersonisshinguernsey" + + "from-dellogliastraderfrom-flandershimokitayamafrom-gaulardalfrom" + + "-higashiagatsumagoirmitakeharafrom-iafrom-idfrom-ilfrom-incheonf" + + "rom-kshimonitayanagivestbytomaritimekeepingfrom-kyknetoyotsukaid" + + "ownloadfrom-lanbibaidarfrom-manxn--1qqw23afrom-mdfrom-meetoyoura" + + "from-microsoftbanklabudhabikinokawabarthadselfipirangafrom-mnfro" + + "m-modalenfrom-mshimonosekikawafrom-mtnfrom-nchernigovernmentjome" + + "morialucaniafrom-ndfrom-nexusgardenfrom-nhktoystre-slidrettozawa" + + 
"from-njcparaglidingfrom-nminamiizukamitondabayashiogamagoriziafr" + + "om-nvanylvenicefrom-nyfrom-ohkurafrom-oketogurafrom-orfrom-pader" + + "bornfrom-pratohmaoris-a-knightozsdefrom-ris-a-landscaperugiafrom" + + "-schoenbrunnfrom-sdnipropetrovskmpspbambleborkarumaifarmsteadivt" + + "asvuodnakaiwamizawaustevollavangenativeamericanantiques3-eu-cent" + + "ral-1from-tnfrom-txn--2m4a15efrom-utazuerichardlikescandyndns-at" + + "-homedepotaruis-a-lawyerfrom-vadsochildrensgardenfrom-vtranbyfro" + + "m-wafrom-wielunnerfrom-wvaolbia-tempio-olbiatempioolbialystokkem" + + "erovodkagoshimaintenancefrom-wyfrosinonefrostalowa-wolawafroyaha" + + "badajozorahkkeravjudygarlandfstcgrouparisor-fronfujiiderafujikaw" + + "aguchikonefujiminohtawaramotoineppugliafujinomiyadafujiokayamarb" + + "urgfujisatoshonairportland-4-salernogiessengerdalaskanittedallas" + + "alleaseeklogesquarezzoologyfujisawafujishiroishidakabiratoridelm" + + "enhorstalbanshimosuwalkis-a-liberalfujitsurugashimarinefujixerox" + + "n--30rr7yfujiyoshidafukayabeardubaiduckdnsdojoburgfukuchiyamadaf" + + "ukudominichernihivanovosibirskydivingrondarfukuis-a-libertarianf" + + "ukumitsubishigakirkeneshimotsukefukuokazakirovogradoyfukuroishik" + + "arikaturindalfukusakiryuohaebaruminamimakis-a-linux-useranishiar" + + "itabashikaoizumizakitaurayasudafukuyamagatakahashimamakisarazure" + + "websiteshikagamiishibukawafunabashiriuchinadafunagatakahatakaish" + + "imoichinosekigaharafunahashikamiamakusatsumasendaisennangonoheji" + + "s-a-llamarylhursteinkjerusalembetsukuis-a-musicianfundaciofuoisk" + + "ujukuriyamarcheaparliamentrani-andria-barletta-trani-andriafuoss" + + "koczowindmillfurnitureggiocalabriafurubiraquarelleasingleshimots" + + "umafurudonostiafurukawairtelecityeatshinichinanfusodegaurafussai" + + "kishiwadafutabayamaguchinomigawafutboldlygoingnowhere-for-morego" + + "ntrailroadfuttsurugiminamiminowafvgfyis-a-nascarfanfylkesbiblack" + + "fridayfyresdalhannovareserveftparocherkasyzrankoshigayaltaikis-a" + + 
"-painteractivegarsheis-a-patsfanhanyuzenhapmirhappoulvikokonoeha" + + "reidsbergenharstadharvestcelebrationhasamarahasaminami-alpssells" + + "-for-unzenhashbanghasudahasvikolobrzegyptianpachigasakidsmynaspe" + + "rschlesischesurancehatogayahoooshikamaishimofusartshinkamigotoyo" + + "hashimotomobellunordreisa-geekomaganehatoyamazakitahatakaokamiki" + + "tayamatotakadahatsukaichiharahattfjelldalhayashimamotobuildingha" + + "zuminobusells-itraniandriabarlettatraniandriahbofagehembygdsforb" + + "undhemneshinshinotsurgeonshalloffamelhustkamisunagawahemsedalher" + + "okussldheroyhgtvarggatranoyhigashichichibungotakadatsunanjoetsuw" + + "anouchikujogaszkoladbrokesennumamurogawalterhigashihiroshimanehi" + + "gashiizumozakitakamiizumisanofiatransportrapaniimimatakatoris-a-" + + "personaltrainerhigashikagawahigashikagurasoedahigashikawakitaaik" + + "itakatakarazukamikoaniikappulawyhigashikurumeguroroskoleirvikoma" + + "kiyosatokamachippubetsubetsugaruhigashimatsushimarugame-hostingh" + + "igashimatsuyamakitaakitadaitoigawahigashimurayamalatvuopmidoris-" + + "a-photographerokuapparshinshirohigashinarusellsyourhomegoodshint" + + "okushimahigashinehigashiomihachimanchesterhigashiosakasayamamoto" + + "rcycleshintomikasaharahigashishirakawamatakasagooglecodespotrave" + + "lchannelhigashisumiyoshikawaminamiaikitakyushuaiahigashitsunowru" + + "zhgorodoyhigashiurausukitamidsundhigashiyamatokoriyamanakakogawa" + + "higashiyodogawahigashiyoshinogaris-a-playerhiraizumisatohnoshooh" + + "irakatashinagawahiranais-a-republicancerresearchaeologicaliforni" + + "ahirarahiratsukagawahirayaitakasakitamotosumitakaginankokubunjis" + + "-a-rockstarachowicehisayamanashiibaghdadultravelersinsurancehist" + + "orichouseshinyoshitomiokaniepcehitachiomiyaginowaniihamatamakawa" + + "jimaritimodellinghitachiotagopartis-a-socialistmeindianapolis-a-" + + "bloggerhitoyoshimifunehitradinghjartdalhjelmelandholeckobierzyce" + + "holidayhomeipartnershiojirishirifujiedahomelinuxn--32vp30hagebos" + + 
"tadhomesensembokukitanakagusukumoduminamiogunicomcastresistanceh" + + "omeunixn--3bst00minamisanrikubetsupplyhondahonefosshioyameloyali" + + "stockholmestrandhoneywellhongorgehonjyoitakashimarumorimachidaho" + + "rnindalhorseminehortendofinternetrdhoteleshirahamatonbetsurgeryh" + + "otmailhoyangerhoylandetroitskomatsushimashikiyosemitehumanitiesh" + + "irakoenighurdalhurumajis-a-soxfanhyllestadhyogoris-a-studentalhy" + + "ugawarahyundaiwafunejgorajlchitachinakagawatchandclockazimierz-d" + + "olnyjlljmpartshishikuis-an-actorjnjelenia-gorajoyokaichibahcavuo" + + "tnagaraholtalenjpmorganichitosetogakushimotoganewjerseyjpnchloej" + + "prshisognejuniperjurkristiansandcatshisuifuettertdasnetzwindowsh" + + "itaramakristiansundkrodsheradkrokstadelvaldaostarostwodzislawinn" + + "ershizukuishimogosenkryminamitanekumatorinokumejimasudakumenanyo" + + "kkaichirurgiens-dentisteshizuokanoyakagekunisakis-an-entertainer" + + "kunitachiarailwaykunitomigusukumamotoyamassa-carrara-massacarrar" + + "amassabunkyonanaoshimageandsoundandvisionkunneppupartykunstsamml" + + "ungkunstunddesignkuokgroupasadenamsosnowiechocolatelevisionrwhal" + + "ingrongausdaluccapebretonamiasakuchinotsuchiurakawassamukawatari" + + "cohdavvenjargamvikazokureitrentino-stirolkurgankurobelaudiblebes" + + "byglandkurogimilitarykuroisoftwarendalenugkuromatsunais-bykurota" + + "kikawasakis-certifiedekakegawakurskomonokushirogawakustanais-fou" + + "ndationkusupersportrentino-sud-tirolkutchanelkutnokuzbassnillfjo" + + "rdkuzumakis-gonekvafjordkvalsundkvamlidlugolekagaminord-aurdalvd" + + "alipayufuchukotkafjordkvanangenkvinesdalkvinnheradkviteseidskogk" + + "vitsoykwpspjelkavikomorotsukamishihoronobeokaminokawanishiaizuba" + + "ngekyotobetsupplieshoujis-into-animeiwamaseratis-a-therapistoiak" + + "yowariasahikawamishimatsumotofukemissileksvikongsbergmisugitokon" + + "amegatakayamatsunomitourismolanciamitoyoakemiuramiyazumiyotamano" + + "mjondalenmlbarclaycards3-us-west-1monmouthaibarakitagawamonsterm" + + 
"onticellolmontrealestatefarmequipmentrentino-sudtirolmonza-brian" + + "zaporizhzhekinannestadmonza-e-della-brianzaporizhzhiamonzabrianz" + + "apposlombardiamondshowtimemerckongsvingermonzaebrianzaramonzaede" + + "llabrianzamoparachutingmordoviajessheiminamiuonumatsumaebashimod" + + "atemoriyamatsusakahoginozawaonsenmoriyoshiokamitsuemormoneymoroy" + + "amatsushigemortgagemoscowiostrolekaneyamaxunjargamoseushistorymo" + + "sjoenmoskeneshriramsterdambulanceomossienarashinomosvikoninjamis" + + "onmoviemovistargardmtpccwitdkonskowolancashirehabmermtranakayama" + + "tsuuramuenstermugithubcloudusercontentrentino-sued-tirolmuikamog" + + "awamukochikushinonsenergymulhouservebbsigdalmultichoicemunakatan" + + "emuncieszynmuosattemupassagensimbirskonsulatrobeermurmanskonyvel" + + "oftrentino-s-tirollagrigentomologyeonggiehtavuoatnagaivuotnagaok" + + "akyotambabydgoszczecinemailmurotorcraftrentino-suedtirolmusashim" + + "urayamatsuzakis-leetrentino-a-adigemusashinoharamuseetrentinoa-a" + + "digemuseumverenigingmutsuzawamutuellelmyokohamamatsudamypetsimpl" + + "e-urlmyphotoshibahccavuotnagareyamaizurubtsovskiptveterinairebun" + + "goonomichinomiyakemytis-a-bookkeeperminamiyamashirokawanabelgoro" + + "deophiladelphiaareadmyblogsitephilatelyphilipsyphoenixn--3e0b707" + + "ephotographysiopiagetmyipassenger-associationpictetrentinoaadige" + + "pictureslupskooris-an-actresshiraois-a-techietis-a-teacherkassym" + + "antechnologypiemontepilotsmolenskopervikommunalforbundpinkoryola" + + "sitepioneerpippupiszpittsburghofedjejuegoshikiminokamoenairlineb" + + "raskaunbieidsvollpiwatepizzapkosaigawaplanetariuminanoplantation" + + "plantsnoasaitamatsukuris-lostre-toteneis-an-accountantshiranukan" + + "makiwakunigamihamadaplatformincommbankomvuxn--3ds443gplaystation" + + "plazaplchofunatorientexpressasayamaplombardyndns-at-workinggroup" + + "aviancapetownplumbingotvbarclays3-us-west-2plusterpmnpodzonepohl" + + "pokerpokrovskosakaerodromegallupinbarcelonagasakijobservercellie" + + 
"rneues3-us-gov-west-1politiendapolkowicepoltavalle-aostathellewi" + + "smillerpomorzeszowithgoogleapisa-hockeynutrentinoalto-adigeporde" + + "nonepornporsangerporsangugeporsgrunnanpoznanpraxis-a-bruinsfansn" + + "zprdpreservationpresidioprgmrprimelbourneprincipeprivneprochowic" + + "eproductionsokanraprofermobilyprogressivenneslaskerrylogisticsok" + + "ndalprojectrentinoaltoadigepromombetsupportrentinos-tirolpropert" + + "yprotectionprudentialpruszkowithyoutubeneventochiokinoshimalselv" + + "endrellprzeworskogptzpvtrentinostirolpwchonanbugattipschmidtre-g" + + "auldalucernepzqldqponqslgbtrentinosud-tirolqvchoseiroumuenchenst" + + "orfjordstpetersburgstreamurskinderoystudiostudyndns-freemasonryo" + + "kamikawanehonbetsurutaharastuff-4-salestuttgartrentinosuedtirols" + + "urnadalsurreysusakis-slickomforbananarepublicargodaddynathomebui" + + "ltarumizusawaustinnaturalhistorymuseumcentereviewskrakowebhopage" + + "frontappagespeedmobilizerobihirosakikamijimagroks-thisamitsukeis" + + "enbahnasushiobaraeroportalabamagasakishimabarackmaze-burggfarmer" + + "seinewyorkshireggio-calabriabruzzoologicalvinklein-addrammenuern" + + "bergdyniabogadocscbg12000susonosuzakanumazurysuzukanzakiwiensuzu" + + "kis-uberleetrentino-aadigesvalbardudinkakamigaharasveiosvelvikos" + + "himizumakiyosumykolaivaroysvizzeraswedenswidnicapitalonewholland" + + "swiebodzindianmarketingswiftcoverisignswinoujscienceandhistorysw" + + "isshikis-very-badaddjamalborkdalsxn--3oq18vl8pn36atuscanytushuis" + + "sier-justicetuvalle-daostavangervestnesopotrentinosudtirolvestre" + + "-slidreamhostersor-odalvestre-totennishiawakuravestvagoyvevelsta" + + "dvibo-valentiavibovalentiavideovillaskoyabearalvahkihokumakogeni" + + "waizumiotsukumiyamazonawsabaerobaticketsor-varangervinnicarbonia" + + "-iglesias-carboniaiglesiascarboniavinnytsiavipsinaappfizervirgin" + + "iavirtualvirtuelvisakatakinouevistaprintuitrentottoris-very-evil" + + "lageviterboltrevisohughesolognevivoldavladikavkazanvladimirvladi" + + 
"vostokaizukarasuyamazoevlogvolkenkunderseaportroandinosaurepbody" + + "ndns-blogdnsolundbeckoseis-an-anarchistoricalsocietyvolkswagents" + + "orfoldvologdanskostromahachijorpelandvolvolgogradvolyngdalvorone" + + "zhytomyrvossevangenvotevotingvotoursorreisahayakawakamiichikaise" + + "is-saveducatorahimeshimakanegasakinkobayashikshacknetnedalvrnwor" + + "se-thangglidingwowiwatsukiyonowritesthisblogspotrogstadwroclawlo" + + "clawekosugewtchoshibuyachiyodawtferrarawuozuwwworldwzmiuwajimaxn" + + "--4gq48lf9jeonnamerikawauexn--4it168dxn--4it797kotouraxn--4pvxso" + + "rtlandxn--54b7fta0cchromediaxn--55qw42gxn--55qx5dxn--5js045dxn--" + + "5rtp49chryslerxn--5rtq34kouhokutamakis-an-artistjohnxn--5su34j93" + + "6bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--" + + "7t0a264chungbukazunoxn--80adxhksoruminnesotaketakatsukis-into-ca" + + "rshiraokannamiharuxn--80ao21axn--80aqecdr1axn--80asehdbarefootba" + + "llangenoamishirasatobishimalopolskanlandivttasvuotnakamagayachts" + + "akuraibigawaustraliaisondriodejaneirochesterhcloudcontrolledigit" + + "alaziobirakunedre-eikereportarantomsk-uralsk12xn--80aswgxn--80au" + + "dnedalnxn--8ltr62kounosunndalxn--8pvr4uxn--8y0a063axn--90a3acade" + + "mydroboatsaritsynologyeongbukouyamashikokuchuoxn--90aishobaraomo" + + "riguchiharagusaarlandxn--90azhair-surveillancexn--9dbhblg6diethn" + + "ologyxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byan" + + "agawaxn--asky-iraxn--aurskog-hland-jnbargainstitutelefonicafeder" + + "ationaval-d-aosta-valleyonagoyaustrheimatunduhrennesoyekaterinbu" + + "rgjemnes3-eu-west-1xn--avery-yuasakegawaxn--b-5gaxn--b4w605ferdx" + + "n--bck1b9a5dre4chungnamdalseidfjordyroyrvikingrossetouchijiwadel" + + "tajimicrolightingroundhandlingroznyxn--bdddj-mrabdxn--bearalvhki" + + "-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr" + + "-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptamb" + + "oversaillesooxn--blt-elaborxn--bmlo-graingerxn--bod-2naroyxn--br" + + 
"nny-wuaccident-investigationjukudoyamaceratabuseat-band-campania" + + "mallamadridvagsoyericssonlineat-urlxn--brnnysund-m8accident-prev" + + "entionxn--brum-voagatromsakakinokiaxn--btsfjord-9zaxn--c1avgxn--" + + "c2br7gxn--c3s14minternationalfirearmshowaxn--cck2b3barreauctiona" + + "vigationavuotnakhodkanagawauthordalandroidiscountyumenaturalscie" + + "ncesnaturelles3-external-1xn--cg4bkis-very-goodhandsonxn--ciqpnx" + + "n--clchc0ea0b2g2a9gcdn77-securecipesaro-urbino-pesarourbinopesar" + + "omaniwakuratelekommunikationxn--comunicaes-v6a2oxn--correios-e-t" + + "elecomunicaes-ghc29axn--czr694barrel-of-knowledgeometre-experts-" + + "comptablesakyotanabellevuelosangelesjaguarchitecturealtychyattor" + + "neyagawalbrzycharternopilawalesundiyonaguniversityoriikasaokamio" + + "kamiminersalangenayoroceanographicsalondonetskashibatakasugaibmd" + + "npalacemergencyberlevagangaviikanonjiinetatamotorsaltdalindasiau" + + "tomotivecodyn-o-saurlandes3-external-2xn--czrs0tromsojavald-aost" + + "arnbergxn--czru2dxn--czrw28barrell-of-knowledgeorgeorgiautoscana" + + "daejeonbukariyakumoldebinagisoccertificationaturbruksgymnaturhis" + + "torisches3-fips-us-gov-west-1xn--d1acj3bashkiriaveroykenvironmen" + + "talconservationatuurwetenschappenaumburgjerdrumckinseyokosukarel" + + "iancebinosegawasmatartanddesignieznorddalavagiske12xn--d1alfarom" + + "eoxn--d1atrusteexn--d5qv7z876churchaseljeepilepsydneyxn--davvenj" + + "rga-y4axn--djrs72d6uyxn--djty4kouzushimasoyxn--dnna-grajewolters" + + "kluwerxn--drbak-wuaxn--dyry-iraxn--eckvdtc9dxn--efvn9southcaroli" + + "nazawaxn--efvy88hakatanotteroyxn--ehqz56nxn--elqq16hakodatevaksd" + + "alxn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429kozagawaxn--f" + + "hbeiarnxn--finny-yuaxn--fiq228c5hsouthwestfalenxn--fiq64basilica" + + "taniavocatanzaroweddingjerstadotsuruokamakurazakisofukushimarnar" + + "dalillesandefjordiscoveryggeelvinckarlsoyokotebizenakaniikawatan" + + "agurasnesoddenmarkets3-ap-southeast-1kappleangaviikadenaamesjevu" + + 
"emielnoboribetsucks3-ap-northeast-1xn--fiqs8sowaxn--fiqz9spreadb" + + "ettingxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351ex" + + "n--fpcrj9c3dxn--frde-grandrapidspydebergxn--frna-woaraisaijosoyr" + + "ovigorlicexn--frya-hraxn--fzc2c9e2chuvashiaxn--fzys8d69uvgmailxn" + + "--g2xx48circlegallocuscountryestateofdelawarecreationxn--gckr3f0" + + "ferrarittogokasells-for-lesscrapper-sitexn--gecrj9circuscultured" + + "umbrellahppiacenzakopanerairforcechirealtorlandxn--ggaviika-8ya4" + + "7hakonexn--gildeskl-g0axn--givuotna-8yandexn--3pxu8kotohiradomai" + + "nsureisenxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-ve" + + "ry-nicexn--gmqw5axn--h-2fairwindsrlxn--h1aeghakubankmshinjournal" + + "ismailillehammerfest-mon-blogueurovisionxn--h2brj9citadeliverybn" + + "ikahokutogliattiresaskatchewanggouvicenzaxn--hbmer-xqaxn--hcesuo" + + "lo-7ya35basketballfinanz-2xn--hery-iraxn--hgebostad-g3axn--hmmrf" + + "easta-s4acctrverranzanxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxa" + + "xn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b" + + "1a6a2exn--imr513nxn--indery-fyaotsurgutsiracusaitokyotangovtrysi" + + "lkoshunantokashikizunokunimilanoxn--io0a7is-very-sweetrentino-al" + + "to-adigexn--j1aeferreroticampobassociatescrappingujolsterxn--j1a" + + "mhakuis-a-nurseoullensvanguardxn--j6w193gxn--jlq61u9w7batochigif" + + "tsalvadordalibabaikaliszczytnordlandrangedalindesnesalzburgladel" + + "oittenrightathomeftpaccessamegawavoues3-sa-east-1xn--jlster-byar" + + "oslavlaanderenxn--jrpeland-54axn--jvr189misakis-into-cartoonshir" + + "atakahagivingxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn" + + "--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5" + + "xn--42c2d9axn--koluokta-7ya57hakusandiegoodyearthagakhanamigawax" + + "n--kprw13dxn--kpry57dxn--kpu716fetsundxn--kput3is-with-thebandoo" + + "mdnsaliascolipicenord-odalxn--krager-gyasakaiminatoyakokamisatoh" + + "obby-sitexasdaburyatiaarpharmaciensirdalxn--kranghke-b0axn--krds" + + 
"herad-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-altoadige" + + "xn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugisleofmandalxn--kvn" + + "angen-k0axn--l-1faitheguardianquanconagawakuyabukicks-assediciti" + + "cateringebudejjuedischesapeakebayernurembergriwataraidyndns-work" + + "shoppdalowiczest-le-patrondheimperiaxn--l1accentureklamborghinii" + + "zaxn--laheadju-7yasuokaratexn--langevg-jxaxn--lcvr32dxn--ldingen" + + "-q1axn--leagaviika-52batsfjordrivelandrobaknoluoktainaikawachina" + + "ganoharamcoalaheadjudaicaaarborteaches-yogasawaracingroks-theatr" + + "eemersongdalenviknakanojohanamakinoharaxastronomydstvedestrandgc" + + "ahcesuolocalhistoryazannefrankfurtargets-itargi234xn--lesund-hua" + + "xn--lgbbat1ad8jevnakerxn--lgrd-poacoachampionshiphopenair-traffi" + + "c-controlleyxn--lhppi-xqaxn--linds-pramericanartulansolutionsola" + + "rssonxn--lns-qlanxessrtrentinosued-tirolxn--loabt-0qaxn--lrdal-s" + + "raxn--lrenskog-54axn--lt-liacivilaviationxn--lten-granexn--lury-" + + "iraxn--mely-iraxn--merker-kuaxn--mgb2ddesrvdonskosherbrookegawax" + + "n--mgb9awbfgulenxn--mgba3a3ejtunesomaxn--mgba3a4f16axn--mgba3a4f" + + "ranamizuholdingsmileirfjordxn--mgba7c0bbn0axn--mgbaakc7dvfidelit" + + "yxn--mgbaam7a8haldenxn--mgbab2bdxn--mgbai9a5eva00bauhausposts-an" + + "d-telecommunicationsnasadoes-itveronagasukemrxn--mgbai9azgqp6jew" + + "elryxn--mgbayh7gpaduaxn--mgbb9fbpobanazawaxn--mgbbh1a71exn--mgbc" + + "0a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgb" + + "i4ecexposedxn--mgbpl2fhvalerxn--mgbqly7c0a67fbcivilisationxn--mg" + + "bqly7cvafredrikstadtvstoragexn--mgbt3dhdxn--mgbtf8flekkefjordxn-" + + "-mgbtx2bbcarrierxn--mgbx4cd0abbvieeexn--mix082fidonnakamuratakah" + + "amannortonsbergunmarriottoyonezawaxn--mix891fieldxn--mjndalen-64" + + "axn--mk0axindustriesteamfamberkeleyxn--mk1bu44civilizationxn--mk" + + "ru45issmarterthanyouxn--mlatvuopmi-s4axn--mli-tlapyatigorskozaki" + + "s-an-engineeringxn--mlselv-iuaxn--moreke-juaxn--mori-qsakuhokkai" + + 
"dontexisteingeekpnxn--mosjen-eyatominamiawajikiwchiryukyuragifue" + + "fukihaborokunohealthcareersarufutsunomiyawakasaikaitakoelniyodog" + + "awaxn--mot-tlaquilancasterxn--mre-og-romsdal-qqbbtatarstanflatan" + + "gerxn--msy-ula0halsaintlouis-a-anarchistoireggioemiliaromagnakat" + + "ombetsumidatlantichernivtsiciliaxn--mtta-vrjjat-k7afamilycompany" + + "civilwarmanagementjxjaworznoxn--muost-0qaxn--mxtq1misasaguris-in" + + "to-gamessinashikitchenxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45brj9" + + "choyodobashichikashukujitawaraxn--nit225kppspiegelxn--nmesjevuem" + + "ie-tcbajddarchaeologyxn--nnx388axn--nodessakuragawaxn--nqv7fs00e" + + "maxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservegame" + + "-serverdalxn--nvuotna-hwaxn--nyqy26axn--o1achattanooganorilsklep" + + "pharmacyslingxn--o3cw4hammarfeastafricamagichernovtsykkylvenetoe" + + "iheijis-a-doctorayxn--od0algxn--od0aq3bbvacationswatch-and-clock" + + "erxn--ogbpf8flesbergxn--oppegrd-ixaxn--ostery-fyatsukaratsuginam" + + "ikatagamihoboleslawieclaimsassaris-a-financialadvisor-aurdaluroy" + + "xn--osyro-wuaxn--p1acfdxn--p1aixn--pbt977clickchristiansburgrpal" + + "ermomasvuotnakatsugawaxn--pgbs0dhlxn--porsgu-sta26figuerestauran" + + "toyonoxn--pssu33lxn--pssy2uxn--q9jyb4clinicatholicasinorfolkebib" + + "lefrakkestadyndns-wikindlegnicamerakershus-east-1xn--qcka1pmcdon" + + "aldstordalxn--qqqt11misawaxn--qxamusementurystykarasjoksnesomnar" + + "itakurashikis-not-certifiedogawarabikomaezakirunoshiroomuraxn--r" + + "ady-iraxn--rdal-poaxn--rde-ularvikrasnodarxn--rdy-0nabarixn--ren" + + "nesy-v1axn--rhkkervju-01aflakstadaokagakibichuoxn--rholt-mragowo" + + "odsidexn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5narusawa" + + "xn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byatsushiroxn" + + "--rny31hamurakamigoriginshinjukumanoxn--rovu88bentleyukuhashimoj" + + "iitatebayashijonawatextileitungsenfshostrodawaraxn--rros-granvin" + + "dafjordxn--rskog-uuaxn--rst-0narutokorozawaxn--rsta-francaisehar" + + 
"axn--ryken-vuaxn--ryrvik-byawaraxn--s-1fareastcoastaldefencexn--" + + "s9brj9cliniquenoharaxn--sandnessjen-ogbizhevskrasnoyarskommunexn" + + "--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratangenxn--skierv-" + + "utazaskvolloabathsbclintonoshoesatxn--0trq7p7nnxn--skjervy-v1axn" + + "--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5narviikananporov" + + "noxn--slt-elabourxn--smla-hraxn--smna-gratis-a-bulls-fanxn--snas" + + "e-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr-au" + + "rdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeppubol" + + "ognagatorockartuzyurihonjournalistjordalshalsenhsamnangerxn--srf" + + "old-byawatahamaxn--srreisa-q1axn--srum-grazxn--stfold-9xaxn--stj" + + "rdal-s1axn--stjrdalshalsen-sqberndunloppacificartierxn--stre-tot" + + "en-zcbstorenburgxn--t60b56axn--tckweatherchannelxn--tiq49xqyjewi" + + "shartgalleryxn--tjme-hraxn--tn0agrinetbankzxn--tnsberg-q1axn--to" + + "r131oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--ty" + + "svr-vraxn--uc0atversicherungxn--uc0ay4axn--uist22hangoutsystemsc" + + "loudcontrolapparmaxn--uisz3gxn--unjrga-rtaobaokinawashirosatobam" + + "agazinemurorangeologyxn--unup4yxn--uuwu58axn--vads-jraxn--vard-j" + + "raxn--vegrshei-c0axn--vermgensberater-ctbeskidynaliascoli-piceno" + + "rd-frontierxn--vermgensberatung-pwbestbuyshousesamsclubindalinka" + + "shiharaxn--vestvgy-ixa6oxn--vg-yiabcgxn--vgan-qoaxn--vgsy-qoa0jf" + + "komitamamuraxn--vgu402clothingruexn--vhquvestfoldxn--vler-qoaxn-" + + "-vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861betainaboxfor" + + "deatnuorogersvpalanaklodzkodairaxn--w4r85el8fhu5dnraxn--w4rs40lx" + + "n--wcvs22dxn--wgbh1cloudfrontdoorxn--wgbl6axn--xhq521bielawallon" + + "ieruchomoscienceandindustrynikiiyamanobeauxartsandcraftsamsungla" + + "ssassinationalheritagematsubarakawagoepostfoldnavyatkakudamatsue" + + "psonyoursidegreevje-og-hornnesandvikcoromantovalle-d-aostatoilin" + + "zainfinitinfoggiaxn--xkc2al3hye2axn--xkc2dl3a5ee0hannanmokuizumo" + + 
"dernxn--y9a3aquariumisconfusedxn--yer-znarvikredstonexn--yfro4i6" + + "7oxn--ygarden-p1axn--ygbi2ammxn--45q11christmasakikugawatchesase" + + "boknowsitallukowhoswhokksundynv6xn--ystre-slidre-ujbiellaakesvue" + + "mieleccexn--zbx025dxn--zf0ao64axn--zf0avxn--4gbriminingxn--zfr16" + + "4bieszczadygeyachimataipeigersundunsagamiharaxperiaxz" // nodes is the list of nodes. Each node is represented as a uint32, which // encodes the node's children, wildcard bit and node type (as an index into @@ -466,7874 +467,7893 @@ // [15 bits] text index // [ 6 bits] text length var nodes = [...]uint32{ - 0x00352883, // n0x0000 c0x0000 (---------------) + I aaa - 0x0034ae44, // n0x0001 c0x0000 (---------------) + I aarp - 0x00250d46, // n0x0002 c0x0000 (---------------) + I abarth - 0x0023e483, // n0x0003 c0x0000 (---------------) + I abb - 0x0023e486, // n0x0004 c0x0000 (---------------) + I abbott - 0x00365f46, // n0x0005 c0x0000 (---------------) + I abbvie - 0x0039a8c3, // n0x0006 c0x0000 (---------------) + I abc - 0x0031f604, // n0x0007 c0x0000 (---------------) + I able - 0x00329987, // n0x0008 c0x0000 (---------------) + I abogado - 0x00250988, // n0x0009 c0x0000 (---------------) + I abudhabi - 0x01a00342, // n0x000a c0x0006 (n0x0607-n0x060d) + I ac - 0x0030cf07, // n0x000b c0x0000 (---------------) + I academy - 0x0034fd09, // n0x000c c0x0000 (---------------) + I accenture - 0x002cfaca, // n0x000d c0x0000 (---------------) + I accountant - 0x002cfacb, // n0x000e c0x0000 (---------------) + I accountants - 0x00233d03, // n0x000f c0x0000 (---------------) + I aco - 0x0028c8c6, // n0x0010 c0x0000 (---------------) + I active - 0x00239e85, // n0x0011 c0x0000 (---------------) + I actor - 0x01e001c2, // n0x0012 c0x0007 (n0x060d-n0x060e) + I ad - 0x00212904, // n0x0013 c0x0000 (---------------) + I adac - 0x0025ab43, // n0x0014 c0x0000 (---------------) + I ads - 0x002a3105, // n0x0015 c0x0000 (---------------) + I adult - 0x022030c2, // n0x0016 c0x0008 (n0x060e-n0x0616) + I ae 
- 0x00255e03, // n0x0017 c0x0000 (---------------) + I aeg - 0x026e2dc4, // n0x0018 c0x0009 (n0x0616-n0x066d) + I aero - 0x0026af85, // n0x0019 c0x0000 (---------------) + I aetna - 0x02a05242, // n0x001a c0x000a (n0x066d-n0x0672) + I af - 0x0036d9ce, // n0x001b c0x0000 (---------------) + I afamilycompany - 0x00251303, // n0x001c c0x0000 (---------------) + I afl - 0x00374cc6, // n0x001d c0x0000 (---------------) + I africa - 0x00374ccb, // n0x001e c0x0000 (---------------) + I africamagic - 0x02e01b82, // n0x001f c0x000b (n0x0672-n0x0677) + I ag - 0x00283a87, // n0x0020 c0x0000 (---------------) + I agakhan - 0x0023cc46, // n0x0021 c0x0000 (---------------) + I agency - 0x03200502, // n0x0022 c0x000c (n0x0677-n0x067b) + I ai - 0x00215703, // n0x0023 c0x0000 (---------------) + I aig - 0x00215704, // n0x0024 c0x0000 (---------------) + I aigo - 0x002b7a06, // n0x0025 c0x0000 (---------------) + I airbus - 0x003385c8, // n0x0026 c0x0000 (---------------) + I airforce - 0x00285646, // n0x0027 c0x0000 (---------------) + I airtel - 0x00229704, // n0x0028 c0x0000 (---------------) + I akdn - 0x03600d02, // n0x0029 c0x000d (n0x067b-n0x0682) + I al - 0x00329f49, // n0x002a c0x0000 (---------------) + I alfaromeo - 0x0031f947, // n0x002b c0x0000 (---------------) + I alibaba - 0x002be246, // n0x002c c0x0000 (---------------) + I alipay - 0x0033e289, // n0x002d c0x0000 (---------------) + I allfinanz - 0x0020a148, // n0x002e c0x0000 (---------------) + I allstate - 0x00212f04, // n0x002f c0x0000 (---------------) + I ally - 0x00222006, // n0x0030 c0x0000 (---------------) + I alsace - 0x0020adc6, // n0x0031 c0x0000 (---------------) + I alstom - 0x03a00942, // n0x0032 c0x000e (n0x0682-n0x0683) + I am - 0x0024678f, // n0x0033 c0x0000 (---------------) + I americanexpress - 0x00207b4e, // n0x0034 c0x0000 (---------------) + I americanfamily - 0x00200944, // n0x0035 c0x0000 (---------------) + I amex - 0x003676c5, // n0x0036 c0x0000 (---------------) + I amfam - 0x0023e385, 
// n0x0037 c0x0000 (---------------) + I amica - 0x002c0509, // n0x0038 c0x0000 (---------------) + I amsterdam - 0x00243249, // n0x0039 c0x0000 (---------------) + I analytics - 0x00321107, // n0x003a c0x0000 (---------------) + I android - 0x0034e986, // n0x003b c0x0000 (---------------) + I anquan - 0x0024bac3, // n0x003c c0x0000 (---------------) + I anz - 0x03e02882, // n0x003d c0x000f (n0x0683-n0x0689) + I ao - 0x00275443, // n0x003e c0x0000 (---------------) + I aol - 0x0023d94a, // n0x003f c0x0000 (---------------) + I apartments - 0x00208083, // n0x0040 c0x0000 (---------------) + I app - 0x0026c205, // n0x0041 c0x0000 (---------------) + I apple - 0x00200f02, // n0x0042 c0x0000 (---------------) + I aq - 0x00286809, // n0x0043 c0x0000 (---------------) + I aquarelle - 0x04201d42, // n0x0044 c0x0010 (n0x0689-n0x0692) + I ar - 0x00202344, // n0x0045 c0x0000 (---------------) + I arab - 0x003523c6, // n0x0046 c0x0000 (---------------) + I aramco - 0x00308e45, // n0x0047 c0x0000 (---------------) + I archi - 0x00346644, // n0x0048 c0x0000 (---------------) + I army - 0x04a54644, // n0x0049 c0x0012 (n0x0693-n0x0699) + I arpa - 0x00239044, // n0x004a c0x0000 (---------------) + I arte - 0x04e03302, // n0x004b c0x0013 (n0x0699-n0x069a) + I as - 0x0034ab84, // n0x004c c0x0000 (---------------) + I asda - 0x00312a84, // n0x004d c0x0000 (---------------) + I asia - 0x00336d8a, // n0x004e c0x0000 (---------------) + I associates - 0x05200482, // n0x004f c0x0014 (n0x069a-n0x06a1) + I at - 0x00248447, // n0x0050 c0x0000 (---------------) + I athleta - 0x00309308, // n0x0051 c0x0000 (---------------) + I attorney - 0x05a05782, // n0x0052 c0x0016 (n0x06a2-n0x06b4) + I au - 0x0031c647, // n0x0053 c0x0000 (---------------) + I auction - 0x00232e44, // n0x0054 c0x0000 (---------------) + I audi - 0x002b8f07, // n0x0055 c0x0000 (---------------) + I audible - 0x00232e45, // n0x0056 c0x0000 (---------------) + I audio - 0x00360347, // n0x0057 c0x0000 (---------------) + I 
auspost - 0x00320ec6, // n0x0058 c0x0000 (---------------) + I author - 0x00229f44, // n0x0059 c0x0000 (---------------) + I auto - 0x00324485, // n0x005a c0x0000 (---------------) + I autos - 0x002dbe87, // n0x005b c0x0000 (---------------) + I avianca - 0x06a01c02, // n0x005c c0x001a (n0x06c2-n0x06c3) + I aw - 0x002f2043, // n0x005d c0x0000 (---------------) + I aws - 0x00224142, // n0x005e c0x0000 (---------------) + I ax - 0x003827c3, // n0x005f c0x0000 (---------------) + I axa - 0x06e03442, // n0x0060 c0x001b (n0x06c3-n0x06cf) + I az - 0x00281bc5, // n0x0061 c0x0000 (---------------) + I azure - 0x07200882, // n0x0062 c0x001c (n0x06cf-n0x06da) + I ba - 0x003025c4, // n0x0063 c0x0000 (---------------) + I baby - 0x0027cd45, // n0x0064 c0x0000 (---------------) + I baidu - 0x00200887, // n0x0065 c0x0000 (---------------) + I banamex - 0x002f2f0e, // n0x0066 c0x0000 (---------------) + I bananarepublic - 0x00206a84, // n0x0067 c0x0000 (---------------) + I band - 0x00203204, // n0x0068 c0x0000 (---------------) + I bank - 0x002049c3, // n0x0069 c0x0000 (---------------) + I bar - 0x002e3209, // n0x006a c0x0000 (---------------) + I barcelona - 0x002c304b, // n0x006b c0x0000 (---------------) + I barclaycard - 0x002dc488, // n0x006c c0x0000 (---------------) + I barclays - 0x00307448, // n0x006d c0x0000 (---------------) + I barefoot - 0x00310808, // n0x006e c0x0000 (---------------) + I bargains - 0x0022df88, // n0x006f c0x0000 (---------------) + I baseball - 0x0033e0ca, // n0x0070 c0x0000 (---------------) + I basketball - 0x00360247, // n0x0071 c0x0000 (---------------) + I bauhaus - 0x0037b946, // n0x0072 c0x0000 (---------------) + I bayern - 0x0763e4c2, // n0x0073 c0x001d (n0x06da-n0x06e4) + I bb - 0x00365a03, // n0x0074 c0x0000 (---------------) + I bbc - 0x0036be43, // n0x0075 c0x0000 (---------------) + I bbt - 0x00375804, // n0x0076 c0x0000 (---------------) + I bbva - 0x0039a903, // n0x0077 c0x0000 (---------------) + I bcg - 0x00379cc3, // n0x0078 
c0x0000 (---------------) + I bcn - 0x01714f02, // n0x0079 c0x0005 (---------------)* o I bd - 0x07a02e02, // n0x007a c0x001e (n0x06e4-n0x06e6) + I be - 0x0021acc5, // n0x007b c0x0000 (---------------) + I beats - 0x0024fac6, // n0x007c c0x0000 (---------------) + I beauty - 0x002ce284, // n0x007d c0x0000 (---------------) + I beer - 0x003819c7, // n0x007e c0x0000 (---------------) + I bentley - 0x0022fc06, // n0x007f c0x0000 (---------------) + I berlin - 0x00354f84, // n0x0080 c0x0000 (---------------) + I best - 0x00397e47, // n0x0081 c0x0000 (---------------) + I bestbuy - 0x002079c3, // n0x0082 c0x0000 (---------------) + I bet - 0x07f5b5c2, // n0x0083 c0x001f (n0x06e6-n0x06e7) + I bf - 0x08304a82, // n0x0084 c0x0020 (n0x06e7-n0x070c) + I bg - 0x0870ebc2, // n0x0085 c0x0021 (n0x070c-n0x0711) + I bh - 0x0039f186, // n0x0086 c0x0000 (---------------) + I bharti - 0x08a00002, // n0x0087 c0x0022 (n0x0711-n0x0716) + I bi - 0x00356b45, // n0x0088 c0x0000 (---------------) + I bible - 0x00316443, // n0x0089 c0x0000 (---------------) + I bid - 0x00202404, // n0x008a c0x0000 (---------------) + I bike - 0x002dc2c4, // n0x008b c0x0000 (---------------) + I bing - 0x002dc2c5, // n0x008c c0x0000 (---------------) + I bingo - 0x00203e43, // n0x008d c0x0000 (---------------) + I bio - 0x08f31a83, // n0x008e c0x0023 (n0x0716-n0x071e) + I biz - 0x0920b442, // n0x008f c0x0024 (n0x071e-n0x0722) + I bj - 0x0028b585, // n0x0090 c0x0000 (---------------) + I black - 0x0028b58b, // n0x0091 c0x0000 (---------------) + I blackfriday - 0x00259006, // n0x0092 c0x0000 (---------------) + I blanco - 0x0020db0b, // n0x0093 c0x0000 (---------------) + I blockbuster - 0x0022f084, // n0x0094 c0x0000 (---------------) + I blog - 0x0020e289, // n0x0095 c0x0000 (---------------) + I bloomberg - 0x0020f384, // n0x0096 c0x0000 (---------------) + I blue - 0x0960f9c2, // n0x0097 c0x0025 (n0x0722-n0x0727) + I bm - 0x00210f03, // n0x0098 c0x0000 (---------------) + I bms - 0x00211fc3, // n0x0099 
c0x0000 (---------------) + I bmw - 0x01612142, // n0x009a c0x0005 (---------------)* o I bn - 0x00246cc3, // n0x009b c0x0000 (---------------) + I bnl - 0x0021214a, // n0x009c c0x0000 (---------------) + I bnpparibas - 0x09a0f682, // n0x009d c0x0026 (n0x0727-n0x0730) + I bo - 0x0030d185, // n0x009e c0x0000 (---------------) + I boats - 0x0020f68a, // n0x009f c0x0000 (---------------) + I boehringer - 0x00292584, // n0x00a0 c0x0000 (---------------) + I bofa - 0x00213983, // n0x00a1 c0x0000 (---------------) + I bom - 0x00213fc4, // n0x00a2 c0x0000 (---------------) + I bond - 0x00215e03, // n0x00a3 c0x0000 (---------------) + I boo - 0x00215e04, // n0x00a4 c0x0000 (---------------) + I book - 0x00215e07, // n0x00a5 c0x0000 (---------------) + I booking - 0x00216f45, // n0x00a6 c0x0000 (---------------) + I boots - 0x00217205, // n0x00a7 c0x0000 (---------------) + I bosch - 0x00218406, // n0x00a8 c0x0000 (---------------) + I bostik - 0x00218f86, // n0x00a9 c0x0000 (---------------) + I boston - 0x0021b743, // n0x00aa c0x0000 (---------------) + I bot - 0x0021d848, // n0x00ab c0x0000 (---------------) + I boutique - 0x00219643, // n0x00ac c0x0000 (---------------) + I box - 0x09e1e3c2, // n0x00ad c0x0027 (n0x0730-n0x0776) + I br - 0x0021eb48, // n0x00ae c0x0000 (---------------) + I bradesco - 0x0021e3cb, // n0x00af c0x0000 (---------------) + I bridgestone - 0x00223d48, // n0x00b0 c0x0000 (---------------) + I broadway - 0x00224906, // n0x00b1 c0x0000 (---------------) + I broker - 0x00225887, // n0x00b2 c0x0000 (---------------) + I brother - 0x00228408, // n0x00b3 c0x0000 (---------------) + I brussels - 0x0a630fc2, // n0x00b4 c0x0029 (n0x0777-n0x077c) + I bs - 0x0aa23a42, // n0x00b5 c0x002a (n0x077c-n0x0781) + I bt - 0x00208dc8, // n0x00b6 c0x0000 (---------------) + I budapest - 0x002e6907, // n0x00b7 c0x0000 (---------------) + I bugatti - 0x00241085, // n0x00b8 c0x0000 (---------------) + I build - 0x00241088, // n0x00b9 c0x0000 (---------------) + I 
builders - 0x002b8308, // n0x00ba c0x0000 (---------------) + I business - 0x002fefc3, // n0x00bb c0x0000 (---------------) + I buy - 0x0022f404, // n0x00bc c0x0000 (---------------) + I buzz - 0x00365fc2, // n0x00bd c0x0000 (---------------) + I bv - 0x0ae30202, // n0x00be c0x002b (n0x0781-n0x0783) + I bw - 0x0b20d202, // n0x00bf c0x002c (n0x0783-n0x0787) + I by - 0x0ba30682, // n0x00c0 c0x002e (n0x0788-n0x078e) + I bz - 0x00230683, // n0x00c1 c0x0000 (---------------) + I bzh - 0x0be00e42, // n0x00c2 c0x002f (n0x078e-n0x079f) + I ca - 0x0023e443, // n0x00c3 c0x0000 (---------------) + I cab - 0x00310d04, // n0x00c4 c0x0000 (---------------) + I cafe - 0x00212ec3, // n0x00c5 c0x0000 (---------------) + I cal - 0x00212ec4, // n0x00c6 c0x0000 (---------------) + I call - 0x0032914b, // n0x00c7 c0x0000 (---------------) + I calvinklein - 0x0023ec46, // n0x00c8 c0x0000 (---------------) + I camera - 0x00241bc4, // n0x00c9 c0x0000 (---------------) + I camp - 0x002a100e, // n0x00ca c0x0000 (---------------) + I cancerresearch - 0x00263685, // n0x00cb c0x0000 (---------------) + I canon - 0x002dbfc8, // n0x00cc c0x0000 (---------------) + I capetown - 0x002ec107, // n0x00cd c0x0000 (---------------) + I capital - 0x002ec10a, // n0x00ce c0x0000 (---------------) + I capitalone - 0x0020cb43, // n0x00cf c0x0000 (---------------) + I car - 0x002370c7, // n0x00d0 c0x0000 (---------------) + I caravan - 0x002c3205, // n0x00d1 c0x0000 (---------------) + I cards - 0x002ae704, // n0x00d2 c0x0000 (---------------) + I care - 0x002ae706, // n0x00d3 c0x0000 (---------------) + I career - 0x002ae707, // n0x00d4 c0x0000 (---------------) + I careers - 0x002bd384, // n0x00d5 c0x0000 (---------------) + I cars - 0x00390087, // n0x00d6 c0x0000 (---------------) + I cartier - 0x00215004, // n0x00d7 c0x0000 (---------------) + I casa - 0x00217e44, // n0x00d8 c0x0000 (---------------) + I case - 0x00217e46, // n0x00d9 c0x0000 (---------------) + I caseih - 0x002cb1c4, // n0x00da c0x0000 
(---------------) + I cash - 0x00356846, // n0x00db c0x0000 (---------------) + I casino - 0x0020f1c3, // n0x00dc c0x0000 (---------------) + I cat - 0x0037b1c8, // n0x00dd c0x0000 (---------------) + I catering - 0x0023cf88, // n0x00de c0x0000 (---------------) + I catholic - 0x0024a9c3, // n0x00df c0x0000 (---------------) + I cba - 0x00246c83, // n0x00e0 c0x0000 (---------------) + I cbn - 0x0038b6c4, // n0x00e1 c0x0000 (---------------) + I cbre - 0x00390643, // n0x00e2 c0x0000 (---------------) + I cbs - 0x0c22f6c2, // n0x00e3 c0x0030 (n0x079f-n0x07a3) + I cc - 0x0c662e02, // n0x00e4 c0x0031 (n0x07a3-n0x07a4) + I cd - 0x00205cc3, // n0x00e5 c0x0000 (---------------) + I ceb - 0x00204c86, // n0x00e6 c0x0000 (---------------) + I center - 0x002c0883, // n0x00e7 c0x0000 (---------------) + I ceo - 0x002e6f84, // n0x00e8 c0x0000 (---------------) + I cern - 0x0ca14c02, // n0x00e9 c0x0032 (n0x07a4-n0x07a5) + I cf - 0x00214c03, // n0x00ea c0x0000 (---------------) + I cfa - 0x00377c83, // n0x00eb c0x0000 (---------------) + I cfd - 0x0021be82, // n0x00ec c0x0000 (---------------) + I cg - 0x0ce00382, // n0x00ed c0x0033 (n0x07a5-n0x07a6) + I ch - 0x002bc906, // n0x00ee c0x0000 (---------------) + I chanel - 0x0023ad87, // n0x00ef c0x0000 (---------------) + I channel - 0x00335885, // n0x00f0 c0x0000 (---------------) + I chase - 0x0021a404, // n0x00f1 c0x0000 (---------------) + I chat - 0x00284485, // n0x00f2 c0x0000 (---------------) + I cheap - 0x00201487, // n0x00f3 c0x0000 (---------------) + I chintai - 0x002b7785, // n0x00f4 c0x0000 (---------------) + I chloe - 0x003a4c09, // n0x00f5 c0x0000 (---------------) + I christmas - 0x00303446, // n0x00f6 c0x0000 (---------------) + I chrome - 0x00304108, // n0x00f7 c0x0000 (---------------) + I chrysler - 0x00335786, // n0x00f8 c0x0000 (---------------) + I church - 0x0d209602, // n0x00f9 c0x0034 (n0x07a6-n0x07b5) + I ci - 0x00234f48, // n0x00fa c0x0000 (---------------) + I cipriani - 0x00337606, // n0x00fb c0x0000 
(---------------) + I circle - 0x00399cc5, // n0x00fc c0x0000 (---------------) + I cisco - 0x0034f187, // n0x00fd c0x0000 (---------------) + I citadel - 0x00356744, // n0x00fe c0x0000 (---------------) + I citi - 0x00356745, // n0x00ff c0x0000 (---------------) + I citic - 0x00285804, // n0x0100 c0x0000 (---------------) + I city - 0x00285808, // n0x0101 c0x0000 (---------------) + I cityeats - 0x0d60d082, // n0x0102 c0x0035 (n0x07b5-n0x07b6)* o I ck - 0x0da07f42, // n0x0103 c0x0036 (n0x07b6-n0x07bb) + I cl - 0x003773c6, // n0x0104 c0x0000 (---------------) + I claims - 0x00227b48, // n0x0105 c0x0000 (---------------) + I cleaning - 0x003781c5, // n0x0106 c0x0000 (---------------) + I click - 0x0037b086, // n0x0107 c0x0000 (---------------) + I clinic - 0x00385708, // n0x0108 c0x0000 (---------------) + I clinique - 0x0039b608, // n0x0109 c0x0000 (---------------) + I clothing - 0x00207f45, // n0x010a c0x0000 (---------------) + I cloud - 0x00378c44, // n0x010b c0x0000 (---------------) + I club - 0x00378c47, // n0x010c c0x0000 (---------------) + I clubmed - 0x0de5cd02, // n0x010d c0x0037 (n0x07bb-n0x07bf) + I cm - 0x0e21dac2, // n0x010e c0x0038 (n0x07bf-n0x07ec) + I cn - 0x0fa0ce42, // n0x010f c0x003e (n0x07f1-n0x07fe) + I co - 0x0033f105, // n0x0110 c0x0000 (---------------) + I coach - 0x0029c345, // n0x0111 c0x0000 (---------------) + I codes - 0x0020ce46, // n0x0112 c0x0000 (---------------) + I coffee - 0x0022f707, // n0x0113 c0x0000 (---------------) + I college - 0x00231707, // n0x0114 c0x0000 (---------------) + I cologne - 0x10233243, // n0x0115 c0x0040 (n0x07ff-n0x08d1) + I com - 0x002a7d07, // n0x0116 c0x0000 (---------------) + I comcast - 0x002d8ec8, // n0x0117 c0x0000 (---------------) + I commbank - 0x00233249, // n0x0118 c0x0000 (---------------) + I community - 0x0036db87, // n0x0119 c0x0000 (---------------) + I company - 0x00233f47, // n0x011a c0x0000 (---------------) + I compare - 0x00235b08, // n0x011b c0x0000 (---------------) + I 
computer - 0x00236306, // n0x011c c0x0000 (---------------) + I comsec - 0x002368c6, // n0x011d c0x0000 (---------------) + I condos - 0x002375cc, // n0x011e c0x0000 (---------------) + I construction - 0x0023830a, // n0x011f c0x0000 (---------------) + I consulting - 0x002387c7, // n0x0120 c0x0000 (---------------) + I contact - 0x00239d4b, // n0x0121 c0x0000 (---------------) + I contractors - 0x0023abc7, // n0x0122 c0x0000 (---------------) + I cooking - 0x0023abce, // n0x0123 c0x0000 (---------------) + I cookingchannel - 0x0023b6c4, // n0x0124 c0x0000 (---------------) + I cool - 0x0023c344, // n0x0125 c0x0000 (---------------) + I coop - 0x0023f147, // n0x0126 c0x0000 (---------------) + I corsica - 0x00337987, // n0x0127 c0x0000 (---------------) + I country - 0x002424c6, // n0x0128 c0x0000 (---------------) + I coupon - 0x002424c7, // n0x0129 c0x0000 (---------------) + I coupons - 0x00242ac7, // n0x012a c0x0000 (---------------) + I courses - 0x11a051c2, // n0x012b c0x0046 (n0x08f0-n0x08f7) + I cr - 0x00243786, // n0x012c c0x0000 (---------------) + I credit - 0x0024378a, // n0x012d c0x0000 (---------------) + I creditcard - 0x00243a0b, // n0x012e c0x0000 (---------------) + I creditunion - 0x00244b07, // n0x012f c0x0000 (---------------) + I cricket - 0x002454c5, // n0x0130 c0x0000 (---------------) + I crown - 0x00245603, // n0x0131 c0x0000 (---------------) + I crs - 0x00245f46, // n0x0132 c0x0000 (---------------) + I cruise - 0x00245f47, // n0x0133 c0x0000 (---------------) + I cruises - 0x00243403, // n0x0134 c0x0000 (---------------) + I csc - 0x11e08b42, // n0x0135 c0x0047 (n0x08f7-n0x08fd) + I cu - 0x0024640a, // n0x0136 c0x0000 (---------------) + I cuisinella - 0x12350e42, // n0x0137 c0x0048 (n0x08fd-n0x08fe) + I cv - 0x126cadc2, // n0x0138 c0x0049 (n0x08fe-n0x0902) + I cw - 0x12a477c2, // n0x0139 c0x004a (n0x0902-n0x0904) + I cx - 0x12e3cd42, // n0x013a c0x004b (n0x0904-n0x0911) o I cy - 0x0024a0c5, // n0x013b c0x0000 (---------------) + I 
cymru - 0x0024a7c4, // n0x013c c0x0000 (---------------) + I cyou - 0x13600142, // n0x013d c0x004d (n0x0912-n0x0914) + I cz - 0x0034ac05, // n0x013e c0x0000 (---------------) + I dabur - 0x002a30c3, // n0x013f c0x0000 (---------------) + I dad - 0x002275c5, // n0x0140 c0x0000 (---------------) + I dance - 0x0020da04, // n0x0141 c0x0000 (---------------) + I date - 0x0020f486, // n0x0142 c0x0000 (---------------) + I dating - 0x00294506, // n0x0143 c0x0000 (---------------) + I datsun - 0x00263b83, // n0x0144 c0x0000 (---------------) + I day - 0x0023f984, // n0x0145 c0x0000 (---------------) + I dclk - 0x0026bd03, // n0x0146 c0x0000 (---------------) + I dds - 0x13a05582, // n0x0147 c0x004e (n0x0914-n0x091c) + I de - 0x00205584, // n0x0148 c0x0000 (---------------) + I deal - 0x003791c6, // n0x0149 c0x0000 (---------------) + I dealer - 0x00205585, // n0x014a c0x0000 (---------------) + I deals - 0x00399646, // n0x014b c0x0000 (---------------) + I degree - 0x0034f288, // n0x014c c0x0000 (---------------) + I delivery - 0x0025a104, // n0x014d c0x0000 (---------------) + I dell - 0x00320608, // n0x014e c0x0000 (---------------) + I deloitte - 0x00288905, // n0x014f c0x0000 (---------------) + I delta - 0x00224608, // n0x0150 c0x0000 (---------------) + I democrat - 0x002aca06, // n0x0151 c0x0000 (---------------) + I dental - 0x002b3f87, // n0x0152 c0x0000 (---------------) + I dentist - 0x00228204, // n0x0153 c0x0000 (---------------) + I desi - 0x00228206, // n0x0154 c0x0000 (---------------) + I design - 0x0032b083, // n0x0155 c0x0000 (---------------) + I dev - 0x0037a1c3, // n0x0156 c0x0000 (---------------) + I dhl - 0x002c6088, // n0x0157 c0x0000 (---------------) + I diamonds - 0x0030ed44, // n0x0158 c0x0000 (---------------) + I diet - 0x002f4847, // n0x0159 c0x0000 (---------------) + I digital - 0x0024da86, // n0x015a c0x0000 (---------------) + I direct - 0x0024da89, // n0x015b c0x0000 (---------------) + I directory - 0x00321288, // n0x015c c0x0000 
(---------------) + I discount - 0x00331308, // n0x015d c0x0000 (---------------) + I discover - 0x003550c4, // n0x015e c0x0000 (---------------) + I dish - 0x00309c83, // n0x015f c0x0000 (---------------) + I diy - 0x0024e002, // n0x0160 c0x0000 (---------------) + I dj - 0x13e489c2, // n0x0161 c0x004f (n0x091c-n0x091d) + I dk - 0x1420d302, // n0x0162 c0x0050 (n0x091d-n0x0922) + I dm - 0x00311b83, // n0x0163 c0x0000 (---------------) + I dnp - 0x14612c02, // n0x0164 c0x0051 (n0x0922-n0x092c) + I do - 0x00329ac4, // n0x0165 c0x0000 (---------------) + I docs - 0x00212c05, // n0x0166 c0x0000 (---------------) + I dodge - 0x00230503, // n0x0167 c0x0000 (---------------) + I dog - 0x002366c4, // n0x0168 c0x0000 (---------------) + I doha - 0x003011c7, // n0x0169 c0x0000 (---------------) + I domains - 0x0023be46, // n0x016a c0x0000 (---------------) + I doosan - 0x00354243, // n0x016b c0x0000 (---------------) + I dot - 0x00362ac8, // n0x016c c0x0000 (---------------) + I download - 0x003519c5, // n0x016d c0x0000 (---------------) + I drive - 0x00249084, // n0x016e c0x0000 (---------------) + I dstv - 0x00364c03, // n0x016f c0x0000 (---------------) + I dtv - 0x0027ccc5, // n0x0170 c0x0000 (---------------) + I dubai - 0x0027ce04, // n0x0171 c0x0000 (---------------) + I duck - 0x0038fd86, // n0x0172 c0x0000 (---------------) + I dunlop - 0x00399f04, // n0x0173 c0x0000 (---------------) + I duns - 0x0039f786, // n0x0174 c0x0000 (---------------) + I dupont - 0x002007c6, // n0x0175 c0x0000 (---------------) + I durban - 0x00319904, // n0x0176 c0x0000 (---------------) + I dvag - 0x00211443, // n0x0177 c0x0000 (---------------) + I dwg - 0x14a06842, // n0x0178 c0x0052 (n0x092c-n0x0934) + I dz - 0x00283945, // n0x0179 c0x0000 (---------------) + I earth - 0x0021ad03, // n0x017a c0x0000 (---------------) + I eat - 0x14e088c2, // n0x017b c0x0053 (n0x0934-n0x0940) + I ec - 0x002b4a45, // n0x017c c0x0000 (---------------) + I edeka - 0x00239103, // n0x017d c0x0000 
(---------------) + I edu - 0x00239109, // n0x017e c0x0000 (---------------) + I education - 0x1520cf42, // n0x017f c0x0054 (n0x0940-n0x094a) + I ee - 0x15a07202, // n0x0180 c0x0056 (n0x094b-n0x0954) + I eg - 0x00302985, // n0x0181 c0x0000 (---------------) + I email - 0x002b4346, // n0x0182 c0x0000 (---------------) + I emerck - 0x003532c7, // n0x0183 c0x0000 (---------------) + I emerson - 0x002cca86, // n0x0184 c0x0000 (---------------) + I energy - 0x0030c4c8, // n0x0185 c0x0000 (---------------) + I engineer - 0x0030c4cb, // n0x0186 c0x0000 (---------------) + I engineering - 0x00204ccb, // n0x0187 c0x0000 (---------------) + I enterprises - 0x00398d05, // n0x0188 c0x0000 (---------------) + I epost - 0x00399385, // n0x0189 c0x0000 (---------------) + I epson - 0x002c4489, // n0x018a c0x0000 (---------------) + I equipment - 0x01600682, // n0x018b c0x0005 (---------------)* o I er - 0x00319ac8, // n0x018c c0x0000 (---------------) + I ericsson - 0x0020dd44, // n0x018d c0x0000 (---------------) + I erni - 0x16200082, // n0x018e c0x0058 (n0x0955-n0x095a) + I es - 0x0027a103, // n0x018f c0x0000 (---------------) + I esq - 0x002c4206, // n0x0190 c0x0000 (---------------) + I estate - 0x0033ce08, // n0x0191 c0x0000 (---------------) + I esurance - 0x16a00a42, // n0x0192 c0x005a (n0x095b-n0x0963) + I et - 0x00226d48, // n0x0193 c0x0000 (---------------) + I etisalat - 0x00205382, // n0x0194 c0x0000 (---------------) + I eu - 0x0027bb4a, // n0x0195 c0x0000 (---------------) + I eurovision - 0x0022a283, // n0x0196 c0x0000 (---------------) + I eus - 0x0032b0c6, // n0x0197 c0x0000 (---------------) + I events - 0x00203108, // n0x0198 c0x0000 (---------------) + I everbank - 0x00379608, // n0x0199 c0x0000 (---------------) + I exchange - 0x0031f2c6, // n0x019a c0x0000 (---------------) + I expert - 0x00363887, // n0x019b c0x0000 (---------------) + I exposed - 0x00246987, // n0x019c c0x0000 (---------------) + I express - 0x0020a94a, // n0x019d c0x0000 (---------------) 
+ I extraspace - 0x00292604, // n0x019e c0x0000 (---------------) + I fage - 0x00214c44, // n0x019f c0x0000 (---------------) + I fail - 0x0033b5c9, // n0x01a0 c0x0000 (---------------) + I fairwinds - 0x0034e685, // n0x01a1 c0x0000 (---------------) + I faith - 0x00207d46, // n0x01a2 c0x0000 (---------------) + I family - 0x00210883, // n0x01a3 c0x0000 (---------------) + I fan - 0x002e08c4, // n0x01a4 c0x0000 (---------------) + I fans - 0x0026a084, // n0x01a5 c0x0000 (---------------) + I farm - 0x00327f47, // n0x01a6 c0x0000 (---------------) + I farmers - 0x00230287, // n0x01a7 c0x0000 (---------------) + I fashion - 0x00246284, // n0x01a8 c0x0000 (---------------) + I fast - 0x00210085, // n0x01a9 c0x0000 (---------------) + I fedex - 0x0020cf08, // n0x01aa c0x0000 (---------------) + I feedback - 0x002ff307, // n0x01ab c0x0000 (---------------) + I ferrari - 0x003369c7, // n0x01ac c0x0000 (---------------) + I ferrero - 0x16e01702, // n0x01ad c0x005b (n0x0963-n0x0966) + I fi - 0x00296004, // n0x01ae c0x0000 (---------------) + I fiat - 0x0035b608, // n0x01af c0x0000 (---------------) + I fidelity - 0x0035e584, // n0x01b0 c0x0000 (---------------) + I fido - 0x0024b184, // n0x01b1 c0x0000 (---------------) + I film - 0x0024b545, // n0x01b2 c0x0000 (---------------) + I final - 0x0024b687, // n0x01b3 c0x0000 (---------------) + I finance - 0x002094c9, // n0x01b4 c0x0000 (---------------) + I financial - 0x0024c584, // n0x01b5 c0x0000 (---------------) + I fire - 0x0024d7c9, // n0x01b6 c0x0000 (---------------) + I firestone - 0x0024dcc8, // n0x01b7 c0x0000 (---------------) + I firmdale - 0x0024ec04, // n0x01b8 c0x0000 (---------------) + I fish - 0x0024ec07, // n0x01b9 c0x0000 (---------------) + I fishing - 0x0024f583, // n0x01ba c0x0000 (---------------) + I fit - 0x0024fd07, // n0x01bb c0x0000 (---------------) + I fitness - 0x0161cc42, // n0x01bc c0x0005 (---------------)* o I fj - 0x0179b042, // n0x01bd c0x0005 (---------------)* o I fk - 0x002503c6, // 
n0x01be c0x0000 (---------------) + I flickr - 0x00251347, // n0x01bf c0x0000 (---------------) + I flights - 0x00251c04, // n0x01c0 c0x0000 (---------------) + I flir - 0x00252ac7, // n0x01c1 c0x0000 (---------------) + I florist - 0x00254447, // n0x01c2 c0x0000 (---------------) + I flowers - 0x00254848, // n0x01c3 c0x0000 (---------------) + I flsmidth - 0x00254ec3, // n0x01c4 c0x0000 (---------------) + I fly - 0x00234802, // n0x01c5 c0x0000 (---------------) + I fm - 0x002018c2, // n0x01c6 c0x0000 (---------------) + I fo - 0x002564c3, // n0x01c7 c0x0000 (---------------) + I foo - 0x002564cb, // n0x01c8 c0x0000 (---------------) + I foodnetwork - 0x00307548, // n0x01c9 c0x0000 (---------------) + I football - 0x0039d384, // n0x01ca c0x0000 (---------------) + I ford - 0x00257bc5, // n0x01cb c0x0000 (---------------) + I forex - 0x00259507, // n0x01cc c0x0000 (---------------) + I forsale - 0x0025aec5, // n0x01cd c0x0000 (---------------) + I forum - 0x002b9f8a, // n0x01ce c0x0000 (---------------) + I foundation - 0x0025c543, // n0x01cf c0x0000 (---------------) + I fox - 0x17240202, // n0x01d0 c0x005c (n0x0966-n0x097e) + I fr - 0x002e8084, // n0x01d1 c0x0000 (---------------) + I free - 0x0025dd49, // n0x01d2 c0x0000 (---------------) + I fresenius - 0x00261a83, // n0x01d3 c0x0000 (---------------) + I frl - 0x00261b47, // n0x01d4 c0x0000 (---------------) + I frogans - 0x0039ea49, // n0x01d5 c0x0000 (---------------) + I frontdoor - 0x00397688, // n0x01d6 c0x0000 (---------------) + I frontier - 0x00205283, // n0x01d7 c0x0000 (---------------) + I ftr - 0x0027bdc7, // n0x01d8 c0x0000 (---------------) + I fujitsu - 0x0027c2c9, // n0x01d9 c0x0000 (---------------) + I fujixerox - 0x00283e04, // n0x01da c0x0000 (---------------) + I fund - 0x00284dc9, // n0x01db c0x0000 (---------------) + I furniture - 0x00289ec6, // n0x01dc c0x0000 (---------------) + I futbol - 0x0028af83, // n0x01dd c0x0000 (---------------) + I fyi - 0x00201bc2, // n0x01de c0x0000 
(---------------) + I ga - 0x00221fc3, // n0x01df c0x0000 (---------------) + I gal - 0x00391707, // n0x01e0 c0x0000 (---------------) + I gallery - 0x00337785, // n0x01e1 c0x0000 (---------------) + I gallo - 0x002e3006, // n0x01e2 c0x0000 (---------------) + I gallup - 0x00298344, // n0x01e3 c0x0000 (---------------) + I game - 0x00345d85, // n0x01e4 c0x0000 (---------------) + I games - 0x0023d903, // n0x01e5 c0x0000 (---------------) + I gap - 0x0021b986, // n0x01e6 c0x0000 (---------------) + I garden - 0x0020e482, // n0x01e7 c0x0000 (---------------) + I gb - 0x00385ec4, // n0x01e8 c0x0000 (---------------) + I gbiz - 0x002265c2, // n0x01e9 c0x0000 (---------------) + I gd - 0x0022f143, // n0x01ea c0x0000 (---------------) + I gdn - 0x17600282, // n0x01eb c0x005d (n0x097e-n0x0985) + I ge - 0x002b6283, // n0x01ec c0x0000 (---------------) + I gea - 0x00219404, // n0x01ed c0x0000 (---------------) + I gent - 0x00219407, // n0x01ee c0x0000 (---------------) + I genting - 0x003233c6, // n0x01ef c0x0000 (---------------) + I george - 0x0025cb02, // n0x01f0 c0x0000 (---------------) + I gf - 0x17a01942, // n0x01f1 c0x005e (n0x0985-n0x0988) + I gg - 0x00331544, // n0x01f2 c0x0000 (---------------) + I ggee - 0x17e51402, // n0x01f3 c0x005f (n0x0988-n0x098d) + I gh - 0x18201982, // n0x01f4 c0x0060 (n0x098d-n0x0993) + I gi - 0x00344944, // n0x01f5 c0x0000 (---------------) + I gift - 0x00344945, // n0x01f6 c0x0000 (---------------) + I gifts - 0x00219a45, // n0x01f7 c0x0000 (---------------) + I gives - 0x0025e606, // n0x01f8 c0x0000 (---------------) + I giving - 0x18605b82, // n0x01f9 c0x0061 (n0x0993-n0x0998) + I gl - 0x00320545, // n0x01fa c0x0000 (---------------) + I glade - 0x00398305, // n0x01fb c0x0000 (---------------) + I glass - 0x00286b43, // n0x01fc c0x0000 (---------------) + I gle - 0x0020eac6, // n0x01fd c0x0000 (---------------) + I global - 0x0020f5c5, // n0x01fe c0x0000 (---------------) + I globo - 0x00215f82, // n0x01ff c0x0000 (---------------) + 
I gm - 0x00336105, // n0x0200 c0x0000 (---------------) + I gmail - 0x00217783, // n0x0201 c0x0000 (---------------) + I gmo - 0x00219583, // n0x0202 c0x0000 (---------------) + I gmx - 0x18a07102, // n0x0203 c0x0062 (n0x0998-n0x099e) + I gn - 0x002f3307, // n0x0204 c0x0000 (---------------) + I godaddy - 0x002ff584, // n0x0205 c0x0000 (---------------) + I gold - 0x002ff589, // n0x0206 c0x0000 (---------------) + I goldpoint - 0x0024dec4, // n0x0207 c0x0000 (---------------) + I golf - 0x00283803, // n0x0208 c0x0000 (---------------) + I goo - 0x002f7e09, // n0x0209 c0x0000 (---------------) + I goodhands - 0x00283808, // n0x020a c0x0000 (---------------) + I goodyear - 0x0029c1c4, // n0x020b c0x0000 (---------------) + I goog - 0x0029c1c6, // n0x020c c0x0000 (---------------) + I google - 0x002a4d03, // n0x020d c0x0000 (---------------) + I gop - 0x00210a43, // n0x020e c0x0000 (---------------) + I got - 0x002dc384, // n0x020f c0x0000 (---------------) + I gotv - 0x0027d903, // n0x0210 c0x0000 (---------------) + I gov - 0x18ed5602, // n0x0211 c0x0063 (n0x099e-n0x09a4) + I gp - 0x003004c2, // n0x0212 c0x0000 (---------------) + I gq - 0x19208a82, // n0x0213 c0x0064 (n0x09a4-n0x09aa) + I gr - 0x00317f88, // n0x0214 c0x0000 (---------------) + I grainger - 0x00311288, // n0x0215 c0x0000 (---------------) + I graphics - 0x0038ab06, // n0x0216 c0x0000 (---------------) + I gratis - 0x00252145, // n0x0217 c0x0000 (---------------) + I green - 0x002287c5, // n0x0218 c0x0000 (---------------) + I gripe - 0x0020c745, // n0x0219 c0x0000 (---------------) + I group - 0x00245dc2, // n0x021a c0x0000 (---------------) + I gs - 0x1963f882, // n0x021b c0x0065 (n0x09aa-n0x09b1) + I gt - 0x0160efc2, // n0x021c c0x0005 (---------------)* o I gu - 0x0034e808, // n0x021d c0x0000 (---------------) + I guardian - 0x00234e85, // n0x021e c0x0000 (---------------) + I gucci - 0x002dffc4, // n0x021f c0x0000 (---------------) + I guge - 0x0032afc5, // n0x0220 c0x0000 (---------------) + I 
guide - 0x0039b7c7, // n0x0221 c0x0000 (---------------) + I guitars - 0x00253a84, // n0x0222 c0x0000 (---------------) + I guru - 0x00216342, // n0x0223 c0x0000 (---------------) + I gw - 0x19a021c2, // n0x0224 c0x0066 (n0x09b1-n0x09b7) + I gy - 0x0030e604, // n0x0225 c0x0000 (---------------) + I hair - 0x00205a07, // n0x0226 c0x0000 (---------------) + I hamburg - 0x00394207, // n0x0227 c0x0000 (---------------) + I hangout - 0x00360304, // n0x0228 c0x0000 (---------------) + I haus - 0x00292543, // n0x0229 c0x0000 (---------------) + I hbo - 0x0024a904, // n0x022a c0x0000 (---------------) + I hdfc - 0x0024a908, // n0x022b c0x0000 (---------------) + I hdfcbank - 0x002ae586, // n0x022c c0x0000 (---------------) + I health - 0x002ae58a, // n0x022d c0x0000 (---------------) + I healthcare - 0x00209384, // n0x022e c0x0000 (---------------) + I help - 0x00209bc8, // n0x022f c0x0000 (---------------) + I helsinki - 0x00254ac4, // n0x0230 c0x0000 (---------------) + I here - 0x00225986, // n0x0231 c0x0000 (---------------) + I hermes - 0x002935c4, // n0x0232 c0x0000 (---------------) + I hgtv - 0x0033f406, // n0x0233 c0x0000 (---------------) + I hiphop - 0x002f4dc9, // n0x0234 c0x0000 (---------------) + I hisamitsu - 0x002a3e07, // n0x0235 c0x0000 (---------------) + I hitachi - 0x00287fc3, // n0x0236 c0x0000 (---------------) + I hiv - 0x19e0c482, // n0x0237 c0x0067 (n0x09b7-n0x09cf) + I hk - 0x0026f043, // n0x0238 c0x0000 (---------------) + I hkt - 0x0020fbc2, // n0x0239 c0x0000 (---------------) + I hm - 0x1a21c682, // n0x023a c0x0068 (n0x09cf-n0x09d5) + I hn - 0x002df346, // n0x023b c0x0000 (---------------) + I hockey - 0x0035d808, // n0x023c c0x0000 (---------------) + I holdings - 0x002a6647, // n0x023d c0x0000 (---------------) + I holiday - 0x00273d09, // n0x023e c0x0000 (---------------) + I homedepot - 0x00299fc9, // n0x023f c0x0000 (---------------) + I homegoods - 0x002a7245, // n0x0240 c0x0000 (---------------) + I homes - 0x002a7249, // n0x0241 
c0x0000 (---------------) + I homesense - 0x002a8805, // n0x0242 c0x0000 (---------------) + I honda - 0x002a9289, // n0x0243 c0x0000 (---------------) + I honeywell - 0x002aa045, // n0x0244 c0x0000 (---------------) + I horse - 0x00298484, // n0x0245 c0x0000 (---------------) + I host - 0x00298487, // n0x0246 c0x0000 (---------------) + I hosting - 0x00234303, // n0x0247 c0x0000 (---------------) + I hot - 0x002aab07, // n0x0248 c0x0000 (---------------) + I hoteles - 0x002ab0c7, // n0x0249 c0x0000 (---------------) + I hotmail - 0x002a3745, // n0x024a c0x0000 (---------------) + I house - 0x002a2ac3, // n0x024b c0x0000 (---------------) + I how - 0x1a60f742, // n0x024c c0x0069 (n0x09d5-n0x09da) + I hr - 0x00387984, // n0x024d c0x0000 (---------------) + I hsbc - 0x1aa51442, // n0x024e c0x006a (n0x09da-n0x09eb) + I ht - 0x0025cc83, // n0x024f c0x0000 (---------------) + I htc - 0x1ae24202, // n0x0250 c0x006b (n0x09eb-n0x0a0b) + I hu - 0x002f8486, // n0x0251 c0x0000 (---------------) + I hughes - 0x00309285, // n0x0252 c0x0000 (---------------) + I hyatt - 0x002ad907, // n0x0253 c0x0000 (---------------) + I hyundai - 0x00311ac3, // n0x0254 c0x0000 (---------------) + I ibm - 0x00379c44, // n0x0255 c0x0000 (---------------) + I icbc - 0x00205c83, // n0x0256 c0x0000 (---------------) + I ice - 0x00208b03, // n0x0257 c0x0000 (---------------) + I icu - 0x1b20d9c2, // n0x0258 c0x006c (n0x0a0b-n0x0a16) + I id - 0x1ba00042, // n0x0259 c0x006e (n0x0a17-n0x0a19) + I ie - 0x00366044, // n0x025a c0x0000 (---------------) + I ieee - 0x002347c3, // n0x025b c0x0000 (---------------) + I ifm - 0x003124c5, // n0x025c c0x0000 (---------------) + I iinet - 0x00312305, // n0x025d c0x0000 (---------------) + I ikano - 0x1be027c2, // n0x025e c0x006f (n0x0a19-n0x0a21) + I il - 0x1c600402, // n0x025f c0x0071 (n0x0a22-n0x0a29) + I im - 0x0024bd46, // n0x0260 c0x0000 (---------------) + I imamat - 0x0026b844, // n0x0261 c0x0000 (---------------) + I imdb - 0x0020be84, // n0x0262 c0x0000 
(---------------) + I immo - 0x0020be8a, // n0x0263 c0x0000 (---------------) + I immobilien - 0x1ce012c2, // n0x0264 c0x0073 (n0x0a2b-n0x0a38) + I in - 0x003673ca, // n0x0265 c0x0000 (---------------) + I industries - 0x00201688, // n0x0266 c0x0000 (---------------) + I infiniti - 0x1d201844, // n0x0267 c0x0074 (n0x0a38-n0x0a42) + I info - 0x0020f543, // n0x0268 c0x0000 (---------------) + I ing - 0x00209cc3, // n0x0269 c0x0000 (---------------) + I ink - 0x00310949, // n0x026a c0x0000 (---------------) + I institute - 0x00263309, // n0x026b c0x0000 (---------------) + I insurance - 0x003012c6, // n0x026c c0x0000 (---------------) + I insure - 0x1d601503, // n0x026d c0x0075 (n0x0a42-n0x0a43) + I int - 0x002ff705, // n0x026e c0x0000 (---------------) + I intel - 0x0031ba8d, // n0x026f c0x0000 (---------------) + I international - 0x002f7846, // n0x0270 c0x0000 (---------------) + I intuit - 0x0020980b, // n0x0271 c0x0000 (---------------) + I investments - 0x1da03a02, // n0x0272 c0x0076 (n0x0a43-n0x0a49) + I io - 0x0025acc8, // n0x0273 c0x0000 (---------------) + I ipiranga - 0x1de1d942, // n0x0274 c0x0077 (n0x0a49-n0x0a4f) + I iq - 0x1e204b02, // n0x0275 c0x0078 (n0x0a4f-n0x0a58) + I ir - 0x0029b805, // n0x0276 c0x0000 (---------------) + I irish - 0x1e604e82, // n0x0277 c0x0079 (n0x0a58-n0x0a60) + I is - 0x0025b147, // n0x0278 c0x0000 (---------------) + I iselect - 0x0027b3c7, // n0x0279 c0x0000 (---------------) + I ismaili - 0x00215a03, // n0x027a c0x0000 (---------------) + I ist - 0x00215a08, // n0x027b c0x0000 (---------------) + I istanbul - 0x1ea017c2, // n0x027c c0x007a (n0x0a60-n0x0bd1) + I it - 0x0027ea04, // n0x027d c0x0000 (---------------) + I itau - 0x00360d43, // n0x027e c0x0000 (---------------) + I itv - 0x00323805, // n0x027f c0x0000 (---------------) + I iveco - 0x00368243, // n0x0280 c0x0000 (---------------) + I iwc - 0x00308d46, // n0x0281 c0x0000 (---------------) + I jaguar - 0x003225c4, // n0x0282 c0x0000 (---------------) + I java - 
0x00246c43, // n0x0283 c0x0000 (---------------) + I jcb - 0x0026f783, // n0x0284 c0x0000 (---------------) + I jcp - 0x1ee0bd02, // n0x0285 c0x007b (n0x0bd1-n0x0bd4) + I je - 0x00335a04, // n0x0286 c0x0000 (---------------) + I jeep - 0x0034cbc5, // n0x0287 c0x0000 (---------------) + I jetzt - 0x00361547, // n0x0288 c0x0000 (---------------) + I jewelry - 0x00278b43, // n0x0289 c0x0000 (---------------) + I jio - 0x002add83, // n0x028a c0x0000 (---------------) + I jlc - 0x002aee43, // n0x028b c0x0000 (---------------) + I jll - 0x0167db82, // n0x028c c0x0005 (---------------)* o I jm - 0x002aef03, // n0x028d c0x0000 (---------------) + I jmp - 0x002af503, // n0x028e c0x0000 (---------------) + I jnj - 0x1f2010c2, // n0x028f c0x007c (n0x0bd4-n0x0bdc) + I jo - 0x002e35c4, // n0x0290 c0x0000 (---------------) + I jobs - 0x0027d046, // n0x0291 c0x0000 (---------------) + I joburg - 0x002010c3, // n0x0292 c0x0000 (---------------) + I jot - 0x002af883, // n0x0293 c0x0000 (---------------) + I joy - 0x1f6b00c2, // n0x0294 c0x007d (n0x0bdc-n0x0c4b) + I jp - 0x002b00c8, // n0x0295 c0x0000 (---------------) + I jpmorgan - 0x002b1304, // n0x0296 c0x0000 (---------------) + I jprs - 0x0024e0c6, // n0x0297 c0x0000 (---------------) + I juegos - 0x002b17c7, // n0x0298 c0x0000 (---------------) + I juniper - 0x00222e46, // n0x0299 c0x0000 (---------------) + I kaufen - 0x003782c4, // n0x029a c0x0000 (---------------) + I kddi - 0x2d202482, // n0x029b c0x00b4 (n0x12df-n0x12e0)* o I ke - 0x002341cb, // n0x029c c0x0000 (---------------) + I kerryhotels - 0x002e270e, // n0x029d c0x0000 (---------------) + I kerrylogistics - 0x002249cf, // n0x029e c0x0000 (---------------) + I kerryproperties - 0x002337c3, // n0x029f c0x0000 (---------------) + I kfh - 0x2dab7202, // n0x02a0 c0x00b6 (n0x12e1-n0x12e7) + I kg - 0x0161c802, // n0x02a1 c0x0005 (---------------)* o I kh - 0x2de02982, // n0x02a2 c0x00b7 (n0x12e7-n0x12ee) + I ki - 0x002257c3, // n0x02a3 c0x0000 (---------------) + I kia 
- 0x0023a7c3, // n0x02a4 c0x0000 (---------------) + I kim - 0x0037cbc6, // n0x02a5 c0x0000 (---------------) + I kinder - 0x0023ea06, // n0x02a6 c0x0000 (---------------) + I kindle - 0x00346087, // n0x02a7 c0x0000 (---------------) + I kitchen - 0x002ea604, // n0x02a8 c0x0000 (---------------) + I kiwi - 0x2e238982, // n0x02a9 c0x00b8 (n0x12ee-n0x12ff) + I km - 0x2e63dd02, // n0x02aa c0x00b9 (n0x12ff-n0x1303) + I kn - 0x002291c5, // n0x02ab c0x0000 (---------------) + I koeln - 0x002ab807, // n0x02ac c0x0000 (---------------) + I komatsu - 0x002eb806, // n0x02ad c0x0000 (---------------) + I kosher - 0x2ea0ea02, // n0x02ae c0x00ba (n0x1303-n0x1309) + I kp - 0x0020ea04, // n0x02af c0x0000 (---------------) + I kpmg - 0x0036ab43, // n0x02b0 c0x0000 (---------------) + I kpn - 0x2ee0bdc2, // n0x02b1 c0x00bb (n0x1309-n0x1327) + I kr - 0x0034c043, // n0x02b2 c0x0000 (---------------) + I krd - 0x003a1444, // n0x02b3 c0x0000 (---------------) + I kred - 0x002b7149, // n0x02b4 c0x0000 (---------------) + I kuokgroup - 0x016bf3c2, // n0x02b5 c0x0005 (---------------)* o I kw - 0x2f236f82, // n0x02b6 c0x00bc (n0x1327-n0x132c) + I ky - 0x00268246, // n0x02b7 c0x0000 (---------------) + I kyknet - 0x002c0005, // n0x02b8 c0x0000 (---------------) + I kyoto - 0x2f792002, // n0x02b9 c0x00bd (n0x132c-n0x1332) + I kz - 0x2fa03082, // n0x02ba c0x00be (n0x1332-n0x133b) + I la - 0x0033a687, // n0x02bb c0x0000 (---------------) + I lacaixa - 0x00294d09, // n0x02bc c0x0000 (---------------) + I ladbrokes - 0x0034ff8b, // n0x02bd c0x0000 (---------------) + I lamborghini - 0x00246745, // n0x02be c0x0000 (---------------) + I lamer - 0x0036b6c9, // n0x02bf c0x0000 (---------------) + I lancaster - 0x002c1d86, // n0x02c0 c0x0000 (---------------) + I lancia - 0x00259047, // n0x02c1 c0x0000 (---------------) + I lancome - 0x00213144, // n0x02c2 c0x0000 (---------------) + I land - 0x0026aa89, // n0x02c3 c0x0000 (---------------) + I landrover - 0x00358e47, // n0x02c4 c0x0000 
(---------------) + I lanxess - 0x00279d47, // n0x02c5 c0x0000 (---------------) + I lasalle - 0x00226e83, // n0x02c6 c0x0000 (---------------) + I lat - 0x0026b986, // n0x02c7 c0x0000 (---------------) + I latino - 0x002ce147, // n0x02c8 c0x0000 (---------------) + I latrobe - 0x00271c03, // n0x02c9 c0x0000 (---------------) + I law - 0x00271c06, // n0x02ca c0x0000 (---------------) + I lawyer - 0x2fe02802, // n0x02cb c0x00bf (n0x133b-n0x1340) + I lb - 0x30239382, // n0x02cc c0x00c0 (n0x1340-n0x1346) + I lc - 0x0023e683, // n0x02cd c0x0000 (---------------) + I lds - 0x00279e85, // n0x02ce c0x0000 (---------------) + I lease - 0x002b8607, // n0x02cf c0x0000 (---------------) + I leclerc - 0x00356c06, // n0x02d0 c0x0000 (---------------) + I lefrak - 0x00337705, // n0x02d1 c0x0000 (---------------) + I legal - 0x0024de44, // n0x02d2 c0x0000 (---------------) + I lego - 0x002de845, // n0x02d3 c0x0000 (---------------) + I lexus - 0x002e7384, // n0x02d4 c0x0000 (---------------) + I lgbt - 0x30605bc2, // n0x02d5 c0x00c1 (n0x1346-n0x1347) + I li - 0x0030b0c7, // n0x02d6 c0x0000 (---------------) + I liaison - 0x002bdb44, // n0x02d7 c0x0000 (---------------) + I lidl - 0x00263204, // n0x02d8 c0x0000 (---------------) + I life - 0x0026320d, // n0x02d9 c0x0000 (---------------) + I lifeinsurance - 0x0035f409, // n0x02da c0x0000 (---------------) + I lifestyle - 0x00288c08, // n0x02db c0x0000 (---------------) + I lighting - 0x00258c84, // n0x02dc c0x0000 (---------------) + I like - 0x00354b85, // n0x02dd c0x0000 (---------------) + I lilly - 0x0025d307, // n0x02de c0x0000 (---------------) + I limited - 0x0025d704, // n0x02df c0x0000 (---------------) + I limo - 0x0022fcc7, // n0x02e0 c0x0000 (---------------) + I lincoln - 0x00320185, // n0x02e1 c0x0000 (---------------) + I linde - 0x0038e084, // n0x02e2 c0x0000 (---------------) + I link - 0x002d50c5, // n0x02e3 c0x0000 (---------------) + I lipsy - 0x00260744, // n0x02e4 c0x0000 (---------------) + I live - 
0x002de986, // n0x02e5 c0x0000 (---------------) + I living - 0x0025d605, // n0x02e6 c0x0000 (---------------) + I lixil - 0x30a0e9c2, // n0x02e7 c0x00c2 (n0x1347-n0x1356) + I lk - 0x00213a44, // n0x02e8 c0x0000 (---------------) + I loan - 0x00213a45, // n0x02e9 c0x0000 (---------------) + I loans - 0x00375d86, // n0x02ea c0x0000 (---------------) + I locker - 0x00337845, // n0x02eb c0x0000 (---------------) + I locus - 0x002d2244, // n0x02ec c0x0000 (---------------) + I loft - 0x002c3f43, // n0x02ed c0x0000 (---------------) + I lol - 0x003114c6, // n0x02ee c0x0000 (---------------) + I london - 0x00224e05, // n0x02ef c0x0000 (---------------) + I lotte - 0x002260c5, // n0x02f0 c0x0000 (---------------) + I lotto - 0x0023df44, // n0x02f1 c0x0000 (---------------) + I love - 0x00209403, // n0x02f2 c0x0000 (---------------) + I lpl - 0x0020940c, // n0x02f3 c0x0000 (---------------) + I lplfinancial - 0x30e8a802, // n0x02f4 c0x00c3 (n0x1356-n0x135b) + I lr - 0x31205642, // n0x02f5 c0x00c4 (n0x135b-n0x135d) + I ls - 0x31608bc2, // n0x02f6 c0x00c5 (n0x135d-n0x135f) + I lt - 0x00312883, // n0x02f7 c0x0000 (---------------) + I ltd - 0x00312884, // n0x02f8 c0x0000 (---------------) + I ltda - 0x31a03842, // n0x02f9 c0x00c6 (n0x135f-n0x1360) + I lu - 0x002e2ac8, // n0x02fa c0x0000 (---------------) + I lundbeck - 0x002e30c5, // n0x02fb c0x0000 (---------------) + I lupin - 0x00235544, // n0x02fc c0x0000 (---------------) + I luxe - 0x00238646, // n0x02fd c0x0000 (---------------) + I luxury - 0x31e06582, // n0x02fe c0x00c7 (n0x1360-n0x1369) + I lv - 0x32207e42, // n0x02ff c0x00c8 (n0x1369-n0x1372) + I ly - 0x32600442, // n0x0300 c0x00c9 (n0x1372-n0x1378) + I ma - 0x00373685, // n0x0301 c0x0000 (---------------) + I macys - 0x003197c6, // n0x0302 c0x0000 (---------------) + I madrid - 0x00269fc4, // n0x0303 c0x0000 (---------------) + I maif - 0x002b7f46, // n0x0304 c0x0000 (---------------) + I maison - 0x00248206, // n0x0305 c0x0000 (---------------) + I makeup - 
0x00204283, // n0x0306 c0x0000 (---------------) + I man - 0x0036df4a, // n0x0307 c0x0000 (---------------) + I management - 0x0023b305, // n0x0308 c0x0000 (---------------) + I mango - 0x0022b8c6, // n0x0309 c0x0000 (---------------) + I market - 0x002ec909, // n0x030a c0x0000 (---------------) + I marketing - 0x0022b8c7, // n0x030b c0x0000 (---------------) + I markets - 0x00245948, // n0x030c c0x0000 (---------------) + I marriott - 0x0020a009, // n0x030d c0x0000 (---------------) + I marshalls - 0x002bbd48, // n0x030e c0x0000 (---------------) + I maserati - 0x0022dcc6, // n0x030f c0x0000 (---------------) + I mattel - 0x002089c3, // n0x0310 c0x0000 (---------------) + I mba - 0x32a1a3c2, // n0x0311 c0x00ca (n0x1378-n0x137a) + I mc - 0x0037c0c3, // n0x0312 c0x0000 (---------------) + I mcd - 0x0037c0c9, // n0x0313 c0x0000 (---------------) + I mcdonalds - 0x00325988, // n0x0314 c0x0000 (---------------) + I mckinsey - 0x32e4dd82, // n0x0315 c0x00cb (n0x137a-n0x137b) + I md - 0x33200982, // n0x0316 c0x00cc (n0x137b-n0x1388) + I me - 0x00213443, // n0x0317 c0x0000 (---------------) + I med - 0x00303545, // n0x0318 c0x0000 (---------------) + I media - 0x00269484, // n0x0319 c0x0000 (---------------) + I meet - 0x002e1389, // n0x031a c0x0000 (---------------) + I melbourne - 0x002b4304, // n0x031b c0x0000 (---------------) + I meme - 0x0036e248, // n0x031c c0x0000 (---------------) + I memorial - 0x00209983, // n0x031d c0x0000 (---------------) + I men - 0x003295c4, // n0x031e c0x0000 (---------------) + I menu - 0x0021a583, // n0x031f c0x0000 (---------------) + I meo - 0x00263147, // n0x0320 c0x0000 (---------------) + I metlife - 0x3360ea82, // n0x0321 c0x00cd (n0x1388-n0x1391) + I mg - 0x0025a7c2, // n0x0322 c0x0000 (---------------) + I mh - 0x00231bc5, // n0x0323 c0x0000 (---------------) + I miami - 0x00269849, // n0x0324 c0x0000 (---------------) + I microsoft - 0x00207dc3, // n0x0325 c0x0000 (---------------) + I mil - 0x0027d684, // n0x0326 c0x0000 
(---------------) + I mini - 0x0031ba44, // n0x0327 c0x0000 (---------------) + I mint - 0x0023e183, // n0x0328 c0x0000 (---------------) + I mit - 0x0027ee0a, // n0x0329 c0x0000 (---------------) + I mitsubishi - 0x33b67282, // n0x032a c0x00ce (n0x1391-n0x1399) + I mk - 0x33e13a02, // n0x032b c0x00cf (n0x1399-n0x13a0) + I ml - 0x002c2fc3, // n0x032c c0x0000 (---------------) + I mlb - 0x00369c83, // n0x032d c0x0000 (---------------) + I mls - 0x0160bec2, // n0x032e c0x0005 (---------------)* o I mm - 0x00374a83, // n0x032f c0x0000 (---------------) + I mma - 0x34223b02, // n0x0330 c0x00d0 (n0x13a0-n0x13a4) + I mn - 0x00223b04, // n0x0331 c0x0000 (---------------) + I mnet - 0x34608442, // n0x0332 c0x00d1 (n0x13a4-n0x13a9) + I mo - 0x34a0bf04, // n0x0333 c0x00d2 (n0x13a9-n0x13aa) + I mobi - 0x002d7b86, // n0x0334 c0x0000 (---------------) + I mobily - 0x0026d444, // n0x0335 c0x0000 (---------------) + I moda - 0x0024e4c3, // n0x0336 c0x0000 (---------------) + I moe - 0x00282a43, // n0x0337 c0x0000 (---------------) + I moi - 0x002e4483, // n0x0338 c0x0000 (---------------) + I mom - 0x00243d86, // n0x0339 c0x0000 (---------------) + I monash - 0x002c8705, // n0x033a c0x0000 (---------------) + I money - 0x002c3b87, // n0x033b c0x0000 (---------------) + I monster - 0x00258f09, // n0x033c c0x0000 (---------------) + I montblanc - 0x002c6c05, // n0x033d c0x0000 (---------------) + I mopar - 0x002c8646, // n0x033e c0x0000 (---------------) + I mormon - 0x002c8c48, // n0x033f c0x0000 (---------------) + I mortgage - 0x002c8e46, // n0x0340 c0x0000 (---------------) + I moscow - 0x00278444, // n0x0341 c0x0000 (---------------) + I moto - 0x0029b44b, // n0x0342 c0x0000 (---------------) + I motorcycles - 0x002ca883, // n0x0343 c0x0000 (---------------) + I mov - 0x002ca885, // n0x0344 c0x0000 (---------------) + I movie - 0x002ca9c8, // n0x0345 c0x0000 (---------------) + I movistar - 0x0022c182, // n0x0346 c0x0000 (---------------) + I mp - 0x0033a982, // n0x0347 
c0x0000 (---------------) + I mq - 0x34e4a142, // n0x0348 c0x00d3 (n0x13aa-n0x13ac) + I mr - 0x35210f42, // n0x0349 c0x00d4 (n0x13ac-n0x13b1) + I ms - 0x0025d203, // n0x034a c0x0000 (---------------) + I msd - 0x35605402, // n0x034b c0x00d5 (n0x13b1-n0x13b5) + I mt - 0x0026db43, // n0x034c c0x0000 (---------------) + I mtn - 0x002cacc4, // n0x034d c0x0000 (---------------) + I mtpc - 0x002cb483, // n0x034e c0x0000 (---------------) + I mtr - 0x35e03f02, // n0x034f c0x00d7 (n0x13b6-n0x13bd) + I mu - 0x002cd0cb, // n0x0350 c0x0000 (---------------) + I multichoice - 0x362d1086, // n0x0351 c0x00d8 (n0x13bd-n0x15e1) + I museum - 0x0023c806, // n0x0352 c0x0000 (---------------) + I mutual - 0x002d16c8, // n0x0353 c0x0000 (---------------) + I mutuelle - 0x366bf742, // n0x0354 c0x00d9 (n0x15e1-n0x15ef) + I mv - 0x36a12002, // n0x0355 c0x00da (n0x15ef-n0x15fa) + I mw - 0x36e195c2, // n0x0356 c0x00db (n0x15fa-n0x1600) + I mx - 0x37225742, // n0x0357 c0x00dc (n0x1600-n0x1608) + I my - 0x37614d82, // n0x0358 c0x00dd (n0x1608-n0x1609)* o I mz - 0x00214d8b, // n0x0359 c0x0000 (---------------) + I mzansimagic - 0x37a00902, // n0x035a c0x00de (n0x1609-n0x161a) + I na - 0x0021ac43, // n0x035b c0x0000 (---------------) + I nab - 0x00379545, // n0x035c c0x0000 (---------------) + I nadex - 0x00255a46, // n0x035d c0x0000 (---------------) + I nagoya - 0x37e00904, // n0x035e c0x00df (n0x161a-n0x161c) + I name - 0x0033ca07, // n0x035f c0x0000 (---------------) + I naspers - 0x00378fca, // n0x0360 c0x0000 (---------------) + I nationwide - 0x002dcac6, // n0x0361 c0x0000 (---------------) + I natura - 0x00398f44, // n0x0362 c0x0000 (---------------) + I navy - 0x0025ce03, // n0x0363 c0x0000 (---------------) + I nba - 0x38a095c2, // n0x0364 c0x00e2 (n0x161e-n0x161f) + I nc - 0x00202ac2, // n0x0365 c0x0000 (---------------) + I ne - 0x0022af03, // n0x0366 c0x0000 (---------------) + I nec - 0x38e23b43, // n0x0367 c0x00e3 (n0x161f-n0x1654) + I net - 0x00391e87, // n0x0368 c0x0000 
(---------------) + I netbank - 0x0025d507, // n0x0369 c0x0000 (---------------) + I netflix - 0x002565c7, // n0x036a c0x0000 (---------------) + I network - 0x0022a247, // n0x036b c0x0000 (---------------) + I neustar - 0x0021e603, // n0x036c c0x0000 (---------------) + I new - 0x002ec30a, // n0x036d c0x0000 (---------------) + I newholland - 0x0021e604, // n0x036e c0x0000 (---------------) + I news - 0x0024d984, // n0x036f c0x0000 (---------------) + I next - 0x0024d98a, // n0x0370 c0x0000 (---------------) + I nextdirect - 0x0026ec05, // n0x0371 c0x0000 (---------------) + I nexus - 0x3a2016c2, // n0x0372 c0x00e8 (n0x165c-n0x1666) + I nf - 0x00252243, // n0x0373 c0x0000 (---------------) + I nfl - 0x3a6026c2, // n0x0374 c0x00e9 (n0x1666-n0x1670) + I ng - 0x0023b383, // n0x0375 c0x0000 (---------------) + I ngo - 0x0026f003, // n0x0376 c0x0000 (---------------) + I nhk - 0x3ae01782, // n0x0377 c0x00eb (n0x1671-n0x167f) o I ni - 0x002a7c84, // n0x0378 c0x0000 (---------------) + I nico - 0x00221d04, // n0x0379 c0x0000 (---------------) + I nike - 0x00203a85, // n0x037a c0x0000 (---------------) + I nikon - 0x002ca605, // n0x037b c0x0000 (---------------) + I ninja - 0x002292c6, // n0x037c c0x0000 (---------------) + I nissan - 0x0022d246, // n0x037d c0x0000 (---------------) + I nissay - 0x3b246d02, // n0x037e c0x00ec (n0x167f-n0x1682) + I nl - 0x3b601382, // n0x037f c0x00ed (n0x1682-n0x1958) + I no - 0x0031ae05, // n0x0380 c0x0000 (---------------) + I nokia - 0x0023c512, // n0x0381 c0x0000 (---------------) + I northwesternmutual - 0x0035eb06, // n0x0382 c0x0000 (---------------) + I norton - 0x00220303, // n0x0383 c0x0000 (---------------) + I now - 0x0029e646, // n0x0384 c0x0000 (---------------) + I nowruz - 0x00220305, // n0x0385 c0x0000 (---------------) + I nowtv - 0x01612182, // n0x0386 c0x0005 (---------------)* o I np - 0x43a0e602, // n0x0387 c0x010e (n0x1980-n0x1987) + I nr - 0x002d6d03, // n0x0388 c0x0000 (---------------) + I nra - 0x0026e2c3, // 
n0x0389 c0x0000 (---------------) + I nrw - 0x00372083, // n0x038a c0x0000 (---------------) + I ntt - 0x43e04182, // n0x038b c0x010f (n0x1987-n0x198a) + I nu - 0x0036dcc3, // n0x038c c0x0000 (---------------) + I nyc - 0x44208282, // n0x038d c0x0110 (n0x198a-n0x199a) + I nz - 0x0020bf43, // n0x038e c0x0000 (---------------) + I obi - 0x002e3608, // n0x038f c0x0000 (---------------) + I observer - 0x0020ce83, // n0x0390 c0x0000 (---------------) + I off - 0x0021dec6, // n0x0391 c0x0000 (---------------) + I office - 0x00395107, // n0x0392 c0x0000 (---------------) + I okinawa - 0x0020c5c6, // n0x0393 c0x0000 (---------------) + I olayan - 0x0020c5cb, // n0x0394 c0x0000 (---------------) + I olayangroup - 0x00398e87, // n0x0395 c0x0000 (---------------) + I oldnavy - 0x00387784, // n0x0396 c0x0000 (---------------) + I ollo - 0x44a013c2, // n0x0397 c0x0112 (n0x199b-n0x19a4) + I om - 0x002e2f45, // n0x0398 c0x0000 (---------------) + I omega - 0x00215243, // n0x0399 c0x0000 (---------------) + I one - 0x00207083, // n0x039a c0x0000 (---------------) + I ong - 0x00319c43, // n0x039b c0x0000 (---------------) + I onl - 0x00319c46, // n0x039c c0x0000 (---------------) + I online - 0x0039944a, // n0x039d c0x0000 (---------------) + I onyourside - 0x0028f983, // n0x039e c0x0000 (---------------) + I ooo - 0x0023cb04, // n0x039f c0x0000 (---------------) + I open - 0x00227a86, // n0x03a0 c0x0000 (---------------) + I oracle - 0x00395846, // n0x03a1 c0x0000 (---------------) + I orange - 0x44e28743, // n0x03a2 c0x0113 (n0x19a4-n0x19e1) + I org - 0x002b0187, // n0x03a3 c0x0000 (---------------) + I organic - 0x002d994d, // n0x03a4 c0x0000 (---------------) + I orientexpress - 0x00381387, // n0x03a5 c0x0000 (---------------) + I origins - 0x0029b185, // n0x03a6 c0x0000 (---------------) + I osaka - 0x00268446, // n0x03a7 c0x0000 (---------------) + I otsuka - 0x00224e43, // n0x03a8 c0x0000 (---------------) + I ott - 0x0020ed03, // n0x03a9 c0x0000 (---------------) + I ovh - 
0x4660aac2, // n0x03aa c0x0119 (n0x1a1e-n0x1a29) + I pa - 0x00326904, // n0x03ab c0x0000 (---------------) + I page - 0x0024c80c, // n0x03ac c0x0000 (---------------) + I pamperedchef - 0x00262c09, // n0x03ad c0x0000 (---------------) + I panasonic - 0x00338487, // n0x03ae c0x0000 (---------------) + I panerai - 0x00277705, // n0x03af c0x0000 (---------------) + I paris - 0x0029d144, // n0x03b0 c0x0000 (---------------) + I pars - 0x002a6948, // n0x03b1 c0x0000 (---------------) + I partners - 0x002aef85, // n0x03b2 c0x0000 (---------------) + I parts - 0x002b6945, // n0x03b3 c0x0000 (---------------) + I party - 0x002cdb09, // n0x03b4 c0x0000 (---------------) + I passagens - 0x002be303, // n0x03b5 c0x0000 (---------------) + I pay - 0x002be304, // n0x03b6 c0x0000 (---------------) + I payu - 0x002cad44, // n0x03b7 c0x0000 (---------------) + I pccw - 0x46a00582, // n0x03b8 c0x011a (n0x1a29-n0x1a31) + I pe - 0x00211cc3, // n0x03b9 c0x0000 (---------------) + I pet - 0x46ef6c02, // n0x03ba c0x011b (n0x1a31-n0x1a34) + I pf - 0x002f6c06, // n0x03bb c0x0000 (---------------) + I pfizer - 0x01648ac2, // n0x03bc c0x0005 (---------------)* o I pg - 0x4729cd02, // n0x03bd c0x011c (n0x1a34-n0x1a3c) + I ph - 0x00373588, // n0x03be c0x0000 (---------------) + I pharmacy - 0x002d5007, // n0x03bf c0x0000 (---------------) + I philips - 0x0029cd05, // n0x03c0 c0x0000 (---------------) + I photo - 0x002d564b, // n0x03c1 c0x0000 (---------------) + I photography - 0x002d2806, // n0x03c2 c0x0000 (---------------) + I photos - 0x002d5846, // n0x03c3 c0x0000 (---------------) + I physio - 0x002d59c6, // n0x03c4 c0x0000 (---------------) + I piaget - 0x00221604, // n0x03c5 c0x0000 (---------------) + I pics - 0x002d6146, // n0x03c6 c0x0000 (---------------) + I pictet - 0x002d6648, // n0x03c7 c0x0000 (---------------) + I pictures - 0x00241c83, // n0x03c8 c0x0000 (---------------) + I pid - 0x00219f83, // n0x03c9 c0x0000 (---------------) + I pin - 0x00219f84, // n0x03ca c0x0000 
(---------------) + I ping - 0x002d6dc4, // n0x03cb c0x0000 (---------------) + I pink - 0x002d7407, // n0x03cc c0x0000 (---------------) + I pioneer - 0x002d7e85, // n0x03cd c0x0000 (---------------) + I pizza - 0x476d7fc2, // n0x03ce c0x011d (n0x1a3c-n0x1a4a) + I pk - 0x47a09442, // n0x03cf c0x011e (n0x1a4a-n0x1aef) + I pl - 0x0020b305, // n0x03d0 c0x0000 (---------------) + I place - 0x002a00c4, // n0x03d1 c0x0000 (---------------) + I play - 0x002d92cb, // n0x03d2 c0x0000 (---------------) + I playstation - 0x002dc1c8, // n0x03d3 c0x0000 (---------------) + I plumbing - 0x002dd404, // n0x03d4 c0x0000 (---------------) + I plus - 0x0020ea42, // n0x03d5 c0x0000 (---------------) + I pm - 0x482488c2, // n0x03d6 c0x0120 (n0x1b1e-n0x1b23) + I pn - 0x002b0ac3, // n0x03d7 c0x0000 (---------------) + I pnc - 0x002dd844, // n0x03d8 c0x0000 (---------------) + I pohl - 0x002dd945, // n0x03d9 c0x0000 (---------------) + I poker - 0x002dde87, // n0x03da c0x0000 (---------------) + I politie - 0x002dfb04, // n0x03db c0x0000 (---------------) + I porn - 0x00360404, // n0x03dc c0x0000 (---------------) + I post - 0x48604e02, // n0x03dd c0x0121 (n0x1b23-n0x1b30) + I pr - 0x00358049, // n0x03de c0x0000 (---------------) + I pramerica - 0x002e0505, // n0x03df c0x0000 (---------------) + I praxi - 0x00246a05, // n0x03e0 c0x0000 (---------------) + I press - 0x002e12c5, // n0x03e1 c0x0000 (---------------) + I prime - 0x48a24b03, // n0x03e2 c0x0122 (n0x1b30-n0x1b37) + I pro - 0x002e1bc4, // n0x03e3 c0x0000 (---------------) + I prod - 0x002e1bcb, // n0x03e4 c0x0000 (---------------) + I productions - 0x002e2004, // n0x03e5 c0x0000 (---------------) + I prof - 0x002e228b, // n0x03e6 c0x0000 (---------------) + I progressive - 0x002e43c5, // n0x03e7 c0x0000 (---------------) + I promo - 0x00224b0a, // n0x03e8 c0x0000 (---------------) + I properties - 0x002e4bc8, // n0x03e9 c0x0000 (---------------) + I property - 0x002e4dca, // n0x03ea c0x0000 (---------------) + I protection - 
0x002e5043, // n0x03eb c0x0000 (---------------) + I pru - 0x002e504a, // n0x03ec c0x0000 (---------------) + I prudential - 0x48e08102, // n0x03ed c0x0123 (n0x1b37-n0x1b3e) + I ps - 0x492d3242, // n0x03ee c0x0124 (n0x1b3e-n0x1b47) + I pt - 0x0028f343, // n0x03ef c0x0000 (---------------) + I pub - 0x496e6702, // n0x03f0 c0x0125 (n0x1b47-n0x1b4d) + I pw - 0x002e6703, // n0x03f1 c0x0000 (---------------) + I pwc - 0x49b32942, // n0x03f2 c0x0126 (n0x1b4d-n0x1b54) + I py - 0x49f16bc2, // n0x03f3 c0x0127 (n0x1b54-n0x1b5d) + I qa - 0x002e7204, // n0x03f4 c0x0000 (---------------) + I qpon - 0x0021d986, // n0x03f5 c0x0000 (---------------) + I quebec - 0x002b7c85, // n0x03f6 c0x0000 (---------------) + I quest - 0x002e77c3, // n0x03f7 c0x0000 (---------------) + I qvc - 0x00352e46, // n0x03f8 c0x0000 (---------------) + I racing - 0x0022e484, // n0x03f9 c0x0000 (---------------) + I raid - 0x4a208c82, // n0x03fa c0x0128 (n0x1b5d-n0x1b61) + I re - 0x002d4a44, // n0x03fb c0x0000 (---------------) + I read - 0x002c410a, // n0x03fc c0x0000 (---------------) + I realestate - 0x00338887, // n0x03fd c0x0000 (---------------) + I realtor - 0x003090c6, // n0x03fe c0x0000 (---------------) + I realty - 0x0022c2c7, // n0x03ff c0x0000 (---------------) + I recipes - 0x002437c3, // n0x0400 c0x0000 (---------------) + I red - 0x003a1488, // n0x0401 c0x0000 (---------------) + I redstone - 0x00337ecb, // n0x0402 c0x0000 (---------------) + I redumbrella - 0x0036d345, // n0x0403 c0x0000 (---------------) + I rehab - 0x002cb305, // n0x0404 c0x0000 (---------------) + I reise - 0x002cb306, // n0x0405 c0x0000 (---------------) + I reisen - 0x003013c4, // n0x0406 c0x0000 (---------------) + I reit - 0x00325d48, // n0x0407 c0x0000 (---------------) + I reliance - 0x00208c83, // n0x0408 c0x0000 (---------------) + I ren - 0x0020acc4, // n0x0409 c0x0000 (---------------) + I rent - 0x0020acc7, // n0x040a c0x0000 (---------------) + I rentals - 0x002b7946, // n0x040b c0x0000 (---------------) + 
I repair - 0x002f9f46, // n0x040c c0x0000 (---------------) + I report - 0x002a0e4a, // n0x040d c0x0000 (---------------) + I republican - 0x0024d844, // n0x040e c0x0000 (---------------) + I rest - 0x003668ca, // n0x040f c0x0000 (---------------) + I restaurant - 0x003264c6, // n0x0410 c0x0000 (---------------) + I review - 0x003264c7, // n0x0411 c0x0000 (---------------) + I reviews - 0x00257c47, // n0x0412 c0x0000 (---------------) + I rexroth - 0x002736c4, // n0x0413 c0x0000 (---------------) + I rich - 0x002736c9, // n0x0414 c0x0000 (---------------) + I richardli - 0x002db105, // n0x0415 c0x0000 (---------------) + I ricoh - 0x0032084b, // n0x0416 c0x0000 (---------------) + I rightathome - 0x00251603, // n0x0417 c0x0000 (---------------) + I ril - 0x0021a283, // n0x0418 c0x0000 (---------------) + I rio - 0x00228803, // n0x0419 c0x0000 (---------------) + I rip - 0x00267204, // n0x041a c0x0000 (---------------) + I rmit - 0x4a6020c2, // n0x041b c0x0129 (n0x1b61-n0x1b6d) + I ro - 0x0028bec6, // n0x041c c0x0000 (---------------) + I rocher - 0x002a2845, // n0x041d c0x0000 (---------------) + I rocks - 0x002d45c5, // n0x041e c0x0000 (---------------) + I rodeo - 0x0039d606, // n0x041f c0x0000 (---------------) + I rogers - 0x00374604, // n0x0420 c0x0000 (---------------) + I room - 0x4aa006c2, // n0x0421 c0x012a (n0x1b6d-n0x1b74) + I rs - 0x0039d704, // n0x0422 c0x0000 (---------------) + I rsvp - 0x4ae0fe82, // n0x0423 c0x012b (n0x1b74-n0x1bf7) + I ru - 0x002367c4, // n0x0424 c0x0000 (---------------) + I ruhr - 0x002263c3, // n0x0425 c0x0000 (---------------) + I run - 0x4b26e302, // n0x0426 c0x012c (n0x1bf7-n0x1c00) + I rw - 0x003252c3, // n0x0427 c0x0000 (---------------) + I rwe - 0x002adec6, // n0x0428 c0x0000 (---------------) + I ryukyu - 0x4b601002, // n0x0429 c0x012d (n0x1c00-n0x1c08) + I sa - 0x0030e208, // n0x042a c0x0000 (---------------) + I saarland - 0x0039f4c4, // n0x042b c0x0000 (---------------) + I safe - 0x0039f4c6, // n0x042c c0x0000 
(---------------) + I safety - 0x002dc646, // n0x042d c0x0000 (---------------) + I sakura - 0x002595c4, // n0x042e c0x0000 (---------------) + I sale - 0x00311445, // n0x042f c0x0000 (---------------) + I salon - 0x0038dd88, // n0x0430 c0x0000 (---------------) + I samsclub - 0x00398187, // n0x0431 c0x0000 (---------------) + I samsung - 0x00253bc7, // n0x0432 c0x0000 (---------------) + I sandvik - 0x00253bcf, // n0x0433 c0x0000 (---------------) + I sandvikcoromant - 0x00295f06, // n0x0434 c0x0000 (---------------) + I sanofi - 0x00213b43, // n0x0435 c0x0000 (---------------) + I sap - 0x00213b44, // n0x0436 c0x0000 (---------------) + I sapo - 0x00224d44, // n0x0437 c0x0000 (---------------) + I sarl - 0x00223143, // n0x0438 c0x0000 (---------------) + I sas - 0x00225ac4, // n0x0439 c0x0000 (---------------) + I save - 0x0039bb84, // n0x043a c0x0000 (---------------) + I saxo - 0x4ba286c2, // n0x043b c0x012e (n0x1c08-n0x1c0d) + I sb - 0x0028b4c3, // n0x043c c0x0000 (---------------) + I sbi - 0x00230f83, // n0x043d c0x0000 (---------------) + I sbs - 0x4be07f02, // n0x043e c0x012f (n0x1c0d-n0x1c12) + I sc - 0x00237083, // n0x043f c0x0000 (---------------) + I sca - 0x00329b83, // n0x0440 c0x0000 (---------------) + I scb - 0x0021728a, // n0x0441 c0x0000 (---------------) + I schaeffler - 0x002e6b07, // n0x0442 c0x0000 (---------------) + I schmidt - 0x00231ecc, // n0x0443 c0x0000 (---------------) + I scholarships - 0x00232186, // n0x0444 c0x0000 (---------------) + I school - 0x00233546, // n0x0445 c0x0000 (---------------) + I schule - 0x00234447, // n0x0446 c0x0000 (---------------) + I schwarz - 0x00235f47, // n0x0447 c0x0000 (---------------) + I science - 0x00242c49, // n0x0448 c0x0000 (---------------) + I scjohnson - 0x0021ec84, // n0x0449 c0x0000 (---------------) + I scor - 0x00399d44, // n0x044a c0x0000 (---------------) + I scot - 0x4c22b2c2, // n0x044b c0x0130 (n0x1c12-n0x1c1a) + I sd - 0x4c604ec2, // n0x044c c0x0131 (n0x1c1a-n0x1c43) + I se - 
0x00319204, // n0x044d c0x0000 (---------------) + I seat - 0x0031d846, // n0x044e c0x0000 (---------------) + I secure - 0x002363c8, // n0x044f c0x0000 (---------------) + I security - 0x00279f44, // n0x0450 c0x0000 (---------------) + I seek - 0x0025b186, // n0x0451 c0x0000 (---------------) + I select - 0x002cca45, // n0x0452 c0x0000 (---------------) + I sener - 0x0020b748, // n0x0453 c0x0000 (---------------) + I services - 0x00204ec3, // n0x0454 c0x0000 (---------------) + I ses - 0x00336fc5, // n0x0455 c0x0000 (---------------) + I seven - 0x0035f283, // n0x0456 c0x0000 (---------------) + I sew - 0x00246b03, // n0x0457 c0x0000 (---------------) + I sex - 0x00246b04, // n0x0458 c0x0000 (---------------) + I sexy - 0x0024ba03, // n0x0459 c0x0000 (---------------) + I sfr - 0x4ca6ed02, // n0x045a c0x0132 (n0x1c43-n0x1c4a) + I sg - 0x4ce01242, // n0x045b c0x0133 (n0x1c4a-n0x1c51) + I sh - 0x002514c9, // n0x045c c0x0000 (---------------) + I shangrila - 0x002545c5, // n0x045d c0x0000 (---------------) + I sharp - 0x002574c4, // n0x045e c0x0000 (---------------) + I shaw - 0x002582c5, // n0x045f c0x0000 (---------------) + I shell - 0x00210404, // n0x0460 c0x0000 (---------------) + I shia - 0x0035cb07, // n0x0461 c0x0000 (---------------) + I shiksha - 0x00387c45, // n0x0462 c0x0000 (---------------) + I shoes - 0x002b1e06, // n0x0463 c0x0000 (---------------) + I shouji - 0x002b2f44, // n0x0464 c0x0000 (---------------) + I show - 0x002b4188, // n0x0465 c0x0000 (---------------) + I showtime - 0x002c03c7, // n0x0466 c0x0000 (---------------) + I shriram - 0x4d2091c2, // n0x0467 c0x0134 (n0x1c51-n0x1c52) + I si - 0x00358404, // n0x0468 c0x0000 (---------------) + I silk - 0x002f6a84, // n0x0469 c0x0000 (---------------) + I sina - 0x00286a87, // n0x046a c0x0000 (---------------) + I singles - 0x00243684, // n0x046b c0x0000 (---------------) + I site - 0x0025b502, // n0x046c c0x0000 (---------------) + I sj - 0x4d608502, // n0x046d c0x0135 (n0x1c52-n0x1c53) + I 
sk - 0x00208503, // n0x046e c0x0000 (---------------) + I ski - 0x0037cb84, // n0x046f c0x0000 (---------------) + I skin - 0x00236f43, // n0x0470 c0x0000 (---------------) + I sky - 0x00236f45, // n0x0471 c0x0000 (---------------) + I skype - 0x4da20102, // n0x0472 c0x0136 (n0x1c53-n0x1c58) + I sl - 0x002cdd05, // n0x0473 c0x0000 (---------------) + I sling - 0x00213e02, // n0x0474 c0x0000 (---------------) + I sm - 0x0034dd45, // n0x0475 c0x0000 (---------------) + I smart - 0x0035d9c5, // n0x0476 c0x0000 (---------------) + I smile - 0x4de14b82, // n0x0477 c0x0137 (n0x1c58-n0x1c60) + I sn - 0x00214b84, // n0x0478 c0x0000 (---------------) + I sncf - 0x4e205f02, // n0x0479 c0x0138 (n0x1c60-n0x1c63) + I so - 0x00324dc6, // n0x047a c0x0000 (---------------) + I soccer - 0x002a4fc6, // n0x047b c0x0000 (---------------) + I social - 0x00269988, // n0x047c c0x0000 (---------------) + I softbank - 0x002b9808, // n0x047d c0x0000 (---------------) + I software - 0x002f8404, // n0x047e c0x0000 (---------------) + I sohu - 0x002e0985, // n0x047f c0x0000 (---------------) + I solar - 0x002f85c9, // n0x0480 c0x0000 (---------------) + I solutions - 0x003533c4, // n0x0481 c0x0000 (---------------) + I song - 0x00399404, // n0x0482 c0x0000 (---------------) + I sony - 0x002bf303, // n0x0483 c0x0000 (---------------) + I soy - 0x0020aa85, // n0x0484 c0x0000 (---------------) + I space - 0x00370047, // n0x0485 c0x0000 (---------------) + I spiegel - 0x00208144, // n0x0486 c0x0000 (---------------) + I spot - 0x0032fd8d, // n0x0487 c0x0000 (---------------) + I spreadbetting - 0x00332d82, // n0x0488 c0x0000 (---------------) + I sr - 0x00332d83, // n0x0489 c0x0000 (---------------) + I srl - 0x00334483, // n0x048a c0x0000 (---------------) + I srt - 0x4e602602, // n0x048b c0x0139 (n0x1c63-n0x1c6f) + I st - 0x0037e645, // n0x048c c0x0000 (---------------) + I stada - 0x00231d47, // n0x048d c0x0000 (---------------) + I staples - 0x0022a304, // n0x048e c0x0000 (---------------) + I 
star - 0x0022a307, // n0x048f c0x0000 (---------------) + I starhub - 0x0020a209, // n0x0490 c0x0000 (---------------) + I statebank - 0x002c4249, // n0x0491 c0x0000 (---------------) + I statefarm - 0x002ef347, // n0x0492 c0x0000 (---------------) + I statoil - 0x00277543, // n0x0493 c0x0000 (---------------) + I stc - 0x00277548, // n0x0494 c0x0000 (---------------) + I stcgroup - 0x0029d589, // n0x0495 c0x0000 (---------------) + I stockholm - 0x00358fc7, // n0x0496 c0x0000 (---------------) + I storage - 0x00364cc5, // n0x0497 c0x0000 (---------------) + I store - 0x002e7c86, // n0x0498 c0x0000 (---------------) + I studio - 0x002e7e05, // n0x0499 c0x0000 (---------------) + I study - 0x0035f505, // n0x049a c0x0000 (---------------) + I style - 0x4ea00702, // n0x049b c0x013a (n0x1c6f-n0x1c8f) + I su - 0x0026cc05, // n0x049c c0x0000 (---------------) + I sucks - 0x002bc24a, // n0x049d c0x0000 (---------------) + I supersport - 0x002c0208, // n0x049e c0x0000 (---------------) + I supplies - 0x002bfe86, // n0x049f c0x0000 (---------------) + I supply - 0x002e4607, // n0x04a0 c0x0000 (---------------) + I support - 0x002461c4, // n0x04a1 c0x0000 (---------------) + I surf - 0x002a3c47, // n0x04a2 c0x0000 (---------------) + I surgery - 0x002ea786, // n0x04a3 c0x0000 (---------------) + I suzuki - 0x4ee365c2, // n0x04a4 c0x013b (n0x1c8f-n0x1c94) + I sv - 0x00375a86, // n0x04a5 c0x0000 (---------------) + I swatch - 0x002ecb4a, // n0x04a6 c0x0000 (---------------) + I swiftcover - 0x002ed505, // n0x04a7 c0x0000 (---------------) + I swiss - 0x4f2edb02, // n0x04a8 c0x013c (n0x1c94-n0x1c95) + I sx - 0x4f68c0c2, // n0x04a9 c0x013d (n0x1c95-n0x1c9b) + I sy - 0x00335c06, // n0x04aa c0x0000 (---------------) + I sydney - 0x002ad308, // n0x04ab c0x0000 (---------------) + I symantec - 0x003943c7, // n0x04ac c0x0000 (---------------) + I systems - 0x4fa000c2, // n0x04ad c0x013e (n0x1c9b-n0x1c9e) + I sz - 0x0020e043, // n0x04ae c0x0000 (---------------) + I tab - 0x002004c6, 
// n0x04af c0x0000 (---------------) + I taipei - 0x00222d84, // n0x04b0 c0x0000 (---------------) + I talk - 0x00394fc6, // n0x04b1 c0x0000 (---------------) + I taobao - 0x00249c86, // n0x04b2 c0x0000 (---------------) + I target - 0x003125ca, // n0x04b3 c0x0000 (---------------) + I tatamotors - 0x0036bec5, // n0x04b4 c0x0000 (---------------) + I tatar - 0x0021f4c6, // n0x04b5 c0x0000 (---------------) + I tattoo - 0x00224103, // n0x04b6 c0x0000 (---------------) + I tax - 0x00224104, // n0x04b7 c0x0000 (---------------) + I taxi - 0x00204c42, // n0x04b8 c0x0000 (---------------) + I tc - 0x0026b7c3, // n0x04b9 c0x0000 (---------------) + I tci - 0x4fe0cd82, // n0x04ba c0x013f (n0x1c9e-n0x1c9f) + I td - 0x002cae83, // n0x04bb c0x0000 (---------------) + I tdk - 0x00367644, // n0x04bc c0x0000 (---------------) + I team - 0x002ad444, // n0x04bd c0x0000 (---------------) + I tech - 0x002ad44a, // n0x04be c0x0000 (---------------) + I technology - 0x0022ce43, // n0x04bf c0x0000 (---------------) + I tel - 0x00285708, // n0x04c0 c0x0000 (---------------) + I telecity - 0x00310b0a, // n0x04c1 c0x0000 (---------------) + I telefonica - 0x00240747, // n0x04c2 c0x0000 (---------------) + I temasek - 0x002f02c6, // n0x04c3 c0x0000 (---------------) + I tennis - 0x0032e244, // n0x04c4 c0x0000 (---------------) + I teva - 0x0025d582, // n0x04c5 c0x0000 (---------------) + I tf - 0x00205442, // n0x04c6 c0x0000 (---------------) + I tg - 0x50206982, // n0x04c7 c0x0140 (n0x1c9f-n0x1ca6) + I th - 0x0024a8c3, // n0x04c8 c0x0000 (---------------) + I thd - 0x00257a07, // n0x04c9 c0x0000 (---------------) + I theater - 0x00353107, // n0x04ca c0x0000 (---------------) + I theatre - 0x0034e74b, // n0x04cb c0x0000 (---------------) + I theguardian - 0x0034adc4, // n0x04cc c0x0000 (---------------) + I tiaa - 0x002f2307, // n0x04cd c0x0000 (---------------) + I tickets - 0x002ddf86, // n0x04ce c0x0000 (---------------) + I tienda - 0x0039f287, // n0x04cf c0x0000 (---------------) + I 
tiffany - 0x002e6a44, // n0x04d0 c0x0000 (---------------) + I tips - 0x0034f885, // n0x04d1 c0x0000 (---------------) + I tires - 0x002bc705, // n0x04d2 c0x0000 (---------------) + I tirol - 0x50631342, // n0x04d3 c0x0141 (n0x1ca6-n0x1cb5) + I tj - 0x0027db46, // n0x04d4 c0x0000 (---------------) + I tjmaxx - 0x00231343, // n0x04d5 c0x0000 (---------------) + I tjx - 0x0021a4c2, // n0x04d6 c0x0000 (---------------) + I tk - 0x00238946, // n0x04d7 c0x0000 (---------------) + I tkmaxx - 0x50a17cc2, // n0x04d8 c0x0142 (n0x1cb5-n0x1cb6) + I tl - 0x50e00c82, // n0x04d9 c0x0143 (n0x1cb6-n0x1cbe) + I tm - 0x00200c85, // n0x04da c0x0000 (---------------) + I tmall - 0x5124fd82, // n0x04db c0x0144 (n0x1cbe-n0x1cd2) + I tn - 0x51606e42, // n0x04dc c0x0145 (n0x1cd2-n0x1cd8) + I to - 0x00263b05, // n0x04dd c0x0000 (---------------) + I today - 0x00342205, // n0x04de c0x0000 (---------------) + I tokyo - 0x0021f585, // n0x04df c0x0000 (---------------) + I tools - 0x00206e43, // n0x04e0 c0x0000 (---------------) + I top - 0x00368b45, // n0x04e1 c0x0000 (---------------) + I toray - 0x002d28c7, // n0x04e2 c0x0000 (---------------) + I toshiba - 0x0025c405, // n0x04e3 c0x0000 (---------------) + I total - 0x002fc205, // n0x04e4 c0x0000 (---------------) + I tours - 0x002dc0c4, // n0x04e5 c0x0000 (---------------) + I town - 0x00264186, // n0x04e6 c0x0000 (---------------) + I toyota - 0x0026f0c4, // n0x04e7 c0x0000 (---------------) + I toys - 0x51a03902, // n0x04e8 c0x0146 (n0x1cd8-n0x1ced) + I tr - 0x00265705, // n0x04e9 c0x0000 (---------------) + I trade - 0x002a5c47, // n0x04ea c0x0000 (---------------) + I trading - 0x00305c88, // n0x04eb c0x0000 (---------------) + I training - 0x002a3206, // n0x04ec c0x0000 (---------------) + I travel - 0x002a320d, // n0x04ed c0x0000 (---------------) + I travelchannel - 0x002aa689, // n0x04ee c0x0000 (---------------) + I travelers - 0x002aa692, // n0x04ef c0x0000 (---------------) + I travelersinsurance - 0x0032a345, // n0x04f0 
c0x0000 (---------------) + I trust - 0x003424c3, // n0x04f1 c0x0000 (---------------) + I trv - 0x5260fac2, // n0x04f2 c0x0149 (n0x1cef-n0x1d00) + I tt - 0x002e5644, // n0x04f3 c0x0000 (---------------) + I tube - 0x002f78c3, // n0x04f4 c0x0000 (---------------) + I tui - 0x002ede45, // n0x04f5 c0x0000 (---------------) + I tunes - 0x002eeb85, // n0x04f6 c0x0000 (---------------) + I tushu - 0x52a203c2, // n0x04f7 c0x014a (n0x1d00-n0x1d04) + I tv - 0x00364c43, // n0x04f8 c0x0000 (---------------) + I tvs - 0x52e534c2, // n0x04f9 c0x014b (n0x1d04-n0x1d12) + I tw - 0x53223bc2, // n0x04fa c0x014c (n0x1d12-n0x1d1e) + I tz - 0x53624242, // n0x04fb c0x014d (n0x1d1e-n0x1d6d) + I ua - 0x0033c185, // n0x04fc c0x0000 (---------------) + I ubank - 0x00255ec3, // n0x04fd c0x0000 (---------------) + I ubs - 0x0022ae08, // n0x04fe c0x0000 (---------------) + I uconnect - 0x53a04682, // n0x04ff c0x014e (n0x1d6d-n0x1d76) + I ug - 0x53e01b02, // n0x0500 c0x014f (n0x1d76-n0x1d81) + I uk - 0x002a7c46, // n0x0501 c0x0000 (---------------) + I unicom - 0x00309e4a, // n0x0502 c0x0000 (---------------) + I university - 0x0020e783, // n0x0503 c0x0000 (---------------) + I uno - 0x00249583, // n0x0504 c0x0000 (---------------) + I uol - 0x002d2003, // n0x0505 c0x0000 (---------------) + I ups - 0x54a02242, // n0x0506 c0x0152 (n0x1d83-n0x1dc2) + I us - 0x62e041c2, // n0x0507 c0x018b (n0x1e65-n0x1e6b) + I uy - 0x6360fec2, // n0x0508 c0x018d (n0x1e6c-n0x1e70) + I uz - 0x00200c02, // n0x0509 c0x0000 (---------------) + I va - 0x00375889, // n0x050a c0x0000 (---------------) + I vacations - 0x002be804, // n0x050b c0x0000 (---------------) + I vana - 0x00343f88, // n0x050c c0x0000 (---------------) + I vanguard - 0x63ae7802, // n0x050d c0x018e (n0x1e70-n0x1e76) + I vc - 0x63e02a42, // n0x050e c0x018f (n0x1e76-n0x1e87) + I ve - 0x0023dfc5, // n0x050f c0x0000 (---------------) + I vegas - 0x0023a188, // n0x0510 c0x0000 (---------------) + I ventures - 0x002ecd08, // n0x0511 c0x0000 
(---------------) + I verisign - 0x003939cc, // n0x0512 c0x0000 (---------------) + I versicherung - 0x00240683, // n0x0513 c0x0000 (---------------) + I vet - 0x00234e42, // n0x0514 c0x0000 (---------------) + I vg - 0x642065c2, // n0x0515 c0x0190 (n0x1e87-n0x1e8c) + I vi - 0x002c7086, // n0x0516 c0x0000 (---------------) + I viajes - 0x002f1145, // n0x0517 c0x0000 (---------------) + I video - 0x0030a8c3, // n0x0518 c0x0000 (---------------) + I vig - 0x0032ae86, // n0x0519 c0x0000 (---------------) + I viking - 0x002f1286, // n0x051a c0x0000 (---------------) + I villas - 0x0025e683, // n0x051b c0x0000 (---------------) + I vin - 0x002f69c3, // n0x051c c0x0000 (---------------) + I vip - 0x002f6d86, // n0x051d c0x0000 (---------------) + I virgin - 0x002f7304, // n0x051e c0x0000 (---------------) + I visa - 0x0027bc46, // n0x051f c0x0000 (---------------) + I vision - 0x002caa45, // n0x0520 c0x0000 (---------------) + I vista - 0x002f768a, // n0x0521 c0x0000 (---------------) + I vistaprint - 0x00240044, // n0x0522 c0x0000 (---------------) + I viva - 0x002f88c4, // n0x0523 c0x0000 (---------------) + I vivo - 0x0034514a, // n0x0524 c0x0000 (---------------) + I vlaanderen - 0x64602f42, // n0x0525 c0x0191 (n0x1e8c-n0x1e99) + I vn - 0x00275ec5, // n0x0526 c0x0000 (---------------) + I vodka - 0x002fa4ca, // n0x0527 c0x0000 (---------------) + I volkswagen - 0x002fb385, // n0x0528 c0x0000 (---------------) + I volvo - 0x002fbf04, // n0x0529 c0x0000 (---------------) + I vote - 0x002fc006, // n0x052a c0x0000 (---------------) + I voting - 0x002fc184, // n0x052b c0x0000 (---------------) + I voto - 0x00230ac6, // n0x052c c0x0000 (---------------) + I voyage - 0x64a6a402, // n0x052d c0x0192 (n0x1e99-n0x1e9d) + I vu - 0x00308a06, // n0x052e c0x0000 (---------------) + I vuelos - 0x00309ac5, // n0x052f c0x0000 (---------------) + I wales - 0x00201c47, // n0x0530 c0x0000 (---------------) + I walmart - 0x00295246, // n0x0531 c0x0000 (---------------) + I walter - 
0x00234644, // n0x0532 c0x0000 (---------------) + I wang - 0x002aeac7, // n0x0533 c0x0000 (---------------) + I wanggou - 0x0036de86, // n0x0534 c0x0000 (---------------) + I warman - 0x002b0685, // n0x0535 c0x0000 (---------------) + I watch - 0x003a5007, // n0x0536 c0x0000 (---------------) + I watches - 0x00390e47, // n0x0537 c0x0000 (---------------) + I weather - 0x00390e4e, // n0x0538 c0x0000 (---------------) + I weatherchannel - 0x0021e246, // n0x0539 c0x0000 (---------------) + I webcam - 0x0022fb85, // n0x053a c0x0000 (---------------) + I weber - 0x002c2487, // n0x053b c0x0000 (---------------) + I website - 0x002ebe43, // n0x053c c0x0000 (---------------) + I wed - 0x00353f07, // n0x053d c0x0000 (---------------) + I wedding - 0x00211745, // n0x053e c0x0000 (---------------) + I weibo - 0x00212044, // n0x053f c0x0000 (---------------) + I weir - 0x00230242, // n0x0540 c0x0000 (---------------) + I wf - 0x0036e507, // n0x0541 c0x0000 (---------------) + I whoswho - 0x002ea684, // n0x0542 c0x0000 (---------------) + I wien - 0x0023e984, // n0x0543 c0x0000 (---------------) + I wiki - 0x0025a64b, // n0x0544 c0x0000 (---------------) + I williamhill - 0x00220ac3, // n0x0545 c0x0000 (---------------) + I win - 0x002fd1c7, // n0x0546 c0x0000 (---------------) + I windows - 0x00220ac4, // n0x0547 c0x0000 (---------------) + I wine - 0x002b2dc7, // n0x0548 c0x0000 (---------------) + I winners - 0x002318c3, // n0x0549 c0x0000 (---------------) + I wme - 0x0032c58d, // n0x054a c0x0000 (---------------) + I wolterskluwer - 0x0037ee88, // n0x054b c0x0000 (---------------) + I woodside - 0x00256684, // n0x054c c0x0000 (---------------) + I work - 0x00357085, // n0x054d c0x0000 (---------------) + I works - 0x002ffbc5, // n0x054e c0x0000 (---------------) + I world - 0x002fdac3, // n0x054f c0x0000 (---------------) + I wow - 0x64e0a882, // n0x0550 c0x0193 (n0x1e9d-n0x1ea4) + I ws - 0x002fedc3, // n0x0551 c0x0000 (---------------) + I wtc - 0x002ff283, // n0x0552 
c0x0000 (---------------) + I wtf - 0x00219604, // n0x0553 c0x0000 (---------------) + I xbox - 0x0027c3c5, // n0x0554 c0x0000 (---------------) + I xerox - 0x0027dcc7, // n0x0555 c0x0000 (---------------) + I xfinity - 0x00224186, // n0x0556 c0x0000 (---------------) + I xihuan - 0x00367383, // n0x0557 c0x0000 (---------------) + I xin - 0x00238a8b, // n0x0558 c0x0000 (---------------) + I xn--11b4c3d - 0x0024780b, // n0x0559 c0x0000 (---------------) + I xn--1ck2e1b - 0x0027314b, // n0x055a c0x0000 (---------------) + I xn--1qqw23a - 0x002a6fca, // n0x055b c0x0000 (---------------) + I xn--30rr7y - 0x002bf80b, // n0x055c c0x0000 (---------------) + I xn--3bst00m - 0x002d538b, // n0x055d c0x0000 (---------------) + I xn--3ds443g - 0x002edb4c, // n0x055e c0x0000 (---------------) + I xn--3e0b707e - 0x00339a11, // n0x055f c0x0000 (---------------) + I xn--3oq18vl8pn36a - 0x00347f0a, // n0x0560 c0x0000 (---------------) + I xn--3pxu8k - 0x0036fa4b, // n0x0561 c0x0000 (---------------) + I xn--42c2d9a - 0x003a208b, // n0x0562 c0x0000 (---------------) + I xn--45brj9c - 0x003a49ca, // n0x0563 c0x0000 (---------------) + I xn--45q11c - 0x002fffca, // n0x0564 c0x0000 (---------------) + I xn--4gbrim - 0x0030038d, // n0x0565 c0x0000 (---------------) + I xn--4gq48lf9j - 0x0030310e, // n0x0566 c0x0000 (---------------) + I xn--54b7fta0cc - 0x0030368b, // n0x0567 c0x0000 (---------------) + I xn--55qw42g - 0x0030394a, // n0x0568 c0x0000 (---------------) + I xn--55qx5d - 0x00304751, // n0x0569 c0x0000 (---------------) + I xn--5su34j936bgsg - 0x00304b8a, // n0x056a c0x0000 (---------------) + I xn--5tzm5g - 0x0030508b, // n0x056b c0x0000 (---------------) + I xn--6frz82g - 0x003055ce, // n0x056c c0x0000 (---------------) + I xn--6qq986b3xl - 0x0030650c, // n0x056d c0x0000 (---------------) + I xn--80adxhks - 0x00306b4b, // n0x056e c0x0000 (---------------) + I xn--80ao21a - 0x00306e0e, // n0x056f c0x0000 (---------------) + I xn--80aqecdr1a - 0x0030718c, // n0x0570 c0x0000 
(---------------) + I xn--80asehdb - 0x0030b78a, // n0x0571 c0x0000 (---------------) + I xn--80aswg - 0x0030ca0c, // n0x0572 c0x0000 (---------------) + I xn--8y0a063a - 0x6530cd0a, // n0x0573 c0x0194 (n0x1ea4-n0x1eaa) + I xn--90a3ac - 0x0030da49, // n0x0574 c0x0000 (---------------) + I xn--90ais - 0x0030f00a, // n0x0575 c0x0000 (---------------) + I xn--9dbq2a - 0x0030f28a, // n0x0576 c0x0000 (---------------) + I xn--9et52u - 0x0030f50b, // n0x0577 c0x0000 (---------------) + I xn--9krt00a - 0x0031404e, // n0x0578 c0x0000 (---------------) + I xn--b4w605ferd - 0x003143d1, // n0x0579 c0x0000 (---------------) + I xn--bck1b9a5dre4c - 0x0031b349, // n0x057a c0x0000 (---------------) + I xn--c1avg - 0x0031b58a, // n0x057b c0x0000 (---------------) + I xn--c2br7g - 0x0031c28b, // n0x057c c0x0000 (---------------) + I xn--cck2b3b - 0x0031ca4a, // n0x057d c0x0000 (---------------) + I xn--cg4bki - 0x0031d1d6, // n0x057e c0x0000 (---------------) + I xn--clchc0ea0b2g2a9gcd - 0x0031e9cb, // n0x057f c0x0000 (---------------) + I xn--czr694b - 0x0032220a, // n0x0580 c0x0000 (---------------) + I xn--czrs0t - 0x00322a4a, // n0x0581 c0x0000 (---------------) + I xn--czru2d - 0x0032400b, // n0x0582 c0x0000 (---------------) + I xn--d1acj3b - 0x00329dc9, // n0x0583 c0x0000 (---------------) + I xn--d1alf - 0x0032cf0d, // n0x0584 c0x0000 (---------------) + I xn--eckvdtc9d - 0x0032d58b, // n0x0585 c0x0000 (---------------) + I xn--efvy88h - 0x0032e48b, // n0x0586 c0x0000 (---------------) + I xn--estv75g - 0x0032ee4b, // n0x0587 c0x0000 (---------------) + I xn--fct429k - 0x0032f409, // n0x0588 c0x0000 (---------------) + I xn--fhbei - 0x0032fa4e, // n0x0589 c0x0000 (---------------) + I xn--fiq228c5hs - 0x003300ca, // n0x058a c0x0000 (---------------) + I xn--fiq64b - 0x003326ca, // n0x058b c0x0000 (---------------) + I xn--fiqs8s - 0x00332b4a, // n0x058c c0x0000 (---------------) + I xn--fiqz9s - 0x0033318b, // n0x058d c0x0000 (---------------) + I xn--fjq720a - 0x003339cb, 
// n0x058e c0x0000 (---------------) + I xn--flw351e - 0x00333c8d, // n0x058f c0x0000 (---------------) + I xn--fpcrj9c3d - 0x0033548d, // n0x0590 c0x0000 (---------------) + I xn--fzc2c9e2c - 0x00335d90, // n0x0591 c0x0000 (---------------) + I xn--fzys8d69uvgm - 0x0033624b, // n0x0592 c0x0000 (---------------) + I xn--g2xx48c - 0x0033670c, // n0x0593 c0x0000 (---------------) + I xn--gckr3f0f - 0x0033738b, // n0x0594 c0x0000 (---------------) + I xn--gecrj9c - 0x0033a18b, // n0x0595 c0x0000 (---------------) + I xn--gk3at1e - 0x0033d00b, // n0x0596 c0x0000 (---------------) + I xn--h2brj9c - 0x0034094b, // n0x0597 c0x0000 (---------------) + I xn--hxt814e - 0x003413cf, // n0x0598 c0x0000 (---------------) + I xn--i1b6b1a6a2e - 0x0034178b, // n0x0599 c0x0000 (---------------) + I xn--imr513n - 0x0034278a, // n0x059a c0x0000 (---------------) + I xn--io0a7i - 0x00343489, // n0x059b c0x0000 (---------------) + I xn--j1aef - 0x00343849, // n0x059c c0x0000 (---------------) + I xn--j1amh - 0x0034418b, // n0x059d c0x0000 (---------------) + I xn--j6w193g - 0x0034444e, // n0x059e c0x0000 (---------------) + I xn--jlq61u9w7b - 0x003457cb, // n0x059f c0x0000 (---------------) + I xn--jvr189m - 0x00346acf, // n0x05a0 c0x0000 (---------------) + I xn--kcrx77d1x4a - 0x00348b4b, // n0x05a1 c0x0000 (---------------) + I xn--kprw13d - 0x00348e0b, // n0x05a2 c0x0000 (---------------) + I xn--kpry57d - 0x003490cb, // n0x05a3 c0x0000 (---------------) + I xn--kpu716f - 0x00349a4a, // n0x05a4 c0x0000 (---------------) + I xn--kput3i - 0x0034fb89, // n0x05a5 c0x0000 (---------------) + I xn--l1acc - 0x00355ecf, // n0x05a6 c0x0000 (---------------) + I xn--lgbbat1ad8j - 0x0035aecc, // n0x05a7 c0x0000 (---------------) + I xn--mgb2ddes - 0x0035b34c, // n0x05a8 c0x0000 (---------------) + I xn--mgb9awbf - 0x0035b80e, // n0x05a9 c0x0000 (---------------) + I xn--mgba3a3ejt - 0x0035cf4f, // n0x05aa c0x0000 (---------------) + I xn--mgba3a4f16a - 0x0035d30e, // n0x05ab c0x0000 
(---------------) + I xn--mgba3a4fra - 0x0035de10, // n0x05ac c0x0000 (---------------) + I xn--mgba7c0bbn0a - 0x0035e20f, // n0x05ad c0x0000 (---------------) + I xn--mgbaakc7dvf - 0x0035f64e, // n0x05ae c0x0000 (---------------) + I xn--mgbaam7a8h - 0x0035fb0c, // n0x05af c0x0000 (---------------) + I xn--mgbab2bd - 0x0035fe12, // n0x05b0 c0x0000 (---------------) + I xn--mgbai9a5eva00b - 0x00361151, // n0x05b1 c0x0000 (---------------) + I xn--mgbai9azgqp6j - 0x0036170e, // n0x05b2 c0x0000 (---------------) + I xn--mgbayh7gpa - 0x00361b4e, // n0x05b3 c0x0000 (---------------) + I xn--mgbb9fbpob - 0x0036208e, // n0x05b4 c0x0000 (---------------) + I xn--mgbbh1a71e - 0x0036240f, // n0x05b5 c0x0000 (---------------) + I xn--mgbc0a9azcg - 0x003627ce, // n0x05b6 c0x0000 (---------------) + I xn--mgbca7dzdo - 0x00362cd3, // n0x05b7 c0x0000 (---------------) + I xn--mgberp4a5d4a87g - 0x00363191, // n0x05b8 c0x0000 (---------------) + I xn--mgberp4a5d4ar - 0x003635ce, // n0x05b9 c0x0000 (---------------) + I xn--mgbi4ecexp - 0x00363a4c, // n0x05ba c0x0000 (---------------) + I xn--mgbpl2fh - 0x00363e93, // n0x05bb c0x0000 (---------------) + I xn--mgbqly7c0a67fbc - 0x00364610, // n0x05bc c0x0000 (---------------) + I xn--mgbqly7cvafr - 0x00364f4c, // n0x05bd c0x0000 (---------------) + I xn--mgbt3dhd - 0x0036524c, // n0x05be c0x0000 (---------------) + I xn--mgbtf8fl - 0x0036578b, // n0x05bf c0x0000 (---------------) + I xn--mgbtx2b - 0x00365c4e, // n0x05c0 c0x0000 (---------------) + I xn--mgbx4cd0ab - 0x0036614b, // n0x05c1 c0x0000 (---------------) + I xn--mix082f - 0x0036650b, // n0x05c2 c0x0000 (---------------) + I xn--mix891f - 0x00367a0c, // n0x05c3 c0x0000 (---------------) + I xn--mk1bu44c - 0x0036eb8a, // n0x05c4 c0x0000 (---------------) + I xn--mxtq1m - 0x0036f24c, // n0x05c5 c0x0000 (---------------) + I xn--ngbc5azd - 0x0036f54c, // n0x05c6 c0x0000 (---------------) + I xn--ngbe9e0a - 0x0036f849, // n0x05c7 c0x0000 (---------------) + I xn--ngbrx - 
0x00370acb, // n0x05c8 c0x0000 (---------------) + I xn--nnx388a - 0x00370d88, // n0x05c9 c0x0000 (---------------) + I xn--node - 0x00371249, // n0x05ca c0x0000 (---------------) + I xn--nqv7f - 0x0037124f, // n0x05cb c0x0000 (---------------) + I xn--nqv7fs00ema - 0x00372bcb, // n0x05cc c0x0000 (---------------) + I xn--nyqy26a - 0x003747ca, // n0x05cd c0x0000 (---------------) + I xn--o3cw4h - 0x00375f0c, // n0x05ce c0x0000 (---------------) + I xn--ogbpf8fl - 0x00377ac9, // n0x05cf c0x0000 (---------------) + I xn--p1acf - 0x00377d48, // n0x05d0 c0x0000 (---------------) + I xn--p1ai - 0x00377f4b, // n0x05d1 c0x0000 (---------------) + I xn--pbt977c - 0x00379f8b, // n0x05d2 c0x0000 (---------------) + I xn--pgbs0dh - 0x0037ab8a, // n0x05d3 c0x0000 (---------------) + I xn--pssy2u - 0x0037ae0b, // n0x05d4 c0x0000 (---------------) + I xn--q9jyb4c - 0x0037be4c, // n0x05d5 c0x0000 (---------------) + I xn--qcka1pmc - 0x0037c908, // n0x05d6 c0x0000 (---------------) + I xn--qxam - 0x0037f08b, // n0x05d7 c0x0000 (---------------) + I xn--rhqv96g - 0x0038174b, // n0x05d8 c0x0000 (---------------) + I xn--rovu88b - 0x0038548b, // n0x05d9 c0x0000 (---------------) + I xn--s9brj9c - 0x00386b8b, // n0x05da c0x0000 (---------------) + I xn--ses554g - 0x003909cb, // n0x05db c0x0000 (---------------) + I xn--t60b56a - 0x00390c89, // n0x05dc c0x0000 (---------------) + I xn--tckwe - 0x003911cd, // n0x05dd c0x0000 (---------------) + I xn--tiq49xqyj - 0x00395b0a, // n0x05de c0x0000 (---------------) + I xn--unup4y - 0x00396a57, // n0x05df c0x0000 (---------------) + I xn--vermgensberater-ctb - 0x00397898, // n0x05e0 c0x0000 (---------------) + I xn--vermgensberatung-pwb - 0x0039bc89, // n0x05e1 c0x0000 (---------------) + I xn--vhquv - 0x0039ce8b, // n0x05e2 c0x0000 (---------------) + I xn--vuq861b - 0x0039dc54, // n0x05e3 c0x0000 (---------------) + I xn--w4r85el8fhu5dnra - 0x0039e14b, // n0x05e4 c0x0000 (---------------) + I xn--w4rs40l - 0x0039e6ca, // n0x05e5 c0x0000 
(---------------) + I xn--wgbh1c - 0x0039ec8a, // n0x05e6 c0x0000 (---------------) + I xn--wgbl6a - 0x0039ef0b, // n0x05e7 c0x0000 (---------------) + I xn--xhq521b - 0x0039fd90, // n0x05e8 c0x0000 (---------------) + I xn--xkc2al3hye2a - 0x003a0191, // n0x05e9 c0x0000 (---------------) + I xn--xkc2dl3a5ee0h - 0x003a0a4a, // n0x05ea c0x0000 (---------------) + I xn--y9a3aq - 0x003a168d, // n0x05eb c0x0000 (---------------) + I xn--yfro4i67o - 0x003a1d8d, // n0x05ec c0x0000 (---------------) + I xn--ygbi2ammx - 0x003a548b, // n0x05ed c0x0000 (---------------) + I xn--zfr164b - 0x003a5c46, // n0x05ee c0x0000 (---------------) + I xperia - 0x0027dc43, // n0x05ef c0x0000 (---------------) + I xxx - 0x00246b83, // n0x05f0 c0x0000 (---------------) + I xyz - 0x00308546, // n0x05f1 c0x0000 (---------------) + I yachts - 0x0028f8c5, // n0x05f2 c0x0000 (---------------) + I yahoo - 0x002c92c7, // n0x05f3 c0x0000 (---------------) + I yamaxun - 0x003398c6, // n0x05f4 c0x0000 (---------------) + I yandex - 0x01607002, // n0x05f5 c0x0005 (---------------)* o I ye - 0x003a23c9, // n0x05f6 c0x0000 (---------------) + I yodobashi - 0x00352c44, // n0x05f7 c0x0000 (---------------) + I yoga - 0x002d1a88, // n0x05f8 c0x0000 (---------------) + I yokohama - 0x0024a803, // n0x05f9 c0x0000 (---------------) + I you - 0x002e5587, // n0x05fa c0x0000 (---------------) + I youtube - 0x00219c02, // n0x05fb c0x0000 (---------------) + I yt - 0x002ad943, // n0x05fc c0x0000 (---------------) + I yun - 0x65600182, // n0x05fd c0x0195 (n0x1eaa-n0x1ebb) o I za - 0x002c5d86, // n0x05fe c0x0000 (---------------) + I zappos - 0x002c6684, // n0x05ff c0x0000 (---------------) + I zara - 0x00326f84, // n0x0600 c0x0000 (---------------) + I zero - 0x0023b883, // n0x0601 c0x0000 (---------------) + I zip - 0x0023b885, // n0x0602 c0x0000 (---------------) + I zippo - 0x016ffd42, // n0x0603 c0x0005 (---------------)* o I zm - 0x002dd744, // n0x0604 c0x0000 (---------------) + I zone - 0x00273607, // 
n0x0605 c0x0000 (---------------) + I zuerich - 0x016fd182, // n0x0606 c0x0005 (---------------)* o I zw - 0x00233243, // n0x0607 c0x0000 (---------------) + I com - 0x00239103, // n0x0608 c0x0000 (---------------) + I edu - 0x0027d903, // n0x0609 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x060a c0x0000 (---------------) + I mil - 0x00223b43, // n0x060b c0x0000 (---------------) + I net - 0x00228743, // n0x060c c0x0000 (---------------) + I org - 0x00201383, // n0x060d c0x0000 (---------------) + I nom - 0x00200342, // n0x060e c0x0000 (---------------) + I ac - 0x000fe108, // n0x060f c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x0610 c0x0000 (---------------) + I co - 0x0027d903, // n0x0611 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x0612 c0x0000 (---------------) + I mil - 0x00223b43, // n0x0613 c0x0000 (---------------) + I net - 0x00228743, // n0x0614 c0x0000 (---------------) + I org - 0x00217283, // n0x0615 c0x0000 (---------------) + I sch - 0x00318816, // n0x0616 c0x0000 (---------------) + I accident-investigation - 0x0031a353, // n0x0617 c0x0000 (---------------) + I accident-prevention - 0x002f2189, // n0x0618 c0x0000 (---------------) + I aerobatic - 0x00378b48, // n0x0619 c0x0000 (---------------) + I aeroclub - 0x002e2dc9, // n0x061a c0x0000 (---------------) + I aerodrome - 0x002fa646, // n0x061b c0x0000 (---------------) + I agents - 0x0030e650, // n0x061c c0x0000 (---------------) + I air-surveillance - 0x0033f613, // n0x061d c0x0000 (---------------) + I air-traffic-control - 0x00205108, // n0x061e c0x0000 (---------------) + I aircraft - 0x0024e5c7, // n0x061f c0x0000 (---------------) + I airline - 0x002791c7, // n0x0620 c0x0000 (---------------) + I airport - 0x00287c0a, // n0x0621 c0x0000 (---------------) + I airtraffic - 0x002c06c9, // n0x0622 c0x0000 (---------------) + I ambulance - 0x00332009, // n0x0623 c0x0000 (---------------) + I amusement - 0x002d5e8b, // n0x0624 c0x0000 (---------------) + I association 
- 0x00320ec6, // n0x0625 c0x0000 (---------------) + I author - 0x0022e08a, // n0x0626 c0x0000 (---------------) + I ballooning - 0x00224906, // n0x0627 c0x0000 (---------------) + I broker - 0x00352843, // n0x0628 c0x0000 (---------------) + I caa - 0x002f3245, // n0x0629 c0x0000 (---------------) + I cargo - 0x0037b1c8, // n0x062a c0x0000 (---------------) + I catering - 0x00324e8d, // n0x062b c0x0000 (---------------) + I certification - 0x0033f1cc, // n0x062c c0x0000 (---------------) + I championship - 0x00309787, // n0x062d c0x0000 (---------------) + I charter - 0x00359e8d, // n0x062e c0x0000 (---------------) + I civilaviation - 0x00378c44, // n0x062f c0x0000 (---------------) + I club - 0x0023734a, // n0x0630 c0x0000 (---------------) + I conference - 0x00237e4a, // n0x0631 c0x0000 (---------------) + I consultant - 0x0023830a, // n0x0632 c0x0000 (---------------) + I consulting - 0x002f4607, // n0x0633 c0x0000 (---------------) + I control - 0x00242307, // n0x0634 c0x0000 (---------------) + I council - 0x00244144, // n0x0635 c0x0000 (---------------) + I crew - 0x00228206, // n0x0636 c0x0000 (---------------) + I design - 0x00249384, // n0x0637 c0x0000 (---------------) + I dgca - 0x0035c248, // n0x0638 c0x0000 (---------------) + I educator - 0x00311d49, // n0x0639 c0x0000 (---------------) + I emergency - 0x0030c4c6, // n0x063a c0x0000 (---------------) + I engine - 0x0030c4c8, // n0x063b c0x0000 (---------------) + I engineer - 0x00246fcd, // n0x063c c0x0000 (---------------) + I entertainment - 0x002c4489, // n0x063d c0x0000 (---------------) + I equipment - 0x00379608, // n0x063e c0x0000 (---------------) + I exchange - 0x00246987, // n0x063f c0x0000 (---------------) + I express - 0x00310d8a, // n0x0640 c0x0000 (---------------) + I federation - 0x00251346, // n0x0641 c0x0000 (---------------) + I flight - 0x0025cb47, // n0x0642 c0x0000 (---------------) + I freight - 0x0023ff44, // n0x0643 c0x0000 (---------------) + I fuel - 0x0026f907, // 
n0x0644 c0x0000 (---------------) + I gliding - 0x0027d90a, // n0x0645 c0x0000 (---------------) + I government - 0x00288dce, // n0x0646 c0x0000 (---------------) + I groundhandling - 0x0020c745, // n0x0647 c0x0000 (---------------) + I group - 0x002fd80b, // n0x0648 c0x0000 (---------------) + I hanggliding - 0x002f3589, // n0x0649 c0x0000 (---------------) + I homebuilt - 0x00263309, // n0x064a c0x0000 (---------------) + I insurance - 0x0027b207, // n0x064b c0x0000 (---------------) + I journal - 0x0038d78a, // n0x064c c0x0000 (---------------) + I journalist - 0x002869c7, // n0x064d c0x0000 (---------------) + I leasing - 0x002e2849, // n0x064e c0x0000 (---------------) + I logistics - 0x00395588, // n0x064f c0x0000 (---------------) + I magazine - 0x0027614b, // n0x0650 c0x0000 (---------------) + I maintenance - 0x00303545, // n0x0651 c0x0000 (---------------) + I media - 0x00288aca, // n0x0652 c0x0000 (---------------) + I microlight - 0x002a4849, // n0x0653 c0x0000 (---------------) + I modelling - 0x0030a84a, // n0x0654 c0x0000 (---------------) + I navigation - 0x002c6c8b, // n0x0655 c0x0000 (---------------) + I parachuting - 0x0026f80b, // n0x0656 c0x0000 (---------------) + I paragliding - 0x002d5c15, // n0x0657 c0x0000 (---------------) + I passenger-association - 0x002d6ac5, // n0x0658 c0x0000 (---------------) + I pilot - 0x00246a05, // n0x0659 c0x0000 (---------------) + I press - 0x002e1bca, // n0x065a c0x0000 (---------------) + I production - 0x0031d94a, // n0x065b c0x0000 (---------------) + I recreation - 0x0022ed87, // n0x065c c0x0000 (---------------) + I repbody - 0x00221903, // n0x065d c0x0000 (---------------) + I res - 0x002a1188, // n0x065e c0x0000 (---------------) + I research - 0x002ce40a, // n0x065f c0x0000 (---------------) + I rotorcraft - 0x0039f4c6, // n0x0660 c0x0000 (---------------) + I safety - 0x00242649, // n0x0661 c0x0000 (---------------) + I scientist - 0x0020b748, // n0x0662 c0x0000 (---------------) + I services - 
0x002b2f44, // n0x0663 c0x0000 (---------------) + I show - 0x00288309, // n0x0664 c0x0000 (---------------) + I skydiving - 0x002b9808, // n0x0665 c0x0000 (---------------) + I software - 0x002ac947, // n0x0666 c0x0000 (---------------) + I student - 0x00265706, // n0x0667 c0x0000 (---------------) + I trader - 0x002a5c47, // n0x0668 c0x0000 (---------------) + I trading - 0x00299907, // n0x0669 c0x0000 (---------------) + I trainer - 0x00243b85, // n0x066a c0x0000 (---------------) + I union - 0x002dbb8c, // n0x066b c0x0000 (---------------) + I workinggroup - 0x00357085, // n0x066c c0x0000 (---------------) + I works - 0x00233243, // n0x066d c0x0000 (---------------) + I com - 0x00239103, // n0x066e c0x0000 (---------------) + I edu - 0x0027d903, // n0x066f c0x0000 (---------------) + I gov - 0x00223b43, // n0x0670 c0x0000 (---------------) + I net - 0x00228743, // n0x0671 c0x0000 (---------------) + I org - 0x0020ce42, // n0x0672 c0x0000 (---------------) + I co - 0x00233243, // n0x0673 c0x0000 (---------------) + I com - 0x00223b43, // n0x0674 c0x0000 (---------------) + I net - 0x00201383, // n0x0675 c0x0000 (---------------) + I nom - 0x00228743, // n0x0676 c0x0000 (---------------) + I org - 0x00233243, // n0x0677 c0x0000 (---------------) + I com - 0x00223b43, // n0x0678 c0x0000 (---------------) + I net - 0x0020ce83, // n0x0679 c0x0000 (---------------) + I off - 0x00228743, // n0x067a c0x0000 (---------------) + I org - 0x000fe108, // n0x067b c0x0000 (---------------) + blogspot - 0x00233243, // n0x067c c0x0000 (---------------) + I com - 0x00239103, // n0x067d c0x0000 (---------------) + I edu - 0x0027d903, // n0x067e c0x0000 (---------------) + I gov - 0x00207dc3, // n0x067f c0x0000 (---------------) + I mil - 0x00223b43, // n0x0680 c0x0000 (---------------) + I net - 0x00228743, // n0x0681 c0x0000 (---------------) + I org - 0x000fe108, // n0x0682 c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x0683 c0x0000 (---------------) + I co - 
0x002024c2, // n0x0684 c0x0000 (---------------) + I ed - 0x00238542, // n0x0685 c0x0000 (---------------) + I gv - 0x002017c2, // n0x0686 c0x0000 (---------------) + I it - 0x00201902, // n0x0687 c0x0000 (---------------) + I og - 0x0022ee02, // n0x0688 c0x0000 (---------------) + I pb - 0x04633243, // n0x0689 c0x0011 (n0x0692-n0x0693) + I com - 0x00239103, // n0x068a c0x0000 (---------------) + I edu - 0x00212b03, // n0x068b c0x0000 (---------------) + I gob - 0x0027d903, // n0x068c c0x0000 (---------------) + I gov - 0x00201503, // n0x068d c0x0000 (---------------) + I int - 0x00207dc3, // n0x068e c0x0000 (---------------) + I mil - 0x00223b43, // n0x068f c0x0000 (---------------) + I net - 0x00228743, // n0x0690 c0x0000 (---------------) + I org - 0x00208c03, // n0x0691 c0x0000 (---------------) + I tur - 0x000fe108, // n0x0692 c0x0000 (---------------) + blogspot - 0x0025dc44, // n0x0693 c0x0000 (---------------) + I e164 - 0x00329387, // n0x0694 c0x0000 (---------------) + I in-addr - 0x00214243, // n0x0695 c0x0000 (---------------) + I ip6 - 0x0029b804, // n0x0696 c0x0000 (---------------) + I iris - 0x0020f003, // n0x0697 c0x0000 (---------------) + I uri - 0x0027b283, // n0x0698 c0x0000 (---------------) + I urn - 0x0027d903, // n0x0699 c0x0000 (---------------) + I gov - 0x00200342, // n0x069a c0x0000 (---------------) + I ac - 0x00131a83, // n0x069b c0x0000 (---------------) + biz - 0x0560ce42, // n0x069c c0x0015 (n0x06a1-n0x06a2) + I co - 0x00238542, // n0x069d c0x0000 (---------------) + I gv - 0x00001844, // n0x069e c0x0000 (---------------) + info - 0x00200dc2, // n0x069f c0x0000 (---------------) + I or - 0x000e17c4, // n0x06a0 c0x0000 (---------------) + priv - 0x000fe108, // n0x06a1 c0x0000 (---------------) + blogspot - 0x002388c3, // n0x06a2 c0x0000 (---------------) + I act - 0x0022b643, // n0x06a3 c0x0000 (---------------) + I asn - 0x05e33243, // n0x06a4 c0x0017 (n0x06b4-n0x06b5) + I com - 0x00237344, // n0x06a5 c0x0000 (---------------) + I 
conf - 0x06239103, // n0x06a6 c0x0018 (n0x06b5-n0x06bd) + I edu - 0x0667d903, // n0x06a7 c0x0019 (n0x06bd-n0x06c2) + I gov - 0x0020d9c2, // n0x06a8 c0x0000 (---------------) + I id - 0x00201844, // n0x06a9 c0x0000 (---------------) + I info - 0x00223b43, // n0x06aa c0x0000 (---------------) + I net - 0x002ebf43, // n0x06ab c0x0000 (---------------) + I nsw - 0x00201542, // n0x06ac c0x0000 (---------------) + I nt - 0x00228743, // n0x06ad c0x0000 (---------------) + I org - 0x0021e882, // n0x06ae c0x0000 (---------------) + I oz - 0x002e7143, // n0x06af c0x0000 (---------------) + I qld - 0x00201002, // n0x06b0 c0x0000 (---------------) + I sa - 0x00203f83, // n0x06b1 c0x0000 (---------------) + I tas - 0x0020b803, // n0x06b2 c0x0000 (---------------) + I vic - 0x00201c42, // n0x06b3 c0x0000 (---------------) + I wa - 0x000fe108, // n0x06b4 c0x0000 (---------------) + blogspot - 0x002388c3, // n0x06b5 c0x0000 (---------------) + I act - 0x002ebf43, // n0x06b6 c0x0000 (---------------) + I nsw - 0x00201542, // n0x06b7 c0x0000 (---------------) + I nt - 0x002e7143, // n0x06b8 c0x0000 (---------------) + I qld - 0x00201002, // n0x06b9 c0x0000 (---------------) + I sa - 0x00203f83, // n0x06ba c0x0000 (---------------) + I tas - 0x0020b803, // n0x06bb c0x0000 (---------------) + I vic - 0x00201c42, // n0x06bc c0x0000 (---------------) + I wa - 0x002e7143, // n0x06bd c0x0000 (---------------) + I qld - 0x00201002, // n0x06be c0x0000 (---------------) + I sa - 0x00203f83, // n0x06bf c0x0000 (---------------) + I tas - 0x0020b803, // n0x06c0 c0x0000 (---------------) + I vic - 0x00201c42, // n0x06c1 c0x0000 (---------------) + I wa - 0x00233243, // n0x06c2 c0x0000 (---------------) + I com - 0x00331a83, // n0x06c3 c0x0000 (---------------) + I biz - 0x00233243, // n0x06c4 c0x0000 (---------------) + I com - 0x00239103, // n0x06c5 c0x0000 (---------------) + I edu - 0x0027d903, // n0x06c6 c0x0000 (---------------) + I gov - 0x00201844, // n0x06c7 c0x0000 (---------------) + 
I info - 0x00201503, // n0x06c8 c0x0000 (---------------) + I int - 0x00207dc3, // n0x06c9 c0x0000 (---------------) + I mil - 0x00200904, // n0x06ca c0x0000 (---------------) + I name - 0x00223b43, // n0x06cb c0x0000 (---------------) + I net - 0x00228743, // n0x06cc c0x0000 (---------------) + I org - 0x002080c2, // n0x06cd c0x0000 (---------------) + I pp - 0x00224b03, // n0x06ce c0x0000 (---------------) + I pro - 0x000fe108, // n0x06cf c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x06d0 c0x0000 (---------------) + I co - 0x00233243, // n0x06d1 c0x0000 (---------------) + I com - 0x00239103, // n0x06d2 c0x0000 (---------------) + I edu - 0x0027d903, // n0x06d3 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x06d4 c0x0000 (---------------) + I mil - 0x00223b43, // n0x06d5 c0x0000 (---------------) + I net - 0x00228743, // n0x06d6 c0x0000 (---------------) + I org - 0x002006c2, // n0x06d7 c0x0000 (---------------) + I rs - 0x0024e904, // n0x06d8 c0x0000 (---------------) + I unbi - 0x00399f44, // n0x06d9 c0x0000 (---------------) + I unsa - 0x00331a83, // n0x06da c0x0000 (---------------) + I biz - 0x0020ce42, // n0x06db c0x0000 (---------------) + I co - 0x00233243, // n0x06dc c0x0000 (---------------) + I com - 0x00239103, // n0x06dd c0x0000 (---------------) + I edu - 0x0027d903, // n0x06de c0x0000 (---------------) + I gov - 0x00201844, // n0x06df c0x0000 (---------------) + I info - 0x00223b43, // n0x06e0 c0x0000 (---------------) + I net - 0x00228743, // n0x06e1 c0x0000 (---------------) + I org - 0x00364cc5, // n0x06e2 c0x0000 (---------------) + I store - 0x002203c2, // n0x06e3 c0x0000 (---------------) + I tv - 0x00200342, // n0x06e4 c0x0000 (---------------) + I ac - 0x000fe108, // n0x06e5 c0x0000 (---------------) + blogspot - 0x0027d903, // n0x06e6 c0x0000 (---------------) + I gov - 0x002314c1, // n0x06e7 c0x0000 (---------------) + I 0 - 0x0022bdc1, // n0x06e8 c0x0000 (---------------) + I 1 - 0x002479c1, // n0x06e9 c0x0000 
(---------------) + I 2 - 0x0022ba81, // n0x06ea c0x0000 (---------------) + I 3 - 0x00238c41, // n0x06eb c0x0000 (---------------) + I 4 - 0x0027c701, // n0x06ec c0x0000 (---------------) + I 5 - 0x002142c1, // n0x06ed c0x0000 (---------------) + I 6 - 0x002315c1, // n0x06ee c0x0000 (---------------) + I 7 - 0x00300581, // n0x06ef c0x0000 (---------------) + I 8 - 0x00300641, // n0x06f0 c0x0000 (---------------) + I 9 - 0x002001c1, // n0x06f1 c0x0000 (---------------) + I a - 0x00200001, // n0x06f2 c0x0000 (---------------) + I b - 0x000fe108, // n0x06f3 c0x0000 (---------------) + blogspot - 0x00200141, // n0x06f4 c0x0000 (---------------) + I c - 0x00200201, // n0x06f5 c0x0000 (---------------) + I d - 0x00200081, // n0x06f6 c0x0000 (---------------) + I e - 0x00201701, // n0x06f7 c0x0000 (---------------) + I f - 0x00200281, // n0x06f8 c0x0000 (---------------) + I g - 0x002003c1, // n0x06f9 c0x0000 (---------------) + I h - 0x00200041, // n0x06fa c0x0000 (---------------) + I i - 0x002010c1, // n0x06fb c0x0000 (---------------) + I j - 0x00201b41, // n0x06fc c0x0000 (---------------) + I k - 0x00200d41, // n0x06fd c0x0000 (---------------) + I l - 0x00200441, // n0x06fe c0x0000 (---------------) + I m - 0x00200781, // n0x06ff c0x0000 (---------------) + I n - 0x00200dc1, // n0x0700 c0x0000 (---------------) + I o - 0x00200581, // n0x0701 c0x0000 (---------------) + I p - 0x00200f41, // n0x0702 c0x0000 (---------------) + I q - 0x002006c1, // n0x0703 c0x0000 (---------------) + I r - 0x002000c1, // n0x0704 c0x0000 (---------------) + I s - 0x002004c1, // n0x0705 c0x0000 (---------------) + I t - 0x00200741, // n0x0706 c0x0000 (---------------) + I u - 0x00200c01, // n0x0707 c0x0000 (---------------) + I v - 0x00201c41, // n0x0708 c0x0000 (---------------) + I w - 0x00200a01, // n0x0709 c0x0000 (---------------) + I x - 0x00200241, // n0x070a c0x0000 (---------------) + I y - 0x00200101, // n0x070b c0x0000 (---------------) + I z - 0x00233243, // n0x070c c0x0000 
(---------------) + I com - 0x00239103, // n0x070d c0x0000 (---------------) + I edu - 0x0027d903, // n0x070e c0x0000 (---------------) + I gov - 0x00223b43, // n0x070f c0x0000 (---------------) + I net - 0x00228743, // n0x0710 c0x0000 (---------------) + I org - 0x0020ce42, // n0x0711 c0x0000 (---------------) + I co - 0x00233243, // n0x0712 c0x0000 (---------------) + I com - 0x00239103, // n0x0713 c0x0000 (---------------) + I edu - 0x00200dc2, // n0x0714 c0x0000 (---------------) + I or - 0x00228743, // n0x0715 c0x0000 (---------------) + I org - 0x00007ec7, // n0x0716 c0x0000 (---------------) + dscloud - 0x00013206, // n0x0717 c0x0000 (---------------) + dyndns - 0x000568ca, // n0x0718 c0x0000 (---------------) + for-better - 0x0008a448, // n0x0719 c0x0000 (---------------) + for-more - 0x00056ec8, // n0x071a c0x0000 (---------------) + for-some - 0x00057907, // n0x071b c0x0000 (---------------) + for-the - 0x0005abc6, // n0x071c c0x0000 (---------------) + selfip - 0x001267c6, // n0x071d c0x0000 (---------------) + webhop - 0x002d5e84, // n0x071e c0x0000 (---------------) + I asso - 0x0031c507, // n0x071f c0x0000 (---------------) + I barreau - 0x000fe108, // n0x0720 c0x0000 (---------------) + blogspot - 0x002aebc4, // n0x0721 c0x0000 (---------------) + I gouv - 0x00233243, // n0x0722 c0x0000 (---------------) + I com - 0x00239103, // n0x0723 c0x0000 (---------------) + I edu - 0x0027d903, // n0x0724 c0x0000 (---------------) + I gov - 0x00223b43, // n0x0725 c0x0000 (---------------) + I net - 0x00228743, // n0x0726 c0x0000 (---------------) + I org - 0x00233243, // n0x0727 c0x0000 (---------------) + I com - 0x00239103, // n0x0728 c0x0000 (---------------) + I edu - 0x00212b03, // n0x0729 c0x0000 (---------------) + I gob - 0x0027d903, // n0x072a c0x0000 (---------------) + I gov - 0x00201503, // n0x072b c0x0000 (---------------) + I int - 0x00207dc3, // n0x072c c0x0000 (---------------) + I mil - 0x00223b43, // n0x072d c0x0000 (---------------) + I net - 
0x00228743, // n0x072e c0x0000 (---------------) + I org - 0x002203c2, // n0x072f c0x0000 (---------------) + I tv - 0x002c52c3, // n0x0730 c0x0000 (---------------) + I adm - 0x002da143, // n0x0731 c0x0000 (---------------) + I adv - 0x00208a43, // n0x0732 c0x0000 (---------------) + I agr - 0x00200942, // n0x0733 c0x0000 (---------------) + I am - 0x0024f683, // n0x0734 c0x0000 (---------------) + I arq - 0x00201d43, // n0x0735 c0x0000 (---------------) + I art - 0x00211e03, // n0x0736 c0x0000 (---------------) + I ato - 0x00200001, // n0x0737 c0x0000 (---------------) + I b - 0x00203e43, // n0x0738 c0x0000 (---------------) + I bio - 0x0022f084, // n0x0739 c0x0000 (---------------) + I blog - 0x00311b03, // n0x073a c0x0000 (---------------) + I bmd - 0x0026b803, // n0x073b c0x0000 (---------------) + I cim - 0x0021dac3, // n0x073c c0x0000 (---------------) + I cng - 0x002312c3, // n0x073d c0x0000 (---------------) + I cnt - 0x0a233243, // n0x073e c0x0028 (n0x0776-n0x0777) + I com - 0x0023c344, // n0x073f c0x0000 (---------------) + I coop - 0x0021da83, // n0x0740 c0x0000 (---------------) + I ecn - 0x0020ce03, // n0x0741 c0x0000 (---------------) + I eco - 0x00239103, // n0x0742 c0x0000 (---------------) + I edu - 0x00238e43, // n0x0743 c0x0000 (---------------) + I emp - 0x00213083, // n0x0744 c0x0000 (---------------) + I eng - 0x0029c403, // n0x0745 c0x0000 (---------------) + I esp - 0x0026b783, // n0x0746 c0x0000 (---------------) + I etc - 0x00226d43, // n0x0747 c0x0000 (---------------) + I eti - 0x002112c3, // n0x0748 c0x0000 (---------------) + I far - 0x00252284, // n0x0749 c0x0000 (---------------) + I flog - 0x00234802, // n0x074a c0x0000 (---------------) + I fm - 0x00256243, // n0x074b c0x0000 (---------------) + I fnd - 0x0025c203, // n0x074c c0x0000 (---------------) + I fot - 0x00277503, // n0x074d c0x0000 (---------------) + I fst - 0x00329c43, // n0x074e c0x0000 (---------------) + I g12 - 0x00327ec3, // n0x074f c0x0000 (---------------) + I 
ggf - 0x0027d903, // n0x0750 c0x0000 (---------------) + I gov - 0x002c9cc3, // n0x0751 c0x0000 (---------------) + I imb - 0x00221b03, // n0x0752 c0x0000 (---------------) + I ind - 0x00201683, // n0x0753 c0x0000 (---------------) + I inf - 0x0021cc83, // n0x0754 c0x0000 (---------------) + I jor - 0x002eee83, // n0x0755 c0x0000 (---------------) + I jus - 0x0022f7c3, // n0x0756 c0x0000 (---------------) + I leg - 0x002c1303, // n0x0757 c0x0000 (---------------) + I lel - 0x00200443, // n0x0758 c0x0000 (---------------) + I mat - 0x00213443, // n0x0759 c0x0000 (---------------) + I med - 0x00207dc3, // n0x075a c0x0000 (---------------) + I mil - 0x0022c182, // n0x075b c0x0000 (---------------) + I mp - 0x002812c3, // n0x075c c0x0000 (---------------) + I mus - 0x00223b43, // n0x075d c0x0000 (---------------) + I net - 0x01601383, // n0x075e c0x0005 (---------------)* o I nom - 0x00257ec3, // n0x075f c0x0000 (---------------) + I not - 0x0022bc43, // n0x0760 c0x0000 (---------------) + I ntr - 0x00212bc3, // n0x0761 c0x0000 (---------------) + I odo - 0x00228743, // n0x0762 c0x0000 (---------------) + I org - 0x00248a83, // n0x0763 c0x0000 (---------------) + I ppg - 0x00224b03, // n0x0764 c0x0000 (---------------) + I pro - 0x00232143, // n0x0765 c0x0000 (---------------) + I psc - 0x002f6a43, // n0x0766 c0x0000 (---------------) + I psi - 0x002e7303, // n0x0767 c0x0000 (---------------) + I qsl - 0x00264a45, // n0x0768 c0x0000 (---------------) + I radio - 0x0022c2c3, // n0x0769 c0x0000 (---------------) + I rec - 0x002e7343, // n0x076a c0x0000 (---------------) + I slg - 0x0033b7c3, // n0x076b c0x0000 (---------------) + I srv - 0x00224104, // n0x076c c0x0000 (---------------) + I taxi - 0x00337c43, // n0x076d c0x0000 (---------------) + I teo - 0x00245b03, // n0x076e c0x0000 (---------------) + I tmp - 0x0036f183, // n0x076f c0x0000 (---------------) + I trd - 0x00208c03, // n0x0770 c0x0000 (---------------) + I tur - 0x002203c2, // n0x0771 c0x0000 
(---------------) + I tv - 0x00240683, // n0x0772 c0x0000 (---------------) + I vet - 0x002f96c4, // n0x0773 c0x0000 (---------------) + I vlog - 0x0023e984, // n0x0774 c0x0000 (---------------) + I wiki - 0x0025ca83, // n0x0775 c0x0000 (---------------) + I zlg - 0x000fe108, // n0x0776 c0x0000 (---------------) + blogspot - 0x00233243, // n0x0777 c0x0000 (---------------) + I com - 0x00239103, // n0x0778 c0x0000 (---------------) + I edu - 0x0027d903, // n0x0779 c0x0000 (---------------) + I gov - 0x00223b43, // n0x077a c0x0000 (---------------) + I net - 0x00228743, // n0x077b c0x0000 (---------------) + I org - 0x00233243, // n0x077c c0x0000 (---------------) + I com - 0x00239103, // n0x077d c0x0000 (---------------) + I edu - 0x0027d903, // n0x077e c0x0000 (---------------) + I gov - 0x00223b43, // n0x077f c0x0000 (---------------) + I net - 0x00228743, // n0x0780 c0x0000 (---------------) + I org - 0x0020ce42, // n0x0781 c0x0000 (---------------) + I co - 0x00228743, // n0x0782 c0x0000 (---------------) + I org - 0x0b633243, // n0x0783 c0x002d (n0x0787-n0x0788) + I com - 0x0027d903, // n0x0784 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x0785 c0x0000 (---------------) + I mil - 0x0020ce82, // n0x0786 c0x0000 (---------------) + I of - 0x000fe108, // n0x0787 c0x0000 (---------------) + blogspot - 0x00233243, // n0x0788 c0x0000 (---------------) + I com - 0x00239103, // n0x0789 c0x0000 (---------------) + I edu - 0x0027d903, // n0x078a c0x0000 (---------------) + I gov - 0x00223b43, // n0x078b c0x0000 (---------------) + I net - 0x00228743, // n0x078c c0x0000 (---------------) + I org - 0x00000182, // n0x078d c0x0000 (---------------) + za - 0x00201a02, // n0x078e c0x0000 (---------------) + I ab - 0x0021e2c2, // n0x078f c0x0000 (---------------) + I bc - 0x000fe108, // n0x0790 c0x0000 (---------------) + blogspot - 0x0000ce42, // n0x0791 c0x0000 (---------------) + co - 0x0023ad42, // n0x0792 c0x0000 (---------------) + I gc - 0x00205a82, // n0x0793 
c0x0000 (---------------) + I mb - 0x00215b02, // n0x0794 c0x0000 (---------------) + I nb - 0x002016c2, // n0x0795 c0x0000 (---------------) + I nf - 0x00246d02, // n0x0796 c0x0000 (---------------) + I nl - 0x0020df42, // n0x0797 c0x0000 (---------------) + I ns - 0x00201542, // n0x0798 c0x0000 (---------------) + I nt - 0x00204182, // n0x0799 c0x0000 (---------------) + I nu - 0x00203a42, // n0x079a c0x0000 (---------------) + I on - 0x00200582, // n0x079b c0x0000 (---------------) + I pe - 0x0037bf42, // n0x079c c0x0000 (---------------) + I qc - 0x00208502, // n0x079d c0x0000 (---------------) + I sk - 0x00225782, // n0x079e c0x0000 (---------------) + I yk - 0x00120b09, // n0x079f c0x0000 (---------------) + ftpaccess - 0x0017248b, // n0x07a0 c0x0000 (---------------) + game-server - 0x000d2788, // n0x07a1 c0x0000 (---------------) + myphotos - 0x00045689, // n0x07a2 c0x0000 (---------------) + scrapping - 0x0027d903, // n0x07a3 c0x0000 (---------------) + I gov - 0x000fe108, // n0x07a4 c0x0000 (---------------) + blogspot - 0x000fe108, // n0x07a5 c0x0000 (---------------) + blogspot - 0x00200342, // n0x07a6 c0x0000 (---------------) + I ac - 0x002d5e84, // n0x07a7 c0x0000 (---------------) + I asso - 0x0020ce42, // n0x07a8 c0x0000 (---------------) + I co - 0x00233243, // n0x07a9 c0x0000 (---------------) + I com - 0x002024c2, // n0x07aa c0x0000 (---------------) + I ed - 0x00239103, // n0x07ab c0x0000 (---------------) + I edu - 0x00210a42, // n0x07ac c0x0000 (---------------) + I go - 0x002aebc4, // n0x07ad c0x0000 (---------------) + I gouv - 0x00201503, // n0x07ae c0x0000 (---------------) + I int - 0x0024dd82, // n0x07af c0x0000 (---------------) + I md - 0x00223b43, // n0x07b0 c0x0000 (---------------) + I net - 0x00200dc2, // n0x07b1 c0x0000 (---------------) + I or - 0x00228743, // n0x07b2 c0x0000 (---------------) + I org - 0x00246a06, // n0x07b3 c0x0000 (---------------) + I presse - 0x0030facf, // n0x07b4 c0x0000 (---------------) + I 
xn--aroport-bya - 0x006ffb43, // n0x07b5 c0x0001 (---------------) ! I www - 0x000fe108, // n0x07b6 c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x07b7 c0x0000 (---------------) + I co - 0x00212b03, // n0x07b8 c0x0000 (---------------) + I gob - 0x0027d903, // n0x07b9 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x07ba c0x0000 (---------------) + I mil - 0x0020ce42, // n0x07bb c0x0000 (---------------) + I co - 0x00233243, // n0x07bc c0x0000 (---------------) + I com - 0x0027d903, // n0x07bd c0x0000 (---------------) + I gov - 0x00223b43, // n0x07be c0x0000 (---------------) + I net - 0x00200342, // n0x07bf c0x0000 (---------------) + I ac - 0x00205702, // n0x07c0 c0x0000 (---------------) + I ah - 0x0e6f1ec9, // n0x07c1 c0x0039 (n0x07ec-n0x07ed) o I amazonaws - 0x0020b442, // n0x07c2 c0x0000 (---------------) + I bj - 0x0ee33243, // n0x07c3 c0x003b (n0x07ee-n0x07ef) + I com - 0x00242e82, // n0x07c4 c0x0000 (---------------) + I cq - 0x00239103, // n0x07c5 c0x0000 (---------------) + I edu - 0x0021cc42, // n0x07c6 c0x0000 (---------------) + I fj - 0x002265c2, // n0x07c7 c0x0000 (---------------) + I gd - 0x0027d903, // n0x07c8 c0x0000 (---------------) + I gov - 0x00245dc2, // n0x07c9 c0x0000 (---------------) + I gs - 0x00253302, // n0x07ca c0x0000 (---------------) + I gx - 0x0025ca42, // n0x07cb c0x0000 (---------------) + I gz - 0x00202302, // n0x07cc c0x0000 (---------------) + I ha - 0x0028ea42, // n0x07cd c0x0000 (---------------) + I hb - 0x002069c2, // n0x07ce c0x0000 (---------------) + I he - 0x002003c2, // n0x07cf c0x0000 (---------------) + I hi - 0x0020c482, // n0x07d0 c0x0000 (---------------) + I hk - 0x002484c2, // n0x07d1 c0x0000 (---------------) + I hl - 0x0021c682, // n0x07d2 c0x0000 (---------------) + I hn - 0x002add82, // n0x07d3 c0x0000 (---------------) + I jl - 0x00233742, // n0x07d4 c0x0000 (---------------) + I js - 0x00231382, // n0x07d5 c0x0000 (---------------) + I jx - 0x00229282, // n0x07d6 c0x0000 
(---------------) + I ln - 0x00207dc3, // n0x07d7 c0x0000 (---------------) + I mil - 0x00208442, // n0x07d8 c0x0000 (---------------) + I mo - 0x00223b43, // n0x07d9 c0x0000 (---------------) + I net - 0x0022b882, // n0x07da c0x0000 (---------------) + I nm - 0x00268e82, // n0x07db c0x0000 (---------------) + I nx - 0x00228743, // n0x07dc c0x0000 (---------------) + I org - 0x0024f702, // n0x07dd c0x0000 (---------------) + I qh - 0x00207f02, // n0x07de c0x0000 (---------------) + I sc - 0x0022b2c2, // n0x07df c0x0000 (---------------) + I sd - 0x00201242, // n0x07e0 c0x0000 (---------------) + I sh - 0x00214b82, // n0x07e1 c0x0000 (---------------) + I sn - 0x002edb02, // n0x07e2 c0x0000 (---------------) + I sx - 0x00231342, // n0x07e3 c0x0000 (---------------) + I tj - 0x002534c2, // n0x07e4 c0x0000 (---------------) + I tw - 0x003a5242, // n0x07e5 c0x0000 (---------------) + I xj - 0x0030394a, // n0x07e6 c0x0000 (---------------) + I xn--55qx5d - 0x0034278a, // n0x07e7 c0x0000 (---------------) + I xn--io0a7i - 0x0037530a, // n0x07e8 c0x0000 (---------------) + I xn--od0alg - 0x003a5dc2, // n0x07e9 c0x0000 (---------------) + I xz - 0x00212fc2, // n0x07ea c0x0000 (---------------) + I yn - 0x00246c02, // n0x07eb c0x0000 (---------------) + I zj - 0x0e8358c7, // n0x07ec c0x003a (n0x07ed-n0x07ee) + compute - 0x00179d0a, // n0x07ed c0x0000 (---------------) + cn-north-1 - 0x0f2f1ec9, // n0x07ee c0x003c (n0x07ef-n0x07f0) o I amazonaws - 0x0f779d0a, // n0x07ef c0x003d (n0x07f0-n0x07f1) o I cn-north-1 - 0x0002ba42, // n0x07f0 c0x0000 (---------------) + s3 - 0x0024b944, // n0x07f1 c0x0000 (---------------) + I arts - 0x0fe33243, // n0x07f2 c0x003f (n0x07fe-n0x07ff) + I com - 0x00239103, // n0x07f3 c0x0000 (---------------) + I edu - 0x0024dcc4, // n0x07f4 c0x0000 (---------------) + I firm - 0x0027d903, // n0x07f5 c0x0000 (---------------) + I gov - 0x00201844, // n0x07f6 c0x0000 (---------------) + I info - 0x00201503, // n0x07f7 c0x0000 (---------------) + I int - 
0x00207dc3, // n0x07f8 c0x0000 (---------------) + I mil - 0x00223b43, // n0x07f9 c0x0000 (---------------) + I net - 0x00201383, // n0x07fa c0x0000 (---------------) + I nom - 0x00228743, // n0x07fb c0x0000 (---------------) + I org - 0x0022c2c3, // n0x07fc c0x0000 (---------------) + I rec - 0x0021e243, // n0x07fd c0x0000 (---------------) + I web - 0x000fe108, // n0x07fe c0x0000 (---------------) + blogspot - 0x0006c185, // n0x07ff c0x0000 (---------------) + 1kapp - 0x0010c982, // n0x0800 c0x0000 (---------------) + 4u - 0x00174cc6, // n0x0801 c0x0000 (---------------) + africa - 0x106f1ec9, // n0x0802 c0x0041 (n0x08d1-n0x08e3) o I amazonaws - 0x00008087, // n0x0803 c0x0000 (---------------) + appspot - 0x00001d42, // n0x0804 c0x0000 (---------------) + ar - 0x0019d10a, // n0x0805 c0x0000 (---------------) + betainabox - 0x0002f087, // n0x0806 c0x0000 (---------------) + blogdns - 0x000fe108, // n0x0807 c0x0000 (---------------) + blogspot - 0x0001e3c2, // n0x0808 c0x0000 (---------------) + br - 0x00138747, // n0x0809 c0x0000 (---------------) + cechire - 0x0019458f, // n0x080a c0x0000 (---------------) + cloudcontrolapp - 0x000f44cf, // n0x080b c0x0000 (---------------) + cloudcontrolled - 0x0001dac2, // n0x080c c0x0000 (---------------) + cn - 0x0000ce42, // n0x080d c0x0000 (---------------) + co - 0x0009c348, // n0x080e c0x0000 (---------------) + codespot - 0x00005582, // n0x080f c0x0000 (---------------) + de - 0x00142e48, // n0x0810 c0x0000 (---------------) + dnsalias - 0x0007cf07, // n0x0811 c0x0000 (---------------) + dnsdojo - 0x0001580b, // n0x0812 c0x0000 (---------------) + doesntexist - 0x0016a789, // n0x0813 c0x0000 (---------------) + dontexist - 0x00142d47, // n0x0814 c0x0000 (---------------) + doomdns - 0x000efc0c, // n0x0815 c0x0000 (---------------) + dreamhosters - 0x0013c907, // n0x0816 c0x0000 (---------------) + dsmynas - 0x0012394a, // n0x0817 c0x0000 (---------------) + dyn-o-saur - 0x00197108, // n0x0818 c0x0000 (---------------) + 
dynalias - 0x00073a8e, // n0x0819 c0x0000 (---------------) + dyndns-at-home - 0x000db90e, // n0x081a c0x0000 (---------------) + dyndns-at-work - 0x0002eecb, // n0x081b c0x0000 (---------------) + dyndns-blog - 0x000e7ecb, // n0x081c c0x0000 (---------------) + dyndns-free - 0x0001320b, // n0x081d c0x0000 (---------------) + dyndns-home - 0x00014089, // n0x081e c0x0000 (---------------) + dyndns-ip - 0x00018acb, // n0x081f c0x0000 (---------------) + dyndns-mail - 0x0001dd0d, // n0x0820 c0x0000 (---------------) + dyndns-office - 0x0002144b, // n0x0821 c0x0000 (---------------) + dyndns-pics - 0x000251cd, // n0x0822 c0x0000 (---------------) + dyndns-remote - 0x0002e54d, // n0x0823 c0x0000 (---------------) + dyndns-server - 0x0002f9ca, // n0x0824 c0x0000 (---------------) + dyndns-web - 0x0003e7cb, // n0x0825 c0x0000 (---------------) + dyndns-wiki - 0x00156ecb, // n0x0826 c0x0000 (---------------) + dyndns-work - 0x00022a90, // n0x0827 c0x0000 (---------------) + elasticbeanstalk - 0x000b7d0f, // n0x0828 c0x0000 (---------------) + est-a-la-maison - 0x00008f0f, // n0x0829 c0x0000 (---------------) + est-a-la-masion - 0x001574cd, // n0x082a c0x0000 (---------------) + est-le-patron - 0x0007b810, // n0x082b c0x0000 (---------------) + est-mon-blogueur - 0x00005382, // n0x082c c0x0000 (---------------) + eu - 0x00007d48, // n0x082d c0x0000 (---------------) + familyds - 0x0004c58b, // n0x082e c0x0000 (---------------) + firebaseapp - 0x00054ec8, // n0x082f c0x0000 (---------------) + flynnhub - 0x00062387, // n0x0830 c0x0000 (---------------) + from-ak - 0x000626c7, // n0x0831 c0x0000 (---------------) + from-al - 0x00062887, // n0x0832 c0x0000 (---------------) + from-ar - 0x00063547, // n0x0833 c0x0000 (---------------) + from-ca - 0x00064007, // n0x0834 c0x0000 (---------------) + from-ct - 0x00064607, // n0x0835 c0x0000 (---------------) + from-dc - 0x00065347, // n0x0836 c0x0000 (---------------) + from-de - 0x00065887, // n0x0837 c0x0000 (---------------) + 
from-fl - 0x000668c7, // n0x0838 c0x0000 (---------------) + from-ga - 0x00066c47, // n0x0839 c0x0000 (---------------) + from-hi - 0x000674c7, // n0x083a c0x0000 (---------------) + from-ia - 0x00067687, // n0x083b c0x0000 (---------------) + from-id - 0x00067847, // n0x083c c0x0000 (---------------) + from-il - 0x00067a07, // n0x083d c0x0000 (---------------) + from-in - 0x00067d07, // n0x083e c0x0000 (---------------) + from-ks - 0x00068107, // n0x083f c0x0000 (---------------) + from-ky - 0x00068cc7, // n0x0840 c0x0000 (---------------) + from-ma - 0x00069187, // n0x0841 c0x0000 (---------------) + from-md - 0x00069707, // n0x0842 c0x0000 (---------------) + from-mi - 0x0006d147, // n0x0843 c0x0000 (---------------) + from-mn - 0x0006d307, // n0x0844 c0x0000 (---------------) + from-mo - 0x0006d607, // n0x0845 c0x0000 (---------------) + from-ms - 0x0006da07, // n0x0846 c0x0000 (---------------) + from-mt - 0x0006dc07, // n0x0847 c0x0000 (---------------) + from-nc - 0x0006e907, // n0x0848 c0x0000 (---------------) + from-nd - 0x0006eac7, // n0x0849 c0x0000 (---------------) + from-ne - 0x0006eec7, // n0x084a c0x0000 (---------------) + from-nh - 0x0006f607, // n0x084b c0x0000 (---------------) + from-nj - 0x0006fac7, // n0x084c c0x0000 (---------------) + from-nm - 0x00070587, // n0x084d c0x0000 (---------------) + from-nv - 0x00070b87, // n0x084e c0x0000 (---------------) + from-oh - 0x00070e47, // n0x084f c0x0000 (---------------) + from-ok - 0x000711c7, // n0x0850 c0x0000 (---------------) + from-or - 0x00071387, // n0x0851 c0x0000 (---------------) + from-pa - 0x00071707, // n0x0852 c0x0000 (---------------) + from-pr - 0x00071d87, // n0x0853 c0x0000 (---------------) + from-ri - 0x00072207, // n0x0854 c0x0000 (---------------) + from-sc - 0x00072607, // n0x0855 c0x0000 (---------------) + from-sd - 0x00072e07, // n0x0856 c0x0000 (---------------) + from-tn - 0x00072fc7, // n0x0857 c0x0000 (---------------) + from-tx - 0x00073407, // n0x0858 c0x0000 
(---------------) + from-ut - 0x00074407, // n0x0859 c0x0000 (---------------) + from-va - 0x00074a47, // n0x085a c0x0000 (---------------) + from-vt - 0x00074d47, // n0x085b c0x0000 (---------------) + from-wa - 0x00074f07, // n0x085c c0x0000 (---------------) + from-wi - 0x00075287, // n0x085d c0x0000 (---------------) + from-wv - 0x00076407, // n0x085e c0x0000 (---------------) + from-wy - 0x0000e482, // n0x085f c0x0000 (---------------) + gb - 0x000d5a87, // n0x0860 c0x0000 (---------------) + getmyip - 0x000cbb91, // n0x0861 c0x0000 (---------------) + githubusercontent - 0x000df04a, // n0x0862 c0x0000 (---------------) + googleapis - 0x0009c1ca, // n0x0863 c0x0000 (---------------) + googlecode - 0x00058186, // n0x0864 c0x0000 (---------------) + gotdns - 0x00010a4b, // n0x0865 c0x0000 (---------------) + gotpantheon - 0x00008a82, // n0x0866 c0x0000 (---------------) + gr - 0x0009cf49, // n0x0867 c0x0000 (---------------) + herokuapp - 0x00093209, // n0x0868 c0x0000 (---------------) + herokussl - 0x0000c482, // n0x0869 c0x0000 (---------------) + hk - 0x0014a8ca, // n0x086a c0x0000 (---------------) + hobby-site - 0x000a6dc9, // n0x086b c0x0000 (---------------) + homelinux - 0x000a8148, // n0x086c c0x0000 (---------------) + homeunix - 0x00024202, // n0x086d c0x0000 (---------------) + hu - 0x00119609, // n0x086e c0x0000 (---------------) + iamallama - 0x0016cf4e, // n0x086f c0x0000 (---------------) + is-a-anarchist - 0x000a550c, // n0x0870 c0x0000 (---------------) + is-a-blogger - 0x000d3b8f, // n0x0871 c0x0000 (---------------) + is-a-bookkeeper - 0x0018ac0e, // n0x0872 c0x0000 (---------------) + is-a-bulls-fan - 0x0000f08c, // n0x0873 c0x0000 (---------------) + is-a-caterer - 0x000110c9, // n0x0874 c0x0000 (---------------) + is-a-chef - 0x00014711, // n0x0875 c0x0000 (---------------) + is-a-conservative - 0x00016788, // n0x0876 c0x0000 (---------------) + is-a-cpa - 0x0001fdd2, // n0x0877 c0x0000 (---------------) + is-a-cubicle-slave - 0x000244cd, 
// n0x0878 c0x0000 (---------------) + is-a-democrat - 0x000280cd, // n0x0879 c0x0000 (---------------) + is-a-designer - 0x0016894b, // n0x087a c0x0000 (---------------) + is-a-doctor - 0x000d9dd5, // n0x087b c0x0000 (---------------) + is-a-financialadvisor - 0x0004ef49, // n0x087c c0x0000 (---------------) + is-a-geek - 0x0005200a, // n0x087d c0x0000 (---------------) + is-a-green - 0x00053949, // n0x087e c0x0000 (---------------) + is-a-guru - 0x0005b990, // n0x087f c0x0000 (---------------) + is-a-hard-worker - 0x0006434b, // n0x0880 c0x0000 (---------------) + is-a-hunter - 0x0006640f, // n0x0881 c0x0000 (---------------) + is-a-landscaper - 0x00071acb, // n0x0882 c0x0000 (---------------) + is-a-lawyer - 0x00071f0c, // n0x0883 c0x0000 (---------------) + is-a-liberal - 0x00074010, // n0x0884 c0x0000 (---------------) + is-a-libertarian - 0x0008080a, // n0x0885 c0x0000 (---------------) + is-a-llama - 0x0008118d, // n0x0886 c0x0000 (---------------) + is-a-musician - 0x0008b00e, // n0x0887 c0x0000 (---------------) + is-a-nascarfan - 0x00143b4a, // n0x0888 c0x0000 (---------------) + is-a-nurse - 0x0008c5cc, // n0x0889 c0x0000 (---------------) + is-a-painter - 0x000995d4, // n0x088a c0x0000 (---------------) + is-a-personaltrainer - 0x0009cbd1, // n0x088b c0x0000 (---------------) + is-a-photographer - 0x0009ff8b, // n0x088c c0x0000 (---------------) + is-a-player - 0x000a0d0f, // n0x088d c0x0000 (---------------) + is-a-republican - 0x000a270d, // n0x088e c0x0000 (---------------) + is-a-rockstar - 0x000a4e8e, // n0x088f c0x0000 (---------------) + is-a-socialist - 0x000ac80c, // n0x0890 c0x0000 (---------------) + is-a-student - 0x000acf4c, // n0x0891 c0x0000 (---------------) + is-a-teacher - 0x000d004b, // n0x0892 c0x0000 (---------------) + is-a-techie - 0x000d034e, // n0x0893 c0x0000 (---------------) + is-a-therapist - 0x000cf950, // n0x0894 c0x0000 (---------------) + is-an-accountant - 0x000bbf0b, // n0x0895 c0x0000 (---------------) + is-an-actor - 
0x000cf44d, // n0x0896 c0x0000 (---------------) + is-an-actress - 0x0014b44f, // n0x0897 c0x0000 (---------------) + is-an-anarchist - 0x0013ba8c, // n0x0898 c0x0000 (---------------) + is-an-artist - 0x0010c34e, // n0x0899 c0x0000 (---------------) + is-an-engineer - 0x00169751, // n0x089a c0x0000 (---------------) + is-an-entertainer - 0x000b47cc, // n0x089b c0x0000 (---------------) + is-certified - 0x000ba5c7, // n0x089c c0x0000 (---------------) + is-gone - 0x000bb94d, // n0x089d c0x0000 (---------------) + is-into-anime - 0x000bd18c, // n0x089e c0x0000 (---------------) + is-into-cars - 0x000fc8d0, // n0x089f c0x0000 (---------------) + is-into-cartoons - 0x00145b8d, // n0x08a0 c0x0000 (---------------) + is-into-games - 0x0016f007, // n0x08a1 c0x0000 (---------------) + is-leet - 0x00173bd0, // n0x08a2 c0x0000 (---------------) + is-not-certified - 0x000f2bc8, // n0x08a3 c0x0000 (---------------) + is-slick - 0x000e984b, // n0x08a4 c0x0000 (---------------) + is-uberleet - 0x001429cf, // n0x08a5 c0x0000 (---------------) + is-with-theband - 0x000862c8, // n0x08a6 c0x0000 (---------------) + isa-geek - 0x000df24d, // n0x08a7 c0x0000 (---------------) + isa-hockeynut - 0x0014dcd0, // n0x08a8 c0x0000 (---------------) + issmarterthanyou - 0x000b0a83, // n0x08a9 c0x0000 (---------------) + jpn - 0x0000bdc2, // n0x08aa c0x0000 (---------------) + kr - 0x00058c89, // n0x08ab c0x0000 (---------------) + likes-pie - 0x0007388a, // n0x08ac c0x0000 (---------------) + likescandy - 0x00000983, // n0x08ad c0x0000 (---------------) + mex - 0x0010d047, // n0x08ae c0x0000 (---------------) + mydrobo - 0x00119d48, // n0x08af c0x0000 (---------------) + neat-url - 0x0016c0c7, // n0x08b0 c0x0000 (---------------) + nfshost - 0x00001382, // n0x08b1 c0x0000 (---------------) + no - 0x00064bca, // n0x08b2 c0x0000 (---------------) + operaunite - 0x0019430f, // n0x08b3 c0x0000 (---------------) + outsystemscloud - 0x0012690c, // n0x08b4 c0x0000 (---------------) + pagefrontapp - 
0x00126bd2, // n0x08b5 c0x0000 (---------------) + pagespeedmobilizer - 0x116e1185, // n0x08b6 c0x0045 (n0x08ef-n0x08f0) o I prgmr - 0x00116bc3, // n0x08b7 c0x0000 (---------------) + qa2 - 0x0017bf42, // n0x08b8 c0x0000 (---------------) + qc - 0x00127bc8, // n0x08b9 c0x0000 (---------------) + rackmaze - 0x000f4447, // n0x08ba c0x0000 (---------------) + rhcloud - 0x000020c2, // n0x08bb c0x0000 (---------------) + ro - 0x0000fe82, // n0x08bc c0x0000 (---------------) + ru - 0x00001002, // n0x08bd c0x0000 (---------------) + sa - 0x00187d50, // n0x08be c0x0000 (---------------) + saves-the-whales - 0x00004ec2, // n0x08bf c0x0000 (---------------) + se - 0x0005abc6, // n0x08c0 c0x0000 (---------------) + selfip - 0x0004d00e, // n0x08c1 c0x0000 (---------------) + sells-for-less - 0x0008e5cb, // n0x08c2 c0x0000 (---------------) + sells-for-u - 0x000ccd88, // n0x08c3 c0x0000 (---------------) + servebbs - 0x000ca1ca, // n0x08c4 c0x0000 (---------------) + simple-url - 0x000f6a87, // n0x08c5 c0x0000 (---------------) + sinaapp - 0x0000aa8d, // n0x08c6 c0x0000 (---------------) + space-to-rent - 0x00152a4c, // n0x08c7 c0x0000 (---------------) + teaches-yoga - 0x00001b02, // n0x08c8 c0x0000 (---------------) + uk - 0x00002242, // n0x08c9 c0x0000 (---------------) + us - 0x000041c2, // n0x08ca c0x0000 (---------------) + uy - 0x000f69ca, // n0x08cb c0x0000 (---------------) + vipsinaapp - 0x000def4a, // n0x08cc c0x0000 (---------------) + withgoogle - 0x000e548b, // n0x08cd c0x0000 (---------------) + withyoutube - 0x000fde8e, // n0x08ce c0x0000 (---------------) + writesthisblog - 0x000d80c8, // n0x08cf c0x0000 (---------------) + yolasite - 0x00000182, // n0x08d0 c0x0000 (---------------) + za - 0x108358c7, // n0x08d1 c0x0042 (n0x08e3-n0x08ec) + compute - 0x10c358c9, // n0x08d2 c0x0043 (n0x08ec-n0x08ee) + compute-1 - 0x000123c3, // n0x08d3 c0x0000 (---------------) + elb - 0x1122bb0c, // n0x08d4 c0x0044 (n0x08ee-n0x08ef) o I eu-central-1 - 0x0002ba42, // n0x08d5 
c0x0000 (---------------) + s3 - 0x0006cd11, // n0x08d6 c0x0000 (---------------) + s3-ap-northeast-1 - 0x0006bd91, // n0x08d7 c0x0000 (---------------) + s3-ap-southeast-1 - 0x000f5791, // n0x08d8 c0x0000 (---------------) + s3-ap-southeast-2 - 0x0002ba4f, // n0x08d9 c0x0000 (---------------) + s3-eu-central-1 - 0x00055f4c, // n0x08da c0x0000 (---------------) + s3-eu-west-1 - 0x000dd0cd, // n0x08db c0x0000 (---------------) + s3-external-1 - 0x0011358d, // n0x08dc c0x0000 (---------------) + s3-external-2 - 0x00121cd5, // n0x08dd c0x0000 (---------------) + s3-fips-us-gov-west-1 - 0x00123d0c, // n0x08de c0x0000 (---------------) + s3-sa-east-1 - 0x0016c590, // n0x08df c0x0000 (---------------) + s3-us-gov-west-1 - 0x000e3acc, // n0x08e0 c0x0000 (---------------) + s3-us-west-1 - 0x000c330c, // n0x08e1 c0x0000 (---------------) + s3-us-west-2 - 0x0003ef09, // n0x08e2 c0x0000 (---------------) + us-east-1 - 0x0006cdce, // n0x08e3 c0x0000 (---------------) + ap-northeast-1 - 0x0006be4e, // n0x08e4 c0x0000 (---------------) + ap-southeast-1 - 0x000f584e, // n0x08e5 c0x0000 (---------------) + ap-southeast-2 - 0x0002bb0c, // n0x08e6 c0x0000 (---------------) + eu-central-1 - 0x00056009, // n0x08e7 c0x0000 (---------------) + eu-west-1 - 0x00123dc9, // n0x08e8 c0x0000 (---------------) + sa-east-1 - 0x00121ecd, // n0x08e9 c0x0000 (---------------) + us-gov-west-1 - 0x000e3b89, // n0x08ea c0x0000 (---------------) + us-west-1 - 0x000c33c9, // n0x08eb c0x0000 (---------------) + us-west-2 - 0x00155a83, // n0x08ec c0x0000 (---------------) + z-1 - 0x0013e483, // n0x08ed c0x0000 (---------------) + z-2 - 0x0002ba42, // n0x08ee c0x0000 (---------------) + s3 - 0x000196c3, // n0x08ef c0x0000 (---------------) + xen - 0x00200342, // n0x08f0 c0x0000 (---------------) + I ac - 0x0020ce42, // n0x08f1 c0x0000 (---------------) + I co - 0x002024c2, // n0x08f2 c0x0000 (---------------) + I ed - 0x00201702, // n0x08f3 c0x0000 (---------------) + I fi - 0x00210a42, // n0x08f4 c0x0000 
(---------------) + I go - 0x00200dc2, // n0x08f5 c0x0000 (---------------) + I or - 0x00201002, // n0x08f6 c0x0000 (---------------) + I sa - 0x00233243, // n0x08f7 c0x0000 (---------------) + I com - 0x00239103, // n0x08f8 c0x0000 (---------------) + I edu - 0x0027d903, // n0x08f9 c0x0000 (---------------) + I gov - 0x00201683, // n0x08fa c0x0000 (---------------) + I inf - 0x00223b43, // n0x08fb c0x0000 (---------------) + I net - 0x00228743, // n0x08fc c0x0000 (---------------) + I org - 0x000fe108, // n0x08fd c0x0000 (---------------) + blogspot - 0x00233243, // n0x08fe c0x0000 (---------------) + I com - 0x00239103, // n0x08ff c0x0000 (---------------) + I edu - 0x00223b43, // n0x0900 c0x0000 (---------------) + I net - 0x00228743, // n0x0901 c0x0000 (---------------) + I org - 0x0003cfc3, // n0x0902 c0x0000 (---------------) + ath - 0x0027d903, // n0x0903 c0x0000 (---------------) + I gov - 0x00200342, // n0x0904 c0x0000 (---------------) + I ac - 0x00331a83, // n0x0905 c0x0000 (---------------) + I biz - 0x13233243, // n0x0906 c0x004c (n0x0911-n0x0912) + I com - 0x00279fc7, // n0x0907 c0x0000 (---------------) + I ekloges - 0x0027d903, // n0x0908 c0x0000 (---------------) + I gov - 0x00312883, // n0x0909 c0x0000 (---------------) + I ltd - 0x00200904, // n0x090a c0x0000 (---------------) + I name - 0x00223b43, // n0x090b c0x0000 (---------------) + I net - 0x00228743, // n0x090c c0x0000 (---------------) + I org - 0x0028458a, // n0x090d c0x0000 (---------------) + I parliament - 0x00246a05, // n0x090e c0x0000 (---------------) + I press - 0x00224b03, // n0x090f c0x0000 (---------------) + I pro - 0x00200c82, // n0x0910 c0x0000 (---------------) + I tm - 0x000fe108, // n0x0911 c0x0000 (---------------) + blogspot - 0x000fe108, // n0x0912 c0x0000 (---------------) + blogspot - 0x0000ce42, // n0x0913 c0x0000 (---------------) + co - 0x000fe108, // n0x0914 c0x0000 (---------------) + blogspot - 0x00033243, // n0x0915 c0x0000 (---------------) + com - 
0x000fce0f, // n0x0916 c0x0000 (---------------) + fuettertdasnetz - 0x0016a90a, // n0x0917 c0x0000 (---------------) + isteingeek - 0x000a5147, // n0x0918 c0x0000 (---------------) + istmein - 0x000239ca, // n0x0919 c0x0000 (---------------) + lebtimnetz - 0x00097c0a, // n0x091a c0x0000 (---------------) + leitungsen - 0x000052cd, // n0x091b c0x0000 (---------------) + traeumtgerade - 0x000fe108, // n0x091c c0x0000 (---------------) + blogspot - 0x00233243, // n0x091d c0x0000 (---------------) + I com - 0x00239103, // n0x091e c0x0000 (---------------) + I edu - 0x0027d903, // n0x091f c0x0000 (---------------) + I gov - 0x00223b43, // n0x0920 c0x0000 (---------------) + I net - 0x00228743, // n0x0921 c0x0000 (---------------) + I org - 0x00201d43, // n0x0922 c0x0000 (---------------) + I art - 0x00233243, // n0x0923 c0x0000 (---------------) + I com - 0x00239103, // n0x0924 c0x0000 (---------------) + I edu - 0x00212b03, // n0x0925 c0x0000 (---------------) + I gob - 0x0027d903, // n0x0926 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x0927 c0x0000 (---------------) + I mil - 0x00223b43, // n0x0928 c0x0000 (---------------) + I net - 0x00228743, // n0x0929 c0x0000 (---------------) + I org - 0x002933c3, // n0x092a c0x0000 (---------------) + I sld - 0x0021e243, // n0x092b c0x0000 (---------------) + I web - 0x00201d43, // n0x092c c0x0000 (---------------) + I art - 0x002d5e84, // n0x092d c0x0000 (---------------) + I asso - 0x00233243, // n0x092e c0x0000 (---------------) + I com - 0x00239103, // n0x092f c0x0000 (---------------) + I edu - 0x0027d903, // n0x0930 c0x0000 (---------------) + I gov - 0x00223b43, // n0x0931 c0x0000 (---------------) + I net - 0x00228743, // n0x0932 c0x0000 (---------------) + I org - 0x00206ec3, // n0x0933 c0x0000 (---------------) + I pol - 0x00233243, // n0x0934 c0x0000 (---------------) + I com - 0x00239103, // n0x0935 c0x0000 (---------------) + I edu - 0x00201703, // n0x0936 c0x0000 (---------------) + I fin - 0x00212b03, 
// n0x0937 c0x0000 (---------------) + I gob - 0x0027d903, // n0x0938 c0x0000 (---------------) + I gov - 0x00201844, // n0x0939 c0x0000 (---------------) + I info - 0x00332603, // n0x093a c0x0000 (---------------) + I k12 - 0x00213443, // n0x093b c0x0000 (---------------) + I med - 0x00207dc3, // n0x093c c0x0000 (---------------) + I mil - 0x00223b43, // n0x093d c0x0000 (---------------) + I net - 0x00228743, // n0x093e c0x0000 (---------------) + I org - 0x00224b03, // n0x093f c0x0000 (---------------) + I pro - 0x00200503, // n0x0940 c0x0000 (---------------) + I aip - 0x15633243, // n0x0941 c0x0055 (n0x094a-n0x094b) + I com - 0x00239103, // n0x0942 c0x0000 (---------------) + I edu - 0x002b49c3, // n0x0943 c0x0000 (---------------) + I fie - 0x0027d903, // n0x0944 c0x0000 (---------------) + I gov - 0x00272043, // n0x0945 c0x0000 (---------------) + I lib - 0x00213443, // n0x0946 c0x0000 (---------------) + I med - 0x00228743, // n0x0947 c0x0000 (---------------) + I org - 0x00204e03, // n0x0948 c0x0000 (---------------) + I pri - 0x0030a104, // n0x0949 c0x0000 (---------------) + I riik - 0x000fe108, // n0x094a c0x0000 (---------------) + blogspot - 0x15e33243, // n0x094b c0x0057 (n0x0954-n0x0955) + I com - 0x00239103, // n0x094c c0x0000 (---------------) + I edu - 0x002a8203, // n0x094d c0x0000 (---------------) + I eun - 0x0027d903, // n0x094e c0x0000 (---------------) + I gov - 0x00207dc3, // n0x094f c0x0000 (---------------) + I mil - 0x00200904, // n0x0950 c0x0000 (---------------) + I name - 0x00223b43, // n0x0951 c0x0000 (---------------) + I net - 0x00228743, // n0x0952 c0x0000 (---------------) + I org - 0x00221983, // n0x0953 c0x0000 (---------------) + I sci - 0x000fe108, // n0x0954 c0x0000 (---------------) + blogspot - 0x16633243, // n0x0955 c0x0059 (n0x095a-n0x095b) + I com - 0x00239103, // n0x0956 c0x0000 (---------------) + I edu - 0x00212b03, // n0x0957 c0x0000 (---------------) + I gob - 0x00201383, // n0x0958 c0x0000 (---------------) + I 
nom - 0x00228743, // n0x0959 c0x0000 (---------------) + I org - 0x000fe108, // n0x095a c0x0000 (---------------) + blogspot - 0x00331a83, // n0x095b c0x0000 (---------------) + I biz - 0x00233243, // n0x095c c0x0000 (---------------) + I com - 0x00239103, // n0x095d c0x0000 (---------------) + I edu - 0x0027d903, // n0x095e c0x0000 (---------------) + I gov - 0x00201844, // n0x095f c0x0000 (---------------) + I info - 0x00200904, // n0x0960 c0x0000 (---------------) + I name - 0x00223b43, // n0x0961 c0x0000 (---------------) + I net - 0x00228743, // n0x0962 c0x0000 (---------------) + I org - 0x00321085, // n0x0963 c0x0000 (---------------) + I aland - 0x000fe108, // n0x0964 c0x0000 (---------------) + blogspot - 0x0003a783, // n0x0965 c0x0000 (---------------) + iki - 0x003274c8, // n0x0966 c0x0000 (---------------) + I aeroport - 0x0034f007, // n0x0967 c0x0000 (---------------) + I assedic - 0x002d5e84, // n0x0968 c0x0000 (---------------) + I asso - 0x00353c06, // n0x0969 c0x0000 (---------------) + I avocat - 0x0036c446, // n0x096a c0x0000 (---------------) + I avoues - 0x000fe108, // n0x096b c0x0000 (---------------) + blogspot - 0x00234f03, // n0x096c c0x0000 (---------------) + I cci - 0x00208909, // n0x096d c0x0000 (---------------) + I chambagri - 0x002b3c95, // n0x096e c0x0000 (---------------) + I chirurgiens-dentistes - 0x00233243, // n0x096f c0x0000 (---------------) + I com - 0x0031f2d2, // n0x0970 c0x0000 (---------------) + I experts-comptables - 0x0031f08f, // n0x0971 c0x0000 (---------------) + I geometre-expert - 0x002aebc4, // n0x0972 c0x0000 (---------------) + I gouv - 0x0021a045, // n0x0973 c0x0000 (---------------) + I greta - 0x002eec50, // n0x0974 c0x0000 (---------------) + I huissier-justice - 0x00378d47, // n0x0975 c0x0000 (---------------) + I medecin - 0x00201383, // n0x0976 c0x0000 (---------------) + I nom - 0x0035f0c8, // n0x0977 c0x0000 (---------------) + I notaires - 0x0034af0a, // n0x0978 c0x0000 (---------------) + I 
pharmacien - 0x00245144, // n0x0979 c0x0000 (---------------) + I port - 0x002e0bc3, // n0x097a c0x0000 (---------------) + I prd - 0x00246a06, // n0x097b c0x0000 (---------------) + I presse - 0x00200c82, // n0x097c c0x0000 (---------------) + I tm - 0x002d32cb, // n0x097d c0x0000 (---------------) + I veterinaire - 0x00233243, // n0x097e c0x0000 (---------------) + I com - 0x00239103, // n0x097f c0x0000 (---------------) + I edu - 0x0027d903, // n0x0980 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x0981 c0x0000 (---------------) + I mil - 0x00223b43, // n0x0982 c0x0000 (---------------) + I net - 0x00228743, // n0x0983 c0x0000 (---------------) + I org - 0x002e62c3, // n0x0984 c0x0000 (---------------) + I pvt - 0x0020ce42, // n0x0985 c0x0000 (---------------) + I co - 0x00223b43, // n0x0986 c0x0000 (---------------) + I net - 0x00228743, // n0x0987 c0x0000 (---------------) + I org - 0x00233243, // n0x0988 c0x0000 (---------------) + I com - 0x00239103, // n0x0989 c0x0000 (---------------) + I edu - 0x0027d903, // n0x098a c0x0000 (---------------) + I gov - 0x00207dc3, // n0x098b c0x0000 (---------------) + I mil - 0x00228743, // n0x098c c0x0000 (---------------) + I org - 0x00233243, // n0x098d c0x0000 (---------------) + I com - 0x00239103, // n0x098e c0x0000 (---------------) + I edu - 0x0027d903, // n0x098f c0x0000 (---------------) + I gov - 0x00312883, // n0x0990 c0x0000 (---------------) + I ltd - 0x002177c3, // n0x0991 c0x0000 (---------------) + I mod - 0x00228743, // n0x0992 c0x0000 (---------------) + I org - 0x0020ce42, // n0x0993 c0x0000 (---------------) + I co - 0x00233243, // n0x0994 c0x0000 (---------------) + I com - 0x00239103, // n0x0995 c0x0000 (---------------) + I edu - 0x00223b43, // n0x0996 c0x0000 (---------------) + I net - 0x00228743, // n0x0997 c0x0000 (---------------) + I org - 0x00200342, // n0x0998 c0x0000 (---------------) + I ac - 0x00233243, // n0x0999 c0x0000 (---------------) + I com - 0x00239103, // n0x099a c0x0000 
(---------------) + I edu - 0x0027d903, // n0x099b c0x0000 (---------------) + I gov - 0x00223b43, // n0x099c c0x0000 (---------------) + I net - 0x00228743, // n0x099d c0x0000 (---------------) + I org - 0x002d5e84, // n0x099e c0x0000 (---------------) + I asso - 0x00233243, // n0x099f c0x0000 (---------------) + I com - 0x00239103, // n0x09a0 c0x0000 (---------------) + I edu - 0x0020bf04, // n0x09a1 c0x0000 (---------------) + I mobi - 0x00223b43, // n0x09a2 c0x0000 (---------------) + I net - 0x00228743, // n0x09a3 c0x0000 (---------------) + I org - 0x000fe108, // n0x09a4 c0x0000 (---------------) + blogspot - 0x00233243, // n0x09a5 c0x0000 (---------------) + I com - 0x00239103, // n0x09a6 c0x0000 (---------------) + I edu - 0x0027d903, // n0x09a7 c0x0000 (---------------) + I gov - 0x00223b43, // n0x09a8 c0x0000 (---------------) + I net - 0x00228743, // n0x09a9 c0x0000 (---------------) + I org - 0x00233243, // n0x09aa c0x0000 (---------------) + I com - 0x00239103, // n0x09ab c0x0000 (---------------) + I edu - 0x00212b03, // n0x09ac c0x0000 (---------------) + I gob - 0x00221b03, // n0x09ad c0x0000 (---------------) + I ind - 0x00207dc3, // n0x09ae c0x0000 (---------------) + I mil - 0x00223b43, // n0x09af c0x0000 (---------------) + I net - 0x00228743, // n0x09b0 c0x0000 (---------------) + I org - 0x0020ce42, // n0x09b1 c0x0000 (---------------) + I co - 0x00233243, // n0x09b2 c0x0000 (---------------) + I com - 0x00239103, // n0x09b3 c0x0000 (---------------) + I edu - 0x0027d903, // n0x09b4 c0x0000 (---------------) + I gov - 0x00223b43, // n0x09b5 c0x0000 (---------------) + I net - 0x00228743, // n0x09b6 c0x0000 (---------------) + I org - 0x000fe108, // n0x09b7 c0x0000 (---------------) + blogspot - 0x00233243, // n0x09b8 c0x0000 (---------------) + I com - 0x00239103, // n0x09b9 c0x0000 (---------------) + I edu - 0x0027d903, // n0x09ba c0x0000 (---------------) + I gov - 0x0020de03, // n0x09bb c0x0000 (---------------) + I idv - 0x0002fd03, // 
n0x09bc c0x0000 (---------------) + inc - 0x00112883, // n0x09bd c0x0000 (---------------) + ltd - 0x00223b43, // n0x09be c0x0000 (---------------) + I net - 0x00228743, // n0x09bf c0x0000 (---------------) + I org - 0x0030394a, // n0x09c0 c0x0000 (---------------) + I xn--55qx5d - 0x0031cf89, // n0x09c1 c0x0000 (---------------) + I xn--ciqpn - 0x0033a84b, // n0x09c2 c0x0000 (---------------) + I xn--gmq050i - 0x0033b18a, // n0x09c3 c0x0000 (---------------) + I xn--gmqw5a - 0x0034278a, // n0x09c4 c0x0000 (---------------) + I xn--io0a7i - 0x00350d0b, // n0x09c5 c0x0000 (---------------) + I xn--lcvr32d - 0x0036718a, // n0x09c6 c0x0000 (---------------) + I xn--mk0axi - 0x0036eb8a, // n0x09c7 c0x0000 (---------------) + I xn--mxtq1m - 0x0037530a, // n0x09c8 c0x0000 (---------------) + I xn--od0alg - 0x0037558b, // n0x09c9 c0x0000 (---------------) + I xn--od0aq3b - 0x00391bc9, // n0x09ca c0x0000 (---------------) + I xn--tn0ag - 0x0039378a, // n0x09cb c0x0000 (---------------) + I xn--uc0atv - 0x00393ccb, // n0x09cc c0x0000 (---------------) + I xn--uc0ay4a - 0x0039e40b, // n0x09cd c0x0000 (---------------) + I xn--wcvs22d - 0x003a478a, // n0x09ce c0x0000 (---------------) + I xn--zf0avx - 0x00233243, // n0x09cf c0x0000 (---------------) + I com - 0x00239103, // n0x09d0 c0x0000 (---------------) + I edu - 0x00212b03, // n0x09d1 c0x0000 (---------------) + I gob - 0x00207dc3, // n0x09d2 c0x0000 (---------------) + I mil - 0x00223b43, // n0x09d3 c0x0000 (---------------) + I net - 0x00228743, // n0x09d4 c0x0000 (---------------) + I org - 0x000fe108, // n0x09d5 c0x0000 (---------------) + blogspot - 0x00233243, // n0x09d6 c0x0000 (---------------) + I com - 0x00262384, // n0x09d7 c0x0000 (---------------) + I from - 0x00214502, // n0x09d8 c0x0000 (---------------) + I iz - 0x00200904, // n0x09d9 c0x0000 (---------------) + I name - 0x002a3105, // n0x09da c0x0000 (---------------) + I adult - 0x00201d43, // n0x09db c0x0000 (---------------) + I art - 0x002d5e84, // 
n0x09dc c0x0000 (---------------) + I asso - 0x00233243, // n0x09dd c0x0000 (---------------) + I com - 0x0023c344, // n0x09de c0x0000 (---------------) + I coop - 0x00239103, // n0x09df c0x0000 (---------------) + I edu - 0x0024dcc4, // n0x09e0 c0x0000 (---------------) + I firm - 0x002aebc4, // n0x09e1 c0x0000 (---------------) + I gouv - 0x00201844, // n0x09e2 c0x0000 (---------------) + I info - 0x00213443, // n0x09e3 c0x0000 (---------------) + I med - 0x00223b43, // n0x09e4 c0x0000 (---------------) + I net - 0x00228743, // n0x09e5 c0x0000 (---------------) + I org - 0x00299705, // n0x09e6 c0x0000 (---------------) + I perso - 0x00206ec3, // n0x09e7 c0x0000 (---------------) + I pol - 0x00224b03, // n0x09e8 c0x0000 (---------------) + I pro - 0x00286903, // n0x09e9 c0x0000 (---------------) + I rel - 0x00357184, // n0x09ea c0x0000 (---------------) + I shop - 0x00329cc4, // n0x09eb c0x0000 (---------------) + I 2000 - 0x00251805, // n0x09ec c0x0000 (---------------) + I agrar - 0x000fe108, // n0x09ed c0x0000 (---------------) + blogspot - 0x002f8204, // n0x09ee c0x0000 (---------------) + I bolt - 0x00356846, // n0x09ef c0x0000 (---------------) + I casino - 0x00285804, // n0x09f0 c0x0000 (---------------) + I city - 0x0020ce42, // n0x09f1 c0x0000 (---------------) + I co - 0x00336ac7, // n0x09f2 c0x0000 (---------------) + I erotica - 0x002505c7, // n0x09f3 c0x0000 (---------------) + I erotika - 0x0024b184, // n0x09f4 c0x0000 (---------------) + I film - 0x0025aec5, // n0x09f5 c0x0000 (---------------) + I forum - 0x00345d85, // n0x09f6 c0x0000 (---------------) + I games - 0x00234305, // n0x09f7 c0x0000 (---------------) + I hotel - 0x00201844, // n0x09f8 c0x0000 (---------------) + I info - 0x00227c88, // n0x09f9 c0x0000 (---------------) + I ingatlan - 0x00294b06, // n0x09fa c0x0000 (---------------) + I jogasz - 0x002d20c8, // n0x09fb c0x0000 (---------------) + I konyvelo - 0x0023bb85, // n0x09fc c0x0000 (---------------) + I lakas - 0x00303545, // 
n0x09fd c0x0000 (---------------) + I media - 0x0021e604, // n0x09fe c0x0000 (---------------) + I news - 0x00228743, // n0x09ff c0x0000 (---------------) + I org - 0x002e17c4, // n0x0a00 c0x0000 (---------------) + I priv - 0x0034fec6, // n0x0a01 c0x0000 (---------------) + I reklam - 0x00246b03, // n0x0a02 c0x0000 (---------------) + I sex - 0x00357184, // n0x0a03 c0x0000 (---------------) + I shop - 0x0029c605, // n0x0a04 c0x0000 (---------------) + I sport - 0x0023a984, // n0x0a05 c0x0000 (---------------) + I suli - 0x0020a8c4, // n0x0a06 c0x0000 (---------------) + I szex - 0x00200c82, // n0x0a07 c0x0000 (---------------) + I tm - 0x00274bc6, // n0x0a08 c0x0000 (---------------) + I tozsde - 0x00387586, // n0x0a09 c0x0000 (---------------) + I utazas - 0x002f1145, // n0x0a0a c0x0000 (---------------) + I video - 0x00200342, // n0x0a0b c0x0000 (---------------) + I ac - 0x00331a83, // n0x0a0c c0x0000 (---------------) + I biz - 0x1b60ce42, // n0x0a0d c0x006d (n0x0a16-n0x0a17) + I co - 0x0023a484, // n0x0a0e c0x0000 (---------------) + I desa - 0x00210a42, // n0x0a0f c0x0000 (---------------) + I go - 0x00207dc3, // n0x0a10 c0x0000 (---------------) + I mil - 0x00225742, // n0x0a11 c0x0000 (---------------) + I my - 0x00223b43, // n0x0a12 c0x0000 (---------------) + I net - 0x00200dc2, // n0x0a13 c0x0000 (---------------) + I or - 0x00217283, // n0x0a14 c0x0000 (---------------) + I sch - 0x0021e243, // n0x0a15 c0x0000 (---------------) + I web - 0x000fe108, // n0x0a16 c0x0000 (---------------) + blogspot - 0x000fe108, // n0x0a17 c0x0000 (---------------) + blogspot - 0x0027d903, // n0x0a18 c0x0000 (---------------) + I gov - 0x00200342, // n0x0a19 c0x0000 (---------------) + I ac - 0x1c20ce42, // n0x0a1a c0x0070 (n0x0a21-n0x0a22) + I co - 0x0027d903, // n0x0a1b c0x0000 (---------------) + I gov - 0x002677c3, // n0x0a1c c0x0000 (---------------) + I idf - 0x00332603, // n0x0a1d c0x0000 (---------------) + I k12 - 0x0022d004, // n0x0a1e c0x0000 (---------------) 
+ I muni - 0x00223b43, // n0x0a1f c0x0000 (---------------) + I net - 0x00228743, // n0x0a20 c0x0000 (---------------) + I org - 0x000fe108, // n0x0a21 c0x0000 (---------------) + blogspot - 0x00200342, // n0x0a22 c0x0000 (---------------) + I ac - 0x1ca0ce42, // n0x0a23 c0x0072 (n0x0a29-n0x0a2b) + I co - 0x00233243, // n0x0a24 c0x0000 (---------------) + I com - 0x00223b43, // n0x0a25 c0x0000 (---------------) + I net - 0x00228743, // n0x0a26 c0x0000 (---------------) + I org - 0x0020fac2, // n0x0a27 c0x0000 (---------------) + I tt - 0x002203c2, // n0x0a28 c0x0000 (---------------) + I tv - 0x00312883, // n0x0a29 c0x0000 (---------------) + I ltd - 0x002d96c3, // n0x0a2a c0x0000 (---------------) + I plc - 0x00200342, // n0x0a2b c0x0000 (---------------) + I ac - 0x000fe108, // n0x0a2c c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x0a2d c0x0000 (---------------) + I co - 0x00239103, // n0x0a2e c0x0000 (---------------) + I edu - 0x0024dcc4, // n0x0a2f c0x0000 (---------------) + I firm - 0x002060c3, // n0x0a30 c0x0000 (---------------) + I gen - 0x0027d903, // n0x0a31 c0x0000 (---------------) + I gov - 0x00221b03, // n0x0a32 c0x0000 (---------------) + I ind - 0x00207dc3, // n0x0a33 c0x0000 (---------------) + I mil - 0x00223b43, // n0x0a34 c0x0000 (---------------) + I net - 0x0021b843, // n0x0a35 c0x0000 (---------------) + I nic - 0x00228743, // n0x0a36 c0x0000 (---------------) + I org - 0x00221903, // n0x0a37 c0x0000 (---------------) + I res - 0x0011ec53, // n0x0a38 c0x0000 (---------------) + barrel-of-knowledge - 0x00122f54, // n0x0a39 c0x0000 (---------------) + barrell-of-knowledge - 0x00013206, // n0x0a3a c0x0000 (---------------) + dyndns - 0x00056d07, // n0x0a3b c0x0000 (---------------) + for-our - 0x00152f89, // n0x0a3c c0x0000 (---------------) + groks-the - 0x000f4c0a, // n0x0a3d c0x0000 (---------------) + groks-this - 0x0008a30d, // n0x0a3e c0x0000 (---------------) + here-for-more - 0x0003dd0a, // n0x0a3f c0x0000 (---------------) + 
knowsitall - 0x0005abc6, // n0x0a40 c0x0000 (---------------) + selfip - 0x001267c6, // n0x0a41 c0x0000 (---------------) + webhop - 0x00205382, // n0x0a42 c0x0000 (---------------) + I eu - 0x00233243, // n0x0a43 c0x0000 (---------------) + I com - 0x000cbb86, // n0x0a44 c0x0000 (---------------) + github - 0x00152f45, // n0x0a45 c0x0000 (---------------) + ngrok - 0x0000ddc3, // n0x0a46 c0x0000 (---------------) + nid - 0x00010b08, // n0x0a47 c0x0000 (---------------) + pantheon - 0x000b1c48, // n0x0a48 c0x0000 (---------------) + sandcats - 0x00233243, // n0x0a49 c0x0000 (---------------) + I com - 0x00239103, // n0x0a4a c0x0000 (---------------) + I edu - 0x0027d903, // n0x0a4b c0x0000 (---------------) + I gov - 0x00207dc3, // n0x0a4c c0x0000 (---------------) + I mil - 0x00223b43, // n0x0a4d c0x0000 (---------------) + I net - 0x00228743, // n0x0a4e c0x0000 (---------------) + I org - 0x00200342, // n0x0a4f c0x0000 (---------------) + I ac - 0x0020ce42, // n0x0a50 c0x0000 (---------------) + I co - 0x0027d903, // n0x0a51 c0x0000 (---------------) + I gov - 0x0020d9c2, // n0x0a52 c0x0000 (---------------) + I id - 0x00223b43, // n0x0a53 c0x0000 (---------------) + I net - 0x00228743, // n0x0a54 c0x0000 (---------------) + I org - 0x00217283, // n0x0a55 c0x0000 (---------------) + I sch - 0x0035cf4f, // n0x0a56 c0x0000 (---------------) + I xn--mgba3a4f16a - 0x0035d30e, // n0x0a57 c0x0000 (---------------) + I xn--mgba3a4fra - 0x000fe108, // n0x0a58 c0x0000 (---------------) + blogspot - 0x00233243, // n0x0a59 c0x0000 (---------------) + I com - 0x00047607, // n0x0a5a c0x0000 (---------------) + cupcake - 0x00239103, // n0x0a5b c0x0000 (---------------) + I edu - 0x0027d903, // n0x0a5c c0x0000 (---------------) + I gov - 0x00201503, // n0x0a5d c0x0000 (---------------) + I int - 0x00223b43, // n0x0a5e c0x0000 (---------------) + I net - 0x00228743, // n0x0a5f c0x0000 (---------------) + I org - 0x00221883, // n0x0a60 c0x0000 (---------------) + I abr - 
0x00328e47, // n0x0a61 c0x0000 (---------------) + I abruzzo - 0x00201b82, // n0x0a62 c0x0000 (---------------) + I ag - 0x003018c9, // n0x0a63 c0x0000 (---------------) + I agrigento - 0x00200d02, // n0x0a64 c0x0000 (---------------) + I al - 0x0038804b, // n0x0a65 c0x0000 (---------------) + I alessandria - 0x002e414a, // n0x0a66 c0x0000 (---------------) + I alto-adige - 0x002d2549, // n0x0a67 c0x0000 (---------------) + I altoadige - 0x002008c2, // n0x0a68 c0x0000 (---------------) + I an - 0x0034ea86, // n0x0a69 c0x0000 (---------------) + I ancona - 0x00291e55, // n0x0a6a c0x0000 (---------------) + I andria-barletta-trani - 0x00388195, // n0x0a6b c0x0000 (---------------) + I andria-trani-barletta - 0x00293953, // n0x0a6c c0x0000 (---------------) + I andriabarlettatrani - 0x00388713, // n0x0a6d c0x0000 (---------------) + I andriatranibarletta - 0x00202882, // n0x0a6e c0x0000 (---------------) + I ao - 0x00216b45, // n0x0a6f c0x0000 (---------------) + I aosta - 0x0025570c, // n0x0a70 c0x0000 (---------------) + I aosta-valley - 0x00216b4b, // n0x0a71 c0x0000 (---------------) + I aostavalley - 0x0024af85, // n0x0a72 c0x0000 (---------------) + I aoste - 0x00208082, // n0x0a73 c0x0000 (---------------) + I ap - 0x00200f02, // n0x0a74 c0x0000 (---------------) + I aq - 0x0036b5c6, // n0x0a75 c0x0000 (---------------) + I aquila - 0x00201d42, // n0x0a76 c0x0000 (---------------) + I ar - 0x0027a206, // n0x0a77 c0x0000 (---------------) + I arezzo - 0x0039728d, // n0x0a78 c0x0000 (---------------) + I ascoli-piceno - 0x00342fcc, // n0x0a79 c0x0000 (---------------) + I ascolipiceno - 0x00222b04, // n0x0a7a c0x0000 (---------------) + I asti - 0x00200482, // n0x0a7b c0x0000 (---------------) + I at - 0x00202f02, // n0x0a7c c0x0000 (---------------) + I av - 0x00220188, // n0x0a7d c0x0000 (---------------) + I avellino - 0x00200882, // n0x0a7e c0x0000 (---------------) + I ba - 0x00247a86, // n0x0a7f c0x0000 (---------------) + I balsan - 0x00248704, // n0x0a80 
c0x0000 (---------------) + I bari - 0x00292015, // n0x0a81 c0x0000 (---------------) + I barletta-trani-andria - 0x00293ad3, // n0x0a82 c0x0000 (---------------) + I barlettatraniandria - 0x00206d83, // n0x0a83 c0x0000 (---------------) + I bas - 0x0033030a, // n0x0a84 c0x0000 (---------------) + I basilicata - 0x00286007, // n0x0a85 c0x0000 (---------------) + I belluno - 0x002e56c9, // n0x0a86 c0x0000 (---------------) + I benevento - 0x0022a747, // n0x0a87 c0x0000 (---------------) + I bergamo - 0x00304a82, // n0x0a88 c0x0000 (---------------) + I bg - 0x00200002, // n0x0a89 c0x0000 (---------------) + I bi - 0x003a5706, // n0x0a8a c0x0000 (---------------) + I biella - 0x0020db02, // n0x0a8b c0x0000 (---------------) + I bl - 0x000fe108, // n0x0a8c c0x0000 (---------------) + blogspot - 0x00212142, // n0x0a8d c0x0000 (---------------) + I bn - 0x0020f682, // n0x0a8e c0x0000 (---------------) + I bo - 0x0038d0c7, // n0x0a8f c0x0000 (---------------) + I bologna - 0x00211807, // n0x0a90 c0x0000 (---------------) + I bolzano - 0x0021e845, // n0x0a91 c0x0000 (---------------) + I bozen - 0x0021e3c2, // n0x0a92 c0x0000 (---------------) + I br - 0x002218c7, // n0x0a93 c0x0000 (---------------) + I brescia - 0x00221a88, // n0x0a94 c0x0000 (---------------) + I brindisi - 0x00230fc2, // n0x0a95 c0x0000 (---------------) + I bs - 0x00223a42, // n0x0a96 c0x0000 (---------------) + I bt - 0x00230682, // n0x0a97 c0x0000 (---------------) + I bz - 0x00200e42, // n0x0a98 c0x0000 (---------------) + I ca - 0x0023f288, // n0x0a99 c0x0000 (---------------) + I cagliari - 0x00212ec3, // n0x0a9a c0x0000 (---------------) + I cal - 0x00254cc8, // n0x0a9b c0x0000 (---------------) + I calabria - 0x0023970d, // n0x0a9c c0x0000 (---------------) + I caltanissetta - 0x0021e303, // n0x0a9d c0x0000 (---------------) + I cam - 0x00319488, // n0x0a9e c0x0000 (---------------) + I campania - 0x00241bcf, // n0x0a9f c0x0000 (---------------) + I campidano-medio - 0x00241f8e, // n0x0aa0 
c0x0000 (---------------) + I campidanomedio - 0x00336c0a, // n0x0aa1 c0x0000 (---------------) + I campobasso - 0x002f5d11, // n0x0aa2 c0x0000 (---------------) + I carbonia-iglesias - 0x002f6190, // n0x0aa3 c0x0000 (---------------) + I carboniaiglesias - 0x002b584d, // n0x0aa4 c0x0000 (---------------) + I carrara-massa - 0x002b5b8c, // n0x0aa5 c0x0000 (---------------) + I carraramassa - 0x0023d147, // n0x0aa6 c0x0000 (---------------) + I caserta - 0x00330487, // n0x0aa7 c0x0000 (---------------) + I catania - 0x00353cc9, // n0x0aa8 c0x0000 (---------------) + I catanzaro - 0x00222c02, // n0x0aa9 c0x0000 (---------------) + I cb - 0x00204c82, // n0x0aaa c0x0000 (---------------) + I ce - 0x0025874c, // n0x0aab c0x0000 (---------------) + I cesena-forli - 0x00258a4b, // n0x0aac c0x0000 (---------------) + I cesenaforli - 0x00200382, // n0x0aad c0x0000 (---------------) + I ch - 0x002d0206, // n0x0aae c0x0000 (---------------) + I chieti - 0x00209602, // n0x0aaf c0x0000 (---------------) + I ci - 0x00207f42, // n0x0ab0 c0x0000 (---------------) + I cl - 0x0021dac2, // n0x0ab1 c0x0000 (---------------) + I cn - 0x0020ce42, // n0x0ab2 c0x0000 (---------------) + I co - 0x00233d44, // n0x0ab3 c0x0000 (---------------) + I como - 0x00240dc7, // n0x0ab4 c0x0000 (---------------) + I cosenza - 0x002051c2, // n0x0ab5 c0x0000 (---------------) + I cr - 0x00243cc7, // n0x0ab6 c0x0000 (---------------) + I cremona - 0x00244f47, // n0x0ab7 c0x0000 (---------------) + I crotone - 0x00210802, // n0x0ab8 c0x0000 (---------------) + I cs - 0x0022af82, // n0x0ab9 c0x0000 (---------------) + I ct - 0x002474c5, // n0x0aba c0x0000 (---------------) + I cuneo - 0x00200142, // n0x0abb c0x0000 (---------------) + I cz - 0x0025a10e, // n0x0abc c0x0000 (---------------) + I dell-ogliastra - 0x0026548d, // n0x0abd c0x0000 (---------------) + I dellogliastra - 0x00239103, // n0x0abe c0x0000 (---------------) + I edu - 0x003285ce, // n0x0abf c0x0000 (---------------) + I emilia-romagna - 
0x0028510d, // n0x0ac0 c0x0000 (---------------) + I emiliaromagna - 0x00361083, // n0x0ac1 c0x0000 (---------------) + I emr - 0x00202a82, // n0x0ac2 c0x0000 (---------------) + I en - 0x00206304, // n0x0ac3 c0x0000 (---------------) + I enna - 0x0024a982, // n0x0ac4 c0x0000 (---------------) + I fc - 0x0020cf02, // n0x0ac5 c0x0000 (---------------) + I fe - 0x002d7ac5, // n0x0ac6 c0x0000 (---------------) + I fermo - 0x002e20c7, // n0x0ac7 c0x0000 (---------------) + I ferrara - 0x00349342, // n0x0ac8 c0x0000 (---------------) + I fg - 0x00201702, // n0x0ac9 c0x0000 (---------------) + I fi - 0x0024d607, // n0x0aca c0x0000 (---------------) + I firenze - 0x00252708, // n0x0acb c0x0000 (---------------) + I florence - 0x00234802, // n0x0acc c0x0000 (---------------) + I fm - 0x002018c6, // n0x0acd c0x0000 (---------------) + I foggia - 0x002585cc, // n0x0ace c0x0000 (---------------) + I forli-cesena - 0x0025890b, // n0x0acf c0x0000 (---------------) + I forlicesena - 0x00240202, // n0x0ad0 c0x0000 (---------------) + I fr - 0x0025e98f, // n0x0ad1 c0x0000 (---------------) + I friuli-v-giulia - 0x0025ed50, // n0x0ad2 c0x0000 (---------------) + I friuli-ve-giulia - 0x0025f14f, // n0x0ad3 c0x0000 (---------------) + I friuli-vegiulia - 0x0025f515, // n0x0ad4 c0x0000 (---------------) + I friuli-venezia-giulia - 0x0025fa54, // n0x0ad5 c0x0000 (---------------) + I friuli-veneziagiulia - 0x0025ff4e, // n0x0ad6 c0x0000 (---------------) + I friuli-vgiulia - 0x002602ce, // n0x0ad7 c0x0000 (---------------) + I friuliv-giulia - 0x0026064f, // n0x0ad8 c0x0000 (---------------) + I friulive-giulia - 0x00260a0e, // n0x0ad9 c0x0000 (---------------) + I friulivegiulia - 0x00260d94, // n0x0ada c0x0000 (---------------) + I friulivenezia-giulia - 0x00261293, // n0x0adb c0x0000 (---------------) + I friuliveneziagiulia - 0x0026174d, // n0x0adc c0x0000 (---------------) + I friulivgiulia - 0x002765c9, // n0x0add c0x0000 (---------------) + I frosinone - 0x0028aec3, // n0x0ade 
c0x0000 (---------------) + I fvg - 0x00200282, // n0x0adf c0x0000 (---------------) + I ge - 0x003077c5, // n0x0ae0 c0x0000 (---------------) + I genoa - 0x002060c6, // n0x0ae1 c0x0000 (---------------) + I genova - 0x00210a42, // n0x0ae2 c0x0000 (---------------) + I go - 0x002703c7, // n0x0ae3 c0x0000 (---------------) + I gorizia - 0x0027d903, // n0x0ae4 c0x0000 (---------------) + I gov - 0x00208a82, // n0x0ae5 c0x0000 (---------------) + I gr - 0x00288508, // n0x0ae6 c0x0000 (---------------) + I grosseto - 0x002f5f51, // n0x0ae7 c0x0000 (---------------) + I iglesias-carbonia - 0x002f6390, // n0x0ae8 c0x0000 (---------------) + I iglesiascarbonia - 0x00200402, // n0x0ae9 c0x0000 (---------------) + I im - 0x003578c7, // n0x0aea c0x0000 (---------------) + I imperia - 0x00204e82, // n0x0aeb c0x0000 (---------------) + I is - 0x0025d847, // n0x0aec c0x0000 (---------------) + I isernia - 0x0020bdc2, // n0x0aed c0x0000 (---------------) + I kr - 0x0026ad89, // n0x0aee c0x0000 (---------------) + I la-spezia - 0x0036b587, // n0x0aef c0x0000 (---------------) + I laquila - 0x002583c8, // n0x0af0 c0x0000 (---------------) + I laspezia - 0x00226e86, // n0x0af1 c0x0000 (---------------) + I latina - 0x002d95c3, // n0x0af2 c0x0000 (---------------) + I laz - 0x002f49c5, // n0x0af3 c0x0000 (---------------) + I lazio - 0x00239382, // n0x0af4 c0x0000 (---------------) + I lc - 0x0020c8c2, // n0x0af5 c0x0000 (---------------) + I le - 0x003a5b05, // n0x0af6 c0x0000 (---------------) + I lecce - 0x0022f645, // n0x0af7 c0x0000 (---------------) + I lecco - 0x00205bc2, // n0x0af8 c0x0000 (---------------) + I li - 0x0023aa03, // n0x0af9 c0x0000 (---------------) + I lig - 0x0023aa07, // n0x0afa c0x0000 (---------------) + I liguria - 0x002137c7, // n0x0afb c0x0000 (---------------) + I livorno - 0x00200d82, // n0x0afc c0x0000 (---------------) + I lo - 0x00259b44, // n0x0afd c0x0000 (---------------) + I lodi - 0x00214d03, // n0x0afe c0x0000 (---------------) + I lom - 
0x002c5f09, // n0x0aff c0x0000 (---------------) + I lombardia - 0x002db788, // n0x0b00 c0x0000 (---------------) + I lombardy - 0x00208bc2, // n0x0b01 c0x0000 (---------------) + I lt - 0x00203842, // n0x0b02 c0x0000 (---------------) + I lu - 0x0026e747, // n0x0b03 c0x0000 (---------------) + I lucania - 0x002da485, // n0x0b04 c0x0000 (---------------) + I lucca - 0x00318f88, // n0x0b05 c0x0000 (---------------) + I macerata - 0x00253e87, // n0x0b06 c0x0000 (---------------) + I mantova - 0x00201d03, // n0x0b07 c0x0000 (---------------) + I mar - 0x002843c6, // n0x0b08 c0x0000 (---------------) + I marche - 0x002b56cd, // n0x0b09 c0x0000 (---------------) + I massa-carrara - 0x002b5a4c, // n0x0b0a c0x0000 (---------------) + I massacarrara - 0x0024be06, // n0x0b0b c0x0000 (---------------) + I matera - 0x00205a82, // n0x0b0c c0x0000 (---------------) + I mb - 0x0021a3c2, // n0x0b0d c0x0000 (---------------) + I mc - 0x00200982, // n0x0b0e c0x0000 (---------------) + I me - 0x00241a4f, // n0x0b0f c0x0000 (---------------) + I medio-campidano - 0x00241e4e, // n0x0b10 c0x0000 (---------------) + I mediocampidano - 0x00345e07, // n0x0b11 c0x0000 (---------------) + I messina - 0x00207dc2, // n0x0b12 c0x0000 (---------------) + I mi - 0x002fb205, // n0x0b13 c0x0000 (---------------) + I milan - 0x002fb206, // n0x0b14 c0x0000 (---------------) + I milano - 0x00223b02, // n0x0b15 c0x0000 (---------------) + I mn - 0x00208442, // n0x0b16 c0x0000 (---------------) + I mo - 0x002177c6, // n0x0b17 c0x0000 (---------------) + I modena - 0x00212d43, // n0x0b18 c0x0000 (---------------) + I mol - 0x0025d786, // n0x0b19 c0x0000 (---------------) + I molise - 0x002c4b05, // n0x0b1a c0x0000 (---------------) + I monza - 0x002c4b0d, // n0x0b1b c0x0000 (---------------) + I monza-brianza - 0x002c5355, // n0x0b1c c0x0000 (---------------) + I monza-e-della-brianza - 0x002c5b0c, // n0x0b1d c0x0000 (---------------) + I monzabrianza - 0x002c63cd, // n0x0b1e c0x0000 (---------------) + 
I monzaebrianza - 0x002c6792, // n0x0b1f c0x0000 (---------------) + I monzaedellabrianza - 0x00210f42, // n0x0b20 c0x0000 (---------------) + I ms - 0x00205402, // n0x0b21 c0x0000 (---------------) + I mt - 0x00200902, // n0x0b22 c0x0000 (---------------) + I na - 0x0039ba46, // n0x0b23 c0x0000 (---------------) + I naples - 0x002a53c6, // n0x0b24 c0x0000 (---------------) + I napoli - 0x00201382, // n0x0b25 c0x0000 (---------------) + I no - 0x00206146, // n0x0b26 c0x0000 (---------------) + I novara - 0x00204182, // n0x0b27 c0x0000 (---------------) + I nu - 0x0039d545, // n0x0b28 c0x0000 (---------------) + I nuoro - 0x00201902, // n0x0b29 c0x0000 (---------------) + I og - 0x0025a249, // n0x0b2a c0x0000 (---------------) + I ogliastra - 0x0027548c, // n0x0b2b c0x0000 (---------------) + I olbia-tempio - 0x002757cb, // n0x0b2c c0x0000 (---------------) + I olbiatempio - 0x00200dc2, // n0x0b2d c0x0000 (---------------) + I or - 0x00252b48, // n0x0b2e c0x0000 (---------------) + I oristano - 0x00201102, // n0x0b2f c0x0000 (---------------) + I ot - 0x0020aac2, // n0x0b30 c0x0000 (---------------) + I pa - 0x00216906, // n0x0b31 c0x0000 (---------------) + I padova - 0x00361a05, // n0x0b32 c0x0000 (---------------) + I padua - 0x00305ec7, // n0x0b33 c0x0000 (---------------) + I palermo - 0x00394905, // n0x0b34 c0x0000 (---------------) + I parma - 0x002dbe45, // n0x0b35 c0x0000 (---------------) + I pavia - 0x00247682, // n0x0b36 c0x0000 (---------------) + I pc - 0x00357282, // n0x0b37 c0x0000 (---------------) + I pd - 0x00200582, // n0x0b38 c0x0000 (---------------) + I pe - 0x00266707, // n0x0b39 c0x0000 (---------------) + I perugia - 0x0022c3cd, // n0x0b3a c0x0000 (---------------) + I pesaro-urbino - 0x0022c74c, // n0x0b3b c0x0000 (---------------) + I pesarourbino - 0x00237007, // n0x0b3c c0x0000 (---------------) + I pescara - 0x00248ac2, // n0x0b3d c0x0000 (---------------) + I pg - 0x00219f82, // n0x0b3e c0x0000 (---------------) + I pi - 0x00338208, 
// n0x0b3f c0x0000 (---------------) + I piacenza - 0x00258e08, // n0x0b40 c0x0000 (---------------) + I piedmont - 0x002d68c8, // n0x0b41 c0x0000 (---------------) + I piemonte - 0x002df204, // n0x0b42 c0x0000 (---------------) + I pisa - 0x002d05c7, // n0x0b43 c0x0000 (---------------) + I pistoia - 0x002dd5c3, // n0x0b44 c0x0000 (---------------) + I pmn - 0x002488c2, // n0x0b45 c0x0000 (---------------) + I pn - 0x00206ec2, // n0x0b46 c0x0000 (---------------) + I po - 0x002df8c9, // n0x0b47 c0x0000 (---------------) + I pordenone - 0x00208187, // n0x0b48 c0x0000 (---------------) + I potenza - 0x00204e02, // n0x0b49 c0x0000 (---------------) + I pr - 0x00271845, // n0x0b4a c0x0000 (---------------) + I prato - 0x002d3242, // n0x0b4b c0x0000 (---------------) + I pt - 0x00235982, // n0x0b4c c0x0000 (---------------) + I pu - 0x00278643, // n0x0b4d c0x0000 (---------------) + I pug - 0x00278646, // n0x0b4e c0x0000 (---------------) + I puglia - 0x002e62c2, // n0x0b4f c0x0000 (---------------) + I pv - 0x002e70c2, // n0x0b50 c0x0000 (---------------) + I pz - 0x00202382, // n0x0b51 c0x0000 (---------------) + I ra - 0x0030e106, // n0x0b52 c0x0000 (---------------) + I ragusa - 0x00206247, // n0x0b53 c0x0000 (---------------) + I ravenna - 0x00200e02, // n0x0b54 c0x0000 (---------------) + I rc - 0x00208c82, // n0x0b55 c0x0000 (---------------) + I re - 0x0033d54f, // n0x0b56 c0x0000 (---------------) + I reggio-calabria - 0x0032840d, // n0x0b57 c0x0000 (---------------) + I reggio-emilia - 0x00254b4e, // n0x0b58 c0x0000 (---------------) + I reggiocalabria - 0x00284f8c, // n0x0b59 c0x0000 (---------------) + I reggioemilia - 0x00205b42, // n0x0b5a c0x0000 (---------------) + I rg - 0x00204e42, // n0x0b5b c0x0000 (---------------) + I ri - 0x00226cc5, // n0x0b5c c0x0000 (---------------) + I rieti - 0x00300186, // n0x0b5d c0x0000 (---------------) + I rimini - 0x00225a02, // n0x0b5e c0x0000 (---------------) + I rm - 0x0020dd82, // n0x0b5f c0x0000 
(---------------) + I rn - 0x002020c2, // n0x0b60 c0x0000 (---------------) + I ro - 0x0022cb44, // n0x0b61 c0x0000 (---------------) + I roma - 0x002e2f04, // n0x0b62 c0x0000 (---------------) + I rome - 0x00334ec6, // n0x0b63 c0x0000 (---------------) + I rovigo - 0x00201002, // n0x0b64 c0x0000 (---------------) + I sa - 0x00279547, // n0x0b65 c0x0000 (---------------) + I salerno - 0x0021adc3, // n0x0b66 c0x0000 (---------------) + I sar - 0x0021f988, // n0x0b67 c0x0000 (---------------) + I sardegna - 0x002216c8, // n0x0b68 c0x0000 (---------------) + I sardinia - 0x002d9c47, // n0x0b69 c0x0000 (---------------) + I sassari - 0x0039b946, // n0x0b6a c0x0000 (---------------) + I savona - 0x002091c2, // n0x0b6b c0x0000 (---------------) + I si - 0x0023f203, // n0x0b6c c0x0000 (---------------) + I sic - 0x00375147, // n0x0b6d c0x0000 (---------------) + I sicilia - 0x00337206, // n0x0b6e c0x0000 (---------------) + I sicily - 0x0031bf85, // n0x0b6f c0x0000 (---------------) + I siena - 0x00341fc8, // n0x0b70 c0x0000 (---------------) + I siracusa - 0x00205f02, // n0x0b71 c0x0000 (---------------) + I so - 0x0030b1c7, // n0x0b72 c0x0000 (---------------) + I sondrio - 0x00208142, // n0x0b73 c0x0000 (---------------) + I sp - 0x00332d82, // n0x0b74 c0x0000 (---------------) + I sr - 0x0020b702, // n0x0b75 c0x0000 (---------------) + I ss - 0x002d0e49, // n0x0b76 c0x0000 (---------------) + I suedtirol - 0x002365c2, // n0x0b77 c0x0000 (---------------) + I sv - 0x002004c2, // n0x0b78 c0x0000 (---------------) + I ta - 0x00237283, // n0x0b79 c0x0000 (---------------) + I taa - 0x00332207, // n0x0b7a c0x0000 (---------------) + I taranto - 0x00200a82, // n0x0b7b c0x0000 (---------------) + I te - 0x0027560c, // n0x0b7c c0x0000 (---------------) + I tempio-olbia - 0x0027590b, // n0x0b7d c0x0000 (---------------) + I tempioolbia - 0x0024be86, // n0x0b7e c0x0000 (---------------) + I teramo - 0x0020dd05, // n0x0b7f c0x0000 (---------------) + I terni - 0x0024fd82, // 
n0x0b80 c0x0000 (---------------) + I tn - 0x00206e42, // n0x0b81 c0x0000 (---------------) + I to - 0x002b34c6, // n0x0b82 c0x0000 (---------------) + I torino - 0x002230c3, // n0x0b83 c0x0000 (---------------) + I tos - 0x00324507, // n0x0b84 c0x0000 (---------------) + I toscana - 0x00210ac2, // n0x0b85 c0x0000 (---------------) + I tp - 0x00203902, // n0x0b86 c0x0000 (---------------) + I tr - 0x00291cd5, // n0x0b87 c0x0000 (---------------) + I trani-andria-barletta - 0x00388355, // n0x0b88 c0x0000 (---------------) + I trani-barletta-andria - 0x00293813, // n0x0b89 c0x0000 (---------------) + I traniandriabarletta - 0x00388893, // n0x0b8a c0x0000 (---------------) + I tranibarlettaandria - 0x0029c707, // n0x0b8b c0x0000 (---------------) + I trapani - 0x002bc488, // n0x0b8c c0x0000 (---------------) + I trentino - 0x002e9ad0, // n0x0b8d c0x0000 (---------------) + I trentino-a-adige - 0x0033adcf, // n0x0b8e c0x0000 (---------------) + I trentino-aadige - 0x0034ccd3, // n0x0b8f c0x0000 (---------------) + I trentino-alto-adige - 0x002d2312, // n0x0b90 c0x0000 (---------------) + I trentino-altoadige - 0x00301490, // n0x0b91 c0x0000 (---------------) + I trentino-s-tirol - 0x002bc48f, // n0x0b92 c0x0000 (---------------) + I trentino-stirol - 0x002c4692, // n0x0b93 c0x0000 (---------------) + I trentino-sud-tirol - 0x002cbf91, // n0x0b94 c0x0000 (---------------) + I trentino-sudtirol - 0x002ce653, // n0x0b95 c0x0000 (---------------) + I trentino-sued-tirol - 0x002d0c12, // n0x0b96 c0x0000 (---------------) + I trentino-suedtirol - 0x002d628f, // n0x0b97 c0x0000 (---------------) + I trentinoa-adige - 0x002df54e, // n0x0b98 c0x0000 (---------------) + I trentinoaadige - 0x002e3f52, // n0x0b99 c0x0000 (---------------) + I trentinoalto-adige - 0x002e4791, // n0x0b9a c0x0000 (---------------) + I trentinoaltoadige - 0x002e634f, // n0x0b9b c0x0000 (---------------) + I trentinos-tirol - 0x002e744e, // n0x0b9c c0x0000 (---------------) + I trentinostirol - 
0x002ee591, // n0x0b9d c0x0000 (---------------) + I trentinosud-tirol - 0x00334510, // n0x0b9e c0x0000 (---------------) + I trentinosudtirol - 0x002e8f12, // n0x0b9f c0x0000 (---------------) + I trentinosued-tirol - 0x002fa091, // n0x0ba0 c0x0000 (---------------) + I trentinosuedtirol - 0x002f7986, // n0x0ba1 c0x0000 (---------------) + I trento - 0x002f82c7, // n0x0ba2 c0x0000 (---------------) + I treviso - 0x00367507, // n0x0ba3 c0x0000 (---------------) + I trieste - 0x00207a42, // n0x0ba4 c0x0000 (---------------) + I ts - 0x0027ff05, // n0x0ba5 c0x0000 (---------------) + I turin - 0x002ee9c7, // n0x0ba6 c0x0000 (---------------) + I tuscany - 0x002203c2, // n0x0ba7 c0x0000 (---------------) + I tv - 0x00208002, // n0x0ba8 c0x0000 (---------------) + I ud - 0x0022bf85, // n0x0ba9 c0x0000 (---------------) + I udine - 0x00222403, // n0x0baa c0x0000 (---------------) + I umb - 0x00251a86, // n0x0bab c0x0000 (---------------) + I umbria - 0x0022c58d, // n0x0bac c0x0000 (---------------) + I urbino-pesaro - 0x0022c8cc, // n0x0bad c0x0000 (---------------) + I urbinopesaro - 0x00200c02, // n0x0bae c0x0000 (---------------) + I va - 0x0025558b, // n0x0baf c0x0000 (---------------) + I val-d-aosta - 0x00216a0a, // n0x0bb0 c0x0000 (---------------) + I val-daosta - 0x0032264a, // n0x0bb1 c0x0000 (---------------) + I vald-aosta - 0x002b2889, // n0x0bb2 c0x0000 (---------------) + I valdaosta - 0x002de48b, // n0x0bb3 c0x0000 (---------------) + I valle-aosta - 0x002ef0cd, // n0x0bb4 c0x0000 (---------------) + I valle-d-aosta - 0x00253fcc, // n0x0bb5 c0x0000 (---------------) + I valle-daosta - 0x0021af4a, // n0x0bb6 c0x0000 (---------------) + I valleaosta - 0x0022040c, // n0x0bb7 c0x0000 (---------------) + I valled-aosta - 0x0023fbcb, // n0x0bb8 c0x0000 (---------------) + I valledaosta - 0x0024adcc, // n0x0bb9 c0x0000 (---------------) + I vallee-aoste - 0x0024f1cb, // n0x0bba c0x0000 (---------------) + I valleeaoste - 0x00275403, // n0x0bbb c0x0000 
(---------------) + I vao - 0x0028bb86, // n0x0bbc c0x0000 (---------------) + I varese - 0x002dc442, // n0x0bbd c0x0000 (---------------) + I vb - 0x002e7802, // n0x0bbe c0x0000 (---------------) + I vc - 0x00211b83, // n0x0bbf c0x0000 (---------------) + I vda - 0x00202a42, // n0x0bc0 c0x0000 (---------------) + I ve - 0x00202a43, // n0x0bc1 c0x0000 (---------------) + I ven - 0x00368646, // n0x0bc2 c0x0000 (---------------) + I veneto - 0x0025f6c7, // n0x0bc3 c0x0000 (---------------) + I venezia - 0x00270846, // n0x0bc4 c0x0000 (---------------) + I venice - 0x0022e7c8, // n0x0bc5 c0x0000 (---------------) + I verbania - 0x002e3748, // n0x0bc6 c0x0000 (---------------) + I vercelli - 0x00360dc6, // n0x0bc7 c0x0000 (---------------) + I verona - 0x002065c2, // n0x0bc8 c0x0000 (---------------) + I vi - 0x002f0b0d, // n0x0bc9 c0x0000 (---------------) + I vibo-valentia - 0x002f0e4c, // n0x0bca c0x0000 (---------------) + I vibovalentia - 0x002aec87, // n0x0bcb c0x0000 (---------------) + I vicenza - 0x002f80c7, // n0x0bcc c0x0000 (---------------) + I viterbo - 0x0020de82, // n0x0bcd c0x0000 (---------------) + I vr - 0x00229a42, // n0x0bce c0x0000 (---------------) + I vs - 0x0026a302, // n0x0bcf c0x0000 (---------------) + I vt - 0x00215382, // n0x0bd0 c0x0000 (---------------) + I vv - 0x0020ce42, // n0x0bd1 c0x0000 (---------------) + I co - 0x00223b43, // n0x0bd2 c0x0000 (---------------) + I net - 0x00228743, // n0x0bd3 c0x0000 (---------------) + I org - 0x00233243, // n0x0bd4 c0x0000 (---------------) + I com - 0x00239103, // n0x0bd5 c0x0000 (---------------) + I edu - 0x0027d903, // n0x0bd6 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x0bd7 c0x0000 (---------------) + I mil - 0x00200904, // n0x0bd8 c0x0000 (---------------) + I name - 0x00223b43, // n0x0bd9 c0x0000 (---------------) + I net - 0x00228743, // n0x0bda c0x0000 (---------------) + I org - 0x00217283, // n0x0bdb c0x0000 (---------------) + I sch - 0x00200342, // n0x0bdc c0x0000 
(---------------) + I ac - 0x002001c2, // n0x0bdd c0x0000 (---------------) + I ad - 0x1fa90e05, // n0x0bde c0x007e (n0x0c4b-n0x0c7f) + I aichi - 0x1fe04785, // n0x0bdf c0x007f (n0x0c7f-n0x0c9b) + I akita - 0x2030ddc6, // n0x0be0 c0x0080 (n0x0c9b-n0x0cb1) + I aomori - 0x000fe108, // n0x0be1 c0x0000 (---------------) + blogspot - 0x206afa45, // n0x0be2 c0x0081 (n0x0cb1-n0x0ceb) + I chiba - 0x0020ce42, // n0x0be3 c0x0000 (---------------) + I co - 0x002024c2, // n0x0be4 c0x0000 (---------------) + I ed - 0x20b54dc5, // n0x0be5 c0x0082 (n0x0ceb-n0x0d01) + I ehime - 0x20e7de85, // n0x0be6 c0x0083 (n0x0d01-n0x0d10) + I fukui - 0x2127f5c7, // n0x0be7 c0x0084 (n0x0d10-n0x0d4f) + I fukuoka - 0x217547c9, // n0x0be8 c0x0085 (n0x0d4f-n0x0d82) + I fukushima - 0x21aae0c4, // n0x0be9 c0x0086 (n0x0d82-n0x0da8) + I gifu - 0x00210a42, // n0x0bea c0x0000 (---------------) + I go - 0x00208a82, // n0x0beb c0x0000 (---------------) + I gr - 0x21e45885, // n0x0bec c0x0087 (n0x0da8-n0x0dcc) + I gunma - 0x22209e49, // n0x0bed c0x0088 (n0x0dcc-n0x0de5) + I hiroshima - 0x2276a608, // n0x0bee c0x0089 (n0x0de5-n0x0e73) + I hokkaido - 0x22aacdc5, // n0x0bef c0x008a (n0x0e73-n0x0ea1) + I hyogo - 0x22ec3847, // n0x0bf0 c0x008b (n0x0ea1-n0x0ed4) + I ibaraki - 0x2321b2c8, // n0x0bf1 c0x008c (n0x0ed4-n0x0ee7) + I ishikawa - 0x236d7d45, // n0x0bf2 c0x008d (n0x0ee7-n0x0f09) + I iwate - 0x23a01b46, // n0x0bf3 c0x008e (n0x0f09-n0x0f18) + I kagawa - 0x23e75f89, // n0x0bf4 c0x008f (n0x0f18-n0x0f2c) + I kagoshima - 0x2430ad88, // n0x0bf5 c0x0090 (n0x0f2c-n0x0f4a) + I kanagawa - 0x246ba408, // n0x0bf6 c0x0091 (n0x0f4a-n0x0f4b)* o I kawasaki - 0x24a9e08a, // n0x0bf7 c0x0092 (n0x0f4b-n0x0f4c)* o I kitakyushu - 0x24e4fa44, // n0x0bf8 c0x0093 (n0x0f4c-n0x0f4d)* o I kobe - 0x252cc705, // n0x0bf9 c0x0094 (n0x0f4d-n0x0f6c) + I kochi - 0x256b5448, // n0x0bfa c0x0095 (n0x0f6c-n0x0f86) + I kumamoto - 0x25ac0005, // n0x0bfb c0x0096 (n0x0f86-n0x0fa5) + I kyoto - 0x0021b942, // n0x0bfc c0x0000 (---------------) + I lg 
- 0x25e65083, // n0x0bfd c0x0097 (n0x0fa5-n0x0fc3) + I mie - 0x262a4006, // n0x0bfe c0x0098 (n0x0fc3-n0x0fe4) + I miyagi - 0x26666248, // n0x0bff c0x0099 (n0x0fe4-n0x0fff) + I miyazaki - 0x26b52206, // n0x0c00 c0x009a (n0x0fff-n0x104a) + I nagano - 0x26ee33c8, // n0x0c01 c0x009b (n0x104a-n0x1060) + I nagasaki - 0x27255a46, // n0x0c02 c0x009c (n0x1060-n0x1061)* o I nagoya - 0x2771c044, // n0x0c03 c0x009d (n0x1061-n0x1087) + I nara - 0x00202ac2, // n0x0c04 c0x0000 (---------------) + I ne - 0x27a350c7, // n0x0c05 c0x009e (n0x1087-n0x10a9) + I niigata - 0x27ea9804, // n0x0c06 c0x009f (n0x10a9-n0x10bc) + I oita - 0x28278bc7, // n0x0c07 c0x00a0 (n0x10bc-n0x10d6) + I okayama - 0x28795107, // n0x0c08 c0x00a1 (n0x10d6-n0x1100) + I okinawa - 0x00200dc2, // n0x0c09 c0x0000 (---------------) + I or - 0x28a9b185, // n0x0c0a c0x00a2 (n0x1100-n0x1132) + I osaka - 0x28f78a84, // n0x0c0b c0x00a3 (n0x1132-n0x114c) + I saga - 0x29373887, // n0x0c0c c0x00a4 (n0x114c-n0x1191) + I saitama - 0x29617047, // n0x0c0d c0x00a5 (n0x1191-n0x1192)* o I sapporo - 0x29a83506, // n0x0c0e c0x00a6 (n0x1192-n0x1193)* o I sendai - 0x29e235c5, // n0x0c0f c0x00a7 (n0x1193-n0x11aa) + I shiga - 0x2a295687, // n0x0c10 c0x00a8 (n0x11aa-n0x11c1) + I shimane - 0x2a6b13c8, // n0x0c11 c0x00a9 (n0x11c1-n0x11e5) + I shizuoka - 0x2ab44807, // n0x0c12 c0x00aa (n0x11e5-n0x1204) + I tochigi - 0x2ae90009, // n0x0c13 c0x00ab (n0x1204-n0x1215) + I tokushima - 0x2b342205, // n0x0c14 c0x00ac (n0x1215-n0x124e) + I tokyo - 0x2b6f7a87, // n0x0c15 c0x00ad (n0x124e-n0x125b) + I tottori - 0x2ba902c6, // n0x0c16 c0x00ae (n0x125b-n0x1273) + I toyama - 0x2be27348, // n0x0c17 c0x00af (n0x1273-n0x1290) + I wakayama - 0x002313cd, // n0x0c18 c0x0000 (---------------) + I xn--0trq7p7nn - 0x00253349, // n0x0c19 c0x0000 (---------------) + I xn--1ctwo - 0x0025c5cb, // n0x0c1a c0x0000 (---------------) + I xn--1lqs03n - 0x00268ecb, // n0x0c1b c0x0000 (---------------) + I xn--1lqs71d - 0x0027c4cb, // n0x0c1c c0x0000 (---------------) + I 
xn--2m4a15e - 0x002a830b, // n0x0c1d c0x0000 (---------------) + I xn--32vp30h - 0x00300a8b, // n0x0c1e c0x0000 (---------------) + I xn--4it168d - 0x00300d4b, // n0x0c1f c0x0000 (---------------) + I xn--4it797k - 0x00302ac9, // n0x0c20 c0x0000 (---------------) + I xn--4pvxs - 0x00303bcb, // n0x0c21 c0x0000 (---------------) + I xn--5js045d - 0x00303e8b, // n0x0c22 c0x0000 (---------------) + I xn--5rtp49c - 0x0030430b, // n0x0c23 c0x0000 (---------------) + I xn--5rtq34k - 0x00304e0a, // n0x0c24 c0x0000 (---------------) + I xn--6btw5a - 0x0030534a, // n0x0c25 c0x0000 (---------------) + I xn--6orx2r - 0x0030594c, // n0x0c26 c0x0000 (---------------) + I xn--7t0a264c - 0x0030bdcb, // n0x0c27 c0x0000 (---------------) + I xn--8ltr62k - 0x0030c78a, // n0x0c28 c0x0000 (---------------) + I xn--8pvr4u - 0x0031b80a, // n0x0c29 c0x0000 (---------------) + I xn--c3s14m - 0x0032a50e, // n0x0c2a c0x0000 (---------------) + I xn--d5qv7z876c - 0x0032b7ce, // n0x0c2b c0x0000 (---------------) + I xn--djrs72d6uy - 0x0032bb4a, // n0x0c2c c0x0000 (---------------) + I xn--djty4k - 0x0032d24a, // n0x0c2d c0x0000 (---------------) + I xn--efvn9s - 0x0032db8b, // n0x0c2e c0x0000 (---------------) + I xn--ehqz56n - 0x0032de4b, // n0x0c2f c0x0000 (---------------) + I xn--elqq16h - 0x0032eb8b, // n0x0c30 c0x0000 (---------------) + I xn--f6qx53a - 0x0034624b, // n0x0c31 c0x0000 (---------------) + I xn--k7yn95e - 0x0034684a, // n0x0c32 c0x0000 (---------------) + I xn--kbrq7o - 0x0034750b, // n0x0c33 c0x0000 (---------------) + I xn--klt787d - 0x003477ca, // n0x0c34 c0x0000 (---------------) + I xn--kltp7d - 0x00347a4a, // n0x0c35 c0x0000 (---------------) + I xn--kltx9a - 0x00347cca, // n0x0c36 c0x0000 (---------------) + I xn--klty5x - 0x00367fcb, // n0x0c37 c0x0000 (---------------) + I xn--mkru45i - 0x0036fd0b, // n0x0c38 c0x0000 (---------------) + I xn--nit225k - 0x0037194e, // n0x0c39 c0x0000 (---------------) + I xn--ntso0iqx3a - 0x00371ccb, // n0x0c3a c0x0000 
(---------------) + I xn--ntsq17g - 0x0037a8cb, // n0x0c3b c0x0000 (---------------) + I xn--pssu33l - 0x0037c50b, // n0x0c3c c0x0000 (---------------) + I xn--qqqt11m - 0x0037f34a, // n0x0c3d c0x0000 (---------------) + I xn--rht27z - 0x0037f5c9, // n0x0c3e c0x0000 (---------------) + I xn--rht3d - 0x0037f80a, // n0x0c3f c0x0000 (---------------) + I xn--rht61e - 0x00380e8a, // n0x0c40 c0x0000 (---------------) + I xn--rny31h - 0x0039244b, // n0x0c41 c0x0000 (---------------) + I xn--tor131o - 0x00393f8b, // n0x0c42 c0x0000 (---------------) + I xn--uist22h - 0x00394a4a, // n0x0c43 c0x0000 (---------------) + I xn--uisz3g - 0x00395d8b, // n0x0c44 c0x0000 (---------------) + I xn--uuwu58a - 0x0039b38b, // n0x0c45 c0x0000 (---------------) + I xn--vgu402c - 0x003a41cb, // n0x0c46 c0x0000 (---------------) + I xn--zbx025d - 0x2c2815c8, // n0x0c47 c0x00b0 (n0x1290-n0x12b2) + I yamagata - 0x2c689a89, // n0x0c48 c0x00b1 (n0x12b2-n0x12c2) + I yamaguchi - 0x2caa2d49, // n0x0c49 c0x00b2 (n0x12c2-n0x12de) + I yamanashi - 0x2ced1a88, // n0x0c4a c0x00b3 (n0x12de-n0x12df)* o I yokohama - 0x00334c45, // n0x0c4b c0x0000 (---------------) + I aisai - 0x00201f03, // n0x0c4c c0x0000 (---------------) + I ama - 0x00201044, // n0x0c4d c0x0000 (---------------) + I anjo - 0x00360f85, // n0x0c4e c0x0000 (---------------) + I asuke - 0x002ade06, // n0x0c4f c0x0000 (---------------) + I chiryu - 0x002b0305, // n0x0c50 c0x0000 (---------------) + I chita - 0x00289284, // n0x0c51 c0x0000 (---------------) + I fuso - 0x002702c8, // n0x0c52 c0x0000 (---------------) + I gamagori - 0x00256bc5, // n0x0c53 c0x0000 (---------------) + I handa - 0x00291884, // n0x0c54 c0x0000 (---------------) + I hazu - 0x002c5007, // n0x0c55 c0x0000 (---------------) + I hekinan - 0x0029e9ca, // n0x0c56 c0x0000 (---------------) + I higashiura - 0x002d37ca, // n0x0c57 c0x0000 (---------------) + I ichinomiya - 0x00302f47, // n0x0c58 c0x0000 (---------------) + I inazawa - 0x00204147, // n0x0c59 c0x0000 
(---------------) + I inuyama - 0x002ed587, // n0x0c5a c0x0000 (---------------) + I isshiki - 0x0022cc87, // n0x0c5b c0x0000 (---------------) + I iwakura - 0x0029a545, // n0x0c5c c0x0000 (---------------) + I kanie - 0x00324906, // n0x0c5d c0x0000 (---------------) + I kariya - 0x00311947, // n0x0c5e c0x0000 (---------------) + I kasugai - 0x0024c144, // n0x0c5f c0x0000 (---------------) + I kira - 0x00358786, // n0x0c60 c0x0000 (---------------) + I kiyosu - 0x0028ee46, // n0x0c61 c0x0000 (---------------) + I komaki - 0x00203b05, // n0x0c62 c0x0000 (---------------) + I konan - 0x00355204, // n0x0c63 c0x0000 (---------------) + I kota - 0x002a9086, // n0x0c64 c0x0000 (---------------) + I mihama - 0x0029dbc7, // n0x0c65 c0x0000 (---------------) + I miyoshi - 0x002210c6, // n0x0c66 c0x0000 (---------------) + I nishio - 0x0022fe47, // n0x0c67 c0x0000 (---------------) + I nisshin - 0x0027d083, // n0x0c68 c0x0000 (---------------) + I obu - 0x00252306, // n0x0c69 c0x0000 (---------------) + I oguchi - 0x00236705, // n0x0c6a c0x0000 (---------------) + I oharu - 0x0027f6c7, // n0x0c6b c0x0000 (---------------) + I okazaki - 0x002c09ca, // n0x0c6c c0x0000 (---------------) + I owariasahi - 0x00288604, // n0x0c6d c0x0000 (---------------) + I seto - 0x002197c8, // n0x0c6e c0x0000 (---------------) + I shikatsu - 0x00381509, // n0x0c6f c0x0000 (---------------) + I shinshiro - 0x002fd347, // n0x0c70 c0x0000 (---------------) + I shitara - 0x002e8886, // n0x0c71 c0x0000 (---------------) + I tahara - 0x0035e8c8, // n0x0c72 c0x0000 (---------------) + I takahama - 0x00307b49, // n0x0c73 c0x0000 (---------------) + I tobishima - 0x00368744, // n0x0c74 c0x0000 (---------------) + I toei - 0x002ff504, // n0x0c75 c0x0000 (---------------) + I togo - 0x002f91c5, // n0x0c76 c0x0000 (---------------) + I tokai - 0x002c1548, // n0x0c77 c0x0000 (---------------) + I tokoname - 0x002c1f87, // n0x0c78 c0x0000 (---------------) + I toyoake - 0x00285c49, // n0x0c79 c0x0000 
(---------------) + I toyohashi - 0x002472c8, // n0x0c7a c0x0000 (---------------) + I toyokawa - 0x00366b06, // n0x0c7b c0x0000 (---------------) + I toyone - 0x00264186, // n0x0c7c c0x0000 (---------------) + I toyota - 0x002980c8, // n0x0c7d c0x0000 (---------------) + I tsushima - 0x0036af06, // n0x0c7e c0x0000 (---------------) + I yatomi - 0x00204785, // n0x0c7f c0x0000 (---------------) + I akita - 0x002835c6, // n0x0c80 c0x0000 (---------------) + I daisen - 0x00278ec8, // n0x0c81 c0x0000 (---------------) + I fujisato - 0x00239506, // n0x0c82 c0x0000 (---------------) + I gojome - 0x00250e8b, // n0x0c83 c0x0000 (---------------) + I hachirogata - 0x0028d246, // n0x0c84 c0x0000 (---------------) + I happou - 0x00299acd, // n0x0c85 c0x0000 (---------------) + I higashinaruse - 0x0038d6c5, // n0x0c86 c0x0000 (---------------) + I honjo - 0x002a96c6, // n0x0c87 c0x0000 (---------------) + I honjyo - 0x0021b385, // n0x0c88 c0x0000 (---------------) + I ikawa - 0x002971c9, // n0x0c89 c0x0000 (---------------) + I kamikoani - 0x0030a307, // n0x0c8a c0x0000 (---------------) + I kamioka - 0x00376ec8, // n0x0c8b c0x0000 (---------------) + I katagami - 0x002db5c6, // n0x0c8c c0x0000 (---------------) + I kazuno - 0x00298a49, // n0x0c8d c0x0000 (---------------) + I kitaakita - 0x002e2c86, // n0x0c8e c0x0000 (---------------) + I kosaka - 0x002c0945, // n0x0c8f c0x0000 (---------------) + I kyowa - 0x0022da46, // n0x0c90 c0x0000 (---------------) + I misato - 0x002b3246, // n0x0c91 c0x0000 (---------------) + I mitane - 0x002c81c9, // n0x0c92 c0x0000 (---------------) + I moriyoshi - 0x0034f4c6, // n0x0c93 c0x0000 (---------------) + I nikaho - 0x003744c7, // n0x0c94 c0x0000 (---------------) + I noshiro - 0x002c7905, // n0x0c95 c0x0000 (---------------) + I odate - 0x002028c3, // n0x0c96 c0x0000 (---------------) + I oga - 0x002270c5, // n0x0c97 c0x0000 (---------------) + I ogata - 0x002a7407, // n0x0c98 c0x0000 (---------------) + I semboku - 0x00331906, // 
n0x0c99 c0x0000 (---------------) + I yokote - 0x0038d5c9, // n0x0c9a c0x0000 (---------------) + I yurihonjo - 0x0030ddc6, // n0x0c9b c0x0000 (---------------) + I aomori - 0x0024ed86, // n0x0c9c c0x0000 (---------------) + I gonohe - 0x0020ed89, // n0x0c9d c0x0000 (---------------) + I hachinohe - 0x00282fc9, // n0x0c9e c0x0000 (---------------) + I hashikami - 0x002a0b87, // n0x0c9f c0x0000 (---------------) + I hiranai - 0x00327108, // n0x0ca0 c0x0000 (---------------) + I hirosaki - 0x0025e449, // n0x0ca1 c0x0000 (---------------) + I itayanagi - 0x0027fb88, // n0x0ca2 c0x0000 (---------------) + I kuroishi - 0x0037c786, // n0x0ca3 c0x0000 (---------------) + I misawa - 0x002d1485, // n0x0ca4 c0x0000 (---------------) + I mutsu - 0x002227ca, // n0x0ca5 c0x0000 (---------------) + I nakadomari - 0x0024ee06, // n0x0ca6 c0x0000 (---------------) + I noheji - 0x00206c06, // n0x0ca7 c0x0000 (---------------) + I oirase - 0x002a41c5, // n0x0ca8 c0x0000 (---------------) + I owani - 0x002ae408, // n0x0ca9 c0x0000 (---------------) + I rokunohe - 0x00209a87, // n0x0caa c0x0000 (---------------) + I sannohe - 0x00236a0a, // n0x0cab c0x0000 (---------------) + I shichinohe - 0x0024ec86, // n0x0cac c0x0000 (---------------) + I shingo - 0x0023fe05, // n0x0cad c0x0000 (---------------) + I takko - 0x00255cc6, // n0x0cae c0x0000 (---------------) + I towada - 0x0028f587, // n0x0caf c0x0000 (---------------) + I tsugaru - 0x002e8747, // n0x0cb0 c0x0000 (---------------) + I tsuruta - 0x00374145, // n0x0cb1 c0x0000 (---------------) + I abiko - 0x002c0b05, // n0x0cb2 c0x0000 (---------------) + I asahi - 0x002e6786, // n0x0cb3 c0x0000 (---------------) + I chonan - 0x002e7846, // n0x0cb4 c0x0000 (---------------) + I chosei - 0x002fee46, // n0x0cb5 c0x0000 (---------------) + I choshi - 0x0032c104, // n0x0cb6 c0x0000 (---------------) + I chuo - 0x00282089, // n0x0cb7 c0x0000 (---------------) + I funabashi - 0x0028a946, // n0x0cb8 c0x0000 (---------------) + I futtsu - 
0x00283b8a, // n0x0cb9 c0x0000 (---------------) + I hanamigawa - 0x00290e48, // n0x0cba c0x0000 (---------------) + I ichihara - 0x00263808, // n0x0cbb c0x0000 (---------------) + I ichikawa - 0x002d37ca, // n0x0cbc c0x0000 (---------------) + I ichinomiya - 0x00209705, // n0x0cbd c0x0000 (---------------) + I inzai - 0x0029db05, // n0x0cbe c0x0000 (---------------) + I isumi - 0x003083c8, // n0x0cbf c0x0000 (---------------) + I kamagaya - 0x002cc488, // n0x0cc0 c0x0000 (---------------) + I kamogawa - 0x002032c7, // n0x0cc1 c0x0000 (---------------) + I kashiwa - 0x0029ca86, // n0x0cc2 c0x0000 (---------------) + I katori - 0x00316708, // n0x0cc3 c0x0000 (---------------) + I katsuura - 0x0023e107, // n0x0cc4 c0x0000 (---------------) + I kimitsu - 0x00281a88, // n0x0cc5 c0x0000 (---------------) + I kisarazu - 0x00369606, // n0x0cc6 c0x0000 (---------------) + I kozaki - 0x00284148, // n0x0cc7 c0x0000 (---------------) + I kujukuri - 0x002b5f46, // n0x0cc8 c0x0000 (---------------) + I kyonan - 0x0023bd07, // n0x0cc9 c0x0000 (---------------) + I matsudo - 0x00299486, // n0x0cca c0x0000 (---------------) + I midori - 0x002a9086, // n0x0ccb c0x0000 (---------------) + I mihama - 0x0023484a, // n0x0ccc c0x0000 (---------------) + I minamiboso - 0x00233dc6, // n0x0ccd c0x0000 (---------------) + I mobara - 0x002d1489, // n0x0cce c0x0000 (---------------) + I mutsuzawa - 0x002afd46, // n0x0ccf c0x0000 (---------------) + I nagara - 0x002d2c8a, // n0x0cd0 c0x0000 (---------------) + I nagareyama - 0x0031c049, // n0x0cd1 c0x0000 (---------------) + I narashino - 0x0035bd46, // n0x0cd2 c0x0000 (---------------) + I narita - 0x0037d844, // n0x0cd3 c0x0000 (---------------) + I noda - 0x0030788d, // n0x0cd4 c0x0000 (---------------) + I oamishirasato - 0x00289d07, // n0x0cd5 c0x0000 (---------------) + I omigawa - 0x00318d06, // n0x0cd6 c0x0000 (---------------) + I onjuku - 0x002ba2c5, // n0x0cd7 c0x0000 (---------------) + I otaki - 0x002e2d05, // n0x0cd8 c0x0000 
(---------------) + I sakae - 0x002dc646, // n0x0cd9 c0x0000 (---------------) + I sakura - 0x0028fc49, // n0x0cda c0x0000 (---------------) + I shimofusa - 0x002a6b07, // n0x0cdb c0x0000 (---------------) + I shirako - 0x0027a7c6, // n0x0cdc c0x0000 (---------------) + I shiroi - 0x002fcc86, // n0x0cdd c0x0000 (---------------) + I shisui - 0x00289309, // n0x0cde c0x0000 (---------------) + I sodegaura - 0x0021d284, // n0x0cdf c0x0000 (---------------) + I sosa - 0x00229144, // n0x0ce0 c0x0000 (---------------) + I tako - 0x00201dc8, // n0x0ce1 c0x0000 (---------------) + I tateyama - 0x002b0fc6, // n0x0ce2 c0x0000 (---------------) + I togane - 0x002a0508, // n0x0ce3 c0x0000 (---------------) + I tohnosho - 0x0022d9c8, // n0x0ce4 c0x0000 (---------------) + I tomisato - 0x0027eac7, // n0x0ce5 c0x0000 (---------------) + I urayasu - 0x00200309, // n0x0ce6 c0x0000 (---------------) + I yachimata - 0x002ff047, // n0x0ce7 c0x0000 (---------------) + I yachiyo - 0x002af90a, // n0x0ce8 c0x0000 (---------------) + I yokaichiba - 0x0022d38f, // n0x0ce9 c0x0000 (---------------) + I yokoshibahikari - 0x0026840a, // n0x0cea c0x0000 (---------------) + I yotsukaido - 0x00226ac5, // n0x0ceb c0x0000 (---------------) + I ainan - 0x00279105, // n0x0cec c0x0000 (---------------) + I honai - 0x00216505, // n0x0ced c0x0000 (---------------) + I ikata - 0x00248647, // n0x0cee c0x0000 (---------------) + I imabari - 0x00203543, // n0x0cef c0x0000 (---------------) + I iyo - 0x00327308, // n0x0cf0 c0x0000 (---------------) + I kamijima - 0x002f1746, // n0x0cf1 c0x0000 (---------------) + I kihoku - 0x002f1849, // n0x0cf2 c0x0000 (---------------) + I kumakogen - 0x003a4d86, // n0x0cf3 c0x0000 (---------------) + I masaki - 0x002c1947, // n0x0cf4 c0x0000 (---------------) + I matsuno - 0x00298809, // n0x0cf5 c0x0000 (---------------) + I matsuyama - 0x00376dc8, // n0x0cf6 c0x0000 (---------------) + I namikata - 0x002a4287, // n0x0cf7 c0x0000 (---------------) + I niihama - 
0x002ffa83, // n0x0cf8 c0x0000 (---------------) + I ozu - 0x00334cc5, // n0x0cf9 c0x0000 (---------------) + I saijo - 0x00379845, // n0x0cfa c0x0000 (---------------) + I seiyo - 0x0032bf4b, // n0x0cfb c0x0000 (---------------) + I shikokuchuo - 0x002c00c4, // n0x0cfc c0x0000 (---------------) + I tobe - 0x0020cc04, // n0x0cfd c0x0000 (---------------) + I toon - 0x00277e86, // n0x0cfe c0x0000 (---------------) + I uchiko - 0x002ffe07, // n0x0cff c0x0000 (---------------) + I uwajima - 0x0038e68a, // n0x0d00 c0x0000 (---------------) + I yawatahama - 0x00349887, // n0x0d01 c0x0000 (---------------) + I echizen - 0x003687c7, // n0x0d02 c0x0000 (---------------) + I eiheiji - 0x0027de85, // n0x0d03 c0x0000 (---------------) + I fukui - 0x00202445, // n0x0d04 c0x0000 (---------------) + I ikeda - 0x0021c9c9, // n0x0d05 c0x0000 (---------------) + I katsuyama - 0x002a9086, // n0x0d06 c0x0000 (---------------) + I mihama - 0x0034970d, // n0x0d07 c0x0000 (---------------) + I minamiechizen - 0x003954c5, // n0x0d08 c0x0000 (---------------) + I obama - 0x002a0703, // n0x0d09 c0x0000 (---------------) + I ohi - 0x0020c303, // n0x0d0a c0x0000 (---------------) + I ono - 0x002f20c5, // n0x0d0b c0x0000 (---------------) + I sabae - 0x0034a305, // n0x0d0c c0x0000 (---------------) + I sakai - 0x0035e8c8, // n0x0d0d c0x0000 (---------------) + I takahama - 0x0027bec7, // n0x0d0e c0x0000 (---------------) + I tsuruga - 0x00228ec6, // n0x0d0f c0x0000 (---------------) + I wakasa - 0x0029f086, // n0x0d10 c0x0000 (---------------) + I ashiya - 0x0022e9c5, // n0x0d11 c0x0000 (---------------) + I buzen - 0x002393c7, // n0x0d12 c0x0000 (---------------) + I chikugo - 0x002043c7, // n0x0d13 c0x0000 (---------------) + I chikuho - 0x002949c7, // n0x0d14 c0x0000 (---------------) + I chikujo - 0x002cc78a, // n0x0d15 c0x0000 (---------------) + I chikushino - 0x002523c8, // n0x0d16 c0x0000 (---------------) + I chikuzen - 0x0032c104, // n0x0d17 c0x0000 (---------------) + I chuo - 
0x00215507, // n0x0d18 c0x0000 (---------------) + I dazaifu - 0x0027d1c7, // n0x0d19 c0x0000 (---------------) + I fukuchi - 0x0032d806, // n0x0d1a c0x0000 (---------------) + I hakata - 0x00266d87, // n0x0d1b c0x0000 (---------------) + I higashi - 0x002d4208, // n0x0d1c c0x0000 (---------------) + I hirokawa - 0x002a2c48, // n0x0d1d c0x0000 (---------------) + I hisayama - 0x0026fd86, // n0x0d1e c0x0000 (---------------) + I iizuka - 0x0021a8c8, // n0x0d1f c0x0000 (---------------) + I inatsuki - 0x002c7d84, // n0x0d20 c0x0000 (---------------) + I kaho - 0x00311946, // n0x0d21 c0x0000 (---------------) + I kasuga - 0x0020ba06, // n0x0d22 c0x0000 (---------------) + I kasuya - 0x00203606, // n0x0d23 c0x0000 (---------------) + I kawara - 0x002f5006, // n0x0d24 c0x0000 (---------------) + I keisen - 0x0021fc44, // n0x0d25 c0x0000 (---------------) + I koga - 0x0022cd46, // n0x0d26 c0x0000 (---------------) + I kurate - 0x002b9346, // n0x0d27 c0x0000 (---------------) + I kurogi - 0x00297846, // n0x0d28 c0x0000 (---------------) + I kurume - 0x00223406, // n0x0d29 c0x0000 (---------------) + I minami - 0x0020c1c6, // n0x0d2a c0x0000 (---------------) + I miyako - 0x002d4046, // n0x0d2b c0x0000 (---------------) + I miyama - 0x00228dc8, // n0x0d2c c0x0000 (---------------) + I miyawaka - 0x00358608, // n0x0d2d c0x0000 (---------------) + I mizumaki - 0x002cd388, // n0x0d2e c0x0000 (---------------) + I munakata - 0x002b0508, // n0x0d2f c0x0000 (---------------) + I nakagawa - 0x00308346, // n0x0d30 c0x0000 (---------------) + I nakama - 0x00210385, // n0x0d31 c0x0000 (---------------) + I nishi - 0x00227086, // n0x0d32 c0x0000 (---------------) + I nogata - 0x002ace45, // n0x0d33 c0x0000 (---------------) + I ogori - 0x0037e787, // n0x0d34 c0x0000 (---------------) + I okagaki - 0x002035c5, // n0x0d35 c0x0000 (---------------) + I okawa - 0x00215e83, // n0x0d36 c0x0000 (---------------) + I oki - 0x00203ec5, // n0x0d37 c0x0000 (---------------) + I omuta - 
0x0026e544, // n0x0d38 c0x0000 (---------------) + I onga - 0x0020c305, // n0x0d39 c0x0000 (---------------) + I onojo - 0x002135c3, // n0x0d3a c0x0000 (---------------) + I oto - 0x002ddcc7, // n0x0d3b c0x0000 (---------------) + I saigawa - 0x0036ee48, // n0x0d3c c0x0000 (---------------) + I sasaguri - 0x0022ff06, // n0x0d3d c0x0000 (---------------) + I shingu - 0x0029a1cd, // n0x0d3e c0x0000 (---------------) + I shinyoshitomi - 0x002790c6, // n0x0d3f c0x0000 (---------------) + I shonai - 0x002968c5, // n0x0d40 c0x0000 (---------------) + I soeda - 0x002c8583, // n0x0d41 c0x0000 (---------------) + I sue - 0x002b4e09, // n0x0d42 c0x0000 (---------------) + I tachiarai - 0x002c3a06, // n0x0d43 c0x0000 (---------------) + I tagawa - 0x00296ec6, // n0x0d44 c0x0000 (---------------) + I takata - 0x0034a844, // n0x0d45 c0x0000 (---------------) + I toho - 0x00268387, // n0x0d46 c0x0000 (---------------) + I toyotsu - 0x0023a6c6, // n0x0d47 c0x0000 (---------------) + I tsuiki - 0x002ae245, // n0x0d48 c0x0000 (---------------) + I ukiha - 0x0020a4c3, // n0x0d49 c0x0000 (---------------) + I umi - 0x0020b604, // n0x0d4a c0x0000 (---------------) + I usui - 0x0027d386, // n0x0d4b c0x0000 (---------------) + I yamada - 0x0029d304, // n0x0d4c c0x0000 (---------------) + I yame - 0x0030fe08, // n0x0d4d c0x0000 (---------------) + I yanagawa - 0x00381b49, // n0x0d4e c0x0000 (---------------) + I yukuhashi - 0x002bb289, // n0x0d4f c0x0000 (---------------) + I aizubange - 0x002a030a, // n0x0d50 c0x0000 (---------------) + I aizumisato - 0x0024438d, // n0x0d51 c0x0000 (---------------) + I aizuwakamatsu - 0x00247f07, // n0x0d52 c0x0000 (---------------) + I asakawa - 0x00206a86, // n0x0d53 c0x0000 (---------------) + I bandai - 0x0020da04, // n0x0d54 c0x0000 (---------------) + I date - 0x003547c9, // n0x0d55 c0x0000 (---------------) + I fukushima - 0x00287748, // n0x0d56 c0x0000 (---------------) + I furudono - 0x00289906, // n0x0d57 c0x0000 (---------------) + I futaba 
- 0x0025a4c6, // n0x0d58 c0x0000 (---------------) + I hanawa - 0x00266d87, // n0x0d59 c0x0000 (---------------) + I higashi - 0x002cfd86, // n0x0d5a c0x0000 (---------------) + I hirata - 0x0021c306, // n0x0d5b c0x0000 (---------------) + I hirono - 0x00381e46, // n0x0d5c c0x0000 (---------------) + I iitate - 0x0039518a, // n0x0d5d c0x0000 (---------------) + I inawashiro - 0x0021b2c8, // n0x0d5e c0x0000 (---------------) + I ishikawa - 0x002243c5, // n0x0d5f c0x0000 (---------------) + I iwaki - 0x0027e809, // n0x0d60 c0x0000 (---------------) + I izumizaki - 0x002c270a, // n0x0d61 c0x0000 (---------------) + I kagamiishi - 0x002c91c8, // n0x0d62 c0x0000 (---------------) + I kaneyama - 0x0029bec8, // n0x0d63 c0x0000 (---------------) + I kawamata - 0x00296e48, // n0x0d64 c0x0000 (---------------) + I kitakata - 0x002047cc, // n0x0d65 c0x0000 (---------------) + I kitashiobara - 0x0034b345, // n0x0d66 c0x0000 (---------------) + I koori - 0x0029f308, // n0x0d67 c0x0000 (---------------) + I koriyama - 0x002fb106, // n0x0d68 c0x0000 (---------------) + I kunimi - 0x002ac346, // n0x0d69 c0x0000 (---------------) + I miharu - 0x002c0d47, // n0x0d6a c0x0000 (---------------) + I mishima - 0x00349785, // n0x0d6b c0x0000 (---------------) + I namie - 0x00283745, // n0x0d6c c0x0000 (---------------) + I nango - 0x002bb149, // n0x0d6d c0x0000 (---------------) + I nishiaizu - 0x00210907, // n0x0d6e c0x0000 (---------------) + I nishigo - 0x002f1805, // n0x0d6f c0x0000 (---------------) + I okuma - 0x0021cf87, // n0x0d70 c0x0000 (---------------) + I omotego - 0x0020c303, // n0x0d71 c0x0000 (---------------) + I ono - 0x002c2bc5, // n0x0d72 c0x0000 (---------------) + I otama - 0x00320d08, // n0x0d73 c0x0000 (---------------) + I samegawa - 0x002af287, // n0x0d74 c0x0000 (---------------) + I shimogo - 0x0029bd89, // n0x0d75 c0x0000 (---------------) + I shirakawa - 0x002b2f45, // n0x0d76 c0x0000 (---------------) + I showa - 0x002f87c4, // n0x0d77 c0x0000 
(---------------) + I soma - 0x002a1a88, // n0x0d78 c0x0000 (---------------) + I sukagawa - 0x0023d287, // n0x0d79 c0x0000 (---------------) + I taishin - 0x002a4448, // n0x0d7a c0x0000 (---------------) + I tamakawa - 0x00331e48, // n0x0d7b c0x0000 (---------------) + I tanagura - 0x002cf345, // n0x0d7c c0x0000 (---------------) + I tenei - 0x0034ed86, // n0x0d7d c0x0000 (---------------) + I yabuki - 0x00290986, // n0x0d7e c0x0000 (---------------) + I yamato - 0x0025b789, // n0x0d7f c0x0000 (---------------) + I yamatsuri - 0x00316f87, // n0x0d80 c0x0000 (---------------) + I yanaizu - 0x002ad706, // n0x0d81 c0x0000 (---------------) + I yugawa - 0x0033c5c7, // n0x0d82 c0x0000 (---------------) + I anpachi - 0x00217883, // n0x0d83 c0x0000 (---------------) + I ena - 0x002ae0c4, // n0x0d84 c0x0000 (---------------) + I gifu - 0x002a23c5, // n0x0d85 c0x0000 (---------------) + I ginan - 0x00215784, // n0x0d86 c0x0000 (---------------) + I godo - 0x002328c4, // n0x0d87 c0x0000 (---------------) + I gujo - 0x00281847, // n0x0d88 c0x0000 (---------------) + I hashima - 0x00217f87, // n0x0d89 c0x0000 (---------------) + I hichiso - 0x0027a984, // n0x0d8a c0x0000 (---------------) + I hida - 0x0029bbd0, // n0x0d8b c0x0000 (---------------) + I higashishirakawa - 0x002dc7c7, // n0x0d8c c0x0000 (---------------) + I ibigawa - 0x00202445, // n0x0d8d c0x0000 (---------------) + I ikeda - 0x002eb24c, // n0x0d8e c0x0000 (---------------) + I kakamigahara - 0x00279ac4, // n0x0d8f c0x0000 (---------------) + I kani - 0x00292e08, // n0x0d90 c0x0000 (---------------) + I kasahara - 0x0023bc09, // n0x0d91 c0x0000 (---------------) + I kasamatsu - 0x00300906, // n0x0d92 c0x0000 (---------------) + I kawaue - 0x0021d388, // n0x0d93 c0x0000 (---------------) + I kitagata - 0x0024e344, // n0x0d94 c0x0000 (---------------) + I mino - 0x0024e348, // n0x0d95 c0x0000 (---------------) + I minokamo - 0x00267246, // n0x0d96 c0x0000 (---------------) + I mitake - 0x00223288, // n0x0d97 
c0x0000 (---------------) + I mizunami - 0x002a20c6, // n0x0d98 c0x0000 (---------------) + I motosu - 0x0030624b, // n0x0d99 c0x0000 (---------------) + I nakatsugawa - 0x002028c5, // n0x0d9a c0x0000 (---------------) + I ogaki - 0x002c7d08, // n0x0d9b c0x0000 (---------------) + I sakahogi - 0x00219284, // n0x0d9c c0x0000 (---------------) + I seki - 0x00282c4a, // n0x0d9d c0x0000 (---------------) + I sekigahara - 0x0029bd89, // n0x0d9e c0x0000 (---------------) + I shirakawa - 0x002889c6, // n0x0d9f c0x0000 (---------------) + I tajimi - 0x002c17c8, // n0x0da0 c0x0000 (---------------) + I takayama - 0x00273f05, // n0x0da1 c0x0000 (---------------) + I tarui - 0x00226184, // n0x0da2 c0x0000 (---------------) + I toki - 0x00292d06, // n0x0da3 c0x0000 (---------------) + I tomika - 0x00294888, // n0x0da4 c0x0000 (---------------) + I wanouchi - 0x002815c8, // n0x0da5 c0x0000 (---------------) + I yamagata - 0x00341d46, // n0x0da6 c0x0000 (---------------) + I yaotsu - 0x00311044, // n0x0da7 c0x0000 (---------------) + I yoro - 0x00222746, // n0x0da8 c0x0000 (---------------) + I annaka - 0x002ff0c7, // n0x0da9 c0x0000 (---------------) + I chiyoda - 0x00278ac7, // n0x0daa c0x0000 (---------------) + I fujioka - 0x00266d8f, // n0x0dab c0x0000 (---------------) + I higashiagatsuma - 0x00204e87, // n0x0dac c0x0000 (---------------) + I isesaki - 0x0035be07, // n0x0dad c0x0000 (---------------) + I itakura - 0x002ac205, // n0x0dae c0x0000 (---------------) + I kanna - 0x002d6c85, // n0x0daf c0x0000 (---------------) + I kanra - 0x002a0849, // n0x0db0 c0x0000 (---------------) + I katashina - 0x00250c86, // n0x0db1 c0x0000 (---------------) + I kawaba - 0x00280285, // n0x0db2 c0x0000 (---------------) + I kiryu - 0x002832c7, // n0x0db3 c0x0000 (---------------) + I kusatsu - 0x002c76c8, // n0x0db4 c0x0000 (---------------) + I maebashi - 0x002bbc05, // n0x0db5 c0x0000 (---------------) + I meiwa - 0x00299486, // n0x0db6 c0x0000 (---------------) + I midori - 
0x00215fc8, // n0x0db7 c0x0000 (---------------) + I minakami - 0x0035220a, // n0x0db8 c0x0000 (---------------) + I naganohara - 0x003536c8, // n0x0db9 c0x0000 (---------------) + I nakanojo - 0x003a0647, // n0x0dba c0x0000 (---------------) + I nanmoku - 0x0022d7c6, // n0x0dbb c0x0000 (---------------) + I numata - 0x0027e7c6, // n0x0dbc c0x0000 (---------------) + I oizumi - 0x0021edc3, // n0x0dbd c0x0000 (---------------) + I ora - 0x00201103, // n0x0dbe c0x0000 (---------------) + I ota - 0x002c28c9, // n0x0dbf c0x0000 (---------------) + I shibukawa - 0x0025e2c9, // n0x0dc0 c0x0000 (---------------) + I shimonita - 0x0028ff06, // n0x0dc1 c0x0000 (---------------) + I shinto - 0x002b2f45, // n0x0dc2 c0x0000 (---------------) + I showa - 0x002a1e48, // n0x0dc3 c0x0000 (---------------) + I takasaki - 0x002c17c8, // n0x0dc4 c0x0000 (---------------) + I takayama - 0x0039b188, // n0x0dc5 c0x0000 (---------------) + I tamamura - 0x00381ecb, // n0x0dc6 c0x0000 (---------------) + I tatebayashi - 0x0029a407, // n0x0dc7 c0x0000 (---------------) + I tomioka - 0x002fdc49, // n0x0dc8 c0x0000 (---------------) + I tsukiyono - 0x00267008, // n0x0dc9 c0x0000 (---------------) + I tsumagoi - 0x00385884, // n0x0dca c0x0000 (---------------) + I ueno - 0x002c82c8, // n0x0dcb c0x0000 (---------------) + I yoshioka - 0x0028e249, // n0x0dcc c0x0000 (---------------) + I asaminami - 0x002ada05, // n0x0dcd c0x0000 (---------------) + I daiwa - 0x00248547, // n0x0dce c0x0000 (---------------) + I etajima - 0x002be405, // n0x0dcf c0x0000 (---------------) + I fuchu - 0x002814c8, // n0x0dd0 c0x0000 (---------------) + I fukuyama - 0x00290c8b, // n0x0dd1 c0x0000 (---------------) + I hatsukaichi - 0x002953d0, // n0x0dd2 c0x0000 (---------------) + I higashihiroshima - 0x002a94c5, // n0x0dd3 c0x0000 (---------------) + I hongo - 0x002191cc, // n0x0dd4 c0x0000 (---------------) + I jinsekikogen - 0x00229085, // n0x0dd5 c0x0000 (---------------) + I kaita - 0x0027df03, // n0x0dd6 
c0x0000 (---------------) + I kui - 0x0027f446, // n0x0dd7 c0x0000 (---------------) + I kumano - 0x002b78c4, // n0x0dd8 c0x0000 (---------------) + I kure - 0x0039a0c6, // n0x0dd9 c0x0000 (---------------) + I mihara - 0x0029dbc7, // n0x0dda c0x0000 (---------------) + I miyoshi - 0x00216044, // n0x0ddb c0x0000 (---------------) + I naka - 0x002d36c8, // n0x0ddc c0x0000 (---------------) + I onomichi - 0x003271cd, // n0x0ddd c0x0000 (---------------) + I osakikamijima - 0x002fc585, // n0x0dde c0x0000 (---------------) + I otake - 0x00244884, // n0x0ddf c0x0000 (---------------) + I saka - 0x00226804, // n0x0de0 c0x0000 (---------------) + I sera - 0x0027e289, // n0x0de1 c0x0000 (---------------) + I seranishi - 0x00272b48, // n0x0de2 c0x0000 (---------------) + I shinichi - 0x0030dc47, // n0x0de3 c0x0000 (---------------) + I shobara - 0x002672c8, // n0x0de4 c0x0000 (---------------) + I takehara - 0x00282148, // n0x0de5 c0x0000 (---------------) + I abashiri - 0x0027aac5, // n0x0de6 c0x0000 (---------------) + I abira - 0x00207947, // n0x0de7 c0x0000 (---------------) + I aibetsu - 0x0027aa47, // n0x0de8 c0x0000 (---------------) + I akabira - 0x002086c7, // n0x0de9 c0x0000 (---------------) + I akkeshi - 0x002c0b09, // n0x0dea c0x0000 (---------------) + I asahikawa - 0x0023a549, // n0x0deb c0x0000 (---------------) + I ashibetsu - 0x00243e46, // n0x0dec c0x0000 (---------------) + I ashoro - 0x002b5d86, // n0x0ded c0x0000 (---------------) + I assabu - 0x00266fc6, // n0x0dee c0x0000 (---------------) + I atsuma - 0x00268ac5, // n0x0def c0x0000 (---------------) + I bibai - 0x0024e984, // n0x0df0 c0x0000 (---------------) + I biei - 0x00201a46, // n0x0df1 c0x0000 (---------------) + I bifuka - 0x00201fc6, // n0x0df2 c0x0000 (---------------) + I bihoro - 0x0027ab08, // n0x0df3 c0x0000 (---------------) + I biratori - 0x0028f24b, // n0x0df4 c0x0000 (---------------) + I chippubetsu - 0x002b0b47, // n0x0df5 c0x0000 (---------------) + I chitose - 0x0020da04, // 
n0x0df6 c0x0000 (---------------) + I date - 0x002276c6, // n0x0df7 c0x0000 (---------------) + I ebetsu - 0x00280f47, // n0x0df8 c0x0000 (---------------) + I embetsu - 0x002f1a05, // n0x0df9 c0x0000 (---------------) + I eniwa - 0x003792c5, // n0x0dfa c0x0000 (---------------) + I erimo - 0x00200fc4, // n0x0dfb c0x0000 (---------------) + I esan - 0x0023a4c6, // n0x0dfc c0x0000 (---------------) + I esashi - 0x00201ac8, // n0x0dfd c0x0000 (---------------) + I fukagawa - 0x003547c9, // n0x0dfe c0x0000 (---------------) + I fukushima - 0x0024b3c6, // n0x0dff c0x0000 (---------------) + I furano - 0x00286648, // n0x0e00 c0x0000 (---------------) + I furubira - 0x002ae306, // n0x0e01 c0x0000 (---------------) + I haboro - 0x0032e0c8, // n0x0e02 c0x0000 (---------------) + I hakodate - 0x002a39cc, // n0x0e03 c0x0000 (---------------) + I hamatonbetsu - 0x0027a986, // n0x0e04 c0x0000 (---------------) + I hidaka - 0x0029658d, // n0x0e05 c0x0000 (---------------) + I higashikagura - 0x00296a0b, // n0x0e06 c0x0000 (---------------) + I higashikawa - 0x00374585, // n0x0e07 c0x0000 (---------------) + I hiroo - 0x00204507, // n0x0e08 c0x0000 (---------------) + I hokuryu - 0x0034f5c6, // n0x0e09 c0x0000 (---------------) + I hokuto - 0x002e8608, // n0x0e0a c0x0000 (---------------) + I honbetsu - 0x00243ec9, // n0x0e0b c0x0000 (---------------) + I horokanai - 0x002bac88, // n0x0e0c c0x0000 (---------------) + I horonobe - 0x00202445, // n0x0e0d c0x0000 (---------------) + I ikeda - 0x0035c607, // n0x0e0e c0x0000 (---------------) + I imakane - 0x0027fc88, // n0x0e0f c0x0000 (---------------) + I ishikari - 0x0026a609, // n0x0e10 c0x0000 (---------------) + I iwamizawa - 0x00239a46, // n0x0e11 c0x0000 (---------------) + I iwanai - 0x0035eeca, // n0x0e12 c0x0000 (---------------) + I kamifurano - 0x002e8388, // n0x0e13 c0x0000 (---------------) + I kamikawa - 0x002baacb, // n0x0e14 c0x0000 (---------------) + I kamishihoro - 0x0028744c, // n0x0e15 c0x0000 
(---------------) + I kamisunagawa - 0x0024e448, // n0x0e16 c0x0000 (---------------) + I kamoenai - 0x0027cac6, // n0x0e17 c0x0000 (---------------) + I kayabe - 0x00207348, // n0x0e18 c0x0000 (---------------) + I kembuchi - 0x00204fc7, // n0x0e19 c0x0000 (---------------) + I kikonai - 0x0023a7c9, // n0x0e1a c0x0000 (---------------) + I kimobetsu - 0x00209d4d, // n0x0e1b c0x0000 (---------------) + I kitahiroshima - 0x0029ed06, // n0x0e1c c0x0000 (---------------) + I kitami - 0x0028ef48, // n0x0e1d c0x0000 (---------------) + I kiyosato - 0x003584c9, // n0x0e1e c0x0000 (---------------) + I koshimizu - 0x002b6748, // n0x0e1f c0x0000 (---------------) + I kunneppu - 0x00284248, // n0x0e20 c0x0000 (---------------) + I kuriyama - 0x002b9c0c, // n0x0e21 c0x0000 (---------------) + I kuromatsunai - 0x002bb4c7, // n0x0e22 c0x0000 (---------------) + I kushiro - 0x002bc847, // n0x0e23 c0x0000 (---------------) + I kutchan - 0x002c0945, // n0x0e24 c0x0000 (---------------) + I kyowa - 0x00240c07, // n0x0e25 c0x0000 (---------------) + I mashike - 0x002c7588, // n0x0e26 c0x0000 (---------------) + I matsumae - 0x00292d86, // n0x0e27 c0x0000 (---------------) + I mikasa - 0x0024b24c, // n0x0e28 c0x0000 (---------------) + I minamifurano - 0x002e4488, // n0x0e29 c0x0000 (---------------) + I mombetsu - 0x002c95c8, // n0x0e2a c0x0000 (---------------) + I moseushi - 0x002daf06, // n0x0e2b c0x0000 (---------------) + I mukawa - 0x00395787, // n0x0e2c c0x0000 (---------------) + I muroran - 0x00244044, // n0x0e2d c0x0000 (---------------) + I naie - 0x002b0508, // n0x0e2e c0x0000 (---------------) + I nakagawa - 0x002853cc, // n0x0e2f c0x0000 (---------------) + I nakasatsunai - 0x002178cc, // n0x0e30 c0x0000 (---------------) + I nakatombetsu - 0x00226b45, // n0x0e31 c0x0000 (---------------) + I nanae - 0x00389f87, // n0x0e32 c0x0000 (---------------) + I nanporo - 0x00310fc6, // n0x0e33 c0x0000 (---------------) + I nayoro - 0x00395706, // n0x0e34 c0x0000 
(---------------) + I nemuro - 0x00297388, // n0x0e35 c0x0000 (---------------) + I niikappu - 0x003a38c4, // n0x0e36 c0x0000 (---------------) + I niki - 0x002210cb, // n0x0e37 c0x0000 (---------------) + I nishiokoppe - 0x0026c9cb, // n0x0e38 c0x0000 (---------------) + I noboribetsu - 0x0022d7c6, // n0x0e39 c0x0000 (---------------) + I numata - 0x00327047, // n0x0e3a c0x0000 (---------------) + I obihiro - 0x002f4ac5, // n0x0e3b c0x0000 (---------------) + I obira - 0x00270f85, // n0x0e3c c0x0000 (---------------) + I oketo - 0x00221206, // n0x0e3d c0x0000 (---------------) + I okoppe - 0x00273ec5, // n0x0e3e c0x0000 (---------------) + I otaru - 0x002c0085, // n0x0e3f c0x0000 (---------------) + I otobe - 0x002c1007, // n0x0e40 c0x0000 (---------------) + I otofuke - 0x00278489, // n0x0e41 c0x0000 (---------------) + I otoineppu - 0x002e7a04, // n0x0e42 c0x0000 (---------------) + I oumu - 0x00276f45, // n0x0e43 c0x0000 (---------------) + I ozora - 0x002d75c5, // n0x0e44 c0x0000 (---------------) + I pippu - 0x0028c188, // n0x0e45 c0x0000 (---------------) + I rankoshi - 0x002d3505, // n0x0e46 c0x0000 (---------------) + I rebun - 0x002bfcc9, // n0x0e47 c0x0000 (---------------) + I rikubetsu - 0x0029b847, // n0x0e48 c0x0000 (---------------) + I rishiri - 0x0029b84b, // n0x0e49 c0x0000 (---------------) + I rishirifuji - 0x0022cac6, // n0x0e4a c0x0000 (---------------) + I saroma - 0x00228b09, // n0x0e4b c0x0000 (---------------) + I sarufutsu - 0x00355148, // n0x0e4c c0x0000 (---------------) + I shakotan - 0x00251f05, // n0x0e4d c0x0000 (---------------) + I shari - 0x002087c8, // n0x0e4e c0x0000 (---------------) + I shibecha - 0x0023a588, // n0x0e4f c0x0000 (---------------) + I shibetsu - 0x00218687, // n0x0e50 c0x0000 (---------------) + I shikabe - 0x0027e687, // n0x0e51 c0x0000 (---------------) + I shikaoi - 0x002818c9, // n0x0e52 c0x0000 (---------------) + I shimamaki - 0x002231c7, // n0x0e53 c0x0000 (---------------) + I shimizu - 0x002592c9, // 
n0x0e54 c0x0000 (---------------) + I shimokawa - 0x00286c0c, // n0x0e55 c0x0000 (---------------) + I shinshinotsu - 0x0028ff08, // n0x0e56 c0x0000 (---------------) + I shintoku - 0x002a8b09, // n0x0e57 c0x0000 (---------------) + I shiranuka - 0x002aac87, // n0x0e58 c0x0000 (---------------) + I shiraoi - 0x00282209, // n0x0e59 c0x0000 (---------------) + I shiriuchi - 0x002180c7, // n0x0e5a c0x0000 (---------------) + I sobetsu - 0x00287548, // n0x0e5b c0x0000 (---------------) + I sunagawa - 0x0028c4c5, // n0x0e5c c0x0000 (---------------) + I taiki - 0x003118c6, // n0x0e5d c0x0000 (---------------) + I takasu - 0x002ba308, // n0x0e5e c0x0000 (---------------) + I takikawa - 0x002f7488, // n0x0e5f c0x0000 (---------------) + I takinoue - 0x002c25c9, // n0x0e60 c0x0000 (---------------) + I teshikaga - 0x002c00c7, // n0x0e61 c0x0000 (---------------) + I tobetsu - 0x00271905, // n0x0e62 c0x0000 (---------------) + I tohma - 0x0020ae89, // n0x0e63 c0x0000 (---------------) + I tomakomai - 0x00219c46, // n0x0e64 c0x0000 (---------------) + I tomari - 0x002902c4, // n0x0e65 c0x0000 (---------------) + I toya - 0x0034a546, // n0x0e66 c0x0000 (---------------) + I toyako - 0x002660c8, // n0x0e67 c0x0000 (---------------) + I toyotomi - 0x00269547, // n0x0e68 c0x0000 (---------------) + I toyoura - 0x0028f448, // n0x0e69 c0x0000 (---------------) + I tsubetsu - 0x0021a989, // n0x0e6a c0x0000 (---------------) + I tsukigata - 0x002dac87, // n0x0e6b c0x0000 (---------------) + I urakawa - 0x0029eb86, // n0x0e6c c0x0000 (---------------) + I urausu - 0x002045c4, // n0x0e6d c0x0000 (---------------) + I uryu - 0x00203f49, // n0x0e6e c0x0000 (---------------) + I utashinai - 0x002077c8, // n0x0e6f c0x0000 (---------------) + I wakkanai - 0x002dadc7, // n0x0e70 c0x0000 (---------------) + I wassamu - 0x00324a06, // n0x0e71 c0x0000 (---------------) + I yakumo - 0x00379906, // n0x0e72 c0x0000 (---------------) + I yoichi - 0x00206b84, // n0x0e73 c0x0000 (---------------) + 
I aioi - 0x002a98c6, // n0x0e74 c0x0000 (---------------) + I akashi - 0x0020af43, // n0x0e75 c0x0000 (---------------) + I ako - 0x003277c9, // n0x0e76 c0x0000 (---------------) + I amagasaki - 0x00202886, // n0x0e77 c0x0000 (---------------) + I aogaki - 0x0029c105, // n0x0e78 c0x0000 (---------------) + I asago - 0x0029f086, // n0x0e79 c0x0000 (---------------) + I ashiya - 0x002a4585, // n0x0e7a c0x0000 (---------------) + I awaji - 0x00280108, // n0x0e7b c0x0000 (---------------) + I fukusaki - 0x0024e187, // n0x0e7c c0x0000 (---------------) + I goshiki - 0x00206406, // n0x0e7d c0x0000 (---------------) + I harima - 0x00354e06, // n0x0e7e c0x0000 (---------------) + I himeji - 0x00263808, // n0x0e7f c0x0000 (---------------) + I ichikawa - 0x002a09c7, // n0x0e80 c0x0000 (---------------) + I inagawa - 0x0029ed45, // n0x0e81 c0x0000 (---------------) + I itami - 0x0029f588, // n0x0e82 c0x0000 (---------------) + I kakogawa - 0x00381248, // n0x0e83 c0x0000 (---------------) + I kamigori - 0x002e8388, // n0x0e84 c0x0000 (---------------) + I kamikawa - 0x00228f45, // n0x0e85 c0x0000 (---------------) + I kasai - 0x00311946, // n0x0e86 c0x0000 (---------------) + I kasuga - 0x002bb049, // n0x0e87 c0x0000 (---------------) + I kawanishi - 0x00290804, // n0x0e88 c0x0000 (---------------) + I miki - 0x0036b00b, // n0x0e89 c0x0000 (---------------) + I minamiawaji - 0x0021c00b, // n0x0e8a c0x0000 (---------------) + I nishinomiya - 0x002242c9, // n0x0e8b c0x0000 (---------------) + I nishiwaki - 0x0020c303, // n0x0e8c c0x0000 (---------------) + I ono - 0x00259945, // n0x0e8d c0x0000 (---------------) + I sanda - 0x00202b46, // n0x0e8e c0x0000 (---------------) + I sannan - 0x0022f208, // n0x0e8f c0x0000 (---------------) + I sasayama - 0x0022d304, // n0x0e90 c0x0000 (---------------) + I sayo - 0x0022ff06, // n0x0e91 c0x0000 (---------------) + I shingu - 0x002cc8c9, // n0x0e92 c0x0000 (---------------) + I shinonsen - 0x002bd445, // n0x0e93 c0x0000 
(---------------) + I shiso - 0x002c0f46, // n0x0e94 c0x0000 (---------------) + I sumoto - 0x0023d286, // n0x0e95 c0x0000 (---------------) + I taishi - 0x002165c4, // n0x0e96 c0x0000 (---------------) + I taka - 0x00296fca, // n0x0e97 c0x0000 (---------------) + I takarazuka - 0x0029c048, // n0x0e98 c0x0000 (---------------) + I takasago - 0x002f7486, // n0x0e99 c0x0000 (---------------) + I takino - 0x00302505, // n0x0e9a c0x0000 (---------------) + I tamba - 0x0020e687, // n0x0e9b c0x0000 (---------------) + I tatsuno - 0x0025b2c7, // n0x0e9c c0x0000 (---------------) + I toyooka - 0x0034ed84, // n0x0e9d c0x0000 (---------------) + I yabu - 0x0021c247, // n0x0e9e c0x0000 (---------------) + I yashiro - 0x00203584, // n0x0e9f c0x0000 (---------------) + I yoka - 0x00203586, // n0x0ea0 c0x0000 (---------------) + I yokawa - 0x00207d83, // n0x0ea1 c0x0000 (---------------) + I ami - 0x002c0b05, // n0x0ea2 c0x0000 (---------------) + I asahi - 0x00342c85, // n0x0ea3 c0x0000 (---------------) + I bando - 0x0023d608, // n0x0ea4 c0x0000 (---------------) + I chikusei - 0x002156c5, // n0x0ea5 c0x0000 (---------------) + I daigo - 0x0027a6c9, // n0x0ea6 c0x0000 (---------------) + I fujishiro - 0x002a3e07, // n0x0ea7 c0x0000 (---------------) + I hitachi - 0x002b034b, // n0x0ea8 c0x0000 (---------------) + I hitachinaka - 0x002a3e0c, // n0x0ea9 c0x0000 (---------------) + I hitachiomiya - 0x002a4a8a, // n0x0eaa c0x0000 (---------------) + I hitachiota - 0x002c3847, // n0x0eab c0x0000 (---------------) + I ibaraki - 0x002012c3, // n0x0eac c0x0000 (---------------) + I ina - 0x00345f08, // n0x0ead c0x0000 (---------------) + I inashiki - 0x00229105, // n0x0eae c0x0000 (---------------) + I itako - 0x002bbc85, // n0x0eaf c0x0000 (---------------) + I iwama - 0x00334d84, // n0x0eb0 c0x0000 (---------------) + I joso - 0x00287446, // n0x0eb1 c0x0000 (---------------) + I kamisu - 0x0023bc06, // n0x0eb2 c0x0000 (---------------) + I kasama - 0x002a9907, // n0x0eb3 c0x0000 
(---------------) + I kashima - 0x0020a40b, // n0x0eb4 c0x0000 (---------------) + I kasumigaura - 0x0021fc44, // n0x0eb5 c0x0000 (---------------) + I koga - 0x00377044, // n0x0eb6 c0x0000 (---------------) + I miho - 0x0026ff04, // n0x0eb7 c0x0000 (---------------) + I mito - 0x002c7a46, // n0x0eb8 c0x0000 (---------------) + I moriya - 0x00216044, // n0x0eb9 c0x0000 (---------------) + I naka - 0x002c1648, // n0x0eba c0x0000 (---------------) + I namegata - 0x00334b85, // n0x0ebb c0x0000 (---------------) + I oarai - 0x00230545, // n0x0ebc c0x0000 (---------------) + I ogawa - 0x0039b0c7, // n0x0ebd c0x0000 (---------------) + I omitama - 0x00204609, // n0x0ebe c0x0000 (---------------) + I ryugasaki - 0x0034a305, // n0x0ebf c0x0000 (---------------) + I sakai - 0x00370fca, // n0x0ec0 c0x0000 (---------------) + I sakuragawa - 0x002c7809, // n0x0ec1 c0x0000 (---------------) + I shimodate - 0x0026d78a, // n0x0ec2 c0x0000 (---------------) + I shimotsuma - 0x003952c9, // n0x0ec3 c0x0000 (---------------) + I shirosato - 0x0032d484, // n0x0ec4 c0x0000 (---------------) + I sowa - 0x002fcd45, // n0x0ec5 c0x0000 (---------------) + I suifu - 0x002cfe88, // n0x0ec6 c0x0000 (---------------) + I takahagi - 0x0037394b, // n0x0ec7 c0x0000 (---------------) + I tamatsukuri - 0x002f91c5, // n0x0ec8 c0x0000 (---------------) + I tokai - 0x00285f06, // n0x0ec9 c0x0000 (---------------) + I tomobe - 0x0021e584, // n0x0eca c0x0000 (---------------) + I tone - 0x0027ac06, // n0x0ecb c0x0000 (---------------) + I toride - 0x002dab09, // n0x0ecc c0x0000 (---------------) + I tsuchiura - 0x00227787, // n0x0ecd c0x0000 (---------------) + I tsukuba - 0x0030df88, // n0x0ece c0x0000 (---------------) + I uchihara - 0x00244706, // n0x0ecf c0x0000 (---------------) + I ushiku - 0x002ff047, // n0x0ed0 c0x0000 (---------------) + I yachiyo - 0x002815c8, // n0x0ed1 c0x0000 (---------------) + I yamagata - 0x00384c06, // n0x0ed2 c0x0000 (---------------) + I yawara - 0x00253884, // 
n0x0ed3 c0x0000 (---------------) + I yuki - 0x0035d647, // n0x0ed4 c0x0000 (---------------) + I anamizu - 0x00343a45, // n0x0ed5 c0x0000 (---------------) + I hakui - 0x00348747, // n0x0ed6 c0x0000 (---------------) + I hakusan - 0x00201b44, // n0x0ed7 c0x0000 (---------------) + I kaga - 0x0034f546, // n0x0ed8 c0x0000 (---------------) + I kahoku - 0x0021b548, // n0x0ed9 c0x0000 (---------------) + I kanazawa - 0x00296bc8, // n0x0eda c0x0000 (---------------) + I kawakita - 0x002ab807, // n0x0edb c0x0000 (---------------) + I komatsu - 0x003288c8, // n0x0edc c0x0000 (---------------) + I nakanoto - 0x002b6005, // n0x0edd c0x0000 (---------------) + I nanao - 0x0020c144, // n0x0ede c0x0000 (---------------) + I nomi - 0x00263708, // n0x0edf c0x0000 (---------------) + I nonoichi - 0x00257ec4, // n0x0ee0 c0x0000 (---------------) + I noto - 0x00216485, // n0x0ee1 c0x0000 (---------------) + I shika - 0x002ea3c4, // n0x0ee2 c0x0000 (---------------) + I suzu - 0x0023e207, // n0x0ee3 c0x0000 (---------------) + I tsubata - 0x0028aa07, // n0x0ee4 c0x0000 (---------------) + I tsurugi - 0x00282348, // n0x0ee5 c0x0000 (---------------) + I uchinada - 0x002a45c6, // n0x0ee6 c0x0000 (---------------) + I wajima - 0x00215645, // n0x0ee7 c0x0000 (---------------) + I fudai - 0x0027a4c8, // n0x0ee8 c0x0000 (---------------) + I fujisawa - 0x003538c8, // n0x0ee9 c0x0000 (---------------) + I hanamaki - 0x002a0249, // n0x0eea c0x0000 (---------------) + I hiraizumi - 0x0021c306, // n0x0eeb c0x0000 (---------------) + I hirono - 0x00236a88, // n0x0eec c0x0000 (---------------) + I ichinohe - 0x00282aca, // n0x0eed c0x0000 (---------------) + I ichinoseki - 0x002f1a88, // n0x0eee c0x0000 (---------------) + I iwaizumi - 0x002d7d45, // n0x0eef c0x0000 (---------------) + I iwate - 0x00227f86, // n0x0ef0 c0x0000 (---------------) + I joboji - 0x0028fb08, // n0x0ef1 c0x0000 (---------------) + I kamaishi - 0x0035c6ca, // n0x0ef2 c0x0000 (---------------) + I kanegasaki - 
0x00269ec7, // n0x0ef3 c0x0000 (---------------) + I karumai - 0x00287b45, // n0x0ef4 c0x0000 (---------------) + I kawai - 0x00295bc8, // n0x0ef5 c0x0000 (---------------) + I kitakami - 0x003a2804, // n0x0ef6 c0x0000 (---------------) + I kuji - 0x002ae486, // n0x0ef7 c0x0000 (---------------) + I kunohe - 0x002bcfc8, // n0x0ef8 c0x0000 (---------------) + I kuzumaki - 0x0020c1c6, // n0x0ef9 c0x0000 (---------------) + I miyako - 0x002f3888, // n0x0efa c0x0000 (---------------) + I mizusawa - 0x0021a207, // n0x0efb c0x0000 (---------------) + I morioka - 0x00209286, // n0x0efc c0x0000 (---------------) + I ninohe - 0x0037d844, // n0x0efd c0x0000 (---------------) + I noda - 0x002d97c7, // n0x0efe c0x0000 (---------------) + I ofunato - 0x002fac04, // n0x0eff c0x0000 (---------------) + I oshu - 0x002daac7, // n0x0f00 c0x0000 (---------------) + I otsuchi - 0x0037860d, // n0x0f01 c0x0000 (---------------) + I rikuzentakata - 0x00203345, // n0x0f02 c0x0000 (---------------) + I shiwa - 0x002af08b, // n0x0f03 c0x0000 (---------------) + I shizukuishi - 0x002a21c6, // n0x0f04 c0x0000 (---------------) + I sumita - 0x00252c48, // n0x0f05 c0x0000 (---------------) + I tanohata - 0x00387b44, // n0x0f06 c0x0000 (---------------) + I tono - 0x00276d06, // n0x0f07 c0x0000 (---------------) + I yahaba - 0x0027d386, // n0x0f08 c0x0000 (---------------) + I yamada - 0x00207687, // n0x0f09 c0x0000 (---------------) + I ayagawa - 0x0029624d, // n0x0f0a c0x0000 (---------------) + I higashikagawa - 0x00312347, // n0x0f0b c0x0000 (---------------) + I kanonji - 0x00300fc8, // n0x0f0c c0x0000 (---------------) + I kotohira - 0x0035ea45, // n0x0f0d c0x0000 (---------------) + I manno - 0x00298248, // n0x0f0e c0x0000 (---------------) + I marugame - 0x002c1f06, // n0x0f0f c0x0000 (---------------) + I mitoyo - 0x002b6088, // n0x0f10 c0x0000 (---------------) + I naoshima - 0x00210f86, // n0x0f11 c0x0000 (---------------) + I sanuki - 0x003541c7, // n0x0f12 c0x0000 (---------------) 
+ I tadotsu - 0x0021d509, // n0x0f13 c0x0000 (---------------) + I takamatsu - 0x00387b47, // n0x0f14 c0x0000 (---------------) + I tonosho - 0x00289bc8, // n0x0f15 c0x0000 (---------------) + I uchinomi - 0x00273545, // n0x0f16 c0x0000 (---------------) + I utazu - 0x0021e8c8, // n0x0f17 c0x0000 (---------------) + I zentsuji - 0x00326185, // n0x0f18 c0x0000 (---------------) + I akune - 0x00240f45, // n0x0f19 c0x0000 (---------------) + I amami - 0x002e5945, // n0x0f1a c0x0000 (---------------) + I hioki - 0x00226dc3, // n0x0f1b c0x0000 (---------------) + I isa - 0x00283644, // n0x0f1c c0x0000 (---------------) + I isen - 0x0027e805, // n0x0f1d c0x0000 (---------------) + I izumi - 0x00275f89, // n0x0f1e c0x0000 (---------------) + I kagoshima - 0x002b1546, // n0x0f1f c0x0000 (---------------) + I kanoya - 0x002d4308, // n0x0f20 c0x0000 (---------------) + I kawanabe - 0x0035c8c5, // n0x0f21 c0x0000 (---------------) + I kinko - 0x0032bd87, // n0x0f22 c0x0000 (---------------) + I kouyama - 0x003544ca, // n0x0f23 c0x0000 (---------------) + I makurazaki - 0x002c0e89, // n0x0f24 c0x0000 (---------------) + I matsumoto - 0x002b314a, // n0x0f25 c0x0000 (---------------) + I minamitane - 0x002cd408, // n0x0f26 c0x0000 (---------------) + I nakatane - 0x0021cdcc, // n0x0f27 c0x0000 (---------------) + I nishinoomote - 0x0028334d, // n0x0f28 c0x0000 (---------------) + I satsumasendai - 0x002edf43, // n0x0f29 c0x0000 (---------------) + I soo - 0x002f3788, // n0x0f2a c0x0000 (---------------) + I tarumizu - 0x0020b5c5, // n0x0f2b c0x0000 (---------------) + I yusui - 0x00351fc6, // n0x0f2c c0x0000 (---------------) + I aikawa - 0x00376c46, // n0x0f2d c0x0000 (---------------) + I atsugi - 0x0024cf45, // n0x0f2e c0x0000 (---------------) + I ayase - 0x0033c6c9, // n0x0f2f c0x0000 (---------------) + I chigasaki - 0x00324c05, // n0x0f30 c0x0000 (---------------) + I ebina - 0x0027a4c8, // n0x0f31 c0x0000 (---------------) + I fujisawa - 0x00257dc6, // n0x0f32 c0x0000 
(---------------) + I hadano - 0x00338fc6, // n0x0f33 c0x0000 (---------------) + I hakone - 0x002a1949, // n0x0f34 c0x0000 (---------------) + I hiratsuka - 0x00384407, // n0x0f35 c0x0000 (---------------) + I isehara - 0x002f2a86, // n0x0f36 c0x0000 (---------------) + I kaisei - 0x00354448, // n0x0f37 c0x0000 (---------------) + I kamakura - 0x00203508, // n0x0f38 c0x0000 (---------------) + I kiyokawa - 0x002d1c87, // n0x0f39 c0x0000 (---------------) + I matsuda - 0x0022340e, // n0x0f3a c0x0000 (---------------) + I minamiashigara - 0x002c2145, // n0x0f3b c0x0000 (---------------) + I miura - 0x0026a505, // n0x0f3c c0x0000 (---------------) + I nakai - 0x0020c0c8, // n0x0f3d c0x0000 (---------------) + I ninomiya - 0x0036c2c7, // n0x0f3e c0x0000 (---------------) + I odawara - 0x00206c02, // n0x0f3f c0x0000 (---------------) + I oi - 0x002b9784, // n0x0f40 c0x0000 (---------------) + I oiso - 0x00399fca, // n0x0f41 c0x0000 (---------------) + I sagamihara - 0x002dae88, // n0x0f42 c0x0000 (---------------) + I samukawa - 0x00281046, // n0x0f43 c0x0000 (---------------) + I tsukui - 0x00298948, // n0x0f44 c0x0000 (---------------) + I yamakita - 0x00290986, // n0x0f45 c0x0000 (---------------) + I yamato - 0x00325b48, // n0x0f46 c0x0000 (---------------) + I yokosuka - 0x002ad708, // n0x0f47 c0x0000 (---------------) + I yugawara - 0x00240f04, // n0x0f48 c0x0000 (---------------) + I zama - 0x0032f185, // n0x0f49 c0x0000 (---------------) + I zushi - 0x00685804, // n0x0f4a c0x0001 (---------------) ! I city - 0x00685804, // n0x0f4b c0x0001 (---------------) ! I city - 0x00685804, // n0x0f4c c0x0001 (---------------) ! 
I city - 0x00202943, // n0x0f4d c0x0000 (---------------) + I aki - 0x00379786, // n0x0f4e c0x0000 (---------------) + I geisei - 0x0027a986, // n0x0f4f c0x0000 (---------------) + I hidaka - 0x0029e3cc, // n0x0f50 c0x0000 (---------------) + I higashitsuno - 0x002092c3, // n0x0f51 c0x0000 (---------------) + I ino - 0x002bdd86, // n0x0f52 c0x0000 (---------------) + I kagami - 0x002160c4, // n0x0f53 c0x0000 (---------------) + I kami - 0x002c3988, // n0x0f54 c0x0000 (---------------) + I kitagawa - 0x002cc705, // n0x0f55 c0x0000 (---------------) + I kochi - 0x0039a0c6, // n0x0f56 c0x0000 (---------------) + I mihara - 0x002b5548, // n0x0f57 c0x0000 (---------------) + I motoyama - 0x002ce386, // n0x0f58 c0x0000 (---------------) + I muroto - 0x00206386, // n0x0f59 c0x0000 (---------------) + I nahari - 0x0035e6c8, // n0x0f5a c0x0000 (---------------) + I nakamura - 0x002a2447, // n0x0f5b c0x0000 (---------------) + I nankoku - 0x00222f89, // n0x0f5c c0x0000 (---------------) + I nishitosa - 0x0023040a, // n0x0f5d c0x0000 (---------------) + I niyodogawa - 0x00247c84, // n0x0f5e c0x0000 (---------------) + I ochi - 0x002035c5, // n0x0f5f c0x0000 (---------------) + I okawa - 0x0025c245, // n0x0f60 c0x0000 (---------------) + I otoyo - 0x0021d106, // n0x0f61 c0x0000 (---------------) + I otsuki - 0x00247f46, // n0x0f62 c0x0000 (---------------) + I sakawa - 0x002a7846, // n0x0f63 c0x0000 (---------------) + I sukumo - 0x002e9706, // n0x0f64 c0x0000 (---------------) + I susaki - 0x002230c4, // n0x0f65 c0x0000 (---------------) + I tosa - 0x002230cb, // n0x0f66 c0x0000 (---------------) + I tosashimizu - 0x002472c4, // n0x0f67 c0x0000 (---------------) + I toyo - 0x0020e705, // n0x0f68 c0x0000 (---------------) + I tsuno - 0x002ac705, // n0x0f69 c0x0000 (---------------) + I umaji - 0x0027eb86, // n0x0f6a c0x0000 (---------------) + I yasuda - 0x00202208, // n0x0f6b c0x0000 (---------------) + I yusuhara - 0x00283207, // n0x0f6c c0x0000 (---------------) + I amakusa 
- 0x0030dd44, // n0x0f6d c0x0000 (---------------) + I arao - 0x00262cc3, // n0x0f6e c0x0000 (---------------) + I aso - 0x003a2305, // n0x0f6f c0x0000 (---------------) + I choyo - 0x0024a4c7, // n0x0f70 c0x0000 (---------------) + I gyokuto - 0x002a5809, // n0x0f71 c0x0000 (---------------) + I hitoyoshi - 0x0028310b, // n0x0f72 c0x0000 (---------------) + I kamiamakusa - 0x002a9907, // n0x0f73 c0x0000 (---------------) + I kashima - 0x0023d507, // n0x0f74 c0x0000 (---------------) + I kikuchi - 0x002ddc44, // n0x0f75 c0x0000 (---------------) + I kosa - 0x002b5448, // n0x0f76 c0x0000 (---------------) + I kumamoto - 0x002aba87, // n0x0f77 c0x0000 (---------------) + I mashiki - 0x002a5a46, // n0x0f78 c0x0000 (---------------) + I mifune - 0x00253608, // n0x0f79 c0x0000 (---------------) + I minamata - 0x002a7a4b, // n0x0f7a c0x0000 (---------------) + I minamioguni - 0x00360ec6, // n0x0f7b c0x0000 (---------------) + I nagasu - 0x00210cc9, // n0x0f7c c0x0000 (---------------) + I nishihara - 0x002a7bc5, // n0x0f7d c0x0000 (---------------) + I oguni - 0x002ffa83, // n0x0f7e c0x0000 (---------------) + I ozu - 0x002c0f46, // n0x0f7f c0x0000 (---------------) + I sumoto - 0x0021a108, // n0x0f80 c0x0000 (---------------) + I takamori - 0x00211043, // n0x0f81 c0x0000 (---------------) + I uki - 0x00229f83, // n0x0f82 c0x0000 (---------------) + I uto - 0x00227446, // n0x0f83 c0x0000 (---------------) + I yamaga - 0x00290986, // n0x0f84 c0x0000 (---------------) + I yamato - 0x00380c0a, // n0x0f85 c0x0000 (---------------) + I yatsushiro - 0x0027cb05, // n0x0f86 c0x0000 (---------------) + I ayabe - 0x0027d1cb, // n0x0f87 c0x0000 (---------------) + I fukuchiyama - 0x0029efcb, // n0x0f88 c0x0000 (---------------) + I higashiyama - 0x0022ab43, // n0x0f89 c0x0000 (---------------) + I ide - 0x00220b03, // n0x0f8a c0x0000 (---------------) + I ine - 0x002af884, // n0x0f8b c0x0000 (---------------) + I joyo - 0x0021a507, // n0x0f8c c0x0000 (---------------) + I kameoka - 
0x0021a184, // n0x0f8d c0x0000 (---------------) + I kamo - 0x002047c4, // n0x0f8e c0x0000 (---------------) + I kita - 0x002faf84, // n0x0f8f c0x0000 (---------------) + I kizu - 0x002f1d88, // n0x0f90 c0x0000 (---------------) + I kumiyama - 0x00302448, // n0x0f91 c0x0000 (---------------) + I kyotamba - 0x00308709, // n0x0f92 c0x0000 (---------------) + I kyotanabe - 0x00342288, // n0x0f93 c0x0000 (---------------) + I kyotango - 0x002d2e87, // n0x0f94 c0x0000 (---------------) + I maizuru - 0x00223406, // n0x0f95 c0x0000 (---------------) + I minami - 0x002d3f4f, // n0x0f96 c0x0000 (---------------) + I minamiyamashiro - 0x002c2286, // n0x0f97 c0x0000 (---------------) + I miyazu - 0x002cc684, // n0x0f98 c0x0000 (---------------) + I muko - 0x0030228a, // n0x0f99 c0x0000 (---------------) + I nagaokakyo - 0x0024a3c7, // n0x0f9a c0x0000 (---------------) + I nakagyo - 0x00203b86, // n0x0f9b c0x0000 (---------------) + I nantan - 0x00290309, // n0x0f9c c0x0000 (---------------) + I oyamazaki - 0x00308685, // n0x0f9d c0x0000 (---------------) + I sakyo - 0x0023d745, // n0x0f9e c0x0000 (---------------) + I seika - 0x003087c6, // n0x0f9f c0x0000 (---------------) + I tanabe - 0x0021ea03, // n0x0fa0 c0x0000 (---------------) + I uji - 0x003a2849, // n0x0fa1 c0x0000 (---------------) + I ujitawara - 0x0021b446, // n0x0fa2 c0x0000 (---------------) + I wazuka - 0x0021a749, // n0x0fa3 c0x0000 (---------------) + I yamashina - 0x0038e686, // n0x0fa4 c0x0000 (---------------) + I yawata - 0x002c0b05, // n0x0fa5 c0x0000 (---------------) + I asahi - 0x00226f45, // n0x0fa6 c0x0000 (---------------) + I inabe - 0x00204e83, // n0x0fa7 c0x0000 (---------------) + I ise - 0x0021a648, // n0x0fa8 c0x0000 (---------------) + I kameyama - 0x00398b87, // n0x0fa9 c0x0000 (---------------) + I kawagoe - 0x002f1744, // n0x0faa c0x0000 (---------------) + I kiho - 0x0021d208, // n0x0fab c0x0000 (---------------) + I kisosaki - 0x002a8e04, // n0x0fac c0x0000 (---------------) + I kiwa - 
0x002b4486, // n0x0fad c0x0000 (---------------) + I komono - 0x0027f446, // n0x0fae c0x0000 (---------------) + I kumano - 0x00243186, // n0x0faf c0x0000 (---------------) + I kuwana - 0x002c7bc9, // n0x0fb0 c0x0000 (---------------) + I matsusaka - 0x002bbc05, // n0x0fb1 c0x0000 (---------------) + I meiwa - 0x002a9086, // n0x0fb2 c0x0000 (---------------) + I mihama - 0x0025afc9, // n0x0fb3 c0x0000 (---------------) + I minamiise - 0x002c13c6, // n0x0fb4 c0x0000 (---------------) + I misugi - 0x002d4046, // n0x0fb5 c0x0000 (---------------) + I miyama - 0x0037dbc6, // n0x0fb6 c0x0000 (---------------) + I nabari - 0x00209f45, // n0x0fb7 c0x0000 (---------------) + I shima - 0x002ea3c6, // n0x0fb8 c0x0000 (---------------) + I suzuka - 0x003541c4, // n0x0fb9 c0x0000 (---------------) + I tado - 0x0028c4c5, // n0x0fba c0x0000 (---------------) + I taiki - 0x002ba304, // n0x0fbb c0x0000 (---------------) + I taki - 0x0030c206, // n0x0fbc c0x0000 (---------------) + I tamaki - 0x00395484, // n0x0fbd c0x0000 (---------------) + I toba - 0x00207a43, // n0x0fbe c0x0000 (---------------) + I tsu - 0x00287805, // n0x0fbf c0x0000 (---------------) + I udono - 0x0023a288, // n0x0fc0 c0x0000 (---------------) + I ureshino - 0x0022e387, // n0x0fc1 c0x0000 (---------------) + I watarai - 0x002b3b09, // n0x0fc2 c0x0000 (---------------) + I yokkaichi - 0x00287a48, // n0x0fc3 c0x0000 (---------------) + I furukawa - 0x00297e91, // n0x0fc4 c0x0000 (---------------) + I higashimatsushima - 0x0023d30a, // n0x0fc5 c0x0000 (---------------) + I ishinomaki - 0x0022d707, // n0x0fc6 c0x0000 (---------------) + I iwanuma - 0x003990c6, // n0x0fc7 c0x0000 (---------------) + I kakuda - 0x002160c4, // n0x0fc8 c0x0000 (---------------) + I kami - 0x002ba408, // n0x0fc9 c0x0000 (---------------) + I kawasaki - 0x00294e89, // n0x0fca c0x0000 (---------------) + I kesennuma - 0x002a9a48, // n0x0fcb c0x0000 (---------------) + I marumori - 0x0029804a, // n0x0fcc c0x0000 (---------------) + I 
matsushima - 0x002bfa8d, // n0x0fcd c0x0000 (---------------) + I minamisanriku - 0x0022da46, // n0x0fce c0x0000 (---------------) + I misato - 0x0035e7c6, // n0x0fcf c0x0000 (---------------) + I murata - 0x002d9886, // n0x0fd0 c0x0000 (---------------) + I natori - 0x00373fc7, // n0x0fd1 c0x0000 (---------------) + I ogawara - 0x002a0705, // n0x0fd2 c0x0000 (---------------) + I ohira - 0x0034eb47, // n0x0fd3 c0x0000 (---------------) + I onagawa - 0x0021d2c5, // n0x0fd4 c0x0000 (---------------) + I osaki - 0x0029b984, // n0x0fd5 c0x0000 (---------------) + I rifu - 0x002aa106, // n0x0fd6 c0x0000 (---------------) + I semine - 0x00311787, // n0x0fd7 c0x0000 (---------------) + I shibata - 0x003a254d, // n0x0fd8 c0x0000 (---------------) + I shichikashuku - 0x0028fa47, // n0x0fd9 c0x0000 (---------------) + I shikama - 0x002701c8, // n0x0fda c0x0000 (---------------) + I shiogama - 0x0027a7c9, // n0x0fdb c0x0000 (---------------) + I shiroishi - 0x00227e86, // n0x0fdc c0x0000 (---------------) + I tagajo - 0x002399c5, // n0x0fdd c0x0000 (---------------) + I taiwa - 0x00213604, // n0x0fde c0x0000 (---------------) + I tome - 0x002661c6, // n0x0fdf c0x0000 (---------------) + I tomiya - 0x0034ec86, // n0x0fe0 c0x0000 (---------------) + I wakuya - 0x002db006, // n0x0fe1 c0x0000 (---------------) + I watari - 0x0029b348, // n0x0fe2 c0x0000 (---------------) + I yamamoto - 0x002126c3, // n0x0fe3 c0x0000 (---------------) + I zao - 0x00207683, // n0x0fe4 c0x0000 (---------------) + I aya - 0x00325f05, // n0x0fe5 c0x0000 (---------------) + I ebino - 0x0023c086, // n0x0fe6 c0x0000 (---------------) + I gokase - 0x002ad6c5, // n0x0fe7 c0x0000 (---------------) + I hyuga - 0x00244908, // n0x0fe8 c0x0000 (---------------) + I kadogawa - 0x0029dd8a, // n0x0fe9 c0x0000 (---------------) + I kawaminami - 0x002e3544, // n0x0fea c0x0000 (---------------) + I kijo - 0x002c3988, // n0x0feb c0x0000 (---------------) + I kitagawa - 0x00296e48, // n0x0fec c0x0000 (---------------) 
+ I kitakata - 0x0027e9c7, // n0x0fed c0x0000 (---------------) + I kitaura - 0x0035c989, // n0x0fee c0x0000 (---------------) + I kobayashi - 0x002b5148, // n0x0fef c0x0000 (---------------) + I kunitomi - 0x00290087, // n0x0ff0 c0x0000 (---------------) + I kushima - 0x0029c906, // n0x0ff1 c0x0000 (---------------) + I mimata - 0x0020c1ca, // n0x0ff2 c0x0000 (---------------) + I miyakonojo - 0x00266248, // n0x0ff3 c0x0000 (---------------) + I miyazaki - 0x002ba909, // n0x0ff4 c0x0000 (---------------) + I morotsuka - 0x00272c08, // n0x0ff5 c0x0000 (---------------) + I nichinan - 0x0021bac9, // n0x0ff6 c0x0000 (---------------) + I nishimera - 0x002bad87, // n0x0ff7 c0x0000 (---------------) + I nobeoka - 0x00342145, // n0x0ff8 c0x0000 (---------------) + I saito - 0x002a2ec6, // n0x0ff9 c0x0000 (---------------) + I shiiba - 0x00292c08, // n0x0ffa c0x0000 (---------------) + I shintomi - 0x00252dc8, // n0x0ffb c0x0000 (---------------) + I takaharu - 0x0021ab48, // n0x0ffc c0x0000 (---------------) + I takanabe - 0x002165c8, // n0x0ffd c0x0000 (---------------) + I takazaki - 0x0020e705, // n0x0ffe c0x0000 (---------------) + I tsuno - 0x00200344, // n0x0fff c0x0000 (---------------) + I achi - 0x00398888, // n0x1000 c0x0000 (---------------) + I agematsu - 0x00203c84, // n0x1001 c0x0000 (---------------) + I anan - 0x003950c4, // n0x1002 c0x0000 (---------------) + I aoki - 0x002c0b05, // n0x1003 c0x0000 (---------------) + I asahi - 0x002918c7, // n0x1004 c0x0000 (---------------) + I azumino - 0x002043c9, // n0x1005 c0x0000 (---------------) + I chikuhoku - 0x00207487, // n0x1006 c0x0000 (---------------) + I chikuma - 0x0020ee05, // n0x1007 c0x0000 (---------------) + I chino - 0x00278086, // n0x1008 c0x0000 (---------------) + I fujimi - 0x0033c0c6, // n0x1009 c0x0000 (---------------) + I hakuba - 0x00202304, // n0x100a c0x0000 (---------------) + I hara - 0x002a1c86, // n0x100b c0x0000 (---------------) + I hiraya - 0x00215484, // n0x100c c0x0000 
(---------------) + I iida - 0x00257606, // n0x100d c0x0000 (---------------) + I iijima - 0x003a3986, // n0x100e c0x0000 (---------------) + I iiyama - 0x002144c6, // n0x100f c0x0000 (---------------) + I iizuna - 0x00202445, // n0x1010 c0x0000 (---------------) + I ikeda - 0x002447c7, // n0x1011 c0x0000 (---------------) + I ikusaka - 0x002012c3, // n0x1012 c0x0000 (---------------) + I ina - 0x00248c49, // n0x1013 c0x0000 (---------------) + I karuizawa - 0x002f2788, // n0x1014 c0x0000 (---------------) + I kawakami - 0x0021d204, // n0x1015 c0x0000 (---------------) + I kiso - 0x003546cd, // n0x1016 c0x0000 (---------------) + I kisofukushima - 0x00296cc8, // n0x1017 c0x0000 (---------------) + I kitaaiki - 0x0028d488, // n0x1018 c0x0000 (---------------) + I komagane - 0x002ba886, // n0x1019 c0x0000 (---------------) + I komoro - 0x0021d609, // n0x101a c0x0000 (---------------) + I matsukawa - 0x002c0e89, // n0x101b c0x0000 (---------------) + I matsumoto - 0x002da805, // n0x101c c0x0000 (---------------) + I miasa - 0x0029de8a, // n0x101d c0x0000 (---------------) + I minamiaiki - 0x002805ca, // n0x101e c0x0000 (---------------) + I minamimaki - 0x0028abcc, // n0x101f c0x0000 (---------------) + I minamiminowa - 0x0028ad46, // n0x1020 c0x0000 (---------------) + I minowa - 0x00278946, // n0x1021 c0x0000 (---------------) + I miyada - 0x002c2b06, // n0x1022 c0x0000 (---------------) + I miyota - 0x0024bf89, // n0x1023 c0x0000 (---------------) + I mochizuki - 0x00352206, // n0x1024 c0x0000 (---------------) + I nagano - 0x002875c6, // n0x1025 c0x0000 (---------------) + I nagawa - 0x00324cc6, // n0x1026 c0x0000 (---------------) + I nagiso - 0x002b0508, // n0x1027 c0x0000 (---------------) + I nakagawa - 0x003288c6, // n0x1028 c0x0000 (---------------) + I nakano - 0x002c7f0b, // n0x1029 c0x0000 (---------------) + I nozawaonsen - 0x00291a45, // n0x102a c0x0000 (---------------) + I obuse - 0x00230545, // n0x102b c0x0000 (---------------) + I ogawa - 
0x00278bc5, // n0x102c c0x0000 (---------------) + I okaya - 0x002013c6, // n0x102d c0x0000 (---------------) + I omachi - 0x0020c183, // n0x102e c0x0000 (---------------) + I omi - 0x00243106, // n0x102f c0x0000 (---------------) + I ookuwa - 0x0028f9c7, // n0x1030 c0x0000 (---------------) + I ooshika - 0x002ba2c5, // n0x1031 c0x0000 (---------------) + I otaki - 0x00264245, // n0x1032 c0x0000 (---------------) + I otari - 0x002e2d05, // n0x1033 c0x0000 (---------------) + I sakae - 0x0031ac86, // n0x1034 c0x0000 (---------------) + I sakaki - 0x002da8c4, // n0x1035 c0x0000 (---------------) + I saku - 0x0036a506, // n0x1036 c0x0000 (---------------) + I sakuho - 0x00265b89, // n0x1037 c0x0000 (---------------) + I shimosuwa - 0x0020124c, // n0x1038 c0x0000 (---------------) + I shinanomachi - 0x0029b6c8, // n0x1039 c0x0000 (---------------) + I shiojiri - 0x00265cc4, // n0x103a c0x0000 (---------------) + I suwa - 0x002ea046, // n0x103b c0x0000 (---------------) + I suzaka - 0x002a22c6, // n0x103c c0x0000 (---------------) + I takagi - 0x0021a108, // n0x103d c0x0000 (---------------) + I takamori - 0x002c17c8, // n0x103e c0x0000 (---------------) + I takayama - 0x00201149, // n0x103f c0x0000 (---------------) + I tateshina - 0x0020e687, // n0x1040 c0x0000 (---------------) + I tatsuno - 0x002b0d09, // n0x1041 c0x0000 (---------------) + I togakushi - 0x00271046, // n0x1042 c0x0000 (---------------) + I togura - 0x0022d9c4, // n0x1043 c0x0000 (---------------) + I tomi - 0x0020f404, // n0x1044 c0x0000 (---------------) + I ueda - 0x00255d44, // n0x1045 c0x0000 (---------------) + I wada - 0x002815c8, // n0x1046 c0x0000 (---------------) + I yamagata - 0x0020420a, // n0x1047 c0x0000 (---------------) + I yamanouchi - 0x0034a286, // n0x1048 c0x0000 (---------------) + I yasaka - 0x00350687, // n0x1049 c0x0000 (---------------) + I yasuoka - 0x00288747, // n0x104a c0x0000 (---------------) + I chijiwa - 0x00228c05, // n0x104b c0x0000 (---------------) + I futsu - 
0x00285bc4, // n0x104c c0x0000 (---------------) + I goto - 0x0028e206, // n0x104d c0x0000 (---------------) + I hasami - 0x003010c6, // n0x104e c0x0000 (---------------) + I hirado - 0x0023a783, // n0x104f c0x0000 (---------------) + I iki - 0x002f25c7, // n0x1050 c0x0000 (---------------) + I isahaya - 0x00331d48, // n0x1051 c0x0000 (---------------) + I kawatana - 0x002da94a, // n0x1052 c0x0000 (---------------) + I kuchinotsu - 0x002cb708, // n0x1053 c0x0000 (---------------) + I matsuura - 0x002e33c8, // n0x1054 c0x0000 (---------------) + I nagasaki - 0x003954c5, // n0x1055 c0x0000 (---------------) + I obama - 0x00374685, // n0x1056 c0x0000 (---------------) + I omura - 0x002b0c45, // n0x1057 c0x0000 (---------------) + I oseto - 0x00228fc6, // n0x1058 c0x0000 (---------------) + I saikai - 0x0023db86, // n0x1059 c0x0000 (---------------) + I sasebo - 0x00217ec5, // n0x105a c0x0000 (---------------) + I seihi - 0x00327a09, // n0x105b c0x0000 (---------------) + I shimabara - 0x002859cc, // n0x105c c0x0000 (---------------) + I shinkamigoto - 0x002352c7, // n0x105d c0x0000 (---------------) + I togitsu - 0x002980c8, // n0x105e c0x0000 (---------------) + I tsushima - 0x0028e845, // n0x105f c0x0000 (---------------) + I unzen - 0x00685804, // n0x1060 c0x0001 (---------------) ! 
I city - 0x002293c4, // n0x1061 c0x0000 (---------------) + I ando - 0x002af3c4, // n0x1062 c0x0000 (---------------) + I gose - 0x0020ef46, // n0x1063 c0x0000 (---------------) + I heguri - 0x0029fb4e, // n0x1064 c0x0000 (---------------) + I higashiyoshino - 0x0023d7c7, // n0x1065 c0x0000 (---------------) + I ikaruga - 0x0028d445, // n0x1066 c0x0000 (---------------) + I ikoma - 0x0029078c, // n0x1067 c0x0000 (---------------) + I kamikitayama - 0x002a8cc7, // n0x1068 c0x0000 (---------------) + I kanmaki - 0x00311707, // n0x1069 c0x0000 (---------------) + I kashiba - 0x003825c9, // n0x106a c0x0000 (---------------) + I kashihara - 0x00219889, // n0x106b c0x0000 (---------------) + I katsuragi - 0x00287b45, // n0x106c c0x0000 (---------------) + I kawai - 0x002f2788, // n0x106d c0x0000 (---------------) + I kawakami - 0x002bb049, // n0x106e c0x0000 (---------------) + I kawanishi - 0x002d8005, // n0x106f c0x0000 (---------------) + I koryo - 0x002ba208, // n0x1070 c0x0000 (---------------) + I kurotaki - 0x002c84c6, // n0x1071 c0x0000 (---------------) + I mitsue - 0x002d3946, // n0x1072 c0x0000 (---------------) + I miyake - 0x0031c044, // n0x1073 c0x0000 (---------------) + I nara - 0x00325fc8, // n0x1074 c0x0000 (---------------) + I nosegawa - 0x00228043, // n0x1075 c0x0000 (---------------) + I oji - 0x00207fc4, // n0x1076 c0x0000 (---------------) + I ouda - 0x003a2385, // n0x1077 c0x0000 (---------------) + I oyodo - 0x002dc647, // n0x1078 c0x0000 (---------------) + I sakurai - 0x003a3fc5, // n0x1079 c0x0000 (---------------) + I sango - 0x00282989, // n0x107a c0x0000 (---------------) + I shimoichi - 0x0025becd, // n0x107b c0x0000 (---------------) + I shimokitayama - 0x0027b106, // n0x107c c0x0000 (---------------) + I shinjo - 0x00262d04, // n0x107d c0x0000 (---------------) + I soni - 0x0029ca08, // n0x107e c0x0000 (---------------) + I takatori - 0x002782ca, // n0x107f c0x0000 (---------------) + I tawaramoto - 0x00218dc7, // n0x1080 c0x0000 
(---------------) + I tenkawa - 0x00320785, // n0x1081 c0x0000 (---------------) + I tenri - 0x00208003, // n0x1082 c0x0000 (---------------) + I uda - 0x0029f18e, // n0x1083 c0x0000 (---------------) + I yamatokoriyama - 0x0029098c, // n0x1084 c0x0000 (---------------) + I yamatotakada - 0x002f9507, // n0x1085 c0x0000 (---------------) + I yamazoe - 0x0029fd07, // n0x1086 c0x0000 (---------------) + I yoshino - 0x00201b83, // n0x1087 c0x0000 (---------------) + I aga - 0x00352245, // n0x1088 c0x0000 (---------------) + I agano - 0x002af3c5, // n0x1089 c0x0000 (---------------) + I gosen - 0x00298d08, // n0x108a c0x0000 (---------------) + I itoigawa - 0x00295a09, // n0x108b c0x0000 (---------------) + I izumozaki - 0x00294706, // n0x108c c0x0000 (---------------) + I joetsu - 0x0021a184, // n0x108d c0x0000 (---------------) + I kamo - 0x0022d646, // n0x108e c0x0000 (---------------) + I kariwa - 0x002032cb, // n0x108f c0x0000 (---------------) + I kashiwazaki - 0x002c730c, // n0x1090 c0x0000 (---------------) + I minamiuonuma - 0x002f4ec7, // n0x1091 c0x0000 (---------------) + I mitsuke - 0x002cc3c5, // n0x1092 c0x0000 (---------------) + I muika - 0x00381148, // n0x1093 c0x0000 (---------------) + I murakami - 0x002d1a45, // n0x1094 c0x0000 (---------------) + I myoko - 0x00302287, // n0x1095 c0x0000 (---------------) + I nagaoka - 0x002350c7, // n0x1096 c0x0000 (---------------) + I niigata - 0x0024f885, // n0x1097 c0x0000 (---------------) + I ojiya - 0x0020c183, // n0x1098 c0x0000 (---------------) + I omi - 0x00360b84, // n0x1099 c0x0000 (---------------) + I sado - 0x00201005, // n0x109a c0x0000 (---------------) + I sanjo - 0x002e7905, // n0x109b c0x0000 (---------------) + I seiro - 0x002e7906, // n0x109c c0x0000 (---------------) + I seirou - 0x00261e88, // n0x109d c0x0000 (---------------) + I sekikawa - 0x00311787, // n0x109e c0x0000 (---------------) + I shibata - 0x00376f46, // n0x109f c0x0000 (---------------) + I tagami - 0x00351ec6, // n0x10a0 
c0x0000 (---------------) + I tainai - 0x002e5886, // n0x10a1 c0x0000 (---------------) + I tochio - 0x0028f0c9, // n0x10a2 c0x0000 (---------------) + I tokamachi - 0x00207a47, // n0x10a3 c0x0000 (---------------) + I tsubame - 0x00294586, // n0x10a4 c0x0000 (---------------) + I tsunan - 0x002c7486, // n0x10a5 c0x0000 (---------------) + I uonuma - 0x0024f946, // n0x10a6 c0x0000 (---------------) + I yahiko - 0x002a97c5, // n0x10a7 c0x0000 (---------------) + I yoita - 0x00216dc6, // n0x10a8 c0x0000 (---------------) + I yuzawa - 0x0038cf85, // n0x10a9 c0x0000 (---------------) + I beppu - 0x002d3588, // n0x10aa c0x0000 (---------------) + I bungoono - 0x002942cb, // n0x10ab c0x0000 (---------------) + I bungotakada - 0x0028e006, // n0x10ac c0x0000 (---------------) + I hasama - 0x00288784, // n0x10ad c0x0000 (---------------) + I hiji - 0x0035c489, // n0x10ae c0x0000 (---------------) + I himeshima - 0x002a3e04, // n0x10af c0x0000 (---------------) + I hita - 0x002c8448, // n0x10b0 c0x0000 (---------------) + I kamitsue - 0x00286487, // n0x10b1 c0x0000 (---------------) + I kokonoe - 0x00284144, // n0x10b2 c0x0000 (---------------) + I kuju - 0x002b4608, // n0x10b3 c0x0000 (---------------) + I kunisaki - 0x002bc1c4, // n0x10b4 c0x0000 (---------------) + I kusu - 0x002a9804, // n0x10b5 c0x0000 (---------------) + I oita - 0x00289605, // n0x10b6 c0x0000 (---------------) + I saiki - 0x002fc5c6, // n0x10b7 c0x0000 (---------------) + I taketa - 0x002f1cc7, // n0x10b8 c0x0000 (---------------) + I tsukumi - 0x00244843, // n0x10b9 c0x0000 (---------------) + I usa - 0x0029ec45, // n0x10ba c0x0000 (---------------) + I usuki - 0x002be384, // n0x10bb c0x0000 (---------------) + I yufu - 0x0026a546, // n0x10bc c0x0000 (---------------) + I akaiwa - 0x002da888, // n0x10bd c0x0000 (---------------) + I asakuchi - 0x00331a85, // n0x10be c0x0000 (---------------) + I bizen - 0x00291349, // n0x10bf c0x0000 (---------------) + I hayashima - 0x0020b085, // n0x10c0 c0x0000 
(---------------) + I ibara - 0x002bdd88, // n0x10c1 c0x0000 (---------------) + I kagamino - 0x0030a1c7, // n0x10c2 c0x0000 (---------------) + I kasaoka - 0x0037e8c8, // n0x10c3 c0x0000 (---------------) + I kibichuo - 0x002b3947, // n0x10c4 c0x0000 (---------------) + I kumenan - 0x0035bec9, // n0x10c5 c0x0000 (---------------) + I kurashiki - 0x0022cbc6, // n0x10c6 c0x0000 (---------------) + I maniwa - 0x00345a46, // n0x10c7 c0x0000 (---------------) + I misaki - 0x0025e584, // n0x10c8 c0x0000 (---------------) + I nagi - 0x0029c845, // n0x10c9 c0x0000 (---------------) + I niimi - 0x002f038c, // n0x10ca c0x0000 (---------------) + I nishiawakura - 0x00278bc7, // n0x10cb c0x0000 (---------------) + I okayama - 0x00278fc7, // n0x10cc c0x0000 (---------------) + I satosho - 0x00288608, // n0x10cd c0x0000 (---------------) + I setouchi - 0x0027b106, // n0x10ce c0x0000 (---------------) + I shinjo - 0x002a0644, // n0x10cf c0x0000 (---------------) + I shoo - 0x00322544, // n0x10d0 c0x0000 (---------------) + I soja - 0x00281749, // n0x10d1 c0x0000 (---------------) + I takahashi - 0x002c2c06, // n0x10d2 c0x0000 (---------------) + I tamano - 0x0021ca47, // n0x10d3 c0x0000 (---------------) + I tsuyama - 0x002072c4, // n0x10d4 c0x0000 (---------------) + I wake - 0x002b1646, // n0x10d5 c0x0000 (---------------) + I yakage - 0x00309dc5, // n0x10d6 c0x0000 (---------------) + I aguni - 0x002a4107, // n0x10d7 c0x0000 (---------------) + I ginowan - 0x002c7e86, // n0x10d8 c0x0000 (---------------) + I ginoza - 0x0035ed89, // n0x10d9 c0x0000 (---------------) + I gushikami - 0x00280407, // n0x10da c0x0000 (---------------) + I haebaru - 0x00266d87, // n0x10db c0x0000 (---------------) + I higashi - 0x002a17c6, // n0x10dc c0x0000 (---------------) + I hirara - 0x00244285, // n0x10dd c0x0000 (---------------) + I iheya - 0x0027ef88, // n0x10de c0x0000 (---------------) + I ishigaki - 0x0021b2c8, // n0x10df c0x0000 (---------------) + I ishikawa - 0x0023b246, // n0x10e0 
c0x0000 (---------------) + I itoman - 0x00331ac5, // n0x10e1 c0x0000 (---------------) + I izena - 0x0026c506, // n0x10e2 c0x0000 (---------------) + I kadena - 0x00215ec3, // n0x10e3 c0x0000 (---------------) + I kin - 0x00298b89, // n0x10e4 c0x0000 (---------------) + I kitadaito - 0x002a75ce, // n0x10e5 c0x0000 (---------------) + I kitanakagusuku - 0x002b3648, // n0x10e6 c0x0000 (---------------) + I kumejima - 0x002a8f08, // n0x10e7 c0x0000 (---------------) + I kunigami - 0x0023b04b, // n0x10e8 c0x0000 (---------------) + I minamidaito - 0x00291586, // n0x10e9 c0x0000 (---------------) + I motobu - 0x00247bc4, // n0x10ea c0x0000 (---------------) + I nago - 0x00206384, // n0x10eb c0x0000 (---------------) + I naha - 0x002a76ca, // n0x10ec c0x0000 (---------------) + I nakagusuku - 0x002190c7, // n0x10ed c0x0000 (---------------) + I nakijin - 0x00294645, // n0x10ee c0x0000 (---------------) + I nanjo - 0x00210cc9, // n0x10ef c0x0000 (---------------) + I nishihara - 0x002b9405, // n0x10f0 c0x0000 (---------------) + I ogimi - 0x00395107, // n0x10f1 c0x0000 (---------------) + I okinawa - 0x00300704, // n0x10f2 c0x0000 (---------------) + I onna - 0x00381cc7, // n0x10f3 c0x0000 (---------------) + I shimoji - 0x0022d8c8, // n0x10f4 c0x0000 (---------------) + I taketomi - 0x002fd406, // n0x10f5 c0x0000 (---------------) + I tarama - 0x002fadc9, // n0x10f6 c0x0000 (---------------) + I tokashiki - 0x002b524a, // n0x10f7 c0x0000 (---------------) + I tomigusuku - 0x00219046, // n0x10f8 c0x0000 (---------------) + I tonaki - 0x00296806, // n0x10f9 c0x0000 (---------------) + I urasoe - 0x002ac685, // n0x10fa c0x0000 (---------------) + I uruma - 0x00372285, // n0x10fb c0x0000 (---------------) + I yaese - 0x00355487, // n0x10fc c0x0000 (---------------) + I yomitan - 0x0022ac48, // n0x10fd c0x0000 (---------------) + I yonabaru - 0x00309d08, // n0x10fe c0x0000 (---------------) + I yonaguni - 0x00240f06, // n0x10ff c0x0000 (---------------) + I zamami - 
0x00226fc5, // n0x1100 c0x0000 (---------------) + I abeno - 0x00247cce, // n0x1101 c0x0000 (---------------) + I chihayaakasaka - 0x0032c104, // n0x1102 c0x0000 (---------------) + I chuo - 0x0023b1c5, // n0x1103 c0x0000 (---------------) + I daito - 0x00277a09, // n0x1104 c0x0000 (---------------) + I fujiidera - 0x00250a88, // n0x1105 c0x0000 (---------------) + I habikino - 0x003a0586, // n0x1106 c0x0000 (---------------) + I hannan - 0x0029afcc, // n0x1107 c0x0000 (---------------) + I higashiosaka - 0x0029d990, // n0x1108 c0x0000 (---------------) + I higashisumiyoshi - 0x0029f78f, // n0x1109 c0x0000 (---------------) + I higashiyodogawa - 0x002a0748, // n0x110a c0x0000 (---------------) + I hirakata - 0x002c3847, // n0x110b c0x0000 (---------------) + I ibaraki - 0x00202445, // n0x110c c0x0000 (---------------) + I ikeda - 0x0027e805, // n0x110d c0x0000 (---------------) + I izumi - 0x002f1b49, // n0x110e c0x0000 (---------------) + I izumiotsu - 0x00295dc9, // n0x110f c0x0000 (---------------) + I izumisano - 0x00222846, // n0x1110 c0x0000 (---------------) + I kadoma - 0x002f9247, // n0x1111 c0x0000 (---------------) + I kaizuka - 0x00389f05, // n0x1112 c0x0000 (---------------) + I kanan - 0x0038e149, // n0x1113 c0x0000 (---------------) + I kashiwara - 0x0032d886, // n0x1114 c0x0000 (---------------) + I katano - 0x0035204d, // n0x1115 c0x0000 (---------------) + I kawachinagano - 0x002896c9, // n0x1116 c0x0000 (---------------) + I kishiwada - 0x002047c4, // n0x1117 c0x0000 (---------------) + I kita - 0x002b33c8, // n0x1118 c0x0000 (---------------) + I kumatori - 0x00398949, // n0x1119 c0x0000 (---------------) + I matsubara - 0x0034a446, // n0x111a c0x0000 (---------------) + I minato - 0x00278185, // n0x111b c0x0000 (---------------) + I minoh - 0x00345a46, // n0x111c c0x0000 (---------------) + I misaki - 0x0030de49, // n0x111d c0x0000 (---------------) + I moriguchi - 0x00309448, // n0x111e c0x0000 (---------------) + I neyagawa - 0x00210385, // 
n0x111f c0x0000 (---------------) + I nishi - 0x00261e04, // n0x1120 c0x0000 (---------------) + I nose - 0x0029b18b, // n0x1121 c0x0000 (---------------) + I osakasayama - 0x0034a305, // n0x1122 c0x0000 (---------------) + I sakai - 0x0022f286, // n0x1123 c0x0000 (---------------) + I sayama - 0x00283686, // n0x1124 c0x0000 (---------------) + I sennan - 0x002460c6, // n0x1125 c0x0000 (---------------) + I settsu - 0x003820cb, // n0x1126 c0x0000 (---------------) + I shijonawate - 0x00291449, // n0x1127 c0x0000 (---------------) + I shimamoto - 0x00218205, // n0x1128 c0x0000 (---------------) + I suita - 0x0037e687, // n0x1129 c0x0000 (---------------) + I tadaoka - 0x0023d286, // n0x112a c0x0000 (---------------) + I taishi - 0x003788c6, // n0x112b c0x0000 (---------------) + I tajiri - 0x00282848, // n0x112c c0x0000 (---------------) + I takaishi - 0x002fc6c9, // n0x112d c0x0000 (---------------) + I takatsuki - 0x0026ff8c, // n0x112e c0x0000 (---------------) + I tondabayashi - 0x0024a2c8, // n0x112f c0x0000 (---------------) + I toyonaka - 0x002500c6, // n0x1130 c0x0000 (---------------) + I toyono - 0x00341d43, // n0x1131 c0x0000 (---------------) + I yao - 0x00248746, // n0x1132 c0x0000 (---------------) + I ariake - 0x0027e4c5, // n0x1133 c0x0000 (---------------) + I arita - 0x0027d508, // n0x1134 c0x0000 (---------------) + I fukudomi - 0x002269c6, // n0x1135 c0x0000 (---------------) + I genkai - 0x002a4348, // n0x1136 c0x0000 (---------------) + I hamatama - 0x00349905, // n0x1137 c0x0000 (---------------) + I hizen - 0x0027c105, // n0x1138 c0x0000 (---------------) + I imari - 0x0030a448, // n0x1139 c0x0000 (---------------) + I kamimine - 0x002ea4c7, // n0x113a c0x0000 (---------------) + I kanzaki - 0x00376b87, // n0x113b c0x0000 (---------------) + I karatsu - 0x002a9907, // n0x113c c0x0000 (---------------) + I kashima - 0x0021d388, // n0x113d c0x0000 (---------------) + I kitagata - 0x002904c8, // n0x113e c0x0000 (---------------) + I kitahata - 
0x00240b06, // n0x113f c0x0000 (---------------) + I kiyama - 0x0030c047, // n0x1140 c0x0000 (---------------) + I kouhoku - 0x002adf87, // n0x1141 c0x0000 (---------------) + I kyuragi - 0x0027e38a, // n0x1142 c0x0000 (---------------) + I nishiarita - 0x00212e03, // n0x1143 c0x0000 (---------------) + I ogi - 0x002013c6, // n0x1144 c0x0000 (---------------) + I omachi - 0x00204345, // n0x1145 c0x0000 (---------------) + I ouchi - 0x00378a84, // n0x1146 c0x0000 (---------------) + I saga - 0x0027a7c9, // n0x1147 c0x0000 (---------------) + I shiroishi - 0x0035be44, // n0x1148 c0x0000 (---------------) + I taku - 0x0022e404, // n0x1149 c0x0000 (---------------) + I tara - 0x002a2144, // n0x114a c0x0000 (---------------) + I tosu - 0x0029fd0b, // n0x114b c0x0000 (---------------) + I yoshinogari - 0x00398ac7, // n0x114c c0x0000 (---------------) + I arakawa - 0x00247f05, // n0x114d c0x0000 (---------------) + I asaka - 0x00294148, // n0x114e c0x0000 (---------------) + I chichibu - 0x00278086, // n0x114f c0x0000 (---------------) + I fujimi - 0x00278088, // n0x1150 c0x0000 (---------------) + I fujimino - 0x0027ca46, // n0x1151 c0x0000 (---------------) + I fukaya - 0x0028ba45, // n0x1152 c0x0000 (---------------) + I hanno - 0x0028cec5, // n0x1153 c0x0000 (---------------) + I hanyu - 0x0028eb86, // n0x1154 c0x0000 (---------------) + I hasuda - 0x0028f748, // n0x1155 c0x0000 (---------------) + I hatogaya - 0x00290248, // n0x1156 c0x0000 (---------------) + I hatoyama - 0x0027a986, // n0x1157 c0x0000 (---------------) + I hidaka - 0x00293f8f, // n0x1158 c0x0000 (---------------) + I higashichichibu - 0x00298650, // n0x1159 c0x0000 (---------------) + I higashimatsuyama - 0x0038d6c5, // n0x115a c0x0000 (---------------) + I honjo - 0x002012c3, // n0x115b c0x0000 (---------------) + I ina - 0x00251c85, // n0x115c c0x0000 (---------------) + I iruma - 0x002fdb88, // n0x115d c0x0000 (---------------) + I iwatsuki - 0x00295cc9, // n0x115e c0x0000 (---------------) + I 
kamiizumi - 0x002e8388, // n0x115f c0x0000 (---------------) + I kamikawa - 0x0034a6c8, // n0x1160 c0x0000 (---------------) + I kamisato - 0x00206648, // n0x1161 c0x0000 (---------------) + I kasukabe - 0x00398b87, // n0x1162 c0x0000 (---------------) + I kawagoe - 0x00277d49, // n0x1163 c0x0000 (---------------) + I kawaguchi - 0x002a4548, // n0x1164 c0x0000 (---------------) + I kawajima - 0x002b0984, // n0x1165 c0x0000 (---------------) + I kazo - 0x002a1fc8, // n0x1166 c0x0000 (---------------) + I kitamoto - 0x0028c249, // n0x1167 c0x0000 (---------------) + I koshigaya - 0x0030d747, // n0x1168 c0x0000 (---------------) + I kounosu - 0x002a7544, // n0x1169 c0x0000 (---------------) + I kuki - 0x00207548, // n0x116a c0x0000 (---------------) + I kumagaya - 0x0024458a, // n0x116b c0x0000 (---------------) + I matsubushi - 0x002d8546, // n0x116c c0x0000 (---------------) + I minano - 0x0022da46, // n0x116d c0x0000 (---------------) + I misato - 0x0021c1c9, // n0x116e c0x0000 (---------------) + I miyashiro - 0x0029dbc7, // n0x116f c0x0000 (---------------) + I miyoshi - 0x002c8848, // n0x1170 c0x0000 (---------------) + I moroyama - 0x0038d208, // n0x1171 c0x0000 (---------------) + I nagatoro - 0x00207148, // n0x1172 c0x0000 (---------------) + I namegawa - 0x003501c5, // n0x1173 c0x0000 (---------------) + I niiza - 0x00373245, // n0x1174 c0x0000 (---------------) + I ogano - 0x00230545, // n0x1175 c0x0000 (---------------) + I ogawa - 0x002af385, // n0x1176 c0x0000 (---------------) + I ogose - 0x002eba47, // n0x1177 c0x0000 (---------------) + I okegawa - 0x0020c185, // n0x1178 c0x0000 (---------------) + I omiya - 0x002ba2c5, // n0x1179 c0x0000 (---------------) + I otaki - 0x00342606, // n0x117a c0x0000 (---------------) + I ranzan - 0x002e82c7, // n0x117b c0x0000 (---------------) + I ryokami - 0x00373887, // n0x117c c0x0000 (---------------) + I saitama - 0x00244886, // n0x117d c0x0000 (---------------) + I sakado - 0x002cd945, // n0x117e c0x0000 
(---------------) + I satte - 0x0022f286, // n0x117f c0x0000 (---------------) + I sayama - 0x0024e205, // n0x1180 c0x0000 (---------------) + I shiki - 0x002ac088, // n0x1181 c0x0000 (---------------) + I shiraoka - 0x002d6c04, // n0x1182 c0x0000 (---------------) + I soka - 0x002c1446, // n0x1183 c0x0000 (---------------) + I sugito - 0x00263b04, // n0x1184 c0x0000 (---------------) + I toda - 0x00226188, // n0x1185 c0x0000 (---------------) + I tokigawa - 0x00383dca, // n0x1186 c0x0000 (---------------) + I tokorozawa - 0x0027becc, // n0x1187 c0x0000 (---------------) + I tsurugashima - 0x0020a605, // n0x1188 c0x0000 (---------------) + I urawa - 0x00203686, // n0x1189 c0x0000 (---------------) + I warabi - 0x00270146, // n0x118a c0x0000 (---------------) + I yashio - 0x00354c86, // n0x118b c0x0000 (---------------) + I yokoze - 0x00250144, // n0x118c c0x0000 (---------------) + I yono - 0x0030a085, // n0x118d c0x0000 (---------------) + I yorii - 0x0027c887, // n0x118e c0x0000 (---------------) + I yoshida - 0x0029dc49, // n0x118f c0x0000 (---------------) + I yoshikawa - 0x002a5907, // n0x1190 c0x0000 (---------------) + I yoshimi - 0x00685804, // n0x1191 c0x0001 (---------------) ! I city - 0x00685804, // n0x1192 c0x0001 (---------------) ! 
I city - 0x0030dbc5, // n0x1193 c0x0000 (---------------) + I aisho - 0x0022a804, // n0x1194 c0x0000 (---------------) + I gamo - 0x0029a98a, // n0x1195 c0x0000 (---------------) + I higashiomi - 0x00277f06, // n0x1196 c0x0000 (---------------) + I hikone - 0x0034a644, // n0x1197 c0x0000 (---------------) + I koka - 0x00203b05, // n0x1198 c0x0000 (---------------) + I konan - 0x0033b985, // n0x1199 c0x0000 (---------------) + I kosei - 0x00300fc4, // n0x119a c0x0000 (---------------) + I koto - 0x002832c7, // n0x119b c0x0000 (---------------) + I kusatsu - 0x0020b007, // n0x119c c0x0000 (---------------) + I maibara - 0x002c7a48, // n0x119d c0x0000 (---------------) + I moriyama - 0x0026b048, // n0x119e c0x0000 (---------------) + I nagahama - 0x00210389, // n0x119f c0x0000 (---------------) + I nishiazai - 0x00257ec8, // n0x11a0 c0x0000 (---------------) + I notogawa - 0x0029ab4b, // n0x11a1 c0x0000 (---------------) + I omihachiman - 0x0021d104, // n0x11a2 c0x0000 (---------------) + I otsu - 0x002ff445, // n0x11a3 c0x0000 (---------------) + I ritto - 0x00280305, // n0x11a4 c0x0000 (---------------) + I ryuoh - 0x002a9889, // n0x11a5 c0x0000 (---------------) + I takashima - 0x002fc6c9, // n0x11a6 c0x0000 (---------------) + I takatsuki - 0x0035c388, // n0x11a7 c0x0000 (---------------) + I torahime - 0x0025c288, // n0x11a8 c0x0000 (---------------) + I toyosato - 0x0027eb84, // n0x11a9 c0x0000 (---------------) + I yasu - 0x002a2305, // n0x11aa c0x0000 (---------------) + I akagi - 0x00201f03, // n0x11ab c0x0000 (---------------) + I ama - 0x0021d0c5, // n0x11ac c0x0000 (---------------) + I gotsu - 0x002a9106, // n0x11ad c0x0000 (---------------) + I hamada - 0x0029584c, // n0x11ae c0x0000 (---------------) + I higashiizumo - 0x0021b346, // n0x11af c0x0000 (---------------) + I hikawa - 0x0024e246, // n0x11b0 c0x0000 (---------------) + I hikimi - 0x00295a05, // n0x11b1 c0x0000 (---------------) + I izumo - 0x0031ad08, // n0x11b2 c0x0000 (---------------) + I 
kakinoki - 0x002b37c6, // n0x11b3 c0x0000 (---------------) + I masuda - 0x00399246, // n0x11b4 c0x0000 (---------------) + I matsue - 0x0022da46, // n0x11b5 c0x0000 (---------------) + I misato - 0x0021ef4c, // n0x11b6 c0x0000 (---------------) + I nishinoshima - 0x002db1c4, // n0x11b7 c0x0000 (---------------) + I ohda - 0x002e59ca, // n0x11b8 c0x0000 (---------------) + I okinoshima - 0x003a0748, // n0x11b9 c0x0000 (---------------) + I okuizumo - 0x00295687, // n0x11ba c0x0000 (---------------) + I shimane - 0x00253786, // n0x11bb c0x0000 (---------------) + I tamayu - 0x002947c7, // n0x11bc c0x0000 (---------------) + I tsuwano - 0x002e0245, // n0x11bd c0x0000 (---------------) + I unnan - 0x00324a06, // n0x11be c0x0000 (---------------) + I yakumo - 0x0034db86, // n0x11bf c0x0000 (---------------) + I yasugi - 0x00376a47, // n0x11c0 c0x0000 (---------------) + I yatsuka - 0x0022e444, // n0x11c1 c0x0000 (---------------) + I arai - 0x0023e305, // n0x11c2 c0x0000 (---------------) + I atami - 0x00277a04, // n0x11c3 c0x0000 (---------------) + I fuji - 0x0029ba07, // n0x11c4 c0x0000 (---------------) + I fujieda - 0x00277c48, // n0x11c5 c0x0000 (---------------) + I fujikawa - 0x002787ca, // n0x11c6 c0x0000 (---------------) + I fujinomiya - 0x0027fb07, // n0x11c7 c0x0000 (---------------) + I fukuroi - 0x0023b3c7, // n0x11c8 c0x0000 (---------------) + I gotemba - 0x002c37c7, // n0x11c9 c0x0000 (---------------) + I haibara - 0x002d1b89, // n0x11ca c0x0000 (---------------) + I hamamatsu - 0x0029584a, // n0x11cb c0x0000 (---------------) + I higashiizu - 0x00223083, // n0x11cc c0x0000 (---------------) + I ito - 0x0022e345, // n0x11cd c0x0000 (---------------) + I iwata - 0x00214503, // n0x11ce c0x0000 (---------------) + I izu - 0x002fafc9, // n0x11cf c0x0000 (---------------) + I izunokuni - 0x002b4b08, // n0x11d0 c0x0000 (---------------) + I kakegawa - 0x002ac207, // n0x11d1 c0x0000 (---------------) + I kannami - 0x002e8489, // n0x11d2 c0x0000 
(---------------) + I kawanehon - 0x0021b3c6, // n0x11d3 c0x0000 (---------------) + I kawazu - 0x003a4e88, // n0x11d4 c0x0000 (---------------) + I kikugawa - 0x002ddc45, // n0x11d5 c0x0000 (---------------) + I kosai - 0x003539ca, // n0x11d6 c0x0000 (---------------) + I makinohara - 0x002cee49, // n0x11d7 c0x0000 (---------------) + I matsuzaki - 0x0026fc49, // n0x11d8 c0x0000 (---------------) + I minamiizu - 0x002c0d47, // n0x11d9 c0x0000 (---------------) + I mishima - 0x002a9b49, // n0x11da c0x0000 (---------------) + I morimachi - 0x002143c8, // n0x11db c0x0000 (---------------) + I nishiizu - 0x002ea1c6, // n0x11dc c0x0000 (---------------) + I numazu - 0x00374248, // n0x11dd c0x0000 (---------------) + I omaezaki - 0x00212807, // n0x11de c0x0000 (---------------) + I shimada - 0x002231c7, // n0x11df c0x0000 (---------------) + I shimizu - 0x002c7807, // n0x11e0 c0x0000 (---------------) + I shimoda - 0x002b13c8, // n0x11e1 c0x0000 (---------------) + I shizuoka - 0x002e9ec6, // n0x11e2 c0x0000 (---------------) + I susono - 0x00244345, // n0x11e3 c0x0000 (---------------) + I yaizu - 0x0027c887, // n0x11e4 c0x0000 (---------------) + I yoshida - 0x00296308, // n0x11e5 c0x0000 (---------------) + I ashikaga - 0x00344784, // n0x11e6 c0x0000 (---------------) + I bato - 0x00283a44, // n0x11e7 c0x0000 (---------------) + I haga - 0x002f2987, // n0x11e8 c0x0000 (---------------) + I ichikai - 0x002ada87, // n0x11e9 c0x0000 (---------------) + I iwafune - 0x002baeca, // n0x11ea c0x0000 (---------------) + I kaminokawa - 0x002ea146, // n0x11eb c0x0000 (---------------) + I kanuma - 0x002f938a, // n0x11ec c0x0000 (---------------) + I karasuyama - 0x002b96c7, // n0x11ed c0x0000 (---------------) + I kuroiso - 0x0032bec7, // n0x11ee c0x0000 (---------------) + I mashiko - 0x00241004, // n0x11ef c0x0000 (---------------) + I mibu - 0x00259384, // n0x11f0 c0x0000 (---------------) + I moka - 0x00225406, // n0x11f1 c0x0000 (---------------) + I motegi - 0x00328b84, 
// n0x11f2 c0x0000 (---------------) + I nasu - 0x00328b8c, // n0x11f3 c0x0000 (---------------) + I nasushiobara - 0x00202c85, // n0x11f4 c0x0000 (---------------) + I nikko - 0x00216409, // n0x11f5 c0x0000 (---------------) + I nishikata - 0x00279684, // n0x11f6 c0x0000 (---------------) + I nogi - 0x002a0705, // n0x11f7 c0x0000 (---------------) + I ohira - 0x00278248, // n0x11f8 c0x0000 (---------------) + I ohtawara - 0x0025b745, // n0x11f9 c0x0000 (---------------) + I oyama - 0x002dc646, // n0x11fa c0x0000 (---------------) + I sakura - 0x0020b904, // n0x11fb c0x0000 (---------------) + I sano - 0x00267e8a, // n0x11fc c0x0000 (---------------) + I shimotsuke - 0x0029d206, // n0x11fd c0x0000 (---------------) + I shioya - 0x002510ca, // n0x11fe c0x0000 (---------------) + I takanezawa - 0x00344807, // n0x11ff c0x0000 (---------------) + I tochigi - 0x0028f585, // n0x1200 c0x0000 (---------------) + I tsuga - 0x0021ea05, // n0x1201 c0x0000 (---------------) + I ujiie - 0x00228c4a, // n0x1202 c0x0000 (---------------) + I utsunomiya - 0x002a1d85, // n0x1203 c0x0000 (---------------) + I yaita - 0x002a0306, // n0x1204 c0x0000 (---------------) + I aizumi - 0x00203c84, // n0x1205 c0x0000 (---------------) + I anan - 0x002afa06, // n0x1206 c0x0000 (---------------) + I ichiba - 0x00355545, // n0x1207 c0x0000 (---------------) + I itano - 0x00226a86, // n0x1208 c0x0000 (---------------) + I kainan - 0x002ab80c, // n0x1209 c0x0000 (---------------) + I komatsushima - 0x002c89ca, // n0x120a c0x0000 (---------------) + I matsushige - 0x002806c4, // n0x120b c0x0000 (---------------) + I mima - 0x00223406, // n0x120c c0x0000 (---------------) + I minami - 0x0029dbc7, // n0x120d c0x0000 (---------------) + I miyoshi - 0x002cbb04, // n0x120e c0x0000 (---------------) + I mugi - 0x002b0508, // n0x120f c0x0000 (---------------) + I nakagawa - 0x00383cc6, // n0x1210 c0x0000 (---------------) + I naruto - 0x00247b49, // n0x1211 c0x0000 (---------------) + I sanagochi - 
0x002cf749, // n0x1212 c0x0000 (---------------) + I shishikui - 0x00290009, // n0x1213 c0x0000 (---------------) + I tokushima - 0x0036b1c6, // n0x1214 c0x0000 (---------------) + I wajiki - 0x00212906, // n0x1215 c0x0000 (---------------) + I adachi - 0x00374387, // n0x1216 c0x0000 (---------------) + I akiruno - 0x00327948, // n0x1217 c0x0000 (---------------) + I akishima - 0x00212709, // n0x1218 c0x0000 (---------------) + I aogashima - 0x00398ac7, // n0x1219 c0x0000 (---------------) + I arakawa - 0x002b5e86, // n0x121a c0x0000 (---------------) + I bunkyo - 0x002ff0c7, // n0x121b c0x0000 (---------------) + I chiyoda - 0x002d9745, // n0x121c c0x0000 (---------------) + I chofu - 0x0032c104, // n0x121d c0x0000 (---------------) + I chuo - 0x00373f47, // n0x121e c0x0000 (---------------) + I edogawa - 0x002be405, // n0x121f c0x0000 (---------------) + I fuchu - 0x00289545, // n0x1220 c0x0000 (---------------) + I fussa - 0x002fea47, // n0x1221 c0x0000 (---------------) + I hachijo - 0x0024f748, // n0x1222 c0x0000 (---------------) + I hachioji - 0x003810c6, // n0x1223 c0x0000 (---------------) + I hamura - 0x0029768d, // n0x1224 c0x0000 (---------------) + I higashikurume - 0x00298f0f, // n0x1225 c0x0000 (---------------) + I higashimurayama - 0x0029efcd, // n0x1226 c0x0000 (---------------) + I higashiyamato - 0x0020ee44, // n0x1227 c0x0000 (---------------) + I hino - 0x0023a386, // n0x1228 c0x0000 (---------------) + I hinode - 0x002d08c8, // n0x1229 c0x0000 (---------------) + I hinohara - 0x00324c85, // n0x122a c0x0000 (---------------) + I inagi - 0x0027e548, // n0x122b c0x0000 (---------------) + I itabashi - 0x0021854a, // n0x122c c0x0000 (---------------) + I katsushika - 0x002047c4, // n0x122d c0x0000 (---------------) + I kita - 0x002abbc6, // n0x122e c0x0000 (---------------) + I kiyose - 0x0039da87, // n0x122f c0x0000 (---------------) + I kodaira - 0x0021fc47, // n0x1230 c0x0000 (---------------) + I koganei - 0x002a2509, // n0x1231 c0x0000 
(---------------) + I kokubunji - 0x00374205, // n0x1232 c0x0000 (---------------) + I komae - 0x00300fc4, // n0x1233 c0x0000 (---------------) + I koto - 0x0032f0ca, // n0x1234 c0x0000 (---------------) + I kouzushima - 0x002b4d09, // n0x1235 c0x0000 (---------------) + I kunitachi - 0x002a9c47, // n0x1236 c0x0000 (---------------) + I machida - 0x00297946, // n0x1237 c0x0000 (---------------) + I meguro - 0x0034a446, // n0x1238 c0x0000 (---------------) + I minato - 0x002a2246, // n0x1239 c0x0000 (---------------) + I mitaka - 0x0035d706, // n0x123a c0x0000 (---------------) + I mizuho - 0x002ceb0f, // n0x123b c0x0000 (---------------) + I musashimurayama - 0x002d0789, // n0x123c c0x0000 (---------------) + I musashino - 0x003288c6, // n0x123d c0x0000 (---------------) + I nakano - 0x0024bc86, // n0x123e c0x0000 (---------------) + I nerima - 0x00352c89, // n0x123f c0x0000 (---------------) + I ogasawara - 0x0030c147, // n0x1240 c0x0000 (---------------) + I okutama - 0x00213403, // n0x1241 c0x0000 (---------------) + I ome - 0x00209f06, // n0x1242 c0x0000 (---------------) + I oshima - 0x00201103, // n0x1243 c0x0000 (---------------) + I ota - 0x0024ce08, // n0x1244 c0x0000 (---------------) + I setagaya - 0x002fef07, // n0x1245 c0x0000 (---------------) + I shibuya - 0x002a0949, // n0x1246 c0x0000 (---------------) + I shinagawa - 0x0027f2c8, // n0x1247 c0x0000 (---------------) + I shinjuku - 0x00376cc8, // n0x1248 c0x0000 (---------------) + I suginami - 0x00217b46, // n0x1249 c0x0000 (---------------) + I sumida - 0x00227189, // n0x124a c0x0000 (---------------) + I tachikawa - 0x00235205, // n0x124b c0x0000 (---------------) + I taito - 0x00253784, // n0x124c c0x0000 (---------------) + I tama - 0x0024a607, // n0x124d c0x0000 (---------------) + I toshima - 0x0024c005, // n0x124e c0x0000 (---------------) + I chizu - 0x0020ee44, // n0x124f c0x0000 (---------------) + I hino - 0x00247fc8, // n0x1250 c0x0000 (---------------) + I kawahara - 0x00219384, // 
n0x1251 c0x0000 (---------------) + I koge - 0x00304587, // n0x1252 c0x0000 (---------------) + I kotoura - 0x0036edc6, // n0x1253 c0x0000 (---------------) + I misasa - 0x002e6845, // n0x1254 c0x0000 (---------------) + I nanbu - 0x00272c08, // n0x1255 c0x0000 (---------------) + I nichinan - 0x0034a30b, // n0x1256 c0x0000 (---------------) + I sakaiminato - 0x002f7a87, // n0x1257 c0x0000 (---------------) + I tottori - 0x00228ec6, // n0x1258 c0x0000 (---------------) + I wakasa - 0x002c2304, // n0x1259 c0x0000 (---------------) + I yazu - 0x002559c6, // n0x125a c0x0000 (---------------) + I yonago - 0x002c0b05, // n0x125b c0x0000 (---------------) + I asahi - 0x002be405, // n0x125c c0x0000 (---------------) + I fuchu - 0x0027ed09, // n0x125d c0x0000 (---------------) + I fukumitsu - 0x00282ec9, // n0x125e c0x0000 (---------------) + I funahashi - 0x00223204, // n0x125f c0x0000 (---------------) + I himi - 0x00223245, // n0x1260 c0x0000 (---------------) + I imizu - 0x00223445, // n0x1261 c0x0000 (---------------) + I inami - 0x00353846, // n0x1262 c0x0000 (---------------) + I johana - 0x002f2888, // n0x1263 c0x0000 (---------------) + I kamiichi - 0x002b8d46, // n0x1264 c0x0000 (---------------) + I kurobe - 0x00331b8b, // n0x1265 c0x0000 (---------------) + I nakaniikawa - 0x0030078a, // n0x1266 c0x0000 (---------------) + I namerikawa - 0x002fad05, // n0x1267 c0x0000 (---------------) + I nanto - 0x0028cf46, // n0x1268 c0x0000 (---------------) + I nyuzen - 0x002f1445, // n0x1269 c0x0000 (---------------) + I oyabe - 0x002182c5, // n0x126a c0x0000 (---------------) + I taira - 0x00290647, // n0x126b c0x0000 (---------------) + I takaoka - 0x00201dc8, // n0x126c c0x0000 (---------------) + I tateyama - 0x00257f44, // n0x126d c0x0000 (---------------) + I toga - 0x002da706, // n0x126e c0x0000 (---------------) + I tonami - 0x002902c6, // n0x126f c0x0000 (---------------) + I toyama - 0x00214587, // n0x1270 c0x0000 (---------------) + I unazuki - 0x002ffa44, // 
n0x1271 c0x0000 (---------------) + I uozu - 0x0027d386, // n0x1272 c0x0000 (---------------) + I yamada - 0x0023f3c5, // n0x1273 c0x0000 (---------------) + I arida - 0x0023f3c9, // n0x1274 c0x0000 (---------------) + I aridagawa - 0x00212b04, // n0x1275 c0x0000 (---------------) + I gobo - 0x00285d49, // n0x1276 c0x0000 (---------------) + I hashimoto - 0x0027a986, // n0x1277 c0x0000 (---------------) + I hidaka - 0x002bb588, // n0x1278 c0x0000 (---------------) + I hirogawa - 0x00223445, // n0x1279 c0x0000 (---------------) + I inami - 0x00288845, // n0x127a c0x0000 (---------------) + I iwade - 0x00226a86, // n0x127b c0x0000 (---------------) + I kainan - 0x0026fe89, // n0x127c c0x0000 (---------------) + I kamitonda - 0x00219889, // n0x127d c0x0000 (---------------) + I katsuragi - 0x0024e2c6, // n0x127e c0x0000 (---------------) + I kimino - 0x00250b88, // n0x127f c0x0000 (---------------) + I kinokawa - 0x0025c008, // n0x1280 c0x0000 (---------------) + I kitayama - 0x002f1404, // n0x1281 c0x0000 (---------------) + I koya - 0x0035dc04, // n0x1282 c0x0000 (---------------) + I koza - 0x0035dc08, // n0x1283 c0x0000 (---------------) + I kozagawa - 0x00318e08, // n0x1284 c0x0000 (---------------) + I kudoyama - 0x002b0e09, // n0x1285 c0x0000 (---------------) + I kushimoto - 0x002a9086, // n0x1286 c0x0000 (---------------) + I mihama - 0x0022da46, // n0x1287 c0x0000 (---------------) + I misato - 0x003165cd, // n0x1288 c0x0000 (---------------) + I nachikatsuura - 0x0022ff06, // n0x1289 c0x0000 (---------------) + I shingu - 0x002a3889, // n0x128a c0x0000 (---------------) + I shirahama - 0x00201585, // n0x128b c0x0000 (---------------) + I taiji - 0x003087c6, // n0x128c c0x0000 (---------------) + I tanabe - 0x00227348, // n0x128d c0x0000 (---------------) + I wakayama - 0x00313b45, // n0x128e c0x0000 (---------------) + I yuasa - 0x002adfc4, // n0x128f c0x0000 (---------------) + I yura - 0x002c0b05, // n0x1290 c0x0000 (---------------) + I asahi - 
0x00282548, // n0x1291 c0x0000 (---------------) + I funagata - 0x0029a749, // n0x1292 c0x0000 (---------------) + I higashine - 0x00277ac4, // n0x1293 c0x0000 (---------------) + I iide - 0x0034f546, // n0x1294 c0x0000 (---------------) + I kahoku - 0x0025b60a, // n0x1295 c0x0000 (---------------) + I kaminoyama - 0x002c91c8, // n0x1296 c0x0000 (---------------) + I kaneyama - 0x002bb049, // n0x1297 c0x0000 (---------------) + I kawanishi - 0x0029504a, // n0x1298 c0x0000 (---------------) + I mamurogawa - 0x002e8406, // n0x1299 c0x0000 (---------------) + I mikawa - 0x002990c8, // n0x129a c0x0000 (---------------) + I murayama - 0x00302045, // n0x129b c0x0000 (---------------) + I nagai - 0x002cb588, // n0x129c c0x0000 (---------------) + I nakayama - 0x002b3a45, // n0x129d c0x0000 (---------------) + I nanyo - 0x0021b289, // n0x129e c0x0000 (---------------) + I nishikawa - 0x00361e49, // n0x129f c0x0000 (---------------) + I obanazawa - 0x00202d82, // n0x12a0 c0x0000 (---------------) + I oe - 0x002a7bc5, // n0x12a1 c0x0000 (---------------) + I oguni - 0x00270cc6, // n0x12a2 c0x0000 (---------------) + I ohkura - 0x0027a8c7, // n0x12a3 c0x0000 (---------------) + I oishida - 0x00378a85, // n0x12a4 c0x0000 (---------------) + I sagae - 0x002f7386, // n0x12a5 c0x0000 (---------------) + I sakata - 0x00313c08, // n0x12a6 c0x0000 (---------------) + I sakegawa - 0x0027b106, // n0x12a7 c0x0000 (---------------) + I shinjo - 0x002cfd49, // n0x12a8 c0x0000 (---------------) + I shirataka - 0x002790c6, // n0x12a9 c0x0000 (---------------) + I shonai - 0x002826c8, // n0x12aa c0x0000 (---------------) + I takahata - 0x002aa345, // n0x12ab c0x0000 (---------------) + I tendo - 0x0026f486, // n0x12ac c0x0000 (---------------) + I tozawa - 0x003542c8, // n0x12ad c0x0000 (---------------) + I tsuruoka - 0x002815c8, // n0x12ae c0x0000 (---------------) + I yamagata - 0x003a3a08, // n0x12af c0x0000 (---------------) + I yamanobe - 0x00366b88, // n0x12b0 c0x0000 
(---------------) + I yonezawa - 0x00216dc4, // n0x12b1 c0x0000 (---------------) + I yuza - 0x0022e983, // n0x12b2 c0x0000 (---------------) + I abu - 0x002cff84, // n0x12b3 c0x0000 (---------------) + I hagi - 0x0022d5c6, // n0x12b4 c0x0000 (---------------) + I hikari - 0x002d9784, // n0x12b5 c0x0000 (---------------) + I hofu - 0x002a8e47, // n0x12b6 c0x0000 (---------------) + I iwakuni - 0x00399149, // n0x12b7 c0x0000 (---------------) + I kudamatsu - 0x002c1b05, // n0x12b8 c0x0000 (---------------) + I mitou - 0x0038d206, // n0x12b9 c0x0000 (---------------) + I nagato - 0x00209f06, // n0x12ba c0x0000 (---------------) + I oshima - 0x00261ccb, // n0x12bb c0x0000 (---------------) + I shimonoseki - 0x002fac46, // n0x12bc c0x0000 (---------------) + I shunan - 0x00319106, // n0x12bd c0x0000 (---------------) + I tabuse - 0x0022db48, // n0x12be c0x0000 (---------------) + I tokuyama - 0x00264186, // n0x12bf c0x0000 (---------------) + I toyota - 0x0028f383, // n0x12c0 c0x0000 (---------------) + I ube - 0x0020d243, // n0x12c1 c0x0000 (---------------) + I yuu - 0x0032c104, // n0x12c2 c0x0000 (---------------) + I chuo - 0x00236985, // n0x12c3 c0x0000 (---------------) + I doshi - 0x002ae147, // n0x12c4 c0x0000 (---------------) + I fuefuki - 0x00277c48, // n0x12c5 c0x0000 (---------------) + I fujikawa - 0x00277c4f, // n0x12c6 c0x0000 (---------------) + I fujikawaguchiko - 0x0027c78b, // n0x12c7 c0x0000 (---------------) + I fujiyoshida - 0x002f2688, // n0x12c8 c0x0000 (---------------) + I hayakawa - 0x0034f5c6, // n0x12c9 c0x0000 (---------------) + I hokuto - 0x0026380e, // n0x12ca c0x0000 (---------------) + I ichikawamisato - 0x00226a83, // n0x12cb c0x0000 (---------------) + I kai - 0x0023fec4, // n0x12cc c0x0000 (---------------) + I kofu - 0x002fabc5, // n0x12cd c0x0000 (---------------) + I koshu - 0x00348146, // n0x12ce c0x0000 (---------------) + I kosuge - 0x0028e30b, // n0x12cf c0x0000 (---------------) + I minami-alps - 0x00291986, // n0x12d0 
c0x0000 (---------------) + I minobu - 0x00216049, // n0x12d1 c0x0000 (---------------) + I nakamichi - 0x002e6845, // n0x12d2 c0x0000 (---------------) + I nanbu - 0x0037fd08, // n0x12d3 c0x0000 (---------------) + I narusawa - 0x0020d5c8, // n0x12d4 c0x0000 (---------------) + I nirasaki - 0x0021974c, // n0x12d5 c0x0000 (---------------) + I nishikatsura - 0x0029fd46, // n0x12d6 c0x0000 (---------------) + I oshino - 0x0021d106, // n0x12d7 c0x0000 (---------------) + I otsuki - 0x002b2f45, // n0x12d8 c0x0000 (---------------) + I showa - 0x00289988, // n0x12d9 c0x0000 (---------------) + I tabayama - 0x0027bec5, // n0x12da c0x0000 (---------------) + I tsuru - 0x00385888, // n0x12db c0x0000 (---------------) + I uenohara - 0x0029f40a, // n0x12dc c0x0000 (---------------) + I yamanakako - 0x002a2d49, // n0x12dd c0x0000 (---------------) + I yamanashi - 0x00685804, // n0x12de c0x0001 (---------------) ! I city - 0x2d60ce42, // n0x12df c0x00b5 (n0x12e0-n0x12e1) o I co - 0x000fe108, // n0x12e0 c0x0000 (---------------) + blogspot - 0x00233243, // n0x12e1 c0x0000 (---------------) + I com - 0x00239103, // n0x12e2 c0x0000 (---------------) + I edu - 0x0027d903, // n0x12e3 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x12e4 c0x0000 (---------------) + I mil - 0x00223b43, // n0x12e5 c0x0000 (---------------) + I net - 0x00228743, // n0x12e6 c0x0000 (---------------) + I org - 0x00331a83, // n0x12e7 c0x0000 (---------------) + I biz - 0x00233243, // n0x12e8 c0x0000 (---------------) + I com - 0x00239103, // n0x12e9 c0x0000 (---------------) + I edu - 0x0027d903, // n0x12ea c0x0000 (---------------) + I gov - 0x00201844, // n0x12eb c0x0000 (---------------) + I info - 0x00223b43, // n0x12ec c0x0000 (---------------) + I net - 0x00228743, // n0x12ed c0x0000 (---------------) + I org - 0x00233c03, // n0x12ee c0x0000 (---------------) + I ass - 0x002d5e84, // n0x12ef c0x0000 (---------------) + I asso - 0x00233243, // n0x12f0 c0x0000 (---------------) + I com - 
0x0023c344, // n0x12f1 c0x0000 (---------------) + I coop - 0x00239103, // n0x12f2 c0x0000 (---------------) + I edu - 0x002aebc4, // n0x12f3 c0x0000 (---------------) + I gouv - 0x0027d903, // n0x12f4 c0x0000 (---------------) + I gov - 0x00378d47, // n0x12f5 c0x0000 (---------------) + I medecin - 0x00207dc3, // n0x12f6 c0x0000 (---------------) + I mil - 0x00201383, // n0x12f7 c0x0000 (---------------) + I nom - 0x0035f0c8, // n0x12f8 c0x0000 (---------------) + I notaires - 0x00228743, // n0x12f9 c0x0000 (---------------) + I org - 0x0034af0b, // n0x12fa c0x0000 (---------------) + I pharmaciens - 0x002e0bc3, // n0x12fb c0x0000 (---------------) + I prd - 0x00246a06, // n0x12fc c0x0000 (---------------) + I presse - 0x00200c82, // n0x12fd c0x0000 (---------------) + I tm - 0x002d32cb, // n0x12fe c0x0000 (---------------) + I veterinaire - 0x00239103, // n0x12ff c0x0000 (---------------) + I edu - 0x0027d903, // n0x1300 c0x0000 (---------------) + I gov - 0x00223b43, // n0x1301 c0x0000 (---------------) + I net - 0x00228743, // n0x1302 c0x0000 (---------------) + I org - 0x00233243, // n0x1303 c0x0000 (---------------) + I com - 0x00239103, // n0x1304 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1305 c0x0000 (---------------) + I gov - 0x00228743, // n0x1306 c0x0000 (---------------) + I org - 0x0022ed83, // n0x1307 c0x0000 (---------------) + I rep - 0x00203903, // n0x1308 c0x0000 (---------------) + I tra - 0x00200342, // n0x1309 c0x0000 (---------------) + I ac - 0x000fe108, // n0x130a c0x0000 (---------------) + blogspot - 0x002b7ac5, // n0x130b c0x0000 (---------------) + I busan - 0x003147c8, // n0x130c c0x0000 (---------------) + I chungbuk - 0x0032a848, // n0x130d c0x0000 (---------------) + I chungnam - 0x0020ce42, // n0x130e c0x0000 (---------------) + I co - 0x00255dc5, // n0x130f c0x0000 (---------------) + I daegu - 0x003246c7, // n0x1310 c0x0000 (---------------) + I daejeon - 0x00200082, // n0x1311 c0x0000 (---------------) + I es - 
0x00216287, // n0x1312 c0x0000 (---------------) + I gangwon - 0x00210a42, // n0x1313 c0x0000 (---------------) + I go - 0x00234607, // n0x1314 c0x0000 (---------------) + I gwangju - 0x0030d549, // n0x1315 c0x0000 (---------------) + I gyeongbuk - 0x00301c08, // n0x1316 c0x0000 (---------------) + I gyeonggi - 0x00206fc9, // n0x1317 c0x0000 (---------------) + I gyeongnam - 0x00233842, // n0x1318 c0x0000 (---------------) + I hs - 0x00267b47, // n0x1319 c0x0000 (---------------) + I incheon - 0x0024e044, // n0x131a c0x0000 (---------------) + I jeju - 0x00324787, // n0x131b c0x0000 (---------------) + I jeonbuk - 0x00300687, // n0x131c c0x0000 (---------------) + I jeonnam - 0x002b7202, // n0x131d c0x0000 (---------------) + I kg - 0x00207dc3, // n0x131e c0x0000 (---------------) + I mil - 0x00210f42, // n0x131f c0x0000 (---------------) + I ms - 0x00202ac2, // n0x1320 c0x0000 (---------------) + I ne - 0x00200dc2, // n0x1321 c0x0000 (---------------) + I or - 0x00200582, // n0x1322 c0x0000 (---------------) + I pe - 0x00208c82, // n0x1323 c0x0000 (---------------) + I re - 0x00207f02, // n0x1324 c0x0000 (---------------) + I sc - 0x00343d45, // n0x1325 c0x0000 (---------------) + I seoul - 0x00253b45, // n0x1326 c0x0000 (---------------) + I ulsan - 0x00233243, // n0x1327 c0x0000 (---------------) + I com - 0x00239103, // n0x1328 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1329 c0x0000 (---------------) + I gov - 0x00223b43, // n0x132a c0x0000 (---------------) + I net - 0x00228743, // n0x132b c0x0000 (---------------) + I org - 0x00233243, // n0x132c c0x0000 (---------------) + I com - 0x00239103, // n0x132d c0x0000 (---------------) + I edu - 0x0027d903, // n0x132e c0x0000 (---------------) + I gov - 0x00207dc3, // n0x132f c0x0000 (---------------) + I mil - 0x00223b43, // n0x1330 c0x0000 (---------------) + I net - 0x00228743, // n0x1331 c0x0000 (---------------) + I org - 0x00000141, // n0x1332 c0x0000 (---------------) + c - 0x00233243, // n0x1333 
c0x0000 (---------------) + I com - 0x00239103, // n0x1334 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1335 c0x0000 (---------------) + I gov - 0x00201844, // n0x1336 c0x0000 (---------------) + I info - 0x00201503, // n0x1337 c0x0000 (---------------) + I int - 0x00223b43, // n0x1338 c0x0000 (---------------) + I net - 0x00228743, // n0x1339 c0x0000 (---------------) + I org - 0x0021e783, // n0x133a c0x0000 (---------------) + I per - 0x00233243, // n0x133b c0x0000 (---------------) + I com - 0x00239103, // n0x133c c0x0000 (---------------) + I edu - 0x0027d903, // n0x133d c0x0000 (---------------) + I gov - 0x00223b43, // n0x133e c0x0000 (---------------) + I net - 0x00228743, // n0x133f c0x0000 (---------------) + I org - 0x0020ce42, // n0x1340 c0x0000 (---------------) + I co - 0x00233243, // n0x1341 c0x0000 (---------------) + I com - 0x00239103, // n0x1342 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1343 c0x0000 (---------------) + I gov - 0x00223b43, // n0x1344 c0x0000 (---------------) + I net - 0x00228743, // n0x1345 c0x0000 (---------------) + I org - 0x000fe108, // n0x1346 c0x0000 (---------------) + blogspot - 0x00200342, // n0x1347 c0x0000 (---------------) + I ac - 0x002bccc4, // n0x1348 c0x0000 (---------------) + I assn - 0x00233243, // n0x1349 c0x0000 (---------------) + I com - 0x00239103, // n0x134a c0x0000 (---------------) + I edu - 0x0027d903, // n0x134b c0x0000 (---------------) + I gov - 0x00305e43, // n0x134c c0x0000 (---------------) + I grp - 0x00234305, // n0x134d c0x0000 (---------------) + I hotel - 0x00201503, // n0x134e c0x0000 (---------------) + I int - 0x00312883, // n0x134f c0x0000 (---------------) + I ltd - 0x00223b43, // n0x1350 c0x0000 (---------------) + I net - 0x0023b383, // n0x1351 c0x0000 (---------------) + I ngo - 0x00228743, // n0x1352 c0x0000 (---------------) + I org - 0x00217283, // n0x1353 c0x0000 (---------------) + I sch - 0x00274603, // n0x1354 c0x0000 (---------------) + I soc - 0x0021e243, 
// n0x1355 c0x0000 (---------------) + I web - 0x00233243, // n0x1356 c0x0000 (---------------) + I com - 0x00239103, // n0x1357 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1358 c0x0000 (---------------) + I gov - 0x00223b43, // n0x1359 c0x0000 (---------------) + I net - 0x00228743, // n0x135a c0x0000 (---------------) + I org - 0x0020ce42, // n0x135b c0x0000 (---------------) + I co - 0x00228743, // n0x135c c0x0000 (---------------) + I org - 0x000fe108, // n0x135d c0x0000 (---------------) + blogspot - 0x0027d903, // n0x135e c0x0000 (---------------) + I gov - 0x000fe108, // n0x135f c0x0000 (---------------) + blogspot - 0x0022b643, // n0x1360 c0x0000 (---------------) + I asn - 0x00233243, // n0x1361 c0x0000 (---------------) + I com - 0x00237344, // n0x1362 c0x0000 (---------------) + I conf - 0x00239103, // n0x1363 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1364 c0x0000 (---------------) + I gov - 0x0020d9c2, // n0x1365 c0x0000 (---------------) + I id - 0x00207dc3, // n0x1366 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1367 c0x0000 (---------------) + I net - 0x00228743, // n0x1368 c0x0000 (---------------) + I org - 0x00233243, // n0x1369 c0x0000 (---------------) + I com - 0x00239103, // n0x136a c0x0000 (---------------) + I edu - 0x0027d903, // n0x136b c0x0000 (---------------) + I gov - 0x0020d9c2, // n0x136c c0x0000 (---------------) + I id - 0x00213443, // n0x136d c0x0000 (---------------) + I med - 0x00223b43, // n0x136e c0x0000 (---------------) + I net - 0x00228743, // n0x136f c0x0000 (---------------) + I org - 0x002d96c3, // n0x1370 c0x0000 (---------------) + I plc - 0x00217283, // n0x1371 c0x0000 (---------------) + I sch - 0x00200342, // n0x1372 c0x0000 (---------------) + I ac - 0x0020ce42, // n0x1373 c0x0000 (---------------) + I co - 0x0027d903, // n0x1374 c0x0000 (---------------) + I gov - 0x00223b43, // n0x1375 c0x0000 (---------------) + I net - 0x00228743, // n0x1376 c0x0000 (---------------) + I org - 
0x00246a05, // n0x1377 c0x0000 (---------------) + I press - 0x002d5e84, // n0x1378 c0x0000 (---------------) + I asso - 0x00200c82, // n0x1379 c0x0000 (---------------) + I tm - 0x000fe108, // n0x137a c0x0000 (---------------) + blogspot - 0x00200342, // n0x137b c0x0000 (---------------) + I ac - 0x0020ce42, // n0x137c c0x0000 (---------------) + I co - 0x0005528b, // n0x137d c0x0000 (---------------) + diskstation - 0x00007ec7, // n0x137e c0x0000 (---------------) + dscloud - 0x00239103, // n0x137f c0x0000 (---------------) + I edu - 0x0027d903, // n0x1380 c0x0000 (---------------) + I gov - 0x00049fc4, // n0x1381 c0x0000 (---------------) + i234 - 0x00235383, // n0x1382 c0x0000 (---------------) + I its - 0x00049004, // n0x1383 c0x0000 (---------------) + myds - 0x00223b43, // n0x1384 c0x0000 (---------------) + I net - 0x00228743, // n0x1385 c0x0000 (---------------) + I org - 0x002e17c4, // n0x1386 c0x0000 (---------------) + I priv - 0x0010d3c8, // n0x1387 c0x0000 (---------------) + synology - 0x0020ce42, // n0x1388 c0x0000 (---------------) + I co - 0x00233243, // n0x1389 c0x0000 (---------------) + I com - 0x00239103, // n0x138a c0x0000 (---------------) + I edu - 0x0027d903, // n0x138b c0x0000 (---------------) + I gov - 0x00207dc3, // n0x138c c0x0000 (---------------) + I mil - 0x00201383, // n0x138d c0x0000 (---------------) + I nom - 0x00228743, // n0x138e c0x0000 (---------------) + I org - 0x002e0bc3, // n0x138f c0x0000 (---------------) + I prd - 0x00200c82, // n0x1390 c0x0000 (---------------) + I tm - 0x000fe108, // n0x1391 c0x0000 (---------------) + blogspot - 0x00233243, // n0x1392 c0x0000 (---------------) + I com - 0x00239103, // n0x1393 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1394 c0x0000 (---------------) + I gov - 0x00201683, // n0x1395 c0x0000 (---------------) + I inf - 0x00200904, // n0x1396 c0x0000 (---------------) + I name - 0x00223b43, // n0x1397 c0x0000 (---------------) + I net - 0x00228743, // n0x1398 c0x0000 
(---------------) + I org - 0x00233243, // n0x1399 c0x0000 (---------------) + I com - 0x00239103, // n0x139a c0x0000 (---------------) + I edu - 0x002aebc4, // n0x139b c0x0000 (---------------) + I gouv - 0x0027d903, // n0x139c c0x0000 (---------------) + I gov - 0x00223b43, // n0x139d c0x0000 (---------------) + I net - 0x00228743, // n0x139e c0x0000 (---------------) + I org - 0x00246a06, // n0x139f c0x0000 (---------------) + I presse - 0x00239103, // n0x13a0 c0x0000 (---------------) + I edu - 0x0027d903, // n0x13a1 c0x0000 (---------------) + I gov - 0x0016dcc3, // n0x13a2 c0x0000 (---------------) + nyc - 0x00228743, // n0x13a3 c0x0000 (---------------) + I org - 0x00233243, // n0x13a4 c0x0000 (---------------) + I com - 0x00239103, // n0x13a5 c0x0000 (---------------) + I edu - 0x0027d903, // n0x13a6 c0x0000 (---------------) + I gov - 0x00223b43, // n0x13a7 c0x0000 (---------------) + I net - 0x00228743, // n0x13a8 c0x0000 (---------------) + I org - 0x00007ec7, // n0x13a9 c0x0000 (---------------) + dscloud - 0x000fe108, // n0x13aa c0x0000 (---------------) + blogspot - 0x0027d903, // n0x13ab c0x0000 (---------------) + I gov - 0x00233243, // n0x13ac c0x0000 (---------------) + I com - 0x00239103, // n0x13ad c0x0000 (---------------) + I edu - 0x0027d903, // n0x13ae c0x0000 (---------------) + I gov - 0x00223b43, // n0x13af c0x0000 (---------------) + I net - 0x00228743, // n0x13b0 c0x0000 (---------------) + I org - 0x35a33243, // n0x13b1 c0x00d6 (n0x13b5-n0x13b6) + I com - 0x00239103, // n0x13b2 c0x0000 (---------------) + I edu - 0x00223b43, // n0x13b3 c0x0000 (---------------) + I net - 0x00228743, // n0x13b4 c0x0000 (---------------) + I org - 0x000fe108, // n0x13b5 c0x0000 (---------------) + blogspot - 0x00200342, // n0x13b6 c0x0000 (---------------) + I ac - 0x0020ce42, // n0x13b7 c0x0000 (---------------) + I co - 0x00233243, // n0x13b8 c0x0000 (---------------) + I com - 0x0027d903, // n0x13b9 c0x0000 (---------------) + I gov - 0x00223b43, // 
n0x13ba c0x0000 (---------------) + I net - 0x00200dc2, // n0x13bb c0x0000 (---------------) + I or - 0x00228743, // n0x13bc c0x0000 (---------------) + I org - 0x0030cf07, // n0x13bd c0x0000 (---------------) + I academy - 0x00208a4b, // n0x13be c0x0000 (---------------) + I agriculture - 0x00205103, // n0x13bf c0x0000 (---------------) + I air - 0x00239b48, // n0x13c0 c0x0000 (---------------) + I airguard - 0x003276c7, // n0x13c1 c0x0000 (---------------) + I alabama - 0x002799c6, // n0x13c2 c0x0000 (---------------) + I alaska - 0x00367785, // n0x13c3 c0x0000 (---------------) + I amber - 0x002c06c9, // n0x13c4 c0x0000 (---------------) + I ambulance - 0x00207b48, // n0x13c5 c0x0000 (---------------) + I american - 0x002f53c9, // n0x13c6 c0x0000 (---------------) + I americana - 0x002f53d0, // n0x13c7 c0x0000 (---------------) + I americanantiques - 0x003580cb, // n0x13c8 c0x0000 (---------------) + I americanart - 0x002c0509, // n0x13c9 c0x0000 (---------------) + I amsterdam - 0x00206ac3, // n0x13ca c0x0000 (---------------) + I and - 0x00249989, // n0x13cb c0x0000 (---------------) + I annefrank - 0x00238006, // n0x13cc c0x0000 (---------------) + I anthro - 0x0023800c, // n0x13cd c0x0000 (---------------) + I anthropology - 0x002b7b88, // n0x13ce c0x0000 (---------------) + I antiques - 0x003a0c48, // n0x13cf c0x0000 (---------------) + I aquarium - 0x002518c9, // n0x13d0 c0x0000 (---------------) + I arboretum - 0x002a128e, // n0x13d1 c0x0000 (---------------) + I archaeological - 0x0037080b, // n0x13d2 c0x0000 (---------------) + I archaeology - 0x00308e4c, // n0x13d3 c0x0000 (---------------) + I architecture - 0x00201d43, // n0x13d4 c0x0000 (---------------) + I art - 0x0038298c, // n0x13d5 c0x0000 (---------------) + I artanddesign - 0x00204bc9, // n0x13d6 c0x0000 (---------------) + I artcenter - 0x0020cd07, // n0x13d7 c0x0000 (---------------) + I artdeco - 0x0023904c, // n0x13d8 c0x0000 (---------------) + I arteducation - 0x0039164a, // n0x13d9 
c0x0000 (---------------) + I artgallery - 0x0024b944, // n0x13da c0x0000 (---------------) + I arts - 0x003a3ccd, // n0x13db c0x0000 (---------------) + I artsandcrafts - 0x00382848, // n0x13dc c0x0000 (---------------) + I asmatart - 0x0039838d, // n0x13dd c0x0000 (---------------) + I assassination - 0x00337106, // n0x13de c0x0000 (---------------) + I assisi - 0x002d5e8b, // n0x13df c0x0000 (---------------) + I association - 0x00248e49, // n0x13e0 c0x0000 (---------------) + I astronomy - 0x00227d47, // n0x13e1 c0x0000 (---------------) + I atlanta - 0x002dc946, // n0x13e2 c0x0000 (---------------) + I austin - 0x0030af49, // n0x13e3 c0x0000 (---------------) + I australia - 0x0032364a, // n0x13e4 c0x0000 (---------------) + I automotive - 0x00359fc8, // n0x13e5 c0x0000 (---------------) + I aviation - 0x002e0584, // n0x13e6 c0x0000 (---------------) + I axis - 0x00276e07, // n0x13e7 c0x0000 (---------------) + I badajoz - 0x002a2fc7, // n0x13e8 c0x0000 (---------------) + I baghdad - 0x002f5184, // n0x13e9 c0x0000 (---------------) + I bahn - 0x0022a484, // n0x13ea c0x0000 (---------------) + I bale - 0x0025ce49, // n0x13eb c0x0000 (---------------) + I baltimore - 0x002e3209, // n0x13ec c0x0000 (---------------) + I barcelona - 0x0022df88, // n0x13ed c0x0000 (---------------) + I baseball - 0x00212305, // n0x13ee c0x0000 (---------------) + I basel - 0x003878c5, // n0x13ef c0x0000 (---------------) + I baths - 0x0020e4c6, // n0x13f0 c0x0000 (---------------) + I bauern - 0x003a3b89, // n0x13f1 c0x0000 (---------------) + I beauxarts - 0x002187cd, // n0x13f2 c0x0000 (---------------) + I beeldengeluid - 0x003088c8, // n0x13f3 c0x0000 (---------------) + I bellevue - 0x0020e3c7, // n0x13f4 c0x0000 (---------------) + I bergbau - 0x00367808, // n0x13f5 c0x0000 (---------------) + I berkeley - 0x0022fc06, // n0x13f6 c0x0000 (---------------) + I berlin - 0x0038fc84, // n0x13f7 c0x0000 (---------------) + I bern - 0x00356b45, // n0x13f8 c0x0000 (---------------) 
+ I bible - 0x00202786, // n0x13f9 c0x0000 (---------------) + I bilbao - 0x00203784, // n0x13fa c0x0000 (---------------) + I bill - 0x00204ac7, // n0x13fb c0x0000 (---------------) + I birdart - 0x0020b1ca, // n0x13fc c0x0000 (---------------) + I birthplace - 0x00214304, // n0x13fd c0x0000 (---------------) + I bonn - 0x00218f86, // n0x13fe c0x0000 (---------------) + I boston - 0x0021b749, // n0x13ff c0x0000 (---------------) + I botanical - 0x0021b74f, // n0x1400 c0x0000 (---------------) + I botanicalgarden - 0x0021bd0d, // n0x1401 c0x0000 (---------------) + I botanicgarden - 0x0021c486, // n0x1402 c0x0000 (---------------) + I botany - 0x00220950, // n0x1403 c0x0000 (---------------) + I brandywinevalley - 0x00220d46, // n0x1404 c0x0000 (---------------) + I brasil - 0x00221e07, // n0x1405 c0x0000 (---------------) + I bristol - 0x00222187, // n0x1406 c0x0000 (---------------) + I british - 0x0022218f, // n0x1407 c0x0000 (---------------) + I britishcolumbia - 0x00223789, // n0x1408 c0x0000 (---------------) + I broadcast - 0x00226386, // n0x1409 c0x0000 (---------------) + I brunel - 0x00228407, // n0x140a c0x0000 (---------------) + I brussel - 0x00228408, // n0x140b c0x0000 (---------------) + I brussels - 0x00228909, // n0x140c c0x0000 (---------------) + I bruxelles - 0x00291688, // n0x140d c0x0000 (---------------) + I building - 0x002d7947, // n0x140e c0x0000 (---------------) + I burghof - 0x0020dc43, // n0x140f c0x0000 (---------------) + I bus - 0x002330c6, // n0x1410 c0x0000 (---------------) + I bushey - 0x00200e48, // n0x1411 c0x0000 (---------------) + I cadaques - 0x002a154a, // n0x1412 c0x0000 (---------------) + I california - 0x0021e309, // n0x1413 c0x0000 (---------------) + I cambridge - 0x00207c83, // n0x1414 c0x0000 (---------------) + I can - 0x003245c6, // n0x1415 c0x0000 (---------------) + I canada - 0x002da54a, // n0x1416 c0x0000 (---------------) + I capebreton - 0x00365a87, // n0x1417 c0x0000 (---------------) + I carrier - 
0x0020cb4a, // n0x1418 c0x0000 (---------------) + I cartoonart - 0x0021500e, // n0x1419 c0x0000 (---------------) + I casadelamoneda - 0x002238c6, // n0x141a c0x0000 (---------------) + I castle - 0x002a7dc7, // n0x141b c0x0000 (---------------) + I castres - 0x002106c6, // n0x141c c0x0000 (---------------) + I celtic - 0x00204c86, // n0x141d c0x0000 (---------------) + I center - 0x0037304b, // n0x141e c0x0000 (---------------) + I chattanooga - 0x0026478a, // n0x141f c0x0000 (---------------) + I cheltenham - 0x0037b6cd, // n0x1420 c0x0000 (---------------) + I chesapeakebay - 0x002129c7, // n0x1421 c0x0000 (---------------) + I chicago - 0x00274688, // n0x1422 c0x0000 (---------------) + I children - 0x00274689, // n0x1423 c0x0000 (---------------) + I childrens - 0x0027468f, // n0x1424 c0x0000 (---------------) + I childrensgarden - 0x003799cc, // n0x1425 c0x0000 (---------------) + I chiropractic - 0x002b8789, // n0x1426 c0x0000 (---------------) + I chocolate - 0x003149ce, // n0x1427 c0x0000 (---------------) + I christiansburg - 0x00378e4a, // n0x1428 c0x0000 (---------------) + I cincinnati - 0x003028c6, // n0x1429 c0x0000 (---------------) + I cinema - 0x0033d286, // n0x142a c0x0000 (---------------) + I circus - 0x0036430c, // n0x142b c0x0000 (---------------) + I civilisation - 0x00367ccc, // n0x142c c0x0000 (---------------) + I civilization - 0x0036dd48, // n0x142d c0x0000 (---------------) + I civilwar - 0x00387a47, // n0x142e c0x0000 (---------------) + I clinton - 0x002b0885, // n0x142f c0x0000 (---------------) + I clock - 0x003524c4, // n0x1430 c0x0000 (---------------) + I coal - 0x0038510e, // n0x1431 c0x0000 (---------------) + I coastaldefence - 0x003238c4, // n0x1432 c0x0000 (---------------) + I cody - 0x00231a07, // n0x1433 c0x0000 (---------------) + I coldwar - 0x00263d8a, // n0x1434 c0x0000 (---------------) + I collection - 0x00232414, // n0x1435 c0x0000 (---------------) + I colonialwilliamsburg - 0x00232b0f, // n0x1436 c0x0000 
(---------------) + I coloradoplateau - 0x00222348, // n0x1437 c0x0000 (---------------) + I columbia - 0x00232f88, // n0x1438 c0x0000 (---------------) + I columbus - 0x0036078d, // n0x1439 c0x0000 (---------------) + I communication - 0x0036078e, // n0x143a c0x0000 (---------------) + I communications - 0x00233249, // n0x143b c0x0000 (---------------) + I community - 0x00235b08, // n0x143c c0x0000 (---------------) + I computer - 0x00235b0f, // n0x143d c0x0000 (---------------) + I computerhistory - 0x00238d4c, // n0x143e c0x0000 (---------------) + I contemporary - 0x00238d4f, // n0x143f c0x0000 (---------------) + I contemporaryart - 0x0023a0c7, // n0x1440 c0x0000 (---------------) + I convent - 0x0023caca, // n0x1441 c0x0000 (---------------) + I copenhagen - 0x0021eccb, // n0x1442 c0x0000 (---------------) + I corporation - 0x002405c8, // n0x1443 c0x0000 (---------------) + I corvette - 0x00241907, // n0x1444 c0x0000 (---------------) + I costume - 0x0033798d, // n0x1445 c0x0000 (---------------) + I countryestate - 0x00321346, // n0x1446 c0x0000 (---------------) + I county - 0x003a3e86, // n0x1447 c0x0000 (---------------) + I crafts - 0x00242f89, // n0x1448 c0x0000 (---------------) + I cranbrook - 0x0031d9c8, // n0x1449 c0x0000 (---------------) + I creation - 0x00246d88, // n0x144a c0x0000 (---------------) + I cultural - 0x00246d8e, // n0x144b c0x0000 (---------------) + I culturalcenter - 0x00208b47, // n0x144c c0x0000 (---------------) + I culture - 0x00311f05, // n0x144d c0x0000 (---------------) + I cyber - 0x0024a0c5, // n0x144e c0x0000 (---------------) + I cymru - 0x00211bc4, // n0x144f c0x0000 (---------------) + I dali - 0x00279c86, // n0x1450 c0x0000 (---------------) + I dallas - 0x0022de88, // n0x1451 c0x0000 (---------------) + I database - 0x00329483, // n0x1452 c0x0000 (---------------) + I ddr - 0x0025df8e, // n0x1453 c0x0000 (---------------) + I decorativearts - 0x00337d48, // n0x1454 c0x0000 (---------------) + I delaware - 
0x0027ad0b, // n0x1455 c0x0000 (---------------) + I delmenhorst - 0x0022b807, // n0x1456 c0x0000 (---------------) + I denmark - 0x00273e05, // n0x1457 c0x0000 (---------------) + I depot - 0x00228206, // n0x1458 c0x0000 (---------------) + I design - 0x002ab607, // n0x1459 c0x0000 (---------------) + I detroit - 0x002f9d88, // n0x145a c0x0000 (---------------) + I dinosaur - 0x00331309, // n0x145b c0x0000 (---------------) + I discovery - 0x00237a85, // n0x145c c0x0000 (---------------) + I dolls - 0x00287848, // n0x145d c0x0000 (---------------) + I donostia - 0x00205946, // n0x145e c0x0000 (---------------) + I durham - 0x00374bca, // n0x145f c0x0000 (---------------) + I eastafrica - 0x00385009, // n0x1460 c0x0000 (---------------) + I eastcoast - 0x00239109, // n0x1461 c0x0000 (---------------) + I education - 0x0023910b, // n0x1462 c0x0000 (---------------) + I educational - 0x0033c448, // n0x1463 c0x0000 (---------------) + I egyptian - 0x002f5049, // n0x1464 c0x0000 (---------------) + I eisenbahn - 0x002123c6, // n0x1465 c0x0000 (---------------) + I elburg - 0x002e5cca, // n0x1466 c0x0000 (---------------) + I elvendrell - 0x0022aa0a, // n0x1467 c0x0000 (---------------) + I embroidery - 0x0023cccc, // n0x1468 c0x0000 (---------------) + I encyclopedic - 0x00213087, // n0x1469 c0x0000 (---------------) + I england - 0x00301a0a, // n0x146a c0x0000 (---------------) + I entomology - 0x003307cb, // n0x146b c0x0000 (---------------) + I environment - 0x003307d9, // n0x146c c0x0000 (---------------) + I environmentalconservation - 0x00335a88, // n0x146d c0x0000 (---------------) + I epilepsy - 0x00246a85, // n0x146e c0x0000 (---------------) + I essex - 0x002c4206, // n0x146f c0x0000 (---------------) + I estate - 0x0030edc9, // n0x1470 c0x0000 (---------------) + I ethnology - 0x002009c6, // n0x1471 c0x0000 (---------------) + I exeter - 0x0021014a, // n0x1472 c0x0000 (---------------) + I exhibition - 0x00207d46, // n0x1473 c0x0000 (---------------) + I 
family - 0x0026a084, // n0x1474 c0x0000 (---------------) + I farm - 0x002c438d, // n0x1475 c0x0000 (---------------) + I farmequipment - 0x00327f47, // n0x1476 c0x0000 (---------------) + I farmers - 0x0026a089, // n0x1477 c0x0000 (---------------) + I farmstead - 0x003663c5, // n0x1478 c0x0000 (---------------) + I field - 0x00366788, // n0x1479 c0x0000 (---------------) + I figueres - 0x0037a689, // n0x147a c0x0000 (---------------) + I filatelia - 0x0024b184, // n0x147b c0x0000 (---------------) + I film - 0x0024b847, // n0x147c c0x0000 (---------------) + I fineart - 0x0024b848, // n0x147d c0x0000 (---------------) + I finearts - 0x0024c247, // n0x147e c0x0000 (---------------) + I finland - 0x002659c8, // n0x147f c0x0000 (---------------) + I flanders - 0x00252907, // n0x1480 c0x0000 (---------------) + I florida - 0x00338685, // n0x1481 c0x0000 (---------------) + I force - 0x00259d4c, // n0x1482 c0x0000 (---------------) + I fortmissoula - 0x0025a909, // n0x1483 c0x0000 (---------------) + I fortworth - 0x002b9f8a, // n0x1484 c0x0000 (---------------) + I foundation - 0x00384289, // n0x1485 c0x0000 (---------------) + I francaise - 0x00249a89, // n0x1486 c0x0000 (---------------) + I frankfurt - 0x0024ba4c, // n0x1487 c0x0000 (---------------) + I franziskaner - 0x002e808b, // n0x1488 c0x0000 (---------------) + I freemasonry - 0x0025c888, // n0x1489 c0x0000 (---------------) + I freiburg - 0x0025e788, // n0x148a c0x0000 (---------------) + I fribourg - 0x00261b44, // n0x148b c0x0000 (---------------) + I frog - 0x00283e08, // n0x148c c0x0000 (---------------) + I fundacio - 0x00284dc9, // n0x148d c0x0000 (---------------) + I furniture - 0x00391707, // n0x148e c0x0000 (---------------) + I gallery - 0x0021b986, // n0x148f c0x0000 (---------------) + I garden - 0x00245307, // n0x1490 c0x0000 (---------------) + I gateway - 0x00331589, // n0x1491 c0x0000 (---------------) + I geelvinck - 0x00212ccb, // n0x1492 c0x0000 (---------------) + I gemological - 
0x00395947, // n0x1493 c0x0000 (---------------) + I geology - 0x003234c7, // n0x1494 c0x0000 (---------------) + I georgia - 0x00279707, // n0x1495 c0x0000 (---------------) + I giessen - 0x00398304, // n0x1496 c0x0000 (---------------) + I glas - 0x00398305, // n0x1497 c0x0000 (---------------) + I glass - 0x002a9585, // n0x1498 c0x0000 (---------------) + I gorge - 0x0033420b, // n0x1499 c0x0000 (---------------) + I grandrapids - 0x0038ef04, // n0x149a c0x0000 (---------------) + I graz - 0x00230008, // n0x149b c0x0000 (---------------) + I guernsey - 0x0028708a, // n0x149c c0x0000 (---------------) + I halloffame - 0x00205a07, // n0x149d c0x0000 (---------------) + I hamburg - 0x002f7f07, // n0x149e c0x0000 (---------------) + I handson - 0x0028db92, // n0x149f c0x0000 (---------------) + I harvestcelebration - 0x00257506, // n0x14a0 c0x0000 (---------------) + I hawaii - 0x002ae586, // n0x14a1 c0x0000 (---------------) + I health - 0x00312c8e, // n0x14a2 c0x0000 (---------------) + I heimatunduhren - 0x00258306, // n0x14a3 c0x0000 (---------------) + I hellas - 0x00209bc8, // n0x14a4 c0x0000 (---------------) + I helsinki - 0x0029270f, // n0x14a5 c0x0000 (---------------) + I hembygdsforbund - 0x00398748, // n0x14a6 c0x0000 (---------------) + I heritage - 0x0036d1c8, // n0x14a7 c0x0000 (---------------) + I histoire - 0x0034b70a, // n0x14a8 c0x0000 (---------------) + I historical - 0x0034b711, // n0x14a9 c0x0000 (---------------) + I historicalsociety - 0x002a354e, // n0x14aa c0x0000 (---------------) + I historichouses - 0x0025720a, // n0x14ab c0x0000 (---------------) + I historisch - 0x0025720c, // n0x14ac c0x0000 (---------------) + I historisches - 0x00235d07, // n0x14ad c0x0000 (---------------) + I history - 0x00235d10, // n0x14ae c0x0000 (---------------) + I historyofscience - 0x00202048, // n0x14af c0x0000 (---------------) + I horology - 0x002a3745, // n0x14b0 c0x0000 (---------------) + I house - 0x002abe4a, // n0x14b1 c0x0000 (---------------) 
+ I humanities - 0x002037cc, // n0x14b2 c0x0000 (---------------) + I illustration - 0x002b61cd, // n0x14b3 c0x0000 (---------------) + I imageandsound - 0x002a5286, // n0x14b4 c0x0000 (---------------) + I indian - 0x002a5287, // n0x14b5 c0x0000 (---------------) + I indiana - 0x002a528c, // n0x14b6 c0x0000 (---------------) + I indianapolis - 0x002ec78c, // n0x14b7 c0x0000 (---------------) + I indianmarket - 0x002ff70c, // n0x14b8 c0x0000 (---------------) + I intelligence - 0x0028c78b, // n0x14b9 c0x0000 (---------------) + I interactive - 0x00286784, // n0x14ba c0x0000 (---------------) + I iraq - 0x0021c344, // n0x14bb c0x0000 (---------------) + I iron - 0x00349c89, // n0x14bc c0x0000 (---------------) + I isleofman - 0x002ca6c7, // n0x14bd c0x0000 (---------------) + I jamison - 0x0026e0c9, // n0x14be c0x0000 (---------------) + I jefferson - 0x00280d89, // n0x14bf c0x0000 (---------------) + I jerusalem - 0x00361547, // n0x14c0 c0x0000 (---------------) + I jewelry - 0x003914c6, // n0x14c1 c0x0000 (---------------) + I jewish - 0x003914c9, // n0x14c2 c0x0000 (---------------) + I jewishart - 0x0039b003, // n0x14c3 c0x0000 (---------------) + I jfk - 0x0027b20a, // n0x14c4 c0x0000 (---------------) + I journalism - 0x00352707, // n0x14c5 c0x0000 (---------------) + I judaica - 0x0027724b, // n0x14c6 c0x0000 (---------------) + I judygarland - 0x0037b54a, // n0x14c7 c0x0000 (---------------) + I juedisches - 0x00234744, // n0x14c8 c0x0000 (---------------) + I juif - 0x003507c6, // n0x14c9 c0x0000 (---------------) + I karate - 0x0027fd89, // n0x14ca c0x0000 (---------------) + I karikatur - 0x0033c884, // n0x14cb c0x0000 (---------------) + I kids - 0x00202d4a, // n0x14cc c0x0000 (---------------) + I koebenhavn - 0x002291c5, // n0x14cd c0x0000 (---------------) + I koeln - 0x002b6a85, // n0x14ce c0x0000 (---------------) + I kunst - 0x002b6a8d, // n0x14cf c0x0000 (---------------) + I kunstsammlung - 0x002b6dce, // n0x14d0 c0x0000 (---------------) + I 
kunstunddesign - 0x00317c05, // n0x14d1 c0x0000 (---------------) + I labor - 0x0038a446, // n0x14d2 c0x0000 (---------------) + I labour - 0x00246607, // n0x14d3 c0x0000 (---------------) + I lajolla - 0x002cb10a, // n0x14d4 c0x0000 (---------------) + I lancashire - 0x00323bc6, // n0x14d5 c0x0000 (---------------) + I landes - 0x0035bbc4, // n0x14d6 c0x0000 (---------------) + I lans - 0x002e0a07, // n0x14d7 c0x0000 (---------------) + I larsson - 0x00213d0b, // n0x14d8 c0x0000 (---------------) + I lewismiller - 0x0022fcc7, // n0x14d9 c0x0000 (---------------) + I lincoln - 0x002096c4, // n0x14da c0x0000 (---------------) + I linz - 0x002de986, // n0x14db c0x0000 (---------------) + I living - 0x002de98d, // n0x14dc c0x0000 (---------------) + I livinghistory - 0x0024960c, // n0x14dd c0x0000 (---------------) + I localhistory - 0x003114c6, // n0x14de c0x0000 (---------------) + I london - 0x00308aca, // n0x14df c0x0000 (---------------) + I losangeles - 0x0022ec86, // n0x14e0 c0x0000 (---------------) + I louvre - 0x0029d408, // n0x14e1 c0x0000 (---------------) + I loyalist - 0x002e6f07, // n0x14e2 c0x0000 (---------------) + I lucerne - 0x0023554a, // n0x14e3 c0x0000 (---------------) + I luxembourg - 0x0023c946, // n0x14e4 c0x0000 (---------------) + I luzern - 0x002128c3, // n0x14e5 c0x0000 (---------------) + I mad - 0x003197c6, // n0x14e6 c0x0000 (---------------) + I madrid - 0x00200cc8, // n0x14e7 c0x0000 (---------------) + I mallorca - 0x0029ad4a, // n0x14e8 c0x0000 (---------------) + I manchester - 0x00251d47, // n0x14e9 c0x0000 (---------------) + I mansion - 0x00251d48, // n0x14ea c0x0000 (---------------) + I mansions - 0x00268e04, // n0x14eb c0x0000 (---------------) + I manx - 0x00278d07, // n0x14ec c0x0000 (---------------) + I marburg - 0x00219cc8, // n0x14ed c0x0000 (---------------) + I maritime - 0x002a46c8, // n0x14ee c0x0000 (---------------) + I maritimo - 0x00257708, // n0x14ef c0x0000 (---------------) + I maryland - 0x00280a0a, // 
n0x14f0 c0x0000 (---------------) + I marylhurst - 0x00303545, // n0x14f1 c0x0000 (---------------) + I media - 0x00239607, // n0x14f2 c0x0000 (---------------) + I medical - 0x00257053, // n0x14f3 c0x0000 (---------------) + I medizinhistorisches - 0x00259186, // n0x14f4 c0x0000 (---------------) + I meeres - 0x0036e248, // n0x14f5 c0x0000 (---------------) + I memorial - 0x00225a49, // n0x14f6 c0x0000 (---------------) + I mesaverde - 0x00216148, // n0x14f7 c0x0000 (---------------) + I michigan - 0x00217bcb, // n0x14f8 c0x0000 (---------------) + I midatlantic - 0x002b94c8, // n0x14f9 c0x0000 (---------------) + I military - 0x00213e44, // n0x14fa c0x0000 (---------------) + I mill - 0x0030a546, // n0x14fb c0x0000 (---------------) + I miners - 0x00300206, // n0x14fc c0x0000 (---------------) + I mining - 0x002fc409, // n0x14fd c0x0000 (---------------) + I minnesota - 0x002c11c7, // n0x14fe c0x0000 (---------------) + I missile - 0x00259e48, // n0x14ff c0x0000 (---------------) + I missoula - 0x003a08c6, // n0x1500 c0x0000 (---------------) + I modern - 0x00306004, // n0x1501 c0x0000 (---------------) + I moma - 0x002c8705, // n0x1502 c0x0000 (---------------) + I money - 0x002c3608, // n0x1503 c0x0000 (---------------) + I monmouth - 0x002c3d4a, // n0x1504 c0x0000 (---------------) + I monticello - 0x002c4008, // n0x1505 c0x0000 (---------------) + I montreal - 0x002c8e46, // n0x1506 c0x0000 (---------------) + I moscow - 0x0029b44a, // n0x1507 c0x0000 (---------------) + I motorcycle - 0x002e7a88, // n0x1508 c0x0000 (---------------) + I muenchen - 0x002cb908, // n0x1509 c0x0000 (---------------) + I muenster - 0x002ccc08, // n0x150a c0x0000 (---------------) + I mulhouse - 0x002cd606, // n0x150b c0x0000 (---------------) + I muncie - 0x002d0ac6, // n0x150c c0x0000 (---------------) + I museet - 0x002f418c, // n0x150d c0x0000 (---------------) + I museumcenter - 0x002d1090, // n0x150e c0x0000 (---------------) + I museumvereniging - 0x002812c5, // n0x150f 
c0x0000 (---------------) + I music - 0x0031bbc8, // n0x1510 c0x0000 (---------------) + I national - 0x0031bbd0, // n0x1511 c0x0000 (---------------) + I nationalfirearms - 0x00398550, // n0x1512 c0x0000 (---------------) + I nationalheritage - 0x002f524e, // n0x1513 c0x0000 (---------------) + I nativeamerican - 0x002f3e0e, // n0x1514 c0x0000 (---------------) + I naturalhistory - 0x002f3e14, // n0x1515 c0x0000 (---------------) + I naturalhistorymuseum - 0x002dcacf, // n0x1516 c0x0000 (---------------) + I naturalsciences - 0x002dce86, // n0x1517 c0x0000 (---------------) + I nature - 0x003218d1, // n0x1518 c0x0000 (---------------) + I naturhistorisches - 0x00325193, // n0x1519 c0x0000 (---------------) + I natuurwetenschappen - 0x00325608, // n0x151a c0x0000 (---------------) + I naumburg - 0x00255505, // n0x151b c0x0000 (---------------) + I naval - 0x0024e708, // n0x151c c0x0000 (---------------) + I nebraska - 0x002e39c5, // n0x151d c0x0000 (---------------) + I neues - 0x0022c04c, // n0x151e c0x0000 (---------------) + I newhampshire - 0x002b10c9, // n0x151f c0x0000 (---------------) + I newjersey - 0x00231849, // n0x1520 c0x0000 (---------------) + I newmexico - 0x00245087, // n0x1521 c0x0000 (---------------) + I newport - 0x0021e609, // n0x1522 c0x0000 (---------------) + I newspaper - 0x00328187, // n0x1523 c0x0000 (---------------) + I newyork - 0x0029a5c6, // n0x1524 c0x0000 (---------------) + I niepce - 0x00356947, // n0x1525 c0x0000 (---------------) + I norfolk - 0x0023c505, // n0x1526 c0x0000 (---------------) + I north - 0x0026e2c3, // n0x1527 c0x0000 (---------------) + I nrw - 0x00329649, // n0x1528 c0x0000 (---------------) + I nuernberg - 0x0037ba89, // n0x1529 c0x0000 (---------------) + I nuremberg - 0x0036dcc3, // n0x152a c0x0000 (---------------) + I nyc - 0x0039f3c4, // n0x152b c0x0000 (---------------) + I nyny - 0x0031110d, // n0x152c c0x0000 (---------------) + I oceanographic - 0x0039f9cf, // n0x152d c0x0000 (---------------) + I 
oceanographique - 0x002fe985, // n0x152e c0x0000 (---------------) + I omaha - 0x00319c46, // n0x152f c0x0000 (---------------) + I online - 0x0039f847, // n0x1530 c0x0000 (---------------) + I ontario - 0x0033f507, // n0x1531 c0x0000 (---------------) + I openair - 0x0028a586, // n0x1532 c0x0000 (---------------) + I oregon - 0x0028a58b, // n0x1533 c0x0000 (---------------) + I oregontrail - 0x002a4c45, // n0x1534 c0x0000 (---------------) + I otago - 0x0039d306, // n0x1535 c0x0000 (---------------) + I oxford - 0x0038ff07, // n0x1536 c0x0000 (---------------) + I pacific - 0x002714c9, // n0x1537 c0x0000 (---------------) + I paderborn - 0x00311c06, // n0x1538 c0x0000 (---------------) + I palace - 0x0020c845, // n0x1539 c0x0000 (---------------) + I paleo - 0x00245b8b, // n0x153a c0x0000 (---------------) + I palmsprings - 0x002546c6, // n0x153b c0x0000 (---------------) + I panama - 0x00277705, // n0x153c c0x0000 (---------------) + I paris - 0x002b7348, // n0x153d c0x0000 (---------------) + I pasadena - 0x00373588, // n0x153e c0x0000 (---------------) + I pharmacy - 0x002d470c, // n0x153f c0x0000 (---------------) + I philadelphia - 0x002d4710, // n0x1540 c0x0000 (---------------) + I philadelphiaarea - 0x002d4dc9, // n0x1541 c0x0000 (---------------) + I philately - 0x002d5207, // n0x1542 c0x0000 (---------------) + I phoenix - 0x002d564b, // n0x1543 c0x0000 (---------------) + I photography - 0x002d6ac6, // n0x1544 c0x0000 (---------------) + I pilots - 0x002d780a, // n0x1545 c0x0000 (---------------) + I pittsburgh - 0x002d82cb, // n0x1546 c0x0000 (---------------) + I planetarium - 0x002d86ca, // n0x1547 c0x0000 (---------------) + I plantation - 0x002d8946, // n0x1548 c0x0000 (---------------) + I plants - 0x002d9585, // n0x1549 c0x0000 (---------------) + I plaza - 0x003275c6, // n0x154a c0x0000 (---------------) + I portal - 0x00279288, // n0x154b c0x0000 (---------------) + I portland - 0x0024514a, // n0x154c c0x0000 (---------------) + I portlligat - 
0x0036041c, // n0x154d c0x0000 (---------------) + I posts-and-telecommunications - 0x002e0c8c, // n0x154e c0x0000 (---------------) + I preservation - 0x002e0f88, // n0x154f c0x0000 (---------------) + I presidio - 0x00246a05, // n0x1550 c0x0000 (---------------) + I press - 0x002e3dc7, // n0x1551 c0x0000 (---------------) + I project - 0x002a0ec6, // n0x1552 c0x0000 (---------------) + I public - 0x0038d045, // n0x1553 c0x0000 (---------------) + I pubol - 0x0021d986, // n0x1554 c0x0000 (---------------) + I quebec - 0x0028a748, // n0x1555 c0x0000 (---------------) + I railroad - 0x002b4f87, // n0x1556 c0x0000 (---------------) + I railway - 0x002a1188, // n0x1557 c0x0000 (---------------) + I research - 0x002a7eca, // n0x1558 c0x0000 (---------------) + I resistance - 0x0030b2cc, // n0x1559 c0x0000 (---------------) + I riodejaneiro - 0x0030b549, // n0x155a c0x0000 (---------------) + I rochester - 0x0038d387, // n0x155b c0x0000 (---------------) + I rockart - 0x0022cb44, // n0x155c c0x0000 (---------------) + I roma - 0x00252f46, // n0x155d c0x0000 (---------------) + I russia - 0x0036cd4a, // n0x155e c0x0000 (---------------) + I saintlouis - 0x00280e85, // n0x155f c0x0000 (---------------) + I salem - 0x0031f70c, // n0x1560 c0x0000 (---------------) + I salvadordali - 0x00320388, // n0x1561 c0x0000 (---------------) + I salzburg - 0x0023bf08, // n0x1562 c0x0000 (---------------) + I sandiego - 0x00399b0c, // n0x1563 c0x0000 (---------------) + I sanfrancisco - 0x0020df8c, // n0x1564 c0x0000 (---------------) + I santabarbara - 0x0020fd09, // n0x1565 c0x0000 (---------------) + I santacruz - 0x0020ff47, // n0x1566 c0x0000 (---------------) + I santafe - 0x002ae88c, // n0x1567 c0x0000 (---------------) + I saskatchewan - 0x003a5184, // n0x1568 c0x0000 (---------------) + I satx - 0x0037750a, // n0x1569 c0x0000 (---------------) + I savannahga - 0x0033cb8c, // n0x156a c0x0000 (---------------) + I schlesisches - 0x0027234b, // n0x156b c0x0000 (---------------) + 
I schoenbrunn - 0x0023100b, // n0x156c c0x0000 (---------------) + I schokoladen - 0x00232186, // n0x156d c0x0000 (---------------) + I school - 0x00237b87, // n0x156e c0x0000 (---------------) + I schweiz - 0x00235f47, // n0x156f c0x0000 (---------------) + I science - 0x00235f4f, // n0x1570 c0x0000 (---------------) + I science-fiction - 0x002ed0d1, // n0x1571 c0x0000 (---------------) + I scienceandhistory - 0x003a3452, // n0x1572 c0x0000 (---------------) + I scienceandindustry - 0x0024124d, // n0x1573 c0x0000 (---------------) + I sciencecenter - 0x0024124e, // n0x1574 c0x0000 (---------------) + I sciencecenters - 0x0024158e, // n0x1575 c0x0000 (---------------) + I sciencehistory - 0x002dcc88, // n0x1576 c0x0000 (---------------) + I sciences - 0x002dcc92, // n0x1577 c0x0000 (---------------) + I sciencesnaturelles - 0x00399d48, // n0x1578 c0x0000 (---------------) + I scotland - 0x002f9ac7, // n0x1579 c0x0000 (---------------) + I seaport - 0x0024fe8a, // n0x157a c0x0000 (---------------) + I settlement - 0x0021f7c8, // n0x157b c0x0000 (---------------) + I settlers - 0x002582c5, // n0x157c c0x0000 (---------------) + I shell - 0x002eb88a, // n0x157d c0x0000 (---------------) + I sherbrooke - 0x00221c07, // n0x157e c0x0000 (---------------) + I sibenik - 0x00358404, // n0x157f c0x0000 (---------------) + I silk - 0x00208503, // n0x1580 c0x0000 (---------------) + I ski - 0x00297b45, // n0x1581 c0x0000 (---------------) + I skole - 0x0034b987, // n0x1582 c0x0000 (---------------) + I society - 0x002e1e47, // n0x1583 c0x0000 (---------------) + I sologne - 0x002b63ce, // n0x1584 c0x0000 (---------------) + I soundandvision - 0x00302ccd, // n0x1585 c0x0000 (---------------) + I southcarolina - 0x003067c9, // n0x1586 c0x0000 (---------------) + I southwest - 0x0020aa85, // n0x1587 c0x0000 (---------------) + I space - 0x00332903, // n0x1588 c0x0000 (---------------) + I spy - 0x0027a146, // n0x1589 c0x0000 (---------------) + I square - 0x00364b45, // n0x158a 
c0x0000 (---------------) + I stadt - 0x0027af48, // n0x158b c0x0000 (---------------) + I stalbans - 0x00322809, // n0x158c c0x0000 (---------------) + I starnberg - 0x0020a205, // n0x158d c0x0000 (---------------) + I state - 0x00337b8f, // n0x158e c0x0000 (---------------) + I stateofdelaware - 0x00255387, // n0x158f c0x0000 (---------------) + I station - 0x00367605, // n0x1590 c0x0000 (---------------) + I steam - 0x00229cca, // n0x1591 c0x0000 (---------------) + I steiermark - 0x0033bd06, // n0x1592 c0x0000 (---------------) + I stjohn - 0x0029d589, // n0x1593 c0x0000 (---------------) + I stockholm - 0x003906cc, // n0x1594 c0x0000 (---------------) + I stpetersburg - 0x002e8d09, // n0x1595 c0x0000 (---------------) + I stuttgart - 0x0020b646, // n0x1596 c0x0000 (---------------) + I suisse - 0x00286e8c, // n0x1597 c0x0000 (---------------) + I surgeonshall - 0x002e9586, // n0x1598 c0x0000 (---------------) + I surrey - 0x002ebc08, // n0x1599 c0x0000 (---------------) + I svizzera - 0x002ebe06, // n0x159a c0x0000 (---------------) + I sweden - 0x00335c06, // n0x159b c0x0000 (---------------) + I sydney - 0x00355284, // n0x159c c0x0000 (---------------) + I tank - 0x0025ccc3, // n0x159d c0x0000 (---------------) + I tcm - 0x002ad44a, // n0x159e c0x0000 (---------------) + I technology - 0x0022ce51, // n0x159f c0x0000 (---------------) + I telekommunikation - 0x002b894a, // n0x15a0 c0x0000 (---------------) + I television - 0x0034aac5, // n0x15a1 c0x0000 (---------------) + I texas - 0x00382307, // n0x15a2 c0x0000 (---------------) + I textile - 0x00257a07, // n0x15a3 c0x0000 (---------------) + I theater - 0x00219dc4, // n0x15a4 c0x0000 (---------------) + I time - 0x00219dcb, // n0x15a5 c0x0000 (---------------) + I timekeeping - 0x00206e48, // n0x15a6 c0x0000 (---------------) + I topology - 0x002b34c6, // n0x15a7 c0x0000 (---------------) + I torino - 0x00288685, // n0x15a8 c0x0000 (---------------) + I touch - 0x002dc0c4, // n0x15a9 c0x0000 
(---------------) + I town - 0x0029c509, // n0x15aa c0x0000 (---------------) + I transport - 0x00353204, // n0x15ab c0x0000 (---------------) + I tree - 0x0033f9c7, // n0x15ac c0x0000 (---------------) + I trolley - 0x0032a345, // n0x15ad c0x0000 (---------------) + I trust - 0x0032a347, // n0x15ae c0x0000 (---------------) + I trustee - 0x00312ec5, // n0x15af c0x0000 (---------------) + I uhren - 0x00349683, // n0x15b0 c0x0000 (---------------) + I ulm - 0x002f9988, // n0x15b1 c0x0000 (---------------) + I undersea - 0x00309e4a, // n0x15b2 c0x0000 (---------------) + I university - 0x00244843, // n0x15b3 c0x0000 (---------------) + I usa - 0x002b7b0a, // n0x15b4 c0x0000 (---------------) + I usantiques - 0x0028fdc6, // n0x15b5 c0x0000 (---------------) + I usarts - 0x0033790f, // n0x15b6 c0x0000 (---------------) + I uscountryestate - 0x0033d389, // n0x15b7 c0x0000 (---------------) + I usculture - 0x0025df10, // n0x15b8 c0x0000 (---------------) + I usdecorativearts - 0x0026ecc8, // n0x15b9 c0x0000 (---------------) + I usgarden - 0x002c96c9, // n0x15ba c0x0000 (---------------) + I ushistory - 0x0029e207, // n0x15bb c0x0000 (---------------) + I ushuaia - 0x002de90f, // n0x15bc c0x0000 (---------------) + I uslivinghistory - 0x002e8844, // n0x15bd c0x0000 (---------------) + I utah - 0x002aec44, // n0x15be c0x0000 (---------------) + I uvic - 0x00216c86, // n0x15bf c0x0000 (---------------) + I valley - 0x002371c6, // n0x15c0 c0x0000 (---------------) + I vantaa - 0x0031758a, // n0x15c1 c0x0000 (---------------) + I versailles - 0x0032ae86, // n0x15c2 c0x0000 (---------------) + I viking - 0x002ed947, // n0x15c3 c0x0000 (---------------) + I village - 0x002f6d88, // n0x15c4 c0x0000 (---------------) + I virginia - 0x002f6f87, // n0x15c5 c0x0000 (---------------) + I virtual - 0x002f7147, // n0x15c6 c0x0000 (---------------) + I virtuel - 0x0034514a, // n0x15c7 c0x0000 (---------------) + I vlaanderen - 0x002f97cb, // n0x15c8 c0x0000 (---------------) + I 
volkenkunde - 0x00309ac5, // n0x15c9 c0x0000 (---------------) + I wales - 0x003a3088, // n0x15ca c0x0000 (---------------) + I wallonie - 0x00203683, // n0x15cb c0x0000 (---------------) + I war - 0x0023f70c, // n0x15cc c0x0000 (---------------) + I washingtondc - 0x00375acf, // n0x15cd c0x0000 (---------------) + I watch-and-clock - 0x002b068d, // n0x15ce c0x0000 (---------------) + I watchandclock - 0x0023c647, // n0x15cf c0x0000 (---------------) + I western - 0x00306909, // n0x15d0 c0x0000 (---------------) + I westfalen - 0x0026e347, // n0x15d1 c0x0000 (---------------) + I whaling - 0x0035f308, // n0x15d2 c0x0000 (---------------) + I wildlife - 0x0023260c, // n0x15d3 c0x0000 (---------------) + I williamsburg - 0x00284bc8, // n0x15d4 c0x0000 (---------------) + I windmill - 0x00357088, // n0x15d5 c0x0000 (---------------) + I workshop - 0x0030ea4e, // n0x15d6 c0x0000 (---------------) + I xn--9dbhblg6di - 0x0031dbd4, // n0x15d7 c0x0000 (---------------) + I xn--comunicaes-v6a2o - 0x0031e0e4, // n0x15d8 c0x0000 (---------------) + I xn--correios-e-telecomunicaes-ghc29a - 0x0033be8a, // n0x15d9 c0x0000 (---------------) + I xn--h1aegh - 0x00358c0b, // n0x15da c0x0000 (---------------) + I xn--lns-qla - 0x00328244, // n0x15db c0x0000 (---------------) + I york - 0x00328249, // n0x15dc c0x0000 (---------------) + I yorkshire - 0x002abc48, // n0x15dd c0x0000 (---------------) + I yosemite - 0x0024a805, // n0x15de c0x0000 (---------------) + I youth - 0x00328f8a, // n0x15df c0x0000 (---------------) + I zoological - 0x0027a307, // n0x15e0 c0x0000 (---------------) + I zoology - 0x002e2dc4, // n0x15e1 c0x0000 (---------------) + I aero - 0x00331a83, // n0x15e2 c0x0000 (---------------) + I biz - 0x00233243, // n0x15e3 c0x0000 (---------------) + I com - 0x0023c344, // n0x15e4 c0x0000 (---------------) + I coop - 0x00239103, // n0x15e5 c0x0000 (---------------) + I edu - 0x0027d903, // n0x15e6 c0x0000 (---------------) + I gov - 0x00201844, // n0x15e7 c0x0000 
(---------------) + I info - 0x00201503, // n0x15e8 c0x0000 (---------------) + I int - 0x00207dc3, // n0x15e9 c0x0000 (---------------) + I mil - 0x002d1086, // n0x15ea c0x0000 (---------------) + I museum - 0x00200904, // n0x15eb c0x0000 (---------------) + I name - 0x00223b43, // n0x15ec c0x0000 (---------------) + I net - 0x00228743, // n0x15ed c0x0000 (---------------) + I org - 0x00224b03, // n0x15ee c0x0000 (---------------) + I pro - 0x00200342, // n0x15ef c0x0000 (---------------) + I ac - 0x00331a83, // n0x15f0 c0x0000 (---------------) + I biz - 0x0020ce42, // n0x15f1 c0x0000 (---------------) + I co - 0x00233243, // n0x15f2 c0x0000 (---------------) + I com - 0x0023c344, // n0x15f3 c0x0000 (---------------) + I coop - 0x00239103, // n0x15f4 c0x0000 (---------------) + I edu - 0x0027d903, // n0x15f5 c0x0000 (---------------) + I gov - 0x00201503, // n0x15f6 c0x0000 (---------------) + I int - 0x002d1086, // n0x15f7 c0x0000 (---------------) + I museum - 0x00223b43, // n0x15f8 c0x0000 (---------------) + I net - 0x00228743, // n0x15f9 c0x0000 (---------------) + I org - 0x000fe108, // n0x15fa c0x0000 (---------------) + blogspot - 0x00233243, // n0x15fb c0x0000 (---------------) + I com - 0x00239103, // n0x15fc c0x0000 (---------------) + I edu - 0x00212b03, // n0x15fd c0x0000 (---------------) + I gob - 0x00223b43, // n0x15fe c0x0000 (---------------) + I net - 0x00228743, // n0x15ff c0x0000 (---------------) + I org - 0x000fe108, // n0x1600 c0x0000 (---------------) + blogspot - 0x00233243, // n0x1601 c0x0000 (---------------) + I com - 0x00239103, // n0x1602 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1603 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x1604 c0x0000 (---------------) + I mil - 0x00200904, // n0x1605 c0x0000 (---------------) + I name - 0x00223b43, // n0x1606 c0x0000 (---------------) + I net - 0x00228743, // n0x1607 c0x0000 (---------------) + I org - 0x0062dd88, // n0x1608 c0x0001 (---------------) ! 
I teledata - 0x00200e42, // n0x1609 c0x0000 (---------------) + I ca - 0x0022f6c2, // n0x160a c0x0000 (---------------) + I cc - 0x0020ce42, // n0x160b c0x0000 (---------------) + I co - 0x00233243, // n0x160c c0x0000 (---------------) + I com - 0x0026ab42, // n0x160d c0x0000 (---------------) + I dr - 0x002012c2, // n0x160e c0x0000 (---------------) + I in - 0x00201844, // n0x160f c0x0000 (---------------) + I info - 0x0020bf04, // n0x1610 c0x0000 (---------------) + I mobi - 0x002195c2, // n0x1611 c0x0000 (---------------) + I mx - 0x00200904, // n0x1612 c0x0000 (---------------) + I name - 0x00200dc2, // n0x1613 c0x0000 (---------------) + I or - 0x00228743, // n0x1614 c0x0000 (---------------) + I org - 0x00224b03, // n0x1615 c0x0000 (---------------) + I pro - 0x00232186, // n0x1616 c0x0000 (---------------) + I school - 0x002203c2, // n0x1617 c0x0000 (---------------) + I tv - 0x00202242, // n0x1618 c0x0000 (---------------) + I us - 0x0020a882, // n0x1619 c0x0000 (---------------) + I ws - 0x38225983, // n0x161a c0x00e0 (n0x161c-n0x161d) o I her - 0x38618043, // n0x161b c0x00e1 (n0x161d-n0x161e) o I his - 0x000580c6, // n0x161c c0x0000 (---------------) + forgot - 0x000580c6, // n0x161d c0x0000 (---------------) + forgot - 0x002d5e84, // n0x161e c0x0000 (---------------) + I asso - 0x0011928c, // n0x161f c0x0000 (---------------) + at-band-camp - 0x00081bcc, // n0x1620 c0x0000 (---------------) + azure-mobile - 0x000c234d, // n0x1621 c0x0000 (---------------) + azurewebsites - 0x0002f087, // n0x1622 c0x0000 (---------------) + blogdns - 0x00023f48, // n0x1623 c0x0000 (---------------) + broke-it - 0x00197f4a, // n0x1624 c0x0000 (---------------) + buyshouses - 0x39262e05, // n0x1625 c0x00e4 (n0x1654-n0x1655) o I cdn77 - 0x00062e09, // n0x1626 c0x0000 (---------------) + cdn77-ssl - 0x00007f48, // n0x1627 c0x0000 (---------------) + cloudapp - 0x0019e90a, // n0x1628 c0x0000 (---------------) + cloudfront - 0x00030c4e, // n0x1629 c0x0000 (---------------) + 
cloudfunctions - 0x00142e48, // n0x162a c0x0000 (---------------) + dnsalias - 0x0007cf07, // n0x162b c0x0000 (---------------) + dnsdojo - 0x00160c07, // n0x162c c0x0000 (---------------) + does-it - 0x0016a789, // n0x162d c0x0000 (---------------) + dontexist - 0x0013c907, // n0x162e c0x0000 (---------------) + dsmynas - 0x00197108, // n0x162f c0x0000 (---------------) + dynalias - 0x000f3449, // n0x1630 c0x0000 (---------------) + dynathome - 0x000aa38d, // n0x1631 c0x0000 (---------------) + endofinternet - 0x00007d48, // n0x1632 c0x0000 (---------------) + familyds - 0x39646286, // n0x1633 c0x00e5 (n0x1655-n0x1657) o I fastly - 0x00062a47, // n0x1634 c0x0000 (---------------) + from-az - 0x00063c47, // n0x1635 c0x0000 (---------------) + from-co - 0x000688c7, // n0x1636 c0x0000 (---------------) + from-la - 0x000709c7, // n0x1637 c0x0000 (---------------) + from-ny - 0x0000e482, // n0x1638 c0x0000 (---------------) + gb - 0x00049d47, // n0x1639 c0x0000 (---------------) + gets-it - 0x0006494c, // n0x163a c0x0000 (---------------) + ham-radio-op - 0x00120a07, // n0x163b c0x0000 (---------------) + homeftp - 0x000a6806, // n0x163c c0x0000 (---------------) + homeip - 0x000a6dc9, // n0x163d c0x0000 (---------------) + homelinux - 0x000a8148, // n0x163e c0x0000 (---------------) + homeunix - 0x00024202, // n0x163f c0x0000 (---------------) + hu - 0x000012c2, // n0x1640 c0x0000 (---------------) + in - 0x000068cb, // n0x1641 c0x0000 (---------------) + in-the-band - 0x000110c9, // n0x1642 c0x0000 (---------------) + is-a-chef - 0x0004ef49, // n0x1643 c0x0000 (---------------) + is-a-geek - 0x000862c8, // n0x1644 c0x0000 (---------------) + isa-geek - 0x000b00c2, // n0x1645 c0x0000 (---------------) + jp - 0x0014ee89, // n0x1646 c0x0000 (---------------) + kicks-ass - 0x0001decd, // n0x1647 c0x0000 (---------------) + office-on-the - 0x000dd687, // n0x1648 c0x0000 (---------------) + podzone - 0x00127bc8, // n0x1649 c0x0000 (---------------) + rackmaze - 0x0004344d, 
// n0x164a c0x0000 (---------------) + scrapper-site - 0x00004ec2, // n0x164b c0x0000 (---------------) + se - 0x0005abc6, // n0x164c c0x0000 (---------------) + selfip - 0x00091b08, // n0x164d c0x0000 (---------------) + sells-it - 0x000ccd88, // n0x164e c0x0000 (---------------) + servebbs - 0x0008bc88, // n0x164f c0x0000 (---------------) + serveftp - 0x000549c8, // n0x1650 c0x0000 (---------------) + thruhere - 0x00001b02, // n0x1651 c0x0000 (---------------) + uk - 0x001267c6, // n0x1652 c0x0000 (---------------) + webhop - 0x00000182, // n0x1653 c0x0000 (---------------) + za - 0x000006c1, // n0x1654 c0x0000 (---------------) + r - 0x39ae1bc4, // n0x1655 c0x00e6 (n0x1657-n0x1659) o I prod - 0x39e62f83, // n0x1656 c0x00e7 (n0x1659-n0x165c) o I ssl - 0x000001c1, // n0x1657 c0x0000 (---------------) + a - 0x0000eac6, // n0x1658 c0x0000 (---------------) + global - 0x000001c1, // n0x1659 c0x0000 (---------------) + a - 0x00000001, // n0x165a c0x0000 (---------------) + b - 0x0000eac6, // n0x165b c0x0000 (---------------) + global - 0x0024b944, // n0x165c c0x0000 (---------------) + I arts - 0x00233243, // n0x165d c0x0000 (---------------) + I com - 0x0024dcc4, // n0x165e c0x0000 (---------------) + I firm - 0x00201844, // n0x165f c0x0000 (---------------) + I info - 0x00223b43, // n0x1660 c0x0000 (---------------) + I net - 0x00225905, // n0x1661 c0x0000 (---------------) + I other - 0x0021e783, // n0x1662 c0x0000 (---------------) + I per - 0x0022c2c3, // n0x1663 c0x0000 (---------------) + I rec - 0x00364cc5, // n0x1664 c0x0000 (---------------) + I store - 0x0021e243, // n0x1665 c0x0000 (---------------) + I web - 0x3aa33243, // n0x1666 c0x00ea (n0x1670-n0x1671) + I com - 0x00239103, // n0x1667 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1668 c0x0000 (---------------) + I gov - 0x00200041, // n0x1669 c0x0000 (---------------) + I i - 0x00207dc3, // n0x166a c0x0000 (---------------) + I mil - 0x0020bf04, // n0x166b c0x0000 (---------------) + I mobi - 
0x00200904, // n0x166c c0x0000 (---------------) + I name - 0x00223b43, // n0x166d c0x0000 (---------------) + I net - 0x00228743, // n0x166e c0x0000 (---------------) + I org - 0x00217283, // n0x166f c0x0000 (---------------) + I sch - 0x000fe108, // n0x1670 c0x0000 (---------------) + blogspot - 0x00200342, // n0x1671 c0x0000 (---------------) + I ac - 0x00331a83, // n0x1672 c0x0000 (---------------) + I biz - 0x0020ce42, // n0x1673 c0x0000 (---------------) + I co - 0x00233243, // n0x1674 c0x0000 (---------------) + I com - 0x00239103, // n0x1675 c0x0000 (---------------) + I edu - 0x00212b03, // n0x1676 c0x0000 (---------------) + I gob - 0x002012c2, // n0x1677 c0x0000 (---------------) + I in - 0x00201844, // n0x1678 c0x0000 (---------------) + I info - 0x00201503, // n0x1679 c0x0000 (---------------) + I int - 0x00207dc3, // n0x167a c0x0000 (---------------) + I mil - 0x00223b43, // n0x167b c0x0000 (---------------) + I net - 0x00201383, // n0x167c c0x0000 (---------------) + I nom - 0x00228743, // n0x167d c0x0000 (---------------) + I org - 0x0021e243, // n0x167e c0x0000 (---------------) + I web - 0x000fe108, // n0x167f c0x0000 (---------------) + blogspot - 0x00365fc2, // n0x1680 c0x0000 (---------------) + I bv - 0x0000ce42, // n0x1681 c0x0000 (---------------) + co - 0x3ba26782, // n0x1682 c0x00ee (n0x1958-n0x1959) + I aa - 0x003528c8, // n0x1683 c0x0000 (---------------) + I aarborte - 0x00226c06, // n0x1684 c0x0000 (---------------) + I aejrie - 0x002bd6c6, // n0x1685 c0x0000 (---------------) + I afjord - 0x00226587, // n0x1686 c0x0000 (---------------) + I agdenes - 0x3be05702, // n0x1687 c0x00ef (n0x1959-n0x195a) + I ah - 0x3c23ed88, // n0x1688 c0x00f0 (n0x195a-n0x195b) o I akershus - 0x00351cca, // n0x1689 c0x0000 (---------------) + I aknoluokta - 0x002624c8, // n0x168a c0x0000 (---------------) + I akrehamn - 0x00200d02, // n0x168b c0x0000 (---------------) + I al - 0x00352549, // n0x168c c0x0000 (---------------) + I alaheadju - 0x00309b07, // 
n0x168d c0x0000 (---------------) + I alesund - 0x0021b906, // n0x168e c0x0000 (---------------) + I algard - 0x00205609, // n0x168f c0x0000 (---------------) + I alstahaug - 0x00239744, // n0x1690 c0x0000 (---------------) + I alta - 0x002be146, // n0x1691 c0x0000 (---------------) + I alvdal - 0x002bdac4, // n0x1692 c0x0000 (---------------) + I amli - 0x00278404, // n0x1693 c0x0000 (---------------) + I amot - 0x00259989, // n0x1694 c0x0000 (---------------) + I andasuolo - 0x002b8206, // n0x1695 c0x0000 (---------------) + I andebu - 0x002293c5, // n0x1696 c0x0000 (---------------) + I andoy - 0x00266b05, // n0x1697 c0x0000 (---------------) + I ardal - 0x00234047, // n0x1698 c0x0000 (---------------) + I aremark - 0x002b9947, // n0x1699 c0x0000 (---------------) + I arendal - 0x003549c4, // n0x169a c0x0000 (---------------) + I arna - 0x002267c6, // n0x169b c0x0000 (---------------) + I aseral - 0x002e2685, // n0x169c c0x0000 (---------------) + I asker - 0x0023e085, // n0x169d c0x0000 (---------------) + I askim - 0x002f1385, // n0x169e c0x0000 (---------------) + I askoy - 0x00387687, // n0x169f c0x0000 (---------------) + I askvoll - 0x0022b645, // n0x16a0 c0x0000 (---------------) + I asnes - 0x0030bb89, // n0x16a1 c0x0000 (---------------) + I audnedaln - 0x0025d9c5, // n0x16a2 c0x0000 (---------------) + I aukra - 0x002f9ec4, // n0x16a3 c0x0000 (---------------) + I aure - 0x00323b07, // n0x16a4 c0x0000 (---------------) + I aurland - 0x0026a80e, // n0x16a5 c0x0000 (---------------) + I aurskog-holand - 0x002f3a49, // n0x16a6 c0x0000 (---------------) + I austevoll - 0x00312b49, // n0x16a7 c0x0000 (---------------) + I austrheim - 0x00330606, // n0x16a8 c0x0000 (---------------) + I averoy - 0x002eaac8, // n0x16a9 c0x0000 (---------------) + I badaddja - 0x002afb0b, // n0x16aa c0x0000 (---------------) + I bahcavuotna - 0x002d2a0c, // n0x16ab c0x0000 (---------------) + I bahccavuotna - 0x00268b46, // n0x16ac c0x0000 (---------------) + I baidar - 
0x003706c7, // n0x16ad c0x0000 (---------------) + I bajddar - 0x0026b905, // n0x16ae c0x0000 (---------------) + I balat - 0x0022a48a, // n0x16af c0x0000 (---------------) + I balestrand - 0x00307649, // n0x16b0 c0x0000 (---------------) + I ballangen - 0x00255089, // n0x16b1 c0x0000 (---------------) + I balsfjord - 0x00269c86, // n0x16b2 c0x0000 (---------------) + I bamble - 0x002eb045, // n0x16b3 c0x0000 (---------------) + I bardu - 0x002804c5, // n0x16b4 c0x0000 (---------------) + I barum - 0x003517c9, // n0x16b5 c0x0000 (---------------) + I batsfjord - 0x002f150b, // n0x16b6 c0x0000 (---------------) + I bearalvahki - 0x0027cbc6, // n0x16b7 c0x0000 (---------------) + I beardu - 0x0032f586, // n0x16b8 c0x0000 (---------------) + I beiarn - 0x0020e3c4, // n0x16b9 c0x0000 (---------------) + I berg - 0x0028d846, // n0x16ba c0x0000 (---------------) + I bergen - 0x00311f88, // n0x16bb c0x0000 (---------------) + I berlevag - 0x00200b46, // n0x16bc c0x0000 (---------------) + I bievat - 0x0038df46, // n0x16bd c0x0000 (---------------) + I bindal - 0x00205d48, // n0x16be c0x0000 (---------------) + I birkenes - 0x0020b447, // n0x16bf c0x0000 (---------------) + I bjarkoy - 0x0020bcc9, // n0x16c0 c0x0000 (---------------) + I bjerkreim - 0x0020d4c5, // n0x16c1 c0x0000 (---------------) + I bjugn - 0x000fe108, // n0x16c2 c0x0000 (---------------) + blogspot - 0x00212b84, // n0x16c3 c0x0000 (---------------) + I bodo - 0x0023dc84, // n0x16c4 c0x0000 (---------------) + I bokn - 0x00213985, // n0x16c5 c0x0000 (---------------) + I bomlo - 0x0038b709, // n0x16c6 c0x0000 (---------------) + I bremanger - 0x00224f47, // n0x16c7 c0x0000 (---------------) + I bronnoy - 0x00224f4b, // n0x16c8 c0x0000 (---------------) + I bronnoysund - 0x00225e8a, // n0x16c9 c0x0000 (---------------) + I brumunddal - 0x0022a185, // n0x16ca c0x0000 (---------------) + I bryne - 0x3c605ac2, // n0x16cb c0x00f1 (n0x195b-n0x195c) + I bu - 0x0037b407, // n0x16cc c0x0000 (---------------) + I 
budejju - 0x3ca2be08, // n0x16cd c0x00f2 (n0x195c-n0x195d) o I buskerud - 0x002b9187, // n0x16ce c0x0000 (---------------) + I bygland - 0x002b8545, // n0x16cf c0x0000 (---------------) + I bykle - 0x0024940a, // n0x16d0 c0x0000 (---------------) + I cahcesuolo - 0x0000ce42, // n0x16d1 c0x0000 (---------------) + co - 0x002db24b, // n0x16d2 c0x0000 (---------------) + I davvenjarga - 0x0021530a, // n0x16d3 c0x0000 (---------------) + I davvesiida - 0x0039d446, // n0x16d4 c0x0000 (---------------) + I deatnu - 0x00273e03, // n0x16d5 c0x0000 (---------------) + I dep - 0x0037834d, // n0x16d6 c0x0000 (---------------) + I dielddanuorri - 0x0026a28c, // n0x16d7 c0x0000 (---------------) + I divtasvuodna - 0x0030808d, // n0x16d8 c0x0000 (---------------) + I divttasvuotna - 0x0035e605, // n0x16d9 c0x0000 (---------------) + I donna - 0x00268605, // n0x16da c0x0000 (---------------) + I dovre - 0x003294c7, // n0x16db c0x0000 (---------------) + I drammen - 0x0031ff89, // n0x16dc c0x0000 (---------------) + I drangedal - 0x00351bc6, // n0x16dd c0x0000 (---------------) + I drobak - 0x0032ad05, // n0x16de c0x0000 (---------------) + I dyroy - 0x0022f808, // n0x16df c0x0000 (---------------) + I egersund - 0x0024ea03, // n0x16e0 c0x0000 (---------------) + I eid - 0x0032ab48, // n0x16e1 c0x0000 (---------------) + I eidfjord - 0x0028d748, // n0x16e2 c0x0000 (---------------) + I eidsberg - 0x002bf047, // n0x16e3 c0x0000 (---------------) + I eidskog - 0x0024ea08, // n0x16e4 c0x0000 (---------------) + I eidsvoll - 0x002005c9, // n0x16e5 c0x0000 (---------------) + I eigersund - 0x0023aec7, // n0x16e6 c0x0000 (---------------) + I elverum - 0x002085c7, // n0x16e7 c0x0000 (---------------) + I enebakk - 0x00279848, // n0x16e8 c0x0000 (---------------) + I engerdal - 0x0035cd84, // n0x16e9 c0x0000 (---------------) + I etne - 0x0035cd87, // n0x16ea c0x0000 (---------------) + I etnedal - 0x00337008, // n0x16eb c0x0000 (---------------) + I evenassi - 0x00202a06, // n0x16ec 
c0x0000 (---------------) + I evenes - 0x0039978f, // n0x16ed c0x0000 (---------------) + I evje-og-hornnes - 0x002112c7, // n0x16ee c0x0000 (---------------) + I farsund - 0x0024cac6, // n0x16ef c0x0000 (---------------) + I fauske - 0x0024df85, // n0x16f0 c0x0000 (---------------) + I fedje - 0x00343683, // n0x16f1 c0x0000 (---------------) + I fet - 0x00343687, // n0x16f2 c0x0000 (---------------) + I fetsund - 0x00233803, // n0x16f3 c0x0000 (---------------) + I fhs - 0x0024c406, // n0x16f4 c0x0000 (---------------) + I finnoy - 0x0024f586, // n0x16f5 c0x0000 (---------------) + I fitjar - 0x00250246, // n0x16f6 c0x0000 (---------------) + I fjaler - 0x00291145, // n0x16f7 c0x0000 (---------------) + I fjell - 0x002659c3, // n0x16f8 c0x0000 (---------------) + I fla - 0x0037e548, // n0x16f9 c0x0000 (---------------) + I flakstad - 0x0031c809, // n0x16fa c0x0000 (---------------) + I flatanger - 0x003654cb, // n0x16fb c0x0000 (---------------) + I flekkefjord - 0x00376188, // n0x16fc c0x0000 (---------------) + I flesberg - 0x002525c5, // n0x16fd c0x0000 (---------------) + I flora - 0x002530c5, // n0x16fe c0x0000 (---------------) + I floro - 0x3ce34802, // n0x16ff c0x00f3 (n0x195d-n0x195e) + I fm - 0x00356a09, // n0x1700 c0x0000 (---------------) + I folkebibl - 0x00256307, // n0x1701 c0x0000 (---------------) + I folldal - 0x0039d385, // n0x1702 c0x0000 (---------------) + I forde - 0x00259887, // n0x1703 c0x0000 (---------------) + I forsand - 0x0025bd86, // n0x1704 c0x0000 (---------------) + I fosnes - 0x0035d5c5, // n0x1705 c0x0000 (---------------) + I frana - 0x0036498b, // n0x1706 c0x0000 (---------------) + I fredrikstad - 0x0025c884, // n0x1707 c0x0000 (---------------) + I frei - 0x00262085, // n0x1708 c0x0000 (---------------) + I frogn - 0x002621c7, // n0x1709 c0x0000 (---------------) + I froland - 0x00276806, // n0x170a c0x0000 (---------------) + I frosta - 0x00276c45, // n0x170b c0x0000 (---------------) + I froya - 0x00284007, // n0x170c 
c0x0000 (---------------) + I fuoisku - 0x00284947, // n0x170d c0x0000 (---------------) + I fuossko - 0x0028fd84, // n0x170e c0x0000 (---------------) + I fusa - 0x0028b38a, // n0x170f c0x0000 (---------------) + I fylkesbibl - 0x0028b848, // n0x1710 c0x0000 (---------------) + I fyresdal - 0x003020c9, // n0x1711 c0x0000 (---------------) + I gaivuotna - 0x00221fc5, // n0x1712 c0x0000 (---------------) + I galsa - 0x002db486, // n0x1713 c0x0000 (---------------) + I gamvik - 0x0031214a, // n0x1714 c0x0000 (---------------) + I gangaviika - 0x00266a06, // n0x1715 c0x0000 (---------------) + I gaular - 0x0026e5c7, // n0x1716 c0x0000 (---------------) + I gausdal - 0x00301d8d, // n0x1717 c0x0000 (---------------) + I giehtavuoatna - 0x00225509, // n0x1718 c0x0000 (---------------) + I gildeskal - 0x00382fc5, // n0x1719 c0x0000 (---------------) + I giske - 0x00313407, // n0x171a c0x0000 (---------------) + I gjemnes - 0x003257c8, // n0x171b c0x0000 (---------------) + I gjerdrum - 0x00354088, // n0x171c c0x0000 (---------------) + I gjerstad - 0x0022b207, // n0x171d c0x0000 (---------------) + I gjesdal - 0x00248b06, // n0x171e c0x0000 (---------------) + I gjovik - 0x00212507, // n0x171f c0x0000 (---------------) + I gloppen - 0x0024dec3, // n0x1720 c0x0000 (---------------) + I gol - 0x00334204, // n0x1721 c0x0000 (---------------) + I gran - 0x0035a405, // n0x1722 c0x0000 (---------------) + I grane - 0x003833c7, // n0x1723 c0x0000 (---------------) + I granvin - 0x00387089, // n0x1724 c0x0000 (---------------) + I gratangen - 0x0021db48, // n0x1725 c0x0000 (---------------) + I grimstad - 0x0026e4c5, // n0x1726 c0x0000 (---------------) + I grong - 0x00314d04, // n0x1727 c0x0000 (---------------) + I grue - 0x00235785, // n0x1728 c0x0000 (---------------) + I gulen - 0x0034938d, // n0x1729 c0x0000 (---------------) + I guovdageaidnu - 0x00202302, // n0x172a c0x0000 (---------------) + I ha - 0x0036d3c6, // n0x172b c0x0000 (---------------) + I habmer - 
0x0025ab06, // n0x172c c0x0000 (---------------) + I hadsel - 0x002a858a, // n0x172d c0x0000 (---------------) + I hagebostad - 0x0035f986, // n0x172e c0x0000 (---------------) + I halden - 0x0036cc85, // n0x172f c0x0000 (---------------) + I halsa - 0x0026b145, // n0x1730 c0x0000 (---------------) + I hamar - 0x0026b147, // n0x1731 c0x0000 (---------------) + I hamaroy - 0x00374a0c, // n0x1732 c0x0000 (---------------) + I hammarfeasta - 0x0027b64a, // n0x1733 c0x0000 (---------------) + I hammerfest - 0x0028d0c6, // n0x1734 c0x0000 (---------------) + I hapmir - 0x002d09c5, // n0x1735 c0x0000 (---------------) + I haram - 0x0028d686, // n0x1736 c0x0000 (---------------) + I hareid - 0x0028d9c7, // n0x1737 c0x0000 (---------------) + I harstad - 0x0028ed06, // n0x1738 c0x0000 (---------------) + I hasvik - 0x0029104c, // n0x1739 c0x0000 (---------------) + I hattfjelldal - 0x00205749, // n0x173a c0x0000 (---------------) + I haugesund - 0x3d236c07, // n0x173b c0x00f4 (n0x195e-n0x1961) o I hedmark - 0x00292ac5, // n0x173c c0x0000 (---------------) + I hemne - 0x00292ac6, // n0x173d c0x0000 (---------------) + I hemnes - 0x00293008, // n0x173e c0x0000 (---------------) + I hemsedal - 0x002b24c5, // n0x173f c0x0000 (---------------) + I herad - 0x002a5bc5, // n0x1740 c0x0000 (---------------) + I hitra - 0x002a5e08, // n0x1741 c0x0000 (---------------) + I hjartdal - 0x002a600a, // n0x1742 c0x0000 (---------------) + I hjelmeland - 0x3d6484c2, // n0x1743 c0x00f5 (n0x1961-n0x1962) + I hl - 0x3da0fbc2, // n0x1744 c0x00f6 (n0x1962-n0x1963) + I hm - 0x003770c5, // n0x1745 c0x0000 (---------------) + I hobol - 0x002d7a43, // n0x1746 c0x0000 (---------------) + I hof - 0x0036e648, // n0x1747 c0x0000 (---------------) + I hokksund - 0x00231f43, // n0x1748 c0x0000 (---------------) + I hol - 0x002a6284, // n0x1749 c0x0000 (---------------) + I hole - 0x0029d6cb, // n0x174a c0x0000 (---------------) + I holmestrand - 0x002afec8, // n0x174b c0x0000 (---------------) + I 
holtalen - 0x002a8948, // n0x174c c0x0000 (---------------) + I honefoss - 0x3df20f89, // n0x174d c0x00f7 (n0x1963-n0x1964) o I hordaland - 0x002a9e09, // n0x174e c0x0000 (---------------) + I hornindal - 0x002aa286, // n0x174f c0x0000 (---------------) + I horten - 0x002ab288, // n0x1750 c0x0000 (---------------) + I hoyanger - 0x002ab489, // n0x1751 c0x0000 (---------------) + I hoylandet - 0x002ac4c6, // n0x1752 c0x0000 (---------------) + I hurdal - 0x002ac645, // n0x1753 c0x0000 (---------------) + I hurum - 0x00363d06, // n0x1754 c0x0000 (---------------) + I hvaler - 0x002acb89, // n0x1755 c0x0000 (---------------) + I hyllestad - 0x00354f47, // n0x1756 c0x0000 (---------------) + I ibestad - 0x0026f346, // n0x1757 c0x0000 (---------------) + I idrett - 0x0037cc07, // n0x1758 c0x0000 (---------------) + I inderoy - 0x00351a47, // n0x1759 c0x0000 (---------------) + I iveland - 0x00234e04, // n0x175a c0x0000 (---------------) + I ivgu - 0x3e220ec9, // n0x175b c0x00f8 (n0x1964-n0x1965) + I jan-mayen - 0x002c7148, // n0x175c c0x0000 (---------------) + I jessheim - 0x00356248, // n0x175d c0x0000 (---------------) + I jevnaker - 0x00232947, // n0x175e c0x0000 (---------------) + I jolster - 0x002c2dc6, // n0x175f c0x0000 (---------------) + I jondal - 0x002feb89, // n0x1760 c0x0000 (---------------) + I jorpeland - 0x002be607, // n0x1761 c0x0000 (---------------) + I kafjord - 0x0025b40a, // n0x1762 c0x0000 (---------------) + I karasjohka - 0x002ee1c8, // n0x1763 c0x0000 (---------------) + I karasjok - 0x00331787, // n0x1764 c0x0000 (---------------) + I karlsoy - 0x00355346, // n0x1765 c0x0000 (---------------) + I karmoy - 0x00229f0a, // n0x1766 c0x0000 (---------------) + I kautokeino - 0x0027f108, // n0x1767 c0x0000 (---------------) + I kirkenes - 0x00250905, // n0x1768 c0x0000 (---------------) + I klabu - 0x00373485, // n0x1769 c0x0000 (---------------) + I klepp - 0x00386347, // n0x176a c0x0000 (---------------) + I kommune - 0x002d9089, // n0x176b 
c0x0000 (---------------) + I kongsberg - 0x002c9e4b, // n0x176c c0x0000 (---------------) + I kongsvinger - 0x002d6e88, // n0x176d c0x0000 (---------------) + I kopervik - 0x0025da49, // n0x176e c0x0000 (---------------) + I kraanghke - 0x002504c7, // n0x176f c0x0000 (---------------) + I kragero - 0x002b1a4c, // n0x1770 c0x0000 (---------------) + I kristiansand - 0x002b208c, // n0x1771 c0x0000 (---------------) + I kristiansund - 0x002b238a, // n0x1772 c0x0000 (---------------) + I krodsherad - 0x002b260c, // n0x1773 c0x0000 (---------------) + I krokstadelva - 0x002bd648, // n0x1774 c0x0000 (---------------) + I kvafjord - 0x002bd848, // n0x1775 c0x0000 (---------------) + I kvalsund - 0x002bda44, // n0x1776 c0x0000 (---------------) + I kvam - 0x002be7c9, // n0x1777 c0x0000 (---------------) + I kvanangen - 0x002bea09, // n0x1778 c0x0000 (---------------) + I kvinesdal - 0x002bec4a, // n0x1779 c0x0000 (---------------) + I kvinnherad - 0x002beec9, // n0x177a c0x0000 (---------------) + I kviteseid - 0x002bf207, // n0x177b c0x0000 (---------------) + I kvitsoy - 0x003a580c, // n0x177c c0x0000 (---------------) + I laakesvuemie - 0x00338106, // n0x177d c0x0000 (---------------) + I lahppi - 0x00251688, // n0x177e c0x0000 (---------------) + I langevag - 0x00266ac6, // n0x177f c0x0000 (---------------) + I lardal - 0x0037d606, // n0x1780 c0x0000 (---------------) + I larvik - 0x00382ec7, // n0x1781 c0x0000 (---------------) + I lavagis - 0x002f3c48, // n0x1782 c0x0000 (---------------) + I lavangen - 0x0026c2cb, // n0x1783 c0x0000 (---------------) + I leangaviika - 0x002b9047, // n0x1784 c0x0000 (---------------) + I lebesby - 0x00259649, // n0x1785 c0x0000 (---------------) + I leikanger - 0x00281e49, // n0x1786 c0x0000 (---------------) + I leirfjord - 0x0035da87, // n0x1787 c0x0000 (---------------) + I leirvik - 0x002bdd04, // n0x1788 c0x0000 (---------------) + I leka - 0x00382447, // n0x1789 c0x0000 (---------------) + I leksvik - 0x00353546, // n0x178a 
c0x0000 (---------------) + I lenvik - 0x00217446, // n0x178b c0x0000 (---------------) + I lerdal - 0x00308c85, // n0x178c c0x0000 (---------------) + I lesja - 0x002d1848, // n0x178d c0x0000 (---------------) + I levanger - 0x002e38c4, // n0x178e c0x0000 (---------------) + I lier - 0x002e38c6, // n0x178f c0x0000 (---------------) + I lierne - 0x0027b50b, // n0x1790 c0x0000 (---------------) + I lillehammer - 0x00330f89, // n0x1791 c0x0000 (---------------) + I lillesand - 0x00312986, // n0x1792 c0x0000 (---------------) + I lindas - 0x00320189, // n0x1793 c0x0000 (---------------) + I lindesnes - 0x00387806, // n0x1794 c0x0000 (---------------) + I loabat - 0x00259b48, // n0x1795 c0x0000 (---------------) + I lodingen - 0x00214d03, // n0x1796 c0x0000 (---------------) + I lom - 0x0038fe45, // n0x1797 c0x0000 (---------------) + I loppa - 0x00217589, // n0x1798 c0x0000 (---------------) + I lorenskog - 0x00218d45, // n0x1799 c0x0000 (---------------) + I loten - 0x002e2ac4, // n0x179a c0x0000 (---------------) + I lund - 0x00275106, // n0x179b c0x0000 (---------------) + I lunner - 0x002322c5, // n0x179c c0x0000 (---------------) + I luroy - 0x002dd446, // n0x179d c0x0000 (---------------) + I luster - 0x002fb707, // n0x179e c0x0000 (---------------) + I lyngdal - 0x00212f86, // n0x179f c0x0000 (---------------) + I lyngen - 0x0029924b, // n0x17a0 c0x0000 (---------------) + I malatvuopmi - 0x002e5bc7, // n0x17a1 c0x0000 (---------------) + I malselv - 0x00206506, // n0x17a2 c0x0000 (---------------) + I malvik - 0x00349e06, // n0x17a3 c0x0000 (---------------) + I mandal - 0x00234106, // n0x17a4 c0x0000 (---------------) + I marker - 0x00354989, // n0x17a5 c0x0000 (---------------) + I marnardal - 0x0021cb8a, // n0x17a6 c0x0000 (---------------) + I masfjorden - 0x0032f2c5, // n0x17a7 c0x0000 (---------------) + I masoy - 0x0021f1cd, // n0x17a8 c0x0000 (---------------) + I matta-varjjat - 0x002a6106, // n0x17a9 c0x0000 (---------------) + I meland - 0x00213686, 
// n0x17aa c0x0000 (---------------) + I meldal - 0x00287286, // n0x17ab c0x0000 (---------------) + I melhus - 0x0029d385, // n0x17ac c0x0000 (---------------) + I meloy - 0x0023ecc7, // n0x17ad c0x0000 (---------------) + I meraker - 0x0029ee07, // n0x17ae c0x0000 (---------------) + I midsund - 0x002e6bce, // n0x17af c0x0000 (---------------) + I midtre-gauldal - 0x00207dc3, // n0x17b0 c0x0000 (---------------) + I mil - 0x002c2d89, // n0x17b1 c0x0000 (---------------) + I mjondalen - 0x00379389, // n0x17b2 c0x0000 (---------------) + I mo-i-rana - 0x0022a887, // n0x17b3 c0x0000 (---------------) + I moareke - 0x0026d447, // n0x17b4 c0x0000 (---------------) + I modalen - 0x002a7945, // n0x17b5 c0x0000 (---------------) + I modum - 0x00324b05, // n0x17b6 c0x0000 (---------------) + I molde - 0x3e65cf8f, // n0x17b7 c0x00f9 (n0x1965-n0x1967) o I more-og-romsdal - 0x002c9907, // n0x17b8 c0x0000 (---------------) + I mosjoen - 0x002c9ac8, // n0x17b9 c0x0000 (---------------) + I moskenes - 0x002ca104, // n0x17ba c0x0000 (---------------) + I moss - 0x002ca446, // n0x17bb c0x0000 (---------------) + I mosvik - 0x3ea4a142, // n0x17bc c0x00fa (n0x1967-n0x1968) + I mr - 0x002cd886, // n0x17bd c0x0000 (---------------) + I muosat - 0x002d1086, // n0x17be c0x0000 (---------------) + I museum - 0x0026c60e, // n0x17bf c0x0000 (---------------) + I naamesjevuemie - 0x0032a98a, // n0x17c0 c0x0000 (---------------) + I namdalseid - 0x002b74c6, // n0x17c1 c0x0000 (---------------) + I namsos - 0x0021fb0a, // n0x17c2 c0x0000 (---------------) + I namsskogan - 0x002c5109, // n0x17c3 c0x0000 (---------------) + I nannestad - 0x003183c5, // n0x17c4 c0x0000 (---------------) + I naroy - 0x00389d88, // n0x17c5 c0x0000 (---------------) + I narviika - 0x003a1306, // n0x17c6 c0x0000 (---------------) + I narvik - 0x00330dc8, // n0x17c7 c0x0000 (---------------) + I naustdal - 0x0030aa88, // n0x17c8 c0x0000 (---------------) + I navuotna - 0x0032624b, // n0x17c9 c0x0000 
(---------------) + I nedre-eiker - 0x00226685, // n0x17ca c0x0000 (---------------) + I nesna - 0x0022b6c8, // n0x17cb c0x0000 (---------------) + I nesodden - 0x00205e8c, // n0x17cc c0x0000 (---------------) + I nesoddtangen - 0x002b8407, // n0x17cd c0x0000 (---------------) + I nesseby - 0x0024fdc6, // n0x17ce c0x0000 (---------------) + I nesset - 0x0022eac8, // n0x17cf c0x0000 (---------------) + I nissedal - 0x00279b48, // n0x17d0 c0x0000 (---------------) + I nittedal - 0x3ee46d02, // n0x17d1 c0x00fb (n0x1968-n0x1969) + I nl - 0x002bdf0b, // n0x17d2 c0x0000 (---------------) + I nord-aurdal - 0x00397549, // n0x17d3 c0x0000 (---------------) + I nord-fron - 0x00343249, // n0x17d4 c0x0000 (---------------) + I nord-odal - 0x00382d47, // n0x17d5 c0x0000 (---------------) + I norddal - 0x00248908, // n0x17d6 c0x0000 (---------------) + I nordkapp - 0x3f31fdc8, // n0x17d7 c0x00fc (n0x1969-n0x196d) o I nordland - 0x0026ba8b, // n0x17d8 c0x0000 (---------------) + I nordre-land - 0x00286149, // n0x17d9 c0x0000 (---------------) + I nordreisa - 0x0021194d, // n0x17da c0x0000 (---------------) + I nore-og-uvdal - 0x003289c8, // n0x17db c0x0000 (---------------) + I notodden - 0x0032d988, // n0x17dc c0x0000 (---------------) + I notteroy - 0x3f601542, // n0x17dd c0x00fd (n0x196d-n0x196e) + I nt - 0x003a40c4, // n0x17de c0x0000 (---------------) + I odda - 0x3fa0ce82, // n0x17df c0x00fe (n0x196e-n0x196f) + I of - 0x002ee346, // n0x17e0 c0x0000 (---------------) + I oksnes - 0x3fe02102, // n0x17e1 c0x00ff (n0x196f-n0x1970) + I ol - 0x0030604a, // n0x17e2 c0x0000 (---------------) + I omasvuotna - 0x00357206, // n0x17e3 c0x0000 (---------------) + I oppdal - 0x00221288, // n0x17e4 c0x0000 (---------------) + I oppegard - 0x002566c8, // n0x17e5 c0x0000 (---------------) + I orkanger - 0x002eadc6, // n0x17e6 c0x0000 (---------------) + I orkdal - 0x003389c6, // n0x17e7 c0x0000 (---------------) + I orland - 0x002e6086, // n0x17e8 c0x0000 (---------------) + I orskog - 
0x0027aec5, // n0x17e9 c0x0000 (---------------) + I orsta - 0x00240e04, // n0x17ea c0x0000 (---------------) + I osen - 0x402c5e84, // n0x17eb c0x0100 (n0x1970-n0x1971) + I oslo - 0x00334dc6, // n0x17ec c0x0000 (---------------) + I osoyro - 0x0024afc7, // n0x17ed c0x0000 (---------------) + I osteroy - 0x40798d87, // n0x17ee c0x0101 (n0x1971-n0x1972) o I ostfold - 0x002cf14b, // n0x17ef c0x0000 (---------------) + I ostre-toten - 0x0026abc9, // n0x17f0 c0x0000 (---------------) + I overhalla - 0x0026864a, // n0x17f1 c0x0000 (---------------) + I ovre-eiker - 0x00319a44, // n0x17f2 c0x0000 (---------------) + I oyer - 0x0026b288, // n0x17f3 c0x0000 (---------------) + I oygarden - 0x0026f10d, // n0x17f4 c0x0000 (---------------) + I oystre-slidre - 0x002dfc09, // n0x17f5 c0x0000 (---------------) + I porsanger - 0x002dfe48, // n0x17f6 c0x0000 (---------------) + I porsangu - 0x002e00c9, // n0x17f7 c0x0000 (---------------) + I porsgrunn - 0x002e17c4, // n0x17f8 c0x0000 (---------------) + I priv - 0x00205504, // n0x17f9 c0x0000 (---------------) + I rade - 0x0027f9c5, // n0x17fa c0x0000 (---------------) + I radoy - 0x0027700b, // n0x17fb c0x0000 (---------------) + I rahkkeravju - 0x002afe46, // n0x17fc c0x0000 (---------------) + I raholt - 0x00334c05, // n0x17fd c0x0000 (---------------) + I raisa - 0x00356cc9, // n0x17fe c0x0000 (---------------) + I rakkestad - 0x00226888, // n0x17ff c0x0000 (---------------) + I ralingen - 0x002a0c04, // n0x1800 c0x0000 (---------------) + I rana - 0x0022a609, // n0x1801 c0x0000 (---------------) + I randaberg - 0x00248145, // n0x1802 c0x0000 (---------------) + I rauma - 0x002b9988, // n0x1803 c0x0000 (---------------) + I rendalen - 0x00208c87, // n0x1804 c0x0000 (---------------) + I rennebu - 0x00312f48, // n0x1805 c0x0000 (---------------) + I rennesoy - 0x0027ff86, // n0x1806 c0x0000 (---------------) + I rindal - 0x0037b2c7, // n0x1807 c0x0000 (---------------) + I ringebu - 0x0020f789, // n0x1808 c0x0000 
(---------------) + I ringerike - 0x00245d09, // n0x1809 c0x0000 (---------------) + I ringsaker - 0x00277785, // n0x180a c0x0000 (---------------) + I risor - 0x003789c5, // n0x180b c0x0000 (---------------) + I rissa - 0x40a24dc2, // n0x180c c0x0102 (n0x1972-n0x1973) + I rl - 0x002f9c84, // n0x180d c0x0000 (---------------) + I roan - 0x0029e885, // n0x180e c0x0000 (---------------) + I rodoy - 0x003017c6, // n0x180f c0x0000 (---------------) + I rollag - 0x0031abc5, // n0x1810 c0x0000 (---------------) + I romsa - 0x00253187, // n0x1811 c0x0000 (---------------) + I romskog - 0x00297a45, // n0x1812 c0x0000 (---------------) + I roros - 0x00276844, // n0x1813 c0x0000 (---------------) + I rost - 0x003306c6, // n0x1814 c0x0000 (---------------) + I royken - 0x0032ad87, // n0x1815 c0x0000 (---------------) + I royrvik - 0x0024a186, // n0x1816 c0x0000 (---------------) + I ruovat - 0x003314c5, // n0x1817 c0x0000 (---------------) + I rygge - 0x0030a688, // n0x1818 c0x0000 (---------------) + I salangen - 0x00226e05, // n0x1819 c0x0000 (---------------) + I salat - 0x00312807, // n0x181a c0x0000 (---------------) + I saltdal - 0x00344a49, // n0x181b c0x0000 (---------------) + I samnanger - 0x003310ca, // n0x181c c0x0000 (---------------) + I sandefjord - 0x00348847, // n0x181d c0x0000 (---------------) + I sandnes - 0x0034884c, // n0x181e c0x0000 (---------------) + I sandnessjoen - 0x00229386, // n0x181f c0x0000 (---------------) + I sandoy - 0x002285c9, // n0x1820 c0x0000 (---------------) + I sarpsborg - 0x0032b205, // n0x1821 c0x0000 (---------------) + I sauda - 0x0034f988, // n0x1822 c0x0000 (---------------) + I sauherad - 0x00212383, // n0x1823 c0x0000 (---------------) + I sel - 0x00212385, // n0x1824 c0x0000 (---------------) + I selbu - 0x00335945, // n0x1825 c0x0000 (---------------) + I selje - 0x0023c187, // n0x1826 c0x0000 (---------------) + I seljord - 0x40e10842, // n0x1827 c0x0103 (n0x1973-n0x1974) + I sf - 0x0023ba87, // n0x1828 c0x0000 
(---------------) + I siellak - 0x002c6246, // n0x1829 c0x0000 (---------------) + I sigdal - 0x00220e06, // n0x182a c0x0000 (---------------) + I siljan - 0x002ccf46, // n0x182b c0x0000 (---------------) + I sirdal - 0x00279a86, // n0x182c c0x0000 (---------------) + I skanit - 0x00307ec8, // n0x182d c0x0000 (---------------) + I skanland - 0x0024e845, // n0x182e c0x0000 (---------------) + I skaun - 0x0024cb87, // n0x182f c0x0000 (---------------) + I skedsmo - 0x0024cb8d, // n0x1830 c0x0000 (---------------) + I skedsmokorset - 0x00208503, // n0x1831 c0x0000 (---------------) + I ski - 0x00208505, // n0x1832 c0x0000 (---------------) + I skien - 0x00229a87, // n0x1833 c0x0000 (---------------) + I skierva - 0x002d3188, // n0x1834 c0x0000 (---------------) + I skiptvet - 0x00229645, // n0x1835 c0x0000 (---------------) + I skjak - 0x00230988, // n0x1836 c0x0000 (---------------) + I skjervoy - 0x0026dfc6, // n0x1837 c0x0000 (---------------) + I skodje - 0x00262fc7, // n0x1838 c0x0000 (---------------) + I slattum - 0x002c1cc5, // n0x1839 c0x0000 (---------------) + I smola - 0x00226706, // n0x183a c0x0000 (---------------) + I snaase - 0x00360ac5, // n0x183b c0x0000 (---------------) + I snasa - 0x002bcd4a, // n0x183c c0x0000 (---------------) + I snillfjord - 0x00373786, // n0x183d c0x0000 (---------------) + I snoasa - 0x00234a47, // n0x183e c0x0000 (---------------) + I sogndal - 0x002bd505, // n0x183f c0x0000 (---------------) + I sogne - 0x002d8a87, // n0x1840 c0x0000 (---------------) + I sokndal - 0x002e0984, // n0x1841 c0x0000 (---------------) + I sola - 0x002e2a46, // n0x1842 c0x0000 (---------------) + I solund - 0x0035bc85, // n0x1843 c0x0000 (---------------) + I somna - 0x002b800b, // n0x1844 c0x0000 (---------------) + I sondre-land - 0x003533c9, // n0x1845 c0x0000 (---------------) + I songdalen - 0x002da24a, // n0x1846 c0x0000 (---------------) + I sor-aurdal - 0x00277808, // n0x1847 c0x0000 (---------------) + I sor-fron - 0x003177c8, // 
n0x1848 c0x0000 (---------------) + I sor-odal - 0x002ef68c, // n0x1849 c0x0000 (---------------) + I sor-varanger - 0x002efec7, // n0x184a c0x0000 (---------------) + I sorfold - 0x002f2488, // n0x184b c0x0000 (---------------) + I sorreisa - 0x002fa788, // n0x184c c0x0000 (---------------) + I sortland - 0x002fc305, // n0x184d c0x0000 (---------------) + I sorum - 0x002bf48a, // n0x184e c0x0000 (---------------) + I spjelkavik - 0x00332909, // n0x184f c0x0000 (---------------) + I spydeberg - 0x41202602, // n0x1850 c0x0104 (n0x1974-n0x1975) + I st - 0x00202606, // n0x1851 c0x0000 (---------------) + I stange - 0x0020a204, // n0x1852 c0x0000 (---------------) + I stat - 0x002de689, // n0x1853 c0x0000 (---------------) + I stathelle - 0x00254209, // n0x1854 c0x0000 (---------------) + I stavanger - 0x0021b107, // n0x1855 c0x0000 (---------------) + I stavern - 0x0024f3c7, // n0x1856 c0x0000 (---------------) + I steigen - 0x00280c09, // n0x1857 c0x0000 (---------------) + I steinkjer - 0x0038d988, // n0x1858 c0x0000 (---------------) + I stjordal - 0x0038d98f, // n0x1859 c0x0000 (---------------) + I stjordalshalsen - 0x00275c46, // n0x185a c0x0000 (---------------) + I stokke - 0x0024280b, // n0x185b c0x0000 (---------------) + I stor-elvdal - 0x0035b185, // n0x185c c0x0000 (---------------) + I stord - 0x0035b187, // n0x185d c0x0000 (---------------) + I stordal - 0x0037c2c9, // n0x185e c0x0000 (---------------) + I storfjord - 0x0022a586, // n0x185f c0x0000 (---------------) + I strand - 0x0022a587, // n0x1860 c0x0000 (---------------) + I stranda - 0x003a37c5, // n0x1861 c0x0000 (---------------) + I stryn - 0x00237984, // n0x1862 c0x0000 (---------------) + I sula - 0x00235406, // n0x1863 c0x0000 (---------------) + I suldal - 0x00200704, // n0x1864 c0x0000 (---------------) + I sund - 0x0030d887, // n0x1865 c0x0000 (---------------) + I sunndal - 0x002e9388, // n0x1866 c0x0000 (---------------) + I surnadal - 0x416eaf48, // n0x1867 c0x0105 (n0x1975-n0x1976) + 
I svalbard - 0x002eb545, // n0x1868 c0x0000 (---------------) + I sveio - 0x002eb687, // n0x1869 c0x0000 (---------------) + I svelvik - 0x003684c9, // n0x186a c0x0000 (---------------) + I sykkylven - 0x00203c44, // n0x186b c0x0000 (---------------) + I tana - 0x00203c48, // n0x186c c0x0000 (---------------) + I tananger - 0x41a64dc8, // n0x186d c0x0106 (n0x1976-n0x1978) o I telemark - 0x00219dc4, // n0x186e c0x0000 (---------------) + I time - 0x00238488, // n0x186f c0x0000 (---------------) + I tingvoll - 0x002dca04, // n0x1870 c0x0000 (---------------) + I tinn - 0x0023e5c9, // n0x1871 c0x0000 (---------------) + I tjeldsund - 0x0036e185, // n0x1872 c0x0000 (---------------) + I tjome - 0x41e00c82, // n0x1873 c0x0107 (n0x1978-n0x1979) + I tm - 0x00275c85, // n0x1874 c0x0000 (---------------) + I tokke - 0x00221f05, // n0x1875 c0x0000 (---------------) + I tolga - 0x0035ebc8, // n0x1876 c0x0000 (---------------) + I tonsberg - 0x00239f07, // n0x1877 c0x0000 (---------------) + I torsken - 0x42203902, // n0x1878 c0x0108 (n0x1979-n0x197a) + I tr - 0x002cb4c5, // n0x1879 c0x0000 (---------------) + I trana - 0x002847c6, // n0x187a c0x0000 (---------------) + I tranby - 0x002960c6, // n0x187b c0x0000 (---------------) + I tranoy - 0x002f9c48, // n0x187c c0x0000 (---------------) + I troandin - 0x002fe2c8, // n0x187d c0x0000 (---------------) + I trogstad - 0x0031ab86, // n0x187e c0x0000 (---------------) + I tromsa - 0x00322446, // n0x187f c0x0000 (---------------) + I tromso - 0x00357709, // n0x1880 c0x0000 (---------------) + I trondheim - 0x00358346, // n0x1881 c0x0000 (---------------) + I trysil - 0x0024910b, // n0x1882 c0x0000 (---------------) + I tvedestrand - 0x0024fbc5, // n0x1883 c0x0000 (---------------) + I tydal - 0x0021f706, // n0x1884 c0x0000 (---------------) + I tynset - 0x0039f5c8, // n0x1885 c0x0000 (---------------) + I tysfjord - 0x00233406, // n0x1886 c0x0000 (---------------) + I tysnes - 0x00236546, // n0x1887 c0x0000 (---------------) + I 
tysvar - 0x00215b8a, // n0x1888 c0x0000 (---------------) + I ullensaker - 0x00343e0a, // n0x1889 c0x0000 (---------------) + I ullensvang - 0x0028d385, // n0x188a c0x0000 (---------------) + I ulvik - 0x002c9407, // n0x188b c0x0000 (---------------) + I unjarga - 0x00341f46, // n0x188c c0x0000 (---------------) + I utsira - 0x42600c02, // n0x188d c0x0109 (n0x197a-n0x197b) + I va - 0x00229bc7, // n0x188e c0x0000 (---------------) + I vaapste - 0x00274545, // n0x188f c0x0000 (---------------) + I vadso - 0x003120c4, // n0x1890 c0x0000 (---------------) + I vaga - 0x003120c5, // n0x1891 c0x0000 (---------------) + I vagan - 0x00319946, // n0x1892 c0x0000 (---------------) + I vagsoy - 0x0032e2c7, // n0x1893 c0x0000 (---------------) + I vaksdal - 0x00216c85, // n0x1894 c0x0000 (---------------) + I valle - 0x002542c4, // n0x1895 c0x0000 (---------------) + I vang - 0x00270708, // n0x1896 c0x0000 (---------------) + I vanylven - 0x00236605, // n0x1897 c0x0000 (---------------) + I vardo - 0x00293687, // n0x1898 c0x0000 (---------------) + I varggat - 0x00358ac5, // n0x1899 c0x0000 (---------------) + I varoy - 0x00214ac5, // n0x189a c0x0000 (---------------) + I vefsn - 0x0023dfc4, // n0x189b c0x0000 (---------------) + I vega - 0x0028c9c9, // n0x189c c0x0000 (---------------) + I vegarshei - 0x002e24c8, // n0x189d c0x0000 (---------------) + I vennesla - 0x00372686, // n0x189e c0x0000 (---------------) + I verdal - 0x00342546, // n0x189f c0x0000 (---------------) + I verran - 0x00219ac6, // n0x18a0 c0x0000 (---------------) + I vestby - 0x42b9be88, // n0x18a1 c0x010a (n0x197b-n0x197c) o I vestfold - 0x002ef507, // n0x18a2 c0x0000 (---------------) + I vestnes - 0x002ef98d, // n0x18a3 c0x0000 (---------------) + I vestre-slidre - 0x002f008c, // n0x18a4 c0x0000 (---------------) + I vestre-toten - 0x002f0689, // n0x18a5 c0x0000 (---------------) + I vestvagoy - 0x002f08c9, // n0x18a6 c0x0000 (---------------) + I vevelstad - 0x42f4d602, // n0x18a7 c0x010b 
(n0x197c-n0x197d) + I vf - 0x0039adc3, // n0x18a8 c0x0000 (---------------) + I vgs - 0x002065c3, // n0x18a9 c0x0000 (---------------) + I vik - 0x00353605, // n0x18aa c0x0000 (---------------) + I vikna - 0x003834ca, // n0x18ab c0x0000 (---------------) + I vindafjord - 0x0031aa46, // n0x18ac c0x0000 (---------------) + I voagat - 0x002f8945, // n0x18ad c0x0000 (---------------) + I volda - 0x002fbc44, // n0x18ae c0x0000 (---------------) + I voss - 0x002fbc4b, // n0x18af c0x0000 (---------------) + I vossevangen - 0x0030f7cc, // n0x18b0 c0x0000 (---------------) + I xn--andy-ira - 0x0031000c, // n0x18b1 c0x0000 (---------------) + I xn--asky-ira - 0x00310315, // n0x18b2 c0x0000 (---------------) + I xn--aurskog-hland-jnb - 0x003138cd, // n0x18b3 c0x0000 (---------------) + I xn--avery-yua - 0x00314e0f, // n0x18b4 c0x0000 (---------------) + I xn--bdddj-mrabd - 0x003151d2, // n0x18b5 c0x0000 (---------------) + I xn--bearalvhki-y4a - 0x0031564f, // n0x18b6 c0x0000 (---------------) + I xn--berlevg-jxa - 0x00315a12, // n0x18b7 c0x0000 (---------------) + I xn--bhcavuotna-s4a - 0x00315e93, // n0x18b8 c0x0000 (---------------) + I xn--bhccavuotna-k7a - 0x0031634d, // n0x18b9 c0x0000 (---------------) + I xn--bidr-5nac - 0x0031690d, // n0x18ba c0x0000 (---------------) + I xn--bievt-0qa - 0x00316c8e, // n0x18bb c0x0000 (---------------) + I xn--bjarky-fya - 0x0031714e, // n0x18bc c0x0000 (---------------) + I xn--bjddar-pta - 0x003179cc, // n0x18bd c0x0000 (---------------) + I xn--blt-elab - 0x00317d4c, // n0x18be c0x0000 (---------------) + I xn--bmlo-gra - 0x0031818b, // n0x18bf c0x0000 (---------------) + I xn--bod-2na - 0x0031850e, // n0x18c0 c0x0000 (---------------) + I xn--brnny-wuac - 0x00319f52, // n0x18c1 c0x0000 (---------------) + I xn--brnnysund-m8ac - 0x0031a80c, // n0x18c2 c0x0000 (---------------) + I xn--brum-voa - 0x0031af50, // n0x18c3 c0x0000 (---------------) + I xn--btsfjord-9za - 0x0032b352, // n0x18c4 c0x0000 (---------------) + I 
xn--davvenjrga-y4a - 0x0032c20c, // n0x18c5 c0x0000 (---------------) + I xn--dnna-gra - 0x0032c8cd, // n0x18c6 c0x0000 (---------------) + I xn--drbak-wua - 0x0032cc0c, // n0x18c7 c0x0000 (---------------) + I xn--dyry-ira - 0x0032e751, // n0x18c8 c0x0000 (---------------) + I xn--eveni-0qa01ga - 0x0032f70d, // n0x18c9 c0x0000 (---------------) + I xn--finny-yua - 0x00332e4d, // n0x18ca c0x0000 (---------------) + I xn--fjord-lra - 0x0033344a, // n0x18cb c0x0000 (---------------) + I xn--fl-zia - 0x003336cc, // n0x18cc c0x0000 (---------------) + I xn--flor-jra - 0x00333fcc, // n0x18cd c0x0000 (---------------) + I xn--frde-gra - 0x0033490c, // n0x18ce c0x0000 (---------------) + I xn--frna-woa - 0x0033518c, // n0x18cf c0x0000 (---------------) + I xn--frya-hra - 0x00338b53, // n0x18d0 c0x0000 (---------------) + I xn--ggaviika-8ya47h - 0x00339150, // n0x18d1 c0x0000 (---------------) + I xn--gildeskl-g0a - 0x00339550, // n0x18d2 c0x0000 (---------------) + I xn--givuotna-8ya - 0x00339e4d, // n0x18d3 c0x0000 (---------------) + I xn--gjvik-wua - 0x0033a44c, // n0x18d4 c0x0000 (---------------) + I xn--gls-elac - 0x0033b409, // n0x18d5 c0x0000 (---------------) + I xn--h-2fa - 0x0033d90d, // n0x18d6 c0x0000 (---------------) + I xn--hbmer-xqa - 0x0033dc53, // n0x18d7 c0x0000 (---------------) + I xn--hcesuolo-7ya35b - 0x0033e851, // n0x18d8 c0x0000 (---------------) + I xn--hgebostad-g3a - 0x0033ec93, // n0x18d9 c0x0000 (---------------) + I xn--hmmrfeasta-s4ac - 0x0033fb8f, // n0x18da c0x0000 (---------------) + I xn--hnefoss-q1a - 0x0033ff4c, // n0x18db c0x0000 (---------------) + I xn--hobl-ira - 0x0034024f, // n0x18dc c0x0000 (---------------) + I xn--holtlen-hxa - 0x0034060d, // n0x18dd c0x0000 (---------------) + I xn--hpmir-xqa - 0x00340c0f, // n0x18de c0x0000 (---------------) + I xn--hyanger-q1a - 0x00340fd0, // n0x18df c0x0000 (---------------) + I xn--hylandet-54a - 0x00341a4e, // n0x18e0 c0x0000 (---------------) + I xn--indery-fya - 0x00344c8e, // 
n0x18e1 c0x0000 (---------------) + I xn--jlster-bya - 0x003453d0, // n0x18e2 c0x0000 (---------------) + I xn--jrpeland-54a - 0x0034650d, // n0x18e3 c0x0000 (---------------) + I xn--karmy-yua - 0x00346e8e, // n0x18e4 c0x0000 (---------------) + I xn--kfjord-iua - 0x0034720c, // n0x18e5 c0x0000 (---------------) + I xn--klbu-woa - 0x003482d3, // n0x18e6 c0x0000 (---------------) + I xn--koluokta-7ya57h - 0x00349f8e, // n0x18e7 c0x0000 (---------------) + I xn--krager-gya - 0x0034bb50, // n0x18e8 c0x0000 (---------------) + I xn--kranghke-b0a - 0x0034bf51, // n0x18e9 c0x0000 (---------------) + I xn--krdsherad-m8a - 0x0034c38f, // n0x18ea c0x0000 (---------------) + I xn--krehamn-dxa - 0x0034c753, // n0x18eb c0x0000 (---------------) + I xn--krjohka-hwab49j - 0x0034d18d, // n0x18ec c0x0000 (---------------) + I xn--ksnes-uua - 0x0034d4cf, // n0x18ed c0x0000 (---------------) + I xn--kvfjord-nxa - 0x0034d88e, // n0x18ee c0x0000 (---------------) + I xn--kvitsy-fya - 0x0034e0d0, // n0x18ef c0x0000 (---------------) + I xn--kvnangen-k0a - 0x0034e4c9, // n0x18f0 c0x0000 (---------------) + I xn--l-1fa - 0x00350310, // n0x18f1 c0x0000 (---------------) + I xn--laheadju-7ya - 0x0035094f, // n0x18f2 c0x0000 (---------------) + I xn--langevg-jxa - 0x00350fcf, // n0x18f3 c0x0000 (---------------) + I xn--ldingen-q1a - 0x00351392, // n0x18f4 c0x0000 (---------------) + I xn--leagaviika-52b - 0x00355b4e, // n0x18f5 c0x0000 (---------------) + I xn--lesund-hua - 0x0035644d, // n0x18f6 c0x0000 (---------------) + I xn--lgrd-poac - 0x00357a8d, // n0x18f7 c0x0000 (---------------) + I xn--lhppi-xqa - 0x00357dcd, // n0x18f8 c0x0000 (---------------) + I xn--linds-pra - 0x0035918d, // n0x18f9 c0x0000 (---------------) + I xn--loabt-0qa - 0x003594cd, // n0x18fa c0x0000 (---------------) + I xn--lrdal-sra - 0x00359810, // n0x18fb c0x0000 (---------------) + I xn--lrenskog-54a - 0x00359c0b, // n0x18fc c0x0000 (---------------) + I xn--lt-liac - 0x0035a1cc, // n0x18fd c0x0000 
(---------------) + I xn--lten-gra - 0x0035a54c, // n0x18fe c0x0000 (---------------) + I xn--lury-ira - 0x0035a84c, // n0x18ff c0x0000 (---------------) + I xn--mely-ira - 0x0035ab4e, // n0x1900 c0x0000 (---------------) + I xn--merker-kua - 0x00366d90, // n0x1901 c0x0000 (---------------) + I xn--mjndalen-64a - 0x00368c92, // n0x1902 c0x0000 (---------------) + I xn--mlatvuopmi-s4a - 0x0036910b, // n0x1903 c0x0000 (---------------) + I xn--mli-tla - 0x00369b8e, // n0x1904 c0x0000 (---------------) + I xn--mlselv-iua - 0x00369f0e, // n0x1905 c0x0000 (---------------) + I xn--moreke-jua - 0x0036ac0e, // n0x1906 c0x0000 (---------------) + I xn--mosjen-eya - 0x0036b34b, // n0x1907 c0x0000 (---------------) + I xn--mot-tla - 0x4336b916, // n0x1908 c0x010c (n0x197d-n0x197f) o I xn--mre-og-romsdal-qqb - 0x0036c98d, // n0x1909 c0x0000 (---------------) + I xn--msy-ula0h - 0x0036d554, // n0x190a c0x0000 (---------------) + I xn--mtta-vrjjat-k7af - 0x0036e84d, // n0x190b c0x0000 (---------------) + I xn--muost-0qa - 0x00370215, // n0x190c c0x0000 (---------------) + I xn--nmesjevuemie-tcba - 0x0037160d, // n0x190d c0x0000 (---------------) + I xn--nry-yla5g - 0x00371f8f, // n0x190e c0x0000 (---------------) + I xn--nttery-byae - 0x0037280f, // n0x190f c0x0000 (---------------) + I xn--nvuotna-hwa - 0x0037638f, // n0x1910 c0x0000 (---------------) + I xn--oppegrd-ixa - 0x0037674e, // n0x1911 c0x0000 (---------------) + I xn--ostery-fya - 0x0037778d, // n0x1912 c0x0000 (---------------) + I xn--osyro-wua - 0x0037a291, // n0x1913 c0x0000 (---------------) + I xn--porsgu-sta26f - 0x0037cdcc, // n0x1914 c0x0000 (---------------) + I xn--rady-ira - 0x0037d0cc, // n0x1915 c0x0000 (---------------) + I xn--rdal-poa - 0x0037d3cb, // n0x1916 c0x0000 (---------------) + I xn--rde-ula - 0x0037d98c, // n0x1917 c0x0000 (---------------) + I xn--rdy-0nab - 0x0037dd4f, // n0x1918 c0x0000 (---------------) + I xn--rennesy-v1a - 0x0037e112, // n0x1919 c0x0000 (---------------) + I 
xn--rhkkervju-01af - 0x0037eacd, // n0x191a c0x0000 (---------------) + I xn--rholt-mra - 0x0037fa8c, // n0x191b c0x0000 (---------------) + I xn--risa-5na - 0x0037ff0c, // n0x191c c0x0000 (---------------) + I xn--risr-ira - 0x0038020d, // n0x191d c0x0000 (---------------) + I xn--rland-uua - 0x0038054f, // n0x191e c0x0000 (---------------) + I xn--rlingen-mxa - 0x0038090e, // n0x191f c0x0000 (---------------) + I xn--rmskog-bya - 0x0038318c, // n0x1920 c0x0000 (---------------) + I xn--rros-gra - 0x0038374d, // n0x1921 c0x0000 (---------------) + I xn--rskog-uua - 0x00383a8b, // n0x1922 c0x0000 (---------------) + I xn--rst-0na - 0x0038404c, // n0x1923 c0x0000 (---------------) + I xn--rsta-fra - 0x003845cd, // n0x1924 c0x0000 (---------------) + I xn--ryken-vua - 0x0038490e, // n0x1925 c0x0000 (---------------) + I xn--ryrvik-bya - 0x00384d89, // n0x1926 c0x0000 (---------------) + I xn--s-1fa - 0x00385a93, // n0x1927 c0x0000 (---------------) + I xn--sandnessjen-ogb - 0x0038650d, // n0x1928 c0x0000 (---------------) + I xn--sandy-yua - 0x0038684d, // n0x1929 c0x0000 (---------------) + I xn--seral-lra - 0x00386e4c, // n0x192a c0x0000 (---------------) + I xn--sgne-gra - 0x003872ce, // n0x192b c0x0000 (---------------) + I xn--skierv-uta - 0x00388d4f, // n0x192c c0x0000 (---------------) + I xn--skjervy-v1a - 0x0038910c, // n0x192d c0x0000 (---------------) + I xn--skjk-soa - 0x0038940d, // n0x192e c0x0000 (---------------) + I xn--sknit-yqa - 0x0038974f, // n0x192f c0x0000 (---------------) + I xn--sknland-fxa - 0x00389b0c, // n0x1930 c0x0000 (---------------) + I xn--slat-5na - 0x0038a20c, // n0x1931 c0x0000 (---------------) + I xn--slt-elab - 0x0038a5cc, // n0x1932 c0x0000 (---------------) + I xn--smla-hra - 0x0038a8cc, // n0x1933 c0x0000 (---------------) + I xn--smna-gra - 0x0038af8d, // n0x1934 c0x0000 (---------------) + I xn--snase-nra - 0x0038b2d2, // n0x1935 c0x0000 (---------------) + I xn--sndre-land-0cb - 0x0038b94c, // n0x1936 c0x0000 
(---------------) + I xn--snes-poa - 0x0038bc4c, // n0x1937 c0x0000 (---------------) + I xn--snsa-roa - 0x0038bf51, // n0x1938 c0x0000 (---------------) + I xn--sr-aurdal-l8a - 0x0038c38f, // n0x1939 c0x0000 (---------------) + I xn--sr-fron-q1a - 0x0038c74f, // n0x193a c0x0000 (---------------) + I xn--sr-odal-q1a - 0x0038cb13, // n0x193b c0x0000 (---------------) + I xn--sr-varanger-ggb - 0x0038e38e, // n0x193c c0x0000 (---------------) + I xn--srfold-bya - 0x0038e90f, // n0x193d c0x0000 (---------------) + I xn--srreisa-q1a - 0x0038eccc, // n0x193e c0x0000 (---------------) + I xn--srum-gra - 0x4378f00e, // n0x193f c0x010d (n0x197f-n0x1980) o I xn--stfold-9xa - 0x0038f38f, // n0x1940 c0x0000 (---------------) + I xn--stjrdal-s1a - 0x0038f756, // n0x1941 c0x0000 (---------------) + I xn--stjrdalshalsen-sqb - 0x00390252, // n0x1942 c0x0000 (---------------) + I xn--stre-toten-zcb - 0x003918cc, // n0x1943 c0x0000 (---------------) + I xn--tjme-hra - 0x0039208f, // n0x1944 c0x0000 (---------------) + I xn--tnsberg-q1a - 0x0039270d, // n0x1945 c0x0000 (---------------) + I xn--trany-yua - 0x00392a4f, // n0x1946 c0x0000 (---------------) + I xn--trgstad-r1a - 0x00392e0c, // n0x1947 c0x0000 (---------------) + I xn--trna-woa - 0x0039310d, // n0x1948 c0x0000 (---------------) + I xn--troms-zua - 0x0039344d, // n0x1949 c0x0000 (---------------) + I xn--tysvr-vra - 0x00394cce, // n0x194a c0x0000 (---------------) + I xn--unjrga-rta - 0x0039604c, // n0x194b c0x0000 (---------------) + I xn--vads-jra - 0x0039634c, // n0x194c c0x0000 (---------------) + I xn--vard-jra - 0x00396650, // n0x194d c0x0000 (---------------) + I xn--vegrshei-c0a - 0x0039a251, // n0x194e c0x0000 (---------------) + I xn--vestvgy-ixa6o - 0x0039a68b, // n0x194f c0x0000 (---------------) + I xn--vg-yiab - 0x0039a9cc, // n0x1950 c0x0000 (---------------) + I xn--vgan-qoa - 0x0039acce, // n0x1951 c0x0000 (---------------) + I xn--vgsy-qoa0j - 0x0039c391, // n0x1952 c0x0000 (---------------) + I 
xn--vre-eiker-k8a - 0x0039c7ce, // n0x1953 c0x0000 (---------------) + I xn--vrggt-xqad - 0x0039cb4d, // n0x1954 c0x0000 (---------------) + I xn--vry-yla5g - 0x003a10cb, // n0x1955 c0x0000 (---------------) + I xn--yer-zna - 0x003a19cf, // n0x1956 c0x0000 (---------------) + I xn--ygarden-p1a - 0x003a2a94, // n0x1957 c0x0000 (---------------) + I xn--ystre-slidre-ujb - 0x00245dc2, // n0x1958 c0x0000 (---------------) + I gs - 0x00245dc2, // n0x1959 c0x0000 (---------------) + I gs - 0x00202ac3, // n0x195a c0x0000 (---------------) + I nes - 0x00245dc2, // n0x195b c0x0000 (---------------) + I gs - 0x00202ac3, // n0x195c c0x0000 (---------------) + I nes - 0x00245dc2, // n0x195d c0x0000 (---------------) + I gs - 0x00209f02, // n0x195e c0x0000 (---------------) + I os - 0x00363d45, // n0x195f c0x0000 (---------------) + I valer - 0x0039c08c, // n0x1960 c0x0000 (---------------) + I xn--vler-qoa - 0x00245dc2, // n0x1961 c0x0000 (---------------) + I gs - 0x00245dc2, // n0x1962 c0x0000 (---------------) + I gs - 0x00209f02, // n0x1963 c0x0000 (---------------) + I os - 0x00245dc2, // n0x1964 c0x0000 (---------------) + I gs - 0x00293485, // n0x1965 c0x0000 (---------------) + I heroy - 0x003310c5, // n0x1966 c0x0000 (---------------) + I sande - 0x00245dc2, // n0x1967 c0x0000 (---------------) + I gs - 0x00245dc2, // n0x1968 c0x0000 (---------------) + I gs - 0x0020f682, // n0x1969 c0x0000 (---------------) + I bo - 0x00293485, // n0x196a c0x0000 (---------------) + I heroy - 0x00313e09, // n0x196b c0x0000 (---------------) + I xn--b-5ga - 0x0033e54c, // n0x196c c0x0000 (---------------) + I xn--hery-ira - 0x00245dc2, // n0x196d c0x0000 (---------------) + I gs - 0x00245dc2, // n0x196e c0x0000 (---------------) + I gs - 0x00245dc2, // n0x196f c0x0000 (---------------) + I gs - 0x00245dc2, // n0x1970 c0x0000 (---------------) + I gs - 0x00363d45, // n0x1971 c0x0000 (---------------) + I valer - 0x00245dc2, // n0x1972 c0x0000 (---------------) + I gs - 0x00245dc2, // 
n0x1973 c0x0000 (---------------) + I gs - 0x00245dc2, // n0x1974 c0x0000 (---------------) + I gs - 0x00245dc2, // n0x1975 c0x0000 (---------------) + I gs - 0x0020f682, // n0x1976 c0x0000 (---------------) + I bo - 0x00313e09, // n0x1977 c0x0000 (---------------) + I xn--b-5ga - 0x00245dc2, // n0x1978 c0x0000 (---------------) + I gs - 0x00245dc2, // n0x1979 c0x0000 (---------------) + I gs - 0x00245dc2, // n0x197a c0x0000 (---------------) + I gs - 0x003310c5, // n0x197b c0x0000 (---------------) + I sande - 0x00245dc2, // n0x197c c0x0000 (---------------) + I gs - 0x003310c5, // n0x197d c0x0000 (---------------) + I sande - 0x0033e54c, // n0x197e c0x0000 (---------------) + I xn--hery-ira - 0x0039c08c, // n0x197f c0x0000 (---------------) + I xn--vler-qoa - 0x00331a83, // n0x1980 c0x0000 (---------------) + I biz - 0x00233243, // n0x1981 c0x0000 (---------------) + I com - 0x00239103, // n0x1982 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1983 c0x0000 (---------------) + I gov - 0x00201844, // n0x1984 c0x0000 (---------------) + I info - 0x00223b43, // n0x1985 c0x0000 (---------------) + I net - 0x00228743, // n0x1986 c0x0000 (---------------) + I org - 0x00128008, // n0x1987 c0x0000 (---------------) + merseine - 0x000aa184, // n0x1988 c0x0000 (---------------) + mine - 0x0015cc08, // n0x1989 c0x0000 (---------------) + shacknet - 0x00200342, // n0x198a c0x0000 (---------------) + I ac - 0x4460ce42, // n0x198b c0x0111 (n0x199a-n0x199b) + I co - 0x00244b03, // n0x198c c0x0000 (---------------) + I cri - 0x0024f084, // n0x198d c0x0000 (---------------) + I geek - 0x002060c3, // n0x198e c0x0000 (---------------) + I gen - 0x00342404, // n0x198f c0x0000 (---------------) + I govt - 0x002ae586, // n0x1990 c0x0000 (---------------) + I health - 0x00205c03, // n0x1991 c0x0000 (---------------) + I iwi - 0x002ea604, // n0x1992 c0x0000 (---------------) + I kiwi - 0x002719c5, // n0x1993 c0x0000 (---------------) + I maori - 0x00207dc3, // n0x1994 c0x0000 
(---------------) + I mil - 0x00223b43, // n0x1995 c0x0000 (---------------) + I net - 0x00228743, // n0x1996 c0x0000 (---------------) + I org - 0x0028458a, // n0x1997 c0x0000 (---------------) + I parliament - 0x00232186, // n0x1998 c0x0000 (---------------) + I school - 0x0036a28c, // n0x1999 c0x0000 (---------------) + I xn--mori-qsa - 0x000fe108, // n0x199a c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x199b c0x0000 (---------------) + I co - 0x00233243, // n0x199c c0x0000 (---------------) + I com - 0x00239103, // n0x199d c0x0000 (---------------) + I edu - 0x0027d903, // n0x199e c0x0000 (---------------) + I gov - 0x00213443, // n0x199f c0x0000 (---------------) + I med - 0x002d1086, // n0x19a0 c0x0000 (---------------) + I museum - 0x00223b43, // n0x19a1 c0x0000 (---------------) + I net - 0x00228743, // n0x19a2 c0x0000 (---------------) + I org - 0x00224b03, // n0x19a3 c0x0000 (---------------) + I pro - 0x000030c2, // n0x19a4 c0x0000 (---------------) + ae - 0x0002f087, // n0x19a5 c0x0000 (---------------) + blogdns - 0x000d4bc8, // n0x19a6 c0x0000 (---------------) + blogsite - 0x0000f9ce, // n0x19a7 c0x0000 (---------------) + bmoattachments - 0x00089f92, // n0x19a8 c0x0000 (---------------) + boldlygoingnowhere - 0x45262e05, // n0x19a9 c0x0114 (n0x19e1-n0x19e3) o I cdn77 - 0x4571d6cc, // n0x19aa c0x0115 (n0x19e3-n0x19e4) o I cdn77-secure - 0x00142e48, // n0x19ab c0x0000 (---------------) + dnsalias - 0x0007cf07, // n0x19ac c0x0000 (---------------) + dnsdojo - 0x0001580b, // n0x19ad c0x0000 (---------------) + doesntexist - 0x0016a789, // n0x19ae c0x0000 (---------------) + dontexist - 0x00142d47, // n0x19af c0x0000 (---------------) + doomdns - 0x0013c907, // n0x19b0 c0x0000 (---------------) + dsmynas - 0x0007ce07, // n0x19b1 c0x0000 (---------------) + duckdns - 0x0000de46, // n0x19b2 c0x0000 (---------------) + dvrdns - 0x00197108, // n0x19b3 c0x0000 (---------------) + dynalias - 0x45c13206, // n0x19b4 c0x0117 (n0x19e5-n0x19e7) + dyndns 
- 0x000aa38d, // n0x19b5 c0x0000 (---------------) + endofinternet - 0x0006b410, // n0x19b6 c0x0000 (---------------) + endoftheinternet - 0x46005382, // n0x19b7 c0x0118 (n0x19e7-n0x1a1e) + eu - 0x00007d48, // n0x19b8 c0x0000 (---------------) + familyds - 0x00069347, // n0x19b9 c0x0000 (---------------) + from-me - 0x00098349, // n0x19ba c0x0000 (---------------) + game-host - 0x00058186, // n0x19bb c0x0000 (---------------) + gotdns - 0x0000c482, // n0x19bc c0x0000 (---------------) + hk - 0x0014a8ca, // n0x19bd c0x0000 (---------------) + hobby-site - 0x000133c7, // n0x19be c0x0000 (---------------) + homedns - 0x00120a07, // n0x19bf c0x0000 (---------------) + homeftp - 0x000a6dc9, // n0x19c0 c0x0000 (---------------) + homelinux - 0x000a8148, // n0x19c1 c0x0000 (---------------) + homeunix - 0x000e060e, // n0x19c2 c0x0000 (---------------) + is-a-bruinsfan - 0x0000d78e, // n0x19c3 c0x0000 (---------------) + is-a-candidate - 0x0001058f, // n0x19c4 c0x0000 (---------------) + is-a-celticsfan - 0x000110c9, // n0x19c5 c0x0000 (---------------) + is-a-chef - 0x0004ef49, // n0x19c6 c0x0000 (---------------) + is-a-geek - 0x00065e4b, // n0x19c7 c0x0000 (---------------) + is-a-knight - 0x0007df8f, // n0x19c8 c0x0000 (---------------) + is-a-linux-user - 0x0008cbcc, // n0x19c9 c0x0000 (---------------) + is-a-patsfan - 0x000aae0b, // n0x19ca c0x0000 (---------------) + is-a-soxfan - 0x000b9ec8, // n0x19cb c0x0000 (---------------) + is-found - 0x000cf047, // n0x19cc c0x0000 (---------------) + is-lost - 0x0015c0c8, // n0x19cd c0x0000 (---------------) + is-saved - 0x000ea8cb, // n0x19ce c0x0000 (---------------) + is-very-bad - 0x000ed70c, // n0x19cf c0x0000 (---------------) + is-very-evil - 0x000f7c0c, // n0x19d0 c0x0000 (---------------) + is-very-good - 0x0011cc8c, // n0x19d1 c0x0000 (---------------) + is-very-nice - 0x0013aacd, // n0x19d2 c0x0000 (---------------) + is-very-sweet - 0x000862c8, // n0x19d3 c0x0000 (---------------) + isa-geek - 0x0014ee89, // 
n0x19d4 c0x0000 (---------------) + kicks-ass - 0x001a0e0b, // n0x19d5 c0x0000 (---------------) + misconfused - 0x000dd687, // n0x19d6 c0x0000 (---------------) + podzone - 0x000d4a4a, // n0x19d7 c0x0000 (---------------) + readmyblog - 0x0005abc6, // n0x19d8 c0x0000 (---------------) + selfip - 0x00099d8d, // n0x19d9 c0x0000 (---------------) + sellsyourhome - 0x000ccd88, // n0x19da c0x0000 (---------------) + servebbs - 0x0008bc88, // n0x19db c0x0000 (---------------) + serveftp - 0x00172349, // n0x19dc c0x0000 (---------------) + servegame - 0x000e8a0c, // n0x19dd c0x0000 (---------------) + stuff-4-sale - 0x00002242, // n0x19de c0x0000 (---------------) + us - 0x001267c6, // n0x19df c0x0000 (---------------) + webhop - 0x00000182, // n0x19e0 c0x0000 (---------------) + za - 0x00000141, // n0x19e1 c0x0000 (---------------) + c - 0x00041203, // n0x19e2 c0x0000 (---------------) + rsc - 0x45b81386, // n0x19e3 c0x0116 (n0x19e4-n0x19e5) o I origin - 0x00062f83, // n0x19e4 c0x0000 (---------------) + ssl - 0x00010a42, // n0x19e5 c0x0000 (---------------) + go - 0x000133c4, // n0x19e6 c0x0000 (---------------) + home - 0x00000d02, // n0x19e7 c0x0000 (---------------) + al - 0x000d5e84, // n0x19e8 c0x0000 (---------------) + asso - 0x00000482, // n0x19e9 c0x0000 (---------------) + at - 0x00005782, // n0x19ea c0x0000 (---------------) + au - 0x00002e02, // n0x19eb c0x0000 (---------------) + be - 0x00104a82, // n0x19ec c0x0000 (---------------) + bg - 0x00000e42, // n0x19ed c0x0000 (---------------) + ca - 0x00062e02, // n0x19ee c0x0000 (---------------) + cd - 0x00000382, // n0x19ef c0x0000 (---------------) + ch - 0x0001dac2, // n0x19f0 c0x0000 (---------------) + cn - 0x0003cd42, // n0x19f1 c0x0000 (---------------) + cy - 0x00000142, // n0x19f2 c0x0000 (---------------) + cz - 0x00005582, // n0x19f3 c0x0000 (---------------) + de - 0x000489c2, // n0x19f4 c0x0000 (---------------) + dk - 0x00039103, // n0x19f5 c0x0000 (---------------) + edu - 0x0000cf42, // 
n0x19f6 c0x0000 (---------------) + ee - 0x00000082, // n0x19f7 c0x0000 (---------------) + es - 0x00001702, // n0x19f8 c0x0000 (---------------) + fi - 0x00040202, // n0x19f9 c0x0000 (---------------) + fr - 0x00008a82, // n0x19fa c0x0000 (---------------) + gr - 0x0000f742, // n0x19fb c0x0000 (---------------) + hr - 0x00024202, // n0x19fc c0x0000 (---------------) + hu - 0x00000042, // n0x19fd c0x0000 (---------------) + ie - 0x000027c2, // n0x19fe c0x0000 (---------------) + il - 0x000012c2, // n0x19ff c0x0000 (---------------) + in - 0x00001503, // n0x1a00 c0x0000 (---------------) + int - 0x00004e82, // n0x1a01 c0x0000 (---------------) + is - 0x000017c2, // n0x1a02 c0x0000 (---------------) + it - 0x000b00c2, // n0x1a03 c0x0000 (---------------) + jp - 0x0000bdc2, // n0x1a04 c0x0000 (---------------) + kr - 0x00008bc2, // n0x1a05 c0x0000 (---------------) + lt - 0x00003842, // n0x1a06 c0x0000 (---------------) + lu - 0x00006582, // n0x1a07 c0x0000 (---------------) + lv - 0x0001a3c2, // n0x1a08 c0x0000 (---------------) + mc - 0x00000982, // n0x1a09 c0x0000 (---------------) + me - 0x00167282, // n0x1a0a c0x0000 (---------------) + mk - 0x00005402, // n0x1a0b c0x0000 (---------------) + mt - 0x00025742, // n0x1a0c c0x0000 (---------------) + my - 0x00023b43, // n0x1a0d c0x0000 (---------------) + net - 0x000026c2, // n0x1a0e c0x0000 (---------------) + ng - 0x00046d02, // n0x1a0f c0x0000 (---------------) + nl - 0x00001382, // n0x1a10 c0x0000 (---------------) + no - 0x00008282, // n0x1a11 c0x0000 (---------------) + nz - 0x00077705, // n0x1a12 c0x0000 (---------------) + paris - 0x00009442, // n0x1a13 c0x0000 (---------------) + pl - 0x000d3242, // n0x1a14 c0x0000 (---------------) + pt - 0x00042ec3, // n0x1a15 c0x0000 (---------------) + q-a - 0x000020c2, // n0x1a16 c0x0000 (---------------) + ro - 0x0000fe82, // n0x1a17 c0x0000 (---------------) + ru - 0x00004ec2, // n0x1a18 c0x0000 (---------------) + se - 0x000091c2, // n0x1a19 c0x0000 (---------------) 
+ si - 0x00008502, // n0x1a1a c0x0000 (---------------) + sk - 0x00003902, // n0x1a1b c0x0000 (---------------) + tr - 0x00001b02, // n0x1a1c c0x0000 (---------------) + uk - 0x00002242, // n0x1a1d c0x0000 (---------------) + us - 0x00216f03, // n0x1a1e c0x0000 (---------------) + I abo - 0x00200342, // n0x1a1f c0x0000 (---------------) + I ac - 0x00233243, // n0x1a20 c0x0000 (---------------) + I com - 0x00239103, // n0x1a21 c0x0000 (---------------) + I edu - 0x00212b03, // n0x1a22 c0x0000 (---------------) + I gob - 0x0020f543, // n0x1a23 c0x0000 (---------------) + I ing - 0x00213443, // n0x1a24 c0x0000 (---------------) + I med - 0x00223b43, // n0x1a25 c0x0000 (---------------) + I net - 0x00201383, // n0x1a26 c0x0000 (---------------) + I nom - 0x00228743, // n0x1a27 c0x0000 (---------------) + I org - 0x002933c3, // n0x1a28 c0x0000 (---------------) + I sld - 0x000fe108, // n0x1a29 c0x0000 (---------------) + blogspot - 0x00233243, // n0x1a2a c0x0000 (---------------) + I com - 0x00239103, // n0x1a2b c0x0000 (---------------) + I edu - 0x00212b03, // n0x1a2c c0x0000 (---------------) + I gob - 0x00207dc3, // n0x1a2d c0x0000 (---------------) + I mil - 0x00223b43, // n0x1a2e c0x0000 (---------------) + I net - 0x00201383, // n0x1a2f c0x0000 (---------------) + I nom - 0x00228743, // n0x1a30 c0x0000 (---------------) + I org - 0x00233243, // n0x1a31 c0x0000 (---------------) + I com - 0x00239103, // n0x1a32 c0x0000 (---------------) + I edu - 0x00228743, // n0x1a33 c0x0000 (---------------) + I org - 0x00233243, // n0x1a34 c0x0000 (---------------) + I com - 0x00239103, // n0x1a35 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1a36 c0x0000 (---------------) + I gov - 0x00200041, // n0x1a37 c0x0000 (---------------) + I i - 0x00207dc3, // n0x1a38 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1a39 c0x0000 (---------------) + I net - 0x0023b383, // n0x1a3a c0x0000 (---------------) + I ngo - 0x00228743, // n0x1a3b c0x0000 (---------------) + I org 
- 0x00331a83, // n0x1a3c c0x0000 (---------------) + I biz - 0x00233243, // n0x1a3d c0x0000 (---------------) + I com - 0x00239103, // n0x1a3e c0x0000 (---------------) + I edu - 0x00207d43, // n0x1a3f c0x0000 (---------------) + I fam - 0x00212b03, // n0x1a40 c0x0000 (---------------) + I gob - 0x0023c083, // n0x1a41 c0x0000 (---------------) + I gok - 0x0024ed83, // n0x1a42 c0x0000 (---------------) + I gon - 0x002a4d03, // n0x1a43 c0x0000 (---------------) + I gop - 0x0024e183, // n0x1a44 c0x0000 (---------------) + I gos - 0x0027d903, // n0x1a45 c0x0000 (---------------) + I gov - 0x00201844, // n0x1a46 c0x0000 (---------------) + I info - 0x00223b43, // n0x1a47 c0x0000 (---------------) + I net - 0x00228743, // n0x1a48 c0x0000 (---------------) + I org - 0x0021e243, // n0x1a49 c0x0000 (---------------) + I web - 0x002f4bc4, // n0x1a4a c0x0000 (---------------) + I agro - 0x0022e4c3, // n0x1a4b c0x0000 (---------------) + I aid - 0x00001d43, // n0x1a4c c0x0000 (---------------) + art - 0x00200c43, // n0x1a4d c0x0000 (---------------) + I atm - 0x00255b88, // n0x1a4e c0x0000 (---------------) + I augustow - 0x00229f44, // n0x1a4f c0x0000 (---------------) + I auto - 0x002278ca, // n0x1a50 c0x0000 (---------------) + I babia-gora - 0x002067c6, // n0x1a51 c0x0000 (---------------) + I bedzin - 0x00396fc7, // n0x1a52 c0x0000 (---------------) + I beskidy - 0x0022248a, // n0x1a53 c0x0000 (---------------) + I bialowieza - 0x00275b09, // n0x1a54 c0x0000 (---------------) + I bialystok - 0x003a2f47, // n0x1a55 c0x0000 (---------------) + I bielawa - 0x0020000a, // n0x1a56 c0x0000 (---------------) + I bieszczady - 0x00331a83, // n0x1a57 c0x0000 (---------------) + I biz - 0x0037714b, // n0x1a58 c0x0000 (---------------) + I boleslawiec - 0x00302649, // n0x1a59 c0x0000 (---------------) + I bydgoszcz - 0x00219bc5, // n0x1a5a c0x0000 (---------------) + I bytom - 0x002cd6c7, // n0x1a5b c0x0000 (---------------) + I cieszyn - 0x0000ce42, // n0x1a5c c0x0000 
(---------------) + co - 0x00233243, // n0x1a5d c0x0000 (---------------) + I com - 0x00355907, // n0x1a5e c0x0000 (---------------) + I czeladz - 0x00357445, // n0x1a5f c0x0000 (---------------) + I czest - 0x002bdbc9, // n0x1a60 c0x0000 (---------------) + I dlugoleka - 0x00239103, // n0x1a61 c0x0000 (---------------) + I edu - 0x00226486, // n0x1a62 c0x0000 (---------------) + I elblag - 0x002bca03, // n0x1a63 c0x0000 (---------------) + I elk - 0x000c62c3, // n0x1a64 c0x0000 (---------------) + gda - 0x000faa86, // n0x1a65 c0x0000 (---------------) + gdansk - 0x00129846, // n0x1a66 c0x0000 (---------------) + gdynia - 0x00005b87, // n0x1a67 c0x0000 (---------------) + gliwice - 0x002114c6, // n0x1a68 c0x0000 (---------------) + I glogow - 0x00215f85, // n0x1a69 c0x0000 (---------------) + I gmina - 0x00382c07, // n0x1a6a c0x0000 (---------------) + I gniezno - 0x00334fc7, // n0x1a6b c0x0000 (---------------) + I gorlice - 0x47e7d903, // n0x1a6c c0x011f (n0x1aef-n0x1b1e) + I gov - 0x0032c447, // n0x1a6d c0x0000 (---------------) + I grajewo - 0x0035d983, // n0x1a6e c0x0000 (---------------) + I gsm - 0x00309a05, // n0x1a6f c0x0000 (---------------) + I ilawa - 0x00201844, // n0x1a70 c0x0000 (---------------) + I info - 0x003a5288, // n0x1a71 c0x0000 (---------------) + I jaworzno - 0x002af58c, // n0x1a72 c0x0000 (---------------) + I jelenia-gora - 0x002adc45, // n0x1a73 c0x0000 (---------------) + I jgora - 0x0031fb46, // n0x1a74 c0x0000 (---------------) + I kalisz - 0x003557c7, // n0x1a75 c0x0000 (---------------) + I karpacz - 0x0038d447, // n0x1a76 c0x0000 (---------------) + I kartuzy - 0x0020d0c7, // n0x1a77 c0x0000 (---------------) + I kaszuby - 0x00211dc8, // n0x1a78 c0x0000 (---------------) + I katowice - 0x00264f8f, // n0x1a79 c0x0000 (---------------) + I kazimierz-dolny - 0x00248845, // n0x1a7a c0x0000 (---------------) + I kepno - 0x00244c07, // n0x1a7b c0x0000 (---------------) + I ketrzyn - 0x0039d947, // n0x1a7c c0x0000 (---------------) + I 
klodzko - 0x002a63ca, // n0x1a7d c0x0000 (---------------) + I kobierzyce - 0x0033c289, // n0x1a7e c0x0000 (---------------) + I kolobrzeg - 0x002ca585, // n0x1a7f c0x0000 (---------------) + I konin - 0x002caf0a, // n0x1a80 c0x0000 (---------------) + I konskowola - 0x00126686, // n0x1a81 c0x0000 (---------------) + krakow - 0x002bca85, // n0x1a82 c0x0000 (---------------) + I kutno - 0x00369344, // n0x1a83 c0x0000 (---------------) + I lapy - 0x00269d86, // n0x1a84 c0x0000 (---------------) + I lebork - 0x0023eb07, // n0x1a85 c0x0000 (---------------) + I legnica - 0x00233647, // n0x1a86 c0x0000 (---------------) + I lezajsk - 0x0022b388, // n0x1a87 c0x0000 (---------------) + I limanowa - 0x00214d05, // n0x1a88 c0x0000 (---------------) + I lomza - 0x00357346, // n0x1a89 c0x0000 (---------------) + I lowicz - 0x0038dec5, // n0x1a8a c0x0000 (---------------) + I lubin - 0x0036e405, // n0x1a8b c0x0000 (---------------) + I lukow - 0x00218c84, // n0x1a8c c0x0000 (---------------) + I mail - 0x002eacc7, // n0x1a8d c0x0000 (---------------) + I malbork - 0x00307d0a, // n0x1a8e c0x0000 (---------------) + I malopolska - 0x0020a788, // n0x1a8f c0x0000 (---------------) + I mazowsze - 0x002ea246, // n0x1a90 c0x0000 (---------------) + I mazury - 0x00013443, // n0x1a91 c0x0000 (---------------) + med - 0x00303545, // n0x1a92 c0x0000 (---------------) + I media - 0x00231c86, // n0x1a93 c0x0000 (---------------) + I miasta - 0x003a5a46, // n0x1a94 c0x0000 (---------------) + I mielec - 0x0026c8c6, // n0x1a95 c0x0000 (---------------) + I mielno - 0x00207dc3, // n0x1a96 c0x0000 (---------------) + I mil - 0x0037ed47, // n0x1a97 c0x0000 (---------------) + I mragowo - 0x0039d8c5, // n0x1a98 c0x0000 (---------------) + I naklo - 0x00223b43, // n0x1a99 c0x0000 (---------------) + I net - 0x003a31cd, // n0x1a9a c0x0000 (---------------) + I nieruchomosci - 0x00201383, // n0x1a9b c0x0000 (---------------) + I nom - 0x0022b488, // n0x1a9c c0x0000 (---------------) + I nowaruda - 
0x0039f444, // n0x1a9d c0x0000 (---------------) + I nysa - 0x00276b05, // n0x1a9e c0x0000 (---------------) + I olawa - 0x002a62c6, // n0x1a9f c0x0000 (---------------) + I olecko - 0x0023b746, // n0x1aa0 c0x0000 (---------------) + I olkusz - 0x0021f607, // n0x1aa1 c0x0000 (---------------) + I olsztyn - 0x0023c3c7, // n0x1aa2 c0x0000 (---------------) + I opoczno - 0x0024d4c5, // n0x1aa3 c0x0000 (---------------) + I opole - 0x00228743, // n0x1aa4 c0x0000 (---------------) + I org - 0x0036c1c7, // n0x1aa5 c0x0000 (---------------) + I ostroda - 0x002c9009, // n0x1aa6 c0x0000 (---------------) + I ostroleka - 0x0020c949, // n0x1aa7 c0x0000 (---------------) + I ostrowiec - 0x0020e80a, // n0x1aa8 c0x0000 (---------------) + I ostrowwlkp - 0x00247682, // n0x1aa9 c0x0000 (---------------) + I pc - 0x003099c4, // n0x1aaa c0x0000 (---------------) + I pila - 0x002d7704, // n0x1aab c0x0000 (---------------) + I pisz - 0x00213bc7, // n0x1aac c0x0000 (---------------) + I podhale - 0x0023b948, // n0x1aad c0x0000 (---------------) + I podlasie - 0x002de109, // n0x1aae c0x0000 (---------------) + I polkowice - 0x002083c9, // n0x1aaf c0x0000 (---------------) + I pomorskie - 0x002decc7, // n0x1ab0 c0x0000 (---------------) + I pomorze - 0x00248346, // n0x1ab1 c0x0000 (---------------) + I powiat - 0x000e0386, // n0x1ab2 c0x0000 (---------------) + poznan - 0x002e17c4, // n0x1ab3 c0x0000 (---------------) + I priv - 0x002e194a, // n0x1ab4 c0x0000 (---------------) + I prochowice - 0x002e52c8, // n0x1ab5 c0x0000 (---------------) + I pruszkow - 0x002e5f49, // n0x1ab6 c0x0000 (---------------) + I przeworsk - 0x00297506, // n0x1ab7 c0x0000 (---------------) + I pulawy - 0x00301145, // n0x1ab8 c0x0000 (---------------) + I radom - 0x0020a648, // n0x1ab9 c0x0000 (---------------) + I rawa-maz - 0x002c410a, // n0x1aba c0x0000 (---------------) + I realestate - 0x00286903, // n0x1abb c0x0000 (---------------) + I rel - 0x0034f406, // n0x1abc c0x0000 (---------------) + I rybnik - 
0x002dedc7, // n0x1abd c0x0000 (---------------) + I rzeszow - 0x0020b905, // n0x1abe c0x0000 (---------------) + I sanok - 0x00225d45, // n0x1abf c0x0000 (---------------) + I sejny - 0x00246b03, // n0x1ac0 c0x0000 (---------------) + I sex - 0x00357184, // n0x1ac1 c0x0000 (---------------) + I shop - 0x00373445, // n0x1ac2 c0x0000 (---------------) + I sklep - 0x00284a47, // n0x1ac3 c0x0000 (---------------) + I skoczow - 0x002e2605, // n0x1ac4 c0x0000 (---------------) + I slask - 0x002d1f86, // n0x1ac5 c0x0000 (---------------) + I slupsk - 0x000ee485, // n0x1ac6 c0x0000 (---------------) + sopot - 0x0021d283, // n0x1ac7 c0x0000 (---------------) + I sos - 0x002b7589, // n0x1ac8 c0x0000 (---------------) + I sosnowiec - 0x002768cc, // n0x1ac9 c0x0000 (---------------) + I stalowa-wola - 0x002a294c, // n0x1aca c0x0000 (---------------) + I starachowice - 0x002caac8, // n0x1acb c0x0000 (---------------) + I stargard - 0x00265cc7, // n0x1acc c0x0000 (---------------) + I suwalki - 0x002ebf88, // n0x1acd c0x0000 (---------------) + I swidnica - 0x002ec58a, // n0x1ace c0x0000 (---------------) + I swiebodzin - 0x002ecf0b, // n0x1acf c0x0000 (---------------) + I swinoujscie - 0x00302788, // n0x1ad0 c0x0000 (---------------) + I szczecin - 0x0031fc48, // n0x1ad1 c0x0000 (---------------) + I szczytno - 0x00294c06, // n0x1ad2 c0x0000 (---------------) + I szkola - 0x00249ec5, // n0x1ad3 c0x0000 (---------------) + I targi - 0x0022afca, // n0x1ad4 c0x0000 (---------------) + I tarnobrzeg - 0x002247c5, // n0x1ad5 c0x0000 (---------------) + I tgory - 0x00200c82, // n0x1ad6 c0x0000 (---------------) + I tm - 0x002c1b87, // n0x1ad7 c0x0000 (---------------) + I tourism - 0x002a3206, // n0x1ad8 c0x0000 (---------------) + I travel - 0x0034fe45, // n0x1ad9 c0x0000 (---------------) + I turek - 0x002ee009, // n0x1ada c0x0000 (---------------) + I turystyka - 0x003091c5, // n0x1adb c0x0000 (---------------) + I tychy - 0x00287385, // n0x1adc c0x0000 (---------------) + I 
ustka - 0x003095c9, // n0x1add c0x0000 (---------------) + I walbrzych - 0x00231b06, // n0x1ade c0x0000 (---------------) + I warmia - 0x0023f588, // n0x1adf c0x0000 (---------------) + I warszawa - 0x0025a5c3, // n0x1ae0 c0x0000 (---------------) + I waw - 0x00211606, // n0x1ae1 c0x0000 (---------------) + I wegrow - 0x00275046, // n0x1ae2 c0x0000 (---------------) + I wielun - 0x002fe645, // n0x1ae3 c0x0000 (---------------) + I wlocl - 0x002fe649, // n0x1ae4 c0x0000 (---------------) + I wloclawek - 0x002b2bc9, // n0x1ae5 c0x0000 (---------------) + I wodzislaw - 0x00253507, // n0x1ae6 c0x0000 (---------------) + I wolomin - 0x000fe4c4, // n0x1ae7 c0x0000 (---------------) + wroc - 0x002fe4c7, // n0x1ae8 c0x0000 (---------------) + I wroclaw - 0x002082c9, // n0x1ae9 c0x0000 (---------------) + I zachpomor - 0x00222685, // n0x1aea c0x0000 (---------------) + I zagan - 0x00138388, // n0x1aeb c0x0000 (---------------) + zakopane - 0x00353e05, // n0x1aec c0x0000 (---------------) + I zarow - 0x00223c05, // n0x1aed c0x0000 (---------------) + I zgora - 0x0022f4c9, // n0x1aee c0x0000 (---------------) + I zgorzelec - 0x00208082, // n0x1aef c0x0000 (---------------) + I ap - 0x0022e2c4, // n0x1af0 c0x0000 (---------------) + I griw - 0x00205c82, // n0x1af1 c0x0000 (---------------) + I ic - 0x00204e82, // n0x1af2 c0x0000 (---------------) + I is - 0x00269b45, // n0x1af3 c0x0000 (---------------) + I kmpsp - 0x002ce008, // n0x1af4 c0x0000 (---------------) + I konsulat - 0x0036ff85, // n0x1af5 c0x0000 (---------------) + I kppsp - 0x002bf3c3, // n0x1af6 c0x0000 (---------------) + I kwp - 0x002bf3c5, // n0x1af7 c0x0000 (---------------) + I kwpsp - 0x002cda83, // n0x1af8 c0x0000 (---------------) + I mup - 0x00212002, // n0x1af9 c0x0000 (---------------) + I mw - 0x00267184, // n0x1afa c0x0000 (---------------) + I oirm - 0x002e7a03, // n0x1afb c0x0000 (---------------) + I oum - 0x0020aac2, // n0x1afc c0x0000 (---------------) + I pa - 0x002e3144, // n0x1afd c0x0000 
(---------------) + I pinb - 0x002d7d03, // n0x1afe c0x0000 (---------------) + I piw - 0x00206ec2, // n0x1aff c0x0000 (---------------) + I po - 0x00208103, // n0x1b00 c0x0000 (---------------) + I psp - 0x0028e544, // n0x1b01 c0x0000 (---------------) + I psse - 0x002b68c3, // n0x1b02 c0x0000 (---------------) + I pup - 0x00234584, // n0x1b03 c0x0000 (---------------) + I rzgw - 0x00201002, // n0x1b04 c0x0000 (---------------) + I sa - 0x00272743, // n0x1b05 c0x0000 (---------------) + I sdn - 0x002176c3, // n0x1b06 c0x0000 (---------------) + I sko - 0x00205f02, // n0x1b07 c0x0000 (---------------) + I so - 0x00332d82, // n0x1b08 c0x0000 (---------------) + I sr - 0x002b2a09, // n0x1b09 c0x0000 (---------------) + I starostwo - 0x00204682, // n0x1b0a c0x0000 (---------------) + I ug - 0x0028ab04, // n0x1b0b c0x0000 (---------------) + I ugim - 0x002053c2, // n0x1b0c c0x0000 (---------------) + I um - 0x0020a4c4, // n0x1b0d c0x0000 (---------------) + I umig - 0x00248304, // n0x1b0e c0x0000 (---------------) + I upow - 0x002e4644, // n0x1b0f c0x0000 (---------------) + I uppo - 0x00202242, // n0x1b10 c0x0000 (---------------) + I us - 0x002431c2, // n0x1b11 c0x0000 (---------------) + I uw - 0x0020fec3, // n0x1b12 c0x0000 (---------------) + I uzs - 0x002ecb83, // n0x1b13 c0x0000 (---------------) + I wif - 0x00244204, // n0x1b14 c0x0000 (---------------) + I wiih - 0x0025cd84, // n0x1b15 c0x0000 (---------------) + I winb - 0x002c8f84, // n0x1b16 c0x0000 (---------------) + I wios - 0x002cae04, // n0x1b17 c0x0000 (---------------) + I witd - 0x002fdb43, // n0x1b18 c0x0000 (---------------) + I wiw - 0x002f2083, // n0x1b19 c0x0000 (---------------) + I wsa - 0x00326604, // n0x1b1a c0x0000 (---------------) + I wskr - 0x002ffa04, // n0x1b1b c0x0000 (---------------) + I wuoz - 0x002ffd06, // n0x1b1c c0x0000 (---------------) + I wzmiuw - 0x00262bc2, // n0x1b1d c0x0000 (---------------) + I zp - 0x0020ce42, // n0x1b1e c0x0000 (---------------) + I co - 0x00239103, 
// n0x1b1f c0x0000 (---------------) + I edu - 0x0027d903, // n0x1b20 c0x0000 (---------------) + I gov - 0x00223b43, // n0x1b21 c0x0000 (---------------) + I net - 0x00228743, // n0x1b22 c0x0000 (---------------) + I org - 0x00200342, // n0x1b23 c0x0000 (---------------) + I ac - 0x00331a83, // n0x1b24 c0x0000 (---------------) + I biz - 0x00233243, // n0x1b25 c0x0000 (---------------) + I com - 0x00239103, // n0x1b26 c0x0000 (---------------) + I edu - 0x002025c3, // n0x1b27 c0x0000 (---------------) + I est - 0x0027d903, // n0x1b28 c0x0000 (---------------) + I gov - 0x00201844, // n0x1b29 c0x0000 (---------------) + I info - 0x002b2cc4, // n0x1b2a c0x0000 (---------------) + I isla - 0x00200904, // n0x1b2b c0x0000 (---------------) + I name - 0x00223b43, // n0x1b2c c0x0000 (---------------) + I net - 0x00228743, // n0x1b2d c0x0000 (---------------) + I org - 0x00224b03, // n0x1b2e c0x0000 (---------------) + I pro - 0x002e2004, // n0x1b2f c0x0000 (---------------) + I prof - 0x002b5b43, // n0x1b30 c0x0000 (---------------) + I aca - 0x002049c3, // n0x1b31 c0x0000 (---------------) + I bar - 0x002168c3, // n0x1b32 c0x0000 (---------------) + I cpa - 0x00213083, // n0x1b33 c0x0000 (---------------) + I eng - 0x002b1983, // n0x1b34 c0x0000 (---------------) + I jur - 0x00271c03, // n0x1b35 c0x0000 (---------------) + I law - 0x00213443, // n0x1b36 c0x0000 (---------------) + I med - 0x00233243, // n0x1b37 c0x0000 (---------------) + I com - 0x00239103, // n0x1b38 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1b39 c0x0000 (---------------) + I gov - 0x00223b43, // n0x1b3a c0x0000 (---------------) + I net - 0x00228743, // n0x1b3b c0x0000 (---------------) + I org - 0x002db743, // n0x1b3c c0x0000 (---------------) + I plo - 0x002363c3, // n0x1b3d c0x0000 (---------------) + I sec - 0x000fe108, // n0x1b3e c0x0000 (---------------) + blogspot - 0x00233243, // n0x1b3f c0x0000 (---------------) + I com - 0x00239103, // n0x1b40 c0x0000 (---------------) + I edu - 
0x0027d903, // n0x1b41 c0x0000 (---------------) + I gov - 0x00201503, // n0x1b42 c0x0000 (---------------) + I int - 0x00223b43, // n0x1b43 c0x0000 (---------------) + I net - 0x00242144, // n0x1b44 c0x0000 (---------------) + I nome - 0x00228743, // n0x1b45 c0x0000 (---------------) + I org - 0x002a0ec4, // n0x1b46 c0x0000 (---------------) + I publ - 0x002b8e45, // n0x1b47 c0x0000 (---------------) + I belau - 0x0020ce42, // n0x1b48 c0x0000 (---------------) + I co - 0x002024c2, // n0x1b49 c0x0000 (---------------) + I ed - 0x00210a42, // n0x1b4a c0x0000 (---------------) + I go - 0x00202ac2, // n0x1b4b c0x0000 (---------------) + I ne - 0x00200dc2, // n0x1b4c c0x0000 (---------------) + I or - 0x00233243, // n0x1b4d c0x0000 (---------------) + I com - 0x0023c344, // n0x1b4e c0x0000 (---------------) + I coop - 0x00239103, // n0x1b4f c0x0000 (---------------) + I edu - 0x0027d903, // n0x1b50 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x1b51 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1b52 c0x0000 (---------------) + I net - 0x00228743, // n0x1b53 c0x0000 (---------------) + I org - 0x000fe108, // n0x1b54 c0x0000 (---------------) + blogspot - 0x00233243, // n0x1b55 c0x0000 (---------------) + I com - 0x00239103, // n0x1b56 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1b57 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x1b58 c0x0000 (---------------) + I mil - 0x00200904, // n0x1b59 c0x0000 (---------------) + I name - 0x00223b43, // n0x1b5a c0x0000 (---------------) + I net - 0x00228743, // n0x1b5b c0x0000 (---------------) + I org - 0x00217283, // n0x1b5c c0x0000 (---------------) + I sch - 0x002d5e84, // n0x1b5d c0x0000 (---------------) + I asso - 0x000fe108, // n0x1b5e c0x0000 (---------------) + blogspot - 0x00233243, // n0x1b5f c0x0000 (---------------) + I com - 0x00201383, // n0x1b60 c0x0000 (---------------) + I nom - 0x0024b944, // n0x1b61 c0x0000 (---------------) + I arts - 0x000fe108, // n0x1b62 c0x0000 
(---------------) + blogspot - 0x00233243, // n0x1b63 c0x0000 (---------------) + I com - 0x0024dcc4, // n0x1b64 c0x0000 (---------------) + I firm - 0x00201844, // n0x1b65 c0x0000 (---------------) + I info - 0x00201383, // n0x1b66 c0x0000 (---------------) + I nom - 0x00201542, // n0x1b67 c0x0000 (---------------) + I nt - 0x00228743, // n0x1b68 c0x0000 (---------------) + I org - 0x0022c2c3, // n0x1b69 c0x0000 (---------------) + I rec - 0x00364cc5, // n0x1b6a c0x0000 (---------------) + I store - 0x00200c82, // n0x1b6b c0x0000 (---------------) + I tm - 0x002ffb43, // n0x1b6c c0x0000 (---------------) + I www - 0x00200342, // n0x1b6d c0x0000 (---------------) + I ac - 0x000fe108, // n0x1b6e c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x1b6f c0x0000 (---------------) + I co - 0x00239103, // n0x1b70 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1b71 c0x0000 (---------------) + I gov - 0x002012c2, // n0x1b72 c0x0000 (---------------) + I in - 0x00228743, // n0x1b73 c0x0000 (---------------) + I org - 0x00200342, // n0x1b74 c0x0000 (---------------) + I ac - 0x002001c7, // n0x1b75 c0x0000 (---------------) + I adygeya - 0x0028c445, // n0x1b76 c0x0000 (---------------) + I altai - 0x00295084, // n0x1b77 c0x0000 (---------------) + I amur - 0x0037ca86, // n0x1b78 c0x0000 (---------------) + I amursk - 0x00236d0b, // n0x1b79 c0x0000 (---------------) + I arkhangelsk - 0x0025a349, // n0x1b7a c0x0000 (---------------) + I astrakhan - 0x0031fa86, // n0x1b7b c0x0000 (---------------) + I baikal - 0x00324289, // n0x1b7c c0x0000 (---------------) + I bashkiria - 0x002d4488, // n0x1b7d c0x0000 (---------------) + I belgorod - 0x00204ac3, // n0x1b7e c0x0000 (---------------) + I bir - 0x000fe108, // n0x1b7f c0x0000 (---------------) + blogspot - 0x00229507, // n0x1b80 c0x0000 (---------------) + I bryansk - 0x0034ac88, // n0x1b81 c0x0000 (---------------) + I buryatia - 0x00329bc3, // n0x1b82 c0x0000 (---------------) + I cbg - 0x00264784, // n0x1b83 
c0x0000 (---------------) + I chel - 0x0026dd8b, // n0x1b84 c0x0000 (---------------) + I chelyabinsk - 0x002b0305, // n0x1b85 c0x0000 (---------------) + I chita - 0x002be488, // n0x1b86 c0x0000 (---------------) + I chukotka - 0x003364c9, // n0x1b87 c0x0000 (---------------) + I chuvashia - 0x0025cd03, // n0x1b88 c0x0000 (---------------) + I cmw - 0x00233243, // n0x1b89 c0x0000 (---------------) + I com - 0x00202508, // n0x1b8a c0x0000 (---------------) + I dagestan - 0x002eb107, // n0x1b8b c0x0000 (---------------) + I dudinka - 0x00327d86, // n0x1b8c c0x0000 (---------------) + I e-burg - 0x00239103, // n0x1b8d c0x0000 (---------------) + I edu - 0x00384f47, // n0x1b8e c0x0000 (---------------) + I fareast - 0x0027d903, // n0x1b8f c0x0000 (---------------) + I gov - 0x00289106, // n0x1b90 c0x0000 (---------------) + I grozny - 0x00201503, // n0x1b91 c0x0000 (---------------) + I int - 0x00230847, // n0x1b92 c0x0000 (---------------) + I irkutsk - 0x00288007, // n0x1b93 c0x0000 (---------------) + I ivanovo - 0x00385f47, // n0x1b94 c0x0000 (---------------) + I izhevsk - 0x002eac45, // n0x1b95 c0x0000 (---------------) + I jamal - 0x0020b483, // n0x1b96 c0x0000 (---------------) + I jar - 0x0020c3cb, // n0x1b97 c0x0000 (---------------) + I joshkar-ola - 0x00332448, // n0x1b98 c0x0000 (---------------) + I k-uralsk - 0x00225688, // n0x1b99 c0x0000 (---------------) + I kalmykia - 0x00250706, // n0x1b9a c0x0000 (---------------) + I kaluga - 0x0021a349, // n0x1b9b c0x0000 (---------------) + I kamchatka - 0x00325cc7, // n0x1b9c c0x0000 (---------------) + I karelia - 0x002f8c85, // n0x1b9d c0x0000 (---------------) + I kazan - 0x00314984, // n0x1b9e c0x0000 (---------------) + I kchr - 0x00275d48, // n0x1b9f c0x0000 (---------------) + I kemerovo - 0x002338ca, // n0x1ba0 c0x0000 (---------------) + I khabarovsk - 0x00233b09, // n0x1ba1 c0x0000 (---------------) + I khakassia - 0x0024f143, // n0x1ba2 c0x0000 (---------------) + I khv - 0x0027f805, // n0x1ba3 
c0x0000 (---------------) + I kirov - 0x00272ac3, // n0x1ba4 c0x0000 (---------------) + I kms - 0x002a6c46, // n0x1ba5 c0x0000 (---------------) + I koenig - 0x0039b084, // n0x1ba6 c0x0000 (---------------) + I komi - 0x002fe848, // n0x1ba7 c0x0000 (---------------) + I kostroma - 0x003860cb, // n0x1ba8 c0x0000 (---------------) + I krasnoyarsk - 0x0033c145, // n0x1ba9 c0x0000 (---------------) + I kuban - 0x002b8bc6, // n0x1baa c0x0000 (---------------) + I kurgan - 0x002ba785, // n0x1bab c0x0000 (---------------) + I kursk - 0x002bb788, // n0x1bac c0x0000 (---------------) + I kustanai - 0x002bcbc7, // n0x1bad c0x0000 (---------------) + I kuzbass - 0x00211c47, // n0x1bae c0x0000 (---------------) + I lipetsk - 0x002274c7, // n0x1baf c0x0000 (---------------) + I magadan - 0x00219cc4, // n0x1bb0 c0x0000 (---------------) + I mari - 0x00222947, // n0x1bb1 c0x0000 (---------------) + I mari-el - 0x0027c146, // n0x1bb2 c0x0000 (---------------) + I marine - 0x00207dc3, // n0x1bb3 c0x0000 (---------------) + I mil - 0x002c6f48, // n0x1bb4 c0x0000 (---------------) + I mordovia - 0x00253203, // n0x1bb5 c0x0000 (---------------) + I msk - 0x002cde48, // n0x1bb6 c0x0000 (---------------) + I murmansk - 0x002d3ac5, // n0x1bb7 c0x0000 (---------------) + I mytis - 0x0030ac08, // n0x1bb8 c0x0000 (---------------) + I nakhodka - 0x00239307, // n0x1bb9 c0x0000 (---------------) + I nalchik - 0x00223b43, // n0x1bba c0x0000 (---------------) + I net - 0x00391fc3, // n0x1bbb c0x0000 (---------------) + I nkz - 0x0028bac4, // n0x1bbc c0x0000 (---------------) + I nnov - 0x00373307, // n0x1bbd c0x0000 (---------------) + I norilsk - 0x00206143, // n0x1bbe c0x0000 (---------------) + I nov - 0x002880cb, // n0x1bbf c0x0000 (---------------) + I novosibirsk - 0x00217683, // n0x1bc0 c0x0000 (---------------) + I nsk - 0x002531c4, // n0x1bc1 c0x0000 (---------------) + I omsk - 0x00364d48, // n0x1bc2 c0x0000 (---------------) + I orenburg - 0x00228743, // n0x1bc3 c0x0000 
(---------------) + I org - 0x002d8045, // n0x1bc4 c0x0000 (---------------) + I oryol - 0x00297b05, // n0x1bc5 c0x0000 (---------------) + I oskol - 0x0039d7c6, // n0x1bc6 c0x0000 (---------------) + I palana - 0x00212605, // n0x1bc7 c0x0000 (---------------) + I penza - 0x002d3e84, // n0x1bc8 c0x0000 (---------------) + I perm - 0x002080c2, // n0x1bc9 c0x0000 (---------------) + I pp - 0x002e6203, // n0x1bca c0x0000 (---------------) + I ptz - 0x003693ca, // n0x1bcb c0x0000 (---------------) + I pyatigorsk - 0x0038fd03, // n0x1bcc c0x0000 (---------------) + I rnd - 0x002d2fc9, // n0x1bcd c0x0000 (---------------) + I rubtsovsk - 0x00249886, // n0x1bce c0x0000 (---------------) + I ryazan - 0x0021c788, // n0x1bcf c0x0000 (---------------) + I sakhalin - 0x0028e086, // n0x1bd0 c0x0000 (---------------) + I samara - 0x0021adc7, // n0x1bd1 c0x0000 (---------------) + I saratov - 0x002c9c88, // n0x1bd2 c0x0000 (---------------) + I simbirsk - 0x0034b188, // n0x1bd3 c0x0000 (---------------) + I smolensk - 0x002d6803, // n0x1bd4 c0x0000 (---------------) + I snz - 0x00269c03, // n0x1bd5 c0x0000 (---------------) + I spb - 0x00220649, // n0x1bd6 c0x0000 (---------------) + I stavropol - 0x002490c3, // n0x1bd7 c0x0000 (---------------) + I stv - 0x00341e46, // n0x1bd8 c0x0000 (---------------) + I surgut - 0x0028c0c6, // n0x1bd9 c0x0000 (---------------) + I syzran - 0x00317446, // n0x1bda c0x0000 (---------------) + I tambov - 0x0036bec9, // n0x1bdb c0x0000 (---------------) + I tatarstan - 0x002fdf44, // n0x1bdc c0x0000 (---------------) + I test - 0x0020ae83, // n0x1bdd c0x0000 (---------------) + I tom - 0x00332345, // n0x1bde c0x0000 (---------------) + I tomsk - 0x0030d249, // n0x1bdf c0x0000 (---------------) + I tsaritsyn - 0x00211d43, // n0x1be0 c0x0000 (---------------) + I tsk - 0x0035bb44, // n0x1be1 c0x0000 (---------------) + I tula - 0x002ef044, // n0x1be2 c0x0000 (---------------) + I tuva - 0x00360d84, // n0x1be3 c0x0000 (---------------) + I tver - 
0x00321446, // n0x1be4 c0x0000 (---------------) + I tyumen - 0x0020d2c3, // n0x1be5 c0x0000 (---------------) + I udm - 0x0020d2c8, // n0x1be6 c0x0000 (---------------) + I udmurtia - 0x00259f88, // n0x1be7 c0x0000 (---------------) + I ulan-ude - 0x0033b846, // n0x1be8 c0x0000 (---------------) + I vdonsk - 0x002f8a8b, // n0x1be9 c0x0000 (---------------) + I vladikavkaz - 0x002f8dc8, // n0x1bea c0x0000 (---------------) + I vladimir - 0x002f8fcb, // n0x1beb c0x0000 (---------------) + I vladivostok - 0x002fb449, // n0x1bec c0x0000 (---------------) + I volgograd - 0x002fa987, // n0x1bed c0x0000 (---------------) + I vologda - 0x002fb8c8, // n0x1bee c0x0000 (---------------) + I voronezh - 0x002fd583, // n0x1bef c0x0000 (---------------) + I vrn - 0x00398fc6, // n0x1bf0 c0x0000 (---------------) + I vyatka - 0x0020bb07, // n0x1bf1 c0x0000 (---------------) + I yakutia - 0x002991c5, // n0x1bf2 c0x0000 (---------------) + I yamal - 0x00344f89, // n0x1bf3 c0x0000 (---------------) + I yaroslavl - 0x0031310d, // n0x1bf4 c0x0000 (---------------) + I yekaterinburg - 0x0021c5d1, // n0x1bf5 c0x0000 (---------------) + I yuzhno-sakhalinsk - 0x00237d05, // n0x1bf6 c0x0000 (---------------) + I zgrad - 0x00200342, // n0x1bf7 c0x0000 (---------------) + I ac - 0x0020ce42, // n0x1bf8 c0x0000 (---------------) + I co - 0x00233243, // n0x1bf9 c0x0000 (---------------) + I com - 0x00239103, // n0x1bfa c0x0000 (---------------) + I edu - 0x002aebc4, // n0x1bfb c0x0000 (---------------) + I gouv - 0x0027d903, // n0x1bfc c0x0000 (---------------) + I gov - 0x00201503, // n0x1bfd c0x0000 (---------------) + I int - 0x00207dc3, // n0x1bfe c0x0000 (---------------) + I mil - 0x00223b43, // n0x1bff c0x0000 (---------------) + I net - 0x00233243, // n0x1c00 c0x0000 (---------------) + I com - 0x00239103, // n0x1c01 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1c02 c0x0000 (---------------) + I gov - 0x00213443, // n0x1c03 c0x0000 (---------------) + I med - 0x00223b43, // 
n0x1c04 c0x0000 (---------------) + I net - 0x00228743, // n0x1c05 c0x0000 (---------------) + I org - 0x0028f343, // n0x1c06 c0x0000 (---------------) + I pub - 0x00217283, // n0x1c07 c0x0000 (---------------) + I sch - 0x00233243, // n0x1c08 c0x0000 (---------------) + I com - 0x00239103, // n0x1c09 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1c0a c0x0000 (---------------) + I gov - 0x00223b43, // n0x1c0b c0x0000 (---------------) + I net - 0x00228743, // n0x1c0c c0x0000 (---------------) + I org - 0x00233243, // n0x1c0d c0x0000 (---------------) + I com - 0x00239103, // n0x1c0e c0x0000 (---------------) + I edu - 0x0027d903, // n0x1c0f c0x0000 (---------------) + I gov - 0x00223b43, // n0x1c10 c0x0000 (---------------) + I net - 0x00228743, // n0x1c11 c0x0000 (---------------) + I org - 0x00233243, // n0x1c12 c0x0000 (---------------) + I com - 0x00239103, // n0x1c13 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1c14 c0x0000 (---------------) + I gov - 0x00201844, // n0x1c15 c0x0000 (---------------) + I info - 0x00213443, // n0x1c16 c0x0000 (---------------) + I med - 0x00223b43, // n0x1c17 c0x0000 (---------------) + I net - 0x00228743, // n0x1c18 c0x0000 (---------------) + I org - 0x002203c2, // n0x1c19 c0x0000 (---------------) + I tv - 0x002001c1, // n0x1c1a c0x0000 (---------------) + I a - 0x00200342, // n0x1c1b c0x0000 (---------------) + I ac - 0x00200001, // n0x1c1c c0x0000 (---------------) + I b - 0x00314f02, // n0x1c1d c0x0000 (---------------) + I bd - 0x000fe108, // n0x1c1e c0x0000 (---------------) + blogspot - 0x00220945, // n0x1c1f c0x0000 (---------------) + I brand - 0x00200141, // n0x1c20 c0x0000 (---------------) + I c - 0x00033243, // n0x1c21 c0x0000 (---------------) + com - 0x00200201, // n0x1c22 c0x0000 (---------------) + I d - 0x00200081, // n0x1c23 c0x0000 (---------------) + I e - 0x00201701, // n0x1c24 c0x0000 (---------------) + I f - 0x00233802, // n0x1c25 c0x0000 (---------------) + I fh - 0x00233804, // 
n0x1c26 c0x0000 (---------------) + I fhsk - 0x00363cc3, // n0x1c27 c0x0000 (---------------) + I fhv - 0x00200281, // n0x1c28 c0x0000 (---------------) + I g - 0x002003c1, // n0x1c29 c0x0000 (---------------) + I h - 0x00200041, // n0x1c2a c0x0000 (---------------) + I i - 0x00201b41, // n0x1c2b c0x0000 (---------------) + I k - 0x002f2d87, // n0x1c2c c0x0000 (---------------) + I komforb - 0x002d704f, // n0x1c2d c0x0000 (---------------) + I kommunalforbund - 0x002bf6c6, // n0x1c2e c0x0000 (---------------) + I komvux - 0x00200d41, // n0x1c2f c0x0000 (---------------) + I l - 0x00268a06, // n0x1c30 c0x0000 (---------------) + I lanbib - 0x00200441, // n0x1c31 c0x0000 (---------------) + I m - 0x00200781, // n0x1c32 c0x0000 (---------------) + I n - 0x0032158e, // n0x1c33 c0x0000 (---------------) + I naturbruksgymn - 0x00200dc1, // n0x1c34 c0x0000 (---------------) + I o - 0x00228743, // n0x1c35 c0x0000 (---------------) + I org - 0x00200581, // n0x1c36 c0x0000 (---------------) + I p - 0x002a4d85, // n0x1c37 c0x0000 (---------------) + I parti - 0x002080c2, // n0x1c38 c0x0000 (---------------) + I pp - 0x00246a05, // n0x1c39 c0x0000 (---------------) + I press - 0x002006c1, // n0x1c3a c0x0000 (---------------) + I r - 0x002000c1, // n0x1c3b c0x0000 (---------------) + I s - 0x002004c1, // n0x1c3c c0x0000 (---------------) + I t - 0x00200c82, // n0x1c3d c0x0000 (---------------) + I tm - 0x00200741, // n0x1c3e c0x0000 (---------------) + I u - 0x00201c41, // n0x1c3f c0x0000 (---------------) + I w - 0x00200a01, // n0x1c40 c0x0000 (---------------) + I x - 0x00200241, // n0x1c41 c0x0000 (---------------) + I y - 0x00200101, // n0x1c42 c0x0000 (---------------) + I z - 0x000fe108, // n0x1c43 c0x0000 (---------------) + blogspot - 0x00233243, // n0x1c44 c0x0000 (---------------) + I com - 0x00239103, // n0x1c45 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1c46 c0x0000 (---------------) + I gov - 0x00223b43, // n0x1c47 c0x0000 (---------------) + I net - 
0x00228743, // n0x1c48 c0x0000 (---------------) + I org - 0x0021e783, // n0x1c49 c0x0000 (---------------) + I per - 0x00233243, // n0x1c4a c0x0000 (---------------) + I com - 0x0027d903, // n0x1c4b c0x0000 (---------------) + I gov - 0x0008e988, // n0x1c4c c0x0000 (---------------) + hashbang - 0x00207dc3, // n0x1c4d c0x0000 (---------------) + I mil - 0x00223b43, // n0x1c4e c0x0000 (---------------) + I net - 0x00228743, // n0x1c4f c0x0000 (---------------) + I org - 0x014d8c48, // n0x1c50 c0x0005 (---------------)* o platform - 0x000fe108, // n0x1c51 c0x0000 (---------------) + blogspot - 0x000fe108, // n0x1c52 c0x0000 (---------------) + blogspot - 0x00233243, // n0x1c53 c0x0000 (---------------) + I com - 0x00239103, // n0x1c54 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1c55 c0x0000 (---------------) + I gov - 0x00223b43, // n0x1c56 c0x0000 (---------------) + I net - 0x00228743, // n0x1c57 c0x0000 (---------------) + I org - 0x00201d43, // n0x1c58 c0x0000 (---------------) + I art - 0x000fe108, // n0x1c59 c0x0000 (---------------) + blogspot - 0x00233243, // n0x1c5a c0x0000 (---------------) + I com - 0x00239103, // n0x1c5b c0x0000 (---------------) + I edu - 0x002aebc4, // n0x1c5c c0x0000 (---------------) + I gouv - 0x00228743, // n0x1c5d c0x0000 (---------------) + I org - 0x00299705, // n0x1c5e c0x0000 (---------------) + I perso - 0x00309e44, // n0x1c5f c0x0000 (---------------) + I univ - 0x00233243, // n0x1c60 c0x0000 (---------------) + I com - 0x00223b43, // n0x1c61 c0x0000 (---------------) + I net - 0x00228743, // n0x1c62 c0x0000 (---------------) + I org - 0x0020ce42, // n0x1c63 c0x0000 (---------------) + I co - 0x00233243, // n0x1c64 c0x0000 (---------------) + I com - 0x002378c9, // n0x1c65 c0x0000 (---------------) + I consulado - 0x00239103, // n0x1c66 c0x0000 (---------------) + I edu - 0x0023b489, // n0x1c67 c0x0000 (---------------) + I embaixada - 0x0027d903, // n0x1c68 c0x0000 (---------------) + I gov - 0x00207dc3, // 
n0x1c69 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1c6a c0x0000 (---------------) + I net - 0x00228743, // n0x1c6b c0x0000 (---------------) + I org - 0x002e15c8, // n0x1c6c c0x0000 (---------------) + I principe - 0x00213547, // n0x1c6d c0x0000 (---------------) + I saotome - 0x00364cc5, // n0x1c6e c0x0000 (---------------) + I store - 0x002001c7, // n0x1c6f c0x0000 (---------------) + I adygeya - 0x00236d0b, // n0x1c70 c0x0000 (---------------) + I arkhangelsk - 0x0020eb88, // n0x1c71 c0x0000 (---------------) + I balashov - 0x00324289, // n0x1c72 c0x0000 (---------------) + I bashkiria - 0x00229507, // n0x1c73 c0x0000 (---------------) + I bryansk - 0x00202508, // n0x1c74 c0x0000 (---------------) + I dagestan - 0x00289106, // n0x1c75 c0x0000 (---------------) + I grozny - 0x00288007, // n0x1c76 c0x0000 (---------------) + I ivanovo - 0x00225688, // n0x1c77 c0x0000 (---------------) + I kalmykia - 0x00250706, // n0x1c78 c0x0000 (---------------) + I kaluga - 0x00325cc7, // n0x1c79 c0x0000 (---------------) + I karelia - 0x00233b09, // n0x1c7a c0x0000 (---------------) + I khakassia - 0x0037d749, // n0x1c7b c0x0000 (---------------) + I krasnodar - 0x002b8bc6, // n0x1c7c c0x0000 (---------------) + I kurgan - 0x002b9ac5, // n0x1c7d c0x0000 (---------------) + I lenug - 0x002c6f48, // n0x1c7e c0x0000 (---------------) + I mordovia - 0x00253203, // n0x1c7f c0x0000 (---------------) + I msk - 0x002cde48, // n0x1c80 c0x0000 (---------------) + I murmansk - 0x00239307, // n0x1c81 c0x0000 (---------------) + I nalchik - 0x00206143, // n0x1c82 c0x0000 (---------------) + I nov - 0x00355647, // n0x1c83 c0x0000 (---------------) + I obninsk - 0x00212605, // n0x1c84 c0x0000 (---------------) + I penza - 0x002dda88, // n0x1c85 c0x0000 (---------------) + I pokrovsk - 0x00274605, // n0x1c86 c0x0000 (---------------) + I sochi - 0x00269c03, // n0x1c87 c0x0000 (---------------) + I spb - 0x0034f6c9, // n0x1c88 c0x0000 (---------------) + I togliatti - 0x002ab687, // 
n0x1c89 c0x0000 (---------------) + I troitsk - 0x0035bb44, // n0x1c8a c0x0000 (---------------) + I tula - 0x002ef044, // n0x1c8b c0x0000 (---------------) + I tuva - 0x002f8a8b, // n0x1c8c c0x0000 (---------------) + I vladikavkaz - 0x002f8dc8, // n0x1c8d c0x0000 (---------------) + I vladimir - 0x002fa987, // n0x1c8e c0x0000 (---------------) + I vologda - 0x00233243, // n0x1c8f c0x0000 (---------------) + I com - 0x00239103, // n0x1c90 c0x0000 (---------------) + I edu - 0x00212b03, // n0x1c91 c0x0000 (---------------) + I gob - 0x00228743, // n0x1c92 c0x0000 (---------------) + I org - 0x002437c3, // n0x1c93 c0x0000 (---------------) + I red - 0x0027d903, // n0x1c94 c0x0000 (---------------) + I gov - 0x00233243, // n0x1c95 c0x0000 (---------------) + I com - 0x00239103, // n0x1c96 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1c97 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x1c98 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1c99 c0x0000 (---------------) + I net - 0x00228743, // n0x1c9a c0x0000 (---------------) + I org - 0x00200342, // n0x1c9b c0x0000 (---------------) + I ac - 0x0020ce42, // n0x1c9c c0x0000 (---------------) + I co - 0x00228743, // n0x1c9d c0x0000 (---------------) + I org - 0x000fe108, // n0x1c9e c0x0000 (---------------) + blogspot - 0x00200342, // n0x1c9f c0x0000 (---------------) + I ac - 0x0020ce42, // n0x1ca0 c0x0000 (---------------) + I co - 0x00210a42, // n0x1ca1 c0x0000 (---------------) + I go - 0x002012c2, // n0x1ca2 c0x0000 (---------------) + I in - 0x00207dc2, // n0x1ca3 c0x0000 (---------------) + I mi - 0x00223b43, // n0x1ca4 c0x0000 (---------------) + I net - 0x00200dc2, // n0x1ca5 c0x0000 (---------------) + I or - 0x00200342, // n0x1ca6 c0x0000 (---------------) + I ac - 0x00331a83, // n0x1ca7 c0x0000 (---------------) + I biz - 0x0020ce42, // n0x1ca8 c0x0000 (---------------) + I co - 0x00233243, // n0x1ca9 c0x0000 (---------------) + I com - 0x00239103, // n0x1caa c0x0000 (---------------) + 
I edu - 0x00210a42, // n0x1cab c0x0000 (---------------) + I go - 0x0027d903, // n0x1cac c0x0000 (---------------) + I gov - 0x00201503, // n0x1cad c0x0000 (---------------) + I int - 0x00207dc3, // n0x1cae c0x0000 (---------------) + I mil - 0x00200904, // n0x1caf c0x0000 (---------------) + I name - 0x00223b43, // n0x1cb0 c0x0000 (---------------) + I net - 0x0021b843, // n0x1cb1 c0x0000 (---------------) + I nic - 0x00228743, // n0x1cb2 c0x0000 (---------------) + I org - 0x002fdf44, // n0x1cb3 c0x0000 (---------------) + I test - 0x0021e243, // n0x1cb4 c0x0000 (---------------) + I web - 0x0027d903, // n0x1cb5 c0x0000 (---------------) + I gov - 0x0020ce42, // n0x1cb6 c0x0000 (---------------) + I co - 0x00233243, // n0x1cb7 c0x0000 (---------------) + I com - 0x00239103, // n0x1cb8 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1cb9 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x1cba c0x0000 (---------------) + I mil - 0x00223b43, // n0x1cbb c0x0000 (---------------) + I net - 0x00201383, // n0x1cbc c0x0000 (---------------) + I nom - 0x00228743, // n0x1cbd c0x0000 (---------------) + I org - 0x00391d87, // n0x1cbe c0x0000 (---------------) + I agrinet - 0x00233243, // n0x1cbf c0x0000 (---------------) + I com - 0x00225c07, // n0x1cc0 c0x0000 (---------------) + I defense - 0x0025d446, // n0x1cc1 c0x0000 (---------------) + I edunet - 0x00215c43, // n0x1cc2 c0x0000 (---------------) + I ens - 0x00201703, // n0x1cc3 c0x0000 (---------------) + I fin - 0x0027d903, // n0x1cc4 c0x0000 (---------------) + I gov - 0x00221b03, // n0x1cc5 c0x0000 (---------------) + I ind - 0x00201844, // n0x1cc6 c0x0000 (---------------) + I info - 0x0036cdc4, // n0x1cc7 c0x0000 (---------------) + I intl - 0x002d8e06, // n0x1cc8 c0x0000 (---------------) + I mincom - 0x0021a903, // n0x1cc9 c0x0000 (---------------) + I nat - 0x00223b43, // n0x1cca c0x0000 (---------------) + I net - 0x00228743, // n0x1ccb c0x0000 (---------------) + I org - 0x00299705, // n0x1ccc 
c0x0000 (---------------) + I perso - 0x0020e5c4, // n0x1ccd c0x0000 (---------------) + I rnrt - 0x002300c3, // n0x1cce c0x0000 (---------------) + I rns - 0x0037ba43, // n0x1ccf c0x0000 (---------------) + I rnu - 0x002c1b87, // n0x1cd0 c0x0000 (---------------) + I tourism - 0x00208c05, // n0x1cd1 c0x0000 (---------------) + I turen - 0x00233243, // n0x1cd2 c0x0000 (---------------) + I com - 0x00239103, // n0x1cd3 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1cd4 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x1cd5 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1cd6 c0x0000 (---------------) + I net - 0x00228743, // n0x1cd7 c0x0000 (---------------) + I org - 0x00202f02, // n0x1cd8 c0x0000 (---------------) + I av - 0x002ccec3, // n0x1cd9 c0x0000 (---------------) + I bbs - 0x00286003, // n0x1cda c0x0000 (---------------) + I bel - 0x00331a83, // n0x1cdb c0x0000 (---------------) + I biz - 0x51e33243, // n0x1cdc c0x0147 (n0x1ced-n0x1cee) + I com - 0x0026ab42, // n0x1cdd c0x0000 (---------------) + I dr - 0x00239103, // n0x1cde c0x0000 (---------------) + I edu - 0x002060c3, // n0x1cdf c0x0000 (---------------) + I gen - 0x0027d903, // n0x1ce0 c0x0000 (---------------) + I gov - 0x00201844, // n0x1ce1 c0x0000 (---------------) + I info - 0x00332603, // n0x1ce2 c0x0000 (---------------) + I k12 - 0x00248843, // n0x1ce3 c0x0000 (---------------) + I kep - 0x00207dc3, // n0x1ce4 c0x0000 (---------------) + I mil - 0x00200904, // n0x1ce5 c0x0000 (---------------) + I name - 0x522095c2, // n0x1ce6 c0x0148 (n0x1cee-n0x1cef) + I nc - 0x00223b43, // n0x1ce7 c0x0000 (---------------) + I net - 0x00228743, // n0x1ce8 c0x0000 (---------------) + I org - 0x00206ec3, // n0x1ce9 c0x0000 (---------------) + I pol - 0x0022ce43, // n0x1cea c0x0000 (---------------) + I tel - 0x002203c2, // n0x1ceb c0x0000 (---------------) + I tv - 0x0021e243, // n0x1cec c0x0000 (---------------) + I web - 0x000fe108, // n0x1ced c0x0000 (---------------) + blogspot - 
0x0027d903, // n0x1cee c0x0000 (---------------) + I gov - 0x002e2dc4, // n0x1cef c0x0000 (---------------) + I aero - 0x00331a83, // n0x1cf0 c0x0000 (---------------) + I biz - 0x0020ce42, // n0x1cf1 c0x0000 (---------------) + I co - 0x00233243, // n0x1cf2 c0x0000 (---------------) + I com - 0x0023c344, // n0x1cf3 c0x0000 (---------------) + I coop - 0x00239103, // n0x1cf4 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1cf5 c0x0000 (---------------) + I gov - 0x00201844, // n0x1cf6 c0x0000 (---------------) + I info - 0x00201503, // n0x1cf7 c0x0000 (---------------) + I int - 0x002e35c4, // n0x1cf8 c0x0000 (---------------) + I jobs - 0x0020bf04, // n0x1cf9 c0x0000 (---------------) + I mobi - 0x002d1086, // n0x1cfa c0x0000 (---------------) + I museum - 0x00200904, // n0x1cfb c0x0000 (---------------) + I name - 0x00223b43, // n0x1cfc c0x0000 (---------------) + I net - 0x00228743, // n0x1cfd c0x0000 (---------------) + I org - 0x00224b03, // n0x1cfe c0x0000 (---------------) + I pro - 0x002a3206, // n0x1cff c0x0000 (---------------) + I travel - 0x000569cb, // n0x1d00 c0x0000 (---------------) + better-than - 0x00013206, // n0x1d01 c0x0000 (---------------) + dyndns - 0x0001e08a, // n0x1d02 c0x0000 (---------------) + on-the-web - 0x000fd64a, // n0x1d03 c0x0000 (---------------) + worse-than - 0x000fe108, // n0x1d04 c0x0000 (---------------) + blogspot - 0x00378c44, // n0x1d05 c0x0000 (---------------) + I club - 0x00233243, // n0x1d06 c0x0000 (---------------) + I com - 0x00331a44, // n0x1d07 c0x0000 (---------------) + I ebiz - 0x00239103, // n0x1d08 c0x0000 (---------------) + I edu - 0x00298344, // n0x1d09 c0x0000 (---------------) + I game - 0x0027d903, // n0x1d0a c0x0000 (---------------) + I gov - 0x0020de03, // n0x1d0b c0x0000 (---------------) + I idv - 0x00207dc3, // n0x1d0c c0x0000 (---------------) + I mil - 0x00223b43, // n0x1d0d c0x0000 (---------------) + I net - 0x00228743, // n0x1d0e c0x0000 (---------------) + I org - 0x00322ccb, // 
n0x1d0f c0x0000 (---------------) + I xn--czrw28b - 0x0039378a, // n0x1d10 c0x0000 (---------------) + I xn--uc0atv - 0x003a448c, // n0x1d11 c0x0000 (---------------) + I xn--zf0ao64a - 0x00200342, // n0x1d12 c0x0000 (---------------) + I ac - 0x0020ce42, // n0x1d13 c0x0000 (---------------) + I co - 0x00210a42, // n0x1d14 c0x0000 (---------------) + I go - 0x00234305, // n0x1d15 c0x0000 (---------------) + I hotel - 0x00201844, // n0x1d16 c0x0000 (---------------) + I info - 0x00200982, // n0x1d17 c0x0000 (---------------) + I me - 0x00207dc3, // n0x1d18 c0x0000 (---------------) + I mil - 0x0020bf04, // n0x1d19 c0x0000 (---------------) + I mobi - 0x00202ac2, // n0x1d1a c0x0000 (---------------) + I ne - 0x00200dc2, // n0x1d1b c0x0000 (---------------) + I or - 0x00207f02, // n0x1d1c c0x0000 (---------------) + I sc - 0x002203c2, // n0x1d1d c0x0000 (---------------) + I tv - 0x00131a83, // n0x1d1e c0x0000 (---------------) + biz - 0x002ad149, // n0x1d1f c0x0000 (---------------) + I cherkassy - 0x0028bf48, // n0x1d20 c0x0000 (---------------) + I cherkasy - 0x0027d789, // n0x1d21 c0x0000 (---------------) + I chernigov - 0x00287e49, // n0x1d22 c0x0000 (---------------) + I chernihiv - 0x00374f4a, // n0x1d23 c0x0000 (---------------) + I chernivtsi - 0x003682ca, // n0x1d24 c0x0000 (---------------) + I chernovtsy - 0x0020d082, // n0x1d25 c0x0000 (---------------) + I ck - 0x0021dac2, // n0x1d26 c0x0000 (---------------) + I cn - 0x0000ce42, // n0x1d27 c0x0000 (---------------) + co - 0x00233243, // n0x1d28 c0x0000 (---------------) + I com - 0x002051c2, // n0x1d29 c0x0000 (---------------) + I cr - 0x00244dc6, // n0x1d2a c0x0000 (---------------) + I crimea - 0x00350e42, // n0x1d2b c0x0000 (---------------) + I cv - 0x0020df02, // n0x1d2c c0x0000 (---------------) + I dn - 0x0022978e, // n0x1d2d c0x0000 (---------------) + I dnepropetrovsk - 0x0027278e, // n0x1d2e c0x0000 (---------------) + I dnipropetrovsk - 0x0027d607, // n0x1d2f c0x0000 (---------------) + I 
dominic - 0x00311587, // n0x1d30 c0x0000 (---------------) + I donetsk - 0x002d73c2, // n0x1d31 c0x0000 (---------------) + I dp - 0x00239103, // n0x1d32 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1d33 c0x0000 (---------------) + I gov - 0x00201a82, // n0x1d34 c0x0000 (---------------) + I if - 0x002012c2, // n0x1d35 c0x0000 (---------------) + I in - 0x0024008f, // n0x1d36 c0x0000 (---------------) + I ivano-frankivsk - 0x0021c802, // n0x1d37 c0x0000 (---------------) + I kh - 0x00234cc7, // n0x1d38 c0x0000 (---------------) + I kharkiv - 0x0023fa47, // n0x1d39 c0x0000 (---------------) + I kharkov - 0x00240407, // n0x1d3a c0x0000 (---------------) + I kherson - 0x002408cc, // n0x1d3b c0x0000 (---------------) + I khmelnitskiy - 0x0024aacc, // n0x1d3c c0x0000 (---------------) + I khmelnytskyi - 0x00202984, // n0x1d3d c0x0000 (---------------) + I kiev - 0x0027f80a, // n0x1d3e c0x0000 (---------------) + I kirovograd - 0x00238982, // n0x1d3f c0x0000 (---------------) + I km - 0x0020bdc2, // n0x1d40 c0x0000 (---------------) + I kr - 0x002b3084, // n0x1d41 c0x0000 (---------------) + I krym - 0x00255342, // n0x1d42 c0x0000 (---------------) + I ks - 0x002bd642, // n0x1d43 c0x0000 (---------------) + I kv - 0x0024ad04, // n0x1d44 c0x0000 (---------------) + I kyiv - 0x0021b942, // n0x1d45 c0x0000 (---------------) + I lg - 0x00208bc2, // n0x1d46 c0x0000 (---------------) + I lt - 0x00250787, // n0x1d47 c0x0000 (---------------) + I lugansk - 0x00234bc5, // n0x1d48 c0x0000 (---------------) + I lutsk - 0x00206582, // n0x1d49 c0x0000 (---------------) + I lv - 0x00240004, // n0x1d4a c0x0000 (---------------) + I lviv - 0x00367282, // n0x1d4b c0x0000 (---------------) + I mk - 0x00358908, // n0x1d4c c0x0000 (---------------) + I mykolaiv - 0x00223b43, // n0x1d4d c0x0000 (---------------) + I net - 0x00202f88, // n0x1d4e c0x0000 (---------------) + I nikolaev - 0x00205f42, // n0x1d4f c0x0000 (---------------) + I od - 0x0023a445, // n0x1d50 c0x0000 
(---------------) + I odesa - 0x00370ec6, // n0x1d51 c0x0000 (---------------) + I odessa - 0x00228743, // n0x1d52 c0x0000 (---------------) + I org - 0x00209442, // n0x1d53 c0x0000 (---------------) + I pl - 0x002de347, // n0x1d54 c0x0000 (---------------) + I poltava - 0x000080c2, // n0x1d55 c0x0000 (---------------) + pp - 0x002e1805, // n0x1d56 c0x0000 (---------------) + I rivne - 0x0038a0c5, // n0x1d57 c0x0000 (---------------) + I rovno - 0x0020b7c2, // n0x1d58 c0x0000 (---------------) + I rv - 0x002286c2, // n0x1d59 c0x0000 (---------------) + I sb - 0x00206d0a, // n0x1d5a c0x0000 (---------------) + I sebastopol - 0x0024d34a, // n0x1d5b c0x0000 (---------------) + I sevastopol - 0x00213e02, // n0x1d5c c0x0000 (---------------) + I sm - 0x00358884, // n0x1d5d c0x0000 (---------------) + I sumy - 0x00200a82, // n0x1d5e c0x0000 (---------------) + I te - 0x00309888, // n0x1d5f c0x0000 (---------------) + I ternopil - 0x0020fec2, // n0x1d60 c0x0000 (---------------) + I uz - 0x0029e748, // n0x1d61 c0x0000 (---------------) + I uzhgorod - 0x002f5bc7, // n0x1d62 c0x0000 (---------------) + I vinnica - 0x002f6789, // n0x1d63 c0x0000 (---------------) + I vinnytsia - 0x00202f42, // n0x1d64 c0x0000 (---------------) + I vn - 0x002fb685, // n0x1d65 c0x0000 (---------------) + I volyn - 0x0028c405, // n0x1d66 c0x0000 (---------------) + I yalta - 0x002c4dcb, // n0x1d67 c0x0000 (---------------) + I zaporizhzhe - 0x002c580c, // n0x1d68 c0x0000 (---------------) + I zaporizhzhia - 0x002306c8, // n0x1d69 c0x0000 (---------------) + I zhitomir - 0x002fba48, // n0x1d6a c0x0000 (---------------) + I zhytomyr - 0x00262bc2, // n0x1d6b c0x0000 (---------------) + I zp - 0x0021f6c2, // n0x1d6c c0x0000 (---------------) + I zt - 0x00200342, // n0x1d6d c0x0000 (---------------) + I ac - 0x000fe108, // n0x1d6e c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x1d6f c0x0000 (---------------) + I co - 0x00233243, // n0x1d70 c0x0000 (---------------) + I com - 0x00210a42, // 
n0x1d71 c0x0000 (---------------) + I go - 0x00202ac2, // n0x1d72 c0x0000 (---------------) + I ne - 0x00200dc2, // n0x1d73 c0x0000 (---------------) + I or - 0x00228743, // n0x1d74 c0x0000 (---------------) + I org - 0x00207f02, // n0x1d75 c0x0000 (---------------) + I sc - 0x00200342, // n0x1d76 c0x0000 (---------------) + I ac - 0x5420ce42, // n0x1d77 c0x0150 (n0x1d81-n0x1d82) + I co - 0x5467d903, // n0x1d78 c0x0151 (n0x1d82-n0x1d83) + I gov - 0x00312883, // n0x1d79 c0x0000 (---------------) + I ltd - 0x00200982, // n0x1d7a c0x0000 (---------------) + I me - 0x00223b43, // n0x1d7b c0x0000 (---------------) + I net - 0x0038dd03, // n0x1d7c c0x0000 (---------------) + I nhs - 0x00228743, // n0x1d7d c0x0000 (---------------) + I org - 0x002d96c3, // n0x1d7e c0x0000 (---------------) + I plc - 0x002207c6, // n0x1d7f c0x0000 (---------------) + I police - 0x01617283, // n0x1d80 c0x0005 (---------------)* o I sch - 0x000fe108, // n0x1d81 c0x0000 (---------------) + blogspot - 0x0000b747, // n0x1d82 c0x0000 (---------------) + service - 0x54e02942, // n0x1d83 c0x0153 (n0x1dc2-n0x1dc5) + I ak - 0x55200d02, // n0x1d84 c0x0154 (n0x1dc5-n0x1dc8) + I al - 0x55601d42, // n0x1d85 c0x0155 (n0x1dc8-n0x1dcb) + I ar - 0x55a03302, // n0x1d86 c0x0156 (n0x1dcb-n0x1dce) + I as - 0x55e03442, // n0x1d87 c0x0157 (n0x1dce-n0x1dd1) + I az - 0x56200e42, // n0x1d88 c0x0158 (n0x1dd1-n0x1dd4) + I ca - 0x5660ce42, // n0x1d89 c0x0159 (n0x1dd4-n0x1dd7) + I co - 0x56a2af82, // n0x1d8a c0x015a (n0x1dd7-n0x1dda) + I ct - 0x56e23882, // n0x1d8b c0x015b (n0x1dda-n0x1ddd) + I dc - 0x57205582, // n0x1d8c c0x015c (n0x1ddd-n0x1de0) + I de - 0x00272783, // n0x1d8d c0x0000 (---------------) + I dni - 0x00210083, // n0x1d8e c0x0000 (---------------) + I fed - 0x57617402, // n0x1d8f c0x015d (n0x1de0-n0x1de3) + I fl - 0x57a01bc2, // n0x1d90 c0x015e (n0x1de3-n0x1de6) + I ga - 0x57e0efc2, // n0x1d91 c0x015f (n0x1de6-n0x1de9) + I gu - 0x582003c2, // n0x1d92 c0x0160 (n0x1de9-n0x1deb) + I hi - 0x586019c2, // 
n0x1d93 c0x0161 (n0x1deb-n0x1dee) + I ia - 0x58a0d9c2, // n0x1d94 c0x0162 (n0x1dee-n0x1df1) + I id - 0x58e027c2, // n0x1d95 c0x0163 (n0x1df1-n0x1df4) + I il - 0x592012c2, // n0x1d96 c0x0164 (n0x1df4-n0x1df7) + I in - 0x000b1f45, // n0x1d97 c0x0000 (---------------) + is-by - 0x00226dc3, // n0x1d98 c0x0000 (---------------) + I isa - 0x0033c884, // n0x1d99 c0x0000 (---------------) + I kids - 0x59655342, // n0x1d9a c0x0165 (n0x1df7-n0x1dfa) + I ks - 0x59a36f82, // n0x1d9b c0x0166 (n0x1dfa-n0x1dfd) + I ky - 0x59e03082, // n0x1d9c c0x0167 (n0x1dfd-n0x1e00) + I la - 0x0007938b, // n0x1d9d c0x0000 (---------------) + land-4-sale - 0x5a200442, // n0x1d9e c0x0168 (n0x1e00-n0x1e03) + I ma - 0x5aa4dd82, // n0x1d9f c0x016a (n0x1e06-n0x1e09) + I md - 0x5ae00982, // n0x1da0 c0x016b (n0x1e09-n0x1e0c) + I me - 0x5b207dc2, // n0x1da1 c0x016c (n0x1e0c-n0x1e0f) + I mi - 0x5b623b02, // n0x1da2 c0x016d (n0x1e0f-n0x1e12) + I mn - 0x5ba08442, // n0x1da3 c0x016e (n0x1e12-n0x1e15) + I mo - 0x5be10f42, // n0x1da4 c0x016f (n0x1e15-n0x1e18) + I ms - 0x5c205402, // n0x1da5 c0x0170 (n0x1e18-n0x1e1b) + I mt - 0x5c6095c2, // n0x1da6 c0x0171 (n0x1e1b-n0x1e1e) + I nc - 0x5ca00782, // n0x1da7 c0x0172 (n0x1e1e-n0x1e20) + I nd - 0x5ce02ac2, // n0x1da8 c0x0173 (n0x1e20-n0x1e23) + I ne - 0x5d202e82, // n0x1da9 c0x0174 (n0x1e23-n0x1e26) + I nh - 0x5d601082, // n0x1daa c0x0175 (n0x1e26-n0x1e29) + I nj - 0x5da2b882, // n0x1dab c0x0176 (n0x1e29-n0x1e2c) + I nm - 0x00360a83, // n0x1dac c0x0000 (---------------) + I nsn - 0x5de09842, // n0x1dad c0x0177 (n0x1e2c-n0x1e2f) + I nv - 0x5e21c582, // n0x1dae c0x0178 (n0x1e2f-n0x1e32) + I ny - 0x5e609342, // n0x1daf c0x0179 (n0x1e32-n0x1e35) + I oh - 0x5ea035c2, // n0x1db0 c0x017a (n0x1e35-n0x1e38) + I ok - 0x5ee00dc2, // n0x1db1 c0x017b (n0x1e38-n0x1e3b) + I or - 0x5f20aac2, // n0x1db2 c0x017c (n0x1e3b-n0x1e3e) + I pa - 0x5f604e02, // n0x1db3 c0x017d (n0x1e3e-n0x1e41) + I pr - 0x5fa04e42, // n0x1db4 c0x017e (n0x1e41-n0x1e44) + I ri - 0x5fe07f02, // n0x1db5 c0x017f 
(n0x1e44-n0x1e47) + I sc - 0x6022b2c2, // n0x1db6 c0x0180 (n0x1e47-n0x1e49) + I sd - 0x000e8a0c, // n0x1db7 c0x0000 (---------------) + stuff-4-sale - 0x6064fd82, // n0x1db8 c0x0181 (n0x1e49-n0x1e4c) + I tn - 0x60a73102, // n0x1db9 c0x0182 (n0x1e4c-n0x1e4f) + I tx - 0x60e03f42, // n0x1dba c0x0183 (n0x1e4f-n0x1e52) + I ut - 0x61200c02, // n0x1dbb c0x0184 (n0x1e52-n0x1e55) + I va - 0x616065c2, // n0x1dbc c0x0185 (n0x1e55-n0x1e58) + I vi - 0x61a6a302, // n0x1dbd c0x0186 (n0x1e58-n0x1e5b) + I vt - 0x61e01c42, // n0x1dbe c0x0187 (n0x1e5b-n0x1e5e) + I wa - 0x62205c42, // n0x1dbf c0x0188 (n0x1e5e-n0x1e61) + I wi - 0x626753c2, // n0x1dc0 c0x0189 (n0x1e61-n0x1e62) + I wv - 0x62a71c82, // n0x1dc1 c0x018a (n0x1e62-n0x1e65) + I wy - 0x0022f6c2, // n0x1dc2 c0x0000 (---------------) + I cc - 0x00332603, // n0x1dc3 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dc4 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dc5 c0x0000 (---------------) + I cc - 0x00332603, // n0x1dc6 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dc7 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dc8 c0x0000 (---------------) + I cc - 0x00332603, // n0x1dc9 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dca c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dcb c0x0000 (---------------) + I cc - 0x00332603, // n0x1dcc c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dcd c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dce c0x0000 (---------------) + I cc - 0x00332603, // n0x1dcf c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dd0 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dd1 c0x0000 (---------------) + I cc - 0x00332603, // n0x1dd2 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dd3 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dd4 c0x0000 (---------------) + I cc - 0x00332603, // n0x1dd5 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dd6 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dd7 c0x0000 
(---------------) + I cc - 0x00332603, // n0x1dd8 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dd9 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dda c0x0000 (---------------) + I cc - 0x00332603, // n0x1ddb c0x0000 (---------------) + I k12 - 0x00272043, // n0x1ddc c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1ddd c0x0000 (---------------) + I cc - 0x00332603, // n0x1dde c0x0000 (---------------) + I k12 - 0x00272043, // n0x1ddf c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1de0 c0x0000 (---------------) + I cc - 0x00332603, // n0x1de1 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1de2 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1de3 c0x0000 (---------------) + I cc - 0x00332603, // n0x1de4 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1de5 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1de6 c0x0000 (---------------) + I cc - 0x00332603, // n0x1de7 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1de8 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1de9 c0x0000 (---------------) + I cc - 0x00272043, // n0x1dea c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1deb c0x0000 (---------------) + I cc - 0x00332603, // n0x1dec c0x0000 (---------------) + I k12 - 0x00272043, // n0x1ded c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dee c0x0000 (---------------) + I cc - 0x00332603, // n0x1def c0x0000 (---------------) + I k12 - 0x00272043, // n0x1df0 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1df1 c0x0000 (---------------) + I cc - 0x00332603, // n0x1df2 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1df3 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1df4 c0x0000 (---------------) + I cc - 0x00332603, // n0x1df5 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1df6 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1df7 c0x0000 (---------------) + I cc - 0x00332603, // n0x1df8 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1df9 c0x0000 
(---------------) + I lib - 0x0022f6c2, // n0x1dfa c0x0000 (---------------) + I cc - 0x00332603, // n0x1dfb c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dfc c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1dfd c0x0000 (---------------) + I cc - 0x00332603, // n0x1dfe c0x0000 (---------------) + I k12 - 0x00272043, // n0x1dff c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e00 c0x0000 (---------------) + I cc - 0x5a732603, // n0x1e01 c0x0169 (n0x1e03-n0x1e06) + I k12 - 0x00272043, // n0x1e02 c0x0000 (---------------) + I lib - 0x00305c04, // n0x1e03 c0x0000 (---------------) + I chtr - 0x0028be46, // n0x1e04 c0x0000 (---------------) + I paroch - 0x002e62c3, // n0x1e05 c0x0000 (---------------) + I pvt - 0x0022f6c2, // n0x1e06 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e07 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e08 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e09 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e0a c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e0b c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e0c c0x0000 (---------------) + I cc - 0x00332603, // n0x1e0d c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e0e c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e0f c0x0000 (---------------) + I cc - 0x00332603, // n0x1e10 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e11 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e12 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e13 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e14 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e15 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e16 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e17 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e18 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e19 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e1a c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e1b c0x0000 
(---------------) + I cc - 0x00332603, // n0x1e1c c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e1d c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e1e c0x0000 (---------------) + I cc - 0x00272043, // n0x1e1f c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e20 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e21 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e22 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e23 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e24 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e25 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e26 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e27 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e28 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e29 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e2a c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e2b c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e2c c0x0000 (---------------) + I cc - 0x00332603, // n0x1e2d c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e2e c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e2f c0x0000 (---------------) + I cc - 0x00332603, // n0x1e30 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e31 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e32 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e33 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e34 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e35 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e36 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e37 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e38 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e39 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e3a c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e3b c0x0000 (---------------) + I cc - 0x00332603, // n0x1e3c c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e3d c0x0000 
(---------------) + I lib - 0x0022f6c2, // n0x1e3e c0x0000 (---------------) + I cc - 0x00332603, // n0x1e3f c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e40 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e41 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e42 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e43 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e44 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e45 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e46 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e47 c0x0000 (---------------) + I cc - 0x00272043, // n0x1e48 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e49 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e4a c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e4b c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e4c c0x0000 (---------------) + I cc - 0x00332603, // n0x1e4d c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e4e c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e4f c0x0000 (---------------) + I cc - 0x00332603, // n0x1e50 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e51 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e52 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e53 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e54 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e55 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e56 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e57 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e58 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e59 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e5a c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e5b c0x0000 (---------------) + I cc - 0x00332603, // n0x1e5c c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e5d c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e5e c0x0000 (---------------) + I cc - 0x00332603, // n0x1e5f c0x0000 
(---------------) + I k12 - 0x00272043, // n0x1e60 c0x0000 (---------------) + I lib - 0x0022f6c2, // n0x1e61 c0x0000 (---------------) + I cc - 0x0022f6c2, // n0x1e62 c0x0000 (---------------) + I cc - 0x00332603, // n0x1e63 c0x0000 (---------------) + I k12 - 0x00272043, // n0x1e64 c0x0000 (---------------) + I lib - 0x63233243, // n0x1e65 c0x018c (n0x1e6b-n0x1e6c) + I com - 0x00239103, // n0x1e66 c0x0000 (---------------) + I edu - 0x00255e83, // n0x1e67 c0x0000 (---------------) + I gub - 0x00207dc3, // n0x1e68 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1e69 c0x0000 (---------------) + I net - 0x00228743, // n0x1e6a c0x0000 (---------------) + I org - 0x000fe108, // n0x1e6b c0x0000 (---------------) + blogspot - 0x0020ce42, // n0x1e6c c0x0000 (---------------) + I co - 0x00233243, // n0x1e6d c0x0000 (---------------) + I com - 0x00223b43, // n0x1e6e c0x0000 (---------------) + I net - 0x00228743, // n0x1e6f c0x0000 (---------------) + I org - 0x00233243, // n0x1e70 c0x0000 (---------------) + I com - 0x00239103, // n0x1e71 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1e72 c0x0000 (---------------) + I gov - 0x00207dc3, // n0x1e73 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1e74 c0x0000 (---------------) + I net - 0x00228743, // n0x1e75 c0x0000 (---------------) + I org - 0x0024b944, // n0x1e76 c0x0000 (---------------) + I arts - 0x0020ce42, // n0x1e77 c0x0000 (---------------) + I co - 0x00233243, // n0x1e78 c0x0000 (---------------) + I com - 0x003830c3, // n0x1e79 c0x0000 (---------------) + I e12 - 0x00239103, // n0x1e7a c0x0000 (---------------) + I edu - 0x0024dcc4, // n0x1e7b c0x0000 (---------------) + I firm - 0x00212b03, // n0x1e7c c0x0000 (---------------) + I gob - 0x0027d903, // n0x1e7d c0x0000 (---------------) + I gov - 0x00201844, // n0x1e7e c0x0000 (---------------) + I info - 0x00201503, // n0x1e7f c0x0000 (---------------) + I int - 0x00207dc3, // n0x1e80 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1e81 
c0x0000 (---------------) + I net - 0x00228743, // n0x1e82 c0x0000 (---------------) + I org - 0x0022c2c3, // n0x1e83 c0x0000 (---------------) + I rec - 0x00364cc5, // n0x1e84 c0x0000 (---------------) + I store - 0x00243703, // n0x1e85 c0x0000 (---------------) + I tec - 0x0021e243, // n0x1e86 c0x0000 (---------------) + I web - 0x0020ce42, // n0x1e87 c0x0000 (---------------) + I co - 0x00233243, // n0x1e88 c0x0000 (---------------) + I com - 0x00332603, // n0x1e89 c0x0000 (---------------) + I k12 - 0x00223b43, // n0x1e8a c0x0000 (---------------) + I net - 0x00228743, // n0x1e8b c0x0000 (---------------) + I org - 0x00200342, // n0x1e8c c0x0000 (---------------) + I ac - 0x00331a83, // n0x1e8d c0x0000 (---------------) + I biz - 0x000fe108, // n0x1e8e c0x0000 (---------------) + blogspot - 0x00233243, // n0x1e8f c0x0000 (---------------) + I com - 0x00239103, // n0x1e90 c0x0000 (---------------) + I edu - 0x0027d903, // n0x1e91 c0x0000 (---------------) + I gov - 0x002ae586, // n0x1e92 c0x0000 (---------------) + I health - 0x00201844, // n0x1e93 c0x0000 (---------------) + I info - 0x00201503, // n0x1e94 c0x0000 (---------------) + I int - 0x00200904, // n0x1e95 c0x0000 (---------------) + I name - 0x00223b43, // n0x1e96 c0x0000 (---------------) + I net - 0x00228743, // n0x1e97 c0x0000 (---------------) + I org - 0x00224b03, // n0x1e98 c0x0000 (---------------) + I pro - 0x00233243, // n0x1e99 c0x0000 (---------------) + I com - 0x00239103, // n0x1e9a c0x0000 (---------------) + I edu - 0x00223b43, // n0x1e9b c0x0000 (---------------) + I net - 0x00228743, // n0x1e9c c0x0000 (---------------) + I org - 0x00233243, // n0x1e9d c0x0000 (---------------) + I com - 0x00013206, // n0x1e9e c0x0000 (---------------) + dyndns - 0x00239103, // n0x1e9f c0x0000 (---------------) + I edu - 0x0027d903, // n0x1ea0 c0x0000 (---------------) + I gov - 0x000d1e46, // n0x1ea1 c0x0000 (---------------) + mypets - 0x00223b43, // n0x1ea2 c0x0000 (---------------) + I net - 
0x00228743, // n0x1ea3 c0x0000 (---------------) + I org - 0x0030ba08, // n0x1ea4 c0x0000 (---------------) + I xn--80au - 0x0030e409, // n0x1ea5 c0x0000 (---------------) + I xn--90azh - 0x0031b349, // n0x1ea6 c0x0000 (---------------) + I xn--c1avg - 0x0032a188, // n0x1ea7 c0x0000 (---------------) + I xn--d1at - 0x00372e88, // n0x1ea8 c0x0000 (---------------) + I xn--o1ac - 0x00372e89, // n0x1ea9 c0x0000 (---------------) + I xn--o1ach - 0x00200342, // n0x1eaa c0x0000 (---------------) + I ac - 0x00208a45, // n0x1eab c0x0000 (---------------) + I agric - 0x00239743, // n0x1eac c0x0000 (---------------) + I alt - 0x65a0ce42, // n0x1ead c0x0196 (n0x1ebb-n0x1ebc) + I co - 0x00239103, // n0x1eae c0x0000 (---------------) + I edu - 0x0027d903, // n0x1eaf c0x0000 (---------------) + I gov - 0x0037bc87, // n0x1eb0 c0x0000 (---------------) + I grondar - 0x00271c03, // n0x1eb1 c0x0000 (---------------) + I law - 0x00207dc3, // n0x1eb2 c0x0000 (---------------) + I mil - 0x00223b43, // n0x1eb3 c0x0000 (---------------) + I net - 0x0023b383, // n0x1eb4 c0x0000 (---------------) + I ngo - 0x00210383, // n0x1eb5 c0x0000 (---------------) + I nis - 0x00201383, // n0x1eb6 c0x0000 (---------------) + I nom - 0x00228743, // n0x1eb7 c0x0000 (---------------) + I org - 0x00232186, // n0x1eb8 c0x0000 (---------------) + I school - 0x00200c82, // n0x1eb9 c0x0000 (---------------) + I tm - 0x0021e243, // n0x1eba c0x0000 (---------------) + I web - 0x000fe108, // n0x1ebb c0x0000 (---------------) + blogspot + 0x00355603, // n0x0000 c0x0000 (---------------) + I aaa + 0x0034d544, // n0x0001 c0x0000 (---------------) + I aarp + 0x0026b886, // n0x0002 c0x0000 (---------------) + I abarth + 0x00230743, // n0x0003 c0x0000 (---------------) + I abb + 0x00230746, // n0x0004 c0x0000 (---------------) + I abbott + 0x00365706, // n0x0005 c0x0000 (---------------) + I abbvie + 0x00399843, // n0x0006 c0x0000 (---------------) + I abc + 0x0031f144, // n0x0007 c0x0000 (---------------) + I able + 
0x002ee207, // n0x0008 c0x0000 (---------------) + I abogado + 0x0026b4c8, // n0x0009 c0x0000 (---------------) + I abudhabi + 0x01a01542, // n0x000a c0x0006 (n0x0609-n0x060f) + I ac + 0x0030aec7, // n0x000b c0x0000 (---------------) + I academy + 0x00352a89, // n0x000c c0x0000 (---------------) + I accenture + 0x002d9b0a, // n0x000d c0x0000 (---------------) + I accountant + 0x002d9b0b, // n0x000e c0x0000 (---------------) + I accountants + 0x00232d83, // n0x000f c0x0000 (---------------) + I aco + 0x0028a206, // n0x0010 c0x0000 (---------------) + I active + 0x0023b505, // n0x0011 c0x0000 (---------------) + I actor + 0x01e00342, // n0x0012 c0x0007 (n0x060f-n0x0610) + I ad + 0x00212f84, // n0x0013 c0x0000 (---------------) + I adac + 0x0026ba03, // n0x0014 c0x0000 (---------------) + I ads + 0x002a1985, // n0x0015 c0x0000 (---------------) + I adult + 0x022035c2, // n0x0016 c0x0008 (n0x0610-n0x0618) + I ae + 0x0024a403, // n0x0017 c0x0000 (---------------) + I aeg + 0x026389c4, // n0x0018 c0x0009 (n0x0618-n0x066f) + I aero + 0x0025e585, // n0x0019 c0x0000 (---------------) + I aetna + 0x02a04a42, // n0x001a c0x000a (n0x066f-n0x0674) + I af + 0x0036ec8e, // n0x001b c0x0000 (---------------) + I afamilycompany + 0x00252703, // n0x001c c0x0000 (---------------) + I afl + 0x00375846, // n0x001d c0x0000 (---------------) + I africa + 0x0037584b, // n0x001e c0x0000 (---------------) + I africamagic + 0x02e01002, // n0x001f c0x000b (n0x0674-n0x0679) + I ag + 0x0034ac47, // n0x0020 c0x0000 (---------------) + I agakhan + 0x0023df86, // n0x0021 c0x0000 (---------------) + I agency + 0x032016c2, // n0x0022 c0x000c (n0x0679-n0x067d) + I ai + 0x00214d03, // n0x0023 c0x0000 (---------------) + I aig + 0x00214d04, // n0x0024 c0x0000 (---------------) + I aigo + 0x0022b886, // n0x0025 c0x0000 (---------------) + I airbus + 0x00338648, // n0x0026 c0x0000 (---------------) + I airforce + 0x00286586, // n0x0027 c0x0000 (---------------) + I airtel + 0x00227644, // n0x0028 c0x0000 
(---------------) + I akdn + 0x036001c2, // n0x0029 c0x000d (n0x067d-n0x0684) + I al + 0x00328e49, // n0x002a c0x0000 (---------------) + I alfaromeo + 0x00345287, // n0x002b c0x0000 (---------------) + I alibaba + 0x002bc006, // n0x002c c0x0000 (---------------) + I alipay + 0x0033e3c9, // n0x002d c0x0000 (---------------) + I allfinanz + 0x0020f148, // n0x002e c0x0000 (---------------) + I allstate + 0x00213584, // n0x002f c0x0000 (---------------) + I ally + 0x0021dd86, // n0x0030 c0x0000 (---------------) + I alsace + 0x0020be86, // n0x0031 c0x0000 (---------------) + I alstom + 0x03a01882, // n0x0032 c0x000e (n0x0684-n0x0685) + I am + 0x0024728f, // n0x0033 c0x0000 (---------------) + I americanexpress + 0x00208d8e, // n0x0034 c0x0000 (---------------) + I americanfamily + 0x002052c4, // n0x0035 c0x0000 (---------------) + I amex + 0x00367585, // n0x0036 c0x0000 (---------------) + I amfam + 0x00230645, // n0x0037 c0x0000 (---------------) + I amica + 0x002c8449, // n0x0038 c0x0000 (---------------) + I amsterdam + 0x00243f09, // n0x0039 c0x0000 (---------------) + I analytics + 0x0031a8c7, // n0x003a c0x0000 (---------------) + I android + 0x00350706, // n0x003b c0x0000 (---------------) + I anquan + 0x00256b43, // n0x003c c0x0000 (---------------) + I anz + 0x03e029c2, // n0x003d c0x000f (n0x0685-n0x068b) + I ao + 0x00275643, // n0x003e c0x0000 (---------------) + I aol + 0x0022ce0a, // n0x003f c0x0000 (---------------) + I apartments + 0x002092c3, // n0x0040 c0x0000 (---------------) + I app + 0x00331985, // n0x0041 c0x0000 (---------------) + I apple + 0x002003c2, // n0x0042 c0x0000 (---------------) + I aq + 0x00285a49, // n0x0043 c0x0000 (---------------) + I aquarelle + 0x04200a42, // n0x0044 c0x0010 (n0x068b-n0x0694) + I ar + 0x00202044, // n0x0045 c0x0000 (---------------) + I arab + 0x00355146, // n0x0046 c0x0000 (---------------) + I aramco + 0x002fb805, // n0x0047 c0x0000 (---------------) + I archi + 0x00348744, // n0x0048 c0x0000 
(---------------) + I army + 0x04a29dc4, // n0x0049 c0x0012 (n0x0695-n0x069b) + I arpa + 0x0023a6c4, // n0x004a c0x0000 (---------------) + I arte + 0x04e01d42, // n0x004b c0x0013 (n0x069b-n0x069c) + I as + 0x0034d284, // n0x004c c0x0000 (---------------) + I asda + 0x00322ec4, // n0x004d c0x0000 (---------------) + I asia + 0x003437ca, // n0x004e c0x0000 (---------------) + I associates + 0x05200102, // n0x004f c0x0014 (n0x069c-n0x06a3) + I at + 0x00248f47, // n0x0050 c0x0000 (---------------) + I athleta + 0x0031fec8, // n0x0051 c0x0000 (---------------) + I attorney + 0x05a04f82, // n0x0052 c0x0016 (n0x06a4-n0x06b6) + I au + 0x00319e07, // n0x0053 c0x0000 (---------------) + I auction + 0x00233104, // n0x0054 c0x0000 (---------------) + I audi + 0x002b7d87, // n0x0055 c0x0000 (---------------) + I audible + 0x00233105, // n0x0056 c0x0000 (---------------) + I audio + 0x0035fd47, // n0x0057 c0x0000 (---------------) + I auspost + 0x0031a686, // n0x0058 c0x0000 (---------------) + I author + 0x00265104, // n0x0059 c0x0000 (---------------) + I auto + 0x00324dc5, // n0x005a c0x0000 (---------------) + I autos + 0x002dc007, // n0x005b c0x0000 (---------------) + I avianca + 0x06a01082, // n0x005c c0x001a (n0x06c4-n0x06c5) + I aw + 0x002f6583, // n0x005d c0x0000 (---------------) + I aws + 0x00220402, // n0x005e c0x0000 (---------------) + I ax + 0x00356983, // n0x005f c0x0000 (---------------) + I axa + 0x06e05f42, // n0x0060 c0x001b (n0x06c5-n0x06d1) + I az + 0x0021be05, // n0x0061 c0x0000 (---------------) + I azure + 0x07202002, // n0x0062 c0x001c (n0x06d1-n0x06dc) + I ba + 0x002ce1c4, // n0x0063 c0x0000 (---------------) + I baby + 0x0027c805, // n0x0064 c0x0000 (---------------) + I baidu + 0x00205207, // n0x0065 c0x0000 (---------------) + I banamex + 0x002e97ce, // n0x0066 c0x0000 (---------------) + I bananarepublic + 0x00207cc4, // n0x0067 c0x0000 (---------------) + I band + 0x00203704, // n0x0068 c0x0000 (---------------) + I bank + 0x00202003, // n0x0069 
c0x0000 (---------------) + I bar + 0x002dd889, // n0x006a c0x0000 (---------------) + I barcelona + 0x002c12cb, // n0x006b c0x0000 (---------------) + I barclaycard + 0x002dc608, // n0x006c c0x0000 (---------------) + I barclays + 0x00306d88, // n0x006d c0x0000 (---------------) + I barefoot + 0x0030e948, // n0x006e c0x0000 (---------------) + I bargains + 0x0022f9c8, // n0x006f c0x0000 (---------------) + I baseball + 0x0033e20a, // n0x0070 c0x0000 (---------------) + I basketball + 0x0035fc47, // n0x0071 c0x0000 (---------------) + I bauhaus + 0x00351786, // n0x0072 c0x0000 (---------------) + I bayern + 0x07630782, // n0x0073 c0x001d (n0x06dc-n0x06e6) + I bb + 0x003651c3, // n0x0074 c0x0000 (---------------) + I bbc + 0x0036cbc3, // n0x0075 c0x0000 (---------------) + I bbt + 0x00376984, // n0x0076 c0x0000 (---------------) + I bbva + 0x00399883, // n0x0077 c0x0000 (---------------) + I bcg + 0x00239b43, // n0x0078 c0x0000 (---------------) + I bcn + 0x017129c2, // n0x0079 c0x0005 (---------------)* o I bd + 0x07a03302, // n0x007a c0x001e (n0x06e6-n0x06e8) + I be + 0x0022b505, // n0x007b c0x0000 (---------------) + I beats + 0x0024f5c6, // n0x007c c0x0000 (---------------) + I beauty + 0x002ccb84, // n0x007d c0x0000 (---------------) + I beer + 0x00383b87, // n0x007e c0x0000 (---------------) + I bentley + 0x0022e6c6, // n0x007f c0x0000 (---------------) + I berlin + 0x002291c4, // n0x0080 c0x0000 (---------------) + I best + 0x00398887, // n0x0081 c0x0000 (---------------) + I bestbuy + 0x00208c03, // n0x0082 c0x0000 (---------------) + I bet + 0x07f5d242, // n0x0083 c0x001f (n0x06e8-n0x06e9) + I bf + 0x082ee482, // n0x0084 c0x0020 (n0x06e9-n0x070e) + I bg + 0x08615602, // n0x0085 c0x0021 (n0x070e-n0x0713) + I bh + 0x00215606, // n0x0086 c0x0000 (---------------) + I bharti + 0x08a00002, // n0x0087 c0x0022 (n0x0713-n0x0718) + I bi + 0x0037bf45, // n0x0088 c0x0000 (---------------) + I bible + 0x00313f03, // n0x0089 c0x0000 (---------------) + I bid + 
0x00202544, // n0x008a c0x0000 (---------------) + I bike + 0x002dc444, // n0x008b c0x0000 (---------------) + I bing + 0x002dc445, // n0x008c c0x0000 (---------------) + I bingo + 0x00203a03, // n0x008d c0x0000 (---------------) + I bio + 0x08f30b83, // n0x008e c0x0023 (n0x0718-n0x0720) + I biz + 0x09206502, // n0x008f c0x0024 (n0x0720-n0x0724) + I bj + 0x00288ec5, // n0x0090 c0x0000 (---------------) + I black + 0x00288ecb, // n0x0091 c0x0000 (---------------) + I blackfriday + 0x00258fc6, // n0x0092 c0x0000 (---------------) + I blanco + 0x0020c8cb, // n0x0093 c0x0000 (---------------) + I blockbuster + 0x002a4004, // n0x0094 c0x0000 (---------------) + I blog + 0x0020d009, // n0x0095 c0x0000 (---------------) + I bloomberg + 0x0020e104, // n0x0096 c0x0000 (---------------) + I blue + 0x0960e742, // n0x0097 c0x0025 (n0x0724-n0x0729) + I bm + 0x0020f6c3, // n0x0098 c0x0000 (---------------) + I bms + 0x0020fc43, // n0x0099 c0x0000 (---------------) + I bmw + 0x016104c2, // n0x009a c0x0005 (---------------)* o I bn + 0x002477c3, // n0x009b c0x0000 (---------------) + I bnl + 0x002104ca, // n0x009c c0x0000 (---------------) + I bnpparibas + 0x09a0e402, // n0x009d c0x0026 (n0x0729-n0x0732) + I bo + 0x0030b145, // n0x009e c0x0000 (---------------) + I boats + 0x0020e40a, // n0x009f c0x0000 (---------------) + I boehringer + 0x00290b84, // n0x00a0 c0x0000 (---------------) + I bofa + 0x00210ac3, // n0x00a1 c0x0000 (---------------) + I bom + 0x00210f84, // n0x00a2 c0x0000 (---------------) + I bond + 0x00215403, // n0x00a3 c0x0000 (---------------) + I boo + 0x00215404, // n0x00a4 c0x0000 (---------------) + I book + 0x00215407, // n0x00a5 c0x0000 (---------------) + I booking + 0x00215e85, // n0x00a6 c0x0000 (---------------) + I boots + 0x002173c5, // n0x00a7 c0x0000 (---------------) + I bosch + 0x00217986, // n0x00a8 c0x0000 (---------------) + I bostik + 0x00217cc6, // n0x00a9 c0x0000 (---------------) + I boston + 0x00218e83, // n0x00aa c0x0000 (---------------) 
+ I bot + 0x0021b7c8, // n0x00ab c0x0000 (---------------) + I boutique + 0x0021bb83, // n0x00ac c0x0000 (---------------) + I box + 0x09e1c402, // n0x00ad c0x0027 (n0x0732-n0x0778) + I br + 0x0021c408, // n0x00ae c0x0000 (---------------) + I bradesco + 0x00221b8b, // n0x00af c0x0000 (---------------) + I bridgestone + 0x00220008, // n0x00b0 c0x0000 (---------------) + I broadway + 0x00220c46, // n0x00b1 c0x0000 (---------------) + I broker + 0x00222007, // n0x00b2 c0x0000 (---------------) + I brother + 0x00225e88, // n0x00b3 c0x0000 (---------------) + I brussels + 0x0a637542, // n0x00b4 c0x0029 (n0x0779-n0x077e) + I bs + 0x0aa1fd02, // n0x00b5 c0x002a (n0x077e-n0x0783) + I bt + 0x0020a008, // n0x00b6 c0x0000 (---------------) + I budapest + 0x002e5b47, // n0x00b7 c0x0000 (---------------) + I bugatti + 0x002410c5, // n0x00b8 c0x0000 (---------------) + I build + 0x002410c8, // n0x00b9 c0x0000 (---------------) + I builders + 0x0022c188, // n0x00ba c0x0000 (---------------) + I business + 0x003004c3, // n0x00bb c0x0000 (---------------) + I buy + 0x0022dec4, // n0x00bc c0x0000 (---------------) + I buzz + 0x00365782, // n0x00bd c0x0000 (---------------) + I bv + 0x0ae2fe42, // n0x00be c0x002b (n0x0783-n0x0785) + I bw + 0x0b20f982, // n0x00bf c0x002c (n0x0785-n0x0789) + I by + 0x0ba30bc2, // n0x00c0 c0x002e (n0x078a-n0x0790) + I bz + 0x00230bc3, // n0x00c1 c0x0000 (---------------) + I bzh + 0x0be00302, // n0x00c2 c0x002f (n0x0790-n0x07a1) + I ca + 0x00230703, // n0x00c3 c0x0000 (---------------) + I cab + 0x0030ee44, // n0x00c4 c0x0000 (---------------) + I cafe + 0x00213543, // n0x00c5 c0x0000 (---------------) + I cal + 0x00213544, // n0x00c6 c0x0000 (---------------) + I call + 0x002ed9cb, // n0x00c7 c0x0000 (---------------) + I calvinklein + 0x0037c746, // n0x00c8 c0x0000 (---------------) + I camera + 0x00241ac4, // n0x00c9 c0x0000 (---------------) + I camp + 0x0029f88e, // n0x00ca c0x0000 (---------------) + I cancerresearch + 0x00265485, // n0x00cb 
c0x0000 (---------------) + I canon + 0x002dc148, // n0x00cc c0x0000 (---------------) + I capetown + 0x002f0b87, // n0x00cd c0x0000 (---------------) + I capital + 0x002f0b8a, // n0x00ce c0x0000 (---------------) + I capitalone + 0x0020af43, // n0x00cf c0x0000 (---------------) + I car + 0x00236a47, // n0x00d0 c0x0000 (---------------) + I caravan + 0x002c1485, // n0x00d1 c0x0000 (---------------) + I cards + 0x0036b504, // n0x00d2 c0x0000 (---------------) + I care + 0x0036b506, // n0x00d3 c0x0000 (---------------) + I career + 0x0036b507, // n0x00d4 c0x0000 (---------------) + I careers + 0x00305f84, // n0x00d5 c0x0000 (---------------) + I cars + 0x00390b47, // n0x00d6 c0x0000 (---------------) + I cartier + 0x00214604, // n0x00d7 c0x0000 (---------------) + I casa + 0x002188c4, // n0x00d8 c0x0000 (---------------) + I case + 0x002188c6, // n0x00d9 c0x0000 (---------------) + I caseih + 0x002c99c4, // n0x00da c0x0000 (---------------) + I cash + 0x0037bc46, // n0x00db c0x0000 (---------------) + I casino + 0x0020df43, // n0x00dc c0x0000 (---------------) + I cat + 0x00351008, // n0x00dd c0x0000 (---------------) + I catering + 0x0037ba88, // n0x00de c0x0000 (---------------) + I catholic + 0x0024b283, // n0x00df c0x0000 (---------------) + I cba + 0x00247783, // n0x00e0 c0x0000 (---------------) + I cbn + 0x0038c544, // n0x00e1 c0x0000 (---------------) + I cbre + 0x00391103, // n0x00e2 c0x0000 (---------------) + I cbs + 0x0c22e182, // n0x00e3 c0x0030 (n0x07a1-n0x07a5) + I cc + 0x0c63e2c2, // n0x00e4 c0x0031 (n0x07a5-n0x07a6) + I cd + 0x00206483, // n0x00e5 c0x0000 (---------------) + I ceb + 0x00204486, // n0x00e6 c0x0000 (---------------) + I center + 0x002c87c3, // n0x00e7 c0x0000 (---------------) + I ceo + 0x002e61c4, // n0x00e8 c0x0000 (---------------) + I cern + 0x0ca14202, // n0x00e9 c0x0032 (n0x07a6-n0x07a7) + I cf + 0x00214203, // n0x00ea c0x0000 (---------------) + I cfa + 0x00379503, // n0x00eb c0x0000 (---------------) + I cfd + 0x0021a302, // 
n0x00ec c0x0000 (---------------) + I cg + 0x0ce01582, // n0x00ed c0x0033 (n0x07a7-n0x07a8) + I ch + 0x002ba9c6, // n0x00ee c0x0000 (---------------) + I chanel + 0x0023c407, // n0x00ef c0x0000 (---------------) + I channel + 0x00329845, // n0x00f0 c0x0000 (---------------) + I chase + 0x0022ac44, // n0x00f1 c0x0000 (---------------) + I chat + 0x00284305, // n0x00f2 c0x0000 (---------------) + I cheap + 0x00201587, // n0x00f3 c0x0000 (---------------) + I chintai + 0x002aee85, // n0x00f4 c0x0000 (---------------) + I chloe + 0x003a3989, // n0x00f5 c0x0000 (---------------) + I christmas + 0x003024c6, // n0x00f6 c0x0000 (---------------) + I chrome + 0x00303188, // n0x00f7 c0x0000 (---------------) + I chrysler + 0x00329746, // n0x00f8 c0x0000 (---------------) + I church + 0x0d200682, // n0x00f9 c0x0034 (n0x07a8-n0x07b7) + I ci + 0x0023fe08, // n0x00fa c0x0000 (---------------) + I cipriani + 0x00336206, // n0x00fb c0x0000 (---------------) + I circle + 0x00200685, // n0x00fc c0x0000 (---------------) + I cisco + 0x0033cc87, // n0x00fd c0x0000 (---------------) + I citadel + 0x00350f04, // n0x00fe c0x0000 (---------------) + I citi + 0x00350f05, // n0x00ff c0x0000 (---------------) + I citic + 0x00286744, // n0x0100 c0x0000 (---------------) + I city + 0x00286748, // n0x0101 c0x0000 (---------------) + I cityeats + 0x0d60b482, // n0x0102 c0x0035 (n0x07b7-n0x07b8)* o I ck + 0x0da09182, // n0x0103 c0x0036 (n0x07b8-n0x07bd) + I cl + 0x00378546, // n0x0104 c0x0000 (---------------) + I claims + 0x002242c8, // n0x0105 c0x0000 (---------------) + I cleaning + 0x00379a45, // n0x0106 c0x0000 (---------------) + I click + 0x0037b946, // n0x0107 c0x0000 (---------------) + I clinic + 0x00387188, // n0x0108 c0x0000 (---------------) + I clinique + 0x0039a588, // n0x0109 c0x0000 (---------------) + I clothing + 0x00209185, // n0x010a c0x0000 (---------------) + I cloud + 0x00238ac4, // n0x010b c0x0000 (---------------) + I club + 0x00238ac7, // n0x010c c0x0000 
(---------------) + I clubmed + 0x0de5d142, // n0x010d c0x0037 (n0x07bd-n0x07c1) + I cm + 0x0e21ba42, // n0x010e c0x0038 (n0x07c1-n0x07ee) + I cn + 0x0fa00742, // n0x010f c0x003e (n0x07f3-n0x0800) + I co + 0x00358885, // n0x0110 c0x0000 (---------------) + I coach + 0x0029bd05, // n0x0111 c0x0000 (---------------) + I codes + 0x0020b246, // n0x0112 c0x0000 (---------------) + I coffee + 0x0022e1c7, // n0x0113 c0x0000 (---------------) + I college + 0x00231a87, // n0x0114 c0x0000 (---------------) + I cologne + 0x10233503, // n0x0115 c0x0040 (n0x0801-n0x08d7) + I com + 0x002a6b87, // n0x0116 c0x0000 (---------------) + I comcast + 0x002da788, // n0x0117 c0x0000 (---------------) + I commbank + 0x00233509, // n0x0118 c0x0000 (---------------) + I community + 0x0036ee47, // n0x0119 c0x0000 (---------------) + I company + 0x00234a07, // n0x011a c0x0000 (---------------) + I compare + 0x00235488, // n0x011b c0x0000 (---------------) + I computer + 0x00235c86, // n0x011c c0x0000 (---------------) + I comsec + 0x00236246, // n0x011d c0x0000 (---------------) + I condos + 0x00236f4c, // n0x011e c0x0000 (---------------) + I construction + 0x00237d0a, // n0x011f c0x0000 (---------------) + I consulting + 0x00239e07, // n0x0120 c0x0000 (---------------) + I contact + 0x0023b3cb, // n0x0121 c0x0000 (---------------) + I contractors + 0x0023c247, // n0x0122 c0x0000 (---------------) + I cooking + 0x0023c24e, // n0x0123 c0x0000 (---------------) + I cookingchannel + 0x0023d384, // n0x0124 c0x0000 (---------------) + I cool + 0x0023d684, // n0x0125 c0x0000 (---------------) + I coop + 0x0023ea07, // n0x0126 c0x0000 (---------------) + I corsica + 0x00336587, // n0x0127 c0x0000 (---------------) + I country + 0x002423c6, // n0x0128 c0x0000 (---------------) + I coupon + 0x002423c7, // n0x0129 c0x0000 (---------------) + I coupons + 0x00242fc7, // n0x012a c0x0000 (---------------) + I courses + 0x126049c2, // n0x012b c0x0049 (n0x08fe-n0x0905) + I cr + 0x002447c6, // n0x012c 
c0x0000 (---------------) + I credit + 0x002447ca, // n0x012d c0x0000 (---------------) + I creditcard + 0x00244a4b, // n0x012e c0x0000 (---------------) + I creditunion + 0x00245b47, // n0x012f c0x0000 (---------------) + I cricket + 0x00246505, // n0x0130 c0x0000 (---------------) + I crown + 0x00246643, // n0x0131 c0x0000 (---------------) + I crs + 0x00246b46, // n0x0132 c0x0000 (---------------) + I cruise + 0x00246b47, // n0x0133 c0x0000 (---------------) + I cruises + 0x002440c3, // n0x0134 c0x0000 (---------------) + I csc + 0x12a09d82, // n0x0135 c0x004a (n0x0905-n0x090b) + I cu + 0x00246f0a, // n0x0136 c0x0000 (---------------) + I cuisinella + 0x12f53bc2, // n0x0137 c0x004b (n0x090b-n0x090c) + I cv + 0x132c95c2, // n0x0138 c0x004c (n0x090c-n0x0910) + I cw + 0x136482c2, // n0x0139 c0x004d (n0x0910-n0x0912) + I cx + 0x13a3e082, // n0x013a c0x004e (n0x0912-n0x091f) o I cy + 0x0024a985, // n0x013b c0x0000 (---------------) + I cymru + 0x0024b084, // n0x013c c0x0000 (---------------) + I cyou + 0x14229ec2, // n0x013d c0x0050 (n0x0920-n0x0922) + I cz + 0x0034d305, // n0x013e c0x0000 (---------------) + I dabur + 0x002a1943, // n0x013f c0x0000 (---------------) + I dad + 0x00223d45, // n0x0140 c0x0000 (---------------) + I dance + 0x0020c7c4, // n0x0141 c0x0000 (---------------) + I date + 0x0020e206, // n0x0142 c0x0000 (---------------) + I dating + 0x00292c46, // n0x0143 c0x0000 (---------------) + I datsun + 0x00265983, // n0x0144 c0x0000 (---------------) + I day + 0x0023f244, // n0x0145 c0x0000 (---------------) + I dclk + 0x0025f303, // n0x0146 c0x0000 (---------------) + I dds + 0x14604d82, // n0x0147 c0x0051 (n0x0922-n0x092a) + I de + 0x00204d84, // n0x0148 c0x0000 (---------------) + I deal + 0x00239046, // n0x0149 c0x0000 (---------------) + I dealer + 0x00204d85, // n0x014a c0x0000 (---------------) + I deals + 0x003a0286, // n0x014b c0x0000 (---------------) + I degree + 0x0033cd88, // n0x014c c0x0000 (---------------) + I delivery + 0x0025a384, // 
n0x014d c0x0000 (---------------) + I dell + 0x00345f48, // n0x014e c0x0000 (---------------) + I deloitte + 0x00311f45, // n0x014f c0x0000 (---------------) + I delta + 0x002265c8, // n0x0150 c0x0000 (---------------) + I democrat + 0x002abe06, // n0x0151 c0x0000 (---------------) + I dental + 0x002b2407, // n0x0152 c0x0000 (---------------) + I dentist + 0x0022dcc4, // n0x0153 c0x0000 (---------------) + I desi + 0x0022dcc6, // n0x0154 c0x0000 (---------------) + I design + 0x002329c3, // n0x0155 c0x0000 (---------------) + I dev + 0x0037a7c3, // n0x0156 c0x0000 (---------------) + I dhl + 0x002c42c8, // n0x0157 c0x0000 (---------------) + I diamonds + 0x0030ce84, // n0x0158 c0x0000 (---------------) + I diet + 0x00308ec7, // n0x0159 c0x0000 (---------------) + I digital + 0x0024d786, // n0x015a c0x0000 (---------------) + I direct + 0x0024d789, // n0x015b c0x0000 (---------------) + I directory + 0x0031aa48, // n0x015c c0x0000 (---------------) + I discount + 0x00330408, // n0x015d c0x0000 (---------------) + I discover + 0x00229304, // n0x015e c0x0000 (---------------) + I dish + 0x00320843, // n0x015f c0x0000 (---------------) + I diy + 0x00266a02, // n0x0160 c0x0000 (---------------) + I dj + 0x14a494c2, // n0x0161 c0x0052 (n0x092a-n0x092b) + I dk + 0x14e0fa82, // n0x0162 c0x0053 (n0x092b-n0x0930) + I dm + 0x00321fc3, // n0x0163 c0x0000 (---------------) + I dnp + 0x15213282, // n0x0164 c0x0054 (n0x0930-n0x093a) + I do + 0x002ee344, // n0x0165 c0x0000 (---------------) + I docs + 0x00213285, // n0x0166 c0x0000 (---------------) + I dodge + 0x002459c3, // n0x0167 c0x0000 (---------------) + I dog + 0x00236044, // n0x0168 c0x0000 (---------------) + I doha + 0x00339ec7, // n0x0169 c0x0000 (---------------) + I domains + 0x0032f743, // n0x016a c0x0000 (---------------) + I dot + 0x00269fc8, // n0x016b c0x0000 (---------------) + I download + 0x00354745, // n0x016c c0x0000 (---------------) + I drive + 0x00356c44, // n0x016d c0x0000 (---------------) + I dstv + 
0x00364483, // n0x016e c0x0000 (---------------) + I dtv + 0x0027c785, // n0x016f c0x0000 (---------------) + I dubai + 0x0027c8c4, // n0x0170 c0x0000 (---------------) + I duck + 0x00390846, // n0x0171 c0x0000 (---------------) + I dunlop + 0x003a6804, // n0x0172 c0x0000 (---------------) + I duns + 0x002008c6, // n0x0173 c0x0000 (---------------) + I dupont + 0x00205146, // n0x0174 c0x0000 (---------------) + I durban + 0x00317284, // n0x0175 c0x0000 (---------------) + I dvag + 0x00212b03, // n0x0176 c0x0000 (---------------) + I dwg + 0x15607a82, // n0x0177 c0x0055 (n0x093a-n0x0942) + I dz + 0x0034ab05, // n0x0178 c0x0000 (---------------) + I earth + 0x0022b543, // n0x0179 c0x0000 (---------------) + I eat + 0x15a09b02, // n0x017a c0x0056 (n0x0942-n0x094e) + I ec + 0x002b94c5, // n0x017b c0x0000 (---------------) + I edeka + 0x0023a783, // n0x017c c0x0000 (---------------) + I edu + 0x0023a789, // n0x017d c0x0000 (---------------) + I education + 0x15e0b342, // n0x017e c0x0057 (n0x094e-n0x0958) + I ee + 0x16608442, // n0x017f c0x0059 (n0x0959-n0x0962) + I eg + 0x002ce585, // n0x0180 c0x0000 (---------------) + I email + 0x002c4646, // n0x0181 c0x0000 (---------------) + I emerck + 0x00356047, // n0x0182 c0x0000 (---------------) + I emerson + 0x002cb4c6, // n0x0183 c0x0000 (---------------) + I energy + 0x00369148, // n0x0184 c0x0000 (---------------) + I engineer + 0x0036914b, // n0x0185 c0x0000 (---------------) + I engineering + 0x002044cb, // n0x0186 c0x0000 (---------------) + I enterprises + 0x0039f945, // n0x0187 c0x0000 (---------------) + I epost + 0x0039ffc5, // n0x0188 c0x0000 (---------------) + I epson + 0x002c2709, // n0x0189 c0x0000 (---------------) + I equipment + 0x01603682, // n0x018a c0x0005 (---------------)* o I er + 0x00317448, // n0x018b c0x0000 (---------------) + I ericsson + 0x0020cb04, // n0x018c c0x0000 (---------------) + I erni + 0x16e00482, // n0x018d c0x005b (n0x0963-n0x0968) + I es + 0x0027a303, // n0x018e c0x0000 
(---------------) + I esq + 0x002c2486, // n0x018f c0x0000 (---------------) + I estate + 0x0028d2c8, // n0x0190 c0x0000 (---------------) + I esurance + 0x176053c2, // n0x0191 c0x005d (n0x0969-n0x0971) + I et + 0x002234c8, // n0x0192 c0x0000 (---------------) + I etisalat + 0x00204b82, // n0x0193 c0x0000 (---------------) + I eu + 0x0033c78a, // n0x0194 c0x0000 (---------------) + I eurovision + 0x00228883, // n0x0195 c0x0000 (---------------) + I eus + 0x00232a06, // n0x0196 c0x0000 (---------------) + I events + 0x00203608, // n0x0197 c0x0000 (---------------) + I everbank + 0x00239488, // n0x0198 c0x0000 (---------------) + I exchange + 0x0031ee06, // n0x0199 c0x0000 (---------------) + I expert + 0x00363107, // n0x019a c0x0000 (---------------) + I exposed + 0x00247487, // n0x019b c0x0000 (---------------) + I express + 0x0020ba0a, // n0x019c c0x0000 (---------------) + I extraspace + 0x00290c04, // n0x019d c0x0000 (---------------) + I fage + 0x00214244, // n0x019e c0x0000 (---------------) + I fail + 0x0033b609, // n0x019f c0x0000 (---------------) + I fairwinds + 0x00350405, // n0x01a0 c0x0000 (---------------) + I faith + 0x00208f86, // n0x01a1 c0x0000 (---------------) + I family + 0x00211d03, // n0x01a2 c0x0000 (---------------) + I fan + 0x002e0ec4, // n0x01a3 c0x0000 (---------------) + I fans + 0x00271d04, // n0x01a4 c0x0000 (---------------) + I farm + 0x002ece87, // n0x01a5 c0x0000 (---------------) + I farmers + 0x0022fec7, // n0x01a6 c0x0000 (---------------) + I fashion + 0x0024c204, // n0x01a7 c0x0000 (---------------) + I fast + 0x00211505, // n0x01a8 c0x0000 (---------------) + I fedex + 0x0020b308, // n0x01a9 c0x0000 (---------------) + I feedback + 0x00337007, // n0x01aa c0x0000 (---------------) + I ferrari + 0x00343407, // n0x01ab c0x0000 (---------------) + I ferrero + 0x17a07502, // n0x01ac c0x005e (n0x0971-n0x0974) + I fi + 0x00294744, // n0x01ad c0x0000 (---------------) + I fiat + 0x0035ee48, // n0x01ae c0x0000 (---------------) + I 
fidelity + 0x00365b84, // n0x01af c0x0000 (---------------) + I fido + 0x0024b784, // n0x01b0 c0x0000 (---------------) + I film + 0x0024bb85, // n0x01b1 c0x0000 (---------------) + I final + 0x0024bcc7, // n0x01b2 c0x0000 (---------------) + I finance + 0x00207509, // n0x01b3 c0x0000 (---------------) + I financial + 0x0024c6c4, // n0x01b4 c0x0000 (---------------) + I fire + 0x0024d4c9, // n0x01b5 c0x0000 (---------------) + I firestone + 0x0024d9c8, // n0x01b6 c0x0000 (---------------) + I firmdale + 0x0024e044, // n0x01b7 c0x0000 (---------------) + I fish + 0x0024e047, // n0x01b8 c0x0000 (---------------) + I fishing + 0x0024f083, // n0x01b9 c0x0000 (---------------) + I fit + 0x0024f807, // n0x01ba c0x0000 (---------------) + I fitness + 0x01615b02, // n0x01bb c0x0005 (---------------)* o I fj + 0x01799fc2, // n0x01bc c0x0005 (---------------)* o I fk + 0x00250686, // n0x01bd c0x0000 (---------------) + I flickr + 0x00251287, // n0x01be c0x0000 (---------------) + I flights + 0x00251c04, // n0x01bf c0x0000 (---------------) + I flir + 0x00252b07, // n0x01c0 c0x0000 (---------------) + I florist + 0x002539c7, // n0x01c1 c0x0000 (---------------) + I flowers + 0x00253f08, // n0x01c2 c0x0000 (---------------) + I flsmidth + 0x002549c3, // n0x01c3 c0x0000 (---------------) + I fly + 0x00242902, // n0x01c4 c0x0000 (---------------) + I fm + 0x002558c2, // n0x01c5 c0x0000 (---------------) + I fo + 0x00255a83, // n0x01c6 c0x0000 (---------------) + I foo + 0x00255a8b, // n0x01c7 c0x0000 (---------------) + I foodnetwork + 0x00306e88, // n0x01c8 c0x0000 (---------------) + I football + 0x0039bf44, // n0x01c9 c0x0000 (---------------) + I ford + 0x00257585, // n0x01ca c0x0000 (---------------) + I forex + 0x00259787, // n0x01cb c0x0000 (---------------) + I forsale + 0x0025b085, // n0x01cc c0x0000 (---------------) + I forum + 0x002b9f4a, // n0x01cd c0x0000 (---------------) + I foundation + 0x0025c143, // n0x01ce c0x0000 (---------------) + I fox + 0x17e00582, // 
n0x01cf c0x005f (n0x0974-n0x098c) + I fr + 0x002e7d04, // n0x01d0 c0x0000 (---------------) + I free + 0x0025f7c9, // n0x01d1 c0x0000 (---------------) + I fresenius + 0x00263603, // n0x01d2 c0x0000 (---------------) + I frl + 0x002636c7, // n0x01d3 c0x0000 (---------------) + I frogans + 0x0039d609, // n0x01d4 c0x0000 (---------------) + I frontdoor + 0x003980c8, // n0x01d5 c0x0000 (---------------) + I frontier + 0x00204a83, // n0x01d6 c0x0000 (---------------) + I ftr + 0x0027b8c7, // n0x01d7 c0x0000 (---------------) + I fujitsu + 0x0027bdc9, // n0x01d8 c0x0000 (---------------) + I fujixerox + 0x002312c3, // n0x01d9 c0x0000 (---------------) + I fun + 0x00283c84, // n0x01da c0x0000 (---------------) + I fund + 0x00285349, // n0x01db c0x0000 (---------------) + I furniture + 0x00287806, // n0x01dc c0x0000 (---------------) + I futbol + 0x002888c3, // n0x01dd c0x0000 (---------------) + I fyi + 0x00201042, // n0x01de c0x0000 (---------------) + I ga + 0x0021dd43, // n0x01df c0x0000 (---------------) + I gal + 0x00392147, // n0x01e0 c0x0000 (---------------) + I gallery + 0x00336385, // n0x01e1 c0x0000 (---------------) + I gallo + 0x002dd686, // n0x01e2 c0x0000 (---------------) + I gallup + 0x00297cc4, // n0x01e3 c0x0000 (---------------) + I game + 0x003700c5, // n0x01e4 c0x0000 (---------------) + I games + 0x0022cdc3, // n0x01e5 c0x0000 (---------------) + I gap + 0x002190c6, // n0x01e6 c0x0000 (---------------) + I garden + 0x0020d202, // n0x01e7 c0x0000 (---------------) + I gb + 0x00387944, // n0x01e8 c0x0000 (---------------) + I gbiz + 0x00222d42, // n0x01e9 c0x0000 (---------------) + I gd + 0x002fb203, // n0x01ea c0x0000 (---------------) + I gdn + 0x182026c2, // n0x01eb c0x0060 (n0x098c-n0x0993) + I ge + 0x002534c3, // n0x01ec c0x0000 (---------------) + I gea + 0x00218144, // n0x01ed c0x0000 (---------------) + I gent + 0x00218147, // n0x01ee c0x0000 (---------------) + I genting + 0x00324b46, // n0x01ef c0x0000 (---------------) + I george + 
0x00269a82, // n0x01f0 c0x0000 (---------------) + I gf + 0x18654282, // n0x01f1 c0x0061 (n0x0993-n0x0996) + I gg + 0x00330644, // n0x01f2 c0x0000 (---------------) + I ggee + 0x18a41602, // n0x01f3 c0x0062 (n0x0996-n0x099b) + I gh + 0x18e134c2, // n0x01f4 c0x0063 (n0x099b-n0x09a1) + I gi + 0x00344f44, // n0x01f5 c0x0000 (---------------) + I gift + 0x00344f45, // n0x01f6 c0x0000 (---------------) + I gifts + 0x00269485, // n0x01f7 c0x0000 (---------------) + I gives + 0x003481c6, // n0x01f8 c0x0000 (---------------) + I giving + 0x1920ce42, // n0x01f9 c0x0064 (n0x09a1-n0x09a6) + I gl + 0x00345e85, // n0x01fa c0x0000 (---------------) + I glade + 0x0039ef45, // n0x01fb c0x0000 (---------------) + I glass + 0x00285d83, // n0x01fc c0x0000 (---------------) + I gle + 0x0020d846, // n0x01fd c0x0000 (---------------) + I global + 0x0020e345, // n0x01fe c0x0000 (---------------) + I globo + 0x00215582, // n0x01ff c0x0000 (---------------) + I gm + 0x00335e45, // n0x0200 c0x0000 (---------------) + I gmail + 0x00215584, // n0x0201 c0x0000 (---------------) + I gmbh + 0x002182c3, // n0x0202 c0x0000 (---------------) + I gmo + 0x0021bac3, // n0x0203 c0x0000 (---------------) + I gmx + 0x19608342, // n0x0204 c0x0065 (n0x09a6-n0x09ac) + I gn + 0x002e9bc7, // n0x0205 c0x0000 (---------------) + I godaddy + 0x0024dbc4, // n0x0206 c0x0000 (---------------) + I gold + 0x0024dbc9, // n0x0207 c0x0000 (---------------) + I goldpoint + 0x0024e1c4, // n0x0208 c0x0000 (---------------) + I golf + 0x00299dc3, // n0x0209 c0x0000 (---------------) + I goo + 0x0031bac9, // n0x020a c0x0000 (---------------) + I goodhands + 0x0034a9c8, // n0x020b c0x0000 (---------------) + I goodyear + 0x0029bb84, // n0x020c c0x0000 (---------------) + I goog + 0x0029bb86, // n0x020d c0x0000 (---------------) + I google + 0x002a36c3, // n0x020e c0x0000 (---------------) + I gop + 0x00211ec3, // n0x020f c0x0000 (---------------) + I got + 0x002dc504, // n0x0210 c0x0000 (---------------) + I gotv + 
0x0026cc83, // n0x0211 c0x0000 (---------------) + I gov + 0x19adad02, // n0x0212 c0x0066 (n0x09ac-n0x09b2) + I gp + 0x003010c2, // n0x0213 c0x0000 (---------------) + I gq + 0x19e00c82, // n0x0214 c0x0067 (n0x09b2-n0x09b8) + I gr + 0x00315908, // n0x0215 c0x0000 (---------------) + I grainger + 0x003216c8, // n0x0216 c0x0000 (---------------) + I graphics + 0x0038b986, // n0x0217 c0x0000 (---------------) + I gratis + 0x002503c5, // n0x0218 c0x0000 (---------------) + I green + 0x0022fd05, // n0x0219 c0x0000 (---------------) + I gripe + 0x0020ab45, // n0x021a c0x0000 (---------------) + I group + 0x0023a242, // n0x021b c0x0000 (---------------) + I gs + 0x1a23f142, // n0x021c c0x0068 (n0x09b8-n0x09bf) + I gt + 0x0160dd42, // n0x021d c0x0005 (---------------)* o I gu + 0x00350588, // n0x021e c0x0000 (---------------) + I guardian + 0x0023fd45, // n0x021f c0x0000 (---------------) + I gucci + 0x002e05c4, // n0x0220 c0x0000 (---------------) + I guge + 0x00232905, // n0x0221 c0x0000 (---------------) + I guide + 0x0023cc87, // n0x0222 c0x0000 (---------------) + I guitars + 0x002594c4, // n0x0223 c0x0000 (---------------) + I guru + 0x002167c2, // n0x0224 c0x0000 (---------------) + I gw + 0x1a602302, // n0x0225 c0x0069 (n0x09bf-n0x09c5) + I gy + 0x0030c744, // n0x0226 c0x0000 (---------------) + I hair + 0x0020ccc7, // n0x0227 c0x0000 (---------------) + I hamburg + 0x00394c47, // n0x0228 c0x0000 (---------------) + I hangout + 0x0035fd04, // n0x0229 c0x0000 (---------------) + I haus + 0x00290b43, // n0x022a c0x0000 (---------------) + I hbo + 0x0024b1c4, // n0x022b c0x0000 (---------------) + I hdfc + 0x0024b1c8, // n0x022c c0x0000 (---------------) + I hdfcbank + 0x0036b386, // n0x022d c0x0000 (---------------) + I health + 0x0036b38a, // n0x022e c0x0000 (---------------) + I healthcare + 0x002073c4, // n0x022f c0x0000 (---------------) + I help + 0x0020ebc8, // n0x0230 c0x0000 (---------------) + I helsinki + 0x00254184, // n0x0231 c0x0000 (---------------) + I 
here + 0x00222106, // n0x0232 c0x0000 (---------------) + I hermes + 0x00292304, // n0x0233 c0x0000 (---------------) + I hgtv + 0x00358b86, // n0x0234 c0x0000 (---------------) + I hiphop + 0x002ebcc9, // n0x0235 c0x0000 (---------------) + I hisamitsu + 0x002a27c7, // n0x0236 c0x0000 (---------------) + I hitachi + 0x0027d3c3, // n0x0237 c0x0000 (---------------) + I hiv + 0x1aa0a882, // n0x0238 c0x006a (n0x09c5-n0x09dd) + I hk + 0x0026da43, // n0x0239 c0x0000 (---------------) + I hkt + 0x0020e942, // n0x023a c0x0000 (---------------) + I hm + 0x1ae1ab42, // n0x023b c0x006b (n0x09dd-n0x09e3) + I hn + 0x002df846, // n0x023c c0x0000 (---------------) + I hockey + 0x0035e208, // n0x023d c0x0000 (---------------) + I holdings + 0x002a5007, // n0x023e c0x0000 (---------------) + I holiday + 0x00274049, // n0x023f c0x0000 (---------------) + I homedepot + 0x00299cc9, // n0x0240 c0x0000 (---------------) + I homegoods + 0x002a60c5, // n0x0241 c0x0000 (---------------) + I homes + 0x002a60c9, // n0x0242 c0x0000 (---------------) + I homesense + 0x002a7985, // n0x0243 c0x0000 (---------------) + I honda + 0x002a8409, // n0x0244 c0x0000 (---------------) + I honeywell + 0x002a91c5, // n0x0245 c0x0000 (---------------) + I horse + 0x00297e04, // n0x0246 c0x0000 (---------------) + I host + 0x00297e07, // n0x0247 c0x0000 (---------------) + I hosting + 0x00234dc3, // n0x0248 c0x0000 (---------------) + I hot + 0x002a98c7, // n0x0249 c0x0000 (---------------) + I hoteles + 0x002a9fc7, // n0x024a c0x0000 (---------------) + I hotmail + 0x002a2105, // n0x024b c0x0000 (---------------) + I house + 0x002a1343, // n0x024c c0x0000 (---------------) + I how + 0x1b20e4c2, // n0x024d c0x006c (n0x09e3-n0x09e8) + I hr + 0x00389404, // n0x024e c0x0000 (---------------) + I hsbc + 0x1b62a682, // n0x024f c0x006d (n0x09e8-n0x09f9) + I ht + 0x0025d0c3, // n0x0250 c0x0000 (---------------) + I htc + 0x1ba195c2, // n0x0251 c0x006e (n0x09f9-n0x0a19) + I hu + 0x002f94c6, // n0x0252 c0x0000 
(---------------) + I hughes + 0x0031fe45, // n0x0253 c0x0000 (---------------) + I hyatt + 0x002ac1c7, // n0x0254 c0x0000 (---------------) + I hyundai + 0x00321f03, // n0x0255 c0x0000 (---------------) + I ibm + 0x00239ac4, // n0x0256 c0x0000 (---------------) + I icbc + 0x00206903, // n0x0257 c0x0000 (---------------) + I ice + 0x00209d43, // n0x0258 c0x0000 (---------------) + I icu + 0x1be0c782, // n0x0259 c0x006f (n0x0a19-n0x0a24) + I id + 0x1c600042, // n0x025a c0x0071 (n0x0a25-n0x0a27) + I ie + 0x00365804, // n0x025b c0x0000 (---------------) + I ieee + 0x002428c3, // n0x025c c0x0000 (---------------) + I ifm + 0x00322905, // n0x025d c0x0000 (---------------) + I iinet + 0x00322745, // n0x025e c0x0000 (---------------) + I ikano + 0x1ca02902, // n0x025f c0x0072 (n0x0a27-n0x0a2f) + I il + 0x1d205c42, // n0x0260 c0x0074 (n0x0a30-n0x0a37) + I im + 0x00256dc6, // n0x0261 c0x0000 (---------------) + I imamat + 0x0025ee44, // n0x0262 c0x0000 (---------------) + I imdb + 0x00207084, // n0x0263 c0x0000 (---------------) + I immo + 0x0020708a, // n0x0264 c0x0000 (---------------) + I immobilien + 0x1da013c2, // n0x0265 c0x0076 (n0x0a39-n0x0a46) + I in + 0x0036728a, // n0x0266 c0x0000 (---------------) + I industries + 0x003a1088, // n0x0267 c0x0000 (---------------) + I infiniti + 0x1dfa1244, // n0x0268 c0x0077 (n0x0a46-n0x0a50) + I info + 0x0020e2c3, // n0x0269 c0x0000 (---------------) + I ing + 0x0020ecc3, // n0x026a c0x0000 (---------------) + I ink + 0x0030ea89, // n0x026b c0x0000 (---------------) + I institute + 0x0023e7c9, // n0x026c c0x0000 (---------------) + I insurance + 0x00339fc6, // n0x026d c0x0000 (---------------) + I insure + 0x1e201603, // n0x026e c0x0078 (n0x0a50-n0x0a51) + I int + 0x0024dd45, // n0x026f c0x0000 (---------------) + I intel + 0x0031940d, // n0x0270 c0x0000 (---------------) + I international + 0x002f8946, // n0x0271 c0x0000 (---------------) + I intuit + 0x00203d0b, // n0x0272 c0x0000 (---------------) + I investments + 
0x1e600ac2, // n0x0273 c0x0079 (n0x0a51-n0x0a57) + I io + 0x0026bb88, // n0x0274 c0x0000 (---------------) + I ipiranga + 0x1ea00dc2, // n0x0275 c0x007a (n0x0a57-n0x0a5d) + I iq + 0x1ee04302, // n0x0276 c0x007b (n0x0a5d-n0x0a66) + I ir + 0x002a5605, // n0x0277 c0x0000 (---------------) + I irish + 0x1f2006c2, // n0x0278 c0x007c (n0x0a66-n0x0a6e) + I is + 0x0025b307, // n0x0279 c0x0000 (---------------) + I iselect + 0x0033c007, // n0x027a c0x0000 (---------------) + I ismaili + 0x00215003, // n0x027b c0x0000 (---------------) + I ist + 0x00215008, // n0x027c c0x0000 (---------------) + I istanbul + 0x1f601e42, // n0x027d c0x007d (n0x0a6e-n0x0bdf) + I it + 0x002804c4, // n0x027e c0x0000 (---------------) + I itau + 0x00360743, // n0x027f c0x0000 (---------------) + I itv + 0x00323145, // n0x0280 c0x0000 (---------------) + I iveco + 0x0036ab83, // n0x0281 c0x0000 (---------------) + I iwc + 0x0031f906, // n0x0282 c0x0000 (---------------) + I jaguar + 0x00323d44, // n0x0283 c0x0000 (---------------) + I java + 0x00247743, // n0x0284 c0x0000 (---------------) + I jcb + 0x0026e183, // n0x0285 c0x0000 (---------------) + I jcp + 0x1fa06f02, // n0x0286 c0x007e (n0x0bdf-n0x0be2) + I je + 0x003299c4, // n0x0287 c0x0000 (---------------) + I jeep + 0x0034ea85, // n0x0288 c0x0000 (---------------) + I jetzt + 0x00360f47, // n0x0289 c0x0000 (---------------) + I jewelry + 0x00278d43, // n0x028a c0x0000 (---------------) + I jio + 0x002ac643, // n0x028b c0x0000 (---------------) + I jlc + 0x002ad103, // n0x028c c0x0000 (---------------) + I jll + 0x016308c2, // n0x028d c0x0005 (---------------)* o I jm + 0x002ad1c3, // n0x028e c0x0000 (---------------) + I jmp + 0x002ad803, // n0x028f c0x0000 (---------------) + I jnj + 0x1fe04042, // n0x0290 c0x007f (n0x0be2-n0x0bea) + I jo + 0x002ddc44, // n0x0291 c0x0000 (---------------) + I jobs + 0x0027cb06, // n0x0292 c0x0000 (---------------) + I joburg + 0x00204043, // n0x0293 c0x0000 (---------------) + I jot + 0x002adb83, // 
n0x0294 c0x0000 (---------------) + I joy + 0x202ae3c2, // n0x0295 c0x0080 (n0x0bea-n0x0c59) + I jp + 0x002ae3c8, // n0x0296 c0x0000 (---------------) + I jpmorgan + 0x002aefc4, // n0x0297 c0x0000 (---------------) + I jprs + 0x002d7906, // n0x0298 c0x0000 (---------------) + I juegos + 0x002af287, // n0x0299 c0x0000 (---------------) + I juniper + 0x00227e46, // n0x029a c0x0000 (---------------) + I kaufen + 0x00238144, // n0x029b c0x0000 (---------------) + I kddi + 0x2de025c2, // n0x029c c0x00b7 (n0x12ed-n0x12ee)* o I ke + 0x00234c8b, // n0x029d c0x0000 (---------------) + I kerryhotels + 0x002e2c0e, // n0x029e c0x0000 (---------------) + I kerrylogistics + 0x00220d0f, // n0x029f c0x0000 (---------------) + I kerryproperties + 0x0023f303, // n0x02a0 c0x0000 (---------------) + I kfh + 0x2e6b5502, // n0x02a1 c0x00b9 (n0x12ef-n0x12f5) + I kg + 0x0161acc2, // n0x02a2 c0x0005 (---------------)* o I kh + 0x2ea01e02, // n0x02a3 c0x00ba (n0x12f5-n0x12fc) + I ki + 0x00226f83, // n0x02a4 c0x0000 (---------------) + I kia + 0x002303c3, // n0x02a5 c0x0000 (---------------) + I kim + 0x002e7706, // n0x02a6 c0x0000 (---------------) + I kinder + 0x0037c506, // n0x02a7 c0x0000 (---------------) + I kindle + 0x003703c7, // n0x02a8 c0x0000 (---------------) + I kitchen + 0x002eed84, // n0x02a9 c0x0000 (---------------) + I kiwi + 0x2ee316c2, // n0x02aa c0x00bb (n0x12fc-n0x130d) + I km + 0x2f269c82, // n0x02ab c0x00bc (n0x130d-n0x1311) + I kn + 0x0036bd45, // n0x02ac c0x0000 (---------------) + I koeln + 0x002aa707, // n0x02ad c0x0000 (---------------) + I komatsu + 0x0035cbc6, // n0x02ae c0x0000 (---------------) + I kosher + 0x2f60d782, // n0x02af c0x00bd (n0x1311-n0x1317) + I kp + 0x0020d784, // n0x02b0 c0x0000 (---------------) + I kpmg + 0x0036a3c3, // n0x02b1 c0x0000 (---------------) + I kpn + 0x2fa06fc2, // n0x02b2 c0x00be (n0x1317-n0x1335) + I kr + 0x0034df03, // n0x02b3 c0x0000 (---------------) + I krd + 0x003a2b04, // n0x02b4 c0x0000 (---------------) + I kred + 
0x002b5449, // n0x02b5 c0x0000 (---------------) + I kuokgroup + 0x016bd182, // n0x02b6 c0x0005 (---------------)* o I kw + 0x2fe36902, // n0x02b7 c0x00bf (n0x1335-n0x133a) + I ky + 0x00269c06, // n0x02b8 c0x0000 (---------------) + I kyknet + 0x002be0c5, // n0x02b9 c0x0000 (---------------) + I kyoto + 0x30392a42, // n0x02ba c0x00c0 (n0x133a-n0x1340) + I kz + 0x30600802, // n0x02bb c0x00c1 (n0x1340-n0x1349) + I la + 0x0033aa87, // n0x02bc c0x0000 (---------------) + I lacaixa + 0x00293449, // n0x02bd c0x0000 (---------------) + I ladbrokes + 0x00352d0b, // n0x02be c0x0000 (---------------) + I lamborghini + 0x00247245, // n0x02bf c0x0000 (---------------) + I lamer + 0x0036c449, // n0x02c0 c0x0000 (---------------) + I lancaster + 0x002c0706, // n0x02c1 c0x0000 (---------------) + I lancia + 0x00259007, // n0x02c2 c0x0000 (---------------) + I lancome + 0x00200804, // n0x02c3 c0x0000 (---------------) + I land + 0x0025e089, // n0x02c4 c0x0000 (---------------) + I landrover + 0x0035a387, // n0x02c5 c0x0000 (---------------) + I lanxess + 0x00279f47, // n0x02c6 c0x0000 (---------------) + I lasalle + 0x00223603, // n0x02c7 c0x0000 (---------------) + I lat + 0x0025ef86, // n0x02c8 c0x0000 (---------------) + I latino + 0x002cca47, // n0x02c9 c0x0000 (---------------) + I latrobe + 0x00274483, // n0x02ca c0x0000 (---------------) + I law + 0x00274486, // n0x02cb c0x0000 (---------------) + I lawyer + 0x30a02942, // n0x02cc c0x00c2 (n0x1349-n0x134e) + I lb + 0x30e3aa02, // n0x02cd c0x00c3 (n0x134e-n0x1354) + I lc + 0x00226843, // n0x02ce c0x0000 (---------------) + I lds + 0x0027a085, // n0x02cf c0x0000 (---------------) + I lease + 0x0022c487, // n0x02d0 c0x0000 (---------------) + I leclerc + 0x0037c006, // n0x02d1 c0x0000 (---------------) + I lefrak + 0x00336305, // n0x02d2 c0x0000 (---------------) + I legal + 0x0024db44, // n0x02d3 c0x0000 (---------------) + I lego + 0x00241385, // n0x02d4 c0x0000 (---------------) + I lexus + 0x002e65c4, // n0x02d5 c0x0000 
(---------------) + I lgbt + 0x31207202, // n0x02d6 c0x00c4 (n0x1354-n0x1355) + I li + 0x00308447, // n0x02d7 c0x0000 (---------------) + I liaison + 0x002bb904, // n0x02d8 c0x0000 (---------------) + I lidl + 0x0023e6c4, // n0x02d9 c0x0000 (---------------) + I life + 0x0023e6cd, // n0x02da c0x0000 (---------------) + I lifeinsurance + 0x00253cc9, // n0x02db c0x0000 (---------------) + I lifestyle + 0x00312248, // n0x02dc c0x0000 (---------------) + I lighting + 0x00258c44, // n0x02dd c0x0000 (---------------) + I like + 0x00249785, // n0x02de c0x0000 (---------------) + I lilly + 0x0025d747, // n0x02df c0x0000 (---------------) + I limited + 0x0025db44, // n0x02e0 c0x0000 (---------------) + I limo + 0x0022e787, // n0x02e1 c0x0000 (---------------) + I lincoln + 0x00345ac5, // n0x02e2 c0x0000 (---------------) + I linde + 0x00398ec4, // n0x02e3 c0x0000 (---------------) + I link + 0x002d3a85, // n0x02e4 c0x0000 (---------------) + I lipsy + 0x002622c4, // n0x02e5 c0x0000 (---------------) + I live + 0x002414c6, // n0x02e6 c0x0000 (---------------) + I living + 0x0025da45, // n0x02e7 c0x0000 (---------------) + I lixil + 0x3160d742, // n0x02e8 c0x00c5 (n0x1355-n0x1364) + I lk + 0x00210b84, // n0x02e9 c0x0000 (---------------) + I loan + 0x00210b85, // n0x02ea c0x0000 (---------------) + I loans + 0x00376f06, // n0x02eb c0x0000 (---------------) + I locker + 0x00336445, // n0x02ec c0x0000 (---------------) + I locus + 0x002ccfc4, // n0x02ed c0x0000 (---------------) + I loft + 0x002c21c3, // n0x02ee c0x0000 (---------------) + I lol + 0x00321906, // n0x02ef c0x0000 (---------------) + I london + 0x0021b685, // n0x02f0 c0x0000 (---------------) + I lotte + 0x00222845, // n0x02f1 c0x0000 (---------------) + I lotto + 0x00230204, // n0x02f2 c0x0000 (---------------) + I love + 0x00207443, // n0x02f3 c0x0000 (---------------) + I lpl + 0x0020744c, // n0x02f4 c0x0000 (---------------) + I lplfinancial + 0x31a88142, // n0x02f5 c0x00c6 (n0x1364-n0x1369) + I lr + 
0x31e04e42, // n0x02f6 c0x00c7 (n0x1369-n0x136b) + I ls + 0x32209e02, // n0x02f7 c0x00c8 (n0x136b-n0x136d) + I lt + 0x00322cc3, // n0x02f8 c0x0000 (---------------) + I ltd + 0x00322cc4, // n0x02f9 c0x0000 (---------------) + I ltda + 0x32602f42, // n0x02fa c0x00c9 (n0x136d-n0x136e) + I lu + 0x002fb348, // n0x02fb c0x0000 (---------------) + I lundbeck + 0x002dd745, // n0x02fc c0x0000 (---------------) + I lupin + 0x0023ca44, // n0x02fd c0x0000 (---------------) + I luxe + 0x0023d206, // n0x02fe c0x0000 (---------------) + I luxury + 0x32a05d02, // n0x02ff c0x00ca (n0x136e-n0x1377) + I lv + 0x32e09082, // n0x0300 c0x00cb (n0x1377-n0x1380) + I ly + 0x33200182, // n0x0301 c0x00cc (n0x1380-n0x1386) + I ma + 0x00375105, // n0x0302 c0x0000 (---------------) + I macys + 0x00317146, // n0x0303 c0x0000 (---------------) + I madrid + 0x00271c44, // n0x0304 c0x0000 (---------------) + I maif + 0x0022bdc6, // n0x0305 c0x0000 (---------------) + I maison + 0x00248d06, // n0x0306 c0x0000 (---------------) + I makeup + 0x002018c3, // n0x0307 c0x0000 (---------------) + I man + 0x0036f20a, // n0x0308 c0x0000 (---------------) + I management + 0x00242c05, // n0x0309 c0x0000 (---------------) + I mango + 0x002f1386, // n0x030a c0x0000 (---------------) + I market + 0x002f1389, // n0x030b c0x0000 (---------------) + I marketing + 0x00331387, // n0x030c c0x0000 (---------------) + I markets + 0x00366448, // n0x030d c0x0000 (---------------) + I marriott + 0x0020f009, // n0x030e c0x0000 (---------------) + I marshalls + 0x002be9c8, // n0x030f c0x0000 (---------------) + I maserati + 0x0022f706, // n0x0310 c0x0000 (---------------) + I mattel + 0x00209c03, // n0x0311 c0x0000 (---------------) + I mba + 0x3362ac02, // n0x0312 c0x00cd (n0x1386-n0x1388) + I mc + 0x0037cec3, // n0x0313 c0x0000 (---------------) + I mcd + 0x0037cec9, // n0x0314 c0x0000 (---------------) + I mcdonalds + 0x00327b88, // n0x0315 c0x0000 (---------------) + I mckinsey + 0x33a4da82, // n0x0316 c0x00ce 
(n0x1388-n0x1389) + I md + 0x33e03e82, // n0x0317 c0x00cf (n0x1389-n0x1396) + I me + 0x00213ac3, // n0x0318 c0x0000 (---------------) + I med + 0x003025c5, // n0x0319 c0x0000 (---------------) + I media + 0x0026ad84, // n0x031a c0x0000 (---------------) + I meet + 0x002e1809, // n0x031b c0x0000 (---------------) + I melbourne + 0x002c4604, // n0x031c c0x0000 (---------------) + I meme + 0x0026cf88, // n0x031d c0x0000 (---------------) + I memorial + 0x00203e83, // n0x031e c0x0000 (---------------) + I men + 0x002ede44, // n0x031f c0x0000 (---------------) + I menu + 0x0022adc3, // n0x0320 c0x0000 (---------------) + I meo + 0x0023e607, // n0x0321 c0x0000 (---------------) + I metlife + 0x3420d802, // n0x0322 c0x00d0 (n0x1396-n0x139f) + I mg + 0x0025aa42, // n0x0323 c0x0000 (---------------) + I mh + 0x00231f45, // n0x0324 c0x0000 (---------------) + I miami + 0x0026b149, // n0x0325 c0x0000 (---------------) + I microsoft + 0x00209003, // n0x0326 c0x0000 (---------------) + I mil + 0x0027d144, // n0x0327 c0x0000 (---------------) + I mini + 0x003193c4, // n0x0328 c0x0000 (---------------) + I mint + 0x00229ac3, // n0x0329 c0x0000 (---------------) + I mit + 0x0027e0ca, // n0x032a c0x0000 (---------------) + I mitsubishi + 0x34767142, // n0x032b c0x00d1 (n0x139f-n0x13a7) + I mk + 0x34a10b42, // n0x032c c0x00d2 (n0x13a7-n0x13ae) + I ml + 0x002c1243, // n0x032d c0x0000 (---------------) + I mlb + 0x00369503, // n0x032e c0x0000 (---------------) + I mls + 0x016070c2, // n0x032f c0x0005 (---------------)* o I mm + 0x00375603, // n0x0330 c0x0000 (---------------) + I mma + 0x34e1fdc2, // n0x0331 c0x00d3 (n0x13ae-n0x13b2) + I mn + 0x0021fdc4, // n0x0332 c0x0000 (---------------) + I mnet + 0x35207102, // n0x0333 c0x00d4 (n0x13b2-n0x13b7) + I mo + 0x35607104, // n0x0334 c0x00d5 (n0x13b7-n0x13b8) + I mobi + 0x002e2606, // n0x0335 c0x0000 (---------------) + I mobily + 0x0026c084, // n0x0336 c0x0000 (---------------) + I moda + 0x002d7d03, // n0x0337 c0x0000 (---------------) 
+ I moe + 0x00282043, // n0x0338 c0x0000 (---------------) + I moi + 0x002e3783, // n0x0339 c0x0000 (---------------) + I mom + 0x00244dc6, // n0x033a c0x0000 (---------------) + I monash + 0x002c6d85, // n0x033b c0x0000 (---------------) + I money + 0x002c1e07, // n0x033c c0x0000 (---------------) + I monster + 0x00258ec9, // n0x033d c0x0000 (---------------) + I montblanc + 0x002c5285, // n0x033e c0x0000 (---------------) + I mopar + 0x002c6cc6, // n0x033f c0x0000 (---------------) + I mormon + 0x002c72c8, // n0x0340 c0x0000 (---------------) + I mortgage + 0x002c74c6, // n0x0341 c0x0000 (---------------) + I moscow + 0x00278644, // n0x0342 c0x0000 (---------------) + I moto + 0x0029af0b, // n0x0343 c0x0000 (---------------) + I motorcycles + 0x002c9083, // n0x0344 c0x0000 (---------------) + I mov + 0x002c9085, // n0x0345 c0x0000 (---------------) + I movie + 0x002c91c8, // n0x0346 c0x0000 (---------------) + I movistar + 0x0022a482, // n0x0347 c0x0000 (---------------) + I mp + 0x0033ad82, // n0x0348 c0x0000 (---------------) + I mq + 0x35a4aa02, // n0x0349 c0x00d6 (n0x13b8-n0x13ba) + I mr + 0x35e0f702, // n0x034a c0x00d7 (n0x13ba-n0x13bf) + I ms + 0x0025d643, // n0x034b c0x0000 (---------------) + I msd + 0x36204c02, // n0x034c c0x00d8 (n0x13bf-n0x13c3) + I mt + 0x0026c8c3, // n0x034d c0x0000 (---------------) + I mtn + 0x002c94c4, // n0x034e c0x0000 (---------------) + I mtpc + 0x002c9d03, // n0x034f c0x0000 (---------------) + I mtr + 0x36a03ac2, // n0x0350 c0x00da (n0x13c4-n0x13cb) + I mu + 0x002cbb0b, // n0x0351 c0x0000 (---------------) + I multichoice + 0x36ed0106, // n0x0352 c0x00db (n0x13cb-n0x15ef) + I museum + 0x0023db46, // n0x0353 c0x0000 (---------------) + I mutual + 0x002d0748, // n0x0354 c0x0000 (---------------) + I mutuelle + 0x372b7382, // n0x0355 c0x00dc (n0x15ef-n0x15fd) + I mv + 0x3760fc82, // n0x0356 c0x00dd (n0x15fd-n0x1608) + I mw + 0x37a1bb02, // n0x0357 c0x00de (n0x1608-n0x160e) + I mx + 0x37e26f02, // n0x0358 c0x00df 
(n0x160e-n0x1616) + I my + 0x38214382, // n0x0359 c0x00e0 (n0x1616-n0x1617)* o I mz + 0x0021438b, // n0x035a c0x0000 (---------------) + I mzansimagic + 0x38601402, // n0x035b c0x00e1 (n0x1617-n0x1628) + I na + 0x00223703, // n0x035c c0x0000 (---------------) + I nab + 0x002393c5, // n0x035d c0x0000 (---------------) + I nadex + 0x0030f646, // n0x035e c0x0000 (---------------) + I nagoya + 0x38a05284, // n0x035f c0x00e2 (n0x1628-n0x162a) + I name + 0x0028cec7, // n0x0360 c0x0000 (---------------) + I naspers + 0x00238e4a, // n0x0361 c0x0000 (---------------) + I nationwide + 0x002ea486, // n0x0362 c0x0000 (---------------) + I natura + 0x0039fb84, // n0x0363 c0x0000 (---------------) + I navy + 0x0025d243, // n0x0364 c0x0000 (---------------) + I nba + 0x39600642, // n0x0365 c0x00e5 (n0x162c-n0x162d) + I nc + 0x00202c02, // n0x0366 c0x0000 (---------------) + I ne + 0x00249b43, // n0x0367 c0x0000 (---------------) + I nec + 0x39a1fe03, // n0x0368 c0x00e6 (n0x162d-n0x1663) + I net + 0x003928c7, // n0x0369 c0x0000 (---------------) + I netbank + 0x0025d947, // n0x036a c0x0000 (---------------) + I netflix + 0x00255b87, // n0x036b c0x0000 (---------------) + I network + 0x00228847, // n0x036c c0x0000 (---------------) + I neustar + 0x00221dc3, // n0x036d c0x0000 (---------------) + I new + 0x002f0d8a, // n0x036e c0x0000 (---------------) + I newholland + 0x00221dc4, // n0x036f c0x0000 (---------------) + I news + 0x0024d684, // n0x0370 c0x0000 (---------------) + I next + 0x0024d68a, // n0x0371 c0x0000 (---------------) + I nextdirect + 0x0026d605, // n0x0372 c0x0000 (---------------) + I nexus + 0x3ae00542, // n0x0373 c0x00eb (n0x166b-n0x1675) + I nf + 0x00251bc3, // n0x0374 c0x0000 (---------------) + I nfl + 0x3b202802, // n0x0375 c0x00ec (n0x1675-n0x167f) + I ng + 0x00202d03, // n0x0376 c0x0000 (---------------) + I ngo + 0x0026da03, // n0x0377 c0x0000 (---------------) + I nhk + 0x3ba03182, // n0x0378 c0x00ee (n0x1680-n0x168e) o I ni + 0x002a6b04, // n0x0379 
c0x0000 (---------------) + I nico + 0x0021da84, // n0x037a c0x0000 (---------------) + I nike + 0x00206b05, // n0x037b c0x0000 (---------------) + I nikon + 0x002c8e05, // n0x037c c0x0000 (---------------) + I ninja + 0x0022e906, // n0x037d c0x0000 (---------------) + I nissan + 0x0022ec86, // n0x037e c0x0000 (---------------) + I nissay + 0x3be47802, // n0x037f c0x00ef (n0x168e-n0x1691) + I nl + 0x3c200c02, // n0x0380 c0x00f0 (n0x1691-n0x1967) + I no + 0x00318785, // n0x0381 c0x0000 (---------------) + I nokia + 0x0023d852, // n0x0382 c0x0000 (---------------) + I northwesternmutual + 0x00366106, // n0x0383 c0x0000 (---------------) + I norton + 0x00224d83, // n0x0384 c0x0000 (---------------) + I now + 0x0029cec6, // n0x0385 c0x0000 (---------------) + I nowruz + 0x00224d85, // n0x0386 c0x0000 (---------------) + I nowtv + 0x01610502, // n0x0387 c0x0005 (---------------)* o I np + 0x4460d382, // n0x0388 c0x0111 (n0x198f-n0x1996) + I nr + 0x002e23c3, // n0x0389 c0x0000 (---------------) + I nra + 0x002b5e83, // n0x038a c0x0000 (---------------) + I nrw + 0x00373b03, // n0x038b c0x0000 (---------------) + I ntt + 0x44a017c2, // n0x038c c0x0112 (n0x1996-n0x1999) + I nu + 0x0036ef83, // n0x038d c0x0000 (---------------) + I nyc + 0x44e094c2, // n0x038e c0x0113 (n0x1999-n0x19a9) + I nz + 0x00207143, // n0x038f c0x0000 (---------------) + I obi + 0x002ddc88, // n0x0390 c0x0000 (---------------) + I observer + 0x0020b283, // n0x0391 c0x0000 (---------------) + I off + 0x00221686, // n0x0392 c0x0000 (---------------) + I office + 0x00395b47, // n0x0393 c0x0000 (---------------) + I okinawa + 0x0020a9c6, // n0x0394 c0x0000 (---------------) + I olayan + 0x0020a9cb, // n0x0395 c0x0000 (---------------) + I olayangroup + 0x0039fac7, // n0x0396 c0x0000 (---------------) + I oldnavy + 0x00389204, // n0x0397 c0x0000 (---------------) + I ollo + 0x456014c2, // n0x0398 c0x0115 (n0x19aa-n0x19b3) + I om + 0x002dd5c5, // n0x0399 c0x0000 (---------------) + I omega + 0x00214843, // 
n0x039a c0x0000 (---------------) + I one + 0x002082c3, // n0x039b c0x0000 (---------------) + I ong + 0x003175c3, // n0x039c c0x0000 (---------------) + I onl + 0x003175c6, // n0x039d c0x0000 (---------------) + I online + 0x003a008a, // n0x039e c0x0000 (---------------) + I onyourside + 0x0028d703, // n0x039f c0x0000 (---------------) + I ooo + 0x0023de44, // n0x03a0 c0x0000 (---------------) + I open + 0x00224206, // n0x03a1 c0x0000 (---------------) + I oracle + 0x00396286, // n0x03a2 c0x0000 (---------------) + I orange + 0x45a2d1c3, // n0x03a3 c0x0116 (n0x19b3-n0x19f0) + I org + 0x002ae487, // n0x03a4 c0x0000 (---------------) + I organic + 0x002db3cd, // n0x03a5 c0x0000 (---------------) + I orientexpress + 0x00383487, // n0x03a6 c0x0000 (---------------) + I origins + 0x0029ac45, // n0x03a7 c0x0000 (---------------) + I osaka + 0x00269e06, // n0x03a8 c0x0000 (---------------) + I otsuka + 0x0021b6c3, // n0x03a9 c0x0000 (---------------) + I ott + 0x0020da83, // n0x03aa c0x0000 (---------------) + I ovh + 0x4720ac42, // n0x03ab c0x011c (n0x1a2d-n0x1a38) + I pa + 0x002eaf04, // n0x03ac c0x0000 (---------------) + I page + 0x0024c94c, // n0x03ad c0x0000 (---------------) + I pamperedchef + 0x002646c9, // n0x03ae c0x0000 (---------------) + I panasonic + 0x00338507, // n0x03af c0x0000 (---------------) + I panerai + 0x00277905, // n0x03b0 c0x0000 (---------------) + I paris + 0x002994c4, // n0x03b1 c0x0000 (---------------) + I pars + 0x002a5308, // n0x03b2 c0x0000 (---------------) + I partners + 0x002ad245, // n0x03b3 c0x0000 (---------------) + I parts + 0x002b4c45, // n0x03b4 c0x0000 (---------------) + I party + 0x002cc549, // n0x03b5 c0x0000 (---------------) + I passagens + 0x002bc0c3, // n0x03b6 c0x0000 (---------------) + I pay + 0x002bc0c4, // n0x03b7 c0x0000 (---------------) + I payu + 0x002c9544, // n0x03b8 c0x0000 (---------------) + I pccw + 0x47607782, // n0x03b9 c0x011d (n0x1a38-n0x1a40) + I pe + 0x00207783, // n0x03ba c0x0000 (---------------) 
+ I pet + 0x47af7d02, // n0x03bb c0x011e (n0x1a40-n0x1a43) + I pf + 0x002f7d06, // n0x03bc c0x0000 (---------------) + I pfizer + 0x016495c2, // n0x03bd c0x0005 (---------------)* o I pg + 0x47e00d42, // n0x03be c0x011f (n0x1a43-n0x1a4b) + I ph + 0x00375008, // n0x03bf c0x0000 (---------------) + I pharmacy + 0x002d39c7, // n0x03c0 c0x0000 (---------------) + I philips + 0x00299085, // n0x03c1 c0x0000 (---------------) + I photo + 0x002d404b, // n0x03c2 c0x0000 (---------------) + I photography + 0x002d11c6, // n0x03c3 c0x0000 (---------------) + I photos + 0x002d4246, // n0x03c4 c0x0000 (---------------) + I physio + 0x002d43c6, // n0x03c5 c0x0000 (---------------) + I piaget + 0x00225704, // n0x03c6 c0x0000 (---------------) + I pics + 0x002d4b46, // n0x03c7 c0x0000 (---------------) + I pictet + 0x002d5008, // n0x03c8 c0x0000 (---------------) + I pictures + 0x00241b83, // n0x03c9 c0x0000 (---------------) + I pid + 0x002699c3, // n0x03ca c0x0000 (---------------) + I pin + 0x002699c4, // n0x03cb c0x0000 (---------------) + I ping + 0x002d6d84, // n0x03cc c0x0000 (---------------) + I pink + 0x002d7107, // n0x03cd c0x0000 (---------------) + I pioneer + 0x002d85c5, // n0x03ce c0x0000 (---------------) + I pizza + 0x482d8702, // n0x03cf c0x0120 (n0x1a4b-n0x1a59) + I pk + 0x486063c2, // n0x03d0 c0x0121 (n0x1a59-n0x1afe) + I pl + 0x002063c5, // n0x03d1 c0x0000 (---------------) + I place + 0x0029e944, // n0x03d2 c0x0000 (---------------) + I play + 0x002dad4b, // n0x03d3 c0x0000 (---------------) + I playstation + 0x002dc348, // n0x03d4 c0x0000 (---------------) + I plumbing + 0x002dcac4, // n0x03d5 c0x0000 (---------------) + I plus + 0x0020d7c2, // n0x03d6 c0x0000 (---------------) + I pm + 0x48e493c2, // n0x03d7 c0x0123 (n0x1b2d-n0x1b32) + I pn + 0x002aee03, // n0x03d8 c0x0000 (---------------) + I pnc + 0x002dcf04, // n0x03d9 c0x0000 (---------------) + I pohl + 0x002dd005, // n0x03da c0x0000 (---------------) + I poker + 0x002de547, // n0x03db c0x0000 
(---------------) + I politie + 0x002e0104, // n0x03dc c0x0000 (---------------) + I porn + 0x0035fe04, // n0x03dd c0x0000 (---------------) + I post + 0x49204602, // n0x03de c0x0124 (n0x1b32-n0x1b3f) + I pr + 0x003598c9, // n0x03df c0x0000 (---------------) + I pramerica + 0x002e0b05, // n0x03e0 c0x0000 (---------------) + I praxi + 0x00247505, // n0x03e1 c0x0000 (---------------) + I press + 0x002e1745, // n0x03e2 c0x0000 (---------------) + I prime + 0x49620e43, // n0x03e3 c0x0125 (n0x1b3f-n0x1b4a) + I pro + 0x002e2044, // n0x03e4 c0x0000 (---------------) + I prod + 0x002e204b, // n0x03e5 c0x0000 (---------------) + I productions + 0x002e2484, // n0x03e6 c0x0000 (---------------) + I prof + 0x002e278b, // n0x03e7 c0x0000 (---------------) + I progressive + 0x002e36c5, // n0x03e8 c0x0000 (---------------) + I promo + 0x00220e4a, // n0x03e9 c0x0000 (---------------) + I properties + 0x002e3e48, // n0x03ea c0x0000 (---------------) + I property + 0x002e404a, // n0x03eb c0x0000 (---------------) + I protection + 0x002e42c3, // n0x03ec c0x0000 (---------------) + I pru + 0x002e42ca, // n0x03ed c0x0000 (---------------) + I prudential + 0x49a09342, // n0x03ee c0x0126 (n0x1b4a-n0x1b51) + I ps + 0x49e8c9c2, // n0x03ef c0x0127 (n0x1b51-n0x1b5a) + I pt + 0x00297403, // n0x03f0 c0x0000 (---------------) + I pub + 0x4a2e5942, // n0x03f1 c0x0128 (n0x1b5a-n0x1b60) + I pw + 0x002e5943, // n0x03f2 c0x0000 (---------------) + I pwc + 0x4a734802, // n0x03f3 c0x0129 (n0x1b60-n0x1b67) + I py + 0x4ab14682, // n0x03f4 c0x012a (n0x1b67-n0x1b70) + I qa + 0x002e6444, // n0x03f5 c0x0000 (---------------) + I qpon + 0x0021b906, // n0x03f6 c0x0000 (---------------) + I quebec + 0x0022bb05, // n0x03f7 c0x0000 (---------------) + I quest + 0x002e6ac3, // n0x03f8 c0x0000 (---------------) + I qvc + 0x00355bc6, // n0x03f9 c0x0000 (---------------) + I racing + 0x00351c84, // n0x03fa c0x0000 (---------------) + I raid + 0x4ae07002, // n0x03fb c0x012b (n0x1b70-n0x1b74) + I re + 0x002d3404, // 
n0x03fc c0x0000 (---------------) + I read + 0x002c238a, // n0x03fd c0x0000 (---------------) + I realestate + 0x00338907, // n0x03fe c0x0000 (---------------) + I realtor + 0x0031fc86, // n0x03ff c0x0000 (---------------) + I realty + 0x0031c747, // n0x0400 c0x0000 (---------------) + I recipes + 0x00244803, // n0x0401 c0x0000 (---------------) + I red + 0x003a2b48, // n0x0402 c0x0000 (---------------) + I redstone + 0x00337f4b, // n0x0403 c0x0000 (---------------) + I redumbrella + 0x002c9b05, // n0x0404 c0x0000 (---------------) + I rehab + 0x0033a0c5, // n0x0405 c0x0000 (---------------) + I reise + 0x0033a0c6, // n0x0406 c0x0000 (---------------) + I reisen + 0x002b75c4, // n0x0407 c0x0000 (---------------) + I reit + 0x00327f48, // n0x0408 c0x0000 (---------------) + I reliance + 0x00209ec3, // n0x0409 c0x0000 (---------------) + I ren + 0x0020bd84, // n0x040a c0x0000 (---------------) + I rent + 0x0020bd87, // n0x040b c0x0000 (---------------) + I rentals + 0x0022b7c6, // n0x040c c0x0000 (---------------) + I repair + 0x00309586, // n0x040d c0x0000 (---------------) + I report + 0x0029f6ca, // n0x040e c0x0000 (---------------) + I republican + 0x0024d544, // n0x040f c0x0000 (---------------) + I rest + 0x0037adca, // n0x0410 c0x0000 (---------------) + I restaurant + 0x002eaac6, // n0x0411 c0x0000 (---------------) + I review + 0x002eaac7, // n0x0412 c0x0000 (---------------) + I reviews + 0x00257607, // n0x0413 c0x0000 (---------------) + I rexroth + 0x00273a04, // n0x0414 c0x0000 (---------------) + I rich + 0x00273a09, // n0x0415 c0x0000 (---------------) + I richardli + 0x002b6f85, // n0x0416 c0x0000 (---------------) + I ricoh + 0x0034618b, // n0x0417 c0x0000 (---------------) + I rightathome + 0x00257f83, // n0x0418 c0x0000 (---------------) + I ril + 0x00200a83, // n0x0419 c0x0000 (---------------) + I rio + 0x0022fd43, // n0x041a c0x0000 (---------------) + I rip + 0x002684c4, // n0x041b c0x0000 (---------------) + I rmit + 0x4b202202, // n0x041c 
c0x012c (n0x1b74-n0x1b80) + I ro + 0x00289806, // n0x041d c0x0000 (---------------) + I rocher + 0x002a10c5, // n0x041e c0x0000 (---------------) + I rocks + 0x002d2f85, // n0x041f c0x0000 (---------------) + I rodeo + 0x0039c1c6, // n0x0420 c0x0000 (---------------) + I rogers + 0x0037ed04, // n0x0421 c0x0000 (---------------) + I room + 0x4b609702, // n0x0422 c0x012d (n0x1b80-n0x1b87) + I rs + 0x0039c2c4, // n0x0423 c0x0000 (---------------) + I rsvp + 0x4ba11302, // n0x0424 c0x012e (n0x1b87-n0x1c0a) + I ru + 0x00236144, // n0x0425 c0x0000 (---------------) + I ruhr + 0x00222b43, // n0x0426 c0x0000 (---------------) + I run + 0x4beb5ec2, // n0x0427 c0x012f (n0x1c0a-n0x1c13) + I rw + 0x003274c3, // n0x0428 c0x0000 (---------------) + I rwe + 0x0036acc6, // n0x0429 c0x0000 (---------------) + I ryukyu + 0x4c2004c2, // n0x042a c0x0130 (n0x1c13-n0x1c1b) + I sa + 0x0030c348, // n0x042b c0x0000 (---------------) + I saarland + 0x00215944, // n0x042c c0x0000 (---------------) + I safe + 0x00215946, // n0x042d c0x0000 (---------------) + I safety + 0x00307fc6, // n0x042e c0x0000 (---------------) + I sakura + 0x00259844, // n0x042f c0x0000 (---------------) + I sale + 0x00321885, // n0x0430 c0x0000 (---------------) + I salon + 0x00398bc8, // n0x0431 c0x0000 (---------------) + I samsclub + 0x0039edc7, // n0x0432 c0x0000 (---------------) + I samsung + 0x003a0747, // n0x0433 c0x0000 (---------------) + I sandvik + 0x003a074f, // n0x0434 c0x0000 (---------------) + I sandvikcoromant + 0x00294646, // n0x0435 c0x0000 (---------------) + I sanofi + 0x00219dc3, // n0x0436 c0x0000 (---------------) + I sap + 0x00219dc4, // n0x0437 c0x0000 (---------------) + I sapo + 0x0022b604, // n0x0438 c0x0000 (---------------) + I sarl + 0x00228143, // n0x0439 c0x0000 (---------------) + I sas + 0x00222244, // n0x043a c0x0000 (---------------) + I save + 0x00235144, // n0x043b c0x0000 (---------------) + I saxo + 0x4c62d142, // n0x043c c0x0131 (n0x1c1b-n0x1c20) + I sb + 0x00288e03, // 
n0x043d c0x0000 (---------------) + I sbi + 0x00237503, // n0x043e c0x0000 (---------------) + I sbs + 0x4ca00702, // n0x043f c0x0132 (n0x1c20-n0x1c25) + I sc + 0x00236a03, // n0x0440 c0x0000 (---------------) + I sca + 0x002ee403, // n0x0441 c0x0000 (---------------) + I scb + 0x0021744a, // n0x0442 c0x0000 (---------------) + I schaeffler + 0x002e5d47, // n0x0443 c0x0000 (---------------) + I schmidt + 0x0023ce0c, // n0x0444 c0x0000 (---------------) + I scholarships + 0x0023d0c6, // n0x0445 c0x0000 (---------------) + I school + 0x00241286, // n0x0446 c0x0000 (---------------) + I schule + 0x00242547, // n0x0447 c0x0000 (---------------) + I schwarz + 0x002358c7, // n0x0448 c0x0000 (---------------) + I science + 0x00246cc9, // n0x0449 c0x0000 (---------------) + I scjohnson + 0x0021c544, // n0x044a c0x0000 (---------------) + I scor + 0x00200704, // n0x044b c0x0000 (---------------) + I scot + 0x4ce496c2, // n0x044c c0x0133 (n0x1c25-n0x1c2d) + I sd + 0x4d2046c2, // n0x044d c0x0134 (n0x1c2d-n0x1c56) + I se + 0x00316b84, // n0x044e c0x0000 (---------------) + I seat + 0x0031c646, // n0x044f c0x0000 (---------------) + I secure + 0x00235d48, // n0x0450 c0x0000 (---------------) + I security + 0x0027a144, // n0x0451 c0x0000 (---------------) + I seek + 0x0025b346, // n0x0452 c0x0000 (---------------) + I select + 0x002cb485, // n0x0453 c0x0000 (---------------) + I sener + 0x00206808, // n0x0454 c0x0000 (---------------) + I services + 0x002046c3, // n0x0455 c0x0000 (---------------) + I ses + 0x00251f05, // n0x0456 c0x0000 (---------------) + I seven + 0x00253b43, // n0x0457 c0x0000 (---------------) + I sew + 0x00247603, // n0x0458 c0x0000 (---------------) + I sex + 0x00247604, // n0x0459 c0x0000 (---------------) + I sexy + 0x00256a83, // n0x045a c0x0000 (---------------) + I sfr + 0x4d66d702, // n0x045b c0x0135 (n0x1c56-n0x1c5d) + I sg + 0x4da01342, // n0x045c c0x0136 (n0x1c5d-n0x1c64) + I sh + 0x00257e49, // n0x045d c0x0000 (---------------) + I shangrila + 
0x0025b885, // n0x045e c0x0000 (---------------) + I sharp + 0x0025cb44, // n0x045f c0x0000 (---------------) + I shaw + 0x0025fd45, // n0x0460 c0x0000 (---------------) + I shell + 0x00211884, // n0x0461 c0x0000 (---------------) + I shia + 0x002fea47, // n0x0462 c0x0000 (---------------) + I shiksha + 0x003896c5, // n0x0463 c0x0000 (---------------) + I shoes + 0x002be486, // n0x0464 c0x0000 (---------------) + I shouji + 0x002c4484, // n0x0465 c0x0000 (---------------) + I show + 0x002c4488, // n0x0466 c0x0000 (---------------) + I showtime + 0x002c8307, // n0x0467 c0x0000 (---------------) + I shriram + 0x4de0a402, // n0x0468 c0x0137 (n0x1c64-n0x1c65) + I si + 0x00341f84, // n0x0469 c0x0000 (---------------) + I silk + 0x002f7b84, // n0x046a c0x0000 (---------------) + I sina + 0x00285cc7, // n0x046b c0x0000 (---------------) + I singles + 0x002810c4, // n0x046c c0x0000 (---------------) + I site + 0x0022eb82, // n0x046d c0x0000 (---------------) + I sj + 0x4e207842, // n0x046e c0x0138 (n0x1c65-n0x1c66) + I sk + 0x00209743, // n0x046f c0x0000 (---------------) + I ski + 0x002e76c4, // n0x0470 c0x0000 (---------------) + I skin + 0x002368c3, // n0x0471 c0x0000 (---------------) + I sky + 0x002368c5, // n0x0472 c0x0000 (---------------) + I skype + 0x4e624b82, // n0x0473 c0x0139 (n0x1c66-n0x1c6b) + I sl + 0x00375205, // n0x0474 c0x0000 (---------------) + I sling + 0x0024cdc2, // n0x0475 c0x0000 (---------------) + I sm + 0x00368185, // n0x0476 c0x0000 (---------------) + I smart + 0x0035e3c5, // n0x0477 c0x0000 (---------------) + I smile + 0x4ea14182, // n0x0478 c0x013a (n0x1c6b-n0x1c73) + I sn + 0x00214184, // n0x0479 c0x0000 (---------------) + I sncf + 0x4ee05682, // n0x047a c0x013b (n0x1c73-n0x1c76) + I so + 0x00325706, // n0x047b c0x0000 (---------------) + I soccer + 0x002a3986, // n0x047c c0x0000 (---------------) + I social + 0x0026b288, // n0x047d c0x0000 (---------------) + I softbank + 0x002b8688, // n0x047e c0x0000 (---------------) + I software + 
0x002f9444, // n0x047f c0x0000 (---------------) + I sohu + 0x00359f05, // n0x0480 c0x0000 (---------------) + I solar + 0x00359d09, // n0x0481 c0x0000 (---------------) + I solutions + 0x00356144, // n0x0482 c0x0000 (---------------) + I song + 0x003a0044, // n0x0483 c0x0000 (---------------) + I sony + 0x002bd0c3, // n0x0484 c0x0000 (---------------) + I soy + 0x0020bb45, // n0x0485 c0x0000 (---------------) + I space + 0x00371ac7, // n0x0486 c0x0000 (---------------) + I spiegel + 0x00209384, // n0x0487 c0x0000 (---------------) + I spot + 0x00332e4d, // n0x0488 c0x0000 (---------------) + I spreadbetting + 0x0033b802, // n0x0489 c0x0000 (---------------) + I sr + 0x0033b803, // n0x048a c0x0000 (---------------) + I srl + 0x0035a503, // n0x048b c0x0000 (---------------) + I srt + 0x4f202742, // n0x048c c0x013c (n0x1c76-n0x1c82) + I st + 0x00380745, // n0x048d c0x0000 (---------------) + I stada + 0x002320c7, // n0x048e c0x0000 (---------------) + I staples + 0x00228904, // n0x048f c0x0000 (---------------) + I star + 0x00228907, // n0x0490 c0x0000 (---------------) + I starhub + 0x0020f209, // n0x0491 c0x0000 (---------------) + I statebank + 0x002c24c9, // n0x0492 c0x0000 (---------------) + I statefarm + 0x003a0dc7, // n0x0493 c0x0000 (---------------) + I statoil + 0x00277743, // n0x0494 c0x0000 (---------------) + I stc + 0x00277748, // n0x0495 c0x0000 (---------------) + I stcgroup + 0x002a8009, // n0x0496 c0x0000 (---------------) + I stockholm + 0x00364547, // n0x0497 c0x0000 (---------------) + I storage + 0x00391185, // n0x0498 c0x0000 (---------------) + I store + 0x002e74c6, // n0x0499 c0x0000 (---------------) + I stream + 0x002e7906, // n0x049a c0x0000 (---------------) + I studio + 0x002e7a85, // n0x049b c0x0000 (---------------) + I study + 0x00253dc5, // n0x049c c0x0000 (---------------) + I style + 0x4f6023c2, // n0x049d c0x013d (n0x1c82-n0x1ca2) + I su + 0x00332385, // n0x049e c0x0000 (---------------) + I sucks + 0x002ba24a, // n0x049f c0x0000 
(---------------) + I supersport + 0x002be2c8, // n0x04a0 c0x0000 (---------------) + I supplies + 0x002a7806, // n0x04a1 c0x0000 (---------------) + I supply + 0x002e3907, // n0x04a2 c0x0000 (---------------) + I support + 0x0024c144, // n0x04a3 c0x0000 (---------------) + I surf + 0x002a9e07, // n0x04a4 c0x0000 (---------------) + I surgery + 0x002eef06, // n0x04a5 c0x0000 (---------------) + I suzuki + 0x4fa35f42, // n0x04a6 c0x013e (n0x1ca2-n0x1ca7) + I sv + 0x00376c06, // n0x04a7 c0x0000 (---------------) + I swatch + 0x002f15ca, // n0x04a8 c0x0000 (---------------) + I swiftcover + 0x002f1f85, // n0x04a9 c0x0000 (---------------) + I swiss + 0x4fef2802, // n0x04aa c0x013f (n0x1ca7-n0x1ca8) + I sx + 0x50289a02, // n0x04ab c0x0140 (n0x1ca8-n0x1cae) + I sy + 0x00329bc6, // n0x04ac c0x0000 (---------------) + I sydney + 0x002d5f48, // n0x04ad c0x0000 (---------------) + I symantec + 0x00394e07, // n0x04ae c0x0000 (---------------) + I systems + 0x5060b982, // n0x04af c0x0141 (n0x1cae-n0x1cb1) + I sz + 0x00210d43, // n0x04b0 c0x0000 (---------------) + I tab + 0x003a6506, // n0x04b1 c0x0000 (---------------) + I taipei + 0x0021eb04, // n0x04b2 c0x0000 (---------------) + I talk + 0x00395a06, // n0x04b3 c0x0000 (---------------) + I taobao + 0x00357846, // n0x04b4 c0x0000 (---------------) + I target + 0x00322a0a, // n0x04b5 c0x0000 (---------------) + I tatamotors + 0x0036cc45, // n0x04b6 c0x0000 (---------------) + I tatar + 0x00219906, // n0x04b7 c0x0000 (---------------) + I tattoo + 0x002203c3, // n0x04b8 c0x0000 (---------------) + I tax + 0x002203c4, // n0x04b9 c0x0000 (---------------) + I taxi + 0x00204442, // n0x04ba c0x0000 (---------------) + I tc + 0x0025edc3, // n0x04bb c0x0000 (---------------) + I tci + 0x50a0b182, // n0x04bc c0x0142 (n0x1cb1-n0x1cb2) + I td + 0x002c9683, // n0x04bd c0x0000 (---------------) + I tdk + 0x00367504, // n0x04be c0x0000 (---------------) + I team + 0x002d59c4, // n0x04bf c0x0000 (---------------) + I tech + 0x002d608a, 
// n0x04c0 c0x0000 (---------------) + I technology + 0x0022f7c3, // n0x04c1 c0x0000 (---------------) + I tel + 0x00286648, // n0x04c2 c0x0000 (---------------) + I telecity + 0x0030ec4a, // n0x04c3 c0x0000 (---------------) + I telefonica + 0x0023fa07, // n0x04c4 c0x0000 (---------------) + I temasek + 0x002f4806, // n0x04c5 c0x0000 (---------------) + I tennis + 0x0032ce44, // n0x04c6 c0x0000 (---------------) + I teva + 0x0025d9c2, // n0x04c7 c0x0000 (---------------) + I tf + 0x00204c42, // n0x04c8 c0x0000 (---------------) + I tg + 0x50e06342, // n0x04c9 c0x0143 (n0x1cb2-n0x1cb9) + I th + 0x0024b183, // n0x04ca c0x0000 (---------------) + I thd + 0x002573c7, // n0x04cb c0x0000 (---------------) + I theater + 0x00355e87, // n0x04cc c0x0000 (---------------) + I theatre + 0x003504cb, // n0x04cd c0x0000 (---------------) + I theguardian + 0x0034d4c4, // n0x04ce c0x0000 (---------------) + I tiaa + 0x002f6847, // n0x04cf c0x0000 (---------------) + I tickets + 0x002de646, // n0x04d0 c0x0000 (---------------) + I tienda + 0x00215707, // n0x04d1 c0x0000 (---------------) + I tiffany + 0x002e5c84, // n0x04d2 c0x0000 (---------------) + I tips + 0x0033d385, // n0x04d3 c0x0000 (---------------) + I tires + 0x002b7905, // n0x04d4 c0x0000 (---------------) + I tirol + 0x51226782, // n0x04d5 c0x0144 (n0x1cb9-n0x1cc8) + I tj + 0x00230886, // n0x04d6 c0x0000 (---------------) + I tjmaxx + 0x0036f443, // n0x04d7 c0x0000 (---------------) + I tjx + 0x0022ad02, // n0x04d8 c0x0000 (---------------) + I tk + 0x00231686, // n0x04d9 c0x0000 (---------------) + I tkmaxx + 0x516007c2, // n0x04da c0x0145 (n0x1cc8-n0x1cc9) + I tl + 0x51a00142, // n0x04db c0x0146 (n0x1cc9-n0x1cd1) + I tm + 0x00200145, // n0x04dc c0x0000 (---------------) + I tmall + 0x51e4f882, // n0x04dd c0x0147 (n0x1cd1-n0x1ce5) + I tn + 0x52208082, // n0x04de c0x0148 (n0x1ce5-n0x1ceb) + I to + 0x00265905, // n0x04df c0x0000 (---------------) + I today + 0x00341c05, // n0x04e0 c0x0000 (---------------) + I tokyo + 
0x002199c5, // n0x04e1 c0x0000 (---------------) + I tools + 0x00208083, // n0x04e2 c0x0000 (---------------) + I top + 0x00376345, // n0x04e3 c0x0000 (---------------) + I toray + 0x002d1287, // n0x04e4 c0x0000 (---------------) + I toshiba + 0x0025b605, // n0x04e5 c0x0000 (---------------) + I total + 0x002fd7c5, // n0x04e6 c0x0000 (---------------) + I tours + 0x002dc244, // n0x04e7 c0x0000 (---------------) + I town + 0x0025bb86, // n0x04e8 c0x0000 (---------------) + I toyota + 0x0026dac4, // n0x04e9 c0x0000 (---------------) + I toys + 0x52603002, // n0x04ea c0x0149 (n0x1ceb-n0x1d00) + I tr + 0x002673c5, // n0x04eb c0x0000 (---------------) + I trade + 0x002a4607, // n0x04ec c0x0000 (---------------) + I trading + 0x0022a6c8, // n0x04ed c0x0000 (---------------) + I training + 0x0029bec6, // n0x04ee c0x0000 (---------------) + I travel + 0x0029becd, // n0x04ef c0x0000 (---------------) + I travelchannel + 0x002a1a89, // n0x04f0 c0x0000 (---------------) + I travelers + 0x002a1a92, // n0x04f1 c0x0000 (---------------) + I travelersinsurance + 0x00329245, // n0x04f2 c0x0000 (---------------) + I trust + 0x0033f2c3, // n0x04f3 c0x0000 (---------------) + I trv + 0x5320e842, // n0x04f4 c0x014c (n0x1d02-n0x1d13) + I tt + 0x002e48c4, // n0x04f5 c0x0000 (---------------) + I tube + 0x002f89c3, // n0x04f6 c0x0000 (---------------) + I tui + 0x0035d745, // n0x04f7 c0x0000 (---------------) + I tunes + 0x002f2e45, // n0x04f8 c0x0000 (---------------) + I tushu + 0x53624e42, // n0x04f9 c0x014d (n0x1d13-n0x1d17) + I tv + 0x003644c3, // n0x04fa c0x0000 (---------------) + I tvs + 0x53a4e502, // n0x04fb c0x014e (n0x1d17-n0x1d25) + I tw + 0x53e1fe82, // n0x04fc c0x014f (n0x1d25-n0x1d31) + I tz + 0x54220502, // n0x04fd c0x0150 (n0x1d31-n0x1d80) + I ua + 0x0033bbc5, // n0x04fe c0x0000 (---------------) + I ubank + 0x0024a4c3, // n0x04ff c0x0000 (---------------) + I ubs + 0x00249a48, // n0x0500 c0x0000 (---------------) + I uconnect + 0x54601cc2, // n0x0501 c0x0151 
(n0x1d80-n0x1d89) + I ug + 0x54a00f82, // n0x0502 c0x0152 (n0x1d89-n0x1d94) + I uk + 0x002a6ac6, // n0x0503 c0x0000 (---------------) + I unicom + 0x00320a0a, // n0x0504 c0x0000 (---------------) + I university + 0x0020d503, // n0x0505 c0x0000 (---------------) + I uno + 0x00259d43, // n0x0506 c0x0000 (---------------) + I uol + 0x002d5243, // n0x0507 c0x0000 (---------------) + I ups + 0x55602382, // n0x0508 c0x0155 (n0x1d96-n0x1dd5) + I us + 0x63a01802, // n0x0509 c0x018e (n0x1e78-n0x1e7e) + I uy + 0x64211342, // n0x050a c0x0190 (n0x1e7f-n0x1e83) + I uz + 0x002000c2, // n0x050b c0x0000 (---------------) + I va + 0x00376a09, // n0x050c c0x0000 (---------------) + I vacations + 0x002bc5c4, // n0x050d c0x0000 (---------------) + I vana + 0x00344588, // n0x050e c0x0000 (---------------) + I vanguard + 0x646e6b02, // n0x050f c0x0191 (n0x1e83-n0x1e89) + I vc + 0x64a02b82, // n0x0510 c0x0192 (n0x1e89-n0x1e9a) + I ve + 0x00230285, // n0x0511 c0x0000 (---------------) + I vegas + 0x0023b808, // n0x0512 c0x0000 (---------------) + I ventures + 0x002f1788, // n0x0513 c0x0000 (---------------) + I verisign + 0x0039440c, // n0x0514 c0x0000 (---------------) + I versicherung + 0x0023f943, // n0x0515 c0x0000 (---------------) + I vet + 0x0023fd02, // n0x0516 c0x0000 (---------------) + I vg + 0x64e05d42, // n0x0517 c0x0193 (n0x1e9a-n0x1e9f) + I vi + 0x002c5706, // n0x0518 c0x0000 (---------------) + I viajes + 0x002f5685, // n0x0519 c0x0000 (---------------) + I video + 0x0031a003, // n0x051a c0x0000 (---------------) + I vig + 0x00311a06, // n0x051b c0x0000 (---------------) + I viking + 0x002f57c6, // n0x051c c0x0000 (---------------) + I villas + 0x00241543, // n0x051d c0x0000 (---------------) + I vin + 0x002f7ac3, // n0x051e c0x0000 (---------------) + I vip + 0x002f7e86, // n0x051f c0x0000 (---------------) + I virgin + 0x002f8404, // n0x0520 c0x0000 (---------------) + I visa + 0x002b48c6, // n0x0521 c0x0000 (---------------) + I vision + 0x002c9245, // n0x0522 c0x0000 
(---------------) + I vista + 0x002f878a, // n0x0523 c0x0000 (---------------) + I vistaprint + 0x00240444, // n0x0524 c0x0000 (---------------) + I viva + 0x002f97c4, // n0x0525 c0x0000 (---------------) + I vivo + 0x0034710a, // n0x0526 c0x0000 (---------------) + I vlaanderen + 0x65203442, // n0x0527 c0x0194 (n0x1e9f-n0x1eac) + I vn + 0x002760c5, // n0x0528 c0x0000 (---------------) + I vodka + 0x002fbd0a, // n0x0529 c0x0000 (---------------) + I volkswagen + 0x002fc945, // n0x052a c0x0000 (---------------) + I volvo + 0x002fd4c4, // n0x052b c0x0000 (---------------) + I vote + 0x002fd5c6, // n0x052c c0x0000 (---------------) + I voting + 0x002fd744, // n0x052d c0x0000 (---------------) + I voto + 0x00231006, // n0x052e c0x0000 (---------------) + I voyage + 0x65672082, // n0x052f c0x0195 (n0x1eac-n0x1eb0) + I vu + 0x0031f5c6, // n0x0530 c0x0000 (---------------) + I vuelos + 0x00320685, // n0x0531 c0x0000 (---------------) + I wales + 0x002010c7, // n0x0532 c0x0000 (---------------) + I walmart + 0x00293986, // n0x0533 c0x0000 (---------------) + I walter + 0x00242744, // n0x0534 c0x0000 (---------------) + I wang + 0x0033d6c7, // n0x0535 c0x0000 (---------------) + I wanggou + 0x0036f146, // n0x0536 c0x0000 (---------------) + I warman + 0x002aca45, // n0x0537 c0x0000 (---------------) + I watch + 0x003a3d87, // n0x0538 c0x0000 (---------------) + I watches + 0x00391887, // n0x0539 c0x0000 (---------------) + I weather + 0x0039188e, // n0x053a c0x0000 (---------------) + I weatherchannel + 0x00221a06, // n0x053b c0x0000 (---------------) + I webcam + 0x0022e645, // n0x053c c0x0000 (---------------) + I weber + 0x00281007, // n0x053d c0x0000 (---------------) + I website + 0x002f08c3, // n0x053e c0x0000 (---------------) + I wed + 0x0032f407, // n0x053f c0x0000 (---------------) + I wedding + 0x0020fe05, // n0x0540 c0x0000 (---------------) + I weibo + 0x002109c4, // n0x0541 c0x0000 (---------------) + I weir + 0x0022fe82, // n0x0542 c0x0000 (---------------) + 
I wf + 0x003a43c7, // n0x0543 c0x0000 (---------------) + I whoswho + 0x002eee04, // n0x0544 c0x0000 (---------------) + I wien + 0x0037c484, // n0x0545 c0x0000 (---------------) + I wiki + 0x0025a8cb, // n0x0546 c0x0000 (---------------) + I williamhill + 0x0021cbc3, // n0x0547 c0x0000 (---------------) + I win + 0x002afe07, // n0x0548 c0x0000 (---------------) + I windows + 0x0021cbc4, // n0x0549 c0x0000 (---------------) + I wine + 0x002b0f07, // n0x054a c0x0000 (---------------) + I winners + 0x00231c43, // n0x054b c0x0000 (---------------) + I wme + 0x0032ae4d, // n0x054c c0x0000 (---------------) + I wolterskluwer + 0x00380f88, // n0x054d c0x0000 (---------------) + I woodside + 0x00255c44, // n0x054e c0x0000 (---------------) + I work + 0x00351f05, // n0x054f c0x0000 (---------------) + I works + 0x00300b85, // n0x0550 c0x0000 (---------------) + I world + 0x002ff3c3, // n0x0551 c0x0000 (---------------) + I wow + 0x65a0b942, // n0x0552 c0x0196 (n0x1eb0-n0x1eb7) + I ws + 0x003002c3, // n0x0553 c0x0000 (---------------) + I wtc + 0x00300783, // n0x0554 c0x0000 (---------------) + I wtf + 0x0021bb44, // n0x0555 c0x0000 (---------------) + I xbox + 0x0027bec5, // n0x0556 c0x0000 (---------------) + I xerox + 0x00230a07, // n0x0557 c0x0000 (---------------) + I xfinity + 0x00220446, // n0x0558 c0x0000 (---------------) + I xihuan + 0x00367243, // n0x0559 c0x0000 (---------------) + I xin + 0x002317cb, // n0x055a c0x0000 (---------------) + I xn--11b4c3d + 0x0024830b, // n0x055b c0x0000 (---------------) + I xn--1ck2e1b + 0x0026a7cb, // n0x055c c0x0000 (---------------) + I xn--1qqw23a + 0x0027bfca, // n0x055d c0x0000 (---------------) + I xn--30rr7y + 0x002a718b, // n0x055e c0x0000 (---------------) + I xn--3bst00m + 0x002daa8b, // n0x055f c0x0000 (---------------) + I xn--3ds443g + 0x002d3d4c, // n0x0560 c0x0000 (---------------) + I xn--3e0b707e + 0x002f2851, // n0x0561 c0x0000 (---------------) + I xn--3oq18vl8pn36a + 0x00339a8a, // n0x0562 c0x0000 
(---------------) + I xn--3pxu8k + 0x0034a00b, // n0x0563 c0x0000 (---------------) + I xn--42c2d9a + 0x00370d8b, // n0x0564 c0x0000 (---------------) + I xn--45brj9c + 0x003a374a, // n0x0565 c0x0000 (---------------) + I xn--45q11c + 0x003a5a0a, // n0x0566 c0x0000 (---------------) + I xn--4gbrim + 0x00300f8d, // n0x0567 c0x0000 (---------------) + I xn--4gq48lf9j + 0x0030218e, // n0x0568 c0x0000 (---------------) + I xn--54b7fta0cc + 0x0030270b, // n0x0569 c0x0000 (---------------) + I xn--55qw42g + 0x003029ca, // n0x056a c0x0000 (---------------) + I xn--55qx5d + 0x00303d11, // n0x056b c0x0000 (---------------) + I xn--5su34j936bgsg + 0x0030414a, // n0x056c c0x0000 (---------------) + I xn--5tzm5g + 0x0030464b, // n0x056d c0x0000 (---------------) + I xn--6frz82g + 0x00304b8e, // n0x056e c0x0000 (---------------) + I xn--6qq986b3xl + 0x0030550c, // n0x056f c0x0000 (---------------) + I xn--80adxhks + 0x0030648b, // n0x0570 c0x0000 (---------------) + I xn--80ao21a + 0x0030674e, // n0x0571 c0x0000 (---------------) + I xn--80aqecdr1a + 0x00306acc, // n0x0572 c0x0000 (---------------) + I xn--80asehdb + 0x00309b8a, // n0x0573 c0x0000 (---------------) + I xn--80aswg + 0x0030a9cc, // n0x0574 c0x0000 (---------------) + I xn--8y0a063a + 0x65f0acca, // n0x0575 c0x0197 (n0x1eb7-n0x1ebd) + I xn--90a3ac + 0x0030bb89, // n0x0576 c0x0000 (---------------) + I xn--90ais + 0x0030d14a, // n0x0577 c0x0000 (---------------) + I xn--9dbq2a + 0x0030d3ca, // n0x0578 c0x0000 (---------------) + I xn--9et52u + 0x0030d64b, // n0x0579 c0x0000 (---------------) + I xn--9krt00a + 0x00310c4e, // n0x057a c0x0000 (---------------) + I xn--b4w605ferd + 0x00310fd1, // n0x057b c0x0000 (---------------) + I xn--bck1b9a5dre4c + 0x00318cc9, // n0x057c c0x0000 (---------------) + I xn--c1avg + 0x00318f0a, // n0x057d c0x0000 (---------------) + I xn--c2br7g + 0x00319a4b, // n0x057e c0x0000 (---------------) + I xn--cck2b3b + 0x0031b68a, // n0x057f c0x0000 (---------------) + I xn--cg4bki + 
0x0031bfd6, // n0x0580 c0x0000 (---------------) + I xn--clchc0ea0b2g2a9gcd + 0x0031e50b, // n0x0581 c0x0000 (---------------) + I xn--czr694b + 0x0032398a, // n0x0582 c0x0000 (---------------) + I xn--czrs0t + 0x003241ca, // n0x0583 c0x0000 (---------------) + I xn--czru2d + 0x0032674b, // n0x0584 c0x0000 (---------------) + I xn--d1acj3b + 0x00328cc9, // n0x0585 c0x0000 (---------------) + I xn--d1alf + 0x0032b7cd, // n0x0586 c0x0000 (---------------) + I xn--eckvdtc9d + 0x0032c18b, // n0x0587 c0x0000 (---------------) + I xn--efvy88h + 0x0032d08b, // n0x0588 c0x0000 (---------------) + I xn--estv75g + 0x0032da4b, // n0x0589 c0x0000 (---------------) + I xn--fct429k + 0x0032dec9, // n0x058a c0x0000 (---------------) + I xn--fhbei + 0x0032e50e, // n0x058b c0x0000 (---------------) + I xn--fiq228c5hs + 0x0032ebca, // n0x058c c0x0000 (---------------) + I xn--fiq64b + 0x003328ca, // n0x058d c0x0000 (---------------) + I xn--fiqs8s + 0x00332c0a, // n0x058e c0x0000 (---------------) + I xn--fiqz9s + 0x003334cb, // n0x058f c0x0000 (---------------) + I xn--fjq720a + 0x00333d0b, // n0x0590 c0x0000 (---------------) + I xn--flw351e + 0x00333fcd, // n0x0591 c0x0000 (---------------) + I xn--fpcrj9c3d + 0x0033558d, // n0x0592 c0x0000 (---------------) + I xn--fzc2c9e2c + 0x00335ad0, // n0x0593 c0x0000 (---------------) + I xn--fzys8d69uvgm + 0x00335f8b, // n0x0594 c0x0000 (---------------) + I xn--g2xx48c + 0x00336d4c, // n0x0595 c0x0000 (---------------) + I xn--gckr3f0f + 0x00337a0b, // n0x0596 c0x0000 (---------------) + I xn--gecrj9c + 0x0033a58b, // n0x0597 c0x0000 (---------------) + I xn--gk3at1e + 0x0033ca0b, // n0x0598 c0x0000 (---------------) + I xn--h2brj9c + 0x0034034b, // n0x0599 c0x0000 (---------------) + I xn--hxt814e + 0x00340dcf, // n0x059a c0x0000 (---------------) + I xn--i1b6b1a6a2e + 0x0034118b, // n0x059b c0x0000 (---------------) + I xn--imr513n + 0x0034280a, // n0x059c c0x0000 (---------------) + I xn--io0a7i + 0x00343209, // n0x059d c0x0000 
(---------------) + I xn--j1aef + 0x00343e49, // n0x059e c0x0000 (---------------) + I xn--j1amh + 0x0034478b, // n0x059f c0x0000 (---------------) + I xn--j6w193g + 0x00344a4e, // n0x05a0 c0x0000 (---------------) + I xn--jlq61u9w7b + 0x0034778b, // n0x05a1 c0x0000 (---------------) + I xn--jvr189m + 0x00348bcf, // n0x05a2 c0x0000 (---------------) + I xn--kcrx77d1x4a + 0x0034afcb, // n0x05a3 c0x0000 (---------------) + I xn--kprw13d + 0x0034b28b, // n0x05a4 c0x0000 (---------------) + I xn--kpry57d + 0x0034b54b, // n0x05a5 c0x0000 (---------------) + I xn--kpu716f + 0x0034b98a, // n0x05a6 c0x0000 (---------------) + I xn--kput3i + 0x00352909, // n0x05a7 c0x0000 (---------------) + I xn--l1acc + 0x0035800f, // n0x05a8 c0x0000 (---------------) + I xn--lgbbat1ad8j + 0x0035c74c, // n0x05a9 c0x0000 (---------------) + I xn--mgb2ddes + 0x0035cfcc, // n0x05aa c0x0000 (---------------) + I xn--mgb9awbf + 0x0035d40e, // n0x05ab c0x0000 (---------------) + I xn--mgba3a3ejt + 0x0035d94f, // n0x05ac c0x0000 (---------------) + I xn--mgba3a4f16a + 0x0035dd0e, // n0x05ad c0x0000 (---------------) + I xn--mgba3a4fra + 0x0035e6d0, // n0x05ae c0x0000 (---------------) + I xn--mgba7c0bbn0a + 0x0035eacf, // n0x05af c0x0000 (---------------) + I xn--mgbaakc7dvf + 0x0035f04e, // n0x05b0 c0x0000 (---------------) + I xn--mgbaam7a8h + 0x0035f50c, // n0x05b1 c0x0000 (---------------) + I xn--mgbab2bd + 0x0035f812, // n0x05b2 c0x0000 (---------------) + I xn--mgbai9a5eva00b + 0x00360b51, // n0x05b3 c0x0000 (---------------) + I xn--mgbai9azgqp6j + 0x0036110e, // n0x05b4 c0x0000 (---------------) + I xn--mgbayh7gpa + 0x0036154e, // n0x05b5 c0x0000 (---------------) + I xn--mgbb9fbpob + 0x00361a8e, // n0x05b6 c0x0000 (---------------) + I xn--mgbbh1a71e + 0x00361e0f, // n0x05b7 c0x0000 (---------------) + I xn--mgbc0a9azcg + 0x003621ce, // n0x05b8 c0x0000 (---------------) + I xn--mgbca7dzdo + 0x00362553, // n0x05b9 c0x0000 (---------------) + I xn--mgberp4a5d4a87g + 0x00362a11, // 
n0x05ba c0x0000 (---------------) + I xn--mgberp4a5d4ar + 0x00362e4e, // n0x05bb c0x0000 (---------------) + I xn--mgbi4ecexp + 0x003632cc, // n0x05bc c0x0000 (---------------) + I xn--mgbpl2fh + 0x00363713, // n0x05bd c0x0000 (---------------) + I xn--mgbqly7c0a67fbc + 0x00363e90, // n0x05be c0x0000 (---------------) + I xn--mgbqly7cvafr + 0x0036470c, // n0x05bf c0x0000 (---------------) + I xn--mgbt3dhd + 0x00364a0c, // n0x05c0 c0x0000 (---------------) + I xn--mgbtf8fl + 0x00364f4b, // n0x05c1 c0x0000 (---------------) + I xn--mgbtx2b + 0x0036540e, // n0x05c2 c0x0000 (---------------) + I xn--mgbx4cd0ab + 0x0036590b, // n0x05c3 c0x0000 (---------------) + I xn--mix082f + 0x0036688b, // n0x05c4 c0x0000 (---------------) + I xn--mix891f + 0x003678cc, // n0x05c5 c0x0000 (---------------) + I xn--mk1bu44c + 0x0036fa4a, // n0x05c6 c0x0000 (---------------) + I xn--mxtq1m + 0x0037058c, // n0x05c7 c0x0000 (---------------) + I xn--ngbc5azd + 0x0037088c, // n0x05c8 c0x0000 (---------------) + I xn--ngbe9e0a + 0x00370b89, // n0x05c9 c0x0000 (---------------) + I xn--ngbrx + 0x0037254b, // n0x05ca c0x0000 (---------------) + I xn--nnx388a + 0x00372808, // n0x05cb c0x0000 (---------------) + I xn--node + 0x00372cc9, // n0x05cc c0x0000 (---------------) + I xn--nqv7f + 0x00372ccf, // n0x05cd c0x0000 (---------------) + I xn--nqv7fs00ema + 0x0037464b, // n0x05ce c0x0000 (---------------) + I xn--nyqy26a + 0x0037534a, // n0x05cf c0x0000 (---------------) + I xn--o3cw4h + 0x0037708c, // n0x05d0 c0x0000 (---------------) + I xn--ogbpf8fl + 0x00379349, // n0x05d1 c0x0000 (---------------) + I xn--p1acf + 0x003795c8, // n0x05d2 c0x0000 (---------------) + I xn--p1ai + 0x003797cb, // n0x05d3 c0x0000 (---------------) + I xn--pbt977c + 0x0037a58b, // n0x05d4 c0x0000 (---------------) + I xn--pgbs0dh + 0x0037b44a, // n0x05d5 c0x0000 (---------------) + I xn--pssy2u + 0x0037b6cb, // n0x05d6 c0x0000 (---------------) + I xn--q9jyb4c + 0x0037cc4c, // n0x05d7 c0x0000 (---------------) + 
I xn--qcka1pmc + 0x0037d688, // n0x05d8 c0x0000 (---------------) + I xn--qxam + 0x0038118b, // n0x05d9 c0x0000 (---------------) + I xn--rhqv96g + 0x0038390b, // n0x05da c0x0000 (---------------) + I xn--rovu88b + 0x00386f0b, // n0x05db c0x0000 (---------------) + I xn--s9brj9c + 0x0038860b, // n0x05dc c0x0000 (---------------) + I xn--ses554g + 0x0039140b, // n0x05dd c0x0000 (---------------) + I xn--t60b56a + 0x003916c9, // n0x05de c0x0000 (---------------) + I xn--tckwe + 0x00391c0d, // n0x05df c0x0000 (---------------) + I xn--tiq49xqyj + 0x0039654a, // n0x05e0 c0x0000 (---------------) + I xn--unup4y + 0x00397497, // n0x05e1 c0x0000 (---------------) + I xn--vermgensberater-ctb + 0x003982d8, // n0x05e2 c0x0000 (---------------) + I xn--vermgensberatung-pwb + 0x0039a849, // n0x05e3 c0x0000 (---------------) + I xn--vhquv + 0x0039ba4b, // n0x05e4 c0x0000 (---------------) + I xn--vuq861b + 0x0039c814, // n0x05e5 c0x0000 (---------------) + I xn--w4r85el8fhu5dnra + 0x0039cd0b, // n0x05e6 c0x0000 (---------------) + I xn--w4rs40l + 0x0039d28a, // n0x05e7 c0x0000 (---------------) + I xn--wgbh1c + 0x0039d84a, // n0x05e8 c0x0000 (---------------) + I xn--wgbl6a + 0x0039dacb, // n0x05e9 c0x0000 (---------------) + I xn--xhq521b + 0x003a1450, // n0x05ea c0x0000 (---------------) + I xn--xkc2al3hye2a + 0x003a1851, // n0x05eb c0x0000 (---------------) + I xn--xkc2dl3a5ee0h + 0x003a210a, // n0x05ec c0x0000 (---------------) + I xn--y9a3aq + 0x003a2d4d, // n0x05ed c0x0000 (---------------) + I xn--yfro4i67o + 0x003a344d, // n0x05ee c0x0000 (---------------) + I xn--ygbi2ammx + 0x003a5dcb, // n0x05ef c0x0000 (---------------) + I xn--zfr164b + 0x003a6b46, // n0x05f0 c0x0000 (---------------) + I xperia + 0x00230983, // n0x05f1 c0x0000 (---------------) + I xxx + 0x00247683, // n0x05f2 c0x0000 (---------------) + I xyz + 0x00307e86, // n0x05f3 c0x0000 (---------------) + I yachts + 0x0028d645, // n0x05f4 c0x0000 (---------------) + I yahoo + 0x002c7947, // n0x05f5 c0x0000 
(---------------) + I yamaxun + 0x00339946, // n0x05f6 c0x0000 (---------------) + I yandex + 0x01608242, // n0x05f7 c0x0005 (---------------)* o I ye + 0x003710c9, // n0x05f8 c0x0000 (---------------) + I yodobashi + 0x003559c4, // n0x05f9 c0x0000 (---------------) + I yoga + 0x002d09c8, // n0x05fa c0x0000 (---------------) + I yokohama + 0x0024b0c3, // n0x05fb c0x0000 (---------------) + I you + 0x002e4807, // n0x05fc c0x0000 (---------------) + I youtube + 0x00244002, // n0x05fd c0x0000 (---------------) + I yt + 0x002ac203, // n0x05fe c0x0000 (---------------) + I yun + 0x66205f82, // n0x05ff c0x0198 (n0x1ebd-n0x1ece) o I za + 0x002c3fc6, // n0x0600 c0x0000 (---------------) + I zappos + 0x002c4d04, // n0x0601 c0x0000 (---------------) + I zara + 0x002eb584, // n0x0602 c0x0000 (---------------) + I zero + 0x002432c3, // n0x0603 c0x0000 (---------------) + I zip + 0x002432c5, // n0x0604 c0x0000 (---------------) + I zippo + 0x01700d02, // n0x0605 c0x0005 (---------------)* o I zm + 0x002dce04, // n0x0606 c0x0000 (---------------) + I zone + 0x00273947, // n0x0607 c0x0000 (---------------) + I zuerich + 0x016afdc2, // n0x0608 c0x0005 (---------------)* o I zw + 0x00233503, // n0x0609 c0x0000 (---------------) + I com + 0x0023a783, // n0x060a c0x0000 (---------------) + I edu + 0x0026cc83, // n0x060b c0x0000 (---------------) + I gov + 0x00209003, // n0x060c c0x0000 (---------------) + I mil + 0x0021fe03, // n0x060d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x060e c0x0000 (---------------) + I org + 0x00201483, // n0x060f c0x0000 (---------------) + I nom + 0x00201542, // n0x0610 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x0611 c0x0000 (---------------) + blogspot + 0x00200742, // n0x0612 c0x0000 (---------------) + I co + 0x0026cc83, // n0x0613 c0x0000 (---------------) + I gov + 0x00209003, // n0x0614 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0615 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0616 c0x0000 (---------------) 
+ I org + 0x00217443, // n0x0617 c0x0000 (---------------) + I sch + 0x00316196, // n0x0618 c0x0000 (---------------) + I accident-investigation + 0x00317cd3, // n0x0619 c0x0000 (---------------) + I accident-prevention + 0x002f66c9, // n0x061a c0x0000 (---------------) + I aerobatic + 0x002389c8, // n0x061b c0x0000 (---------------) + I aeroclub + 0x002dd449, // n0x061c c0x0000 (---------------) + I aerodrome + 0x002fbe86, // n0x061d c0x0000 (---------------) + I agents + 0x0030c790, // n0x061e c0x0000 (---------------) + I air-surveillance + 0x00358d93, // n0x061f c0x0000 (---------------) + I air-traffic-control + 0x00204908, // n0x0620 c0x0000 (---------------) + I aircraft + 0x002d7e07, // n0x0621 c0x0000 (---------------) + I airline + 0x002793c7, // n0x0622 c0x0000 (---------------) + I airport + 0x0021868a, // n0x0623 c0x0000 (---------------) + I airtraffic + 0x002c8609, // n0x0624 c0x0000 (---------------) + I ambulance + 0x0037d809, // n0x0625 c0x0000 (---------------) + I amusement + 0x002d488b, // n0x0626 c0x0000 (---------------) + I association + 0x0031a686, // n0x0627 c0x0000 (---------------) + I author + 0x0022faca, // n0x0628 c0x0000 (---------------) + I ballooning + 0x00220c46, // n0x0629 c0x0000 (---------------) + I broker + 0x003555c3, // n0x062a c0x0000 (---------------) + I caa + 0x002e9b05, // n0x062b c0x0000 (---------------) + I cargo + 0x00351008, // n0x062c c0x0000 (---------------) + I catering + 0x003257cd, // n0x062d c0x0000 (---------------) + I certification + 0x0035894c, // n0x062e c0x0000 (---------------) + I championship + 0x00320347, // n0x062f c0x0000 (---------------) + I charter + 0x0035b70d, // n0x0630 c0x0000 (---------------) + I civilaviation + 0x00238ac4, // n0x0631 c0x0000 (---------------) + I club + 0x00236cca, // n0x0632 c0x0000 (---------------) + I conference + 0x0023784a, // n0x0633 c0x0000 (---------------) + I consultant + 0x00237d0a, // n0x0634 c0x0000 (---------------) + I consulting + 0x00308c87, // 
n0x0635 c0x0000 (---------------) + I control + 0x00242207, // n0x0636 c0x0000 (---------------) + I council + 0x00245184, // n0x0637 c0x0000 (---------------) + I crew + 0x0022dcc6, // n0x0638 c0x0000 (---------------) + I design + 0x00356f44, // n0x0639 c0x0000 (---------------) + I dgca + 0x002fe188, // n0x063a c0x0000 (---------------) + I educator + 0x00322189, // n0x063b c0x0000 (---------------) + I emergency + 0x00369146, // n0x063c c0x0000 (---------------) + I engine + 0x00369148, // n0x063d c0x0000 (---------------) + I engineer + 0x00247acd, // n0x063e c0x0000 (---------------) + I entertainment + 0x002c2709, // n0x063f c0x0000 (---------------) + I equipment + 0x00239488, // n0x0640 c0x0000 (---------------) + I exchange + 0x00247487, // n0x0641 c0x0000 (---------------) + I express + 0x0030eeca, // n0x0642 c0x0000 (---------------) + I federation + 0x00251286, // n0x0643 c0x0000 (---------------) + I flight + 0x0025cf87, // n0x0644 c0x0000 (---------------) + I freight + 0x00240d04, // n0x0645 c0x0000 (---------------) + I fuel + 0x0026e307, // n0x0646 c0x0000 (---------------) + I gliding + 0x0026cc8a, // n0x0647 c0x0000 (---------------) + I government + 0x0031240e, // n0x0648 c0x0000 (---------------) + I groundhandling + 0x0020ab45, // n0x0649 c0x0000 (---------------) + I group + 0x002ff10b, // n0x064a c0x0000 (---------------) + I hanggliding + 0x002e9e49, // n0x064b c0x0000 (---------------) + I homebuilt + 0x0023e7c9, // n0x064c c0x0000 (---------------) + I insurance + 0x0033be47, // n0x064d c0x0000 (---------------) + I journal + 0x0038e60a, // n0x064e c0x0000 (---------------) + I journalist + 0x00285c07, // n0x064f c0x0000 (---------------) + I leasing + 0x002e2d49, // n0x0650 c0x0000 (---------------) + I logistics + 0x00395fc8, // n0x0651 c0x0000 (---------------) + I magazine + 0x0027634b, // n0x0652 c0x0000 (---------------) + I maintenance + 0x003025c5, // n0x0653 c0x0000 (---------------) + I media + 0x0031210a, // n0x0654 c0x0000 
(---------------) + I microlight + 0x002a3209, // n0x0655 c0x0000 (---------------) + I modelling + 0x00319f8a, // n0x0656 c0x0000 (---------------) + I navigation + 0x002c530b, // n0x0657 c0x0000 (---------------) + I parachuting + 0x0026e20b, // n0x0658 c0x0000 (---------------) + I paragliding + 0x002d4615, // n0x0659 c0x0000 (---------------) + I passenger-association + 0x002d6505, // n0x065a c0x0000 (---------------) + I pilot + 0x00247505, // n0x065b c0x0000 (---------------) + I press + 0x002e204a, // n0x065c c0x0000 (---------------) + I production + 0x00336aca, // n0x065d c0x0000 (---------------) + I recreation + 0x002fae47, // n0x065e c0x0000 (---------------) + I repbody + 0x0021d683, // n0x065f c0x0000 (---------------) + I res + 0x0029fa08, // n0x0660 c0x0000 (---------------) + I research + 0x002ce74a, // n0x0661 c0x0000 (---------------) + I rotorcraft + 0x00215946, // n0x0662 c0x0000 (---------------) + I safety + 0x002466c9, // n0x0663 c0x0000 (---------------) + I scientist + 0x00206808, // n0x0664 c0x0000 (---------------) + I services + 0x002c4484, // n0x0665 c0x0000 (---------------) + I show + 0x0027d709, // n0x0666 c0x0000 (---------------) + I skydiving + 0x002b8688, // n0x0667 c0x0000 (---------------) + I software + 0x002abd47, // n0x0668 c0x0000 (---------------) + I student + 0x002673c6, // n0x0669 c0x0000 (---------------) + I trader + 0x002a4607, // n0x066a c0x0000 (---------------) + I trading + 0x00295207, // n0x066b c0x0000 (---------------) + I trainer + 0x00244bc5, // n0x066c c0x0000 (---------------) + I union + 0x002dbd0c, // n0x066d c0x0000 (---------------) + I workinggroup + 0x00351f05, // n0x066e c0x0000 (---------------) + I works + 0x00233503, // n0x066f c0x0000 (---------------) + I com + 0x0023a783, // n0x0670 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0671 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x0672 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0673 c0x0000 (---------------) + I org + 
0x00200742, // n0x0674 c0x0000 (---------------) + I co + 0x00233503, // n0x0675 c0x0000 (---------------) + I com + 0x0021fe03, // n0x0676 c0x0000 (---------------) + I net + 0x00201483, // n0x0677 c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x0678 c0x0000 (---------------) + I org + 0x00233503, // n0x0679 c0x0000 (---------------) + I com + 0x0021fe03, // n0x067a c0x0000 (---------------) + I net + 0x0020b283, // n0x067b c0x0000 (---------------) + I off + 0x0022d1c3, // n0x067c c0x0000 (---------------) + I org + 0x000ffa08, // n0x067d c0x0000 (---------------) + blogspot + 0x00233503, // n0x067e c0x0000 (---------------) + I com + 0x0023a783, // n0x067f c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0680 c0x0000 (---------------) + I gov + 0x00209003, // n0x0681 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0682 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0683 c0x0000 (---------------) + I org + 0x000ffa08, // n0x0684 c0x0000 (---------------) + blogspot + 0x00200742, // n0x0685 c0x0000 (---------------) + I co + 0x00202602, // n0x0686 c0x0000 (---------------) + I ed + 0x00237f42, // n0x0687 c0x0000 (---------------) + I gv + 0x00201e42, // n0x0688 c0x0000 (---------------) + I it + 0x00200c42, // n0x0689 c0x0000 (---------------) + I og + 0x002718c2, // n0x068a c0x0000 (---------------) + I pb + 0x04633503, // n0x068b c0x0011 (n0x0694-n0x0695) + I com + 0x0023a783, // n0x068c c0x0000 (---------------) + I edu + 0x00213183, // n0x068d c0x0000 (---------------) + I gob + 0x0026cc83, // n0x068e c0x0000 (---------------) + I gov + 0x00201603, // n0x068f c0x0000 (---------------) + I int + 0x00209003, // n0x0690 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0691 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0692 c0x0000 (---------------) + I org + 0x00209e43, // n0x0693 c0x0000 (---------------) + I tur + 0x000ffa08, // n0x0694 c0x0000 (---------------) + blogspot + 0x00255704, // n0x0695 c0x0000 (---------------) + 
I e164 + 0x002edc07, // n0x0696 c0x0000 (---------------) + I in-addr + 0x00215dc3, // n0x0697 c0x0000 (---------------) + I ip6 + 0x00238804, // n0x0698 c0x0000 (---------------) + I iris + 0x0020dd83, // n0x0699 c0x0000 (---------------) + I uri + 0x00285383, // n0x069a c0x0000 (---------------) + I urn + 0x0026cc83, // n0x069b c0x0000 (---------------) + I gov + 0x00201542, // n0x069c c0x0000 (---------------) + I ac + 0x00130b83, // n0x069d c0x0000 (---------------) + biz + 0x05600742, // n0x069e c0x0015 (n0x06a3-n0x06a4) + I co + 0x00237f42, // n0x069f c0x0000 (---------------) + I gv + 0x001a1244, // n0x06a0 c0x0000 (---------------) + info + 0x00200282, // n0x06a1 c0x0000 (---------------) + I or + 0x000e1c44, // n0x06a2 c0x0000 (---------------) + priv + 0x000ffa08, // n0x06a3 c0x0000 (---------------) + blogspot + 0x00239a03, // n0x06a4 c0x0000 (---------------) + I act + 0x002afc83, // n0x06a5 c0x0000 (---------------) + I asn + 0x05e33503, // n0x06a6 c0x0017 (n0x06b6-n0x06b7) + I com + 0x00236cc4, // n0x06a7 c0x0000 (---------------) + I conf + 0x0623a783, // n0x06a8 c0x0018 (n0x06b7-n0x06bf) + I edu + 0x0666cc83, // n0x06a9 c0x0019 (n0x06bf-n0x06c4) + I gov + 0x0020c782, // n0x06aa c0x0000 (---------------) + I id + 0x003a1244, // n0x06ab c0x0000 (---------------) + I info + 0x0021fe03, // n0x06ac c0x0000 (---------------) + I net + 0x002f09c3, // n0x06ad c0x0000 (---------------) + I nsw + 0x002009c2, // n0x06ae c0x0000 (---------------) + I nt + 0x0022d1c3, // n0x06af c0x0000 (---------------) + I org + 0x0021c142, // n0x06b0 c0x0000 (---------------) + I oz + 0x002e6383, // n0x06b1 c0x0000 (---------------) + I qld + 0x002004c2, // n0x06b2 c0x0000 (---------------) + I sa + 0x00201e83, // n0x06b3 c0x0000 (---------------) + I tas + 0x002068c3, // n0x06b4 c0x0000 (---------------) + I vic + 0x002010c2, // n0x06b5 c0x0000 (---------------) + I wa + 0x000ffa08, // n0x06b6 c0x0000 (---------------) + blogspot + 0x00239a03, // n0x06b7 c0x0000 
(---------------) + I act + 0x002f09c3, // n0x06b8 c0x0000 (---------------) + I nsw + 0x002009c2, // n0x06b9 c0x0000 (---------------) + I nt + 0x002e6383, // n0x06ba c0x0000 (---------------) + I qld + 0x002004c2, // n0x06bb c0x0000 (---------------) + I sa + 0x00201e83, // n0x06bc c0x0000 (---------------) + I tas + 0x002068c3, // n0x06bd c0x0000 (---------------) + I vic + 0x002010c2, // n0x06be c0x0000 (---------------) + I wa + 0x002e6383, // n0x06bf c0x0000 (---------------) + I qld + 0x002004c2, // n0x06c0 c0x0000 (---------------) + I sa + 0x00201e83, // n0x06c1 c0x0000 (---------------) + I tas + 0x002068c3, // n0x06c2 c0x0000 (---------------) + I vic + 0x002010c2, // n0x06c3 c0x0000 (---------------) + I wa + 0x00233503, // n0x06c4 c0x0000 (---------------) + I com + 0x00330b83, // n0x06c5 c0x0000 (---------------) + I biz + 0x00233503, // n0x06c6 c0x0000 (---------------) + I com + 0x0023a783, // n0x06c7 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x06c8 c0x0000 (---------------) + I gov + 0x003a1244, // n0x06c9 c0x0000 (---------------) + I info + 0x00201603, // n0x06ca c0x0000 (---------------) + I int + 0x00209003, // n0x06cb c0x0000 (---------------) + I mil + 0x00205284, // n0x06cc c0x0000 (---------------) + I name + 0x0021fe03, // n0x06cd c0x0000 (---------------) + I net + 0x0022d1c3, // n0x06ce c0x0000 (---------------) + I org + 0x00209302, // n0x06cf c0x0000 (---------------) + I pp + 0x00220e43, // n0x06d0 c0x0000 (---------------) + I pro + 0x000ffa08, // n0x06d1 c0x0000 (---------------) + blogspot + 0x00200742, // n0x06d2 c0x0000 (---------------) + I co + 0x00233503, // n0x06d3 c0x0000 (---------------) + I com + 0x0023a783, // n0x06d4 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x06d5 c0x0000 (---------------) + I gov + 0x00209003, // n0x06d6 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x06d7 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x06d8 c0x0000 (---------------) + I org + 0x00209702, // n0x06d9 
c0x0000 (---------------) + I rs + 0x002d8144, // n0x06da c0x0000 (---------------) + I unbi + 0x003a6844, // n0x06db c0x0000 (---------------) + I unsa + 0x00330b83, // n0x06dc c0x0000 (---------------) + I biz + 0x00200742, // n0x06dd c0x0000 (---------------) + I co + 0x00233503, // n0x06de c0x0000 (---------------) + I com + 0x0023a783, // n0x06df c0x0000 (---------------) + I edu + 0x0026cc83, // n0x06e0 c0x0000 (---------------) + I gov + 0x003a1244, // n0x06e1 c0x0000 (---------------) + I info + 0x0021fe03, // n0x06e2 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x06e3 c0x0000 (---------------) + I org + 0x00391185, // n0x06e4 c0x0000 (---------------) + I store + 0x00224e42, // n0x06e5 c0x0000 (---------------) + I tv + 0x00201542, // n0x06e6 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x06e7 c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x06e8 c0x0000 (---------------) + I gov + 0x0025c3c1, // n0x06e9 c0x0000 (---------------) + I 0 + 0x0022a0c1, // n0x06ea c0x0000 (---------------) + I 1 + 0x002484c1, // n0x06eb c0x0000 (---------------) + I 2 + 0x00231a01, // n0x06ec c0x0000 (---------------) + I 3 + 0x00231981, // n0x06ed c0x0000 (---------------) + I 4 + 0x002736c1, // n0x06ee c0x0000 (---------------) + I 5 + 0x00215e41, // n0x06ef c0x0000 (---------------) + I 6 + 0x0023e381, // n0x06f0 c0x0000 (---------------) + I 7 + 0x002f2a41, // n0x06f1 c0x0000 (---------------) + I 8 + 0x00301241, // n0x06f2 c0x0000 (---------------) + I 9 + 0x00200101, // n0x06f3 c0x0000 (---------------) + I a + 0x00200001, // n0x06f4 c0x0000 (---------------) + I b + 0x000ffa08, // n0x06f5 c0x0000 (---------------) + blogspot + 0x00200301, // n0x06f6 c0x0000 (---------------) + I c + 0x00200381, // n0x06f7 c0x0000 (---------------) + I d + 0x00200081, // n0x06f8 c0x0000 (---------------) + I e + 0x00200581, // n0x06f9 c0x0000 (---------------) + I f + 0x00200c81, // n0x06fa c0x0000 (---------------) + I g + 0x00200d81, // n0x06fb c0x0000 
(---------------) + I h + 0x00200041, // n0x06fc c0x0000 (---------------) + I i + 0x00201741, // n0x06fd c0x0000 (---------------) + I j + 0x00200fc1, // n0x06fe c0x0000 (---------------) + I k + 0x00200201, // n0x06ff c0x0000 (---------------) + I l + 0x00200181, // n0x0700 c0x0000 (---------------) + I m + 0x00200541, // n0x0701 c0x0000 (---------------) + I n + 0x00200281, // n0x0702 c0x0000 (---------------) + I o + 0x00200941, // n0x0703 c0x0000 (---------------) + I p + 0x00200401, // n0x0704 c0x0000 (---------------) + I q + 0x002002c1, // n0x0705 c0x0000 (---------------) + I r + 0x002004c1, // n0x0706 c0x0000 (---------------) + I s + 0x00200141, // n0x0707 c0x0000 (---------------) + I t + 0x00200441, // n0x0708 c0x0000 (---------------) + I u + 0x002000c1, // n0x0709 c0x0000 (---------------) + I v + 0x002010c1, // n0x070a c0x0000 (---------------) + I w + 0x00205381, // n0x070b c0x0000 (---------------) + I x + 0x00201841, // n0x070c c0x0000 (---------------) + I y + 0x00205f81, // n0x070d c0x0000 (---------------) + I z + 0x00233503, // n0x070e c0x0000 (---------------) + I com + 0x0023a783, // n0x070f c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0710 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x0711 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0712 c0x0000 (---------------) + I org + 0x00200742, // n0x0713 c0x0000 (---------------) + I co + 0x00233503, // n0x0714 c0x0000 (---------------) + I com + 0x0023a783, // n0x0715 c0x0000 (---------------) + I edu + 0x00200282, // n0x0716 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x0717 c0x0000 (---------------) + I org + 0x00009107, // n0x0718 c0x0000 (---------------) + dscloud + 0x00013886, // n0x0719 c0x0000 (---------------) + dyndns + 0x00055e8a, // n0x071a c0x0000 (---------------) + for-better + 0x00087d88, // n0x071b c0x0000 (---------------) + for-more + 0x00056488, // n0x071c c0x0000 (---------------) + for-some + 0x000572c7, // n0x071d c0x0000 (---------------) + 
for-the + 0x0006ba86, // n0x071e c0x0000 (---------------) + selfip + 0x000eadc6, // n0x071f c0x0000 (---------------) + webhop + 0x002d4884, // n0x0720 c0x0000 (---------------) + I asso + 0x00319cc7, // n0x0721 c0x0000 (---------------) + I barreau + 0x000ffa08, // n0x0722 c0x0000 (---------------) + blogspot + 0x0033d7c4, // n0x0723 c0x0000 (---------------) + I gouv + 0x00233503, // n0x0724 c0x0000 (---------------) + I com + 0x0023a783, // n0x0725 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0726 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x0727 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0728 c0x0000 (---------------) + I org + 0x00233503, // n0x0729 c0x0000 (---------------) + I com + 0x0023a783, // n0x072a c0x0000 (---------------) + I edu + 0x00213183, // n0x072b c0x0000 (---------------) + I gob + 0x0026cc83, // n0x072c c0x0000 (---------------) + I gov + 0x00201603, // n0x072d c0x0000 (---------------) + I int + 0x00209003, // n0x072e c0x0000 (---------------) + I mil + 0x0021fe03, // n0x072f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0730 c0x0000 (---------------) + I org + 0x00224e42, // n0x0731 c0x0000 (---------------) + I tv + 0x002c3503, // n0x0732 c0x0000 (---------------) + I adm + 0x002f4fc3, // n0x0733 c0x0000 (---------------) + I adv + 0x00209c83, // n0x0734 c0x0000 (---------------) + I agr + 0x00201882, // n0x0735 c0x0000 (---------------) + I am + 0x0024f183, // n0x0736 c0x0000 (---------------) + I arq + 0x002011c3, // n0x0737 c0x0000 (---------------) + I art + 0x00217b03, // n0x0738 c0x0000 (---------------) + I ato + 0x00200001, // n0x0739 c0x0000 (---------------) + I b + 0x00203a03, // n0x073a c0x0000 (---------------) + I bio + 0x002a4004, // n0x073b c0x0000 (---------------) + I blog + 0x00321f43, // n0x073c c0x0000 (---------------) + I bmd + 0x0025ee03, // n0x073d c0x0000 (---------------) + I cim + 0x0021ba43, // n0x073e c0x0000 (---------------) + I cng + 0x00231603, // n0x073f c0x0000 
(---------------) + I cnt + 0x0a233503, // n0x0740 c0x0028 (n0x0778-n0x0779) + I com + 0x0023d684, // n0x0741 c0x0000 (---------------) + I coop + 0x0021ba03, // n0x0742 c0x0000 (---------------) + I ecn + 0x0020b203, // n0x0743 c0x0000 (---------------) + I eco + 0x0023a783, // n0x0744 c0x0000 (---------------) + I edu + 0x0023a4c3, // n0x0745 c0x0000 (---------------) + I emp + 0x00213703, // n0x0746 c0x0000 (---------------) + I eng + 0x0029bdc3, // n0x0747 c0x0000 (---------------) + I esp + 0x0025ed83, // n0x0748 c0x0000 (---------------) + I etc + 0x002234c3, // n0x0749 c0x0000 (---------------) + I eti + 0x00212983, // n0x074a c0x0000 (---------------) + I far + 0x002522c4, // n0x074b c0x0000 (---------------) + I flog + 0x00242902, // n0x074c c0x0000 (---------------) + I fm + 0x00255803, // n0x074d c0x0000 (---------------) + I fnd + 0x0025bb03, // n0x074e c0x0000 (---------------) + I fot + 0x00277703, // n0x074f c0x0000 (---------------) + I fst + 0x002ee4c3, // n0x0750 c0x0000 (---------------) + I g12 + 0x002ece03, // n0x0751 c0x0000 (---------------) + I ggf + 0x0026cc83, // n0x0752 c0x0000 (---------------) + I gov + 0x002cc783, // n0x0753 c0x0000 (---------------) + I imb + 0x0021d883, // n0x0754 c0x0000 (---------------) + I ind + 0x003a1083, // n0x0755 c0x0000 (---------------) + I inf + 0x00215b43, // n0x0756 c0x0000 (---------------) + I jor + 0x002f3143, // n0x0757 c0x0000 (---------------) + I jus + 0x0022e283, // n0x0758 c0x0000 (---------------) + I leg + 0x002d08c3, // n0x0759 c0x0000 (---------------) + I lel + 0x0021f803, // n0x075a c0x0000 (---------------) + I mat + 0x00213ac3, // n0x075b c0x0000 (---------------) + I med + 0x00209003, // n0x075c c0x0000 (---------------) + I mil + 0x0022a482, // n0x075d c0x0000 (---------------) + I mp + 0x00283a83, // n0x075e c0x0000 (---------------) + I mus + 0x0021fe03, // n0x075f c0x0000 (---------------) + I net + 0x01601483, // n0x0760 c0x0005 (---------------)* o I nom + 0x002547c3, // n0x0761 
c0x0000 (---------------) + I not + 0x0023b443, // n0x0762 c0x0000 (---------------) + I ntr + 0x00213243, // n0x0763 c0x0000 (---------------) + I odo + 0x0022d1c3, // n0x0764 c0x0000 (---------------) + I org + 0x00249583, // n0x0765 c0x0000 (---------------) + I ppg + 0x00220e43, // n0x0766 c0x0000 (---------------) + I pro + 0x0023d083, // n0x0767 c0x0000 (---------------) + I psc + 0x002f7b43, // n0x0768 c0x0000 (---------------) + I psi + 0x002e6543, // n0x0769 c0x0000 (---------------) + I qsl + 0x00264b85, // n0x076a c0x0000 (---------------) + I radio + 0x0022a5c3, // n0x076b c0x0000 (---------------) + I rec + 0x002e6583, // n0x076c c0x0000 (---------------) + I slg + 0x0035ca03, // n0x076d c0x0000 (---------------) + I srv + 0x002203c4, // n0x076e c0x0000 (---------------) + I taxi + 0x00336843, // n0x076f c0x0000 (---------------) + I teo + 0x00239f83, // n0x0770 c0x0000 (---------------) + I tmp + 0x002a9803, // n0x0771 c0x0000 (---------------) + I trd + 0x00209e43, // n0x0772 c0x0000 (---------------) + I tur + 0x00224e42, // n0x0773 c0x0000 (---------------) + I tv + 0x0023f943, // n0x0774 c0x0000 (---------------) + I vet + 0x002fa5c4, // n0x0775 c0x0000 (---------------) + I vlog + 0x0037c484, // n0x0776 c0x0000 (---------------) + I wiki + 0x002645c3, // n0x0777 c0x0000 (---------------) + I zlg + 0x000ffa08, // n0x0778 c0x0000 (---------------) + blogspot + 0x00233503, // n0x0779 c0x0000 (---------------) + I com + 0x0023a783, // n0x077a c0x0000 (---------------) + I edu + 0x0026cc83, // n0x077b c0x0000 (---------------) + I gov + 0x0021fe03, // n0x077c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x077d c0x0000 (---------------) + I org + 0x00233503, // n0x077e c0x0000 (---------------) + I com + 0x0023a783, // n0x077f c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0780 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x0781 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0782 c0x0000 (---------------) + I org + 
0x00200742, // n0x0783 c0x0000 (---------------) + I co + 0x0022d1c3, // n0x0784 c0x0000 (---------------) + I org + 0x0b633503, // n0x0785 c0x002d (n0x0789-n0x078a) + I com + 0x0026cc83, // n0x0786 c0x0000 (---------------) + I gov + 0x00209003, // n0x0787 c0x0000 (---------------) + I mil + 0x0020b282, // n0x0788 c0x0000 (---------------) + I of + 0x000ffa08, // n0x0789 c0x0000 (---------------) + blogspot + 0x00233503, // n0x078a c0x0000 (---------------) + I com + 0x0023a783, // n0x078b c0x0000 (---------------) + I edu + 0x0026cc83, // n0x078c c0x0000 (---------------) + I gov + 0x0021fe03, // n0x078d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x078e c0x0000 (---------------) + I org + 0x00005f82, // n0x078f c0x0000 (---------------) + za + 0x002020c2, // n0x0790 c0x0000 (---------------) + I ab + 0x00221a82, // n0x0791 c0x0000 (---------------) + I bc + 0x000ffa08, // n0x0792 c0x0000 (---------------) + blogspot + 0x00000742, // n0x0793 c0x0000 (---------------) + co + 0x0023c3c2, // n0x0794 c0x0000 (---------------) + I gc + 0x00208602, // n0x0795 c0x0000 (---------------) + I mb + 0x00215102, // n0x0796 c0x0000 (---------------) + I nb + 0x00200542, // n0x0797 c0x0000 (---------------) + I nf + 0x00247802, // n0x0798 c0x0000 (---------------) + I nl + 0x00210c42, // n0x0799 c0x0000 (---------------) + I ns + 0x002009c2, // n0x079a c0x0000 (---------------) + I nt + 0x002017c2, // n0x079b c0x0000 (---------------) + I nu + 0x00200982, // n0x079c c0x0000 (---------------) + I on + 0x00207782, // n0x079d c0x0000 (---------------) + I pe + 0x0037cd42, // n0x079e c0x0000 (---------------) + I qc + 0x00207842, // n0x079f c0x0000 (---------------) + I sk + 0x00226f42, // n0x07a0 c0x0000 (---------------) + I yk + 0x00146449, // n0x07a1 c0x0000 (---------------) + ftpaccess + 0x00173f0b, // n0x07a2 c0x0000 (---------------) + game-server + 0x000d1148, // n0x07a3 c0x0000 (---------------) + myphotos + 0x00143a09, // n0x07a4 c0x0000 (---------------) + 
scrapping + 0x0026cc83, // n0x07a5 c0x0000 (---------------) + I gov + 0x000ffa08, // n0x07a6 c0x0000 (---------------) + blogspot + 0x000ffa08, // n0x07a7 c0x0000 (---------------) + blogspot + 0x00201542, // n0x07a8 c0x0000 (---------------) + I ac + 0x002d4884, // n0x07a9 c0x0000 (---------------) + I asso + 0x00200742, // n0x07aa c0x0000 (---------------) + I co + 0x00233503, // n0x07ab c0x0000 (---------------) + I com + 0x00202602, // n0x07ac c0x0000 (---------------) + I ed + 0x0023a783, // n0x07ad c0x0000 (---------------) + I edu + 0x00202d42, // n0x07ae c0x0000 (---------------) + I go + 0x0033d7c4, // n0x07af c0x0000 (---------------) + I gouv + 0x00201603, // n0x07b0 c0x0000 (---------------) + I int + 0x0024da82, // n0x07b1 c0x0000 (---------------) + I md + 0x0021fe03, // n0x07b2 c0x0000 (---------------) + I net + 0x00200282, // n0x07b3 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x07b4 c0x0000 (---------------) + I org + 0x00247506, // n0x07b5 c0x0000 (---------------) + I presse + 0x0030dc0f, // n0x07b6 c0x0000 (---------------) + I xn--aroport-bya + 0x00700b03, // n0x07b7 c0x0001 (---------------) ! 
I www + 0x000ffa08, // n0x07b8 c0x0000 (---------------) + blogspot + 0x00200742, // n0x07b9 c0x0000 (---------------) + I co + 0x00213183, // n0x07ba c0x0000 (---------------) + I gob + 0x0026cc83, // n0x07bb c0x0000 (---------------) + I gov + 0x00209003, // n0x07bc c0x0000 (---------------) + I mil + 0x00200742, // n0x07bd c0x0000 (---------------) + I co + 0x00233503, // n0x07be c0x0000 (---------------) + I com + 0x0026cc83, // n0x07bf c0x0000 (---------------) + I gov + 0x0021fe03, // n0x07c0 c0x0000 (---------------) + I net + 0x00201542, // n0x07c1 c0x0000 (---------------) + I ac + 0x00204f02, // n0x07c2 c0x0000 (---------------) + I ah + 0x0e6f6409, // n0x07c3 c0x0039 (n0x07ee-n0x07ef) o I amazonaws + 0x00206502, // n0x07c4 c0x0000 (---------------) + I bj + 0x0ee33503, // n0x07c5 c0x003b (n0x07f0-n0x07f1) + I com + 0x00243b42, // n0x07c6 c0x0000 (---------------) + I cq + 0x0023a783, // n0x07c7 c0x0000 (---------------) + I edu + 0x00215b02, // n0x07c8 c0x0000 (---------------) + I fj + 0x00222d42, // n0x07c9 c0x0000 (---------------) + I gd + 0x0026cc83, // n0x07ca c0x0000 (---------------) + I gov + 0x0023a242, // n0x07cb c0x0000 (---------------) + I gs + 0x00260202, // n0x07cc c0x0000 (---------------) + I gx + 0x00264642, // n0x07cd c0x0000 (---------------) + I gz + 0x00202442, // n0x07ce c0x0000 (---------------) + I ha + 0x0028c342, // n0x07cf c0x0000 (---------------) + I hb + 0x002073c2, // n0x07d0 c0x0000 (---------------) + I he + 0x00200d82, // n0x07d1 c0x0000 (---------------) + I hi + 0x0020a882, // n0x07d2 c0x0000 (---------------) + I hk + 0x00248fc2, // n0x07d3 c0x0000 (---------------) + I hl + 0x0021ab42, // n0x07d4 c0x0000 (---------------) + I hn + 0x002ac642, // n0x07d5 c0x0000 (---------------) + I jl + 0x00251742, // n0x07d6 c0x0000 (---------------) + I js + 0x00313402, // n0x07d7 c0x0000 (---------------) + I jx + 0x0022e8c2, // n0x07d8 c0x0000 (---------------) + I ln + 0x00209003, // n0x07d9 c0x0000 (---------------) + I mil 
+ 0x00207102, // n0x07da c0x0000 (---------------) + I mo + 0x0021fe03, // n0x07db c0x0000 (---------------) + I net + 0x0023db02, // n0x07dc c0x0000 (---------------) + I nm + 0x0026a782, // n0x07dd c0x0000 (---------------) + I nx + 0x0022d1c3, // n0x07de c0x0000 (---------------) + I org + 0x0024f202, // n0x07df c0x0000 (---------------) + I qh + 0x00200702, // n0x07e0 c0x0000 (---------------) + I sc + 0x002496c2, // n0x07e1 c0x0000 (---------------) + I sd + 0x00201342, // n0x07e2 c0x0000 (---------------) + I sh + 0x00214182, // n0x07e3 c0x0000 (---------------) + I sn + 0x002f2802, // n0x07e4 c0x0000 (---------------) + I sx + 0x00226782, // n0x07e5 c0x0000 (---------------) + I tj + 0x0024e502, // n0x07e6 c0x0000 (---------------) + I tw + 0x0036f4c2, // n0x07e7 c0x0000 (---------------) + I xj + 0x003029ca, // n0x07e8 c0x0000 (---------------) + I xn--55qx5d + 0x0034280a, // n0x07e9 c0x0000 (---------------) + I xn--io0a7i + 0x0037648a, // n0x07ea c0x0000 (---------------) + I xn--od0alg + 0x003a6cc2, // n0x07eb c0x0000 (---------------) + I xz + 0x00213642, // n0x07ec c0x0000 (---------------) + I yn + 0x00247702, // n0x07ed c0x0000 (---------------) + I zj + 0x0e835247, // n0x07ee c0x003a (n0x07ef-n0x07f0) + compute + 0x00039b8a, // n0x07ef c0x0000 (---------------) + cn-north-1 + 0x0f2f6409, // n0x07f0 c0x003c (n0x07f1-n0x07f2) o I amazonaws + 0x0f639b8a, // n0x07f1 c0x003d (n0x07f2-n0x07f3) o I cn-north-1 + 0x0004a542, // n0x07f2 c0x0000 (---------------) + s3 + 0x0024bf84, // n0x07f3 c0x0000 (---------------) + I arts + 0x0fe33503, // n0x07f4 c0x003f (n0x0800-n0x0801) + I com + 0x0023a783, // n0x07f5 c0x0000 (---------------) + I edu + 0x0024d9c4, // n0x07f6 c0x0000 (---------------) + I firm + 0x0026cc83, // n0x07f7 c0x0000 (---------------) + I gov + 0x003a1244, // n0x07f8 c0x0000 (---------------) + I info + 0x00201603, // n0x07f9 c0x0000 (---------------) + I int + 0x00209003, // n0x07fa c0x0000 (---------------) + I mil + 0x0021fe03, // n0x07fb 
c0x0000 (---------------) + I net + 0x00201483, // n0x07fc c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x07fd c0x0000 (---------------) + I org + 0x0022a5c3, // n0x07fe c0x0000 (---------------) + I rec + 0x00221a03, // n0x07ff c0x0000 (---------------) + I web + 0x000ffa08, // n0x0800 c0x0000 (---------------) + blogspot + 0x00131905, // n0x0801 c0x0000 (---------------) + 1kapp + 0x0010a942, // n0x0802 c0x0000 (---------------) + 4u + 0x00175846, // n0x0803 c0x0000 (---------------) + africa + 0x106f6409, // n0x0804 c0x0041 (n0x08d7-n0x08eb) o I amazonaws + 0x000092c7, // n0x0805 c0x0000 (---------------) + appspot + 0x00000a42, // n0x0806 c0x0000 (---------------) + ar + 0x0019bcca, // n0x0807 c0x0000 (---------------) + betainabox + 0x000fb147, // n0x0808 c0x0000 (---------------) + blogdns + 0x000ffa08, // n0x0809 c0x0000 (---------------) + blogspot + 0x0001c402, // n0x080a c0x0000 (---------------) + br + 0x001387c7, // n0x080b c0x0000 (---------------) + cechire + 0x00194fcf, // n0x080c c0x0000 (---------------) + cloudcontrolapp + 0x00108b4f, // n0x080d c0x0000 (---------------) + cloudcontrolled + 0x0001ba42, // n0x080e c0x0000 (---------------) + cn + 0x00000742, // n0x080f c0x0000 (---------------) + co + 0x0009bd08, // n0x0810 c0x0000 (---------------) + codespot + 0x00004d82, // n0x0811 c0x0000 (---------------) + de + 0x0014c048, // n0x0812 c0x0000 (---------------) + dnsalias + 0x0007c9c7, // n0x0813 c0x0000 (---------------) + dnsdojo + 0x00014e0b, // n0x0814 c0x0000 (---------------) + doesntexist + 0x0016a009, // n0x0815 c0x0000 (---------------) + dontexist + 0x0014bf47, // n0x0816 c0x0000 (---------------) + doomdns + 0x000f410c, // n0x0817 c0x0000 (---------------) + dreamhosters + 0x0008cdc7, // n0x0818 c0x0000 (---------------) + dsmynas + 0x0012328a, // n0x0819 c0x0000 (---------------) + dyn-o-saur + 0x00197b48, // n0x081a c0x0000 (---------------) + dynalias + 0x00073dce, // n0x081b c0x0000 (---------------) + dyndns-at-home + 
0x000dba8e, // n0x081c c0x0000 (---------------) + dyndns-at-work + 0x000faf8b, // n0x081d c0x0000 (---------------) + dyndns-blog + 0x000e7b4b, // n0x081e c0x0000 (---------------) + dyndns-free + 0x0001388b, // n0x081f c0x0000 (---------------) + dyndns-home + 0x00015c09, // n0x0820 c0x0000 (---------------) + dyndns-ip + 0x0001b40b, // n0x0821 c0x0000 (---------------) + dyndns-mail + 0x000214cd, // n0x0822 c0x0000 (---------------) + dyndns-office + 0x0002554b, // n0x0823 c0x0000 (---------------) + dyndns-pics + 0x0002698d, // n0x0824 c0x0000 (---------------) + dyndns-remote + 0x0002d40d, // n0x0825 c0x0000 (---------------) + dyndns-server + 0x0002e48a, // n0x0826 c0x0000 (---------------) + dyndns-web + 0x0017c2cb, // n0x0827 c0x0000 (---------------) + dyndns-wiki + 0x00151d4b, // n0x0828 c0x0000 (---------------) + dyndns-work + 0x0001e810, // n0x0829 c0x0000 (---------------) + elasticbeanstalk + 0x0002bb8f, // n0x082a c0x0000 (---------------) + est-a-la-maison + 0x0000a14f, // n0x082b c0x0000 (---------------) + est-a-la-masion + 0x0015234d, // n0x082c c0x0000 (---------------) + est-le-patron + 0x0013c450, // n0x082d c0x0000 (---------------) + est-mon-blogueur + 0x00004b82, // n0x082e c0x0000 (---------------) + eu + 0x00008f88, // n0x082f c0x0000 (---------------) + familyds + 0x11a4e285, // n0x0830 c0x0046 (n0x08f9-n0x08fa) o I fbsbx + 0x0004c6cb, // n0x0831 c0x0000 (---------------) + firebaseapp + 0x000549c8, // n0x0832 c0x0000 (---------------) + flynnhub + 0x00063d87, // n0x0833 c0x0000 (---------------) + from-ak + 0x000640c7, // n0x0834 c0x0000 (---------------) + from-al + 0x00064287, // n0x0835 c0x0000 (---------------) + from-ar + 0x00065347, // n0x0836 c0x0000 (---------------) + from-ca + 0x00065e07, // n0x0837 c0x0000 (---------------) + from-ct + 0x00066587, // n0x0838 c0x0000 (---------------) + from-dc + 0x00067007, // n0x0839 c0x0000 (---------------) + from-de + 0x00067547, // n0x083a c0x0000 (---------------) + from-fl + 
0x00067b87, // n0x083b c0x0000 (---------------) + from-ga + 0x00067f07, // n0x083c c0x0000 (---------------) + from-hi + 0x00068787, // n0x083d c0x0000 (---------------) + from-ia + 0x00068947, // n0x083e c0x0000 (---------------) + from-id + 0x00068b07, // n0x083f c0x0000 (---------------) + from-il + 0x00068cc7, // n0x0840 c0x0000 (---------------) + from-in + 0x00068fc7, // n0x0841 c0x0000 (---------------) + from-ks + 0x00069ac7, // n0x0842 c0x0000 (---------------) + from-ky + 0x0006a5c7, // n0x0843 c0x0000 (---------------) + from-ma + 0x0006aa87, // n0x0844 c0x0000 (---------------) + from-md + 0x0006b007, // n0x0845 c0x0000 (---------------) + from-mi + 0x0006bd87, // n0x0846 c0x0000 (---------------) + from-mn + 0x0006bf47, // n0x0847 c0x0000 (---------------) + from-mo + 0x0006c247, // n0x0848 c0x0000 (---------------) + from-ms + 0x0006c787, // n0x0849 c0x0000 (---------------) + from-mt + 0x0006c987, // n0x084a c0x0000 (---------------) + from-nc + 0x0006d307, // n0x084b c0x0000 (---------------) + from-nd + 0x0006d4c7, // n0x084c c0x0000 (---------------) + from-ne + 0x0006d8c7, // n0x084d c0x0000 (---------------) + from-nh + 0x0006e007, // n0x084e c0x0000 (---------------) + from-nj + 0x0006e4c7, // n0x084f c0x0000 (---------------) + from-nm + 0x0006ef87, // n0x0850 c0x0000 (---------------) + from-nv + 0x0006f587, // n0x0851 c0x0000 (---------------) + from-oh + 0x0006f847, // n0x0852 c0x0000 (---------------) + from-ok + 0x0006fbc7, // n0x0853 c0x0000 (---------------) + from-or + 0x0006fd87, // n0x0854 c0x0000 (---------------) + from-pa + 0x00070107, // n0x0855 c0x0000 (---------------) + from-pr + 0x000708c7, // n0x0856 c0x0000 (---------------) + from-ri + 0x00070f07, // n0x0857 c0x0000 (---------------) + from-sc + 0x00071307, // n0x0858 c0x0000 (---------------) + from-sd + 0x00073147, // n0x0859 c0x0000 (---------------) + from-tn + 0x00073307, // n0x085a c0x0000 (---------------) + from-tx + 0x00073747, // n0x085b c0x0000 
(---------------) + from-ut + 0x00074607, // n0x085c c0x0000 (---------------) + from-va + 0x00074c47, // n0x085d c0x0000 (---------------) + from-vt + 0x00074f47, // n0x085e c0x0000 (---------------) + from-wa + 0x00075107, // n0x085f c0x0000 (---------------) + from-wi + 0x00075487, // n0x0860 c0x0000 (---------------) + from-wv + 0x00076607, // n0x0861 c0x0000 (---------------) + from-wy + 0x0000d202, // n0x0862 c0x0000 (---------------) + gb + 0x000d4487, // n0x0863 c0x0000 (---------------) + getmyip + 0x11cca40b, // n0x0864 c0x0047 (n0x08fa-n0x08fd) + githubcloud + 0x014ca416, // n0x0865 c0x0005 (---------------)* o githubcloudusercontent + 0x00019511, // n0x0866 c0x0000 (---------------) + githubusercontent + 0x000df54a, // n0x0867 c0x0000 (---------------) + googleapis + 0x0009bb8a, // n0x0868 c0x0000 (---------------) + googlecode + 0x00057d06, // n0x0869 c0x0000 (---------------) + gotdns + 0x00011ecb, // n0x086a c0x0000 (---------------) + gotpantheon + 0x00000c82, // n0x086b c0x0000 (---------------) + gr + 0x000992c9, // n0x086c c0x0000 (---------------) + herokuapp + 0x00091f49, // n0x086d c0x0000 (---------------) + herokussl + 0x0000a882, // n0x086e c0x0000 (---------------) + hk + 0x0014cfca, // n0x086f c0x0000 (---------------) + hobby-site + 0x000a59c9, // n0x0870 c0x0000 (---------------) + homelinux + 0x000a6fc8, // n0x0871 c0x0000 (---------------) + homeunix + 0x000195c2, // n0x0872 c0x0000 (---------------) + hu + 0x00116f89, // n0x0873 c0x0000 (---------------) + iamallama + 0x0016d68e, // n0x0874 c0x0000 (---------------) + is-a-anarchist + 0x000a3ecc, // n0x0875 c0x0000 (---------------) + is-a-blogger + 0x000d254f, // n0x0876 c0x0000 (---------------) + is-a-bookkeeper + 0x0018ba8e, // n0x0877 c0x0000 (---------------) + is-a-bulls-fan + 0x0000de0c, // n0x0878 c0x0000 (---------------) + is-a-caterer + 0x00012789, // n0x0879 c0x0000 (---------------) + is-a-chef + 0x00013d11, // n0x087a c0x0000 (---------------) + is-a-conservative + 
0x00016c08, // n0x087b c0x0000 (---------------) + is-a-cpa + 0x00024852, // n0x087c c0x0000 (---------------) + is-a-cubicle-slave + 0x0002648d, // n0x087d c0x0000 (---------------) + is-a-democrat + 0x0002db8d, // n0x087e c0x0000 (---------------) + is-a-designer + 0x0017614b, // n0x087f c0x0000 (---------------) + is-a-doctor + 0x00178815, // n0x0880 c0x0000 (---------------) + is-a-financialadvisor + 0x0004e989, // n0x0881 c0x0000 (---------------) + is-a-geek + 0x0005028a, // n0x0882 c0x0000 (---------------) + is-a-green + 0x00059389, // n0x0883 c0x0000 (---------------) + is-a-guru + 0x0005bd50, // n0x0884 c0x0000 (---------------) + is-a-hard-worker + 0x000662cb, // n0x0885 c0x0000 (---------------) + is-a-hunter + 0x00070a4f, // n0x0886 c0x0000 (---------------) + is-a-landscaper + 0x0007434b, // n0x0887 c0x0000 (---------------) + is-a-lawyer + 0x0007b5cc, // n0x0888 c0x0000 (---------------) + is-a-liberal + 0x0007dbd0, // n0x0889 c0x0000 (---------------) + is-a-libertarian + 0x00082fca, // n0x088a c0x0000 (---------------) + is-a-llama + 0x0008394d, // n0x088b c0x0000 (---------------) + is-a-musician + 0x0008894e, // n0x088c c0x0000 (---------------) + is-a-nascarfan + 0x0014414a, // n0x088d c0x0000 (---------------) + is-a-nurse + 0x00089f0c, // n0x088e c0x0000 (---------------) + is-a-painter + 0x00094ed4, // n0x088f c0x0000 (---------------) + is-a-personaltrainer + 0x00098f51, // n0x0890 c0x0000 (---------------) + is-a-photographer + 0x0009e80b, // n0x0891 c0x0000 (---------------) + is-a-player + 0x0009f58f, // n0x0892 c0x0000 (---------------) + is-a-republican + 0x000a0f8d, // n0x0893 c0x0000 (---------------) + is-a-rockstar + 0x000a384e, // n0x0894 c0x0000 (---------------) + is-a-socialist + 0x000abc0c, // n0x0895 c0x0000 (---------------) + is-a-student + 0x000d5b8c, // n0x0896 c0x0000 (---------------) + is-a-teacher + 0x000d588b, // n0x0897 c0x0000 (---------------) + is-a-techie + 0x000beb8e, // n0x0898 c0x0000 (---------------) + 
is-a-therapist + 0x000d9990, // n0x0899 c0x0000 (---------------) + is-an-accountant + 0x000ad54b, // n0x089a c0x0000 (---------------) + is-an-actor + 0x000d540d, // n0x089b c0x0000 (---------------) + is-an-actress + 0x000fb60f, // n0x089c c0x0000 (---------------) + is-an-anarchist + 0x0010390c, // n0x089d c0x0000 (---------------) + is-an-artist + 0x00168fce, // n0x089e c0x0000 (---------------) + is-an-engineer + 0x000b2bd1, // n0x089f c0x0000 (---------------) + is-an-entertainer + 0x000b924c, // n0x08a0 c0x0000 (---------------) + is-certified + 0x000bb247, // n0x08a1 c0x0000 (---------------) + is-gone + 0x000be5cd, // n0x08a2 c0x0000 (---------------) + is-into-anime + 0x00105d8c, // n0x08a3 c0x0000 (---------------) + is-into-cars + 0x00147b50, // n0x08a4 c0x0000 (---------------) + is-into-cartoons + 0x0016fecd, // n0x08a5 c0x0000 (---------------) + is-into-games + 0x000cf347, // n0x08a6 c0x0000 (---------------) + is-leet + 0x0017e2d0, // n0x08a7 c0x0000 (---------------) + is-not-certified + 0x000e9488, // n0x08a8 c0x0000 (---------------) + is-slick + 0x000ef04b, // n0x08a9 c0x0000 (---------------) + is-uberleet + 0x0014bbcf, // n0x08aa c0x0000 (---------------) + is-with-theband + 0x0008e588, // n0x08ab c0x0000 (---------------) + isa-geek + 0x000df74d, // n0x08ac c0x0000 (---------------) + isa-hockeynut + 0x00168110, // n0x08ad c0x0000 (---------------) + issmarterthanyou + 0x000aedc3, // n0x08ae c0x0000 (---------------) + jpn + 0x00006fc2, // n0x08af c0x0000 (---------------) + kr + 0x00058c49, // n0x08b0 c0x0000 (---------------) + likes-pie + 0x00073bca, // n0x08b1 c0x0000 (---------------) + likescandy + 0x00005303, // n0x08b2 c0x0000 (---------------) + mex + 0x0010b007, // n0x08b3 c0x0000 (---------------) + mydrobo + 0x001176c8, // n0x08b4 c0x0000 (---------------) + neat-url + 0x00184847, // n0x08b5 c0x0000 (---------------) + nfshost + 0x00000c02, // n0x08b6 c0x0000 (---------------) + no + 0x00064d0a, // n0x08b7 c0x0000 
(---------------) + operaunite + 0x00194d4f, // n0x08b8 c0x0000 (---------------) + outsystemscloud + 0x000eaf0c, // n0x08b9 c0x0000 (---------------) + pagefrontapp + 0x000eb1d2, // n0x08ba c0x0000 (---------------) + pagespeedmobilizer + 0x122e1605, // n0x08bb c0x0048 (n0x08fd-n0x08fe) o I prgmr + 0x00114683, // n0x08bc c0x0000 (---------------) + qa2 + 0x0017cd42, // n0x08bd c0x0000 (---------------) + qc + 0x000ecb08, // n0x08be c0x0000 (---------------) + rackmaze + 0x00108ac7, // n0x08bf c0x0000 (---------------) + rhcloud + 0x00002202, // n0x08c0 c0x0000 (---------------) + ro + 0x00011302, // n0x08c1 c0x0000 (---------------) + ru + 0x000004c2, // n0x08c2 c0x0000 (---------------) + sa + 0x00033810, // n0x08c3 c0x0000 (---------------) + saves-the-whales + 0x000046c2, // n0x08c4 c0x0000 (---------------) + se + 0x0006ba86, // n0x08c5 c0x0000 (---------------) + selfip + 0x0013738e, // n0x08c6 c0x0000 (---------------) + sells-for-less + 0x0008becb, // n0x08c7 c0x0000 (---------------) + sells-for-u + 0x000cb7c8, // n0x08c8 c0x0000 (---------------) + servebbs + 0x000d0eca, // n0x08c9 c0x0000 (---------------) + simple-url + 0x000f7b87, // n0x08ca c0x0000 (---------------) + sinaapp + 0x0000bb4d, // n0x08cb c0x0000 (---------------) + space-to-rent + 0x001557cc, // n0x08cc c0x0000 (---------------) + teaches-yoga + 0x00000f82, // n0x08cd c0x0000 (---------------) + uk + 0x00002382, // n0x08ce c0x0000 (---------------) + us + 0x00001802, // n0x08cf c0x0000 (---------------) + uy + 0x000f7aca, // n0x08d0 c0x0000 (---------------) + vipsinaapp + 0x000df44a, // n0x08d1 c0x0000 (---------------) + withgoogle + 0x000e470b, // n0x08d2 c0x0000 (---------------) + withyoutube + 0x000ff78e, // n0x08d3 c0x0000 (---------------) + writesthisblog + 0x0001bc0d, // n0x08d4 c0x0000 (---------------) + xenapponazure + 0x000d6f08, // n0x08d5 c0x0000 (---------------) + yolasite + 0x00005f82, // n0x08d6 c0x0000 (---------------) + za + 0x10a5f44e, // n0x08d7 c0x0042 
(n0x08eb-n0x08ec) o I ap-northeast-2 + 0x10c35247, // n0x08d8 c0x0043 (n0x08ec-n0x08f6) + compute + 0x11035249, // n0x08d9 c0x0044 (n0x08f6-n0x08f8) + compute-1 + 0x00010743, // n0x08da c0x0000 (---------------) + elb + 0x11672e4c, // n0x08db c0x0045 (n0x08f8-n0x08f9) o I eu-central-1 + 0x0004a542, // n0x08dc c0x0000 (---------------) + s3 + 0x00132491, // n0x08dd c0x0000 (---------------) + s3-ap-northeast-1 + 0x0005f391, // n0x08de c0x0000 (---------------) + s3-ap-northeast-2 + 0x00131511, // n0x08df c0x0000 (---------------) + s3-ap-southeast-1 + 0x0004a551, // n0x08e0 c0x0000 (---------------) + s3-ap-southeast-2 + 0x00072d8f, // n0x08e1 c0x0000 (---------------) + s3-eu-central-1 + 0x001101cc, // n0x08e2 c0x0000 (---------------) + s3-eu-west-1 + 0x0011b34d, // n0x08e3 c0x0000 (---------------) + s3-external-1 + 0x0012364d, // n0x08e4 c0x0000 (---------------) + s3-external-2 + 0x00126215, // n0x08e5 c0x0000 (---------------) + s3-fips-us-gov-west-1 + 0x0014694c, // n0x08e6 c0x0000 (---------------) + s3-sa-east-1 + 0x000de150, // n0x08e7 c0x0000 (---------------) + s3-us-gov-west-1 + 0x000c158c, // n0x08e8 c0x0000 (---------------) + s3-us-west-1 + 0x000dc7cc, // n0x08e9 c0x0000 (---------------) + s3-us-west-2 + 0x0017ca09, // n0x08ea c0x0000 (---------------) + us-east-1 + 0x0004a542, // n0x08eb c0x0000 (---------------) + s3 + 0x0013254e, // n0x08ec c0x0000 (---------------) + ap-northeast-1 + 0x0005f44e, // n0x08ed c0x0000 (---------------) + ap-northeast-2 + 0x001315ce, // n0x08ee c0x0000 (---------------) + ap-southeast-1 + 0x0004a60e, // n0x08ef c0x0000 (---------------) + ap-southeast-2 + 0x00072e4c, // n0x08f0 c0x0000 (---------------) + eu-central-1 + 0x00110289, // n0x08f1 c0x0000 (---------------) + eu-west-1 + 0x00146a09, // n0x08f2 c0x0000 (---------------) + sa-east-1 + 0x000de20d, // n0x08f3 c0x0000 (---------------) + us-gov-west-1 + 0x000c1649, // n0x08f4 c0x0000 (---------------) + us-west-1 + 0x000dc889, // n0x08f5 c0x0000 
(---------------) + us-west-2 + 0x0002a043, // n0x08f6 c0x0000 (---------------) + z-1 + 0x0013e5c3, // n0x08f7 c0x0000 (---------------) + z-2 + 0x0004a542, // n0x08f8 c0x0000 (---------------) + s3 + 0x000092c4, // n0x08f9 c0x0000 (---------------) + apps + 0x014bedc3, // n0x08fa c0x0005 (---------------)* o api + 0x0140ba03, // n0x08fb c0x0005 (---------------)* o ext + 0x000e2dc4, // n0x08fc c0x0000 (---------------) + gist + 0x0001bc03, // n0x08fd c0x0000 (---------------) + xen + 0x00201542, // n0x08fe c0x0000 (---------------) + I ac + 0x00200742, // n0x08ff c0x0000 (---------------) + I co + 0x00202602, // n0x0900 c0x0000 (---------------) + I ed + 0x00207502, // n0x0901 c0x0000 (---------------) + I fi + 0x00202d42, // n0x0902 c0x0000 (---------------) + I go + 0x00200282, // n0x0903 c0x0000 (---------------) + I or + 0x002004c2, // n0x0904 c0x0000 (---------------) + I sa + 0x00233503, // n0x0905 c0x0000 (---------------) + I com + 0x0023a783, // n0x0906 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0907 c0x0000 (---------------) + I gov + 0x003a1083, // n0x0908 c0x0000 (---------------) + I inf + 0x0021fe03, // n0x0909 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x090a c0x0000 (---------------) + I org + 0x000ffa08, // n0x090b c0x0000 (---------------) + blogspot + 0x00233503, // n0x090c c0x0000 (---------------) + I com + 0x0023a783, // n0x090d c0x0000 (---------------) + I edu + 0x0021fe03, // n0x090e c0x0000 (---------------) + I net + 0x0022d1c3, // n0x090f c0x0000 (---------------) + I org + 0x00048f43, // n0x0910 c0x0000 (---------------) + ath + 0x0026cc83, // n0x0911 c0x0000 (---------------) + I gov + 0x00201542, // n0x0912 c0x0000 (---------------) + I ac + 0x00330b83, // n0x0913 c0x0000 (---------------) + I biz + 0x13e33503, // n0x0914 c0x004f (n0x091f-n0x0920) + I com + 0x0027a1c7, // n0x0915 c0x0000 (---------------) + I ekloges + 0x0026cc83, // n0x0916 c0x0000 (---------------) + I gov + 0x00322cc3, // n0x0917 c0x0000 
(---------------) + I ltd + 0x00205284, // n0x0918 c0x0000 (---------------) + I name + 0x0021fe03, // n0x0919 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x091a c0x0000 (---------------) + I org + 0x0028440a, // n0x091b c0x0000 (---------------) + I parliament + 0x00247505, // n0x091c c0x0000 (---------------) + I press + 0x00220e43, // n0x091d c0x0000 (---------------) + I pro + 0x00200142, // n0x091e c0x0000 (---------------) + I tm + 0x000ffa08, // n0x091f c0x0000 (---------------) + blogspot + 0x000ffa08, // n0x0920 c0x0000 (---------------) + blogspot + 0x00000742, // n0x0921 c0x0000 (---------------) + co + 0x000ffa08, // n0x0922 c0x0000 (---------------) + blogspot + 0x00033503, // n0x0923 c0x0000 (---------------) + com + 0x000afa4f, // n0x0924 c0x0000 (---------------) + fuettertdasnetz + 0x0016a18a, // n0x0925 c0x0000 (---------------) + isteingeek + 0x000a3b07, // n0x0926 c0x0000 (---------------) + istmein + 0x0001fc8a, // n0x0927 c0x0000 (---------------) + lebtimnetz + 0x0018460a, // n0x0928 c0x0000 (---------------) + leitungsen + 0x00004acd, // n0x0929 c0x0000 (---------------) + traeumtgerade + 0x000ffa08, // n0x092a c0x0000 (---------------) + blogspot + 0x00233503, // n0x092b c0x0000 (---------------) + I com + 0x0023a783, // n0x092c c0x0000 (---------------) + I edu + 0x0026cc83, // n0x092d c0x0000 (---------------) + I gov + 0x0021fe03, // n0x092e c0x0000 (---------------) + I net + 0x0022d1c3, // n0x092f c0x0000 (---------------) + I org + 0x002011c3, // n0x0930 c0x0000 (---------------) + I art + 0x00233503, // n0x0931 c0x0000 (---------------) + I com + 0x0023a783, // n0x0932 c0x0000 (---------------) + I edu + 0x00213183, // n0x0933 c0x0000 (---------------) + I gob + 0x0026cc83, // n0x0934 c0x0000 (---------------) + I gov + 0x00209003, // n0x0935 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0936 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0937 c0x0000 (---------------) + I org + 0x00292103, // n0x0938 c0x0000 
(---------------) + I sld + 0x00221a03, // n0x0939 c0x0000 (---------------) + I web + 0x002011c3, // n0x093a c0x0000 (---------------) + I art + 0x002d4884, // n0x093b c0x0000 (---------------) + I asso + 0x00233503, // n0x093c c0x0000 (---------------) + I com + 0x0023a783, // n0x093d c0x0000 (---------------) + I edu + 0x0026cc83, // n0x093e c0x0000 (---------------) + I gov + 0x0021fe03, // n0x093f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0940 c0x0000 (---------------) + I org + 0x00208103, // n0x0941 c0x0000 (---------------) + I pol + 0x00233503, // n0x0942 c0x0000 (---------------) + I com + 0x0023a783, // n0x0943 c0x0000 (---------------) + I edu + 0x00207503, // n0x0944 c0x0000 (---------------) + I fin + 0x00213183, // n0x0945 c0x0000 (---------------) + I gob + 0x0026cc83, // n0x0946 c0x0000 (---------------) + I gov + 0x003a1244, // n0x0947 c0x0000 (---------------) + I info + 0x00309ac3, // n0x0948 c0x0000 (---------------) + I k12 + 0x00213ac3, // n0x0949 c0x0000 (---------------) + I med + 0x00209003, // n0x094a c0x0000 (---------------) + I mil + 0x0021fe03, // n0x094b c0x0000 (---------------) + I net + 0x0022d1c3, // n0x094c c0x0000 (---------------) + I org + 0x00220e43, // n0x094d c0x0000 (---------------) + I pro + 0x003a6543, // n0x094e c0x0000 (---------------) + I aip + 0x16233503, // n0x094f c0x0058 (n0x0958-n0x0959) + I com + 0x0023a783, // n0x0950 c0x0000 (---------------) + I edu + 0x002b9443, // n0x0951 c0x0000 (---------------) + I fie + 0x0026cc83, // n0x0952 c0x0000 (---------------) + I gov + 0x0027b703, // n0x0953 c0x0000 (---------------) + I lib + 0x00213ac3, // n0x0954 c0x0000 (---------------) + I med + 0x0022d1c3, // n0x0955 c0x0000 (---------------) + I org + 0x00204603, // n0x0956 c0x0000 (---------------) + I pri + 0x00320cc4, // n0x0957 c0x0000 (---------------) + I riik + 0x000ffa08, // n0x0958 c0x0000 (---------------) + blogspot + 0x16a33503, // n0x0959 c0x005a (n0x0962-n0x0963) + I com + 0x0023a783, // 
n0x095a c0x0000 (---------------) + I edu + 0x002a7083, // n0x095b c0x0000 (---------------) + I eun + 0x0026cc83, // n0x095c c0x0000 (---------------) + I gov + 0x00209003, // n0x095d c0x0000 (---------------) + I mil + 0x00205284, // n0x095e c0x0000 (---------------) + I name + 0x0021fe03, // n0x095f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0960 c0x0000 (---------------) + I org + 0x0021d703, // n0x0961 c0x0000 (---------------) + I sci + 0x000ffa08, // n0x0962 c0x0000 (---------------) + blogspot + 0x17233503, // n0x0963 c0x005c (n0x0968-n0x0969) + I com + 0x0023a783, // n0x0964 c0x0000 (---------------) + I edu + 0x00213183, // n0x0965 c0x0000 (---------------) + I gob + 0x00201483, // n0x0966 c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x0967 c0x0000 (---------------) + I org + 0x000ffa08, // n0x0968 c0x0000 (---------------) + blogspot + 0x00330b83, // n0x0969 c0x0000 (---------------) + I biz + 0x00233503, // n0x096a c0x0000 (---------------) + I com + 0x0023a783, // n0x096b c0x0000 (---------------) + I edu + 0x0026cc83, // n0x096c c0x0000 (---------------) + I gov + 0x003a1244, // n0x096d c0x0000 (---------------) + I info + 0x00205284, // n0x096e c0x0000 (---------------) + I name + 0x0021fe03, // n0x096f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0970 c0x0000 (---------------) + I org + 0x0031a845, // n0x0971 c0x0000 (---------------) + I aland + 0x000ffa08, // n0x0972 c0x0000 (---------------) + blogspot + 0x0003be03, // n0x0973 c0x0000 (---------------) + iki + 0x002ec408, // n0x0974 c0x0000 (---------------) + I aeroport + 0x00350d87, // n0x0975 c0x0000 (---------------) + I assedic + 0x002d4884, // n0x0976 c0x0000 (---------------) + I asso + 0x0032f106, // n0x0977 c0x0000 (---------------) + I avocat + 0x00346806, // n0x0978 c0x0000 (---------------) + I avoues + 0x000ffa08, // n0x0979 c0x0000 (---------------) + blogspot + 0x0023fdc3, // n0x097a c0x0000 (---------------) + I cci + 0x00209b49, // n0x097b c0x0000 
(---------------) + I chambagri + 0x002b2115, // n0x097c c0x0000 (---------------) + I chirurgiens-dentistes + 0x00233503, // n0x097d c0x0000 (---------------) + I com + 0x0031ee12, // n0x097e c0x0000 (---------------) + I experts-comptables + 0x0031ebcf, // n0x097f c0x0000 (---------------) + I geometre-expert + 0x0033d7c4, // n0x0980 c0x0000 (---------------) + I gouv + 0x0022a885, // n0x0981 c0x0000 (---------------) + I greta + 0x002f2f10, // n0x0982 c0x0000 (---------------) + I huissier-justice + 0x00238bc7, // n0x0983 c0x0000 (---------------) + I medecin + 0x00201483, // n0x0984 c0x0000 (---------------) + I nom + 0x0025c988, // n0x0985 c0x0000 (---------------) + I notaires + 0x0034d60a, // n0x0986 c0x0000 (---------------) + I pharmacien + 0x00246184, // n0x0987 c0x0000 (---------------) + I port + 0x002e1043, // n0x0988 c0x0000 (---------------) + I prd + 0x00247506, // n0x0989 c0x0000 (---------------) + I presse + 0x00200142, // n0x098a c0x0000 (---------------) + I tm + 0x002d1c8b, // n0x098b c0x0000 (---------------) + I veterinaire + 0x00233503, // n0x098c c0x0000 (---------------) + I com + 0x0023a783, // n0x098d c0x0000 (---------------) + I edu + 0x0026cc83, // n0x098e c0x0000 (---------------) + I gov + 0x00209003, // n0x098f c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0990 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0991 c0x0000 (---------------) + I org + 0x002e5543, // n0x0992 c0x0000 (---------------) + I pvt + 0x00200742, // n0x0993 c0x0000 (---------------) + I co + 0x0021fe03, // n0x0994 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0995 c0x0000 (---------------) + I org + 0x00233503, // n0x0996 c0x0000 (---------------) + I com + 0x0023a783, // n0x0997 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0998 c0x0000 (---------------) + I gov + 0x00209003, // n0x0999 c0x0000 (---------------) + I mil + 0x0022d1c3, // n0x099a c0x0000 (---------------) + I org + 0x00233503, // n0x099b c0x0000 (---------------) 
+ I com + 0x0023a783, // n0x099c c0x0000 (---------------) + I edu + 0x0026cc83, // n0x099d c0x0000 (---------------) + I gov + 0x00322cc3, // n0x099e c0x0000 (---------------) + I ltd + 0x00218303, // n0x099f c0x0000 (---------------) + I mod + 0x0022d1c3, // n0x09a0 c0x0000 (---------------) + I org + 0x00200742, // n0x09a1 c0x0000 (---------------) + I co + 0x00233503, // n0x09a2 c0x0000 (---------------) + I com + 0x0023a783, // n0x09a3 c0x0000 (---------------) + I edu + 0x0021fe03, // n0x09a4 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09a5 c0x0000 (---------------) + I org + 0x00201542, // n0x09a6 c0x0000 (---------------) + I ac + 0x00233503, // n0x09a7 c0x0000 (---------------) + I com + 0x0023a783, // n0x09a8 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x09a9 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x09aa c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09ab c0x0000 (---------------) + I org + 0x002d4884, // n0x09ac c0x0000 (---------------) + I asso + 0x00233503, // n0x09ad c0x0000 (---------------) + I com + 0x0023a783, // n0x09ae c0x0000 (---------------) + I edu + 0x00207104, // n0x09af c0x0000 (---------------) + I mobi + 0x0021fe03, // n0x09b0 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09b1 c0x0000 (---------------) + I org + 0x000ffa08, // n0x09b2 c0x0000 (---------------) + blogspot + 0x00233503, // n0x09b3 c0x0000 (---------------) + I com + 0x0023a783, // n0x09b4 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x09b5 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x09b6 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09b7 c0x0000 (---------------) + I org + 0x00233503, // n0x09b8 c0x0000 (---------------) + I com + 0x0023a783, // n0x09b9 c0x0000 (---------------) + I edu + 0x00213183, // n0x09ba c0x0000 (---------------) + I gob + 0x0021d883, // n0x09bb c0x0000 (---------------) + I ind + 0x00209003, // n0x09bc c0x0000 (---------------) + I mil + 0x0021fe03, // n0x09bd c0x0000 
(---------------) + I net + 0x0022d1c3, // n0x09be c0x0000 (---------------) + I org + 0x00200742, // n0x09bf c0x0000 (---------------) + I co + 0x00233503, // n0x09c0 c0x0000 (---------------) + I com + 0x0023a783, // n0x09c1 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x09c2 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x09c3 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09c4 c0x0000 (---------------) + I org + 0x000ffa08, // n0x09c5 c0x0000 (---------------) + blogspot + 0x00233503, // n0x09c6 c0x0000 (---------------) + I com + 0x0023a783, // n0x09c7 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x09c8 c0x0000 (---------------) + I gov + 0x00317243, // n0x09c9 c0x0000 (---------------) + I idv + 0x0002e7c3, // n0x09ca c0x0000 (---------------) + inc + 0x00122cc3, // n0x09cb c0x0000 (---------------) + ltd + 0x0021fe03, // n0x09cc c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09cd c0x0000 (---------------) + I org + 0x003029ca, // n0x09ce c0x0000 (---------------) + I xn--55qx5d + 0x0031bd89, // n0x09cf c0x0000 (---------------) + I xn--ciqpn + 0x0033ac4b, // n0x09d0 c0x0000 (---------------) + I xn--gmq050i + 0x0033b1ca, // n0x09d1 c0x0000 (---------------) + I xn--gmqw5a + 0x0034280a, // n0x09d2 c0x0000 (---------------) + I xn--io0a7i + 0x00353a8b, // n0x09d3 c0x0000 (---------------) + I xn--lcvr32d + 0x0036704a, // n0x09d4 c0x0000 (---------------) + I xn--mk0axi + 0x0036fa4a, // n0x09d5 c0x0000 (---------------) + I xn--mxtq1m + 0x0037648a, // n0x09d6 c0x0000 (---------------) + I xn--od0alg + 0x0037670b, // n0x09d7 c0x0000 (---------------) + I xn--od0aq3b + 0x00392609, // n0x09d8 c0x0000 (---------------) + I xn--tn0ag + 0x003941ca, // n0x09d9 c0x0000 (---------------) + I xn--uc0atv + 0x0039470b, // n0x09da c0x0000 (---------------) + I xn--uc0ay4a + 0x0039cfcb, // n0x09db c0x0000 (---------------) + I xn--wcvs22d + 0x003a57ca, // n0x09dc c0x0000 (---------------) + I xn--zf0avx + 0x00233503, // n0x09dd c0x0000 
(---------------) + I com + 0x0023a783, // n0x09de c0x0000 (---------------) + I edu + 0x00213183, // n0x09df c0x0000 (---------------) + I gob + 0x00209003, // n0x09e0 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x09e1 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09e2 c0x0000 (---------------) + I org + 0x000ffa08, // n0x09e3 c0x0000 (---------------) + blogspot + 0x00233503, // n0x09e4 c0x0000 (---------------) + I com + 0x00263d84, // n0x09e5 c0x0000 (---------------) + I from + 0x00212582, // n0x09e6 c0x0000 (---------------) + I iz + 0x00205284, // n0x09e7 c0x0000 (---------------) + I name + 0x002a1985, // n0x09e8 c0x0000 (---------------) + I adult + 0x002011c3, // n0x09e9 c0x0000 (---------------) + I art + 0x002d4884, // n0x09ea c0x0000 (---------------) + I asso + 0x00233503, // n0x09eb c0x0000 (---------------) + I com + 0x0023d684, // n0x09ec c0x0000 (---------------) + I coop + 0x0023a783, // n0x09ed c0x0000 (---------------) + I edu + 0x0024d9c4, // n0x09ee c0x0000 (---------------) + I firm + 0x0033d7c4, // n0x09ef c0x0000 (---------------) + I gouv + 0x003a1244, // n0x09f0 c0x0000 (---------------) + I info + 0x00213ac3, // n0x09f1 c0x0000 (---------------) + I med + 0x0021fe03, // n0x09f2 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x09f3 c0x0000 (---------------) + I org + 0x00295005, // n0x09f4 c0x0000 (---------------) + I perso + 0x00208103, // n0x09f5 c0x0000 (---------------) + I pol + 0x00220e43, // n0x09f6 c0x0000 (---------------) + I pro + 0x00285b43, // n0x09f7 c0x0000 (---------------) + I rel + 0x00352004, // n0x09f8 c0x0000 (---------------) + I shop + 0x002ee544, // n0x09f9 c0x0000 (---------------) + I 2000 + 0x00258185, // n0x09fa c0x0000 (---------------) + I agrar + 0x000ffa08, // n0x09fb c0x0000 (---------------) + blogspot + 0x002f9244, // n0x09fc c0x0000 (---------------) + I bolt + 0x0037bc46, // n0x09fd c0x0000 (---------------) + I casino + 0x00286744, // n0x09fe c0x0000 (---------------) + I city + 
0x00200742, // n0x09ff c0x0000 (---------------) + I co + 0x00343507, // n0x0a00 c0x0000 (---------------) + I erotica + 0x00250887, // n0x0a01 c0x0000 (---------------) + I erotika + 0x0024b784, // n0x0a02 c0x0000 (---------------) + I film + 0x0025b085, // n0x0a03 c0x0000 (---------------) + I forum + 0x003700c5, // n0x0a04 c0x0000 (---------------) + I games + 0x00234dc5, // n0x0a05 c0x0000 (---------------) + I hotel + 0x003a1244, // n0x0a06 c0x0000 (---------------) + I info + 0x00224408, // n0x0a07 c0x0000 (---------------) + I ingatlan + 0x00293246, // n0x0a08 c0x0000 (---------------) + I jogasz + 0x002cce48, // n0x0a09 c0x0000 (---------------) + I konyvelo + 0x002435c5, // n0x0a0a c0x0000 (---------------) + I lakas + 0x003025c5, // n0x0a0b c0x0000 (---------------) + I media + 0x00221dc4, // n0x0a0c c0x0000 (---------------) + I news + 0x0022d1c3, // n0x0a0d c0x0000 (---------------) + I org + 0x002e1c44, // n0x0a0e c0x0000 (---------------) + I priv + 0x00352c46, // n0x0a0f c0x0000 (---------------) + I reklam + 0x00247603, // n0x0a10 c0x0000 (---------------) + I sex + 0x00352004, // n0x0a11 c0x0000 (---------------) + I shop + 0x00294905, // n0x0a12 c0x0000 (---------------) + I sport + 0x0023c004, // n0x0a13 c0x0000 (---------------) + I suli + 0x0020b984, // n0x0a14 c0x0000 (---------------) + I szex + 0x00200142, // n0x0a15 c0x0000 (---------------) + I tm + 0x00270746, // n0x0a16 c0x0000 (---------------) + I tozsde + 0x00389006, // n0x0a17 c0x0000 (---------------) + I utazas + 0x002f5685, // n0x0a18 c0x0000 (---------------) + I video + 0x00201542, // n0x0a19 c0x0000 (---------------) + I ac + 0x00330b83, // n0x0a1a c0x0000 (---------------) + I biz + 0x1c200742, // n0x0a1b c0x0070 (n0x0a24-n0x0a25) + I co + 0x0023bb04, // n0x0a1c c0x0000 (---------------) + I desa + 0x00202d42, // n0x0a1d c0x0000 (---------------) + I go + 0x00209003, // n0x0a1e c0x0000 (---------------) + I mil + 0x00226f02, // n0x0a1f c0x0000 (---------------) + I my + 
0x0021fe03, // n0x0a20 c0x0000 (---------------) + I net + 0x00200282, // n0x0a21 c0x0000 (---------------) + I or + 0x00217443, // n0x0a22 c0x0000 (---------------) + I sch + 0x00221a03, // n0x0a23 c0x0000 (---------------) + I web + 0x000ffa08, // n0x0a24 c0x0000 (---------------) + blogspot + 0x000ffa08, // n0x0a25 c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x0a26 c0x0000 (---------------) + I gov + 0x00201542, // n0x0a27 c0x0000 (---------------) + I ac + 0x1ce00742, // n0x0a28 c0x0073 (n0x0a2f-n0x0a30) + I co + 0x0026cc83, // n0x0a29 c0x0000 (---------------) + I gov + 0x00268a83, // n0x0a2a c0x0000 (---------------) + I idf + 0x00309ac3, // n0x0a2b c0x0000 (---------------) + I k12 + 0x002335c4, // n0x0a2c c0x0000 (---------------) + I muni + 0x0021fe03, // n0x0a2d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a2e c0x0000 (---------------) + I org + 0x000ffa08, // n0x0a2f c0x0000 (---------------) + blogspot + 0x00201542, // n0x0a30 c0x0000 (---------------) + I ac + 0x1d600742, // n0x0a31 c0x0075 (n0x0a37-n0x0a39) + I co + 0x00233503, // n0x0a32 c0x0000 (---------------) + I com + 0x0021fe03, // n0x0a33 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a34 c0x0000 (---------------) + I org + 0x0020e842, // n0x0a35 c0x0000 (---------------) + I tt + 0x00224e42, // n0x0a36 c0x0000 (---------------) + I tv + 0x00322cc3, // n0x0a37 c0x0000 (---------------) + I ltd + 0x002db143, // n0x0a38 c0x0000 (---------------) + I plc + 0x00201542, // n0x0a39 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x0a3a c0x0000 (---------------) + blogspot + 0x00200742, // n0x0a3b c0x0000 (---------------) + I co + 0x0023a783, // n0x0a3c c0x0000 (---------------) + I edu + 0x0024d9c4, // n0x0a3d c0x0000 (---------------) + I firm + 0x00205843, // n0x0a3e c0x0000 (---------------) + I gen + 0x0026cc83, // n0x0a3f c0x0000 (---------------) + I gov + 0x0021d883, // n0x0a40 c0x0000 (---------------) + I ind + 0x00209003, // n0x0a41 c0x0000 
(---------------) + I mil + 0x0021fe03, // n0x0a42 c0x0000 (---------------) + I net + 0x00218f83, // n0x0a43 c0x0000 (---------------) + I nic + 0x0022d1c3, // n0x0a44 c0x0000 (---------------) + I org + 0x0021d683, // n0x0a45 c0x0000 (---------------) + I res + 0x0011e793, // n0x0a46 c0x0000 (---------------) + barrel-of-knowledge + 0x001246d4, // n0x0a47 c0x0000 (---------------) + barrell-of-knowledge + 0x00013886, // n0x0a48 c0x0000 (---------------) + dyndns + 0x000562c7, // n0x0a49 c0x0000 (---------------) + for-our + 0x00155d09, // n0x0a4a c0x0000 (---------------) + groks-the + 0x000ebb0a, // n0x0a4b c0x0000 (---------------) + groks-this + 0x00087c4d, // n0x0a4c c0x0000 (---------------) + here-for-more + 0x001a408a, // n0x0a4d c0x0000 (---------------) + knowsitall + 0x0006ba86, // n0x0a4e c0x0000 (---------------) + selfip + 0x000eadc6, // n0x0a4f c0x0000 (---------------) + webhop + 0x00204b82, // n0x0a50 c0x0000 (---------------) + I eu + 0x00233503, // n0x0a51 c0x0000 (---------------) + I com + 0x00019506, // n0x0a52 c0x0000 (---------------) + github + 0x00155cc5, // n0x0a53 c0x0000 (---------------) + ngrok + 0x0000cb83, // n0x0a54 c0x0000 (---------------) + nid + 0x00011f88, // n0x0a55 c0x0000 (---------------) + pantheon + 0x000af708, // n0x0a56 c0x0000 (---------------) + sandcats + 0x00233503, // n0x0a57 c0x0000 (---------------) + I com + 0x0023a783, // n0x0a58 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0a59 c0x0000 (---------------) + I gov + 0x00209003, // n0x0a5a c0x0000 (---------------) + I mil + 0x0021fe03, // n0x0a5b c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a5c c0x0000 (---------------) + I org + 0x00201542, // n0x0a5d c0x0000 (---------------) + I ac + 0x00200742, // n0x0a5e c0x0000 (---------------) + I co + 0x0026cc83, // n0x0a5f c0x0000 (---------------) + I gov + 0x0020c782, // n0x0a60 c0x0000 (---------------) + I id + 0x0021fe03, // n0x0a61 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a62 
c0x0000 (---------------) + I org + 0x00217443, // n0x0a63 c0x0000 (---------------) + I sch + 0x0035d94f, // n0x0a64 c0x0000 (---------------) + I xn--mgba3a4f16a + 0x0035dd0e, // n0x0a65 c0x0000 (---------------) + I xn--mgba3a4fra + 0x000ffa08, // n0x0a66 c0x0000 (---------------) + blogspot + 0x00233503, // n0x0a67 c0x0000 (---------------) + I com + 0x00048107, // n0x0a68 c0x0000 (---------------) + cupcake + 0x0023a783, // n0x0a69 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0a6a c0x0000 (---------------) + I gov + 0x00201603, // n0x0a6b c0x0000 (---------------) + I int + 0x0021fe03, // n0x0a6c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0a6d c0x0000 (---------------) + I org + 0x0021ca03, // n0x0a6e c0x0000 (---------------) + I abr + 0x002ed6c7, // n0x0a6f c0x0000 (---------------) + I abruzzo + 0x00201002, // n0x0a70 c0x0000 (---------------) + I ag + 0x002cd4c9, // n0x0a71 c0x0000 (---------------) + I agrigento + 0x002001c2, // n0x0a72 c0x0000 (---------------) + I al + 0x00233b0b, // n0x0a73 c0x0000 (---------------) + I alessandria + 0x002dfc4a, // n0x0a74 c0x0000 (---------------) + I alto-adige + 0x002e3489, // n0x0a75 c0x0000 (---------------) + I altoadige + 0x00200502, // n0x0a76 c0x0000 (---------------) + I an + 0x00350806, // n0x0a77 c0x0000 (---------------) + I ancona + 0x002847d5, // n0x0a78 c0x0000 (---------------) + I andria-barletta-trani + 0x00233c55, // n0x0a79 c0x0000 (---------------) + I andria-trani-barletta + 0x00290513, // n0x0a7a c0x0000 (---------------) + I andriabarlettatrani + 0x002341d3, // n0x0a7b c0x0000 (---------------) + I andriatranibarletta + 0x002029c2, // n0x0a7c c0x0000 (---------------) + I ao + 0x00216fc5, // n0x0a7d c0x0000 (---------------) + I aosta + 0x0030f30c, // n0x0a7e c0x0000 (---------------) + I aosta-valley + 0x00216fcb, // n0x0a7f c0x0000 (---------------) + I aostavalley + 0x00251085, // n0x0a80 c0x0000 (---------------) + I aoste + 0x00200d02, // n0x0a81 c0x0000 
(---------------) + I ap + 0x002003c2, // n0x0a82 c0x0000 (---------------) + I aq + 0x0036c346, // n0x0a83 c0x0000 (---------------) + I aquila + 0x00200a42, // n0x0a84 c0x0000 (---------------) + I ar + 0x0027a406, // n0x0a85 c0x0000 (---------------) + I arezzo + 0x00397ccd, // n0x0a86 c0x0000 (---------------) + I ascoli-piceno + 0x0034c1cc, // n0x0a87 c0x0000 (---------------) + I ascolipiceno + 0x0021e884, // n0x0a88 c0x0000 (---------------) + I asti + 0x00200102, // n0x0a89 c0x0000 (---------------) + I at + 0x00203402, // n0x0a8a c0x0000 (---------------) + I av + 0x00224c08, // n0x0a8b c0x0000 (---------------) + I avellino + 0x00202002, // n0x0a8c c0x0000 (---------------) + I ba + 0x00248586, // n0x0a8d c0x0000 (---------------) + I balsan + 0x00249204, // n0x0a8e c0x0000 (---------------) + I bari + 0x00284995, // n0x0a8f c0x0000 (---------------) + I barletta-trani-andria + 0x00290693, // n0x0a90 c0x0000 (---------------) + I barlettatraniandria + 0x00207fc3, // n0x0a91 c0x0000 (---------------) + I bas + 0x0032ee0a, // n0x0a92 c0x0000 (---------------) + I basilicata + 0x0028e2c7, // n0x0a93 c0x0000 (---------------) + I belluno + 0x002e4949, // n0x0a94 c0x0000 (---------------) + I benevento + 0x00228d47, // n0x0a95 c0x0000 (---------------) + I bergamo + 0x002ee482, // n0x0a96 c0x0000 (---------------) + I bg + 0x00200002, // n0x0a97 c0x0000 (---------------) + I bi + 0x003a4cc6, // n0x0a98 c0x0000 (---------------) + I biella + 0x0020c8c2, // n0x0a99 c0x0000 (---------------) + I bl + 0x000ffa08, // n0x0a9a c0x0000 (---------------) + blogspot + 0x002104c2, // n0x0a9b c0x0000 (---------------) + I bn + 0x0020e402, // n0x0a9c c0x0000 (---------------) + I bo + 0x0038df47, // n0x0a9d c0x0000 (---------------) + I bologna + 0x0020fec7, // n0x0a9e c0x0000 (---------------) + I bolzano + 0x0021c105, // n0x0a9f c0x0000 (---------------) + I bozen + 0x0021c402, // n0x0aa0 c0x0000 (---------------) + I br + 0x0021d647, // n0x0aa1 c0x0000 (---------------) 
+ I brescia + 0x0021d808, // n0x0aa2 c0x0000 (---------------) + I brindisi + 0x00237542, // n0x0aa3 c0x0000 (---------------) + I bs + 0x0021fd02, // n0x0aa4 c0x0000 (---------------) + I bt + 0x00230bc2, // n0x0aa5 c0x0000 (---------------) + I bz + 0x00200302, // n0x0aa6 c0x0000 (---------------) + I ca + 0x0023eb48, // n0x0aa7 c0x0000 (---------------) + I cagliari + 0x00213543, // n0x0aa8 c0x0000 (---------------) + I cal + 0x00285688, // n0x0aa9 c0x0000 (---------------) + I calabria + 0x0023ad8d, // n0x0aaa c0x0000 (---------------) + I caltanissetta + 0x00221ac3, // n0x0aab c0x0000 (---------------) + I cam + 0x00316e08, // n0x0aac c0x0000 (---------------) + I campania + 0x00241acf, // n0x0aad c0x0000 (---------------) + I campidano-medio + 0x00241e8e, // n0x0aae c0x0000 (---------------) + I campidanomedio + 0x0034364a, // n0x0aaf c0x0000 (---------------) + I campobasso + 0x002f6e11, // n0x0ab0 c0x0000 (---------------) + I carbonia-iglesias + 0x002f7290, // n0x0ab1 c0x0000 (---------------) + I carboniaiglesias + 0x002b3b4d, // n0x0ab2 c0x0000 (---------------) + I carrara-massa + 0x002b3e8c, // n0x0ab3 c0x0000 (---------------) + I carraramassa + 0x0022c607, // n0x0ab4 c0x0000 (---------------) + I caserta + 0x0032ef87, // n0x0ab5 c0x0000 (---------------) + I catania + 0x0032f1c9, // n0x0ab6 c0x0000 (---------------) + I catanzaro + 0x0021e982, // n0x0ab7 c0x0000 (---------------) + I cb + 0x00200b42, // n0x0ab8 c0x0000 (---------------) + I ce + 0x0025870c, // n0x0ab9 c0x0000 (---------------) + I cesena-forli + 0x00258a0b, // n0x0aba c0x0000 (---------------) + I cesenaforli + 0x00201582, // n0x0abb c0x0000 (---------------) + I ch + 0x002d5a46, // n0x0abc c0x0000 (---------------) + I chieti + 0x00200682, // n0x0abd c0x0000 (---------------) + I ci + 0x00209182, // n0x0abe c0x0000 (---------------) + I cl + 0x0021ba42, // n0x0abf c0x0000 (---------------) + I cn + 0x00200742, // n0x0ac0 c0x0000 (---------------) + I co + 0x00234804, // n0x0ac1 
c0x0000 (---------------) + I como + 0x00240e07, // n0x0ac2 c0x0000 (---------------) + I cosenza + 0x002049c2, // n0x0ac3 c0x0000 (---------------) + I cr + 0x00244d07, // n0x0ac4 c0x0000 (---------------) + I cremona + 0x00245f87, // n0x0ac5 c0x0000 (---------------) + I crotone + 0x00211c82, // n0x0ac6 c0x0000 (---------------) + I cs + 0x00231382, // n0x0ac7 c0x0000 (---------------) + I ct + 0x00247fc5, // n0x0ac8 c0x0000 (---------------) + I cuneo + 0x00229ec2, // n0x0ac9 c0x0000 (---------------) + I cz + 0x0025a38e, // n0x0aca c0x0000 (---------------) + I dell-ogliastra + 0x0026714d, // n0x0acb c0x0000 (---------------) + I dellogliastra + 0x0023a783, // n0x0acc c0x0000 (---------------) + I edu + 0x002543ce, // n0x0acd c0x0000 (---------------) + I emilia-romagna + 0x0036dc0d, // n0x0ace c0x0000 (---------------) + I emiliaromagna + 0x00360a83, // n0x0acf c0x0000 (---------------) + I emr + 0x00202bc2, // n0x0ad0 c0x0000 (---------------) + I en + 0x00205a84, // n0x0ad1 c0x0000 (---------------) + I enna + 0x0024b242, // n0x0ad2 c0x0000 (---------------) + I fc + 0x0020b302, // n0x0ad3 c0x0000 (---------------) + I fe + 0x002e2545, // n0x0ad4 c0x0000 (---------------) + I fermo + 0x00300807, // n0x0ad5 c0x0000 (---------------) + I ferrara + 0x0035d282, // n0x0ad6 c0x0000 (---------------) + I fg + 0x00207502, // n0x0ad7 c0x0000 (---------------) + I fi + 0x0024d307, // n0x0ad8 c0x0000 (---------------) + I firenze + 0x00252748, // n0x0ad9 c0x0000 (---------------) + I florence + 0x00242902, // n0x0ada c0x0000 (---------------) + I fm + 0x003a12c6, // n0x0adb c0x0000 (---------------) + I foggia + 0x0025858c, // n0x0adc c0x0000 (---------------) + I forli-cesena + 0x002588cb, // n0x0add c0x0000 (---------------) + I forlicesena + 0x00200582, // n0x0ade c0x0000 (---------------) + I fr + 0x0026050f, // n0x0adf c0x0000 (---------------) + I friuli-v-giulia + 0x002608d0, // n0x0ae0 c0x0000 (---------------) + I friuli-ve-giulia + 0x00260ccf, // n0x0ae1 
c0x0000 (---------------) + I friuli-vegiulia + 0x00261095, // n0x0ae2 c0x0000 (---------------) + I friuli-venezia-giulia + 0x002615d4, // n0x0ae3 c0x0000 (---------------) + I friuli-veneziagiulia + 0x00261ace, // n0x0ae4 c0x0000 (---------------) + I friuli-vgiulia + 0x00261e4e, // n0x0ae5 c0x0000 (---------------) + I friuliv-giulia + 0x002621cf, // n0x0ae6 c0x0000 (---------------) + I friulive-giulia + 0x0026258e, // n0x0ae7 c0x0000 (---------------) + I friulivegiulia + 0x00262914, // n0x0ae8 c0x0000 (---------------) + I friulivenezia-giulia + 0x00262e13, // n0x0ae9 c0x0000 (---------------) + I friuliveneziagiulia + 0x002632cd, // n0x0aea c0x0000 (---------------) + I friulivgiulia + 0x002767c9, // n0x0aeb c0x0000 (---------------) + I frosinone + 0x00288803, // n0x0aec c0x0000 (---------------) + I fvg + 0x002026c2, // n0x0aed c0x0000 (---------------) + I ge + 0x00307105, // n0x0aee c0x0000 (---------------) + I genoa + 0x00205846, // n0x0aef c0x0000 (---------------) + I genova + 0x00202d42, // n0x0af0 c0x0000 (---------------) + I go + 0x0026edc7, // n0x0af1 c0x0000 (---------------) + I gorizia + 0x0026cc83, // n0x0af2 c0x0000 (---------------) + I gov + 0x00200c82, // n0x0af3 c0x0000 (---------------) + I gr + 0x00311b48, // n0x0af4 c0x0000 (---------------) + I grosseto + 0x002f7051, // n0x0af5 c0x0000 (---------------) + I iglesias-carbonia + 0x002f7490, // n0x0af6 c0x0000 (---------------) + I iglesiascarbonia + 0x00205c42, // n0x0af7 c0x0000 (---------------) + I im + 0x00352747, // n0x0af8 c0x0000 (---------------) + I imperia + 0x002006c2, // n0x0af9 c0x0000 (---------------) + I is + 0x0025dc87, // n0x0afa c0x0000 (---------------) + I isernia + 0x00206fc2, // n0x0afb c0x0000 (---------------) + I kr + 0x0025e389, // n0x0afc c0x0000 (---------------) + I la-spezia + 0x0036c307, // n0x0afd c0x0000 (---------------) + I laquila + 0x0025fe48, // n0x0afe c0x0000 (---------------) + I laspezia + 0x00223606, // n0x0aff c0x0000 (---------------) + I 
latina + 0x002db043, // n0x0b00 c0x0000 (---------------) + I laz + 0x00309045, // n0x0b01 c0x0000 (---------------) + I lazio + 0x0023aa02, // n0x0b02 c0x0000 (---------------) + I lc + 0x0020acc2, // n0x0b03 c0x0000 (---------------) + I le + 0x003a50c5, // n0x0b04 c0x0000 (---------------) + I lecce + 0x0022e105, // n0x0b05 c0x0000 (---------------) + I lecco + 0x00207202, // n0x0b06 c0x0000 (---------------) + I li + 0x0023c083, // n0x0b07 c0x0000 (---------------) + I lig + 0x0023c087, // n0x0b08 c0x0000 (---------------) + I liguria + 0x00210307, // n0x0b09 c0x0000 (---------------) + I livorno + 0x00200242, // n0x0b0a c0x0000 (---------------) + I lo + 0x00259dc4, // n0x0b0b c0x0000 (---------------) + I lodi + 0x00214303, // n0x0b0c c0x0000 (---------------) + I lom + 0x002c4149, // n0x0b0d c0x0000 (---------------) + I lombardia + 0x002db908, // n0x0b0e c0x0000 (---------------) + I lombardy + 0x00209e02, // n0x0b0f c0x0000 (---------------) + I lt + 0x00202f42, // n0x0b10 c0x0000 (---------------) + I lu + 0x0026d147, // n0x0b11 c0x0000 (---------------) + I lucania + 0x002b6305, // n0x0b12 c0x0000 (---------------) + I lucca + 0x00316908, // n0x0b13 c0x0000 (---------------) + I macerata + 0x003a0a07, // n0x0b14 c0x0000 (---------------) + I mantova + 0x00201183, // n0x0b15 c0x0000 (---------------) + I mar + 0x00284246, // n0x0b16 c0x0000 (---------------) + I marche + 0x002b39cd, // n0x0b17 c0x0000 (---------------) + I massa-carrara + 0x002b3d4c, // n0x0b18 c0x0000 (---------------) + I massacarrara + 0x00256e86, // n0x0b19 c0x0000 (---------------) + I matera + 0x00208602, // n0x0b1a c0x0000 (---------------) + I mb + 0x0022ac02, // n0x0b1b c0x0000 (---------------) + I mc + 0x00203e82, // n0x0b1c c0x0000 (---------------) + I me + 0x0024194f, // n0x0b1d c0x0000 (---------------) + I medio-campidano + 0x00241d4e, // n0x0b1e c0x0000 (---------------) + I mediocampidano + 0x00370147, // n0x0b1f c0x0000 (---------------) + I messina + 0x00209002, // 
n0x0b20 c0x0000 (---------------) + I mi + 0x00342685, // n0x0b21 c0x0000 (---------------) + I milan + 0x00342686, // n0x0b22 c0x0000 (---------------) + I milano + 0x0021fdc2, // n0x0b23 c0x0000 (---------------) + I mn + 0x00207102, // n0x0b24 c0x0000 (---------------) + I mo + 0x00218306, // n0x0b25 c0x0000 (---------------) + I modena + 0x002133c3, // n0x0b26 c0x0000 (---------------) + I mol + 0x0025dbc6, // n0x0b27 c0x0000 (---------------) + I molise + 0x002c2d45, // n0x0b28 c0x0000 (---------------) + I monza + 0x002c2d4d, // n0x0b29 c0x0000 (---------------) + I monza-brianza + 0x002c3595, // n0x0b2a c0x0000 (---------------) + I monza-e-della-brianza + 0x002c3d4c, // n0x0b2b c0x0000 (---------------) + I monzabrianza + 0x002c4a4d, // n0x0b2c c0x0000 (---------------) + I monzaebrianza + 0x002c4e12, // n0x0b2d c0x0000 (---------------) + I monzaedellabrianza + 0x0020f702, // n0x0b2e c0x0000 (---------------) + I ms + 0x00204c02, // n0x0b2f c0x0000 (---------------) + I mt + 0x00201402, // n0x0b30 c0x0000 (---------------) + I na + 0x00235006, // n0x0b31 c0x0000 (---------------) + I naples + 0x002a3d86, // n0x0b32 c0x0000 (---------------) + I napoli + 0x00200c02, // n0x0b33 c0x0000 (---------------) + I no + 0x002058c6, // n0x0b34 c0x0000 (---------------) + I novara + 0x002017c2, // n0x0b35 c0x0000 (---------------) + I nu + 0x0039c105, // n0x0b36 c0x0000 (---------------) + I nuoro + 0x00200c42, // n0x0b37 c0x0000 (---------------) + I og + 0x0025a4c9, // n0x0b38 c0x0000 (---------------) + I ogliastra + 0x0027568c, // n0x0b39 c0x0000 (---------------) + I olbia-tempio + 0x002759cb, // n0x0b3a c0x0000 (---------------) + I olbiatempio + 0x00200282, // n0x0b3b c0x0000 (---------------) + I or + 0x00252b88, // n0x0b3c c0x0000 (---------------) + I oristano + 0x00200782, // n0x0b3d c0x0000 (---------------) + I ot + 0x0020ac42, // n0x0b3e c0x0000 (---------------) + I pa + 0x00216d86, // n0x0b3f c0x0000 (---------------) + I padova + 0x00361405, // 
n0x0b40 c0x0000 (---------------) + I padua + 0x00379f47, // n0x0b41 c0x0000 (---------------) + I palermo + 0x00395345, // n0x0b42 c0x0000 (---------------) + I parma + 0x002dbfc5, // n0x0b43 c0x0000 (---------------) + I pavia + 0x00248182, // n0x0b44 c0x0000 (---------------) + I pc + 0x00352102, // n0x0b45 c0x0000 (---------------) + I pd + 0x00207782, // n0x0b46 c0x0000 (---------------) + I pe + 0x00270d47, // n0x0b47 c0x0000 (---------------) + I perugia + 0x0031c84d, // n0x0b48 c0x0000 (---------------) + I pesaro-urbino + 0x0031cbcc, // n0x0b49 c0x0000 (---------------) + I pesarourbino + 0x00236987, // n0x0b4a c0x0000 (---------------) + I pescara + 0x002495c2, // n0x0b4b c0x0000 (---------------) + I pg + 0x00225702, // n0x0b4c c0x0000 (---------------) + I pi + 0x00338288, // n0x0b4d c0x0000 (---------------) + I piacenza + 0x00258dc8, // n0x0b4e c0x0000 (---------------) + I piedmont + 0x002d6308, // n0x0b4f c0x0000 (---------------) + I piemonte + 0x002df704, // n0x0b50 c0x0000 (---------------) + I pisa + 0x002bee07, // n0x0b51 c0x0000 (---------------) + I pistoia + 0x002dcc83, // n0x0b52 c0x0000 (---------------) + I pmn + 0x002493c2, // n0x0b53 c0x0000 (---------------) + I pn + 0x00200942, // n0x0b54 c0x0000 (---------------) + I po + 0x002dfec9, // n0x0b55 c0x0000 (---------------) + I pordenone + 0x002093c7, // n0x0b56 c0x0000 (---------------) + I potenza + 0x00204602, // n0x0b57 c0x0000 (---------------) + I pr + 0x00270245, // n0x0b58 c0x0000 (---------------) + I prato + 0x0028c9c2, // n0x0b59 c0x0000 (---------------) + I pt + 0x00235302, // n0x0b5a c0x0000 (---------------) + I pu + 0x00278843, // n0x0b5b c0x0000 (---------------) + I pug + 0x00278846, // n0x0b5c c0x0000 (---------------) + I puglia + 0x002e5542, // n0x0b5d c0x0000 (---------------) + I pv + 0x002e6302, // n0x0b5e c0x0000 (---------------) + I pz + 0x002005c2, // n0x0b5f c0x0000 (---------------) + I ra + 0x0030c246, // n0x0b60 c0x0000 (---------------) + I ragusa + 
0x002059c7, // n0x0b61 c0x0000 (---------------) + I ravenna + 0x002002c2, // n0x0b62 c0x0000 (---------------) + I rc + 0x00207002, // n0x0b63 c0x0000 (---------------) + I re + 0x002ed34f, // n0x0b64 c0x0000 (---------------) + I reggio-calabria + 0x0025420d, // n0x0b65 c0x0000 (---------------) + I reggio-emilia + 0x0028550e, // n0x0b66 c0x0000 (---------------) + I reggiocalabria + 0x0036da8c, // n0x0b67 c0x0000 (---------------) + I reggioemilia + 0x0020ce02, // n0x0b68 c0x0000 (---------------) + I rg + 0x00200a82, // n0x0b69 c0x0000 (---------------) + I ri + 0x00223445, // n0x0b6a c0x0000 (---------------) + I rieti + 0x003a5bc6, // n0x0b6b c0x0000 (---------------) + I rimini + 0x00222182, // n0x0b6c c0x0000 (---------------) + I rm + 0x0020cb42, // n0x0b6d c0x0000 (---------------) + I rn + 0x00202202, // n0x0b6e c0x0000 (---------------) + I ro + 0x00254584, // n0x0b6f c0x0000 (---------------) + I roma + 0x002dd584, // n0x0b70 c0x0000 (---------------) + I rome + 0x00334fc6, // n0x0b71 c0x0000 (---------------) + I rovigo + 0x002004c2, // n0x0b72 c0x0000 (---------------) + I sa + 0x00279747, // n0x0b73 c0x0000 (---------------) + I salerno + 0x002257c3, // n0x0b74 c0x0000 (---------------) + I sar + 0x00226048, // n0x0b75 c0x0000 (---------------) + I sardegna + 0x00227248, // n0x0b76 c0x0000 (---------------) + I sardinia + 0x00378687, // n0x0b77 c0x0000 (---------------) + I sassari + 0x00234f06, // n0x0b78 c0x0000 (---------------) + I savona + 0x0020a402, // n0x0b79 c0x0000 (---------------) + I si + 0x0023eac3, // n0x0b7a c0x0000 (---------------) + I sic + 0x0036e647, // n0x0b7b c0x0000 (---------------) + I sicilia + 0x00252146, // n0x0b7c c0x0000 (---------------) + I sicily + 0x002c8945, // n0x0b7d c0x0000 (---------------) + I siena + 0x003419c8, // n0x0b7e c0x0000 (---------------) + I siracusa + 0x00205682, // n0x0b7f c0x0000 (---------------) + I so + 0x00308547, // n0x0b80 c0x0000 (---------------) + I sondrio + 0x00209382, // n0x0b81 
c0x0000 (---------------) + I sp + 0x0033b802, // n0x0b82 c0x0000 (---------------) + I sr + 0x002067c2, // n0x0b83 c0x0000 (---------------) + I ss + 0x002cebc9, // n0x0b84 c0x0000 (---------------) + I suedtirol + 0x00235f42, // n0x0b85 c0x0000 (---------------) + I sv + 0x00200a02, // n0x0b86 c0x0000 (---------------) + I ta + 0x00234603, // n0x0b87 c0x0000 (---------------) + I taa + 0x003096c7, // n0x0b88 c0x0000 (---------------) + I taranto + 0x002012c2, // n0x0b89 c0x0000 (---------------) + I te + 0x0027580c, // n0x0b8a c0x0000 (---------------) + I tempio-olbia + 0x00275b0b, // n0x0b8b c0x0000 (---------------) + I tempioolbia + 0x00256f06, // n0x0b8c c0x0000 (---------------) + I teramo + 0x0020cac5, // n0x0b8d c0x0000 (---------------) + I terni + 0x0024f882, // n0x0b8e c0x0000 (---------------) + I tn + 0x00208082, // n0x0b8f c0x0000 (---------------) + I to + 0x002b1946, // n0x0b90 c0x0000 (---------------) + I torino + 0x002280c3, // n0x0b91 c0x0000 (---------------) + I tos + 0x00324e47, // n0x0b92 c0x0000 (---------------) + I toscana + 0x00211f42, // n0x0b93 c0x0000 (---------------) + I tp + 0x00203002, // n0x0b94 c0x0000 (---------------) + I tr + 0x00284655, // n0x0b95 c0x0000 (---------------) + I trani-andria-barletta + 0x00233e15, // n0x0b96 c0x0000 (---------------) + I trani-barletta-andria + 0x002903d3, // n0x0b97 c0x0000 (---------------) + I traniandriabarletta + 0x00234353, // n0x0b98 c0x0000 (---------------) + I tranibarlettaandria + 0x00294a07, // n0x0b99 c0x0000 (---------------) + I trapani + 0x002b7688, // n0x0b9a c0x0000 (---------------) + I trentino + 0x002cf4d0, // n0x0b9b c0x0000 (---------------) + I trentino-a-adige + 0x002ef2cf, // n0x0b9c c0x0000 (---------------) + I trentino-aadige + 0x00342d53, // n0x0b9d c0x0000 (---------------) + I trentino-alto-adige + 0x0034eb92, // n0x0b9e c0x0000 (---------------) + I trentino-altoadige + 0x002cd090, // n0x0b9f c0x0000 (---------------) + I trentino-s-tirol + 0x002b768f, // 
n0x0ba0 c0x0000 (---------------) + I trentino-stirol + 0x002ba492, // n0x0ba1 c0x0000 (---------------) + I trentino-sud-tirol + 0x002c2911, // n0x0ba2 c0x0000 (---------------) + I trentino-sudtirol + 0x002ca953, // n0x0ba3 c0x0000 (---------------) + I trentino-sued-tirol + 0x002ce992, // n0x0ba4 c0x0000 (---------------) + I trentino-suedtirol + 0x002cfd4f, // n0x0ba5 c0x0000 (---------------) + I trentinoa-adige + 0x002d4c8e, // n0x0ba6 c0x0000 (---------------) + I trentinoaadige + 0x002dfa52, // n0x0ba7 c0x0000 (---------------) + I trentinoalto-adige + 0x002e3291, // n0x0ba8 c0x0000 (---------------) + I trentinoaltoadige + 0x002e3a8f, // n0x0ba9 c0x0000 (---------------) + I trentinos-tirol + 0x002e55ce, // n0x0baa c0x0000 (---------------) + I trentinostirol + 0x002e6691, // n0x0bab c0x0000 (---------------) + I trentinosud-tirol + 0x002f3a90, // n0x0bac c0x0000 (---------------) + I trentinosudtirol + 0x0035a592, // n0x0bad c0x0000 (---------------) + I trentinosued-tirol + 0x002e8b91, // n0x0bae c0x0000 (---------------) + I trentinosuedtirol + 0x002f8a86, // n0x0baf c0x0000 (---------------) + I trento + 0x002f9307, // n0x0bb0 c0x0000 (---------------) + I treviso + 0x003673c7, // n0x0bb1 c0x0000 (---------------) + I trieste + 0x00203f42, // n0x0bb2 c0x0000 (---------------) + I ts + 0x0027f145, // n0x0bb3 c0x0000 (---------------) + I turin + 0x002f2c87, // n0x0bb4 c0x0000 (---------------) + I tuscany + 0x00224e42, // n0x0bb5 c0x0000 (---------------) + I tv + 0x00209242, // n0x0bb6 c0x0000 (---------------) + I ud + 0x0022a285, // n0x0bb7 c0x0000 (---------------) + I udine + 0x0021e183, // n0x0bb8 c0x0000 (---------------) + I umb + 0x00258406, // n0x0bb9 c0x0000 (---------------) + I umbria + 0x0031ca0d, // n0x0bba c0x0000 (---------------) + I urbino-pesaro + 0x0031cd4c, // n0x0bbb c0x0000 (---------------) + I urbinopesaro + 0x002000c2, // n0x0bbc c0x0000 (---------------) + I va + 0x0030f18b, // n0x0bbd c0x0000 (---------------) + I 
val-d-aosta + 0x00216e8a, // n0x0bbe c0x0000 (---------------) + I val-daosta + 0x00323dca, // n0x0bbf c0x0000 (---------------) + I vald-aosta + 0x002b09c9, // n0x0bc0 c0x0000 (---------------) + I valdaosta + 0x002deb4b, // n0x0bc1 c0x0000 (---------------) + I valle-aosta + 0x003a0b4d, // n0x0bc2 c0x0000 (---------------) + I valle-d-aosta + 0x002f338c, // n0x0bc3 c0x0000 (---------------) + I valle-daosta + 0x00224e8a, // n0x0bc4 c0x0000 (---------------) + I valleaosta + 0x0022594c, // n0x0bc5 c0x0000 (---------------) + I valled-aosta + 0x0024098b, // n0x0bc6 c0x0000 (---------------) + I valledaosta + 0x00250ecc, // n0x0bc7 c0x0000 (---------------) + I vallee-aoste + 0x0025184b, // n0x0bc8 c0x0000 (---------------) + I valleeaoste + 0x00275603, // n0x0bc9 c0x0000 (---------------) + I vao + 0x002894c6, // n0x0bca c0x0000 (---------------) + I varese + 0x002dc5c2, // n0x0bcb c0x0000 (---------------) + I vb + 0x002e6b02, // n0x0bcc c0x0000 (---------------) + I vc + 0x00210243, // n0x0bcd c0x0000 (---------------) + I vda + 0x00202b82, // n0x0bce c0x0000 (---------------) + I ve + 0x00202b83, // n0x0bcf c0x0000 (---------------) + I ven + 0x00375e46, // n0x0bd0 c0x0000 (---------------) + I veneto + 0x00261247, // n0x0bd1 c0x0000 (---------------) + I venezia + 0x0026f246, // n0x0bd2 c0x0000 (---------------) + I venice + 0x0022d688, // n0x0bd3 c0x0000 (---------------) + I verbania + 0x002dddc8, // n0x0bd4 c0x0000 (---------------) + I vercelli + 0x003607c6, // n0x0bd5 c0x0000 (---------------) + I verona + 0x00205d42, // n0x0bd6 c0x0000 (---------------) + I vi + 0x002f504d, // n0x0bd7 c0x0000 (---------------) + I vibo-valentia + 0x002f538c, // n0x0bd8 c0x0000 (---------------) + I vibovalentia + 0x0033d887, // n0x0bd9 c0x0000 (---------------) + I vicenza + 0x002f9107, // n0x0bda c0x0000 (---------------) + I viterbo + 0x00211082, // n0x0bdb c0x0000 (---------------) + I vr + 0x00227982, // n0x0bdc c0x0000 (---------------) + I vs + 0x00271f82, // 
n0x0bdd c0x0000 (---------------) + I vt + 0x00214982, // n0x0bde c0x0000 (---------------) + I vv + 0x00200742, // n0x0bdf c0x0000 (---------------) + I co + 0x0021fe03, // n0x0be0 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0be1 c0x0000 (---------------) + I org + 0x00233503, // n0x0be2 c0x0000 (---------------) + I com + 0x0023a783, // n0x0be3 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x0be4 c0x0000 (---------------) + I gov + 0x00209003, // n0x0be5 c0x0000 (---------------) + I mil + 0x00205284, // n0x0be6 c0x0000 (---------------) + I name + 0x0021fe03, // n0x0be7 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x0be8 c0x0000 (---------------) + I org + 0x00217443, // n0x0be9 c0x0000 (---------------) + I sch + 0x00201542, // n0x0bea c0x0000 (---------------) + I ac + 0x00200342, // n0x0beb c0x0000 (---------------) + I ad + 0x2068f505, // n0x0bec c0x0081 (n0x0c59-n0x0c8d) + I aichi + 0x20a01dc5, // n0x0bed c0x0082 (n0x0c8d-n0x0ca9) + I akita + 0x20f0bf06, // n0x0bee c0x0083 (n0x0ca9-n0x0cbf) + I aomori + 0x000ffa08, // n0x0bef c0x0000 (---------------) + blogspot + 0x212add45, // n0x0bf0 c0x0084 (n0x0cbf-n0x0cf9) + I chiba + 0x00200742, // n0x0bf1 c0x0000 (---------------) + I co + 0x00202602, // n0x0bf2 c0x0000 (---------------) + I ed + 0x21629005, // n0x0bf3 c0x0085 (n0x0cf9-n0x0d0f) + I ehime + 0x21a7dac5, // n0x0bf4 c0x0086 (n0x0d0f-n0x0d1e) + I fukui + 0x21e7e807, // n0x0bf5 c0x0087 (n0x0d1e-n0x0d5d) + I fukuoka + 0x2232fcc9, // n0x0bf6 c0x0088 (n0x0d5d-n0x0d90) + I fukushima + 0x2276aec4, // n0x0bf7 c0x0089 (n0x0d90-n0x0db6) + I gifu + 0x00202d42, // n0x0bf8 c0x0000 (---------------) + I go + 0x00200c82, // n0x0bf9 c0x0000 (---------------) + I gr + 0x22b66385, // n0x0bfa c0x008a (n0x0db6-n0x0dda) + I gunma + 0x22e0ee49, // n0x0bfb c0x008b (n0x0dda-n0x0df3) + I hiroshima + 0x23369e88, // n0x0bfc c0x008c (n0x0df3-n0x0e81) + I hokkaido + 0x236aba85, // n0x0bfd c0x008d (n0x0e81-n0x0eaf) + I hyogo + 0x23ac1ac7, // n0x0bfe c0x008e 
(n0x0eaf-n0x0ee2) + I ibaraki + 0x23e1a4c8, // n0x0bff c0x008f (n0x0ee2-n0x0ef5) + I ishikawa + 0x242d8485, // n0x0c00 c0x0090 (n0x0ef5-n0x0f17) + I iwate + 0x24600fc6, // n0x0c01 c0x0091 (n0x0f17-n0x0f26) + I kagawa + 0x24a76189, // n0x0c02 c0x0092 (n0x0f26-n0x0f3a) + I kagoshima + 0x24f1a4c8, // n0x0c03 c0x0093 (n0x0f3a-n0x0f58) + I kanagawa + 0x252b9088, // n0x0c04 c0x0094 (n0x0f58-n0x0f59)* o I kawasaki + 0x2569c90a, // n0x0c05 c0x0095 (n0x0f59-n0x0f5a)* o I kitakyushu + 0x25a4f544, // n0x0c06 c0x0096 (n0x0f5a-n0x0f5b)* o I kobe + 0x25ecb145, // n0x0c07 c0x0097 (n0x0f5b-n0x0f7a) + I kochi + 0x262b3748, // n0x0c08 c0x0098 (n0x0f7a-n0x0f94) + I kumamoto + 0x266be0c5, // n0x0c09 c0x0099 (n0x0f94-n0x0fb3) + I kyoto + 0x00219082, // n0x0c0a c0x0000 (---------------) + I lg + 0x26a4b943, // n0x0c0b c0x009a (n0x0fb3-n0x0fd1) + I mie + 0x26ea29c6, // n0x0c0c c0x009b (n0x0fd1-n0x0ff2) + I miyagi + 0x27266108, // n0x0c0d c0x009c (n0x0ff2-n0x100d) + I miyazaki + 0x27754f86, // n0x0c0e c0x009d (n0x100d-n0x1058) + I nagano + 0x27adda48, // n0x0c0f c0x009e (n0x1058-n0x106e) + I nagasaki + 0x27f0f646, // n0x0c10 c0x009f (n0x106e-n0x106f)* o I nagoya + 0x282c8a04, // n0x0c11 c0x00a0 (n0x106f-n0x1095) + I nara + 0x00202c02, // n0x0c12 c0x0000 (---------------) + I ne + 0x2863ff87, // n0x0c13 c0x00a1 (n0x1095-n0x10b7) + I niigata + 0x28aa8984, // n0x0c14 c0x00a2 (n0x10b7-n0x10ca) + I oita + 0x28e78dc7, // n0x0c15 c0x00a3 (n0x10ca-n0x10e4) + I okayama + 0x29395b47, // n0x0c16 c0x00a4 (n0x10e4-n0x110e) + I okinawa + 0x00200282, // n0x0c17 c0x0000 (---------------) + I or + 0x2969ac45, // n0x0c18 c0x00a5 (n0x110e-n0x1140) + I osaka + 0x29a38904, // n0x0c19 c0x00a6 (n0x1140-n0x115a) + I saga + 0x29ed9247, // n0x0c1a c0x00a7 (n0x115a-n0x119f) + I saitama + 0x2a221087, // n0x0c1b c0x00a8 (n0x119f-n0x11a0)* o I sapporo + 0x2a682b06, // n0x0c1c c0x00a9 (n0x11a0-n0x11a1)* o I sendai + 0x2aa285c5, // n0x0c1d c0x00aa (n0x11a1-n0x11b8) + I shiga + 0x2ae93dc7, // n0x0c1e c0x00ab 
(n0x11b8-n0x11cf) + I shimane + 0x2b2b2608, // n0x0c1f c0x00ac (n0x11cf-n0x11f3) + I shizuoka + 0x2b744e07, // n0x0c20 c0x00ad (n0x11f3-n0x1212) + I tochigi + 0x2ba99fc9, // n0x0c21 c0x00ae (n0x1212-n0x1223) + I tokushima + 0x2bf41c05, // n0x0c22 c0x00af (n0x1223-n0x125c) + I tokyo + 0x2c2f8b87, // n0x0c23 c0x00b0 (n0x125c-n0x1269) + I tottori + 0x2c68e9c6, // n0x0c24 c0x00b1 (n0x1269-n0x1281) + I toyama + 0x2ca23ac8, // n0x0c25 c0x00b2 (n0x1281-n0x129e) + I wakayama + 0x0038988d, // n0x0c26 c0x0000 (---------------) + I xn--0trq7p7nn + 0x0024e389, // n0x0c27 c0x0000 (---------------) + I xn--1ctwo + 0x0025c1cb, // n0x0c28 c0x0000 (---------------) + I xn--1lqs03n + 0x0026024b, // n0x0c29 c0x0000 (---------------) + I xn--1lqs71d + 0x0027348b, // n0x0c2a c0x0000 (---------------) + I xn--2m4a15e + 0x002a5bcb, // n0x0c2b c0x0000 (---------------) + I xn--32vp30h + 0x0030168b, // n0x0c2c c0x0000 (---------------) + I xn--4it168d + 0x0030194b, // n0x0c2d c0x0000 (---------------) + I xn--4it797k + 0x00301d89, // n0x0c2e c0x0000 (---------------) + I xn--4pvxs + 0x00302c4b, // n0x0c2f c0x0000 (---------------) + I xn--5js045d + 0x00302f0b, // n0x0c30 c0x0000 (---------------) + I xn--5rtp49c + 0x0030338b, // n0x0c31 c0x0000 (---------------) + I xn--5rtq34k + 0x003043ca, // n0x0c32 c0x0000 (---------------) + I xn--6btw5a + 0x0030490a, // n0x0c33 c0x0000 (---------------) + I xn--6orx2r + 0x00304f0c, // n0x0c34 c0x0000 (---------------) + I xn--7t0a264c + 0x0030a1cb, // n0x0c35 c0x0000 (---------------) + I xn--8ltr62k + 0x0030a74a, // n0x0c36 c0x0000 (---------------) + I xn--8pvr4u + 0x0031918a, // n0x0c37 c0x0000 (---------------) + I xn--c3s14m + 0x0032940e, // n0x0c38 c0x0000 (---------------) + I xn--d5qv7z876c + 0x0032a1ce, // n0x0c39 c0x0000 (---------------) + I xn--djrs72d6uy + 0x0032a54a, // n0x0c3a c0x0000 (---------------) + I xn--djty4k + 0x0032bb0a, // n0x0c3b c0x0000 (---------------) + I xn--efvn9s + 0x0032c78b, // n0x0c3c c0x0000 (---------------) + I 
xn--ehqz56n + 0x0032ca4b, // n0x0c3d c0x0000 (---------------) + I xn--elqq16h + 0x0032d78b, // n0x0c3e c0x0000 (---------------) + I xn--f6qx53a + 0x0034834b, // n0x0c3f c0x0000 (---------------) + I xn--k7yn95e + 0x0034894a, // n0x0c40 c0x0000 (---------------) + I xn--kbrq7o + 0x0034960b, // n0x0c41 c0x0000 (---------------) + I xn--klt787d + 0x003498ca, // n0x0c42 c0x0000 (---------------) + I xn--kltp7d + 0x00349b4a, // n0x0c43 c0x0000 (---------------) + I xn--kltx9a + 0x00349dca, // n0x0c44 c0x0000 (---------------) + I xn--klty5x + 0x00367e8b, // n0x0c45 c0x0000 (---------------) + I xn--mkru45i + 0x0037178b, // n0x0c46 c0x0000 (---------------) + I xn--nit225k + 0x003733ce, // n0x0c47 c0x0000 (---------------) + I xn--ntso0iqx3a + 0x0037374b, // n0x0c48 c0x0000 (---------------) + I xn--ntsq17g + 0x0037b18b, // n0x0c49 c0x0000 (---------------) + I xn--pssu33l + 0x0037d28b, // n0x0c4a c0x0000 (---------------) + I xn--qqqt11m + 0x0038144a, // n0x0c4b c0x0000 (---------------) + I xn--rht27z + 0x003816c9, // n0x0c4c c0x0000 (---------------) + I xn--rht3d + 0x0038190a, // n0x0c4d c0x0000 (---------------) + I xn--rht61e + 0x00382f8a, // n0x0c4e c0x0000 (---------------) + I xn--rny31h + 0x00392e8b, // n0x0c4f c0x0000 (---------------) + I xn--tor131o + 0x003949cb, // n0x0c50 c0x0000 (---------------) + I xn--uist22h + 0x0039548a, // n0x0c51 c0x0000 (---------------) + I xn--uisz3g + 0x003967cb, // n0x0c52 c0x0000 (---------------) + I xn--uuwu58a + 0x0039a30b, // n0x0c53 c0x0000 (---------------) + I xn--vgu402c + 0x003a520b, // n0x0c54 c0x0000 (---------------) + I xn--zbx025d + 0x2ce808c8, // n0x0c55 c0x00b3 (n0x129e-n0x12c0) + I yamagata + 0x2d2873c9, // n0x0c56 c0x00b4 (n0x12c0-n0x12d0) + I yamaguchi + 0x2d6a15c9, // n0x0c57 c0x00b5 (n0x12d0-n0x12ec) + I yamanashi + 0x2dad09c8, // n0x0c58 c0x00b6 (n0x12ec-n0x12ed)* o I yokohama + 0x00334d45, // n0x0c59 c0x0000 (---------------) + I aisai + 0x00201883, // n0x0c5a c0x0000 (---------------) + I ama + 
0x00203fc4, // n0x0c5b c0x0000 (---------------) + I anjo + 0x00360985, // n0x0c5c c0x0000 (---------------) + I asuke + 0x0036ac06, // n0x0c5d c0x0000 (---------------) + I chiryu + 0x002ac6c5, // n0x0c5e c0x0000 (---------------) + I chita + 0x00286bc4, // n0x0c5f c0x0000 (---------------) + I fuso + 0x0026ecc8, // n0x0c60 c0x0000 (---------------) + I gamagori + 0x00256185, // n0x0c61 c0x0000 (---------------) + I handa + 0x0028ff84, // n0x0c62 c0x0000 (---------------) + I hazu + 0x002c3247, // n0x0c63 c0x0000 (---------------) + I hekinan + 0x0029d24a, // n0x0c64 c0x0000 (---------------) + I higashiura + 0x002d218a, // n0x0c65 c0x0000 (---------------) + I ichinomiya + 0x0032bfc7, // n0x0c66 c0x0000 (---------------) + I inazawa + 0x00201787, // n0x0c67 c0x0000 (---------------) + I inuyama + 0x002f2007, // n0x0c68 c0x0000 (---------------) + I isshiki + 0x0031d107, // n0x0c69 c0x0000 (---------------) + I iwakura + 0x002a25c5, // n0x0c6a c0x0000 (---------------) + I kanie + 0x00325246, // n0x0c6b c0x0000 (---------------) + I kariya + 0x00321d87, // n0x0c6c c0x0000 (---------------) + I kasugai + 0x002571c4, // n0x0c6d c0x0000 (---------------) + I kira + 0x002f0206, // n0x0c6e c0x0000 (---------------) + I kiyosu + 0x00296f06, // n0x0c6f c0x0000 (---------------) + I komaki + 0x00206b85, // n0x0c70 c0x0000 (---------------) + I konan + 0x00229444, // n0x0c71 c0x0000 (---------------) + I kota + 0x002da306, // n0x0c72 c0x0000 (---------------) + I mihama + 0x0029c447, // n0x0c73 c0x0000 (---------------) + I miyoshi + 0x002251c6, // n0x0c74 c0x0000 (---------------) + I nishio + 0x00266c47, // n0x0c75 c0x0000 (---------------) + I nisshin + 0x0027cb43, // n0x0c76 c0x0000 (---------------) + I obu + 0x00252346, // n0x0c77 c0x0000 (---------------) + I oguchi + 0x00236085, // n0x0c78 c0x0000 (---------------) + I oharu + 0x0027e907, // n0x0c79 c0x0000 (---------------) + I okazaki + 0x002bf04a, // n0x0c7a c0x0000 (---------------) + I owariasahi + 0x002ae744, 
// n0x0c7b c0x0000 (---------------) + I seto + 0x00219288, // n0x0c7c c0x0000 (---------------) + I shikatsu + 0x00299589, // n0x0c7d c0x0000 (---------------) + I shinshiro + 0x002aff87, // n0x0c7e c0x0000 (---------------) + I shitara + 0x002e8506, // n0x0c7f c0x0000 (---------------) + I tahara + 0x00365ec8, // n0x0c80 c0x0000 (---------------) + I takahama + 0x00307489, // n0x0c81 c0x0000 (---------------) + I tobishima + 0x00375f44, // n0x0c82 c0x0000 (---------------) + I toei + 0x00337204, // n0x0c83 c0x0000 (---------------) + I togo + 0x002fa0c5, // n0x0c84 c0x0000 (---------------) + I tokai + 0x002bfec8, // n0x0c85 c0x0000 (---------------) + I tokoname + 0x002c0907, // n0x0c86 c0x0000 (---------------) + I toyoake + 0x0028df09, // n0x0c87 c0x0000 (---------------) + I toyohashi + 0x00247dc8, // n0x0c88 c0x0000 (---------------) + I toyokawa + 0x00366606, // n0x0c89 c0x0000 (---------------) + I toyone + 0x0025bb86, // n0x0c8a c0x0000 (---------------) + I toyota + 0x00297a48, // n0x0c8b c0x0000 (---------------) + I tsushima + 0x0036a786, // n0x0c8c c0x0000 (---------------) + I yatomi + 0x00201dc5, // n0x0c8d c0x0000 (---------------) + I akita + 0x00282bc6, // n0x0c8e c0x0000 (---------------) + I daisen + 0x002790c8, // n0x0c8f c0x0000 (---------------) + I fujisato + 0x0023ab86, // n0x0c90 c0x0000 (---------------) + I gojome + 0x0025778b, // n0x0c91 c0x0000 (---------------) + I hachirogata + 0x0028ab86, // n0x0c92 c0x0000 (---------------) + I happou + 0x002997cd, // n0x0c93 c0x0000 (---------------) + I higashinaruse + 0x0038e545, // n0x0c94 c0x0000 (---------------) + I honjo + 0x002a8846, // n0x0c95 c0x0000 (---------------) + I honjyo + 0x0021a585, // n0x0c96 c0x0000 (---------------) + I ikawa + 0x00296349, // n0x0c97 c0x0000 (---------------) + I kamikoani + 0x00320ec7, // n0x0c98 c0x0000 (---------------) + I kamioka + 0x00378048, // n0x0c99 c0x0000 (---------------) + I katagami + 0x00305386, // n0x0c9a c0x0000 (---------------) + I 
kazuno + 0x002983c9, // n0x0c9b c0x0000 (---------------) + I kitaakita + 0x002dd306, // n0x0c9c c0x0000 (---------------) + I kosaka + 0x002befc5, // n0x0c9d c0x0000 (---------------) + I kyowa + 0x0022f486, // n0x0c9e c0x0000 (---------------) + I misato + 0x002b16c6, // n0x0c9f c0x0000 (---------------) + I mitane + 0x002c6849, // n0x0ca0 c0x0000 (---------------) + I moriyoshi + 0x0033cfc6, // n0x0ca1 c0x0000 (---------------) + I nikaho + 0x0037ebc7, // n0x0ca2 c0x0000 (---------------) + I noshiro + 0x002c5f85, // n0x0ca3 c0x0000 (---------------) + I odate + 0x00202a03, // n0x0ca4 c0x0000 (---------------) + I oga + 0x00223845, // n0x0ca5 c0x0000 (---------------) + I ogata + 0x002a6287, // n0x0ca6 c0x0000 (---------------) + I semboku + 0x00330a06, // n0x0ca7 c0x0000 (---------------) + I yokote + 0x0038e449, // n0x0ca8 c0x0000 (---------------) + I yurihonjo + 0x0030bf06, // n0x0ca9 c0x0000 (---------------) + I aomori + 0x00282e06, // n0x0caa c0x0000 (---------------) + I gonohe + 0x0020db09, // n0x0cab c0x0000 (---------------) + I hachinohe + 0x002825c9, // n0x0cac c0x0000 (---------------) + I hashikami + 0x0029f407, // n0x0cad c0x0000 (---------------) + I hiranai + 0x002eb708, // n0x0cae c0x0000 (---------------) + I hirosaki + 0x002692c9, // n0x0caf c0x0000 (---------------) + I itayanagi + 0x0027edc8, // n0x0cb0 c0x0000 (---------------) + I kuroishi + 0x0037d506, // n0x0cb1 c0x0000 (---------------) + I misawa + 0x002d0505, // n0x0cb2 c0x0000 (---------------) + I mutsu + 0x0021e54a, // n0x0cb3 c0x0000 (---------------) + I nakadomari + 0x00282e86, // n0x0cb4 c0x0000 (---------------) + I noheji + 0x00207e46, // n0x0cb5 c0x0000 (---------------) + I oirase + 0x002a2b85, // n0x0cb6 c0x0000 (---------------) + I owani + 0x0036b208, // n0x0cb7 c0x0000 (---------------) + I rokunohe + 0x0020ea87, // n0x0cb8 c0x0000 (---------------) + I sannohe + 0x0023638a, // n0x0cb9 c0x0000 (---------------) + I shichinohe + 0x0024e0c6, // n0x0cba c0x0000 
(---------------) + I shingo + 0x00240bc5, // n0x0cbb c0x0000 (---------------) + I takko + 0x0024a2c6, // n0x0cbc c0x0000 (---------------) + I towada + 0x00297647, // n0x0cbd c0x0000 (---------------) + I tsugaru + 0x002e83c7, // n0x0cbe c0x0000 (---------------) + I tsuruta + 0x0037e845, // n0x0cbf c0x0000 (---------------) + I abiko + 0x002bf185, // n0x0cc0 c0x0000 (---------------) + I asahi + 0x002e59c6, // n0x0cc1 c0x0000 (---------------) + I chonan + 0x002e6b46, // n0x0cc2 c0x0000 (---------------) + I chosei + 0x00300346, // n0x0cc3 c0x0000 (---------------) + I choshi + 0x0030ba84, // n0x0cc4 c0x0000 (---------------) + I chuo + 0x00281689, // n0x0cc5 c0x0000 (---------------) + I funabashi + 0x00288286, // n0x0cc6 c0x0000 (---------------) + I futtsu + 0x0034ad4a, // n0x0cc7 c0x0000 (---------------) + I hanamigawa + 0x0028f548, // n0x0cc8 c0x0000 (---------------) + I ichihara + 0x00265608, // n0x0cc9 c0x0000 (---------------) + I ichikawa + 0x002d218a, // n0x0cca c0x0000 (---------------) + I ichinomiya + 0x003a0f85, // n0x0ccb c0x0000 (---------------) + I inzai + 0x0029c385, // n0x0ccc c0x0000 (---------------) + I isumi + 0x00307d08, // n0x0ccd c0x0000 (---------------) + I kamagaya + 0x002caec8, // n0x0cce c0x0000 (---------------) + I kamogawa + 0x002037c7, // n0x0ccf c0x0000 (---------------) + I kashiwa + 0x00294d86, // n0x0cd0 c0x0000 (---------------) + I katori + 0x003141c8, // n0x0cd1 c0x0000 (---------------) + I katsuura + 0x002303c7, // n0x0cd2 c0x0000 (---------------) + I kimitsu + 0x00280d88, // n0x0cd3 c0x0000 (---------------) + I kisarazu + 0x00368e86, // n0x0cd4 c0x0000 (---------------) + I kozaki + 0x00283fc8, // n0x0cd5 c0x0000 (---------------) + I kujukuri + 0x002b4246, // n0x0cd6 c0x0000 (---------------) + I kyonan + 0x00243747, // n0x0cd7 c0x0000 (---------------) + I matsudo + 0x00298e06, // n0x0cd8 c0x0000 (---------------) + I midori + 0x002da306, // n0x0cd9 c0x0000 (---------------) + I mihama + 0x0023c6ca, // n0x0cda 
c0x0000 (---------------) + I minamiboso + 0x00234886, // n0x0cdb c0x0000 (---------------) + I mobara + 0x002d0509, // n0x0cdc c0x0000 (---------------) + I mutsuzawa + 0x002ae046, // n0x0cdd c0x0000 (---------------) + I nagara + 0x002d164a, // n0x0cde c0x0000 (---------------) + I nagareyama + 0x002c8a09, // n0x0cdf c0x0000 (---------------) + I narashino + 0x0037df46, // n0x0ce0 c0x0000 (---------------) + I narita + 0x0037f944, // n0x0ce1 c0x0000 (---------------) + I noda + 0x003071cd, // n0x0ce2 c0x0000 (---------------) + I oamishirasato + 0x00287647, // n0x0ce3 c0x0000 (---------------) + I omigawa + 0x00316686, // n0x0ce4 c0x0000 (---------------) + I onjuku + 0x002b8f45, // n0x0ce5 c0x0000 (---------------) + I otaki + 0x002dd385, // n0x0ce6 c0x0000 (---------------) + I sakae + 0x00307fc6, // n0x0ce7 c0x0000 (---------------) + I sakura + 0x0028d9c9, // n0x0ce8 c0x0000 (---------------) + I shimofusa + 0x002aaf87, // n0x0ce9 c0x0000 (---------------) + I shirako + 0x0027a9c6, // n0x0cea c0x0000 (---------------) + I shiroi + 0x002af8c6, // n0x0ceb c0x0000 (---------------) + I shisui + 0x00286c49, // n0x0cec c0x0000 (---------------) + I sodegaura + 0x0021f484, // n0x0ced c0x0000 (---------------) + I sosa + 0x0036bcc4, // n0x0cee c0x0000 (---------------) + I tako + 0x002040c8, // n0x0cef c0x0000 (---------------) + I tateyama + 0x002aea86, // n0x0cf0 c0x0000 (---------------) + I togane + 0x0029ed88, // n0x0cf1 c0x0000 (---------------) + I tohnosho + 0x0022f408, // n0x0cf2 c0x0000 (---------------) + I tomisato + 0x00280587, // n0x0cf3 c0x0000 (---------------) + I urayasu + 0x003a6349, // n0x0cf4 c0x0000 (---------------) + I yachimata + 0x00300547, // n0x0cf5 c0x0000 (---------------) + I yachiyo + 0x002adc0a, // n0x0cf6 c0x0000 (---------------) + I yokaichiba + 0x0022edcf, // n0x0cf7 c0x0000 (---------------) + I yokoshibahikari + 0x00269dca, // n0x0cf8 c0x0000 (---------------) + I yotsukaido + 0x00223245, // n0x0cf9 c0x0000 (---------------) + 
I ainan + 0x00279305, // n0x0cfa c0x0000 (---------------) + I honai + 0x00216985, // n0x0cfb c0x0000 (---------------) + I ikata + 0x00249147, // n0x0cfc c0x0000 (---------------) + I imabari + 0x00206043, // n0x0cfd c0x0000 (---------------) + I iyo + 0x002eb908, // n0x0cfe c0x0000 (---------------) + I kamijima + 0x002f5c86, // n0x0cff c0x0000 (---------------) + I kihoku + 0x002f5d89, // n0x0d00 c0x0000 (---------------) + I kumakogen + 0x003a3b06, // n0x0d01 c0x0000 (---------------) + I masaki + 0x002c02c7, // n0x0d02 c0x0000 (---------------) + I matsuno + 0x00298189, // n0x0d03 c0x0000 (---------------) + I matsuyama + 0x00377f48, // n0x0d04 c0x0000 (---------------) + I namikata + 0x002a2c47, // n0x0d05 c0x0000 (---------------) + I niihama + 0x00300a43, // n0x0d06 c0x0000 (---------------) + I ozu + 0x00334dc5, // n0x0d07 c0x0000 (---------------) + I saijo + 0x002396c5, // n0x0d08 c0x0000 (---------------) + I seiyo + 0x0030b8cb, // n0x0d09 c0x0000 (---------------) + I shikokuchuo + 0x002be184, // n0x0d0a c0x0000 (---------------) + I tobe + 0x0020b004, // n0x0d0b c0x0000 (---------------) + I toon + 0x00278086, // n0x0d0c c0x0000 (---------------) + I uchiko + 0x00300dc7, // n0x0d0d c0x0000 (---------------) + I uwajima + 0x0038f14a, // n0x0d0e c0x0000 (---------------) + I yawatahama + 0x0024b9c7, // n0x0d0f c0x0000 (---------------) + I echizen + 0x00375fc7, // n0x0d10 c0x0000 (---------------) + I eiheiji + 0x0027dac5, // n0x0d11 c0x0000 (---------------) + I fukui + 0x00202585, // n0x0d12 c0x0000 (---------------) + I ikeda + 0x0021ebc9, // n0x0d13 c0x0000 (---------------) + I katsuyama + 0x002da306, // n0x0d14 c0x0000 (---------------) + I mihama + 0x0024b84d, // n0x0d15 c0x0000 (---------------) + I minamiechizen + 0x00395f05, // n0x0d16 c0x0000 (---------------) + I obama + 0x00299783, // n0x0d17 c0x0000 (---------------) + I ohi + 0x0020a703, // n0x0d18 c0x0000 (---------------) + I ono + 0x002f6605, // n0x0d19 c0x0000 (---------------) + I 
sabae + 0x0034ca05, // n0x0d1a c0x0000 (---------------) + I sakai + 0x00365ec8, // n0x0d1b c0x0000 (---------------) + I takahama + 0x0027b9c7, // n0x0d1c c0x0000 (---------------) + I tsuruga + 0x0036ba46, // n0x0d1d c0x0000 (---------------) + I wakasa + 0x0029d906, // n0x0d1e c0x0000 (---------------) + I ashiya + 0x0022d885, // n0x0d1f c0x0000 (---------------) + I buzen + 0x0023aa47, // n0x0d20 c0x0000 (---------------) + I chikugo + 0x00201a07, // n0x0d21 c0x0000 (---------------) + I chikuho + 0x00293107, // n0x0d22 c0x0000 (---------------) + I chikujo + 0x002cb1ca, // n0x0d23 c0x0000 (---------------) + I chikushino + 0x00252408, // n0x0d24 c0x0000 (---------------) + I chikuzen + 0x0030ba84, // n0x0d25 c0x0000 (---------------) + I chuo + 0x00214b07, // n0x0d26 c0x0000 (---------------) + I dazaifu + 0x0027cc87, // n0x0d27 c0x0000 (---------------) + I fukuchi + 0x0032c406, // n0x0d28 c0x0000 (---------------) + I hakata + 0x00268047, // n0x0d29 c0x0000 (---------------) + I higashi + 0x002d2bc8, // n0x0d2a c0x0000 (---------------) + I hirokawa + 0x002a14c8, // n0x0d2b c0x0000 (---------------) + I hisayama + 0x0026e786, // n0x0d2c c0x0000 (---------------) + I iizuka + 0x0022b108, // n0x0d2d c0x0000 (---------------) + I inatsuki + 0x002c6404, // n0x0d2e c0x0000 (---------------) + I kaho + 0x00321d86, // n0x0d2f c0x0000 (---------------) + I kasuga + 0x0020f406, // n0x0d30 c0x0000 (---------------) + I kasuya + 0x00206106, // n0x0d31 c0x0000 (---------------) + I kawara + 0x002ebf06, // n0x0d32 c0x0000 (---------------) + I keisen + 0x00226304, // n0x0d33 c0x0000 (---------------) + I koga + 0x0031d1c6, // n0x0d34 c0x0000 (---------------) + I kurate + 0x002b81c6, // n0x0d35 c0x0000 (---------------) + I kurogi + 0x002969c6, // n0x0d36 c0x0000 (---------------) + I kurume + 0x00228406, // n0x0d37 c0x0000 (---------------) + I minami + 0x0020a5c6, // n0x0d38 c0x0000 (---------------) + I miyako + 0x002d2a06, // n0x0d39 c0x0000 (---------------) + I 
miyama + 0x0036b948, // n0x0d3a c0x0000 (---------------) + I miyawaka + 0x002f0088, // n0x0d3b c0x0000 (---------------) + I mizumaki + 0x002cbdc8, // n0x0d3c c0x0000 (---------------) + I munakata + 0x002ac8c8, // n0x0d3d c0x0000 (---------------) + I nakagawa + 0x00307c86, // n0x0d3e c0x0000 (---------------) + I nakama + 0x00211805, // n0x0d3f c0x0000 (---------------) + I nishi + 0x00223806, // n0x0d40 c0x0000 (---------------) + I nogata + 0x002abb05, // n0x0d41 c0x0000 (---------------) + I ogori + 0x00380887, // n0x0d42 c0x0000 (---------------) + I okagaki + 0x002060c5, // n0x0d43 c0x0000 (---------------) + I okawa + 0x00215483, // n0x0d44 c0x0000 (---------------) + I oki + 0x00203a85, // n0x0d45 c0x0000 (---------------) + I omuta + 0x002b6104, // n0x0d46 c0x0000 (---------------) + I onga + 0x0020a705, // n0x0d47 c0x0000 (---------------) + I onojo + 0x00216003, // n0x0d48 c0x0000 (---------------) + I oto + 0x002d87c7, // n0x0d49 c0x0000 (---------------) + I saigawa + 0x0036fd08, // n0x0d4a c0x0000 (---------------) + I sasaguri + 0x00266d06, // n0x0d4b c0x0000 (---------------) + I shingu + 0x002a224d, // n0x0d4c c0x0000 (---------------) + I shinyoshitomi + 0x002792c6, // n0x0d4d c0x0000 (---------------) + I shonai + 0x00295a45, // n0x0d4e c0x0000 (---------------) + I soeda + 0x002c6c03, // n0x0d4f c0x0000 (---------------) + I sue + 0x002b3109, // n0x0d50 c0x0000 (---------------) + I tachiarai + 0x002c1c86, // n0x0d51 c0x0000 (---------------) + I tagawa + 0x00238646, // n0x0d52 c0x0000 (---------------) + I takata + 0x0034cf44, // n0x0d53 c0x0000 (---------------) + I toho + 0x00269d47, // n0x0d54 c0x0000 (---------------) + I toyotsu + 0x0023bd46, // n0x0d55 c0x0000 (---------------) + I tsuiki + 0x0036b045, // n0x0d56 c0x0000 (---------------) + I ukiha + 0x0020b583, // n0x0d57 c0x0000 (---------------) + I umi + 0x002066c4, // n0x0d58 c0x0000 (---------------) + I usui + 0x0027ce46, // n0x0d59 c0x0000 (---------------) + I yamada + 
0x002a7d84, // n0x0d5a c0x0000 (---------------) + I yame + 0x0030df48, // n0x0d5b c0x0000 (---------------) + I yanagawa + 0x00383d09, // n0x0d5c c0x0000 (---------------) + I yukuhashi + 0x002bde89, // n0x0d5d c0x0000 (---------------) + I aizubange + 0x0029eb8a, // n0x0d5e c0x0000 (---------------) + I aizumisato + 0x002453cd, // n0x0d5f c0x0000 (---------------) + I aizuwakamatsu + 0x00248a07, // n0x0d60 c0x0000 (---------------) + I asakawa + 0x00207cc6, // n0x0d61 c0x0000 (---------------) + I bandai + 0x0020c7c4, // n0x0d62 c0x0000 (---------------) + I date + 0x0032fcc9, // n0x0d63 c0x0000 (---------------) + I fukushima + 0x002860c8, // n0x0d64 c0x0000 (---------------) + I furudono + 0x00287246, // n0x0d65 c0x0000 (---------------) + I futaba + 0x0025a746, // n0x0d66 c0x0000 (---------------) + I hanawa + 0x00268047, // n0x0d67 c0x0000 (---------------) + I higashi + 0x00347f46, // n0x0d68 c0x0000 (---------------) + I hirata + 0x0021d4c6, // n0x0d69 c0x0000 (---------------) + I hirono + 0x00384006, // n0x0d6a c0x0000 (---------------) + I iitate + 0x00395bca, // n0x0d6b c0x0000 (---------------) + I inawashiro + 0x0021a4c8, // n0x0d6c c0x0000 (---------------) + I ishikawa + 0x0022da85, // n0x0d6d c0x0000 (---------------) + I iwaki + 0x002802c9, // n0x0d6e c0x0000 (---------------) + I izumizaki + 0x0028128a, // n0x0d6f c0x0000 (---------------) + I kagamiishi + 0x002c7848, // n0x0d70 c0x0000 (---------------) + I kaneyama + 0x0029b888, // n0x0d71 c0x0000 (---------------) + I kawamata + 0x00295fc8, // n0x0d72 c0x0000 (---------------) + I kitakata + 0x00201e0c, // n0x0d73 c0x0000 (---------------) + I kitashiobara + 0x002d5305, // n0x0d74 c0x0000 (---------------) + I koori + 0x0029db88, // n0x0d75 c0x0000 (---------------) + I koriyama + 0x00342586, // n0x0d76 c0x0000 (---------------) + I kunimi + 0x00306306, // n0x0d77 c0x0000 (---------------) + I miharu + 0x002bf3c7, // n0x0d78 c0x0000 (---------------) + I mishima + 0x0024b8c5, // n0x0d79 
c0x0000 (---------------) + I namie + 0x00282d45, // n0x0d7a c0x0000 (---------------) + I nango + 0x002bdd49, // n0x0d7b c0x0000 (---------------) + I nishiaizu + 0x00211d87, // n0x0d7c c0x0000 (---------------) + I nishigo + 0x002f5d45, // n0x0d7d c0x0000 (---------------) + I okuma + 0x0021f187, // n0x0d7e c0x0000 (---------------) + I omotego + 0x0020a703, // n0x0d7f c0x0000 (---------------) + I ono + 0x002c0e45, // n0x0d80 c0x0000 (---------------) + I otama + 0x00346648, // n0x0d81 c0x0000 (---------------) + I samegawa + 0x002b1287, // n0x0d82 c0x0000 (---------------) + I shimogo + 0x0029b749, // n0x0d83 c0x0000 (---------------) + I shirakawa + 0x00319905, // n0x0d84 c0x0000 (---------------) + I showa + 0x0035d844, // n0x0d85 c0x0000 (---------------) + I soma + 0x002a0308, // n0x0d86 c0x0000 (---------------) + I sukagawa + 0x0022c747, // n0x0d87 c0x0000 (---------------) + I taishin + 0x002a2e08, // n0x0d88 c0x0000 (---------------) + I tamakawa + 0x00330f48, // n0x0d89 c0x0000 (---------------) + I tanagura + 0x002d9885, // n0x0d8a c0x0000 (---------------) + I tenei + 0x00350b06, // n0x0d8b c0x0000 (---------------) + I yabuki + 0x0028f086, // n0x0d8c c0x0000 (---------------) + I yamato + 0x00250089, // n0x0d8d c0x0000 (---------------) + I yamatsuri + 0x00314a47, // n0x0d8e c0x0000 (---------------) + I yanaizu + 0x002abfc6, // n0x0d8f c0x0000 (---------------) + I yugawa + 0x0028ca87, // n0x0d90 c0x0000 (---------------) + I anpachi + 0x002183c3, // n0x0d91 c0x0000 (---------------) + I ena + 0x0036aec4, // n0x0d92 c0x0000 (---------------) + I gifu + 0x002a0c45, // n0x0d93 c0x0000 (---------------) + I ginan + 0x00214d84, // n0x0d94 c0x0000 (---------------) + I godo + 0x00343c04, // n0x0d95 c0x0000 (---------------) + I gujo + 0x00280b47, // n0x0d96 c0x0000 (---------------) + I hashima + 0x00218a07, // n0x0d97 c0x0000 (---------------) + I hichiso + 0x0027ab84, // n0x0d98 c0x0000 (---------------) + I hida + 0x0029b590, // n0x0d99 c0x0000 
(---------------) + I higashishirakawa + 0x00308147, // n0x0d9a c0x0000 (---------------) + I ibigawa + 0x00202585, // n0x0d9b c0x0000 (---------------) + I ikeda + 0x002ef98c, // n0x0d9c c0x0000 (---------------) + I kakamigahara + 0x00279cc4, // n0x0d9d c0x0000 (---------------) + I kani + 0x0029b388, // n0x0d9e c0x0000 (---------------) + I kasahara + 0x00243649, // n0x0d9f c0x0000 (---------------) + I kasamatsu + 0x00301506, // n0x0da0 c0x0000 (---------------) + I kawaue + 0x0021f588, // n0x0da1 c0x0000 (---------------) + I kitagata + 0x0024ff84, // n0x0da2 c0x0000 (---------------) + I mino + 0x002d7b88, // n0x0da3 c0x0000 (---------------) + I minokamo + 0x00268506, // n0x0da4 c0x0000 (---------------) + I mitake + 0x00228288, // n0x0da5 c0x0000 (---------------) + I mizunami + 0x002a0946, // n0x0da6 c0x0000 (---------------) + I motosu + 0x0037a2cb, // n0x0da7 c0x0000 (---------------) + I nakatsugawa + 0x00202a05, // n0x0da8 c0x0000 (---------------) + I ogaki + 0x002c6388, // n0x0da9 c0x0000 (---------------) + I sakahogi + 0x00217fc4, // n0x0daa c0x0000 (---------------) + I seki + 0x0028224a, // n0x0dab c0x0000 (---------------) + I sekigahara + 0x0029b749, // n0x0dac c0x0000 (---------------) + I shirakawa + 0x00312006, // n0x0dad c0x0000 (---------------) + I tajimi + 0x002c0148, // n0x0dae c0x0000 (---------------) + I takayama + 0x00274245, // n0x0daf c0x0000 (---------------) + I tarui + 0x00222904, // n0x0db0 c0x0000 (---------------) + I toki + 0x0029b286, // n0x0db1 c0x0000 (---------------) + I tomika + 0x00292fc8, // n0x0db2 c0x0000 (---------------) + I wanouchi + 0x002808c8, // n0x0db3 c0x0000 (---------------) + I yamagata + 0x00341746, // n0x0db4 c0x0000 (---------------) + I yaotsu + 0x00321484, // n0x0db5 c0x0000 (---------------) + I yoro + 0x0021e4c6, // n0x0db6 c0x0000 (---------------) + I annaka + 0x003005c7, // n0x0db7 c0x0000 (---------------) + I chiyoda + 0x00278cc7, // n0x0db8 c0x0000 (---------------) + I fujioka + 
0x0026804f, // n0x0db9 c0x0000 (---------------) + I higashiagatsuma + 0x00204687, // n0x0dba c0x0000 (---------------) + I isesaki + 0x0037e007, // n0x0dbb c0x0000 (---------------) + I itakura + 0x003061c5, // n0x0dbc c0x0000 (---------------) + I kanna + 0x002e2345, // n0x0dbd c0x0000 (---------------) + I kanra + 0x0029f0c9, // n0x0dbe c0x0000 (---------------) + I katashina + 0x0026b7c6, // n0x0dbf c0x0000 (---------------) + I kawaba + 0x0027f4c5, // n0x0dc0 c0x0000 (---------------) + I kiryu + 0x002828c7, // n0x0dc1 c0x0000 (---------------) + I kusatsu + 0x002c5d48, // n0x0dc2 c0x0000 (---------------) + I maebashi + 0x002be885, // n0x0dc3 c0x0000 (---------------) + I meiwa + 0x00298e06, // n0x0dc4 c0x0000 (---------------) + I midori + 0x00216448, // n0x0dc5 c0x0000 (---------------) + I minakami + 0x00354f8a, // n0x0dc6 c0x0000 (---------------) + I naganohara + 0x00356448, // n0x0dc7 c0x0000 (---------------) + I nakanojo + 0x003a1d07, // n0x0dc8 c0x0000 (---------------) + I nanmoku + 0x0022f206, // n0x0dc9 c0x0000 (---------------) + I numata + 0x00280286, // n0x0dca c0x0000 (---------------) + I oizumi + 0x0021c683, // n0x0dcb c0x0000 (---------------) + I ora + 0x00204083, // n0x0dcc c0x0000 (---------------) + I ota + 0x00281449, // n0x0dcd c0x0000 (---------------) + I shibukawa + 0x00269149, // n0x0dce c0x0000 (---------------) + I shimonita + 0x00299ec6, // n0x0dcf c0x0000 (---------------) + I shinto + 0x00319905, // n0x0dd0 c0x0000 (---------------) + I showa + 0x002a06c8, // n0x0dd1 c0x0000 (---------------) + I takasaki + 0x002c0148, // n0x0dd2 c0x0000 (---------------) + I takayama + 0x0039a108, // n0x0dd3 c0x0000 (---------------) + I tamamura + 0x0038408b, // n0x0dd4 c0x0000 (---------------) + I tatebayashi + 0x002a2487, // n0x0dd5 c0x0000 (---------------) + I tomioka + 0x002ff549, // n0x0dd6 c0x0000 (---------------) + I tsukiyono + 0x002682c8, // n0x0dd7 c0x0000 (---------------) + I tsumagoi + 0x00387304, // n0x0dd8 c0x0000 
(---------------) + I ueno + 0x002c6948, // n0x0dd9 c0x0000 (---------------) + I yoshioka + 0x0028bb49, // n0x0dda c0x0000 (---------------) + I asaminami + 0x002ac2c5, // n0x0ddb c0x0000 (---------------) + I daiwa + 0x00249047, // n0x0ddc c0x0000 (---------------) + I etajima + 0x002bc1c5, // n0x0ddd c0x0000 (---------------) + I fuchu + 0x002807c8, // n0x0dde c0x0000 (---------------) + I fukuyama + 0x0028f38b, // n0x0ddf c0x0000 (---------------) + I hatsukaichi + 0x00293b10, // n0x0de0 c0x0000 (---------------) + I higashihiroshima + 0x002a8645, // n0x0de1 c0x0000 (---------------) + I hongo + 0x00217f0c, // n0x0de2 c0x0000 (---------------) + I jinsekikogen + 0x0036bc05, // n0x0de3 c0x0000 (---------------) + I kaita + 0x0027db43, // n0x0de4 c0x0000 (---------------) + I kui + 0x00383786, // n0x0de5 c0x0000 (---------------) + I kumano + 0x002b7544, // n0x0de6 c0x0000 (---------------) + I kure + 0x003a69c6, // n0x0de7 c0x0000 (---------------) + I mihara + 0x0029c447, // n0x0de8 c0x0000 (---------------) + I miyoshi + 0x002164c4, // n0x0de9 c0x0000 (---------------) + I naka + 0x002d2088, // n0x0dea c0x0000 (---------------) + I onomichi + 0x002eb7cd, // n0x0deb c0x0000 (---------------) + I osakikamijima + 0x00305a45, // n0x0dec c0x0000 (---------------) + I otake + 0x002458c4, // n0x0ded c0x0000 (---------------) + I saka + 0x00222f84, // n0x0dee c0x0000 (---------------) + I sera + 0x0027fd49, // n0x0def c0x0000 (---------------) + I seranishi + 0x00286908, // n0x0df0 c0x0000 (---------------) + I shinichi + 0x0030bd87, // n0x0df1 c0x0000 (---------------) + I shobara + 0x00268588, // n0x0df2 c0x0000 (---------------) + I takehara + 0x00281748, // n0x0df3 c0x0000 (---------------) + I abashiri + 0x0027acc5, // n0x0df4 c0x0000 (---------------) + I abira + 0x00208b87, // n0x0df5 c0x0000 (---------------) + I aibetsu + 0x0027ac47, // n0x0df6 c0x0000 (---------------) + I akabira + 0x00209907, // n0x0df7 c0x0000 (---------------) + I akkeshi + 0x002bf189, 
// n0x0df8 c0x0000 (---------------) + I asahikawa + 0x0023bbc9, // n0x0df9 c0x0000 (---------------) + I ashibetsu + 0x00244e86, // n0x0dfa c0x0000 (---------------) + I ashoro + 0x002b4086, // n0x0dfb c0x0000 (---------------) + I assabu + 0x00268286, // n0x0dfc c0x0000 (---------------) + I atsuma + 0x0026a3c5, // n0x0dfd c0x0000 (---------------) + I bibai + 0x002d81c4, // n0x0dfe c0x0000 (---------------) + I biei + 0x00200ec6, // n0x0dff c0x0000 (---------------) + I bifuka + 0x00202106, // n0x0e00 c0x0000 (---------------) + I bihoro + 0x0027ad08, // n0x0e01 c0x0000 (---------------) + I biratori + 0x0029730b, // n0x0e02 c0x0000 (---------------) + I chippubetsu + 0x002ae607, // n0x0e03 c0x0000 (---------------) + I chitose + 0x0020c7c4, // n0x0e04 c0x0000 (---------------) + I date + 0x00223e46, // n0x0e05 c0x0000 (---------------) + I ebetsu + 0x00283707, // n0x0e06 c0x0000 (---------------) + I embetsu + 0x002f5f45, // n0x0e07 c0x0000 (---------------) + I eniwa + 0x00239145, // n0x0e08 c0x0000 (---------------) + I erimo + 0x00200484, // n0x0e09 c0x0000 (---------------) + I esan + 0x0023bb46, // n0x0e0a c0x0000 (---------------) + I esashi + 0x00200f48, // n0x0e0b c0x0000 (---------------) + I fukagawa + 0x0032fcc9, // n0x0e0c c0x0000 (---------------) + I fukushima + 0x00253846, // n0x0e0d c0x0000 (---------------) + I furano + 0x00285888, // n0x0e0e c0x0000 (---------------) + I furubira + 0x0036b106, // n0x0e0f c0x0000 (---------------) + I haboro + 0x0032ccc8, // n0x0e10 c0x0000 (---------------) + I hakodate + 0x002a9b8c, // n0x0e11 c0x0000 (---------------) + I hamatonbetsu + 0x0027ab86, // n0x0e12 c0x0000 (---------------) + I hidaka + 0x0029570d, // n0x0e13 c0x0000 (---------------) + I higashikagura + 0x00295b8b, // n0x0e14 c0x0000 (---------------) + I higashikawa + 0x0037ec85, // n0x0e15 c0x0000 (---------------) + I hiroo + 0x00201b47, // n0x0e16 c0x0000 (---------------) + I hokuryu + 0x0033d0c6, // n0x0e17 c0x0000 (---------------) + I 
hokuto + 0x002e8288, // n0x0e18 c0x0000 (---------------) + I honbetsu + 0x00244f09, // n0x0e19 c0x0000 (---------------) + I horokanai + 0x002bd888, // n0x0e1a c0x0000 (---------------) + I horonobe + 0x00202585, // n0x0e1b c0x0000 (---------------) + I ikeda + 0x002fe547, // n0x0e1c c0x0000 (---------------) + I imakane + 0x0027eec8, // n0x0e1d c0x0000 (---------------) + I ishikari + 0x00272289, // n0x0e1e c0x0000 (---------------) + I iwamizawa + 0x0023b0c6, // n0x0e1f c0x0000 (---------------) + I iwanai + 0x0025c78a, // n0x0e20 c0x0000 (---------------) + I kamifurano + 0x002e8008, // n0x0e21 c0x0000 (---------------) + I kamikawa + 0x002bd6cb, // n0x0e22 c0x0000 (---------------) + I kamishihoro + 0x00291a4c, // n0x0e23 c0x0000 (---------------) + I kamisunagawa + 0x002d7c88, // n0x0e24 c0x0000 (---------------) + I kamoenai + 0x0027c586, // n0x0e25 c0x0000 (---------------) + I kayabe + 0x00208588, // n0x0e26 c0x0000 (---------------) + I kembuchi + 0x002047c7, // n0x0e27 c0x0000 (---------------) + I kikonai + 0x0023be49, // n0x0e28 c0x0000 (---------------) + I kimobetsu + 0x0020ed4d, // n0x0e29 c0x0000 (---------------) + I kitahiroshima + 0x0029d586, // n0x0e2a c0x0000 (---------------) + I kitami + 0x00297008, // n0x0e2b c0x0000 (---------------) + I kiyosato + 0x002eff49, // n0x0e2c c0x0000 (---------------) + I koshimizu + 0x002b4a48, // n0x0e2d c0x0000 (---------------) + I kunneppu + 0x002840c8, // n0x0e2e c0x0000 (---------------) + I kuriyama + 0x002b8a8c, // n0x0e2f c0x0000 (---------------) + I kuromatsunai + 0x002b9a07, // n0x0e30 c0x0000 (---------------) + I kushiro + 0x002ba907, // n0x0e31 c0x0000 (---------------) + I kutchan + 0x002befc5, // n0x0e32 c0x0000 (---------------) + I kyowa + 0x0024eec7, // n0x0e33 c0x0000 (---------------) + I mashike + 0x002c5c08, // n0x0e34 c0x0000 (---------------) + I matsumae + 0x0029b306, // n0x0e35 c0x0000 (---------------) + I mikasa + 0x002536cc, // n0x0e36 c0x0000 (---------------) + I minamifurano + 
0x002e3788, // n0x0e37 c0x0000 (---------------) + I mombetsu + 0x002c7c48, // n0x0e38 c0x0000 (---------------) + I moseushi + 0x002b6d86, // n0x0e39 c0x0000 (---------------) + I mukawa + 0x003961c7, // n0x0e3a c0x0000 (---------------) + I muroran + 0x00245084, // n0x0e3b c0x0000 (---------------) + I naie + 0x002ac8c8, // n0x0e3c c0x0000 (---------------) + I nakagawa + 0x0021840c, // n0x0e3d c0x0000 (---------------) + I nakasatsunai + 0x0036decc, // n0x0e3e c0x0000 (---------------) + I nakatombetsu + 0x002232c5, // n0x0e3f c0x0000 (---------------) + I nanae + 0x0038ae07, // n0x0e40 c0x0000 (---------------) + I nanporo + 0x00321406, // n0x0e41 c0x0000 (---------------) + I nayoro + 0x00396146, // n0x0e42 c0x0000 (---------------) + I nemuro + 0x00296508, // n0x0e43 c0x0000 (---------------) + I niikappu + 0x0039e6c4, // n0x0e44 c0x0000 (---------------) + I niki + 0x002251cb, // n0x0e45 c0x0000 (---------------) + I nishiokoppe + 0x0033214b, // n0x0e46 c0x0000 (---------------) + I noboribetsu + 0x0022f206, // n0x0e47 c0x0000 (---------------) + I numata + 0x002eb647, // n0x0e48 c0x0000 (---------------) + I obihiro + 0x00309145, // n0x0e49 c0x0000 (---------------) + I obira + 0x0026f985, // n0x0e4a c0x0000 (---------------) + I oketo + 0x00225306, // n0x0e4b c0x0000 (---------------) + I okoppe + 0x00274205, // n0x0e4c c0x0000 (---------------) + I otaru + 0x002be145, // n0x0e4d c0x0000 (---------------) + I otobe + 0x002bf687, // n0x0e4e c0x0000 (---------------) + I otofuke + 0x00278689, // n0x0e4f c0x0000 (---------------) + I otoineppu + 0x002e6d04, // n0x0e50 c0x0000 (---------------) + I oumu + 0x00277145, // n0x0e51 c0x0000 (---------------) + I ozora + 0x002d72c5, // n0x0e52 c0x0000 (---------------) + I pippu + 0x00289ac8, // n0x0e53 c0x0000 (---------------) + I rankoshi + 0x002d1ec5, // n0x0e54 c0x0000 (---------------) + I rebun + 0x002a7649, // n0x0e55 c0x0000 (---------------) + I rikubetsu + 0x002a5647, // n0x0e56 c0x0000 (---------------) 
+ I rishiri + 0x002a564b, // n0x0e57 c0x0000 (---------------) + I rishirifuji + 0x0031cf46, // n0x0e58 c0x0000 (---------------) + I saroma + 0x0036b689, // n0x0e59 c0x0000 (---------------) + I sarufutsu + 0x00229388, // n0x0e5a c0x0000 (---------------) + I shakotan + 0x00259285, // n0x0e5b c0x0000 (---------------) + I shari + 0x00209a08, // n0x0e5c c0x0000 (---------------) + I shibecha + 0x0023bc08, // n0x0e5d c0x0000 (---------------) + I shibetsu + 0x0021afc7, // n0x0e5e c0x0000 (---------------) + I shikabe + 0x00280147, // n0x0e5f c0x0000 (---------------) + I shikaoi + 0x00280bc9, // n0x0e60 c0x0000 (---------------) + I shimamaki + 0x002281c7, // n0x0e61 c0x0000 (---------------) + I shimizu + 0x00263849, // n0x0e62 c0x0000 (---------------) + I shimokawa + 0x0029120c, // n0x0e63 c0x0000 (---------------) + I shinshinotsu + 0x00299ec8, // n0x0e64 c0x0000 (---------------) + I shintoku + 0x002d9d89, // n0x0e65 c0x0000 (---------------) + I shiranuka + 0x002d5707, // n0x0e66 c0x0000 (---------------) + I shiraoi + 0x00281809, // n0x0e67 c0x0000 (---------------) + I shiriuchi + 0x00218b47, // n0x0e68 c0x0000 (---------------) + I sobetsu + 0x00291b48, // n0x0e69 c0x0000 (---------------) + I sunagawa + 0x00289e05, // n0x0e6a c0x0000 (---------------) + I taiki + 0x00321d06, // n0x0e6b c0x0000 (---------------) + I takasu + 0x002b8f88, // n0x0e6c c0x0000 (---------------) + I takikawa + 0x002f8588, // n0x0e6d c0x0000 (---------------) + I takinoue + 0x00281149, // n0x0e6e c0x0000 (---------------) + I teshikaga + 0x002be187, // n0x0e6f c0x0000 (---------------) + I tobetsu + 0x00270305, // n0x0e70 c0x0000 (---------------) + I tohma + 0x0020bf49, // n0x0e71 c0x0000 (---------------) + I tomakomai + 0x00269686, // n0x0e72 c0x0000 (---------------) + I tomari + 0x0028e9c4, // n0x0e73 c0x0000 (---------------) + I toya + 0x0034cc46, // n0x0e74 c0x0000 (---------------) + I toyako + 0x00265f88, // n0x0e75 c0x0000 (---------------) + I toyotomi + 0x0026ae47, // 
n0x0e76 c0x0000 (---------------) + I toyoura + 0x00297508, // n0x0e77 c0x0000 (---------------) + I tsubetsu + 0x0022b1c9, // n0x0e78 c0x0000 (---------------) + I tsukigata + 0x002b6b07, // n0x0e79 c0x0000 (---------------) + I urakawa + 0x0029d406, // n0x0e7a c0x0000 (---------------) + I urausu + 0x00201c04, // n0x0e7b c0x0000 (---------------) + I uryu + 0x00203b09, // n0x0e7c c0x0000 (---------------) + I utashinai + 0x00208a08, // n0x0e7d c0x0000 (---------------) + I wakkanai + 0x002b6c47, // n0x0e7e c0x0000 (---------------) + I wassamu + 0x00325346, // n0x0e7f c0x0000 (---------------) + I yakumo + 0x00239786, // n0x0e80 c0x0000 (---------------) + I yoichi + 0x00207dc4, // n0x0e81 c0x0000 (---------------) + I aioi + 0x002a8a46, // n0x0e82 c0x0000 (---------------) + I akashi + 0x0020a683, // n0x0e83 c0x0000 (---------------) + I ako + 0x002ec709, // n0x0e84 c0x0000 (---------------) + I amagasaki + 0x002029c6, // n0x0e85 c0x0000 (---------------) + I aogaki + 0x0029bac5, // n0x0e86 c0x0000 (---------------) + I asago + 0x0029d906, // n0x0e87 c0x0000 (---------------) + I ashiya + 0x002a2f45, // n0x0e88 c0x0000 (---------------) + I awaji + 0x0027f348, // n0x0e89 c0x0000 (---------------) + I fukusaki + 0x002d79c7, // n0x0e8a c0x0000 (---------------) + I goshiki + 0x00205b86, // n0x0e8b c0x0000 (---------------) + I harima + 0x00229046, // n0x0e8c c0x0000 (---------------) + I himeji + 0x00265608, // n0x0e8d c0x0000 (---------------) + I ichikawa + 0x0029f247, // n0x0e8e c0x0000 (---------------) + I inagawa + 0x0029d5c5, // n0x0e8f c0x0000 (---------------) + I itami + 0x0029de08, // n0x0e90 c0x0000 (---------------) + I kakogawa + 0x00383348, // n0x0e91 c0x0000 (---------------) + I kamigori + 0x002e8008, // n0x0e92 c0x0000 (---------------) + I kamikawa + 0x0036bac5, // n0x0e93 c0x0000 (---------------) + I kasai + 0x00321d86, // n0x0e94 c0x0000 (---------------) + I kasuga + 0x002bdc49, // n0x0e95 c0x0000 (---------------) + I kawanishi + 
0x0028ef04, // n0x0e96 c0x0000 (---------------) + I miki + 0x0036a88b, // n0x0e97 c0x0000 (---------------) + I minamiawaji + 0x0021d1cb, // n0x0e98 c0x0000 (---------------) + I nishinomiya + 0x0022d989, // n0x0e99 c0x0000 (---------------) + I nishiwaki + 0x0020a703, // n0x0e9a c0x0000 (---------------) + I ono + 0x00259bc5, // n0x0e9b c0x0000 (---------------) + I sanda + 0x002069c6, // n0x0e9c c0x0000 (---------------) + I sannan + 0x002db6c8, // n0x0e9d c0x0000 (---------------) + I sasayama + 0x0022ed44, // n0x0e9e c0x0000 (---------------) + I sayo + 0x00266d06, // n0x0e9f c0x0000 (---------------) + I shingu + 0x002cb309, // n0x0ea0 c0x0000 (---------------) + I shinonsen + 0x002af085, // n0x0ea1 c0x0000 (---------------) + I shiso + 0x002bf5c6, // n0x0ea2 c0x0000 (---------------) + I sumoto + 0x0022c746, // n0x0ea3 c0x0000 (---------------) + I taishi + 0x00216a44, // n0x0ea4 c0x0000 (---------------) + I taka + 0x0029614a, // n0x0ea5 c0x0000 (---------------) + I takarazuka + 0x0029ba08, // n0x0ea6 c0x0000 (---------------) + I takasago + 0x002f8586, // n0x0ea7 c0x0000 (---------------) + I takino + 0x002ce105, // n0x0ea8 c0x0000 (---------------) + I tamba + 0x0020d407, // n0x0ea9 c0x0000 (---------------) + I tatsuno + 0x0024fbc7, // n0x0eaa c0x0000 (---------------) + I toyooka + 0x00350b04, // n0x0eab c0x0000 (---------------) + I yabu + 0x0021d407, // n0x0eac c0x0000 (---------------) + I yashiro + 0x00206084, // n0x0ead c0x0000 (---------------) + I yoka + 0x00206086, // n0x0eae c0x0000 (---------------) + I yokawa + 0x00208fc3, // n0x0eaf c0x0000 (---------------) + I ami + 0x002bf185, // n0x0eb0 c0x0000 (---------------) + I asahi + 0x0034be85, // n0x0eb1 c0x0000 (---------------) + I bando + 0x0022cac8, // n0x0eb2 c0x0000 (---------------) + I chikusei + 0x00214cc5, // n0x0eb3 c0x0000 (---------------) + I daigo + 0x0027a8c9, // n0x0eb4 c0x0000 (---------------) + I fujishiro + 0x002a27c7, // n0x0eb5 c0x0000 (---------------) + I hitachi + 
0x002ac70b, // n0x0eb6 c0x0000 (---------------) + I hitachinaka + 0x002a27cc, // n0x0eb7 c0x0000 (---------------) + I hitachiomiya + 0x002a344a, // n0x0eb8 c0x0000 (---------------) + I hitachiota + 0x002c1ac7, // n0x0eb9 c0x0000 (---------------) + I ibaraki + 0x002013c3, // n0x0eba c0x0000 (---------------) + I ina + 0x00370248, // n0x0ebb c0x0000 (---------------) + I inashiki + 0x0036bc85, // n0x0ebc c0x0000 (---------------) + I itako + 0x002be905, // n0x0ebd c0x0000 (---------------) + I iwama + 0x00334e84, // n0x0ebe c0x0000 (---------------) + I joso + 0x00291a46, // n0x0ebf c0x0000 (---------------) + I kamisu + 0x00243646, // n0x0ec0 c0x0000 (---------------) + I kasama + 0x002a8a87, // n0x0ec1 c0x0000 (---------------) + I kashima + 0x0020b4cb, // n0x0ec2 c0x0000 (---------------) + I kasumigaura + 0x00226304, // n0x0ec3 c0x0000 (---------------) + I koga + 0x003781c4, // n0x0ec4 c0x0000 (---------------) + I miho + 0x0026e904, // n0x0ec5 c0x0000 (---------------) + I mito + 0x002c60c6, // n0x0ec6 c0x0000 (---------------) + I moriya + 0x002164c4, // n0x0ec7 c0x0000 (---------------) + I naka + 0x002bffc8, // n0x0ec8 c0x0000 (---------------) + I namegata + 0x00334c85, // n0x0ec9 c0x0000 (---------------) + I oarai + 0x00245a05, // n0x0eca c0x0000 (---------------) + I ogawa + 0x0039a047, // n0x0ecb c0x0000 (---------------) + I omitama + 0x00201c49, // n0x0ecc c0x0000 (---------------) + I ryugasaki + 0x0034ca05, // n0x0ecd c0x0000 (---------------) + I sakai + 0x00372a4a, // n0x0ece c0x0000 (---------------) + I sakuragawa + 0x002c5e89, // n0x0ecf c0x0000 (---------------) + I shimodate + 0x00285e4a, // n0x0ed0 c0x0000 (---------------) + I shimotsuma + 0x00395d09, // n0x0ed1 c0x0000 (---------------) + I shirosato + 0x00332b04, // n0x0ed2 c0x0000 (---------------) + I sowa + 0x002af985, // n0x0ed3 c0x0000 (---------------) + I suifu + 0x00348048, // n0x0ed4 c0x0000 (---------------) + I takahagi + 0x002d930b, // n0x0ed5 c0x0000 (---------------) + I 
tamatsukuri + 0x002fa0c5, // n0x0ed6 c0x0000 (---------------) + I tokai + 0x0028e1c6, // n0x0ed7 c0x0000 (---------------) + I tomobe + 0x00221d44, // n0x0ed8 c0x0000 (---------------) + I tone + 0x0027ae06, // n0x0ed9 c0x0000 (---------------) + I toride + 0x002b6989, // n0x0eda c0x0000 (---------------) + I tsuchiura + 0x00223f07, // n0x0edb c0x0000 (---------------) + I tsukuba + 0x0030c0c8, // n0x0edc c0x0000 (---------------) + I uchihara + 0x00245746, // n0x0edd c0x0000 (---------------) + I ushiku + 0x00300547, // n0x0ede c0x0000 (---------------) + I yachiyo + 0x002808c8, // n0x0edf c0x0000 (---------------) + I yamagata + 0x00386686, // n0x0ee0 c0x0000 (---------------) + I yawara + 0x0024e8c4, // n0x0ee1 c0x0000 (---------------) + I yuki + 0x0035e047, // n0x0ee2 c0x0000 (---------------) + I anamizu + 0x00344045, // n0x0ee3 c0x0000 (---------------) + I hakui + 0x0034a747, // n0x0ee4 c0x0000 (---------------) + I hakusan + 0x00200fc4, // n0x0ee5 c0x0000 (---------------) + I kaga + 0x0033d046, // n0x0ee6 c0x0000 (---------------) + I kahoku + 0x0021a748, // n0x0ee7 c0x0000 (---------------) + I kanazawa + 0x00295d48, // n0x0ee8 c0x0000 (---------------) + I kawakita + 0x002aa707, // n0x0ee9 c0x0000 (---------------) + I komatsu + 0x002546c8, // n0x0eea c0x0000 (---------------) + I nakanoto + 0x002b4305, // n0x0eeb c0x0000 (---------------) + I nanao + 0x0020a544, // n0x0eec c0x0000 (---------------) + I nomi + 0x00265508, // n0x0eed c0x0000 (---------------) + I nonoichi + 0x002547c4, // n0x0eee c0x0000 (---------------) + I noto + 0x00216905, // n0x0eef c0x0000 (---------------) + I shika + 0x002eeb44, // n0x0ef0 c0x0000 (---------------) + I suzu + 0x002304c7, // n0x0ef1 c0x0000 (---------------) + I tsubata + 0x00288347, // n0x0ef2 c0x0000 (---------------) + I tsurugi + 0x00281948, // n0x0ef3 c0x0000 (---------------) + I uchinada + 0x002a2f86, // n0x0ef4 c0x0000 (---------------) + I wajima + 0x00214c45, // n0x0ef5 c0x0000 (---------------) + I 
fudai + 0x0027a6c8, // n0x0ef6 c0x0000 (---------------) + I fujisawa + 0x00356648, // n0x0ef7 c0x0000 (---------------) + I hanamaki + 0x0029eac9, // n0x0ef8 c0x0000 (---------------) + I hiraizumi + 0x0021d4c6, // n0x0ef9 c0x0000 (---------------) + I hirono + 0x00236408, // n0x0efa c0x0000 (---------------) + I ichinohe + 0x002820ca, // n0x0efb c0x0000 (---------------) + I ichinoseki + 0x002f5fc8, // n0x0efc c0x0000 (---------------) + I iwaizumi + 0x002d8485, // n0x0efd c0x0000 (---------------) + I iwate + 0x00224706, // n0x0efe c0x0000 (---------------) + I joboji + 0x0028d888, // n0x0eff c0x0000 (---------------) + I kamaishi + 0x002fe60a, // n0x0f00 c0x0000 (---------------) + I kanegasaki + 0x00271b47, // n0x0f01 c0x0000 (---------------) + I karumai + 0x002864c5, // n0x0f02 c0x0000 (---------------) + I kawai + 0x00294308, // n0x0f03 c0x0000 (---------------) + I kitakami + 0x00371504, // n0x0f04 c0x0000 (---------------) + I kuji + 0x0036b286, // n0x0f05 c0x0000 (---------------) + I kunohe + 0x002bb088, // n0x0f06 c0x0000 (---------------) + I kuzumaki + 0x0020a5c6, // n0x0f07 c0x0000 (---------------) + I miyako + 0x002ea148, // n0x0f08 c0x0000 (---------------) + I mizusawa + 0x0022aa47, // n0x0f09 c0x0000 (---------------) + I morioka + 0x002072c6, // n0x0f0a c0x0000 (---------------) + I ninohe + 0x0037f944, // n0x0f0b c0x0000 (---------------) + I noda + 0x002db247, // n0x0f0c c0x0000 (---------------) + I ofunato + 0x00342084, // n0x0f0d c0x0000 (---------------) + I oshu + 0x002b6947, // n0x0f0e c0x0000 (---------------) + I otsuchi + 0x0023848d, // n0x0f0f c0x0000 (---------------) + I rikuzentakata + 0x00203845, // n0x0f10 c0x0000 (---------------) + I shiwa + 0x002b108b, // n0x0f11 c0x0000 (---------------) + I shizukuishi + 0x002a0a46, // n0x0f12 c0x0000 (---------------) + I sumita + 0x00252c88, // n0x0f13 c0x0000 (---------------) + I tanohata + 0x003895c4, // n0x0f14 c0x0000 (---------------) + I tono + 0x00276f06, // n0x0f15 c0x0000 
(---------------) + I yahaba + 0x0027ce46, // n0x0f16 c0x0000 (---------------) + I yamada + 0x002088c7, // n0x0f17 c0x0000 (---------------) + I ayagawa + 0x002953cd, // n0x0f18 c0x0000 (---------------) + I higashikagawa + 0x00322787, // n0x0f19 c0x0000 (---------------) + I kanonji + 0x00339cc8, // n0x0f1a c0x0000 (---------------) + I kotohira + 0x00366045, // n0x0f1b c0x0000 (---------------) + I manno + 0x00297bc8, // n0x0f1c c0x0000 (---------------) + I marugame + 0x002c0886, // n0x0f1d c0x0000 (---------------) + I mitoyo + 0x002b4388, // n0x0f1e c0x0000 (---------------) + I naoshima + 0x00213bc6, // n0x0f1f c0x0000 (---------------) + I sanuki + 0x0032f6c7, // n0x0f20 c0x0000 (---------------) + I tadotsu + 0x0021f709, // n0x0f21 c0x0000 (---------------) + I takamatsu + 0x003895c7, // n0x0f22 c0x0000 (---------------) + I tonosho + 0x00287508, // n0x0f23 c0x0000 (---------------) + I uchinomi + 0x00273885, // n0x0f24 c0x0000 (---------------) + I utazu + 0x0021c188, // n0x0f25 c0x0000 (---------------) + I zentsuji + 0x00309245, // n0x0f26 c0x0000 (---------------) + I akune + 0x00240f85, // n0x0f27 c0x0000 (---------------) + I amami + 0x002e4bc5, // n0x0f28 c0x0000 (---------------) + I hioki + 0x00223543, // n0x0f29 c0x0000 (---------------) + I isa + 0x00282c44, // n0x0f2a c0x0000 (---------------) + I isen + 0x002802c5, // n0x0f2b c0x0000 (---------------) + I izumi + 0x00276189, // n0x0f2c c0x0000 (---------------) + I kagoshima + 0x002b2786, // n0x0f2d c0x0000 (---------------) + I kanoya + 0x002d2cc8, // n0x0f2e c0x0000 (---------------) + I kawanabe + 0x002fe805, // n0x0f2f c0x0000 (---------------) + I kinko + 0x0030b707, // n0x0f30 c0x0000 (---------------) + I kouyama + 0x0032f9ca, // n0x0f31 c0x0000 (---------------) + I makurazaki + 0x002bf509, // n0x0f32 c0x0000 (---------------) + I matsumoto + 0x002b15ca, // n0x0f33 c0x0000 (---------------) + I minamitane + 0x002cbe48, // n0x0f34 c0x0000 (---------------) + I nakatane + 0x0021efcc, // 
n0x0f35 c0x0000 (---------------) + I nishinoomote + 0x0028294d, // n0x0f36 c0x0000 (---------------) + I satsumasendai + 0x00315283, // n0x0f37 c0x0000 (---------------) + I soo + 0x002ea048, // n0x0f38 c0x0000 (---------------) + I tarumizu + 0x00206685, // n0x0f39 c0x0000 (---------------) + I yusui + 0x00354d46, // n0x0f3a c0x0000 (---------------) + I aikawa + 0x00377dc6, // n0x0f3b c0x0000 (---------------) + I atsugi + 0x0024d085, // n0x0f3c c0x0000 (---------------) + I ayase + 0x0028cb89, // n0x0f3d c0x0000 (---------------) + I chigasaki + 0x00325545, // n0x0f3e c0x0000 (---------------) + I ebina + 0x0027a6c8, // n0x0f3f c0x0000 (---------------) + I fujisawa + 0x0025ad86, // n0x0f40 c0x0000 (---------------) + I hadano + 0x00339046, // n0x0f41 c0x0000 (---------------) + I hakone + 0x002a01c9, // n0x0f42 c0x0000 (---------------) + I hiratsuka + 0x00385e87, // n0x0f43 c0x0000 (---------------) + I isehara + 0x002fdec6, // n0x0f44 c0x0000 (---------------) + I kaisei + 0x0032f948, // n0x0f45 c0x0000 (---------------) + I kamakura + 0x00206008, // n0x0f46 c0x0000 (---------------) + I kiyokawa + 0x002d0bc7, // n0x0f47 c0x0000 (---------------) + I matsuda + 0x0022840e, // n0x0f48 c0x0000 (---------------) + I minamiashigara + 0x002c0ac5, // n0x0f49 c0x0000 (---------------) + I miura + 0x00272185, // n0x0f4a c0x0000 (---------------) + I nakai + 0x0020a4c8, // n0x0f4b c0x0000 (---------------) + I ninomiya + 0x00384a47, // n0x0f4c c0x0000 (---------------) + I odawara + 0x00207e42, // n0x0f4d c0x0000 (---------------) + I oi + 0x002b8604, // n0x0f4e c0x0000 (---------------) + I oiso + 0x003a68ca, // n0x0f4f c0x0000 (---------------) + I sagamihara + 0x002b6d08, // n0x0f50 c0x0000 (---------------) + I samukawa + 0x00283806, // n0x0f51 c0x0000 (---------------) + I tsukui + 0x002982c8, // n0x0f52 c0x0000 (---------------) + I yamakita + 0x0028f086, // n0x0f53 c0x0000 (---------------) + I yamato + 0x00327d48, // n0x0f54 c0x0000 (---------------) + I 
yokosuka + 0x002abfc8, // n0x0f55 c0x0000 (---------------) + I yugawara + 0x00240f44, // n0x0f56 c0x0000 (---------------) + I zama + 0x0032a845, // n0x0f57 c0x0000 (---------------) + I zushi + 0x00686744, // n0x0f58 c0x0001 (---------------) ! I city + 0x00686744, // n0x0f59 c0x0001 (---------------) ! I city + 0x00686744, // n0x0f5a c0x0001 (---------------) ! I city + 0x00201dc3, // n0x0f5b c0x0000 (---------------) + I aki + 0x00239606, // n0x0f5c c0x0000 (---------------) + I geisei + 0x0027ab86, // n0x0f5d c0x0000 (---------------) + I hidaka + 0x0029cc4c, // n0x0f5e c0x0000 (---------------) + I higashitsuno + 0x00207303, // n0x0f5f c0x0000 (---------------) + I ino + 0x00281286, // n0x0f60 c0x0000 (---------------) + I kagami + 0x00216544, // n0x0f61 c0x0000 (---------------) + I kami + 0x002c1c08, // n0x0f62 c0x0000 (---------------) + I kitagawa + 0x002cb145, // n0x0f63 c0x0000 (---------------) + I kochi + 0x003a69c6, // n0x0f64 c0x0000 (---------------) + I mihara + 0x002b3848, // n0x0f65 c0x0000 (---------------) + I motoyama + 0x002ce6c6, // n0x0f66 c0x0000 (---------------) + I muroto + 0x00205b06, // n0x0f67 c0x0000 (---------------) + I nahari + 0x00365cc8, // n0x0f68 c0x0000 (---------------) + I nakamura + 0x002a0cc7, // n0x0f69 c0x0000 (---------------) + I nankoku + 0x00227f89, // n0x0f6a c0x0000 (---------------) + I nishitosa + 0x0036be4a, // n0x0f6b c0x0000 (---------------) + I niyodogawa + 0x00248784, // n0x0f6c c0x0000 (---------------) + I ochi + 0x002060c5, // n0x0f6d c0x0000 (---------------) + I okawa + 0x0025bb45, // n0x0f6e c0x0000 (---------------) + I otoyo + 0x0021f306, // n0x0f6f c0x0000 (---------------) + I otsuki + 0x00248a46, // n0x0f70 c0x0000 (---------------) + I sakawa + 0x002a66c6, // n0x0f71 c0x0000 (---------------) + I sukumo + 0x002e9346, // n0x0f72 c0x0000 (---------------) + I susaki + 0x002280c4, // n0x0f73 c0x0000 (---------------) + I tosa + 0x002280cb, // n0x0f74 c0x0000 (---------------) + I tosashimizu + 
0x00247dc4, // n0x0f75 c0x0000 (---------------) + I toyo + 0x0020d485, // n0x0f76 c0x0000 (---------------) + I tsuno + 0x002ab485, // n0x0f77 c0x0000 (---------------) + I umaji + 0x00280646, // n0x0f78 c0x0000 (---------------) + I yasuda + 0x00202348, // n0x0f79 c0x0000 (---------------) + I yusuhara + 0x00282807, // n0x0f7a c0x0000 (---------------) + I amakusa + 0x0030be84, // n0x0f7b c0x0000 (---------------) + I arao + 0x00264783, // n0x0f7c c0x0000 (---------------) + I aso + 0x00371005, // n0x0f7d c0x0000 (---------------) + I choyo + 0x0024ad87, // n0x0f7e c0x0000 (---------------) + I gyokuto + 0x002a41c9, // n0x0f7f c0x0000 (---------------) + I hitoyoshi + 0x0028270b, // n0x0f80 c0x0000 (---------------) + I kamiamakusa + 0x002a8a87, // n0x0f81 c0x0000 (---------------) + I kashima + 0x0022c9c7, // n0x0f82 c0x0000 (---------------) + I kikuchi + 0x002d8744, // n0x0f83 c0x0000 (---------------) + I kosa + 0x002b3748, // n0x0f84 c0x0000 (---------------) + I kumamoto + 0x002aa987, // n0x0f85 c0x0000 (---------------) + I mashiki + 0x002a4406, // n0x0f86 c0x0000 (---------------) + I mifune + 0x0024e648, // n0x0f87 c0x0000 (---------------) + I minamata + 0x002a68cb, // n0x0f88 c0x0000 (---------------) + I minamioguni + 0x003608c6, // n0x0f89 c0x0000 (---------------) + I nagasu + 0x00212149, // n0x0f8a c0x0000 (---------------) + I nishihara + 0x002a6a45, // n0x0f8b c0x0000 (---------------) + I oguni + 0x00300a43, // n0x0f8c c0x0000 (---------------) + I ozu + 0x002bf5c6, // n0x0f8d c0x0000 (---------------) + I sumoto + 0x0022a948, // n0x0f8e c0x0000 (---------------) + I takamori + 0x00212703, // n0x0f8f c0x0000 (---------------) + I uki + 0x0024ae83, // n0x0f90 c0x0000 (---------------) + I uto + 0x00223bc6, // n0x0f91 c0x0000 (---------------) + I yamaga + 0x0028f086, // n0x0f92 c0x0000 (---------------) + I yamato + 0x00382d0a, // n0x0f93 c0x0000 (---------------) + I yatsushiro + 0x0027c5c5, // n0x0f94 c0x0000 (---------------) + I ayabe + 
0x0027cc8b, // n0x0f95 c0x0000 (---------------) + I fukuchiyama + 0x0029d84b, // n0x0f96 c0x0000 (---------------) + I higashiyama + 0x00229943, // n0x0f97 c0x0000 (---------------) + I ide + 0x0021cc03, // n0x0f98 c0x0000 (---------------) + I ine + 0x002adb84, // n0x0f99 c0x0000 (---------------) + I joyo + 0x0022ad47, // n0x0f9a c0x0000 (---------------) + I kameoka + 0x0022a9c4, // n0x0f9b c0x0000 (---------------) + I kamo + 0x00201e04, // n0x0f9c c0x0000 (---------------) + I kita + 0x00342404, // n0x0f9d c0x0000 (---------------) + I kizu + 0x002f62c8, // n0x0f9e c0x0000 (---------------) + I kumiyama + 0x002ce048, // n0x0f9f c0x0000 (---------------) + I kyotamba + 0x0031f2c9, // n0x0fa0 c0x0000 (---------------) + I kyotanabe + 0x00341c88, // n0x0fa1 c0x0000 (---------------) + I kyotango + 0x002d1847, // n0x0fa2 c0x0000 (---------------) + I maizuru + 0x00228406, // n0x0fa3 c0x0000 (---------------) + I minami + 0x002d290f, // n0x0fa4 c0x0000 (---------------) + I minamiyamashiro + 0x002c0c06, // n0x0fa5 c0x0000 (---------------) + I miyazu + 0x002cb0c4, // n0x0fa6 c0x0000 (---------------) + I muko + 0x002cde8a, // n0x0fa7 c0x0000 (---------------) + I nagaokakyo + 0x0024ac87, // n0x0fa8 c0x0000 (---------------) + I nakagyo + 0x00206c06, // n0x0fa9 c0x0000 (---------------) + I nantan + 0x0028ea09, // n0x0faa c0x0000 (---------------) + I oyamazaki + 0x0031f245, // n0x0fab c0x0000 (---------------) + I sakyo + 0x0022cc05, // n0x0fac c0x0000 (---------------) + I seika + 0x0031f386, // n0x0fad c0x0000 (---------------) + I tanabe + 0x0021c2c3, // n0x0fae c0x0000 (---------------) + I uji + 0x00371549, // n0x0faf c0x0000 (---------------) + I ujitawara + 0x0021a646, // n0x0fb0 c0x0000 (---------------) + I wazuka + 0x0022af89, // n0x0fb1 c0x0000 (---------------) + I yamashina + 0x0038f146, // n0x0fb2 c0x0000 (---------------) + I yawata + 0x002bf185, // n0x0fb3 c0x0000 (---------------) + I asahi + 0x002236c5, // n0x0fb4 c0x0000 (---------------) + I 
inabe + 0x00204683, // n0x0fb5 c0x0000 (---------------) + I ise + 0x0022ae88, // n0x0fb6 c0x0000 (---------------) + I kameyama + 0x0039f7c7, // n0x0fb7 c0x0000 (---------------) + I kawagoe + 0x002f5c84, // n0x0fb8 c0x0000 (---------------) + I kiho + 0x0021f408, // n0x0fb9 c0x0000 (---------------) + I kisosaki + 0x002da084, // n0x0fba c0x0000 (---------------) + I kiwa + 0x002b9886, // n0x0fbb c0x0000 (---------------) + I komono + 0x00383786, // n0x0fbc c0x0000 (---------------) + I kumano + 0x00243e46, // n0x0fbd c0x0000 (---------------) + I kuwana + 0x002c6249, // n0x0fbe c0x0000 (---------------) + I matsusaka + 0x002be885, // n0x0fbf c0x0000 (---------------) + I meiwa + 0x002da306, // n0x0fc0 c0x0000 (---------------) + I mihama + 0x0025b189, // n0x0fc1 c0x0000 (---------------) + I minamiise + 0x002bfd46, // n0x0fc2 c0x0000 (---------------) + I misugi + 0x002d2a06, // n0x0fc3 c0x0000 (---------------) + I miyama + 0x0037fcc6, // n0x0fc4 c0x0000 (---------------) + I nabari + 0x0020ef45, // n0x0fc5 c0x0000 (---------------) + I shima + 0x002eeb46, // n0x0fc6 c0x0000 (---------------) + I suzuka + 0x0032f6c4, // n0x0fc7 c0x0000 (---------------) + I tado + 0x00289e05, // n0x0fc8 c0x0000 (---------------) + I taiki + 0x002b8f84, // n0x0fc9 c0x0000 (---------------) + I taki + 0x003037c6, // n0x0fca c0x0000 (---------------) + I tamaki + 0x00395ec4, // n0x0fcb c0x0000 (---------------) + I toba + 0x00208c83, // n0x0fcc c0x0000 (---------------) + I tsu + 0x00286185, // n0x0fcd c0x0000 (---------------) + I udono + 0x0023b908, // n0x0fce c0x0000 (---------------) + I ureshino + 0x00351b87, // n0x0fcf c0x0000 (---------------) + I watarai + 0x002b1f89, // n0x0fd0 c0x0000 (---------------) + I yokkaichi + 0x002863c8, // n0x0fd1 c0x0000 (---------------) + I furukawa + 0x00297811, // n0x0fd2 c0x0000 (---------------) + I higashimatsushima + 0x0022c7ca, // n0x0fd3 c0x0000 (---------------) + I ishinomaki + 0x0022f147, // n0x0fd4 c0x0000 (---------------) + I 
iwanuma + 0x0039fd06, // n0x0fd5 c0x0000 (---------------) + I kakuda + 0x00216544, // n0x0fd6 c0x0000 (---------------) + I kami + 0x002b9088, // n0x0fd7 c0x0000 (---------------) + I kawasaki + 0x002935c9, // n0x0fd8 c0x0000 (---------------) + I kesennuma + 0x002a8bc8, // n0x0fd9 c0x0000 (---------------) + I marumori + 0x002979ca, // n0x0fda c0x0000 (---------------) + I matsushima + 0x002a740d, // n0x0fdb c0x0000 (---------------) + I minamisanriku + 0x0022f486, // n0x0fdc c0x0000 (---------------) + I misato + 0x00365dc6, // n0x0fdd c0x0000 (---------------) + I murata + 0x002db306, // n0x0fde c0x0000 (---------------) + I natori + 0x0037e6c7, // n0x0fdf c0x0000 (---------------) + I ogawara + 0x0029ef85, // n0x0fe0 c0x0000 (---------------) + I ohira + 0x003508c7, // n0x0fe1 c0x0000 (---------------) + I onagawa + 0x0021f4c5, // n0x0fe2 c0x0000 (---------------) + I osaki + 0x002a5784, // n0x0fe3 c0x0000 (---------------) + I rifu + 0x002a9286, // n0x0fe4 c0x0000 (---------------) + I semine + 0x00321bc7, // n0x0fe5 c0x0000 (---------------) + I shibata + 0x0037124d, // n0x0fe6 c0x0000 (---------------) + I shichikashuku + 0x0028d7c7, // n0x0fe7 c0x0000 (---------------) + I shikama + 0x0026ebc8, // n0x0fe8 c0x0000 (---------------) + I shiogama + 0x0027a9c9, // n0x0fe9 c0x0000 (---------------) + I shiroishi + 0x00224606, // n0x0fea c0x0000 (---------------) + I tagajo + 0x0023b045, // n0x0feb c0x0000 (---------------) + I taiwa + 0x00216044, // n0x0fec c0x0000 (---------------) + I tome + 0x00266086, // n0x0fed c0x0000 (---------------) + I tomiya + 0x00350a06, // n0x0fee c0x0000 (---------------) + I wakuya + 0x002b6e86, // n0x0fef c0x0000 (---------------) + I watari + 0x0029ae08, // n0x0ff0 c0x0000 (---------------) + I yamamoto + 0x00212d43, // n0x0ff1 c0x0000 (---------------) + I zao + 0x002088c3, // n0x0ff2 c0x0000 (---------------) + I aya + 0x00328105, // n0x0ff3 c0x0000 (---------------) + I ebino + 0x00337286, // n0x0ff4 c0x0000 
(---------------) + I gokase + 0x002abf85, // n0x0ff5 c0x0000 (---------------) + I hyuga + 0x00245948, // n0x0ff6 c0x0000 (---------------) + I kadogawa + 0x0029c60a, // n0x0ff7 c0x0000 (---------------) + I kawaminami + 0x002ddbc4, // n0x0ff8 c0x0000 (---------------) + I kijo + 0x002c1c08, // n0x0ff9 c0x0000 (---------------) + I kitagawa + 0x00295fc8, // n0x0ffa c0x0000 (---------------) + I kitakata + 0x00280487, // n0x0ffb c0x0000 (---------------) + I kitaura + 0x002fe8c9, // n0x0ffc c0x0000 (---------------) + I kobayashi + 0x002b3448, // n0x0ffd c0x0000 (---------------) + I kunitomi + 0x0029a047, // n0x0ffe c0x0000 (---------------) + I kushima + 0x00294c06, // n0x0fff c0x0000 (---------------) + I mimata + 0x0020a5ca, // n0x1000 c0x0000 (---------------) + I miyakonojo + 0x00266108, // n0x1001 c0x0000 (---------------) + I miyazaki + 0x002bd509, // n0x1002 c0x0000 (---------------) + I morotsuka + 0x002869c8, // n0x1003 c0x0000 (---------------) + I nichinan + 0x0021c809, // n0x1004 c0x0000 (---------------) + I nishimera + 0x002bd987, // n0x1005 c0x0000 (---------------) + I nobeoka + 0x00341b45, // n0x1006 c0x0000 (---------------) + I saito + 0x002a1746, // n0x1007 c0x0000 (---------------) + I shiiba + 0x0029b188, // n0x1008 c0x0000 (---------------) + I shintomi + 0x00252e08, // n0x1009 c0x0000 (---------------) + I takaharu + 0x0022b388, // n0x100a c0x0000 (---------------) + I takanabe + 0x00216a48, // n0x100b c0x0000 (---------------) + I takazaki + 0x0020d485, // n0x100c c0x0000 (---------------) + I tsuno + 0x00201544, // n0x100d c0x0000 (---------------) + I achi + 0x0039f4c8, // n0x100e c0x0000 (---------------) + I agematsu + 0x00206d04, // n0x100f c0x0000 (---------------) + I anan + 0x00395b04, // n0x1010 c0x0000 (---------------) + I aoki + 0x002bf185, // n0x1011 c0x0000 (---------------) + I asahi + 0x0028ffc7, // n0x1012 c0x0000 (---------------) + I azumino + 0x00201a09, // n0x1013 c0x0000 (---------------) + I chikuhoku + 0x002086c7, 
// n0x1014 c0x0000 (---------------) + I chikuma + 0x0020db85, // n0x1015 c0x0000 (---------------) + I chino + 0x00278286, // n0x1016 c0x0000 (---------------) + I fujimi + 0x0033bb06, // n0x1017 c0x0000 (---------------) + I hakuba + 0x00202444, // n0x1018 c0x0000 (---------------) + I hara + 0x002a0506, // n0x1019 c0x0000 (---------------) + I hiraya + 0x00214a84, // n0x101a c0x0000 (---------------) + I iida + 0x0025cc86, // n0x101b c0x0000 (---------------) + I iijima + 0x0039e786, // n0x101c c0x0000 (---------------) + I iiyama + 0x00212546, // n0x101d c0x0000 (---------------) + I iizuna + 0x00202585, // n0x101e c0x0000 (---------------) + I ikeda + 0x00245807, // n0x101f c0x0000 (---------------) + I ikusaka + 0x002013c3, // n0x1020 c0x0000 (---------------) + I ina + 0x00249f89, // n0x1021 c0x0000 (---------------) + I karuizawa + 0x002fdbc8, // n0x1022 c0x0000 (---------------) + I kawakami + 0x0021f404, // n0x1023 c0x0000 (---------------) + I kiso + 0x0032fbcd, // n0x1024 c0x0000 (---------------) + I kisofukushima + 0x00295e48, // n0x1025 c0x0000 (---------------) + I kitaaiki + 0x0028e748, // n0x1026 c0x0000 (---------------) + I komagane + 0x002bd486, // n0x1027 c0x0000 (---------------) + I komoro + 0x0021f809, // n0x1028 c0x0000 (---------------) + I matsukawa + 0x002bf509, // n0x1029 c0x0000 (---------------) + I matsumoto + 0x002b6685, // n0x102a c0x0000 (---------------) + I miasa + 0x0029c70a, // n0x102b c0x0000 (---------------) + I minamiaiki + 0x0027f80a, // n0x102c c0x0000 (---------------) + I minamimaki + 0x0028850c, // n0x102d c0x0000 (---------------) + I minamiminowa + 0x00288686, // n0x102e c0x0000 (---------------) + I minowa + 0x00278b46, // n0x102f c0x0000 (---------------) + I miyada + 0x002c0d86, // n0x1030 c0x0000 (---------------) + I miyota + 0x00257009, // n0x1031 c0x0000 (---------------) + I mochizuki + 0x00354f86, // n0x1032 c0x0000 (---------------) + I nagano + 0x00291bc6, // n0x1033 c0x0000 (---------------) + I nagawa 
+ 0x00325606, // n0x1034 c0x0000 (---------------) + I nagiso + 0x002ac8c8, // n0x1035 c0x0000 (---------------) + I nakagawa + 0x002546c6, // n0x1036 c0x0000 (---------------) + I nakano + 0x002c658b, // n0x1037 c0x0000 (---------------) + I nozawaonsen + 0x00290145, // n0x1038 c0x0000 (---------------) + I obuse + 0x00245a05, // n0x1039 c0x0000 (---------------) + I ogawa + 0x00278dc5, // n0x103a c0x0000 (---------------) + I okaya + 0x002014c6, // n0x103b c0x0000 (---------------) + I omachi + 0x0020a583, // n0x103c c0x0000 (---------------) + I omi + 0x00243dc6, // n0x103d c0x0000 (---------------) + I ookuwa + 0x0028d747, // n0x103e c0x0000 (---------------) + I ooshika + 0x002b8f45, // n0x103f c0x0000 (---------------) + I otaki + 0x0025bc45, // n0x1040 c0x0000 (---------------) + I otari + 0x002dd385, // n0x1041 c0x0000 (---------------) + I sakae + 0x00318606, // n0x1042 c0x0000 (---------------) + I sakaki + 0x002b6744, // n0x1043 c0x0000 (---------------) + I saku + 0x00369d86, // n0x1044 c0x0000 (---------------) + I sakuho + 0x0027b309, // n0x1045 c0x0000 (---------------) + I shimosuwa + 0x0020134c, // n0x1046 c0x0000 (---------------) + I shinanomachi + 0x002a54c8, // n0x1047 c0x0000 (---------------) + I shiojiri + 0x0027b444, // n0x1048 c0x0000 (---------------) + I suwa + 0x002ee7c6, // n0x1049 c0x0000 (---------------) + I suzaka + 0x002a0b46, // n0x104a c0x0000 (---------------) + I takagi + 0x0022a948, // n0x104b c0x0000 (---------------) + I takamori + 0x002c0148, // n0x104c c0x0000 (---------------) + I takayama + 0x00201249, // n0x104d c0x0000 (---------------) + I tateshina + 0x0020d407, // n0x104e c0x0000 (---------------) + I tatsuno + 0x002ae7c9, // n0x104f c0x0000 (---------------) + I togakushi + 0x0026fa46, // n0x1050 c0x0000 (---------------) + I togura + 0x0022f404, // n0x1051 c0x0000 (---------------) + I tomi + 0x0020e184, // n0x1052 c0x0000 (---------------) + I ueda + 0x0024a344, // n0x1053 c0x0000 (---------------) + I wada + 
0x002808c8, // n0x1054 c0x0000 (---------------) + I yamagata + 0x0020184a, // n0x1055 c0x0000 (---------------) + I yamanouchi + 0x0034c986, // n0x1056 c0x0000 (---------------) + I yasaka + 0x00353407, // n0x1057 c0x0000 (---------------) + I yasuoka + 0x00311d87, // n0x1058 c0x0000 (---------------) + I chijiwa + 0x0036b785, // n0x1059 c0x0000 (---------------) + I futsu + 0x0028de84, // n0x105a c0x0000 (---------------) + I goto + 0x0028bb06, // n0x105b c0x0000 (---------------) + I hasami + 0x00339dc6, // n0x105c c0x0000 (---------------) + I hirado + 0x0023be03, // n0x105d c0x0000 (---------------) + I iki + 0x002fda07, // n0x105e c0x0000 (---------------) + I isahaya + 0x00330e48, // n0x105f c0x0000 (---------------) + I kawatana + 0x002b67ca, // n0x1060 c0x0000 (---------------) + I kuchinotsu + 0x002c9f88, // n0x1061 c0x0000 (---------------) + I matsuura + 0x002dda48, // n0x1062 c0x0000 (---------------) + I nagasaki + 0x00395f05, // n0x1063 c0x0000 (---------------) + I obama + 0x0037ed85, // n0x1064 c0x0000 (---------------) + I omura + 0x002ae705, // n0x1065 c0x0000 (---------------) + I oseto + 0x0036bb46, // n0x1066 c0x0000 (---------------) + I saikai + 0x003a3f06, // n0x1067 c0x0000 (---------------) + I sasebo + 0x00218945, // n0x1068 c0x0000 (---------------) + I seihi + 0x002ec949, // n0x1069 c0x0000 (---------------) + I shimabara + 0x0028dc8c, // n0x106a c0x0000 (---------------) + I shinkamigoto + 0x00240187, // n0x106b c0x0000 (---------------) + I togitsu + 0x00297a48, // n0x106c c0x0000 (---------------) + I tsushima + 0x0028c145, // n0x106d c0x0000 (---------------) + I unzen + 0x00686744, // n0x106e c0x0001 (---------------) ! 
I city + 0x00259644, // n0x106f c0x0000 (---------------) + I ando + 0x002b13c4, // n0x1070 c0x0000 (---------------) + I gose + 0x0020dcc6, // n0x1071 c0x0000 (---------------) + I heguri + 0x0029e3ce, // n0x1072 c0x0000 (---------------) + I higashiyoshino + 0x0022cc87, // n0x1073 c0x0000 (---------------) + I ikaruga + 0x00296ec5, // n0x1074 c0x0000 (---------------) + I ikoma + 0x0028ee8c, // n0x1075 c0x0000 (---------------) + I kamikitayama + 0x002d9f47, // n0x1076 c0x0000 (---------------) + I kanmaki + 0x00321b47, // n0x1077 c0x0000 (---------------) + I kashiba + 0x00398f89, // n0x1078 c0x0000 (---------------) + I kashihara + 0x00219349, // n0x1079 c0x0000 (---------------) + I katsuragi + 0x002864c5, // n0x107a c0x0000 (---------------) + I kawai + 0x002fdbc8, // n0x107b c0x0000 (---------------) + I kawakami + 0x002bdc49, // n0x107c c0x0000 (---------------) + I kawanishi + 0x002d6e45, // n0x107d c0x0000 (---------------) + I koryo + 0x002b8e88, // n0x107e c0x0000 (---------------) + I kurotaki + 0x002c6b46, // n0x107f c0x0000 (---------------) + I mitsue + 0x002d2306, // n0x1080 c0x0000 (---------------) + I miyake + 0x002c8a04, // n0x1081 c0x0000 (---------------) + I nara + 0x003281c8, // n0x1082 c0x0000 (---------------) + I nosegawa + 0x002247c3, // n0x1083 c0x0000 (---------------) + I oji + 0x00209204, // n0x1084 c0x0000 (---------------) + I ouda + 0x00371085, // n0x1085 c0x0000 (---------------) + I oyodo + 0x00307fc7, // n0x1086 c0x0000 (---------------) + I sakurai + 0x00202c85, // n0x1087 c0x0000 (---------------) + I sango + 0x00281f89, // n0x1088 c0x0000 (---------------) + I shimoichi + 0x0026784d, // n0x1089 c0x0000 (---------------) + I shimokitayama + 0x0033bd46, // n0x108a c0x0000 (---------------) + I shinjo + 0x002647c4, // n0x108b c0x0000 (---------------) + I soni + 0x00294d08, // n0x108c c0x0000 (---------------) + I takatori + 0x002784ca, // n0x108d c0x0000 (---------------) + I tawaramoto + 0x002177c7, // n0x108e c0x0000 
(---------------) + I tenkawa + 0x003460c5, // n0x108f c0x0000 (---------------) + I tenri + 0x00209243, // n0x1090 c0x0000 (---------------) + I uda + 0x0029da0e, // n0x1091 c0x0000 (---------------) + I yamatokoriyama + 0x0028f08c, // n0x1092 c0x0000 (---------------) + I yamatotakada + 0x002fa407, // n0x1093 c0x0000 (---------------) + I yamazoe + 0x0029e587, // n0x1094 c0x0000 (---------------) + I yoshino + 0x00201003, // n0x1095 c0x0000 (---------------) + I aga + 0x00354fc5, // n0x1096 c0x0000 (---------------) + I agano + 0x002b13c5, // n0x1097 c0x0000 (---------------) + I gosen + 0x00298688, // n0x1098 c0x0000 (---------------) + I itoigawa + 0x00294149, // n0x1099 c0x0000 (---------------) + I izumozaki + 0x00292e46, // n0x109a c0x0000 (---------------) + I joetsu + 0x0022a9c4, // n0x109b c0x0000 (---------------) + I kamo + 0x0022f086, // n0x109c c0x0000 (---------------) + I kariwa + 0x00205dcb, // n0x109d c0x0000 (---------------) + I kashiwazaki + 0x002c598c, // n0x109e c0x0000 (---------------) + I minamiuonuma + 0x002ebdc7, // n0x109f c0x0000 (---------------) + I mitsuke + 0x002cae05, // n0x10a0 c0x0000 (---------------) + I muika + 0x00383248, // n0x10a1 c0x0000 (---------------) + I murakami + 0x002d0985, // n0x10a2 c0x0000 (---------------) + I myoko + 0x002cde87, // n0x10a3 c0x0000 (---------------) + I nagaoka + 0x0023ff87, // n0x10a4 c0x0000 (---------------) + I niigata + 0x0024f385, // n0x10a5 c0x0000 (---------------) + I ojiya + 0x0020a583, // n0x10a6 c0x0000 (---------------) + I omi + 0x00360584, // n0x10a7 c0x0000 (---------------) + I sado + 0x00203f85, // n0x10a8 c0x0000 (---------------) + I sanjo + 0x002e6c05, // n0x10a9 c0x0000 (---------------) + I seiro + 0x002e6c06, // n0x10aa c0x0000 (---------------) + I seirou + 0x0026c588, // n0x10ab c0x0000 (---------------) + I sekikawa + 0x00321bc7, // n0x10ac c0x0000 (---------------) + I shibata + 0x003780c6, // n0x10ad c0x0000 (---------------) + I tagami + 0x00354c46, // n0x10ae 
c0x0000 (---------------) + I tainai + 0x002e4b06, // n0x10af c0x0000 (---------------) + I tochio + 0x00297189, // n0x10b0 c0x0000 (---------------) + I tokamachi + 0x00208c87, // n0x10b1 c0x0000 (---------------) + I tsubame + 0x00292cc6, // n0x10b2 c0x0000 (---------------) + I tsunan + 0x002c5b06, // n0x10b3 c0x0000 (---------------) + I uonuma + 0x0024f446, // n0x10b4 c0x0000 (---------------) + I yahiko + 0x002a8945, // n0x10b5 c0x0000 (---------------) + I yoita + 0x00217246, // n0x10b6 c0x0000 (---------------) + I yuzawa + 0x0038de05, // n0x10b7 c0x0000 (---------------) + I beppu + 0x002d1f48, // n0x10b8 c0x0000 (---------------) + I bungoono + 0x00292a0b, // n0x10b9 c0x0000 (---------------) + I bungotakada + 0x0028b906, // n0x10ba c0x0000 (---------------) + I hasama + 0x00311dc4, // n0x10bb c0x0000 (---------------) + I hiji + 0x002fe3c9, // n0x10bc c0x0000 (---------------) + I himeshima + 0x002a27c4, // n0x10bd c0x0000 (---------------) + I hita + 0x002c6ac8, // n0x10be c0x0000 (---------------) + I kamitsue + 0x0028adc7, // n0x10bf c0x0000 (---------------) + I kokonoe + 0x00283fc4, // n0x10c0 c0x0000 (---------------) + I kuju + 0x002b2a08, // n0x10c1 c0x0000 (---------------) + I kunisaki + 0x002ba1c4, // n0x10c2 c0x0000 (---------------) + I kusu + 0x002a8984, // n0x10c3 c0x0000 (---------------) + I oita + 0x00286f45, // n0x10c4 c0x0000 (---------------) + I saiki + 0x00305a86, // n0x10c5 c0x0000 (---------------) + I taketa + 0x002f6207, // n0x10c6 c0x0000 (---------------) + I tsukumi + 0x0022b983, // n0x10c7 c0x0000 (---------------) + I usa + 0x0029d4c5, // n0x10c8 c0x0000 (---------------) + I usuki + 0x002bc144, // n0x10c9 c0x0000 (---------------) + I yufu + 0x002721c6, // n0x10ca c0x0000 (---------------) + I akaiwa + 0x002b6708, // n0x10cb c0x0000 (---------------) + I asakuchi + 0x00330b85, // n0x10cc c0x0000 (---------------) + I bizen + 0x0028fa49, // n0x10cd c0x0000 (---------------) + I hayashima + 0x0020c145, // n0x10ce c0x0000 
(---------------) + I ibara + 0x002bbb48, // n0x10cf c0x0000 (---------------) + I kagamino + 0x00320d87, // n0x10d0 c0x0000 (---------------) + I kasaoka + 0x003809c8, // n0x10d1 c0x0000 (---------------) + I kibichuo + 0x002b1dc7, // n0x10d2 c0x0000 (---------------) + I kumenan + 0x0037e0c9, // n0x10d3 c0x0000 (---------------) + I kurashiki + 0x0031d046, // n0x10d4 c0x0000 (---------------) + I maniwa + 0x00347a06, // n0x10d5 c0x0000 (---------------) + I misaki + 0x00269404, // n0x10d6 c0x0000 (---------------) + I nagi + 0x00294b45, // n0x10d7 c0x0000 (---------------) + I niimi + 0x002f48cc, // n0x10d8 c0x0000 (---------------) + I nishiawakura + 0x00278dc7, // n0x10d9 c0x0000 (---------------) + I okayama + 0x002791c7, // n0x10da c0x0000 (---------------) + I satosho + 0x00311c48, // n0x10db c0x0000 (---------------) + I setouchi + 0x0033bd46, // n0x10dc c0x0000 (---------------) + I shinjo + 0x0029eec4, // n0x10dd c0x0000 (---------------) + I shoo + 0x00323cc4, // n0x10de c0x0000 (---------------) + I soja + 0x00280a49, // n0x10df c0x0000 (---------------) + I takahashi + 0x002c0e86, // n0x10e0 c0x0000 (---------------) + I tamano + 0x0021ec47, // n0x10e1 c0x0000 (---------------) + I tsuyama + 0x00208504, // n0x10e2 c0x0000 (---------------) + I wake + 0x002b2886, // n0x10e3 c0x0000 (---------------) + I yakage + 0x00320985, // n0x10e4 c0x0000 (---------------) + I aguni + 0x002a2ac7, // n0x10e5 c0x0000 (---------------) + I ginowan + 0x002c6506, // n0x10e6 c0x0000 (---------------) + I ginoza + 0x0025c649, // n0x10e7 c0x0000 (---------------) + I gushikami + 0x0027f647, // n0x10e8 c0x0000 (---------------) + I haebaru + 0x00268047, // n0x10e9 c0x0000 (---------------) + I higashi + 0x002a0046, // n0x10ea c0x0000 (---------------) + I hirara + 0x002452c5, // n0x10eb c0x0000 (---------------) + I iheya + 0x0027e248, // n0x10ec c0x0000 (---------------) + I ishigaki + 0x0021a4c8, // n0x10ed c0x0000 (---------------) + I ishikawa + 0x00242b46, // n0x10ee 
c0x0000 (---------------) + I itoman + 0x00330bc5, // n0x10ef c0x0000 (---------------) + I izena + 0x00331c86, // n0x10f0 c0x0000 (---------------) + I kadena + 0x002154c3, // n0x10f1 c0x0000 (---------------) + I kin + 0x00298509, // n0x10f2 c0x0000 (---------------) + I kitadaito + 0x002a644e, // n0x10f3 c0x0000 (---------------) + I kitanakagusuku + 0x002b1ac8, // n0x10f4 c0x0000 (---------------) + I kumejima + 0x002da188, // n0x10f5 c0x0000 (---------------) + I kunigami + 0x0024294b, // n0x10f6 c0x0000 (---------------) + I minamidaito + 0x0028fc86, // n0x10f7 c0x0000 (---------------) + I motobu + 0x002486c4, // n0x10f8 c0x0000 (---------------) + I nago + 0x00205b04, // n0x10f9 c0x0000 (---------------) + I naha + 0x002a654a, // n0x10fa c0x0000 (---------------) + I nakagusuku + 0x00217e07, // n0x10fb c0x0000 (---------------) + I nakijin + 0x00292d85, // n0x10fc c0x0000 (---------------) + I nanjo + 0x00212149, // n0x10fd c0x0000 (---------------) + I nishihara + 0x002b8285, // n0x10fe c0x0000 (---------------) + I ogimi + 0x00395b47, // n0x10ff c0x0000 (---------------) + I okinawa + 0x00301304, // n0x1100 c0x0000 (---------------) + I onna + 0x00383e87, // n0x1101 c0x0000 (---------------) + I shimoji + 0x0022f308, // n0x1102 c0x0000 (---------------) + I taketomi + 0x002b0046, // n0x1103 c0x0000 (---------------) + I tarama + 0x00342249, // n0x1104 c0x0000 (---------------) + I tokashiki + 0x002b354a, // n0x1105 c0x0000 (---------------) + I tomigusuku + 0x00217d86, // n0x1106 c0x0000 (---------------) + I tonaki + 0x00295986, // n0x1107 c0x0000 (---------------) + I urasoe + 0x002ab405, // n0x1108 c0x0000 (---------------) + I uruma + 0x00373d05, // n0x1109 c0x0000 (---------------) + I yaese + 0x00229a47, // n0x110a c0x0000 (---------------) + I yomitan + 0x00249888, // n0x110b c0x0000 (---------------) + I yonabaru + 0x003208c8, // n0x110c c0x0000 (---------------) + I yonaguni + 0x00240f46, // n0x110d c0x0000 (---------------) + I zamami + 
0x00223745, // n0x110e c0x0000 (---------------) + I abeno + 0x002487ce, // n0x110f c0x0000 (---------------) + I chihayaakasaka + 0x0030ba84, // n0x1110 c0x0000 (---------------) + I chuo + 0x00242ac5, // n0x1111 c0x0000 (---------------) + I daito + 0x00277c09, // n0x1112 c0x0000 (---------------) + I fujiidera + 0x0026b5c8, // n0x1113 c0x0000 (---------------) + I habikino + 0x003a1c46, // n0x1114 c0x0000 (---------------) + I hannan + 0x0029aa8c, // n0x1115 c0x0000 (---------------) + I higashiosaka + 0x0029c210, // n0x1116 c0x0000 (---------------) + I higashisumiyoshi + 0x0029e00f, // n0x1117 c0x0000 (---------------) + I higashiyodogawa + 0x0029efc8, // n0x1118 c0x0000 (---------------) + I hirakata + 0x002c1ac7, // n0x1119 c0x0000 (---------------) + I ibaraki + 0x00202585, // n0x111a c0x0000 (---------------) + I ikeda + 0x002802c5, // n0x111b c0x0000 (---------------) + I izumi + 0x002f6089, // n0x111c c0x0000 (---------------) + I izumiotsu + 0x00294509, // n0x111d c0x0000 (---------------) + I izumisano + 0x0021e5c6, // n0x111e c0x0000 (---------------) + I kadoma + 0x002fa147, // n0x111f c0x0000 (---------------) + I kaizuka + 0x0038ad85, // n0x1120 c0x0000 (---------------) + I kanan + 0x002037c9, // n0x1121 c0x0000 (---------------) + I kashiwara + 0x0032c486, // n0x1122 c0x0000 (---------------) + I katano + 0x00354dcd, // n0x1123 c0x0000 (---------------) + I kawachinagano + 0x00287009, // n0x1124 c0x0000 (---------------) + I kishiwada + 0x00201e04, // n0x1125 c0x0000 (---------------) + I kita + 0x002b1848, // n0x1126 c0x0000 (---------------) + I kumatori + 0x0039f589, // n0x1127 c0x0000 (---------------) + I matsubara + 0x0034cb46, // n0x1128 c0x0000 (---------------) + I minato + 0x00278385, // n0x1129 c0x0000 (---------------) + I minoh + 0x00347a06, // n0x112a c0x0000 (---------------) + I misaki + 0x0030bf89, // n0x112b c0x0000 (---------------) + I moriguchi + 0x00320008, // n0x112c c0x0000 (---------------) + I neyagawa + 0x00211805, // 
n0x112d c0x0000 (---------------) + I nishi + 0x0026c504, // n0x112e c0x0000 (---------------) + I nose + 0x0029ac4b, // n0x112f c0x0000 (---------------) + I osakasayama + 0x0034ca05, // n0x1130 c0x0000 (---------------) + I sakai + 0x0029ad86, // n0x1131 c0x0000 (---------------) + I sayama + 0x00282c86, // n0x1132 c0x0000 (---------------) + I sennan + 0x0024c046, // n0x1133 c0x0000 (---------------) + I settsu + 0x0038428b, // n0x1134 c0x0000 (---------------) + I shijonawate + 0x0028fb49, // n0x1135 c0x0000 (---------------) + I shimamoto + 0x00218c85, // n0x1136 c0x0000 (---------------) + I suita + 0x00380787, // n0x1137 c0x0000 (---------------) + I tadaoka + 0x0022c746, // n0x1138 c0x0000 (---------------) + I taishi + 0x00238746, // n0x1139 c0x0000 (---------------) + I tajiri + 0x00281e48, // n0x113a c0x0000 (---------------) + I takaishi + 0x00305b89, // n0x113b c0x0000 (---------------) + I takatsuki + 0x0026e98c, // n0x113c c0x0000 (---------------) + I tondabayashi + 0x0024ab88, // n0x113d c0x0000 (---------------) + I toyonaka + 0x0037b006, // n0x113e c0x0000 (---------------) + I toyono + 0x00341743, // n0x113f c0x0000 (---------------) + I yao + 0x00249246, // n0x1140 c0x0000 (---------------) + I ariake + 0x0027ff85, // n0x1141 c0x0000 (---------------) + I arita + 0x0027cfc8, // n0x1142 c0x0000 (---------------) + I fukudomi + 0x00223146, // n0x1143 c0x0000 (---------------) + I genkai + 0x002a2d08, // n0x1144 c0x0000 (---------------) + I hamatama + 0x0024ba45, // n0x1145 c0x0000 (---------------) + I hizen + 0x0027bc05, // n0x1146 c0x0000 (---------------) + I imari + 0x00321008, // n0x1147 c0x0000 (---------------) + I kamimine + 0x002eec47, // n0x1148 c0x0000 (---------------) + I kanzaki + 0x00377d07, // n0x1149 c0x0000 (---------------) + I karatsu + 0x002a8a87, // n0x114a c0x0000 (---------------) + I kashima + 0x0021f588, // n0x114b c0x0000 (---------------) + I kitagata + 0x0028ebc8, // n0x114c c0x0000 (---------------) + I kitahata + 
0x0024edc6, // n0x114d c0x0000 (---------------) + I kiyama + 0x00303607, // n0x114e c0x0000 (---------------) + I kouhoku + 0x0036ad87, // n0x114f c0x0000 (---------------) + I kyuragi + 0x0027fe4a, // n0x1150 c0x0000 (---------------) + I nishiarita + 0x00213483, // n0x1151 c0x0000 (---------------) + I ogi + 0x002014c6, // n0x1152 c0x0000 (---------------) + I omachi + 0x00201985, // n0x1153 c0x0000 (---------------) + I ouchi + 0x00238904, // n0x1154 c0x0000 (---------------) + I saga + 0x0027a9c9, // n0x1155 c0x0000 (---------------) + I shiroishi + 0x0037e044, // n0x1156 c0x0000 (---------------) + I taku + 0x002a1204, // n0x1157 c0x0000 (---------------) + I tara + 0x002a09c4, // n0x1158 c0x0000 (---------------) + I tosu + 0x0029e58b, // n0x1159 c0x0000 (---------------) + I yoshinogari + 0x0039f707, // n0x115a c0x0000 (---------------) + I arakawa + 0x00248a05, // n0x115b c0x0000 (---------------) + I asaka + 0x00292888, // n0x115c c0x0000 (---------------) + I chichibu + 0x00278286, // n0x115d c0x0000 (---------------) + I fujimi + 0x00278288, // n0x115e c0x0000 (---------------) + I fujimino + 0x0027c506, // n0x115f c0x0000 (---------------) + I fukaya + 0x00289385, // n0x1160 c0x0000 (---------------) + I hanno + 0x0028a805, // n0x1161 c0x0000 (---------------) + I hanyu + 0x0028c486, // n0x1162 c0x0000 (---------------) + I hasuda + 0x0028d4c8, // n0x1163 c0x0000 (---------------) + I hatogaya + 0x0028e948, // n0x1164 c0x0000 (---------------) + I hatoyama + 0x0027ab86, // n0x1165 c0x0000 (---------------) + I hidaka + 0x002926cf, // n0x1166 c0x0000 (---------------) + I higashichichibu + 0x00297fd0, // n0x1167 c0x0000 (---------------) + I higashimatsuyama + 0x0038e545, // n0x1168 c0x0000 (---------------) + I honjo + 0x002013c3, // n0x1169 c0x0000 (---------------) + I ina + 0x00251c85, // n0x116a c0x0000 (---------------) + I iruma + 0x002ff488, // n0x116b c0x0000 (---------------) + I iwatsuki + 0x00294409, // n0x116c c0x0000 (---------------) + I 
kamiizumi + 0x002e8008, // n0x116d c0x0000 (---------------) + I kamikawa + 0x0034cdc8, // n0x116e c0x0000 (---------------) + I kamisato + 0x00207888, // n0x116f c0x0000 (---------------) + I kasukabe + 0x0039f7c7, // n0x1170 c0x0000 (---------------) + I kawagoe + 0x00277f49, // n0x1171 c0x0000 (---------------) + I kawaguchi + 0x002a2f08, // n0x1172 c0x0000 (---------------) + I kawajima + 0x002b7444, // n0x1173 c0x0000 (---------------) + I kazo + 0x002a0848, // n0x1174 c0x0000 (---------------) + I kitamoto + 0x00289b89, // n0x1175 c0x0000 (---------------) + I koshigaya + 0x0030a447, // n0x1176 c0x0000 (---------------) + I kounosu + 0x002a63c4, // n0x1177 c0x0000 (---------------) + I kuki + 0x00208788, // n0x1178 c0x0000 (---------------) + I kumagaya + 0x002455ca, // n0x1179 c0x0000 (---------------) + I matsubushi + 0x002d8c06, // n0x117a c0x0000 (---------------) + I minano + 0x0022f486, // n0x117b c0x0000 (---------------) + I misato + 0x0021d389, // n0x117c c0x0000 (---------------) + I miyashiro + 0x0029c447, // n0x117d c0x0000 (---------------) + I miyoshi + 0x002c6ec8, // n0x117e c0x0000 (---------------) + I moroyama + 0x0038e088, // n0x117f c0x0000 (---------------) + I nagatoro + 0x00208388, // n0x1180 c0x0000 (---------------) + I namegawa + 0x00352f45, // n0x1181 c0x0000 (---------------) + I niiza + 0x00374cc5, // n0x1182 c0x0000 (---------------) + I ogano + 0x00245a05, // n0x1183 c0x0000 (---------------) + I ogawa + 0x002b1385, // n0x1184 c0x0000 (---------------) + I ogose + 0x0035ce07, // n0x1185 c0x0000 (---------------) + I okegawa + 0x0020a585, // n0x1186 c0x0000 (---------------) + I omiya + 0x002b8f45, // n0x1187 c0x0000 (---------------) + I otaki + 0x0033f406, // n0x1188 c0x0000 (---------------) + I ranzan + 0x002e7f47, // n0x1189 c0x0000 (---------------) + I ryokami + 0x002d9247, // n0x118a c0x0000 (---------------) + I saitama + 0x002458c6, // n0x118b c0x0000 (---------------) + I sakado + 0x002cc385, // n0x118c c0x0000 
(---------------) + I satte + 0x0029ad86, // n0x118d c0x0000 (---------------) + I sayama + 0x002aaa05, // n0x118e c0x0000 (---------------) + I shiki + 0x00306048, // n0x118f c0x0000 (---------------) + I shiraoka + 0x002e22c4, // n0x1190 c0x0000 (---------------) + I soka + 0x002bfdc6, // n0x1191 c0x0000 (---------------) + I sugito + 0x00265904, // n0x1192 c0x0000 (---------------) + I toda + 0x00222908, // n0x1193 c0x0000 (---------------) + I tokigawa + 0x0038584a, // n0x1194 c0x0000 (---------------) + I tokorozawa + 0x0027b9cc, // n0x1195 c0x0000 (---------------) + I tsurugashima + 0x0020b6c5, // n0x1196 c0x0000 (---------------) + I urawa + 0x00203906, // n0x1197 c0x0000 (---------------) + I warabi + 0x0026eb46, // n0x1198 c0x0000 (---------------) + I yashio + 0x002296c6, // n0x1199 c0x0000 (---------------) + I yokoze + 0x002ff684, // n0x119a c0x0000 (---------------) + I yono + 0x00320c45, // n0x119b c0x0000 (---------------) + I yorii + 0x0027c347, // n0x119c c0x0000 (---------------) + I yoshida + 0x0029c4c9, // n0x119d c0x0000 (---------------) + I yoshikawa + 0x002a42c7, // n0x119e c0x0000 (---------------) + I yoshimi + 0x00686744, // n0x119f c0x0001 (---------------) ! I city + 0x00686744, // n0x11a0 c0x0001 (---------------) ! 
I city + 0x0030bd05, // n0x11a1 c0x0000 (---------------) + I aisho + 0x00228e04, // n0x11a2 c0x0000 (---------------) + I gamo + 0x0029a44a, // n0x11a3 c0x0000 (---------------) + I higashiomi + 0x00278106, // n0x11a4 c0x0000 (---------------) + I hikone + 0x0034cd44, // n0x11a5 c0x0000 (---------------) + I koka + 0x00206b85, // n0x11a6 c0x0000 (---------------) + I konan + 0x002fb505, // n0x11a7 c0x0000 (---------------) + I kosei + 0x00301bc4, // n0x11a8 c0x0000 (---------------) + I koto + 0x002828c7, // n0x11a9 c0x0000 (---------------) + I kusatsu + 0x0020c0c7, // n0x11aa c0x0000 (---------------) + I maibara + 0x002c60c8, // n0x11ab c0x0000 (---------------) + I moriyama + 0x0025e648, // n0x11ac c0x0000 (---------------) + I nagahama + 0x00211809, // n0x11ad c0x0000 (---------------) + I nishiazai + 0x0025ae88, // n0x11ae c0x0000 (---------------) + I notogawa + 0x0029a60b, // n0x11af c0x0000 (---------------) + I omihachiman + 0x0021f304, // n0x11b0 c0x0000 (---------------) + I otsu + 0x00337145, // n0x11b1 c0x0000 (---------------) + I ritto + 0x0027f545, // n0x11b2 c0x0000 (---------------) + I ryuoh + 0x002a8a09, // n0x11b3 c0x0000 (---------------) + I takashima + 0x00305b89, // n0x11b4 c0x0000 (---------------) + I takatsuki + 0x002fe2c8, // n0x11b5 c0x0000 (---------------) + I torahime + 0x0025b488, // n0x11b6 c0x0000 (---------------) + I toyosato + 0x00280644, // n0x11b7 c0x0000 (---------------) + I yasu + 0x002a0b85, // n0x11b8 c0x0000 (---------------) + I akagi + 0x00201883, // n0x11b9 c0x0000 (---------------) + I ama + 0x0021f2c5, // n0x11ba c0x0000 (---------------) + I gotsu + 0x002da386, // n0x11bb c0x0000 (---------------) + I hamada + 0x00293f8c, // n0x11bc c0x0000 (---------------) + I higashiizumo + 0x0021a546, // n0x11bd c0x0000 (---------------) + I hikawa + 0x002d7a86, // n0x11be c0x0000 (---------------) + I hikimi + 0x00294145, // n0x11bf c0x0000 (---------------) + I izumo + 0x00318688, // n0x11c0 c0x0000 (---------------) + I 
kakinoki + 0x002b1c46, // n0x11c1 c0x0000 (---------------) + I masuda + 0x0039fe86, // n0x11c2 c0x0000 (---------------) + I matsue + 0x0022f486, // n0x11c3 c0x0000 (---------------) + I misato + 0x0022058c, // n0x11c4 c0x0000 (---------------) + I nishinoshima + 0x002b7044, // n0x11c5 c0x0000 (---------------) + I ohda + 0x002e4c4a, // n0x11c6 c0x0000 (---------------) + I okinoshima + 0x003a1e08, // n0x11c7 c0x0000 (---------------) + I okuizumo + 0x00293dc7, // n0x11c8 c0x0000 (---------------) + I shimane + 0x0024e7c6, // n0x11c9 c0x0000 (---------------) + I tamayu + 0x00292f07, // n0x11ca c0x0000 (---------------) + I tsuwano + 0x002e0845, // n0x11cb c0x0000 (---------------) + I unnan + 0x00325346, // n0x11cc c0x0000 (---------------) + I yakumo + 0x0034fa06, // n0x11cd c0x0000 (---------------) + I yasugi + 0x00377bc7, // n0x11ce c0x0000 (---------------) + I yatsuka + 0x002b3244, // n0x11cf c0x0000 (---------------) + I arai + 0x002305c5, // n0x11d0 c0x0000 (---------------) + I atami + 0x00277c04, // n0x11d1 c0x0000 (---------------) + I fuji + 0x002a5807, // n0x11d2 c0x0000 (---------------) + I fujieda + 0x00277e48, // n0x11d3 c0x0000 (---------------) + I fujikawa + 0x002789ca, // n0x11d4 c0x0000 (---------------) + I fujinomiya + 0x0027ed47, // n0x11d5 c0x0000 (---------------) + I fukuroi + 0x00242cc7, // n0x11d6 c0x0000 (---------------) + I gotemba + 0x002c1a47, // n0x11d7 c0x0000 (---------------) + I haibara + 0x002d0ac9, // n0x11d8 c0x0000 (---------------) + I hamamatsu + 0x00293f8a, // n0x11d9 c0x0000 (---------------) + I higashiizu + 0x00228083, // n0x11da c0x0000 (---------------) + I ito + 0x00351b45, // n0x11db c0x0000 (---------------) + I iwata + 0x00212583, // n0x11dc c0x0000 (---------------) + I izu + 0x00342449, // n0x11dd c0x0000 (---------------) + I izunokuni + 0x002b9588, // n0x11de c0x0000 (---------------) + I kakegawa + 0x003061c7, // n0x11df c0x0000 (---------------) + I kannami + 0x002e8109, // n0x11e0 c0x0000 
(---------------) + I kawanehon + 0x0021a5c6, // n0x11e1 c0x0000 (---------------) + I kawazu + 0x003a3c08, // n0x11e2 c0x0000 (---------------) + I kikugawa + 0x002d8745, // n0x11e3 c0x0000 (---------------) + I kosai + 0x0035674a, // n0x11e4 c0x0000 (---------------) + I makinohara + 0x002cf149, // n0x11e5 c0x0000 (---------------) + I matsuzaki + 0x0026e649, // n0x11e6 c0x0000 (---------------) + I minamiizu + 0x002bf3c7, // n0x11e7 c0x0000 (---------------) + I mishima + 0x002a8cc9, // n0x11e8 c0x0000 (---------------) + I morimachi + 0x00212448, // n0x11e9 c0x0000 (---------------) + I nishiizu + 0x002ee946, // n0x11ea c0x0000 (---------------) + I numazu + 0x0037e948, // n0x11eb c0x0000 (---------------) + I omaezaki + 0x00212e87, // n0x11ec c0x0000 (---------------) + I shimada + 0x002281c7, // n0x11ed c0x0000 (---------------) + I shimizu + 0x002c5e87, // n0x11ee c0x0000 (---------------) + I shimoda + 0x002b2608, // n0x11ef c0x0000 (---------------) + I shizuoka + 0x002ee646, // n0x11f0 c0x0000 (---------------) + I susono + 0x00245385, // n0x11f1 c0x0000 (---------------) + I yaizu + 0x0027c347, // n0x11f2 c0x0000 (---------------) + I yoshida + 0x00295488, // n0x11f3 c0x0000 (---------------) + I ashikaga + 0x00344d84, // n0x11f4 c0x0000 (---------------) + I bato + 0x0034ac04, // n0x11f5 c0x0000 (---------------) + I haga + 0x002fddc7, // n0x11f6 c0x0000 (---------------) + I ichikai + 0x002ac347, // n0x11f7 c0x0000 (---------------) + I iwafune + 0x002bdaca, // n0x11f8 c0x0000 (---------------) + I kaminokawa + 0x002ee8c6, // n0x11f9 c0x0000 (---------------) + I kanuma + 0x002fa28a, // n0x11fa c0x0000 (---------------) + I karasuyama + 0x002b8547, // n0x11fb c0x0000 (---------------) + I kuroiso + 0x0030b847, // n0x11fc c0x0000 (---------------) + I mashiko + 0x00241044, // n0x11fd c0x0000 (---------------) + I mibu + 0x00263904, // n0x11fe c0x0000 (---------------) + I moka + 0x00226bc6, // n0x11ff c0x0000 (---------------) + I motegi + 0x002ec144, 
// n0x1200 c0x0000 (---------------) + I nasu + 0x002ec14c, // n0x1201 c0x0000 (---------------) + I nasushiobara + 0x00203185, // n0x1202 c0x0000 (---------------) + I nikko + 0x00216889, // n0x1203 c0x0000 (---------------) + I nishikata + 0x00279884, // n0x1204 c0x0000 (---------------) + I nogi + 0x0029ef85, // n0x1205 c0x0000 (---------------) + I ohira + 0x00278448, // n0x1206 c0x0000 (---------------) + I ohtawara + 0x00250045, // n0x1207 c0x0000 (---------------) + I oyama + 0x00307fc6, // n0x1208 c0x0000 (---------------) + I sakura + 0x0020f744, // n0x1209 c0x0000 (---------------) + I sano + 0x0027e58a, // n0x120a c0x0000 (---------------) + I shimotsuke + 0x002a7c86, // n0x120b c0x0000 (---------------) + I shioya + 0x002579ca, // n0x120c c0x0000 (---------------) + I takanezawa + 0x00344e07, // n0x120d c0x0000 (---------------) + I tochigi + 0x00297645, // n0x120e c0x0000 (---------------) + I tsuga + 0x0021c2c5, // n0x120f c0x0000 (---------------) + I ujiie + 0x0036b7ca, // n0x1210 c0x0000 (---------------) + I utsunomiya + 0x002a0605, // n0x1211 c0x0000 (---------------) + I yaita + 0x0029eb86, // n0x1212 c0x0000 (---------------) + I aizumi + 0x00206d04, // n0x1213 c0x0000 (---------------) + I anan + 0x002add06, // n0x1214 c0x0000 (---------------) + I ichiba + 0x00229b05, // n0x1215 c0x0000 (---------------) + I itano + 0x00223206, // n0x1216 c0x0000 (---------------) + I kainan + 0x002aa70c, // n0x1217 c0x0000 (---------------) + I komatsushima + 0x002c704a, // n0x1218 c0x0000 (---------------) + I matsushige + 0x0027f904, // n0x1219 c0x0000 (---------------) + I mima + 0x00228406, // n0x121a c0x0000 (---------------) + I minami + 0x0029c447, // n0x121b c0x0000 (---------------) + I miyoshi + 0x002ca384, // n0x121c c0x0000 (---------------) + I mugi + 0x002ac8c8, // n0x121d c0x0000 (---------------) + I nakagawa + 0x00385746, // n0x121e c0x0000 (---------------) + I naruto + 0x00248649, // n0x121f c0x0000 (---------------) + I sanagochi + 
0x002ad349, // n0x1220 c0x0000 (---------------) + I shishikui + 0x00299fc9, // n0x1221 c0x0000 (---------------) + I tokushima + 0x0036aa46, // n0x1222 c0x0000 (---------------) + I wajiki + 0x00212f86, // n0x1223 c0x0000 (---------------) + I adachi + 0x0037ea87, // n0x1224 c0x0000 (---------------) + I akiruno + 0x002ec888, // n0x1225 c0x0000 (---------------) + I akishima + 0x00212d89, // n0x1226 c0x0000 (---------------) + I aogashima + 0x0039f707, // n0x1227 c0x0000 (---------------) + I arakawa + 0x002b4186, // n0x1228 c0x0000 (---------------) + I bunkyo + 0x003005c7, // n0x1229 c0x0000 (---------------) + I chiyoda + 0x002db1c5, // n0x122a c0x0000 (---------------) + I chofu + 0x0030ba84, // n0x122b c0x0000 (---------------) + I chuo + 0x0037e647, // n0x122c c0x0000 (---------------) + I edogawa + 0x002bc1c5, // n0x122d c0x0000 (---------------) + I fuchu + 0x00286e85, // n0x122e c0x0000 (---------------) + I fussa + 0x002fc5c7, // n0x122f c0x0000 (---------------) + I hachijo + 0x0024f248, // n0x1230 c0x0000 (---------------) + I hachioji + 0x003831c6, // n0x1231 c0x0000 (---------------) + I hamura + 0x0029680d, // n0x1232 c0x0000 (---------------) + I higashikurume + 0x0029888f, // n0x1233 c0x0000 (---------------) + I higashimurayama + 0x0029d84d, // n0x1234 c0x0000 (---------------) + I higashiyamato + 0x0020dbc4, // n0x1235 c0x0000 (---------------) + I hino + 0x0023ba06, // n0x1236 c0x0000 (---------------) + I hinode + 0x002cfa08, // n0x1237 c0x0000 (---------------) + I hinohara + 0x003255c5, // n0x1238 c0x0000 (---------------) + I inagi + 0x00280008, // n0x1239 c0x0000 (---------------) + I itabashi + 0x0021ae8a, // n0x123a c0x0000 (---------------) + I katsushika + 0x00201e04, // n0x123b c0x0000 (---------------) + I kita + 0x002aaac6, // n0x123c c0x0000 (---------------) + I kiyose + 0x0039c647, // n0x123d c0x0000 (---------------) + I kodaira + 0x00226307, // n0x123e c0x0000 (---------------) + I koganei + 0x002a0d89, // n0x123f c0x0000 
(---------------) + I kokubunji + 0x0037e905, // n0x1240 c0x0000 (---------------) + I komae + 0x00301bc4, // n0x1241 c0x0000 (---------------) + I koto + 0x0032a78a, // n0x1242 c0x0000 (---------------) + I kouzushima + 0x002b3009, // n0x1243 c0x0000 (---------------) + I kunitachi + 0x002a8dc7, // n0x1244 c0x0000 (---------------) + I machida + 0x00296ac6, // n0x1245 c0x0000 (---------------) + I meguro + 0x0034cb46, // n0x1246 c0x0000 (---------------) + I minato + 0x002a0ac6, // n0x1247 c0x0000 (---------------) + I mitaka + 0x0035e106, // n0x1248 c0x0000 (---------------) + I mizuho + 0x002cee0f, // n0x1249 c0x0000 (---------------) + I musashimurayama + 0x002cf8c9, // n0x124a c0x0000 (---------------) + I musashino + 0x002546c6, // n0x124b c0x0000 (---------------) + I nakano + 0x00256d06, // n0x124c c0x0000 (---------------) + I nerima + 0x00355a09, // n0x124d c0x0000 (---------------) + I ogasawara + 0x00303707, // n0x124e c0x0000 (---------------) + I okutama + 0x00213a83, // n0x124f c0x0000 (---------------) + I ome + 0x0020ef06, // n0x1250 c0x0000 (---------------) + I oshima + 0x00204083, // n0x1251 c0x0000 (---------------) + I ota + 0x0024cf48, // n0x1252 c0x0000 (---------------) + I setagaya + 0x00300407, // n0x1253 c0x0000 (---------------) + I shibuya + 0x0029f1c9, // n0x1254 c0x0000 (---------------) + I shinagawa + 0x00383608, // n0x1255 c0x0000 (---------------) + I shinjuku + 0x00377e48, // n0x1256 c0x0000 (---------------) + I suginami + 0x0036e146, // n0x1257 c0x0000 (---------------) + I sumida + 0x00223909, // n0x1258 c0x0000 (---------------) + I tachikawa + 0x002400c5, // n0x1259 c0x0000 (---------------) + I taito + 0x0024e7c4, // n0x125a c0x0000 (---------------) + I tama + 0x0024aec7, // n0x125b c0x0000 (---------------) + I toshima + 0x00257085, // n0x125c c0x0000 (---------------) + I chizu + 0x0020dbc4, // n0x125d c0x0000 (---------------) + I hino + 0x00248ac8, // n0x125e c0x0000 (---------------) + I kawahara + 0x002180c4, // 
n0x125f c0x0000 (---------------) + I koge + 0x00301bc7, // n0x1260 c0x0000 (---------------) + I kotoura + 0x0036fc86, // n0x1261 c0x0000 (---------------) + I misasa + 0x002e5a85, // n0x1262 c0x0000 (---------------) + I nanbu + 0x002869c8, // n0x1263 c0x0000 (---------------) + I nichinan + 0x0034ca0b, // n0x1264 c0x0000 (---------------) + I sakaiminato + 0x002f8b87, // n0x1265 c0x0000 (---------------) + I tottori + 0x0036ba46, // n0x1266 c0x0000 (---------------) + I wakasa + 0x002c0c84, // n0x1267 c0x0000 (---------------) + I yazu + 0x0030f5c6, // n0x1268 c0x0000 (---------------) + I yonago + 0x002bf185, // n0x1269 c0x0000 (---------------) + I asahi + 0x002bc1c5, // n0x126a c0x0000 (---------------) + I fuchu + 0x0027dfc9, // n0x126b c0x0000 (---------------) + I fukumitsu + 0x002824c9, // n0x126c c0x0000 (---------------) + I funahashi + 0x00228204, // n0x126d c0x0000 (---------------) + I himi + 0x00228245, // n0x126e c0x0000 (---------------) + I imizu + 0x00228445, // n0x126f c0x0000 (---------------) + I inami + 0x003565c6, // n0x1270 c0x0000 (---------------) + I johana + 0x002fdcc8, // n0x1271 c0x0000 (---------------) + I kamiichi + 0x002b7bc6, // n0x1272 c0x0000 (---------------) + I kurobe + 0x00330c8b, // n0x1273 c0x0000 (---------------) + I nakaniikawa + 0x0030138a, // n0x1274 c0x0000 (---------------) + I namerikawa + 0x00342185, // n0x1275 c0x0000 (---------------) + I nanto + 0x0028a886, // n0x1276 c0x0000 (---------------) + I nyuzen + 0x002f5985, // n0x1277 c0x0000 (---------------) + I oyabe + 0x00218d45, // n0x1278 c0x0000 (---------------) + I taira + 0x0028ed47, // n0x1279 c0x0000 (---------------) + I takaoka + 0x002040c8, // n0x127a c0x0000 (---------------) + I tateyama + 0x0025af04, // n0x127b c0x0000 (---------------) + I toga + 0x002b6586, // n0x127c c0x0000 (---------------) + I tonami + 0x0028e9c6, // n0x127d c0x0000 (---------------) + I toyama + 0x00212607, // n0x127e c0x0000 (---------------) + I unazuki + 0x00300a04, // 
n0x127f c0x0000 (---------------) + I uozu + 0x0027ce46, // n0x1280 c0x0000 (---------------) + I yamada + 0x0023ec85, // n0x1281 c0x0000 (---------------) + I arida + 0x0023ec89, // n0x1282 c0x0000 (---------------) + I aridagawa + 0x00213184, // n0x1283 c0x0000 (---------------) + I gobo + 0x0028e009, // n0x1284 c0x0000 (---------------) + I hashimoto + 0x0027ab86, // n0x1285 c0x0000 (---------------) + I hidaka + 0x002b9ac8, // n0x1286 c0x0000 (---------------) + I hirogawa + 0x00228445, // n0x1287 c0x0000 (---------------) + I inami + 0x00311e85, // n0x1288 c0x0000 (---------------) + I iwade + 0x00223206, // n0x1289 c0x0000 (---------------) + I kainan + 0x0026e889, // n0x128a c0x0000 (---------------) + I kamitonda + 0x00219349, // n0x128b c0x0000 (---------------) + I katsuragi + 0x002d7b06, // n0x128c c0x0000 (---------------) + I kimino + 0x0026b6c8, // n0x128d c0x0000 (---------------) + I kinokawa + 0x00267988, // n0x128e c0x0000 (---------------) + I kitayama + 0x002f5944, // n0x128f c0x0000 (---------------) + I koya + 0x0032dcc4, // n0x1290 c0x0000 (---------------) + I koza + 0x0032dcc8, // n0x1291 c0x0000 (---------------) + I kozagawa + 0x00316788, // n0x1292 c0x0000 (---------------) + I kudoyama + 0x002ae8c9, // n0x1293 c0x0000 (---------------) + I kushimoto + 0x002da306, // n0x1294 c0x0000 (---------------) + I mihama + 0x0022f486, // n0x1295 c0x0000 (---------------) + I misato + 0x0031408d, // n0x1296 c0x0000 (---------------) + I nachikatsuura + 0x00266d06, // n0x1297 c0x0000 (---------------) + I shingu + 0x002a9a49, // n0x1298 c0x0000 (---------------) + I shirahama + 0x00201685, // n0x1299 c0x0000 (---------------) + I taiji + 0x0031f386, // n0x129a c0x0000 (---------------) + I tanabe + 0x00223ac8, // n0x129b c0x0000 (---------------) + I wakayama + 0x00310745, // n0x129c c0x0000 (---------------) + I yuasa + 0x0036adc4, // n0x129d c0x0000 (---------------) + I yura + 0x002bf185, // n0x129e c0x0000 (---------------) + I asahi + 
0x00281b48, // n0x129f c0x0000 (---------------) + I funagata + 0x0029a209, // n0x12a0 c0x0000 (---------------) + I higashine + 0x00277cc4, // n0x12a1 c0x0000 (---------------) + I iide + 0x0033d046, // n0x12a2 c0x0000 (---------------) + I kahoku + 0x0024ff0a, // n0x12a3 c0x0000 (---------------) + I kaminoyama + 0x002c7848, // n0x12a4 c0x0000 (---------------) + I kaneyama + 0x002bdc49, // n0x12a5 c0x0000 (---------------) + I kawanishi + 0x0029378a, // n0x12a6 c0x0000 (---------------) + I mamurogawa + 0x002e8086, // n0x12a7 c0x0000 (---------------) + I mikawa + 0x00298a48, // n0x12a8 c0x0000 (---------------) + I murayama + 0x002cdc45, // n0x12a9 c0x0000 (---------------) + I nagai + 0x002c9e08, // n0x12aa c0x0000 (---------------) + I nakayama + 0x002b1ec5, // n0x12ab c0x0000 (---------------) + I nanyo + 0x0021a489, // n0x12ac c0x0000 (---------------) + I nishikawa + 0x00361849, // n0x12ad c0x0000 (---------------) + I obanazawa + 0x00203282, // n0x12ae c0x0000 (---------------) + I oe + 0x002a6a45, // n0x12af c0x0000 (---------------) + I oguni + 0x0026f6c6, // n0x12b0 c0x0000 (---------------) + I ohkura + 0x0027aac7, // n0x12b1 c0x0000 (---------------) + I oishida + 0x00238905, // n0x12b2 c0x0000 (---------------) + I sagae + 0x002f8486, // n0x12b3 c0x0000 (---------------) + I sakata + 0x00310808, // n0x12b4 c0x0000 (---------------) + I sakegawa + 0x0033bd46, // n0x12b5 c0x0000 (---------------) + I shinjo + 0x00347f09, // n0x12b6 c0x0000 (---------------) + I shirataka + 0x002792c6, // n0x12b7 c0x0000 (---------------) + I shonai + 0x00281cc8, // n0x12b8 c0x0000 (---------------) + I takahata + 0x002a94c5, // n0x12b9 c0x0000 (---------------) + I tendo + 0x0026de86, // n0x12ba c0x0000 (---------------) + I tozawa + 0x0032f7c8, // n0x12bb c0x0000 (---------------) + I tsuruoka + 0x002808c8, // n0x12bc c0x0000 (---------------) + I yamagata + 0x0039e808, // n0x12bd c0x0000 (---------------) + I yamanobe + 0x00366688, // n0x12be c0x0000 
(---------------) + I yonezawa + 0x00217244, // n0x12bf c0x0000 (---------------) + I yuza + 0x0022d843, // n0x12c0 c0x0000 (---------------) + I abu + 0x00348144, // n0x12c1 c0x0000 (---------------) + I hagi + 0x0022f006, // n0x12c2 c0x0000 (---------------) + I hikari + 0x002db204, // n0x12c3 c0x0000 (---------------) + I hofu + 0x002da0c7, // n0x12c4 c0x0000 (---------------) + I iwakuni + 0x0039fd89, // n0x12c5 c0x0000 (---------------) + I kudamatsu + 0x002c0485, // n0x12c6 c0x0000 (---------------) + I mitou + 0x0038e086, // n0x12c7 c0x0000 (---------------) + I nagato + 0x0020ef06, // n0x12c8 c0x0000 (---------------) + I oshima + 0x0026c3cb, // n0x12c9 c0x0000 (---------------) + I shimonoseki + 0x003420c6, // n0x12ca c0x0000 (---------------) + I shunan + 0x00316a86, // n0x12cb c0x0000 (---------------) + I tabuse + 0x0022f588, // n0x12cc c0x0000 (---------------) + I tokuyama + 0x0025bb86, // n0x12cd c0x0000 (---------------) + I toyota + 0x00297443, // n0x12ce c0x0000 (---------------) + I ube + 0x0020f9c3, // n0x12cf c0x0000 (---------------) + I yuu + 0x0030ba84, // n0x12d0 c0x0000 (---------------) + I chuo + 0x00236305, // n0x12d1 c0x0000 (---------------) + I doshi + 0x0036af47, // n0x12d2 c0x0000 (---------------) + I fuefuki + 0x00277e48, // n0x12d3 c0x0000 (---------------) + I fujikawa + 0x00277e4f, // n0x12d4 c0x0000 (---------------) + I fujikawaguchiko + 0x0027c24b, // n0x12d5 c0x0000 (---------------) + I fujiyoshida + 0x002fdac8, // n0x12d6 c0x0000 (---------------) + I hayakawa + 0x0033d0c6, // n0x12d7 c0x0000 (---------------) + I hokuto + 0x0026560e, // n0x12d8 c0x0000 (---------------) + I ichikawamisato + 0x00223203, // n0x12d9 c0x0000 (---------------) + I kai + 0x00240c84, // n0x12da c0x0000 (---------------) + I kofu + 0x00342045, // n0x12db c0x0000 (---------------) + I koshu + 0x00300146, // n0x12dc c0x0000 (---------------) + I kosuge + 0x0028bc0b, // n0x12dd c0x0000 (---------------) + I minami-alps + 0x00290086, // n0x12de 
c0x0000 (---------------) + I minobu + 0x002164c9, // n0x12df c0x0000 (---------------) + I nakamichi + 0x002e5a85, // n0x12e0 c0x0000 (---------------) + I nanbu + 0x00381e08, // n0x12e1 c0x0000 (---------------) + I narusawa + 0x0020c388, // n0x12e2 c0x0000 (---------------) + I nirasaki + 0x0021920c, // n0x12e3 c0x0000 (---------------) + I nishikatsura + 0x0029e5c6, // n0x12e4 c0x0000 (---------------) + I oshino + 0x0021f306, // n0x12e5 c0x0000 (---------------) + I otsuki + 0x00319905, // n0x12e6 c0x0000 (---------------) + I showa + 0x002872c8, // n0x12e7 c0x0000 (---------------) + I tabayama + 0x0027b9c5, // n0x12e8 c0x0000 (---------------) + I tsuru + 0x00387308, // n0x12e9 c0x0000 (---------------) + I uenohara + 0x0029dc8a, // n0x12ea c0x0000 (---------------) + I yamanakako + 0x002a15c9, // n0x12eb c0x0000 (---------------) + I yamanashi + 0x00686744, // n0x12ec c0x0001 (---------------) ! I city + 0x2e200742, // n0x12ed c0x00b8 (n0x12ee-n0x12ef) o I co + 0x000ffa08, // n0x12ee c0x0000 (---------------) + blogspot + 0x00233503, // n0x12ef c0x0000 (---------------) + I com + 0x0023a783, // n0x12f0 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x12f1 c0x0000 (---------------) + I gov + 0x00209003, // n0x12f2 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x12f3 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x12f4 c0x0000 (---------------) + I org + 0x00330b83, // n0x12f5 c0x0000 (---------------) + I biz + 0x00233503, // n0x12f6 c0x0000 (---------------) + I com + 0x0023a783, // n0x12f7 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x12f8 c0x0000 (---------------) + I gov + 0x003a1244, // n0x12f9 c0x0000 (---------------) + I info + 0x0021fe03, // n0x12fa c0x0000 (---------------) + I net + 0x0022d1c3, // n0x12fb c0x0000 (---------------) + I org + 0x0023f743, // n0x12fc c0x0000 (---------------) + I ass + 0x002d4884, // n0x12fd c0x0000 (---------------) + I asso + 0x00233503, // n0x12fe c0x0000 (---------------) + I com + 
0x0023d684, // n0x12ff c0x0000 (---------------) + I coop + 0x0023a783, // n0x1300 c0x0000 (---------------) + I edu + 0x0033d7c4, // n0x1301 c0x0000 (---------------) + I gouv + 0x0026cc83, // n0x1302 c0x0000 (---------------) + I gov + 0x00238bc7, // n0x1303 c0x0000 (---------------) + I medecin + 0x00209003, // n0x1304 c0x0000 (---------------) + I mil + 0x00201483, // n0x1305 c0x0000 (---------------) + I nom + 0x0025c988, // n0x1306 c0x0000 (---------------) + I notaires + 0x0022d1c3, // n0x1307 c0x0000 (---------------) + I org + 0x0034d60b, // n0x1308 c0x0000 (---------------) + I pharmaciens + 0x002e1043, // n0x1309 c0x0000 (---------------) + I prd + 0x00247506, // n0x130a c0x0000 (---------------) + I presse + 0x00200142, // n0x130b c0x0000 (---------------) + I tm + 0x002d1c8b, // n0x130c c0x0000 (---------------) + I veterinaire + 0x0023a783, // n0x130d c0x0000 (---------------) + I edu + 0x0026cc83, // n0x130e c0x0000 (---------------) + I gov + 0x0021fe03, // n0x130f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1310 c0x0000 (---------------) + I org + 0x00233503, // n0x1311 c0x0000 (---------------) + I com + 0x0023a783, // n0x1312 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1313 c0x0000 (---------------) + I gov + 0x0022d1c3, // n0x1314 c0x0000 (---------------) + I org + 0x0022b7c3, // n0x1315 c0x0000 (---------------) + I rep + 0x00203003, // n0x1316 c0x0000 (---------------) + I tra + 0x00201542, // n0x1317 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x1318 c0x0000 (---------------) + blogspot + 0x0022b945, // n0x1319 c0x0000 (---------------) + I busan + 0x003051c8, // n0x131a c0x0000 (---------------) + I chungbuk + 0x003113c8, // n0x131b c0x0000 (---------------) + I chungnam + 0x00200742, // n0x131c c0x0000 (---------------) + I co + 0x0024a3c5, // n0x131d c0x0000 (---------------) + I daegu + 0x00325007, // n0x131e c0x0000 (---------------) + I daejeon + 0x00200482, // n0x131f c0x0000 (---------------) + I es + 
0x00216707, // n0x1320 c0x0000 (---------------) + I gangwon + 0x00202d42, // n0x1321 c0x0000 (---------------) + I go + 0x00242707, // n0x1322 c0x0000 (---------------) + I gwangju + 0x0030b509, // n0x1323 c0x0000 (---------------) + I gyeongbuk + 0x002cd808, // n0x1324 c0x0000 (---------------) + I gyeonggi + 0x00208209, // n0x1325 c0x0000 (---------------) + I gyeongnam + 0x0023f382, // n0x1326 c0x0000 (---------------) + I hs + 0x00268e07, // n0x1327 c0x0000 (---------------) + I incheon + 0x002d7884, // n0x1328 c0x0000 (---------------) + I jeju + 0x003250c7, // n0x1329 c0x0000 (---------------) + I jeonbuk + 0x00301287, // n0x132a c0x0000 (---------------) + I jeonnam + 0x002b5502, // n0x132b c0x0000 (---------------) + I kg + 0x00209003, // n0x132c c0x0000 (---------------) + I mil + 0x0020f702, // n0x132d c0x0000 (---------------) + I ms + 0x00202c02, // n0x132e c0x0000 (---------------) + I ne + 0x00200282, // n0x132f c0x0000 (---------------) + I or + 0x00207782, // n0x1330 c0x0000 (---------------) + I pe + 0x00207002, // n0x1331 c0x0000 (---------------) + I re + 0x00200702, // n0x1332 c0x0000 (---------------) + I sc + 0x00344345, // n0x1333 c0x0000 (---------------) + I seoul + 0x00259585, // n0x1334 c0x0000 (---------------) + I ulsan + 0x00233503, // n0x1335 c0x0000 (---------------) + I com + 0x0023a783, // n0x1336 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1337 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1338 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1339 c0x0000 (---------------) + I org + 0x00233503, // n0x133a c0x0000 (---------------) + I com + 0x0023a783, // n0x133b c0x0000 (---------------) + I edu + 0x0026cc83, // n0x133c c0x0000 (---------------) + I gov + 0x00209003, // n0x133d c0x0000 (---------------) + I mil + 0x0021fe03, // n0x133e c0x0000 (---------------) + I net + 0x0022d1c3, // n0x133f c0x0000 (---------------) + I org + 0x00000301, // n0x1340 c0x0000 (---------------) + c + 0x00233503, // n0x1341 
c0x0000 (---------------) + I com + 0x0023a783, // n0x1342 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1343 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1344 c0x0000 (---------------) + I info + 0x00201603, // n0x1345 c0x0000 (---------------) + I int + 0x0021fe03, // n0x1346 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1347 c0x0000 (---------------) + I org + 0x00220f03, // n0x1348 c0x0000 (---------------) + I per + 0x00233503, // n0x1349 c0x0000 (---------------) + I com + 0x0023a783, // n0x134a c0x0000 (---------------) + I edu + 0x0026cc83, // n0x134b c0x0000 (---------------) + I gov + 0x0021fe03, // n0x134c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x134d c0x0000 (---------------) + I org + 0x00200742, // n0x134e c0x0000 (---------------) + I co + 0x00233503, // n0x134f c0x0000 (---------------) + I com + 0x0023a783, // n0x1350 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1351 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1352 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1353 c0x0000 (---------------) + I org + 0x000ffa08, // n0x1354 c0x0000 (---------------) + blogspot + 0x00201542, // n0x1355 c0x0000 (---------------) + I ac + 0x002bad84, // n0x1356 c0x0000 (---------------) + I assn + 0x00233503, // n0x1357 c0x0000 (---------------) + I com + 0x0023a783, // n0x1358 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1359 c0x0000 (---------------) + I gov + 0x00379ec3, // n0x135a c0x0000 (---------------) + I grp + 0x00234dc5, // n0x135b c0x0000 (---------------) + I hotel + 0x00201603, // n0x135c c0x0000 (---------------) + I int + 0x00322cc3, // n0x135d c0x0000 (---------------) + I ltd + 0x0021fe03, // n0x135e c0x0000 (---------------) + I net + 0x00202d03, // n0x135f c0x0000 (---------------) + I ngo + 0x0022d1c3, // n0x1360 c0x0000 (---------------) + I org + 0x00217443, // n0x1361 c0x0000 (---------------) + I sch + 0x00274803, // n0x1362 c0x0000 (---------------) + I soc + 0x00221a03, 
// n0x1363 c0x0000 (---------------) + I web + 0x00233503, // n0x1364 c0x0000 (---------------) + I com + 0x0023a783, // n0x1365 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1366 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1367 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1368 c0x0000 (---------------) + I org + 0x00200742, // n0x1369 c0x0000 (---------------) + I co + 0x0022d1c3, // n0x136a c0x0000 (---------------) + I org + 0x000ffa08, // n0x136b c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x136c c0x0000 (---------------) + I gov + 0x000ffa08, // n0x136d c0x0000 (---------------) + blogspot + 0x002afc83, // n0x136e c0x0000 (---------------) + I asn + 0x00233503, // n0x136f c0x0000 (---------------) + I com + 0x00236cc4, // n0x1370 c0x0000 (---------------) + I conf + 0x0023a783, // n0x1371 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1372 c0x0000 (---------------) + I gov + 0x0020c782, // n0x1373 c0x0000 (---------------) + I id + 0x00209003, // n0x1374 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1375 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1376 c0x0000 (---------------) + I org + 0x00233503, // n0x1377 c0x0000 (---------------) + I com + 0x0023a783, // n0x1378 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1379 c0x0000 (---------------) + I gov + 0x0020c782, // n0x137a c0x0000 (---------------) + I id + 0x00213ac3, // n0x137b c0x0000 (---------------) + I med + 0x0021fe03, // n0x137c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x137d c0x0000 (---------------) + I org + 0x002db143, // n0x137e c0x0000 (---------------) + I plc + 0x00217443, // n0x137f c0x0000 (---------------) + I sch + 0x00201542, // n0x1380 c0x0000 (---------------) + I ac + 0x00200742, // n0x1381 c0x0000 (---------------) + I co + 0x0026cc83, // n0x1382 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1383 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1384 c0x0000 (---------------) + I org + 
0x00247505, // n0x1385 c0x0000 (---------------) + I press + 0x002d4884, // n0x1386 c0x0000 (---------------) + I asso + 0x00200142, // n0x1387 c0x0000 (---------------) + I tm + 0x000ffa08, // n0x1388 c0x0000 (---------------) + blogspot + 0x00201542, // n0x1389 c0x0000 (---------------) + I ac + 0x00200742, // n0x138a c0x0000 (---------------) + I co + 0x00054d8b, // n0x138b c0x0000 (---------------) + diskstation + 0x00009107, // n0x138c c0x0000 (---------------) + dscloud + 0x0023a783, // n0x138d c0x0000 (---------------) + I edu + 0x0026cc83, // n0x138e c0x0000 (---------------) + I gov + 0x00157b84, // n0x138f c0x0000 (---------------) + i234 + 0x00230483, // n0x1390 c0x0000 (---------------) + I its + 0x00156bc4, // n0x1391 c0x0000 (---------------) + myds + 0x0021fe03, // n0x1392 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1393 c0x0000 (---------------) + I org + 0x002e1c44, // n0x1394 c0x0000 (---------------) + I priv + 0x0010b388, // n0x1395 c0x0000 (---------------) + synology + 0x00200742, // n0x1396 c0x0000 (---------------) + I co + 0x00233503, // n0x1397 c0x0000 (---------------) + I com + 0x0023a783, // n0x1398 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1399 c0x0000 (---------------) + I gov + 0x00209003, // n0x139a c0x0000 (---------------) + I mil + 0x00201483, // n0x139b c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x139c c0x0000 (---------------) + I org + 0x002e1043, // n0x139d c0x0000 (---------------) + I prd + 0x00200142, // n0x139e c0x0000 (---------------) + I tm + 0x000ffa08, // n0x139f c0x0000 (---------------) + blogspot + 0x00233503, // n0x13a0 c0x0000 (---------------) + I com + 0x0023a783, // n0x13a1 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x13a2 c0x0000 (---------------) + I gov + 0x003a1083, // n0x13a3 c0x0000 (---------------) + I inf + 0x00205284, // n0x13a4 c0x0000 (---------------) + I name + 0x0021fe03, // n0x13a5 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13a6 c0x0000 
(---------------) + I org + 0x00233503, // n0x13a7 c0x0000 (---------------) + I com + 0x0023a783, // n0x13a8 c0x0000 (---------------) + I edu + 0x0033d7c4, // n0x13a9 c0x0000 (---------------) + I gouv + 0x0026cc83, // n0x13aa c0x0000 (---------------) + I gov + 0x0021fe03, // n0x13ab c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13ac c0x0000 (---------------) + I org + 0x00247506, // n0x13ad c0x0000 (---------------) + I presse + 0x0023a783, // n0x13ae c0x0000 (---------------) + I edu + 0x0026cc83, // n0x13af c0x0000 (---------------) + I gov + 0x0016ef83, // n0x13b0 c0x0000 (---------------) + nyc + 0x0022d1c3, // n0x13b1 c0x0000 (---------------) + I org + 0x00233503, // n0x13b2 c0x0000 (---------------) + I com + 0x0023a783, // n0x13b3 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x13b4 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x13b5 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13b6 c0x0000 (---------------) + I org + 0x00009107, // n0x13b7 c0x0000 (---------------) + dscloud + 0x000ffa08, // n0x13b8 c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x13b9 c0x0000 (---------------) + I gov + 0x00233503, // n0x13ba c0x0000 (---------------) + I com + 0x0023a783, // n0x13bb c0x0000 (---------------) + I edu + 0x0026cc83, // n0x13bc c0x0000 (---------------) + I gov + 0x0021fe03, // n0x13bd c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13be c0x0000 (---------------) + I org + 0x36633503, // n0x13bf c0x00d9 (n0x13c3-n0x13c4) + I com + 0x0023a783, // n0x13c0 c0x0000 (---------------) + I edu + 0x0021fe03, // n0x13c1 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x13c2 c0x0000 (---------------) + I org + 0x000ffa08, // n0x13c3 c0x0000 (---------------) + blogspot + 0x00201542, // n0x13c4 c0x0000 (---------------) + I ac + 0x00200742, // n0x13c5 c0x0000 (---------------) + I co + 0x00233503, // n0x13c6 c0x0000 (---------------) + I com + 0x0026cc83, // n0x13c7 c0x0000 (---------------) + I gov + 0x0021fe03, // 
n0x13c8 c0x0000 (---------------) + I net + 0x00200282, // n0x13c9 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x13ca c0x0000 (---------------) + I org + 0x0030aec7, // n0x13cb c0x0000 (---------------) + I academy + 0x00209c8b, // n0x13cc c0x0000 (---------------) + I agriculture + 0x00204903, // n0x13cd c0x0000 (---------------) + I air + 0x0023b1c8, // n0x13ce c0x0000 (---------------) + I airguard + 0x002ec607, // n0x13cf c0x0000 (---------------) + I alabama + 0x00279bc6, // n0x13d0 c0x0000 (---------------) + I alaska + 0x00367645, // n0x13d1 c0x0000 (---------------) + I amber + 0x002c8609, // n0x13d2 c0x0000 (---------------) + I ambulance + 0x00208d88, // n0x13d3 c0x0000 (---------------) + I american + 0x002729c9, // n0x13d4 c0x0000 (---------------) + I americana + 0x002729d0, // n0x13d5 c0x0000 (---------------) + I americanantiques + 0x0035994b, // n0x13d6 c0x0000 (---------------) + I americanart + 0x002c8449, // n0x13d7 c0x0000 (---------------) + I amsterdam + 0x00200843, // n0x13d8 c0x0000 (---------------) + I and + 0x00357549, // n0x13d9 c0x0000 (---------------) + I annefrank + 0x00237a06, // n0x13da c0x0000 (---------------) + I anthro + 0x00237a0c, // n0x13db c0x0000 (---------------) + I anthropology + 0x0022ba08, // n0x13dc c0x0000 (---------------) + I antiques + 0x003a2308, // n0x13dd c0x0000 (---------------) + I aquarium + 0x00258249, // n0x13de c0x0000 (---------------) + I arboretum + 0x0029fb0e, // n0x13df c0x0000 (---------------) + I archaeological + 0x0037228b, // n0x13e0 c0x0000 (---------------) + I archaeology + 0x0031fa0c, // n0x13e1 c0x0000 (---------------) + I architecture + 0x002011c3, // n0x13e2 c0x0000 (---------------) + I art + 0x003284cc, // n0x13e3 c0x0000 (---------------) + I artanddesign + 0x002043c9, // n0x13e4 c0x0000 (---------------) + I artcenter + 0x0020b107, // n0x13e5 c0x0000 (---------------) + I artdeco + 0x0023a6cc, // n0x13e6 c0x0000 (---------------) + I arteducation + 0x0039208a, // n0x13e7 
c0x0000 (---------------) + I artgallery + 0x0024bf84, // n0x13e8 c0x0000 (---------------) + I arts + 0x0039eacd, // n0x13e9 c0x0000 (---------------) + I artsandcrafts + 0x00328388, // n0x13ea c0x0000 (---------------) + I asmatart + 0x0039efcd, // n0x13eb c0x0000 (---------------) + I assassination + 0x00252046, // n0x13ec c0x0000 (---------------) + I assisi + 0x002d488b, // n0x13ed c0x0000 (---------------) + I association + 0x00356a09, // n0x13ee c0x0000 (---------------) + I astronomy + 0x002244c7, // n0x13ef c0x0000 (---------------) + I atlanta + 0x002ea306, // n0x13f0 c0x0000 (---------------) + I austin + 0x003082c9, // n0x13f1 c0x0000 (---------------) + I australia + 0x00322f8a, // n0x13f2 c0x0000 (---------------) + I automotive + 0x0035b848, // n0x13f3 c0x0000 (---------------) + I aviation + 0x002e0b84, // n0x13f4 c0x0000 (---------------) + I axis + 0x00277007, // n0x13f5 c0x0000 (---------------) + I badajoz + 0x002a1847, // n0x13f6 c0x0000 (---------------) + I baghdad + 0x002ec084, // n0x13f7 c0x0000 (---------------) + I bahn + 0x00228a84, // n0x13f8 c0x0000 (---------------) + I bale + 0x0025d289, // n0x13f9 c0x0000 (---------------) + I baltimore + 0x002dd889, // n0x13fa c0x0000 (---------------) + I barcelona + 0x0022f9c8, // n0x13fb c0x0000 (---------------) + I baseball + 0x00210685, // n0x13fc c0x0000 (---------------) + I basel + 0x00389345, // n0x13fd c0x0000 (---------------) + I baths + 0x0020d246, // n0x13fe c0x0000 (---------------) + I bauern + 0x0039e989, // n0x13ff c0x0000 (---------------) + I beauxarts + 0x0021b10d, // n0x1400 c0x0000 (---------------) + I beeldengeluid + 0x0031f488, // n0x1401 c0x0000 (---------------) + I bellevue + 0x0020d147, // n0x1402 c0x0000 (---------------) + I bergbau + 0x003676c8, // n0x1403 c0x0000 (---------------) + I berkeley + 0x0022e6c6, // n0x1404 c0x0000 (---------------) + I berlin + 0x00390744, // n0x1405 c0x0000 (---------------) + I bern + 0x0037bf45, // n0x1406 c0x0000 (---------------) 
+ I bible + 0x002028c6, // n0x1407 c0x0000 (---------------) + I bilbao + 0x00202e84, // n0x1408 c0x0000 (---------------) + I bill + 0x002042c7, // n0x1409 c0x0000 (---------------) + I birdart + 0x0020628a, // n0x140a c0x0000 (---------------) + I birthplace + 0x00212384, // n0x140b c0x0000 (---------------) + I bonn + 0x00217cc6, // n0x140c c0x0000 (---------------) + I boston + 0x00218e89, // n0x140d c0x0000 (---------------) + I botanical + 0x00218e8f, // n0x140e c0x0000 (---------------) + I botanicalgarden + 0x0021a18d, // n0x140f c0x0000 (---------------) + I botanicgarden + 0x0021a946, // n0x1410 c0x0000 (---------------) + I botany + 0x0021ca50, // n0x1411 c0x0000 (---------------) + I brandywinevalley + 0x0021ce46, // n0x1412 c0x0000 (---------------) + I brasil + 0x0021db87, // n0x1413 c0x0000 (---------------) + I bristol + 0x0021df07, // n0x1414 c0x0000 (---------------) + I british + 0x0021df0f, // n0x1415 c0x0000 (---------------) + I britishcolumbia + 0x0021fa49, // n0x1416 c0x0000 (---------------) + I broadcast + 0x00222b06, // n0x1417 c0x0000 (---------------) + I brunel + 0x00225e87, // n0x1418 c0x0000 (---------------) + I brussel + 0x00225e88, // n0x1419 c0x0000 (---------------) + I brussels + 0x00227049, // n0x141a c0x0000 (---------------) + I bruxelles + 0x0028fd88, // n0x141b c0x0000 (---------------) + I building + 0x002d7647, // n0x141c c0x0000 (---------------) + I burghof + 0x0020ca03, // n0x141d c0x0000 (---------------) + I bus + 0x00233386, // n0x141e c0x0000 (---------------) + I bushey + 0x00200308, // n0x141f c0x0000 (---------------) + I cadaques + 0x0029fdca, // n0x1420 c0x0000 (---------------) + I california + 0x00221ac9, // n0x1421 c0x0000 (---------------) + I cambridge + 0x00208ec3, // n0x1422 c0x0000 (---------------) + I can + 0x00324f06, // n0x1423 c0x0000 (---------------) + I canada + 0x002b63ca, // n0x1424 c0x0000 (---------------) + I capebreton + 0x00365247, // n0x1425 c0x0000 (---------------) + I carrier + 
0x0020af4a, // n0x1426 c0x0000 (---------------) + I cartoonart + 0x0021460e, // n0x1427 c0x0000 (---------------) + I casadelamoneda + 0x0021fb86, // n0x1428 c0x0000 (---------------) + I castle + 0x002a6c47, // n0x1429 c0x0000 (---------------) + I castres + 0x00211b46, // n0x142a c0x0000 (---------------) + I celtic + 0x00204486, // n0x142b c0x0000 (---------------) + I center + 0x00374acb, // n0x142c c0x0000 (---------------) + I chattanooga + 0x002648ca, // n0x142d c0x0000 (---------------) + I cheltenham + 0x0035150d, // n0x142e c0x0000 (---------------) + I chesapeakebay + 0x00213047, // n0x142f c0x0000 (---------------) + I chicago + 0x00274888, // n0x1430 c0x0000 (---------------) + I children + 0x00274889, // n0x1431 c0x0000 (---------------) + I childrens + 0x0027488f, // n0x1432 c0x0000 (---------------) + I childrensgarden + 0x0023984c, // n0x1433 c0x0000 (---------------) + I chiropractic + 0x002b5a89, // n0x1434 c0x0000 (---------------) + I chocolate + 0x00379b8e, // n0x1435 c0x0000 (---------------) + I christiansburg + 0x00238cca, // n0x1436 c0x0000 (---------------) + I cincinnati + 0x002ce4c6, // n0x1437 c0x0000 (---------------) + I cinema + 0x00337c86, // n0x1438 c0x0000 (---------------) + I circus + 0x00363b8c, // n0x1439 c0x0000 (---------------) + I civilisation + 0x00367b8c, // n0x143a c0x0000 (---------------) + I civilization + 0x0036f008, // n0x143b c0x0000 (---------------) + I civilwar + 0x003894c7, // n0x143c c0x0000 (---------------) + I clinton + 0x002acc45, // n0x143d c0x0000 (---------------) + I clock + 0x00355244, // n0x143e c0x0000 (---------------) + I coal + 0x00386b8e, // n0x143f c0x0000 (---------------) + I coastaldefence + 0x00323204, // n0x1440 c0x0000 (---------------) + I cody + 0x00231d87, // n0x1441 c0x0000 (---------------) + I coldwar + 0x00265b8a, // n0x1442 c0x0000 (---------------) + I collection + 0x00232454, // n0x1443 c0x0000 (---------------) + I colonialwilliamsburg + 0x00232dcf, // n0x1444 c0x0000 
(---------------) + I coloradoplateau + 0x0021e0c8, // n0x1445 c0x0000 (---------------) + I columbia + 0x00233248, // n0x1446 c0x0000 (---------------) + I columbus + 0x0036018d, // n0x1447 c0x0000 (---------------) + I communication + 0x0036018e, // n0x1448 c0x0000 (---------------) + I communications + 0x00233509, // n0x1449 c0x0000 (---------------) + I community + 0x00235488, // n0x144a c0x0000 (---------------) + I computer + 0x0023548f, // n0x144b c0x0000 (---------------) + I computerhistory + 0x0023a3cc, // n0x144c c0x0000 (---------------) + I contemporary + 0x0023a3cf, // n0x144d c0x0000 (---------------) + I contemporaryart + 0x0023b747, // n0x144e c0x0000 (---------------) + I convent + 0x0023de0a, // n0x144f c0x0000 (---------------) + I copenhagen + 0x0021c58b, // n0x1450 c0x0000 (---------------) + I corporation + 0x0023f888, // n0x1451 c0x0000 (---------------) + I corvette + 0x00241807, // n0x1452 c0x0000 (---------------) + I costume + 0x0033658d, // n0x1453 c0x0000 (---------------) + I countryestate + 0x0031ab06, // n0x1454 c0x0000 (---------------) + I county + 0x0039ec86, // n0x1455 c0x0000 (---------------) + I crafts + 0x00243c49, // n0x1456 c0x0000 (---------------) + I cranbrook + 0x00336b48, // n0x1457 c0x0000 (---------------) + I creation + 0x00247888, // n0x1458 c0x0000 (---------------) + I cultural + 0x0024788e, // n0x1459 c0x0000 (---------------) + I culturalcenter + 0x00209d87, // n0x145a c0x0000 (---------------) + I culture + 0x00322345, // n0x145b c0x0000 (---------------) + I cyber + 0x0024a985, // n0x145c c0x0000 (---------------) + I cymru + 0x00210284, // n0x145d c0x0000 (---------------) + I dali + 0x00279e86, // n0x145e c0x0000 (---------------) + I dallas + 0x0022f8c8, // n0x145f c0x0000 (---------------) + I database + 0x002edd03, // n0x1460 c0x0000 (---------------) + I ddr + 0x0025fa0e, // n0x1461 c0x0000 (---------------) + I decorativearts + 0x00336948, // n0x1462 c0x0000 (---------------) + I delaware + 
0x0027af0b, // n0x1463 c0x0000 (---------------) + I delmenhorst + 0x003312c7, // n0x1464 c0x0000 (---------------) + I denmark + 0x00274145, // n0x1465 c0x0000 (---------------) + I depot + 0x0022dcc6, // n0x1466 c0x0000 (---------------) + I design + 0x002aa507, // n0x1467 c0x0000 (---------------) + I detroit + 0x002fac88, // n0x1468 c0x0000 (---------------) + I dinosaur + 0x00330409, // n0x1469 c0x0000 (---------------) + I discovery + 0x00237405, // n0x146a c0x0000 (---------------) + I dolls + 0x002861c8, // n0x146b c0x0000 (---------------) + I donostia + 0x0020cc06, // n0x146c c0x0000 (---------------) + I durham + 0x0037574a, // n0x146d c0x0000 (---------------) + I eastafrica + 0x00386a89, // n0x146e c0x0000 (---------------) + I eastcoast + 0x0023a789, // n0x146f c0x0000 (---------------) + I education + 0x0023a78b, // n0x1470 c0x0000 (---------------) + I educational + 0x0028c908, // n0x1471 c0x0000 (---------------) + I egyptian + 0x002ebf49, // n0x1472 c0x0000 (---------------) + I eisenbahn + 0x00210746, // n0x1473 c0x0000 (---------------) + I elburg + 0x002e4f4a, // n0x1474 c0x0000 (---------------) + I elvendrell + 0x0022980a, // n0x1475 c0x0000 (---------------) + I embroidery + 0x0023e00c, // n0x1476 c0x0000 (---------------) + I encyclopedic + 0x00213707, // n0x1477 c0x0000 (---------------) + I england + 0x002cd60a, // n0x1478 c0x0000 (---------------) + I entomology + 0x00326d8b, // n0x1479 c0x0000 (---------------) + I environment + 0x00326d99, // n0x147a c0x0000 (---------------) + I environmentalconservation + 0x00329a48, // n0x147b c0x0000 (---------------) + I epilepsy + 0x00247585, // n0x147c c0x0000 (---------------) + I essex + 0x002c2486, // n0x147d c0x0000 (---------------) + I estate + 0x0030cf09, // n0x147e c0x0000 (---------------) + I ethnology + 0x00205346, // n0x147f c0x0000 (---------------) + I exeter + 0x002115ca, // n0x1480 c0x0000 (---------------) + I exhibition + 0x00208f86, // n0x1481 c0x0000 (---------------) + I 
family + 0x00271d04, // n0x1482 c0x0000 (---------------) + I farm + 0x002c260d, // n0x1483 c0x0000 (---------------) + I farmequipment + 0x002ece87, // n0x1484 c0x0000 (---------------) + I farmers + 0x00271d09, // n0x1485 c0x0000 (---------------) + I farmstead + 0x00366b05, // n0x1486 c0x0000 (---------------) + I field + 0x0037ac88, // n0x1487 c0x0000 (---------------) + I figueres + 0x0024b549, // n0x1488 c0x0000 (---------------) + I filatelia + 0x0024b784, // n0x1489 c0x0000 (---------------) + I film + 0x0024be87, // n0x148a c0x0000 (---------------) + I fineart + 0x0024be88, // n0x148b c0x0000 (---------------) + I finearts + 0x0024c387, // n0x148c c0x0000 (---------------) + I finland + 0x00267688, // n0x148d c0x0000 (---------------) + I flanders + 0x00252947, // n0x148e c0x0000 (---------------) + I florida + 0x00338705, // n0x148f c0x0000 (---------------) + I force + 0x00259fcc, // n0x1490 c0x0000 (---------------) + I fortmissoula + 0x0025ab89, // n0x1491 c0x0000 (---------------) + I fortworth + 0x002b9f4a, // n0x1492 c0x0000 (---------------) + I foundation + 0x00385d09, // n0x1493 c0x0000 (---------------) + I francaise + 0x00357649, // n0x1494 c0x0000 (---------------) + I frankfurt + 0x00256acc, // n0x1495 c0x0000 (---------------) + I franziskaner + 0x002e7d0b, // n0x1496 c0x0000 (---------------) + I freemasonry + 0x0025c488, // n0x1497 c0x0000 (---------------) + I freiburg + 0x00260048, // n0x1498 c0x0000 (---------------) + I fribourg + 0x002636c4, // n0x1499 c0x0000 (---------------) + I frog + 0x00283c88, // n0x149a c0x0000 (---------------) + I fundacio + 0x00285349, // n0x149b c0x0000 (---------------) + I furniture + 0x00392147, // n0x149c c0x0000 (---------------) + I gallery + 0x002190c6, // n0x149d c0x0000 (---------------) + I garden + 0x00246347, // n0x149e c0x0000 (---------------) + I gateway + 0x00330689, // n0x149f c0x0000 (---------------) + I geelvinck + 0x0021334b, // n0x14a0 c0x0000 (---------------) + I gemological + 
0x00396387, // n0x14a1 c0x0000 (---------------) + I geology + 0x00324c47, // n0x14a2 c0x0000 (---------------) + I georgia + 0x00279907, // n0x14a3 c0x0000 (---------------) + I giessen + 0x0039ef44, // n0x14a4 c0x0000 (---------------) + I glas + 0x0039ef45, // n0x14a5 c0x0000 (---------------) + I glass + 0x002a8705, // n0x14a6 c0x0000 (---------------) + I gorge + 0x0033454b, // n0x14a7 c0x0000 (---------------) + I grandrapids + 0x0038f9c4, // n0x14a8 c0x0000 (---------------) + I graz + 0x00266e08, // n0x14a9 c0x0000 (---------------) + I guernsey + 0x0029168a, // n0x14aa c0x0000 (---------------) + I halloffame + 0x0020ccc7, // n0x14ab c0x0000 (---------------) + I hamburg + 0x0031bbc7, // n0x14ac c0x0000 (---------------) + I handson + 0x0028b492, // n0x14ad c0x0000 (---------------) + I harvestcelebration + 0x0025cb86, // n0x14ae c0x0000 (---------------) + I hawaii + 0x0036b386, // n0x14af c0x0000 (---------------) + I health + 0x0030f8ce, // n0x14b0 c0x0000 (---------------) + I heimatunduhren + 0x0025fd86, // n0x14b1 c0x0000 (---------------) + I hellas + 0x0020ebc8, // n0x14b2 c0x0000 (---------------) + I helsinki + 0x00290d0f, // n0x14b3 c0x0000 (---------------) + I hembygdsforbund + 0x0039f388, // n0x14b4 c0x0000 (---------------) + I heritage + 0x0036d908, // n0x14b5 c0x0000 (---------------) + I histoire + 0x002fb8ca, // n0x14b6 c0x0000 (---------------) + I historical + 0x002fb8d1, // n0x14b7 c0x0000 (---------------) + I historicalsociety + 0x002a1f0e, // n0x14b8 c0x0000 (---------------) + I historichouses + 0x002567ca, // n0x14b9 c0x0000 (---------------) + I historisch + 0x002567cc, // n0x14ba c0x0000 (---------------) + I historisches + 0x00235687, // n0x14bb c0x0000 (---------------) + I history + 0x00235690, // n0x14bc c0x0000 (---------------) + I historyofscience + 0x00202188, // n0x14bd c0x0000 (---------------) + I horology + 0x002a2105, // n0x14be c0x0000 (---------------) + I house + 0x002aad4a, // n0x14bf c0x0000 (---------------) 
+ I humanities + 0x00202ecc, // n0x14c0 c0x0000 (---------------) + I illustration + 0x002b44cd, // n0x14c1 c0x0000 (---------------) + I imageandsound + 0x002a3c46, // n0x14c2 c0x0000 (---------------) + I indian + 0x002a3c47, // n0x14c3 c0x0000 (---------------) + I indiana + 0x002a3c4c, // n0x14c4 c0x0000 (---------------) + I indianapolis + 0x002f120c, // n0x14c5 c0x0000 (---------------) + I indianmarket + 0x0024dd4c, // n0x14c6 c0x0000 (---------------) + I intelligence + 0x0028a0cb, // n0x14c7 c0x0000 (---------------) + I interactive + 0x002859c4, // n0x14c8 c0x0000 (---------------) + I iraq + 0x0021d504, // n0x14c9 c0x0000 (---------------) + I iron + 0x0034fb49, // n0x14ca c0x0000 (---------------) + I isleofman + 0x002c8ec7, // n0x14cb c0x0000 (---------------) + I jamison + 0x00266a49, // n0x14cc c0x0000 (---------------) + I jefferson + 0x00283549, // n0x14cd c0x0000 (---------------) + I jerusalem + 0x00360f47, // n0x14ce c0x0000 (---------------) + I jewelry + 0x00391f06, // n0x14cf c0x0000 (---------------) + I jewish + 0x00391f09, // n0x14d0 c0x0000 (---------------) + I jewishart + 0x00399f83, // n0x14d1 c0x0000 (---------------) + I jfk + 0x0033be4a, // n0x14d2 c0x0000 (---------------) + I journalism + 0x00355487, // n0x14d3 c0x0000 (---------------) + I judaica + 0x0027744b, // n0x14d4 c0x0000 (---------------) + I judygarland + 0x0035138a, // n0x14d5 c0x0000 (---------------) + I juedisches + 0x00242844, // n0x14d6 c0x0000 (---------------) + I juif + 0x00353546, // n0x14d7 c0x0000 (---------------) + I karate + 0x0027efc9, // n0x14d8 c0x0000 (---------------) + I karikatur + 0x0028cd44, // n0x14d9 c0x0000 (---------------) + I kids + 0x0020324a, // n0x14da c0x0000 (---------------) + I koebenhavn + 0x0036bd45, // n0x14db c0x0000 (---------------) + I koeln + 0x002b4d85, // n0x14dc c0x0000 (---------------) + I kunst + 0x002b4d8d, // n0x14dd c0x0000 (---------------) + I kunstsammlung + 0x002b50ce, // n0x14de c0x0000 (---------------) + I 
kunstunddesign + 0x00315585, // n0x14df c0x0000 (---------------) + I labor + 0x0038b2c6, // n0x14e0 c0x0000 (---------------) + I labour + 0x00247107, // n0x14e1 c0x0000 (---------------) + I lajolla + 0x002c990a, // n0x14e2 c0x0000 (---------------) + I lancashire + 0x00323506, // n0x14e3 c0x0000 (---------------) + I landes + 0x00359c44, // n0x14e4 c0x0000 (---------------) + I lans + 0x00359f87, // n0x14e5 c0x0000 (---------------) + I larsson + 0x002def0b, // n0x14e6 c0x0000 (---------------) + I lewismiller + 0x0022e787, // n0x14e7 c0x0000 (---------------) + I lincoln + 0x003a0f44, // n0x14e8 c0x0000 (---------------) + I linz + 0x002414c6, // n0x14e9 c0x0000 (---------------) + I living + 0x002414cd, // n0x14ea c0x0000 (---------------) + I livinghistory + 0x003571cc, // n0x14eb c0x0000 (---------------) + I localhistory + 0x00321906, // n0x14ec c0x0000 (---------------) + I london + 0x0031f68a, // n0x14ed c0x0000 (---------------) + I losangeles + 0x0022b6c6, // n0x14ee c0x0000 (---------------) + I louvre + 0x002a7e88, // n0x14ef c0x0000 (---------------) + I loyalist + 0x002e6147, // n0x14f0 c0x0000 (---------------) + I lucerne + 0x0023ca4a, // n0x14f1 c0x0000 (---------------) + I luxembourg + 0x0023dc86, // n0x14f2 c0x0000 (---------------) + I luzern + 0x00212f43, // n0x14f3 c0x0000 (---------------) + I mad + 0x00317146, // n0x14f4 c0x0000 (---------------) + I madrid + 0x00200188, // n0x14f5 c0x0000 (---------------) + I mallorca + 0x0029a80a, // n0x14f6 c0x0000 (---------------) + I manchester + 0x00251d47, // n0x14f7 c0x0000 (---------------) + I mansion + 0x00251d48, // n0x14f8 c0x0000 (---------------) + I mansions + 0x0026a704, // n0x14f9 c0x0000 (---------------) + I manx + 0x00278f07, // n0x14fa c0x0000 (---------------) + I marburg + 0x00269708, // n0x14fb c0x0000 (---------------) + I maritime + 0x002a3088, // n0x14fc c0x0000 (---------------) + I maritimo + 0x0025cd88, // n0x14fd c0x0000 (---------------) + I maryland + 0x002831ca, // 
n0x14fe c0x0000 (---------------) + I marylhurst + 0x003025c5, // n0x14ff c0x0000 (---------------) + I media + 0x0023ac87, // n0x1500 c0x0000 (---------------) + I medical + 0x00256613, // n0x1501 c0x0000 (---------------) + I medizinhistorisches + 0x00259146, // n0x1502 c0x0000 (---------------) + I meeres + 0x0026cf88, // n0x1503 c0x0000 (---------------) + I memorial + 0x002221c9, // n0x1504 c0x0000 (---------------) + I mesaverde + 0x002165c8, // n0x1505 c0x0000 (---------------) + I michigan + 0x0036e1cb, // n0x1506 c0x0000 (---------------) + I midatlantic + 0x002b8348, // n0x1507 c0x0000 (---------------) + I military + 0x00285244, // n0x1508 c0x0000 (---------------) + I mill + 0x00321106, // n0x1509 c0x0000 (---------------) + I miners + 0x003a5c46, // n0x150a c0x0000 (---------------) + I mining + 0x003058c9, // n0x150b c0x0000 (---------------) + I minnesota + 0x002bf847, // n0x150c c0x0000 (---------------) + I missile + 0x0025a0c8, // n0x150d c0x0000 (---------------) + I missoula + 0x003a1f86, // n0x150e c0x0000 (---------------) + I modern + 0x0037a084, // n0x150f c0x0000 (---------------) + I moma + 0x002c6d85, // n0x1510 c0x0000 (---------------) + I money + 0x002c1888, // n0x1511 c0x0000 (---------------) + I monmouth + 0x002c1fca, // n0x1512 c0x0000 (---------------) + I monticello + 0x002c2288, // n0x1513 c0x0000 (---------------) + I montreal + 0x002c74c6, // n0x1514 c0x0000 (---------------) + I moscow + 0x0029af0a, // n0x1515 c0x0000 (---------------) + I motorcycle + 0x002e6d88, // n0x1516 c0x0000 (---------------) + I muenchen + 0x002ca188, // n0x1517 c0x0000 (---------------) + I muenster + 0x002cb648, // n0x1518 c0x0000 (---------------) + I mulhouse + 0x002cc046, // n0x1519 c0x0000 (---------------) + I muncie + 0x002cfc06, // n0x151a c0x0000 (---------------) + I museet + 0x002ea80c, // n0x151b c0x0000 (---------------) + I museumcenter + 0x002d0110, // n0x151c c0x0000 (---------------) + I museumvereniging + 0x00283a85, // n0x151d 
c0x0000 (---------------) + I music + 0x00319548, // n0x151e c0x0000 (---------------) + I national + 0x00319550, // n0x151f c0x0000 (---------------) + I nationalfirearms + 0x0039f190, // n0x1520 c0x0000 (---------------) + I nationalheritage + 0x0027284e, // n0x1521 c0x0000 (---------------) + I nativeamerican + 0x002ea48e, // n0x1522 c0x0000 (---------------) + I naturalhistory + 0x002ea494, // n0x1523 c0x0000 (---------------) + I naturalhistorymuseum + 0x0031ad4f, // n0x1524 c0x0000 (---------------) + I naturalsciences + 0x0031b106, // n0x1525 c0x0000 (---------------) + I nature + 0x00325e11, // n0x1526 c0x0000 (---------------) + I naturhistorisches + 0x00327393, // n0x1527 c0x0000 (---------------) + I natuurwetenschappen + 0x00327808, // n0x1528 c0x0000 (---------------) + I naumburg + 0x0030f105, // n0x1529 c0x0000 (---------------) + I naval + 0x002d7f48, // n0x152a c0x0000 (---------------) + I nebraska + 0x002de045, // n0x152b c0x0000 (---------------) + I neues + 0x0022a34c, // n0x152c c0x0000 (---------------) + I newhampshire + 0x002aeb89, // n0x152d c0x0000 (---------------) + I newjersey + 0x00231bc9, // n0x152e c0x0000 (---------------) + I newmexico + 0x002460c7, // n0x152f c0x0000 (---------------) + I newport + 0x00221dc9, // n0x1530 c0x0000 (---------------) + I newspaper + 0x002ed0c7, // n0x1531 c0x0000 (---------------) + I newyork + 0x002a2646, // n0x1532 c0x0000 (---------------) + I niepce + 0x0037bd47, // n0x1533 c0x0000 (---------------) + I norfolk + 0x00239c45, // n0x1534 c0x0000 (---------------) + I north + 0x002b5e83, // n0x1535 c0x0000 (---------------) + I nrw + 0x002edec9, // n0x1536 c0x0000 (---------------) + I nuernberg + 0x003518c9, // n0x1537 c0x0000 (---------------) + I nuremberg + 0x0036ef83, // n0x1538 c0x0000 (---------------) + I nyc + 0x00215844, // n0x1539 c0x0000 (---------------) + I nyny + 0x0032154d, // n0x153a c0x0000 (---------------) + I oceanographic + 0x00200b0f, // n0x153b c0x0000 (---------------) + I 
oceanographique + 0x002fc505, // n0x153c c0x0000 (---------------) + I omaha + 0x003175c6, // n0x153d c0x0000 (---------------) + I online + 0x00200987, // n0x153e c0x0000 (---------------) + I ontario + 0x00358c87, // n0x153f c0x0000 (---------------) + I openair + 0x00287ec6, // n0x1540 c0x0000 (---------------) + I oregon + 0x00287ecb, // n0x1541 c0x0000 (---------------) + I oregontrail + 0x002a3605, // n0x1542 c0x0000 (---------------) + I otago + 0x0039bec6, // n0x1543 c0x0000 (---------------) + I oxford + 0x003909c7, // n0x1544 c0x0000 (---------------) + I pacific + 0x0026fec9, // n0x1545 c0x0000 (---------------) + I paderborn + 0x00322046, // n0x1546 c0x0000 (---------------) + I palace + 0x0020ac45, // n0x1547 c0x0000 (---------------) + I paleo + 0x0023a00b, // n0x1548 c0x0000 (---------------) + I palmsprings + 0x0025b986, // n0x1549 c0x0000 (---------------) + I panama + 0x00277905, // n0x154a c0x0000 (---------------) + I paris + 0x002b5648, // n0x154b c0x0000 (---------------) + I pasadena + 0x00375008, // n0x154c c0x0000 (---------------) + I pharmacy + 0x002d30cc, // n0x154d c0x0000 (---------------) + I philadelphia + 0x002d30d0, // n0x154e c0x0000 (---------------) + I philadelphiaarea + 0x002d3789, // n0x154f c0x0000 (---------------) + I philately + 0x002d3bc7, // n0x1550 c0x0000 (---------------) + I phoenix + 0x002d404b, // n0x1551 c0x0000 (---------------) + I photography + 0x002d6506, // n0x1552 c0x0000 (---------------) + I pilots + 0x002d750a, // n0x1553 c0x0000 (---------------) + I pittsburgh + 0x002d898b, // n0x1554 c0x0000 (---------------) + I planetarium + 0x002d8d8a, // n0x1555 c0x0000 (---------------) + I plantation + 0x002d9006, // n0x1556 c0x0000 (---------------) + I plants + 0x002db005, // n0x1557 c0x0000 (---------------) + I plaza + 0x002ec506, // n0x1558 c0x0000 (---------------) + I portal + 0x00279488, // n0x1559 c0x0000 (---------------) + I portland + 0x0024618a, // n0x155a c0x0000 (---------------) + I portlligat + 
0x0035fe1c, // n0x155b c0x0000 (---------------) + I posts-and-telecommunications + 0x002e110c, // n0x155c c0x0000 (---------------) + I preservation + 0x002e1408, // n0x155d c0x0000 (---------------) + I presidio + 0x00247505, // n0x155e c0x0000 (---------------) + I press + 0x002e3107, // n0x155f c0x0000 (---------------) + I project + 0x0029f746, // n0x1560 c0x0000 (---------------) + I public + 0x0038dec5, // n0x1561 c0x0000 (---------------) + I pubol + 0x0021b906, // n0x1562 c0x0000 (---------------) + I quebec + 0x00288088, // n0x1563 c0x0000 (---------------) + I railroad + 0x002b3287, // n0x1564 c0x0000 (---------------) + I railway + 0x0029fa08, // n0x1565 c0x0000 (---------------) + I research + 0x002a6d4a, // n0x1566 c0x0000 (---------------) + I resistance + 0x0030864c, // n0x1567 c0x0000 (---------------) + I riodejaneiro + 0x003088c9, // n0x1568 c0x0000 (---------------) + I rochester + 0x0038e207, // n0x1569 c0x0000 (---------------) + I rockart + 0x00254584, // n0x156a c0x0000 (---------------) + I roma + 0x00252f86, // n0x156b c0x0000 (---------------) + I russia + 0x0036d48a, // n0x156c c0x0000 (---------------) + I saintlouis + 0x00283645, // n0x156d c0x0000 (---------------) + I salem + 0x0034504c, // n0x156e c0x0000 (---------------) + I salvadordali + 0x00345cc8, // n0x156f c0x0000 (---------------) + I salzburg + 0x0034a848, // n0x1570 c0x0000 (---------------) + I sandiego + 0x002004cc, // n0x1571 c0x0000 (---------------) + I sanfrancisco + 0x00210c8c, // n0x1572 c0x0000 (---------------) + I santabarbara + 0x00211189, // n0x1573 c0x0000 (---------------) + I santacruz + 0x002113c7, // n0x1574 c0x0000 (---------------) + I santafe + 0x0033d48c, // n0x1575 c0x0000 (---------------) + I saskatchewan + 0x003897c4, // n0x1576 c0x0000 (---------------) + I satx + 0x00232b4a, // n0x1577 c0x0000 (---------------) + I savannahga + 0x0028d04c, // n0x1578 c0x0000 (---------------) + I schlesisches + 0x0027104b, // n0x1579 c0x0000 (---------------) + 
I schoenbrunn + 0x0023758b, // n0x157a c0x0000 (---------------) + I schokoladen + 0x0023d0c6, // n0x157b c0x0000 (---------------) + I school + 0x00243147, // n0x157c c0x0000 (---------------) + I schweiz + 0x002358c7, // n0x157d c0x0000 (---------------) + I science + 0x002358cf, // n0x157e c0x0000 (---------------) + I science-fiction + 0x002f1b51, // n0x157f c0x0000 (---------------) + I scienceandhistory + 0x0039e252, // n0x1580 c0x0000 (---------------) + I scienceandindustry + 0x0024410d, // n0x1581 c0x0000 (---------------) + I sciencecenter + 0x0024410e, // n0x1582 c0x0000 (---------------) + I sciencecenters + 0x0024444e, // n0x1583 c0x0000 (---------------) + I sciencehistory + 0x0031af08, // n0x1584 c0x0000 (---------------) + I sciences + 0x0031af12, // n0x1585 c0x0000 (---------------) + I sciencesnaturelles + 0x00200708, // n0x1586 c0x0000 (---------------) + I scotland + 0x002fa9c7, // n0x1587 c0x0000 (---------------) + I seaport + 0x0024f98a, // n0x1588 c0x0000 (---------------) + I settlement + 0x00219c08, // n0x1589 c0x0000 (---------------) + I settlers + 0x0025fd45, // n0x158a c0x0000 (---------------) + I shell + 0x0035cc4a, // n0x158b c0x0000 (---------------) + I sherbrooke + 0x0021d987, // n0x158c c0x0000 (---------------) + I sibenik + 0x00341f84, // n0x158d c0x0000 (---------------) + I silk + 0x00209743, // n0x158e c0x0000 (---------------) + I ski + 0x00296cc5, // n0x158f c0x0000 (---------------) + I skole + 0x002fbb47, // n0x1590 c0x0000 (---------------) + I society + 0x002f9607, // n0x1591 c0x0000 (---------------) + I sologne + 0x002b46ce, // n0x1592 c0x0000 (---------------) + I soundandvision + 0x0032bd4d, // n0x1593 c0x0000 (---------------) + I southcarolina + 0x0032e849, // n0x1594 c0x0000 (---------------) + I southwest + 0x0020bb45, // n0x1595 c0x0000 (---------------) + I space + 0x003347c3, // n0x1596 c0x0000 (---------------) + I spy + 0x0027a346, // n0x1597 c0x0000 (---------------) + I square + 0x003643c5, // n0x1598 
c0x0000 (---------------) + I stadt + 0x0027b148, // n0x1599 c0x0000 (---------------) + I stalbans + 0x00323f89, // n0x159a c0x0000 (---------------) + I starnberg + 0x0020f205, // n0x159b c0x0000 (---------------) + I state + 0x0033678f, // n0x159c c0x0000 (---------------) + I stateofdelaware + 0x00254e87, // n0x159d c0x0000 (---------------) + I station + 0x003674c5, // n0x159e c0x0000 (---------------) + I steam + 0x00227c0a, // n0x159f c0x0000 (---------------) + I steiermark + 0x00303b86, // n0x15a0 c0x0000 (---------------) + I stjohn + 0x002a8009, // n0x15a1 c0x0000 (---------------) + I stockholm + 0x002e71cc, // n0x15a2 c0x0000 (---------------) + I stpetersburg + 0x002e8989, // n0x15a3 c0x0000 (---------------) + I stuttgart + 0x00206706, // n0x15a4 c0x0000 (---------------) + I suisse + 0x0029148c, // n0x15a5 c0x0000 (---------------) + I surgeonshall + 0x002e91c6, // n0x15a6 c0x0000 (---------------) + I surrey + 0x002f0688, // n0x15a7 c0x0000 (---------------) + I svizzera + 0x002f0886, // n0x15a8 c0x0000 (---------------) + I sweden + 0x00329bc6, // n0x15a9 c0x0000 (---------------) + I sydney + 0x002294c4, // n0x15aa c0x0000 (---------------) + I tank + 0x0025d103, // n0x15ab c0x0000 (---------------) + I tcm + 0x002d608a, // n0x15ac c0x0000 (---------------) + I technology + 0x0031d2d1, // n0x15ad c0x0000 (---------------) + I telekommunikation + 0x002b5c4a, // n0x15ae c0x0000 (---------------) + I television + 0x0034d1c5, // n0x15af c0x0000 (---------------) + I texas + 0x003844c7, // n0x15b0 c0x0000 (---------------) + I textile + 0x002573c7, // n0x15b1 c0x0000 (---------------) + I theater + 0x00269804, // n0x15b2 c0x0000 (---------------) + I time + 0x0026980b, // n0x15b3 c0x0000 (---------------) + I timekeeping + 0x00208088, // n0x15b4 c0x0000 (---------------) + I topology + 0x002b1946, // n0x15b5 c0x0000 (---------------) + I torino + 0x00311cc5, // n0x15b6 c0x0000 (---------------) + I touch + 0x002dc244, // n0x15b7 c0x0000 
(---------------) + I town + 0x00294809, // n0x15b8 c0x0000 (---------------) + I transport + 0x00355f84, // n0x15b9 c0x0000 (---------------) + I tree + 0x00359147, // n0x15ba c0x0000 (---------------) + I trolley + 0x00329245, // n0x15bb c0x0000 (---------------) + I trust + 0x00329247, // n0x15bc c0x0000 (---------------) + I trustee + 0x0030fb05, // n0x15bd c0x0000 (---------------) + I uhren + 0x00253643, // n0x15be c0x0000 (---------------) + I ulm + 0x002fa888, // n0x15bf c0x0000 (---------------) + I undersea + 0x00320a0a, // n0x15c0 c0x0000 (---------------) + I university + 0x0022b983, // n0x15c1 c0x0000 (---------------) + I usa + 0x0022b98a, // n0x15c2 c0x0000 (---------------) + I usantiques + 0x0028db46, // n0x15c3 c0x0000 (---------------) + I usarts + 0x0033650f, // n0x15c4 c0x0000 (---------------) + I uscountryestate + 0x00337d89, // n0x15c5 c0x0000 (---------------) + I usculture + 0x0025f990, // n0x15c6 c0x0000 (---------------) + I usdecorativearts + 0x0026d6c8, // n0x15c7 c0x0000 (---------------) + I usgarden + 0x002c7d49, // n0x15c8 c0x0000 (---------------) + I ushistory + 0x0029ca87, // n0x15c9 c0x0000 (---------------) + I ushuaia + 0x0024144f, // n0x15ca c0x0000 (---------------) + I uslivinghistory + 0x002e84c4, // n0x15cb c0x0000 (---------------) + I utah + 0x0033d844, // n0x15cc c0x0000 (---------------) + I uvic + 0x00217106, // n0x15cd c0x0000 (---------------) + I valley + 0x00236b46, // n0x15ce c0x0000 (---------------) + I vantaa + 0x0031504a, // n0x15cf c0x0000 (---------------) + I versailles + 0x00311a06, // n0x15d0 c0x0000 (---------------) + I viking + 0x002f8f47, // n0x15d1 c0x0000 (---------------) + I village + 0x002f7e88, // n0x15d2 c0x0000 (---------------) + I virginia + 0x002f8087, // n0x15d3 c0x0000 (---------------) + I virtual + 0x002f8247, // n0x15d4 c0x0000 (---------------) + I virtuel + 0x0034710a, // n0x15d5 c0x0000 (---------------) + I vlaanderen + 0x002fa6cb, // n0x15d6 c0x0000 (---------------) + I 
volkenkunde + 0x00320685, // n0x15d7 c0x0000 (---------------) + I wales + 0x0039de88, // n0x15d8 c0x0000 (---------------) + I wallonie + 0x00203903, // n0x15d9 c0x0000 (---------------) + I war + 0x0023efcc, // n0x15da c0x0000 (---------------) + I washingtondc + 0x00376c4f, // n0x15db c0x0000 (---------------) + I watch-and-clock + 0x002aca4d, // n0x15dc c0x0000 (---------------) + I watchandclock + 0x0023d987, // n0x15dd c0x0000 (---------------) + I western + 0x0032e989, // n0x15de c0x0000 (---------------) + I westfalen + 0x002b5f07, // n0x15df c0x0000 (---------------) + I whaling + 0x00253bc8, // n0x15e0 c0x0000 (---------------) + I wildlife + 0x0023264c, // n0x15e1 c0x0000 (---------------) + I williamsburg + 0x00285148, // n0x15e2 c0x0000 (---------------) + I windmill + 0x00351f08, // n0x15e3 c0x0000 (---------------) + I workshop + 0x0030cb8e, // n0x15e4 c0x0000 (---------------) + I xn--9dbhblg6di + 0x0031d714, // n0x15e5 c0x0000 (---------------) + I xn--comunicaes-v6a2o + 0x0031dc24, // n0x15e6 c0x0000 (---------------) + I xn--correios-e-telecomunicaes-ghc29a + 0x0033b8ca, // n0x15e7 c0x0000 (---------------) + I xn--h1aegh + 0x0035a14b, // n0x15e8 c0x0000 (---------------) + I xn--lns-qla + 0x002ed184, // n0x15e9 c0x0000 (---------------) + I york + 0x002ed189, // n0x15ea c0x0000 (---------------) + I yorkshire + 0x002aab48, // n0x15eb c0x0000 (---------------) + I yosemite + 0x0024b0c5, // n0x15ec c0x0000 (---------------) + I youth + 0x002ed80a, // n0x15ed c0x0000 (---------------) + I zoological + 0x0027a507, // n0x15ee c0x0000 (---------------) + I zoology + 0x002389c4, // n0x15ef c0x0000 (---------------) + I aero + 0x00330b83, // n0x15f0 c0x0000 (---------------) + I biz + 0x00233503, // n0x15f1 c0x0000 (---------------) + I com + 0x0023d684, // n0x15f2 c0x0000 (---------------) + I coop + 0x0023a783, // n0x15f3 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x15f4 c0x0000 (---------------) + I gov + 0x003a1244, // n0x15f5 c0x0000 
(---------------) + I info + 0x00201603, // n0x15f6 c0x0000 (---------------) + I int + 0x00209003, // n0x15f7 c0x0000 (---------------) + I mil + 0x002d0106, // n0x15f8 c0x0000 (---------------) + I museum + 0x00205284, // n0x15f9 c0x0000 (---------------) + I name + 0x0021fe03, // n0x15fa c0x0000 (---------------) + I net + 0x0022d1c3, // n0x15fb c0x0000 (---------------) + I org + 0x00220e43, // n0x15fc c0x0000 (---------------) + I pro + 0x00201542, // n0x15fd c0x0000 (---------------) + I ac + 0x00330b83, // n0x15fe c0x0000 (---------------) + I biz + 0x00200742, // n0x15ff c0x0000 (---------------) + I co + 0x00233503, // n0x1600 c0x0000 (---------------) + I com + 0x0023d684, // n0x1601 c0x0000 (---------------) + I coop + 0x0023a783, // n0x1602 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1603 c0x0000 (---------------) + I gov + 0x00201603, // n0x1604 c0x0000 (---------------) + I int + 0x002d0106, // n0x1605 c0x0000 (---------------) + I museum + 0x0021fe03, // n0x1606 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1607 c0x0000 (---------------) + I org + 0x000ffa08, // n0x1608 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1609 c0x0000 (---------------) + I com + 0x0023a783, // n0x160a c0x0000 (---------------) + I edu + 0x00213183, // n0x160b c0x0000 (---------------) + I gob + 0x0021fe03, // n0x160c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x160d c0x0000 (---------------) + I org + 0x000ffa08, // n0x160e c0x0000 (---------------) + blogspot + 0x00233503, // n0x160f c0x0000 (---------------) + I com + 0x0023a783, // n0x1610 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1611 c0x0000 (---------------) + I gov + 0x00209003, // n0x1612 c0x0000 (---------------) + I mil + 0x00205284, // n0x1613 c0x0000 (---------------) + I name + 0x0021fe03, // n0x1614 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1615 c0x0000 (---------------) + I org + 0x0062f7c8, // n0x1616 c0x0001 (---------------) ! 
I teledata + 0x00200302, // n0x1617 c0x0000 (---------------) + I ca + 0x0022e182, // n0x1618 c0x0000 (---------------) + I cc + 0x00200742, // n0x1619 c0x0000 (---------------) + I co + 0x00233503, // n0x161a c0x0000 (---------------) + I com + 0x0022bf42, // n0x161b c0x0000 (---------------) + I dr + 0x002013c2, // n0x161c c0x0000 (---------------) + I in + 0x003a1244, // n0x161d c0x0000 (---------------) + I info + 0x00207104, // n0x161e c0x0000 (---------------) + I mobi + 0x0021bb02, // n0x161f c0x0000 (---------------) + I mx + 0x00205284, // n0x1620 c0x0000 (---------------) + I name + 0x00200282, // n0x1621 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x1622 c0x0000 (---------------) + I org + 0x00220e43, // n0x1623 c0x0000 (---------------) + I pro + 0x0023d0c6, // n0x1624 c0x0000 (---------------) + I school + 0x00224e42, // n0x1625 c0x0000 (---------------) + I tv + 0x00202382, // n0x1626 c0x0000 (---------------) + I us + 0x0020b942, // n0x1627 c0x0000 (---------------) + I ws + 0x38e22103, // n0x1628 c0x00e3 (n0x162a-n0x162b) o I her + 0x39218ac3, // n0x1629 c0x00e4 (n0x162b-n0x162c) o I his + 0x00057c46, // n0x162a c0x0000 (---------------) + forgot + 0x00057c46, // n0x162b c0x0000 (---------------) + forgot + 0x002d4884, // n0x162c c0x0000 (---------------) + I asso + 0x00116c0c, // n0x162d c0x0000 (---------------) + at-band-camp + 0x0001be0c, // n0x162e c0x0000 (---------------) + azure-mobile + 0x00080ecd, // n0x162f c0x0000 (---------------) + azurewebsites + 0x000fb147, // n0x1630 c0x0000 (---------------) + blogdns + 0x00020208, // n0x1631 c0x0000 (---------------) + broke-it + 0x0019898a, // n0x1632 c0x0000 (---------------) + buyshouses + 0x39e3e2c5, // n0x1633 c0x00e7 (n0x1663-n0x1664) o I cdn77 + 0x0003e2c9, // n0x1634 c0x0000 (---------------) + cdn77-ssl + 0x00009188, // n0x1635 c0x0000 (---------------) + cloudapp + 0x0019d4ca, // n0x1636 c0x0000 (---------------) + cloudfront + 0x0003118e, // n0x1637 c0x0000 (---------------) + 
cloudfunctions + 0x0014c048, // n0x1638 c0x0000 (---------------) + dnsalias + 0x0007c9c7, // n0x1639 c0x0000 (---------------) + dnsdojo + 0x00160607, // n0x163a c0x0000 (---------------) + does-it + 0x0016a009, // n0x163b c0x0000 (---------------) + dontexist + 0x0008cdc7, // n0x163c c0x0000 (---------------) + dsmynas + 0x00197b48, // n0x163d c0x0000 (---------------) + dynalias + 0x000e9d09, // n0x163e c0x0000 (---------------) + dynathome + 0x001a46c5, // n0x163f c0x0000 (---------------) + dynv6 + 0x000a950d, // n0x1640 c0x0000 (---------------) + endofinternet + 0x00008f88, // n0x1641 c0x0000 (---------------) + familyds + 0x3a24c206, // n0x1642 c0x00e8 (n0x1664-n0x1666) o I fastly + 0x00064447, // n0x1643 c0x0000 (---------------) + from-az + 0x00065a47, // n0x1644 c0x0000 (---------------) + from-co + 0x0006a1c7, // n0x1645 c0x0000 (---------------) + from-la + 0x0006f3c7, // n0x1646 c0x0000 (---------------) + from-ny + 0x0000d202, // n0x1647 c0x0000 (---------------) + gb + 0x00157907, // n0x1648 c0x0000 (---------------) + gets-it + 0x00064a8c, // n0x1649 c0x0000 (---------------) + ham-radio-op + 0x00146347, // n0x164a c0x0000 (---------------) + homeftp + 0x000a51c6, // n0x164b c0x0000 (---------------) + homeip + 0x000a59c9, // n0x164c c0x0000 (---------------) + homelinux + 0x000a6fc8, // n0x164d c0x0000 (---------------) + homeunix + 0x000195c2, // n0x164e c0x0000 (---------------) + hu + 0x000013c2, // n0x164f c0x0000 (---------------) + in + 0x00007b0b, // n0x1650 c0x0000 (---------------) + in-the-band + 0x00012789, // n0x1651 c0x0000 (---------------) + is-a-chef + 0x0004e989, // n0x1652 c0x0000 (---------------) + is-a-geek + 0x0008e588, // n0x1653 c0x0000 (---------------) + isa-geek + 0x000ae3c2, // n0x1654 c0x0000 (---------------) + jp + 0x00150c09, // n0x1655 c0x0000 (---------------) + kicks-ass + 0x0002168d, // n0x1656 c0x0000 (---------------) + office-on-the + 0x000dcd47, // n0x1657 c0x0000 (---------------) + podzone + 0x000ecb08, // 
n0x1658 c0x0000 (---------------) + rackmaze + 0x001376cd, // n0x1659 c0x0000 (---------------) + scrapper-site + 0x000046c2, // n0x165a c0x0000 (---------------) + se + 0x0006ba86, // n0x165b c0x0000 (---------------) + selfip + 0x00090208, // n0x165c c0x0000 (---------------) + sells-it + 0x000cb7c8, // n0x165d c0x0000 (---------------) + servebbs + 0x000895c8, // n0x165e c0x0000 (---------------) + serveftp + 0x00054088, // n0x165f c0x0000 (---------------) + thruhere + 0x00000f82, // n0x1660 c0x0000 (---------------) + uk + 0x000eadc6, // n0x1661 c0x0000 (---------------) + webhop + 0x00005f82, // n0x1662 c0x0000 (---------------) + za + 0x000002c1, // n0x1663 c0x0000 (---------------) + r + 0x3a6e2044, // n0x1664 c0x00e9 (n0x1666-n0x1668) o I prod + 0x3aa3e443, // n0x1665 c0x00ea (n0x1668-n0x166b) o I ssl + 0x00000101, // n0x1666 c0x0000 (---------------) + a + 0x0000d846, // n0x1667 c0x0000 (---------------) + global + 0x00000101, // n0x1668 c0x0000 (---------------) + a + 0x00000001, // n0x1669 c0x0000 (---------------) + b + 0x0000d846, // n0x166a c0x0000 (---------------) + global + 0x0024bf84, // n0x166b c0x0000 (---------------) + I arts + 0x00233503, // n0x166c c0x0000 (---------------) + I com + 0x0024d9c4, // n0x166d c0x0000 (---------------) + I firm + 0x003a1244, // n0x166e c0x0000 (---------------) + I info + 0x0021fe03, // n0x166f c0x0000 (---------------) + I net + 0x00222085, // n0x1670 c0x0000 (---------------) + I other + 0x00220f03, // n0x1671 c0x0000 (---------------) + I per + 0x0022a5c3, // n0x1672 c0x0000 (---------------) + I rec + 0x00391185, // n0x1673 c0x0000 (---------------) + I store + 0x00221a03, // n0x1674 c0x0000 (---------------) + I web + 0x3b633503, // n0x1675 c0x00ed (n0x167f-n0x1680) + I com + 0x0023a783, // n0x1676 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1677 c0x0000 (---------------) + I gov + 0x00200041, // n0x1678 c0x0000 (---------------) + I i + 0x00209003, // n0x1679 c0x0000 (---------------) + I mil + 
0x00207104, // n0x167a c0x0000 (---------------) + I mobi + 0x00205284, // n0x167b c0x0000 (---------------) + I name + 0x0021fe03, // n0x167c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x167d c0x0000 (---------------) + I org + 0x00217443, // n0x167e c0x0000 (---------------) + I sch + 0x000ffa08, // n0x167f c0x0000 (---------------) + blogspot + 0x00201542, // n0x1680 c0x0000 (---------------) + I ac + 0x00330b83, // n0x1681 c0x0000 (---------------) + I biz + 0x00200742, // n0x1682 c0x0000 (---------------) + I co + 0x00233503, // n0x1683 c0x0000 (---------------) + I com + 0x0023a783, // n0x1684 c0x0000 (---------------) + I edu + 0x00213183, // n0x1685 c0x0000 (---------------) + I gob + 0x002013c2, // n0x1686 c0x0000 (---------------) + I in + 0x003a1244, // n0x1687 c0x0000 (---------------) + I info + 0x00201603, // n0x1688 c0x0000 (---------------) + I int + 0x00209003, // n0x1689 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x168a c0x0000 (---------------) + I net + 0x00201483, // n0x168b c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x168c c0x0000 (---------------) + I org + 0x00221a03, // n0x168d c0x0000 (---------------) + I web + 0x000ffa08, // n0x168e c0x0000 (---------------) + blogspot + 0x00365782, // n0x168f c0x0000 (---------------) + I bv + 0x00000742, // n0x1690 c0x0000 (---------------) + co + 0x3c622f02, // n0x1691 c0x00f1 (n0x1967-n0x1968) + I aa + 0x00355648, // n0x1692 c0x0000 (---------------) + I aarborte + 0x00223386, // n0x1693 c0x0000 (---------------) + I aejrie + 0x002bb486, // n0x1694 c0x0000 (---------------) + I afjord + 0x00222d07, // n0x1695 c0x0000 (---------------) + I agdenes + 0x3ca04f02, // n0x1696 c0x00f2 (n0x1968-n0x1969) + I ah + 0x3cf7c888, // n0x1697 c0x00f3 (n0x1969-n0x196a) o I akershus + 0x00354a4a, // n0x1698 c0x0000 (---------------) + I aknoluokta + 0x00263ec8, // n0x1699 c0x0000 (---------------) + I akrehamn + 0x002001c2, // n0x169a c0x0000 (---------------) + I al + 0x003552c9, // 
n0x169b c0x0000 (---------------) + I alaheadju + 0x003206c7, // n0x169c c0x0000 (---------------) + I alesund + 0x00219046, // n0x169d c0x0000 (---------------) + I algard + 0x00204e09, // n0x169e c0x0000 (---------------) + I alstahaug + 0x0023adc4, // n0x169f c0x0000 (---------------) + I alta + 0x002bbf06, // n0x16a0 c0x0000 (---------------) + I alvdal + 0x002bb884, // n0x16a1 c0x0000 (---------------) + I amli + 0x00278604, // n0x16a2 c0x0000 (---------------) + I amot + 0x00259c09, // n0x16a3 c0x0000 (---------------) + I andasuolo + 0x0022c086, // n0x16a4 c0x0000 (---------------) + I andebu + 0x00259645, // n0x16a5 c0x0000 (---------------) + I andoy + 0x00267dc5, // n0x16a6 c0x0000 (---------------) + I ardal + 0x00234b07, // n0x16a7 c0x0000 (---------------) + I aremark + 0x002b87c7, // n0x16a8 c0x0000 (---------------) + I arendal + 0x0032fec4, // n0x16a9 c0x0000 (---------------) + I arna + 0x00222f46, // n0x16aa c0x0000 (---------------) + I aseral + 0x002e2b85, // n0x16ab c0x0000 (---------------) + I asker + 0x00230345, // n0x16ac c0x0000 (---------------) + I askim + 0x002f58c5, // n0x16ad c0x0000 (---------------) + I askoy + 0x00389107, // n0x16ae c0x0000 (---------------) + I askvoll + 0x00331105, // n0x16af c0x0000 (---------------) + I asnes + 0x00309f89, // n0x16b0 c0x0000 (---------------) + I audnedaln + 0x00255485, // n0x16b1 c0x0000 (---------------) + I aukra + 0x002fadc4, // n0x16b2 c0x0000 (---------------) + I aure + 0x00323447, // n0x16b3 c0x0000 (---------------) + I aurland + 0x0025de0e, // n0x16b4 c0x0000 (---------------) + I aurskog-holand + 0x00272489, // n0x16b5 c0x0000 (---------------) + I austevoll + 0x0030f789, // n0x16b6 c0x0000 (---------------) + I austrheim + 0x00326bc6, // n0x16b7 c0x0000 (---------------) + I averoy + 0x002f2388, // n0x16b8 c0x0000 (---------------) + I badaddja + 0x002ade0b, // n0x16b9 c0x0000 (---------------) + I bahcavuotna + 0x002d13cc, // n0x16ba c0x0000 (---------------) + I bahccavuotna + 
0x0026a446, // n0x16bb c0x0000 (---------------) + I baidar + 0x00372147, // n0x16bc c0x0000 (---------------) + I bajddar + 0x0025ef05, // n0x16bd c0x0000 (---------------) + I balat + 0x00228a8a, // n0x16be c0x0000 (---------------) + I balestrand + 0x00306f89, // n0x16bf c0x0000 (---------------) + I ballangen + 0x00254b89, // n0x16c0 c0x0000 (---------------) + I balsfjord + 0x00271906, // n0x16c1 c0x0000 (---------------) + I bamble + 0x002ef785, // n0x16c2 c0x0000 (---------------) + I bardu + 0x0027f705, // n0x16c3 c0x0000 (---------------) + I barum + 0x00354549, // n0x16c4 c0x0000 (---------------) + I batsfjord + 0x002f5a4b, // n0x16c5 c0x0000 (---------------) + I bearalvahki + 0x0027c686, // n0x16c6 c0x0000 (---------------) + I beardu + 0x0032e046, // n0x16c7 c0x0000 (---------------) + I beiarn + 0x0020d144, // n0x16c8 c0x0000 (---------------) + I berg + 0x0028b146, // n0x16c9 c0x0000 (---------------) + I bergen + 0x003223c8, // n0x16ca c0x0000 (---------------) + I berlevag + 0x00200006, // n0x16cb c0x0000 (---------------) + I bievat + 0x00398d86, // n0x16cc c0x0000 (---------------) + I bindal + 0x002054c8, // n0x16cd c0x0000 (---------------) + I birkenes + 0x00206507, // n0x16ce c0x0000 (---------------) + I bjarkoy + 0x00206ec9, // n0x16cf c0x0000 (---------------) + I bjerkreim + 0x0020c285, // n0x16d0 c0x0000 (---------------) + I bjugn + 0x000ffa08, // n0x16d1 c0x0000 (---------------) + blogspot + 0x00213204, // n0x16d2 c0x0000 (---------------) + I bodo + 0x003a4004, // n0x16d3 c0x0000 (---------------) + I bokn + 0x00210ac5, // n0x16d4 c0x0000 (---------------) + I bomlo + 0x0038c589, // n0x16d5 c0x0000 (---------------) + I bremanger + 0x00221247, // n0x16d6 c0x0000 (---------------) + I bronnoy + 0x0022124b, // n0x16d7 c0x0000 (---------------) + I bronnoysund + 0x0022260a, // n0x16d8 c0x0000 (---------------) + I brumunddal + 0x00228785, // n0x16d9 c0x0000 (---------------) + I bryne + 0x3d208642, // n0x16da c0x00f4 (n0x196a-n0x196b) 
+ I bu + 0x00351247, // n0x16db c0x0000 (---------------) + I budejju + 0x3d62a108, // n0x16dc c0x00f5 (n0x196b-n0x196c) o I buskerud + 0x002b8007, // n0x16dd c0x0000 (---------------) + I bygland + 0x0022c3c5, // n0x16de c0x0000 (---------------) + I bykle + 0x00356fca, // n0x16df c0x0000 (---------------) + I cahcesuolo + 0x00000742, // n0x16e0 c0x0000 (---------------) + co + 0x002b70cb, // n0x16e1 c0x0000 (---------------) + I davvenjarga + 0x0021490a, // n0x16e2 c0x0000 (---------------) + I davvesiida + 0x0039c006, // n0x16e3 c0x0000 (---------------) + I deatnu + 0x00274143, // n0x16e4 c0x0000 (---------------) + I dep + 0x002381cd, // n0x16e5 c0x0000 (---------------) + I dielddanuorri + 0x00271f0c, // n0x16e6 c0x0000 (---------------) + I divtasvuodna + 0x003079cd, // n0x16e7 c0x0000 (---------------) + I divttasvuotna + 0x00365c05, // n0x16e8 c0x0000 (---------------) + I donna + 0x00243885, // n0x16e9 c0x0000 (---------------) + I dovre + 0x002edd47, // n0x16ea c0x0000 (---------------) + I drammen + 0x003458c9, // n0x16eb c0x0000 (---------------) + I drangedal + 0x00354946, // n0x16ec c0x0000 (---------------) + I drobak + 0x00311885, // n0x16ed c0x0000 (---------------) + I dyroy + 0x0022e2c8, // n0x16ee c0x0000 (---------------) + I egersund + 0x0028b043, // n0x16ef c0x0000 (---------------) + I eid + 0x003116c8, // n0x16f0 c0x0000 (---------------) + I eidfjord + 0x0028b048, // n0x16f1 c0x0000 (---------------) + I eidsberg + 0x002bce07, // n0x16f2 c0x0000 (---------------) + I eidskog + 0x002d8248, // n0x16f3 c0x0000 (---------------) + I eidsvoll + 0x003a6609, // n0x16f4 c0x0000 (---------------) + I eigersund + 0x0023c547, // n0x16f5 c0x0000 (---------------) + I elverum + 0x00209807, // n0x16f6 c0x0000 (---------------) + I enebakk + 0x00279a48, // n0x16f7 c0x0000 (---------------) + I engerdal + 0x002fecc4, // n0x16f8 c0x0000 (---------------) + I etne + 0x002fecc7, // n0x16f9 c0x0000 (---------------) + I etnedal + 0x00251f48, // n0x16fa 
c0x0000 (---------------) + I evenassi + 0x00202b46, // n0x16fb c0x0000 (---------------) + I evenes + 0x003a03cf, // n0x16fc c0x0000 (---------------) + I evje-og-hornnes + 0x00212987, // n0x16fd c0x0000 (---------------) + I farsund + 0x0024cc06, // n0x16fe c0x0000 (---------------) + I fauske + 0x002d77c5, // n0x16ff c0x0000 (---------------) + I fedje + 0x002159c3, // n0x1700 c0x0000 (---------------) + I fet + 0x0034b7c7, // n0x1701 c0x0000 (---------------) + I fetsund + 0x0023f343, // n0x1702 c0x0000 (---------------) + I fhs + 0x0024c546, // n0x1703 c0x0000 (---------------) + I finnoy + 0x0024f086, // n0x1704 c0x0000 (---------------) + I fitjar + 0x00250506, // n0x1705 c0x0000 (---------------) + I fjaler + 0x0028f845, // n0x1706 c0x0000 (---------------) + I fjell + 0x00267683, // n0x1707 c0x0000 (---------------) + I fla + 0x00380648, // n0x1708 c0x0000 (---------------) + I flakstad + 0x0036ce89, // n0x1709 c0x0000 (---------------) + I flatanger + 0x00364c8b, // n0x170a c0x0000 (---------------) + I flekkefjord + 0x00377308, // n0x170b c0x0000 (---------------) + I flesberg + 0x00252605, // n0x170c c0x0000 (---------------) + I flora + 0x00253105, // n0x170d c0x0000 (---------------) + I floro + 0x3da42902, // n0x170e c0x00f6 (n0x196c-n0x196d) + I fm + 0x0037be09, // n0x170f c0x0000 (---------------) + I folkebibl + 0x002558c7, // n0x1710 c0x0000 (---------------) + I folldal + 0x0039bf45, // n0x1711 c0x0000 (---------------) + I forde + 0x00259b07, // n0x1712 c0x0000 (---------------) + I forsand + 0x0025b746, // n0x1713 c0x0000 (---------------) + I fosnes + 0x0035dfc5, // n0x1714 c0x0000 (---------------) + I frana + 0x0036420b, // n0x1715 c0x0000 (---------------) + I fredrikstad + 0x0025c484, // n0x1716 c0x0000 (---------------) + I frei + 0x00263a85, // n0x1717 c0x0000 (---------------) + I frogn + 0x00263bc7, // n0x1718 c0x0000 (---------------) + I froland + 0x00276a06, // n0x1719 c0x0000 (---------------) + I frosta + 0x00276e45, // n0x171a 
c0x0000 (---------------) + I froya + 0x00283e87, // n0x171b c0x0000 (---------------) + I fuoisku + 0x00284ec7, // n0x171c c0x0000 (---------------) + I fuossko + 0x0028db04, // n0x171d c0x0000 (---------------) + I fusa + 0x00288cca, // n0x171e c0x0000 (---------------) + I fylkesbibl + 0x00289188, // n0x171f c0x0000 (---------------) + I fyresdal + 0x002cdcc9, // n0x1720 c0x0000 (---------------) + I gaivuotna + 0x0021dd45, // n0x1721 c0x0000 (---------------) + I galsa + 0x002b7306, // n0x1722 c0x0000 (---------------) + I gamvik + 0x0032258a, // n0x1723 c0x0000 (---------------) + I gangaviika + 0x00267cc6, // n0x1724 c0x0000 (---------------) + I gaular + 0x002b6187, // n0x1725 c0x0000 (---------------) + I gausdal + 0x002cd98d, // n0x1726 c0x0000 (---------------) + I giehtavuoatna + 0x00226cc9, // n0x1727 c0x0000 (---------------) + I gildeskal + 0x00328b05, // n0x1728 c0x0000 (---------------) + I giske + 0x00310047, // n0x1729 c0x0000 (---------------) + I gjemnes + 0x003279c8, // n0x172a c0x0000 (---------------) + I gjerdrum + 0x0032f588, // n0x172b c0x0000 (---------------) + I gjerstad + 0x00249607, // n0x172c c0x0000 (---------------) + I gjesdal + 0x00249e46, // n0x172d c0x0000 (---------------) + I gjovik + 0x00212b87, // n0x172e c0x0000 (---------------) + I gloppen + 0x0024dbc3, // n0x172f c0x0000 (---------------) + I gol + 0x00334544, // n0x1730 c0x0000 (---------------) + I gran + 0x0035bc85, // n0x1731 c0x0000 (---------------) + I grane + 0x00384e47, // n0x1732 c0x0000 (---------------) + I granvin + 0x00388b09, // n0x1733 c0x0000 (---------------) + I gratangen + 0x0022d248, // n0x1734 c0x0000 (---------------) + I grimstad + 0x002b6085, // n0x1735 c0x0000 (---------------) + I grong + 0x0039a744, // n0x1736 c0x0000 (---------------) + I grue + 0x0035d2c5, // n0x1737 c0x0000 (---------------) + I gulen + 0x0025334d, // n0x1738 c0x0000 (---------------) + I guovdageaidnu + 0x00202442, // n0x1739 c0x0000 (---------------) + I ha + 0x002c9b86, 
// n0x173a c0x0000 (---------------) + I habmer + 0x0026b9c6, // n0x173b c0x0000 (---------------) + I hadsel + 0x002a5e4a, // n0x173c c0x0000 (---------------) + I hagebostad + 0x0035f386, // n0x173d c0x0000 (---------------) + I halden + 0x0036d3c5, // n0x173e c0x0000 (---------------) + I halsa + 0x0025e745, // n0x173f c0x0000 (---------------) + I hamar + 0x0025e747, // n0x1740 c0x0000 (---------------) + I hamaroy + 0x0037558c, // n0x1741 c0x0000 (---------------) + I hammarfeasta + 0x0033c28a, // n0x1742 c0x0000 (---------------) + I hammerfest + 0x0028aa06, // n0x1743 c0x0000 (---------------) + I hapmir + 0x002cfb05, // n0x1744 c0x0000 (---------------) + I haram + 0x0028af86, // n0x1745 c0x0000 (---------------) + I hareid + 0x0028b2c7, // n0x1746 c0x0000 (---------------) + I harstad + 0x0028c606, // n0x1747 c0x0000 (---------------) + I hasvik + 0x0028f74c, // n0x1748 c0x0000 (---------------) + I hattfjelldal + 0x00204f49, // n0x1749 c0x0000 (---------------) + I haugesund + 0x3de36587, // n0x174a c0x00f7 (n0x196d-n0x1970) o I hedmark + 0x002910c5, // n0x174b c0x0000 (---------------) + I hemne + 0x002910c6, // n0x174c c0x0000 (---------------) + I hemnes + 0x00291d48, // n0x174d c0x0000 (---------------) + I hemsedal + 0x00232305, // n0x174e c0x0000 (---------------) + I herad + 0x002a4585, // n0x174f c0x0000 (---------------) + I hitra + 0x002a47c8, // n0x1750 c0x0000 (---------------) + I hjartdal + 0x002a49ca, // n0x1751 c0x0000 (---------------) + I hjelmeland + 0x3e248fc2, // n0x1752 c0x00f8 (n0x1970-n0x1971) + I hl + 0x3e60e942, // n0x1753 c0x00f9 (n0x1971-n0x1972) + I hm + 0x00378245, // n0x1754 c0x0000 (---------------) + I hobol + 0x002d7743, // n0x1755 c0x0000 (---------------) + I hof + 0x003a4508, // n0x1756 c0x0000 (---------------) + I hokksund + 0x0023ce83, // n0x1757 c0x0000 (---------------) + I hol + 0x002a4c44, // n0x1758 c0x0000 (---------------) + I hole + 0x002a814b, // n0x1759 c0x0000 (---------------) + I holmestrand + 
0x002ae1c8, // n0x175a c0x0000 (---------------) + I holtalen + 0x002a7ac8, // n0x175b c0x0000 (---------------) + I honefoss + 0x3eb1a749, // n0x175c c0x00fa (n0x1972-n0x1973) o I hordaland + 0x002a8f89, // n0x175d c0x0000 (---------------) + I hornindal + 0x002a9406, // n0x175e c0x0000 (---------------) + I horten + 0x002aa188, // n0x175f c0x0000 (---------------) + I hoyanger + 0x002aa389, // n0x1760 c0x0000 (---------------) + I hoylandet + 0x002ab246, // n0x1761 c0x0000 (---------------) + I hurdal + 0x002ab3c5, // n0x1762 c0x0000 (---------------) + I hurum + 0x00363586, // n0x1763 c0x0000 (---------------) + I hvaler + 0x002ab849, // n0x1764 c0x0000 (---------------) + I hyllestad + 0x00229187, // n0x1765 c0x0000 (---------------) + I ibestad + 0x0026dd46, // n0x1766 c0x0000 (---------------) + I idrett + 0x002e7747, // n0x1767 c0x0000 (---------------) + I inderoy + 0x003547c7, // n0x1768 c0x0000 (---------------) + I iveland + 0x0023fcc4, // n0x1769 c0x0000 (---------------) + I ivgu + 0x3ee1cfc9, // n0x176a c0x00fb (n0x1973-n0x1974) + I jan-mayen + 0x002c57c8, // n0x176b c0x0000 (---------------) + I jessheim + 0x00358388, // n0x176c c0x0000 (---------------) + I jevnaker + 0x00343c87, // n0x176d c0x0000 (---------------) + I jolster + 0x002c1046, // n0x176e c0x0000 (---------------) + I jondal + 0x002fc709, // n0x176f c0x0000 (---------------) + I jorpeland + 0x002bc3c7, // n0x1770 c0x0000 (---------------) + I kafjord + 0x0024fd0a, // n0x1771 c0x0000 (---------------) + I karasjohka + 0x0037dbc8, // n0x1772 c0x0000 (---------------) + I karasjok + 0x00330887, // n0x1773 c0x0000 (---------------) + I karlsoy + 0x00229586, // n0x1774 c0x0000 (---------------) + I karmoy + 0x002650ca, // n0x1775 c0x0000 (---------------) + I kautokeino + 0x0027e3c8, // n0x1776 c0x0000 (---------------) + I kirkenes + 0x0026b445, // n0x1777 c0x0000 (---------------) + I klabu + 0x00374f05, // n0x1778 c0x0000 (---------------) + I klepp + 0x00387dc7, // n0x1779 c0x0000 
(---------------) + I kommune + 0x002bfb09, // n0x177a c0x0000 (---------------) + I kongsberg + 0x002c478b, // n0x177b c0x0000 (---------------) + I kongsvinger + 0x002d6808, // n0x177c c0x0000 (---------------) + I kopervik + 0x00255509, // n0x177d c0x0000 (---------------) + I kraanghke + 0x00250787, // n0x177e c0x0000 (---------------) + I kragero + 0x002af50c, // n0x177f c0x0000 (---------------) + I kristiansand + 0x002b01cc, // n0x1780 c0x0000 (---------------) + I kristiansund + 0x002b04ca, // n0x1781 c0x0000 (---------------) + I krodsherad + 0x002b074c, // n0x1782 c0x0000 (---------------) + I krokstadelva + 0x002bb408, // n0x1783 c0x0000 (---------------) + I kvafjord + 0x002bb608, // n0x1784 c0x0000 (---------------) + I kvalsund + 0x002bb804, // n0x1785 c0x0000 (---------------) + I kvam + 0x002bc589, // n0x1786 c0x0000 (---------------) + I kvanangen + 0x002bc7c9, // n0x1787 c0x0000 (---------------) + I kvinesdal + 0x002bca0a, // n0x1788 c0x0000 (---------------) + I kvinnherad + 0x002bcc89, // n0x1789 c0x0000 (---------------) + I kviteseid + 0x002bcfc7, // n0x178a c0x0000 (---------------) + I kvitsoy + 0x003a4dcc, // n0x178b c0x0000 (---------------) + I laakesvuemie + 0x00338186, // n0x178c c0x0000 (---------------) + I lahppi + 0x00258008, // n0x178d c0x0000 (---------------) + I langevag + 0x00267d86, // n0x178e c0x0000 (---------------) + I lardal + 0x0037f706, // n0x178f c0x0000 (---------------) + I larvik + 0x00328a07, // n0x1790 c0x0000 (---------------) + I lavagis + 0x00272688, // n0x1791 c0x0000 (---------------) + I lavangen + 0x00331a4b, // n0x1792 c0x0000 (---------------) + I leangaviika + 0x002b7ec7, // n0x1793 c0x0000 (---------------) + I lebesby + 0x002598c9, // n0x1794 c0x0000 (---------------) + I leikanger + 0x0035e489, // n0x1795 c0x0000 (---------------) + I leirfjord + 0x00296d87, // n0x1796 c0x0000 (---------------) + I leirvik + 0x002bbac4, // n0x1797 c0x0000 (---------------) + I leka + 0x002bf987, // n0x1798 c0x0000 
(---------------) + I leksvik + 0x003562c6, // n0x1799 c0x0000 (---------------) + I lenvik + 0x00217606, // n0x179a c0x0000 (---------------) + I lerdal + 0x0031f845, // n0x179b c0x0000 (---------------) + I lesja + 0x00219f88, // n0x179c c0x0000 (---------------) + I levanger + 0x002ddf44, // n0x179d c0x0000 (---------------) + I lier + 0x002ddf46, // n0x179e c0x0000 (---------------) + I lierne + 0x0033c14b, // n0x179f c0x0000 (---------------) + I lillehammer + 0x00330089, // n0x17a0 c0x0000 (---------------) + I lillesand + 0x00322dc6, // n0x17a1 c0x0000 (---------------) + I lindas + 0x00345ac9, // n0x17a2 c0x0000 (---------------) + I lindesnes + 0x00389286, // n0x17a3 c0x0000 (---------------) + I loabat + 0x00259dc8, // n0x17a4 c0x0000 (---------------) + I lodingen + 0x00214303, // n0x17a5 c0x0000 (---------------) + I lom + 0x00390905, // n0x17a6 c0x0000 (---------------) + I loppa + 0x00216209, // n0x17a7 c0x0000 (---------------) + I lorenskog + 0x00217745, // n0x17a8 c0x0000 (---------------) + I loten + 0x002fb344, // n0x17a9 c0x0000 (---------------) + I lund + 0x00275306, // n0x17aa c0x0000 (---------------) + I lunner + 0x00378ec5, // n0x17ab c0x0000 (---------------) + I luroy + 0x002dcb06, // n0x17ac c0x0000 (---------------) + I luster + 0x002fccc7, // n0x17ad c0x0000 (---------------) + I lyngdal + 0x00213606, // n0x17ae c0x0000 (---------------) + I lyngen + 0x00298bcb, // n0x17af c0x0000 (---------------) + I malatvuopmi + 0x002e4e47, // n0x17b0 c0x0000 (---------------) + I malselv + 0x00205c86, // n0x17b1 c0x0000 (---------------) + I malvik + 0x0034fcc6, // n0x17b2 c0x0000 (---------------) + I mandal + 0x00234bc6, // n0x17b3 c0x0000 (---------------) + I marker + 0x0032fe89, // n0x17b4 c0x0000 (---------------) + I marnardal + 0x0021ed8a, // n0x17b5 c0x0000 (---------------) + I masfjorden + 0x0032a985, // n0x17b6 c0x0000 (---------------) + I masoy + 0x0022080d, // n0x17b7 c0x0000 (---------------) + I matta-varjjat + 0x002a4ac6, // 
n0x17b8 c0x0000 (---------------) + I meland + 0x002160c6, // n0x17b9 c0x0000 (---------------) + I meldal + 0x00291886, // n0x17ba c0x0000 (---------------) + I melhus + 0x002a7e05, // n0x17bb c0x0000 (---------------) + I meloy + 0x0037c7c7, // n0x17bc c0x0000 (---------------) + I meraker + 0x0029d687, // n0x17bd c0x0000 (---------------) + I midsund + 0x002e5e0e, // n0x17be c0x0000 (---------------) + I midtre-gauldal + 0x00209003, // n0x17bf c0x0000 (---------------) + I mil + 0x002c1009, // n0x17c0 c0x0000 (---------------) + I mjondalen + 0x00239209, // n0x17c1 c0x0000 (---------------) + I mo-i-rana + 0x00228e87, // n0x17c2 c0x0000 (---------------) + I moareke + 0x0026c087, // n0x17c3 c0x0000 (---------------) + I modalen + 0x002a67c5, // n0x17c4 c0x0000 (---------------) + I modum + 0x00325445, // n0x17c5 c0x0000 (---------------) + I molde + 0x3f25d3cf, // n0x17c6 c0x00fc (n0x1974-n0x1976) o I more-og-romsdal + 0x002c7f87, // n0x17c7 c0x0000 (---------------) + I mosjoen + 0x002c8148, // n0x17c8 c0x0000 (---------------) + I moskenes + 0x002c8884, // n0x17c9 c0x0000 (---------------) + I moss + 0x002c8c46, // n0x17ca c0x0000 (---------------) + I mosvik + 0x3f64aa02, // n0x17cb c0x00fd (n0x1976-n0x1977) + I mr + 0x002cc2c6, // n0x17cc c0x0000 (---------------) + I muosat + 0x002d0106, // n0x17cd c0x0000 (---------------) + I museum + 0x00331d8e, // n0x17ce c0x0000 (---------------) + I naamesjevuemie + 0x0031150a, // n0x17cf c0x0000 (---------------) + I namdalseid + 0x002b57c6, // n0x17d0 c0x0000 (---------------) + I namsos + 0x002261ca, // n0x17d1 c0x0000 (---------------) + I namsskogan + 0x002c3349, // n0x17d2 c0x0000 (---------------) + I nannestad + 0x00315d45, // n0x17d3 c0x0000 (---------------) + I naroy + 0x0038ac08, // n0x17d4 c0x0000 (---------------) + I narviika + 0x003a29c6, // n0x17d5 c0x0000 (---------------) + I narvik + 0x00255008, // n0x17d6 c0x0000 (---------------) + I naustdal + 0x0031a1c8, // n0x17d7 c0x0000 (---------------) + I 
navuotna + 0x0030930b, // n0x17d8 c0x0000 (---------------) + I nedre-eiker + 0x00222e05, // n0x17d9 c0x0000 (---------------) + I nesna + 0x00331188, // n0x17da c0x0000 (---------------) + I nesodden + 0x0020560c, // n0x17db c0x0000 (---------------) + I nesoddtangen + 0x0022c287, // n0x17dc c0x0000 (---------------) + I nesseby + 0x0024f8c6, // n0x17dd c0x0000 (---------------) + I nesset + 0x00230048, // n0x17de c0x0000 (---------------) + I nissedal + 0x00279d48, // n0x17df c0x0000 (---------------) + I nittedal + 0x3fa47802, // n0x17e0 c0x00fe (n0x1977-n0x1978) + I nl + 0x002bbccb, // n0x17e1 c0x0000 (---------------) + I nord-aurdal + 0x00397f89, // n0x17e2 c0x0000 (---------------) + I nord-fron + 0x0034c449, // n0x17e3 c0x0000 (---------------) + I nord-odal + 0x00328887, // n0x17e4 c0x0000 (---------------) + I norddal + 0x00249408, // n0x17e5 c0x0000 (---------------) + I nordkapp + 0x3ff45708, // n0x17e6 c0x00ff (n0x1978-n0x197c) o I nordland + 0x0025f08b, // n0x17e7 c0x0000 (---------------) + I nordre-land + 0x0028e409, // n0x17e8 c0x0000 (---------------) + I nordreisa + 0x0021000d, // n0x17e9 c0x0000 (---------------) + I nore-og-uvdal + 0x002547c8, // n0x17ea c0x0000 (---------------) + I notodden + 0x0032c588, // n0x17eb c0x0000 (---------------) + I notteroy + 0x402009c2, // n0x17ec c0x0100 (n0x197c-n0x197d) + I nt + 0x00202d84, // n0x17ed c0x0000 (---------------) + I odda + 0x4060b282, // n0x17ee c0x0101 (n0x197d-n0x197e) + I of + 0x0037dd46, // n0x17ef c0x0000 (---------------) + I oksnes + 0x40a02242, // n0x17f0 c0x0102 (n0x197e-n0x197f) + I ol + 0x0037a0ca, // n0x17f1 c0x0000 (---------------) + I omasvuotna + 0x00352086, // n0x17f2 c0x0000 (---------------) + I oppdal + 0x00225388, // n0x17f3 c0x0000 (---------------) + I oppegard + 0x00255c88, // n0x17f4 c0x0000 (---------------) + I orkanger + 0x002f2686, // n0x17f5 c0x0000 (---------------) + I orkdal + 0x00338a46, // n0x17f6 c0x0000 (---------------) + I orland + 0x002e5306, // n0x17f7 
c0x0000 (---------------) + I orskog + 0x0027b0c5, // n0x17f8 c0x0000 (---------------) + I orsta + 0x00240e44, // n0x17f9 c0x0000 (---------------) + I osen + 0x40ec40c4, // n0x17fa c0x0103 (n0x197f-n0x1980) + I oslo + 0x00334ec6, // n0x17fb c0x0000 (---------------) + I osoyro + 0x002510c7, // n0x17fc c0x0000 (---------------) + I osteroy + 0x4139f9c7, // n0x17fd c0x0104 (n0x1980-n0x1981) o I ostfold + 0x002d968b, // n0x17fe c0x0000 (---------------) + I ostre-toten + 0x0025e1c9, // n0x17ff c0x0000 (---------------) + I overhalla + 0x002438ca, // n0x1800 c0x0000 (---------------) + I ovre-eiker + 0x003173c4, // n0x1801 c0x0000 (---------------) + I oyer + 0x0025e888, // n0x1802 c0x0000 (---------------) + I oygarden + 0x0026db0d, // n0x1803 c0x0000 (---------------) + I oystre-slidre + 0x002e0209, // n0x1804 c0x0000 (---------------) + I porsanger + 0x002e0448, // n0x1805 c0x0000 (---------------) + I porsangu + 0x002e06c9, // n0x1806 c0x0000 (---------------) + I porsgrunn + 0x002e1c44, // n0x1807 c0x0000 (---------------) + I priv + 0x00204d04, // n0x1808 c0x0000 (---------------) + I rade + 0x0027ec05, // n0x1809 c0x0000 (---------------) + I radoy + 0x0027720b, // n0x180a c0x0000 (---------------) + I rahkkeravju + 0x002ae146, // n0x180b c0x0000 (---------------) + I raholt + 0x00334d05, // n0x180c c0x0000 (---------------) + I raisa + 0x0037c0c9, // n0x180d c0x0000 (---------------) + I rakkestad + 0x00223008, // n0x180e c0x0000 (---------------) + I ralingen + 0x00239344, // n0x180f c0x0000 (---------------) + I rana + 0x00228c09, // n0x1810 c0x0000 (---------------) + I randaberg + 0x00248c45, // n0x1811 c0x0000 (---------------) + I rauma + 0x002b8808, // n0x1812 c0x0000 (---------------) + I rendalen + 0x00209ec7, // n0x1813 c0x0000 (---------------) + I rennebu + 0x0030fb88, // n0x1814 c0x0000 (---------------) + I rennesoy + 0x0027f1c6, // n0x1815 c0x0000 (---------------) + I rindal + 0x00351107, // n0x1816 c0x0000 (---------------) + I ringebu + 
0x0020e509, // n0x1817 c0x0000 (---------------) + I ringerike + 0x0023a189, // n0x1818 c0x0000 (---------------) + I ringsaker + 0x00277985, // n0x1819 c0x0000 (---------------) + I risor + 0x00238845, // n0x181a c0x0000 (---------------) + I rissa + 0x4162b682, // n0x181b c0x0105 (n0x1981-n0x1982) + I rl + 0x002fab84, // n0x181c c0x0000 (---------------) + I roan + 0x0029d105, // n0x181d c0x0000 (---------------) + I rodoy + 0x002cd3c6, // n0x181e c0x0000 (---------------) + I rollag + 0x00318545, // n0x181f c0x0000 (---------------) + I romsa + 0x002531c7, // n0x1820 c0x0000 (---------------) + I romskog + 0x00296bc5, // n0x1821 c0x0000 (---------------) + I roros + 0x00276a44, // n0x1822 c0x0000 (---------------) + I rost + 0x00326c86, // n0x1823 c0x0000 (---------------) + I royken + 0x00311907, // n0x1824 c0x0000 (---------------) + I royrvik + 0x0024aa46, // n0x1825 c0x0000 (---------------) + I ruovat + 0x003305c5, // n0x1826 c0x0000 (---------------) + I rygge + 0x00321248, // n0x1827 c0x0000 (---------------) + I salangen + 0x00223585, // n0x1828 c0x0000 (---------------) + I salat + 0x00322c47, // n0x1829 c0x0000 (---------------) + I saltdal + 0x0038ec09, // n0x182a c0x0000 (---------------) + I samnanger + 0x003301ca, // n0x182b c0x0000 (---------------) + I sandefjord + 0x0022e9c7, // n0x182c c0x0000 (---------------) + I sandnes + 0x0022e9cc, // n0x182d c0x0000 (---------------) + I sandnessjoen + 0x00259606, // n0x182e c0x0000 (---------------) + I sandoy + 0x0022d049, // n0x182f c0x0000 (---------------) + I sarpsborg + 0x002314c5, // n0x1830 c0x0000 (---------------) + I sauda + 0x00232248, // n0x1831 c0x0000 (---------------) + I sauherad + 0x00210703, // n0x1832 c0x0000 (---------------) + I sel + 0x00210705, // n0x1833 c0x0000 (---------------) + I selbu + 0x00329905, // n0x1834 c0x0000 (---------------) + I selje + 0x0024d147, // n0x1835 c0x0000 (---------------) + I seljord + 0x41a11cc2, // n0x1836 c0x0106 (n0x1982-n0x1983) + I sf + 
0x002434c7, // n0x1837 c0x0000 (---------------) + I siellak + 0x002cb986, // n0x1838 c0x0000 (---------------) + I sigdal + 0x0021cf06, // n0x1839 c0x0000 (---------------) + I siljan + 0x0034d886, // n0x183a c0x0000 (---------------) + I sirdal + 0x00279c86, // n0x183b c0x0000 (---------------) + I skanit + 0x00307808, // n0x183c c0x0000 (---------------) + I skanland + 0x002d8085, // n0x183d c0x0000 (---------------) + I skaun + 0x0024ccc7, // n0x183e c0x0000 (---------------) + I skedsmo + 0x0024cccd, // n0x183f c0x0000 (---------------) + I skedsmokorset + 0x00209743, // n0x1840 c0x0000 (---------------) + I ski + 0x00209745, // n0x1841 c0x0000 (---------------) + I skien + 0x002279c7, // n0x1842 c0x0000 (---------------) + I skierva + 0x002d1b48, // n0x1843 c0x0000 (---------------) + I skiptvet + 0x00227585, // n0x1844 c0x0000 (---------------) + I skjak + 0x00230ec8, // n0x1845 c0x0000 (---------------) + I skjervoy + 0x00266946, // n0x1846 c0x0000 (---------------) + I skodje + 0x0023e487, // n0x1847 c0x0000 (---------------) + I slattum + 0x002c0645, // n0x1848 c0x0000 (---------------) + I smola + 0x00222e86, // n0x1849 c0x0000 (---------------) + I snaase + 0x003604c5, // n0x184a c0x0000 (---------------) + I snasa + 0x002bae0a, // n0x184b c0x0000 (---------------) + I snillfjord + 0x002d9146, // n0x184c c0x0000 (---------------) + I snoasa + 0x0023c8c7, // n0x184d c0x0000 (---------------) + I sogndal + 0x002af145, // n0x184e c0x0000 (---------------) + I sogne + 0x002e2f47, // n0x184f c0x0000 (---------------) + I sokndal + 0x00359f04, // n0x1850 c0x0000 (---------------) + I sola + 0x002fb2c6, // n0x1851 c0x0000 (---------------) + I solund + 0x0037de85, // n0x1852 c0x0000 (---------------) + I somna + 0x0022be8b, // n0x1853 c0x0000 (---------------) + I sondre-land + 0x00356149, // n0x1854 c0x0000 (---------------) + I songdalen + 0x00378c8a, // n0x1855 c0x0000 (---------------) + I sor-aurdal + 0x00277a08, // n0x1856 c0x0000 (---------------) + I 
sor-fron + 0x002f43c8, // n0x1857 c0x0000 (---------------) + I sor-odal + 0x002f69cc, // n0x1858 c0x0000 (---------------) + I sor-varanger + 0x002fbfc7, // n0x1859 c0x0000 (---------------) + I sorfold + 0x002fd8c8, // n0x185a c0x0000 (---------------) + I sorreisa + 0x00301f88, // n0x185b c0x0000 (---------------) + I sortland + 0x003057c5, // n0x185c c0x0000 (---------------) + I sorum + 0x002bd24a, // n0x185d c0x0000 (---------------) + I spjelkavik + 0x003347c9, // n0x185e c0x0000 (---------------) + I spydeberg + 0x41e02742, // n0x185f c0x0107 (n0x1983-n0x1984) + I st + 0x00202746, // n0x1860 c0x0000 (---------------) + I stange + 0x0020f204, // n0x1861 c0x0000 (---------------) + I stat + 0x002ded49, // n0x1862 c0x0000 (---------------) + I stathelle + 0x002f35c9, // n0x1863 c0x0000 (---------------) + I stavanger + 0x00225047, // n0x1864 c0x0000 (---------------) + I stavern + 0x00251a47, // n0x1865 c0x0000 (---------------) + I steigen + 0x002833c9, // n0x1866 c0x0000 (---------------) + I steinkjer + 0x0038e808, // n0x1867 c0x0000 (---------------) + I stjordal + 0x0038e80f, // n0x1868 c0x0000 (---------------) + I stjordalshalsen + 0x00275e46, // n0x1869 c0x0000 (---------------) + I stokke + 0x0024688b, // n0x186a c0x0000 (---------------) + I stor-elvdal + 0x0037d0c5, // n0x186b c0x0000 (---------------) + I stord + 0x0037d0c7, // n0x186c c0x0000 (---------------) + I stordal + 0x002e6f89, // n0x186d c0x0000 (---------------) + I storfjord + 0x00228b86, // n0x186e c0x0000 (---------------) + I strand + 0x00228b87, // n0x186f c0x0000 (---------------) + I stranda + 0x0039e5c5, // n0x1870 c0x0000 (---------------) + I stryn + 0x00237304, // n0x1871 c0x0000 (---------------) + I sula + 0x002402c6, // n0x1872 c0x0000 (---------------) + I suldal + 0x00205084, // n0x1873 c0x0000 (---------------) + I sund + 0x0030a587, // n0x1874 c0x0000 (---------------) + I sunndal + 0x002e8fc8, // n0x1875 c0x0000 (---------------) + I surnadal + 0x422ef688, // n0x1876 
c0x0108 (n0x1984-n0x1985) + I svalbard + 0x002efc85, // n0x1877 c0x0000 (---------------) + I sveio + 0x002efdc7, // n0x1878 c0x0000 (---------------) + I svelvik + 0x00375cc9, // n0x1879 c0x0000 (---------------) + I sykkylven + 0x00206cc4, // n0x187a c0x0000 (---------------) + I tana + 0x00206cc8, // n0x187b c0x0000 (---------------) + I tananger + 0x42664f08, // n0x187c c0x0109 (n0x1985-n0x1987) o I telemark + 0x00269804, // n0x187d c0x0000 (---------------) + I time + 0x00237e88, // n0x187e c0x0000 (---------------) + I tingvoll + 0x002ea3c4, // n0x187f c0x0000 (---------------) + I tinn + 0x00226789, // n0x1880 c0x0000 (---------------) + I tjeldsund + 0x0026cec5, // n0x1881 c0x0000 (---------------) + I tjome + 0x42a00142, // n0x1882 c0x010a (n0x1987-n0x1988) + I tm + 0x00275e85, // n0x1883 c0x0000 (---------------) + I tokke + 0x0021dc85, // n0x1884 c0x0000 (---------------) + I tolga + 0x003661c8, // n0x1885 c0x0000 (---------------) + I tonsberg + 0x0023b587, // n0x1886 c0x0000 (---------------) + I torsken + 0x42e03002, // n0x1887 c0x010b (n0x1988-n0x1989) + I tr + 0x002c9d45, // n0x1888 c0x0000 (---------------) + I trana + 0x00274dc6, // n0x1889 c0x0000 (---------------) + I tranby + 0x00292546, // n0x188a c0x0000 (---------------) + I tranoy + 0x002fab48, // n0x188b c0x0000 (---------------) + I troandin + 0x002ffbc8, // n0x188c c0x0000 (---------------) + I trogstad + 0x00318506, // n0x188d c0x0000 (---------------) + I tromsa + 0x00323bc6, // n0x188e c0x0000 (---------------) + I tromso + 0x00352589, // n0x188f c0x0000 (---------------) + I trondheim + 0x00341ec6, // n0x1890 c0x0000 (---------------) + I trysil + 0x00356ccb, // n0x1891 c0x0000 (---------------) + I tvedestrand + 0x0024f6c5, // n0x1892 c0x0000 (---------------) + I tydal + 0x00219b46, // n0x1893 c0x0000 (---------------) + I tynset + 0x00215a48, // n0x1894 c0x0000 (---------------) + I tysfjord + 0x002336c6, // n0x1895 c0x0000 (---------------) + I tysnes + 0x00235ec6, // n0x1896 
c0x0000 (---------------) + I tysvar + 0x0021518a, // n0x1897 c0x0000 (---------------) + I ullensaker + 0x0034440a, // n0x1898 c0x0000 (---------------) + I ullensvang + 0x0028acc5, // n0x1899 c0x0000 (---------------) + I ulvik + 0x002c7a87, // n0x189a c0x0000 (---------------) + I unjarga + 0x00341946, // n0x189b c0x0000 (---------------) + I utsira + 0x432000c2, // n0x189c c0x010c (n0x1989-n0x198a) + I va + 0x00227b07, // n0x189d c0x0000 (---------------) + I vaapste + 0x00274745, // n0x189e c0x0000 (---------------) + I vadso + 0x00322504, // n0x189f c0x0000 (---------------) + I vaga + 0x00322505, // n0x18a0 c0x0000 (---------------) + I vagan + 0x003172c6, // n0x18a1 c0x0000 (---------------) + I vagsoy + 0x0032cec7, // n0x18a2 c0x0000 (---------------) + I vaksdal + 0x00217105, // n0x18a3 c0x0000 (---------------) + I valle + 0x0021a004, // n0x18a4 c0x0000 (---------------) + I vang + 0x0026f108, // n0x18a5 c0x0000 (---------------) + I vanylven + 0x00235f85, // n0x18a6 c0x0000 (---------------) + I vardo + 0x002923c7, // n0x18a7 c0x0000 (---------------) + I varggat + 0x002f0545, // n0x18a8 c0x0000 (---------------) + I varoy + 0x002140c5, // n0x18a9 c0x0000 (---------------) + I vefsn + 0x00230284, // n0x18aa c0x0000 (---------------) + I vega + 0x0028a309, // n0x18ab c0x0000 (---------------) + I vegarshei + 0x002e29c8, // n0x18ac c0x0000 (---------------) + I vennesla + 0x00374106, // n0x18ad c0x0000 (---------------) + I verdal + 0x0033f346, // n0x18ae c0x0000 (---------------) + I verran + 0x00269506, // n0x18af c0x0000 (---------------) + I vestby + 0x4379aa48, // n0x18b0 c0x010d (n0x198a-n0x198b) o I vestfold + 0x002f3807, // n0x18b1 c0x0000 (---------------) + I vestnes + 0x002f3e8d, // n0x18b2 c0x0000 (---------------) + I vestre-slidre + 0x002f45cc, // n0x18b3 c0x0000 (---------------) + I vestre-toten + 0x002f4bc9, // n0x18b4 c0x0000 (---------------) + I vestvagoy + 0x002f4e09, // n0x18b5 c0x0000 (---------------) + I vevelstad + 0x43b4f482, // 
n0x18b6 c0x010e (n0x198b-n0x198c) + I vf + 0x00399d43, // n0x18b7 c0x0000 (---------------) + I vgs + 0x00205d43, // n0x18b8 c0x0000 (---------------) + I vik + 0x00356385, // n0x18b9 c0x0000 (---------------) + I vikna + 0x00384f4a, // n0x18ba c0x0000 (---------------) + I vindafjord + 0x003183c6, // n0x18bb c0x0000 (---------------) + I voagat + 0x002f9845, // n0x18bc c0x0000 (---------------) + I volda + 0x002fd204, // n0x18bd c0x0000 (---------------) + I voss + 0x002fd20b, // n0x18be c0x0000 (---------------) + I vossevangen + 0x0030d90c, // n0x18bf c0x0000 (---------------) + I xn--andy-ira + 0x0030e14c, // n0x18c0 c0x0000 (---------------) + I xn--asky-ira + 0x0030e455, // n0x18c1 c0x0000 (---------------) + I xn--aurskog-hland-jnb + 0x003104cd, // n0x18c2 c0x0000 (---------------) + I xn--avery-yua + 0x003128cf, // n0x18c3 c0x0000 (---------------) + I xn--bdddj-mrabd + 0x00312c92, // n0x18c4 c0x0000 (---------------) + I xn--bearalvhki-y4a + 0x0031310f, // n0x18c5 c0x0000 (---------------) + I xn--berlevg-jxa + 0x003134d2, // n0x18c6 c0x0000 (---------------) + I xn--bhcavuotna-s4a + 0x00313953, // n0x18c7 c0x0000 (---------------) + I xn--bhccavuotna-k7a + 0x00313e0d, // n0x18c8 c0x0000 (---------------) + I xn--bidr-5nac + 0x003143cd, // n0x18c9 c0x0000 (---------------) + I xn--bievt-0qa + 0x0031474e, // n0x18ca c0x0000 (---------------) + I xn--bjarky-fya + 0x00314c0e, // n0x18cb c0x0000 (---------------) + I xn--bjddar-pta + 0x0031534c, // n0x18cc c0x0000 (---------------) + I xn--blt-elab + 0x003156cc, // n0x18cd c0x0000 (---------------) + I xn--bmlo-gra + 0x00315b0b, // n0x18ce c0x0000 (---------------) + I xn--bod-2na + 0x00315e8e, // n0x18cf c0x0000 (---------------) + I xn--brnny-wuac + 0x003178d2, // n0x18d0 c0x0000 (---------------) + I xn--brnnysund-m8ac + 0x0031818c, // n0x18d1 c0x0000 (---------------) + I xn--brum-voa + 0x003188d0, // n0x18d2 c0x0000 (---------------) + I xn--btsfjord-9za + 0x00329d52, // n0x18d3 c0x0000 (---------------) 
+ I xn--davvenjrga-y4a + 0x0032aacc, // n0x18d4 c0x0000 (---------------) + I xn--dnna-gra + 0x0032b18d, // n0x18d5 c0x0000 (---------------) + I xn--drbak-wua + 0x0032b4cc, // n0x18d6 c0x0000 (---------------) + I xn--dyry-ira + 0x0032d351, // n0x18d7 c0x0000 (---------------) + I xn--eveni-0qa01ga + 0x0032e1cd, // n0x18d8 c0x0000 (---------------) + I xn--finny-yua + 0x0033318d, // n0x18d9 c0x0000 (---------------) + I xn--fjord-lra + 0x0033378a, // n0x18da c0x0000 (---------------) + I xn--fl-zia + 0x00333a0c, // n0x18db c0x0000 (---------------) + I xn--flor-jra + 0x0033430c, // n0x18dc c0x0000 (---------------) + I xn--frde-gra + 0x00334a0c, // n0x18dd c0x0000 (---------------) + I xn--frna-woa + 0x0033528c, // n0x18de c0x0000 (---------------) + I xn--frya-hra + 0x00338bd3, // n0x18df c0x0000 (---------------) + I xn--ggaviika-8ya47h + 0x003391d0, // n0x18e0 c0x0000 (---------------) + I xn--gildeskl-g0a + 0x003395d0, // n0x18e1 c0x0000 (---------------) + I xn--givuotna-8ya + 0x0033a24d, // n0x18e2 c0x0000 (---------------) + I xn--gjvik-wua + 0x0033a84c, // n0x18e3 c0x0000 (---------------) + I xn--gls-elac + 0x0033b449, // n0x18e4 c0x0000 (---------------) + I xn--h-2fa + 0x0033da4d, // n0x18e5 c0x0000 (---------------) + I xn--hbmer-xqa + 0x0033dd93, // n0x18e6 c0x0000 (---------------) + I xn--hcesuolo-7ya35b + 0x0033e991, // n0x18e7 c0x0000 (---------------) + I xn--hgebostad-g3a + 0x0033edd3, // n0x18e8 c0x0000 (---------------) + I xn--hmmrfeasta-s4ac + 0x0033f58f, // n0x18e9 c0x0000 (---------------) + I xn--hnefoss-q1a + 0x0033f94c, // n0x18ea c0x0000 (---------------) + I xn--hobl-ira + 0x0033fc4f, // n0x18eb c0x0000 (---------------) + I xn--holtlen-hxa + 0x0034000d, // n0x18ec c0x0000 (---------------) + I xn--hpmir-xqa + 0x0034060f, // n0x18ed c0x0000 (---------------) + I xn--hyanger-q1a + 0x003409d0, // n0x18ee c0x0000 (---------------) + I xn--hylandet-54a + 0x0034144e, // n0x18ef c0x0000 (---------------) + I xn--indery-fya + 0x00346c4e, // 
n0x18f0 c0x0000 (---------------) + I xn--jlster-bya + 0x00347390, // n0x18f1 c0x0000 (---------------) + I xn--jrpeland-54a + 0x0034860d, // n0x18f2 c0x0000 (---------------) + I xn--karmy-yua + 0x00348f8e, // n0x18f3 c0x0000 (---------------) + I xn--kfjord-iua + 0x0034930c, // n0x18f4 c0x0000 (---------------) + I xn--klbu-woa + 0x0034a2d3, // n0x18f5 c0x0000 (---------------) + I xn--koluokta-7ya57h + 0x0034c68e, // n0x18f6 c0x0000 (---------------) + I xn--krager-gya + 0x0034da10, // n0x18f7 c0x0000 (---------------) + I xn--kranghke-b0a + 0x0034de11, // n0x18f8 c0x0000 (---------------) + I xn--krdsherad-m8a + 0x0034e24f, // n0x18f9 c0x0000 (---------------) + I xn--krehamn-dxa + 0x0034e613, // n0x18fa c0x0000 (---------------) + I xn--krjohka-hwab49j + 0x0034f00d, // n0x18fb c0x0000 (---------------) + I xn--ksnes-uua + 0x0034f34f, // n0x18fc c0x0000 (---------------) + I xn--kvfjord-nxa + 0x0034f70e, // n0x18fd c0x0000 (---------------) + I xn--kvitsy-fya + 0x0034fe50, // n0x18fe c0x0000 (---------------) + I xn--kvnangen-k0a + 0x00350249, // n0x18ff c0x0000 (---------------) + I xn--l-1fa + 0x00353090, // n0x1900 c0x0000 (---------------) + I xn--laheadju-7ya + 0x003536cf, // n0x1901 c0x0000 (---------------) + I xn--langevg-jxa + 0x00353d4f, // n0x1902 c0x0000 (---------------) + I xn--ldingen-q1a + 0x00354112, // n0x1903 c0x0000 (---------------) + I xn--leagaviika-52b + 0x00357c8e, // n0x1904 c0x0000 (---------------) + I xn--lesund-hua + 0x0035858d, // n0x1905 c0x0000 (---------------) + I xn--lgrd-poac + 0x0035930d, // n0x1906 c0x0000 (---------------) + I xn--lhppi-xqa + 0x0035964d, // n0x1907 c0x0000 (---------------) + I xn--linds-pra + 0x0035aa0d, // n0x1908 c0x0000 (---------------) + I xn--loabt-0qa + 0x0035ad4d, // n0x1909 c0x0000 (---------------) + I xn--lrdal-sra + 0x0035b090, // n0x190a c0x0000 (---------------) + I xn--lrenskog-54a + 0x0035b48b, // n0x190b c0x0000 (---------------) + I xn--lt-liac + 0x0035ba4c, // n0x190c c0x0000 
(---------------) + I xn--lten-gra + 0x0035bdcc, // n0x190d c0x0000 (---------------) + I xn--lury-ira + 0x0035c0cc, // n0x190e c0x0000 (---------------) + I xn--mely-ira + 0x0035c3ce, // n0x190f c0x0000 (---------------) + I xn--merker-kua + 0x00366c50, // n0x1910 c0x0000 (---------------) + I xn--mjndalen-64a + 0x00368512, // n0x1911 c0x0000 (---------------) + I xn--mlatvuopmi-s4a + 0x0036898b, // n0x1912 c0x0000 (---------------) + I xn--mli-tla + 0x0036940e, // n0x1913 c0x0000 (---------------) + I xn--mlselv-iua + 0x0036978e, // n0x1914 c0x0000 (---------------) + I xn--moreke-jua + 0x0036a48e, // n0x1915 c0x0000 (---------------) + I xn--mosjen-eya + 0x0036c0cb, // n0x1916 c0x0000 (---------------) + I xn--mot-tla + 0x43f6c696, // n0x1917 c0x010f (n0x198c-n0x198e) o I xn--mre-og-romsdal-qqb + 0x0036d0cd, // n0x1918 c0x0000 (---------------) + I xn--msy-ula0h + 0x0036e814, // n0x1919 c0x0000 (---------------) + I xn--mtta-vrjjat-k7af + 0x0036f70d, // n0x191a c0x0000 (---------------) + I xn--muost-0qa + 0x00371c95, // n0x191b c0x0000 (---------------) + I xn--nmesjevuemie-tcba + 0x0037308d, // n0x191c c0x0000 (---------------) + I xn--nry-yla5g + 0x00373a0f, // n0x191d c0x0000 (---------------) + I xn--nttery-byae + 0x0037428f, // n0x191e c0x0000 (---------------) + I xn--nvuotna-hwa + 0x0037750f, // n0x191f c0x0000 (---------------) + I xn--oppegrd-ixa + 0x003778ce, // n0x1920 c0x0000 (---------------) + I xn--ostery-fya + 0x0037900d, // n0x1921 c0x0000 (---------------) + I xn--osyro-wua + 0x0037a891, // n0x1922 c0x0000 (---------------) + I xn--porsgu-sta26f + 0x0037eecc, // n0x1923 c0x0000 (---------------) + I xn--rady-ira + 0x0037f1cc, // n0x1924 c0x0000 (---------------) + I xn--rdal-poa + 0x0037f4cb, // n0x1925 c0x0000 (---------------) + I xn--rde-ula + 0x0037fa8c, // n0x1926 c0x0000 (---------------) + I xn--rdy-0nab + 0x0037fe4f, // n0x1927 c0x0000 (---------------) + I xn--rennesy-v1a + 0x00380212, // n0x1928 c0x0000 (---------------) + I 
xn--rhkkervju-01af + 0x00380bcd, // n0x1929 c0x0000 (---------------) + I xn--rholt-mra + 0x00381b8c, // n0x192a c0x0000 (---------------) + I xn--risa-5na + 0x0038200c, // n0x192b c0x0000 (---------------) + I xn--risr-ira + 0x0038230d, // n0x192c c0x0000 (---------------) + I xn--rland-uua + 0x0038264f, // n0x192d c0x0000 (---------------) + I xn--rlingen-mxa + 0x00382a0e, // n0x192e c0x0000 (---------------) + I xn--rmskog-bya + 0x00384c0c, // n0x192f c0x0000 (---------------) + I xn--rros-gra + 0x003851cd, // n0x1930 c0x0000 (---------------) + I xn--rskog-uua + 0x0038550b, // n0x1931 c0x0000 (---------------) + I xn--rst-0na + 0x00385acc, // n0x1932 c0x0000 (---------------) + I xn--rsta-fra + 0x0038604d, // n0x1933 c0x0000 (---------------) + I xn--ryken-vua + 0x0038638e, // n0x1934 c0x0000 (---------------) + I xn--ryrvik-bya + 0x00386809, // n0x1935 c0x0000 (---------------) + I xn--s-1fa + 0x00387513, // n0x1936 c0x0000 (---------------) + I xn--sandnessjen-ogb + 0x00387f8d, // n0x1937 c0x0000 (---------------) + I xn--sandy-yua + 0x003882cd, // n0x1938 c0x0000 (---------------) + I xn--seral-lra + 0x003888cc, // n0x1939 c0x0000 (---------------) + I xn--sgne-gra + 0x00388d4e, // n0x193a c0x0000 (---------------) + I xn--skierv-uta + 0x00389bcf, // n0x193b c0x0000 (---------------) + I xn--skjervy-v1a + 0x00389f8c, // n0x193c c0x0000 (---------------) + I xn--skjk-soa + 0x0038a28d, // n0x193d c0x0000 (---------------) + I xn--sknit-yqa + 0x0038a5cf, // n0x193e c0x0000 (---------------) + I xn--sknland-fxa + 0x0038a98c, // n0x193f c0x0000 (---------------) + I xn--slat-5na + 0x0038b08c, // n0x1940 c0x0000 (---------------) + I xn--slt-elab + 0x0038b44c, // n0x1941 c0x0000 (---------------) + I xn--smla-hra + 0x0038b74c, // n0x1942 c0x0000 (---------------) + I xn--smna-gra + 0x0038be0d, // n0x1943 c0x0000 (---------------) + I xn--snase-nra + 0x0038c152, // n0x1944 c0x0000 (---------------) + I xn--sndre-land-0cb + 0x0038c7cc, // n0x1945 c0x0000 
(---------------) + I xn--snes-poa + 0x0038cacc, // n0x1946 c0x0000 (---------------) + I xn--snsa-roa + 0x0038cdd1, // n0x1947 c0x0000 (---------------) + I xn--sr-aurdal-l8a + 0x0038d20f, // n0x1948 c0x0000 (---------------) + I xn--sr-fron-q1a + 0x0038d5cf, // n0x1949 c0x0000 (---------------) + I xn--sr-odal-q1a + 0x0038d993, // n0x194a c0x0000 (---------------) + I xn--sr-varanger-ggb + 0x0038ee4e, // n0x194b c0x0000 (---------------) + I xn--srfold-bya + 0x0038f3cf, // n0x194c c0x0000 (---------------) + I xn--srreisa-q1a + 0x0038f78c, // n0x194d c0x0000 (---------------) + I xn--srum-gra + 0x4438face, // n0x194e c0x0110 (n0x198e-n0x198f) o I xn--stfold-9xa + 0x0038fe4f, // n0x194f c0x0000 (---------------) + I xn--stjrdal-s1a + 0x00390216, // n0x1950 c0x0000 (---------------) + I xn--stjrdalshalsen-sqb + 0x00390d12, // n0x1951 c0x0000 (---------------) + I xn--stre-toten-zcb + 0x0039230c, // n0x1952 c0x0000 (---------------) + I xn--tjme-hra + 0x00392acf, // n0x1953 c0x0000 (---------------) + I xn--tnsberg-q1a + 0x0039314d, // n0x1954 c0x0000 (---------------) + I xn--trany-yua + 0x0039348f, // n0x1955 c0x0000 (---------------) + I xn--trgstad-r1a + 0x0039384c, // n0x1956 c0x0000 (---------------) + I xn--trna-woa + 0x00393b4d, // n0x1957 c0x0000 (---------------) + I xn--troms-zua + 0x00393e8d, // n0x1958 c0x0000 (---------------) + I xn--tysvr-vra + 0x0039570e, // n0x1959 c0x0000 (---------------) + I xn--unjrga-rta + 0x00396a8c, // n0x195a c0x0000 (---------------) + I xn--vads-jra + 0x00396d8c, // n0x195b c0x0000 (---------------) + I xn--vard-jra + 0x00397090, // n0x195c c0x0000 (---------------) + I xn--vegrshei-c0a + 0x003991d1, // n0x195d c0x0000 (---------------) + I xn--vestvgy-ixa6o + 0x0039960b, // n0x195e c0x0000 (---------------) + I xn--vg-yiab + 0x0039994c, // n0x195f c0x0000 (---------------) + I xn--vgan-qoa + 0x00399c4e, // n0x1960 c0x0000 (---------------) + I xn--vgsy-qoa0j + 0x0039af51, // n0x1961 c0x0000 (---------------) + I 
xn--vre-eiker-k8a + 0x0039b38e, // n0x1962 c0x0000 (---------------) + I xn--vrggt-xqad + 0x0039b70d, // n0x1963 c0x0000 (---------------) + I xn--vry-yla5g + 0x003a278b, // n0x1964 c0x0000 (---------------) + I xn--yer-zna + 0x003a308f, // n0x1965 c0x0000 (---------------) + I xn--ygarden-p1a + 0x003a4814, // n0x1966 c0x0000 (---------------) + I xn--ystre-slidre-ujb + 0x0023a242, // n0x1967 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1968 c0x0000 (---------------) + I gs + 0x00202c03, // n0x1969 c0x0000 (---------------) + I nes + 0x0023a242, // n0x196a c0x0000 (---------------) + I gs + 0x00202c03, // n0x196b c0x0000 (---------------) + I nes + 0x0023a242, // n0x196c c0x0000 (---------------) + I gs + 0x0020a802, // n0x196d c0x0000 (---------------) + I os + 0x003635c5, // n0x196e c0x0000 (---------------) + I valer + 0x0039ac4c, // n0x196f c0x0000 (---------------) + I xn--vler-qoa + 0x0023a242, // n0x1970 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1971 c0x0000 (---------------) + I gs + 0x0020a802, // n0x1972 c0x0000 (---------------) + I os + 0x0023a242, // n0x1973 c0x0000 (---------------) + I gs + 0x002921c5, // n0x1974 c0x0000 (---------------) + I heroy + 0x003301c5, // n0x1975 c0x0000 (---------------) + I sande + 0x0023a242, // n0x1976 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1977 c0x0000 (---------------) + I gs + 0x0020e402, // n0x1978 c0x0000 (---------------) + I bo + 0x002921c5, // n0x1979 c0x0000 (---------------) + I heroy + 0x00310a09, // n0x197a c0x0000 (---------------) + I xn--b-5ga + 0x0033e68c, // n0x197b c0x0000 (---------------) + I xn--hery-ira + 0x0023a242, // n0x197c c0x0000 (---------------) + I gs + 0x0023a242, // n0x197d c0x0000 (---------------) + I gs + 0x0023a242, // n0x197e c0x0000 (---------------) + I gs + 0x0023a242, // n0x197f c0x0000 (---------------) + I gs + 0x003635c5, // n0x1980 c0x0000 (---------------) + I valer + 0x0023a242, // n0x1981 c0x0000 (---------------) + I gs + 0x0023a242, // 
n0x1982 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1983 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1984 c0x0000 (---------------) + I gs + 0x0020e402, // n0x1985 c0x0000 (---------------) + I bo + 0x00310a09, // n0x1986 c0x0000 (---------------) + I xn--b-5ga + 0x0023a242, // n0x1987 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1988 c0x0000 (---------------) + I gs + 0x0023a242, // n0x1989 c0x0000 (---------------) + I gs + 0x003301c5, // n0x198a c0x0000 (---------------) + I sande + 0x0023a242, // n0x198b c0x0000 (---------------) + I gs + 0x003301c5, // n0x198c c0x0000 (---------------) + I sande + 0x0033e68c, // n0x198d c0x0000 (---------------) + I xn--hery-ira + 0x0039ac4c, // n0x198e c0x0000 (---------------) + I xn--vler-qoa + 0x00330b83, // n0x198f c0x0000 (---------------) + I biz + 0x00233503, // n0x1990 c0x0000 (---------------) + I com + 0x0023a783, // n0x1991 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1992 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1993 c0x0000 (---------------) + I info + 0x0021fe03, // n0x1994 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1995 c0x0000 (---------------) + I org + 0x000ecf48, // n0x1996 c0x0000 (---------------) + merseine + 0x000a9304, // n0x1997 c0x0000 (---------------) + mine + 0x000feb48, // n0x1998 c0x0000 (---------------) + shacknet + 0x00201542, // n0x1999 c0x0000 (---------------) + I ac + 0x45200742, // n0x199a c0x0114 (n0x19a9-n0x19aa) + I co + 0x00245b43, // n0x199b c0x0000 (---------------) + I cri + 0x0024eac4, // n0x199c c0x0000 (---------------) + I geek + 0x00205843, // n0x199d c0x0000 (---------------) + I gen + 0x00341e04, // n0x199e c0x0000 (---------------) + I govt + 0x0036b386, // n0x199f c0x0000 (---------------) + I health + 0x0020cec3, // n0x19a0 c0x0000 (---------------) + I iwi + 0x002eed84, // n0x19a1 c0x0000 (---------------) + I kiwi + 0x002703c5, // n0x19a2 c0x0000 (---------------) + I maori + 0x00209003, // n0x19a3 c0x0000 
(---------------) + I mil + 0x0021fe03, // n0x19a4 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x19a5 c0x0000 (---------------) + I org + 0x0028440a, // n0x19a6 c0x0000 (---------------) + I parliament + 0x0023d0c6, // n0x19a7 c0x0000 (---------------) + I school + 0x00369b0c, // n0x19a8 c0x0000 (---------------) + I xn--mori-qsa + 0x000ffa08, // n0x19a9 c0x0000 (---------------) + blogspot + 0x00200742, // n0x19aa c0x0000 (---------------) + I co + 0x00233503, // n0x19ab c0x0000 (---------------) + I com + 0x0023a783, // n0x19ac c0x0000 (---------------) + I edu + 0x0026cc83, // n0x19ad c0x0000 (---------------) + I gov + 0x00213ac3, // n0x19ae c0x0000 (---------------) + I med + 0x002d0106, // n0x19af c0x0000 (---------------) + I museum + 0x0021fe03, // n0x19b0 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x19b1 c0x0000 (---------------) + I org + 0x00220e43, // n0x19b2 c0x0000 (---------------) + I pro + 0x000035c2, // n0x19b3 c0x0000 (---------------) + ae + 0x000fb147, // n0x19b4 c0x0000 (---------------) + blogdns + 0x000d3588, // n0x19b5 c0x0000 (---------------) + blogsite + 0x0000e74e, // n0x19b6 c0x0000 (---------------) + bmoattachments + 0x000878d2, // n0x19b7 c0x0000 (---------------) + boldlygoingnowhere + 0x45e3e2c5, // n0x19b8 c0x0117 (n0x19f0-n0x19f2) o I cdn77 + 0x4631c4cc, // n0x19b9 c0x0118 (n0x19f2-n0x19f3) o I cdn77-secure + 0x0014c048, // n0x19ba c0x0000 (---------------) + dnsalias + 0x0007c9c7, // n0x19bb c0x0000 (---------------) + dnsdojo + 0x00014e0b, // n0x19bc c0x0000 (---------------) + doesntexist + 0x0016a009, // n0x19bd c0x0000 (---------------) + dontexist + 0x0014bf47, // n0x19be c0x0000 (---------------) + doomdns + 0x0008cdc7, // n0x19bf c0x0000 (---------------) + dsmynas + 0x0007c8c7, // n0x19c0 c0x0000 (---------------) + duckdns + 0x00011046, // n0x19c1 c0x0000 (---------------) + dvrdns + 0x00197b48, // n0x19c2 c0x0000 (---------------) + dynalias + 0x46813886, // n0x19c3 c0x011a (n0x19f4-n0x19f6) + dyndns 
+ 0x000a950d, // n0x19c4 c0x0000 (---------------) + endofinternet + 0x0005ea10, // n0x19c5 c0x0000 (---------------) + endoftheinternet + 0x46c04b82, // n0x19c6 c0x011b (n0x19f6-n0x1a2d) + eu + 0x00008f88, // n0x19c7 c0x0000 (---------------) + familyds + 0x0006ac47, // n0x19c8 c0x0000 (---------------) + from-me + 0x00097cc9, // n0x19c9 c0x0000 (---------------) + game-host + 0x00057d06, // n0x19ca c0x0000 (---------------) + gotdns + 0x0000a882, // n0x19cb c0x0000 (---------------) + hk + 0x0014cfca, // n0x19cc c0x0000 (---------------) + hobby-site + 0x00013a47, // n0x19cd c0x0000 (---------------) + homedns + 0x00146347, // n0x19ce c0x0000 (---------------) + homeftp + 0x000a59c9, // n0x19cf c0x0000 (---------------) + homelinux + 0x000a6fc8, // n0x19d0 c0x0000 (---------------) + homeunix + 0x000e0c0e, // n0x19d1 c0x0000 (---------------) + is-a-bruinsfan + 0x0000c54e, // n0x19d2 c0x0000 (---------------) + is-a-candidate + 0x00011a0f, // n0x19d3 c0x0000 (---------------) + is-a-celticsfan + 0x00012789, // n0x19d4 c0x0000 (---------------) + is-a-chef + 0x0004e989, // n0x19d5 c0x0000 (---------------) + is-a-geek + 0x000704cb, // n0x19d6 c0x0000 (---------------) + is-a-knight + 0x0007fa4f, // n0x19d7 c0x0000 (---------------) + is-a-linux-user + 0x0008a50c, // n0x19d8 c0x0000 (---------------) + is-a-patsfan + 0x000ab58b, // n0x19d9 c0x0000 (---------------) + is-a-soxfan + 0x000b9e88, // n0x19da c0x0000 (---------------) + is-found + 0x000d9587, // n0x19db c0x0000 (---------------) + is-lost + 0x000fe008, // n0x19dc c0x0000 (---------------) + is-saved + 0x000f218b, // n0x19dd c0x0000 (---------------) + is-very-bad + 0x000f8d0c, // n0x19de c0x0000 (---------------) + is-very-evil + 0x0011b8cc, // n0x19df c0x0000 (---------------) + is-very-good + 0x0013aecc, // n0x19e0 c0x0000 (---------------) + is-very-nice + 0x00142a4d, // n0x19e1 c0x0000 (---------------) + is-very-sweet + 0x0008e588, // n0x19e2 c0x0000 (---------------) + isa-geek + 0x00150c09, // 
n0x19e3 c0x0000 (---------------) + kicks-ass + 0x001a24cb, // n0x19e4 c0x0000 (---------------) + misconfused + 0x000dcd47, // n0x19e5 c0x0000 (---------------) + podzone + 0x000d340a, // n0x19e6 c0x0000 (---------------) + readmyblog + 0x0006ba86, // n0x19e7 c0x0000 (---------------) + selfip + 0x00099a8d, // n0x19e8 c0x0000 (---------------) + sellsyourhome + 0x000cb7c8, // n0x19e9 c0x0000 (---------------) + servebbs + 0x000895c8, // n0x19ea c0x0000 (---------------) + serveftp + 0x00173dc9, // n0x19eb c0x0000 (---------------) + servegame + 0x000e868c, // n0x19ec c0x0000 (---------------) + stuff-4-sale + 0x00002382, // n0x19ed c0x0000 (---------------) + us + 0x000eadc6, // n0x19ee c0x0000 (---------------) + webhop + 0x00005f82, // n0x19ef c0x0000 (---------------) + za + 0x00000301, // n0x19f0 c0x0000 (---------------) + c + 0x0003cdc3, // n0x19f1 c0x0000 (---------------) + rsc + 0x46783486, // n0x19f2 c0x0119 (n0x19f3-n0x19f4) o I origin + 0x0003e443, // n0x19f3 c0x0000 (---------------) + ssl + 0x00002d42, // n0x19f4 c0x0000 (---------------) + go + 0x00013a44, // n0x19f5 c0x0000 (---------------) + home + 0x000001c2, // n0x19f6 c0x0000 (---------------) + al + 0x000d4884, // n0x19f7 c0x0000 (---------------) + asso + 0x00000102, // n0x19f8 c0x0000 (---------------) + at + 0x00004f82, // n0x19f9 c0x0000 (---------------) + au + 0x00003302, // n0x19fa c0x0000 (---------------) + be + 0x000ee482, // n0x19fb c0x0000 (---------------) + bg + 0x00000302, // n0x19fc c0x0000 (---------------) + ca + 0x0003e2c2, // n0x19fd c0x0000 (---------------) + cd + 0x00001582, // n0x19fe c0x0000 (---------------) + ch + 0x0001ba42, // n0x19ff c0x0000 (---------------) + cn + 0x0003e082, // n0x1a00 c0x0000 (---------------) + cy + 0x00029ec2, // n0x1a01 c0x0000 (---------------) + cz + 0x00004d82, // n0x1a02 c0x0000 (---------------) + de + 0x000494c2, // n0x1a03 c0x0000 (---------------) + dk + 0x0003a783, // n0x1a04 c0x0000 (---------------) + edu + 0x0000b342, // 
n0x1a05 c0x0000 (---------------) + ee + 0x00000482, // n0x1a06 c0x0000 (---------------) + es + 0x00007502, // n0x1a07 c0x0000 (---------------) + fi + 0x00000582, // n0x1a08 c0x0000 (---------------) + fr + 0x00000c82, // n0x1a09 c0x0000 (---------------) + gr + 0x0000e4c2, // n0x1a0a c0x0000 (---------------) + hr + 0x000195c2, // n0x1a0b c0x0000 (---------------) + hu + 0x00000042, // n0x1a0c c0x0000 (---------------) + ie + 0x00002902, // n0x1a0d c0x0000 (---------------) + il + 0x000013c2, // n0x1a0e c0x0000 (---------------) + in + 0x00001603, // n0x1a0f c0x0000 (---------------) + int + 0x000006c2, // n0x1a10 c0x0000 (---------------) + is + 0x00001e42, // n0x1a11 c0x0000 (---------------) + it + 0x000ae3c2, // n0x1a12 c0x0000 (---------------) + jp + 0x00006fc2, // n0x1a13 c0x0000 (---------------) + kr + 0x00009e02, // n0x1a14 c0x0000 (---------------) + lt + 0x00002f42, // n0x1a15 c0x0000 (---------------) + lu + 0x00005d02, // n0x1a16 c0x0000 (---------------) + lv + 0x0002ac02, // n0x1a17 c0x0000 (---------------) + mc + 0x00003e82, // n0x1a18 c0x0000 (---------------) + me + 0x00167142, // n0x1a19 c0x0000 (---------------) + mk + 0x00004c02, // n0x1a1a c0x0000 (---------------) + mt + 0x00026f02, // n0x1a1b c0x0000 (---------------) + my + 0x0001fe03, // n0x1a1c c0x0000 (---------------) + net + 0x00002802, // n0x1a1d c0x0000 (---------------) + ng + 0x00047802, // n0x1a1e c0x0000 (---------------) + nl + 0x00000c02, // n0x1a1f c0x0000 (---------------) + no + 0x000094c2, // n0x1a20 c0x0000 (---------------) + nz + 0x00077905, // n0x1a21 c0x0000 (---------------) + paris + 0x000063c2, // n0x1a22 c0x0000 (---------------) + pl + 0x0008c9c2, // n0x1a23 c0x0000 (---------------) + pt + 0x00043b83, // n0x1a24 c0x0000 (---------------) + q-a + 0x00002202, // n0x1a25 c0x0000 (---------------) + ro + 0x00011302, // n0x1a26 c0x0000 (---------------) + ru + 0x000046c2, // n0x1a27 c0x0000 (---------------) + se + 0x0000a402, // n0x1a28 c0x0000 (---------------) 
+ si + 0x00007842, // n0x1a29 c0x0000 (---------------) + sk + 0x00003002, // n0x1a2a c0x0000 (---------------) + tr + 0x00000f82, // n0x1a2b c0x0000 (---------------) + uk + 0x00002382, // n0x1a2c c0x0000 (---------------) + us + 0x00210f43, // n0x1a2d c0x0000 (---------------) + I abo + 0x00201542, // n0x1a2e c0x0000 (---------------) + I ac + 0x00233503, // n0x1a2f c0x0000 (---------------) + I com + 0x0023a783, // n0x1a30 c0x0000 (---------------) + I edu + 0x00213183, // n0x1a31 c0x0000 (---------------) + I gob + 0x0020e2c3, // n0x1a32 c0x0000 (---------------) + I ing + 0x00213ac3, // n0x1a33 c0x0000 (---------------) + I med + 0x0021fe03, // n0x1a34 c0x0000 (---------------) + I net + 0x00201483, // n0x1a35 c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x1a36 c0x0000 (---------------) + I org + 0x00292103, // n0x1a37 c0x0000 (---------------) + I sld + 0x000ffa08, // n0x1a38 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1a39 c0x0000 (---------------) + I com + 0x0023a783, // n0x1a3a c0x0000 (---------------) + I edu + 0x00213183, // n0x1a3b c0x0000 (---------------) + I gob + 0x00209003, // n0x1a3c c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1a3d c0x0000 (---------------) + I net + 0x00201483, // n0x1a3e c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x1a3f c0x0000 (---------------) + I org + 0x00233503, // n0x1a40 c0x0000 (---------------) + I com + 0x0023a783, // n0x1a41 c0x0000 (---------------) + I edu + 0x0022d1c3, // n0x1a42 c0x0000 (---------------) + I org + 0x00233503, // n0x1a43 c0x0000 (---------------) + I com + 0x0023a783, // n0x1a44 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1a45 c0x0000 (---------------) + I gov + 0x00200041, // n0x1a46 c0x0000 (---------------) + I i + 0x00209003, // n0x1a47 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1a48 c0x0000 (---------------) + I net + 0x00202d03, // n0x1a49 c0x0000 (---------------) + I ngo + 0x0022d1c3, // n0x1a4a c0x0000 (---------------) + I org 
+ 0x00330b83, // n0x1a4b c0x0000 (---------------) + I biz + 0x00233503, // n0x1a4c c0x0000 (---------------) + I com + 0x0023a783, // n0x1a4d c0x0000 (---------------) + I edu + 0x00208f83, // n0x1a4e c0x0000 (---------------) + I fam + 0x00213183, // n0x1a4f c0x0000 (---------------) + I gob + 0x00337283, // n0x1a50 c0x0000 (---------------) + I gok + 0x00282e03, // n0x1a51 c0x0000 (---------------) + I gon + 0x002a36c3, // n0x1a52 c0x0000 (---------------) + I gop + 0x00276203, // n0x1a53 c0x0000 (---------------) + I gos + 0x0026cc83, // n0x1a54 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1a55 c0x0000 (---------------) + I info + 0x0021fe03, // n0x1a56 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1a57 c0x0000 (---------------) + I org + 0x00221a03, // n0x1a58 c0x0000 (---------------) + I web + 0x002ebac4, // n0x1a59 c0x0000 (---------------) + I agro + 0x00253543, // n0x1a5a c0x0000 (---------------) + I aid + 0x000011c3, // n0x1a5b c0x0000 (---------------) + art + 0x00200103, // n0x1a5c c0x0000 (---------------) + I atm + 0x0024a188, // n0x1a5d c0x0000 (---------------) + I augustow + 0x00265104, // n0x1a5e c0x0000 (---------------) + I auto + 0x0022404a, // n0x1a5f c0x0000 (---------------) + I babia-gora + 0x00207a06, // n0x1a60 c0x0000 (---------------) + I bedzin + 0x00397a07, // n0x1a61 c0x0000 (---------------) + I beskidy + 0x0021e20a, // n0x1a62 c0x0000 (---------------) + I bialowieza + 0x00275d09, // n0x1a63 c0x0000 (---------------) + I bialystok + 0x0039dd47, // n0x1a64 c0x0000 (---------------) + I bielawa + 0x003a604a, // n0x1a65 c0x0000 (---------------) + I bieszczady + 0x00330b83, // n0x1a66 c0x0000 (---------------) + I biz + 0x003782cb, // n0x1a67 c0x0000 (---------------) + I boleslawiec + 0x002ce249, // n0x1a68 c0x0000 (---------------) + I bydgoszcz + 0x00269605, // n0x1a69 c0x0000 (---------------) + I bytom + 0x002cc107, // n0x1a6a c0x0000 (---------------) + I cieszyn + 0x00000742, // n0x1a6b c0x0000 
(---------------) + co + 0x00233503, // n0x1a6c c0x0000 (---------------) + I com + 0x00229ec7, // n0x1a6d c0x0000 (---------------) + I czeladz + 0x003522c5, // n0x1a6e c0x0000 (---------------) + I czest + 0x002bb989, // n0x1a6f c0x0000 (---------------) + I dlugoleka + 0x0023a783, // n0x1a70 c0x0000 (---------------) + I edu + 0x00222c06, // n0x1a71 c0x0000 (---------------) + I elblag + 0x002baac3, // n0x1a72 c0x0000 (---------------) + I elk + 0x000cba03, // n0x1a73 c0x0000 (---------------) + gda + 0x000fc286, // n0x1a74 c0x0000 (---------------) + gdansk + 0x000ee0c6, // n0x1a75 c0x0000 (---------------) + gdynia + 0x0000ce47, // n0x1a76 c0x0000 (---------------) + gliwice + 0x00210886, // n0x1a77 c0x0000 (---------------) + I glogow + 0x00216405, // n0x1a78 c0x0000 (---------------) + I gmina + 0x00328747, // n0x1a79 c0x0000 (---------------) + I gniezno + 0x003350c7, // n0x1a7a c0x0000 (---------------) + I gorlice + 0x48a6cc83, // n0x1a7b c0x0122 (n0x1afe-n0x1b2d) + I gov + 0x0032ad07, // n0x1a7c c0x0000 (---------------) + I grajewo + 0x0035e383, // n0x1a7d c0x0000 (---------------) + I gsm + 0x003205c5, // n0x1a7e c0x0000 (---------------) + I ilawa + 0x003a1244, // n0x1a7f c0x0000 (---------------) + I info + 0x0036f508, // n0x1a80 c0x0000 (---------------) + I jaworzno + 0x002ad88c, // n0x1a81 c0x0000 (---------------) + I jelenia-gora + 0x002ac505, // n0x1a82 c0x0000 (---------------) + I jgora + 0x00345486, // n0x1a83 c0x0000 (---------------) + I kalisz + 0x00229d87, // n0x1a84 c0x0000 (---------------) + I karpacz + 0x0038e2c7, // n0x1a85 c0x0000 (---------------) + I kartuzy + 0x0020f847, // n0x1a86 c0x0000 (---------------) + I kaszuby + 0x00217ac8, // n0x1a87 c0x0000 (---------------) + I katowice + 0x002acd4f, // n0x1a88 c0x0000 (---------------) + I kazimierz-dolny + 0x00249345, // n0x1a89 c0x0000 (---------------) + I kepno + 0x00245c47, // n0x1a8a c0x0000 (---------------) + I ketrzyn + 0x0039c507, // n0x1a8b c0x0000 (---------------) + I 
klodzko + 0x002a4d8a, // n0x1a8c c0x0000 (---------------) + I kobierzyce + 0x0028c749, // n0x1a8d c0x0000 (---------------) + I kolobrzeg + 0x002c8d85, // n0x1a8e c0x0000 (---------------) + I konin + 0x002c970a, // n0x1a8f c0x0000 (---------------) + I konskowola + 0x000eac86, // n0x1a90 c0x0000 (---------------) + krakow + 0x002bab45, // n0x1a91 c0x0000 (---------------) + I kutno + 0x00368bc4, // n0x1a92 c0x0000 (---------------) + I lapy + 0x00271a06, // n0x1a93 c0x0000 (---------------) + I lebork + 0x0037c607, // n0x1a94 c0x0000 (---------------) + I legnica + 0x00251647, // n0x1a95 c0x0000 (---------------) + I lezajsk + 0x002551c8, // n0x1a96 c0x0000 (---------------) + I limanowa + 0x00214305, // n0x1a97 c0x0000 (---------------) + I lomza + 0x003521c6, // n0x1a98 c0x0000 (---------------) + I lowicz + 0x00398d05, // n0x1a99 c0x0000 (---------------) + I lubin + 0x003a42c5, // n0x1a9a c0x0000 (---------------) + I lukow + 0x0021b5c4, // n0x1a9b c0x0000 (---------------) + I mail + 0x002f2587, // n0x1a9c c0x0000 (---------------) + I malbork + 0x0030764a, // n0x1a9d c0x0000 (---------------) + I malopolska + 0x0020b848, // n0x1a9e c0x0000 (---------------) + I mazowsze + 0x002ee9c6, // n0x1a9f c0x0000 (---------------) + I mazury + 0x00013ac3, // n0x1aa0 c0x0000 (---------------) + med + 0x003025c5, // n0x1aa1 c0x0000 (---------------) + I media + 0x00232006, // n0x1aa2 c0x0000 (---------------) + I miasta + 0x003a5006, // n0x1aa3 c0x0000 (---------------) + I mielec + 0x00332046, // n0x1aa4 c0x0000 (---------------) + I mielno + 0x00209003, // n0x1aa5 c0x0000 (---------------) + I mil + 0x00380e47, // n0x1aa6 c0x0000 (---------------) + I mragowo + 0x0039c485, // n0x1aa7 c0x0000 (---------------) + I naklo + 0x0021fe03, // n0x1aa8 c0x0000 (---------------) + I net + 0x0039dfcd, // n0x1aa9 c0x0000 (---------------) + I nieruchomosci + 0x00201483, // n0x1aaa c0x0000 (---------------) + I nom + 0x002552c8, // n0x1aab c0x0000 (---------------) + I nowaruda + 
0x002158c4, // n0x1aac c0x0000 (---------------) + I nysa + 0x00276d05, // n0x1aad c0x0000 (---------------) + I olawa + 0x002a4c86, // n0x1aae c0x0000 (---------------) + I olecko + 0x0023d406, // n0x1aaf c0x0000 (---------------) + I olkusz + 0x00219a47, // n0x1ab0 c0x0000 (---------------) + I olsztyn + 0x0023d707, // n0x1ab1 c0x0000 (---------------) + I opoczno + 0x00251585, // n0x1ab2 c0x0000 (---------------) + I opole + 0x0022d1c3, // n0x1ab3 c0x0000 (---------------) + I org + 0x00384947, // n0x1ab4 c0x0000 (---------------) + I ostroda + 0x002c7689, // n0x1ab5 c0x0000 (---------------) + I ostroleka + 0x0020ad49, // n0x1ab6 c0x0000 (---------------) + I ostrowiec + 0x0020d58a, // n0x1ab7 c0x0000 (---------------) + I ostrowwlkp + 0x00248182, // n0x1ab8 c0x0000 (---------------) + I pc + 0x00320584, // n0x1ab9 c0x0000 (---------------) + I pila + 0x002d7404, // n0x1aba c0x0000 (---------------) + I pisz + 0x00219e47, // n0x1abb c0x0000 (---------------) + I podhale + 0x00243388, // n0x1abc c0x0000 (---------------) + I podlasie + 0x002de7c9, // n0x1abd c0x0000 (---------------) + I polkowice + 0x00209609, // n0x1abe c0x0000 (---------------) + I pomorskie + 0x002df1c7, // n0x1abf c0x0000 (---------------) + I pomorze + 0x00248e46, // n0x1ac0 c0x0000 (---------------) + I powiat + 0x000e0986, // n0x1ac1 c0x0000 (---------------) + poznan + 0x002e1c44, // n0x1ac2 c0x0000 (---------------) + I priv + 0x002e1dca, // n0x1ac3 c0x0000 (---------------) + I prochowice + 0x002e4548, // n0x1ac4 c0x0000 (---------------) + I pruszkow + 0x002e51c9, // n0x1ac5 c0x0000 (---------------) + I przeworsk + 0x00296686, // n0x1ac6 c0x0000 (---------------) + I pulawy + 0x00339e45, // n0x1ac7 c0x0000 (---------------) + I radom + 0x0020b708, // n0x1ac8 c0x0000 (---------------) + I rawa-maz + 0x002c238a, // n0x1ac9 c0x0000 (---------------) + I realestate + 0x00285b43, // n0x1aca c0x0000 (---------------) + I rel + 0x0033cf06, // n0x1acb c0x0000 (---------------) + I rybnik + 
0x002df2c7, // n0x1acc c0x0000 (---------------) + I rzeszow + 0x0020f745, // n0x1acd c0x0000 (---------------) + I sanok + 0x002224c5, // n0x1ace c0x0000 (---------------) + I sejny + 0x00247603, // n0x1acf c0x0000 (---------------) + I sex + 0x00352004, // n0x1ad0 c0x0000 (---------------) + I shop + 0x00374ec5, // n0x1ad1 c0x0000 (---------------) + I sklep + 0x00284fc7, // n0x1ad2 c0x0000 (---------------) + I skoczow + 0x002e2b05, // n0x1ad3 c0x0000 (---------------) + I slask + 0x002d51c6, // n0x1ad4 c0x0000 (---------------) + I slupsk + 0x000f3985, // n0x1ad5 c0x0000 (---------------) + sopot + 0x0021f483, // n0x1ad6 c0x0000 (---------------) + I sos + 0x002b5889, // n0x1ad7 c0x0000 (---------------) + I sosnowiec + 0x00276acc, // n0x1ad8 c0x0000 (---------------) + I stalowa-wola + 0x002a11cc, // n0x1ad9 c0x0000 (---------------) + I starachowice + 0x002c92c8, // n0x1ada c0x0000 (---------------) + I stargard + 0x0027b447, // n0x1adb c0x0000 (---------------) + I suwalki + 0x002f0a08, // n0x1adc c0x0000 (---------------) + I swidnica + 0x002f100a, // n0x1add c0x0000 (---------------) + I swiebodzin + 0x002f198b, // n0x1ade c0x0000 (---------------) + I swinoujscie + 0x002ce388, // n0x1adf c0x0000 (---------------) + I szczecin + 0x00345588, // n0x1ae0 c0x0000 (---------------) + I szczytno + 0x00293346, // n0x1ae1 c0x0000 (---------------) + I szkola + 0x00357a85, // n0x1ae2 c0x0000 (---------------) + I targi + 0x00249c0a, // n0x1ae3 c0x0000 (---------------) + I tarnobrzeg + 0x00220b05, // n0x1ae4 c0x0000 (---------------) + I tgory + 0x00200142, // n0x1ae5 c0x0000 (---------------) + I tm + 0x002c0507, // n0x1ae6 c0x0000 (---------------) + I tourism + 0x0029bec6, // n0x1ae7 c0x0000 (---------------) + I travel + 0x00352bc5, // n0x1ae8 c0x0000 (---------------) + I turek + 0x0037da09, // n0x1ae9 c0x0000 (---------------) + I turystyka + 0x0031fd85, // n0x1aea c0x0000 (---------------) + I tychy + 0x00291985, // n0x1aeb c0x0000 (---------------) + I 
ustka + 0x00320189, // n0x1aec c0x0000 (---------------) + I walbrzych + 0x00231e86, // n0x1aed c0x0000 (---------------) + I warmia + 0x0023ee48, // n0x1aee c0x0000 (---------------) + I warszawa + 0x0025a843, // n0x1aef c0x0000 (---------------) + I waw + 0x0020fcc6, // n0x1af0 c0x0000 (---------------) + I wegrow + 0x00275246, // n0x1af1 c0x0000 (---------------) + I wielun + 0x002fff45, // n0x1af2 c0x0000 (---------------) + I wlocl + 0x002fff49, // n0x1af3 c0x0000 (---------------) + I wloclawek + 0x002b0d09, // n0x1af4 c0x0000 (---------------) + I wodzislaw + 0x0024e547, // n0x1af5 c0x0000 (---------------) + I wolomin + 0x000ffdc4, // n0x1af6 c0x0000 (---------------) + wroc + 0x002ffdc7, // n0x1af7 c0x0000 (---------------) + I wroclaw + 0x00209509, // n0x1af8 c0x0000 (---------------) + I zachpomor + 0x0021e405, // n0x1af9 c0x0000 (---------------) + I zagan + 0x00138408, // n0x1afa c0x0000 (---------------) + zakopane + 0x0032f305, // n0x1afb c0x0000 (---------------) + I zarow + 0x0021fec5, // n0x1afc c0x0000 (---------------) + I zgora + 0x0022df89, // n0x1afd c0x0000 (---------------) + I zgorzelec + 0x00200d02, // n0x1afe c0x0000 (---------------) + I ap + 0x00351ac4, // n0x1aff c0x0000 (---------------) + I griw + 0x00206902, // n0x1b00 c0x0000 (---------------) + I ic + 0x002006c2, // n0x1b01 c0x0000 (---------------) + I is + 0x002717c5, // n0x1b02 c0x0000 (---------------) + I kmpsp + 0x002cc908, // n0x1b03 c0x0000 (---------------) + I konsulat + 0x00371a05, // n0x1b04 c0x0000 (---------------) + I kppsp + 0x002bd183, // n0x1b05 c0x0000 (---------------) + I kwp + 0x002bd185, // n0x1b06 c0x0000 (---------------) + I kwpsp + 0x002cc4c3, // n0x1b07 c0x0000 (---------------) + I mup + 0x0020fc82, // n0x1b08 c0x0000 (---------------) + I mw + 0x00268444, // n0x1b09 c0x0000 (---------------) + I oirm + 0x002e6d03, // n0x1b0a c0x0000 (---------------) + I oum + 0x0020ac42, // n0x1b0b c0x0000 (---------------) + I pa + 0x002dd7c4, // n0x1b0c c0x0000 
(---------------) + I pinb + 0x002d8443, // n0x1b0d c0x0000 (---------------) + I piw + 0x00200942, // n0x1b0e c0x0000 (---------------) + I po + 0x00209343, // n0x1b0f c0x0000 (---------------) + I psp + 0x0028be44, // n0x1b10 c0x0000 (---------------) + I psse + 0x002b4bc3, // n0x1b11 c0x0000 (---------------) + I pup + 0x00242684, // n0x1b12 c0x0000 (---------------) + I rzgw + 0x002004c2, // n0x1b13 c0x0000 (---------------) + I sa + 0x00271443, // n0x1b14 c0x0000 (---------------) + I sdn + 0x00216343, // n0x1b15 c0x0000 (---------------) + I sko + 0x00205682, // n0x1b16 c0x0000 (---------------) + I so + 0x0033b802, // n0x1b17 c0x0000 (---------------) + I sr + 0x002b0b49, // n0x1b18 c0x0000 (---------------) + I starostwo + 0x00201cc2, // n0x1b19 c0x0000 (---------------) + I ug + 0x00288444, // n0x1b1a c0x0000 (---------------) + I ugim + 0x00204bc2, // n0x1b1b c0x0000 (---------------) + I um + 0x0020b584, // n0x1b1c c0x0000 (---------------) + I umig + 0x00248e04, // n0x1b1d c0x0000 (---------------) + I upow + 0x002e3944, // n0x1b1e c0x0000 (---------------) + I uppo + 0x00202382, // n0x1b1f c0x0000 (---------------) + I us + 0x00243e82, // n0x1b20 c0x0000 (---------------) + I uw + 0x00211343, // n0x1b21 c0x0000 (---------------) + I uzs + 0x002f1603, // n0x1b22 c0x0000 (---------------) + I wif + 0x00245244, // n0x1b23 c0x0000 (---------------) + I wiih + 0x0025d1c4, // n0x1b24 c0x0000 (---------------) + I winb + 0x002c7604, // n0x1b25 c0x0000 (---------------) + I wios + 0x002c9604, // n0x1b26 c0x0000 (---------------) + I witd + 0x002ff443, // n0x1b27 c0x0000 (---------------) + I wiw + 0x002f65c3, // n0x1b28 c0x0000 (---------------) + I wsa + 0x002eac04, // n0x1b29 c0x0000 (---------------) + I wskr + 0x003009c4, // n0x1b2a c0x0000 (---------------) + I wuoz + 0x00300cc6, // n0x1b2b c0x0000 (---------------) + I wzmiuw + 0x00264682, // n0x1b2c c0x0000 (---------------) + I zp + 0x00200742, // n0x1b2d c0x0000 (---------------) + I co + 0x0023a783, 
// n0x1b2e c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b2f c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1b30 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b31 c0x0000 (---------------) + I org + 0x00201542, // n0x1b32 c0x0000 (---------------) + I ac + 0x00330b83, // n0x1b33 c0x0000 (---------------) + I biz + 0x00233503, // n0x1b34 c0x0000 (---------------) + I com + 0x0023a783, // n0x1b35 c0x0000 (---------------) + I edu + 0x00202703, // n0x1b36 c0x0000 (---------------) + I est + 0x0026cc83, // n0x1b37 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1b38 c0x0000 (---------------) + I info + 0x002b0e04, // n0x1b39 c0x0000 (---------------) + I isla + 0x00205284, // n0x1b3a c0x0000 (---------------) + I name + 0x0021fe03, // n0x1b3b c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b3c c0x0000 (---------------) + I org + 0x00220e43, // n0x1b3d c0x0000 (---------------) + I pro + 0x002e2484, // n0x1b3e c0x0000 (---------------) + I prof + 0x00355603, // n0x1b3f c0x0000 (---------------) + I aaa + 0x002b3e43, // n0x1b40 c0x0000 (---------------) + I aca + 0x0033f204, // n0x1b41 c0x0000 (---------------) + I acct + 0x0032f106, // n0x1b42 c0x0000 (---------------) + I avocat + 0x00202003, // n0x1b43 c0x0000 (---------------) + I bar + 0x00216d43, // n0x1b44 c0x0000 (---------------) + I cpa + 0x00213703, // n0x1b45 c0x0000 (---------------) + I eng + 0x002af443, // n0x1b46 c0x0000 (---------------) + I jur + 0x00274483, // n0x1b47 c0x0000 (---------------) + I law + 0x00213ac3, // n0x1b48 c0x0000 (---------------) + I med + 0x0022a5c5, // n0x1b49 c0x0000 (---------------) + I recht + 0x00233503, // n0x1b4a c0x0000 (---------------) + I com + 0x0023a783, // n0x1b4b c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b4c c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1b4d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b4e c0x0000 (---------------) + I org + 0x002db8c3, // n0x1b4f c0x0000 (---------------) + I 
plo + 0x00235d43, // n0x1b50 c0x0000 (---------------) + I sec + 0x000ffa08, // n0x1b51 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1b52 c0x0000 (---------------) + I com + 0x0023a783, // n0x1b53 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b54 c0x0000 (---------------) + I gov + 0x00201603, // n0x1b55 c0x0000 (---------------) + I int + 0x0021fe03, // n0x1b56 c0x0000 (---------------) + I net + 0x00242044, // n0x1b57 c0x0000 (---------------) + I nome + 0x0022d1c3, // n0x1b58 c0x0000 (---------------) + I org + 0x0029f744, // n0x1b59 c0x0000 (---------------) + I publ + 0x002b7cc5, // n0x1b5a c0x0000 (---------------) + I belau + 0x00200742, // n0x1b5b c0x0000 (---------------) + I co + 0x00202602, // n0x1b5c c0x0000 (---------------) + I ed + 0x00202d42, // n0x1b5d c0x0000 (---------------) + I go + 0x00202c02, // n0x1b5e c0x0000 (---------------) + I ne + 0x00200282, // n0x1b5f c0x0000 (---------------) + I or + 0x00233503, // n0x1b60 c0x0000 (---------------) + I com + 0x0023d684, // n0x1b61 c0x0000 (---------------) + I coop + 0x0023a783, // n0x1b62 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b63 c0x0000 (---------------) + I gov + 0x00209003, // n0x1b64 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1b65 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b66 c0x0000 (---------------) + I org + 0x000ffa08, // n0x1b67 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1b68 c0x0000 (---------------) + I com + 0x0023a783, // n0x1b69 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b6a c0x0000 (---------------) + I gov + 0x00209003, // n0x1b6b c0x0000 (---------------) + I mil + 0x00205284, // n0x1b6c c0x0000 (---------------) + I name + 0x0021fe03, // n0x1b6d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1b6e c0x0000 (---------------) + I org + 0x00217443, // n0x1b6f c0x0000 (---------------) + I sch + 0x002d4884, // n0x1b70 c0x0000 (---------------) + I asso + 0x000ffa08, // n0x1b71 c0x0000 
(---------------) + blogspot + 0x00233503, // n0x1b72 c0x0000 (---------------) + I com + 0x00201483, // n0x1b73 c0x0000 (---------------) + I nom + 0x0024bf84, // n0x1b74 c0x0000 (---------------) + I arts + 0x000ffa08, // n0x1b75 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1b76 c0x0000 (---------------) + I com + 0x0024d9c4, // n0x1b77 c0x0000 (---------------) + I firm + 0x003a1244, // n0x1b78 c0x0000 (---------------) + I info + 0x00201483, // n0x1b79 c0x0000 (---------------) + I nom + 0x002009c2, // n0x1b7a c0x0000 (---------------) + I nt + 0x0022d1c3, // n0x1b7b c0x0000 (---------------) + I org + 0x0022a5c3, // n0x1b7c c0x0000 (---------------) + I rec + 0x00391185, // n0x1b7d c0x0000 (---------------) + I store + 0x00200142, // n0x1b7e c0x0000 (---------------) + I tm + 0x00300b03, // n0x1b7f c0x0000 (---------------) + I www + 0x00201542, // n0x1b80 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x1b81 c0x0000 (---------------) + blogspot + 0x00200742, // n0x1b82 c0x0000 (---------------) + I co + 0x0023a783, // n0x1b83 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1b84 c0x0000 (---------------) + I gov + 0x002013c2, // n0x1b85 c0x0000 (---------------) + I in + 0x0022d1c3, // n0x1b86 c0x0000 (---------------) + I org + 0x00201542, // n0x1b87 c0x0000 (---------------) + I ac + 0x003a6207, // n0x1b88 c0x0000 (---------------) + I adygeya + 0x00289d85, // n0x1b89 c0x0000 (---------------) + I altai + 0x002937c4, // n0x1b8a c0x0000 (---------------) + I amur + 0x002e75c6, // n0x1b8b c0x0000 (---------------) + I amursk + 0x0023668b, // n0x1b8c c0x0000 (---------------) + I arkhangelsk + 0x0025a5c9, // n0x1b8d c0x0000 (---------------) + I astrakhan + 0x003453c6, // n0x1b8e c0x0000 (---------------) + I baikal + 0x003269c9, // n0x1b8f c0x0000 (---------------) + I bashkiria + 0x002d2e48, // n0x1b90 c0x0000 (---------------) + I belgorod + 0x002042c3, // n0x1b91 c0x0000 (---------------) + I bir + 0x000ffa08, // n0x1b92 c0x0000 
(---------------) + blogspot + 0x00227447, // n0x1b93 c0x0000 (---------------) + I bryansk + 0x0034d388, // n0x1b94 c0x0000 (---------------) + I buryatia + 0x002ee443, // n0x1b95 c0x0000 (---------------) + I cbg + 0x002648c4, // n0x1b96 c0x0000 (---------------) + I chel + 0x0026670b, // n0x1b97 c0x0000 (---------------) + I chelyabinsk + 0x002ac6c5, // n0x1b98 c0x0000 (---------------) + I chita + 0x002bc248, // n0x1b99 c0x0000 (---------------) + I chukotka + 0x00335889, // n0x1b9a c0x0000 (---------------) + I chuvashia + 0x0025d143, // n0x1b9b c0x0000 (---------------) + I cmw + 0x00233503, // n0x1b9c c0x0000 (---------------) + I com + 0x00202648, // n0x1b9d c0x0000 (---------------) + I dagestan + 0x002ef847, // n0x1b9e c0x0000 (---------------) + I dudinka + 0x002eccc6, // n0x1b9f c0x0000 (---------------) + I e-burg + 0x0023a783, // n0x1ba0 c0x0000 (---------------) + I edu + 0x003869c7, // n0x1ba1 c0x0000 (---------------) + I fareast + 0x0026cc83, // n0x1ba2 c0x0000 (---------------) + I gov + 0x00312746, // n0x1ba3 c0x0000 (---------------) + I grozny + 0x00201603, // n0x1ba4 c0x0000 (---------------) + I int + 0x00230d87, // n0x1ba5 c0x0000 (---------------) + I irkutsk + 0x0027d407, // n0x1ba6 c0x0000 (---------------) + I ivanovo + 0x003879c7, // n0x1ba7 c0x0000 (---------------) + I izhevsk + 0x002f2505, // n0x1ba8 c0x0000 (---------------) + I jamal + 0x00206543, // n0x1ba9 c0x0000 (---------------) + I jar + 0x0020a7cb, // n0x1baa c0x0000 (---------------) + I joshkar-ola + 0x00309908, // n0x1bab c0x0000 (---------------) + I k-uralsk + 0x00226e48, // n0x1bac c0x0000 (---------------) + I kalmykia + 0x002509c6, // n0x1bad c0x0000 (---------------) + I kaluga + 0x0022ab89, // n0x1bae c0x0000 (---------------) + I kamchatka + 0x00327ec7, // n0x1baf c0x0000 (---------------) + I karelia + 0x002f9b85, // n0x1bb0 c0x0000 (---------------) + I kazan + 0x00379b44, // n0x1bb1 c0x0000 (---------------) + I kchr + 0x00275f48, // n0x1bb2 c0x0000 
(---------------) + I kemerovo + 0x0023f40a, // n0x1bb3 c0x0000 (---------------) + I khabarovsk + 0x0023f649, // n0x1bb4 c0x0000 (---------------) + I khakassia + 0x002517c3, // n0x1bb5 c0x0000 (---------------) + I khv + 0x0027ea45, // n0x1bb6 c0x0000 (---------------) + I kirov + 0x0033bcc3, // n0x1bb7 c0x0000 (---------------) + I kms + 0x002ab0c6, // n0x1bb8 c0x0000 (---------------) + I koenig + 0x0039a004, // n0x1bb9 c0x0000 (---------------) + I komi + 0x002fc3c8, // n0x1bba c0x0000 (---------------) + I kostroma + 0x00387b4b, // n0x1bbb c0x0000 (---------------) + I krasnoyarsk + 0x0033bb85, // n0x1bbc c0x0000 (---------------) + I kuban + 0x002b7a46, // n0x1bbd c0x0000 (---------------) + I kurgan + 0x002b9785, // n0x1bbe c0x0000 (---------------) + I kursk + 0x002b9cc8, // n0x1bbf c0x0000 (---------------) + I kustanai + 0x002bac87, // n0x1bc0 c0x0000 (---------------) + I kuzbass + 0x00207707, // n0x1bc1 c0x0000 (---------------) + I lipetsk + 0x00223c47, // n0x1bc2 c0x0000 (---------------) + I magadan + 0x0021e6c4, // n0x1bc3 c0x0000 (---------------) + I mari + 0x0021e6c7, // n0x1bc4 c0x0000 (---------------) + I mari-el + 0x0027bc46, // n0x1bc5 c0x0000 (---------------) + I marine + 0x00209003, // n0x1bc6 c0x0000 (---------------) + I mil + 0x002c55c8, // n0x1bc7 c0x0000 (---------------) + I mordovia + 0x00253243, // n0x1bc8 c0x0000 (---------------) + I msk + 0x002ccc88, // n0x1bc9 c0x0000 (---------------) + I murmansk + 0x002d2485, // n0x1bca c0x0000 (---------------) + I mytis + 0x0031a348, // n0x1bcb c0x0000 (---------------) + I nakhodka + 0x0023a987, // n0x1bcc c0x0000 (---------------) + I nalchik + 0x0021fe03, // n0x1bcd c0x0000 (---------------) + I net + 0x00392a03, // n0x1bce c0x0000 (---------------) + I nkz + 0x00289404, // n0x1bcf c0x0000 (---------------) + I nnov + 0x00374d87, // n0x1bd0 c0x0000 (---------------) + I norilsk + 0x002058c3, // n0x1bd1 c0x0000 (---------------) + I nov + 0x0027d4cb, // n0x1bd2 c0x0000 
(---------------) + I novosibirsk + 0x00216303, // n0x1bd3 c0x0000 (---------------) + I nsk + 0x00253204, // n0x1bd4 c0x0000 (---------------) + I omsk + 0x00391208, // n0x1bd5 c0x0000 (---------------) + I orenburg + 0x0022d1c3, // n0x1bd6 c0x0000 (---------------) + I org + 0x002d6e85, // n0x1bd7 c0x0000 (---------------) + I oryol + 0x00296c85, // n0x1bd8 c0x0000 (---------------) + I oskol + 0x0039c386, // n0x1bd9 c0x0000 (---------------) + I palana + 0x00212c85, // n0x1bda c0x0000 (---------------) + I penza + 0x002d2844, // n0x1bdb c0x0000 (---------------) + I perm + 0x00209302, // n0x1bdc c0x0000 (---------------) + I pp + 0x002e5483, // n0x1bdd c0x0000 (---------------) + I ptz + 0x00368c4a, // n0x1bde c0x0000 (---------------) + I pyatigorsk + 0x003907c3, // n0x1bdf c0x0000 (---------------) + I rnd + 0x002d1989, // n0x1be0 c0x0000 (---------------) + I rubtsovsk + 0x00357446, // n0x1be1 c0x0000 (---------------) + I ryazan + 0x0021ac48, // n0x1be2 c0x0000 (---------------) + I sakhalin + 0x0028b986, // n0x1be3 c0x0000 (---------------) + I samara + 0x002257c7, // n0x1be4 c0x0000 (---------------) + I saratov + 0x002cc748, // n0x1be5 c0x0000 (---------------) + I simbirsk + 0x002d6648, // n0x1be6 c0x0000 (---------------) + I smolensk + 0x002e0f83, // n0x1be7 c0x0000 (---------------) + I snz + 0x00271883, // n0x1be8 c0x0000 (---------------) + I spb + 0x00225b89, // n0x1be9 c0x0000 (---------------) + I stavropol + 0x002f4c43, // n0x1bea c0x0000 (---------------) + I stv + 0x00341846, // n0x1beb c0x0000 (---------------) + I surgut + 0x00289a06, // n0x1bec c0x0000 (---------------) + I syzran + 0x00314f06, // n0x1bed c0x0000 (---------------) + I tambov + 0x0036cc49, // n0x1bee c0x0000 (---------------) + I tatarstan + 0x002ff844, // n0x1bef c0x0000 (---------------) + I test + 0x0020bf43, // n0x1bf0 c0x0000 (---------------) + I tom + 0x00309805, // n0x1bf1 c0x0000 (---------------) + I tomsk + 0x0030b209, // n0x1bf2 c0x0000 (---------------) + I 
tsaritsyn + 0x00207803, // n0x1bf3 c0x0000 (---------------) + I tsk + 0x00359bc4, // n0x1bf4 c0x0000 (---------------) + I tula + 0x002f3304, // n0x1bf5 c0x0000 (---------------) + I tuva + 0x00360784, // n0x1bf6 c0x0000 (---------------) + I tver + 0x0031ac06, // n0x1bf7 c0x0000 (---------------) + I tyumen + 0x0020fa43, // n0x1bf8 c0x0000 (---------------) + I udm + 0x0020fa48, // n0x1bf9 c0x0000 (---------------) + I udmurtia + 0x0025a208, // n0x1bfa c0x0000 (---------------) + I ulan-ude + 0x0035ca86, // n0x1bfb c0x0000 (---------------) + I vdonsk + 0x002f998b, // n0x1bfc c0x0000 (---------------) + I vladikavkaz + 0x002f9cc8, // n0x1bfd c0x0000 (---------------) + I vladimir + 0x002f9ecb, // n0x1bfe c0x0000 (---------------) + I vladivostok + 0x002fca09, // n0x1bff c0x0000 (---------------) + I volgograd + 0x002fc187, // n0x1c00 c0x0000 (---------------) + I vologda + 0x002fce88, // n0x1c01 c0x0000 (---------------) + I voronezh + 0x002fee83, // n0x1c02 c0x0000 (---------------) + I vrn + 0x0039fc06, // n0x1c03 c0x0000 (---------------) + I vyatka + 0x0020f507, // n0x1c04 c0x0000 (---------------) + I yakutia + 0x00298b45, // n0x1c05 c0x0000 (---------------) + I yamal + 0x00346f49, // n0x1c06 c0x0000 (---------------) + I yaroslavl + 0x0030fd4d, // n0x1c07 c0x0000 (---------------) + I yekaterinburg + 0x0021aa91, // n0x1c08 c0x0000 (---------------) + I yuzhno-sakhalinsk + 0x0023d545, // n0x1c09 c0x0000 (---------------) + I zgrad + 0x00201542, // n0x1c0a c0x0000 (---------------) + I ac + 0x00200742, // n0x1c0b c0x0000 (---------------) + I co + 0x00233503, // n0x1c0c c0x0000 (---------------) + I com + 0x0023a783, // n0x1c0d c0x0000 (---------------) + I edu + 0x0033d7c4, // n0x1c0e c0x0000 (---------------) + I gouv + 0x0026cc83, // n0x1c0f c0x0000 (---------------) + I gov + 0x00201603, // n0x1c10 c0x0000 (---------------) + I int + 0x00209003, // n0x1c11 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1c12 c0x0000 (---------------) + I net + 
0x00233503, // n0x1c13 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c14 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c15 c0x0000 (---------------) + I gov + 0x00213ac3, // n0x1c16 c0x0000 (---------------) + I med + 0x0021fe03, // n0x1c17 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c18 c0x0000 (---------------) + I org + 0x00297403, // n0x1c19 c0x0000 (---------------) + I pub + 0x00217443, // n0x1c1a c0x0000 (---------------) + I sch + 0x00233503, // n0x1c1b c0x0000 (---------------) + I com + 0x0023a783, // n0x1c1c c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c1d c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1c1e c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c1f c0x0000 (---------------) + I org + 0x00233503, // n0x1c20 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c21 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c22 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1c23 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c24 c0x0000 (---------------) + I org + 0x00233503, // n0x1c25 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c26 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c27 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1c28 c0x0000 (---------------) + I info + 0x00213ac3, // n0x1c29 c0x0000 (---------------) + I med + 0x0021fe03, // n0x1c2a c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c2b c0x0000 (---------------) + I org + 0x00224e42, // n0x1c2c c0x0000 (---------------) + I tv + 0x00200101, // n0x1c2d c0x0000 (---------------) + I a + 0x00201542, // n0x1c2e c0x0000 (---------------) + I ac + 0x00200001, // n0x1c2f c0x0000 (---------------) + I b + 0x003129c2, // n0x1c30 c0x0000 (---------------) + I bd + 0x000ffa08, // n0x1c31 c0x0000 (---------------) + blogspot + 0x0021ca45, // n0x1c32 c0x0000 (---------------) + I brand + 0x00200301, // n0x1c33 c0x0000 (---------------) + I c + 0x00033503, // n0x1c34 c0x0000 (---------------) + com 
+ 0x00200381, // n0x1c35 c0x0000 (---------------) + I d + 0x00200081, // n0x1c36 c0x0000 (---------------) + I e + 0x00200581, // n0x1c37 c0x0000 (---------------) + I f + 0x0023f342, // n0x1c38 c0x0000 (---------------) + I fh + 0x0023f344, // n0x1c39 c0x0000 (---------------) + I fhsk + 0x00363543, // n0x1c3a c0x0000 (---------------) + I fhv + 0x00200c81, // n0x1c3b c0x0000 (---------------) + I g + 0x00200d81, // n0x1c3c c0x0000 (---------------) + I h + 0x00200041, // n0x1c3d c0x0000 (---------------) + I i + 0x00200fc1, // n0x1c3e c0x0000 (---------------) + I k + 0x002e9647, // n0x1c3f c0x0000 (---------------) + I komforb + 0x002d69cf, // n0x1c40 c0x0000 (---------------) + I kommunalforbund + 0x002da946, // n0x1c41 c0x0000 (---------------) + I komvux + 0x00200201, // n0x1c42 c0x0000 (---------------) + I l + 0x0026a306, // n0x1c43 c0x0000 (---------------) + I lanbib + 0x00200181, // n0x1c44 c0x0000 (---------------) + I m + 0x00200541, // n0x1c45 c0x0000 (---------------) + I n + 0x00325ace, // n0x1c46 c0x0000 (---------------) + I naturbruksgymn + 0x00200281, // n0x1c47 c0x0000 (---------------) + I o + 0x0022d1c3, // n0x1c48 c0x0000 (---------------) + I org + 0x00200941, // n0x1c49 c0x0000 (---------------) + I p + 0x002a3745, // n0x1c4a c0x0000 (---------------) + I parti + 0x00209302, // n0x1c4b c0x0000 (---------------) + I pp + 0x00247505, // n0x1c4c c0x0000 (---------------) + I press + 0x002002c1, // n0x1c4d c0x0000 (---------------) + I r + 0x002004c1, // n0x1c4e c0x0000 (---------------) + I s + 0x00200141, // n0x1c4f c0x0000 (---------------) + I t + 0x00200142, // n0x1c50 c0x0000 (---------------) + I tm + 0x00200441, // n0x1c51 c0x0000 (---------------) + I u + 0x002010c1, // n0x1c52 c0x0000 (---------------) + I w + 0x00205381, // n0x1c53 c0x0000 (---------------) + I x + 0x00201841, // n0x1c54 c0x0000 (---------------) + I y + 0x00205f81, // n0x1c55 c0x0000 (---------------) + I z + 0x000ffa08, // n0x1c56 c0x0000 (---------------) + 
blogspot + 0x00233503, // n0x1c57 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c58 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c59 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1c5a c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c5b c0x0000 (---------------) + I org + 0x00220f03, // n0x1c5c c0x0000 (---------------) + I per + 0x00233503, // n0x1c5d c0x0000 (---------------) + I com + 0x0026cc83, // n0x1c5e c0x0000 (---------------) + I gov + 0x0008c288, // n0x1c5f c0x0000 (---------------) + hashbang + 0x00209003, // n0x1c60 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1c61 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c62 c0x0000 (---------------) + I org + 0x014da508, // n0x1c63 c0x0005 (---------------)* o platform + 0x000ffa08, // n0x1c64 c0x0000 (---------------) + blogspot + 0x000ffa08, // n0x1c65 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1c66 c0x0000 (---------------) + I com + 0x0023a783, // n0x1c67 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1c68 c0x0000 (---------------) + I gov + 0x0021fe03, // n0x1c69 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c6a c0x0000 (---------------) + I org + 0x002011c3, // n0x1c6b c0x0000 (---------------) + I art + 0x000ffa08, // n0x1c6c c0x0000 (---------------) + blogspot + 0x00233503, // n0x1c6d c0x0000 (---------------) + I com + 0x0023a783, // n0x1c6e c0x0000 (---------------) + I edu + 0x0033d7c4, // n0x1c6f c0x0000 (---------------) + I gouv + 0x0022d1c3, // n0x1c70 c0x0000 (---------------) + I org + 0x00295005, // n0x1c71 c0x0000 (---------------) + I perso + 0x00320a04, // n0x1c72 c0x0000 (---------------) + I univ + 0x00233503, // n0x1c73 c0x0000 (---------------) + I com + 0x0021fe03, // n0x1c74 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c75 c0x0000 (---------------) + I org + 0x00200742, // n0x1c76 c0x0000 (---------------) + I co + 0x00233503, // n0x1c77 c0x0000 (---------------) + I com + 0x00237249, // n0x1c78 
c0x0000 (---------------) + I consulado + 0x0023a783, // n0x1c79 c0x0000 (---------------) + I edu + 0x00242d89, // n0x1c7a c0x0000 (---------------) + I embaixada + 0x0026cc83, // n0x1c7b c0x0000 (---------------) + I gov + 0x00209003, // n0x1c7c c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1c7d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1c7e c0x0000 (---------------) + I org + 0x002e1a48, // n0x1c7f c0x0000 (---------------) + I principe + 0x00215f87, // n0x1c80 c0x0000 (---------------) + I saotome + 0x00391185, // n0x1c81 c0x0000 (---------------) + I store + 0x003a6207, // n0x1c82 c0x0000 (---------------) + I adygeya + 0x0023668b, // n0x1c83 c0x0000 (---------------) + I arkhangelsk + 0x0020d908, // n0x1c84 c0x0000 (---------------) + I balashov + 0x003269c9, // n0x1c85 c0x0000 (---------------) + I bashkiria + 0x00227447, // n0x1c86 c0x0000 (---------------) + I bryansk + 0x00202648, // n0x1c87 c0x0000 (---------------) + I dagestan + 0x00312746, // n0x1c88 c0x0000 (---------------) + I grozny + 0x0027d407, // n0x1c89 c0x0000 (---------------) + I ivanovo + 0x00226e48, // n0x1c8a c0x0000 (---------------) + I kalmykia + 0x002509c6, // n0x1c8b c0x0000 (---------------) + I kaluga + 0x00327ec7, // n0x1c8c c0x0000 (---------------) + I karelia + 0x0023f649, // n0x1c8d c0x0000 (---------------) + I khakassia + 0x0037f849, // n0x1c8e c0x0000 (---------------) + I krasnodar + 0x002b7a46, // n0x1c8f c0x0000 (---------------) + I kurgan + 0x002b8945, // n0x1c90 c0x0000 (---------------) + I lenug + 0x002c55c8, // n0x1c91 c0x0000 (---------------) + I mordovia + 0x00253243, // n0x1c92 c0x0000 (---------------) + I msk + 0x002ccc88, // n0x1c93 c0x0000 (---------------) + I murmansk + 0x0023a987, // n0x1c94 c0x0000 (---------------) + I nalchik + 0x002058c3, // n0x1c95 c0x0000 (---------------) + I nov + 0x00229c07, // n0x1c96 c0x0000 (---------------) + I obninsk + 0x00212c85, // n0x1c97 c0x0000 (---------------) + I penza + 0x002dd148, // n0x1c98 
c0x0000 (---------------) + I pokrovsk + 0x00274805, // n0x1c99 c0x0000 (---------------) + I sochi + 0x00271883, // n0x1c9a c0x0000 (---------------) + I spb + 0x0033d1c9, // n0x1c9b c0x0000 (---------------) + I togliatti + 0x002aa587, // n0x1c9c c0x0000 (---------------) + I troitsk + 0x00359bc4, // n0x1c9d c0x0000 (---------------) + I tula + 0x002f3304, // n0x1c9e c0x0000 (---------------) + I tuva + 0x002f998b, // n0x1c9f c0x0000 (---------------) + I vladikavkaz + 0x002f9cc8, // n0x1ca0 c0x0000 (---------------) + I vladimir + 0x002fc187, // n0x1ca1 c0x0000 (---------------) + I vologda + 0x00233503, // n0x1ca2 c0x0000 (---------------) + I com + 0x0023a783, // n0x1ca3 c0x0000 (---------------) + I edu + 0x00213183, // n0x1ca4 c0x0000 (---------------) + I gob + 0x0022d1c3, // n0x1ca5 c0x0000 (---------------) + I org + 0x00244803, // n0x1ca6 c0x0000 (---------------) + I red + 0x0026cc83, // n0x1ca7 c0x0000 (---------------) + I gov + 0x00233503, // n0x1ca8 c0x0000 (---------------) + I com + 0x0023a783, // n0x1ca9 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1caa c0x0000 (---------------) + I gov + 0x00209003, // n0x1cab c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1cac c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1cad c0x0000 (---------------) + I org + 0x00201542, // n0x1cae c0x0000 (---------------) + I ac + 0x00200742, // n0x1caf c0x0000 (---------------) + I co + 0x0022d1c3, // n0x1cb0 c0x0000 (---------------) + I org + 0x000ffa08, // n0x1cb1 c0x0000 (---------------) + blogspot + 0x00201542, // n0x1cb2 c0x0000 (---------------) + I ac + 0x00200742, // n0x1cb3 c0x0000 (---------------) + I co + 0x00202d42, // n0x1cb4 c0x0000 (---------------) + I go + 0x002013c2, // n0x1cb5 c0x0000 (---------------) + I in + 0x00209002, // n0x1cb6 c0x0000 (---------------) + I mi + 0x0021fe03, // n0x1cb7 c0x0000 (---------------) + I net + 0x00200282, // n0x1cb8 c0x0000 (---------------) + I or + 0x00201542, // n0x1cb9 c0x0000 
(---------------) + I ac + 0x00330b83, // n0x1cba c0x0000 (---------------) + I biz + 0x00200742, // n0x1cbb c0x0000 (---------------) + I co + 0x00233503, // n0x1cbc c0x0000 (---------------) + I com + 0x0023a783, // n0x1cbd c0x0000 (---------------) + I edu + 0x00202d42, // n0x1cbe c0x0000 (---------------) + I go + 0x0026cc83, // n0x1cbf c0x0000 (---------------) + I gov + 0x00201603, // n0x1cc0 c0x0000 (---------------) + I int + 0x00209003, // n0x1cc1 c0x0000 (---------------) + I mil + 0x00205284, // n0x1cc2 c0x0000 (---------------) + I name + 0x0021fe03, // n0x1cc3 c0x0000 (---------------) + I net + 0x00218f83, // n0x1cc4 c0x0000 (---------------) + I nic + 0x0022d1c3, // n0x1cc5 c0x0000 (---------------) + I org + 0x002ff844, // n0x1cc6 c0x0000 (---------------) + I test + 0x00221a03, // n0x1cc7 c0x0000 (---------------) + I web + 0x0026cc83, // n0x1cc8 c0x0000 (---------------) + I gov + 0x00200742, // n0x1cc9 c0x0000 (---------------) + I co + 0x00233503, // n0x1cca c0x0000 (---------------) + I com + 0x0023a783, // n0x1ccb c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1ccc c0x0000 (---------------) + I gov + 0x00209003, // n0x1ccd c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1cce c0x0000 (---------------) + I net + 0x00201483, // n0x1ccf c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x1cd0 c0x0000 (---------------) + I org + 0x003927c7, // n0x1cd1 c0x0000 (---------------) + I agrinet + 0x00233503, // n0x1cd2 c0x0000 (---------------) + I com + 0x00222387, // n0x1cd3 c0x0000 (---------------) + I defense + 0x0025d886, // n0x1cd4 c0x0000 (---------------) + I edunet + 0x00215243, // n0x1cd5 c0x0000 (---------------) + I ens + 0x00207503, // n0x1cd6 c0x0000 (---------------) + I fin + 0x0026cc83, // n0x1cd7 c0x0000 (---------------) + I gov + 0x0021d883, // n0x1cd8 c0x0000 (---------------) + I ind + 0x003a1244, // n0x1cd9 c0x0000 (---------------) + I info + 0x0036d504, // n0x1cda c0x0000 (---------------) + I intl + 0x002da6c6, 
// n0x1cdb c0x0000 (---------------) + I mincom + 0x0022b143, // n0x1cdc c0x0000 (---------------) + I nat + 0x0021fe03, // n0x1cdd c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1cde c0x0000 (---------------) + I org + 0x00295005, // n0x1cdf c0x0000 (---------------) + I perso + 0x0020d344, // n0x1ce0 c0x0000 (---------------) + I rnrt + 0x00266ec3, // n0x1ce1 c0x0000 (---------------) + I rns + 0x00351883, // n0x1ce2 c0x0000 (---------------) + I rnu + 0x002c0507, // n0x1ce3 c0x0000 (---------------) + I tourism + 0x00209e45, // n0x1ce4 c0x0000 (---------------) + I turen + 0x00233503, // n0x1ce5 c0x0000 (---------------) + I com + 0x0023a783, // n0x1ce6 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1ce7 c0x0000 (---------------) + I gov + 0x00209003, // n0x1ce8 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1ce9 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1cea c0x0000 (---------------) + I org + 0x00203402, // n0x1ceb c0x0000 (---------------) + I av + 0x002cb903, // n0x1cec c0x0000 (---------------) + I bbs + 0x0028e2c3, // n0x1ced c0x0000 (---------------) + I bel + 0x00330b83, // n0x1cee c0x0000 (---------------) + I biz + 0x52a33503, // n0x1cef c0x014a (n0x1d00-n0x1d01) + I com + 0x0022bf42, // n0x1cf0 c0x0000 (---------------) + I dr + 0x0023a783, // n0x1cf1 c0x0000 (---------------) + I edu + 0x00205843, // n0x1cf2 c0x0000 (---------------) + I gen + 0x0026cc83, // n0x1cf3 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1cf4 c0x0000 (---------------) + I info + 0x00309ac3, // n0x1cf5 c0x0000 (---------------) + I k12 + 0x00249343, // n0x1cf6 c0x0000 (---------------) + I kep + 0x00209003, // n0x1cf7 c0x0000 (---------------) + I mil + 0x00205284, // n0x1cf8 c0x0000 (---------------) + I name + 0x52e00642, // n0x1cf9 c0x014b (n0x1d01-n0x1d02) + I nc + 0x0021fe03, // n0x1cfa c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1cfb c0x0000 (---------------) + I org + 0x00208103, // n0x1cfc c0x0000 (---------------) + I 
pol + 0x0022f7c3, // n0x1cfd c0x0000 (---------------) + I tel + 0x00224e42, // n0x1cfe c0x0000 (---------------) + I tv + 0x00221a03, // n0x1cff c0x0000 (---------------) + I web + 0x000ffa08, // n0x1d00 c0x0000 (---------------) + blogspot + 0x0026cc83, // n0x1d01 c0x0000 (---------------) + I gov + 0x002389c4, // n0x1d02 c0x0000 (---------------) + I aero + 0x00330b83, // n0x1d03 c0x0000 (---------------) + I biz + 0x00200742, // n0x1d04 c0x0000 (---------------) + I co + 0x00233503, // n0x1d05 c0x0000 (---------------) + I com + 0x0023d684, // n0x1d06 c0x0000 (---------------) + I coop + 0x0023a783, // n0x1d07 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1d08 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1d09 c0x0000 (---------------) + I info + 0x00201603, // n0x1d0a c0x0000 (---------------) + I int + 0x002ddc44, // n0x1d0b c0x0000 (---------------) + I jobs + 0x00207104, // n0x1d0c c0x0000 (---------------) + I mobi + 0x002d0106, // n0x1d0d c0x0000 (---------------) + I museum + 0x00205284, // n0x1d0e c0x0000 (---------------) + I name + 0x0021fe03, // n0x1d0f c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1d10 c0x0000 (---------------) + I org + 0x00220e43, // n0x1d11 c0x0000 (---------------) + I pro + 0x0029bec6, // n0x1d12 c0x0000 (---------------) + I travel + 0x00055f8b, // n0x1d13 c0x0000 (---------------) + better-than + 0x00013886, // n0x1d14 c0x0000 (---------------) + dyndns + 0x0002184a, // n0x1d15 c0x0000 (---------------) + on-the-web + 0x000fef4a, // n0x1d16 c0x0000 (---------------) + worse-than + 0x000ffa08, // n0x1d17 c0x0000 (---------------) + blogspot + 0x00238ac4, // n0x1d18 c0x0000 (---------------) + I club + 0x00233503, // n0x1d19 c0x0000 (---------------) + I com + 0x00330b44, // n0x1d1a c0x0000 (---------------) + I ebiz + 0x0023a783, // n0x1d1b c0x0000 (---------------) + I edu + 0x00297cc4, // n0x1d1c c0x0000 (---------------) + I game + 0x0026cc83, // n0x1d1d c0x0000 (---------------) + I gov + 
0x00317243, // n0x1d1e c0x0000 (---------------) + I idv + 0x00209003, // n0x1d1f c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1d20 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1d21 c0x0000 (---------------) + I org + 0x0032444b, // n0x1d22 c0x0000 (---------------) + I xn--czrw28b + 0x003941ca, // n0x1d23 c0x0000 (---------------) + I xn--uc0atv + 0x003a54cc, // n0x1d24 c0x0000 (---------------) + I xn--zf0ao64a + 0x00201542, // n0x1d25 c0x0000 (---------------) + I ac + 0x00200742, // n0x1d26 c0x0000 (---------------) + I co + 0x00202d42, // n0x1d27 c0x0000 (---------------) + I go + 0x00234dc5, // n0x1d28 c0x0000 (---------------) + I hotel + 0x003a1244, // n0x1d29 c0x0000 (---------------) + I info + 0x00203e82, // n0x1d2a c0x0000 (---------------) + I me + 0x00209003, // n0x1d2b c0x0000 (---------------) + I mil + 0x00207104, // n0x1d2c c0x0000 (---------------) + I mobi + 0x00202c02, // n0x1d2d c0x0000 (---------------) + I ne + 0x00200282, // n0x1d2e c0x0000 (---------------) + I or + 0x00200702, // n0x1d2f c0x0000 (---------------) + I sc + 0x00224e42, // n0x1d30 c0x0000 (---------------) + I tv + 0x00130b83, // n0x1d31 c0x0000 (---------------) + biz + 0x002d5d89, // n0x1d32 c0x0000 (---------------) + I cherkassy + 0x00289888, // n0x1d33 c0x0000 (---------------) + I cherkasy + 0x0026cb09, // n0x1d34 c0x0000 (---------------) + I chernigov + 0x0027d249, // n0x1d35 c0x0000 (---------------) + I chernihiv + 0x0036e44a, // n0x1d36 c0x0000 (---------------) + I chernivtsi + 0x00375aca, // n0x1d37 c0x0000 (---------------) + I chernovtsy + 0x0020b482, // n0x1d38 c0x0000 (---------------) + I ck + 0x0021ba42, // n0x1d39 c0x0000 (---------------) + I cn + 0x00000742, // n0x1d3a c0x0000 (---------------) + co + 0x00233503, // n0x1d3b c0x0000 (---------------) + I com + 0x002049c2, // n0x1d3c c0x0000 (---------------) + I cr + 0x00245e06, // n0x1d3d c0x0000 (---------------) + I crimea + 0x00353bc2, // n0x1d3e c0x0000 (---------------) + I cv + 
0x00211102, // n0x1d3f c0x0000 (---------------) + I dn + 0x002276ce, // n0x1d40 c0x0000 (---------------) + I dnepropetrovsk + 0x0027148e, // n0x1d41 c0x0000 (---------------) + I dnipropetrovsk + 0x0027d0c7, // n0x1d42 c0x0000 (---------------) + I dominic + 0x003219c7, // n0x1d43 c0x0000 (---------------) + I donetsk + 0x0024dc82, // n0x1d44 c0x0000 (---------------) + I dp + 0x0023a783, // n0x1d45 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1d46 c0x0000 (---------------) + I gov + 0x00200f02, // n0x1d47 c0x0000 (---------------) + I if + 0x002013c2, // n0x1d48 c0x0000 (---------------) + I in + 0x0024048f, // n0x1d49 c0x0000 (---------------) + I ivano-frankivsk + 0x0021acc2, // n0x1d4a c0x0000 (---------------) + I kh + 0x0023fb87, // n0x1d4b c0x0000 (---------------) + I kharkiv + 0x00240807, // n0x1d4c c0x0000 (---------------) + I kharkov + 0x0024b387, // n0x1d4d c0x0000 (---------------) + I kherson + 0x0024eb8c, // n0x1d4e c0x0000 (---------------) + I khmelnitskiy + 0x00250bcc, // n0x1d4f c0x0000 (---------------) + I khmelnytskyi + 0x00202ac4, // n0x1d50 c0x0000 (---------------) + I kiev + 0x0027ea4a, // n0x1d51 c0x0000 (---------------) + I kirovograd + 0x002316c2, // n0x1d52 c0x0000 (---------------) + I km + 0x00206fc2, // n0x1d53 c0x0000 (---------------) + I kr + 0x002b1504, // n0x1d54 c0x0000 (---------------) + I krym + 0x00254e42, // n0x1d55 c0x0000 (---------------) + I ks + 0x002bb402, // n0x1d56 c0x0000 (---------------) + I kv + 0x00250e04, // n0x1d57 c0x0000 (---------------) + I kyiv + 0x00219082, // n0x1d58 c0x0000 (---------------) + I lg + 0x00209e02, // n0x1d59 c0x0000 (---------------) + I lt + 0x00250a47, // n0x1d5a c0x0000 (---------------) + I lugansk + 0x00238045, // n0x1d5b c0x0000 (---------------) + I lutsk + 0x00205d02, // n0x1d5c c0x0000 (---------------) + I lv + 0x00240404, // n0x1d5d c0x0000 (---------------) + I lviv + 0x00367142, // n0x1d5e c0x0000 (---------------) + I mk + 0x002f0388, // n0x1d5f c0x0000 
(---------------) + I mykolaiv + 0x0021fe03, // n0x1d60 c0x0000 (---------------) + I net + 0x00203488, // n0x1d61 c0x0000 (---------------) + I nikolaev + 0x00202d82, // n0x1d62 c0x0000 (---------------) + I od + 0x0023bac5, // n0x1d63 c0x0000 (---------------) + I odesa + 0x00372946, // n0x1d64 c0x0000 (---------------) + I odessa + 0x0022d1c3, // n0x1d65 c0x0000 (---------------) + I org + 0x002063c2, // n0x1d66 c0x0000 (---------------) + I pl + 0x002dea07, // n0x1d67 c0x0000 (---------------) + I poltava + 0x00009302, // n0x1d68 c0x0000 (---------------) + pp + 0x002e1c85, // n0x1d69 c0x0000 (---------------) + I rivne + 0x0038af45, // n0x1d6a c0x0000 (---------------) + I rovno + 0x00206882, // n0x1d6b c0x0000 (---------------) + I rv + 0x0022d142, // n0x1d6c c0x0000 (---------------) + I sb + 0x00207f4a, // n0x1d6d c0x0000 (---------------) + I sebastopol + 0x0025140a, // n0x1d6e c0x0000 (---------------) + I sevastopol + 0x0024cdc2, // n0x1d6f c0x0000 (---------------) + I sm + 0x002f0304, // n0x1d70 c0x0000 (---------------) + I sumy + 0x002012c2, // n0x1d71 c0x0000 (---------------) + I te + 0x00320448, // n0x1d72 c0x0000 (---------------) + I ternopil + 0x00211342, // n0x1d73 c0x0000 (---------------) + I uz + 0x0029cfc8, // n0x1d74 c0x0000 (---------------) + I uzhgorod + 0x002f6cc7, // n0x1d75 c0x0000 (---------------) + I vinnica + 0x002f7889, // n0x1d76 c0x0000 (---------------) + I vinnytsia + 0x00203442, // n0x1d77 c0x0000 (---------------) + I vn + 0x002fcc45, // n0x1d78 c0x0000 (---------------) + I volyn + 0x00289d45, // n0x1d79 c0x0000 (---------------) + I yalta + 0x002c300b, // n0x1d7a c0x0000 (---------------) + I zaporizhzhe + 0x002c3a4c, // n0x1d7b c0x0000 (---------------) + I zaporizhzhia + 0x00230c08, // n0x1d7c c0x0000 (---------------) + I zhitomir + 0x002fd008, // n0x1d7d c0x0000 (---------------) + I zhytomyr + 0x00264682, // n0x1d7e c0x0000 (---------------) + I zp + 0x00219b02, // n0x1d7f c0x0000 (---------------) + I zt + 
0x00201542, // n0x1d80 c0x0000 (---------------) + I ac + 0x000ffa08, // n0x1d81 c0x0000 (---------------) + blogspot + 0x00200742, // n0x1d82 c0x0000 (---------------) + I co + 0x00233503, // n0x1d83 c0x0000 (---------------) + I com + 0x00202d42, // n0x1d84 c0x0000 (---------------) + I go + 0x00202c02, // n0x1d85 c0x0000 (---------------) + I ne + 0x00200282, // n0x1d86 c0x0000 (---------------) + I or + 0x0022d1c3, // n0x1d87 c0x0000 (---------------) + I org + 0x00200702, // n0x1d88 c0x0000 (---------------) + I sc + 0x00201542, // n0x1d89 c0x0000 (---------------) + I ac + 0x54e00742, // n0x1d8a c0x0153 (n0x1d94-n0x1d95) + I co + 0x5526cc83, // n0x1d8b c0x0154 (n0x1d95-n0x1d96) + I gov + 0x00322cc3, // n0x1d8c c0x0000 (---------------) + I ltd + 0x00203e82, // n0x1d8d c0x0000 (---------------) + I me + 0x0021fe03, // n0x1d8e c0x0000 (---------------) + I net + 0x0038eb83, // n0x1d8f c0x0000 (---------------) + I nhs + 0x0022d1c3, // n0x1d90 c0x0000 (---------------) + I org + 0x002db143, // n0x1d91 c0x0000 (---------------) + I plc + 0x00225d06, // n0x1d92 c0x0000 (---------------) + I police + 0x01617443, // n0x1d93 c0x0005 (---------------)* o I sch + 0x000ffa08, // n0x1d94 c0x0000 (---------------) + blogspot + 0x00006807, // n0x1d95 c0x0000 (---------------) + service + 0x55a01dc2, // n0x1d96 c0x0156 (n0x1dd5-n0x1dd8) + I ak + 0x55e001c2, // n0x1d97 c0x0157 (n0x1dd8-n0x1ddb) + I al + 0x56200a42, // n0x1d98 c0x0158 (n0x1ddb-n0x1dde) + I ar + 0x56601d42, // n0x1d99 c0x0159 (n0x1dde-n0x1de1) + I as + 0x56a05f42, // n0x1d9a c0x015a (n0x1de1-n0x1de4) + I az + 0x56e00302, // n0x1d9b c0x015b (n0x1de4-n0x1de7) + I ca + 0x57200742, // n0x1d9c c0x015c (n0x1de7-n0x1dea) + I co + 0x57631382, // n0x1d9d c0x015d (n0x1dea-n0x1ded) + I ct + 0x57a1fb42, // n0x1d9e c0x015e (n0x1ded-n0x1df0) + I dc + 0x57e04d82, // n0x1d9f c0x015f (n0x1df0-n0x1df3) + I de + 0x00271483, // n0x1da0 c0x0000 (---------------) + I dni + 0x00211503, // n0x1da1 c0x0000 (---------------) + I fed + 
0x582175c2, // n0x1da2 c0x0160 (n0x1df3-n0x1df6) + I fl + 0x58601042, // n0x1da3 c0x0161 (n0x1df6-n0x1df9) + I ga + 0x58a0dd42, // n0x1da4 c0x0162 (n0x1df9-n0x1dfc) + I gu + 0x58e00d82, // n0x1da5 c0x0163 (n0x1dfc-n0x1dfe) + I hi + 0x59207682, // n0x1da6 c0x0164 (n0x1dfe-n0x1e01) + I ia + 0x5960c782, // n0x1da7 c0x0165 (n0x1e01-n0x1e04) + I id + 0x59a02902, // n0x1da8 c0x0166 (n0x1e04-n0x1e07) + I il + 0x59e013c2, // n0x1da9 c0x0167 (n0x1e07-n0x1e0a) + I in + 0x000b8d45, // n0x1daa c0x0000 (---------------) + is-by + 0x00223543, // n0x1dab c0x0000 (---------------) + I isa + 0x0028cd44, // n0x1dac c0x0000 (---------------) + I kids + 0x5a254e42, // n0x1dad c0x0168 (n0x1e0a-n0x1e0d) + I ks + 0x5a636902, // n0x1dae c0x0169 (n0x1e0d-n0x1e10) + I ky + 0x5aa00802, // n0x1daf c0x016a (n0x1e10-n0x1e13) + I la + 0x0007958b, // n0x1db0 c0x0000 (---------------) + land-4-sale + 0x5ae00182, // n0x1db1 c0x016b (n0x1e13-n0x1e16) + I ma + 0x5b64da82, // n0x1db2 c0x016d (n0x1e19-n0x1e1c) + I md + 0x5ba03e82, // n0x1db3 c0x016e (n0x1e1c-n0x1e1f) + I me + 0x5be09002, // n0x1db4 c0x016f (n0x1e1f-n0x1e22) + I mi + 0x5c21fdc2, // n0x1db5 c0x0170 (n0x1e22-n0x1e25) + I mn + 0x5c607102, // n0x1db6 c0x0171 (n0x1e25-n0x1e28) + I mo + 0x5ca0f702, // n0x1db7 c0x0172 (n0x1e28-n0x1e2b) + I ms + 0x5ce04c02, // n0x1db8 c0x0173 (n0x1e2b-n0x1e2e) + I mt + 0x5d200642, // n0x1db9 c0x0174 (n0x1e2e-n0x1e31) + I nc + 0x5d600882, // n0x1dba c0x0175 (n0x1e31-n0x1e33) + I nd + 0x5da02c02, // n0x1dbb c0x0176 (n0x1e33-n0x1e36) + I ne + 0x5de03382, // n0x1dbc c0x0177 (n0x1e36-n0x1e39) + I nh + 0x5e204002, // n0x1dbd c0x0178 (n0x1e39-n0x1e3c) + I nj + 0x5e63db02, // n0x1dbe c0x0179 (n0x1e3c-n0x1e3f) + I nm + 0x002e0f43, // n0x1dbf c0x0000 (---------------) + I nsn + 0x5ea03d42, // n0x1dc0 c0x017a (n0x1e3f-n0x1e42) + I nv + 0x5ee15842, // n0x1dc1 c0x017b (n0x1e42-n0x1e45) + I ny + 0x5f207382, // n0x1dc2 c0x017c (n0x1e45-n0x1e48) + I oh + 0x5f601b82, // n0x1dc3 c0x017d (n0x1e48-n0x1e4b) + I ok + 0x5fa00282, // 
n0x1dc4 c0x017e (n0x1e4b-n0x1e4e) + I or + 0x5fe0ac42, // n0x1dc5 c0x017f (n0x1e4e-n0x1e51) + I pa + 0x60204602, // n0x1dc6 c0x0180 (n0x1e51-n0x1e54) + I pr + 0x60600a82, // n0x1dc7 c0x0181 (n0x1e54-n0x1e57) + I ri + 0x60a00702, // n0x1dc8 c0x0182 (n0x1e57-n0x1e5a) + I sc + 0x60e496c2, // n0x1dc9 c0x0183 (n0x1e5a-n0x1e5c) + I sd + 0x000e868c, // n0x1dca c0x0000 (---------------) + stuff-4-sale + 0x6124f882, // n0x1dcb c0x0184 (n0x1e5c-n0x1e5f) + I tn + 0x61673442, // n0x1dcc c0x0185 (n0x1e5f-n0x1e62) + I tx + 0x61a03b02, // n0x1dcd c0x0186 (n0x1e62-n0x1e65) + I ut + 0x61e000c2, // n0x1dce c0x0187 (n0x1e65-n0x1e68) + I va + 0x62205d42, // n0x1dcf c0x0188 (n0x1e68-n0x1e6b) + I vi + 0x62671f82, // n0x1dd0 c0x0189 (n0x1e6b-n0x1e6e) + I vt + 0x62a010c2, // n0x1dd1 c0x018a (n0x1e6e-n0x1e71) + I wa + 0x62e0ae82, // n0x1dd2 c0x018b (n0x1e71-n0x1e74) + I wi + 0x632755c2, // n0x1dd3 c0x018c (n0x1e74-n0x1e75) + I wv + 0x63674502, // n0x1dd4 c0x018d (n0x1e75-n0x1e78) + I wy + 0x0022e182, // n0x1dd5 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dd6 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1dd7 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dd8 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dd9 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1dda c0x0000 (---------------) + I lib + 0x0022e182, // n0x1ddb c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1ddc c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1ddd c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dde c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1ddf c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1de0 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1de1 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1de2 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1de3 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1de4 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1de5 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1de6 
c0x0000 (---------------) + I lib + 0x0022e182, // n0x1de7 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1de8 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1de9 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dea c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1deb c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1dec c0x0000 (---------------) + I lib + 0x0022e182, // n0x1ded c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dee c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1def c0x0000 (---------------) + I lib + 0x0022e182, // n0x1df0 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1df1 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1df2 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1df3 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1df4 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1df5 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1df6 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1df7 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1df8 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1df9 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dfa c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1dfb c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dfc c0x0000 (---------------) + I cc + 0x0027b703, // n0x1dfd c0x0000 (---------------) + I lib + 0x0022e182, // n0x1dfe c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1dff c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e00 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e01 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e02 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e03 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e04 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e05 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e06 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e07 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e08 
c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e09 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e0a c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e0b c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e0c c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e0d c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e0e c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e0f c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e10 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e11 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e12 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e13 c0x0000 (---------------) + I cc + 0x5b309ac3, // n0x1e14 c0x016c (n0x1e16-n0x1e19) + I k12 + 0x0027b703, // n0x1e15 c0x0000 (---------------) + I lib + 0x0022a644, // n0x1e16 c0x0000 (---------------) + I chtr + 0x00289786, // n0x1e17 c0x0000 (---------------) + I paroch + 0x002e5543, // n0x1e18 c0x0000 (---------------) + I pvt + 0x0022e182, // n0x1e19 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e1a c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e1b c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e1c c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e1d c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e1e c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e1f c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e20 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e21 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e22 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e23 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e24 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e25 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e26 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e27 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e28 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e29 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e2a 
c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e2b c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e2c c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e2d c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e2e c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e2f c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e30 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e31 c0x0000 (---------------) + I cc + 0x0027b703, // n0x1e32 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e33 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e34 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e35 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e36 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e37 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e38 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e39 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e3a c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e3b c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e3c c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e3d c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e3e c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e3f c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e40 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e41 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e42 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e43 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e44 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e45 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e46 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e47 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e48 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e49 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e4a c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e4b c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e4c 
c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e4d c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e4e c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e4f c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e50 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e51 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e52 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e53 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e54 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e55 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e56 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e57 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e58 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e59 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e5a c0x0000 (---------------) + I cc + 0x0027b703, // n0x1e5b c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e5c c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e5d c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e5e c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e5f c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e60 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e61 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e62 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e63 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e64 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e65 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e66 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e67 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e68 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e69 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e6a c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e6b c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e6c c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e6d c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e6e 
c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e6f c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e70 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e71 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e72 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e73 c0x0000 (---------------) + I lib + 0x0022e182, // n0x1e74 c0x0000 (---------------) + I cc + 0x0022e182, // n0x1e75 c0x0000 (---------------) + I cc + 0x00309ac3, // n0x1e76 c0x0000 (---------------) + I k12 + 0x0027b703, // n0x1e77 c0x0000 (---------------) + I lib + 0x63e33503, // n0x1e78 c0x018f (n0x1e7e-n0x1e7f) + I com + 0x0023a783, // n0x1e79 c0x0000 (---------------) + I edu + 0x0024a483, // n0x1e7a c0x0000 (---------------) + I gub + 0x00209003, // n0x1e7b c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1e7c c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e7d c0x0000 (---------------) + I org + 0x000ffa08, // n0x1e7e c0x0000 (---------------) + blogspot + 0x00200742, // n0x1e7f c0x0000 (---------------) + I co + 0x00233503, // n0x1e80 c0x0000 (---------------) + I com + 0x0021fe03, // n0x1e81 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e82 c0x0000 (---------------) + I org + 0x00233503, // n0x1e83 c0x0000 (---------------) + I com + 0x0023a783, // n0x1e84 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1e85 c0x0000 (---------------) + I gov + 0x00209003, // n0x1e86 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1e87 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e88 c0x0000 (---------------) + I org + 0x0024bf84, // n0x1e89 c0x0000 (---------------) + I arts + 0x00200742, // n0x1e8a c0x0000 (---------------) + I co + 0x00233503, // n0x1e8b c0x0000 (---------------) + I com + 0x00328c03, // n0x1e8c c0x0000 (---------------) + I e12 + 0x0023a783, // n0x1e8d c0x0000 (---------------) + I edu + 0x0024d9c4, // n0x1e8e c0x0000 (---------------) + I firm + 0x00213183, // n0x1e8f c0x0000 (---------------) + I gob + 0x0026cc83, // 
n0x1e90 c0x0000 (---------------) + I gov + 0x003a1244, // n0x1e91 c0x0000 (---------------) + I info + 0x00201603, // n0x1e92 c0x0000 (---------------) + I int + 0x00209003, // n0x1e93 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1e94 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e95 c0x0000 (---------------) + I org + 0x0022a5c3, // n0x1e96 c0x0000 (---------------) + I rec + 0x00391185, // n0x1e97 c0x0000 (---------------) + I store + 0x002d59c3, // n0x1e98 c0x0000 (---------------) + I tec + 0x00221a03, // n0x1e99 c0x0000 (---------------) + I web + 0x00200742, // n0x1e9a c0x0000 (---------------) + I co + 0x00233503, // n0x1e9b c0x0000 (---------------) + I com + 0x00309ac3, // n0x1e9c c0x0000 (---------------) + I k12 + 0x0021fe03, // n0x1e9d c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1e9e c0x0000 (---------------) + I org + 0x00201542, // n0x1e9f c0x0000 (---------------) + I ac + 0x00330b83, // n0x1ea0 c0x0000 (---------------) + I biz + 0x000ffa08, // n0x1ea1 c0x0000 (---------------) + blogspot + 0x00233503, // n0x1ea2 c0x0000 (---------------) + I com + 0x0023a783, // n0x1ea3 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1ea4 c0x0000 (---------------) + I gov + 0x0036b386, // n0x1ea5 c0x0000 (---------------) + I health + 0x003a1244, // n0x1ea6 c0x0000 (---------------) + I info + 0x00201603, // n0x1ea7 c0x0000 (---------------) + I int + 0x00205284, // n0x1ea8 c0x0000 (---------------) + I name + 0x0021fe03, // n0x1ea9 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1eaa c0x0000 (---------------) + I org + 0x00220e43, // n0x1eab c0x0000 (---------------) + I pro + 0x00233503, // n0x1eac c0x0000 (---------------) + I com + 0x0023a783, // n0x1ead c0x0000 (---------------) + I edu + 0x0021fe03, // n0x1eae c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1eaf c0x0000 (---------------) + I org + 0x00233503, // n0x1eb0 c0x0000 (---------------) + I com + 0x00013886, // n0x1eb1 c0x0000 (---------------) + dyndns 
+ 0x0023a783, // n0x1eb2 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1eb3 c0x0000 (---------------) + I gov + 0x000d0d86, // n0x1eb4 c0x0000 (---------------) + mypets + 0x0021fe03, // n0x1eb5 c0x0000 (---------------) + I net + 0x0022d1c3, // n0x1eb6 c0x0000 (---------------) + I org + 0x00309e08, // n0x1eb7 c0x0000 (---------------) + I xn--80au + 0x0030c549, // n0x1eb8 c0x0000 (---------------) + I xn--90azh + 0x00318cc9, // n0x1eb9 c0x0000 (---------------) + I xn--c1avg + 0x00329088, // n0x1eba c0x0000 (---------------) + I xn--d1at + 0x00374908, // n0x1ebb c0x0000 (---------------) + I xn--o1ac + 0x00374909, // n0x1ebc c0x0000 (---------------) + I xn--o1ach + 0x00201542, // n0x1ebd c0x0000 (---------------) + I ac + 0x00209c85, // n0x1ebe c0x0000 (---------------) + I agric + 0x0023adc3, // n0x1ebf c0x0000 (---------------) + I alt + 0x66600742, // n0x1ec0 c0x0199 (n0x1ece-n0x1ecf) + I co + 0x0023a783, // n0x1ec1 c0x0000 (---------------) + I edu + 0x0026cc83, // n0x1ec2 c0x0000 (---------------) + I gov + 0x0027d907, // n0x1ec3 c0x0000 (---------------) + I grondar + 0x00274483, // n0x1ec4 c0x0000 (---------------) + I law + 0x00209003, // n0x1ec5 c0x0000 (---------------) + I mil + 0x0021fe03, // n0x1ec6 c0x0000 (---------------) + I net + 0x00202d03, // n0x1ec7 c0x0000 (---------------) + I ngo + 0x00211803, // n0x1ec8 c0x0000 (---------------) + I nis + 0x00201483, // n0x1ec9 c0x0000 (---------------) + I nom + 0x0022d1c3, // n0x1eca c0x0000 (---------------) + I org + 0x0023d0c6, // n0x1ecb c0x0000 (---------------) + I school + 0x00200142, // n0x1ecc c0x0000 (---------------) + I tm + 0x00221a03, // n0x1ecd c0x0000 (---------------) + I web + 0x000ffa08, // n0x1ece c0x0000 (---------------) + blogspot } // children is the list of nodes' children, the parent's wildcard bit and the @@ -8353,411 +8373,414 @@ 0x40000000, // c0x0003 (---------------)* + 0x50000000, // c0x0004 (---------------)* ! 
0x60000000, // c0x0005 (---------------)* o - 0x01834607, // c0x0006 (n0x0607-n0x060d) + - 0x0183860d, // c0x0007 (n0x060d-n0x060e) + - 0x0185860e, // c0x0008 (n0x060e-n0x0616) + - 0x019b4616, // c0x0009 (n0x0616-n0x066d) + - 0x019c866d, // c0x000a (n0x066d-n0x0672) + - 0x019dc672, // c0x000b (n0x0672-n0x0677) + - 0x019ec677, // c0x000c (n0x0677-n0x067b) + - 0x01a0867b, // c0x000d (n0x067b-n0x0682) + - 0x01a0c682, // c0x000e (n0x0682-n0x0683) + - 0x01a24683, // c0x000f (n0x0683-n0x0689) + - 0x01a48689, // c0x0010 (n0x0689-n0x0692) + - 0x01a4c692, // c0x0011 (n0x0692-n0x0693) + - 0x01a64693, // c0x0012 (n0x0693-n0x0699) + - 0x01a68699, // c0x0013 (n0x0699-n0x069a) + - 0x01a8469a, // c0x0014 (n0x069a-n0x06a1) + - 0x01a886a1, // c0x0015 (n0x06a1-n0x06a2) + - 0x01ad06a2, // c0x0016 (n0x06a2-n0x06b4) + - 0x01ad46b4, // c0x0017 (n0x06b4-n0x06b5) + - 0x01af46b5, // c0x0018 (n0x06b5-n0x06bd) + - 0x01b086bd, // c0x0019 (n0x06bd-n0x06c2) + - 0x01b0c6c2, // c0x001a (n0x06c2-n0x06c3) + - 0x01b3c6c3, // c0x001b (n0x06c3-n0x06cf) + - 0x01b686cf, // c0x001c (n0x06cf-n0x06da) + - 0x01b906da, // c0x001d (n0x06da-n0x06e4) + - 0x01b986e4, // c0x001e (n0x06e4-n0x06e6) + - 0x01b9c6e6, // c0x001f (n0x06e6-n0x06e7) + - 0x01c306e7, // c0x0020 (n0x06e7-n0x070c) + - 0x01c4470c, // c0x0021 (n0x070c-n0x0711) + - 0x01c58711, // c0x0022 (n0x0711-n0x0716) + - 0x01c78716, // c0x0023 (n0x0716-n0x071e) + - 0x01c8871e, // c0x0024 (n0x071e-n0x0722) + - 0x01c9c722, // c0x0025 (n0x0722-n0x0727) + - 0x01cc0727, // c0x0026 (n0x0727-n0x0730) + - 0x01dd8730, // c0x0027 (n0x0730-n0x0776) + - 0x01ddc776, // c0x0028 (n0x0776-n0x0777) + - 0x01df0777, // c0x0029 (n0x0777-n0x077c) + - 0x01e0477c, // c0x002a (n0x077c-n0x0781) + - 0x01e0c781, // c0x002b (n0x0781-n0x0783) + - 0x01e1c783, // c0x002c (n0x0783-n0x0787) + - 0x01e20787, // c0x002d (n0x0787-n0x0788) + - 0x01e38788, // c0x002e (n0x0788-n0x078e) + - 0x01e7c78e, // c0x002f (n0x078e-n0x079f) + - 0x01e8c79f, // c0x0030 (n0x079f-n0x07a3) + - 0x01e907a3, // 
c0x0031 (n0x07a3-n0x07a4) + - 0x01e947a4, // c0x0032 (n0x07a4-n0x07a5) + - 0x01e987a5, // c0x0033 (n0x07a5-n0x07a6) + - 0x01ed47a6, // c0x0034 (n0x07a6-n0x07b5) + - 0x61ed87b5, // c0x0035 (n0x07b5-n0x07b6)* o - 0x01eec7b6, // c0x0036 (n0x07b6-n0x07bb) + - 0x01efc7bb, // c0x0037 (n0x07bb-n0x07bf) + - 0x01fb07bf, // c0x0038 (n0x07bf-n0x07ec) + - 0x21fb47ec, // c0x0039 (n0x07ec-n0x07ed) o - 0x01fb87ed, // c0x003a (n0x07ed-n0x07ee) + - 0x01fbc7ee, // c0x003b (n0x07ee-n0x07ef) + - 0x21fc07ef, // c0x003c (n0x07ef-n0x07f0) o - 0x21fc47f0, // c0x003d (n0x07f0-n0x07f1) o - 0x01ff87f1, // c0x003e (n0x07f1-n0x07fe) + - 0x01ffc7fe, // c0x003f (n0x07fe-n0x07ff) + - 0x023447ff, // c0x0040 (n0x07ff-n0x08d1) + - 0x2238c8d1, // c0x0041 (n0x08d1-n0x08e3) o - 0x023b08e3, // c0x0042 (n0x08e3-n0x08ec) + - 0x023b88ec, // c0x0043 (n0x08ec-n0x08ee) + - 0x223bc8ee, // c0x0044 (n0x08ee-n0x08ef) o - 0x223c08ef, // c0x0045 (n0x08ef-n0x08f0) o - 0x023dc8f0, // c0x0046 (n0x08f0-n0x08f7) + - 0x023f48f7, // c0x0047 (n0x08f7-n0x08fd) + - 0x023f88fd, // c0x0048 (n0x08fd-n0x08fe) + - 0x024088fe, // c0x0049 (n0x08fe-n0x0902) + - 0x02410902, // c0x004a (n0x0902-n0x0904) + - 0x22444904, // c0x004b (n0x0904-n0x0911) o - 0x02448911, // c0x004c (n0x0911-n0x0912) + - 0x02450912, // c0x004d (n0x0912-n0x0914) + - 0x02470914, // c0x004e (n0x0914-n0x091c) + - 0x0247491c, // c0x004f (n0x091c-n0x091d) + - 0x0248891d, // c0x0050 (n0x091d-n0x0922) + - 0x024b0922, // c0x0051 (n0x0922-n0x092c) + - 0x024d092c, // c0x0052 (n0x092c-n0x0934) + - 0x02500934, // c0x0053 (n0x0934-n0x0940) + - 0x02528940, // c0x0054 (n0x0940-n0x094a) + - 0x0252c94a, // c0x0055 (n0x094a-n0x094b) + - 0x0255094b, // c0x0056 (n0x094b-n0x0954) + - 0x02554954, // c0x0057 (n0x0954-n0x0955) + - 0x02568955, // c0x0058 (n0x0955-n0x095a) + - 0x0256c95a, // c0x0059 (n0x095a-n0x095b) + - 0x0258c95b, // c0x005a (n0x095b-n0x0963) + - 0x02598963, // c0x005b (n0x0963-n0x0966) + - 0x025f8966, // c0x005c (n0x0966-n0x097e) + - 0x0261497e, // c0x005d 
(n0x097e-n0x0985) + - 0x02620985, // c0x005e (n0x0985-n0x0988) + - 0x02634988, // c0x005f (n0x0988-n0x098d) + - 0x0264c98d, // c0x0060 (n0x098d-n0x0993) + - 0x02660993, // c0x0061 (n0x0993-n0x0998) + - 0x02678998, // c0x0062 (n0x0998-n0x099e) + - 0x0269099e, // c0x0063 (n0x099e-n0x09a4) + - 0x026a89a4, // c0x0064 (n0x09a4-n0x09aa) + - 0x026c49aa, // c0x0065 (n0x09aa-n0x09b1) + - 0x026dc9b1, // c0x0066 (n0x09b1-n0x09b7) + - 0x0273c9b7, // c0x0067 (n0x09b7-n0x09cf) + - 0x027549cf, // c0x0068 (n0x09cf-n0x09d5) + - 0x027689d5, // c0x0069 (n0x09d5-n0x09da) + - 0x027ac9da, // c0x006a (n0x09da-n0x09eb) + - 0x0282c9eb, // c0x006b (n0x09eb-n0x0a0b) + - 0x02858a0b, // c0x006c (n0x0a0b-n0x0a16) + - 0x0285ca16, // c0x006d (n0x0a16-n0x0a17) + - 0x02864a17, // c0x006e (n0x0a17-n0x0a19) + - 0x02884a19, // c0x006f (n0x0a19-n0x0a21) + - 0x02888a21, // c0x0070 (n0x0a21-n0x0a22) + - 0x028a4a22, // c0x0071 (n0x0a22-n0x0a29) + - 0x028aca29, // c0x0072 (n0x0a29-n0x0a2b) + - 0x028e0a2b, // c0x0073 (n0x0a2b-n0x0a38) + - 0x02908a38, // c0x0074 (n0x0a38-n0x0a42) + - 0x0290ca42, // c0x0075 (n0x0a42-n0x0a43) + - 0x02924a43, // c0x0076 (n0x0a43-n0x0a49) + - 0x0293ca49, // c0x0077 (n0x0a49-n0x0a4f) + - 0x02960a4f, // c0x0078 (n0x0a4f-n0x0a58) + - 0x02980a58, // c0x0079 (n0x0a58-n0x0a60) + - 0x02f44a60, // c0x007a (n0x0a60-n0x0bd1) + - 0x02f50bd1, // c0x007b (n0x0bd1-n0x0bd4) + - 0x02f70bd4, // c0x007c (n0x0bd4-n0x0bdc) + - 0x0312cbdc, // c0x007d (n0x0bdc-n0x0c4b) + - 0x031fcc4b, // c0x007e (n0x0c4b-n0x0c7f) + - 0x0326cc7f, // c0x007f (n0x0c7f-n0x0c9b) + - 0x032c4c9b, // c0x0080 (n0x0c9b-n0x0cb1) + - 0x033accb1, // c0x0081 (n0x0cb1-n0x0ceb) + - 0x03404ceb, // c0x0082 (n0x0ceb-n0x0d01) + - 0x03440d01, // c0x0083 (n0x0d01-n0x0d10) + - 0x0353cd10, // c0x0084 (n0x0d10-n0x0d4f) + - 0x03608d4f, // c0x0085 (n0x0d4f-n0x0d82) + - 0x036a0d82, // c0x0086 (n0x0d82-n0x0da8) + - 0x03730da8, // c0x0087 (n0x0da8-n0x0dcc) + - 0x03794dcc, // c0x0088 (n0x0dcc-n0x0de5) + - 0x039ccde5, // c0x0089 (n0x0de5-n0x0e73) + 
- 0x03a84e73, // c0x008a (n0x0e73-n0x0ea1) + - 0x03b50ea1, // c0x008b (n0x0ea1-n0x0ed4) + - 0x03b9ced4, // c0x008c (n0x0ed4-n0x0ee7) + - 0x03c24ee7, // c0x008d (n0x0ee7-n0x0f09) + - 0x03c60f09, // c0x008e (n0x0f09-n0x0f18) + - 0x03cb0f18, // c0x008f (n0x0f18-n0x0f2c) + - 0x03d28f2c, // c0x0090 (n0x0f2c-n0x0f4a) + - 0x63d2cf4a, // c0x0091 (n0x0f4a-n0x0f4b)* o - 0x63d30f4b, // c0x0092 (n0x0f4b-n0x0f4c)* o - 0x63d34f4c, // c0x0093 (n0x0f4c-n0x0f4d)* o - 0x03db0f4d, // c0x0094 (n0x0f4d-n0x0f6c) + - 0x03e18f6c, // c0x0095 (n0x0f6c-n0x0f86) + - 0x03e94f86, // c0x0096 (n0x0f86-n0x0fa5) + - 0x03f0cfa5, // c0x0097 (n0x0fa5-n0x0fc3) + - 0x03f90fc3, // c0x0098 (n0x0fc3-n0x0fe4) + - 0x03ffcfe4, // c0x0099 (n0x0fe4-n0x0fff) + - 0x04128fff, // c0x009a (n0x0fff-n0x104a) + - 0x0418104a, // c0x009b (n0x104a-n0x1060) + - 0x64185060, // c0x009c (n0x1060-n0x1061)* o - 0x0421d061, // c0x009d (n0x1061-n0x1087) + - 0x042a5087, // c0x009e (n0x1087-n0x10a9) + - 0x042f10a9, // c0x009f (n0x10a9-n0x10bc) + - 0x043590bc, // c0x00a0 (n0x10bc-n0x10d6) + - 0x044010d6, // c0x00a1 (n0x10d6-n0x1100) + - 0x044c9100, // c0x00a2 (n0x1100-n0x1132) + - 0x04531132, // c0x00a3 (n0x1132-n0x114c) + - 0x0464514c, // c0x00a4 (n0x114c-n0x1191) + - 0x64649191, // c0x00a5 (n0x1191-n0x1192)* o - 0x6464d192, // c0x00a6 (n0x1192-n0x1193)* o - 0x046a9193, // c0x00a7 (n0x1193-n0x11aa) + - 0x047051aa, // c0x00a8 (n0x11aa-n0x11c1) + - 0x047951c1, // c0x00a9 (n0x11c1-n0x11e5) + - 0x048111e5, // c0x00aa (n0x11e5-n0x1204) + - 0x04855204, // c0x00ab (n0x1204-n0x1215) + - 0x04939215, // c0x00ac (n0x1215-n0x124e) + - 0x0496d24e, // c0x00ad (n0x124e-n0x125b) + - 0x049cd25b, // c0x00ae (n0x125b-n0x1273) + - 0x04a41273, // c0x00af (n0x1273-n0x1290) + - 0x04ac9290, // c0x00b0 (n0x1290-n0x12b2) + - 0x04b092b2, // c0x00b1 (n0x12b2-n0x12c2) + - 0x04b792c2, // c0x00b2 (n0x12c2-n0x12de) + - 0x64b7d2de, // c0x00b3 (n0x12de-n0x12df)* o - 0x64b812df, // c0x00b4 (n0x12df-n0x12e0)* o - 0x24b852e0, // c0x00b5 (n0x12e0-n0x12e1) o - 
0x04b9d2e1, // c0x00b6 (n0x12e1-n0x12e7) + - 0x04bb92e7, // c0x00b7 (n0x12e7-n0x12ee) + - 0x04bfd2ee, // c0x00b8 (n0x12ee-n0x12ff) + - 0x04c0d2ff, // c0x00b9 (n0x12ff-n0x1303) + - 0x04c25303, // c0x00ba (n0x1303-n0x1309) + - 0x04c9d309, // c0x00bb (n0x1309-n0x1327) + - 0x04cb1327, // c0x00bc (n0x1327-n0x132c) + - 0x04cc932c, // c0x00bd (n0x132c-n0x1332) + - 0x04ced332, // c0x00be (n0x1332-n0x133b) + - 0x04d0133b, // c0x00bf (n0x133b-n0x1340) + - 0x04d19340, // c0x00c0 (n0x1340-n0x1346) + - 0x04d1d346, // c0x00c1 (n0x1346-n0x1347) + - 0x04d59347, // c0x00c2 (n0x1347-n0x1356) + - 0x04d6d356, // c0x00c3 (n0x1356-n0x135b) + - 0x04d7535b, // c0x00c4 (n0x135b-n0x135d) + - 0x04d7d35d, // c0x00c5 (n0x135d-n0x135f) + - 0x04d8135f, // c0x00c6 (n0x135f-n0x1360) + - 0x04da5360, // c0x00c7 (n0x1360-n0x1369) + - 0x04dc9369, // c0x00c8 (n0x1369-n0x1372) + - 0x04de1372, // c0x00c9 (n0x1372-n0x1378) + - 0x04de9378, // c0x00ca (n0x1378-n0x137a) + - 0x04ded37a, // c0x00cb (n0x137a-n0x137b) + - 0x04e2137b, // c0x00cc (n0x137b-n0x1388) + - 0x04e45388, // c0x00cd (n0x1388-n0x1391) + - 0x04e65391, // c0x00ce (n0x1391-n0x1399) + - 0x04e81399, // c0x00cf (n0x1399-n0x13a0) + - 0x04e913a0, // c0x00d0 (n0x13a0-n0x13a4) + - 0x04ea53a4, // c0x00d1 (n0x13a4-n0x13a9) + - 0x04ea93a9, // c0x00d2 (n0x13a9-n0x13aa) + - 0x04eb13aa, // c0x00d3 (n0x13aa-n0x13ac) + - 0x04ec53ac, // c0x00d4 (n0x13ac-n0x13b1) + - 0x04ed53b1, // c0x00d5 (n0x13b1-n0x13b5) + - 0x04ed93b5, // c0x00d6 (n0x13b5-n0x13b6) + - 0x04ef53b6, // c0x00d7 (n0x13b6-n0x13bd) + - 0x057853bd, // c0x00d8 (n0x13bd-n0x15e1) + - 0x057bd5e1, // c0x00d9 (n0x15e1-n0x15ef) + - 0x057e95ef, // c0x00da (n0x15ef-n0x15fa) + - 0x058015fa, // c0x00db (n0x15fa-n0x1600) + - 0x05821600, // c0x00dc (n0x1600-n0x1608) + - 0x65825608, // c0x00dd (n0x1608-n0x1609)* o - 0x05869609, // c0x00de (n0x1609-n0x161a) + - 0x0587161a, // c0x00df (n0x161a-n0x161c) + - 0x2587561c, // c0x00e0 (n0x161c-n0x161d) o - 0x2587961d, // c0x00e1 (n0x161d-n0x161e) o - 0x0587d61e, // 
c0x00e2 (n0x161e-n0x161f) + - 0x0595161f, // c0x00e3 (n0x161f-n0x1654) + - 0x25955654, // c0x00e4 (n0x1654-n0x1655) o - 0x2595d655, // c0x00e5 (n0x1655-n0x1657) o - 0x25965657, // c0x00e6 (n0x1657-n0x1659) o - 0x25971659, // c0x00e7 (n0x1659-n0x165c) o - 0x0599965c, // c0x00e8 (n0x165c-n0x1666) + - 0x059c1666, // c0x00e9 (n0x1666-n0x1670) + - 0x059c5670, // c0x00ea (n0x1670-n0x1671) + - 0x259fd671, // c0x00eb (n0x1671-n0x167f) o - 0x05a0967f, // c0x00ec (n0x167f-n0x1682) + - 0x06561682, // c0x00ed (n0x1682-n0x1958) + - 0x06565958, // c0x00ee (n0x1958-n0x1959) + - 0x06569959, // c0x00ef (n0x1959-n0x195a) + - 0x2656d95a, // c0x00f0 (n0x195a-n0x195b) o - 0x0657195b, // c0x00f1 (n0x195b-n0x195c) + - 0x2657595c, // c0x00f2 (n0x195c-n0x195d) o - 0x0657995d, // c0x00f3 (n0x195d-n0x195e) + - 0x2658595e, // c0x00f4 (n0x195e-n0x1961) o - 0x06589961, // c0x00f5 (n0x1961-n0x1962) + - 0x0658d962, // c0x00f6 (n0x1962-n0x1963) + - 0x26591963, // c0x00f7 (n0x1963-n0x1964) o - 0x06595964, // c0x00f8 (n0x1964-n0x1965) + - 0x2659d965, // c0x00f9 (n0x1965-n0x1967) o - 0x065a1967, // c0x00fa (n0x1967-n0x1968) + - 0x065a5968, // c0x00fb (n0x1968-n0x1969) + - 0x265b5969, // c0x00fc (n0x1969-n0x196d) o - 0x065b996d, // c0x00fd (n0x196d-n0x196e) + - 0x065bd96e, // c0x00fe (n0x196e-n0x196f) + - 0x065c196f, // c0x00ff (n0x196f-n0x1970) + - 0x065c5970, // c0x0100 (n0x1970-n0x1971) + - 0x265c9971, // c0x0101 (n0x1971-n0x1972) o - 0x065cd972, // c0x0102 (n0x1972-n0x1973) + - 0x065d1973, // c0x0103 (n0x1973-n0x1974) + - 0x065d5974, // c0x0104 (n0x1974-n0x1975) + - 0x065d9975, // c0x0105 (n0x1975-n0x1976) + - 0x265e1976, // c0x0106 (n0x1976-n0x1978) o - 0x065e5978, // c0x0107 (n0x1978-n0x1979) + - 0x065e9979, // c0x0108 (n0x1979-n0x197a) + - 0x065ed97a, // c0x0109 (n0x197a-n0x197b) + - 0x265f197b, // c0x010a (n0x197b-n0x197c) o - 0x065f597c, // c0x010b (n0x197c-n0x197d) + - 0x265fd97d, // c0x010c (n0x197d-n0x197f) o - 0x2660197f, // c0x010d (n0x197f-n0x1980) o - 0x0661d980, // c0x010e 
(n0x1980-n0x1987) + - 0x06629987, // c0x010f (n0x1987-n0x198a) + - 0x0666998a, // c0x0110 (n0x198a-n0x199a) + - 0x0666d99a, // c0x0111 (n0x199a-n0x199b) + - 0x0669199b, // c0x0112 (n0x199b-n0x19a4) + - 0x067859a4, // c0x0113 (n0x19a4-n0x19e1) + - 0x2678d9e1, // c0x0114 (n0x19e1-n0x19e3) o - 0x267919e3, // c0x0115 (n0x19e3-n0x19e4) o - 0x267959e4, // c0x0116 (n0x19e4-n0x19e5) o - 0x0679d9e5, // c0x0117 (n0x19e5-n0x19e7) + - 0x068799e7, // c0x0118 (n0x19e7-n0x1a1e) + - 0x068a5a1e, // c0x0119 (n0x1a1e-n0x1a29) + - 0x068c5a29, // c0x011a (n0x1a29-n0x1a31) + - 0x068d1a31, // c0x011b (n0x1a31-n0x1a34) + - 0x068f1a34, // c0x011c (n0x1a34-n0x1a3c) + - 0x06929a3c, // c0x011d (n0x1a3c-n0x1a4a) + - 0x06bbda4a, // c0x011e (n0x1a4a-n0x1aef) + - 0x06c79aef, // c0x011f (n0x1aef-n0x1b1e) + - 0x06c8db1e, // c0x0120 (n0x1b1e-n0x1b23) + - 0x06cc1b23, // c0x0121 (n0x1b23-n0x1b30) + - 0x06cddb30, // c0x0122 (n0x1b30-n0x1b37) + - 0x06cf9b37, // c0x0123 (n0x1b37-n0x1b3e) + - 0x06d1db3e, // c0x0124 (n0x1b3e-n0x1b47) + - 0x06d35b47, // c0x0125 (n0x1b47-n0x1b4d) + - 0x06d51b4d, // c0x0126 (n0x1b4d-n0x1b54) + - 0x06d75b54, // c0x0127 (n0x1b54-n0x1b5d) + - 0x06d85b5d, // c0x0128 (n0x1b5d-n0x1b61) + - 0x06db5b61, // c0x0129 (n0x1b61-n0x1b6d) + - 0x06dd1b6d, // c0x012a (n0x1b6d-n0x1b74) + - 0x06fddb74, // c0x012b (n0x1b74-n0x1bf7) + - 0x07001bf7, // c0x012c (n0x1bf7-n0x1c00) + - 0x07021c00, // c0x012d (n0x1c00-n0x1c08) + - 0x07035c08, // c0x012e (n0x1c08-n0x1c0d) + - 0x07049c0d, // c0x012f (n0x1c0d-n0x1c12) + - 0x07069c12, // c0x0130 (n0x1c12-n0x1c1a) + - 0x0710dc1a, // c0x0131 (n0x1c1a-n0x1c43) + - 0x07129c43, // c0x0132 (n0x1c43-n0x1c4a) + - 0x07145c4a, // c0x0133 (n0x1c4a-n0x1c51) + - 0x07149c51, // c0x0134 (n0x1c51-n0x1c52) + - 0x0714dc52, // c0x0135 (n0x1c52-n0x1c53) + - 0x07161c53, // c0x0136 (n0x1c53-n0x1c58) + - 0x07181c58, // c0x0137 (n0x1c58-n0x1c60) + - 0x0718dc60, // c0x0138 (n0x1c60-n0x1c63) + - 0x071bdc63, // c0x0139 (n0x1c63-n0x1c6f) + - 0x0723dc6f, // c0x013a (n0x1c6f-n0x1c8f) + 
- 0x07251c8f, // c0x013b (n0x1c8f-n0x1c94) + - 0x07255c94, // c0x013c (n0x1c94-n0x1c95) + - 0x0726dc95, // c0x013d (n0x1c95-n0x1c9b) + - 0x07279c9b, // c0x013e (n0x1c9b-n0x1c9e) + - 0x0727dc9e, // c0x013f (n0x1c9e-n0x1c9f) + - 0x07299c9f, // c0x0140 (n0x1c9f-n0x1ca6) + - 0x072d5ca6, // c0x0141 (n0x1ca6-n0x1cb5) + - 0x072d9cb5, // c0x0142 (n0x1cb5-n0x1cb6) + - 0x072f9cb6, // c0x0143 (n0x1cb6-n0x1cbe) + - 0x07349cbe, // c0x0144 (n0x1cbe-n0x1cd2) + - 0x07361cd2, // c0x0145 (n0x1cd2-n0x1cd8) + - 0x073b5cd8, // c0x0146 (n0x1cd8-n0x1ced) + - 0x073b9ced, // c0x0147 (n0x1ced-n0x1cee) + - 0x073bdcee, // c0x0148 (n0x1cee-n0x1cef) + - 0x07401cef, // c0x0149 (n0x1cef-n0x1d00) + - 0x07411d00, // c0x014a (n0x1d00-n0x1d04) + - 0x07449d04, // c0x014b (n0x1d04-n0x1d12) + - 0x07479d12, // c0x014c (n0x1d12-n0x1d1e) + - 0x075b5d1e, // c0x014d (n0x1d1e-n0x1d6d) + - 0x075d9d6d, // c0x014e (n0x1d6d-n0x1d76) + - 0x07605d76, // c0x014f (n0x1d76-n0x1d81) + - 0x07609d81, // c0x0150 (n0x1d81-n0x1d82) + - 0x0760dd82, // c0x0151 (n0x1d82-n0x1d83) + - 0x07709d83, // c0x0152 (n0x1d83-n0x1dc2) + - 0x07715dc2, // c0x0153 (n0x1dc2-n0x1dc5) + - 0x07721dc5, // c0x0154 (n0x1dc5-n0x1dc8) + - 0x0772ddc8, // c0x0155 (n0x1dc8-n0x1dcb) + - 0x07739dcb, // c0x0156 (n0x1dcb-n0x1dce) + - 0x07745dce, // c0x0157 (n0x1dce-n0x1dd1) + - 0x07751dd1, // c0x0158 (n0x1dd1-n0x1dd4) + - 0x0775ddd4, // c0x0159 (n0x1dd4-n0x1dd7) + - 0x07769dd7, // c0x015a (n0x1dd7-n0x1dda) + - 0x07775dda, // c0x015b (n0x1dda-n0x1ddd) + - 0x07781ddd, // c0x015c (n0x1ddd-n0x1de0) + - 0x0778dde0, // c0x015d (n0x1de0-n0x1de3) + - 0x07799de3, // c0x015e (n0x1de3-n0x1de6) + - 0x077a5de6, // c0x015f (n0x1de6-n0x1de9) + - 0x077adde9, // c0x0160 (n0x1de9-n0x1deb) + - 0x077b9deb, // c0x0161 (n0x1deb-n0x1dee) + - 0x077c5dee, // c0x0162 (n0x1dee-n0x1df1) + - 0x077d1df1, // c0x0163 (n0x1df1-n0x1df4) + - 0x077dddf4, // c0x0164 (n0x1df4-n0x1df7) + - 0x077e9df7, // c0x0165 (n0x1df7-n0x1dfa) + - 0x077f5dfa, // c0x0166 (n0x1dfa-n0x1dfd) + - 0x07801dfd, // 
c0x0167 (n0x1dfd-n0x1e00) + - 0x0780de00, // c0x0168 (n0x1e00-n0x1e03) + - 0x07819e03, // c0x0169 (n0x1e03-n0x1e06) + - 0x07825e06, // c0x016a (n0x1e06-n0x1e09) + - 0x07831e09, // c0x016b (n0x1e09-n0x1e0c) + - 0x0783de0c, // c0x016c (n0x1e0c-n0x1e0f) + - 0x07849e0f, // c0x016d (n0x1e0f-n0x1e12) + - 0x07855e12, // c0x016e (n0x1e12-n0x1e15) + - 0x07861e15, // c0x016f (n0x1e15-n0x1e18) + - 0x0786de18, // c0x0170 (n0x1e18-n0x1e1b) + - 0x07879e1b, // c0x0171 (n0x1e1b-n0x1e1e) + - 0x07881e1e, // c0x0172 (n0x1e1e-n0x1e20) + - 0x0788de20, // c0x0173 (n0x1e20-n0x1e23) + - 0x07899e23, // c0x0174 (n0x1e23-n0x1e26) + - 0x078a5e26, // c0x0175 (n0x1e26-n0x1e29) + - 0x078b1e29, // c0x0176 (n0x1e29-n0x1e2c) + - 0x078bde2c, // c0x0177 (n0x1e2c-n0x1e2f) + - 0x078c9e2f, // c0x0178 (n0x1e2f-n0x1e32) + - 0x078d5e32, // c0x0179 (n0x1e32-n0x1e35) + - 0x078e1e35, // c0x017a (n0x1e35-n0x1e38) + - 0x078ede38, // c0x017b (n0x1e38-n0x1e3b) + - 0x078f9e3b, // c0x017c (n0x1e3b-n0x1e3e) + - 0x07905e3e, // c0x017d (n0x1e3e-n0x1e41) + - 0x07911e41, // c0x017e (n0x1e41-n0x1e44) + - 0x0791de44, // c0x017f (n0x1e44-n0x1e47) + - 0x07925e47, // c0x0180 (n0x1e47-n0x1e49) + - 0x07931e49, // c0x0181 (n0x1e49-n0x1e4c) + - 0x0793de4c, // c0x0182 (n0x1e4c-n0x1e4f) + - 0x07949e4f, // c0x0183 (n0x1e4f-n0x1e52) + - 0x07955e52, // c0x0184 (n0x1e52-n0x1e55) + - 0x07961e55, // c0x0185 (n0x1e55-n0x1e58) + - 0x0796de58, // c0x0186 (n0x1e58-n0x1e5b) + - 0x07979e5b, // c0x0187 (n0x1e5b-n0x1e5e) + - 0x07985e5e, // c0x0188 (n0x1e5e-n0x1e61) + - 0x07989e61, // c0x0189 (n0x1e61-n0x1e62) + - 0x07995e62, // c0x018a (n0x1e62-n0x1e65) + - 0x079ade65, // c0x018b (n0x1e65-n0x1e6b) + - 0x079b1e6b, // c0x018c (n0x1e6b-n0x1e6c) + - 0x079c1e6c, // c0x018d (n0x1e6c-n0x1e70) + - 0x079d9e70, // c0x018e (n0x1e70-n0x1e76) + - 0x07a1de76, // c0x018f (n0x1e76-n0x1e87) + - 0x07a31e87, // c0x0190 (n0x1e87-n0x1e8c) + - 0x07a65e8c, // c0x0191 (n0x1e8c-n0x1e99) + - 0x07a75e99, // c0x0192 (n0x1e99-n0x1e9d) + - 0x07a91e9d, // c0x0193 
(n0x1e9d-n0x1ea4) + - 0x07aa9ea4, // c0x0194 (n0x1ea4-n0x1eaa) + - 0x27aedeaa, // c0x0195 (n0x1eaa-n0x1ebb) o - 0x07af1ebb, // c0x0196 (n0x1ebb-n0x1ebc) + + 0x0183c609, // c0x0006 (n0x0609-n0x060f) + + 0x0184060f, // c0x0007 (n0x060f-n0x0610) + + 0x01860610, // c0x0008 (n0x0610-n0x0618) + + 0x019bc618, // c0x0009 (n0x0618-n0x066f) + + 0x019d066f, // c0x000a (n0x066f-n0x0674) + + 0x019e4674, // c0x000b (n0x0674-n0x0679) + + 0x019f4679, // c0x000c (n0x0679-n0x067d) + + 0x01a1067d, // c0x000d (n0x067d-n0x0684) + + 0x01a14684, // c0x000e (n0x0684-n0x0685) + + 0x01a2c685, // c0x000f (n0x0685-n0x068b) + + 0x01a5068b, // c0x0010 (n0x068b-n0x0694) + + 0x01a54694, // c0x0011 (n0x0694-n0x0695) + + 0x01a6c695, // c0x0012 (n0x0695-n0x069b) + + 0x01a7069b, // c0x0013 (n0x069b-n0x069c) + + 0x01a8c69c, // c0x0014 (n0x069c-n0x06a3) + + 0x01a906a3, // c0x0015 (n0x06a3-n0x06a4) + + 0x01ad86a4, // c0x0016 (n0x06a4-n0x06b6) + + 0x01adc6b6, // c0x0017 (n0x06b6-n0x06b7) + + 0x01afc6b7, // c0x0018 (n0x06b7-n0x06bf) + + 0x01b106bf, // c0x0019 (n0x06bf-n0x06c4) + + 0x01b146c4, // c0x001a (n0x06c4-n0x06c5) + + 0x01b446c5, // c0x001b (n0x06c5-n0x06d1) + + 0x01b706d1, // c0x001c (n0x06d1-n0x06dc) + + 0x01b986dc, // c0x001d (n0x06dc-n0x06e6) + + 0x01ba06e6, // c0x001e (n0x06e6-n0x06e8) + + 0x01ba46e8, // c0x001f (n0x06e8-n0x06e9) + + 0x01c386e9, // c0x0020 (n0x06e9-n0x070e) + + 0x01c4c70e, // c0x0021 (n0x070e-n0x0713) + + 0x01c60713, // c0x0022 (n0x0713-n0x0718) + + 0x01c80718, // c0x0023 (n0x0718-n0x0720) + + 0x01c90720, // c0x0024 (n0x0720-n0x0724) + + 0x01ca4724, // c0x0025 (n0x0724-n0x0729) + + 0x01cc8729, // c0x0026 (n0x0729-n0x0732) + + 0x01de0732, // c0x0027 (n0x0732-n0x0778) + + 0x01de4778, // c0x0028 (n0x0778-n0x0779) + + 0x01df8779, // c0x0029 (n0x0779-n0x077e) + + 0x01e0c77e, // c0x002a (n0x077e-n0x0783) + + 0x01e14783, // c0x002b (n0x0783-n0x0785) + + 0x01e24785, // c0x002c (n0x0785-n0x0789) + + 0x01e28789, // c0x002d (n0x0789-n0x078a) + + 0x01e4078a, // c0x002e (n0x078a-n0x0790) + 
+ 0x01e84790, // c0x002f (n0x0790-n0x07a1) + + 0x01e947a1, // c0x0030 (n0x07a1-n0x07a5) + + 0x01e987a5, // c0x0031 (n0x07a5-n0x07a6) + + 0x01e9c7a6, // c0x0032 (n0x07a6-n0x07a7) + + 0x01ea07a7, // c0x0033 (n0x07a7-n0x07a8) + + 0x01edc7a8, // c0x0034 (n0x07a8-n0x07b7) + + 0x61ee07b7, // c0x0035 (n0x07b7-n0x07b8)* o + 0x01ef47b8, // c0x0036 (n0x07b8-n0x07bd) + + 0x01f047bd, // c0x0037 (n0x07bd-n0x07c1) + + 0x01fb87c1, // c0x0038 (n0x07c1-n0x07ee) + + 0x21fbc7ee, // c0x0039 (n0x07ee-n0x07ef) o + 0x01fc07ef, // c0x003a (n0x07ef-n0x07f0) + + 0x01fc47f0, // c0x003b (n0x07f0-n0x07f1) + + 0x21fc87f1, // c0x003c (n0x07f1-n0x07f2) o + 0x21fcc7f2, // c0x003d (n0x07f2-n0x07f3) o + 0x020007f3, // c0x003e (n0x07f3-n0x0800) + + 0x02004800, // c0x003f (n0x0800-n0x0801) + + 0x0235c801, // c0x0040 (n0x0801-n0x08d7) + + 0x223ac8d7, // c0x0041 (n0x08d7-n0x08eb) o + 0x223b08eb, // c0x0042 (n0x08eb-n0x08ec) o + 0x023d88ec, // c0x0043 (n0x08ec-n0x08f6) + + 0x023e08f6, // c0x0044 (n0x08f6-n0x08f8) + + 0x223e48f8, // c0x0045 (n0x08f8-n0x08f9) o + 0x223e88f9, // c0x0046 (n0x08f9-n0x08fa) o + 0x023f48fa, // c0x0047 (n0x08fa-n0x08fd) + + 0x223f88fd, // c0x0048 (n0x08fd-n0x08fe) o + 0x024148fe, // c0x0049 (n0x08fe-n0x0905) + + 0x0242c905, // c0x004a (n0x0905-n0x090b) + + 0x0243090b, // c0x004b (n0x090b-n0x090c) + + 0x0244090c, // c0x004c (n0x090c-n0x0910) + + 0x02448910, // c0x004d (n0x0910-n0x0912) + + 0x2247c912, // c0x004e (n0x0912-n0x091f) o + 0x0248091f, // c0x004f (n0x091f-n0x0920) + + 0x02488920, // c0x0050 (n0x0920-n0x0922) + + 0x024a8922, // c0x0051 (n0x0922-n0x092a) + + 0x024ac92a, // c0x0052 (n0x092a-n0x092b) + + 0x024c092b, // c0x0053 (n0x092b-n0x0930) + + 0x024e8930, // c0x0054 (n0x0930-n0x093a) + + 0x0250893a, // c0x0055 (n0x093a-n0x0942) + + 0x02538942, // c0x0056 (n0x0942-n0x094e) + + 0x0256094e, // c0x0057 (n0x094e-n0x0958) + + 0x02564958, // c0x0058 (n0x0958-n0x0959) + + 0x02588959, // c0x0059 (n0x0959-n0x0962) + + 0x0258c962, // c0x005a (n0x0962-n0x0963) + + 0x025a0963, // 
c0x005b (n0x0963-n0x0968) + + 0x025a4968, // c0x005c (n0x0968-n0x0969) + + 0x025c4969, // c0x005d (n0x0969-n0x0971) + + 0x025d0971, // c0x005e (n0x0971-n0x0974) + + 0x02630974, // c0x005f (n0x0974-n0x098c) + + 0x0264c98c, // c0x0060 (n0x098c-n0x0993) + + 0x02658993, // c0x0061 (n0x0993-n0x0996) + + 0x0266c996, // c0x0062 (n0x0996-n0x099b) + + 0x0268499b, // c0x0063 (n0x099b-n0x09a1) + + 0x026989a1, // c0x0064 (n0x09a1-n0x09a6) + + 0x026b09a6, // c0x0065 (n0x09a6-n0x09ac) + + 0x026c89ac, // c0x0066 (n0x09ac-n0x09b2) + + 0x026e09b2, // c0x0067 (n0x09b2-n0x09b8) + + 0x026fc9b8, // c0x0068 (n0x09b8-n0x09bf) + + 0x027149bf, // c0x0069 (n0x09bf-n0x09c5) + + 0x027749c5, // c0x006a (n0x09c5-n0x09dd) + + 0x0278c9dd, // c0x006b (n0x09dd-n0x09e3) + + 0x027a09e3, // c0x006c (n0x09e3-n0x09e8) + + 0x027e49e8, // c0x006d (n0x09e8-n0x09f9) + + 0x028649f9, // c0x006e (n0x09f9-n0x0a19) + + 0x02890a19, // c0x006f (n0x0a19-n0x0a24) + + 0x02894a24, // c0x0070 (n0x0a24-n0x0a25) + + 0x0289ca25, // c0x0071 (n0x0a25-n0x0a27) + + 0x028bca27, // c0x0072 (n0x0a27-n0x0a2f) + + 0x028c0a2f, // c0x0073 (n0x0a2f-n0x0a30) + + 0x028dca30, // c0x0074 (n0x0a30-n0x0a37) + + 0x028e4a37, // c0x0075 (n0x0a37-n0x0a39) + + 0x02918a39, // c0x0076 (n0x0a39-n0x0a46) + + 0x02940a46, // c0x0077 (n0x0a46-n0x0a50) + + 0x02944a50, // c0x0078 (n0x0a50-n0x0a51) + + 0x0295ca51, // c0x0079 (n0x0a51-n0x0a57) + + 0x02974a57, // c0x007a (n0x0a57-n0x0a5d) + + 0x02998a5d, // c0x007b (n0x0a5d-n0x0a66) + + 0x029b8a66, // c0x007c (n0x0a66-n0x0a6e) + + 0x02f7ca6e, // c0x007d (n0x0a6e-n0x0bdf) + + 0x02f88bdf, // c0x007e (n0x0bdf-n0x0be2) + + 0x02fa8be2, // c0x007f (n0x0be2-n0x0bea) + + 0x03164bea, // c0x0080 (n0x0bea-n0x0c59) + + 0x03234c59, // c0x0081 (n0x0c59-n0x0c8d) + + 0x032a4c8d, // c0x0082 (n0x0c8d-n0x0ca9) + + 0x032fcca9, // c0x0083 (n0x0ca9-n0x0cbf) + + 0x033e4cbf, // c0x0084 (n0x0cbf-n0x0cf9) + + 0x0343ccf9, // c0x0085 (n0x0cf9-n0x0d0f) + + 0x03478d0f, // c0x0086 (n0x0d0f-n0x0d1e) + + 0x03574d1e, // c0x0087 
(n0x0d1e-n0x0d5d) + + 0x03640d5d, // c0x0088 (n0x0d5d-n0x0d90) + + 0x036d8d90, // c0x0089 (n0x0d90-n0x0db6) + + 0x03768db6, // c0x008a (n0x0db6-n0x0dda) + + 0x037ccdda, // c0x008b (n0x0dda-n0x0df3) + + 0x03a04df3, // c0x008c (n0x0df3-n0x0e81) + + 0x03abce81, // c0x008d (n0x0e81-n0x0eaf) + + 0x03b88eaf, // c0x008e (n0x0eaf-n0x0ee2) + + 0x03bd4ee2, // c0x008f (n0x0ee2-n0x0ef5) + + 0x03c5cef5, // c0x0090 (n0x0ef5-n0x0f17) + + 0x03c98f17, // c0x0091 (n0x0f17-n0x0f26) + + 0x03ce8f26, // c0x0092 (n0x0f26-n0x0f3a) + + 0x03d60f3a, // c0x0093 (n0x0f3a-n0x0f58) + + 0x63d64f58, // c0x0094 (n0x0f58-n0x0f59)* o + 0x63d68f59, // c0x0095 (n0x0f59-n0x0f5a)* o + 0x63d6cf5a, // c0x0096 (n0x0f5a-n0x0f5b)* o + 0x03de8f5b, // c0x0097 (n0x0f5b-n0x0f7a) + + 0x03e50f7a, // c0x0098 (n0x0f7a-n0x0f94) + + 0x03eccf94, // c0x0099 (n0x0f94-n0x0fb3) + + 0x03f44fb3, // c0x009a (n0x0fb3-n0x0fd1) + + 0x03fc8fd1, // c0x009b (n0x0fd1-n0x0ff2) + + 0x04034ff2, // c0x009c (n0x0ff2-n0x100d) + + 0x0416100d, // c0x009d (n0x100d-n0x1058) + + 0x041b9058, // c0x009e (n0x1058-n0x106e) + + 0x641bd06e, // c0x009f (n0x106e-n0x106f)* o + 0x0425506f, // c0x00a0 (n0x106f-n0x1095) + + 0x042dd095, // c0x00a1 (n0x1095-n0x10b7) + + 0x043290b7, // c0x00a2 (n0x10b7-n0x10ca) + + 0x043910ca, // c0x00a3 (n0x10ca-n0x10e4) + + 0x044390e4, // c0x00a4 (n0x10e4-n0x110e) + + 0x0450110e, // c0x00a5 (n0x110e-n0x1140) + + 0x04569140, // c0x00a6 (n0x1140-n0x115a) + + 0x0467d15a, // c0x00a7 (n0x115a-n0x119f) + + 0x6468119f, // c0x00a8 (n0x119f-n0x11a0)* o + 0x646851a0, // c0x00a9 (n0x11a0-n0x11a1)* o + 0x046e11a1, // c0x00aa (n0x11a1-n0x11b8) + + 0x0473d1b8, // c0x00ab (n0x11b8-n0x11cf) + + 0x047cd1cf, // c0x00ac (n0x11cf-n0x11f3) + + 0x048491f3, // c0x00ad (n0x11f3-n0x1212) + + 0x0488d212, // c0x00ae (n0x1212-n0x1223) + + 0x04971223, // c0x00af (n0x1223-n0x125c) + + 0x049a525c, // c0x00b0 (n0x125c-n0x1269) + + 0x04a05269, // c0x00b1 (n0x1269-n0x1281) + + 0x04a79281, // c0x00b2 (n0x1281-n0x129e) + + 0x04b0129e, // c0x00b3 
(n0x129e-n0x12c0) + + 0x04b412c0, // c0x00b4 (n0x12c0-n0x12d0) + + 0x04bb12d0, // c0x00b5 (n0x12d0-n0x12ec) + + 0x64bb52ec, // c0x00b6 (n0x12ec-n0x12ed)* o + 0x64bb92ed, // c0x00b7 (n0x12ed-n0x12ee)* o + 0x24bbd2ee, // c0x00b8 (n0x12ee-n0x12ef) o + 0x04bd52ef, // c0x00b9 (n0x12ef-n0x12f5) + + 0x04bf12f5, // c0x00ba (n0x12f5-n0x12fc) + + 0x04c352fc, // c0x00bb (n0x12fc-n0x130d) + + 0x04c4530d, // c0x00bc (n0x130d-n0x1311) + + 0x04c5d311, // c0x00bd (n0x1311-n0x1317) + + 0x04cd5317, // c0x00be (n0x1317-n0x1335) + + 0x04ce9335, // c0x00bf (n0x1335-n0x133a) + + 0x04d0133a, // c0x00c0 (n0x133a-n0x1340) + + 0x04d25340, // c0x00c1 (n0x1340-n0x1349) + + 0x04d39349, // c0x00c2 (n0x1349-n0x134e) + + 0x04d5134e, // c0x00c3 (n0x134e-n0x1354) + + 0x04d55354, // c0x00c4 (n0x1354-n0x1355) + + 0x04d91355, // c0x00c5 (n0x1355-n0x1364) + + 0x04da5364, // c0x00c6 (n0x1364-n0x1369) + + 0x04dad369, // c0x00c7 (n0x1369-n0x136b) + + 0x04db536b, // c0x00c8 (n0x136b-n0x136d) + + 0x04db936d, // c0x00c9 (n0x136d-n0x136e) + + 0x04ddd36e, // c0x00ca (n0x136e-n0x1377) + + 0x04e01377, // c0x00cb (n0x1377-n0x1380) + + 0x04e19380, // c0x00cc (n0x1380-n0x1386) + + 0x04e21386, // c0x00cd (n0x1386-n0x1388) + + 0x04e25388, // c0x00ce (n0x1388-n0x1389) + + 0x04e59389, // c0x00cf (n0x1389-n0x1396) + + 0x04e7d396, // c0x00d0 (n0x1396-n0x139f) + + 0x04e9d39f, // c0x00d1 (n0x139f-n0x13a7) + + 0x04eb93a7, // c0x00d2 (n0x13a7-n0x13ae) + + 0x04ec93ae, // c0x00d3 (n0x13ae-n0x13b2) + + 0x04edd3b2, // c0x00d4 (n0x13b2-n0x13b7) + + 0x04ee13b7, // c0x00d5 (n0x13b7-n0x13b8) + + 0x04ee93b8, // c0x00d6 (n0x13b8-n0x13ba) + + 0x04efd3ba, // c0x00d7 (n0x13ba-n0x13bf) + + 0x04f0d3bf, // c0x00d8 (n0x13bf-n0x13c3) + + 0x04f113c3, // c0x00d9 (n0x13c3-n0x13c4) + + 0x04f2d3c4, // c0x00da (n0x13c4-n0x13cb) + + 0x057bd3cb, // c0x00db (n0x13cb-n0x15ef) + + 0x057f55ef, // c0x00dc (n0x15ef-n0x15fd) + + 0x058215fd, // c0x00dd (n0x15fd-n0x1608) + + 0x05839608, // c0x00de (n0x1608-n0x160e) + + 0x0585960e, // c0x00df (n0x160e-n0x1616) 
+ + 0x6585d616, // c0x00e0 (n0x1616-n0x1617)* o + 0x058a1617, // c0x00e1 (n0x1617-n0x1628) + + 0x058a9628, // c0x00e2 (n0x1628-n0x162a) + + 0x258ad62a, // c0x00e3 (n0x162a-n0x162b) o + 0x258b162b, // c0x00e4 (n0x162b-n0x162c) o + 0x058b562c, // c0x00e5 (n0x162c-n0x162d) + + 0x0598d62d, // c0x00e6 (n0x162d-n0x1663) + + 0x25991663, // c0x00e7 (n0x1663-n0x1664) o + 0x25999664, // c0x00e8 (n0x1664-n0x1666) o + 0x259a1666, // c0x00e9 (n0x1666-n0x1668) o + 0x259ad668, // c0x00ea (n0x1668-n0x166b) o + 0x059d566b, // c0x00eb (n0x166b-n0x1675) + + 0x059fd675, // c0x00ec (n0x1675-n0x167f) + + 0x05a0167f, // c0x00ed (n0x167f-n0x1680) + + 0x25a39680, // c0x00ee (n0x1680-n0x168e) o + 0x05a4568e, // c0x00ef (n0x168e-n0x1691) + + 0x0659d691, // c0x00f0 (n0x1691-n0x1967) + + 0x065a1967, // c0x00f1 (n0x1967-n0x1968) + + 0x065a5968, // c0x00f2 (n0x1968-n0x1969) + + 0x265a9969, // c0x00f3 (n0x1969-n0x196a) o + 0x065ad96a, // c0x00f4 (n0x196a-n0x196b) + + 0x265b196b, // c0x00f5 (n0x196b-n0x196c) o + 0x065b596c, // c0x00f6 (n0x196c-n0x196d) + + 0x265c196d, // c0x00f7 (n0x196d-n0x1970) o + 0x065c5970, // c0x00f8 (n0x1970-n0x1971) + + 0x065c9971, // c0x00f9 (n0x1971-n0x1972) + + 0x265cd972, // c0x00fa (n0x1972-n0x1973) o + 0x065d1973, // c0x00fb (n0x1973-n0x1974) + + 0x265d9974, // c0x00fc (n0x1974-n0x1976) o + 0x065dd976, // c0x00fd (n0x1976-n0x1977) + + 0x065e1977, // c0x00fe (n0x1977-n0x1978) + + 0x265f1978, // c0x00ff (n0x1978-n0x197c) o + 0x065f597c, // c0x0100 (n0x197c-n0x197d) + + 0x065f997d, // c0x0101 (n0x197d-n0x197e) + + 0x065fd97e, // c0x0102 (n0x197e-n0x197f) + + 0x0660197f, // c0x0103 (n0x197f-n0x1980) + + 0x26605980, // c0x0104 (n0x1980-n0x1981) o + 0x06609981, // c0x0105 (n0x1981-n0x1982) + + 0x0660d982, // c0x0106 (n0x1982-n0x1983) + + 0x06611983, // c0x0107 (n0x1983-n0x1984) + + 0x06615984, // c0x0108 (n0x1984-n0x1985) + + 0x2661d985, // c0x0109 (n0x1985-n0x1987) o + 0x06621987, // c0x010a (n0x1987-n0x1988) + + 0x06625988, // c0x010b (n0x1988-n0x1989) + + 0x06629989, // 
c0x010c (n0x1989-n0x198a) + + 0x2662d98a, // c0x010d (n0x198a-n0x198b) o + 0x0663198b, // c0x010e (n0x198b-n0x198c) + + 0x2663998c, // c0x010f (n0x198c-n0x198e) o + 0x2663d98e, // c0x0110 (n0x198e-n0x198f) o + 0x0665998f, // c0x0111 (n0x198f-n0x1996) + + 0x06665996, // c0x0112 (n0x1996-n0x1999) + + 0x066a5999, // c0x0113 (n0x1999-n0x19a9) + + 0x066a99a9, // c0x0114 (n0x19a9-n0x19aa) + + 0x066cd9aa, // c0x0115 (n0x19aa-n0x19b3) + + 0x067c19b3, // c0x0116 (n0x19b3-n0x19f0) + + 0x267c99f0, // c0x0117 (n0x19f0-n0x19f2) o + 0x267cd9f2, // c0x0118 (n0x19f2-n0x19f3) o + 0x267d19f3, // c0x0119 (n0x19f3-n0x19f4) o + 0x067d99f4, // c0x011a (n0x19f4-n0x19f6) + + 0x068b59f6, // c0x011b (n0x19f6-n0x1a2d) + + 0x068e1a2d, // c0x011c (n0x1a2d-n0x1a38) + + 0x06901a38, // c0x011d (n0x1a38-n0x1a40) + + 0x0690da40, // c0x011e (n0x1a40-n0x1a43) + + 0x0692da43, // c0x011f (n0x1a43-n0x1a4b) + + 0x06965a4b, // c0x0120 (n0x1a4b-n0x1a59) + + 0x06bf9a59, // c0x0121 (n0x1a59-n0x1afe) + + 0x06cb5afe, // c0x0122 (n0x1afe-n0x1b2d) + + 0x06cc9b2d, // c0x0123 (n0x1b2d-n0x1b32) + + 0x06cfdb32, // c0x0124 (n0x1b32-n0x1b3f) + + 0x06d29b3f, // c0x0125 (n0x1b3f-n0x1b4a) + + 0x06d45b4a, // c0x0126 (n0x1b4a-n0x1b51) + + 0x06d69b51, // c0x0127 (n0x1b51-n0x1b5a) + + 0x06d81b5a, // c0x0128 (n0x1b5a-n0x1b60) + + 0x06d9db60, // c0x0129 (n0x1b60-n0x1b67) + + 0x06dc1b67, // c0x012a (n0x1b67-n0x1b70) + + 0x06dd1b70, // c0x012b (n0x1b70-n0x1b74) + + 0x06e01b74, // c0x012c (n0x1b74-n0x1b80) + + 0x06e1db80, // c0x012d (n0x1b80-n0x1b87) + + 0x07029b87, // c0x012e (n0x1b87-n0x1c0a) + + 0x0704dc0a, // c0x012f (n0x1c0a-n0x1c13) + + 0x0706dc13, // c0x0130 (n0x1c13-n0x1c1b) + + 0x07081c1b, // c0x0131 (n0x1c1b-n0x1c20) + + 0x07095c20, // c0x0132 (n0x1c20-n0x1c25) + + 0x070b5c25, // c0x0133 (n0x1c25-n0x1c2d) + + 0x07159c2d, // c0x0134 (n0x1c2d-n0x1c56) + + 0x07175c56, // c0x0135 (n0x1c56-n0x1c5d) + + 0x07191c5d, // c0x0136 (n0x1c5d-n0x1c64) + + 0x07195c64, // c0x0137 (n0x1c64-n0x1c65) + + 0x07199c65, // c0x0138 
(n0x1c65-n0x1c66) + + 0x071adc66, // c0x0139 (n0x1c66-n0x1c6b) + + 0x071cdc6b, // c0x013a (n0x1c6b-n0x1c73) + + 0x071d9c73, // c0x013b (n0x1c73-n0x1c76) + + 0x07209c76, // c0x013c (n0x1c76-n0x1c82) + + 0x07289c82, // c0x013d (n0x1c82-n0x1ca2) + + 0x0729dca2, // c0x013e (n0x1ca2-n0x1ca7) + + 0x072a1ca7, // c0x013f (n0x1ca7-n0x1ca8) + + 0x072b9ca8, // c0x0140 (n0x1ca8-n0x1cae) + + 0x072c5cae, // c0x0141 (n0x1cae-n0x1cb1) + + 0x072c9cb1, // c0x0142 (n0x1cb1-n0x1cb2) + + 0x072e5cb2, // c0x0143 (n0x1cb2-n0x1cb9) + + 0x07321cb9, // c0x0144 (n0x1cb9-n0x1cc8) + + 0x07325cc8, // c0x0145 (n0x1cc8-n0x1cc9) + + 0x07345cc9, // c0x0146 (n0x1cc9-n0x1cd1) + + 0x07395cd1, // c0x0147 (n0x1cd1-n0x1ce5) + + 0x073adce5, // c0x0148 (n0x1ce5-n0x1ceb) + + 0x07401ceb, // c0x0149 (n0x1ceb-n0x1d00) + + 0x07405d00, // c0x014a (n0x1d00-n0x1d01) + + 0x07409d01, // c0x014b (n0x1d01-n0x1d02) + + 0x0744dd02, // c0x014c (n0x1d02-n0x1d13) + + 0x0745dd13, // c0x014d (n0x1d13-n0x1d17) + + 0x07495d17, // c0x014e (n0x1d17-n0x1d25) + + 0x074c5d25, // c0x014f (n0x1d25-n0x1d31) + + 0x07601d31, // c0x0150 (n0x1d31-n0x1d80) + + 0x07625d80, // c0x0151 (n0x1d80-n0x1d89) + + 0x07651d89, // c0x0152 (n0x1d89-n0x1d94) + + 0x07655d94, // c0x0153 (n0x1d94-n0x1d95) + + 0x07659d95, // c0x0154 (n0x1d95-n0x1d96) + + 0x07755d96, // c0x0155 (n0x1d96-n0x1dd5) + + 0x07761dd5, // c0x0156 (n0x1dd5-n0x1dd8) + + 0x0776ddd8, // c0x0157 (n0x1dd8-n0x1ddb) + + 0x07779ddb, // c0x0158 (n0x1ddb-n0x1dde) + + 0x07785dde, // c0x0159 (n0x1dde-n0x1de1) + + 0x07791de1, // c0x015a (n0x1de1-n0x1de4) + + 0x0779dde4, // c0x015b (n0x1de4-n0x1de7) + + 0x077a9de7, // c0x015c (n0x1de7-n0x1dea) + + 0x077b5dea, // c0x015d (n0x1dea-n0x1ded) + + 0x077c1ded, // c0x015e (n0x1ded-n0x1df0) + + 0x077cddf0, // c0x015f (n0x1df0-n0x1df3) + + 0x077d9df3, // c0x0160 (n0x1df3-n0x1df6) + + 0x077e5df6, // c0x0161 (n0x1df6-n0x1df9) + + 0x077f1df9, // c0x0162 (n0x1df9-n0x1dfc) + + 0x077f9dfc, // c0x0163 (n0x1dfc-n0x1dfe) + + 0x07805dfe, // c0x0164 (n0x1dfe-n0x1e01) + 
+ 0x07811e01, // c0x0165 (n0x1e01-n0x1e04) + + 0x0781de04, // c0x0166 (n0x1e04-n0x1e07) + + 0x07829e07, // c0x0167 (n0x1e07-n0x1e0a) + + 0x07835e0a, // c0x0168 (n0x1e0a-n0x1e0d) + + 0x07841e0d, // c0x0169 (n0x1e0d-n0x1e10) + + 0x0784de10, // c0x016a (n0x1e10-n0x1e13) + + 0x07859e13, // c0x016b (n0x1e13-n0x1e16) + + 0x07865e16, // c0x016c (n0x1e16-n0x1e19) + + 0x07871e19, // c0x016d (n0x1e19-n0x1e1c) + + 0x0787de1c, // c0x016e (n0x1e1c-n0x1e1f) + + 0x07889e1f, // c0x016f (n0x1e1f-n0x1e22) + + 0x07895e22, // c0x0170 (n0x1e22-n0x1e25) + + 0x078a1e25, // c0x0171 (n0x1e25-n0x1e28) + + 0x078ade28, // c0x0172 (n0x1e28-n0x1e2b) + + 0x078b9e2b, // c0x0173 (n0x1e2b-n0x1e2e) + + 0x078c5e2e, // c0x0174 (n0x1e2e-n0x1e31) + + 0x078cde31, // c0x0175 (n0x1e31-n0x1e33) + + 0x078d9e33, // c0x0176 (n0x1e33-n0x1e36) + + 0x078e5e36, // c0x0177 (n0x1e36-n0x1e39) + + 0x078f1e39, // c0x0178 (n0x1e39-n0x1e3c) + + 0x078fde3c, // c0x0179 (n0x1e3c-n0x1e3f) + + 0x07909e3f, // c0x017a (n0x1e3f-n0x1e42) + + 0x07915e42, // c0x017b (n0x1e42-n0x1e45) + + 0x07921e45, // c0x017c (n0x1e45-n0x1e48) + + 0x0792de48, // c0x017d (n0x1e48-n0x1e4b) + + 0x07939e4b, // c0x017e (n0x1e4b-n0x1e4e) + + 0x07945e4e, // c0x017f (n0x1e4e-n0x1e51) + + 0x07951e51, // c0x0180 (n0x1e51-n0x1e54) + + 0x0795de54, // c0x0181 (n0x1e54-n0x1e57) + + 0x07969e57, // c0x0182 (n0x1e57-n0x1e5a) + + 0x07971e5a, // c0x0183 (n0x1e5a-n0x1e5c) + + 0x0797de5c, // c0x0184 (n0x1e5c-n0x1e5f) + + 0x07989e5f, // c0x0185 (n0x1e5f-n0x1e62) + + 0x07995e62, // c0x0186 (n0x1e62-n0x1e65) + + 0x079a1e65, // c0x0187 (n0x1e65-n0x1e68) + + 0x079ade68, // c0x0188 (n0x1e68-n0x1e6b) + + 0x079b9e6b, // c0x0189 (n0x1e6b-n0x1e6e) + + 0x079c5e6e, // c0x018a (n0x1e6e-n0x1e71) + + 0x079d1e71, // c0x018b (n0x1e71-n0x1e74) + + 0x079d5e74, // c0x018c (n0x1e74-n0x1e75) + + 0x079e1e75, // c0x018d (n0x1e75-n0x1e78) + + 0x079f9e78, // c0x018e (n0x1e78-n0x1e7e) + + 0x079fde7e, // c0x018f (n0x1e7e-n0x1e7f) + + 0x07a0de7f, // c0x0190 (n0x1e7f-n0x1e83) + + 0x07a25e83, // 
c0x0191 (n0x1e83-n0x1e89) + + 0x07a69e89, // c0x0192 (n0x1e89-n0x1e9a) + + 0x07a7de9a, // c0x0193 (n0x1e9a-n0x1e9f) + + 0x07ab1e9f, // c0x0194 (n0x1e9f-n0x1eac) + + 0x07ac1eac, // c0x0195 (n0x1eac-n0x1eb0) + + 0x07addeb0, // c0x0196 (n0x1eb0-n0x1eb7) + + 0x07af5eb7, // c0x0197 (n0x1eb7-n0x1ebd) + + 0x27b39ebd, // c0x0198 (n0x1ebd-n0x1ece) o + 0x07b3dece, // c0x0199 (n0x1ece-n0x1ecf) + } -// max children 406 (capacity 511) -// max text offset 26999 (capacity 32767) +// max children 409 (capacity 511) +// max text offset 27059 (capacity 32767) // max text length 36 (capacity 63) -// max hi 7868 (capacity 16383) -// max lo 7867 (capacity 16383) +// max hi 7887 (capacity 16383) +// max lo 7886 (capacity 16383) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/table_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/table_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/table_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/publicsuffix/table_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -5048,13 +5048,17 @@ "prof.pr", "ac.pr", "pro", + "aaa.pro", "aca.pro", + "acct.pro", + "avocat.pro", "bar.pro", "cpa.pro", + "eng.pro", "jur.pro", "law.pro", "med.pro", - "eng.pro", + "recht.pro", "ps", "edu.ps", "gov.ps", @@ -6340,7 +6344,6 @@ "dog", "doha", "domains", - "doosan", "dot", "download", "drive", @@ -6437,6 +6440,7 @@ "ftr", "fujitsu", "fujixerox", + "fun", "fund", "furniture", "futbol", @@ -6466,6 +6470,7 @@ "global", "globo", "gmail", + "gmbh", "gmo", "gmx", "godaddy", @@ -6995,6 +7000,7 @@ "stockholm", "storage", "store", + "stream", "studio", "study", "style", @@ -7265,14 +7271,15 @@ "zuerich", "cloudfront.net", "ap-northeast-1.compute.amazonaws.com", + "ap-northeast-2.compute.amazonaws.com", "ap-southeast-1.compute.amazonaws.com", "ap-southeast-2.compute.amazonaws.com", 
"cn-north-1.compute.amazonaws.cn", + "compute-1.amazonaws.com", "compute.amazonaws.cn", "compute.amazonaws.com", - "compute-1.amazonaws.com", - "eu-west-1.compute.amazonaws.com", "eu-central-1.compute.amazonaws.com", + "eu-west-1.compute.amazonaws.com", "sa-east-1.compute.amazonaws.com", "us-east-1.amazonaws.com", "us-gov-west-1.compute.amazonaws.com", @@ -7284,17 +7291,19 @@ "elb.amazonaws.com", "s3.amazonaws.com", "s3-ap-northeast-1.amazonaws.com", + "s3-ap-northeast-2.amazonaws.com", "s3-ap-southeast-1.amazonaws.com", "s3-ap-southeast-2.amazonaws.com", + "s3-eu-central-1.amazonaws.com", + "s3-eu-west-1.amazonaws.com", "s3-external-1.amazonaws.com", "s3-external-2.amazonaws.com", "s3-fips-us-gov-west-1.amazonaws.com", - "s3-eu-central-1.amazonaws.com", - "s3-eu-west-1.amazonaws.com", "s3-sa-east-1.amazonaws.com", "s3-us-gov-west-1.amazonaws.com", "s3-us-west-1.amazonaws.com", "s3-us-west-2.amazonaws.com", + "s3.ap-northeast-2.amazonaws.com", "s3.cn-north-1.amazonaws.com.cn", "s3.eu-central-1.amazonaws.com", "betainabox.com", @@ -7327,6 +7336,7 @@ "za.bz", "za.com", "africa.com", + "xenapponazure.com", "gr.com", "in.net", "us.org", @@ -7627,6 +7637,7 @@ "webhop.org", "worse-than.tv", "writesthisblog.com", + "dynv6.net", "eu.org", "al.eu.org", "asso.eu.org", @@ -7683,6 +7694,7 @@ "tr.eu.org", "uk.eu.org", "us.eu.org", + "apps.fbsbx.com", "a.ssl.fastly.net", "b.ssl.fastly.net", "global.ssl.fastly.net", @@ -7693,6 +7705,11 @@ "service.gov.uk", "github.io", "githubusercontent.com", + "githubcloud.com", + "*.api.githubcloud.com", + "*.ext.githubcloud.com", + "gist.githubcloud.com", + "*.githubcloudusercontent.com", "ro.com", "appspot.com", "blogspot.ae", @@ -8206,7 +8223,6 @@ "dog", "doha", "domains", - "doosan", "dot", "download", "drive", @@ -8318,6 +8334,7 @@ "ftr", "fujitsu", "fujixerox", + "fun", "fund", "furniture", "futbol", @@ -8357,6 +8374,7 @@ "globo", "gm", "gmail", + "gmbh", "gmo", "gmx", "gn", @@ -9020,6 +9038,7 @@ "stockholm", "storage", "store", + 
"stream", "studio", "study", "style", @@ -9938,6 +9957,7 @@ "est-mon-blogueur", "eu", "familyds", + "fbsbx", "firebaseapp", "flynnhub", "from-ak", @@ -9989,6 +10009,8 @@ "from-wy", "gb", "getmyip", + "githubcloud", + "githubcloudusercontent", "githubusercontent", "googleapis", "googlecode", @@ -10099,14 +10121,17 @@ "withgoogle", "withyoutube", "writesthisblog", + "xenapponazure", "yolasite", "za", + "ap-northeast-2", "compute", "compute-1", "elb", "eu-central-1", "s3", "s3-ap-northeast-1", + "s3-ap-northeast-2", "s3-ap-southeast-1", "s3-ap-southeast-2", "s3-eu-central-1", @@ -10119,7 +10144,9 @@ "s3-us-west-1", "s3-us-west-2", "us-east-1", + "s3", "ap-northeast-1", + "ap-northeast-2", "ap-southeast-1", "ap-southeast-2", "eu-central-1", @@ -10131,6 +10158,10 @@ "z-1", "z-2", "s3", + "apps", + "api", + "ext", + "gist", "xen", "ac", "co", @@ -13525,6 +13556,7 @@ "dsmynas", "dynalias", "dynathome", + "dynv6", "endofinternet", "familyds", "fastly", @@ -14804,13 +14836,17 @@ "org", "pro", "prof", + "aaa", "aca", + "acct", + "avocat", "bar", "cpa", "eng", "jur", "law", "med", + "recht", "com", "edu", "gov", diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/trace/trace.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/trace/trace.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/trace/trace.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/trace/trace.go 2016-05-24 07:05:22.000000000 +0000 @@ -95,11 +95,14 @@ // // The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. host, _, err := net.SplitHostPort(req.RemoteAddr) - switch { - case err != nil: // Badly formed address; fail closed. 
- return false, false - case host == "localhost" || host == "127.0.0.1" || host == "::1": + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": return true, true default: return false, false diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/trace/trace_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/trace/trace_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/trace/trace_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/trace/trace_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,6 +5,7 @@ package trace import ( + "net/http" "reflect" "testing" ) @@ -44,3 +45,27 @@ t.Errorf("reset didn't clear all fields: %+v", el) } } + +func TestAuthRequest(t *testing.T) { + testCases := []struct { + host string + want bool + }{ + {host: "192.168.23.1", want: false}, + {host: "192.168.23.1:8080", want: false}, + {host: "malformed remote addr", want: false}, + {host: "localhost", want: true}, + {host: "localhost:8080", want: true}, + {host: "127.0.0.1", want: true}, + {host: "127.0.0.1:8080", want: true}, + {host: "::1", want: true}, + {host: "[::1]:8080", want: true}, + } + for _, tt := range testCases { + req := &http.Request{RemoteAddr: tt.host} + any, sensitive := AuthRequest(req) + if any != tt.want || sensitive != tt.want { + t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) + } + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/file.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/file.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/file.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/file.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,6 +5,7 @@ package webdav 
import ( + "encoding/xml" "io" "net/http" "os" @@ -13,8 +14,6 @@ "strings" "sync" "time" - - "golang.org/x/net/webdav/internal/xml" ) // slashClean is equivalent to but slightly more efficient than diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/file_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/file_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/file_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/file_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,6 +5,7 @@ package webdav import ( + "encoding/xml" "fmt" "io" "io/ioutil" @@ -17,8 +18,6 @@ "strconv" "strings" "testing" - - "golang.org/x/net/webdav/internal/xml" ) func TestSlashClean(t *testing.T) { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/prop.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/prop.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/prop.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/prop.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,6 +5,7 @@ package webdav import ( + "encoding/xml" "fmt" "io" "mime" @@ -12,8 +13,6 @@ "os" "path/filepath" "strconv" - - "golang.org/x/net/webdav/internal/xml" ) // Proppatch describes a property update instruction as defined in RFC 4918. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/prop_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/prop_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/prop_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/prop_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -5,14 +5,13 @@ package webdav import ( + "encoding/xml" "fmt" "net/http" "os" "reflect" "sort" "testing" - - "golang.org/x/net/webdav/internal/xml" ) func TestMemPS(t *testing.T) { @@ -75,21 +74,21 @@ op: "propname", name: "/dir", wantPnames: []xml.Name{ - xml.Name{Space: "DAV:", Local: "resourcetype"}, - xml.Name{Space: "DAV:", Local: "displayname"}, - xml.Name{Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "supportedlock"}, }, }, { op: "propname", name: "/file", wantPnames: []xml.Name{ - xml.Name{Space: "DAV:", Local: "resourcetype"}, - xml.Name{Space: "DAV:", Local: "displayname"}, - xml.Name{Space: "DAV:", Local: "getcontentlength"}, - xml.Name{Space: "DAV:", Local: "getlastmodified"}, - xml.Name{Space: "DAV:", Local: "getcontenttype"}, - xml.Name{Space: "DAV:", Local: "getetag"}, - xml.Name{Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, }, }}, }, { @@ -452,14 +451,14 @@ op: "propname", name: "/file", wantPnames: []xml.Name{ - xml.Name{Space: "DAV:", Local: "resourcetype"}, - xml.Name{Space: "DAV:", Local: "displayname"}, - xml.Name{Space: "DAV:", Local: "getcontentlength"}, - xml.Name{Space: "DAV:", Local: "getlastmodified"}, - 
xml.Name{Space: "DAV:", Local: "getcontenttype"}, - xml.Name{Space: "DAV:", Local: "getetag"}, - xml.Name{Space: "DAV:", Local: "supportedlock"}, - xml.Name{Space: "foo", Local: "bar"}, + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, }, }}, }, { diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/webdav.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/webdav.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/webdav.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/webdav.go 2016-05-24 07:05:22.000000000 +0000 @@ -2,42 +2,21 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package webdav etc etc TODO. +// Package webdav provides a WebDAV server implementation. package webdav // import "golang.org/x/net/webdav" import ( "errors" "fmt" "io" - "log" "net/http" "net/url" "os" "path" - "runtime" "strings" "time" ) -// Package webdav's XML output requires the standard library's encoding/xml -// package version 1.5 or greater. Otherwise, it will produce malformed XML. -// -// As of May 2015, the Go stable release is version 1.4, so we print a message -// to let users know that this golang.org/x/etc package won't work yet. -// -// This package also won't work with Go 1.3 and earlier, but making this -// runtime version check catch all the earlier versions too, and not just -// "1.4.x", isn't worth the complexity. -// -// TODO: delete this check at some point after Go 1.5 is released. 
-var go1Dot4 = strings.HasPrefix(runtime.Version(), "go1.4.") - -func init() { - if go1Dot4 { - log.Println("package webdav requires Go version 1.5 or greater") - } -} - type Handler struct { // Prefix is the URL path prefix to strip from WebDAV resource paths. Prefix string diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/webdav_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/webdav_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/webdav_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/webdav_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -147,10 +147,10 @@ } sort.Strings(got) want := map[string][]string{ - "/": []string{"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f"}, - "/a/": []string{"/", "/b", "/b/c", "/b/e", "/b/e/f"}, - "/a/b/": []string{"/", "/c", "/e", "/e/f"}, - "/a/b/c/": []string{"/"}, + "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f"}, + "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f"}, + "/a/b/": {"/", "/c", "/e", "/e/f"}, + "/a/b/c/": {"/"}, }[prefix] if !reflect.DeepEqual(got, want) { t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/xml.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/xml.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/xml.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/xml.go 2016-05-24 07:05:22.000000000 +0000 @@ -9,17 +9,35 @@ import ( "bytes" + "encoding/xml" "fmt" "io" "net/http" "time" - "golang.org/x/net/webdav/internal/xml" + // As of https://go-review.googlesource.com/#/c/12772/ which was submitted + // in July 2015, this package uses an internal fork of the standard + // library's encoding/xml package, due 
to changes in the way namespaces + // were encoded. Such changes were introduced in the Go 1.5 cycle, but were + // rolled back in response to https://github.com/golang/go/issues/11841 + // + // However, this package's exported API, specifically the Property and + // DeadPropsHolder types, need to refer to the standard library's version + // of the xml.Name type, as code that imports this package cannot refer to + // the internal version. + // + // This file therefore imports both the internal and external versions, as + // ixml and xml, and converts between them. + // + // In the long term, this package should use the standard library's version + // only, and the internal fork deleted, once + // https://github.com/golang/go/issues/13400 is resolved. + ixml "golang.org/x/net/webdav/internal/xml" ) // http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo type lockInfo struct { - XMLName xml.Name `xml:"lockinfo"` + XMLName ixml.Name `xml:"lockinfo"` Exclusive *struct{} `xml:"lockscope>exclusive"` Shared *struct{} `xml:"lockscope>shared"` Write *struct{} `xml:"locktype>write"` @@ -33,7 +51,7 @@ func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { c := &countingReader{r: r} - if err = xml.NewDecoder(c).Decode(&li); err != nil { + if err = ixml.NewDecoder(c).Decode(&li); err != nil { if err == io.EOF { if c.n == 0 { // An empty body means to refresh the lock. @@ -88,7 +106,7 @@ switch s[i] { case '"', '&', '\'', '<', '>': b := bytes.NewBuffer(nil) - xml.EscapeText(b, []byte(s)) + ixml.EscapeText(b, []byte(s)) return b.String() } } @@ -100,14 +118,14 @@ // and directives. 
// http://www.webdav.org/specs/rfc4918.html#property_values // http://www.webdav.org/specs/rfc4918.html#xml-extensibility -func next(d *xml.Decoder) (xml.Token, error) { +func next(d *ixml.Decoder) (ixml.Token, error) { for { t, err := d.Token() if err != nil { return t, err } switch t.(type) { - case xml.Comment, xml.Directive, xml.ProcInst: + case ixml.Comment, ixml.Directive, ixml.ProcInst: continue default: return t, nil @@ -122,35 +140,35 @@ // // It returns an error if start does not contain any properties or if // properties contain values. Character data between properties is ignored. -func (pn *propfindProps) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { for { t, err := next(d) if err != nil { return err } switch t.(type) { - case xml.EndElement: + case ixml.EndElement: if len(*pn) == 0 { return fmt.Errorf("%s must not be empty", start.Name.Local) } return nil - case xml.StartElement: - name := t.(xml.StartElement).Name + case ixml.StartElement: + name := t.(ixml.StartElement).Name t, err = next(d) if err != nil { return err } - if _, ok := t.(xml.EndElement); !ok { + if _, ok := t.(ixml.EndElement); !ok { return fmt.Errorf("unexpected token %T", t) } - *pn = append(*pn, name) + *pn = append(*pn, xml.Name(name)) } } } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind type propfind struct { - XMLName xml.Name `xml:"DAV: propfind"` + XMLName ixml.Name `xml:"DAV: propfind"` Allprop *struct{} `xml:"DAV: allprop"` Propname *struct{} `xml:"DAV: propname"` Prop propfindProps `xml:"DAV: prop"` @@ -159,7 +177,7 @@ func readPropfind(r io.Reader) (pf propfind, status int, err error) { c := countingReader{r: r} - if err = xml.NewDecoder(&c).Decode(&pf); err != nil { + if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { if err == io.EOF { if c.n == 0 { // An empty body means to propfind allprop. 
@@ -206,11 +224,19 @@ InnerXML []byte `xml:",innerxml"` } +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + // http://www.webdav.org/specs/rfc4918.html#ELEMENT_error // See multistatusWriter for the "D:" namespace prefix. type xmlError struct { - XMLName xml.Name `xml:"D:error"` - InnerXML []byte `xml:",innerxml"` + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat @@ -222,24 +248,48 @@ ResponseDescription string `xml:"D:responsedescription,omitempty"` } +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + // MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace // before encoding. See multistatusWriter. -func (ps propstat) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. 
+ ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { if prop.XMLName.Space == "DAV:" { - prop.XMLName = xml.Name{Space: "", Local: "D:" + prop.XMLName.Local} - ps.Prop[k] = prop + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop } } // Distinct type to avoid infinite recursion of MarshalXML. - type newpropstat propstat - return e.EncodeElement(newpropstat(ps), start) + type newpropstat ixmlPropstat + return e.EncodeElement(newpropstat(ixmlPs), start) } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_response // See multistatusWriter for the "D:" namespace prefix. type response struct { - XMLName xml.Name `xml:"D:response"` + XMLName ixml.Name `xml:"D:response"` Href []string `xml:"D:href"` Propstat []propstat `xml:"D:propstat"` Status string `xml:"D:status,omitempty"` @@ -264,7 +314,7 @@ responseDescription string w http.ResponseWriter - enc *xml.Encoder + enc *ixml.Encoder } // Write validates and emits a DAV response as part of a multistatus response @@ -308,14 +358,14 @@ if err != nil { return err } - w.enc = xml.NewEncoder(w.w) - return w.enc.EncodeToken(xml.StartElement{ - Name: xml.Name{ + w.enc = ixml.NewEncoder(w.w) + return w.enc.EncodeToken(ixml.StartElement{ + Name: ixml.Name{ Space: "DAV:", Local: "multistatus", }, - Attr: []xml.Attr{{ - Name: xml.Name{Space: "xmlns", Local: "D"}, + Attr: []ixml.Attr{{ + Name: ixml.Name{Space: "xmlns", Local: "D"}, Value: "DAV:", }}, }) @@ -329,17 +379,17 @@ if w.enc == nil { return nil } - var end []xml.Token + var end []ixml.Token if w.responseDescription != "" { - name := xml.Name{Space: "DAV:", Local: "responsedescription"} + name := ixml.Name{Space: "DAV:", Local: 
"responsedescription"} end = append(end, - xml.StartElement{Name: name}, - xml.CharData(w.responseDescription), - xml.EndElement{Name: name}, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, ) } - end = append(end, xml.EndElement{ - Name: xml.Name{Space: "DAV:", Local: "multistatus"}, + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, }) for _, t := range end { err := w.enc.EncodeToken(t) @@ -350,12 +400,9 @@ return w.enc.Flush() } -// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) -type proppatchProps []Property - -var xmlLangName = xml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} -func xmlLang(s xml.StartElement, d string) string { +func xmlLang(s ixml.StartElement, d string) string { for _, attr := range s.Attr { if attr.Name == xmlLangName { return attr.Value @@ -366,19 +413,19 @@ type xmlValue []byte -func (v *xmlValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { // The XML value of a property can be arbitrary, mixed-content XML. // To make sure that the unmarshalled value contains all required // namespaces, we encode all the property value XML tokens into a // buffer. This forces the encoder to redeclare any used namespaces. 
var b bytes.Buffer - e := xml.NewEncoder(&b) + e := ixml.NewEncoder(&b) for { t, err := next(d) if err != nil { return err } - if e, ok := t.(xml.EndElement); ok && e.Name == start.Name { + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { break } if err = e.EncodeToken(t); err != nil { @@ -393,6 +440,9 @@ return nil } +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + // UnmarshalXML appends the property names and values enclosed within start // to ps. // @@ -401,7 +451,7 @@ // // UnmarshalXML returns an error if start does not contain any properties or if // property values contain syntactically incorrect XML. -func (ps *proppatchProps) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { lang := xmlLang(start, "") for { t, err := next(d) @@ -409,15 +459,15 @@ return err } switch elem := t.(type) { - case xml.EndElement: + case ixml.EndElement: if len(*ps) == 0 { return fmt.Errorf("%s must not be empty", start.Name.Local) } return nil - case xml.StartElement: + case ixml.StartElement: p := Property{ - XMLName: t.(xml.StartElement).Name, - Lang: xmlLang(t.(xml.StartElement), lang), + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), } err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) if err != nil { @@ -431,29 +481,29 @@ // http://www.webdav.org/specs/rfc4918.html#ELEMENT_set // http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove type setRemove struct { - XMLName xml.Name + XMLName ixml.Name Lang string `xml:"xml:lang,attr,omitempty"` Prop proppatchProps `xml:"DAV: prop"` } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate type propertyupdate struct { - XMLName xml.Name `xml:"DAV: propertyupdate"` + XMLName ixml.Name `xml:"DAV: propertyupdate"` Lang string `xml:"xml:lang,attr,omitempty"` SetRemove []setRemove `xml:",any"` } func 
readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { var pu propertyupdate - if err = xml.NewDecoder(r).Decode(&pu); err != nil { + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { return nil, http.StatusBadRequest, err } for _, op := range pu.SetRemove { remove := false switch op.XMLName { - case xml.Name{Space: "DAV:", Local: "set"}: + case ixml.Name{Space: "DAV:", Local: "set"}: // No-op. - case xml.Name{Space: "DAV:", Local: "remove"}: + case ixml.Name{Space: "DAV:", Local: "remove"}: for _, p := range op.Prop { if len(p.InnerXML) > 0 { return nil, http.StatusBadRequest, errInvalidProppatch diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/xml_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/xml_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/xml_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/webdav/xml_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -6,6 +6,7 @@ import ( "bytes" + "encoding/xml" "fmt" "io" "net/http" @@ -15,7 +16,7 @@ "strings" "testing" - "golang.org/x/net/webdav/internal/xml" + ixml "golang.org/x/net/webdav/internal/xml" ) func TestReadLockInfo(t *testing.T) { @@ -86,7 +87,7 @@ " gopher\n" + "", lockInfo{ - XMLName: xml.Name{Space: "DAV:", Local: "lockinfo"}, + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, Exclusive: new(struct{}), Write: new(struct{}), Owner: owner{ @@ -105,7 +106,7 @@ " \n" + "", lockInfo{ - XMLName: xml.Name{Space: "DAV:", Local: "lockinfo"}, + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, Exclusive: new(struct{}), Write: new(struct{}), Owner: owner{ @@ -147,7 +148,7 @@ " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Propname: new(struct{}), }, }, { @@ -163,7 +164,7 @@ " \n" + "", wantPF: propfind{ - XMLName: 
xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Allprop: new(struct{}), }, }, { @@ -174,7 +175,7 @@ " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Allprop: new(struct{}), Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, @@ -186,7 +187,7 @@ " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Allprop: new(struct{}), Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, @@ -197,7 +198,7 @@ " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, }, { @@ -210,7 +211,7 @@ " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, }, { @@ -220,7 +221,7 @@ " \n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, }, { @@ -230,7 +231,7 @@ " foobar\n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, }, }, { @@ -241,7 +242,7 @@ " *boss*\n" + "", wantPF: propfind{ - XMLName: xml.Name{Space: "DAV:", Local: "propfind"}, + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, Propname: new(struct{}), }, }, { @@ -348,10 +349,6 @@ } func TestMultistatusWriter(t *testing.T) { - if go1Dot4 { - t.Skip("TestMultistatusWriter requires Go version 1.5 or greater") - } - ///The "section x.y.z" test cases come 
from section x.y.z of the spec at // http://www.webdav.org/specs/rfc4918.html testCases := []struct { @@ -802,7 +799,7 @@ var n xmlNormalizer for _, tc := range testCases { - d := xml.NewDecoder(strings.NewReader(tc.input)) + d := ixml.NewDecoder(strings.NewReader(tc.input)) var v xmlValue if err := d.Decode(&v); err != nil { t.Errorf("%s: got error %v, want nil", tc.desc, err) @@ -840,8 +837,8 @@ // * Remove comments, if instructed to do so. // func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { - d := xml.NewDecoder(r) - e := xml.NewEncoder(w) + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) for { t, err := d.Token() if err != nil { @@ -851,18 +848,18 @@ return err } switch val := t.(type) { - case xml.Directive, xml.ProcInst: + case ixml.Directive, ixml.ProcInst: continue - case xml.Comment: + case ixml.Comment: if n.omitComments { continue } - case xml.CharData: + case ixml.CharData: if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { continue } - case xml.StartElement: - start, _ := xml.CopyToken(val).(xml.StartElement) + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) attr := start.Attr[:0] for _, a := range start.Attr { if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { @@ -897,7 +894,7 @@ return normA == normB, nil } -type byName []xml.Attr +type byName []ixml.Attr func (a byName) Len() int { return len(a) } func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/websocket/websocket.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/websocket/websocket.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/websocket/websocket.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/golang.org/x/net/websocket/websocket.go 2016-05-24 07:05:22.000000000 +0000 @@ -144,6 +144,8 @@ } // Conn represents a WebSocket connection. 
+// +// Multiple goroutines may invoke methods on a Conn simultaneously. type Conn struct { config *Config request *http.Request @@ -207,9 +209,6 @@ } n, err = w.Write(msg) w.Close() - if err != nil { - return n, err - } return n, err } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/baked_in.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/baked_in.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/baked_in.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/baked_in.go 2016-05-24 07:05:22.000000000 +0000 @@ -745,7 +745,20 @@ switch fieldKind { case reflect.String: - _, err := url.ParseRequestURI(field.String()) + + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i := strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if s == blank { + return false + } + + _, err := url.ParseRequestURI(s) return err == nil } @@ -760,13 +773,23 @@ switch fieldKind { case reflect.String: - url, err := url.ParseRequestURI(field.String()) - if err != nil { + var i int + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. 
see issue-#237 + if i = strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if s == blank { return false } - if url.Scheme == blank { + url, err := url.ParseRequestURI(s) + + if err != nil || url.Scheme == blank { return false } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/README.md 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -2,10 +2,12 @@ ================ [![Join the chat at https://gitter.im/bluesuncorp/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +![Project status](https://img.shields.io/badge/version-8.17.1-green.svg) [![Build Status](https://semaphoreci.com/api/v1/projects/ec20115f-ef1b-4c7d-9393-cc76aba74eb4/530054/badge.svg)](https://semaphoreci.com/joeybloggs/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v8&service=github)](https://coveralls.io/github/go-playground/validator?branch=v8) [![Go Report Card](http://goreportcard.com/badge/go-playground/validator)](http://goreportcard.com/report/go-playground/validator) [![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v8?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v8) +![License](https://img.shields.io/dub/l/vibe-d.svg) Package validator implements value validations for structs and individual fields based on tags. 
@@ -306,38 +308,38 @@ Benchmarks ------ -###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go version go1.5.2 darwin/amd64 +###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go version go1.5.3 darwin/amd64 ```go go test -cpu=4 -bench=. -benchmem=true PASS -BenchmarkFieldSuccess-4 10000000 176 ns/op 0 B/op 0 allocs/op -BenchmarkFieldFailure-4 2000000 727 ns/op 432 B/op 4 allocs/op -BenchmarkFieldDiveSuccess-4 500000 3220 ns/op 480 B/op 27 allocs/op -BenchmarkFieldDiveFailure-4 500000 3823 ns/op 912 B/op 31 allocs/op -BenchmarkFieldCustomTypeSuccess-4 5000000 368 ns/op 32 B/op 2 allocs/op -BenchmarkFieldCustomTypeFailure-4 2000000 699 ns/op 432 B/op 4 allocs/op -BenchmarkFieldOrTagSuccess-4 1000000 1265 ns/op 16 B/op 1 allocs/op -BenchmarkFieldOrTagFailure-4 1000000 1182 ns/op 464 B/op 6 allocs/op -BenchmarkStructLevelValidationSuccess-4 2000000 739 ns/op 176 B/op 6 allocs/op -BenchmarkStructLevelValidationFailure-4 1000000 1368 ns/op 640 B/op 11 allocs/op -BenchmarkStructSimpleCustomTypeSuccess-4 2000000 965 ns/op 80 B/op 5 allocs/op -BenchmarkStructSimpleCustomTypeFailure-4 1000000 1561 ns/op 688 B/op 11 allocs/op -BenchmarkStructPartialSuccess-4 1000000 1285 ns/op 384 B/op 10 allocs/op -BenchmarkStructPartialFailure-4 1000000 1879 ns/op 832 B/op 15 allocs/op -BenchmarkStructExceptSuccess-4 2000000 1038 ns/op 336 B/op 7 allocs/op -BenchmarkStructExceptFailure-4 1000000 1330 ns/op 384 B/op 10 allocs/op -BenchmarkStructSimpleCrossFieldSuccess-4 1000000 1081 ns/op 128 B/op 6 allocs/op -BenchmarkStructSimpleCrossFieldFailure-4 1000000 1737 ns/op 592 B/op 11 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldSuccess-4 1000000 1790 ns/op 192 B/op 10 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldFailure-4 500000 2431 ns/op 656 B/op 15 allocs/op -BenchmarkStructSimpleSuccess-4 2000000 950 ns/op 48 B/op 3 allocs/op -BenchmarkStructSimpleFailure-4 1000000 1672 
ns/op 688 B/op 11 allocs/op -BenchmarkStructSimpleSuccessParallel-4 5000000 271 ns/op 48 B/op 3 allocs/op -BenchmarkStructSimpleFailureParallel-4 2000000 670 ns/op 688 B/op 11 allocs/op -BenchmarkStructComplexSuccess-4 300000 5828 ns/op 544 B/op 32 allocs/op -BenchmarkStructComplexFailure-4 200000 11382 ns/op 3912 B/op 77 allocs/op -BenchmarkStructComplexSuccessParallel-4 1000000 1739 ns/op 544 B/op 32 allocs/op -BenchmarkStructComplexFailureParallel-4 300000 4682 ns/op 3912 B/op 77 allocs/op +BenchmarkFieldSuccess-4 10000000 167 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-4 2000000 701 ns/op 432 B/op 4 allocs/op +BenchmarkFieldDiveSuccess-4 500000 2937 ns/op 480 B/op 27 allocs/op +BenchmarkFieldDiveFailure-4 500000 3536 ns/op 912 B/op 31 allocs/op +BenchmarkFieldCustomTypeSuccess-4 5000000 341 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-4 2000000 679 ns/op 432 B/op 4 allocs/op +BenchmarkFieldOrTagSuccess-4 1000000 1157 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-4 1000000 1109 ns/op 464 B/op 6 allocs/op +BenchmarkStructLevelValidationSuccess-4 2000000 694 ns/op 176 B/op 6 allocs/op +BenchmarkStructLevelValidationFailure-4 1000000 1311 ns/op 640 B/op 11 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-4 2000000 894 ns/op 80 B/op 5 allocs/op +BenchmarkStructSimpleCustomTypeFailure-4 1000000 1496 ns/op 688 B/op 11 allocs/op +BenchmarkStructPartialSuccess-4 1000000 1229 ns/op 384 B/op 10 allocs/op +BenchmarkStructPartialFailure-4 1000000 1838 ns/op 832 B/op 15 allocs/op +BenchmarkStructExceptSuccess-4 2000000 961 ns/op 336 B/op 7 allocs/op +BenchmarkStructExceptFailure-4 1000000 1218 ns/op 384 B/op 10 allocs/op +BenchmarkStructSimpleCrossFieldSuccess-4 2000000 954 ns/op 128 B/op 6 allocs/op +BenchmarkStructSimpleCrossFieldFailure-4 1000000 1569 ns/op 592 B/op 11 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-4 1000000 1588 ns/op 192 B/op 10 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-4 1000000 2217 ns/op 656 
B/op 15 allocs/op +BenchmarkStructSimpleSuccess-4 2000000 925 ns/op 48 B/op 3 allocs/op +BenchmarkStructSimpleFailure-4 1000000 1650 ns/op 688 B/op 11 allocs/op +BenchmarkStructSimpleSuccessParallel-4 5000000 261 ns/op 48 B/op 3 allocs/op +BenchmarkStructSimpleFailureParallel-4 2000000 758 ns/op 688 B/op 11 allocs/op +BenchmarkStructComplexSuccess-4 300000 5868 ns/op 544 B/op 32 allocs/op +BenchmarkStructComplexFailure-4 200000 10767 ns/op 3912 B/op 77 allocs/op +BenchmarkStructComplexSuccessParallel-4 1000000 1559 ns/op 544 B/op 32 allocs/op +BenchmarkStructComplexFailureParallel-4 500000 3747 ns/op 3912 B/op 77 allocs ``` Complimentary Software diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/validator.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/validator.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/validator.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/validator.go 2016-05-24 07:05:22.000000000 +0000 @@ -512,7 +512,7 @@ fld = typ.Field(i) - if fld.PkgPath != blank { + if fld.PkgPath != blank && !fld.Anonymous { continue } diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/validator_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/validator_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/validator_test.go 2016-02-10 14:34:39.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/go-playground/validator.v8/validator_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -4977,6 +4977,7 @@ {"rtmp://foobar.com", true}, {"http://www.foo_bar.com/", true}, {"http://localhost:3000/", true}, + {"http://foobar.com/#baz", true}, {"http://foobar.com#baz=qux", true}, 
{"http://foobar.com/t$-_.+!*\\'(),", true}, {"http://www.foobar.com/~foobar", true}, @@ -5280,6 +5281,26 @@ errs := validate.Field(s, "email") Equal(t, errs, nil) + s = "Dörte@Sörensen.example.com" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "θσερ@εχαμπλε.ψομ" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "юзер@екзампл.ком" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "उपयोगकर्ता@उदाहरण.कॉम" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "用户@例子.广告" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + s = "" errs = validate.Field(s, "email") NotEqual(t, errs, nil) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/apic.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/apic.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/apic.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/apic.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. 
+func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. 
+func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. 
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if 
(!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. 
+// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/decode.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/decode.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/decode.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/decode.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved 
= n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case 
int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i 
:= 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. 
+ for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/decode_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/decode_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/decode_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/decode_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,988 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. 
+ { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. + + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + 
map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 
4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, + }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. 
+ { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + 
// Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + }, + + // Encode empty lists as zero-length slices. + { + "a: []", + &struct{ A []int }{[]int{}}, + }, + + // UTF-16-LE + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00", + M{"ñoño": "very yes"}, + }, + // UTF-16-LE with surrogate. + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00", + M{"ñoño": "very yes 🟔"}, + }, + + // UTF-16-BE + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n", + M{"ñoño": "very yes"}, + }, + // UTF-16-BE with surrogate. 
+ { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n", + M{"ñoño": "very yes 🟔"}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for _, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var 
value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct { + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + 
c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = 
errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = []int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 10 + label: center/big + +mergeOne: + # Merge one map + << : *CENTER + "r": 10 + label: center/big + +mergeMultiple: + # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + +override: + # Override + << : [ *BIG, *LEFT, *SMALL ] + "x": 1 + label: center/big + +shortTag: + # Explicit short merge tag + !!merge "<<" : [ *CENTER, *BIG ] + label: center/big + +longTag: + # Explicit merge long tag + ! 
"<<" : [ *CENTER, *BIG ] + label: center/big + +inlineMap: + # Inlined map + << : {"x": 1, "y": 2, "r": 10} + label: center/big + +inlineSequenceMap: + # Inlined map in sequence + << : [ *CENTER, {"r": 10} ] + label: center/big +` + +func (s *S) TestMerge(c *C) { + var want = map[interface{}]interface{}{ + "x": 1, + "y": 2, + "r": 10, + "label": "center/big", + } + + var m map[interface{}]interface{} + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) + } +} + +func (s *S) TestMergeStruct(c *C) { + type Data struct { + X, Y, R int + Label string + } + want := Data{1, 2, 10, "center/big"} + + var m map[string]Data + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, Equals, want, Commentf("test %q failed", name)) + } +} + +var unmarshalNullTests = []func() interface{}{ + func() interface{} { var v interface{}; v = "v"; return &v }, + func() interface{} { var s = "s"; return &s }, + func() interface{} { var s = "s"; sptr := &s; return &sptr }, + func() interface{} { var i = 1; return &i }, + func() interface{} { var i = 1; iptr := &i; return &iptr }, + func() interface{} { m := map[string]int{"s": 1}; return &m }, + func() interface{} { m := map[string]int{"s": 1}; return m }, +} + +func (s *S) TestUnmarshalNull(c *C) { + for _, test := range unmarshalNullTests { + item := test() + zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() + err := yaml.Unmarshal([]byte("null"), item) + c.Assert(err, IsNil) + if reflect.TypeOf(item).Kind() == reflect.Map { + c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) + } else { + c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) + } + } +} + +func (s *S) TestUnmarshalSliceOnPreset(c *C) { + 
// Issue #48. + v := struct{ A []int }{[]int{1}} + yaml.Unmarshal([]byte("a: [2]"), &v) + c.Assert(v.A, DeepEquals, []int{2}) +} + +//var data []byte +//func init() { +// var err error +// data, err = ioutil.ReadFile("/tmp/file.yaml") +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkUnmarshal(c *C) { +// var err error +// for i := 0; i < c.N; i++ { +// var v map[string]interface{} +// err = yaml.Unmarshal(data, &v) +// } +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkMarshal(c *C) { +// var v map[string]interface{} +// yaml.Unmarshal(data, &v) +// c.ResetTimer() +// for i := 0; i < c.N; i++ { +// yaml.Marshal(&v) +// } +//} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/emitterc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/emitterc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/emitterc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/emitterc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. 
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return 
yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if 
yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { 
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an achor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' 
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if 
i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', 
')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < 
len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = 
put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if 
!is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/encode.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/encode.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/encode.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/encode.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) 
itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns 
whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + 
+func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/encode_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/encode_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/encode_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/encode_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,501 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 
1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
+ { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) 
MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/LICENSE aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/LICENSE 
--- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/LICENSE 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,188 @@ + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. 
+ + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. 
+ + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. 
+ + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/LICENSE.libyaml aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/LICENSE.libyaml --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/LICENSE.libyaml 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/LICENSE.libyaml 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/parserc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/parserc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/parserc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/parserc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+ +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. 
+ if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else 
{ + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) + break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. 
+ end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, 
start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == 
yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = 
parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
+ } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = 
parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. 
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/readerc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/readerc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/readerc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/readerc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. 
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. 
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. 
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. 
+ switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. 
+ // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. 
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. 
+ if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/README.md aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/README.md --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/README.md 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/README.md 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. 
+ + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! 
+b: + c: 2 + d: + - 3 + - 4 +``` + diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/resolve.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/resolve.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/resolve.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/resolve.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = 
"tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. 
+ plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. + + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. 
+func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/scannerc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/scannerc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/scannerc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/scannerc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,2710 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. 
+// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. 
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. 
Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ?
a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? 
a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. 
+func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) 
+ parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. 
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? 
+ if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' 
&& (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. 
+ //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. 
+ for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
+ if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. 
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. 
+ parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. 
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. 
+ parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. 
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. 
+ if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. 
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+ length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. 
+ */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. 
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. 
+ for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. 
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
+	if is_z(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+			start_mark, "found unexpected end of stream")
+		return false
+	}
+
+	// Consume non-blank characters.
+	leading_blanks := false
+	for !is_blankz(parser.buffer, parser.buffer_pos) {
+		if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+			// It is an escaped single quote.
+			s = append(s, '\'')
+			skip(parser)
+			skip(parser)
+
+		} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+			// It is a right single quote.
+			break
+		} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+			// It is a right double quote.
+			break
+
+		} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+			// It is an escaped line break.
+			if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+				return false
+			}
+			skip(parser)
+			skip_line(parser)
+			leading_blanks = true
+			break
+
+		} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+			// It is an escape sequence.
+			code_length := 0
+
+			// Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? 
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab character that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violate indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/sorter.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/sorter.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/sorter.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/sorter.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/suite_test.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/suite_test.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/suite_test.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/suite_test.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . 
"gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/.travis.yml aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/.travis.yml --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/.travis.yml 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +go_import_path: gopkg.in/yaml.v2 diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/writerc.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/writerc.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/writerc.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/writerc.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. 
+ var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. 
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yaml.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yaml.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yaml.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yaml.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. 
+type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. 
The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only unmarshalled if they are exported (have an upper case +// first letter), and are unmarshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. 
+// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yamlh.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yamlh.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yamlh.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yamlh.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,716 @@ +package yaml + +import ( + 
"io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. 
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. 
+ yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
+ value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). 
+ implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. 
+type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. 
+// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. 
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case 
yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occured. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. 
+ input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). 
+// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. 
+ tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff -Nru aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yamlprivateh.go aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yamlprivateh.go --- aptly-0.9.6/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yamlprivateh.go 1970-01-01 00:00:00.000000000 +0000 +++ aptly-0.9.7/src/github.com/smira/aptly/_vendor/src/gopkg.in/yaml.v2/yamlprivateh.go 2016-05-24 07:05:22.000000000 +0000 @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. 
+ output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. 
+func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +}